newHostVM: Cleanups, improved error messages.

git-svn-id: http://pcsx2.googlecode.com/svn/branches/newHostVM@4022 96395faa-99c1-11dd-bbfe-3dabce05a288
Jake.Stine 2010-11-16 04:53:52 +00:00
parent 239c9f83d8
commit 9a0a89aa13
5 changed files with 120 additions and 80 deletions

View File

@@ -36,6 +36,11 @@ void iopMemoryReserve::Reserve()
 {
 	_parent::Reserve(HostMemoryMap::IOPmem);
 	//_parent::Reserve(EmuConfig.HostMap.IOP);
+}
+
+void iopMemoryReserve::Commit()
+{
+	_parent::Commit();
 	iopMem = (IopVM_MemoryAllocMess*)m_reserve.GetPtr();
 }
@@ -105,6 +110,7 @@ void iopMemoryReserve::Decommit()
 	safe_aligned_free(psxMemWLUT);
 	psxMemRLUT = NULL;
+	iopMem = NULL;
 }
 
 void iopMemoryReserve::Release()
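
The Commit()/Decommit() split introduced above (and mirrored for the EE below) is the heart of the newHostVM work: address space is reserved once, while the backing memory is attached and released on demand. Purely as an illustration of that lifecycle, here is a minimal, Linux-flavoured POSIX sketch with made-up helper names; it is not the project's actual _parent/m_reserve implementation:

#include <cstdio>
#include <sys/mman.h>

#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0   // not defined on every platform; harmless to omit
#endif

// Claim an address range without usable backing memory (roughly what Reserve() does).
static void* ReserveRegion(size_t bytes)
{
	void* p = mmap(nullptr, bytes, PROT_NONE,
	               MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
	return (p == MAP_FAILED) ? nullptr : p;
}

// Make (part of) the reservation usable (roughly what Commit() does).
static bool CommitRegion(void* base, size_t bytes)
{
	return mprotect(base, bytes, PROT_READ | PROT_WRITE) == 0;
}

// Give the pages back to the OS but keep the address range (roughly what Decommit() does).
static bool DecommitRegion(void* base, size_t bytes)
{
	return madvise(base, bytes, MADV_DONTNEED) == 0
	    && mprotect(base, bytes, PROT_NONE) == 0;
}

int main()
{
	const size_t size = 2 * 1024 * 1024;        // 2MB stand-in for the IOP RAM reserve
	void* mem = ReserveRegion(size);
	if (!mem) { std::printf("reserve failed\n"); return 1; }

	if (!CommitRegion(mem, size)) { std::printf("commit failed\n"); return 1; }
	static_cast<char*>(mem)[0] = 1;             // only valid after Commit

	DecommitRegion(mem, size);                  // e.g. while the VM is suspended
	munmap(mem, size);                          // final Release
	return 0;
}

Keeping the reservation alive across Decommit means the region can later be re-committed at the same base address; nulling iopMem (as the second hunk does) makes any use of the pointer while decommitted fail immediately instead of touching unmapped memory.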

View File

@@ -154,14 +154,14 @@ void memMapPhy()
 	vtlb_MapBlock(iopMem->Main,0x1c000000,0x00800000);
 
 	// Generic Handlers; These fallback to mem* stuff...
-	vtlb_MapHandler(tlb_fallback_7,0x14000000,0x10000);
-	vtlb_MapHandler(tlb_fallback_4,0x18000000,0x10000);
-	vtlb_MapHandler(tlb_fallback_5,0x1a000000,0x10000);
-	vtlb_MapHandler(tlb_fallback_6,0x12000000,0x10000);
-	vtlb_MapHandler(tlb_fallback_8,0x1f000000,0x10000);
-	vtlb_MapHandler(tlb_fallback_3,0x1f400000,0x10000);
-	vtlb_MapHandler(tlb_fallback_2,0x1f800000,0x10000);
-	vtlb_MapHandler(tlb_fallback_8,0x1f900000,0x10000);
+	vtlb_MapHandler(tlb_fallback_7,0x14000000, _64kb);
+	vtlb_MapHandler(tlb_fallback_4,0x18000000, _64kb);
+	vtlb_MapHandler(tlb_fallback_5,0x1a000000, _64kb);
+	vtlb_MapHandler(tlb_fallback_6,0x12000000, _64kb);
+	vtlb_MapHandler(tlb_fallback_8,0x1f000000, _64kb);
+	vtlb_MapHandler(tlb_fallback_3,0x1f400000, _64kb);
+	vtlb_MapHandler(tlb_fallback_2,0x1f800000, _64kb);
+	vtlb_MapHandler(tlb_fallback_8,0x1f900000, _64kb);
 
 	// Hardware Register Handlers : specialized/optimized per-page handling of HW register accesses
 	// (note that hw_by_page handles are assigned in memReset prior to calling this function)
@@ -187,9 +187,9 @@ void memMapKernelMem()
 	//lower 512 mb: direct map
 	//vtlb_VMap(0x00000000,0x00000000,0x20000000);
 	//0x8* mirror
-	vtlb_VMap(0x80000000,0x00000000,0x20000000);
+	vtlb_VMap(0x80000000, 0x00000000, _1mb*512);
 	//0xa* mirror
-	vtlb_VMap(0xA0000000,0x00000000,0x20000000);
+	vtlb_VMap(0xA0000000, 0x00000000, _1mb*512);
 }
 
 //what do do with these ?
@@ -630,6 +630,11 @@ void eeMemoryReserve::Reserve()
 {
 	_parent::Reserve(HostMemoryMap::EEmem);
 	//_parent::Reserve(EmuConfig.HostMap.IOP);
+}
+
+void eeMemoryReserve::Commit()
+{
+	_parent::Commit();
 	eeMem = (EEVM_MemoryAllocMess*)m_reserve.GetPtr();
 }
@@ -762,6 +767,12 @@ void eeMemoryReserve::Reset()
 	LoadBIOS();
 }
 
+void eeMemoryReserve::Decommit()
+{
+	_parent::Decommit();
+	eeMem = NULL;
+}
+
 void eeMemoryReserve::Release()
 {
 	safe_delete(mmap_faultHandler);
@@ -827,6 +838,8 @@ static __aligned16 vtlb_PageProtectionInfo m_PageProtectInfo[Ps2MemSize::MainRam
 //
 int mmap_GetRamPageInfo( u32 paddr )
 {
+	pxAssume( eeMem );
+
 	paddr &= ~0xfff;
 
 	uptr ptr = (uptr)PSM( paddr );
@@ -842,6 +855,8 @@ int mmap_GetRamPageInfo( u32 paddr )
 // paddr - physically mapped PS2 address
 void mmap_MarkCountedRamPage( u32 paddr )
 {
+	pxAssume( eeMem );
+
 	paddr &= ~0xfff;
 
 	uptr ptr = (uptr)PSM( paddr );
@@ -869,6 +884,8 @@ void mmap_MarkCountedRamPage( u32 paddr )
 // from code residing in this page will use manual protection.
 static __fi void mmap_ClearCpuBlock( uint offset )
 {
+	pxAssume( eeMem );
+
 	int rampage = offset >> 12;
 
 	// Assertion: This function should never be run on a block that's already under
@@ -883,6 +900,8 @@ static __fi void mmap_ClearCpuBlock( uint offset )
 
 void mmap_PageFaultHandler::OnPageFaultEvent( const PageFaultInfo& info, bool& handled )
 {
+	pxAssume( eeMem );
+
 	// get bad virtual address
 	uptr offset = info.addr - (uptr)eeMem->Main;
 	if( offset >= Ps2MemSize::MainRam ) return;
@@ -899,5 +918,5 @@ void mmap_ResetBlockTracking()
 {
 	//DbgCon.WriteLn( "vtlb/mmap: Block Tracking reset..." );
 	memzero( m_PageProtectInfo );
-	HostSys::MemProtect( eeMem->Main, Ps2MemSize::MainRam, PageAccess_ReadWrite() );
+	if (eeMem) HostSys::MemProtect( eeMem->Main, Ps2MemSize::MainRam, PageAccess_ReadWrite() );
 }
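
The pxAssume( eeMem ) checks and the guarded MemProtect call above exist because main memory can now be decommitted out from under these helpers. The functions themselves implement write-protection based block tracking: protect a RAM page, let the first write fault, and use the fault to invalidate recompiled code before restoring write access. As a rough, self-contained illustration of that general mechanism only (POSIX/Linux signals, hypothetical names, nothing like the real mmap_PageFaultHandler plumbing):

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static unsigned char* s_ram = NULL;             // stand-in for the emulated main RAM
static size_t s_pagesize = 0;
static const size_t s_pages = 16;
static volatile sig_atomic_t s_faultedPage = -1;

static void onWriteFault(int, siginfo_t* info, void*)
{
	unsigned char* addr = (unsigned char*)info->si_addr;
	if (addr < s_ram || addr >= s_ram + s_pages * s_pagesize)
	{
		signal(SIGSEGV, SIG_DFL);               // not one of our pages: crash normally
		return;
	}
	size_t page = (size_t)(addr - s_ram) / s_pagesize;
	s_faultedPage = (sig_atomic_t)page;
	// A real emulator would invalidate the recompiled blocks for this page here,
	// then fall back to manual protection for it (see mmap_ClearCpuBlock above).
	mprotect(s_ram + page * s_pagesize, s_pagesize, PROT_READ | PROT_WRITE);
}

int main()
{
	s_pagesize = (size_t)sysconf(_SC_PAGESIZE);
	s_ram = (unsigned char*)mmap(NULL, s_pages * s_pagesize,
		PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	struct sigaction sa;
	memset(&sa, 0, sizeof(sa));
	sa.sa_flags = SA_SIGINFO;
	sa.sa_sigaction = onWriteFault;
	sigaction(SIGSEGV, &sa, NULL);

	// "Mark" page 3: it stays read-only until something writes to it.
	mprotect(s_ram + 3 * s_pagesize, s_pagesize, PROT_READ);

	s_ram[3 * s_pagesize] = 0x42;               // faults once, handler re-enables writes
	printf("faulted page %d, value 0x%02x\n", (int)s_faultedPage, s_ram[3 * s_pagesize]);
	return 0;
}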

View File

@@ -256,65 +256,55 @@ _tmpl(void) vtlbUnmappedPWriteLg(u32 addr,const OperandType* data) { vtlb_BusErr
 static mem8_t __fastcall vtlbDefaultPhyRead8(u32 addr)
 {
-	Console.Error("vtlbDefaultPhyRead8: 0x%08X", addr);
-	pxFailDev("(VTLB) Attempted read from an unmapped physical address.");
+	pxFailDev(pxsFmt("(VTLB) Attempted read8 from unmapped physical address @ 0x%08X.", addr));
 	return 0;
 }
 
 static mem16_t __fastcall vtlbDefaultPhyRead16(u32 addr)
 {
-	Console.Error("vtlbDefaultPhyRead16: 0x%08X", addr);
-	pxFailDev("(VTLB) Attempted read from an unmapped physical address.");
+	pxFailDev(pxsFmt("(VTLB) Attempted read16 from unmapped physical address @ 0x%08X.", addr));
 	return 0;
 }
 
 static mem32_t __fastcall vtlbDefaultPhyRead32(u32 addr)
 {
-	Console.Error("vtlbDefaultPhyRead32: 0x%08X", addr);
-	pxFailDev("(VTLB) Attempted read from an unmapped physical address.");
+	pxFailDev(pxsFmt("(VTLB) Attempted read32 from unmapped physical address @ 0x%08X.", addr));
 	return 0;
 }
 
 static void __fastcall vtlbDefaultPhyRead64(u32 addr, mem64_t* dest)
 {
-	Console.Error("vtlbDefaultPhyRead64: 0x%08X", addr);
-	pxFailDev("(VTLB) Attempted read from an unmapped physical address.");
+	pxFailDev(pxsFmt("(VTLB) Attempted read64 from unmapped physical address @ 0x%08X.", addr));
 }
 
 static void __fastcall vtlbDefaultPhyRead128(u32 addr, mem128_t* dest)
 {
-	Console.Error("vtlbDefaultPhyRead128: 0x%08X", addr);
-	pxFailDev("(VTLB) Attempted read from an unmapped physical address.");
+	pxFailDev(pxsFmt("(VTLB) Attempted read128 from unmapped physical address @ 0x%08X.", addr));
 }
 
 static void __fastcall vtlbDefaultPhyWrite8(u32 addr, mem8_t data)
 {
-	Console.Error("vtlbDefaultPhyWrite8: 0x%08X",addr);
-	pxFailDev("(VTLB) Attempted write to an unmapped physical address.");
+	pxFailDev(pxsFmt("(VTLB) Attempted write8 to unmapped physical address @ 0x%08X.", addr));
 }
 
 static void __fastcall vtlbDefaultPhyWrite16(u32 addr, mem16_t data)
 {
-	Console.Error("vtlbDefaultPhyWrite16: 0x%08X",addr);
-	pxFailDev("(VTLB) Attempted write to an unmapped physical address.");
+	pxFailDev(pxsFmt("(VTLB) Attempted write16 to unmapped physical address @ 0x%08X.", addr));
 }
 
 static void __fastcall vtlbDefaultPhyWrite32(u32 addr, mem32_t data)
 {
-	Console.Error("vtlbDefaultPhyWrite32: 0x%08X",addr);
-	pxFailDev("(VTLB) Attempted write to an unmapped physical address.");
+	pxFailDev(pxsFmt("(VTLB) Attempted write32 to unmapped physical address @ 0x%08X.", addr));
 }
 
 static void __fastcall vtlbDefaultPhyWrite64(u32 addr,const mem64_t* data)
 {
-	Console.Error("vtlbDefaultPhyWrite64: 0x%08X",addr);
-	pxFailDev("(VTLB) Attempted write to an unmapped physical address.");
+	pxFailDev(pxsFmt("(VTLB) Attempted write64 to unmapped physical address @ 0x%08X.", addr));
 }
 
 static void __fastcall vtlbDefaultPhyWrite128(u32 addr,const mem128_t* data)
 {
-	Console.Error("vtlbDefaultPhyWrite128: 0x%08X",addr);
-	pxFailDev("(VTLB) Attempted write to an unmapped physical address.");
+	pxFailDev(pxsFmt("(VTLB) Attempted write128 to unmapped physical address @ 0x%08X.", addr));
 }
 
 #undef _tmpl
@@ -334,6 +324,8 @@ __ri void vtlb_ReassignHandler( vtlbHandler rv,
 	vtlbMemR8FP* r8,vtlbMemR16FP* r16,vtlbMemR32FP* r32,vtlbMemR64FP* r64,vtlbMemR128FP* r128,
 	vtlbMemW8FP* w8,vtlbMemW16FP* w16,vtlbMemW32FP* w32,vtlbMemW64FP* w64,vtlbMemW128FP* w128 )
 {
+	pxAssume(rv < VTLB_HANDLER_ITEMS);
+
 	vtlbdata.RWFT[0][0][rv] = (void*)((r8!=0) ? r8 : vtlbDefaultPhyRead8);
 	vtlbdata.RWFT[1][0][rv] = (void*)((r16!=0) ? r16 : vtlbDefaultPhyRead16);
 	vtlbdata.RWFT[2][0][rv] = (void*)((r32!=0) ? r32 : vtlbDefaultPhyRead32);
@@ -349,7 +341,7 @@ __ri void vtlb_ReassignHandler( vtlbHandler rv,
 
 vtlbHandler vtlb_NewHandler()
 {
-	pxAssertDev( vtlbHandlerCount < 127, "VTLB allowed handler count exceeded!" );
+	pxAssertDev( vtlbHandlerCount < VTLB_HANDLER_ITEMS, "VTLB handler count overflow!" );
 	return vtlbHandlerCount++;
 }
 
@@ -363,7 +355,7 @@ vtlbHandler vtlb_NewHandler()
 // Returns a handle for the newly created handler See vtlb_MapHandler for use of the return value.
 //
 __ri vtlbHandler vtlb_RegisterHandler( vtlbMemR8FP* r8,vtlbMemR16FP* r16,vtlbMemR32FP* r32,vtlbMemR64FP* r64,vtlbMemR128FP* r128,
 	vtlbMemW8FP* w8,vtlbMemW16FP* w16,vtlbMemW32FP* w32,vtlbMemW64FP* w64,vtlbMemW128FP* w128)
 {
 	vtlbHandler rv = vtlb_NewHandler();
 	vtlb_ReassignHandler( rv, r8, r16, r32, r64, r128, w8, w16, w32, w64, w128 );
@@ -396,15 +388,14 @@ void vtlb_MapHandler(vtlbHandler handler, u32 start, u32 size)
 void vtlb_MapBlock(void* base, u32 start, u32 size, u32 blocksize)
 {
-	s32 baseint = (s32)base;
-
 	verify(0==(start&VTLB_PAGE_MASK));
 	verify(0==(size&VTLB_PAGE_MASK) && size>0);
-	if (blocksize==0)
-		blocksize=size;
+	if (!blocksize)
+		blocksize = size;
 	verify(0==(blocksize&VTLB_PAGE_MASK) && blocksize>0);
 	verify(0==(size%blocksize));
 
+	s32 baseint = (s32)base;
 	u32 end = start + (size - VTLB_PAGE_SIZE);
 	pxAssume( (end>>VTLB_PAGE_BITS) < ArraySize(vtlbdata.pmap) );
@@ -452,67 +443,70 @@ __fi void* vtlb_GetPhyPtr(u32 paddr)
 //virtual mappings
 //TODO: Add invalid paddr checks
-void vtlb_VMap(u32 vaddr,u32 paddr,u32 sz)
+void vtlb_VMap(u32 vaddr,u32 paddr,u32 size)
 {
 	verify(0==(vaddr&VTLB_PAGE_MASK));
 	verify(0==(paddr&VTLB_PAGE_MASK));
-	verify(0==(sz&VTLB_PAGE_MASK) && sz>0);
+	verify(0==(size&VTLB_PAGE_MASK) && size>0);
 
-	while(sz>0)
+	while (size > 0)
 	{
 		s32 pme;
-		if (paddr>=VTLB_PMAP_SZ)
+		if (paddr >= VTLB_PMAP_SZ)
 		{
-			pme=UnmappedPhyHandler0;
-			if (paddr&0x80000000)
-				pme=UnmappedPhyHandler1;
-			pme|=0x80000000;
-			pme|=paddr;// top bit is set anyway ...
+			pme = UnmappedPhyHandler0;
+			if (paddr & 0x80000000)
+				pme = UnmappedPhyHandler1;
+			pme |= 0x80000000;
+			pme |= paddr;// top bit is set anyway ...
 		}
 		else
 		{
-			pme=vtlbdata.pmap[paddr>>VTLB_PAGE_BITS];
+			pme = vtlbdata.pmap[paddr>>VTLB_PAGE_BITS];
 			if (pme<0)
-				pme|=paddr;// top bit is set anyway ...
+				pme |= paddr;// top bit is set anyway ...
 		}
-		vtlbdata.vmap[vaddr>>VTLB_PAGE_BITS]=pme-vaddr;
-		vaddr+=VTLB_PAGE_SIZE;
-		paddr+=VTLB_PAGE_SIZE;
-		sz-=VTLB_PAGE_SIZE;
+		vtlbdata.vmap[vaddr>>VTLB_PAGE_BITS] = pme-vaddr;
+		vaddr += VTLB_PAGE_SIZE;
+		paddr += VTLB_PAGE_SIZE;
+		size -= VTLB_PAGE_SIZE;
 	}
 }
 
-void vtlb_VMapBuffer(u32 vaddr,void* buffer,u32 sz)
+void vtlb_VMapBuffer(u32 vaddr,void* buffer,u32 size)
 {
 	verify(0==(vaddr&VTLB_PAGE_MASK));
-	verify(0==(sz&VTLB_PAGE_MASK) && sz>0);
-	u32 bu8=(u32)buffer;
-	while(sz>0)
+	verify(0==(size&VTLB_PAGE_MASK) && size>0);
+
+	u32 bu8 = (u32)buffer;
+	while (size > 0)
 	{
-		vtlbdata.vmap[vaddr>>VTLB_PAGE_BITS]=bu8-vaddr;
-		vaddr+=VTLB_PAGE_SIZE;
-		bu8+=VTLB_PAGE_SIZE;
-		sz-=VTLB_PAGE_SIZE;
+		vtlbdata.vmap[vaddr>>VTLB_PAGE_BITS] = bu8-vaddr;
+		vaddr += VTLB_PAGE_SIZE;
+		bu8 += VTLB_PAGE_SIZE;
+		size -= VTLB_PAGE_SIZE;
	}
 }
 
-void vtlb_VMapUnmap(u32 vaddr,u32 sz)
+void vtlb_VMapUnmap(u32 vaddr,u32 size)
 {
 	verify(0==(vaddr&VTLB_PAGE_MASK));
-	verify(0==(sz&VTLB_PAGE_MASK) && sz>0);
+	verify(0==(size&VTLB_PAGE_MASK) && size>0);
 
-	while(sz>0)
+	while (size > 0)
 	{
-		u32 handl=UnmappedVirtHandler0;
-		if (vaddr&0x80000000)
+		u32 handl = UnmappedVirtHandler0;
+		if (vaddr & 0x80000000)
 		{
-			handl=UnmappedVirtHandler1;
+			handl = UnmappedVirtHandler1;
 		}
-		handl|=vaddr; // top bit is set anyway ...
-		handl|=0x80000000;
-		vtlbdata.vmap[vaddr>>VTLB_PAGE_BITS]=handl-vaddr;
-		vaddr+=VTLB_PAGE_SIZE;
-		sz-=VTLB_PAGE_SIZE;
+
+		handl |= vaddr; // top bit is set anyway ...
+		handl |= 0x80000000;
+
+		vtlbdata.vmap[vaddr>>VTLB_PAGE_BITS] = handl-vaddr;
+		vaddr += VTLB_PAGE_SIZE;
+		size -= VTLB_PAGE_SIZE;
 	}
 }
@@ -589,6 +583,14 @@ void vtlb_Core_Free()
 	safe_aligned_free( vtlbdata.vmap );
 }
 
+static wxString GetHostVmErrorMsg()
+{
+	return pxE(".Error:HostVmReserve",
+		L"Your system is too low on virtual resources for PCSX2 to run. This can be "
+		L"caused by having a small or disabled swapfile, or by other programs that are "
+		L"hogging resources."
+	);
+}
+
 // --------------------------------------------------------------------------------------
 //  VtlbMemoryReserve  (implementations)
 // --------------------------------------------------------------------------------------
@@ -605,16 +607,23 @@ void VtlbMemoryReserve::SetBaseAddr( uptr newaddr )
 void VtlbMemoryReserve::Reserve( sptr hostptr )
 {
-	m_reserve.ReserveAt( hostptr );
-	if (!m_reserve.IsOk())
-		throw Exception::OutOfMemory( m_reserve.GetName() );
+	if (!m_reserve.ReserveAt( hostptr ))
+	{
+		throw Exception::OutOfMemory( m_reserve.GetName() )
+			.SetDiagMsg(L"Vtlb memory could not be reserved.")
+			.SetUserMsg(GetHostVmErrorMsg());
+	}
 }
 
 void VtlbMemoryReserve::Commit()
 {
 	if (IsCommitted()) return;
 	if (!m_reserve.Commit())
-		throw Exception::OutOfMemory( m_reserve.GetName() );
+	{
+		throw Exception::OutOfMemory( m_reserve.GetName() )
+			.SetDiagMsg(L"Vtlb memory could not be committed.")
+			.SetUserMsg(GetHostVmErrorMsg());
+	}
 }
 
 void VtlbMemoryReserve::Reset()
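
For readers new to this code: the vmap table filled by vtlb_VMap/vtlb_VMapBuffer stores, per guest page, a delta such that adding the guest address back yields either a host pointer (direct-mapped memory) or a negative value that routes the access to a handler, which is what the "top bit is set anyway" comments are about. A toy model of that delta encoding follows, with simplified names and the entries widened to intptr_t so the sketch also runs on 64-bit hosts (the real table uses s32 entries):

#include <cstdint>
#include <cstdio>
#include <vector>

static const uint32_t PAGE_BITS = 12;
static const uint32_t PAGE_SIZE = 1u << PAGE_BITS;

// Toy guest space of 1MB; INTPTR_MIN marks "no direct mapping" so lookups stay negative.
static std::vector<intptr_t> vmap(1u << (20 - PAGE_BITS), INTPTR_MIN);
static uint8_t ram[4 * PAGE_SIZE];              // toy direct-mapped RAM

static void VMapBuffer(uint32_t vaddr, void* buffer, uint32_t size)
{
	uint8_t* host = (uint8_t*)buffer;
	while (size > 0)                            // same per-page loop shape as above
	{
		vmap[vaddr >> PAGE_BITS] = (intptr_t)host - (intptr_t)vaddr;
		vaddr += PAGE_SIZE;
		host  += PAGE_SIZE;
		size  -= PAGE_SIZE;
	}
}

static uint8_t Read8(uint32_t vaddr)
{
	// The hot path is a single table load plus an add.
	intptr_t ppf = vmap[vaddr >> PAGE_BITS] + (intptr_t)vaddr;
	if (ppf < 0)
	{
		std::printf("0x%05x has no direct mapping; a real vtlb dispatches to a handler here\n", vaddr);
		return 0;
	}
	return *(uint8_t*)ppf;
}

int main()
{
	VMapBuffer(0x00000, ram, sizeof(ram));      // map 16KB of guest space onto 'ram'
	ram[0x1234] = 0x5a;

	std::printf("direct read:   0x%02x\n", Read8(0x01234));
	std::printf("unmapped read: 0x%02x\n", Read8(0x80000));
	return 0;
}

In the real code the negative case additionally packs a handler index and the physical address into the value, which is why vtlb_VMap ORs the handler id, 0x80000000 and the address together before subtracting vaddr.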

View File

@@ -128,8 +128,10 @@ public:
 	}
 
 	void Reserve();
-	void Release();
+	void Commit();
+	void Decommit();
 	void Reset();
+	void Release();
 };
@@ -147,9 +149,10 @@ public:
 	}
 
 	void Reserve();
+	void Commit();
+	void Decommit();
 	void Release();
 	void Reset();
-	void Decommit();
 };
@@ -182,12 +185,14 @@ namespace vtlb_private
 	static const uint VTLB_PMAP_ITEMS = VTLB_PMAP_SZ / VTLB_PAGE_SIZE;
 	static const uint VTLB_VMAP_ITEMS = _4gb / VTLB_PAGE_SIZE;
+	static const uint VTLB_HANDLER_ITEMS = 128;
 
 	struct MapData
 	{
 		// first indexer -- 8/16/32/64/128 bit tables [values 0-4]
 		// second indexer -- read/write [0 or 1]
 		// third indexer -- 128 possible handlers!
-		void* RWFT[5][2][128];
+		void* RWFT[5][2][VTLB_HANDLER_ITEMS];
 
 		s32 pmap[VTLB_PMAP_ITEMS]; //512KB
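
The new VTLB_HANDLER_ITEMS constant replaces the bare 128 used to size the RWFT dispatch table, which is indexed by operand width (0 = 8-bit through 4 = 128-bit), direction (read/write), and handler slot. Below is a stripped-down stand-in showing how that kind of table is filled and called; only the 32-bit read column is wired up and every name is hypothetical (the real registration path is vtlb_NewHandler/vtlb_ReassignHandler):

#include <cstdint>
#include <cstdio>

typedef uint32_t mem32_t;
typedef mem32_t (*ReadFn32)(uint32_t addr);

static mem32_t DefaultPhyRead32(uint32_t addr)
{
	std::printf("(VTLB) Attempted read32 from unmapped physical address @ 0x%08X.\n", addr);
	return 0;
}

static mem32_t HwRead32(uint32_t addr)
{
	return 0xDEAD0000u | (addr & 0xFFFF);       // pretend hardware register bank
}

static const unsigned VTLB_HANDLER_ITEMS = 128;
static void* RWFT[5][2][VTLB_HANDLER_ITEMS];    // [width][read=0/write=1][handler slot]
static unsigned handlerCount = 0;

static unsigned RegisterHandler(ReadFn32 r32)
{
	// Like vtlb_ReassignHandler: a null callback falls back to the default stub.
	unsigned rv = handlerCount++;
	RWFT[2][0][rv] = (void*)(r32 ? r32 : DefaultPhyRead32);
	return rv;
}

int main()
{
	unsigned hwHandler   = RegisterHandler(HwRead32);
	unsigned nullHandler = RegisterHandler(nullptr);

	std::printf("0x%08X\n", ((ReadFn32)RWFT[2][0][hwHandler])(0x10000000));
	std::printf("0x%08X\n", ((ReadFn32)RWFT[2][0][nullHandler])(0x14000000));
	return 0;
}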

View File

@@ -583,8 +583,9 @@ static void recThrowHardwareDeficiency( const wxChar* extFail )
 wxString GetMsg_RecVmFailed()
 {
 	return pxE( ".Error:Recompiler:VirtualMemoryAlloc",
-		L"This recompiler was unable to reserve contiguous memory required "
-		L"for internal caches. This problem may be fixable by reducing the default "
+		L"This recompiler was unable to reserve contiguous memory required for internal caches. "
+		L"This error can be caused by low virtual memory resources, such as a small or disabled swapfile, "
+		L"or by another program that is hogging a lot of memory. You can also try reducing the default "
 		L"cache sizes for all PCSX2 recompilers, found under Host Settings."
 	);
 }
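
Both of the reworked messages follow the same pattern as the new VtlbMemoryReserve errors: a terse diagnostic string for the log plus a longer user-facing explanation that names the likely causes (low virtual memory, a small or disabled swapfile, other memory-hungry programs). Here is a generic sketch of that diagnostic/user split with chained setters; it is a simplified stand-in, not PCSX2's Exception framework:

#include <cstdio>
#include <stdexcept>
#include <string>

class OutOfMemoryError : public std::runtime_error
{
public:
	explicit OutOfMemoryError(const std::string& allocName)
		: std::runtime_error("OutOfMemory: " + allocName) {}

	OutOfMemoryError& SetDiagMsg(const std::string& msg) { m_diag = msg; return *this; }
	OutOfMemoryError& SetUserMsg(const std::string& msg) { m_user = msg; return *this; }

	const std::string& DiagMsg() const { return m_diag; }
	const std::string& UserMsg() const { return m_user; }

private:
	std::string m_diag;
	std::string m_user;
};

static std::string HostVmErrorMsg()
{
	return "Your system is too low on virtual resources for PCSX2 to run. This can be "
	       "caused by having a small or disabled swapfile, or by other programs that "
	       "are hogging resources.";
}

int main()
{
	try
	{
		// Same shape as the throw sites in VtlbMemoryReserve::Reserve/Commit above.
		throw OutOfMemoryError("Vtlb")
			.SetDiagMsg("Vtlb memory could not be reserved.")
			.SetUserMsg(HostVmErrorMsg());
	}
	catch (const OutOfMemoryError& ex)
	{
		std::printf("log:  %s (%s)\n", ex.what(), ex.DiagMsg().c_str());
		std::printf("user: %s\n", ex.UserMsg().c_str());
	}
	return 0;
}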