newHostVM branch: work-in-progress stuff...

git-svn-id: http://pcsx2.googlecode.com/svn/branches/newHostVM@3958 96395faa-99c1-11dd-bbfe-3dabce05a288
This commit is contained in:
Jake.Stine 2010-10-22 16:23:52 +00:00
parent 495e872e98
commit 01541f2c92
38 changed files with 859 additions and 271 deletions

View File

@ -133,14 +133,83 @@ protected:
virtual void DoDeletion();
};
// --------------------------------------------------------------------------------------
// PageProtectionMode
// --------------------------------------------------------------------------------------
class PageProtectionMode
{
protected:
bool m_read;
bool m_write;
bool m_exec;
public:
PageProtectionMode()
{
All( false );
}
PageProtectionMode& Read( bool allow=true )
{
m_read = allow;
return *this;
}
PageProtectionMode& Write( bool allow=true )
{
m_write = allow;
return *this;
}
PageProtectionMode& Execute( bool allow=true )
{
m_exec = allow;
return *this;
}
PageProtectionMode& All( bool allow=true )
{
m_read = m_write = m_exec = allow;
return *this;
}
bool CanRead() const { return m_read; }
bool CanWrite() const { return m_write; }
bool CanExecute() const { return m_exec && m_read; }
wxString ToString() const;
};
static __fi PageProtectionMode PageAccess_None()
{
return PageProtectionMode();
}
static __fi PageProtectionMode PageAccess_ReadOnly()
{
return PageProtectionMode().Read();
}
static __fi PageProtectionMode PageAccess_WriteOnly()
{
return PageProtectionMode().Write();
}
static __fi PageProtectionMode PageAccess_ReadWrite()
{
return PageAccess_ReadOnly().Write();
}
static __fi PageProtectionMode PageAccess_ExecOnly()
{
return PageAccess_ReadOnly().Execute();
}
static __fi PageProtectionMode PageAccess_Any()
{
return PageProtectionMode().All();
}
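// Usage sketch (illustrative, not part of this diff): the PageAccess_* helpers above are
// shorthand for chaining the PageProtectionMode setters directly at the call site, e.g.:
//
//   HostSys::MemProtect( buf, size, PageProtectionMode().Read().Write() );  // same as PageAccess_ReadWrite()
//   HostSys::MemProtect( buf, size, PageAccess_ExecOnly() );                // read + exec, no write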
// --------------------------------------------------------------------------------------
// HostSys
// --------------------------------------------------------------------------------------
@ -148,22 +217,25 @@ enum PageProtectionMode
// platform prior to wxWidgets .. it should prolly be removed -- air)
namespace HostSys
{
void* MmapReserve(uptr base, size_t size);
void MmapCommit(void* base, size_t size);
void MmapReset(void* base, size_t size);
// Maps a block of memory for use as a recompiled code buffer.
// The allocated block has code execution privileges.
// Returns NULL on allocation failure.
extern void* Mmap(uptr base, size_t size);
// Unmaps a block allocated by SysMmap
extern void Munmap(uptr base, size_t size);
extern void MemProtect( void* baseaddr, size_t size, const PageProtectionMode& mode );
extern void Munmap( void* base, size_t size );
template< uint size >
void MemProtectStatic( u8 (&arr)[size], const PageProtectionMode& mode )
{
MemProtect( arr, size, mode );
}
}

View File

@ -19,46 +19,104 @@
#include <sys/mman.h>
#include <signal.h>
static __ri void PageSizeAssertionTest( size_t size )
{
pxAssert( (__pagesize == getpagesize()), pxsFmt(
L"Internal system error: Operating system pagesize does not match compiled pagesize.\n"
L"\tOS Page Size: 0x%x (%u), Compiled Page Size: 0x%x (%u)",
getpagesize(), getpagesize(), __pagesize, __pagesize )
);
pxAssertDev( (size & (__pagesize-1)) == 0, pxsFmt(
L"Memory block size must be a multiple of the target platform's page size.\n"
L"\tPage Size: 0x%x (%u), Block Size: 0x%x (%u)",
__pagesize, __pagesize, size, size )
);
}
void* HostSys::MmapReserve(uptr base, size_t size)
{
PageSizeAssertionTest(size);
// On linux a reserve-without-commit is performed by using mmap on a read-only
// or anonymous source, with PROT_NONE (no-access) permission. Since the mapping
// is completely inaccessible, the OS will simply reserve it and will not put it
// against the commit table.
return mmap((void*)base, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}
void HostSys::MmapCommit(void* base, size_t size)
{
// In linux, reserved memory is automatically committed when its permissions are
// changed to something other than PROT_NONE. Since PCSX2 code *must* do that itself
// prior to making use of memory anyway, this call should be a NOP.
}
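// Caller-side sketch (assumes the API above; illustrative only): on Linux the actual
// commit happens when MemProtect grants access to a reserved region.
//
//   void* base = HostSys::MmapReserve(0, _1mb);                            // PROT_NONE reservation
//   HostSys::MmapCommit(base, __pagesize * 4);                             // no-op here
//   HostSys::MemProtect(base, __pagesize * 4, PageAccess_ReadWrite());     // pages become usable now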
void HostSys::MmapReset(void* base, size_t size)
{
// On linux the only way to reset the memory is to unmap and remap it as PROT_NONE.
// That forces linux to unload all committed pages and start from scratch.
// FIXME: Ideally this code would have some threading lock on it to prevent any other
// malloc/free code in the current process from interfering with the operation, but I
// can't think of any good way to do that. (generally it shouldn't be a problem in
// PCSX2 anyway, since MmapReset is only called when the ps2vm is suspended; so that
// pretty well stops all PCSX2 threads anyway).
Munmap(base, size);
void* result = Mmap(base, size);
pxAssertRel (result == base, pxsFmt(
"Virtual memory decommit failed: memory at 0x%08X -> 0x%08X could not be remapped. "
"This is likely caused by multi-thread memory contention.", base, base+size
));
}
void* HostSys::Mmap(void* base, size_t size)
{
PageSizeAssertionTest(size);
// MAP_ANONYMOUS - means we have no associated file handle (or device).
return mmap(base, size, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}
void HostSys::Munmap(void* base, size_t size)
{
munmap(base, size);
}
void HostSys::MemProtect( void* baseaddr, size_t size, const PageProtectionMode& mode )
{
PageSizeAssertionTest(size);
uint lnxmode = 0;
if (mode.CanWrite()) lnxmode |= PROT_WRITE;
if (mode.CanRead()) lnxmode |= PROT_READ;
if (mode.CanExecute()) lnxmode |= PROT_EXEC | PROT_READ;
int result = mprotect( baseaddr, size, lnxmode );
if (result)
{
switch(errno)
{
case EINVAL:
pxFailDev(pxsFmt(L"mprotect returned EINVAL @ 0x%08X -> 0x%08X (mode=%s)",
baseaddr, (uptr)baseaddr+size, mode.ToString().c_str()
));
break;
case ENOMEM:
throw Exception::OutOfMemory( pxsFmt( L"mprotect failed @ 0x%08X -> 0x%08X (mode=%s)",
baseaddr, (uptr)baseaddr+size, mode.ToString().c_str()
));
break;
case EACCES:
break;
}
}
}
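// Worked example for the mapping above: PageAccess_ExecOnly() reports read+exec, so
// lnxmode resolves to PROT_READ | PROT_EXEC; PageAccess_None() leaves lnxmode at zero,
// which is PROT_NONE.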

View File

@ -17,46 +17,81 @@
#include "Utilities/RedtapeWindows.h" #include "Utilities/RedtapeWindows.h"
#include <winnt.h> #include <winnt.h>
namespace HostSys void* HostSys::MmapReserve(uptr base, size_t size)
{ {
void *Mmap(uptr base, u32 size) return VirtualAlloc((void*)base, size, MEM_RESERVE, PAGE_NOACCESS);
}
void HostSys::MmapCommit(void* base, size_t size)
{
// Execution flag for this and the Reserve should match... ?
void* result = VirtualAlloc(base, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
pxAssumeDev(result, L"VirtualAlloc COMMIT failed: " + Exception::WinApiError().GetMsgFromWindows());
}
void HostSys::MmapReset(void* base, size_t size)
{
// Execution flag is actually irrelevant for this operation, but whatever.
//void* result = VirtualAlloc((void*)base, size, MEM_RESET, PAGE_EXECUTE_READWRITE);
//pxAssumeDev(result, L"VirtualAlloc RESET failed: " + Exception::WinApiError().GetMsgFromWindows());
VirtualFree(base, size, MEM_DECOMMIT);
}
void* HostSys::Mmap(uptr base, size_t size)
{
return VirtualAlloc((void*)base, size, MEM_RESERVE | MEM_COMMIT, PAGE_EXECUTE_READWRITE);
}
void HostSys::Munmap(uptr base, size_t size)
{
if (!base) return;
VirtualFree((void*)base, size, MEM_DECOMMIT);
VirtualFree((void*)base, 0, MEM_RELEASE);
}
void HostSys::MemProtect( void* baseaddr, size_t size, const PageProtectionMode& mode )
{
pxAssertDev( ((size & (__pagesize-1)) == 0), wxsFormat(
L"Memory block size must be a multiple of the target platform's page size.\n"
L"\tPage Size: 0x%04x (%d), Block Size: 0x%04x (%d)",
__pagesize, __pagesize, size, size )
);
DWORD winmode = PAGE_NOACCESS;
// Windows has some really bizarre memory protection enumeration that uses bitwise
// numbering (like flags) but is in fact not a flag value. *Someone* from the early
// microsoft days wasn't a very good coder, me thinks. --air
if (mode.CanExecute())
{
winmode = mode.CanWrite() ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ;
}
else if (mode.CanRead())
{
winmode = mode.CanWrite() ? PAGE_READWRITE : PAGE_READONLY;
}
DWORD OldProtect; // enjoy my uselessness, yo!
if (!VirtualProtect( baseaddr, size, winmode, &OldProtect ))
{
throw Exception::WinApiError().SetDiagMsg(
pxsFmt(L"VirtualProtect failed @ 0x%08X -> 0x%08X (mode=%s)",
baseaddr, (uptr)baseaddr + size, mode.ToString().c_str()
));
}
}
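// Worked example for the mapping above: PageAccess_ReadWrite() resolves to PAGE_READWRITE,
// while PageAccess_ExecOnly() (read+exec, no write) resolves to PAGE_EXECUTE_READ.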
wxString PageProtectionMode::ToString() const
{
wxString modeStr;
if (m_read) modeStr += L"Read";
if (m_write) modeStr += L"Write";
if (m_exec) modeStr += L"Exec";
if (modeStr.Length() <= 5) modeStr += L"Only";
return modeStr;
}
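// Example outputs (for reference): read-only yields "ReadOnly", read+write yields
// "ReadWrite", read+write+exec yields "ReadWriteExec"; an all-false mode currently
// comes out as just "Only".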

View File

@ -675,7 +675,7 @@ void wxAppWithHelpers::OnDeleteThread( wxCommandEvent& evt )
return;
}
pxThreadLog.Write(thr->GetName(), (wxString)L"Thread object deleted successfully" + (thr->HasPendingException() ? L" [exception pending!]" : wxEmptyString));
thr->RethrowException();
}

View File

@ -59,13 +59,13 @@ void x86capabilities::SIMD_EstablishMXCSRmask()
// the fxsave buffer must be 16-byte aligned to avoid GPF. I just save it to an
// unused portion of recSSE, since it has plenty of room to spare.
HostSys::MemProtectStatic( recSSE, PageAccess_ReadWrite() );
xSetPtr( recSSE );
xFXSAVE( ptr[&targetFXSAVE] );
xRET();
HostSys::MemProtectStatic( recSSE, PageAccess_ExecOnly() );
CallAddress( recSSE );
@ -310,7 +310,7 @@ u32 x86capabilities::CalculateMHz() const
// Results of CPU
void x86capabilities::SIMD_ExceptionTest()
{
HostSys::MemProtectStatic( recSSE, PageAccess_ReadWrite() );
//////////////////////////////////////////////////////////////////////////////////////////
// SIMD Instruction Support Detection (Second Pass)
@ -336,7 +336,7 @@ void x86capabilities::SIMD_ExceptionTest()
xMOVDQU( xmm1, ptr[ecx] );
xRET();
HostSys::MemProtectStatic( recSSE, PageAccess_ExecOnly() );
bool sse3_result = _test_instruction( recSSE ); // sse3
bool ssse3_result = _test_instruction( funcSSSE3 );

View File

@ -245,7 +245,7 @@ void isoFile::_WriteBlock(const u8* src, uint lsn)
void isoFile::_WriteBlockD(const u8* src, uint lsn)
{
// Find and ignore blocks that have already been dumped:
for (int i=0; i<m_dtablesize; ++i)
{
if (m_dtable[i] == lsn) return;
}

View File

@ -359,11 +359,19 @@ void JALR()
} } } // end namespace R5900::Interpreter::OpcodeImpl
// --------------------------------------------------------------------------------------
// R5900cpu/intCpu interface (implementations)
// --------------------------------------------------------------------------------------
static void intReserve()
{
// fixme : detect cpu to use the optimized asm code
}
static void intAlloc()
{
// Nothing to do!
}
static void intReset()
@ -419,19 +427,20 @@ static void intShutdown() {
static void intThrowException( const BaseR5900Exception& ex )
{
// No tricks needed; C++ stack unwinding should suffice for MSW and GCC alike.
ex.Rethrow();
}
static void intThrowException( const BaseException& ex )
{
// No tricks needed; C++ stack unwinding should suffice for MSW and GCC alike.
ex.Rethrow();
}
R5900cpu intCpu =
{
intReserve,
intShutdown,
intReset,

View File

@ -137,9 +137,9 @@ void memMapVUmicro()
void memMapPhy()
{
// Main memory
vtlb_MapBlock(eeMem->Main, 0x00000000,Ps2MemSize::MainRam);//mirrored on first 256 mb ?
// High memory, uninstalled on the configuration we emulate
vtlb_MapHandler(null_handler, Ps2MemSize::MainRam, 0x10000000 - Ps2MemSize::MainRam);
// Various ROMs (all read-only)
vtlb_MapBlock(eeMem->ROM, 0x1fc00000,Ps2MemSize::Rom);
@ -796,15 +796,17 @@ enum vtlb_ProtectionMode
struct vtlb_PageProtectionInfo
{
// Ram De-mapping -- used to convert fully translated/mapped offsets (which reside
// within the eeMem->Main block) back into their originating ps2 physical ram address.
// Values are assigned when pages are marked for protection. Since pages are automatically
// cleared and reset when TLB-remapped, stale values in this table (due to on-the-fly TLB
// changes) will be re-assigned the next time the page is accessed.
u32 ReverseRamMap;
vtlb_ProtectionMode Mode;
};
static __aligned16 vtlb_PageProtectionInfo m_PageProtectInfo[Ps2MemSize::MainRam >> 12];
// returns:
@ -820,14 +822,14 @@ int mmap_GetRamPageInfo( u32 paddr )
uptr ptr = (uptr)PSM( paddr );
uptr rampage = ptr - (uptr)eeMem->Main;
if (rampage >= Ps2MemSize::MainRam)
return -1; //not in ram, no tracking done ...
rampage >>= 12;
return ( m_PageProtectInfo[rampage].Mode == ProtMode_Manual ) ? 1 : 0;
}
// paddr - physically mapped PS2 address
void mmap_MarkCountedRamPage( u32 paddr )
{
paddr &= ~0xfff;
@ -835,8 +837,9 @@ void mmap_MarkCountedRamPage( u32 paddr )
uptr ptr = (uptr)PSM( paddr );
int rampage = (ptr - (uptr)eeMem->Main) >> 12;
// Important: Update the ReverseRamMap here because TLB changes could alter the paddr
// mapping into eeMem->Main.
m_PageProtectInfo[rampage].ReverseRamMap = paddr;
if( m_PageProtectInfo[rampage].Mode == ProtMode_Write )
@ -848,7 +851,7 @@ void mmap_MarkCountedRamPage( u32 paddr )
);
m_PageProtectInfo[rampage].Mode = ProtMode_Write;
HostSys::MemProtect( &eeMem->Main[rampage<<12], __pagesize, PageAccess_ReadOnly() );
}
// offset - offset of address relative to psM.
@ -863,7 +866,7 @@ static __fi void mmap_ClearCpuBlock( uint offset )
pxAssertMsg( m_PageProtectInfo[rampage].Mode != ProtMode_Manual,
"Attempted to clear a block that is already under manual protection." );
HostSys::MemProtect( &eeMem->Main[rampage<<12], __pagesize, PageAccess_ReadWrite() );
m_PageProtectInfo[rampage].Mode = ProtMode_Manual;
Cpu->Clear( m_PageProtectInfo[rampage].ReverseRamMap, 0x400 );
}
@ -872,7 +875,7 @@ void mmap_PageFaultHandler::OnPageFaultEvent( const PageFaultInfo& info, bool& h
{
// get bad virtual address
uptr offset = info.addr - (uptr)eeMem->Main;
if( offset >= Ps2MemSize::MainRam ) return;
mmap_ClearCpuBlock( offset );
handled = true;
@ -886,5 +889,5 @@ void mmap_ResetBlockTracking()
{
//DbgCon.WriteLn( "vtlb/mmap: Block Tracking reset..." );
memzero( m_PageProtectInfo );
HostSys::MemProtect( eeMem->Main, Ps2MemSize::MainRam, PageAccess_ReadWrite() );
}

View File

@ -24,6 +24,10 @@
#include <xmmintrin.h>
// [TODO] This *could* be replaced with an assignment operator on u128 that implicitly
// uses _mm_store and _mm_load internally. However, there are alignment concerns --
// u128 is not alignment strict. (we would need a u128 and u128a for types known to
// be strictly 128-bit aligned).
static __fi void CopyQWC( void* dest, const void* src )
{
_mm_store_ps( (float*)dest, _mm_load_ps((const float*)src) );
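// A minimal sketch of the TODO above (hypothetical u128a type, not part of this commit):
// an alignment-strict wrapper whose assignment routes through the SSE register file.
//
//   struct __aligned16 u128a
//   {
//       u128 _u;
//       u128a& operator=( const u128a& src )
//       {
//           _mm_store_ps( (float*)this, _mm_load_ps((const float*)&src) );
//           return *this;
//       }
//   };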

View File

@ -17,7 +17,7 @@
namespace Ps2MemSize
{
static const uint MainRam = 0x02000000; // 32 MB main memory!
static const uint Rom = 0x00400000; // 4 MB main rom
static const uint Rom1 = 0x00040000; // DVD player
static const uint Rom2 = 0x00080000; // Chinese rom extension (?)
@ -61,9 +61,9 @@ typedef u128 mem128_t;
// full breadth of PS2 RAM and ROM mappings are directly supported.
struct EEVM_MemoryAllocMess
{
u8 (&Main)[Ps2MemSize::MainRam]; // Main memory (hard-wired to 32MB)
u8 _padding1[0x1e000000-Ps2MemSize::MainRam];
u8 (&ROM1)[Ps2MemSize::Rom1]; // DVD player
u8 _padding2[0x1e040000-(0x1e000000+Ps2MemSize::Rom1)];
@ -80,8 +80,8 @@ struct EEVM_MemoryAllocMess
struct EEVM_MemoryAllocMess
{
u8 Main[Ps2MemSize::MainRam]; // Main memory (hard-wired to 32MB)
u8 Scratch[Ps2MemSize::Scratch]; // Scratchpad!
u8 ROM[Ps2MemSize::Rom]; // Boot rom (4MB)
u8 ROM1[Ps2MemSize::Rom1]; // DVD player
u8 ROM2[Ps2MemSize::Rom2]; // Chinese extensions

View File

@ -102,9 +102,15 @@ typedef int BOOL;
typedef void FnType_Void();
typedef FnType_Void* Fnptr_Void;
static const sptr _64kb = 0x10000;
static const sptr _16kb = _64kb / 4;
static const sptr _128kb = _64kb * 2;
static const sptr _256kb = _128kb * 2;
static const s64 _1mb = 0x100000;
static const s64 _8mb = _1mb * 8;
static const s64 _16mb = _1mb * 16;
static const s64 _64mb = _1mb * 64;
static const s64 _256mb = _1mb * 256;
static const s64 _1gb = _256mb * 4;

View File

@ -178,11 +178,12 @@ extern bool iopEventTestIsActive;
// Branching status used when throwing exceptions.
extern bool iopIsDelaySlot;
// --------------------------------------------------------------------------------------
// R3000Acpu
// --------------------------------------------------------------------------------------
struct R3000Acpu {
void (*Reserve)();
void (*Reset)();
void (*Execute)();
s32 (*ExecuteBlock)( s32 eeCycles ); // executes the given number of EE cycles.

View File

@ -150,10 +150,14 @@ static void doBranch(s32 tar) {
iopEventTest();
}
static void intReserve() {
}
static void intAlloc() {
}
static void intReset() {
intAlloc();
}
static void intExecute() {
@ -181,7 +185,7 @@ static void intShutdown() {
}
R3000Acpu psxInt = {
intReserve,
intReset,
intExecute,
intExecuteBlock,

View File

@ -273,25 +273,25 @@ extern void __fastcall eeloadReplaceOSDSYS();
struct R5900cpu
{
// Memory allocation function, for allocating virtual memory spaces needed by
// the virtual cpu provider. Allocating additional heap memory from this method is
// NOT recommended. Heap allocations should be performed by Reset only. This
// maximizes the likelihood of reservations claiming addresses they prefer.
//
// Thread Affinity:
// Called from the main/UI thread only. Cpu execution status is guaranteed to
// be inactive. No locking is necessary.
//
// Exception Throws:
// HardwareDeficiency - The host machine's hardware does not support this CPU provider.
// OutOfMemory - Not enough memory, or the memory areas required were already
// reserved.
void (*Reserve)();
// Deallocates ram allocated by Allocate, Reserve, and/or by runtime code execution.
//
// Thread Affinity:
// Called from the main/UI thread only. Cpu execution status is guaranteed to
// be inactive. No locking is necessary.
//
// Exception Throws: None. This function is a destructor, and should not throw.
//
@ -302,8 +302,10 @@ struct R5900cpu
// rely on the CPU/VM states almost entirely.
//
// Thread Affinity:
// Can be called from any thread. CPU execution status is indeterminate and may
// already be in progress. Implementations should be sure to queue and execute
// resets at the earliest safe convenience (typically right before recompiling a
// new block of code, or after a vsync event).
//
// Exception Throws: Emulator-defined. Common exception types to expect are
// OutOfMemory, Stream Exceptions
@ -365,7 +367,7 @@ struct R5900cpu
// better off replaced with some generic API callbacks from VTLB block protection.
// Also: the calls from COP0's TLB remap code should be replaced with full recompiler
// resets, since TLB remaps affect more than just the code they contain (code that
// may reference the remapped blocks via memory loads/stores, for example).
//
// Thread Affinity Rule:
// Can be called from any thread (namely for being called from debugging threads)

View File

@ -142,7 +142,7 @@ void SaveStateBase::FreezeBios()
}
static const int MainMemorySizeInBytes =
Ps2MemSize::MainRam + Ps2MemSize::Scratch + Ps2MemSize::Hardware +
Ps2MemSize::IopRam + Ps2MemSize::IopHardware + 0x0100;
void SaveStateBase::FreezeMainMemory()
@ -152,7 +152,7 @@ void SaveStateBase::FreezeMainMemory()
// First Block - Memory Dumps
// ---------------------------
FreezeMem(eeMem->Main, Ps2MemSize::MainRam); // 32 MB main memory
FreezeMem(eeMem->Scratch, Ps2MemSize::Scratch); // scratch pad
FreezeMem(eeHw, Ps2MemSize::Hardware); // hardware memory

View File

@ -56,6 +56,248 @@ void SrcType_PageFault::_DispatchRaw( ListenerIterator iter, const ListenerItera
} while( (++iter != iend) && !m_handled );
}
// --------------------------------------------------------------------------------------
// BaseVirtualMemoryReserve (implementations)
// --------------------------------------------------------------------------------------
BaseVirtualMemoryReserve::BaseVirtualMemoryReserve( const wxString& name )
: Name( name )
{
m_commited = 0;
m_reserved = 0;
m_baseptr = NULL;
m_block_size = __pagesize;
m_prot_mode = PageAccess_None();
}
// Parameters:
// upper_bounds - criteria that must be met for the allocation to be valid.
// If the OS refuses to allocate the memory below the specified address, the
// object will fail to initialize and an exception will be thrown.
void* BaseVirtualMemoryReserve::Reserve( uint size, uptr base, uptr upper_bounds )
{
if (!pxAssertDev( m_baseptr == NULL, "(VirtualMemoryReserve) Invalid object state; object has already been reserved." ))
return m_baseptr;
m_reserved = (size + __pagesize-4) / __pagesize;
uptr reserved_bytes = m_reserved * __pagesize;
m_baseptr = (void*)HostSys::MmapReserve(base, reserved_bytes);
if (!m_baseptr || (upper_bounds != 0 && (((uptr)m_baseptr + reserved_bytes) > upper_bounds)))
{
if (base)
{
DevCon.Warning( L"%s default address 0x%08x is unavailable; falling back on OS-default address.", Name.c_str(), base );
// Let's try again at an OS-picked memory area, and then hope it meets needed
// boundschecking criteria below.
SafeSysMunmap( m_baseptr, reserved_bytes );
m_baseptr = (void*)HostSys::MmapReserve( NULL, reserved_bytes );
}
if ((upper_bounds != 0) && (((uptr)m_baseptr + reserved_bytes) > upper_bounds))
{
SafeSysMunmap( m_baseptr, reserved_bytes );
// returns null, caller should throw an exception or handle appropriately.
}
}
if (!m_baseptr) return NULL;
DevCon.WriteLn( Color_Blue, L"%s mapped @ 0x%08X -> 0x%08X [%umb]", Name.c_str(),
m_baseptr, (uptr)m_baseptr+reserved_bytes, reserved_bytes / _1mb);
if (m_def_commit)
{
HostSys::MmapCommit(m_baseptr, m_def_commit*__pagesize);
HostSys::MemProtect(m_baseptr, m_def_commit*__pagesize, m_prot_mode);
}
return m_baseptr;
}
// Clears all committed blocks, restoring the allocation to a reserve only.
void BaseVirtualMemoryReserve::Reset()
{
if (!m_commited) return;
HostSys::MemProtect(m_baseptr, m_commited*__pagesize, PageAccess_None());
HostSys::MmapReset(m_baseptr, m_commited*__pagesize);
m_commited = 0;
}
void BaseVirtualMemoryReserve::Free()
{
HostSys::Munmap((uptr)m_baseptr, m_reserved*__pagesize);
}
void BaseVirtualMemoryReserve::OnPageFaultEvent(const PageFaultInfo& info, bool& handled)
{
uptr offset = (info.addr - (uptr)m_baseptr) / __pagesize;
if (offset >= m_reserved) return;
try {
if (!m_commited && m_def_commit)
{
const uint camt = m_def_commit * __pagesize;
// first block being committed! Commit the default requested
// amount if its different from the blocksize.
HostSys::MmapCommit(m_baseptr, camt);
HostSys::MemProtect(m_baseptr, camt, m_prot_mode);
u8* init = (u8*)m_baseptr;
u8* endpos = init + camt;
for( ; init<endpos; init += m_block_size*__pagesize )
OnCommittedBlock(init);
handled = true;
m_commited += m_def_commit;
return;
}
void* bleh = (u8*)m_baseptr + (offset * __pagesize);
// Depending on the operating system, one or both of these could fail if the system
// is low on either physical ram or virtual memory.
HostSys::MmapCommit(bleh, m_block_size*__pagesize);
HostSys::MemProtect(bleh, m_block_size*__pagesize, m_prot_mode);
m_commited += m_block_size;
OnCommittedBlock(bleh);
handled = true;
}
catch (Exception::OutOfMemory& ex)
{
OnOutOfMemory( ex, (u8*)m_baseptr + (offset * __pagesize), handled );
}
#ifndef __WXMSW__
// In Windows we can let exceptions bubble out of the page fault handler. SEH will more
// or less handle them in a semi-expected way, and might even avoid a GPF long enough
// for the system to log the error or something.
// In Linux, however, the SIGNAL handler is very limited in what it can do, and not only
// can't we let the C++ exception try to unwind the stack, we can't really log it either.
// We can't issue a proper assertion (requires user popup). We can't do jack or shit,
// *unless* its attached to a debugger; then we can, at a bare minimum, trap it.
catch (Exception::BaseException& ex)
{
wxTrap();
handled = false;
}
#endif
}
RecompiledCodeReserve::RecompiledCodeReserve( const wxString& name, uint defCommit )
: BaseVirtualMemoryReserve( name )
{
m_block_size = (1024 * 128) / __pagesize;
m_prot_mode = PageAccess_Any();
m_def_commit = defCommit / __pagesize;
}
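// Worked example (assuming 4kb pages): m_block_size = 131072 / 4096 = 32 pages, so the
// reserve grows in 128kb steps; a defCommit of _1mb*2 pre-commits 512 pages up front.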
template< u8 data >
__noinline void memset_sse_a( void* dest, const size_t size )
{
const uint MZFqwc = size / 16;
pxAssert( (size & 0xf) == 0 );
static __aligned16 const u8 loadval[8] = { data,data,data,data,data,data,data,data };
__m128 srcreg = _mm_load_ps( (float*)loadval );
srcreg = _mm_loadh_pi( srcreg, (__m64*)loadval );
float (*destxmm)[4] = (float(*)[4])dest;
#define StoreDestIdx(idx) case idx: _mm_store_ps(&destxmm[idx-1][0], srcreg)
switch( MZFqwc & 0x07 )
{
StoreDestIdx(0x07);
StoreDestIdx(0x06);
StoreDestIdx(0x05);
StoreDestIdx(0x04);
StoreDestIdx(0x03);
StoreDestIdx(0x02);
StoreDestIdx(0x01);
}
destxmm += (MZFqwc & 0x07);
for( uint i=0; i<MZFqwc / 8; ++i, destxmm+=8 )
{
_mm_store_ps(&destxmm[0][0], srcreg);
_mm_store_ps(&destxmm[1][0], srcreg);
_mm_store_ps(&destxmm[2][0], srcreg);
_mm_store_ps(&destxmm[3][0], srcreg);
_mm_store_ps(&destxmm[4][0], srcreg);
_mm_store_ps(&destxmm[5][0], srcreg);
_mm_store_ps(&destxmm[6][0], srcreg);
_mm_store_ps(&destxmm[7][0], srcreg);
}
};
void RecompiledCodeReserve::OnCommittedBlock( void* block )
{
if (IsDevBuild)
{
// Clear the recompiled code block to 0xcc (INT3) -- this helps disasm tools show
// the assembly dump more cleanly. We don't clear the block on Release builds since
// it can add a noticeable amount of overhead to large block recompilations.
memset_sse_a<0xcc>( block, m_block_size * __pagesize );
}
}
void RecompiledCodeReserve::OnOutOfMemory( const Exception::OutOfMemory& ex, void* blockptr, bool& handled )
{
// Truncate and reset reserves of all other in-use recompiler caches, as this should
// help free up quite a bit of emergency memory.
//Cpu->SetCacheReserve( (Cpu->GetCacheReserve() * 3) / 2 );
Cpu->Reset();
//CpuVU0->SetCacheReserve( (CpuVU0->GetCacheReserve() * 3) / 2 );
CpuVU0->Reset();
//CpuVU1->SetCacheReserve( (CpuVU1->GetCacheReserve() * 3) / 2 );
CpuVU1->Reset();
//psxCpu->SetCacheReserve( (psxCpu->GetCacheReserve() * 3) / 2 );
psxCpu->Reset();
// Since the recompiler is happy writing away to memory, we have to truncate the reserve
// to include the page currently being accessed, and cannot go any smaller. This will
// allow the rec to finish emitting the current block of instructions, detect that it has
// exceeded the threshold buffer, and reset the buffer on its own.
// Note: We attempt to commit multiple pages first, since a single block of recompiled
// code can pretty easily surpass 4k. We should have enough for this, since we just
// cleared the other rec caches above -- but who knows what could happen if the user
// has another process sucking up RAM or if the operating system is fickle. If even
// that fails, give up and kill the process.
try
{
uint cushion = std::min<uint>( m_block_size, 4 );
HostSys::MmapCommit((u8*)blockptr, cushion * __pagesize);
HostSys::MemProtect((u8*)blockptr, cushion * __pagesize, m_prot_mode);
handled = true;
}
catch (Exception::BaseException&)
{
// Fickle has become our reality. By setting handled to FALSE, the OS should kill
// the process for us. No point trying to log anything; this is a super-awesomely
// serious condition that likely means the system is hosed. ;)
handled = false;
}
}
#if _MSC_VER
# include "svnrev.h"
@ -306,27 +548,27 @@ SysAllocVM::~SysAllocVM() throw()
// --------------------------------------------------------------------------------------
SysCpuProviderPack::SysCpuProviderPack()
{
Console.WriteLn( "Reserving memory for recompilers..." );
CpuProviders = new CpuInitializerSet();
try {
recCpu.Reserve();
}
catch( Exception::RuntimeError& ex )
{
m_RecExceptionEE = ex.Clone();
Console.Error( L"EE Recompiler Reservation Failed:\n" + ex.FormatDiagnosticMessage() );
recCpu.Shutdown();
}
try {
psxRec.Reserve();
}
catch( Exception::RuntimeError& ex )
{
m_RecExceptionIOP = ex.Clone();
Console.Error( L"IOP Recompiler Reservation Failed:\n" + ex.FormatDiagnosticMessage() );
psxRec.Shutdown();
}
@ -424,9 +666,11 @@ void SysClearExecutionCache()
Cpu->Reset();
psxCpu->Reset();
// mVU's VU0 needs to be properly initialized for macro mode even if it's not used for micro mode!
if (CHECK_EEREC)
((BaseVUmicroCPU*)GetCpuProviders().CpuProviders->microVU0)->Reset();
CpuVU0->Reset();
CpuVU1->Reset();
@ -448,15 +692,13 @@ u8* SysMmapEx(uptr base, u32 size, uptr bounds, const char *caller)
{
DbgCon.Warning( "First try failed allocating %s at address 0x%x", caller, base );
// Let's try again at an OS-picked memory area, and then hope it meets needed
// boundschecking criteria below.
SafeSysMunmap( Mem, size );
Mem = (u8*)HostSys::Mmap( NULL, size );
}
if( (bounds != 0) && (((uptr)Mem + size) > bounds) )
{
DevCon.Warning( "Second try failed allocating %s, block ptr 0x%x does not meet required criteria.", caller, Mem );
SafeSysMunmap( Mem, size );

View File

@ -24,6 +24,42 @@
typedef SafeArray<u8> VmStateBuffer;
class BaseVUmicroCPU;
// This is a table of default virtual map addresses for ps2vm components. These locations
// are provided and used to assist in debugging and possibly hacking, since it makes it possible
// for a programmer to know exactly where to look (consistently!) for the base address of
// the various virtual machine components. These addresses can be keyed directly into the
// debugger's disasm window to get a disassembly of recompiled code, and they can be used to help
// identify recompiled code addresses in the callstack.
// All of these areas should be reserved as soon as possible during program startup, and it's
// important that none of the areas overlap. In all but superVU's case, failure due to overlap
// or other conflict will result in the operating system picking a preferred address for the mapping.
namespace HostMemoryMap
{
// superVU is OLD SCHOOL, and it requires its allocation to be in the lower 256mb
// of the virtual memory space. (8mb)
static const uptr sVUrec = 0x0f1e0000;
// PS2 main memory, SPR, and ROMs
static const uptr EEmem = 0x20000000;
// IOP main memory and ROMs
static const uptr IOPmem = 0x28000000;
// EE recompiler code cache area (64mb)
static const uptr EErec = 0x30000000;
// IOP recompiler code cache area (16 or 32mb)
static const uptr IOPrec = 0x34000000;
// microVU0 recompiler code cache area (64mb)
static const uptr mVU0rec = 0x38000000;
// microVU1 recompiler code cache area (64mb)
static const uptr mVU1rec = 0x40000000;
}
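// Illustrative wiring (hypothetical, not part of this diff): a code reserve would pass
// its preferred table entry as the base address, falling back on an OS-chosen address
// if that range is unavailable.
//
//   static RecompiledCodeReserve recMem( L"R5900-32 Recompiler Cache", _16mb );
//   recMem.Reserve( _64mb, HostMemoryMap::EErec );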
// --------------------------------------------------------------------------------------
// SysAllocVM
// --------------------------------------------------------------------------------------

View File

@ -58,6 +58,9 @@ protected:
virtual void OnPageFaultEvent( const PageFaultInfo& evtinfo, bool& handled ) {}
};
// --------------------------------------------------------------------------------------
// EventListener_PageFault
// --------------------------------------------------------------------------------------
class EventListener_PageFault : public IEventListener_PageFault
{
public:
@ -84,6 +87,92 @@ protected:
virtual void _DispatchRaw( ListenerIterator iter, const ListenerIterator& iend, const PageFaultInfo& evt );
};
// --------------------------------------------------------------------------------------
// BaseVirtualMemoryReserve (WIP!!)
// --------------------------------------------------------------------------------------
class BaseVirtualMemoryReserve : public EventListener_PageFault
{
DeclareNoncopyableObject( BaseVirtualMemoryReserve );
public:
wxString Name;
protected:
void* m_baseptr;
// reserved memory (in pages).
uptr m_reserved;
// Incremental size by which the buffer grows (in pages)
uptr m_block_size;
// Protection mode to be applied to committed blocks.
PageProtectionMode m_prot_mode;
// Specifies the number of blocks that should be committed automatically when the
// reserve is created. Typically this chunk is larger than the block size, and
// should be based on whatever typical overhead is needed for basic block use.
uint m_def_commit;
// Records the number of pages committed to memory.
// (metric for analysis of buffer usage)
uptr m_commited;
public:
BaseVirtualMemoryReserve( const wxString& name );
virtual ~BaseVirtualMemoryReserve() throw()
{
Free();
}
void* Reserve( uint size, uptr base = 0, uptr upper_bounds = 0 );
void Reset();
void Free();
uptr GetReserveSizeInBytes() const { return m_reserved * __pagesize; }
uptr GetReserveSizeInPages() const { return m_reserved; }
u8* GetPtr() { return (u8*)m_baseptr; }
const u8* GetPtr() const { return (u8*)m_baseptr; }
u8* GetPtrEnd() { return (u8*)m_baseptr + (m_reserved * __pagesize); }
const u8* GetPtrEnd() const { return (u8*)m_baseptr + (m_reserved * __pagesize); }
operator void*() { return m_baseptr; }
operator const void*() const { return m_baseptr; }
operator u8*() { return (u8*)m_baseptr; }
operator const u8*() const { return (u8*)m_baseptr; }
protected:
void OnPageFaultEvent( const PageFaultInfo& info, bool& handled );
virtual void OnCommittedBlock( void* block )=0;
virtual void OnOutOfMemory( const Exception::OutOfMemory& ex, void* blockptr, bool& handled )
{
throw;
}
};
// --------------------------------------------------------------------------------------
// RecompiledCodeReserve
// --------------------------------------------------------------------------------------
class RecompiledCodeReserve : public BaseVirtualMemoryReserve
{
protected:
public:
RecompiledCodeReserve( const wxString& name, uint defCommit = 0 );
void OnCommittedBlock( void* block );
void OnOutOfMemory( const Exception::OutOfMemory& ex, void* blockptr, bool& handled );
operator void*() { return m_baseptr; }
operator const void*() const { return m_baseptr; }
operator u8*() { return (u8*)m_baseptr; }
operator const u8*() const { return (u8*)m_baseptr; }
};
#ifdef __LINUX__
# define PCSX2_PAGEFAULT_PROTECT
@ -105,3 +194,4 @@ extern int SysPageFaultExceptionFilter(struct _EXCEPTION_POINTERS* eps);
extern void InstallSignalHandler();
extern SrcType_PageFault Source_PageFault;

View File

@ -72,6 +72,14 @@ public:
virtual const char* GetShortName() const=0;
virtual wxString GetLongName() const=0;
// returns the number of bytes committed to the working caches for this CPU
// provider (typically this refers to recompiled code caches, but could also refer
// to other optional growable allocations).
virtual size_t GetCommittedCache() const
{
return 0;
}
virtual void Allocate()=0;
virtual void Shutdown()=0;
virtual void Reset()=0;

View File

@ -107,6 +107,7 @@ enum MenuIdentifiers
MenuId_Config_SysSettings,
MenuId_Config_McdSettings,
MenuId_Config_AppSettings,
MenuId_Config_GameDatabase,
MenuId_Config_BIOS,
// Plugin ID order is important. Must match the order in tbl_PluginInfo.

View File

@ -21,8 +21,6 @@ Dialogs::GameDatabaseDialog::GameDatabaseDialog(wxWindow* parent)
: BaseConfigurationDialog( parent, AddAppName(_("Game Database - %s")), 580 )
{
ScopedBusyCursor busy( Cursor_ReallyBusy );
*this += new Panels::GameDatabasePanel(this);
AddOkCancel();
}

View File

@ -155,6 +155,7 @@ void MainEmuFrame::ConnectMenus()
ConnectMenu( MenuId_Config_SysSettings, Menu_SysSettings_Click );
ConnectMenu( MenuId_Config_McdSettings, Menu_McdSettings_Click );
ConnectMenu( MenuId_Config_AppSettings, Menu_WindowSettings_Click );
ConnectMenu( MenuId_Config_GameDatabase,Menu_GameDatabase_Click );
ConnectMenu( MenuId_Config_BIOS, Menu_SelectPluginsBios_Click );
ConnectMenu( MenuId_Config_ResetAll, Menu_ResetAllSettings_Click );
@ -421,7 +422,8 @@ MainEmuFrame::MainEmuFrame(wxWindow* parent, const wxString& title)
m_menuConfig.Append(MenuId_Config_SysSettings, _("Emulation &Settings") );
m_menuConfig.Append(MenuId_Config_McdSettings, _("&Memory cards") );
m_menuConfig.Append(MenuId_Config_BIOS, _("&Plugin/BIOS Selector") );
m_menuConfig.Append(MenuId_Config_GameDatabase, _("Game Database Editor") );
m_menuConfig.AppendSeparator();
m_menuConfig.Append(MenuId_Config_GS, _("&Video (GS)"), m_PluginMenuPacks[PluginId_GS]);

View File

@ -162,6 +162,7 @@ protected:
void Menu_SysSettings_Click(wxCommandEvent &event);
void Menu_McdSettings_Click(wxCommandEvent &event);
void Menu_GameDatabase_Click(wxCommandEvent &event);
void Menu_WindowSettings_Click(wxCommandEvent &event);
void Menu_GSSettings_Click(wxCommandEvent &event);
void Menu_SelectPluginsBios_Click(wxCommandEvent &event);

View File

@ -48,6 +48,11 @@ void MainEmuFrame::Menu_McdSettings_Click(wxCommandEvent &event)
AppOpenDialog<McdConfigDialog>( this );
}
void MainEmuFrame::Menu_GameDatabase_Click(wxCommandEvent &event)
{
AppOpenDialog<GameDatabaseDialog>( this );
}
void MainEmuFrame::Menu_WindowSettings_Click(wxCommandEvent &event)
{
wxCommandEvent evt( pxEvt_SetSettingsPage );

View File

@ -59,7 +59,7 @@ Panels::GameDatabasePanel::GameDatabasePanel( wxWindow* parent )
gameFixes[i] = new pxCheckBox(this, EnumToString(i), wxCHK_3STATE | wxCHK_ALLOW_3RD_STATE_FOR_USER );
*this += Heading(_("Game Database Editor")).Bold() | StdExpand();
//*this += Heading(_("This panel lets you add and edit game titles, game fixes, and game patches.")) | StdExpand();
wxFlexGridSizer& sizer1(*new wxFlexGridSizer(5, StdPadding));
sizer1.AddGrowableCol(0);

View File

@ -104,7 +104,7 @@ __fi tDMA_TAG *SPRdmaGetAddr(u32 addr, bool write)
// FIXME: Why??? DMA uses physical addresses
addr &= 0x1ffffff0;
if (addr < Ps2MemSize::MainRam)
{
return (tDMA_TAG*)&eeMem->Main[addr];
}
@ -133,7 +133,7 @@ __ri tDMA_TAG *dmaGetAddr(u32 addr, bool write)
// FIXME: Why??? DMA uses physical addresses
addr &= 0x1ffffff0;
if (addr < Ps2MemSize::MainRam)
{
return (tDMA_TAG*)&eeMem->Main[addr];
}

View File

@ -78,9 +78,9 @@ DataType __fastcall vtlb_memRead(u32 addr)
switch( DataSize )
{
case 8: return ((vtlbMemR8FP*)vtlbdata.RWFT[0][0][hand])(paddr);
case 16: return ((vtlbMemR16FP*)vtlbdata.RWFT[1][0][hand])(paddr);
case 32: return ((vtlbMemR32FP*)vtlbdata.RWFT[2][0][hand])(paddr);
jNO_DEFAULT;
}
@ -562,33 +562,46 @@ void vtlb_Term()
}
// Reserves the vtlb core allocation used by various emulation components!
// [TODO] basemem - request allocating memory at the specified virtual location, which can allow
// for easier debugging and/or 3rd party cheat programs. If 0, the operating system
// default is used.
void vtlb_Core_Alloc( /*uptr basemem*/ )
{
if( vtlbdata.alloc_base != NULL ) return;
vtlbdata.alloc_current = 0;
vtlbdata.alloc_base = SysMmapEx( HostMemoryMap::EEmem, VTLB_ALLOC_SIZE, 0x80000000, "Vtlb" );
#ifndef __WXMSW__
// [TODO] Win32 can fall back on this, since malloc always maps below 2GB. (but we need to
// make sure we flag it and call the right free -- and really it should never fail anyway
// since SysMmapEx should still grab addresses below the 2gb line when given the 0 param
// (OS picks the location).
//if (!vtlbdata.alloc_base)
// vtlbdata.alloc_base = (u8*)_aligned_malloc( VTLB_ALLOC_SIZE, 4096 );
#endif
if (!vtlbdata.alloc_base)
throw Exception::OutOfMemory( pxsFmt(L"PS2 mappable system ram (%u megs)", VTLB_ALLOC_SIZE / _1mb) );
vtlbdata.vmap = (s32*)_aligned_malloc( VTLB_VMAP_ITEMS * sizeof(*vtlbdata.vmap), 16 );
if (!vtlbdata.vmap)
throw Exception::OutOfMemory( pxsFmt(L"VTLB virtual LUT (%u megs)", VTLB_VMAP_ITEMS * sizeof(*vtlbdata.vmap) / _1mb) );
}
void vtlb_Core_Shutdown()
{
safe_aligned_free( vtlbdata.vmap );
if (!vtlbdata.alloc_base) return;
SafeSysMunmap( vtlbdata.alloc_base, VTLB_ALLOC_SIZE );
#ifdef __WXMSW__
// Make sure and unprotect memory first, since CrtDebug will try to write to it.
//HostSys::MemProtect( vtlbdata.alloc_base, VTLB_ALLOC_SIZE, Protect_ReadWrite );
//safe_aligned_free( vtlbdata.alloc_base );
#endif
}
@ -613,9 +626,8 @@ u8* vtlb_malloc( uint size, uint align )
void vtlb_free( void* pmem, uint size )
{
if (!pmem) return;
vtlbdata.alloc_current -= size;
pxAssertDev( vtlbdata.alloc_current >= 0, "(vtlb_free) mismatched calls to vtlb_malloc and free detected via memory underflow." );
}

View File

@@ -87,8 +87,8 @@ extern void vtlb_DynGenRead32_Const( u32 bits, bool sign, u32 addr_const );
 namespace vtlb_private
 {
-	// Allocate enough memory for both EE and IOP memory space (IOP is roughly 2.5mb,
-	// so we alloc 4mb for now -- a little more than is needed).
+	// Allocate enough memory for EE, IOP, and VU, memory space (IOP + VU is roughly
+	// 2.5mb, so we alloc 4mb for now -- a little more than is needed).
 	static const uint VTLB_ALLOC_SIZE = sizeof(*eeMem) + (_1mb*4);
 	static const uint VTLB_PAGE_BITS = 12;
@@ -101,16 +101,17 @@ namespace vtlb_private
 	struct MapData
 	{
-		u8* alloc_base;              //base of the memory array
-		int alloc_current;           //current base
-		s32 pmap[VTLB_PMAP_ITEMS];   //512KB
-		s32 vmap[VTLB_VMAP_ITEMS];   //4MB
 		// first indexer -- 8/16/32/64/128 bit tables [values 0-4]
 		// second indexer -- read/write [0 or 1]
 		// third indexer -- 128 possible handlers!
 		void* RWFT[5][2][128];
+		s32 pmap[VTLB_PMAP_ITEMS];   //512KB
+		s32* vmap;                   //4MB (allocated by vtlb_init)
+		u8* alloc_base;              //base of the memory array
+		int alloc_current;           //current base
 	};
 	extern __aligned(64) MapData vtlbdata;
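As an aside, vmap is the per-page virtual lookup table: with VTLB_PAGE_BITS at 12, one s32 entry covers each 4KB page, and one entry per 4KB page across the 4GB address space is exactly the 4MB noted in the comment. A rough sketch of the direct-mapped read path follows, assuming the usual vtlb convention that an entry holds a biased base which, added to the virtual address, yields a host pointer (handler pages are not shown and the encoding is an assumption):

    // Hypothetical lookup sketch; the entry encoding is an assumption.
    template< typename T >
    T vtlb_direct_read_sketch( u32 addr )
    {
        s32 entry = vtlbdata.vmap[addr >> VTLB_PAGE_BITS];   // one entry per 4KB page
        uptr host = (uptr)entry + addr;                      // biased base + virtual address
        return *(T*)host;                                    // indirect (handler) pages would branch instead
    }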
View File
@@ -1083,7 +1083,7 @@ BOOL CALLBACK BrowserProc(HWND hWnd,UINT uMsg,WPARAM wParam,LPARAM lParam)
 		case IDC_SKIPMPEG:
 		{
 			u8 *p = eeMem->Main;
-			u8 *d = p + Ps2MemSize::Base;
+			u8 *d = p + Ps2MemSize::MainRam;
 			d -= 16;
 			u32 *u;
View File
@@ -59,7 +59,7 @@ bool FirstSearch;
 bool FirstShow;
-char olds[Ps2MemSize::Base];
+char olds[Ps2MemSize::MainRam];
 char tn[100];
 char to[100];
View File
@@ -22,6 +22,7 @@
 #include "iR3000A.h"
 #include "BaseblockEx.h"
+#include "PageFaultSource.h"
 #include <time.h>
@@ -50,13 +51,8 @@ uptr psxhwLUT[0x10000];
 #define HWADDR(mem) (psxhwLUT[mem >> 16] + (mem))
-#define MAPBASE 0x48000000
-#define RECMEM_SIZE (8*1024*1024)
-// R3000A statics
-int psxreclog = 0;
-static u8 *recMem = NULL;           // the recompiled blocks will be here
+static RecompiledCodeReserve recMem(L"R3000A recompiled code cache", _1mb * 2);
 static BASEBLOCK *recRAM = NULL;    // and the ptr to the blocks here
 static BASEBLOCK *recROM = NULL;    // and here
 static BASEBLOCK *recROM1 = NULL;   // also here
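RecompiledCodeReserve itself is defined elsewhere in this branch; its header is not part of these hunks. Judging purely from the call sites in this commit, its surface looks roughly like the sketch below. Member names are taken from the diff; the signatures and the implicit pointer conversion are assumptions:

    // Rough interface sketch inferred from recMem usage in this commit.
    class RecompiledCodeReserve_sketch
    {
    public:
        RecompiledCodeReserve_sketch( const wxString& name, uint defSize );

        void  Reserve( size_t size, uptr base );   // reserve address space at a preferred base
        void  Reset();                             // wipe/recommit the buffer contents
        void  Free();                              // release the reservation entirely

        u8*   GetPtrEnd() const;                   // one-past-the-end of the reserve
        uint  GetReserveSizeInBytes() const;

        operator u8*() const;                      // pointer access, as used by ProfilerRegisterSource
    };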
@@ -346,7 +342,7 @@ static DynGenFunc* _DynGen_EnterRecompiledCode()
 static void _DynGen_Dispatchers()
 {
 	// In case init gets called multiple times:
-	HostSys::MemProtectStatic( iopRecDispatchers, Protect_ReadWrite, false );
+	HostSys::MemProtectStatic( iopRecDispatchers, PageAccess_ReadWrite() );
 	// clear the buffer to 0xcc (easier debugging).
 	memset_8<0xcc,__pagesize>( iopRecDispatchers );
@@ -363,7 +359,7 @@ static void _DynGen_Dispatchers()
 	iopJITCompileInBlock = _DynGen_JITCompileInBlock();
 	iopEnterRecompiledCode = _DynGen_EnterRecompiledCode();
-	HostSys::MemProtectStatic( iopRecDispatchers, Protect_ReadOnly, true );
+	HostSys::MemProtectStatic( iopRecDispatchers, PageAccess_ExecOnly() );
 	recBlocks.SetJITCompile( iopJITCompile );
 }
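The same protection dance recurs for every statically generated dispatcher touched by this commit (IOP, EE, the vtlb indirect dispatchers, microVU's block search, and the VIF unpackers): open the page for writing, fill it with INT3, emit the stubs, then lock it down for execution. Condensed, the pattern is:

    // Generic dispatcher-generation pattern used throughout this commit.
    HostSys::MemProtectStatic( iopRecDispatchers, PageAccess_ReadWrite() );  // writable while emitting
    memset_8<0xcc,__pagesize>( iopRecDispatchers );                          // 0xcc == INT3, easier debugging
    // ... emit the dispatcher stubs here ...
    HostSys::MemProtectStatic( iopRecDispatchers, PageAccess_ExecOnly() );   // executable once finalized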
@@ -759,18 +755,13 @@ static u8* m_recBlockAlloc = NULL;
 static const uint m_recBlockAllocSize =
 	(((Ps2MemSize::IopRam + Ps2MemSize::Rom + Ps2MemSize::Rom1) / 4) * sizeof(BASEBLOCK));
+static void recReserve()
+{
+	recMem.Reserve( _16mb, HostMemoryMap::IOPrec );
+}
 static void recAlloc()
 {
-	// Note: the VUrec depends on being able to grab an allocation below the 0x10000000 line,
-	// so we give the EErec an address above that to try first as it's basemem address, hence
-	// the 0x28000000 pick (0x20000000 is picked by the EE)
-	if( recMem == NULL )
-		recMem = (u8*)SysMmapEx( 0x28000000, RECMEM_SIZE, 0, "recAlloc(R3000a)" );
-	if( recMem == NULL )
-		throw Exception::OutOfMemory( L"R3000A recompiled code cache" );
 	// Goal: Allocate BASEBLOCKs for every possible branch target in IOP memory.
 	// Any 4-byte aligned address makes a valid branch target as per MIPS design (all instructions are
 	// always 4 bytes long).
@@ -795,19 +786,17 @@ static void recAlloc()
 	if( s_pInstCache == NULL )
 		throw Exception::OutOfMemory( L"R3000 InstCache." );
-	ProfilerRegisterSource( "IOP Rec", recMem, RECMEM_SIZE );
+	ProfilerRegisterSource( "IOP Rec", recMem, recMem.GetReserveSizeInBytes() );
 	_DynGen_Dispatchers();
 }
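The /4 in m_recBlockAllocSize falls out of the MIPS encoding: every instruction is 4 bytes long and 4-byte aligned, so each word of IOP RAM/ROM is a potential branch target and needs its own BASEBLOCK slot. A small sketch of the arithmetic (the real values come from Ps2MemSize):

    // One BASEBLOCK per 4-byte-aligned address across IOP RAM, ROM and ROM1.
    const uint iopTargets  = (Ps2MemSize::IopRam + Ps2MemSize::Rom + Ps2MemSize::Rom1) / 4;
    const uint iopLutBytes = iopTargets * sizeof(BASEBLOCK);   // == m_recBlockAllocSize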
 void recResetIOP()
 {
-	// calling recResetIOP without first calling recInit is bad mojo.
-	pxAssert( recMem != NULL );
-	pxAssert( m_recBlockAlloc != NULL );
+	recAlloc();
+	recMem.Reset();
 	DevCon.WriteLn( "iR3000A Recompiler reset." );
-	memset_8<0xcc,RECMEM_SIZE>( recMem );	// 0xcc is INT3
 	iopClearRecLUT((BASEBLOCK*)m_recBlockAlloc,
 		(((Ps2MemSize::IopRam + Ps2MemSize::Rom + Ps2MemSize::Rom1) / 4)));
@@ -854,8 +843,8 @@ void recResetIOP()
 static void recShutdown()
 {
 	ProfilerTerminateSource( "IOPRec" );
-	SafeSysMunmap(recMem, RECMEM_SIZE);
+	recMem.Free();
 	safe_aligned_free( m_recBlockAlloc );
 	safe_free( s_pInstCache );
@@ -1202,8 +1191,9 @@ static void __fastcall iopRecRecompile( const u32 startpc )
 	pxAssert( startpc );
 	// if recPtr reached the mem limit reset whole mem
-	if (((uptr)recPtr - (uptr)recMem) >= (RECMEM_SIZE - 0x10000))
+	if (recPtr >= (recMem.GetPtrEnd() - _64kb)) {
 		recResetIOP();
+	}
 	x86SetPtr( recPtr );
 	x86Align(16);
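The 64KB margin is not arbitrary: the post-compile assert later in this file requires each emitted block to stay under _64kb, so resetting as soon as the write cursor comes within one maximum-sized block of the end of the reserve guarantees the block in progress cannot run past it. In sketch form:

    // Reset the IOP code cache when less than one max-sized block of space remains.
    // (Each block is asserted to satisfy xGetPtr() - recPtr < _64kb after compilation.)
    if (recPtr >= (recMem.GetPtrEnd() - _64kb))
        recResetIOP();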
@@ -1390,12 +1380,12 @@ StartRecomp:
 		}
 	}
-	pxAssert( x86Ptr < recMem+RECMEM_SIZE );
-	pxAssert(x86Ptr - recPtr < 0x10000);
+	pxAssert( xGetPtr() < recMem.GetPtrEnd() );
+	pxAssert(xGetPtr() - recPtr < _64kb);
-	s_pCurBlockEx->x86size = x86Ptr - recPtr;
-	recPtr = x86Ptr;
+	s_pCurBlockEx->x86size = xGetPtr() - recPtr;
+	recPtr = xGetPtr();
 	pxAssert( (g_psxHasConstReg&g_psxFlushedConstReg) == g_psxHasConstReg );
@@ -1404,7 +1394,7 @@
 }
 R3000Acpu psxRec = {
-	recAlloc,
+	recReserve,
 	recResetIOP,
 	recExecute,
 	recExecuteBlock,
View File
@@ -28,6 +28,7 @@
 #include "Dump.h"
 #include "System/SysThreads.h"
+#include "System/PageFaultSource.h"
 #include "GS.h"
 #include "CDVD/CDVD.h"
@@ -63,7 +64,8 @@ bool g_cpuFlushedPC, g_cpuFlushedCode, g_recompilingDelaySlot, g_maySignalExcept
 #define X86
 static const int RECCONSTBUF_SIZE = 16384 * 2; // 64 bit consts in 32 bit units
-static u8 *recMem = NULL;           // the recompiled blocks will be here
+static RecompiledCodeReserve recMem(L"R5900-32 recompiled code cache", _1mb * 4);
 static u32* recConstBuf = NULL;     // 64-bit pseudo-immediates
 static BASEBLOCK *recRAM = NULL;    // and the ptr to the blocks here
 static BASEBLOCK *recROM = NULL;    // and here
@@ -506,7 +508,7 @@ static DynGenFunc* _DynGen_EnterRecompiledCode()
 static void _DynGen_Dispatchers()
 {
 	// In case init gets called multiple times:
-	HostSys::MemProtectStatic( eeRecDispatchers, Protect_ReadWrite, false );
+	HostSys::MemProtectStatic( eeRecDispatchers, PageAccess_ReadWrite() );
 	// clear the buffer to 0xcc (easier debugging).
 	memset_8<0xcc,__pagesize>( eeRecDispatchers );
@@ -523,7 +525,7 @@ static void _DynGen_Dispatchers()
 	JITCompileInBlock = _DynGen_JITCompileInBlock();
 	EnterRecompiledCode = _DynGen_EnterRecompiledCode();
-	HostSys::MemProtectStatic( eeRecDispatchers, Protect_ReadOnly, true );
+	HostSys::MemProtectStatic( eeRecDispatchers, PageAccess_ExecOnly() );
 	recBlocks.SetJITCompile( JITCompile );
 }
@@ -531,24 +533,23 @@ static void _DynGen_Dispatchers()
 //////////////////////////////////////////////////////////////////////////////////////////
 //
-static const int REC_CACHEMEM = 0x01000000;
 static void __fastcall dyna_block_discard(u32 start,u32 sz);
 // memory allocation handle for the entire BASEBLOCK and stack allocations.
 static u8* m_recBlockAlloc = NULL;
 static const uint m_recBlockAllocSize =
-	(((Ps2MemSize::Base + Ps2MemSize::Rom + Ps2MemSize::Rom1) / 4) * sizeof(BASEBLOCK))
-	+ RECCONSTBUF_SIZE * sizeof(u32) + Ps2MemSize::Base;
+	(((Ps2MemSize::MainRam + Ps2MemSize::Rom + Ps2MemSize::Rom1) / 4) * sizeof(BASEBLOCK))
+	+ RECCONSTBUF_SIZE * sizeof(u32) + Ps2MemSize::MainRam;
 static void recThrowHardwareDeficiency( const wxChar* extFail )
 {
 	throw Exception::HardwareDeficiency()
-		.SetDiagMsg(wxsFormat( L"R5900-32 recompiler init failed: %s is not available.", extFail))
-		.SetUserMsg(wxsFormat(_("%s Extensions not found. The R5900-32 recompiler requires a host CPU with MMX, SSE, and SSE2 extensions."), extFail ));
+		.SetDiagMsg(pxsFmt( L"R5900-32 recompiler init failed: %s is not available.", extFail))
+		.SetUserMsg(pxsFmt(_("%s Extensions not found. The R5900-32 recompiler requires a host CPU with MMX, SSE, and SSE2 extensions."), extFail ));
 }
-static void recAlloc()
+static void recReserve()
 {
 	// Hardware Requirements Check...
@@ -561,27 +562,14 @@ static void recAlloc()
 	if ( !x86caps.hasStreamingSIMD2Extensions )
 		recThrowHardwareDeficiency( L"SSE2" );
-	if( recMem == NULL )
-	{
-		// It's handy to have a constant base address for the EE recompiler buffer, since it
-		// allows me to key in the address directly in the debugger, and also recognize EE
-		// recompiled code from user-provisioned stack traces. But besides those, the recompiler
-		// has no actual restrictions on where it's compiled code buffer is located.
-		// Note: the SuperVU recompiler depends on being able to grab an allocation below the
-		// 0x10000000 line, so we give the EErec an address above that to try first as it's
-		// basemem address, hence the 0x20000000 pick.
-		const uint cachememsize = REC_CACHEMEM+0x1000;
-		recMem = (u8*)SysMmapEx( 0x20000000, cachememsize, 0, "recAlloc(R5900)" );
-	}
-	if( recMem == NULL )
-		throw Exception::OutOfMemory( L"R5900-32 recompiled code cache" );
+	recMem.Reserve( _64mb, HostMemoryMap::EErec );
+}
+static void recAlloc()
+{
 	// Goal: Allocate BASEBLOCKs for every possible branch target in PS2 memory.
-	// Any 4-byte aligned address makes a valid branch target as per MIPS design (all instructions are
-	// always 4 bytes long).
+	// Any 4-byte aligned address makes a valid branch target as per MIPS design (all
+	// instructions are always 4 bytes long).
 	if( m_recBlockAlloc == NULL )
 		m_recBlockAlloc = (u8*) _aligned_malloc( m_recBlockAllocSize, 4096 );
@@ -590,7 +578,7 @@ static void recAlloc()
 		throw Exception::OutOfMemory( L"R5900-32 BASEBLOCK tables" );
 	u8* curpos = m_recBlockAlloc;
-	recRAM = (BASEBLOCK*)curpos; curpos += (Ps2MemSize::Base / 4) * sizeof(BASEBLOCK);
+	recRAM = (BASEBLOCK*)curpos; curpos += (Ps2MemSize::MainRam / 4) * sizeof(BASEBLOCK);
 	recROM = (BASEBLOCK*)curpos; curpos += (Ps2MemSize::Rom / 4) * sizeof(BASEBLOCK);
 	recROM1 = (BASEBLOCK*)curpos; curpos += (Ps2MemSize::Rom1 / 4) * sizeof(BASEBLOCK);
 	recConstBuf = (u32*)curpos; curpos += RECCONSTBUF_SIZE * sizeof(u32);
@@ -607,7 +595,7 @@ static void recAlloc()
 	// No errors.. Proceed with initialization:
-	ProfilerRegisterSource( "EE Rec", recMem, REC_CACHEMEM+0x1000 );
+	ProfilerRegisterSource( "EE Rec", recMem, recMem.GetReserveSizeInBytes() );
 	_DynGen_Dispatchers();
 	x86FpuState = FPU_STATE;
@@ -619,29 +607,31 @@ struct ManualPageTracking
 	u8 counter;
 };
-static __aligned16 u16 manual_page[Ps2MemSize::Base >> 12];
-static __aligned16 u8 manual_counter[Ps2MemSize::Base >> 12];
+static __aligned16 u16 manual_page[Ps2MemSize::MainRam >> 12];
+static __aligned16 u8 manual_counter[Ps2MemSize::MainRam >> 12];
 static u32 eeRecIsReset = false;
+static u32 eeRecNeedsReset = false;
+static bool eeRecIsActive = false;
 ////////////////////////////////////////////////////
-void recResetEE( void )
+static void recResetRaw()
 {
-	//AtomicExchange( eeRecNeedsReset, false );
+	recAlloc();
 	if( AtomicExchange( eeRecIsReset, true ) ) return;
+	AtomicExchange( eeRecNeedsReset, false );
 	Console.WriteLn( Color_StrongBlack, "EE/iR5900-32 Recompiler Reset" );
+	recMem.Reset();
 	maxrecmem = 0;
-	if (IsDevBuild)
-		memset_8<0xcc, REC_CACHEMEM>(recMem); // 0xcc is INT3
-	memzero_ptr<m_recBlockAllocSize - Ps2MemSize::Base>( m_recBlockAlloc ); // Excluding the 32mb ram copy
+	memzero_ptr<m_recBlockAllocSize - Ps2MemSize::MainRam>( m_recBlockAlloc ); // Excluding the 32mb ram copy
 	memzero_ptr<RECCONSTBUF_SIZE * sizeof(u32)>(recConstBuf);
+	memzero( manual_page );
+	memzero( manual_counter );
 	ClearRecLUT((BASEBLOCK*)m_recBlockAlloc,
-		(((Ps2MemSize::Base + Ps2MemSize::Rom + Ps2MemSize::Rom1) / 4)));
+		(((Ps2MemSize::MainRam + Ps2MemSize::Rom + Ps2MemSize::Rom1) / 4)));
 	if( s_pInstCache )
 		memset( s_pInstCache, 0, sizeof(EEINST)*s_nInstCacheSize );
@@ -693,12 +683,12 @@ void recResetEE( void )
 	branch = 0;
 }
-static void recShutdown( void )
+static void recShutdown()
 {
 	ProfilerTerminateSource( "EERec" );
+	recMem.Free();
 	recBlocks.Reset();
-	SafeSysMunmap( recMem, REC_CACHEMEM );
 	safe_aligned_free( m_recBlockAlloc );
 	recRAM = recROM = recROM1 = NULL;
 	recConstBuf = NULL;
@@ -708,6 +698,17 @@ static void recShutdown( void )
 	s_nInstCacheSize = 0;
 }
+static void recResetEE()
+{
+	if (eeRecIsActive)
+	{
+		AtomicExchange( eeRecNeedsReset, true );
+		return;
+	}
+	recResetRaw();
+}
 void recStep( void )
 {
 }
@@ -1354,14 +1355,21 @@ static void __fastcall recRecompile( const u32 startpc )
 	pxAssume( startpc );
 	// if recPtr reached the mem limit reset whole mem
-	if ( ( (uptr)recPtr - (uptr)recMem ) >= REC_CACHEMEM-0x40000 || dumplog == 0xffffffff) {
-		recResetEE();
+	if (recPtr >= (recMem.GetPtrEnd() - _64kb)) {
+		AtomicExchange( eeRecNeedsReset, true );
 	}
-	if ( (recConstBufPtr - recConstBuf) >= RECCONSTBUF_SIZE - 64 ) {
+	else if ((recConstBufPtr - recConstBuf) >= RECCONSTBUF_SIZE - 64) {
 		Console.WriteLn("EE recompiler stack reset");
-		recResetEE();
+		AtomicExchange( eeRecNeedsReset, true );
 	}
+	if (eeRecNeedsReset) recResetRaw();
+	// From here on we need to have EE recompile resets disabled, since to reset
+	// the rec while we're writing to it typically leads to GPF.
+	ScopedBool active_scope(eeRecIsActive);
 	xSetPtr( recPtr );
 	recPtr = xGetAlignedCallTarget();
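The net effect is that resets are now requested rather than performed while a block is in flight: the size checks only raise eeRecNeedsReset, the flag is honored at the top of the next recompile, and eeRecIsActive guards the window in between. A condensed sketch of the intended flow, assuming ScopedBool sets its flag on construction and clears it on destruction:

    // Hypothetical condensation of the deferred-reset handling in recRecompile.
    static void recRecompile_sketch( u32 startpc )
    {
        if (recPtr >= (recMem.GetPtrEnd() - _64kb))
            AtomicExchange( eeRecNeedsReset, true );    // request a reset, don't do it in place

        if (eeRecNeedsReset) recResetRaw();             // safe here: nothing is mid-generation

        ScopedBool active_scope( eeRecIsActive );       // recResetEE() defers while this is set
        // ... emit the block; any reset requested during emission waits for the next call ...
    }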
@@ -1764,7 +1772,7 @@ StartRecomp:
 	pxAssert( (pc-startpc)>>2 <= 0xffff );
 	s_pCurBlockEx->size = (pc-startpc)>>2;
-	if (HWADDR(pc) <= Ps2MemSize::Base) {
+	if (HWADDR(pc) <= Ps2MemSize::MainRam) {
 		BASEBLOCKEX *oldBlock;
 		int i;
@@ -1836,11 +1844,11 @@ StartRecomp:
 		}
 	}
-	pxAssert( xGetPtr() < recMem+REC_CACHEMEM );
+	pxAssert( xGetPtr() < recMem.GetPtrEnd() );
 	pxAssert( recConstBufPtr < recConstBuf + RECCONSTBUF_SIZE );
 	pxAssert( x86FpuState == 0 );
-	pxAssert(xGetPtr() - recPtr < 0x10000);
+	pxAssert(xGetPtr() - recPtr < _64kb);
 	s_pCurBlockEx->x86size = xGetPtr() - recPtr;
 	recPtr = xGetPtr();
@@ -1878,7 +1886,7 @@ static void recThrowException( const BaseException& ex )
 R5900cpu recCpu =
 {
-	recAlloc,
+	recReserve,
 	recShutdown,
 	recResetEE,
View File
@@ -287,7 +287,7 @@ void vtlb_dynarec_init()
 	hasBeenCalled = true;
 	// In case init gets called multiple times:
-	HostSys::MemProtectStatic( m_IndirectDispatchers, Protect_ReadWrite, false );
+	HostSys::MemProtectStatic( m_IndirectDispatchers, PageAccess_ReadWrite() );
 	// clear the buffer to 0xcc (easier debugging).
 	memset_8<0xcc,0x1000>( m_IndirectDispatchers );
@@ -309,7 +309,7 @@ void vtlb_dynarec_init()
 		}
 	}
-	HostSys::MemProtectStatic( m_IndirectDispatchers, Protect_ReadOnly, true );
+	HostSys::MemProtectStatic( m_IndirectDispatchers, PageAccess_ExecOnly() );
 }
 //////////////////////////////////////////////////////////////////////////////////////////
View File
@@ -123,7 +123,7 @@ void microVU::reset() {
 	x86SetPtr(dispCache);
 	mVUdispatcherA(this);
 	mVUdispatcherB(this);
 	mVUemitSearch();
 	// Clear All Program Data
 	//memset(&prog, 0, sizeof(prog));
@@ -190,7 +190,7 @@ static void mVUresizeCache(mV, u32 size) {
 	if (mVU->cache) Console.WriteLn(Color_Green, "microVU%d: Attempting to resize Cache [%dmb]", mVU->index, size/_1mb);
-	u8* cache = SysMmapEx(0, size, 0, (mVU->index ? "Micro VU1 RecCache" : "Micro VU0 RecCache"));
+	u8* cache = SysMmapEx(mVU->index ? HostMemoryMap::mVU1rec : HostMemoryMap::mVU0rec, size, 0, (mVU->index ? "Micro VU1 RecCache" : "Micro VU0 RecCache"));
 	if(!cache && !mVU->cache) throw Exception::OutOfMemory( wxsFormat( L"Micro VU%d recompiled code cache", mVU->index) );
 	if(!cache) { Console.Error("microVU%d Error - Cache Resize Failed...", mVU->index); mVU->reset(); return; }
 	if (mVU->cache) {
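All of these fixed bases come from the new HostMemoryMap, which is the central idea of this branch: every large allocation gets a well-known position in the host address space. The map's definition is not part of the hunks shown here; a hypothetical sketch of its shape, listing only the members this commit references, with made-up example addresses:

    // Hypothetical sketch only -- the real addresses live in the HostMemoryMap definition,
    // which is not part of these hunks. Values below are illustrative placeholders.
    namespace HostMemoryMap
    {
        static const uptr EEmem   = 0x20000000;   // example value, not the actual mapping
        static const uptr EErec   = 0x30000000;   // example value
        static const uptr IOPrec  = 0x34000000;   // example value
        static const uptr mVU0rec = 0x38000000;   // example value
        static const uptr mVU1rec = 0x3c000000;   // example value
        static const uptr sVUrec  = 0x0d000000;   // example value (SuperVU must stay below the 256MB line)
    }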
View File
@@ -171,7 +171,7 @@ struct microVU {
 	u32 progMemMask;    // VU Micro Memory Size (in u32's)
 	u32 cacheSize;      // VU Cache Size
 	microProgManager prog;              // Micro Program Data
 	ScopedPtr<microRegAlloc> regAlloc;  // Reg Alloc Class
 	ScopedPtr<AsciiFile> logFile;       // Log File Pointer
View File
@@ -481,7 +481,7 @@ static __pagealigned u8 mVUsearchXMM[__pagesize];
 // Generates a custom optimized block-search function
 // Note: Structs must be 16-byte aligned! (GCC doesn't guarantee this)
 void mVUcustomSearch() {
-	HostSys::MemProtectStatic(mVUsearchXMM, Protect_ReadWrite, false);
+	HostSys::MemProtectStatic(mVUsearchXMM, PageAccess_ReadWrite());
 	memset_8<0xcc,__pagesize>(mVUsearchXMM);
 	xSetPtr(mVUsearchXMM);
@@ -526,5 +526,5 @@ void mVUcustomSearch() {
 	exitPoint.SetTarget();
 	xRET();
-	HostSys::MemProtectStatic(mVUsearchXMM, Protect_ReadOnly, true);
+	HostSys::MemProtectStatic(mVUsearchXMM, PageAccess_ExecOnly());
 }
View File
@@ -295,7 +295,7 @@ static void nVifGen(int usn, int mask, int curCycle) {
 void VifUnpackSSE_Init()
 {
-	HostSys::MemProtectStatic(nVifUpkExec, Protect_ReadWrite, false);
+	HostSys::MemProtectStatic(nVifUpkExec, PageAccess_ReadWrite());
 	memset8<0xcc>( nVifUpkExec );
 	xSetPtr( nVifUpkExec );
@@ -306,5 +306,5 @@ void VifUnpackSSE_Init()
 			nVifGen(a, b, c);
 	}}}
-	HostSys::MemProtectStatic(nVifUpkExec, Protect_ReadOnly, true);
+	HostSys::MemProtectStatic(nVifUpkExec, PageAccess_ExecOnly());
 }
View File
@@ -72,7 +72,7 @@ extern void iDumpVU1Registers();
 #define SUPERVU_CHECKCONDITION 0 // has to be 0!!
-#define VU_EXESIZE 0x00800000
+static const uint VU_EXESIZE = _1mb * 8;
 #define _Imm11_ 	(s32)( (vucode & 0x400) ? (0xfffffc00 | (vucode & 0x3ff)) : (vucode & 0x3ff) )
 #define _UImm11_	(s32)(vucode & 0x7ff)
@@ -352,7 +352,7 @@ static void SuperVUAlloc(int vuindex)
 {
 	// upper 4 bits must be zero!
 	// Changed "first try base" to 0xf1e0000, since 0x0c000000 liked to fail a lot. (cottonvibes)
-	s_recVUMem = SysMmapEx(0xf1e0000, VU_EXESIZE, 0x10000000, "SuperVUAlloc");
+	s_recVUMem = SysMmapEx(HostMemoryMap::sVUrec, VU_EXESIZE, 0x10000000, "SuperVUAlloc");
 	// Try again at some other random memory location... whatever. >_<
 	if( s_recVUMem == NULL )
@@ -361,7 +361,7 @@ static void SuperVUAlloc(int vuindex)
 	if (s_recVUMem == NULL)
 	{
 		throw Exception::VirtualMemoryMapConflict()
-			.SetDiagMsg(wxsFormat( L"SuperVU failed to allocate virtual memory below 256MB." ))
+			.SetDiagMsg(pxsFmt( L"SuperVU failed to allocate virtual memory below 256MB." ))
			.SetUserMsg(pxE( ".Error:superVU:VirtualMemoryAlloc",
				L"Out of Memory (sorta): The SuperVU recompiler was unable to reserve the specific memory "
				L"ranges required, and will not be available for use. This is not a critical error, since "