newHostVM:

* Preliminary implementation of the SpatialArrayReserve class, which will be used for recompiler lookup tables (many games never touch large portions of these tables; see the sketch below).  Will put it to use and test it soon.
* Finished the implementation of the TryResize method for reservations, which will be used by PCSX2 if the operating system runs low on RAM.
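To make the lookup-table point concrete, here is a rough sketch of the intended usage pattern. Everything below (the reserve name, sizes, and offset) is invented for illustration; only the class API comes from this commit:

```cpp
// Reserve a large recompiler lookup table up front, then let the page-fault
// handler commit only the blocks a game actually touches.
SpatialArrayReserve lut( L"eeRecLUT" );         // name is hypothetical
u8* table = (u8*)lut.Reserve( _1mb * 64 );      // address space only; no RAM committed yet
lut.SetBlockSizeInPages( 32 );                  // commit granularity: 32 pages (128KB at 4KB pages)
table[0x1234] = 1;                              // first touch faults, committing just that block
```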

git-svn-id: http://pcsx2.googlecode.com/svn/branches/newHostVM@3992 96395faa-99c1-11dd-bbfe-3dabce05a288
Jake.Stine 2010-11-03 17:37:29 +00:00
parent 5579350d22
commit dc7f00d05e
6 changed files with 190 additions and 103 deletions

View File

@@ -425,6 +425,10 @@
RelativePath="..\..\include\Utilities\MemsetFast.inl"
>
</File>
<File
RelativePath="..\..\include\Utilities\PageFaultSource.h"
>
</File>
<File
RelativePath="..\..\include\Utilities\Path.h"
>

View File

@@ -107,16 +107,11 @@ protected:
uptr m_reserved;
// Incremental size by which the buffer grows (in pages)
uptr m_block_size;
uptr m_blocksize;
// Protection mode to be applied to committed blocks.
PageProtectionMode m_prot_mode;
// Specifies the number of blocks that should be committed automatically when the
// reserve is created. Typically this chunk is larger than the block size, and
// should be based on whatever typical overhead is needed for basic block use.
uint m_def_commit;
// Records the number of pages committed to memory.
// (metric for analysis of buffer usage)
uptr m_commited;
@@ -131,6 +126,9 @@ public:
virtual void* Reserve( uint size, uptr base = 0, uptr upper_bounds = 0 );
virtual void Reset();
virtual void Free();
virtual bool TryResize( uint newsize );
virtual void CommitBlocks( uptr page, uint blocks );
bool IsOk() const { return m_baseptr != NULL; }
wxString GetName() const { return Name; }
@@ -153,6 +151,17 @@ public:
protected:
void OnPageFaultEvent( const PageFaultInfo& info, bool& handled );
// This function is called from OnPageFaultEvent after the address has been translated
// and confirmed to apply to this reserved area in question. OnPageFaultEvent contains
// a try/catch exception handler, which ensures "reasonable" error response behavior if
// this function throws exceptions.
//
// Important: This method is called from the context of an exception/signal handler. On
// Windows this isn't a big deal (most operations are ok). On Linux, however, logging
// and other facilities are probably not a good idea.
virtual void DoCommitAndProtect( uptr offset )=0;
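As a minimal illustration of this contract, an override might look like the sketch below (MyReserve is hypothetical; the real override added by this commit is RecompiledCodeReserve::DoCommitAndProtect further down):

```cpp
// Hypothetical override sketch: no logging or allocation, since this runs
// inside the fault/signal handler; just commit one block at the faulting offset.
void MyReserve::DoCommitAndProtect( uptr offset )
{
	CommitBlocks( offset, 1 );
}
```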
// This function is called for every committed block.
virtual void OnCommittedBlock( void* block )=0;
virtual void OnOutOfMemory( const Exception::OutOfMemory& ex, void* blockptr, bool& handled )
{
@@ -190,27 +199,25 @@ class SpatialArrayReserve : public BaseVirtualMemoryReserve
typedef BaseVirtualMemoryReserve __parent;
protected:
uint m_numblocks;
// Array of block bits, each bit indicating if the block has been committed to memory
// or not. The array length is typically determined via ((numblocks+7) / 8), though the
// actual array size may be larger in order to accommodate 32-bit or 128-bit accelerated
// operations.
ScopedAlignedAlloc<u8,16> m_blockbits;
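A small sketch of the sizing rule described above (the helper function is illustrative, not part of the class):

```cpp
// One bit per block, rounded up to whole bytes, then up to a 16-byte multiple
// so 128-bit SSE operations never read or write past the end of the array.
uint blockBitArrayBytes( uint numblocks )
{
	uint bytes = (numblocks + 7) / 8;
	return (bytes + 15) & ~15u;
}
// e.g. 1000 blocks -> 125 bytes of bits -> 128 bytes allocated.
```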
public:
SpatialArrayReserve( const wxString& name, uint defCommit = 0 );
virtual void* Reserve( uint size, uptr base = 0, uptr upper_bounds = 0 );
virtual void Reset();
void OnCommittedBlock( void* block );
void OnOutOfMemory( const Exception::OutOfMemory& ex, void* blockptr, bool& handled );
// This method allows the programmer to specify the block size of the array as a function
// of its reserved size. This function *must* be called *after* the reserve has been made.
// Calls to this function prior to initializing the reserve will be ignored (and will
// generate an assertion in debug builds).
SpatialArrayReserve& SetBlockCount( uint blocks );
// Sets the block size via pages (pages are defined by the __pagesize global, which is
// typically 4096).
SpatialArrayReserve& SetBlockSizeInPages( uint pages );
// This method assigns the block size of the spatial array, in bytes. The actual size of
// each block will be rounded up to the nearest page size. The resulting size is returned.
uint SetBlockSize( uint bytes );
@@ -219,6 +226,9 @@ public:
operator u8*() { return (u8*)m_baseptr; }
operator const u8*() const { return (u8*)m_baseptr; }
protected:
uint _calcBlockBitArrayLength() const;
};
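The two sizing setters are alternatives: SetBlockCount fixes the block count and derives the size, while SetBlockSizeInPages fixes the size and derives the count. A sketch of the configuration order both of them require (name and sizes are illustrative):

```cpp
SpatialArrayReserve reserve( L"ExampleArray" );
reserve.Reserve( _1mb * 16 );           // must come first; both setters derive from the reserved size

reserve.SetBlockCount( 64 );            // block size becomes ceil(reservedPages / 64) pages...
//reserve.SetBlockSizeInPages( 32 );    // ...or fix the block size and derive the count instead
```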
#ifdef __LINUX__

View File

@@ -58,7 +58,7 @@ BaseVirtualMemoryReserve::BaseVirtualMemoryReserve( const wxString& name )
m_commited = 0;
m_reserved = 0;
m_baseptr = NULL;
m_block_size = __pagesize;
m_blocksize = __pagesize;
m_prot_mode = PageAccess_None();
}
@@ -118,45 +118,84 @@ void BaseVirtualMemoryReserve::Free()
HostSys::Munmap((uptr)m_baseptr, m_reserved*__pagesize);
}
void BaseVirtualMemoryReserve::OnPageFaultEvent(const PageFaultInfo& info, bool& handled)
// If growing the array, or if shrinking the array to some point that's still *greater* than the
// committed memory range, then attempt a passive "on-the-fly" resize that maps/unmaps some portion
// of the reserve.
//
// If the above conditions are not met, or if the map/unmap fails, this method returns false.
// The caller will be responsible for manually resetting the reserve.
//
// Parameters:
// newsize - new size of the reserved buffer, in bytes.
bool BaseVirtualMemoryReserve::TryResize( uint newsize )
{
uptr offset = (info.addr - (uptr)m_baseptr) / __pagesize;
if (offset >= m_reserved) return;
uint newPages = (newsize + __pagesize - 1) / __pagesize;
try {
if (newPages > m_reserved)
{
uint toReservePages = newPages - m_reserved;
uint toReserveBytes = toReservePages * __pagesize;
if (!m_commited && m_def_commit)
DevCon.WriteLn( L"%-32s is being expanded by %u pages.", Name.c_str(), toReservePages);
// Map the extension at the current end of the reserve; don't clobber m_baseptr.
void* newext = (void*)HostSys::MmapReserve((uptr)GetPtrEnd(), toReserveBytes);
if (!newext)
{
const uint camt = m_def_commit * __pagesize;
// first block being committed! Commit the default requested
// amount if it's different from the blocksize.
HostSys::MmapCommitPtr(m_baseptr, camt, m_prot_mode);
u8* init = (u8*)m_baseptr;
u8* endpos = init + camt;
for( ; init<endpos; init += m_block_size*__pagesize )
OnCommittedBlock(init);
m_commited += m_def_commit;
handled = true;
return;
Console.Warning("%-32s could not be passively resized due to virtual memory conflict!");
Console.Indent().Warning("(attempted to map memory @ 0x%08X -> 0x%08X", m_baseptr, (uptr)m_baseptr+toReserveBytes);
}
void* bleh = (u8*)m_baseptr + (offset * __pagesize);
DevCon.WriteLn( Color_Blue, L"%-32s @ 0x%08X -> 0x%08X [%umb]", Name.c_str(),
m_baseptr, (uptr)m_baseptr+toReserveBytes, toReserveBytes / _1mb);
m_reserved = newPages;		// record the new reservation size
}
else if (newPages < m_reserved)
{
if (m_commited > newPages) return false;	// m_commited and newPages are both measured in pages
// Depending on the operating system, one or both of these could fail if the system
// is low on either physical ram or virtual memory.
HostSys::MmapCommitPtr(bleh, m_block_size*__pagesize, m_prot_mode);
uint toRemovePages = m_reserved - newPages;
uint toRemoveBytes = toRemovePages * __pagesize;
m_commited += m_block_size;
OnCommittedBlock(bleh);
DevCon.WriteLn( L"%-32s is being shrunk by %u pages.", Name.c_str(), toRemovePages);
HostSys::MmapResetPtr((u8*)m_baseptr + newPages*__pagesize, toRemoveBytes);	// reset the tail being dropped, not the range past the current end
DevCon.WriteLn( Color_Blue, L"%-32s @ 0x%08X -> 0x%08X [%umb]", Name.c_str(),
m_baseptr, (uptr)m_baseptr+toRemoveBytes, toRemoveBytes / _1mb);
m_reserved = newPages;		// record the new reservation size
}
return true;
}
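A hypothetical caller pattern implied by the comment above (someReserve and newSizeInBytes are placeholders): if the passive resize fails, fall back to a full release and re-reserve, accepting that the current contents are lost:

```cpp
if (!someReserve.TryResize( newSizeInBytes ))
{
	// Passive resize refused or failed; rebuild the reserve from scratch.
	someReserve.Free();
	someReserve.Reserve( newSizeInBytes );
}
```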
void BaseVirtualMemoryReserve::CommitBlocks( uptr page, uint blocks )
{
const uint blocksbytes = blocks * m_blocksize * __pagesize;
void* blockptr = (u8*)m_baseptr + (page * __pagesize);
// Depending on the operating system, one or both of these could fail if the system
// is low on either physical ram or virtual memory.
HostSys::MmapCommitPtr(blockptr, blocksbytes, m_prot_mode);
u8* init = (u8*)blockptr;
u8* endpos = init + blocksbytes;
for( ; init<endpos; init += m_blocksize*__pagesize )
OnCommittedBlock(init);
m_commited += m_blocksize * blocks;
}
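For example, with a 32-page block size (128KB at the typical 4KB page size), CommitBlocks(0, 4) commits 512KB with a single MmapCommitPtr call, invokes OnCommittedBlock once per 128KB block, and adds 128 pages to m_commited.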
void BaseVirtualMemoryReserve::OnPageFaultEvent(const PageFaultInfo& info, bool& handled)
{
sptr offset = (info.addr - (uptr)m_baseptr) / __pagesize;
if ((offset < 0) || ((uptr)offset >= m_reserved)) return;
try {
DoCommitAndProtect( offset );
handled = true;
}
catch (Exception::OutOfMemory& ex)
{
handled = false;
OnOutOfMemory( ex, (u8*)m_baseptr + (offset * __pagesize), handled );
}
#ifndef __WXMSW__
@@ -170,8 +209,8 @@ void BaseVirtualMemoryReserve::OnPageFaultEvent(const PageFaultInfo& info, bool&
// *unless* it's attached to a debugger; then we can, at a bare minimum, trap it.
catch (Exception::BaseException& ex)
{
wxTrap();
handled = false;
wxTrap();
}
#endif
}
@@ -181,14 +220,79 @@ void BaseVirtualMemoryReserve::OnPageFaultEvent(const PageFaultInfo& info, bool&
// SpatialArrayReserve (implementations)
// --------------------------------------------------------------------------------------
uint SpatialArrayReserve::_calcBlockBitArrayLength() const
{
// Length is in bytes: one bit per block, rounded up to a 16-byte multiple so
// the SSE-accelerated clear in Reset() never overruns the array.
return ((m_numblocks + 127) / 128) * 16;
}
void* SpatialArrayReserve::Reserve( uint size, uptr base, uptr upper_bounds )
{
return __parent::Reserve( size, base, upper_bounds );
}
// Resets/clears the spatial array, reducing the memory commit pool overhead to zero (0).
void SpatialArrayReserve::Reset()
{
__parent::Reset();
memzero_sse_a(m_blockbits.GetPtr(), _calcBlockBitArrayLength());
}
// This method allows the programmer to specify the block size of the array as a function
// of its reserved size. This function *must* be called *after* the reserve has been made,
// and *before* the array contents have been accessed.
//
// Calls to this function prior to initializing the reserve or after the reserve has been
// accessed (resulting in committed blocks) will be ignored -- and will generate an assertion
// in debug builds.
SpatialArrayReserve& SpatialArrayReserve::SetBlockCount( uint blocks )
{
pxAssumeDev( !m_commited, "Invalid object state: SetBlockCount must be called prior to reserved memory accesses." );
// Calculate such that the last block extends past the end of the array, if necessary.
m_numblocks = blocks;
m_blocksize = (m_reserved + m_numblocks-1) / m_numblocks;
m_blockbits.Alloc( _calcBlockBitArrayLength() );	// allocate the block bits, as SetBlockSizeInPages does
return *this;
}
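For example, a 1000-page reserve split into 64 blocks yields a block size of (1000+63)/64 = 16 pages; the 64 blocks then span 1024 pages, so the last block extends 24 pages past the end of the reserve.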
// Sets the block size via pages (pages are defined by the __pagesize global, which is
// typically 4096).
//
// This method must be called prior to accessing or modifying the array contents. Calls
// made after the buffer has been accessed or modified will be ignored (and generate an
// assertion in dev/debug modes).
SpatialArrayReserve& SpatialArrayReserve::SetBlockSizeInPages( uint pages )
{
if (pxAssertDev(!m_commited, "Invalid object state: Block size can only be changed prior to accessing or modifying the reserved buffer contents."))
{
m_blocksize = pages;
m_numblocks = (m_reserved + m_blocksize - 1) / m_blocksize;
m_blockbits.Alloc( _calcBlockBitArrayLength() );
}
return *this;
}
// This method assigns the block size of the spatial array, in bytes. The actual size of
// each block will be rounded up to the nearest page size. The resulting size is returned.
//
// This method must be called prior to accessing or modifying the array contents. Calls
// made after the buffer has been accessed or modified will be ignored (and generate an
// assertion in dev/debug modes).
uint SpatialArrayReserve::SetBlockSize( uint bytes )
{
SetBlockSizeInPages((bytes + __pagesize - 1) / __pagesize);
return m_blocksize * __pagesize;
}
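For example, SetBlockSize(100) rounds up to a single page and returns 4096, assuming the typical 4KB __pagesize.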
void SpatialArrayReserve::OnCommittedBlock( void* block )
{
// Determine the block position in the blockbits array, flag it, and be done!
uptr relative = (uptr)block - (uptr)m_baseptr;
pxAssume( (relative % (m_blocksize * __pagesize)) == 0);
relative /= m_blocksize * __pagesize;
m_blockbits[relative / 8] |= 1 << (relative & 7);
// (m_commited accounting is handled by CommitBlocks, which invokes this callback.)
}
void SpatialArrayReserve::OnOutOfMemory( const Exception::OutOfMemory& ex, void* blockptr, bool& handled )

View File

@@ -43,7 +43,7 @@ extern void resetNewVif(int idx);
RecompiledCodeReserve::RecompiledCodeReserve( const wxString& name, uint defCommit )
: BaseVirtualMemoryReserve( name )
{
m_block_size = (1024 * 128) / __pagesize;
m_blocksize = (1024 * 128) / __pagesize;
m_prot_mode = PageAccess_Any();
m_def_commit = defCommit / __pagesize;
@@ -68,6 +68,11 @@ void RecompiledCodeReserve::_termProfiler()
ProfilerTerminateSource( m_profiler_name );
}
uint RecompiledCodeReserve::_calcDefaultCommitInBlocks() const
{
return (m_def_commit + m_blocksize - 1) / m_blocksize;
}
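For example, with the constructor's 128KB block size (32 pages at a 4KB page size), a hypothetical defCommit of 1MB works out to 256 pages, so (256+31)/32 = 8 blocks get committed up front.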
void* RecompiledCodeReserve::Reserve( uint size, uptr base, uptr upper_bounds )
{
if (!__parent::Reserve(size, base, upper_bounds)) return NULL;
@@ -76,55 +81,6 @@ void* RecompiledCodeReserve::Reserve( uint size, uptr base, uptr upper_bounds )
}
// If growing the array, or if shrinking the array to some point that's still *greater* than the
// committed memory range, then attempt a passive "on-the-fly" resize that maps/unmaps some portion
// of the reserve.
//
// If the above conditions are not met, or if the map/unmap fails, this method returns false.
// The caller will be responsible for manually resetting the reserve.
//
// Parameters:
// newsize - new size of the reserved buffer, in bytes.
bool RecompiledCodeReserve::TryResize( uint newsize )
{
uint newPages = (newsize + __pagesize - 1) / __pagesize;
if (newPages > m_reserved)
{
uint toReservePages = newPages - m_reserved;
uint toReserveBytes = toReservePages * __pagesize;
DevCon.WriteLn( L"%-32s is being expanded by %u pages.", Name.c_str(), toReservePages);
m_baseptr = (void*)HostSys::MmapReserve((uptr)GetPtrEnd(), toReserveBytes);
if (!m_baseptr)
{
Console.Warning("%-32s could not be passively resized due to virtual memory conflict!");
Console.Indent().Warning("(attempted to map memory @ 0x%08X -> 0x%08X", m_baseptr, (uptr)m_baseptr+toReserveBytes);
}
DevCon.WriteLn( Color_Blue, L"%-32s @ 0x%08X -> 0x%08X [%umb]", Name.c_str(),
m_baseptr, (uptr)m_baseptr+toReserveBytes, toReserveBytes / _1mb);
}
else if (newPages < m_reserved)
{
if (m_commited > newsize) return false;
uint toRemovePages = m_reserved - newPages;
uint toRemoveBytes = toRemovePages * __pagesize;
DevCon.WriteLn( L"%-32s is being shrunk by %u pages.", Name.c_str(), toRemovePages);
HostSys::MmapResetPtr(GetPtrEnd(), toRemoveBytes);
DevCon.WriteLn( Color_Blue, L"%-32s @ 0x%08X -> 0x%08X [%umb]", Name.c_str(),
m_baseptr, (uptr)m_baseptr+toRemoveBytes, toRemoveBytes / _1mb);
}
return true;
}
// Sets the abbreviated name used by the profiler. Name should be under 10 characters long.
// After a name has been set, a profiler source will be automatically registered and cleared
// in accordance with changes in the reserve area.
@@ -135,6 +91,11 @@ RecompiledCodeReserve& RecompiledCodeReserve::SetProfilerName( const wxString& s
return *this;
}
void RecompiledCodeReserve::DoCommitAndProtect( uptr page )
{
CommitBlocks(page, (m_commited || !m_def_commit) ? 1 : _calcDefaultCommitInBlocks() );
}
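In other words, the very first fault on a fresh reserve commits _calcDefaultCommitInBlocks() blocks in one shot (when a default commit was requested), while every subsequent fault commits a single block.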
void RecompiledCodeReserve::OnCommittedBlock( void* block )
{
if (IsDevBuild)
@@ -143,7 +104,7 @@ void RecompiledCodeReserve::OnCommittedBlock( void* block )
// the assembly dump more cleanly. We don't clear the block on Release builds since
// it can add a noticeable amount of overhead to large block recompilations.
memset_sse_a<0xcc>( block, m_block_size * __pagesize );
memset_sse_a<0xcc>( block, m_blocksize * __pagesize );
}
}
@@ -184,7 +145,7 @@ void RecompiledCodeReserve::OnOutOfMemory( const Exception::OutOfMemory& ex, voi
ResetProcessReserves();
uint cushion = std::min<uint>( m_block_size, 4 );
uint cushion = std::min<uint>( m_blocksize, 4 );
HostSys::MmapCommitPtr((u8*)blockptr, cushion * __pagesize, m_prot_mode);
handled = true;

View File

@@ -30,6 +30,11 @@ class RecompiledCodeReserve : public BaseVirtualMemoryReserve
typedef BaseVirtualMemoryReserve __parent;
protected:
// Specifies the number of blocks that should be committed automatically when the
// reserve is created. Typically this chunk is larger than the block size, and
// should be based on whatever typical overhead is needed for basic block use.
uint m_def_commit;
wxString m_profiler_name;
bool m_profiler_registered;
@@ -47,8 +52,6 @@ public:
return SetProfilerName( fromUTF8(shortname) );
}
virtual bool TryResize( uint newsize );
operator void*() { return m_baseptr; }
operator const void*() const { return m_baseptr; }
@@ -57,7 +60,10 @@ public:
protected:
void ResetProcessReserves() const;
void DoCommitAndProtect( uptr page );
void _registerProfiler();
void _termProfiler();
uint _calcDefaultCommitInBlocks() const;
};

View File

@@ -81,8 +81,8 @@ const __aligned(32) mVU_Globals mVUglob = {
static __fi void mVUthrowHardwareDeficiency(const wxChar* extFail, int vuIndex) {
throw Exception::HardwareDeficiency()
.SetDiagMsg(wxsFormat(L"microVU%d recompiler init failed: %s is not available.", vuIndex, extFail))
.SetUserMsg(wxsFormat(_("%s Extensions not found. microVU requires a host CPU with MMX, SSE, and SSE2 extensions."), extFail ));
.SetDiagMsg(pxsFmt(L"microVU%d recompiler init failed: %s is not available.", vuIndex, extFail))
.SetUserMsg(pxsFmt(_("%s Extensions not found. microVU requires a host CPU with MMX, SSE, and SSE2 extensions."), extFail ));
}
void microVU::reserveCache()
@@ -91,8 +91,8 @@ void microVU::reserveCache()
cache_reserve->SetProfilerName( pxsFmt("mVU%urec", index) );
cache = index ?
(u8*)cache_reserve->Reserve( cacheSize, HostMemoryMap::mVU1rec ) :
(u8*)cache_reserve->Reserve( cacheSize, HostMemoryMap::mVU0rec );
(u8*)cache_reserve->Reserve( cacheSize * _1mb, HostMemoryMap::mVU1rec ) :
(u8*)cache_reserve->Reserve( cacheSize * _1mb, HostMemoryMap::mVU0rec );
if(!cache_reserve->IsOk())
throw Exception::VirtualMemoryMapConflict().SetDiagMsg(pxsFmt( L"Micro VU%u Recompiler Cache", index ));
@@ -114,6 +114,8 @@ void microVU::init(uint vuIndex) {
progSize = (index ? 0x4000 : 0x1000) / 4;
progMemMask = progSize-1;
reserveCache();
dispCache = SysMmapEx(0, mVUdispCacheSize, 0, (index ? "Micro VU1 Dispatcher" : "Micro VU0 Dispatcher"));
if (!dispCache) throw Exception::OutOfMemory( index ? L"Micro VU1 Dispatcher" : L"Micro VU0 Dispatcher" );
memset(dispCache, 0xcc, mVUdispCacheSize);