mirror of https://github.com/PCSX2/pcsx2.git
Allocate memory in an x86-64-compatible way (#3523)
Allocate memory in an x86-64-compatible way. Another part of #3451.
Note: While this shouldn't change how anything works, it's been the #1 source of breakage of 32-bit builds in #3451 (it caused win32's failure to allocate memory, and the linux-32 failure after that), so we should definitely make sure it gets tested. See #3523 for more information.
This commit is contained in:
parent
4f56db9f18
commit
75aac90452
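The commit title's "x86-64-compatible way" refers to keeping all of PCSX2's fixed allocations within rip-relative reach of the recompilers' generated code. As context, a minimal sketch of that constraint, using nothing beyond the standard library (the function name is illustrative, not part of this commit):

    #include <cstdint>

    // x86-64 rip-relative operands are signed 32-bit displacements measured from
    // the end of the current instruction, so a target is reachable only if the
    // difference fits in an int32_t.
    bool reachableRipRelative(uint64_t rip, uint64_t instrLen, uint64_t target) {
        int64_t disp = (int64_t)(target - (rip + instrLen));
        return disp >= INT32_MIN && disp <= INT32_MAX;
    }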
@@ -25,6 +25,7 @@
 // [TODO] OS-X (Darwin) platforms should use the Mach exception model (not implemented)
 
 #include "EventSource.h"
+#include <atomic>
 
 struct PageFaultInfo
 {
@@ -122,6 +123,64 @@ protected:
 };
 
+// --------------------------------------------------------------------------------------
+//  VirtualMemoryManager: Manages the allocation of PCSX2 VM
+//    Ensures that all memory is close enough together for rip-relative addressing
+// --------------------------------------------------------------------------------------
+class VirtualMemoryManager
+{
+    DeclareNoncopyableObject(VirtualMemoryManager);
+
+    wxString m_name;
+
+    uptr m_baseptr;
+
+    // An array to track page usage (to trigger asserts if things try to overlap)
+    std::atomic<bool> *m_pageuse;
+
+    // reserved memory (in pages)
+    u32 m_pages_reserved;
+
+public:
+    // If upper_bounds is nonzero and the OS fails to allocate memory that is below it,
+    // calls to IsOk() will return false and Alloc() will always return null pointers
+    // strict indicates that the allocation should quietly fail if the memory can't be mapped at `base`
+    VirtualMemoryManager(const wxString &name, uptr base, size_t size, uptr upper_bounds = 0, bool strict = false);
+    ~VirtualMemoryManager();
+
+    void *GetBase() const { return (void *)m_baseptr; }
+
+    // Request the use of the memory at offsetLocation bytes from the start of the reserved memory area
+    // offsetLocation must be page-aligned
+    void *Alloc(uptr offsetLocation, size_t size) const;
+
+    void *AllocAtAddress(void *address, size_t size) const {
+        return Alloc((uptr)address - m_baseptr, size);
+    }
+
+    void Free(void *address, size_t size) const;
+
+    // Was this VirtualMemoryManager successfully able to get its memory mapping?
+    // (If not, calls to Alloc will return null pointers)
+    bool IsOk() const { return m_baseptr != 0; }
+};
+
+typedef std::shared_ptr<const VirtualMemoryManager> VirtualMemoryManagerPtr;
+
+// --------------------------------------------------------------------------------------
+//  VirtualMemoryBumpAllocator: Allocates memory for things that don't have explicitly-reserved spots
+// --------------------------------------------------------------------------------------
+class VirtualMemoryBumpAllocator
+{
+    const VirtualMemoryManagerPtr m_allocator;
+    std::atomic<uptr> m_baseptr{0};
+    const uptr m_endptr = 0;
+
+public:
+    VirtualMemoryBumpAllocator(VirtualMemoryManagerPtr allocator, uptr offsetLocation, size_t size);
+    void *Alloc(size_t size);
+    const VirtualMemoryManagerPtr& GetAllocator() { return m_allocator; }
+};
 
 // --------------------------------------------------------------------------------------
 //  VirtualMemoryReserve
 // --------------------------------------------------------------------------------------
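The new classes above are the heart of the change: one big OS reservation, carved up by offset. A minimal usage sketch of the contract described in the comments; the name, offset, and sizes are made up for illustration:

    // Reserve 16mb anywhere the OS likes, then hand out a page-aligned slice.
    VirtualMemoryManagerPtr vmm = std::make_shared<VirtualMemoryManager>(
        L"Example", /*base=*/0, /*size=*/16 * _1mb);
    if (vmm->IsOk()) {
        void *p = vmm->Alloc(/*offsetLocation=*/0x4000, /*size=*/0x2000); // offset must be page-aligned
        vmm->Free(p, 0x2000); // same size as the allocation
    }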
@@ -132,6 +191,9 @@ class VirtualMemoryReserve
 protected:
     wxString m_name;
 
+    // Where the memory came from (so we can return it)
+    VirtualMemoryManagerPtr m_allocator;
+
     // Default size of the reserve, in bytes. Can be specified when the object is constructed.
     // Is used as the reserve size when Reserve() is called, unless an override is specified
     // in the Reserve parameters.
@@ -155,17 +217,32 @@ protected:
     // as well.
     bool m_allow_writes;
 
+    // Allows the implementation to decide how much memory it needs to allocate if someone requests the given size
+    // Should translate requests of size 0 to m_defsize
+    virtual size_t GetSize(size_t requestedSize);
+
 public:
-    VirtualMemoryReserve(const wxString &name = wxEmptyString, size_t size = 0);
+    VirtualMemoryReserve(const wxString &name, size_t size = 0);
     virtual ~VirtualMemoryReserve()
     {
        Release();
     }
 
-    virtual void *Reserve(size_t size = 0, uptr base = 0, uptr upper_bounds = 0);
-    virtual void *ReserveAt(uptr base = 0, uptr upper_bounds = 0)
-    {
-        return Reserve(m_defsize, base, upper_bounds);
-    }
+    // Initialize with the given piece of memory
+    // Note: The memory is already allocated, the allocator is for future use to free the region
+    // It may be null in which case there is no way to free the memory in a way it will be usable again
+    virtual void *Assign(VirtualMemoryManagerPtr allocator, void *baseptr, size_t size);
+
+    void *Reserve(VirtualMemoryManagerPtr allocator, uptr baseOffset, size_t size = 0)
+    {
+        size = GetSize(size);
+        void *allocation = allocator->Alloc(baseOffset, size);
+        return Assign(std::move(allocator), allocation, size);
+    }
+    void *Reserve(VirtualMemoryBumpAllocator& allocator, size_t size = 0)
+    {
+        size = GetSize(size);
+        return Assign(allocator.GetAllocator(), allocator.Alloc(size), size);
+    }
 
     virtual void Reset();
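The two non-virtual Reserve overloads above differ only in how the backing region is chosen: by explicit offset into a manager, or wherever the bump allocator's cursor happens to be. A sketch of both call shapes, reusing the hypothetical vmm from the earlier sketch (the reserve names are illustrative):

    VirtualMemoryReserve fixedSpot(L"Fixed Example", _1mb);
    fixedSpot.Reserve(vmm, /*baseOffset=*/0x100000);     // explicit spot in the map

    VirtualMemoryReserve leftovers(L"Bump Example", _64kb);
    leftovers.Reserve(GetVmMemory().BumpAllocator());    // no reserved spot needed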
@@ -177,7 +254,7 @@ public:
     virtual void AllowModification();
 
     bool IsOk() const { return m_baseptr != NULL; }
-    wxString GetName() const { return m_name; }
+    const wxString& GetName() const { return m_name; }
 
     uptr GetReserveSizeInBytes() const { return m_pages_reserved * __pagesize; }
     uptr GetReserveSizeInPages() const { return m_pages_reserved; }
@@ -189,8 +266,6 @@ public:
     u8 *GetPtrEnd() { return (u8 *)m_baseptr + (m_pages_reserved * __pagesize); }
     const u8 *GetPtrEnd() const { return (u8 *)m_baseptr + (m_pages_reserved * __pagesize); }
 
-    VirtualMemoryReserve &SetName(const wxString &newname);
-    VirtualMemoryReserve &SetBaseAddr(uptr newaddr);
     VirtualMemoryReserve &SetPageAccessOnCommit(const PageProtectionMode &mode);
 
     operator void *() { return m_baseptr; }
@@ -67,68 +67,24 @@ void SrcType_PageFault::_DispatchRaw(ListenerIterator iter, const ListenerIterat
     } while ((++iter != iend) && !m_handled);
 }
 
+static size_t pageAlign(size_t size)
+{
+    return (size + __pagesize - 1) / __pagesize * __pagesize;
+}
+
 // --------------------------------------------------------------------------------------
-//  VirtualMemoryReserve  (implementations)
+//  VirtualMemoryManager  (implementations)
 // --------------------------------------------------------------------------------------
-VirtualMemoryReserve::VirtualMemoryReserve(const wxString &name, size_t size)
-    : m_name(name)
+VirtualMemoryManager::VirtualMemoryManager(const wxString &name, uptr base, size_t size, uptr upper_bounds, bool strict)
+    : m_name(name), m_baseptr(0), m_pageuse(nullptr), m_pages_reserved(0)
 {
-    m_defsize = size;
-
-    m_pages_commited = 0;
-    m_pages_reserved = 0;
-    m_baseptr = NULL;
-    m_prot_mode = PageAccess_None();
-    m_allow_writes = true;
-}
-
-VirtualMemoryReserve &VirtualMemoryReserve::SetName(const wxString &newname)
-{
-    m_name = newname;
-    return *this;
-}
-
-VirtualMemoryReserve &VirtualMemoryReserve::SetBaseAddr(uptr newaddr)
-{
-    if (!pxAssertDev(!m_pages_reserved, "Invalid object state: you must release the virtual memory reserve prior to changing its base address!"))
-        return *this;
-
-    m_baseptr = (void *)newaddr;
-    return *this;
-}
-
-VirtualMemoryReserve &VirtualMemoryReserve::SetPageAccessOnCommit(const PageProtectionMode &mode)
-{
-    m_prot_mode = mode;
-    return *this;
-}
-
-// Notes:
-//  * This method should be called if the object is already in an released (unreserved) state.
-//    Subsequent calls will be ignored, and the existing reserve will be returned.
-//
-// Parameters:
-//   size - size of the reserve, in bytes. (optional)
-//     If not specified (or zero), then the default size specified in the constructor for the
-//     object instance is used.
-//
-//   upper_bounds - criteria that must be met for the allocation to be valid.
-//     If the OS refuses to allocate the memory below the specified address, the
-//     object will fail to initialize and an exception will be thrown.
-void *VirtualMemoryReserve::Reserve(size_t size, uptr base, uptr upper_bounds)
-{
-    if (!pxAssertDev(m_baseptr == NULL, "(VirtualMemoryReserve) Invalid object state; object has already been reserved."))
-        return m_baseptr;
-
-    if (!size)
-        size = m_defsize;
-    if (!size)
-        return NULL;
-
-    m_pages_reserved = (size + __pagesize - 4) / __pagesize;
-    uptr reserved_bytes = m_pages_reserved * __pagesize;
-
-    m_baseptr = (void *)HostSys::MmapReserve(base, reserved_bytes);
+    if (!size) return;
+
+    uptr reserved_bytes = pageAlign(size);
+    m_pages_reserved = reserved_bytes / __pagesize;
+
+    m_baseptr = (uptr)HostSys::MmapReserve(base, reserved_bytes);
 
     if (!m_baseptr || (upper_bounds != 0 && (((uptr)m_baseptr + reserved_bytes) > upper_bounds))) {
         DevCon.Warning(L"%s: host memory @ %ls -> %ls is unavailable; attempting to map elsewhere...",
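The new pageAlign helper rounds a byte count up to the next page boundary, and also retires the old `(size + __pagesize - 4)` rounding, which under-counted pages for sizes 1-3 bytes past a boundary. A worked example with 4KB pages:

    pageAlign(0x1801) = (0x1801 + 0xFFF) / 0x1000 * 0x1000 = 0x2000   // rounds up to two pages
    pageAlign(0x2000) = (0x2000 + 0xFFF) / 0x1000 * 0x1000 = 0x2000   // already aligned, unchanged
    // Old math: (0x1001 + 0x1000 - 4) / 0x1000 = 1 page, short of the 0x1001 bytes requested.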
@@ -139,17 +95,176 @@ void *VirtualMemoryReserve::Reserve(size_t size, uptr base, uptr upper_bounds)
         if (base) {
             // Let's try again at an OS-picked memory area, and then hope it meets needed
             // boundschecking criteria below.
-            m_baseptr = HostSys::MmapReserve(0, reserved_bytes);
+            m_baseptr = (uptr)HostSys::MmapReserve(0, reserved_bytes);
         }
     }
 
-    if ((upper_bounds != 0) && (((uptr)m_baseptr + reserved_bytes) > upper_bounds)) {
+    bool fulfillsRequirements = true;
+    if (strict && m_baseptr != base)
+        fulfillsRequirements = false;
+    if ((upper_bounds != 0) && ((m_baseptr + reserved_bytes) > upper_bounds))
+        fulfillsRequirements = false;
+    if (!fulfillsRequirements) {
         SafeSysMunmap(m_baseptr, reserved_bytes);
         // returns null, caller should throw an exception or handle appropriately.
     }
 
+    if (!m_baseptr) return;
+
+    m_pageuse = new std::atomic<bool>[m_pages_reserved]();
+
     FastFormatUnicode mbkb;
     uint mbytes = reserved_bytes / _1mb;
     if (mbytes)
         mbkb.Write("[%umb]", mbytes);
     else
         mbkb.Write("[%ukb]", reserved_bytes / 1024);
 
     DevCon.WriteLn(Color_Gray, L"%-32s @ %ls -> %ls %ls", WX_STR(m_name),
         pxsPtr(m_baseptr), pxsPtr((uptr)m_baseptr + reserved_bytes), mbkb.c_str());
 }
 
+VirtualMemoryManager::~VirtualMemoryManager()
+{
+    if (m_pageuse) delete[] m_pageuse;
+    if (m_baseptr) HostSys::Munmap(m_baseptr, m_pages_reserved * __pagesize);
+}
+
+static bool VMMMarkPagesAsInUse(std::atomic<bool> *begin, std::atomic<bool> *end) {
+    for (auto current = begin; current < end; current++) {
+        bool expected = false;
+        if (!current->compare_exchange_strong(expected, true, std::memory_order_relaxed)) {
+            // This was already allocated!  Undo the things we've set until this point
+            while (--current >= begin) {
+                if (!current->compare_exchange_strong(expected, false, std::memory_order_relaxed)) {
+                    // In the time we were doing this, someone set one of the things we just set to true back to false
+                    // This should never happen, but if it does we'll just stop and hope nothing bad happens
+                    pxAssert(0);
+                    return false;
+                }
+            }
+            return false;
+        }
+    }
+    return true;
+}
+
+void *VirtualMemoryManager::Alloc(uptr offsetLocation, size_t size) const
+{
+    size = pageAlign(size);
+    if (!pxAssertDev(offsetLocation % __pagesize == 0, "(VirtualMemoryManager) alloc at unaligned offsetLocation"))
+        return nullptr;
+    if (!pxAssertDev(size + offsetLocation <= m_pages_reserved * __pagesize, "(VirtualMemoryManager) alloc outside reserved area"))
+        return nullptr;
+    if (m_baseptr == 0)
+        return nullptr;
+    auto puStart = &m_pageuse[offsetLocation / __pagesize];
+    auto puEnd = &m_pageuse[(offsetLocation + size) / __pagesize];
+    if (!pxAssertDev(VMMMarkPagesAsInUse(puStart, puEnd), "(VirtualMemoryManager) allocation requests overlapped"))
+        return nullptr;
+    return (void *)(m_baseptr + offsetLocation);
+}
+
+void VirtualMemoryManager::Free(void *address, size_t size) const
+{
+    uptr offsetLocation = (uptr)address - m_baseptr;
+    if (!pxAssertDev(offsetLocation % __pagesize == 0, "(VirtualMemoryManager) free at unaligned address")) {
+        uptr newLoc = pageAlign(offsetLocation);
+        size -= (newLoc - offsetLocation);
+        offsetLocation = newLoc;
+    }
+    if (!pxAssertDev(size % __pagesize == 0, "(VirtualMemoryManager) free with unaligned size"))
+        size -= size % __pagesize;
+    if (!pxAssertDev(size + offsetLocation <= m_pages_reserved * __pagesize, "(VirtualMemoryManager) free outside reserved area"))
+        return;
+    auto puStart = &m_pageuse[offsetLocation / __pagesize];
+    auto puEnd = &m_pageuse[(offsetLocation + size) / __pagesize];
+    for (; puStart < puEnd; puStart++) {
+        bool expected = true;
+        if (!puStart->compare_exchange_strong(expected, false, std::memory_order_relaxed)) {
+            pxAssertDev(0, "(VirtualMemoryManager) double-free");
+        }
+    }
+}
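Alloc and Free above give the manager first-come-first-served page accounting: any request that touches an in-use page fails as a whole, and VMMMarkPagesAsInUse rolls back the pages it already claimed. A toy sequence against the hypothetical vmm from the earlier sketch (offsets illustrative, dev asserts aside):

    void *a = vmm->Alloc(0x0000, 0x3000); // claims pages 0-2
    void *b = vmm->Alloc(0x2000, 0x2000); // touches page 2 -> overlap, returns nullptr
    vmm->Free(a, 0x3000);                 // pages 0-2 released
    void *c = vmm->Alloc(0x2000, 0x2000); // now succeeds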
+// --------------------------------------------------------------------------------------
+//  VirtualMemoryBumpAllocator  (implementations)
+// --------------------------------------------------------------------------------------
+VirtualMemoryBumpAllocator::VirtualMemoryBumpAllocator(VirtualMemoryManagerPtr allocator, uptr offsetLocation, size_t size)
+    : m_allocator(std::move(allocator)), m_baseptr((uptr)m_allocator->Alloc(offsetLocation, size)), m_endptr(m_baseptr + size)
+{
+    if (m_baseptr.load() == 0)
+        pxAssertDev(0, "(VirtualMemoryBumpAllocator) tried to construct from bad VirtualMemoryManager");
+}
+
+void *VirtualMemoryBumpAllocator::Alloc(size_t size)
+{
+    if (m_baseptr.load() == 0) // True if constructed from bad VirtualMemoryManager (assertion was on initialization)
+        return nullptr;
+
+    size_t reservedSize = pageAlign(size);
+
+    uptr out = m_baseptr.fetch_add(reservedSize, std::memory_order_relaxed);
+
+    if (!pxAssertDev(out + size <= m_endptr, "(VirtualMemoryBumpAllocator) ran out of memory"))
+        return nullptr;
+
+    return (void *)out;
+}
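Because the cursor advances with an atomic fetch_add, concurrent Alloc calls each receive a disjoint, page-aligned slice without any locking; the trade-off, as noted in vtlb.cpp below, is that regions can never be returned. A sketch of the cursor behaviour (sizes illustrative):

    VirtualMemoryBumpAllocator &bump = GetVmMemory().BumpAllocator();
    void *x = bump.Alloc(100);     // returns the cursor, advances it by pageAlign(100) = one page
    void *y = bump.Alloc(0x1800);  // starts exactly one page after x, advances the cursor two pages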
+// --------------------------------------------------------------------------------------
+//  VirtualMemoryReserve  (implementations)
+// --------------------------------------------------------------------------------------
+VirtualMemoryReserve::VirtualMemoryReserve(const wxString &name, size_t size)
+    : m_name(name)
+{
+    m_defsize = size;
+
+    m_allocator = nullptr;
+    m_pages_commited = 0;
+    m_pages_reserved = 0;
+    m_baseptr = nullptr;
+    m_prot_mode = PageAccess_None();
+    m_allow_writes = true;
+}
+
+VirtualMemoryReserve &VirtualMemoryReserve::SetPageAccessOnCommit(const PageProtectionMode &mode)
+{
+    m_prot_mode = mode;
+    return *this;
+}
+
+size_t VirtualMemoryReserve::GetSize(size_t requestedSize)
+{
+    if (!requestedSize)
+        return pageAlign(m_defsize);
+    return pageAlign(requestedSize);
+}
+
+// Notes:
+//  * This method should only be called while the object is in a released (unreserved) state.
+//    Subsequent calls will be ignored, and the existing reserve will be returned.
+//
+// Parameters:
+//   baseptr - the new base pointer that's about to be assigned
+//   size - size of the region pointed to by baseptr
+//
+void *VirtualMemoryReserve::Assign(VirtualMemoryManagerPtr allocator, void *baseptr, size_t size)
+{
+    if (!pxAssertDev(m_baseptr == NULL, "(VirtualMemoryReserve) Invalid object state; object has already been reserved."))
+        return m_baseptr;
+
+    if (!size)
+        return nullptr;
+
+    m_allocator = std::move(allocator);
+
+    m_baseptr = baseptr;
+
+    uptr reserved_bytes = pageAlign(size);
+    m_pages_reserved = reserved_bytes / __pagesize;
+
     if (!m_baseptr)
-        return NULL;
+        return nullptr;
 
     FastFormatUnicode mbkb;
     uint mbytes = reserved_bytes / _1mb;
@@ -184,7 +299,10 @@ void VirtualMemoryReserve::Reset()
 
 void VirtualMemoryReserve::Release()
 {
-    SafeSysMunmap(m_baseptr, m_pages_reserved * __pagesize);
+    if (!m_baseptr) return;
+    Reset();
+    m_allocator->Free(m_baseptr, m_pages_reserved * __pagesize);
+    m_baseptr = nullptr;
 }
 
 bool VirtualMemoryReserve::Commit()
@@ -222,7 +340,7 @@ void VirtualMemoryReserve::ForbidModification()
 // newsize - new size of the reserved buffer, in bytes.
 bool VirtualMemoryReserve::TryResize(uint newsize)
 {
-    uint newPages = (newsize + __pagesize - 1) / __pagesize;
+    uint newPages = pageAlign(newsize) / __pagesize;
 
     if (newPages > m_pages_reserved) {
         uint toReservePages = newPages - m_pages_reserved;
@@ -230,11 +348,10 @@ bool VirtualMemoryReserve::TryResize(uint newsize)
 
         DevCon.WriteLn(L"%-32s is being expanded by %u pages.", WX_STR(m_name), toReservePages);
 
-        m_baseptr = (void *)HostSys::MmapReserve((uptr)GetPtrEnd(), toReserveBytes);
-
-        if (!m_baseptr) {
-            Console.Warning("%-32s could not be passively resized due to virtual memory conflict!");
+        if (!m_allocator->AllocAtAddress(GetPtrEnd(), toReserveBytes)) {
+            Console.Warning("%-32s could not be passively resized due to virtual memory conflict!", WX_STR(m_name));
             Console.Indent().Warning("(attempted to map memory @ %08p -> %08p)", m_baseptr, (uptr)m_baseptr + toReserveBytes);
             return false;
         }
 
         DevCon.WriteLn(Color_Gray, L"%-32s @ %08p -> %08p [%umb]", WX_STR(m_name),
@@ -248,12 +365,13 @@ bool VirtualMemoryReserve::TryResize(uint newsize)
 
         DevCon.WriteLn(L"%-32s is being shrunk by %u pages.", WX_STR(m_name), toRemovePages);
 
-        HostSys::MmapResetPtr(GetPtrEnd(), toRemoveBytes);
+        m_allocator->Free(GetPtrEnd() - toRemoveBytes, toRemoveBytes);
 
         DevCon.WriteLn(Color_Gray, L"%-32s @ %08p -> %08p [%umb]", WX_STR(m_name),
-            m_baseptr, (uptr)m_baseptr + toRemoveBytes, toRemoveBytes / _1mb);
+            m_baseptr, GetPtrEnd(), GetReserveSizeInBytes() / _1mb);
     }
 
     m_pages_reserved = newPages;
     return true;
 }
@@ -33,9 +33,9 @@ iopMemoryReserve::iopMemoryReserve()
 {
 }
 
-void iopMemoryReserve::Reserve()
+void iopMemoryReserve::Reserve(VirtualMemoryManagerPtr allocator)
 {
-    _parent::Reserve(HostMemoryMap::IOPmem);
+    _parent::Reserve(std::move(allocator), HostMemoryMap::IOPmemOffset);
     //_parent::Reserve(EmuConfig.HostMap.IOP);
 }
 
@@ -119,12 +119,6 @@ void iopMemoryReserve::Decommit()
     iopMem = NULL;
 }
 
-void iopMemoryReserve::Release()
-{
-    _parent::Release();
-    iopMem = NULL;
-}
-
 u8 __fastcall iopMemRead8(u32 mem)
 {
@@ -709,9 +709,9 @@ eeMemoryReserve::eeMemoryReserve()
 {
 }
 
-void eeMemoryReserve::Reserve()
+void eeMemoryReserve::Reserve(VirtualMemoryManagerPtr allocator)
 {
-    _parent::Reserve(HostMemoryMap::EEmem);
+    _parent::Reserve(std::move(allocator), HostMemoryMap::EEmemOffset);
     //_parent::Reserve(EmuConfig.HostMap.IOP);
 }
 
@@ -856,11 +856,9 @@ void eeMemoryReserve::Decommit()
     eeMem = NULL;
 }
 
-void eeMemoryReserve::Release()
+eeMemoryReserve::~eeMemoryReserve()
 {
     safe_delete(mmap_faultHandler);
-    _parent::Release();
     eeMem = NULL;
     vtlb_Term();
 }
@@ -57,9 +57,9 @@ void RecompiledCodeReserve::_termProfiler()
 {
 }
 
-void* RecompiledCodeReserve::Reserve( size_t size, uptr base, uptr upper_bounds )
+void* RecompiledCodeReserve::Assign( VirtualMemoryManagerPtr allocator, void *baseptr, size_t size )
 {
-    if (!_parent::Reserve(size, base, upper_bounds)) return NULL;
+    if (!_parent::Assign(std::move(allocator), baseptr, size)) return NULL;
 
     Commit();
 
@@ -112,7 +112,6 @@ void RecompiledCodeReserve::ThrowIfNotOk() const
     ));
 }
 
-
 void SysOutOfMemory_EmergencyResponse(uptr blocksize)
 {
     // An out of memory error occurred.  All we can try to do in response is reset the various
@@ -352,11 +351,60 @@ static wxString GetMemoryErrorVM()
     );
 }
 
+namespace HostMemoryMap {
+    // For debuggers
+    uptr EEmem, IOPmem, VUmem, EErec, IOPrec, VIF0rec, VIF1rec, mVU0rec, mVU1rec, bumpAllocator;
+}
+
+/// Attempts to find a spot near static variables for the main memory
+static VirtualMemoryManagerPtr makeMainMemoryManager() {
+    // Everything looks nicer when the start of all the sections is a nice round looking number.
+    // Also reduces the variation in the address due to small changes in code.
+    // Breaks ASLR, but so does anything else that tries to make addresses constant for our debugging pleasure.
+    uptr codeBase = (uptr)(void*)makeMainMemoryManager / (1 << 28) * (1 << 28);
+
+    // The allocation is ~640mb in size, slightly under 3*2^28.
+    // We'll hope that the code generated for the PCSX2 executable stays under 512mb (which is likely).
+    // On x86-64, code can reach 8*2^28 from its address, so [-6*2^28, 4*2^28] is the region that allows
+    // code in the 640mb allocation to reach 512mb of code that either starts at codeBase or 256mb before it.
+    // We start high and count down because on macOS code starts at the beginning of useable address space,
+    // so starting as far ahead as possible reduces address variations due to code size.  Not sure about other
+    // platforms.  Obviously this only actually affects what shows up in a debugger and won't affect
+    // performance or correctness of anything.
+    for (int offset = 4; offset >= -6; offset--) {
+        uptr base = codeBase + (offset << 28);
+        if ((sptr)base < 0 || (sptr)(base + HostMemoryMap::Size - 1) < 0) {
+            // VTLB will throw a fit if we try to put EE main memory here
+            continue;
+        }
+        auto mgr = std::make_shared<VirtualMemoryManager>("Main Memory Manager", base, HostMemoryMap::Size, /*upper_bounds=*/0, /*strict=*/true);
+        if (mgr->IsOk()) {
+            return mgr;
+        }
+    }
+
+    // If the above failed and it's x86-64, recompiled code is going to break!
+    // If it's i386, anything can reach anything, so it doesn't matter.
+    if (sizeof(void*) == 8) {
+        pxAssertRel(0, "Failed to find a good place for the main memory allocation, recompilers may fail");
+    }
+    return std::make_shared<VirtualMemoryManager>("Main Memory Manager", 0, HostMemoryMap::Size);
+}
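The rounding in makeMainMemoryManager snaps the probe window to a 256MB (2^28) granule, so the chosen base only moves when the executable itself moves by a whole granule. A worked example with a made-up code address:

    // Suppose makeMainMemoryManager sits at 0x7f3a56789abc:
    // codeBase = 0x7f3a56789abc / (1 << 28) * (1 << 28) = 0x7f3a50000000
    // The loop then probes codeBase + 4*2^28 down through codeBase - 6*2^28,
    // skipping any base whose [base, base + Size) range would be negative as an
    // sptr, since the VTLB can't host EE memory there.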
 // --------------------------------------------------------------------------------------
 //  SysReserveVM  (implementations)
 // --------------------------------------------------------------------------------------
 SysMainMemory::SysMainMemory()
+    : m_mainMemory(makeMainMemoryManager())
+    , m_bumpAllocator(m_mainMemory, HostMemoryMap::bumpAllocatorOffset, HostMemoryMap::Size - HostMemoryMap::bumpAllocatorOffset)
 {
+    uptr base = (uptr)MainMemory()->GetBase();
+    HostMemoryMap::EEmem = base + HostMemoryMap::EEmemOffset;
+    HostMemoryMap::IOPmem = base + HostMemoryMap::IOPmemOffset;
+    HostMemoryMap::VUmem = base + HostMemoryMap::VUmemOffset;
+    HostMemoryMap::EErec = base + HostMemoryMap::EErecOffset;
+    HostMemoryMap::IOPrec = base + HostMemoryMap::IOPrecOffset;
+    HostMemoryMap::VIF0rec = base + HostMemoryMap::VIF0recOffset;
+    HostMemoryMap::VIF1rec = base + HostMemoryMap::VIF1recOffset;
+    HostMemoryMap::mVU0rec = base + HostMemoryMap::mVU0recOffset;
+    HostMemoryMap::mVU1rec = base + HostMemoryMap::mVU1recOffset;
+    HostMemoryMap::bumpAllocator = base + HostMemoryMap::bumpAllocatorOffset;
 }
 
 SysMainMemory::~SysMainMemory()
@@ -374,9 +422,9 @@ void SysMainMemory::ReserveAll()
     DevCon.WriteLn( Color_StrongBlue, "Mapping host memory for virtual systems..." );
     ConsoleIndentScope indent(1);
 
-    m_ee.Reserve();
-    m_iop.Reserve();
-    m_vu.Reserve();
+    m_ee.Reserve(MainMemory());
+    m_iop.Reserve(MainMemory());
+    m_vu.Reserve(MainMemory());
 }
 
 void SysMainMemory::CommitAll()
@@ -41,48 +41,41 @@ class RecompiledCodeReserve;
 
 namespace HostMemoryMap
 {
-#ifdef ASAN_WORKAROUND
-    // address sanitizer uses a shadow memory to monitor the state of the memory. Shadow is computed
-    // as S = (M >> 3) + 0x20000000. So PCSX2 can't use 0x20000000 to 0x3FFFFFFF... Just add another
-    // 0x20000000 offset to avoid conflict.
-    static const uptr EEmem = 0x40000000;
-    static const uptr IOPmem = 0x44000000;
-    static const uptr VUmem = 0x48000000;
-    static const uptr EErec = 0x50000000;
-    static const uptr IOPrec = 0x54000000;
-    static const uptr VIF0rec = 0x56000000;
-    static const uptr VIF1rec = 0x58000000;
-    static const uptr mVU0rec = 0x5C000000;
-    static const uptr mVU1rec = 0x60000000;
-#else
+    static const u32 Size = 0x28000000;
+
+    // The actual addresses may not be equivalent to Base + Offset in the event that allocation at Base failed
+    // Each of these offsets has a debugger-accessible equivalent variable without the Offset suffix that will
+    // hold the actual address (not here because we don't want code using it)
+
     // PS2 main memory, SPR, and ROMs
-    static const uptr EEmem = 0x20000000;
+    static const u32 EEmemOffset = 0x00000000;
 
     // IOP main memory and ROMs
-    static const uptr IOPmem = 0x24000000;
+    static const u32 IOPmemOffset = 0x04000000;
 
     // VU0 and VU1 memory.
-    static const uptr VUmem = 0x28000000;
+    static const u32 VUmemOffset = 0x08000000;
 
     // EE recompiler code cache area (64mb)
-    static const uptr EErec = 0x30000000;
+    static const u32 EErecOffset = 0x10000000;
 
     // IOP recompiler code cache area (16 or 32mb)
-    static const uptr IOPrec = 0x34000000;
+    static const u32 IOPrecOffset = 0x14000000;
 
     // newVif0 recompiler code cache area (16mb)
-    static const uptr VIF0rec = 0x36000000;
+    static const u32 VIF0recOffset = 0x16000000;
 
     // newVif1 recompiler code cache area (32mb)
-    static const uptr VIF1rec = 0x38000000;
+    static const u32 VIF1recOffset = 0x18000000;
 
     // microVU0 recompiler code cache area (32 or 64mb)
-    static const uptr mVU0rec = 0x3C000000;
+    static const u32 mVU0recOffset = 0x1C000000;
 
     // microVU1 recompiler code cache area (64mb)
-    static const uptr mVU1rec = 0x40000000;
-#endif
+    static const u32 mVU1recOffset = 0x20000000;
+
+    // Bump allocator for any other small allocations
+    // Sized as the difference between this offset and HostMemoryMap::Size, so nothing should allocate higher than it!
+    static const u32 bumpAllocatorOffset = 0x24000000;
 }
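The offsets above describe a fixed layout inside the single 0x28000000-byte (640mb) allocation, with each region's size being the gap to the next offset. A hedged sketch of the invariants they imply; these static_asserts are illustrative, not part of the header:

    static_assert(HostMemoryMap::EEmemOffset == 0x00000000, "EE memory leads the map");
    static_assert(HostMemoryMap::VUmemOffset - HostMemoryMap::IOPmemOffset == 0x04000000, "64mb for IOP memory");
    static_assert(HostMemoryMap::IOPrecOffset - HostMemoryMap::EErecOffset == 0x04000000, "64mb for the EE recompiler");
    static_assert(HostMemoryMap::bumpAllocatorOffset < HostMemoryMap::Size, "bump area fits inside the allocation");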
 // --------------------------------------------------------------------------------------
@@ -92,14 +85,19 @@ namespace HostMemoryMap
 class SysMainMemory
 {
 protected:
-    eeMemoryReserve m_ee;
-    iopMemoryReserve m_iop;
-    vuMemoryReserve m_vu;
+    const VirtualMemoryManagerPtr m_mainMemory;
+    VirtualMemoryBumpAllocator m_bumpAllocator;
+    eeMemoryReserve m_ee;
+    iopMemoryReserve m_iop;
+    vuMemoryReserve m_vu;
 
 public:
     SysMainMemory();
     virtual ~SysMainMemory();
 
+    const VirtualMemoryManagerPtr& MainMemory() { return m_mainMemory; }
+    VirtualMemoryBumpAllocator& BumpAllocator() { return m_bumpAllocator; }
+
     virtual void ReserveAll();
     virtual void CommitAll();
     virtual void ResetAll();
@@ -34,9 +34,9 @@ public:
     RecompiledCodeReserve( const wxString& name=wxEmptyString, uint defCommit = 0 );
     virtual ~RecompiledCodeReserve();
 
-    virtual void* Reserve( size_t size, uptr base=0, uptr upper_bounds=0 );
-    virtual void Reset();
-    virtual bool Commit();
+    virtual void* Assign( VirtualMemoryManagerPtr allocator, void *baseptr, size_t size ) override;
+    virtual void Reset() override;
+    virtual bool Commit() override;
 
     virtual RecompiledCodeReserve& SetProfilerName( const wxString& shortname );
     virtual RecompiledCodeReserve& SetProfilerName( const char* shortname )
@@ -27,9 +27,9 @@ vuMemoryReserve::vuMemoryReserve()
 {
 }
 
-void vuMemoryReserve::Reserve()
+void vuMemoryReserve::Reserve(VirtualMemoryManagerPtr allocator)
 {
-    _parent::Reserve(HostMemoryMap::VUmem);
+    _parent::Reserve(std::move(allocator), HostMemoryMap::VUmemOffset);
     //_parent::Reserve(EmuConfig.HostMemMap.VUmem);
 
     u8* curpos = m_reserve.GetPtr();
@@ -39,10 +39,8 @@ void vuMemoryReserve::Reserve()
     VU1.Mem = curpos; curpos += VU1_MEMSIZE;
 }
 
-void vuMemoryReserve::Release()
+vuMemoryReserve::~vuMemoryReserve()
 {
-    _parent::Release();
-
     VU0.Micro = VU0.Mem = NULL;
     VU1.Micro = VU1.Mem = NULL;
 }
@@ -791,19 +791,28 @@ void vtlb_Term()
     //nothing to do for now
 }
 
+constexpr size_t VMAP_SIZE = sizeof(sptr) * VTLB_VMAP_ITEMS;
+
 // Reserves the vtlb core allocation used by various emulation components!
 // [TODO] basemem - request allocating memory at the specified virtual location, which can allow
 //     for easier debugging and/or 3rd party cheat programs.  If 0, the operating system
 //     default is used.
 void vtlb_Core_Alloc()
 {
+    // Can't return regions to the bump allocator
+    static sptr* vmap = nullptr;
+    if (!vmap)
+        vmap = (sptr*)GetVmMemory().BumpAllocator().Alloc(VMAP_SIZE);
     if (!vtlbdata.vmap)
     {
-        vtlbdata.vmap = (sptr*)_aligned_malloc( VTLB_VMAP_ITEMS * sizeof(*vtlbdata.vmap), 16 );
-        if (!vtlbdata.vmap)
+        bool okay = HostSys::MmapCommitPtr(vmap, VMAP_SIZE, PageProtectionMode().Read().Write());
+        if (okay) {
+            vtlbdata.vmap = vmap;
+        } else {
             throw Exception::OutOfMemory( L"VTLB Virtual Address Translation LUT" )
                 .SetDiagMsg(pxsFmt("(%u megs)", VTLB_VMAP_ITEMS * sizeof(*vtlbdata.vmap) / _1mb)
             );
+        }
     }
 }
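Since bump-allocated regions can't be returned, vtlb_Core_Alloc keeps the vmap pointer in a function-local static and manages only the commit state of its pages across alloc/free cycles. The resulting lifecycle, using the HostSys calls from this diff:

    vtlb_Core_Alloc();  // first call: bump-allocates vmap, commits its pages read/write
    vtlb_Core_Free();   // MmapResetPtr decommits the pages; vtlbdata.vmap is cleared
    vtlb_Core_Alloc();  // later call: reuses the same static region, recommits the pages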
@@ -825,7 +834,10 @@ void vtlb_Alloc_Ppmap()
 
 void vtlb_Core_Free()
 {
-    safe_aligned_free( vtlbdata.vmap );
+    if (vtlbdata.vmap) {
+        HostSys::MmapResetPtr(vtlbdata.vmap, VMAP_SIZE);
+        vtlbdata.vmap = nullptr;
+    }
     safe_aligned_free( vtlbdata.ppmap );
 }
 
@@ -844,9 +856,9 @@ VtlbMemoryReserve::VtlbMemoryReserve( const wxString& name, size_t size )
     m_reserve.SetPageAccessOnCommit( PageAccess_ReadWrite() );
 }
 
-void VtlbMemoryReserve::Reserve( sptr hostptr )
+void VtlbMemoryReserve::Reserve( VirtualMemoryManagerPtr allocator, sptr offset )
 {
-    if (!m_reserve.ReserveAt( hostptr ))
+    if (!m_reserve.Reserve( std::move(allocator), offset ))
     {
         throw Exception::OutOfMemory( m_reserve.GetName() )
             .SetDiagMsg(L"Vtlb memory could not be reserved.")
@@ -876,11 +888,6 @@ void VtlbMemoryReserve::Decommit()
     m_reserve.Reset();
 }
 
-void VtlbMemoryReserve::Release()
-{
-    m_reserve.Release();
-}
-
 bool VtlbMemoryReserve::IsCommitted() const
 {
     return !!m_reserve.GetCommittedPageCount();
pcsx2/vtlb.h
@@ -100,13 +100,8 @@ protected:
 
 public:
     VtlbMemoryReserve( const wxString& name, size_t size );
-    virtual ~VtlbMemoryReserve()
-    {
-        m_reserve.Release();
-    }
 
-    void Reserve( sptr hostptr );
-    virtual void Release();
+    void Reserve( VirtualMemoryManagerPtr allocator, sptr offset );
 
     virtual void Commit();
     virtual void Reset();
@@ -124,16 +119,12 @@ class eeMemoryReserve : public VtlbMemoryReserve
 
 public:
     eeMemoryReserve();
-    virtual ~eeMemoryReserve()
-    {
-        Release();
-    }
+    ~eeMemoryReserve();
 
-    void Reserve();
-    void Commit();
-    void Decommit();
-    void Reset();
-    void Release();
+    void Reserve(VirtualMemoryManagerPtr allocator);
+    void Commit() override;
+    void Decommit() override;
+    void Reset() override;
 };
 
 // --------------------------------------------------------------------------------------
@@ -145,16 +136,11 @@ class iopMemoryReserve : public VtlbMemoryReserve
 
 public:
     iopMemoryReserve();
-    virtual ~iopMemoryReserve()
-    {
-        Release();
-    }
 
-    void Reserve();
-    void Commit();
-    void Decommit();
-    void Release();
-    void Reset();
+    void Reserve(VirtualMemoryManagerPtr allocator);
+    void Commit() override;
+    void Decommit() override;
+    void Reset() override;
 };
 
 // --------------------------------------------------------------------------------------
@@ -166,15 +152,11 @@ class vuMemoryReserve : public VtlbMemoryReserve
 
 public:
     vuMemoryReserve();
-    virtual ~vuMemoryReserve()
-    {
-        Release();
-    }
+    ~vuMemoryReserve();
 
-    void Reserve();
-    void Release();
+    void Reserve(VirtualMemoryManagerPtr allocator);
 
-    void Reset();
+    void Reset() override;
 };
 
 namespace vtlb_private
@@ -632,7 +632,7 @@ static void recReserveCache()
 
     while (!recMem->IsOk())
     {
-        if (recMem->Reserve( m_ConfiguredCacheReserve * _1mb, HostMemoryMap::IOPrec ) != NULL) break;
+        if (recMem->Reserve(GetVmMemory().MainMemory(), HostMemoryMap::IOPrecOffset, m_ConfiguredCacheReserve * _1mb) != NULL) break;
 
         // If it failed, then try again (if possible):
        if (m_ConfiguredCacheReserve < 4) break;
@@ -480,7 +480,7 @@ static void recReserveCache()
 
    while (!recMem->IsOk())
    {
-        if (recMem->Reserve( m_ConfiguredCacheReserve * _1mb, HostMemoryMap::EErec ) != NULL) break;
+        if (recMem->Reserve(GetVmMemory().MainMemory(), HostMemoryMap::EErecOffset, m_ConfiguredCacheReserve * _1mb) != NULL) break;
 
        // If it failed, then try again (if possible):
        if (m_ConfiguredCacheReserve < 16) break;
@@ -38,8 +38,8 @@ void mVUreserveCache(microVU& mVU) {
     mVU.cache_reserve->SetProfilerName(pxsFmt("mVU%urec", mVU.index));
 
     mVU.cache = mVU.index ?
-        (u8*)mVU.cache_reserve->Reserve(mVU.cacheSize * _1mb, HostMemoryMap::mVU1rec):
-        (u8*)mVU.cache_reserve->Reserve(mVU.cacheSize * _1mb, HostMemoryMap::mVU0rec);
+        (u8*)mVU.cache_reserve->Reserve(GetVmMemory().MainMemory(), HostMemoryMap::mVU1recOffset, mVU.cacheSize * _1mb):
+        (u8*)mVU.cache_reserve->Reserve(GetVmMemory().MainMemory(), HostMemoryMap::mVU0recOffset, mVU.cacheSize * _1mb);
 
     mVU.cache_reserve->ThrowIfNotOk();
 }
@@ -76,6 +76,6 @@ extern void releaseNewVif(int idx);
 
 extern __aligned16 nVifStruct nVif[2];
 extern __aligned16 nVifCall nVifUpk[(2*2*16)*4]; // ([USN][Masking][Unpack Type]) [curCycle]
-extern __aligned16 u32 nVifMask[3][4][4]; // [MaskNumber][CycleNumber][Vector]
+extern __aligned16 u32 nVifMask[3][4][4]; // [MaskNumber][CycleNumber][Vector]
 
 static const bool newVifDynaRec = 1; // Use code in newVif_Dynarec.inl
@@ -34,7 +34,8 @@ void dVifReserve(int idx) {
     if(!nVif[idx].recReserve)
         nVif[idx].recReserve = new RecompiledCodeReserve(pxsFmt(L"VIF%u Unpack Recompiler Cache", idx), _8mb);
 
-    nVif[idx].recReserve->Reserve( 8 * _1mb, idx ? HostMemoryMap::VIF1rec : HostMemoryMap::VIF0rec );
+    auto offset = idx ? HostMemoryMap::VIF1recOffset : HostMemoryMap::VIF0recOffset;
+    nVif[idx].recReserve->Reserve(GetVmMemory().MainMemory(), offset, 8 * _1mb);
 }
 
 void dVifReset(int idx) {
@@ -424,7 +424,7 @@ void VifUnpackSSE_Init()
 
     nVifUpkExec = new RecompiledCodeReserve(L"VIF SSE-optimized Unpacking Functions", _64kb);
     nVifUpkExec->SetProfilerName("iVIF-SSE");
-    nVifUpkExec->Reserve( _64kb );
+    nVifUpkExec->Reserve(GetVmMemory().BumpAllocator(), _64kb);
 
     nVifUpkExec->ThrowIfNotOk();