Common/MemArenaWin: Rewrite LazyMemoryRegion to manually map memory blocks into the large memory region as needed.
Internal details: The large region is split into individual same-sized blocks of memory. On creation, we allocate a single block of memory that will always remain zero and map it into the entire memory region. The first time any of these blocks is written to, we swap the mapped zero block out for a newly allocated block of memory. On clear, we swap back to the zero block and deallocate the data blocks. That way we only ever allocate the one zero block plus a handful of real data blocks at the locations where the JitCache actually writes.
commit 3364d571cc
parent eb235d6ee3
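For readers unfamiliar with the Windows placeholder APIs that the diff below relies on, here is a minimal standalone sketch of the same reserve/split/map-zero/swap-on-write sequence. It is illustrative only and not part of the commit: it assumes Windows 10 1803+ with a recent SDK so that VirtualAlloc2, MapViewOfFile3, and UnmapViewOfFileEx resolve at link time (e.g. via onecore.lib), uses a tiny 64 KiB block size, and omits the error handling and runtime function lookup that the actual commit performs through WindowsMemoryFunctions.

// Illustrative sketch only -- not part of the commit. Error handling omitted.
#include <windows.h>
#include <cstdio>

int main()
{
  constexpr size_t kBlockSize = 64 * 1024;  // keep to the 64 KiB placeholder granularity
  constexpr size_t kBlockCount = 2;

  // 1. Reserve the whole region as a placeholder; nothing is committed yet.
  char* base = static_cast<char*>(
      VirtualAlloc2(nullptr, nullptr, kBlockSize * kBlockCount,
                    MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS, nullptr, 0));

  // 2. Split the placeholder into block-sized placeholders (block_count - 1 splits).
  VirtualFree(base, kBlockSize, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);

  // 3. Create one read-only, page-file-backed zero block and map it into every slot,
  //    so reads anywhere in the region see zeroes while only one block of real memory exists.
  HANDLE zero_block = CreateFileMapping(INVALID_HANDLE_VALUE, nullptr, PAGE_READONLY, 0,
                                        static_cast<DWORD>(kBlockSize), nullptr);
  for (size_t i = 0; i < kBlockCount; ++i)
  {
    MapViewOfFile3(zero_block, nullptr, base + i * kBlockSize, 0, kBlockSize,
                   MEM_REPLACE_PLACEHOLDER, PAGE_READONLY, nullptr, 0);
  }

  // 4. First write to block 0: swap its zero mapping for a private writable block.
  //    This mirrors what MakeMemoryBlockWritable() does in the commit.
  UnmapViewOfFileEx(base, MEM_PRESERVE_PLACEHOLDER);
  HANDLE writable_block = CreateFileMapping(INVALID_HANDLE_VALUE, nullptr, PAGE_READWRITE, 0,
                                            static_cast<DWORD>(kBlockSize), nullptr);
  MapViewOfFile3(writable_block, nullptr, base, 0, kBlockSize, MEM_REPLACE_PLACEHOLDER,
                 PAGE_READWRITE, nullptr, 0);

  base[0] = 42;  // backed by real memory now; block 1 still shares the zero block
  std::printf("block 0: %d, block 1: %d\n", base[0], base[kBlockSize]);
  return 0;
}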
@@ -160,9 +160,34 @@ public:
   ///
   void Release();
 
+  ///
+  /// Ensure that the memory page at the given byte offset from the start of the memory region is
+  /// writable. We use this on Windows as a workaround to only actually commit pages as they are
+  /// written to. On other OSes this does nothing.
+  ///
+  /// @param offset The offset into the memory region that should be made writable if it isn't.
+  ///
+  void EnsureMemoryPageWritable(size_t offset)
+  {
+#ifdef _WIN32
+    const size_t block_index = offset / BLOCK_SIZE;
+    if (m_writable_block_handles[block_index] == nullptr)
+      MakeMemoryBlockWritable(block_index);
+#endif
+  }
+
 private:
   void* m_memory = nullptr;
   size_t m_size = 0;
+
+#ifdef _WIN32
+  void* m_zero_block = nullptr;
+  constexpr static size_t BLOCK_SIZE = 8 * 1024 * 1024;  // size of allocated memory blocks
+  WindowsMemoryFunctions m_memory_functions;
+  std::vector<void*> m_writable_block_handles;
+
+  void MakeMemoryBlockWritable(size_t offset);
+#endif
 };
 
 }  // namespace Common
@@ -12,6 +12,7 @@
 
 #include <windows.h>
 
+#include "Common/Align.h"
 #include "Common/Assert.h"
 #include "Common/CommonFuncs.h"
 #include "Common/CommonTypes.h"
@@ -441,7 +442,10 @@ void MemArena::UnmapFromMemoryRegion(void* view, size_t size)
   UnmapViewOfFile(view);
 }
 
-LazyMemoryRegion::LazyMemoryRegion() = default;
+LazyMemoryRegion::LazyMemoryRegion()
+{
+  InitWindowsMemoryFunctions(&m_memory_functions);
+}
 
 LazyMemoryRegion::~LazyMemoryRegion()
 {
@@ -455,15 +459,67 @@ void* LazyMemoryRegion::Create(size_t size)
   if (size == 0)
     return nullptr;
 
-  void* memory = VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+  if (!m_memory_functions.m_api_ms_win_core_memory_l1_1_6_handle.IsOpen())
+    return nullptr;
+
+  // reserve block of memory
+  const size_t memory_size = Common::AlignUp(size, BLOCK_SIZE);
+  const size_t block_count = memory_size / BLOCK_SIZE;
+  u8* memory =
+      static_cast<u8*>(static_cast<PVirtualAlloc2>(m_memory_functions.m_address_VirtualAlloc2)(
+          nullptr, nullptr, memory_size, MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS,
+          nullptr, 0));
   if (!memory)
   {
-    NOTICE_LOG_FMT(MEMMAP, "Memory allocation of {} bytes failed.", size);
+    NOTICE_LOG_FMT(MEMMAP, "Memory reservation of {} bytes failed.", size);
     return nullptr;
   }
 
+  // split into individual block-sized regions
+  for (size_t i = 0; i < block_count - 1; ++i)
+  {
+    if (!VirtualFree(memory + i * BLOCK_SIZE, BLOCK_SIZE, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER))
+    {
+      NOTICE_LOG_FMT(MEMMAP, "Region splitting failed: {}", GetLastErrorString());
+
+      // release every split block as well as the remaining unsplit one
+      for (size_t j = 0; j < i + 1; ++j)
+        VirtualFree(memory + j * BLOCK_SIZE, 0, MEM_RELEASE);
+
+      return nullptr;
+    }
+  }
+
   m_memory = memory;
-  m_size = size;
+  m_size = memory_size;
 
+  // allocate a single block of real memory in the page file
+  HANDLE zero_block = CreateFileMapping(INVALID_HANDLE_VALUE, nullptr, PAGE_READONLY,
+                                        GetHighDWORD(BLOCK_SIZE), GetLowDWORD(BLOCK_SIZE), nullptr);
+  if (zero_block == nullptr)
+  {
+    NOTICE_LOG_FMT(MEMMAP, "CreateFileMapping() failed for zero block: {}", GetLastErrorString());
+    Release();
+    return nullptr;
+  }
+
+  m_zero_block = zero_block;
+
+  // map the zero page into every block
+  for (size_t i = 0; i < block_count; ++i)
+  {
+    void* result = static_cast<PMapViewOfFile3>(m_memory_functions.m_address_MapViewOfFile3)(
+        zero_block, nullptr, memory + i * BLOCK_SIZE, 0, BLOCK_SIZE, MEM_REPLACE_PLACEHOLDER,
+        PAGE_READONLY, nullptr, 0);
+    if (!result)
+    {
+      NOTICE_LOG_FMT(MEMMAP, "Mapping the zero block failed: {}", GetLastErrorString());
+      Release();
+      return nullptr;
+    }
+  }
+
+  m_writable_block_handles.resize(block_count, nullptr);
+
   return memory;
 }
@@ -471,19 +527,105 @@ void* LazyMemoryRegion::Create(size_t size)
 void LazyMemoryRegion::Clear()
 {
   ASSERT(m_memory);
+  u8* const memory = static_cast<u8*>(m_memory);
 
-  VirtualFree(m_memory, m_size, MEM_DECOMMIT);
-  VirtualAlloc(m_memory, m_size, MEM_COMMIT, PAGE_READWRITE);
+  // reset every writable block back to the zero block
+  for (size_t i = 0; i < m_writable_block_handles.size(); ++i)
+  {
+    if (m_writable_block_handles[i] == nullptr)
+      continue;
+
+    // unmap the writable block
+    if (!static_cast<PUnmapViewOfFileEx>(m_memory_functions.m_address_UnmapViewOfFileEx)(
+            memory + i * BLOCK_SIZE, MEM_PRESERVE_PLACEHOLDER))
+    {
+      PanicAlertFmt("Failed to unmap the writable block: {}", GetLastErrorString());
+    }
+
+    // free the writable block
+    if (!CloseHandle(m_writable_block_handles[i]))
+    {
+      PanicAlertFmt("Failed to free the writable block: {}", GetLastErrorString());
+    }
+    m_writable_block_handles[i] = nullptr;
+
+    // map the zero block
+    void* map_result = static_cast<PMapViewOfFile3>(m_memory_functions.m_address_MapViewOfFile3)(
+        m_zero_block, nullptr, memory + i * BLOCK_SIZE, 0, BLOCK_SIZE, MEM_REPLACE_PLACEHOLDER,
+        PAGE_READONLY, nullptr, 0);
+    if (!map_result)
+    {
+      PanicAlertFmt("Failed to re-map the zero block: {}", GetLastErrorString());
+    }
+  }
 }
 
 void LazyMemoryRegion::Release()
 {
   if (m_memory)
   {
-    VirtualFree(m_memory, 0, MEM_RELEASE);
+    // unmap all pages and release the not-zero block handles
+    u8* const memory = static_cast<u8*>(m_memory);
+    for (size_t i = 0; i < m_writable_block_handles.size(); ++i)
+    {
+      static_cast<PUnmapViewOfFileEx>(m_memory_functions.m_address_UnmapViewOfFileEx)(
+          memory + i * BLOCK_SIZE, MEM_PRESERVE_PLACEHOLDER);
+      if (m_writable_block_handles[i])
+      {
+        CloseHandle(m_writable_block_handles[i]);
+        m_writable_block_handles[i] = nullptr;
+      }
+    }
+  }
+  if (m_zero_block)
+  {
+    CloseHandle(m_zero_block);
+    m_zero_block = nullptr;
+  }
+  if (m_memory)
+  {
+    u8* const memory = static_cast<u8*>(m_memory);
+    const size_t block_count = m_size / BLOCK_SIZE;
+    for (size_t i = 0; i < block_count; ++i)
+      VirtualFree(memory + i * BLOCK_SIZE, 0, MEM_RELEASE);
     m_memory = nullptr;
     m_size = 0;
   }
 }
 
+void LazyMemoryRegion::MakeMemoryBlockWritable(size_t block_index)
+{
+  u8* const memory = static_cast<u8*>(m_memory);
+
+  // unmap the zero block
+  if (!static_cast<PUnmapViewOfFileEx>(m_memory_functions.m_address_UnmapViewOfFileEx)(
+          memory + block_index * BLOCK_SIZE, MEM_PRESERVE_PLACEHOLDER))
+  {
+    PanicAlertFmt("Failed to unmap the zero block: {}", GetLastErrorString());
+    return;
+  }
+
+  // allocate a fresh block to map
+  HANDLE block = CreateFileMapping(INVALID_HANDLE_VALUE, nullptr, PAGE_READWRITE,
+                                   GetHighDWORD(BLOCK_SIZE), GetLowDWORD(BLOCK_SIZE), nullptr);
+  if (block == nullptr)
+  {
+    PanicAlertFmt("CreateFileMapping() failed for writable block: {}", GetLastErrorString());
+    return;
+  }
+
+  // map the new block
+  void* map_result = static_cast<PMapViewOfFile3>(m_memory_functions.m_address_MapViewOfFile3)(
+      block, nullptr, memory + block_index * BLOCK_SIZE, 0, BLOCK_SIZE, MEM_REPLACE_PLACEHOLDER,
+      PAGE_READWRITE, nullptr, 0);
+  if (!map_result)
+  {
+    PanicAlertFmt("Failed to map the writable block: {}", GetLastErrorString());
+    CloseHandle(block);
+    return;
+  }
+
+  m_writable_block_handles[block_index] = block;
+}
+
 }  // namespace Common
@@ -121,9 +121,14 @@ void JitBaseBlockCache::FinalizeBlock(JitBlock& block, bool block_link,
 {
   size_t index = FastLookupIndexForAddress(block.effectiveAddress, block.feature_flags);
   if (m_entry_points_ptr)
+  {
+    m_entry_points_arena.EnsureMemoryPageWritable(index * sizeof(u8*));
     m_entry_points_ptr[index] = block.normalEntry;
+  }
   else
+  {
     m_fast_block_map_fallback[index] = &block;
+  }
   block.fast_block_map_index = index;
 
   block.physical_addresses = physical_addresses;
@@ -485,9 +490,14 @@ JitBlock* JitBaseBlockCache::MoveBlockIntoFastCache(u32 addr, CPUEmuFeatureFlags
   // And create a new one
   size_t index = FastLookupIndexForAddress(addr, feature_flags);
   if (m_entry_points_ptr)
+  {
+    m_entry_points_arena.EnsureMemoryPageWritable(index * sizeof(u8*));
     m_entry_points_ptr[index] = block->normalEntry;
+  }
   else
+  {
     m_fast_block_map_fallback[index] = block;
+  }
   block->fast_block_map_index = index;
 
   return block;