Move fault handling inside the memory manager class; make the pagemap key the page base address

This commit is contained in:
Jared M. White 2024-07-06 19:48:48 -05:00
parent 619cd0aa38
commit af86eaad1a
4 changed files with 49 additions and 69 deletions

View File

@ -47,16 +47,30 @@ MemoryManager::MemoryManager(Core::System& system) : m_system(system)
MemoryManager::~MemoryManager() = default;
std::optional<size_t> MemoryManager::GetDirtyPageIndexFromAddress(u64 address)
u64 MemoryManager::GetDirtyPageIndexFromAddress(u64 address)
{
size_t page_size = Common::PageSize();
size_t page_mask = page_size - 1;
return (address & ~page_mask) >> 12;
const size_t page_size = Common::PageSize();
const size_t page_mask = page_size - 1;
return address & ~page_mask;
}
bool MemoryManager::VirtualProtectMemory(u8* data, size_t size, u64 flag)
bool MemoryManager::HandleFault(uintptr_t fault_address)
{
return m_arena.VirtualProtectMemoryRegion(data, size, flag);
u8* page_base_bytes = reinterpret_cast<u8*>(fault_address);
if (!IsAddressInEmulatedMemory(page_base_bytes) || IsPageDirty(fault_address) || !(Core::IsCPUThread() || Core::IsGPUThread()))
{
return false;
}
SetPageDirtyBit(fault_address, 0x1, true);
bool change_protection =
m_arena.VirtualProtectMemoryRegion(page_base_bytes, 0x1, PAGE_READWRITE);
if (!change_protection)
{
return false;
}
return true;
}
void MemoryManager::WriteProtectPhysicalMemoryRegions()
@ -65,20 +79,21 @@ void MemoryManager::WriteProtectPhysicalMemoryRegions()
{
if (!region.active)
continue;
size_t page_size = Common::PageSize();
bool change_protection =
m_arena.VirtualProtectMemoryRegion((*region.out_pointer), region.size, PAGE_READONLY);
if (!change_protection)
{
PanicAlertFmt("Memory::WriteProtectPhysicalMemoryRegions(): Failed to write protect for "
"this block of memory at 0x{:08X}.",
reinterpret_cast<u64>(*region.out_pointer));
}
const size_t page_size = Common::PageSize();
for (size_t i = 0; i < region.size; i += page_size)
{
bool change_protection = VirtualProtectMemory((*region.out_pointer) + i, page_size, PAGE_READONLY);
if (!change_protection)
{
PanicAlertFmt(
"Memory::WriteProtectPhysicalMemoryRegions(): Failed to write protect for this block of memory at 0x{:08X}.",
reinterpret_cast<u64>(*region.out_pointer));
}
std::optional<size_t> index =
GetDirtyPageIndexFromAddress(reinterpret_cast<u64>(*region.out_pointer + i));
if (index.has_value())
m_dirty_pages[index.value()] = false;
const uintptr_t index = reinterpret_cast<uintptr_t>((*region.out_pointer) + i);
m_dirty_pages[index] = false;
}
}
}
@ -207,19 +222,19 @@ bool MemoryManager::IsAddressInFastmemArea(const u8* address) const
bool MemoryManager::IsAddressInEmulatedMemory(const u8* address) const
{
if (m_ram && address > m_ram && address <= m_ram + GetRamSize())
if (!!m_ram && address >= m_ram && address < m_ram + GetRamSize())
{
return true;
}
else if (m_exram && address > m_exram && address <= m_exram + GetExRamSize())
else if (!!m_exram && address >= m_exram && address < m_exram + GetExRamSize())
{
return true;
}
else if (m_l1_cache && address > m_l1_cache && address <= m_l1_cache + GetL1CacheSize())
else if (!!m_l1_cache && address >= m_l1_cache && address < m_l1_cache + GetL1CacheSize())
{
return true;
}
else if (m_fake_vmem && address > m_fake_vmem && address <= m_fake_vmem + GetFakeVMemSize())
else if (!!m_fake_vmem && address >= m_fake_vmem && address < m_fake_vmem + GetFakeVMemSize())
{
return true;
}
@ -391,54 +406,45 @@ void MemoryManager::DoState(PointerWrap& p, bool delta)
}
if (delta)
{
u32 page_size = static_cast<u32>(Common::PageSize());
const u32 page_size = static_cast<u32>(Common::PageSize());
p.Do(m_dirty_pages);
for (size_t i = 0; i < current_ram_size; i++)
for (size_t i = 0; i < current_ram_size; i += page_size)
{
if (IsPageDirty(reinterpret_cast<uintptr_t>(&m_ram[i])))
{
p.DoArray(m_ram + i, page_size);
i += page_size;
}
}
for (size_t i = 0; i < current_l1_cache_size; i++)
for (size_t i = 0; i < current_l1_cache_size; i += page_size)
{
if (IsPageDirty(reinterpret_cast<uintptr_t>(&m_l1_cache[i])))
{
p.DoArray(m_l1_cache + i, page_size);
i += page_size;
}
}
p.DoMarker("Memory RAM");
if (current_have_fake_vmem)
{
for (size_t i = 0; i < current_fake_vmem_size; i++)
for (size_t i = 0; i < current_fake_vmem_size; i += page_size)
{
if (IsPageDirty(reinterpret_cast<uintptr_t>(&m_fake_vmem[i])))
{
p.DoArray(m_fake_vmem + i, page_size);
i += page_size;
}
}
}
p.DoMarker("Memory FakeVMEM");
if (current_have_exram)
{
for (size_t i = 0; i < current_exram_size; i++)
for (size_t i = 0; i < current_exram_size; i += page_size)
{
if (IsPageDirty(reinterpret_cast<uintptr_t>(&m_exram[i])))
{
p.DoArray(m_exram + i, page_size);
i += page_size;
}
}
}
p.DoMarker("Memory EXRAM");
if (p.IsWriteMode())
{
ResetDirtyPages();
}
}
else
{
@ -690,26 +696,14 @@ void MemoryManager::Write_U64_Swap(u64 value, u32 address)
bool MemoryManager::IsPageDirty(uintptr_t address)
{
std::optional<size_t> index = GetDirtyPageIndexFromAddress(address);
if (index.has_value())
{
return m_dirty_pages[index.value()];
}
else
{
return true;
}
return m_dirty_pages[GetDirtyPageIndexFromAddress(address)];
}
void MemoryManager::SetPageDirtyBit(uintptr_t address, size_t size, bool dirty)
{
for (size_t i = 0; i < size; i++)
{
std::optional<size_t> index = GetDirtyPageIndexFromAddress(address + i);
if (index.has_value())
{
m_dirty_pages[index.value()] = dirty;
}
m_dirty_pages[GetDirtyPageIndexFromAddress(address + i)] = dirty;
}
}

View File

@ -137,6 +137,7 @@ public:
void SetPageDirtyBit(uintptr_t address, size_t size, bool dirty);
void ResetDirtyPages();
bool VirtualProtectMemory(u8* data, size_t size, u64 flag);
bool HandleFault(uintptr_t fault_address);
std::map<u64, u8>& GetDirtyPages() { return m_dirty_pages; }
@ -266,7 +267,7 @@ private:
std::map<u64, u8> m_dirty_pages;
std::optional<size_t> GetDirtyPageIndexFromAddress(u64 address);
u64 GetDirtyPageIndexFromAddress(u64 address);
void WriteProtectPhysicalMemoryRegions();
void InitMMIO(bool is_wii);

View File

@ -63,23 +63,8 @@ static LONG NTAPI Handler(PEXCEPTION_POINTERS pPtrs)
uintptr_t fault_address = (uintptr_t)pPtrs->ExceptionRecord->ExceptionInformation[1];
SContext* ctx = pPtrs->ContextRecord;
Core::System& system = Core::System::GetInstance();
Memory::MemoryManager& memory = system.GetMemory();
if (!memory.IsPageDirty(fault_address))
if (system.GetMemory().HandleFault(fault_address))
{
size_t page_size = Common::PageSize();
memory.SetPageDirtyBit(fault_address, page_size, true);
size_t page_mask = page_size - 1;
u64 page_index = fault_address & page_mask;
bool change_protection = memory.VirtualProtectMemory(reinterpret_cast<u8*>(fault_address),
page_size - page_index, PAGE_READWRITE);
if (!change_protection)
{
return EXCEPTION_CONTINUE_SEARCH;
}
return EXCEPTION_CONTINUE_EXECUTION;
}
else if (system.GetJitInterface().HandleFault(fault_address, ctx))

View File

@ -495,7 +495,7 @@ void SaveAs(Core::System& system, const std::string& filename, bool wait)
current_buffer.resize(buffer_size);
ptr = current_buffer.data();
PointerWrap p(&ptr, buffer_size, PointerWrap::Mode::Write);
DoState(system, p, false);
DoState(system, p, true);
if (p.IsWriteMode())
{
@ -901,7 +901,7 @@ void LoadAs(Core::System& system, const std::string& filename)
{
u8* ptr = buffer.data();
PointerWrap p(&ptr, buffer.size(), PointerWrap::Mode::Read);
DoState(system, p, false);
DoState(system, p, true);
loaded = true;
loadedSuccessfully = p.IsReadMode();
}