Merge pull request #11445 from JosJuice/jit64-wraparound-backpatch
Jit64: Properly handle backpatching overflowed address calculations
commit 34a459bed7
@@ -157,6 +157,11 @@ void MemoryManager::Init()
   m_is_initialized = true;
 }
 
+bool MemoryManager::IsAddressInFastmemArea(const u8* address) const
+{
+  return address >= m_fastmem_arena && address < m_fastmem_arena + m_fastmem_arena_size;
+}
+
 bool MemoryManager::InitFastmemArena()
 {
   // Here we set up memory mappings for fastmem. The basic idea of fastmem is that we reserve 4 GiB
@@ -194,15 +199,15 @@ bool MemoryManager::InitFastmemArena()
   constexpr size_t guard_size = 0x8000'0000;
   constexpr size_t memory_size = ppc_view_size * 2 + guard_size * 3;
 
-  u8* fastmem_arena = m_arena.ReserveMemoryRegion(memory_size);
-  if (!fastmem_arena)
+  m_fastmem_arena = m_arena.ReserveMemoryRegion(memory_size);
+  if (!m_fastmem_arena)
   {
     PanicAlertFmt("Memory::InitFastmemArena(): Failed finding a memory base.");
     return false;
   }
 
-  m_physical_base = fastmem_arena + guard_size;
-  m_logical_base = fastmem_arena + ppc_view_size + guard_size * 2;
+  m_physical_base = m_fastmem_arena + guard_size;
+  m_logical_base = m_fastmem_arena + ppc_view_size + guard_size * 2;
 
   for (const PhysicalMemoryRegion& region : m_physical_regions)
   {
@@ -222,6 +227,7 @@ bool MemoryManager::InitFastmemArena()
   }
 
   m_is_fastmem_arena_initialized = true;
+  m_fastmem_arena_size = memory_size;
   return true;
 }
 
@@ -371,6 +377,8 @@ void MemoryManager::ShutdownFastmemArena()
 
   m_arena.ReleaseMemoryRegion();
 
+  m_fastmem_arena = nullptr;
+  m_fastmem_arena_size = 0;
   m_physical_base = nullptr;
   m_logical_base = nullptr;
 
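For context on the MemoryManager hunks above (Memmap.cpp): the arena reserved in InitFastmemArena is one contiguous region laid out as guard | physical view | guard | logical view | guard, and the new IsAddressInFastmemArea() simply checks whether a host address falls anywhere inside that reservation, guard space included. Below is a minimal standalone sketch of the same check; the constant values and names are illustrative assumptions chosen to match the memory_size formula above, not values copied from Memmap.cpp.

#include <cstddef>
#include <cstdint>

// Illustrative constants (assumptions): each PPC view spans the full 32-bit
// guest address space, with 2 GiB guard regions between and around the views,
// mirroring the memory_size = ppc_view_size * 2 + guard_size * 3 formula.
constexpr size_t kPpcViewSize = 0x1'0000'0000;
constexpr size_t kGuardSize = 0x8000'0000;
constexpr size_t kArenaSize = kPpcViewSize * 2 + kGuardSize * 3;

// Same shape as the new MemoryManager::IsAddressInFastmemArea(): any host
// address inside the reservation (views and guards alike) counts as fastmem.
bool IsInFastmemArea(const uint8_t* arena_base, const uint8_t* address)
{
  return address >= arena_base && address < arena_base + kArenaSize;
}

Checking against the whole reservation rather than against each view's exact bounds is what lets the fault handlers below recognize an access whose address calculation overflowed past a view into the adjacent guard space.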
@@ -72,6 +72,7 @@ public:
   u32 GetExRamSize() const { return m_exram_size; }
   u32 GetExRamMask() const { return m_exram_mask; }
 
+  bool IsAddressInFastmemArea(const u8* address) const;
   u8* GetPhysicalBase() const { return m_physical_base; }
   u8* GetLogicalBase() const { return m_logical_base; }
   u8* GetPhysicalPageMappingsBase() const { return m_physical_page_mappings_base; }
@@ -146,6 +147,8 @@ private:
   // are used to set up a full GC or Wii memory map in process memory.
   // In 64-bit, this might point to "high memory" (above the 32-bit limit),
   // so be sure to load it into a 64-bit register.
+  u8* m_fastmem_arena = nullptr;
+  size_t m_fastmem_arena_size = 0;
   u8* m_physical_base = nullptr;
   u8* m_logical_base = nullptr;
 
@@ -227,19 +227,27 @@ bool Jit64::HandleFault(uintptr_t access_address, SContext* ctx)
   auto& system = Core::System::GetInstance();
   auto& memory = system.GetMemory();
 
-  // TODO: do we properly handle off-the-end?
-  const auto base_ptr = reinterpret_cast<uintptr_t>(memory.GetPhysicalBase());
-  if (access_address >= base_ptr && access_address < base_ptr + 0x100010000)
-    return BackPatch(static_cast<u32>(access_address - base_ptr), ctx);
+  if (memory.IsAddressInFastmemArea(reinterpret_cast<u8*>(access_address)))
+  {
+    auto& ppc_state = system.GetPPCState();
+    const uintptr_t memory_base = reinterpret_cast<uintptr_t>(
+        ppc_state.msr.DR ? memory.GetLogicalBase() : memory.GetPhysicalBase());
 
-  const auto logical_base_ptr = reinterpret_cast<uintptr_t>(memory.GetLogicalBase());
-  if (access_address >= logical_base_ptr && access_address < logical_base_ptr + 0x100010000)
-    return BackPatch(static_cast<u32>(access_address - logical_base_ptr), ctx);
+    if (access_address < memory_base || access_address >= memory_base + 0x1'0000'0000)
+    {
+      WARN_LOG_FMT(DYNA_REC,
+                   "Jit64 address calculation overflowed! Please report if this happens a lot. "
+                   "PC {:#018x}, access address {:#018x}, memory base {:#018x}, MSR.DR {}",
+                   ctx->CTX_PC, access_address, memory_base, ppc_state.msr.DR);
+    }
+
+    return BackPatch(ctx);
+  }
 
   return false;
 }
 
-bool Jit64::BackPatch(u32 emAddress, SContext* ctx)
+bool Jit64::BackPatch(SContext* ctx)
 {
   u8* codePtr = reinterpret_cast<u8*>(ctx->CTX_PC);
 
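The Jit64::HandleFault change above replaces two per-view bounds checks (which an overflowed address calculation could fail) with the arena-wide check plus a warning; the log text asks users to report frequent occurrences, so the fault is tolerated and still backpatched. A hedged sketch of how such an overflow arises, with made-up register and base values:

#include <cstdint>
#include <cstdio>

int main()
{
  // Hypothetical values, for illustration only.
  const uint64_t memory_base = 0x0000'7f50'0000'0000;  // view base selected by MSR.DR
  const uint32_t guest_reg = 0xffff'fff0;              // guest base register near the top of the address space
  const uint32_t displacement = 0x20;

  // What the guest expects: 32-bit wraparound.
  const uint32_t guest_ea = guest_reg + displacement;  // 0x00000010

  // What a 64-bit host-side calculation of base + register + displacement
  // produces: no wraparound, so the access lands a full view-length past
  // memory_base, typically in the guard space next to the view.
  const uint64_t host_address = memory_base + guest_reg + displacement;

  std::printf("guest EA:     0x%08x\n", guest_ea);
  std::printf("host address: 0x%016llx (offset 0x%llx from base)\n",
              static_cast<unsigned long long>(host_address),
              static_cast<unsigned long long>(host_address - memory_base));
}

The resulting host address is inside the reserved arena (hence IsAddressInFastmemArea() accepts it) but outside the 4 GiB window the old per-view checks would have accepted, which is exactly the case the warning covers.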
@@ -51,7 +51,7 @@ public:
 
   bool HandleFault(uintptr_t access_address, SContext* ctx) override;
   bool HandleStackFault() override;
-  bool BackPatch(u32 emAddress, SContext* ctx);
+  bool BackPatch(SContext* ctx);
 
   void EnableOptimization();
   void EnableBlockLink();
@@ -123,8 +123,30 @@ bool JitArm64::HandleFault(uintptr_t access_address, SContext* ctx)
     success = HandleStackFault();
 
   // If the fault is in JIT code space, look for fastmem areas.
-  if (!success && IsInSpace((u8*)ctx->CTX_PC))
-    success = HandleFastmemFault(access_address, ctx);
+  if (!success && IsInSpace(reinterpret_cast<u8*>(ctx->CTX_PC)))
+  {
+    auto& system = Core::System::GetInstance();
+    auto& memory = system.GetMemory();
+
+    if (memory.IsAddressInFastmemArea(reinterpret_cast<u8*>(access_address)))
+    {
+      auto& ppc_state = system.GetPPCState();
+      const uintptr_t memory_base = reinterpret_cast<uintptr_t>(
+          ppc_state.msr.DR ? memory.GetLogicalBase() : memory.GetPhysicalBase());
+
+      if (access_address < memory_base || access_address >= memory_base + 0x1'0000'0000)
+      {
+        ERROR_LOG_FMT(DYNA_REC,
+                      "JitArm64 address calculation overflowed. This should never happen! "
+                      "PC {:#018x}, access address {:#018x}, memory base {:#018x}, MSR.DR {}",
+                      ctx->CTX_PC, access_address, memory_base, ppc_state.msr.DR);
+      }
+      else
+      {
+        success = HandleFastmemFault(ctx);
+      }
+    }
+  }
 
   if (!success)
   {
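JitArm64::HandleFault now classifies the fault the same way Jit64 does: MSR.DR picks which base (logical or physical) the access should have used, and a fault inside the arena but outside that base's 4 GiB window is an overflowed address calculation. A condensed sketch of that shared decision, using a hypothetical enum and parameters in place of the real control flow:

#include <cstdint>

enum class FastmemFaultKind
{
  NotFastmem,         // fault is outside the reserved arena entirely
  OverflowedAddress,  // inside the arena, but outside the view MSR.DR selects
  Backpatchable,      // inside the expected 4 GiB view
};

// Sketch only: 'in_arena' stands in for MemoryManager::IsAddressInFastmemArea(),
// and 'view_base' for GetLogicalBase()/GetPhysicalBase() chosen by MSR.DR.
FastmemFaultKind ClassifyFault(uintptr_t access_address, bool in_arena, uintptr_t view_base)
{
  if (!in_arena)
    return FastmemFaultKind::NotFastmem;
  if (access_address < view_base || access_address >= view_base + 0x1'0000'0000)
    return FastmemFaultKind::OverflowedAddress;
  return FastmemFaultKind::Backpatchable;
}

The two JITs then diverge on the overflow case: Jit64 warns and backpatches anyway, while JitArm64 logs an error and leaves the fault unhandled.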
@@ -33,7 +33,7 @@ public:
   bool HandleFault(uintptr_t access_address, SContext* ctx) override;
   void DoBacktrace(uintptr_t access_address, SContext* ctx);
   bool HandleStackFault() override;
-  bool HandleFastmemFault(uintptr_t access_address, SContext* ctx);
+  bool HandleFastmemFault(SContext* ctx);
 
   void ClearCache() override;
 
@@ -16,13 +16,11 @@
 #include "Common/StringUtil.h"
 #include "Common/Swap.h"
 
-#include "Core/HW/Memmap.h"
 #include "Core/PowerPC/Gekko.h"
 #include "Core/PowerPC/JitArm64/Jit_Util.h"
 #include "Core/PowerPC/JitArmCommon/BackPatch.h"
 #include "Core/PowerPC/MMU.h"
 #include "Core/PowerPC/PowerPC.h"
-#include "Core/System.h"
 
 using namespace Arm64Gen;
 
@@ -304,22 +302,8 @@ void JitArm64::EmitBackpatchRoutine(u32 flags, MemAccessMode mode, ARM64Reg RS,
   }
 }
 
-bool JitArm64::HandleFastmemFault(uintptr_t access_address, SContext* ctx)
+bool JitArm64::HandleFastmemFault(SContext* ctx)
 {
-  auto& system = Core::System::GetInstance();
-  auto& memory = system.GetMemory();
-
-  if (!(access_address >= (uintptr_t)memory.GetPhysicalBase() &&
-        access_address < (uintptr_t)memory.GetPhysicalBase() + 0x100010000) &&
-      !(access_address >= (uintptr_t)memory.GetLogicalBase() &&
-        access_address < (uintptr_t)memory.GetLogicalBase() + 0x100010000))
-  {
-    ERROR_LOG_FMT(DYNA_REC,
-                  "Exception handler - access below memory space. PC: {:#018x} {:#018x} < {:#018x}",
-                  ctx->CTX_PC, access_address, (uintptr_t)memory.GetPhysicalBase());
-    return false;
-  }
-
   const u8* pc = reinterpret_cast<const u8*>(ctx->CTX_PC);
   auto slow_handler_iter = m_fault_to_handler.upper_bound(pc);
 
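After this change, HandleFastmemFault no longer needs the faulting data address at all; it only has to map the faulting PC to the slow-path code emitted for that access, which is what the m_fault_to_handler.upper_bound(pc) lookup above is doing. The sketch below shows the usual ordered-map idiom for such a lookup; the key/value layout (keyed by the first code address past each fastmem access) and the type names are assumptions for illustration, not necessarily how m_fault_to_handler is declared:

#include <cstdint>
#include <map>

// Hypothetical bookkeeping: one entry per emitted fastmem access, keyed by the
// first code address *after* that access, so std::map::upper_bound(pc) lands on
// the entry whose code range contains pc.
struct SlowmemHandler
{
  const uint8_t* fastmem_start;  // first instruction of the fastmem access
  const uint8_t* slowmem_code;   // slow-path code to redirect execution to
};

using FaultMap = std::map<const uint8_t*, SlowmemHandler>;

const SlowmemHandler* FindHandler(const FaultMap& map, const uint8_t* pc)
{
  const auto it = map.upper_bound(pc);
  if (it == map.end() || pc < it->second.fastmem_start)
    return nullptr;  // pc is not inside any recorded fastmem access
  return &it->second;
}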