Jit64: Properly handle backpatching overflowed address calculations

Previously we would only backpatch overflowed address calculations
if the overflow was less than 0x10000. Now we can handle the full 2 GiB
of overflow in both directions.

I'm also making equivalent changes to JitArm64's code. This isn't because
it needs it – JitArm64 address calculations should never overflow – but
because I wanted to get rid of the 0x100010000 inherited from Jit64 that
makes even less sense for JitArm64 than for Jit64.
JosJuice 2023-01-15 12:55:29 +01:00
parent f3ee167a3c
commit 611e721a4d
7 changed files with 58 additions and 33 deletions
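
For context on what an "overflowed address calculation" means here: fastmem turns a guest memory access into a host access at roughly memory_base + guest_address + displacement, computed in 64-bit registers without wrapping back into 32 bits. The guest address is a u32 and the displacement is signed, so the result can land outside the 4 GiB view on either side; the 2 GiB bound matches the guard regions around each view. The following is a standalone sketch of that arithmetic, not part of this commit, and the base value and names are illustrative:

#include <cstdint>
#include <cstdio>

int main()
{
  // Illustrative values only; the real base comes from the fastmem arena.
  constexpr uint64_t view_size = 0x1'0000'0000;       // 4 GiB guest view
  constexpr uint64_t memory_base = 0x7f40'0000'0000;  // hypothetical mapping base

  // A guest address near the top of the 32-bit space plus a positive
  // displacement, added in 64 bits without wrapping back to 32 bits:
  const uint32_t guest_address = 0xFFFF'FFF0;
  const int32_t displacement = 0x40;
  const uint64_t host_address = memory_base + guest_address + displacement;

  if (host_address >= memory_base + view_size)
  {
    std::printf("access overflowed the view by 0x%llx bytes\n",
                static_cast<unsigned long long>(host_address - (memory_base + view_size)));
  }
  return 0;
}

Before this commit, Jit64's fault handler only accepted addresses up to 0x10000 past the end of a view, and nothing below it; anything further away was not backpatched at all.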

View File

@@ -157,6 +157,11 @@ void MemoryManager::Init()
   m_is_initialized = true;
 }
 
+bool MemoryManager::IsAddressInFastmemArea(const u8* address) const
+{
+  return address >= m_fastmem_arena && address < m_fastmem_arena + m_fastmem_arena_size;
+}
+
 bool MemoryManager::InitFastmemArena()
 {
   // Here we set up memory mappings for fastmem. The basic idea of fastmem is that we reserve 4 GiB
@@ -194,15 +199,15 @@ bool MemoryManager::InitFastmemArena()
   constexpr size_t guard_size = 0x8000'0000;
   constexpr size_t memory_size = ppc_view_size * 2 + guard_size * 3;
 
-  u8* fastmem_arena = m_arena.ReserveMemoryRegion(memory_size);
-  if (!fastmem_arena)
+  m_fastmem_arena = m_arena.ReserveMemoryRegion(memory_size);
+  if (!m_fastmem_arena)
   {
     PanicAlertFmt("Memory::InitFastmemArena(): Failed finding a memory base.");
     return false;
   }
 
-  m_physical_base = fastmem_arena + guard_size;
-  m_logical_base = fastmem_arena + ppc_view_size + guard_size * 2;
+  m_physical_base = m_fastmem_arena + guard_size;
+  m_logical_base = m_fastmem_arena + ppc_view_size + guard_size * 2;
 
   for (const PhysicalMemoryRegion& region : m_physical_regions)
   {
@@ -222,6 +227,7 @@ bool MemoryManager::InitFastmemArena()
   }
 
   m_is_fastmem_arena_initialized = true;
+  m_fastmem_arena_size = memory_size;
   return true;
 }
 
@@ -371,6 +377,8 @@ void MemoryManager::ShutdownFastmemArena()
   m_arena.ReleaseMemoryRegion();
 
+  m_fastmem_arena = nullptr;
+  m_fastmem_arena_size = 0;
   m_physical_base = nullptr;
   m_logical_base = nullptr;
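
The guard_size and memory_size constants above describe a reservation shaped like [guard | physical 4 GiB view | guard | logical 4 GiB view | guard], with 2 GiB guards, which is what makes the new IsAddressInFastmemArea check sufficient: any fault inside the whole reservation, guards included, must have come from a fastmem access. A simplified model of that layout, assuming ppc_view_size is the 4 GiB guest view and a 64-bit host; this is a sketch, not the MemoryManager implementation:

#include <cstddef>
#include <cstdint>

// Simplified model of the fastmem reservation set up in InitFastmemArena.
struct FastmemArenaModel
{
  static constexpr size_t ppc_view_size = 0x1'0000'0000;  // assumed: 4 GiB guest view
  static constexpr size_t guard_size = 0x8000'0000;       // 2 GiB, as in the diff above
  static constexpr size_t memory_size = ppc_view_size * 2 + guard_size * 3;

  const uint8_t* arena = nullptr;  // start of the reserved region

  const uint8_t* physical_base() const { return arena + guard_size; }
  const uint8_t* logical_base() const { return arena + ppc_view_size + guard_size * 2; }

  // Mirrors the idea of MemoryManager::IsAddressInFastmemArea: anything inside
  // the reservation, guard regions included, came from a fastmem access.
  bool contains(const uint8_t* address) const
  {
    return address >= arena && address < arena + memory_size;
  }
};

An access that overflows the physical view by up to 2 GiB lands in the guard between the two views, and one that underflows the logical view lands in the same guard, so a single whole-arena range check covers overflow in both directions for both views.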

View File

@@ -72,6 +72,7 @@ public:
   u32 GetExRamSize() const { return m_exram_size; }
   u32 GetExRamMask() const { return m_exram_mask; }
 
+  bool IsAddressInFastmemArea(const u8* address) const;
   u8* GetPhysicalBase() const { return m_physical_base; }
   u8* GetLogicalBase() const { return m_logical_base; }
   u8* GetPhysicalPageMappingsBase() const { return m_physical_page_mappings_base; }
@@ -146,6 +147,8 @@ private:
   // are used to set up a full GC or Wii memory map in process memory.
   // In 64-bit, this might point to "high memory" (above the 32-bit limit),
   // so be sure to load it into a 64-bit register.
+  u8* m_fastmem_arena = nullptr;
+  size_t m_fastmem_arena_size = 0;
   u8* m_physical_base = nullptr;
   u8* m_logical_base = nullptr;

View File

@@ -227,19 +227,27 @@ bool Jit64::HandleFault(uintptr_t access_address, SContext* ctx)
   auto& system = Core::System::GetInstance();
   auto& memory = system.GetMemory();
 
-  // TODO: do we properly handle off-the-end?
-  const auto base_ptr = reinterpret_cast<uintptr_t>(memory.GetPhysicalBase());
-  if (access_address >= base_ptr && access_address < base_ptr + 0x100010000)
-    return BackPatch(static_cast<u32>(access_address - base_ptr), ctx);
+  if (memory.IsAddressInFastmemArea(reinterpret_cast<u8*>(access_address)))
+  {
+    auto& ppc_state = system.GetPPCState();
+    const uintptr_t memory_base = reinterpret_cast<uintptr_t>(
+        ppc_state.msr.DR ? memory.GetLogicalBase() : memory.GetPhysicalBase());
 
-  const auto logical_base_ptr = reinterpret_cast<uintptr_t>(memory.GetLogicalBase());
-  if (access_address >= logical_base_ptr && access_address < logical_base_ptr + 0x100010000)
-    return BackPatch(static_cast<u32>(access_address - logical_base_ptr), ctx);
+    if (access_address < memory_base || access_address >= memory_base + 0x1'0000'0000)
+    {
+      WARN_LOG_FMT(DYNA_REC,
+                   "Jit64 address calculation overflowed! Please report if this happens a lot. "
+                   "PC {:#018x}, access address {:#018x}, memory base {:#018x}, MSR.DR {}",
+                   ctx->CTX_PC, access_address, memory_base, ppc_state.msr.DR);
+    }
+
+    return BackPatch(ctx);
+  }
 
   return false;
 }
 
-bool Jit64::BackPatch(u32 emAddress, SContext* ctx)
+bool Jit64::BackPatch(SContext* ctx)
 {
   u8* codePtr = reinterpret_cast<u8*>(ctx->CTX_PC);
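
Condensed, the new Jit64::HandleFault logic is: if the faulting host address lies anywhere in the fastmem arena, pick the base the JIT-compiled code used (logical when MSR.DR is set, physical otherwise), warn if the address falls outside that base's 4 GiB view, and backpatch either way. A standalone restatement of the overflow test, using a hypothetical helper name rather than Dolphin's code:

#include <cstdint>

// Returns true when a faulting host address cannot be explained by an
// in-range guest address, i.e. the JIT's address calculation overflowed
// into one of the guard regions. 64-bit hosts only, like fastmem itself.
bool IsOverflowedFastmemAccess(uint64_t access_address, uint64_t memory_base)
{
  constexpr uint64_t view_size = 0x1'0000'0000;  // 4 GiB guest view
  return access_address < memory_base || access_address >= memory_base + view_size;
}

Jit64 still backpatches in the overflowed case, since the slowmem path works from the guest address rather than the host pointer; JitArm64 (below) instead logs an error and refuses, because, as the commit message notes, its address calculations should never overflow.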

View File

@@ -51,7 +51,7 @@ public:
   bool HandleFault(uintptr_t access_address, SContext* ctx) override;
   bool HandleStackFault() override;
-  bool BackPatch(u32 emAddress, SContext* ctx);
+  bool BackPatch(SContext* ctx);
 
   void EnableOptimization();
   void EnableBlockLink();

View File

@@ -123,8 +123,30 @@ bool JitArm64::HandleFault(uintptr_t access_address, SContext* ctx)
     success = HandleStackFault();
 
   // If the fault is in JIT code space, look for fastmem areas.
-  if (!success && IsInSpace((u8*)ctx->CTX_PC))
-    success = HandleFastmemFault(access_address, ctx);
+  if (!success && IsInSpace(reinterpret_cast<u8*>(ctx->CTX_PC)))
+  {
+    auto& system = Core::System::GetInstance();
+    auto& memory = system.GetMemory();
+    if (memory.IsAddressInFastmemArea(reinterpret_cast<u8*>(access_address)))
+    {
+      auto& ppc_state = system.GetPPCState();
+      const uintptr_t memory_base = reinterpret_cast<uintptr_t>(
+          ppc_state.msr.DR ? memory.GetLogicalBase() : memory.GetPhysicalBase());
+
+      if (access_address < memory_base || access_address >= memory_base + 0x1'0000'0000)
+      {
+        ERROR_LOG_FMT(DYNA_REC,
+                      "JitArm64 address calculation overflowed. This should never happen! "
+                      "PC {:#018x}, access address {:#018x}, memory base {:#018x}, MSR.DR {}",
+                      ctx->CTX_PC, access_address, memory_base, ppc_state.msr.DR);
+      }
+      else
+      {
+        success = HandleFastmemFault(ctx);
+      }
+    }
+  }
 
   if (!success)
   {

View File

@@ -33,7 +33,7 @@ public:
   bool HandleFault(uintptr_t access_address, SContext* ctx) override;
   void DoBacktrace(uintptr_t access_address, SContext* ctx);
   bool HandleStackFault() override;
-  bool HandleFastmemFault(uintptr_t access_address, SContext* ctx);
+  bool HandleFastmemFault(SContext* ctx);
 
   void ClearCache() override;

View File

@@ -16,13 +16,11 @@
 #include "Common/StringUtil.h"
 #include "Common/Swap.h"
 
-#include "Core/HW/Memmap.h"
 #include "Core/PowerPC/Gekko.h"
 #include "Core/PowerPC/JitArm64/Jit_Util.h"
 #include "Core/PowerPC/JitArmCommon/BackPatch.h"
 #include "Core/PowerPC/MMU.h"
 #include "Core/PowerPC/PowerPC.h"
-#include "Core/System.h"
 
 using namespace Arm64Gen;
@@ -304,22 +302,8 @@ void JitArm64::EmitBackpatchRoutine(u32 flags, MemAccessMode mode, ARM64Reg RS,
   }
 }
 
-bool JitArm64::HandleFastmemFault(uintptr_t access_address, SContext* ctx)
+bool JitArm64::HandleFastmemFault(SContext* ctx)
 {
-  auto& system = Core::System::GetInstance();
-  auto& memory = system.GetMemory();
-
-  if (!(access_address >= (uintptr_t)memory.GetPhysicalBase() &&
-        access_address < (uintptr_t)memory.GetPhysicalBase() + 0x100010000) &&
-      !(access_address >= (uintptr_t)memory.GetLogicalBase() &&
-        access_address < (uintptr_t)memory.GetLogicalBase() + 0x100010000))
-  {
-    ERROR_LOG_FMT(DYNA_REC,
-                  "Exception handler - access below memory space. PC: {:#018x} {:#018x} < {:#018x}",
-                  ctx->CTX_PC, access_address, (uintptr_t)memory.GetPhysicalBase());
-    return false;
-  }
-
   const u8* pc = reinterpret_cast<const u8*>(ctx->CTX_PC);
   auto slow_handler_iter = m_fault_to_handler.upper_bound(pc);
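
The m_fault_to_handler lookup at the end is the usual pattern for mapping a faulting PC back to the fastmem access that contains it: with the map presumably keyed by the start address of each emitted access, upper_bound(pc) returns the first entry past the PC, and stepping back one entry yields the candidate. A generic illustration of the idiom with a plain std::map; FastmemAccessInfo and its length field are hypothetical stand-ins, not Dolphin's types:

#include <cstdint>
#include <map>

struct FastmemAccessInfo
{
  uint32_t length = 0;  // size in bytes of the emitted access
};

// Find the access whose emitted code covers `pc`, or nullptr if none does.
const FastmemAccessInfo* FindFaultingAccess(
    const std::map<const uint8_t*, FastmemAccessInfo>& fault_to_handler, const uint8_t* pc)
{
  auto iter = fault_to_handler.upper_bound(pc);
  if (iter == fault_to_handler.begin())
    return nullptr;
  --iter;  // now the last entry whose start address is <= pc
  if (pc >= iter->first + iter->second.length)
    return nullptr;
  return &iter->second;
}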