CPU: Correctly mask upper 1.5GB of KUSEG

Stops fastmem going into a loop when trying to backpatch accesses
above 512MB.
Stenzek 2025-04-08 21:39:28 +10:00
parent 0a1e8e27f0
commit 0479500357
8 changed files with 47 additions and 39 deletions
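For orientation before the diff: the sketch below mirrors the new translation helper added in the header hunk further down, and shows why the change stops the backpatch loop described in the commit message. It is a standalone illustration; the example addresses and the main() harness are not part of the commit.

#include <cassert>
#include <cstdint>

// Masks as introduced by this commit (KSEG0/KSEG1 fold onto a 512MB physical
// window, KUSEG maps the first 2GB directly).
constexpr uint32_t KSEG_MASK = 0x1FFFFFFFu;
constexpr uint32_t KUSEG_MASK = 0x7FFFFFFFu;

// Mirrors the new CPU::VirtualAddressToPhysical(): only addresses with the top
// bit set (the KSEG segments) are folded down; KUSEG keeps its full 31-bit
// offset, so accesses above 512MB stay above 512MB.
constexpr uint32_t VirtualToPhysical(uint32_t address)
{
  return address & ((address & 0x80000000u) ? KSEG_MASK : KUSEG_MASK);
}

int main()
{
  // KSEG0/KSEG1 mirrors of RAM still fold down to the same physical address.
  assert(VirtualToPhysical(0x80010000u) == 0x00010000u);
  assert(VirtualToPhysical(0xA0010000u) == 0x00010000u);

  // A KUSEG access above 512MB no longer aliases back into RAM. The old flat
  // 0x1FFFFFFF mask turned 0x60000000 into 0x00000000, which is what let
  // fastmem treat such accesses as mappable and loop while backpatching.
  assert(VirtualToPhysical(0x60000000u) == 0x60000000u);
  return 0;
}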


@@ -625,7 +625,7 @@ void Bus::MapFastmemViews()
auto MapRAM = [](u32 base_address) {
// Don't map RAM that isn't accessible.
- if ((base_address & CPU::PHYSICAL_MEMORY_ADDRESS_MASK) >= g_ram_mapped_size)
+ if (CPU::VirtualAddressToPhysical(base_address) >= g_ram_mapped_size)
return;
u8* ram_ptr = g_ram + (base_address & g_ram_mask);
@@ -676,7 +676,7 @@ void Bus::RemapFastmemViews()
bool Bus::CanUseFastmemForAddress(VirtualMemoryAddress address)
{
- const PhysicalMemoryAddress paddr = address & CPU::PHYSICAL_MEMORY_ADDRESS_MASK;
+ const PhysicalMemoryAddress paddr = CPU::VirtualAddressToPhysical(address);
switch (g_settings.cpu_fastmem_mode)
{


@@ -777,9 +777,8 @@ template<PGXPMode pgxp_mode>
}
else if (block->HasFlag(BlockFlags::NeedsDynamicFetchTicks))
{
- AddPendingTicks(
- static_cast<TickCount>(block->size * static_cast<u32>(*Bus::GetMemoryAccessTimePtr(
- block->pc & PHYSICAL_MEMORY_ADDRESS_MASK, MemoryAccessSize::Word))));
+ AddPendingTicks(static_cast<TickCount>(
+ block->size * static_cast<u32>(*Bus::GetMemoryAccessTimePtr(block->pc & KSEG_MASK, MemoryAccessSize::Word))));
}
else
{
@@ -857,8 +856,8 @@ bool CPU::CodeCache::ReadBlockInstructions(u32 start_pc, BlockInstructionList* i
const PageProtectionMode protection = GetProtectionModeForPC(start_pc);
const bool use_icache = CPU::IsCachedAddress(start_pc);
- const bool dynamic_fetch_ticks = (!use_icache && Bus::GetMemoryAccessTimePtr(start_pc & PHYSICAL_MEMORY_ADDRESS_MASK,
- MemoryAccessSize::Word) != nullptr);
+ const bool dynamic_fetch_ticks =
+ (!use_icache && Bus::GetMemoryAccessTimePtr(VirtualAddressToPhysical(start_pc), MemoryAccessSize::Word) != nullptr);
u32 pc = start_pc;
bool is_branch_delay_slot = false;
bool is_load_delay_slot = false;


@@ -2476,8 +2476,8 @@ template<PGXPMode pgxp_mode, bool debug>
if (s_trace_to_log)
LogInstruction(g_state.current_instruction.bits, g_state.current_instruction_pc, true);
- // handle all mirrors of the syscall trampoline
- const u32 masked_pc = (g_state.current_instruction_pc & PHYSICAL_MEMORY_ADDRESS_MASK);
+ // handle all mirrors of the syscall trampoline. will catch 200000A0 etc, but those aren't fetchable anyway
+ const u32 masked_pc = (g_state.current_instruction_pc & KSEG_MASK);
if (masked_pc == 0xA0) [[unlikely]]
HandleA0Syscall();
else if (masked_pc == 0xB0) [[unlikely]]
@@ -2721,7 +2721,10 @@ ALWAYS_INLINE_RELEASE bool CPU::DoInstructionRead(PhysicalMemoryAddress address,
{
using namespace Bus;
- address &= PHYSICAL_MEMORY_ADDRESS_MASK;
+ // We can shortcut around VirtualAddressToPhysical() here because we're never going to be
+ // calling with an out-of-range address.
+ DebugAssert(VirtualAddressToPhysical(address) == (address & KSEG_MASK));
+ address &= KSEG_MASK;
if (address < RAM_MIRROR_END)
{
@@ -2764,7 +2767,8 @@ TickCount CPU::GetInstructionReadTicks(VirtualMemoryAddress address)
{
using namespace Bus;
- address &= PHYSICAL_MEMORY_ADDRESS_MASK;
+ DebugAssert(VirtualAddressToPhysical(address) == (address & KSEG_MASK));
+ address &= KSEG_MASK;
if (address < RAM_MIRROR_END)
{
@@ -2784,7 +2788,8 @@ TickCount CPU::GetICacheFillTicks(VirtualMemoryAddress address)
{
using namespace Bus;
- address &= PHYSICAL_MEMORY_ADDRESS_MASK;
+ DebugAssert(VirtualAddressToPhysical(address) == (address & KSEG_MASK));
+ address &= KSEG_MASK;
if (address < RAM_MIRROR_END)
{
@@ -3030,7 +3035,7 @@ ALWAYS_INLINE bool CPU::DoSafeMemoryAccess(VirtualMemoryAddress address, u32& va
return true;
}
- address &= PHYSICAL_MEMORY_ADDRESS_MASK;
+ address &= KSEG_MASK;
}
break;
@@ -3046,7 +3051,7 @@ ALWAYS_INLINE bool CPU::DoSafeMemoryAccess(VirtualMemoryAddress address, u32& va
case 0x05: // KSEG1 - physical memory uncached
{
- address &= PHYSICAL_MEMORY_ADDRESS_MASK;
+ address &= KSEG_MASK;
}
break;
}
@@ -3231,7 +3236,7 @@ bool CPU::SafeReadMemoryBytes(VirtualMemoryAddress addr, void* data, u32 length)
using namespace Bus;
const u32 seg = (addr >> 29);
- if ((seg != 0 && seg != 4 && seg != 5) || (((addr + length) & PHYSICAL_MEMORY_ADDRESS_MASK) >= RAM_MIRROR_END) ||
+ if ((seg != 0 && seg != 4 && seg != 5) || (((addr + length) & KSEG_MASK) >= RAM_MIRROR_END) ||
(((addr & g_ram_mask) + length) > g_ram_size))
{
u8* ptr = static_cast<u8*>(data);
@@ -3255,7 +3260,7 @@ bool CPU::SafeWriteMemoryBytes(VirtualMemoryAddress addr, const void* data, u32
using namespace Bus;
const u32 seg = (addr >> 29);
- if ((seg != 0 && seg != 4 && seg != 5) || (((addr + length) & PHYSICAL_MEMORY_ADDRESS_MASK) >= RAM_MIRROR_END) ||
+ if ((seg != 0 && seg != 4 && seg != 5) || (((addr + length) & KSEG_MASK) >= RAM_MIRROR_END) ||
(((addr & g_ram_mask) + length) > g_ram_size))
{
const u8* ptr = static_cast<const u8*>(data);
@@ -3284,7 +3289,7 @@ bool CPU::SafeZeroMemoryBytes(VirtualMemoryAddress addr, u32 length)
using namespace Bus;
const u32 seg = (addr >> 29);
- if ((seg != 0 && seg != 4 && seg != 5) || (((addr + length) & PHYSICAL_MEMORY_ADDRESS_MASK) >= RAM_MIRROR_END) ||
+ if ((seg != 0 && seg != 4 && seg != 5) || (((addr + length) & KSEG_MASK) >= RAM_MIRROR_END) ||
(((addr & g_ram_mask) + length) > g_ram_size))
{
while ((addr & 3u) != 0 && length > 0)
@@ -3328,7 +3333,7 @@ void* CPU::GetDirectReadMemoryPointer(VirtualMemoryAddress address, MemoryAccess
if (seg != 0 && seg != 4 && seg != 5)
return nullptr;
- const PhysicalMemoryAddress paddr = address & PHYSICAL_MEMORY_ADDRESS_MASK;
+ const PhysicalMemoryAddress paddr = VirtualAddressToPhysical(address);
if (paddr < RAM_MIRROR_END)
{
if (read_ticks)
@@ -3364,7 +3369,7 @@ void* CPU::GetDirectWriteMemoryPointer(VirtualMemoryAddress address, MemoryAcces
if (seg != 0 && seg != 4 && seg != 5)
return nullptr;
- const PhysicalMemoryAddress paddr = address & PHYSICAL_MEMORY_ADDRESS_MASK;
+ const PhysicalMemoryAddress paddr = address & KSEG_MASK;
if (paddr < RAM_MIRROR_END)
return &g_ram[paddr & g_ram_mask];
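One nontrivial point in the hunks above is the DebugAssert shortcut: masking with KSEG_MASK agrees with the full VirtualAddressToPhysical() translation only when the address has its top bit set (any KSEG segment) or lies in the first 512MB of KUSEG, which is why the comment stresses that these fetch/timing paths are never called with an out-of-range address. A standalone check of that equivalence, reusing the illustrative helper from the earlier sketch (not part of the commit):

#include <cassert>
#include <cstdint>

constexpr uint32_t KSEG_MASK = 0x1FFFFFFFu;
constexpr uint32_t KUSEG_MASK = 0x7FFFFFFFu;

constexpr uint32_t VirtualToPhysical(uint32_t address)
{
  return address & ((address & 0x80000000u) ? KSEG_MASK : KUSEG_MASK);
}

// The shortcut `address & KSEG_MASK` matches the full translation exactly when
// the address is in any KSEG segment (top bit set) or below 512MB in KUSEG.
constexpr bool ShortcutIsValid(uint32_t address)
{
  return (address & 0x80000000u) != 0u || address < 0x20000000u;
}

int main()
{
  // Sample the whole 32-bit space in 16MB steps and confirm the two agree
  // exactly where ShortcutIsValid() says they should.
  for (uint64_t addr = 0; addr <= 0xFFFFFFFFull; addr += 0x01000000ull)
  {
    const uint32_t a = static_cast<uint32_t>(addr);
    const bool agrees = (VirtualToPhysical(a) == (a & KSEG_MASK));
    assert(agrees == ShortcutIsValid(a));
  }
  return 0;
}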


@@ -10,6 +10,13 @@ struct fastjmp_buf;
namespace CPU {
+ // Memory address mask used for fetching as well as loadstores (removes cached/uncached/user/kernel bits).
+ enum : PhysicalMemoryAddress
+ {
+ KSEG_MASK = 0x1FFFFFFF,
+ KUSEG_MASK = 0x7FFFFFFF,
+ };
void SetPC(u32 new_pc);
// exceptions
@@ -104,7 +111,8 @@ ALWAYS_INLINE static Segment GetSegmentForAddress(VirtualMemoryAddress address)
ALWAYS_INLINE static constexpr PhysicalMemoryAddress VirtualAddressToPhysical(VirtualMemoryAddress address)
{
- return (address & PHYSICAL_MEMORY_ADDRESS_MASK);
+ // KUSEG goes to the first 2GB, others are only 512MB.
+ return (address & ((address & 0x80000000u) ? KSEG_MASK : KUSEG_MASK));
}
ALWAYS_INLINE static VirtualMemoryAddress PhysicalAddressToVirtual(PhysicalMemoryAddress address, Segment segment)


@@ -8,6 +8,7 @@
#include "cpu_pgxp.h"
#include "bus.h"
#include "cpu_core.h"
+ #include "cpu_core_private.h"
#include "cpu_disasm.h"
#include "gpu_types.h"
#include "settings.h"
@@ -270,7 +271,8 @@ ALWAYS_INLINE_RELEASE CPU::PGXPValue* CPU::PGXP::GetPtr(u32 addr)
if ((addr & SCRATCHPAD_ADDR_MASK) == SCRATCHPAD_ADDR)
return &s_mem[PGXP_MEM_SCRATCH_OFFSET + ((addr & SCRATCHPAD_OFFSET_MASK) >> 2)];
- const u32 paddr = (addr & PHYSICAL_MEMORY_ADDRESS_MASK);
+ // Don't worry about >512MB here for performance reasons.
+ const u32 paddr = (addr & KSEG_MASK);
if (paddr < Bus::RAM_MIRROR_END)
return &s_mem[(paddr & Bus::g_ram_mask) >> 2];
else


@@ -84,7 +84,7 @@ void CPU::Recompiler::Recompiler::BeginBlock()
if (g_settings.bios_tty_logging)
{
- const u32 masked_pc = (m_block->pc & PHYSICAL_MEMORY_ADDRESS_MASK);
+ const u32 masked_pc = VirtualAddressToPhysical(m_block->pc);
if (masked_pc == 0xa0)
GenerateCall(reinterpret_cast<const void*>(&CPU::HandleA0Syscall));
else if (masked_pc == 0xb0)
@@ -1735,8 +1735,7 @@ void CPU::Recompiler::Recompiler::TruncateBlock()
const TickCount* CPU::Recompiler::Recompiler::GetFetchMemoryAccessTimePtr() const
{
- const TickCount* ptr =
- Bus::GetMemoryAccessTimePtr(m_block->pc & PHYSICAL_MEMORY_ADDRESS_MASK, MemoryAccessSize::Word);
+ const TickCount* ptr = Bus::GetMemoryAccessTimePtr(VirtualAddressToPhysical(m_block->pc), MemoryAccessSize::Word);
AssertMsg(ptr, "Address has dynamic fetch ticks");
return ptr;
}
@@ -2437,10 +2436,9 @@ CPU::Recompiler::Recompiler::SpecValue CPU::Recompiler::Recompiler::SpecReadMem(
return value;
}
- const PhysicalMemoryAddress phys_addr = address & PHYSICAL_MEMORY_ADDRESS_MASK;
- if (Bus::IsRAMAddress(phys_addr))
+ if (CPU::CodeCache::AddressInRAM(address))
{
- u32 ram_offset = phys_addr & Bus::g_ram_mask;
+ u32 ram_offset = address & Bus::g_ram_mask;
std::memcpy(&value, &Bus::g_ram[ram_offset], sizeof(value));
return value;
}
@@ -2457,9 +2455,10 @@ void CPU::Recompiler::Recompiler::SpecWriteMem(u32 address, SpecValue value)
return;
}
- const PhysicalMemoryAddress phys_addr = address & PHYSICAL_MEMORY_ADDRESS_MASK;
- if ((address & SCRATCHPAD_ADDR_MASK) == SCRATCHPAD_ADDR || Bus::IsRAMAddress(phys_addr))
+ if ((address & SCRATCHPAD_ADDR_MASK) == SCRATCHPAD_ADDR)
m_speculative_constants.memory.emplace(address, value);
+ else if (CPU::CodeCache::AddressInRAM(address))
+ m_speculative_constants.memory.emplace(address & Bus::g_ram_mask, value);
}
void CPU::Recompiler::Recompiler::SpecInvalidateMem(VirtualMemoryAddress address)


@@ -12,11 +12,6 @@
namespace CPU {
- // Memory address mask used for fetching as well as loadstores (removes cached/uncached/user/kernel bits).
- enum : PhysicalMemoryAddress
- {
- PHYSICAL_MEMORY_ADDRESS_MASK = 0x1FFFFFFF
- };
enum : u32
{
INSTRUCTION_SIZE = sizeof(u32)


@@ -4,6 +4,7 @@
#include "memory_scanner.h"
#include "bus.h"
#include "cpu_core.h"
+ #include "cpu_core_private.h"
#include "common/log.h"
@@ -11,7 +12,7 @@
LOG_CHANNEL(Cheats);
- static bool IsValidScanAddress(PhysicalMemoryAddress address)
+ static bool IsValidScanAddress(VirtualMemoryAddress address)
{
if ((address & CPU::SCRATCHPAD_ADDR_MASK) == CPU::SCRATCHPAD_ADDR &&
(address & CPU::SCRATCHPAD_OFFSET_MASK) < CPU::SCRATCHPAD_SIZE)
@@ -19,12 +20,11 @@ static bool IsValidScanAddress(PhysicalMemoryAddress address)
return true;
}
- address &= CPU::PHYSICAL_MEMORY_ADDRESS_MASK;
- if (address < Bus::RAM_MIRROR_END)
+ const PhysicalMemoryAddress phys_address = CPU::VirtualAddressToPhysical(address);
+ if (phys_address < Bus::RAM_MIRROR_END)
return true;
- if (address >= Bus::BIOS_BASE && address < (Bus::BIOS_BASE + Bus::BIOS_SIZE))
+ if (phys_address >= Bus::BIOS_BASE && phys_address < (Bus::BIOS_BASE + Bus::BIOS_SIZE))
return true;
return false;
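To make the effect on the scanner concrete, here is a small standalone mock of the RAM/BIOS part of the new IsValidScanAddress() logic; the RAM_MIRROR_END and BIOS constants are assumed from the usual PSX memory map rather than taken from this diff, and the scratchpad check is omitted for brevity.

#include <cassert>
#include <cstdint>

// Assumed PSX memory-map constants, for illustration only.
constexpr uint32_t RAM_MIRROR_END = 0x00800000u; // 2MB of RAM mirrored four times
constexpr uint32_t BIOS_BASE = 0x1FC00000u;
constexpr uint32_t BIOS_SIZE = 0x00080000u;      // 512KB

constexpr uint32_t KSEG_MASK = 0x1FFFFFFFu;
constexpr uint32_t KUSEG_MASK = 0x7FFFFFFFu;

constexpr uint32_t VirtualToPhysical(uint32_t address)
{
  return address & ((address & 0x80000000u) ? KSEG_MASK : KUSEG_MASK);
}

// Rough equivalent of the RAM/BIOS checks after this commit.
constexpr bool IsScannableRAMOrBIOS(uint32_t address)
{
  const uint32_t phys = VirtualToPhysical(address);
  return phys < RAM_MIRROR_END || (phys >= BIOS_BASE && phys < (BIOS_BASE + BIOS_SIZE));
}

int main()
{
  assert(IsScannableRAMOrBIOS(0x80010000u));  // KSEG0 mirror of RAM
  assert(IsScannableRAMOrBIOS(0xBFC00000u));  // KSEG1 mirror of the BIOS ROM
  assert(!IsScannableRAMOrBIOS(0x60000000u)); // KUSEG above 512MB: used to alias into RAM, now rejected
  return 0;
}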