PowerPC: Add access size parameter to MMU::IsOptimizableRAMAddress
For correctness, we need to check not only the start address of the memory access but also the end address.
parent 4e57b66dcf
commit b972329ed0
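To make the correctness issue in the commit message concrete: a memory access can start in a BAT block that is plain RAM and end in one that is not, so checking only the start address is not enough. The following is a minimal standalone sketch of the idea, not Dolphin code; kBatIndexShift, kBatPhysicalBit, and the toy dbat_table are simplified stand-ins for Dolphin's BAT_INDEX_SHIFT, BAT_PHYSICAL_BIT, and m_dbat_table.

// Minimal standalone sketch (not Dolphin code) of why the start address alone
// is not enough. kBatIndexShift and kBatPhysicalBit are simplified stand-ins.
#include <array>
#include <cstdint>
#include <cstdio>

constexpr std::uint32_t kBatIndexShift = 17;  // assume 128 KiB BAT blocks
constexpr std::uint32_t kBatPhysicalBit = 1;  // "this block is plain RAM"

// Toy BAT table: block 2 is RAM-backed, block 3 is not.
constexpr std::array<std::uint32_t, 8> dbat_table{0, 0, kBatPhysicalBit, 0, 0, 0, 0, 0};

// Old behaviour: only the first byte of the access is checked.
bool IsOptimizableStartOnly(std::uint32_t address)
{
  return (dbat_table[address >> kBatIndexShift] & kBatPhysicalBit) != 0;
}

// New behaviour: the last byte of the access is checked as well.
bool IsOptimizable(std::uint32_t address, std::uint32_t access_size_in_bits)
{
  const std::uint32_t last_byte = address + (access_size_in_bits >> 3) - 1;
  const std::uint32_t first = dbat_table[address >> kBatIndexShift];
  const std::uint32_t last = dbat_table[last_byte >> kBatIndexShift];
  return (first & last & kBatPhysicalBit) != 0;
}

int main()
{
  // A 64-bit access whose first byte lies in RAM-backed block 2 but whose
  // last byte spills into block 3.
  const std::uint32_t addr = (3u << kBatIndexShift) - 4;
  std::printf("start-only check: %d\n", IsOptimizableStartOnly(addr));  // 1 -- wrongly optimizable
  std::printf("start+end check:  %d\n", IsOptimizable(addr, 64));       // 0 -- correctly rejected
}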
@@ -441,7 +441,7 @@ void EmuCodeBlock::SafeLoadToRegImmediate(X64Reg reg_value, u32 address, int acc
                                           BitSet32 registersInUse, bool signExtend)
 {
   // If the address is known to be RAM, just load it directly.
-  if (m_jit.jo.fastmem_arena && m_jit.m_mmu.IsOptimizableRAMAddress(address))
+  if (m_jit.jo.fastmem_arena && m_jit.m_mmu.IsOptimizableRAMAddress(address, accessSize))
   {
     UnsafeLoadToReg(reg_value, Imm32(address), accessSize, 0, signExtend);
     return;
@@ -656,7 +656,7 @@ bool EmuCodeBlock::WriteToConstAddress(int accessSize, OpArg arg, u32 address,
     m_jit.js.fifoBytesSinceCheck += accessSize >> 3;
     return false;
   }
-  else if (m_jit.jo.fastmem_arena && m_jit.m_mmu.IsOptimizableRAMAddress(address))
+  else if (m_jit.jo.fastmem_arena && m_jit.m_mmu.IsOptimizableRAMAddress(address, accessSize))
   {
     WriteToConstRamAddress(accessSize, arg, address);
     return false;
@@ -136,7 +136,7 @@ void JitArm64::SafeLoadToReg(u32 dest, s32 addr, s32 offsetReg, u32 flags, s32 o
   if (is_immediate)
     mmio_address = m_mmu.IsOptimizableMMIOAccess(imm_addr, access_size);

-  if (is_immediate && m_mmu.IsOptimizableRAMAddress(imm_addr))
+  if (is_immediate && m_mmu.IsOptimizableRAMAddress(imm_addr, access_size))
   {
     set_addr_reg_if_needed();
     EmitBackpatchRoutine(flags, MemAccessMode::AlwaysFastAccess, dest_reg, XA, regs_in_use,
@@ -308,7 +308,7 @@ void JitArm64::SafeStoreFromReg(s32 dest, u32 value, s32 regOffset, u32 flags, s

     js.fifoBytesSinceCheck += accessSize >> 3;
   }
-  else if (is_immediate && m_mmu.IsOptimizableRAMAddress(imm_addr))
+  else if (is_immediate && m_mmu.IsOptimizableRAMAddress(imm_addr, access_size))
   {
     set_addr_reg_if_needed();
     EmitBackpatchRoutine(flags, MemAccessMode::AlwaysFastAccess, RS, XA, regs_in_use, fprs_in_use);
@@ -174,7 +174,7 @@ void JitArm64::lfXX(UGeckoInstruction inst)
   if (!jo.memcheck)
     fprs_in_use[DecodeReg(VD)] = 0;

-  if (is_immediate && m_mmu.IsOptimizableRAMAddress(imm_addr))
+  if (is_immediate && m_mmu.IsOptimizableRAMAddress(imm_addr, BackPatchInfo::GetFlagSize(flags)))
   {
     EmitBackpatchRoutine(flags, MemAccessMode::AlwaysFastAccess, VD, XA, regs_in_use, fprs_in_use);
   }
@@ -399,7 +399,7 @@ void JitArm64::stfXX(UGeckoInstruction inst)
     STR(IndexType::Unsigned, ARM64Reg::X2, PPC_REG, PPCSTATE_OFF(gather_pipe_ptr));
     js.fifoBytesSinceCheck += accessSize >> 3;
   }
-  else if (m_mmu.IsOptimizableRAMAddress(imm_addr))
+  else if (m_mmu.IsOptimizableRAMAddress(imm_addr, BackPatchInfo::GetFlagSize(flags)))
   {
     set_addr_reg_if_needed();
     EmitBackpatchRoutine(flags, MemAccessMode::AlwaysFastAccess, V0, XA, regs_in_use,
@@ -915,7 +915,7 @@ std::optional<ReadResult<std::string>> MMU::HostTryReadString(const Core::CPUThr
   return ReadResult<std::string>(c->translated, std::move(s));
 }

-bool MMU::IsOptimizableRAMAddress(const u32 address) const
+bool MMU::IsOptimizableRAMAddress(const u32 address, const u32 access_size) const
 {
   if (m_power_pc.GetMemChecks().HasAny())
     return false;
@@ -926,12 +926,12 @@ bool MMU::IsOptimizableRAMAddress(const u32 address) const
   if (m_ppc_state.m_enable_dcache)
     return false;

-  // TODO: This API needs to take an access size
-  //
   // We store whether an access can be optimized to an unchecked access
   // in dbat_table.
-  u32 bat_result = m_dbat_table[address >> BAT_INDEX_SHIFT];
-  return (bat_result & BAT_PHYSICAL_BIT) != 0;
+  const u32 last_byte_address = address + (access_size >> 3) - 1;
+  const u32 bat_result_1 = m_dbat_table[address >> BAT_INDEX_SHIFT];
+  const u32 bat_result_2 = m_dbat_table[last_byte_address >> BAT_INDEX_SHIFT];
+  return (bat_result_1 & bat_result_2 & BAT_PHYSICAL_BIT) != 0;
 }

 template <XCheckTLBFlag flag>
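A note on units for the hunk above: access_size is a width in bits (hence access_size >> 3 for the width in bytes, consistent with the accessSize >> 3 gather-pipe bookkeeping elsewhere in this diff), so last_byte_address is the address of the final byte touched. Below is a small assert-style sketch of the two edge cases; it is not part of the commit, and it assumes BAT_INDEX_SHIFT is 17 (128 KiB blocks), which is an assumption about Dolphin internals rather than something shown here.

// Hypothetical check, not part of the commit. Assumes BAT_INDEX_SHIFT == 17.
#include <cassert>
#include <cstdint>

int main()
{
  constexpr std::uint32_t BAT_INDEX_SHIFT = 17;

  // 8-bit access: the last byte is the first byte, so both lookups hit the
  // same dbat_table entry and the result matches the old single lookup.
  std::uint32_t address = 0x80000000;
  std::uint32_t access_size = 8;
  assert(((address + (access_size >> 3) - 1) >> BAT_INDEX_SHIFT) ==
         (address >> BAT_INDEX_SHIFT));

  // 64-bit access starting four bytes before a block boundary: the last byte
  // falls into the next BAT entry -- exactly the case the old code missed.
  address = (1u << BAT_INDEX_SHIFT) - 4;
  access_size = 64;
  assert(((address + (access_size >> 3) - 1) >> BAT_INDEX_SHIFT) !=
         (address >> BAT_INDEX_SHIFT));
}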
@@ -245,7 +245,7 @@ public:
   // Result changes based on the BAT registers and MSR.DR. Returns whether
   // it's safe to optimize a read or write to this address to an unguarded
   // memory access. Does not consider page tables.
-  bool IsOptimizableRAMAddress(u32 address) const;
+  bool IsOptimizableRAMAddress(u32 address, u32 access_size) const;
   u32 IsOptimizableMMIOAccess(u32 address, u32 access_size) const;
   bool IsOptimizableGatherPipeWrite(u32 address) const;
