Jit64: Avoid System::GetInstance() and ppcState.
commit fc394bdbdb
parent dfc14db313
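This change drops the x86-64 JIT's remaining uses of the Core::System::GetInstance() singleton and of the global PowerPC::ppcState, reading emulator state through references the compiler object already holds. A minimal sketch of the pattern the diff relies on, assuming the m_system and m_ppc_state members are supplied by the JIT's base class (their declarations are not part of this diff):

    // Hypothetical sketch; the real members live in the JIT base class.
    class Jit64 /* : public JitBase */
    {
    protected:
      Core::System& m_system;              // stands in for Core::System::GetInstance()
      PowerPC::PowerPCState& m_ppc_state;  // stands in for the global PowerPC::ppcState
    };

    // Call sites in this commit then read, for example:
    //   auto& memory = m_system.GetMemory();  // was Core::System::GetInstance().GetMemory()
    //   if (m_ppc_state.msr.DR) { ... }       // was PowerPC::ppcState.msr.DR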
@@ -136,12 +136,11 @@ bool Jit64::HandleFault(uintptr_t access_address, SContext* ctx)
   // Only instructions that access I/O will get these, and there won't be that
   // many of them in a typical program/game.

-  auto& system = Core::System::GetInstance();
-  auto& memory = system.GetMemory();
+  auto& memory = m_system.GetMemory();

   if (memory.IsAddressInFastmemArea(reinterpret_cast<u8*>(access_address)))
   {
-    auto& ppc_state = system.GetPPCState();
+    auto& ppc_state = m_system.GetPPCState();
     const uintptr_t memory_base = reinterpret_cast<uintptr_t>(
         ppc_state.msr.DR ? memory.GetLogicalBase() : memory.GetPhysicalBase());

@@ -253,8 +252,7 @@ void Jit64::Init()
 {
   EnableBlockLink();

-  auto& system = Core::System::GetInstance();
-  auto& memory = system.GetMemory();
+  auto& memory = m_system.GetMemory();

   jo.fastmem_arena = m_fastmem_enabled && memory.InitFastmemArena();
   jo.optimizeGatherPipe = true;
@@ -322,8 +320,7 @@ void Jit64::Shutdown()
 {
   FreeCodeSpace();

-  auto& system = Core::System::GetInstance();
-  auto& memory = system.GetMemory();
+  auto& memory = m_system.GetMemory();
   memory.ShutdownFastmemArena();

   blocks.Shutdown();
@@ -344,7 +341,7 @@ void Jit64::FallBackToInterpreter(UGeckoInstruction inst)

   Interpreter::Instruction instr = Interpreter::GetInterpreterOp(inst);
   ABI_PushRegistersAndAdjustStack({}, 0);
-  ABI_CallFunctionPC(instr, &Core::System::GetInstance().GetInterpreter(), inst.hex);
+  ABI_CallFunctionPC(instr, &m_system.GetInterpreter(), inst.hex);
   ABI_PopRegistersAndAdjustStack({}, 0);

   // If the instruction wrote to any registers which were marked as discarded,
@@ -404,29 +401,27 @@ void Jit64::DoNothing(UGeckoInstruction _inst)
   // Yup, just don't do anything.
 }

-static const bool ImHereDebug = false;
-static const bool ImHereLog = false;
-static std::map<u32, int> been_here;
-
-static void ImHere()
+void Jit64::ImHere(Jit64& jit)
 {
+  auto& ppc_state = jit.m_ppc_state;
   static File::IOFile f;
-  if (ImHereLog)
+  if (jit.m_im_here_log)
   {
     if (!f)
       f.Open("log64.txt", "w");

-    f.WriteString(fmt::format("{0:08x}\n", PowerPC::ppcState.pc));
+    f.WriteString(fmt::format("{0:08x}\n", ppc_state.pc));
   }
-  if (been_here.find(PowerPC::ppcState.pc) != been_here.end())
+  auto& been_here = jit.m_been_here;
+  auto it = been_here.find(ppc_state.pc);
+  if (it != been_here.end())
   {
-    been_here.find(PowerPC::ppcState.pc)->second++;
-    if ((been_here.find(PowerPC::ppcState.pc)->second) & 1023)
+    it->second++;
+    if (it->second & 1023)
       return;
   }
-  INFO_LOG_FMT(DYNA_REC, "I'm here - PC = {:08x} , LR = {:08x}", PowerPC::ppcState.pc,
-               LR(PowerPC::ppcState));
-  been_here[PowerPC::ppcState.pc] = 1;
+  INFO_LOG_FMT(DYNA_REC, "I'm here - PC = {:08x} , LR = {:08x}", ppc_state.pc, LR(ppc_state));
+  been_here[ppc_state.pc] = 1;
 }

 bool Jit64::Cleanup()
@@ -440,18 +435,18 @@ bool Jit64::Cleanup()
     CMP(64, R(RSCRATCH), Imm32(GPFifo::GATHER_PIPE_SIZE));
     FixupBranch exit = J_CC(CC_L);
     ABI_PushRegistersAndAdjustStack({}, 0);
-    ABI_CallFunctionP(GPFifo::UpdateGatherPipe, &Core::System::GetInstance().GetGPFifo());
+    ABI_CallFunctionP(GPFifo::UpdateGatherPipe, &m_system.GetGPFifo());
     ABI_PopRegistersAndAdjustStack({}, 0);
     SetJumpTarget(exit);
     did_something = true;
   }

   // SPEED HACK: MMCR0/MMCR1 should be checked at run-time, not at compile time.
-  if (MMCR0(PowerPC::ppcState).Hex || MMCR1(PowerPC::ppcState).Hex)
+  if (MMCR0(m_ppc_state).Hex || MMCR1(m_ppc_state).Hex)
   {
     ABI_PushRegistersAndAdjustStack({}, 0);
     ABI_CallFunctionCCCP(PowerPC::UpdatePerformanceMonitor, js.downcountAmount, js.numLoadStoreInst,
-                         js.numFloatingPointInst, &PowerPC::ppcState);
+                         js.numFloatingPointInst, &m_ppc_state);
     ABI_PopRegistersAndAdjustStack({}, 0);
     did_something = true;
   }
@@ -663,25 +658,24 @@ void Jit64::Trace()
   std::string fregs;

 #ifdef JIT_LOG_GPR
-  for (size_t i = 0; i < std::size(PowerPC::ppcState.gpr); i++)
+  for (size_t i = 0; i < std::size(m_ppc_state.gpr); i++)
   {
-    regs += fmt::format("r{:02d}: {:08x} ", i, PowerPC::ppcState.gpr[i]);
+    regs += fmt::format("r{:02d}: {:08x} ", i, m_ppc_state.gpr[i]);
   }
 #endif

 #ifdef JIT_LOG_FPR
-  for (size_t i = 0; i < std::size(PowerPC::ppcState.ps); i++)
+  for (size_t i = 0; i < std::size(m_ppc_state.ps); i++)
   {
-    fregs += fmt::format("f{:02d}: {:016x} ", i, PowerPC::ppcState.ps[i].PS0AsU64());
+    fregs += fmt::format("f{:02d}: {:016x} ", i, m_ppc_state.ps[i].PS0AsU64());
   }
 #endif

   DEBUG_LOG_FMT(DYNA_REC,
                 "JIT64 PC: {:08x} SRR0: {:08x} SRR1: {:08x} FPSCR: {:08x} "
                 "MSR: {:08x} LR: {:08x} {} {}",
-                PowerPC::ppcState.pc, SRR0(PowerPC::ppcState), SRR1(PowerPC::ppcState),
-                PowerPC::ppcState.fpscr.Hex, PowerPC::ppcState.msr.Hex, PowerPC::ppcState.spr[8],
-                regs, fregs);
+                m_ppc_state.pc, SRR0(m_ppc_state), SRR1(m_ppc_state), m_ppc_state.fpscr.Hex,
+                m_ppc_state.msr.Hex, m_ppc_state.spr[8], regs, fregs);
 }

 void Jit64::Jit(u32 em_address)
@@ -720,7 +714,7 @@ void Jit64::Jit(u32 em_address, bool clear_cache_and_retry_on_failure)

   if (!jo.profile_blocks)
   {
-    if (Core::System::GetInstance().GetCPU().IsStepping())
+    if (m_system.GetCPU().IsStepping())
     {
       block_size = 1;

@@ -744,8 +738,8 @@ void Jit64::Jit(u32 em_address, bool clear_cache_and_retry_on_failure)
   if (code_block.m_memory_exception)
   {
     // Address of instruction could not be translated
-    PowerPC::ppcState.npc = nextPC;
-    PowerPC::ppcState.Exceptions |= EXCEPTION_ISI;
+    m_ppc_state.npc = nextPC;
+    m_ppc_state.Exceptions |= EXCEPTION_ISI;
     PowerPC::CheckExceptions();
     WARN_LOG_FMT(POWERPC, "ISI exception at {:#010x}", nextPC);
     return;
@@ -822,8 +816,6 @@ bool Jit64::SetEmitterStateToFreeCodeRegion()

 bool Jit64::DoJit(u32 em_address, JitBlock* b, u32 nextPC)
 {
-  auto& system = Core::System::GetInstance();
-
   js.firstFPInstructionFound = false;
   js.isLastInstruction = false;
   js.blockStart = em_address;
@@ -839,10 +831,10 @@ bool Jit64::DoJit(u32 em_address, JitBlock* b, u32 nextPC)
   b->normalEntry = start;

   // Used to get a trace of the last few blocks before a crash, sometimes VERY useful
-  if (ImHereDebug)
+  if (m_im_here_debug)
   {
     ABI_PushRegistersAndAdjustStack({}, 0);
-    ABI_CallFunction(ImHere);
+    ABI_CallFunctionP(ImHere, this);
     ABI_PopRegistersAndAdjustStack({}, 0);
   }

@@ -895,9 +887,9 @@ bool Jit64::DoJit(u32 em_address, JitBlock* b, u32 nextPC)
       // the start of the block in case our guess turns out wrong.
       for (int gqr : gqr_static)
       {
-        u32 value = GQR(PowerPC::ppcState, gqr);
+        u32 value = GQR(m_ppc_state, gqr);
         js.constantGqr[gqr] = value;
-        CMP_or_TEST(32, PPCSTATE(spr[SPR_GQR0 + gqr]), Imm32(value));
+        CMP_or_TEST(32, PPCSTATE_SPR(SPR_GQR0 + gqr), Imm32(value));
         J_CC(CC_NZ, target);
       }
       js.constantGqrValid = gqr_static;
@@ -944,7 +936,7 @@ bool Jit64::DoJit(u32 em_address, JitBlock* b, u32 nextPC)
       js.mustCheckFifo = false;
       BitSet32 registersInUse = CallerSavedRegistersInUse();
       ABI_PushRegistersAndAdjustStack(registersInUse, 0);
-      ABI_CallFunctionP(GPFifo::FastCheckGatherPipe, &system.GetGPFifo());
+      ABI_CallFunctionP(GPFifo::FastCheckGatherPipe, &m_system.GetGPFifo());
       ABI_PopRegistersAndAdjustStack(registersInUse, 0);
       gatherPipeIntCheck = true;
     }
@@ -961,7 +953,7 @@ bool Jit64::DoJit(u32 em_address, JitBlock* b, u32 nextPC)
       SetJumpTarget(extException);
       TEST(32, PPCSTATE(msr), Imm32(0x0008000));
       FixupBranch noExtIntEnable = J_CC(CC_Z, true);
-      MOV(64, R(RSCRATCH), ImmPtr(&system.GetProcessorInterface().m_interrupt_cause));
+      MOV(64, R(RSCRATCH), ImmPtr(&m_system.GetProcessorInterface().m_interrupt_cause));
       TEST(32, MatR(RSCRATCH),
            Imm32(ProcessorInterface::INT_CAUSE_CP | ProcessorInterface::INT_CAUSE_PE_TOKEN |
                  ProcessorInterface::INT_CAUSE_PE_FINISH));
@@ -1013,7 +1005,7 @@ bool Jit64::DoJit(u32 em_address, JitBlock* b, u32 nextPC)
       js.firstFPInstructionFound = true;
     }

-    auto& cpu = system.GetCPU();
+    auto& cpu = m_system.GetCPU();
     if (m_enable_debugging && breakpoints.IsAddressBreakPoint(op.address) && !cpu.IsStepping())
     {
       gpr.Flush();
@@ -1190,7 +1182,7 @@ void Jit64::IntializeSpeculativeConstants()
   const u8* target = nullptr;
   for (auto i : code_block.m_gpr_inputs)
   {
-    u32 compileTimeValue = PowerPC::ppcState.gpr[i];
+    u32 compileTimeValue = m_ppc_state.gpr[i];
     if (PowerPC::IsOptimizableGatherPipeWrite(compileTimeValue) ||
         PowerPC::IsOptimizableGatherPipeWrite(compileTimeValue - 0x8000) ||
         compileTimeValue == 0xCC000000)
@@ -1207,7 +1199,7 @@ void Jit64::IntializeSpeculativeConstants()
         JMP(asm_routines.dispatcher_no_check, true);
         SwitchToNearCode();
       }
-      CMP(32, PPCSTATE(gpr[i]), Imm32(compileTimeValue));
+      CMP(32, PPCSTATE_GPR(i), Imm32(compileTimeValue));
       J_CC(CC_NZ, target);
       gpr.SetImmediate32(i, compileTimeValue, false);
     }
@@ -260,6 +260,8 @@ private:

   void ResetFreeMemoryRanges();

+  static void ImHere(Jit64& jit);
+
   JitBlockCache blocks{*this};
   TrampolineCache trampolines{*this};

@@ -270,6 +272,10 @@ private:

   HyoutaUtilities::RangeSizeSet<u8*> m_free_ranges_near;
   HyoutaUtilities::RangeSizeSet<u8*> m_free_ranges_far;
+
+  const bool m_im_here_debug = false;
+  const bool m_im_here_log = false;
+  std::map<u32, int> m_been_here;
 };

 void LogGeneratedX86(size_t size, const PPCAnalyst::CodeBuffer& code_buffer, const u8* normalEntry,
@@ -20,7 +20,7 @@

 using namespace Gen;

-Jit64AsmRoutineManager::Jit64AsmRoutineManager(Jit64& jit) : CommonAsmRoutines(jit), m_jit{jit}
+Jit64AsmRoutineManager::Jit64AsmRoutineManager(Jit64& jit) : CommonAsmRoutines(jit)
 {
 }

@@ -45,9 +45,11 @@ void Jit64AsmRoutineManager::Generate()
   // waste a bit of space for a second shadow, but whatever.
   ABI_PushRegistersAndAdjustStack(ABI_ALL_CALLEE_SAVED, 8, /*frame*/ 16);

+  auto& ppc_state = m_jit.m_ppc_state;
+
   // Two statically allocated registers.
   // MOV(64, R(RMEM), Imm64((u64)Memory::physical_base));
-  MOV(64, R(RPPCSTATE), Imm64((u64)&PowerPC::ppcState + 0x80));
+  MOV(64, R(RPPCSTATE), Imm64((u64)&ppc_state + 0x80));

   MOV(64, PPCSTATE(stored_stack_pointer), R(RSP));

@@ -81,7 +83,7 @@ void Jit64AsmRoutineManager::Generate()

   dispatcher_no_timing_check = GetCodePtr();

-  auto& system = Core::System::GetInstance();
+  auto& system = m_jit.m_system;

   FixupBranch dbg_exit;
   if (enable_debugging)
@@ -11,8 +11,6 @@ namespace Gen
 class X64CodeBlock;
 }

-class JitBase;
-
 // In Dolphin, we don't use inline assembly. Instead, we generate all machine-near
 // code at runtime. In the case of fixed code like this, after writing it, we write
 // protect the memory, essentially making it work just like precompiled code.
@@ -43,6 +41,4 @@ public:
 private:
   void Generate();
   void GenerateCommon();
-
-  JitBase& m_jit;
 };
@@ -780,7 +780,7 @@ void Jit64::FloatCompare(UGeckoInstruction inst, bool upper)
     SetJumpTarget(continue3);
   }

-  MOV(64, PPCSTATE(cr.fields[crf]), R(RSCRATCH));
+  MOV(64, PPCSTATE_CR(crf), R(RSCRATCH));
 }

 void Jit64::fcmpX(UGeckoInstruction inst)
@@ -148,16 +148,16 @@ void Jit64::ComputeRC(preg_t preg, bool needs_test, bool needs_sext)

   if (arg.IsImm())
   {
-    MOV(64, PPCSTATE(cr.fields[0]), Imm32(arg.SImm32()));
+    MOV(64, PPCSTATE_CR(0), Imm32(arg.SImm32()));
   }
   else if (needs_sext)
   {
     MOVSX(64, 32, RSCRATCH, arg);
-    MOV(64, PPCSTATE(cr.fields[0]), R(RSCRATCH));
+    MOV(64, PPCSTATE_CR(0), R(RSCRATCH));
   }
   else
   {
-    MOV(64, PPCSTATE(cr.fields[0]), arg);
+    MOV(64, PPCSTATE_CR(0), arg);
   }

   if (CheckMergedBranch(0))
@@ -391,14 +391,14 @@ void Jit64::DoMergedBranch()
   if (js.op[1].branchIsIdleLoop)
   {
     if (next.LK)
-      MOV(32, PPCSTATE(spr[SPR_LR]), Imm32(nextPC + 4));
+      MOV(32, PPCSTATE_SPR(SPR_LR), Imm32(nextPC + 4));

     WriteIdleExit(js.op[1].branchTo);
   }
   else if (next.OPCD == 16) // bcx
   {
     if (next.LK)
-      MOV(32, PPCSTATE(spr[SPR_LR]), Imm32(nextPC + 4));
+      MOV(32, PPCSTATE_SPR(SPR_LR), Imm32(nextPC + 4));

     u32 destination;
     if (next.AA)
@@ -410,18 +410,18 @@ void Jit64::DoMergedBranch()
   else if ((next.OPCD == 19) && (next.SUBOP10 == 528)) // bcctrx
   {
     if (next.LK)
-      MOV(32, PPCSTATE(spr[SPR_LR]), Imm32(nextPC + 4));
-    MOV(32, R(RSCRATCH), PPCSTATE(spr[SPR_CTR]));
+      MOV(32, PPCSTATE_SPR(SPR_LR), Imm32(nextPC + 4));
+    MOV(32, R(RSCRATCH), PPCSTATE_SPR(SPR_CTR));
     AND(32, R(RSCRATCH), Imm32(0xFFFFFFFC));
     WriteExitDestInRSCRATCH(next.LK, nextPC + 4);
   }
   else if ((next.OPCD == 19) && (next.SUBOP10 == 16)) // bclrx
   {
-    MOV(32, R(RSCRATCH), PPCSTATE(spr[SPR_LR]));
+    MOV(32, R(RSCRATCH), PPCSTATE_SPR(SPR_LR));
     if (!m_enable_blr_optimization)
       AND(32, R(RSCRATCH), Imm32(0xFFFFFFFC));
     if (next.LK)
-      MOV(32, PPCSTATE(spr[SPR_LR]), Imm32(nextPC + 4));
+      MOV(32, PPCSTATE_SPR(SPR_LR), Imm32(nextPC + 4));
     WriteBLRExit();
   }
   else
@@ -551,12 +551,12 @@ void Jit64::cmpXX(UGeckoInstruction inst)
                                               (u64)gpr.Imm32(a) - (u64)comparand.Imm32();
     if (compareResult == (s32)compareResult)
     {
-      MOV(64, PPCSTATE(cr.fields[crf]), Imm32((u32)compareResult));
+      MOV(64, PPCSTATE_CR(crf), Imm32((u32)compareResult));
     }
     else
     {
       MOV(64, R(RSCRATCH), Imm64(compareResult));
-      MOV(64, PPCSTATE(cr.fields[crf]), R(RSCRATCH));
+      MOV(64, PPCSTATE_CR(crf), R(RSCRATCH));
     }

     if (merge_branch)
@@ -573,7 +573,7 @@ void Jit64::cmpXX(UGeckoInstruction inst)
     RCX64Reg Ra = gpr.Bind(a, RCMode::Read);
     RegCache::Realize(Ra);

-    MOV(64, PPCSTATE(cr.fields[crf]), Ra);
+    MOV(64, PPCSTATE_CR(crf), Ra);
     if (merge_branch)
     {
       TEST(64, Ra, Ra);
@@ -621,7 +621,7 @@ void Jit64::cmpXX(UGeckoInstruction inst)

     if (comparand.IsImm() && comparand.Imm32() == 0)
     {
-      MOV(64, PPCSTATE(cr.fields[crf]), R(input));
+      MOV(64, PPCSTATE_CR(crf), R(input));
       // Place the comparison next to the branch for macro-op fusion
       if (merge_branch)
         TEST(64, R(input), R(input));
@@ -629,7 +629,7 @@ void Jit64::cmpXX(UGeckoInstruction inst)
     else
     {
       SUB(64, R(input), comparand);
-      MOV(64, PPCSTATE(cr.fields[crf]), R(input));
+      MOV(64, PPCSTATE_CR(crf), R(input));
     }

     if (merge_branch)
@@ -320,7 +320,7 @@ void Jit64::dcbx(UGeckoInstruction inst)
   FixupBranch bat_lookup_failed;
   MOV(32, R(effective_address), R(addr));
   const u8* loop_start = GetCodePtr();
-  if (PowerPC::ppcState.msr.IR)
+  if (m_ppc_state.msr.IR)
   {
     // Translate effective address to physical address.
     bat_lookup_failed = BATAddressLookup(addr, tmp, PowerPC::ibat_table.data());
@@ -349,7 +349,7 @@ void Jit64::dcbx(UGeckoInstruction inst)

   SwitchToFarCode();
   SetJumpTarget(invalidate_needed);
-  if (PowerPC::ppcState.msr.IR)
+  if (m_ppc_state.msr.IR)
     SetJumpTarget(bat_lookup_failed);

   BitSet32 registersInUse = CallerSavedRegistersInUse();
@@ -422,7 +422,7 @@ void Jit64::dcbz(UGeckoInstruction inst)
     end_dcbz_hack = J_CC(CC_L);
   }

-  bool emit_fast_path = PowerPC::ppcState.msr.DR && m_jit.jo.fastmem_arena;
+  bool emit_fast_path = m_ppc_state.msr.DR && m_jit.jo.fastmem_arena;

   if (emit_fast_path)
   {
@@ -23,7 +23,7 @@ void Jit64::psq_stXX(UGeckoInstruction inst)
   JITDISABLE(bJITLoadStorePairedOff);

   // For performance, the AsmCommon routines assume address translation is on.
-  FALLBACK_IF(!PowerPC::ppcState.msr.DR);
+  FALLBACK_IF(!m_ppc_state.msr.DR);

   s32 offset = inst.SIMM_12;
   bool indexed = inst.OPCD == 4;
@@ -90,7 +90,7 @@ void Jit64::psq_stXX(UGeckoInstruction inst)
     // UU[SCALE]UUUUU[TYPE] where SCALE is 6 bits and TYPE is 3 bits, so we have to AND with
     // 0b0011111100000111, or 0x3F07.
     MOV(32, R(RSCRATCH2), Imm32(0x3F07));
-    AND(32, R(RSCRATCH2), PPCSTATE(spr[SPR_GQR0 + i]));
+    AND(32, R(RSCRATCH2), PPCSTATE_SPR(SPR_GQR0 + i));
     LEA(64, RSCRATCH,
         M(w ? asm_routines.single_store_quantized : asm_routines.paired_store_quantized));
     // 8-bit operations do not zero upper 32-bits of 64-bit registers.
@@ -112,7 +112,7 @@ void Jit64::psq_lXX(UGeckoInstruction inst)
   JITDISABLE(bJITLoadStorePairedOff);

   // For performance, the AsmCommon routines assume address translation is on.
-  FALLBACK_IF(!PowerPC::ppcState.msr.DR);
+  FALLBACK_IF(!m_ppc_state.msr.DR);

   s32 offset = inst.SIMM_12;
   bool indexed = inst.OPCD == 4;
@@ -147,7 +147,7 @@ void Jit64::psq_lXX(UGeckoInstruction inst)
     // Stash PC in case asm_routine causes exception
     MOV(32, PPCSTATE(pc), Imm32(js.compilerPC));
     // Get the high part of the GQR register
-    OpArg gqr = PPCSTATE(spr[SPR_GQR0 + i]);
+    OpArg gqr = PPCSTATE_SPR(SPR_GQR0 + i);
     gqr.AddMemOffset(2);
     MOV(32, R(RSCRATCH2), Imm32(0x3F07));
     AND(32, R(RSCRATCH2), gqr);
@@ -20,7 +20,7 @@ using namespace Gen;

 static OpArg CROffset(int field)
 {
-  return PPCSTATE(cr.fields[field]);
+  return PPCSTATE_CR(field);
 }

 void Jit64::GetCRFieldBit(int field, int bit, X64Reg out, bool negate)
@@ -216,9 +216,9 @@ void Jit64::UpdateFPExceptionSummary(X64Reg fpscr, X64Reg tmp1, X64Reg tmp2)
   OR(32, R(fpscr), R(tmp1));
 }

-static void DoICacheReset()
+static void DoICacheReset(PowerPC::PowerPCState& ppc_state)
 {
-  PowerPC::ppcState.iCache.Reset();
+  ppc_state.iCache.Reset();
 }

 void Jit64::mtspr(UGeckoInstruction inst)
@@ -282,11 +282,11 @@ void Jit64::mtspr(UGeckoInstruction inst)

     MOV(32, R(RSCRATCH), Rd);
     BTR(32, R(RSCRATCH), Imm8(31 - 20)); // ICFI
-    MOV(32, PPCSTATE(spr[iIndex]), R(RSCRATCH));
+    MOV(32, PPCSTATE_SPR(iIndex), R(RSCRATCH));
     FixupBranch dont_reset_icache = J_CC(CC_NC);
     BitSet32 regs = CallerSavedRegistersInUse();
     ABI_PushRegistersAndAdjustStack(regs, 0);
-    ABI_CallFunction(DoICacheReset);
+    ABI_CallFunctionP(DoICacheReset, &m_ppc_state);
     ABI_PopRegistersAndAdjustStack(regs, 0);
     SetJumpTarget(dont_reset_icache);
     return;
@@ -299,7 +299,7 @@ void Jit64::mtspr(UGeckoInstruction inst)
   // OK, this is easy.
   RCOpArg Rd = gpr.BindOrImm(d, RCMode::Read);
   RegCache::Realize(Rd);
-  MOV(32, PPCSTATE(spr[iIndex]), Rd);
+  MOV(32, PPCSTATE_SPR(iIndex), Rd);
 }

 void Jit64::mfspr(UGeckoInstruction inst)
@@ -323,7 +323,7 @@ void Jit64::mfspr(UGeckoInstruction inst)
     RCX64Reg rax = gpr.Scratch(RAX);
     RCX64Reg rcx = gpr.Scratch(RCX);

-    auto& core_timing_globals = Core::System::GetInstance().GetCoreTiming().GetGlobals();
+    auto& core_timing_globals = m_system.GetCoreTiming().GetGlobals();
     MOV(64, rcx, ImmPtr(&core_timing_globals));

     // An inline implementation of CoreTiming::GetFakeTimeBase, since in timer-heavy games the
@@ -355,7 +355,7 @@ void Jit64::mfspr(UGeckoInstruction inst)
     MOV(64, rax, MDisp(rcx, offsetof(CoreTiming::Globals, fake_TB_start_value)));
     SHR(64, rdx, Imm8(3));
     ADD(64, rax, rdx);
-    MOV(64, PPCSTATE(spr[SPR_TL]), rax);
+    MOV(64, PPCSTATE_SPR(SPR_TL), rax);

     if (CanMergeNextInstructions(1))
     {
@@ -422,7 +422,7 @@ void Jit64::mfspr(UGeckoInstruction inst)
   {
     RCX64Reg Rd = gpr.Bind(d, RCMode::Write);
     RegCache::Realize(Rd);
-    MOV(32, Rd, PPCSTATE(spr[iIndex]));
+    MOV(32, Rd, PPCSTATE_SPR(iIndex));
     break;
   }
   }
@@ -457,8 +457,7 @@ void Jit64::mtmsr(UGeckoInstruction inst)
   FixupBranch noExceptionsPending = J_CC(CC_Z, true);

   // Check if a CP interrupt is waiting and keep the GPU emulation in sync (issue 4336)
-  auto& system = Core::System::GetInstance();
-  MOV(64, R(RSCRATCH), ImmPtr(&system.GetProcessorInterface().m_interrupt_cause));
+  MOV(64, R(RSCRATCH), ImmPtr(&m_system.GetProcessorInterface().m_interrupt_cause));
   TEST(32, MatR(RSCRATCH), Imm32(ProcessorInterface::INT_CAUSE_CP));
   FixupBranch cpInt = J_CC(CC_NZ, true);

@@ -35,7 +35,7 @@ const X64Reg* FPURegCache::GetAllocationOrder(size_t* count) const

 OpArg FPURegCache::GetDefaultLocation(preg_t preg) const
 {
-  return PPCSTATE(ps[preg].ps0);
+  return PPCSTATE_PS0(preg);
 }

 BitSet32 FPURegCache::GetRegUtilization() const
@@ -27,7 +27,7 @@ void GPRRegCache::LoadRegister(preg_t preg, X64Reg new_loc)

 OpArg GPRRegCache::GetDefaultLocation(preg_t preg) const
 {
-  return PPCSTATE(gpr[preg]);
+  return PPCSTATE_GPR(preg);
 }

 const X64Reg* GPRRegCache::GetAllocationOrder(size_t* count) const
@@ -209,10 +209,10 @@ template <typename T>
 class MMIOReadCodeGenerator : public MMIO::ReadHandlingMethodVisitor<T>
 {
 public:
-  MMIOReadCodeGenerator(Gen::X64CodeBlock* code, BitSet32 registers_in_use, Gen::X64Reg dst_reg,
-                        u32 address, bool sign_extend)
-      : m_code(code), m_registers_in_use(registers_in_use), m_dst_reg(dst_reg), m_address(address),
-        m_sign_extend(sign_extend)
+  MMIOReadCodeGenerator(Core::System* system, Gen::X64CodeBlock* code, BitSet32 registers_in_use,
+                        Gen::X64Reg dst_reg, u32 address, bool sign_extend)
+      : m_system(system), m_code(code), m_registers_in_use(registers_in_use), m_dst_reg(dst_reg),
+        m_address(address), m_sign_extend(sign_extend)
   {
   }

@@ -273,11 +273,12 @@ private:
   void CallLambda(int sbits, const std::function<T(Core::System&, u32)>* lambda)
   {
     m_code->ABI_PushRegistersAndAdjustStack(m_registers_in_use, 0);
-    m_code->ABI_CallLambdaPC(lambda, &Core::System::GetInstance(), m_address);
+    m_code->ABI_CallLambdaPC(lambda, m_system, m_address);
     m_code->ABI_PopRegistersAndAdjustStack(m_registers_in_use, 0);
     MoveOpArgToReg(sbits, R(ABI_RETURN));
   }

+  Core::System* m_system;
   Gen::X64CodeBlock* m_code;
   BitSet32 m_registers_in_use;
   Gen::X64Reg m_dst_reg;
@@ -293,19 +294,22 @@ void EmuCodeBlock::MMIOLoadToReg(MMIO::Mapping* mmio, Gen::X64Reg reg_value,
   {
   case 8:
   {
-    MMIOReadCodeGenerator<u8> gen(this, registers_in_use, reg_value, address, sign_extend);
+    MMIOReadCodeGenerator<u8> gen(&m_jit.m_system, this, registers_in_use, reg_value, address,
+                                  sign_extend);
     mmio->GetHandlerForRead<u8>(address).Visit(gen);
     break;
   }
   case 16:
   {
-    MMIOReadCodeGenerator<u16> gen(this, registers_in_use, reg_value, address, sign_extend);
+    MMIOReadCodeGenerator<u16> gen(&m_jit.m_system, this, registers_in_use, reg_value, address,
+                                   sign_extend);
     mmio->GetHandlerForRead<u16>(address).Visit(gen);
     break;
   }
   case 32:
   {
-    MMIOReadCodeGenerator<u32> gen(this, registers_in_use, reg_value, address, sign_extend);
+    MMIOReadCodeGenerator<u32> gen(&m_jit.m_system, this, registers_in_use, reg_value, address,
+                                   sign_extend);
     mmio->GetHandlerForRead<u32>(address).Visit(gen);
     break;
   }
@@ -367,7 +371,7 @@ void EmuCodeBlock::SafeLoadToReg(X64Reg reg_value, const Gen::OpArg& opAddress,
   }

   FixupBranch exit;
-  const bool dr_set = (flags & SAFE_LOADSTORE_DR_ON) || PowerPC::ppcState.msr.DR;
+  const bool dr_set = (flags & SAFE_LOADSTORE_DR_ON) || m_jit.m_ppc_state.msr.DR;
   const bool fast_check_address = !slowmem && dr_set && m_jit.jo.fastmem_arena;
   if (fast_check_address)
   {
@@ -442,8 +446,7 @@ void EmuCodeBlock::SafeLoadToRegImmediate(X64Reg reg_value, u32 address, int acc
   u32 mmioAddress = PowerPC::IsOptimizableMMIOAccess(address, accessSize);
   if (accessSize != 64 && mmioAddress)
   {
-    auto& system = Core::System::GetInstance();
-    auto& memory = system.GetMemory();
+    auto& memory = m_jit.m_system.GetMemory();
     MMIOLoadToReg(memory.GetMMIOMapping(), reg_value, registersInUse, mmioAddress, accessSize,
                   signExtend);
     return;
@@ -537,7 +540,7 @@ void EmuCodeBlock::SafeWriteRegToReg(OpArg reg_value, X64Reg reg_addr, int acces
   }

   FixupBranch exit;
-  const bool dr_set = (flags & SAFE_LOADSTORE_DR_ON) || PowerPC::ppcState.msr.DR;
+  const bool dr_set = (flags & SAFE_LOADSTORE_DR_ON) || m_jit.m_ppc_state.msr.DR;
   const bool fast_check_address = !slowmem && dr_set && m_jit.jo.fastmem_arena;
   if (fast_check_address)
   {
@@ -305,7 +305,7 @@ void CommonAsmRoutines::GenMfcr()
     if (i != 0)
       SHL(32, R(dst), Imm8(4));

-    MOV(64, R(cr_val), PPCSTATE(cr.fields[i]));
+    MOV(64, R(cr_val), PPCSTATE_CR(i));

     // EQ: Bits 31-0 == 0; set flag bit 1
     TEST(32, R(cr_val), R(cr_val));
@@ -9,6 +9,8 @@

 enum EQuantizeType : u32;

+class Jit64;
+
 class QuantizedMemoryRoutines : public EmuCodeBlock
 {
 public:
@@ -24,7 +26,7 @@ private:
 class CommonAsmRoutines : public CommonAsmRoutinesBase, public QuantizedMemoryRoutines
 {
 public:
-  explicit CommonAsmRoutines(Jit64& jit) : QuantizedMemoryRoutines(jit) {}
+  explicit CommonAsmRoutines(Jit64& jit) : QuantizedMemoryRoutines(jit), m_jit(jit) {}
   void GenFrsqrte();
   void GenFres();
   void GenMfcr();
@@ -37,4 +39,6 @@ protected:
   void GenQuantizedSingleLoads();
   void GenQuantizedStores();
   void GenQuantizedSingleStores();
+
+  Jit64& m_jit;
 };
@@ -3,17 +3,38 @@

 #pragma once

+#include <type_traits>
+
 #include "Common/CommonTypes.h"
 #include "Core/PowerPC/Jit64Common/Jit64Constants.h"
 #include "Core/PowerPC/PowerPC.h"

 // We offset by 0x80 because the range of one byte memory offsets is
 // -0x80..0x7f.
-#define PPCSTATE(x) \
-  MDisp(RPPCSTATE, (int)((char*)&PowerPC::ppcState.x - (char*)&PowerPC::ppcState) - 0x80)
-// In case you want to disable the ppcstate register:
-// #define PPCSTATE(x) M(&PowerPC::ppcState.x)
-#define PPCSTATE_LR PPCSTATE(spr[SPR_LR])
-#define PPCSTATE_CTR PPCSTATE(spr[SPR_CTR])
-#define PPCSTATE_SRR0 PPCSTATE(spr[SPR_SRR0])
-#define PPCSTATE_SRR1 PPCSTATE(spr[SPR_SRR1])
+#define PPCSTATE_OFF(i) (static_cast<int>(offsetof(PowerPC::PowerPCState, i)) - 0x80)
+#define PPCSTATE_OFF_ARRAY(elem, i) \
+  (static_cast<int>(offsetof(PowerPC::PowerPCState, elem[0]) + \
+                    sizeof(PowerPC::PowerPCState::elem[0]) * (i)) - \
+   0x80)
+
+#define PPCSTATE_OFF_GPR(i) PPCSTATE_OFF_ARRAY(gpr, i)
+#define PPCSTATE_OFF_CR(i) PPCSTATE_OFF_ARRAY(cr.fields, i)
+#define PPCSTATE_OFF_SR(i) PPCSTATE_OFF_ARRAY(sr, i)
+#define PPCSTATE_OFF_SPR(i) PPCSTATE_OFF_ARRAY(spr, i)
+
+static_assert(std::is_same_v<decltype(PowerPC::PowerPCState::ps[0]), PowerPC::PairedSingle&>);
+#define PPCSTATE_OFF_PS0(i) (PPCSTATE_OFF_ARRAY(ps, i) + offsetof(PowerPC::PairedSingle, ps0))
+#define PPCSTATE_OFF_PS1(i) (PPCSTATE_OFF_ARRAY(ps, i) + offsetof(PowerPC::PairedSingle, ps1))
+
+#define PPCSTATE(i) MDisp(RPPCSTATE, PPCSTATE_OFF(i))
+#define PPCSTATE_GPR(i) MDisp(RPPCSTATE, PPCSTATE_OFF_ARRAY(gpr, i))
+#define PPCSTATE_CR(i) MDisp(RPPCSTATE, PPCSTATE_OFF_ARRAY(cr.fields, i))
+#define PPCSTATE_SR(i) MDisp(RPPCSTATE, PPCSTATE_OFF_ARRAY(sr, i))
+#define PPCSTATE_SPR(i) MDisp(RPPCSTATE, PPCSTATE_OFF_ARRAY(spr, i))
+#define PPCSTATE_PS0(i) MDisp(RPPCSTATE, PPCSTATE_OFF_PS0(i))
+#define PPCSTATE_PS1(i) MDisp(RPPCSTATE, PPCSTATE_OFF_PS1(i))
+
+#define PPCSTATE_LR PPCSTATE_SPR(SPR_LR)
+#define PPCSTATE_CTR PPCSTATE_SPR(SPR_CTR)
+#define PPCSTATE_SRR0 PPCSTATE_SPR(SPR_SRR0)
+#define PPCSTATE_SRR1 PPCSTATE_SPR(SPR_SRR1)