Merge pull request #6854 from lioncash/access

Jit/CachedInterpreter: Minor cleanup to code buffer accesses
This commit is contained in:
Markus Wick 2018-05-14 07:49:22 +02:00 committed by GitHub
commit db4d8d7ad3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 48 additions and 47 deletions

View File

@@ -198,16 +198,17 @@ void CachedInterpreter::Jit(u32 address)
js.downcountAmount = 0; js.downcountAmount = 0;
js.curBlock = b; js.curBlock = b;
PPCAnalyst::CodeOp* ops = code_buffer.codebuffer;
b->checkedEntry = GetCodePtr(); b->checkedEntry = GetCodePtr();
b->normalEntry = GetCodePtr(); b->normalEntry = GetCodePtr();
PPCAnalyst::CodeOp* const ops = code_buffer.codebuffer;
for (u32 i = 0; i < code_block.m_num_instructions; i++) for (u32 i = 0; i < code_block.m_num_instructions; i++)
{ {
js.downcountAmount += ops[i].opinfo->numCycles; PPCAnalyst::CodeOp& op = ops[i];
u32 function = HLE::GetFirstFunctionIndex(ops[i].address); js.downcountAmount += op.opinfo->numCycles;
u32 function = HLE::GetFirstFunctionIndex(op.address);
if (function != 0) if (function != 0)
{ {
HLE::HookType type = HLE::GetFunctionTypeByIndex(function); HLE::HookType type = HLE::GetFunctionTypeByIndex(function);
@@ -216,7 +217,7 @@ void CachedInterpreter::Jit(u32 address)
HLE::HookFlag flags = HLE::GetFunctionFlagsByIndex(function); HLE::HookFlag flags = HLE::GetFunctionFlagsByIndex(function);
if (HLE::IsEnabled(flags)) if (HLE::IsEnabled(flags))
{ {
m_code.emplace_back(WritePC, ops[i].address); m_code.emplace_back(WritePC, op.address);
m_code.emplace_back(Interpreter::HLEFunction, function); m_code.emplace_back(Interpreter::HLEFunction, function);
if (type == HLE::HookType::Replace) if (type == HLE::HookType::Replace)
{ {
@@ -228,22 +229,22 @@ void CachedInterpreter::Jit(u32 address)
} }
} }
if (!ops[i].skip) if (!op.skip)
{ {
bool check_fpu = (ops[i].opinfo->flags & FL_USE_FPU) && !js.firstFPInstructionFound; const bool check_fpu = (op.opinfo->flags & FL_USE_FPU) && !js.firstFPInstructionFound;
bool endblock = (ops[i].opinfo->flags & FL_ENDBLOCK) != 0; const bool endblock = (op.opinfo->flags & FL_ENDBLOCK) != 0;
bool memcheck = (ops[i].opinfo->flags & FL_LOADSTORE) && jo.memcheck; const bool memcheck = (op.opinfo->flags & FL_LOADSTORE) && jo.memcheck;
if (check_fpu) if (check_fpu)
{ {
m_code.emplace_back(WritePC, ops[i].address); m_code.emplace_back(WritePC, op.address);
m_code.emplace_back(CheckFPU, js.downcountAmount); m_code.emplace_back(CheckFPU, js.downcountAmount);
js.firstFPInstructionFound = true; js.firstFPInstructionFound = true;
} }
if (endblock || memcheck) if (endblock || memcheck)
m_code.emplace_back(WritePC, ops[i].address); m_code.emplace_back(WritePC, op.address);
m_code.emplace_back(PPCTables::GetInterpreterOp(ops[i].inst), ops[i].inst); m_code.emplace_back(PPCTables::GetInterpreterOp(op.inst), op.inst);
if (memcheck) if (memcheck)
m_code.emplace_back(CheckDSI, js.downcountAmount); m_code.emplace_back(CheckDSI, js.downcountAmount);
if (endblock) if (endblock)

View File

@@ -651,8 +651,6 @@ const u8* Jit64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer* code_buf, JitBloc
js.numLoadStoreInst = 0; js.numLoadStoreInst = 0;
js.numFloatingPointInst = 0; js.numFloatingPointInst = 0;
PPCAnalyst::CodeOp* ops = code_buf->codebuffer;
const u8* start = const u8* start =
AlignCode4(); // TODO: Test if this or AlignCode16 make a difference from GetCodePtr AlignCode4(); // TODO: Test if this or AlignCode16 make a difference from GetCodePtr
b->checkedEntry = start; b->checkedEntry = start;
@@ -740,13 +738,16 @@ const u8* Jit64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer* code_buf, JitBloc
} }
// Translate instructions // Translate instructions
PPCAnalyst::CodeOp* const ops = code_buf->codebuffer;
for (u32 i = 0; i < code_block.m_num_instructions; i++) for (u32 i = 0; i < code_block.m_num_instructions; i++)
{ {
js.compilerPC = ops[i].address; PPCAnalyst::CodeOp& op = ops[i];
js.op = &ops[i];
js.compilerPC = op.address;
js.op = &op;
js.instructionNumber = i; js.instructionNumber = i;
js.instructionsLeft = (code_block.m_num_instructions - 1) - i; js.instructionsLeft = (code_block.m_num_instructions - 1) - i;
const GekkoOPInfo* opinfo = ops[i].opinfo; const GekkoOPInfo* opinfo = op.opinfo;
js.downcountAmount += opinfo->numCycles; js.downcountAmount += opinfo->numCycles;
js.fastmemLoadStore = nullptr; js.fastmemLoadStore = nullptr;
js.fixupExceptionHandler = false; js.fixupExceptionHandler = false;
@@ -762,8 +763,7 @@ const u8* Jit64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer* code_buf, JitBloc
} }
// Gather pipe writes using a non-immediate address are discovered by profiling. // Gather pipe writes using a non-immediate address are discovered by profiling.
bool gatherPipeIntCheck = bool gatherPipeIntCheck = js.fifoWriteAddresses.find(op.address) != js.fifoWriteAddresses.end();
js.fifoWriteAddresses.find(ops[i].address) != js.fifoWriteAddresses.end();
// Gather pipe writes using an immediate address are explicitly tracked. // Gather pipe writes using an immediate address are explicitly tracked.
if (jo.optimizeGatherPipe && (js.fifoBytesSinceCheck >= 32 || js.mustCheckFifo)) if (jo.optimizeGatherPipe && (js.fifoBytesSinceCheck >= 32 || js.mustCheckFifo))
@@ -798,7 +798,7 @@ const u8* Jit64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer* code_buf, JitBloc
gpr.Flush(RegCache::FlushMode::MaintainState); gpr.Flush(RegCache::FlushMode::MaintainState);
fpr.Flush(RegCache::FlushMode::MaintainState); fpr.Flush(RegCache::FlushMode::MaintainState);
MOV(32, PPCSTATE(pc), Imm32(ops[i].address)); MOV(32, PPCSTATE(pc), Imm32(op.address));
WriteExternalExceptionExit(); WriteExternalExceptionExit();
SwitchToNearCode(); SwitchToNearCode();
@@ -806,7 +806,7 @@ const u8* Jit64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer* code_buf, JitBloc
SetJumpTarget(noExtIntEnable); SetJumpTarget(noExtIntEnable);
} }
u32 function = HLE::GetFirstFunctionIndex(ops[i].address); u32 function = HLE::GetFirstFunctionIndex(op.address);
if (function != 0) if (function != 0)
{ {
HLE::HookType type = HLE::GetFunctionTypeByIndex(function); HLE::HookType type = HLE::GetFunctionTypeByIndex(function);
@@ -827,7 +827,7 @@ const u8* Jit64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer* code_buf, JitBloc
} }
} }
if (!ops[i].skip) if (!op.skip)
{ {
if ((opinfo->flags & FL_USE_FPU) && !js.firstFPInstructionFound) if ((opinfo->flags & FL_USE_FPU) && !js.firstFPInstructionFound)
{ {
@@ -842,7 +842,7 @@ const u8* Jit64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer* code_buf, JitBloc
// If a FPU exception occurs, the exception handler will read // If a FPU exception occurs, the exception handler will read
// from PC. Update PC with the latest value in case that happens. // from PC. Update PC with the latest value in case that happens.
MOV(32, PPCSTATE(pc), Imm32(ops[i].address)); MOV(32, PPCSTATE(pc), Imm32(op.address));
OR(32, PPCSTATE(Exceptions), Imm32(EXCEPTION_FPU_UNAVAILABLE)); OR(32, PPCSTATE(Exceptions), Imm32(EXCEPTION_FPU_UNAVAILABLE));
WriteExceptionExit(); WriteExceptionExit();
SwitchToNearCode(); SwitchToNearCode();
@@ -850,8 +850,8 @@ const u8* Jit64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer* code_buf, JitBloc
js.firstFPInstructionFound = true; js.firstFPInstructionFound = true;
} }
if (SConfig::GetInstance().bEnableDebugging && if (SConfig::GetInstance().bEnableDebugging && breakpoints.IsAddressBreakPoint(op.address) &&
breakpoints.IsAddressBreakPoint(ops[i].address) && !CPU::IsStepping()) !CPU::IsStepping())
{ {
// Turn off block linking if there are breakpoints so that the Step Over command does not // Turn off block linking if there are breakpoints so that the Step Over command does not
// link this block. // link this block.
@@ -860,7 +860,7 @@ const u8* Jit64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer* code_buf, JitBloc
gpr.Flush(); gpr.Flush();
fpr.Flush(); fpr.Flush();
MOV(32, PPCSTATE(pc), Imm32(ops[i].address)); MOV(32, PPCSTATE(pc), Imm32(op.address));
ABI_PushRegistersAndAdjustStack({}, 0); ABI_PushRegistersAndAdjustStack({}, 0);
ABI_CallFunction(PowerPC::CheckBreakPoints); ABI_CallFunction(PowerPC::CheckBreakPoints);
ABI_PopRegistersAndAdjustStack({}, 0); ABI_PopRegistersAndAdjustStack({}, 0);
@@ -868,7 +868,7 @@ const u8* Jit64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer* code_buf, JitBloc
TEST(32, MatR(RSCRATCH), Imm32(0xFFFFFFFF)); TEST(32, MatR(RSCRATCH), Imm32(0xFFFFFFFF));
FixupBranch noBreakpoint = J_CC(CC_Z); FixupBranch noBreakpoint = J_CC(CC_Z);
WriteExit(ops[i].address); WriteExit(op.address);
SetJumpTarget(noBreakpoint); SetJumpTarget(noBreakpoint);
} }
@@ -879,22 +879,22 @@ const u8* Jit64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer* code_buf, JitBloc
// output, which needs to be bound in the actual instruction compilation. // output, which needs to be bound in the actual instruction compilation.
// TODO: make this smarter in the case that we're actually register-starved, i.e. // TODO: make this smarter in the case that we're actually register-starved, i.e.
// prioritize the more important registers. // prioritize the more important registers.
for (int reg : ops[i].regsIn) for (int reg : op.regsIn)
{ {
if (gpr.NumFreeRegisters() < 2) if (gpr.NumFreeRegisters() < 2)
break; break;
if (ops[i].gprInReg[reg] && !gpr.R(reg).IsImm()) if (op.gprInReg[reg] && !gpr.R(reg).IsImm())
gpr.BindToRegister(reg, true, false); gpr.BindToRegister(reg, true, false);
} }
for (int reg : ops[i].fregsIn) for (int reg : op.fregsIn)
{ {
if (fpr.NumFreeRegisters() < 2) if (fpr.NumFreeRegisters() < 2)
break; break;
if (ops[i].fprInXmm[reg]) if (op.fprInXmm[reg])
fpr.BindToRegister(reg, true, false); fpr.BindToRegister(reg, true, false);
} }
CompileInstruction(ops[i]); CompileInstruction(op);
if (jo.memcheck && (opinfo->flags & FL_LOADSTORE)) if (jo.memcheck && (opinfo->flags & FL_LOADSTORE))
{ {
@@ -903,7 +903,7 @@ const u8* Jit64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer* code_buf, JitBloc
FixupBranch memException; FixupBranch memException;
ASSERT_MSG(DYNA_REC, !(js.fastmemLoadStore && js.fixupExceptionHandler), ASSERT_MSG(DYNA_REC, !(js.fastmemLoadStore && js.fixupExceptionHandler),
"Fastmem loadstores shouldn't have exception handler fixups (PC=%x)!", "Fastmem loadstores shouldn't have exception handler fixups (PC=%x)!",
ops[i].address); op.address);
if (!js.fastmemLoadStore && !js.fixupExceptionHandler) if (!js.fastmemLoadStore && !js.fixupExceptionHandler)
{ {
TEST(32, PPCSTATE(Exceptions), Imm32(EXCEPTION_DSI)); TEST(32, PPCSTATE(Exceptions), Imm32(EXCEPTION_DSI));
@@ -934,9 +934,9 @@ const u8* Jit64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer* code_buf, JitBloc
} }
// If we have a register that will never be used again, flush it. // If we have a register that will never be used again, flush it.
for (int j : ~ops[i].gprInUse) for (int j : ~op.gprInUse)
gpr.StoreFromRegister(j); gpr.StoreFromRegister(j);
for (int j : ~ops[i].fprInUse) for (int j : ~op.fprInUse)
fpr.StoreFromRegister(j); fpr.StoreFromRegister(j);
if (opinfo->flags & FL_LOADSTORE) if (opinfo->flags & FL_LOADSTORE)
@@ -949,7 +949,7 @@ const u8* Jit64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer* code_buf, JitBloc
#if defined(_DEBUG) || defined(DEBUGFAST) #if defined(_DEBUG) || defined(DEBUGFAST)
if (gpr.SanityCheck() || fpr.SanityCheck()) if (gpr.SanityCheck() || fpr.SanityCheck())
{ {
std::string ppc_inst = GekkoDisassembler::Disassemble(ops[i].inst.hex, em_address); std::string ppc_inst = GekkoDisassembler::Disassemble(op.inst.hex, em_address);
// NOTICE_LOG(DYNA_REC, "Unflushed register: %s", ppc_inst.c_str()); // NOTICE_LOG(DYNA_REC, "Unflushed register: %s", ppc_inst.c_str());
} }
#endif #endif

View File

@@ -601,8 +601,6 @@ void JitArm64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer* code_buf, JitBlock*
js.curBlock = b; js.curBlock = b;
js.carryFlagSet = false; js.carryFlagSet = false;
PPCAnalyst::CodeOp* ops = code_buf->codebuffer;
const u8* start = GetCodePtr(); const u8* start = GetCodePtr();
b->checkedEntry = start; b->checkedEntry = start;
@@ -651,13 +649,16 @@ void JitArm64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer* code_buf, JitBlock*
fpr.Start(js.fpa); fpr.Start(js.fpa);
// Translate instructions // Translate instructions
PPCAnalyst::CodeOp* const ops = code_buf->codebuffer;
for (u32 i = 0; i < code_block.m_num_instructions; i++) for (u32 i = 0; i < code_block.m_num_instructions; i++)
{ {
js.compilerPC = ops[i].address; PPCAnalyst::CodeOp& op = ops[i];
js.op = &ops[i];
js.compilerPC = op.address;
js.op = &op;
js.instructionNumber = i; js.instructionNumber = i;
js.instructionsLeft = (code_block.m_num_instructions - 1) - i; js.instructionsLeft = (code_block.m_num_instructions - 1) - i;
const GekkoOPInfo* opinfo = ops[i].opinfo; const GekkoOPInfo* opinfo = op.opinfo;
js.downcountAmount += opinfo->numCycles; js.downcountAmount += opinfo->numCycles;
js.isLastInstruction = i == (code_block.m_num_instructions - 1); js.isLastInstruction = i == (code_block.m_num_instructions - 1);
@@ -665,8 +666,7 @@ void JitArm64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer* code_buf, JitBlock*
js.downcountAmount += PatchEngine::GetSpeedhackCycles(js.compilerPC); js.downcountAmount += PatchEngine::GetSpeedhackCycles(js.compilerPC);
// Gather pipe writes using a non-immediate address are discovered by profiling. // Gather pipe writes using a non-immediate address are discovered by profiling.
bool gatherPipeIntCheck = bool gatherPipeIntCheck = js.fifoWriteAddresses.find(op.address) != js.fifoWriteAddresses.end();
js.fifoWriteAddresses.find(ops[i].address) != js.fifoWriteAddresses.end();
if (jo.optimizeGatherPipe && (js.fifoBytesSinceCheck >= 32 || js.mustCheckFifo)) if (jo.optimizeGatherPipe && (js.fifoBytesSinceCheck >= 32 || js.mustCheckFifo))
{ {
@@ -740,7 +740,7 @@ void JitArm64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer* code_buf, JitBlock*
SetJumpTarget(exit); SetJumpTarget(exit);
} }
if (!ops[i].skip) if (!op.skip)
{ {
if ((opinfo->flags & FL_USE_FPU) && !js.firstFPInstructionFound) if ((opinfo->flags & FL_USE_FPU) && !js.firstFPInstructionFound)
{ {
@@ -771,13 +771,13 @@ void JitArm64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer* code_buf, JitBlock*
js.firstFPInstructionFound = true; js.firstFPInstructionFound = true;
} }
CompileInstruction(ops[i]); CompileInstruction(op);
if (!CanMergeNextInstructions(1) || js.op[1].opinfo->type != ::OpType::Integer) if (!CanMergeNextInstructions(1) || js.op[1].opinfo->type != ::OpType::Integer)
FlushCarry(); FlushCarry();
// If we have a register that will never be used again, flush it. // If we have a register that will never be used again, flush it.
gpr.StoreRegisters(~ops[i].gprInUse); gpr.StoreRegisters(~op.gprInUse);
fpr.StoreRegisters(~ops[i].fprInUse); fpr.StoreRegisters(~op.fprInUse);
} }
i += js.skipInstructions; i += js.skipInstructions;