Jit64: Optimized idle skipping detection.

degasus 2018-07-28 01:52:22 +02:00
parent 4b1adab785
commit 55db7c7a05
8 changed files with 147 additions and 110 deletions

View File

@ -29,8 +29,8 @@ static std::array<GekkoOPTemplate, 54> primarytable =
{59, Interpreter::RunTable59, {"RunTable59", OpType::Subtable, 0, 0, 0, 0, 0}},
{63, Interpreter::RunTable63, {"RunTable63", OpType::Subtable, 0, 0, 0, 0, 0}},
- {16, Interpreter::bcx, {"bcx", OpType::System, FL_ENDBLOCK, 1, 0, 0, 0}},
- {18, Interpreter::bx, {"bx", OpType::System, FL_ENDBLOCK, 1, 0, 0, 0}},
+ {16, Interpreter::bcx, {"bcx", OpType::Branch, FL_ENDBLOCK, 1, 0, 0, 0}},
+ {18, Interpreter::bx, {"bx", OpType::Branch, FL_ENDBLOCK, 1, 0, 0, 0}},
{3, Interpreter::twi, {"twi", OpType::System, FL_ENDBLOCK, 1, 0, 0, 0}},
{17, Interpreter::sc, {"sc", OpType::System, FL_ENDBLOCK, 2, 0, 0, 0}},

View File

@ -87,25 +87,22 @@ void Jit64::bx(UGeckoInstruction inst)
gpr.Flush();
fpr.Flush();
u32 destination;
if (inst.AA)
destination = SignExt26(inst.LI << 2);
else
destination = js.compilerPC + SignExt26(inst.LI << 2);
#ifdef ACID_TEST
if (inst.LK)
AND(32, PPCSTATE(cr), Imm32(~(0xFF000000)));
#endif
- if (destination == js.compilerPC)
+ if (js.op->branchIsIdleLoop)
{
ABI_PushRegistersAndAdjustStack({}, 0);
ABI_CallFunction(CoreTiming::Idle);
ABI_PopRegistersAndAdjustStack({}, 0);
- MOV(32, PPCSTATE(pc), Imm32(destination));
+ MOV(32, PPCSTATE(pc), Imm32(js.op->branchTo));
WriteExceptionExit();
return;
}
- WriteExit(destination, inst.LK, js.compilerPC + 4);
+ else
{
WriteExit(js.op->branchTo, inst.LK, js.compilerPC + 4);
}
}
// TODO - optimize to hell and beyond
@ -154,18 +151,24 @@ void Jit64::bcx(UGeckoInstruction inst)
return;
}
u32 destination;
if (inst.AA)
destination = SignExt16(inst.BD << 2);
else
destination = js.compilerPC + SignExt16(inst.BD << 2);
{
RCForkGuard gpr_guard = gpr.Fork();
RCForkGuard fpr_guard = fpr.Fork();
gpr.Flush();
fpr.Flush();
WriteExit(destination, inst.LK, js.compilerPC + 4);
if (js.op->branchIsIdleLoop)
{
ABI_PushRegistersAndAdjustStack({}, 0);
ABI_CallFunction(CoreTiming::Idle);
ABI_PopRegistersAndAdjustStack({}, 0);
MOV(32, PPCSTATE(pc), Imm32(js.op->branchTo));
WriteExceptionExit();
}
else
{
WriteExit(js.op->branchTo, inst.LK, js.compilerPC + 4);
}
}
if ((inst.BO & BO_DONT_CHECK_CONDITION) == 0)

View File

@ -12,6 +12,7 @@
#include "Common/CommonTypes.h" #include "Common/CommonTypes.h"
#include "Common/MathUtil.h" #include "Common/MathUtil.h"
#include "Common/x64Emitter.h" #include "Common/x64Emitter.h"
#include "Core/CoreTiming.h"
#include "Core/PowerPC/Jit64/Jit.h" #include "Core/PowerPC/Jit64/Jit.h"
#include "Core/PowerPC/Jit64/RegCache/JitRegCache.h" #include "Core/PowerPC/Jit64/RegCache/JitRegCache.h"
#include "Core/PowerPC/Jit64Common/Jit64PowerPCState.h" #include "Core/PowerPC/Jit64Common/Jit64PowerPCState.h"
@ -361,7 +362,19 @@ void Jit64::DoMergedBranch()
// Code that handles successful PPC branching.
const UGeckoInstruction& next = js.op[1].inst;
const u32 nextPC = js.op[1].address;
if (next.OPCD == 16) // bcx
if (js.op[1].branchIsIdleLoop)
{
if (next.LK)
MOV(32, PPCSTATE(spr[SPR_LR]), Imm32(nextPC + 4));
ABI_PushRegistersAndAdjustStack({}, 0);
ABI_CallFunction(CoreTiming::Idle);
ABI_PopRegistersAndAdjustStack({}, 0);
MOV(32, PPCSTATE(pc), Imm32(js.op[1].branchTo));
WriteExceptionExit();
}
else if (next.OPCD == 16) // bcx
{
if (next.LK)
MOV(32, PPCSTATE(spr[SPR_LR]), Imm32(nextPC + 4));

View File

@ -119,41 +119,6 @@ void Jit64::lXXx(UGeckoInstruction inst)
signExtend = true;
}
if (!CPU::IsStepping() && inst.OPCD == 32 && CanMergeNextInstructions(2) &&
(inst.hex & 0xFFFF0000) == 0x800D0000 &&
(js.op[1].inst.hex == 0x28000000 ||
(SConfig::GetInstance().bWii && js.op[1].inst.hex == 0x2C000000)) &&
js.op[2].inst.hex == 0x4182fff8)
{
s32 offset = (s32)(s16)inst.SIMM_16;
RCX64Reg Ra = gpr.Bind(a, RCMode::Read);
RCX64Reg Rd = gpr.Bind(d, RCMode::Write);
RegCache::Realize(Ra, Rd);
SafeLoadToReg(Rd, Ra, accessSize, offset, CallerSavedRegistersInUse(), signExtend);
// if it's still 0, we can wait until the next event
TEST(32, Rd, Rd);
FixupBranch noIdle = J_CC(CC_NZ);
BitSet32 registersInUse = CallerSavedRegistersInUse();
ABI_PushRegistersAndAdjustStack(registersInUse, 0);
ABI_CallFunction(CoreTiming::Idle);
ABI_PopRegistersAndAdjustStack(registersInUse, 0);
// ! we must continue executing of the loop after exception handling, maybe there is still 0 in
// r0
// MOV(32, PPCSTATE(pc), Imm32(js.compilerPC));
WriteExceptionExit();
SetJumpTarget(noIdle);
// js.compilerPC += 8;
return;
}
// Determine whether this instruction updates inst.RA
bool update;
if (inst.OPCD == 31)
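
For reference, the block removed above matched one hard-coded three-instruction polling loop; the equivalent ARM64 check removed later in this commit spells the pattern out as lwz r0, XXXX(r13) / cmpXwi r0,0 / beq -8. A hedged sketch of those constants (the names below are mine, not Dolphin's):

// Hedged reference sketch; constant names are invented for illustration only.
constexpr u32 kLwzR0FromR13 = 0x800D0000;  // lwz r0, imm(r13) - poll a flag in the small data area
constexpr u32 kCmplwiR0Zero = 0x28000000;  // cmplwi r0, 0 (0x2C000000 = cmpwi, used on Wii)
constexpr u32 kBeqMinus8 = 0x4182fff8;     // beq -8 - loop back to the lwz while equal

The PPCAnalyzer::IsBusyWaitLoop pass added later in this commit detects this shape, along with other register-local polling loops, without hard-coded encodings.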

View File

@ -76,12 +76,6 @@ void JitArm64::bx(UGeckoInstruction inst)
INSTRUCTION_START
JITDISABLE(bJITBranchOff);
u32 destination;
if (inst.AA)
destination = SignExt26(inst.LI << 2);
else
destination = js.compilerPC + SignExt26(inst.LI << 2);
if (inst.LK)
{
ARM64Reg WA = gpr.GetReg();
@ -105,7 +99,7 @@ void JitArm64::bx(UGeckoInstruction inst)
gpr.Flush(FlushMode::FLUSH_ALL);
fpr.Flush(FlushMode::FLUSH_ALL);
- if (destination == js.compilerPC)
+ if (js.op->branchIsIdleLoop)
{
// make idle loops go faster
ARM64Reg WA = gpr.GetReg();
@ -115,11 +109,11 @@ void JitArm64::bx(UGeckoInstruction inst)
BLR(XA);
gpr.Unlock(WA);
- WriteExceptionExit(js.compilerPC);
+ WriteExceptionExit(js.op->branchTo);
return;
}
- WriteExit(destination, inst.LK, js.compilerPC + 4);
+ WriteExit(js.op->branchTo, inst.LK, js.compilerPC + 4);
}
void JitArm64::bcx(UGeckoInstruction inst)
@ -160,16 +154,25 @@ void JitArm64::bcx(UGeckoInstruction inst)
}
gpr.Unlock(WA);
u32 destination;
if (inst.AA)
destination = SignExt16(inst.BD << 2);
else
destination = js.compilerPC + SignExt16(inst.BD << 2);
gpr.Flush(FlushMode::FLUSH_MAINTAIN_STATE);
fpr.Flush(FlushMode::FLUSH_MAINTAIN_STATE);
- WriteExit(destination, inst.LK, js.compilerPC + 4);
+ if (js.op->branchIsIdleLoop)
{
// make idle loops go faster
ARM64Reg WA = gpr.GetReg();
ARM64Reg XA = EncodeRegTo64(WA);
MOVP2R(XA, &CoreTiming::Idle);
BLR(XA);
gpr.Unlock(WA);
WriteExceptionExit(js.op->branchTo);
}
else
{
WriteExit(js.op->branchTo, inst.LK, js.compilerPC + 4);
}
SwitchToNearCode();

View File

@ -346,37 +346,6 @@ void JitArm64::lXX(UGeckoInstruction inst)
}
SafeLoadToReg(d, update ? a : (a ? a : -1), offsetReg, flags, offset, update);
// LWZ idle skipping
if (inst.OPCD == 32 && CanMergeNextInstructions(2) &&
(inst.hex & 0xFFFF0000) == 0x800D0000 && // lwz r0, XXXX(r13)
(js.op[1].inst.hex == 0x28000000 ||
(SConfig::GetInstance().bWii && js.op[1].inst.hex == 0x2C000000)) && // cmpXwi r0,0
js.op[2].inst.hex == 0x4182fff8) // beq -8
{
ARM64Reg WA = gpr.GetReg();
ARM64Reg XA = EncodeRegTo64(WA);
// if it's still 0, we can wait until the next event
FixupBranch noIdle = CBNZ(gpr.R(d));
FixupBranch far = B();
SwitchToFarCode();
SetJumpTarget(far);
gpr.Flush(FLUSH_MAINTAIN_STATE);
fpr.Flush(FLUSH_MAINTAIN_STATE);
MOVP2R(XA, &CoreTiming::Idle);
BLR(XA);
gpr.Unlock(WA);
WriteExceptionExit(js.compilerPC);
SwitchToNearCode();
SetJumpTarget(noIdle);
}
}
void JitArm64::stX(UGeckoInstruction inst)

View File

@ -640,6 +640,90 @@ void PPCAnalyzer::SetInstructionStats(CodeBlock* block, CodeOp* code, const Gekk
code->outputCR0 = true;
code->outputCR1 = true;
}
code->branchUsesCtr = false;
code->branchTo = UINT32_MAX;
// For branch with immediate addresses (bx/bcx), compute the destination.
if (code->inst.OPCD == 18) // bx
{
if (code->inst.AA) // absolute
code->branchTo = SignExt26(code->inst.LI << 2);
else
code->branchTo = code->address + SignExt26(code->inst.LI << 2);
}
else if (code->inst.OPCD == 16) // bcx
{
if (code->inst.AA) // absolute
code->branchTo = SignExt16(code->inst.BD << 2);
else
code->branchTo = code->address + SignExt16(code->inst.BD << 2);
if (!(code->inst.BO & BO_DONT_DECREMENT_FLAG))
code->branchUsesCtr = true;
}
else if (code->inst.OPCD == 19 && code->inst.SUBOP10 == 16) // bclrx
{
if (!(code->inst.BO & BO_DONT_DECREMENT_FLAG))
code->branchUsesCtr = true;
}
else if (code->inst.OPCD == 19 && code->inst.SUBOP10 == 528) // bcctrx
{
if (!(code->inst.BO & BO_DONT_DECREMENT_FLAG))
code->branchUsesCtr = true;
}
}
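
The branch targets computed above come from sign-extending the shifted LI and BD immediates. A minimal standalone sketch of that arithmetic, assuming helpers equivalent to the SignExt26/SignExt16 used in the removed JIT code (the implementations below are illustrative, not Dolphin's actual definitions):

#include <cstdint>

// Sketch only: LI is a 24-bit field and BD a 14-bit field; both are shifted
// left by 2 first, so the helpers sign-extend 26-bit and 16-bit values.
static int32_t SignExt26(uint32_t x)
{
  return (x & 0x02000000) ? static_cast<int32_t>(x | 0xFC000000) : static_cast<int32_t>(x);
}
static int32_t SignExt16(uint32_t x)
{
  return static_cast<int32_t>(static_cast<int16_t>(x));
}

int main()
{
  const uint32_t address = 0x80001234;  // hypothetical instruction address
  // A relative bx with LI == 0 branches to itself: branchTo == address,
  // which is exactly the branch-to-self case branchIsIdleLoop looks for.
  const uint32_t bx_to_self = address + SignExt26(0 << 2);
  // A bcx with BD == -2 (the "beq -8" of the classic polling loop):
  const uint32_t bcx_back_8 = address + SignExt16(uint32_t(0x3FFE) << 2);
  return (bx_to_self == address && bcx_back_8 == address - 8) ? 0 : 1;
}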
bool PPCAnalyzer::IsBusyWaitLoop(CodeBlock* block, CodeOp* code, size_t instructions)
{
// Very basic algorithm to detect busy wait loops:
// * It loops to itself and does not contain any other branches.
// * It does not write to memory.
// * It only reads from registers it wrote to earlier in the loop, or it
// does not write to these registers.
//
// Would benefit a lot from basic inlining support - a lot of the most
// used busy loops are DSP register interactions, which are bl/cmp/bne
// (with the bl target a pure function that follows the above rules). We
// don't detect these at the moment.
std::bitset<32> write_disallowed_regs;
std::bitset<32> written_regs;
for (size_t i = 0; i <= instructions; ++i)
{
if (code[i].opinfo->type == OpType::Branch)
{
if (code[i].branchUsesCtr)
return false;
if (code[i].branchTo == block->m_address && i == instructions)
return true;
}
else if (code[i].opinfo->type != OpType::Integer && code[i].opinfo->type != OpType::Load)
{
// In the future, some subsets of other instruction types might get
// supported. Right now, only try loops that have this very
// restricted instruction set.
return false;
}
else
{
for (int reg : code[i].regsIn)
{
if (reg == -1)
continue;
if (written_regs[reg])
continue;
write_disallowed_regs[reg] = true;
}
for (int reg : code[i].regsOut)
{
if (reg == -1)
continue;
if (write_disallowed_regs[reg])
return false;
written_regs[reg] = true;
}
}
}
return false;
}
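
To make the register bookkeeping above concrete, here is a hedged standalone toy version of the written_regs / write_disallowed_regs rule. ToyOp and LoopBodyIsIdle are invented names with simplified types; the real function also checks opinfo->type and that the final instruction branches back to the block start.

#include <bitset>
#include <vector>

// Toy sketch of the busy-wait register rule; not Dolphin's actual CodeOp.
struct ToyOp
{
  std::vector<int> regs_in;
  std::vector<int> regs_out;
};

static bool LoopBodyIsIdle(const std::vector<ToyOp>& body)
{
  std::bitset<32> write_disallowed_regs;
  std::bitset<32> written_regs;
  for (const ToyOp& op : body)
  {
    for (const int reg : op.regs_in)
    {
      // A register read before the loop has written it carries state from
      // outside the loop, so the loop must never overwrite it.
      if (!written_regs[reg])
        write_disallowed_regs[reg] = true;
    }
    for (const int reg : op.regs_out)
    {
      // Overwriting such a register would make iterations differ: not idle.
      if (write_disallowed_regs[reg])
        return false;
      written_regs[reg] = true;
    }
  }
  return true;
}

int main()
{
  // lwz r0, 0(r13); cmpwi r0, 0 -> reads r13 then r0, writes only r0: idle.
  const std::vector<ToyOp> poll_loop{{{13}, {0}}, {{0}, {}}};
  // addi r3, r3, 1 -> reads and writes a register live before the loop: not idle.
  const std::vector<ToyOp> counting_loop{{{3}, {3}}};
  return (LoopBodyIsIdle(poll_loop) && !LoopBodyIsIdle(counting_loop)) ? 0 : 1;
}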
u32 PPCAnalyzer::Analyze(u32 address, CodeBlock* block, CodeBuffer* buffer, std::size_t block_size)
@ -692,16 +776,16 @@ u32 PPCAnalyzer::Analyze(u32 address, CodeBlock* block, CodeBuffer* buffer, std:
code[i].opinfo = opinfo;
code[i].address = address;
code[i].inst = inst;
code[i].branchTo = UINT32_MAX;
code[i].branchToIndex = UINT32_MAX;
code[i].skip = false;
block->m_stats->numCycles += opinfo->numCycles;
block->m_physical_addresses.insert(result.physical_address);
SetInstructionStats(block, &code[i], opinfo, static_cast<u32>(i));
code[i].branchIsIdleLoop =
code[i].branchTo == block->m_address && IsBusyWaitLoop(block, code, i);
bool follow = false;
u32 destination = 0;
bool conditional_continue = false;
@ -715,7 +799,6 @@ u32 PPCAnalyzer::Analyze(u32 address, CodeBlock* block, CodeBuffer* buffer, std:
{
// Always follow BX instructions.
follow = true;
destination = SignExt26(inst.LI << 2) + (inst.AA ? 0 : address);
if (inst.LK)
{
found_call = true;
@ -727,7 +810,6 @@ u32 PPCAnalyzer::Analyze(u32 address, CodeBlock* block, CodeBuffer* buffer, std:
{
// Always follow unconditional BCX instructions, but they are very rare.
follow = true;
destination = SignExt16(inst.BD << 2) + (inst.AA ? 0 : address);
if (inst.LK)
{
found_call = true;
@ -744,7 +826,7 @@ u32 PPCAnalyzer::Analyze(u32 address, CodeBlock* block, CodeBuffer* buffer, std:
// the LR value on the stack as there are no spare registers. So we'd need
// to check all store instruction to not alias with the stack.
follow = true;
- destination = code[caller].address + 4;
+ code[i].branchTo = code[caller].address + 4;
found_call = false;
code[i].skip = true;
@ -796,7 +878,7 @@ u32 PPCAnalyzer::Analyze(u32 address, CodeBlock* block, CodeBuffer* buffer, std:
{
// Follow the unconditional branch.
numFollows++;
- address = destination;
+ address = code[i].branchTo;
}
else
{

View File

@ -27,13 +27,14 @@ struct CodeOp // 16B
UGeckoInstruction inst;
GekkoOPInfo* opinfo;
u32 address;
- u32 branchTo; // if 0, not a branch
+ u32 branchTo; // if UINT32_MAX, not a branch
int branchToIndex; // index of target block
BitSet32 regsOut;
BitSet32 regsIn;
BitSet32 fregsIn;
s8 fregOut;
bool isBranchTarget;
bool branchUsesCtr;
bool branchIsIdleLoop;
bool wantsCR0;
bool wantsCR1;
bool wantsFPRF;
@ -213,6 +214,7 @@ private:
void ReorderInstructionsCore(u32 instructions, CodeOp* code, bool reverse, ReorderType type);
void ReorderInstructions(u32 instructions, CodeOp* code);
void SetInstructionStats(CodeBlock* block, CodeOp* code, const GekkoOPInfo* opinfo, u32 index);
bool IsBusyWaitLoop(CodeBlock* block, CodeOp* code, size_t instructions);
// Options
u32 m_options = 0;