JitArm64: Encode logical immediates at compile-time where possible

Manually encoding and decoding logical immediates is error-prone.
Using ORRI2R and friends lets us avoid doing the work manually,
but in exchange, there is a runtime performance penalty. It's
probably rather small, but still, it would be nice if we could
let the compiler do the work at compile-time. And that's exactly
what this commit does, so now I have no excuse for trying to
manually write logical immediates anymore.
Author: JosJuice
Date:   2021-07-06 16:53:04 +02:00
Parent: 10861ed8ce
Commit: 9e80db123f
9 changed files with 67 additions and 67 deletions
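
For a sense of the change in call style, here is a minimal sketch (not taken from the diff; the register and constant names are borrowed from the hunks below for illustration, and the LogicalImm(value, size) shape matches the call sites throughout):

    // Before: ORRI2R decides at runtime whether the immediate is encodable,
    // and may need a scratch register to materialize it when it is not.
    ORRI2R(WA, WA, EXCEPTION_SYSCALL, scratch);

    // After: LogicalImm is constexpr, so for a constant immediate the encoding
    // is computed while compiling Dolphin itself, not every time the JIT runs.
    ORR(WA, WA, LogicalImm(EXCEPTION_SYSCALL, 32));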


@@ -1124,17 +1124,16 @@ public:
 }
 // Wrapper around AND x, y, imm etc.
-// If you are sure the imm will work, no need to pass a scratch register.
-// If the imm is constant, preferably call EncodeLogicalImm directly instead of using these
-// functions, as this lets the computation of the imm encoding be performed during compilation.
-void ANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG);
-void ANDSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG);
-void TSTI2R(ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG)
+// If you are sure the imm will work, preferably construct a LogicalImm directly instead,
+// since that is constexpr and thus can be done at compile-time for constant values.
+void ANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch);
+void ANDSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch);
+void TSTI2R(ARM64Reg Rn, u64 imm, ARM64Reg scratch)
 {
 ANDSI2R(Is64Bit(Rn) ? ARM64Reg::ZR : ARM64Reg::WZR, Rn, imm, scratch);
 }
-void ORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG);
-void EORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG);
+void ORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch);
+void EORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch);
 void ADDI2R_internal(ARM64Reg Rd, ARM64Reg Rn, u64 imm, bool negative, bool flags,
                      ARM64Reg scratch);
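
Background, not part of the diff: an A64 logical immediate must be a contiguous run of ones, rotated, and repeated across the register in power-of-two-sized elements; all-zeros and all-ones are not encodable. A rough constexpr validity check in the spirit of what LogicalImm has to do (helper names are illustrative, not Dolphin's):

    #include <bit>
    #include <cstdint>

    // True if x, viewed as an e-bit element, is a single circular run of ones.
    constexpr bool IsRotatedRunOfOnes(uint64_t x, unsigned e)
    {
      const uint64_t mask = e == 64 ? ~uint64_t{0} : (uint64_t{1} << e) - 1;
      x &= mask;
      if (x == 0 || x == mask)
        return false;
      const uint64_t ror1 = ((x >> 1) | (x << (e - 1))) & mask;  // rotate right by 1
      return std::popcount(x ^ ror1) == 2;  // exactly one 0->1 and one 1->0 edge
    }

    // True if value is encodable as a 64-bit logical immediate.
    constexpr bool IsLogicalImm64(uint64_t value)
    {
      for (unsigned e = 2; e <= 64; e *= 2)
      {
        const uint64_t mask = e == 64 ? ~uint64_t{0} : (uint64_t{1} << e) - 1;
        bool repeats = true;
        for (unsigned i = e; i < 64; i += e)
          repeats = repeats && ((value >> i) & mask) == (value & mask);
        if (repeats && IsRotatedRunOfOnes(value, e))
          return true;
      }
      return false;
    }

    static_assert(IsLogicalImm64(0x8000'0000'0000'0000));   // single bit: run of length 1
    static_assert(IsLogicalImm64(~uint64_t{31}));           // ~31: ones from bit 5 upwards
    static_assert(IsLogicalImm64(0xAAAA'AAAA'AAAA'AAAA));   // "10" repeated as 2-bit elements
    static_assert(!IsLogicalImm64(0xAAAA'AAAA'AAAA'AAAB));  // hence the trick in the mfspr hunk
    static_assert(!IsLogicalImm64(0));                      // all-zeros cannot be encoded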


@@ -799,7 +799,7 @@ void JitArm64::DoJit(u32 em_address, JitBlock* b, u32 nextPC)
 fpr.Flush(FlushMode::MaintainState);
 LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
-ORRI2R(WA, WA, EXCEPTION_FPU_UNAVAILABLE);
+ORR(WA, WA, LogicalImm(EXCEPTION_FPU_UNAVAILABLE, 32));
 STR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
 gpr.Unlock(WA);


@@ -24,7 +24,7 @@ void JitArm64::sc(UGeckoInstruction inst)
 ARM64Reg WA = gpr.GetReg();
 LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
-ORRI2R(WA, WA, EXCEPTION_SYSCALL);
+ORR(WA, WA, LogicalImm(EXCEPTION_SYSCALL, 32));
 STR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
 gpr.Unlock(WA);


@@ -401,7 +401,7 @@ void JitArm64::FloatCompare(UGeckoInstruction inst, bool upper)
 {
 fpscr_reg = gpr.GetReg();
 LDR(IndexType::Unsigned, fpscr_reg, PPC_REG, PPCSTATE_OFF(fpscr));
-ANDI2R(fpscr_reg, fpscr_reg, ~FPCC_MASK);
+AND(fpscr_reg, fpscr_reg, LogicalImm(~FPCC_MASK, 32));
 }
 ARM64Reg V0Q = ARM64Reg::INVALID_REG;
@@ -450,7 +450,7 @@ void JitArm64::FloatCompare(UGeckoInstruction inst, bool upper)
 // A == B
 ORR(XA, XA, 64 - 63, 0, true);
 if (fprf)
-ORRI2R(fpscr_reg, fpscr_reg, PowerPC::CR_EQ << FPRF_SHIFT);
+ORR(fpscr_reg, fpscr_reg, LogicalImm(PowerPC::CR_EQ << FPRF_SHIFT, 32));
 continue1 = B();
@@ -458,7 +458,7 @@ void JitArm64::FloatCompare(UGeckoInstruction inst, bool upper)
 MOVI2R(XA, PowerPC::ConditionRegister::PPCToInternal(PowerPC::CR_SO));
 if (fprf)
-ORRI2R(fpscr_reg, fpscr_reg, PowerPC::CR_SO << FPRF_SHIFT);
+ORR(fpscr_reg, fpscr_reg, LogicalImm(PowerPC::CR_SO << FPRF_SHIFT, 32));
 if (a != b)
 {
@@ -467,7 +467,7 @@ void JitArm64::FloatCompare(UGeckoInstruction inst, bool upper)
 SetJumpTarget(pGreater);
 ORR(XA, XA, 0, 0, true);
 if (fprf)
-ORRI2R(fpscr_reg, fpscr_reg, PowerPC::CR_GT << FPRF_SHIFT);
+ORR(fpscr_reg, fpscr_reg, LogicalImm(PowerPC::CR_GT << FPRF_SHIFT, 32));
 continue3 = B();
@@ -475,7 +475,7 @@ void JitArm64::FloatCompare(UGeckoInstruction inst, bool upper)
 ORR(XA, XA, 64 - 62, 1, true);
 ORR(XA, XA, 0, 0, true);
 if (fprf)
-ORRI2R(fpscr_reg, fpscr_reg, PowerPC::CR_LT << FPRF_SHIFT);
+ORR(fpscr_reg, fpscr_reg, LogicalImm(PowerPC::CR_LT << FPRF_SHIFT, 32));
 SetJumpTarget(continue2);
 SetJumpTarget(continue3);
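
The raw ORR(XA, XA, ..., ..., true) calls kept in the hunks above are exactly the kind of hand-written encodings the commit message calls error-prone. As a sanity check (not part of the diff, and assuming the three integer operands are immr, imms, and the N bit of the A64 bitmask-immediate encoding, with N set meaning a 64-bit element):

    #include <cstdint>

    constexpr uint64_t RotateRight64(uint64_t value, unsigned amount)
    {
      amount &= 63;
      return amount == 0 ? value : (value >> amount) | (value << (64 - amount));
    }

    // Decodes (immr, imms) for the N == 1 case: imms is (number of ones - 1),
    // immr is the right-rotation applied to that run of ones.
    constexpr uint64_t DecodeBitmask64(unsigned immr, unsigned imms)
    {
      const unsigned ones = (imms & 0x3F) + 1;
      const uint64_t run = ones == 64 ? ~uint64_t{0} : (uint64_t{1} << ones) - 1;
      return RotateRight64(run, immr);
    }

    // The three raw encodings used in FloatCompare above:
    static_assert(DecodeBitmask64(64 - 63, 0) == uint64_t{1} << 63);  // bit 63
    static_assert(DecodeBitmask64(64 - 62, 1) == uint64_t{3} << 62);  // bits 63:62
    static_assert(DecodeBitmask64(0, 0) == uint64_t{1});              // bit 0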
@@ -532,7 +532,7 @@ void JitArm64::fctiwzx(UGeckoInstruction inst)
 const ARM64Reg WA = gpr.GetReg();
 m_float_emit.FCVTS(WA, EncodeRegToDouble(VB), RoundingMode::Z);
-ORRI2R(EncodeRegTo64(WA), EncodeRegTo64(WA), 0xFFF8'0000'0000'0000ULL);
+ORR(EncodeRegTo64(WA), EncodeRegTo64(WA), LogicalImm(0xFFF8'0000'0000'0000ULL, 64));
 m_float_emit.FMOV(EncodeRegToDouble(VD), EncodeRegTo64(WA));
 gpr.Unlock(WA);


@@ -611,7 +611,7 @@ void JitArm64::rlwinmx(UGeckoInstruction inst)
 else if (!inst.SH)
 {
 // Immediate mask
-ANDI2R(gpr.R(a), gpr.R(s), mask);
+AND(gpr.R(a), gpr.R(s), LogicalImm(mask, 32));
 }
 else if (inst.ME == 31 && 31 < inst.SH + inst.MB)
 {


@@ -550,7 +550,7 @@ void JitArm64::dcbx(UGeckoInstruction inst)
 else
 MOV(addr, gpr.R(b));
-ANDI2R(addr, addr, ~31); // mask sizeof cacheline
+AND(addr, addr, LogicalImm(~31, 32)); // mask sizeof cacheline
 BitSet32 gprs_to_push = gpr.GetCallerSavedUsed();
 BitSet32 fprs_to_push = fpr.GetCallerSavedUsed();
@@ -618,13 +618,13 @@ void JitArm64::dcbz(UGeckoInstruction inst)
 ARM64Reg base = is_imm_a ? gpr.R(b) : gpr.R(a);
 u32 imm_offset = is_imm_a ? gpr.GetImm(a) : gpr.GetImm(b);
 ADDI2R(addr_reg, base, imm_offset, addr_reg);
-ANDI2R(addr_reg, addr_reg, ~31);
+AND(addr_reg, addr_reg, LogicalImm(~31, 32));
 }
 else
 {
 // Both are registers
 ADD(addr_reg, gpr.R(a), gpr.R(b));
-ANDI2R(addr_reg, addr_reg, ~31);
+AND(addr_reg, addr_reg, LogicalImm(~31, 32));
 }
 }
 else
@@ -637,7 +637,7 @@ void JitArm64::dcbz(UGeckoInstruction inst)
 }
 else
 {
-ANDI2R(addr_reg, gpr.R(b), ~31);
+AND(addr_reg, gpr.R(b), LogicalImm(~31, 32));
 }
 }


@@ -217,7 +217,7 @@ void JitArm64::twx(UGeckoInstruction inst)
 fpr.Flush(FlushMode::MaintainState);
 LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
-ORRI2R(WA, WA, EXCEPTION_PROGRAM);
+ORR(WA, WA, LogicalImm(EXCEPTION_PROGRAM, 32));
 STR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
 gpr.Unlock(WA);
@@ -290,7 +290,7 @@ void JitArm64::mfspr(UGeckoInstruction inst)
 SUB(Xresult, Xresult, XB);
 // a / 12 = (a * 0xAAAAAAAAAAAAAAAB) >> 67
-ORRI2R(XB, ARM64Reg::ZR, 0xAAAAAAAAAAAAAAAA);
+ORR(XB, ARM64Reg::ZR, LogicalImm(0xAAAAAAAAAAAAAAAA, 64));
 ADD(XB, XB, 1);
 UMULH(Xresult, Xresult, XB);
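
An aside on why this hunk builds the multiplier in two steps: 0xAAAAAAAAAAAAAAAA ("10" repeated) is encodable as a logical immediate, but the actual magic constant 0xAAAAAAAAAAAAAAAB is not, so the code ORRs the encodable value into XB and then adds 1. A hedged sketch verifying the comment's arithmetic; UMULH produces (a * M) >> 64, and the remaining shift by 3 presumably happens in instructions outside this hunk. Uses the GCC/Clang unsigned __int128 extension:

    #include <cstdint>

    constexpr uint64_t DivideBy12(uint64_t a)
    {
      // M = 0xAAAAAAAAAAAAAAAB = ceil(2^67 / 12); the rounding error is small
      // enough that (a * M) >> 67 == a / 12 holds over the whole u64 range.
      constexpr unsigned __int128 m = 0xAAAA'AAAA'AAAA'AAABULL;
      const uint64_t high = static_cast<uint64_t>((a * m) >> 64);  // what UMULH computes
      return high >> 3;  // total shift of 64 + 3 = 67
    }

    static_assert(DivideBy12(0) == 0);
    static_assert(DivideBy12(12345 * 12 + 11) == 12345);
    static_assert(DivideBy12(~uint64_t{0}) == ~uint64_t{0} / 12);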
@@ -440,20 +440,20 @@ void JitArm64::crXXX(UGeckoInstruction inst)
 switch (bit)
 {
 case PowerPC::CR_SO_BIT:
-ANDI2R(XA, XA, ~(u64(1) << PowerPC::CR_EMU_SO_BIT));
+AND(XA, XA, LogicalImm(~(u64(1) << PowerPC::CR_EMU_SO_BIT), 64));
 break;
 case PowerPC::CR_EQ_BIT:
 FixGTBeforeSettingCRFieldBit(XA);
-ORRI2R(XA, XA, 1);
+ORR(XA, XA, LogicalImm(1, 64));
 break;
 case PowerPC::CR_GT_BIT:
-ORRI2R(XA, XA, u64(1) << 63);
+ORR(XA, XA, LogicalImm(u64(1) << 63, 64));
 break;
 case PowerPC::CR_LT_BIT:
-ANDI2R(XA, XA, ~(u64(1) << PowerPC::CR_EMU_LT_BIT));
+AND(XA, XA, LogicalImm(~(u64(1) << PowerPC::CR_EMU_LT_BIT), 64));
 break;
 }
 return;
@@ -475,23 +475,23 @@ void JitArm64::crXXX(UGeckoInstruction inst)
 switch (bit)
 {
 case PowerPC::CR_SO_BIT:
-ORRI2R(XA, XA, u64(1) << PowerPC::CR_EMU_SO_BIT);
+ORR(XA, XA, LogicalImm(u64(1) << PowerPC::CR_EMU_SO_BIT, 64));
 break;
 case PowerPC::CR_EQ_BIT:
-ANDI2R(XA, XA, 0xFFFF'FFFF'0000'0000);
+AND(XA, XA, LogicalImm(0xFFFF'FFFF'0000'0000, 64));
 break;
 case PowerPC::CR_GT_BIT:
-ANDI2R(XA, XA, ~(u64(1) << 63));
+AND(XA, XA, LogicalImm(~(u64(1) << 63), 64));
 break;
 case PowerPC::CR_LT_BIT:
-ORRI2R(XA, XA, u64(1) << PowerPC::CR_EMU_LT_BIT);
+ORR(XA, XA, LogicalImm(u64(1) << PowerPC::CR_EMU_LT_BIT, 64));
 break;
 }
-ORRI2R(XA, XA, u64(1) << 32);
+ORR(XA, XA, LogicalImm(u64(1) << 32, 64));
 return;
 }
@@ -709,12 +709,12 @@ void JitArm64::mcrfs(UGeckoInstruction inst)
 LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(fpscr));
 LSR(WCR, WA, shift);
-ANDI2R(WCR, WCR, 0xF);
+AND(WCR, WCR, LogicalImm(0xF, 32));
 if (mask != 0)
 {
 const u32 inverted_mask = ~mask;
-ANDI2R(WA, WA, inverted_mask);
+AND(WA, WA, LogicalImm(inverted_mask, 32));
 STR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(fpscr));
 }


@@ -102,7 +102,7 @@ void JitArm64::GenerateAsm()
 ARM64Reg pc_masked = ARM64Reg::W25;
 ARM64Reg cache_base = ARM64Reg::X27;
 ARM64Reg block = ARM64Reg::X30;
-ORRI2R(pc_masked, ARM64Reg::WZR, JitBaseBlockCache::FAST_BLOCK_MAP_MASK << 3);
+ORR(pc_masked, ARM64Reg::WZR, LogicalImm(JitBaseBlockCache::FAST_BLOCK_MAP_MASK << 3, 32));
 AND(pc_masked, pc_masked, DISPATCHER_PC, ArithOption(DISPATCHER_PC, ShiftType::LSL, 1));
 MOVP2R(cache_base, GetBlockCache()->GetFastBlockMap());
 LDR(block, cache_base, EncodeRegTo64(pc_masked));
@@ -116,7 +116,7 @@ void JitArm64::GenerateAsm()
 FixupBranch pc_missmatch = B(CC_NEQ);
 LDR(IndexType::Unsigned, pc_and_msr2, PPC_REG, PPCSTATE_OFF(msr));
-ANDI2R(pc_and_msr2, pc_and_msr2, JitBaseBlockCache::JIT_CACHE_MSR_MASK);
+AND(pc_and_msr2, pc_and_msr2, LogicalImm(JitBaseBlockCache::JIT_CACHE_MSR_MASK, 32));
 LDR(IndexType::Unsigned, pc_and_msr, block, offsetof(JitBlockData, msrBits));
 CMP(pc_and_msr, pc_and_msr2);
 FixupBranch msr_missmatch = B(CC_NEQ);
@@ -238,7 +238,7 @@ void JitArm64::GenerateFres()
 UBFX(ARM64Reg::X2, ARM64Reg::X1, 52, 11); // Grab the exponent
 m_float_emit.FMOV(ARM64Reg::X0, ARM64Reg::D0);
 CMP(ARM64Reg::X2, 895);
-ANDI2R(ARM64Reg::X3, ARM64Reg::X1, Common::DOUBLE_SIGN);
+AND(ARM64Reg::X3, ARM64Reg::X1, LogicalImm(Common::DOUBLE_SIGN, 64));
 FixupBranch small_exponent = B(CCFlags::CC_LO);
 MOVI2R(ARM64Reg::X4, 1148LL);
@@ -251,14 +251,14 @@ void JitArm64::GenerateFres()
 LDP(IndexType::Signed, ARM64Reg::W2, ARM64Reg::W3, ARM64Reg::X2, 0);
 UBFX(ARM64Reg::X1, ARM64Reg::X1, 37, 10); // Grab lower part of mantissa
 MOVI2R(ARM64Reg::W4, 1);
-ANDI2R(ARM64Reg::X0, ARM64Reg::X0, Common::DOUBLE_SIGN | Common::DOUBLE_EXP);
+AND(ARM64Reg::X0, ARM64Reg::X0, LogicalImm(Common::DOUBLE_SIGN | Common::DOUBLE_EXP, 64));
 MADD(ARM64Reg::W1, ARM64Reg::W3, ARM64Reg::W1, ARM64Reg::W4);
 SUB(ARM64Reg::W1, ARM64Reg::W2, ARM64Reg::W1, ArithOption(ARM64Reg::W1, ShiftType::LSR, 1));
 ORR(ARM64Reg::X0, ARM64Reg::X0, ARM64Reg::X1, ArithOption(ARM64Reg::X1, ShiftType::LSL, 29));
 RET();
 SetJumpTarget(small_exponent);
-TSTI2R(ARM64Reg::X1, Common::DOUBLE_EXP | Common::DOUBLE_FRAC);
+TST(ARM64Reg::X1, LogicalImm(Common::DOUBLE_EXP | Common::DOUBLE_FRAC, 64));
 FixupBranch zero = B(CCFlags::CC_EQ);
 MOVI2R(ARM64Reg::X4,
        Common::BitCast<u64>(static_cast<double>(std::numeric_limits<float>::max())));
@@ -289,15 +289,15 @@ void JitArm64::GenerateFrsqrte()
 // inf, even the mantissa matches. But the mantissa does not match for most other inputs, so in
 // the normal case we calculate the mantissa using the table-based algorithm from the interpreter.
-TSTI2R(ARM64Reg::X1, Common::DOUBLE_EXP | Common::DOUBLE_FRAC);
+TST(ARM64Reg::X1, LogicalImm(Common::DOUBLE_EXP | Common::DOUBLE_FRAC, 64));
 m_float_emit.FMOV(ARM64Reg::X0, ARM64Reg::D0);
 FixupBranch zero = B(CCFlags::CC_EQ);
-ANDI2R(ARM64Reg::X2, ARM64Reg::X1, Common::DOUBLE_EXP);
+AND(ARM64Reg::X2, ARM64Reg::X1, LogicalImm(Common::DOUBLE_EXP, 64));
 MOVI2R(ARM64Reg::X3, Common::DOUBLE_EXP);
 CMP(ARM64Reg::X2, ARM64Reg::X3);
 FixupBranch nan_or_inf = B(CCFlags::CC_EQ);
 FixupBranch negative = TBNZ(ARM64Reg::X1, 63);
-ANDI2R(ARM64Reg::X3, ARM64Reg::X1, Common::DOUBLE_FRAC);
+AND(ARM64Reg::X3, ARM64Reg::X1, LogicalImm(Common::DOUBLE_FRAC, 64));
 FixupBranch normal = CBNZ(ARM64Reg::X2);
 // "Normalize" denormal values
@@ -306,18 +306,18 @@ void JitArm64::GenerateFrsqrte()
 MOVI2R(ARM64Reg::X2, 0x00C0'0000'0000'0000);
 LSLV(ARM64Reg::X4, ARM64Reg::X1, ARM64Reg::X4);
 SUB(ARM64Reg::X2, ARM64Reg::X2, ARM64Reg::X3, ArithOption(ARM64Reg::X3, ShiftType::LSL, 52));
-ANDI2R(ARM64Reg::X3, ARM64Reg::X4, Common::DOUBLE_FRAC - 1);
+AND(ARM64Reg::X3, ARM64Reg::X4, LogicalImm(Common::DOUBLE_FRAC - 1, 64));
 SetJumpTarget(normal);
 LSR(ARM64Reg::X2, ARM64Reg::X2, 48);
-ANDI2R(ARM64Reg::X2, ARM64Reg::X2, 0x10);
+AND(ARM64Reg::X2, ARM64Reg::X2, LogicalImm(0x10, 64));
 MOVP2R(ARM64Reg::X1, &Common::frsqrte_expected);
 ORR(ARM64Reg::X2, ARM64Reg::X2, ARM64Reg::X3, ArithOption(ARM64Reg::X8, ShiftType::LSR, 48));
-EORI2R(ARM64Reg::X2, ARM64Reg::X2, 0x10);
+EOR(ARM64Reg::X2, ARM64Reg::X2, LogicalImm(0x10, 64));
 ADD(ARM64Reg::X2, ARM64Reg::X1, ARM64Reg::X2, ArithOption(ARM64Reg::X2, ShiftType::LSL, 3));
 LDP(IndexType::Signed, ARM64Reg::W1, ARM64Reg::W2, ARM64Reg::X2, 0);
 UBFX(ARM64Reg::X3, ARM64Reg::X3, 37, 11);
-ANDI2R(ARM64Reg::X0, ARM64Reg::X0, Common::DOUBLE_SIGN | Common::DOUBLE_EXP);
+AND(ARM64Reg::X0, ARM64Reg::X0, LogicalImm(Common::DOUBLE_SIGN | Common::DOUBLE_EXP, 64));
 MSUB(ARM64Reg::W3, ARM64Reg::W3, ARM64Reg::W2, ARM64Reg::W1);
 ORR(ARM64Reg::X0, ARM64Reg::X0, ARM64Reg::X3, ArithOption(ARM64Reg::X3, ShiftType::LSL, 26));
 RET();
@@ -354,17 +354,17 @@ void JitArm64::GenerateConvertDoubleToSingle()
 LSR(ARM64Reg::X1, ARM64Reg::X0, 32);
 FixupBranch denormal = B(CCFlags::CC_LS);
-ANDI2R(ARM64Reg::X1, ARM64Reg::X1, 0xc0000000);
+AND(ARM64Reg::X1, ARM64Reg::X1, LogicalImm(0xc0000000, 64));
 BFXIL(ARM64Reg::X1, ARM64Reg::X0, 29, 30);
 RET();
 SetJumpTarget(denormal);
 LSR(ARM64Reg::X3, ARM64Reg::X0, 21);
 MOVZ(ARM64Reg::X0, 905);
-ORRI2R(ARM64Reg::W3, ARM64Reg::W3, 0x80000000);
+ORR(ARM64Reg::W3, ARM64Reg::W3, LogicalImm(0x80000000, 32));
 SUB(ARM64Reg::W2, ARM64Reg::W0, ARM64Reg::W2);
 LSRV(ARM64Reg::W2, ARM64Reg::W3, ARM64Reg::W2);
-ANDI2R(ARM64Reg::X3, ARM64Reg::X1, 0x80000000);
+AND(ARM64Reg::X3, ARM64Reg::X1, LogicalImm(0x80000000, 64));
 ORR(ARM64Reg::X1, ARM64Reg::X3, ARM64Reg::X2);
 RET();
 }
@@ -375,7 +375,7 @@ void JitArm64::GenerateConvertSingleToDouble()
 UBFX(ARM64Reg::W1, ARM64Reg::W0, 23, 8);
 FixupBranch normal_or_nan = CBNZ(ARM64Reg::W1);
-ANDI2R(ARM64Reg::W1, ARM64Reg::W0, 0x007fffff);
+AND(ARM64Reg::W1, ARM64Reg::W0, LogicalImm(0x007fffff, 32));
 FixupBranch denormal = CBNZ(ARM64Reg::W1);
 // Zero
@@ -383,10 +383,10 @@ void JitArm64::GenerateConvertSingleToDouble()
 RET();
 SetJumpTarget(denormal);
-ANDI2R(ARM64Reg::W2, ARM64Reg::W0, 0x80000000);
+AND(ARM64Reg::W2, ARM64Reg::W0, LogicalImm(0x80000000, 32));
 CLZ(ARM64Reg::X3, ARM64Reg::X1);
 LSL(ARM64Reg::X2, ARM64Reg::X2, 32);
-ORRI2R(ARM64Reg::X4, ARM64Reg::X3, 0xffffffffffffffc0);
+ORR(ARM64Reg::X4, ARM64Reg::X3, LogicalImm(0xffffffffffffffc0, 64));
 SUB(ARM64Reg::X2, ARM64Reg::X2, ARM64Reg::X3, ArithOption(ARM64Reg::X3, ShiftType::LSL, 52));
 ADD(ARM64Reg::X3, ARM64Reg::X4, 23);
 LSLV(ARM64Reg::X1, ARM64Reg::X1, ARM64Reg::X3);
@@ -397,12 +397,12 @@ void JitArm64::GenerateConvertSingleToDouble()
 SetJumpTarget(normal_or_nan);
 CMP(ARM64Reg::W1, 0xff);
-ANDI2R(ARM64Reg::W2, ARM64Reg::W0, 0x40000000);
+AND(ARM64Reg::W2, ARM64Reg::W0, LogicalImm(0x40000000, 32));
 CSET(ARM64Reg::W4, CCFlags::CC_NEQ);
-ANDI2R(ARM64Reg::W3, ARM64Reg::W0, 0xc0000000);
+AND(ARM64Reg::W3, ARM64Reg::W0, LogicalImm(0xc0000000, 32));
 EOR(ARM64Reg::W2, ARM64Reg::W4, ARM64Reg::W2, ArithOption(ARM64Reg::W2, ShiftType::LSR, 30));
 MOVI2R(ARM64Reg::X1, 0x3800000000000000);
-ANDI2R(ARM64Reg::W4, ARM64Reg::W0, 0x3fffffff);
+AND(ARM64Reg::W4, ARM64Reg::W0, LogicalImm(0x3fffffff, 32));
 LSL(ARM64Reg::X3, ARM64Reg::X3, 32);
 CMP(ARM64Reg::W2, 0);
 CSEL(ARM64Reg::X1, ARM64Reg::X1, ARM64Reg::ZR, CCFlags::CC_NEQ);
@@ -423,9 +423,10 @@ void JitArm64::GenerateFPRF(bool single)
 constexpr ARM64Reg fprf_reg = ARM64Reg::W3;
 constexpr ARM64Reg fpscr_reg = ARM64Reg::W4;
-const auto INPUT_EXP_MASK = single ? Common::FLOAT_EXP : Common::DOUBLE_EXP;
-const auto INPUT_FRAC_MASK = single ? Common::FLOAT_FRAC : Common::DOUBLE_FRAC;
-constexpr u32 OUTPUT_SIGN_MASK = 0xC;
+const int input_size = single ? 32 : 64;
+const u64 input_exp_mask = single ? Common::FLOAT_EXP : Common::DOUBLE_EXP;
+const u64 input_frac_mask = single ? Common::FLOAT_FRAC : Common::DOUBLE_FRAC;
+constexpr u32 output_sign_mask = 0xC;
 // This code is duplicated for the most common cases for performance.
 // For the less common cases, we branch to an existing copy of this code.
@@ -439,7 +440,7 @@ void JitArm64::GenerateFPRF(bool single)
 LDR(IndexType::Unsigned, fpscr_reg, PPC_REG, PPCSTATE_OFF(fpscr));
 CMP(input_reg, 0); // Grab sign bit (conveniently the same bit for floats as for integers)
-ANDI2R(exp_reg, input_reg, INPUT_EXP_MASK); // Grab exponent
+AND(exp_reg, input_reg, LogicalImm(input_exp_mask, input_size)); // Grab exponent
 // Most branches handle the sign in the same way. Perform that handling before branching
 MOVI2R(ARM64Reg::W3, Common::PPC_FPCLASS_PN);
@@ -449,7 +450,7 @@ void JitArm64::GenerateFPRF(bool single)
 FixupBranch zero_or_denormal = CBZ(exp_reg);
 // exp != 0
-MOVI2R(temp_reg, INPUT_EXP_MASK);
+MOVI2R(temp_reg, input_exp_mask);
 CMP(exp_reg, temp_reg);
 FixupBranch nan_or_inf = B(CCFlags::CC_EQ);
@@ -458,25 +459,25 @@ void JitArm64::GenerateFPRF(bool single)
 // exp == 0
 SetJumpTarget(zero_or_denormal);
-TSTI2R(input_reg, INPUT_FRAC_MASK);
+TST(input_reg, LogicalImm(input_frac_mask, input_size));
 FixupBranch denormal = B(CCFlags::CC_NEQ);
 // exp == 0 && frac == 0
 LSR(ARM64Reg::W1, fprf_reg, 3);
-MOVI2R(fprf_reg, Common::PPC_FPCLASS_PZ & ~OUTPUT_SIGN_MASK);
+MOVI2R(fprf_reg, Common::PPC_FPCLASS_PZ & ~output_sign_mask);
 BFI(fprf_reg, ARM64Reg::W1, 4, 1);
 const u8* write_fprf_and_ret = GetCodePtr();
 emit_write_fprf_and_ret();
 // exp == 0 && frac != 0
 SetJumpTarget(denormal);
-ORRI2R(fprf_reg, fprf_reg, Common::PPC_FPCLASS_PD & ~OUTPUT_SIGN_MASK);
+ORR(fprf_reg, fprf_reg, LogicalImm(Common::PPC_FPCLASS_PD & ~output_sign_mask, 32));
 B(write_fprf_and_ret);
 // exp == EXP_MASK
 SetJumpTarget(nan_or_inf);
-TSTI2R(input_reg, INPUT_FRAC_MASK);
-ORRI2R(ARM64Reg::W1, fprf_reg, Common::PPC_FPCLASS_PINF & ~OUTPUT_SIGN_MASK);
+TST(input_reg, LogicalImm(input_frac_mask, input_size));
+ORR(ARM64Reg::W1, fprf_reg, LogicalImm(Common::PPC_FPCLASS_PINF & ~output_sign_mask, 32));
 MOVI2R(ARM64Reg::W2, Common::PPC_FPCLASS_QNAN);
 CSEL(fprf_reg, ARM64Reg::W1, ARM64Reg::W2, CCFlags::CC_EQ);
 B(write_fprf_and_ret);


@@ -244,7 +244,7 @@ void VertexLoaderARM64::ReadColor(VertexComponentFormat attribute, ColorFormat f
 LDR(IndexType::Unsigned, scratch2_reg, src_reg, offset);
 if (format != ColorFormat::RGBA8888)
-ORRI2R(scratch2_reg, scratch2_reg, 0xFF000000);
+ORR(scratch2_reg, scratch2_reg, LogicalImm(0xFF000000, 32));
 STR(IndexType::Unsigned, scratch2_reg, dst_reg, m_dst_ofs);
 load_bytes = format == ColorFormat::RGB888 ? 3 : 4;
 break;
@@ -279,7 +279,7 @@ void VertexLoaderARM64::ReadColor(VertexComponentFormat attribute, ColorFormat f
 ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSR, 2));
 // A
-ORRI2R(scratch1_reg, scratch1_reg, 0xFF000000);
+ORR(scratch1_reg, scratch1_reg, LogicalImm(0xFF000000, 32));
 STR(IndexType::Unsigned, scratch1_reg, dst_reg, m_dst_ofs);
 load_bytes = 2;