Merge pull request #9446 from Dentomologist/convert_shifttype_to_enum_class

Arm64Emitter: Convert ShiftType to enum class
Commit 04ccd4cb80 by LC, 2021-01-18 05:26:22 -05:00, committed by GitHub
6 changed files with 105 additions and 77 deletions


@@ -1263,7 +1263,7 @@ void ARM64XEmitter::ISB(BarrierType type)
// Add/Subtract (extended register)
void ARM64XEmitter::ADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
- ADD(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0));
+ ADD(Rd, Rn, Rm, ArithOption(Rd, ShiftType::LSL, 0));
}
void ARM64XEmitter::ADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Option)
@@ -1273,7 +1273,7 @@ void ARM64XEmitter::ADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Option)
void ARM64XEmitter::ADDS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
- EncodeArithmeticInst(0, true, Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0));
+ EncodeArithmeticInst(0, true, Rd, Rn, Rm, ArithOption(Rd, ShiftType::LSL, 0));
}
void ARM64XEmitter::ADDS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Option)
@@ -1283,7 +1283,7 @@ void ARM64XEmitter::ADDS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Option)
void ARM64XEmitter::SUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
- SUB(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0));
+ SUB(Rd, Rn, Rm, ArithOption(Rd, ShiftType::LSL, 0));
}
void ARM64XEmitter::SUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Option)
@@ -1293,7 +1293,7 @@ void ARM64XEmitter::SUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Option)
void ARM64XEmitter::SUBS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{
- EncodeArithmeticInst(1, true, Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0));
+ EncodeArithmeticInst(1, true, Rd, Rn, Rm, ArithOption(Rd, ShiftType::LSL, 0));
}
void ARM64XEmitter::SUBS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Option)
@@ -1303,7 +1303,7 @@ void ARM64XEmitter::SUBS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Option)
void ARM64XEmitter::CMN(ARM64Reg Rn, ARM64Reg Rm)
{
- CMN(Rn, Rm, ArithOption(Rn, ST_LSL, 0));
+ CMN(Rn, Rm, ArithOption(Rn, ShiftType::LSL, 0));
}
void ARM64XEmitter::CMN(ARM64Reg Rn, ARM64Reg Rm, ArithOption Option)
@@ -1313,7 +1313,7 @@ void ARM64XEmitter::CMN(ARM64Reg Rn, ARM64Reg Rm, ArithOption Option)
void ARM64XEmitter::CMP(ARM64Reg Rn, ARM64Reg Rm)
{
- CMP(Rn, Rm, ArithOption(Rn, ST_LSL, 0));
+ CMP(Rn, Rm, ArithOption(Rn, ShiftType::LSL, 0));
}
void ARM64XEmitter::CMP(ARM64Reg Rn, ARM64Reg Rm, ArithOption Option)
@@ -1553,13 +1553,13 @@ void ARM64XEmitter::MOV(ARM64Reg Rd, ARM64Reg Rm, ArithOption Shift)
void ARM64XEmitter::MOV(ARM64Reg Rd, ARM64Reg Rm)
{
if (IsGPR(Rd) && IsGPR(Rm))
- ORR(Rd, Is64Bit(Rd) ? ZR : WZR, Rm, ArithOption(Rm, ST_LSL, 0));
+ ORR(Rd, Is64Bit(Rd) ? ZR : WZR, Rm, ArithOption(Rm, ShiftType::LSL, 0));
else
ASSERT_MSG(DYNA_REC, false, "Non-GPRs not supported in MOV");
}
void ARM64XEmitter::MVN(ARM64Reg Rd, ARM64Reg Rm)
{
- ORN(Rd, Is64Bit(Rd) ? ZR : WZR, Rm, ArithOption(Rm, ST_LSL, 0));
+ ORN(Rd, Is64Bit(Rd) ? ZR : WZR, Rm, ArithOption(Rm, ShiftType::LSL, 0));
}
void ARM64XEmitter::LSL(ARM64Reg Rd, ARM64Reg Rm, int shift)
{
@@ -2016,7 +2016,7 @@ void ARM64XEmitter::MOVI2R(ARM64Reg Rd, u64 imm, bool optimize)
// Max unsigned value (or if signed, -1)
// Set to ~ZR
ARM64Reg ZR = Is64Bit(Rd) ? SP : WSP;
- ORN(Rd, ZR, ZR, ArithOption(ZR, ST_LSL, 0));
+ ORN(Rd, ZR, ZR, ArithOption(ZR, ShiftType::LSL, 0));
return;
}
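
The MOV, MVN, and MOVI2R paths above all lean on the same A64 identity: a logical operation against the zero register. As a sketch, in comments:

// MOV Rd, Rm is the alias ORR Rd, ZR, Rm:    0 |  Rm == Rm
// MVN Rd, Rm is the alias ORN Rd, ZR, Rm:    0 | ~Rm == ~Rm
// MOVI2R's all-ones case is ORN Rd, ZR, ZR:  0 | ~0  == max unsigned (-1 if signed)
// Register number 31 encodes ZR in these operand positions, which is why the
// code reuses the SP/WSP enumerators under the local name ZR.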


@@ -277,12 +277,16 @@ constexpr ARM64Reg EncodeRegToQuad(ARM64Reg reg)
return static_cast<ARM64Reg>(reg | 0xC0);
}
- enum ShiftType
+ enum class ShiftType
{
- ST_LSL = 0,
- ST_LSR = 1,
- ST_ASR = 2,
- ST_ROR = 3,
+ // Logical Shift Left
+ LSL = 0,
+ // Logical Shift Right
+ LSR = 1,
+ // Arithmetic Shift Right
+ ASR = 2,
+ // Rotate Right
+ ROR = 3,
};
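
Because ShiftType is now an enum class, its enumerators no longer convert implicitly to integers, so any encoder that packs the shift type into instruction bits needs an explicit cast (as GetData does below). A minimal before/after sketch:

// Plain enum: the implicit conversion compiled silently.
//   u32 bits = ST_LSL << 22;
// enum class: the conversion has to be spelled out.
u32 bits = static_cast<u32>(ShiftType::LSL) << 22;
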
enum class IndexType
@@ -437,7 +441,7 @@ public:
m_width = WidthSpecifier::Width32Bit;
m_extend = ExtendSpecifier::UXTW;
}
- m_shifttype = ST_LSL;
+ m_shifttype = ShiftType::LSL;
}
ArithOption(ARM64Reg Rd, ShiftType shift_type, u32 shift)
{
@@ -466,7 +470,7 @@ public:
case TypeSpecifier::ExtendedReg:
return (static_cast<u32>(m_extend) << 13) | (m_shift << 10);
case TypeSpecifier::ShiftedReg:
- return (m_shifttype << 22) | (m_shift << 10);
+ return (static_cast<u32>(m_shifttype) << 22) | (m_shift << 10);
default:
DEBUG_ASSERT_MSG(DYNA_REC, false, "Invalid type in GetData");
break;
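
For the shifted-register form, the shift type occupies bits 23:22 of the A64 instruction word and the 6-bit amount occupies bits 15:10. As a worked example, an LSR #3 operand contributes:

u32 data = (static_cast<u32>(ShiftType::LSR) << 22) | (3 << 10);
// == (1 << 22) | (3 << 10) == 0x400C00
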
@@ -699,14 +703,38 @@ public:
void BICS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift);
// Wrap the above for saner syntax
- void AND(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { AND(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0)); }
- void BIC(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { BIC(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0)); }
- void ORR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { ORR(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0)); }
- void ORN(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { ORN(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0)); }
- void EOR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { EOR(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0)); }
- void EON(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { EON(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0)); }
- void ANDS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { ANDS(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0)); }
- void BICS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { BICS(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0)); }
+ void AND(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
+ {
+ AND(Rd, Rn, Rm, ArithOption(Rd, ShiftType::LSL, 0));
+ }
+ void BIC(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
+ {
+ BIC(Rd, Rn, Rm, ArithOption(Rd, ShiftType::LSL, 0));
+ }
+ void ORR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
+ {
+ ORR(Rd, Rn, Rm, ArithOption(Rd, ShiftType::LSL, 0));
+ }
+ void ORN(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
+ {
+ ORN(Rd, Rn, Rm, ArithOption(Rd, ShiftType::LSL, 0));
+ }
+ void EOR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
+ {
+ EOR(Rd, Rn, Rm, ArithOption(Rd, ShiftType::LSL, 0));
+ }
+ void EON(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
+ {
+ EON(Rd, Rn, Rm, ArithOption(Rd, ShiftType::LSL, 0));
+ }
+ void ANDS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
+ {
+ ANDS(Rd, Rn, Rm, ArithOption(Rd, ShiftType::LSL, 0));
+ }
+ void BICS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
+ {
+ BICS(Rd, Rn, Rm, ArithOption(Rd, ShiftType::LSL, 0));
+ }
// Convenience wrappers around ORR. These match the official convenience syntax.
void MOV(ARM64Reg Rd, ARM64Reg Rm, ArithOption Shift);
void MOV(ARM64Reg Rd, ARM64Reg Rm);
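
The new multi-line bodies are presumably just the old one-liners reflowed after the longer ShiftType::LSL spelling pushed them past the column limit; behavior is unchanged. A hypothetical call site (emitter instance name assumed for illustration; W0-W2 are the emitter's 32-bit register enumerators):

// Both calls emit the same unshifted "orr w0, w1, w2":
emit.ORR(W0, W1, W2);                                       // wrapper, LSL #0 implied
emit.ORR(W0, W1, W2, ArithOption(W0, ShiftType::LSL, 0));   // explicit form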


@@ -567,7 +567,7 @@ void JitArm64::rlwinmx(UGeckoInstruction inst)
{
ARM64Reg WA = gpr.GetReg();
MOVI2R(WA, mask);
- AND(gpr.R(a), WA, gpr.R(s), ArithOption(gpr.R(s), ST_ROR, 32 - inst.SH));
+ AND(gpr.R(a), WA, gpr.R(s), ArithOption(gpr.R(s), ShiftType::ROR, 32 - inst.SH));
gpr.Unlock(WA);
}
@@ -592,7 +592,7 @@ void JitArm64::rlwnmx(UGeckoInstruction inst)
{
gpr.BindToRegister(a, a == s);
ARM64Reg WA = gpr.GetReg();
- ArithOption Shift(gpr.R(s), ST_ROR, 32 - (gpr.GetImm(b) & 0x1f));
+ ArithOption Shift(gpr.R(s), ShiftType::ROR, 32 - (gpr.GetImm(b) & 0x1f));
MOVI2R(WA, mask);
AND(gpr.R(a), WA, gpr.R(s), Shift);
gpr.Unlock(WA);
@@ -656,7 +656,7 @@ void JitArm64::srawix(UGeckoInstruction inst)
if (a != s)
{
ASR(RA, RS, amount);
- ANDS(dest, RA, RS, ArithOption(RS, ST_LSL, 32 - amount));
+ ANDS(dest, RA, RS, ArithOption(RS, ShiftType::LSL, 32 - amount));
}
else
{
@@ -1500,7 +1500,7 @@ void JitArm64::rlwimix(UGeckoInstruction inst)
MOVI2R(WA, mask);
BIC(WB, gpr.R(a), WA);
- AND(WA, WA, gpr.R(s), ArithOption(gpr.R(s), ST_ROR, 32 - inst.SH));
+ AND(WA, WA, gpr.R(s), ArithOption(gpr.R(s), ShiftType::ROR, 32 - inst.SH));
ORR(gpr.R(a), WB, WA);
gpr.Unlock(WA, WB);


@@ -91,7 +91,7 @@ void JitArm64::mcrxr(UGeckoInstruction inst)
LDRB(IndexType::Unsigned, WB, PPC_REG, PPCSTATE_OFF(xer_so_ov));
// [0 SO OV CA]
- ADD(WA, WA, WB, ArithOption(WB, ST_LSL, 2));
+ ADD(WA, WA, WB, ArithOption(WB, ShiftType::LSL, 2));
// [SO OV CA 0] << 3
LSL(WA, WA, 4);
@@ -136,7 +136,7 @@ void JitArm64::mfsrin(UGeckoInstruction inst)
ARM64Reg RB = gpr.R(b);
UBFM(index, RB, 28, 31);
- ADD(index64, PPC_REG, index64, ArithOption(index64, ST_LSL, 2));
+ ADD(index64, PPC_REG, index64, ArithOption(index64, ShiftType::LSL, 2));
LDR(IndexType::Unsigned, gpr.R(d), index64, PPCSTATE_OFF(sr[0]));
gpr.Unlock(index);
@@ -155,7 +155,7 @@ void JitArm64::mtsrin(UGeckoInstruction inst)
ARM64Reg RB = gpr.R(b);
UBFM(index, RB, 28, 31);
- ADD(index64, PPC_REG, index64, ArithOption(index64, ST_LSL, 2));
+ ADD(index64, PPC_REG, index64, ArithOption(index64, ShiftType::LSL, 2));
STR(IndexType::Unsigned, gpr.R(d), index64, PPCSTATE_OFF(sr[0]));
gpr.Unlock(index);
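
In both mfsrin and mtsrin the shifted ADD folds the array indexing into one instruction: PPC_REG plus index * 4 (each segment register is a u32), after which the fixed PPCSTATE_OFF(sr[0]) displacement on the load/store selects the sr array itself. Roughly:

// effective address = PPC_REG + (index << 2) + offsetof(PowerPCState, sr)
//                   = &ppcState.sr[index]   (struct/field names per PPCSTATE_OFF)
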
@@ -282,7 +282,7 @@ void JitArm64::mfspr(UGeckoInstruction inst)
ADD(XB, XB, 1);
UMULH(Xresult, Xresult, XB);
- ADD(Xresult, XA, Xresult, ArithOption(Xresult, ST_LSR, 3));
+ ADD(Xresult, XA, Xresult, ArithOption(Xresult, ShiftType::LSR, 3));
STR(IndexType::Unsigned, Xresult, PPC_REG, PPCSTATE_OFF(spr[SPR_TL]));
if (CanMergeNextInstructions(1))
@@ -332,9 +332,9 @@ void JitArm64::mfspr(UGeckoInstruction inst)
ARM64Reg WA = gpr.GetReg();
LDRH(IndexType::Unsigned, RD, PPC_REG, PPCSTATE_OFF(xer_stringctrl));
LDRB(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(xer_ca));
- ORR(RD, RD, WA, ArithOption(WA, ST_LSL, XER_CA_SHIFT));
+ ORR(RD, RD, WA, ArithOption(WA, ShiftType::LSL, XER_CA_SHIFT));
LDRB(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(xer_so_ov));
- ORR(RD, RD, WA, ArithOption(WA, ST_LSL, XER_OV_SHIFT));
+ ORR(RD, RD, WA, ArithOption(WA, ShiftType::LSL, XER_OV_SHIFT));
gpr.Unlock(WA);
}
break;
@@ -633,7 +633,7 @@ void JitArm64::mfcr(UGeckoInstruction inst)
else
{
UBFX(XC, CR, 61, 1);
- ORR(XA, XC, XA, ArithOption(XA, ST_LSL, 4));
+ ORR(XA, XC, XA, ArithOption(XA, ShiftType::LSL, 4));
}
// EQ
@@ -648,7 +648,7 @@ void JitArm64::mfcr(UGeckoInstruction inst)
// LT
UBFX(XC, CR, 62, 1);
- ORR(WA, WA, WC, ArithOption(WC, ST_LSL, 3));
+ ORR(WA, WA, WC, ArithOption(WC, ShiftType::LSL, 3));
}
gpr.Unlock(WC);


@@ -97,7 +97,7 @@ void JitArm64::GenerateAsm()
ARM64Reg cache_base = X27;
ARM64Reg block = X30;
ORRI2R(pc_masked, WZR, JitBaseBlockCache::FAST_BLOCK_MAP_MASK << 3);
- AND(pc_masked, pc_masked, DISPATCHER_PC, ArithOption(DISPATCHER_PC, ST_LSL, 1));
+ AND(pc_masked, pc_masked, DISPATCHER_PC, ArithOption(DISPATCHER_PC, ShiftType::LSL, 1));
MOVP2R(cache_base, GetBlockCache()->GetFastBlockMap());
LDR(block, cache_base, EncodeRegTo64(pc_masked));
FixupBranch not_found = CBZ(block);
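
The mask/shift pair turns the current PC into a byte offset into the fast block map: PowerPC instructions are 4-byte aligned, so the block index is pc >> 2, and with pointer-sized (8-byte) entries the byte offset is (pc >> 2) << 3 == pc << 1. Pre-shifting the mask by 3 lets a single AND do the masking and the scaling at once:

// offset = (pc << 1) & (FAST_BLOCK_MAP_MASK << 3)
// block  = entry loaded from cache_base + offset
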
@@ -224,7 +224,7 @@ void JitArm64::GenerateCommonAsm()
float_emit.UCVTF(32, D0, D0);
MOVP2R(addr_reg, &m_dequantizeTableS);
- ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
+ ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1, 0);
RET(X30);
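
Each of the dequantize and quantize paths below scales the table index by 8 before the load; the stride suggests each per-scale entry is an 8-byte pair of floats (an assumption about the m_dequantizeTableS/m_quantizeTableS layout), which the subsequent LDR/FMUL applies as the scale factor:

// row address = addr_reg + (scale << 3)   // 8-byte stride per scale (assumed)
// D1 = table row, D0 = D0 * D1            // apply the scale
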
@@ -238,7 +238,7 @@ void JitArm64::GenerateCommonAsm()
float_emit.SCVTF(32, D0, D0);
MOVP2R(addr_reg, &m_dequantizeTableS);
- ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
+ ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1, 0);
RET(X30);
@@ -252,7 +252,7 @@ void JitArm64::GenerateCommonAsm()
float_emit.UCVTF(32, D0, D0);
MOVP2R(addr_reg, &m_dequantizeTableS);
- ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
+ ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1, 0);
RET(X30);
@@ -266,7 +266,7 @@ void JitArm64::GenerateCommonAsm()
float_emit.SCVTF(32, D0, D0);
MOVP2R(addr_reg, &m_dequantizeTableS);
- ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
+ ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1, 0);
RET(X30);
@@ -288,7 +288,7 @@ void JitArm64::GenerateCommonAsm()
float_emit.UCVTF(32, D0, D0);
MOVP2R(addr_reg, &m_dequantizeTableS);
- ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
+ ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1, 0);
RET(X30);
@@ -302,7 +302,7 @@ void JitArm64::GenerateCommonAsm()
float_emit.SCVTF(32, D0, D0);
MOVP2R(addr_reg, &m_dequantizeTableS);
- ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
+ ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1, 0);
RET(X30);
@@ -316,7 +316,7 @@ void JitArm64::GenerateCommonAsm()
float_emit.UCVTF(32, D0, D0);
MOVP2R(addr_reg, &m_dequantizeTableS);
- ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
+ ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1, 0);
RET(X30);
@@ -330,7 +330,7 @@ void JitArm64::GenerateCommonAsm()
float_emit.SCVTF(32, D0, D0);
MOVP2R(addr_reg, &m_dequantizeTableS);
- ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
+ ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1, 0);
RET(X30);
@@ -387,7 +387,7 @@ void JitArm64::GenerateCommonAsm()
{
auto emit_quantize = [this, &float_emit, scale_reg]() {
MOVP2R(X2, &m_quantizeTableS);
- ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
+ ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1, 0);
@@ -414,7 +414,7 @@ void JitArm64::GenerateCommonAsm()
{
auto emit_quantize = [this, &float_emit, scale_reg]() {
MOVP2R(X2, &m_quantizeTableS);
- ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
+ ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1, 0);
@@ -442,7 +442,7 @@ void JitArm64::GenerateCommonAsm()
{
auto emit_quantize = [this, &float_emit, scale_reg]() {
MOVP2R(X2, &m_quantizeTableS);
- ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
+ ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1, 0);
@@ -469,7 +469,7 @@ void JitArm64::GenerateCommonAsm()
{
auto emit_quantize = [this, &float_emit, scale_reg]() {
MOVP2R(X2, &m_quantizeTableS);
- ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
+ ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1, 0);
@@ -511,7 +511,7 @@ void JitArm64::GenerateCommonAsm()
{
auto emit_quantize = [this, &float_emit, scale_reg]() {
MOVP2R(X2, &m_quantizeTableS);
- ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
+ ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1);
@@ -537,7 +537,7 @@ void JitArm64::GenerateCommonAsm()
{
auto emit_quantize = [this, &float_emit, scale_reg]() {
MOVP2R(X2, &m_quantizeTableS);
- ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
+ ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1);
@@ -563,7 +563,7 @@ void JitArm64::GenerateCommonAsm()
{
auto emit_quantize = [this, &float_emit, scale_reg]() {
MOVP2R(X2, &m_quantizeTableS);
- ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
+ ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1);
@@ -589,7 +589,7 @@ void JitArm64::GenerateCommonAsm()
{
auto emit_quantize = [this, &float_emit, scale_reg]() {
MOVP2R(X2, &m_quantizeTableS);
- ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ST_LSL, 3));
+ ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
float_emit.FMUL(32, D0, D0, D1);


@@ -195,7 +195,7 @@ int VertexLoaderARM64::ReadVertex(u64 attribute, int format, int count_in, int c
FixupBranch dont_store = B(CC_GT);
MOVP2R(EncodeRegTo64(scratch2_reg), VertexLoaderManager::position_cache);
ADD(EncodeRegTo64(scratch1_reg), EncodeRegTo64(scratch2_reg), EncodeRegTo64(count_reg),
- ArithOption(EncodeRegTo64(count_reg), ST_LSL, 4));
+ ArithOption(EncodeRegTo64(count_reg), ShiftType::LSL, 4));
m_float_emit.STUR(write_size, coords, EncodeRegTo64(scratch1_reg), -16);
SetJumpTarget(dont_store);
}
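
The LSL #4 scales the running vertex count by 16 bytes, which matches a position_cache entry of four floats (an assumption about VertexLoaderManager::position_cache's layout); the STUR with a -16 displacement then writes the just-read position into the previous entry:

// dest = position_cache + (count << 4) - 16  ==  entry count - 1 (assumed)
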
@@ -248,20 +248,20 @@ void VertexLoaderARM64::ReadColor(u64 attribute, int format, s32 offset)
// B
AND(scratch2_reg, scratch3_reg, 32, 4);
- ORR(scratch2_reg, WSP, scratch2_reg, ArithOption(scratch2_reg, ST_LSL, 3));
- ORR(scratch2_reg, scratch2_reg, scratch2_reg, ArithOption(scratch2_reg, ST_LSR, 5));
- ORR(scratch1_reg, WSP, scratch2_reg, ArithOption(scratch2_reg, ST_LSL, 16));
+ ORR(scratch2_reg, WSP, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 3));
+ ORR(scratch2_reg, scratch2_reg, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSR, 5));
+ ORR(scratch1_reg, WSP, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 16));
// G
UBFM(scratch2_reg, scratch3_reg, 5, 10);
- ORR(scratch2_reg, WSP, scratch2_reg, ArithOption(scratch2_reg, ST_LSL, 2));
- ORR(scratch2_reg, scratch2_reg, scratch2_reg, ArithOption(scratch2_reg, ST_LSR, 6));
- ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ST_LSL, 8));
+ ORR(scratch2_reg, WSP, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 2));
+ ORR(scratch2_reg, scratch2_reg, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSR, 6));
+ ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 8));
// R
UBFM(scratch2_reg, scratch3_reg, 11, 15);
- ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ST_LSL, 3));
- ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ST_LSR, 2));
+ ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 3));
+ ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSR, 2));
// A
ORRI2R(scratch1_reg, scratch1_reg, 0xFF000000);
@@ -286,18 +286,18 @@ void VertexLoaderARM64::ReadColor(u64 attribute, int format, s32 offset)
// G
AND(scratch2_reg, scratch3_reg, 32, 3);
- ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ST_LSL, 8));
+ ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 8));
// B
UBFM(scratch2_reg, scratch3_reg, 12, 15);
- ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ST_LSL, 16));
+ ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 16));
// A
UBFM(scratch2_reg, scratch3_reg, 8, 11);
- ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ST_LSL, 24));
+ ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 24));
// Final duplication
- ORR(scratch1_reg, scratch1_reg, scratch1_reg, ArithOption(scratch1_reg, ST_LSL, 4));
+ ORR(scratch1_reg, scratch1_reg, scratch1_reg, ArithOption(scratch1_reg, ShiftType::LSL, 4));
STR(IndexType::Unsigned, scratch1_reg, dst_reg, m_dst_ofs);
load_bytes = 2;
@@ -323,26 +323,26 @@ void VertexLoaderARM64::ReadColor(u64 attribute, int format, s32 offset)
// A
UBFM(scratch2_reg, scratch3_reg, 0, 5);
- ORR(scratch2_reg, WSP, scratch2_reg, ArithOption(scratch2_reg, ST_LSL, 2));
- ORR(scratch2_reg, scratch2_reg, scratch2_reg, ArithOption(scratch2_reg, ST_LSR, 6));
- ORR(scratch1_reg, WSP, scratch2_reg, ArithOption(scratch2_reg, ST_LSL, 24));
+ ORR(scratch2_reg, WSP, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 2));
+ ORR(scratch2_reg, scratch2_reg, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSR, 6));
+ ORR(scratch1_reg, WSP, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 24));
// B
UBFM(scratch2_reg, scratch3_reg, 6, 11);
- ORR(scratch2_reg, WSP, scratch2_reg, ArithOption(scratch2_reg, ST_LSL, 2));
- ORR(scratch2_reg, scratch2_reg, scratch2_reg, ArithOption(scratch2_reg, ST_LSR, 6));
- ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ST_LSL, 16));
+ ORR(scratch2_reg, WSP, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 2));
+ ORR(scratch2_reg, scratch2_reg, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSR, 6));
+ ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 16));
// G
UBFM(scratch2_reg, scratch3_reg, 12, 17);
- ORR(scratch2_reg, WSP, scratch2_reg, ArithOption(scratch2_reg, ST_LSL, 2));
- ORR(scratch2_reg, scratch2_reg, scratch2_reg, ArithOption(scratch2_reg, ST_LSR, 6));
- ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ST_LSL, 8));
+ ORR(scratch2_reg, WSP, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 2));
+ ORR(scratch2_reg, scratch2_reg, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSR, 6));
+ ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 8));
// R
UBFM(scratch2_reg, scratch3_reg, 18, 23);
- ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ST_LSL, 2));
- ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ST_LSR, 4));
+ ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 2));
+ ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSR, 4));
STR(IndexType::Unsigned, scratch1_reg, dst_reg, m_dst_ofs);
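
All of these color paths widen a narrow channel by replicating its top bits into the newly opened low bits, which maps 0 to 0 and the channel maximum to exactly 255: a 5-bit value x becomes (x << 3) | (x >> 2) and a 6-bit value (x << 2) | (x >> 4), built here from pairs of shifted ORRs. Worked maxima:

// 5-bit: x = 0b11111  -> (x << 3) | (x >> 2) == 0b11111000 | 0b111 == 0xFF
// 6-bit: x = 0b111111 -> (x << 2) | (x >> 4) == 0b11111100 | 0b11  == 0xFF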