From f0f206714fd1754da93691ea13776500a9b6d278 Mon Sep 17 00:00:00 2001 From: Dentomologist Date: Sat, 6 Feb 2021 10:50:33 -0800 Subject: [PATCH] Arm64Gen: Convert ARM64Reg to enum class Most changes are just adding ARM64Reg:: in front of the constants. --- Source/Core/Common/Arm64Emitter.cpp | 498 +++++++----------- Source/Core/Common/Arm64Emitter.h | 79 +-- Source/Core/Core/PowerPC/JitArm64/Jit.cpp | 145 ++--- .../PowerPC/JitArm64/JitArm64_BackPatch.cpp | 114 ++-- .../Core/PowerPC/JitArm64/JitArm64_Branch.cpp | 2 +- .../JitArm64/JitArm64_FloatingPoint.cpp | 2 +- .../PowerPC/JitArm64/JitArm64_Integer.cpp | 26 +- .../PowerPC/JitArm64/JitArm64_LoadStore.cpp | 72 +-- .../JitArm64/JitArm64_LoadStoreFloating.cpp | 48 +- .../JitArm64/JitArm64_LoadStorePaired.cpp | 56 +- .../Core/PowerPC/JitArm64/JitArm64_Paired.cpp | 6 +- .../PowerPC/JitArm64/JitArm64_RegCache.cpp | 183 ++++--- .../Core/PowerPC/JitArm64/JitArm64_RegCache.h | 16 +- .../JitArm64/JitArm64_SystemRegisters.cpp | 32 +- Source/Core/Core/PowerPC/JitArm64/JitAsm.cpp | 430 +++++++-------- .../Core/Core/PowerPC/JitArm64/Jit_Util.cpp | 44 +- Source/Core/VideoCommon/VertexLoaderARM64.cpp | 64 +-- .../Core/PowerPC/JitArm64/MovI2R.cpp | 4 +- 18 files changed, 859 insertions(+), 962 deletions(-) diff --git a/Source/Core/Common/Arm64Emitter.cpp b/Source/Core/Common/Arm64Emitter.cpp index ae07647339..3ffb64401f 100644 --- a/Source/Core/Common/Arm64Emitter.cpp +++ b/Source/Core/Common/Arm64Emitter.cpp @@ -507,8 +507,8 @@ void ARM64XEmitter::EncodeCompareBranchInst(u32 op, ARM64Reg Rt, const void* ptr ASSERT_MSG(DYNA_REC, distance >= -0x40000 && distance <= 0x3FFFF, "%s: Received too large distance: %" PRIx64, __func__, distance); - Rt = DecodeReg(Rt); - Write32((b64Bit << 31) | (0x34 << 24) | (op << 24) | (((u32)distance << 5) & 0xFFFFE0) | Rt); + Write32((b64Bit << 31) | (0x34 << 24) | (op << 24) | (((u32)distance << 5) & 0xFFFFE0) | + DecodeReg(Rt)); } void ARM64XEmitter::EncodeTestBranchInst(u32 op, ARM64Reg Rt, u8 bits, const void* ptr) @@ -524,9 +524,8 @@ void ARM64XEmitter::EncodeTestBranchInst(u32 op, ARM64Reg Rt, u8 bits, const voi ASSERT_MSG(DYNA_REC, distance >= -0x3FFF && distance < 0x3FFF, "%s: Received too large distance: %" PRIx64, __func__, distance); - Rt = DecodeReg(Rt); Write32((b64Bit << 31) | (0x36 << 24) | (op << 24) | (bits << 19) | - (((u32)distance << 5) & 0x7FFE0) | Rt); + (((u32)distance << 5) & 0x7FFE0) | DecodeReg(Rt)); } void ARM64XEmitter::EncodeUnconditionalBranchInst(u32 op, const void* ptr) @@ -546,8 +545,7 @@ void ARM64XEmitter::EncodeUnconditionalBranchInst(u32 op, const void* ptr) void ARM64XEmitter::EncodeUnconditionalBranchInst(u32 opc, u32 op2, u32 op3, u32 op4, ARM64Reg Rn) { - Rn = DecodeReg(Rn); - Write32((0x6B << 25) | (opc << 21) | (op2 << 16) | (op3 << 10) | (Rn << 5) | op4); + Write32((0x6B << 25) | (opc << 21) | (op2 << 16) | (op3 << 10) | (DecodeReg(Rn) << 5) | op4); } void ARM64XEmitter::EncodeExceptionInst(u32 instenc, u32 imm) @@ -561,7 +559,8 @@ void ARM64XEmitter::EncodeExceptionInst(u32 instenc, u32 imm) void ARM64XEmitter::EncodeSystemInst(u32 op0, u32 op1, u32 CRn, u32 CRm, u32 op2, ARM64Reg Rt) { - Write32((0x354 << 22) | (op0 << 19) | (op1 << 16) | (CRn << 12) | (CRm << 8) | (op2 << 5) | Rt); + Write32((0x354 << 22) | (op0 << 19) | (op1 << 16) | (CRn << 12) | (CRm << 8) | (op2 << 5) | + DecodeReg(Rt)); } void ARM64XEmitter::EncodeArithmeticInst(u32 instenc, bool flags, ARM64Reg Rd, ARM64Reg Rn, @@ -569,11 +568,9 @@ void ARM64XEmitter::EncodeArithmeticInst(u32 instenc, bool flags, ARM64Reg 
Rd, A { bool b64Bit = Is64Bit(Rd); - Rd = DecodeReg(Rd); - Rn = DecodeReg(Rn); - Rm = DecodeReg(Rm); Write32((b64Bit << 31) | (flags << 29) | (ArithEnc[instenc] << 21) | - (Option.IsExtended() ? (1 << 21) : 0) | (Rm << 16) | Option.GetData() | (Rn << 5) | Rd); + (Option.IsExtended() ? (1 << 21) : 0) | (DecodeReg(Rm) << 16) | Option.GetData() | + (DecodeReg(Rn) << 5) | DecodeReg(Rd)); } void ARM64XEmitter::EncodeArithmeticCarryInst(u32 op, bool flags, ARM64Reg Rd, ARM64Reg Rn, @@ -581,10 +578,8 @@ void ARM64XEmitter::EncodeArithmeticCarryInst(u32 op, bool flags, ARM64Reg Rd, A { bool b64Bit = Is64Bit(Rd); - Rd = DecodeReg(Rd); - Rm = DecodeReg(Rm); - Rn = DecodeReg(Rn); - Write32((b64Bit << 31) | (op << 30) | (flags << 29) | (0xD0 << 21) | (Rm << 16) | (Rn << 5) | Rd); + Write32((b64Bit << 31) | (op << 30) | (flags << 29) | (0xD0 << 21) | (DecodeReg(Rm) << 16) | + (DecodeReg(Rn) << 5) | DecodeReg(Rd)); } void ARM64XEmitter::EncodeCondCompareImmInst(u32 op, ARM64Reg Rn, u32 imm, u32 nzcv, CCFlags cond) @@ -594,9 +589,8 @@ void ARM64XEmitter::EncodeCondCompareImmInst(u32 op, ARM64Reg Rn, u32 imm, u32 n ASSERT_MSG(DYNA_REC, !(imm & ~0x1F), "%s: too large immediate: %d", __func__, imm); ASSERT_MSG(DYNA_REC, !(nzcv & ~0xF), "%s: Flags out of range: %d", __func__, nzcv); - Rn = DecodeReg(Rn); Write32((b64Bit << 31) | (op << 30) | (1 << 29) | (0xD2 << 21) | (imm << 16) | (cond << 12) | - (1 << 11) | (Rn << 5) | nzcv); + (1 << 11) | (DecodeReg(Rn) << 5) | nzcv); } void ARM64XEmitter::EncodeCondCompareRegInst(u32 op, ARM64Reg Rn, ARM64Reg Rm, u32 nzcv, @@ -606,10 +600,8 @@ void ARM64XEmitter::EncodeCondCompareRegInst(u32 op, ARM64Reg Rn, ARM64Reg Rm, u ASSERT_MSG(DYNA_REC, !(nzcv & ~0xF), "%s: Flags out of range: %d", __func__, nzcv); - Rm = DecodeReg(Rm); - Rn = DecodeReg(Rn); - Write32((b64Bit << 31) | (op << 30) | (1 << 29) | (0xD2 << 21) | (Rm << 16) | (cond << 12) | - (Rn << 5) | nzcv); + Write32((b64Bit << 31) | (op << 30) | (1 << 29) | (0xD2 << 21) | (DecodeReg(Rm) << 16) | + (cond << 12) | (DecodeReg(Rn) << 5) | nzcv); } void ARM64XEmitter::EncodeCondSelectInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, @@ -617,32 +609,25 @@ void ARM64XEmitter::EncodeCondSelectInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, { bool b64Bit = Is64Bit(Rd); - Rd = DecodeReg(Rd); - Rm = DecodeReg(Rm); - Rn = DecodeReg(Rn); - Write32((b64Bit << 31) | (CondSelectEnc[instenc][0] << 30) | (0xD4 << 21) | (Rm << 16) | - (cond << 12) | (CondSelectEnc[instenc][1] << 10) | (Rn << 5) | Rd); + Write32((b64Bit << 31) | (CondSelectEnc[instenc][0] << 30) | (0xD4 << 21) | + (DecodeReg(Rm) << 16) | (cond << 12) | (CondSelectEnc[instenc][1] << 10) | + (DecodeReg(Rn) << 5) | DecodeReg(Rd)); } void ARM64XEmitter::EncodeData1SrcInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn) { bool b64Bit = Is64Bit(Rd); - Rd = DecodeReg(Rd); - Rn = DecodeReg(Rn); Write32((b64Bit << 31) | (0x2D6 << 21) | (Data1SrcEnc[instenc][0] << 16) | - (Data1SrcEnc[instenc][1] << 10) | (Rn << 5) | Rd); + (Data1SrcEnc[instenc][1] << 10) | (DecodeReg(Rn) << 5) | DecodeReg(Rd)); } void ARM64XEmitter::EncodeData2SrcInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { bool b64Bit = Is64Bit(Rd); - Rd = DecodeReg(Rd); - Rm = DecodeReg(Rm); - Rn = DecodeReg(Rn); - Write32((b64Bit << 31) | (0x0D6 << 21) | (Rm << 16) | (Data2SrcEnc[instenc] << 10) | (Rn << 5) | - Rd); + Write32((b64Bit << 31) | (0x0D6 << 21) | (DecodeReg(Rm) << 16) | (Data2SrcEnc[instenc] << 10) | + (DecodeReg(Rn) << 5) | DecodeReg(Rd)); } void ARM64XEmitter::EncodeData3SrcInst(u32 instenc, ARM64Reg 
Rd, ARM64Reg Rn, ARM64Reg Rm, @@ -650,12 +635,9 @@ void ARM64XEmitter::EncodeData3SrcInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, AR { bool b64Bit = Is64Bit(Rd); - Rd = DecodeReg(Rd); - Rm = DecodeReg(Rm); - Rn = DecodeReg(Rn); - Ra = DecodeReg(Ra); - Write32((b64Bit << 31) | (0xD8 << 21) | (Data3SrcEnc[instenc][0] << 21) | (Rm << 16) | - (Data3SrcEnc[instenc][1] << 15) | (Ra << 10) | (Rn << 5) | Rd); + Write32((b64Bit << 31) | (0xD8 << 21) | (Data3SrcEnc[instenc][0] << 21) | (DecodeReg(Rm) << 16) | + (Data3SrcEnc[instenc][1] << 15) | (DecodeReg(Ra) << 10) | (DecodeReg(Rn) << 5) | + DecodeReg(Rd)); } void ARM64XEmitter::EncodeLogicalInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, @@ -663,11 +645,9 @@ void ARM64XEmitter::EncodeLogicalInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM { bool b64Bit = Is64Bit(Rd); - Rd = DecodeReg(Rd); - Rm = DecodeReg(Rm); - Rn = DecodeReg(Rn); Write32((b64Bit << 31) | (LogicalEnc[instenc][0] << 29) | (0x5 << 25) | - (LogicalEnc[instenc][1] << 21) | Shift.GetData() | (Rm << 16) | (Rn << 5) | Rd); + (LogicalEnc[instenc][1] << 21) | Shift.GetData() | (DecodeReg(Rm) << 16) | + (DecodeReg(Rn) << 5) | DecodeReg(Rd)); } void ARM64XEmitter::EncodeLoadRegisterInst(u32 bitop, ARM64Reg Rt, u32 imm) @@ -677,22 +657,18 @@ void ARM64XEmitter::EncodeLoadRegisterInst(u32 bitop, ARM64Reg Rt, u32 imm) ASSERT_MSG(DYNA_REC, !(imm & 0xFFFFF), "%s: offset too large %d", __func__, imm); - Rt = DecodeReg(Rt); if (b64Bit && bitop != 0x2) // LDRSW(0x2) uses 64bit reg, doesn't have 64bit bit set bitop |= 0x1; - Write32((bitop << 30) | (bVec << 26) | (0x18 << 24) | (imm << 5) | Rt); + Write32((bitop << 30) | (bVec << 26) | (0x18 << 24) | (imm << 5) | DecodeReg(Rt)); } void ARM64XEmitter::EncodeLoadStoreExcInst(u32 instenc, ARM64Reg Rs, ARM64Reg Rt2, ARM64Reg Rn, ARM64Reg Rt) { - Rs = DecodeReg(Rs); - Rt2 = DecodeReg(Rt2); - Rn = DecodeReg(Rn); - Rt = DecodeReg(Rt); Write32((LoadStoreExcEnc[instenc][0] << 30) | (0x8 << 24) | (LoadStoreExcEnc[instenc][1] << 23) | - (LoadStoreExcEnc[instenc][2] << 22) | (LoadStoreExcEnc[instenc][3] << 21) | (Rs << 16) | - (LoadStoreExcEnc[instenc][4] << 15) | (Rt2 << 10) | (Rn << 5) | Rt); + (LoadStoreExcEnc[instenc][2] << 22) | (LoadStoreExcEnc[instenc][3] << 21) | + (DecodeReg(Rs) << 16) | (LoadStoreExcEnc[instenc][4] << 15) | (DecodeReg(Rt2) << 10) | + (DecodeReg(Rn) << 5) | DecodeReg(Rt)); } void ARM64XEmitter::EncodeLoadStorePairedInst(u32 op, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, @@ -719,10 +695,8 @@ void ARM64XEmitter::EncodeLoadStorePairedInst(u32 op, ARM64Reg Rt, ARM64Reg Rt2, else if (b64Bit && !bVec) opc = 2; - Rt = DecodeReg(Rt); - Rt2 = DecodeReg(Rt2); - Rn = DecodeReg(Rn); - Write32((opc << 30) | (bVec << 26) | (op << 22) | (imm << 15) | (Rt2 << 10) | (Rn << 5) | Rt); + Write32((opc << 30) | (bVec << 26) | (op << 22) | (imm << 15) | (DecodeReg(Rt2) << 10) | + (DecodeReg(Rn) << 5) | DecodeReg(Rt)); } void ARM64XEmitter::EncodeLoadStoreIndexedInst(u32 op, u32 op2, ARM64Reg Rt, ARM64Reg Rn, s32 imm) @@ -734,10 +708,8 @@ void ARM64XEmitter::EncodeLoadStoreIndexedInst(u32 op, u32 op2, ARM64Reg Rt, ARM ASSERT_MSG(DYNA_REC, !(imm < -256 || imm > 255), "%s: offset too large %d", __func__, imm); - Rt = DecodeReg(Rt); - Rn = DecodeReg(Rn); - Write32((b64Bit << 30) | (op << 22) | (bVec << 26) | (offset << 12) | (op2 << 10) | (Rn << 5) | - Rt); + Write32((b64Bit << 30) | (op << 22) | (bVec << 26) | (offset << 12) | (op2 << 10) | + (DecodeReg(Rn) << 5) | DecodeReg(Rt)); } void ARM64XEmitter::EncodeLoadStoreIndexedInst(u32 op, ARM64Reg Rt, 
ARM64Reg Rn, s32 imm, u8 size)
@@ -757,9 +729,8 @@ void ARM64XEmitter::EncodeLoadStoreIndexedInst(u32 op, ARM64Reg Rt, ARM64Reg Rn,
   ASSERT_MSG(DYNA_REC, !(imm & ~0xFFF), "%s(IndexType::Unsigned): offset too large %d", __func__,
              imm);
 
-  Rt = DecodeReg(Rt);
-  Rn = DecodeReg(Rn);
-  Write32((b64Bit << 30) | (op << 22) | (bVec << 26) | (imm << 10) | (Rn << 5) | Rt);
+  Write32((b64Bit << 30) | (op << 22) | (bVec << 26) | (imm << 10) | (DecodeReg(Rn) << 5) |
+          DecodeReg(Rt));
 }
 
 void ARM64XEmitter::EncodeMOVWideInst(u32 op, ARM64Reg Rd, u32 imm, ShiftAmount pos)
@@ -768,30 +739,25 @@
   ASSERT_MSG(DYNA_REC, !(imm & ~0xFFFF), "%s: immediate out of range: %d", __func__, imm);
 
-  Rd = DecodeReg(Rd);
   Write32((b64Bit << 31) | (op << 29) | (0x25 << 23) | (static_cast<u32>(pos) << 21) | (imm << 5) |
-          Rd);
+          DecodeReg(Rd));
 }
 
 void ARM64XEmitter::EncodeBitfieldMOVInst(u32 op, ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms)
 {
   bool b64Bit = Is64Bit(Rd);
 
-  Rd = DecodeReg(Rd);
-  Rn = DecodeReg(Rn);
   Write32((b64Bit << 31) | (op << 29) | (0x26 << 23) | (b64Bit << 22) | (immr << 16) |
-          (imms << 10) | (Rn << 5) | Rd);
+          (imms << 10) | (DecodeReg(Rn) << 5) | DecodeReg(Rd));
 }
 
 void ARM64XEmitter::EncodeLoadStoreRegisterOffset(u32 size, u32 opc, ARM64Reg Rt, ARM64Reg Rn,
                                                   ArithOption Rm)
 {
-  Rt = DecodeReg(Rt);
-  Rn = DecodeReg(Rn);
-  ARM64Reg decoded_Rm = DecodeReg(Rm.GetReg());
+  const int decoded_Rm = DecodeReg(Rm.GetReg());
 
   Write32((size << 30) | (opc << 22) | (0x1C1 << 21) | (decoded_Rm << 16) | Rm.GetData() |
-          (1 << 11) | (Rn << 5) | Rt);
+          (1 << 11) | (DecodeReg(Rn) << 5) | DecodeReg(Rt));
 }
 
 void ARM64XEmitter::EncodeAddSubImmInst(u32 op, bool flags, u32 shift, u32 imm, ARM64Reg Rn,
@@ -801,10 +767,8 @@
   ASSERT_MSG(DYNA_REC, !(imm & ~0xFFF), "%s: immediate too large: %x", __func__, imm);
 
-  Rd = DecodeReg(Rd);
-  Rn = DecodeReg(Rn);
   Write32((b64Bit << 31) | (op << 30) | (flags << 29) | (0x11 << 24) | (shift << 22) | (imm << 10) |
-          (Rn << 5) | Rd);
+          (DecodeReg(Rn) << 5) | DecodeReg(Rd));
 }
 
 void ARM64XEmitter::EncodeLogicalImmInst(u32 op, ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms,
@@ -814,11 +778,8 @@ void ARM64XEmitter::EncodeLogicalImmInst(u32 op, ARM64Reg Rd, ARM64Reg Rn, u32 i
   // Use Rn to determine bitness here.
  bool b64Bit = Is64Bit(Rn);
 
-  Rd = DecodeReg(Rd);
-  Rn = DecodeReg(Rn);
-
   Write32((b64Bit << 31) | (op << 29) | (0x24 << 23) | (n << 22) | (immr << 16) | (imms << 10) |
-          (Rn << 5) | Rd);
+          (DecodeReg(Rn) << 5) | DecodeReg(Rd));
 }
 
 void ARM64XEmitter::EncodeLoadStorePair(u32 op, u32 load, IndexType type, ARM64Reg Rt, ARM64Reg Rt2,
@@ -853,30 +814,24 @@ void ARM64XEmitter::EncodeLoadStorePair(u32 op, u32 load, IndexType type, ARM64R
     imm >>= 2;
   }
 
-  Rt = DecodeReg(Rt);
-  Rt2 = DecodeReg(Rt2);
-  Rn = DecodeReg(Rn);
-
   ASSERT_MSG(DYNA_REC, imm >= -64 && imm < 64, "imm too large for load/store pair!");
 
   Write32((op << 30) | (0b101 << 27) | (type_encode << 23) | (load << 22) | ((imm & 0x7F) << 15) |
-          (Rt2 << 10) | (Rn << 5) | Rt);
+          (DecodeReg(Rt2) << 10) | (DecodeReg(Rn) << 5) | DecodeReg(Rt));
 }
 
 void ARM64XEmitter::EncodeAddressInst(u32 op, ARM64Reg Rd, s32 imm)
 {
-  Rd = DecodeReg(Rd);
-
-  Write32((op << 31) | ((imm & 0x3) << 29) | (0x10 << 24) | ((imm & 0x1FFFFC) << 3) | Rd);
+  Write32((op << 31) | ((imm & 0x3) << 29) | (0x10 << 24) | ((imm & 0x1FFFFC) << 3) |
+          DecodeReg(Rd));
 }
 
 void ARM64XEmitter::EncodeLoadStoreUnscaled(u32 size, u32 op, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
 {
   ASSERT_MSG(DYNA_REC, !(imm < -256 || imm > 255), "%s received too large offset: %d", __func__,
              imm);
 
-  Rt = DecodeReg(Rt);
-  Rn = DecodeReg(Rn);
-  Write32((size << 30) | (0b111 << 27) | (op << 22) | ((imm & 0x1FF) << 12) | (Rn << 5) | Rt);
+  Write32((size << 30) | (0b111 << 27) | (op << 22) | ((imm & 0x1FF) << 12) | (DecodeReg(Rn) << 5) |
+          DecodeReg(Rt));
 }
 
 static constexpr bool IsInRangeImm19(s64 distance)
@@ -927,8 +882,8 @@ void ARM64XEmitter::SetJumpTarget(FixupBranch const& branch)
     ASSERT_MSG(DYNA_REC, IsInRangeImm19(distance), "%s(%d): Received too large distance: %" PRIx64,
                __func__, static_cast<int>(branch.type), distance);
     const bool b64Bit = Is64Bit(branch.reg);
-    const ARM64Reg reg = DecodeReg(branch.reg);
-    inst = (b64Bit << 31) | (0x1A << 25) | (Not << 24) | (MaskImm19(distance) << 5) | reg;
+    inst = (b64Bit << 31) | (0x1A << 25) | (Not << 24) | (MaskImm19(distance) << 5) |
+           DecodeReg(branch.reg);
   }
   break;
   case FixupBranch::Type::BConditional:
@@ -943,9 +898,8 @@ void ARM64XEmitter::SetJumpTarget(FixupBranch const& branch)
   {
     ASSERT_MSG(DYNA_REC, IsInRangeImm14(distance), "%s(%d): Received too large distance: %" PRIx64,
                __func__, static_cast<int>(branch.type), distance);
-    const ARM64Reg reg = DecodeReg(branch.reg);
     inst = ((branch.bit & 0x20) << 26) | (0x1B << 25) | (Not << 24) | ((branch.bit & 0x1F) << 19) |
-           (MaskImm14(distance) << 5) | reg;
+           (MaskImm14(distance) << 5) | DecodeReg(branch.reg);
   }
   break;
   case FixupBranch::Type::B:
@@ -1100,11 +1054,11 @@ void ARM64XEmitter::RET(ARM64Reg Rn)
 }
 void ARM64XEmitter::ERET()
 {
-  EncodeUnconditionalBranchInst(4, 0x1F, 0, 0, SP);
+  EncodeUnconditionalBranchInst(4, 0x1F, 0, 0, ARM64Reg::SP);
 }
 void ARM64XEmitter::DRPS()
 {
-  EncodeUnconditionalBranchInst(5, 0x1F, 0, 0, SP);
+  EncodeUnconditionalBranchInst(5, 0x1F, 0, 0, ARM64Reg::SP);
 }
 
 // Exception generation
@@ -1170,7 +1124,7 @@ void ARM64XEmitter::_MSR(PStateField field, u8 imm)
     ASSERT_MSG(DYNA_REC, false, "Invalid PStateField to do a imm move to");
     break;
   }
-  EncodeSystemInst(0, op1, 4, imm, op2, WSP);
+  EncodeSystemInst(0, op1, 4, imm, op2, ARM64Reg::WSP);
 }
 
 static void GetSystemReg(PStateField field, int& o0, int& op1, int& CRn, int& CRm, int& op2)
@@ -1223,7 +1177,7 @@ void ARM64XEmitter::_MSR(PStateField field, ARM64Reg Rt)
   int o0 = 0, op1 = 0, CRn = 0, CRm = 0, op2 = 0;
   ASSERT_MSG(DYNA_REC, Is64Bit(Rt), "MSR: Rt must
be 64-bit"); GetSystemReg(field, o0, op1, CRn, CRm, op2); - EncodeSystemInst(o0, op1, CRn, CRm, op2, DecodeReg(Rt)); + EncodeSystemInst(o0, op1, CRn, CRm, op2, Rt); } void ARM64XEmitter::MRS(ARM64Reg Rt, PStateField field) @@ -1231,7 +1185,7 @@ void ARM64XEmitter::MRS(ARM64Reg Rt, PStateField field) int o0 = 0, op1 = 0, CRn = 0, CRm = 0, op2 = 0; ASSERT_MSG(DYNA_REC, Is64Bit(Rt), "MRS: Rt must be 64-bit"); GetSystemReg(field, o0, op1, CRn, CRm, op2); - EncodeSystemInst(o0 | 4, op1, CRn, CRm, op2, DecodeReg(Rt)); + EncodeSystemInst(o0 | 4, op1, CRn, CRm, op2, Rt); } void ARM64XEmitter::CNTVCT(Arm64Gen::ARM64Reg Rt) @@ -1239,28 +1193,28 @@ void ARM64XEmitter::CNTVCT(Arm64Gen::ARM64Reg Rt) ASSERT_MSG(DYNA_REC, Is64Bit(Rt), "CNTVCT: Rt must be 64-bit"); // MRS , CNTVCT_EL0 ; Read CNTVCT_EL0 into Xt - EncodeSystemInst(3 | 4, 3, 0xe, 0, 2, DecodeReg(Rt)); + EncodeSystemInst(3 | 4, 3, 0xe, 0, 2, Rt); } void ARM64XEmitter::HINT(SystemHint op) { - EncodeSystemInst(0, 3, 2, 0, static_cast(op), WSP); + EncodeSystemInst(0, 3, 2, 0, static_cast(op), ARM64Reg::WSP); } void ARM64XEmitter::CLREX() { - EncodeSystemInst(0, 3, 3, 0, 2, WSP); + EncodeSystemInst(0, 3, 3, 0, 2, ARM64Reg::WSP); } void ARM64XEmitter::DSB(BarrierType type) { - EncodeSystemInst(0, 3, 3, static_cast(type), 4, WSP); + EncodeSystemInst(0, 3, 3, static_cast(type), 4, ARM64Reg::WSP); } void ARM64XEmitter::DMB(BarrierType type) { - EncodeSystemInst(0, 3, 3, static_cast(type), 5, WSP); + EncodeSystemInst(0, 3, 3, static_cast(type), 5, ARM64Reg::WSP); } void ARM64XEmitter::ISB(BarrierType type) { - EncodeSystemInst(0, 3, 3, static_cast(type), 6, WSP); + EncodeSystemInst(0, 3, 3, static_cast(type), 6, ARM64Reg::WSP); } // Add/Subtract (extended register) @@ -1311,7 +1265,7 @@ void ARM64XEmitter::CMN(ARM64Reg Rn, ARM64Reg Rm) void ARM64XEmitter::CMN(ARM64Reg Rn, ARM64Reg Rm, ArithOption Option) { - EncodeArithmeticInst(0, true, Is64Bit(Rn) ? ZR : WZR, Rn, Rm, Option); + EncodeArithmeticInst(0, true, Is64Bit(Rn) ? ARM64Reg::ZR : ARM64Reg::WZR, Rn, Rm, Option); } void ARM64XEmitter::CMP(ARM64Reg Rn, ARM64Reg Rm) @@ -1321,7 +1275,7 @@ void ARM64XEmitter::CMP(ARM64Reg Rn, ARM64Reg Rm) void ARM64XEmitter::CMP(ARM64Reg Rn, ARM64Reg Rm, ArithOption Option) { - EncodeArithmeticInst(1, true, Is64Bit(Rn) ? ZR : WZR, Rn, Rm, Option); + EncodeArithmeticInst(1, true, Is64Bit(Rn) ? 
ARM64Reg::ZR : ARM64Reg::WZR, Rn, Rm, Option); } // Add/Subtract (with carry) @@ -1479,7 +1433,7 @@ void ARM64XEmitter::SMADDL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra) } void ARM64XEmitter::SMULL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { - SMADDL(Rd, Rn, Rm, SP); + SMADDL(Rd, Rn, Rm, ARM64Reg::SP); } void ARM64XEmitter::SMSUBL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra) { @@ -1487,7 +1441,7 @@ void ARM64XEmitter::SMSUBL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra) } void ARM64XEmitter::SMULH(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { - EncodeData3SrcInst(4, Rd, Rn, Rm, SP); + EncodeData3SrcInst(4, Rd, Rn, Rm, ARM64Reg::SP); } void ARM64XEmitter::UMADDL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra) { @@ -1495,7 +1449,7 @@ void ARM64XEmitter::UMADDL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra) } void ARM64XEmitter::UMULL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { - UMADDL(Rd, Rn, Rm, SP); + UMADDL(Rd, Rn, Rm, ARM64Reg::SP); } void ARM64XEmitter::UMSUBL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra) { @@ -1503,15 +1457,15 @@ void ARM64XEmitter::UMSUBL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra) } void ARM64XEmitter::UMULH(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { - EncodeData3SrcInst(7, Rd, Rn, Rm, SP); + EncodeData3SrcInst(7, Rd, Rn, Rm, ARM64Reg::SP); } void ARM64XEmitter::MUL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { - EncodeData3SrcInst(0, Rd, Rn, Rm, SP); + EncodeData3SrcInst(0, Rd, Rn, Rm, ARM64Reg::SP); } void ARM64XEmitter::MNEG(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { - EncodeData3SrcInst(1, Rd, Rn, Rm, SP); + EncodeData3SrcInst(1, Rd, Rn, Rm, ARM64Reg::SP); } // Logical (shifted register) @@ -1550,19 +1504,19 @@ void ARM64XEmitter::BICS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shif void ARM64XEmitter::MOV(ARM64Reg Rd, ARM64Reg Rm, ArithOption Shift) { - ORR(Rd, Is64Bit(Rd) ? ZR : WZR, Rm, Shift); + ORR(Rd, Is64Bit(Rd) ? ARM64Reg::ZR : ARM64Reg::WZR, Rm, Shift); } void ARM64XEmitter::MOV(ARM64Reg Rd, ARM64Reg Rm) { if (IsGPR(Rd) && IsGPR(Rm)) - ORR(Rd, Is64Bit(Rd) ? ZR : WZR, Rm, ArithOption(Rm, ShiftType::LSL, 0)); + ORR(Rd, Is64Bit(Rd) ? ARM64Reg::ZR : ARM64Reg::WZR, Rm, ArithOption(Rm, ShiftType::LSL, 0)); else ASSERT_MSG(DYNA_REC, false, "Non-GPRs not supported in MOV"); } void ARM64XEmitter::MVN(ARM64Reg Rd, ARM64Reg Rm) { - ORN(Rd, Is64Bit(Rd) ? ZR : WZR, Rm, ArithOption(Rm, ShiftType::LSL, 0)); + ORN(Rd, Is64Bit(Rd) ? ARM64Reg::ZR : ARM64Reg::WZR, Rm, ArithOption(Rm, ShiftType::LSL, 0)); } void ARM64XEmitter::LSL(ARM64Reg Rd, ARM64Reg Rm, int shift) { @@ -1603,7 +1557,7 @@ void ARM64XEmitter::ORR(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool inver } void ARM64XEmitter::TST(ARM64Reg Rn, u32 immr, u32 imms, bool invert) { - EncodeLogicalImmInst(3, Is64Bit(Rn) ? ZR : WZR, Rn, immr, imms, invert); + EncodeLogicalImmInst(3, Is64Bit(Rn) ? ARM64Reg::ZR : ARM64Reg::WZR, Rn, immr, imms, invert); } // Add/subtract (immediate) @@ -1625,7 +1579,7 @@ void ARM64XEmitter::SUBS(ARM64Reg Rd, ARM64Reg Rn, u32 imm, bool shift) } void ARM64XEmitter::CMP(ARM64Reg Rn, u32 imm, bool shift) { - EncodeAddSubImmInst(1, true, shift, imm, Rn, Is64Bit(Rn) ? SP : WSP); + EncodeAddSubImmInst(1, true, shift, imm, Rn, Is64Bit(Rn) ? 
ARM64Reg::SP : ARM64Reg::WSP);
 }
 
 // Data Processing (Immediate)
@@ -1684,10 +1638,9 @@ void ARM64XEmitter::EXTR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, u32 shift)
 {
   bool sf = Is64Bit(Rd);
   bool N = sf;
-  Rd = DecodeReg(Rd);
-  Rn = DecodeReg(Rn);
-  Rm = DecodeReg(Rm);
-  Write32((sf << 31) | (0x27 << 23) | (N << 22) | (Rm << 16) | (shift << 10) | (Rn << 5) | Rd);
+
+  Write32((sf << 31) | (0x27 << 23) | (N << 22) | (DecodeReg(Rm) << 16) | (shift << 10) |
+          (DecodeReg(Rn) << 5) | DecodeReg(Rd));
 }
 void ARM64XEmitter::SXTB(ARM64Reg Rd, ARM64Reg Rn)
 {
@@ -1742,59 +1695,59 @@ void ARM64XEmitter::STP(IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn,
 // Load/Store Exclusive
 void ARM64XEmitter::STXRB(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn)
 {
-  EncodeLoadStoreExcInst(0, Rs, SP, Rt, Rn);
+  EncodeLoadStoreExcInst(0, Rs, ARM64Reg::SP, Rt, Rn);
 }
 void ARM64XEmitter::STLXRB(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn)
 {
-  EncodeLoadStoreExcInst(1, Rs, SP, Rt, Rn);
+  EncodeLoadStoreExcInst(1, Rs, ARM64Reg::SP, Rt, Rn);
 }
 void ARM64XEmitter::LDXRB(ARM64Reg Rt, ARM64Reg Rn)
 {
-  EncodeLoadStoreExcInst(2, SP, SP, Rt, Rn);
+  EncodeLoadStoreExcInst(2, ARM64Reg::SP, ARM64Reg::SP, Rt, Rn);
 }
 void ARM64XEmitter::LDAXRB(ARM64Reg Rt, ARM64Reg Rn)
 {
-  EncodeLoadStoreExcInst(3, SP, SP, Rt, Rn);
+  EncodeLoadStoreExcInst(3, ARM64Reg::SP, ARM64Reg::SP, Rt, Rn);
 }
 void ARM64XEmitter::STLRB(ARM64Reg Rt, ARM64Reg Rn)
 {
-  EncodeLoadStoreExcInst(4, SP, SP, Rt, Rn);
+  EncodeLoadStoreExcInst(4, ARM64Reg::SP, ARM64Reg::SP, Rt, Rn);
 }
 void ARM64XEmitter::LDARB(ARM64Reg Rt, ARM64Reg Rn)
 {
-  EncodeLoadStoreExcInst(5, SP, SP, Rt, Rn);
+  EncodeLoadStoreExcInst(5, ARM64Reg::SP, ARM64Reg::SP, Rt, Rn);
 }
 void ARM64XEmitter::STXRH(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn)
 {
-  EncodeLoadStoreExcInst(6, Rs, SP, Rt, Rn);
+  EncodeLoadStoreExcInst(6, Rs, ARM64Reg::SP, Rt, Rn);
 }
 void ARM64XEmitter::STLXRH(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn)
 {
-  EncodeLoadStoreExcInst(7, Rs, SP, Rt, Rn);
+  EncodeLoadStoreExcInst(7, Rs, ARM64Reg::SP, Rt, Rn);
 }
 void ARM64XEmitter::LDXRH(ARM64Reg Rt, ARM64Reg Rn)
 {
-  EncodeLoadStoreExcInst(8, SP, SP, Rt, Rn);
+  EncodeLoadStoreExcInst(8, ARM64Reg::SP, ARM64Reg::SP, Rt, Rn);
 }
 void ARM64XEmitter::LDAXRH(ARM64Reg Rt, ARM64Reg Rn)
 {
-  EncodeLoadStoreExcInst(9, SP, SP, Rt, Rn);
+  EncodeLoadStoreExcInst(9, ARM64Reg::SP, ARM64Reg::SP, Rt, Rn);
 }
 void ARM64XEmitter::STLRH(ARM64Reg Rt, ARM64Reg Rn)
 {
-  EncodeLoadStoreExcInst(10, SP, SP, Rt, Rn);
+  EncodeLoadStoreExcInst(10, ARM64Reg::SP, ARM64Reg::SP, Rt, Rn);
 }
 void ARM64XEmitter::LDARH(ARM64Reg Rt, ARM64Reg Rn)
 {
-  EncodeLoadStoreExcInst(11, SP, SP, Rt, Rn);
+  EncodeLoadStoreExcInst(11, ARM64Reg::SP, ARM64Reg::SP, Rt, Rn);
 }
 void ARM64XEmitter::STXR(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn)
 {
-  EncodeLoadStoreExcInst(12 + Is64Bit(Rt), Rs, SP, Rt, Rn);
+  EncodeLoadStoreExcInst(12 + Is64Bit(Rt), Rs, ARM64Reg::SP, Rt, Rn);
 }
 void ARM64XEmitter::STLXR(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn)
 {
-  EncodeLoadStoreExcInst(14 + Is64Bit(Rt), Rs, SP, Rt, Rn);
+  EncodeLoadStoreExcInst(14 + Is64Bit(Rt), Rs, ARM64Reg::SP, Rt, Rn);
 }
 void ARM64XEmitter::STXP(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn)
 {
@@ -1806,27 +1759,27 @@ void ARM64XEmitter::STLXP(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn)
 }
 void ARM64XEmitter::LDXR(ARM64Reg Rt, ARM64Reg Rn)
 {
-  EncodeLoadStoreExcInst(20 + Is64Bit(Rt), SP, SP, Rt, Rn);
+  EncodeLoadStoreExcInst(20 + Is64Bit(Rt), ARM64Reg::SP, ARM64Reg::SP, Rt, Rn);
 }
 void ARM64XEmitter::LDAXR(ARM64Reg Rt, ARM64Reg Rn)
 {
-  EncodeLoadStoreExcInst(22 +
Is64Bit(Rt), SP, SP, Rt, Rn); + EncodeLoadStoreExcInst(22 + Is64Bit(Rt), ARM64Reg::SP, ARM64Reg::SP, Rt, Rn); } void ARM64XEmitter::LDXP(ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn) { - EncodeLoadStoreExcInst(24 + Is64Bit(Rt), SP, Rt2, Rt, Rn); + EncodeLoadStoreExcInst(24 + Is64Bit(Rt), ARM64Reg::SP, Rt2, Rt, Rn); } void ARM64XEmitter::LDAXP(ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn) { - EncodeLoadStoreExcInst(26 + Is64Bit(Rt), SP, Rt2, Rt, Rn); + EncodeLoadStoreExcInst(26 + Is64Bit(Rt), ARM64Reg::SP, Rt2, Rt, Rn); } void ARM64XEmitter::STLR(ARM64Reg Rt, ARM64Reg Rn) { - EncodeLoadStoreExcInst(28 + Is64Bit(Rt), SP, SP, Rt, Rn); + EncodeLoadStoreExcInst(28 + Is64Bit(Rt), ARM64Reg::SP, ARM64Reg::SP, Rt, Rn); } void ARM64XEmitter::LDAR(ARM64Reg Rt, ARM64Reg Rn) { - EncodeLoadStoreExcInst(30 + Is64Bit(Rt), SP, SP, Rt, Rn); + EncodeLoadStoreExcInst(30 + Is64Bit(Rt), ARM64Reg::SP, ARM64Reg::SP, Rt, Rn); } // Load/Store no-allocate pair (offset) @@ -2153,7 +2106,7 @@ void ARM64XEmitter::MOVI2RImpl(ARM64Reg Rd, T imm) break; case Approach::ORRBase: - constexpr ARM64Reg zero_reg = sizeof(T) == 8 ? ZR : WZR; + constexpr ARM64Reg zero_reg = sizeof(T) == 8 ? ARM64Reg::ZR : ARM64Reg::WZR; const bool success = TryORRI2R(Rd, zero_reg, best_base); ASSERT(success); break; @@ -2225,21 +2178,21 @@ void ARM64XEmitter::ABI_PushRegisters(BitSet32 registers) // The first push must adjust the SP, else a context switch may invalidate everything below SP. if (num_regs & 1) { - STR(IndexType::Pre, (ARM64Reg)(X0 + *it++), SP, -stack_size); + STR(IndexType::Pre, ARM64Reg::X0 + *it++, ARM64Reg::SP, -stack_size); } else { - ARM64Reg first_reg = (ARM64Reg)(X0 + *it++); - ARM64Reg second_reg = (ARM64Reg)(X0 + *it++); - STP(IndexType::Pre, first_reg, second_reg, SP, -stack_size); + ARM64Reg first_reg = ARM64Reg::X0 + *it++; + ARM64Reg second_reg = ARM64Reg::X0 + *it++; + STP(IndexType::Pre, first_reg, second_reg, ARM64Reg::SP, -stack_size); } // Fast store for all other registers, this is always an even number. for (int i = 0; i < (num_regs - 1) / 2; i++) { - ARM64Reg odd_reg = (ARM64Reg)(X0 + *it++); - ARM64Reg even_reg = (ARM64Reg)(X0 + *it++); - STP(IndexType::Signed, odd_reg, even_reg, SP, 16 * (i + 1)); + ARM64Reg odd_reg = ARM64Reg::X0 + *it++; + ARM64Reg even_reg = ARM64Reg::X0 + *it++; + STP(IndexType::Signed, odd_reg, even_reg, ARM64Reg::SP, 16 * (i + 1)); } ASSERT_MSG(DYNA_REC, it == registers.end(), "%s registers don't match.", __func__); @@ -2255,10 +2208,10 @@ void ARM64XEmitter::ABI_PopRegisters(BitSet32 registers, BitSet32 ignore_mask) return; // We must adjust the SP in the end, so load the first (two) registers at least. - ARM64Reg first = (ARM64Reg)(X0 + *it++); + ARM64Reg first = ARM64Reg::X0 + *it++; ARM64Reg second; if (!(num_regs & 1)) - second = (ARM64Reg)(X0 + *it++); + second = ARM64Reg::X0 + *it++; else second = {}; @@ -2268,16 +2221,16 @@ void ARM64XEmitter::ABI_PopRegisters(BitSet32 registers, BitSet32 ignore_mask) // Fast load for all but the first (two) registers, this is always an even number. for (int i = 0; i < (num_regs - 1) / 2; i++) { - ARM64Reg odd_reg = (ARM64Reg)(X0 + *it++); - ARM64Reg even_reg = (ARM64Reg)(X0 + *it++); - LDP(IndexType::Signed, odd_reg, even_reg, SP, 16 * (i + 1)); + ARM64Reg odd_reg = ARM64Reg::X0 + *it++; + ARM64Reg even_reg = ARM64Reg::X0 + *it++; + LDP(IndexType::Signed, odd_reg, even_reg, ARM64Reg::SP, 16 * (i + 1)); } // Post loading the first (two) registers. 
if (num_regs & 1) - LDR(IndexType::Post, first, SP, stack_size); + LDR(IndexType::Post, first, ARM64Reg::SP, stack_size); else - LDP(IndexType::Post, first, second, SP, stack_size); + LDP(IndexType::Post, first, second, ARM64Reg::SP, stack_size); ASSERT_MSG(DYNA_REC, it == registers.end(), "%s registers don't match.", __func__); } @@ -2286,8 +2239,6 @@ void ARM64XEmitter::ABI_PopRegisters(BitSet32 registers, BitSet32 ignore_mask) void ARM64FloatEmitter::EmitLoadStoreImmediate(u8 size, u32 opc, IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm) { - Rt = DecodeReg(Rt); - Rn = DecodeReg(Rn); u32 encoded_size = 0; u32 encoded_imm = 0; @@ -2331,19 +2282,17 @@ void ARM64FloatEmitter::EmitLoadStoreImmediate(u8 size, u32 opc, IndexType type, } Write32((encoded_size << 30) | (0xF << 26) | (type == IndexType::Unsigned ? (1 << 24) : 0) | - (size == 128 ? (1 << 23) : 0) | (opc << 22) | (encoded_imm << 10) | (Rn << 5) | Rt); + (size == 128 ? (1 << 23) : 0) | (opc << 22) | (encoded_imm << 10) | (DecodeReg(Rn) << 5) | + DecodeReg(Rt)); } void ARM64FloatEmitter::EmitScalar2Source(bool M, bool S, u32 type, u32 opcode, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { ASSERT_MSG(DYNA_REC, !IsQuad(Rd), "%s only supports double and single registers!", __func__); - Rd = DecodeReg(Rd); - Rn = DecodeReg(Rn); - Rm = DecodeReg(Rm); - Write32((M << 31) | (S << 29) | (0b11110001 << 21) | (type << 22) | (Rm << 16) | (opcode << 12) | - (1 << 11) | (Rn << 5) | Rd); + Write32((M << 31) | (S << 29) | (0b11110001 << 21) | (type << 22) | (DecodeReg(Rm) << 16) | + (opcode << 12) | (1 << 11) | (DecodeReg(Rn) << 5) | DecodeReg(Rd)); } void ARM64FloatEmitter::EmitThreeSame(bool U, u32 size, u32 opcode, ARM64Reg Rd, ARM64Reg Rn, @@ -2351,31 +2300,23 @@ void ARM64FloatEmitter::EmitThreeSame(bool U, u32 size, u32 opcode, ARM64Reg Rd, { ASSERT_MSG(DYNA_REC, !IsSingle(Rd), "%s doesn't support singles!", __func__); bool quad = IsQuad(Rd); - Rd = DecodeReg(Rd); - Rn = DecodeReg(Rn); - Rm = DecodeReg(Rm); - Write32((quad << 30) | (U << 29) | (0b1110001 << 21) | (size << 22) | (Rm << 16) | - (opcode << 11) | (1 << 10) | (Rn << 5) | Rd); + Write32((quad << 30) | (U << 29) | (0b1110001 << 21) | (size << 22) | (DecodeReg(Rm) << 16) | + (opcode << 11) | (1 << 10) | (DecodeReg(Rn) << 5) | DecodeReg(Rd)); } void ARM64FloatEmitter::EmitCopy(bool Q, u32 op, u32 imm5, u32 imm4, ARM64Reg Rd, ARM64Reg Rn) { - Rd = DecodeReg(Rd); - Rn = DecodeReg(Rn); - Write32((Q << 30) | (op << 29) | (0b111 << 25) | (imm5 << 16) | (imm4 << 11) | (1 << 10) | - (Rn << 5) | Rd); + (DecodeReg(Rn) << 5) | DecodeReg(Rd)); } void ARM64FloatEmitter::Emit2RegMisc(bool Q, bool U, u32 size, u32 opcode, ARM64Reg Rd, ARM64Reg Rn) { ASSERT_MSG(DYNA_REC, !IsSingle(Rd), "%s doesn't support singles!", __func__); - Rd = DecodeReg(Rd); - Rn = DecodeReg(Rn); Write32((Q << 30) | (U << 29) | (0b1110001 << 21) | (size << 22) | (opcode << 12) | (1 << 11) | - (Rn << 5) | Rd); + (DecodeReg(Rn) << 5) | DecodeReg(Rd)); } void ARM64FloatEmitter::EmitLoadStoreSingleStructure(bool L, bool R, u32 opcode, bool S, u32 size, @@ -2383,11 +2324,9 @@ void ARM64FloatEmitter::EmitLoadStoreSingleStructure(bool L, bool R, u32 opcode, { ASSERT_MSG(DYNA_REC, !IsSingle(Rt), "%s doesn't support singles!", __func__); bool quad = IsQuad(Rt); - Rt = DecodeReg(Rt); - Rn = DecodeReg(Rn); Write32((quad << 30) | (0b1101 << 24) | (L << 22) | (R << 21) | (opcode << 13) | (S << 12) | - (size << 10) | (Rn << 5) | Rt); + (size << 10) | (DecodeReg(Rn) << 5) | DecodeReg(Rt)); } void 
ARM64FloatEmitter::EmitLoadStoreSingleStructure(bool L, bool R, u32 opcode, bool S, u32 size, @@ -2395,33 +2334,26 @@ void ARM64FloatEmitter::EmitLoadStoreSingleStructure(bool L, bool R, u32 opcode, { ASSERT_MSG(DYNA_REC, !IsSingle(Rt), "%s doesn't support singles!", __func__); bool quad = IsQuad(Rt); - Rt = DecodeReg(Rt); - Rn = DecodeReg(Rn); - Rm = DecodeReg(Rm); - Write32((quad << 30) | (0x1B << 23) | (L << 22) | (R << 21) | (Rm << 16) | (opcode << 13) | - (S << 12) | (size << 10) | (Rn << 5) | Rt); + Write32((quad << 30) | (0x1B << 23) | (L << 22) | (R << 21) | (DecodeReg(Rm) << 16) | + (opcode << 13) | (S << 12) | (size << 10) | (DecodeReg(Rn) << 5) | DecodeReg(Rt)); } void ARM64FloatEmitter::Emit1Source(bool M, bool S, u32 type, u32 opcode, ARM64Reg Rd, ARM64Reg Rn) { ASSERT_MSG(DYNA_REC, !IsQuad(Rd), "%s doesn't support vector!", __func__); - Rd = DecodeReg(Rd); - Rn = DecodeReg(Rn); Write32((M << 31) | (S << 29) | (0xF1 << 21) | (type << 22) | (opcode << 15) | (1 << 14) | - (Rn << 5) | Rd); + (DecodeReg(Rn) << 5) | DecodeReg(Rd)); } void ARM64FloatEmitter::EmitConversion(bool sf, bool S, u32 type, u32 rmode, u32 opcode, ARM64Reg Rd, ARM64Reg Rn) { - ASSERT_MSG(DYNA_REC, Rn <= SP, "%s only supports GPR as source!", __func__); - Rd = DecodeReg(Rd); - Rn = DecodeReg(Rn); + ASSERT_MSG(DYNA_REC, Rn <= ARM64Reg::SP, "%s only supports GPR as source!", __func__); Write32((sf << 31) | (S << 29) | (0xF1 << 21) | (type << 22) | (rmode << 19) | (opcode << 16) | - (Rn << 5) | Rd); + (DecodeReg(Rn) << 5) | DecodeReg(Rd)); } void ARM64FloatEmitter::EmitConvertScalarToInt(ARM64Reg Rd, ARM64Reg Rn, RoundingMode round, @@ -2433,8 +2365,6 @@ void ARM64FloatEmitter::EmitConvertScalarToInt(ARM64Reg Rd, ARM64Reg Rn, Roundin // Use the encoding that transfers the result to a GPR. const bool sf = Is64Bit(Rd); const int type = IsDouble(Rn) ? 1 : 0; - Rd = DecodeReg(Rd); - Rn = DecodeReg(Rn); int opcode = (sign ? 1 : 0); int rmode = 0; switch (round) @@ -2462,8 +2392,6 @@ void ARM64FloatEmitter::EmitConvertScalarToInt(ARM64Reg Rd, ARM64Reg Rn, Roundin { // Use the encoding (vector, single) that keeps the result in the fp register. 
int sz = IsDouble(Rn); - Rd = DecodeReg(Rd); - Rn = DecodeReg(Rn); int opcode = 0; switch (round) { @@ -2486,7 +2414,7 @@ void ARM64FloatEmitter::EmitConvertScalarToInt(ARM64Reg Rd, ARM64Reg Rn, Roundin break; } Write32((0x5E << 24) | (sign << 29) | (sz << 22) | (1 << 21) | (opcode << 12) | (2 << 10) | - (Rn << 5) | Rd); + (DecodeReg(Rn) << 5) | DecodeReg(Rd)); } } @@ -2503,11 +2431,8 @@ void ARM64FloatEmitter::FCVTU(ARM64Reg Rd, ARM64Reg Rn, RoundingMode round) void ARM64FloatEmitter::EmitConversion2(bool sf, bool S, bool direction, u32 type, u32 rmode, u32 opcode, int scale, ARM64Reg Rd, ARM64Reg Rn) { - Rd = DecodeReg(Rd); - Rn = DecodeReg(Rn); - Write32((sf << 31) | (S << 29) | (0xF0 << 21) | (direction << 21) | (type << 22) | (rmode << 19) | - (opcode << 16) | (scale << 10) | (Rn << 5) | Rd); + (opcode << 16) | (scale << 10) | (DecodeReg(Rn) << 5) | DecodeReg(Rd)); } void ARM64FloatEmitter::EmitCompare(bool M, bool S, u32 op, u32 opcode2, ARM64Reg Rn, ARM64Reg Rm) @@ -2515,11 +2440,8 @@ void ARM64FloatEmitter::EmitCompare(bool M, bool S, u32 op, u32 opcode2, ARM64Re ASSERT_MSG(DYNA_REC, !IsQuad(Rn), "%s doesn't support vector!", __func__); bool is_double = IsDouble(Rn); - Rn = DecodeReg(Rn); - Rm = DecodeReg(Rm); - - Write32((M << 31) | (S << 29) | (0xF1 << 21) | (is_double << 22) | (Rm << 16) | (op << 14) | - (1 << 13) | (Rn << 5) | opcode2); + Write32((M << 31) | (S << 29) | (0xF1 << 21) | (is_double << 22) | (DecodeReg(Rm) << 16) | + (op << 14) | (1 << 13) | (DecodeReg(Rn) << 5) | opcode2); } void ARM64FloatEmitter::EmitCondSelect(bool M, bool S, CCFlags cond, ARM64Reg Rd, ARM64Reg Rn, @@ -2528,12 +2450,8 @@ void ARM64FloatEmitter::EmitCondSelect(bool M, bool S, CCFlags cond, ARM64Reg Rd ASSERT_MSG(DYNA_REC, !IsQuad(Rd), "%s doesn't support vector!", __func__); bool is_double = IsDouble(Rd); - Rd = DecodeReg(Rd); - Rn = DecodeReg(Rn); - Rm = DecodeReg(Rm); - - Write32((M << 31) | (S << 29) | (0xF1 << 21) | (is_double << 22) | (Rm << 16) | (cond << 12) | - (3 << 10) | (Rn << 5) | Rd); + Write32((M << 31) | (S << 29) | (0xF1 << 21) | (is_double << 22) | (DecodeReg(Rm) << 16) | + (cond << 12) | (3 << 10) | (DecodeReg(Rn) << 5) | DecodeReg(Rd)); } void ARM64FloatEmitter::EmitPermute(u32 size, u32 op, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) @@ -2550,12 +2468,8 @@ void ARM64FloatEmitter::EmitPermute(u32 size, u32 op, ARM64Reg Rd, ARM64Reg Rn, else if (size == 64) encoded_size = 3; - Rd = DecodeReg(Rd); - Rn = DecodeReg(Rn); - Rm = DecodeReg(Rm); - - Write32((quad << 30) | (7 << 25) | (encoded_size << 22) | (Rm << 16) | (op << 12) | (1 << 11) | - (Rn << 5) | Rd); + Write32((quad << 30) | (7 << 25) | (encoded_size << 22) | (DecodeReg(Rm) << 16) | (op << 12) | + (1 << 11) | (DecodeReg(Rn) << 5) | DecodeReg(Rd)); } void ARM64FloatEmitter::EmitScalarImm(bool M, bool S, u32 type, u32 imm5, ARM64Reg Rd, u32 imm8) @@ -2564,10 +2478,8 @@ void ARM64FloatEmitter::EmitScalarImm(bool M, bool S, u32 type, u32 imm5, ARM64R bool is_double = !IsSingle(Rd); - Rd = DecodeReg(Rd); - Write32((M << 31) | (S << 29) | (0xF1 << 21) | (is_double << 22) | (type << 22) | (imm8 << 13) | - (1 << 12) | (imm5 << 5) | Rd); + (1 << 12) | (imm5 << 5) | DecodeReg(Rd)); } void ARM64FloatEmitter::EmitShiftImm(bool Q, bool U, u32 immh, u32 immb, u32 opcode, ARM64Reg Rd, @@ -2575,21 +2487,15 @@ void ARM64FloatEmitter::EmitShiftImm(bool Q, bool U, u32 immh, u32 immb, u32 opc { ASSERT_MSG(DYNA_REC, immh, "%s bad encoding! 
Can't have zero immh", __func__); - Rd = DecodeReg(Rd); - Rn = DecodeReg(Rn); - Write32((Q << 30) | (U << 29) | (0xF << 24) | (immh << 19) | (immb << 16) | (opcode << 11) | - (1 << 10) | (Rn << 5) | Rd); + (1 << 10) | (DecodeReg(Rn) << 5) | DecodeReg(Rd)); } void ARM64FloatEmitter::EmitScalarShiftImm(bool U, u32 immh, u32 immb, u32 opcode, ARM64Reg Rd, ARM64Reg Rn) { - Rd = DecodeReg(Rd); - Rn = DecodeReg(Rn); - Write32((2 << 30) | (U << 29) | (0x3E << 23) | (immh << 19) | (immb << 16) | (opcode << 11) | - (1 << 10) | (Rn << 5) | Rd); + (1 << 10) | (DecodeReg(Rn) << 5) | DecodeReg(Rd)); } void ARM64FloatEmitter::EmitLoadStoreMultipleStructure(u32 size, bool L, u32 opcode, ARM64Reg Rt, @@ -2605,11 +2511,8 @@ void ARM64FloatEmitter::EmitLoadStoreMultipleStructure(u32 size, bool L, u32 opc else if (size == 64) encoded_size = 3; - Rt = DecodeReg(Rt); - Rn = DecodeReg(Rn); - - Write32((quad << 30) | (3 << 26) | (L << 22) | (opcode << 12) | (encoded_size << 10) | (Rn << 5) | - Rt); + Write32((quad << 30) | (3 << 26) | (L << 22) | (opcode << 12) | (encoded_size << 10) | + (DecodeReg(Rn) << 5) | DecodeReg(Rt)); } void ARM64FloatEmitter::EmitLoadStoreMultipleStructurePost(u32 size, bool L, u32 opcode, @@ -2625,12 +2528,8 @@ void ARM64FloatEmitter::EmitLoadStoreMultipleStructurePost(u32 size, bool L, u32 else if (size == 64) encoded_size = 3; - Rt = DecodeReg(Rt); - Rn = DecodeReg(Rn); - Rm = DecodeReg(Rm); - - Write32((quad << 30) | (0b11001 << 23) | (L << 22) | (Rm << 16) | (opcode << 12) | - (encoded_size << 10) | (Rn << 5) | Rt); + Write32((quad << 30) | (0b11001 << 23) | (L << 22) | (DecodeReg(Rm) << 16) | (opcode << 12) | + (encoded_size << 10) | (DecodeReg(Rn) << 5) | DecodeReg(Rt)); } void ARM64FloatEmitter::EmitScalar1Source(bool M, bool S, u32 type, u32 opcode, ARM64Reg Rd, @@ -2638,11 +2537,8 @@ void ARM64FloatEmitter::EmitScalar1Source(bool M, bool S, u32 type, u32 opcode, { ASSERT_MSG(DYNA_REC, !IsQuad(Rd), "%s doesn't support vector!", __func__); - Rd = DecodeReg(Rd); - Rn = DecodeReg(Rn); - Write32((M << 31) | (S << 29) | (0xF1 << 21) | (type << 22) | (opcode << 15) | (1 << 14) | - (Rn << 5) | Rd); + (DecodeReg(Rn) << 5) | DecodeReg(Rd)); } void ARM64FloatEmitter::EmitVectorxElement(bool U, u32 size, bool L, u32 opcode, bool H, @@ -2650,22 +2546,18 @@ void ARM64FloatEmitter::EmitVectorxElement(bool U, u32 size, bool L, u32 opcode, { bool quad = IsQuad(Rd); - Rd = DecodeReg(Rd); - Rn = DecodeReg(Rn); - Rm = DecodeReg(Rm); - - Write32((quad << 30) | (U << 29) | (0xF << 24) | (size << 22) | (L << 21) | (Rm << 16) | - (opcode << 12) | (H << 11) | (Rn << 5) | Rd); + Write32((quad << 30) | (U << 29) | (0xF << 24) | (size << 22) | (L << 21) | + (DecodeReg(Rm) << 16) | (opcode << 12) | (H << 11) | (DecodeReg(Rn) << 5) | + DecodeReg(Rd)); } void ARM64FloatEmitter::EmitLoadStoreUnscaled(u32 size, u32 op, ARM64Reg Rt, ARM64Reg Rn, s32 imm) { ASSERT_MSG(DYNA_REC, !(imm < -256 || imm > 255), "%s received too large offset: %d", __func__, imm); - Rt = DecodeReg(Rt); - Rn = DecodeReg(Rn); - Write32((size << 30) | (0xF << 26) | (op << 22) | ((imm & 0x1FF) << 12) | (Rn << 5) | Rt); + Write32((size << 30) | (0xF << 26) | (op << 22) | ((imm & 0x1FF) << 12) | (DecodeReg(Rn) << 5) | + DecodeReg(Rt)); } void ARM64FloatEmitter::EncodeLoadStorePair(u32 size, bool load, IndexType type, ARM64Reg Rt, @@ -2709,14 +2601,10 @@ void ARM64FloatEmitter::EncodeLoadStorePair(u32 size, bool load, IndexType type, imm >>= 2; } - Rt = DecodeReg(Rt); - Rt2 = DecodeReg(Rt2); - Rn = DecodeReg(Rn); - ASSERT_MSG(DYNA_REC, imm >= 
-64 && imm < 64, "imm too large for load/store pair!"); Write32((opc << 30) | (0b1011 << 26) | (type_encode << 23) | (load << 22) | ((imm & 0x7F) << 15) | - (Rt2 << 10) | (Rn << 5) | Rt); + (DecodeReg(Rt2) << 10) | (DecodeReg(Rn) << 5) | DecodeReg(Rt)); } void ARM64FloatEmitter::EncodeLoadStoreRegisterOffset(u32 size, bool load, ARM64Reg Rt, ARM64Reg Rn, @@ -2756,12 +2644,10 @@ void ARM64FloatEmitter::EncodeLoadStoreRegisterOffset(u32 size, bool load, ARM64 if (load) encoded_op |= 1; - Rt = DecodeReg(Rt); - Rn = DecodeReg(Rn); - ARM64Reg decoded_Rm = DecodeReg(Rm.GetReg()); + const int decoded_Rm = DecodeReg(Rm.GetReg()); Write32((encoded_size << 30) | (encoded_op << 22) | (0b111100001 << 21) | (decoded_Rm << 16) | - Rm.GetData() | (1 << 11) | (Rn << 5) | Rt); + Rm.GetData() | (1 << 11) | (DecodeReg(Rn) << 5) | DecodeReg(Rt)); } void ARM64FloatEmitter::EncodeModImm(bool Q, u8 op, u8 cmode, u8 o2, ARM64Reg Rd, u8 abcdefgh) @@ -2776,9 +2662,8 @@ void ARM64FloatEmitter::EncodeModImm(bool Q, u8 op, u8 cmode, u8 o2, ARM64Reg Rd }; } v; v.hex = abcdefgh; - Rd = DecodeReg(Rd); Write32((Q << 30) | (op << 29) | (0xF << 24) | (v.abc << 16) | (cmode << 12) | (o2 << 11) | - (1 << 10) | (v.defgh << 5) | Rd); + (1 << 10) | (v.defgh << 5) | DecodeReg(Rd)); } void ARM64FloatEmitter::LDR(u8 size, IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm) @@ -2864,7 +2749,7 @@ void ARM64FloatEmitter::LD1(u8 size, ARM64Reg Rt, u8 index, ARM64Reg Rn) bool S = 0; u32 opcode = 0; u32 encoded_size = 0; - ARM64Reg encoded_reg = INVALID_REG; + ARM64Reg encoded_reg = ARM64Reg::INVALID_REG; if (size == 8) { @@ -2915,7 +2800,7 @@ void ARM64FloatEmitter::LD1(u8 size, ARM64Reg Rt, u8 index, ARM64Reg Rn, ARM64Re bool S = 0; u32 opcode = 0; u32 encoded_size = 0; - ARM64Reg encoded_reg = INVALID_REG; + ARM64Reg encoded_reg = ARM64Reg::INVALID_REG; if (size == 8) { @@ -2983,7 +2868,7 @@ void ARM64FloatEmitter::ST1(u8 size, ARM64Reg Rt, u8 index, ARM64Reg Rn) bool S = 0; u32 opcode = 0; u32 encoded_size = 0; - ARM64Reg encoded_reg = INVALID_REG; + ARM64Reg encoded_reg = ARM64Reg::INVALID_REG; if (size == 8) { @@ -3034,7 +2919,7 @@ void ARM64FloatEmitter::ST1(u8 size, ARM64Reg Rt, u8 index, ARM64Reg Rn, ARM64Re bool S = 0; u32 opcode = 0; u32 encoded_size = 0; - ARM64Reg encoded_reg = INVALID_REG; + ARM64Reg encoded_reg = ARM64Reg::INVALID_REG; if (size == 8) { @@ -3175,9 +3060,8 @@ void ARM64FloatEmitter::FMOV(ARM64Reg Rd, ARM64Reg Rn, bool top) // TODO ASSERT_MSG(DYNA_REC, 0, "FMOV: Unhandled case"); } - Rd = DecodeReg(Rd); - Rn = DecodeReg(Rn); - Write32((sf << 31) | (0x1e2 << 20) | (rmode << 19) | (opcode << 16) | (Rn << 5) | Rd); + Write32((sf << 31) | (0x1e2 << 20) | (rmode << 19) | (opcode << 16) | (DecodeReg(Rn) << 5) | + DecodeReg(Rd)); } } @@ -3275,14 +3159,10 @@ void ARM64FloatEmitter::EmitScalar3Source(bool isDouble, ARM64Reg Rd, ARM64Reg R ARM64Reg Ra, int opcode) { int type = isDouble ? 
1 : 0; - Rd = DecodeReg(Rd); - Rn = DecodeReg(Rn); - Rm = DecodeReg(Rm); - Ra = DecodeReg(Ra); int o1 = opcode >> 1; int o0 = opcode & 1; - m_emit->Write32((0x1F << 24) | (type << 22) | (o1 << 21) | (Rm << 16) | (o0 << 15) | (Ra << 10) | - (Rn << 5) | Rd); + m_emit->Write32((0x1F << 24) | (type << 22) | (o1 << 21) | (DecodeReg(Rm) << 16) | (o0 << 15) | + (DecodeReg(Ra) << 10) | (DecodeReg(Rn) << 5) | DecodeReg(Rd)); } // Scalar floating point immediate @@ -3536,7 +3416,7 @@ void ARM64FloatEmitter::INS(u8 size, ARM64Reg Rd, u8 index1, ARM64Reg Rn, u8 ind void ARM64FloatEmitter::UMOV(u8 size, ARM64Reg Rd, ARM64Reg Rn, u8 index) { bool b64Bit = Is64Bit(Rd); - ASSERT_MSG(DYNA_REC, Rd < SP, "%s destination must be a GPR!", __func__); + ASSERT_MSG(DYNA_REC, Rd < ARM64Reg::SP, "%s destination must be a GPR!", __func__); ASSERT_MSG(DYNA_REC, !(b64Bit && size != 64), "%s must have a size of 64 when destination is 64bit!", __func__); u32 imm5 = 0; @@ -3567,7 +3447,7 @@ void ARM64FloatEmitter::UMOV(u8 size, ARM64Reg Rd, ARM64Reg Rn, u8 index) void ARM64FloatEmitter::SMOV(u8 size, ARM64Reg Rd, ARM64Reg Rn, u8 index) { bool b64Bit = Is64Bit(Rd); - ASSERT_MSG(DYNA_REC, Rd < SP, "%s destination must be a GPR!", __func__); + ASSERT_MSG(DYNA_REC, Rd < ARM64Reg::SP, "%s destination must be a GPR!", __func__); ASSERT_MSG(DYNA_REC, size != 64, "%s doesn't support 64bit destination. Use UMOV!", __func__); u32 imm5 = 0; @@ -3619,10 +3499,9 @@ void ARM64FloatEmitter::SCVTF(ARM64Reg Rd, ARM64Reg Rn) { // Source is in FP register (like destination!). We must use a vector encoding. bool sign = false; - Rd = DecodeReg(Rd); - Rn = DecodeReg(Rn); int sz = IsDouble(Rn); - Write32((0x5e << 24) | (sign << 29) | (sz << 22) | (0x876 << 10) | (Rn << 5) | Rd); + Write32((0x5e << 24) | (sign << 29) | (sz << 22) | (0x876 << 10) | (DecodeReg(Rn) << 5) | + DecodeReg(Rd)); } else { @@ -3640,10 +3519,9 @@ void ARM64FloatEmitter::UCVTF(ARM64Reg Rd, ARM64Reg Rn) { // Source is in FP register (like destination!). We must use a vector encoding. 
    bool sign = true;
-    Rd = DecodeReg(Rd);
-    Rn = DecodeReg(Rn);
     int sz = IsDouble(Rn);
-    Write32((0x5e << 24) | (sign << 29) | (sz << 22) | (0x876 << 10) | (Rn << 5) | Rd);
+    Write32((0x5e << 24) | (sign << 29) | (sz << 22) | (0x876 << 10) | (DecodeReg(Rn) << 5) |
+            DecodeReg(Rd));
   }
   else
   {
@@ -4036,11 +3914,11 @@ void ARM64FloatEmitter::ABI_PushRegisters(BitSet32 registers, ARM64Reg tmp)
     }
   }
 
-  if (bundled_loadstore && tmp != INVALID_REG)
+  if (bundled_loadstore && tmp != ARM64Reg::INVALID_REG)
   {
     int num_regs = registers.Count();
-    m_emit->SUB(SP, SP, num_regs * 16);
-    m_emit->ADD(tmp, SP, 0);
+    m_emit->SUB(ARM64Reg::SP, ARM64Reg::SP, num_regs * 16);
+    m_emit->ADD(tmp, ARM64Reg::SP, 0);
     std::vector<ARM64Reg> island_regs;
     for (int i = 0; i < 32; ++i)
     {
@@ -4059,9 +3937,9 @@ void ARM64FloatEmitter::ABI_PushRegisters(BitSet32 registers, ARM64Reg tmp)
       }
 
       if (count == 1)
-        island_regs.push_back((ARM64Reg)(Q0 + i));
+        island_regs.push_back(ARM64Reg::Q0 + i);
       else
-        ST1(64, count, IndexType::Post, (ARM64Reg)(Q0 + i), tmp);
+        ST1(64, count, IndexType::Post, ARM64Reg::Q0 + i, tmp);
 
       i += count - 1;
     }
@@ -4085,15 +3963,15 @@ void ARM64FloatEmitter::ABI_PushRegisters(BitSet32 registers, ARM64Reg tmp)
     std::vector<ARM64Reg> pair_regs;
     for (auto it : registers)
     {
-      pair_regs.push_back((ARM64Reg)(Q0 + it));
+      pair_regs.push_back(ARM64Reg::Q0 + it);
       if (pair_regs.size() == 2)
       {
-        STP(128, IndexType::Pre, pair_regs[0], pair_regs[1], SP, -32);
+        STP(128, IndexType::Pre, pair_regs[0], pair_regs[1], ARM64Reg::SP, -32);
         pair_regs.clear();
       }
     }
     if (pair_regs.size())
-      STR(128, IndexType::Pre, pair_regs[0], SP, -16);
+      STR(128, IndexType::Pre, pair_regs[0], ARM64Reg::SP, -16);
   }
 }
 void ARM64FloatEmitter::ABI_PopRegisters(BitSet32 registers, ARM64Reg tmp)
@@ -4117,7 +3995,7 @@ void ARM64FloatEmitter::ABI_PopRegisters(BitSet32 registers, ARM64Reg tmp)
     }
   }
 
-  if (bundled_loadstore && tmp != INVALID_REG)
+  if (bundled_loadstore && tmp != ARM64Reg::INVALID_REG)
   {
     // The temporary register is only used to indicate that we can use this code path
     std::vector<ARM64Reg> island_regs;
     for (int i = 0; i < 32; ++i)
     {
@@ -4132,9 +4010,9 @@ void ARM64FloatEmitter::ABI_PopRegisters(BitSet32 registers, ARM64Reg tmp)
      }
 
      if (count == 1)
-        island_regs.push_back((ARM64Reg)(Q0 + i));
+        island_regs.push_back(ARM64Reg::Q0 + i);
      else
-        LD1(64, count, IndexType::Post, (ARM64Reg)(Q0 + i), SP);
+        LD1(64, count, IndexType::Post, ARM64Reg::Q0 + i, ARM64Reg::SP);
 
      i += count - 1;
     }
@@ -4146,12 +4024,12 @@ void ARM64FloatEmitter::ABI_PopRegisters(BitSet32 registers, ARM64Reg tmp)
      pair_regs.push_back(it);
      if (pair_regs.size() == 2)
      {
-        LDP(128, IndexType::Post, pair_regs[0], pair_regs[1], SP, 32);
+        LDP(128, IndexType::Post, pair_regs[0], pair_regs[1], ARM64Reg::SP, 32);
        pair_regs.clear();
      }
     }
     if (pair_regs.size())
-      LDR(128, IndexType::Post, pair_regs[0], SP, 16);
+      LDR(128, IndexType::Post, pair_regs[0], ARM64Reg::SP, 16);
   }
   else
   {
@@ -4166,14 +4044,14 @@ void ARM64FloatEmitter::ABI_PopRegisters(BitSet32 registers, ARM64Reg tmp)
      {
        // First load must be a regular LDR if odd
        odd = false;
-        LDR(128, IndexType::Post, (ARM64Reg)(Q0 + i), SP, 16);
+        LDR(128, IndexType::Post, ARM64Reg::Q0 + i, ARM64Reg::SP, 16);
      }
      else
      {
-        pair_regs.push_back((ARM64Reg)(Q0 + i));
+        pair_regs.push_back(ARM64Reg::Q0 + i);
        if (pair_regs.size() == 2)
        {
-          LDP(128, IndexType::Post, pair_regs[1], pair_regs[0], SP, 32);
+          LDP(128, IndexType::Post, pair_regs[1], pair_regs[0], ARM64Reg::SP, 32);
          pair_regs.clear();
        }
      }
@@ -4193,7 +4071,7 @@ void ARM64XEmitter::ANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch)
   }
   else
   {
-
ASSERT_MSG(DYNA_REC, scratch != INVALID_REG, + ASSERT_MSG(DYNA_REC, scratch != ARM64Reg::INVALID_REG, "ANDI2R - failed to construct logical immediate value from %08x, need scratch", (u32)imm); MOVI2R(scratch, imm); @@ -4210,7 +4088,7 @@ void ARM64XEmitter::ORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) } else { - ASSERT_MSG(DYNA_REC, scratch != INVALID_REG, + ASSERT_MSG(DYNA_REC, scratch != ARM64Reg::INVALID_REG, "ORRI2R - failed to construct logical immediate value from %08x, need scratch", (u32)imm); MOVI2R(scratch, imm); @@ -4227,7 +4105,7 @@ void ARM64XEmitter::EORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) } else { - ASSERT_MSG(DYNA_REC, scratch != INVALID_REG, + ASSERT_MSG(DYNA_REC, scratch != ARM64Reg::INVALID_REG, "EORI2R - failed to construct logical immediate value from %08x, need scratch", (u32)imm); MOVI2R(scratch, imm); @@ -4244,7 +4122,7 @@ void ARM64XEmitter::ANDSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) } else { - ASSERT_MSG(DYNA_REC, scratch != INVALID_REG, + ASSERT_MSG(DYNA_REC, scratch != ARM64Reg::INVALID_REG, "ANDSI2R - failed to construct logical immediate value from %08x, need scratch", (u32)imm); MOVI2R(scratch, imm); @@ -4274,7 +4152,7 @@ void ARM64XEmitter::AddImmediate(ARM64Reg Rd, ARM64Reg Rn, u64 imm, bool shift, void ARM64XEmitter::ADDI2R_internal(ARM64Reg Rd, ARM64Reg Rn, u64 imm, bool negative, bool flags, ARM64Reg scratch) { - bool has_scratch = scratch != INVALID_REG; + bool has_scratch = scratch != ARM64Reg::INVALID_REG; u64 imm_neg = Is64Bit(Rd) ? u64(-s64(imm)) : u64(-s64(imm)) & 0xFFFFFFFFuLL; bool neg_neg = negative ? false : true; @@ -4360,7 +4238,7 @@ void ARM64XEmitter::SUBSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) void ARM64XEmitter::CMPI2R(ARM64Reg Rn, u64 imm, ARM64Reg scratch) { - ADDI2R_internal(Is64Bit(Rn) ? ZR : WZR, Rn, imm, true, true, scratch); + ADDI2R_internal(Is64Bit(Rn) ? ARM64Reg::ZR : ARM64Reg::WZR, Rn, imm, true, true, scratch); } bool ARM64XEmitter::TryADDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm) @@ -4441,7 +4319,7 @@ void ARM64FloatEmitter::MOVI2F(ARM64Reg Rd, float value, ARM64Reg scratch, bool if (value == 0.0f) { - FMOV(Rd, IsDouble(Rd) ? ZR : WZR); + FMOV(Rd, IsDouble(Rd) ? 
ARM64Reg::ZR : ARM64Reg::WZR);
     if (negate)
       FNEG(Rd, Rd);
     // TODO: There are some other values we could generate with the float-imm instruction, like
@@ -4453,7 +4331,7 @@
   }
   else
   {
-    ASSERT_MSG(DYNA_REC, scratch != INVALID_REG,
+    ASSERT_MSG(DYNA_REC, scratch != ARM64Reg::INVALID_REG,
                "Failed to find a way to generate FP immediate %f without scratch", value);
     if (negate)
       value = -value;
@@ -4469,7 +4347,7 @@ void ARM64FloatEmitter::MOVI2FDUP(ARM64Reg Rd, float value, ARM64Reg scratch)
 {
   // TODO: Make it work with more element sizes
   // TODO: Optimize - there are shorter solution for many values
-  ARM64Reg s = (ARM64Reg)(S0 + DecodeReg(Rd));
+  ARM64Reg s = ARM64Reg::S0 + DecodeReg(Rd);
   MOVI2F(s, value, scratch);
   DUP(32, Rd, Rd, 0);
 }
diff --git a/Source/Core/Common/Arm64Emitter.h b/Source/Core/Common/Arm64Emitter.h
index 874f1d2fed..06c632537b 100644
--- a/Source/Core/Common/Arm64Emitter.h
+++ b/Source/Core/Common/Arm64Emitter.h
@@ -23,7 +23,7 @@ namespace Arm64Gen
 // 010 - VFP single precision
 // 100 - VFP double precision
 // 110 - VFP quad precision
-enum ARM64Reg
+enum class ARM64Reg
 {
   // 32bit registers
   W0 = 0,
@@ -224,9 +224,21 @@ enum ARM64Reg
   WZR = WSP,
   ZR = SP,
 
-  INVALID_REG = 0xFFFFFFFF
+  INVALID_REG = -1,
 };
 
+constexpr int operator&(const ARM64Reg& reg, const int mask)
+{
+  return static_cast<int>(reg) & mask;
+}
+constexpr int operator|(const ARM64Reg& reg, const int mask)
+{
+  return static_cast<int>(reg) | mask;
+}
+constexpr ARM64Reg operator+(const ARM64Reg& reg, const int addend)
+{
+  return static_cast<ARM64Reg>(static_cast<int>(reg) + addend);
+}
 constexpr bool Is64Bit(ARM64Reg reg)
 {
   return (reg & 0x20) != 0;
@@ -256,9 +268,13 @@ constexpr bool IsGPR(ARM64Reg reg)
   return static_cast<int>(reg) < 0x40;
 }
 
-constexpr ARM64Reg DecodeReg(ARM64Reg reg)
+constexpr int DecodeReg(ARM64Reg reg)
 {
-  return static_cast<ARM64Reg>(reg & 0x1F);
+  return reg & 0x1F;
+}
+constexpr ARM64Reg EncodeRegTo32(ARM64Reg reg)
+{
+  return static_cast<ARM64Reg>(DecodeReg(reg));
 }
 constexpr ARM64Reg EncodeRegTo64(ARM64Reg reg)
 {
@@ -266,7 +282,7 @@ constexpr ARM64Reg EncodeRegTo64(ARM64Reg reg)
 }
 constexpr ARM64Reg EncodeRegToSingle(ARM64Reg reg)
 {
-  return static_cast<ARM64Reg>(DecodeReg(reg) + S0);
+  return static_cast<ARM64Reg>(ARM64Reg::S0 | DecodeReg(reg));
 }
 constexpr ARM64Reg EncodeRegToDouble(ARM64Reg reg)
 {
@@ -578,7 +594,7 @@ public:
   // Unconditional Branch (register)
   void BR(ARM64Reg Rn);
   void BLR(ARM64Reg Rn);
-  void RET(ARM64Reg Rn = X30);
+  void RET(ARM64Reg Rn = ARM64Reg::X30);
   void ERET();
   void DRPS();
 
@@ -648,15 +664,15 @@ public:
   // Aliases
   void CSET(ARM64Reg Rd, CCFlags cond)
   {
-    ARM64Reg zr = Is64Bit(Rd) ? ZR : WZR;
+    ARM64Reg zr = Is64Bit(Rd) ? ARM64Reg::ZR : ARM64Reg::WZR;
     CSINC(Rd, zr, zr, (CCFlags)((u32)cond ^ 1));
   }
   void CSETM(ARM64Reg Rd, CCFlags cond)
   {
-    ARM64Reg zr = Is64Bit(Rd) ? ZR : WZR;
+    ARM64Reg zr = Is64Bit(Rd) ? ARM64Reg::ZR : ARM64Reg::WZR;
     CSINV(Rd, zr, zr, (CCFlags)((u32)cond ^ 1));
   }
-  void NEG(ARM64Reg Rd, ARM64Reg Rs) { SUB(Rd, Is64Bit(Rd) ? ZR : WZR, Rs); }
+  void NEG(ARM64Reg Rd, ARM64Reg Rs) { SUB(Rd, Is64Bit(Rd) ? ARM64Reg::ZR : ARM64Reg::WZR, Rs); }
   // Data-Processing 1 source
   void RBIT(ARM64Reg Rd, ARM64Reg Rn);
   void REV16(ARM64Reg Rd, ARM64Reg Rn);
@@ -704,10 +720,10 @@ public:
   void EON(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift);
   void ANDS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift);
   void BICS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift);
-  void TST(ARM64Reg Rn, ARM64Reg Rm) { ANDS(Is64Bit(Rn) ?
ZR : WZR, Rn, Rm); }
+  void TST(ARM64Reg Rn, ARM64Reg Rm) { ANDS(Is64Bit(Rn) ? ARM64Reg::ZR : ARM64Reg::WZR, Rn, Rm); }
   void TST(ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift)
   {
-    ANDS(Is64Bit(Rn) ? ZR : WZR, Rn, Rm, Shift);
+    ANDS(Is64Bit(Rn) ? ARM64Reg::ZR : ARM64Reg::WZR, Rn, Rm, Shift);
   }
 
   // Wrap the above for saner syntax
@@ -879,22 +895,22 @@ public:
   // Wrapper around AND x, y, imm etc. If you are sure the imm will work, no need to pass a scratch
   // register.
-  void ANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);
-  void ANDSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);
-  void TSTI2R(ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG)
+  void ANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG);
+  void ANDSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG);
+  void TSTI2R(ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG)
   {
-    ANDSI2R(Is64Bit(Rn) ? ZR : WZR, Rn, imm, scratch);
+    ANDSI2R(Is64Bit(Rn) ? ARM64Reg::ZR : ARM64Reg::WZR, Rn, imm, scratch);
   }
-  void ORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);
-  void EORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);
-  void CMPI2R(ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);
+  void ORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG);
+  void EORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG);
+  void CMPI2R(ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG);
 
   void ADDI2R_internal(ARM64Reg Rd, ARM64Reg Rn, u64 imm, bool negative, bool flags,
                        ARM64Reg scratch);
-  void ADDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);
-  void ADDSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);
-  void SUBI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);
-  void SUBSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);
+  void ADDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG);
+  void ADDSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG);
+  void SUBI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG);
+  void SUBSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG);
 
   bool TryADDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm);
   bool TrySUBI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm);
@@ -925,9 +941,9 @@ public:
   ARM64Reg ABI_SetupLambda(const std::function<T(Args...)>* f)
   {
     auto trampoline = &ARM64XEmitter::CallLambdaTrampoline<T, Args...>;
-    MOVP2R(X8, trampoline);
-    MOVP2R(X0, const_cast<void*>((const void*)f));
-    return X8;
+    MOVP2R(ARM64Reg::X8, trampoline);
+    MOVP2R(ARM64Reg::X0, const_cast<void*>((const void*)f));
+    return ARM64Reg::X8;
   }
 
   // Plain function call
@@ -962,9 +978,9 @@ public:
   // Loadstore multiple structure
   void LD1(u8 size, u8 count, ARM64Reg Rt, ARM64Reg Rn);
-  void LD1(u8 size, u8 count, IndexType type, ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm = SP);
+  void LD1(u8 size, u8 count, IndexType type, ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm = ARM64Reg::SP);
   void ST1(u8 size, u8 count, ARM64Reg Rt, ARM64Reg Rn);
-  void ST1(u8 size, u8 count, IndexType type, ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm = SP);
+  void ST1(u8 size, u8 count, IndexType type, ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm = ARM64Reg::SP);
 
   // Loadstore paired
   void LDP(u8 size, IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, s32 imm);
@@ -1109,12 +1125,13 @@ public:
   void MOVI(u8 size,
ARM64Reg Rd, u64 imm, u8 shift = 0); void BIC(u8 size, ARM64Reg Rd, u8 imm, u8 shift = 0); - void MOVI2F(ARM64Reg Rd, float value, ARM64Reg scratch = INVALID_REG, bool negate = false); - void MOVI2FDUP(ARM64Reg Rd, float value, ARM64Reg scratch = INVALID_REG); + void MOVI2F(ARM64Reg Rd, float value, ARM64Reg scratch = ARM64Reg::INVALID_REG, + bool negate = false); + void MOVI2FDUP(ARM64Reg Rd, float value, ARM64Reg scratch = ARM64Reg::INVALID_REG); // ABI related - void ABI_PushRegisters(BitSet32 registers, ARM64Reg tmp = INVALID_REG); - void ABI_PopRegisters(BitSet32 registers, ARM64Reg tmp = INVALID_REG); + void ABI_PushRegisters(BitSet32 registers, ARM64Reg tmp = ARM64Reg::INVALID_REG); + void ABI_PopRegisters(BitSet32 registers, ARM64Reg tmp = ARM64Reg::INVALID_REG); private: ARM64XEmitter* m_emit; diff --git a/Source/Core/Core/PowerPC/JitArm64/Jit.cpp b/Source/Core/Core/PowerPC/JitArm64/Jit.cpp index 27420cbb09..864a19ba18 100644 --- a/Source/Core/Core/PowerPC/JitArm64/Jit.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/Jit.cpp @@ -157,9 +157,9 @@ void JitArm64::FallBackToInterpreter(UGeckoInstruction inst) } Interpreter::Instruction instr = PPCTables::GetInterpreterOp(inst); - MOVP2R(X8, instr); - MOVI2R(W0, inst.hex); - BLR(X8); + MOVP2R(ARM64Reg::X8, instr); + MOVI2R(ARM64Reg::W0, inst.hex); + BLR(ARM64Reg::X8); if (js.op->opinfo->flags & FL_ENDBLOCK) { @@ -213,10 +213,10 @@ void JitArm64::HLEFunction(u32 hook_index) gpr.Flush(FlushMode::All); fpr.Flush(FlushMode::All); - MOVP2R(X8, &HLE::Execute); - MOVI2R(W0, js.compilerPC); - MOVI2R(W1, hook_index); - BLR(X8); + MOVP2R(ARM64Reg::X8, &HLE::Execute); + MOVI2R(ARM64Reg::W0, js.compilerPC); + MOVI2R(ARM64Reg::W1, hook_index); + BLR(ARM64Reg::X8); } void JitArm64::DoNothing(UGeckoInstruction inst) @@ -236,31 +236,31 @@ void JitArm64::Cleanup() { static_assert(PPCSTATE_OFF(gather_pipe_ptr) <= 504); static_assert(PPCSTATE_OFF(gather_pipe_ptr) + 8 == PPCSTATE_OFF(gather_pipe_base_ptr)); - LDP(IndexType::Signed, X0, X1, PPC_REG, PPCSTATE_OFF(gather_pipe_ptr)); - SUB(X0, X0, X1); - CMP(X0, GPFifo::GATHER_PIPE_SIZE); + LDP(IndexType::Signed, ARM64Reg::X0, ARM64Reg::X1, PPC_REG, PPCSTATE_OFF(gather_pipe_ptr)); + SUB(ARM64Reg::X0, ARM64Reg::X0, ARM64Reg::X1); + CMP(ARM64Reg::X0, GPFifo::GATHER_PIPE_SIZE); FixupBranch exit = B(CC_LT); - MOVP2R(X0, &GPFifo::UpdateGatherPipe); - BLR(X0); + MOVP2R(ARM64Reg::X0, &GPFifo::UpdateGatherPipe); + BLR(ARM64Reg::X0); SetJumpTarget(exit); } // SPEED HACK: MMCR0/MMCR1 should be checked at run-time, not at compile time. 
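// [Editor's sketch, not part of this patch] A run-time version of the check
// below would read the SPRs out of ppcState instead of baking the
// compile-time MMCR0/MMCR1 values into the block. Assuming the
// PPCSTATE_OFF_SPR helper seen elsewhere in this patch and the usual
// SPR_MMCR0/SPR_MMCR1 constants, the emitted guard could look roughly like:
//
//   LDR(IndexType::Unsigned, ARM64Reg::W0, PPC_REG, PPCSTATE_OFF_SPR(SPR_MMCR0));
//   LDR(IndexType::Unsigned, ARM64Reg::W1, PPC_REG, PPCSTATE_OFF_SPR(SPR_MMCR1));
//   ORR(ARM64Reg::W0, ARM64Reg::W0, ARM64Reg::W1);
//   FixupBranch skip_pmu = CBZ(ARM64Reg::W0);
//   // ... emit the UpdatePerformanceMonitor call as below ...
//   SetJumpTarget(skip_pmu);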
if (MMCR0.Hex || MMCR1.Hex) { - MOVP2R(X8, &PowerPC::UpdatePerformanceMonitor); - MOVI2R(X0, js.downcountAmount); - MOVI2R(X1, js.numLoadStoreInst); - MOVI2R(X2, js.numFloatingPointInst); - BLR(X8); + MOVP2R(ARM64Reg::X8, &PowerPC::UpdatePerformanceMonitor); + MOVI2R(ARM64Reg::X0, js.downcountAmount); + MOVI2R(ARM64Reg::X1, js.numLoadStoreInst); + MOVI2R(ARM64Reg::X2, js.numFloatingPointInst); + BLR(ARM64Reg::X8); } } void JitArm64::DoDownCount() { - LDR(IndexType::Unsigned, W0, PPC_REG, PPCSTATE_OFF(downcount)); - SUBSI2R(W0, W0, js.downcountAmount, W1); - STR(IndexType::Unsigned, W0, PPC_REG, PPCSTATE_OFF(downcount)); + LDR(IndexType::Unsigned, ARM64Reg::W0, PPC_REG, PPCSTATE_OFF(downcount)); + SUBSI2R(ARM64Reg::W0, ARM64Reg::W0, js.downcountAmount, ARM64Reg::W1); + STR(IndexType::Unsigned, ARM64Reg::W0, PPC_REG, PPCSTATE_OFF(downcount)); } void JitArm64::ResetStack() @@ -268,8 +268,8 @@ void JitArm64::ResetStack() if (!m_enable_blr_optimization) return; - LDR(IndexType::Unsigned, X0, PPC_REG, PPCSTATE_OFF(stored_stack_pointer)); - ADD(SP, X0, 0); + LDR(IndexType::Unsigned, ARM64Reg::X0, PPC_REG, PPCSTATE_OFF(stored_stack_pointer)); + ADD(ARM64Reg::SP, ARM64Reg::X0, 0); } void JitArm64::AllocStack() @@ -317,9 +317,9 @@ void JitArm64::WriteExit(u32 destination, bool LK, u32 exit_address_after_return if (LK) { // Push {ARM_PC+20; PPC_PC} on the stack - MOVI2R(X1, exit_address_after_return); - ADR(X0, 20); - STP(IndexType::Pre, X0, X1, SP, -16); + MOVI2R(ARM64Reg::X1, exit_address_after_return); + ADR(ARM64Reg::X0, 20); + STP(IndexType::Pre, ARM64Reg::X0, ARM64Reg::X1, ARM64Reg::SP, -16); } JitBlock* b = js.curBlock; @@ -363,9 +363,9 @@ void JitArm64::WriteExit(Arm64Gen::ARM64Reg dest, bool LK, u32 exit_address_afte else { // Push {ARM_PC, PPC_PC} on the stack - MOVI2R(X1, exit_address_after_return); - ADR(X0, 12); - STP(IndexType::Pre, X0, X1, SP, -16); + MOVI2R(ARM64Reg::X1, exit_address_after_return); + ADR(ARM64Reg::X0, 12); + STP(IndexType::Pre, ARM64Reg::X0, ARM64Reg::X1, ARM64Reg::SP, -16); BL(dispatcher); @@ -393,7 +393,7 @@ void JitArm64::FakeLKExit(u32 exit_address_after_return) ARM64Reg code_reg = gpr.GetReg(); MOVI2R(after_reg, exit_address_after_return); ADR(EncodeRegTo64(code_reg), 12); - STP(IndexType::Pre, EncodeRegTo64(code_reg), EncodeRegTo64(after_reg), SP, -16); + STP(IndexType::Pre, EncodeRegTo64(code_reg), EncodeRegTo64(after_reg), ARM64Reg::SP, -16); gpr.Unlock(after_reg, code_reg); FixupBranch skip_exit = BL(); @@ -427,13 +427,13 @@ void JitArm64::WriteBLRExit(Arm64Gen::ARM64Reg dest) EndTimeProfile(js.curBlock); // Check if {ARM_PC, PPC_PC} matches the current state. 
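// [Editor's note] This is the matching pop for the pushes emitted in
// WriteExit and FakeLKExit above: each fake call pushed a {host return
// address, guest return address} pair with STP(IndexType::Pre, ..., SP, -16).
// In C-like pseudocode, the fast path below is:
//
//   pair = *sp; sp += 16;                      // LDP(IndexType::Post, ...)
//   if (pair.ppc_pc == DISPATCHER_PC)
//     { DoDownCount(); goto pair.arm_pc; }     // RET(X2)
//   // otherwise fall through to the dispatcher exit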
- LDP(IndexType::Post, X2, X1, SP, 16); - CMP(W1, DISPATCHER_PC); + LDP(IndexType::Post, ARM64Reg::X2, ARM64Reg::X1, ARM64Reg::SP, 16); + CMP(ARM64Reg::W1, DISPATCHER_PC); FixupBranch no_match = B(CC_NEQ); DoDownCount(); // overwrites X0 + X1 - RET(X2); + RET(ARM64Reg::X2); SetJumpTarget(no_match); @@ -448,19 +448,19 @@ void JitArm64::WriteExceptionExit(u32 destination, bool only_external) { Cleanup(); - LDR(IndexType::Unsigned, W30, PPC_REG, PPCSTATE_OFF(Exceptions)); + LDR(IndexType::Unsigned, ARM64Reg::W30, PPC_REG, PPCSTATE_OFF(Exceptions)); MOVI2R(DISPATCHER_PC, destination); - FixupBranch no_exceptions = CBZ(W30); + FixupBranch no_exceptions = CBZ(ARM64Reg::W30); static_assert(PPCSTATE_OFF(pc) <= 252); static_assert(PPCSTATE_OFF(pc) + 4 == PPCSTATE_OFF(npc)); STP(IndexType::Signed, DISPATCHER_PC, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(pc)); if (only_external) - MOVP2R(X8, &PowerPC::CheckExternalExceptions); + MOVP2R(ARM64Reg::X8, &PowerPC::CheckExternalExceptions); else - MOVP2R(X8, &PowerPC::CheckExceptions); - BLR(X8); + MOVP2R(ARM64Reg::X8, &PowerPC::CheckExceptions); + BLR(ARM64Reg::X8); LDR(IndexType::Unsigned, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(npc)); @@ -479,8 +479,8 @@ void JitArm64::WriteExceptionExit(ARM64Reg dest, bool only_external) Cleanup(); - LDR(IndexType::Unsigned, W30, PPC_REG, PPCSTATE_OFF(Exceptions)); - FixupBranch no_exceptions = CBZ(W30); + LDR(IndexType::Unsigned, ARM64Reg::W30, PPC_REG, PPCSTATE_OFF(Exceptions)); + FixupBranch no_exceptions = CBZ(ARM64Reg::W30); static_assert(PPCSTATE_OFF(pc) <= 252); static_assert(PPCSTATE_OFF(pc) + 4 == PPCSTATE_OFF(npc)); @@ -527,15 +527,16 @@ void JitArm64::DumpCode(const u8* start, const u8* end) void JitArm64::BeginTimeProfile(JitBlock* b) { - MOVP2R(X0, &b->profile_data); - LDR(IndexType::Unsigned, X1, X0, offsetof(JitBlock::ProfileData, runCount)); - ADD(X1, X1, 1); + MOVP2R(ARM64Reg::X0, &b->profile_data); + LDR(IndexType::Unsigned, ARM64Reg::X1, ARM64Reg::X0, offsetof(JitBlock::ProfileData, runCount)); + ADD(ARM64Reg::X1, ARM64Reg::X1, 1); // Fetch the current counter register - CNTVCT(X2); + CNTVCT(ARM64Reg::X2); // stores runCount and ticStart - STP(IndexType::Signed, X1, X2, X0, offsetof(JitBlock::ProfileData, runCount)); + STP(IndexType::Signed, ARM64Reg::X1, ARM64Reg::X2, ARM64Reg::X0, + offsetof(JitBlock::ProfileData, runCount)); } void JitArm64::EndTimeProfile(JitBlock* b) @@ -544,20 +545,22 @@ void JitArm64::EndTimeProfile(JitBlock* b) return; // Fetch the current counter register - CNTVCT(X1); + CNTVCT(ARM64Reg::X1); - MOVP2R(X0, &b->profile_data); + MOVP2R(ARM64Reg::X0, &b->profile_data); - LDR(IndexType::Unsigned, X2, X0, offsetof(JitBlock::ProfileData, ticStart)); - SUB(X1, X1, X2); + LDR(IndexType::Unsigned, ARM64Reg::X2, ARM64Reg::X0, offsetof(JitBlock::ProfileData, ticStart)); + SUB(ARM64Reg::X1, ARM64Reg::X1, ARM64Reg::X2); // loads ticCounter and downcountCounter - LDP(IndexType::Signed, X2, X3, X0, offsetof(JitBlock::ProfileData, ticCounter)); - ADD(X2, X2, X1); - ADDI2R(X3, X3, js.downcountAmount, X1); + LDP(IndexType::Signed, ARM64Reg::X2, ARM64Reg::X3, ARM64Reg::X0, + offsetof(JitBlock::ProfileData, ticCounter)); + ADD(ARM64Reg::X2, ARM64Reg::X2, ARM64Reg::X1); + ADDI2R(ARM64Reg::X3, ARM64Reg::X3, js.downcountAmount, ARM64Reg::X1); // stores ticCounter and downcountCounter - STP(IndexType::Signed, X2, X3, X0, offsetof(JitBlock::ProfileData, ticCounter)); + STP(IndexType::Signed, ARM64Reg::X2, ARM64Reg::X3, ARM64Reg::X0, + offsetof(JitBlock::ProfileData, ticCounter)); } void JitArm64::Run() @@ 
-666,16 +669,16 @@ void JitArm64::DoJit(u32 em_address, JitBlock* b, u32 nextPC) int gqr = *code_block.m_gqr_used.begin(); if (!code_block.m_gqr_modified[gqr] && !GQR(gqr)) { - LDR(IndexType::Unsigned, W0, PPC_REG, PPCSTATE_OFF_SPR(SPR_GQR0 + gqr)); - FixupBranch no_fail = CBZ(W0); + LDR(IndexType::Unsigned, ARM64Reg::W0, PPC_REG, PPCSTATE_OFF_SPR(SPR_GQR0 + gqr)); + FixupBranch no_fail = CBZ(ARM64Reg::W0); FixupBranch fail = B(); SwitchToFarCode(); SetJumpTarget(fail); MOVI2R(DISPATCHER_PC, js.blockStart); STR(IndexType::Unsigned, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(pc)); - MOVI2R(W0, static_cast(JitInterface::ExceptionType::PairedQuantize)); - MOVP2R(X1, &JitInterface::CompileExceptionCheck); - BLR(X1); + MOVI2R(ARM64Reg::W0, static_cast(JitInterface::ExceptionType::PairedQuantize)); + MOVP2R(ARM64Reg::X1, &JitInterface::CompileExceptionCheck); + BLR(ARM64Reg::X1); B(dispatcher_no_check); SwitchToNearCode(); SetJumpTarget(no_fail); @@ -719,10 +722,10 @@ void JitArm64::DoJit(u32 em_address, JitBlock* b, u32 nextPC) js.fifoBytesSinceCheck = 0; js.mustCheckFifo = false; - gpr.Lock(W30); + gpr.Lock(ARM64Reg::W30); BitSet32 regs_in_use = gpr.GetCallerSavedUsed(); BitSet32 fprs_in_use = fpr.GetCallerSavedUsed(); - regs_in_use[W30] = 0; + regs_in_use[DecodeReg(ARM64Reg::W30)] = 0; FixupBranch Exception = B(); SwitchToFarCode(); @@ -730,20 +733,20 @@ void JitArm64::DoJit(u32 em_address, JitBlock* b, u32 nextPC) FixupBranch exit = B(); SetJumpTarget(Exception); ABI_PushRegisters(regs_in_use); - m_float_emit.ABI_PushRegisters(fprs_in_use, X30); - MOVP2R(X8, &GPFifo::FastCheckGatherPipe); - BLR(X8); - m_float_emit.ABI_PopRegisters(fprs_in_use, X30); + m_float_emit.ABI_PushRegisters(fprs_in_use, ARM64Reg::X30); + MOVP2R(ARM64Reg::X8, &GPFifo::FastCheckGatherPipe); + BLR(ARM64Reg::X8); + m_float_emit.ABI_PopRegisters(fprs_in_use, ARM64Reg::X30); ABI_PopRegisters(regs_in_use); // Inline exception check - LDR(IndexType::Unsigned, W30, PPC_REG, PPCSTATE_OFF(Exceptions)); - TBZ(W30, 3, done_here); // EXCEPTION_EXTERNAL_INT - LDR(IndexType::Unsigned, W30, PPC_REG, PPCSTATE_OFF(msr)); - TBZ(W30, 11, done_here); - MOVP2R(X30, &ProcessorInterface::m_InterruptCause); - LDR(IndexType::Unsigned, W30, X30, 0); - TST(W30, 23, 2); + LDR(IndexType::Unsigned, ARM64Reg::W30, PPC_REG, PPCSTATE_OFF(Exceptions)); + TBZ(ARM64Reg::W30, 3, done_here); // EXCEPTION_EXTERNAL_INT + LDR(IndexType::Unsigned, ARM64Reg::W30, PPC_REG, PPCSTATE_OFF(msr)); + TBZ(ARM64Reg::W30, 11, done_here); + MOVP2R(ARM64Reg::X30, &ProcessorInterface::m_InterruptCause); + LDR(IndexType::Unsigned, ARM64Reg::W30, ARM64Reg::X30, 0); + TST(ARM64Reg::W30, 23, 2); B(CC_EQ, done_here); gpr.Flush(FlushMode::MaintainState); @@ -751,7 +754,7 @@ void JitArm64::DoJit(u32 em_address, JitBlock* b, u32 nextPC) WriteExceptionExit(js.compilerPC, true); SwitchToNearCode(); SetJumpTarget(exit); - gpr.Unlock(W30); + gpr.Unlock(ARM64Reg::W30); // So we don't check exceptions twice gatherPipeIntCheck = false; diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_BackPatch.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_BackPatch.cpp index b410064f20..de3a8bf683 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_BackPatch.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_BackPatch.cpp @@ -62,30 +62,30 @@ void JitArm64::EmitBackpatchRoutine(u32 flags, bool fastmem, bool do_farcode, AR { if (flags & BackPatchInfo::FLAG_SIZE_F32) { - m_float_emit.FCVT(32, 64, D0, RS); - m_float_emit.REV32(8, D0, D0); - m_float_emit.STR(32, D0, MEM_REG, addr); + 
m_float_emit.FCVT(32, 64, ARM64Reg::D0, RS); + m_float_emit.REV32(8, ARM64Reg::D0, ARM64Reg::D0); + m_float_emit.STR(32, ARM64Reg::D0, MEM_REG, addr); } else if (flags & BackPatchInfo::FLAG_SIZE_F32I) { - m_float_emit.REV32(8, D0, RS); - m_float_emit.STR(32, D0, MEM_REG, addr); + m_float_emit.REV32(8, ARM64Reg::D0, RS); + m_float_emit.STR(32, ARM64Reg::D0, MEM_REG, addr); } else if (flags & BackPatchInfo::FLAG_SIZE_F32X2) { - m_float_emit.FCVTN(32, D0, RS); - m_float_emit.REV32(8, D0, D0); - m_float_emit.STR(64, Q0, MEM_REG, addr); + m_float_emit.FCVTN(32, ARM64Reg::D0, RS); + m_float_emit.REV32(8, ARM64Reg::D0, ARM64Reg::D0); + m_float_emit.STR(64, ARM64Reg::Q0, MEM_REG, addr); } else if (flags & BackPatchInfo::FLAG_SIZE_F32X2I) { - m_float_emit.REV32(8, D0, RS); - m_float_emit.STR(64, Q0, MEM_REG, addr); + m_float_emit.REV32(8, ARM64Reg::D0, RS); + m_float_emit.STR(64, ARM64Reg::Q0, MEM_REG, addr); } else { - m_float_emit.REV64(8, Q0, RS); - m_float_emit.STR(64, Q0, MEM_REG, addr); + m_float_emit.REV64(8, ARM64Reg::Q0, RS); + m_float_emit.STR(64, ARM64Reg::Q0, MEM_REG, addr); } } else if (flags & BackPatchInfo::FLAG_LOAD && flags & BackPatchInfo::FLAG_MASK_FLOAT) @@ -103,7 +103,7 @@ void JitArm64::EmitBackpatchRoutine(u32 flags, bool fastmem, bool do_farcode, AR } else if (flags & BackPatchInfo::FLAG_STORE) { - ARM64Reg temp = W0; + ARM64Reg temp = ARM64Reg::W0; if (flags & BackPatchInfo::FLAG_SIZE_32) REV32(temp, RS); else if (flags & BackPatchInfo::FLAG_SIZE_16) @@ -120,8 +120,8 @@ void JitArm64::EmitBackpatchRoutine(u32 flags, bool fastmem, bool do_farcode, AR { // This literally only stores 32bytes of zeros to the target address ADD(addr, addr, MEM_REG); - STP(IndexType::Signed, ZR, ZR, addr, 0); - STP(IndexType::Signed, ZR, ZR, addr, 16); + STP(IndexType::Signed, ARM64Reg::ZR, ARM64Reg::ZR, addr, 0); + STP(IndexType::Signed, ARM64Reg::ZR, ARM64Reg::ZR, addr, 16); } else { @@ -179,112 +179,112 @@ void JitArm64::EmitBackpatchRoutine(u32 flags, bool fastmem, bool do_farcode, AR } ABI_PushRegisters(gprs_to_push); - m_float_emit.ABI_PushRegisters(fprs_to_push, X30); + m_float_emit.ABI_PushRegisters(fprs_to_push, ARM64Reg::X30); if (flags & BackPatchInfo::FLAG_STORE && flags & BackPatchInfo::FLAG_MASK_FLOAT) { if (flags & BackPatchInfo::FLAG_SIZE_F32) { - m_float_emit.FCVT(32, 64, D0, RS); - m_float_emit.UMOV(32, W0, Q0, 0); - MOVP2R(X8, &PowerPC::Write_U32); - BLR(X8); + m_float_emit.FCVT(32, 64, ARM64Reg::D0, RS); + m_float_emit.UMOV(32, ARM64Reg::W0, ARM64Reg::Q0, 0); + MOVP2R(ARM64Reg::X8, &PowerPC::Write_U32); + BLR(ARM64Reg::X8); } else if (flags & BackPatchInfo::FLAG_SIZE_F32I) { - m_float_emit.UMOV(32, W0, RS, 0); - MOVP2R(X8, &PowerPC::Write_U32); - BLR(X8); + m_float_emit.UMOV(32, ARM64Reg::W0, RS, 0); + MOVP2R(ARM64Reg::X8, &PowerPC::Write_U32); + BLR(ARM64Reg::X8); } else if (flags & BackPatchInfo::FLAG_SIZE_F32X2) { - m_float_emit.FCVTN(32, D0, RS); - m_float_emit.UMOV(64, X0, D0, 0); - ROR(X0, X0, 32); - MOVP2R(X8, &PowerPC::Write_U64); - BLR(X8); + m_float_emit.FCVTN(32, ARM64Reg::D0, RS); + m_float_emit.UMOV(64, ARM64Reg::X0, ARM64Reg::D0, 0); + ROR(ARM64Reg::X0, ARM64Reg::X0, 32); + MOVP2R(ARM64Reg::X8, &PowerPC::Write_U64); + BLR(ARM64Reg::X8); } else if (flags & BackPatchInfo::FLAG_SIZE_F32X2I) { - m_float_emit.UMOV(64, X0, RS, 0); - ROR(X0, X0, 32); - MOVP2R(X8, &PowerPC::Write_U64); - BLR(X8); + m_float_emit.UMOV(64, ARM64Reg::X0, RS, 0); + ROR(ARM64Reg::X0, ARM64Reg::X0, 32); + MOVP2R(ARM64Reg::X8, &PowerPC::Write_U64); + BLR(ARM64Reg::X8); } else { - MOVP2R(X8, 
&PowerPC::Write_U64); - m_float_emit.UMOV(64, X0, RS, 0); - BLR(X8); + MOVP2R(ARM64Reg::X8, &PowerPC::Write_U64); + m_float_emit.UMOV(64, ARM64Reg::X0, RS, 0); + BLR(ARM64Reg::X8); } } else if (flags & BackPatchInfo::FLAG_LOAD && flags & BackPatchInfo::FLAG_MASK_FLOAT) { if (flags & BackPatchInfo::FLAG_SIZE_F32) { - MOVP2R(X8, &PowerPC::Read_U32); - BLR(X8); - m_float_emit.INS(32, RS, 0, X0); + MOVP2R(ARM64Reg::X8, &PowerPC::Read_U32); + BLR(ARM64Reg::X8); + m_float_emit.INS(32, RS, 0, ARM64Reg::X0); } else { - MOVP2R(X8, &PowerPC::Read_F64); - BLR(X8); - m_float_emit.INS(64, RS, 0, X0); + MOVP2R(ARM64Reg::X8, &PowerPC::Read_F64); + BLR(ARM64Reg::X8); + m_float_emit.INS(64, RS, 0, ARM64Reg::X0); } } else if (flags & BackPatchInfo::FLAG_STORE) { - MOV(W0, RS); + MOV(ARM64Reg::W0, RS); if (flags & BackPatchInfo::FLAG_SIZE_32) - MOVP2R(X8, &PowerPC::Write_U32); + MOVP2R(ARM64Reg::X8, &PowerPC::Write_U32); else if (flags & BackPatchInfo::FLAG_SIZE_16) - MOVP2R(X8, &PowerPC::Write_U16); + MOVP2R(ARM64Reg::X8, &PowerPC::Write_U16); else - MOVP2R(X8, &PowerPC::Write_U8); + MOVP2R(ARM64Reg::X8, &PowerPC::Write_U8); - BLR(X8); + BLR(ARM64Reg::X8); } else if (flags & BackPatchInfo::FLAG_ZERO_256) { - MOVP2R(X8, &PowerPC::ClearCacheLine); - BLR(X8); + MOVP2R(ARM64Reg::X8, &PowerPC::ClearCacheLine); + BLR(ARM64Reg::X8); } else { if (flags & BackPatchInfo::FLAG_SIZE_32) - MOVP2R(X8, &PowerPC::Read_U32); + MOVP2R(ARM64Reg::X8, &PowerPC::Read_U32); else if (flags & BackPatchInfo::FLAG_SIZE_16) - MOVP2R(X8, &PowerPC::Read_U16); + MOVP2R(ARM64Reg::X8, &PowerPC::Read_U16); else if (flags & BackPatchInfo::FLAG_SIZE_8) - MOVP2R(X8, &PowerPC::Read_U8); + MOVP2R(ARM64Reg::X8, &PowerPC::Read_U8); - BLR(X8); + BLR(ARM64Reg::X8); if (!(flags & BackPatchInfo::FLAG_REVERSE)) { - MOV(RS, W0); + MOV(RS, ARM64Reg::W0); } else { if (flags & BackPatchInfo::FLAG_SIZE_32) - REV32(RS, W0); + REV32(RS, ARM64Reg::W0); else if (flags & BackPatchInfo::FLAG_SIZE_16) - REV16(RS, W0); + REV16(RS, ARM64Reg::W0); } if (flags & BackPatchInfo::FLAG_EXTEND) SXTH(RS, RS); } - m_float_emit.ABI_PopRegisters(fprs_to_push, X30); + m_float_emit.ABI_PopRegisters(fprs_to_push, ARM64Reg::X30); ABI_PopRegisters(gprs_to_push); } if (in_far_code) { - RET(X30); + RET(ARM64Reg::X30); SwitchToNearCode(); } } diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_Branch.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_Branch.cpp index 7f7cb9a19b..3e5ce8e688 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_Branch.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_Branch.cpp @@ -236,7 +236,7 @@ void JitArm64::bclrx(UGeckoInstruction inst) (inst.BO & BO_DONT_DECREMENT_FLAG) == 0 || (inst.BO & BO_DONT_CHECK_CONDITION) == 0; ARM64Reg WA = gpr.GetReg(); - ARM64Reg WB = inst.LK ? gpr.GetReg() : INVALID_REG; + ARM64Reg WB = inst.LK ? 
gpr.GetReg() : ARM64Reg::INVALID_REG; FixupBranch pCTRDontBranch; if ((inst.BO & BO_DONT_DECREMENT_FLAG) == 0) // Decrement and test CTR diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_FloatingPoint.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_FloatingPoint.cpp index ed4dee4ca6..b6a99f1c0b 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_FloatingPoint.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_FloatingPoint.cpp @@ -303,7 +303,7 @@ void JitArm64::fcmpX(UGeckoInstruction inst) FixupBranch pNaN, pLesser, pGreater; FixupBranch continue1, continue2, continue3; - ORR(XA, ZR, 32, 0, true); + ORR(XA, ARM64Reg::ZR, 32, 0, true); m_float_emit.FCMP(VA, VB); diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_Integer.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_Integer.cpp index 6a4600d67c..d368ce3230 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_Integer.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_Integer.cpp @@ -27,7 +27,7 @@ void JitArm64::ComputeRC0(u64 imm) gpr.BindCRToRegister(0, false); MOVI2R(gpr.CR(0), imm); if (imm & 0x80000000) - SXTW(gpr.CR(0), DecodeReg(gpr.CR(0))); + SXTW(gpr.CR(0), EncodeRegTo32(gpr.CR(0))); } void JitArm64::ComputeCarry(ARM64Reg reg) @@ -64,7 +64,7 @@ void JitArm64::ComputeCarry(bool Carry) return; } - STRB(IndexType::Unsigned, WSP, PPC_REG, PPCSTATE_OFF(xer_ca)); + STRB(IndexType::Unsigned, ARM64Reg::WSP, PPC_REG, PPCSTATE_OFF(xer_ca)); } void JitArm64::ComputeCarry() @@ -89,7 +89,7 @@ void JitArm64::FlushCarry() return; ARM64Reg WA = gpr.GetReg(); - CSINC(WA, WSP, WSP, CC_CC); + CSINC(WA, ARM64Reg::WSP, ARM64Reg::WSP, CC_CC); STRB(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(xer_ca)); gpr.Unlock(WA); @@ -417,7 +417,7 @@ void JitArm64::negx(UGeckoInstruction inst) else { gpr.BindToRegister(d, d == a); - SUB(gpr.R(d), WSP, gpr.R(a)); + SUB(gpr.R(d), ARM64Reg::WSP, gpr.R(a)); if (inst.Rc) ComputeRC0(gpr.R(d)); } @@ -481,7 +481,7 @@ void JitArm64::cmpl(UGeckoInstruction inst) if (gpr.IsImm(b) && !gpr.GetImm(b)) { - MOV(DecodeReg(CR), gpr.R(a)); + MOV(EncodeRegTo32(CR), gpr.R(a)); return; } @@ -537,7 +537,7 @@ void JitArm64::cmpli(UGeckoInstruction inst) if (!B) { - MOV(DecodeReg(CR), gpr.R(a)); + MOV(EncodeRegTo32(CR), gpr.R(a)); return; } @@ -670,7 +670,7 @@ void JitArm64::srawix(UGeckoInstruction inst) if (js.op->wantsCA) { ARM64Reg WA = gpr.GetReg(); - ARM64Reg dest = inplace_carry ? WA : WSP; + ARM64Reg dest = inplace_carry ? 
WA : ARM64Reg::WSP; if (a != s) { ASR(RA, RS, amount); @@ -689,7 +689,7 @@ void JitArm64::srawix(UGeckoInstruction inst) } else { - CSINC(WA, WSP, WSP, CC_EQ); + CSINC(WA, ARM64Reg::WSP, ARM64Reg::WSP, CC_EQ); STRB(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(xer_ca)); } gpr.Unlock(WA); @@ -844,7 +844,7 @@ void JitArm64::addzex(UGeckoInstruction inst) if (js.carryFlagSet) { gpr.BindToRegister(d, d == a); - ADCS(gpr.R(d), gpr.R(a), WZR); + ADCS(gpr.R(d), gpr.R(a), ARM64Reg::WZR); } else if (d == a) { @@ -913,7 +913,7 @@ void JitArm64::subfex(UGeckoInstruction inst) if (js.carryFlagSet) { MOVI2R(WA, ~i + j); - ADC(gpr.R(d), WA, WZR); + ADC(gpr.R(d), WA, ARM64Reg::WZR); } else { @@ -1011,7 +1011,7 @@ void JitArm64::subfzex(UGeckoInstruction inst) if (js.carryFlagSet) { MVN(gpr.R(d), gpr.R(a)); - ADCS(gpr.R(d), gpr.R(d), WZR); + ADCS(gpr.R(d), gpr.R(d), ARM64Reg::WZR); } else { @@ -1074,7 +1074,7 @@ void JitArm64::addex(UGeckoInstruction inst) if (js.carryFlagSet) { MOVI2R(WA, i + j); - ADC(gpr.R(d), WA, WZR); + ADC(gpr.R(d), WA, ARM64Reg::WZR); } else { @@ -1502,7 +1502,7 @@ void JitArm64::srawx(UGeckoInstruction inst) SetJumpTarget(bit_is_not_zero); CMP(RS, 0); CSET(WA, CC_LT); - CSINV(WB, WZR, WZR, CC_GE); + CSINV(WB, ARM64Reg::WZR, ARM64Reg::WZR, CC_GE); SetJumpTarget(is_zero); SetJumpTarget(bit_is_zero); diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStore.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStore.cpp index 2974a0c065..fbc106941f 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStore.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStore.cpp @@ -25,12 +25,12 @@ using namespace Arm64Gen; void JitArm64::SafeLoadToReg(u32 dest, s32 addr, s32 offsetReg, u32 flags, s32 offset, bool update) { // We want to make sure to not get LR as a temp register - gpr.Lock(W0, W30); + gpr.Lock(ARM64Reg::W0, ARM64Reg::W30); gpr.BindToRegister(dest, dest == (u32)addr || dest == (u32)offsetReg); ARM64Reg dest_reg = gpr.R(dest); - ARM64Reg up_reg = INVALID_REG; - ARM64Reg off_reg = INVALID_REG; + ARM64Reg up_reg = ARM64Reg::INVALID_REG; + ARM64Reg off_reg = ARM64Reg::INVALID_REG; if (addr != -1 && !gpr.IsImm(addr)) up_reg = gpr.R(addr); @@ -38,7 +38,7 @@ void JitArm64::SafeLoadToReg(u32 dest, s32 addr, s32 offsetReg, u32 flags, s32 o if (offsetReg != -1 && !gpr.IsImm(offsetReg)) off_reg = gpr.R(offsetReg); - ARM64Reg addr_reg = W0; + ARM64Reg addr_reg = ARM64Reg::W0; u32 imm_addr = 0; bool is_immediate = false; @@ -113,8 +113,8 @@ void JitArm64::SafeLoadToReg(u32 dest, s32 addr, s32 offsetReg, u32 flags, s32 o BitSet32 regs_in_use = gpr.GetCallerSavedUsed(); BitSet32 fprs_in_use = fpr.GetCallerSavedUsed(); - regs_in_use[W0] = 0; - regs_in_use[dest_reg] = 0; + regs_in_use[DecodeReg(ARM64Reg::W0)] = 0; + regs_in_use[DecodeReg(dest_reg)] = 0; u32 access_size = BackPatchInfo::GetFlagSize(flags); u32 mmio_address = 0; @@ -135,18 +135,18 @@ void JitArm64::SafeLoadToReg(u32 dest, s32 addr, s32 offsetReg, u32 flags, s32 o EmitBackpatchRoutine(flags, jo.fastmem, jo.fastmem, dest_reg, XA, regs_in_use, fprs_in_use); } - gpr.Unlock(W0, W30); + gpr.Unlock(ARM64Reg::W0, ARM64Reg::W30); } void JitArm64::SafeStoreFromReg(s32 dest, u32 value, s32 regOffset, u32 flags, s32 offset) { // We want to make sure to not get LR as a temp register - gpr.Lock(W0, W1, W30); + gpr.Lock(ARM64Reg::W0, ARM64Reg::W1, ARM64Reg::W30); ARM64Reg RS = gpr.R(value); - ARM64Reg reg_dest = INVALID_REG; - ARM64Reg reg_off = INVALID_REG; + ARM64Reg reg_dest = ARM64Reg::INVALID_REG; + ARM64Reg reg_off = 
ARM64Reg::INVALID_REG; if (regOffset != -1 && !gpr.IsImm(regOffset)) reg_off = gpr.R(regOffset); @@ -155,10 +155,10 @@ void JitArm64::SafeStoreFromReg(s32 dest, u32 value, s32 regOffset, u32 flags, s BitSet32 regs_in_use = gpr.GetCallerSavedUsed(); BitSet32 fprs_in_use = fpr.GetCallerSavedUsed(); - regs_in_use[W0] = 0; - regs_in_use[W1] = 0; + regs_in_use[DecodeReg(ARM64Reg::W0)] = 0; + regs_in_use[DecodeReg(ARM64Reg::W1)] = 0; - ARM64Reg addr_reg = W1; + ARM64Reg addr_reg = ARM64Reg::W1; u32 imm_addr = 0; bool is_immediate = false; @@ -238,22 +238,22 @@ void JitArm64::SafeStoreFromReg(s32 dest, u32 value, s32 regOffset, u32 flags, s else accessSize = 8; - LDR(IndexType::Unsigned, X0, PPC_REG, PPCSTATE_OFF(gather_pipe_ptr)); + LDR(IndexType::Unsigned, ARM64Reg::X0, PPC_REG, PPCSTATE_OFF(gather_pipe_ptr)); if (accessSize == 32) { - REV32(W1, RS); - STR(IndexType::Post, W1, X0, 4); + REV32(ARM64Reg::W1, RS); + STR(IndexType::Post, ARM64Reg::W1, ARM64Reg::X0, 4); } else if (accessSize == 16) { - REV16(W1, RS); - STRH(IndexType::Post, W1, X0, 2); + REV16(ARM64Reg::W1, RS); + STRH(IndexType::Post, ARM64Reg::W1, ARM64Reg::X0, 2); } else { - STRB(IndexType::Post, RS, X0, 1); + STRB(IndexType::Post, RS, ARM64Reg::X0, 1); } - STR(IndexType::Unsigned, X0, PPC_REG, PPCSTATE_OFF(gather_pipe_ptr)); + STR(IndexType::Unsigned, ARM64Reg::X0, PPC_REG, PPCSTATE_OFF(gather_pipe_ptr)); js.fifoBytesSinceCheck += accessSize >> 3; } else if (jo.fastmem_arena && is_immediate && PowerPC::IsOptimizableRAMAddress(imm_addr)) @@ -274,7 +274,7 @@ void JitArm64::SafeStoreFromReg(s32 dest, u32 value, s32 regOffset, u32 flags, s EmitBackpatchRoutine(flags, jo.fastmem, jo.fastmem, RS, XA, regs_in_use, fprs_in_use); } - gpr.Unlock(W0, W1, W30); + gpr.Unlock(ARM64Reg::W0, ARM64Reg::W1, ARM64Reg::W30); } void JitArm64::lXX(UGeckoInstruction inst) @@ -538,9 +538,9 @@ void JitArm64::dcbx(UGeckoInstruction inst) INSTRUCTION_START JITDISABLE(bJITLoadStoreOff); - gpr.Lock(W0); + gpr.Lock(ARM64Reg::W0); - ARM64Reg addr = W0; + ARM64Reg addr = ARM64Reg::W0; u32 a = inst.RA, b = inst.RB; @@ -555,17 +555,17 @@ void JitArm64::dcbx(UGeckoInstruction inst) BitSet32 fprs_to_push = fpr.GetCallerSavedUsed(); ABI_PushRegisters(gprs_to_push); - m_float_emit.ABI_PushRegisters(fprs_to_push, X30); + m_float_emit.ABI_PushRegisters(fprs_to_push, ARM64Reg::X30); - MOVI2R(X1, 32); - MOVI2R(X2, 0); - MOVP2R(X3, &JitInterface::InvalidateICache); - BLR(X3); + MOVI2R(ARM64Reg::X1, 32); + MOVI2R(ARM64Reg::X2, 0); + MOVP2R(ARM64Reg::X3, &JitInterface::InvalidateICache); + BLR(ARM64Reg::X3); - m_float_emit.ABI_PopRegisters(fprs_to_push, X30); + m_float_emit.ABI_PopRegisters(fprs_to_push, ARM64Reg::X30); ABI_PopRegisters(gprs_to_push); - gpr.Unlock(W0); + gpr.Unlock(ARM64Reg::W0); } void JitArm64::dcbt(UGeckoInstruction inst) @@ -596,9 +596,9 @@ void JitArm64::dcbz(UGeckoInstruction inst) int a = inst.RA, b = inst.RB; - gpr.Lock(W0); + gpr.Lock(ARM64Reg::W0); - ARM64Reg addr_reg = W0; + ARM64Reg addr_reg = ARM64Reg::W0; if (a) { @@ -645,12 +645,12 @@ void JitArm64::dcbz(UGeckoInstruction inst) BitSet32 gprs_to_push = gpr.GetCallerSavedUsed(); BitSet32 fprs_to_push = fpr.GetCallerSavedUsed(); - gprs_to_push[W0] = 0; + gprs_to_push[DecodeReg(ARM64Reg::W0)] = 0; - EmitBackpatchRoutine(BackPatchInfo::FLAG_ZERO_256, true, true, W0, EncodeRegTo64(addr_reg), - gprs_to_push, fprs_to_push); + EmitBackpatchRoutine(BackPatchInfo::FLAG_ZERO_256, true, true, ARM64Reg::W0, + EncodeRegTo64(addr_reg), gprs_to_push, fprs_to_push); - gpr.Unlock(W0); + 
gpr.Unlock(ARM64Reg::W0); } void JitArm64::eieio(UGeckoInstruction inst) diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStoreFloating.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStoreFloating.cpp index db0a7e21e6..e881551f64 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStoreFloating.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStoreFloating.cpp @@ -78,11 +78,11 @@ void JitArm64::lfXX(UGeckoInstruction inst) const RegType type = (flags & BackPatchInfo::FLAG_SIZE_F64) != 0 ? RegType::LowerPair : RegType::DuplicatedSingle; - gpr.Lock(W0, W30); - fpr.Lock(Q0); + gpr.Lock(ARM64Reg::W0, ARM64Reg::W30); + fpr.Lock(ARM64Reg::Q0); const ARM64Reg VD = fpr.RW(inst.FD, type); - ARM64Reg addr_reg = W0; + ARM64Reg addr_reg = ARM64Reg::W0; if (update) { @@ -164,9 +164,9 @@ void JitArm64::lfXX(UGeckoInstruction inst) BitSet32 regs_in_use = gpr.GetCallerSavedUsed(); BitSet32 fprs_in_use = fpr.GetCallerSavedUsed(); - regs_in_use[W0] = 0; - fprs_in_use[0] = 0; // Q0 - fprs_in_use[VD - Q0] = 0; + regs_in_use[DecodeReg(ARM64Reg::W0)] = 0; + fprs_in_use[DecodeReg(ARM64Reg::Q0)] = 0; + fprs_in_use[DecodeReg(VD)] = 0; if (jo.fastmem_arena && is_immediate && PowerPC::IsOptimizableRAMAddress(imm_addr)) { @@ -177,8 +177,8 @@ void JitArm64::lfXX(UGeckoInstruction inst) EmitBackpatchRoutine(flags, jo.fastmem, jo.fastmem, VD, XA, regs_in_use, fprs_in_use); } - gpr.Unlock(W0, W30); - fpr.Unlock(Q0); + gpr.Unlock(ARM64Reg::W0, ARM64Reg::W30); + fpr.Unlock(ARM64Reg::Q0); } void JitArm64::stfXX(UGeckoInstruction inst) @@ -242,8 +242,8 @@ void JitArm64::stfXX(UGeckoInstruction inst) u32 imm_addr = 0; bool is_immediate = false; - gpr.Lock(W0, W1, W30); - fpr.Lock(Q0); + gpr.Lock(ARM64Reg::W0, ARM64Reg::W1, ARM64Reg::W30); + fpr.Lock(ARM64Reg::Q0); const bool single = (flags & BackPatchInfo::FLAG_SIZE_F32) && fpr.IsSingle(inst.FS, true); @@ -255,7 +255,7 @@ void JitArm64::stfXX(UGeckoInstruction inst) flags |= BackPatchInfo::FLAG_SIZE_F32I; } - ARM64Reg addr_reg = W1; + ARM64Reg addr_reg = ARM64Reg::W1; if (update) { @@ -344,9 +344,9 @@ void JitArm64::stfXX(UGeckoInstruction inst) BitSet32 regs_in_use = gpr.GetCallerSavedUsed(); BitSet32 fprs_in_use = fpr.GetCallerSavedUsed(); - regs_in_use[W0] = 0; - regs_in_use[W1] = 0; - fprs_in_use[0] = 0; // Q0 + regs_in_use[DecodeReg(ARM64Reg::W0)] = 0; + regs_in_use[DecodeReg(ARM64Reg::W1)] = 0; + fprs_in_use[DecodeReg(ARM64Reg::Q0)] = 0; if (is_immediate) { @@ -358,25 +358,25 @@ void JitArm64::stfXX(UGeckoInstruction inst) else accessSize = 32; - LDR(IndexType::Unsigned, X0, PPC_REG, PPCSTATE_OFF(gather_pipe_ptr)); + LDR(IndexType::Unsigned, ARM64Reg::X0, PPC_REG, PPCSTATE_OFF(gather_pipe_ptr)); if (flags & BackPatchInfo::FLAG_SIZE_F64) { - m_float_emit.REV64(8, Q0, V0); + m_float_emit.REV64(8, ARM64Reg::Q0, V0); } else if (flags & BackPatchInfo::FLAG_SIZE_F32) { - m_float_emit.FCVT(32, 64, D0, EncodeRegToDouble(V0)); - m_float_emit.REV32(8, D0, D0); + m_float_emit.FCVT(32, 64, ARM64Reg::D0, EncodeRegToDouble(V0)); + m_float_emit.REV32(8, ARM64Reg::D0, ARM64Reg::D0); } else if (flags & BackPatchInfo::FLAG_SIZE_F32I) { - m_float_emit.REV32(8, D0, V0); + m_float_emit.REV32(8, ARM64Reg::D0, V0); } - m_float_emit.STR(accessSize, IndexType::Post, accessSize == 64 ? Q0 : D0, X0, - accessSize >> 3); + m_float_emit.STR(accessSize, IndexType::Post, accessSize == 64 ? 
ARM64Reg::Q0 : ARM64Reg::D0, + ARM64Reg::X0, accessSize >> 3); - STR(IndexType::Unsigned, X0, PPC_REG, PPCSTATE_OFF(gather_pipe_ptr)); + STR(IndexType::Unsigned, ARM64Reg::X0, PPC_REG, PPCSTATE_OFF(gather_pipe_ptr)); js.fifoBytesSinceCheck += accessSize >> 3; if (update) @@ -399,6 +399,6 @@ void JitArm64::stfXX(UGeckoInstruction inst) { EmitBackpatchRoutine(flags, jo.fastmem, jo.fastmem, V0, XA, regs_in_use, fprs_in_use); } - gpr.Unlock(W0, W1, W30); - fpr.Unlock(Q0); + gpr.Unlock(ARM64Reg::W0, ARM64Reg::W1, ARM64Reg::W30); + fpr.Unlock(ARM64Reg::Q0); } diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStorePaired.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStorePaired.cpp index 366298a2bd..1b4fcc3f85 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStorePaired.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStorePaired.cpp @@ -34,13 +34,13 @@ void JitArm64::psq_l(UGeckoInstruction inst) const bool update = inst.OPCD == 57; const s32 offset = inst.SIMM_12; - gpr.Lock(W0, W1, W2, W30); - fpr.Lock(Q0, Q1); + gpr.Lock(ARM64Reg::W0, ARM64Reg::W1, ARM64Reg::W2, ARM64Reg::W30); + fpr.Lock(ARM64Reg::Q0, ARM64Reg::Q1); const ARM64Reg arm_addr = gpr.R(inst.RA); - constexpr ARM64Reg scale_reg = W0; - constexpr ARM64Reg addr_reg = W1; - constexpr ARM64Reg type_reg = W2; + constexpr ARM64Reg scale_reg = ARM64Reg::W0; + constexpr ARM64Reg addr_reg = ARM64Reg::W1; + constexpr ARM64Reg type_reg = ARM64Reg::W2; ARM64Reg VS; if (inst.RA || update) // Always uses the register on update @@ -81,22 +81,22 @@ void JitArm64::psq_l(UGeckoInstruction inst) UBFM(type_reg, scale_reg, 16, 18); // Type UBFM(scale_reg, scale_reg, 24, 29); // Scale - MOVP2R(X30, inst.W ? single_load_quantized : paired_load_quantized); - LDR(EncodeRegTo64(type_reg), X30, ArithOption(EncodeRegTo64(type_reg), true)); + MOVP2R(ARM64Reg::X30, inst.W ? single_load_quantized : paired_load_quantized); + LDR(EncodeRegTo64(type_reg), ARM64Reg::X30, ArithOption(EncodeRegTo64(type_reg), true)); BLR(EncodeRegTo64(type_reg)); VS = fpr.RW(inst.RS, RegType::Single); - m_float_emit.ORR(EncodeRegToDouble(VS), D0, D0); + m_float_emit.ORR(EncodeRegToDouble(VS), ARM64Reg::D0, ARM64Reg::D0); } if (inst.W) { - m_float_emit.FMOV(S0, 0x70); // 1.0 as a Single - m_float_emit.INS(32, VS, 1, Q0, 0); + m_float_emit.FMOV(ARM64Reg::S0, 0x70); // 1.0 as a Single + m_float_emit.INS(32, VS, 1, ARM64Reg::Q0, 0); } - gpr.Unlock(W0, W1, W2, W30); - fpr.Unlock(Q0, Q1); + gpr.Unlock(ARM64Reg::W0, ARM64Reg::W1, ARM64Reg::W2, ARM64Reg::W30); + fpr.Unlock(ARM64Reg::Q0, ARM64Reg::Q1); } void JitArm64::psq_st(UGeckoInstruction inst) @@ -116,17 +116,17 @@ void JitArm64::psq_st(UGeckoInstruction inst) const bool update = inst.OPCD == 61; const s32 offset = inst.SIMM_12; - gpr.Lock(W0, W1, W2, W30); - fpr.Lock(Q0, Q1); + gpr.Lock(ARM64Reg::W0, ARM64Reg::W1, ARM64Reg::W2, ARM64Reg::W30); + fpr.Lock(ARM64Reg::Q0, ARM64Reg::Q1); const bool single = fpr.IsSingle(inst.RS); const ARM64Reg arm_addr = gpr.R(inst.RA); const ARM64Reg VS = fpr.R(inst.RS, single ? 
RegType::Single : RegType::Register); - constexpr ARM64Reg scale_reg = W0; - constexpr ARM64Reg addr_reg = W1; - constexpr ARM64Reg type_reg = W2; + constexpr ARM64Reg scale_reg = ARM64Reg::W0; + constexpr ARM64Reg addr_reg = ARM64Reg::W1; + constexpr ARM64Reg type_reg = ARM64Reg::W2; BitSet32 gprs_in_use = gpr.GetCallerSavedUsed(); BitSet32 fprs_in_use = fpr.GetCallerSavedUsed(); @@ -169,14 +169,14 @@ void JitArm64::psq_st(UGeckoInstruction inst) { if (single) { - m_float_emit.ORR(D0, VS, VS); + m_float_emit.ORR(ARM64Reg::D0, VS, VS); } else { if (inst.W) - m_float_emit.FCVT(32, 64, D0, VS); + m_float_emit.FCVT(32, 64, ARM64Reg::D0, VS); else - m_float_emit.FCVTN(32, D0, VS); + m_float_emit.FCVTN(32, ARM64Reg::D0, VS); } LDR(IndexType::Unsigned, scale_reg, PPC_REG, PPCSTATE_OFF_SPR(SPR_GQR0 + inst.I)); @@ -192,26 +192,26 @@ void JitArm64::psq_st(UGeckoInstruction inst) SwitchToFarCode(); SetJumpTarget(fail); // Slow - MOVP2R(X30, &paired_store_quantized[16 + inst.W * 8]); - LDR(EncodeRegTo64(type_reg), X30, ArithOption(EncodeRegTo64(type_reg), true)); + MOVP2R(ARM64Reg::X30, &paired_store_quantized[16 + inst.W * 8]); + LDR(EncodeRegTo64(type_reg), ARM64Reg::X30, ArithOption(EncodeRegTo64(type_reg), true)); ABI_PushRegisters(gprs_in_use); - m_float_emit.ABI_PushRegisters(fprs_in_use, X30); + m_float_emit.ABI_PushRegisters(fprs_in_use, ARM64Reg::X30); BLR(EncodeRegTo64(type_reg)); - m_float_emit.ABI_PopRegisters(fprs_in_use, X30); + m_float_emit.ABI_PopRegisters(fprs_in_use, ARM64Reg::X30); ABI_PopRegisters(gprs_in_use); FixupBranch continue1 = B(); SwitchToNearCode(); SetJumpTarget(pass); // Fast - MOVP2R(X30, &paired_store_quantized[inst.W * 8]); - LDR(EncodeRegTo64(type_reg), X30, ArithOption(EncodeRegTo64(type_reg), true)); + MOVP2R(ARM64Reg::X30, &paired_store_quantized[inst.W * 8]); + LDR(EncodeRegTo64(type_reg), ARM64Reg::X30, ArithOption(EncodeRegTo64(type_reg), true)); BLR(EncodeRegTo64(type_reg)); SetJumpTarget(continue1); } - gpr.Unlock(W0, W1, W2, W30); - fpr.Unlock(Q0, Q1); + gpr.Unlock(ARM64Reg::W0, ARM64Reg::W1, ARM64Reg::W2, ARM64Reg::W30); + fpr.Unlock(ARM64Reg::Q0, ARM64Reg::Q1); } diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_Paired.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_Paired.cpp index 00cd92c39b..7055a05079 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_Paired.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_Paired.cpp @@ -123,8 +123,8 @@ void JitArm64::ps_maddXX(UGeckoInstruction inst) const ARM64Reg VB = reg_encoder(fpr.R(b, type)); const ARM64Reg VC = reg_encoder(fpr.R(c, type)); const ARM64Reg VD = reg_encoder(fpr.RW(d, type)); - ARM64Reg V0Q = INVALID_REG; - ARM64Reg V0 = INVALID_REG; + ARM64Reg V0Q = ARM64Reg::INVALID_REG; + ARM64Reg V0 = ARM64Reg::INVALID_REG; if (d != b && (d == a || d == c)) { V0Q = fpr.GetReg(); @@ -262,7 +262,7 @@ void JitArm64::ps_maddXX(UGeckoInstruction inst) fpr.FixSinglePrecision(d); - if (V0Q != INVALID_REG) + if (V0Q != ARM64Reg::INVALID_REG) fpr.Unlock(V0Q); } diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.cpp index c92518c243..8fcdcc8131 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.cpp @@ -42,7 +42,7 @@ ARM64Reg Arm64RegCache::GetReg() // We can't return anything reasonable in this case. 
Return INVALID_REG and watch the failure // happen ASSERT_MSG(DYNA_REC, 0, "All available registers are locked!"); - return INVALID_REG; + return ARM64Reg::INVALID_REG; } void Arm64RegCache::UpdateLastUsed(BitSet32 regs_used) @@ -122,17 +122,17 @@ void Arm64GPRCache::Start(PPCAnalyst::BlockRegStats& stats) bool Arm64GPRCache::IsCalleeSaved(ARM64Reg reg) const { static constexpr std::array callee_regs{{ - X28, - X27, - X26, - X25, - X24, - X23, - X22, - X21, - X20, - X19, - INVALID_REG, + ARM64Reg::X28, + ARM64Reg::X27, + ARM64Reg::X26, + ARM64Reg::X25, + ARM64Reg::X24, + ARM64Reg::X23, + ARM64Reg::X22, + ARM64Reg::X21, + ARM64Reg::X20, + ARM64Reg::X19, + ARM64Reg::INVALID_REG, }}; return std::find(callee_regs.begin(), callee_regs.end(), EncodeRegTo64(reg)) != callee_regs.end(); @@ -180,7 +180,7 @@ void Arm64GPRCache::FlushRegister(size_t index, bool maintain_state) if (!maintain_state) { - UnlockRegister(DecodeReg(host_reg)); + UnlockRegister(EncodeRegTo32(host_reg)); reg.Flush(); } } @@ -188,7 +188,7 @@ void Arm64GPRCache::FlushRegister(size_t index, bool maintain_state) { if (!reg.GetImm()) { - m_emit->STR(IndexType::Unsigned, bitsize == 64 ? ZR : WZR, PPC_REG, + m_emit->STR(IndexType::Unsigned, bitsize == 64 ? ARM64Reg::ZR : ARM64Reg::WZR, PPC_REG, u32(guest_reg.ppc_offset)); } else @@ -198,7 +198,7 @@ void Arm64GPRCache::FlushRegister(size_t index, bool maintain_state) m_emit->MOVI2R(host_reg, reg.GetImm()); m_emit->STR(IndexType::Unsigned, host_reg, PPC_REG, u32(guest_reg.ppc_offset)); - UnlockRegister(DecodeReg(host_reg)); + UnlockRegister(EncodeRegTo32(host_reg)); } if (!maintain_state) @@ -228,8 +228,8 @@ void Arm64GPRCache::FlushRegisters(BitSet32 regs, bool maintain_state) m_emit->STP(IndexType::Signed, RX1, RX2, PPC_REG, u32(ppc_offset)); if (!maintain_state) { - UnlockRegister(DecodeReg(RX1)); - UnlockRegister(DecodeReg(RX2)); + UnlockRegister(EncodeRegTo32(RX1)); + UnlockRegister(EncodeRegTo32(RX2)); reg1.Flush(); reg2.Flush(); } @@ -299,14 +299,14 @@ ARM64Reg Arm64GPRCache::R(const GuestRegInfo& guest_reg) break; } // We've got an issue if we end up here - return INVALID_REG; + return ARM64Reg::INVALID_REG; } void Arm64GPRCache::SetImmediate(const GuestRegInfo& guest_reg, u32 imm) { OpArg& reg = guest_reg.reg; if (reg.GetType() == RegType::Register) - UnlockRegister(DecodeReg(reg.GetReg())); + UnlockRegister(EncodeRegTo32(reg.GetReg())); reg.LoadToImm(imm); } @@ -332,36 +332,36 @@ void Arm64GPRCache::GetAllocationOrder() // Callee saved registers first in hopes that we will keep everything stored there first static constexpr std::array allocation_order{{ // Callee saved - W27, - W26, - W25, - W24, - W23, - W22, - W21, - W20, - W19, + ARM64Reg::W27, + ARM64Reg::W26, + ARM64Reg::W25, + ARM64Reg::W24, + ARM64Reg::W23, + ARM64Reg::W22, + ARM64Reg::W21, + ARM64Reg::W20, + ARM64Reg::W19, // Caller saved - W17, - W16, - W15, - W14, - W13, - W12, - W11, - W10, - W9, - W8, - W7, - W6, - W5, - W4, - W3, - W2, - W1, - W0, - W30, + ARM64Reg::W17, + ARM64Reg::W16, + ARM64Reg::W15, + ARM64Reg::W14, + ARM64Reg::W13, + ARM64Reg::W12, + ARM64Reg::W11, + ARM64Reg::W10, + ARM64Reg::W9, + ARM64Reg::W8, + ARM64Reg::W7, + ARM64Reg::W6, + ARM64Reg::W5, + ARM64Reg::W4, + ARM64Reg::W3, + ARM64Reg::W2, + ARM64Reg::W1, + ARM64Reg::W0, + ARM64Reg::W30, }}; for (ARM64Reg reg : allocation_order) @@ -381,11 +381,10 @@ BitSet32 Arm64GPRCache::GetCallerSavedUsed() const void Arm64GPRCache::FlushByHost(ARM64Reg host_reg) { - host_reg = DecodeReg(host_reg); for (size_t i = 0; i < m_guest_registers.size(); ++i) 
{ const OpArg& reg = m_guest_registers[i]; - if (reg.GetType() == RegType::Register && DecodeReg(reg.GetReg()) == host_reg) + if (reg.GetType() == RegType::Register && DecodeReg(reg.GetReg()) == DecodeReg(host_reg)) { FlushRegister(i, false); return; @@ -520,7 +519,7 @@ ARM64Reg Arm64FPRCache::R(size_t preg, RegType type) break; } // We've got an issue if we end up here - return INVALID_REG; + return ARM64Reg::INVALID_REG; } ARM64Reg Arm64FPRCache::RW(size_t preg, RegType type) @@ -589,40 +588,40 @@ void Arm64FPRCache::GetAllocationOrder() { static constexpr std::array allocation_order{{ // Callee saved - Q8, - Q9, - Q10, - Q11, - Q12, - Q13, - Q14, - Q15, + ARM64Reg::Q8, + ARM64Reg::Q9, + ARM64Reg::Q10, + ARM64Reg::Q11, + ARM64Reg::Q12, + ARM64Reg::Q13, + ARM64Reg::Q14, + ARM64Reg::Q15, // Caller saved - Q16, - Q17, - Q18, - Q19, - Q20, - Q21, - Q22, - Q23, - Q24, - Q25, - Q26, - Q27, - Q28, - Q29, - Q30, - Q31, - Q7, - Q6, - Q5, - Q4, - Q3, - Q2, - Q1, - Q0, + ARM64Reg::Q16, + ARM64Reg::Q17, + ARM64Reg::Q18, + ARM64Reg::Q19, + ARM64Reg::Q20, + ARM64Reg::Q21, + ARM64Reg::Q22, + ARM64Reg::Q23, + ARM64Reg::Q24, + ARM64Reg::Q25, + ARM64Reg::Q26, + ARM64Reg::Q27, + ARM64Reg::Q28, + ARM64Reg::Q29, + ARM64Reg::Q30, + ARM64Reg::Q31, + ARM64Reg::Q7, + ARM64Reg::Q6, + ARM64Reg::Q5, + ARM64Reg::Q4, + ARM64Reg::Q3, + ARM64Reg::Q2, + ARM64Reg::Q1, + ARM64Reg::Q0, }}; for (ARM64Reg reg : allocation_order) @@ -648,15 +647,15 @@ void Arm64FPRCache::FlushByHost(ARM64Reg host_reg) bool Arm64FPRCache::IsCalleeSaved(ARM64Reg reg) const { static constexpr std::array callee_regs{{ - Q8, - Q9, - Q10, - Q11, - Q12, - Q13, - Q14, - Q15, - INVALID_REG, + ARM64Reg::Q8, + ARM64Reg::Q9, + ARM64Reg::Q10, + ARM64Reg::Q11, + ARM64Reg::Q12, + ARM64Reg::Q13, + ARM64Reg::Q14, + ARM64Reg::Q15, + ARM64Reg::INVALID_REG, }}; return std::find(callee_regs.begin(), callee_regs.end(), reg) != callee_regs.end(); @@ -745,7 +744,7 @@ BitSet32 Arm64FPRCache::GetCallerSavedUsed() const for (const auto& it : m_host_registers) { if (it.IsLocked()) - registers[it.GetReg() - Q0] = true; + registers[DecodeReg(it.GetReg())] = true; } return registers; } diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.h b/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.h index 9000b0d28a..aba4213a43 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.h +++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.h @@ -18,11 +18,11 @@ // Dedicated host registers // memory base register -constexpr Arm64Gen::ARM64Reg MEM_REG = Arm64Gen::X28; +constexpr Arm64Gen::ARM64Reg MEM_REG = Arm64Gen::ARM64Reg::X28; // ppcState pointer -constexpr Arm64Gen::ARM64Reg PPC_REG = Arm64Gen::X29; +constexpr Arm64Gen::ARM64Reg PPC_REG = Arm64Gen::ARM64Reg::X29; // PC register when calling the dispatcher -constexpr Arm64Gen::ARM64Reg DISPATCHER_PC = Arm64Gen::W26; +constexpr Arm64Gen::ARM64Reg DISPATCHER_PC = Arm64Gen::ARM64Reg::W26; #define PPCSTATE_OFF(elem) (offsetof(PowerPC::PowerPCState, elem)) @@ -84,13 +84,13 @@ public: m_type = RegType::Immediate; m_value = imm; - m_reg = Arm64Gen::INVALID_REG; + m_reg = Arm64Gen::ARM64Reg::INVALID_REG; } void Flush() { // Invalidate any previous information m_type = RegType::NotLoaded; - m_reg = Arm64Gen::INVALID_REG; + m_reg = Arm64Gen::ARM64Reg::INVALID_REG; // Arbitrarily large value that won't roll over on a lot of increments m_last_used = 0xFFFF; @@ -104,8 +104,8 @@ public: private: // For REG_REG - RegType m_type = RegType::NotLoaded; // store type - Arm64Gen::ARM64Reg m_reg = Arm64Gen::INVALID_REG; // 
host register we are in + RegType m_type = RegType::NotLoaded; // store type + Arm64Gen::ARM64Reg m_reg = Arm64Gen::ARM64Reg::INVALID_REG; // host register we are in // For REG_IMM u32 m_value = 0; // IMM value @@ -130,7 +130,7 @@ public: bool operator!=(Arm64Gen::ARM64Reg reg) const { return !operator==(reg); } private: - Arm64Gen::ARM64Reg m_reg = Arm64Gen::INVALID_REG; + Arm64Gen::ARM64Reg m_reg = Arm64Gen::ARM64Reg::INVALID_REG; bool m_locked = false; }; diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_SystemRegisters.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_SystemRegisters.cpp index 3eacca7e72..074f4d3c42 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_SystemRegisters.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_SystemRegisters.cpp @@ -17,7 +17,7 @@ using namespace Arm64Gen; FixupBranch JitArm64::JumpIfCRFieldBit(int field, int bit, bool jump_if_set) { ARM64Reg XA = gpr.CR(field); - ARM64Reg WA = DecodeReg(XA); + ARM64Reg WA = EncodeRegTo32(XA); switch (bit) { @@ -26,7 +26,7 @@ FixupBranch JitArm64::JumpIfCRFieldBit(int field, int bit, bool jump_if_set) case PowerPC::CR_EQ_BIT: // check bits 31-0 == 0 return jump_if_set ? CBZ(WA) : CBNZ(WA); case PowerPC::CR_GT_BIT: // check val > 0 - CMP(XA, SP); + CMP(XA, ARM64Reg::SP); return B(jump_if_set ? CC_GT : CC_LE); case PowerPC::CR_LT_BIT: // check bit 62 set return jump_if_set ? TBNZ(XA, 62) : TBZ(XA, 62); @@ -84,7 +84,7 @@ void JitArm64::mcrxr(UGeckoInstruction inst) ARM64Reg WA = gpr.GetReg(); ARM64Reg XA = EncodeRegTo64(WA); ARM64Reg XB = gpr.CR(inst.CRFD); - ARM64Reg WB = DecodeReg(XB); + ARM64Reg WB = EncodeRegTo32(XB); // Copy XER[0-3] into CR[inst.CRFD] LDRB(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(xer_ca)); @@ -99,8 +99,8 @@ void JitArm64::mcrxr(UGeckoInstruction inst) LDR(XB, XB, XA); // Clear XER[0-3] - STRB(IndexType::Unsigned, WZR, PPC_REG, PPCSTATE_OFF(xer_ca)); - STRB(IndexType::Unsigned, WZR, PPC_REG, PPCSTATE_OFF(xer_so_ov)); + STRB(IndexType::Unsigned, ARM64Reg::WZR, PPC_REG, PPCSTATE_OFF(xer_ca)); + STRB(IndexType::Unsigned, ARM64Reg::WZR, PPC_REG, PPCSTATE_OFF(xer_so_ov)); gpr.Unlock(WA); } @@ -278,7 +278,7 @@ void JitArm64::mfspr(UGeckoInstruction inst) SUB(Xresult, Xresult, XB); // a / 12 = (a * 0xAAAAAAAAAAAAAAAB) >> 67 - ORRI2R(XB, ZR, 0xAAAAAAAAAAAAAAAA); + ORRI2R(XB, ARM64Reg::ZR, 0xAAAAAAAAAAAAAAAA); ADD(XB, XB, 1); UMULH(Xresult, Xresult, XB); @@ -461,7 +461,7 @@ void JitArm64::crXXX(UGeckoInstruction inst) ARM64Reg WB = gpr.GetReg(); ARM64Reg XB = EncodeRegTo64(WB); ORR(XB, XA, 64 - 63, 0, true); // XA | 1<<63 - CMP(XA, ZR); + CMP(XA, ARM64Reg::ZR); CSEL(XA, XA, XB, CC_NEQ); gpr.Unlock(WB); } @@ -509,7 +509,7 @@ void JitArm64::crXXX(UGeckoInstruction inst) bool negate = i ? negateB : negateA; ARM64Reg XC = gpr.CR(field); - ARM64Reg WC = DecodeReg(XC); + ARM64Reg WC = EncodeRegTo32(XC); switch (bit) { case PowerPC::CR_SO_BIT: // check bit 61 set @@ -519,12 +519,12 @@ void JitArm64::crXXX(UGeckoInstruction inst) break; case PowerPC::CR_EQ_BIT: // check bits 31-0 == 0 - CMP(WC, WZR); + CMP(WC, ARM64Reg::WZR); CSET(out, negate ? CC_NEQ : CC_EQ); break; case PowerPC::CR_GT_BIT: // check val > 0 - CMP(XC, ZR); + CMP(XC, ARM64Reg::ZR); CSET(out, negate ? 
CC_LE : CC_GT); break; @@ -565,7 +565,7 @@ void JitArm64::crXXX(UGeckoInstruction inst) int bit = 3 - (inst.CRBD & 3); gpr.Unlock(WB); - WB = INVALID_REG; + WB = ARM64Reg::INVALID_REG; gpr.BindCRToRegister(field, true); XB = gpr.CR(field); @@ -577,7 +577,7 @@ void JitArm64::crXXX(UGeckoInstruction inst) ARM64Reg WC = gpr.GetReg(); ARM64Reg XC = EncodeRegTo64(WC); ORR(XC, XB, 64 - 63, 0, true); // XB | 1<<63 - CMP(XB, ZR); + CMP(XB, ARM64Reg::ZR); CSEL(XB, XB, XC, CC_NEQ); gpr.Unlock(WC); } @@ -623,7 +623,7 @@ void JitArm64::mfcr(UGeckoInstruction inst) for (int i = 0; i < 8; i++) { ARM64Reg CR = gpr.CR(i); - ARM64Reg WCR = DecodeReg(CR); + ARM64Reg WCR = EncodeRegTo32(CR); // SO if (i == 0) @@ -638,12 +638,12 @@ void JitArm64::mfcr(UGeckoInstruction inst) // EQ ORR(WC, WA, 32 - 1, 0); // WA | 1<<1 - CMP(WCR, WZR); + CMP(WCR, ARM64Reg::WZR); CSEL(WA, WC, WA, CC_EQ); // GT ORR(WC, WA, 32 - 2, 0); // WA | 1<<2 - CMP(CR, ZR); + CMP(CR, ARM64Reg::ZR); CSEL(WA, WC, WA, CC_GT); // LT @@ -672,7 +672,7 @@ void JitArm64::mtcrf(UGeckoInstruction inst) { gpr.BindCRToRegister(i, false); ARM64Reg CR = gpr.CR(i); - ARM64Reg WCR = DecodeReg(CR); + ARM64Reg WCR = EncodeRegTo32(CR); if (i != 7) LSR(WCR, RS, 28 - i * 4); diff --git a/Source/Core/Core/PowerPC/JitArm64/JitAsm.cpp b/Source/Core/Core/PowerPC/JitArm64/JitAsm.cpp index 7751d938d3..4bf6e627e8 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitAsm.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/JitAsm.cpp @@ -28,27 +28,27 @@ void JitArm64::GenerateAsm() enter_code = GetCodePtr(); ABI_PushRegisters(regs_to_save); - m_float_emit.ABI_PushRegisters(regs_to_save_fpr, X30); + m_float_emit.ABI_PushRegisters(regs_to_save_fpr, ARM64Reg::X30); MOVP2R(PPC_REG, &PowerPC::ppcState); // Swap the stack pointer, so we have proper guard pages. - ADD(X0, SP, 0); - MOVP2R(X1, &m_saved_stack_pointer); - STR(IndexType::Unsigned, X0, X1, 0); - MOVP2R(X1, &m_stack_pointer); - LDR(IndexType::Unsigned, X0, X1, 0); - FixupBranch no_fake_stack = CBZ(X0); - ADD(SP, X0, 0); + ADD(ARM64Reg::X0, ARM64Reg::SP, 0); + MOVP2R(ARM64Reg::X1, &m_saved_stack_pointer); + STR(IndexType::Unsigned, ARM64Reg::X0, ARM64Reg::X1, 0); + MOVP2R(ARM64Reg::X1, &m_stack_pointer); + LDR(IndexType::Unsigned, ARM64Reg::X0, ARM64Reg::X1, 0); + FixupBranch no_fake_stack = CBZ(ARM64Reg::X0); + ADD(ARM64Reg::SP, ARM64Reg::X0, 0); SetJumpTarget(no_fake_stack); // Push {nullptr; -1} as invalid destination on the stack. - MOVI2R(X0, 0xFFFFFFFF); - STP(IndexType::Pre, ZR, X0, SP, -16); + MOVI2R(ARM64Reg::X0, 0xFFFFFFFF); + STP(IndexType::Pre, ARM64Reg::ZR, ARM64Reg::X0, ARM64Reg::SP, -16); // Store the stack pointer, so we can reset it if the BLR optimization fails. - ADD(X0, SP, 0); - STR(IndexType::Unsigned, X0, PPC_REG, PPCSTATE_OFF(stored_stack_pointer)); + ADD(ARM64Reg::X0, ARM64Reg::SP, 0); + STR(IndexType::Unsigned, ARM64Reg::X0, PPC_REG, PPCSTATE_OFF(stored_stack_pointer)); // The PC will be loaded into DISPATCHER_PC after the call to CoreTiming::Advance(). // Advance() does an exception check so we don't know what PC to use until afterwards. 
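// [Editor's sketch] The dispatcher code in the next hunk is the emitted form
// of roughly this C++ (names taken from the hunk itself):
//
//   JitBlock* b = GetBlockCache()->GetFastBlockMap()
//       [(DISPATCHER_PC >> 2) & JitBaseBlockCache::FAST_BLOCK_MAP_MASK];
//   if (!b || b->effectiveAddress != pc || b->msrBits != msr)
//     b = JitBase::Dispatch(this);   // slow C++ lookup
//   if (!b)
//     JitTrampoline(this, pc);       // compile the block, then re-dispatch
//
// The AND of (FAST_BLOCK_MAP_MASK << 3) against (pc << 1) computes
// ((pc >> 2) & mask) * 8 in a single instruction, i.e. the byte offset of
// the 8-byte pointer slot in the fast block map.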
@@ -93,18 +93,18 @@ void JitArm64::GenerateAsm() SetJumpTarget(membaseend); // iCache[(address >> 2) & iCache_Mask]; - ARM64Reg pc_masked = W25; - ARM64Reg cache_base = X27; - ARM64Reg block = X30; - ORRI2R(pc_masked, WZR, JitBaseBlockCache::FAST_BLOCK_MAP_MASK << 3); + ARM64Reg pc_masked = ARM64Reg::W25; + ARM64Reg cache_base = ARM64Reg::X27; + ARM64Reg block = ARM64Reg::X30; + ORRI2R(pc_masked, ARM64Reg::WZR, JitBaseBlockCache::FAST_BLOCK_MAP_MASK << 3); AND(pc_masked, pc_masked, DISPATCHER_PC, ArithOption(DISPATCHER_PC, ShiftType::LSL, 1)); MOVP2R(cache_base, GetBlockCache()->GetFastBlockMap()); LDR(block, cache_base, EncodeRegTo64(pc_masked)); FixupBranch not_found = CBZ(block); // b.effectiveAddress != addr || b.msrBits != msr - ARM64Reg pc_and_msr = W25; - ARM64Reg pc_and_msr2 = W24; + ARM64Reg pc_and_msr = ARM64Reg::W25; + ARM64Reg pc_and_msr2 = ARM64Reg::W24; LDR(IndexType::Unsigned, pc_and_msr, block, offsetof(JitBlockData, effectiveAddress)); CMP(pc_and_msr, DISPATCHER_PC); FixupBranch pc_missmatch = B(CC_NEQ); @@ -125,28 +125,28 @@ void JitArm64::GenerateAsm() // Call C version of Dispatch(). STR(IndexType::Unsigned, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(pc)); - MOVP2R(X8, reinterpret_cast(&JitBase::Dispatch)); - MOVP2R(X0, this); - BLR(X8); + MOVP2R(ARM64Reg::X8, reinterpret_cast(&JitBase::Dispatch)); + MOVP2R(ARM64Reg::X0, this); + BLR(ARM64Reg::X8); - FixupBranch no_block_available = CBZ(X0); + FixupBranch no_block_available = CBZ(ARM64Reg::X0); // set the mem_base based on MSR flags and jump to next block. LDR(IndexType::Unsigned, ARM64Reg::W28, PPC_REG, PPCSTATE_OFF(msr)); FixupBranch physmem = TBNZ(ARM64Reg::W28, 31 - 27); MOVP2R(MEM_REG, Memory::physical_base); - BR(X0); + BR(ARM64Reg::X0); SetJumpTarget(physmem); MOVP2R(MEM_REG, Memory::logical_base); - BR(X0); + BR(ARM64Reg::X0); // Call JIT SetJumpTarget(no_block_available); ResetStack(); - MOVP2R(X0, this); - MOV(W1, DISPATCHER_PC); - MOVP2R(X8, reinterpret_cast(&JitTrampoline)); - BLR(X8); + MOVP2R(ARM64Reg::X0, this); + MOV(ARM64Reg::W1, DISPATCHER_PC); + MOVP2R(ARM64Reg::X8, reinterpret_cast(&JitTrampoline)); + BLR(ARM64Reg::X8); LDR(IndexType::Unsigned, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(pc)); B(dispatcher_no_check); @@ -158,15 +158,15 @@ void JitArm64::GenerateAsm() // Check the state pointer to see if we are exiting // Gets checked on at the end of every slice - MOVP2R(X0, CPU::GetStatePtr()); - LDR(IndexType::Unsigned, W0, X0, 0); + MOVP2R(ARM64Reg::X0, CPU::GetStatePtr()); + LDR(IndexType::Unsigned, ARM64Reg::W0, ARM64Reg::X0, 0); - CMP(W0, 0); + CMP(ARM64Reg::W0, 0); FixupBranch Exit = B(CC_NEQ); SetJumpTarget(to_start_of_timing_slice); - MOVP2R(X8, &CoreTiming::Advance); - BLR(X8); + MOVP2R(ARM64Reg::X8, &CoreTiming::Advance); + BLR(ARM64Reg::X8); // Load the PC back into DISPATCHER_PC (the exception handler might have changed it) LDR(IndexType::Unsigned, DISPATCHER_PC, PPC_REG, PPCSTATE_OFF(pc)); @@ -177,13 +177,13 @@ void JitArm64::GenerateAsm() SetJumpTarget(Exit); // Reset the stack pointer, as the BLR optimization have touched it. 
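// [Editor's note] The ADD(SP, X0, 0) below is not a stray add: the A64
// register-MOV alias is encoded as ORR with the zero register, so it cannot
// read or write SP. "MOV SP, Xn" is instead defined as the alias of
// "ADD SP, Xn, #0", which is why the emitter uses ADD with an immediate of
// zero wherever it moves the stack pointer (here and in ResetStack above).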
- MOVP2R(X1, &m_saved_stack_pointer); - LDR(IndexType::Unsigned, X0, X1, 0); - ADD(SP, X0, 0); + MOVP2R(ARM64Reg::X1, &m_saved_stack_pointer); + LDR(IndexType::Unsigned, ARM64Reg::X0, ARM64Reg::X1, 0); + ADD(ARM64Reg::SP, ARM64Reg::X0, 0); - m_float_emit.ABI_PopRegisters(regs_to_save_fpr, X30); + m_float_emit.ABI_PopRegisters(regs_to_save_fpr, ARM64Reg::X30); ABI_PopRegisters(regs_to_save); - RET(X30); + RET(ARM64Reg::X30); JitRegister::Register(enter_code, GetCodePtr(), "JIT_Dispatcher"); @@ -201,8 +201,8 @@ void JitArm64::GenerateCommonAsm() // Q0 is the return for loads // is the register for stores // Q1 is a temporary - ARM64Reg addr_reg = X1; - ARM64Reg scale_reg = X0; + ARM64Reg addr_reg = ARM64Reg::X1; + ARM64Reg scale_reg = ARM64Reg::X0; ARM64FloatEmitter float_emit(this); const u8* start = GetCodePtr(); @@ -211,129 +211,129 @@ void JitArm64::GenerateCommonAsm() const u8* loadPairedFloatTwo = GetCodePtr(); { ADD(addr_reg, addr_reg, MEM_REG); - float_emit.LD1(32, 1, D0, addr_reg); - float_emit.REV32(8, D0, D0); - RET(X30); + float_emit.LD1(32, 1, ARM64Reg::D0, addr_reg); + float_emit.REV32(8, ARM64Reg::D0, ARM64Reg::D0); + RET(ARM64Reg::X30); } const u8* loadPairedU8Two = GetCodePtr(); { ADD(addr_reg, addr_reg, MEM_REG); - float_emit.LDR(16, IndexType::Unsigned, D0, addr_reg, 0); - float_emit.UXTL(8, D0, D0); - float_emit.UXTL(16, D0, D0); - float_emit.UCVTF(32, D0, D0); + float_emit.LDR(16, IndexType::Unsigned, ARM64Reg::D0, addr_reg, 0); + float_emit.UXTL(8, ARM64Reg::D0, ARM64Reg::D0); + float_emit.UXTL(16, ARM64Reg::D0, ARM64Reg::D0); + float_emit.UCVTF(32, ARM64Reg::D0, ARM64Reg::D0); MOVP2R(addr_reg, &m_dequantizeTableS); ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3)); - float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0); - float_emit.FMUL(32, D0, D0, D1, 0); - RET(X30); + float_emit.LDR(32, IndexType::Unsigned, ARM64Reg::D1, scale_reg, 0); + float_emit.FMUL(32, ARM64Reg::D0, ARM64Reg::D0, ARM64Reg::D1, 0); + RET(ARM64Reg::X30); } const u8* loadPairedS8Two = GetCodePtr(); { ADD(addr_reg, addr_reg, MEM_REG); - float_emit.LDR(16, IndexType::Unsigned, D0, addr_reg, 0); - float_emit.SXTL(8, D0, D0); - float_emit.SXTL(16, D0, D0); - float_emit.SCVTF(32, D0, D0); + float_emit.LDR(16, IndexType::Unsigned, ARM64Reg::D0, addr_reg, 0); + float_emit.SXTL(8, ARM64Reg::D0, ARM64Reg::D0); + float_emit.SXTL(16, ARM64Reg::D0, ARM64Reg::D0); + float_emit.SCVTF(32, ARM64Reg::D0, ARM64Reg::D0); MOVP2R(addr_reg, &m_dequantizeTableS); ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3)); - float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0); - float_emit.FMUL(32, D0, D0, D1, 0); - RET(X30); + float_emit.LDR(32, IndexType::Unsigned, ARM64Reg::D1, scale_reg, 0); + float_emit.FMUL(32, ARM64Reg::D0, ARM64Reg::D0, ARM64Reg::D1, 0); + RET(ARM64Reg::X30); } const u8* loadPairedU16Two = GetCodePtr(); { ADD(addr_reg, addr_reg, MEM_REG); - float_emit.LD1(16, 1, D0, addr_reg); - float_emit.REV16(8, D0, D0); - float_emit.UXTL(16, D0, D0); - float_emit.UCVTF(32, D0, D0); + float_emit.LD1(16, 1, ARM64Reg::D0, addr_reg); + float_emit.REV16(8, ARM64Reg::D0, ARM64Reg::D0); + float_emit.UXTL(16, ARM64Reg::D0, ARM64Reg::D0); + float_emit.UCVTF(32, ARM64Reg::D0, ARM64Reg::D0); MOVP2R(addr_reg, &m_dequantizeTableS); ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3)); - float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0); - float_emit.FMUL(32, D0, D0, D1, 0); - RET(X30); + float_emit.LDR(32, IndexType::Unsigned, 
   const u8* loadPairedU16Two = GetCodePtr();
   {
     ADD(addr_reg, addr_reg, MEM_REG);
-    float_emit.LD1(16, 1, D0, addr_reg);
-    float_emit.REV16(8, D0, D0);
-    float_emit.UXTL(16, D0, D0);
-    float_emit.UCVTF(32, D0, D0);
+    float_emit.LD1(16, 1, ARM64Reg::D0, addr_reg);
+    float_emit.REV16(8, ARM64Reg::D0, ARM64Reg::D0);
+    float_emit.UXTL(16, ARM64Reg::D0, ARM64Reg::D0);
+    float_emit.UCVTF(32, ARM64Reg::D0, ARM64Reg::D0);
 
     MOVP2R(addr_reg, &m_dequantizeTableS);
     ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
-    float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
-    float_emit.FMUL(32, D0, D0, D1, 0);
-    RET(X30);
+    float_emit.LDR(32, IndexType::Unsigned, ARM64Reg::D1, scale_reg, 0);
+    float_emit.FMUL(32, ARM64Reg::D0, ARM64Reg::D0, ARM64Reg::D1, 0);
+    RET(ARM64Reg::X30);
   }
   const u8* loadPairedS16Two = GetCodePtr();
   {
     ADD(addr_reg, addr_reg, MEM_REG);
-    float_emit.LD1(16, 1, D0, addr_reg);
-    float_emit.REV16(8, D0, D0);
-    float_emit.SXTL(16, D0, D0);
-    float_emit.SCVTF(32, D0, D0);
+    float_emit.LD1(16, 1, ARM64Reg::D0, addr_reg);
+    float_emit.REV16(8, ARM64Reg::D0, ARM64Reg::D0);
+    float_emit.SXTL(16, ARM64Reg::D0, ARM64Reg::D0);
+    float_emit.SCVTF(32, ARM64Reg::D0, ARM64Reg::D0);
 
     MOVP2R(addr_reg, &m_dequantizeTableS);
     ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
-    float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
-    float_emit.FMUL(32, D0, D0, D1, 0);
-    RET(X30);
+    float_emit.LDR(32, IndexType::Unsigned, ARM64Reg::D1, scale_reg, 0);
+    float_emit.FMUL(32, ARM64Reg::D0, ARM64Reg::D0, ARM64Reg::D1, 0);
+    RET(ARM64Reg::X30);
   }
 
   const u8* loadPairedFloatOne = GetCodePtr();
   {
     ADD(addr_reg, addr_reg, MEM_REG);
-    float_emit.LDR(32, IndexType::Unsigned, D0, addr_reg, 0);
-    float_emit.REV32(8, D0, D0);
-    RET(X30);
+    float_emit.LDR(32, IndexType::Unsigned, ARM64Reg::D0, addr_reg, 0);
+    float_emit.REV32(8, ARM64Reg::D0, ARM64Reg::D0);
+    RET(ARM64Reg::X30);
   }
   const u8* loadPairedU8One = GetCodePtr();
   {
     ADD(addr_reg, addr_reg, MEM_REG);
-    float_emit.LDR(8, IndexType::Unsigned, D0, addr_reg, 0);
-    float_emit.UXTL(8, D0, D0);
-    float_emit.UXTL(16, D0, D0);
-    float_emit.UCVTF(32, D0, D0);
+    float_emit.LDR(8, IndexType::Unsigned, ARM64Reg::D0, addr_reg, 0);
+    float_emit.UXTL(8, ARM64Reg::D0, ARM64Reg::D0);
+    float_emit.UXTL(16, ARM64Reg::D0, ARM64Reg::D0);
+    float_emit.UCVTF(32, ARM64Reg::D0, ARM64Reg::D0);
 
     MOVP2R(addr_reg, &m_dequantizeTableS);
     ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
-    float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
-    float_emit.FMUL(32, D0, D0, D1, 0);
-    RET(X30);
+    float_emit.LDR(32, IndexType::Unsigned, ARM64Reg::D1, scale_reg, 0);
+    float_emit.FMUL(32, ARM64Reg::D0, ARM64Reg::D0, ARM64Reg::D1, 0);
+    RET(ARM64Reg::X30);
   }
   const u8* loadPairedS8One = GetCodePtr();
   {
     ADD(addr_reg, addr_reg, MEM_REG);
-    float_emit.LDR(8, IndexType::Unsigned, D0, addr_reg, 0);
-    float_emit.SXTL(8, D0, D0);
-    float_emit.SXTL(16, D0, D0);
-    float_emit.SCVTF(32, D0, D0);
+    float_emit.LDR(8, IndexType::Unsigned, ARM64Reg::D0, addr_reg, 0);
+    float_emit.SXTL(8, ARM64Reg::D0, ARM64Reg::D0);
+    float_emit.SXTL(16, ARM64Reg::D0, ARM64Reg::D0);
+    float_emit.SCVTF(32, ARM64Reg::D0, ARM64Reg::D0);
 
     MOVP2R(addr_reg, &m_dequantizeTableS);
     ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
-    float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
-    float_emit.FMUL(32, D0, D0, D1, 0);
-    RET(X30);
+    float_emit.LDR(32, IndexType::Unsigned, ARM64Reg::D1, scale_reg, 0);
+    float_emit.FMUL(32, ARM64Reg::D0, ARM64Reg::D0, ARM64Reg::D1, 0);
+    RET(ARM64Reg::X30);
   }
   const u8* loadPairedU16One = GetCodePtr();
   {
     ADD(addr_reg, addr_reg, MEM_REG);
-    float_emit.LDR(16, IndexType::Unsigned, D0, addr_reg, 0);
-    float_emit.REV16(8, D0, D0);
-    float_emit.UXTL(16, D0, D0);
-    float_emit.UCVTF(32, D0, D0);
+    float_emit.LDR(16, IndexType::Unsigned, ARM64Reg::D0, addr_reg, 0);
+    float_emit.REV16(8, ARM64Reg::D0, ARM64Reg::D0);
+    float_emit.UXTL(16, ARM64Reg::D0, ARM64Reg::D0);
+    float_emit.UCVTF(32, ARM64Reg::D0, ARM64Reg::D0);
 
     MOVP2R(addr_reg, &m_dequantizeTableS);
     ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
-    float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
-    float_emit.FMUL(32, D0, D0, D1, 0);
-    RET(X30);
+    float_emit.LDR(32, IndexType::Unsigned, ARM64Reg::D1, scale_reg, 0);
+    float_emit.FMUL(32, ARM64Reg::D0, ARM64Reg::D0, ARM64Reg::D1, 0);
+    RET(ARM64Reg::X30);
   }
   const u8* loadPairedS16One = GetCodePtr();
   {
     ADD(addr_reg, addr_reg, MEM_REG);
-    float_emit.LDR(16, IndexType::Unsigned, D0, addr_reg, 0);
-    float_emit.REV16(8, D0, D0);
-    float_emit.SXTL(16, D0, D0);
-    float_emit.SCVTF(32, D0, D0);
+    float_emit.LDR(16, IndexType::Unsigned, ARM64Reg::D0, addr_reg, 0);
+    float_emit.REV16(8, ARM64Reg::D0, ARM64Reg::D0);
+    float_emit.SXTL(16, ARM64Reg::D0, ARM64Reg::D0);
+    float_emit.SCVTF(32, ARM64Reg::D0, ARM64Reg::D0);
 
     MOVP2R(addr_reg, &m_dequantizeTableS);
     ADD(scale_reg, addr_reg, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
-    float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
-    float_emit.FMUL(32, D0, D0, D1, 0);
-    RET(X30);
+    float_emit.LDR(32, IndexType::Unsigned, ARM64Reg::D1, scale_reg, 0);
+    float_emit.FMUL(32, ARM64Reg::D0, ARM64Reg::D0, ARM64Reg::D1, 0);
+    RET(ARM64Reg::X30);
   }
 
   JitRegister::Register(start, GetCodePtr(), "JIT_QuantizedLoad");
@@ -370,245 +370,245 @@ void JitArm64::GenerateCommonAsm()
   const u8* storePairedFloatSlow;
   {
     storePairedFloat = GetCodePtr();
-    float_emit.REV32(8, D0, D0);
+    float_emit.REV32(8, ARM64Reg::D0, ARM64Reg::D0);
     ADD(addr_reg, addr_reg, MEM_REG);
-    float_emit.ST1(64, Q0, 0, addr_reg, SP);
-    RET(X30);
+    float_emit.ST1(64, ARM64Reg::Q0, 0, addr_reg, ARM64Reg::SP);
+    RET(ARM64Reg::X30);
 
     storePairedFloatSlow = GetCodePtr();
-    float_emit.UMOV(64, X0, Q0, 0);
-    ROR(X0, X0, 32);
-    MOVP2R(X2, &PowerPC::Write_U64);
-    BR(X2);
+    float_emit.UMOV(64, ARM64Reg::X0, ARM64Reg::Q0, 0);
+    ROR(ARM64Reg::X0, ARM64Reg::X0, 32);
+    MOVP2R(ARM64Reg::X2, &PowerPC::Write_U64);
+    BR(ARM64Reg::X2);
   }
 
   const u8* storePairedU8;
   const u8* storePairedU8Slow;
   {
     auto emit_quantize = [this, &float_emit, scale_reg]() {
-      MOVP2R(X2, &m_quantizeTableS);
-      ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
-      float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
-      float_emit.FMUL(32, D0, D0, D1, 0);
+      MOVP2R(ARM64Reg::X2, &m_quantizeTableS);
+      ADD(scale_reg, ARM64Reg::X2, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
+      float_emit.LDR(32, IndexType::Unsigned, ARM64Reg::D1, scale_reg, 0);
+      float_emit.FMUL(32, ARM64Reg::D0, ARM64Reg::D0, ARM64Reg::D1, 0);
 
-      float_emit.FCVTZU(32, D0, D0);
-      float_emit.UQXTN(16, D0, D0);
-      float_emit.UQXTN(8, D0, D0);
+      float_emit.FCVTZU(32, ARM64Reg::D0, ARM64Reg::D0);
+      float_emit.UQXTN(16, ARM64Reg::D0, ARM64Reg::D0);
+      float_emit.UQXTN(8, ARM64Reg::D0, ARM64Reg::D0);
     };
 
     storePairedU8 = GetCodePtr();
    emit_quantize();
     ADD(addr_reg, addr_reg, MEM_REG);
-    float_emit.ST1(16, Q0, 0, addr_reg, SP);
-    RET(X30);
+    float_emit.ST1(16, ARM64Reg::Q0, 0, addr_reg, ARM64Reg::SP);
+    RET(ARM64Reg::X30);
 
     storePairedU8Slow = GetCodePtr();
     emit_quantize();
-    float_emit.UMOV(16, W0, Q0, 0);
-    REV16(W0, W0);
-    MOVP2R(X2, &PowerPC::Write_U16);
-    BR(X2);
+    float_emit.UMOV(16, ARM64Reg::W0, ARM64Reg::Q0, 0);
+    REV16(ARM64Reg::W0, ARM64Reg::W0);
+    MOVP2R(ARM64Reg::X2, &PowerPC::Write_U16);
+    BR(ARM64Reg::X2);
   }
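The emit_quantize lambdas mirror the loads in reverse: scale by m_quantizeTableS, convert to integer, then narrow with saturation (UQXTN/SQXTN). A scalar model of the unsigned 8-bit path (a sketch; FCVTZU truncates toward zero and clamps, as does the saturating narrow):

    u8 QuantizeToU8(float value, float scale)
    {
      const float scaled = value * scale;
      if (scaled <= 0.0f)
        return 0;    // FCVTZU clamps negative inputs to zero
      if (scaled >= 255.0f)
        return 255;  // UQXTN saturates the 32 -> 16 -> 8 narrowing
      return static_cast<u8>(scaled);  // truncation toward zero
    }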
   const u8* storePairedS8;
   const u8* storePairedS8Slow;
   {
     auto emit_quantize = [this, &float_emit, scale_reg]() {
-      MOVP2R(X2, &m_quantizeTableS);
-      ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
-      float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
-      float_emit.FMUL(32, D0, D0, D1, 0);
+      MOVP2R(ARM64Reg::X2, &m_quantizeTableS);
+      ADD(scale_reg, ARM64Reg::X2, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
+      float_emit.LDR(32, IndexType::Unsigned, ARM64Reg::D1, scale_reg, 0);
+      float_emit.FMUL(32, ARM64Reg::D0, ARM64Reg::D0, ARM64Reg::D1, 0);
 
-      float_emit.FCVTZS(32, D0, D0);
-      float_emit.SQXTN(16, D0, D0);
-      float_emit.SQXTN(8, D0, D0);
+      float_emit.FCVTZS(32, ARM64Reg::D0, ARM64Reg::D0);
+      float_emit.SQXTN(16, ARM64Reg::D0, ARM64Reg::D0);
+      float_emit.SQXTN(8, ARM64Reg::D0, ARM64Reg::D0);
     };
 
     storePairedS8 = GetCodePtr();
     emit_quantize();
     ADD(addr_reg, addr_reg, MEM_REG);
-    float_emit.ST1(16, Q0, 0, addr_reg, SP);
-    RET(X30);
+    float_emit.ST1(16, ARM64Reg::Q0, 0, addr_reg, ARM64Reg::SP);
+    RET(ARM64Reg::X30);
 
     storePairedS8Slow = GetCodePtr();
     emit_quantize();
-    float_emit.UMOV(16, W0, Q0, 0);
-    REV16(W0, W0);
-    MOVP2R(X2, &PowerPC::Write_U16);
-    BR(X2);
+    float_emit.UMOV(16, ARM64Reg::W0, ARM64Reg::Q0, 0);
+    REV16(ARM64Reg::W0, ARM64Reg::W0);
+    MOVP2R(ARM64Reg::X2, &PowerPC::Write_U16);
+    BR(ARM64Reg::X2);
   }
 
   const u8* storePairedU16;
   const u8* storePairedU16Slow;
   {
     auto emit_quantize = [this, &float_emit, scale_reg]() {
-      MOVP2R(X2, &m_quantizeTableS);
-      ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
-      float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
-      float_emit.FMUL(32, D0, D0, D1, 0);
+      MOVP2R(ARM64Reg::X2, &m_quantizeTableS);
+      ADD(scale_reg, ARM64Reg::X2, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
+      float_emit.LDR(32, IndexType::Unsigned, ARM64Reg::D1, scale_reg, 0);
+      float_emit.FMUL(32, ARM64Reg::D0, ARM64Reg::D0, ARM64Reg::D1, 0);
 
-      float_emit.FCVTZU(32, D0, D0);
-      float_emit.UQXTN(16, D0, D0);
-      float_emit.REV16(8, D0, D0);
+      float_emit.FCVTZU(32, ARM64Reg::D0, ARM64Reg::D0);
+      float_emit.UQXTN(16, ARM64Reg::D0, ARM64Reg::D0);
+      float_emit.REV16(8, ARM64Reg::D0, ARM64Reg::D0);
     };
 
     storePairedU16 = GetCodePtr();
     emit_quantize();
     ADD(addr_reg, addr_reg, MEM_REG);
-    float_emit.ST1(32, Q0, 0, addr_reg, SP);
-    RET(X30);
+    float_emit.ST1(32, ARM64Reg::Q0, 0, addr_reg, ARM64Reg::SP);
+    RET(ARM64Reg::X30);
 
     storePairedU16Slow = GetCodePtr();
     emit_quantize();
-    float_emit.REV32(8, D0, D0);
-    float_emit.UMOV(32, W0, Q0, 0);
-    MOVP2R(X2, &PowerPC::Write_U32);
-    BR(X2);
+    float_emit.REV32(8, ARM64Reg::D0, ARM64Reg::D0);
+    float_emit.UMOV(32, ARM64Reg::W0, ARM64Reg::Q0, 0);
+    MOVP2R(ARM64Reg::X2, &PowerPC::Write_U32);
+    BR(ARM64Reg::X2);
   }
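Each store comes in a fast and a slow flavor: the fast thunk writes straight through MEM_REG (the fastmem base) and returns, while the *Slow thunk moves the byteswapped value into W0/X0 and jumps with BR rather than BLR to the PowerPC::Write_* handler, so the handler's own return takes execution straight back to the JIT code that called the thunk. A hypothetical scalar model of the slow U16 handoff (QuantizeToU16 is assumed analogous to the U8 sketch earlier, not a function from this patch):

    u32 PrepareSlowStoreU16(float value, float scale)
    {
      const u16 quantized = QuantizeToU16(value, scale);  // hypothetical helper
      return Common::swap16(quantized);  // the REV16 before the BR
    }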
   const u8* storePairedS16;  // Used by Viewtiful Joe's intro movie
   const u8* storePairedS16Slow;
   {
     auto emit_quantize = [this, &float_emit, scale_reg]() {
-      MOVP2R(X2, &m_quantizeTableS);
-      ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
-      float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
-      float_emit.FMUL(32, D0, D0, D1, 0);
+      MOVP2R(ARM64Reg::X2, &m_quantizeTableS);
+      ADD(scale_reg, ARM64Reg::X2, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
+      float_emit.LDR(32, IndexType::Unsigned, ARM64Reg::D1, scale_reg, 0);
+      float_emit.FMUL(32, ARM64Reg::D0, ARM64Reg::D0, ARM64Reg::D1, 0);
 
-      float_emit.FCVTZS(32, D0, D0);
-      float_emit.SQXTN(16, D0, D0);
-      float_emit.REV16(8, D0, D0);
+      float_emit.FCVTZS(32, ARM64Reg::D0, ARM64Reg::D0);
+      float_emit.SQXTN(16, ARM64Reg::D0, ARM64Reg::D0);
+      float_emit.REV16(8, ARM64Reg::D0, ARM64Reg::D0);
     };
 
     storePairedS16 = GetCodePtr();
     emit_quantize();
     ADD(addr_reg, addr_reg, MEM_REG);
-    float_emit.ST1(32, Q0, 0, addr_reg, SP);
-    RET(X30);
+    float_emit.ST1(32, ARM64Reg::Q0, 0, addr_reg, ARM64Reg::SP);
+    RET(ARM64Reg::X30);
 
     storePairedS16Slow = GetCodePtr();
     emit_quantize();
-    float_emit.REV32(8, D0, D0);
-    float_emit.UMOV(32, W0, Q0, 0);
-    MOVP2R(X2, &PowerPC::Write_U32);
-    BR(X2);
+    float_emit.REV32(8, ARM64Reg::D0, ARM64Reg::D0);
+    float_emit.UMOV(32, ARM64Reg::W0, ARM64Reg::Q0, 0);
+    MOVP2R(ARM64Reg::X2, &PowerPC::Write_U32);
+    BR(ARM64Reg::X2);
  }
 
   const u8* storeSingleFloat;
   const u8* storeSingleFloatSlow;
   {
     storeSingleFloat = GetCodePtr();
-    float_emit.REV32(8, D0, D0);
+    float_emit.REV32(8, ARM64Reg::D0, ARM64Reg::D0);
     ADD(addr_reg, addr_reg, MEM_REG);
-    float_emit.STR(32, IndexType::Unsigned, D0, addr_reg, 0);
-    RET(X30);
+    float_emit.STR(32, IndexType::Unsigned, ARM64Reg::D0, addr_reg, 0);
+    RET(ARM64Reg::X30);
 
     storeSingleFloatSlow = GetCodePtr();
-    float_emit.UMOV(32, W0, Q0, 0);
-    MOVP2R(X2, &PowerPC::Write_U32);
-    BR(X2);
+    float_emit.UMOV(32, ARM64Reg::W0, ARM64Reg::Q0, 0);
+    MOVP2R(ARM64Reg::X2, &PowerPC::Write_U32);
+    BR(ARM64Reg::X2);
   }
   const u8* storeSingleU8;  // Used by MKWii
   const u8* storeSingleU8Slow;
   {
     auto emit_quantize = [this, &float_emit, scale_reg]() {
-      MOVP2R(X2, &m_quantizeTableS);
-      ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
-      float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
-      float_emit.FMUL(32, D0, D0, D1);
+      MOVP2R(ARM64Reg::X2, &m_quantizeTableS);
+      ADD(scale_reg, ARM64Reg::X2, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
+      float_emit.LDR(32, IndexType::Unsigned, ARM64Reg::D1, scale_reg, 0);
+      float_emit.FMUL(32, ARM64Reg::D0, ARM64Reg::D0, ARM64Reg::D1);
 
-      float_emit.FCVTZU(32, D0, D0);
-      float_emit.UQXTN(16, D0, D0);
-      float_emit.UQXTN(8, D0, D0);
+      float_emit.FCVTZU(32, ARM64Reg::D0, ARM64Reg::D0);
+      float_emit.UQXTN(16, ARM64Reg::D0, ARM64Reg::D0);
+      float_emit.UQXTN(8, ARM64Reg::D0, ARM64Reg::D0);
     };
 
     storeSingleU8 = GetCodePtr();
     emit_quantize();
     ADD(addr_reg, addr_reg, MEM_REG);
-    float_emit.ST1(8, Q0, 0, addr_reg);
-    RET(X30);
+    float_emit.ST1(8, ARM64Reg::Q0, 0, addr_reg);
+    RET(ARM64Reg::X30);
 
     storeSingleU8Slow = GetCodePtr();
     emit_quantize();
-    float_emit.UMOV(8, W0, Q0, 0);
-    MOVP2R(X2, &PowerPC::Write_U8);
-    BR(X2);
+    float_emit.UMOV(8, ARM64Reg::W0, ARM64Reg::Q0, 0);
+    MOVP2R(ARM64Reg::X2, &PowerPC::Write_U8);
+    BR(ARM64Reg::X2);
   }
   const u8* storeSingleS8;
   const u8* storeSingleS8Slow;
   {
     auto emit_quantize = [this, &float_emit, scale_reg]() {
-      MOVP2R(X2, &m_quantizeTableS);
-      ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
-      float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
-      float_emit.FMUL(32, D0, D0, D1);
+      MOVP2R(ARM64Reg::X2, &m_quantizeTableS);
+      ADD(scale_reg, ARM64Reg::X2, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
+      float_emit.LDR(32, IndexType::Unsigned, ARM64Reg::D1, scale_reg, 0);
+      float_emit.FMUL(32, ARM64Reg::D0, ARM64Reg::D0, ARM64Reg::D1);
 
-      float_emit.FCVTZS(32, D0, D0);
-      float_emit.SQXTN(16, D0, D0);
-      float_emit.SQXTN(8, D0, D0);
+      float_emit.FCVTZS(32, ARM64Reg::D0, ARM64Reg::D0);
+      float_emit.SQXTN(16, ARM64Reg::D0, ARM64Reg::D0);
+      float_emit.SQXTN(8, ARM64Reg::D0, ARM64Reg::D0);
     };
 
     storeSingleS8 = GetCodePtr();
     emit_quantize();
     ADD(addr_reg, addr_reg, MEM_REG);
-    float_emit.ST1(8, Q0, 0, addr_reg);
-    RET(X30);
+    float_emit.ST1(8, ARM64Reg::Q0, 0, addr_reg);
+    RET(ARM64Reg::X30);
 
     storeSingleS8Slow = GetCodePtr();
     emit_quantize();
-    float_emit.SMOV(8, W0, Q0, 0);
-    MOVP2R(X2, &PowerPC::Write_U8);
-    BR(X2);
+    float_emit.SMOV(8, ARM64Reg::W0, ARM64Reg::Q0, 0);
+    MOVP2R(ARM64Reg::X2, &PowerPC::Write_U8);
+    BR(ARM64Reg::X2);
   }
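The REV16/REV32 instructions throughout these thunks are byteswaps: GameCube/Wii memory is big-endian while the host is little-endian. The scalar equivalent (a sketch; Common::swap16/swap32 are the utilities Dolphin uses for the same conversion outside the JIT):

    u32 ToGuestOrder32(u32 host_value)
    {
      return Common::swap32(host_value);  // REV32 on one 32-bit lane
    }
    u16 ToGuestOrder16(u16 host_value)
    {
      return Common::swap16(host_value);  // REV16 on one 16-bit lane
    }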
   const u8* storeSingleU16;  // Used by MKWii
   const u8* storeSingleU16Slow;
   {
     auto emit_quantize = [this, &float_emit, scale_reg]() {
-      MOVP2R(X2, &m_quantizeTableS);
-      ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
-      float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
-      float_emit.FMUL(32, D0, D0, D1);
+      MOVP2R(ARM64Reg::X2, &m_quantizeTableS);
+      ADD(scale_reg, ARM64Reg::X2, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
+      float_emit.LDR(32, IndexType::Unsigned, ARM64Reg::D1, scale_reg, 0);
+      float_emit.FMUL(32, ARM64Reg::D0, ARM64Reg::D0, ARM64Reg::D1);
 
-      float_emit.FCVTZU(32, D0, D0);
-      float_emit.UQXTN(16, D0, D0);
+      float_emit.FCVTZU(32, ARM64Reg::D0, ARM64Reg::D0);
+      float_emit.UQXTN(16, ARM64Reg::D0, ARM64Reg::D0);
     };
 
     storeSingleU16 = GetCodePtr();
     emit_quantize();
     ADD(addr_reg, addr_reg, MEM_REG);
-    float_emit.REV16(8, D0, D0);
-    float_emit.ST1(16, Q0, 0, addr_reg);
-    RET(X30);
+    float_emit.REV16(8, ARM64Reg::D0, ARM64Reg::D0);
+    float_emit.ST1(16, ARM64Reg::Q0, 0, addr_reg);
+    RET(ARM64Reg::X30);
 
     storeSingleU16Slow = GetCodePtr();
     emit_quantize();
-    float_emit.UMOV(16, W0, Q0, 0);
-    MOVP2R(X2, &PowerPC::Write_U16);
-    BR(X2);
+    float_emit.UMOV(16, ARM64Reg::W0, ARM64Reg::Q0, 0);
+    MOVP2R(ARM64Reg::X2, &PowerPC::Write_U16);
+    BR(ARM64Reg::X2);
   }
   const u8* storeSingleS16;
   const u8* storeSingleS16Slow;
   {
     auto emit_quantize = [this, &float_emit, scale_reg]() {
-      MOVP2R(X2, &m_quantizeTableS);
-      ADD(scale_reg, X2, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
-      float_emit.LDR(32, IndexType::Unsigned, D1, scale_reg, 0);
-      float_emit.FMUL(32, D0, D0, D1);
+      MOVP2R(ARM64Reg::X2, &m_quantizeTableS);
+      ADD(scale_reg, ARM64Reg::X2, scale_reg, ArithOption(scale_reg, ShiftType::LSL, 3));
+      float_emit.LDR(32, IndexType::Unsigned, ARM64Reg::D1, scale_reg, 0);
+      float_emit.FMUL(32, ARM64Reg::D0, ARM64Reg::D0, ARM64Reg::D1);
 
-      float_emit.FCVTZS(32, D0, D0);
-      float_emit.SQXTN(16, D0, D0);
+      float_emit.FCVTZS(32, ARM64Reg::D0, ARM64Reg::D0);
+      float_emit.SQXTN(16, ARM64Reg::D0, ARM64Reg::D0);
     };
 
     storeSingleS16 = GetCodePtr();
     emit_quantize();
     ADD(addr_reg, addr_reg, MEM_REG);
-    float_emit.REV16(8, D0, D0);
-    float_emit.ST1(16, Q0, 0, addr_reg);
-    RET(X30);
+    float_emit.REV16(8, ARM64Reg::D0, ARM64Reg::D0);
+    float_emit.ST1(16, ARM64Reg::Q0, 0, addr_reg);
+    RET(ARM64Reg::X30);
 
     storeSingleS16Slow = GetCodePtr();
     emit_quantize();
-    float_emit.SMOV(16, W0, Q0, 0);
-    MOVP2R(X2, &PowerPC::Write_U16);
-    BR(X2);
+    float_emit.SMOV(16, ARM64Reg::W0, ARM64Reg::Q0, 0);
+    MOVP2R(ARM64Reg::X2, &PowerPC::Write_U16);
+    BR(ARM64Reg::X2);
   }
 
   JitRegister::Register(start, GetCodePtr(), "JIT_QuantizedStore");
diff --git a/Source/Core/Core/PowerPC/JitArm64/Jit_Util.cpp b/Source/Core/Core/PowerPC/JitArm64/Jit_Util.cpp
index 7fd18e3276..f04d87537c 100644
--- a/Source/Core/Core/PowerPC/JitArm64/Jit_Util.cpp
+++ b/Source/Core/Core/PowerPC/JitArm64/Jit_Util.cpp
@@ -39,13 +39,13 @@ private:
     switch (sbits)
     {
     case 8:
-      m_emit->STRB(IndexType::Unsigned, reg, X0, 0);
+      m_emit->STRB(IndexType::Unsigned, reg, ARM64Reg::X0, 0);
       break;
     case 16:
-      m_emit->STRH(IndexType::Unsigned, reg, X0, 0);
+      m_emit->STRH(IndexType::Unsigned, reg, ARM64Reg::X0, 0);
       break;
     case 32:
-      m_emit->STR(IndexType::Unsigned, reg, X0, 0);
+      m_emit->STR(IndexType::Unsigned, reg, ARM64Reg::X0, 0);
       break;
     default:
       ASSERT_MSG(DYNA_REC, false, "Unknown size %d passed to MMIOWriteCodeGenerator!", sbits);
@@ -55,7 +55,7 @@ private:
 
   void WriteRegToAddr(int sbits, const void* ptr, u32 mask)
   {
-    m_emit->MOVP2R(X0, ptr);
+    m_emit->MOVP2R(ARM64Reg::X0, ptr);
 
     // If we do not need to mask, we can do the sign extend while loading
     // from memory. If masking is required, we have to first zero extend,
@@ -67,8 +67,8 @@ private:
     }
     else
     {
-      m_emit->ANDI2R(W1, m_src_reg, mask, W1);
-      StoreFromRegister(sbits, W1);
+      m_emit->ANDI2R(ARM64Reg::W1, m_src_reg, mask, ARM64Reg::W1);
+      StoreFromRegister(sbits, ARM64Reg::W1);
     }
   }
 
@@ -77,11 +77,11 @@ private:
     ARM64FloatEmitter float_emit(m_emit);
 
     m_emit->ABI_PushRegisters(m_gprs_in_use);
-    float_emit.ABI_PushRegisters(m_fprs_in_use, X1);
-    m_emit->MOVI2R(W1, m_address);
-    m_emit->MOV(W2, m_src_reg);
+    float_emit.ABI_PushRegisters(m_fprs_in_use, ARM64Reg::X1);
+    m_emit->MOVI2R(ARM64Reg::W1, m_address);
+    m_emit->MOV(ARM64Reg::W2, m_src_reg);
     m_emit->BLR(m_emit->ABI_SetupLambda(lambda));
-    float_emit.ABI_PopRegisters(m_fprs_in_use, X1);
+    float_emit.ABI_PopRegisters(m_fprs_in_use, ARM64Reg::X1);
     m_emit->ABI_PopRegisters(m_gprs_in_use);
   }
 
@@ -127,18 +127,18 @@ private:
     {
     case 8:
       if (m_sign_extend && !dont_extend)
-        m_emit->LDRSB(IndexType::Unsigned, m_dst_reg, X0, 0);
+        m_emit->LDRSB(IndexType::Unsigned, m_dst_reg, ARM64Reg::X0, 0);
       else
-        m_emit->LDRB(IndexType::Unsigned, m_dst_reg, X0, 0);
+        m_emit->LDRB(IndexType::Unsigned, m_dst_reg, ARM64Reg::X0, 0);
       break;
     case 16:
       if (m_sign_extend && !dont_extend)
-        m_emit->LDRSH(IndexType::Unsigned, m_dst_reg, X0, 0);
+        m_emit->LDRSH(IndexType::Unsigned, m_dst_reg, ARM64Reg::X0, 0);
       else
-        m_emit->LDRH(IndexType::Unsigned, m_dst_reg, X0, 0);
+        m_emit->LDRH(IndexType::Unsigned, m_dst_reg, ARM64Reg::X0, 0);
       break;
     case 32:
-      m_emit->LDR(IndexType::Unsigned, m_dst_reg, X0, 0);
+      m_emit->LDR(IndexType::Unsigned, m_dst_reg, ARM64Reg::X0, 0);
       break;
     default:
      ASSERT_MSG(DYNA_REC, false, "Unknown size %d passed to MMIOReadCodeGenerator!", sbits);
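Both MMIO generators make the decision those comments describe: without a mask, a sign-extending load (LDRSB/LDRSH) does everything in one instruction; with a mask, the value must be zero-extended first so the AND sees the raw bits, and the sign extension is redone afterwards with SBFM. In scalar form (a sketch; ReadSized and NeedsMask are hypothetical stand-ins for the generated load and the mask test):

    u32 LoadMMIOValue(const u8* ptr, int sbits, u32 mask, bool sign_extend)
    {
      if (!NeedsMask(mask, sbits))
        return ReadSized(ptr, sbits, sign_extend);  // LDRSB/LDRSH extend for free

      u32 value = ReadSized(ptr, sbits, false) & mask;  // zero-extend, then AND
      if (sign_extend)
        value = u32(s32(value << (32 - sbits)) >> (32 - sbits));  // the SBFM
      return value;
    }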
@@ -148,7 +148,7 @@ private:
 
   void LoadAddrMaskToReg(int sbits, const void* ptr, u32 mask)
   {
-    m_emit->MOVP2R(X0, ptr);
+    m_emit->MOVP2R(ARM64Reg::X0, ptr);
 
     // If we do not need to mask, we can do the sign extend while loading
     // from memory. If masking is required, we have to first zero extend,
@@ -161,7 +161,7 @@ private:
     else
     {
       LoadToRegister(sbits, true);
-      m_emit->ANDI2R(m_dst_reg, m_dst_reg, mask, W0);
+      m_emit->ANDI2R(m_dst_reg, m_dst_reg, mask, ARM64Reg::W0);
       if (m_sign_extend)
         m_emit->SBFM(m_dst_reg, m_dst_reg, 0, sbits - 1);
     }
@@ -172,16 +172,16 @@ private:
     ARM64FloatEmitter float_emit(m_emit);
 
     m_emit->ABI_PushRegisters(m_gprs_in_use);
-    float_emit.ABI_PushRegisters(m_fprs_in_use, X1);
-    m_emit->MOVI2R(W1, m_address);
+    float_emit.ABI_PushRegisters(m_fprs_in_use, ARM64Reg::X1);
+    m_emit->MOVI2R(ARM64Reg::W1, m_address);
     m_emit->BLR(m_emit->ABI_SetupLambda(lambda));
-    float_emit.ABI_PopRegisters(m_fprs_in_use, X1);
+    float_emit.ABI_PopRegisters(m_fprs_in_use, ARM64Reg::X1);
     m_emit->ABI_PopRegisters(m_gprs_in_use);
 
     if (m_sign_extend)
-      m_emit->SBFM(m_dst_reg, W0, 0, sbits - 1);
+      m_emit->SBFM(m_dst_reg, ARM64Reg::W0, 0, sbits - 1);
     else
-      m_emit->UBFM(m_dst_reg, W0, 0, sbits - 1);
+      m_emit->UBFM(m_dst_reg, ARM64Reg::W0, 0, sbits - 1);
   }
 
   ARM64XEmitter* m_emit;
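The SBFM/UBFM pair above, used with immr = 0 and imms = sbits - 1, copies the low sbits of W0 into the destination with sign or zero extension. The 32-bit C equivalents (a sketch, valid for 0 < sbits < 32; the sbits == 32 case is a plain move):

    u32 Ubfm0(u32 x, int sbits)  // UBFM(dst, src, 0, sbits - 1)
    {
      return x & ((1u << sbits) - 1);
    }
    s32 Sbfm0(u32 x, int sbits)  // SBFM(dst, src, 0, sbits - 1)
    {
      return s32(x << (32 - sbits)) >> (32 - sbits);
    }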
diff --git a/Source/Core/VideoCommon/VertexLoaderARM64.cpp b/Source/Core/VideoCommon/VertexLoaderARM64.cpp
index f56314f6c1..6e81f7c19e 100644
--- a/Source/Core/VideoCommon/VertexLoaderARM64.cpp
+++ b/Source/Core/VideoCommon/VertexLoaderARM64.cpp
@@ -12,18 +12,18 @@
 
 using namespace Arm64Gen;
 
-constexpr ARM64Reg src_reg = X0;
-constexpr ARM64Reg dst_reg = X1;
-constexpr ARM64Reg count_reg = W2;
-constexpr ARM64Reg skipped_reg = W17;
-constexpr ARM64Reg scratch1_reg = W16;
-constexpr ARM64Reg scratch2_reg = W15;
-constexpr ARM64Reg scratch3_reg = W14;
-constexpr ARM64Reg saved_count = W12;
+constexpr ARM64Reg src_reg = ARM64Reg::X0;
+constexpr ARM64Reg dst_reg = ARM64Reg::X1;
+constexpr ARM64Reg count_reg = ARM64Reg::W2;
+constexpr ARM64Reg skipped_reg = ARM64Reg::W17;
+constexpr ARM64Reg scratch1_reg = ARM64Reg::W16;
+constexpr ARM64Reg scratch2_reg = ARM64Reg::W15;
+constexpr ARM64Reg scratch3_reg = ARM64Reg::W14;
+constexpr ARM64Reg saved_count = ARM64Reg::W12;
 
-constexpr ARM64Reg stride_reg = X11;
-constexpr ARM64Reg arraybase_reg = X10;
-constexpr ARM64Reg scale_reg = X9;
+constexpr ARM64Reg stride_reg = ARM64Reg::X11;
+constexpr ARM64Reg arraybase_reg = ARM64Reg::X10;
+constexpr ARM64Reg scale_reg = ARM64Reg::X9;
 
 alignas(16) static const float scale_factors[] = {
     1.0 / (1ULL << 0), 1.0 / (1ULL << 1), 1.0 / (1ULL << 2), 1.0 / (1ULL << 3),
@@ -115,8 +115,8 @@ int VertexLoaderARM64::ReadVertex(VertexComponentFormat attribute, ComponentForm
                                   int count_in, int count_out, bool dequantize,
                                   u8 scaling_exponent, AttributeFormat* native_format, s32 offset)
 {
-  ARM64Reg coords = count_in == 3 ? Q31 : D31;
-  ARM64Reg scale = count_in == 3 ? Q30 : D30;
+  ARM64Reg coords = count_in == 3 ? ARM64Reg::Q31 : ARM64Reg::D31;
+  ARM64Reg scale = count_in == 3 ? ARM64Reg::Q30 : ARM64Reg::D30;
 
   int elem_size = GetElementSize(format);
   int load_bytes = elem_size * count_in;
@@ -253,13 +253,13 @@ void VertexLoaderARM64::ReadColor(VertexComponentFormat attribute, ColorFormat f
 
       // B
       AND(scratch2_reg, scratch3_reg, 32, 4);
-      ORR(scratch2_reg, WSP, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 3));
+      ORR(scratch2_reg, ARM64Reg::WSP, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 3));
       ORR(scratch2_reg, scratch2_reg, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSR, 5));
-      ORR(scratch1_reg, WSP, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 16));
+      ORR(scratch1_reg, ARM64Reg::WSP, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 16));
 
       // G
       UBFM(scratch2_reg, scratch3_reg, 5, 10);
-      ORR(scratch2_reg, WSP, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 2));
+      ORR(scratch2_reg, ARM64Reg::WSP, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 2));
       ORR(scratch2_reg, scratch2_reg, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSR, 6));
       ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 8));
 
@@ -328,19 +328,19 @@ void VertexLoaderARM64::ReadColor(VertexComponentFormat attribute, ColorFormat f
 
      // A
       UBFM(scratch2_reg, scratch3_reg, 0, 5);
-      ORR(scratch2_reg, WSP, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 2));
+      ORR(scratch2_reg, ARM64Reg::WSP, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 2));
       ORR(scratch2_reg, scratch2_reg, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSR, 6));
-      ORR(scratch1_reg, WSP, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 24));
+      ORR(scratch1_reg, ARM64Reg::WSP, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 24));
 
       // B
       UBFM(scratch2_reg, scratch3_reg, 6, 11);
-      ORR(scratch2_reg, WSP, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 2));
+      ORR(scratch2_reg, ARM64Reg::WSP, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 2));
       ORR(scratch2_reg, scratch2_reg, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSR, 6));
       ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 16));
 
       // G
       UBFM(scratch2_reg, scratch3_reg, 12, 17);
-      ORR(scratch2_reg, WSP, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 2));
+      ORR(scratch2_reg, ARM64Reg::WSP, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 2));
       ORR(scratch2_reg, scratch2_reg, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSR, 6));
       ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSL, 8));
 
@@ -388,7 +388,7 @@ void VertexLoaderARM64::GenerateVertexLoader()
 
   AlignCode16();
   if (IsIndexed(m_VtxDesc.low.Position))
-    MOV(skipped_reg, WZR);
+    MOV(skipped_reg, ARM64Reg::WZR);
   MOV(saved_count, count_reg);
 
   MOVP2R(stride_reg, g_main_cp_state.array_strides);
@@ -544,11 +544,11 @@ void VertexLoaderARM64::GenerateVertexLoader()
       m_native_vtx_decl.texcoords[i].integer = false;
 
       LDRB(IndexType::Unsigned, scratch2_reg, src_reg, texmatidx_ofs[i]);
-      m_float_emit.UCVTF(S31, scratch2_reg);
+      m_float_emit.UCVTF(ARM64Reg::S31, scratch2_reg);
 
      if (m_VtxDesc.high.TexCoord[i] != VertexComponentFormat::NotPresent)
      {
-        m_float_emit.STR(32, IndexType::Unsigned, D31, dst_reg, m_dst_ofs);
+        m_float_emit.STR(32, IndexType::Unsigned, ARM64Reg::D31, dst_reg, m_dst_ofs);
         m_dst_ofs += sizeof(float);
       }
       else
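The ORR-with-shift sequences in ReadColor are standard bit replication: a 5- or 6-bit channel is widened to 8 bits so the maximum input maps exactly to 0xFF. (Register 31 in this shifted-register ORR form is the zero register, so ORR(dst, WSP, src, shift) is simply a shifted move.) In C, matching the shift amounts used above (a sketch):

    u32 Expand5To8(u32 c)  // LSL 3, then ORR with itself LSR 5
    {
      c <<= 3;
      return c | (c >> 5);  // == (c5 << 3) | (c5 >> 2)
    }
    u32 Expand6To8(u32 c)  // LSL 2, then ORR with itself LSR 6
    {
      c <<= 2;
      return c | (c >> 6);  // == (c6 << 2) | (c6 >> 4)
    }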
@@ -557,21 +557,21 @@ void VertexLoaderARM64::GenerateVertexLoader()
 
        if (m_dst_ofs < 256)
        {
-          STUR(SP, dst_reg, m_dst_ofs);
+          STUR(ARM64Reg::SP, dst_reg, m_dst_ofs);
        }
        else if (!(m_dst_ofs & 7))
        {
          // If m_dst_ofs isn't 8byte aligned we can't store an 8byte zero register
          // So store two 4byte zero registers
          // The destination is always 4byte aligned
-          STR(IndexType::Unsigned, WSP, dst_reg, m_dst_ofs);
-          STR(IndexType::Unsigned, WSP, dst_reg, m_dst_ofs + 4);
+          STR(IndexType::Unsigned, ARM64Reg::WSP, dst_reg, m_dst_ofs);
+          STR(IndexType::Unsigned, ARM64Reg::WSP, dst_reg, m_dst_ofs + 4);
        }
        else
        {
-          STR(IndexType::Unsigned, SP, dst_reg, m_dst_ofs);
+          STR(IndexType::Unsigned, ARM64Reg::SP, dst_reg, m_dst_ofs);
        }
-        m_float_emit.STR(32, IndexType::Unsigned, D31, dst_reg, m_dst_ofs + 8);
+        m_float_emit.STR(32, IndexType::Unsigned, ARM64Reg::D31, dst_reg, m_dst_ofs + 8);
 
        m_dst_ofs += sizeof(float) * 3;
      }
@@ -588,8 +588,8 @@ void VertexLoaderARM64::GenerateVertexLoader()
 
   if (IsIndexed(m_VtxDesc.low.Position))
   {
-    SUB(W0, saved_count, skipped_reg);
-    RET(X30);
+    SUB(ARM64Reg::W0, saved_count, skipped_reg);
+    RET(ARM64Reg::X30);
 
     SetJumpTarget(m_skip_vertex);
     ADD(skipped_reg, skipped_reg, 1);
@@ -597,8 +597,8 @@ void VertexLoaderARM64::GenerateVertexLoader()
   }
   else
   {
-    MOV(W0, saved_count);
-    RET(X30);
+    MOV(ARM64Reg::W0, saved_count);
+    RET(ARM64Reg::X30);
   }
 
   FlushIcache();
diff --git a/Source/UnitTests/Core/PowerPC/JitArm64/MovI2R.cpp b/Source/UnitTests/Core/PowerPC/JitArm64/MovI2R.cpp
index c21e47c03d..8313a7be79 100644
--- a/Source/UnitTests/Core/PowerPC/JitArm64/MovI2R.cpp
+++ b/Source/UnitTests/Core/PowerPC/JitArm64/MovI2R.cpp
@@ -26,7 +26,7 @@ public:
     ResetCodePtr();
 
     const u8* fn = GetCodePtr();
-    MOVI2R(W0, value);
+    MOVI2R(ARM64Reg::W0, value);
     RET();
 
     FlushIcacheSection(const_cast<u8*>(fn), const_cast<u8*>(GetCodePtr()));
@@ -40,7 +40,7 @@ public:
     ResetCodePtr();
 
     const u8* fn = GetCodePtr();
-    MOVI2R(X0, value);
+    MOVI2R(ARM64Reg::X0, value);
     RET();
 
     FlushIcacheSection(const_cast<u8*>(fn), const_cast<u8*>(GetCodePtr()));
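For reference, the unit-test pattern touched in MovI2R.cpp is: emit a stub that materializes the constant, flush the instruction cache, then execute the fresh code and compare. In outline (a sketch; calling through the function pointer assumes the emitter's buffer is mapped executable):

    const u8* fn = GetCodePtr();
    MOVI2R(ARM64Reg::W0, value);
    RET();
    FlushIcacheSection(const_cast<u8*>(fn), const_cast<u8*>(GetCodePtr()));

    const u32 result = reinterpret_cast<u32 (*)()>(const_cast<u8*>(fn))();
    // result should equal value for every encoding strategy MOVI2R can pick.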