From ab024b218e956f478c91c994628ec7153bfe79cf Mon Sep 17 00:00:00 2001 From: JosJuice Date: Tue, 6 Jul 2021 16:22:23 +0200 Subject: [PATCH 1/4] JitArm64: Accept LogicalImm struct as bitwise inst parameter --- Source/Core/Common/Arm64Emitter.cpp | 72 ++++++++++++++++++----------- Source/Core/Common/Arm64Emitter.h | 27 +++++++++-- 2 files changed, 69 insertions(+), 30 deletions(-) diff --git a/Source/Core/Common/Arm64Emitter.cpp b/Source/Core/Common/Arm64Emitter.cpp index 141336e212..44f229f3ab 100644 --- a/Source/Core/Common/Arm64Emitter.cpp +++ b/Source/Core/Common/Arm64Emitter.cpp @@ -46,7 +46,7 @@ std::optional> IsImmArithmetic(uint64_t input) } // For AND/TST/ORR/EOR etc -std::optional> IsImmLogical(u64 value, u32 width) +LogicalImm IsImmLogical(u64 value, u32 width) { bool negate = false; @@ -154,7 +154,7 @@ std::optional> IsImmLogical(u64 value, u32 width) // The input was zero (or all 1 bits, which will come to here too after we // inverted it at the start of the function), for which we just return // false. - return std::nullopt; + return LogicalImm(); } else { @@ -171,12 +171,12 @@ std::optional> IsImmLogical(u64 value, u32 width) // If the repeat period d is not a power of two, it can't be encoded. if (!MathUtil::IsPow2(d)) - return std::nullopt; + return LogicalImm(); // If the bit stretch (b - a) does not fit within the mask derived from the // repeat period, then fail. if (((b - a) & ~mask) != 0) - return std::nullopt; + return LogicalImm(); // The only possible option is b - a repeated every d bits. Now we're going to // actually construct the valid logical immediate derived from that @@ -204,7 +204,7 @@ std::optional> IsImmLogical(u64 value, u32 width) // The candidate pattern doesn't match our input value, so fail. if (value != candidate) - return std::nullopt; + return LogicalImm(); // We have a match! This is a valid logical immediate, so now we have to // construct the bits and pieces of the instruction encoding that generates @@ -246,11 +246,8 @@ std::optional> IsImmLogical(u64 value, u32 width) // 11110s 2 UInt(s) // // So we 'or' (-d << 1) with our computed s to form imms. - return std::tuple{ - static_cast(out_n), - static_cast(((-d << 1) | (s - 1)) & 0x3f), - static_cast(r), - }; + return LogicalImm(static_cast(r), static_cast(((-d << 1) | (s - 1)) & 0x3f), + static_cast(out_n)); } float FPImm8ToFloat(u8 bits) @@ -780,10 +777,18 @@ void ARM64XEmitter::EncodeLogicalImmInst(u32 op, ARM64Reg Rd, ARM64Reg Rn, u32 i // Use Rn to determine bitness here. 
bool b64Bit = Is64Bit(Rn); + ASSERT_MSG(DYNAREC, b64Bit || !n, "64-bit logical immediate does not fit in 32-bit register"); + Write32((b64Bit << 31) | (op << 29) | (0x24 << 23) | (n << 22) | (immr << 16) | (imms << 10) | (DecodeReg(Rn) << 5) | DecodeReg(Rd)); } +void ARM64XEmitter::EncodeLogicalImmInst(u32 op, ARM64Reg Rd, ARM64Reg Rn, LogicalImm imm) +{ + ASSERT_MSG(DYNAREC, imm.valid, "Invalid logical immediate"); + EncodeLogicalImmInst(op, Rd, Rn, imm.r, imm.s, imm.n); +} + void ARM64XEmitter::EncodeLoadStorePair(u32 op, u32 load, IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, s32 imm) { @@ -1545,22 +1550,42 @@ void ARM64XEmitter::AND(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool inver { EncodeLogicalImmInst(0, Rd, Rn, immr, imms, invert); } +void ARM64XEmitter::AND(ARM64Reg Rd, ARM64Reg Rn, LogicalImm imm) +{ + EncodeLogicalImmInst(0, Rd, Rn, imm); +} void ARM64XEmitter::ANDS(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert) { EncodeLogicalImmInst(3, Rd, Rn, immr, imms, invert); } +void ARM64XEmitter::ANDS(ARM64Reg Rd, ARM64Reg Rn, LogicalImm imm) +{ + EncodeLogicalImmInst(3, Rd, Rn, imm); +} void ARM64XEmitter::EOR(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert) { EncodeLogicalImmInst(2, Rd, Rn, immr, imms, invert); } +void ARM64XEmitter::EOR(ARM64Reg Rd, ARM64Reg Rn, LogicalImm imm) +{ + EncodeLogicalImmInst(2, Rd, Rn, imm); +} void ARM64XEmitter::ORR(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert) { EncodeLogicalImmInst(1, Rd, Rn, immr, imms, invert); } +void ARM64XEmitter::ORR(ARM64Reg Rd, ARM64Reg Rn, LogicalImm imm) +{ + EncodeLogicalImmInst(1, Rd, Rn, imm); +} void ARM64XEmitter::TST(ARM64Reg Rn, u32 immr, u32 imms, bool invert) { EncodeLogicalImmInst(3, Is64Bit(Rn) ? ARM64Reg::ZR : ARM64Reg::WZR, Rn, immr, imms, invert); } +void ARM64XEmitter::TST(ARM64Reg Rn, LogicalImm imm) +{ + EncodeLogicalImmInst(3, Is64Bit(Rn) ? ARM64Reg::ZR : ARM64Reg::WZR, Rn, imm); +} // Add/subtract (immediate) void ARM64XEmitter::ADD(ARM64Reg Rd, ARM64Reg Rn, u32 imm, bool shift) @@ -4129,8 +4154,7 @@ void ARM64XEmitter::ANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) if (const auto result = IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32)) { - const auto& [n, imm_s, imm_r] = *result; - AND(Rd, Rn, imm_r, imm_s, n != 0); + AND(Rd, Rn, result); } else { @@ -4146,8 +4170,7 @@ void ARM64XEmitter::ORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) { if (const auto result = IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32)) { - const auto& [n, imm_s, imm_r] = *result; - ORR(Rd, Rn, imm_r, imm_s, n != 0); + ORR(Rd, Rn, result); } else { @@ -4163,8 +4186,7 @@ void ARM64XEmitter::EORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) { if (const auto result = IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32)) { - const auto& [n, imm_s, imm_r] = *result; - EOR(Rd, Rn, imm_r, imm_s, n != 0); + EOR(Rd, Rn, result); } else { @@ -4180,8 +4202,7 @@ void ARM64XEmitter::ANDSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) { if (const auto result = IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32)) { - const auto& [n, imm_s, imm_r] = *result; - ANDS(Rd, Rn, imm_r, imm_s, n != 0); + ANDS(Rd, Rn, result); } else { @@ -4342,10 +4363,9 @@ bool ARM64XEmitter::TryCMPI2R(ARM64Reg Rn, u64 imm) bool ARM64XEmitter::TryANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm) { - if (const auto result = IsImmLogical(imm, Is64Bit(Rd) ? 64 : 32)) + if (const auto result = IsImmLogical(imm, Is64Bit(Rn) ? 
64 : 32)) { - const auto& [n, imm_s, imm_r] = *result; - AND(Rd, Rn, imm_r, imm_s, n != 0); + AND(Rd, Rn, result); return true; } @@ -4354,10 +4374,9 @@ bool ARM64XEmitter::TryANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm) bool ARM64XEmitter::TryORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm) { - if (const auto result = IsImmLogical(imm, Is64Bit(Rd) ? 64 : 32)) + if (const auto result = IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32)) { - const auto& [n, imm_s, imm_r] = *result; - ORR(Rd, Rn, imm_r, imm_s, n != 0); + ORR(Rd, Rn, result); return true; } @@ -4366,10 +4385,9 @@ bool ARM64XEmitter::TryORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm) bool ARM64XEmitter::TryEORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm) { - if (const auto result = IsImmLogical(imm, Is64Bit(Rd) ? 64 : 32)) + if (const auto result = IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32)) { - const auto& [n, imm_s, imm_r] = *result; - EOR(Rd, Rn, imm_r, imm_s, n != 0); + EOR(Rd, Rn, result); return true; } diff --git a/Source/Core/Common/Arm64Emitter.h b/Source/Core/Common/Arm64Emitter.h index 5702bebbd9..f62b62e3ef 100644 --- a/Source/Core/Common/Arm64Emitter.h +++ b/Source/Core/Common/Arm64Emitter.h @@ -496,6 +496,19 @@ public: bool IsExtended() const { return m_type == TypeSpecifier::ExtendedReg; } }; +struct LogicalImm +{ + constexpr LogicalImm() : r(0), s(0), n(false), valid(false) {} + constexpr LogicalImm(u8 r_, u8 s_, bool n_) : r(r_), s(s_), n(n_), valid(true) {} + + constexpr operator bool() const { return valid; } + + u8 r; + u8 s; + bool n; + bool valid; +}; + class ARM64XEmitter { friend class ARM64FloatEmitter; @@ -531,6 +544,7 @@ private: void EncodeLoadStoreRegisterOffset(u32 size, u32 opc, ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm); void EncodeAddSubImmInst(u32 op, bool flags, u32 shift, u32 imm, ARM64Reg Rn, ARM64Reg Rd); void EncodeLogicalImmInst(u32 op, ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, int n); + void EncodeLogicalImmInst(u32 op, ARM64Reg Rd, ARM64Reg Rn, LogicalImm imm); void EncodeLoadStorePair(u32 op, u32 load, IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, s32 imm); void EncodeAddressInst(u32 op, ARM64Reg Rd, s32 imm); @@ -772,10 +786,15 @@ public: // Logical (immediate) void AND(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert = false); + void AND(ARM64Reg Rd, ARM64Reg Rn, LogicalImm imm); void ANDS(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert = false); + void ANDS(ARM64Reg Rd, ARM64Reg Rn, LogicalImm imm); void EOR(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert = false); + void EOR(ARM64Reg Rd, ARM64Reg Rn, LogicalImm imm); void ORR(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert = false); + void ORR(ARM64Reg Rd, ARM64Reg Rn, LogicalImm imm); void TST(ARM64Reg Rn, u32 immr, u32 imms, bool invert = false); + void TST(ARM64Reg Rn, LogicalImm imm); // Add/subtract (immediate) void ADD(ARM64Reg Rd, ARM64Reg Rn, u32 imm, bool shift = false); void ADDS(ARM64Reg Rd, ARM64Reg Rn, u32 imm, bool shift = false); @@ -893,8 +912,10 @@ public: MOVI2R(Rd, (uintptr_t)ptr); } - // Wrapper around AND x, y, imm etc. If you are sure the imm will work, no need to pass a scratch - // register. + // Wrapper around AND x, y, imm etc. + // If you are sure the imm will work, no need to pass a scratch register. + // If the imm is constant, preferably call EncodeLogicalImm directly instead of using these + // functions, as this lets the computation of the imm encoding be performed during compilation. 
void ANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG); void ANDSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG); void TSTI2R(ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG) @@ -903,7 +924,6 @@ public: } void ORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG); void EORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG); - void CMPI2R(ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG); void ADDI2R_internal(ARM64Reg Rd, ARM64Reg Rn, u64 imm, bool negative, bool flags, ARM64Reg scratch); @@ -911,6 +931,7 @@ public: void ADDSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG); void SUBI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG); void SUBSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG); + void CMPI2R(ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG); bool TryADDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm); bool TrySUBI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm); From 10861ed8ce4f60f76ec3508f18c02083dfd226e4 Mon Sep 17 00:00:00 2001 From: JosJuice Date: Tue, 6 Jul 2021 15:57:03 +0200 Subject: [PATCH 2/4] JitArm64: Turn IsImmLogical into a constexpr constructor --- Source/Core/Common/Arm64Emitter.cpp | 228 ++-------------------------- Source/Core/Common/Arm64Emitter.h | 221 ++++++++++++++++++++++++++- Source/Core/Common/BitUtils.h | 9 ++ 3 files changed, 234 insertions(+), 224 deletions(-) diff --git a/Source/Core/Common/Arm64Emitter.cpp b/Source/Core/Common/Arm64Emitter.cpp index 44f229f3ab..2e7cd2fd4d 100644 --- a/Source/Core/Common/Arm64Emitter.cpp +++ b/Source/Core/Common/Arm64Emitter.cpp @@ -28,11 +28,6 @@ namespace Arm64Gen { namespace { -uint64_t LargestPowerOf2Divisor(uint64_t value) -{ - return value & -(int64_t)value; -} - // For ADD/SUB std::optional> IsImmArithmetic(uint64_t input) { @@ -45,211 +40,6 @@ std::optional> IsImmArithmetic(uint64_t input) return std::nullopt; } -// For AND/TST/ORR/EOR etc -LogicalImm IsImmLogical(u64 value, u32 width) -{ - bool negate = false; - - // Logical immediates are encoded using parameters n, imm_s and imm_r using - // the following table: - // - // N imms immr size S R - // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr) - // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr) - // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr) - // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr) - // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr) - // 0 11110s xxxxxr 2 UInt(s) UInt(r) - // (s bits must not be all set) - // - // A pattern is constructed of size bits, where the least significant S+1 bits - // are set. The pattern is rotated right by R, and repeated across a 32 or - // 64-bit value, depending on destination register width. - // - // Put another way: the basic format of a logical immediate is a single - // contiguous stretch of 1 bits, repeated across the whole word at intervals - // given by a power of 2. To identify them quickly, we first locate the - // lowest stretch of 1 bits, then the next 1 bit above that; that combination - // is different for every logical immediate, so it gives us all the - // information we need to identify the only logical immediate that our input - // could be, and then we simply check if that's the value we actually have. - // - // (The rotation parameter does give the possibility of the stretch of 1 bits - // going 'round the end' of the word. 
To deal with that, we observe that in - // any situation where that happens the bitwise NOT of the value is also a - // valid logical immediate. So we simply invert the input whenever its low bit - // is set, and then we know that the rotated case can't arise.) - - if (value & 1) - { - // If the low bit is 1, negate the value, and set a flag to remember that we - // did (so that we can adjust the return values appropriately). - negate = true; - value = ~value; - } - - constexpr int kWRegSizeInBits = 32; - - if (width == kWRegSizeInBits) - { - // To handle 32-bit logical immediates, the very easiest thing is to repeat - // the input value twice to make a 64-bit word. The correct encoding of that - // as a logical immediate will also be the correct encoding of the 32-bit - // value. - - // The most-significant 32 bits may not be zero (ie. negate is true) so - // shift the value left before duplicating it. - value <<= kWRegSizeInBits; - value |= value >> kWRegSizeInBits; - } - - // The basic analysis idea: imagine our input word looks like this. - // - // 0011111000111110001111100011111000111110001111100011111000111110 - // c b a - // |<--d-->| - // - // We find the lowest set bit (as an actual power-of-2 value, not its index) - // and call it a. Then we add a to our original number, which wipes out the - // bottommost stretch of set bits and replaces it with a 1 carried into the - // next zero bit. Then we look for the new lowest set bit, which is in - // position b, and subtract it, so now our number is just like the original - // but with the lowest stretch of set bits completely gone. Now we find the - // lowest set bit again, which is position c in the diagram above. Then we'll - // measure the distance d between bit positions a and c (using CLZ), and that - // tells us that the only valid logical immediate that could possibly be equal - // to this number is the one in which a stretch of bits running from a to just - // below b is replicated every d bits. - uint64_t a = LargestPowerOf2Divisor(value); - uint64_t value_plus_a = value + a; - uint64_t b = LargestPowerOf2Divisor(value_plus_a); - uint64_t value_plus_a_minus_b = value_plus_a - b; - uint64_t c = LargestPowerOf2Divisor(value_plus_a_minus_b); - - int d, clz_a, out_n; - uint64_t mask; - - if (c != 0) - { - // The general case, in which there is more than one stretch of set bits. - // Compute the repeat distance d, and set up a bitmask covering the basic - // unit of repetition (i.e. a word with the bottom d bits set). Also, in all - // of these cases the N bit of the output will be zero. - clz_a = Common::CountLeadingZeros(a); - int clz_c = Common::CountLeadingZeros(c); - d = clz_a - clz_c; - mask = ((UINT64_C(1) << d) - 1); - out_n = 0; - } - else - { - // Handle degenerate cases. - // - // If any of those 'find lowest set bit' operations didn't find a set bit at - // all, then the word will have been zero thereafter, so in particular the - // last lowest_set_bit operation will have returned zero. So we can test for - // all the special case conditions in one go by seeing if c is zero. - if (a == 0) - { - // The input was zero (or all 1 bits, which will come to here too after we - // inverted it at the start of the function), for which we just return - // false. - return LogicalImm(); - } - else - { - // Otherwise, if c was zero but a was not, then there's just one stretch - // of set bits in our word, meaning that we have the trivial case of - // d == 64 and only one 'repetition'. 
Set up all the same variables as in - // the general case above, and set the N bit in the output. - clz_a = Common::CountLeadingZeros(a); - d = 64; - mask = ~UINT64_C(0); - out_n = 1; - } - } - - // If the repeat period d is not a power of two, it can't be encoded. - if (!MathUtil::IsPow2(d)) - return LogicalImm(); - - // If the bit stretch (b - a) does not fit within the mask derived from the - // repeat period, then fail. - if (((b - a) & ~mask) != 0) - return LogicalImm(); - - // The only possible option is b - a repeated every d bits. Now we're going to - // actually construct the valid logical immediate derived from that - // specification, and see if it equals our original input. - // - // To repeat a value every d bits, we multiply it by a number of the form - // (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can - // be derived using a table lookup on CLZ(d). - static const std::array multipliers = {{ - 0x0000000000000001UL, - 0x0000000100000001UL, - 0x0001000100010001UL, - 0x0101010101010101UL, - 0x1111111111111111UL, - 0x5555555555555555UL, - }}; - - const int multiplier_idx = Common::CountLeadingZeros((u64)d) - 57; - - // Ensure that the index to the multipliers array is within bounds. - DEBUG_ASSERT((multiplier_idx >= 0) && (static_cast(multiplier_idx) < multipliers.size())); - - const u64 multiplier = multipliers[multiplier_idx]; - const u64 candidate = (b - a) * multiplier; - - // The candidate pattern doesn't match our input value, so fail. - if (value != candidate) - return LogicalImm(); - - // We have a match! This is a valid logical immediate, so now we have to - // construct the bits and pieces of the instruction encoding that generates - // it. - - // Count the set bits in our basic stretch. The special case of clz(0) == -1 - // makes the answer come out right for stretches that reach the very top of - // the word (e.g. numbers like 0xffffc00000000000). - const int clz_b = (b == 0) ? -1 : Common::CountLeadingZeros(b); - int s = clz_a - clz_b; - - // Decide how many bits to rotate right by, to put the low bit of that basic - // stretch in position a. - int r; - if (negate) - { - // If we inverted the input right at the start of this function, here's - // where we compensate: the number of set bits becomes the number of clear - // bits, and the rotation count is based on position b rather than position - // a (since b is the location of the 'lowest' 1 bit after inversion). - s = d - s; - r = (clz_b + 1) & (d - 1); - } - else - { - r = (clz_a + 1) & (d - 1); - } - - // Now we're done, except for having to encode the S output in such a way that - // it gives both the number of set bits and the length of the repeated - // segment. The s field is encoded like this: - // - // imms size S - // ssssss 64 UInt(ssssss) - // 0sssss 32 UInt(sssss) - // 10ssss 16 UInt(ssss) - // 110sss 8 UInt(sss) - // 1110ss 4 UInt(ss) - // 11110s 2 UInt(s) - // - // So we 'or' (-d << 1) with our computed s to form imms. 
- return LogicalImm(static_cast(r), static_cast(((-d << 1) | (s - 1)) & 0x3f), - static_cast(out_n)); -} - float FPImm8ToFloat(u8 bits) { const u32 sign = bits >> 7; @@ -2092,13 +1882,13 @@ void ARM64XEmitter::MOVI2RImpl(ARM64Reg Rd, T imm) (imm & 0xFFFF'FFFF'0000'0000) | (imm >> 32), (imm << 48) | (imm & 0x0000'FFFF'FFFF'0000) | (imm >> 48)}) { - if (IsImmLogical(orr_imm, 64)) + if (LogicalImm(orr_imm, 64)) try_base(orr_imm, Approach::ORRBase, false); } } else { - if (IsImmLogical(imm, 32)) + if (LogicalImm(imm, 32)) try_base(imm, Approach::ORRBase, false); } } @@ -4152,7 +3942,7 @@ void ARM64XEmitter::ANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) if (!Is64Bit(Rn)) imm &= 0xFFFFFFFF; - if (const auto result = IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32)) + if (const auto result = LogicalImm(imm, Is64Bit(Rn) ? 64 : 32)) { AND(Rd, Rn, result); } @@ -4168,7 +3958,7 @@ void ARM64XEmitter::ANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) void ARM64XEmitter::ORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) { - if (const auto result = IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32)) + if (const auto result = LogicalImm(imm, Is64Bit(Rn) ? 64 : 32)) { ORR(Rd, Rn, result); } @@ -4184,7 +3974,7 @@ void ARM64XEmitter::ORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) void ARM64XEmitter::EORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) { - if (const auto result = IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32)) + if (const auto result = LogicalImm(imm, Is64Bit(Rn) ? 64 : 32)) { EOR(Rd, Rn, result); } @@ -4200,7 +3990,7 @@ void ARM64XEmitter::EORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) void ARM64XEmitter::ANDSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) { - if (const auto result = IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32)) + if (const auto result = LogicalImm(imm, Is64Bit(Rn) ? 64 : 32)) { ANDS(Rd, Rn, result); } @@ -4363,7 +4153,7 @@ bool ARM64XEmitter::TryCMPI2R(ARM64Reg Rn, u64 imm) bool ARM64XEmitter::TryANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm) { - if (const auto result = IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32)) + if (const auto result = LogicalImm(imm, Is64Bit(Rd) ? 64 : 32)) { AND(Rd, Rn, result); return true; @@ -4374,7 +4164,7 @@ bool ARM64XEmitter::TryANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm) bool ARM64XEmitter::TryORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm) { - if (const auto result = IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32)) + if (const auto result = LogicalImm(imm, Is64Bit(Rd) ? 64 : 32)) { ORR(Rd, Rn, result); return true; @@ -4385,7 +4175,7 @@ bool ARM64XEmitter::TryORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm) bool ARM64XEmitter::TryEORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm) { - if (const auto result = IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32)) + if (const auto result = LogicalImm(imm, Is64Bit(Rd) ? 
64 : 32)) { EOR(Rd, Rn, result); return true; diff --git a/Source/Core/Common/Arm64Emitter.h b/Source/Core/Common/Arm64Emitter.h index f62b62e3ef..6d4b616c41 100644 --- a/Source/Core/Common/Arm64Emitter.h +++ b/Source/Core/Common/Arm64Emitter.h @@ -5,12 +5,17 @@ #include #include +#include +#include #include "Common/ArmCommon.h" #include "Common/Assert.h" #include "Common/BitSet.h" +#include "Common/BitUtils.h" #include "Common/CodeBlock.h" #include "Common/Common.h" +#include "Common/CommonTypes.h" +#include "Common/MathUtil.h" namespace Arm64Gen { @@ -498,15 +503,221 @@ public: struct LogicalImm { - constexpr LogicalImm() : r(0), s(0), n(false), valid(false) {} + constexpr LogicalImm() {} + constexpr LogicalImm(u8 r_, u8 s_, bool n_) : r(r_), s(s_), n(n_), valid(true) {} + constexpr LogicalImm(u64 value, u32 width) + { + bool negate = false; + + // Logical immediates are encoded using parameters n, imm_s and imm_r using + // the following table: + // + // N imms immr size S R + // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr) + // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr) + // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr) + // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr) + // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr) + // 0 11110s xxxxxr 2 UInt(s) UInt(r) + // (s bits must not be all set) + // + // A pattern is constructed of size bits, where the least significant S+1 bits + // are set. The pattern is rotated right by R, and repeated across a 32 or + // 64-bit value, depending on destination register width. + // + // Put another way: the basic format of a logical immediate is a single + // contiguous stretch of 1 bits, repeated across the whole word at intervals + // given by a power of 2. To identify them quickly, we first locate the + // lowest stretch of 1 bits, then the next 1 bit above that; that combination + // is different for every logical immediate, so it gives us all the + // information we need to identify the only logical immediate that our input + // could be, and then we simply check if that's the value we actually have. + // + // (The rotation parameter does give the possibility of the stretch of 1 bits + // going 'round the end' of the word. To deal with that, we observe that in + // any situation where that happens the bitwise NOT of the value is also a + // valid logical immediate. So we simply invert the input whenever its low bit + // is set, and then we know that the rotated case can't arise.) + + if (value & 1) + { + // If the low bit is 1, negate the value, and set a flag to remember that we + // did (so that we can adjust the return values appropriately). + negate = true; + value = ~value; + } + + constexpr int kWRegSizeInBits = 32; + + if (width == kWRegSizeInBits) + { + // To handle 32-bit logical immediates, the very easiest thing is to repeat + // the input value twice to make a 64-bit word. The correct encoding of that + // as a logical immediate will also be the correct encoding of the 32-bit + // value. + + // The most-significant 32 bits may not be zero (ie. negate is true) so + // shift the value left before duplicating it. + value <<= kWRegSizeInBits; + value |= value >> kWRegSizeInBits; + } + + // The basic analysis idea: imagine our input word looks like this. + // + // 0011111000111110001111100011111000111110001111100011111000111110 + // c b a + // |<--d-->| + // + // We find the lowest set bit (as an actual power-of-2 value, not its index) + // and call it a. 
Then we add a to our original number, which wipes out the + // bottommost stretch of set bits and replaces it with a 1 carried into the + // next zero bit. Then we look for the new lowest set bit, which is in + // position b, and subtract it, so now our number is just like the original + // but with the lowest stretch of set bits completely gone. Now we find the + // lowest set bit again, which is position c in the diagram above. Then we'll + // measure the distance d between bit positions a and c (using CLZ), and that + // tells us that the only valid logical immediate that could possibly be equal + // to this number is the one in which a stretch of bits running from a to just + // below b is replicated every d bits. + u64 a = Common::LargestPowerOf2Divisor(value); + u64 value_plus_a = value + a; + u64 b = Common::LargestPowerOf2Divisor(value_plus_a); + u64 value_plus_a_minus_b = value_plus_a - b; + u64 c = Common::LargestPowerOf2Divisor(value_plus_a_minus_b); + + int d = 0, clz_a = 0, out_n = 0; + u64 mask = 0; + + if (c != 0) + { + // The general case, in which there is more than one stretch of set bits. + // Compute the repeat distance d, and set up a bitmask covering the basic + // unit of repetition (i.e. a word with the bottom d bits set). Also, in all + // of these cases the N bit of the output will be zero. + clz_a = Common::CountLeadingZeros(a); + int clz_c = Common::CountLeadingZeros(c); + d = clz_a - clz_c; + mask = ((UINT64_C(1) << d) - 1); + out_n = 0; + } + else + { + // Handle degenerate cases. + // + // If any of those 'find lowest set bit' operations didn't find a set bit at + // all, then the word will have been zero thereafter, so in particular the + // last lowest_set_bit operation will have returned zero. So we can test for + // all the special case conditions in one go by seeing if c is zero. + if (a == 0) + { + // The input was zero (or all 1 bits, which will come to here too after we + // inverted it at the start of the function), which is invalid. + return; + } + else + { + // Otherwise, if c was zero but a was not, then there's just one stretch + // of set bits in our word, meaning that we have the trivial case of + // d == 64 and only one 'repetition'. Set up all the same variables as in + // the general case above, and set the N bit in the output. + clz_a = Common::CountLeadingZeros(a); + d = 64; + mask = ~UINT64_C(0); + out_n = 1; + } + } + + // If the repeat period d is not a power of two, it can't be encoded. + if (!MathUtil::IsPow2(d)) + return; + + // If the bit stretch (b - a) does not fit within the mask derived from the + // repeat period, then fail. + if (((b - a) & ~mask) != 0) + return; + + // The only possible option is b - a repeated every d bits. Now we're going to + // actually construct the valid logical immediate derived from that + // specification, and see if it equals our original input. + // + // To repeat a value every d bits, we multiply it by a number of the form + // (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can + // be derived using a table lookup on CLZ(d). + constexpr std::array multipliers = {{ + 0x0000000000000001UL, + 0x0000000100000001UL, + 0x0001000100010001UL, + 0x0101010101010101UL, + 0x1111111111111111UL, + 0x5555555555555555UL, + }}; + + const int multiplier_idx = Common::CountLeadingZeros((u64)d) - 57; + + // Ensure that the index to the multipliers array is within bounds. 
+ DEBUG_ASSERT((multiplier_idx >= 0) && + (static_cast(multiplier_idx) < multipliers.size())); + + const u64 multiplier = multipliers[multiplier_idx]; + const u64 candidate = (b - a) * multiplier; + + // The candidate pattern doesn't match our input value, so fail. + if (value != candidate) + return; + + // We have a match! This is a valid logical immediate, so now we have to + // construct the bits and pieces of the instruction encoding that generates + // it. + n = out_n; + + // Count the set bits in our basic stretch. The special case of clz(0) == -1 + // makes the answer come out right for stretches that reach the very top of + // the word (e.g. numbers like 0xffffc00000000000). + const int clz_b = (b == 0) ? -1 : Common::CountLeadingZeros(b); + s = clz_a - clz_b; + + // Decide how many bits to rotate right by, to put the low bit of that basic + // stretch in position a. + if (negate) + { + // If we inverted the input right at the start of this function, here's + // where we compensate: the number of set bits becomes the number of clear + // bits, and the rotation count is based on position b rather than position + // a (since b is the location of the 'lowest' 1 bit after inversion). + s = d - s; + r = (clz_b + 1) & (d - 1); + } + else + { + r = (clz_a + 1) & (d - 1); + } + + // Now we're done, except for having to encode the S output in such a way that + // it gives both the number of set bits and the length of the repeated + // segment. The s field is encoded like this: + // + // imms size S + // ssssss 64 UInt(ssssss) + // 0sssss 32 UInt(sssss) + // 10ssss 16 UInt(ssss) + // 110sss 8 UInt(sss) + // 1110ss 4 UInt(ss) + // 11110s 2 UInt(s) + // + // So we 'or' (-d << 1) with our computed s to form imms. + s = ((-d << 1) | (s - 1)) & 0x3f; + + valid = true; + } + constexpr operator bool() const { return valid; } - u8 r; - u8 s; - bool n; - bool valid; + u8 r = 0; + u8 s = 0; + bool n = false; + bool valid = false; }; class ARM64XEmitter diff --git a/Source/Core/Common/BitUtils.h b/Source/Core/Common/BitUtils.h index d4d23d63d8..09ab4bd78c 100644 --- a/Source/Core/Common/BitUtils.h +++ b/Source/Core/Common/BitUtils.h @@ -413,4 +413,13 @@ constexpr int CountLeadingZeros(uint32_t value) #undef CONSTEXPR_FROM_INTRINSIC +template +constexpr T LargestPowerOf2Divisor(T value) +{ + static_assert(std::is_unsigned(), + "LargestPowerOf2Divisor only makes sense for unsigned types."); + + return value & -static_cast>(value); +} + } // namespace Common From 9e80db123f4f8b6b51c672bec729a70a21ee7af4 Mon Sep 17 00:00:00 2001 From: JosJuice Date: Tue, 6 Jul 2021 16:53:04 +0200 Subject: [PATCH 3/4] JitArm64: Encode logical immediates at compile-time where possible Manually encoding and decoding logical immediates is error-prone. Using ORRI2R and friends lets us avoid doing the work manually, but in exchange, there is a runtime performance penalty. It's probably rather small, but still, it would be nice if we could let the compiler do the work at compile-time. And that's exactly what this commit does, so now I have no excuse for trying to manually write logical immediates anymore. 
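
For illustration, the before/after call pattern for the EXCEPTION_SYSCALL case in JitArm64_Branch.cpp looks like this (WA being whatever 32-bit host register the caller has locked; shown here only as a usage sketch of the new API):

  // Before: the mask is analysed at runtime inside ORRI2R, and a scratch
  // register must be available in case the value turns out not to be encodable.
  ORRI2R(WA, WA, EXCEPTION_SYSCALL);

  // After: LogicalImm's constexpr constructor lets the C++ compiler fold the
  // (n, imms, immr) fields for the constant mask, and an unencodable constant
  // trips the ASSERT_MSG in EncodeLogicalImmInst instead of requiring a scratch.
  ORR(WA, WA, LogicalImm(EXCEPTION_SYSCALL, 32));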
--- Source/Core/Common/Arm64Emitter.h | 15 +++-- Source/Core/Core/PowerPC/JitArm64/Jit.cpp | 2 +- .../Core/PowerPC/JitArm64/JitArm64_Branch.cpp | 2 +- .../JitArm64/JitArm64_FloatingPoint.cpp | 12 ++-- .../PowerPC/JitArm64/JitArm64_Integer.cpp | 2 +- .../PowerPC/JitArm64/JitArm64_LoadStore.cpp | 8 +-- .../JitArm64/JitArm64_SystemRegisters.cpp | 26 ++++---- Source/Core/Core/PowerPC/JitArm64/JitAsm.cpp | 63 ++++++++++--------- Source/Core/VideoCommon/VertexLoaderARM64.cpp | 4 +- 9 files changed, 67 insertions(+), 67 deletions(-) diff --git a/Source/Core/Common/Arm64Emitter.h b/Source/Core/Common/Arm64Emitter.h index 6d4b616c41..3f5c18d8c9 100644 --- a/Source/Core/Common/Arm64Emitter.h +++ b/Source/Core/Common/Arm64Emitter.h @@ -1124,17 +1124,16 @@ public: } // Wrapper around AND x, y, imm etc. - // If you are sure the imm will work, no need to pass a scratch register. - // If the imm is constant, preferably call EncodeLogicalImm directly instead of using these - // functions, as this lets the computation of the imm encoding be performed during compilation. - void ANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG); - void ANDSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG); - void TSTI2R(ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG) + // If you are sure the imm will work, preferably construct a LogicalImm directly instead, + // since that is constexpr and thus can be done at compile-time for constant values. + void ANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch); + void ANDSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch); + void TSTI2R(ARM64Reg Rn, u64 imm, ARM64Reg scratch) { ANDSI2R(Is64Bit(Rn) ? ARM64Reg::ZR : ARM64Reg::WZR, Rn, imm, scratch); } - void ORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG); - void EORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG); + void ORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch); + void EORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch); void ADDI2R_internal(ARM64Reg Rd, ARM64Reg Rn, u64 imm, bool negative, bool flags, ARM64Reg scratch); diff --git a/Source/Core/Core/PowerPC/JitArm64/Jit.cpp b/Source/Core/Core/PowerPC/JitArm64/Jit.cpp index 39fd84be53..252b90912e 100644 --- a/Source/Core/Core/PowerPC/JitArm64/Jit.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/Jit.cpp @@ -799,7 +799,7 @@ void JitArm64::DoJit(u32 em_address, JitBlock* b, u32 nextPC) fpr.Flush(FlushMode::MaintainState); LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(Exceptions)); - ORRI2R(WA, WA, EXCEPTION_FPU_UNAVAILABLE); + ORR(WA, WA, LogicalImm(EXCEPTION_FPU_UNAVAILABLE, 32)); STR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(Exceptions)); gpr.Unlock(WA); diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_Branch.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_Branch.cpp index 4b50d71501..8398ebe33a 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_Branch.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_Branch.cpp @@ -24,7 +24,7 @@ void JitArm64::sc(UGeckoInstruction inst) ARM64Reg WA = gpr.GetReg(); LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(Exceptions)); - ORRI2R(WA, WA, EXCEPTION_SYSCALL); + ORR(WA, WA, LogicalImm(EXCEPTION_SYSCALL, 32)); STR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(Exceptions)); gpr.Unlock(WA); diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_FloatingPoint.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_FloatingPoint.cpp index 
e99c6d6a92..ec38cd7b5c 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_FloatingPoint.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_FloatingPoint.cpp @@ -401,7 +401,7 @@ void JitArm64::FloatCompare(UGeckoInstruction inst, bool upper) { fpscr_reg = gpr.GetReg(); LDR(IndexType::Unsigned, fpscr_reg, PPC_REG, PPCSTATE_OFF(fpscr)); - ANDI2R(fpscr_reg, fpscr_reg, ~FPCC_MASK); + AND(fpscr_reg, fpscr_reg, LogicalImm(~FPCC_MASK, 32)); } ARM64Reg V0Q = ARM64Reg::INVALID_REG; @@ -450,7 +450,7 @@ void JitArm64::FloatCompare(UGeckoInstruction inst, bool upper) // A == B ORR(XA, XA, 64 - 63, 0, true); if (fprf) - ORRI2R(fpscr_reg, fpscr_reg, PowerPC::CR_EQ << FPRF_SHIFT); + ORR(fpscr_reg, fpscr_reg, LogicalImm(PowerPC::CR_EQ << FPRF_SHIFT, 32)); continue1 = B(); @@ -458,7 +458,7 @@ void JitArm64::FloatCompare(UGeckoInstruction inst, bool upper) MOVI2R(XA, PowerPC::ConditionRegister::PPCToInternal(PowerPC::CR_SO)); if (fprf) - ORRI2R(fpscr_reg, fpscr_reg, PowerPC::CR_SO << FPRF_SHIFT); + ORR(fpscr_reg, fpscr_reg, LogicalImm(PowerPC::CR_SO << FPRF_SHIFT, 32)); if (a != b) { @@ -467,7 +467,7 @@ void JitArm64::FloatCompare(UGeckoInstruction inst, bool upper) SetJumpTarget(pGreater); ORR(XA, XA, 0, 0, true); if (fprf) - ORRI2R(fpscr_reg, fpscr_reg, PowerPC::CR_GT << FPRF_SHIFT); + ORR(fpscr_reg, fpscr_reg, LogicalImm(PowerPC::CR_GT << FPRF_SHIFT, 32)); continue3 = B(); @@ -475,7 +475,7 @@ void JitArm64::FloatCompare(UGeckoInstruction inst, bool upper) ORR(XA, XA, 64 - 62, 1, true); ORR(XA, XA, 0, 0, true); if (fprf) - ORRI2R(fpscr_reg, fpscr_reg, PowerPC::CR_LT << FPRF_SHIFT); + ORR(fpscr_reg, fpscr_reg, LogicalImm(PowerPC::CR_LT << FPRF_SHIFT, 32)); SetJumpTarget(continue2); SetJumpTarget(continue3); @@ -532,7 +532,7 @@ void JitArm64::fctiwzx(UGeckoInstruction inst) const ARM64Reg WA = gpr.GetReg(); m_float_emit.FCVTS(WA, EncodeRegToDouble(VB), RoundingMode::Z); - ORRI2R(EncodeRegTo64(WA), EncodeRegTo64(WA), 0xFFF8'0000'0000'0000ULL); + ORR(EncodeRegTo64(WA), EncodeRegTo64(WA), LogicalImm(0xFFF8'0000'0000'0000ULL, 64)); m_float_emit.FMOV(EncodeRegToDouble(VD), EncodeRegTo64(WA)); gpr.Unlock(WA); diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_Integer.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_Integer.cpp index f913b0cf69..6b2f94d80c 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_Integer.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_Integer.cpp @@ -611,7 +611,7 @@ void JitArm64::rlwinmx(UGeckoInstruction inst) else if (!inst.SH) { // Immediate mask - ANDI2R(gpr.R(a), gpr.R(s), mask); + AND(gpr.R(a), gpr.R(s), LogicalImm(mask, 32)); } else if (inst.ME == 31 && 31 < inst.SH + inst.MB) { diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStore.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStore.cpp index 574d5fd9dd..7fcabb5095 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStore.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_LoadStore.cpp @@ -550,7 +550,7 @@ void JitArm64::dcbx(UGeckoInstruction inst) else MOV(addr, gpr.R(b)); - ANDI2R(addr, addr, ~31); // mask sizeof cacheline + AND(addr, addr, LogicalImm(~31, 32)); // mask sizeof cacheline BitSet32 gprs_to_push = gpr.GetCallerSavedUsed(); BitSet32 fprs_to_push = fpr.GetCallerSavedUsed(); @@ -618,13 +618,13 @@ void JitArm64::dcbz(UGeckoInstruction inst) ARM64Reg base = is_imm_a ? gpr.R(b) : gpr.R(a); u32 imm_offset = is_imm_a ? 
gpr.GetImm(a) : gpr.GetImm(b); ADDI2R(addr_reg, base, imm_offset, addr_reg); - ANDI2R(addr_reg, addr_reg, ~31); + AND(addr_reg, addr_reg, LogicalImm(~31, 32)); } else { // Both are registers ADD(addr_reg, gpr.R(a), gpr.R(b)); - ANDI2R(addr_reg, addr_reg, ~31); + AND(addr_reg, addr_reg, LogicalImm(~31, 32)); } } else @@ -637,7 +637,7 @@ void JitArm64::dcbz(UGeckoInstruction inst) } else { - ANDI2R(addr_reg, gpr.R(b), ~31); + AND(addr_reg, gpr.R(b), LogicalImm(~31, 32)); } } diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_SystemRegisters.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_SystemRegisters.cpp index 3815123850..fb0b611766 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_SystemRegisters.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_SystemRegisters.cpp @@ -217,7 +217,7 @@ void JitArm64::twx(UGeckoInstruction inst) fpr.Flush(FlushMode::MaintainState); LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(Exceptions)); - ORRI2R(WA, WA, EXCEPTION_PROGRAM); + ORR(WA, WA, LogicalImm(EXCEPTION_PROGRAM, 32)); STR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(Exceptions)); gpr.Unlock(WA); @@ -290,7 +290,7 @@ void JitArm64::mfspr(UGeckoInstruction inst) SUB(Xresult, Xresult, XB); // a / 12 = (a * 0xAAAAAAAAAAAAAAAB) >> 67 - ORRI2R(XB, ARM64Reg::ZR, 0xAAAAAAAAAAAAAAAA); + ORR(XB, ARM64Reg::ZR, LogicalImm(0xAAAAAAAAAAAAAAAA, 64)); ADD(XB, XB, 1); UMULH(Xresult, Xresult, XB); @@ -440,20 +440,20 @@ void JitArm64::crXXX(UGeckoInstruction inst) switch (bit) { case PowerPC::CR_SO_BIT: - ANDI2R(XA, XA, ~(u64(1) << PowerPC::CR_EMU_SO_BIT)); + AND(XA, XA, LogicalImm(~(u64(1) << PowerPC::CR_EMU_SO_BIT), 64)); break; case PowerPC::CR_EQ_BIT: FixGTBeforeSettingCRFieldBit(XA); - ORRI2R(XA, XA, 1); + ORR(XA, XA, LogicalImm(1, 64)); break; case PowerPC::CR_GT_BIT: - ORRI2R(XA, XA, u64(1) << 63); + ORR(XA, XA, LogicalImm(u64(1) << 63, 64)); break; case PowerPC::CR_LT_BIT: - ANDI2R(XA, XA, ~(u64(1) << PowerPC::CR_EMU_LT_BIT)); + AND(XA, XA, LogicalImm(~(u64(1) << PowerPC::CR_EMU_LT_BIT), 64)); break; } return; @@ -475,23 +475,23 @@ void JitArm64::crXXX(UGeckoInstruction inst) switch (bit) { case PowerPC::CR_SO_BIT: - ORRI2R(XA, XA, u64(1) << PowerPC::CR_EMU_SO_BIT); + ORR(XA, XA, LogicalImm(u64(1) << PowerPC::CR_EMU_SO_BIT, 64)); break; case PowerPC::CR_EQ_BIT: - ANDI2R(XA, XA, 0xFFFF'FFFF'0000'0000); + AND(XA, XA, LogicalImm(0xFFFF'FFFF'0000'0000, 64)); break; case PowerPC::CR_GT_BIT: - ANDI2R(XA, XA, ~(u64(1) << 63)); + AND(XA, XA, LogicalImm(~(u64(1) << 63), 64)); break; case PowerPC::CR_LT_BIT: - ORRI2R(XA, XA, u64(1) << PowerPC::CR_EMU_LT_BIT); + ORR(XA, XA, LogicalImm(u64(1) << PowerPC::CR_EMU_LT_BIT, 64)); break; } - ORRI2R(XA, XA, u64(1) << 32); + ORR(XA, XA, LogicalImm(u64(1) << 32, 64)); return; } @@ -709,12 +709,12 @@ void JitArm64::mcrfs(UGeckoInstruction inst) LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(fpscr)); LSR(WCR, WA, shift); - ANDI2R(WCR, WCR, 0xF); + AND(WCR, WCR, LogicalImm(0xF, 32)); if (mask != 0) { const u32 inverted_mask = ~mask; - ANDI2R(WA, WA, inverted_mask); + AND(WA, WA, LogicalImm(inverted_mask, 32)); STR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(fpscr)); } diff --git a/Source/Core/Core/PowerPC/JitArm64/JitAsm.cpp b/Source/Core/Core/PowerPC/JitArm64/JitAsm.cpp index 6a401a0b74..fd415e8da2 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitAsm.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/JitAsm.cpp @@ -102,7 +102,7 @@ void JitArm64::GenerateAsm() ARM64Reg pc_masked = ARM64Reg::W25; ARM64Reg cache_base = ARM64Reg::X27; ARM64Reg block = 
ARM64Reg::X30; - ORRI2R(pc_masked, ARM64Reg::WZR, JitBaseBlockCache::FAST_BLOCK_MAP_MASK << 3); + ORR(pc_masked, ARM64Reg::WZR, LogicalImm(JitBaseBlockCache::FAST_BLOCK_MAP_MASK << 3, 32)); AND(pc_masked, pc_masked, DISPATCHER_PC, ArithOption(DISPATCHER_PC, ShiftType::LSL, 1)); MOVP2R(cache_base, GetBlockCache()->GetFastBlockMap()); LDR(block, cache_base, EncodeRegTo64(pc_masked)); @@ -116,7 +116,7 @@ void JitArm64::GenerateAsm() FixupBranch pc_missmatch = B(CC_NEQ); LDR(IndexType::Unsigned, pc_and_msr2, PPC_REG, PPCSTATE_OFF(msr)); - ANDI2R(pc_and_msr2, pc_and_msr2, JitBaseBlockCache::JIT_CACHE_MSR_MASK); + AND(pc_and_msr2, pc_and_msr2, LogicalImm(JitBaseBlockCache::JIT_CACHE_MSR_MASK, 32)); LDR(IndexType::Unsigned, pc_and_msr, block, offsetof(JitBlockData, msrBits)); CMP(pc_and_msr, pc_and_msr2); FixupBranch msr_missmatch = B(CC_NEQ); @@ -238,7 +238,7 @@ void JitArm64::GenerateFres() UBFX(ARM64Reg::X2, ARM64Reg::X1, 52, 11); // Grab the exponent m_float_emit.FMOV(ARM64Reg::X0, ARM64Reg::D0); CMP(ARM64Reg::X2, 895); - ANDI2R(ARM64Reg::X3, ARM64Reg::X1, Common::DOUBLE_SIGN); + AND(ARM64Reg::X3, ARM64Reg::X1, LogicalImm(Common::DOUBLE_SIGN, 64)); FixupBranch small_exponent = B(CCFlags::CC_LO); MOVI2R(ARM64Reg::X4, 1148LL); @@ -251,14 +251,14 @@ void JitArm64::GenerateFres() LDP(IndexType::Signed, ARM64Reg::W2, ARM64Reg::W3, ARM64Reg::X2, 0); UBFX(ARM64Reg::X1, ARM64Reg::X1, 37, 10); // Grab lower part of mantissa MOVI2R(ARM64Reg::W4, 1); - ANDI2R(ARM64Reg::X0, ARM64Reg::X0, Common::DOUBLE_SIGN | Common::DOUBLE_EXP); + AND(ARM64Reg::X0, ARM64Reg::X0, LogicalImm(Common::DOUBLE_SIGN | Common::DOUBLE_EXP, 64)); MADD(ARM64Reg::W1, ARM64Reg::W3, ARM64Reg::W1, ARM64Reg::W4); SUB(ARM64Reg::W1, ARM64Reg::W2, ARM64Reg::W1, ArithOption(ARM64Reg::W1, ShiftType::LSR, 1)); ORR(ARM64Reg::X0, ARM64Reg::X0, ARM64Reg::X1, ArithOption(ARM64Reg::X1, ShiftType::LSL, 29)); RET(); SetJumpTarget(small_exponent); - TSTI2R(ARM64Reg::X1, Common::DOUBLE_EXP | Common::DOUBLE_FRAC); + TST(ARM64Reg::X1, LogicalImm(Common::DOUBLE_EXP | Common::DOUBLE_FRAC, 64)); FixupBranch zero = B(CCFlags::CC_EQ); MOVI2R(ARM64Reg::X4, Common::BitCast(static_cast(std::numeric_limits::max()))); @@ -289,15 +289,15 @@ void JitArm64::GenerateFrsqrte() // inf, even the mantissa matches. But the mantissa does not match for most other inputs, so in // the normal case we calculate the mantissa using the table-based algorithm from the interpreter. 
- TSTI2R(ARM64Reg::X1, Common::DOUBLE_EXP | Common::DOUBLE_FRAC); + TST(ARM64Reg::X1, LogicalImm(Common::DOUBLE_EXP | Common::DOUBLE_FRAC, 64)); m_float_emit.FMOV(ARM64Reg::X0, ARM64Reg::D0); FixupBranch zero = B(CCFlags::CC_EQ); - ANDI2R(ARM64Reg::X2, ARM64Reg::X1, Common::DOUBLE_EXP); + AND(ARM64Reg::X2, ARM64Reg::X1, LogicalImm(Common::DOUBLE_EXP, 64)); MOVI2R(ARM64Reg::X3, Common::DOUBLE_EXP); CMP(ARM64Reg::X2, ARM64Reg::X3); FixupBranch nan_or_inf = B(CCFlags::CC_EQ); FixupBranch negative = TBNZ(ARM64Reg::X1, 63); - ANDI2R(ARM64Reg::X3, ARM64Reg::X1, Common::DOUBLE_FRAC); + AND(ARM64Reg::X3, ARM64Reg::X1, LogicalImm(Common::DOUBLE_FRAC, 64)); FixupBranch normal = CBNZ(ARM64Reg::X2); // "Normalize" denormal values @@ -306,18 +306,18 @@ void JitArm64::GenerateFrsqrte() MOVI2R(ARM64Reg::X2, 0x00C0'0000'0000'0000); LSLV(ARM64Reg::X4, ARM64Reg::X1, ARM64Reg::X4); SUB(ARM64Reg::X2, ARM64Reg::X2, ARM64Reg::X3, ArithOption(ARM64Reg::X3, ShiftType::LSL, 52)); - ANDI2R(ARM64Reg::X3, ARM64Reg::X4, Common::DOUBLE_FRAC - 1); + AND(ARM64Reg::X3, ARM64Reg::X4, LogicalImm(Common::DOUBLE_FRAC - 1, 64)); SetJumpTarget(normal); LSR(ARM64Reg::X2, ARM64Reg::X2, 48); - ANDI2R(ARM64Reg::X2, ARM64Reg::X2, 0x10); + AND(ARM64Reg::X2, ARM64Reg::X2, LogicalImm(0x10, 64)); MOVP2R(ARM64Reg::X1, &Common::frsqrte_expected); ORR(ARM64Reg::X2, ARM64Reg::X2, ARM64Reg::X3, ArithOption(ARM64Reg::X8, ShiftType::LSR, 48)); - EORI2R(ARM64Reg::X2, ARM64Reg::X2, 0x10); + EOR(ARM64Reg::X2, ARM64Reg::X2, LogicalImm(0x10, 64)); ADD(ARM64Reg::X2, ARM64Reg::X1, ARM64Reg::X2, ArithOption(ARM64Reg::X2, ShiftType::LSL, 3)); LDP(IndexType::Signed, ARM64Reg::W1, ARM64Reg::W2, ARM64Reg::X2, 0); UBFX(ARM64Reg::X3, ARM64Reg::X3, 37, 11); - ANDI2R(ARM64Reg::X0, ARM64Reg::X0, Common::DOUBLE_SIGN | Common::DOUBLE_EXP); + AND(ARM64Reg::X0, ARM64Reg::X0, LogicalImm(Common::DOUBLE_SIGN | Common::DOUBLE_EXP, 64)); MSUB(ARM64Reg::W3, ARM64Reg::W3, ARM64Reg::W2, ARM64Reg::W1); ORR(ARM64Reg::X0, ARM64Reg::X0, ARM64Reg::X3, ArithOption(ARM64Reg::X3, ShiftType::LSL, 26)); RET(); @@ -354,17 +354,17 @@ void JitArm64::GenerateConvertDoubleToSingle() LSR(ARM64Reg::X1, ARM64Reg::X0, 32); FixupBranch denormal = B(CCFlags::CC_LS); - ANDI2R(ARM64Reg::X1, ARM64Reg::X1, 0xc0000000); + AND(ARM64Reg::X1, ARM64Reg::X1, LogicalImm(0xc0000000, 64)); BFXIL(ARM64Reg::X1, ARM64Reg::X0, 29, 30); RET(); SetJumpTarget(denormal); LSR(ARM64Reg::X3, ARM64Reg::X0, 21); MOVZ(ARM64Reg::X0, 905); - ORRI2R(ARM64Reg::W3, ARM64Reg::W3, 0x80000000); + ORR(ARM64Reg::W3, ARM64Reg::W3, LogicalImm(0x80000000, 32)); SUB(ARM64Reg::W2, ARM64Reg::W0, ARM64Reg::W2); LSRV(ARM64Reg::W2, ARM64Reg::W3, ARM64Reg::W2); - ANDI2R(ARM64Reg::X3, ARM64Reg::X1, 0x80000000); + AND(ARM64Reg::X3, ARM64Reg::X1, LogicalImm(0x80000000, 64)); ORR(ARM64Reg::X1, ARM64Reg::X3, ARM64Reg::X2); RET(); } @@ -375,7 +375,7 @@ void JitArm64::GenerateConvertSingleToDouble() UBFX(ARM64Reg::W1, ARM64Reg::W0, 23, 8); FixupBranch normal_or_nan = CBNZ(ARM64Reg::W1); - ANDI2R(ARM64Reg::W1, ARM64Reg::W0, 0x007fffff); + AND(ARM64Reg::W1, ARM64Reg::W0, LogicalImm(0x007fffff, 32)); FixupBranch denormal = CBNZ(ARM64Reg::W1); // Zero @@ -383,10 +383,10 @@ void JitArm64::GenerateConvertSingleToDouble() RET(); SetJumpTarget(denormal); - ANDI2R(ARM64Reg::W2, ARM64Reg::W0, 0x80000000); + AND(ARM64Reg::W2, ARM64Reg::W0, LogicalImm(0x80000000, 32)); CLZ(ARM64Reg::X3, ARM64Reg::X1); LSL(ARM64Reg::X2, ARM64Reg::X2, 32); - ORRI2R(ARM64Reg::X4, ARM64Reg::X3, 0xffffffffffffffc0); + ORR(ARM64Reg::X4, ARM64Reg::X3, 
LogicalImm(0xffffffffffffffc0, 64)); SUB(ARM64Reg::X2, ARM64Reg::X2, ARM64Reg::X3, ArithOption(ARM64Reg::X3, ShiftType::LSL, 52)); ADD(ARM64Reg::X3, ARM64Reg::X4, 23); LSLV(ARM64Reg::X1, ARM64Reg::X1, ARM64Reg::X3); @@ -397,12 +397,12 @@ void JitArm64::GenerateConvertSingleToDouble() SetJumpTarget(normal_or_nan); CMP(ARM64Reg::W1, 0xff); - ANDI2R(ARM64Reg::W2, ARM64Reg::W0, 0x40000000); + AND(ARM64Reg::W2, ARM64Reg::W0, LogicalImm(0x40000000, 32)); CSET(ARM64Reg::W4, CCFlags::CC_NEQ); - ANDI2R(ARM64Reg::W3, ARM64Reg::W0, 0xc0000000); + AND(ARM64Reg::W3, ARM64Reg::W0, LogicalImm(0xc0000000, 32)); EOR(ARM64Reg::W2, ARM64Reg::W4, ARM64Reg::W2, ArithOption(ARM64Reg::W2, ShiftType::LSR, 30)); MOVI2R(ARM64Reg::X1, 0x3800000000000000); - ANDI2R(ARM64Reg::W4, ARM64Reg::W0, 0x3fffffff); + AND(ARM64Reg::W4, ARM64Reg::W0, LogicalImm(0x3fffffff, 32)); LSL(ARM64Reg::X3, ARM64Reg::X3, 32); CMP(ARM64Reg::W2, 0); CSEL(ARM64Reg::X1, ARM64Reg::X1, ARM64Reg::ZR, CCFlags::CC_NEQ); @@ -423,9 +423,10 @@ void JitArm64::GenerateFPRF(bool single) constexpr ARM64Reg fprf_reg = ARM64Reg::W3; constexpr ARM64Reg fpscr_reg = ARM64Reg::W4; - const auto INPUT_EXP_MASK = single ? Common::FLOAT_EXP : Common::DOUBLE_EXP; - const auto INPUT_FRAC_MASK = single ? Common::FLOAT_FRAC : Common::DOUBLE_FRAC; - constexpr u32 OUTPUT_SIGN_MASK = 0xC; + const int input_size = single ? 32 : 64; + const u64 input_exp_mask = single ? Common::FLOAT_EXP : Common::DOUBLE_EXP; + const u64 input_frac_mask = single ? Common::FLOAT_FRAC : Common::DOUBLE_FRAC; + constexpr u32 output_sign_mask = 0xC; // This code is duplicated for the most common cases for performance. // For the less common cases, we branch to an existing copy of this code. @@ -439,7 +440,7 @@ void JitArm64::GenerateFPRF(bool single) LDR(IndexType::Unsigned, fpscr_reg, PPC_REG, PPCSTATE_OFF(fpscr)); CMP(input_reg, 0); // Grab sign bit (conveniently the same bit for floats as for integers) - ANDI2R(exp_reg, input_reg, INPUT_EXP_MASK); // Grab exponent + AND(exp_reg, input_reg, LogicalImm(input_exp_mask, input_size)); // Grab exponent // Most branches handle the sign in the same way. 
Perform that handling before branching MOVI2R(ARM64Reg::W3, Common::PPC_FPCLASS_PN); @@ -449,7 +450,7 @@ void JitArm64::GenerateFPRF(bool single) FixupBranch zero_or_denormal = CBZ(exp_reg); // exp != 0 - MOVI2R(temp_reg, INPUT_EXP_MASK); + MOVI2R(temp_reg, input_exp_mask); CMP(exp_reg, temp_reg); FixupBranch nan_or_inf = B(CCFlags::CC_EQ); @@ -458,25 +459,25 @@ void JitArm64::GenerateFPRF(bool single) // exp == 0 SetJumpTarget(zero_or_denormal); - TSTI2R(input_reg, INPUT_FRAC_MASK); + TST(input_reg, LogicalImm(input_frac_mask, input_size)); FixupBranch denormal = B(CCFlags::CC_NEQ); // exp == 0 && frac == 0 LSR(ARM64Reg::W1, fprf_reg, 3); - MOVI2R(fprf_reg, Common::PPC_FPCLASS_PZ & ~OUTPUT_SIGN_MASK); + MOVI2R(fprf_reg, Common::PPC_FPCLASS_PZ & ~output_sign_mask); BFI(fprf_reg, ARM64Reg::W1, 4, 1); const u8* write_fprf_and_ret = GetCodePtr(); emit_write_fprf_and_ret(); // exp == 0 && frac != 0 SetJumpTarget(denormal); - ORRI2R(fprf_reg, fprf_reg, Common::PPC_FPCLASS_PD & ~OUTPUT_SIGN_MASK); + ORR(fprf_reg, fprf_reg, LogicalImm(Common::PPC_FPCLASS_PD & ~output_sign_mask, 32)); B(write_fprf_and_ret); // exp == EXP_MASK SetJumpTarget(nan_or_inf); - TSTI2R(input_reg, INPUT_FRAC_MASK); - ORRI2R(ARM64Reg::W1, fprf_reg, Common::PPC_FPCLASS_PINF & ~OUTPUT_SIGN_MASK); + TST(input_reg, LogicalImm(input_frac_mask, input_size)); + ORR(ARM64Reg::W1, fprf_reg, LogicalImm(Common::PPC_FPCLASS_PINF & ~output_sign_mask, 32)); MOVI2R(ARM64Reg::W2, Common::PPC_FPCLASS_QNAN); CSEL(fprf_reg, ARM64Reg::W1, ARM64Reg::W2, CCFlags::CC_EQ); B(write_fprf_and_ret); diff --git a/Source/Core/VideoCommon/VertexLoaderARM64.cpp b/Source/Core/VideoCommon/VertexLoaderARM64.cpp index eb16ec9bd3..dc760b011b 100644 --- a/Source/Core/VideoCommon/VertexLoaderARM64.cpp +++ b/Source/Core/VideoCommon/VertexLoaderARM64.cpp @@ -244,7 +244,7 @@ void VertexLoaderARM64::ReadColor(VertexComponentFormat attribute, ColorFormat f LDR(IndexType::Unsigned, scratch2_reg, src_reg, offset); if (format != ColorFormat::RGBA8888) - ORRI2R(scratch2_reg, scratch2_reg, 0xFF000000); + ORR(scratch2_reg, scratch2_reg, LogicalImm(0xFF000000, 32)); STR(IndexType::Unsigned, scratch2_reg, dst_reg, m_dst_ofs); load_bytes = format == ColorFormat::RGB888 ? 3 : 4; break; @@ -279,7 +279,7 @@ void VertexLoaderARM64::ReadColor(VertexComponentFormat attribute, ColorFormat f ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSR, 2)); // A - ORRI2R(scratch1_reg, scratch1_reg, 0xFF000000); + ORR(scratch1_reg, scratch1_reg, LogicalImm(0xFF000000, 32)); STR(IndexType::Unsigned, scratch1_reg, dst_reg, m_dst_ofs); load_bytes = 2; From 0f3b9a8874b12200326e6dd0904c30c2b296b159 Mon Sep 17 00:00:00 2001 From: JosJuice Date: Tue, 6 Jul 2021 16:53:59 +0200 Subject: [PATCH 4/4] JitArm64: Minor mcrfs optimization --- Source/Core/Core/PowerPC/JitArm64/JitArm64_SystemRegisters.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_SystemRegisters.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_SystemRegisters.cpp index fb0b611766..dd578fe2c4 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_SystemRegisters.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_SystemRegisters.cpp @@ -708,8 +708,7 @@ void JitArm64::mcrfs(UGeckoInstruction inst) ARM64Reg XA = EncodeRegTo64(WA); LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(fpscr)); - LSR(WCR, WA, shift); - AND(WCR, WCR, LogicalImm(0xF, 32)); + UBFX(WCR, WA, shift, 4); if (mask != 0) {