Merge pull request #9869 from JosJuice/jitarm64-constexpr-isimmlogical
JitArm64: Encode logical immediates at compile-time where possible
Commit 88fd9fd577
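The change in a nutshell: call sites that used the runtime `ORRI2R`/`ANDI2R`-style wrappers can now construct a `LogicalImm`, whose constexpr constructor encodes and validates the immediate; for constant operands this happens at C++ compile time. A hedged before/after sketch, reusing a call site from this diff:

```cpp
// Before: the (n, imms, immr) fields are computed at runtime, every time
// the JIT emits this instruction.
ORRI2R(WA, WA, EXCEPTION_SYSCALL);

// After: LogicalImm's constexpr constructor can run at C++ compile time for
// constant values, and an unencodable constant trips the emitter's assert
// instead of silently needing a scratch-register fallback.
ORR(WA, WA, LogicalImm(EXCEPTION_SYSCALL, 32));
```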
@@ -28,11 +28,6 @@ namespace Arm64Gen
 {
 namespace
 {
-uint64_t LargestPowerOf2Divisor(uint64_t value)
-{
-  return value & -(int64_t)value;
-}
-
 // For ADD/SUB
 std::optional<std::pair<u32, bool>> IsImmArithmetic(uint64_t input)
 {
@@ -45,214 +40,6 @@ std::optional<std::pair<u32, bool>> IsImmArithmetic(uint64_t input)
   return std::nullopt;
 }
 
-// For AND/TST/ORR/EOR etc
-std::optional<std::tuple<u32, u32, u32>> IsImmLogical(u64 value, u32 width)
-{
-  bool negate = false;
-
-  // Logical immediates are encoded using parameters n, imm_s and imm_r using
-  // the following table:
-  //
-  //    N   imms    immr    size        S             R
-  //    1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
-  //    0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
-  //    0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
-  //    0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
-  //    0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
-  //    0  11110s  xxxxxr     2    UInt(s)       UInt(r)
-  //    (s bits must not be all set)
-  //
-  // A pattern is constructed of size bits, where the least significant S+1 bits
-  // are set. The pattern is rotated right by R, and repeated across a 32 or
-  // 64-bit value, depending on destination register width.
-  //
-  // Put another way: the basic format of a logical immediate is a single
-  // contiguous stretch of 1 bits, repeated across the whole word at intervals
-  // given by a power of 2. To identify them quickly, we first locate the
-  // lowest stretch of 1 bits, then the next 1 bit above that; that combination
-  // is different for every logical immediate, so it gives us all the
-  // information we need to identify the only logical immediate that our input
-  // could be, and then we simply check if that's the value we actually have.
-  //
-  // (The rotation parameter does give the possibility of the stretch of 1 bits
-  // going 'round the end' of the word. To deal with that, we observe that in
-  // any situation where that happens the bitwise NOT of the value is also a
-  // valid logical immediate. So we simply invert the input whenever its low bit
-  // is set, and then we know that the rotated case can't arise.)
-
-  if (value & 1)
-  {
-    // If the low bit is 1, negate the value, and set a flag to remember that we
-    // did (so that we can adjust the return values appropriately).
-    negate = true;
-    value = ~value;
-  }
-
-  constexpr int kWRegSizeInBits = 32;
-
-  if (width == kWRegSizeInBits)
-  {
-    // To handle 32-bit logical immediates, the very easiest thing is to repeat
-    // the input value twice to make a 64-bit word. The correct encoding of that
-    // as a logical immediate will also be the correct encoding of the 32-bit
-    // value.
-
-    // The most-significant 32 bits may not be zero (ie. negate is true) so
-    // shift the value left before duplicating it.
-    value <<= kWRegSizeInBits;
-    value |= value >> kWRegSizeInBits;
-  }
-
-  // The basic analysis idea: imagine our input word looks like this.
-  //
-  //    0011111000111110001111100011111000111110001111100011111000111110
-  //                                                          c  b    a
-  //                                                          |<--d-->|
-  //
-  // We find the lowest set bit (as an actual power-of-2 value, not its index)
-  // and call it a. Then we add a to our original number, which wipes out the
-  // bottommost stretch of set bits and replaces it with a 1 carried into the
-  // next zero bit. Then we look for the new lowest set bit, which is in
-  // position b, and subtract it, so now our number is just like the original
-  // but with the lowest stretch of set bits completely gone. Now we find the
-  // lowest set bit again, which is position c in the diagram above. Then we'll
-  // measure the distance d between bit positions a and c (using CLZ), and that
-  // tells us that the only valid logical immediate that could possibly be equal
-  // to this number is the one in which a stretch of bits running from a to just
-  // below b is replicated every d bits.
-  uint64_t a = LargestPowerOf2Divisor(value);
-  uint64_t value_plus_a = value + a;
-  uint64_t b = LargestPowerOf2Divisor(value_plus_a);
-  uint64_t value_plus_a_minus_b = value_plus_a - b;
-  uint64_t c = LargestPowerOf2Divisor(value_plus_a_minus_b);
-
-  int d, clz_a, out_n;
-  uint64_t mask;
-
-  if (c != 0)
-  {
-    // The general case, in which there is more than one stretch of set bits.
-    // Compute the repeat distance d, and set up a bitmask covering the basic
-    // unit of repetition (i.e. a word with the bottom d bits set). Also, in all
-    // of these cases the N bit of the output will be zero.
-    clz_a = Common::CountLeadingZeros(a);
-    int clz_c = Common::CountLeadingZeros(c);
-    d = clz_a - clz_c;
-    mask = ((UINT64_C(1) << d) - 1);
-    out_n = 0;
-  }
-  else
-  {
-    // Handle degenerate cases.
-    //
-    // If any of those 'find lowest set bit' operations didn't find a set bit at
-    // all, then the word will have been zero thereafter, so in particular the
-    // last lowest_set_bit operation will have returned zero. So we can test for
-    // all the special case conditions in one go by seeing if c is zero.
-    if (a == 0)
-    {
-      // The input was zero (or all 1 bits, which will come to here too after we
-      // inverted it at the start of the function), for which we just return
-      // false.
-      return std::nullopt;
-    }
-    else
-    {
-      // Otherwise, if c was zero but a was not, then there's just one stretch
-      // of set bits in our word, meaning that we have the trivial case of
-      // d == 64 and only one 'repetition'. Set up all the same variables as in
-      // the general case above, and set the N bit in the output.
-      clz_a = Common::CountLeadingZeros(a);
-      d = 64;
-      mask = ~UINT64_C(0);
-      out_n = 1;
-    }
-  }
-
-  // If the repeat period d is not a power of two, it can't be encoded.
-  if (!MathUtil::IsPow2<u64>(d))
-    return std::nullopt;
-
-  // If the bit stretch (b - a) does not fit within the mask derived from the
-  // repeat period, then fail.
-  if (((b - a) & ~mask) != 0)
-    return std::nullopt;
-
-  // The only possible option is b - a repeated every d bits. Now we're going to
-  // actually construct the valid logical immediate derived from that
-  // specification, and see if it equals our original input.
-  //
-  // To repeat a value every d bits, we multiply it by a number of the form
-  // (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can
-  // be derived using a table lookup on CLZ(d).
-  static const std::array<uint64_t, 6> multipliers = {{
-      0x0000000000000001UL,
-      0x0000000100000001UL,
-      0x0001000100010001UL,
-      0x0101010101010101UL,
-      0x1111111111111111UL,
-      0x5555555555555555UL,
-  }};
-
-  const int multiplier_idx = Common::CountLeadingZeros((u64)d) - 57;
-
-  // Ensure that the index to the multipliers array is within bounds.
-  DEBUG_ASSERT((multiplier_idx >= 0) && (static_cast<size_t>(multiplier_idx) < multipliers.size()));
-
-  const u64 multiplier = multipliers[multiplier_idx];
-  const u64 candidate = (b - a) * multiplier;
-
-  // The candidate pattern doesn't match our input value, so fail.
-  if (value != candidate)
-    return std::nullopt;
-
-  // We have a match! This is a valid logical immediate, so now we have to
-  // construct the bits and pieces of the instruction encoding that generates
-  // it.
-
-  // Count the set bits in our basic stretch. The special case of clz(0) == -1
-  // makes the answer come out right for stretches that reach the very top of
-  // the word (e.g. numbers like 0xffffc00000000000).
-  const int clz_b = (b == 0) ? -1 : Common::CountLeadingZeros(b);
-  int s = clz_a - clz_b;
-
-  // Decide how many bits to rotate right by, to put the low bit of that basic
-  // stretch in position a.
-  int r;
-  if (negate)
-  {
-    // If we inverted the input right at the start of this function, here's
-    // where we compensate: the number of set bits becomes the number of clear
-    // bits, and the rotation count is based on position b rather than position
-    // a (since b is the location of the 'lowest' 1 bit after inversion).
-    s = d - s;
-    r = (clz_b + 1) & (d - 1);
-  }
-  else
-  {
-    r = (clz_a + 1) & (d - 1);
-  }
-
-  // Now we're done, except for having to encode the S output in such a way that
-  // it gives both the number of set bits and the length of the repeated
-  // segment. The s field is encoded like this:
-  //
-  //    imms    size        S
-  //   ssssss    64    UInt(ssssss)
-  //   0sssss    32    UInt(sssss)
-  //   10ssss    16    UInt(ssss)
-  //   110sss     8    UInt(sss)
-  //   1110ss     4    UInt(ss)
-  //   11110s     2    UInt(s)
-  //
-  // So we 'or' (-d << 1) with our computed s to form imms.
-  return std::tuple{
-      static_cast<u32>(out_n),
-      static_cast<u32>(((-d << 1) | (s - 1)) & 0x3f),
-      static_cast<u32>(r),
-  };
-}
-
 float FPImm8ToFloat(u8 bits)
 {
   const u32 sign = bits >> 7;
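For experimenting with the detection idea outside the emitter, here is a minimal self-contained sketch (an illustrative C++20 reimplementation, not Dolphin's code; it only answers whether a 64-bit value is encodable):

```cpp
#include <bit>
#include <cstdint>
#include <cstdio>

// Lowest set bit as a power-of-2 value (0 for input 0) - the same trick as
// LargestPowerOf2Divisor in this commit.
constexpr uint64_t lowest_set_bit(uint64_t v) { return v & (~v + 1); }

// True if 'value' is encodable as a 64-bit AArch64 logical immediate.
constexpr bool is_logical_imm64(uint64_t value)
{
  if (value & 1)
    value = ~value;  // invert so the run of ones cannot wrap past the top bit

  const uint64_t a = lowest_set_bit(value);
  if (a == 0)
    return false;  // input was 0 or ~0; neither is encodable
  const uint64_t b = lowest_set_bit(value + a);
  const uint64_t c = lowest_set_bit(value + a - b);

  // Repeat distance d: gap between bits a and c, or the whole word if c == 0.
  const int d = c != 0 ? std::countr_zero(c) - std::countr_zero(a) : 64;
  if ((d & (d - 1)) != 0)
    return false;  // the repeat period must be a power of two
  const uint64_t mask = d == 64 ? ~uint64_t{0} : (uint64_t{1} << d) - 1;
  if (((b - a) & ~mask) != 0)
    return false;  // the run of ones must fit inside one repetition

  // Replicate the basic unit (b - a) every d bits and compare with the input.
  uint64_t candidate = b - a;
  if (d < 64)
    for (uint64_t unit = (b - a) << d; unit != 0; unit <<= d)
      candidate |= unit;
  return candidate == value;
}

static_assert(is_logical_imm64(0x0000FFFF0000FFFF));   // 0xFFFF repeated every 32 bits
static_assert(is_logical_imm64(0xFFF8000000000000));   // one run of 13 ones at the top
static_assert(!is_logical_imm64(0x0123456789ABCDEF));  // not a repeating run of ones

int main()
{
  printf("%d\n", is_logical_imm64(0x00000000FFFFFFFF));  // prints 1
}
```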
@@ -780,10 +567,18 @@ void ARM64XEmitter::EncodeLogicalImmInst(u32 op, ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, int n)
   // Use Rn to determine bitness here.
   bool b64Bit = Is64Bit(Rn);
 
+  ASSERT_MSG(DYNA_REC, b64Bit || !n, "64-bit logical immediate does not fit in 32-bit register");
+
   Write32((b64Bit << 31) | (op << 29) | (0x24 << 23) | (n << 22) | (immr << 16) | (imms << 10) |
           (DecodeReg(Rn) << 5) | DecodeReg(Rd));
 }
 
+void ARM64XEmitter::EncodeLogicalImmInst(u32 op, ARM64Reg Rd, ARM64Reg Rn, LogicalImm imm)
+{
+  ASSERT_MSG(DYNA_REC, imm.valid, "Invalid logical immediate");
+  EncodeLogicalImmInst(op, Rd, Rn, imm.r, imm.s, imm.n);
+}
+
 void ARM64XEmitter::EncodeLoadStorePair(u32 op, u32 load, IndexType type, ARM64Reg Rt, ARM64Reg Rt2,
                                         ARM64Reg Rn, s32 imm)
 {
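A concrete check of the Write32 packing above: AND X0, X1, #0xFFFFFFFF00000000 is a single run of 32 ones, which the algorithm in this diff encodes as n=1, immr=32, imms=31. A standalone sketch (not Dolphin code) reproducing the field packing:

```cpp
#include <cstdint>
#include <cstdio>

int main()
{
  // AND X0, X1, #0xFFFFFFFF00000000  ->  n=1, immr=32, imms=31
  const uint32_t b64Bit = 1, op = 0;  // op 0 selects AND (see the wrappers below)
  const uint32_t n = 1, immr = 32, imms = 31, Rn = 1, Rd = 0;
  const uint32_t inst = (b64Bit << 31) | (op << 29) | (0x24 << 23) | (n << 22) |
                        (immr << 16) | (imms << 10) | (Rn << 5) | Rd;
  printf("0x%08X\n", inst);  // prints 0x92607C20, the architectural encoding
}
```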
@@ -1545,22 +1340,42 @@ void ARM64XEmitter::AND(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert)
 {
   EncodeLogicalImmInst(0, Rd, Rn, immr, imms, invert);
 }
+void ARM64XEmitter::AND(ARM64Reg Rd, ARM64Reg Rn, LogicalImm imm)
+{
+  EncodeLogicalImmInst(0, Rd, Rn, imm);
+}
 void ARM64XEmitter::ANDS(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert)
 {
   EncodeLogicalImmInst(3, Rd, Rn, immr, imms, invert);
 }
+void ARM64XEmitter::ANDS(ARM64Reg Rd, ARM64Reg Rn, LogicalImm imm)
+{
+  EncodeLogicalImmInst(3, Rd, Rn, imm);
+}
 void ARM64XEmitter::EOR(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert)
 {
   EncodeLogicalImmInst(2, Rd, Rn, immr, imms, invert);
 }
+void ARM64XEmitter::EOR(ARM64Reg Rd, ARM64Reg Rn, LogicalImm imm)
+{
+  EncodeLogicalImmInst(2, Rd, Rn, imm);
+}
 void ARM64XEmitter::ORR(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert)
 {
   EncodeLogicalImmInst(1, Rd, Rn, immr, imms, invert);
 }
+void ARM64XEmitter::ORR(ARM64Reg Rd, ARM64Reg Rn, LogicalImm imm)
+{
+  EncodeLogicalImmInst(1, Rd, Rn, imm);
+}
 void ARM64XEmitter::TST(ARM64Reg Rn, u32 immr, u32 imms, bool invert)
 {
   EncodeLogicalImmInst(3, Is64Bit(Rn) ? ARM64Reg::ZR : ARM64Reg::WZR, Rn, immr, imms, invert);
 }
+void ARM64XEmitter::TST(ARM64Reg Rn, LogicalImm imm)
+{
+  EncodeLogicalImmInst(3, Is64Bit(Rn) ? ARM64Reg::ZR : ARM64Reg::WZR, Rn, imm);
+}
 
 // Add/subtract (immediate)
 void ARM64XEmitter::ADD(ARM64Reg Rd, ARM64Reg Rn, u32 imm, bool shift)
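A side note on the constants threaded through these wrappers (an assumption spelled out from the encodings above): the first argument is the opc field of the AArch64 logical (immediate) instruction class, and TST is ANDS writing to the zero register. The enum name below is invented purely for illustration:

```cpp
// opc field of the logical (immediate) instruction class, bits 30:29.
enum class LogicalImmOpc : unsigned { AND = 0, ORR = 1, EOR = 2, ANDS = 3 };
```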
@@ -2067,13 +1882,13 @@ void ARM64XEmitter::MOVI2RImpl(ARM64Reg Rd, T imm)
                         (imm & 0xFFFF'FFFF'0000'0000) | (imm >> 32),
                         (imm << 48) | (imm & 0x0000'FFFF'FFFF'0000) | (imm >> 48)})
     {
-      if (IsImmLogical(orr_imm, 64))
+      if (LogicalImm(orr_imm, 64))
         try_base(orr_imm, Approach::ORRBase, false);
     }
   }
   else
   {
-    if (IsImmLogical(imm, 32))
+    if (LogicalImm(imm, 32))
       try_base(imm, Approach::ORRBase, false);
   }
 }
@@ -4127,10 +3942,9 @@ void ARM64XEmitter::ANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch)
   if (!Is64Bit(Rn))
     imm &= 0xFFFFFFFF;
 
-  if (const auto result = IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32))
+  if (const auto result = LogicalImm(imm, Is64Bit(Rn) ? 64 : 32))
   {
-    const auto& [n, imm_s, imm_r] = *result;
-    AND(Rd, Rn, imm_r, imm_s, n != 0);
+    AND(Rd, Rn, result);
   }
   else
   {
@@ -4144,10 +3958,9 @@ void ARM64XEmitter::ANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch)
 
 void ARM64XEmitter::ORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch)
 {
-  if (const auto result = IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32))
+  if (const auto result = LogicalImm(imm, Is64Bit(Rn) ? 64 : 32))
   {
-    const auto& [n, imm_s, imm_r] = *result;
-    ORR(Rd, Rn, imm_r, imm_s, n != 0);
+    ORR(Rd, Rn, result);
   }
   else
   {
@@ -4161,10 +3974,9 @@ void ARM64XEmitter::ORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch)
 
 void ARM64XEmitter::EORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch)
 {
-  if (const auto result = IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32))
+  if (const auto result = LogicalImm(imm, Is64Bit(Rn) ? 64 : 32))
   {
-    const auto& [n, imm_s, imm_r] = *result;
-    EOR(Rd, Rn, imm_r, imm_s, n != 0);
+    EOR(Rd, Rn, result);
   }
   else
   {
@@ -4178,10 +3990,9 @@ void ARM64XEmitter::EORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch)
 
 void ARM64XEmitter::ANDSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch)
 {
-  if (const auto result = IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32))
+  if (const auto result = LogicalImm(imm, Is64Bit(Rn) ? 64 : 32))
   {
-    const auto& [n, imm_s, imm_r] = *result;
-    ANDS(Rd, Rn, imm_r, imm_s, n != 0);
+    ANDS(Rd, Rn, result);
   }
   else
   {
@@ -4342,10 +4153,9 @@ bool ARM64XEmitter::TryCMPI2R(ARM64Reg Rn, u64 imm)
 
 bool ARM64XEmitter::TryANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm)
 {
-  if (const auto result = IsImmLogical(imm, Is64Bit(Rd) ? 64 : 32))
+  if (const auto result = LogicalImm(imm, Is64Bit(Rd) ? 64 : 32))
   {
-    const auto& [n, imm_s, imm_r] = *result;
-    AND(Rd, Rn, imm_r, imm_s, n != 0);
+    AND(Rd, Rn, result);
     return true;
   }
 
@@ -4354,10 +4164,9 @@ bool ARM64XEmitter::TryANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm)
 
 bool ARM64XEmitter::TryORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm)
 {
-  if (const auto result = IsImmLogical(imm, Is64Bit(Rd) ? 64 : 32))
+  if (const auto result = LogicalImm(imm, Is64Bit(Rd) ? 64 : 32))
   {
-    const auto& [n, imm_s, imm_r] = *result;
-    ORR(Rd, Rn, imm_r, imm_s, n != 0);
+    ORR(Rd, Rn, result);
     return true;
   }
 
@@ -4366,10 +4175,9 @@ bool ARM64XEmitter::TryORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm)
 
 bool ARM64XEmitter::TryEORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm)
 {
-  if (const auto result = IsImmLogical(imm, Is64Bit(Rd) ? 64 : 32))
+  if (const auto result = LogicalImm(imm, Is64Bit(Rd) ? 64 : 32))
   {
-    const auto& [n, imm_s, imm_r] = *result;
-    EOR(Rd, Rn, imm_r, imm_s, n != 0);
+    EOR(Rd, Rn, result);
     return true;
   }
 
@@ -5,12 +5,17 @@
 
 #include <cstring>
 #include <functional>
+#include <optional>
+#include <utility>
 
 #include "Common/ArmCommon.h"
 #include "Common/Assert.h"
 #include "Common/BitSet.h"
+#include "Common/BitUtils.h"
 #include "Common/CodeBlock.h"
 #include "Common/Common.h"
+#include "Common/CommonTypes.h"
+#include "Common/MathUtil.h"
 
 namespace Arm64Gen
 {
@@ -496,6 +501,225 @@ public:
   bool IsExtended() const { return m_type == TypeSpecifier::ExtendedReg; }
 };
 
+struct LogicalImm
+{
+  constexpr LogicalImm() {}
+
+  constexpr LogicalImm(u8 r_, u8 s_, bool n_) : r(r_), s(s_), n(n_), valid(true) {}
+
+  constexpr LogicalImm(u64 value, u32 width)
+  {
+    bool negate = false;
+
+    // Logical immediates are encoded using parameters n, imm_s and imm_r using
+    // the following table:
+    //
+    //    N   imms    immr    size        S             R
+    //    1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
+    //    0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
+    //    0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
+    //    0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
+    //    0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
+    //    0  11110s  xxxxxr     2    UInt(s)       UInt(r)
+    //    (s bits must not be all set)
+    //
+    // A pattern is constructed of size bits, where the least significant S+1 bits
+    // are set. The pattern is rotated right by R, and repeated across a 32 or
+    // 64-bit value, depending on destination register width.
+    //
+    // Put another way: the basic format of a logical immediate is a single
+    // contiguous stretch of 1 bits, repeated across the whole word at intervals
+    // given by a power of 2. To identify them quickly, we first locate the
+    // lowest stretch of 1 bits, then the next 1 bit above that; that combination
+    // is different for every logical immediate, so it gives us all the
+    // information we need to identify the only logical immediate that our input
+    // could be, and then we simply check if that's the value we actually have.
+    //
+    // (The rotation parameter does give the possibility of the stretch of 1 bits
+    // going 'round the end' of the word. To deal with that, we observe that in
+    // any situation where that happens the bitwise NOT of the value is also a
+    // valid logical immediate. So we simply invert the input whenever its low bit
+    // is set, and then we know that the rotated case can't arise.)
+
+    if (value & 1)
+    {
+      // If the low bit is 1, negate the value, and set a flag to remember that we
+      // did (so that we can adjust the return values appropriately).
+      negate = true;
+      value = ~value;
+    }
+
+    constexpr int kWRegSizeInBits = 32;
+
+    if (width == kWRegSizeInBits)
+    {
+      // To handle 32-bit logical immediates, the very easiest thing is to repeat
+      // the input value twice to make a 64-bit word. The correct encoding of that
+      // as a logical immediate will also be the correct encoding of the 32-bit
+      // value.
+
+      // The most-significant 32 bits may not be zero (ie. negate is true) so
+      // shift the value left before duplicating it.
+      value <<= kWRegSizeInBits;
+      value |= value >> kWRegSizeInBits;
+    }
+
+    // The basic analysis idea: imagine our input word looks like this.
+    //
+    //    0011111000111110001111100011111000111110001111100011111000111110
+    //                                                          c  b    a
+    //                                                          |<--d-->|
+    //
+    // We find the lowest set bit (as an actual power-of-2 value, not its index)
+    // and call it a. Then we add a to our original number, which wipes out the
+    // bottommost stretch of set bits and replaces it with a 1 carried into the
+    // next zero bit. Then we look for the new lowest set bit, which is in
+    // position b, and subtract it, so now our number is just like the original
+    // but with the lowest stretch of set bits completely gone. Now we find the
+    // lowest set bit again, which is position c in the diagram above. Then we'll
+    // measure the distance d between bit positions a and c (using CLZ), and that
+    // tells us that the only valid logical immediate that could possibly be equal
+    // to this number is the one in which a stretch of bits running from a to just
+    // below b is replicated every d bits.
+    u64 a = Common::LargestPowerOf2Divisor(value);
+    u64 value_plus_a = value + a;
+    u64 b = Common::LargestPowerOf2Divisor(value_plus_a);
+    u64 value_plus_a_minus_b = value_plus_a - b;
+    u64 c = Common::LargestPowerOf2Divisor(value_plus_a_minus_b);
+
+    int d = 0, clz_a = 0, out_n = 0;
+    u64 mask = 0;
+
+    if (c != 0)
+    {
+      // The general case, in which there is more than one stretch of set bits.
+      // Compute the repeat distance d, and set up a bitmask covering the basic
+      // unit of repetition (i.e. a word with the bottom d bits set). Also, in all
+      // of these cases the N bit of the output will be zero.
+      clz_a = Common::CountLeadingZeros(a);
+      int clz_c = Common::CountLeadingZeros(c);
+      d = clz_a - clz_c;
+      mask = ((UINT64_C(1) << d) - 1);
+      out_n = 0;
+    }
+    else
+    {
+      // Handle degenerate cases.
+      //
+      // If any of those 'find lowest set bit' operations didn't find a set bit at
+      // all, then the word will have been zero thereafter, so in particular the
+      // last lowest_set_bit operation will have returned zero. So we can test for
+      // all the special case conditions in one go by seeing if c is zero.
+      if (a == 0)
+      {
+        // The input was zero (or all 1 bits, which will come to here too after we
+        // inverted it at the start of the function), which is invalid.
+        return;
+      }
+      else
+      {
+        // Otherwise, if c was zero but a was not, then there's just one stretch
+        // of set bits in our word, meaning that we have the trivial case of
+        // d == 64 and only one 'repetition'. Set up all the same variables as in
+        // the general case above, and set the N bit in the output.
+        clz_a = Common::CountLeadingZeros(a);
+        d = 64;
+        mask = ~UINT64_C(0);
+        out_n = 1;
+      }
+    }
+
+    // If the repeat period d is not a power of two, it can't be encoded.
+    if (!MathUtil::IsPow2<u64>(d))
+      return;
+
+    // If the bit stretch (b - a) does not fit within the mask derived from the
+    // repeat period, then fail.
+    if (((b - a) & ~mask) != 0)
+      return;
+
+    // The only possible option is b - a repeated every d bits. Now we're going to
+    // actually construct the valid logical immediate derived from that
+    // specification, and see if it equals our original input.
+    //
+    // To repeat a value every d bits, we multiply it by a number of the form
+    // (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can
+    // be derived using a table lookup on CLZ(d).
+    constexpr std::array<u64, 6> multipliers = {{
+        0x0000000000000001UL,
+        0x0000000100000001UL,
+        0x0001000100010001UL,
+        0x0101010101010101UL,
+        0x1111111111111111UL,
+        0x5555555555555555UL,
+    }};
+
+    const int multiplier_idx = Common::CountLeadingZeros((u64)d) - 57;
+
+    // Ensure that the index to the multipliers array is within bounds.
+    DEBUG_ASSERT((multiplier_idx >= 0) &&
+                 (static_cast<size_t>(multiplier_idx) < multipliers.size()));
+
+    const u64 multiplier = multipliers[multiplier_idx];
+    const u64 candidate = (b - a) * multiplier;
+
+    // The candidate pattern doesn't match our input value, so fail.
+    if (value != candidate)
+      return;
+
+    // We have a match! This is a valid logical immediate, so now we have to
+    // construct the bits and pieces of the instruction encoding that generates
+    // it.
+    n = out_n;
+
+    // Count the set bits in our basic stretch. The special case of clz(0) == -1
+    // makes the answer come out right for stretches that reach the very top of
+    // the word (e.g. numbers like 0xffffc00000000000).
+    const int clz_b = (b == 0) ? -1 : Common::CountLeadingZeros(b);
+    s = clz_a - clz_b;
+
+    // Decide how many bits to rotate right by, to put the low bit of that basic
+    // stretch in position a.
+    if (negate)
+    {
+      // If we inverted the input right at the start of this function, here's
+      // where we compensate: the number of set bits becomes the number of clear
+      // bits, and the rotation count is based on position b rather than position
+      // a (since b is the location of the 'lowest' 1 bit after inversion).
+      s = d - s;
+      r = (clz_b + 1) & (d - 1);
+    }
+    else
+    {
+      r = (clz_a + 1) & (d - 1);
+    }
+
+    // Now we're done, except for having to encode the S output in such a way that
+    // it gives both the number of set bits and the length of the repeated
+    // segment. The s field is encoded like this:
+    //
+    //    imms    size        S
+    //   ssssss    64    UInt(ssssss)
+    //   0sssss    32    UInt(sssss)
+    //   10ssss    16    UInt(ssss)
+    //   110sss     8    UInt(sss)
+    //   1110ss     4    UInt(ss)
+    //   11110s     2    UInt(s)
+    //
+    // So we 'or' (-d << 1) with our computed s to form imms.
+    s = ((-d << 1) | (s - 1)) & 0x3f;
+
+    valid = true;
+  }
+
+  constexpr operator bool() const { return valid; }
+
+  u8 r = 0;
+  u8 s = 0;
+  bool n = false;
+  bool valid = false;
+};
+
 class ARM64XEmitter
 {
   friend class ARM64FloatEmitter;
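A hypothetical usage sketch for the struct above; the identifier kCacheLineMask is invented, and the asserted field values were worked out by hand from the algorithm:

```cpp
// ~31 = 0x...FFE0: a run of 27 ones, encodable as a 32-bit logical immediate.
constexpr LogicalImm kCacheLineMask(~UINT64_C(31), 32);
static_assert(kCacheLineMask);             // operator bool reports 'valid'
static_assert(kCacheLineMask.n == false);  // 32-bit element size => N = 0
static_assert(kCacheLineMask.r == 27);     // rotate right by 27 places the run at bit 5
static_assert(kCacheLineMask.s == 26);     // imms 0b011010 encodes 27 ones at size 32

// The emitter overloads added in this commit then consume it directly, e.g.:
//   AND(addr_reg, addr_reg, kCacheLineMask);
```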
@@ -531,6 +755,7 @@ private:
   void EncodeLoadStoreRegisterOffset(u32 size, u32 opc, ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);
   void EncodeAddSubImmInst(u32 op, bool flags, u32 shift, u32 imm, ARM64Reg Rn, ARM64Reg Rd);
   void EncodeLogicalImmInst(u32 op, ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, int n);
+  void EncodeLogicalImmInst(u32 op, ARM64Reg Rd, ARM64Reg Rn, LogicalImm imm);
   void EncodeLoadStorePair(u32 op, u32 load, IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn,
                            s32 imm);
   void EncodeAddressInst(u32 op, ARM64Reg Rd, s32 imm);
@@ -772,10 +997,15 @@ public:
 
   // Logical (immediate)
   void AND(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert = false);
+  void AND(ARM64Reg Rd, ARM64Reg Rn, LogicalImm imm);
   void ANDS(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert = false);
+  void ANDS(ARM64Reg Rd, ARM64Reg Rn, LogicalImm imm);
   void EOR(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert = false);
+  void EOR(ARM64Reg Rd, ARM64Reg Rn, LogicalImm imm);
   void ORR(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert = false);
+  void ORR(ARM64Reg Rd, ARM64Reg Rn, LogicalImm imm);
   void TST(ARM64Reg Rn, u32 immr, u32 imms, bool invert = false);
+  void TST(ARM64Reg Rn, LogicalImm imm);
   // Add/subtract (immediate)
   void ADD(ARM64Reg Rd, ARM64Reg Rn, u32 imm, bool shift = false);
   void ADDS(ARM64Reg Rd, ARM64Reg Rn, u32 imm, bool shift = false);
@@ -893,17 +1123,17 @@ public:
     MOVI2R(Rd, (uintptr_t)ptr);
   }
 
-  // Wrapper around AND x, y, imm etc. If you are sure the imm will work, no need to pass a scratch
-  // register.
-  void ANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG);
-  void ANDSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG);
-  void TSTI2R(ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG)
+  // Wrapper around AND x, y, imm etc.
+  // If you are sure the imm will work, preferably construct a LogicalImm directly instead,
+  // since that is constexpr and thus can be done at compile-time for constant values.
+  void ANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch);
+  void ANDSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch);
+  void TSTI2R(ARM64Reg Rn, u64 imm, ARM64Reg scratch)
   {
     ANDSI2R(Is64Bit(Rn) ? ARM64Reg::ZR : ARM64Reg::WZR, Rn, imm, scratch);
   }
-  void ORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG);
-  void EORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG);
-  void CMPI2R(ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG);
+  void ORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch);
+  void EORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch);
 
   void ADDI2R_internal(ARM64Reg Rd, ARM64Reg Rn, u64 imm, bool negative, bool flags,
                        ARM64Reg scratch);
@@ -911,6 +1141,7 @@ public:
   void ADDSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG);
   void SUBI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG);
   void SUBSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG);
+  void CMPI2R(ARM64Reg Rn, u64 imm, ARM64Reg scratch = ARM64Reg::INVALID_REG);
 
   bool TryADDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm);
   bool TrySUBI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm);
@@ -413,4 +413,13 @@ constexpr int CountLeadingZeros(uint32_t value)
 
 #undef CONSTEXPR_FROM_INTRINSIC
 
+template <typename T>
+constexpr T LargestPowerOf2Divisor(T value)
+{
+  static_assert(std::is_unsigned<T>(),
+                "LargestPowerOf2Divisor only makes sense for unsigned types.");
+
+  return value & -static_cast<std::make_signed_t<T>>(value);
+}
+
 }  // namespace Common
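A quick standalone check of the identity behind this helper (value & -value isolates the lowest set bit, i.e. the largest power-of-two divisor); a sketch mirroring, but not identical to, the header above:

```cpp
#include <cstdint>

template <typename T>
constexpr T largest_pow2_divisor(T value)
{
  return value & (~value + T{1});  // two's-complement negate without a signed cast
}

static_assert(largest_pow2_divisor<uint64_t>(0b10110000) == 0b00010000);
static_assert(largest_pow2_divisor<uint64_t>(12) == 4);  // 12 = 4 * 3
static_assert(largest_pow2_divisor<uint64_t>(0) == 0);   // degenerate case the algorithm relies on

int main() {}
```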
@@ -799,7 +799,7 @@ void JitArm64::DoJit(u32 em_address, JitBlock* b, u32 nextPC)
       fpr.Flush(FlushMode::MaintainState);
 
       LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
-      ORRI2R(WA, WA, EXCEPTION_FPU_UNAVAILABLE);
+      ORR(WA, WA, LogicalImm(EXCEPTION_FPU_UNAVAILABLE, 32));
       STR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
 
       gpr.Unlock(WA);
@@ -24,7 +24,7 @@ void JitArm64::sc(UGeckoInstruction inst)
   ARM64Reg WA = gpr.GetReg();
 
   LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
-  ORRI2R(WA, WA, EXCEPTION_SYSCALL);
+  ORR(WA, WA, LogicalImm(EXCEPTION_SYSCALL, 32));
   STR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
 
   gpr.Unlock(WA);
@@ -402,7 +402,7 @@ void JitArm64::FloatCompare(UGeckoInstruction inst, bool upper)
   {
     fpscr_reg = gpr.GetReg();
     LDR(IndexType::Unsigned, fpscr_reg, PPC_REG, PPCSTATE_OFF(fpscr));
-    ANDI2R(fpscr_reg, fpscr_reg, ~FPCC_MASK);
+    AND(fpscr_reg, fpscr_reg, LogicalImm(~FPCC_MASK, 32));
   }
 
   ARM64Reg V0Q = ARM64Reg::INVALID_REG;
@@ -451,7 +451,7 @@ void JitArm64::FloatCompare(UGeckoInstruction inst, bool upper)
     // A == B
     ORR(XA, XA, 64 - 63, 0, true);
     if (fprf)
-      ORRI2R(fpscr_reg, fpscr_reg, PowerPC::CR_EQ << FPRF_SHIFT);
+      ORR(fpscr_reg, fpscr_reg, LogicalImm(PowerPC::CR_EQ << FPRF_SHIFT, 32));
 
     continue1 = B();
 
@@ -459,7 +459,7 @@ void JitArm64::FloatCompare(UGeckoInstruction inst, bool upper)
 
     MOVI2R(XA, PowerPC::ConditionRegister::PPCToInternal(PowerPC::CR_SO));
     if (fprf)
-      ORRI2R(fpscr_reg, fpscr_reg, PowerPC::CR_SO << FPRF_SHIFT);
+      ORR(fpscr_reg, fpscr_reg, LogicalImm(PowerPC::CR_SO << FPRF_SHIFT, 32));
 
     if (a != b)
     {
@@ -468,7 +468,7 @@ void JitArm64::FloatCompare(UGeckoInstruction inst, bool upper)
     SetJumpTarget(pGreater);
     ORR(XA, XA, 0, 0, true);
     if (fprf)
-      ORRI2R(fpscr_reg, fpscr_reg, PowerPC::CR_GT << FPRF_SHIFT);
+      ORR(fpscr_reg, fpscr_reg, LogicalImm(PowerPC::CR_GT << FPRF_SHIFT, 32));
 
     continue3 = B();
 
@@ -476,7 +476,7 @@ void JitArm64::FloatCompare(UGeckoInstruction inst, bool upper)
     ORR(XA, XA, 64 - 62, 1, true);
     ORR(XA, XA, 0, 0, true);
     if (fprf)
-      ORRI2R(fpscr_reg, fpscr_reg, PowerPC::CR_LT << FPRF_SHIFT);
+      ORR(fpscr_reg, fpscr_reg, LogicalImm(PowerPC::CR_LT << FPRF_SHIFT, 32));
 
     SetJumpTarget(continue2);
     SetJumpTarget(continue3);
@@ -533,7 +533,7 @@ void JitArm64::fctiwzx(UGeckoInstruction inst)
   const ARM64Reg WA = gpr.GetReg();
 
   m_float_emit.FCVTS(WA, EncodeRegToDouble(VB), RoundingMode::Z);
-  ORRI2R(EncodeRegTo64(WA), EncodeRegTo64(WA), 0xFFF8'0000'0000'0000ULL);
+  ORR(EncodeRegTo64(WA), EncodeRegTo64(WA), LogicalImm(0xFFF8'0000'0000'0000ULL, 64));
   m_float_emit.FMOV(EncodeRegToDouble(VD), EncodeRegTo64(WA));
 
   gpr.Unlock(WA);
@@ -611,7 +611,7 @@ void JitArm64::rlwinmx(UGeckoInstruction inst)
   else if (!inst.SH)
   {
     // Immediate mask
-    ANDI2R(gpr.R(a), gpr.R(s), mask);
+    AND(gpr.R(a), gpr.R(s), LogicalImm(mask, 32));
   }
   else if (inst.ME == 31 && 31 < inst.SH + inst.MB)
   {
@@ -550,7 +550,7 @@ void JitArm64::dcbx(UGeckoInstruction inst)
   else
     MOV(addr, gpr.R(b));
 
-  ANDI2R(addr, addr, ~31);  // mask sizeof cacheline
+  AND(addr, addr, LogicalImm(~31, 32));  // mask sizeof cacheline
 
   BitSet32 gprs_to_push = gpr.GetCallerSavedUsed();
   BitSet32 fprs_to_push = fpr.GetCallerSavedUsed();
@@ -618,13 +618,13 @@ void JitArm64::dcbz(UGeckoInstruction inst)
       ARM64Reg base = is_imm_a ? gpr.R(b) : gpr.R(a);
       u32 imm_offset = is_imm_a ? gpr.GetImm(a) : gpr.GetImm(b);
       ADDI2R(addr_reg, base, imm_offset, addr_reg);
-      ANDI2R(addr_reg, addr_reg, ~31);
+      AND(addr_reg, addr_reg, LogicalImm(~31, 32));
     }
     else
     {
       // Both are registers
       ADD(addr_reg, gpr.R(a), gpr.R(b));
-      ANDI2R(addr_reg, addr_reg, ~31);
+      AND(addr_reg, addr_reg, LogicalImm(~31, 32));
     }
   }
   else
@@ -637,7 +637,7 @@ void JitArm64::dcbz(UGeckoInstruction inst)
     }
    else
     {
-      ANDI2R(addr_reg, gpr.R(b), ~31);
+      AND(addr_reg, gpr.R(b), LogicalImm(~31, 32));
     }
   }
 
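Sanity check of the masking used in dcbx/dcbz above: clearing the low five bits aligns an effective address down to the start of its 32-byte cache line. A trivial standalone illustration:

```cpp
#include <cstdint>
#include <cstdio>

int main()
{
  const uint32_t addr = 0x80001234 + 0x1F;  // somewhere inside a cache line
  printf("0x%08X\n", addr & ~31u);          // 0x80001240: start of the 32-byte line
}
```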
@@ -217,7 +217,7 @@ void JitArm64::twx(UGeckoInstruction inst)
     fpr.Flush(FlushMode::MaintainState);
 
     LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
-    ORRI2R(WA, WA, EXCEPTION_PROGRAM);
+    ORR(WA, WA, LogicalImm(EXCEPTION_PROGRAM, 32));
     STR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(Exceptions));
     gpr.Unlock(WA);
 
@@ -290,7 +290,7 @@ void JitArm64::mfspr(UGeckoInstruction inst)
     SUB(Xresult, Xresult, XB);
 
     // a / 12 = (a * 0xAAAAAAAAAAAAAAAB) >> 67
-    ORRI2R(XB, ARM64Reg::ZR, 0xAAAAAAAAAAAAAAAA);
+    ORR(XB, ARM64Reg::ZR, LogicalImm(0xAAAAAAAAAAAAAAAA, 64));
     ADD(XB, XB, 1);
     UMULH(Xresult, Xresult, XB);
 
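The comment above can be verified directly: 0xAAAAAAAAAAAAAAAB equals (2^65 + 1) / 3, and multiplying by it then shifting right by 67 computes a / 12 for every 64-bit a. UMULH provides the high half of the product (the >> 64 part), and the JIT materializes 0xAAA...AAA with ORR and adds 1 because the odd constant itself is not a valid logical immediate. A standalone check, using the __uint128_t compiler extension (GCC/Clang) for illustration:

```cpp
#include <cstdint>
#include <cstdio>

static uint64_t div12(uint64_t a)
{
  // UMULH(a, magic) == high 64 bits of the 128-bit product; >> 3 more gives >> 67.
  const unsigned __int128 product = (unsigned __int128)a * 0xAAAAAAAAAAAAAAABULL;
  return (uint64_t)(product >> 67);
}

int main()
{
  for (uint64_t a : {uint64_t{0}, uint64_t{11}, uint64_t{12}, uint64_t{35},
                     uint64_t{123456789}, ~uint64_t{0}})
    printf("%llu -> %llu (expect %llu)\n", (unsigned long long)a,
           (unsigned long long)div12(a), (unsigned long long)(a / 12));
}
```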
@@ -440,20 +440,20 @@ void JitArm64::crXXX(UGeckoInstruction inst)
     switch (bit)
     {
     case PowerPC::CR_SO_BIT:
-      ANDI2R(XA, XA, ~(u64(1) << PowerPC::CR_EMU_SO_BIT));
+      AND(XA, XA, LogicalImm(~(u64(1) << PowerPC::CR_EMU_SO_BIT), 64));
       break;
 
     case PowerPC::CR_EQ_BIT:
       FixGTBeforeSettingCRFieldBit(XA);
-      ORRI2R(XA, XA, 1);
+      ORR(XA, XA, LogicalImm(1, 64));
       break;
 
     case PowerPC::CR_GT_BIT:
-      ORRI2R(XA, XA, u64(1) << 63);
+      ORR(XA, XA, LogicalImm(u64(1) << 63, 64));
       break;
 
     case PowerPC::CR_LT_BIT:
-      ANDI2R(XA, XA, ~(u64(1) << PowerPC::CR_EMU_LT_BIT));
+      AND(XA, XA, LogicalImm(~(u64(1) << PowerPC::CR_EMU_LT_BIT), 64));
       break;
     }
     return;
@@ -475,23 +475,23 @@ void JitArm64::crXXX(UGeckoInstruction inst)
     switch (bit)
     {
     case PowerPC::CR_SO_BIT:
-      ORRI2R(XA, XA, u64(1) << PowerPC::CR_EMU_SO_BIT);
+      ORR(XA, XA, LogicalImm(u64(1) << PowerPC::CR_EMU_SO_BIT, 64));
      break;
 
     case PowerPC::CR_EQ_BIT:
-      ANDI2R(XA, XA, 0xFFFF'FFFF'0000'0000);
+      AND(XA, XA, LogicalImm(0xFFFF'FFFF'0000'0000, 64));
       break;
 
     case PowerPC::CR_GT_BIT:
-      ANDI2R(XA, XA, ~(u64(1) << 63));
+      AND(XA, XA, LogicalImm(~(u64(1) << 63), 64));
       break;
 
     case PowerPC::CR_LT_BIT:
-      ORRI2R(XA, XA, u64(1) << PowerPC::CR_EMU_LT_BIT);
+      ORR(XA, XA, LogicalImm(u64(1) << PowerPC::CR_EMU_LT_BIT, 64));
       break;
     }
 
-    ORRI2R(XA, XA, u64(1) << 32);
+    ORR(XA, XA, LogicalImm(u64(1) << 32, 64));
     return;
   }
 
@@ -708,13 +708,12 @@ void JitArm64::mcrfs(UGeckoInstruction inst)
   ARM64Reg XA = EncodeRegTo64(WA);
 
   LDR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(fpscr));
-  LSR(WCR, WA, shift);
-  ANDI2R(WCR, WCR, 0xF);
+  UBFX(WCR, WA, shift, 4);
 
   if (mask != 0)
   {
     const u32 inverted_mask = ~mask;
-    ANDI2R(WA, WA, inverted_mask);
+    AND(WA, WA, LogicalImm(inverted_mask, 32));
     STR(IndexType::Unsigned, WA, PPC_REG, PPCSTATE_OFF(fpscr));
   }
 
@ -102,7 +102,7 @@ void JitArm64::GenerateAsm()
|
||||||
ARM64Reg pc_masked = ARM64Reg::W25;
|
ARM64Reg pc_masked = ARM64Reg::W25;
|
||||||
ARM64Reg cache_base = ARM64Reg::X27;
|
ARM64Reg cache_base = ARM64Reg::X27;
|
||||||
ARM64Reg block = ARM64Reg::X30;
|
ARM64Reg block = ARM64Reg::X30;
|
||||||
ORRI2R(pc_masked, ARM64Reg::WZR, JitBaseBlockCache::FAST_BLOCK_MAP_MASK << 3);
|
ORR(pc_masked, ARM64Reg::WZR, LogicalImm(JitBaseBlockCache::FAST_BLOCK_MAP_MASK << 3, 32));
|
||||||
AND(pc_masked, pc_masked, DISPATCHER_PC, ArithOption(DISPATCHER_PC, ShiftType::LSL, 1));
|
AND(pc_masked, pc_masked, DISPATCHER_PC, ArithOption(DISPATCHER_PC, ShiftType::LSL, 1));
|
||||||
MOVP2R(cache_base, GetBlockCache()->GetFastBlockMap());
|
MOVP2R(cache_base, GetBlockCache()->GetFastBlockMap());
|
||||||
LDR(block, cache_base, EncodeRegTo64(pc_masked));
|
LDR(block, cache_base, EncodeRegTo64(pc_masked));
|
||||||
|
@ -116,7 +116,7 @@ void JitArm64::GenerateAsm()
|
||||||
FixupBranch pc_missmatch = B(CC_NEQ);
|
FixupBranch pc_missmatch = B(CC_NEQ);
|
||||||
|
|
||||||
LDR(IndexType::Unsigned, pc_and_msr2, PPC_REG, PPCSTATE_OFF(msr));
|
LDR(IndexType::Unsigned, pc_and_msr2, PPC_REG, PPCSTATE_OFF(msr));
|
||||||
ANDI2R(pc_and_msr2, pc_and_msr2, JitBaseBlockCache::JIT_CACHE_MSR_MASK);
|
AND(pc_and_msr2, pc_and_msr2, LogicalImm(JitBaseBlockCache::JIT_CACHE_MSR_MASK, 32));
|
||||||
LDR(IndexType::Unsigned, pc_and_msr, block, offsetof(JitBlockData, msrBits));
|
LDR(IndexType::Unsigned, pc_and_msr, block, offsetof(JitBlockData, msrBits));
|
||||||
CMP(pc_and_msr, pc_and_msr2);
|
CMP(pc_and_msr, pc_and_msr2);
|
||||||
FixupBranch msr_missmatch = B(CC_NEQ);
|
FixupBranch msr_missmatch = B(CC_NEQ);
|
||||||
|
@ -238,7 +238,7 @@ void JitArm64::GenerateFres()
|
||||||
UBFX(ARM64Reg::X2, ARM64Reg::X1, 52, 11); // Grab the exponent
|
UBFX(ARM64Reg::X2, ARM64Reg::X1, 52, 11); // Grab the exponent
|
||||||
m_float_emit.FMOV(ARM64Reg::X0, ARM64Reg::D0);
|
m_float_emit.FMOV(ARM64Reg::X0, ARM64Reg::D0);
|
||||||
CMP(ARM64Reg::X2, 895);
|
CMP(ARM64Reg::X2, 895);
|
||||||
ANDI2R(ARM64Reg::X3, ARM64Reg::X1, Common::DOUBLE_SIGN);
|
AND(ARM64Reg::X3, ARM64Reg::X1, LogicalImm(Common::DOUBLE_SIGN, 64));
|
||||||
FixupBranch small_exponent = B(CCFlags::CC_LO);
|
FixupBranch small_exponent = B(CCFlags::CC_LO);
|
||||||
|
|
||||||
MOVI2R(ARM64Reg::X4, 1148LL);
|
MOVI2R(ARM64Reg::X4, 1148LL);
|
||||||
|
@ -251,14 +251,14 @@ void JitArm64::GenerateFres()
|
||||||
LDP(IndexType::Signed, ARM64Reg::W2, ARM64Reg::W3, ARM64Reg::X2, 0);
|
LDP(IndexType::Signed, ARM64Reg::W2, ARM64Reg::W3, ARM64Reg::X2, 0);
|
||||||
UBFX(ARM64Reg::X1, ARM64Reg::X1, 37, 10); // Grab lower part of mantissa
|
UBFX(ARM64Reg::X1, ARM64Reg::X1, 37, 10); // Grab lower part of mantissa
|
||||||
MOVI2R(ARM64Reg::W4, 1);
|
MOVI2R(ARM64Reg::W4, 1);
|
||||||
ANDI2R(ARM64Reg::X0, ARM64Reg::X0, Common::DOUBLE_SIGN | Common::DOUBLE_EXP);
|
AND(ARM64Reg::X0, ARM64Reg::X0, LogicalImm(Common::DOUBLE_SIGN | Common::DOUBLE_EXP, 64));
|
||||||
MADD(ARM64Reg::W1, ARM64Reg::W3, ARM64Reg::W1, ARM64Reg::W4);
|
MADD(ARM64Reg::W1, ARM64Reg::W3, ARM64Reg::W1, ARM64Reg::W4);
|
||||||
SUB(ARM64Reg::W1, ARM64Reg::W2, ARM64Reg::W1, ArithOption(ARM64Reg::W1, ShiftType::LSR, 1));
|
SUB(ARM64Reg::W1, ARM64Reg::W2, ARM64Reg::W1, ArithOption(ARM64Reg::W1, ShiftType::LSR, 1));
|
||||||
ORR(ARM64Reg::X0, ARM64Reg::X0, ARM64Reg::X1, ArithOption(ARM64Reg::X1, ShiftType::LSL, 29));
|
ORR(ARM64Reg::X0, ARM64Reg::X0, ARM64Reg::X1, ArithOption(ARM64Reg::X1, ShiftType::LSL, 29));
|
||||||
RET();
|
RET();
|
||||||
|
|
||||||
SetJumpTarget(small_exponent);
|
SetJumpTarget(small_exponent);
|
||||||
TSTI2R(ARM64Reg::X1, Common::DOUBLE_EXP | Common::DOUBLE_FRAC);
|
TST(ARM64Reg::X1, LogicalImm(Common::DOUBLE_EXP | Common::DOUBLE_FRAC, 64));
|
||||||
FixupBranch zero = B(CCFlags::CC_EQ);
|
FixupBranch zero = B(CCFlags::CC_EQ);
|
||||||
MOVI2R(ARM64Reg::X4,
|
MOVI2R(ARM64Reg::X4,
|
||||||
Common::BitCast<u64>(static_cast<double>(std::numeric_limits<float>::max())));
|
Common::BitCast<u64>(static_cast<double>(std::numeric_limits<float>::max())));
|
||||||
|
@ -289,15 +289,15 @@ void JitArm64::GenerateFrsqrte()
|
||||||
// inf, even the mantissa matches. But the mantissa does not match for most other inputs, so in
|
// inf, even the mantissa matches. But the mantissa does not match for most other inputs, so in
|
||||||
// the normal case we calculate the mantissa using the table-based algorithm from the interpreter.
|
// the normal case we calculate the mantissa using the table-based algorithm from the interpreter.
|
||||||
|
|
||||||
TSTI2R(ARM64Reg::X1, Common::DOUBLE_EXP | Common::DOUBLE_FRAC);
|
TST(ARM64Reg::X1, LogicalImm(Common::DOUBLE_EXP | Common::DOUBLE_FRAC, 64));
|
||||||
m_float_emit.FMOV(ARM64Reg::X0, ARM64Reg::D0);
|
m_float_emit.FMOV(ARM64Reg::X0, ARM64Reg::D0);
|
||||||
FixupBranch zero = B(CCFlags::CC_EQ);
|
FixupBranch zero = B(CCFlags::CC_EQ);
|
||||||
ANDI2R(ARM64Reg::X2, ARM64Reg::X1, Common::DOUBLE_EXP);
|
AND(ARM64Reg::X2, ARM64Reg::X1, LogicalImm(Common::DOUBLE_EXP, 64));
|
||||||
MOVI2R(ARM64Reg::X3, Common::DOUBLE_EXP);
|
MOVI2R(ARM64Reg::X3, Common::DOUBLE_EXP);
|
||||||
CMP(ARM64Reg::X2, ARM64Reg::X3);
|
CMP(ARM64Reg::X2, ARM64Reg::X3);
|
||||||
FixupBranch nan_or_inf = B(CCFlags::CC_EQ);
|
FixupBranch nan_or_inf = B(CCFlags::CC_EQ);
|
||||||
FixupBranch negative = TBNZ(ARM64Reg::X1, 63);
|
FixupBranch negative = TBNZ(ARM64Reg::X1, 63);
|
||||||
ANDI2R(ARM64Reg::X3, ARM64Reg::X1, Common::DOUBLE_FRAC);
|
AND(ARM64Reg::X3, ARM64Reg::X1, LogicalImm(Common::DOUBLE_FRAC, 64));
|
||||||
FixupBranch normal = CBNZ(ARM64Reg::X2);
|
FixupBranch normal = CBNZ(ARM64Reg::X2);
|
||||||
|
|
||||||
// "Normalize" denormal values
|
// "Normalize" denormal values
|
||||||
|
@ -306,18 +306,18 @@ void JitArm64::GenerateFrsqrte()
|
||||||
MOVI2R(ARM64Reg::X2, 0x00C0'0000'0000'0000);
|
MOVI2R(ARM64Reg::X2, 0x00C0'0000'0000'0000);
|
||||||
LSLV(ARM64Reg::X4, ARM64Reg::X1, ARM64Reg::X4);
|
LSLV(ARM64Reg::X4, ARM64Reg::X1, ARM64Reg::X4);
|
||||||
SUB(ARM64Reg::X2, ARM64Reg::X2, ARM64Reg::X3, ArithOption(ARM64Reg::X3, ShiftType::LSL, 52));
|
SUB(ARM64Reg::X2, ARM64Reg::X2, ARM64Reg::X3, ArithOption(ARM64Reg::X3, ShiftType::LSL, 52));
|
||||||
ANDI2R(ARM64Reg::X3, ARM64Reg::X4, Common::DOUBLE_FRAC - 1);
|
AND(ARM64Reg::X3, ARM64Reg::X4, LogicalImm(Common::DOUBLE_FRAC - 1, 64));
|
||||||
|
|
||||||
SetJumpTarget(normal);
|
SetJumpTarget(normal);
|
||||||
LSR(ARM64Reg::X2, ARM64Reg::X2, 48);
|
LSR(ARM64Reg::X2, ARM64Reg::X2, 48);
|
||||||
ANDI2R(ARM64Reg::X2, ARM64Reg::X2, 0x10);
|
AND(ARM64Reg::X2, ARM64Reg::X2, LogicalImm(0x10, 64));
|
||||||
MOVP2R(ARM64Reg::X1, &Common::frsqrte_expected);
|
MOVP2R(ARM64Reg::X1, &Common::frsqrte_expected);
|
||||||
ORR(ARM64Reg::X2, ARM64Reg::X2, ARM64Reg::X3, ArithOption(ARM64Reg::X8, ShiftType::LSR, 48));
|
ORR(ARM64Reg::X2, ARM64Reg::X2, ARM64Reg::X3, ArithOption(ARM64Reg::X8, ShiftType::LSR, 48));
|
||||||
EORI2R(ARM64Reg::X2, ARM64Reg::X2, 0x10);
|
EOR(ARM64Reg::X2, ARM64Reg::X2, LogicalImm(0x10, 64));
|
||||||
ADD(ARM64Reg::X2, ARM64Reg::X1, ARM64Reg::X2, ArithOption(ARM64Reg::X2, ShiftType::LSL, 3));
|
ADD(ARM64Reg::X2, ARM64Reg::X1, ARM64Reg::X2, ArithOption(ARM64Reg::X2, ShiftType::LSL, 3));
|
||||||
LDP(IndexType::Signed, ARM64Reg::W1, ARM64Reg::W2, ARM64Reg::X2, 0);
|
LDP(IndexType::Signed, ARM64Reg::W1, ARM64Reg::W2, ARM64Reg::X2, 0);
|
||||||
UBFX(ARM64Reg::X3, ARM64Reg::X3, 37, 11);
|
UBFX(ARM64Reg::X3, ARM64Reg::X3, 37, 11);
|
||||||
ANDI2R(ARM64Reg::X0, ARM64Reg::X0, Common::DOUBLE_SIGN | Common::DOUBLE_EXP);
|
AND(ARM64Reg::X0, ARM64Reg::X0, LogicalImm(Common::DOUBLE_SIGN | Common::DOUBLE_EXP, 64));
|
||||||
MSUB(ARM64Reg::W3, ARM64Reg::W3, ARM64Reg::W2, ARM64Reg::W1);
|
MSUB(ARM64Reg::W3, ARM64Reg::W3, ARM64Reg::W2, ARM64Reg::W1);
|
||||||
ORR(ARM64Reg::X0, ARM64Reg::X0, ARM64Reg::X3, ArithOption(ARM64Reg::X3, ShiftType::LSL, 26));
|
ORR(ARM64Reg::X0, ARM64Reg::X0, ARM64Reg::X3, ArithOption(ARM64Reg::X3, ShiftType::LSL, 26));
|
||||||
RET();
|
RET();
|
||||||
|
@ -354,17 +354,17 @@ void JitArm64::GenerateConvertDoubleToSingle()
|
||||||
LSR(ARM64Reg::X1, ARM64Reg::X0, 32);
|
LSR(ARM64Reg::X1, ARM64Reg::X0, 32);
|
||||||
FixupBranch denormal = B(CCFlags::CC_LS);
|
FixupBranch denormal = B(CCFlags::CC_LS);
|
||||||
|
|
||||||
ANDI2R(ARM64Reg::X1, ARM64Reg::X1, 0xc0000000);
|
AND(ARM64Reg::X1, ARM64Reg::X1, LogicalImm(0xc0000000, 64));
|
||||||
BFXIL(ARM64Reg::X1, ARM64Reg::X0, 29, 30);
|
BFXIL(ARM64Reg::X1, ARM64Reg::X0, 29, 30);
|
||||||
RET();
|
RET();
|
||||||
|
|
||||||
SetJumpTarget(denormal);
|
SetJumpTarget(denormal);
|
||||||
LSR(ARM64Reg::X3, ARM64Reg::X0, 21);
|
LSR(ARM64Reg::X3, ARM64Reg::X0, 21);
|
||||||
MOVZ(ARM64Reg::X0, 905);
|
MOVZ(ARM64Reg::X0, 905);
|
||||||
ORRI2R(ARM64Reg::W3, ARM64Reg::W3, 0x80000000);
|
ORR(ARM64Reg::W3, ARM64Reg::W3, LogicalImm(0x80000000, 32));
|
||||||
SUB(ARM64Reg::W2, ARM64Reg::W0, ARM64Reg::W2);
|
SUB(ARM64Reg::W2, ARM64Reg::W0, ARM64Reg::W2);
|
||||||
LSRV(ARM64Reg::W2, ARM64Reg::W3, ARM64Reg::W2);
|
LSRV(ARM64Reg::W2, ARM64Reg::W3, ARM64Reg::W2);
|
||||||
ANDI2R(ARM64Reg::X3, ARM64Reg::X1, 0x80000000);
|
AND(ARM64Reg::X3, ARM64Reg::X1, LogicalImm(0x80000000, 64));
|
||||||
ORR(ARM64Reg::X1, ARM64Reg::X3, ARM64Reg::X2);
|
ORR(ARM64Reg::X1, ARM64Reg::X3, ARM64Reg::X2);
|
||||||
RET();
|
RET();
|
||||||
}
|
}
|
||||||
|

@@ -375,7 +375,7 @@ void JitArm64::GenerateConvertSingleToDouble()
   UBFX(ARM64Reg::W1, ARM64Reg::W0, 23, 8);
   FixupBranch normal_or_nan = CBNZ(ARM64Reg::W1);

-  ANDI2R(ARM64Reg::W1, ARM64Reg::W0, 0x007fffff);
+  AND(ARM64Reg::W1, ARM64Reg::W0, LogicalImm(0x007fffff, 32));
   FixupBranch denormal = CBNZ(ARM64Reg::W1);

   // Zero

@@ -383,10 +383,10 @@ void JitArm64::GenerateConvertSingleToDouble()
   RET();

   SetJumpTarget(denormal);
-  ANDI2R(ARM64Reg::W2, ARM64Reg::W0, 0x80000000);
+  AND(ARM64Reg::W2, ARM64Reg::W0, LogicalImm(0x80000000, 32));
   CLZ(ARM64Reg::X3, ARM64Reg::X1);
   LSL(ARM64Reg::X2, ARM64Reg::X2, 32);
-  ORRI2R(ARM64Reg::X4, ARM64Reg::X3, 0xffffffffffffffc0);
+  ORR(ARM64Reg::X4, ARM64Reg::X3, LogicalImm(0xffffffffffffffc0, 64));
   SUB(ARM64Reg::X2, ARM64Reg::X2, ARM64Reg::X3, ArithOption(ARM64Reg::X3, ShiftType::LSL, 52));
   ADD(ARM64Reg::X3, ARM64Reg::X4, 23);
   LSLV(ARM64Reg::X1, ARM64Reg::X1, ARM64Reg::X3);

@@ -397,12 +397,12 @@ void JitArm64::GenerateConvertSingleToDouble()

   SetJumpTarget(normal_or_nan);
   CMP(ARM64Reg::W1, 0xff);
-  ANDI2R(ARM64Reg::W2, ARM64Reg::W0, 0x40000000);
+  AND(ARM64Reg::W2, ARM64Reg::W0, LogicalImm(0x40000000, 32));
   CSET(ARM64Reg::W4, CCFlags::CC_NEQ);
-  ANDI2R(ARM64Reg::W3, ARM64Reg::W0, 0xc0000000);
+  AND(ARM64Reg::W3, ARM64Reg::W0, LogicalImm(0xc0000000, 32));
   EOR(ARM64Reg::W2, ARM64Reg::W4, ARM64Reg::W2, ArithOption(ARM64Reg::W2, ShiftType::LSR, 30));
   MOVI2R(ARM64Reg::X1, 0x3800000000000000);
-  ANDI2R(ARM64Reg::W4, ARM64Reg::W0, 0x3fffffff);
+  AND(ARM64Reg::W4, ARM64Reg::W0, LogicalImm(0x3fffffff, 32));
   LSL(ARM64Reg::X3, ARM64Reg::X3, 32);
   CMP(ARM64Reg::W2, 0);
   CSEL(ARM64Reg::X1, ARM64Reg::X1, ARM64Reg::ZR, CCFlags::CC_NEQ);

@@ -423,9 +423,10 @@ void JitArm64::GenerateFPRF(bool single)
   constexpr ARM64Reg fprf_reg = ARM64Reg::W3;
   constexpr ARM64Reg fpscr_reg = ARM64Reg::W4;

-  const auto INPUT_EXP_MASK = single ? Common::FLOAT_EXP : Common::DOUBLE_EXP;
-  const auto INPUT_FRAC_MASK = single ? Common::FLOAT_FRAC : Common::DOUBLE_FRAC;
-  constexpr u32 OUTPUT_SIGN_MASK = 0xC;
+  const int input_size = single ? 32 : 64;
+  const u64 input_exp_mask = single ? Common::FLOAT_EXP : Common::DOUBLE_EXP;
+  const u64 input_frac_mask = single ? Common::FLOAT_FRAC : Common::DOUBLE_FRAC;
+  constexpr u32 output_sign_mask = 0xC;

   // This code is duplicated for the most common cases for performance.
   // For the less common cases, we branch to an existing copy of this code.

@@ -439,7 +440,7 @@ void JitArm64::GenerateFPRF(bool single)
   LDR(IndexType::Unsigned, fpscr_reg, PPC_REG, PPCSTATE_OFF(fpscr));

   CMP(input_reg, 0); // Grab sign bit (conveniently the same bit for floats as for integers)
-  ANDI2R(exp_reg, input_reg, INPUT_EXP_MASK); // Grab exponent
+  AND(exp_reg, input_reg, LogicalImm(input_exp_mask, input_size)); // Grab exponent

   // Most branches handle the sign in the same way. Perform that handling before branching
   MOVI2R(ARM64Reg::W3, Common::PPC_FPCLASS_PN);

@@ -449,7 +450,7 @@ void JitArm64::GenerateFPRF(bool single)
   FixupBranch zero_or_denormal = CBZ(exp_reg);

   // exp != 0
-  MOVI2R(temp_reg, INPUT_EXP_MASK);
+  MOVI2R(temp_reg, input_exp_mask);
   CMP(exp_reg, temp_reg);
   FixupBranch nan_or_inf = B(CCFlags::CC_EQ);
|
@ -458,25 +459,25 @@ void JitArm64::GenerateFPRF(bool single)
|
||||||
|
|
||||||
// exp == 0
|
// exp == 0
|
||||||
SetJumpTarget(zero_or_denormal);
|
SetJumpTarget(zero_or_denormal);
|
||||||
TSTI2R(input_reg, INPUT_FRAC_MASK);
|
TST(input_reg, LogicalImm(input_frac_mask, input_size));
|
||||||
FixupBranch denormal = B(CCFlags::CC_NEQ);
|
FixupBranch denormal = B(CCFlags::CC_NEQ);
|
||||||
|
|
||||||
// exp == 0 && frac == 0
|
// exp == 0 && frac == 0
|
||||||
LSR(ARM64Reg::W1, fprf_reg, 3);
|
LSR(ARM64Reg::W1, fprf_reg, 3);
|
||||||
MOVI2R(fprf_reg, Common::PPC_FPCLASS_PZ & ~OUTPUT_SIGN_MASK);
|
MOVI2R(fprf_reg, Common::PPC_FPCLASS_PZ & ~output_sign_mask);
|
||||||
BFI(fprf_reg, ARM64Reg::W1, 4, 1);
|
BFI(fprf_reg, ARM64Reg::W1, 4, 1);
|
||||||
const u8* write_fprf_and_ret = GetCodePtr();
|
const u8* write_fprf_and_ret = GetCodePtr();
|
||||||
emit_write_fprf_and_ret();
|
emit_write_fprf_and_ret();
|
||||||
|
|
||||||
// exp == 0 && frac != 0
|
// exp == 0 && frac != 0
|
||||||
SetJumpTarget(denormal);
|
SetJumpTarget(denormal);
|
||||||
ORRI2R(fprf_reg, fprf_reg, Common::PPC_FPCLASS_PD & ~OUTPUT_SIGN_MASK);
|
ORR(fprf_reg, fprf_reg, LogicalImm(Common::PPC_FPCLASS_PD & ~output_sign_mask, 32));
|
||||||
B(write_fprf_and_ret);
|
B(write_fprf_and_ret);
|
||||||
|
|
||||||
// exp == EXP_MASK
|
// exp == EXP_MASK
|
||||||
SetJumpTarget(nan_or_inf);
|
SetJumpTarget(nan_or_inf);
|
||||||
TSTI2R(input_reg, INPUT_FRAC_MASK);
|
TST(input_reg, LogicalImm(input_frac_mask, input_size));
|
||||||
ORRI2R(ARM64Reg::W1, fprf_reg, Common::PPC_FPCLASS_PINF & ~OUTPUT_SIGN_MASK);
|
ORR(ARM64Reg::W1, fprf_reg, LogicalImm(Common::PPC_FPCLASS_PINF & ~output_sign_mask, 32));
|
||||||
MOVI2R(ARM64Reg::W2, Common::PPC_FPCLASS_QNAN);
|
MOVI2R(ARM64Reg::W2, Common::PPC_FPCLASS_QNAN);
|
||||||
CSEL(fprf_reg, ARM64Reg::W1, ARM64Reg::W2, CCFlags::CC_EQ);
|
CSEL(fprf_reg, ARM64Reg::W1, ARM64Reg::W2, CCFlags::CC_EQ);
|
||||||
B(write_fprf_and_ret);
|
B(write_fprf_and_ret);
|
||||||
|
|
|

@@ -244,7 +244,7 @@ void VertexLoaderARM64::ReadColor(VertexComponentFormat attribute, ColorFormat f
   LDR(IndexType::Unsigned, scratch2_reg, src_reg, offset);

   if (format != ColorFormat::RGBA8888)
-    ORRI2R(scratch2_reg, scratch2_reg, 0xFF000000);
+    ORR(scratch2_reg, scratch2_reg, LogicalImm(0xFF000000, 32));
   STR(IndexType::Unsigned, scratch2_reg, dst_reg, m_dst_ofs);
   load_bytes = format == ColorFormat::RGB888 ? 3 : 4;
   break;

@@ -279,7 +279,7 @@ void VertexLoaderARM64::ReadColor(VertexComponentFormat attribute, ColorFormat f
   ORR(scratch1_reg, scratch1_reg, scratch2_reg, ArithOption(scratch2_reg, ShiftType::LSR, 2));

   // A
-  ORRI2R(scratch1_reg, scratch1_reg, 0xFF000000);
+  ORR(scratch1_reg, scratch1_reg, LogicalImm(0xFF000000, 32));

   STR(IndexType::Unsigned, scratch1_reg, dst_reg, m_dst_ofs);
   load_bytes = 2;
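
All of these call sites follow one pattern: a mask known at compile time moves from a runtime helper (ANDI2R/ORRI2R/EORI2R/TSTI2R) to a LogicalImm(value, width) argument, so the logical-immediate encoding can be computed as a constant expression. As a rough illustration of the check involved (a minimal standalone sketch under assumed names, not Dolphin's actual implementation), a constexpr validity test for ARM64 logical immediates can be written as:

#include <cstdint>

// Sketch only: IsLogicalImm/IsRotatedRunOfOnes are invented names.
// Everything below is a constant expression, so constant masks can be
// rejected with a static_assert instead of a runtime assert.

constexpr bool IsRotatedRunOfOnes(uint64_t v, unsigned size)
{
  // True if v, restricted to `size` bits, is some rotation of a contiguous,
  // nonempty, not-all-ones run of set bits.
  const uint64_t mask = size == 64 ? ~uint64_t{0} : (uint64_t{1} << size) - 1;
  v &= mask;
  if (v == 0 || v == mask)
    return false;
  for (unsigned r = 0; r < size; ++r)
  {
    // Rotate right by r within the element, then test for the form 0...01...1
    const uint64_t rot = r == 0 ? v : ((v >> r) | (v << (size - r))) & mask;
    if (((rot + 1) & rot) == 0)
      return true;
  }
  return false;
}

constexpr bool IsLogicalImm(uint64_t value, unsigned width)
{
  if (width == 32)
  {
    if (value >> 32)
      return false;
    value |= value << 32;  // replicate to 64 bits, as the 32-bit encoding does
  }
  // Try each element size the encoding supports. The value must be a
  // repetition of one element that is itself a rotated run of ones.
  for (unsigned size = 2; size <= 64; size *= 2)
  {
    const uint64_t emask = size == 64 ? ~uint64_t{0} : (uint64_t{1} << size) - 1;
    const uint64_t elem = value & emask;
    bool periodic = true;
    for (unsigned i = size; i < 64; i += size)
    {
      if (((value >> i) & emask) != elem)
        periodic = false;
    }
    if (periodic && IsRotatedRunOfOnes(elem, size))
      return true;
  }
  return false;
}

static_assert(IsLogicalImm(0xFF000000, 32), "run of 8 ones, rotated");
static_assert(IsLogicalImm(0xffffffffffffffc0, 64), "ones above bit 5");
static_assert(!IsLogicalImm(0, 64), "all-zero is never encodable");

With a check of this shape available at compile time, a constant mask like the 0xFF000000 in ReadColor that ever stopped being encodable would fail the build instead of tripping an assert while the JIT is emitting code.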