Common: Fix code styling in Arm64Emitter

This commit is contained in:
Lioncash 2014-09-08 22:52:52 -04:00
parent 0926f1d344
commit bc331ee809
2 changed files with 190 additions and 172 deletions

View File

@@ -7,21 +7,21 @@
namespace Arm64Gen namespace Arm64Gen
{ {
void ARM64XEmitter::SetCodePtr(u8 *ptr) void ARM64XEmitter::SetCodePtr(u8* ptr)
{ {
code = ptr; m_code = ptr;
startcode = code; m_startcode = m_code;
lastCacheFlushEnd = ptr; m_lastCacheFlushEnd = ptr;
} }
const u8 *ARM64XEmitter::GetCodePtr() const const u8* ARM64XEmitter::GetCodePtr() const
{ {
return code; return m_code;
} }
u8 *ARM64XEmitter::GetWritableCodePtr() u8* ARM64XEmitter::GetWritableCodePtr()
{ {
return code; return m_code;
} }
void ARM64XEmitter::ReserveCodeSpace(u32 bytes) void ARM64XEmitter::ReserveCodeSpace(u32 bytes)
@@ -30,29 +30,29 @@ void ARM64XEmitter::ReserveCodeSpace(u32 bytes)
BRK(0); BRK(0);
} }
const u8 *ARM64XEmitter::AlignCode16() const u8* ARM64XEmitter::AlignCode16()
{ {
int c = int((u64)code & 15); int c = int((u64)m_code & 15);
if (c) if (c)
ReserveCodeSpace(16-c); ReserveCodeSpace(16-c);
return code; return m_code;
} }
const u8 *ARM64XEmitter::AlignCodePage() const u8* ARM64XEmitter::AlignCodePage()
{ {
int c = int((u64)code & 4095); int c = int((u64)m_code & 4095);
if (c) if (c)
ReserveCodeSpace(4096-c); ReserveCodeSpace(4096-c);
return code; return m_code;
} }
void ARM64XEmitter::FlushIcache() void ARM64XEmitter::FlushIcache()
{ {
FlushIcacheSection(lastCacheFlushEnd, code); FlushIcacheSection(m_lastCacheFlushEnd, m_code);
lastCacheFlushEnd = code; m_lastCacheFlushEnd = m_code;
} }
void ARM64XEmitter::FlushIcacheSection(u8 *start, u8 *end) void ARM64XEmitter::FlushIcacheSection(u8* start, u8* end)
{ {
#if defined(IOS) #if defined(IOS)
// Header file says this is equivalent to: sys_icache_invalidate(start, end - start); // Header file says this is equivalent to: sys_icache_invalidate(start, end - start);
@@ -184,8 +184,8 @@ u32 LoadStoreExcEnc[][5] = {
void ARM64XEmitter::EncodeCompareBranchInst(u32 op, ARM64Reg Rt, const void* ptr) void ARM64XEmitter::EncodeCompareBranchInst(u32 op, ARM64Reg Rt, const void* ptr)
{ {
bool b64Bit = is64Bit(Rt); bool b64Bit = Is64Bit(Rt);
s64 distance = (s64)ptr - (s64(code) + 8); s64 distance = (s64)ptr - (s64(m_code) + 8);
_assert_msg_(DYNA_REC, !(distance & 0x3), "%s: distance must be a multiple of 4: %lx", __FUNCTION__, distance); _assert_msg_(DYNA_REC, !(distance & 0x3), "%s: distance must be a multiple of 4: %lx", __FUNCTION__, distance);
@@ -200,8 +200,8 @@ void ARM64XEmitter::EncodeCompareBranchInst(u32 op, ARM64Reg Rt, const void* ptr
void ARM64XEmitter::EncodeTestBranchInst(u32 op, ARM64Reg Rt, u8 bits, const void* ptr) void ARM64XEmitter::EncodeTestBranchInst(u32 op, ARM64Reg Rt, u8 bits, const void* ptr)
{ {
bool b64Bit = is64Bit(Rt); bool b64Bit = Is64Bit(Rt);
s64 distance = (s64)ptr - (s64(code) + 8); s64 distance = (s64)ptr - (s64(m_code) + 8);
_assert_msg_(DYNA_REC, !(distance & 0x3), "%s: distance must be a multiple of 4: %lx", __FUNCTION__, distance); _assert_msg_(DYNA_REC, !(distance & 0x3), "%s: distance must be a multiple of 4: %lx", __FUNCTION__, distance);
@@ -216,7 +216,7 @@ void ARM64XEmitter::EncodeTestBranchInst(u32 op, ARM64Reg Rt, u8 bits, const voi
void ARM64XEmitter::EncodeUnconditionalBranchInst(u32 op, const void* ptr) void ARM64XEmitter::EncodeUnconditionalBranchInst(u32 op, const void* ptr)
{ {
s64 distance = (s64)ptr - s64(code); s64 distance = (s64)ptr - s64(m_code);
_assert_msg_(DYNA_REC, !(distance & 0x3), "%s: distance must be a multiple of 4: %lx", __FUNCTION__, distance); _assert_msg_(DYNA_REC, !(distance & 0x3), "%s: distance must be a multiple of 4: %lx", __FUNCTION__, distance);
@@ -256,7 +256,7 @@ void ARM64XEmitter::EncodeArithmeticInst(u32 instenc, bool flags, ARM64Reg Rd, A
void ARM64XEmitter::EncodeArithmeticCarryInst(u32 op, bool flags, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) void ARM64XEmitter::EncodeArithmeticCarryInst(u32 op, bool flags, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{ {
bool b64Bit = is64Bit(Rd); bool b64Bit = Is64Bit(Rd);
Rd = DecodeReg(Rd); Rd = DecodeReg(Rd);
Rm = DecodeReg(Rm); Rm = DecodeReg(Rm);
@@ -267,7 +267,7 @@ void ARM64XEmitter::EncodeArithmeticCarryInst(u32 op, bool flags, ARM64Reg Rd, A
void ARM64XEmitter::EncodeCondCompareImmInst(u32 op, ARM64Reg Rn, u32 imm, u32 nzcv, CCFlags cond) void ARM64XEmitter::EncodeCondCompareImmInst(u32 op, ARM64Reg Rn, u32 imm, u32 nzcv, CCFlags cond)
{ {
bool b64Bit = is64Bit(Rn); bool b64Bit = Is64Bit(Rn);
_assert_msg_(DYNA_REC, !(imm & ~0x1F), "%s: too large immediate: %d", __FUNCTION__, imm) _assert_msg_(DYNA_REC, !(imm & ~0x1F), "%s: too large immediate: %d", __FUNCTION__, imm)
_assert_msg_(DYNA_REC, !(nzcv & ~0xF), "%s: Flags out of range: %d", __FUNCTION__, nzcv) _assert_msg_(DYNA_REC, !(nzcv & ~0xF), "%s: Flags out of range: %d", __FUNCTION__, nzcv)
@@ -279,7 +279,7 @@ void ARM64XEmitter::EncodeCondCompareImmInst(u32 op, ARM64Reg Rn, u32 imm, u32 n
void ARM64XEmitter::EncodeCondCompareRegInst(u32 op, ARM64Reg Rn, ARM64Reg Rm, u32 nzcv, CCFlags cond) void ARM64XEmitter::EncodeCondCompareRegInst(u32 op, ARM64Reg Rn, ARM64Reg Rm, u32 nzcv, CCFlags cond)
{ {
bool b64Bit = is64Bit(Rm); bool b64Bit = Is64Bit(Rm);
_assert_msg_(DYNA_REC, !(nzcv & ~0xF), "%s: Flags out of range: %d", __FUNCTION__, nzcv) _assert_msg_(DYNA_REC, !(nzcv & ~0xF), "%s: Flags out of range: %d", __FUNCTION__, nzcv)
@@ -291,7 +291,7 @@ void ARM64XEmitter::EncodeCondCompareRegInst(u32 op, ARM64Reg Rn, ARM64Reg Rm, u
void ARM64XEmitter::EncodeCondSelectInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, CCFlags cond) void ARM64XEmitter::EncodeCondSelectInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, CCFlags cond)
{ {
bool b64Bit = is64Bit(Rd); bool b64Bit = Is64Bit(Rd);
Rd = DecodeReg(Rd); Rd = DecodeReg(Rd);
Rm = DecodeReg(Rm); Rm = DecodeReg(Rm);
@@ -303,7 +303,7 @@ void ARM64XEmitter::EncodeCondSelectInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn,
void ARM64XEmitter::EncodeData1SrcInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn) void ARM64XEmitter::EncodeData1SrcInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn)
{ {
bool b64Bit = is64Bit(Rd); bool b64Bit = Is64Bit(Rd);
Rd = DecodeReg(Rd); Rd = DecodeReg(Rd);
Rn = DecodeReg(Rn); Rn = DecodeReg(Rn);
@@ -314,7 +314,7 @@ void ARM64XEmitter::EncodeData1SrcInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn)
void ARM64XEmitter::EncodeData2SrcInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) void ARM64XEmitter::EncodeData2SrcInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
{ {
bool b64Bit = is64Bit(Rd); bool b64Bit = Is64Bit(Rd);
Rd = DecodeReg(Rd); Rd = DecodeReg(Rd);
Rm = DecodeReg(Rm); Rm = DecodeReg(Rm);
@@ -326,7 +326,7 @@ void ARM64XEmitter::EncodeData2SrcInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, AR
void ARM64XEmitter::EncodeData3SrcInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra) void ARM64XEmitter::EncodeData3SrcInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra)
{ {
bool b64Bit = is64Bit(Rd); bool b64Bit = Is64Bit(Rd);
Rd = DecodeReg(Rd); Rd = DecodeReg(Rd);
Rm = DecodeReg(Rm); Rm = DecodeReg(Rm);
@@ -348,8 +348,8 @@ void ARM64XEmitter::EncodeLogicalInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM
void ARM64XEmitter::EncodeLoadRegisterInst(u32 bitop, ARM64Reg Rt, u32 imm) void ARM64XEmitter::EncodeLoadRegisterInst(u32 bitop, ARM64Reg Rt, u32 imm)
{ {
bool b64Bit = is64Bit(Rt); bool b64Bit = Is64Bit(Rt);
bool bVec = isVector(Rt); bool bVec = IsVector(Rt);
_assert_msg_(DYNA_REC, !(imm & 0xFFFFF), "%s: offset too large %d", __FUNCTION__, imm); _assert_msg_(DYNA_REC, !(imm & 0xFFFFF), "%s: offset too large %d", __FUNCTION__, imm);
@@ -373,9 +373,9 @@ void ARM64XEmitter::EncodeLoadStoreExcInst(u32 instenc,
void ARM64XEmitter::EncodeLoadStorePairedInst(u32 op, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, u32 imm) void ARM64XEmitter::EncodeLoadStorePairedInst(u32 op, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, u32 imm)
{ {
bool b64Bit = is64Bit(Rt); bool b64Bit = Is64Bit(Rt);
bool b128Bit = is128Bit(Rt); bool b128Bit = Is128Bit(Rt);
bool bVec = isVector(Rt); bool bVec = IsVector(Rt);
if (b128Bit) if (b128Bit)
imm >>= 4; imm >>= 4;
@@ -402,8 +402,8 @@ void ARM64XEmitter::EncodeLoadStorePairedInst(u32 op, ARM64Reg Rt, ARM64Reg Rt2,
void ARM64XEmitter::EncodeLoadStoreIndexedInst(u32 op, u32 op2, ARM64Reg Rt, ARM64Reg Rn, u32 imm) void ARM64XEmitter::EncodeLoadStoreIndexedInst(u32 op, u32 op2, ARM64Reg Rt, ARM64Reg Rn, u32 imm)
{ {
bool b64Bit = is64Bit(Rt); bool b64Bit = Is64Bit(Rt);
bool bVec = isVector(Rt); bool bVec = IsVector(Rt);
if (b64Bit) if (b64Bit)
imm >>= 3; imm >>= 3;
@@ -419,8 +419,8 @@ void ARM64XEmitter::EncodeLoadStoreIndexedInst(u32 op, u32 op2, ARM64Reg Rt, ARM
void ARM64XEmitter::EncodeLoadStoreIndexedInst(u32 op, ARM64Reg Rt, ARM64Reg Rn, u32 imm) void ARM64XEmitter::EncodeLoadStoreIndexedInst(u32 op, ARM64Reg Rt, ARM64Reg Rn, u32 imm)
{ {
bool b64Bit = is64Bit(Rt); bool b64Bit = Is64Bit(Rt);
bool bVec = isVector(Rt); bool bVec = IsVector(Rt);
if (b64Bit) if (b64Bit)
imm >>= 3; imm >>= 3;
@@ -436,7 +436,7 @@ void ARM64XEmitter::EncodeLoadStoreIndexedInst(u32 op, ARM64Reg Rt, ARM64Reg Rn,
void ARM64XEmitter::EncodeMOVWideInst(u32 op, ARM64Reg Rd, u32 imm, ShiftAmount pos) void ARM64XEmitter::EncodeMOVWideInst(u32 op, ARM64Reg Rd, u32 imm, ShiftAmount pos)
{ {
bool b64Bit = is64Bit(Rd); bool b64Bit = Is64Bit(Rd);
_assert_msg_(DYNA_REC, !(imm & ~0xFFFF), "%s: immediate out of range: %d", __FUNCTION__, imm); _assert_msg_(DYNA_REC, !(imm & ~0xFFFF), "%s: immediate out of range: %d", __FUNCTION__, imm);
@@ -446,7 +446,7 @@ void ARM64XEmitter::EncodeMOVWideInst(u32 op, ARM64Reg Rd, u32 imm, ShiftAmount
void ARM64XEmitter::EncodeBitfieldMOVInst(u32 op, ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms) void ARM64XEmitter::EncodeBitfieldMOVInst(u32 op, ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms)
{ {
bool b64Bit = is64Bit(Rd); bool b64Bit = Is64Bit(Rd);
Rd = DecodeReg(Rd); Rd = DecodeReg(Rd);
Rn = DecodeReg(Rn); Rn = DecodeReg(Rn);
@@ -466,7 +466,7 @@ void ARM64XEmitter::EncodeLoadStoreRegisterOffset(u32 size, u32 opc, ARM64Reg Rt
void ARM64XEmitter::EncodeAddSubImmInst(u32 op, bool flags, u32 shift, u32 imm, ARM64Reg Rn, ARM64Reg Rd) void ARM64XEmitter::EncodeAddSubImmInst(u32 op, bool flags, u32 shift, u32 imm, ARM64Reg Rn, ARM64Reg Rd)
{ {
bool b64Bit = is64Bit(Rd); bool b64Bit = Is64Bit(Rd);
_assert_msg_(DYNA_REC, !(imm & ~0xFFF), "%s: immediate too large: %x", __FUNCTION__, imm); _assert_msg_(DYNA_REC, !(imm & ~0xFFF), "%s: immediate too large: %x", __FUNCTION__, imm);
@@ -480,7 +480,7 @@ void ARM64XEmitter::EncodeLogicalImmInst(u32 op, ARM64Reg Rd, ARM64Reg Rn, u32 i
{ {
// Sometimes Rd is fixed to SP, but can still be 32bit or 64bit. // Sometimes Rd is fixed to SP, but can still be 32bit or 64bit.
// Use Rn to determine bitness here. // Use Rn to determine bitness here.
bool b64Bit = is64Bit(Rn); bool b64Bit = Is64Bit(Rn);
Rd = DecodeReg(Rd); Rd = DecodeReg(Rd);
Rn = DecodeReg(Rn); Rn = DecodeReg(Rn);
@@ -490,11 +490,11 @@ void ARM64XEmitter::EncodeLogicalImmInst(u32 op, ARM64Reg Rd, ARM64Reg Rn, u32 i
} }
// FixupBranch branching // FixupBranch branching
void ARM64XEmitter::SetJumpTarget(FixupBranch const &branch) void ARM64XEmitter::SetJumpTarget(FixupBranch const& branch)
{ {
bool Not = false; bool Not = false;
u32 inst = 0; u32 inst = 0;
s64 distance = (s64)(code - branch.ptr); s64 distance = (s64)(m_code - branch.ptr);
distance >>= 2; distance >>= 2;
switch (branch.type) switch (branch.type)
@@ -504,7 +504,7 @@ void ARM64XEmitter::SetJumpTarget(FixupBranch const &branch)
case 0: // CBZ case 0: // CBZ
{ {
_assert_msg_(DYNA_REC, distance >= -0xFFFFF && distance < 0xFFFFF, "%s(%d): Received too large distance: %lx", __FUNCTION__, branch.type, distance); _assert_msg_(DYNA_REC, distance >= -0xFFFFF && distance < 0xFFFFF, "%s(%d): Received too large distance: %lx", __FUNCTION__, branch.type, distance);
bool b64Bit = is64Bit(branch.reg); bool b64Bit = Is64Bit(branch.reg);
ARM64Reg reg = DecodeReg(branch.reg); ARM64Reg reg = DecodeReg(branch.reg);
inst = (b64Bit << 31) | (0x1A << 25) | (Not << 24) | (distance << 5) | reg; inst = (b64Bit << 31) | (0x1A << 25) | (Not << 24) | (distance << 5) | reg;
} }
@@ -537,7 +537,7 @@ void ARM64XEmitter::SetJumpTarget(FixupBranch const &branch)
FixupBranch ARM64XEmitter::CBZ(ARM64Reg Rt) FixupBranch ARM64XEmitter::CBZ(ARM64Reg Rt)
{ {
FixupBranch branch; FixupBranch branch;
branch.ptr = code; branch.ptr = m_code;
branch.type = 0; branch.type = 0;
branch.reg = Rt; branch.reg = Rt;
HINT(HINT_NOP); HINT(HINT_NOP);
@@ -546,7 +546,7 @@ FixupBranch ARM64XEmitter::CBZ(ARM64Reg Rt)
FixupBranch ARM64XEmitter::CBNZ(ARM64Reg Rt) FixupBranch ARM64XEmitter::CBNZ(ARM64Reg Rt)
{ {
FixupBranch branch; FixupBranch branch;
branch.ptr = code; branch.ptr = m_code;
branch.type = 1; branch.type = 1;
branch.reg = Rt; branch.reg = Rt;
HINT(HINT_NOP); HINT(HINT_NOP);
@@ -555,7 +555,7 @@ FixupBranch ARM64XEmitter::CBNZ(ARM64Reg Rt)
FixupBranch ARM64XEmitter::B(CCFlags cond) FixupBranch ARM64XEmitter::B(CCFlags cond)
{ {
FixupBranch branch; FixupBranch branch;
branch.ptr = code; branch.ptr = m_code;
branch.type = 2; branch.type = 2;
branch.cond = cond; branch.cond = cond;
HINT(HINT_NOP); HINT(HINT_NOP);
@@ -564,7 +564,7 @@ FixupBranch ARM64XEmitter::B(CCFlags cond)
FixupBranch ARM64XEmitter::TBZ(ARM64Reg Rt, u8 bit) FixupBranch ARM64XEmitter::TBZ(ARM64Reg Rt, u8 bit)
{ {
FixupBranch branch; FixupBranch branch;
branch.ptr = code; branch.ptr = m_code;
branch.type = 3; branch.type = 3;
branch.reg = Rt; branch.reg = Rt;
branch.bit = bit; branch.bit = bit;
@@ -574,7 +574,7 @@ FixupBranch ARM64XEmitter::TBZ(ARM64Reg Rt, u8 bit)
FixupBranch ARM64XEmitter::TBNZ(ARM64Reg Rt, u8 bit) FixupBranch ARM64XEmitter::TBNZ(ARM64Reg Rt, u8 bit)
{ {
FixupBranch branch; FixupBranch branch;
branch.ptr = code; branch.ptr = m_code;
branch.type = 4; branch.type = 4;
branch.reg = Rt; branch.reg = Rt;
branch.bit = bit; branch.bit = bit;
@@ -584,7 +584,7 @@ FixupBranch ARM64XEmitter::TBNZ(ARM64Reg Rt, u8 bit)
FixupBranch ARM64XEmitter::B() FixupBranch ARM64XEmitter::B()
{ {
FixupBranch branch; FixupBranch branch;
branch.ptr = code; branch.ptr = m_code;
branch.type = 5; branch.type = 5;
HINT(HINT_NOP); HINT(HINT_NOP);
return branch; return branch;
@@ -592,7 +592,7 @@ FixupBranch ARM64XEmitter::B()
FixupBranch ARM64XEmitter::BL() FixupBranch ARM64XEmitter::BL()
{ {
FixupBranch branch; FixupBranch branch;
branch.ptr = code; branch.ptr = m_code;
branch.type = 6; branch.type = 6;
HINT(HINT_NOP); HINT(HINT_NOP);
return branch; return branch;
@@ -611,7 +611,7 @@ void ARM64XEmitter::CBNZ(ARM64Reg Rt, const void* ptr)
// Conditional Branch // Conditional Branch
void ARM64XEmitter::B(CCFlags cond, const void* ptr) void ARM64XEmitter::B(CCFlags cond, const void* ptr)
{ {
s64 distance = (s64)ptr - (s64(code) + 8); s64 distance = (s64)ptr - (s64(m_code) + 8);
distance >>= 2; distance >>= 2;
_assert_msg_(DYNA_REC, distance >= -0xFFFFF && distance < 0xFFFFF, "%s: Received too large distance: %lx", __FUNCTION__, distance); _assert_msg_(DYNA_REC, distance >= -0xFFFFF && distance < 0xFFFFF, "%s: Received too large distance: %lx", __FUNCTION__, distance);
@@ -630,11 +630,11 @@ void ARM64XEmitter::TBNZ(ARM64Reg Rt, u8 bits, const void* ptr)
} }
// Unconditional Branch // Unconditional Branch
void ARM64XEmitter::B(const void *ptr) void ARM64XEmitter::B(const void* ptr)
{ {
EncodeUnconditionalBranchInst(0, ptr); EncodeUnconditionalBranchInst(0, ptr);
} }
void ARM64XEmitter::BL(const void *ptr) void ARM64XEmitter::BL(const void* ptr)
{ {
EncodeUnconditionalBranchInst(1, ptr); EncodeUnconditionalBranchInst(1, ptr);
} }
@@ -1051,7 +1051,7 @@ void ARM64XEmitter::SUBS(ARM64Reg Rd, ARM64Reg Rn, u32 imm, bool shift)
} }
void ARM64XEmitter::CMP(ARM64Reg Rn, u32 imm, bool shift) void ARM64XEmitter::CMP(ARM64Reg Rn, u32 imm, bool shift)
{ {
EncodeAddSubImmInst(1, true, shift, imm, Rn, is64Bit(Rn) ? SP : WSP); EncodeAddSubImmInst(1, true, shift, imm, Rn, Is64Bit(Rn) ? SP : WSP);
} }
// Data Processing (Immediate) // Data Processing (Immediate)
@@ -1147,43 +1147,43 @@ void ARM64XEmitter::LDARH(ARM64Reg Rt, ARM64Reg Rn)
} }
void ARM64XEmitter::STXR(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn) void ARM64XEmitter::STXR(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn)
{ {
EncodeLoadStoreExcInst(12 + is64Bit(Rt), Rs, SP, Rt, Rn); EncodeLoadStoreExcInst(12 + Is64Bit(Rt), Rs, SP, Rt, Rn);
} }
void ARM64XEmitter::STLXR(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn) void ARM64XEmitter::STLXR(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn)
{ {
EncodeLoadStoreExcInst(14 + is64Bit(Rt), Rs, SP, Rt, Rn); EncodeLoadStoreExcInst(14 + Is64Bit(Rt), Rs, SP, Rt, Rn);
} }
void ARM64XEmitter::STXP(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn) void ARM64XEmitter::STXP(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn)
{ {
EncodeLoadStoreExcInst(16 + is64Bit(Rt), Rs, Rt2, Rt, Rn); EncodeLoadStoreExcInst(16 + Is64Bit(Rt), Rs, Rt2, Rt, Rn);
} }
void ARM64XEmitter::STLXP(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn) void ARM64XEmitter::STLXP(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn)
{ {
EncodeLoadStoreExcInst(18 + is64Bit(Rt), Rs, Rt2, Rt, Rn); EncodeLoadStoreExcInst(18 + Is64Bit(Rt), Rs, Rt2, Rt, Rn);
} }
void ARM64XEmitter::LDXR(ARM64Reg Rt, ARM64Reg Rn) void ARM64XEmitter::LDXR(ARM64Reg Rt, ARM64Reg Rn)
{ {
EncodeLoadStoreExcInst(20 + is64Bit(Rt), SP, SP, Rt, Rn); EncodeLoadStoreExcInst(20 + Is64Bit(Rt), SP, SP, Rt, Rn);
} }
void ARM64XEmitter::LDAXR(ARM64Reg Rt, ARM64Reg Rn) void ARM64XEmitter::LDAXR(ARM64Reg Rt, ARM64Reg Rn)
{ {
EncodeLoadStoreExcInst(22 + is64Bit(Rt), SP, SP, Rt, Rn); EncodeLoadStoreExcInst(22 + Is64Bit(Rt), SP, SP, Rt, Rn);
} }
void ARM64XEmitter::LDXP(ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn) void ARM64XEmitter::LDXP(ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn)
{ {
EncodeLoadStoreExcInst(24 + is64Bit(Rt), SP, Rt2, Rt, Rn); EncodeLoadStoreExcInst(24 + Is64Bit(Rt), SP, Rt2, Rt, Rn);
} }
void ARM64XEmitter::LDAXP(ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn) void ARM64XEmitter::LDAXP(ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn)
{ {
EncodeLoadStoreExcInst(26 + is64Bit(Rt), SP, Rt2, Rt, Rn); EncodeLoadStoreExcInst(26 + Is64Bit(Rt), SP, Rt2, Rt, Rn);
} }
void ARM64XEmitter::STLR(ARM64Reg Rt, ARM64Reg Rn) void ARM64XEmitter::STLR(ARM64Reg Rt, ARM64Reg Rn)
{ {
EncodeLoadStoreExcInst(28 + is64Bit(Rt), SP, SP, Rt, Rn); EncodeLoadStoreExcInst(28 + Is64Bit(Rt), SP, SP, Rt, Rn);
} }
void ARM64XEmitter::LDAR(ARM64Reg Rt, ARM64Reg Rn) void ARM64XEmitter::LDAR(ARM64Reg Rt, ARM64Reg Rn)
{ {
EncodeLoadStoreExcInst(30 + is64Bit(Rt), SP, SP, Rt, Rn); EncodeLoadStoreExcInst(30 + Is64Bit(Rt), SP, SP, Rt, Rn);
} }
// Load/Store no-allocate pair (offset) // Load/Store no-allocate pair (offset)
@@ -1217,9 +1217,9 @@ void ARM64XEmitter::LDRB(IndexType type, ARM64Reg Rt, ARM64Reg Rn, u32 imm)
void ARM64XEmitter::LDRSB(IndexType type, ARM64Reg Rt, ARM64Reg Rn, u32 imm) void ARM64XEmitter::LDRSB(IndexType type, ARM64Reg Rt, ARM64Reg Rn, u32 imm)
{ {
if (type == INDEX_UNSIGNED) if (type == INDEX_UNSIGNED)
EncodeLoadStoreIndexedInst(is64Bit(Rt) ? 0x0E6 : 0x0E7, Rt, Rn, imm); EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x0E6 : 0x0E7, Rt, Rn, imm);
else else
EncodeLoadStoreIndexedInst(is64Bit(Rt) ? 0x0E2 : 0x0E3, EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x0E2 : 0x0E3,
type == INDEX_POST ? 1 : 3, Rt, Rn, imm); type == INDEX_POST ? 1 : 3, Rt, Rn, imm);
} }
void ARM64XEmitter::STRH(IndexType type, ARM64Reg Rt, ARM64Reg Rn, u32 imm) void ARM64XEmitter::STRH(IndexType type, ARM64Reg Rt, ARM64Reg Rn, u32 imm)
@@ -1241,25 +1241,25 @@ void ARM64XEmitter::LDRH(IndexType type, ARM64Reg Rt, ARM64Reg Rn, u32 imm)
void ARM64XEmitter::LDRSH(IndexType type, ARM64Reg Rt, ARM64Reg Rn, u32 imm) void ARM64XEmitter::LDRSH(IndexType type, ARM64Reg Rt, ARM64Reg Rn, u32 imm)
{ {
if (type == INDEX_UNSIGNED) if (type == INDEX_UNSIGNED)
EncodeLoadStoreIndexedInst(is64Bit(Rt) ? 0x1E6 : 0x1E7, Rt, Rn, imm); EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x1E6 : 0x1E7, Rt, Rn, imm);
else else
EncodeLoadStoreIndexedInst(is64Bit(Rt) ? 0x1E2 : 0x1E3, EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x1E2 : 0x1E3,
type == INDEX_POST ? 1 : 3, Rt, Rn, imm); type == INDEX_POST ? 1 : 3, Rt, Rn, imm);
} }
void ARM64XEmitter::STR(IndexType type, ARM64Reg Rt, ARM64Reg Rn, u32 imm) void ARM64XEmitter::STR(IndexType type, ARM64Reg Rt, ARM64Reg Rn, u32 imm)
{ {
if (type == INDEX_UNSIGNED) if (type == INDEX_UNSIGNED)
EncodeLoadStoreIndexedInst(is64Bit(Rt) ? 0x3E4 : 0x2E4, Rt, Rn, imm); EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x3E4 : 0x2E4, Rt, Rn, imm);
else else
EncodeLoadStoreIndexedInst(is64Bit(Rt) ? 0x3E0 : 0x2E0, EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x3E0 : 0x2E0,
type == INDEX_POST ? 1 : 3, Rt, Rn, imm); type == INDEX_POST ? 1 : 3, Rt, Rn, imm);
} }
void ARM64XEmitter::LDR(IndexType type, ARM64Reg Rt, ARM64Reg Rn, u32 imm) void ARM64XEmitter::LDR(IndexType type, ARM64Reg Rt, ARM64Reg Rn, u32 imm)
{ {
if (type == INDEX_UNSIGNED) if (type == INDEX_UNSIGNED)
EncodeLoadStoreIndexedInst(is64Bit(Rt) ? 0x3E5 : 0x2E5, Rt, Rn, imm); EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x3E5 : 0x2E5, Rt, Rn, imm);
else else
EncodeLoadStoreIndexedInst(is64Bit(Rt) ? 0x3E1 : 0x2E1, EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x3E1 : 0x2E1,
type == INDEX_POST ? 1 : 3, Rt, Rn, imm); type == INDEX_POST ? 1 : 3, Rt, Rn, imm);
} }
void ARM64XEmitter::LDRSW(IndexType type, ARM64Reg Rt, ARM64Reg Rn, u32 imm) void ARM64XEmitter::LDRSW(IndexType type, ARM64Reg Rt, ARM64Reg Rn, u32 imm)
@@ -1282,7 +1282,7 @@ void ARM64XEmitter::LDRB(ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm, ExtendType exten
} }
void ARM64XEmitter::LDRSB(ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm, ExtendType extend) void ARM64XEmitter::LDRSB(ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm, ExtendType extend)
{ {
bool b64Bit = is64Bit(Rt); bool b64Bit = Is64Bit(Rt);
EncodeLoadStoreRegisterOffset(0, 3 - b64Bit, Rt, Rn, Rm, extend); EncodeLoadStoreRegisterOffset(0, 3 - b64Bit, Rt, Rn, Rm, extend);
} }
void ARM64XEmitter::STRH(ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm, ExtendType extend) void ARM64XEmitter::STRH(ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm, ExtendType extend)
@@ -1295,17 +1295,17 @@ void ARM64XEmitter::LDRH(ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm, ExtendType exten
} }
void ARM64XEmitter::LDRSH(ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm, ExtendType extend) void ARM64XEmitter::LDRSH(ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm, ExtendType extend)
{ {
bool b64Bit = is64Bit(Rt); bool b64Bit = Is64Bit(Rt);
EncodeLoadStoreRegisterOffset(1, 3 - b64Bit, Rt, Rn, Rm, extend); EncodeLoadStoreRegisterOffset(1, 3 - b64Bit, Rt, Rn, Rm, extend);
} }
void ARM64XEmitter::STR(ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm, ExtendType extend) void ARM64XEmitter::STR(ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm, ExtendType extend)
{ {
bool b64Bit = is64Bit(Rt); bool b64Bit = Is64Bit(Rt);
EncodeLoadStoreRegisterOffset(2 + b64Bit, 0, Rt, Rn, Rm, extend); EncodeLoadStoreRegisterOffset(2 + b64Bit, 0, Rt, Rn, Rm, extend);
} }
void ARM64XEmitter::LDR(ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm, ExtendType extend) void ARM64XEmitter::LDR(ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm, ExtendType extend)
{ {
bool b64Bit = is64Bit(Rt); bool b64Bit = Is64Bit(Rt);
EncodeLoadStoreRegisterOffset(2 + b64Bit, 1, Rt, Rn, Rm, extend); EncodeLoadStoreRegisterOffset(2 + b64Bit, 1, Rt, Rn, Rm, extend);
} }
void ARM64XEmitter::LDRSW(ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm, ExtendType extend) void ARM64XEmitter::LDRSW(ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm, ExtendType extend)
@@ -1320,11 +1320,11 @@ void ARM64XEmitter::PRFM(ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm, ExtendType exten
// Wrapper around MOVZ+MOVK // Wrapper around MOVZ+MOVK
void ARM64XEmitter::MOVI2R(ARM64Reg Rd, u64 imm, bool optimize) void ARM64XEmitter::MOVI2R(ARM64Reg Rd, u64 imm, bool optimize)
{ {
unsigned parts = is64Bit(Rd) ? 4 : 2; unsigned parts = Is64Bit(Rd) ? 4 : 2;
bool upload_part[4] = {}; bool upload_part[4] = {};
bool need_movz = false; bool need_movz = false;
if (!is64Bit(Rd)) if (!Is64Bit(Rd))
_assert_msg_(DYNA_REC, !(imm >> 32), "%s: immediate doesn't fit in 32bit register: %lx", __FUNCTION__, imm); _assert_msg_(DYNA_REC, !(imm >> 32), "%s: immediate doesn't fit in 32bit register: %lx", __FUNCTION__, imm);
// XXX: Optimize more // XXX: Optimize more

View File

@@ -74,9 +74,9 @@ enum ARM64Reg
INVALID_REG = 0xFFFFFFFF INVALID_REG = 0xFFFFFFFF
}; };
inline bool is64Bit(ARM64Reg reg) { return reg & 0x20; } inline bool Is64Bit(ARM64Reg reg) { return reg & 0x20; }
inline bool is128Bit(ARM64Reg reg) { return reg & 0xC0; } inline bool Is128Bit(ARM64Reg reg) { return reg & 0xC0; }
inline bool isVector(ARM64Reg reg) { return (reg & 0xC0) != 0; } inline bool IsVector(ARM64Reg reg) { return (reg & 0xC0) != 0; }
inline ARM64Reg DecodeReg(ARM64Reg reg) { return (ARM64Reg)(reg & 0x1F); } inline ARM64Reg DecodeReg(ARM64Reg reg) { return (ARM64Reg)(reg & 0x1F); }
inline ARM64Reg EncodeRegTo64(ARM64Reg reg) { return (ARM64Reg)(reg | 0x20); } inline ARM64Reg EncodeRegTo64(ARM64Reg reg) { return (ARM64Reg)(reg | 0x20); }
@@ -122,7 +122,7 @@ enum ExtendType
struct FixupBranch struct FixupBranch
{ {
u8 *ptr; u8* ptr;
// Type defines // Type defines
// 0 = CBZ (32bit) // 0 = CBZ (32bit)
// 1 = CBNZ (32bit) // 1 = CBNZ (32bit)
@@ -178,93 +178,101 @@ enum BarrierType
class ArithOption class ArithOption
{ {
public: public:
enum WidthSpecifier { enum WidthSpecifier
WIDTH_DEFAULT, {
WIDTH_32BIT, WIDTH_DEFAULT,
WIDTH_64BIT, WIDTH_32BIT,
}; WIDTH_64BIT,
enum ExtendSpecifier { };
EXTEND_UXTB = 0x0,
EXTEND_UXTH = 0x1, enum ExtendSpecifier
EXTEND_UXTW = 0x2, /* Also LSL on 32bit width */ {
EXTEND_UXTX = 0x3, /* Also LSL on 64bit width */ EXTEND_UXTB = 0x0,
EXTEND_SXTB = 0x4, EXTEND_UXTH = 0x1,
EXTEND_SXTH = 0x5, EXTEND_UXTW = 0x2, /* Also LSL on 32bit width */
EXTEND_SXTW = 0x6, EXTEND_UXTX = 0x3, /* Also LSL on 64bit width */
EXTEND_SXTX = 0x7, EXTEND_SXTB = 0x4,
}; EXTEND_SXTH = 0x5,
enum TypeSpecifier { EXTEND_SXTW = 0x6,
TYPE_EXTENDEDREG, EXTEND_SXTX = 0x7,
TYPE_IMM, };
TYPE_SHIFTEDREG,
}; enum TypeSpecifier
private: {
ARM64Reg _destReg; TYPE_EXTENDEDREG,
WidthSpecifier _width; TYPE_IMM,
ExtendSpecifier _extend; TYPE_SHIFTEDREG,
TypeSpecifier _type; };
ShiftType _shifttype;
u32 _shift; private:
public: ARM64Reg m_destReg;
ArithOption(ARM64Reg Rd) WidthSpecifier m_width;
ExtendSpecifier m_extend;
TypeSpecifier m_type;
ShiftType m_shifttype;
u32 m_shift;
public:
ArithOption(ARM64Reg Rd)
{
m_destReg = Rd;
m_shift = 0;
m_type = TYPE_EXTENDEDREG;
if (Is64Bit(Rd))
{ {
_destReg = Rd; m_width = WIDTH_64BIT;
_shift = 0; m_extend = EXTEND_UXTX;
_type = TYPE_EXTENDEDREG;
if (is64Bit(Rd))
{
_width = WIDTH_64BIT;
_extend = EXTEND_UXTX;
}
else
{
_width = WIDTH_32BIT;
_extend = EXTEND_UXTW;
}
} }
ArithOption(ARM64Reg Rd, ShiftType ShiftType, u32 Shift) else
{ {
_destReg = Rd; m_width = WIDTH_32BIT;
_shift = Shift; m_extend = EXTEND_UXTW;
_shifttype = ShiftType;
_type = TYPE_SHIFTEDREG;
if (is64Bit(Rd))
_width = WIDTH_64BIT;
else
_width = WIDTH_32BIT;
} }
TypeSpecifier GetType() }
ArithOption(ARM64Reg Rd, ShiftType shift_type, u32 shift)
{
m_destReg = Rd;
m_shift = shift;
m_shifttype = shift_type;
m_type = TYPE_SHIFTEDREG;
if (Is64Bit(Rd))
m_width = WIDTH_64BIT;
else
m_width = WIDTH_32BIT;
}
TypeSpecifier GetType()
{
return m_type;
}
u32 GetData()
{
switch (m_type)
{ {
return _type; case TYPE_EXTENDEDREG:
} return (m_width == WIDTH_64BIT ? (1 << 31) : 0) |
u32 GetData() (m_extend << 13) |
{ (m_shift << 10);
switch (_type) break;
{ case TYPE_SHIFTEDREG:
case TYPE_EXTENDEDREG: return (m_width == WIDTH_64BIT ? (1 << 31) : 0) |
return (_width == WIDTH_64BIT ? (1 << 31) : 0) | (m_shifttype << 22) |
(_extend << 13) | (m_shift << 10);
(_shift << 10); break;
break; default:
case TYPE_SHIFTEDREG: _dbg_assert_msg_(DYNA_REC, false, "Invalid type in GetData");
return (_width == WIDTH_64BIT ? (1 << 31) : 0) | break;
(_shifttype << 22) |
(_shift << 10);
break;
default:
_dbg_assert_msg_(DYNA_REC, false, "Invalid type in GetData");
break;
}
return 0;
} }
return 0;
}
}; };
class ARM64XEmitter class ARM64XEmitter
{ {
private: private:
u8 *code, *startcode; u8* m_code;
u8 *lastCacheFlushEnd; u8* m_startcode;
u8* m_lastCacheFlushEnd;
void EncodeCompareBranchInst(u32 op, ARM64Reg Rt, const void* ptr); void EncodeCompareBranchInst(u32 op, ARM64Reg Rt, const void* ptr);
void EncodeTestBranchInst(u32 op, ARM64Reg Rt, u8 bits, const void* ptr); void EncodeTestBranchInst(u32 op, ARM64Reg Rt, u8 bits, const void* ptr);
@ -293,23 +301,33 @@ private:
void EncodeLogicalImmInst(u32 op, ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms); void EncodeLogicalImmInst(u32 op, ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms);
protected: protected:
inline void Write32(u32 value) {*(u32*)code = value; code+=4;} inline void Write32(u32 value)
{
*(u32*)m_code = value;
m_code += 4;
}
public: public:
ARM64XEmitter() : code(0), startcode(0), lastCacheFlushEnd(0) {} ARM64XEmitter()
virtual ~ARM64XEmitter() {} : m_code(nullptr), m_startcode(nullptr), m_lastCacheFlushEnd(nullptr)
{
}
void SetCodePtr(u8 *ptr); virtual ~ARM64XEmitter()
{
}
void SetCodePtr(u8* ptr);
void ReserveCodeSpace(u32 bytes); void ReserveCodeSpace(u32 bytes);
const u8 *AlignCode16(); const u8* AlignCode16();
const u8 *AlignCodePage(); const u8* AlignCodePage();
const u8 *GetCodePtr() const; const u8* GetCodePtr() const;
void FlushIcache(); void FlushIcache();
void FlushIcacheSection(u8 *start, u8 *end); void FlushIcacheSection(u8* start, u8* end);
u8 *GetWritableCodePtr(); u8* GetWritableCodePtr();
// FixupBranch branching // FixupBranch branching
void SetJumpTarget(FixupBranch const &branch); void SetJumpTarget(FixupBranch const& branch);
FixupBranch CBZ(ARM64Reg Rt); FixupBranch CBZ(ARM64Reg Rt);
FixupBranch CBNZ(ARM64Reg Rt); FixupBranch CBNZ(ARM64Reg Rt);
FixupBranch B(CCFlags cond); FixupBranch B(CCFlags cond);
@@ -330,8 +348,8 @@ public:
void TBNZ(ARM64Reg Rt, u8 bits, const void* ptr); void TBNZ(ARM64Reg Rt, u8 bits, const void* ptr);
// Unconditional Branch // Unconditional Branch
void B(const void *ptr); void B(const void* ptr);
void BL(const void *ptr); void BL(const void* ptr);
// Unconditional Branch (register) // Unconditional Branch (register)
void BR(ARM64Reg Rn); void BR(ARM64Reg Rn);