emitter: Clean up 32bit code.

This commit is contained in:
lightningterror 2022-03-21 09:42:15 +01:00
parent 9b21f31b0d
commit 6db573d255
10 changed files with 5 additions and 153 deletions

View File

@@ -230,9 +230,7 @@ void x86capabilities::Identify()
Model = (regs[0] >> 4) & 0xf;
FamilyID = (regs[0] >> 8) & 0xf;
TypeID = (regs[0] >> 12) & 0x3;
#ifdef __M_X86_64
//u32 x86_64_8BITBRANDID = regs[1] & 0xff;
#endif
Flags = regs[3];
Flags2 = regs[2];
}
@@ -251,9 +249,7 @@ void x86capabilities::Identify()
{
cpuid(regs, 0x80000001);
#ifdef __M_X86_64
//u32 x86_64_12BITBRANDID = regs[1] & 0xfff;
#endif
EFlags2 = regs[2];
EFlags = regs[3];
}

View File

@@ -65,10 +65,8 @@ namespace x86Emitter
void operator()(void* f, u32 a1, u32 a2) const;
void operator()(void* f, void* a1) const;
#ifdef __M_X86_64
void operator()(void* f, const xRegisterLong& a1, const xRegisterLong& a2 = xEmptyReg) const;
void operator()(void* f, u32 a1, const xRegisterLong& a2) const;
#endif
template <typename T>
__fi void operator()(T* func, u32 a1, const xRegisterLong& a2 = xEmptyReg) const

View File

@@ -70,7 +70,6 @@ namespace x86Emitter
#endif
};
#ifdef __M_X86_64
// --------------------------------------------------------------------------------------
// xImpl_MovImm64
// --------------------------------------------------------------------------------------
@@ -82,7 +81,6 @@ namespace x86Emitter
void operator()(const xRegister64& to, s64 imm, bool preserve_flags = false) const;
};
#endif
// --------------------------------------------------------------------------------------
// xImpl_CMov
@@ -131,10 +129,8 @@ namespace x86Emitter
void operator()(const xRegister16or32or64& to, const xIndirect8& sibsrc) const;
void operator()(const xRegister32or64& to, const xRegister16& from) const;
void operator()(const xRegister32or64& to, const xIndirect16& sibsrc) const;
#ifdef __M_X86_64
void operator()(const xRegister64& to, const xRegister32& from) const;
void operator()(const xRegister64& to, const xIndirect32& sibsrc) const;
#endif
//void operator()( const xRegister32& to, const xDirectOrIndirect16& src ) const;
//void operator()( const xRegister16or32& to, const xDirectOrIndirect8& src ) const;

View File

@@ -57,11 +57,8 @@ namespace x86Emitter
// flags.
extern const xImpl_Mov xMOV;
#ifdef __M_X86_64
extern const xImpl_MovImm64 xMOV64;
#endif
extern const xImpl_Test xTEST;
extern const xImpl_Group2 xROL, xROR,
xRCL, xRCR,
xSHL, xSHR,
@@ -222,7 +219,6 @@ namespace x86Emitter
/// May use `tmp` on x86-64
void xWriteImm64ToMem(u64* addr, const xAddressReg& tmp, u64 imm);
#ifdef __M_X86_64
//////////////////////////////////////////////////////////////////////////////////////////
/// Helper function to run operations with large immediates
/// If the immediate fits in 32 bits, runs op(target, imm)
@@ -240,7 +236,6 @@ namespace x86Emitter
op(dst, tmpRegister);
}
}
#endif
//////////////////////////////////////////////////////////////////////////////////////////
// JMP / Jcc Instructions!

View File

@@ -125,11 +125,7 @@ namespace x86Emitter
const xRegisterInt& reg = param1.IsReg() ? param1 : param2;
#ifdef __M_X86_64
u8 nR = reg.IsExtended() ? 0x00 : 0x80;
#else
u8 nR = 0x80;
#endif
u8 L = reg.IsWideSIMD() ? 4 : 0;
u8 nv = (~param2.GetId() & 0xF) << 3;
@@ -155,15 +151,9 @@ namespace x86Emitter
const xRegisterInt& reg = param1.IsReg() ? param1 : param2;
#ifdef __M_X86_64
u8 nR = reg.IsExtended() ? 0x00 : 0x80;
u8 nB = param3.IsExtended() ? 0x00 : 0x20;
u8 nX = 0x40; // likely unused so hardwired to disabled
#else
u8 nR = 0x80;
u8 nB = 0x20;
u8 nX = 0x40;
#endif
u8 L = reg.IsWideSIMD() ? 4 : 0;
u8 W = (w == -1) ? (reg.GetOperandSize() == 8 ? 0x80 : 0) : // autodetect the size
0x80 * w; // take directly the W value

View File

@@ -93,7 +93,6 @@ namespace x86Emitter
}
}
#ifdef __M_X86_64
void xImpl_FastCall::operator()(void* f, const xRegisterLong& a1, const xRegisterLong& a2) const
{
prepareRegsForFastcall(a1, a2);
@@ -118,7 +117,6 @@ namespace x86Emitter
xMOV(arg1reg, a1);
(*this)(f, arg1reg, arg2reg);
}
#endif
void xImpl_FastCall::operator()(void* f, void* a1) const
{
@@ -214,10 +212,8 @@ namespace x86Emitter
s32* bah = xJcc32(comparison);
sptr distance = (sptr)target - (sptr)xGetPtr();
#ifdef __M_X86_64
// This assert won't physically happen on x86 targets
pxAssertDev(distance >= -0x80000000LL && distance < 0x80000000LL, "Jump target is too far away, needs an indirect register");
#endif
*bah = (s32)distance;
}

View File

@@ -55,38 +55,16 @@ namespace x86Emitter
// mov eax has a special from when writing directly to a DISP32 address
// (sans any register index/base registers).
#ifndef __M_X86_64
// Note: On x86-64 this is an immediate 64-bit address, which is larger than the equivalent rip offset instr
if (from.IsAccumulator() && dest.Index.IsEmpty() && dest.Base.IsEmpty())
{
xOpAccWrite(from.GetPrefix16(), from.Is8BitOp() ? 0xa2 : 0xa3, from, dest);
xWrite32(dest.Displacement);
}
else
#endif
{
xOpWrite(from.GetPrefix16(), from.Is8BitOp() ? 0x88 : 0x89, from, dest);
}
}
void xImpl_Mov::operator()(const xRegisterInt& to, const xIndirectVoid& src) const
{
// mov eax has a special from when reading directly from a DISP32 address
// (sans any register index/base registers).
#ifndef __M_X86_64
// Note: On x86-64 this is an immediate 64-bit address, which is larger than the equivalent rip offset instr
if (to.IsAccumulator() && src.Index.IsEmpty() && src.Base.IsEmpty())
{
xOpAccWrite(to.GetPrefix16(), to.Is8BitOp() ? 0xa0 : 0xa1, to, src);
xWrite32(src.Displacement);
}
else
#endif
{
xOpWrite(to.GetPrefix16(), to.Is8BitOp() ? 0x8a : 0x8b, to, src);
}
}
void xImpl_Mov::operator()(const xIndirect64orLess& dest, sptr imm) const
{
@@ -153,7 +131,6 @@ namespace x86Emitter
const xImpl_Mov xMOV;
#ifdef __M_X86_64
void xImpl_MovImm64::operator()(const xRegister64& to, s64 imm, bool preserve_flags) const
{
if (imm == (u32)imm || imm == (s32)imm)
@@ -169,7 +146,6 @@ namespace x86Emitter
}
const xImpl_MovImm64 xMOV64;
#endif
// --------------------------------------------------------------------------------------
// CMOVcc
@@ -241,7 +217,6 @@ namespace x86Emitter
xOpWrite0F(SignExtend ? 0xbf : 0xb7, to, sibsrc);
}
#ifdef __M_X86_64
void xImpl_MovExtend::operator()(const xRegister64& to, const xRegister32& from) const
{
EbpAssert();
@@ -255,7 +230,6 @@ namespace x86Emitter
pxAssertMsg(SignExtend, "Use mov for 64-bit movzx");
xOpWrite(0, 0x63, to, sibsrc);
}
#endif
const xImpl_MovExtend xMOVSX = {true};
const xImpl_MovExtend xMOVZX = {false};

View File

@@ -93,9 +93,8 @@ namespace x86Emitter
//
__emitinline void SimdPrefix(u8 prefix, u16 opcode)
{
#ifdef __M_X86_64
pxAssertMsg(prefix == 0, "REX prefix must be just before the opcode");
#endif
const bool is16BitOpcode = ((opcode & 0xff) == 0x38) || ((opcode & 0xff) == 0x3a);
// If the lower byte is not a valid prefix and the upper byte is non-zero it

View File

@@ -151,17 +151,12 @@ const xRegister8
ah(4), ch(5),
dh(6), bh(7);
#if defined(_WIN32) || !defined(__M_X86_64)
#if defined(_WIN32)
const xAddressReg
arg1reg = rcx,
arg2reg = rdx,
#ifdef __M_X86_64
arg3reg = r8,
arg4reg = r9,
#else
arg3reg = xRegisterEmpty(),
arg4reg = xRegisterEmpty(),
#endif
calleeSavedReg1 = rdi,
calleeSavedReg2 = rsi;
@@ -214,7 +209,6 @@ const xRegister32
"e12", "e13", "e14", "e15"
};
#ifdef __M_X86_64
const char* const x86_regnames_gpr64[] =
{
"rax", "rcx", "rdx", "rbx",
@@ -222,7 +216,6 @@ const xRegister32
"r8", "r9", "r10", "r11",
"r12", "r13", "r14", "r15"
};
#endif
const char* const x86_regnames_sse[] =
{
@@ -252,10 +245,8 @@ const xRegister32
return x86_regnames_gpr16[Id];
case 4:
return x86_regnames_gpr32[Id];
#ifdef __M_X86_64
case 8:
return x86_regnames_gpr64[Id];
#endif
case 16:
return x86_regnames_sse[Id];
}
@@ -300,9 +291,6 @@ const xRegister32
void EmitSibMagic(uint regfield, const void* address, int extraRIPOffset)
{
sptr displacement = (sptr)address;
#ifndef __M_X86_64
ModRM(0, regfield, ModRm_UseDisp32);
#else
sptr ripRelative = (sptr)address - ((sptr)x86Ptr + sizeof(s8) + sizeof(s32) + extraRIPOffset);
// Can we use a rip-relative address? (Prefer this over eiz because it's a byte shorter)
if (ripRelative == (s32)ripRelative)
@@ -316,7 +304,6 @@ const xRegister32
ModRM(0, regfield, ModRm_UseSib);
SibSB(0, Sib_EIZ, Sib_UseDisp32);
}
#endif
xWrite<s32>((s32)displacement);
}
@@ -440,11 +427,9 @@ const xRegister32
//////////////////////////////////////////////////////////////////////////////////////////
__emitinline static void EmitRex(bool w, bool r, bool x, bool b)
{
#ifdef __M_X86_64
const u8 rex = 0x40 | (w << 3) | (r << 2) | (x << 1) | (u8)b;
if (rex != 0x40)
xWrite8(rex);
#endif
}
void EmitRex(uint regfield, const void* address)
@@ -989,12 +974,8 @@ const xRegister32
__emitinline u32* xLEA_Writeback(xAddressReg to)
{
#ifdef __M_X86_64
xOpWrite(0, 0x8d, to, ptr[(void*)(0xdcdcdcd + (uptr)xGetPtr() + 7)]);
#else
xOpAccWrite(0, 0xb8 | to.Id, 0, to);
xWrite32(0xcdcdcdcd);
#endif
return (u32*)xGetPtr() - 1;
}
@@ -1045,12 +1026,7 @@ const xRegister32
}
else
{
#ifdef __M_X86_64
xOpWrite(to.GetPrefix16(), 0xff, isDec ? 1 : 0, to);
#else
to.prefix16();
xWrite8((isDec ? 0x48 : 0x40) | to.Id);
#endif
}
}
@@ -1200,25 +1176,10 @@ const xRegister32
//////////////////////////////////////////////////////////////////////////////////////////
// Helper object to handle ABI frame
#ifdef __M_X86_64
// All x86-64 calling conventions ensure/require stack to be 16 bytes aligned
// I couldn't find documentation on when, but compilers would indicate it's before the call: https://gcc.godbolt.org/z/KzTfsz
#define ALIGN_STACK(v) xADD(rsp, v)
#elif defined(__GNUC__)
// GCC ensures/requires stack to be 16 bytes aligned before the call
// Call will store 4 bytes. EDI/ESI/EBX will take another 12 bytes.
// EBP will take 4 bytes if m_base_frame is enabled
#define ALIGN_STACK(v) xADD(esp, v)
#else
#define ALIGN_STACK(v)
#endif
static void stackAlign(int offset, bool moveDown)
{
int needed = (16 - (offset % 16)) % 16;
@@ -1250,8 +1211,6 @@ const xRegister32
m_offset += sizeof(void*);
}
#ifdef __M_X86_64
xPUSH(rbx);
xPUSH(r12);
xPUSH(r13);
@@ -1265,16 +1224,6 @@ const xRegister32
m_offset += 48;
#endif
#else
// Save the register context
xPUSH(edi);
xPUSH(esi);
xPUSH(ebx);
m_offset += 12;
#endif
stackAlign(m_offset, true);
}
@@ -1282,8 +1231,6 @@ const xRegister32
{
stackAlign(m_offset, false);
#ifdef __M_X86_64
// Restore the register context
#ifdef _WIN32
xADD(rsp, 32);
@@ -1296,15 +1243,6 @@ const xRegister32
xPOP(r12);
xPOP(rbx);
#else
// Restore the register context
xPOP(ebx);
xPOP(esi);
xPOP(edi);
#endif
// Destroy the frame
if (m_base_frame)
{
@@ -1352,7 +1290,6 @@ const xRegister32
void xLoadFarAddr(const xAddressReg& dst, void* addr)
{
#ifdef __M_X86_64
sptr iaddr = (sptr)addr;
sptr rip = (sptr)xGetPtr() + 7; // LEA will be 7 bytes
sptr disp = iaddr - rip;
@@ -1364,19 +1301,11 @@ const xRegister32
{
xMOV64(dst, iaddr);
}
#else
xMOV(dst, (sptr)addr);
#endif
}
void xWriteImm64ToMem(u64* addr, const xAddressReg& tmp, u64 imm)
{
#ifdef __M_X86_64
xImm64Op(xMOV, ptr64[addr], tmp, imm);
#else
xMOV(ptr32[(u32*)addr], (u32)(imm & 0xFFFFFFFF));
xMOV(ptr32[(u32*)addr + 1], (u32)(imm >> 32));
#endif
}
} // End namespace x86Emitter

View File

@@ -17,14 +17,8 @@
#include "common/Threading.h"
#ifdef __M_X86_64
static const uint iREGCNT_XMM = 16;
static const uint iREGCNT_GPR = 16;
#else
// Register counts for x86/32 mode:
static const uint iREGCNT_XMM = 8;
static const uint iREGCNT_GPR = 8;
#endif
enum XMMSSEType
{
@@ -313,17 +307,10 @@ namespace x86Emitter
bool IsSIMD() const { return GetOperandSize() == 16; }
// IsWide: return true if the register is 64 bits (requires a wide op on the rex prefix)
#ifdef __M_X86_64
bool IsWide() const
{
return GetOperandSize() == 8;
}
#else
bool IsWide() const
{
return false;
} // no 64 bits GPR
#endif
// return true if the register is a valid YMM register
bool IsWideSIMD() const { return GetOperandSize() == 32; }
@@ -498,11 +485,7 @@ namespace x86Emitter
// more sense and allows the programmer a little more type protection if needed.
//
#ifdef __M_X86_64
#define xRegisterLong xRegister64
#else
#define xRegisterLong xRegister32
#endif
static const int wordsize = sizeof(sptr);
class xAddressReg : public xRegisterLong
@@ -854,11 +837,7 @@ extern const xRegister32
typedef xIndirect<u32> xIndirect32;
typedef xIndirect<u16> xIndirect16;
typedef xIndirect<u8> xIndirect8;
#ifdef __M_X86_64
typedef xIndirect<u64> xIndirectNative;
#else
typedef xIndirect<u32> xIndirectNative;
#endif
// --------------------------------------------------------------------------------------
// xIndirect64orLess - base class 64, 32, 16, and 8 bit operand types