mirror of https://github.com/PCSX2/pcsx2.git
commit a2a23579bd

@@ -32,13 +32,12 @@ struct xImpl_DwordShift
{
u16 OpcodeBase;

void operator()( const xRegister32& to, const xRegister32& from, const xRegisterCL& clreg ) const;
void operator()( const xRegister16& to, const xRegister16& from, const xRegisterCL& clreg ) const;
void operator()( const xRegister32& to, const xRegister32& from, u8 shiftcnt ) const;
void operator()( const xRegister16& to, const xRegister16& from, u8 shiftcnt ) const;
void operator()( const xRegister16or32or64& to, const xRegister16or32or64& from, const xRegisterCL& clreg ) const;

void operator()( const xIndirectVoid& dest,const xRegister16or32& from, const xRegisterCL& clreg ) const;
void operator()( const xIndirectVoid& dest,const xRegister16or32& from, u8 shiftcnt ) const;
void operator()( const xRegister16or32or64& to, const xRegister16or32or64& from, u8 shiftcnt ) const;

void operator()( const xIndirectVoid& dest,const xRegister16or32or64& from, const xRegisterCL& clreg ) const;
void operator()( const xIndirectVoid& dest,const xRegister16or32or64& from, u8 shiftcnt ) const;
};

} // End namespace x86Emitter

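For orientation, a minimal usage sketch of this double-word shift front-end (not part of the commit; xSHLD/xSHRD, the register globals, and cl are assumed to be the emitter's usual instances declared elsewhere):

using namespace x86Emitter;

static void emitDoubleShiftSketch()
{
    xSHLD(eax, edx, cl); // count taken from CL; 16/32/64-bit registers all go through the 16or32or64 overloads above
    xSHRD(eax, edx, 4);  // immediate shift-count form
}
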
@@ -38,14 +38,12 @@ struct xImpl_Group1
{
G1Type InstType;

void operator()( const xRegister8& to, const xRegister8& from ) const;
void operator()( const xRegister16& to, const xRegister16& from ) const;
void operator()( const xRegister32& to, const xRegister32& from ) const;
void operator()( const xRegisterInt& to, const xRegisterInt& from ) const;

void operator()( const xIndirectVoid& to, const xRegisterInt& from ) const;
void operator()( const xRegisterInt& to, const xIndirectVoid& from ) const;
void operator()( const xRegisterInt& to, int imm ) const;
void operator()( const xIndirect32orLess& to, int imm ) const;
void operator()( const xIndirect64orLess& to, int imm ) const;

#if 0
// ------------------------------------------------------------------------

@@ -91,15 +89,13 @@ struct xImpl_G1Logic
{
G1Type InstType;

void operator()( const xRegister8& to, const xRegister8& from ) const;
void operator()( const xRegister16& to, const xRegister16& from ) const;
void operator()( const xRegister32& to, const xRegister32& from ) const;
void operator()( const xRegisterInt& to, const xRegisterInt& from ) const;

void operator()( const xIndirectVoid& to, const xRegisterInt& from ) const;
void operator()( const xRegisterInt& to, const xIndirectVoid& from ) const;
void operator()( const xRegisterInt& to, int imm ) const;

void operator()( const xIndirect32orLess& to, int imm ) const;
void operator()( const xIndirect64orLess& to, int imm ) const;

xImplSimd_DestRegSSE PS; // packed single precision
xImplSimd_DestRegSSE PD; // packed double precision

@@ -112,15 +108,13 @@ struct xImpl_G1Arith
{
G1Type InstType;

void operator()( const xRegister8& to, const xRegister8& from ) const;
void operator()( const xRegister16& to, const xRegister16& from ) const;
void operator()( const xRegister32& to, const xRegister32& from ) const;
void operator()( const xRegisterInt& to, const xRegisterInt& from ) const;

void operator()( const xIndirectVoid& to, const xRegisterInt& from ) const;
void operator()( const xRegisterInt& to, const xIndirectVoid& from ) const;
void operator()( const xRegisterInt& to, int imm ) const;

void operator()( const xIndirect32orLess& to, int imm ) const;
void operator()( const xIndirect64orLess& to, int imm ) const;

xImplSimd_DestRegSSE PS; // packed single precision
xImplSimd_DestRegSSE PD; // packed double precision

@@ -131,15 +125,13 @@ struct xImpl_G1Arith
// ------------------------------------------------------------------------
struct xImpl_G1Compare
{
void operator()( const xRegister8& to, const xRegister8& from ) const;
void operator()( const xRegister16& to, const xRegister16& from ) const;
void operator()( const xRegister32& to, const xRegister32& from ) const;
void operator()( const xRegisterInt& to, const xRegisterInt& from ) const;

void operator()( const xIndirectVoid& to, const xRegisterInt& from ) const;
void operator()( const xRegisterInt& to, const xIndirectVoid& from ) const;
void operator()( const xRegisterInt& to, int imm ) const;

void operator()( const xIndirect32orLess& to, int imm ) const;
void operator()( const xIndirect64orLess& to, int imm ) const;

xImplSimd_DestSSE_CmpImm PS;
xImplSimd_DestSSE_CmpImm PD;

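As a reminder of how these Group 1 front-ends are consumed, a hedged sketch (xADD/xAND/xCMP and the ptr32 indexer are assumed to be the usual emitter globals; someGlobal is a hypothetical variable):

using namespace x86Emitter;

static u32 someGlobal; // hypothetical memory operand

static void emitGroup1Sketch()
{
    xADD(eax, ecx);              // reg, reg
    xAND(edx, 0xff);             // reg, imm
    xCMP(ptr32[&someGlobal], 1); // [mem], imm -- routed through the xIndirect64orLess overload above
}
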
@@ -41,9 +41,9 @@ struct xImpl_Group2
G2Type InstType;

void operator()( const xRegisterInt& to, const xRegisterCL& from ) const;
void operator()( const xIndirect32orLess& to, const xRegisterCL& from ) const;
void operator()( const xIndirect64orLess& to, const xRegisterCL& from ) const;
void operator()( const xRegisterInt& to, u8 imm ) const;
void operator()( const xIndirect32orLess& to, u8 imm ) const;
void operator()( const xIndirect64orLess& to, u8 imm ) const;

#if 0
// ------------------------------------------------------------------------

@@ -35,7 +35,7 @@ struct xImpl_Group3
G3Type InstType;

void operator()( const xRegisterInt& from ) const;
void operator()( const xIndirect32orLess& from ) const;
void operator()( const xIndirect64orLess& from ) const;

#if 0
template< typename T >

@@ -57,7 +57,7 @@ struct xImpl_MulDivBase
u16 OpcodeSSE;

void operator()( const xRegisterInt& from ) const;
void operator()( const xIndirect32orLess& from ) const;
void operator()( const xIndirect64orLess& from ) const;

const xImplSimd_DestRegSSE PS;
const xImplSimd_DestRegSSE PD;

@@ -71,7 +71,7 @@ struct xImpl_MulDivBase
struct xImpl_iDiv
{
void operator()( const xRegisterInt& from ) const;
void operator()( const xIndirect32orLess& from ) const;
void operator()( const xIndirect64orLess& from ) const;

const xImplSimd_DestRegSSE PS;
const xImplSimd_DestRegSSE PD;

@@ -86,7 +86,7 @@ struct xImpl_iDiv
struct xImpl_iMul
{
void operator()( const xRegisterInt& from ) const;
void operator()( const xIndirect32orLess& from ) const;
void operator()( const xIndirect64orLess& from ) const;

// The following iMul-specific forms are valid for 16 and 32 bit register operands only!

@@ -28,7 +28,7 @@ struct xImpl_IncDec
bool isDec;

void operator()( const xRegisterInt& to ) const;
void operator()( const xIndirect32orLess& to ) const;
void operator()( const xIndirect64orLess& to ) const;
};

} // End namespace x86Emitter

@@ -43,11 +43,8 @@ struct xImpl_JmpCall
{
bool isJmp;

void operator()( const xRegister32& absreg ) const;
void operator()( const xIndirect32& src ) const;

void operator()( const xRegister16& absreg ) const;
void operator()( const xIndirect16& src ) const;
void operator()( const xRegisterInt& absreg ) const;
void operator()( const xIndirect64orLess& src ) const;

// Special form for calling functions. This form automatically resolves the
// correct displacement based on the size of the instruction being generated.

@@ -110,7 +107,7 @@ struct xImpl_FastCall
#endif

template< typename T > __fi __always_inline_tmpl_fail
void operator()( T* func, const xRegister32& a1 = xEmptyReg, const xRegister32& a2 = xEmptyReg) const
void operator()( T* func, const xRegisterLong& a1 = xEmptyReg, const xRegisterLong& a2 = xEmptyReg) const
{
#ifdef __x86_64__
if (a1.IsEmpty()) {

@@ -132,7 +129,7 @@ struct xImpl_FastCall
}

template< typename T > __fi __always_inline_tmpl_fail
void operator()( T* func, u32 a1, const xRegister32& a2) const
void operator()( T* func, u32 a1, const xRegisterLong& a2) const
{
#ifdef __x86_64__
XFASTCALL2;

@@ -171,7 +168,7 @@ struct xImpl_FastCall
#endif
}

void operator()(const xIndirect32& func, const xRegister32& a1 = xEmptyReg, const xRegister32& a2 = xEmptyReg) const
void operator()(const xIndirect32& func, const xRegisterLong& a1 = xEmptyReg, const xRegisterLong& a2 = xEmptyReg) const
{
#ifdef __x86_64__
if (a1.IsEmpty()) {

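A hedged usage sketch of the fast-call form described above (xFastCall is assumed to be the emitter's xImpl_FastCall instance; OnEvent is a hypothetical call target):

using namespace x86Emitter;

static void OnEvent(u32 code, u32 value); // hypothetical C function to be called from generated code

static void emitFastCallSketch()
{
    xFastCall((void*)&OnEvent, ecx, edx);  // a1/a2 are moved into the ABI argument registers before the call
    xFastCall((void*)&OnEvent, 0x10, eax); // immediate + register form
}
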
@@ -29,13 +29,10 @@ struct xImpl_Mov
{
xImpl_Mov() {} // Satisfy GCC's whims.

void operator()( const xRegister8& to, const xRegister8& from ) const;
void operator()( const xRegister16& to, const xRegister16& from ) const;
void operator()( const xRegister32& to, const xRegister32& from ) const;

void operator()( const xRegisterInt& to, const xRegisterInt& from ) const;
void operator()( const xIndirectVoid& dest, const xRegisterInt& from ) const;
void operator()( const xRegisterInt& to, const xIndirectVoid& src ) const;
void operator()( const xIndirect32orLess& dest, int imm ) const;
void operator()( const xIndirect64orLess& dest, int imm ) const;
void operator()( const xRegisterInt& to, int imm, bool preserve_flags=false ) const;

#if 0

@@ -88,12 +85,8 @@ struct xImpl_Mov
struct xImpl_CMov
{
JccComparisonType ccType;

void operator()( const xRegister32& to, const xRegister32& from ) const;
void operator()( const xRegister32& to, const xIndirectVoid& sibsrc ) const;

void operator()( const xRegister16& to, const xRegister16& from ) const;
void operator()( const xRegister16& to, const xIndirectVoid& sibsrc ) const;
void operator()( const xRegister16or32or64& to, const xRegister16or32or64& from ) const;
void operator()( const xRegister16or32or64& to, const xIndirectVoid& sibsrc ) const;

//void operator()( const xDirectOrIndirect32& to, const xDirectOrIndirect32& from );
//void operator()( const xDirectOrIndirect16& to, const xDirectOrIndirect16& from ) const;

@@ -119,10 +112,10 @@ struct xImpl_MovExtend
{
bool SignExtend;

void operator()( const xRegister16or32& to, const xRegister8& from ) const;
void operator()( const xRegister16or32& to, const xIndirect8& sibsrc ) const;
void operator()( const xRegister32& to, const xRegister16& from ) const;
void operator()( const xRegister32& to, const xIndirect16& sibsrc ) const;
void operator()( const xRegister16or32or64& to, const xRegister8& from ) const;
void operator()( const xRegister16or32or64& to, const xIndirect8& sibsrc ) const;
void operator()( const xRegister32or64& to, const xRegister16& from ) const;
void operator()( const xRegister32or64& to, const xIndirect16& sibsrc ) const;

//void operator()( const xRegister32& to, const xDirectOrIndirect16& src ) const;
//void operator()( const xRegister16or32& to, const xDirectOrIndirect8& src ) const;

@@ -25,10 +25,8 @@ namespace x86Emitter {
//
struct xImpl_Test
{
void operator()( const xRegister8& to, const xRegister8& from ) const;
void operator()( const xRegister16& to, const xRegister16& from ) const;
void operator()( const xRegister32& to, const xRegister32& from ) const;
void operator()( const xIndirect32orLess& dest, int imm ) const;
void operator()( const xRegisterInt& to, const xRegisterInt& from ) const;
void operator()( const xIndirect64orLess& dest, int imm ) const;
void operator()( const xRegisterInt& to, int imm ) const;
};

@@ -50,9 +48,8 @@ struct xImpl_BitScan
// 0xbc [fwd] / 0xbd [rev]
u16 Opcode;

void operator()( const xRegister32& to, const xRegister32& from ) const;
void operator()( const xRegister16& to, const xRegister16& from ) const;
void operator()( const xRegister16or32& to, const xIndirectVoid& sibsrc ) const;
void operator()( const xRegister16or32or64& to, const xRegister16or32or64& from ) const;
void operator()( const xRegister16or32or64& to, const xIndirectVoid& sibsrc ) const;
};

// --------------------------------------------------------------------------------------

@@ -64,11 +61,12 @@ struct xImpl_Group8
{
G8Type InstType;

void operator()( const xRegister32& bitbase, const xRegister32& bitoffset ) const;
void operator()( const xRegister16& bitbase, const xRegister16& bitoffset ) const;
void operator()( const xRegister16or32& bitbase, u8 bitoffset ) const;
void operator()( const xRegister16or32or64& bitbase, const xRegister16or32or64& bitoffset ) const;
void operator()( const xRegister16or32or64& bitbase, u8 bitoffset ) const;

void operator()( const xIndirectVoid& bitbase, const xRegister16or32& bitoffset ) const;
void operator()( const xIndirectVoid& bitbase, const xRegister16or32or64& bitoffset ) const;

void operator()( const xIndirect64& bitbase, u8 bitoffset ) const;
void operator()( const xIndirect32& bitbase, u8 bitoffset ) const;
void operator()( const xIndirect16& bitbase, u8 bitoffset ) const;
};

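A short bit-test sketch for the Group 8 overloads above (xBT is the instance defined later in this commit; ptr32 is the assumed 32-bit address indexer and flagsWord a hypothetical variable):

using namespace x86Emitter;

static u32 flagsWord; // hypothetical bit base in memory

static void emitBitTestSketch()
{
    xBT(eax, 3);                 // CF := bit 3 of eax
    xBT(ptr32[&flagsWord], ecx); // register bit index against a memory bit base
}
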
@@ -134,13 +134,14 @@ namespace x86Emitter
// These are all defined inline or in ix86.cpp.
//

extern void xBSWAP( const xRegister32& to );
extern void xBSWAP( const xRegister32or64& to );

// ----- Lea Instructions (Load Effective Address) -----
// Note: alternate (void*) forms of these instructions are not provided since those
// forms are functionally equivalent to Mov reg,imm, and thus better written as MOVs
// instead.

extern void xLEA( xRegister64 to, const xIndirectVoid& src, bool preserve_flags=false );
extern void xLEA( xRegister32 to, const xIndirectVoid& src, bool preserve_flags=false );
extern void xLEA( xRegister16 to, const xIndirectVoid& src, bool preserve_flags=false );

@@ -151,10 +152,10 @@ namespace x86Emitter
extern void xPOP( const xIndirectVoid& from );
extern void xPUSH( const xIndirectVoid& from );

extern void xPOP( xRegister32 from );
extern void xPOP( xRegister32or64 from );

extern void xPUSH( u32 imm );
extern void xPUSH( xRegister32 from );
extern void xPUSH( xRegister32or64 from );

// pushes the EFLAGS register onto the stack
extern void xPUSHFD();

@@ -335,16 +336,16 @@ namespace x86Emitter
extern void xFXSAVE( const xIndirectVoid& dest );
extern void xFXRSTOR( const xIndirectVoid& src );

extern void xMOVDZX( const xRegisterSSE& to, const xRegister32& from );
extern void xMOVDZX( const xRegisterSSE& to, const xRegister32or64& from );
extern void xMOVDZX( const xRegisterSSE& to, const xIndirectVoid& src );

extern void xMOVDZX( const xRegisterMMX& to, const xRegister32& from );
extern void xMOVDZX( const xRegisterMMX& to, const xRegister32or64& from );
extern void xMOVDZX( const xRegisterMMX& to, const xIndirectVoid& src );

extern void xMOVD( const xRegister32& to, const xRegisterSSE& from );
extern void xMOVD( const xRegister32or64& to, const xRegisterSSE& from );
extern void xMOVD( const xIndirectVoid& dest, const xRegisterSSE& from );

extern void xMOVD( const xRegister32& to, const xRegisterMMX& from );
extern void xMOVD( const xRegister32or64& to, const xRegisterMMX& from );
extern void xMOVD( const xIndirectVoid& dest, const xRegisterMMX& from );

extern void xMOVQ( const xRegisterMMX& to, const xRegisterMMX& from );

@@ -373,13 +374,13 @@ namespace x86Emitter
extern void xMOVNTPS( const xIndirectVoid& to, const xRegisterSSE& from );
extern void xMOVNTQ( const xIndirectVoid& to, const xRegisterMMX& from );

extern void xMOVMSKPS( const xRegister32& to, const xRegisterSSE& from );
extern void xMOVMSKPD( const xRegister32& to, const xRegisterSSE& from );
extern void xMOVMSKPS( const xRegister32or64& to, const xRegisterSSE& from );
extern void xMOVMSKPD( const xRegister32or64& to, const xRegisterSSE& from );

extern void xMASKMOV( const xRegisterSSE& to, const xRegisterSSE& from );
extern void xMASKMOV( const xRegisterMMX& to, const xRegisterMMX& from );
extern void xPMOVMSKB( const xRegister32& to, const xRegisterSSE& from );
extern void xPMOVMSKB( const xRegister32& to, const xRegisterMMX& from );
extern void xPMOVMSKB( const xRegister32or64& to, const xRegisterSSE& from );
extern void xPMOVMSKB( const xRegister32or64& to, const xRegisterMMX& from );
extern void xPALIGNR( const xRegisterSSE& to, const xRegisterSSE& from, u8 imm8 );
extern void xPALIGNR( const xRegisterMMX& to, const xRegisterMMX& from, u8 imm8 );

@@ -413,7 +414,7 @@ namespace x86Emitter
extern void xINSERTPS( const xRegisterSSE& to, const xRegisterSSE& from, u8 imm8 );
extern void xINSERTPS( const xRegisterSSE& to, const xIndirect32& from, u8 imm8 );

extern void xEXTRACTPS( const xRegister32& to, const xRegisterSSE& from, u8 imm8 );
extern void xEXTRACTPS( const xRegister32or64& to, const xRegisterSSE& from, u8 imm8 );
extern void xEXTRACTPS( const xIndirect32& dest, const xRegisterSSE& from, u8 imm8 );

// ------------------------------------------------------------------------

@@ -471,19 +472,19 @@ namespace x86Emitter
extern void xCVTPS2PI( const xRegisterMMX& to, const xRegisterSSE& from );
extern void xCVTPS2PI( const xRegisterMMX& to, const xIndirect64& from );

extern void xCVTSD2SI( const xRegister32& to, const xRegisterSSE& from );
extern void xCVTSD2SI( const xRegister32& to, const xIndirect64& from );
extern void xCVTSD2SI( const xRegister32or64& to, const xRegisterSSE& from );
extern void xCVTSD2SI( const xRegister32or64& to, const xIndirect64& from );
extern void xCVTSD2SS( const xRegisterSSE& to, const xRegisterSSE& from );
extern void xCVTSD2SS( const xRegisterSSE& to, const xIndirect64& from );
extern void xCVTSI2SD( const xRegisterMMX& to, const xRegister32& from );
extern void xCVTSI2SD( const xRegisterMMX& to, const xRegister32or64& from );
extern void xCVTSI2SD( const xRegisterMMX& to, const xIndirect32& from );
extern void xCVTSI2SS( const xRegisterSSE& to, const xRegister32& from );
extern void xCVTSI2SS( const xRegisterSSE& to, const xRegister32or64& from );
extern void xCVTSI2SS( const xRegisterSSE& to, const xIndirect32& from );

extern void xCVTSS2SD( const xRegisterSSE& to, const xRegisterSSE& from );
extern void xCVTSS2SD( const xRegisterSSE& to, const xIndirect32& from );
extern void xCVTSS2SI( const xRegister32& to, const xRegisterSSE& from );
extern void xCVTSS2SI( const xRegister32& to, const xIndirect32& from );
extern void xCVTSS2SI( const xRegister32or64& to, const xRegisterSSE& from );
extern void xCVTSS2SI( const xRegister32or64& to, const xIndirect32& from );

extern void xCVTTPD2DQ( const xRegisterSSE& to, const xRegisterSSE& from );
extern void xCVTTPD2DQ( const xRegisterSSE& to, const xIndirect128& from );

@@ -494,10 +495,10 @@ namespace x86Emitter
extern void xCVTTPS2PI( const xRegisterMMX& to, const xRegisterSSE& from );
extern void xCVTTPS2PI( const xRegisterMMX& to, const xIndirect64& from );

extern void xCVTTSD2SI( const xRegister32& to, const xRegisterSSE& from );
extern void xCVTTSD2SI( const xRegister32& to, const xIndirect64& from );
extern void xCVTTSS2SI( const xRegister32& to, const xRegisterSSE& from );
extern void xCVTTSS2SI( const xRegister32& to, const xIndirect32& from );
extern void xCVTTSD2SI( const xRegister32or64& to, const xRegisterSSE& from );
extern void xCVTTSD2SI( const xRegister32or64& to, const xIndirect64& from );
extern void xCVTTSS2SI( const xRegister32or64& to, const xRegisterSSE& from );
extern void xCVTTSS2SI( const xRegister32or64& to, const xIndirect32& from );

// ------------------------------------------------------------------------

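Illustrating the LEA note above with a hedged sketch (ptr and the register globals are assumed to be the emitter's usual address indexer and xAddressReg instances, with their standard operator overloads):

using namespace x86Emitter;

static void emitLeaSketch()
{
    xLEA(rax, ptr[rbx + rcx*4 + 8]); // genuine address arithmetic -> LEA
    xMOV(rax, 0x1000);               // a fixed constant has nothing to compute, so a plain MOV is preferred
}
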
@@ -31,6 +31,12 @@ namespace x86Emitter {
extern void EmitSibMagic( const xRegisterBase& reg1, const void* src );
extern void EmitSibMagic( const xRegisterBase& reg1, const xIndirectVoid& sib );

void EmitRex( const xRegisterBase& reg1, const xRegisterBase& reg2 );
void EmitRex( const xRegisterBase& reg1, const void* src );
void EmitRex( const xRegisterBase& reg1, const xIndirectVoid& sib );
void EmitRex( const xRegisterBase& reg1 );
void EmitRex( const xIndirectVoid& sib );

extern void _xMovRtoR( const xRegisterInt& to, const xRegisterInt& from );

template< typename T > inline

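Background sketch for the new EmitRex helpers (not part of the diff): a REX prefix is a single byte of the form 0100WRXB, where W selects a 64-bit operand size and R/X/B extend the ModRM.reg, SIB.index and ModRM.rm/SIB.base fields so that r8-r15 and xmm8-xmm15 become reachable.

static u8 ComposeRex(bool w, bool r, bool x, bool b)
{
    // 0x40 | W<<3 | R<<2 | X<<1 | B
    return 0x40 | (u8(w) << 3) | (u8(r) << 2) | (u8(x) << 1) | u8(b);
}
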
@@ -15,10 +15,16 @@

#pragma once

#ifdef __x86_64__
static const uint iREGCNT_XMM = 16;
static const uint iREGCNT_GPR = 16;
static const uint iREGCNT_MMX = 8; // FIXME: port the code and remove MMX
#else
// Register counts for x86/32 mode:
static const uint iREGCNT_XMM = 8;
static const uint iREGCNT_GPR = 8;
static const uint iREGCNT_MMX = 8;
#endif

enum XMMSSEType
{

@@ -70,13 +76,6 @@ extern void xWrite16( u16 val );
extern void xWrite32( u32 val );
extern void xWrite64( u64 val );

extern const char *const x86_regnames_gpr8[8];
extern const char *const x86_regnames_gpr16[8];
extern const char *const x86_regnames_gpr32[8];

extern const char *const x86_regnames_sse[8];
extern const char *const x86_regnames_mmx[8];

extern const char* xGetRegName( int regid, int operandSize );

//------------------------------------------------------------------

@@ -206,6 +205,7 @@ template< typename T > void xWrite( T val );
virtual uint GetOperandSize() const=0;

bool Is8BitOp() const { return GetOperandSize() == 1; }
u8 GetPrefix16() const { return GetOperandSize() == 2 ? 0x66 : 0; }
void prefix16() const { if( GetOperandSize() == 2 ) xWrite8( 0x66 ); }

void xWriteImm( int imm ) const

@@ -250,7 +250,9 @@ template< typename T > void xWrite( T val );
explicit xRegisterBase( int regId )
{
Id = regId;
pxAssert( (Id >= xRegId_Empty) && (Id < 8) );
// Note: to avoid tons of ifdef, the 32 bits build will instantiate
// all 16x64 bits registers.
pxAssert( (Id >= xRegId_Empty) && (Id < 16) );
}

bool IsEmpty() const { return Id < 0 ; }

@@ -262,8 +264,17 @@ template< typename T > void xWrite( T val );
// Returns true if the register is a valid accumulator: Eax, Ax, Al, XMM0.
bool IsAccumulator() const { return Id == 0; }

// returns true if the register is a valid MMX or XMM register.
// IsSIMD: returns true if the register is a valid MMX or XMM register.
// IsWide: return true if the register is 64 bits (requires a wide op on the rex prefix)
#ifdef __x86_64__
// No MMX on 64 bits, let's directly uses GPR
bool IsSIMD() const { return GetOperandSize() == 16; }
bool IsWide() const { return GetOperandSize() == 8; }
#else
bool IsSIMD() const { return GetOperandSize() == 8 || GetOperandSize() == 16; }
bool IsWide() const { return false; } // no 64 bits GPR
#endif
// return true if the register is a valid YMM register
bool IsWideSIMD() const { return GetOperandSize() == 32; }

bool operator==( const xRegisterBase& src ) const { return (Id == src.Id); }

@@ -284,14 +295,12 @@ template< typename T > void xWrite( T val );
explicit xRegisterInt( const xRegisterBase& src ) : _parent( src ) {}
explicit xRegisterInt( int regId ) : _parent( regId ) { }

bool IsSIMD() const { return false; }

bool operator==( const xRegisterInt& src ) const { return Id == src.Id && (GetOperandSize() == src.GetOperandSize()); }
bool operator!=( const xRegisterInt& src ) const { return !operator==(src); }
};

// --------------------------------------------------------------------------------------
// xRegister8/16/32 - Represents a basic 8/16/32 bit GPR on the x86
// xRegister8/16/32/64 - Represents a basic 8/16/32/64 bit GPR on the x86
// --------------------------------------------------------------------------------------
class xRegister8 : public xRegisterInt
{

@@ -335,6 +344,20 @@ template< typename T > void xWrite( T val );
bool operator!=( const xRegister32& src ) const { return this->Id != src.Id; }
};

class xRegister64 : public xRegisterInt
{
typedef xRegisterInt _parent;

public:
xRegister64(): _parent() {}
explicit xRegister64( int regId ) : _parent( regId ) {}

virtual uint GetOperandSize() const { return 8; }

bool operator==( const xRegister64& src ) const { return this->Id == src.Id; }
bool operator!=( const xRegister64& src ) const { return this->Id != src.Id; }
};

// --------------------------------------------------------------------------------------
// xRegisterMMX/SSE - Represents either a 64 bit or 128 bit SIMD register
// --------------------------------------------------------------------------------------

@@ -346,7 +369,11 @@ template< typename T > void xWrite( T val );
typedef xRegisterBase _parent;

public:
xRegisterMMX(): _parent() {}
xRegisterMMX(): _parent() {
#ifdef __x86_64__
pxAssert(0); // Sorry but code must be ported
#endif
}
//xRegisterMMX( const xRegisterBase& src ) : _parent( src ) {}
explicit xRegisterMMX( int regId ) : _parent( regId ) {}

@@ -395,20 +422,28 @@ template< typename T > void xWrite( T val );
// --------------------------------------------------------------------------------------
// xAddressReg
// --------------------------------------------------------------------------------------
// Use 32 bit registers as our index registers (for ModSib-style memory address calculations).
// This type is implicitly exchangeable with xRegister32.
// Use 32/64 bit registers as our index registers (for ModSib-style memory address calculations).
// This type is implicitly exchangeable with xRegister32/64.
//
// Only xAddressReg provides operators for constructing xAddressInfo types. These operators
// could have been added to xRegister32 directly instead, however I think this design makes
// could have been added to xRegister32/64 directly instead, however I think this design makes
// more sense and allows the programmer a little more type protection if needed.
//
class xAddressReg : public xRegister32

#ifdef __x86_64__
#define xRegisterLong xRegister64
#else
#define xRegisterLong xRegister32
#endif

class xAddressReg : public xRegisterLong
{
public:
xAddressReg(): xRegister32() {}
xAddressReg( const xAddressReg& src ) : xRegister32( src.Id ) {}
xAddressReg( const xRegister32& src ) : xRegister32( src ) {}
explicit xAddressReg( int regId ) : xRegister32( regId ) {}
xAddressReg(): xRegisterLong() {}
xAddressReg( const xAddressReg& src ) : xRegisterLong( src.Id ) {}
xAddressReg( const xRegister32& src ) : xRegisterLong( src.Id ) {}
xAddressReg( const xRegister64& src ) : xRegisterLong( src.Id ) {}
explicit xAddressReg( int regId ) : xRegisterLong( regId ) {}

// Returns true if the register is the stack pointer: ESP.
bool IsStackPointer() const { return Id == 4; }

@@ -418,14 +453,9 @@ template< typename T > void xWrite( T val );
xAddressVoid operator+( const void* right ) const;
xAddressVoid operator-( s32 right ) const;
xAddressVoid operator-( const void* right ) const;
xAddressVoid operator*( u32 factor ) const;
xAddressVoid operator*( int factor ) const;
xAddressVoid operator<<( u32 shift ) const;

/*xAddressReg& operator=( const xRegister32& src )
{
Id = src.Id;
return *this;
}*/
};

// --------------------------------------------------------------------------------------

@@ -443,6 +473,7 @@ template< typename T > void xWrite( T val );
return xRegister16( xRegId_Empty );
}

// FIXME remove it in x86 64
operator xRegisterMMX() const
{
return xRegisterMMX( xRegId_Empty );

@@ -459,6 +490,7 @@ template< typename T > void xWrite( T val );
}
};

// FIXME This one is likely useless and superseeded by the future xRegister16or32or64
class xRegister16or32
{
protected:

@@ -476,16 +508,59 @@ template< typename T > void xWrite( T val );
}
};

class xRegister16or32or64
{
protected:
const xRegisterInt& m_convtype;

public:
xRegister16or32or64( const xRegister64& src ) : m_convtype( src ) {}
xRegister16or32or64( const xRegister32& src ) : m_convtype( src ) {}
xRegister16or32or64( const xRegister16& src ) : m_convtype( src ) {}

operator const xRegisterBase&() const { return m_convtype; }

const xRegisterInt* operator->() const
{
return &m_convtype;
}
};

class xRegister32or64
{
protected:
const xRegisterInt& m_convtype;

public:
xRegister32or64( const xRegister64& src ) : m_convtype( src ) {}
xRegister32or64( const xRegister32& src ) : m_convtype( src ) {}

operator const xRegisterBase&() const { return m_convtype; }

const xRegisterInt* operator->() const
{
return &m_convtype;
}
};

extern const xRegisterEmpty xEmptyReg;

extern const xRegisterSSE
xmm0, xmm1, xmm2, xmm3,
xmm4, xmm5, xmm6, xmm7;
xmm4, xmm5, xmm6, xmm7,
xmm8, xmm9, xmm10, xmm11,
xmm12, xmm13, xmm14, xmm15;

extern const xRegisterMMX
mm0, mm1, mm2, mm3,
mm4, mm5, mm6, mm7;

extern const xAddressReg
rax, rbx, rcx, rdx,
rsi, rdi, rbp, rsp,
r8, r9, r10, r11,
r12, r13, r14, r15;

extern const xAddressReg
eax, ebx, ecx, edx,
esi, edi, ebp, esp;

@@ -502,12 +577,12 @@ template< typename T > void xWrite( T val );

const xRegisterSSE& xRegisterSSE::GetInstance(uint id)
{
static const xRegisterSSE *const m_tbl_xmmRegs[iREGCNT_XMM] =
static const xRegisterSSE *const m_tbl_xmmRegs[] =
{
&xmm0, &xmm1,
&xmm2, &xmm3,
&xmm4, &xmm5,
&xmm6, &xmm7
&xmm0, &xmm1, &xmm2, &xmm3,
&xmm4, &xmm5, &xmm6, &xmm7,
&xmm8, &xmm9, &xmm10, &xmm11,
&xmm12, &xmm13, &xmm14, &xmm15
};

pxAssert(id < iREGCNT_XMM);

@@ -523,7 +598,7 @@ template< typename T > void xWrite( T val );
xAddressReg Base; // base register (no scale)
xAddressReg Index; // index reg gets multiplied by the scale
int Factor; // scale applied to the index register, in factor form (not a shift!)
s32 Displacement; // address displacement
s32 Displacement; // address displacement // 4B max even on 64 bits

public:
xAddressVoid( const xAddressReg& base, const xAddressReg& index, int factor=1, s32 displacement=0 );

@@ -569,7 +644,7 @@ template< typename T > void xWrite( T val );

/*xAddressInfo( const xAddressVoid& src )
: _parent( src ) {}*/

explicit xAddressInfo( const xAddressReg& index, int displacement=0 )
: _parent( index, displacement ) {}

@@ -683,7 +758,8 @@ template< typename T > void xWrite( T val );
xAddressReg Base; // base register (no scale)
xAddressReg Index; // index reg gets multiplied by the scale
uint Scale; // scale applied to the index register, in scale/shift form
sptr Displacement; // offset applied to the Base/Index registers.
s32 Displacement; // offset applied to the Base/Index registers.
// Displacement is 8/32 bits even on x86_64

public:
explicit xIndirectVoid( s32 disp );

@@ -709,8 +785,8 @@ template< typename T > void xWrite( T val );
protected:
void Reduce();
};

template< typename OperandType >

template< typename OperandType >
class xIndirect : public xIndirectVoid
{
typedef xIndirectVoid _parent;

@@ -755,9 +831,9 @@ template< typename T > void xWrite( T val );
typedef xIndirect<u8> xIndirect8;

// --------------------------------------------------------------------------------------
// xIndirect32orLass - base class 32, 16, and 8 bit operand types
// xIndirect64orLess - base class 64, 32, 16, and 8 bit operand types
// --------------------------------------------------------------------------------------
class xIndirect32orLess : public xIndirectVoid
class xIndirect64orLess : public xIndirectVoid
{
typedef xIndirectVoid _parent;

@@ -765,17 +841,18 @@ template< typename T > void xWrite( T val );
uint m_OpSize;

public:
xIndirect32orLess( const xIndirect8& src ) : _parent( src ) { m_OpSize = src.GetOperandSize(); }
xIndirect32orLess( const xIndirect16& src ) : _parent( src ) { m_OpSize = src.GetOperandSize(); }
xIndirect32orLess( const xIndirect32& src ) : _parent( src ) { m_OpSize = src.GetOperandSize(); }
xIndirect64orLess( const xIndirect8& src ) : _parent( src ) { m_OpSize = src.GetOperandSize(); }
xIndirect64orLess( const xIndirect16& src ) : _parent( src ) { m_OpSize = src.GetOperandSize(); }
xIndirect64orLess( const xIndirect32& src ) : _parent( src ) { m_OpSize = src.GetOperandSize(); }
xIndirect64orLess( const xIndirect64& src ) : _parent( src ) { m_OpSize = src.GetOperandSize(); }

uint GetOperandSize() const { return m_OpSize; }

protected:
//xIndirect32orLess( const xAddressVoid& src ) : _parent( src ) {}
//xIndirect64orLess( const xAddressVoid& src ) : _parent( src ) {}

explicit xIndirect32orLess( s32 disp ) : _parent( disp ) {}
xIndirect32orLess( xAddressReg base, xAddressReg index, int scale=0, s32 displacement=0 ) :
explicit xIndirect64orLess( s32 disp ) : _parent( disp ) {}
xIndirect64orLess( xAddressReg base, xAddressReg index, int scale=0, s32 displacement=0 ) :
_parent( base, index, scale, displacement ) {}
};

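A hedged sketch of how the 16or32or64 wrapper types above are meant to be consumed by emitter code (xWrite8 is the raw byte writer already referenced elsewhere in this diff):

static void emitPrefixFor(const x86Emitter::xRegister16or32or64& reg)
{
    using namespace x86Emitter;
    if (u8 p = reg->GetPrefix16()) // 0x66 when the operand is 16-bit, 0 otherwise
        xWrite8(p);
    // a REX.W prefix would additionally be wanted when reg->IsWide() reports a 64-bit operand
}
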
@@ -41,7 +41,7 @@ namespace x86Emitter {
// Note on "[Indirect],Imm" forms : use int as the source operand since it's "reasonably inert" from a
// compiler perspective. (using uint tends to make the compiler try and fail to match signed immediates
// with one of the other overloads).
static void _g1_IndirectImm( G1Type InstType, const xIndirect32orLess& sibdest, int imm )
static void _g1_IndirectImm( G1Type InstType, const xIndirect64orLess& sibdest, int imm )
{
if( sibdest.Is8BitOp() )
{

@@ -106,13 +106,11 @@ static void _g1_EmitOp( G1Type InstType, const xRegisterInt& to, int imm )
}

#define ImplementGroup1( g1type, insttype ) \
void g1type::operator()( const xRegister8& to, const xRegister8& from ) const { _g1_EmitOp( insttype, to, from ); } \
void g1type::operator()( const xRegister16& to, const xRegister16& from ) const { _g1_EmitOp( insttype, to, from ); } \
void g1type::operator()( const xRegister32& to, const xRegister32& from ) const { _g1_EmitOp( insttype, to, from ); } \
void g1type::operator()( const xRegisterInt& to, const xRegisterInt& from ) const { _g1_EmitOp( insttype, to, from ); } \
void g1type::operator()( const xIndirectVoid& to, const xRegisterInt& from ) const { _g1_EmitOp( insttype, to, from ); } \
void g1type::operator()( const xRegisterInt& to, const xIndirectVoid& from ) const { _g1_EmitOp( insttype, to, from ); } \
void g1type::operator()( const xRegisterInt& to, int imm ) const { _g1_EmitOp( insttype, to, imm ); } \
void g1type::operator()( const xIndirect32orLess& sibdest, int imm ) const { _g1_IndirectImm( insttype, sibdest, imm ); }
void g1type::operator()( const xIndirect64orLess& sibdest, int imm ) const { _g1_IndirectImm( insttype, sibdest, imm ); }

ImplementGroup1( xImpl_Group1, InstType )
ImplementGroup1( xImpl_G1Logic, InstType )

@@ -160,14 +158,14 @@ void xImpl_Group2::operator()(const xRegisterInt& to, u8 imm ) const
}
}

void xImpl_Group2::operator()( const xIndirect32orLess& sibdest, const xRegisterCL& /* from */ ) const
void xImpl_Group2::operator()( const xIndirect64orLess& sibdest, const xRegisterCL& /* from */ ) const
{
sibdest.prefix16();
xWrite8( sibdest.Is8BitOp() ? 0xd2 : 0xd3 );
EmitSibMagic( InstType, sibdest );
}

void xImpl_Group2::operator()( const xIndirect32orLess& sibdest, u8 imm ) const
void xImpl_Group2::operator()( const xIndirect64orLess& sibdest, u8 imm ) const
{
if( imm == 0 ) return;

@@ -206,7 +204,7 @@ static void _g3_EmitOp( G3Type InstType, const xRegisterInt& from )
EmitSibMagic( InstType, from );
}

static void _g3_EmitOp( G3Type InstType, const xIndirect32orLess& from )
static void _g3_EmitOp( G3Type InstType, const xIndirect64orLess& from )
{
from.prefix16();
xWrite8( from.Is8BitOp() ? 0xf6 : 0xf7 );

@@ -214,10 +212,10 @@ static void _g3_EmitOp( G3Type InstType, const xIndirect32orLess& from )
}

void xImpl_Group3::operator()( const xRegisterInt& from ) const { _g3_EmitOp( InstType, from ); }
void xImpl_Group3::operator()( const xIndirect32orLess& from ) const { _g3_EmitOp( InstType, from ); }
void xImpl_Group3::operator()( const xIndirect64orLess& from ) const { _g3_EmitOp( InstType, from ); }

void xImpl_iDiv::operator()( const xRegisterInt& from ) const { _g3_EmitOp( G3Type_iDIV, from ); }
void xImpl_iDiv::operator()( const xIndirect32orLess& from ) const { _g3_EmitOp( G3Type_iDIV, from ); }
void xImpl_iDiv::operator()( const xIndirect64orLess& from ) const { _g3_EmitOp( G3Type_iDIV, from ); }

template< typename SrcType >
static void _imul_ImmStyle( const xRegisterInt& param1, const SrcType& param2, int imm )

@@ -237,7 +235,7 @@ static void _imul_ImmStyle( const xRegisterInt& param1, const SrcType& param2, i
}

void xImpl_iMul::operator()( const xRegisterInt& from ) const { _g3_EmitOp( G3Type_iMUL, from ); }
void xImpl_iMul::operator()( const xIndirect32orLess& from ) const { _g3_EmitOp( G3Type_iMUL, from ); }
void xImpl_iMul::operator()( const xIndirect64orLess& from ) const { _g3_EmitOp( G3Type_iMUL, from ); }

void xImpl_iMul::operator()( const xRegister32& to, const xRegister32& from ) const { xOpWrite0F( 0xaf, to, from ); }
void xImpl_iMul::operator()( const xRegister32& to, const xIndirectVoid& src ) const { xOpWrite0F( 0xaf, to, src ); }

@@ -261,19 +259,22 @@ const xImpl_iMul xMUL = { { 0x00, 0x59 }, { 0x66, 0x59 }, { 0xf3, 0x59 }, { 0xf2
// Group 8 Instructions
// =====================================================================================================

void xImpl_Group8::operator()( const xRegister32& bitbase, const xRegister32& bitoffset ) const { xOpWrite0F( 0xa3 | (InstType << 3), bitbase, bitoffset ); }
void xImpl_Group8::operator()( const xRegister16& bitbase, const xRegister16& bitoffset ) const { xOpWrite0F( 0x66, 0xa3 | (InstType << 3), bitbase, bitoffset ); }
void xImpl_Group8::operator()( const xRegister16or32or64& bitbase, const xRegister16or32or64& bitoffset ) const {
pxAssert( bitbase->GetOperandSize() == bitoffset->GetOperandSize() );
xOpWrite0F( bitbase->GetPrefix16(), 0xa3 | (InstType << 3), bitbase, bitoffset );
}
void xImpl_Group8::operator()( const xIndirect64& bitbase, u8 bitoffset ) const { xOpWrite0F( 0xba, InstType, bitbase, bitoffset ); }
void xImpl_Group8::operator()( const xIndirect32& bitbase, u8 bitoffset ) const { xOpWrite0F( 0xba, InstType, bitbase, bitoffset ); }
void xImpl_Group8::operator()( const xIndirect16& bitbase, u8 bitoffset ) const { xOpWrite0F( 0x66, 0xba, InstType, bitbase, bitoffset ); }

void xImpl_Group8::operator()( const xRegister16or32& bitbase, u8 bitoffset ) const
void xImpl_Group8::operator()( const xRegister16or32or64& bitbase, u8 bitoffset ) const
{
xOpWrite0F( (bitbase->GetOperandSize() == 2) ? 0x66 : 0x00, 0xba, InstType, bitbase, bitoffset );
xOpWrite0F( bitbase->GetPrefix16(), 0xba, InstType, bitbase, bitoffset );
}

void xImpl_Group8::operator()( const xIndirectVoid& bitbase, const xRegister16or32& bitoffset ) const
void xImpl_Group8::operator()( const xIndirectVoid& bitbase, const xRegister16or32or64& bitoffset ) const
{
xOpWrite0F( (bitoffset->GetOperandSize() == 2) ? 0x66 : 0x00, 0xa3 | (InstType << 3), bitoffset, bitbase );
xOpWrite0F( bitoffset->GetPrefix16(), 0xa3 | (InstType << 3), bitoffset, bitbase );
}

const xImpl_Group8 xBT = { G8Type_BT };

@@ -33,11 +33,8 @@

namespace x86Emitter {

void xImpl_JmpCall::operator()( const xRegister32& absreg ) const { xOpWrite( 0x00, 0xff, isJmp ? 4 : 2, absreg ); }
void xImpl_JmpCall::operator()( const xIndirect32& src ) const { xOpWrite( 0x00, 0xff, isJmp ? 4 : 2, src ); }

void xImpl_JmpCall::operator()( const xRegister16& absreg ) const { xOpWrite( 0x66, 0xff, isJmp ? 4 : 2, absreg ); }
void xImpl_JmpCall::operator()( const xIndirect16& src ) const { xOpWrite( 0x66, 0xff, isJmp ? 4 : 2, src ); }
void xImpl_JmpCall::operator()( const xRegisterInt& absreg ) const { xOpWrite( 0, 0xff, isJmp ? 4 : 2, absreg ); }
void xImpl_JmpCall::operator()( const xIndirect64orLess& src ) const { xOpWrite( 0, 0xff, isJmp ? 4 : 2, src ); }

const xImpl_JmpCall xJMP = { true };
const xImpl_JmpCall xCALL = { false };

@@ -45,26 +45,10 @@ void _xMovRtoR( const xRegisterInt& to, const xRegisterInt& from )
EmitSibMagic( from, to );
}

void xImpl_Mov::operator()( const xRegister8& to, const xRegister8& from ) const
void xImpl_Mov::operator()( const xRegisterInt& to, const xRegisterInt& from ) const
{
if( to == from ) return; // ignore redundant MOVs.
xWrite8( 0x88 );
EmitSibMagic( from, to );
}

void xImpl_Mov::operator()( const xRegister16& to, const xRegister16& from ) const
{
if( to == from ) return; // ignore redundant MOVs.
from.prefix16();
xWrite8( 0x89 );
EmitSibMagic( from, to );
}

void xImpl_Mov::operator()( const xRegister32& to, const xRegister32& from ) const
{
if( to == from ) return; // ignore redundant MOVs.
xWrite8( 0x89 );
EmitSibMagic( from, to );
// FIXME WTF?
_xMovRtoR(to, from);
}

void xImpl_Mov::operator()( const xIndirectVoid& dest, const xRegisterInt& from ) const

@@ -105,7 +89,7 @@ void xImpl_Mov::operator()( const xRegisterInt& to, const xIndirectVoid& src ) c
}
}

void xImpl_Mov::operator()( const xIndirect32orLess& dest, int imm ) const
void xImpl_Mov::operator()( const xIndirect64orLess& dest, int imm ) const
{
dest.prefix16();
xWrite8( dest.Is8BitOp() ? 0xc6 : 0xc7 );

@@ -142,21 +126,17 @@ const xImpl_Mov xMOV;
#define EbpAssert()

void xCMOV( JccComparisonType ccType, const xRegister32& to, const xRegister32& from ) { ccSane(); xOpWrite0F( 0x40 | ccType, to, from ); }
void xCMOV( JccComparisonType ccType, const xRegister32& to, const xIndirectVoid& sibsrc ) { ccSane(); xOpWrite0F( 0x40 | ccType, to, sibsrc ); }
//void xCMOV( JccComparisonType ccType, const xDirectOrIndirect32& to, const xDirectOrIndirect32& from ) const { ccSane(); _DoI_helpermess( *this, to, from ); } // too.. lazy.. to fix.

void xCMOV( JccComparisonType ccType, const xRegister16& to, const xRegister16& from ) { ccSane(); xOpWrite0F( 0x66, 0x40 | ccType, to, from ); }
void xCMOV( JccComparisonType ccType, const xRegister16& to, const xIndirectVoid& sibsrc ) { ccSane(); xOpWrite0F( 0x66, 0x40 | ccType, to, sibsrc ); }
//void xCMOV( JccComparisonType ccType, const xDirectOrIndirect16& to, const xDirectOrIndirect16& from ) const { ccSane(); _DoI_helpermess( *this, to, from ); }
void xImpl_CMov::operator()( const xRegister16or32or64& to, const xRegister16or32or64& from ) const {
pxAssert( to->GetOperandSize() == from->GetOperandSize() );
ccSane();
xOpWrite0F( to->GetPrefix16(), 0x40 | ccType, to, from );
}

void xSET( JccComparisonType ccType, const xRegister8& to ) { ccSane(); xOpWrite0F( 0x90 | ccType, 0, to ); }
void xSET( JccComparisonType ccType, const xIndirect8& dest ) { ccSane(); xOpWrite0F( 0x90 | ccType, 0, dest ); }

void xImpl_CMov::operator()( const xRegister32& to, const xRegister32& from ) const { ccSane(); xOpWrite0F( 0x40 | ccType, to, from ); }
void xImpl_CMov::operator()( const xRegister32& to, const xIndirectVoid& sibsrc ) const { ccSane(); xOpWrite0F( 0x40 | ccType, to, sibsrc ); }
void xImpl_CMov::operator()( const xRegister16& to, const xRegister16& from ) const { ccSane(); xOpWrite0F( 0x66, 0x40 | ccType, to, from ); }
void xImpl_CMov::operator()( const xRegister16& to, const xIndirectVoid& sibsrc ) const { ccSane(); xOpWrite0F( 0x66, 0x40 | ccType, to, sibsrc ); }
void xImpl_CMov::operator()( const xRegister16or32or64& to, const xIndirectVoid& sibsrc ) const {
ccSane();
xOpWrite0F( to->GetPrefix16(), 0x40 | ccType, to, sibsrc );
}

//void xImpl_CMov::operator()( const xDirectOrIndirect32& to, const xDirectOrIndirect32& from ) const { ccSane(); _DoI_helpermess( *this, to, from ); }
//void xImpl_CMov::operator()( const xDirectOrIndirect16& to, const xDirectOrIndirect16& from ) const { ccSane(); _DoI_helpermess( *this, to, from ); }

@@ -165,7 +145,7 @@ void xImpl_Set::operator()( const xRegister8& to ) const { ccSane(); xOpWrite
void xImpl_Set::operator()( const xIndirect8& dest ) const { ccSane(); xOpWrite0F( 0x90 | ccType, 0, dest ); }
//void xImpl_Set::operator()( const xDirectOrIndirect8& dest ) const { ccSane(); _DoI_helpermess( *this, dest ); }

void xImpl_MovExtend::operator()( const xRegister16or32& to, const xRegister8& from ) const
void xImpl_MovExtend::operator()( const xRegister16or32or64& to, const xRegister8& from ) const
{
EbpAssert();
xOpWrite0F(

@@ -175,7 +155,7 @@ void xImpl_MovExtend::operator()( const xRegister16or32& to, const xRegister8& f
);
}

void xImpl_MovExtend::operator()( const xRegister16or32& to, const xIndirect8& sibsrc ) const
void xImpl_MovExtend::operator()( const xRegister16or32or64& to, const xIndirect8& sibsrc ) const
{
EbpAssert();
xOpWrite0F(

@@ -185,13 +165,13 @@ void xImpl_MovExtend::operator()( const xRegister16or32& to, const xIndirect8& s
);
}

void xImpl_MovExtend::operator()( const xRegister32& to, const xRegister16& from ) const
void xImpl_MovExtend::operator()( const xRegister32or64& to, const xRegister16& from ) const
{
EbpAssert();
xOpWrite0F( SignExtend ? 0xbf : 0xb7, to, from );
}

void xImpl_MovExtend::operator()( const xRegister32& to, const xIndirect16& sibsrc ) const
void xImpl_MovExtend::operator()( const xRegister32or64& to, const xIndirect16& sibsrc ) const
{
EbpAssert();
xOpWrite0F( SignExtend ? 0xbf : 0xb7, to, sibsrc );

@ -159,19 +159,19 @@ __fi void xCVTPS2PD( const xRegisterSSE& to, const xIndirect64& from ) { OpWrit
|
|||
__fi void xCVTPS2PI( const xRegisterMMX& to, const xRegisterSSE& from ) { OpWriteSSE( 0x00, 0x2d ); }
|
||||
__fi void xCVTPS2PI( const xRegisterMMX& to, const xIndirect64& from ) { OpWriteSSE( 0x00, 0x2d ); }
|
||||
|
||||
__fi void xCVTSD2SI( const xRegister32& to, const xRegisterSSE& from ) { OpWriteSSE( 0xf2, 0x2d ); }
|
||||
__fi void xCVTSD2SI( const xRegister32& to, const xIndirect64& from ) { OpWriteSSE( 0xf2, 0x2d ); }
|
||||
__fi void xCVTSD2SI( const xRegister32or64& to, const xRegisterSSE& from ) { OpWriteSSE( 0xf2, 0x2d ); }
|
||||
__fi void xCVTSD2SI( const xRegister32or64& to, const xIndirect64& from ) { OpWriteSSE( 0xf2, 0x2d ); }
|
||||
__fi void xCVTSD2SS( const xRegisterSSE& to, const xRegisterSSE& from ) { OpWriteSSE( 0xf2, 0x5a ); }
|
||||
__fi void xCVTSD2SS( const xRegisterSSE& to, const xIndirect64& from ) { OpWriteSSE( 0xf2, 0x5a ); }
|
||||
__fi void xCVTSI2SD( const xRegisterMMX& to, const xRegister32& from ) { OpWriteSSE( 0xf2, 0x2a ); }
|
||||
__fi void xCVTSI2SD( const xRegisterMMX& to, const xRegister32or64& from ) { OpWriteSSE( 0xf2, 0x2a ); }
|
||||
__fi void xCVTSI2SD( const xRegisterMMX& to, const xIndirect32& from ) { OpWriteSSE( 0xf2, 0x2a ); }
|
||||
__fi void xCVTSI2SS( const xRegisterSSE& to, const xRegister32& from ) { OpWriteSSE( 0xf3, 0x2a ); }
|
||||
__fi void xCVTSI2SS( const xRegisterSSE& to, const xRegister32or64& from ) { OpWriteSSE( 0xf3, 0x2a ); }
|
||||
__fi void xCVTSI2SS( const xRegisterSSE& to, const xIndirect32& from ) { OpWriteSSE( 0xf3, 0x2a ); }
|
||||
|
||||
__fi void xCVTSS2SD( const xRegisterSSE& to, const xRegisterSSE& from ) { OpWriteSSE( 0xf3, 0x5a ); }
|
||||
__fi void xCVTSS2SD( const xRegisterSSE& to, const xIndirect32& from ) { OpWriteSSE( 0xf3, 0x5a ); }
|
||||
__fi void xCVTSS2SI( const xRegister32& to, const xRegisterSSE& from ) { OpWriteSSE( 0xf3, 0x2d ); }
|
||||
__fi void xCVTSS2SI( const xRegister32& to, const xIndirect32& from ) { OpWriteSSE( 0xf3, 0x2d ); }
|
||||
__fi void xCVTSS2SI( const xRegister32or64& to, const xRegisterSSE& from ) { OpWriteSSE( 0xf3, 0x2d ); }
|
||||
__fi void xCVTSS2SI( const xRegister32or64& to, const xIndirect32& from ) { OpWriteSSE( 0xf3, 0x2d ); }
|
||||
|
||||
__fi void xCVTTPD2DQ( const xRegisterSSE& to, const xRegisterSSE& from ) { OpWriteSSE( 0x66, 0xe6 ); }
|
||||
__fi void xCVTTPD2DQ( const xRegisterSSE& to, const xIndirect128& from ) { OpWriteSSE( 0x66, 0xe6 ); }
|
||||
|
@ -182,10 +182,10 @@ __fi void xCVTTPS2DQ( const xRegisterSSE& to, const xIndirect128& from ) { OpWr
|
|||
__fi void xCVTTPS2PI( const xRegisterMMX& to, const xRegisterSSE& from ) { OpWriteSSE( 0x00, 0x2c ); }
|
||||
__fi void xCVTTPS2PI( const xRegisterMMX& to, const xIndirect64& from ) { OpWriteSSE( 0x00, 0x2c ); }
|
||||
|
||||
__fi void xCVTTSD2SI( const xRegister32& to, const xRegisterSSE& from ) { OpWriteSSE( 0xf2, 0x2c ); }
|
||||
__fi void xCVTTSD2SI( const xRegister32& to, const xIndirect64& from ) { OpWriteSSE( 0xf2, 0x2c ); }
|
||||
__fi void xCVTTSS2SI( const xRegister32& to, const xRegisterSSE& from ) { OpWriteSSE( 0xf3, 0x2c ); }
|
||||
__fi void xCVTTSS2SI( const xRegister32& to, const xIndirect32& from ) { OpWriteSSE( 0xf3, 0x2c ); }
|
||||
__fi void xCVTTSD2SI( const xRegister32or64& to, const xRegisterSSE& from ) { OpWriteSSE( 0xf2, 0x2c ); }
|
||||
__fi void xCVTTSD2SI( const xRegister32or64& to, const xIndirect64& from ) { OpWriteSSE( 0xf2, 0x2c ); }
|
||||
__fi void xCVTTSS2SI( const xRegister32or64& to, const xRegisterSSE& from ) { OpWriteSSE( 0xf3, 0x2c ); }
|
||||
__fi void xCVTTSS2SI( const xRegister32or64& to, const xIndirect32& from ) { OpWriteSSE( 0xf3, 0x2c ); }
|
||||
|
||||
|
||||
// ------------------------------------------------------------------------
|
||||
|
@ -685,16 +685,16 @@ const xImplSimd_DestRegSSE xMOVSHDUP = { 0xf3,0x16 };
|
|||
// * MOVD has valid forms for MMX and XMM registers.
|
||||
//
|
||||
|
||||
__fi void xMOVDZX( const xRegisterSSE& to, const xRegister32& from ) { xOpWrite0F( 0x66, 0x6e, to, from ); }
|
||||
__fi void xMOVDZX( const xRegisterSSE& to, const xRegister32or64& from ) { xOpWrite0F( 0x66, 0x6e, to, from ); }
|
||||
__fi void xMOVDZX( const xRegisterSSE& to, const xIndirectVoid& src ) { xOpWrite0F( 0x66, 0x6e, to, src ); }
|
||||
|
||||
__fi void xMOVDZX( const xRegisterMMX& to, const xRegister32& from ) { xOpWrite0F( 0x6e, to, from ); }
|
||||
__fi void xMOVDZX( const xRegisterMMX& to, const xRegister32or64& from ) { xOpWrite0F( 0x6e, to, from ); }
|
||||
__fi void xMOVDZX( const xRegisterMMX& to, const xIndirectVoid& src ) { xOpWrite0F( 0x6e, to, src ); }
|
||||
|
||||
__fi void xMOVD( const xRegister32& to, const xRegisterSSE& from ) { xOpWrite0F( 0x66, 0x7e, from, to ); }
|
||||
__fi void xMOVD( const xRegister32or64& to, const xRegisterSSE& from ) { xOpWrite0F( 0x66, 0x7e, from, to ); }
|
||||
__fi void xMOVD( const xIndirectVoid& dest, const xRegisterSSE& from ) { xOpWrite0F( 0x66, 0x7e, from, dest ); }
|
||||
|
||||
__fi void xMOVD( const xRegister32& to, const xRegisterMMX& from ) { xOpWrite0F( 0x7e, from, to ); }
|
||||
__fi void xMOVD( const xRegister32or64& to, const xRegisterMMX& from ) { xOpWrite0F( 0x7e, from, to ); }
|
||||
__fi void xMOVD( const xIndirectVoid& dest, const xRegisterMMX& from ) { xOpWrite0F( 0x7e, from, dest ); }
|
||||
|
||||
|
||||
|
@ -760,8 +760,8 @@ __fi void xMOVNTQ( const xIndirectVoid& to, const xRegisterMMX& from ) { xOpWrit
|
|||
|
||||
// ------------------------------------------------------------------------
|
||||
|
||||
__fi void xMOVMSKPS( const xRegister32& to, const xRegisterSSE& from) { xOpWrite0F( 0x50, to, from ); }
|
||||
__fi void xMOVMSKPD( const xRegister32& to, const xRegisterSSE& from) { xOpWrite0F( 0x66, 0x50, to, from, true ); }
|
||||
__fi void xMOVMSKPS( const xRegister32or64& to, const xRegisterSSE& from) { xOpWrite0F( 0x50, to, from ); }
|
||||
__fi void xMOVMSKPD( const xRegister32or64& to, const xRegisterSSE& from) { xOpWrite0F( 0x66, 0x50, to, from, true ); }
|
||||
|
||||
// xMASKMOV:
|
||||
// Selectively write bytes from mm1/xmm1 to memory location using the byte mask in mm2/xmm2.
|
||||
|
@ -779,8 +779,8 @@ __fi void xMASKMOV( const xRegisterMMX& to, const xRegisterMMX& from ) { xOpWri

// When operating on a 64-bit (MMX) source, the byte mask is 8 bits; when operating on a
// 128-bit (SSE) source, the byte mask is 16 bits.
//
__fi void xPMOVMSKB( const xRegister32& to, const xRegisterSSE& from ) { xOpWrite0F( 0x66, 0xd7, to, from ); }
__fi void xPMOVMSKB( const xRegister32& to, const xRegisterMMX& from ) { xOpWrite0F( 0xd7, to, from ); }
__fi void xPMOVMSKB( const xRegister32or64& to, const xRegisterSSE& from ) { xOpWrite0F( 0x66, 0xd7, to, from ); }
__fi void xPMOVMSKB( const xRegister32or64& to, const xRegisterMMX& from ) { xOpWrite0F( 0xd7, to, from ); }
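To make the mask layout concrete, a caller could use the widened form roughly like this (editor's sketch, not code from the commit; the explicit register constructions are only illustrative):

    // Gather one bit per byte of xmm0 into the low 16 bits of a GPR, then test it.
    xPMOVMSKB( xRegister32( 0 ), xRegisterSSE( 0 ) );
    xTEST( xRegister32( 0 ), 0xFFFF );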
// [SSSE3] Concatenates dest and source operands into an intermediate composite,
// shifts the composite at byte granularity to the right by a constant immediate,

@ -816,7 +816,7 @@ __emitinline void xINSERTPS( const xRegisterSSE& to, const xIndirect32& from, u8

// determined by imm8[1-0]*32. The extracted single precision floating-point value
// is stored into the low 32 bits of dest (or at a 32-bit memory pointer).
//
__emitinline void xEXTRACTPS( const xRegister32& to, const xRegisterSSE& from, u8 imm8 ) { xOpWrite0F( 0x66, 0x173a, to, from, imm8 ); }
__emitinline void xEXTRACTPS( const xRegister32or64& to, const xRegisterSSE& from, u8 imm8 ) { xOpWrite0F( 0x66, 0x173a, to, from, imm8 ); }
__emitinline void xEXTRACTPS( const xIndirect32& dest, const xRegisterSSE& from, u8 imm8 ) { xOpWrite0F( 0x66, 0x173a, from, dest, imm8 ); }
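As a quick usage illustration (editor's sketch, not from the commit): imm8 picks the source dword, so pulling float lane 2 of xmm1 into a 32-bit GPR would look roughly like

    xEXTRACTPS( xRegister32( 0 ), xRegisterSSE( 1 ), 2 );   // dest = dword #2 of xmm1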
@ -110,7 +110,11 @@ const xRegisterSSE
    xmm0( 0 ), xmm1( 1 ),
    xmm2( 2 ), xmm3( 3 ),
    xmm4( 4 ), xmm5( 5 ),
    xmm6( 6 ), xmm7( 7 );
    xmm6( 6 ), xmm7( 7 ),
    xmm8( 8 ), xmm9( 9 ),
    xmm10( 10 ), xmm11( 11 ),
    xmm12( 12 ), xmm13( 13 ),
    xmm14( 14 ), xmm15( 15 );

const xRegisterMMX
    mm0( 0 ), mm1( 1 ),

@ -118,6 +122,16 @@ const xRegisterMMX
    mm4( 4 ), mm5( 5 ),
    mm6( 6 ), mm7( 7 );
const xAddressReg
    rax( 0 ), rbx( 3 ),
    rcx( 1 ), rdx( 2 ),
    rsp( 4 ), rbp( 5 ),
    rsi( 6 ), rdi( 7 ),
    r8( 8 ),  r9( 9 ),
    r10( 10 ), r11( 11 ),
    r12( 12 ), r13( 13 ),
    r14( 14 ), r15( 15 );

const xAddressReg
    eax( 0 ), ebx( 3 ),
    ecx( 1 ), edx( 2 ),
@ -144,25 +158,37 @@ const char *const x86_regnames_gpr8[8] =
    "ah", "ch", "dh", "bh"
};

const char *const x86_regnames_gpr16[8] =
const char *const x86_regnames_gpr16[] =
{
    "ax", "cx", "dx", "bx",
    "sp", "bp", "si", "di"
};

const char *const x86_regnames_gpr32[8] =
const char *const x86_regnames_gpr32[] =
{
    "eax", "ecx", "edx", "ebx",
    "esp", "ebp", "esi", "edi"
    "esp", "ebp", "esi", "edi",
    "e8", "e9", "e10", "e11",
    "e12", "e13", "e14", "e15"
};
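(One aside from the editor on the table above: the architectural names for the 32-bit views of r8-r15 are r8d-r15d, so the "e8".."e15" strings read as informal debug labels rather than standard register names.)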
const char *const x86_regnames_sse[8] =
const char *const x86_regnames_gpr64[] =
{
    "rax", "rcx", "rdx", "rbx",
    "rsp", "rbp", "rsi", "rdi",
    "r8",  "r9",  "r10", "r11",
    "r12", "r13", "r14", "r15"
};

const char *const x86_regnames_sse[] =
{
    "xmm0", "xmm1", "xmm2", "xmm3",
    "xmm4", "xmm5", "xmm6", "xmm7"
    "xmm4", "xmm5", "xmm6", "xmm7",
    "xmm8", "xmm9", "xmm10", "xmm11",
    "xmm12", "xmm13", "xmm14", "xmm15"
};

const char *const x86_regnames_mmx[8] =
const char *const x86_regnames_mmx[] =
{
    "mm0", "mm1", "mm2", "mm3",
    "mm4", "mm5", "mm6", "mm7"
@ -182,7 +208,11 @@ const char* xRegisterBase::GetName()
    case 1: return x86_regnames_gpr8[ Id ];
    case 2: return x86_regnames_gpr16[ Id ];
    case 4: return x86_regnames_gpr32[ Id ];
#ifdef __x86_64__
    case 8: return x86_regnames_gpr64[ Id ];
#else
    case 8: return x86_regnames_mmx[ Id ];
#endif
    case 16: return x86_regnames_sse[ Id ];
    }
@ -291,8 +321,9 @@ static __fi bool NeedsSibMagic( const xIndirectVoid& info )
//
void EmitSibMagic( uint regfield, const xIndirectVoid& info )
{
    // The register field is still only 3 bits wide on x86_64 (so the max is 8).
    // We might need to mask it on x86_64.
    pxAssertDev( regfield < 8, "Invalid x86 register identifier." );

    int displacement_size = (info.Displacement == 0) ? 0 :
        ( ( info.IsByteSizeDisp() ) ? 1 : 2 );
@ -374,6 +405,53 @@ void EmitSibMagic( const xRegisterBase& reg1, const xIndirectVoid& sib )
    EmitSibMagic( reg1.Id, sib );
}

//////////////////////////////////////////////////////////////////////////////////////////
void EmitRex( const xRegisterBase& reg1, const xRegisterBase& reg2 )
{
    u8 w = reg1.IsWide() << 3;
    u8 r = reg1.IsExtended() << 2;
    u8 x = 0;
    u8 b = reg2.IsExtended();
    xWrite8( 0x40 | w | r | x | b );
}

void EmitRex( const xRegisterBase& reg1, const void* src )
{
    pxAssert(0); //see fixme
    u8 w = reg1.IsWide() << 3;
    u8 r = reg1.IsExtended() << 2;
    u8 x = 0;
    u8 b = 0; // FIXME src.IsExtended();
    xWrite8( 0x40 | w | r | x | b );
}

void EmitRex( const xRegisterBase& reg1, const xIndirectVoid& sib )
{
    u8 w = reg1.IsWide() << 3;
    u8 r = reg1.IsExtended() << 2;
    u8 x = sib.Index.IsExtended() << 1;
    u8 b = sib.Base.IsExtended();
    xWrite8( 0x40 | w | r | x | b );
}

void EmitRex( const xRegisterBase& reg1)
{
    u8 w = reg1.IsWide() << 3;
    u8 r = 0;
    u8 x = 0;
    u8 b = reg1.IsExtended();
    xWrite8( 0x40 | w | r | x | b );
}

void EmitRex( const xIndirectVoid& sib)
{
    u8 w = sib.Base.IsWide() << 3;
    u8 r = 0;
    u8 x = 0;
    u8 b = sib.IsExtended();
    xWrite8( 0x40 | w | r | x | b );
}
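A quick cross-check of the composition used above (editor's note, not part of the commit): the REX prefix has the layout 0100WRXB, so a 64-bit operation (W=1) whose r/m operand is one of r8-r15 (B=1) produces 0x40 | 0x08 | 0x01 = 0x49, which is exactly the byte these helpers would write for such operands.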
// --------------------------------------------------------------------------------------
//  xSetPtr / xAlignPtr / xGetPtr / xAdvancePtr
// --------------------------------------------------------------------------------------

@ -471,7 +549,7 @@ xAddressVoid xAddressReg::operator-( const void* right ) const
    return xAddressVoid( *this, -(sptr)right );
}

xAddressVoid xAddressReg::operator*( u32 factor ) const
xAddressVoid xAddressReg::operator*( int factor ) const
{
    pxAssertMsg( Id != -1, "Uninitialized x86 register." );
    return xAddressVoid( xEmptyReg, *this, factor );

@ -499,7 +577,7 @@ xAddressVoid::xAddressVoid( const xAddressReg& base, const xAddressReg& index, i
    pxAssertMsg( index.Id != xRegId_Invalid, "Uninitialized x86 register." );
}

xAddressVoid::xAddressVoid( const xAddressReg& index, int displacement )
xAddressVoid::xAddressVoid( const xAddressReg& index, s32 displacement )
{
    Base = xEmptyReg;
    Index = index;
@ -522,7 +600,12 @@ xAddressVoid::xAddressVoid( const void* displacement )
    Base = xEmptyReg;
    Index = xEmptyReg;
    Factor = 0;
    Displacement = (sptr)displacement;
#ifdef __x86_64__
    pxAssert(0);
    //Displacement = (s32)displacement;
#else
    Displacement = (s32)displacement;
#endif
}

xAddressVoid& xAddressVoid::Add( const xAddressReg& src )

@ -684,7 +767,7 @@ void xIndirectVoid::Reduce()
    Base = Index;
    Scale = 3;
    break;

    jNO_DEFAULT
    }
}
@ -824,11 +907,15 @@ static void EmitLeaMagic( const xRegisterInt& to, const xIndirectVoid& src, bool
    }
}

__emitinline void xLEA( xRegister32 to, const xIndirectVoid& src, bool preserve_flags )
__emitinline void xLEA( xRegister64 to, const xIndirectVoid& src, bool preserve_flags )
{
    EmitLeaMagic( to, src, preserve_flags );
}

__emitinline void xLEA( xRegister32 to, const xIndirectVoid& src, bool preserve_flags )
{
    EmitLeaMagic( to, src, preserve_flags );
}

__emitinline void xLEA( xRegister16 to, const xIndirectVoid& src, bool preserve_flags )
{
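With the new 64-bit overload an address computation can be expressed directly; a hypothetical call site (editor's sketch, assuming `ptr` is the emitter's usual indirect-operand builder and the register constants defined earlier in this commit are in scope) might read:

    xLEA( rax, ptr[rbx + rcx*4 + 16], false );   // rax = rbx + rcx*4 + 16

Passing true for the last argument presumably forces a genuine LEA rather than any shorter, flag-clobbering substitution EmitLeaMagic may otherwise pick.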
@ -839,26 +926,15 @@ __emitinline void xLEA( xRegister16 to, const xIndirectVoid& src, bool preserve_

// =====================================================================================================
//  TEST / INC / DEC
// =====================================================================================================
void xImpl_Test::operator()( const xRegister8& to, const xRegister8& from ) const
{
    xWrite8( 0x84 );
    EmitSibMagic( from, to );
}

void xImpl_Test::operator()( const xRegister16& to, const xRegister16& from ) const
void xImpl_Test::operator()( const xRegisterInt& to, const xRegisterInt& from ) const
{
    pxAssert( to.GetOperandSize() == from.GetOperandSize() );
    to.prefix16();
    xWrite8( 0x85 );
    xWrite8( to.Is8BitOp() ? 0x84 : 0x85 );
    EmitSibMagic( from, to );
}

void xImpl_Test::operator()( const xRegister32& to, const xRegister32& from ) const
{
    xWrite8( 0x85 );
    EmitSibMagic( from, to );
}

void xImpl_Test::operator()( const xIndirect32orLess& dest, int imm ) const
void xImpl_Test::operator()( const xIndirect64orLess& dest, int imm ) const
{
    dest.prefix16();
    xWrite8( dest.Is8BitOp() ? 0xf6 : 0xf7 );
@ -880,11 +956,13 @@ void xImpl_Test::operator()( const xRegisterInt& to, int imm ) const
    to.xWriteImm( imm );
}

void xImpl_BitScan::operator()( const xRegister32& to, const xRegister32& from ) const { xOpWrite0F( Opcode, to, from ); }
void xImpl_BitScan::operator()( const xRegister16& to, const xRegister16& from ) const { xOpWrite0F( 0x66, Opcode, to, from ); }
void xImpl_BitScan::operator()( const xRegister16or32& to, const xIndirectVoid& sibsrc ) const
void xImpl_BitScan::operator()( const xRegister16or32or64& to, const xRegister16or32or64& from ) const {
    pxAssert( to->GetOperandSize() == from->GetOperandSize() );
    xOpWrite0F( from->GetPrefix16(), Opcode, to, from );
}
void xImpl_BitScan::operator()( const xRegister16or32or64& to, const xIndirectVoid& sibsrc ) const
{
    xOpWrite0F( (to->GetOperandSize() == 2) ? 0x66 : 0x00, Opcode, to, sibsrc );
    xOpWrite0F( to->GetPrefix16(), Opcode, to, sibsrc );
}

void xImpl_IncDec::operator()( const xRegisterInt& to ) const
@ -901,35 +979,34 @@ void xImpl_IncDec::operator()( const xRegisterInt& to ) const
    }
}

void xImpl_IncDec::operator()( const xIndirect32orLess& to ) const
void xImpl_IncDec::operator()( const xIndirect64orLess& to ) const
{
    to.prefix16();
    xWrite8( to.Is8BitOp() ? 0xfe : 0xff );
    EmitSibMagic( isDec ? 1 : 0, to );
}

void xImpl_DwordShift::operator()( const xRegister32& to, const xRegister32& from, const xRegisterCL& /* clreg */ ) const { xOpWrite0F( OpcodeBase+1, to, from ); }
void xImpl_DwordShift::operator()( const xRegister16& to, const xRegister16& from, const xRegisterCL& /* clreg */ ) const { xOpWrite0F( 0x66, OpcodeBase+1, to, from ); }
void xImpl_DwordShift::operator()( const xRegister32& to, const xRegister32& from, u8 shiftcnt ) const
{
    if( shiftcnt != 0 )
        xOpWrite0F( OpcodeBase, to, from, shiftcnt );
}
void xImpl_DwordShift::operator()( const xRegister16& to, const xRegister16& from, u8 shiftcnt ) const
{
    if( shiftcnt != 0 )
        xOpWrite0F( 0x66, OpcodeBase, to, from, shiftcnt );
void xImpl_DwordShift::operator()( const xRegister16or32or64& to, const xRegister16or32or64& from, const xRegisterCL& /* clreg */ ) const {
    pxAssert( to->GetOperandSize() == from->GetOperandSize() );
    xOpWrite0F( from->GetPrefix16(), OpcodeBase+1, to, from );
}

void xImpl_DwordShift::operator()( const xIndirectVoid& dest, const xRegister16or32& from, const xRegisterCL& /* clreg */ ) const
void xImpl_DwordShift::operator()( const xRegister16or32or64& to, const xRegister16or32or64& from, u8 shiftcnt ) const
{
    xOpWrite0F( (from->GetOperandSize() == 2) ? 0x66 : 0x00, OpcodeBase + 1, from, dest );
    pxAssert( to->GetOperandSize() == from->GetOperandSize() );
    if( shiftcnt != 0 )
        xOpWrite0F( from->GetPrefix16(), OpcodeBase, to, from, shiftcnt );
}

void xImpl_DwordShift::operator()( const xIndirectVoid& dest, const xRegister16or32& from, u8 shiftcnt ) const
void xImpl_DwordShift::operator()( const xIndirectVoid& dest, const xRegister16or32or64& from, const xRegisterCL& /* clreg */ ) const
{
    xOpWrite0F( from->GetPrefix16(), OpcodeBase + 1, from, dest );
}

void xImpl_DwordShift::operator()( const xIndirectVoid& dest, const xRegister16or32or64& from, u8 shiftcnt ) const
{
    if( shiftcnt != 0 )
        xOpWrite0F( (from->GetOperandSize() == 2) ? 0x66 : 0x00, OpcodeBase, from, dest, shiftcnt );
        xOpWrite0F( from->GetPrefix16(), OpcodeBase, from, dest, shiftcnt );
}
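For context (editor's note): xImpl_DwordShift backs the double-precision shifts SHLD/SHRD, whose immediate-count and CL-count forms are adjacent opcodes (0F A4 / 0F A5 for SHLD, 0F AC / 0F AD for SHRD); that is why the CL overloads write OpcodeBase+1.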

const xImpl_Test xTEST = { };

@ -961,10 +1038,10 @@ __emitinline void xPUSH( const xIndirectVoid& from )
    EmitSibMagic( 6, from );
}

__fi void xPOP( xRegister32 from ) { xWrite8( 0x58 | from.Id ); }
__fi void xPOP( xRegister32or64 from ) { xWrite8( 0x58 | from->Id ); }

__fi void xPUSH( u32 imm ) { xWrite8( 0x68 ); xWrite32( imm ); }
__fi void xPUSH( xRegister32 from ) { xWrite8( 0x50 | from.Id ); }
__fi void xPUSH( xRegister32or64 from ) { xWrite8( 0x50 | from->Id ); }

// pushes the EFLAGS register onto the stack
__fi void xPUSHFD() { xWrite8( 0x9C ); }
@ -1004,10 +1081,10 @@ __fi void xINT( u8 imm )

__fi void xINTO() { xWrite8(0xce); }

__emitinline void xBSWAP( const xRegister32& to )
__emitinline void xBSWAP( const xRegister32or64& to )
{
    xWrite8( 0x0F );
    xWrite8( 0xC8 | to.Id );
    xWrite8( 0xC8 | to->Id );
}
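A similar caveat applies here (editor's observation): BSWAP is encoded as 0F C8+rd, so a 64-bit or extended-register operand would need a REX prefix (and an `Id & 7` mask) ahead of the two bytes written above.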
static __aligned16 u64 xmm_data[iREGCNT_XMM*2];
@ -121,12 +121,12 @@ extern _x86regs x86regs[iREGCNT_GPR], s_saveX86regs[iREGCNT_GPR];
uptr _x86GetAddr(int type, int reg);
void _initX86regs();
int _getFreeX86reg(int mode);
int _allocX86reg(x86Emitter::xRegister32 x86reg, int type, int reg, int mode);
int _allocX86reg(x86Emitter::xRegisterLong x86reg, int type, int reg, int mode);
void _deleteX86reg(int type, int reg, int flush);
int _checkX86reg(int type, int reg, int mode);
void _addNeededX86reg(int type, int reg);
void _clearNeededX86regs();
void _freeX86reg(const x86Emitter::xRegister32& x86reg);
void _freeX86reg(const x86Emitter::xRegisterLong& x86reg);
void _freeX86reg(int x86reg);
void _freeX86regs();
void _flushCachedRegs();
@ -388,7 +388,7 @@ void _psxDeleteReg(int reg, int flush)
    _deleteX86reg(X86TYPE_PSX, reg, flush ? 0 : 2);
}

void _psxMoveGPRtoR(const xRegister32& to, int fromgpr)
void _psxMoveGPRtoR(const xRegisterLong& to, int fromgpr)
{
    if( PSX_IS_CONST1(fromgpr) )
        xMOV(to, g_psxConstRegs[fromgpr] );
@ -48,7 +48,7 @@ void _psxFlushCall(int flushtype);

void _psxOnWriteReg(int reg);

void _psxMoveGPRtoR(const x86Emitter::xRegister32& to, int fromgpr);
void _psxMoveGPRtoR(const x86Emitter::xRegisterLong& to, int fromgpr);
#if 0
void _psxMoveGPRtoM(uptr to, int fromgpr);
void _psxMoveGPRtoRm(x86IntRegType to, int fromgpr);
@ -104,7 +104,7 @@ extern u32 g_cpuHasConstReg, g_cpuFlushedConstReg;
u32* _eeGetConstReg(int reg);

// finds where the GPR is stored and moves the lower 32 bits to EAX
void _eeMoveGPRtoR(const x86Emitter::xRegister32& to, int fromgpr);
void _eeMoveGPRtoR(const x86Emitter::xRegisterLong& to, int fromgpr);
void _eeMoveGPRtoM(uptr to, int fromgpr);
void _eeMoveGPRtoRm(x86IntRegType to, int fromgpr);
void eeSignExtendTo(int gpr, bool onlyupper=false);
@ -243,7 +243,7 @@ void _flushConstRegs()
    }
}

int _allocX86reg(xRegister32 x86reg, int type, int reg, int mode)
int _allocX86reg(xRegisterLong x86reg, int type, int reg, int mode)
{
    uint i;
    pxAssertDev( reg >= 0 && reg < 32, "Register index out of bounds." );

@ -316,7 +316,7 @@ int _allocX86reg(xRegister32 x86reg, int type, int reg, int mode)
    }

    if (x86reg.IsEmpty())
        x86reg = xRegister32(_getFreeX86reg(oldmode));
        x86reg = xRegisterLong(_getFreeX86reg(oldmode));
    else
        _freeX86reg(x86reg);
@ -445,7 +445,7 @@ void _deleteX86reg(int type, int reg, int flush)
}

// Temporary solution to support the eax/ebx... register types
void _freeX86reg(const x86Emitter::xRegister32& x86reg)
void _freeX86reg(const x86Emitter::xRegisterLong& x86reg)
{
    _freeX86reg(x86reg.GetId());
}
@ -154,7 +154,7 @@ u32* _eeGetConstReg(int reg)
    return &cpuRegs.GPR.r[ reg ].UL[0];
}

void _eeMoveGPRtoR(const xRegister32& to, int fromgpr)
void _eeMoveGPRtoR(const xRegisterLong& to, int fromgpr)
{
    if( fromgpr == 0 )
        xXOR(to, to); // zeroing a register should use xor, thanks --air
@ -18,7 +18,7 @@

using namespace x86Emitter;

typedef xRegisterSSE xmm;
typedef xRegister32 x32;
typedef xRegisterLong x32;

struct microVU;
@ -139,7 +139,7 @@ void VifUnpackSSE_Dynarec::writeBackRow() const {
    // ToDo: Do we need to write back to vifregs.rX too!? :/
}

static void ShiftDisplacementWindow( xAddressVoid& addr, const xRegister32& modReg )
static void ShiftDisplacementWindow( xAddressVoid& addr, const xRegisterLong& modReg )
{
    // Shifts the displacement factor of a given indirect address, so that the address
    // remains in the optimal 0xf0 range (which allows for byte-form displacements when
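The idea the comment describes can be sketched as follows (editor's illustration only; the real function body is not shown in this hunk, and the emitter calls and fields used here are assumptions):

    // Hypothetical sketch: fold excess displacement into modReg so the remaining
    // displacement stays small enough for byte-form (disp8) encodings.
    static void ShiftDisplacementWindow_Sketch( xAddressVoid& addr, const xRegisterLong& modReg )
    {
        int addImm = 0;
        while( addr.Displacement >= 0x80 )
        {
            addImm += 0xf0;      // move 0xf0 at a time into the register...
            addr -= 0xf0;        // ...and out of the displacement
        }
        if( addImm != 0 )
            xADD( modReg, addImm );
    }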