mirror of https://github.com/PCSX2/pcsx2.git
Emitter: Changed a lot of 'i's into 'x's, because...
<JakeStine>sudonim says he likes xMOV better than iMOV as an emitter prefix. I'm wondering if I should go ahead and change it. <JakeStine>I tend to favor logic, but everyone else just thinks it looks like iMac and iPod <JakeStine>I just don't want to have to change it more than once. <cotton>well 'x' is like the algebraic variable, which can be anything <cotton>so it does kindoff make sense <cotton>cuz like <cotton>you have xSOMETHING, where SOMETHING is all the different emitter functions ... I'm sold. :p git-svn-id: http://pcsx2.googlecode.com/svn/trunk@1030 96395faa-99c1-11dd-bbfe-3dabce05a288
This commit is contained in:
parent
3ee59f3f4e
commit
18c4765d31
|
@ -802,12 +802,12 @@ void _saveEAX(VURegs *VU, int x86reg, uptr offset, int info)
|
||||||
|
|
||||||
// (this is one of my test cases for the new emitter --air)
|
// (this is one of my test cases for the new emitter --air)
|
||||||
using namespace x86Emitter;
|
using namespace x86Emitter;
|
||||||
iAddressReg thisreg( x86reg );
|
xAddressReg thisreg( x86reg );
|
||||||
|
|
||||||
if ( _X ) iMOV(ptr32[thisreg+offset], 0x00000000);
|
if ( _X ) xMOV(ptr32[thisreg+offset], 0x00000000);
|
||||||
if ( _Y ) iMOV(ptr32[thisreg+offset+4], 0x00000000);
|
if ( _Y ) xMOV(ptr32[thisreg+offset+4], 0x00000000);
|
||||||
if ( _Z ) iMOV(ptr32[thisreg+offset+8], 0x00000000);
|
if ( _Z ) xMOV(ptr32[thisreg+offset+8], 0x00000000);
|
||||||
if ( _W ) iMOV(ptr32[thisreg+offset+12], 0x3f800000);
|
if ( _W ) xMOV(ptr32[thisreg+offset+12], 0x3f800000);
|
||||||
}
|
}
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
|
@ -34,20 +34,20 @@ void MOV128_MtoM( x86IntRegType destRm, x86IntRegType srcRm )
|
||||||
{
|
{
|
||||||
// (this is one of my test cases for the new emitter --air)
|
// (this is one of my test cases for the new emitter --air)
|
||||||
|
|
||||||
iAddressReg src( srcRm );
|
xAddressReg src( srcRm );
|
||||||
iAddressReg dest( destRm );
|
xAddressReg dest( destRm );
|
||||||
|
|
||||||
iMOV( eax, ptr[src] );
|
xMOV( eax, ptr[src] );
|
||||||
iMOV( ptr[dest], eax );
|
xMOV( ptr[dest], eax );
|
||||||
|
|
||||||
iMOV( eax, ptr[src+4] );
|
xMOV( eax, ptr[src+4] );
|
||||||
iMOV( ptr[dest+4], eax );
|
xMOV( ptr[dest+4], eax );
|
||||||
|
|
||||||
iMOV( eax, ptr[src+8] );
|
xMOV( eax, ptr[src+8] );
|
||||||
iMOV( ptr[dest+8], eax );
|
xMOV( ptr[dest+8], eax );
|
||||||
|
|
||||||
iMOV( eax, ptr[src+12] );
|
xMOV( eax, ptr[src+12] );
|
||||||
iMOV( ptr[dest+12], eax );
|
xMOV( ptr[dest+12], eax );
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -200,10 +200,10 @@ void vtlb_DynGenRead64(u32 bits)
|
||||||
SHR32ItoR(EAX,VTLB_PAGE_BITS);
|
SHR32ItoR(EAX,VTLB_PAGE_BITS);
|
||||||
MOV32RmSOffsettoR(EAX,EAX,(int)vtlbdata.vmap,2);
|
MOV32RmSOffsettoR(EAX,EAX,(int)vtlbdata.vmap,2);
|
||||||
ADD32RtoR(ECX,EAX);
|
ADD32RtoR(ECX,EAX);
|
||||||
iForwardJS8 _fullread;
|
xForwardJS8 _fullread;
|
||||||
|
|
||||||
_vtlb_DynGen_DirectRead( bits, false );
|
_vtlb_DynGen_DirectRead( bits, false );
|
||||||
iForwardJump8 cont;
|
xForwardJump8 cont;
|
||||||
|
|
||||||
_fullread.SetTarget();
|
_fullread.SetTarget();
|
||||||
|
|
||||||
|
@ -223,10 +223,10 @@ void vtlb_DynGenRead32(u32 bits, bool sign)
|
||||||
SHR32ItoR(EAX,VTLB_PAGE_BITS);
|
SHR32ItoR(EAX,VTLB_PAGE_BITS);
|
||||||
MOV32RmSOffsettoR(EAX,EAX,(int)vtlbdata.vmap,2);
|
MOV32RmSOffsettoR(EAX,EAX,(int)vtlbdata.vmap,2);
|
||||||
ADD32RtoR(ECX,EAX);
|
ADD32RtoR(ECX,EAX);
|
||||||
iForwardJS8 _fullread;
|
xForwardJS8 _fullread;
|
||||||
|
|
||||||
_vtlb_DynGen_DirectRead( bits, sign );
|
_vtlb_DynGen_DirectRead( bits, sign );
|
||||||
iForwardJump8 cont;
|
xForwardJump8 cont;
|
||||||
|
|
||||||
_fullread.SetTarget();
|
_fullread.SetTarget();
|
||||||
_vtlb_DynGen_IndirectRead( bits );
|
_vtlb_DynGen_IndirectRead( bits );
|
||||||
|
@ -478,10 +478,10 @@ void vtlb_DynGenWrite(u32 sz)
|
||||||
SHR32ItoR(EAX,VTLB_PAGE_BITS);
|
SHR32ItoR(EAX,VTLB_PAGE_BITS);
|
||||||
MOV32RmSOffsettoR(EAX,EAX,(int)vtlbdata.vmap,2);
|
MOV32RmSOffsettoR(EAX,EAX,(int)vtlbdata.vmap,2);
|
||||||
ADD32RtoR(ECX,EAX);
|
ADD32RtoR(ECX,EAX);
|
||||||
iForwardJS8 _full;
|
xForwardJS8 _full;
|
||||||
|
|
||||||
_vtlb_DynGen_DirectWrite( sz );
|
_vtlb_DynGen_DirectWrite( sz );
|
||||||
iForwardJump8 cont;
|
xForwardJump8 cont;
|
||||||
|
|
||||||
_full.SetTarget();
|
_full.SetTarget();
|
||||||
_vtlb_DynGen_IndirectWrite( sz );
|
_vtlb_DynGen_IndirectWrite( sz );
|
||||||
|
|
|
@ -41,54 +41,54 @@ class Group8Impl
|
||||||
protected:
|
protected:
|
||||||
static const uint OperandSize = sizeof(ImmType);
|
static const uint OperandSize = sizeof(ImmType);
|
||||||
|
|
||||||
static void prefix16() { if( OperandSize == 2 ) iWrite<u8>( 0x66 ); }
|
static void prefix16() { if( OperandSize == 2 ) xWrite<u8>( 0x66 ); }
|
||||||
|
|
||||||
public:
|
public:
|
||||||
Group8Impl() {} // For the love of GCC.
|
Group8Impl() {} // For the love of GCC.
|
||||||
|
|
||||||
// ------------------------------------------------------------------------
|
// ------------------------------------------------------------------------
|
||||||
static __emitinline void Emit( const iRegister<ImmType>& bitbase, const iRegister<ImmType>& bitoffset )
|
static __emitinline void Emit( const xRegister<ImmType>& bitbase, const xRegister<ImmType>& bitoffset )
|
||||||
{
|
{
|
||||||
prefix16();
|
prefix16();
|
||||||
iWrite<u8>( 0x0f );
|
xWrite<u8>( 0x0f );
|
||||||
iWrite<u8>( 0xa3 | (InstType << 2) );
|
xWrite<u8>( 0xa3 | (InstType << 2) );
|
||||||
ModRM_Direct( bitoffset.Id, bitbase.Id );
|
ModRM_Direct( bitoffset.Id, bitbase.Id );
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------------------
|
// ------------------------------------------------------------------------
|
||||||
static __emitinline void Emit( void* bitbase, const iRegister<ImmType>& bitoffset )
|
static __emitinline void Emit( void* bitbase, const xRegister<ImmType>& bitoffset )
|
||||||
{
|
{
|
||||||
prefix16();
|
prefix16();
|
||||||
iWrite<u8>( 0x0f );
|
xWrite<u8>( 0x0f );
|
||||||
iWrite<u8>( 0xa3 | (InstType << 2) );
|
xWrite<u8>( 0xa3 | (InstType << 2) );
|
||||||
iWriteDisp( bitoffset.Id, bitbase );
|
xWriteDisp( bitoffset.Id, bitbase );
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------------------
|
// ------------------------------------------------------------------------
|
||||||
static __emitinline void Emit( const ModSibBase& bitbase, const iRegister<ImmType>& bitoffset )
|
static __emitinline void Emit( const ModSibBase& bitbase, const xRegister<ImmType>& bitoffset )
|
||||||
{
|
{
|
||||||
prefix16();
|
prefix16();
|
||||||
iWrite<u8>( 0x0f );
|
xWrite<u8>( 0x0f );
|
||||||
iWrite<u8>( 0xa3 | (InstType << 2) );
|
xWrite<u8>( 0xa3 | (InstType << 2) );
|
||||||
EmitSibMagic( bitoffset.Id, bitbase );
|
EmitSibMagic( bitoffset.Id, bitbase );
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------------------
|
// ------------------------------------------------------------------------
|
||||||
static __emitinline void Emit( const iRegister<ImmType>& bitbase, u8 immoffset )
|
static __emitinline void Emit( const xRegister<ImmType>& bitbase, u8 immoffset )
|
||||||
{
|
{
|
||||||
prefix16();
|
prefix16();
|
||||||
iWrite<u16>( 0xba0f );
|
xWrite<u16>( 0xba0f );
|
||||||
ModRM_Direct( InstType, bitbase.Id );
|
ModRM_Direct( InstType, bitbase.Id );
|
||||||
iWrite<u8>( immoffset );
|
xWrite<u8>( immoffset );
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------------------
|
// ------------------------------------------------------------------------
|
||||||
static __emitinline void Emit( const ModSibStrict<ImmType>& bitbase, u8 immoffset )
|
static __emitinline void Emit( const ModSibStrict<ImmType>& bitbase, u8 immoffset )
|
||||||
{
|
{
|
||||||
prefix16();
|
prefix16();
|
||||||
iWrite<u16>( 0xba0f );
|
xWrite<u16>( 0xba0f );
|
||||||
EmitSibMagic( InstType, bitbase );
|
EmitSibMagic( InstType, bitbase );
|
||||||
iWrite<u8>( immoffset );
|
xWrite<u8>( immoffset );
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -102,12 +102,12 @@ protected:
|
||||||
typedef Group8Impl<InstType,u32> m_16;
|
typedef Group8Impl<InstType,u32> m_16;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
__forceinline void operator()( const iRegister32& bitbase, const iRegister32& bitoffset ) const { m_32::Emit( bitbase, bitoffset ); }
|
__forceinline void operator()( const xRegister32& bitbase, const xRegister32& bitoffset ) const { m_32::Emit( bitbase, bitoffset ); }
|
||||||
__forceinline void operator()( const iRegister16& bitbase, const iRegister16& bitoffset ) const { m_16::Emit( bitbase, bitoffset ); }
|
__forceinline void operator()( const xRegister16& bitbase, const xRegister16& bitoffset ) const { m_16::Emit( bitbase, bitoffset ); }
|
||||||
__forceinline void operator()( void* bitbase, const iRegister32& bitoffset ) const { m_32::Emit( bitbase, bitoffset ); }
|
__forceinline void operator()( void* bitbase, const xRegister32& bitoffset ) const { m_32::Emit( bitbase, bitoffset ); }
|
||||||
__forceinline void operator()( void* bitbase, const iRegister16& bitoffset ) const { m_16::Emit( bitbase, bitoffset ); }
|
__forceinline void operator()( void* bitbase, const xRegister16& bitoffset ) const { m_16::Emit( bitbase, bitoffset ); }
|
||||||
__noinline void operator()( const ModSibBase& bitbase, const iRegister32& bitoffset ) const { m_32::Emit( bitbase, bitoffset ); }
|
__noinline void operator()( const ModSibBase& bitbase, const xRegister32& bitoffset ) const { m_32::Emit( bitbase, bitoffset ); }
|
||||||
__noinline void operator()( const ModSibBase& bitbase, const iRegister16& bitoffset ) const { m_16::Emit( bitbase, bitoffset ); }
|
__noinline void operator()( const ModSibBase& bitbase, const xRegister16& bitoffset ) const { m_16::Emit( bitbase, bitoffset ); }
|
||||||
|
|
||||||
// Note on Imm forms : use int as the source operand since it's "reasonably inert" from a compiler
|
// Note on Imm forms : use int as the source operand since it's "reasonably inert" from a compiler
|
||||||
// perspective. (using uint tends to make the compiler try and fail to match signed immediates with
|
// perspective. (using uint tends to make the compiler try and fail to match signed immediates with
|
||||||
|
@ -115,8 +115,8 @@ public:
|
||||||
|
|
||||||
__noinline void operator()( const ModSibStrict<u32>& bitbase, u8 immoffset ) const { m_32::Emit( bitbase, immoffset ); }
|
__noinline void operator()( const ModSibStrict<u32>& bitbase, u8 immoffset ) const { m_32::Emit( bitbase, immoffset ); }
|
||||||
__noinline void operator()( const ModSibStrict<u16>& bitbase, u8 immoffset ) const { m_16::Emit( bitbase, immoffset ); }
|
__noinline void operator()( const ModSibStrict<u16>& bitbase, u8 immoffset ) const { m_16::Emit( bitbase, immoffset ); }
|
||||||
void operator()( const iRegister<u32>& bitbase, u8 immoffset ) const { m_32::Emit( bitbase, immoffset ); }
|
void operator()( const xRegister<u32>& bitbase, u8 immoffset ) const { m_32::Emit( bitbase, immoffset ); }
|
||||||
void operator()( const iRegister<u16>& bitbase, u8 immoffset ) const { m_16::Emit( bitbase, immoffset ); }
|
void operator()( const xRegister<u16>& bitbase, u8 immoffset ) const { m_16::Emit( bitbase, immoffset ); }
|
||||||
|
|
||||||
Group8ImplAll() {}
|
Group8ImplAll() {}
|
||||||
};
|
};
|
||||||
|
@ -130,33 +130,33 @@ class BitScanImpl
|
||||||
{
|
{
|
||||||
protected:
|
protected:
|
||||||
static const uint OperandSize = sizeof(ImmType);
|
static const uint OperandSize = sizeof(ImmType);
|
||||||
static void prefix16() { if( OperandSize == 2 ) iWrite<u8>( 0x66 ); }
|
static void prefix16() { if( OperandSize == 2 ) xWrite<u8>( 0x66 ); }
|
||||||
static void emitbase()
|
static void emitbase()
|
||||||
{
|
{
|
||||||
prefix16();
|
prefix16();
|
||||||
iWrite<u8>( 0x0f );
|
xWrite<u8>( 0x0f );
|
||||||
iWrite<u8>( isReverse ? 0xbd : 0xbc );
|
xWrite<u8>( isReverse ? 0xbd : 0xbc );
|
||||||
}
|
}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
BitScanImpl() {} // For the love of GCC.
|
BitScanImpl() {} // For the love of GCC.
|
||||||
|
|
||||||
// ------------------------------------------------------------------------
|
// ------------------------------------------------------------------------
|
||||||
static __emitinline void Emit( const iRegister<ImmType>& to, const iRegister<ImmType>& from )
|
static __emitinline void Emit( const xRegister<ImmType>& to, const xRegister<ImmType>& from )
|
||||||
{
|
{
|
||||||
emitbase();
|
emitbase();
|
||||||
ModRM_Direct( to.Id, from.Id );
|
ModRM_Direct( to.Id, from.Id );
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------------------
|
// ------------------------------------------------------------------------
|
||||||
static __emitinline void Emit( const iRegister<ImmType>& to, const void* src )
|
static __emitinline void Emit( const xRegister<ImmType>& to, const void* src )
|
||||||
{
|
{
|
||||||
emitbase();
|
emitbase();
|
||||||
iWriteDisp( to.Id, src );
|
xWriteDisp( to.Id, src );
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------------------
|
// ------------------------------------------------------------------------
|
||||||
static __emitinline void Emit( const iRegister<ImmType>& to, const ModSibBase& sibsrc )
|
static __emitinline void Emit( const xRegister<ImmType>& to, const ModSibBase& sibsrc )
|
||||||
{
|
{
|
||||||
emitbase();
|
emitbase();
|
||||||
EmitSibMagic( to.Id, sibsrc );
|
EmitSibMagic( to.Id, sibsrc );
|
||||||
|
@ -175,12 +175,12 @@ protected:
|
||||||
typedef BitScanImpl<isReverse,u32> m_16;
|
typedef BitScanImpl<isReverse,u32> m_16;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
__forceinline void operator()( const iRegister32& to, const iRegister32& from ) const { m_32::Emit( to, from ); }
|
__forceinline void operator()( const xRegister32& to, const xRegister32& from ) const { m_32::Emit( to, from ); }
|
||||||
__forceinline void operator()( const iRegister16& to, const iRegister16& from ) const { m_16::Emit( to, from ); }
|
__forceinline void operator()( const xRegister16& to, const xRegister16& from ) const { m_16::Emit( to, from ); }
|
||||||
__forceinline void operator()( const iRegister32& to, const void* src ) const { m_32::Emit( to, src ); }
|
__forceinline void operator()( const xRegister32& to, const void* src ) const { m_32::Emit( to, src ); }
|
||||||
__forceinline void operator()( const iRegister16& to, const void* src ) const { m_16::Emit( to, src ); }
|
__forceinline void operator()( const xRegister16& to, const void* src ) const { m_16::Emit( to, src ); }
|
||||||
__noinline void operator()( const iRegister32& to, const ModSibBase& sibsrc ) const { m_32::Emit( to, sibsrc ); }
|
__noinline void operator()( const xRegister32& to, const ModSibBase& sibsrc ) const { m_32::Emit( to, sibsrc ); }
|
||||||
__noinline void operator()( const iRegister16& to, const ModSibBase& sibsrc ) const { m_16::Emit( to, sibsrc ); }
|
__noinline void operator()( const xRegister16& to, const ModSibBase& sibsrc ) const { m_16::Emit( to, sibsrc ); }
|
||||||
|
|
||||||
BitScanImplAll() {}
|
BitScanImplAll() {}
|
||||||
};
|
};
|
||||||
|
|
|
@ -33,7 +33,7 @@ protected:
|
||||||
static const uint OperandSize = sizeof(ImmType);
|
static const uint OperandSize = sizeof(ImmType);
|
||||||
|
|
||||||
static bool Is8BitOperand() { return OperandSize == 1; }
|
static bool Is8BitOperand() { return OperandSize == 1; }
|
||||||
static void prefix16() { if( OperandSize == 2 ) iWrite<u8>( 0x66 ); }
|
static void prefix16() { if( OperandSize == 2 ) xWrite<u8>( 0x66 ); }
|
||||||
|
|
||||||
static void basesibform( bool isCL )
|
static void basesibform( bool isCL )
|
||||||
{
|
{
|
||||||
|
@ -46,7 +46,7 @@ public:
|
||||||
DwordShiftImpl() {} // because GCC doesn't like static classes
|
DwordShiftImpl() {} // because GCC doesn't like static classes
|
||||||
|
|
||||||
// ------------------------------------------------------------------------
|
// ------------------------------------------------------------------------
|
||||||
static __emitinline void Emit( const iRegister<ImmType>& to, const iRegister<ImmType>& from )
|
static __emitinline void Emit( const xRegister<ImmType>& to, const xRegister<ImmType>& from )
|
||||||
{
|
{
|
||||||
prefix16();
|
prefix16();
|
||||||
write16( 0xa50f | (isShiftRight ? 0x800 : 0) );
|
write16( 0xa50f | (isShiftRight ? 0x800 : 0) );
|
||||||
|
@ -54,7 +54,7 @@ public:
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------------------
|
// ------------------------------------------------------------------------
|
||||||
static __emitinline void Emit( const iRegister<ImmType>& to, const iRegister<ImmType>& from, u8 imm )
|
static __emitinline void Emit( const xRegister<ImmType>& to, const xRegister<ImmType>& from, u8 imm )
|
||||||
{
|
{
|
||||||
if( imm == 0 ) return;
|
if( imm == 0 ) return;
|
||||||
prefix16();
|
prefix16();
|
||||||
|
@ -64,14 +64,14 @@ public:
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------------------
|
// ------------------------------------------------------------------------
|
||||||
static __emitinline void Emit( const ModSibBase& sibdest, const iRegister<ImmType>& from, __unused const iRegisterCL& clreg )
|
static __emitinline void Emit( const ModSibBase& sibdest, const xRegister<ImmType>& from, __unused const xRegisterCL& clreg )
|
||||||
{
|
{
|
||||||
basesibform();
|
basesibform();
|
||||||
EmitSibMagic( from.Id, sibdest );
|
EmitSibMagic( from.Id, sibdest );
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------------------
|
// ------------------------------------------------------------------------
|
||||||
static __emitinline void Emit( const ModSibBase& sibdest, const iRegister<ImmType>& from, u8 imm )
|
static __emitinline void Emit( const ModSibBase& sibdest, const xRegister<ImmType>& from, u8 imm )
|
||||||
{
|
{
|
||||||
basesibform();
|
basesibform();
|
||||||
EmitSibMagic( from.Id, sibdest );
|
EmitSibMagic( from.Id, sibdest );
|
||||||
|
@ -80,18 +80,18 @@ public:
|
||||||
|
|
||||||
// ------------------------------------------------------------------------
|
// ------------------------------------------------------------------------
|
||||||
// dest data type is inferred from the 'from' register, so we can do void* resolution :)
|
// dest data type is inferred from the 'from' register, so we can do void* resolution :)
|
||||||
static __emitinline void Emit( void* dest, const iRegister<ImmType>& from, __unused const iRegisterCL& clreg )
|
static __emitinline void Emit( void* dest, const xRegister<ImmType>& from, __unused const xRegisterCL& clreg )
|
||||||
{
|
{
|
||||||
basesibform();
|
basesibform();
|
||||||
iWriteDisp( from.Id, dest );
|
xWriteDisp( from.Id, dest );
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------------------
|
// ------------------------------------------------------------------------
|
||||||
// dest data type is inferred from the 'from' register, so we can do void* resolution :)
|
// dest data type is inferred from the 'from' register, so we can do void* resolution :)
|
||||||
static __emitinline void Emit( void* dest, const iRegister<ImmType>& from, u8 imm )
|
static __emitinline void Emit( void* dest, const xRegister<ImmType>& from, u8 imm )
|
||||||
{
|
{
|
||||||
basesibform();
|
basesibform();
|
||||||
iWriteDisp( from.Id, dest );
|
xWriteDisp( from.Id, dest );
|
||||||
write8( imm );
|
write8( imm );
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
@ -110,20 +110,20 @@ protected:
|
||||||
|
|
||||||
public:
|
public:
|
||||||
// ---------- 32 Bit Interface -----------
|
// ---------- 32 Bit Interface -----------
|
||||||
__forceinline void operator()( const iRegister32& to, const iRegister32& from, __unused const iRegisterCL& clreg ) const { m_32::Emit( to, from ); }
|
__forceinline void operator()( const xRegister32& to, const xRegister32& from, __unused const xRegisterCL& clreg ) const { m_32::Emit( to, from ); }
|
||||||
__forceinline void operator()( void* dest, const iRegister32& from, __unused const iRegisterCL& clreg ) const { m_32::Emit( dest, from ); }
|
__forceinline void operator()( void* dest, const xRegister32& from, __unused const xRegisterCL& clreg ) const { m_32::Emit( dest, from ); }
|
||||||
__noinline void operator()( const ModSibBase& sibdest, const iRegister32& from, __unused const iRegisterCL& clreg ) const { m_32::Emit( sibdest, from ); }
|
__noinline void operator()( const ModSibBase& sibdest, const xRegister32& from, __unused const xRegisterCL& clreg ) const { m_32::Emit( sibdest, from ); }
|
||||||
__forceinline void operator()( const iRegister32& to, const iRegister32& from, u8 imm ) const { m_32::Emit( to, from, imm ); }
|
__forceinline void operator()( const xRegister32& to, const xRegister32& from, u8 imm ) const { m_32::Emit( to, from, imm ); }
|
||||||
__forceinline void operator()( void* dest, const iRegister32& from, u8 imm ) const { m_32::Emit( dest, from, imm ); }
|
__forceinline void operator()( void* dest, const xRegister32& from, u8 imm ) const { m_32::Emit( dest, from, imm ); }
|
||||||
__noinline void operator()( const ModSibBase& sibdest, const iRegister32& from, u8 imm ) const { m_32::Emit( sibdest, from ); }
|
__noinline void operator()( const ModSibBase& sibdest, const xRegister32& from, u8 imm ) const { m_32::Emit( sibdest, from ); }
|
||||||
|
|
||||||
// ---------- 16 Bit Interface -----------
|
// ---------- 16 Bit Interface -----------
|
||||||
__forceinline void operator()( const iRegister16& to, const iRegister16& from, __unused const iRegisterCL& clreg ) const { m_16::Emit( to, from ); }
|
__forceinline void operator()( const xRegister16& to, const xRegister16& from, __unused const xRegisterCL& clreg ) const { m_16::Emit( to, from ); }
|
||||||
__forceinline void operator()( void* dest, const iRegister16& from, __unused const iRegisterCL& clreg ) const { m_16::Emit( dest, from ); }
|
__forceinline void operator()( void* dest, const xRegister16& from, __unused const xRegisterCL& clreg ) const { m_16::Emit( dest, from ); }
|
||||||
__noinline void operator()( const ModSibBase& sibdest, const iRegister16& from, __unused const iRegisterCL& clreg ) const { m_16::Emit( sibdest, from ); }
|
__noinline void operator()( const ModSibBase& sibdest, const xRegister16& from, __unused const xRegisterCL& clreg ) const { m_16::Emit( sibdest, from ); }
|
||||||
__forceinline void operator()( const iRegister16& to, const iRegister16& from, u8 imm ) const { m_16::Emit( to, from, imm ); }
|
__forceinline void operator()( const xRegister16& to, const xRegister16& from, u8 imm ) const { m_16::Emit( to, from, imm ); }
|
||||||
__forceinline void operator()( void* dest, const iRegister16& from, u8 imm ) const { m_16::Emit( dest, from, imm ); }
|
__forceinline void operator()( void* dest, const xRegister16& from, u8 imm ) const { m_16::Emit( dest, from, imm ); }
|
||||||
__noinline void operator()( const ModSibBase& sibdest, const iRegister16& from, u8 imm ) const { m_16::Emit( sibdest, from ); }
|
__noinline void operator()( const ModSibBase& sibdest, const xRegister16& from, u8 imm ) const { m_16::Emit( sibdest, from ); }
|
||||||
|
|
||||||
DwordShiftImplAll() {} // Why does GCC need these?
|
DwordShiftImplAll() {} // Why does GCC need these?
|
||||||
};
|
};
|
||||||
|
|
|
@ -41,65 +41,65 @@ protected:
|
||||||
static const uint OperandSize = sizeof(ImmType);
|
static const uint OperandSize = sizeof(ImmType);
|
||||||
|
|
||||||
static bool Is8BitOperand() { return OperandSize == 1; }
|
static bool Is8BitOperand() { return OperandSize == 1; }
|
||||||
static void prefix16() { if( OperandSize == 2 ) iWrite<u8>( 0x66 ); }
|
static void prefix16() { if( OperandSize == 2 ) xWrite<u8>( 0x66 ); }
|
||||||
|
|
||||||
public:
|
public:
|
||||||
Group1Impl() {} // because GCC doesn't like static classes
|
Group1Impl() {} // because GCC doesn't like static classes
|
||||||
|
|
||||||
static __emitinline void Emit( G1Type InstType, const iRegister<ImmType>& to, const iRegister<ImmType>& from )
|
static __emitinline void Emit( G1Type InstType, const xRegister<ImmType>& to, const xRegister<ImmType>& from )
|
||||||
{
|
{
|
||||||
prefix16();
|
prefix16();
|
||||||
iWrite<u8>( (Is8BitOperand() ? 0 : 1) | (InstType<<3) );
|
xWrite<u8>( (Is8BitOperand() ? 0 : 1) | (InstType<<3) );
|
||||||
ModRM_Direct( from.Id, to.Id );
|
ModRM_Direct( from.Id, to.Id );
|
||||||
}
|
}
|
||||||
|
|
||||||
static __emitinline void Emit( G1Type InstType, const ModSibBase& sibdest, const iRegister<ImmType>& from )
|
static __emitinline void Emit( G1Type InstType, const ModSibBase& sibdest, const xRegister<ImmType>& from )
|
||||||
{
|
{
|
||||||
prefix16();
|
prefix16();
|
||||||
iWrite<u8>( (Is8BitOperand() ? 0 : 1) | (InstType<<3) );
|
xWrite<u8>( (Is8BitOperand() ? 0 : 1) | (InstType<<3) );
|
||||||
EmitSibMagic( from.Id, sibdest );
|
EmitSibMagic( from.Id, sibdest );
|
||||||
}
|
}
|
||||||
|
|
||||||
static __emitinline void Emit( G1Type InstType, const iRegister<ImmType>& to, const ModSibBase& sibsrc )
|
static __emitinline void Emit( G1Type InstType, const xRegister<ImmType>& to, const ModSibBase& sibsrc )
|
||||||
{
|
{
|
||||||
prefix16();
|
prefix16();
|
||||||
iWrite<u8>( (Is8BitOperand() ? 2 : 3) | (InstType<<3) );
|
xWrite<u8>( (Is8BitOperand() ? 2 : 3) | (InstType<<3) );
|
||||||
EmitSibMagic( to.Id, sibsrc );
|
EmitSibMagic( to.Id, sibsrc );
|
||||||
}
|
}
|
||||||
|
|
||||||
static __emitinline void Emit( G1Type InstType, void* dest, const iRegister<ImmType>& from )
|
static __emitinline void Emit( G1Type InstType, void* dest, const xRegister<ImmType>& from )
|
||||||
{
|
{
|
||||||
prefix16();
|
prefix16();
|
||||||
iWrite<u8>( (Is8BitOperand() ? 0 : 1) | (InstType<<3) );
|
xWrite<u8>( (Is8BitOperand() ? 0 : 1) | (InstType<<3) );
|
||||||
iWriteDisp( from.Id, dest );
|
xWriteDisp( from.Id, dest );
|
||||||
}
|
}
|
||||||
|
|
||||||
static __emitinline void Emit( G1Type InstType, const iRegister<ImmType>& to, const void* src )
|
static __emitinline void Emit( G1Type InstType, const xRegister<ImmType>& to, const void* src )
|
||||||
{
|
{
|
||||||
prefix16();
|
prefix16();
|
||||||
iWrite<u8>( (Is8BitOperand() ? 2 : 3) | (InstType<<3) );
|
xWrite<u8>( (Is8BitOperand() ? 2 : 3) | (InstType<<3) );
|
||||||
iWriteDisp( to.Id, src );
|
xWriteDisp( to.Id, src );
|
||||||
}
|
}
|
||||||
|
|
||||||
static __emitinline void Emit( G1Type InstType, const iRegister<ImmType>& to, int imm )
|
static __emitinline void Emit( G1Type InstType, const xRegister<ImmType>& to, int imm )
|
||||||
{
|
{
|
||||||
prefix16();
|
prefix16();
|
||||||
if( !Is8BitOperand() && is_s8( imm ) )
|
if( !Is8BitOperand() && is_s8( imm ) )
|
||||||
{
|
{
|
||||||
iWrite<u8>( 0x83 );
|
xWrite<u8>( 0x83 );
|
||||||
ModRM_Direct( InstType, to.Id );
|
ModRM_Direct( InstType, to.Id );
|
||||||
iWrite<s8>( imm );
|
xWrite<s8>( imm );
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
if( to.IsAccumulator() )
|
if( to.IsAccumulator() )
|
||||||
iWrite<u8>( (Is8BitOperand() ? 4 : 5) | (InstType<<3) );
|
xWrite<u8>( (Is8BitOperand() ? 4 : 5) | (InstType<<3) );
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
iWrite<u8>( Is8BitOperand() ? 0x80 : 0x81 );
|
xWrite<u8>( Is8BitOperand() ? 0x80 : 0x81 );
|
||||||
ModRM_Direct( InstType, to.Id );
|
ModRM_Direct( InstType, to.Id );
|
||||||
}
|
}
|
||||||
iWrite<ImmType>( imm );
|
xWrite<ImmType>( imm );
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -107,19 +107,19 @@ public:
|
||||||
{
|
{
|
||||||
if( Is8BitOperand() )
|
if( Is8BitOperand() )
|
||||||
{
|
{
|
||||||
iWrite<u8>( 0x80 );
|
xWrite<u8>( 0x80 );
|
||||||
EmitSibMagic( InstType, sibdest );
|
EmitSibMagic( InstType, sibdest );
|
||||||
iWrite<ImmType>( imm );
|
xWrite<ImmType>( imm );
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
prefix16();
|
prefix16();
|
||||||
iWrite<u8>( is_s8( imm ) ? 0x83 : 0x81 );
|
xWrite<u8>( is_s8( imm ) ? 0x83 : 0x81 );
|
||||||
EmitSibMagic( InstType, sibdest );
|
EmitSibMagic( InstType, sibdest );
|
||||||
if( is_s8( imm ) )
|
if( is_s8( imm ) )
|
||||||
iWrite<s8>( imm );
|
xWrite<s8>( imm );
|
||||||
else
|
else
|
||||||
iWrite<ImmType>( imm );
|
xWrite<ImmType>( imm );
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
@ -132,15 +132,15 @@ class Group1ImplAll
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
template< typename T >
|
template< typename T >
|
||||||
__forceinline void operator()( const iRegister<T>& to, const iRegister<T>& from ) const { Group1Impl<T>::Emit( InstType, to, from ); }
|
__forceinline void operator()( const xRegister<T>& to, const xRegister<T>& from ) const { Group1Impl<T>::Emit( InstType, to, from ); }
|
||||||
template< typename T >
|
template< typename T >
|
||||||
__forceinline void operator()( const iRegister<T>& to, const void* src ) const { Group1Impl<T>::Emit( InstType, to, src ); }
|
__forceinline void operator()( const xRegister<T>& to, const void* src ) const { Group1Impl<T>::Emit( InstType, to, src ); }
|
||||||
template< typename T >
|
template< typename T >
|
||||||
__forceinline void operator()( void* dest, const iRegister<T>& from ) const { Group1Impl<T>::Emit( InstType, dest, from ); }
|
__forceinline void operator()( void* dest, const xRegister<T>& from ) const { Group1Impl<T>::Emit( InstType, dest, from ); }
|
||||||
template< typename T >
|
template< typename T >
|
||||||
__noinline void operator()( const ModSibBase& sibdest, const iRegister<T>& from ) const { Group1Impl<T>::Emit( InstType, sibdest, from ); }
|
__noinline void operator()( const ModSibBase& sibdest, const xRegister<T>& from ) const { Group1Impl<T>::Emit( InstType, sibdest, from ); }
|
||||||
template< typename T >
|
template< typename T >
|
||||||
__noinline void operator()( const iRegister<T>& to, const ModSibBase& sibsrc ) const { Group1Impl<T>::Emit( InstType, to, sibsrc ); }
|
__noinline void operator()( const xRegister<T>& to, const ModSibBase& sibsrc ) const { Group1Impl<T>::Emit( InstType, to, sibsrc ); }
|
||||||
|
|
||||||
// Note on Imm forms : use int as the source operand since it's "reasonably inert" from a compiler
|
// Note on Imm forms : use int as the source operand since it's "reasonably inert" from a compiler
|
||||||
// perspective. (using uint tends to make the compiler try and fail to match signed immediates with
|
// perspective. (using uint tends to make the compiler try and fail to match signed immediates with
|
||||||
|
@ -149,38 +149,60 @@ public:
|
||||||
template< typename T >
|
template< typename T >
|
||||||
__noinline void operator()( const ModSibStrict<T>& sibdest, int imm ) const { Group1Impl<T>::Emit( InstType, sibdest, imm ); }
|
__noinline void operator()( const ModSibStrict<T>& sibdest, int imm ) const { Group1Impl<T>::Emit( InstType, sibdest, imm ); }
|
||||||
template< typename T >
|
template< typename T >
|
||||||
__forceinline void operator()( const iRegister<T>& to, int imm ) const { Group1Impl<T>::Emit( InstType, to, imm ); }
|
__forceinline void operator()( const xRegister<T>& to, int imm ) const { Group1Impl<T>::Emit( InstType, to, imm ); }
|
||||||
|
|
||||||
Group1ImplAll() {} // Why does GCC need these?
|
Group1ImplAll() {} // Why does GCC need these?
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// ------------------------------------------------------------------------
|
||||||
|
// This class combines x86 with SSE/SSE2 logic operations (ADD, OR, and NOT).
|
||||||
|
// Note: ANDN [AndNot] is handled below separately.
|
||||||
|
//
|
||||||
template< G1Type InstType, u8 OpcodeSSE >
|
template< G1Type InstType, u8 OpcodeSSE >
|
||||||
class G1LogicImpl : public Group1ImplAll<InstType>
|
class G1LogicImpl_PlusSSE : public Group1ImplAll<InstType>
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
|
using Group1ImplAll<InstType>::operator();
|
||||||
|
|
||||||
const SSELogicImpl<0x00,OpcodeSSE> PS;
|
const SSELogicImpl<0x00,OpcodeSSE> PS;
|
||||||
const SSELogicImpl<0x66,OpcodeSSE> PD;
|
const SSELogicImpl<0x66,OpcodeSSE> PD;
|
||||||
|
|
||||||
G1LogicImpl() {}
|
G1LogicImpl_PlusSSE() {}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// ------------------------------------------------------------------------
|
||||||
|
// This calss combines x86 with SSE/SSE2 arithmetic operations (ADD/SUB).
|
||||||
|
//
|
||||||
template< G1Type InstType, u8 OpcodeSSE >
|
template< G1Type InstType, u8 OpcodeSSE >
|
||||||
class G1ArithmeticImpl : public G1LogicImpl<InstType, OpcodeSSE >
|
class G1ArithmeticImpl_PlusSSE : public G1LogicImpl_PlusSSE<InstType, OpcodeSSE >
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
|
using Group1ImplAll<InstType>::operator();
|
||||||
|
|
||||||
const SSELogicImpl<0xf3,OpcodeSSE> SS;
|
const SSELogicImpl<0xf3,OpcodeSSE> SS;
|
||||||
const SSELogicImpl<0xf2,OpcodeSSE> SD;
|
const SSELogicImpl<0xf2,OpcodeSSE> SD;
|
||||||
|
|
||||||
G1ArithmeticImpl() {}
|
G1ArithmeticImpl_PlusSSE() {}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// ------------------------------------------------------------------------
|
||||||
template< u8 OpcodeSSE >
|
class G1CompareImpl_PlusSSE : Group1ImplAll< G1Type_CMP >
|
||||||
class SSEAndNotImpl
|
|
||||||
{
|
{
|
||||||
public:
|
protected:
|
||||||
const SSELogicImpl<0x00,OpcodeSSE> PS;
|
template< u8 Prefix > struct Woot
|
||||||
const SSELogicImpl<0x66,OpcodeSSE> PD;
|
{
|
||||||
|
__forceinline void operator()( const xRegisterSSE& to, const xRegisterSSE& from, SSE2_ComparisonType cmptype ) const { writeXMMop( Prefix, 0xc2, to, from ); xWrite<u8>( cmptype ); }
|
||||||
SSEAndNotImpl() {}
|
__forceinline void operator()( const xRegisterSSE& to, const void* from, SSE2_ComparisonType cmptype ) const { writeXMMop( Prefix, 0xc2, to, from ); xWrite<u8>( cmptype ); }
|
||||||
|
__noinline void operator()( const xRegisterSSE& to, const ModSibBase& from, SSE2_ComparisonType cmptype ) const { writeXMMop( Prefix, 0xc2, to, from ); xWrite<u8>( cmptype ); }
|
||||||
|
};
|
||||||
|
|
||||||
|
public:
|
||||||
|
using Group1ImplAll< G1Type_CMP >::operator();
|
||||||
|
|
||||||
|
Woot<0x00> PS;
|
||||||
|
Woot<0x66> PD;
|
||||||
|
Woot<0xf3> SS;
|
||||||
|
Woot<0xf2> SD;
|
||||||
|
|
||||||
|
G1CompareImpl_PlusSSE() {} //GCWhat?
|
||||||
};
|
};
|
|
@ -45,21 +45,21 @@ protected:
|
||||||
static const uint OperandSize = sizeof(ImmType);
|
static const uint OperandSize = sizeof(ImmType);
|
||||||
|
|
||||||
static bool Is8BitOperand() { return OperandSize == 1; }
|
static bool Is8BitOperand() { return OperandSize == 1; }
|
||||||
static void prefix16() { if( OperandSize == 2 ) iWrite<u8>( 0x66 ); }
|
static void prefix16() { if( OperandSize == 2 ) xWrite<u8>( 0x66 ); }
|
||||||
|
|
||||||
public:
|
public:
|
||||||
Group2Impl() {} // For the love of GCC.
|
Group2Impl() {} // For the love of GCC.
|
||||||
|
|
||||||
// ------------------------------------------------------------------------
|
// ------------------------------------------------------------------------
|
||||||
static __emitinline void Emit( const iRegister<ImmType>& to )
|
static __emitinline void Emit( const xRegister<ImmType>& to )
|
||||||
{
|
{
|
||||||
prefix16();
|
prefix16();
|
||||||
iWrite<u8>( Is8BitOperand() ? 0xd2 : 0xd3 );
|
xWrite<u8>( Is8BitOperand() ? 0xd2 : 0xd3 );
|
||||||
ModRM_Direct( InstType, to.Id );
|
ModRM_Direct( InstType, to.Id );
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------------------
|
// ------------------------------------------------------------------------
|
||||||
static __emitinline void Emit( const iRegister<ImmType>& to, u8 imm )
|
static __emitinline void Emit( const xRegister<ImmType>& to, u8 imm )
|
||||||
{
|
{
|
||||||
if( imm == 0 ) return;
|
if( imm == 0 ) return;
|
||||||
|
|
||||||
|
@ -67,14 +67,14 @@ public:
|
||||||
if( imm == 1 )
|
if( imm == 1 )
|
||||||
{
|
{
|
||||||
// special encoding of 1's
|
// special encoding of 1's
|
||||||
iWrite<u8>( Is8BitOperand() ? 0xd0 : 0xd1 );
|
xWrite<u8>( Is8BitOperand() ? 0xd0 : 0xd1 );
|
||||||
ModRM_Direct( InstType, to.Id );
|
ModRM_Direct( InstType, to.Id );
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
iWrite<u8>( Is8BitOperand() ? 0xc0 : 0xc1 );
|
xWrite<u8>( Is8BitOperand() ? 0xc0 : 0xc1 );
|
||||||
ModRM_Direct( InstType, to.Id );
|
ModRM_Direct( InstType, to.Id );
|
||||||
iWrite<u8>( imm );
|
xWrite<u8>( imm );
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -82,7 +82,7 @@ public:
|
||||||
static __emitinline void Emit( const ModSibStrict<ImmType>& sibdest )
|
static __emitinline void Emit( const ModSibStrict<ImmType>& sibdest )
|
||||||
{
|
{
|
||||||
prefix16();
|
prefix16();
|
||||||
iWrite<u8>( Is8BitOperand() ? 0xd2 : 0xd3 );
|
xWrite<u8>( Is8BitOperand() ? 0xd2 : 0xd3 );
|
||||||
EmitSibMagic( InstType, sibdest );
|
EmitSibMagic( InstType, sibdest );
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -95,14 +95,14 @@ public:
|
||||||
if( imm == 1 )
|
if( imm == 1 )
|
||||||
{
|
{
|
||||||
// special encoding of 1's
|
// special encoding of 1's
|
||||||
iWrite<u8>( Is8BitOperand() ? 0xd0 : 0xd1 );
|
xWrite<u8>( Is8BitOperand() ? 0xd0 : 0xd1 );
|
||||||
EmitSibMagic( InstType, sibdest );
|
EmitSibMagic( InstType, sibdest );
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
iWrite<u8>( Is8BitOperand() ? 0xc0 : 0xc1 );
|
xWrite<u8>( Is8BitOperand() ? 0xc0 : 0xc1 );
|
||||||
EmitSibMagic( InstType, sibdest );
|
EmitSibMagic( InstType, sibdest );
|
||||||
iWrite<u8>( imm );
|
xWrite<u8>( imm );
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
@ -113,16 +113,16 @@ template< G2Type InstType >
|
||||||
class Group2ImplAll
|
class Group2ImplAll
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
template< typename T > __forceinline void operator()( const iRegister<T>& to, __unused const iRegisterCL& from ) const
|
template< typename T > __forceinline void operator()( const xRegister<T>& to, __unused const xRegisterCL& from ) const
|
||||||
{ Group2Impl<InstType,T>::Emit( to ); }
|
{ Group2Impl<InstType,T>::Emit( to ); }
|
||||||
|
|
||||||
template< typename T > __noinline void operator()( const ModSibStrict<T>& sibdest, __unused const iRegisterCL& from ) const
|
template< typename T > __noinline void operator()( const ModSibStrict<T>& sibdest, __unused const xRegisterCL& from ) const
|
||||||
{ Group2Impl<InstType,T>::Emit( sibdest ); }
|
{ Group2Impl<InstType,T>::Emit( sibdest ); }
|
||||||
|
|
||||||
template< typename T > __noinline void operator()( const ModSibStrict<T>& sibdest, u8 imm ) const
|
template< typename T > __noinline void operator()( const ModSibStrict<T>& sibdest, u8 imm ) const
|
||||||
{ Group2Impl<InstType,T>::Emit( sibdest, imm ); }
|
{ Group2Impl<InstType,T>::Emit( sibdest, imm ); }
|
||||||
|
|
||||||
template< typename T > __forceinline void operator()( const iRegister<T>& to, u8 imm ) const
|
template< typename T > __forceinline void operator()( const xRegister<T>& to, u8 imm ) const
|
||||||
{ Group2Impl<InstType,T>::Emit( to, imm ); }
|
{ Group2Impl<InstType,T>::Emit( to, imm ); }
|
||||||
|
|
||||||
Group2ImplAll() {} // I am a class with no members, so I need an explicit constructor! Sense abounds.
|
Group2ImplAll() {} // I am a class with no members, so I need an explicit constructor! Sense abounds.
|
||||||
|
|
|
@ -31,6 +31,7 @@ enum G3Type
|
||||||
G3Type_iDIV = 7
|
G3Type_iDIV = 7
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// ------------------------------------------------------------------------
|
||||||
template< typename ImmType >
|
template< typename ImmType >
|
||||||
class Group3Impl
|
class Group3Impl
|
||||||
{
|
{
|
||||||
|
@ -38,22 +39,22 @@ protected:
|
||||||
static const uint OperandSize = sizeof(ImmType);
|
static const uint OperandSize = sizeof(ImmType);
|
||||||
|
|
||||||
static bool Is8BitOperand() { return OperandSize == 1; }
|
static bool Is8BitOperand() { return OperandSize == 1; }
|
||||||
static void prefix16() { if( OperandSize == 2 ) iWrite<u8>( 0x66 ); }
|
static void prefix16() { if( OperandSize == 2 ) xWrite<u8>( 0x66 ); }
|
||||||
|
|
||||||
public:
|
public:
|
||||||
Group3Impl() {} // For the love of GCC.
|
Group3Impl() {} // For the love of GCC.
|
||||||
|
|
||||||
static __emitinline void Emit( G3Type InstType, const iRegister<ImmType>& from )
|
static __emitinline void Emit( G3Type InstType, const xRegister<ImmType>& from )
|
||||||
{
|
{
|
||||||
prefix16();
|
prefix16();
|
||||||
iWrite<u8>(Is8BitOperand() ? 0xf6 : 0xf7 );
|
xWrite<u8>(Is8BitOperand() ? 0xf6 : 0xf7 );
|
||||||
ModRM_Direct( InstType, from.Id );
|
ModRM_Direct( InstType, from.Id );
|
||||||
}
|
}
|
||||||
|
|
||||||
static __emitinline void Emit( G3Type InstType, const ModSibStrict<ImmType>& sibsrc )
|
static __emitinline void Emit( G3Type InstType, const ModSibStrict<ImmType>& sibsrc )
|
||||||
{
|
{
|
||||||
prefix16();
|
prefix16();
|
||||||
iWrite<u8>( Is8BitOperand() ? 0xf6 : 0xf7 );
|
xWrite<u8>( Is8BitOperand() ? 0xf6 : 0xf7 );
|
||||||
EmitSibMagic( InstType, sibsrc );
|
EmitSibMagic( InstType, sibsrc );
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
@ -65,7 +66,7 @@ class Group3ImplAll
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
template< typename T >
|
template< typename T >
|
||||||
__forceinline void operator()( const iRegister<T>& from ) const { Group3Impl<T>::Emit( InstType, from ); }
|
__forceinline void operator()( const xRegister<T>& from ) const { Group3Impl<T>::Emit( InstType, from ); }
|
||||||
|
|
||||||
template< typename T >
|
template< typename T >
|
||||||
__noinline void operator()( const ModSibStrict<T>& from ) const { Group3Impl<T>::Emit( InstType, from ); }
|
__noinline void operator()( const ModSibStrict<T>& from ) const { Group3Impl<T>::Emit( InstType, from ); }
|
||||||
|
@ -73,7 +74,9 @@ public:
|
||||||
Group3ImplAll() {}
|
Group3ImplAll() {}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// ------------------------------------------------------------------------
|
||||||
|
// This class combines x86 and SSE/SSE2 instructions for iMUL and iDIV.
|
||||||
|
//
|
||||||
template< G3Type InstType, u8 OpcodeSSE >
|
template< G3Type InstType, u8 OpcodeSSE >
|
||||||
class G3Impl_PlusSSE : public Group3ImplAll<InstType>
|
class G3Impl_PlusSSE : public Group3ImplAll<InstType>
|
||||||
{
|
{
|
||||||
|
@ -94,11 +97,11 @@ class iMulImpl
|
||||||
{
|
{
|
||||||
protected:
|
protected:
|
||||||
static const uint OperandSize = sizeof(ImmType);
|
static const uint OperandSize = sizeof(ImmType);
|
||||||
static void prefix16() { if( OperandSize == 2 ) iWrite<u8>( 0x66 ); }
|
static void prefix16() { if( OperandSize == 2 ) xWrite<u8>( 0x66 ); }
|
||||||
|
|
||||||
public:
|
public:
|
||||||
// ------------------------------------------------------------------------
|
// ------------------------------------------------------------------------
|
||||||
static __emitinline void Emit( const iRegister<ImmType>& to, const iRegister<ImmType>& from )
|
static __emitinline void Emit( const xRegister<ImmType>& to, const xRegister<ImmType>& from )
|
||||||
{
|
{
|
||||||
prefix16();
|
prefix16();
|
||||||
write16( 0xaf0f );
|
write16( 0xaf0f );
|
||||||
|
@ -106,15 +109,15 @@ public:
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------------------
|
// ------------------------------------------------------------------------
|
||||||
static __emitinline void Emit( const iRegister<ImmType>& to, const void* src )
|
static __emitinline void Emit( const xRegister<ImmType>& to, const void* src )
|
||||||
{
|
{
|
||||||
prefix16();
|
prefix16();
|
||||||
write16( 0xaf0f );
|
write16( 0xaf0f );
|
||||||
iWriteDisp( to.Id, src );
|
xWriteDisp( to.Id, src );
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------------------
|
// ------------------------------------------------------------------------
|
||||||
static __emitinline void Emit( const iRegister<ImmType>& to, const ModSibBase& src )
|
static __emitinline void Emit( const xRegister<ImmType>& to, const ModSibBase& src )
|
||||||
{
|
{
|
||||||
prefix16();
|
prefix16();
|
||||||
write16( 0xaf0f );
|
write16( 0xaf0f );
|
||||||
|
@ -122,7 +125,7 @@ public:
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------------------
|
// ------------------------------------------------------------------------
|
||||||
static __emitinline void Emit( const iRegister<ImmType>& to, const iRegister<ImmType>& from, ImmType imm )
|
static __emitinline void Emit( const xRegister<ImmType>& to, const xRegister<ImmType>& from, ImmType imm )
|
||||||
{
|
{
|
||||||
prefix16();
|
prefix16();
|
||||||
write16( is_s8( imm ) ? 0x6b : 0x69 );
|
write16( is_s8( imm ) ? 0x6b : 0x69 );
|
||||||
|
@ -130,23 +133,23 @@ public:
|
||||||
if( is_s8( imm ) )
|
if( is_s8( imm ) )
|
||||||
write8( imm );
|
write8( imm );
|
||||||
else
|
else
|
||||||
iWrite<ImmType>( imm );
|
xWrite<ImmType>( imm );
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------------------
|
// ------------------------------------------------------------------------
|
||||||
static __emitinline void Emit( const iRegister<ImmType>& to, const void* src, ImmType imm )
|
static __emitinline void Emit( const xRegister<ImmType>& to, const void* src, ImmType imm )
|
||||||
{
|
{
|
||||||
prefix16();
|
prefix16();
|
||||||
write16( is_s8( imm ) ? 0x6b : 0x69 );
|
write16( is_s8( imm ) ? 0x6b : 0x69 );
|
||||||
iWriteDisp( to.Id, src );
|
xWriteDisp( to.Id, src );
|
||||||
if( is_s8( imm ) )
|
if( is_s8( imm ) )
|
||||||
write8( imm );
|
write8( imm );
|
||||||
else
|
else
|
||||||
iWrite<ImmType>( imm );
|
xWrite<ImmType>( imm );
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------------------
|
// ------------------------------------------------------------------------
|
||||||
static __emitinline void Emit( const iRegister<ImmType>& to, const ModSibBase& src, ImmType imm )
|
static __emitinline void Emit( const xRegister<ImmType>& to, const ModSibBase& src, ImmType imm )
|
||||||
{
|
{
|
||||||
prefix16();
|
prefix16();
|
||||||
write16( is_s8( imm ) ? 0x6b : 0x69 );
|
write16( is_s8( imm ) ? 0x6b : 0x69 );
|
||||||
|
@ -154,11 +157,11 @@ public:
|
||||||
if( is_s8( imm ) )
|
if( is_s8( imm ) )
|
||||||
write8( imm );
|
write8( imm );
|
||||||
else
|
else
|
||||||
iWrite<ImmType>( imm );
|
xWrite<ImmType>( imm );
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// ------------------------------------------------------------------------
|
||||||
class iMul_PlusSSE : public G3Impl_PlusSSE<G3Type_iMUL,0x59>
|
class iMul_PlusSSE : public G3Impl_PlusSSE<G3Type_iMUL,0x59>
|
||||||
{
|
{
|
||||||
protected:
|
protected:
|
||||||
|
@ -166,24 +169,19 @@ protected:
|
||||||
typedef iMulImpl<u16> iMUL16;
|
typedef iMulImpl<u16> iMUL16;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
|
using G3Impl_PlusSSE<G3Type_iMUL,0x59>::operator();
|
||||||
|
|
||||||
template< typename T >
|
__forceinline void operator()( const xRegister32& to, const xRegister32& from ) const { iMUL32::Emit( to, from ); }
|
||||||
__forceinline void operator()( const iRegister<T>& from ) const { Group3Impl<T>::Emit( G3Type_iMUL, from ); }
|
__forceinline void operator()( const xRegister32& to, const void* src ) const { iMUL32::Emit( to, src ); }
|
||||||
|
__forceinline void operator()( const xRegister32& to, const xRegister32& from, s32 imm ) const{ iMUL32::Emit( to, from, imm ); }
|
||||||
|
__noinline void operator()( const xRegister32& to, const ModSibBase& src ) const { iMUL32::Emit( to, src ); }
|
||||||
|
__noinline void operator()( const xRegister32& to, const ModSibBase& from, s32 imm ) const { iMUL32::Emit( to, from, imm ); }
|
||||||
|
|
||||||
template< typename T >
|
__forceinline void operator()( const xRegister16& to, const xRegister16& from ) const { iMUL16::Emit( to, from ); }
|
||||||
__noinline void operator()( const ModSibStrict<T>& from ) const { Group3Impl<T>::Emit( G3Type_iMUL, from ); }
|
__forceinline void operator()( const xRegister16& to, const void* src ) const { iMUL16::Emit( to, src ); }
|
||||||
|
__forceinline void operator()( const xRegister16& to, const xRegister16& from, s16 imm ) const{ iMUL16::Emit( to, from, imm ); }
|
||||||
__forceinline void operator()( const iRegister32& to, const iRegister32& from ) const { iMUL32::Emit( to, from ); }
|
__noinline void operator()( const xRegister16& to, const ModSibBase& src ) const { iMUL16::Emit( to, src ); }
|
||||||
__forceinline void operator()( const iRegister32& to, const void* src ) const { iMUL32::Emit( to, src ); }
|
__noinline void operator()( const xRegister16& to, const ModSibBase& from, s16 imm ) const { iMUL16::Emit( to, from, imm ); }
|
||||||
__forceinline void operator()( const iRegister32& to, const iRegister32& from, s32 imm ) const{ iMUL32::Emit( to, from, imm ); }
|
|
||||||
__noinline void operator()( const iRegister32& to, const ModSibBase& src ) const { iMUL32::Emit( to, src ); }
|
|
||||||
__noinline void operator()( const iRegister32& to, const ModSibBase& from, s32 imm ) const { iMUL32::Emit( to, from, imm ); }
|
|
||||||
|
|
||||||
__forceinline void operator()( const iRegister16& to, const iRegister16& from ) const { iMUL16::Emit( to, from ); }
|
|
||||||
__forceinline void operator()( const iRegister16& to, const void* src ) const { iMUL16::Emit( to, src ); }
|
|
||||||
__forceinline void operator()( const iRegister16& to, const iRegister16& from, s16 imm ) const{ iMUL16::Emit( to, from, imm ); }
|
|
||||||
__noinline void operator()( const iRegister16& to, const ModSibBase& src ) const { iMUL16::Emit( to, src ); }
|
|
||||||
__noinline void operator()( const iRegister16& to, const ModSibBase& from, s16 imm ) const { iMUL16::Emit( to, from, imm ); }
|
|
||||||
|
|
||||||
iMul_PlusSSE() {}
|
iMul_PlusSSE() {}
|
||||||
};
|
};
|
||||||
|
|
|
@ -21,6 +21,8 @@
|
||||||
// Implementations found here: Increment and Decrement Instructions!
|
// Implementations found here: Increment and Decrement Instructions!
|
||||||
// Note: This header is meant to be included from within the x86Emitter::Internal namespace.
|
// Note: This header is meant to be included from within the x86Emitter::Internal namespace.
|
||||||
|
|
||||||
|
// ------------------------------------------------------------------------
|
||||||
|
//
|
||||||
template< typename ImmType >
|
template< typename ImmType >
|
||||||
class IncDecImpl
|
class IncDecImpl
|
||||||
{
|
{
|
||||||
|
@ -28,12 +30,12 @@ protected:
|
||||||
static const uint OperandSize = sizeof(ImmType);
|
static const uint OperandSize = sizeof(ImmType);
|
||||||
|
|
||||||
static bool Is8BitOperand() { return OperandSize == 1; }
|
static bool Is8BitOperand() { return OperandSize == 1; }
|
||||||
static void prefix16() { if( OperandSize == 2 ) iWrite<u8>( 0x66 ); }
|
static void prefix16() { if( OperandSize == 2 ) xWrite<u8>( 0x66 ); }
|
||||||
|
|
||||||
public:
|
public:
|
||||||
IncDecImpl() {} // For the love of GCC.
|
IncDecImpl() {} // For the love of GCC.
|
||||||
|
|
||||||
static __emitinline void Emit( bool isDec, const iRegister<ImmType>& to )
|
static __emitinline void Emit( bool isDec, const xRegister<ImmType>& to )
|
||||||
{
|
{
|
||||||
// There is no valid 8-bit form of direct register inc/dec, so fall
|
// There is no valid 8-bit form of direct register inc/dec, so fall
|
||||||
// back on Mod/RM format instead:
|
// back on Mod/RM format instead:
|
||||||
|
@ -67,13 +69,13 @@ protected:
|
||||||
typedef IncDecImpl<u8> m_8;
|
typedef IncDecImpl<u8> m_8;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
__forceinline void operator()( const iRegister32& to ) const { m_32::Emit( isDec, to ); }
|
__forceinline void operator()( const xRegister32& to ) const { m_32::Emit( isDec, to ); }
|
||||||
__noinline void operator()( const ModSibStrict<u32>& sibdest ) const{ m_32::Emit( isDec, sibdest ); }
|
__noinline void operator()( const ModSibStrict<u32>& sibdest ) const{ m_32::Emit( isDec, sibdest ); }
|
||||||
|
|
||||||
__forceinline void operator()( const iRegister16& to ) const { m_16::Emit( isDec, to ); }
|
__forceinline void operator()( const xRegister16& to ) const { m_16::Emit( isDec, to ); }
|
||||||
__noinline void operator()( const ModSibStrict<u16>& sibdest ) const{ m_16::Emit( isDec, sibdest ); }
|
__noinline void operator()( const ModSibStrict<u16>& sibdest ) const{ m_16::Emit( isDec, sibdest ); }
|
||||||
|
|
||||||
__forceinline void operator()( const iRegister8& to ) const { m_8::Emit( isDec, to ); }
|
__forceinline void operator()( const xRegister8& to ) const { m_8::Emit( isDec, to ); }
|
||||||
__noinline void operator()( const ModSibStrict<u8>& sibdest ) const { m_8::Emit( isDec, sibdest ); }
|
__noinline void operator()( const ModSibStrict<u8>& sibdest ) const { m_8::Emit( isDec, sibdest ); }
|
||||||
|
|
||||||
IncDecImplAll() {} // don't ask.
|
IncDecImplAll() {} // don't ask.
|
||||||
|
|
|
@ -27,22 +27,22 @@ class JmpCallImpl
|
||||||
protected:
|
protected:
|
||||||
static const uint OperandSize = sizeof(ImmType);
|
static const uint OperandSize = sizeof(ImmType);
|
||||||
|
|
||||||
static void prefix16() { if( OperandSize == 2 ) iWrite<u8>( 0x66 ); }
|
static void prefix16() { if( OperandSize == 2 ) xWrite<u8>( 0x66 ); }
|
||||||
|
|
||||||
public:
|
public:
|
||||||
JmpCallImpl() {} // For the love of GCC.
|
JmpCallImpl() {} // For the love of GCC.
|
||||||
|
|
||||||
static __emitinline void Emit( bool isJmp, const iRegister<ImmType>& absreg )
|
static __emitinline void Emit( bool isJmp, const xRegister<ImmType>& absreg )
|
||||||
{
|
{
|
||||||
prefix16();
|
prefix16();
|
||||||
iWrite<u8>( 0xff );
|
xWrite<u8>( 0xff );
|
||||||
ModRM_Direct( isJmp ? 4 : 2, absreg.Id );
|
ModRM_Direct( isJmp ? 4 : 2, absreg.Id );
|
||||||
}
|
}
|
||||||
|
|
||||||
static __emitinline void Emit( bool isJmp, const ModSibStrict<ImmType>& src )
|
static __emitinline void Emit( bool isJmp, const ModSibStrict<ImmType>& src )
|
||||||
{
|
{
|
||||||
prefix16();
|
prefix16();
|
||||||
iWrite<u8>( 0xff );
|
xWrite<u8>( 0xff );
|
||||||
EmitSibMagic( isJmp ? 4 : 2, src );
|
EmitSibMagic( isJmp ? 4 : 2, src );
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
@ -58,10 +58,10 @@ protected:
|
||||||
public:
|
public:
|
||||||
JmpCallImplAll() {}
|
JmpCallImplAll() {}
|
||||||
|
|
||||||
__forceinline void operator()( const iRegister32& absreg ) const { m_32::Emit( isJmp, absreg ); }
|
__forceinline void operator()( const xRegister32& absreg ) const { m_32::Emit( isJmp, absreg ); }
|
||||||
__forceinline void operator()( const ModSibStrict<u32>& src ) const { m_32::Emit( isJmp, src ); }
|
__forceinline void operator()( const ModSibStrict<u32>& src ) const { m_32::Emit( isJmp, src ); }
|
||||||
|
|
||||||
__forceinline void operator()( const iRegister16& absreg ) const { m_16::Emit( isJmp, absreg ); }
|
__forceinline void operator()( const xRegister16& absreg ) const { m_16::Emit( isJmp, absreg ); }
|
||||||
__forceinline void operator()( const ModSibStrict<u16>& src ) const { m_16::Emit( isJmp, src ); }
|
__forceinline void operator()( const ModSibStrict<u16>& src ) const { m_16::Emit( isJmp, src ); }
|
||||||
|
|
||||||
// Special form for calling functions. This form automatically resolves the
|
// Special form for calling functions. This form automatically resolves the
|
||||||
|
@ -77,8 +77,8 @@ public:
|
||||||
// always 5 bytes (16 bit calls are bad mojo, so no bother to do special logic).
|
// always 5 bytes (16 bit calls are bad mojo, so no bother to do special logic).
|
||||||
|
|
||||||
sptr dest = (sptr)func - ((sptr)iGetPtr() + 5);
|
sptr dest = (sptr)func - ((sptr)iGetPtr() + 5);
|
||||||
iWrite<u8>( 0xe8 );
|
xWrite<u8>( 0xe8 );
|
||||||
iWrite<u32>( dest );
|
xWrite<u32>( dest );
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -32,23 +32,23 @@ class MovImpl
 protected:
 	static const uint OperandSize = sizeof(ImmType);
 	static bool Is8BitOperand() { return OperandSize == 1; }
-	static void prefix16() { if( OperandSize == 2 ) iWrite<u8>( 0x66 ); }
+	static void prefix16() { if( OperandSize == 2 ) xWrite<u8>( 0x66 ); }
 
 public:
 	MovImpl() {}
 
 	// ------------------------------------------------------------------------
-	static __emitinline void Emit( const iRegister<ImmType>& to, const iRegister<ImmType>& from )
+	static __emitinline void Emit( const xRegister<ImmType>& to, const xRegister<ImmType>& from )
 	{
 		if( to == from ) return;	// ignore redundant MOVs.
 
 		prefix16();
-		iWrite<u8>( Is8BitOperand() ? 0x88 : 0x89 );
+		xWrite<u8>( Is8BitOperand() ? 0x88 : 0x89 );
 		ModRM_Direct( from.Id, to.Id );
 	}
 
 	// ------------------------------------------------------------------------
-	static __emitinline void Emit( const ModSibBase& dest, const iRegister<ImmType>& from )
+	static __emitinline void Emit( const ModSibBase& dest, const xRegister<ImmType>& from )
 	{
 		prefix16();
 
@@ -57,18 +57,18 @@ public:
 
 		if( from.IsAccumulator() && dest.Index.IsEmpty() && dest.Base.IsEmpty() )
 		{
-			iWrite<u8>( Is8BitOperand() ? 0xa2 : 0xa3 );
-			iWrite<u32>( dest.Displacement );
+			xWrite<u8>( Is8BitOperand() ? 0xa2 : 0xa3 );
+			xWrite<u32>( dest.Displacement );
 		}
 		else
 		{
-			iWrite<u8>( Is8BitOperand() ? 0x88 : 0x89 );
+			xWrite<u8>( Is8BitOperand() ? 0x88 : 0x89 );
 			EmitSibMagic( from.Id, dest );
 		}
 	}
 
 	// ------------------------------------------------------------------------
-	static __emitinline void Emit( const iRegister<ImmType>& to, const ModSibBase& src )
+	static __emitinline void Emit( const xRegister<ImmType>& to, const ModSibBase& src )
 	{
 		prefix16();
 
@@ -77,18 +77,18 @@ public:
 
 		if( to.IsAccumulator() && src.Index.IsEmpty() && src.Base.IsEmpty() )
 		{
-			iWrite<u8>( Is8BitOperand() ? 0xa0 : 0xa1 );
-			iWrite<u32>( src.Displacement );
+			xWrite<u8>( Is8BitOperand() ? 0xa0 : 0xa1 );
+			xWrite<u32>( src.Displacement );
 		}
 		else
 		{
-			iWrite<u8>( Is8BitOperand() ? 0x8a : 0x8b );
+			xWrite<u8>( Is8BitOperand() ? 0x8a : 0x8b );
 			EmitSibMagic( to.Id, src );
 		}
 	}
 
 	// ------------------------------------------------------------------------
-	static __emitinline void Emit( void* dest, const iRegister<ImmType>& from )
+	static __emitinline void Emit( void* dest, const xRegister<ImmType>& from )
 	{
 		prefix16();
 
@@ -96,18 +96,18 @@ public:
 
 		if( from.IsAccumulator() )
 		{
-			iWrite<u8>( Is8BitOperand() ? 0xa2 : 0xa3 );
-			iWrite<s32>( (s32)dest );
+			xWrite<u8>( Is8BitOperand() ? 0xa2 : 0xa3 );
+			xWrite<s32>( (s32)dest );
 		}
 		else
 		{
-			iWrite<u8>( Is8BitOperand() ? 0x88 : 0x89 );
-			iWriteDisp( from.Id, dest );
+			xWrite<u8>( Is8BitOperand() ? 0x88 : 0x89 );
+			xWriteDisp( from.Id, dest );
 		}
 	}
 
 	// ------------------------------------------------------------------------
-	static __emitinline void Emit( const iRegister<ImmType>& to, const void* src )
+	static __emitinline void Emit( const xRegister<ImmType>& to, const void* src )
 	{
 		prefix16();
 
@@ -115,33 +115,33 @@ public:
 
 		if( to.IsAccumulator() )
 		{
-			iWrite<u8>( Is8BitOperand() ? 0xa0 : 0xa1 );
-			iWrite<s32>( (s32)src );
+			xWrite<u8>( Is8BitOperand() ? 0xa0 : 0xa1 );
+			xWrite<s32>( (s32)src );
 		}
 		else
 		{
-			iWrite<u8>( Is8BitOperand() ? 0x8a : 0x8b );
-			iWriteDisp( to.Id, src );
+			xWrite<u8>( Is8BitOperand() ? 0x8a : 0x8b );
+			xWriteDisp( to.Id, src );
 		}
 	}
 
 	// ------------------------------------------------------------------------
-	static __emitinline void Emit( const iRegister<ImmType>& to, ImmType imm )
+	static __emitinline void Emit( const xRegister<ImmType>& to, ImmType imm )
 	{
 		// Note: MOV does not have (reg16/32,imm8) forms.
 
 		prefix16();
-		iWrite<u8>( (Is8BitOperand() ? 0xb0 : 0xb8) | to.Id );
-		iWrite<ImmType>( imm );
+		xWrite<u8>( (Is8BitOperand() ? 0xb0 : 0xb8) | to.Id );
+		xWrite<ImmType>( imm );
 	}
 
 	// ------------------------------------------------------------------------
 	static __emitinline void Emit( ModSibStrict<ImmType> dest, ImmType imm )
 	{
 		prefix16();
-		iWrite<u8>( Is8BitOperand() ? 0xc6 : 0xc7 );
+		xWrite<u8>( Is8BitOperand() ? 0xc6 : 0xc7 );
 		EmitSibMagic( 0, dest );
-		iWrite<ImmType>( imm );
+		xWrite<ImmType>( imm );
 	}
 };
 
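(Aside, illustration only -- how the opcode selection above plays out for 32-bit operands, using register Ids that this same commit defines (eax=0, ecx=1, edx=2). "dest" is just a placeholder address:)

    // xMOV( ecx, edx )            ->  89 D1            (0x89, ModRM_Direct(2, 1))
    // xMOV( eax, 0x12345678 )     ->  B8 78 56 34 12   (0xb8 | to.Id, then imm32)
    // xMOV( (void*)dest, eax )    ->  A3 <disp32>      (accumulator short form)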
@@ -150,15 +150,15 @@ class MovImplAll
 {
 public:
 	template< typename T >
-	__forceinline void operator()( const iRegister<T>& to, const iRegister<T>& from ) const { MovImpl<T>::Emit( to, from ); }
+	__forceinline void operator()( const xRegister<T>& to, const xRegister<T>& from ) const { MovImpl<T>::Emit( to, from ); }
 	template< typename T >
-	__forceinline void operator()( const iRegister<T>& to, const void* src ) const { MovImpl<T>::Emit( to, src ); }
+	__forceinline void operator()( const xRegister<T>& to, const void* src ) const { MovImpl<T>::Emit( to, src ); }
 	template< typename T >
-	__forceinline void operator()( void* dest, const iRegister<T>& from ) const { MovImpl<T>::Emit( dest, from ); }
+	__forceinline void operator()( void* dest, const xRegister<T>& from ) const { MovImpl<T>::Emit( dest, from ); }
 	template< typename T >
-	__noinline void operator()( const ModSibBase& sibdest, const iRegister<T>& from ) const { MovImpl<T>::Emit( sibdest, from ); }
+	__noinline void operator()( const ModSibBase& sibdest, const xRegister<T>& from ) const { MovImpl<T>::Emit( sibdest, from ); }
 	template< typename T >
-	__noinline void operator()( const iRegister<T>& to, const ModSibBase& sibsrc ) const { MovImpl<T>::Emit( to, sibsrc ); }
+	__noinline void operator()( const xRegister<T>& to, const ModSibBase& sibsrc ) const { MovImpl<T>::Emit( to, sibsrc ); }
 
 	template< typename T >
 	__noinline void operator()( const ModSibStrict<T>& sibdest, int imm ) const { MovImpl<T>::Emit( sibdest, imm ); }
@@ -167,10 +167,10 @@ public:
 	// the flags (namely replacing mov reg,0 with xor).
 
 	template< typename T >
-	__emitinline void operator()( const iRegister<T>& to, int imm, bool preserve_flags=false ) const
+	__emitinline void operator()( const xRegister<T>& to, int imm, bool preserve_flags=false ) const
 	{
 		if( !preserve_flags && (imm == 0) )
-			iXOR( to, to );
+			xXOR( to, to );
 		else
 			MovImpl<T>::Emit( to, imm );
 	}
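(Aside, illustration only: the immediate overload above silently strength-reduces a zero store unless flag preservation is requested:)

    xMOV( eax, 0 );        // becomes xXOR( eax, eax ) -- smaller, but clobbers EFLAGS
    xMOV( eax, 0, true );  // preserve_flags: emits the literal mov eax,0 (B8 00 00 00 00)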
@@ -193,7 +193,7 @@ protected:
 	static const uint OperandSize = sizeof(ImmType);
 
 	static bool Is8BitOperand() { return OperandSize == 1; }
-	static void prefix16() { if( OperandSize == 2 ) iWrite<u8>( 0x66 ); }
+	static void prefix16() { if( OperandSize == 2 ) xWrite<u8>( 0x66 ); }
 
 	static __forceinline void emit_base( JccComparisonType cc )
 	{
@@ -206,27 +206,27 @@ protected:
 public:
 	CMovSetImpl() {}
 
-	static __emitinline void Emit( JccComparisonType cc, const iRegister<ImmType>& to, const iRegister<ImmType>& from )
+	static __emitinline void Emit( JccComparisonType cc, const xRegister<ImmType>& to, const xRegister<ImmType>& from )
 	{
 		if( to == from ) return;
 		emit_base( cc );
 		ModRM_Direct( to.Id, from.Id );
 	}
 
-	static __emitinline void Emit( JccComparisonType cc, const iRegister<ImmType>& to, const void* src )
+	static __emitinline void Emit( JccComparisonType cc, const xRegister<ImmType>& to, const void* src )
 	{
 		emit_base( cc );
-		iWriteDisp( to.Id, src );
+		xWriteDisp( to.Id, src );
 	}
 
-	static __emitinline void Emit( JccComparisonType cc, const iRegister<ImmType>& to, const ModSibBase& sibsrc )
+	static __emitinline void Emit( JccComparisonType cc, const xRegister<ImmType>& to, const ModSibBase& sibsrc )
 	{
 		emit_base( cc );
 		EmitSibMagic( to.Id, sibsrc );
 	}
 
 	// This form is provided for SETcc only (not available in CMOV)
-	static __emitinline void EmitSet( JccComparisonType cc, const iRegister<ImmType>& to )
+	static __emitinline void EmitSet( JccComparisonType cc, const xRegister<ImmType>& to )
 	{
 		emit_base( cc );
 		ModRM_Direct( 0, to.Id );
@@ -236,7 +236,7 @@ public:
 	static __emitinline void EmitSet( JccComparisonType cc, const void* src )
 	{
 		emit_base( cc );
-		iWriteDisp( 0, src );
+		xWriteDisp( 0, src );
 	}
 
 	// This form is provided for SETcc only (not available in CMOV)
@@ -258,13 +258,13 @@ protected:
 	typedef CMovSetImpl<u16, 0x40> m_16;	// 0x40 is the cmov base instruction id
 
 public:
-	__forceinline void operator()( JccComparisonType ccType, const iRegister32& to, const iRegister32& from ) const { m_32::Emit( ccType, to, from ); }
-	__forceinline void operator()( JccComparisonType ccType, const iRegister32& to, const void* src ) const { m_32::Emit( ccType, to, src ); }
-	__noinline void operator()( JccComparisonType ccType, const iRegister32& to, const ModSibBase& sibsrc ) const { m_32::Emit( ccType, to, sibsrc ); }
+	__forceinline void operator()( JccComparisonType ccType, const xRegister32& to, const xRegister32& from ) const { m_32::Emit( ccType, to, from ); }
+	__forceinline void operator()( JccComparisonType ccType, const xRegister32& to, const void* src ) const { m_32::Emit( ccType, to, src ); }
+	__noinline void operator()( JccComparisonType ccType, const xRegister32& to, const ModSibBase& sibsrc ) const { m_32::Emit( ccType, to, sibsrc ); }
 
-	__forceinline void operator()( JccComparisonType ccType, const iRegister16& to, const iRegister16& from ) const { m_16::Emit( ccType, to, from ); }
-	__forceinline void operator()( JccComparisonType ccType, const iRegister16& to, const void* src ) const { m_16::Emit( ccType, to, src ); }
-	__noinline void operator()( JccComparisonType ccType, const iRegister16& to, const ModSibBase& sibsrc ) const { m_16::Emit( ccType, to, sibsrc ); }
+	__forceinline void operator()( JccComparisonType ccType, const xRegister16& to, const xRegister16& from ) const { m_16::Emit( ccType, to, from ); }
+	__forceinline void operator()( JccComparisonType ccType, const xRegister16& to, const void* src ) const { m_16::Emit( ccType, to, src ); }
+	__noinline void operator()( JccComparisonType ccType, const xRegister16& to, const ModSibBase& sibsrc ) const { m_16::Emit( ccType, to, sibsrc ); }
 
 	CMovImplGeneric() {}	// don't ask.
 };
@@ -278,13 +278,13 @@ protected:
 	typedef CMovSetImpl<u16, 0x40> m_16;
 
 public:
-	__forceinline void operator()( const iRegister32& to, const iRegister32& from ) const { m_32::Emit( ccType, to, from ); }
-	__forceinline void operator()( const iRegister32& to, const void* src ) const { m_32::Emit( ccType, to, src ); }
-	__noinline void operator()( const iRegister32& to, const ModSibBase& sibsrc ) const { m_32::Emit( ccType, to, sibsrc ); }
+	__forceinline void operator()( const xRegister32& to, const xRegister32& from ) const { m_32::Emit( ccType, to, from ); }
+	__forceinline void operator()( const xRegister32& to, const void* src ) const { m_32::Emit( ccType, to, src ); }
+	__noinline void operator()( const xRegister32& to, const ModSibBase& sibsrc ) const { m_32::Emit( ccType, to, sibsrc ); }
 
-	__forceinline void operator()( const iRegister16& to, const iRegister16& from ) const { m_16::Emit( ccType, to, from ); }
-	__forceinline void operator()( const iRegister16& to, const void* src ) const { m_16::Emit( ccType, to, src ); }
-	__noinline void operator()( const iRegister16& to, const ModSibBase& sibsrc ) const { m_16::Emit( ccType, to, sibsrc ); }
+	__forceinline void operator()( const xRegister16& to, const xRegister16& from ) const { m_16::Emit( ccType, to, from ); }
+	__forceinline void operator()( const xRegister16& to, const void* src ) const { m_16::Emit( ccType, to, src ); }
+	__noinline void operator()( const xRegister16& to, const ModSibBase& sibsrc ) const { m_16::Emit( ccType, to, sibsrc ); }
 
 	CMovImplAll() {}	// don't ask.
 };
@@ -296,7 +296,7 @@ protected:
 	typedef CMovSetImpl<u8, 0x90> Impl;	// 0x90 is the SETcc base instruction id
 
 public:
-	__forceinline void operator()( JccComparisonType cc, const iRegister8& to ) const { Impl::EmitSet( cc, to ); }
+	__forceinline void operator()( JccComparisonType cc, const xRegister8& to ) const { Impl::EmitSet( cc, to ); }
 	__forceinline void operator()( JccComparisonType cc, void* dest ) const { Impl::EmitSet( cc, dest ); }
 	__noinline void operator()( JccComparisonType cc, const ModSibStrict<u8>& dest ) const { Impl::EmitSet( cc, dest ); }
 
@@ -311,7 +311,7 @@ protected:
 	typedef CMovSetImpl<u8, 0x90> Impl;	// 0x90 is the SETcc base instruction id
 
 public:
-	__forceinline void operator()( const iRegister8& to ) const { Impl::EmitSet( ccType, to ); }
+	__forceinline void operator()( const xRegister8& to ) const { Impl::EmitSet( ccType, to ); }
 	__forceinline void operator()( void* dest ) const { Impl::EmitSet( ccType, dest ); }
 	__noinline void operator()( const ModSibStrict<u8>& dest ) const { Impl::EmitSet( ccType, dest ); }
 
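(Aside, illustration only -- the two calling styles these CMOV/SETcc wrappers expose; the const instance names used here are the ones declared further down in this same commit:)

    xCMOVZ( eax, ebx );              // cmovz eax, ebx   (condition baked into the type)
    xCMOV( Jcc_Carry, eax, ebx );    // generic form, condition chosen at runtime
    xSETZ( al );                     // setz al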
@@ -330,24 +330,24 @@ protected:
 	static const uint SrcOperandSize = sizeof( SrcImmType );
 
 	static bool Is8BitOperand() { return SrcOperandSize == 1; }
-	static void prefix16() { if( DestOperandSize == 2 ) iWrite<u8>( 0x66 ); }
+	static void prefix16() { if( DestOperandSize == 2 ) xWrite<u8>( 0x66 ); }
 	static __forceinline void emit_base( bool SignExtend )
 	{
 		prefix16();
-		iWrite<u8>( 0x0f );
-		iWrite<u8>( 0xb6 | (Is8BitOperand() ? 0 : 1) | (SignExtend ? 8 : 0 ) );
+		xWrite<u8>( 0x0f );
+		xWrite<u8>( 0xb6 | (Is8BitOperand() ? 0 : 1) | (SignExtend ? 8 : 0 ) );
 	}
 
 public:
 	MovExtendImpl() {}		// For the love of GCC.
 
-	static __emitinline void Emit( const iRegister<DestImmType>& to, const iRegister<SrcImmType>& from, bool SignExtend )
+	static __emitinline void Emit( const xRegister<DestImmType>& to, const xRegister<SrcImmType>& from, bool SignExtend )
 	{
 		emit_base( SignExtend );
 		ModRM_Direct( to.Id, from.Id );
 	}
 
-	static __emitinline void Emit( const iRegister<DestImmType>& to, const ModSibStrict<SrcImmType>& sibsrc, bool SignExtend )
+	static __emitinline void Emit( const xRegister<DestImmType>& to, const ModSibStrict<SrcImmType>& sibsrc, bool SignExtend )
 	{
 		emit_base( SignExtend );
 		EmitSibMagic( to.Id, sibsrc );
@@ -364,14 +364,14 @@ protected:
 	typedef MovExtendImpl<u16, u8> m_8to16;
 
 public:
-	__forceinline void operator()( const iRegister32& to, const iRegister16& from ) const { m_16to32::Emit( to, from, SignExtend ); }
-	__noinline void operator()( const iRegister32& to, const ModSibStrict<u16>& sibsrc ) const { m_16to32::Emit( to, sibsrc, SignExtend ); }
+	__forceinline void operator()( const xRegister32& to, const xRegister16& from ) const { m_16to32::Emit( to, from, SignExtend ); }
+	__noinline void operator()( const xRegister32& to, const ModSibStrict<u16>& sibsrc ) const { m_16to32::Emit( to, sibsrc, SignExtend ); }
 
-	__forceinline void operator()( const iRegister32& to, const iRegister8& from ) const { m_8to32::Emit( to, from, SignExtend ); }
-	__noinline void operator()( const iRegister32& to, const ModSibStrict<u8>& sibsrc ) const { m_8to32::Emit( to, sibsrc, SignExtend ); }
+	__forceinline void operator()( const xRegister32& to, const xRegister8& from ) const { m_8to32::Emit( to, from, SignExtend ); }
+	__noinline void operator()( const xRegister32& to, const ModSibStrict<u8>& sibsrc ) const { m_8to32::Emit( to, sibsrc, SignExtend ); }
 
-	__forceinline void operator()( const iRegister16& to, const iRegister8& from ) const { m_8to16::Emit( to, from, SignExtend ); }
-	__noinline void operator()( const iRegister16& to, const ModSibStrict<u8>& sibsrc ) const { m_8to16::Emit( to, sibsrc, SignExtend ); }
+	__forceinline void operator()( const xRegister16& to, const xRegister8& from ) const { m_8to16::Emit( to, from, SignExtend ); }
+	__noinline void operator()( const xRegister16& to, const ModSibStrict<u8>& sibsrc ) const { m_8to16::Emit( to, sibsrc, SignExtend ); }
 
 	MovExtendImplAll() {}	// don't ask.
 };
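(Aside, illustration only -- the opcode math in emit_base() resolves to the familiar MOVZX/MOVSX encodings; the xMOVZX/xMOVSX instances used below are declared later in this commit:)

    xMOVZX( eax, cx );   // 0F B7 /r  (0xb6 | 1, SignExtend = 0)
    xMOVSX( eax, bl );   // 0F BE /r  (0xb6 | 8, 8-bit source)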
@@ -27,41 +27,41 @@ class TestImpl
 protected:
 	static const uint OperandSize = sizeof(ImmType);
 	static bool Is8BitOperand() { return OperandSize == 1; }
-	static void prefix16() { if( OperandSize == 2 ) iWrite<u8>( 0x66 ); }
+	static void prefix16() { if( OperandSize == 2 ) xWrite<u8>( 0x66 ); }
 
 public:
 	TestImpl() {}
 
 	// ------------------------------------------------------------------------
-	static __emitinline void Emit( const iRegister<ImmType>& to, const iRegister<ImmType>& from )
+	static __emitinline void Emit( const xRegister<ImmType>& to, const xRegister<ImmType>& from )
 	{
 		prefix16();
-		iWrite<u8>( Is8BitOperand() ? 0x84 : 0x85 );
+		xWrite<u8>( Is8BitOperand() ? 0x84 : 0x85 );
 		ModRM_Direct( from.Id, to.Id );
 	}
 
 	// ------------------------------------------------------------------------
-	static __emitinline void Emit( const iRegister<ImmType>& to, ImmType imm )
+	static __emitinline void Emit( const xRegister<ImmType>& to, ImmType imm )
 	{
 		prefix16();
 
 		if( to.IsAccumulator() )
-			iWrite<u8>( Is8BitOperand() ? 0xa8 : 0xa9 );
+			xWrite<u8>( Is8BitOperand() ? 0xa8 : 0xa9 );
 		else
 		{
-			iWrite<u8>( Is8BitOperand() ? 0xf6 : 0xf7 );
+			xWrite<u8>( Is8BitOperand() ? 0xf6 : 0xf7 );
 			ModRM_Direct( 0, to.Id );
 		}
-		iWrite<ImmType>( imm );
+		xWrite<ImmType>( imm );
 	}
 
 	// ------------------------------------------------------------------------
 	static __emitinline void Emit( ModSibStrict<ImmType> dest, ImmType imm )
 	{
 		prefix16();
-		iWrite<u8>( Is8BitOperand() ? 0xf6 : 0xf7 );
+		xWrite<u8>( Is8BitOperand() ? 0xf6 : 0xf7 );
 		EmitSibMagic( 0, dest );
-		iWrite<ImmType>( imm );
+		xWrite<ImmType>( imm );
 	}
 };
 
@@ -71,12 +71,12 @@ class TestImplAll
 {
 public:
 	template< typename T >
-	__forceinline void operator()( const iRegister<T>& to, const iRegister<T>& from ) const { TestImpl<T>::Emit( to, from ); }
+	__forceinline void operator()( const xRegister<T>& to, const xRegister<T>& from ) const { TestImpl<T>::Emit( to, from ); }
 
 	template< typename T >
 	__noinline void operator()( const ModSibStrict<T>& sibdest, T imm ) const { TestImpl<T>::Emit( sibdest, imm ); }
 	template< typename T >
-	void operator()( const iRegister<T>& to, T imm ) const { TestImpl<T>::Emit( to, imm ); }
+	void operator()( const xRegister<T>& to, T imm ) const { TestImpl<T>::Emit( to, imm ); }
 
 	TestImplAll() {}	// Why does GCC need these?
 };
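(Aside, illustration only -- how the accumulator special case above plays out for xTEST, which this commit declares later on:)

    xTEST( al,  (u8)0x80 );    // A8 80               (accumulator short form)
    xTEST( ecx, (u32)0x100 );  // F7 C1 00 01 00 00   (general form: F7 /0, imm32)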
@@ -26,11 +26,11 @@ __emitinline void SimdPrefix( u8 opcode, u8 prefix=0 )
 {
 	if( sizeof( T ) == 16 && prefix != 0 )
 	{
-		iWrite<u16>( 0x0f00 | prefix );
-		iWrite<u8>( opcode );
+		xWrite<u16>( 0x0f00 | prefix );
+		xWrite<u8>( opcode );
 	}
 	else
-		iWrite<u16>( (opcode<<8) | 0x0f );
+		xWrite<u16>( (opcode<<8) | 0x0f );
 }
 
 // ------------------------------------------------------------------------
@@ -40,24 +40,24 @@ __emitinline void SimdPrefix( u8 opcode, u8 prefix=0 )
 // instructions violate this "guideline.")
 //
 template< typename T, typename T2 >
-__emitinline void writeXMMop( u8 prefix, u8 opcode, const iRegister<T>& to, const iRegister<T2>& from )
+__emitinline void writeXMMop( u8 prefix, u8 opcode, const xRegister<T>& to, const xRegister<T2>& from )
 {
 	SimdPrefix<T>( opcode, prefix );
 	ModRM_Direct( to.Id, from.Id );
 }
 
 template< typename T >
-void writeXMMop( u8 prefix, u8 opcode, const iRegister<T>& reg, const ModSibBase& sib )
+void writeXMMop( u8 prefix, u8 opcode, const xRegister<T>& reg, const ModSibBase& sib )
 {
 	SimdPrefix<T>( opcode, prefix );
 	EmitSibMagic( reg.Id, sib );
 }
 
 template< typename T >
-__emitinline void writeXMMop( u8 prefix, u8 opcode, const iRegister<T>& reg, const void* data )
+__emitinline void writeXMMop( u8 prefix, u8 opcode, const xRegister<T>& reg, const void* data )
 {
 	SimdPrefix<T>( opcode, prefix );
-	iWriteDisp( reg.Id, data );
+	xWriteDisp( reg.Id, data );
 }
 
 // ------------------------------------------------------------------------
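(Aside, illustration only -- the byte sequences the two SimdPrefix branches produce for a 128-bit operand type, remembering that the u16 write lands little-endian:)

    // SimdPrefix<u128>( 0x6f, 0x66 )  ->  66 0F 6F   (e.g. movdqa)
    // SimdPrefix<u128>( 0x28 )        ->  0F 28      (e.g. movaps, no prefix byte)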
@@ -66,51 +66,74 @@ __emitinline void writeXMMop( u8 prefix, u8 opcode, const iRegister<T>& reg, con
 // some special forms of sse/xmm mov instructions also use them due to prefixing inconsistencies.
 //
 template< typename T, typename T2 >
-__emitinline void writeXMMop( u8 opcode, const iRegister<T>& to, const iRegister<T2>& from )
+__emitinline void writeXMMop( u8 opcode, const xRegister<T>& to, const xRegister<T2>& from )
 {
 	SimdPrefix<T>( opcode );
 	ModRM_Direct( to.Id, from.Id );
 }
 
 template< typename T >
-void writeXMMop( u8 opcode, const iRegister<T>& reg, const ModSibBase& sib )
+void writeXMMop( u8 opcode, const xRegister<T>& reg, const ModSibBase& sib )
 {
 	SimdPrefix<T>( opcode );
 	EmitSibMagic( reg.Id, sib );
 }
 
 template< typename T >
-__emitinline void writeXMMop( u8 opcode, const iRegister<T>& reg, const void* data )
+__emitinline void writeXMMop( u8 opcode, const xRegister<T>& reg, const void* data )
 {
 	SimdPrefix<T>( opcode );
-	iWriteDisp( reg.Id, data );
+	xWriteDisp( reg.Id, data );
 }
 
 //////////////////////////////////////////////////////////////////////////////////////////
 // Moves to/from high/low portions of an xmm register.
 // These instructions cannot be used in reg/reg form.
 //
-template< u8 Prefix, u8 Opcode >
+template< u8 Opcode >
 class MovhlImplAll
 {
+protected:
+	template< u8 Prefix >
+	struct Woot
+	{
+		__forceinline void operator()( const xRegisterSSE& to, const void* from ) const { writeXMMop( Prefix, Opcode, to, from ); }
+		__forceinline void operator()( const void* to, const xRegisterSSE& from ) const { writeXMMop( Prefix, Opcode+1, from, to ); }
+		__noinline void operator()( const xRegisterSSE& to, const ModSibBase& from ) const { writeXMMop( Prefix, Opcode, to, from ); }
+		__noinline void operator()( const ModSibBase& to, const xRegisterSSE& from ) const { writeXMMop( Prefix, Opcode+1, from, to ); }
+	};
+
 public:
-	__forceinline void operator()( const iRegisterSSE& to, const void* from ) const { writeXMMop( Prefix, Opcode, to, from ); }
-	__forceinline void operator()( const void* to, const iRegisterSSE& from ) const { writeXMMop( Prefix, Opcode+1, from, to ); }
-	__noinline void operator()( const iRegisterSSE& to, const ModSibBase& from ) const { writeXMMop( Prefix, Opcode, to, from ); }
-	__noinline void operator()( const ModSibBase& to, const iRegisterSSE& from ) const { writeXMMop( Prefix, Opcode+1, from, to ); }
+	Woot<0x00> PS;
+	Woot<0x66> PD;
 
 	MovhlImplAll() {} //GCC.
 };
 
+// ------------------------------------------------------------------------
+// RegtoReg forms of MOVHL/MOVLH -- these are the same opcodes as MOVH/MOVL but
+// do something kinda different!  Fun!
+//
+template< u8 Opcode >
+class MovhlImpl_RtoR
+{
+public:
+	__forceinline void PS( const xRegisterSSE& to, const xRegisterSSE& from ) const { writeXMMop( Opcode, to, from ); }
+	__forceinline void PD( const xRegisterSSE& to, const xRegisterSSE& from ) const { writeXMMop( 0x66, Opcode, to, from ); }
+
+	MovhlImpl_RtoR() {} //GCC.
+};
+
+// ------------------------------------------------------------------------
 template< u8 Prefix, u8 Opcode, u8 OpcodeAlt >
 class MovapsImplAll
 {
 public:
-	__forceinline void operator()( const iRegisterSSE& to, const iRegisterSSE& from ) const { if( to != from ) writeXMMop( Prefix, Opcode, to, from ); }
-	__forceinline void operator()( const iRegisterSSE& to, const void* from ) const { writeXMMop( Prefix, Opcode, to, from ); }
-	__forceinline void operator()( const void* to, const iRegisterSSE& from ) const { writeXMMop( Prefix, OpcodeAlt, from, to ); }
-	__noinline void operator()( const iRegisterSSE& to, const ModSibBase& from ) const { writeXMMop( Prefix, Opcode, to, from ); }
-	__noinline void operator()( const ModSibBase& to, const iRegisterSSE& from ) const { writeXMMop( Prefix, OpcodeAlt, from, to ); }
+	__forceinline void operator()( const xRegisterSSE& to, const xRegisterSSE& from ) const { if( to != from ) writeXMMop( Prefix, Opcode, to, from ); }
+	__forceinline void operator()( const xRegisterSSE& to, const void* from ) const { writeXMMop( Prefix, Opcode, to, from ); }
+	__forceinline void operator()( const void* to, const xRegisterSSE& from ) const { writeXMMop( Prefix, OpcodeAlt, from, to ); }
+	__noinline void operator()( const xRegisterSSE& to, const ModSibBase& from ) const { writeXMMop( Prefix, Opcode, to, from ); }
+	__noinline void operator()( const ModSibBase& to, const xRegisterSSE& from ) const { writeXMMop( Prefix, OpcodeAlt, from, to ); }
 
 	MovapsImplAll() {} //GCC.
 };
@@ -124,11 +147,11 @@ class PLogicImplAll
 {
 public:
 	template< typename T >
-	__forceinline void operator()( const iRegisterSIMD<T>& to, const iRegisterSIMD<T>& from ) const { writeXMMop( 0x66, Opcode, to, from ); }
+	__forceinline void operator()( const xRegisterSIMD<T>& to, const xRegisterSIMD<T>& from ) const { writeXMMop( 0x66, Opcode, to, from ); }
 	template< typename T >
-	__forceinline void operator()( const iRegisterSIMD<T>& to, const void* from ) const { writeXMMop( 0x66, Opcode, to, from ); }
+	__forceinline void operator()( const xRegisterSIMD<T>& to, const void* from ) const { writeXMMop( 0x66, Opcode, to, from ); }
 	template< typename T >
-	__noinline void operator()( const iRegisterSIMD<T>& to, const ModSibBase& from ) const { writeXMMop( 0x66, Opcode, to, from ); }
+	__noinline void operator()( const xRegisterSIMD<T>& to, const ModSibBase& from ) const { writeXMMop( 0x66, Opcode, to, from ); }
 
 	PLogicImplAll() {} //GCWho?
 };
@@ -140,47 +163,42 @@ template< u8 Prefix, u8 Opcode >
 class SSELogicImpl
 {
 public:
-	__forceinline void operator()( const iRegisterSSE& to, const iRegisterSSE& from ) const { writeXMMop( Prefix, Opcode, to, from ); }
-	__forceinline void operator()( const iRegisterSSE& to, const void* from ) const { writeXMMop( Prefix, Opcode, to, from ); }
-	__noinline void operator()( const iRegisterSSE& to, const ModSibBase& from ) const { writeXMMop( Prefix, Opcode, to, from ); }
+	__forceinline void operator()( const xRegisterSSE& to, const xRegisterSSE& from ) const { writeXMMop( Prefix, Opcode, to, from ); }
+	__forceinline void operator()( const xRegisterSSE& to, const void* from ) const { writeXMMop( Prefix, Opcode, to, from ); }
+	__noinline void operator()( const xRegisterSSE& to, const ModSibBase& from ) const { writeXMMop( Prefix, Opcode, to, from ); }
 
 	SSELogicImpl() {} //GCWho?
 };
 
+// ------------------------------------------------------------------------
+//
+template< u8 OpcodeSSE >
+class SSEAndNotImpl
+{
+public:
+	const SSELogicImpl<0x00,OpcodeSSE> PS;
+	const SSELogicImpl<0x66,OpcodeSSE> PD;
+
+	SSEAndNotImpl() {}
+};
+
 // ------------------------------------------------------------------------
-// For implementing SSE-only comparison operations, like CMPEQPS.
-//
-enum SSE2_ComparisonType
-{
-	SSE2_Equal = 0,
-	SSE2_Less,
-	SSE2_LessOrEqual,
-	SSE2_Unordered,
-	SSE2_NotEqual,
-	SSE2_NotLess,
-	SSE2_NotLessOrEqual,
-	SSE2_Ordered
-};
-
-template< u8 Prefix >
-class SSECompareImplGeneric
-{
-public:
-	__forceinline void operator()( const iRegisterSSE& to, const iRegisterSSE& from, u8 cmptype ) const { writeXMMop( Prefix, 0xc2, to, from ); iWrite( cmptype ); }
-	__forceinline void operator()( const iRegisterSSE& to, const void* from, u8 cmptype ) const { writeXMMop( Prefix, 0xc2, to, from ); iWrite( cmptype ); }
-	__noinline void operator()( const iRegisterSSE& to, const ModSibBase& from, u8 cmptype ) const { writeXMMop( Prefix, 0xc2, to, from ); iWrite( cmptype ); }
-
-	SSECompareImplGeneric() {} //GCWhat?
-};
-
-template< u8 Prefix, u8 Opcode, SSE2_ComparisonType CType >
+template< SSE2_ComparisonType CType >
 class SSECompareImpl
 {
+protected:
+	template< u8 Prefix > struct Woot
+	{
+		__forceinline void operator()( const xRegisterSSE& to, const xRegisterSSE& from ) const { writeXMMop( Prefix, 0xc2, to, from ); xWrite<u8>( CType ); }
+		__forceinline void operator()( const xRegisterSSE& to, const void* from ) const { writeXMMop( Prefix, 0xc2, to, from ); xWrite<u8>( CType ); }
+		__noinline void operator()( const xRegisterSSE& to, const ModSibBase& from ) const { writeXMMop( Prefix, 0xc2, to, from ); xWrite<u8>( CType ); }
+	};
+
 public:
-	__forceinline void operator()( const iRegisterSSE& to, const iRegisterSSE& from ) const { writeXMMop( Prefix, 0xc2, to, from ); iWrite( CType ); }
-	__forceinline void operator()( const iRegisterSSE& to, const void* from ) const { writeXMMop( Prefix, 0xc2, to, from ); iWrite( CType ); }
-	__noinline void operator()( const iRegisterSSE& to, const ModSibBase& from ) const { writeXMMop( Prefix, 0xc2, to, from ); iWrite( CType ); }
+	Woot<0x00> PS;
+	Woot<0x66> PD;
+	Woot<0xf3> SS;
+	Woot<0xf2> SD;
 
 	SSECompareImpl() {} //GCWhat?
 };
 
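(Aside, illustration only: the nested Woot<Prefix> structs above give callers a dotted PS/PD/SS/SD syntax. The instance names below are hypothetical -- the real const objects are declared elsewhere in the emitter headers:)

    // SSECompareImpl<SSE2_Equal> xCMPEQ;      (hypothetical instance)
    // xCMPEQ.PS( xmm0, xmm1 );   // 0F C2 C1 00      cmpeqps xmm0, xmm1
    // xCMPEQ.SS( xmm0, xmm1 );   // F3 0F C2 C1 00   cmpeqss xmm0, xmm1
    // MovhlImplAll<0x16> xMOVH;               (hypothetical instance)
    // xMOVH.PS( xmm0, src );     // movhps           xMOVH.PD( xmm0, src );  // movhpd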
@@ -66,54 +66,54 @@ __threadlocal XMMSSEType g_xmmtypes[iREGCNT_XMM] = { XMMT_INT };
 
 namespace x86Emitter {
 
-const iAddressIndexerBase ptr;
-const iAddressIndexer<u128> ptr128;
-const iAddressIndexer<u64> ptr64;
-const iAddressIndexer<u32> ptr32;
-const iAddressIndexer<u16> ptr16;
-const iAddressIndexer<u8> ptr8;
+const xAddressIndexerBase ptr;
+const xAddressIndexer<u128> ptr128;
+const xAddressIndexer<u64> ptr64;
+const xAddressIndexer<u32> ptr32;
+const xAddressIndexer<u16> ptr16;
+const xAddressIndexer<u8> ptr8;
 
 // ------------------------------------------------------------------------
 
-template< typename OperandType > const iRegister<OperandType> iRegister<OperandType>::Empty;
-const iAddressReg iAddressReg::Empty;
+template< typename OperandType > const xRegister<OperandType> xRegister<OperandType>::Empty;
+const xAddressReg xAddressReg::Empty;
 
-const iRegisterSSE
+const xRegisterSSE
 	xmm0( 0 ), xmm1( 1 ),
 	xmm2( 2 ), xmm3( 3 ),
 	xmm4( 4 ), xmm5( 5 ),
 	xmm6( 6 ), xmm7( 7 );
 
-const iRegisterMMX
+const xRegisterMMX
 	mm0( 0 ), mm1( 1 ),
 	mm2( 2 ), mm3( 3 ),
 	mm4( 4 ), mm5( 5 ),
 	mm6( 6 ), mm7( 7 );
 
-const iRegister32
+const xRegister32
 	eax( 0 ), ebx( 3 ),
 	ecx( 1 ), edx( 2 ),
 	esi( 6 ), edi( 7 ),
 	ebp( 5 ), esp( 4 );
 
-const iRegister16
+const xRegister16
 	ax( 0 ), bx( 3 ),
 	cx( 1 ), dx( 2 ),
 	si( 6 ), di( 7 ),
 	bp( 5 ), sp( 4 );
 
-const iRegister8
+const xRegister8
 	al( 0 ),
 	dl( 2 ), bl( 3 ),
 	ah( 4 ), ch( 5 ),
 	dh( 6 ), bh( 7 );
 
-const iRegisterCL cl;
+const xRegisterCL cl;
 
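(Aside, illustration only -- the renamed registers and ptr indexers compose the same way as before; all names here come from this commit:)

    xMOV( eax, ptr[ebx+esi*4+0x10] );   // mov eax, [ebx+esi*4+0x10]
    xMOV( ptr32[edi+8], 1 );            // mov dword ptr [edi+8], 1
    xADD( ecx, eax );                   // add ecx, eax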
 namespace Internal
 {
 	// Performance note: VC++ wants to use byte/word register form for the following
-	// ModRM/SibSB constructors when we use iWrite<u8>, and furthermore unrolls the
+	// ModRM/SibSB constructors when we use xWrite<u8>, and furthermore unrolls the
 	// the shift using a series of ADDs for the following results:
 	//   add cl,cl
 	//   add cl,cl
@@ -137,7 +137,7 @@ namespace Internal
 
 	__forceinline void ModRM( uint mod, uint reg, uint rm )
 	{
-		iWrite<u8>( (mod << 6) | (reg << 3) | rm );
+		xWrite<u8>( (mod << 6) | (reg << 3) | rm );
 		//*(u32*)x86Ptr = (mod << 6) | (reg << 3) | rm;
 		//x86Ptr++;
 	}
@@ -149,20 +149,20 @@ namespace Internal
 
 	__forceinline void SibSB( u32 ss, u32 index, u32 base )
 	{
-		iWrite<u8>( (ss << 6) | (index << 3) | base );
+		xWrite<u8>( (ss << 6) | (index << 3) | base );
 		//*(u32*)x86Ptr = (ss << 6) | (index << 3) | base;
 		//x86Ptr++;
 	}
 
-	__forceinline void iWriteDisp( int regfield, s32 displacement )
+	__forceinline void xWriteDisp( int regfield, s32 displacement )
 	{
 		ModRM( 0, regfield, ModRm_UseDisp32 );
-		iWrite<s32>( displacement );
+		xWrite<s32>( displacement );
 	}
 
-	__forceinline void iWriteDisp( int regfield, const void* address )
+	__forceinline void xWriteDisp( int regfield, const void* address )
 	{
-		iWriteDisp( regfield, (s32)address );
+		xWriteDisp( regfield, (s32)address );
 	}
 
 	// ------------------------------------------------------------------------
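(Aside, worked example only -- what ModRM/SibSB produce for an address like [ebx+esi*4+0x20] with edx (Id 2) as the register operand, assuming ModRm_UseSib is the conventional rm=4 SIB escape:)

    // ModRM( 1, 2, ModRm_UseSib )  ->  (1<<6)|(2<<3)|4  == 0x54   (mod=1: disp8 follows the SIB byte)
    // SibSB( 2, 6, 3 )             ->  (2<<6)|(6<<3)|3  == 0xB3   (scale=4, index=esi, base=ebx)
    // xWrite<s8>( 0x20 )           ->  final bytes: 54 B3 20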
|
@ -206,7 +206,7 @@ namespace Internal
|
||||||
|
|
||||||
if( info.Index.IsEmpty() )
|
if( info.Index.IsEmpty() )
|
||||||
{
|
{
|
||||||
iWriteDisp( regfield, info.Displacement );
|
xWriteDisp( regfield, info.Displacement );
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
|
@ -229,7 +229,7 @@ namespace Internal
|
||||||
{
|
{
|
||||||
ModRM( 0, regfield, ModRm_UseSib );
|
ModRM( 0, regfield, ModRm_UseSib );
|
||||||
SibSB( info.Scale, info.Index.Id, ModRm_UseDisp32 );
|
SibSB( info.Scale, info.Index.Id, ModRm_UseDisp32 );
|
||||||
iWrite<s32>( info.Displacement );
|
xWrite<s32>( info.Displacement );
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
|
@ -245,116 +245,116 @@ namespace Internal
|
||||||
if( displacement_size != 0 )
|
if( displacement_size != 0 )
|
||||||
{
|
{
|
||||||
if( displacement_size == 1 )
|
if( displacement_size == 1 )
|
||||||
iWrite<s8>( info.Displacement );
|
xWrite<s8>( info.Displacement );
|
||||||
else
|
else
|
||||||
iWrite<s32>( info.Displacement );
|
xWrite<s32>( info.Displacement );
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
using namespace Internal;
|
using namespace Internal;
|
||||||
|
|
||||||
const MovImplAll iMOV;
|
const MovImplAll xMOV;
|
||||||
const TestImplAll iTEST;
|
const TestImplAll xTEST;
|
||||||
|
|
||||||
const G1LogicImpl<G1Type_AND,0x54> iAND;
|
const G1LogicImpl_PlusSSE<G1Type_AND,0x54> xAND;
|
||||||
const G1LogicImpl<G1Type_OR,0x56> iOR;
|
const G1LogicImpl_PlusSSE<G1Type_OR,0x56> xOR;
|
||||||
const G1LogicImpl<G1Type_XOR,0x57> iXOR;
|
const G1LogicImpl_PlusSSE<G1Type_XOR,0x57> xXOR;
|
||||||
|
|
||||||
const G1ArithmeticImpl<G1Type_ADD,0x58> iADD;
|
const G1ArithmeticImpl_PlusSSE<G1Type_ADD,0x58> xADD;
|
||||||
const G1ArithmeticImpl<G1Type_SUB,0x5c> iSUB;
|
const G1ArithmeticImpl_PlusSSE<G1Type_SUB,0x5c> xSUB;
|
||||||
|
|
||||||
const Group1ImplAll<G1Type_ADC> iADC;
|
const Group1ImplAll<G1Type_ADC> xADC;
|
||||||
const Group1ImplAll<G1Type_SBB> iSBB;
|
const Group1ImplAll<G1Type_SBB> xSBB;
|
||||||
const Group1ImplAll<G1Type_CMP> iCMP;
|
const G1CompareImpl_PlusSSE xCMP;
|
||||||
|
|
||||||
const Group2ImplAll<G2Type_ROL> iROL;
|
const Group2ImplAll<G2Type_ROL> xROL;
|
||||||
const Group2ImplAll<G2Type_ROR> iROR;
|
const Group2ImplAll<G2Type_ROR> xROR;
|
||||||
const Group2ImplAll<G2Type_RCL> iRCL;
|
const Group2ImplAll<G2Type_RCL> xRCL;
|
||||||
const Group2ImplAll<G2Type_RCR> iRCR;
|
const Group2ImplAll<G2Type_RCR> xRCR;
|
||||||
const Group2ImplAll<G2Type_SHL> iSHL;
|
const Group2ImplAll<G2Type_SHL> xSHL;
|
||||||
const Group2ImplAll<G2Type_SHR> iSHR;
|
const Group2ImplAll<G2Type_SHR> xSHR;
|
||||||
const Group2ImplAll<G2Type_SAR> iSAR;
|
const Group2ImplAll<G2Type_SAR> xSAR;
|
||||||
|
|
||||||
const Group3ImplAll<G3Type_NOT> iNOT;
|
const Group3ImplAll<G3Type_NOT> xNOT;
|
||||||
const Group3ImplAll<G3Type_NEG> iNEG;
|
const Group3ImplAll<G3Type_NEG> xNEG;
|
||||||
const Group3ImplAll<G3Type_MUL> iUMUL;
|
const Group3ImplAll<G3Type_MUL> xUMUL;
|
||||||
const Group3ImplAll<G3Type_DIV> iUDIV;
|
const Group3ImplAll<G3Type_DIV> xUDIV;
|
||||||
const G3Impl_PlusSSE<G3Type_iDIV,0x5e> iDIV;
|
const G3Impl_PlusSSE<G3Type_iDIV,0x5e> xDIV;
|
||||||
const iMul_PlusSSE iMUL;
|
const iMul_PlusSSE xMUL;
|
||||||
|
|
||||||
const IncDecImplAll<false> iINC;
|
const IncDecImplAll<false> xINC;
|
||||||
const IncDecImplAll<true> iDEC;
|
const IncDecImplAll<true> xDEC;
|
||||||
|
|
||||||
const MovExtendImplAll<false> iMOVZX;
|
const MovExtendImplAll<false> xMOVZX;
|
||||||
const MovExtendImplAll<true> iMOVSX;
|
const MovExtendImplAll<true> xMOVSX;
|
||||||
|
|
||||||
const DwordShiftImplAll<false> iSHLD;
|
const DwordShiftImplAll<false> xSHLD;
|
||||||
const DwordShiftImplAll<true> iSHRD;
|
const DwordShiftImplAll<true> xSHRD;
|
||||||
|
|
||||||
const Group8ImplAll<G8Type_BT> iBT;
|
const Group8ImplAll<G8Type_BT> xBT;
|
||||||
const Group8ImplAll<G8Type_BTR> iBTR;
|
const Group8ImplAll<G8Type_BTR> xBTR;
|
||||||
const Group8ImplAll<G8Type_BTS> iBTS;
|
const Group8ImplAll<G8Type_BTS> xBTS;
|
||||||
const Group8ImplAll<G8Type_BTC> iBTC;
|
const Group8ImplAll<G8Type_BTC> xBTC;
|
||||||
|
|
||||||
const BitScanImplAll<false> iBSF;
|
const BitScanImplAll<false> xBSF;
|
||||||
const BitScanImplAll<true> iBSR;
|
const BitScanImplAll<true> xBSR;
|
||||||
|
|
||||||
// ------------------------------------------------------------------------
|
// ------------------------------------------------------------------------
|
||||||
const CMovImplGeneric iCMOV;
|
const CMovImplGeneric xCMOV;
|
||||||
|
|
||||||
const CMovImplAll<Jcc_Above> iCMOVA;
|
const CMovImplAll<Jcc_Above> xCMOVA;
|
||||||
const CMovImplAll<Jcc_AboveOrEqual> iCMOVAE;
|
const CMovImplAll<Jcc_AboveOrEqual> xCMOVAE;
|
||||||
const CMovImplAll<Jcc_Below> iCMOVB;
|
const CMovImplAll<Jcc_Below> xCMOVB;
|
||||||
const CMovImplAll<Jcc_BelowOrEqual> iCMOVBE;
|
const CMovImplAll<Jcc_BelowOrEqual> xCMOVBE;
|
||||||
|
|
||||||
const CMovImplAll<Jcc_Greater> iCMOVG;
|
const CMovImplAll<Jcc_Greater> xCMOVG;
|
||||||
const CMovImplAll<Jcc_GreaterOrEqual> iCMOVGE;
|
const CMovImplAll<Jcc_GreaterOrEqual> xCMOVGE;
|
||||||
const CMovImplAll<Jcc_Less> iCMOVL;
|
const CMovImplAll<Jcc_Less> xCMOVL;
|
||||||
const CMovImplAll<Jcc_LessOrEqual> iCMOVLE;
|
const CMovImplAll<Jcc_LessOrEqual> xCMOVLE;
|
||||||
|
|
||||||
const CMovImplAll<Jcc_Zero> iCMOVZ;
|
const CMovImplAll<Jcc_Zero> xCMOVZ;
|
||||||
const CMovImplAll<Jcc_Equal> iCMOVE;
|
const CMovImplAll<Jcc_Equal> xCMOVE;
|
||||||
const CMovImplAll<Jcc_NotZero> iCMOVNZ;
|
const CMovImplAll<Jcc_NotZero> xCMOVNZ;
|
||||||
const CMovImplAll<Jcc_NotEqual> iCMOVNE;
|
const CMovImplAll<Jcc_NotEqual> xCMOVNE;
|
||||||
|
|
||||||
const CMovImplAll<Jcc_Overflow> iCMOVO;
|
const CMovImplAll<Jcc_Overflow> xCMOVO;
|
||||||
const CMovImplAll<Jcc_NotOverflow> iCMOVNO;
|
const CMovImplAll<Jcc_NotOverflow> xCMOVNO;
|
||||||
const CMovImplAll<Jcc_Carry> iCMOVC;
|
const CMovImplAll<Jcc_Carry> xCMOVC;
|
||||||
const CMovImplAll<Jcc_NotCarry> iCMOVNC;
|
const CMovImplAll<Jcc_NotCarry> xCMOVNC;
|
||||||
|
|
||||||
const CMovImplAll<Jcc_Signed> iCMOVS;
|
const CMovImplAll<Jcc_Signed> xCMOVS;
|
||||||
const CMovImplAll<Jcc_Unsigned> iCMOVNS;
|
const CMovImplAll<Jcc_Unsigned> xCMOVNS;
|
||||||
const CMovImplAll<Jcc_ParityEven> iCMOVPE;
|
const CMovImplAll<Jcc_ParityEven> xCMOVPE;
|
||||||
const CMovImplAll<Jcc_ParityOdd> iCMOVPO;
|
const CMovImplAll<Jcc_ParityOdd> xCMOVPO;
|
||||||
|
|
||||||
// ------------------------------------------------------------------------
|
// ------------------------------------------------------------------------
|
||||||
const SetImplGeneric iSET;
|
const SetImplGeneric xSET;
|
||||||
|
|
||||||
const SetImplAll<Jcc_Above> iSETA;
|
const SetImplAll<Jcc_Above> xSETA;
|
||||||
const SetImplAll<Jcc_AboveOrEqual> iSETAE;
|
const SetImplAll<Jcc_AboveOrEqual> xSETAE;
|
||||||
const SetImplAll<Jcc_Below> iSETB;
|
const SetImplAll<Jcc_Below> xSETB;
|
||||||
const SetImplAll<Jcc_BelowOrEqual> iSETBE;
|
const SetImplAll<Jcc_BelowOrEqual> xSETBE;
|
||||||
|
|
||||||
const SetImplAll<Jcc_Greater> iSETG;
|
const SetImplAll<Jcc_Greater> xSETG;
|
||||||
const SetImplAll<Jcc_GreaterOrEqual> iSETGE;
|
const SetImplAll<Jcc_GreaterOrEqual> xSETGE;
|
||||||
const SetImplAll<Jcc_Less> iSETL;
|
const SetImplAll<Jcc_Less> xSETL;
|
||||||
const SetImplAll<Jcc_LessOrEqual> iSETLE;
|
const SetImplAll<Jcc_LessOrEqual> xSETLE;
|
||||||
|
|
||||||
const SetImplAll<Jcc_Zero> iSETZ;
|
const SetImplAll<Jcc_Zero> xSETZ;
|
||||||
const SetImplAll<Jcc_Equal> iSETE;
|
const SetImplAll<Jcc_Equal> xSETE;
|
||||||
const SetImplAll<Jcc_NotZero> iSETNZ;
|
const SetImplAll<Jcc_NotZero> xSETNZ;
|
||||||
const SetImplAll<Jcc_NotEqual> iSETNE;
|
const SetImplAll<Jcc_NotEqual> xSETNE;
|
||||||
|
|
||||||
const SetImplAll<Jcc_Overflow> iSETO;
|
const SetImplAll<Jcc_Overflow> xSETO;
|
||||||
const SetImplAll<Jcc_NotOverflow> iSETNO;
|
const SetImplAll<Jcc_NotOverflow> xSETNO;
|
||||||
const SetImplAll<Jcc_Carry> iSETC;
|
const SetImplAll<Jcc_Carry> xSETC;
|
||||||
const SetImplAll<Jcc_NotCarry> iSETNC;
|
const SetImplAll<Jcc_NotCarry> xSETNC;
|
||||||
|
|
||||||
const SetImplAll<Jcc_Signed> iSETS;
|
const SetImplAll<Jcc_Signed> xSETS;
|
||||||
const SetImplAll<Jcc_Unsigned> iSETNS;
|
const SetImplAll<Jcc_Unsigned> xSETNS;
|
||||||
const SetImplAll<Jcc_ParityEven> iSETPE;
|
const SetImplAll<Jcc_ParityEven> xSETPE;
|
||||||
const SetImplAll<Jcc_ParityOdd> iSETPO;
|
const SetImplAll<Jcc_ParityOdd> xSETPO;
|
||||||
|
|
||||||
|
|
||||||
// ------------------------------------------------------------------------
|
// ------------------------------------------------------------------------
|
||||||
|
@ -389,7 +389,7 @@ __emitinline void iAdvancePtr( uint bytes )
|
||||||
{
|
{
|
||||||
// common debugger courtesy: advance with INT3 as filler.
|
// common debugger courtesy: advance with INT3 as filler.
|
||||||
for( uint i=0; i<bytes; i++ )
|
for( uint i=0; i<bytes; i++ )
|
||||||
iWrite<u8>( 0xcc );
|
xWrite<u8>( 0xcc );
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
x86Ptr += bytes;
|
x86Ptr += bytes;
|
||||||
|
@ -430,7 +430,7 @@ void ModSibBase::Reduce()
|
||||||
Index = Base;
|
Index = Base;
|
||||||
Scale = 0;
|
Scale = 0;
|
||||||
if( !Base.IsStackPointer() ) // prevent ESP from being encoded 'alone'
|
if( !Base.IsStackPointer() ) // prevent ESP from being encoded 'alone'
|
||||||
Base = iAddressReg::Empty;
|
Base = xAddressReg::Empty;
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -484,9 +484,9 @@ void ModSibBase::Reduce()
|
||||||
// of LEA, which alters flags states.
|
// of LEA, which alters flags states.
|
||||||
//
|
//
|
||||||
template< typename OperandType >
|
template< typename OperandType >
|
||||||
static void EmitLeaMagic( iRegister<OperandType> to, const ModSibBase& src, bool preserve_flags )
|
static void EmitLeaMagic( xRegister<OperandType> to, const ModSibBase& src, bool preserve_flags )
|
||||||
{
|
{
|
||||||
typedef iRegister<OperandType> ToReg;
|
typedef xRegister<OperandType> ToReg;
|
||||||
|
|
||||||
int displacement_size = (src.Displacement == 0) ? 0 :
|
int displacement_size = (src.Displacement == 0) ? 0 :
|
||||||
( ( src.IsByteSizeDisp() ) ? 1 : 2 );
|
( ( src.IsByteSizeDisp() ) ? 1 : 2 );
|
||||||
|
@ -501,12 +501,12 @@ static void EmitLeaMagic( iRegister<OperandType> to, const ModSibBase& src, bool
|
||||||
|
|
||||||
if( src.Index.IsEmpty() )
|
if( src.Index.IsEmpty() )
|
||||||
{
|
{
|
||||||
iMOV( to, src.Displacement );
|
xMOV( to, src.Displacement );
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
else if( displacement_size == 0 )
|
else if( displacement_size == 0 )
|
||||||
{
|
{
|
||||||
iMOV( to, ToReg( src.Index.Id ) );
|
xMOV( to, ToReg( src.Index.Id ) );
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
|
@ -516,8 +516,8 @@ static void EmitLeaMagic( iRegister<OperandType> to, const ModSibBase& src, bool
|
||||||
// encode as MOV and ADD combo. Make sure to use the immediate on the
|
// encode as MOV and ADD combo. Make sure to use the immediate on the
|
||||||
// ADD since it can encode as an 8-bit sign-extended value.
|
// ADD since it can encode as an 8-bit sign-extended value.
|
||||||
|
|
||||||
iMOV( to, ToReg( src.Index.Id ) );
|
xMOV( to, ToReg( src.Index.Id ) );
|
||||||
iADD( to, src.Displacement );
|
xADD( to, src.Displacement );
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
|
@ -525,7 +525,7 @@ static void EmitLeaMagic( iRegister<OperandType> to, const ModSibBase& src, bool
|
||||||
// note: no need to do ebp+0 check since we encode all 0 displacements as
|
// note: no need to do ebp+0 check since we encode all 0 displacements as
|
||||||
// register assignments above (via MOV)
|
// register assignments above (via MOV)
|
||||||
|
|
||||||
iWrite<u8>( 0x8d );
|
xWrite<u8>( 0x8d );
|
||||||
ModRM( displacement_size, to.Id, src.Index.Id );
|
ModRM( displacement_size, to.Id, src.Index.Id );
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -543,14 +543,14 @@ static void EmitLeaMagic( iRegister<OperandType> to, const ModSibBase& src, bool
|
||||||
// (this does not apply to older model P4s with the broken barrel shifter,
|
// (this does not apply to older model P4s with the broken barrel shifter,
|
||||||
// but we currently aren't optimizing for that target anyway).
|
// but we currently aren't optimizing for that target anyway).
|
||||||
|
|
||||||
iMOV( to, ToReg( src.Index.Id ) );
|
xMOV( to, ToReg( src.Index.Id ) );
|
||||||
iSHL( to, src.Scale );
|
xSHL( to, src.Scale );
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
iWrite<u8>( 0x8d );
|
xWrite<u8>( 0x8d );
|
||||||
ModRM( 0, to.Id, ModRm_UseSib );
|
ModRM( 0, to.Id, ModRm_UseSib );
|
||||||
SibSB( src.Scale, src.Index.Id, ModRm_UseDisp32 );
|
SibSB( src.Scale, src.Index.Id, ModRm_UseDisp32 );
|
||||||
iWrite<u32>( src.Displacement );
|
xWrite<u32>( src.Displacement );
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
@@ -562,14 +562,14 @@ static void EmitLeaMagic( iRegister<OperandType> to, const ModSibBase& src, bool
 if( src.Index == esp )
 {
 // ESP is not encodable as an index (ix86 ignores it), thus:
-iMOV( to, ToReg( src.Base.Id ) ); // will do the trick!
-if( src.Displacement ) iADD( to, src.Displacement );
+xMOV( to, ToReg( src.Base.Id ) ); // will do the trick!
+if( src.Displacement ) xADD( to, src.Displacement );
 return;
 }
 else if( src.Displacement == 0 )
 {
-iMOV( to, ToReg( src.Base.Id ) );
-iADD( to, ToReg( src.Index.Id ) );
+xMOV( to, ToReg( src.Base.Id ) );
+xADD( to, ToReg( src.Index.Id ) );
 return;
 }
 }
@@ -578,7 +578,7 @@ static void EmitLeaMagic( iRegister<OperandType> to, const ModSibBase& src, bool
 // special case handling of ESP as Index, which is replaceable with
 // a single MOV even when preserve_flags is set! :D

-iMOV( to, ToReg( src.Base.Id ) );
+xMOV( to, ToReg( src.Base.Id ) );
 return;
 }
 }
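For reference, a sketch of the SIB rule the ESP special case above works around: an index field of 100b (ESP's register number) means "no index", so an ESP-indexed operand degenerates to the base register plus displacement. The packing helper below is a stand-in following the standard x86 layout, not the emitter's own SibSB.

#include <cstdint>
#include <cstdio>

// Standard SIB byte layout: scale(2 bits) | index(3 bits) | base(3 bits).
static uint8_t MakeSib( int scale, int index, int base )
{
    return (uint8_t)((scale << 6) | (index << 3) | base);
}

int main()
{
    const int EAX = 0, ESP = 4;   // ESP's register number is 100b
    // A SIB byte with index=100b is decoded as "no index", so there is no way
    // to express [eax + esp*1]; the code above falls back to a plain MOV instead.
    uint8_t sib = MakeSib( 0, ESP, EAX );
    printf("SIB=%02X decodes as [eax] with no index\n", sib);
    return 0;
}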
@@ -586,7 +586,7 @@ static void EmitLeaMagic( iRegister<OperandType> to, const ModSibBase& src, bool
 if( src.Base == ebp && displacement_size == 0 )
 displacement_size = 1; // forces [ebp] to be encoded as [ebp+0]!

-iWrite<u8>( 0x8d );
+xWrite<u8>( 0x8d );
 ModRM( displacement_size, to.Id, ModRm_UseSib );
 SibSB( src.Scale, src.Index.Id, src.Base.Id );
 }
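A similar sketch for the forced [ebp+0] case above: with mod=00, a base/rm field of 101b means "disp32 only, no base", so a plain [ebp] has to be emitted as mod=01 with an explicit zero disp8. The helper is illustrative, not the emitter's ModRM.

#include <cstdint>
#include <cstdio>

// Standard ModRM byte layout: mod(2 bits) | reg(3 bits) | rm(3 bits).
static uint8_t MakeModRM( int mod, int reg, int rm )
{
    return (uint8_t)((mod << 6) | (reg << 3) | rm);
}

int main()
{
    const int EAX = 0, EBP = 5;
    uint8_t disp32_form = MakeModRM( 0, EAX, EBP ); // mod=00, rm=101b -> [disp32], not [ebp]!
    uint8_t ebp_form    = MakeModRM( 1, EAX, EBP ); // mod=01, rm=101b -> [ebp+disp8]
    printf("mod00=%02X is the disp32 form; mod01=%02X must be followed by a 00 disp8\n",
        disp32_form, ebp_form);
    return 0;
}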
@@ -595,19 +595,19 @@ static void EmitLeaMagic( iRegister<OperandType> to, const ModSibBase& src, bool
 if( displacement_size != 0 )
 {
 if( displacement_size == 1 )
-iWrite<s8>( src.Displacement );
+xWrite<s8>( src.Displacement );
 else
-iWrite<s32>( src.Displacement );
+xWrite<s32>( src.Displacement );
 }
 }

-__emitinline void iLEA( iRegister32 to, const ModSibBase& src, bool preserve_flags )
+__emitinline void xLEA( xRegister32 to, const ModSibBase& src, bool preserve_flags )
 {
 EmitLeaMagic( to, src, preserve_flags );
 }


-__emitinline void iLEA( iRegister16 to, const ModSibBase& src, bool preserve_flags )
+__emitinline void xLEA( xRegister16 to, const ModSibBase& src, bool preserve_flags )
 {
 write8( 0x66 );
 EmitLeaMagic( to, src, preserve_flags );
@@ -620,21 +620,21 @@ __emitinline void iLEA( iRegister16 to, const ModSibBase& src, bool preserve_fla
 // Note: pushad/popad implementations are intentionally left out. The instructions are
 // invalid in x64, and are super slow on x32. Use multiple Push/Pop instructions instead.

-__emitinline void iPOP( const ModSibBase& from )
+__emitinline void xPOP( const ModSibBase& from )
 {
-iWrite<u8>( 0x8f );
+xWrite<u8>( 0x8f );
 EmitSibMagic( 0, from );
 }

-__emitinline void iPUSH( const ModSibBase& from )
+__emitinline void xPUSH( const ModSibBase& from )
 {
-iWrite<u8>( 0xff );
+xWrite<u8>( 0xff );
 EmitSibMagic( 6, from );
 }

 //////////////////////////////////////////////////////////////////////////////////////////
 //
-__emitinline void iBSWAP( const iRegister32& to )
+__emitinline void xBSWAP( const xRegister32& to )
 {
 write8( 0x0F );
 write8( 0xC8 | to.Id );
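As a standalone illustration of the one-byte register encodings used here and declared later in this commit (0x50+reg for PUSH, 0x58+reg for POP, 0F C8+reg for BSWAP), assuming a plain byte buffer rather than the emitter's write stream:

#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    std::vector<uint8_t> buf;
    const int EDI = 7;               // register number folded into the opcode's low bits

    buf.push_back( 0x50 | EDI );     // push edi
    buf.push_back( 0x0F );           // bswap edi is a two-byte opcode:
    buf.push_back( 0xC8 | EDI );     //   0F C8+r
    buf.push_back( 0x58 | EDI );     // pop edi

    for( uint8_t b : buf ) printf("%02X ", b);
    printf("\n");
    return 0;
}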
@@ -645,66 +645,81 @@ __emitinline void iBSWAP( const iRegister32& to )
 // MMX / XMM Instructions
 // (these will get put in their own file later)

-const MovapsImplAll< 0, 0x28, 0x29 > iMOVAPS;
+const MovapsImplAll< 0, 0x28, 0x29 > xMOVAPS;
-const MovapsImplAll< 0, 0x10, 0x11 > iMOVUPS;
+const MovapsImplAll< 0, 0x10, 0x11 > xMOVUPS;
-const MovapsImplAll< 0x66, 0x28, 0x29 > iMOVAPD;
+const MovapsImplAll< 0x66, 0x28, 0x29 > xMOVAPD;
-const MovapsImplAll< 0x66, 0x10, 0x11 > iMOVUPD;
+const MovapsImplAll< 0x66, 0x10, 0x11 > xMOVUPD;

 #ifdef ALWAYS_USE_MOVAPS
-const MovapsImplAll< 0x66, 0x6f, 0x7f > iMOVDQA;
+const MovapsImplAll< 0x66, 0x6f, 0x7f > xMOVDQA;
-const MovapsImplAll< 0xf3, 0x6f, 0x7f > iMOVDQU;
+const MovapsImplAll< 0xf3, 0x6f, 0x7f > xMOVDQU;
 #else
-const MovapsImplAll< 0, 0x28, 0x29 > iMOVDQA;
+const MovapsImplAll< 0, 0x28, 0x29 > xMOVDQA;
-const MovapsImplAll< 0, 0x10, 0x11 > iMOVDQU;
+const MovapsImplAll< 0, 0x10, 0x11 > xMOVDQU;
 #endif

-const MovhlImplAll< 0, 0x16 > iMOVHPS;
+const MovhlImplAll<0x16> xMOVH;
-const MovhlImplAll< 0, 0x12 > iMOVLPS;
+const MovhlImplAll<0x12> xMOVL;
-const MovhlImplAll< 0x66, 0x16 > iMOVHPD;
+const MovhlImpl_RtoR<0x16> xMOVLH;
-const MovhlImplAll< 0x66, 0x12 > iMOVLPD;
+const MovhlImpl_RtoR<0x12> xMOVHL;

-const PLogicImplAll<0xdb> iPAND;
+const PLogicImplAll<0xdb> xPAND;
-const PLogicImplAll<0xdf> iPANDN;
+const PLogicImplAll<0xdf> xPANDN;
-const PLogicImplAll<0xeb> iPOR;
+const PLogicImplAll<0xeb> xPOR;
-const PLogicImplAll<0xef> iPXOR;
+const PLogicImplAll<0xef> xPXOR;

-const SSEAndNotImpl<0x55> iANDN;
+const SSEAndNotImpl<0x55> xANDN;

 // Compute Reciprocal Packed Single-Precision Floating-Point Values
-const SSELogicImpl<0,0x53> iRCPPS;
+const SSELogicImpl<0,0x53> xRCPPS;

 // Compute Reciprocal of Scalar Single-Precision Floating-Point Value
-const SSELogicImpl<0xf3,0x53> iRCPSS;
+const SSELogicImpl<0xf3,0x53> xRCPSS;

+// ------------------------------------------------------------------------

+const SSECompareImpl<SSE2_Equal> xCMPEQ;
+const SSECompareImpl<SSE2_Less> xCMPLT;
+const SSECompareImpl<SSE2_LessOrEqual> xCMPLE;
+const SSECompareImpl<SSE2_Unordered> xCMPUNORD;
+const SSECompareImpl<SSE2_NotEqual> xCMPNE;
+const SSECompareImpl<SSE2_NotLess> xCMPNLT;
+const SSECompareImpl<SSE2_NotLessOrEqual> xCMPNLE;
+const SSECompareImpl<SSE2_Ordered> xCMPORD;


+//////////////////////////////////////////////////////////////////////////////////////////
+//

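One caveat worth keeping next to xRCPPS/xRCPSS above: RCPPS is an approximation (roughly 12 bits of precision), not an exact reciprocal. A host-side comparison using standard SSE intrinsics, with arbitrary test values:

#include <xmmintrin.h>
#include <cstdio>

int main()
{
    __m128 x      = _mm_set_ps( 7.0f, 5.0f, 3.0f, 2.0f );
    __m128 approx = _mm_rcp_ps( x );                      // RCPPS: fast approximate 1/x
    __m128 exact  = _mm_div_ps( _mm_set1_ps(1.0f), x );   // DIVPS: reference 1/x

    float a[4], e[4];
    _mm_storeu_ps( a, approx );
    _mm_storeu_ps( e, exact );
    for( int i = 0; i < 4; ++i )
        printf("1/x: rcpps=%.8f divps=%.8f\n", a[i], e[i]);
    return 0;
}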
 // Moves from XMM to XMM, with the *upper 64 bits* of the destination register
 // being cleared to zero.
-__forceinline void iMOVQZX( const iRegisterSSE& to, const iRegisterSSE& from ) { writeXMMop( 0xf3, 0x7e, to, from ); }
+__forceinline void xMOVQZX( const xRegisterSSE& to, const xRegisterSSE& from ) { writeXMMop( 0xf3, 0x7e, to, from ); }

 // Moves from XMM to XMM, with the *upper 64 bits* of the destination register
 // being cleared to zero.
-__forceinline void iMOVQZX( const iRegisterSSE& to, const ModSibBase& src ) { writeXMMop( 0xf3, 0x7e, to, src ); }
+__forceinline void xMOVQZX( const xRegisterSSE& to, const ModSibBase& src ) { writeXMMop( 0xf3, 0x7e, to, src ); }

 // Moves from XMM to XMM, with the *upper 64 bits* of the destination register
 // being cleared to zero.
-__forceinline void iMOVQZX( const iRegisterSSE& to, const void* src ) { writeXMMop( 0xf3, 0x7e, to, src ); }
+__forceinline void xMOVQZX( const xRegisterSSE& to, const void* src ) { writeXMMop( 0xf3, 0x7e, to, src ); }

 // Moves lower quad of XMM to ptr64 (no bits are cleared)
-__forceinline void iMOVQ( const ModSibBase& dest, const iRegisterSSE& from ) { writeXMMop( 0x66, 0xd6, from, dest ); }
+__forceinline void xMOVQ( const ModSibBase& dest, const xRegisterSSE& from ) { writeXMMop( 0x66, 0xd6, from, dest ); }
 // Moves lower quad of XMM to ptr64 (no bits are cleared)
-__forceinline void iMOVQ( void* dest, const iRegisterSSE& from ) { writeXMMop( 0x66, 0xd6, from, dest ); }
+__forceinline void xMOVQ( void* dest, const xRegisterSSE& from ) { writeXMMop( 0x66, 0xd6, from, dest ); }

-__forceinline void iMOVQ( const iRegisterMMX& to, const iRegisterMMX& from ) { if( to != from ) writeXMMop( 0x6f, to, from ); }
+__forceinline void xMOVQ( const xRegisterMMX& to, const xRegisterMMX& from ) { if( to != from ) writeXMMop( 0x6f, to, from ); }
-__forceinline void iMOVQ( const iRegisterMMX& to, const ModSibBase& src ) { writeXMMop( 0x6f, to, src ); }
+__forceinline void xMOVQ( const xRegisterMMX& to, const ModSibBase& src ) { writeXMMop( 0x6f, to, src ); }
-__forceinline void iMOVQ( const iRegisterMMX& to, const void* src ) { writeXMMop( 0x6f, to, src ); }
+__forceinline void xMOVQ( const xRegisterMMX& to, const void* src ) { writeXMMop( 0x6f, to, src ); }
-__forceinline void iMOVQ( const ModSibBase& dest, const iRegisterMMX& from ) { writeXMMop( 0x7f, from, dest ); }
+__forceinline void xMOVQ( const ModSibBase& dest, const xRegisterMMX& from ) { writeXMMop( 0x7f, from, dest ); }
-__forceinline void iMOVQ( void* dest, const iRegisterMMX& from ) { writeXMMop( 0x7f, from, dest ); }
+__forceinline void xMOVQ( void* dest, const xRegisterMMX& from ) { writeXMMop( 0x7f, from, dest ); }

-// This form of iMOVQ is Intel's adeptly named 'MOVQ2DQ'
+// This form of xMOVQ is Intel's adeptly named 'MOVQ2DQ'
-__forceinline void iMOVQ( const iRegisterSSE& to, const iRegisterMMX& from ) { writeXMMop( 0xf3, 0xd6, to, from ); }
+__forceinline void xMOVQ( const xRegisterSSE& to, const xRegisterMMX& from ) { writeXMMop( 0xf3, 0xd6, to, from ); }

-// This form of iMOVQ is Intel's adeptly named 'MOVDQ2Q'
+// This form of xMOVQ is Intel's adeptly named 'MOVDQ2Q'
-__forceinline void iMOVQ( const iRegisterMMX& to, const iRegisterSSE& from )
+__forceinline void xMOVQ( const xRegisterMMX& to, const xRegisterSSE& from )
 {
 // Manual implementation of this form of MOVQ, since its parameters are unique in a way
 // that breaks the template inference of writeXMMop();
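A host-side demonstration of the "upper 64 bits cleared" behavior documented above, using the standard intrinsic that maps to the register form of MOVQ; the MOVQ2DQ/MOVDQ2Q forms mentioned above are the MMX/XMM cross variants and are not exercised here.

#include <emmintrin.h>
#include <cstdio>

int main()
{
    __m128i src = _mm_set_epi32( 0x11111111, 0x22222222, 0x33333333, 0x44444444 );
    __m128i dst = _mm_move_epi64( src );   // MOVQ xmm,xmm: low quad copied, high quad zeroed

    unsigned int out[4];
    _mm_storeu_si128( (__m128i*)out, dst );
    // Prints: 00000000 00000000 33333333 44444444
    printf("%08X %08X %08X %08X\n", out[3], out[2], out[1], out[0]);
    return 0;
}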
@@ -716,53 +731,42 @@ __forceinline void iMOVQ( const iRegisterMMX& to, const iRegisterSSE& from )
 //////////////////////////////////////////////////////////////////////////////////////////
 //

-#define IMPLEMENT_iMOVS( ssd, prefix ) \
+#define IMPLEMENT_xMOVS( ssd, prefix ) \
-__forceinline void iMOV##ssd( const iRegisterSSE& to, const iRegisterSSE& from ) { if( to != from ) writeXMMop( prefix, 0x10, to, from ); } \
+__forceinline void xMOV##ssd( const xRegisterSSE& to, const xRegisterSSE& from ) { if( to != from ) writeXMMop( prefix, 0x10, to, from ); } \
-__forceinline void iMOV##ssd##ZX( const iRegisterSSE& to, const void* from ) { writeXMMop( prefix, 0x10, to, from ); } \
+__forceinline void xMOV##ssd##ZX( const xRegisterSSE& to, const void* from ) { writeXMMop( prefix, 0x10, to, from ); } \
-__forceinline void iMOV##ssd##ZX( const iRegisterSSE& to, const ModSibBase& from ) { writeXMMop( prefix, 0x10, to, from ); } \
+__forceinline void xMOV##ssd##ZX( const xRegisterSSE& to, const ModSibBase& from ) { writeXMMop( prefix, 0x10, to, from ); } \
-__forceinline void iMOV##ssd( const void* to, const iRegisterSSE& from ) { writeXMMop( prefix, 0x11, from, to ); } \
+__forceinline void xMOV##ssd( const void* to, const xRegisterSSE& from ) { writeXMMop( prefix, 0x11, from, to ); } \
-__forceinline void iMOV##ssd( const ModSibBase& to, const iRegisterSSE& from ) { writeXMMop( prefix, 0x11, from, to ); }
+__forceinline void xMOV##ssd( const ModSibBase& to, const xRegisterSSE& from ) { writeXMMop( prefix, 0x11, from, to ); }

-IMPLEMENT_iMOVS( SS, 0xf3 )
+IMPLEMENT_xMOVS( SS, 0xf3 )
-IMPLEMENT_iMOVS( SD, 0xf2 )
+IMPLEMENT_xMOVS( SD, 0xf2 )

 //////////////////////////////////////////////////////////////////////////////////////////
 // Non-temporal movs only support a register as a target (ie, load form only, no stores)
 //

-__forceinline void iMOVNTDQA( const iRegisterSSE& to, const void* from )
+__forceinline void xMOVNTDQA( const xRegisterSSE& to, const void* from )
 {
-iWrite<u32>( 0x2A380f66 );
+xWrite<u32>( 0x2A380f66 );
-iWriteDisp( to.Id, from );
+xWriteDisp( to.Id, from );
 }

-__noinline void iMOVNTDQA( const iRegisterSSE& to, const ModSibBase& from )
+__noinline void xMOVNTDQA( const xRegisterSSE& to, const ModSibBase& from )
 {
-iWrite<u32>( 0x2A380f66 );
+xWrite<u32>( 0x2A380f66 );
 EmitSibMagic( to.Id, from );
 }

-__forceinline void iMOVNTDQ( void* to, const iRegisterSSE& from ) { writeXMMop( 0x66, 0xe7, from, to ); }
+__forceinline void xMOVNTDQ( void* to, const xRegisterSSE& from ) { writeXMMop( 0x66, 0xe7, from, to ); }
-__noinline void iMOVNTDQA( const ModSibBase& to, const iRegisterSSE& from ) { writeXMMop( 0x66, 0xe7, from, to ); }
+__noinline void xMOVNTDQA( const ModSibBase& to, const xRegisterSSE& from ) { writeXMMop( 0x66, 0xe7, from, to ); }

-__forceinline void iMOVNTPD( void* to, const iRegisterSSE& from ) { writeXMMop( 0x66, 0x2b, from, to ); }
+__forceinline void xMOVNTPD( void* to, const xRegisterSSE& from ) { writeXMMop( 0x66, 0x2b, from, to ); }
-__noinline void iMOVNTPD( const ModSibBase& to, const iRegisterSSE& from ) { writeXMMop( 0x66, 0x2b, from, to ); }
+__noinline void xMOVNTPD( const ModSibBase& to, const xRegisterSSE& from ) { writeXMMop( 0x66, 0x2b, from, to ); }
-__forceinline void iMOVNTPS( void* to, const iRegisterSSE& from ) { writeXMMop( 0x2b, from, to ); }
+__forceinline void xMOVNTPS( void* to, const xRegisterSSE& from ) { writeXMMop( 0x2b, from, to ); }
-__noinline void iMOVNTPS( const ModSibBase& to, const iRegisterSSE& from ) { writeXMMop( 0x2b, from, to ); }
+__noinline void xMOVNTPS( const ModSibBase& to, const xRegisterSSE& from ) { writeXMMop( 0x2b, from, to ); }

-__forceinline void iMOVNTQ( void* to, const iRegisterMMX& from ) { writeXMMop( 0xe7, from, to ); }
+__forceinline void xMOVNTQ( void* to, const xRegisterMMX& from ) { writeXMMop( 0xe7, from, to ); }
-__noinline void iMOVNTQ( const ModSibBase& to, const iRegisterMMX& from ) { writeXMMop( 0xe7, from, to ); }
+__noinline void xMOVNTQ( const ModSibBase& to, const xRegisterMMX& from ) { writeXMMop( 0xe7, from, to ); }

-//////////////////////////////////////////////////////////////////////////////////////////
-// Mov Low to High / High to Low
-//
-// These instructions come in xmmreg,xmmreg forms only!
-//
-
-__forceinline void iMOVLHPS( const iRegisterSSE& to, const iRegisterSSE& from ) { writeXMMop( 0x16, to, from ); }
-__forceinline void iMOVHLPS( const iRegisterSSE& to, const iRegisterSSE& from ) { writeXMMop( 0x12, to, from ); }
-__forceinline void iMOVLHPD( const iRegisterSSE& to, const iRegisterSSE& from ) { writeXMMop( 0x66, 0x16, to, from ); }
-__forceinline void iMOVHLPD( const iRegisterSSE& to, const iRegisterSSE& from ) { writeXMMop( 0x66, 0x12, to, from ); }


 }
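For context on the MOVNT family above, a small host-side sketch of a non-temporal store followed by a store fence, using standard intrinsics (this is MOVNTDQ; the SSE4.1 MOVNTDQA encoded above is the corresponding streaming load). Data and alignment are illustrative.

#include <emmintrin.h>
#include <cstdio>

int main()
{
    alignas(16) int dst[4] = { 0, 0, 0, 0 };
    __m128i v = _mm_set1_epi32( 0x5A5A5A5A );

    _mm_stream_si128( (__m128i*)dst, v );   // MOVNTDQ: write-combining store, bypasses caches
    _mm_sfence();                           // make the streaming store globally visible

    printf("%08X %08X %08X %08X\n", dst[0], dst[1], dst[2], dst[3]);
    return 0;
}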
@@ -53,29 +53,29 @@ namespace x86Emitter
 //////////////////////////////////////////////////////////////////////////////////////////
 // x86Register Method Implementations
 //
-__forceinline iAddressInfo iAddressReg::operator+( const iAddressReg& right ) const
+__forceinline xAddressInfo xAddressReg::operator+( const xAddressReg& right ) const
 {
-return iAddressInfo( *this, right );
+return xAddressInfo( *this, right );
 }

-__forceinline iAddressInfo iAddressReg::operator+( const iAddressInfo& right ) const
+__forceinline xAddressInfo xAddressReg::operator+( const xAddressInfo& right ) const
 {
 return right + *this;
 }

-__forceinline iAddressInfo iAddressReg::operator+( s32 right ) const
+__forceinline xAddressInfo xAddressReg::operator+( s32 right ) const
 {
-return iAddressInfo( *this, right );
+return xAddressInfo( *this, right );
 }

-__forceinline iAddressInfo iAddressReg::operator*( u32 right ) const
+__forceinline xAddressInfo xAddressReg::operator*( u32 right ) const
 {
-return iAddressInfo( Empty, *this, right );
+return xAddressInfo( Empty, *this, right );
 }

-__forceinline iAddressInfo iAddressReg::operator<<( u32 shift ) const
+__forceinline xAddressInfo xAddressReg::operator<<( u32 shift ) const
 {
-return iAddressInfo( Empty, *this, 1<<shift );
+return xAddressInfo( Empty, *this, 1<<shift );
 }

 //////////////////////////////////////////////////////////////////////////////////////////
@@ -83,7 +83,7 @@ namespace x86Emitter
 //

 // ------------------------------------------------------------------------
-__forceinline ModSibBase::ModSibBase( const iAddressInfo& src ) :
+__forceinline ModSibBase::ModSibBase( const xAddressInfo& src ) :
 Base( src.Base ),
 Index( src.Index ),
 Scale( src.Factor ),
@@ -93,7 +93,7 @@ namespace x86Emitter
 }

 // ------------------------------------------------------------------------
-__forceinline ModSibBase::ModSibBase( iAddressReg base, iAddressReg index, int scale, s32 displacement ) :
+__forceinline ModSibBase::ModSibBase( xAddressReg base, xAddressReg index, int scale, s32 displacement ) :
 Base( base ),
 Index( index ),
 Scale( scale ),
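The operator overloads above are what let addresses be written algebraically. Below is a deliberately simplified, self-contained stand-in (toy names, not the project's xAddressInfo/ModSibBase) showing how a base + index*scale + displacement expression folds into four fields:

#include <cstdio>

// Toy model: just enough to show the decomposition idea.
struct ToyAddress
{
    int base = -1, index = -1, scale = 1, disp = 0;
};

static ToyAddress Reg( int id )                        { ToyAddress a; a.base = id; return a; }
static ToyAddress operator*( ToyAddress a, int s )     { a.index = a.base; a.base = -1; a.scale = s; return a; }
static ToyAddress operator+( ToyAddress a, ToyAddress b )
{
    // Fold b into a: a plain register becomes the index, a scaled term keeps its scale.
    a.index = (b.index < 0) ? b.base : b.index;
    if( b.index >= 0 ) a.scale = b.scale;
    a.disp += b.disp;
    return a;
}
static ToyAddress operator+( ToyAddress a, int d )     { a.disp += d; return a; }

int main()
{
    const int EAX = 0, ECX = 1;
    ToyAddress addr = Reg(EAX) + Reg(ECX) * 4 + 32;    // think: ptr[eax + ecx*4 + 32]
    printf("base=%d index=%d scale=%d disp=%d\n", addr.base, addr.index, addr.scale, addr.disp);
    return 0;
}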
@@ -113,9 +113,9 @@ namespace x86Emitter
 }

 //////////////////////////////////////////////////////////////////////////////////////////
-// iAddressInfo Method Implementations
+// xAddressInfo Method Implementations
 //
-__forceinline iAddressInfo& iAddressInfo::Add( const iAddressReg& src )
+__forceinline xAddressInfo& xAddressInfo::Add( const xAddressReg& src )
 {
 if( src == Index )
 {
@@ -124,7 +124,7 @@ namespace x86Emitter
 else if( src == Base )
 {
 // Compound the existing register reference into the Index/Scale pair.
-Base = iAddressReg::Empty;
+Base = xAddressReg::Empty;

 if( src == Index )
 Factor++;
@@ -146,7 +146,7 @@ namespace x86Emitter
 }

 // ------------------------------------------------------------------------
-__forceinline iAddressInfo& iAddressInfo::Add( const iAddressInfo& src )
+__forceinline xAddressInfo& xAddressInfo::Add( const xAddressInfo& src )
 {
 Add( src.Base );
 Add( src.Displacement );
@@ -174,7 +174,7 @@ namespace x86Emitter

 // ------------------------------------------------------------------------
 template< typename OperandType >
-iForwardJump<OperandType>::iForwardJump( JccComparisonType cctype ) :
+xForwardJump<OperandType>::xForwardJump( JccComparisonType cctype ) :
 BasePtr( (s8*)iGetPtr() +
 ((OperandSize == 1) ? 2 : // j8's are always 2 bytes.
 ((cctype==Jcc_Unconditional) ? 5 : 6 )) // j32's are either 5 or 6 bytes
@@ -184,15 +184,15 @@ namespace x86Emitter
 jASSUME( OperandSize == 1 || OperandSize == 4 );

 if( OperandSize == 1 )
-iWrite<u8>( (cctype == Jcc_Unconditional) ? 0xeb : (0x70 | cctype) );
+xWrite<u8>( (cctype == Jcc_Unconditional) ? 0xeb : (0x70 | cctype) );
 else
 {
 if( cctype == Jcc_Unconditional )
-iWrite<u8>( 0xe9 );
+xWrite<u8>( 0xe9 );
 else
 {
-iWrite<u8>( 0x0f );
+xWrite<u8>( 0x0f );
-iWrite<u8>( 0x80 | cctype );
+xWrite<u8>( 0x80 | cctype );
 }
 }

@@ -201,7 +201,7 @@ namespace x86Emitter

 // ------------------------------------------------------------------------
 template< typename OperandType >
-void iForwardJump<OperandType>::SetTarget() const
+void xForwardJump<OperandType>::SetTarget() const
 {
 jASSUME( BasePtr != NULL );

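A standalone sketch of the forward-jump mechanism above: the constructor reserves the instruction bytes (2 for a short jump, 5 or 6 for a near jump), and SetTarget later patches the displacement as target minus the end of the jump instruction. Buffer and filler below are illustrative, not the emitter's own write stream.

#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    std::vector<uint8_t> code;

    // Emit "jmp short ???" and remember where the instruction ends.
    code.push_back( 0xEB );          // JMP rel8
    code.push_back( 0x00 );          // placeholder displacement
    size_t jump_end = code.size();   // rel8 is relative to this position

    // ... emit the code that gets skipped ...
    for( int i = 0; i < 5; ++i )
        code.push_back( 0x90 );      // NOP filler

    // SetTarget(): patch the displacement to reach the current write position.
    code[jump_end - 1] = (uint8_t)(int8_t)( code.size() - jump_end );

    printf("rel8 = %d\n", (int)(int8_t)code[jump_end - 1]);   // prints 5
    return 0;
}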
@@ -38,16 +38,16 @@ namespace x86Emitter
 // ------------------------------------------------------------------------
 // Group 1 Instruction Class

-extern const Internal::G1LogicImpl<Internal::G1Type_AND,0x54> iAND;
+extern const Internal::G1LogicImpl_PlusSSE<Internal::G1Type_AND,0x54> xAND;
-extern const Internal::G1LogicImpl<Internal::G1Type_OR,0x56> iOR;
+extern const Internal::G1LogicImpl_PlusSSE<Internal::G1Type_OR,0x56> xOR;
-extern const Internal::G1LogicImpl<Internal::G1Type_XOR,0x57> iXOR;
+extern const Internal::G1LogicImpl_PlusSSE<Internal::G1Type_XOR,0x57> xXOR;
-extern const Internal::SSEAndNotImpl<0x55> iANDN;

-extern const Internal::G1ArithmeticImpl<Internal::G1Type_ADD,0x58> iADD;
+extern const Internal::G1ArithmeticImpl_PlusSSE<Internal::G1Type_ADD,0x58> xADD;
-extern const Internal::G1ArithmeticImpl<Internal::G1Type_SUB,0x5c> iSUB;
+extern const Internal::G1ArithmeticImpl_PlusSSE<Internal::G1Type_SUB,0x5c> xSUB;
-extern const Internal::Group1ImplAll<Internal::G1Type_ADC> iADC;
-extern const Internal::Group1ImplAll<Internal::G1Type_SBB> iSBB;
-extern const Internal::Group1ImplAll<Internal::G1Type_CMP> iCMP;
+extern const Internal::G1CompareImpl_PlusSSE xCMP;
+extern const Internal::Group1ImplAll<Internal::G1Type_ADC> xADC;
+extern const Internal::Group1ImplAll<Internal::G1Type_SBB> xSBB;

 // ------------------------------------------------------------------------
 // Group 2 Instruction Class
@@ -56,174 +56,154 @@ namespace x86Emitter
 // zero. This is a safe optimization since any zero-value shift does not affect any
 // flags.

-extern const Internal::MovImplAll iMOV;
+extern const Internal::MovImplAll xMOV;
-extern const Internal::TestImplAll iTEST;
+extern const Internal::TestImplAll xTEST;

-extern const Internal::Group2ImplAll<Internal::G2Type_ROL> iROL;
+extern const Internal::Group2ImplAll<Internal::G2Type_ROL> xROL;
-extern const Internal::Group2ImplAll<Internal::G2Type_ROR> iROR;
+extern const Internal::Group2ImplAll<Internal::G2Type_ROR> xROR;
-extern const Internal::Group2ImplAll<Internal::G2Type_RCL> iRCL;
+extern const Internal::Group2ImplAll<Internal::G2Type_RCL> xRCL;
-extern const Internal::Group2ImplAll<Internal::G2Type_RCR> iRCR;
+extern const Internal::Group2ImplAll<Internal::G2Type_RCR> xRCR;
-extern const Internal::Group2ImplAll<Internal::G2Type_SHL> iSHL;
+extern const Internal::Group2ImplAll<Internal::G2Type_SHL> xSHL;
-extern const Internal::Group2ImplAll<Internal::G2Type_SHR> iSHR;
+extern const Internal::Group2ImplAll<Internal::G2Type_SHR> xSHR;
-extern const Internal::Group2ImplAll<Internal::G2Type_SAR> iSAR;
+extern const Internal::Group2ImplAll<Internal::G2Type_SAR> xSAR;

 // ------------------------------------------------------------------------
 // Group 3 Instruction Class

-extern const Internal::Group3ImplAll<Internal::G3Type_NOT> iNOT;
+extern const Internal::Group3ImplAll<Internal::G3Type_NOT> xNOT;
-extern const Internal::Group3ImplAll<Internal::G3Type_NEG> iNEG;
+extern const Internal::Group3ImplAll<Internal::G3Type_NEG> xNEG;
-extern const Internal::Group3ImplAll<Internal::G3Type_MUL> iUMUL;
+extern const Internal::Group3ImplAll<Internal::G3Type_MUL> xUMUL;
-extern const Internal::Group3ImplAll<Internal::G3Type_DIV> iUDIV;
+extern const Internal::Group3ImplAll<Internal::G3Type_DIV> xUDIV;
-extern const Internal::G3Impl_PlusSSE<Internal::G3Type_iDIV,0x5e> iDIV;
+extern const Internal::G3Impl_PlusSSE<Internal::G3Type_iDIV,0x5e> xDIV;
-extern const Internal::iMul_PlusSSE iMUL;
+extern const Internal::iMul_PlusSSE xMUL;

-extern const Internal::IncDecImplAll<false> iINC;
+extern const Internal::IncDecImplAll<false> xINC;
-extern const Internal::IncDecImplAll<true> iDEC;
+extern const Internal::IncDecImplAll<true> xDEC;

-extern const Internal::MovExtendImplAll<false> iMOVZX;
+extern const Internal::MovExtendImplAll<false> xMOVZX;
-extern const Internal::MovExtendImplAll<true> iMOVSX;
+extern const Internal::MovExtendImplAll<true> xMOVSX;

-extern const Internal::DwordShiftImplAll<false> iSHLD;
+extern const Internal::DwordShiftImplAll<false> xSHLD;
-extern const Internal::DwordShiftImplAll<true> iSHRD;
+extern const Internal::DwordShiftImplAll<true> xSHRD;

-extern const Internal::Group8ImplAll<Internal::G8Type_BT> iBT;
+extern const Internal::Group8ImplAll<Internal::G8Type_BT> xBT;
-extern const Internal::Group8ImplAll<Internal::G8Type_BTR> iBTR;
+extern const Internal::Group8ImplAll<Internal::G8Type_BTR> xBTR;
-extern const Internal::Group8ImplAll<Internal::G8Type_BTS> iBTS;
+extern const Internal::Group8ImplAll<Internal::G8Type_BTS> xBTS;
-extern const Internal::Group8ImplAll<Internal::G8Type_BTC> iBTC;
+extern const Internal::Group8ImplAll<Internal::G8Type_BTC> xBTC;

-extern const Internal::JmpCallImplAll<true> iJMP;
+extern const Internal::JmpCallImplAll<true> xJMP;
-extern const Internal::JmpCallImplAll<false> iCALL;
+extern const Internal::JmpCallImplAll<false> xCALL;

-extern const Internal::BitScanImplAll<false> iBSF;
+extern const Internal::BitScanImplAll<false> xBSF;
-extern const Internal::BitScanImplAll<true> iBSR;
+extern const Internal::BitScanImplAll<true> xBSR;

 // ------------------------------------------------------------------------
-extern const Internal::CMovImplGeneric iCMOV;
+extern const Internal::CMovImplGeneric xCMOV;

-extern const Internal::CMovImplAll<Jcc_Above> iCMOVA;
+extern const Internal::CMovImplAll<Jcc_Above> xCMOVA;
-extern const Internal::CMovImplAll<Jcc_AboveOrEqual> iCMOVAE;
+extern const Internal::CMovImplAll<Jcc_AboveOrEqual> xCMOVAE;
-extern const Internal::CMovImplAll<Jcc_Below> iCMOVB;
+extern const Internal::CMovImplAll<Jcc_Below> xCMOVB;
-extern const Internal::CMovImplAll<Jcc_BelowOrEqual> iCMOVBE;
+extern const Internal::CMovImplAll<Jcc_BelowOrEqual> xCMOVBE;

-extern const Internal::CMovImplAll<Jcc_Greater> iCMOVG;
+extern const Internal::CMovImplAll<Jcc_Greater> xCMOVG;
-extern const Internal::CMovImplAll<Jcc_GreaterOrEqual> iCMOVGE;
+extern const Internal::CMovImplAll<Jcc_GreaterOrEqual> xCMOVGE;
-extern const Internal::CMovImplAll<Jcc_Less> iCMOVL;
+extern const Internal::CMovImplAll<Jcc_Less> xCMOVL;
-extern const Internal::CMovImplAll<Jcc_LessOrEqual> iCMOVLE;
+extern const Internal::CMovImplAll<Jcc_LessOrEqual> xCMOVLE;

-extern const Internal::CMovImplAll<Jcc_Zero> iCMOVZ;
+extern const Internal::CMovImplAll<Jcc_Zero> xCMOVZ;
-extern const Internal::CMovImplAll<Jcc_Equal> iCMOVE;
+extern const Internal::CMovImplAll<Jcc_Equal> xCMOVE;
-extern const Internal::CMovImplAll<Jcc_NotZero> iCMOVNZ;
+extern const Internal::CMovImplAll<Jcc_NotZero> xCMOVNZ;
-extern const Internal::CMovImplAll<Jcc_NotEqual> iCMOVNE;
+extern const Internal::CMovImplAll<Jcc_NotEqual> xCMOVNE;

-extern const Internal::CMovImplAll<Jcc_Overflow> iCMOVO;
+extern const Internal::CMovImplAll<Jcc_Overflow> xCMOVO;
-extern const Internal::CMovImplAll<Jcc_NotOverflow> iCMOVNO;
+extern const Internal::CMovImplAll<Jcc_NotOverflow> xCMOVNO;
-extern const Internal::CMovImplAll<Jcc_Carry> iCMOVC;
+extern const Internal::CMovImplAll<Jcc_Carry> xCMOVC;
-extern const Internal::CMovImplAll<Jcc_NotCarry> iCMOVNC;
+extern const Internal::CMovImplAll<Jcc_NotCarry> xCMOVNC;

-extern const Internal::CMovImplAll<Jcc_Signed> iCMOVS;
+extern const Internal::CMovImplAll<Jcc_Signed> xCMOVS;
-extern const Internal::CMovImplAll<Jcc_Unsigned> iCMOVNS;
+extern const Internal::CMovImplAll<Jcc_Unsigned> xCMOVNS;
-extern const Internal::CMovImplAll<Jcc_ParityEven> iCMOVPE;
+extern const Internal::CMovImplAll<Jcc_ParityEven> xCMOVPE;
-extern const Internal::CMovImplAll<Jcc_ParityOdd> iCMOVPO;
+extern const Internal::CMovImplAll<Jcc_ParityOdd> xCMOVPO;

 // ------------------------------------------------------------------------
-extern const Internal::SetImplGeneric iSET;
+extern const Internal::SetImplGeneric xSET;

-extern const Internal::SetImplAll<Jcc_Above> iSETA;
+extern const Internal::SetImplAll<Jcc_Above> xSETA;
-extern const Internal::SetImplAll<Jcc_AboveOrEqual> iSETAE;
+extern const Internal::SetImplAll<Jcc_AboveOrEqual> xSETAE;
-extern const Internal::SetImplAll<Jcc_Below> iSETB;
+extern const Internal::SetImplAll<Jcc_Below> xSETB;
-extern const Internal::SetImplAll<Jcc_BelowOrEqual> iSETBE;
+extern const Internal::SetImplAll<Jcc_BelowOrEqual> xSETBE;

-extern const Internal::SetImplAll<Jcc_Greater> iSETG;
+extern const Internal::SetImplAll<Jcc_Greater> xSETG;
-extern const Internal::SetImplAll<Jcc_GreaterOrEqual> iSETGE;
+extern const Internal::SetImplAll<Jcc_GreaterOrEqual> xSETGE;
-extern const Internal::SetImplAll<Jcc_Less> iSETL;
+extern const Internal::SetImplAll<Jcc_Less> xSETL;
-extern const Internal::SetImplAll<Jcc_LessOrEqual> iSETLE;
+extern const Internal::SetImplAll<Jcc_LessOrEqual> xSETLE;

-extern const Internal::SetImplAll<Jcc_Zero> iSETZ;
+extern const Internal::SetImplAll<Jcc_Zero> xSETZ;
-extern const Internal::SetImplAll<Jcc_Equal> iSETE;
+extern const Internal::SetImplAll<Jcc_Equal> xSETE;
-extern const Internal::SetImplAll<Jcc_NotZero> iSETNZ;
+extern const Internal::SetImplAll<Jcc_NotZero> xSETNZ;
-extern const Internal::SetImplAll<Jcc_NotEqual> iSETNE;
+extern const Internal::SetImplAll<Jcc_NotEqual> xSETNE;

-extern const Internal::SetImplAll<Jcc_Overflow> iSETO;
+extern const Internal::SetImplAll<Jcc_Overflow> xSETO;
-extern const Internal::SetImplAll<Jcc_NotOverflow> iSETNO;
+extern const Internal::SetImplAll<Jcc_NotOverflow> xSETNO;
-extern const Internal::SetImplAll<Jcc_Carry> iSETC;
+extern const Internal::SetImplAll<Jcc_Carry> xSETC;
-extern const Internal::SetImplAll<Jcc_NotCarry> iSETNC;
+extern const Internal::SetImplAll<Jcc_NotCarry> xSETNC;

-extern const Internal::SetImplAll<Jcc_Signed> iSETS;
+extern const Internal::SetImplAll<Jcc_Signed> xSETS;
-extern const Internal::SetImplAll<Jcc_Unsigned> iSETNS;
+extern const Internal::SetImplAll<Jcc_Unsigned> xSETNS;
-extern const Internal::SetImplAll<Jcc_ParityEven> iSETPE;
+extern const Internal::SetImplAll<Jcc_ParityEven> xSETPE;
-extern const Internal::SetImplAll<Jcc_ParityOdd> iSETPO;
+extern const Internal::SetImplAll<Jcc_ParityOdd> xSETPO;

 //////////////////////////////////////////////////////////////////////////////////////////
 // Miscellaneous Instructions
 // These are all defined inline or in ix86.cpp.
 //

-extern void iBSWAP( const iRegister32& to );
+extern void xBSWAP( const xRegister32& to );

 // ----- Lea Instructions (Load Effective Address) -----
 // Note: alternate (void*) forms of these instructions are not provided since those
 // forms are functionally equivalent to Mov reg,imm, and thus better written as MOVs
 // instead.

-extern void iLEA( iRegister32 to, const ModSibBase& src, bool preserve_flags=false );
+extern void xLEA( xRegister32 to, const ModSibBase& src, bool preserve_flags=false );
-extern void iLEA( iRegister16 to, const ModSibBase& src, bool preserve_flags=false );
+extern void xLEA( xRegister16 to, const ModSibBase& src, bool preserve_flags=false );

 // ----- Push / Pop Instructions -----
 // Note: pushad/popad implementations are intentionally left out. The instructions are
 // invalid in x64, and are super slow on x32. Use multiple Push/Pop instructions instead.

-extern void iPOP( const ModSibBase& from );
+extern void xPOP( const ModSibBase& from );
-extern void iPUSH( const ModSibBase& from );
+extern void xPUSH( const ModSibBase& from );

-static __forceinline void iPOP( iRegister32 from ) { write8( 0x58 | from.Id ); }
+static __forceinline void xPOP( xRegister32 from ) { write8( 0x58 | from.Id ); }
-static __forceinline void iPOP( void* from ) { iPOP( ptr[from] ); }
+static __forceinline void xPOP( void* from ) { xPOP( ptr[from] ); }

-static __forceinline void iPUSH( u32 imm ) { write8( 0x68 ); write32( imm ); }
+static __forceinline void xPUSH( u32 imm ) { write8( 0x68 ); write32( imm ); }
-static __forceinline void iPUSH( iRegister32 from ) { write8( 0x50 | from.Id ); }
+static __forceinline void xPUSH( xRegister32 from ) { write8( 0x50 | from.Id ); }
-static __forceinline void iPUSH( void* from ) { iPUSH( ptr[from] ); }
+static __forceinline void xPUSH( void* from ) { xPUSH( ptr[from] ); }

 // pushes the EFLAGS register onto the stack
-static __forceinline void iPUSHFD() { write8( 0x9C ); }
+static __forceinline void xPUSHFD() { write8( 0x9C ); }
 // pops the EFLAGS register from the stack
-static __forceinline void iPOPFD() { write8( 0x9D ); }
+static __forceinline void xPOPFD() { write8( 0x9D ); }

 // ----- Miscellaneous Instructions -----
 // Various Instructions with no parameter and no special encoding logic.

-__forceinline void iRET() { write8( 0xC3 ); }
+__forceinline void xRET() { write8( 0xC3 ); }
-__forceinline void iCBW() { write16( 0x9866 ); }
+__forceinline void xCBW() { write16( 0x9866 ); }
-__forceinline void iCWD() { write8( 0x98 ); }
+__forceinline void xCWD() { write8( 0x98 ); }
-__forceinline void iCDQ() { write8( 0x99 ); }
+__forceinline void xCDQ() { write8( 0x99 ); }
-__forceinline void iCWDE() { write8( 0x98 ); }
+__forceinline void xCWDE() { write8( 0x98 ); }

-__forceinline void iLAHF() { write8( 0x9f ); }
+__forceinline void xLAHF() { write8( 0x9f ); }
-__forceinline void iSAHF() { write8( 0x9e ); }
+__forceinline void xSAHF() { write8( 0x9e ); }

-__forceinline void iSTC() { write8( 0xF9 ); }
+__forceinline void xSTC() { write8( 0xF9 ); }
-__forceinline void iCLC() { write8( 0xF8 ); }
+__forceinline void xCLC() { write8( 0xF8 ); }

 // NOP 1-byte
-__forceinline void iNOP() { write8(0x90); }
+__forceinline void xNOP() { write8(0x90); }

-//////////////////////////////////////////////////////////////////////////////////////////
-// MUL / DIV instructions
-
-/*extern void iMUL( const iRegister32& to, const iRegister32& from );
-extern void iMUL( const iRegister32& to, const void* src );
-extern void iMUL( const iRegister32& to, const iRegister32& from, s32 imm );
-extern void iMUL( const iRegister32& to, const ModSibBase& src );
-extern void iMUL( const iRegister32& to, const ModSibBase& src, s32 imm );
-
-extern void iMUL( const iRegister16& to, const iRegister16& from );
-extern void iMUL( const iRegister16& to, const void* src );
-extern void iMUL( const iRegister16& to, const iRegister16& from, s16 imm );
-extern void iMUL( const iRegister16& to, const ModSibBase& src );
-extern void iMUL( const iRegister16& to, const ModSibBase& src, s16 imm );
-
-template< typename T >
-__forceinline void iMUL( const iRegister<T>& from ) { Internal::Group3Impl<T>::Emit( Internal::G3Type_iMUL, from ); }
-template< typename T >
-__noinline void iMUL( const ModSibStrict<T>& from ) { Internal::Group3Impl<T>::Emit( Internal::G3Type_iMUL, from ); }*/
-
 //////////////////////////////////////////////////////////////////////////////////////////
 // JMP / Jcc Instructions!
@@ -232,92 +212,92 @@ namespace x86Emitter

 #define DEFINE_FORWARD_JUMP( label, cond ) \
 template< typename OperandType > \
-class iForward##label : public iForwardJump<OperandType> \
+class xForward##label : public xForwardJump<OperandType> \
 { \
 public: \
-iForward##label() : iForwardJump<OperandType>( cond ) {} \
+xForward##label() : xForwardJump<OperandType>( cond ) {} \
 };

 // ------------------------------------------------------------------------
 // Note: typedefs below are defined individually in order to appease Intellisense
 // resolution. Including them into the class definition macro above breaks it.

-typedef iForwardJump<s8> iForwardJump8;
+typedef xForwardJump<s8> xForwardJump8;
-typedef iForwardJump<s32> iForwardJump32;
+typedef xForwardJump<s32> xForwardJump32;

 DEFINE_FORWARD_JUMP( JA, Jcc_Above );
 DEFINE_FORWARD_JUMP( JB, Jcc_Below );
 DEFINE_FORWARD_JUMP( JAE, Jcc_AboveOrEqual );
 DEFINE_FORWARD_JUMP( JBE, Jcc_BelowOrEqual );

-typedef iForwardJA<s8> iForwardJA8;
+typedef xForwardJA<s8> xForwardJA8;
-typedef iForwardJA<s32> iForwardJA32;
+typedef xForwardJA<s32> xForwardJA32;
-typedef iForwardJB<s8> iForwardJB8;
+typedef xForwardJB<s8> xForwardJB8;
-typedef iForwardJB<s32> iForwardJB32;
+typedef xForwardJB<s32> xForwardJB32;
-typedef iForwardJAE<s8> iForwardJAE8;
+typedef xForwardJAE<s8> xForwardJAE8;
-typedef iForwardJAE<s32> iForwardJAE32;
+typedef xForwardJAE<s32> xForwardJAE32;
-typedef iForwardJBE<s8> iForwardJBE8;
+typedef xForwardJBE<s8> xForwardJBE8;
-typedef iForwardJBE<s32> iForwardJBE32;
+typedef xForwardJBE<s32> xForwardJBE32;

 DEFINE_FORWARD_JUMP( JG, Jcc_Greater );
 DEFINE_FORWARD_JUMP( JL, Jcc_Less );
 DEFINE_FORWARD_JUMP( JGE, Jcc_GreaterOrEqual );
 DEFINE_FORWARD_JUMP( JLE, Jcc_LessOrEqual );

-typedef iForwardJG<s8> iForwardJG8;
+typedef xForwardJG<s8> xForwardJG8;
-typedef iForwardJG<s32> iForwardJG32;
+typedef xForwardJG<s32> xForwardJG32;
-typedef iForwardJL<s8> iForwardJL8;
+typedef xForwardJL<s8> xForwardJL8;
-typedef iForwardJL<s32> iForwardJL32;
+typedef xForwardJL<s32> xForwardJL32;
-typedef iForwardJGE<s8> iForwardJGE8;
+typedef xForwardJGE<s8> xForwardJGE8;
-typedef iForwardJGE<s32> iForwardJGE32;
+typedef xForwardJGE<s32> xForwardJGE32;
-typedef iForwardJLE<s8> iForwardJLE8;
+typedef xForwardJLE<s8> xForwardJLE8;
-typedef iForwardJLE<s32> iForwardJLE32;
+typedef xForwardJLE<s32> xForwardJLE32;

 DEFINE_FORWARD_JUMP( JZ, Jcc_Zero );
 DEFINE_FORWARD_JUMP( JE, Jcc_Equal );
 DEFINE_FORWARD_JUMP( JNZ, Jcc_NotZero );
 DEFINE_FORWARD_JUMP( JNE, Jcc_NotEqual );

-typedef iForwardJZ<s8> iForwardJZ8;
+typedef xForwardJZ<s8> xForwardJZ8;
-typedef iForwardJZ<s32> iForwardJZ32;
+typedef xForwardJZ<s32> xForwardJZ32;
-typedef iForwardJE<s8> iForwardJE8;
+typedef xForwardJE<s8> xForwardJE8;
-typedef iForwardJE<s32> iForwardJE32;
+typedef xForwardJE<s32> xForwardJE32;
-typedef iForwardJNZ<s8> iForwardJNZ8;
+typedef xForwardJNZ<s8> xForwardJNZ8;
-typedef iForwardJNZ<s32> iForwardJNZ32;
+typedef xForwardJNZ<s32> xForwardJNZ32;
-typedef iForwardJNE<s8> iForwardJNE8;
+typedef xForwardJNE<s8> xForwardJNE8;
-typedef iForwardJNE<s32> iForwardJNE32;
+typedef xForwardJNE<s32> xForwardJNE32;

 DEFINE_FORWARD_JUMP( JS, Jcc_Signed );
 DEFINE_FORWARD_JUMP( JNS, Jcc_Unsigned );

-typedef iForwardJS<s8> iForwardJS8;
+typedef xForwardJS<s8> xForwardJS8;
-typedef iForwardJS<s32> iForwardJS32;
+typedef xForwardJS<s32> xForwardJS32;
-typedef iForwardJNS<s8> iForwardJNS8;
+typedef xForwardJNS<s8> xForwardJNS8;
-typedef iForwardJNS<s32> iForwardJNS32;
+typedef xForwardJNS<s32> xForwardJNS32;

 DEFINE_FORWARD_JUMP( JO, Jcc_Overflow );
 DEFINE_FORWARD_JUMP( JNO, Jcc_NotOverflow );

-typedef iForwardJO<s8> iForwardJO8;
+typedef xForwardJO<s8> xForwardJO8;
-typedef iForwardJO<s32> iForwardJO32;
+typedef xForwardJO<s32> xForwardJO32;
-typedef iForwardJNO<s8> iForwardJNO8;
+typedef xForwardJNO<s8> xForwardJNO8;
-typedef iForwardJNO<s32> iForwardJNO32;
+typedef xForwardJNO<s32> xForwardJNO32;

 DEFINE_FORWARD_JUMP( JC, Jcc_Carry );
 DEFINE_FORWARD_JUMP( JNC, Jcc_NotCarry );

-typedef iForwardJC<s8> iForwardJC8;
+typedef xForwardJC<s8> xForwardJC8;
-typedef iForwardJC<s32> iForwardJC32;
+typedef xForwardJC<s32> xForwardJC32;
-typedef iForwardJNC<s8> iForwardJNC8;
+typedef xForwardJNC<s8> xForwardJNC8;
-typedef iForwardJNC<s32> iForwardJNC32;
+typedef xForwardJNC<s32> xForwardJNC32;

 DEFINE_FORWARD_JUMP( JPE, Jcc_ParityEven );
 DEFINE_FORWARD_JUMP( JPO, Jcc_ParityOdd );

-typedef iForwardJPE<s8> iForwardJPE8;
+typedef xForwardJPE<s8> xForwardJPE8;
-typedef iForwardJPE<s32> iForwardJPE32;
+typedef xForwardJPE<s32> xForwardJPE32;
-typedef iForwardJPO<s8> iForwardJPO8;
+typedef xForwardJPO<s8> xForwardJPO8;
-typedef iForwardJPO<s32> iForwardJPO32;
+typedef xForwardJPO<s32> xForwardJPO32;

 //////////////////////////////////////////////////////////////////////////////////////////
 // MMX Mov Instructions (MOVD, MOVQ, MOVSS).
@@ -332,53 +312,53 @@ namespace x86Emitter
 // MOVD has valid forms for MMX and XMM registers.
 //
 template< typename T >
-__emitinline void iMOVDZX( const iRegisterSIMD<T>& to, const iRegister32& from )
+__emitinline void xMOVDZX( const xRegisterSIMD<T>& to, const xRegister32& from )
 {
 Internal::writeXMMop( 0x66, 0x6e, to, from );
 }

 template< typename T >
-__emitinline void iMOVDZX( const iRegisterSIMD<T>& to, const void* src )
+__emitinline void xMOVDZX( const xRegisterSIMD<T>& to, const void* src )
 {
 Internal::writeXMMop( 0x66, 0x6e, to, src );
 }

 template< typename T >
-void iMOVDZX( const iRegisterSIMD<T>& to, const ModSibBase& src )
+void xMOVDZX( const xRegisterSIMD<T>& to, const ModSibBase& src )
 {
 Internal::writeXMMop( 0x66, 0x6e, to, src );
 }

 template< typename T >
-__emitinline void iMOVD( const iRegister32& to, const iRegisterSIMD<T>& from )
+__emitinline void xMOVD( const xRegister32& to, const xRegisterSIMD<T>& from )
 {
 Internal::writeXMMop( 0x66, 0x7e, from, to );
 }

 template< typename T >
-__emitinline void iMOVD( void* dest, const iRegisterSIMD<T>& from )
+__emitinline void xMOVD( void* dest, const xRegisterSIMD<T>& from )
 {
 Internal::writeXMMop( 0x66, 0x7e, from, dest );
 }

 template< typename T >
-void iMOVD( const ModSibBase& dest, const iRegisterSIMD<T>& from )
+void xMOVD( const ModSibBase& dest, const xRegisterSIMD<T>& from )
 {
 Internal::writeXMMop( 0x66, 0x7e, from, dest );
 }

 // ------------------------------------------------------------------------

-// iMASKMOV:
+// xMASKMOV:
 // Selectively write bytes from mm1/xmm1 to memory location using the byte mask in mm2/xmm2.
 // The default memory location is specified by DS:EDI. The most significant bit in each byte
 // of the mask operand determines whether the corresponding byte in the source operand is
 // written to the corresponding byte location in memory.

 template< typename T >
-static __forceinline void iMASKMOV( const iRegisterSIMD<T>& to, const iRegisterSIMD<T>& from ) { Internal::writeXMMop( 0x66, 0xf7, to, from ); }
+static __forceinline void xMASKMOV( const xRegisterSIMD<T>& to, const xRegisterSIMD<T>& from ) { Internal::writeXMMop( 0x66, 0xf7, to, from ); }

-// iPMOVMSKB:
+// xPMOVMSKB:
 // Creates a mask made up of the most significant bit of each byte of the source
 // operand and stores the result in the low byte or word of the destination operand.
 // Upper bits of the destination are cleared to zero.
@@ -387,93 +367,91 @@ namespace x86Emitter
 // 128-bit (SSE) source, the byte mask is 16-bits.
 //
 template< typename T >
-static __forceinline void iPMOVMSKB( const iRegister32& to, const iRegisterSIMD<T>& from ) { Internal::writeXMMop( 0x66, 0xd7, to, from ); }
+static __forceinline void xPMOVMSKB( const xRegister32& to, const xRegisterSIMD<T>& from ) { Internal::writeXMMop( 0x66, 0xd7, to, from ); }
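Host-side equivalents of the two operations described above, for reference: _mm_movemask_epi8 corresponds to PMOVMSKB and _mm_maskmoveu_si128 to the XMM form of MASKMOV. Data values below are made up for illustration.

#include <emmintrin.h>
#include <cstdio>

int main()
{
    __m128i data = _mm_set1_epi8( (char)0xAB );
    __m128i mask = _mm_set_epi8( (char)0x80,0,(char)0x80,0,(char)0x80,0,(char)0x80,0,
                                 (char)0x80,0,(char)0x80,0,(char)0x80,0,(char)0x80,0 );

    // PMOVMSKB: gather the MSB of each byte of 'mask' into a 16-bit integer.
    int bits = _mm_movemask_epi8( mask );
    printf("byte mask = %04X\n", bits);    // every other byte has its MSB set

    // MASKMOVDQU: store only the bytes of 'data' whose mask byte has the MSB set.
    char buf[16] = { 0 };
    _mm_maskmoveu_si128( data, mask, buf );
    printf("buf[0..3] = %02X %02X %02X %02X\n",
        (unsigned char)buf[0], (unsigned char)buf[1],
        (unsigned char)buf[2], (unsigned char)buf[3]);
    return 0;
}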

 // ------------------------------------------------------------------------

-extern void iMOVQ( const iRegisterMMX& to, const iRegisterMMX& from );
+extern void xMOVQ( const xRegisterMMX& to, const xRegisterMMX& from );
-extern void iMOVQ( const iRegisterMMX& to, const iRegisterSSE& from );
+extern void xMOVQ( const xRegisterMMX& to, const xRegisterSSE& from );
-extern void iMOVQ( const iRegisterSSE& to, const iRegisterMMX& from );
+extern void xMOVQ( const xRegisterSSE& to, const xRegisterMMX& from );

-extern void iMOVQ( void* dest, const iRegisterSSE& from );
+extern void xMOVQ( void* dest, const xRegisterSSE& from );
-extern void iMOVQ( const ModSibBase& dest, const iRegisterSSE& from );
+extern void xMOVQ( const ModSibBase& dest, const xRegisterSSE& from );
-extern void iMOVQ( void* dest, const iRegisterMMX& from );
+extern void xMOVQ( void* dest, const xRegisterMMX& from );
-extern void iMOVQ( const ModSibBase& dest, const iRegisterMMX& from );
+extern void xMOVQ( const ModSibBase& dest, const xRegisterMMX& from );
-extern void iMOVQ( const iRegisterMMX& to, const void* src );
+extern void xMOVQ( const xRegisterMMX& to, const void* src );
-extern void iMOVQ( const iRegisterMMX& to, const ModSibBase& src );
+extern void xMOVQ( const xRegisterMMX& to, const ModSibBase& src );

-extern void iMOVQZX( const iRegisterSSE& to, const void* src );
+extern void xMOVQZX( const xRegisterSSE& to, const void* src );
-extern void iMOVQZX( const iRegisterSSE& to, const ModSibBase& src );
+extern void xMOVQZX( const xRegisterSSE& to, const ModSibBase& src );
-extern void iMOVQZX( const iRegisterSSE& to, const iRegisterSSE& from );
+extern void xMOVQZX( const xRegisterSSE& to, const xRegisterSSE& from );

-extern void iMOVSS( const iRegisterSSE& to, const iRegisterSSE& from );
+extern void xMOVSS( const xRegisterSSE& to, const xRegisterSSE& from );
-extern void iMOVSS( const void* to, const iRegisterSSE& from );
+extern void xMOVSS( const void* to, const xRegisterSSE& from );
-extern void iMOVSS( const ModSibBase& to, const iRegisterSSE& from );
+extern void xMOVSS( const ModSibBase& to, const xRegisterSSE& from );
-extern void iMOVSD( const iRegisterSSE& to, const iRegisterSSE& from );
+extern void xMOVSD( const xRegisterSSE& to, const xRegisterSSE& from );
-extern void iMOVSD( const void* to, const iRegisterSSE& from );
+extern void xMOVSD( const void* to, const xRegisterSSE& from );
-extern void iMOVSD( const ModSibBase& to, const iRegisterSSE& from );
+extern void xMOVSD( const ModSibBase& to, const xRegisterSSE& from );

-extern void iMOVSSZX( const iRegisterSSE& to, const void* from );
+extern void xMOVSSZX( const xRegisterSSE& to, const void* from );
-extern void iMOVSSZX( const iRegisterSSE& to, const ModSibBase& from );
+extern void xMOVSSZX( const xRegisterSSE& to, const ModSibBase& from );
-extern void iMOVSDZX( const iRegisterSSE& to, const void* from );
+extern void xMOVSDZX( const xRegisterSSE& to, const void* from );
-extern void iMOVSDZX( const iRegisterSSE& to, const ModSibBase& from );
+extern void xMOVSDZX( const xRegisterSSE& to, const ModSibBase& from );
|
|
||||||
extern void iMOVNTDQA( const iRegisterSSE& to, const void* from );
|
extern void xMOVNTDQA( const xRegisterSSE& to, const void* from );
|
||||||
extern void iMOVNTDQA( const iRegisterSSE& to, const ModSibBase& from );
|
extern void xMOVNTDQA( const xRegisterSSE& to, const ModSibBase& from );
|
||||||
extern void iMOVNTDQ( void* to, const iRegisterSSE& from );
|
extern void xMOVNTDQ( void* to, const xRegisterSSE& from );
|
||||||
extern void iMOVNTDQA( const ModSibBase& to, const iRegisterSSE& from );
|
extern void xMOVNTDQA( const ModSibBase& to, const xRegisterSSE& from );
|
||||||
|
|
||||||
extern void iMOVNTPD( void* to, const iRegisterSSE& from );
|
extern void xMOVNTPD( void* to, const xRegisterSSE& from );
|
||||||
extern void iMOVNTPD( const ModSibBase& to, const iRegisterSSE& from );
|
extern void xMOVNTPD( const ModSibBase& to, const xRegisterSSE& from );
|
||||||
extern void iMOVNTPS( void* to, const iRegisterSSE& from );
|
extern void xMOVNTPS( void* to, const xRegisterSSE& from );
|
||||||
extern void iMOVNTPS( const ModSibBase& to, const iRegisterSSE& from );
|
extern void xMOVNTPS( const ModSibBase& to, const xRegisterSSE& from );
|
||||||
extern void iMOVNTQ( void* to, const iRegisterMMX& from );
|
extern void xMOVNTQ( void* to, const xRegisterMMX& from );
|
||||||
extern void iMOVNTQ( const ModSibBase& to, const iRegisterMMX& from );
|
extern void xMOVNTQ( const ModSibBase& to, const xRegisterMMX& from );
|
||||||
|
|
||||||
extern void iMOVLHPS( const iRegisterSSE& to, const iRegisterSSE& from );
|
|
||||||
extern void iMOVHLPS( const iRegisterSSE& to, const iRegisterSSE& from );
|
|
||||||
extern void iMOVLHPD( const iRegisterSSE& to, const iRegisterSSE& from );
|
|
||||||
extern void iMOVHLPD( const iRegisterSSE& to, const iRegisterSSE& from );
|
|
||||||
|
|
||||||
//////////////////////////////////////////////////////////////////////////////////////////
//

-extern const Internal::MovapsImplAll<0, 0x28, 0x29> iMOVAPS;
+extern const Internal::MovapsImplAll<0, 0x28, 0x29> xMOVAPS;
-extern const Internal::MovapsImplAll<0, 0x10, 0x11> iMOVUPS;
+extern const Internal::MovapsImplAll<0, 0x10, 0x11> xMOVUPS;

-extern const Internal::MovapsImplAll<0x66, 0x28, 0x29> iMOVAPD;
+extern const Internal::MovapsImplAll<0x66, 0x28, 0x29> xMOVAPD;
-extern const Internal::MovapsImplAll<0x66, 0x10, 0x11> iMOVUPD;
+extern const Internal::MovapsImplAll<0x66, 0x10, 0x11> xMOVUPD;

#ifdef ALWAYS_USE_MOVAPS
-extern const Internal::MovapsImplAll<0x66, 0x6f, 0x7f> iMOVDQA;
+extern const Internal::MovapsImplAll<0x66, 0x6f, 0x7f> xMOVDQA;
-extern const Internal::MovapsImplAll<0xf3, 0x6f, 0x7f> iMOVDQU;
+extern const Internal::MovapsImplAll<0xf3, 0x6f, 0x7f> xMOVDQU;
#else
-extern const Internal::MovapsImplAll<0, 0x28, 0x29> iMOVDQA;
+extern const Internal::MovapsImplAll<0, 0x28, 0x29> xMOVDQA;
-extern const Internal::MovapsImplAll<0, 0x10, 0x11> iMOVDQU;
+extern const Internal::MovapsImplAll<0, 0x10, 0x11> xMOVDQU;
#endif
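A note for readers skimming this hunk: xMOVAPS, xMOVUPS, xMOVDQA and the rest are not free functions but const instances of small template classes whose template arguments carry the prefix byte and the load/store opcodes, and which are invoked through overloaded call operators (which is why later hunks can write xMOVAPS( xRegisterSSE(to), xRegisterSSE(from) ) against a const object). The snippet below is only a simplified stand-in to illustrate that shape; the real Internal::MovapsImplAll lives in the emitter headers and has more overloads, and every name here other than the opcode constants is invented for the example.

// Simplified, hypothetical stand-in for the "const opcode object" pattern above.
// It is not the real Internal::MovapsImplAll; it only mirrors its outline.
struct SketchRegSSE { int Id; };

template< unsigned char Prefix, unsigned char OpcodeLoad, unsigned char OpcodeStore >
struct SketchMovImpl
{
	// reg,reg and reg,mem forms would encode with OpcodeLoad...
	void operator()( const SketchRegSSE& to, const SketchRegSSE& from ) const
	{
		// write Prefix (when nonzero), 0x0f, OpcodeLoad, then the ModRM byte for to/from.
	}
	// mem,reg forms would encode with OpcodeStore...
	void operator()( void* dest, const SketchRegSSE& from ) const
	{
		// write Prefix (when nonzero), 0x0f, OpcodeStore, then ModRM plus displacement for dest.
	}
};

// One const instance per mnemonic, mirroring the externs above:
static const SketchMovImpl<0x00, 0x28, 0x29> SketchMOVAPS;   // movaps opcodes
static const SketchMovImpl<0x66, 0x6f, 0x7f> SketchMOVDQA;   // movdqa opcodes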
-extern const Internal::MovhlImplAll<0, 0x16> iMOVHPS;
-extern const Internal::MovhlImplAll<0, 0x12> iMOVLPS;
-extern const Internal::MovhlImplAll<0x66, 0x16> iMOVHPD;
-extern const Internal::MovhlImplAll<0x66, 0x12> iMOVLPD;
+extern const Internal::MovhlImpl_RtoR<0x16> xMOVLH;
+extern const Internal::MovhlImpl_RtoR<0x12> xMOVHL;

-extern const Internal::PLogicImplAll<0xdb> iPAND;
-extern const Internal::PLogicImplAll<0xdf> iPANDN;
-extern const Internal::PLogicImplAll<0xeb> iPOR;
-extern const Internal::PLogicImplAll<0xef> iPXOR;
+extern const Internal::MovhlImplAll<0x16> xMOVH;
+extern const Internal::MovhlImplAll<0x12> xMOVL;

-extern const Internal::SSELogicImpl<0,0x53> iRCPPS;
-extern const Internal::SSELogicImpl<0xf3,0x53> iRCPSS;
+extern const Internal::PLogicImplAll<0xdb> xPAND;
+extern const Internal::PLogicImplAll<0xdf> xPANDN;
+extern const Internal::PLogicImplAll<0xeb> xPOR;
+extern const Internal::PLogicImplAll<0xef> xPXOR;

-extern const Internal::SSECompareImplGeneric<0x00> iCMPPS;
-extern const Internal::SSECompareImplGeneric<0x66> iCMPPD;
-extern const Internal::SSECompareImplGeneric<0xf3> iCMPSS;
-extern const Internal::SSECompareImplGeneric<0xf2> iCMPSD;
+extern const Internal::SSEAndNotImpl<0x55> xANDN;

-extern const Internal::SSECompareImplGeneric<0x00> iCMPPS;
-extern const Internal::SSECompareImplGeneric<0x66> iCMPPD;
-extern const Internal::SSECompareImplGeneric<0xf3> iCMPSS;
-extern const Internal::SSECompareImplGeneric<0xf2> iCMPSD;
+extern const Internal::SSELogicImpl<0,0x53> xRCPPS;
+extern const Internal::SSELogicImpl<0xf3,0x53> xRCPSS;

+// ------------------------------------------------------------------------

+extern const Internal::SSECompareImpl<SSE2_Equal> xCMPEQ;
+extern const Internal::SSECompareImpl<SSE2_Less> xCMPLT;
+extern const Internal::SSECompareImpl<SSE2_LessOrEqual> xCMPLE;
+extern const Internal::SSECompareImpl<SSE2_Unordered> xCMPUNORD;
+extern const Internal::SSECompareImpl<SSE2_NotEqual> xCMPNE;
+extern const Internal::SSECompareImpl<SSE2_NotLess> xCMPNLT;
+extern const Internal::SSECompareImpl<SSE2_NotLessOrEqual> xCMPNLE;
+extern const Internal::SSECompareImpl<SSE2_Ordered> xCMPORD;

}
@@ -40,11 +40,11 @@ namespace x86Emitter {

using namespace Internal;

-const JmpCallImplAll<true> iJMP;
+const JmpCallImplAll<true> xJMP;
-const JmpCallImplAll<false> iCALL;
+const JmpCallImplAll<false> xCALL;

// ------------------------------------------------------------------------
-void iSmartJump::SetTarget()
+void xSmartJump::SetTarget()
{
u8* target = iGetPtr();
if( m_baseptr == NULL ) return;
@@ -67,7 +67,7 @@ void iSmartJump::SetTarget()
}
}

-iSmartJump::~iSmartJump()
+xSmartJump::~xSmartJump()
{
SetTarget();
m_baseptr = NULL; // just in case (sometimes helps in debugging too)
@@ -78,7 +78,7 @@ iSmartJump::~iSmartJump()
// Writes a jump at the current x86Ptr, which targets a pre-established target address.
// (usually a backwards jump)
//
-// slideForward - used internally by iSmartJump to indicate that the jump target is going
+// slideForward - used internally by xSmartJump to indicate that the jump target is going
// to slide forward in the event of an 8 bit displacement.
//
// Using this
@@ -96,21 +96,21 @@ __emitinline void iJccKnownTarget( JccComparisonType comparison, void* target, b

if( is_s8( displacement8 ) )
{
-iWrite<u8>( (comparison == Jcc_Unconditional) ? 0xeb : (0x70 | comparison) );
+xWrite<u8>( (comparison == Jcc_Unconditional) ? 0xeb : (0x70 | comparison) );
-iWrite<s8>( displacement8 );
+xWrite<s8>( displacement8 );
}
else
{
// Perform a 32 bit jump instead. :(

if( comparison == Jcc_Unconditional )
-iWrite<u8>( 0xe9 );
+xWrite<u8>( 0xe9 );
else
{
-iWrite<u8>( 0x0f );
+xWrite<u8>( 0x0f );
-iWrite<u8>( 0x80 | comparison );
+xWrite<u8>( 0x80 | comparison );
}
-iWrite<s32>( (sptr)target - ((sptr)iGetPtr() + 4) );
+xWrite<s32>( (sptr)target - ((sptr)iGetPtr() + 4) );
}
}
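The hunk above is the heart of the new jump emitter: when the displacement fits in a signed byte it emits the two-byte short form (0xEB for unconditional, 0x70|cc for a conditional), otherwise it falls back to the near form (0xE9, or the 0x0F 0x80|cc pair) with a 32-bit relative displacement. Below is a standalone sketch of that same selection logic, written against a plain byte buffer rather than the emitter's own xWrite stream; emit_jcc_sketch and its parameters are invented for the illustration, and unlike the real iJccKnownTarget it takes the displacement pre-computed instead of re-deriving it for each encoding length.

#include <cstdint>
#include <vector>

// Hypothetical helper mirroring the short-vs-near branch selection shown above.
// cc is the x86 condition code (0x0..0xF); pass a negative value for an unconditional jump.
static void emit_jcc_sketch( std::vector<uint8_t>& buf, int cc, int32_t displacement )
{
	if( displacement >= -128 && displacement <= 127 )
	{
		// 2-byte short form: EB rel8 (jmp) or 7x rel8 (jcc)
		buf.push_back( (cc < 0) ? uint8_t(0xeb) : uint8_t(0x70 | cc) );
		buf.push_back( uint8_t(int8_t(displacement)) );
	}
	else
	{
		// 5/6-byte near form: E9 rel32 (jmp) or 0F 8x rel32 (jcc)
		if( cc < 0 )
			buf.push_back( 0xe9 );
		else
		{
			buf.push_back( 0x0f );
			buf.push_back( uint8_t(0x80 | cc) );
		}
		for( int i = 0; i < 4; ++i )		// little-endian rel32
			buf.push_back( uint8_t(uint32_t(displacement) >> (i * 8)) );
	}
}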
@@ -35,9 +35,9 @@

using namespace x86Emitter;

template< typename ImmType >
-static __forceinline iRegister<ImmType> _reghlp( x86IntRegType src )
+static __forceinline xRegister<ImmType> _reghlp( x86IntRegType src )
{
-return iRegister<ImmType>( src );
+return xRegister<ImmType>( src );
}

static __forceinline ModSibBase _mrmhlp( x86IntRegType src )
@@ -48,43 +48,43 @@ static __forceinline ModSibBase _mrmhlp( x86IntRegType src )
template< typename ImmType >
static __forceinline ModSibStrict<ImmType> _mhlp( x86IntRegType src )
{
-return ModSibStrict<ImmType>( iAddressReg::Empty, iAddressReg(src) );
+return ModSibStrict<ImmType>( xAddressReg::Empty, xAddressReg(src) );
}

template< typename ImmType >
static __forceinline ModSibStrict<ImmType> _mhlp2( x86IntRegType src1, x86IntRegType src2 )
{
-return ModSibStrict<ImmType>( iAddressReg(src2), iAddressReg(src1) );
+return ModSibStrict<ImmType>( xAddressReg(src2), xAddressReg(src1) );
}

//////////////////////////////////////////////////////////////////////////////////////////
//
#define DEFINE_LEGACY_HELPER( cod, bits ) \
-emitterT void cod##bits##RtoR( x86IntRegType to, x86IntRegType from ) { i##cod( _reghlp<u##bits>(to), _reghlp<u##bits>(from) ); } \
+emitterT void cod##bits##RtoR( x86IntRegType to, x86IntRegType from ) { x##cod( _reghlp<u##bits>(to), _reghlp<u##bits>(from) ); } \
-emitterT void cod##bits##ItoR( x86IntRegType to, u##bits imm ) { i##cod( _reghlp<u##bits>(to), imm ); } \
+emitterT void cod##bits##ItoR( x86IntRegType to, u##bits imm ) { x##cod( _reghlp<u##bits>(to), imm ); } \
-emitterT void cod##bits##MtoR( x86IntRegType to, uptr from ) { i##cod( _reghlp<u##bits>(to), (void*)from ); } \
+emitterT void cod##bits##MtoR( x86IntRegType to, uptr from ) { x##cod( _reghlp<u##bits>(to), (void*)from ); } \
-emitterT void cod##bits##RtoM( uptr to, x86IntRegType from ) { i##cod( (void*)to, _reghlp<u##bits>(from) ); } \
+emitterT void cod##bits##RtoM( uptr to, x86IntRegType from ) { x##cod( (void*)to, _reghlp<u##bits>(from) ); } \
-emitterT void cod##bits##ItoM( uptr to, u##bits imm ) { i##cod( ptr##bits[to], imm ); } \
+emitterT void cod##bits##ItoM( uptr to, u##bits imm ) { x##cod( ptr##bits[to], imm ); } \
-emitterT void cod##bits##ItoRm( x86IntRegType to, u##bits imm, int offset ) { i##cod( _mhlp<u##bits>(to) + offset, imm ); } \
+emitterT void cod##bits##ItoRm( x86IntRegType to, u##bits imm, int offset ) { x##cod( _mhlp<u##bits>(to) + offset, imm ); } \
-emitterT void cod##bits##RmtoR( x86IntRegType to, x86IntRegType from, int offset ) { i##cod( _reghlp<u##bits>(to), _mhlp<u##bits>(from) + offset ); } \
+emitterT void cod##bits##RmtoR( x86IntRegType to, x86IntRegType from, int offset ) { x##cod( _reghlp<u##bits>(to), _mhlp<u##bits>(from) + offset ); } \
-emitterT void cod##bits##RtoRm( x86IntRegType to, x86IntRegType from, int offset ) { i##cod( _mhlp<u##bits>(to) + offset, _reghlp<u##bits>(from) ); } \
+emitterT void cod##bits##RtoRm( x86IntRegType to, x86IntRegType from, int offset ) { x##cod( _mhlp<u##bits>(to) + offset, _reghlp<u##bits>(from) ); } \
emitterT void cod##bits##RtoRmS( x86IntRegType to1, x86IntRegType to2, x86IntRegType from, int offset ) \
-{ i##cod( _mhlp2<u##bits>(to1,to2) + offset, _reghlp<u##bits>(from) ); } \
+{ x##cod( _mhlp2<u##bits>(to1,to2) + offset, _reghlp<u##bits>(from) ); } \
emitterT void cod##bits##RmStoR( x86IntRegType to, x86IntRegType from1, x86IntRegType from2, int offset ) \
-{ i##cod( _reghlp<u##bits>(to), _mhlp2<u##bits>(from1,from2) + offset ); }
+{ x##cod( _reghlp<u##bits>(to), _mhlp2<u##bits>(from1,from2) + offset ); }

#define DEFINE_LEGACY_SHIFT_HELPER( cod, bits ) \
-emitterT void cod##bits##CLtoR( x86IntRegType to ) { i##cod( _reghlp<u##bits>(to), cl ); } \
+emitterT void cod##bits##CLtoR( x86IntRegType to ) { x##cod( _reghlp<u##bits>(to), cl ); } \
-emitterT void cod##bits##ItoR( x86IntRegType to, u8 imm ) { i##cod( _reghlp<u##bits>(to), imm ); } \
+emitterT void cod##bits##ItoR( x86IntRegType to, u8 imm ) { x##cod( _reghlp<u##bits>(to), imm ); } \
-emitterT void cod##bits##CLtoM( uptr to ) { i##cod( ptr##bits[to], cl ); } \
+emitterT void cod##bits##CLtoM( uptr to ) { x##cod( ptr##bits[to], cl ); } \
-emitterT void cod##bits##ItoM( uptr to, u8 imm ) { i##cod( ptr##bits[to], imm ); } \
+emitterT void cod##bits##ItoM( uptr to, u8 imm ) { x##cod( ptr##bits[to], imm ); } \
-emitterT void cod##bits##ItoRm( x86IntRegType to, u8 imm, int offset ) { i##cod( _mhlp<u##bits>(to) + offset, imm ); } \
+emitterT void cod##bits##ItoRm( x86IntRegType to, u8 imm, int offset ) { x##cod( _mhlp<u##bits>(to) + offset, imm ); } \
-emitterT void cod##bits##CLtoRm( x86IntRegType to, int offset ) { i##cod( _mhlp<u##bits>(to) + offset, cl ); }
+emitterT void cod##bits##CLtoRm( x86IntRegType to, int offset ) { x##cod( _mhlp<u##bits>(to) + offset, cl ); }

#define DEFINE_LEGACY_ONEREG_HELPER( cod, bits ) \
-emitterT void cod##bits##R( x86IntRegType to ) { i##cod( _reghlp<u##bits>(to) ); } \
+emitterT void cod##bits##R( x86IntRegType to ) { x##cod( _reghlp<u##bits>(to) ); } \
-emitterT void cod##bits##M( uptr to ) { i##cod( ptr##bits[to] ); } \
+emitterT void cod##bits##M( uptr to ) { x##cod( ptr##bits[to] ); } \
-emitterT void cod##bits##Rm( x86IntRegType to, uptr offset ) { i##cod( _mhlp<u##bits>(to) + offset ); }
+emitterT void cod##bits##Rm( x86IntRegType to, uptr offset ) { x##cod( _mhlp<u##bits>(to) + offset ); }

//emitterT void cod##bits##RtoRmS( x86IntRegType to1, x86IntRegType to2, x86IntRegType from, int offset ) \
// { cod( _mhlp2<u##bits>(to1,to2) + offset, _reghlp<u##bits>(from) ); } \
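To make the first of those macros concrete: after this change each legacy entry point is a one-line forwarder into the x-prefixed emitter. The expansion below is approximate and uses ADD purely as an illustration; the actual instantiation list is not part of the hunks shown here, and xADD is assumed to be one of the new x86Emitter operations.

// Approximate preprocessor output of DEFINE_LEGACY_HELPER( ADD, 32 ) -- illustrative only.
emitterT void ADD32RtoR( x86IntRegType to, x86IntRegType from ) { xADD( _reghlp<u32>(to), _reghlp<u32>(from) ); }
emitterT void ADD32ItoR( x86IntRegType to, u32 imm ) { xADD( _reghlp<u32>(to), imm ); }
emitterT void ADD32MtoR( x86IntRegType to, uptr from ) { xADD( _reghlp<u32>(to), (void*)from ); }
emitterT void ADD32RtoM( uptr to, x86IntRegType from ) { xADD( (void*)to, _reghlp<u32>(from) ); }
emitterT void ADD32ItoM( uptr to, u32 imm ) { xADD( ptr32[to], imm ); }
emitterT void ADD32ItoRm( x86IntRegType to, u32 imm, int offset ) { xADD( _mhlp<u32>(to) + offset, imm ); }
// ...and likewise for the RmtoR / RtoRm / RtoRmS / RmStoR forms.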
@@ -133,9 +133,9 @@ DEFINE_OPCODE_ONEREG_LEGACY( NEG )

// ------------------------------------------------------------------------
#define DEFINE_LEGACY_MOVEXTEND( form, destbits, srcbits ) \
-emitterT void MOV##form##destbits##R##srcbits##toR( x86IntRegType to, x86IntRegType from ) { iMOV##form( iRegister##destbits( to ), iRegister##srcbits( from ) ); } \
+emitterT void MOV##form##destbits##R##srcbits##toR( x86IntRegType to, x86IntRegType from ) { xMOV##form( xRegister##destbits( to ), xRegister##srcbits( from ) ); } \
-emitterT void MOV##form##destbits##Rm##srcbits##toR( x86IntRegType to, x86IntRegType from, int offset ) { iMOV##form( iRegister##destbits( to ), ptr##srcbits[iAddressReg( from ) + offset] ); } \
+emitterT void MOV##form##destbits##Rm##srcbits##toR( x86IntRegType to, x86IntRegType from, int offset ) { xMOV##form( xRegister##destbits( to ), ptr##srcbits[xAddressReg( from ) + offset] ); } \
-emitterT void MOV##form##destbits##M##srcbits##toR( x86IntRegType to, u32 from ) { iMOV##form( iRegister##destbits( to ), ptr##srcbits[from] ); }
+emitterT void MOV##form##destbits##M##srcbits##toR( x86IntRegType to, u32 from ) { xMOV##form( xRegister##destbits( to ), ptr##srcbits[from] ); }

DEFINE_LEGACY_MOVEXTEND( SX, 32, 16 )
DEFINE_LEGACY_MOVEXTEND( ZX, 32, 16 )
|
@ -145,164 +145,164 @@ DEFINE_LEGACY_MOVEXTEND( ZX, 32, 8 )
|
||||||
DEFINE_LEGACY_MOVEXTEND( SX, 16, 8 )
|
DEFINE_LEGACY_MOVEXTEND( SX, 16, 8 )
|
||||||
DEFINE_LEGACY_MOVEXTEND( ZX, 16, 8 )
|
DEFINE_LEGACY_MOVEXTEND( ZX, 16, 8 )
|
||||||
|
|
||||||
emitterT void TEST32ItoR( x86IntRegType to, u32 from ) { iTEST( iRegister32(to), from ); }
|
emitterT void TEST32ItoR( x86IntRegType to, u32 from ) { xTEST( xRegister32(to), from ); }
|
||||||
emitterT void TEST32ItoM( uptr to, u32 from ) { iTEST( ptr32[to], from ); }
|
emitterT void TEST32ItoM( uptr to, u32 from ) { xTEST( ptr32[to], from ); }
|
||||||
emitterT void TEST32RtoR( x86IntRegType to, x86IntRegType from ) { iTEST( iRegister32(to), iRegister32(from) ); }
|
emitterT void TEST32RtoR( x86IntRegType to, x86IntRegType from ) { xTEST( xRegister32(to), xRegister32(from) ); }
|
||||||
emitterT void TEST32ItoRm( x86IntRegType to, u32 from ) { iTEST( ptr32[iAddressReg(to)], from ); }
|
emitterT void TEST32ItoRm( x86IntRegType to, u32 from ) { xTEST( ptr32[xAddressReg(to)], from ); }
|
||||||
|
|
||||||
emitterT void TEST16ItoR( x86IntRegType to, u16 from ) { iTEST( iRegister16(to), from ); }
|
emitterT void TEST16ItoR( x86IntRegType to, u16 from ) { xTEST( xRegister16(to), from ); }
|
||||||
emitterT void TEST16ItoM( uptr to, u16 from ) { iTEST( ptr16[to], from ); }
|
emitterT void TEST16ItoM( uptr to, u16 from ) { xTEST( ptr16[to], from ); }
|
||||||
emitterT void TEST16RtoR( x86IntRegType to, x86IntRegType from ) { iTEST( iRegister16(to), iRegister16(from) ); }
|
emitterT void TEST16RtoR( x86IntRegType to, x86IntRegType from ) { xTEST( xRegister16(to), xRegister16(from) ); }
|
||||||
emitterT void TEST16ItoRm( x86IntRegType to, u16 from ) { iTEST( ptr16[iAddressReg(to)], from ); }
|
emitterT void TEST16ItoRm( x86IntRegType to, u16 from ) { xTEST( ptr16[xAddressReg(to)], from ); }
|
||||||
|
|
||||||
emitterT void TEST8ItoR( x86IntRegType to, u8 from ) { iTEST( iRegister8(to), from ); }
|
emitterT void TEST8ItoR( x86IntRegType to, u8 from ) { xTEST( xRegister8(to), from ); }
|
||||||
emitterT void TEST8ItoM( uptr to, u8 from ) { iTEST( ptr8[to], from ); }
|
emitterT void TEST8ItoM( uptr to, u8 from ) { xTEST( ptr8[to], from ); }
|
||||||
emitterT void TEST8RtoR( x86IntRegType to, x86IntRegType from ) { iTEST( iRegister8(to), iRegister8(from) ); }
|
emitterT void TEST8RtoR( x86IntRegType to, x86IntRegType from ) { xTEST( xRegister8(to), xRegister8(from) ); }
|
||||||
emitterT void TEST8ItoRm( x86IntRegType to, u8 from ) { iTEST( ptr8[iAddressReg(to)], from ); }
|
emitterT void TEST8ItoRm( x86IntRegType to, u8 from ) { xTEST( ptr8[xAddressReg(to)], from ); }
|
||||||
|
|
||||||
// mov r32 to [r32<<scale+from2]
|
// mov r32 to [r32<<scale+from2]
|
||||||
emitterT void MOV32RmSOffsettoR( x86IntRegType to, x86IntRegType from1, s32 from2, int scale )
|
emitterT void MOV32RmSOffsettoR( x86IntRegType to, x86IntRegType from1, s32 from2, int scale )
|
||||||
{
|
{
|
||||||
iMOV( iRegister32(to), ptr[(iAddressReg(from1)<<scale) + from2] );
|
xMOV( xRegister32(to), ptr[(xAddressReg(from1)<<scale) + from2] );
|
||||||
}
|
}
|
||||||
|
|
||||||
emitterT void MOV16RmSOffsettoR( x86IntRegType to, x86IntRegType from1, s32 from2, int scale )
|
emitterT void MOV16RmSOffsettoR( x86IntRegType to, x86IntRegType from1, s32 from2, int scale )
|
||||||
{
|
{
|
||||||
iMOV( iRegister16(to), ptr[(iAddressReg(from1)<<scale) + from2] );
|
xMOV( xRegister16(to), ptr[(xAddressReg(from1)<<scale) + from2] );
|
||||||
}
|
}
|
||||||
|
|
||||||
emitterT void MOV8RmSOffsettoR( x86IntRegType to, x86IntRegType from1, s32 from2, int scale )
|
emitterT void MOV8RmSOffsettoR( x86IntRegType to, x86IntRegType from1, s32 from2, int scale )
|
||||||
{
|
{
|
||||||
iMOV( iRegister8(to), ptr[(iAddressReg(from1)<<scale) + from2] );
|
xMOV( xRegister8(to), ptr[(xAddressReg(from1)<<scale) + from2] );
|
||||||
}
|
}
|
||||||
|
|
||||||
emitterT void AND32I8toR( x86IntRegType to, s8 from )
|
emitterT void AND32I8toR( x86IntRegType to, s8 from )
|
||||||
{
|
{
|
||||||
iAND( _reghlp<u32>(to), from );
|
xAND( _reghlp<u32>(to), from );
|
||||||
}
|
}
|
||||||
|
|
||||||
emitterT void AND32I8toM( uptr to, s8 from )
|
emitterT void AND32I8toM( uptr to, s8 from )
|
||||||
{
|
{
|
||||||
iAND( ptr8[to], from );
|
xAND( ptr8[to], from );
|
||||||
}
|
}
|
||||||
|
|
||||||
/* cmove r32 to r32*/
|
/* cmove r32 to r32*/
|
||||||
emitterT void CMOVE32RtoR( x86IntRegType to, x86IntRegType from )
|
emitterT void CMOVE32RtoR( x86IntRegType to, x86IntRegType from )
|
||||||
{
|
{
|
||||||
iCMOVE( iRegister32(to), iRegister32(from) );
|
xCMOVE( xRegister32(to), xRegister32(from) );
|
||||||
}
|
}
|
||||||
|
|
||||||
// shld imm8 to r32
|
// shld imm8 to r32
|
||||||
emitterT void SHLD32ItoR( x86IntRegType to, x86IntRegType from, u8 shift )
|
emitterT void SHLD32ItoR( x86IntRegType to, x86IntRegType from, u8 shift )
|
||||||
{
|
{
|
||||||
iSHLD( iRegister32(to), iRegister32(from), shift );
|
xSHLD( xRegister32(to), xRegister32(from), shift );
|
||||||
}
|
}
|
||||||
|
|
||||||
// shrd imm8 to r32
|
// shrd imm8 to r32
|
||||||
emitterT void SHRD32ItoR( x86IntRegType to, x86IntRegType from, u8 shift )
|
emitterT void SHRD32ItoR( x86IntRegType to, x86IntRegType from, u8 shift )
|
||||||
{
|
{
|
||||||
iSHRD( iRegister32(to), iRegister32(from), shift );
|
xSHRD( xRegister32(to), xRegister32(from), shift );
|
||||||
}
|
}
|
||||||
|
|
||||||
/* mul eax by r32 to edx:eax */
|
/* mul eax by r32 to edx:eax */
|
||||||
emitterT void MUL32R( x86IntRegType from ) { iUMUL( iRegister32(from) ); }
|
emitterT void MUL32R( x86IntRegType from ) { xUMUL( xRegister32(from) ); }
|
||||||
/* imul eax by r32 to edx:eax */
|
/* imul eax by r32 to edx:eax */
|
||||||
emitterT void IMUL32R( x86IntRegType from ) { iMUL( iRegister32(from) ); }
|
emitterT void IMUL32R( x86IntRegType from ) { xMUL( xRegister32(from) ); }
|
||||||
/* mul eax by m32 to edx:eax */
|
/* mul eax by m32 to edx:eax */
|
||||||
emitterT void MUL32M( u32 from ) { iUMUL( ptr32[from] ); }
|
emitterT void MUL32M( u32 from ) { xUMUL( ptr32[from] ); }
|
||||||
/* imul eax by m32 to edx:eax */
|
/* imul eax by m32 to edx:eax */
|
||||||
emitterT void IMUL32M( u32 from ) { iMUL( ptr32[from] ); }
|
emitterT void IMUL32M( u32 from ) { xMUL( ptr32[from] ); }
|
||||||
|
|
||||||
/* imul r32 by r32 to r32 */
|
/* imul r32 by r32 to r32 */
|
||||||
emitterT void IMUL32RtoR( x86IntRegType to, x86IntRegType from )
|
emitterT void IMUL32RtoR( x86IntRegType to, x86IntRegType from )
|
||||||
{
|
{
|
||||||
iMUL( iRegister32(to), iRegister32(from) );
|
xMUL( xRegister32(to), xRegister32(from) );
|
||||||
}
|
}
|
||||||
|
|
||||||
/* div eax by r32 to edx:eax */
|
/* div eax by r32 to edx:eax */
|
||||||
emitterT void DIV32R( x86IntRegType from ) { iUDIV( iRegister32(from) ); }
|
emitterT void DIV32R( x86IntRegType from ) { xUDIV( xRegister32(from) ); }
|
||||||
/* idiv eax by r32 to edx:eax */
|
/* idiv eax by r32 to edx:eax */
|
||||||
emitterT void IDIV32R( x86IntRegType from ) { iDIV( iRegister32(from) ); }
|
emitterT void IDIV32R( x86IntRegType from ) { xDIV( xRegister32(from) ); }
|
||||||
/* div eax by m32 to edx:eax */
|
/* div eax by m32 to edx:eax */
|
||||||
emitterT void DIV32M( u32 from ) { iUDIV( ptr32[from] ); }
|
emitterT void DIV32M( u32 from ) { xUDIV( ptr32[from] ); }
|
||||||
/* idiv eax by m32 to edx:eax */
|
/* idiv eax by m32 to edx:eax */
|
||||||
emitterT void IDIV32M( u32 from ) { iDIV( ptr32[from] ); }
|
emitterT void IDIV32M( u32 from ) { xDIV( ptr32[from] ); }
|
||||||
|
|
||||||
|
|
||||||
emitterT void LEA32RtoR(x86IntRegType to, x86IntRegType from, s32 offset)
|
emitterT void LEA32RtoR(x86IntRegType to, x86IntRegType from, s32 offset)
|
||||||
{
|
{
|
||||||
iLEA( iRegister32( to ), ptr[iAddressReg(from)+offset] );
|
xLEA( xRegister32( to ), ptr[xAddressReg(from)+offset] );
|
||||||
}
|
}
|
||||||
|
|
||||||
emitterT void LEA32RRtoR(x86IntRegType to, x86IntRegType from0, x86IntRegType from1)
|
emitterT void LEA32RRtoR(x86IntRegType to, x86IntRegType from0, x86IntRegType from1)
|
||||||
{
|
{
|
||||||
iLEA( iRegister32( to ), ptr[iAddressReg(from0)+iAddressReg(from1)] );
|
xLEA( xRegister32( to ), ptr[xAddressReg(from0)+xAddressReg(from1)] );
|
||||||
}
|
}
|
||||||
|
|
||||||
// Don't inline recursive functions
|
// Don't inline recursive functions
|
||||||
emitterT void LEA32RStoR(x86IntRegType to, x86IntRegType from, u32 scale)
|
emitterT void LEA32RStoR(x86IntRegType to, x86IntRegType from, u32 scale)
|
||||||
{
|
{
|
||||||
iLEA( iRegister32( to ), ptr[iAddressReg(from)*(1<<scale)] );
|
xLEA( xRegister32( to ), ptr[xAddressReg(from)*(1<<scale)] );
|
||||||
}
|
}
|
||||||
|
|
||||||
// to = from + offset
|
// to = from + offset
|
||||||
emitterT void LEA16RtoR(x86IntRegType to, x86IntRegType from, s16 offset)
|
emitterT void LEA16RtoR(x86IntRegType to, x86IntRegType from, s16 offset)
|
||||||
{
|
{
|
||||||
iLEA( iRegister16( to ), ptr[iAddressReg(from)+offset] );
|
xLEA( xRegister16( to ), ptr[xAddressReg(from)+offset] );
|
||||||
}
|
}
|
||||||
|
|
||||||
// to = from0 + from1
|
// to = from0 + from1
|
||||||
emitterT void LEA16RRtoR(x86IntRegType to, x86IntRegType from0, x86IntRegType from1)
|
emitterT void LEA16RRtoR(x86IntRegType to, x86IntRegType from0, x86IntRegType from1)
|
||||||
{
|
{
|
||||||
iLEA( iRegister16( to ), ptr[iAddressReg(from0)+iAddressReg(from1)] );
|
xLEA( xRegister16( to ), ptr[xAddressReg(from0)+xAddressReg(from1)] );
|
||||||
}
|
}
|
||||||
|
|
||||||
// to = from << scale (max is 3)
|
// to = from << scale (max is 3)
|
||||||
emitterT void LEA16RStoR(x86IntRegType to, x86IntRegType from, u32 scale)
|
emitterT void LEA16RStoR(x86IntRegType to, x86IntRegType from, u32 scale)
|
||||||
{
|
{
|
||||||
iLEA( iRegister16( to ), ptr[iAddressReg(from)*(1<<scale)] );
|
xLEA( xRegister16( to ), ptr[xAddressReg(from)*(1<<scale)] );
|
||||||
}
|
}
|
||||||
|
|
||||||
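The recurring pattern in these wrappers is that memory operands are now ordinary C++ expressions: xAddressReg overloads +, << and * so that ptr[...] can spell out base, scaled index and displacement directly, and ptr32/ptr16/ptr8 pin the operand size when no register operand implies it. A hedged sketch of the style follows; it assumes the eax/ecx register constants and the xMOV/xLEA overloads behave the way the wrappers above use them, combines forms the wrappers use separately, and the function itself is invented for the example.

// Illustrative only -- mirrors the operand style of the wrappers above.
using namespace x86Emitter;

static void address_form_examples( x86IntRegType base, x86IntRegType index, s32 disp )
{
	// eax <- dword [base + index*4 + disp]
	xMOV( eax, ptr[ xAddressReg(base) + (xAddressReg(index)<<2) + disp ] );

	// dword [base + disp] <- 1  (ptr32 makes the operand size explicit)
	xMOV( ptr32[ xAddressReg(base) + disp ], 1 );

	// ecx <- base*8, the same scaling trick used by LEA32RStoR above
	xLEA( ecx, ptr[ xAddressReg(base) * 8 ] );
}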
-emitterT void BT32ItoR( x86IntRegType to, u8 from ) { iBT( iRegister32(to), from ); }
+emitterT void BT32ItoR( x86IntRegType to, u8 from ) { xBT( xRegister32(to), from ); }
-emitterT void BTR32ItoR( x86IntRegType to, u8 from ) { iBTR( iRegister32(to), from ); }
+emitterT void BTR32ItoR( x86IntRegType to, u8 from ) { xBTR( xRegister32(to), from ); }

-emitterT void SETS8R( x86IntRegType to ) { iSETS( iRegister8(to) ); }
+emitterT void SETS8R( x86IntRegType to ) { xSETS( xRegister8(to) ); }
-emitterT void SETL8R( x86IntRegType to ) { iSETL( iRegister8(to) ); }
+emitterT void SETL8R( x86IntRegType to ) { xSETL( xRegister8(to) ); }
-emitterT void SETGE8R( x86IntRegType to ) { iSETGE( iRegister8(to) ); }
+emitterT void SETGE8R( x86IntRegType to ) { xSETGE( xRegister8(to) ); }
-emitterT void SETG8R( x86IntRegType to ) { iSETG( iRegister8(to) ); }
+emitterT void SETG8R( x86IntRegType to ) { xSETG( xRegister8(to) ); }
-emitterT void SETA8R( x86IntRegType to ) { iSETA( iRegister8(to) ); }
+emitterT void SETA8R( x86IntRegType to ) { xSETA( xRegister8(to) ); }
-emitterT void SETAE8R( x86IntRegType to ) { iSETAE( iRegister8(to) ); }
+emitterT void SETAE8R( x86IntRegType to ) { xSETAE( xRegister8(to) ); }
-emitterT void SETB8R( x86IntRegType to ) { iSETB( iRegister8(to) ); }
+emitterT void SETB8R( x86IntRegType to ) { xSETB( xRegister8(to) ); }
-emitterT void SETNZ8R( x86IntRegType to ) { iSETNZ( iRegister8(to) ); }
+emitterT void SETNZ8R( x86IntRegType to ) { xSETNZ( xRegister8(to) ); }
-emitterT void SETZ8R( x86IntRegType to ) { iSETZ( iRegister8(to) ); }
+emitterT void SETZ8R( x86IntRegType to ) { xSETZ( xRegister8(to) ); }
-emitterT void SETE8R( x86IntRegType to ) { iSETE( iRegister8(to) ); }
+emitterT void SETE8R( x86IntRegType to ) { xSETE( xRegister8(to) ); }

/* push imm32 */
-emitterT void PUSH32I( u32 from ) { iPUSH( from ); }
+emitterT void PUSH32I( u32 from ) { xPUSH( from ); }

/* push r32 */
-emitterT void PUSH32R( x86IntRegType from ) { iPUSH( iRegister32( from ) ); }
+emitterT void PUSH32R( x86IntRegType from ) { xPUSH( xRegister32( from ) ); }

/* push m32 */
emitterT void PUSH32M( u32 from )
{
-iPUSH( ptr[from] );
+xPUSH( ptr[from] );
}

/* pop r32 */
-emitterT void POP32R( x86IntRegType from ) { iPOP( iRegister32( from ) ); }
+emitterT void POP32R( x86IntRegType from ) { xPOP( xRegister32( from ) ); }
-emitterT void PUSHFD( void ) { iPUSHFD(); }
+emitterT void PUSHFD( void ) { xPUSHFD(); }
-emitterT void POPFD( void ) { iPOPFD(); }
+emitterT void POPFD( void ) { xPOPFD(); }

-emitterT void RET( void ) { iRET(); }
+emitterT void RET( void ) { xRET(); }

-emitterT void CBW( void ) { iCBW(); }
+emitterT void CBW( void ) { xCBW(); }
-emitterT void CWD( void ) { iCWD(); }
+emitterT void CWD( void ) { xCWD(); }
-emitterT void CDQ( void ) { iCDQ(); }
+emitterT void CDQ( void ) { xCDQ(); }
-emitterT void CWDE() { iCWDE(); }
+emitterT void CWDE() { xCWDE(); }

-emitterT void LAHF() { iLAHF(); }
+emitterT void LAHF() { xLAHF(); }
-emitterT void SAHF() { iSAHF(); }
+emitterT void SAHF() { xSAHF(); }

//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
@@ -437,9 +437,9 @@ emitterT void x86Align( int bytes )
/* IX86 instructions */
/********************/

-emitterT void STC( void ) { iSTC(); }
+emitterT void STC( void ) { xSTC(); }
-emitterT void CLC( void ) { iCLC(); }
+emitterT void CLC( void ) { xCLC(); }
-emitterT void NOP( void ) { iNOP(); }
+emitterT void NOP( void ) { xNOP(); }

////////////////////////////////////
// jump instructions /
@@ -465,13 +465,13 @@ emitterT u32* JMP32( uptr to )
/* jmp r32/r64 */
emitterT void JMPR( x86IntRegType to )
{
-iJMP( iRegister32(to) );
+xJMP( xRegister32(to) );
}

// jmp m32
emitterT void JMP32M( uptr to )
{
-iJMP( ptr32[to] );
+xJMP( ptr32[to] );
}

/* jp rel8 */
@@ -720,27 +720,27 @@ emitterT u32* JNO32( u32 to )
/* call func */
emitterT void CALLFunc( uptr func )
{
-iCALL( (void*)func );
+xCALL( (void*)func );
}

/* call r32 */
emitterT void CALL32R( x86IntRegType to )
{
-iCALL( iRegister32( to ) );
+xCALL( xRegister32( to ) );
}

/* call m32 */
emitterT void CALL32M( u32 to )
{
-iCALL( ptr32[to] );
+xCALL( ptr32[to] );
}

emitterT void BSRRtoR(x86IntRegType to, x86IntRegType from)
{
-iBSR( iRegister32(to), iRegister32(from) );
+xBSR( xRegister32(to), xRegister32(from) );
}

emitterT void BSWAP32R( x86IntRegType to )
{
-iBSWAP( iRegister32(to) );
+xBSWAP( xRegister32(to) );
}
@@ -27,26 +27,26 @@

using namespace x86Emitter;

-emitterT void MOVQMtoR( x86MMXRegType to, uptr from ) { iMOVQ( iRegisterMMX(to), (void*)from ); }
+emitterT void MOVQMtoR( x86MMXRegType to, uptr from ) { xMOVQ( xRegisterMMX(to), (void*)from ); }
-emitterT void MOVQRtoM( uptr to, x86MMXRegType from ) { iMOVQ( (void*)to, iRegisterMMX(from) ); }
+emitterT void MOVQRtoM( uptr to, x86MMXRegType from ) { xMOVQ( (void*)to, xRegisterMMX(from) ); }
-emitterT void MOVQRtoR( x86MMXRegType to, x86MMXRegType from ) { iMOVQ( iRegisterMMX(to), iRegisterMMX(from) ); }
+emitterT void MOVQRtoR( x86MMXRegType to, x86MMXRegType from ) { xMOVQ( xRegisterMMX(to), xRegisterMMX(from) ); }
-emitterT void MOVQRmtoR( x86MMXRegType to, x86IntRegType from, int offset ) { iMOVQ( iRegisterMMX(to), ptr[iAddressReg(from)+offset] ); }
+emitterT void MOVQRmtoR( x86MMXRegType to, x86IntRegType from, int offset ) { xMOVQ( xRegisterMMX(to), ptr[xAddressReg(from)+offset] ); }
-emitterT void MOVQRtoRm( x86IntRegType to, x86MMXRegType from, int offset ) { iMOVQ( ptr[iAddressReg(to)+offset], iRegisterMMX(from) ); }
+emitterT void MOVQRtoRm( x86IntRegType to, x86MMXRegType from, int offset ) { xMOVQ( ptr[xAddressReg(to)+offset], xRegisterMMX(from) ); }

-emitterT void MOVDMtoMMX( x86MMXRegType to, uptr from ) { iMOVDZX( iRegisterMMX(to), (void*)from ); }
+emitterT void MOVDMtoMMX( x86MMXRegType to, uptr from ) { xMOVDZX( xRegisterMMX(to), (void*)from ); }
-emitterT void MOVDMMXtoM( uptr to, x86MMXRegType from ) { iMOVD( (void*)to, iRegisterMMX(from) ); }
+emitterT void MOVDMMXtoM( uptr to, x86MMXRegType from ) { xMOVD( (void*)to, xRegisterMMX(from) ); }
-emitterT void MOVD32RtoMMX( x86MMXRegType to, x86IntRegType from ) { iMOVDZX( iRegisterMMX(to), iRegister32(from) ); }
+emitterT void MOVD32RtoMMX( x86MMXRegType to, x86IntRegType from ) { xMOVDZX( xRegisterMMX(to), xRegister32(from) ); }
-emitterT void MOVD32RmtoMMX( x86MMXRegType to, x86IntRegType from, int offset ) { iMOVDZX( iRegisterMMX(to), ptr[iAddressReg(from)+offset] ); }
+emitterT void MOVD32RmtoMMX( x86MMXRegType to, x86IntRegType from, int offset ) { xMOVDZX( xRegisterMMX(to), ptr[xAddressReg(from)+offset] ); }
-emitterT void MOVD32MMXtoR( x86IntRegType to, x86MMXRegType from ) { iMOVD( iRegister32(to), iRegisterMMX(from) ); }
+emitterT void MOVD32MMXtoR( x86IntRegType to, x86MMXRegType from ) { xMOVD( xRegister32(to), xRegisterMMX(from) ); }
-emitterT void MOVD32MMXtoRm( x86IntRegType to, x86MMXRegType from, int offset ) { iMOVD( ptr[iAddressReg(to)+offset], iRegisterMMX(from) ); }
+emitterT void MOVD32MMXtoRm( x86IntRegType to, x86MMXRegType from, int offset ) { xMOVD( ptr[xAddressReg(to)+offset], xRegisterMMX(from) ); }

-emitterT void PMOVMSKBMMXtoR(x86IntRegType to, x86MMXRegType from) { iPMOVMSKB( iRegister32(to), iRegisterMMX(from) ); }
+emitterT void PMOVMSKBMMXtoR(x86IntRegType to, x86MMXRegType from) { xPMOVMSKB( xRegister32(to), xRegisterMMX(from) ); }

#define DEFINE_LEGACY_LOGIC_OPCODE( mod ) \
-emitterT void P##mod##RtoR( x86MMXRegType to, x86MMXRegType from ) { iP##mod( iRegisterMMX(to), iRegisterMMX(from) ); } \
+emitterT void P##mod##RtoR( x86MMXRegType to, x86MMXRegType from ) { xP##mod( xRegisterMMX(to), xRegisterMMX(from) ); } \
-emitterT void P##mod##MtoR( x86MMXRegType to, uptr from ) { iP##mod( iRegisterMMX(to), (void*)from ); } \
+emitterT void P##mod##MtoR( x86MMXRegType to, uptr from ) { xP##mod( xRegisterMMX(to), (void*)from ); } \
-emitterT void SSE2_P##mod##_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { iP##mod( iRegisterSSE(to), iRegisterSSE(from) ); } \
+emitterT void SSE2_P##mod##_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { xP##mod( xRegisterSSE(to), xRegisterSSE(from) ); } \
-emitterT void SSE2_P##mod##_M128_to_XMM( x86SSERegType to, uptr from ) { iP##mod( iRegisterSSE(to), (void*)from ); }
+emitterT void SSE2_P##mod##_M128_to_XMM( x86SSERegType to, uptr from ) { xP##mod( xRegisterSSE(to), (void*)from ); }

DEFINE_LEGACY_LOGIC_OPCODE( AND )
DEFINE_LEGACY_LOGIC_OPCODE( ANDN )
@@ -460,4 +460,4 @@ emitterT void PSHUFWMtoR(x86MMXRegType to, uptr from, u8 imm8)
write8(imm8);
}

-emitterT void MASKMOVQRtoR(x86MMXRegType to, x86MMXRegType from) { iMASKMOV( iRegisterMMX(to), iRegisterMMX(from) ); }
+emitterT void MASKMOVQRtoR(x86MMXRegType to, x86MMXRegType from) { xMASKMOV( xRegisterMMX(to), xRegisterMMX(from) ); }
@@ -137,96 +137,95 @@ using namespace x86Emitter;
write8( op )

#define DEFINE_LEGACY_MOV_OPCODE( mod, sse ) \
-emitterT void sse##_MOV##mod##_M128_to_XMM( x86SSERegType to, uptr from ) { iMOV##mod( iRegisterSSE(to), (void*)from ); } \
+emitterT void sse##_MOV##mod##_M128_to_XMM( x86SSERegType to, uptr from ) { xMOV##mod( xRegisterSSE(to), (void*)from ); } \
-emitterT void sse##_MOV##mod##_XMM_to_M128( uptr to, x86SSERegType from ) { iMOV##mod( (void*)to, iRegisterSSE(from) ); } \
+emitterT void sse##_MOV##mod##_XMM_to_M128( uptr to, x86SSERegType from ) { xMOV##mod( (void*)to, xRegisterSSE(from) ); } \
-emitterT void sse##_MOV##mod##RmtoR( x86SSERegType to, x86IntRegType from, int offset ) { iMOV##mod( iRegisterSSE(to), ptr[iAddressReg(from)+offset] ); } \
+emitterT void sse##_MOV##mod##RmtoR( x86SSERegType to, x86IntRegType from, int offset ) { xMOV##mod( xRegisterSSE(to), ptr[xAddressReg(from)+offset] ); } \
-emitterT void sse##_MOV##mod##RtoRm( x86IntRegType to, x86SSERegType from, int offset ) { iMOV##mod( ptr[iAddressReg(to)+offset], iRegisterSSE(from) ); } \
+emitterT void sse##_MOV##mod##RtoRm( x86IntRegType to, x86SSERegType from, int offset ) { xMOV##mod( ptr[xAddressReg(to)+offset], xRegisterSSE(from) ); } \
emitterT void sse##_MOV##mod##RmStoR( x86SSERegType to, x86IntRegType from, x86IntRegType from2, int scale ) \
-{ iMOV##mod( iRegisterSSE(to), ptr[iAddressReg(from)+iAddressReg(from2)] ); } \
+{ xMOV##mod( xRegisterSSE(to), ptr[xAddressReg(from)+xAddressReg(from2)] ); } \
emitterT void sse##_MOV##mod##RtoRmS( x86IntRegType to, x86SSERegType from, x86IntRegType from2, int scale ) \
-{ iMOV##mod( ptr[iAddressReg(to)+iAddressReg(from2)], iRegisterSSE(from) ); }
+{ xMOV##mod( ptr[xAddressReg(to)+xAddressReg(from2)], xRegisterSSE(from) ); }

DEFINE_LEGACY_MOV_OPCODE( UPS, SSE )
DEFINE_LEGACY_MOV_OPCODE( APS, SSE )
-DEFINE_LEGACY_MOV_OPCODE( LPS, SSE )
-DEFINE_LEGACY_MOV_OPCODE( HPS, SSE )
DEFINE_LEGACY_MOV_OPCODE( DQA, SSE2 )
DEFINE_LEGACY_MOV_OPCODE( DQU, SSE2 )

//**********************************************************************************/
//MOVAPS: Move aligned Packed Single Precision FP values *
//**********************************************************************************

-emitterT void SSE_MOVAPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { iMOVAPS( iRegisterSSE(to), iRegisterSSE(from) ); }
+emitterT void SSE_MOVAPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { xMOVAPS( xRegisterSSE(to), xRegisterSSE(from) ); }

-emitterT void SSE2_MOVQ_M64_to_XMM( x86SSERegType to, uptr from ) { iMOVQZX( iRegisterSSE(to), (void*)from ); }
+emitterT void SSE2_MOVQ_M64_to_XMM( x86SSERegType to, uptr from ) { xMOVQZX( xRegisterSSE(to), (void*)from ); }
-emitterT void SSE2_MOVQ_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { iMOVQZX( iRegisterSSE(to), iRegisterSSE(from) ); }
+emitterT void SSE2_MOVQ_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { xMOVQZX( xRegisterSSE(to), xRegisterSSE(from) ); }
-emitterT void SSE2_MOVQ_XMM_to_M64( u32 to, x86SSERegType from ) { iMOVQ( (void*)to, iRegisterSSE(from) ); }
+emitterT void SSE2_MOVQ_XMM_to_M64( u32 to, x86SSERegType from ) { xMOVQ( (void*)to, xRegisterSSE(from) ); }
-emitterT void SSE2_MOVDQ2Q_XMM_to_MM( x86MMXRegType to, x86SSERegType from) { iMOVQ( iRegisterMMX(to), iRegisterSSE(from) ); }
+emitterT void SSE2_MOVDQ2Q_XMM_to_MM( x86MMXRegType to, x86SSERegType from) { xMOVQ( xRegisterMMX(to), xRegisterSSE(from) ); }
-emitterT void SSE2_MOVQ2DQ_MM_to_XMM( x86SSERegType to, x86MMXRegType from) { iMOVQ( iRegisterSSE(to), iRegisterMMX(from) ); }
+emitterT void SSE2_MOVQ2DQ_MM_to_XMM( x86SSERegType to, x86MMXRegType from) { xMOVQ( xRegisterSSE(to), xRegisterMMX(from) ); }

//**********************************************************************************/
//MOVSS: Move Scalar Single-Precision FP value *
//**********************************************************************************
-emitterT void SSE_MOVSS_M32_to_XMM( x86SSERegType to, uptr from ) { iMOVSSZX( iRegisterSSE(to), (void*)from ); }
+emitterT void SSE_MOVSS_M32_to_XMM( x86SSERegType to, uptr from ) { xMOVSSZX( xRegisterSSE(to), (void*)from ); }
-emitterT void SSE_MOVSS_XMM_to_M32( u32 to, x86SSERegType from ) { iMOVSS( (void*)to, iRegisterSSE(from) ); }
+emitterT void SSE_MOVSS_XMM_to_M32( u32 to, x86SSERegType from ) { xMOVSS( (void*)to, xRegisterSSE(from) ); }
-emitterT void SSE_MOVSS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { iMOVSS( iRegisterSSE(to), iRegisterSSE(from) ); }
+emitterT void SSE_MOVSS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { xMOVSS( xRegisterSSE(to), xRegisterSSE(from) ); }
-emitterT void SSE_MOVSS_Rm_to_XMM( x86SSERegType to, x86IntRegType from, int offset ) { iMOVSSZX( iRegisterSSE(to), ptr[iAddressReg(from)+offset] ); }
+emitterT void SSE_MOVSS_Rm_to_XMM( x86SSERegType to, x86IntRegType from, int offset ) { xMOVSSZX( xRegisterSSE(to), ptr[xAddressReg(from)+offset] ); }
-emitterT void SSE_MOVSS_XMM_to_Rm( x86IntRegType to, x86SSERegType from, int offset ) { iMOVSS( ptr[iAddressReg(to)+offset], iRegisterSSE(from) ); }
+emitterT void SSE_MOVSS_XMM_to_Rm( x86IntRegType to, x86SSERegType from, int offset ) { xMOVSS( ptr[xAddressReg(to)+offset], xRegisterSSE(from) ); }

-emitterT void SSE2_MOVSD_M32_to_XMM( x86SSERegType to, uptr from ) { iMOVSDZX( iRegisterSSE(to), (void*)from ); }
+emitterT void SSE2_MOVSD_M32_to_XMM( x86SSERegType to, uptr from ) { xMOVSDZX( xRegisterSSE(to), (void*)from ); }
-emitterT void SSE2_MOVSD_XMM_to_M32( u32 to, x86SSERegType from ) { iMOVSD( (void*)to, iRegisterSSE(from) ); }
+emitterT void SSE2_MOVSD_XMM_to_M32( u32 to, x86SSERegType from ) { xMOVSD( (void*)to, xRegisterSSE(from) ); }
-emitterT void SSE2_MOVSD_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { iMOVSD( iRegisterSSE(to), iRegisterSSE(from) ); }
+emitterT void SSE2_MOVSD_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { xMOVSD( xRegisterSSE(to), xRegisterSSE(from) ); }
-emitterT void SSE2_MOVSD_Rm_to_XMM( x86SSERegType to, x86IntRegType from, int offset ) { iMOVSDZX( iRegisterSSE(to), ptr[iAddressReg(from)+offset] ); }
+emitterT void SSE2_MOVSD_Rm_to_XMM( x86SSERegType to, x86IntRegType from, int offset ) { xMOVSDZX( xRegisterSSE(to), ptr[xAddressReg(from)+offset] ); }
-emitterT void SSE2_MOVSD_XMM_to_Rm( x86IntRegType to, x86SSERegType from, int offset ) { iMOVSD( ptr[iAddressReg(to)+offset], iRegisterSSE(from) ); }
+emitterT void SSE2_MOVSD_XMM_to_Rm( x86IntRegType to, x86SSERegType from, int offset ) { xMOVSD( ptr[xAddressReg(to)+offset], xRegisterSSE(from) ); }

-emitterT void SSE_MASKMOVDQU_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { iMASKMOV( iRegisterSSE(to), iRegisterSSE(from) ); }
+emitterT void SSE_MASKMOVDQU_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { xMASKMOV( xRegisterSSE(to), xRegisterSSE(from) ); }
//**********************************************************************************/
//MOVLPS: Move low Packed Single-Precision FP *
//**********************************************************************************
-emitterT void SSE_MOVLPS_M64_to_XMM( x86SSERegType to, uptr from ) { iMOVLPS( iRegisterSSE(to), (void*)from ); }
+emitterT void SSE_MOVLPS_M64_to_XMM( x86SSERegType to, uptr from ) { xMOVL.PS( xRegisterSSE(to), (void*)from ); }
-emitterT void SSE_MOVLPS_XMM_to_M64( u32 to, x86SSERegType from ) { iMOVLPS( (void*)to, iRegisterSSE(from) ); }
+emitterT void SSE_MOVLPS_XMM_to_M64( u32 to, x86SSERegType from ) { xMOVL.PS( (void*)to, xRegisterSSE(from) ); }
-emitterT void SSE_MOVLPS_Rm_to_XMM( x86SSERegType to, x86IntRegType from, int offset ) { iMOVLPS( iRegisterSSE(to), ptr[iAddressReg(from)+offset] ); }
+emitterT void SSE_MOVLPS_Rm_to_XMM( x86SSERegType to, x86IntRegType from, int offset ) { xMOVL.PS( xRegisterSSE(to), ptr[xAddressReg(from)+offset] ); }
-emitterT void SSE_MOVLPS_XMM_to_Rm( x86IntRegType to, x86SSERegType from, int offset ) { iMOVLPS( ptr[iAddressReg(to)+offset], iRegisterSSE(from) ); }
+emitterT void SSE_MOVLPS_XMM_to_Rm( x86IntRegType to, x86SSERegType from, int offset ) { xMOVL.PS( ptr[xAddressReg(to)+offset], xRegisterSSE(from) ); }

/////////////////////////////////////////////////////////////////////////////////////
//**********************************************************************************/
//MOVHPS: Move High Packed Single-Precision FP *
//**********************************************************************************
-emitterT void SSE_MOVHPS_M64_to_XMM( x86SSERegType to, uptr from ) { iMOVHPS( iRegisterSSE(to), (void*)from ); }
+emitterT void SSE_MOVHPS_M64_to_XMM( x86SSERegType to, uptr from ) { xMOVH.PS( xRegisterSSE(to), (void*)from ); }
-emitterT void SSE_MOVHPS_XMM_to_M64( u32 to, x86SSERegType from ) { iMOVHPS( (void*)to, iRegisterSSE(from) ); }
+emitterT void SSE_MOVHPS_XMM_to_M64( u32 to, x86SSERegType from ) { xMOVH.PS( (void*)to, xRegisterSSE(from) ); }
-emitterT void SSE_MOVHPS_Rm_to_XMM( x86SSERegType to, x86IntRegType from, int offset ) { iMOVHPS( iRegisterSSE(to), ptr[iAddressReg(from)+offset] ); }
+emitterT void SSE_MOVHPS_Rm_to_XMM( x86SSERegType to, x86IntRegType from, int offset ) { xMOVH.PS( xRegisterSSE(to), ptr[xAddressReg(from)+offset] ); }
-emitterT void SSE_MOVHPS_XMM_to_Rm( x86IntRegType to, x86SSERegType from, int offset ) { iMOVHPS( ptr[iAddressReg(to)+offset], iRegisterSSE(from) ); }
+emitterT void SSE_MOVHPS_XMM_to_Rm( x86IntRegType to, x86SSERegType from, int offset ) { xMOVH.PS( ptr[xAddressReg(to)+offset], xRegisterSSE(from) ); }

/////////////////////////////////////////////////////////////////////////////////////
//**********************************************************************************/
//MOVLHPS: Moved packed Single-Precision FP low to high *
//**********************************************************************************
-emitterT void SSE_MOVLHPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { iMOVLHPS( iRegisterSSE(to), iRegisterSSE(from) ); }
+emitterT void SSE_MOVLHPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { xMOVLH.PS( xRegisterSSE(to), xRegisterSSE(from) ); }

//////////////////////////////////////////////////////////////////////////////////////
//**********************************************************************************/
//MOVHLPS: Moved packed Single-Precision FP High to Low *
//**********************************************************************************
-emitterT void SSE_MOVHLPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { iMOVHLPS( iRegisterSSE(to), iRegisterSSE(from) ); }
+emitterT void SSE_MOVHLPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { xMOVHL.PS( xRegisterSSE(to), xRegisterSSE(from) ); }

-emitterT void SSE2_PMOVMSKB_XMM_to_R32(x86IntRegType to, x86SSERegType from) { iPMOVMSKB( iRegister32(to), iRegisterSSE(from) ); }
+emitterT void SSE2_PMOVMSKB_XMM_to_R32(x86IntRegType to, x86SSERegType from) { xPMOVMSKB( xRegister32(to), xRegisterSSE(from) ); }

#define DEFINE_LEGACY_PSD_OPCODE( mod ) \
-emitterT void SSE_##mod##PS_M128_to_XMM( x86SSERegType to, uptr from ) { i##mod.PS( iRegisterSSE(to), (void*)from ); } \
+emitterT void SSE_##mod##PS_M128_to_XMM( x86SSERegType to, uptr from ) { x##mod.PS( xRegisterSSE(to), (void*)from ); } \
-emitterT void SSE_##mod##PS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { i##mod.PS( iRegisterSSE(to), iRegisterSSE(from) ); } \
+emitterT void SSE_##mod##PS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { x##mod.PS( xRegisterSSE(to), xRegisterSSE(from) ); } \
-emitterT void SSE2_##mod##PD_M128_to_XMM( x86SSERegType to, uptr from ) { i##mod.PD( iRegisterSSE(to), (void*)from ); } \
+emitterT void SSE2_##mod##PD_M128_to_XMM( x86SSERegType to, uptr from ) { x##mod.PD( xRegisterSSE(to), (void*)from ); } \
-emitterT void SSE2_##mod##PD_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { i##mod.PD( iRegisterSSE(to), iRegisterSSE(from) ); }
+emitterT void SSE2_##mod##PD_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { x##mod.PD( xRegisterSSE(to), xRegisterSSE(from) ); }

#define DEFINE_LEGACY_PSSD_OPCODE( mod ) \
DEFINE_LEGACY_PSD_OPCODE( mod ) \
-emitterT void SSE_##mod##SS_M32_to_XMM( x86SSERegType to, uptr from ) { i##mod.SS( iRegisterSSE(to), (void*)from ); } \
+emitterT void SSE_##mod##SS_M32_to_XMM( x86SSERegType to, uptr from ) { x##mod.SS( xRegisterSSE(to), (void*)from ); } \
-emitterT void SSE_##mod##SS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { i##mod.SS( iRegisterSSE(to), iRegisterSSE(from) ); } \
+emitterT void SSE_##mod##SS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { x##mod.SS( xRegisterSSE(to), xRegisterSSE(from) ); } \
-emitterT void SSE2_##mod##SD_M32_to_XMM( x86SSERegType to, uptr from ) { i##mod.SD( iRegisterSSE(to), (void*)from ); } \
+emitterT void SSE2_##mod##SD_M32_to_XMM( x86SSERegType to, uptr from ) { x##mod.SD( xRegisterSSE(to), (void*)from ); } \
-emitterT void SSE2_##mod##SD_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { i##mod.SD( iRegisterSSE(to), iRegisterSSE(from) ); }
+emitterT void SSE2_##mod##SD_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { x##mod.SD( xRegisterSSE(to), xRegisterSSE(from) ); }

DEFINE_LEGACY_PSD_OPCODE( AND )
DEFINE_LEGACY_PSD_OPCODE( ANDN )
@ -238,76 +237,35 @@ DEFINE_LEGACY_PSSD_OPCODE( ADD )
|
||||||
DEFINE_LEGACY_PSSD_OPCODE( MUL )
|
DEFINE_LEGACY_PSSD_OPCODE( MUL )
|
||||||
DEFINE_LEGACY_PSSD_OPCODE( DIV )
|
DEFINE_LEGACY_PSSD_OPCODE( DIV )
|
||||||
|
|
||||||
emitterT void SSE_RCPPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { iRCPPS( iRegisterSSE(to), iRegisterSSE(from) ); }
|
emitterT void SSE_RCPPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { xRCPPS( xRegisterSSE(to), xRegisterSSE(from) ); }
|
||||||
emitterT void SSE_RCPPS_M128_to_XMM( x86SSERegType to, uptr from ) { iRCPPS( iRegisterSSE(to), (void*)from ); }
|
emitterT void SSE_RCPPS_M128_to_XMM( x86SSERegType to, uptr from ) { xRCPPS( xRegisterSSE(to), (void*)from ); }
|
||||||
|
|
||||||
emitterT void SSE_RCPSS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { iRCPSS( iRegisterSSE(to), iRegisterSSE(from) ); }
|
emitterT void SSE_RCPSS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { xRCPSS( xRegisterSSE(to), xRegisterSSE(from) ); }
|
||||||
emitterT void SSE_RCPSS_M32_to_XMM( x86SSERegType to, uptr from ) { iRCPSS( iRegisterSSE(to), (void*)from ); }
|
emitterT void SSE_RCPSS_M32_to_XMM( x86SSERegType to, uptr from ) { xRCPSS( xRegisterSSE(to), (void*)from ); }
|
||||||
|
|
||||||
 ////////////////////////////////////////////////////////////////////////////////////////////
 //**********************************************************************************/
 //Packed Single-Precision FP compare (CMPccPS) *
 //**********************************************************************************
-//missing SSE_CMPPS_I8_to_XMM
-// SSE_CMPPS_M32_to_XMM
-// SSE_CMPPS_XMM_to_XMM
-emitterT void SSE_CMPEQPS_M128_to_XMM( x86SSERegType to, uptr from ) { CMPPSMtoR( 0 ); }
-emitterT void SSE_CMPEQPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { CMPPSRtoR( 0 ); }
-emitterT void SSE_CMPLTPS_M128_to_XMM( x86SSERegType to, uptr from ) { CMPPSMtoR( 1 ); }
-emitterT void SSE_CMPLTPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { CMPPSRtoR( 1 ); }
-emitterT void SSE_CMPLEPS_M128_to_XMM( x86SSERegType to, uptr from ) { CMPPSMtoR( 2 ); }
-emitterT void SSE_CMPLEPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { CMPPSRtoR( 2 ); }
-emitterT void SSE_CMPUNORDPS_M128_to_XMM( x86SSERegType to, uptr from ) { CMPPSMtoR( 3 ); }
-emitterT void SSE_CMPUNORDPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { CMPPSRtoR( 3 ); }
-emitterT void SSE_CMPNEPS_M128_to_XMM( x86SSERegType to, uptr from ) { CMPPSMtoR( 4 ); }
-emitterT void SSE_CMPNEPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { CMPPSRtoR( 4 ); }
-emitterT void SSE_CMPNLTPS_M128_to_XMM( x86SSERegType to, uptr from ) { CMPPSMtoR( 5 ); }
-emitterT void SSE_CMPNLTPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { CMPPSRtoR( 5 ); }
-emitterT void SSE_CMPNLEPS_M128_to_XMM( x86SSERegType to, uptr from ) { CMPPSMtoR( 6 ); }
-emitterT void SSE_CMPNLEPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { CMPPSRtoR( 6 ); }
-emitterT void SSE_CMPORDPS_M128_to_XMM( x86SSERegType to, uptr from ) { CMPPSMtoR( 7 ); }
-emitterT void SSE_CMPORDPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { CMPPSRtoR( 7 ); }
-
-///////////////////////////////////////////////////////////////////////////////////////////
-//**********************************************************************************/
-//Scalar Single-Precision FP compare (CMPccSS) *
-//**********************************************************************************
-//missing SSE_CMPSS_I8_to_XMM
-// SSE_CMPSS_M32_to_XMM
-// SSE_CMPSS_XMM_to_XMM
-emitterT void SSE_CMPEQSS_M32_to_XMM( x86SSERegType to, uptr from ) { CMPSSMtoR( 0 ); }
-emitterT void SSE_CMPEQSS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { CMPSSRtoR( 0 ); }
-emitterT void SSE_CMPLTSS_M32_to_XMM( x86SSERegType to, uptr from ) { CMPSSMtoR( 1 ); }
-emitterT void SSE_CMPLTSS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { CMPSSRtoR( 1 ); }
-emitterT void SSE_CMPLESS_M32_to_XMM( x86SSERegType to, uptr from ) { CMPSSMtoR( 2 ); }
-emitterT void SSE_CMPLESS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { CMPSSRtoR( 2 ); }
-emitterT void SSE_CMPUNORDSS_M32_to_XMM( x86SSERegType to, uptr from ) { CMPSSMtoR( 3 ); }
-emitterT void SSE_CMPUNORDSS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { CMPSSRtoR( 3 ); }
-emitterT void SSE_CMPNESS_M32_to_XMM( x86SSERegType to, uptr from ) { CMPSSMtoR( 4 ); }
-emitterT void SSE_CMPNESS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { CMPSSRtoR( 4 ); }
-emitterT void SSE_CMPNLTSS_M32_to_XMM( x86SSERegType to, uptr from ) { CMPSSMtoR( 5 ); }
-emitterT void SSE_CMPNLTSS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { CMPSSRtoR( 5 ); }
-emitterT void SSE_CMPNLESS_M32_to_XMM( x86SSERegType to, uptr from ) { CMPSSMtoR( 6 ); }
-emitterT void SSE_CMPNLESS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { CMPSSRtoR( 6 ); }
-emitterT void SSE_CMPORDSS_M32_to_XMM( x86SSERegType to, uptr from ) { CMPSSMtoR( 7 ); }
-emitterT void SSE_CMPORDSS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { CMPSSRtoR( 7 ); }
-
-emitterT void SSE2_CMPEQSD_M64_to_XMM( x86SSERegType to, uptr from ) { CMPSDMtoR( 0 ); }
-emitterT void SSE2_CMPEQSD_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { CMPSDRtoR( 0 ); }
-emitterT void SSE2_CMPLTSD_M64_to_XMM( x86SSERegType to, uptr from ) { CMPSDMtoR( 1 ); }
-emitterT void SSE2_CMPLTSD_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { CMPSDRtoR( 1 ); }
-emitterT void SSE2_CMPLESD_M64_to_XMM( x86SSERegType to, uptr from ) { CMPSDMtoR( 2 ); }
-emitterT void SSE2_CMPLESD_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { CMPSDRtoR( 2 ); }
-emitterT void SSE2_CMPUNORDSD_M64_to_XMM( x86SSERegType to, uptr from ) { CMPSDMtoR( 3 ); }
-emitterT void SSE2_CMPUNORDSD_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { CMPSDRtoR( 3 ); }
-emitterT void SSE2_CMPNESD_M64_to_XMM( x86SSERegType to, uptr from ) { CMPSDMtoR( 4 ); }
-emitterT void SSE2_CMPNESD_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { CMPSDRtoR( 4 ); }
-emitterT void SSE2_CMPNLTSD_M64_to_XMM( x86SSERegType to, uptr from ) { CMPSDMtoR( 5 ); }
-emitterT void SSE2_CMPNLTSD_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { CMPSDRtoR( 5 ); }
-emitterT void SSE2_CMPNLESD_M64_to_XMM( x86SSERegType to, uptr from ) { CMPSDMtoR( 6 ); }
-emitterT void SSE2_CMPNLESD_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { CMPSDRtoR( 6 ); }
-emitterT void SSE2_CMPORDSD_M64_to_XMM( x86SSERegType to, uptr from ) { CMPSDMtoR( 7 ); }
-emitterT void SSE2_CMPORDSD_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { CMPSDRtoR( 7 ); }
+#define DEFINE_LEGACY_CMP_OPCODE( comp ) \
+emitterT void SSE_CMP##comp##PS_M128_to_XMM( x86SSERegType to, uptr from ) { xCMP##comp.PS( xRegisterSSE(to), (void*)from ); } \
+emitterT void SSE_CMP##comp##PS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { xCMP##comp.PS( xRegisterSSE(to), xRegisterSSE(from) ); } \
+emitterT void SSE2_CMP##comp##PD_M128_to_XMM( x86SSERegType to, uptr from ) { xCMP##comp.PD( xRegisterSSE(to), (void*)from ); } \
+emitterT void SSE2_CMP##comp##PD_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { xCMP##comp.PD( xRegisterSSE(to), xRegisterSSE(from) ); } \
+emitterT void SSE_CMP##comp##SS_M128_to_XMM( x86SSERegType to, uptr from ) { xCMP##comp.SS( xRegisterSSE(to), (void*)from ); } \
+emitterT void SSE_CMP##comp##SS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { xCMP##comp.SS( xRegisterSSE(to), xRegisterSSE(from) ); } \
+emitterT void SSE2_CMP##comp##SD_M128_to_XMM( x86SSERegType to, uptr from ) { xCMP##comp.SD( xRegisterSSE(to), (void*)from ); } \
+emitterT void SSE2_CMP##comp##SD_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { xCMP##comp.SD( xRegisterSSE(to), xRegisterSSE(from) ); }
+
+DEFINE_LEGACY_CMP_OPCODE( EQ )
+DEFINE_LEGACY_CMP_OPCODE( LT )
+DEFINE_LEGACY_CMP_OPCODE( LE )
+DEFINE_LEGACY_CMP_OPCODE( UNORD )
+DEFINE_LEGACY_CMP_OPCODE( NE )
+DEFINE_LEGACY_CMP_OPCODE( NLT )
+DEFINE_LEGACY_CMP_OPCODE( NLE )
+DEFINE_LEGACY_CMP_OPCODE( ORD )
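DEFINE_LEGACY_CMP_OPCODE collapses the hand-written comparison wrappers into one macro per predicate: DEFINE_LEGACY_CMP_OPCODE( EQ ) regenerates the removed SSE_CMPEQPS/SS and SSE2_CMPEQPD/SD entry points, now routed through xCMPEQ. A sketch of one of the eight functions it produces (reformatted for readability, not verbatim macro output):

    // One of the functions generated by DEFINE_LEGACY_CMP_OPCODE( EQ ):
    emitterT void SSE_CMPEQPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from )
    {
        xCMPEQ.PS( xRegisterSSE(to), xRegisterSSE(from) );
    }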
 emitterT void SSE_UCOMISS_M32_to_XMM( x86SSERegType to, uptr from )
 {

@@ -642,7 +600,7 @@ emitterT void SSE2_MOVD_XMM_to_Rm( x86IntRegType to, x86SSERegType from, int off

 ///////////////////////////////////////////////////////////////////////////////////////

-emitterT void SSE2_MOVDQA_XMM_to_XMM( x86SSERegType to, x86SSERegType from) { iMOVDQA( iRegisterSSE(to), iRegisterSSE(from) ); }
+emitterT void SSE2_MOVDQA_XMM_to_XMM( x86SSERegType to, x86SSERegType from) { xMOVDQA( xRegisterSSE(to), xRegisterSSE(from) ); }

 // shift right logical
@@ -115,7 +115,7 @@ template< typename T >
 static __forceinline bool is_s8( T imm ) { return (s8)imm == (s32)imm; }

 template< typename T >
-static __forceinline void iWrite( T val )
+static __forceinline void xWrite( T val )
 {
 *(T*)x86Ptr = val;
 x86Ptr += sizeof(T);

@@ -159,7 +159,7 @@ namespace x86Emitter
 static const int ModRm_UseSib = 4; // same index value as ESP (used in RM field)
 static const int ModRm_UseDisp32 = 5; // same index value as EBP (used in Mod field)

-class iAddressInfo;
+class xAddressInfo;
 class ModSibBase;

 extern void iSetPtr( void* ptr );

@@ -170,12 +170,12 @@ namespace x86Emitter

 static __forceinline void write8( u8 val )
 {
-iWrite( val );
+xWrite( val );
 }

 static __forceinline void write16( u16 val )
 {
-iWrite( val );
+xWrite( val );
 }

 static __forceinline void write24( u32 val )

@@ -186,30 +186,30 @@ namespace x86Emitter

 static __forceinline void write32( u32 val )
 {
-iWrite( val );
+xWrite( val );
 }

 static __forceinline void write64( u64 val )
 {
-iWrite( val );
+xWrite( val );
 }
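The templated xWrite<T> is the primitive the rest of the emitter sits on: it stores the value at x86Ptr and advances the pointer by sizeof(T), and write8/16/32/64 are thin typed wrappers over it. A minimal usage sketch (the byte values are arbitrary examples):

    xWrite<u8>( 0x90 );      // emits one byte at the current code pointer, same as write8( 0x90 )
    write32( 0xDEADBEEF );   // stores four bytes via xWrite<u32> and advances x86Ptr by 4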
 //////////////////////////////////////////////////////////////////////////////////////////
-// iRegister
-// Unless templating some fancy stuff, use the friendly iRegister32/16/8 typedefs instead.
+// xRegister
+// Unless templating some fancy stuff, use the friendly xRegister32/16/8 typedefs instead.
 //
 template< typename OperandType >
-class iRegister
+class xRegister
 {
 public:
 static const uint OperandSize = sizeof( OperandType );
-static const iRegister Empty; // defined as an empty/unused value (-1)
+static const xRegister Empty; // defined as an empty/unused value (-1)

 int Id;

-iRegister( const iRegister<OperandType>& src ) : Id( src.Id ) {}
-iRegister(): Id( -1 ) {}
-explicit iRegister( int regId ) : Id( regId ) { jASSUME( Id >= -1 && Id < 8 ); }
+xRegister( const xRegister<OperandType>& src ) : Id( src.Id ) {}
+xRegister(): Id( -1 ) {}
+explicit xRegister( int regId ) : Id( regId ) { jASSUME( Id >= -1 && Id < 8 ); }

 bool IsEmpty() const { return Id < 0; }

@@ -219,17 +219,17 @@ namespace x86Emitter
 // returns true if the register is a valid MMX or XMM register.
 bool IsSIMD() const { return OperandSize == 8 || OperandSize == 16; }

-bool operator==( const iRegister<OperandType>& src ) const
+bool operator==( const xRegister<OperandType>& src ) const
 {
 return (Id == src.Id);
 }

-bool operator!=( const iRegister<OperandType>& src ) const
+bool operator!=( const xRegister<OperandType>& src ) const
 {
 return (Id != src.Id);
 }

-iRegister<OperandType>& operator=( const iRegister<OperandType>& src )
+xRegister<OperandType>& operator=( const xRegister<OperandType>& src )
 {
 Id = src.Id;
 return *this;
@@ -239,20 +239,20 @@ namespace x86Emitter
 //////////////////////////////////////////////////////////////////////////////////////////
 //
 template< typename OperandType >
-class iRegisterSIMD : public iRegister<OperandType>
+class xRegisterSIMD : public xRegister<OperandType>
 {
 public:
-static const iRegisterSIMD Empty; // defined as an empty/unused value (-1)
+static const xRegisterSIMD Empty; // defined as an empty/unused value (-1)

 public:
-iRegisterSIMD(): iRegister<OperandType>() {}
-iRegisterSIMD( const iRegisterSIMD& src ) : iRegister<OperandType>( src.Id ) {}
-iRegisterSIMD( const iRegister<OperandType>& src ) : iRegister<OperandType>( src ) {}
-explicit iRegisterSIMD( int regId ) : iRegister<OperandType>( regId ) {}
+xRegisterSIMD(): xRegister<OperandType>() {}
+xRegisterSIMD( const xRegisterSIMD& src ) : xRegister<OperandType>( src.Id ) {}
+xRegisterSIMD( const xRegister<OperandType>& src ) : xRegister<OperandType>( src ) {}
+explicit xRegisterSIMD( int regId ) : xRegister<OperandType>( regId ) {}

-iRegisterSIMD<OperandType>& operator=( const iRegisterSIMD<OperandType>& src )
+xRegisterSIMD<OperandType>& operator=( const xRegisterSIMD<OperandType>& src )
 {
-iRegister<OperandType>::Id = src.Id;
+xRegister<OperandType>::Id = src.Id;
 return *this;
 }
 };
@@ -266,66 +266,66 @@ namespace x86Emitter
 // all about the the templated code in haphazard fashion. Yay.. >_<
 //

-typedef iRegisterSIMD<u128> iRegisterSSE;
-typedef iRegisterSIMD<u64> iRegisterMMX;
-typedef iRegister<u32> iRegister32;
-typedef iRegister<u16> iRegister16;
-typedef iRegister<u8> iRegister8;
+typedef xRegisterSIMD<u128> xRegisterSSE;
+typedef xRegisterSIMD<u64> xRegisterMMX;
+typedef xRegister<u32> xRegister32;
+typedef xRegister<u16> xRegister16;
+typedef xRegister<u8> xRegister8;

-class iRegisterCL : public iRegister8
+class xRegisterCL : public xRegister8
 {
 public:
-iRegisterCL(): iRegister8( 1 ) {}
+xRegisterCL(): xRegister8( 1 ) {}
 };

-extern const iRegisterSSE
+extern const xRegisterSSE
 xmm0, xmm1, xmm2, xmm3,
 xmm4, xmm5, xmm6, xmm7;

-extern const iRegisterMMX
+extern const xRegisterMMX
 mm0, mm1, mm2, mm3,
 mm4, mm5, mm6, mm7;

-extern const iRegister32
+extern const xRegister32
 eax, ebx, ecx, edx,
 esi, edi, ebp, esp;

-extern const iRegister16
+extern const xRegister16
 ax, bx, cx, dx,
 si, di, bp, sp;

-extern const iRegister8
+extern const xRegister8
 al, dl, bl,
 ah, ch, dh, bh;

-extern const iRegisterCL cl; // I'm special!
+extern const xRegisterCL cl; // I'm special!
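The named constants above are what replace raw integer register indices in the new emitter. A small sketch of wrapping a legacy index in the typed form; the assumption here (not shown in this hunk) is that the predefined constants carry the usual x86 encodings, e.g. eax.Id == 0:

    xRegister32 reg( 0 );        // wraps legacy register index 0 in the typed form
    jASSUME( reg == eax );       // operator== compares the underlying Id (assumes eax.Id == 0)
    jASSUME( !reg.IsEmpty() );   // empty/unused registers carry Id == -1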
 //////////////////////////////////////////////////////////////////////////////////////////
 // Use 32 bit registers as out index register (for ModSib memory address calculations)
-// Only iAddressReg provides operators for constructing iAddressInfo types.
+// Only xAddressReg provides operators for constructing xAddressInfo types.
 //
-class iAddressReg : public iRegister32
+class xAddressReg : public xRegister32
 {
 public:
-static const iAddressReg Empty; // defined as an empty/unused value (-1)
+static const xAddressReg Empty; // defined as an empty/unused value (-1)

 public:
-iAddressReg(): iRegister32() {}
-iAddressReg( const iAddressReg& src ) : iRegister32( src.Id ) {}
-iAddressReg( const iRegister32& src ) : iRegister32( src ) {}
-explicit iAddressReg( int regId ) : iRegister32( regId ) {}
+xAddressReg(): xRegister32() {}
+xAddressReg( const xAddressReg& src ) : xRegister32( src.Id ) {}
+xAddressReg( const xRegister32& src ) : xRegister32( src ) {}
+explicit xAddressReg( int regId ) : xRegister32( regId ) {}

 // Returns true if the register is the stack pointer: ESP.
 bool IsStackPointer() const { return Id == 4; }

-iAddressInfo operator+( const iAddressReg& right ) const;
-iAddressInfo operator+( const iAddressInfo& right ) const;
-iAddressInfo operator+( s32 right ) const;
+xAddressInfo operator+( const xAddressReg& right ) const;
+xAddressInfo operator+( const xAddressInfo& right ) const;
+xAddressInfo operator+( s32 right ) const;

-iAddressInfo operator*( u32 factor ) const;
-iAddressInfo operator<<( u32 shift ) const;
+xAddressInfo operator*( u32 factor ) const;
+xAddressInfo operator<<( u32 shift ) const;

-iAddressReg& operator=( const iRegister32& src )
+xAddressReg& operator=( const xRegister32& src )
 {
 Id = src.Id;
 return *this;
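The operators above are what allow memory operands to be written algebraically instead of through raw ModRM helpers. A sketch of composing one such expression (the variable names are illustrative):

    xAddressReg base( esi ), idx( edi );
    xAddressInfo addr = base + idx*4 + 0x20;   // roughly: Base=esi, Index=edi, Factor=4, Displacement=0x20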
@@ -334,16 +334,16 @@ namespace x86Emitter

 //////////////////////////////////////////////////////////////////////////////////////////
 //
-class iAddressInfo
+class xAddressInfo
 {
 public:
-iAddressReg Base; // base register (no scale)
-iAddressReg Index; // index reg gets multiplied by the scale
+xAddressReg Base; // base register (no scale)
+xAddressReg Index; // index reg gets multiplied by the scale
 int Factor; // scale applied to the index register, in factor form (not a shift!)
 s32 Displacement; // address displacement

 public:
-__forceinline iAddressInfo( const iAddressReg& base, const iAddressReg& index, int factor=1, s32 displacement=0 ) :
+__forceinline xAddressInfo( const xAddressReg& base, const xAddressReg& index, int factor=1, s32 displacement=0 ) :
 Base( base ),
 Index( index ),
 Factor( factor ),

@@ -351,7 +351,7 @@ namespace x86Emitter
 {
 }

-__forceinline explicit iAddressInfo( const iAddressReg& index, int displacement=0 ) :
+__forceinline explicit xAddressInfo( const xAddressReg& index, int displacement=0 ) :
 Base(),
 Index( index ),
 Factor(0),

@@ -359,7 +359,7 @@ namespace x86Emitter
 {
 }

-__forceinline explicit iAddressInfo( s32 displacement ) :
+__forceinline explicit xAddressInfo( s32 displacement ) :
 Base(),
 Index(),
 Factor(0),

@@ -367,24 +367,24 @@ namespace x86Emitter
 {
 }

-static iAddressInfo FromIndexReg( const iAddressReg& index, int scale=0, s32 displacement=0 );
+static xAddressInfo FromIndexReg( const xAddressReg& index, int scale=0, s32 displacement=0 );

 public:
 bool IsByteSizeDisp() const { return is_s8( Displacement ); }

-__forceinline iAddressInfo& Add( s32 imm )
+__forceinline xAddressInfo& Add( s32 imm )
 {
 Displacement += imm;
 return *this;
 }

-__forceinline iAddressInfo& Add( const iAddressReg& src );
-__forceinline iAddressInfo& Add( const iAddressInfo& src );
+__forceinline xAddressInfo& Add( const xAddressReg& src );
+__forceinline xAddressInfo& Add( const xAddressInfo& src );

-__forceinline iAddressInfo operator+( const iAddressReg& right ) const { return iAddressInfo( *this ).Add( right ); }
-__forceinline iAddressInfo operator+( const iAddressInfo& right ) const { return iAddressInfo( *this ).Add( right ); }
-__forceinline iAddressInfo operator+( s32 imm ) const { return iAddressInfo( *this ).Add( imm ); }
-__forceinline iAddressInfo operator-( s32 imm ) const { return iAddressInfo( *this ).Add( -imm ); }
+__forceinline xAddressInfo operator+( const xAddressReg& right ) const { return xAddressInfo( *this ).Add( right ); }
+__forceinline xAddressInfo operator+( const xAddressInfo& right ) const { return xAddressInfo( *this ).Add( right ); }
+__forceinline xAddressInfo operator+( s32 imm ) const { return xAddressInfo( *this ).Add( imm ); }
+__forceinline xAddressInfo operator-( s32 imm ) const { return xAddressInfo( *this ).Add( -imm ); }
 };
 //////////////////////////////////////////////////////////////////////////////////////////

@@ -392,25 +392,25 @@ namespace x86Emitter
 //
 // This class serves two purposes: It houses 'reduced' ModRM/SIB info only, which means
 // that the Base, Index, Scale, and Displacement values are all in the correct arrange-
-// ments, and it serves as a type-safe layer between the iRegister's operators (which
-// generate iAddressInfo types) and the emitter's ModSib instruction forms. Without this,
-// the iRegister would pass as a ModSib type implicitly, and that would cause ambiguity
+// ments, and it serves as a type-safe layer between the xRegister's operators (which
+// generate xAddressInfo types) and the emitter's ModSib instruction forms. Without this,
+// the xRegister would pass as a ModSib type implicitly, and that would cause ambiguity
 // on a number of instructions.
 //
-// End users should always use iAddressInfo instead.
+// End users should always use xAddressInfo instead.
 //
 class ModSibBase
 {
 public:
-iAddressReg Base; // base register (no scale)
-iAddressReg Index; // index reg gets multiplied by the scale
+xAddressReg Base; // base register (no scale)
+xAddressReg Index; // index reg gets multiplied by the scale
 uint Scale; // scale applied to the index register, in scale/shift form
 s32 Displacement; // offset applied to the Base/Index registers.

 public:
-explicit ModSibBase( const iAddressInfo& src );
+explicit ModSibBase( const xAddressInfo& src );
 explicit ModSibBase( s32 disp );
-ModSibBase( iAddressReg base, iAddressReg index, int scale=0, s32 displacement=0 );
+ModSibBase( xAddressReg base, xAddressReg index, int scale=0, s32 displacement=0 );

 bool IsByteSizeDisp() const { return is_s8( Displacement ); }
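The comments above draw one distinction worth spelling out: xAddressInfo carries the index multiplier in factor form, while ModSibBase stores it in scale/shift form for SIB encoding. A hedged sketch (it presumes the reduction happens when an xAddressInfo is converted into a ModSibBase, which is implied but not shown in this hunk):

    xAddressReg base( ebp ), idx( ecx );
    ModSibBase sib( xAddressInfo( base, idx, 4, 8 ) );   // Factor 4 would become Scale 2 (1<<2), Displacement 8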
@@ -437,9 +437,9 @@ namespace x86Emitter
 public:
 static const uint OperandSize = sizeof( OperandType );

-__forceinline explicit ModSibStrict( const iAddressInfo& src ) : ModSibBase( src ) {}
+__forceinline explicit ModSibStrict( const xAddressInfo& src ) : ModSibBase( src ) {}
 __forceinline explicit ModSibStrict( s32 disp ) : ModSibBase( disp ) {}
-__forceinline ModSibStrict( iAddressReg base, iAddressReg index, int scale=0, s32 displacement=0 ) :
+__forceinline ModSibStrict( xAddressReg base, xAddressReg index, int scale=0, s32 displacement=0 ) :
 ModSibBase( base, index, scale, displacement ) {}

 __forceinline ModSibStrict<OperandType>& Add( s32 imm )

@@ -453,20 +453,20 @@ namespace x86Emitter
 };

 //////////////////////////////////////////////////////////////////////////////////////////
-// iAddressIndexerBase - This is a static class which provisions our ptr[] syntax.
+// xAddressIndexerBase - This is a static class which provisions our ptr[] syntax.
 //
-struct iAddressIndexerBase
+struct xAddressIndexerBase
 {
 // passthrough instruction, allows ModSib to pass silently through ptr translation
 // without doing anything and without compiler error.
 const ModSibBase& operator[]( const ModSibBase& src ) const { return src; }

-__forceinline ModSibBase operator[]( iAddressReg src ) const
+__forceinline ModSibBase operator[]( xAddressReg src ) const
 {
-return ModSibBase( src, iAddressReg::Empty );
+return ModSibBase( src, xAddressReg::Empty );
 }

-__forceinline ModSibBase operator[]( const iAddressInfo& src ) const
+__forceinline ModSibBase operator[]( const xAddressInfo& src ) const
 {
 return ModSibBase( src );
 }

@@ -481,7 +481,7 @@ namespace x86Emitter
 return ModSibBase( (uptr)src );
 }

-iAddressIndexerBase() {} // appease the GCC gods
+xAddressIndexerBase() {} // appease the GCC gods
 };
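xAddressIndexerBase is what makes the ptr[...] syntax work: each operator[] overload above turns a register or an address expression into a ModSibBase. A sketch of how it reads at a call site (ptr itself is declared further down in this header):

    xAddressReg base( ebx ), idx( edx );
    ModSibBase a = ptr[ base ];                  // plain register form
    ModSibBase b = ptr[ base + idx*2 + 0x10 ];   // full base + index*scale + displacement expression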
 //////////////////////////////////////////////////////////////////////////////////////////

@@ -489,7 +489,7 @@ namespace x86Emitter
 // specification of the operand size for ImmToMem operations.
 //
 template< typename OperandType >
-struct iAddressIndexer
+struct xAddressIndexer
 {
 static const uint OperandSize = sizeof( OperandType );

@@ -497,12 +497,12 @@ namespace x86Emitter
 // without doing anything and without compiler error.
 const ModSibStrict<OperandType>& operator[]( const ModSibStrict<OperandType>& src ) const { return src; }

-__forceinline ModSibStrict<OperandType> operator[]( iAddressReg src ) const
+__forceinline ModSibStrict<OperandType> operator[]( xAddressReg src ) const
 {
-return ModSibStrict<OperandType>( src, iAddressReg::Empty );
+return ModSibStrict<OperandType>( src, xAddressReg::Empty );
 }

-__forceinline ModSibStrict<OperandType> operator[]( const iAddressInfo& src ) const
+__forceinline ModSibStrict<OperandType> operator[]( const xAddressInfo& src ) const
 {
 return ModSibStrict<OperandType>( src );
 }
@@ -517,17 +517,17 @@ namespace x86Emitter
 return ModSibStrict<OperandType>( (uptr)src );
 }

-iAddressIndexer() {} // GCC initialization dummy
+xAddressIndexer() {} // GCC initialization dummy
 };

 // ptr[] - use this form for instructions which can resolve the address operand size from
 // the other register operand sizes.
-extern const iAddressIndexerBase ptr;
-extern const iAddressIndexer<u128> ptr128;
-extern const iAddressIndexer<u64> ptr64;
-extern const iAddressIndexer<u32> ptr32; // explicitly typed addressing, usually needed for '[dest],imm' instruction forms
-extern const iAddressIndexer<u16> ptr16; // explicitly typed addressing, usually needed for '[dest],imm' instruction forms
-extern const iAddressIndexer<u8> ptr8; // explicitly typed addressing, usually needed for '[dest],imm' instruction forms
+extern const xAddressIndexerBase ptr;
+extern const xAddressIndexer<u128> ptr128;
+extern const xAddressIndexer<u64> ptr64;
+extern const xAddressIndexer<u32> ptr32; // explicitly typed addressing, usually needed for '[dest],imm' instruction forms
+extern const xAddressIndexer<u16> ptr16; // explicitly typed addressing, usually needed for '[dest],imm' instruction forms
+extern const xAddressIndexer<u8> ptr8; // explicitly typed addressing, usually needed for '[dest],imm' instruction forms
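The explicitly sized indexers exist because a memory/immediate instruction has no register operand to infer a width from. A hedged sketch of the '[dest],imm' case the comments refer to; it assumes the emitter's move instruction accepts a ModSibStrict destination plus an immediate, which is not shown in this hunk:

    xAddressReg base( ebp );
    xMOV( ptr32[ base + 0x10 ], 0 );        // ptr32 fixes the store at 32 bits (assumed overload)
    xMOV( ptr16[ base + 0x10 ], 0x1234 );   // same address expression, 16-bit store (assumed overload)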
 //////////////////////////////////////////////////////////////////////////////////////////
 // JccComparisonType - enumerated possibilities for inspired code branching!

@@ -561,26 +561,41 @@ namespace x86Emitter
 // Not supported yet:
 //E3 cb JECXZ rel8 Jump short if ECX register is 0.
+//////////////////////////////////////////////////////////////////////////////////////////
+// SSE2_ComparisonType - enumerated possibilities for SIMD data comparison!
+//
+enum SSE2_ComparisonType
+{
+SSE2_Equal = 0,
+SSE2_Less,
+SSE2_LessOrEqual,
+SSE2_Unordered,
+SSE2_NotEqual,
+SSE2_NotLess,
+SSE2_NotLessOrEqual,
+SSE2_Ordered
+};
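The enumerators are listed in the same order as the imm8 predicate encodings used by the legacy CMPPS/CMPSS wrappers removed earlier in this commit (0 = EQ through 7 = ORD), so the enum value can presumably be passed straight through as the comparison immediate. A one-line sketch of that correspondence:

    int imm = (int)SSE2_NotEqual;   // == 4, the same predicate the old CMPNEPS wrappers passed to CMPPSMtoR( 4 )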
 //////////////////////////////////////////////////////////////////////////////////////////
-// iSmartJump
+// xSmartJump
 // This class provides an interface for generating forward-based j8's or j32's "smartly"
 // as per the measured displacement distance. If the displacement is a valid s8, then
 // a j8 is inserted, else a j32.
 //
-// Note: This class is inherently unsafe, and so it's recommended to use iForwardJump8/32
+// Note: This class is inherently unsafe, and so it's recommended to use xForwardJump8/32
 // whenever it is known that the jump destination is (or is not) short. Only use
-// iSmartJump in cases where it's unknown what jump encoding will be ideal.
+// xSmartJump in cases where it's unknown what jump encoding will be ideal.
 //
-// Important: Use this tool with caution! iSmartJump cannot be used in cases where jump
+// Important: Use this tool with caution! xSmartJump cannot be used in cases where jump
 // targets overlap, since the writeback of the second target will alter the position of
 // the first target (which breaks the relative addressing). To assist in avoiding such
-// errors, iSmartJump works based on C++ block scope, where the destruction of the
-// iSmartJump object (invoked by a '}') signals the target of the jump. Example:
+// errors, xSmartJump works based on C++ block scope, where the destruction of the
+// xSmartJump object (invoked by a '}') signals the target of the jump. Example:
 //
 // {
 // iCMP( EAX, ECX );
-// iSmartJump jumpTo( Jcc_Above );
+// xSmartJump jumpTo( Jcc_Above );
 // [... conditional code ...]
 // } // smartjump targets this spot.
 //
@@ -593,7 +608,7 @@ namespace x86Emitter
 // speed benefits in the form of L1/L2 cache clutter, on any CPU. They're also notably
 // faster on P4's, and mildly faster on AMDs. (Core2's and i7's don't care)
 //
-class iSmartJump : public NoncopyableObject
+class xSmartJump : public NoncopyableObject
 {
 protected:
 u8* m_baseptr; // base address of the instruction (passed to the instruction emitter)

@@ -607,12 +622,12 @@ namespace x86Emitter
 }

 JccComparisonType GetCondition() const { return m_cc; }
-virtual ~iSmartJump();
+virtual ~xSmartJump();

 // ------------------------------------------------------------------------
 // ccType - Comparison type to be written back to the jump instruction position.
 //
-iSmartJump( JccComparisonType ccType )
+xSmartJump( JccComparisonType ccType )
 {
 jASSUME( ccType != Jcc_Unknown );
 m_baseptr = iGetPtr();
@@ -625,12 +640,12 @@ namespace x86Emitter
 };

 //////////////////////////////////////////////////////////////////////////////////////////
-// iForwardJump
-// Primary use of this class is through the various iForwardJA8/iForwardJLE32/etc. helpers
+// xForwardJump
+// Primary use of this class is through the various xForwardJA8/xForwardJLE32/etc. helpers
 // defined later in this header. :)
 //
 template< typename OperandType >
-class iForwardJump
+class xForwardJump
 {
 public:
 static const uint OperandSize = sizeof( OperandType );
@@ -641,7 +656,7 @@ namespace x86Emitter

 // The jump instruction is emitted at the point of object construction. The conditional
 // type must be valid (Jcc_Unknown generates an assertion).
-iForwardJump( JccComparisonType cctype = Jcc_Unconditional );
+xForwardJump( JccComparisonType cctype = Jcc_Unconditional );

 // Sets the jump target by writing back the current x86Ptr to the jump instruction.
 // This method can be called multiple times, re-writing the jump instruction's target
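Per the comments above, the forward jump is emitted with a placeholder displacement when the object is constructed and patched once the target-setting method is called. A hedged usage sketch; the OperandType of u8 standing for a j8 and the SetTarget() method name are assumptions, since neither is shown in this hunk:

    xForwardJump<u8> skip( Jcc_Above );   // emits a short conditional jump with a placeholder displacement
    // ... code that is skipped when the condition holds ...
    skip.SetTarget();                     // assumed name: writes back the current x86Ptr as the jump target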
@@ -656,8 +671,8 @@ namespace x86Emitter
 extern void ModRM( uint mod, uint reg, uint rm );
 extern void ModRM_Direct( uint reg, uint rm );
 extern void SibSB( u32 ss, u32 index, u32 base );
-extern void iWriteDisp( int regfield, s32 displacement );
-extern void iWriteDisp( int regfield, const void* address );
+extern void xWriteDisp( int regfield, s32 displacement );
+extern void xWriteDisp( int regfield, const void* address );

 extern void EmitSibMagic( uint regfield, const ModSibBase& info );