Emitter: Implemented packed logical operations (PAND/POR/ANDxx/ORxx/etc.), ADDxx/SUBxx/MULxx/DIVxx, and iRCPPS/iRCPSS.

git-svn-id: http://pcsx2.googlecode.com/svn/trunk@1021 96395faa-99c1-11dd-bbfe-3dabce05a288
This commit is contained in:
Jake.Stine 2009-04-19 20:14:53 +00:00
parent d91eb6d1c8
commit 1bc6795200
6 changed files with 169 additions and 215 deletions

View File

@ -3859,7 +3859,6 @@ void recVUMI_JR( VURegs* vuu, s32 info )
{
int fsreg = _allocX86reg(-1, X86TYPE_VI|(s_vu?X86TYPE_VU1:0), _Fs_, MODE_READ);
LEA32RStoR(EAX, fsreg, 3);
CWDE();
if( (s_pCurBlock->type & BLOCKTYPE_HASEOP) || s_vu == 0 ) MOV32RtoM(SuperVUGetVIAddr(REG_TPC, 0), EAX);
@ -3876,7 +3875,6 @@ void recVUMI_JALR( VURegs* vuu, s32 info )
int fsreg = _allocX86reg(-1, X86TYPE_VI|(s_vu?X86TYPE_VU1:0), _Fs_, MODE_READ);
LEA32RStoR(EAX, fsreg, 3);
CWDE(); // necessary; Charlie and the Chocolate Factory gives bad addresses, but graphics are ok
if ( _Ft_ ) {
_deleteX86reg(X86TYPE_VI|(s_vu?X86TYPE_VU1:0), _Ft_, 2);

View File

@ -87,9 +87,9 @@ __emitinline void writeXMMop( u8 opcode, const iRegister<T>& reg, const void* da
}
//////////////////////////////////////////////////////////////////////////////////////////
//
// Moves to/from high/low portions of an xmm register.
// These instructions cannot be used in reg/reg form.
//
template< u8 Prefix, u8 Opcode >
class MovhlImplAll
{
@ -114,3 +114,46 @@ public:
MovapsImplAll() {} //GCC.
};
//////////////////////////////////////////////////////////////////////////////////////////
// PLogicImplAll - Implements logic forms for MMX/SSE instructions, and can also be used for
// a few other instructions (anything that comes in simdreg,simdreg / simdreg,ModRM forms).
//
template< u8 Opcode >
class PLogicImplAll
{
public:
template< typename T >
__forceinline void operator()( const iRegisterSIMD<T>& to, const iRegisterSIMD<T>& from ) const
{
writeXMMop( 0x66, Opcode, to, from );
}
template< typename T >
__forceinline void operator()( const iRegisterSIMD<T>& to, const void* from ) const
{
writeXMMop( 0x66, Opcode, to, from );
}
template< typename T >
__noinline void operator()( const iRegisterSIMD<T>& to, const ModSibBase& from ) const { writeXMMop( 0x66, Opcode, to, from ); }
PLogicImplAll() {} //GCWho?
};
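// Illustrative example (not part of this commit): the instance defined in the .cpp,
//   const PLogicImplAll<0xdb> iPAND;
// lets a call such as
//   iPAND( iRegisterSSE(1), iRegisterSSE(0) );   // forwards to writeXMMop( 0x66, 0xdb, to, from )
// emit the two-register form 66 0F DB /r -- i.e. "pand xmm1, xmm0" encodes as 66 0F DB C8.
// The same instance also accepts (reg, void*) and (reg, ModSibBase) sources, so the reg/mem
// forms reuse the identical opcode byte.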
// ------------------------------------------------------------------------
// For implementing SSE operations that come in simdreg,simdreg/mem forms with a fixed
// prefix byte -- bitwise logic like ANDPS/ANDPD, and (as reused elsewhere) packed/scalar arithmetic.
template< u8 Prefix, u8 Opcode >
class PLogicImplSSE
{
public:
__forceinline void operator()( const iRegisterSSE& to, const iRegisterSSE& from ) const
{
writeXMMop( Prefix, Opcode, to, from );
}
__forceinline void operator()( const iRegisterSSE& to, const void* from ) const
{
writeXMMop( Prefix, Opcode, to, from );
}
__noinline void operator()( const iRegisterSSE& to, const ModSibBase& from ) const { writeXMMop( Prefix, Opcode, to, from ); }
PLogicImplSSE() {} //GCWho?
};
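// Illustrative example (not part of this commit): PLogicImplSSE differs from PLogicImplAll
// only in taking the mandatory prefix as a template parameter, which is how one opcode byte
// fans out into the four SSE arithmetic forms, e.g. for opcode 0x58:
//   PLogicImplSSE<0x00,0x58>  ->  ADDPS   (0F 58 /r)
//   PLogicImplSSE<0x66,0x58>  ->  ADDPD   (66 0F 58 /r)
//   PLogicImplSSE<0xf3,0x58>  ->  ADDSS   (F3 0F 58 /r)
//   PLogicImplSSE<0xf2,0x58>  ->  ADDSD   (F2 0F 58 /r)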

View File

@ -753,6 +753,46 @@ const MovhlImplAll< 0, 0x12 > iMOVLPS;
const MovhlImplAll< 0x66, 0x16 > iMOVHPD;
const MovhlImplAll< 0x66, 0x12 > iMOVLPD;
const PLogicImplAll<0xdb> iPAND;
const PLogicImplAll<0xdf> iPANDN;
const PLogicImplAll<0xeb> iPOR;
const PLogicImplAll<0xef> iPXOR;
const PLogicImplSSE<0x00,0x54> iANDPS;
const PLogicImplSSE<0x66,0x54> iANDPD;
const PLogicImplSSE<0x00,0x55> iANDNPS;
const PLogicImplSSE<0x66,0x55> iANDNPD;
const PLogicImplSSE<0x00,0x56> iORPS;
const PLogicImplSSE<0x66,0x56> iORPD;
const PLogicImplSSE<0x00,0x57> iXORPS;
const PLogicImplSSE<0x66,0x57> iXORPD;
const PLogicImplSSE<0x00,0x5c> iSUBPS;
const PLogicImplSSE<0x66,0x5c> iSUBPD;
const PLogicImplSSE<0xf3,0x5c> iSUBSS;
const PLogicImplSSE<0xf2,0x5c> iSUBSD;
const PLogicImplSSE<0x00,0x58> iADDPS;
const PLogicImplSSE<0x66,0x58> iADDPD;
const PLogicImplSSE<0xf3,0x58> iADDSS;
const PLogicImplSSE<0xf2,0x58> iADDSD;
const PLogicImplSSE<0x00,0x59> iMULPS;
const PLogicImplSSE<0x66,0x59> iMULPD;
const PLogicImplSSE<0xf3,0x59> iMULSS;
const PLogicImplSSE<0xf2,0x59> iMULSD;
const PLogicImplSSE<0x00,0x5e> iDIVPS;
const PLogicImplSSE<0x66,0x5e> iDIVPD;
const PLogicImplSSE<0xf3,0x5e> iDIVSS;
const PLogicImplSSE<0xf2,0x5e> iDIVSD;
// Compute Reciprocal Packed Single-Precision Floating-Point Values
const PLogicImplSSE<0,0x53> iRCPPS;
// Compute Reciprocal of Scalar Single-Precision Floating-Point Value
const PLogicImplSSE<0xf3,0x53> iRCPSS;
// Moves from XMM to XMM, with the *upper 64 bits* of the destination register
// being cleared to zero.

View File

@ -40,42 +40,20 @@ emitterT void MOVD32RmtoMMX( x86MMXRegType to, x86IntRegType from, int offset )
emitterT void MOVD32MMXtoR( x86IntRegType to, x86MMXRegType from ) { iMOVD( iRegister32(to), iRegisterMMX(from) ); }
emitterT void MOVD32MMXtoRm( x86IntRegType to, x86MMXRegType from, int offset ) { iMOVD( ptr[iAddressReg(to)+offset], iRegisterMMX(from) ); }
emitterT void PMOVMSKBMMXtoR(x86IntRegType to, x86MMXRegType from)
{
iPMOVMSKB( iRegister32(to), iRegisterMMX(from) );
}
emitterT void PMOVMSKBMMXtoR(x86IntRegType to, x86MMXRegType from) { iPMOVMSKB( iRegister32(to), iRegisterMMX(from) ); }
#define DEFINE_LEGACY_LOGIC_OPCODE( mod ) \
emitterT void P##mod##RtoR( x86MMXRegType to, x86MMXRegType from ) { iP##mod( iRegisterMMX(to), iRegisterMMX(from) ); } \
emitterT void P##mod##MtoR( x86MMXRegType to, uptr from ) { iP##mod( iRegisterMMX(to), (void*)from ); } \
emitterT void SSE2_P##mod##_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { iP##mod( iRegisterSSE(to), iRegisterSSE(from) ); } \
emitterT void SSE2_P##mod##_M128_to_XMM( x86SSERegType to, uptr from ) { iP##mod( iRegisterSSE(to), (void*)from ); }
DEFINE_LEGACY_LOGIC_OPCODE( AND )
DEFINE_LEGACY_LOGIC_OPCODE( ANDN )
DEFINE_LEGACY_LOGIC_OPCODE( OR )
DEFINE_LEGACY_LOGIC_OPCODE( XOR )
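// Expansion sketch (for reference only, not emitted by this commit): DEFINE_LEGACY_LOGIC_OPCODE( AND )
// regenerates the four legacy wrappers below, each forwarding into the new iPAND object:
//   emitterT void PANDRtoR( x86MMXRegType to, x86MMXRegType from )              { iPAND( iRegisterMMX(to), iRegisterMMX(from) ); }
//   emitterT void PANDMtoR( x86MMXRegType to, uptr from )                       { iPAND( iRegisterMMX(to), (void*)from ); }
//   emitterT void SSE2_PAND_XMM_to_XMM( x86SSERegType to, x86SSERegType from )  { iPAND( iRegisterSSE(to), iRegisterSSE(from) ); }
//   emitterT void SSE2_PAND_M128_to_XMM( x86SSERegType to, uptr from )          { iPAND( iRegisterSSE(to), (void*)from ); }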
/* pand r64 to r64 */
emitterT void PANDRtoR( x86MMXRegType to, x86MMXRegType from )
{
write16( 0xDB0F );
ModRM( 3, to, from );
}
emitterT void PANDNRtoR( x86MMXRegType to, x86MMXRegType from )
{
write16( 0xDF0F );
ModRM( 3, to, from );
}
/* por r64 to r64 */
emitterT void PORRtoR( x86MMXRegType to, x86MMXRegType from )
{
write16( 0xEB0F );
ModRM( 3, to, from );
}
/* pxor r64 to r64 */
emitterT void PXORRtoR( x86MMXRegType to, x86MMXRegType from )
{
write16( 0xEF0F );
ModRM( 3, to, from );
}
/* psllq r64 to r64 */
emitterT void PSLLQRtoR( x86MMXRegType to, x86MMXRegType from )
{
@ -420,38 +398,6 @@ emitterT void PSRADRtoR( x86MMXRegType to, x86MMXRegType from )
ModRM( 3, to, from );
}
/* por m64 to r64 */
emitterT void PORMtoR( x86MMXRegType to, uptr from )
{
write16( 0xEB0F );
ModRM( 0, to, DISP32 );
write32( MEMADDR(from, 4) );
}
/* pxor m64 to r64 */
emitterT void PXORMtoR( x86MMXRegType to, uptr from )
{
write16( 0xEF0F );
ModRM( 0, to, DISP32 );
write32( MEMADDR(from, 4) );
}
/* pand m64 to r64 */
emitterT void PANDMtoR( x86MMXRegType to, uptr from )
{
//u64 rip = (u64)x86Ptr + 7;
write16( 0xDB0F );
ModRM( 0, to, DISP32 );
write32( MEMADDR(from, 4) );
}
emitterT void PANDNMtoR( x86MMXRegType to, uptr from )
{
write16( 0xDF0F );
ModRM( 0, to, DISP32 );
write32( MEMADDR(from, 4) );
}
emitterT void PUNPCKHDQRtoR( x86MMXRegType to, x86MMXRegType from )
{
write16( 0x6A0F );

View File

@ -215,108 +215,34 @@ emitterT void SSE_MOVHLPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { i
emitterT void SSE2_PMOVMSKB_XMM_to_R32(x86IntRegType to, x86SSERegType from) { iPMOVMSKB( iRegister32(to), iRegisterSSE(from) ); }
#define DEFINE_LEGACY_PSD_OPCODE( mod ) \
emitterT void SSE_##mod##PS_M128_to_XMM( x86SSERegType to, uptr from ) { i##mod##PS( iRegisterSSE(to), (void*)from ); } \
emitterT void SSE_##mod##PS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { i##mod##PS( iRegisterSSE(to), iRegisterSSE(from) ); } \
emitterT void SSE2_##mod##PD_M128_to_XMM( x86SSERegType to, uptr from ) { i##mod##PD( iRegisterSSE(to), (void*)from ); } \
emitterT void SSE2_##mod##PD_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { i##mod##PD( iRegisterSSE(to), iRegisterSSE(from) ); }
#define DEFINE_LEGACY_PSSD_OPCODE( mod ) \
DEFINE_LEGACY_PSD_OPCODE( mod ) \
emitterT void SSE_##mod##SS_M32_to_XMM( x86SSERegType to, uptr from ) { i##mod##SS( iRegisterSSE(to), (void*)from ); } \
emitterT void SSE_##mod##SS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { i##mod##SS( iRegisterSSE(to), iRegisterSSE(from) ); } \
emitterT void SSE2_##mod##SD_M64_to_XMM( x86SSERegType to, uptr from ) { i##mod##SD( iRegisterSSE(to), (void*)from ); } \
emitterT void SSE2_##mod##SD_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { i##mod##SD( iRegisterSSE(to), iRegisterSSE(from) ); }
///////////////////////////////////////////////////////////////////////////////////
//**********************************************************************************/
//ANDPS: Logical Bit-wise AND for Single FP *
//**********************************************************************************
emitterT void SSE_ANDPS_M128_to_XMM( x86SSERegType to, uptr from ) { SSEMtoR( 0x540f, 0 ); }
emitterT void SSE_ANDPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { SSERtoR( 0x540f ); }
DEFINE_LEGACY_PSD_OPCODE( AND )
DEFINE_LEGACY_PSD_OPCODE( ANDN )
DEFINE_LEGACY_PSD_OPCODE( OR )
DEFINE_LEGACY_PSD_OPCODE( XOR )
emitterT void SSE2_ANDPD_M128_to_XMM( x86SSERegType to, uptr from ) { SSEMtoR66( 0x540f ); }
emitterT void SSE2_ANDPD_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { SSERtoR66( 0x540f ); }
DEFINE_LEGACY_PSSD_OPCODE( SUB )
DEFINE_LEGACY_PSSD_OPCODE( ADD )
DEFINE_LEGACY_PSSD_OPCODE( MUL )
DEFINE_LEGACY_PSSD_OPCODE( DIV )
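// Expansion sketch (for reference only): DEFINE_LEGACY_PSSD_OPCODE( ADD ) regenerates both the
// packed and scalar legacy wrappers, forwarding them into the new prefix-templated objects:
//   SSE_ADDPS_* -> iADDPS,  SSE2_ADDPD_* -> iADDPD,  SSE_ADDSS_* -> iADDSS,  SSE2_ADDSD_* -> iADDSD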
///////////////////////////////////////////////////////////////////////////////////////
//**********************************************************************************/
//ANDNPS : Logical Bit-wise AND NOT of Single-precision FP values *
//**********************************************************************************
emitterT void SSE_ANDNPS_M128_to_XMM( x86SSERegType to, uptr from ) { SSEMtoR( 0x550f, 0 ); }
emitterT void SSE_ANDNPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ){ SSERtoR( 0x550f ); }
emitterT void SSE_RCPPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { iRCPPS( iRegisterSSE(to), iRegisterSSE(from) ); }
emitterT void SSE_RCPPS_M128_to_XMM( x86SSERegType to, uptr from ) { iRCPPS( iRegisterSSE(to), (void*)from ); }
emitterT void SSE2_ANDNPD_M128_to_XMM( x86SSERegType to, uptr from ) { SSEMtoR66( 0x550f ); }
emitterT void SSE2_ANDNPD_XMM_to_XMM( x86SSERegType to, x86SSERegType from ){ SSERtoR66( 0x550f ); }
/////////////////////////////////////////////////////////////////////////////////////
//**********************************************************************************/
//RCPPS : Packed Single-Precision FP Reciprocal *
//**********************************************************************************
emitterT void SSE_RCPPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { SSERtoR( 0x530f ); }
emitterT void SSE_RCPPS_M128_to_XMM( x86SSERegType to, uptr from ) { SSEMtoR( 0x530f, 0 ); }
emitterT void SSE_RCPSS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { SSE_SS_RtoR(0x530f); }
emitterT void SSE_RCPSS_M32_to_XMM( x86SSERegType to, uptr from ) { SSE_SS_MtoR(0x530f, 0); }
//////////////////////////////////////////////////////////////////////////////////////
//**********************************************************************************/
//ORPS : Bit-wise Logical OR of Single-Precision FP Data *
//**********************************************************************************
emitterT void SSE_ORPS_M128_to_XMM( x86SSERegType to, uptr from ) { SSEMtoR( 0x560f, 0 ); }
emitterT void SSE_ORPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { SSERtoR( 0x560f ); }
emitterT void SSE2_ORPD_M128_to_XMM( x86SSERegType to, uptr from ) { SSEMtoR66( 0x560f ); }
emitterT void SSE2_ORPD_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { SSERtoR66( 0x560f ); }
/////////////////////////////////////////////////////////////////////////////////////
//**********************************************************************************/
//XORPS : Bitwise Logical XOR of Single-Precision FP Values *
//**********************************************************************************
emitterT void SSE_XORPS_M128_to_XMM( x86SSERegType to, uptr from ) { SSEMtoR( 0x570f, 0 ); }
emitterT void SSE_XORPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { SSERtoR( 0x570f ); }
emitterT void SSE2_XORPD_M128_to_XMM( x86SSERegType to, uptr from ) { SSEMtoR66( 0x570f ); }
emitterT void SSE2_XORPD_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { SSERtoR66( 0x570f ); }
///////////////////////////////////////////////////////////////////////////////////////
//**********************************************************************************/
//ADDPS : ADD Packed Single-Precision FP Values *
//**********************************************************************************
emitterT void SSE_ADDPS_M128_to_XMM( x86SSERegType to, uptr from ) { SSEMtoR( 0x580f, 0 ); }
emitterT void SSE_ADDPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { SSERtoR( 0x580f ); }
////////////////////////////////////////////////////////////////////////////////////
//**********************************************************************************/
//ADDSS : ADD Scalar Single-Precision FP Values *
//**********************************************************************************
emitterT void SSE_ADDSS_M32_to_XMM( x86SSERegType to, uptr from ) { SSE_SS_MtoR( 0x580f, 0 ); }
emitterT void SSE_ADDSS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { SSE_SS_RtoR( 0x580f ); }
emitterT void SSE2_ADDSD_M64_to_XMM( x86SSERegType to, uptr from ) { SSE_SD_MtoR( 0x580f, 0 ); }
emitterT void SSE2_ADDSD_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { SSE_SD_RtoR( 0x580f ); }
/////////////////////////////////////////////////////////////////////////////////////////
//**********************************************************************************/
//SUBPS: Packed Single-Precision FP Subtract *
//**********************************************************************************
emitterT void SSE_SUBPS_M128_to_XMM( x86SSERegType to, uptr from ) { SSEMtoR( 0x5c0f, 0 ); }
emitterT void SSE_SUBPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { SSERtoR( 0x5c0f ); }
///////////////////////////////////////////////////////////////////////////////////////
//**********************************************************************************/
//SUBSS : Scalar Single-Precision FP Subtract *
//**********************************************************************************
emitterT void SSE_SUBSS_M32_to_XMM( x86SSERegType to, uptr from ) { SSE_SS_MtoR( 0x5c0f, 0 ); }
emitterT void SSE_SUBSS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { SSE_SS_RtoR( 0x5c0f ); }
emitterT void SSE2_SUBSD_M64_to_XMM( x86SSERegType to, uptr from ) { SSE_SD_MtoR( 0x5c0f, 0 ); }
emitterT void SSE2_SUBSD_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { SSE_SD_RtoR( 0x5c0f ); }
/////////////////////////////////////////////////////////////////////////////////////////
//**********************************************************************************/
//MULPS : Packed Single-Precision FP Multiply *
//**********************************************************************************
emitterT void SSE_MULPS_M128_to_XMM( x86SSERegType to, uptr from ) { SSEMtoR( 0x590f, 0 ); }
emitterT void SSE_MULPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { SSERtoR( 0x590f ); }
////////////////////////////////////////////////////////////////////////////////////////
//**********************************************************************************/
//MULSS : Scalar Single-Precision FP Multiply *
//**********************************************************************************
emitterT void SSE_MULSS_M32_to_XMM( x86SSERegType to, uptr from ) { SSE_SS_MtoR( 0x590f, 0 ); }
emitterT void SSE_MULSS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { SSE_SS_RtoR( 0x590f ); }
emitterT void SSE2_MULSD_M64_to_XMM( x86SSERegType to, uptr from ) { SSE_SD_MtoR( 0x590f, 0 ); }
emitterT void SSE2_MULSD_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { SSE_SD_RtoR( 0x590f ); }
emitterT void SSE_RCPSS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { iRCPSS( iRegisterSSE(to), iRegisterSSE(from) ); }
emitterT void SSE_RCPSS_M32_to_XMM( x86SSERegType to, uptr from ) { iRCPSS( iRegisterSSE(to), (void*)from ); }
////////////////////////////////////////////////////////////////////////////////////////////
//**********************************************************************************/
@ -610,23 +536,6 @@ emitterT void SSE_UNPCKLPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) {
emitterT void SSE_UNPCKHPS_M128_to_XMM( x86SSERegType to, uptr from ) { SSEMtoR(0x150f, 0); }
emitterT void SSE_UNPCKHPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { SSERtoR( 0x150F ); }
////////////////////////////////////////////////////////////////////////////////////////
//**********************************************************************************/
//DIVPS : Packed Single-Precision FP Divide *
//**********************************************************************************
emitterT void SSE_DIVPS_M128_to_XMM( x86SSERegType to, uptr from ) { SSEMtoR( 0x5e0F, 0 ); }
emitterT void SSE_DIVPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { SSERtoR( 0x5e0F ); }
//////////////////////////////////////////////////////////////////////////////////////
//**********************************************************************************/
//DIVSS : Scalar Single-Precision FP Divide *
//**********************************************************************************
emitterT void SSE_DIVSS_M32_to_XMM( x86SSERegType to, uptr from ) { SSE_SS_MtoR( 0x5e0F, 0 ); }
emitterT void SSE_DIVSS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { SSE_SS_RtoR( 0x5e0F ); }
emitterT void SSE2_DIVSD_M64_to_XMM( x86SSERegType to, uptr from ) { SSE_SD_MtoR( 0x5e0F, 0 ); }
emitterT void SSE2_DIVSD_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { SSE_SD_RtoR( 0x5e0F ); }
/////////////////////////////////////////////////////////////////////////////////////////
//**********************************************************************************/
//STMXCSR : Store Streaming SIMD Extension Control/Status *
@ -731,30 +640,9 @@ emitterT void SSE2_MOVD_XMM_to_Rm( x86IntRegType to, x86SSERegType from, int off
WriteRmOffsetFrom(from, to, offset);
}
////////////////////////////////////////////////////////////////////////////////////
//**********************************************************************************/
//POR : SSE Bitwise OR *
//**********************************************************************************
emitterT void SSE2_POR_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { SSERtoR66( 0xEB0F ); }
emitterT void SSE2_POR_M128_to_XMM( x86SSERegType to, uptr from ) { SSEMtoR66( 0xEB0F ); }
// logical and to &= from
emitterT void SSE2_PAND_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { SSERtoR66( 0xDB0F ); }
emitterT void SSE2_PAND_M128_to_XMM( x86SSERegType to, uptr from ) { SSEMtoR66( 0xDB0F ); }
// to = (~to) & from
emitterT void SSE2_PANDN_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { SSERtoR66( 0xDF0F ); }
emitterT void SSE2_PANDN_M128_to_XMM( x86SSERegType to, uptr from ) { SSEMtoR66( 0xDF0F ); }
/////////////////////////////////////////////////////////////////////////////////////
//**********************************************************************************/
//PXOR : SSE Bitwise XOR *
//**********************************************************************************
emitterT void SSE2_PXOR_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { SSERtoR66( 0xEF0F ); }
emitterT void SSE2_PXOR_M128_to_XMM( x86SSERegType to, uptr from ) { SSEMtoR66( 0xEF0F ); }
///////////////////////////////////////////////////////////////////////////////////////
emitterT void SSE2_MOVDQA_XMM_to_XMM( x86SSERegType to, x86SSERegType from) { if( AlwaysUseMovaps ) SSE_MOVAPS_XMM_to_XMM( to, from ); else if( to != from ) SSERtoR66(0x6F0F); }
emitterT void SSE2_MOVDQA_XMM_to_XMM( x86SSERegType to, x86SSERegType from) { iMOVDQA( iRegisterSSE(to), iRegisterSSE(from) ); }
// shift right logical

View File

@ -690,25 +690,64 @@ namespace x86Emitter
static const bool AlwaysUseMovaps = false;
#endif
extern const Internal::MovapsImplAll< 0, 0x28, 0x29 > iMOVAPS;
extern const Internal::MovapsImplAll< 0, 0x10, 0x11 > iMOVUPS;
extern const Internal::MovapsImplAll<0, 0x28, 0x29> iMOVAPS;
extern const Internal::MovapsImplAll<0, 0x10, 0x11> iMOVUPS;
extern const Internal::MovapsImplAll< 0x66, 0x28, 0x29 > iMOVAPD;
extern const Internal::MovapsImplAll< 0x66, 0x10, 0x11 > iMOVUPD;
extern const Internal::MovapsImplAll<0x66, 0x28, 0x29> iMOVAPD;
extern const Internal::MovapsImplAll<0x66, 0x10, 0x11> iMOVUPD;
#ifdef ALWAYS_USE_MOVAPS
extern const Internal::MovapsImplAll< 0x66, 0x6f, 0x7f > iMOVDQA;
extern const Internal::MovapsImplAll< 0xf3, 0x6f, 0x7f > iMOVDQU;
extern const Internal::MovapsImplAll<0x66, 0x6f, 0x7f> iMOVDQA;
extern const Internal::MovapsImplAll<0xf3, 0x6f, 0x7f> iMOVDQU;
#else
extern const Internal::MovapsImplAll< 0, 0x28, 0x29 > iMOVDQA;
extern const Internal::MovapsImplAll< 0, 0x10, 0x11 > iMOVDQU;
extern const Internal::MovapsImplAll<0, 0x28, 0x29> iMOVDQA;
extern const Internal::MovapsImplAll<0, 0x10, 0x11> iMOVDQU;
#endif
extern const Internal::MovhlImplAll< 0, 0x16 > iMOVHPS;
extern const Internal::MovhlImplAll< 0, 0x12 > iMOVLPS;
extern const Internal::MovhlImplAll< 0x66, 0x16 > iMOVHPD;
extern const Internal::MovhlImplAll< 0x66, 0x12 > iMOVLPD;
extern const Internal::MovhlImplAll<0, 0x16> iMOVHPS;
extern const Internal::MovhlImplAll<0, 0x12> iMOVLPS;
extern const Internal::MovhlImplAll<0x66, 0x16> iMOVHPD;
extern const Internal::MovhlImplAll<0x66, 0x12> iMOVLPD;
extern const Internal::PLogicImplAll<0xdb> iPAND;
extern const Internal::PLogicImplAll<0xdf> iPANDN;
extern const Internal::PLogicImplAll<0xeb> iPOR;
extern const Internal::PLogicImplAll<0xef> iPXOR;
extern const Internal::PLogicImplSSE<0x00,0x54> iANDPS;
extern const Internal::PLogicImplSSE<0x66,0x54> iANDPD;
extern const Internal::PLogicImplSSE<0x00,0x55> iANDNPS;
extern const Internal::PLogicImplSSE<0x66,0x55> iANDNPD;
extern const Internal::PLogicImplSSE<0x00,0x56> iORPS;
extern const Internal::PLogicImplSSE<0x66,0x56> iORPD;
extern const Internal::PLogicImplSSE<0x00,0x57> iXORPS;
extern const Internal::PLogicImplSSE<0x66,0x57> iXORPD;
extern const Internal::PLogicImplSSE<0x00,0x5c> iSUBPS;
extern const Internal::PLogicImplSSE<0x66,0x5c> iSUBPD;
extern const Internal::PLogicImplSSE<0xf3,0x5c> iSUBSS;
extern const Internal::PLogicImplSSE<0xf2,0x5c> iSUBSD;
extern const Internal::PLogicImplSSE<0x00,0x58> iADDPS;
extern const Internal::PLogicImplSSE<0x66,0x58> iADDPD;
extern const Internal::PLogicImplSSE<0xf3,0x58> iADDSS;
extern const Internal::PLogicImplSSE<0xf2,0x58> iADDSD;
extern const Internal::PLogicImplSSE<0x00,0x59> iMULPS;
extern const Internal::PLogicImplSSE<0x66,0x59> iMULPD;
extern const Internal::PLogicImplSSE<0xf3,0x59> iMULSS;
extern const Internal::PLogicImplSSE<0xf2,0x59> iMULSD;
extern const Internal::PLogicImplSSE<0x00,0x5e> iDIVPS;
extern const Internal::PLogicImplSSE<0x66,0x5e> iDIVPD;
extern const Internal::PLogicImplSSE<0xf3,0x5e> iDIVSS;
extern const Internal::PLogicImplSSE<0xf2,0x5e> iDIVSD;
extern const Internal::PLogicImplSSE<0,0x53> iRCPPS;
extern const Internal::PLogicImplSSE<0xf3,0x53> iRCPSS;
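// Usage sketch (illustrative; register indices and some_m128_const are hypothetical): new-style
// code can call these objects directly instead of the legacy wrappers, e.g.
//   iADDPS( iRegisterSSE(0), iRegisterSSE(1) );          // addps xmm0, xmm1  (0F 58 C1)
//   iPAND ( iRegisterSSE(2), (void*)&some_m128_const );  // pand xmm2, [mem]  (66 0F DB ...)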
}
#include "ix86_inlines.inl"