Reverted the emitter back to c/cpp form from .inl files (probably wasn't necessary, but I don't like having code in header/.inl files when I can help it). Also:

* Fixed a couple of potential bugs in some Rm forms of MMX instructions (the ModRM/SIB rules involved are sketched below).
* Improved compilation times by isolating BaseBlockEx.h to the files that actually need it (it uses STL junk).
* Removed some dead code from the emitters and BaseBlockEx.
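
For context on the Rm-form fixes above, here is a minimal standalone sketch of the ModRM/SIB displacement rules that a shared [reg+offset] helper has to respect. This is an illustration written for this note, not the emitter's actual WriteRmOffsetFrom; write8/write32/ModRM/SibSB below are local stand-ins for the emitter's byte writers.

// Illustrative sketch only (standard x86 encoding rules, not PCSX2 code):
//  * mod=00 with rm=ESP(100b) means "a SIB byte follows", so [esp+...] always needs SIB.
//  * mod=00 with rm=EBP(101b) means "disp32, no base", so [ebp] must be emitted as [ebp+0].
//  * otherwise a displacement that fits in a signed byte can use the shorter disp8 form.
#include <cstdint>
#include <vector>

static std::vector<uint8_t> out;                        // stand-in for x86Ptr
static void write8(uint8_t b)   { out.push_back(b); }
static void write32(uint32_t v) { for (int i = 0; i < 4; ++i) write8(uint8_t(v >> (i * 8))); }
static void ModRM(int mod, int reg, int rm)       { write8(uint8_t((mod << 6) | (reg << 3) | rm)); }
static void SibSB(int scale, int index, int base) { write8(uint8_t((scale << 6) | (index << 3) | base)); }

static bool is_s8(int32_t v) { return v >= -128 && v <= 127; }

enum { EAX = 0, ECX, EDX, EBX, ESP, EBP, ESI, EDI };

// Encode the [base+offset] operand of an instruction whose reg field is 'reg'.
static void EncodeRmOffset(int reg, int base, int32_t offset)
{
    if (base == ESP) {                                  // ESP always needs a SIB byte
        int mod = (offset == 0) ? 0 : (is_s8(offset) ? 1 : 2);
        ModRM(mod, reg, 0x4);
        SibSB(0, 0x4, ESP);                             // index=100b means "no index"
        if (mod == 1) write8(uint8_t(offset));
        else if (mod == 2) write32(uint32_t(offset));
    }
    else if (offset == 0 && base != EBP) {
        ModRM(0, reg, base);                            // plain [reg]
    }
    else if (is_s8(offset)) {
        ModRM(1, reg, base);                            // [reg+disp8]; also covers [ebp+0]
        write8(uint8_t(offset));
    }
    else {
        ModRM(2, reg, base);                            // [reg+disp32]
        write32(uint32_t(offset));
    }
}

The old MMX helpers open-coded subsets of these rules per instruction; routing them through the shared offset writer (as this diff does) is what removes the Rm-form corner cases mentioned above.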

git-svn-id: http://pcsx2.googlecode.com/svn/trunk@921 96395faa-99c1-11dd-bbfe-3dabce05a288
Jake.Stine 2009-04-07 21:54:50 +00:00
parent 1d9adee468
commit 5f354c3cee
27 changed files with 1995 additions and 831 deletions

View File

@ -20,11 +20,11 @@
#define __PCSX2CONFIG_H__
// Hack so that you can still use this file from C (not C++), or from a plugin without access to Paths.h.
#ifdef PLUGIN_ONLY
// .. and removed in favor of a less hackish approach (air)
#ifndef g_MaxPath
#define g_MaxPath 255
#else
#include "Paths.h"
#endif
#endif
/////////////////////////////////////////////////////////////////////////
// Session Configuration Override Flags

View File

@ -29,8 +29,10 @@
#define PCSX2_VERSION "(beta)"
#include "Plugins.h"
#include "System.h"
#include "SaveState.h"
#include "Plugins.h"
#include "DebugTools/Debug.h"
#include "Memory.h"
@ -40,7 +42,4 @@
#include "Elfheader.h"
#include "Patch.h"
#include "System.h"
#include "Pcsx2Config.h"
#endif /* __COMMON_H__ */

View File

@ -16,11 +16,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*/
#ifndef _PCSX2_EXCEPTIONS_H_
#define _PCSX2_EXCEPTIONS_H_
#include <stdexcept>
#include "StringUtils.h"
#pragma once
// This class provides an easy and clean method for ensuring objects are not copyable.
class NoncopyableObject
@ -380,5 +376,3 @@ namespace Exception
{}
};
}
#endif

View File

@ -33,6 +33,7 @@
//////////////////////////////////////////////////////////////////////////////////////////
// Include the STL junk that's actually handy.
#include <stdexcept>
#include <algorithm>
#include <vector>
#include <string>
@ -69,7 +70,9 @@ typedef int BOOL;
#include "zlib/zlib.h"
#include "PS2Etypes.h"
#include "MemcpyFast.h"
#include "StringUtils.h"
#include "Exceptions.h"
////////////////////////////////////////////////////////////////////
// Compiler/OS specific macros and defines -- Begin Section

View File

@ -18,8 +18,6 @@
#ifndef _R5900_OPCODETABLES_H
#define _R5900_OPCODETABLES_H
#include <string>
#include "PS2Etypes.h"
// TODO : Move these into the OpcodeTables namespace

View File

@ -20,9 +20,9 @@
#define __SYSTEM_H__
#include "PS2Etypes.h"
#include "Paths.h"
#include "Pcsx2Config.h"
#include "Exceptions.h"
#include "Paths.h"
#include "MemcpyFast.h"
#include "SafeArray.h"
#include "Misc.h"

View File

@ -947,7 +947,6 @@
<Tool
Name="VCCLCompilerTool"
UsePrecompiledHeader="1"
PrecompiledHeaderFile="$(IntDir)\$(TargetName).pch"
/>
</FileConfiguration>
<FileConfiguration
@ -2912,149 +2911,36 @@
>
</File>
<File
RelativePath="..\..\x86\ix86\ix86.inl"
RelativePath="..\..\x86\ix86\ix86_3dnow.cpp"
>
</File>
<File
RelativePath="..\..\x86\ix86\ix86_3dnow.inl"
>
<FileConfiguration
Name="Devel vm|Win32"
>
<Tool
Name="VCCLCompilerTool"
UsePrecompiledHeader="0"
/>
</FileConfiguration>
<FileConfiguration
Name="Debug vm|Win32"
>
<Tool
Name="VCCLCompilerTool"
UsePrecompiledHeader="0"
/>
</FileConfiguration>
<FileConfiguration
Name="Release vm|Win32"
>
<Tool
Name="VCCLCompilerTool"
UsePrecompiledHeader="0"
/>
</FileConfiguration>
</File>
<File
RelativePath="..\..\x86\ix86\ix86_cpudetect.cpp"
>
</File>
<File
RelativePath="..\..\x86\ix86\ix86_fpu.inl"
>
<FileConfiguration
Name="Debug|Win32"
>
<Tool
Name="VCCustomBuildTool"
/>
</FileConfiguration>
<FileConfiguration
Name="Devel vm|Win32"
>
<Tool
Name="VCCLCompilerTool"
UsePrecompiledHeader="0"
/>
</FileConfiguration>
<FileConfiguration
Name="Debug vm|Win32"
>
<Tool
Name="VCCLCompilerTool"
UsePrecompiledHeader="0"
/>
</FileConfiguration>
<FileConfiguration
Name="Release vm|Win32"
>
<Tool
Name="VCCLCompilerTool"
UsePrecompiledHeader="0"
/>
</FileConfiguration>
</File>
<File
RelativePath="..\..\x86\ix86\ix86_group1.inl"
RelativePath="..\..\x86\ix86\ix86_fpu.cpp"
>
</File>
<File
RelativePath="..\..\x86\ix86\ix86_mmx.inl"
RelativePath="..\..\x86\ix86\ix86_group1.cpp"
>
<FileConfiguration
Name="Debug|Win32"
>
<Tool
Name="VCCustomBuildTool"
/>
</FileConfiguration>
<FileConfiguration
Name="Devel vm|Win32"
>
<Tool
Name="VCCLCompilerTool"
UsePrecompiledHeader="0"
/>
</FileConfiguration>
<FileConfiguration
Name="Debug vm|Win32"
>
<Tool
Name="VCCLCompilerTool"
UsePrecompiledHeader="0"
/>
</FileConfiguration>
<FileConfiguration
Name="Release vm|Win32"
>
<Tool
Name="VCCLCompilerTool"
UsePrecompiledHeader="0"
/>
</FileConfiguration>
</File>
<File
RelativePath="..\..\x86\ix86\ix86_sse.inl"
RelativePath="..\..\x86\ix86\ix86_internal.h"
>
</File>
<File
RelativePath="..\..\x86\ix86\ix86_legacy.cpp"
>
</File>
<File
RelativePath="..\..\x86\ix86\ix86_mmx.cpp"
>
</File>
<File
RelativePath="..\..\x86\ix86\ix86_sse.cpp"
>
<FileConfiguration
Name="Debug|Win32"
>
<Tool
Name="VCCustomBuildTool"
/>
</FileConfiguration>
<FileConfiguration
Name="Devel vm|Win32"
>
<Tool
Name="VCCLCompilerTool"
UsePrecompiledHeader="0"
/>
</FileConfiguration>
<FileConfiguration
Name="Debug vm|Win32"
>
<Tool
Name="VCCLCompilerTool"
UsePrecompiledHeader="0"
/>
</FileConfiguration>
<FileConfiguration
Name="Release vm|Win32"
>
<Tool
Name="VCCLCompilerTool"
UsePrecompiledHeader="0"
/>
</FileConfiguration>
</File>
<File
RelativePath="..\..\x86\ix86\ix86_sse_helpers.h"

View File

@ -18,14 +18,9 @@
#pragma once
#include "PrecompiledHeader.h"
#include <vector>
#include <map>
#include <map> // used by BaseBlockEx
#include <utility>
// used to keep block information
#define BLOCKTYPE_DELAYSLOT 1 // if bit set, delay slot
// Every potential jump point in the PS2's addressable memory has a BASEBLOCK
// associated with it. So that means a BASEBLOCK for every 4 bytes of PS2
// addressable memory. Yay!
@ -119,7 +114,6 @@ public:
}
};
#define GET_BLOCKTYPE(b) ((b)->Type)
#define PC_GETBLOCK_(x, reclut) ((BASEBLOCK*)(reclut[((u32)(x)) >> 16] + (x)*(sizeof(BASEBLOCK)/4)))
static void recLUT_SetPage(uptr reclut[0x10000], uptr hwlut[0x10000],

View File

@ -18,7 +18,7 @@
#include "PrecompiledHeader.h"
#include "Misc.h"
#include "System.h"
#include "iR5900.h"
#include "Vif.h"
#include "VU.h"

View File

@ -24,6 +24,8 @@
#include "PrecompiledHeader.h"
#include "iR3000A.h"
#include "BaseblockEx.h"
#include <time.h>
#ifndef _WIN32

View File

@ -18,12 +18,10 @@
#ifndef _R3000A_SUPERREC_
#define _R3000A_SUPERREC_
#define _EmitterId_ EmitterId_R3000a
#include "ix86/ix86.h"
#include "R3000A.h"
#include "iCore.h"
#include "BaseblockEx.h"
// Cycle penalties for particularly slow instructions.
static const int psxInstCycles_Mult = 7;

View File

@ -19,13 +19,11 @@
#ifndef __IR5900_H__
#define __IR5900_H__
#define _EmitterId_ EmitterId_R5900
#include "ix86/ix86.h"
#include "ix86/ix86_sse_helpers.h"
#include "R5900.h"
#include "VU.h"
#include "iCore.h"
#include "BaseblockEx.h" // needed for recClear and stuff
// Yay! These work now! (air) ... almost (air)
#define ARITHMETICIMM_RECOMPILE

View File

@ -17,7 +17,7 @@
*/
#include "PrecompiledHeader.h"
#include "Misc.h"
#include "System.h"
#include "iR5900.h"
#include "Vif.h"
#include "VU.h"

View File

@ -30,6 +30,9 @@
#include "iR5900Jump.h"
#include "iR5900LoadStore.h"
#include "iR5900Move.h"
#include "BaseblockEx.h"
#include "iMMI.h"
#include "iFPU.h"
#include "iCOP0.h"

View File

@ -27,7 +27,7 @@
#include "PrecompiledHeader.h"
#include "System.h"
#include "ix86.h"
#include "ix86_internal.h"
__threadlocal u8 *x86Ptr;
__threadlocal u8 *j8Ptr[32];
@ -39,198 +39,266 @@ PCSX2_ALIGNED16(float f[4]);
XMMSSEType g_xmmtypes[XMMREGS] = { XMMT_INT };
namespace x86Emitter
namespace x86Emitter {
x86IndexerType ptr;
//////////////////////////////////////////////////////////////////////////////////////////
//
const x86Register x86Register::Empty( -1 );
const x86Register eax( 0 );
const x86Register ebx( 3 );
const x86Register ecx( 1 );
const x86Register edx( 2 );
const x86Register esi( 6 );
const x86Register edi( 7 );
const x86Register ebp( 5 );
const x86Register esp( 4 );
const x86Register16 ax( 0 );
const x86Register16 bx( 3 );
const x86Register16 cx( 1 );
const x86Register16 dx( 2 );
const x86Register16 si( 6 );
const x86Register16 di( 7 );
const x86Register16 bp( 5 );
const x86Register16 sp( 4 );
const x86Register8 al( 0 );
const x86Register8 cl( 1 );
const x86Register8 dl( 2 );
const x86Register8 bl( 3 );
const x86Register8 ah( 4 );
const x86Register8 ch( 5 );
const x86Register8 dh( 6 );
const x86Register8 bh( 7 );
//////////////////////////////////////////////////////////////////////////////////////////
// x86Register Method Implementations
//
x86ModRm x86Register::operator+( const x86Register& right ) const
{
x86IndexerType ptr;
return x86ModRm( *this, right );
}
//////////////////////////////////////////////////////////////////////////////////////////
//
const x86Register x86Register::Empty( -1 );
x86ModRm x86Register::operator+( const x86ModRm& right ) const
{
return right + *this;
}
const x86Register eax( 0 );
const x86Register ebx( 3 );
const x86Register ecx( 1 );
const x86Register edx( 2 );
const x86Register esi( 6 );
const x86Register edi( 7 );
const x86Register ebp( 5 );
const x86Register esp( 4 );
const x86Register16 ax( 0 );
const x86Register16 bx( 3 );
const x86Register16 cx( 1 );
const x86Register16 dx( 2 );
const x86Register16 si( 6 );
const x86Register16 di( 7 );
const x86Register16 bp( 5 );
const x86Register16 sp( 4 );
const x86Register8 al( 0 );
const x86Register8 cl( 1 );
const x86Register8 dl( 2 );
const x86Register8 bl( 3 );
const x86Register8 ah( 4 );
const x86Register8 ch( 5 );
const x86Register8 dh( 6 );
const x86Register8 bh( 7 );
//////////////////////////////////////////////////////////////////////////////////////////
// x86Register Method Implementations
//
x86ModRm x86Register::operator+( const x86Register& right ) const
//////////////////////////////////////////////////////////////////////////////////////////
// x86ModRm Method Implementations
//
x86ModRm& x86ModRm::Add( const x86Register& src )
{
if( src == Index )
{
return x86ModRm( *this, right );
Factor++;
}
else if( src == Base )
{
// Compound the existing register reference into the Index/Scale pair.
Base = x86Register::Empty;
x86ModRm x86Register::operator+( const x86ModRm& right ) const
{
return right + *this;
}
//////////////////////////////////////////////////////////////////////////////////////////
// ModSib Method Implementations
//
x86ModRm x86ModRm::FromIndexReg( x86Register index, int scale, int displacement )
{
return x86ModRm( x86Register::Empty, index, scale, displacement );
}
x86Register x86ModRm::GetEitherReg() const
{
return Base.IsEmpty() ? Base : Index;
}
x86ModRm& x86ModRm::Add( const x86Register& src )
{
if( src == Index )
{
Factor++;
}
else if( src == Base )
else
{
// Compound the existing register reference into the Index/Scale pair.
Base = x86Register::Empty;
if( src == Index )
Factor++;
else
{
jASSUME( Index.IsEmpty() ); // or die if we already have an index!
Index = src;
Factor = 2;
}
}
else if( Base.IsEmpty() )
Base = src;
else if( Index.IsEmpty() )
jASSUME( Index.IsEmpty() ); // or die if we already have an index!
Index = src;
else
assert( false ); // oops, only 2 regs allowed per ModRm!
return *this;
}
x86ModRm& x86ModRm::Add( const x86ModRm& src )
{
Add( src.Base );
Add( src.Displacement );
// If the factor is 1, we can just treat index like a base register also.
if( src.Factor == 1 )
{
Add( src.Index );
}
else if( Index.IsEmpty() )
{
Index = src.Index;
Factor = 1;
}
else if( Index == src.Index )
Factor++;
else
assert( false ); // oops, only 2 regs allowed!
return *this;
}
x86ModRm x86ptr( x86Register base ) { return x86ModRm( base ); }
// ------------------------------------------------------------------------
// Generates a 'reduced' ModSib form, which has valid Base, Index, and Scale values.
// Necessary because by default ModSib compounds registers into Index when possible.
//
void ModSib::Reduce()
{
// If no index reg, then nothing for us to do...
if( Index.IsEmpty() || Scale == 0 ) return;
// The Scale has a series of valid forms, all shown here:
switch( Scale )
{
case 1: Scale = 0; break;
case 2: Scale = 1; break;
case 3: // becomes [reg*2+reg]
jASSUME( Base.IsEmpty() );
Base = Index;
Scale = 1;
break;
case 4: Scale = 2; break;
case 5: // becomes [reg*4+reg]
jASSUME( Base.IsEmpty() );
Base = Index;
Scale = 2;
break;
case 6: // invalid!
assert( false );
break;
case 7: // so invalid!
assert( false );
break;
case 8: Scale = 3; break;
case 9: // becomes [reg*8+reg]
jASSUME( Base.IsEmpty() );
Base = Index;
Scale = 3;
break;
Factor = 2;
}
}
else if( Base.IsEmpty() )
Base = src;
else if( Index.IsEmpty() )
Index = src;
else
assert( false ); // oops, only 2 regs allowed per ModRm!
ModSib::ModSib( const x86ModRm& src ) :
Base( src.Base ),
Index( src.Index ),
Scale( src.Factor ),
Displacement( src.Displacement )
return *this;
}
x86ModRm& x86ModRm::Add( const x86ModRm& src )
{
Add( src.Base );
Add( src.Displacement );
// If the factor is 1, we can just treat index like a base register also.
if( src.Factor == 1 )
{
Reduce();
Add( src.Index );
}
else if( Index.IsEmpty() )
{
Index = src.Index;
Factor = 1;
}
else if( Index == src.Index )
Factor++;
else
assert( false ); // oops, only 2 regs allowed!
return *this;
}
//////////////////////////////////////////////////////////////////////////////////////////
// ModSib Method Implementations
//
// ------------------------------------------------------------------------
// Generates a 'reduced' ModSib form, which has valid Base, Index, and Scale values.
// Necessary because by default ModSib compounds registers into Index when possible.
//
void ModSib::Reduce()
{
// If no index reg, then nothing for us to do...
if( Index.IsEmpty() || Scale == 0 ) return;
ModSib::ModSib( x86Register base, x86Register index, int scale, s32 displacement ) :
Base( base ),
Index( index ),
Scale( scale ),
Displacement( displacement )
// The Scale has a series of valid forms, all shown here:
switch( Scale )
{
Reduce();
}
case 1: Scale = 0; break;
case 2: Scale = 1; break;
ModSib::ModSib( s32 displacement ) :
Base(),
Index(),
Scale(0),
Displacement( displacement )
{
}
case 3: // becomes [reg*2+reg]
jASSUME( Base.IsEmpty() );
Base = Index;
Scale = 1;
break;
case 4: Scale = 2; break;
x86Register ModSib::GetEitherReg() const
{
return Base.IsEmpty() ? Base : Index;
case 5: // becomes [reg*4+reg]
jASSUME( Base.IsEmpty() );
Base = Index;
Scale = 2;
break;
case 6: // invalid!
assert( false );
break;
case 7: // so invalid!
assert( false );
break;
case 8: Scale = 3; break;
case 9: // becomes [reg*8+reg]
jASSUME( Base.IsEmpty() );
Base = Index;
Scale = 3;
break;
}
}
ModSib::ModSib( const x86ModRm& src ) :
Base( src.Base ),
Index( src.Index ),
Scale( src.Factor ),
Displacement( src.Displacement )
{
Reduce();
}
ModSib::ModSib( x86Register base, x86Register index, int scale, s32 displacement ) :
Base( base ),
Index( index ),
Scale( scale ),
Displacement( displacement )
{
Reduce();
}
ModSib::ModSib( s32 displacement ) :
Base(),
Index(),
Scale(0),
Displacement( displacement )
{
}
x86Register ModSib::GetEitherReg() const
{
return Base.IsEmpty() ? Base : Index;
}
// ------------------------------------------------------------------------
// returns TRUE if this instruction requires SIB to be encoded, or FALSE if the
// instruction can be encoded as ModRm alone.
emitterT bool NeedsSibMagic( const ModSib& info )
{
// no registers? no sibs!
if( info.Base.IsEmpty() && info.Index.IsEmpty() ) return false;
// A scaled register needs a SIB
if( info.Scale != 0 && !info.Index.IsEmpty() ) return true;
// two registers needs a SIB
if( !info.Base.IsEmpty() && !info.Index.IsEmpty() ) return true;
// If register is ESP, then we need a SIB:
if( info.Base == esp || info.Index == esp ) return true;
return false;
}
// ------------------------------------------------------------------------
// Conditionally generates Sib encoding information!
//
// regfield - register field to be written to the ModRm. This is either a register specifier
// or an opcode extension. In either case, the instruction determines the value for us.
//
emitterT void EmitSibMagic( int regfield, const ModSib& info )
{
int displacement_size = (info.Displacement == 0) ? 0 :
( ( info.IsByteSizeDisp() ) ? 1 : 2 );
if( !NeedsSibMagic( info ) )
{
// Use ModRm-only encoding, with the rm field holding an index/base register, if
// one has been specified. If neither register is specified then use Disp32 form,
// which is encoded as "EBP w/o displacement" (which is why EBP must always be
// encoded *with* a displacement of 0, if it would otherwise not have one).
x86Register basereg = info.GetEitherReg();
if( basereg.IsEmpty() )
ModRM( 0, regfield, ModRm_UseDisp32 );
else
{
if( basereg == ebp && displacement_size == 0 )
displacement_size = 1; // forces [ebp] to be encoded as [ebp+0]!
ModRM( displacement_size, regfield, basereg.Id );
}
}
else
{
ModRM( displacement_size, regfield, ModRm_UseSib );
SibSB( info.Index.Id, info.Scale, info.Base.Id );
}
switch( displacement_size )
{
case 0: break;
case 1: write8( info.Displacement ); break;
case 2: write32( info.Displacement ); break;
jNO_DEFAULT
}
}
// ------------------------------------------------------------------------
// Conditionally generates Sib encoding information!
//
// regfield - register field to be written to the ModRm. This is either a register specifier
// or an opcode extension. In either case, the instruction determines the value for us.
//
emitterT void EmitSibMagic( x86Register regfield, const ModSib& info )
{
EmitSibMagic( regfield.Id, info );
}
}

File diff suppressed because it is too large

View File

@ -16,7 +16,8 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*/
#pragma once
#include "PrecompiledHeader.h"
#include "ix86_internal.h"
//------------------------------------------------------------------
// 3DNOW instructions

View File

@ -18,10 +18,8 @@
#include "PrecompiledHeader.h"
#define _EmitterId_ 0
#include "ix86.h"
#include "Misc.h"
#include "ix86_internal.h"
#include "System.h"
#include "Threading.h"
#include "RedtapeWindows.h"

View File

@ -16,8 +16,8 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*/
#pragma once
//#include "PrecompiledHeader.h"
#include "PrecompiledHeader.h"
#include "ix86_internal.h"
//------------------------------------------------------------------
// FPU instructions

View File

@ -16,7 +16,8 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*/
#pragma once
#include "PrecompiledHeader.h"
#include "ix86_internal.h"
//------------------------------------------------------------------
// x86 Group 1 Instructions
@ -32,85 +33,6 @@
namespace x86Emitter {
static const int ModRm_UseSib = 4; // same index value as ESP (used in RM field)
static const int ModRm_UseDisp32 = 5; // same index value as EBP (used in Mod field)
// ------------------------------------------------------------------------
// returns TRUE if this instruction requires SIB to be encoded, or FALSE if the
// instruction can be encoded as ModRm alone.
emitterT bool NeedsSibMagic( const ModSib& info )
{
// no registers? no sibs!
if( info.Base.IsEmpty() && info.Index.IsEmpty() ) return false;
// A scaled register needs a SIB
if( info.Scale != 0 && !info.Index.IsEmpty() ) return true;
// two registers needs a SIB
if( !info.Base.IsEmpty() && !info.Index.IsEmpty() ) return true;
// If register is ESP, then we need a SIB:
if( info.Base == esp || info.Index == esp ) return true;
return false;
}
// ------------------------------------------------------------------------
// Conditionally generates Sib encoding information!
//
// regfield - register field to be written to the ModRm. This is either a register specifier
// or an opcode extension. In either case, the instruction determines the value for us.
//
emitterT void EmitSibMagic( int regfield, const ModSib& info )
{
int displacement_size = (info.Displacement == 0) ? 0 :
( ( info.IsByteSizeDisp() ) ? 1 : 2 );
if( !NeedsSibMagic( info ) )
{
// Use ModRm-only encoding, with the rm field holding an index/base register, if
// one has been specified. If neither register is specified then use Disp32 form,
// which is encoded as "EBP w/o displacement" (which is why EBP must always be
// encoded *with* a displacement of 0, if it would otherwise not have one).
x86Register basereg = info.GetEitherReg();
if( basereg.IsEmpty() )
ModRM( 0, regfield, ModRm_UseDisp32 );
else
{
if( basereg == ebp && displacement_size == 0 )
displacement_size = 1; // forces [ebp] to be encoded as [ebp+0]!
ModRM( displacement_size, regfield, basereg.Id );
}
}
else
{
ModRM( displacement_size, regfield, ModRm_UseSib );
SibSB( info.Index.Id, info.Scale, info.Base.Id );
}
switch( displacement_size )
{
case 0: break;
case 1: write8( info.Displacement ); break;
case 2: write32( info.Displacement ); break;
jNO_DEFAULT
}
}
// ------------------------------------------------------------------------
// Conditionally generates Sib encoding information!
//
// regfield - register field to be written to the ModRm. This is either a register specifier
// or an opcode extension. In either case, the instruction determines the value for us.
//
emitterT void EmitSibMagic( x86Register regfield, const ModSib& info )
{
EmitSibMagic( regfield.Id, info );
}
enum Group1InstructionType
{
G1Type_ADD=0,
@ -245,9 +167,9 @@ static __forceinline x86Emitter::x86ModRm _mrmhlp( x86IntRegType src )
emitterT void cod##32MtoR( x86IntRegType to, uptr from ) { x86Emitter::lwr##32( _reghlp(to), (void*)from ); } \
emitterT void cod##32RtoM( uptr to, x86IntRegType from ) { x86Emitter::lwr##32( (void*)to, _reghlp(from) ); } \
emitterT void cod##32ItoM( uptr to, u32 imm ) { x86Emitter::lwr##32( (void*)to, imm ); } \
emitterT void cod##32ItoRm( x86IntRegType to, u32 imm, int offset=0 ){ x86Emitter::lwr##32( _mrmhlp(to) + offset, imm ); } \
emitterT void cod##32RmtoR( x86IntRegType to, x86IntRegType from, int offset=0 ) { x86Emitter::lwr##32( _reghlp(to), _mrmhlp(from) + offset ); } \
emitterT void cod##32RtoRm( x86IntRegType to, x86IntRegType from, int offset=0 ) { x86Emitter::lwr##32( _mrmhlp(to) + offset, _reghlp(from) ); }
emitterT void cod##32ItoRm( x86IntRegType to, u32 imm, int offset ){ x86Emitter::lwr##32( _mrmhlp(to) + offset, imm ); } \
emitterT void cod##32RmtoR( x86IntRegType to, x86IntRegType from, int offset ) { x86Emitter::lwr##32( _reghlp(to), _mrmhlp(from) + offset ); } \
emitterT void cod##32RtoRm( x86IntRegType to, x86IntRegType from, int offset ) { x86Emitter::lwr##32( _mrmhlp(to) + offset, _reghlp(from) ); }
DEFINE_GROUP1_OPCODE_LEGACY( add, ADD );
DEFINE_GROUP1_OPCODE_LEGACY( cmp, CMP );

View File

@ -0,0 +1,43 @@
#pragma once
#include "ix86.h"
//------------------------------------------------------------------
// Helper Macros
//------------------------------------------------------------------
#define MEMADDR(addr, oplen) (addr)
#define Rex(w,r,x,b) assert(0)
#define RexR(w, reg) assert( !(w || (reg)>=8) )
#define RexB(w, base) assert( !(w || (base)>=8) )
#define RexRB(w, reg, base) assert( !(w || (reg) >= 8 || (base)>=8) )
#define RexRXB(w, reg, index, base) assert( !(w || (reg) >= 8 || (index) >= 8 || (base) >= 8) )
#define _MM_MK_INSERTPS_NDX(srcField, dstField, zeroMask) (((srcField)<<6) | ((dstField)<<4) | (zeroMask))
static const int ModRm_UseSib = 4; // same index value as ESP (used in RM field)
static const int ModRm_UseDisp32 = 5; // same index value as EBP (used in Mod field)
//------------------------------------------------------------------
// General Emitter Helper functions
//------------------------------------------------------------------
namespace x86Emitter
{
extern void EmitSibMagic( int regfield, const ModSib& info );
extern void EmitSibMagic( x86Register regfield, const ModSib& info );
extern bool NeedsSibMagic( const ModSib& info );
}
// From here out are the legacy (old) emitter functions...
extern void WriteRmOffsetFrom(x86IntRegType to, x86IntRegType from, int offset);
extern void ModRM( int mod, int reg, int rm );
extern void SibSB( int ss, int index, int base );
extern void SET8R( int cc, int to );
extern u8* J8Rel( int cc, int to );
extern u32* J32Rel( int cc, u32 to );
extern u64 GetCPUTick( void );
//------------------------------------------------------------------

View File

@ -27,14 +27,12 @@
#pragma once
//------------------------------------------------------------------
// ix86 instructions
// ix86 legacy emitter functions
//------------------------------------------------------------------
#include "PrecompiledHeader.h"
#include "System.h"
#include "ix86.h"
#include "ix86_group1.inl"
#include "ix86_internal.h"
// Note: the 'to' field can either be a register or a special opcode extension specifier
// depending on the opcode's encoding.
@ -46,7 +44,7 @@ emitterT void WriteRmOffsetFrom(x86IntRegType to, x86IntRegType from, int offset
ModRM( 0, to, 0x4 );
SibSB( 0, 0x4, 0x4 );
}
else if( offset <= 127 && offset >= -128 ) {
else if( is_s8( offset ) ) {
ModRM( 1, to, 0x4 );
SibSB( 0, 0x4, 0x4 );
write8(offset);
@ -61,7 +59,7 @@ emitterT void WriteRmOffsetFrom(x86IntRegType to, x86IntRegType from, int offset
if( offset == 0 ) {
ModRM( 0, to, from );
}
else if( offset <= 127 && offset >= -128 ) {
else if( is_s8( offset ) ) {
ModRM( 1, to, from );
write8(offset);
}
@ -136,8 +134,13 @@ emitterT void x86SetPtr( u8* ptr )
x86Ptr = ptr;
}
////////////////////////////////////////////////////
emitterT void x86SetJ8( u8* j8 )
//////////////////////////////////////////////////////////////////////////////////////////
// Jump Label API (as rough as it might be)
//
// I don't auto-inline these because of the console logging in case of error, which tends
// to cause quite a bit of code bloat.
//
void x86SetJ8( u8* j8 )
{
u32 jump = ( x86Ptr - j8 ) - 1;
@ -148,7 +151,7 @@ emitterT void x86SetJ8( u8* j8 )
*j8 = (u8)jump;
}
emitterT void x86SetJ8A( u8* j8 )
void x86SetJ8A( u8* j8 )
{
u32 jump = ( x86Ptr - j8 ) - 1;
@ -169,26 +172,6 @@ emitterT void x86SetJ8A( u8* j8 )
*j8 = (u8)jump;
}
emitterT void x86SetJ16( u16 *j16 )
{
// doesn't work
u32 jump = ( x86Ptr - (u8*)j16 ) - 2;
if ( jump > 0x7fff ) {
Console::Error( "j16 greater than 0x7fff!!" );
assert(0);
}
*j16 = (u16)jump;
}
emitterT void x86SetJ16A( u16 *j16 )
{
if( ((uptr)x86Ptr&0xf) > 4 ) {
while((uptr)x86Ptr&0xf) *x86Ptr++ = 0x90;
}
x86SetJ16(j16);
}
////////////////////////////////////////////////////
emitterT void x86SetJ32( u32* j32 )
{
@ -211,25 +194,29 @@ emitterT void x86Align( int bytes )
////////////////////////////////////////////////////
// Generates executable code to align to the given alignment (could be useful for the second leg
// of if/else conditionals, which usually fall through a jump target label).
emitterT void x86AlignExecutable( int align )
//
// Note: Left in for now just in case, but usefulness is moot. Only K8's and older (non-Prescott)
// P4s benefit from this, and we don't optimize for those platforms anyway.
//
void x86AlignExecutable( int align )
{
uptr newx86 = ( (uptr)x86Ptr + align - 1) & ~( align - 1 );
uptr bytes = ( newx86 - (uptr)x86Ptr );
switch( bytes )
{
case 0: break;
case 0: break;
case 1: NOP(); break;
case 2: MOV32RtoR( ESI, ESI ); break;
case 3: write8(0x08D); write8(0x024); write8(0x024); break;
case 5: NOP(); // falls through to 4...
case 4: write8(0x08D); write8(0x064); write8(0x024); write8(0); break;
case 6: write8(0x08D); write8(0x0B6); write32(0); break;
case 8: NOP(); // falls through to 7...
case 7: write8(0x08D); write8(0x034); write8(0x035); write32(0); break;
case 1: NOP(); break;
case 2: MOV32RtoR( ESI, ESI ); break;
case 3: write8(0x08D); write8(0x024); write8(0x024); break;
case 5: NOP(); // falls through to 4...
case 4: write8(0x08D); write8(0x064); write8(0x024); write8(0); break;
case 6: write8(0x08D); write8(0x0B6); write32(0); break;
case 8: NOP(); // falls through to 7...
case 7: write8(0x08D); write8(0x034); write8(0x035); write32(0); break;
default:
default:
{
// for larger alignments, just use a JMP...
u8* aligned_target = JMP8(0);
@ -242,7 +229,7 @@ emitterT void x86AlignExecutable( int align )
}
/********************/
/* IX86 intructions */
/* IX86 instructions */
/********************/
emitterT void STC( void )
@ -300,7 +287,7 @@ emitterT void MOV32MtoR( x86IntRegType to, uptr from )
write32( MEMADDR(from, 4) );
}
emitterT void MOV32RmtoR( x86IntRegType to, x86IntRegType from, int offset=0 )
emitterT void MOV32RmtoR( x86IntRegType to, x86IntRegType from, int offset )
{
RexRB(0, to, from);
write8( 0x8B );
@ -308,7 +295,7 @@ emitterT void MOV32RmtoR( x86IntRegType to, x86IntRegType from, int offset=0 )
}
/* mov [r32+r32*scale] to r32 */
emitterT void MOV32RmStoR( x86IntRegType to, x86IntRegType from, x86IntRegType from2, int scale=0 )
emitterT void MOV32RmStoR( x86IntRegType to, x86IntRegType from, x86IntRegType from2, int scale )
{
RexRXB(0,to,from2,from);
write8( 0x8B );
@ -317,7 +304,7 @@ emitterT void MOV32RmStoR( x86IntRegType to, x86IntRegType from, x86IntRegType f
}
// mov r32 to [r32<<scale+from2]
emitterT void MOV32RmSOffsettoR( x86IntRegType to, x86IntRegType from1, int from2, int scale=0 )
emitterT void MOV32RmSOffsettoR( x86IntRegType to, x86IntRegType from1, int from2, int scale )
{
RexRXB(0,to,from1,0);
write8( 0x8B );
@ -327,7 +314,7 @@ emitterT void MOV32RmSOffsettoR( x86IntRegType to, x86IntRegType from1, int from
}
/* mov r32 to [r32][r32*scale] */
emitterT void MOV32RtoRmS( x86IntRegType to, x86IntRegType from, x86IntRegType from2, int scale=0 )
emitterT void MOV32RtoRmS( x86IntRegType to, x86IntRegType from, x86IntRegType from2, int scale )
{
RexRXB(0, to, from2, from);
write8( 0x89 );
@ -353,7 +340,7 @@ emitterT void MOV32ItoM(uptr to, u32 from )
}
// mov imm32 to [r32+off]
emitterT void MOV32ItoRm( x86IntRegType to, u32 from, int offset=0)
emitterT void MOV32ItoRm( x86IntRegType to, u32 from, int offset)
{
RexB(0,to);
write8( 0xC7 );
@ -362,7 +349,7 @@ emitterT void MOV32ItoRm( x86IntRegType to, u32 from, int offset=0)
}
// mov r32 to [r32+off]
emitterT void MOV32RtoRm( x86IntRegType to, x86IntRegType from, int offset=0)
emitterT void MOV32RtoRm( x86IntRegType to, x86IntRegType from, int offset)
{
RexRB(0,from,to);
write8( 0x89 );
@ -389,7 +376,7 @@ emitterT void MOV16MtoR( x86IntRegType to, uptr from )
write32( MEMADDR(from, 4) );
}
emitterT void MOV16RmtoR( x86IntRegType to, x86IntRegType from, int offset=0 )
emitterT void MOV16RmtoR( x86IntRegType to, x86IntRegType from, int offset )
{
write8( 0x66 );
RexRB(0,to,from);
@ -397,7 +384,7 @@ emitterT void MOV16RmtoR( x86IntRegType to, x86IntRegType from, int offset=0 )
WriteRmOffsetFrom(to, from, offset);
}
emitterT void MOV16RmSOffsettoR( x86IntRegType to, x86IntRegType from1, u32 from2, int scale=0 )
emitterT void MOV16RmSOffsettoR( x86IntRegType to, x86IntRegType from1, u32 from2, int scale )
{
write8(0x66);
RexRXB(0,to,from1,0);
@ -418,7 +405,7 @@ emitterT void MOV16ItoM( uptr to, u16 from )
}
/* mov r16 to [r32][r32*scale] */
emitterT void MOV16RtoRmS( x86IntRegType to, x86IntRegType from, x86IntRegType from2, int scale=0 )
emitterT void MOV16RtoRmS( x86IntRegType to, x86IntRegType from, x86IntRegType from2, int scale )
{
write8( 0x66 );
RexRXB(0,to,from2,from);
@ -445,7 +432,7 @@ emitterT void MOV16ItoRm( x86IntRegType to, u16 from, u32 offset=0 )
}
// mov r16 to [r16+off]
emitterT void MOV16RtoRm( x86IntRegType to, x86IntRegType from, int offset=0 )
emitterT void MOV16RtoRm( x86IntRegType to, x86IntRegType from, int offset )
{
write8(0x66);
RexRB(0,from,to);
@ -471,14 +458,14 @@ emitterT void MOV8MtoR( x86IntRegType to, uptr from )
write32( MEMADDR(from, 4) );
}
emitterT void MOV8RmtoR(x86IntRegType to, x86IntRegType from, int offset=0)
emitterT void MOV8RmtoR(x86IntRegType to, x86IntRegType from, int offset)
{
RexRB(0,to,from);
write8( 0x8A );
WriteRmOffsetFrom(to, from, offset);
}
emitterT void MOV8RmSOffsettoR( x86IntRegType to, x86IntRegType from1, u32 from2, int scale=0 )
emitterT void MOV8RmSOffsettoR( x86IntRegType to, x86IntRegType from1, u32 from2, int scale )
{
RexRXB(0,to,from1,0);
write8( 0x8A );
@ -505,7 +492,7 @@ emitterT void MOV8ItoR( x86IntRegType to, u8 from )
}
// mov imm8 to [r8+off]
emitterT void MOV8ItoRm( x86IntRegType to, u8 from, int offset=0)
emitterT void MOV8ItoRm( x86IntRegType to, u8 from, int offset)
{
assert( to != ESP );
RexB(0,to);
@ -515,7 +502,7 @@ emitterT void MOV8ItoRm( x86IntRegType to, u8 from, int offset=0)
}
// mov r8 to [r8+off]
emitterT void MOV8RtoRm( x86IntRegType to, x86IntRegType from, int offset=0)
emitterT void MOV8RtoRm( x86IntRegType to, x86IntRegType from, int offset)
{
assert( to != ESP );
RexRB(0,from,to);
@ -531,14 +518,7 @@ emitterT void MOVSX32R8toR( x86IntRegType to, x86IntRegType from )
ModRM( 3, to, from );
}
emitterT void MOVSX32Rm8toR( x86IntRegType to, x86IntRegType from )
{
RexRB(0,to,from);
write16( 0xBE0F );
ModRM( 0, to, from );
}
emitterT void MOVSX32Rm8toROffset( x86IntRegType to, x86IntRegType from, int offset )
emitterT void MOVSX32Rm8toR( x86IntRegType to, x86IntRegType from, int offset )
{
RexRB(0,to,from);
write16( 0xBE0F );
@ -562,7 +542,7 @@ emitterT void MOVSX32R16toR( x86IntRegType to, x86IntRegType from )
ModRM( 3, to, from );
}
emitterT void MOVSX32Rm16toR( x86IntRegType to, x86IntRegType from, int offset=0 )
emitterT void MOVSX32Rm16toR( x86IntRegType to, x86IntRegType from, int offset )
{
RexRB(0,to,from);
write16( 0xBF0F );
@ -586,7 +566,7 @@ emitterT void MOVZX32R8toR( x86IntRegType to, x86IntRegType from )
ModRM( 3, to, from );
}
emitterT void MOVZX32Rm8toR( x86IntRegType to, x86IntRegType from, int offset=0 )
emitterT void MOVZX32Rm8toR( x86IntRegType to, x86IntRegType from, int offset )
{
RexRB(0,to,from);
write16( 0xB60F );
@ -610,7 +590,7 @@ emitterT void MOVZX32R16toR( x86IntRegType to, x86IntRegType from )
ModRM( 3, to, from );
}
emitterT void MOVZX32Rm16toR( x86IntRegType to, x86IntRegType from, int offset=0 )
emitterT void MOVZX32Rm16toR( x86IntRegType to, x86IntRegType from, int offset )
{
RexRB(0,to,from);
write16( 0xB70F );
@ -837,12 +817,12 @@ emitterT void ADD16ItoR( x86IntRegType to, s16 imm )
write8( 0x66 );
RexB(0,to);
if ( to == EAX)
if (to == EAX)
{
write8( 0x05 );
write16( imm );
}
else if(imm <= 127 && imm >= -128)
else if(is_s8(imm))
{
write8( 0x83 );
ModRM( 3, 0, to );
@ -860,7 +840,7 @@ emitterT void ADD16ItoR( x86IntRegType to, s16 imm )
emitterT void ADD16ItoM( uptr to, s16 imm )
{
write8( 0x66 );
if(imm <= 127 && imm >= -128)
if(is_s8(imm))
{
write8( 0x83 );
ModRM( 0, 0, DISP32 );
@ -1776,31 +1756,6 @@ emitterT u8* JNO8( u8 to )
{
return J8Rel( 0x71, to );
}
/* Untested and slower, use 32bit versions instead
// ja rel16
emitterT u16* eJA16( u16 to )
{
return J16Rel( 0x87, to );
}
// jb rel16
emitterT u16* eJB16( u16 to )
{
return J16Rel( 0x82, to );
}
// je rel16
emitterT u16* eJE16( u16 to )
{
return J16Rel( 0x84, to );
}
// jz rel16
emitterT u16* eJZ16( u16 to )
{
return J16Rel( 0x84, to );
}
*/
// jb rel32
emitterT u32* JB32( u32 to )
{
@ -2271,7 +2226,7 @@ emitterT void LEA32RtoR(x86IntRegType to, x86IntRegType from, s32 offset)
ModRM(1, to, from);
write8(0x24);
}
else if( offset <= 127 && offset >= -128 ) {
else if( is_s8(offset) ) {
ModRM(1, to, from);
write8(0x24);
write8(offset);
@ -2286,7 +2241,7 @@ emitterT void LEA32RtoR(x86IntRegType to, x86IntRegType from, s32 offset)
if( offset == 0 && from != EBP && from!=ESP ) {
ModRM(0, to, from);
}
else if( offset <= 127 && offset >= -128 ) {
else if( is_s8(offset) ) {
ModRM(1, to, from);
write8(offset);
}
@ -2298,7 +2253,7 @@ emitterT void LEA32RtoR(x86IntRegType to, x86IntRegType from, s32 offset)
}
// to = from + offset
emitterT void LEA16RtoR(x86IntRegType to, x86IntRegType from, u16 offset)
emitterT void LEA16RtoR(x86IntRegType to, x86IntRegType from, s16 offset)
{
write8(0x66);
LEA32RtoR(to, from, offset);

View File

@ -16,7 +16,8 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*/
#pragma once
#include "PrecompiledHeader.h"
#include "ix86_internal.h"
//------------------------------------------------------------------
// MMX instructions
@ -482,32 +483,16 @@ emitterT void MOVQRtoR( x86MMXRegType to, x86MMXRegType from )
ModRM( 3, to, from );
}
emitterT void MOVQRmtoR( x86MMXRegType to, x86IntRegType from, int offset=0 )
emitterT void MOVQRmtoR( x86MMXRegType to, x86IntRegType from, int offset )
{
write16( 0x6F0F );
if( offset < 128 && offset >= -128) {
ModRM( 1, to, from );
write8(offset);
}
else {
ModRM( 2, to, from );
write32(offset);
}
WriteRmOffsetFrom( to, from, offset );
}
emitterT void MOVQRtoRm( x86IntRegType to, x86MMXRegType from, int offset=0 )
emitterT void MOVQRtoRm( x86IntRegType to, x86MMXRegType from, int offset )
{
write16( 0x7F0F );
if( offset < 128 && offset >= -128) {
ModRM( 1, from , to );
write8(offset);
}
else {
ModRM( 2, from, to );
write32(offset);
}
WriteRmOffsetFrom( from, to, offset );
}
/* movd m32 to r64 */
@ -532,24 +517,10 @@ emitterT void MOVD32RtoMMX( x86MMXRegType to, x86IntRegType from )
ModRM( 3, to, from );
}
emitterT void MOVD32RmtoMMX( x86MMXRegType to, x86IntRegType from )
emitterT void MOVD32RmtoMMX( x86MMXRegType to, x86IntRegType from, int offset )
{
write16( 0x6E0F );
ModRM( 0, to, from );
}
emitterT void MOVD32RmOffsettoMMX( x86MMXRegType to, x86IntRegType from, u32 offset )
{
write16( 0x6E0F );
if( offset < 128 ) {
ModRM( 1, to, from );
write8(offset);
}
else {
ModRM( 2, to, from );
write32(offset);
}
WriteRmOffsetFrom( to, from, offset );
}
emitterT void MOVD32MMXtoR( x86IntRegType to, x86MMXRegType from )
@ -558,46 +529,12 @@ emitterT void MOVD32MMXtoR( x86IntRegType to, x86MMXRegType from )
ModRM( 3, from, to );
}
emitterT void MOVD32MMXtoRm( x86IntRegType to, x86MMXRegType from )
emitterT void MOVD32MMXtoRm( x86IntRegType to, x86MMXRegType from, int offset )
{
write16( 0x7E0F );
ModRM( 0, from, to );
if( to >= 4 ) {
// no idea why
assert( to == ESP );
write8(0x24);
}
WriteRmOffsetFrom( from, to, offset );
}
emitterT void MOVD32MMXtoRmOffset( x86IntRegType to, x86MMXRegType from, u32 offset )
{
write16( 0x7E0F );
if( offset < 128 ) {
ModRM( 1, from, to );
write8(offset);
}
else {
ModRM( 2, from, to );
write32(offset);
}
}
///* movd r32 to r64 */
//emitterT void MOVD32MMXtoMMX( x86MMXRegType to, x86MMXRegType from )
//{
// write16( 0x6E0F );
// ModRM( 3, to, from );
//}
//
///* movq r64 to r32 */
//emitterT void MOVD64MMXtoMMX( x86MMXRegType to, x86MMXRegType from )
//{
// write16( 0x7E0F );
// ModRM( 3, from, to );
//}
// untested
emitterT void PACKSSWBMMXtoMMX(x86MMXRegType to, x86MMXRegType from)
{

View File

@ -16,7 +16,9 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*/
#pragma once
#include "PrecompiledHeader.h"
#include "ix86_internal.h"
#include "ix86_sse_helpers.h"
//////////////////////////////////////////////////////////////////////////////////////////
// AlwaysUseMovaps [const]
@ -144,7 +146,7 @@ static const bool AlwaysUseMovaps = true;
write8( op )
/* movups [r32][r32*scale] to xmm1 */
emitterT void SSE_MOVUPSRmStoR( x86SSERegType to, x86IntRegType from, x86IntRegType from2, int scale=0 )
emitterT void SSE_MOVUPSRmStoR( x86SSERegType to, x86IntRegType from, x86IntRegType from2, int scale )
{
RexRXB(0, to, from2, from);
write16( 0x100f );
@ -153,7 +155,7 @@ emitterT void SSE_MOVUPSRmStoR( x86SSERegType to, x86IntRegType from, x86IntRegT
}
/* movups xmm1 to [r32][r32*scale] */
emitterT void SSE_MOVUPSRtoRmS( x86SSERegType to, x86IntRegType from, x86IntRegType from2, int scale=0 )
emitterT void SSE_MOVUPSRtoRmS( x86SSERegType to, x86IntRegType from, x86IntRegType from2, int scale )
{
RexRXB(1, to, from2, from);
write16( 0x110f );
@ -185,7 +187,7 @@ emitterT void SSE_MOVLPSRmtoR( x86SSERegType to, x86IntRegType from )
ModRM( 0, to, from );
}
emitterT void SSE_MOVLPSRmtoR( x86SSERegType to, x86IntRegType from, int offset=0 )
emitterT void SSE_MOVLPSRmtoR( x86SSERegType to, x86IntRegType from, int offset )
{
RexRB(0, to, from);
write16( 0x120f );
@ -200,7 +202,7 @@ emitterT void SSE_MOVLPSRtoRm( x86IntRegType to, x86IntRegType from )
ModRM( 0, from, to );
}
emitterT void SSE_MOVLPSRtoRm( x86SSERegType to, x86IntRegType from, int offset=0 )
emitterT void SSE_MOVLPSRtoRm( x86SSERegType to, x86IntRegType from, int offset )
{
RexRB(0, from, to);
write16( 0x130f );
@ -208,7 +210,7 @@ emitterT void SSE_MOVLPSRtoRm( x86SSERegType to, x86IntRegType from, int offset=
}
/* movaps [r32][r32*scale] to xmm1 */
emitterT void SSE_MOVAPSRmStoR( x86SSERegType to, x86IntRegType from, x86IntRegType from2, int scale=0 )
emitterT void SSE_MOVAPSRmStoR( x86SSERegType to, x86IntRegType from, x86IntRegType from2, int scale )
{
assert( from != EBP );
RexRXB(0, to, from2, from);
@ -218,7 +220,7 @@ emitterT void SSE_MOVAPSRmStoR( x86SSERegType to, x86IntRegType from, x86IntRegT
}
/* movaps xmm1 to [r32][r32*scale] */
emitterT void SSE_MOVAPSRtoRmS( x86SSERegType to, x86IntRegType from, x86IntRegType from2, int scale=0 )
emitterT void SSE_MOVAPSRtoRmS( x86SSERegType to, x86IntRegType from, x86IntRegType from2, int scale )
{
assert( from != EBP );
RexRXB(0, to, from2, from);
@ -228,7 +230,7 @@ emitterT void SSE_MOVAPSRtoRmS( x86SSERegType to, x86IntRegType from, x86IntRegT
}
// movaps [r32+offset] to r32
emitterT void SSE_MOVAPSRmtoR( x86SSERegType to, x86IntRegType from, int offset=0 )
emitterT void SSE_MOVAPSRmtoR( x86SSERegType to, x86IntRegType from, int offset )
{
RexRB(0, to, from);
write16( 0x280f );
@ -236,7 +238,7 @@ emitterT void SSE_MOVAPSRmtoR( x86SSERegType to, x86IntRegType from, int offset=
}
// movaps r32 to [r32+offset]
emitterT void SSE_MOVAPSRtoRm( x86IntRegType to, x86SSERegType from, int offset=0 )
emitterT void SSE_MOVAPSRtoRm( x86IntRegType to, x86SSERegType from, int offset )
{
RexRB(0, from, to);
write16( 0x290f );
@ -244,7 +246,7 @@ emitterT void SSE_MOVAPSRtoRm( x86IntRegType to, x86SSERegType from, int offset=
}
// movdqa [r32+offset] to r32
emitterT void SSE2_MOVDQARmtoR( x86SSERegType to, x86IntRegType from, int offset=0 )
emitterT void SSE2_MOVDQARmtoR( x86SSERegType to, x86IntRegType from, int offset )
{
if( AlwaysUseMovaps )
SSE_MOVAPSRmtoR( to, from, offset );
@ -258,7 +260,7 @@ emitterT void SSE2_MOVDQARmtoR( x86SSERegType to, x86IntRegType from, int offset
}
// movdqa r32 to [r32+offset]
emitterT void SSE2_MOVDQARtoRm( x86IntRegType to, x86SSERegType from, int offset=0 )
emitterT void SSE2_MOVDQARtoRm( x86IntRegType to, x86SSERegType from, int offset )
{
if( AlwaysUseMovaps )
SSE_MOVAPSRtoRm( to, from, offset );
@ -272,7 +274,7 @@ emitterT void SSE2_MOVDQARtoRm( x86IntRegType to, x86SSERegType from, int offset
}
// movups [r32+offset] to r32
emitterT void SSE_MOVUPSRmtoR( x86SSERegType to, x86IntRegType from, int offset=0 )
emitterT void SSE_MOVUPSRmtoR( x86SSERegType to, x86IntRegType from, int offset )
{
RexRB(0, to, from);
write16( 0x100f );
@ -280,7 +282,7 @@ emitterT void SSE_MOVUPSRmtoR( x86SSERegType to, x86IntRegType from, int offset=
}
// movups r32 to [r32+offset]
emitterT void SSE_MOVUPSRtoRm( x86IntRegType to, x86SSERegType from, int offset=0 )
emitterT void SSE_MOVUPSRtoRm( x86IntRegType to, x86SSERegType from, int offset )
{
RexRB(0, from, to);
write16( 0x110f );
@ -335,7 +337,7 @@ emitterT void SSE_MOVSS_XMM_to_M32( u32 to, x86SSERegType from ) { SSE_SS_RtoM
emitterT void SSE_MOVSS_XMM_to_XMM( x86SSERegType to, x86SSERegType from ) { if (to != from) { SSE_SS_RtoR( 0x100f ); } }
emitterT void SSE_MOVSS_Rm_to_XMM( x86SSERegType to, x86IntRegType from, int offset=0 )
emitterT void SSE_MOVSS_Rm_to_XMM( x86SSERegType to, x86IntRegType from, int offset )
{
write8(0xf3);
RexRB(0, to, from);
@ -343,7 +345,7 @@ emitterT void SSE_MOVSS_Rm_to_XMM( x86SSERegType to, x86IntRegType from, int off
WriteRmOffsetFrom(to, from, offset);
}
emitterT void SSE_MOVSS_XMM_to_Rm( x86IntRegType to, x86SSERegType from, int offset=0 )
emitterT void SSE_MOVSS_XMM_to_Rm( x86IntRegType to, x86SSERegType from, int offset )
{
write8(0xf3);
RexRB(0, from, to);
@ -358,14 +360,14 @@ emitterT void SSE_MASKMOVDQU_XMM_to_XMM( x86SSERegType to, x86SSERegType from )
emitterT void SSE_MOVLPS_M64_to_XMM( x86SSERegType to, uptr from ) { SSEMtoR( 0x120f, 0 ); }
emitterT void SSE_MOVLPS_XMM_to_M64( u32 to, x86SSERegType from ) { SSERtoM( 0x130f, 0 ); }
emitterT void SSE_MOVLPS_Rm_to_XMM( x86SSERegType to, x86IntRegType from, int offset=0 )
emitterT void SSE_MOVLPS_Rm_to_XMM( x86SSERegType to, x86IntRegType from, int offset )
{
RexRB(0, to, from);
write16( 0x120f );
WriteRmOffsetFrom(to, from, offset);
}
emitterT void SSE_MOVLPS_XMM_to_Rm( x86IntRegType to, x86SSERegType from, int offset=0 )
emitterT void SSE_MOVLPS_XMM_to_Rm( x86IntRegType to, x86SSERegType from, int offset )
{
RexRB(0, from, to);
write16(0x130f);
@ -379,14 +381,14 @@ emitterT void SSE_MOVLPS_XMM_to_Rm( x86IntRegType to, x86SSERegType from, int of
emitterT void SSE_MOVHPS_M64_to_XMM( x86SSERegType to, uptr from ) { SSEMtoR( 0x160f, 0 ); }
emitterT void SSE_MOVHPS_XMM_to_M64( u32 to, x86SSERegType from ) { SSERtoM( 0x170f, 0 ); }
emitterT void SSE_MOVHPS_Rm_to_XMM( x86SSERegType to, x86IntRegType from, int offset=0 )
emitterT void SSE_MOVHPS_Rm_to_XMM( x86SSERegType to, x86IntRegType from, int offset )
{
RexRB(0, to, from);
write16( 0x160f );
WriteRmOffsetFrom(to, from, offset);
}
emitterT void SSE_MOVHPS_XMM_to_Rm( x86IntRegType to, x86SSERegType from, int offset=0 )
emitterT void SSE_MOVHPS_XMM_to_Rm( x86IntRegType to, x86SSERegType from, int offset )
{
RexRB(0, from, to);
write16(0x170f);
@ -900,7 +902,7 @@ emitterT void SSE2_MOVD_Rm_to_XMM( x86SSERegType to, x86IntRegType from )
ModRM( 0, to, from);
}
emitterT void SSE2_MOVD_Rm_to_XMM( x86SSERegType to, x86IntRegType from, int offset=0 )
emitterT void SSE2_MOVD_Rm_to_XMM( x86SSERegType to, x86IntRegType from, int offset )
{
write8(0x66);
RexRB(0, to, from);
@ -911,7 +913,7 @@ emitterT void SSE2_MOVD_Rm_to_XMM( x86SSERegType to, x86IntRegType from, int off
emitterT void SSE2_MOVD_XMM_to_M32( u32 to, x86SSERegType from ) { SSERtoM66(0x7E0F); }
emitterT void SSE2_MOVD_XMM_to_R( x86IntRegType to, x86SSERegType from ) { _SSERtoR66(0x7E0F); }
emitterT void SSE2_MOVD_XMM_to_Rm( x86IntRegType to, x86SSERegType from, int offset=0 )
emitterT void SSE2_MOVD_XMM_to_Rm( x86IntRegType to, x86SSERegType from, int offset )
{
write8(0x66);
RexRB(0, from, to);
@ -1400,3 +1402,160 @@ emitterT void SSE4_PMULDQ_XMM_to_XMM(x86SSERegType to, x86SSERegType from)
write24(0x28380F);
ModRM(3, to, from);
}
//////////////////////////////////////////////////////////////////////////////////////////
// SSE-X Helpers (generates either INT or FLOAT versions of certain SSE instructions)
// This header should always be included *after* ix86.h.
// Added AlwaysUseMovaps check to the relevant functions here, which helps reduce the
// overhead of dynarec instructions that use these, even though the same check would
// have been done redundantly by the emitter function.
emitterT void SSEX_MOVDQA_M128_to_XMM( x86SSERegType to, uptr from )
{
if( !AlwaysUseMovaps && g_xmmtypes[to] == XMMT_INT ) SSE2_MOVDQA_M128_to_XMM(to, from);
else SSE_MOVAPS_M128_to_XMM(to, from);
}
emitterT void SSEX_MOVDQA_XMM_to_M128( uptr to, x86SSERegType from )
{
if( !AlwaysUseMovaps && g_xmmtypes[from] == XMMT_INT ) SSE2_MOVDQA_XMM_to_M128(to, from);
else SSE_MOVAPS_XMM_to_M128(to, from);
}
emitterT void SSEX_MOVDQA_XMM_to_XMM( x86SSERegType to, x86SSERegType from )
{
if( !AlwaysUseMovaps && g_xmmtypes[from] == XMMT_INT ) SSE2_MOVDQA_XMM_to_XMM(to, from);
else SSE_MOVAPS_XMM_to_XMM(to, from);
}
emitterT void SSEX_MOVDQARmtoR( x86SSERegType to, x86IntRegType from, int offset )
{
if( !AlwaysUseMovaps && g_xmmtypes[to] == XMMT_INT ) SSE2_MOVDQARmtoR(to, from, offset);
else SSE_MOVAPSRmtoR(to, from, offset);
}
emitterT void SSEX_MOVDQARtoRm( x86IntRegType to, x86SSERegType from, int offset )
{
if( !AlwaysUseMovaps && g_xmmtypes[from] == XMMT_INT ) SSE2_MOVDQARtoRm(to, from, offset);
else SSE_MOVAPSRtoRm(to, from, offset);
}
emitterT void SSEX_MOVDQU_M128_to_XMM( x86SSERegType to, uptr from )
{
if( !AlwaysUseMovaps && g_xmmtypes[to] == XMMT_INT ) SSE2_MOVDQU_M128_to_XMM(to, from);
else SSE_MOVUPS_M128_to_XMM(to, from);
}
emitterT void SSEX_MOVDQU_XMM_to_M128( uptr to, x86SSERegType from )
{
if( !AlwaysUseMovaps && g_xmmtypes[from] == XMMT_INT ) SSE2_MOVDQU_XMM_to_M128(to, from);
else SSE_MOVUPS_XMM_to_M128(to, from);
}
emitterT void SSEX_MOVD_M32_to_XMM( x86SSERegType to, uptr from )
{
if( g_xmmtypes[to] == XMMT_INT ) SSE2_MOVD_M32_to_XMM(to, from);
else SSE_MOVSS_M32_to_XMM(to, from);
}
emitterT void SSEX_MOVD_XMM_to_M32( u32 to, x86SSERegType from )
{
if( g_xmmtypes[from] == XMMT_INT ) SSE2_MOVD_XMM_to_M32(to, from);
else SSE_MOVSS_XMM_to_M32(to, from);
}
emitterT void SSEX_MOVD_Rm_to_XMM( x86SSERegType to, x86IntRegType from, int offset )
{
if( g_xmmtypes[to] == XMMT_INT ) SSE2_MOVD_Rm_to_XMM(to, from, offset);
else SSE_MOVSS_Rm_to_XMM(to, from, offset);
}
emitterT void SSEX_MOVD_XMM_to_Rm( x86IntRegType to, x86SSERegType from, int offset )
{
if( g_xmmtypes[from] == XMMT_INT ) SSE2_MOVD_XMM_to_Rm(to, from, offset);
else SSE_MOVSS_XMM_to_Rm(to, from, offset);
}
emitterT void SSEX_POR_M128_to_XMM( x86SSERegType to, uptr from )
{
if( g_xmmtypes[to] == XMMT_INT ) SSE2_POR_M128_to_XMM(to, from);
else SSE_ORPS_M128_to_XMM(to, from);
}
emitterT void SSEX_POR_XMM_to_XMM( x86SSERegType to, x86SSERegType from )
{
if( g_xmmtypes[from] == XMMT_INT ) SSE2_POR_XMM_to_XMM(to, from);
else SSE_ORPS_XMM_to_XMM(to, from);
}
emitterT void SSEX_PXOR_M128_to_XMM( x86SSERegType to, uptr from )
{
if( g_xmmtypes[to] == XMMT_INT ) SSE2_PXOR_M128_to_XMM(to, from);
else SSE_XORPS_M128_to_XMM(to, from);
}
emitterT void SSEX_PXOR_XMM_to_XMM( x86SSERegType to, x86SSERegType from )
{
if( g_xmmtypes[from] == XMMT_INT ) SSE2_PXOR_XMM_to_XMM(to, from);
else SSE_XORPS_XMM_to_XMM(to, from);
}
emitterT void SSEX_PAND_M128_to_XMM( x86SSERegType to, uptr from )
{
if( g_xmmtypes[to] == XMMT_INT ) SSE2_PAND_M128_to_XMM(to, from);
else SSE_ANDPS_M128_to_XMM(to, from);
}
emitterT void SSEX_PAND_XMM_to_XMM( x86SSERegType to, x86SSERegType from )
{
if( g_xmmtypes[from] == XMMT_INT ) SSE2_PAND_XMM_to_XMM(to, from);
else SSE_ANDPS_XMM_to_XMM(to, from);
}
emitterT void SSEX_PANDN_M128_to_XMM( x86SSERegType to, uptr from )
{
if( g_xmmtypes[to] == XMMT_INT ) SSE2_PANDN_M128_to_XMM(to, from);
else SSE_ANDNPS_M128_to_XMM(to, from);
}
emitterT void SSEX_PANDN_XMM_to_XMM( x86SSERegType to, x86SSERegType from )
{
if( g_xmmtypes[from] == XMMT_INT ) SSE2_PANDN_XMM_to_XMM(to, from);
else SSE_ANDNPS_XMM_to_XMM(to, from);
}
emitterT void SSEX_PUNPCKLDQ_M128_to_XMM(x86SSERegType to, uptr from)
{
if( g_xmmtypes[to] == XMMT_INT ) SSE2_PUNPCKLDQ_M128_to_XMM(to, from);
else SSE_UNPCKLPS_M128_to_XMM(to, from);
}
emitterT void SSEX_PUNPCKLDQ_XMM_to_XMM(x86SSERegType to, x86SSERegType from)
{
if( g_xmmtypes[from] == XMMT_INT ) SSE2_PUNPCKLDQ_XMM_to_XMM(to, from);
else SSE_UNPCKLPS_XMM_to_XMM(to, from);
}
emitterT void SSEX_PUNPCKHDQ_M128_to_XMM(x86SSERegType to, uptr from)
{
if( g_xmmtypes[to] == XMMT_INT ) SSE2_PUNPCKHDQ_M128_to_XMM(to, from);
else SSE_UNPCKHPS_M128_to_XMM(to, from);
}
emitterT void SSEX_PUNPCKHDQ_XMM_to_XMM(x86SSERegType to, x86SSERegType from)
{
if( g_xmmtypes[from] == XMMT_INT ) SSE2_PUNPCKHDQ_XMM_to_XMM(to, from);
else SSE_UNPCKHPS_XMM_to_XMM(to, from);
}
emitterT void SSEX_MOVHLPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from )
{
if( g_xmmtypes[from] == XMMT_INT ) {
SSE2_PUNPCKHQDQ_XMM_to_XMM(to, from);
if( to != from ) SSE2_PSHUFD_XMM_to_XMM(to, to, 0x4e);
}
else {
SSE_MOVHLPS_XMM_to_XMM(to, from);
}
}

View File

@ -22,158 +22,30 @@
// SSE-X Helpers (generates either INT or FLOAT versions of certain SSE instructions)
// This header should always be included *after* ix86.h.
#ifndef _ix86_included_
#error Dependency fail: Please define _EmitterId_ and include ix86.h first.
#endif
// Added AlwaysUseMovaps check to the relevant functions here, which helps reduce the
// overhead of dynarec instructions that use these.
static __forceinline void SSEX_MOVDQA_M128_to_XMM( x86SSERegType to, uptr from )
{
if( !AlwaysUseMovaps && g_xmmtypes[to] == XMMT_INT ) SSE2_MOVDQA_M128_to_XMM(to, from);
else SSE_MOVAPS_M128_to_XMM(to, from);
}
static __forceinline void SSEX_MOVDQA_XMM_to_M128( uptr to, x86SSERegType from )
{
if( !AlwaysUseMovaps && g_xmmtypes[from] == XMMT_INT ) SSE2_MOVDQA_XMM_to_M128(to, from);
else SSE_MOVAPS_XMM_to_M128(to, from);
}
static __forceinline void SSEX_MOVDQA_XMM_to_XMM( x86SSERegType to, x86SSERegType from )
{
if( !AlwaysUseMovaps && g_xmmtypes[from] == XMMT_INT ) SSE2_MOVDQA_XMM_to_XMM(to, from);
else SSE_MOVAPS_XMM_to_XMM(to, from);
}
static __forceinline void SSEX_MOVDQARmtoR( x86SSERegType to, x86IntRegType from, int offset=0 )
{
if( !AlwaysUseMovaps && g_xmmtypes[to] == XMMT_INT ) SSE2_MOVDQARmtoR(to, from, offset);
else SSE_MOVAPSRmtoR(to, from, offset);
}
static __forceinline void SSEX_MOVDQARtoRm( x86IntRegType to, x86SSERegType from, int offset=0 )
{
if( !AlwaysUseMovaps && g_xmmtypes[from] == XMMT_INT ) SSE2_MOVDQARtoRm(to, from, offset);
else SSE_MOVAPSRtoRm(to, from, offset);
}
static __forceinline void SSEX_MOVDQU_M128_to_XMM( x86SSERegType to, uptr from )
{
if( !AlwaysUseMovaps && g_xmmtypes[to] == XMMT_INT ) SSE2_MOVDQU_M128_to_XMM(to, from);
else SSE_MOVUPS_M128_to_XMM(to, from);
}
static __forceinline void SSEX_MOVDQU_XMM_to_M128( uptr to, x86SSERegType from )
{
if( !AlwaysUseMovaps && g_xmmtypes[from] == XMMT_INT ) SSE2_MOVDQU_XMM_to_M128(to, from);
else SSE_MOVUPS_XMM_to_M128(to, from);
}
static __forceinline void SSEX_MOVD_M32_to_XMM( x86SSERegType to, uptr from )
{
if( g_xmmtypes[to] == XMMT_INT ) SSE2_MOVD_M32_to_XMM(to, from);
else SSE_MOVSS_M32_to_XMM(to, from);
}
static __forceinline void SSEX_MOVD_XMM_to_M32( u32 to, x86SSERegType from )
{
if( g_xmmtypes[from] == XMMT_INT ) SSE2_MOVD_XMM_to_M32(to, from);
else SSE_MOVSS_XMM_to_M32(to, from);
}
static __forceinline void SSEX_MOVD_Rm_to_XMM( x86SSERegType to, x86IntRegType from, int offset=0 )
{
if( g_xmmtypes[to] == XMMT_INT ) SSE2_MOVD_Rm_to_XMM(to, from, offset);
else SSE_MOVSS_Rm_to_XMM(to, from, offset);
}
static __forceinline void SSEX_MOVD_XMM_to_Rm( x86IntRegType to, x86SSERegType from, int offset=0 )
{
if( g_xmmtypes[from] == XMMT_INT ) SSE2_MOVD_XMM_to_Rm(to, from, offset);
else SSE_MOVSS_XMM_to_Rm(to, from, offset);
}
static __forceinline void SSEX_POR_M128_to_XMM( x86SSERegType to, uptr from )
{
if( g_xmmtypes[to] == XMMT_INT ) SSE2_POR_M128_to_XMM(to, from);
else SSE_ORPS_M128_to_XMM(to, from);
}
static __forceinline void SSEX_POR_XMM_to_XMM( x86SSERegType to, x86SSERegType from )
{
if( g_xmmtypes[from] == XMMT_INT ) SSE2_POR_XMM_to_XMM(to, from);
else SSE_ORPS_XMM_to_XMM(to, from);
}
static __forceinline void SSEX_PXOR_M128_to_XMM( x86SSERegType to, uptr from )
{
if( g_xmmtypes[to] == XMMT_INT ) SSE2_PXOR_M128_to_XMM(to, from);
else SSE_XORPS_M128_to_XMM(to, from);
}
static __forceinline void SSEX_PXOR_XMM_to_XMM( x86SSERegType to, x86SSERegType from )
{
if( g_xmmtypes[from] == XMMT_INT ) SSE2_PXOR_XMM_to_XMM(to, from);
else SSE_XORPS_XMM_to_XMM(to, from);
}
static __forceinline void SSEX_PAND_M128_to_XMM( x86SSERegType to, uptr from )
{
if( g_xmmtypes[to] == XMMT_INT ) SSE2_PAND_M128_to_XMM(to, from);
else SSE_ANDPS_M128_to_XMM(to, from);
}
static __forceinline void SSEX_PAND_XMM_to_XMM( x86SSERegType to, x86SSERegType from )
{
if( g_xmmtypes[from] == XMMT_INT ) SSE2_PAND_XMM_to_XMM(to, from);
else SSE_ANDPS_XMM_to_XMM(to, from);
}
static __forceinline void SSEX_PANDN_M128_to_XMM( x86SSERegType to, uptr from )
{
if( g_xmmtypes[to] == XMMT_INT ) SSE2_PANDN_M128_to_XMM(to, from);
else SSE_ANDNPS_M128_to_XMM(to, from);
}
static __forceinline void SSEX_PANDN_XMM_to_XMM( x86SSERegType to, x86SSERegType from )
{
if( g_xmmtypes[from] == XMMT_INT ) SSE2_PANDN_XMM_to_XMM(to, from);
else SSE_ANDNPS_XMM_to_XMM(to, from);
}
static __forceinline void SSEX_PUNPCKLDQ_M128_to_XMM(x86SSERegType to, uptr from)
{
if( g_xmmtypes[to] == XMMT_INT ) SSE2_PUNPCKLDQ_M128_to_XMM(to, from);
else SSE_UNPCKLPS_M128_to_XMM(to, from);
}
static __forceinline void SSEX_PUNPCKLDQ_XMM_to_XMM(x86SSERegType to, x86SSERegType from)
{
if( g_xmmtypes[from] == XMMT_INT ) SSE2_PUNPCKLDQ_XMM_to_XMM(to, from);
else SSE_UNPCKLPS_XMM_to_XMM(to, from);
}
static __forceinline void SSEX_PUNPCKHDQ_M128_to_XMM(x86SSERegType to, uptr from)
{
if( g_xmmtypes[to] == XMMT_INT ) SSE2_PUNPCKHDQ_M128_to_XMM(to, from);
else SSE_UNPCKHPS_M128_to_XMM(to, from);
}
static __forceinline void SSEX_PUNPCKHDQ_XMM_to_XMM(x86SSERegType to, x86SSERegType from)
{
if( g_xmmtypes[from] == XMMT_INT ) SSE2_PUNPCKHDQ_XMM_to_XMM(to, from);
else SSE_UNPCKHPS_XMM_to_XMM(to, from);
}
static __forceinline void SSEX_MOVHLPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from )
{
if( g_xmmtypes[from] == XMMT_INT ) {
SSE2_PUNPCKHQDQ_XMM_to_XMM(to, from);
if( to != from ) SSE2_PSHUFD_XMM_to_XMM(to, to, 0x4e);
}
else {
SSE_MOVHLPS_XMM_to_XMM(to, from);
}
}
extern void SSEX_MOVDQA_M128_to_XMM( x86SSERegType to, uptr from );
extern void SSEX_MOVDQA_XMM_to_M128( uptr to, x86SSERegType from );
extern void SSEX_MOVDQA_XMM_to_XMM( x86SSERegType to, x86SSERegType from );
extern void SSEX_MOVDQARmtoR( x86SSERegType to, x86IntRegType from, int offset=0 );
extern void SSEX_MOVDQARtoRm( x86IntRegType to, x86SSERegType from, int offset=0 );
extern void SSEX_MOVDQU_M128_to_XMM( x86SSERegType to, uptr from );
extern void SSEX_MOVDQU_XMM_to_M128( uptr to, x86SSERegType from );
extern void SSEX_MOVD_M32_to_XMM( x86SSERegType to, uptr from );
extern void SSEX_MOVD_XMM_to_M32( u32 to, x86SSERegType from );
extern void SSEX_MOVD_Rm_to_XMM( x86SSERegType to, x86IntRegType from, int offset=0 );
extern void SSEX_MOVD_XMM_to_Rm( x86IntRegType to, x86SSERegType from, int offset=0 );
extern void SSEX_POR_M128_to_XMM( x86SSERegType to, uptr from );
extern void SSEX_POR_XMM_to_XMM( x86SSERegType to, x86SSERegType from );
extern void SSEX_PXOR_M128_to_XMM( x86SSERegType to, uptr from );
extern void SSEX_PXOR_XMM_to_XMM( x86SSERegType to, x86SSERegType from );
extern void SSEX_PAND_M128_to_XMM( x86SSERegType to, uptr from );
extern void SSEX_PAND_XMM_to_XMM( x86SSERegType to, x86SSERegType from );
extern void SSEX_PANDN_M128_to_XMM( x86SSERegType to, uptr from );
extern void SSEX_PANDN_XMM_to_XMM( x86SSERegType to, x86SSERegType from );
extern void SSEX_PUNPCKLDQ_M128_to_XMM(x86SSERegType to, uptr from );
extern void SSEX_PUNPCKLDQ_XMM_to_XMM(x86SSERegType to, x86SSERegType from );
extern void SSEX_PUNPCKHDQ_M128_to_XMM(x86SSERegType to, uptr from );
extern void SSEX_PUNPCKHDQ_XMM_to_XMM(x86SSERegType to, x86SSERegType from );
extern void SSEX_MOVHLPS_XMM_to_XMM( x86SSERegType to, x86SSERegType from );

View File

@ -18,7 +18,7 @@
#include "PrecompiledHeader.h"
#include "Misc.h"
#include "System.h"
#include "ix86/ix86.h"
// used to make sure regs don't get changed while in recompiler

View File

@ -18,7 +18,6 @@
#pragma once
#define mVUdebug // Prints Extra Info to Console
#define _EmitterId_ (vuIndex+1)
#include "Common.h"
#include "VU.h"
#include "GS.h"