Emitter: Implemented INC/DEC/NEG/NOT instructions. Plus: many code cleanups using a better form of template parameter inference.

git-svn-id: http://pcsx2.googlecode.com/svn/trunk@990 96395faa-99c1-11dd-bbfe-3dabce05a288
Jake.Stine 2009-04-16 22:38:55 +00:00
parent dfd433993f
commit 1279fe2c21
11 changed files with 497 additions and 638 deletions
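For reference, a brief usage sketch of the new one-operand instruction forms (the register constants eax/al and the ptr32 indexer are assumed to be the emitter's predefined globals; the byte comments are illustrative):

using namespace x86Emitter;

void sample()
{
	iINC( eax );                // 40       inc eax
	iDEC( ptr32[0x80001000] );  // FF /1    dec dword ptr [0x80001000]
	iNOT( al );                 // F6 /2    not al
	iNEG( ptr32[0x80001000] );  // F7 /3    neg dword ptr [0x80001000]
}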

View File

@ -2977,10 +2977,6 @@
RelativePath="..\..\x86\ix86\ix86_types.h"
>
</File>
<File
RelativePath="..\..\x86\ix86\implement\movs.h"
>
</File>
<Filter
Name="Implement"
>
@ -2996,10 +2992,18 @@
RelativePath="..\..\x86\ix86\implement\group2.h"
>
</File>
<File
RelativePath="..\..\x86\ix86\implement\group3.h"
>
</File>
<File
RelativePath="..\..\x86\ix86\implement\incdec.h"
>
</File>
<File
RelativePath="..\..\x86\ix86\implement\movs.h"
>
</File>
</Filter>
</Filter>
<File

View File

@ -27,16 +27,9 @@
// because shifts by 0 do *not* affect flags status.
template< typename ImmType, bool isShiftRight >
class DwordShiftImpl
class DwordShiftImpl : public ImplementationHelper< ImmType >
{
public:
static const uint OperandSize = sizeof(ImmType);
DwordShiftImpl() {} // because GCC doesn't like static classes
protected:
static bool Is8BitOperand() { return OperandSize == 1; }
static void prefix16() { if( OperandSize == 2 ) iWrite<u8>( 0x66 ); }
static void basesibform( bool isCL )
{
prefix16();
@ -44,15 +37,17 @@ protected:
write8( (isCL ? 0xa5 : 0xa4) | (isShiftRight ? 0x8 : 0) );
}
public:
static __emitinline void Emit( const iRegister<OperandSize>& to, const iRegister<OperandSize>& from )
public:
DwordShiftImpl() {} // because GCC doesn't like static classes
static __emitinline void Emit( const iRegister<ImmType>& to, const iRegister<ImmType>& from )
{
prefix16();
write16( 0xa50f | (isShiftRight ? 0x800 : 0) );
ModRM_Direct( from.Id, to.Id );
}
static __emitinline void Emit( const iRegister<OperandSize>& to, const iRegister<OperandSize>& from, u8 imm )
static __emitinline void Emit( const iRegister<ImmType>& to, const iRegister<ImmType>& from, u8 imm )
{
if( imm == 0 ) return;
prefix16();
@ -61,13 +56,13 @@ public:
write8( imm );
}
static __emitinline void Emit( const ModSibBase& sibdest, const iRegister<OperandSize>& from, __unused const iRegisterCL& clreg )
static __emitinline void Emit( const ModSibBase& sibdest, const iRegister<ImmType>& from, __unused const iRegisterCL& clreg )
{
basesibform();
EmitSibMagic( from.Id, sibdest );
}
static __emitinline void Emit( const ModSibBase& sibdest, const iRegister<OperandSize>& from, u8 imm )
static __emitinline void Emit( const ModSibBase& sibdest, const iRegister<ImmType>& from, u8 imm )
{
basesibform();
EmitSibMagic( from.Id, sibdest );
@ -75,14 +70,14 @@ public:
}
// dest data type is inferred from the 'from' register, so we can do void* resolution :)
static __emitinline void Emit( void* dest, const iRegister<OperandSize>& from, __unused const iRegisterCL& clreg )
static __emitinline void Emit( void* dest, const iRegister<ImmType>& from, __unused const iRegisterCL& clreg )
{
basesibform();
iWriteDisp( from.Id, dest );
}
// dest data type is inferred from the 'from' register, so we can do void* resolution :)
static __emitinline void Emit( void* dest, const iRegister<OperandSize>& from, u8 imm )
static __emitinline void Emit( void* dest, const iRegister<ImmType>& from, u8 imm )
{
basesibform();
iWriteDisp( from.Id, dest );
@ -92,6 +87,8 @@ public:
// -------------------------------------------------------------------
// I use explicit method declarations here instead of templates, in order to provide
// *only* 32 and 16 bit register operand forms (8 bit registers are not valid in SHLD/SHRD).
//
template< bool isShiftRight >
class DwordShiftImplAll
@ -100,8 +97,6 @@ protected:
typedef DwordShiftImpl<u32, isShiftRight> m_32;
typedef DwordShiftImpl<u16, isShiftRight> m_16;
// (Note: I'm not going to macro this since it would likely clobber intellisense parameter resolution)
public:
// ---------- 32 Bit Interface -----------
__forceinline void operator()( const iRegister32& to, const iRegister32& from, __unused const iRegisterCL& clreg ) const { m_32::Emit( to, from ); }
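For illustration, a brief sketch of the doubleword-shift encodings this class produces, calling the typed Emit forms directly (edx/ecx are assumed to be the emitter's iRegister32 globals; byte comments illustrative):

using namespace x86Emitter::Internal;

DwordShiftImpl<u32,false>::Emit( edx, ecx );     // 0F A5 /r     shld edx, ecx, cl
DwordShiftImpl<u32,true>::Emit( edx, ecx, 4 );   // 0F AC /r ib  shrd edx, ecx, 4
DwordShiftImpl<u32,true>::Emit( edx, ecx, 0 );   // emits nothing -- zero shifts are skipped to keep flags intact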

View File

@ -34,55 +34,48 @@ enum G1Type
};
// -------------------------------------------------------------------
template< typename ImmType, G1Type InstType >
class Group1Impl
template< G1Type InstType, typename ImmType >
class Group1Impl : public ImplementationHelper< ImmType >
{
public:
static const uint OperandSize = sizeof(ImmType);
Group1Impl() {} // because GCC doesn't like static classes
protected:
static bool Is8BitOperand() { return OperandSize == 1; }
static void prefix16() { if( OperandSize == 2 ) iWrite<u8>( 0x66 ); }
public:
static __emitinline void Emit( const iRegister<OperandSize>& to, const iRegister<OperandSize>& from )
static __emitinline void Emit( const iRegister<ImmType>& to, const iRegister<ImmType>& from )
{
prefix16();
iWrite<u8>( (Is8BitOperand() ? 0 : 1) | (InstType<<3) );
ModRM_Direct( from.Id, to.Id );
}
static __emitinline void Emit( const ModSibBase& sibdest, const iRegister<OperandSize>& from )
static __emitinline void Emit( const ModSibBase& sibdest, const iRegister<ImmType>& from )
{
prefix16();
iWrite<u8>( (Is8BitOperand() ? 0 : 1) | (InstType<<3) );
EmitSibMagic( from.Id, sibdest );
}
static __emitinline void Emit( const iRegister<OperandSize>& to, const ModSibBase& sibsrc )
static __emitinline void Emit( const iRegister<ImmType>& to, const ModSibBase& sibsrc )
{
prefix16();
iWrite<u8>( (Is8BitOperand() ? 2 : 3) | (InstType<<3) );
EmitSibMagic( to.Id, sibsrc );
}
static __emitinline void Emit( void* dest, const iRegister<OperandSize>& from )
static __emitinline void Emit( void* dest, const iRegister<ImmType>& from )
{
prefix16();
iWrite<u8>( (Is8BitOperand() ? 0 : 1) | (InstType<<3) );
iWriteDisp( from.Id, dest );
}
static __emitinline void Emit( const iRegister<OperandSize>& to, const void* src )
static __emitinline void Emit( const iRegister<ImmType>& to, const void* src )
{
prefix16();
iWrite<u8>( (Is8BitOperand() ? 2 : 3) | (InstType<<3) );
iWriteDisp( to.Id, src );
}
static __emitinline void Emit( const iRegister<OperandSize>& to, ImmType imm )
static __emitinline void Emit( const iRegister<ImmType>& to, int imm )
{
prefix16();
if( !Is8BitOperand() && is_s8( imm ) )
@ -104,7 +97,7 @@ public:
}
}
static __emitinline void Emit( const ModSibStrict<OperandSize>& sibdest, ImmType imm )
static __emitinline void Emit( const ModSibStrict<ImmType>& sibdest, int imm )
{
if( Is8BitOperand() )
{
@ -131,43 +124,27 @@ public:
template< G1Type InstType >
class Group1ImplAll
{
protected:
typedef Group1Impl<u32, InstType> m_32;
typedef Group1Impl<u16, InstType> m_16;
typedef Group1Impl<u8, InstType> m_8;
// (Note: I'm not going to macro this since it would likely clobber intellisense parameter resolution)
public:
// ---------- 32 Bit Interface -----------
__forceinline void operator()( const iRegister32& to, const iRegister32& from ) const { m_32::Emit( to, from ); }
__forceinline void operator()( const iRegister32& to, const void* src ) const { m_32::Emit( to, src ); }
__forceinline void operator()( void* dest, const iRegister32& from ) const { m_32::Emit( dest, from ); }
__noinline void operator()( const ModSibBase& sibdest, const iRegister32& from ) const { m_32::Emit( sibdest, from ); }
__noinline void operator()( const iRegister32& to, const ModSibBase& sibsrc ) const{ m_32::Emit( to, sibsrc ); }
__noinline void operator()( const ModSibStrict<4>& sibdest, u32 imm ) const { m_32::Emit( sibdest, imm ); }
template< typename T >
__forceinline void operator()( const iRegister<T>& to, const iRegister<T>& from ) const { Group1Impl<InstType,T>::Emit( to, from ); }
template< typename T >
__forceinline void operator()( const iRegister<T>& to, const void* src ) const { Group1Impl<InstType,T>::Emit( to, src ); }
template< typename T >
__forceinline void operator()( void* dest, const iRegister<T>& from ) const { Group1Impl<InstType,T>::Emit( dest, from ); }
template< typename T >
__noinline void operator()( const ModSibBase& sibdest, const iRegister<T>& from ) const { Group1Impl<InstType,T>::Emit( sibdest, from ); }
template< typename T >
__noinline void operator()( const iRegister<T>& to, const ModSibBase& sibsrc ) const { Group1Impl<InstType,T>::Emit( to, sibsrc ); }
void operator()( const iRegister32& to, u32 imm ) const { m_32::Emit( to, imm ); }
// Note on Imm forms : use int as the source operand since it's "reasonably inert" from a compiler
// perspective. (using uint tends to make the compiler try and fail to match signed immediates with
// one of the other overloads).
template< typename T >
__noinline void operator()( const ModSibStrict<T>& sibdest, int imm ) const { Group1Impl<InstType,T>::Emit( sibdest, imm ); }
template< typename T >
void operator()( const iRegister<T>& to, int imm ) const { Group1Impl<InstType,T>::Emit( to, imm ); }
// ---------- 16 Bit Interface -----------
__forceinline void operator()( const iRegister16& to, const iRegister16& from ) const { m_16::Emit( to, from ); }
__forceinline void operator()( const iRegister16& to, const void* src ) const { m_16::Emit( to, src ); }
__forceinline void operator()( void* dest, const iRegister16& from ) const { m_16::Emit( dest, from ); }
__noinline void operator()( const ModSibBase& sibdest, const iRegister16& from ) const { m_16::Emit( sibdest, from ); }
__noinline void operator()( const iRegister16& to, const ModSibBase& sibsrc ) const{ m_16::Emit( to, sibsrc ); }
__noinline void operator()( const ModSibStrict<2>& sibdest, u16 imm ) const { m_16::Emit( sibdest, imm ); }
void operator()( const iRegister16& to, u16 imm ) const { m_16::Emit( to, imm ); }
// ---------- 8 Bit Interface -----------
__forceinline void operator()( const iRegister8& to, const iRegister8& from ) const { m_8::Emit( to, from ); }
__forceinline void operator()( const iRegister8& to, const void* src ) const { m_8::Emit( to, src ); }
__forceinline void operator()( void* dest, const iRegister8& from ) const { m_8::Emit( dest, from ); }
__noinline void operator()( const ModSibBase& sibdest, const iRegister8& from ) const { m_8::Emit( sibdest, from ); }
__noinline void operator()( const iRegister8& to, const ModSibBase& sibsrc ) const{ m_8::Emit( to, sibsrc ); }
__noinline void operator()( const ModSibStrict<1>& sibdest, u8 imm ) const { m_8::Emit( sibdest, imm ); }
void operator()( const iRegister8& to, u8 imm ) const { m_8::Emit( to, imm ); }
Group1ImplAll() {} // Why does GCC need these?
};
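As a sketch of the payoff: the operand width is now deduced from the argument types, so a single iADD functor (declared later in this commit) covers every register size. Register names and byte comments below are illustrative assumptions:

iADD( eax, ebx );             // add eax, ebx                  (Group1Impl<G1Type_ADD, u32>)
iADD( bx, 0x1234 );           // 66-prefixed add bx, 0x1234    (Group1Impl<G1Type_ADD, u16>)
iADD( ptr8[0x80001000], 1 );  // add byte ptr [0x80001000], 1  (Group1Impl<G1Type_ADD, u8>)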

View File

@ -38,27 +38,20 @@ enum G2Type
// Optimization Note: For Imm forms, we ignore the instruction if the shift count is zero.
// This is a safe optimization since any zero-value shift does not affect any flags.
//
template< typename ImmType, G2Type InstType >
class Group2Impl
template< G2Type InstType, typename ImmType >
class Group2Impl : public ImplementationHelper< ImmType >
{
public:
static const uint OperandSize = sizeof(ImmType);
Group2Impl() {} // For the love of GCC.
protected:
static bool Is8BitOperand() { return OperandSize == 1; }
static void prefix16() { if( OperandSize == 2 ) iWrite<u8>( 0x66 ); }
public:
static __emitinline void Emit( const iRegister<OperandSize>& to )
static __emitinline void Emit( const iRegister<ImmType>& to )
{
prefix16();
iWrite<u8>( Is8BitOperand() ? 0xd2 : 0xd3 );
ModRM_Direct( InstType, to.Id );
}
static __emitinline void Emit( const iRegister<OperandSize>& to, u8 imm )
static __emitinline void Emit( const iRegister<ImmType>& to, u8 imm )
{
if( imm == 0 ) return;
@ -77,14 +70,14 @@ public:
}
}
static __emitinline void Emit( const ModSibStrict<OperandSize>& sibdest )
static __emitinline void Emit( const ModSibStrict<ImmType>& sibdest )
{
prefix16();
iWrite<u8>( Is8BitOperand() ? 0xd2 : 0xd3 );
EmitSibMagic( InstType, sibdest );
}
static __emitinline void Emit( const ModSibStrict<OperandSize>& sibdest, u8 imm )
static __emitinline void Emit( const ModSibStrict<ImmType>& sibdest, u8 imm )
{
if( imm == 0 ) return;
@ -109,11 +102,6 @@ public:
template< G2Type InstType >
class Group2ImplAll
{
protected:
typedef Group2Impl<u32, InstType> m_32;
typedef Group2Impl<u16, InstType> m_16;
typedef Group2Impl<u8, InstType> m_8;
// Inlining Notes:
// I've set up the inlining to be as practical and intelligent as possible, which means
// forcing inlining for (void*) forms of ModRM, which thanks to constprop reduce to
@ -125,22 +113,17 @@ protected:
public:
// ---------- 32 Bit Interface -----------
__forceinline void operator()( const iRegister32& to, __unused const iRegisterCL& from ) const{ m_32::Emit( to ); }
__noinline void operator()( const ModSibStrict<4>& sibdest, __unused const iRegisterCL& from ) const{ m_32::Emit( sibdest ); }
__noinline void operator()( const ModSibStrict<4>& sibdest, u8 imm ) const { m_32::Emit( sibdest, imm ); }
void operator()( const iRegister32& to, u8 imm ) const { m_32::Emit( to, imm ); }
template< typename T > __forceinline void operator()( const iRegister<T>& to, __unused const iRegisterCL& from ) const
{ Group2Impl<InstType,T>::Emit( to ); }
// ---------- 16 Bit Interface -----------
__forceinline void operator()( const iRegister16& to, __unused const iRegisterCL& from ) const{ m_16::Emit( to ); }
__noinline void operator()( const ModSibStrict<2>& sibdest, __unused const iRegisterCL& from ) const{ m_16::Emit( sibdest ); }
__noinline void operator()( const ModSibStrict<2>& sibdest, u8 imm ) const { m_16::Emit( sibdest, imm ); }
void operator()( const iRegister16& to, u8 imm ) const { m_16::Emit( to, imm ); }
template< typename T > __noinline void operator()( const ModSibStrict<T>& sibdest, __unused const iRegisterCL& from ) const
{ Group2Impl<InstType,T>::Emit( sibdest ); }
// ---------- 8 Bit Interface -----------
__forceinline void operator()( const iRegister8& to, __unused const iRegisterCL& from ) const{ m_8::Emit( to ); }
__noinline void operator()( const ModSibStrict<1>& sibdest, __unused const iRegisterCL& from ) const{ m_8::Emit( sibdest ); }
__noinline void operator()( const ModSibStrict<1>& sibdest, u8 imm ) const { m_8::Emit( sibdest, imm ); }
void operator()( const iRegister8& to, u8 imm ) const { m_8::Emit( to, imm ); }
template< typename T > __noinline void operator()( const ModSibStrict<T>& sibdest, u8 imm ) const
{ Group2Impl<InstType,T>::Emit( sibdest, imm ); }
template< typename T > void operator()( const iRegister<T>& to, u8 imm ) const
{ Group2Impl<InstType,T>::Emit( to, imm ); }
Group2ImplAll() {} // I am a class with no members, so I need an explicit constructor! Sense abounds.
};
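A quick sketch of the resulting shift interface (cl is the iRegisterCL constant used by these headers; edx/eax are assumed register globals; byte comments illustrative):

iSHL( edx, cl );               // D3 /4   shl edx, cl
iSHL( edx, 4 );                // C1 /4   shl edx, 4
iSHR( ptr32[0x80001000], 1 );  // shr dword ptr [0x80001000], 1
iSAR( eax, 0 );                // emits nothing -- zero-count shifts are dropped (flags unaffected)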

View File

@ -0,0 +1,66 @@
/* Pcsx2 - Pc Ps2 Emulator
* Copyright (C) 2002-2009 Pcsx2 Team
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*/
#pragma once
// Note: This header is meant to be included from within the x86Emitter::Internal namespace.
// Instructions implemented in this header are as follows -->>
enum G3Type
{
G3Type_NOT = 2,
G3Type_NEG = 3,
G3Type_MUL = 4,
G3Type_iMUL = 5, // partial implementation, iMul has additional forms in ix86.cpp
G3Type_DIV = 6,
G3Type_iDIV = 7
};
template< typename ImmType >
class Group3Impl : public ImplementationHelper<ImmType>
{
public:
Group3Impl() {} // For the love of GCC.
static __emitinline void Emit( G3Type InstType, const iRegister<ImmType>& from )
{
prefix16();
iWrite<u8>( Is8BitOperand() ? 0xf6 : 0xf7 );
ModRM_Direct( InstType, from.Id );
}
static __emitinline void Emit( G3Type InstType, const ModSibStrict<ImmType>& sibsrc )
{
prefix16();
iWrite<u8>( Is8BitOperand() ? 0xf6 : 0xf7 );
EmitSibMagic( InstType, sibsrc );
}
};
// -------------------------------------------------------------------
//
template< G3Type InstType >
class Group3ImplAll
{
public:
template< typename T >
__forceinline void operator()( const iRegister<T>& from ) const { Group3Impl<T>::Emit( InstType, from ); }
template< typename T >
__noinline void operator()( const ModSibStrict<T>& from ) const { Group3Impl<T>::Emit( InstType, from ); }
};
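The same dispatcher also backs the one-operand MUL/DIV family declared later in this commit; a hedged usage sketch (ecx assumed to be a predefined iRegister32; byte comments illustrative):

iUMUL( ecx );                // F7 /4   mul ecx    (edx:eax = eax * ecx)
iUDIV( ptr32[0x80001000] );  // F7 /6   div dword ptr [0x80001000]
iSDIV( ptr8[0x80001000] );   // F6 /7   idiv byte ptr [0x80001000]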

View File

@ -20,3 +20,54 @@
// Implementations found here: Increment and Decrement Instructions!
// Note: This header is meant to be included from within the x86Emitter::Internal namespace.
template< typename ImmType >
class IncDecImpl : public ImplementationHelper<ImmType>
{
public:
IncDecImpl() {} // For the love of GCC.
static __emitinline void Emit( bool isDec, const iRegister<ImmType>& to )
{
// There is no valid 8-bit form of direct register inc/dec, so fall
// back on Mod/RM format instead:
if( Is8BitOperand() )
{
write8( 0xfe );
ModRM_Direct( isDec ? 1 : 0, to.Id );
}
else
{
prefix16();
write8( (isDec ? 0x48 : 0x40) | to.Id );
}
}
static __emitinline void Emit( bool isDec, const ModSibStrict<ImmType>& dest )
{
prefix16();
write8( Is8BitOperand() ? 0xfe : 0xff );
EmitSibMagic( isDec ? 1 : 0, dest );
}
};
// ------------------------------------------------------------------------
template< bool isDec >
class IncDecImplAll
{
protected:
typedef IncDecImpl<u32> m_32;
typedef IncDecImpl<u16> m_16;
typedef IncDecImpl<u8> m_8;
public:
__forceinline void operator()( const iRegister32& to ) const { m_32::Emit( isDec, to ); }
__noinline void operator()( const ModSibStrict<u32>& sibdest ) const { m_32::Emit( isDec, sibdest ); }
__forceinline void operator()( const iRegister16& to ) const { m_16::Emit( isDec, to ); }
__noinline void operator()( const ModSibStrict<u16>& sibdest ) const { m_16::Emit( isDec, sibdest ); }
__forceinline void operator()( const iRegister8& to ) const { m_8::Emit( isDec, to ); }
__noinline void operator()( const ModSibStrict<u8>& sibdest ) const { m_8::Emit( isDec, sibdest ); }
IncDecImplAll() {} // don't ask.
};
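A byte-level sketch of the register encodings chosen above (al/dx/eax assumed to be the emitter's register globals):

iINC( al );   // FE C0   inc al    (no short 40+r form exists for 8-bit regs, so Mod/RM is used)
iINC( dx );   // 66 42   inc dx    (short 40+r form, with operand-size prefix)
iDEC( eax );  // 48      dec eax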

View File

@ -21,11 +21,164 @@
// Header: ix86_impl_movs.h -- covers cmov and movsx/movzx.
// Note: This header is meant to be included from within the x86Emitter::Internal namespace.
//////////////////////////////////////////////////////////////////////////////////////////
// MOV instruction Implementation
template< typename ImmType >
class MovImpl : ImplementationHelper< ImmType >
{
public:
// ------------------------------------------------------------------------
static __forceinline void Emit( const iRegister<ImmType>& to, const iRegister<ImmType>& from )
{
if( to == from ) return; // ignore redundant MOVs.
prefix16();
iWrite<u8>( Is8BitOperand() ? 0x88 : 0x89 );
ModRM( 3, from.Id, to.Id );
}
// ------------------------------------------------------------------------
static __forceinline void Emit( const ModSibBase& dest, const iRegister<ImmType>& from )
{
prefix16();
// mov eax has a special form when writing directly to a DISP32 address
// (sans any register index/base registers).
if( from.IsAccumulator() && dest.Index.IsEmpty() && dest.Base.IsEmpty() )
{
iWrite<u8>( Is8BitOperand() ? 0xa2 : 0xa3 );
iWrite<u32>( dest.Displacement );
}
else
{
iWrite<u8>( Is8BitOperand() ? 0x88 : 0x89 );
EmitSibMagic( from.Id, dest );
}
}
// ------------------------------------------------------------------------
static __forceinline void Emit( const iRegister<ImmType>& to, const ModSibBase& src )
{
prefix16();
// mov eax has a special form when reading directly from a DISP32 address
// (sans any register index/base registers).
if( to.IsAccumulator() && src.Index.IsEmpty() && src.Base.IsEmpty() )
{
iWrite<u8>( Is8BitOperand() ? 0xa0 : 0xa1 );
iWrite<u32>( src.Displacement );
}
else
{
iWrite<u8>( Is8BitOperand() ? 0x8a : 0x8b );
EmitSibMagic( to.Id, src );
}
}
// ------------------------------------------------------------------------
static __forceinline void Emit( void* dest, const iRegister<ImmType>& from )
{
prefix16();
// mov eax has a special form when writing directly to a DISP32 address
if( from.IsAccumulator() )
{
iWrite<u8>( Is8BitOperand() ? 0xa2 : 0xa3 );
iWrite<s32>( (s32)dest );
}
else
{
iWrite<u8>( Is8BitOperand() ? 0x88 : 0x89 );
iWriteDisp( from.Id, dest );
}
}
// ------------------------------------------------------------------------
static __forceinline void Emit( const iRegister<ImmType>& to, const void* src )
{
prefix16();
// mov eax has a special form when reading directly from a DISP32 address
if( to.IsAccumulator() )
{
iWrite<u8>( Is8BitOperand() ? 0xa0 : 0xa1 );
iWrite<s32>( (s32)src );
}
else
{
iWrite<u8>( Is8BitOperand() ? 0x8a : 0x8b );
iWriteDisp( to.Id, src );
}
}
// ------------------------------------------------------------------------
static __forceinline void Emit( const iRegister<ImmType>& to, ImmType imm )
{
// Note: MOV does not have (reg16/32,imm8) forms.
prefix16();
iWrite<u8>( (Is8BitOperand() ? 0xb0 : 0xb8) | to.Id );
iWrite<ImmType>( imm );
}
// ------------------------------------------------------------------------
static __forceinline void Emit( ModSibStrict<ImmType> dest, ImmType imm )
{
prefix16();
iWrite<u8>( Is8BitOperand() ? 0xc6 : 0xc7 );
EmitSibMagic( 0, dest );
iWrite<ImmType>( imm );
}
};
// Inlining Notes:
// I've set up the inlining to be as practical and intelligent as possible, which means
// forcing inlining for (void*) forms of ModRM, which thanks to constprop reduce to
// virtually no code. In the case of (Reg, Imm) forms, the inlining is up to the
// discretion of the compiler.
//
class MovImplAll
{
public:
template< typename T>
__forceinline void operator()( const iRegister<T>& to, const iRegister<T>& from ) const { MovImpl<T>::Emit( to, from ); }
template< typename T>
__forceinline void operator()( const iRegister<T>& to, const void* src ) const { MovImpl<T>::Emit( to, src ); }
template< typename T>
__forceinline void operator()( void* dest, const iRegister<T>& from ) const { MovImpl<T>::Emit( dest, from ); }
template< typename T>
__noinline void operator()( const ModSibBase& sibdest, const iRegister<T>& from ) const { MovImpl<T>::Emit( sibdest, from ); }
template< typename T>
__noinline void operator()( const iRegister<T>& to, const ModSibBase& sibsrc ) const { MovImpl<T>::Emit( to, sibsrc ); }
template< typename T>
__noinline void operator()( const ModSibStrict<T>& sibdest, int imm ) const { MovImpl<T>::Emit( sibdest, imm ); }
// preserve_flags - set to true to disable optimizations which could alter the state of
// the flags (namely replacing mov reg,0 with xor).
template< typename T >
__emitinline void operator()( const iRegister<T>& to, int imm, bool preserve_flags=false ) const
{
if( !preserve_flags && (imm == 0) )
iXOR( to, to );
else
MovImpl<T>::Emit( to, imm );
}
};
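A short sketch of the preserve_flags behavior described above (eax/ecx assumed register globals; byte comments illustrative):

iMOV( eax, 0 );        // 31 C0      xor eax, eax   (mov reg,0 is rewritten as xor)
iMOV( eax, 0, true );  // B8 imm32   mov eax, 0     (preserve_flags forces the literal mov)
iMOV( ecx, 0x100 );    // B9 imm32   mov ecx, 0x100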
//////////////////////////////////////////////////////////////////////////////////////////
// CMOV !! [in all of its disappointing lack of glory]
//
template< int OperandSize >
class CMovImpl
template< typename ImmType >
class CMovImpl : public ImplementationHelper< ImmType >
{
protected:
static bool Is8BitOperand() { return OperandSize == 1; }
@ -42,20 +195,20 @@ protected:
public:
CMovImpl() {}
static __emitinline void Emit( JccComparisonType cc, const iRegister<OperandSize>& to, const iRegister<OperandSize>& from )
static __emitinline void Emit( JccComparisonType cc, const iRegister<ImmType>& to, const iRegister<ImmType>& from )
{
if( to == from ) return;
emit_base( cc );
ModRM( ModRm_Direct, to.Id, from.Id );
ModRM_Direct( to.Id, from.Id );
}
static __emitinline void Emit( JccComparisonType cc, const iRegister<OperandSize>& to, const void* src )
static __emitinline void Emit( JccComparisonType cc, const iRegister<ImmType>& to, const void* src )
{
emit_base( cc );
iWriteDisp( to.Id, src );
}
static __emitinline void Emit( JccComparisonType cc, const iRegister<OperandSize>& to, const ModSibBase& sibsrc )
static __emitinline void Emit( JccComparisonType cc, const iRegister<ImmType>& to, const ModSibBase& sibsrc )
{
emit_base( cc );
EmitSibMagic( to.Id, sibsrc );
@ -64,11 +217,14 @@ public:
};
// ------------------------------------------------------------------------
// I use explicit method declarations here instead of templates, in order to provide
// *only* 32 and 16 bit register operand forms (8 bit registers are not valid in CMOV).
//
class CMovImplGeneric
{
protected:
typedef CMovImpl<4> m_32;
typedef CMovImpl<2> m_16;
typedef CMovImpl<u32> m_32;
typedef CMovImpl<u16> m_16;
public:
__forceinline void operator()( JccComparisonType ccType, const iRegister32& to, const iRegister32& from ) const { m_32::Emit( ccType, to, from ); }
@ -87,8 +243,8 @@ template< JccComparisonType ccType >
class CMovImplAll
{
protected:
typedef CMovImpl<4> m_32;
typedef CMovImpl<2> m_16;
typedef CMovImpl<u32> m_32;
typedef CMovImpl<u16> m_16;
public:
__forceinline void operator()( const iRegister32& to, const iRegister32& from ) const { m_32::Emit( ccType, to, from ); }
@ -105,10 +261,13 @@ public:
//////////////////////////////////////////////////////////////////////////////////////////
// Mov with sign/zero extension implementations (movsx / movzx)
//
template< int DestOperandSize, int SrcOperandSize >
template< typename DestImmType, typename SrcImmType >
class MovExtendImpl
{
protected:
static const uint DestOperandSize = sizeof( DestImmType );
static const uint SrcOperandSize = sizeof( SrcImmType );
static bool Is8BitOperand() { return SrcOperandSize == 1; }
static void prefix16() { if( DestOperandSize == 2 ) iWrite<u8>( 0x66 ); }
static __forceinline void emit_base( bool SignExtend )
@ -121,13 +280,13 @@ protected:
public:
MovExtendImpl() {} // For the love of GCC.
static __emitinline void Emit( const iRegister<DestOperandSize>& to, const iRegister<SrcOperandSize>& from, bool SignExtend )
static __emitinline void Emit( const iRegister<DestImmType>& to, const iRegister<SrcImmType>& from, bool SignExtend )
{
emit_base( SignExtend );
ModRM_Direct( to.Id, from.Id );
}
static __emitinline void Emit( const iRegister<DestOperandSize>& to, const ModSibStrict<SrcOperandSize>& sibsrc, bool SignExtend )
static __emitinline void Emit( const iRegister<DestImmType>& to, const ModSibStrict<SrcImmType>& sibsrc, bool SignExtend )
{
emit_base( SignExtend );
EmitSibMagic( to.Id, sibsrc );
@ -139,19 +298,19 @@ template< bool SignExtend >
class MovExtendImplAll
{
protected:
typedef MovExtendImpl<4, 2> m_16to32;
typedef MovExtendImpl<4, 1> m_8to32;
typedef MovExtendImpl<2, 1> m_8to16;
typedef MovExtendImpl<u32, u16> m_16to32;
typedef MovExtendImpl<u32, u8> m_8to32;
typedef MovExtendImpl<u16, u8> m_8to16;
public:
__forceinline void operator()( const iRegister32& to, const iRegister16& from ) const { m_16to32::Emit( to, from, SignExtend ); }
__noinline void operator()( const iRegister32& to, const ModSibStrict<2>& sibsrc ) const { m_16to32::Emit( to, sibsrc, SignExtend ); }
__noinline void operator()( const iRegister32& to, const ModSibStrict<u16>& sibsrc ) const { m_16to32::Emit( to, sibsrc, SignExtend ); }
__forceinline void operator()( const iRegister32& to, const iRegister8& from ) const { m_8to32::Emit( to, from, SignExtend ); }
__noinline void operator()( const iRegister32& to, const ModSibStrict<1>& sibsrc ) const { m_8to32::Emit( to, sibsrc, SignExtend ); }
__noinline void operator()( const iRegister32& to, const ModSibStrict<u8>& sibsrc ) const { m_8to32::Emit( to, sibsrc, SignExtend ); }
__forceinline void operator()( const iRegister16& to, const iRegister8& from ) const { m_8to16::Emit( to, from, SignExtend ); }
__noinline void operator()( const iRegister16& to, const ModSibStrict<1>& sibsrc ) const { m_8to16::Emit( to, sibsrc, SignExtend ); }
__noinline void operator()( const iRegister16& to, const ModSibStrict<u8>& sibsrc ) const { m_8to16::Emit( to, sibsrc, SignExtend ); }
MovExtendImplAll() {} // don't ask.
};
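A brief usage sketch (register names assumed; byte comments are the standard movzx/movsx encodings):

iMOVZX( eax, dx );                // 0F B7 /r     movzx eax, dx
iMOVSX( eax, ptr8[0x80001000] );  // 0F BE /r     movsx eax, byte ptr [0x80001000]
iMOVSX( cx, al );                 // 66 0F BE /r  movsx cx, al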

View File

@ -67,13 +67,13 @@ __threadlocal XMMSSEType g_xmmtypes[iREGCNT_XMM] = { XMMT_INT };
namespace x86Emitter {
const x86IndexerType ptr;
const x86IndexerTypeExplicit<4> ptr32;
const x86IndexerTypeExplicit<2> ptr16;
const x86IndexerTypeExplicit<1> ptr8;
const x86IndexerTypeExplicit<u32> ptr32;
const x86IndexerTypeExplicit<u16> ptr16;
const x86IndexerTypeExplicit<u8> ptr8;
// ------------------------------------------------------------------------
template< int OperandSize > const iRegister<OperandSize> iRegister<OperandSize>::Empty;
template< typename OperandType > const iRegister<OperandType> iRegister<OperandType>::Empty;
const x86IndexReg x86IndexReg::Empty;
const iRegister32
@ -240,6 +240,8 @@ namespace Internal
using namespace Internal;
const MovImplAll iMOV;
const Group1ImplAll<G1Type_ADD> iADD;
const Group1ImplAll<G1Type_OR> iOR;
const Group1ImplAll<G1Type_ADC> iADC;
@ -257,6 +259,15 @@ const Group2ImplAll<G2Type_SHL> iSHL;
const Group2ImplAll<G2Type_SHR> iSHR;
const Group2ImplAll<G2Type_SAR> iSAR;
const Group3ImplAll<G3Type_NOT> iNOT;
const Group3ImplAll<G3Type_NEG> iNEG;
const Group3ImplAll<G3Type_MUL> iUMUL;
const Group3ImplAll<G3Type_DIV> iUDIV;
const Group3ImplAll<G3Type_iDIV> iSDIV;
const IncDecImplAll<false> iINC;
const IncDecImplAll<true> iDEC;
const MovExtendImplAll<false> iMOVZX;
const MovExtendImplAll<true> iMOVSX;
@ -336,9 +347,11 @@ __emitinline void iAdvancePtr( uint bytes )
// preserve_flags - set to true to disable use of SHL on [Index*Base] forms
// of LEA, which alters flags states.
//
template< typename ToReg >
static void EmitLeaMagic( ToReg to, const ModSibBase& src, bool preserve_flags )
template< typename OperandType >
static void EmitLeaMagic( iRegister<OperandType> to, const ModSibBase& src, bool preserve_flags )
{
typedef iRegister<OperandType> ToReg;
int displacement_size = (src.Displacement == 0) ? 0 :
( ( src.IsByteSizeDisp() ) ? 1 : 2 );
@ -465,226 +478,7 @@ __emitinline void iLEA( iRegister16 to, const ModSibBase& src, bool preserve_fla
}
//////////////////////////////////////////////////////////////////////////////////////////
// MOV instruction Implementation
template< typename ImmType >
class MovImpl
{
public:
static const uint OperandSize = sizeof(ImmType);
protected:
static bool Is8BitOperand() { return OperandSize == 1; }
static void prefix16() { if( OperandSize == 2 ) iWrite<u8>( 0x66 ); }
public:
// ------------------------------------------------------------------------
static __forceinline void Emit( const iRegister<OperandSize>& to, const iRegister<OperandSize>& from )
{
if( to == from ) return; // ignore redundant MOVs.
prefix16();
iWrite<u8>( Is8BitOperand() ? 0x88 : 0x89 );
ModRM( 3, from.Id, to.Id );
}
// ------------------------------------------------------------------------
static __forceinline void Emit( const ModSibBase& dest, const iRegister<OperandSize>& from )
{
prefix16();
// mov eax has a special from when writing directly to a DISP32 address
// (sans any register index/base registers).
if( from.IsAccumulator() && dest.Index.IsEmpty() && dest.Base.IsEmpty() )
{
iWrite<u8>( Is8BitOperand() ? 0xa2 : 0xa3 );
iWrite<u32>( dest.Displacement );
}
else
{
iWrite<u8>( Is8BitOperand() ? 0x88 : 0x89 );
EmitSibMagic( from.Id, dest );
}
}
// ------------------------------------------------------------------------
static __forceinline void Emit( const iRegister<OperandSize>& to, const ModSibBase& src )
{
prefix16();
// mov eax has a special from when reading directly from a DISP32 address
// (sans any register index/base registers).
if( to.IsAccumulator() && src.Index.IsEmpty() && src.Base.IsEmpty() )
{
iWrite<u8>( Is8BitOperand() ? 0xa0 : 0xa1 );
iWrite<u32>( src.Displacement );
}
else
{
iWrite<u8>( Is8BitOperand() ? 0x8a : 0x8b );
EmitSibMagic( to.Id, src );
}
}
// ------------------------------------------------------------------------
static __forceinline void Emit( void* dest, const iRegister<OperandSize>& from )
{
prefix16();
// mov eax has a special from when writing directly to a DISP32 address
if( from.IsAccumulator() )
{
iWrite<u8>( Is8BitOperand() ? 0xa2 : 0xa3 );
iWrite<s32>( (s32)dest );
}
else
{
iWrite<u8>( Is8BitOperand() ? 0x88 : 0x89 );
iWriteDisp( from.Id, dest );
}
}
// ------------------------------------------------------------------------
static __forceinline void Emit( const iRegister<OperandSize>& to, const void* src )
{
prefix16();
// mov eax has a special from when reading directly from a DISP32 address
if( to.IsAccumulator() )
{
iWrite<u8>( Is8BitOperand() ? 0xa0 : 0xa1 );
iWrite<s32>( (s32)src );
}
else
{
iWrite<u8>( Is8BitOperand() ? 0x8a : 0x8b );
iWriteDisp( to.Id, src );
}
}
// ------------------------------------------------------------------------
static __forceinline void Emit( const iRegister<OperandSize>& to, ImmType imm )
{
// Note: MOV does not have (reg16/32,imm8) forms.
prefix16();
iWrite<u8>( (Is8BitOperand() ? 0xb0 : 0xb8) | to.Id );
iWrite<ImmType>( imm );
}
// ------------------------------------------------------------------------
static __forceinline void Emit( ModSibStrict<OperandSize> dest, ImmType imm )
{
prefix16();
iWrite<u8>( Is8BitOperand() ? 0xc6 : 0xc7 );
EmitSibMagic( 0, dest );
iWrite<ImmType>( imm );
}
};
namespace Internal
{
typedef MovImpl<u32> MOV32;
typedef MovImpl<u16> MOV16;
typedef MovImpl<u8> MOV8;
}
// Inlining Notes:
// I've set up the inlining to be as practical and intelligent as possible, which means
// forcing inlining for (void*) forms of ModRM, which thanks to constprop reduce to
// virtually no code. In the case of (Reg, Imm) forms, the inlinign is up to the dis-
// cretion of the compiler.
//
// TODO : Turn this into a macro after it's been debugged and accuracy-approved! :D
// ---------- 32 Bit Interface -----------
__forceinline void iMOV( const iRegister32& to, const iRegister32& from ) { MOV32::Emit( to, from ); }
__forceinline void iMOV( const iRegister32& to, const void* src ) { MOV32::Emit( to, ptr32[src] ); }
__forceinline void iMOV( void* dest, const iRegister32& from ) { MOV32::Emit( ptr32[dest], from ); }
__noinline void iMOV( const ModSibBase& sibdest, const iRegister32& from ) { MOV32::Emit( sibdest, from ); }
__noinline void iMOV( const iRegister32& to, const ModSibBase& sibsrc ) { MOV32::Emit( to, sibsrc ); }
__noinline void iMOV( const ModSibStrict<4>& sibdest,u32 imm ) { MOV32::Emit( sibdest, imm ); }
void iMOV( const iRegister32& to, u32 imm, bool preserve_flags )
{
if( !preserve_flags && (imm == 0) )
iXOR( to, to );
else
MOV32::Emit( to, imm );
}
// ---------- 16 Bit Interface -----------
__forceinline void iMOV( const iRegister16& to, const iRegister16& from ) { MOV16::Emit( to, from ); }
__forceinline void iMOV( const iRegister16& to, const void* src ) { MOV16::Emit( to, ptr16[src] ); }
__forceinline void iMOV( void* dest, const iRegister16& from ) { MOV16::Emit( ptr16[dest], from ); }
__noinline void iMOV( const ModSibBase& sibdest, const iRegister16& from ) { MOV16::Emit( sibdest, from ); }
__noinline void iMOV( const iRegister16& to, const ModSibBase& sibsrc ) { MOV16::Emit( to, sibsrc ); }
__noinline void iMOV( const ModSibStrict<2>& sibdest,u16 imm ) { MOV16::Emit( sibdest, imm ); }
void iMOV( const iRegister16& to, u16 imm, bool preserve_flags )
{
if( !preserve_flags && (imm == 0) )
iXOR( to, to );
else
MOV16::Emit( to, imm );
}
// ---------- 8 Bit Interface -----------
__forceinline void iMOV( const iRegister8& to, const iRegister8& from ) { MOV8::Emit( to, from ); }
__forceinline void iMOV( const iRegister8& to, const void* src ) { MOV8::Emit( to, ptr8[src] ); }
__forceinline void iMOV( void* dest, const iRegister8& from ) { MOV8::Emit( ptr8[dest], from ); }
__noinline void iMOV( const ModSibBase& sibdest, const iRegister8& from ) { MOV8::Emit( sibdest, from ); }
__noinline void iMOV( const iRegister8& to, const ModSibBase& sibsrc ) { MOV8::Emit( to, sibsrc ); }
__noinline void iMOV( const ModSibStrict<1>& sibdest,u8 imm ) { MOV8::Emit( sibdest, imm ); }
void iMOV( const iRegister8& to, u8 imm, bool preserve_flags )
{
if( !preserve_flags && (imm == 0) )
iXOR( to, to );
else
MOV8::Emit( to, imm );
}
//////////////////////////////////////////////////////////////////////////////////////////
// DIV/MUL/IDIV/IMUL instructions (Implemented!)
// F6 is r8, F7 is r32.
// MUL is 4, DIV is 6.
enum MulDivType
{
MDT_Mul = 4,
MDT_iMul = 5,
MDT_Div = 6,
MDT_iDiv = 7
};
// ------------------------------------------------------------------------
// EAX form emitter for Mul/Div/iMUL/iDIV
//
template< int OperandSize >
static __forceinline void EmitMulDiv_OneRegForm( MulDivType InstType, const iRegister<OperandSize>& from )
{
if( OperandSize == 2 ) iWrite<u8>( 0x66 );
iWrite<u8>( (OperandSize == 1) ? 0xf6 : 0xf7 );
ModRM( ModRm_Direct, InstType, from.Id );
}
static __forceinline void EmitMulDiv_OneRegForm( MulDivType InstType, const ModSibSized& sibsrc )
{
if( sibsrc.OperandSize == 2 ) iWrite<u8>( 0x66 );
iWrite<u8>( (sibsrc.OperandSize == 1) ? 0xf6 : 0xf7 );
EmitSibMagic( InstType, sibsrc );
}
//////////////////////////////////////////////////////////////////////////////////////////
// All iMul forms are valid for 16 and 32 bit register operands only!
// The following iMul-specific forms are valid for 16 and 32 bit register operands only!
template< typename ImmType >
class iMulImpl
@ -697,15 +491,15 @@ protected:
public:
// ------------------------------------------------------------------------
static __forceinline void Emit( const iRegister<OperandSize>& to, const iRegister<OperandSize>& from )
static __emitinline void Emit( const iRegister<ImmType>& to, const iRegister<ImmType>& from )
{
prefix16();
write16( 0xaf0f );
ModRM( ModRm_Direct, to.Id, from.Id );
ModRM_Direct( to.Id, from.Id );
}
// ------------------------------------------------------------------------
static __forceinline void Emit( const iRegister<OperandSize>& to, const void* src )
static __emitinline void Emit( const iRegister<ImmType>& to, const void* src )
{
prefix16();
write16( 0xaf0f );
@ -713,7 +507,7 @@ public:
}
// ------------------------------------------------------------------------
static __forceinline void Emit( const iRegister<OperandSize>& to, const ModSibBase& src )
static __emitinline void Emit( const iRegister<ImmType>& to, const ModSibBase& src )
{
prefix16();
write16( 0xaf0f );
@ -721,11 +515,11 @@ public:
}
// ------------------------------------------------------------------------
static __forceinline void Emit( const iRegister<OperandSize>& to, const iRegister<OperandSize>& from, ImmType imm )
static __emitinline void Emit( const iRegister<ImmType>& to, const iRegister<ImmType>& from, ImmType imm )
{
prefix16();
write16( is_s8( imm ) ? 0x6b : 0x69 );
ModRM( ModRm_Direct, to.Id, from.Id );
ModRM_Direct( to.Id, from.Id );
if( is_s8( imm ) )
write8( imm );
else
@ -733,7 +527,7 @@ public:
}
// ------------------------------------------------------------------------
static __forceinline void Emit( const iRegister<OperandSize>& to, const void* src, ImmType imm )
static __emitinline void Emit( const iRegister<ImmType>& to, const void* src, ImmType imm )
{
prefix16();
write16( is_s8( imm ) ? 0x6b : 0x69 );
@ -745,7 +539,7 @@ public:
}
// ------------------------------------------------------------------------
static __forceinline void Emit( const iRegister<OperandSize>& to, const ModSibBase& src, ImmType imm )
static __emitinline void Emit( const iRegister<ImmType>& to, const ModSibBase& src, ImmType imm )
{
prefix16();
write16( is_s8( imm ) ? 0x6b : 0x69 );
@ -757,28 +551,9 @@ public:
}
};
__forceinline void iUMUL( const iRegister32& from ) { EmitMulDiv_OneRegForm( MDT_Mul, from ); }
__forceinline void iUMUL( const iRegister16& from ) { EmitMulDiv_OneRegForm( MDT_Mul, from ); }
__forceinline void iUMUL( const iRegister8& from ) { EmitMulDiv_OneRegForm( MDT_Mul, from ); }
__noinline void iUMUL( const ModSibSized& from ) { EmitMulDiv_OneRegForm( MDT_Mul, from ); }
__forceinline void iUDIV( const iRegister32& from ) { EmitMulDiv_OneRegForm( MDT_Div, from ); }
__forceinline void iUDIV( const iRegister16& from ) { EmitMulDiv_OneRegForm( MDT_Div, from ); }
__forceinline void iUDIV( const iRegister8& from ) { EmitMulDiv_OneRegForm( MDT_Div, from ); }
__noinline void iUDIV( const ModSibSized& from ) { EmitMulDiv_OneRegForm( MDT_Div, from ); }
__forceinline void iSDIV( const iRegister32& from ) { EmitMulDiv_OneRegForm( MDT_iDiv, from ); }
__forceinline void iSDIV( const iRegister16& from ) { EmitMulDiv_OneRegForm( MDT_iDiv, from ); }
__forceinline void iSDIV( const iRegister8& from ) { EmitMulDiv_OneRegForm( MDT_iDiv, from ); }
__noinline void iSDIV( const ModSibSized& from ) { EmitMulDiv_OneRegForm( MDT_iDiv, from ); }
__forceinline void iSMUL( const iRegister32& from ) { EmitMulDiv_OneRegForm( MDT_iMul, from ); }
__forceinline void iSMUL( const iRegister16& from ) { EmitMulDiv_OneRegForm( MDT_iMul, from ); }
__forceinline void iSMUL( const iRegister8& from ) { EmitMulDiv_OneRegForm( MDT_iMul, from ); }
__noinline void iSMUL( const ModSibSized& from ) { EmitMulDiv_OneRegForm( MDT_iMul, from ); }
// ------------------------------------------------------------------------
// iMUL's special forms (unique to iMUL alone)
// iMUL's special forms (unique to iMUL alone), and valid for 32/16 bit operands only,
// thus no templates are used.
namespace Internal
{

View File

@ -37,7 +37,6 @@ namespace x86Emitter
{
extern void iJccKnownTarget( JccComparisonType comparison, void* target, bool slideForward=false );
// ----- Lea Instructions (Load Effective Address) -----
// Note: alternate (void*) forms of these instructions are not provided since those
// forms are functionally equivalent to Mov reg,imm, and thus better written as MOVs
@ -86,74 +85,22 @@ namespace x86Emitter
//////////////////////////////////////////////////////////////////////////////////////////
// MUL / DIV instructions
extern void iUMUL( const iRegister32& from );
extern void iUMUL( const iRegister16& from );
extern void iUMUL( const iRegister8& from );
extern void iUMUL( const ModSibSized& from );
extern void iUDIV( const iRegister32& from );
extern void iUDIV( const iRegister16& from );
extern void iUDIV( const iRegister8& from );
extern void iUDIV( const ModSibSized& from );
extern void iSDIV( const iRegister32& from );
extern void iSDIV( const iRegister16& from );
extern void iSDIV( const iRegister8& from );
extern void iSDIV( const ModSibSized& from );
extern void iSMUL( const iRegister32& from );
extern void iSMUL( const iRegister32& to, const iRegister32& from );
extern void iSMUL( const iRegister32& to, const void* src );
extern void iSMUL( const iRegister32& to, const iRegister32& from, s32 imm );
extern void iSMUL( const iRegister32& to, const ModSibBase& src );
extern void iSMUL( const iRegister32& to, const ModSibBase& src, s32 imm );
extern void iSMUL( const iRegister16& from );
extern void iSMUL( const iRegister16& to, const iRegister16& from );
extern void iSMUL( const iRegister16& to, const void* src );
extern void iSMUL( const iRegister16& to, const iRegister16& from, s16 imm );
extern void iSMUL( const iRegister16& to, const ModSibBase& src );
extern void iSMUL( const iRegister16& to, const ModSibBase& src, s16 imm );
extern void iSMUL( const iRegister8& from );
extern void iSMUL( const ModSibSized& from );
//////////////////////////////////////////////////////////////////////////////////////////
// MOV instructions!
// ---------- 32 Bit Interface -----------
extern void iMOV( const iRegister32& to, const iRegister32& from );
extern void iMOV( const ModSibBase& sibdest, const iRegister32& from );
extern void iMOV( const iRegister32& to, const ModSibBase& sibsrc );
extern void iMOV( const iRegister32& to, const void* src );
extern void iMOV( void* dest, const iRegister32& from );
// preserve_flags - set to true to disable optimizations which could alter the state of
// the flags (namely replacing mov reg,0 with xor).
extern void iMOV( const iRegister32& to, u32 imm, bool preserve_flags=false );
extern void iMOV( const ModSibStrict<4>& sibdest, u32 imm );
// ---------- 16 Bit Interface -----------
extern void iMOV( const iRegister16& to, const iRegister16& from );
extern void iMOV( const ModSibBase& sibdest, const iRegister16& from );
extern void iMOV( const iRegister16& to, const ModSibBase& sibsrc );
extern void iMOV( const iRegister16& to, const void* src );
extern void iMOV( void* dest, const iRegister16& from );
// preserve_flags - set to true to disable optimizations which could alter the state of
// the flags (namely replacing mov reg,0 with xor).
extern void iMOV( const iRegister16& to, u16 imm, bool preserve_flags=false );
extern void iMOV( const ModSibStrict<2>& sibdest, u16 imm );
// ---------- 8 Bit Interface -----------
extern void iMOV( const iRegister8& to, const iRegister8& from );
extern void iMOV( const ModSibBase& sibdest, const iRegister8& from );
extern void iMOV( const iRegister8& to, const ModSibBase& sibsrc );
extern void iMOV( const iRegister8& to, const void* src );
extern void iMOV( void* dest, const iRegister8& from );
extern void iMOV( const iRegister8& to, u8 imm, bool preserve_flags=false );
extern void iMOV( const ModSibStrict<1>& sibdest, u8 imm );
template< typename T >
__forceinline void iSMUL( const iRegister<T>& from ) { Internal::Group3Impl<T>::Emit( Internal::G3Type_iMUL, from ); }
template< typename T >
__noinline void iSMUL( const ModSibStrict<T>& from ) { Internal::Group3Impl<T>::Emit( Internal::G3Type_iMUL, from ); }
//////////////////////////////////////////////////////////////////////////////////////////
// JMP / Jcc Instructions!

View File

@ -34,55 +34,60 @@
using namespace x86Emitter;
template< int OperandSize >
static __forceinline iRegister<OperandSize> _reghlp( x86IntRegType src )
template< typename ImmType >
static __forceinline iRegister<ImmType> _reghlp( x86IntRegType src )
{
return iRegister<OperandSize>( src );
return iRegister<ImmType>( src );
}
static __forceinline ModSibBase _mrmhlp( x86IntRegType src )
{
return ptr[_reghlp<4>(src)];
return ptr[_reghlp<u32>(src)];
}
template< int OperandSize >
static __forceinline ModSibStrict<OperandSize> _mhlp( x86IntRegType src )
template< typename ImmType >
static __forceinline ModSibStrict<ImmType> _mhlp( x86IntRegType src )
{
return ModSibStrict<OperandSize>( x86IndexReg::Empty, x86IndexReg(src) );
return ModSibStrict<ImmType>( x86IndexReg::Empty, x86IndexReg(src) );
}
template< int OperandSize >
static __forceinline ModSibStrict<OperandSize> _mhlp2( x86IntRegType src1, x86IntRegType src2 )
template< typename ImmType >
static __forceinline ModSibStrict<ImmType> _mhlp2( x86IntRegType src1, x86IntRegType src2 )
{
return ModSibStrict<OperandSize>( x86IndexReg(src2), x86IndexReg(src1) );
return ModSibStrict<ImmType>( x86IndexReg(src2), x86IndexReg(src1) );
}
//////////////////////////////////////////////////////////////////////////////////////////
//
#define DEFINE_LEGACY_HELPER( cod, bits ) \
emitterT void cod##bits##RtoR( x86IntRegType to, x86IntRegType from ) { i##cod( _reghlp<bits/8>(to), _reghlp<bits/8>(from) ); } \
emitterT void cod##bits##ItoR( x86IntRegType to, u##bits imm ) { i##cod( _reghlp<bits/8>(to), imm ); } \
emitterT void cod##bits##MtoR( x86IntRegType to, uptr from ) { i##cod( _reghlp<bits/8>(to), (void*)from ); } \
emitterT void cod##bits##RtoM( uptr to, x86IntRegType from ) { i##cod( (void*)to, _reghlp<bits/8>(from) ); } \
emitterT void cod##bits##RtoR( x86IntRegType to, x86IntRegType from ) { i##cod( _reghlp<u##bits>(to), _reghlp<u##bits>(from) ); } \
emitterT void cod##bits##ItoR( x86IntRegType to, u##bits imm ) { i##cod( _reghlp<u##bits>(to), imm ); } \
emitterT void cod##bits##MtoR( x86IntRegType to, uptr from ) { i##cod( _reghlp<u##bits>(to), (void*)from ); } \
emitterT void cod##bits##RtoM( uptr to, x86IntRegType from ) { i##cod( (void*)to, _reghlp<u##bits>(from) ); } \
emitterT void cod##bits##ItoM( uptr to, u##bits imm ) { i##cod( ptr##bits[to], imm ); } \
emitterT void cod##bits##ItoRm( x86IntRegType to, u##bits imm, int offset ) { i##cod( _mhlp<bits/8>(to) + offset, imm ); } \
emitterT void cod##bits##RmtoR( x86IntRegType to, x86IntRegType from, int offset ) { i##cod( _reghlp<bits/8>(to), _mhlp<bits/8>(from) + offset ); } \
emitterT void cod##bits##RtoRm( x86IntRegType to, x86IntRegType from, int offset ) { i##cod( _mhlp<bits/8>(to) + offset, _reghlp<bits/8>(from) ); } \
emitterT void cod##bits##ItoRm( x86IntRegType to, u##bits imm, int offset ) { i##cod( _mhlp<u##bits>(to) + offset, imm ); } \
emitterT void cod##bits##RmtoR( x86IntRegType to, x86IntRegType from, int offset ) { i##cod( _reghlp<u##bits>(to), _mhlp<u##bits>(from) + offset ); } \
emitterT void cod##bits##RtoRm( x86IntRegType to, x86IntRegType from, int offset ) { i##cod( _mhlp<u##bits>(to) + offset, _reghlp<u##bits>(from) ); } \
emitterT void cod##bits##RtoRmS( x86IntRegType to1, x86IntRegType to2, x86IntRegType from, int offset ) \
{ i##cod( _mhlp2<bits/8>(to1,to2) + offset, _reghlp<bits/8>(from) ); } \
{ i##cod( _mhlp2<u##bits>(to1,to2) + offset, _reghlp<u##bits>(from) ); } \
emitterT void cod##bits##RmStoR( x86IntRegType to, x86IntRegType from1, x86IntRegType from2, int offset ) \
{ i##cod( _reghlp<bits/8>(to), _mhlp2<bits/8>(from1,from2) + offset ); }
{ i##cod( _reghlp<u##bits>(to), _mhlp2<u##bits>(from1,from2) + offset ); }
#define DEFINE_LEGACY_SHIFT_HELPER( cod, bits ) \
emitterT void cod##bits##CLtoR( x86IntRegType to ) { i##cod( _reghlp<bits/8>(to), cl ); } \
emitterT void cod##bits##ItoR( x86IntRegType to, u8 imm ) { i##cod( _reghlp<bits/8>(to), imm ); } \
emitterT void cod##bits##CLtoR( x86IntRegType to ) { i##cod( _reghlp<u##bits>(to), cl ); } \
emitterT void cod##bits##ItoR( x86IntRegType to, u8 imm ) { i##cod( _reghlp<u##bits>(to), imm ); } \
emitterT void cod##bits##CLtoM( uptr to ) { i##cod( ptr##bits[to], cl ); } \
emitterT void cod##bits##ItoM( uptr to, u8 imm ) { i##cod( ptr##bits[to], imm ); } \
emitterT void cod##bits##ItoRm( x86IntRegType to, u8 imm, int offset ) { i##cod( _mhlp<bits/8>(to) + offset, imm ); } \
emitterT void cod##bits##CLtoRm( x86IntRegType to, int offset ) { i##cod( _mhlp<bits/8>(to) + offset, cl ); }
emitterT void cod##bits##ItoRm( x86IntRegType to, u8 imm, int offset ) { i##cod( _mhlp<u##bits>(to) + offset, imm ); } \
emitterT void cod##bits##CLtoRm( x86IntRegType to, int offset ) { i##cod( _mhlp<u##bits>(to) + offset, cl ); }
#define DEFINE_LEGACY_ONEREG_HELPER( cod, bits ) \
emitterT void cod##bits##R( x86IntRegType to ) { i##cod( _reghlp<u##bits>(to) ); } \
emitterT void cod##bits##M( uptr to ) { i##cod( ptr##bits[to] ); } \
emitterT void cod##bits##Rm( x86IntRegType to, uptr offset ) { i##cod( _mhlp<u##bits>(to) + offset ); }
//emitterT void cod##bits##RtoRmS( x86IntRegType to1, x86IntRegType to2, x86IntRegType from, int offset ) \
// { cod( _mhlp2<bits/8>(to1,to2) + offset, _reghlp<bits/8>(from) ); } \
// { cod( _mhlp2<u##bits>(to1,to2) + offset, _reghlp<u##bits>(from) ); } \
#define DEFINE_OPCODE_LEGACY( cod ) \
DEFINE_LEGACY_HELPER( cod, 32 ) \
@ -94,6 +99,11 @@ static __forceinline ModSibStrict<OperandSize> _mhlp2( x86IntRegType src1, x86In
DEFINE_LEGACY_SHIFT_HELPER( cod, 16 ) \
DEFINE_LEGACY_SHIFT_HELPER( cod, 8 )
#define DEFINE_OPCODE_ONEREG_LEGACY( cod ) \
DEFINE_LEGACY_ONEREG_HELPER( cod, 32 ) \
DEFINE_LEGACY_ONEREG_HELPER( cod, 16 ) \
DEFINE_LEGACY_ONEREG_HELPER( cod, 8 )
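For reference, a sketch of what DEFINE_OPCODE_ONEREG_LEGACY( INC ) expands to for the 32-bit helper set:

// Expansion of DEFINE_LEGACY_ONEREG_HELPER( INC, 32 ):
emitterT void INC32R( x86IntRegType to )                { iINC( _reghlp<u32>(to) ); }
emitterT void INC32M( uptr to )                         { iINC( ptr32[to] ); }
emitterT void INC32Rm( x86IntRegType to, uptr offset )  { iINC( _mhlp<u32>(to) + offset ); }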
//////////////////////////////////////////////////////////////////////////////////////////
//
DEFINE_OPCODE_LEGACY( ADD )
@ -115,6 +125,12 @@ DEFINE_OPCODE_SHIFT_LEGACY( SAR )
DEFINE_OPCODE_LEGACY( MOV )
DEFINE_OPCODE_ONEREG_LEGACY( INC )
DEFINE_OPCODE_ONEREG_LEGACY( DEC )
DEFINE_OPCODE_ONEREG_LEGACY( NOT )
DEFINE_OPCODE_ONEREG_LEGACY( NEG )
// ------------------------------------------------------------------------
#define DEFINE_LEGACY_MOVEXTEND( form, destbits, srcbits ) \
emitterT void MOV##form##destbits##R##srcbits##toR( x86IntRegType to, x86IntRegType from ) { iMOV##form( iRegister##destbits( to ), iRegister##srcbits( from ) ); } \
@ -150,7 +166,7 @@ emitterT void MOV8RmSOffsettoR( x86IntRegType to, x86IntRegType from1, s32 from2
emitterT void AND32I8toR( x86IntRegType to, s8 from )
{
iAND( _reghlp<4>(to), from );
iAND( _reghlp<u32>(to), from );
}
emitterT void AND32I8toM( uptr to, s8 from )
@ -374,114 +390,6 @@ emitterT void STC( void ) { iSTC(); }
emitterT void CLC( void ) { iCLC(); }
emitterT void NOP( void ) { iNOP(); }
////////////////////////////////////
// arithmetic instructions /
////////////////////////////////////
/* inc r32 */
emitterT void INC32R( x86IntRegType to )
{
write8( 0x40 + to );
}
/* inc m32 */
emitterT void INC32M( u32 to )
{
write8( 0xFF );
ModRM( 0, 0, DISP32 );
write32( MEMADDR(to, 4) );
}
/* inc r16 */
emitterT void INC16R( x86IntRegType to )
{
write8( 0x66 );
write8( 0x40 + to );
}
/* inc m16 */
emitterT void INC16M( u32 to )
{
write8( 0x66 );
write8( 0xFF );
ModRM( 0, 0, DISP32 );
write32( MEMADDR(to, 4) );
}
/* dec r32 */
emitterT void DEC32R( x86IntRegType to )
{
write8( 0x48 + to );
}
/* dec m32 */
emitterT void DEC32M( u32 to )
{
write8( 0xFF );
ModRM( 0, 1, DISP32 );
write32( MEMADDR(to, 4) );
}
/* dec r16 */
emitterT void DEC16R( x86IntRegType to )
{
write8( 0x66 );
write8( 0x48 + to );
}
/* dec m16 */
emitterT void DEC16M( u32 to )
{
write8( 0x66 );
write8( 0xFF );
ModRM( 0, 1, DISP32 );
write32( MEMADDR(to, 4) );
}
////////////////////////////////////
// logical instructions /
////////////////////////////////////
/* not r32 */
emitterT void NOT32R( x86IntRegType from )
{
RexB(0,from);
write8( 0xF7 );
ModRM( 3, 2, from );
}
// not m32
emitterT void NOT32M( u32 from )
{
write8( 0xF7 );
ModRM( 0, 2, DISP32 );
write32( MEMADDR(from, 4));
}
/* neg r32 */
emitterT void NEG32R( x86IntRegType from )
{
RexB(0,from);
write8( 0xF7 );
ModRM( 3, 3, from );
}
emitterT void NEG32M( u32 from )
{
write8( 0xF7 );
ModRM( 0, 3, DISP32 );
write32( MEMADDR(from, 4));
}
/* neg r16 */
emitterT void NEG16R( x86IntRegType from )
{
write8( 0x66 );
RexB(0,from);
write8( 0xF7 );
ModRM( 3, 3, from );
}
////////////////////////////////////
// jump instructions /
////////////////////////////////////

View File

@ -156,7 +156,6 @@ namespace x86Emitter
Mod_Direct, // direct reg/reg operation
};
static const int ModRm_Direct = 3; // when used as the first parameter, specifies direct register operation (no mem)
static const int ModRm_UseSib = 4; // same index value as ESP (used in RM field)
static const int ModRm_UseDisp32 = 5; // same index value as EBP (used in Mod field)
@ -197,15 +196,16 @@ namespace x86Emitter
//////////////////////////////////////////////////////////////////////////////////////////
//
template< int OperandSize >
template< typename OperandType >
class iRegister
{
public:
static const uint OperandSize = sizeof( OperandType );
static const iRegister Empty; // defined as an empty/unused value (-1)
int Id;
iRegister( const iRegister<OperandSize>& src ) : Id( src.Id ) {}
iRegister( const iRegister<OperandType>& src ) : Id( src.Id ) {}
iRegister(): Id( -1 ) {}
explicit iRegister( int regId ) : Id( regId ) { jASSUME( Id >= -1 && Id < 8 ); }
@ -214,17 +214,17 @@ namespace x86Emitter
// Returns true if the register is a valid accumulator: Eax, Ax, Al.
bool IsAccumulator() const { return Id == 0; }
bool operator==( const iRegister<OperandSize>& src ) const
bool operator==( const iRegister<OperandType>& src ) const
{
return (Id == src.Id);
}
bool operator!=( const iRegister<OperandSize>& src ) const
bool operator!=( const iRegister<OperandType>& src ) const
{
return (Id != src.Id);
}
iRegister<OperandSize>& operator=( const iRegister<OperandSize>& src )
iRegister<OperandType>& operator=( const iRegister<OperandType>& src )
{
Id = src.Id;
return *this;
@ -239,9 +239,9 @@ namespace x86Emitter
// all about the templated code in haphazard fashion. Yay.. >_<
//
typedef iRegister<4> iRegister32;
typedef iRegister<2> iRegister16;
typedef iRegister<1> iRegister8;
typedef iRegister<u32> iRegister32;
typedef iRegister<u16> iRegister16;
typedef iRegister<u8> iRegister8;
class iRegisterCL : public iRegister8
{
@ -396,65 +396,29 @@ namespace x86Emitter
__forceinline void Reduce();
};
//////////////////////////////////////////////////////////////////////////////////////////
//
class ModSibSized : public ModSibBase
{
public:
int OperandSize;
ModSibSized( int opsize, const iAddressInfo& src ) :
ModSibBase( src ),
OperandSize( opsize )
{
jASSUME( OperandSize == 1 || OperandSize == 2 || OperandSize == 4 );
}
ModSibSized( int opsize, s32 disp ) :
ModSibBase( disp ),
OperandSize( opsize )
{
jASSUME( OperandSize == 1 || OperandSize == 2 || OperandSize == 4 );
}
ModSibSized( int opsize, x86IndexReg base, x86IndexReg index, int scale=0, s32 displacement=0 ) :
ModSibBase( base, index, scale, displacement ),
OperandSize( opsize )
{
jASSUME( OperandSize == 1 || OperandSize == 2 || OperandSize == 4 );
}
__forceinline ModSibSized& Add( s32 imm )
{
Displacement += imm;
return *this;
}
__forceinline ModSibSized operator+( const s32 imm ) const { return ModSibSized( *this ).Add( imm ); }
__forceinline ModSibSized operator-( const s32 imm ) const { return ModSibSized( *this ).Add( -imm ); }
};
//////////////////////////////////////////////////////////////////////////////////////////
// Strictly-typed version of ModSibBase, which is used to apply operand size information
// to ImmToMem operations.
//
template< int OpSize >
class ModSibStrict : public ModSibSized
template< typename OperandType >
class ModSibStrict : public ModSibBase
{
public:
__forceinline explicit ModSibStrict( const iAddressInfo& src ) : ModSibSized( OpSize, src ) {}
__forceinline explicit ModSibStrict( s32 disp ) : ModSibSized( OpSize, disp ) {}
static const uint OperandSize = sizeof( OperandType );
__forceinline explicit ModSibStrict( const iAddressInfo& src ) : ModSibBase( src ) {}
__forceinline explicit ModSibStrict( s32 disp ) : ModSibBase( disp ) {}
__forceinline ModSibStrict( x86IndexReg base, x86IndexReg index, int scale=0, s32 displacement=0 ) :
ModSibSized( OpSize, base, index, scale, displacement ) {}
ModSibBase( base, index, scale, displacement ) {}
__forceinline ModSibStrict<OpSize>& Add( s32 imm )
__forceinline ModSibStrict<OperandType>& Add( s32 imm )
{
Displacement += imm;
return *this;
}
__forceinline ModSibStrict<OpSize> operator+( const s32 imm ) const { return ModSibStrict<OpSize>( *this ).Add( imm ); }
__forceinline ModSibStrict<OpSize> operator-( const s32 imm ) const { return ModSibStrict<OpSize>( *this ).Add( -imm ); }
__forceinline ModSibStrict<OperandType> operator+( const s32 imm ) const { return ModSibStrict<OperandType>( *this ).Add( imm ); }
__forceinline ModSibStrict<OperandType> operator-( const s32 imm ) const { return ModSibStrict<OperandType>( *this ).Add( -imm ); }
};
//////////////////////////////////////////////////////////////////////////////////////////
@ -493,40 +457,42 @@ namespace x86Emitter
// Explicit version of ptr[], in the form of ptr32[], ptr16[], etc. which allows
// specification of the operand size for ImmToMem operations.
//
template< int OperandSize >
template< typename OperandType >
struct x86IndexerTypeExplicit
{
static const uint OperandSize = sizeof( OperandType );
// passthrough instruction, allows ModSib to pass silently through ptr translation
// without doing anything and without compiler error.
const ModSibStrict<OperandSize>& operator[]( const ModSibStrict<OperandSize>& src ) const { return src; }
const ModSibStrict<OperandType>& operator[]( const ModSibStrict<OperandType>& src ) const { return src; }
__forceinline ModSibStrict<OperandSize> operator[]( x86IndexReg src ) const
__forceinline ModSibStrict<OperandType> operator[]( x86IndexReg src ) const
{
return ModSibStrict<OperandSize>( src, x86IndexReg::Empty );
return ModSibStrict<OperandType>( src, x86IndexReg::Empty );
}
__forceinline ModSibStrict<OperandSize> operator[]( const iAddressInfo& src ) const
__forceinline ModSibStrict<OperandType> operator[]( const iAddressInfo& src ) const
{
return ModSibStrict<OperandSize>( src );
return ModSibStrict<OperandType>( src );
}
__forceinline ModSibStrict<OperandSize> operator[]( uptr src ) const
__forceinline ModSibStrict<OperandType> operator[]( uptr src ) const
{
return ModSibStrict<OperandSize>( src );
return ModSibStrict<OperandType>( src );
}
__forceinline ModSibStrict<OperandSize> operator[]( const void* src ) const
__forceinline ModSibStrict<OperandType> operator[]( const void* src ) const
{
return ModSibStrict<OperandSize>( (uptr)src );
return ModSibStrict<OperandType>( (uptr)src );
}
x86IndexerTypeExplicit() {} // GCC initialization dummy
};
extern const x86IndexerType ptr;
extern const x86IndexerTypeExplicit<4> ptr32;
extern const x86IndexerTypeExplicit<2> ptr16;
extern const x86IndexerTypeExplicit<1> ptr8;
extern const x86IndexerTypeExplicit<u32> ptr32;
extern const x86IndexerTypeExplicit<u16> ptr16;
extern const x86IndexerTypeExplicit<u8> ptr8;
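A short sketch of how the typed indexers carry operand size into the Imm-to-Mem forms (byte comments illustrative):

iMOV( ptr32[0x80001000], 0x1234 );  // C7 /0 imm32   mov dword ptr [0x80001000], 0x1234
iMOV( ptr8[0x80001000], 0x12 );     // C6 /0 imm8    mov byte ptr [0x80001000], 0x12
// The untyped 'ptr' yields a plain ModSibBase, which cannot size an immediate,
// so the Imm-to-Mem overloads require ptr32/ptr16/ptr8.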
//////////////////////////////////////////////////////////////////////////////////////////
// JccComparisonType - enumerated possibilities for inspired code branching!
@ -652,14 +618,29 @@ namespace x86Emitter
extern void EmitSibMagic( uint regfield, const ModSibBase& info );
// ------------------------------------------------------------------------
template< typename ImmType >
class ImplementationHelper
{
public:
static const uint OperandSize = sizeof(ImmType);
protected:
static bool Is8BitOperand() { return OperandSize == 1; }
static void prefix16() { if( OperandSize == 2 ) iWrite<u8>( 0x66 ); }
};
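A minimal sketch of the intended usage pattern, mirroring the derived classes above (SomeInstImpl and its opcode pair are hypothetical):

template< typename ImmType >
class SomeInstImpl : public ImplementationHelper< ImmType >
{
public:
	static __emitinline void Emit( const iRegister<ImmType>& to )
	{
		prefix16();                                    // writes 0x66 only when ImmType is u16
		iWrite<u8>( Is8BitOperand() ? 0xd0 : 0xd1 );   // hypothetical 8-bit / full-size opcode pair
		ModRM_Direct( 0, to.Id );
	}
};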
// ------------------------------------------------------------------------
#include "implement/group1.h"
#include "implement/group2.h"
#include "implement/group3.h"
#include "implement/movs.h" // cmov and movsx/zx
#include "implement/dwshift.h" // dowubleword shifts!
#include "implement/dwshift.h" // doubleword shifts!
#include "implement/incdec.h"
}
// ------------------------------------------------------------------------
//////////////////////////////////////////////////////////////////////////////////////////
//
// ----- Group 1 Instruction Class -----
extern const Internal::Group1ImplAll<Internal::G1Type_ADD> iADD;
@ -676,6 +657,8 @@ namespace x86Emitter
// zero. This is a safe optimization since any zero-value shift does not affect any
// flags.
extern const Internal::MovImplAll iMOV;
extern const Internal::Group2ImplAll<Internal::G2Type_ROL> iROL;
extern const Internal::Group2ImplAll<Internal::G2Type_ROR> iROR;
extern const Internal::Group2ImplAll<Internal::G2Type_RCL> iRCL;
@ -684,6 +667,17 @@ namespace x86Emitter
extern const Internal::Group2ImplAll<Internal::G2Type_SHR> iSHR;
extern const Internal::Group2ImplAll<Internal::G2Type_SAR> iSAR;
// ----- Group 3 Instruction Class -----
extern const Internal::Group3ImplAll<Internal::G3Type_NOT> iNOT;
extern const Internal::Group3ImplAll<Internal::G3Type_NEG> iNEG;
extern const Internal::Group3ImplAll<Internal::G3Type_MUL> iUMUL;
extern const Internal::Group3ImplAll<Internal::G3Type_DIV> iUDIV;
extern const Internal::Group3ImplAll<Internal::G3Type_iDIV> iSDIV;
extern const Internal::IncDecImplAll<false> iINC;
extern const Internal::IncDecImplAll<true> iDEC;
extern const Internal::MovExtendImplAll<false> iMOVZX;
extern const Internal::MovExtendImplAll<true> iMOVSX;