diff --git a/pcsx2/x86/iVUmicroLower.cpp b/pcsx2/x86/iVUmicroLower.cpp index 3271d69681..9f9d816cf7 100644 --- a/pcsx2/x86/iVUmicroLower.cpp +++ b/pcsx2/x86/iVUmicroLower.cpp @@ -354,7 +354,7 @@ void recVUMI_IADD( VURegs *VU, int info ) if( fdreg == fsreg ) ADD32RtoR(fdreg, ftreg); else if( fdreg == ftreg ) ADD32RtoR(fdreg, fsreg); - else LEA16RRtoR(fdreg, fsreg, ftreg); + else LEA32RRtoR(fdreg, fsreg, ftreg); MOVZX32R16toR(fdreg, fdreg); // neeed since don't know if fdreg's upper bits are 0 } } diff --git a/pcsx2/x86/ix86/ix86.cpp b/pcsx2/x86/ix86/ix86.cpp index bbdf9da5ff..a13c7b34a7 100644 --- a/pcsx2/x86/ix86/ix86.cpp +++ b/pcsx2/x86/ix86/ix86.cpp @@ -45,16 +45,16 @@ x86IndexerType ptr; ////////////////////////////////////////////////////////////////////////////////////////// // -const x86Register x86Register::Empty( -1 ); +const x86Register32 x86Register32::Empty( -1 ); -const x86Register eax( 0 ); -const x86Register ebx( 3 ); -const x86Register ecx( 1 ); -const x86Register edx( 2 ); -const x86Register esi( 6 ); -const x86Register edi( 7 ); -const x86Register ebp( 5 ); -const x86Register esp( 4 ); +const x86Register32 eax( 0 ); +const x86Register32 ebx( 3 ); +const x86Register32 ecx( 1 ); +const x86Register32 edx( 2 ); +const x86Register32 esi( 6 ); +const x86Register32 edi( 7 ); +const x86Register32 ebp( 5 ); +const x86Register32 esp( 4 ); const x86Register16 ax( 0 ); const x86Register16 bx( 3 ); @@ -77,20 +77,30 @@ const x86Register8 bh( 7 ); ////////////////////////////////////////////////////////////////////////////////////////// // x86Register Method Implementations // -x86ModRm x86Register::operator+( const x86Register& right ) const +x86ModRm x86Register32::operator+( const x86Register32& right ) const { return x86ModRm( *this, right ); } -x86ModRm x86Register::operator+( const x86ModRm& right ) const +x86ModRm x86Register32::operator+( const x86ModRm& right ) const { return right + *this; } +x86ModRm x86Register32::operator+( s32 right ) const +{ + return x86ModRm( *this, right ); +} + +x86ModRm x86Register32::operator*( u32 right ) const +{ + return x86ModRm( Empty, *this, right ); +} + ////////////////////////////////////////////////////////////////////////////////////////// // x86ModRm Method Implementations // -x86ModRm& x86ModRm::Add( const x86Register& src ) +x86ModRm& x86ModRm::Add( const x86IndexReg& src ) { if( src == Index ) { @@ -99,7 +109,7 @@ x86ModRm& x86ModRm::Add( const x86Register& src ) else if( src == Base ) { // Compound the existing register reference into the Index/Scale pair. - Base = x86Register::Empty; + Base = x86IndexReg::Empty; if( src == Index ) Factor++; @@ -153,13 +163,20 @@ x86ModRm& x86ModRm::Add( const x86ModRm& src ) // void ModSib::Reduce() { - // If no index reg, then nothing for us to do... - if( Index.IsEmpty() || Scale == 0 ) return; + // If no index reg, then load the base register into the index slot. 
+ if( Index.IsEmpty() ) + { + Index = Base; + Scale = 0; + Base = x86IndexReg::Empty; + return; + } // The Scale has a series of valid forms, all shown here: switch( Scale ) { + case 0: break; case 1: Scale = 0; break; case 2: Scale = 1; break; @@ -203,7 +220,7 @@ ModSib::ModSib( const x86ModRm& src ) : Reduce(); } -ModSib::ModSib( x86Register base, x86Register index, int scale, s32 displacement ) : +ModSib::ModSib( x86IndexReg base, x86IndexReg index, int scale, s32 displacement ) : Base( base ), Index( index ), Scale( scale ), @@ -220,27 +237,24 @@ ModSib::ModSib( s32 displacement ) : { } -x86Register ModSib::GetEitherReg() const -{ - return Base.IsEmpty() ? Base : Index; -} - // ------------------------------------------------------------------------ // returns TRUE if this instruction requires SIB to be encoded, or FALSE if the // instruction ca be encoded as ModRm alone. -emitterT bool NeedsSibMagic( const ModSib& info ) +bool NeedsSibMagic( const ModSib& info ) { // no registers? no sibs! - if( info.Base.IsEmpty() && info.Index.IsEmpty() ) return false; + if( info.Index.IsEmpty() ) return false; // A scaled register needs a SIB - if( info.Scale != 0 && !info.Index.IsEmpty() ) return true; + if( info.Scale != 0 ) return true; // two registers needs a SIB - if( !info.Base.IsEmpty() && !info.Index.IsEmpty() ) return true; + if( !info.Base.IsEmpty() ) return true; - // If register is ESP, then we need a SIB: - if( info.Base == esp || info.Index == esp ) return true; + // If index register is ESP, then we need a SIB: + // (the ModSib::Reduce() ensures that stand-alone ESP will be in the + // index position for us) + if( info.Index == esp ) return true; return false; } @@ -251,7 +265,7 @@ emitterT bool NeedsSibMagic( const ModSib& info ) // regfield - register field to be written to the ModRm. This is either a register specifier // or an opcode extension. In either case, the instruction determines the value for us. // -emitterT void EmitSibMagic( int regfield, const ModSib& info ) +void EmitSibMagic( int regfield, const ModSib& info ) { int displacement_size = (info.Displacement == 0) ? 0 : ( ( info.IsByteSizeDisp() ) ? 1 : 2 ); @@ -263,29 +277,45 @@ emitterT void EmitSibMagic( int regfield, const ModSib& info ) // which is encoded as "EBP w/o displacement" (which is why EBP must always be // encoded *with* a displacement of 0, if it would otherwise not have one). - x86Register basereg = info.GetEitherReg(); - - if( basereg.IsEmpty() ) + if( info.Index.IsEmpty() ) ModRM( 0, regfield, ModRm_UseDisp32 ); else { - if( basereg == ebp && displacement_size == 0 ) + if( info.Index == ebp && displacement_size == 0 ) displacement_size = 1; // forces [ebp] to be encoded as [ebp+0]! - ModRM( displacement_size, regfield, basereg.Id ); + ModRM( displacement_size, regfield, info.Index.Id ); } } else { - ModRM( displacement_size, regfield, ModRm_UseSib ); - SibSB( info.Index.Id, info.Scale, info.Base.Id ); + // In order to encode "just" index*scale (and no base), we have to encode + // it as a special [index*scale + displacement] form, which is done by + // specifying EBP as the base register and setting the displacement field + // to zero. (same as ModRm w/o SIB form above, basically, except the + // ModRm_UseDisp flag is specified in the SIB instead of the ModRM field). 
+ + if( info.Base.IsEmpty() ) + { + ModRM( 0, regfield, ModRm_UseSib ); + SibSB( info.Scale, info.Index.Id, ModRm_UseDisp32 ); + displacement_size = 2; + } + else + { + if( info.Base == ebp && displacement_size == 0 ) + displacement_size = 1; // forces [ebp] to be encoded as [ebp+0]! + + ModRM( displacement_size, regfield, ModRm_UseSib ); + SibSB( info.Scale, info.Index.Id, info.Base.Id ); + } } switch( displacement_size ) { - case 0: break; - case 1: write8( info.Displacement ); break; - case 2: write32( info.Displacement ); break; + case 0: break; + case 1: write8( info.Displacement ); break; + case 2: write32( info.Displacement ); break; jNO_DEFAULT } } @@ -296,9 +326,166 @@ emitterT void EmitSibMagic( int regfield, const ModSib& info ) // regfield - register field to be written to the ModRm. This is either a register specifier // or an opcode extension. In either case, the instruction determines the value for us. // -emitterT void EmitSibMagic( x86Register regfield, const ModSib& info ) +emitterT void EmitSibMagic( x86Register32 regfield, const ModSib& info ) { EmitSibMagic( regfield.Id, info ); } +template< typename ToReg > +static void EmitLeaMagic( ToReg to, const ModSib& src, bool is16bit=false ) +{ + int displacement_size = (src.Displacement == 0) ? 0 : + ( ( src.IsByteSizeDisp() ) ? 1 : 2 ); + + // See EmitSibMagic for commenting on SIB encoding. + + if( !NeedsSibMagic( src ) ) + { + // LEA Land: means we have either 1-register encoding or just an offset. + // offset is encodable as an immediate MOV, and a register is encodable + // as a register MOV. + + if( src.Index.IsEmpty() ) + { + if( is16bit ) + MOV16ItoR( to.Id, src.Displacement ); + else + MOV32ItoR( to.Id, src.Displacement ); + return; + } + else if( displacement_size == 0 ) + { + if( is16bit ) + MOV16RtoR( to.Id, src.Index.Id ); + else + MOV32RtoR( to.Id, src.Index.Id ); + return; + } + else + { + // note: no need to do ebp+0 check since we encode all 0 displacements as + // register assignments above (via MOV) + + write8( 0x8d ); + ModRM( displacement_size, to.Id, src.Index.Id ); + } + } + else + { + if( src.Base.IsEmpty() ) + { + if( displacement_size == 0 ) + { + // Encode [Index*Scale] as a combination of Mov and Shl. + // This is more efficient because of the bloated format which requires + // a 32 bit displacement. + + if( is16bit ) + { + MOV16RtoR( to.Id, src.Index.Id ); + SHL16ItoR( to.Id, src.Scale ); + } + else + { + MOV32RtoR( to.Id, src.Index.Id ); + SHL32ItoR( to.Id, src.Scale ); + } + return; + } + + write8( 0x8d ); + ModRM( 0, to.Id, ModRm_UseSib ); + SibSB( src.Scale, src.Index.Id, ModRm_UseDisp32 ); + displacement_size = 2; // force 32bit displacement. + } + else + { + if( src.Base == ebp && displacement_size == 0 ) + displacement_size = 1; // forces [ebp] to be encoded as [ebp+0]! + + write8( 0x8d ); + ModRM( displacement_size, to.Id, ModRm_UseSib ); + SibSB( src.Scale, src.Index.Id, src.Base.Id ); + } + } + + switch( displacement_size ) + { + case 0: break; + case 1: write8( src.Displacement ); break; + case 2: write32( src.Displacement ); break; + jNO_DEFAULT + } + +} + +emitterT void LEA32( x86Register32 to, const ModSib& src ) +{ + EmitLeaMagic( to, src ); +} + + +emitterT void LEA16( x86Register16 to, const ModSib& src ) +{ + // fixme: is this right? Does Lea16 use 32 bit displacement and ModRM form? + + write8( 0x66 ); + EmitLeaMagic( to, src ); +} + +////////////////////////////////////////////////////////////////////////////////////////// +// Miscellaneous Section! 
+// Various Instructions with no parameter and no special encoding logic. +// +emitterT void RET() { write8( 0xC3 ); } +emitterT void CBW() { write16( 0x9866 ); } +emitterT void CWD() { write16( 0x9966 ); } +emitterT void CDQ() { write8( 0x99 ); } +emitterT void CWDE() { write8( 0x98 ); } + +emitterT void LAHF() { write8( 0x9f ); } +emitterT void SAHF() { write8( 0x9e ); } + + +////////////////////////////////////////////////////////////////////////////////////////// +// Push / Pop Emitters +// +// fixme? push/pop instructions always push and pop aligned to whatever mode the cpu +// is running in. So even though these say push32, they would essentially be push64 on +// an x64 build. Should I rename them accordingly? --air +// +// Note: pushad/popad implementations are intentionally left out. The instructions are +// invalid in x64, and are super slow on x32. Use multiple Push/Pop instructions instead. + + +emitterT void POP( x86Register32 from ) +{ + write8( 0x58 | from.Id ); +} + +emitterT void POP( const ModSib& from ) +{ + write8( 0x8f ); EmitSibMagic( 0, from ); +} + +emitterT void PUSH( u32 imm ) +{ + write8( 0x68 ); write32( imm ); +} + +emitterT void PUSH( x86Register32 from ) +{ + write8( 0x50 | from.Id ); +} + +emitterT void PUSH( const ModSib& from ) +{ + write8( 0xff ); EmitSibMagic( 6, from ); +} + +// pushes the EFLAGS register onto the stack +emitterT void PUSHFD() { write8( 0x9C ); } +// pops the EFLAGS register from the stack +emitterT void POPFD() { write8( 0x9D ); } + } diff --git a/pcsx2/x86/ix86/ix86.h b/pcsx2/x86/ix86/ix86.h index 939ff21e48..bf89a17853 100644 --- a/pcsx2/x86/ix86/ix86.h +++ b/pcsx2/x86/ix86/ix86.h @@ -100,6 +100,54 @@ extern void x86Align( int bytes ); extern void x86AlignExecutable( int align ); //------------------------------------------------------------------ +////////////////////////////////////////////////////////////////////////////////////////// +// New C++ Emitter! +// +// To use it just include the x86Emitter namespace into your file/class/function of choice.
+ +namespace x86Emitter +{ + extern void POP( x86Register32 from ); + extern void POP( const ModSib& from ); + + extern void PUSH( u32 imm ); + extern void PUSH( x86Register32 from ); + extern void PUSH( const ModSib& from ); + + extern void LEA32( x86Register32 to, const ModSib& src ); + extern void LEA16( x86Register16 to, const ModSib& src ); + + + static __forceinline void POP( void* from ) { POP( ptr[from] ); } + static __forceinline void PUSH( void* from ) { PUSH( ptr[from] ); } + + #define DECLARE_GROUP1_OPCODE_HELPER( lwr, bits ) \ + emitterT void lwr##bits( x86Register##bits to, x86Register##bits from ); \ + emitterT void lwr##bits( x86Register##bits to, void* from ); \ + emitterT void lwr##bits( x86Register##bits to, const ModSib& from ); \ + emitterT void lwr##bits( x86Register##bits to, u##bits imm ); \ + emitterT void lwr##bits( const ModSib& to, x86Register##bits from ); \ + emitterT void lwr##bits( void* to, x86Register##bits from ); \ + emitterT void lwr##bits( void* to, u##bits imm ); \ + emitterT void lwr##bits( const ModSib& to, u##bits imm ); + + #define DECLARE_GROUP1_OPCODE( lwr ) \ + DECLARE_GROUP1_OPCODE_HELPER( lwr, 32 ) + DECLARE_GROUP1_OPCODE_HELPER( lwr, 16 ) + DECLARE_GROUP1_OPCODE_HELPER( lwr, 8 ) + + DECLARE_GROUP1_OPCODE( ADD ) + DECLARE_GROUP1_OPCODE( CMP ) + DECLARE_GROUP1_OPCODE( OR ) + DECLARE_GROUP1_OPCODE( ADC ) + DECLARE_GROUP1_OPCODE( SBB ) + DECLARE_GROUP1_OPCODE( AND ) + DECLARE_GROUP1_OPCODE( SUB ) + DECLARE_GROUP1_OPCODE( XOR ) + +} + + extern void CLC( void ); extern void NOP( void ); @@ -130,6 +178,8 @@ extern void MOV32ItoRm( x86IntRegType to, u32 from, int offset=0); // mov r32 to [r32+off] extern void MOV32RtoRm( x86IntRegType to, x86IntRegType from, int offset=0); +// mov r16 to r16 +extern void MOV16RtoR( x86IntRegType to, x86IntRegType from ) ; // mov r16 to m16 extern void MOV16RtoM( uptr to, x86IntRegType from ); // mov m16 to r16 diff --git a/pcsx2/x86/ix86/ix86_group1.cpp b/pcsx2/x86/ix86/ix86_group1.cpp index 190550e1eb..f76950c0ef 100644 --- a/pcsx2/x86/ix86/ix86_group1.cpp +++ b/pcsx2/x86/ix86/ix86_group1.cpp @@ -33,6 +33,20 @@ namespace x86Emitter { +////////////////////////////////////////////////////////////////////////////////////////// +// x86RegConverter - this class is used internally by the emitter as a helper for +// converting 8 and 16 register forms into 32 bit forms. This way the end-user exposed API +// can use type-safe 8/16/32 bit register types, and the underlying code can use a single +// unified emitter to generate all function variations + prefixes and such. :) +// +class x86RegConverter : public x86Register32 +{ +public: + x86RegConverter( x86Register32 src ) : x86Register32( src ) {} + x86RegConverter( x86Register16 src ) : x86Register32( src.Id ) {} + x86RegConverter( x86Register8 src ) : x86Register32( src.Id ) {} +}; + enum Group1InstructionType { G1Type_ADD=0, @@ -46,29 +60,32 @@ enum Group1InstructionType }; -static emitterT void Group1( Group1InstructionType inst, x86Register to, x86Register from ) +static emitterT void Group1( Group1InstructionType inst, x86RegConverter to, x86RegConverter from, bool bit8form=false ) { - write8( 0x01 | (inst<<3) ); + write8( (bit8form ? 0 : 1) | (inst<<3) ); ModRM( 3, from.Id, to.Id ); } -static emitterT void Group1( Group1InstructionType inst, const ModSib& sibdest, x86Register from ) +static emitterT void Group1( Group1InstructionType inst, const ModSib& sibdest, x86RegConverter from, bool bit8form=false ) { - write8( 0x01 | (inst<<3) ); + write8( (bit8form ? 
0 : 1) | (inst<<3) ); EmitSibMagic( from, sibdest ); } -/* add m32 to r32 */ -static emitterT void Group1( Group1InstructionType inst, x86Register to, const ModSib& sibsrc ) +static emitterT void Group1( Group1InstructionType inst, x86RegConverter to, const ModSib& sibsrc, bool bit8form=false ) { - write8( 0x03 | (inst<<3) ); + write8( (bit8form ? 2 : 3) | (inst<<3) ); EmitSibMagic( to, sibsrc ); } +// Note: this function emits based on the operand size of imm, so 16 bit imms generate a 16 bit +// instruction (AX,BX,etc). template< typename T > -static emitterT void Group1_Imm( Group1InstructionType inst, x86Register to, T imm ) +static emitterT void Group1_Imm( Group1InstructionType inst, x86RegConverter to, T imm ) { - if( is_s8( imm ) ) + bool bit8form = (sizeof(T) == 1); + + if( !bit8form && is_s8( imm ) ) { write8( 0x83 ); ModRM( 3, inst, to.Id ); @@ -77,84 +94,81 @@ static emitterT void Group1_Imm( Group1InstructionType inst, x86Register to, T i else { if( to == eax ) - write8( 0x05 | (inst<<3) ); + write8( (bit8form ? 4 : 5) | (inst<<3) ); else { - write8( 0x81 ); + write8( bit8form ? 0x80 : 0x81 ); ModRM( 3, inst, to.Id ); } x86write( imm ); } } +// Note: this function emits based on the operand size of imm, so 16 bit imms generate a 16 bit +// instruction (AX,BX,etc). template< typename T > static emitterT void Group1_Imm( Group1InstructionType inst, const ModSib& sibdest, T imm ) { - write8( is_s8( imm ) ? 0x83 : 0x81 ); + bool bit8form = (sizeof(T) == 1); + + write8( bit8form ? 0x80 : (is_s8( imm ) ? 0x83 : 0x81) ); EmitSibMagic( inst, sibdest ); - if( is_s8( imm ) ) + if( !bit8form && is_s8( imm ) ) write8( (s8)imm ); else x86write( imm ); } -static emitterT void Group1_8( Group1InstructionType inst, x86Register to, s8 imm ) -{ - if( to == eax ) - { - write8( 0x04 | (inst<<3) ); - write8( imm ); - } - else - { - write8( 0x80 ); - ModRM( 3, inst, to.Id ); - write8( imm ); - } -} - // 16 bit instruction prefix! 
static __forceinline void prefix16() { write8(0x66); } -static __forceinline x86Register cvt2reg( x86Register16 src ) { return x86Register( src.Id ); } ////////////////////////////////////////////////////////////////////////////////////////// // -#define DEFINE_GROUP1_OPCODE( lwr, cod ) \ - emitterT void lwr##32( x86Register to, x86Register from ) { Group1( G1Type_##cod, to, from ); } \ - emitterT void lwr##32( x86Register to, void* from ) { Group1( G1Type_##cod, to, ptr[from] ); } \ - emitterT void lwr##32( void* to, x86Register from ) { Group1( G1Type_##cod, ptr[to], from ); } \ - emitterT void lwr##32( x86Register to, const x86ModRm& from ) { Group1( G1Type_##cod, to, ptr[from] ); } \ - emitterT void lwr##32( const x86ModRm& to, x86Register from ) { Group1( G1Type_##cod, ptr[to], from ); } \ - emitterT void lwr##32( x86Register to, u32 imm ) { Group1_Imm( G1Type_##cod, to, imm ); } \ - emitterT void lwr##32( void* to, u32 imm ) { Group1_Imm( G1Type_##cod, ptr[to], imm ); } \ - emitterT void lwr##32( const x86ModRm& to, u32 imm ) { Group1_Imm( G1Type_##cod, ptr[to], imm ); } \ +#define DEFINE_GROUP1_OPCODE( cod ) \ + emitterT void cod##32( x86Register32 to, x86Register32 from ) { Group1( G1Type_##cod, to, from ); } \ + emitterT void cod##32( x86Register32 to, void* from ) { Group1( G1Type_##cod, to, ptr[from] ); } \ + emitterT void cod##32( x86Register32 to, const ModSib& from ) { Group1( G1Type_##cod, to, from ); } \ + emitterT void cod##32( x86Register32 to, u32 imm ) { Group1_Imm( G1Type_##cod, to, imm ); } \ + emitterT void cod##32( const ModSib& to, x86Register32 from ) { Group1( G1Type_##cod, to, from ); } \ + emitterT void cod##32( void* to, x86Register32 from ) { Group1( G1Type_##cod, ptr[to], from ); } \ + emitterT void cod##32( void* to, u32 imm ) { Group1_Imm( G1Type_##cod, ptr[to], imm ); } \ + emitterT void cod##32( const ModSib& to, u32 imm ) { Group1_Imm( G1Type_##cod, to, imm ); } \ \ - emitterT void lwr##16( x86Register16 to, x86Register16 from ) { prefix16(); Group1( G1Type_##cod, cvt2reg(to), cvt2reg(from) ); } \ - emitterT void lwr##16( x86Register16 to, void* from ) { prefix16(); Group1( G1Type_##cod, cvt2reg(to), ptr[from] ); } \ - emitterT void lwr##16( void* to, x86Register16 from ) { prefix16(); Group1( G1Type_##cod, ptr[to], cvt2reg(from) ); } \ - emitterT void lwr##16( x86Register16 to, const x86ModRm& from ){ prefix16(); Group1( G1Type_##cod, cvt2reg(to), ptr[from] ); } \ - emitterT void lwr##16( const x86ModRm& to, x86Register16 from ){ prefix16(); Group1( G1Type_##cod, ptr[to], cvt2reg(from) ); } \ - emitterT void lwr##16( x86Register16 to, u16 imm ) { prefix16(); Group1_Imm( G1Type_##cod, cvt2reg(to), imm ); } \ - emitterT void lwr##16( void* to, u16 imm ) { prefix16(); Group1_Imm( G1Type_##cod, ptr[to], imm ); } \ - emitterT void lwr##16( const x86ModRm& to, u16 imm ) { prefix16(); Group1_Imm( G1Type_##cod, ptr[to], imm ); } + emitterT void cod##16( x86Register16 to, x86Register16 from ) { prefix16(); Group1( G1Type_##cod, to, from ); } \ + emitterT void cod##16( x86Register16 to, void* from ) { prefix16(); Group1( G1Type_##cod, to, ptr[from] ); } \ + emitterT void cod##16( x86Register16 to, const ModSib& from ) { prefix16(); Group1( G1Type_##cod, to, from ); } \ + emitterT void cod##16( x86Register16 to, u16 imm ) { prefix16(); Group1_Imm( G1Type_##cod, to, imm ); } \ + emitterT void cod##16( const ModSib& to, x86Register16 from ) { prefix16(); Group1( G1Type_##cod, to, from ); } \ + emitterT void cod##16( void* to, x86Register16 from ) { prefix16(); 
Group1( G1Type_##cod, ptr[to], from ); } \ + emitterT void cod##16( void* to, u16 imm ) { prefix16(); Group1_Imm( G1Type_##cod, ptr[to], imm ); } \ + emitterT void cod##16( const ModSib& to, u16 imm ) { prefix16(); Group1_Imm( G1Type_##cod, to, imm ); } \ + \ + emitterT void cod##8( x86Register8 to, x86Register8 from ) { Group1( G1Type_##cod, to, from , true ); } \ + emitterT void cod##8( x86Register8 to, void* from ) { Group1( G1Type_##cod, to, ptr[from], true ); } \ + emitterT void cod##8( x86Register8 to, const ModSib& from ) { Group1( G1Type_##cod, to, from , true ); } \ + emitterT void cod##8( x86Register8 to, u8 imm ) { Group1_Imm( G1Type_##cod, to, imm ); } \ + emitterT void cod##8( const ModSib& to, x86Register8 from ) { Group1( G1Type_##cod, to, from , true ); } \ + emitterT void cod##8( void* to, x86Register8 from ) { Group1( G1Type_##cod, ptr[to], from , true ); } \ + emitterT void cod##8( void* to, u8 imm ) { Group1_Imm( G1Type_##cod, ptr[to], imm ); } \ + emitterT void cod##8( const ModSib& to, u8 imm ) { Group1_Imm( G1Type_##cod, to, imm ); } -DEFINE_GROUP1_OPCODE( add, ADD ); -DEFINE_GROUP1_OPCODE( cmp, CMP ); -DEFINE_GROUP1_OPCODE( or, OR ); -DEFINE_GROUP1_OPCODE( adc, ADC ); -DEFINE_GROUP1_OPCODE( sbb, SBB ); -DEFINE_GROUP1_OPCODE( and, AND ); -DEFINE_GROUP1_OPCODE( sub, SUB ); -DEFINE_GROUP1_OPCODE( xor, XOR ); +DEFINE_GROUP1_OPCODE( ADD ) +DEFINE_GROUP1_OPCODE( CMP ) +DEFINE_GROUP1_OPCODE( OR ) +DEFINE_GROUP1_OPCODE( ADC ) +DEFINE_GROUP1_OPCODE( SBB ) +DEFINE_GROUP1_OPCODE( AND ) +DEFINE_GROUP1_OPCODE( SUB ) +DEFINE_GROUP1_OPCODE( XOR ) } // end namespace x86Emitter -static __forceinline x86Emitter::x86Register _reghlp( x86IntRegType src ) +static __forceinline x86Emitter::x86Register32 _reghlp32( x86IntRegType src ) { - return x86Emitter::x86Register( src ); + return x86Emitter::x86Register32( src ); } static __forceinline x86Emitter::x86Register16 _reghlp16( x86IntRegType src ) @@ -162,49 +176,50 @@ static __forceinline x86Emitter::x86Register16 _reghlp16( x86IntRegType src ) return x86Emitter::x86Register16( src ); } -static __forceinline x86Emitter::x86ModRm _mrmhlp( x86IntRegType src ) +static __forceinline x86Emitter::x86Register8 _reghlp8( x86IntRegType src ) { - return x86Emitter::x86ModRm( _reghlp(src) ); + return x86Emitter::x86Register8( src ); +} + +static __forceinline x86Emitter::ModSib _mrmhlp( x86IntRegType src ) +{ + return x86Emitter::ModSib( x86Emitter::x86ModRm( _reghlp32(src) ) ); } ////////////////////////////////////////////////////////////////////////////////////////// // -#define DEFINE_GROUP1_OPCODE_LEGACY( lwr, cod ) \ - emitterT void cod##32RtoR( x86IntRegType to, x86IntRegType from ) { x86Emitter::lwr##32( _reghlp(to), _reghlp(from) ); } \ - emitterT void cod##32ItoR( x86IntRegType to, u32 imm ) { x86Emitter::lwr##32( _reghlp(to), imm ); } \ - emitterT void cod##32MtoR( x86IntRegType to, uptr from ) { x86Emitter::lwr##32( _reghlp(to), (void*)from ); } \ - emitterT void cod##32RtoM( uptr to, x86IntRegType from ) { x86Emitter::lwr##32( (void*)to, _reghlp(from) ); } \ - emitterT void cod##32ItoM( uptr to, u32 imm ) { x86Emitter::lwr##32( (void*)to, imm ); } \ - emitterT void cod##32ItoRm( x86IntRegType to, u32 imm, int offset ) { x86Emitter::lwr##32( _mrmhlp(to) + offset, imm ); } \ - emitterT void cod##32RmtoR( x86IntRegType to, x86IntRegType from, int offset ) { x86Emitter::lwr##32( _reghlp(to), _mrmhlp(from) + offset ); } \ - emitterT void cod##32RtoRm( x86IntRegType to, x86IntRegType from, int offset ) { x86Emitter::lwr##32( _mrmhlp(to) + 
offset, _reghlp(from) ); } \ - \ - emitterT void cod##16RtoR( x86IntRegType to, x86IntRegType from ) { x86Emitter::lwr##16( _reghlp16(to), _reghlp16(from) ); } \ - emitterT void cod##16ItoR( x86IntRegType to, u16 imm ) { x86Emitter::lwr##16( _reghlp16(to), imm ); } \ - emitterT void cod##16MtoR( x86IntRegType to, uptr from ) { x86Emitter::lwr##16( _reghlp16(to), (void*)from ); } \ - emitterT void cod##16RtoM( uptr to, x86IntRegType from ) { x86Emitter::lwr##16( (void*)to, _reghlp16(from) ); } \ - emitterT void cod##16ItoM( uptr to, u16 imm ) { x86Emitter::lwr##16( (void*)to, imm ); } \ - emitterT void cod##16ItoRm( x86IntRegType to, u16 imm, int offset ) { x86Emitter::lwr##16( _mrmhlp(to) + offset, imm ); } \ - emitterT void cod##16RmtoR( x86IntRegType to, x86IntRegType from, int offset ) { x86Emitter::lwr##16( _reghlp16(to), _mrmhlp(from) + offset ); } \ - emitterT void cod##16RtoRm( x86IntRegType to, x86IntRegType from, int offset ) { x86Emitter::lwr##16( _mrmhlp(to) + offset, _reghlp16(from) ); } +#define DEFINE_LEGACY_HELPER( cod, bits ) \ + emitterT void cod##bits##RtoR( x86IntRegType to, x86IntRegType from ) { x86Emitter::cod##bits( _reghlp##bits(to), _reghlp##bits(from) ); } \ + emitterT void cod##bits##ItoR( x86IntRegType to, u##bits imm ) { x86Emitter::cod##bits( _reghlp##bits(to), imm ); } \ + emitterT void cod##bits##MtoR( x86IntRegType to, uptr from ) { x86Emitter::cod##bits( _reghlp##bits(to), (void*)from ); } \ + emitterT void cod##bits##RtoM( uptr to, x86IntRegType from ) { x86Emitter::cod##bits( (void*)to, _reghlp##bits(from) ); } \ + emitterT void cod##bits##ItoM( uptr to, u##bits imm ) { x86Emitter::cod##bits( (void*)to, imm ); } \ + emitterT void cod##bits##ItoRm( x86IntRegType to, u##bits imm, int offset ) { x86Emitter::cod##bits( _mrmhlp(to) + offset, imm ); } \ + emitterT void cod##bits##RmtoR( x86IntRegType to, x86IntRegType from, int offset ) { x86Emitter::cod##bits( _reghlp##bits(to), _mrmhlp(from) + offset ); } \ + emitterT void cod##bits##RtoRm( x86IntRegType to, x86IntRegType from, int offset ) { x86Emitter::cod##bits( _mrmhlp(to) + offset, _reghlp##bits(from) ); } -DEFINE_GROUP1_OPCODE_LEGACY( add, ADD ); -DEFINE_GROUP1_OPCODE_LEGACY( cmp, CMP ); -DEFINE_GROUP1_OPCODE_LEGACY( or, OR ); -DEFINE_GROUP1_OPCODE_LEGACY( adc, ADC ); -DEFINE_GROUP1_OPCODE_LEGACY( sbb, SBB ); -DEFINE_GROUP1_OPCODE_LEGACY( and, AND ); -DEFINE_GROUP1_OPCODE_LEGACY( sub, SUB ); -DEFINE_GROUP1_OPCODE_LEGACY( xor, XOR ); +#define DEFINE_GROUP1_OPCODE_LEGACY( cod ) \ + DEFINE_LEGACY_HELPER( cod, 32 ) \ + DEFINE_LEGACY_HELPER( cod, 16 ) \ + DEFINE_LEGACY_HELPER( cod, 8 ) + +DEFINE_GROUP1_OPCODE_LEGACY( ADD ) +DEFINE_GROUP1_OPCODE_LEGACY( CMP ) +DEFINE_GROUP1_OPCODE_LEGACY( OR ) +DEFINE_GROUP1_OPCODE_LEGACY( ADC ) +DEFINE_GROUP1_OPCODE_LEGACY( SBB ) +DEFINE_GROUP1_OPCODE_LEGACY( AND ) +DEFINE_GROUP1_OPCODE_LEGACY( SUB ) +DEFINE_GROUP1_OPCODE_LEGACY( XOR ) // Special forms needed by the legacy emitter syntax: emitterT void AND32I8toR( x86IntRegType to, s8 from ) { - x86Emitter::and32( _reghlp(to), from ); + x86Emitter::AND32( _reghlp32(to), from ); } emitterT void AND32I8toM( uptr to, s8 from ) { - x86Emitter::and32( (void*)to, from ); + x86Emitter::AND32( (void*)to, from ); } diff --git a/pcsx2/x86/ix86/ix86_internal.h b/pcsx2/x86/ix86/ix86_internal.h index 83ec23a291..4f3f72f2e4 100644 --- a/pcsx2/x86/ix86/ix86_internal.h +++ b/pcsx2/x86/ix86/ix86_internal.h @@ -27,7 +27,7 @@ static const int ModRm_UseDisp32 = 5; // same index value as EBP (used in Mod fi namespace x86Emitter { extern void 
EmitSibMagic( int regfield, const ModSib& info ); - extern void EmitSibMagic( x86Register regfield, const ModSib& info ); + extern void EmitSibMagic( x86Register32 regfield, const ModSib& info ); extern bool NeedsSibMagic( const ModSib& info ); } diff --git a/pcsx2/x86/ix86/ix86_legacy.cpp b/pcsx2/x86/ix86/ix86_legacy.cpp index 3f4134836c..752414a1b2 100644 --- a/pcsx2/x86/ix86/ix86_legacy.cpp +++ b/pcsx2/x86/ix86/ix86_legacy.cpp @@ -24,8 +24,6 @@ * cottonvibes(@gmail.com) */ -#pragma once - //------------------------------------------------------------------ // ix86 legacy emitter functions //------------------------------------------------------------------ @@ -34,6 +32,8 @@ #include "System.h" #include "ix86_internal.h" +using namespace x86Emitter; + // Note: the 'to' field can either be a register or a special opcode extension specifier // depending on the opcode's encoding. @@ -256,6 +256,8 @@ emitterT void NOP( void ) /* mov r32 to r32 */ emitterT void MOV32RtoR( x86IntRegType to, x86IntRegType from ) { + if( to == from ) return; + RexRB(0, from, to); write8( 0x89 ); ModRM( 3, from, to ); @@ -356,6 +358,18 @@ emitterT void MOV32RtoRm( x86IntRegType to, x86IntRegType from, int offset) WriteRmOffsetFrom(from, to, offset); } + +/* mov r32 to r32 */ +emitterT void MOV16RtoR( x86IntRegType to, x86IntRegType from ) +{ + if( to == from ) return; + + write8( 0x66 ); + RexRB(0, from, to); + write8( 0x89 ); + ModRM( 3, from, to ); +} + /* mov r16 to m16 */ emitterT void MOV16RtoM(uptr to, x86IntRegType from ) { @@ -802,15 +816,6 @@ emitterT void CMOVLE32MtoR( x86IntRegType to, uptr from ) // arithmetic instructions / //////////////////////////////////// -// add m8 to r8 -emitterT void ADD8MtoR( x86IntRegType to, uptr from ) -{ - RexR(0,to); - write8( 0x02 ); - ModRM( 0, to, DISP32 ); - write32( MEMADDR(from, 4) ); -} - /* inc r32 */ emitterT void INC32R( x86IntRegType to ) { @@ -1214,90 +1219,6 @@ emitterT void SHRD32ItoR( x86IntRegType to, x86IntRegType from, u8 shift ) // logical instructions / //////////////////////////////////// -// or r8 to r8 -emitterT void OR8RtoR( x86IntRegType to, x86IntRegType from ) -{ - RexRB(0,from,to); - write8( 0x08 ); - ModRM( 3, from, to ); -} - -// or r8 to m8 -emitterT void OR8RtoM( uptr to, x86IntRegType from ) -{ - RexR(0,from); - write8( 0x08 ); - ModRM( 0, from, DISP32 ); - write32( MEMADDR(to, 4) ); -} - -// or imm8 to m8 -emitterT void OR8ItoM( uptr to, u8 from ) -{ - write8( 0x80 ); - ModRM( 0, 1, DISP32 ); - write32( MEMADDR(to, 5) ); - write8( from ); -} - -// or m8 to r8 -emitterT void OR8MtoR( x86IntRegType to, uptr from ) -{ - RexR(0,to); - write8( 0x0A ); - ModRM( 0, to, DISP32 ); - write32( MEMADDR(from, 4) ); -} - -/* and imm8 to r8 */ -emitterT void AND8ItoR( x86IntRegType to, u8 from ) -{ - RexB(0,to); - if ( to == EAX ) { - write8( 0x24 ); - } - else { - write8( 0x80 ); - ModRM( 3, 0x4, to ); - } - write8( from ); -} - -/* and imm8 to m8 */ -emitterT void AND8ItoM( uptr to, u8 from ) -{ - write8( 0x80 ); - ModRM( 0, 0x4, DISP32 ); - write32( MEMADDR(to, 5) ); - write8( from ); -} - -// and r8 to r8 -emitterT void AND8RtoR( x86IntRegType to, x86IntRegType from ) -{ - RexRB(0,to,from); - write8( 0x22 ); - ModRM( 3, to, from ); -} - -/* and r8 to m8 */ -emitterT void AND8RtoM( uptr to, x86IntRegType from ) -{ - RexR(0,from); - write8( 0x20 ); - ModRM( 0, from, DISP32 ); - write32( MEMADDR(to, 4) ); -} - -/* and m8 to r8 */ -emitterT void AND8MtoR( x86IntRegType to, uptr from ) -{ - RexR(0,to); - write8( 0x22 ); - ModRM( 0, to, DISP32 ); - 
write32( MEMADDR(from, 4)); -} - /* not r32 */ emitterT void NOT32R( x86IntRegType from ) { @@ -1664,41 +1585,6 @@ emitterT void CALL32M( u32 to ) // misc instructions / //////////////////////////////////// -// cmp imm8 to r8 -emitterT void CMP8ItoR( x86IntRegType to, u8 from ) -{ - RexB(0,to); - if ( to == EAX ) - { - write8( 0x3C ); - } - else - { - write8( 0x80 ); - ModRM( 3, 7, to ); - } - write8( from ); -} - -// cmp m8 to r8 -emitterT void CMP8MtoR( x86IntRegType to, uptr from ) -{ - RexR(0,to); - write8( 0x3A ); - ModRM( 0, to, DISP32 ); - write32( MEMADDR(from, 4) ); -} - -// cmp imm8 to [r32] (byte ptr) -emitterT void CMP8I8toRm( x86IntRegType to, s8 from, s8 off=0 ) -{ - RexB(0,to); - write8( 0x80 ); - ModRM( (off != 0), 7, to ); - if( off != 0 ) write8(off); - write8(from); -} - /* test imm32 to r32 */ emitterT void TEST32ItoR( x86IntRegType to, u32 from ) { @@ -1830,31 +1716,19 @@ emitterT void SETZ8R( x86IntRegType to ) { SET8R(0x94, to); } emitterT void SETE8R( x86IntRegType to ) { SET8R(0x94, to); } /* push imm32 */ -emitterT void PUSH32I( u32 from ) -{; -write8( 0x68 ); -write32( from ); -} +emitterT void PUSH32I( u32 from ) { PUSH( from ); } /* push r32 */ -emitterT void PUSH32R( x86IntRegType from ) { write8( 0x50 | from ); } +emitterT void PUSH32R( x86IntRegType from ) { PUSH( x86Register32( from ) ); } /* push m32 */ -emitterT void PUSH32M( u32 from ) +emitterT void PUSH32M( u32 from ) { - write8( 0xFF ); - ModRM( 0, 6, DISP32 ); - write32( MEMADDR(from, 4) ); + PUSH( ptr[from] ); } /* pop r32 */ -emitterT void POP32R( x86IntRegType from ) { write8( 0x58 | from ); } - -/* pushad */ -emitterT void PUSHA32( void ) { write8( 0x60 ); } - -/* popad */ -emitterT void POPA32( void ) { write8( 0x61 ); } +emitterT void POP32R( x86IntRegType from ) { POP( x86Register32( from ) ); } /* pushfd */ emitterT void PUSHFD( void ) { write8( 0x9C ); } @@ -1899,95 +1773,34 @@ emitterT void BSWAP32R( x86IntRegType to ) emitterT void LEA32RtoR(x86IntRegType to, x86IntRegType from, s32 offset) { - RexRB(0,to,from); - write8(0x8d); - - if( (from&7) == ESP ) { - if( offset == 0 ) { - ModRM(1, to, from); - write8(0x24); - } - else if( is_s8(offset) ) { - ModRM(1, to, from); - write8(0x24); - write8(offset); - } - else { - ModRM(2, to, from); - write8(0x24); - write32(offset); - } - } - else { - if( offset == 0 && from != EBP && from!=ESP ) { - ModRM(0, to, from); - } - else if( is_s8(offset) ) { - ModRM(1, to, from); - write8(offset); - } - else { - ModRM(2, to, from); - write32(offset); - } - } -} - -// to = from + offset -emitterT void LEA16RtoR(x86IntRegType to, x86IntRegType from, s16 offset) -{ - write8(0x66); - LEA32RtoR(to, from, offset); -} - -// to = from0 + from1 -emitterT void LEA16RRtoR(x86IntRegType to, x86IntRegType from0, x86IntRegType from1) -{ - write8(0x66); - LEA32RRtoR(to, from0, from1); + LEA32( x86Register32( to ), ptr[x86IndexReg(from)+offset] ); } emitterT void LEA32RRtoR(x86IntRegType to, x86IntRegType from0, x86IntRegType from1) { - RexRXB(0, to, from0, from1); - write8(0x8d); - - if( (from1&7) == EBP ) { - ModRM(1, to, 4); - ModRM(0, from0, from1); - write8(0); - } - else { - ModRM(0, to, 4); - ModRM(0, from0, from1); - } -} - -// to = from << scale (max is 3) -emitterT void LEA16RStoR(x86IntRegType to, x86IntRegType from, u32 scale) -{ - write8(0x66); - LEA32RStoR(to, from, scale); + LEA32( x86Register32( to ), ptr[x86IndexReg(from0)+x86IndexReg(from1)] ); } // Don't inline recursive functions emitterT void LEA32RStoR(x86IntRegType to, x86IntRegType from, u32 scale) { 
- if( to == from ) { - SHL32ItoR(to, scale); - return; - } - - if( from != ESP ) { - RexRXB(0,to,from,0); - write8(0x8d); - ModRM(0, to, 4); - ModRM(scale, from, 5); - write32(0); - } - else { - assert( to != ESP ); - MOV32RtoR(to, from); - LEA32RStoR(to, to, scale); - } + LEA32( x86Register32( to ), ptr[x86IndexReg(from)*(1<= -1 && Id < 8 ); } bool IsEmpty() const { return Id == -1; } - bool operator==( const x86Register& src ) const { return Id == src.Id; } - bool operator!=( const x86Register& src ) const { return Id != src.Id; } + bool operator==( const x86Register32& src ) const { return Id == src.Id; } + bool operator!=( const x86Register32& src ) const { return Id != src.Id; } - x86ModRm operator+( const x86Register& right ) const; + x86ModRm operator+( const x86Register32& right ) const; x86ModRm operator+( const x86ModRm& right ) const; + x86ModRm operator+( s32 right ) const; + + x86ModRm operator*( u32 factor ) const; - x86Register& operator=( const x86Register& src ) + x86Register32& operator=( const x86Register32& src ) { Id = src.Id; return *this; } }; - + ////////////////////////////////////////////////////////////////////////////////////////// // Similar to x86Register, but without the ability to add/combine them with ModSib. // @@ -198,7 +201,7 @@ namespace x86Emitter x86Register16( const x86Register16& src ) : Id( src.Id ) {} x86Register16() : Id( -1 ) {} - explicit x86Register16( int regId ) : Id( regId ) { } + explicit x86Register16( int regId ) : Id( regId ) { jASSUME( Id >= -1 && Id < 8 ); } bool IsEmpty() const { return Id == -1; } @@ -224,7 +227,7 @@ namespace x86Emitter x86Register8( const x86Register16& src ) : Id( src.Id ) {} x86Register8() : Id( -1 ) {} - explicit x86Register8( int regId ) : Id( regId ) { } + explicit x86Register8( int regId ) : Id( regId ) { jASSUME( Id >= -1 && Id < 8 ); } bool IsEmpty() const { return Id == -1; } @@ -237,19 +240,22 @@ namespace x86Emitter return *this; } }; + + // Use 32 bit registers as out index register (for ModSig memory address calculations) + typedef x86Register32 x86IndexReg; ////////////////////////////////////////////////////////////////////////////////////////// // class x86ModRm { public: - x86Register Base; // base register (no scale) - x86Register Index; // index reg gets multiplied by the scale + x86IndexReg Base; // base register (no scale) + x86IndexReg Index; // index reg gets multiplied by the scale int Factor; // scale applied to the index register, in factor form (not a shift!) 
s32 Displacement; // address displacement public: - x86ModRm( x86Register base, x86Register index, int factor=1, s32 displacement=0 ) : + x86ModRm( x86IndexReg base, x86IndexReg index, int factor=1, s32 displacement=0 ) : Base( base ), Index( index ), Factor( factor ), @@ -257,7 +263,7 @@ namespace x86Emitter { } - explicit x86ModRm( x86Register base, int displacement=0 ) : + explicit x86ModRm( x86IndexReg base, int displacement=0 ) : Base( base ), Index(), Factor(0), @@ -273,11 +279,11 @@ namespace x86Emitter { } - static x86ModRm FromIndexReg( x86Register index, int scale=0, s32 displacement=0 ); + static x86ModRm FromIndexReg( x86IndexReg index, int scale=0, s32 displacement=0 ); public: bool IsByteSizeDisp() const { return is_s8( Displacement ); } - x86Register GetEitherReg() const; + x86IndexReg GetEitherReg() const; x86ModRm& Add( s32 imm ) { @@ -285,10 +291,10 @@ namespace x86Emitter return *this; } - x86ModRm& Add( const x86Register& src ); + x86ModRm& Add( const x86IndexReg& src ); x86ModRm& Add( const x86ModRm& src ); - x86ModRm operator+( const x86Register& right ) const { return x86ModRm( *this ).Add( right ); } + x86ModRm operator+( const x86IndexReg& right ) const { return x86ModRm( *this ).Add( right ); } x86ModRm operator+( const x86ModRm& right ) const { return x86ModRm( *this ).Add( right ); } x86ModRm operator+( const s32 imm ) const { return x86ModRm( *this ).Add( imm ); } x86ModRm operator-( const s32 imm ) const { return x86ModRm( *this ).Add( -imm ); } @@ -306,18 +312,27 @@ namespace x86Emitter class ModSib { public: - x86Register Base; // base register (no scale) - x86Register Index; // index reg gets multiplied by the scale + x86IndexReg Base; // base register (no scale) + x86IndexReg Index; // index reg gets multiplied by the scale int Scale; // scale applied to the index register, in scale/shift form s32 Displacement; // offset applied to the Base/Index registers. - ModSib( const x86ModRm& src ); - ModSib( x86Register base, x86Register index, int scale=0, s32 displacement=0 ); - ModSib( s32 disp ); + explicit ModSib( const x86ModRm& src ); + explicit ModSib( s32 disp ); + ModSib( x86IndexReg base, x86IndexReg index, int scale=0, s32 displacement=0 ); - x86Register GetEitherReg() const; + x86IndexReg GetEitherReg() const; bool IsByteSizeDisp() const { return is_s8( Displacement ); } + ModSib& Add( s32 imm ) + { + Displacement += imm; + return *this; + } + + ModSib operator+( const s32 imm ) const { return ModSib( *this ).Add( imm ); } + ModSib operator-( const s32 imm ) const { return ModSib( *this ).Add( -imm ); } + protected: void Reduce(); }; @@ -327,9 +342,13 @@ namespace x86Emitter // struct x86IndexerType { - ModSib operator[]( x86Register src ) const + // passthrough instruction, allows ModSib to pass silently through ptr translation + // without doing anything and without compiler error. 
+ const ModSib& operator[]( const ModSib& src ) const { return src; } + + ModSib operator[]( x86IndexReg src ) const { - return ModSib( src, x86Register::Empty ); + return ModSib( src, x86IndexReg::Empty ); } ModSib operator[]( const x86ModRm& src ) const @@ -349,14 +368,32 @@ namespace x86Emitter }; // ------------------------------------------------------------------------ - extern const x86Register eax; - extern const x86Register ebx; - extern const x86Register ecx; - extern const x86Register edx; - extern const x86Register esi; - extern const x86Register edi; - extern const x86Register ebp; - extern const x86Register esp; - extern x86IndexerType ptr; + + extern const x86Register32 eax; + extern const x86Register32 ebx; + extern const x86Register32 ecx; + extern const x86Register32 edx; + extern const x86Register32 esi; + extern const x86Register32 edi; + extern const x86Register32 ebp; + extern const x86Register32 esp; + + extern const x86Register16 ax; + extern const x86Register16 bx; + extern const x86Register16 cx; + extern const x86Register16 dx; + extern const x86Register16 si; + extern const x86Register16 di; + extern const x86Register16 bp; + extern const x86Register16 sp; + + extern const x86Register8 al; + extern const x86Register8 cl; + extern const x86Register8 dl; + extern const x86Register8 bl; + extern const x86Register8 ah; + extern const x86Register8 ch; + extern const x86Register8 dh; + extern const x86Register8 bh; } \ No newline at end of file
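Usage sketch: a minimal, illustrative example of how the new x86Emitter interface declared in this patch could be driven. It assumes the declarations added to ix86.h (and the headers it pulls in) are in scope and that the caller has already pointed the emitter at a write buffer (x86SetPtr or equivalent); the function name emitSample and the specific operands are placeholder assumptions, not part of the patch.

    #include "ix86.h"
    using namespace x86Emitter;

    void emitSample()   // hypothetical helper, for illustration only
    {
        // eax = esi + edi*4 + 0x10, expressed through the ptr/ModSib syntax
        LEA32( eax, ptr[esi + edi*4 + 0x10] );

        // Group1 forms cover register, memory, and immediate operands in 32/16/8 bit widths
        ADD32( ptr[ebx + 8], 0x1234 );   // add imm32 to dword [ebx+8]
        AND16( ax, bx );                 // 0x66 operand-size prefix emitted by prefix16()
        XOR8 ( al, cl );                 // byte form selected via the bit8form path

        // stack helpers
        PUSH( eax );
        POP ( ecx );
    }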