Merge pull request #176 from Sonicadvance1/x86_64-recompiler-casts

Fix a bunch of casts in the x86 recompiler core.
Pseudonym 2014-07-31 15:03:26 +01:00
commit de5a55c03e
12 changed files with 333 additions and 333 deletions
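The change is mechanical but matters for the x86_64 build: throughout these files, emitter calls that take a host memory address were receiving that address through a (u32) or (int) cast, which only works while host pointers are 32 bits wide. On a 64-bit host such a cast can truncate the upper half of the address, so the casts are switched to uptr, the emulator's pointer-sized integer typedef. A minimal standalone sketch of the difference, with uptr modeled by uintptr_t (illustration only, not PCSX2 code):

#include <cstdint>
#include <cstdio>

typedef uint32_t  u32;
typedef uintptr_t uptr;   // pointer-sized unsigned integer, standing in for the emulator's uptr

int main()
{
    int value = 42;

    uptr full      = (uptr)&value;   // wide enough for the pointer on both 32- and 64-bit hosts
    u32  truncated = (u32)full;      // on x86_64 this can drop the upper 32 bits of the address

    printf("full address:   %p\n", (void*)full);
    printf("after u32 cast: 0x%08x\n", (unsigned)truncated);
    printf("read back:      %d\n", *(int*)full);   // only the full-width value round-trips safely
    return 0;
}

Built as a 64-bit binary, stack and heap addresses typically sit above 4 GB, so the truncated value is no longer a usable address; that is exactly the failure these casts would cause once the recompiler emits memory operands from them.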

View File

@ -409,7 +409,7 @@ int _allocGPRtoXMMreg(int xmmreg, int gprreg, int mode)
SetMMXstate();
SSE2_MOVQ2DQ_MM_to_XMM(xmmreg, mmxreg);
SSE2_PUNPCKLQDQ_XMM_to_XMM(xmmreg, xmmreg);
SSE2_PUNPCKHQDQ_M128_to_XMM(xmmreg, (u32)&cpuRegs.GPR.r[gprreg].UL[0]);
SSE2_PUNPCKHQDQ_M128_to_XMM(xmmreg, (uptr)&cpuRegs.GPR.r[gprreg].UL[0]);
if (mmxregs[mmxreg].mode & MODE_WRITE )
{
@ -417,7 +417,7 @@ int _allocGPRtoXMMreg(int xmmreg, int gprreg, int mode)
if (!(mode & MODE_WRITE))
{
SetMMXstate();
MOVQRtoM((u32)&cpuRegs.GPR.r[gprreg].UL[0], mmxreg);
MOVQRtoM((uptr)&cpuRegs.GPR.r[gprreg].UL[0], mmxreg);
}
//xmmregs[xmmreg].mode |= MODE_WRITE;
}

View File

@ -102,7 +102,7 @@ void recMFSA( void )
SetMMXstate();
}
else {
MOV32MtoR(EAX, (u32)&cpuRegs.sa);
MOV32MtoR(EAX, (uptr)&cpuRegs.sa);
_deleteEEreg(_Rd_, 0);
MOV32RtoM((uptr)&cpuRegs.GPR.r[_Rd_].UL[0], EAX);
MOV32ItoM((uptr)&cpuRegs.GPR.r[_Rd_].UL[1], 0);

View File

@ -41,21 +41,21 @@ void _initX86regs() {
g_x86checknext = 0;
}
u32 _x86GetAddr(int type, int reg)
uptr _x86GetAddr(int type, int reg)
{
u32 ret = 0;
uptr ret = 0;
switch(type&~X86TYPE_VU1)
{
case X86TYPE_GPR:
ret = (u32)&cpuRegs.GPR.r[reg];
ret = (uptr)&cpuRegs.GPR.r[reg];
break;
case X86TYPE_VI:
if (type & X86TYPE_VU1)
ret = (u32)&VU1.VI[reg];
ret = (uptr)&VU1.VI[reg];
else
ret = (u32)&VU0.VI[reg];
ret = (uptr)&VU0.VI[reg];
break;
case X86TYPE_MEMOFFSET:
@ -68,42 +68,42 @@ u32 _x86GetAddr(int type, int reg)
case X86TYPE_VUQREAD:
if (type & X86TYPE_VU1)
ret = (u32)&VU1.VI[REG_Q];
ret = (uptr)&VU1.VI[REG_Q];
else
ret = (u32)&VU0.VI[REG_Q];
ret = (uptr)&VU0.VI[REG_Q];
break;
case X86TYPE_VUPREAD:
if (type & X86TYPE_VU1)
ret = (u32)&VU1.VI[REG_P];
ret = (uptr)&VU1.VI[REG_P];
else
ret = (u32)&VU0.VI[REG_P];
ret = (uptr)&VU0.VI[REG_P];
break;
case X86TYPE_VUQWRITE:
if (type & X86TYPE_VU1)
ret = (u32)&VU1.q;
ret = (uptr)&VU1.q;
else
ret = (u32)&VU0.q;
ret = (uptr)&VU0.q;
break;
case X86TYPE_VUPWRITE:
if (type & X86TYPE_VU1)
ret = (u32)&VU1.p;
ret = (uptr)&VU1.p;
else
ret = (u32)&VU0.p;
ret = (uptr)&VU0.p;
break;
case X86TYPE_PSX:
ret = (u32)&psxRegs.GPR.r[reg];
ret = (uptr)&psxRegs.GPR.r[reg];
break;
case X86TYPE_PCWRITEBACK:
ret = (u32)&g_recWriteback;
ret = (uptr)&g_recWriteback;
break;
case X86TYPE_VUJUMP:
ret = (u32)&g_recWriteback;
ret = (uptr)&g_recWriteback;
break;
jNO_DEFAULT;
@ -169,8 +169,8 @@ void _flushCachedRegs()
void _flushConstReg(int reg)
{
if( GPR_IS_CONST1( reg ) && !(g_cpuFlushedConstReg&(1<<reg)) ) {
MOV32ItoM((int)&cpuRegs.GPR.r[reg].UL[0], g_cpuConstRegs[reg].UL[0]);
MOV32ItoM((int)&cpuRegs.GPR.r[reg].UL[1], g_cpuConstRegs[reg].UL[1]);
MOV32ItoM((uptr)&cpuRegs.GPR.r[reg].UL[0], g_cpuConstRegs[reg].UL[0]);
MOV32ItoM((uptr)&cpuRegs.GPR.r[reg].UL[1], g_cpuConstRegs[reg].UL[1]);
g_cpuFlushedConstReg |= (1<<reg);
if (reg == 0) DevCon.Warning("Flushing r0!");
}
@ -569,9 +569,9 @@ int _allocMMXreg(int mmxreg, int reg, int mode)
else {
if( MMX_ISGPR(reg) ) _flushConstReg(reg-MMX_GPR);
if( (mode & MODE_READHALF) || (MMX_IS32BITS(reg)&&(mode&MODE_READ)) )
MOVDMtoMMX(i, (u32)_MMXGetAddr(reg));
MOVDMtoMMX(i, (uptr)_MMXGetAddr(reg));
else
MOVQMtoR(i, (u32)_MMXGetAddr(reg));
MOVQMtoR(i, (uptr)_MMXGetAddr(reg));
}
mmxregs[i].mode |= MODE_READ;
@ -599,7 +599,7 @@ int _allocMMXreg(int mmxreg, int reg, int mode)
else {
int xmmreg;
if( MMX_ISGPR(reg) && (xmmreg = _checkXMMreg(XMMTYPE_GPRREG, reg-MMX_GPR, 0)) >= 0 ) {
SSE_MOVHPS_XMM_to_M64((u32)_MMXGetAddr(reg)+8, xmmreg);
SSE_MOVHPS_XMM_to_M64((uptr)_MMXGetAddr(reg)+8, xmmreg);
if( mode & MODE_READ )
SSE2_MOVDQ2Q_XMM_to_MM(mmxreg, xmmreg);
@ -615,10 +615,10 @@ int _allocMMXreg(int mmxreg, int reg, int mode)
}
if( (mode & MODE_READHALF) || (MMX_IS32BITS(reg)&&(mode&MODE_READ)) ) {
MOVDMtoMMX(mmxreg, (u32)_MMXGetAddr(reg));
MOVDMtoMMX(mmxreg, (uptr)_MMXGetAddr(reg));
}
else if( mode & MODE_READ ) {
MOVQMtoR(mmxreg, (u32)_MMXGetAddr(reg));
MOVQMtoR(mmxreg, (uptr)_MMXGetAddr(reg));
}
}
}
@ -641,9 +641,9 @@ int _checkMMXreg(int reg, int mode)
else {
if (MMX_ISGPR(reg) && (mode&(MODE_READHALF|MODE_READ))) _flushConstReg(reg-MMX_GPR);
if( (mode & MODE_READHALF) || (MMX_IS32BITS(reg)&&(mode&MODE_READ)) )
MOVDMtoMMX(i, (u32)_MMXGetAddr(reg));
MOVDMtoMMX(i, (uptr)_MMXGetAddr(reg));
else
MOVQMtoR(i, (u32)_MMXGetAddr(reg));
MOVQMtoR(i, (uptr)_MMXGetAddr(reg));
}
SetMMXstate();
}
@ -701,9 +701,9 @@ void _deleteMMXreg(int reg, int flush)
pxAssert( mmxregs[i].reg != MMX_GPR );
if( MMX_IS32BITS(reg) )
MOVDMMXtoM((u32)_MMXGetAddr(mmxregs[i].reg), i);
MOVDMMXtoM((uptr)_MMXGetAddr(mmxregs[i].reg), i);
else
MOVQRtoM((u32)_MMXGetAddr(mmxregs[i].reg), i);
MOVQRtoM((uptr)_MMXGetAddr(mmxregs[i].reg), i);
SetMMXstate();
// get rid of MODE_WRITE since don't want to flush again
@ -773,9 +773,9 @@ void _freeMMXreg(u32 mmxreg)
pxAssert( mmxregs[mmxreg].reg != MMX_GPR );
if( MMX_IS32BITS(mmxregs[mmxreg].reg) )
MOVDMMXtoM((u32)_MMXGetAddr(mmxregs[mmxreg].reg), mmxreg);
MOVDMMXtoM((uptr)_MMXGetAddr(mmxregs[mmxreg].reg), mmxreg);
else
MOVQRtoM((u32)_MMXGetAddr(mmxregs[mmxreg].reg), mmxreg);
MOVQRtoM((uptr)_MMXGetAddr(mmxregs[mmxreg].reg), mmxreg);
SetMMXstate();
}
@ -820,9 +820,9 @@ void _flushMMXregs()
pxAssert( mmxregs[i].reg != MMX_GPR );
if( MMX_IS32BITS(mmxregs[i].reg) )
MOVDMMXtoM((u32)_MMXGetAddr(mmxregs[i].reg), i);
MOVDMMXtoM((uptr)_MMXGetAddr(mmxregs[i].reg), i);
else
MOVQRtoM((u32)_MMXGetAddr(mmxregs[i].reg), i);
MOVQRtoM((uptr)_MMXGetAddr(mmxregs[i].reg), i);
SetMMXstate();
mmxregs[i].mode &= ~MODE_WRITE;
@ -895,9 +895,9 @@ int _signExtendGPRMMXtoMMX(x86MMXRegType to, u32 gprreg, x86MMXRegType from, u32
SetMMXstate();
MOVQRtoR(to, from);
MOVDMMXtoM((u32)&cpuRegs.GPR.r[gprreg].UL[0], from);
MOVDMMXtoM((uptr)&cpuRegs.GPR.r[gprreg].UL[0], from);
PSRADItoR(from, 31);
MOVDMMXtoM((u32)&cpuRegs.GPR.r[gprreg].UL[1], from);
MOVDMMXtoM((uptr)&cpuRegs.GPR.r[gprreg].UL[1], from);
mmxregs[to].inuse = 0;
return -1;
@ -910,9 +910,9 @@ int _signExtendGPRtoMMX(x86MMXRegType to, u32 gprreg, int shift)
SetMMXstate();
if( shift > 0 ) PSRADItoR(to, shift);
MOVDMMXtoM((u32)&cpuRegs.GPR.r[gprreg].UL[0], to);
MOVDMMXtoM((uptr)&cpuRegs.GPR.r[gprreg].UL[0], to);
PSRADItoR(to, 31);
MOVDMMXtoM((u32)&cpuRegs.GPR.r[gprreg].UL[1], to);
MOVDMMXtoM((uptr)&cpuRegs.GPR.r[gprreg].UL[1], to);
mmxregs[to].inuse = 0;
return -1;
@ -973,12 +973,12 @@ void LogicalOpRtoR(x86MMXRegType to, x86MMXRegType from, int op)
case 2: PXORRtoR(to, from); break;
case 3:
PORRtoR(to, from);
PXORMtoR(to, (u32)&s_ones[0]);
PXORMtoR(to, (uptr)&s_ones[0]);
break;
}
}
void LogicalOpMtoR(x86MMXRegType to, u32 from, int op)
void LogicalOpMtoR(x86MMXRegType to, uptr from, int op)
{
switch(op) {
case 0: PANDMtoR(to, from); break;
@ -986,12 +986,12 @@ void LogicalOpMtoR(x86MMXRegType to, u32 from, int op)
case 2: PXORMtoR(to, from); break;
case 3:
PORRtoR(to, from);
PXORMtoR(to, (u32)&s_ones[0]);
PXORMtoR(to, (uptr)&s_ones[0]);
break;
}
}
void LogicalOp32RtoM(u32 to, x86IntRegType from, int op)
void LogicalOp32RtoM(uptr to, x86IntRegType from, int op)
{
switch(op) {
case 0: AND32RtoM(to, from); break;
@ -1001,7 +1001,7 @@ void LogicalOp32RtoM(u32 to, x86IntRegType from, int op)
}
}
void LogicalOp32MtoR(x86IntRegType to, u32 from, int op)
void LogicalOp32MtoR(x86IntRegType to, uptr from, int op)
{
switch(op) {
case 0: AND32MtoR(to, from); break;
@ -1021,7 +1021,7 @@ void LogicalOp32ItoR(x86IntRegType to, u32 from, int op)
}
}
void LogicalOp32ItoM(u32 to, u32 from, int op)
void LogicalOp32ItoM(uptr to, u32 from, int op)
{
switch(op) {
case 0: AND32ItoM(to, from); break;
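This file widens signatures as well as call sites: the return type of _x86GetAddr and the address parameters of LogicalOpMtoR, LogicalOp32RtoM, LogicalOp32MtoR, and LogicalOp32ItoM go from u32 to uptr, since fixing only the callers would just move the truncation inside the helper. A hedged sketch of the idea with made-up names (emitLoad32 is hypothetical, not the real emitter API):

#include <cstdint>

typedef uint32_t  u32;
typedef uintptr_t uptr;

// Hypothetical helper, not the real emitter API. Before the fix the address
// parameter would have been u32, so on x86_64 the truncation simply moved
// from the call site into the helper:
//     void emitLoad32(int reg, u32 addr);
// After the fix the address stays pointer-sized end to end:
void emitLoad32(int reg, uptr addr)
{
    // A real emitter would encode "mov reg, [addr]" here; the point is only
    // that the full host address reaches this function intact.
    (void)reg;
    (void)addr;
}

static u32 guestRegister;

int main()
{
    emitLoad32(0, (uptr)&guestRegister);   // call sites cast with (uptr), as in the diff
    return 0;
}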

View File

@ -184,7 +184,7 @@ void _eeMoveGPRtoR(x86IntRegType to, int fromgpr)
SetMMXstate();
}
else {
MOV32MtoR(to, (int)&cpuRegs.GPR.r[ fromgpr ].UL[ 0 ] );
MOV32MtoR(to, (uptr)&cpuRegs.GPR.r[ fromgpr ].UL[ 0 ] );
}
}
}
@ -204,7 +204,7 @@ void _eeMoveGPRtoM(u32 to, int fromgpr)
SetMMXstate();
}
else {
MOV32MtoR(EAX, (int)&cpuRegs.GPR.r[ fromgpr ].UL[ 0 ] );
MOV32MtoR(EAX, (uptr)&cpuRegs.GPR.r[ fromgpr ].UL[ 0 ] );
MOV32RtoM(to, EAX );
}
}
@ -225,7 +225,7 @@ void _eeMoveGPRtoRm(x86IntRegType to, int fromgpr)
SetMMXstate();
}
else {
MOV32MtoR(EAX, (int)&cpuRegs.GPR.r[ fromgpr ].UL[ 0 ] );
MOV32MtoR(EAX, (uptr)&cpuRegs.GPR.r[ fromgpr ].UL[ 0 ] );
MOV32RtoRm( to, EAX );
}
}
@ -981,12 +981,12 @@ void SetBranchReg( u32 reg )
if( x86regs[ESI].inuse ) {
pxAssert( x86regs[ESI].type == X86TYPE_PCWRITEBACK );
MOV32RtoM((int)&cpuRegs.pc, ESI);
MOV32RtoM((uptr)&cpuRegs.pc, ESI);
x86regs[ESI].inuse = 0;
}
else {
MOV32MtoR(EAX, (u32)&g_recWriteback);
MOV32RtoM((int)&cpuRegs.pc, EAX);
MOV32MtoR(EAX, (uptr)&g_recWriteback);
MOV32RtoM((uptr)&cpuRegs.pc, EAX);
}
}

View File

@ -59,17 +59,17 @@ void recADDI_(int info)
if ( _Rt_ == _Rs_ ) {
// must perform the ADD unconditionally, to maintain flags status:
ADD32ItoM((int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], _Imm_);
_signExtendSFtoM( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ]);
ADD32ItoM((uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], _Imm_);
_signExtendSFtoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ]);
}
else {
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
if ( _Imm_ != 0 ) ADD32ItoR( EAX, _Imm_ );
CDQ( );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], EAX );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], EDX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], EAX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], EDX );
}
}
@ -92,13 +92,13 @@ void recDADDI_(int info)
pxAssert( !(info&PROCESS_EE_XMM) );
if( _Rt_ == _Rs_ ) {
ADD32ItoM((int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], _Imm_);
ADC32ItoM((int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], _Imm_<0?0xffffffff:0);
ADD32ItoM((uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], _Imm_);
ADC32ItoM((uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], _Imm_<0?0xffffffff:0);
}
else {
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
MOV32MtoR( EDX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ] );
MOV32MtoR( EDX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ] );
if ( _Imm_ != 0 )
{
@ -106,9 +106,9 @@ void recDADDI_(int info)
ADC32ItoR( EDX, _Imm_ < 0?0xffffffff:0);
}
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], EAX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], EAX );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], EDX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], EDX );
}
}
@ -133,11 +133,11 @@ void recSLTIU_(int info)
{
MOV32ItoR(EAX, 1);
CMP32ItoM( (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ], _Imm_ >= 0 ? 0 : 0xffffffff);
CMP32ItoM( (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ], _Imm_ >= 0 ? 0 : 0xffffffff);
j8Ptr[0] = JB8( 0 );
j8Ptr[2] = JA8( 0 );
CMP32ItoM( (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ], (s32)_Imm_ );
CMP32ItoM( (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ], (s32)_Imm_ );
j8Ptr[1] = JB8(0);
x86SetJ8(j8Ptr[2]);
@ -146,8 +146,8 @@ void recSLTIU_(int info)
x86SetJ8(j8Ptr[0]);
x86SetJ8(j8Ptr[1]);
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], EAX );
MOV32ItoM( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], 0 );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], EAX );
MOV32ItoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], 0 );
}
EERECOMPILE_CODEX(eeRecompileCode1, SLTIU);
@ -163,11 +163,11 @@ void recSLTI_(int info)
// test silent hill if modding
MOV32ItoR(EAX, 1);
CMP32ItoM( (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ], _Imm_ >= 0 ? 0 : 0xffffffff);
CMP32ItoM( (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ], _Imm_ >= 0 ? 0 : 0xffffffff);
j8Ptr[0] = JL8( 0 );
j8Ptr[2] = JG8( 0 );
CMP32ItoM( (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ], (s32)_Imm_ );
CMP32ItoM( (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ], (s32)_Imm_ );
j8Ptr[1] = JB8(0);
x86SetJ8(j8Ptr[2]);
@ -176,8 +176,8 @@ void recSLTI_(int info)
x86SetJ8(j8Ptr[0]);
x86SetJ8(j8Ptr[1]);
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], EAX );
MOV32ItoM( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], 0 );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], EAX );
MOV32ItoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], 0 );
}
EERECOMPILE_CODEX(eeRecompileCode1, SLTI);
@ -193,34 +193,34 @@ void recLogicalOpI(int info, int op)
if ( _ImmU_ != 0 )
{
if( _Rt_ == _Rs_ ) {
LogicalOp32ItoM((int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], _ImmU_, op);
LogicalOp32ItoM((uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], _ImmU_, op);
}
else {
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
if( op != 0 )
MOV32MtoR( EDX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ] );
MOV32MtoR( EDX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ] );
LogicalOp32ItoR( EAX, _ImmU_, op);
if( op != 0 )
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], EDX );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], EAX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], EDX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], EAX );
}
if( op == 0 ) {
MOV32ItoM( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], 0 );
MOV32ItoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], 0 );
}
}
else
{
if( op == 0 ) {
MOV32ItoM( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], 0 );
MOV32ItoM( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], 0 );
MOV32ItoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], 0 );
MOV32ItoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], 0 );
}
else {
if( _Rt_ != _Rs_ ) {
MOV32MtoR(EAX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
MOV32MtoR(EDX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ] );
MOV32RtoM((int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], EAX );
MOV32RtoM((int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], EDX );
MOV32MtoR(EAX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
MOV32MtoR(EDX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ] );
MOV32RtoM((uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], EAX );
MOV32RtoM((uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], EDX );
}
}
}

View File

@ -71,7 +71,7 @@ void recSetBranchEQ(int info, int bne, int process)
}
_flushConstReg(_Rs_);
SSE2_PCMPEQD_M128_to_XMM(t0reg, (u32)&cpuRegs.GPR.r[_Rs_].UL[0]);
SSE2_PCMPEQD_M128_to_XMM(t0reg, (uptr)&cpuRegs.GPR.r[_Rs_].UL[0]);
if( t0reg != EEREC_T ) _freeXMMreg(t0reg);
@ -88,7 +88,7 @@ void recSetBranchEQ(int info, int bne, int process)
}
_flushConstReg(_Rt_);
SSE2_PCMPEQD_M128_to_XMM(t0reg, (u32)&cpuRegs.GPR.r[_Rt_].UL[0]);
SSE2_PCMPEQD_M128_to_XMM(t0reg, (uptr)&cpuRegs.GPR.r[_Rt_].UL[0]);
if( t0reg != EEREC_S ) _freeXMMreg(t0reg);
}
@ -131,26 +131,26 @@ void recSetBranchEQ(int info, int bne, int process)
if( bne ) {
if( process & PROCESS_CONSTS ) {
CMP32ItoM( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], g_cpuConstRegs[_Rs_].UL[0] );
CMP32ItoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], g_cpuConstRegs[_Rs_].UL[0] );
j8Ptr[ 0 ] = JNE8( 0 );
CMP32ItoM( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], g_cpuConstRegs[_Rs_].UL[1] );
CMP32ItoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], g_cpuConstRegs[_Rs_].UL[1] );
j32Ptr[ 1 ] = JE32( 0 );
}
else if( process & PROCESS_CONSTT ) {
CMP32ItoM( (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ], g_cpuConstRegs[_Rt_].UL[0] );
CMP32ItoM( (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ], g_cpuConstRegs[_Rt_].UL[0] );
j8Ptr[ 0 ] = JNE8( 0 );
CMP32ItoM( (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ], g_cpuConstRegs[_Rt_].UL[1] );
CMP32ItoM( (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ], g_cpuConstRegs[_Rt_].UL[1] );
j32Ptr[ 1 ] = JE32( 0 );
}
else {
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
CMP32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
CMP32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
j8Ptr[ 0 ] = JNE8( 0 );
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ] );
CMP32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ] );
CMP32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ] );
j32Ptr[ 1 ] = JE32( 0 );
}
@ -159,26 +159,26 @@ void recSetBranchEQ(int info, int bne, int process)
else {
// beq
if( process & PROCESS_CONSTS ) {
CMP32ItoM( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], g_cpuConstRegs[_Rs_].UL[0] );
CMP32ItoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], g_cpuConstRegs[_Rs_].UL[0] );
j32Ptr[ 0 ] = JNE32( 0 );
CMP32ItoM( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], g_cpuConstRegs[_Rs_].UL[1] );
CMP32ItoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], g_cpuConstRegs[_Rs_].UL[1] );
j32Ptr[ 1 ] = JNE32( 0 );
}
else if( process & PROCESS_CONSTT ) {
CMP32ItoM( (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ], g_cpuConstRegs[_Rt_].UL[0] );
CMP32ItoM( (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ], g_cpuConstRegs[_Rt_].UL[0] );
j32Ptr[ 0 ] = JNE32( 0 );
CMP32ItoM( (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ], g_cpuConstRegs[_Rt_].UL[1] );
CMP32ItoM( (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ], g_cpuConstRegs[_Rt_].UL[1] );
j32Ptr[ 1 ] = JNE32( 0 );
}
else {
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
CMP32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
CMP32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
j32Ptr[ 0 ] = JNE32( 0 );
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ] );
CMP32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ] );
CMP32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ] );
j32Ptr[ 1 ] = JNE32( 0 );
}
}
@ -205,7 +205,7 @@ void recSetBranchL(int ltz)
return;
}
CMP32ItoM( (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ], 0 );
CMP32ItoM( (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ], 0 );
if( ltz ) j32Ptr[ 0 ] = JGE32( 0 );
else j32Ptr[ 0 ] = JL32( 0 );
@ -565,11 +565,11 @@ void recBLEZ( void )
_flushEEreg(_Rs_);
CMP32ItoM( (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ], 0 );
CMP32ItoM( (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ], 0 );
j8Ptr[ 0 ] = JL8( 0 );
j32Ptr[ 1 ] = JG32( 0 );
CMP32ItoM( (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ], 0 );
CMP32ItoM( (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ], 0 );
j32Ptr[ 2 ] = JNZ32( 0 );
x86SetJ8( j8Ptr[ 0 ] );
@ -611,11 +611,11 @@ void recBGTZ( void )
_flushEEreg(_Rs_);
CMP32ItoM( (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ], 0 );
CMP32ItoM( (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ], 0 );
j8Ptr[ 0 ] = JG8( 0 );
j32Ptr[ 1 ] = JL32( 0 );
CMP32ItoM( (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ], 0 );
CMP32ItoM( (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ], 0 );
j32Ptr[ 2 ] = JZ32( 0 );
x86SetJ8( j8Ptr[ 0 ] );
@ -793,11 +793,11 @@ void recBLEZL( void )
_flushEEreg(_Rs_);
CMP32ItoM( (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ], 0 );
CMP32ItoM( (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ], 0 );
j32Ptr[ 0 ] = JL32( 0 );
j32Ptr[ 1 ] = JG32( 0 );
CMP32ItoM( (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ], 0 );
CMP32ItoM( (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ], 0 );
j32Ptr[ 2 ] = JNZ32( 0 );
x86SetJ32( j32Ptr[ 0 ] );
@ -837,11 +837,11 @@ void recBGTZL( void )
_flushEEreg(_Rs_);
CMP32ItoM( (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ], 0 );
CMP32ItoM( (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ], 0 );
j32Ptr[ 0 ] = JG32( 0 );
j32Ptr[ 1 ] = JL32( 0 );
CMP32ItoM( (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ], 0 );
CMP32ItoM( (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ], 0 );
j32Ptr[ 2 ] = JZ32( 0 );
x86SetJ32( j32Ptr[ 0 ] );

View File

@ -69,8 +69,8 @@ void recJAL( void )
}
else
{
MOV32ItoM((u32)&cpuRegs.GPR.r[31].UL[0], pc + 4);
MOV32ItoM((u32)&cpuRegs.GPR.r[31].UL[1], 0);
MOV32ItoM((uptr)&cpuRegs.GPR.r[31].UL[0], pc + 4);
MOV32ItoM((uptr)&cpuRegs.GPR.r[31].UL[1], 0);
}
recompileNextInstruction(1);
@ -135,8 +135,8 @@ void recJALR( void )
}
else
{
MOV32ItoM((u32)&cpuRegs.GPR.r[_Rd_].UL[0], newpc);
MOV32ItoM((u32)&cpuRegs.GPR.r[_Rd_].UL[1], 0);
MOV32ItoM((uptr)&cpuRegs.GPR.r[_Rd_].UL[0], newpc);
MOV32ItoM((uptr)&cpuRegs.GPR.r[_Rd_].UL[1], 0);
}
}
@ -146,12 +146,12 @@ void recJALR( void )
if( x86regs[ESI].inuse ) {
pxAssert( x86regs[ESI].type == X86TYPE_PCWRITEBACK );
MOV32RtoM((int)&cpuRegs.pc, ESI);
MOV32RtoM((uptr)&cpuRegs.pc, ESI);
x86regs[ESI].inuse = 0;
}
else {
MOV32MtoR(EAX, (u32)&g_recWriteback);
MOV32RtoM((int)&cpuRegs.pc, EAX);
MOV32MtoR(EAX, (uptr)&g_recWriteback);
MOV32RtoM((uptr)&cpuRegs.pc, EAX);
}
SetBranchReg(0xffffffff);

View File

@ -84,9 +84,9 @@ void _eeOnLoadWrite(u32 reg)
if( xmmregs[regt].mode & MODE_WRITE ) {
if( reg != _Rs_ ) {
SSE2_PUNPCKHQDQ_XMM_to_XMM(regt, regt);
SSE2_MOVQ_XMM_to_M64((u32)&cpuRegs.GPR.r[reg].UL[2], regt);
SSE2_MOVQ_XMM_to_M64((uptr)&cpuRegs.GPR.r[reg].UL[2], regt);
}
else SSE_MOVHPS_XMM_to_M64((u32)&cpuRegs.GPR.r[reg].UL[2], regt);
else SSE_MOVHPS_XMM_to_M64((uptr)&cpuRegs.GPR.r[reg].UL[2], regt);
}
xmmregs[regt].inuse = 0;
}

View File

@ -65,7 +65,7 @@ void recLUI()
if( (mmreg = _checkXMMreg(XMMTYPE_GPRREG, _Rt_, MODE_WRITE)) >= 0 ) {
if( xmmregs[mmreg].mode & MODE_WRITE ) {
SSE_MOVHPS_XMM_to_M64((u32)&cpuRegs.GPR.r[_Rt_].UL[2], mmreg);
SSE_MOVHPS_XMM_to_M64((uptr)&cpuRegs.GPR.r[_Rt_].UL[2], mmreg);
}
xmmregs[mmreg].inuse = 0;
}
@ -81,8 +81,8 @@ void recLUI()
{
MOV32ItoR(EAX, (s32)(cpuRegs.code << 16));
CDQ();
MOV32RtoM((u32)&cpuRegs.GPR.r[_Rt_].UL[0], EAX);
MOV32RtoM((u32)&cpuRegs.GPR.r[_Rt_].UL[1], EDX);
MOV32RtoM((uptr)&cpuRegs.GPR.r[_Rt_].UL[0], EAX);
MOV32RtoM((uptr)&cpuRegs.GPR.r[_Rt_].UL[1], EDX);
}
}
@ -106,10 +106,10 @@ void recMFHILO(int hi)
xmmregs[regd].inuse = 0;
SSE2_MOVQ_XMM_to_M64((u32)&cpuRegs.GPR.r[_Rd_].UL[0], reghi);
SSE2_MOVQ_XMM_to_M64((uptr)&cpuRegs.GPR.r[_Rd_].UL[0], reghi);
if( xmmregs[regd].mode & MODE_WRITE ) {
SSE_MOVHPS_XMM_to_M64((u32)&cpuRegs.GPR.r[_Rd_].UL[2], regd);
SSE_MOVHPS_XMM_to_M64((uptr)&cpuRegs.GPR.r[_Rd_].UL[2], regd);
}
}
else {
@ -120,7 +120,7 @@ void recMFHILO(int hi)
}
else {
_deleteEEreg(_Rd_, 0);
SSE2_MOVQ_XMM_to_M64((int)&cpuRegs.GPR.r[ _Rd_ ].UD[ 0 ], reghi);
SSE2_MOVQ_XMM_to_M64((uptr)&cpuRegs.GPR.r[ _Rd_ ].UD[ 0 ], reghi);
}
}
}
@ -132,10 +132,10 @@ void recMFHILO(int hi)
if( regd >= 0 ) {
if( EEINST_ISLIVE2(_Rd_) ) {
if( xmmregs[regd].mode & MODE_WRITE ) {
SSE_MOVHPS_XMM_to_M64((int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 2 ], regd);
SSE_MOVHPS_XMM_to_M64((uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 2 ], regd);
}
xmmregs[regd].inuse = 0;
MOVQRtoM((int)&cpuRegs.GPR.r[ _Rd_ ].UD[ 0 ], reghi);
MOVQRtoM((uptr)&cpuRegs.GPR.r[ _Rd_ ].UD[ 0 ], reghi);
}
else {
SetMMXstate();
@ -152,28 +152,28 @@ void recMFHILO(int hi)
}
else {
_deleteEEreg(_Rd_, 0);
MOVQRtoM((int)&cpuRegs.GPR.r[ _Rd_ ].UD[ 0 ], reghi);
MOVQRtoM((uptr)&cpuRegs.GPR.r[ _Rd_ ].UD[ 0 ], reghi);
}
}
}
else {
if( regd >= 0 ) {
if( EEINST_ISLIVE2(_Rd_) ) SSE_MOVLPS_M64_to_XMM(regd, hi ? (int)&cpuRegs.HI.UD[ 0 ] : (int)&cpuRegs.LO.UD[ 0 ]);
else SSE2_MOVQ_M64_to_XMM(regd, hi ? (int)&cpuRegs.HI.UD[ 0 ] : (int)&cpuRegs.LO.UD[ 0 ]);
if( EEINST_ISLIVE2(_Rd_) ) SSE_MOVLPS_M64_to_XMM(regd, hi ? (uptr)&cpuRegs.HI.UD[ 0 ] : (uptr)&cpuRegs.LO.UD[ 0 ]);
else SSE2_MOVQ_M64_to_XMM(regd, hi ? (uptr)&cpuRegs.HI.UD[ 0 ] : (uptr)&cpuRegs.LO.UD[ 0 ]);
}
else {
regd = _allocCheckGPRtoMMX(g_pCurInstInfo, _Rd_, MODE_WRITE);
if( regd >= 0 ) {
SetMMXstate();
MOVQMtoR(regd, hi ? (int)&cpuRegs.HI.UD[ 0 ] : (int)&cpuRegs.LO.UD[ 0 ]);
MOVQMtoR(regd, hi ? (uptr)&cpuRegs.HI.UD[ 0 ] : (uptr)&cpuRegs.LO.UD[ 0 ]);
}
else {
_deleteEEreg(_Rd_, 0);
MOV32MtoR( EAX, hi ? (int)&cpuRegs.HI.UL[ 0 ] : (int)&cpuRegs.LO.UL[ 0 ]);
MOV32MtoR( EDX, hi ? (int)&cpuRegs.HI.UL[ 1 ] : (int)&cpuRegs.LO.UL[ 1 ]);
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
MOV32MtoR( EAX, hi ? (uptr)&cpuRegs.HI.UL[ 0 ] : (uptr)&cpuRegs.LO.UL[ 0 ]);
MOV32MtoR( EDX, hi ? (uptr)&cpuRegs.HI.UL[ 1 ] : (uptr)&cpuRegs.LO.UL[ 1 ]);
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
}
}
}
@ -183,10 +183,10 @@ void recMFHILO(int hi)
void recMTHILO(int hi)
{
int reghi, regs, xmmhilo;
u32 addrhilo;
uptr addrhilo;
xmmhilo = hi ? XMMGPR_HI : XMMGPR_LO;
addrhilo = hi ? (int)&cpuRegs.HI.UD[0] : (int)&cpuRegs.LO.UD[0];
addrhilo = hi ? (uptr)&cpuRegs.HI.UD[0] : (uptr)&cpuRegs.LO.UD[0];
regs = _checkXMMreg(XMMTYPE_GPRREG, _Rs_, MODE_READ);
reghi = _checkXMMreg(XMMTYPE_GPRREG, xmmhilo, MODE_READ|MODE_WRITE);
@ -225,7 +225,7 @@ void recMTHILO(int hi)
}
else {
_flushConstReg(_Rs_);
SSE_MOVLPS_M64_to_XMM(reghi, (int)&cpuRegs.GPR.r[ _Rs_ ].UD[ 0 ]);
SSE_MOVLPS_M64_to_XMM(reghi, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UD[ 0 ]);
xmmregs[reghi].mode |= MODE_WRITE;
}
}
@ -247,7 +247,7 @@ void recMTHILO(int hi)
}
else {
_flushConstReg(_Rs_);
MOVQMtoR(reghi, (int)&cpuRegs.GPR.r[ _Rs_ ].UD[ 0 ]);
MOVQMtoR(reghi, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UD[ 0 ]);
}
}
}
@ -270,8 +270,8 @@ void recMTHILO(int hi)
else {
_eeMoveGPRtoR(ECX, _Rs_);
_flushEEreg(_Rs_);
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ]);
MOV32MtoR( EDX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ]);
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ]);
MOV32MtoR( EDX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ]);
MOV32RtoM( addrhilo, EAX );
MOV32RtoM( addrhilo+4, EDX );
}
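Locals that hold computed host addresses (addrhilo here, and loaddr/hiaddr later in the diff) are widened too, because byte offsets such as addrhilo+4 or addrhilo+8 are added to the base before it reaches the emitter; if the base is already truncated, the arithmetic lands on the wrong host address. A small standalone illustration, not the real register layout:

#include <cstdint>
#include <cstdio>

typedef uintptr_t uptr;

struct HiLo { uint32_t UL[4]; };   // simplified stand-in for a HI/LO register pair
static HiLo LO;

int main()
{
    uptr addr = (uptr)&LO.UL[0];            // pointer-sized base, as addrhilo is after the fix

    *(uint32_t*)(addr)     = 0x11111111;    // UL[0], the base address itself
    *(uint32_t*)(addr + 4) = 0x22222222;    // UL[1], i.e. addrhilo+4 in the diff

    printf("%08x %08x\n", (unsigned)LO.UL[0], (unsigned)LO.UL[1]);
    return 0;
}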
@ -322,17 +322,17 @@ void recMFHILO1(int hi)
}
else {
_deleteEEreg(_Rd_, 0);
SSE_MOVHPS_XMM_to_M64((int)&cpuRegs.GPR.r[ _Rd_ ].UD[ 0 ], reghi);
SSE_MOVHPS_XMM_to_M64((uptr)&cpuRegs.GPR.r[ _Rd_ ].UD[ 0 ], reghi);
}
}
else {
if( regd >= 0 ) {
if( EEINST_ISLIVE2(_Rd_) ) {
SSE2_PUNPCKHQDQ_M128_to_XMM(regd, hi ? (int)&cpuRegs.HI.UD[ 0 ] : (int)&cpuRegs.LO.UD[ 0 ]);
SSE2_PUNPCKHQDQ_M128_to_XMM(regd, hi ? (uptr)&cpuRegs.HI.UD[ 0 ] : (uptr)&cpuRegs.LO.UD[ 0 ]);
SSE2_PSHUFD_XMM_to_XMM(regd, regd, 0x4e);
}
else {
SSE2_MOVQ_M64_to_XMM(regd, hi ? (int)&cpuRegs.HI.UD[ 1 ] : (int)&cpuRegs.LO.UD[ 1 ]);
SSE2_MOVQ_M64_to_XMM(regd, hi ? (uptr)&cpuRegs.HI.UD[ 1 ] : (uptr)&cpuRegs.LO.UD[ 1 ]);
}
xmmregs[regd].mode |= MODE_WRITE;
@ -342,14 +342,14 @@ void recMFHILO1(int hi)
if( regd >= 0 ) {
SetMMXstate();
MOVQMtoR(regd, hi ? (int)&cpuRegs.HI.UD[ 1 ] : (int)&cpuRegs.LO.UD[ 1 ]);
MOVQMtoR(regd, hi ? (uptr)&cpuRegs.HI.UD[ 1 ] : (uptr)&cpuRegs.LO.UD[ 1 ]);
}
else {
_deleteEEreg(_Rd_, 0);
MOV32MtoR( EAX, hi ? (int)&cpuRegs.HI.UL[ 2 ] : (int)&cpuRegs.LO.UL[ 2 ]);
MOV32MtoR( EDX, hi ? (int)&cpuRegs.HI.UL[ 3 ] : (int)&cpuRegs.LO.UL[ 3 ]);
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
MOV32MtoR( EAX, hi ? (uptr)&cpuRegs.HI.UL[ 2 ] : (uptr)&cpuRegs.LO.UL[ 2 ]);
MOV32MtoR( EDX, hi ? (uptr)&cpuRegs.HI.UL[ 3 ] : (uptr)&cpuRegs.LO.UL[ 3 ]);
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
}
}
}
@ -358,10 +358,10 @@ void recMFHILO1(int hi)
void recMTHILO1(int hi)
{
int reghi, regs, xmmhilo;
u32 addrhilo;
uptr addrhilo;
xmmhilo = hi ? XMMGPR_HI : XMMGPR_LO;
addrhilo = hi ? (int)&cpuRegs.HI.UD[0] : (int)&cpuRegs.LO.UD[0];
addrhilo = hi ? (uptr)&cpuRegs.HI.UD[0] : (uptr)&cpuRegs.LO.UD[0];
regs = _checkXMMreg(XMMTYPE_GPRREG, _Rs_, MODE_READ);
reghi = _allocCheckGPRtoXMM(g_pCurInstInfo, xmmhilo, MODE_WRITE|MODE_READ);
@ -372,7 +372,7 @@ void recMTHILO1(int hi)
}
else {
_flushEEreg(_Rs_);
SSE2_PUNPCKLQDQ_M128_to_XMM(reghi, (int)&cpuRegs.GPR.r[ _Rs_ ].UD[ 0 ]);
SSE2_PUNPCKLQDQ_M128_to_XMM(reghi, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UD[ 0 ]);
}
}
else {
@ -393,8 +393,8 @@ void recMTHILO1(int hi)
}
else {
_flushEEreg(_Rs_);
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ]);
MOV32MtoR( EDX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ]);
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ]);
MOV32MtoR( EDX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ]);
MOV32RtoM( addrhilo+8, EAX );
MOV32RtoM( addrhilo+12, EDX );
}
@ -433,12 +433,12 @@ void recMOVZtemp_const()
void recMOVZtemp_consts(int info)
{
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
OR32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
OR32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ] );
j8Ptr[ 0 ] = JNZ8( 0 );
MOV32ItoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], g_cpuConstRegs[_Rs_].UL[0] );
MOV32ItoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], g_cpuConstRegs[_Rs_].UL[1] );
MOV32ItoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], g_cpuConstRegs[_Rs_].UL[0] );
MOV32ItoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], g_cpuConstRegs[_Rs_].UL[1] );
x86SetJ8( j8Ptr[ 0 ] );
}
@ -448,15 +448,15 @@ void recMOVZtemp_constt(int info)
// Fixme: MMX problem
if(0/* _hasFreeXMMreg() */) {
int t0reg = _allocMMXreg(-1, MMX_TEMP, 0);
MOVQMtoR(t0reg, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ]);
MOVQRtoM((int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], t0reg);
MOVQMtoR(t0reg, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ]);
MOVQRtoM((uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], t0reg);
_freeMMXreg(t0reg);
}
else {
MOV32MtoR(EAX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ]);
MOV32MtoR(EDX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ]);
MOV32RtoM((int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX);
MOV32RtoM((int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX);
MOV32MtoR(EAX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ]);
MOV32MtoR(EDX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ]);
MOV32RtoM((uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX);
MOV32RtoM((uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX);
}
}
@ -468,20 +468,20 @@ void recMOVZtemp_(int info)
if(0/* _hasFreeXMMreg() */)
t0reg = _allocMMXreg(-1, MMX_TEMP, 0);
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
OR32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
OR32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ] );
j8Ptr[ 0 ] = JNZ8( 0 );
if( t0reg >= 0 ) {
MOVQMtoR(t0reg, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ]);
MOVQRtoM((int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], t0reg);
MOVQMtoR(t0reg, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ]);
MOVQRtoM((uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], t0reg);
_freeMMXreg(t0reg);
}
else {
MOV32MtoR(EAX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ]);
MOV32MtoR(EDX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ]);
MOV32RtoM((int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX);
MOV32RtoM((int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX);
MOV32MtoR(EAX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ]);
MOV32MtoR(EDX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ]);
MOV32RtoM((uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX);
MOV32RtoM((uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX);
}
x86SetJ8( j8Ptr[ 0 ] );
@ -512,12 +512,12 @@ void recMOVNtemp_const()
void recMOVNtemp_consts(int info)
{
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
OR32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
OR32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ] );
j8Ptr[ 0 ] = JZ8( 0 );
MOV32ItoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], g_cpuConstRegs[_Rs_].UL[0] );
MOV32ItoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], g_cpuConstRegs[_Rs_].UL[1] );
MOV32ItoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], g_cpuConstRegs[_Rs_].UL[0] );
MOV32ItoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], g_cpuConstRegs[_Rs_].UL[1] );
x86SetJ8( j8Ptr[ 0 ] );
}
@ -527,15 +527,15 @@ void recMOVNtemp_constt(int info)
// Fixme: MMX problem
if(0/* _hasFreeXMMreg() */) {
int t0reg = _allocMMXreg(-1, MMX_TEMP, 0);
MOVQMtoR(t0reg, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ]);
MOVQRtoM((int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], t0reg);
MOVQMtoR(t0reg, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ]);
MOVQRtoM((uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], t0reg);
_freeMMXreg(t0reg);
}
else {
MOV32MtoR(EAX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ]);
MOV32MtoR(EDX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ]);
MOV32RtoM((int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX);
MOV32RtoM((int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX);
MOV32MtoR(EAX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ]);
MOV32MtoR(EDX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ]);
MOV32RtoM((uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX);
MOV32RtoM((uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX);
}
}
@ -547,20 +547,20 @@ void recMOVNtemp_(int info)
if(0/* _hasFreeXMMreg() */)
t0reg = _allocMMXreg(-1, MMX_TEMP, 0);
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
OR32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
OR32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ] );
j8Ptr[ 0 ] = JZ8( 0 );
if( t0reg >= 0 ) {
MOVQMtoR(t0reg, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ]);
MOVQRtoM((int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], t0reg);
MOVQMtoR(t0reg, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ]);
MOVQRtoM((uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], t0reg);
_freeMMXreg(t0reg);
}
else {
MOV32MtoR(EAX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ]);
MOV32MtoR(EDX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ]);
MOV32RtoM((int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX);
MOV32RtoM((int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX);
MOV32MtoR(EAX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ]);
MOV32MtoR(EDX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ]);
MOV32RtoM((uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX);
MOV32RtoM((uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX);
}
x86SetJ8( j8Ptr[ 0 ] );

View File

@ -54,8 +54,8 @@ REC_FUNC_DEL( MADDU1 , _Rd_ );
void recWritebackHILO(int info, int writed, int upper)
{
int regd, reglo = -1, reghi, savedlo = 0;
u32 loaddr = (int)&cpuRegs.LO.UL[ upper ? 2 : 0 ];
u32 hiaddr = (int)&cpuRegs.HI.UL[ upper ? 2 : 0 ];
uptr loaddr = (uptr)&cpuRegs.LO.UL[ upper ? 2 : 0 ];
uptr hiaddr = (uptr)&cpuRegs.HI.UL[ upper ? 2 : 0 ];
u8 testlive = upper?EEINST_LIVE2:EEINST_LIVE0;
if( g_pCurInstInfo->regs[XMMGPR_HI] & testlive )
@ -100,8 +100,8 @@ void recWritebackHILO(int info, int writed, int upper)
_deleteEEreg(_Rd_, 0);
if( !savedlo ) CDQ();
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
}
}
@ -127,8 +127,8 @@ void recWritebackHILO(int info, int writed, int upper)
void recWritebackHILOMMX(int info, int regsource, int writed, int upper)
{
int regd, t0reg, t1reg = -1;
u32 loaddr = (int)&cpuRegs.LO.UL[ upper ? 2 : 0 ];
u32 hiaddr = (int)&cpuRegs.HI.UL[ upper ? 2 : 0 ];
uptr loaddr = (uptr)&cpuRegs.LO.UL[ upper ? 2 : 0 ];
uptr hiaddr = (uptr)&cpuRegs.HI.UL[ upper ? 2 : 0 ];
u8 testlive = upper?EEINST_LIVE2:EEINST_LIVE0;
SetMMXstate();
@ -190,17 +190,17 @@ void recWritebackHILOMMX(int info, int regsource, int writed, int upper)
xmmregs[regd].mode |= MODE_WRITE;
}
else {
MOVQRtoM((int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], regsource);
MOVQRtoM((uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], regsource);
if( xmmregs[regd].mode & MODE_WRITE ) {
SSE_MOVHPS_XMM_to_M64((int)&cpuRegs.GPR.r[_Rd_].UL[2], regd);
SSE_MOVHPS_XMM_to_M64((uptr)&cpuRegs.GPR.r[_Rd_].UL[2], regd);
}
xmmregs[regd].inuse = 0;
}
}
else {
MOVQRtoM((int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], regsource);
MOVQRtoM((uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], regsource);
}
}
}
@ -247,8 +247,8 @@ void recWritebackHILOMMX(int info, int regsource, int writed, int upper)
void recWritebackConstHILO(u64 res, int writed, int upper)
{
int reglo, reghi;
u32 loaddr = (int)&cpuRegs.LO.UL[ upper ? 2 : 0 ];
u32 hiaddr = (int)&cpuRegs.HI.UL[ upper ? 2 : 0 ];
uptr loaddr = (uptr)&cpuRegs.LO.UL[ upper ? 2 : 0 ];
uptr hiaddr = (uptr)&cpuRegs.HI.UL[ upper ? 2 : 0 ];
u8 testlive = upper?EEINST_LIVE2:EEINST_LIVE0;
if( g_pCurInstInfo->regs[XMMGPR_LO] & testlive ) {
@ -280,8 +280,8 @@ void recWritebackConstHILO(u64 res, int writed, int upper)
if( reghi >= 0 ) {
u32* ptr = recGetImm64(res >> 63 ? -1 : 0, res >> 32);
if( upper ) SSE_MOVHPS_M64_to_XMM(reghi, (u32)ptr);
else SSE_MOVLPS_M64_to_XMM(reghi, (u32)ptr);
if( upper ) SSE_MOVHPS_M64_to_XMM(reghi, (uptr)ptr);
else SSE_MOVLPS_M64_to_XMM(reghi, (uptr)ptr);
}
else {
_deleteEEreg(XMMGPR_HI, 0);
@ -308,15 +308,15 @@ void recMULTsuper(int info, int upper, int process)
{
if( process & PROCESS_CONSTS ) {
MOV32ItoR( EAX, g_cpuConstRegs[_Rs_].UL[0] );
IMUL32M( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
IMUL32M( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
}
else if( process & PROCESS_CONSTT) {
MOV32ItoR( EAX, g_cpuConstRegs[_Rt_].UL[0] );
IMUL32M( (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
IMUL32M( (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
}
else {
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
IMUL32M( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
IMUL32M( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
}
recWritebackHILO(info, 1, upper);
@ -397,15 +397,15 @@ void recMULTUsuper(int info, int upper, int process)
{
if( process & PROCESS_CONSTS ) {
MOV32ItoR( EAX, g_cpuConstRegs[_Rs_].UL[0] );
MUL32M( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
MUL32M( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
}
else if( process & PROCESS_CONSTT) {
MOV32ItoR( EAX, g_cpuConstRegs[_Rt_].UL[0] );
MUL32M( (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
MUL32M( (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
}
else {
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
MUL32M( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
MUL32M( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
}
recWritebackHILO(info, 1, upper);
@ -512,12 +512,12 @@ void recDIVsuper(int info, int sign, int upper, int process)
if( process & PROCESS_CONSTT )
MOV32ItoR( ECX, g_cpuConstRegs[_Rt_].UL[0] );
else
MOV32MtoR( ECX, (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
MOV32MtoR( ECX, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
if( process & PROCESS_CONSTS )
MOV32ItoR( EAX, g_cpuConstRegs[_Rs_].UL[0] );
else
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
u8 *end1;
if (sign) //test for overflow (x86 will just throw an exception)
@ -673,25 +673,25 @@ void recMADD()
_deleteEEreg(XMMGPR_HI, 1);
// dadd
MOV32MtoR( EAX, (int)&cpuRegs.LO.UL[ 0 ] );
MOV32MtoR( ECX, (int)&cpuRegs.HI.UL[ 0 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.LO.UL[ 0 ] );
MOV32MtoR( ECX, (uptr)&cpuRegs.HI.UL[ 0 ] );
ADD32ItoR( EAX, (u32)result&0xffffffff );
ADC32ItoR( ECX, (u32)(result>>32) );
CDQ();
if( _Rd_) {
_eeOnWriteReg(_Rd_, 1);
_deleteEEreg(_Rd_, 0);
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
}
MOV32RtoM( (int)&cpuRegs.LO.UL[0], EAX );
MOV32RtoM( (int)&cpuRegs.LO.UL[1], EDX );
MOV32RtoM( (uptr)&cpuRegs.LO.UL[0], EAX );
MOV32RtoM( (uptr)&cpuRegs.LO.UL[1], EDX );
MOV32RtoM( (int)&cpuRegs.HI.UL[0], ECX );
MOV32RtoM( (uptr)&cpuRegs.HI.UL[0], ECX );
MOV32RtoR(EAX, ECX);
CDQ();
MOV32RtoM( (int)&cpuRegs.HI.UL[1], EDX );
MOV32RtoM( (uptr)&cpuRegs.HI.UL[1], EDX );
return;
}
@ -704,35 +704,35 @@ void recMADD()
if( GPR_IS_CONST1(_Rs_) ) {
MOV32ItoR( EAX, g_cpuConstRegs[_Rs_].UL[0] );
IMUL32M( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
IMUL32M( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
}
else if ( GPR_IS_CONST1(_Rt_) ) {
MOV32ItoR( EAX, g_cpuConstRegs[_Rt_].UL[0] );
IMUL32M( (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
IMUL32M( (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
}
else {
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
IMUL32M( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
IMUL32M( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
}
MOV32RtoR( ECX, EDX );
ADD32MtoR( EAX, (u32)&cpuRegs.LO.UL[0] );
ADC32MtoR( ECX, (u32)&cpuRegs.HI.UL[0] );
ADD32MtoR( EAX, (uptr)&cpuRegs.LO.UL[0] );
ADC32MtoR( ECX, (uptr)&cpuRegs.HI.UL[0] );
CDQ();
if( _Rd_ ) {
_eeOnWriteReg(_Rd_, 1);
_deleteEEreg(_Rd_, 0);
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
}
MOV32RtoM( (int)&cpuRegs.LO.UL[0], EAX );
MOV32RtoM( (int)&cpuRegs.LO.UL[1], EDX );
MOV32RtoM( (uptr)&cpuRegs.LO.UL[0], EAX );
MOV32RtoM( (uptr)&cpuRegs.LO.UL[1], EDX );
MOV32RtoM( (int)&cpuRegs.HI.UL[0], ECX );
MOV32RtoM( (uptr)&cpuRegs.HI.UL[0], ECX );
MOV32RtoR(EAX, ECX);
CDQ();
MOV32RtoM( (int)&cpuRegs.HI.UL[1], EDX );
MOV32RtoM( (uptr)&cpuRegs.HI.UL[1], EDX );
}
//static PCSX2_ALIGNED16(u32 s_MaddMask[]) = { 0x80000000, 0, 0x80000000, 0 };
@ -745,25 +745,25 @@ void recMADDU()
_deleteEEreg(XMMGPR_HI, 1);
// dadd
MOV32MtoR( EAX, (int)&cpuRegs.LO.UL[ 0 ] );
MOV32MtoR( ECX, (int)&cpuRegs.HI.UL[ 0 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.LO.UL[ 0 ] );
MOV32MtoR( ECX, (uptr)&cpuRegs.HI.UL[ 0 ] );
ADD32ItoR( EAX, (u32)result&0xffffffff );
ADC32ItoR( ECX, (u32)(result>>32) );
CDQ();
if( _Rd_) {
_eeOnWriteReg(_Rd_, 1);
_deleteEEreg(_Rd_, 0);
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
}
MOV32RtoM( (int)&cpuRegs.LO.UL[0], EAX );
MOV32RtoM( (int)&cpuRegs.LO.UL[1], EDX );
MOV32RtoM( (uptr)&cpuRegs.LO.UL[0], EAX );
MOV32RtoM( (uptr)&cpuRegs.LO.UL[1], EDX );
MOV32RtoM( (int)&cpuRegs.HI.UL[0], ECX );
MOV32RtoM( (uptr)&cpuRegs.HI.UL[0], ECX );
MOV32RtoR(EAX, ECX);
CDQ();
MOV32RtoM( (int)&cpuRegs.HI.UL[1], EDX );
MOV32RtoM( (uptr)&cpuRegs.HI.UL[1], EDX );
return;
}
@ -776,35 +776,35 @@ void recMADDU()
if( GPR_IS_CONST1(_Rs_) ) {
MOV32ItoR( EAX, g_cpuConstRegs[_Rs_].UL[0] );
MUL32M( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
MUL32M( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
}
else if ( GPR_IS_CONST1(_Rt_) ) {
MOV32ItoR( EAX, g_cpuConstRegs[_Rt_].UL[0] );
MUL32M( (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
MUL32M( (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
}
else {
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
MUL32M( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
MUL32M( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
}
MOV32RtoR( ECX, EDX );
ADD32MtoR( EAX, (u32)&cpuRegs.LO.UL[0] );
ADC32MtoR( ECX, (u32)&cpuRegs.HI.UL[0] );
ADD32MtoR( EAX, (uptr)&cpuRegs.LO.UL[0] );
ADC32MtoR( ECX, (uptr)&cpuRegs.HI.UL[0] );
CDQ();
if( _Rd_ ) {
_eeOnWriteReg(_Rd_, 1);
_deleteEEreg(_Rd_, 0);
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
}
MOV32RtoM( (int)&cpuRegs.LO.UL[0], EAX );
MOV32RtoM( (int)&cpuRegs.LO.UL[1], EDX );
MOV32RtoM( (uptr)&cpuRegs.LO.UL[0], EAX );
MOV32RtoM( (uptr)&cpuRegs.LO.UL[1], EDX );
MOV32RtoM( (int)&cpuRegs.HI.UL[0], ECX );
MOV32RtoM( (uptr)&cpuRegs.HI.UL[0], ECX );
MOV32RtoR(EAX, ECX);
CDQ();
MOV32RtoM( (int)&cpuRegs.HI.UL[1], EDX );
MOV32RtoM( (uptr)&cpuRegs.HI.UL[1], EDX );
}
void recMADD1()
@ -815,25 +815,25 @@ void recMADD1()
_deleteEEreg(XMMGPR_HI, 1);
// dadd
MOV32MtoR( EAX, (int)&cpuRegs.LO.UL[ 2 ] );
MOV32MtoR( ECX, (int)&cpuRegs.HI.UL[ 2 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.LO.UL[ 2 ] );
MOV32MtoR( ECX, (uptr)&cpuRegs.HI.UL[ 2 ] );
ADD32ItoR( EAX, (u32)result&0xffffffff );
ADC32ItoR( ECX, (u32)(result>>32) );
CDQ();
if( _Rd_) {
_eeOnWriteReg(_Rd_, 1);
_deleteEEreg(_Rd_, 0);
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
}
MOV32RtoM( (int)&cpuRegs.LO.UL[2], EAX );
MOV32RtoM( (int)&cpuRegs.LO.UL[3], EDX );
MOV32RtoM( (uptr)&cpuRegs.LO.UL[2], EAX );
MOV32RtoM( (uptr)&cpuRegs.LO.UL[3], EDX );
MOV32RtoM( (int)&cpuRegs.HI.UL[2], ECX );
MOV32RtoM( (uptr)&cpuRegs.HI.UL[2], ECX );
MOV32RtoR(EAX, ECX);
CDQ();
MOV32RtoM( (int)&cpuRegs.HI.UL[3], EDX );
MOV32RtoM( (uptr)&cpuRegs.HI.UL[3], EDX );
return;
}
@ -846,35 +846,35 @@ void recMADD1()
if( GPR_IS_CONST1(_Rs_) ) {
MOV32ItoR( EAX, g_cpuConstRegs[_Rs_].UL[0] );
IMUL32M( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
IMUL32M( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
}
else if ( GPR_IS_CONST1(_Rt_) ) {
MOV32ItoR( EAX, g_cpuConstRegs[_Rt_].UL[0] );
IMUL32M( (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
IMUL32M( (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
}
else {
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
IMUL32M( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
IMUL32M( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
}
MOV32RtoR( ECX, EDX );
ADD32MtoR( EAX, (u32)&cpuRegs.LO.UL[2] );
ADC32MtoR( ECX, (u32)&cpuRegs.HI.UL[2] );
ADD32MtoR( EAX, (uptr)&cpuRegs.LO.UL[2] );
ADC32MtoR( ECX, (uptr)&cpuRegs.HI.UL[2] );
CDQ();
if( _Rd_ ) {
_eeOnWriteReg(_Rd_, 1);
_deleteEEreg(_Rd_, 0);
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
}
MOV32RtoM( (int)&cpuRegs.LO.UL[2], EAX );
MOV32RtoM( (int)&cpuRegs.LO.UL[3], EDX );
MOV32RtoM( (uptr)&cpuRegs.LO.UL[2], EAX );
MOV32RtoM( (uptr)&cpuRegs.LO.UL[3], EDX );
MOV32RtoM( (int)&cpuRegs.HI.UL[2], ECX );
MOV32RtoM( (uptr)&cpuRegs.HI.UL[2], ECX );
MOV32RtoR(EAX, ECX);
CDQ();
MOV32RtoM( (int)&cpuRegs.HI.UL[3], EDX );
MOV32RtoM( (uptr)&cpuRegs.HI.UL[3], EDX );
}
//static PCSX2_ALIGNED16(u32 s_MaddMask[]) = { 0x80000000, 0, 0x80000000, 0 };
@ -887,25 +887,25 @@ void recMADDU1()
_deleteEEreg(XMMGPR_HI, 1);
// dadd
MOV32MtoR( EAX, (int)&cpuRegs.LO.UL[ 2 ] );
MOV32MtoR( ECX, (int)&cpuRegs.HI.UL[ 2 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.LO.UL[ 2 ] );
MOV32MtoR( ECX, (uptr)&cpuRegs.HI.UL[ 2 ] );
ADD32ItoR( EAX, (u32)result&0xffffffff );
ADC32ItoR( ECX, (u32)(result>>32) );
CDQ();
if( _Rd_) {
_eeOnWriteReg(_Rd_, 1);
_deleteEEreg(_Rd_, 0);
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
}
MOV32RtoM( (int)&cpuRegs.LO.UL[2], EAX );
MOV32RtoM( (int)&cpuRegs.LO.UL[3], EDX );
MOV32RtoM( (uptr)&cpuRegs.LO.UL[2], EAX );
MOV32RtoM( (uptr)&cpuRegs.LO.UL[3], EDX );
MOV32RtoM( (int)&cpuRegs.HI.UL[2], ECX );
MOV32RtoM( (uptr)&cpuRegs.HI.UL[2], ECX );
MOV32RtoR(EAX, ECX);
CDQ();
MOV32RtoM( (int)&cpuRegs.HI.UL[3], EDX );
MOV32RtoM( (uptr)&cpuRegs.HI.UL[3], EDX );
return;
}
@ -918,35 +918,35 @@ void recMADDU1()
if( GPR_IS_CONST1(_Rs_) ) {
MOV32ItoR( EAX, g_cpuConstRegs[_Rs_].UL[0] );
MUL32M( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
MUL32M( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
}
else if ( GPR_IS_CONST1(_Rt_) ) {
MOV32ItoR( EAX, g_cpuConstRegs[_Rt_].UL[0] );
MUL32M( (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
MUL32M( (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
}
else {
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
MUL32M( (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
MUL32M( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
}
MOV32RtoR( ECX, EDX );
ADD32MtoR( EAX, (u32)&cpuRegs.LO.UL[2] );
ADC32MtoR( ECX, (u32)&cpuRegs.HI.UL[2] );
ADD32MtoR( EAX, (uptr)&cpuRegs.LO.UL[2] );
ADC32MtoR( ECX, (uptr)&cpuRegs.HI.UL[2] );
CDQ();
if( _Rd_ ) {
_eeOnWriteReg(_Rd_, 1);
_deleteEEreg(_Rd_, 0);
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
}
MOV32RtoM( (int)&cpuRegs.LO.UL[2], EAX );
MOV32RtoM( (int)&cpuRegs.LO.UL[3], EDX );
MOV32RtoM( (uptr)&cpuRegs.LO.UL[2], EAX );
MOV32RtoM( (uptr)&cpuRegs.LO.UL[3], EDX );
MOV32RtoM( (int)&cpuRegs.HI.UL[2], ECX );
MOV32RtoM( (uptr)&cpuRegs.HI.UL[2], ECX );
MOV32RtoR(EAX, ECX);
CDQ();
MOV32RtoM( (int)&cpuRegs.HI.UL[3], EDX );
MOV32RtoM( (uptr)&cpuRegs.HI.UL[3], EDX );
}

View File

@ -63,15 +63,15 @@ void recSLLs_(int info, int sa)
{
pxAssert( !(info & PROCESS_EE_XMM) );
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
if ( sa != 0 )
{
SHL32ItoR( EAX, sa );
}
CDQ( );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
}
void recSLL_(int info)
@ -91,12 +91,12 @@ void recSRLs_(int info, int sa)
{
pxAssert( !(info & PROCESS_EE_XMM) );
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
if ( sa != 0 ) SHR32ItoR( EAX, sa);
CDQ( );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
}
void recSRL_(int info)
@ -116,12 +116,12 @@ void recSRAs_(int info, int sa)
{
pxAssert( !(info & PROCESS_EE_XMM) );
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
if ( sa != 0 ) SAR32ItoR( EAX, sa);
CDQ();
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
}
void recSRA_(int info)
@ -239,13 +239,13 @@ void recDSLL32s_(int info, int sa)
{
pxAssert( !(info & PROCESS_EE_XMM) );
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
if ( sa != 0 )
{
SHL32ItoR( EAX, sa );
}
MOV32ItoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], 0 );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EAX );
MOV32ItoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], 0 );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EAX );
}
@ -266,11 +266,11 @@ void recDSRL32s_(int info, int sa)
{
pxAssert( !(info & PROCESS_EE_XMM) );
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ] );
if ( sa != 0 ) SHR32ItoR( EAX, sa );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32ItoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], 0 );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32ItoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], 0 );
}
void recDSRL32_(int info)
@ -290,12 +290,12 @@ void recDSRA32s_(int info, int sa)
{
pxAssert( !(info & PROCESS_EE_XMM) );
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ] );
CDQ( );
if ( sa != 0 ) SAR32ItoR( EAX, sa );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
}
@ -324,7 +324,7 @@ int recSetShiftV(int info, int* rsreg, int* rtreg, int* rdreg, int* rstemp, int
SetMMXstate();
*rstemp = _allocMMXreg(-1, MMX_TEMP, 0);
MOV32MtoR(EAX, (u32)&cpuRegs.GPR.r[_Rs_].UL[0]);
MOV32MtoR(EAX, (uptr)&cpuRegs.GPR.r[_Rs_].UL[0]);
AND32ItoR(EAX, shift64?0x3f:0x1f);
MOVD32RtoMMX(*rstemp, EAX);
*rsreg = *rstemp;
@ -340,7 +340,7 @@ void recSetConstShiftV(int info, int* rsreg, int* rdreg, int* rstemp, int shift6
SetMMXstate();
*rstemp = _allocMMXreg(-1, MMX_TEMP, 0);
MOV32MtoR(EAX, (u32)&cpuRegs.GPR.r[_Rs_].UL[0]);
MOV32MtoR(EAX, (uptr)&cpuRegs.GPR.r[_Rs_].UL[0]);
AND32ItoR(EAX, shift64?0x3f:0x1f);
MOVD32RtoMMX(*rstemp, EAX);
*rsreg = *rstemp;
@ -361,7 +361,7 @@ void recSLLV_consts(int info)
void recSLLV_constt(int info)
{
MOV32MtoR( ECX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
MOV32MtoR( ECX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
MOV32ItoR( EAX, g_cpuConstRegs[_Rt_].UL[0] );
AND32ItoR( ECX, 0x1f );
@ -372,16 +372,16 @@ void recSLLV_constt(int info)
void recSLLV_(int info)
{
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
if ( _Rs_ != 0 )
{
MOV32MtoR( ECX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
MOV32MtoR( ECX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
AND32ItoR( ECX, 0x1f );
SHL32CLtoR( EAX );
}
CDQ();
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
}
EERECOMPILE_CODE0(SLLV, XMMINFO_READS|XMMINFO_READT|XMMINFO_WRITED);
@ -399,7 +399,7 @@ void recSRLV_consts(int info)
void recSRLV_constt(int info)
{
MOV32MtoR( ECX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
MOV32MtoR( ECX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
MOV32ItoR( EAX, g_cpuConstRegs[_Rt_].UL[0] );
AND32ItoR( ECX, 0x1f );
@ -410,16 +410,16 @@ void recSRLV_constt(int info)
void recSRLV_(int info)
{
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
if ( _Rs_ != 0 )
{
MOV32MtoR( ECX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
MOV32MtoR( ECX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
AND32ItoR( ECX, 0x1f );
SHR32CLtoR( EAX );
}
CDQ( );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
}
EERECOMPILE_CODE0(SRLV, XMMINFO_READS|XMMINFO_READT|XMMINFO_WRITED);
@ -437,7 +437,7 @@ void recSRAV_consts(int info)
void recSRAV_constt(int info)
{
MOV32MtoR( ECX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
MOV32MtoR( ECX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
MOV32ItoR( EAX, g_cpuConstRegs[_Rt_].UL[0] );
AND32ItoR( ECX, 0x1f );
@ -448,16 +448,16 @@ void recSRAV_constt(int info)
void recSRAV_(int info)
{
MOV32MtoR( EAX, (int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
if ( _Rs_ != 0 )
{
MOV32MtoR( ECX, (int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
MOV32MtoR( ECX, (uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] );
AND32ItoR( ECX, 0x1f );
SAR32CLtoR( EAX );
}
CDQ( );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (int)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 0 ], EAX );
MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rd_ ].UL[ 1 ], EDX );
}
EERECOMPILE_CODE0(SRAV, XMMINFO_READS|XMMINFO_READT|XMMINFO_WRITED);
@ -480,7 +480,7 @@ void recDSLLV_constt(int info)
int rsreg, rdreg, rstemp = -1;
recSetConstShiftV(info, &rsreg, &rdreg, &rstemp, 1);
MOVQMtoR(rdreg, (u32)&cpuRegs.GPR.r[_Rt_]);
MOVQMtoR(rdreg, (uptr)&cpuRegs.GPR.r[_Rt_]);
PSLLQRtoR(rdreg, rsreg);
if( rstemp != -1 ) _freeMMXreg(rstemp);
}
@ -514,7 +514,7 @@ void recDSRLV_constt(int info)
int rsreg, rdreg, rstemp = -1;
recSetConstShiftV(info, &rsreg, &rdreg, &rstemp, 1);
MOVQMtoR(rdreg, (u32)&cpuRegs.GPR.r[_Rt_]);
MOVQMtoR(rdreg, (uptr)&cpuRegs.GPR.r[_Rt_]);
PSRLQRtoR(rdreg, rsreg);
if( rstemp != -1 ) _freeMMXreg(rstemp);
}
@ -551,7 +551,7 @@ void recDSRAV_constt(int info)
recSetConstShiftV(info, &rsreg, &rdreg, &rstemp, 1);
MOVQMtoR(rdreg, (u32)&cpuRegs.GPR.r[_Rt_]);
MOVQMtoR(rdreg, (uptr)&cpuRegs.GPR.r[_Rt_]);
PXORRtoR(t0reg, t0reg);
// calc high bit

View File

@ -523,15 +523,15 @@ int eeRecompileCodeXMM(int xmminfo)
// flush consts
if( xmminfo & XMMINFO_READT ) {
if( GPR_IS_CONST1( _Rt_ ) && !(g_cpuFlushedConstReg&(1<<_Rt_)) ) {
MOV32ItoM((int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], g_cpuConstRegs[_Rt_].UL[0]);
MOV32ItoM((int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], g_cpuConstRegs[_Rt_].UL[1]);
MOV32ItoM((uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], g_cpuConstRegs[_Rt_].UL[0]);
MOV32ItoM((uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], g_cpuConstRegs[_Rt_].UL[1]);
g_cpuFlushedConstReg |= (1<<_Rt_);
}
}
if( xmminfo & XMMINFO_READS) {
if( GPR_IS_CONST1( _Rs_ ) && !(g_cpuFlushedConstReg&(1<<_Rs_)) ) {
MOV32ItoM((int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ], g_cpuConstRegs[_Rs_].UL[0]);
MOV32ItoM((int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ], g_cpuConstRegs[_Rs_].UL[1]);
MOV32ItoM((uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ], g_cpuConstRegs[_Rs_].UL[0]);
MOV32ItoM((uptr)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ], g_cpuConstRegs[_Rs_].UL[1]);
g_cpuFlushedConstReg |= (1<<_Rs_);
}
}