diff --git a/pcsx2/build.sh b/pcsx2/build.sh
index bc4d618a9d..7244ce332f 100644
--- a/pcsx2/build.sh
+++ b/pcsx2/build.sh
@@ -18,6 +18,10 @@
 #
 # Uncomment if building by itself, rather then with all the plugins
+
+#Normal
+#export PCSX2OPTIONS="--enable-sse2 --enable-sse3 --prefix `pwd`"
+#Debug version
 #export PCSX2OPTIONS="--enable-debug --enable-devbuild --enable-sse2 --enable-sse3 --prefix `pwd`"
 
 echo ---------------
diff --git a/pcsx2/x86/iMMI.c b/pcsx2/x86/iMMI.c
index 92aa5c9b48..b899d6005e 100644
--- a/pcsx2/x86/iMMI.c
+++ b/pcsx2/x86/iMMI.c
@@ -255,7 +255,7 @@ CPU_SSE2_XMMCACHE_START(XMMINFO_WRITED|XMMINFO_READLO|XMMINFO_READHI)
 			_deleteEEreg(XMMGPR_LO, 1);
 			_deleteEEreg(XMMGPR_HI, 1);
 			iFlushCall(FLUSH_CACHED_REGS); // since calling CALLFunc
-			CALLFunc( (u32)PMFHL );
+			CALLFunc( (uptr)PMFHL );
 			break;
 
 		case 0x03: // LH
@@ -876,7 +876,7 @@ CPU_SSE_XMMCACHE_END
 	else {
 		MOV32ItoM( (uptr)&cpuRegs.code, cpuRegs.code );
 		MOV32ItoM( (uptr)&cpuRegs.pc, pc );
-		CALLFunc( (u32)PMAXH );
+		CALLFunc( (uptr)PMAXH );
 	}
 }
 
@@ -1068,7 +1068,7 @@ void recPADDSW( void )
 
 	MOV32ItoM( (uptr)&cpuRegs.code, cpuRegs.code );
 	MOV32ItoM( (uptr)&cpuRegs.pc, pc );
-	CALLFunc( (u32)PADDSW );
+	CALLFunc( (uptr)PADDSW );
 }
 
 ////////////////////////////////////////////////////
@@ -1159,7 +1159,7 @@ void recPSUBSW( void )
 
 	MOV32ItoM( (uptr)&cpuRegs.code, cpuRegs.code );
 	MOV32ItoM( (uptr)&cpuRegs.pc, pc );
-	CALLFunc( (u32)PSUBSW );
+	CALLFunc( (uptr)PSUBSW );
 }
 
 ////////////////////////////////////////////////////
@@ -1587,7 +1587,7 @@ CPU_SSE_XMMCACHE_END
 
 	MOV32ItoM( (uptr)&cpuRegs.code, cpuRegs.code );
 	MOV32ItoM( (uptr)&cpuRegs.pc, pc );
-	CALLFunc( (u32)PABSW );
+	CALLFunc( (uptr)PABSW );
 }
 
 ////////////////////////////////////////////////////
@@ -1611,7 +1611,7 @@ CPU_SSE_XMMCACHE_END
 
 	MOV32ItoM( (uptr)&cpuRegs.code, cpuRegs.code );
 	MOV32ItoM( (uptr)&cpuRegs.pc, pc );
-	CALLFunc( (u32)PABSW );
+	CALLFunc( (uptr)PABSW );
 }
 
 ////////////////////////////////////////////////////
@@ -2033,7 +2033,7 @@ CPU_SSE_XMMCACHE_END
 	else {
 		MOV32ItoM( (uptr)&cpuRegs.code, cpuRegs.code );
 		MOV32ItoM( (uptr)&cpuRegs.pc, pc );
-		CALLFunc( (u32)PMINH );
+		CALLFunc( (uptr)PMINH );
 	}
 }
 
@@ -2362,8 +2362,8 @@ CPU_SSE2_XMMCACHE_START((_Rd_?XMMINFO_WRITED:0)|XMMINFO_READS|XMMINFO_READT|XMMI
 
 	SSE2_PSHUFD_XMM_to_XMM(EEREC_HI, EEREC_LO, 0xf5);
 
-	SSE2_PAND_M128_to_XMM(EEREC_LO, (u32)s_mask);
-	SSE2_PAND_M128_to_XMM(EEREC_HI, (u32)s_mask);
+	SSE2_PAND_M128_to_XMM(EEREC_LO, (uptr)s_mask);
+	SSE2_PAND_M128_to_XMM(EEREC_HI, (uptr)s_mask);
 
 	if( !_Rd_ ) _freeXMMreg(t0reg);
@@ -2980,7 +2980,7 @@ CPU_SSE2_XMMCACHE_START((_Rs_?XMMINFO_READS:0)|(_Rt_?XMMINFO_READT:0)|XMMINFO_WR
 		}
 		else {
 			if( EEREC_D != EEREC_T ) SSEX_MOVDQA_XMM_to_XMM(EEREC_D, EEREC_T);
-			SSE2_PAND_M128_to_XMM(EEREC_D, (u32)s_tempPINTEH);
+			SSE2_PAND_M128_to_XMM(EEREC_D, (uptr)s_tempPINTEH);
 		}
 	}
 	else if( _Rt_ == 0 ) {
diff --git a/pcsx2/x86/iPsxHw.c b/pcsx2/x86/iPsxHw.c
index 194043924b..8a93216dff 100644
--- a/pcsx2/x86/iPsxHw.c
+++ b/pcsx2/x86/iPsxHw.c
@@ -1096,40 +1096,40 @@ void psxHwConstWrite32(u32 add, int mmreg)
 
 int psxHw4ConstRead8(u32 x86reg, u32 add, u32 sign) {
 	switch (add) {
-		case 0x1f402004: CONSTREAD8_CALL((u32)cdvdRead04); return 1;
-		case 0x1f402005: CONSTREAD8_CALL((u32)cdvdRead05); return 1;
-		case 0x1f402006: CONSTREAD8_CALL((u32)cdvdRead06); return 1;
-		case 0x1f402007: CONSTREAD8_CALL((u32)cdvdRead07); return 1;
-		case 0x1f402008: CONSTREAD8_CALL((u32)cdvdRead08); return 1;
-		case 0x1f40200A: CONSTREAD8_CALL((u32)cdvdRead0A); return 1;
-		case 0x1f40200B: CONSTREAD8_CALL((u32)cdvdRead0B); return 1;
-		case 0x1f40200C: CONSTREAD8_CALL((u32)cdvdRead0C); return 1;
-		case 0x1f40200D: CONSTREAD8_CALL((u32)cdvdRead0D); return 1;
-		case 0x1f40200E: CONSTREAD8_CALL((u32)cdvdRead0E); return 1;
-		case 0x1f40200F: CONSTREAD8_CALL((u32)cdvdRead0F); return 1;
-		case 0x1f402013: CONSTREAD8_CALL((u32)cdvdRead13); return 1;
-		case 0x1f402015: CONSTREAD8_CALL((u32)cdvdRead15); return 1;
-		case 0x1f402016: CONSTREAD8_CALL((u32)cdvdRead16); return 1;
-		case 0x1f402017: CONSTREAD8_CALL((u32)cdvdRead17); return 1;
-		case 0x1f402018: CONSTREAD8_CALL((u32)cdvdRead18); return 1;
-		case 0x1f402020: CONSTREAD8_CALL((u32)cdvdRead20); return 1;
-		case 0x1f402021: CONSTREAD8_CALL((u32)cdvdRead21); return 1;
-		case 0x1f402022: CONSTREAD8_CALL((u32)cdvdRead22); return 1;
-		case 0x1f402023: CONSTREAD8_CALL((u32)cdvdRead23); return 1;
-		case 0x1f402024: CONSTREAD8_CALL((u32)cdvdRead24); return 1;
-		case 0x1f402028: CONSTREAD8_CALL((u32)cdvdRead28); return 1;
-		case 0x1f402029: CONSTREAD8_CALL((u32)cdvdRead29); return 1;
-		case 0x1f40202A: CONSTREAD8_CALL((u32)cdvdRead2A); return 1;
-		case 0x1f40202B: CONSTREAD8_CALL((u32)cdvdRead2B); return 1;
-		case 0x1f40202C: CONSTREAD8_CALL((u32)cdvdRead2C); return 1;
-		case 0x1f402030: CONSTREAD8_CALL((u32)cdvdRead30); return 1;
-		case 0x1f402031: CONSTREAD8_CALL((u32)cdvdRead31); return 1;
-		case 0x1f402032: CONSTREAD8_CALL((u32)cdvdRead32); return 1;
-		case 0x1f402033: CONSTREAD8_CALL((u32)cdvdRead33); return 1;
-		case 0x1f402034: CONSTREAD8_CALL((u32)cdvdRead34); return 1;
-		case 0x1f402038: CONSTREAD8_CALL((u32)cdvdRead38); return 1;
-		case 0x1f402039: CONSTREAD8_CALL((u32)cdvdRead39); return 1;
-		case 0x1f40203A: CONSTREAD8_CALL((u32)cdvdRead3A); return 1;
+		case 0x1f402004: CONSTREAD8_CALL((uptr)cdvdRead04); return 1;
+		case 0x1f402005: CONSTREAD8_CALL((uptr)cdvdRead05); return 1;
+		case 0x1f402006: CONSTREAD8_CALL((uptr)cdvdRead06); return 1;
+		case 0x1f402007: CONSTREAD8_CALL((uptr)cdvdRead07); return 1;
+		case 0x1f402008: CONSTREAD8_CALL((uptr)cdvdRead08); return 1;
+		case 0x1f40200A: CONSTREAD8_CALL((uptr)cdvdRead0A); return 1;
+		case 0x1f40200B: CONSTREAD8_CALL((uptr)cdvdRead0B); return 1;
+		case 0x1f40200C: CONSTREAD8_CALL((uptr)cdvdRead0C); return 1;
+		case 0x1f40200D: CONSTREAD8_CALL((uptr)cdvdRead0D); return 1;
+		case 0x1f40200E: CONSTREAD8_CALL((uptr)cdvdRead0E); return 1;
+		case 0x1f40200F: CONSTREAD8_CALL((uptr)cdvdRead0F); return 1;
+		case 0x1f402013: CONSTREAD8_CALL((uptr)cdvdRead13); return 1;
+		case 0x1f402015: CONSTREAD8_CALL((uptr)cdvdRead15); return 1;
+		case 0x1f402016: CONSTREAD8_CALL((uptr)cdvdRead16); return 1;
+		case 0x1f402017: CONSTREAD8_CALL((uptr)cdvdRead17); return 1;
+		case 0x1f402018: CONSTREAD8_CALL((uptr)cdvdRead18); return 1;
+		case 0x1f402020: CONSTREAD8_CALL((uptr)cdvdRead20); return 1;
+		case 0x1f402021: CONSTREAD8_CALL((uptr)cdvdRead21); return 1;
+		case 0x1f402022: CONSTREAD8_CALL((uptr)cdvdRead22); return 1;
+		case 0x1f402023: CONSTREAD8_CALL((uptr)cdvdRead23); return 1;
+		case 0x1f402024: CONSTREAD8_CALL((uptr)cdvdRead24); return 1;
+		case 0x1f402028: CONSTREAD8_CALL((uptr)cdvdRead28); return 1;
+		case 0x1f402029: CONSTREAD8_CALL((uptr)cdvdRead29); return 1;
+		case 0x1f40202A: CONSTREAD8_CALL((uptr)cdvdRead2A); return 1;
+		case 0x1f40202B: CONSTREAD8_CALL((uptr)cdvdRead2B); return 1;
+		case 0x1f40202C: CONSTREAD8_CALL((uptr)cdvdRead2C); return 1;
+		case 0x1f402030: CONSTREAD8_CALL((uptr)cdvdRead30); return 1;
+		case 0x1f402031: CONSTREAD8_CALL((uptr)cdvdRead31); return 1;
+		case 0x1f402032: CONSTREAD8_CALL((uptr)cdvdRead32); return 1;
+		case 0x1f402033: CONSTREAD8_CALL((uptr)cdvdRead33); return 1;
+		case 0x1f402034: CONSTREAD8_CALL((uptr)cdvdRead34); return 1;
+		case 0x1f402038: CONSTREAD8_CALL((uptr)cdvdRead38); return 1;
+		case 0x1f402039: CONSTREAD8_CALL((uptr)cdvdRead39); return 1;
+		case 0x1f40203A: CONSTREAD8_CALL((uptr)cdvdRead3A); return 1;
 
 		default:
 			SysPrintf("*Unkwnown 8bit read at address %lx\n", add);
			XOR32RtoR(x86reg, x86reg);
diff --git a/pcsx2/x86/iR5900.h b/pcsx2/x86/iR5900.h
index e71ef39712..f9b9854622 100644
--- a/pcsx2/x86/iR5900.h
+++ b/pcsx2/x86/iR5900.h
@@ -65,31 +65,31 @@ extern u16 iCWstate;
 extern u32 s_nBlockCycles; // cycles of current block recompiling
 
 #define REC_FUNC_INLINE( f, delreg ) \
-	MOV32ItoM( (u32)&cpuRegs.code, cpuRegs.code ); \
-	MOV32ItoM( (u32)&cpuRegs.pc, pc ); \
+	MOV32ItoM( (uptr)&cpuRegs.code, (u32)cpuRegs.code ); \
+	MOV32ItoM( (uptr)&cpuRegs.pc, (u32)pc ); \
 	iFlushCall(FLUSH_EVERYTHING); \
 	if( (delreg) > 0 ) _deleteEEreg(delreg, 0); \
-	CALLFunc( (u32)f );
+	CALLFunc( (uptr)f );
 
 #define REC_FUNC( f, delreg ) \
 void f( void ); \
 void rec##f( void ) \
 { \
-	MOV32ItoM( (u32)&cpuRegs.code, cpuRegs.code ); \
-	MOV32ItoM( (u32)&cpuRegs.pc, pc ); \
+	MOV32ItoM( (uptr)&cpuRegs.code, (u32)cpuRegs.code ); \
+	MOV32ItoM( (uptr)&cpuRegs.pc, (u32)pc ); \
 	iFlushCall(FLUSH_EVERYTHING); \
 	if( (delreg) > 0 ) _deleteEEreg(delreg, 0); \
-	CALLFunc( (u32)f ); \
+	CALLFunc( (uptr)f ); \
 }
 
 #define REC_SYS( f ) \
 void f( void ); \
 void rec##f( void ) \
 { \
-	MOV32ItoM( (u32)&cpuRegs.code, cpuRegs.code ); \
-	MOV32ItoM( (u32)&cpuRegs.pc, pc ); \
+	MOV32ItoM( (uptr)&cpuRegs.code, (u32)cpuRegs.code ); \
+	MOV32ItoM( (uptr)&cpuRegs.pc, (u32)pc ); \
 	iFlushCall(FLUSH_EVERYTHING); \
-	CALLFunc( (u32)f ); \
+	CALLFunc( (uptr)f ); \
 	branch = 2; \
 }
diff --git a/pcsx2/x86/iVUmicro.c b/pcsx2/x86/iVUmicro.c
index 33456aeaef..f254102856 100644
--- a/pcsx2/x86/iVUmicro.c
+++ b/pcsx2/x86/iVUmicro.c
@@ -1085,7 +1085,7 @@ void CheckForOverflow_(int fdreg, int t0reg, int keepxyzw)
 	SSE_ORPS_M128_to_XMM(t0reg, (uptr)&SSEmovMask[15-keepxyzw][0]);
 	SSE_ANDPS_XMM_to_XMM(fdreg, t0reg);
 
-	//SSE_MOVAPS_M128_to_XMM(t0reg, (u32)s_expmask);
+	//SSE_MOVAPS_M128_to_XMM(t0reg, (uptr)s_expmask);
 	//SSE_ANDPS_XMM_to_XMM(t0reg, fdreg);
 	//SSE_CMPNEPS_M128_to_XMM(t0reg, (u32)s_expmask);
 	////SSE_ORPS_M128_to_XMM(t0reg, (u32)g_minvals);
@@ -1591,11 +1591,11 @@ void recVUMI_ADD(VURegs *VU, int info)
 
 	if( _Fs_ == 0 && _Ft_ == 0 ) { // if adding VF00 with VF00, then the result is always 0,0,0,2
 		if( _X_Y_Z_W != 0xf ) {
-			SSE_MOVAPS_M128_to_XMM(EEREC_TEMP, (u32)s_two);
+			SSE_MOVAPS_M128_to_XMM(EEREC_TEMP, (uptr)s_two);
 			VU_MERGE_REGS(EEREC_D, EEREC_TEMP);
 		}
 		else {
-			SSE_MOVAPS_M128_to_XMM(EEREC_D, (u32)s_two);
+			SSE_MOVAPS_M128_to_XMM(EEREC_D, (uptr)s_two);
 		}
 	}
 	else {
@@ -2080,7 +2080,7 @@ void recVUMI_SUB_xyzw(VURegs *VU, int xyzw, int info)
 		if( xyzw == 0 ) {
 			if( EEREC_D == EEREC_T ) {
 				if( _Fs_ > 0 ) SSE_SUBSS_XMM_to_XMM(EEREC_D, EEREC_S);
-				SSE_XORPS_M128_to_XMM(EEREC_D, (u32)s_unaryminus);
+				SSE_XORPS_M128_to_XMM(EEREC_D, (uptr)s_unaryminus);
 			}
 			else {
 				if( EEREC_D != EEREC_S ) SSE_MOVSS_XMM_to_XMM(EEREC_D, EEREC_S);
@@ -3239,7 +3239,7 @@ void recVUMI_MAX_xyzw(VURegs *VU, int xyzw, int info)
 				SSE_MOVSS_XMM_to_XMM(EEREC_D, EEREC_TEMP);
 			}
 			else {
-				SSE_MOVSS_M32_to_XMM(EEREC_TEMP, (u32)s_fones);
+				SSE_MOVSS_M32_to_XMM(EEREC_TEMP, (uptr)s_fones);
 				SSE_MOVSS_XMM_to_XMM(EEREC_D, EEREC_TEMP);
 			}
 		}
@@ -3271,7 +3271,7 @@ void recVUMI_MAX_xyzw(VURegs *VU, int xyzw, int info)
 				SSE_XORPS_XMM_to_XMM(EEREC_TEMP, EEREC_TEMP);
 			}
 		}
-		else SSE_MOVAPS_M128_to_XMM(EEREC_TEMP, (u32)s_fones);
+		else SSE_MOVAPS_M128_to_XMM(EEREC_TEMP, (uptr)s_fones);
 	}
 	else {
 		_unpackVF_xyzw(EEREC_TEMP, EEREC_T, xyzw);
@@ -3282,7 +3282,7 @@ void recVUMI_MAX_xyzw(VURegs *VU, int xyzw, int info)
 	else {
 		if( _Fs_ == 0 && _Ft_ == 0 ) {
 			if( xyzw < 3 ) SSE_XORPS_XMM_to_XMM(EEREC_D, EEREC_D);
-			else SSE_MOVAPS_M128_to_XMM(EEREC_D, (u32)s_fones);
+			else SSE_MOVAPS_M128_to_XMM(EEREC_D, (uptr)s_fones);
 		}
 		else {
 			if (EEREC_D == EEREC_S) {
@@ -3758,7 +3758,7 @@ void recVUMI_SQRT( VURegs *VU, int info )
 		x86SetJ8(pjmp);
 	}
 
-	SSE_ANDPS_M128_to_XMM(EEREC_TEMP, (u32)const_clip); //Do a cardinal sqrt
+	SSE_ANDPS_M128_to_XMM(EEREC_TEMP, (uptr)const_clip); //Do a cardinal sqrt
 	if (CHECK_OVERFLOW) SSE_MINSS_M32_to_XMM(EEREC_TEMP, (uptr)g_maxvals); //Clamp infinities (only need to do positive clamp since EEREC_TEMP is positive)
 	SSE_SQRTSS_XMM_to_XMM(EEREC_TEMP, EEREC_TEMP);
@@ -3788,7 +3788,7 @@ void recVUMI_RSQRT(VURegs *VU, int info)
 		x86SetJ8(ajmp8);
 	}
 
-	SSE_ANDPS_M128_to_XMM(EEREC_TEMP, (u32)const_clip); //Do a cardinal sqrt
+	SSE_ANDPS_M128_to_XMM(EEREC_TEMP, (uptr)const_clip); //Do a cardinal sqrt
 	if (CHECK_OVERFLOW) SSE_MINSS_M32_to_XMM(EEREC_TEMP, (uptr)g_maxvals);// Clamp Infinities to Fmax
 	SSE_SQRTSS_XMM_to_XMM(EEREC_TEMP, EEREC_TEMP);
 	if (CHECK_EXTRA_OVERFLOW) vuFloat2(EEREC_TEMP, EEREC_TEMP, 8); //Clamp again just incase :/
@@ -4805,8 +4805,8 @@ void recVUMI_RXOR( VURegs *VU, int info )
 
 		_unpackVFSS_xyzw(EEREC_TEMP, EEREC_S, _Fsf_);
 		SSE_XORPS_M128_to_XMM(EEREC_TEMP, VU_REGR_ADDR);
-		SSE_ANDPS_M128_to_XMM(EEREC_TEMP, (u32)s_mask);
-		SSE_ORPS_M128_to_XMM(EEREC_TEMP, (u32)s_fones);
+		SSE_ANDPS_M128_to_XMM(EEREC_TEMP, (uptr)s_mask);
+		SSE_ORPS_M128_to_XMM(EEREC_TEMP, (uptr)s_fones);
 		SSE_MOVSS_XMM_to_M32(VU_REGR_ADDR, EEREC_TEMP);
 	}
 	else {
@@ -5322,7 +5322,7 @@ void recVUMI_EATANxy( VURegs *VU, int info )
 	assert( VU == &VU1 );
 
 	if( (xmmregs[EEREC_S].mode & MODE_WRITE) && (xmmregs[EEREC_S].mode&MODE_NOFLUSH) ) {
-		SSE_MOVLPS_XMM_to_M64((u32)s_tempmem, EEREC_S);
+		SSE_MOVLPS_XMM_to_M64((uptr)s_tempmem, EEREC_S);
 		FLD32((uptr)&s_tempmem[0]);
 		FLD32((uptr)&s_tempmem[1]);
 	}
@@ -5345,7 +5345,7 @@ void recVUMI_EATANxz( VURegs *VU, int info )
 	assert( VU == &VU1 );
 
 	if( (xmmregs[EEREC_S].mode & MODE_WRITE) && (xmmregs[EEREC_S].mode&MODE_NOFLUSH) ) {
-		SSE_MOVLPS_XMM_to_M64((u32)s_tempmem, EEREC_S);
+		SSE_MOVLPS_XMM_to_M64((uptr)s_tempmem, EEREC_S);
 		FLD32((uptr)&s_tempmem[0]);
 		FLD32((uptr)&s_tempmem[2]);
 	}
@@ -5478,7 +5478,7 @@ void recVUMI_ERSQRT( VURegs *VU, int info )
 	//}
 	//else SSE_MOVSS_M32_to_XMM(EEREC_TEMP, (uptr)&VU->VF[_Fs_].UL[_Fsf_]);
 
-	SSE_ANDPS_M128_to_XMM(EEREC_TEMP, (u32)const_clip); // abs(x)
+	SSE_ANDPS_M128_to_XMM(EEREC_TEMP, (uptr)const_clip); // abs(x)
 	SSE_MINSS_M32_to_XMM(EEREC_TEMP, (uptr)g_maxvals); // Clamp Infinities to Fmax
 	SSE_SQRTSS_XMM_to_XMM(EEREC_TEMP, EEREC_TEMP); // SQRT(abs(x))
@@ -5506,8 +5506,8 @@ void recVUMI_ESIN( VURegs *VU, int info )
 
 	if( (xmmregs[EEREC_S].mode & MODE_WRITE) && (xmmregs[EEREC_S].mode&MODE_NOFLUSH) ) {
 		switch(_Fsf_) {
-			case 0: SSE_MOVSS_XMM_to_M32((u32)s_tempmem, EEREC_S);
-			case 1: SSE_MOVLPS_XMM_to_M64((u32)s_tempmem, EEREC_S);
+			case 0: SSE_MOVSS_XMM_to_M32((uptr)s_tempmem, EEREC_S);
+			case 1: SSE_MOVLPS_XMM_to_M64((uptr)s_tempmem, EEREC_S);
 			default: SSE_MOVHPS_XMM_to_M64((uptr)&s_tempmem[2], EEREC_S);
 		}
 		FLD32((uptr)&s_tempmem[_Fsf_]);
@@ -5531,8 +5531,8 @@ void recVUMI_EATAN( VURegs *VU, int info )
 
 	if( (xmmregs[EEREC_S].mode & MODE_WRITE) && (xmmregs[EEREC_S].mode&MODE_NOFLUSH) ) {
 		switch(_Fsf_) {
-			case 0: SSE_MOVSS_XMM_to_M32((u32)s_tempmem, EEREC_S);
-			case 1: SSE_MOVLPS_XMM_to_M64((u32)s_tempmem, EEREC_S);
+			case 0: SSE_MOVSS_XMM_to_M32((uptr)s_tempmem, EEREC_S);
+			case 1: SSE_MOVLPS_XMM_to_M64((uptr)s_tempmem, EEREC_S);
 			default: SSE_MOVHPS_XMM_to_M64((uptr)&s_tempmem[2], EEREC_S);
 		}
 		FLD32((uptr)&s_tempmem[_Fsf_]);
@@ -5558,8 +5558,8 @@ void recVUMI_EEXP( VURegs *VU, int info )
 
 	if( (xmmregs[EEREC_S].mode & MODE_WRITE) && (xmmregs[EEREC_S].mode&MODE_NOFLUSH) ) {
 		switch(_Fsf_) {
-			case 0: SSE_MOVSS_XMM_to_M32((u32)s_tempmem, EEREC_S);
-			case 1: SSE_MOVLPS_XMM_to_M64((u32)s_tempmem, EEREC_S);
+			case 0: SSE_MOVSS_XMM_to_M32((uptr)s_tempmem, EEREC_S);
+			case 1: SSE_MOVLPS_XMM_to_M64((uptr)s_tempmem, EEREC_S);
 			default: SSE_MOVHPS_XMM_to_M64((uptr)&s_tempmem[2], EEREC_S);
 		}
 		FMUL32((uptr)&s_tempmem[_Fsf_]);
diff --git a/pcsx2/x86/iVUops.h b/pcsx2/x86/iVUops.h
index c72d30e891..6962140534 100644
--- a/pcsx2/x86/iVUops.h
+++ b/pcsx2/x86/iVUops.h
@@ -23,22 +23,22 @@
 #define REC_VUOP(VU, f) { \
 	_freeXMMregs(&VU); \
 	X86_32CODE(_freeMMXregs(); SetFPUstate();) \
-	MOV32ItoM((u32)&VU.code, (u32)VU.code); \
-	CALLFunc((u32)VU##MI_##f); \
+	MOV32ItoM((uptr)&VU.code, (u32)VU.code); \
+	CALLFunc((uptr)VU##MI_##f); \
 }
 
 #define REC_VUOPFLAGS(VU, f) { \
 	_freeXMMregs(&VU); \
 	X86_32CODE(_freeMMXregs(); SetFPUstate();) \
-	MOV32ItoM((u32)&VU.code, (u32)VU.code); \
-	CALLFunc((u32)VU##MI_##f); \
+	MOV32ItoM((uptr)&VU.code, (u32)VU.code); \
+	CALLFunc((uptr)VU##MI_##f); \
 }
 
 #define REC_VUBRANCH(VU, f) { \
 	_freeXMMregs(&VU); \
 	X86_32CODE(_freeMMXregs(); SetFPUstate();) \
-	MOV32ItoM((u32)&VU.code, (u32)VU.code); \
-	MOV32ItoM((u32)&VU.VI[REG_TPC].UL, (u32)pc); \
-	CALLFunc((u32)VU##MI_##f); \
+	MOV32ItoM((uptr)&VU.code, (u32)VU.code); \
+	MOV32ItoM((uptr)&VU.VI[REG_TPC].UL, (u32)pc); \
+	CALLFunc((uptr)VU##MI_##f); \
 	branch = 1; \
 }
diff --git a/pcsx2/x86/ix86-32/iR5900-32.c b/pcsx2/x86/ix86-32/iR5900-32.c
index 7af1d83321..10e46448d4 100644
--- a/pcsx2/x86/ix86-32/iR5900-32.c
+++ b/pcsx2/x86/ix86-32/iR5900-32.c
@@ -389,8 +389,8 @@ int _flushUnusedConstReg()
 			!_recIsRegWritten(g_pCurInstInfo+1, (s_nEndBlock-pc)/4, XMMTYPE_GPRREG, i) ) {
 
 			// check if will be written in the future
-			MOV32ItoM((u32)&cpuRegs.GPR.r[i].UL[0], g_cpuConstRegs[i].UL[0]);
-			MOV32ItoM((u32)&cpuRegs.GPR.r[i].UL[1], g_cpuConstRegs[i].UL[1]);
+			MOV32ItoM((uptr)&cpuRegs.GPR.r[i].UL[0], g_cpuConstRegs[i].UL[0]);
+			MOV32ItoM((uptr)&cpuRegs.GPR.r[i].UL[1], g_cpuConstRegs[i].UL[1]);
 			g_cpuFlushedConstReg |= 1<<i;
@@ ... @@
-		MOV32ItoM((u32)&s_pCurBlock_ltime, (u32)&s_pCurBlockEx->ltime);
-		CALLFunc((u32)_StopPerfCounter);
+		MOV32ItoM((uptr)&s_pCurBlock_ltime, (u32)&s_pCurBlockEx->ltime);
+		CALLFunc((uptr)_StopPerfCounter);
 	}
 #endif
 }
@@ -2093,7 +2093,7 @@ void SetBranchReg( u32 reg )
 	if( reg != 0xffffffff ) {
 //		if( GPR_IS_CONST1(reg) )
-//			MOV32ItoM( (u32)&cpuRegs.pc, g_cpuConstRegs[reg].UL[0] );
+//			MOV32ItoM( (uptr)&cpuRegs.pc, g_cpuConstRegs[reg].UL[0] );
 //		else {
 //			int mmreg;
 //
@@ -2127,7 +2127,7 @@ void SetBranchReg( u32 reg )
 
 //	CMP32ItoM((u32)&cpuRegs.pc, 0);
 //	j8Ptr[5] = JNE8(0);
-//	CALLFunc((u32)tempfn);
+//	CALLFunc((uptr)tempfn);
 //	x86SetJ8( j8Ptr[5] );
 
 	iFlushCall(FLUSH_EVERYTHING);
@@ -2146,7 +2146,7 @@ void SetBranchImm( u32 imm )
 	assert( imm );
 
 	// end the current block
-	MOV32ItoM( (u32)&cpuRegs.pc, imm );
+	MOV32ItoM( (uptr)&cpuRegs.pc, imm );
 	iFlushCall(FLUSH_EVERYTHING);
 
 	iBranchTest(imm, imm <= pc);
@@ -2265,7 +2265,7 @@ static void iBranchTest(u32 newpc, u32 cpuBranch)
 #endif
 
 #ifdef _DEBUG
-	//CALLFunc((u32)testfpu);
+	//CALLFunc((uptr)testfpu);
 #endif
 
 	if( !USE_FAST_BRANCHES || cpuBranch ) {
@@ -2309,11 +2309,11 @@ void recCOP2( void )
 #endif
 
 	if ( !cpucaps.hasStreamingSIMDExtensions ) {
-		MOV32ItoM( (u32)&cpuRegs.code, cpuRegs.code );
-		MOV32ItoM( (u32)&cpuRegs.pc, pc );
+		MOV32ItoM( (uptr)&cpuRegs.code, cpuRegs.code );
+		MOV32ItoM( (uptr)&cpuRegs.pc, pc );
 		iFlushCall(FLUSH_EVERYTHING);
 		g_cpuHasConstReg = 1; // reset all since COP2 can change regs
-		CALLFunc( (u32)COP2 );
+		CALLFunc( (uptr)COP2 );
 
 		CMP32ItoM((int)&cpuRegs.pc, pc);
 		j8Ptr[0] = JE8(0);
@@ -2331,10 +2331,10 @@ void recCOP2( void )
 
 ////////////////////////////////////////////////////
 void recSYSCALL( void ) {
-	MOV32ItoM( (u32)&cpuRegs.code, cpuRegs.code );
-	MOV32ItoM( (u32)&cpuRegs.pc, pc );
+	MOV32ItoM( (uptr)&cpuRegs.code, cpuRegs.code );
+	MOV32ItoM( (uptr)&cpuRegs.pc, pc );
 	iFlushCall(FLUSH_NODESTROY);
-	CALLFunc( (u32)SYSCALL );
+	CALLFunc( (uptr)SYSCALL );
 
 	CMP32ItoM((int)&cpuRegs.pc, pc);
 	j8Ptr[0] = JE8(0);
@@ -2346,10 +2346,10 @@ void recSYSCALL( void ) {
 
 ////////////////////////////////////////////////////
 void recBREAK( void ) {
-	MOV32ItoM( (u32)&cpuRegs.code, cpuRegs.code );
-	MOV32ItoM( (u32)&cpuRegs.pc, pc );
+	MOV32ItoM( (uptr)&cpuRegs.code, cpuRegs.code );
+	MOV32ItoM( (uptr)&cpuRegs.pc, pc );
 	iFlushCall(FLUSH_EVERYTHING);
-	CALLFunc( (u32)BREAK );
+	CALLFunc( (uptr)BREAK );
 
 	CMP32ItoM((int)&cpuRegs.pc, pc);
 	j8Ptr[0] = JE8(0);
@@ -2361,10 +2361,10 @@ void recBREAK( void ) {
 
 ////////////////////////////////////////////////////
 //static void recCACHE( void ) {
-//	MOV32ItoM( (u32)&cpuRegs.code, cpuRegs.code );
-//	MOV32ItoM( (u32)&cpuRegs.pc, pc );
+//	MOV32ItoM( (uptr)&cpuRegs.code, cpuRegs.code );
+//	MOV32ItoM( (uptr)&cpuRegs.pc, pc );
 //	iFlushCall(FLUSH_EVERYTHING);
-//	CALLFunc( (u32)CACHE );
+//	CALLFunc( (uptr)CACHE );
 //	//branch = 2;
 //
 //	CMP32ItoM((int)&cpuRegs.pc, pc);
@@ -2399,14 +2399,14 @@ void recMFSA( void )
 		MOV32MtoR(EAX, (u32)&cpuRegs.sa);
 		_deleteEEreg(_Rd_, 0);
 		MOV32RtoM((u32)&cpuRegs.GPR.r[_Rd_].UL[0], EAX);
-		MOV32ItoM((u32)&cpuRegs.GPR.r[_Rd_].UL[1], 0);
+		MOV32ItoM((uptr)&cpuRegs.GPR.r[_Rd_].UL[1], 0);
 	}
 }
 
 void recMTSA( void )
 {
 	if( GPR_IS_CONST1(_Rs_) ) {
-		MOV32ItoM((u32)&cpuRegs.sa, g_cpuConstRegs[_Rs_].UL[0] );
+		MOV32ItoM((uptr)&cpuRegs.sa, g_cpuConstRegs[_Rs_].UL[0] );
 	}
 	else {
 		int mmreg;
@@ -2428,7 +2428,7 @@ void recMTSA( void )
 void recMTSAB( void )
 {
 	if( GPR_IS_CONST1(_Rs_) ) {
-		MOV32ItoM((u32)&cpuRegs.sa, ((g_cpuConstRegs[_Rs_].UL[0] & 0xF) ^ (_Imm_ & 0xF)) << 3);
+		MOV32ItoM((uptr)&cpuRegs.sa, ((g_cpuConstRegs[_Rs_].UL[0] & 0xF) ^ (_Imm_ & 0xF)) << 3);
 	}
 	else {
 		_eeMoveGPRtoR(EAX, _Rs_);
@@ -2442,7 +2442,7 @@ void recMTSAB( void )
 void recMTSAH( void )
 {
 	if( GPR_IS_CONST1(_Rs_) ) {
-		MOV32ItoM((u32)&cpuRegs.sa, ((g_cpuConstRegs[_Rs_].UL[0] & 0x7) ^ (_Imm_ & 0x7)) << 4);
+		MOV32ItoM((uptr)&cpuRegs.sa, ((g_cpuConstRegs[_Rs_].UL[0] & 0x7) ^ (_Imm_ & 0x7)) << 4);
 	}
 	else {
 		_eeMoveGPRtoR(EAX, _Rs_);
@@ -2473,7 +2473,7 @@ void checkpchanged(u32 startpc)
 }
 
 //#ifdef _DEBUG
-//#define CHECK_XMMCHANGED() CALLFunc((u32)checkxmmchanged);
+//#define CHECK_XMMCHANGED() CALLFunc((uptr)checkxmmchanged);
 //#else
 //#define CHECK_XMMCHANGED()
 //#endif
@@ -2511,7 +2511,7 @@ void recompileNextInstruction(int delayslot)
 		assert( PC_GETBLOCKEX(pblock)->startpc == pblock->startpc );
 
 		iFlushCall(FLUSH_EVERYTHING);
-		MOV32ItoM((u32)&cpuRegs.pc, pc);
+		MOV32ItoM((uptr)&cpuRegs.pc, pc);
 
 //		if( pexblock->pOldFnptr ) {
 //			// code already in place, so jump to it and exit recomp
@@ -2555,7 +2555,7 @@ void recompileNextInstruction(int delayslot)
 //			CMP32ItoM((u32)s_pCode, cpuRegs.code);
 //			j8Ptr[0] = JE8(0);
 //			MOV32ItoR(EAX, pc);
-//			CALLFunc((u32)checkcodefn);
+//			CALLFunc((uptr)checkcodefn);
 //			x86SetJ8( j8Ptr[ 0 ] );
 //
 //	if( !delayslot ) {
@@ -2567,7 +2567,7 @@ void recompileNextInstruction(int delayslot)
 //			x86SetJ8( j8Ptr[ 0 ] );
 //			x86SetJ8( j8Ptr[ 1 ] );
 //			PUSH32I(s_pCurBlockEx->startpc);
-//			CALLFunc((u32)checkpchanged);
+//			CALLFunc((uptr)checkpchanged);
 //			ADD32ItoR(ESP, 4);
 //			x86SetJ8( j8Ptr[ 2 ] );
 //		}
@@ -2868,7 +2868,7 @@ void recRecompile( u32 startpc )
 //		MOV32RtoM((u32)&cpuRegs.cycle, ECX);
 //		//ADD32ItoR(ECX, 9);
 //		//ADD32ItoM((u32)&cpuRegs.cycle, 512);
-//		CALLFunc((u32)cpuBranchTest);
+//		CALLFunc((uptr)cpuBranchTest);
 //		CMP32ItoM((u32)&cpuRegs.pc, 0x81fc0);
 //		JE8(s_pCurBlock->pFnptr - (u32)(x86Ptr+2) );
 //		JMP32((u32)DispatcherReg - (u32)(x86Ptr+5));
@@ -2918,12 +2918,12 @@ void recRecompile( u32 startpc )
 
 #ifdef _DEBUG
 	// for debugging purposes
-	MOV32ItoM((u32)&g_lastpc, pc);
-	CALLFunc((u32)printfn);
+	MOV32ItoM((uptr)&g_lastpc, pc);
+	CALLFunc((uptr)printfn);
 
 //	CMP32MtoR(EBP, (u32)&s_uSaveEBP);
 //	j8Ptr[0] = JE8(0);
-//	CALLFunc((u32)badespfn);
+//	CALLFunc((uptr)badespfn);
 //	x86SetJ8(j8Ptr[0]);
 #endif
@@ -3188,7 +3188,7 @@ StartRecomp:
 //	if( pc+32 < s_nEndBlock ) {
 //		// only blocks with more than 8 insts
 //		//PUSH32I((u32)&lbase);
-//		//CALLFunc((u32)QueryPerformanceCounter);
+//		//CALLFunc((uptr)QueryPerformanceCounter);
 //		lbase.QuadPart = GetCPUTick();
 //		s_startcount = 1;
 //	}
@@ -3264,13 +3264,13 @@ StartRecomp:
 			BASEBLOCK* pblock = PC_GETBLOCK(s_nEndBlock);
 			assert( pc == s_nEndBlock );
 			iFlushCall(FLUSH_EVERYTHING);
-			MOV32ItoM((u32)&cpuRegs.pc, pc);
+			MOV32ItoM((uptr)&cpuRegs.pc, pc);
 			JMP32((u32)pblock->pFnptr - ((u32)x86Ptr + 5));
 			branch = 3;
 		}
 		else if( !branch ) {
 			// didn't branch, but had to stop
-			MOV32ItoM( (u32)&cpuRegs.pc, pc );
+			MOV32ItoM( (uptr)&cpuRegs.pc, pc );
 			iFlushCall(FLUSH_EVERYTHING);
diff --git a/pcsx2/x86/ix86-32/iR5900Branch.c b/pcsx2/x86/ix86-32/iR5900Branch.c
index 3e22f5c050..910772ee90 100644
--- a/pcsx2/x86/ix86-32/iR5900Branch.c
+++ b/pcsx2/x86/ix86-32/iR5900Branch.c
@@ -516,8 +516,8 @@ void recBLTZAL(int info)
 	_eeFlushAllUnused();
 
 	_deleteEEreg(31, 0);
-	MOV32ItoM((u32)&cpuRegs.GPR.r[31].UL[0], pc+4);
-	MOV32ItoM((u32)&cpuRegs.GPR.r[31].UL[1], 0);
+	MOV32ItoM((uptr)&cpuRegs.GPR.r[31].UL[0], pc+4);
+	MOV32ItoM((uptr)&cpuRegs.GPR.r[31].UL[1], 0);
 
 	if( GPR_IS_CONST1(_Rs_) ) {
 		if( !(g_cpuConstRegs[_Rs_].SD[0] < 0) )
@@ -554,8 +554,8 @@ void recBGEZAL( void )
 	if( GPR_IS_CONST1(_Rs_) ) {
 		// will always branch
 		_deleteEEreg(31, 0);
-		MOV32ItoM((u32)&cpuRegs.GPR.r[31].UL[0], pc+4);
-		MOV32ItoM((u32)&cpuRegs.GPR.r[31].UL[1], 0);
+		MOV32ItoM((uptr)&cpuRegs.GPR.r[31].UL[0], pc+4);
+		MOV32ItoM((uptr)&cpuRegs.GPR.r[31].UL[1], 0);
 
 		if( !(g_cpuConstRegs[_Rs_].SD[0] >= 0) )
 			branchTo = pc+4;
diff --git a/pcsx2/x86/ix86-32/iR5900LoadStore.c b/pcsx2/x86/ix86-32/iR5900LoadStore.c
index eb6156ddba..010f0d9329 100644
--- a/pcsx2/x86/ix86-32/iR5900LoadStore.c
+++ b/pcsx2/x86/ix86-32/iR5900LoadStore.c
@@ -269,7 +269,7 @@ int recSetMemLocation(int regs, int imm, int mmreg, int msize, int j32)
 	LoadCW();
 
 #ifdef _DEBUG
-	//CALLFunc((u32)testaddrs);
+	//CALLFunc((uptr)testaddrs);
 #endif
 
 
@@ -299,7 +299,7 @@ int recSetMemLocation(int regs, int imm, int mmreg, int msize, int j32)
 	CMP32ItoR(EAX, 1);
 	ptr = JNE8(0);
 	MOV32ItoR(EDX, _Rs_);
-	CALLFunc((u32)assertmem);
+	CALLFunc((uptr)assertmem);
 	x86SetJ8(ptr);
 #endif
 	return 0;
@@ -1596,7 +1596,7 @@ void recMemConstClear(u32 mem, u32 size)
 		j8Ptr[6] = JE8(0);
 
 		PUSH32I((u32)PC_GETBLOCK(mem));
-		CALLFunc((u32)recClearMem);
+		CALLFunc((uptr)recClearMem);
 		ADD32ItoR(ESP, 4);
 		x86SetJ8(j8Ptr[6]);
 	}
@@ -1616,7 +1616,7 @@ void recMemConstClear(u32 mem, u32 size)
 		x86SetJ8( j8Ptr[6] );
 
 		PUSH32I((u32)PC_GETBLOCK(mem));
-		CALLFunc((u32)recClear64);
+		CALLFunc((uptr)recClear64);
 		ADD32ItoR(ESP, 4);
 		x86SetJ8( j8Ptr[8] );
 
@@ -1646,7 +1646,7 @@ void recMemConstClear(u32 mem, u32 size)
 		x86SetJ8( j8Ptr[9] );
 
 		PUSH32I((u32)PC_GETBLOCK(mem));
-		CALLFunc((u32)recClear128);
+		CALLFunc((uptr)recClear128);
 		ADD32ItoR(ESP, 4);
 		x86SetJ8( j8Ptr[10] );
 
@@ -2206,9 +2206,9 @@ void recStore(int bit, u32 imm, int align)
 		else j8Ptr[1] = JAE8(0);
 
 		if( bit < 32 || !align ) AND8ItoR(ECX, 0xfc);
-		if( bit <= 32 ) CALLFunc((u32)recWriteMemClear32);
-		else if( bit == 64 ) CALLFunc((u32)recWriteMemClear64);
-		else CALLFunc((u32)recWriteMemClear128);
+		if( bit <= 32 ) CALLFunc((uptr)recWriteMemClear32);
+		else if( bit == 64 ) CALLFunc((uptr)recWriteMemClear64);
+		else CALLFunc((uptr)recWriteMemClear128);
 
 		if( dohw ) {
 			if( s_bCachingMem & 2 ) j32Ptr[5] = JMP32(0);
@@ -2365,18 +2365,18 @@ void recStore_co(int bit, int align)
 		MOV32RtoM((u32)&s_tempaddr, ECX);
 
 		if( bit < 32 ) AND8ItoR(ECX, 0xfc);
-		if( bit <= 32 ) CALLFunc((u32)recWriteMemClear32);
-		else if( bit == 64 ) CALLFunc((u32)recWriteMemClear64);
-		else CALLFunc((u32)recWriteMemClear128);
+		if( bit <= 32 ) CALLFunc((uptr)recWriteMemClear32);
+		else if( bit == 64 ) CALLFunc((uptr)recWriteMemClear64);
+		else CALLFunc((uptr)recWriteMemClear128);
 
 		MOV32MtoR(ECX, (u32)&s_tempaddr);
 		if( off < 0 ) ADD32ItoR(ECX, -off);
 		else ADD32ItoR(ECX, off);
 
 		if( bit < 32 ) AND8ItoR(ECX, 0xfc);
-		if( bit <= 32 ) CALLFunc((u32)recWriteMemClear32);
-		else if( bit == 64 ) CALLFunc((u32)recWriteMemClear64);
-		else CALLFunc((u32)recWriteMemClear128);
+		if( bit <= 32 ) CALLFunc((uptr)recWriteMemClear32);
+		else if( bit == 64 ) CALLFunc((uptr)recWriteMemClear64);
+		else CALLFunc((uptr)recWriteMemClear128);
 
 		if( dohw ) {
 			if( s_bCachingMem & 2 ) j32Ptr[4] = JMP32(0);
@@ -2675,12 +2675,12 @@ void recSD_coX(int num, int align)
 		MOV32RtoM((u32)&s_tempaddr, ECX);
 
 		if( minoff != _Imm_ ) ADD32ItoR(ECX, _Imm_-minoff);
-		CALLFunc((u32)recWriteMemClear64);
+		CALLFunc((uptr)recWriteMemClear64);
 
 		for(i = 0; i < num; ++i) {
 			MOV32MtoR(ECX, (u32)&s_tempaddr);
 			if( minoff != (*(s16*)PSM(pc+i*4)) ) ADD32ItoR(ECX, (*(s16*)PSM(pc+i*4))-minoff);
-			CALLFunc((u32)recWriteMemClear64);
+			CALLFunc((uptr)recWriteMemClear64);
 		}
 
 		if( dohw ) {
@@ -2832,12 +2832,12 @@ void recSQ_coX(int num)
 		MOV32RtoM((u32)&s_tempaddr, ECX);
 
 		if( minoff != _Imm_ ) ADD32ItoR(ECX, _Imm_-minoff);
-		CALLFunc((u32)recWriteMemClear128);
+		CALLFunc((uptr)recWriteMemClear128);
 
 		for(i = 0; i < num; ++i) {
 			MOV32MtoR(ECX, (u32)&s_tempaddr);
 			if( minoff != (*(s16*)PSM(pc+i*4)) ) ADD32ItoR(ECX, (*(s16*)PSM(pc+i*4))-minoff);
-			CALLFunc((u32)recWriteMemClear128);
+			CALLFunc((uptr)recWriteMemClear128);
 		}
 
 		if( dohw ) {
diff --git a/pcsx2/x86/ix86-32/iR5900Move.c b/pcsx2/x86/ix86-32/iR5900Move.c
index ae678ed4bc..aa08a571a3 100644
--- a/pcsx2/x86/ix86-32/iR5900Move.c
+++ b/pcsx2/x86/ix86-32/iR5900Move.c
@@ -446,8 +446,8 @@ void recMOVZtemp_const()
 	_deleteEEreg(_Rd_, 1);
 	_eeOnWriteReg(_Rd_, 0);
 	if (g_cpuConstRegs[_Rt_].UD[0] == 0) {
-		MOV32ItoM((u32)&cpuRegs.GPR.r[_Rd_].UL[0], g_cpuConstRegs[_Rs_].UL[0]);
-		MOV32ItoM((u32)&cpuRegs.GPR.r[_Rd_].UL[1], g_cpuConstRegs[_Rs_].UL[1]);
+		MOV32ItoM((uptr)&cpuRegs.GPR.r[_Rd_].UL[0], g_cpuConstRegs[_Rs_].UL[0]);
+		MOV32ItoM((uptr)&cpuRegs.GPR.r[_Rd_].UL[1], g_cpuConstRegs[_Rs_].UL[1]);
 	}
 }
 
@@ -568,8 +568,8 @@ void recMOVZ()
 	if( !GPR_IS_CONST2(_Rs_, _Rt_) ) {
 		// remove the const, since move is conditional
 		_deleteEEreg(_Rd_, 0);
-		MOV32ItoM((u32)&cpuRegs.GPR.r[_Rd_].UL[0], g_cpuConstRegs[_Rd_].UL[0]);
-		MOV32ItoM((u32)&cpuRegs.GPR.r[_Rd_].UL[1], g_cpuConstRegs[_Rd_].UL[1]);
+		MOV32ItoM((uptr)&cpuRegs.GPR.r[_Rd_].UL[0], g_cpuConstRegs[_Rd_].UL[0]);
+		MOV32ItoM((uptr)&cpuRegs.GPR.r[_Rd_].UL[1], g_cpuConstRegs[_Rd_].UL[1]);
 	}
 	else {
 		if (g_cpuConstRegs[_Rt_].UD[0] == 0) {
@@ -590,8 +590,8 @@ void recMOVNtemp_const()
 	_deleteEEreg(_Rd_, 1);
 	_eeOnWriteReg(_Rd_, 0);
 	if (g_cpuConstRegs[_Rt_].UD[0] != 0) {
-		MOV32ItoM((u32)&cpuRegs.GPR.r[_Rd_].UL[0], g_cpuConstRegs[_Rs_].UL[0]);
-		MOV32ItoM((u32)&cpuRegs.GPR.r[_Rd_].UL[1], g_cpuConstRegs[_Rs_].UL[1]);
+		MOV32ItoM((uptr)&cpuRegs.GPR.r[_Rd_].UL[0], g_cpuConstRegs[_Rs_].UL[0]);
+		MOV32ItoM((uptr)&cpuRegs.GPR.r[_Rd_].UL[1], g_cpuConstRegs[_Rs_].UL[1]);
 	}
 }
 
@@ -707,8 +707,8 @@ void recMOVN()
 	if( !GPR_IS_CONST2(_Rs_, _Rt_) ) {
 		// remove the const, since move is conditional
 		_deleteEEreg(_Rd_, 0);
-		MOV32ItoM((u32)&cpuRegs.GPR.r[_Rd_].UL[0], g_cpuConstRegs[_Rd_].UL[0]);
-		MOV32ItoM((u32)&cpuRegs.GPR.r[_Rd_].UL[1], g_cpuConstRegs[_Rd_].UL[1]);
+		MOV32ItoM((uptr)&cpuRegs.GPR.r[_Rd_].UL[0], g_cpuConstRegs[_Rd_].UL[0]);
+		MOV32ItoM((uptr)&cpuRegs.GPR.r[_Rd_].UL[1], g_cpuConstRegs[_Rd_].UL[1]);
 	}
 	else {
 		if (g_cpuConstRegs[_Rt_].UD[0] != 0) {
diff --git a/pcsx2/x86/ix86-64/iR5900-64.c b/pcsx2/x86/ix86-64/iR5900-64.c
index 977dcdf609..6e3b7d4d39 100644
--- a/pcsx2/x86/ix86-64/iR5900-64.c
+++ b/pcsx2/x86/ix86-64/iR5900-64.c
@@ -1794,7 +1794,7 @@ static void iBranchTest(u32 newpc, u32 cpuBranch)
 	j8Ptr[0] = JS8( 0 );
 
 	// has to be in the middle of Save/LoadBranchState
-	CALLFunc( (int)cpuBranchTest );
+	CALLFunc((uptr)cpuBranchTest );
 
 	if( newpc != 0xffffffff ) {
 		CMP32ItoM((uptr)&cpuRegs.pc, newpc);
@@ -1823,7 +1823,7 @@ void recCOP2( void )
 		MOV32ItoM( (uptr)&cpuRegs.pc, pc );
 		iFlushCall(FLUSH_EVERYTHING);
 		g_cpuHasConstReg = 1; // reset all since COP2 can change regs
-		CALLFunc( (u32)COP2 );
+		CALLFunc( (uptr)COP2 );
 
 		CMP32ItoM((uptr)&cpuRegs.pc, pc);
 		j8Ptr[0] = JE8(0);
@@ -1844,7 +1844,7 @@ void recSYSCALL( void ) {
 	MOV32ItoM( (uptr)&cpuRegs.code, cpuRegs.code );
 	MOV32ItoM( (uptr)&cpuRegs.pc, pc );
 	iFlushCall(FLUSH_NODESTROY);
-	CALLFunc( (u32)SYSCALL );
+	CALLFunc( (uptr)SYSCALL );
 
 	CMP32ItoM((uptr)&cpuRegs.pc, pc);
 	j8Ptr[0] = JE8(0);
@@ -1859,7 +1859,7 @@ void recBREAK( void ) {
 	MOV32ItoM( (uptr)&cpuRegs.code, cpuRegs.code );
 	MOV32ItoM( (uptr)&cpuRegs.pc, pc );
 	iFlushCall(FLUSH_EVERYTHING);
-	CALLFunc( (u32)BREAK );
+	CALLFunc( (uptr)BREAK );
 
 	CMP32ItoM((uptr)&cpuRegs.pc, pc);
 	j8Ptr[0] = JE8(0);
@@ -1877,7 +1877,7 @@ void recBREAK( void ) {
 //	MOV32ItoM( (uptr)&cpuRegs.code, cpuRegs.code );
 //	MOV32ItoM( (uptr)&cpuRegs.pc, pc );
 //	iFlushCall(FLUSH_EVERYTHING);
-//	CALLFunc( (u32)CACHE );
+//	CALLFunc( (uptr)CACHE );
 //	//branch = 2;
 //
 //	CMP32ItoM((uptr)&cpuRegs.pc, pc);
diff --git a/pcsx2/x86/ix86-64/iR5900Branch-64.c b/pcsx2/x86/ix86-64/iR5900Branch-64.c
index 1eb66cd4d4..da71fba46a 100644
--- a/pcsx2/x86/ix86-64/iR5900Branch-64.c
+++ b/pcsx2/x86/ix86-64/iR5900Branch-64.c
@@ -145,7 +145,7 @@ void recBLTZAL( void )
 	MOV32ItoM( (uptr)&cpuRegs.code, cpuRegs.code );
 	MOV32ItoM( (uptr)&cpuRegs.pc, pc );
 	iFlushCall(FLUSH_EVERYTHING);
-	CALLFunc( (u32)BLTZAL );
+	CALLFunc( (uptr)BLTZAL );
 	branch = 2;
 }
 
@@ -156,7 +156,7 @@ void recBGEZAL( void )
 	MOV32ItoM( (uptr)&cpuRegs.code, cpuRegs.code );
 	MOV32ItoM( (uptr)&cpuRegs.pc, pc );
 	iFlushCall(FLUSH_EVERYTHING);
-	CALLFunc( (u32)BGEZAL );
+	CALLFunc( (uptr)BGEZAL );
 	branch = 2;
 }
 
@@ -472,7 +472,7 @@ void recBLTZALL( void )
 	MOV32ItoM( (uptr)&cpuRegs.code, cpuRegs.code );
 	MOV32ItoM( (uptr)&cpuRegs.pc, pc );
 	iFlushCall(FLUSH_EVERYTHING);
-	CALLFunc( (u32)BLTZALL );
+	CALLFunc( (uptr)BLTZALL );
 	branch = 2;
 }
 
@@ -483,7 +483,7 @@ void recBGEZALL( void )
 	MOV32ItoM( (uptr)&cpuRegs.code, cpuRegs.code );
 	MOV32ItoM( (uptr)&cpuRegs.pc, pc );
 	iFlushCall(FLUSH_EVERYTHING);
-	CALLFunc( (u32)BGEZALL );
+	CALLFunc( (uptr)BGEZALL );
 	branch = 2;
 }
 
diff --git a/pcsx2/x86/ix86-64/iR5900Jump-64.c b/pcsx2/x86/ix86-64/iR5900Jump-64.c
index dec9b4c8be..d746fefceb 100644
--- a/pcsx2/x86/ix86-64/iR5900Jump-64.c
+++ b/pcsx2/x86/ix86-64/iR5900Jump-64.c
@@ -98,12 +98,12 @@ void recJALR( void )
 
 	if( x86regs[ESI].inuse ) {
 		assert( x86regs[ESI].type == X86TYPE_PCWRITEBACK );
-		MOV32RtoM((int)&cpuRegs.pc, ESI);
+		MOV32RtoM((uptr)&cpuRegs.pc, ESI);
 		x86regs[ESI].inuse = 0;
 	}
 	else {
-		MOV32MtoR(EAX, (u32)&g_recWriteback);
-		MOV32RtoM((int)&cpuRegs.pc, EAX);
+		MOV32MtoR(EAX, (uptr)&g_recWriteback);
+		MOV32RtoM((uptr)&cpuRegs.pc, EAX);
 	}
 
 	SetBranchReg(0xffffffff);
diff --git a/pcsx2/x86/ix86/ix86_mmx.c b/pcsx2/x86/ix86/ix86_mmx.c
index d3b1aab908..005114b317 100644
--- a/pcsx2/x86/ix86/ix86_mmx.c
+++ b/pcsx2/x86/ix86/ix86_mmx.c
@@ -475,7 +475,7 @@ void PUNPCKLDQMtoR( x86MMXRegType to, uptr from )
 
 void MOVQ64ItoR( x86MMXRegType reg, u64 i )
 {
-	MOVQMtoR( reg, ( u32 )(x86Ptr) + 2 + 7 );
+	MOVQMtoR( reg, ( uptr )(x86Ptr) + 2 + 7 );
 	JMP8( 8 );
 	write64( i );
 }
diff --git a/pcsx2/x86/ix86/ix86_sse.c b/pcsx2/x86/ix86/ix86_sse.c
index c0c280a2e0..b18e4e269d 100644
--- a/pcsx2/x86/ix86/ix86_sse.c
+++ b/pcsx2/x86/ix86/ix86_sse.c
@@ -1418,17 +1418,17 @@ _inline void SSE2EMU_MOVQ_XMM_to_XMM( x86SSERegType to, x86SSERegType from)
 _inline void SSE2EMU_MOVD_RmOffset_to_XMM( x86SSERegType to, x86IntRegType from, int offset )
 {
 	MOV32RmtoROffset(EAX, from, offset);
-	MOV32ItoM((u32)p+4, 0);
-	MOV32ItoM((u32)p+8, 0);
-	MOV32RtoM((u32)p, EAX);
-	MOV32ItoM((u32)p+12, 0);
-	SSE_MOVAPS_M128_to_XMM(to, (u32)p);
+	MOV32ItoM((uptr)p+4, 0);
+	MOV32ItoM((uptr)p+8, 0);
+	MOV32RtoM((uptr)p, EAX);
+	MOV32ItoM((uptr)p+12, 0);
+	SSE_MOVAPS_M128_to_XMM(to, (uptr)p);
 }
 
 _inline void SSE2EMU_MOVD_XMM_to_RmOffset(x86IntRegType to, x86SSERegType from, int offset )
 {
-	SSE_MOVSS_XMM_to_M32((u32)p, from);
-	MOV32MtoR(EAX, (u32)p);
+	SSE_MOVSS_XMM_to_M32((uptr)p, from);
+	MOV32MtoR(EAX, (uptr)p);
 	MOV32RtoRmOffset(to, EAX, offset);
 }
 
@@ -1438,14 +1438,14 @@ extern void SetMMXstate();
 _inline void SSE2EMU_MOVDQ2Q_XMM_to_MM( x86MMXRegType to, x86SSERegType from)
 {
 	SSE_MOVLPS_XMM_to_M64((u32)p, from);
-	MOVQMtoR(to, (u32)p);
+	MOVQMtoR(to, (uptr)p);
 	SetMMXstate();
 }
 
 _inline void SSE2EMU_MOVQ2DQ_MM_to_XMM( x86SSERegType to, x86MMXRegType from)
 {
-	MOVQRtoM((u32)p, from);
-	SSE_MOVLPS_M64_to_XMM(to, (u32)p);
+	MOVQRtoM((uptr)p, from);
+	SSE_MOVLPS_M64_to_XMM(to, (uptr)p);
 	SetMMXstate();
 }
 #endif
@@ -1498,7 +1498,7 @@ _inline void SSE2EMU_PSHUFD_XMM_to_XMM( x86SSERegType to, x86SSERegType from, u8
 }
 
 _inline void SSE2EMU_MOVD_XMM_to_R( x86IntRegType to, x86SSERegType from ) {
-	MOV32ItoR(to, (u32)&p);
+	MOV32ItoR(to, (uptr)&p);
 	SSE_MOVUPSRtoRm(to, from);
 	MOV32RmtoR(to, to);
 }
@@ -1513,7 +1513,7 @@ _inline void SSE2EMU_CVTPS2DQ_XMM_to_XMM( x86SSERegType to, x86SSERegType from )
 	SetFPUstate();
 	_freeMMXreg(7);
 #endif
-	SSE_MOVAPS_XMM_to_M128((u32)f, from);
+	SSE_MOVAPS_XMM_to_M128((uptr)f, from);
 
 	FLD32((u32)&f[0]);
 	FISTP32((u32)&p2[0]);
@@ -1524,7 +1524,7 @@ _inline void SSE2EMU_CVTPS2DQ_XMM_to_XMM( x86SSERegType to, x86SSERegType from )
 	FLD32((u32)&f[3]);
 	FISTP32((u32)&p2[3]);
 
-	SSE_MOVAPS_M128_to_XMM(to, (u32)p2);
+	SSE_MOVAPS_M128_to_XMM(to, (uptr)p2);
 }
 
 _inline void SSE2EMU_CVTDQ2PS_M128_to_XMM( x86SSERegType to, uptr from ) {
@@ -1541,7 +1541,7 @@ _inline void SSE2EMU_CVTDQ2PS_M128_to_XMM( x86SSERegType to, uptr from ) {
 	FILD32((u32)from+12);
 	FSTP32((u32)&f[3]);
 
-	SSE_MOVAPS_M128_to_XMM(to, (u32)f);
+	SSE_MOVAPS_M128_to_XMM(to, (uptr)f);
 }
 
 _inline void SSE2EMU_MOVD_XMM_to_M32( u32 to, x86SSERegType from ) {
@@ -1552,11 +1552,11 @@ _inline void SSE2EMU_MOVD_XMM_to_M32( u32 to, x86SSERegType from ) {
 }
 
 _inline void SSE2EMU_MOVD_R_to_XMM( x86SSERegType to, x86IntRegType from ) {
-	MOV32ItoM((u32)p+4, 0);
-	MOV32ItoM((u32)p+8, 0);
-	MOV32RtoM((u32)p, from);
-	MOV32ItoM((u32)p+12, 0);
-	SSE_MOVAPS_M128_to_XMM(to, (u32)p);
+	MOV32ItoM((uptr)p+4, 0);
+	MOV32ItoM((uptr)p+8, 0);
+	MOV32RtoM((uptr)p, from);
+	MOV32ItoM((uptr)p+12, 0);
+	SSE_MOVAPS_M128_to_XMM(to, (uptr)p);
 }
 #endif
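
-- 
Note on the pattern this patch applies: every hunk swaps a (u32) cast on a
pointer for (uptr), PCSX2's pointer-sized unsigned integer typedef. On 32-bit
x86 the two types are the same width, but on x86-64 a pointer is 64 bits, so
squeezing a function or data address through u32 silently drops the upper 32
bits. The recompiler helpers seen above (CALLFunc, MOV32ItoM, the SSE_*_M128
emitters, etc.) take integer address operands, which is why the casts exist
at all. A minimal sketch of the failure mode and the fix, using a typedef
equivalent to (but not literally) the one PCSX2 defines in its platform
headers; the handler function is purely illustrative:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint32_t  u32;
	typedef uintptr_t uptr;   /* always as wide as a pointer */

	static void handler(void) { puts("called"); }

	int main(void) {
		/* Truncates on 64-bit targets whenever the address has high bits set
		   (going through uptr first keeps the cast itself well-formed): */
		u32 bad = (u32)(uptr)handler;

		/* Round-trips losslessly on both 32- and 64-bit targets: */
		uptr good = (uptr)handler;
		void (*fn)(void) = (void (*)(void))good;
		fn();

		(void)bad;
		return 0;
	}

Strictly speaking, ISO C only guarantees uintptr_t round-trips for object
pointers, but the function-pointer version works on the platforms PCSX2
targets, and it is the same assumption the recompiler itself relies on.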