mirror of https://github.com/PCSX2/pcsx2.git
some fpu interpreter changes...
git-svn-id: http://pcsx2-playground.googlecode.com/svn/trunk@170 a6443dda-0b58-4228-96e9-037be469359c
commit 3774eedbae
parent 34a196376b
pcsx2/FPU.c (16 changed lines)
@@ -35,7 +35,7 @@
Setting it to ~0x00000000 = Compares Exact Value. (comment out this macro for faster Exact Compare method)
Setting it to ~0x00000001 = Discards the least significant bit when comparing.
Setting it to ~0x00000003 = Discards the least 2 significant bits when comparing... etc.. */
#define comparePrecision ~0x00000001
//#define comparePrecision ~0x00000001

// Operands
#define _Ft_ ( ( cpuRegs.code >> 16 ) & 0x1F )
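
A note on the comparePrecision trick above: masking the low mantissa bit(s) off both operands before an integer compare makes two singles that differ only in those bits test as equal. A minimal standalone C sketch of that idea (the compare_fuzzy helper and the test values are illustrative, not PCSX2 code):

#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define comparePrecision ~0x00000001u   /* discard the least significant mantissa bit */

static int compare_fuzzy(float a, float b)
{
    uint32_t ua, ub;
    memcpy(&ua, &a, sizeof ua);          /* grab the raw IEEE-754 bit patterns */
    memcpy(&ub, &b, sizeof ub);
    return (ua & comparePrecision) == (ub & comparePrecision);
}

int main(void)
{
    float x = 1.0f;
    float y = nextafterf(x, 2.0f);       /* differs from x only in the last mantissa bit */
    printf("exact: %d  fuzzy: %d\n", x == y, compare_fuzzy(x, y));
    return 0;
}
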
@@ -195,7 +195,7 @@ float fpuDouble(u32 f)
}

void ABS_S() {
    _FdValf_ = fpufabsf( fpuDouble( _FsValUl_ ) );
    _FdValUl_ = _FsValUl_ & 0x7fffffff;
    clearFPUFlags( FPUflagO | FPUflagU );
}

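
The new ABS_S body uses the fact that clearing bit 31 (the sign bit) of an IEEE-754 single yields its absolute value with no floating-point arithmetic at all. A standalone C illustration (names are illustrative, not from PCSX2):

#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static float abs_by_mask(float f)
{
    uint32_t bits;
    memcpy(&bits, &f, sizeof bits);
    bits &= 0x7fffffff;                  /* clear the sign bit */
    memcpy(&f, &bits, sizeof f);
    return f;
}

int main(void)
{
    float v = -3.5f;
    printf("%f %f\n", fabsf(v), abs_by_mask(v));   /* both print 3.500000 */
    return 0;
}
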
@@ -245,7 +245,7 @@ void C_LT() {

void CFC1() {
    if ( !_Rt_ || ( (_Fs_ != 0) && (_Fs_ != 31) ) ) return;
    cpuRegs.GPR.r[_Rt_].UD[0] = (s32)fpuRegs.fprc[_Fs_];
    cpuRegs.GPR.r[_Rt_].SD[0] = (s64)fpuRegs.fprc[_Fs_];
}

void CTC1() {
@@ -254,12 +254,12 @@ void CTC1() {
}

void CVT_S() {
    _FdValf_ = (float)(s32)_FsValUl_;
    _FdValf_ = (float)(*(s32*)&_FsValUl_);
    _FdValf_ = fpuDouble( _FdValUl_ );
}

void CVT_W() {
    if ( ( _FsValUl_ & 0x7F800000 ) <= 0x4E800000 ) { _FdValUl_ = (s32)(float)_FsValf_; }
    if ( ( _FsValUl_ & 0x7F800000 ) <= 0x4E800000 ) { _FdValUl_ = (s32)_FsValf_; }
    else if ( ( _FsValUl_ & 0x80000000 ) == 0 ) { _FdValUl_ = 0x7fffffff; }
    else { _FdValUl_ = 0x80000000; }
}
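
The CVT_W lines keep the EE's saturating float-to-int behaviour: the exponent test against 0x4E800000 (the bit pattern of 2^30) accepts any single whose magnitude fits in a signed 32-bit integer and clamps everything else to 0x7fffffff or 0x80000000 by sign. A standalone C restatement of that logic (the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int32_t cvt_w_saturate(float f)
{
    uint32_t bits;
    memcpy(&bits, &f, sizeof bits);
    if ((bits & 0x7F800000u) <= 0x4E800000u)   /* |f| < 2^31: plain conversion is safe */
        return (int32_t)f;
    if ((bits & 0x80000000u) == 0)             /* too large (or NaN) with sign bit clear */
        return INT32_MAX;                      /* 0x7fffffff */
    return INT32_MIN;                          /* 0x80000000 */
}

int main(void)
{
    printf("%d\n", cvt_w_saturate(123.9f));    /* 123 */
    printf("%d\n", cvt_w_saturate(3e9f));      /* 2147483647 */
    printf("%d\n", cvt_w_saturate(-3e9f));     /* -2147483648 */
    return 0;
}
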
@@ -273,7 +273,7 @@ void DIV_S() {

void LWC1() {
    u32 addr;
    addr = cpuRegs.GPR.r[_Rs_].UL[0] + (s32)(s16)(cpuRegs.code);
    addr = cpuRegs.GPR.r[_Rs_].UL[0] + (s32)(s16)(cpuRegs.code & 0xffff);
    if (addr & 0x00000003) { SysPrintf( "FPU (LWC1 Opcode): Invalid Memory Address\n" ); return; } // Should signal an exception?
    memRead32(addr, &fpuRegs.fpr[_Rt_].UL);
}
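
LWC1/SWC1 decode the signed 16-bit offset held in the low halfword of the MIPS instruction word; the added "& 0xffff" just makes that truncation explicit before the sign-extension. A standalone C illustration of the decode (the sample opcode value and base address are made up for the example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t code   = 0xc4410000u | (uint16_t)-16;        /* pretend LWC1 with offset -16 */
    int32_t  offset = (int32_t)(int16_t)(code & 0xffff);  /* low 16 bits, sign-extended */
    uint32_t base   = 0x00100000u;                        /* pretend base register value */
    printf("offset=%d addr=0x%08x\n", offset, base + (uint32_t)offset);
    return 0;
}
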
@@ -303,7 +303,7 @@ void MAX_S() {

void MFC1() {
    if ( !_Rt_ ) return;
    cpuRegs.GPR.r[_Rt_].UD[0] = (s32)_FsValUl_;
    cpuRegs.GPR.r[_Rt_].SD[0] = (s64)_FsValUl_;
}

void MIN_S() {
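
The CFC1/MFC1 interpreter changes above write the 64-bit GPR through its signed view (SD[0]) with a 64-bit cast instead of the unsigned view (UD[0]) with a 32-bit cast. In C, what lands in the upper half of the register then depends on whether the 32-bit source expression is signed or unsigned when it is widened; which case applies here depends on how fprc and the fpr fields are declared in the PCSX2 headers. A standalone sketch of the two possibilities (variable names are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t raw = 0x80000000u;                     /* a 32-bit value with the top bit set */
    int64_t from_unsigned = (int64_t)raw;           /* zero-extends:  0x0000000080000000 */
    int64_t from_signed   = (int64_t)(int32_t)raw;  /* sign-extends:  0xffffffff80000000 (typical compilers) */
    printf("%016llx\n%016llx\n",
           (unsigned long long)from_unsigned,
           (unsigned long long)from_signed);
    return 0;
}
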
@@ -392,7 +392,7 @@ void SUBA_S() {

void SWC1() {
    u32 addr;
    addr = cpuRegs.GPR.r[_Rs_].UL[0] + (s32)(s16)(cpuRegs.code);
    addr = cpuRegs.GPR.r[_Rs_].UL[0] + (s32)(s16)(cpuRegs.code & 0xffff);
    if (addr & 0x00000003) { SysPrintf( "FPU (SWC1 Opcode): Invalid Memory Address\n" ); return; } // Should signal an exception?
    memWrite32(addr, fpuRegs.fpr[_Rt_].UL);
}
@@ -1046,7 +1046,7 @@ BEGIN
                "Button",BS_AUTOCHECKBOX | WS_TABSTOP,15,162,418,10
    CONTROL "Disable VU Overflow Checks - *Checked = Disables overflow checks. ( Speedup! ) *Greyed = Extra overflow checks. ( Helps SPS, Slow! )",IDC_VU_OVERFLOWHACK,
                "Button",BS_AUTO3STATE | WS_TABSTOP,15,49,475,10
    CTEXT "These hacks will effect the speed of PCSX2 but possibly comprimise on compatability",IDC_HACKDESC,7,7,497,8
    CTEXT "These hacks will effect the speed of PCSX2 but possibly compromise on compatability",IDC_HACKDESC,7,7,497,8
    CONTROL "Tighter SPU2 Sync ( FFXII vids) - Slower, not very useful anymore.",IDC_SOUNDHACK,
                "Button",BS_AUTOCHECKBOX | WS_TABSTOP,15,245,421,10
    CONTROL "IOP Sync Hack (x2) - Doubles the cycle rate of the IOP. ( Speedup but breaks some games. )",IDC_SYNCHACK2,
pcsx2/x86/iFPU.c (464 changed lines)
@@ -129,9 +129,240 @@ void recCOP1_W( void ) {


//------------------------------------------------------------------
// FPU Opcodes!
// *FPU Opcodes!*
//------------------------------------------------------------------
#ifndef FPU_RECOMPILE


//------------------------------------------------------------------
// CFC1 / CTC1
//------------------------------------------------------------------
void recCFC1(void)
{
    if ( ! _Rt_ ) return;

    _eeOnWriteReg(_Rt_, 1);

    MOV32MtoR( EAX, (uptr)&fpuRegs.fprc[ _Fs_ ] );
    _deleteEEreg(_Rt_, 0);

    if(EEINST_ISLIVE1(_Rt_)) {
#ifdef __x86_64__
        CDQE();
        MOV64RtoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], RAX );
#else
        CDQ( );
        MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], EAX );
        MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], EDX );
#endif
    }
    else {
        EEINST_RESETHASLIVE1(_Rt_);
        MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], EAX );
    }
}

void recCTC1( void )
{
    if( GPR_IS_CONST1(_Rt_)) {
        MOV32ItoM((uptr)&fpuRegs.fprc[ _Fs_ ], g_cpuConstRegs[_Rt_].UL[0]);
    }
    else {
        int mmreg = _checkXMMreg(XMMTYPE_GPRREG, _Rt_, MODE_READ);
        if( mmreg >= 0 ) {
            SSEX_MOVD_XMM_to_M32((uptr)&fpuRegs.fprc[ _Fs_ ], mmreg);
        }
#ifdef __x86_64__
        else if( (mmreg = _checkX86reg(X86TYPE_GPR, _Rt_, MODE_READ)) >= 0 ) {
            MOV32RtoM((uptr)&fpuRegs.fprc[ _Fs_ ], mmreg);
        }
#else
        else if( (mmreg = _checkMMXreg(MMX_GPR+_Rt_, MODE_READ)) >= 0 ) {
            MOVDMMXtoM((uptr)&fpuRegs.fprc[ _Fs_ ], mmreg);
            SetMMXstate();
        }
#endif
        else {
            _deleteGPRtoXMMreg(_Rt_, 1);

#ifdef __x86_64__
            _deleteX86reg(X86TYPE_GPR, _Rt_, 1);
#else
            _deleteMMXreg(MMX_GPR+_Rt_, 1);
#endif
            MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
            MOV32RtoM( (uptr)&fpuRegs.fprc[ _Fs_ ], EAX );
        }
    }
}
//------------------------------------------------------------------


//------------------------------------------------------------------
// MFC1
//------------------------------------------------------------------
void recMFC1(void) {
    int regt, regs;
    if ( ! _Rt_ ) return;

    _eeOnWriteReg(_Rt_, 1);

    regs = _checkXMMreg(XMMTYPE_FPREG, _Fs_, MODE_READ);
    if( regs >= 0 ) {
        _deleteGPRtoXMMreg(_Rt_, 2);

#ifdef __x86_64__
        regt = _allocCheckGPRtoX86(g_pCurInstInfo, _Rt_, MODE_WRITE);

        if( regt >= 0 ) {

            if(EEINST_ISLIVE1(_Rt_)) {
                SSE2_MOVD_XMM_to_R(RAX, regs);
                // sign extend
                CDQE();
                MOV64RtoR(regt, RAX);
            }
            else {
                SSE2_MOVD_XMM_to_R(regt, regs);
                EEINST_RESETHASLIVE1(_Rt_);
            }
        }
#else
        regt = _allocCheckGPRtoMMX(g_pCurInstInfo, _Rt_, MODE_WRITE);

        if( regt >= 0 ) {
            SSE2_MOVDQ2Q_XMM_to_MM(regt, regs);

            if(EEINST_ISLIVE1(_Rt_)) _signExtendGPRtoMMX(regt, _Rt_, 0);
            else EEINST_RESETHASLIVE1(_Rt_);
        }
#endif
        else {
            if(EEINST_ISLIVE1(_Rt_)) {
                _signExtendXMMtoM((uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], regs, 0);
            }
            else {
                EEINST_RESETHASLIVE1(_Rt_);
                SSE_MOVSS_XMM_to_M32((uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], regs);
            }
        }
    }
#ifndef __x86_64__
    else if( (regs = _checkMMXreg(MMX_FPU+_Fs_, MODE_READ)) >= 0 ) {
        // convert to mmx reg
        mmxregs[regs].reg = MMX_GPR+_Rt_;
        mmxregs[regs].mode |= MODE_READ|MODE_WRITE;
        _signExtendGPRtoMMX(regs, _Rt_, 0);
    }
#endif
    else {
        regt = _checkXMMreg(XMMTYPE_GPRREG, _Rt_, MODE_READ);

        if( regt >= 0 ) {
            if( xmmregs[regt].mode & MODE_WRITE ) {
                SSE_MOVHPS_XMM_to_M64((uptr)&cpuRegs.GPR.r[_Rt_].UL[2], regt);
            }
            xmmregs[regt].inuse = 0;
        }
#ifdef __x86_64__
        else if( (regt = _allocCheckGPRtoX86(g_pCurInstInfo, _Rt_, MODE_WRITE)) >= 0 ) {

            if(EEINST_ISLIVE1(_Rt_)) {
                MOV32MtoR( RAX, (uptr)&fpuRegs.fpr[ _Fs_ ].UL );
                CDQE();
                MOV64RtoR(regt, RAX);
            }
            else {
                MOV32MtoR( regt, (uptr)&fpuRegs.fpr[ _Fs_ ].UL );
                EEINST_RESETHASLIVE1(_Rt_);
            }
        }
        else
#endif
        {

            _deleteEEreg(_Rt_, 0);
            MOV32MtoR( EAX, (uptr)&fpuRegs.fpr[ _Fs_ ].UL );

            if(EEINST_ISLIVE1(_Rt_)) {
#ifdef __x86_64__
                CDQE();
                MOV64RtoM((uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], RAX);
#else
                CDQ( );
                MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], EAX );
                MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], EDX );
#endif
            }
            else {
                EEINST_RESETHASLIVE1(_Rt_);
                MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], EAX );
            }
        }
    }
}
//------------------------------------------------------------------


//------------------------------------------------------------------
// MTC1
//------------------------------------------------------------------
void recMTC1(void)
{
    if( GPR_IS_CONST1(_Rt_) ) {
        _deleteFPtoXMMreg(_Fs_, 0);
        MOV32ItoM((uptr)&fpuRegs.fpr[ _Fs_ ].UL, g_cpuConstRegs[_Rt_].UL[0]);
    }
    else {
        int mmreg = _checkXMMreg(XMMTYPE_GPRREG, _Rt_, MODE_READ);
        if( mmreg >= 0 ) {
            if( g_pCurInstInfo->regs[_Rt_] & EEINST_LASTUSE ) {
                // transfer the reg directly
                _deleteGPRtoXMMreg(_Rt_, 2);
                _deleteFPtoXMMreg(_Fs_, 2);
                _allocFPtoXMMreg(mmreg, _Fs_, MODE_WRITE);
            }
            else {
                int mmreg2 = _allocCheckFPUtoXMM(g_pCurInstInfo, _Fs_, MODE_WRITE);
                if( mmreg2 >= 0 ) SSE_MOVSS_XMM_to_XMM(mmreg2, mmreg);
                else SSE_MOVSS_XMM_to_M32((uptr)&fpuRegs.fpr[ _Fs_ ].UL, mmreg);
            }
        }
#ifndef __x86_64__
        else if( (mmreg = _checkMMXreg(MMX_GPR+_Rt_, MODE_READ)) >= 0 ) {

            if( cpucaps.hasStreamingSIMD2Extensions ) {
                int mmreg2 = _allocCheckFPUtoXMM(g_pCurInstInfo, _Fs_, MODE_WRITE);
                if( mmreg2 >= 0 ) {
                    SetMMXstate();
                    SSE2_MOVQ2DQ_MM_to_XMM(mmreg2, mmreg);
                }
                else {
                    SetMMXstate();
                    MOVDMMXtoM((uptr)&fpuRegs.fpr[ _Fs_ ].UL, mmreg);
                }
            }
            else {
                _deleteFPtoXMMreg(_Fs_, 0);
                SetMMXstate();
                MOVDMMXtoM((uptr)&fpuRegs.fpr[ _Fs_ ].UL, mmreg);
            }
        }
#endif
        else {
            int mmreg2 = _allocCheckFPUtoXMM(g_pCurInstInfo, _Fs_, MODE_WRITE);

            if( mmreg2 >= 0 ) SSE_MOVSS_M32_to_XMM(mmreg2, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ]);
            else {
                MOV32MtoR(EAX, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ]);
                MOV32RtoM((uptr)&fpuRegs.fpr[ _Fs_ ].UL, EAX);
            }
        }
    }
}
//------------------------------------------------------------------


#ifndef FPU_RECOMPILE // If FPU_RECOMPILE is not defined, then use the interpreter opcodes. (CFC1, CTC1, MFC1, and MTC1 are special because they work specifically with the EE rec so they're defined above)

REC_FPUFUNC(ABS_S);
REC_FPUFUNC(ADD_S);
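
The recCFC1/recMFC1 blocks above emit CDQE on 64-bit hosts, or CDQ plus two 32-bit stores on 32-bit hosts, so the 32-bit FPU value reaches the EE GPR sign-extended to 64 bits. A standalone C sketch of the value those two instruction sequences compute (variable names are illustrative, and the signed right shift assumes the arithmetic-shift behaviour of mainstream compilers):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int32_t  eax   = INT32_MIN + 1;                /* bit pattern 0x80000001, as read from fprc/fpr */
    uint64_t gpr64 = (uint64_t)(int64_t)eax;       /* CDQE + single 64-bit store                    */
    uint32_t lo    = (uint32_t)eax;                /* CDQ path: UL[0] receives EAX                  */
    uint32_t hi    = (uint32_t)(eax >> 31);        /* UL[1] receives EDX (replicated sign bits)     */
    uint64_t gpr32 = ((uint64_t)hi << 32) | lo;
    printf("%016llx\n%016llx\n",
           (unsigned long long)gpr64,
           (unsigned long long)gpr32);             /* both print ffffffff80000001 */
    return 0;
}
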
@@ -559,70 +790,6 @@ FPURECOMPILE_CONSTCODE(C_LT, XMMINFO_READS|XMMINFO_READT);
//------------------------------------------------------------------


//------------------------------------------------------------------
// CFC1 / CTC1 XMM
//------------------------------------------------------------------
void recCFC1(void)
{
    if ( ! _Rt_ ) return;

    _eeOnWriteReg(_Rt_, 1);

    MOV32MtoR( EAX, (uptr)&fpuRegs.fprc[ _Fs_ ] );
    _deleteEEreg(_Rt_, 0);

    if(EEINST_ISLIVE1(_Rt_)) {
#ifdef __x86_64__
        CDQE();
        MOV64RtoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], RAX );
#else
        CDQ( );
        MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], EAX );
        MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], EDX );
#endif
    }
    else {
        EEINST_RESETHASLIVE1(_Rt_);
        MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], EAX );
    }
}

void recCTC1( void )
{
    if( GPR_IS_CONST1(_Rt_)) {
        MOV32ItoM((uptr)&fpuRegs.fprc[ _Fs_ ], g_cpuConstRegs[_Rt_].UL[0]);
    }
    else {
        int mmreg = _checkXMMreg(XMMTYPE_GPRREG, _Rt_, MODE_READ);
        if( mmreg >= 0 ) {
            SSEX_MOVD_XMM_to_M32((uptr)&fpuRegs.fprc[ _Fs_ ], mmreg);
        }
#ifdef __x86_64__
        else if( (mmreg = _checkX86reg(X86TYPE_GPR, _Rt_, MODE_READ)) >= 0 ) {
            MOV32RtoM((uptr)&fpuRegs.fprc[ _Fs_ ], mmreg);
        }
#else
        else if( (mmreg = _checkMMXreg(MMX_GPR+_Rt_, MODE_READ)) >= 0 ) {
            MOVDMMXtoM((uptr)&fpuRegs.fprc[ _Fs_ ], mmreg);
            SetMMXstate();
        }
#endif
        else {
            _deleteGPRtoXMMreg(_Rt_, 1);

#ifdef __x86_64__
            _deleteX86reg(X86TYPE_GPR, _Rt_, 1);
#else
            _deleteMMXreg(MMX_GPR+_Rt_, 1);
#endif
            MOV32MtoR( EAX, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ] );
            MOV32RtoM( (uptr)&fpuRegs.fprc[ _Fs_ ], EAX );
        }
    }
}
//------------------------------------------------------------------


//------------------------------------------------------------------
// CVT.x XMM
//------------------------------------------------------------------
@@ -1031,112 +1198,6 @@ FPURECOMPILE_CONSTCODE(MIN_S, XMMINFO_WRITED|XMMINFO_READS|XMMINFO_READT);
//------------------------------------------------------------------


//------------------------------------------------------------------
// MFC1 XMM
//------------------------------------------------------------------
void recMFC1(void) {
    int regt, regs;
    if ( ! _Rt_ ) return;

    _eeOnWriteReg(_Rt_, 1);

    regs = _checkXMMreg(XMMTYPE_FPREG, _Fs_, MODE_READ);
    if( regs >= 0 ) {
        _deleteGPRtoXMMreg(_Rt_, 2);

#ifdef __x86_64__
        regt = _allocCheckGPRtoX86(g_pCurInstInfo, _Rt_, MODE_WRITE);

        if( regt >= 0 ) {

            if(EEINST_ISLIVE1(_Rt_)) {
                SSE2_MOVD_XMM_to_R(RAX, regs);
                // sign extend
                CDQE();
                MOV64RtoR(regt, RAX);
            }
            else {
                SSE2_MOVD_XMM_to_R(regt, regs);
                EEINST_RESETHASLIVE1(_Rt_);
            }
        }
#else
        regt = _allocCheckGPRtoMMX(g_pCurInstInfo, _Rt_, MODE_WRITE);

        if( regt >= 0 ) {
            SSE2_MOVDQ2Q_XMM_to_MM(regt, regs);

            if(EEINST_ISLIVE1(_Rt_)) _signExtendGPRtoMMX(regt, _Rt_, 0);
            else EEINST_RESETHASLIVE1(_Rt_);
        }
#endif
        else {
            if(EEINST_ISLIVE1(_Rt_)) {
                _signExtendXMMtoM((uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], regs, 0);
            }
            else {
                EEINST_RESETHASLIVE1(_Rt_);
                SSE_MOVSS_XMM_to_M32((uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], regs);
            }
        }
    }
#ifndef __x86_64__
    else if( (regs = _checkMMXreg(MMX_FPU+_Fs_, MODE_READ)) >= 0 ) {
        // convert to mmx reg
        mmxregs[regs].reg = MMX_GPR+_Rt_;
        mmxregs[regs].mode |= MODE_READ|MODE_WRITE;
        _signExtendGPRtoMMX(regs, _Rt_, 0);
    }
#endif
    else {
        regt = _checkXMMreg(XMMTYPE_GPRREG, _Rt_, MODE_READ);

        if( regt >= 0 ) {
            if( xmmregs[regt].mode & MODE_WRITE ) {
                SSE_MOVHPS_XMM_to_M64((uptr)&cpuRegs.GPR.r[_Rt_].UL[2], regt);
            }
            xmmregs[regt].inuse = 0;
        }
#ifdef __x86_64__
        else if( (regt = _allocCheckGPRtoX86(g_pCurInstInfo, _Rt_, MODE_WRITE)) >= 0 ) {

            if(EEINST_ISLIVE1(_Rt_)) {
                MOV32MtoR( RAX, (uptr)&fpuRegs.fpr[ _Fs_ ].UL );
                CDQE();
                MOV64RtoR(regt, RAX);
            }
            else {
                MOV32MtoR( regt, (uptr)&fpuRegs.fpr[ _Fs_ ].UL );
                EEINST_RESETHASLIVE1(_Rt_);
            }
        }
        else
#endif
        {

            _deleteEEreg(_Rt_, 0);
            MOV32MtoR( EAX, (uptr)&fpuRegs.fpr[ _Fs_ ].UL );

            if(EEINST_ISLIVE1(_Rt_)) {
#ifdef __x86_64__
                CDQE();
                MOV64RtoM((uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], RAX);
#else
                CDQ( );
                MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], EAX );
                MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], EDX );
#endif
            }
            else {
                EEINST_RESETHASLIVE1(_Rt_);
                MOV32RtoM( (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], EAX );
            }
        }
    }
}
//------------------------------------------------------------------


//------------------------------------------------------------------
// MOV XMM
//------------------------------------------------------------------
@@ -1305,65 +1366,6 @@ FPURECOMPILE_CONSTCODE(MSUBA_S, XMMINFO_WRITEACC|XMMINFO_READACC|XMMINFO_READS|X
//------------------------------------------------------------------


//------------------------------------------------------------------
// MTC1 XMM
//------------------------------------------------------------------
void recMTC1(void)
{
    if( GPR_IS_CONST1(_Rt_) ) {
        _deleteFPtoXMMreg(_Fs_, 0);
        MOV32ItoM((uptr)&fpuRegs.fpr[ _Fs_ ].UL, g_cpuConstRegs[_Rt_].UL[0]);
    }
    else {
        int mmreg = _checkXMMreg(XMMTYPE_GPRREG, _Rt_, MODE_READ);
        if( mmreg >= 0 ) {
            if( g_pCurInstInfo->regs[_Rt_] & EEINST_LASTUSE ) {
                // transfer the reg directly
                _deleteGPRtoXMMreg(_Rt_, 2);
                _deleteFPtoXMMreg(_Fs_, 2);
                _allocFPtoXMMreg(mmreg, _Fs_, MODE_WRITE);
            }
            else {
                int mmreg2 = _allocCheckFPUtoXMM(g_pCurInstInfo, _Fs_, MODE_WRITE);
                if( mmreg2 >= 0 ) SSE_MOVSS_XMM_to_XMM(mmreg2, mmreg);
                else SSE_MOVSS_XMM_to_M32((uptr)&fpuRegs.fpr[ _Fs_ ].UL, mmreg);
            }
        }
#ifndef __x86_64__
        else if( (mmreg = _checkMMXreg(MMX_GPR+_Rt_, MODE_READ)) >= 0 ) {

            if( cpucaps.hasStreamingSIMD2Extensions ) {
                int mmreg2 = _allocCheckFPUtoXMM(g_pCurInstInfo, _Fs_, MODE_WRITE);
                if( mmreg2 >= 0 ) {
                    SetMMXstate();
                    SSE2_MOVQ2DQ_MM_to_XMM(mmreg2, mmreg);
                }
                else {
                    SetMMXstate();
                    MOVDMMXtoM((uptr)&fpuRegs.fpr[ _Fs_ ].UL, mmreg);
                }
            }
            else {
                _deleteFPtoXMMreg(_Fs_, 0);
                SetMMXstate();
                MOVDMMXtoM((uptr)&fpuRegs.fpr[ _Fs_ ].UL, mmreg);
            }
        }
#endif
        else {
            int mmreg2 = _allocCheckFPUtoXMM(g_pCurInstInfo, _Fs_, MODE_WRITE);

            if( mmreg2 >= 0 ) SSE_MOVSS_M32_to_XMM(mmreg2, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ]);
            else {
                MOV32MtoR(EAX, (uptr)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ]);
                MOV32RtoM((uptr)&fpuRegs.fpr[ _Fs_ ].UL, EAX);
            }
        }
    }
}
//------------------------------------------------------------------


//------------------------------------------------------------------
// MUL XMM
//------------------------------------------------------------------