mirror of https://github.com/PCSX2/pcsx2.git
aligned_stack: Finished conversion of all CALLFunc and _callFunctionArg1 functions to aligned-stack safe __fastcall invocations; only IOP's psxExecute and dispatchers remain to be done. (rev is fully functional in this state though, on Win32 at least)
git-svn-id: http://pcsx2.googlecode.com/svn/branches/aligned_stack@2045 96395faa-99c1-11dd-bbfe-3dabce05a288
This commit is contained in: parent e3a5229076, commit 3e1c1f939c
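The pattern applied throughout the diff below: the old helpers (_callPushArg / _callFunctionArg1..3) pushed arguments onto the stack, emitted CALLFunc, and then re-adjusted ESP afterwards, which complicates keeping the stack aligned across recompiled calls (the point of the aligned_stack branch). Under MSVC's __fastcall convention the first two 32-bit arguments travel in ECX and EDX instead, so the recompiler only needs to load those registers before the call. A minimal sketch of the idea, assuming the x86Emitter xMOV/xCALL helpers seen in this diff; the handler name below is made up for illustration and is not part of this commit:

// Hypothetical __fastcall target: its two u32 arguments arrive in ECX and EDX,
// so the generated code performs no pushes and needs no ESP fixup afterwards.
static void __fastcall ExampleHandler(u32 code, u32 inDelaySlot)
{
	// ... react to the event ...
}

static void recEmitExampleCall()
{
	// Old style (removed in this commit): push args, CALLFunc, then ADD ESP,n.
	// New style, as used throughout the hunks below:
	xMOV( ecx, 0x20 );        // first argument  -> ECX
	xMOV( edx, 1 );           // second argument -> EDX
	xCALL( ExampleHandler );  // __fastcall leaves the stack untouched and aligned
}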
@@ -31,11 +31,6 @@ typedef int x86IntRegType;
 #define EBP 5
 #define ESP 4
 
-#define X86ARG1 EAX
-#define X86ARG2 ECX
-#define X86ARG3 EDX
-#define X86ARG4 EBX
-
 #define MM0 0
 #define MM1 1
 #define MM2 2

@@ -38,7 +38,7 @@ __releaseinline void UpdateCP0Status() {
 cpuTestHwInts();
 }
 
-void WriteCP0Status(u32 value) {
+void __fastcall WriteCP0Status(u32 value) {
 cpuRegs.CP0.n.Status.val = value;
 UpdateCP0Status();
 }

@@ -221,7 +221,7 @@ __forceinline void COP0_UpdatePCCR()
 //if( cpuRegs.CP0.n.Status.b.ERL || !cpuRegs.PERF.n.pccr.b.CTE ) return;
 
 // TODO : Implement memory mode checks here (kernel/super/user)
-// For now we just assume user mode.
+// For now we just assume kernel mode.
 
 if( cpuRegs.PERF.n.pccr.val & 0xf )
 {

@@ -16,7 +16,7 @@
 #ifndef __COP0_H__
 #define __COP0_H__
 
-extern void WriteCP0Status(u32 value);
+extern void __fastcall WriteCP0Status(u32 value);
 extern void UpdateCP0Status();
 extern void WriteTLB(int i);
 extern void UnmapTLB(int i);

@@ -115,7 +115,7 @@ void psxMemShutdown()
 psxMemRLUT = NULL;
 }
 
-u8 iopMemRead8(u32 mem)
+u8 __fastcall iopMemRead8(u32 mem)
 {
 mem &= 0x1fffffff;
 u32 t = mem >> 16;

@@ -159,7 +159,7 @@ u8 iopMemRead8(u32 mem)
 }
 }
 
-u16 iopMemRead16(u32 mem)
+u16 __fastcall iopMemRead16(u32 mem)
 {
 mem &= 0x1fffffff;
 u32 t = mem >> 16;

@@ -225,7 +225,7 @@ u16 iopMemRead16(u32 mem)
 }
 }
 
-u32 iopMemRead32(u32 mem)
+u32 __fastcall iopMemRead32(u32 mem)
 {
 mem &= 0x1fffffff;
 u32 t = mem >> 16;

@@ -294,7 +294,7 @@ u32 iopMemRead32(u32 mem)
 }
 }
 
-void iopMemWrite8(u32 mem, u8 value)
+void __fastcall iopMemWrite8(u32 mem, u8 value)
 {
 mem &= 0x1fffffff;
 u32 t = mem >> 16;

@@ -356,7 +356,7 @@ void iopMemWrite8(u32 mem, u8 value)
 }
 }
 
-void iopMemWrite16(u32 mem, u16 value)
+void __fastcall iopMemWrite16(u32 mem, u16 value)
 {
 mem &= 0x1fffffff;
 u32 t = mem >> 16;

@@ -440,7 +440,7 @@ void iopMemWrite16(u32 mem, u16 value)
 }
 }
 
-void iopMemWrite32(u32 mem, u32 value)
+void __fastcall iopMemWrite32(u32 mem, u32 value)
 {
 mem &= 0x1fffffff;
 u32 t = mem >> 16;

@@ -75,24 +75,24 @@ static __forceinline u8* iopPhysMem( u32 addr )
 #define psxHu16(mem) (*(u16*)&psxH[(mem) & 0xffff])
 #define psxHu32(mem) (*(u32*)&psxH[(mem) & 0xffff])
 
-void psxMemAlloc();
-void psxMemReset();
-void psxMemShutdown();
+extern void psxMemAlloc();
+extern void psxMemReset();
+extern void psxMemShutdown();
 
-u8 iopMemRead8 (u32 mem);
-u16 iopMemRead16(u32 mem);
-u32 iopMemRead32(u32 mem);
-void iopMemWrite8 (u32 mem, u8 value);
-void iopMemWrite16(u32 mem, u16 value);
-void iopMemWrite32(u32 mem, u32 value);
+extern u8 __fastcall iopMemRead8 (u32 mem);
+extern u16 __fastcall iopMemRead16(u32 mem);
+extern u32 __fastcall iopMemRead32(u32 mem);
+extern void __fastcall iopMemWrite8 (u32 mem, u8 value);
+extern void __fastcall iopMemWrite16(u32 mem, u16 value);
+extern void __fastcall iopMemWrite32(u32 mem, u32 value);
 
 // x86reg and mmreg are always x86 regs
-void psxRecMemRead8();
-void psxRecMemRead16();
-void psxRecMemRead32();
-void psxRecMemWrite8();
-void psxRecMemWrite16();
-void psxRecMemWrite32();
+extern void psxRecMemRead8();
+extern void psxRecMemRead16();
+extern void psxRecMemRead32();
+extern void psxRecMemWrite8();
+extern void psxRecMemWrite16();
+extern void psxRecMemWrite32();
 
 namespace IopMemory
 {
@@ -70,7 +70,8 @@ void psxShutdown() {
 //psxCpu->Shutdown();
 }
 
-void psxException(u32 code, u32 bd) {
+void __fastcall psxException(u32 code, u32 bd)
+{
 // PSXCPU_LOG("psxException %x: %x, %x", code, psxHu32(0x1070), psxHu32(0x1074));
 //Console.WriteLn("!! psxException %x: %x, %x", code, psxHu32(0x1070), psxHu32(0x1074));
 // Set the Cause

@@ -194,11 +194,11 @@ extern R3000Acpu *psxCpu;
 extern R3000Acpu psxInt;
 extern R3000Acpu psxRec;
 
-void psxReset();
-void psxShutdown();
-void psxException(u32 code, u32 step);
+extern void psxReset();
+extern void psxShutdown();
+extern void __fastcall psxException(u32 code, u32 step);
 extern void psxBranchTest();
-void psxMemReset();
+extern void psxMemReset();
 
 // Subsets
 extern void (*psxBSC[64])();

@@ -50,7 +50,7 @@ void VU0MI_XGKICK() {
 void VU0MI_XTOP() {
 }
 
-void vu0ExecMicro(u32 addr) {
+void __fastcall vu0ExecMicro(u32 addr) {
 VUM_LOG("vu0ExecMicro %x", addr);
 
 if(VU0.VI[REG_VPU_STAT].UL & 0x1) {

@@ -46,7 +46,7 @@ void vu1ResetRegs()
 
 static int count;
 
-void vu1ExecMicro(u32 addr)
+void __fastcall vu1ExecMicro(u32 addr)
 {
 while(VU0.VI[REG_VPU_STAT].UL & 0x100)
 {

@@ -119,14 +119,14 @@ extern void (*VU1regs_UPPER_FD_11_TABLE[32])(_VURegsNum *VUregsn);
 
 // VU0
 extern void vu0ResetRegs();
-extern void vu0ExecMicro(u32 addr);
+extern void __fastcall vu0ExecMicro(u32 addr);
 extern void vu0Exec(VURegs* VU);
 extern void vu0Finish();
 extern void recResetVU0( void );
 
 // VU1
 extern void vu1ResetRegs();
-extern void vu1ExecMicro(u32 addr);
+extern void __fastcall vu1ExecMicro(u32 addr);
 extern void vu1Exec(VURegs* VU);
 
 void VU0_UPPER_FD_00();

@@ -22,7 +22,6 @@ SuperVUExecuteProgram:
 add esp, 4
 mov dword ptr [s_callstack], eax
 call SuperVUGetProgram
-mov s_vu1ebp, ebp
 mov s_vu1esi, esi
 mov s_vuedi, edi
 mov s_vuebx, ebx

@@ -39,7 +38,6 @@ SuperVUExecuteProgram:
 SuperVUEndProgram:
 // restore cpu state
 ldmxcsr g_sseMXCSR
-mov ebp, s_vu1ebp
 mov esi, s_vu1esi
 mov edi, s_vuedi
 mov ebx, s_vuebx

@@ -28,6 +28,7 @@
 #include "iCOP0.h"
 
 namespace Interp = R5900::Interpreter::OpcodeImpl::COP0;
+using namespace x86Emitter;
 
 namespace R5900 {
 namespace Dynarec {

@@ -163,12 +164,14 @@ void recMFC0( void )
 break;
 
 case 1:
-CALLFunc( (uptr)COP0_UpdatePCCR );
-MOV32MtoR(EAX, (uptr)&cpuRegs.PERF.n.pcr0);
+iFlushCall(FLUSH_NODESTROY);
+xCALL( COP0_UpdatePCCR );
+xMOV(eax, &cpuRegs.PERF.n.pcr0);
 break;
 case 3:
-CALLFunc( (uptr)COP0_UpdatePCCR );
-MOV32MtoR(EAX, (uptr)&cpuRegs.PERF.n.pcr1);
+iFlushCall(FLUSH_NODESTROY);
+xCALL( COP0_UpdatePCCR );
+xMOV(eax, &cpuRegs.PERF.n.pcr1);
 break;
 }
 _deleteEEreg(_Rt_, 0);

@@ -240,8 +243,8 @@ void recMTC0()
 {
 case 12:
 iFlushCall(FLUSH_NODESTROY);
-//_flushCachedRegs(); //NOTE: necessary?
-_callFunctionArg1((uptr)WriteCP0Status, MEM_CONSTTAG, g_cpuConstRegs[_Rt_].UL[0]);
+xMOV( ecx, g_cpuConstRegs[_Rt_].UL[0] );
+xCALL( WriteCP0Status );
 break;
 
 case 9:

@@ -254,9 +257,10 @@ void recMTC0()
 switch(_Imm_ & 0x3F)
 {
 case 0:
-CALLFunc( (uptr)COP0_UpdatePCCR );
-MOV32ItoM((uptr)&cpuRegs.PERF.n.pccr, g_cpuConstRegs[_Rt_].UL[0]);
-CALLFunc( (uptr)COP0_DiagnosticPCCR );
+iFlushCall(FLUSH_NODESTROY);
+xCALL( COP0_UpdatePCCR );
+xMOV( ptr32[&cpuRegs.PERF.n.pccr], g_cpuConstRegs[_Rt_].UL[0] );
+xCALL( COP0_DiagnosticPCCR );
 break;
 
 case 1:

@@ -288,8 +292,8 @@ void recMTC0()
 {
 case 12:
 iFlushCall(FLUSH_NODESTROY);
-//_flushCachedRegs(); //NOTE: necessary?
-_callFunctionArg1((uptr)WriteCP0Status, MEM_GPRTAG|_Rt_, 0);
+_eeMoveGPRtoR(ECX, _Rt_);
+xCALL( WriteCP0Status );
 break;
 
 case 9:

@@ -302,9 +306,10 @@ void recMTC0()
 switch(_Imm_ & 0x3F)
 {
 case 0:
-CALLFunc( (uptr)COP0_UpdatePCCR );
+iFlushCall(FLUSH_NODESTROY);
+xCALL( COP0_UpdatePCCR );
 _eeMoveGPRtoM((uptr)&cpuRegs.PERF.n.pccr, _Rt_);
-CALLFunc( (uptr)COP0_DiagnosticPCCR );
+xCALL( COP0_DiagnosticPCCR );
 break;
 
 case 1:
@@ -136,11 +136,13 @@ static void recCTC2(s32 info)
 MOV16ItoM((uptr)&VU0.VI[REG_FBRST].UL,g_cpuConstRegs[_Rt_].UL[0]&0x0c0c);
 break;
 case REG_CMSAR1: // REG_CMSAR1
-iFlushCall(FLUSH_NOCONST);// since CALLFunc
+iFlushCall(FLUSH_NOCONST);
 assert( _checkX86reg(X86TYPE_VI, REG_VPU_STAT, 0) < 0 &&
 _checkX86reg(X86TYPE_VI, REG_TPC, 0) < 0 );
 // Execute VU1 Micro SubRoutine
-_callFunctionArg1((uptr)vu1ExecMicro, MEM_CONSTTAG, g_cpuConstRegs[_Rt_].UL[0]&0xffff);
+xMOV( ecx, g_cpuConstRegs[_Rt_].UL[0]&0xffff );
+xCALL( vu1ExecMicro );
 break;
 default:
 {

@@ -191,10 +193,10 @@ static void recCTC2(s32 info)
 AND32ItoR(EAX,0x0C0C);
 MOV16RtoM((uptr)&VU0.VI[REG_FBRST].UL,EAX);
 break;
-case REG_CMSAR1: // REG_CMSAR1
+case REG_CMSAR1: // REG_CMSAR1 (Execute VU1micro Subroutine)
 iFlushCall(FLUSH_NOCONST);
-_eeMoveGPRtoR(EAX, _Rt_);
-_callFunctionArg1((uptr)vu1ExecMicro, MEM_X86TAG|EAX, 0); // Execute VU1 Micro SubRoutine
+_eeMoveGPRtoR(ECX, _Rt_);
+xCALL( vu1ExecMicro );
 break;
 default:
 _eeMoveGPRtoM((uptr)&VU0.VI[_Fs_].UL,_Rt_);

@@ -187,8 +187,6 @@ u8 _hasFreeXMMreg();
 void _freeXMMregs();
 int _getNumXMMwrite();
 
-// uses MEM_MMXTAG/MEM_XMMTAG to differentiate between the regs
-void _recPushReg(int mmreg);
 void _signExtendSFtoM(u32 mem);
 
 // returns new index of reg, lower 32 bits already in mmx

@@ -196,41 +194,8 @@ void _signExtendSFtoM(u32 mem);
 // a negative shift is for sign extension
 int _signExtendXMMtoM(u32 to, x86SSERegType from, int candestroy); // returns true if reg destroyed
 
-// Defines for passing register info
-// only valid during writes. If write128, then upper 64bits are in an mmxreg
-// (mmreg&0xf). Constant is used from gprreg ((mmreg>>16)&0x1f)
-enum memtag
-{
-MEM_EECONSTTAG = 0x0100, // argument is a GPR and comes from g_cpuConstRegs
-MEM_PSXCONSTTAG = 0x0200,
-MEM_MEMORYTAG = 0x0400,
-MEM_MMXTAG = 0x0800, // mmreg is mmxreg
-MEM_XMMTAG = 0x8000, // mmreg is xmmreg
-MEM_X86TAG = 0x4000, // ignored most of the time
-MEM_GPRTAG = 0x2000, // argument is a GPR reg
-MEM_CONSTTAG = 0x1000 // argument is a const
-};
-
-template<memtag tag> static __forceinline bool IS_REG(s32 reg)
-{
-return ((reg >= 0) && (reg & tag));
-}
-
-template<memtag tag> static __forceinline bool IS_REG(u32 reg)
-{
-return !!(reg & tag);
-}
-
-#define IS_EECONSTREG(reg) IS_REG<MEM_EECONSTTAG>(reg)
-#define IS_PSXCONSTREG(reg) IS_REG<MEM_PSXCONSTTAG>(reg)
-#define IS_MMXREG(reg) IS_REG<MEM_MMXTAG>(reg)
-#define IS_XMMREG(reg) IS_REG<MEM_XMMTAG>(reg)
-
-#define IS_X86REG(reg) IS_REG<MEM_X86TAG>(reg)
-#define IS_GPRREG(reg) IS_REG<MEM_GPRTAG>(reg)
-#define IS_CONSTREG(reg) IS_REG<MEM_CONSTTAG>(reg)
-#define IS_MEMORYREG(reg) IS_REG<MEM_MEMORYTAG>(reg)
+static const int MEM_MMXTAG = 0x002; // mmreg is mmxreg
+static const int MEM_XMMTAG = 0x004; // mmreg is xmmreg
 
 //////////////////////
 // Instruction Info //

@@ -425,12 +390,6 @@ extern u16 x86FpuState;
 //////////////////////////////////////////////////////////////////////////
 // Utility Functions -- that should probably be part of the Emitter.
 
-// see MEM_X defines for argX format
-extern void _callPushArg(u32 arg, uptr argmem); /// X86ARG is ignored for 32bit recs
-extern void _callFunctionArg1(uptr fn, u32 arg1, uptr arg1mem);
-extern void _callFunctionArg2(uptr fn, u32 arg1, u32 arg2, uptr arg1mem, uptr arg2mem);
-extern void _callFunctionArg3(uptr fn, u32 arg1, u32 arg2, u32 arg3, uptr arg1mem, uptr arg2mem, uptr arg3mem);
-
 // Moves 128 bits of data using EAX/EDX (used by iCOP2 only currently)
 extern void _recMove128MtoM(u32 to, u32 from);
 

@@ -350,7 +350,10 @@ void _psxMoveGPRtoRm(x86IntRegType to, int fromgpr)
 
 void _psxFlushCall(int flushtype)
 {
-_freeX86regs();
+// x86-32 ABI : These registers are not preserved across calls:
+_freeX86reg( EAX );
+_freeX86reg( ECX );
+_freeX86reg( EDX );
 
 if( flushtype & FLUSH_CACHED_REGS )
 _psxFlushConstRegs();

@@ -436,11 +439,6 @@ void psxRecompileCodeConst1(R3000AFNPTR constcode, R3000AFNPTR_INFO noconstcode)
 _psxFlushCall(FLUSH_NODESTROY);
 CALLFunc((uptr)zeroEx);
 }
-// Bios Call: Force the IOP to do a Branch Test ASAP.
-// Important! This helps prevent game freeze-ups during boot-up and stage loads.
-// Note: Fixes to cdvd have removed the need for this code.
-//MOV32MtoR( EAX, (uptr)&psxRegs.cycle );
-//MOV32RtoM( (uptr)&g_psxNextBranchCycle, EAX );
 }
 return;
 }

@@ -846,7 +844,9 @@ void rpsxSYSCALL()
 MOV32ItoM((uptr)&psxRegs.pc, psxpc - 4);
 _psxFlushCall(FLUSH_NODESTROY);
 
-_callFunctionArg2((uptr)psxException, MEM_CONSTTAG, MEM_CONSTTAG, 0x20, psxbranch==1);
+xMOV( ecx, 0x20 ); // exception code
+xMOV( edx, psxbranch==1 ); // branch delay slot?
+xCALL( psxException );
 
 CMP32ItoM((uptr)&psxRegs.pc, psxpc-4);
 j8Ptr[0] = JE8(0);

@@ -867,7 +867,9 @@ void rpsxBREAK()
 MOV32ItoM((uptr)&psxRegs.pc, psxpc - 4);
 _psxFlushCall(FLUSH_NODESTROY);
 
-_callFunctionArg2((uptr)psxBREAK, MEM_CONSTTAG, MEM_CONSTTAG, 0x24, psxbranch==1);
+xMOV( ecx, 0x24 ); // exception code
+xMOV( edx, psxbranch==1 ); // branch delay slot?
+xCALL( psxException );
 
 CMP32ItoM((uptr)&psxRegs.pc, psxpc-4);
 j8Ptr[0] = JE8(0);
@@ -594,321 +594,23 @@ void rpsxDIVU_(int info) { rpsxDIVsuper(info, 0); }
 
 PSXRECOMPILE_CONSTCODE3_PENALTY(DIVU, 1, psxInstCycles_Div);
 
-//// LoadStores
-#ifdef PCSX2_VIRTUAL_MEM
-
-// VM load store functions (fastest)
-
-//#define REC_SLOWREAD
-//#define REC_SLOWWRITE
-
-int _psxPrepareReg(int gprreg)
-{
-return 0;
-}
-
-static u32 s_nAddMemOffset = 0;
-
-static __forceinline void SET_HWLOC_R3000A() {
-x86SetJ8(j8Ptr[0]);
-SHR32ItoR(ECX, 3);
-if( s_nAddMemOffset ) ADD32ItoR(ECX, s_nAddMemOffset);
-}
-
-int rpsxSetMemLocation(int regs, int mmreg)
-{
-s_nAddMemOffset = 0;
-MOV32MtoR( ECX, (int)&psxRegs.GPR.r[ regs ] );
-
-if ( _Imm_ != 0 ) ADD32ItoR( ECX, _Imm_ );
-
-SHL32ItoR(ECX, 3);
-j8Ptr[0] = JS8(0);
-SHR32ItoR(ECX, 3);
-AND32ItoR(ECX, 0x1fffff); // 2Mb
-return 1;
-}
-
-void recLoad32(u32 bit, u32 sign)
-{
-int mmreg = -1;
-
-#ifdef REC_SLOWREAD
-_psxFlushConstReg(_Rs_);
-#else
-if( PSX_IS_CONST1( _Rs_ ) ) {
-// do const processing
-int ineax = 0;
-
-_psxOnWriteReg(_Rt_);
-mmreg = EAX;
-
-switch(bit) {
-case 8: ineax = psxRecMemConstRead8(mmreg, g_psxConstRegs[_Rs_]+_Imm_, sign); break;
-case 16:
-assert( (g_psxConstRegs[_Rs_]+_Imm_) % 2 == 0 );
-ineax = psxRecMemConstRead16(mmreg, g_psxConstRegs[_Rs_]+_Imm_, sign);
-break;
-case 32:
-assert( (g_psxConstRegs[_Rs_]+_Imm_) % 4 == 0 );
-ineax = psxRecMemConstRead32(mmreg, g_psxConstRegs[_Rs_]+_Imm_);
-break;
-}
-
-if( _Rt_ ) MOV32RtoM( (int)&psxRegs.GPR.r[ _Rt_ ], EAX );
-}
-else
-#endif
-{
-int dohw;
-int mmregs = _psxPrepareReg(_Rs_);
-
-_psxOnWriteReg(_Rt_);
-_psxDeleteReg(_Rt_, 0);
-
-dohw = rpsxSetMemLocation(_Rs_, mmregs);
-
-switch(bit) {
-case 8:
-if( sign ) MOVSX32Rm8toROffset(EAX, ECX, PS2MEM_PSX_+s_nAddMemOffset);
-else MOVZX32Rm8toROffset(EAX, ECX, PS2MEM_PSX_+s_nAddMemOffset);
-break;
-case 16:
-if( sign ) MOVSX32Rm16toROffset(EAX, ECX, PS2MEM_PSX_+s_nAddMemOffset);
-else MOVZX32Rm16toROffset(EAX, ECX, PS2MEM_PSX_+s_nAddMemOffset);
-break;
-case 32:
-MOV32RmtoROffset(EAX, ECX, PS2MEM_PSX_+s_nAddMemOffset);
-break;
-}
-
-if( dohw ) {
-j8Ptr[1] = JMP8(0);
-
-SET_HWLOC_R3000A();
-
-switch(bit) {
-case 8:
-CALLFunc( (int)psxRecMemRead8 );
-if( sign ) MOVSX32R8toR(EAX, EAX);
-else MOVZX32R8toR(EAX, EAX);
-break;
-case 16:
-CALLFunc( (int)psxRecMemRead16 );
-if( sign ) MOVSX32R16toR(EAX, EAX);
-else MOVZX32R16toR(EAX, EAX);
-break;
-case 32:
-CALLFunc( (int)psxRecMemRead32 );
-break;
-}
-
-x86SetJ8(j8Ptr[1]);
-}
-
-if( _Rt_ )
-MOV32RtoM( (int)&psxRegs.GPR.r[ _Rt_ ], EAX );
-}
-}
-
-void rpsxLB() { recLoad32(8, 1); }
-void rpsxLBU() { recLoad32(8, 0); }
-void rpsxLH() { recLoad32(16, 1); }
-void rpsxLHU() { recLoad32(16, 0); }
-void rpsxLW() { recLoad32(32, 0); }
-
-extern void rpsxMemConstClear(u32 mem);
-
-// check if mem is executable, and clear it
-__declspec(naked) void rpsxWriteMemClear()
-{
-_asm {
-mov edx, ecx
-shr edx, 14
-and dl, 0xfc
-add edx, psxRecLUT
-test dword ptr [edx], 0xffffffff
-jnz Clear32
-ret
-Clear32:
-// recLUT[mem>>16] + (mem&0xfffc)
-mov edx, dword ptr [edx]
-mov eax, ecx
-and eax, 0xfffc
-// edx += 2*eax
-shl eax, 1
-add edx, eax
-cmp dword ptr [edx], 0
-je ClearRet
-sub esp, 4
-mov dword ptr [esp], edx
-call psxRecClearMem
-add esp, 4
-ClearRet:
-ret
-}
-}
-
-extern u32 s_psxBlockCycles;
-void recStore(int bit)
-{
-#ifdef REC_SLOWWRITE
-_psxFlushConstReg(_Rs_);
-#else
-if( PSX_IS_CONST1( _Rs_ ) ) {
-u8* pjmpok;
-u32 addr = g_psxConstRegs[_Rs_]+_Imm_;
-int doclear = 0;
-
-if( !(addr & 0x10000000) ) {
-// check g_psxWriteOk
-CMP32ItoM((uptr)&g_psxWriteOk, 0);
-pjmpok = JE8(0);
-}
-
-switch(bit) {
-case 8:
-if( PSX_IS_CONST1(_Rt_) ) doclear = psxRecMemConstWrite8(addr, MEM_PSXCONSTTAG|(_Rt_<<16));
-else {
-_psxMoveGPRtoR(EAX, _Rt_);
-doclear = psxRecMemConstWrite8(addr, EAX);
-}
-
-break;
-
-case 16:
-assert( (addr)%2 == 0 );
-if( PSX_IS_CONST1(_Rt_) ) doclear = psxRecMemConstWrite16(addr, MEM_PSXCONSTTAG|(_Rt_<<16));
-else {
-_psxMoveGPRtoR(EAX, _Rt_);
-doclear = psxRecMemConstWrite16(addr, EAX);
-}
-
-break;
-
-case 32:
-assert( (addr)%4 == 0 );
-if( PSX_IS_CONST1(_Rt_) ) doclear = psxRecMemConstWrite32(addr, MEM_PSXCONSTTAG|(_Rt_<<16));
-else {
-_psxMoveGPRtoR(EAX, _Rt_);
-doclear = psxRecMemConstWrite32(addr, EAX);
-}
-
-break;
-}
-
-if( !(addr & 0x10000000) ) {
-if( doclear ) rpsxMemConstClear((addr)&~3);
-x86SetJ8(pjmpok);
-}
-}
-else
-#endif
-{
-int dohw;
-int mmregs = _psxPrepareReg(_Rs_);
-dohw = rpsxSetMemLocation(_Rs_, mmregs);
-
-CMP32ItoM((uptr)&g_psxWriteOk, 0);
-u8* pjmpok = JE8(0);
-
-if( PSX_IS_CONST1( _Rt_ ) ) {
-switch(bit) {
-case 8: MOV8ItoRmOffset(ECX, g_psxConstRegs[_Rt_], PS2MEM_PSX_+s_nAddMemOffset); break;
-case 16: MOV16ItoRmOffset(ECX, g_psxConstRegs[_Rt_], PS2MEM_PSX_+s_nAddMemOffset); break;
-case 32: MOV32ItoRmOffset(ECX, g_psxConstRegs[_Rt_], PS2MEM_PSX_+s_nAddMemOffset); break;
-}
-}
-else {
-switch(bit) {
-case 8:
-MOV8MtoR(EAX, (int)&psxRegs.GPR.r[ _Rt_ ]);
-MOV8RtoRmOffset(ECX, EAX, PS2MEM_PSX_+s_nAddMemOffset);
-break;
-
-case 16:
-MOV16MtoR(EAX, (int)&psxRegs.GPR.r[ _Rt_ ]);
-MOV16RtoRmOffset(ECX, EAX, PS2MEM_PSX_+s_nAddMemOffset);
-break;
-
-case 32:
-MOV32MtoR(EAX, (int)&psxRegs.GPR.r[ _Rt_ ]);
-MOV32RtoRmOffset(ECX, EAX, PS2MEM_PSX_+s_nAddMemOffset);
-break;
-}
-}
-
-if( s_nAddMemOffset ) ADD32ItoR(ECX, s_nAddMemOffset);
-CMP32MtoR(ECX, (uptr)&g_psxMaxRecMem);
-
-j8Ptr[1] = JAE8(0);
-
-if( bit < 32 ) AND8ItoR(ECX, 0xfc);
-CALLFunc((u32)rpsxWriteMemClear);
-
-if( dohw ) {
-j8Ptr[2] = JMP8(0);
-
-SET_HWLOC_R3000A();
-
-if( PSX_IS_CONST1(_Rt_) ) {
-switch(bit) {
-case 8: MOV8ItoR(EAX, g_psxConstRegs[_Rt_]); break;
-case 16: MOV16ItoR(EAX, g_psxConstRegs[_Rt_]); break;
-case 32: MOV32ItoR(EAX, g_psxConstRegs[_Rt_]); break;
-}
-}
-else {
-switch(bit) {
-case 8: MOV8MtoR(EAX, (int)&psxRegs.GPR.r[ _Rt_ ]); break;
-case 16: MOV16MtoR(EAX, (int)&psxRegs.GPR.r[ _Rt_ ]); break;
-case 32: MOV32MtoR(EAX, (int)&psxRegs.GPR.r[ _Rt_ ]); break;
-}
-}
-
-if( s_nAddMemOffset != 0 ) ADD32ItoR(ECX, s_nAddMemOffset);
-
-// some type of hardware write
-switch(bit) {
-case 8: CALLFunc( (int)psxRecMemWrite8 ); break;
-case 16: CALLFunc( (int)psxRecMemWrite16 ); break;
-case 32: CALLFunc( (int)psxRecMemWrite32 ); break;
-}
-
-x86SetJ8(j8Ptr[2]);
-}
-
-x86SetJ8(j8Ptr[1]);
-x86SetJ8(pjmpok);
-}
-}
-
-void rpsxSB() { recStore(8); }
-void rpsxSH() { recStore(16); }
-void rpsxSW() { recStore(32); }
-
-REC_FUNC(LWL);
-REC_FUNC(LWR);
-REC_FUNC(SWL);
-REC_FUNC(SWR);
-
-#else
-
 // TLB loadstore functions
 REC_FUNC(LWL);
 REC_FUNC(LWR);
 REC_FUNC(SWL);
 REC_FUNC(SWR);
 
+using namespace x86Emitter;
 
 static void rpsxLB()
 {
 _psxDeleteReg(_Rs_, 1);
 _psxOnWriteReg(_Rt_);
 _psxDeleteReg(_Rt_, 0);
 
-MOV32MtoR(X86ARG1, (uptr)&psxRegs.GPR.r[_Rs_]);
-if (_Imm_) ADD32ItoR(X86ARG1, _Imm_);
-_callFunctionArg1((uptr)iopMemRead8, X86ARG1|MEM_X86TAG, 0);
+MOV32MtoR(ECX, (uptr)&psxRegs.GPR.r[_Rs_]);
+if (_Imm_) ADD32ItoR(ECX, _Imm_);
+xCALL( iopMemRead8 ); // returns value in EAX
 if (_Rt_) {
 MOVSX32R8toR(EAX, EAX);
 MOV32RtoM((uptr)&psxRegs.GPR.r[_Rt_], EAX);
@@ -922,9 +624,9 @@ static void rpsxLBU()
 _psxOnWriteReg(_Rt_);
 _psxDeleteReg(_Rt_, 0);
 
-MOV32MtoR(X86ARG1, (uptr)&psxRegs.GPR.r[_Rs_]);
-if (_Imm_) ADD32ItoR(X86ARG1, _Imm_);
-_callFunctionArg1((uptr)iopMemRead8, X86ARG1|MEM_X86TAG, 0);
+MOV32MtoR(ECX, (uptr)&psxRegs.GPR.r[_Rs_]);
+if (_Imm_) ADD32ItoR(ECX, _Imm_);
+xCALL( iopMemRead8 ); // returns value in EAX
 if (_Rt_) {
 MOVZX32R8toR(EAX, EAX);
 MOV32RtoM((uptr)&psxRegs.GPR.r[_Rt_], EAX);

@@ -938,9 +640,9 @@ static void rpsxLH()
 _psxOnWriteReg(_Rt_);
 _psxDeleteReg(_Rt_, 0);
 
-MOV32MtoR(X86ARG1, (uptr)&psxRegs.GPR.r[_Rs_]);
-if (_Imm_) ADD32ItoR(X86ARG1, _Imm_);
-_callFunctionArg1((uptr)iopMemRead16, X86ARG1|MEM_X86TAG, 0);
+MOV32MtoR(ECX, (uptr)&psxRegs.GPR.r[_Rs_]);
+if (_Imm_) ADD32ItoR(ECX, _Imm_);
+xCALL( iopMemRead16 ); // returns value in EAX
 if (_Rt_) {
 MOVSX32R16toR(EAX, EAX);
 MOV32RtoM((uptr)&psxRegs.GPR.r[_Rt_], EAX);

@@ -954,9 +656,9 @@ static void rpsxLHU()
 _psxOnWriteReg(_Rt_);
 _psxDeleteReg(_Rt_, 0);
 
-MOV32MtoR(X86ARG1, (uptr)&psxRegs.GPR.r[_Rs_]);
-if (_Imm_) ADD32ItoR(X86ARG1, _Imm_);
-_callFunctionArg1((uptr)iopMemRead16, X86ARG1|MEM_X86TAG, 0);
+MOV32MtoR(ECX, (uptr)&psxRegs.GPR.r[_Rs_]);
+if (_Imm_) ADD32ItoR(ECX, _Imm_);
+xCALL( iopMemRead16 ); // returns value in EAX
 if (_Rt_) {
 MOVZX32R16toR(EAX, EAX);
 MOV32RtoM((uptr)&psxRegs.GPR.r[_Rt_], EAX);

@@ -971,13 +673,13 @@ static void rpsxLW()
 _psxDeleteReg(_Rt_, 0);
 
 _psxFlushCall(FLUSH_EVERYTHING);
-MOV32MtoR(X86ARG1, (uptr)&psxRegs.GPR.r[_Rs_]);
-if (_Imm_) ADD32ItoR(X86ARG1, _Imm_);
+MOV32MtoR(ECX, (uptr)&psxRegs.GPR.r[_Rs_]);
+if (_Imm_) ADD32ItoR(ECX, _Imm_);
 
-TEST32ItoR(X86ARG1, 0x10000000);
+TEST32ItoR(ECX, 0x10000000);
 j8Ptr[0] = JZ8(0);
 
-_callFunctionArg1((uptr)iopMemRead32, X86ARG1|MEM_X86TAG, 0);
+xCALL( iopMemRead32 ); // returns value in EAX
 if (_Rt_) {
 MOV32RtoM((uptr)&psxRegs.GPR.r[_Rt_], EAX);
 }

@@ -985,11 +687,11 @@ static void rpsxLW()
 x86SetJ8(j8Ptr[0]);
 
 // read from psM directly
-AND32ItoR(X86ARG1, 0x1fffff);
-ADD32ItoR(X86ARG1, (uptr)psxM);
+AND32ItoR(ECX, 0x1fffff);
+ADD32ItoR(ECX, (uptr)psxM);
 
-MOV32RmtoR( X86ARG1, X86ARG1 );
-MOV32RtoM( (uptr)&psxRegs.GPR.r[_Rt_], X86ARG1);
+MOV32RmtoR( ECX, ECX );
+MOV32RtoM( (uptr)&psxRegs.GPR.r[_Rt_], ECX);
 
 x86SetJ8(j8Ptr[1]);
 PSX_DEL_CONST(_Rt_);

@@ -1000,9 +702,10 @@ static void rpsxSB()
 _psxDeleteReg(_Rs_, 1);
 _psxDeleteReg(_Rt_, 1);
 
-MOV32MtoR(X86ARG1, (uptr)&psxRegs.GPR.r[_Rs_]);
-if (_Imm_) ADD32ItoR(X86ARG1, _Imm_);
-_callFunctionArg2((uptr)iopMemWrite8, X86ARG1|MEM_X86TAG, MEM_MEMORYTAG, 0, (uptr)&psxRegs.GPR.r[_Rt_]);
+MOV32MtoR(ECX, (uptr)&psxRegs.GPR.r[_Rs_]);
+if (_Imm_) ADD32ItoR(ECX, _Imm_);
+xMOV( edx, &psxRegs.GPR.r[_Rt_] );
+xCALL( iopMemWrite8 );
 }
 
 static void rpsxSH()

@@ -1010,9 +713,10 @@ static void rpsxSH()
 _psxDeleteReg(_Rs_, 1);
 _psxDeleteReg(_Rt_, 1);
 
-MOV32MtoR(X86ARG1, (uptr)&psxRegs.GPR.r[_Rs_]);
-if (_Imm_) ADD32ItoR(X86ARG1, _Imm_);
-_callFunctionArg2((uptr)iopMemWrite16, X86ARG1|MEM_X86TAG, MEM_MEMORYTAG, 0, (uptr)&psxRegs.GPR.r[_Rt_]);
+MOV32MtoR(ECX, (uptr)&psxRegs.GPR.r[_Rs_]);
+if (_Imm_) ADD32ItoR(ECX, _Imm_);
+xMOV( edx, &psxRegs.GPR.r[_Rt_] );
+xCALL( iopMemWrite16 );
 }
 
 static void rpsxSW()

@@ -1020,13 +724,12 @@ static void rpsxSW()
 _psxDeleteReg(_Rs_, 1);
 _psxDeleteReg(_Rt_, 1);
 
-MOV32MtoR(X86ARG1, (uptr)&psxRegs.GPR.r[_Rs_]);
-if (_Imm_) ADD32ItoR(X86ARG1, _Imm_);
-_callFunctionArg2((uptr)iopMemWrite32, X86ARG1|MEM_X86TAG, MEM_MEMORYTAG, 0, (uptr)&psxRegs.GPR.r[_Rt_]);
+MOV32MtoR(ECX, (uptr)&psxRegs.GPR.r[_Rs_]);
+if (_Imm_) ADD32ItoR(ECX, _Imm_);
+xMOV( edx, &psxRegs.GPR.r[_Rt_] );
+xCALL( iopMemWrite32 );
 }
 
-#endif // end load store
-
 //// SLL
 void rpsxSLL_const()
 {

@@ -236,7 +236,8 @@ void _flushConstRegs()
 int _allocX86reg(int x86reg, int type, int reg, int mode)
 {
 int i;
-assert( reg >= 0 && reg < 32 );
+pxAssertDev( reg >= 0 && reg < 32, "Register index out of bounds." );
+pxAssertDev( x86reg != ESP && x86reg != EBP, "Allocation of ESP/EBP is not allowed!" );
 
 // don't alloc EAX and ESP,EBP if MODE_NOFRAME
 int oldmode = mode;

@@ -448,14 +449,10 @@ void _freeX86reg(int x86reg)
 x86regs[x86reg].inuse = 0;
 }
 
-void _freeX86regs() {
-int i;
-
-for (i=0; i<iREGCNT_GPR; i++) {
-if (!x86regs[i].inuse) continue;
-
+void _freeX86regs()
+{
+for (int i=0; i<iREGCNT_GPR; i++)
 _freeX86reg(i);
-}
 }
 
 // MMX Caching

@@ -863,88 +860,6 @@ void SetFPUstate() {
 }
 }
 
-__forceinline void _callPushArg(u32 arg, uptr argmem)
-{
-if( IS_X86REG(arg) ) {
-PUSH32R(arg&0xff);
-}
-else if( IS_CONSTREG(arg) ) {
-PUSH32I(argmem);
-}
-else if( IS_GPRREG(arg) ) {
-SUB32ItoR(ESP, 4);
-_eeMoveGPRtoRm(ESP, arg&0xff);
-}
-else if( IS_XMMREG(arg) ) {
-SUB32ItoR(ESP, 4);
-SSEX_MOVD_XMM_to_Rm(ESP, arg&0xf);
-}
-else if( IS_MMXREG(arg) ) {
-SUB32ItoR(ESP, 4);
-MOVD32MMXtoRm(ESP, arg&0xf);
-}
-else if( IS_EECONSTREG(arg) ) {
-PUSH32I(g_cpuConstRegs[(arg>>16)&0x1f].UL[0]);
-}
-else if( IS_PSXCONSTREG(arg) ) {
-PUSH32I(g_psxConstRegs[(arg>>16)&0x1f]);
-}
-else if( IS_MEMORYREG(arg) ) {
-PUSH32M(argmem);
-}
-else {
-assert( (arg&0xfff0) == 0 );
-// assume it is a GPR reg
-PUSH32R(arg&0xf);
-}
-}
-
-__forceinline void _callFunctionArg1(uptr fn, u32 arg1, uptr arg1mem)
-{
-_callPushArg(arg1, arg1mem);
-CALLFunc((uptr)fn);
-ADD32ItoR(ESP, 4);
-}
-
-__forceinline void _callFunctionArg2(uptr fn, u32 arg1, u32 arg2, uptr arg1mem, uptr arg2mem)
-{
-_callPushArg(arg2, arg2mem);
-_callPushArg(arg1, arg1mem);
-CALLFunc((uptr)fn);
-ADD32ItoR(ESP, 8);
-}
-
-__forceinline void _callFunctionArg3(uptr fn, u32 arg1, u32 arg2, u32 arg3, uptr arg1mem, uptr arg2mem, uptr arg3mem)
-{
-_callPushArg(arg3, arg3mem);
-_callPushArg(arg2, arg2mem);
-_callPushArg(arg1, arg1mem);
-CALLFunc((uptr)fn);
-ADD32ItoR(ESP, 12);
-}
-
-void _recPushReg(int mmreg)
-{
-if( IS_XMMREG(mmreg) ) {
-SUB32ItoR(ESP, 4);
-SSEX_MOVD_XMM_to_Rm(ESP, mmreg&0xf);
-}
-else if( IS_MMXREG(mmreg) ) {
-SUB32ItoR(ESP, 4);
-MOVD32MMXtoRm(ESP, mmreg&0xf);
-}
-else if( IS_EECONSTREG(mmreg) ) {
-PUSH32I(g_cpuConstRegs[(mmreg>>16)&0x1f].UL[0]);
-}
-else if( IS_PSXCONSTREG(mmreg) ) {
-PUSH32I(g_psxConstRegs[(mmreg>>16)&0x1f]);
-}
-else {
-assert( (mmreg&0xfff0) == 0 );
-PUSH32R(mmreg);
-}
-}
-
 void _signExtendSFtoM(u32 mem)
 {
 LAHF();
@@ -371,7 +371,7 @@ static void _DynGen_StackFrameCheck()
 xCMP( esp, &s_store_esp );
 xForwardJE8 skipassert_esp;
 
-xXOR( ecx, ecx ); // 0 specifies ESI
+xXOR( ecx, ecx ); // 0 specifies ESP
 xMOV( edx, esp );
 xCALL( StackFrameCheckFailed );
 xMOV( esp, &s_store_esp ); // half-hearted frame recovery attempt!

@@ -867,6 +867,12 @@ void CheckForBIOSEnd()
 {
 xMOV( eax, &cpuRegs.pc );
 
+/*xCMP( eax, 0x00200008 );
+xJE(ExitRec);
+
+xCMP( eax, 0x00100008 );
+xJE(ExitRec);*/
+
 xCMP( eax, 0x00200008 );
 xForwardJE8 CallExitRec;
 

@@ -975,7 +981,10 @@ void LoadBranchState()
 
 void iFlushCall(int flushtype)
 {
-_freeX86regs();
+// Free registers that are not saved across function calls (x86-32 ABI):
+_freeX86reg(EAX);
+_freeX86reg(ECX);
+_freeX86reg(EDX);
 
 if( flushtype & FLUSH_FREE_XMM )
 _freeXMMregs();

@@ -1076,6 +1085,8 @@ static u32 eeScaleBlockCycles()
 // setting "branch = 2";
 static void iBranchTest(u32 newpc)
 {
+_DynGen_StackFrameCheck();
+
 if( g_ExecBiosHack ) CheckForBIOSEnd();
 
 // Check the Event scheduler if our "cycle target" has been reached.

@@ -1306,7 +1317,7 @@ void __fastcall dyna_block_discard(u32 start,u32 sz)
 // EBP/stackframe before issuing a RET, else esp/ebp will be incorrect.
 
 #ifdef _MSC_VER
-__asm leave; __asm jmp [ExitRecompiledCode]
+__asm leave __asm jmp [ExitRecompiledCode]
 #else
 __asm__ __volatile__( "leave\n jmp *%[exitRec]\n" : : [exitRec] "m" (ExitRecompiledCode) : );
 #endif

@@ -1321,7 +1332,7 @@ void __fastcall dyna_page_reset(u32 start,u32 sz)
 mmap_MarkCountedRamPage( start );
 
 #ifdef _MSC_VER
-__asm leave; __asm jmp [ExitRecompiledCode]
+__asm leave __asm jmp [ExitRecompiledCode]
 #else
 __asm__ __volatile__( "leave\n jmp *%[exitRec]\n" : : [exitRec] "m" (ExitRecompiledCode) : );
 #endif

@@ -19,6 +19,8 @@
 #include "iR5900.h"
 #include "R5900OpcodeTables.h"
 
+using namespace x86Emitter;
+
 extern void _vu0WaitMicro();
 extern void _vu0FinishMicro();
 

@@ -311,14 +313,12 @@ static void recCTC2() {
 }
 else MOV32ItoM((uptr)&microVU0.regs->VI[_Rd_].UL, 0);
 break;
-case REG_CMSAR1:
+case REG_CMSAR1: // Execute VU1 Micro SubRoutine
 if (_Rt_) {
-MOV32MtoR(EAX, (uptr)&cpuRegs.GPR.r[_Rt_].UL[0]);
-PUSH32R(EAX);
+MOV32MtoR(ECX, (uptr)&cpuRegs.GPR.r[_Rt_].UL[0]);
 }
-else PUSH32I(0);
-CALLFunc((uptr)vu1ExecMicro); // Execute VU1 Micro SubRoutine
-ADD32ItoR(ESP, 4);
+else XOR32RtoR(ECX,ECX);
+xCALL(vu1ExecMicro);
 break;
 case REG_FBRST:
 if (!_Rt_) {

@@ -43,6 +43,7 @@
 #include "AppConfig.h"
 
 using namespace std;
+using namespace x86Emitter;
 
 // temporary externs
 extern void iDumpVU0Registers();

@@ -2526,8 +2527,8 @@ static void SuperVUAssignRegs()
 int s_writeQ, s_writeP;
 
 // declare the saved registers
-uptr s_vu1esp, s_callstack;//, s_vu1esp
-uptr s_vu1ebp, s_vuebx, s_vuedi, s_vu1esi;
+uptr s_vu1esp, s_callstack;
+uptr s_vuebx, s_vuedi, s_vu1esi;
 
 static int s_recWriteQ, s_recWriteP; // wait times during recompilation
 static int s_needFlush; // first bit - Q, second bit - P, third bit - Q has been written, fourth bit - P has been written

@@ -2576,6 +2577,11 @@ void SuperVUCleanupProgram(u32 startpc, int vuindex)
 // entry point of all vu programs from emulator calls
 __declspec(naked) void SuperVUExecuteProgram(u32 startpc, int vuindex)
 {
+// Stackframe setup for the recompiler:
+// We rewind the stack 4 bytes, which places the parameters of this function before
+// any calls we might make from recompiled code. The return address for this function
+// call is subsequently stored in s_callstack.
+
 __asm
 {
 mov eax, dword ptr [esp]

@@ -2585,20 +2591,15 @@ __declspec(naked) void SuperVUExecuteProgram(u32 startpc, int vuindex)
 call SuperVUGetProgram
 
 // save cpu state
-mov s_vu1ebp, ebp
-mov s_vu1esi, esi // have to save even in Release
-mov s_vuedi, edi // have to save even in Release
+//mov s_vu1ebp, ebp
+mov s_vu1esi, esi
+mov s_vuedi, edi
 mov s_vuebx, ebx
-}
 #ifdef PCSX2_DEBUG
-__asm
-{
 mov s_vu1esp, esp
-}
 #endif
 
-__asm
-{
 //stmxcsr s_ssecsr
 ldmxcsr g_sseVUMXCSR
 

@@ -2618,7 +2619,7 @@ __declspec(naked) static void SuperVUEndProgram()
 // restore cpu state
 ldmxcsr g_sseMXCSR
 
-mov ebp, s_vu1ebp
+//mov ebp, s_vu1ebp
 mov esi, s_vu1esi
 mov edi, s_vuedi
 mov ebx, s_vuebx

@@ -3105,6 +3106,8 @@ void VuBaseBlock::Recompile()
 _x86regs* endx86 = &s_vecRegArray[nEndx86];
 for (int i = 0; i < iREGCNT_GPR; ++i)
 {
+if( i == ESP || i == EBP ) continue;
+
 if (endx86[i].inuse)
 {
 

@@ -3652,7 +3655,8 @@ void VuInstruction::Recompile(list<VuInstruction>::iterator& itinst, u32 vuxyz)
 TEST32ItoM((uptr)&VU0.VI[REG_FBRST].UL, s_vu ? 0x400 : 0x004);
 u8* ptr = JZ8(0);
 OR32ItoM((uptr)&VU0.VI[REG_VPU_STAT].UL, s_vu ? 0x200 : 0x002);
-_callFunctionArg1((uptr)hwIntcIrq, MEM_CONSTTAG, s_vu ? INTC_VU1 : INTC_VU0);
+xMOV( ecx, s_vu ? INTC_VU1 : INTC_VU0 );
+xCALL( hwIntcIrq );
 x86SetJ8(ptr);
 }
 if (ptr[1] & 0x08000000) // T flag

@@ -3660,7 +3664,8 @@ void VuInstruction::Recompile(list<VuInstruction>::iterator& itinst, u32 vuxyz)
 TEST32ItoM((uptr)&VU0.VI[REG_FBRST].UL, s_vu ? 0x800 : 0x008);
 u8* ptr = JZ8(0);
 OR32ItoM((uptr)&VU0.VI[REG_VPU_STAT].UL, s_vu ? 0x400 : 0x004);
-_callFunctionArg1((uptr)hwIntcIrq, MEM_CONSTTAG, s_vu ? INTC_VU1 : INTC_VU0);
+xMOV( ecx, s_vu ? INTC_VU1 : INTC_VU0 );
+xCALL( hwIntcIrq );
 x86SetJ8(ptr);
 }
 

@@ -4379,7 +4384,7 @@ void recVUMI_XGKICK(VURegs *VU, int info)
 recVUMI_XGKICK_(VU);
 }
 
-int isreg = _allocX86reg(X86ARG2, X86TYPE_VI | (s_vu ? X86TYPE_VU1 : 0), _Is_, MODE_READ);
+int isreg = _allocX86reg(ECX, X86TYPE_VI | (s_vu ? X86TYPE_VU1 : 0), _Is_, MODE_READ);
 _freeX86reg(isreg); // flush
 x86regs[isreg].inuse = 1;
 x86regs[isreg].type = X86TYPE_VITEMP;