From 7893daa1a46900f05b27d97e5c61c714fe5b0517 Mon Sep 17 00:00:00 2001 From: cottonvibes Date: Mon, 7 Mar 2011 20:06:49 +0000 Subject: [PATCH] microVU: Merge some changes I did in the ReorderingMTGS branch with trunk. git-svn-id: http://pcsx2.googlecode.com/svn/trunk@4402 96395faa-99c1-11dd-bbfe-3dabce05a288 --- pcsx2/VUmicro.h | 8 ++++ pcsx2/x86/microVU.cpp | 11 ++++- pcsx2/x86/microVU.h | 39 +++++++++-------- pcsx2/x86/microVU_Execute.inl | 80 +++++++++++++++++++++++++++++++---- pcsx2/x86/microVU_Lower.inl | 5 +++ 5 files changed, 117 insertions(+), 26 deletions(-) diff --git a/pcsx2/VUmicro.h b/pcsx2/VUmicro.h index 3a5d09bbd2..a4fe2d4cc0 100644 --- a/pcsx2/VUmicro.h +++ b/pcsx2/VUmicro.h @@ -151,6 +151,11 @@ public: virtual void ExecuteBlock(bool startUp=0); static void __fastcall ExecuteBlockJIT(BaseVUmicroCPU* cpu); + + // VU1 sometimes needs to break execution on XGkick Path1 transfers if + // there is another gif path 2/3 transfer already taking place. + // Use this method to resume execution of VU1. + virtual void ResumeXGkick() {} }; @@ -194,6 +199,7 @@ public: void Step(); void Execute(u32 cycles); void Clear(u32 addr, u32 size) {} + void ResumeXGkick() {} uint GetCacheReserve() const { return 0; } void SetCacheReserve( uint reserveInMegs ) const {} @@ -238,6 +244,7 @@ public: void Execute(u32 cycles); void Clear(u32 addr, u32 size); void Vsync() throw(); + void ResumeXGkick(); uint GetCacheReserve() const; void SetCacheReserve( uint reserveInMegs ) const; @@ -278,6 +285,7 @@ public: void Reset(); void Execute(u32 cycles); void Clear(u32 Addr, u32 Size); + void ResumeXGkick() { Console.Warning("ResumeXGkick() Not implemented!"); } uint GetCacheReserve() const; void SetCacheReserve( uint reserveInMegs ) const; diff --git a/pcsx2/x86/microVU.cpp b/pcsx2/x86/microVU.cpp index e29648ba9e..3f0cbc0b3d 100644 --- a/pcsx2/x86/microVU.cpp +++ b/pcsx2/x86/microVU.cpp @@ -128,7 +128,9 @@ void microVU::reset() { x86SetPtr(dispCache); mVUdispatcherA(this); mVUdispatcherB(this); - mVUemitSearch(); + mVUdispatcherC(this); + mVUdispatcherD(this); + mVUemitSearch(); // Clear All Program Data //memset(&prog, 0, sizeof(prog)); @@ -411,3 +413,10 @@ void recMicroVU1::SetCacheReserve( uint reserveInMegs ) const { DevCon.WriteLn("microVU1: Upping cache size [%dmb]", reserveInMegs); microVU1.cacheSize = min(reserveInMegs, mVUcacheMaxReserve); } + +void recMicroVU1::ResumeXGkick() { + pxAssert(m_Reserved); // please allocate me first! 
:| + + if(!(VU0.VI[REG_VPU_STAT].UL & 0x100)) return; + ((mVUrecCallXG)microVU1.startFunctXG)(); +} diff --git a/pcsx2/x86/microVU.h b/pcsx2/x86/microVU.h index a06cd6a18c..627d93ba46 100644 --- a/pcsx2/x86/microVU.h +++ b/pcsx2/x86/microVU.h @@ -169,8 +169,9 @@ static const uint mVUcacheMaxReserve = 128; // Max Reserve Cache Size (in mega struct microVU { - __aligned16 u32 macFlag[4]; // 4 instances of mac flag (used in execution) - __aligned16 u32 clipFlag[4]; // 4 instances of clip flag (used in execution) + __aligned16 u32 statFlag[4]; // 4 instances of status flag (backup for xgkick) + __aligned16 u32 macFlag [4]; // 4 instances of mac flag (used in execution) + __aligned16 u32 clipFlag[4]; // 4 instances of clip flag (used in execution) __aligned16 u32 xmmCTemp[4]; // Backup used in mVUclamp2() __aligned16 u32 xmmBackup[8][4]; // Backup for xmm0~xmm7 @@ -188,21 +189,24 @@ struct microVU { RecompiledCodeReserve* cache_reserve; - u8* cache; // Dynarec Cache Start (where we will start writing the recompiled code to) - u8* dispCache; // Dispatchers Cache (where startFunct and exitFunct are written to) - u8* startFunct; // Ptr Function to the Start code for recompiled programs - u8* exitFunct; // Ptr Function to the Exit code for recompiled programs - u32 code; // Contains the current Instruction - u32 divFlag; // 1 instance of I/D flags - u32 VIbackup; // Holds a backup of a VI reg if modified before a branch - u32 VIxgkick; // Holds a backup of a VI reg used for xgkick-delays - u32 branch; // Holds branch compare result (IBxx) OR Holds address to Jump to (JALR/JR) - u32 badBranch; // For Branches in Branch Delay Slots, holds Address the first Branch went to + 8 - u32 evilBranch; // For Branches in Branch Delay Slots, holds Address to Jump to - u32 p; // Holds current P instance index - u32 q; // Holds current Q instance index - u32 totalCycles; // Total Cycles that mVU is expected to run for - u32 cycles; // Cycles Counter + u8* cache; // Dynarec Cache Start (where we will start writing the recompiled code to) + u8* dispCache; // Dispatchers Cache (where startFunct and exitFunct are written to) + u8* startFunct; // Function Ptr to the recompiler dispatcher (start) + u8* exitFunct; // Function Ptr to the recompiler dispatcher (exit) + u8* startFunctXG; // Function Ptr to the recompiler dispatcher (xgkick resume) + u8* exitFunctXG; // Function Ptr to the recompiler dispatcher (xgkick exit) + u8* resumePtrXG; // Ptr to recompiled code position to resume xgkick + u32 code; // Contains the current Instruction + u32 divFlag; // 1 instance of I/D flags + u32 VIbackup; // Holds a backup of a VI reg if modified before a branch + u32 VIxgkick; // Holds a backup of a VI reg used for xgkick-delays + u32 branch; // Holds branch compare result (IBxx) OR Holds address to Jump to (JALR/JR) + u32 badBranch; // For Branches in Branch Delay Slots, holds Address the first Branch went to + 8 + u32 evilBranch; // For Branches in Branch Delay Slots, holds Address to Jump to + u32 p; // Holds current P instance index + u32 q; // Holds current Q instance index + u32 totalCycles; // Total Cycles that mVU is expected to run for + u32 cycles; // Cycles Counter VURegs& regs() const { return ::vuRegs[index]; } @@ -281,6 +285,7 @@ extern void* __fastcall mVUexecuteVU1(u32 startPC, u32 cycles); // recCall Function Pointer typedef void (__fastcall *mVUrecCall)(u32, u32); +typedef void (*mVUrecCallXG)(void); template void makeUnique(T& v) { // Removes Duplicates diff --git a/pcsx2/x86/microVU_Execute.inl 
b/pcsx2/x86/microVU_Execute.inl index cca83ed6ba..5100103f69 100644 --- a/pcsx2/x86/microVU_Execute.inl +++ b/pcsx2/x86/microVU_Execute.inl @@ -51,20 +51,22 @@ void mVUdispatcherA(mV) { mVUallocSFLAGd((uptr)&mVU->regs().VI[REG_STATUS_FLAG].UL, 1); #endif - xMOVAPS(xmmT1, ptr128[&mVU->regs().VI[REG_MAC_FLAG].UL]); + xMOVAPS (xmmT1, ptr128[&mVU->regs().VI[REG_MAC_FLAG].UL]); xSHUF.PS(xmmT1, xmmT1, 0); - xMOVAPS(ptr128[mVU->macFlag], xmmT1); + xMOVAPS (ptr128[mVU->macFlag], xmmT1); - xMOVAPS(xmmT1, ptr128[&mVU->regs().VI[REG_CLIP_FLAG].UL]); + xMOVAPS (xmmT1, ptr128[&mVU->regs().VI[REG_CLIP_FLAG].UL]); xSHUF.PS(xmmT1, xmmT1, 0); - xMOVAPS(ptr128[mVU->clipFlag], xmmT1); + xMOVAPS (ptr128[mVU->clipFlag], xmmT1); - xMOVAPS(xmmT1, ptr128[&mVU->regs().VI[REG_P].UL]); - xMOVAPS(xmmPQ, ptr128[&mVU->regs().VI[REG_Q].UL]); + xMOVAPS (xmmT1, ptr128[&mVU->regs().VI[REG_P].UL]); + xMOVAPS (xmmPQ, ptr128[&mVU->regs().VI[REG_Q].UL]); xSHUF.PS(xmmPQ, xmmT1, 0); // wzyx = PPQQ // Jump to Recompiled Code Block xJMP(eax); + pxAssertDev(xGetPtr() < (mVU->dispCache + mVUdispCacheSize), + "microVU: Dispatcher generation exceeded reserved cache area!"); } // Generates the code to exit from recompiled blocks @@ -91,8 +93,71 @@ void mVUdispatcherB(mV) { xPOP(ebp); xRET(); + pxAssertDev(xGetPtr() < (mVU->dispCache + mVUdispCacheSize), + "microVU: Dispatcher generation exceeded reserved cache area!"); +} - pxAssertDev(xGetPtr() < (mVU->dispCache + mVUdispCacheSize), "microVU: Dispatcher generation exceeded reserved cache area!"); +// Generates the code for resuming xgkick +void mVUdispatcherC(mV) { + mVU->startFunctXG = x86Ptr; + + // Backup cpu state + xPUSH(ebp); + xPUSH(ebx); + xPUSH(esi); + xPUSH(edi); + + // Align the stackframe (GCC only, since GCC assumes stackframe is always aligned) + #ifdef __GNUC__ + xSUB(esp, 12); + #endif + + // Load VU's MXCSR state + xLDMXCSR(g_sseVUMXCSR); + + mVUrestoreRegs(mVU); + + xMOV(gprF0, ptr32[&mVU->statFlag[0]]); + xMOV(gprF1, ptr32[&mVU->statFlag[1]]); + xMOV(gprF2, ptr32[&mVU->statFlag[2]]); + xMOV(gprF3, ptr32[&mVU->statFlag[3]]); + + // Jump to Recompiled Code Block + xJMP(ptr32[&mVU->resumePtrXG]); + pxAssertDev(xGetPtr() < (mVU->dispCache + mVUdispCacheSize), + "microVU: Dispatcher generation exceeded reserved cache area!"); +} + +// Generates the code to exit from xgkick +void mVUdispatcherD(mV) { + mVU->exitFunctXG = x86Ptr; + + //xPOP(gprT1); // Pop return address + //xMOV(ptr32[&mVU->resumePtrXG], gprT1); + + // Backup Status Flag (other regs were backed up on xgkick) + xMOV(ptr32[&mVU->statFlag[0]], gprF0); + xMOV(ptr32[&mVU->statFlag[1]], gprF1); + xMOV(ptr32[&mVU->statFlag[2]], gprF2); + xMOV(ptr32[&mVU->statFlag[3]], gprF3); + + // Load EE's MXCSR state + xLDMXCSR(g_sseMXCSR); + + // Unalign the stackframe: + #ifdef __GNUC__ + xADD( esp, 12 ); + #endif + + // Restore cpu state + xPOP(edi); + xPOP(esi); + xPOP(ebx); + xPOP(ebp); + + xRET(); + pxAssertDev(xGetPtr() < (mVU->dispCache + mVUdispCacheSize), + "microVU: Dispatcher generation exceeded reserved cache area!"); } //------------------------------------------------------------------ @@ -150,4 +215,3 @@ void* __fastcall mVUexecuteVU0(u32 startPC, u32 cycles) { return mVUexecute<0>(s void* __fastcall mVUexecuteVU1(u32 startPC, u32 cycles) { return mVUexecute<1>(startPC, cycles); } void __fastcall mVUcleanUpVU0() { mVUcleanUp<0>(); } void __fastcall mVUcleanUpVU1() { mVUcleanUp<1>(); } - diff --git a/pcsx2/x86/microVU_Lower.inl b/pcsx2/x86/microVU_Lower.inl index 6832e5a134..ee26e3ba02 100644 --- 
a/pcsx2/x86/microVU_Lower.inl +++ b/pcsx2/x86/microVU_Lower.inl @@ -1151,6 +1151,11 @@ void __fastcall mVU_XGKICK_(u32 addr) { static __fi void mVU_XGKICK_DELAY(mV, bool memVI) { mVUbackupRegs(mVU); +#if 0 // XGkick Break - ToDo: Change "SomeGifPathValue" to w/e needs to be tested + xTEST (ptr32[&SomeGifPathValue], 1); // If '1', breaks execution + xMOV (ptr32[&mVU->resumePtrXG], (uptr)xGetPtr() + 10 + 6); + xJcc32(Jcc_NotZero, (uptr)mVU->exitFunctXG - ((uptr)xGetPtr()+6)); +#endif if (memVI) xMOV(gprT2, ptr32[&mVU->VIxgkick]); else mVUallocVIa(mVU, gprT2, _Is_); xCALL(mVU_XGKICK_);
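
For reference, a minimal sketch of how the new ResumeXGkick() hook might be driven from the GIF side once a competing Path2/3 transfer drains. The gifPathFinished() call site and the model classes below are invented for illustration and do not exist in the codebase; only the "VU1 running" guard mirrors the VPU_STAT bit 0x100 check inside recMicroVU1::ResumeXGkick().

// Hypothetical, self-contained model of the ResumeXGkick() hook added above.
// BaseVUmicroCPU is a stand-in for the real PCSX2 class; gifPathFinished()
// is an invented call site used purely for illustration.
#include <cstdio>

struct BaseVUmicroCPU {
    virtual ~BaseVUmicroCPU() {}
    // Default is a no-op, matching the interpreter implementations in VUmicro.h.
    virtual void ResumeXGkick() {}
};

struct recMicroVU1_Model : BaseVUmicroCPU {
    bool vu1Running = true; // models VU0.VI[REG_VPU_STAT].UL & 0x100

    void ResumeXGkick() override {
        if (!vu1Running) return;          // same early-out as the real method
        std::puts("re-entering VU1 recompiled code via startFunctXG...");
        // real code: ((mVUrecCallXG)microVU1.startFunctXG)();
    }
};

// Hypothetical call site: once the competing GIF Path2/3 transfer finishes,
// give VU1 a chance to complete the Path1 XGkick it broke out of.
void gifPathFinished(BaseVUmicroCPU* CpuVU1) {
    CpuVU1->ResumeXGkick();
}

int main() {
    recMicroVU1_Model vu1;
    gifPathFinished(&vu1);
}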
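
The xSUB(esp, 12) / xADD(esp, 12) pair in mVUdispatcherC/D keeps the stack 16-byte aligned for GCC builds, which (per the comment in the patch) assume an aligned frame. A small sketch of the arithmetic, assuming the usual GCC/i386 convention that esp is 16-byte aligned immediately before the call into the dispatcher; the constant names are illustrative only.

// Alignment bookkeeping for the xgkick dispatcher prologue (assumes the caller
// followed the GCC/i386 rule: esp % 16 == 0 right before the call instruction).
constexpr int returnAddress = 4;        // pushed by the call itself
constexpr int savedRegs     = 4 * 4;    // push ebp/ebx/esi/edi
constexpr int padding       = 12;       // the xSUB(esp, 12)

// 4 + 16 + 12 == 32, so esp lands back on a 16-byte boundary inside the
// dispatcher, and the matching xADD(esp, 12) in mVUdispatcherD undoes only
// the padding before the pops and xRET.
static_assert((returnAddress + savedRegs + padding) % 16 == 0,
              "dispatcher stack frame keeps 16-byte alignment");

int main() {}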
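
The disabled #if 0 block in mVU_XGKICK_DELAY, together with mVUdispatcherC/D, outlines a cooperative break/resume scheme: when a competing GIF path is busy, the recompiled block records where it stopped in resumePtrXG and bails out through exitFunctXG; later, startFunctXG restores state and jumps back to the saved position. Below is a plain C++ model of that control flow under those assumptions; ordinary function pointers play the role of recompiled code positions, and every name is invented for illustration.

// Conceptual model of the resumePtrXG / exitFunctXG / startFunctXG handshake.
// Real microVU emits x86 for this; nothing here is the actual emitter code.
#include <cstdio>

using CodePos = void (*)();

struct MicroVUModel {
    CodePos resumePtrXG = nullptr;   // where to continue after the break
    bool    gifPathBusy = true;      // stand-in for "SomeGifPathValue"
};

MicroVUModel mVU;

void afterXgkick() {                 // code that follows the XGKICK in the block
    std::puts("finishing the recompiled block after XGKICK");
}

void xgkickSite() {                  // models the emitted XGKICK delay-slot code
    if (mVU.gifPathBusy) {           // the xTEST on the GIF path flag
        mVU.resumePtrXG = afterXgkick; // xMOV of the address past the jump
        return;                      // xJcc to exitFunctXG: save state, leave VU1
    }
    afterXgkick();
}

void startFunctXG() {                // models mVUdispatcherC: restore state, then
    if (mVU.resumePtrXG)             // xJMP(ptr32[&mVU->resumePtrXG])
        mVU.resumePtrXG();
}

int main() {
    xgkickSite();                    // Path2/3 busy: block breaks out early
    mVU.gifPathBusy = false;         // transfer finished
    startFunctXG();                  // ResumeXGkick() re-enters here
}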