microVU: Implemented indirect jump address caching (speedup)

Indirect jumps (JR/JALR) get a table that stores the previously jumped-to x86 code entry points; this table is indexed by the jump-target PC address.
If the current jump targets a previously jumped-to address, the table will have an entry point; but before it is returned, the microProgram to which the entry point belongs must be validated to confirm it matches the current contents of VU memory.
The program validation check is remembered and doesn't need to be performed again until after a micro memory clear (which happens when vif writes to vu micro memory).

git-svn-id: http://pcsx2.googlecode.com/svn/trunk@4083 96395faa-99c1-11dd-bbfe-3dabce05a288
This commit is contained in:
cottonvibes 2010-12-11 04:44:25 +00:00
parent 6f289bedaf
commit 835127b69e
5 changed files with 46 additions and 14 deletions

View File

@ -49,10 +49,9 @@ public:
}
~microBlockManager() { reset(); }
void reset() {
microBlockLink* linkI = blockList;
while( linkI != NULL )
{
for(microBlockLink* linkI = blockList; linkI != NULL; ) {
microBlockLink* freeI = linkI;
safe_delete_array(linkI->block.jumpCache);
linkI = linkI->next;
_aligned_free(freeI);
}
@ -63,7 +62,8 @@ public:
microBlock* thisBlock = search(&pBlock->pState);
if (!thisBlock) {
listI++;
microBlockLink* newBlock = (microBlockLink*)_aligned_malloc(sizeof(microBlockLink), 16);
microBlockLink* newBlock = (microBlockLink*)_aligned_malloc(sizeof(microBlockLink), 16);
newBlock->block.jumpCache = NULL;
newBlock->next = NULL;
if (blockEnd) {
@ -248,7 +248,7 @@ int mVUdebugNow = 0;
// Main Functions
static void mVUclear(mV, u32, u32);
static void* mVUblockFetch(microVU* mVU, u32 startPC, uptr pState);
_mVUt extern void* __fastcall mVUcompileJIT(u32 startPC, uptr pState);
_mVUt extern void* __fastcall mVUcompileJIT(u32 startPC, uptr ptr);
// Prototypes for Linux
extern void __fastcall mVUcleanUpVU0();

View File

@ -108,9 +108,14 @@ void normJumpCompile(mV, microFlagCycles& mFC, bool isEvilJump) {
mVUsetupBranch(mVU, mFC);
mVUbackupRegs(mVU);
if (isEvilJump) xMOV(gprT2, ptr32[&mVU->evilBranch]);
else xMOV(gprT2, ptr32[&mVU->branch]);
xMOV(gprT3, (uptr)&mVUpBlock->pStateEnd);
if(!mVUpBlock->jumpCache) { // Create the jump cache for this block
mVUpBlock->jumpCache = new microJumpCache[mProgSize/2];
}
if (isEvilJump) xMOV(gprT2, ptr32[&mVU->evilBranch]);
else xMOV(gprT2, ptr32[&mVU->branch]);
if (doJumpCaching) xMOV(gprT3, (uptr)mVUpBlock);
else xMOV(gprT3, (uptr)&mVUpBlock->pStateEnd);
if (!mVU->index) xCALL(mVUcompileJIT<0>); //(u32 startPC, uptr pState)
else xCALL(mVUcompileJIT<1>);

View File

@ -513,7 +513,19 @@ static __fi void* mVUblockFetch(microVU* mVU, u32 startPC, uptr pState) {
}
// mVUcompileJIT() - Called By JR/JALR during execution
_mVUt void* __fastcall mVUcompileJIT(u32 startPC, uptr pState) {
//return mVUblockFetch(mVUx, startPC, pState);
return mVUsearchProg<vuIndex>(startPC, pState); // Find and set correct program
// mVUcompileJIT() - Called by JR/JALR at execution time to resolve the x86
// entry point for an indirect jump target.
//   startPC: VU micro memory address being jumped to.
//   ptr:     when doJumpCaching, a microBlock* for the block that performed
//            the jump; otherwise a microRegInfo* (pipeline-state) pointer.
// Returns the x86 code entry point for the target block.
_mVUt void* __fastcall mVUcompileJIT(u32 startPC, uptr ptr) {
if (doJumpCaching) { // When doJumpCaching, ptr is a microBlock pointer
microVU* mVU = mVUx;
microBlock* pBlock = (microBlock*)ptr;
microJumpCache& jc = pBlock->jumpCache[startPC/8];
// Cache hit: the stored entry point is still valid if its program matches
// the program currently mapped at this address in the quick-lookup table.
if (jc.prog && jc.prog == mVU->prog.quick[startPC/8].prog) return jc.x86ptrStart;
// Cache miss: perform the full program/block search, then cache the result
// along with the program it belongs to for future validation.
void* v = mVUsearchProg<vuIndex>(startPC, (uptr)&pBlock->pStateEnd);
jc.prog = mVU->prog.quick[startPC/8].prog;
jc.x86ptrStart = v;
return v;
}
else { // When !doJumpCaching, the ptr param is really a microRegInfo pointer
//return mVUblockFetch(mVUx, startPC, ptr);
return mVUsearchProg<vuIndex>(startPC, ptr); // Find and set correct program
}
}

View File

@ -62,10 +62,18 @@ union __aligned16 microRegInfo {
C_ASSERT(sizeof(microRegInfo) == 160);
struct microProgram;
// Cached result of a previously resolved indirect jump (JR/JALR).
// An entry is valid only while 'prog' still matches the program currently
// mapped at the jump-target address; a NULL 'prog' means "not yet cached".
struct microJumpCache {
microJumpCache() : prog(NULL), x86ptrStart(NULL) {}
microProgram* prog; // Program to which the entry point below is part of
void* x86ptrStart; // Start of code (Entry point for block)
};
struct __aligned16 microBlock {
microRegInfo pState; // Detailed State of Pipeline
microRegInfo pStateEnd; // Detailed State of Pipeline at End of Block (needed by JR/JALR opcodes)
u8* x86ptrStart; // Start of code
microRegInfo pState; // Detailed State of Pipeline
microRegInfo pStateEnd; // Detailed State of Pipeline at End of Block (needed by JR/JALR opcodes)
u8* x86ptrStart; // Start of code (Entry point for block)
microJumpCache* jumpCache; // Will point to an array of entry points of size [16k/8] if block ends in JR/JALR
};
struct microTempRegInfo {

View File

@ -265,6 +265,13 @@ static const bool doConstProp = 0; // Set to 1 to turn on vi15 const propagation
// allowing us to know many indirect jump target addresses.
// Makes GoW a lot slower due to extra recompilation time and extra code-gen!
// Indirect Jump Caching
static const bool doJumpCaching = 1; // Set to 1 to enable jump caching
// Indirect jumps (JR/JALR) will remember the entry points to their previously
// jumped-to addresses. This allows us to skip the microBlockManager::search()
// routine that is performed every indirect jump in order to find a block within a
// program that matches the correct pipeline state.
//------------------------------------------------------------------
// Speed Hacks (can cause infinite loops, SPS, Black Screens, etc...)
//------------------------------------------------------------------