more microVU stuff...

git-svn-id: http://pcsx2.googlecode.com/svn/trunk@953 96395faa-99c1-11dd-bbfe-3dabce05a288
This commit is contained in:
cottonvibes 2009-04-11 09:25:47 +00:00
parent 9c8a9712a9
commit 076e9e5386
9 changed files with 252 additions and 88 deletions

View File

@ -42,7 +42,6 @@ microVUt(void) mVUinit(VURegs* vuRegsPtr) {
mVU->index = vuIndex;
mVU->microSize = (vuIndex ? 0x4000 : 0x1000);
mVU->progSize = (vuIndex ? 0x4000 : 0x1000) / 4;
mVU->cacheAddr = (vuIndex ? 0x1e840000 : 0x0e840000);
mVU->cache = NULL;
mVUreset<vuIndex>();
@ -62,7 +61,7 @@ microVUt(void) mVUreset() {
}
// Dynarec Cache
mVU->cache = SysMmapEx(mVU->cacheAddr, mVU->cacheSize, 0, (vuIndex ? "Micro VU1" : "Micro VU0"));
mVU->cache = SysMmapEx((vuIndex ? 0x1e840000 : 0x0e840000), mVU->cacheSize, 0, (vuIndex ? "Micro VU1" : "Micro VU0"));
if ( mVU->cache == NULL ) throw Exception::OutOfMemory(fmt_string( "microVU Error: Failed to allocate recompiler memory! (addr: 0x%x)", params (u32)mVU->cache));
mVU->ptr = mVU->cache;
@ -70,12 +69,21 @@ microVUt(void) mVUreset() {
mVUdispatcherA<vuIndex>();
mVUdispatcherB<vuIndex>();
// Other Variables
// Program Variables
memset(&mVU->prog, 0, sizeof(mVU->prog));
mVU->prog.finished = 1;
mVU->prog.cleared = 1;
mVU->prog.cur = -1;
mVU->prog.total = -1;
// Setup Dynarec Cache Limits for Each Program
u8* z = (mVU->cache + 512); // Dispatcher Code is in first 512 bytes
for (int i = 0; i <= mVU->prog.max; i++) {
mVU->prog.prog[i].x86start = z;
mVU->prog.prog[i].x86ptr = z;
z += (mVU->cacheSize / (mVU->prog.max + 1));
mVU->prog.prog[i].x86end = z;
}
}
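For reference, the new loop splits the 5MB dynarec cache evenly between the program slots after reserving the first 512 bytes for the dispatchers. A minimal standalone sketch of the same arithmetic, assuming a hypothetical prog.max of 63 (the real value lives in microProgManager):
#include <cstdio>
#include <cstdint>
// Sketch only: mirrors the partition arithmetic from mVUreset above.
int main() {
    const uint32_t cacheSize = 0x500000;       // microVU::cacheSize (5MB)
    const int progMax = 63;                    // hypothetical prog.max
    uint32_t z = 512;                          // dispatcher code occupies the first 512 bytes
    for (int i = 0; i <= progMax; i++) {
        uint32_t x86start = z;                 // program i emits its blocks into [x86start, x86end)
        z += cacheSize / (progMax + 1);
        uint32_t x86end = z;
        std::printf("prog %2d: 0x%06x - 0x%06x\n", i, x86start, x86end);
    }
    return 0;
}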
// Free Allocated Resources
@ -111,6 +119,7 @@ microVUt(void) mVUclear(u32 addr, u32 size) {
// Clears program data (Sets used to 1 because calling this function implies the program will be used at least once)
__forceinline void mVUclearProg(microVU* mVU, int progIndex) {
mVU->prog.prog[progIndex].used = 1;
mVU->prog.prog[progIndex].x86ptr = mVU->prog.prog[progIndex].x86start;
for (u32 i = 0; i < (mVU->progSize / 2); i++) {
mVU->prog.prog[progIndex].block[i]->reset();
}
@ -149,7 +158,7 @@ __forceinline int mVUsearchProg(microVU* mVU) {
for (int i = 0; i <= mVU->prog.total; i++) {
//if (i == mVU->prog.cur) continue; // We can skip the current program. (ToDo: Verify that games don't clear, and send the same microprogram :/)
if (!memcmp_mmx(mVU->prog.prog[i].data, mVU->regs->Micro, mVU->microSize)) {
if (i == mVU->prog.cur) SysPrintf("microVU: Same micro program sent!\n");
if (i == mVU->prog.cur) { mVUlog("microVU: Same micro program sent!"); }
mVU->prog.cur = i;
mVU->prog.cleared = 0;
mVU->prog.prog[i].used++;
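For context, mVUsearchProg does a linear scan over the cached program images and reuses the first exact match. A simplified sketch with plain std::memcmp standing in for memcmp_mmx and hypothetical trimmed-down structs:
#include <cstring>
#include <cstdint>
// Hypothetical stand-ins for microProgram / microProgManager.
struct Prog    { uint32_t data[0x4000 / 4]; uint32_t used; };
struct ProgMgr { Prog prog[64]; int total; int cur; int cleared; };
// Returns 1 if an identical micro program image is already cached, 0 otherwise.
int searchProg(ProgMgr& mgr, const uint32_t* micro, uint32_t microSize) {
    for (int i = 0; i <= mgr.total; i++) {
        if (!std::memcmp(mgr.prog[i].data, micro, microSize)) { // memcmp_mmx in the real code
            mgr.cur = i;
            mgr.cleared = 0;
            mgr.prog[i].used++;
            return 1;
        }
    }
    return 0; // caller falls through to compiling a new program
}
int main() {
    static ProgMgr mgr = {};            // static: the table is too large for the stack
    mgr.total = -1; mgr.cur = -1;       // empty cache
    uint32_t micro[0x4000 / 4] = {};
    return searchProg(mgr, micro, sizeof(micro)) ? 0 : 1;
}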

View File

@ -92,7 +92,10 @@ public:
template<u32 progSize>
struct microProgram {
u32 data[progSize/4];
u32 used; // Number of times it's been used
u32 used; // Number of times it's been used
u8* x86ptr; // Pointer to program's recompilation code
u8* x86start; // Start of program's rec-cache
u8* x86end; // Limit of program's rec-cache
microBlockManager* block[progSize/8];
microAllocInfo<progSize> allocInfo;
};
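The three new pointers give each program a private slice of the rec-cache: x86start is the slice base, x86ptr is where the next block gets emitted, and x86end is the limit checked by mVUcachCheck. A hedged usage sketch with hypothetical helper names:
#include <cstddef>
#include <cstdint>
struct ProgSlice { uint8_t* x86start; uint8_t* x86ptr; uint8_t* x86end; };
// Emit a finished block into the program's slice, or return nullptr if the slice is full.
uint8_t* emitBlock(ProgSlice& p, const uint8_t* code, size_t len) {
    if (p.x86ptr + len > p.x86end) return nullptr;        // overflow: mVUcachCheck would complain here
    uint8_t* entry = p.x86ptr;
    for (size_t i = 0; i < len; i++) entry[i] = code[i];  // the real code writes via the x86 emitter
    p.x86ptr += len;
    return entry;
}
// mVUclearProg rewinds the write pointer so the slice can be reused after the program is cleared.
void clearProg(ProgSlice& p) { p.x86ptr = p.x86start; }
int main() {
    static uint8_t cache[0x1000];
    ProgSlice p = { cache, cache, cache + sizeof(cache) };
    const uint8_t ret[] = { 0xC3 };                       // x86 'ret', just a sample payload
    emitBlock(p, ret, sizeof(ret));
    clearProg(p);
    return 0;
}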
@ -112,20 +115,24 @@ struct microProgManager {
struct microVU {
u32 index; // VU Index (VU0 or VU1)
u32 microSize; // VU Micro Memory Size
u32 progSize; // VU Micro Program Size (microSize/8)
u32 cacheAddr; // VU Cache Start Address
u32 progSize; // VU Micro Program Size (microSize/4)
static const u32 cacheSize = 0x500000; // VU Cache Size
microProgManager<0x4000> prog; // Micro Program Data
VURegs* regs; // VU Regs Struct
u8* cache; // Dynarec Cache Start (where we will start writing the recompiled code to)
u8* startFunct; // Ptr Function to the Start code for recompiled programs
u8* exitFunct; // Ptr Function to the Exit code for recompiled programs
u8* ptr; // Pointer to next place to write recompiled code to
u32 code; // Contains the current Instruction
u32 iReg; // iReg (only used in recompilation, not execution)
u32 clipFlag[4]; // 4 instances of clip flag (used in execution)
u32 divFlag; // 1 instance of I/D flags
u32 VIbackup[2]; // Holds a backup of a VI reg if modified before a branch
u32 branch; // Holds branch compare result (IBxx) OR Holds address to Jump to (JALR/JR)
u32 p; // Holds current P instance index
u32 q; // Holds current Q instance index
};
// microVU rec structs

View File

@ -52,9 +52,10 @@ template<u32 pSize>
struct microAllocInfo {
microRegInfo regs; // Pipeline info
microTempRegInfo regsTemp; // Temp Pipeline info (used so that new pipeline info isn't conflicting between upper and lower instructions in the same cycle)
u8 branch; // 0 = No Branch, 1 = Branch, 2 = Conditional Branch, 3 = Jump (JALR/JR)
u8 branch; // 0 = No Branch, 1 = B, 2 = BAL, 3~8 = Conditional Branches, 9 = JALR, 10 = JR
u8 maxStall; // Helps in computing stalls (stores the max amount of cycles to stall for the current opcodes)
u32 cycles; // Cycles for current block
u32 count; // Number of VU 64bit instructions ran (starts at 0 for each block)
u32 curPC; // Current PC
u32 startPC; // Start PC for Cur Block
u32 info[pSize/8]; // Info for Instructions in current block

View File

@ -102,6 +102,49 @@ microVUt(void) mVUanalyzeFMAC4(int Fs, int Ft) {
analyzeReg4(Ft);
}
//------------------------------------------------------------------
// IALU - IALU Opcodes
//------------------------------------------------------------------
#define analyzeVIreg1(reg) { if (reg) { mVUstall = aMax(mVUstall, mVUregs.VI[reg]); } }
#define analyzeVIreg2(reg, aCycles) { if (reg) { mVUregsTemp.VIreg = reg; mVUregsTemp.VI = aCycles; mVUinfo |= _writesVI; mVU->VIbackup[0] = reg; } }
microVUt(void) mVUanalyzeIALU1(int Id, int Is, int It) {
microVU* mVU = mVUx;
if (!Id) { mVUinfo |= _isNOP; }
analyzeVIreg1(Is);
analyzeVIreg1(It);
analyzeVIreg2(Id, 1);
}
microVUt(void) mVUanalyzeIALU2(int Is, int It) {
microVU* mVU = mVUx;
if (!It) { mVUinfo |= _isNOP; }
analyzeVIreg1(Is);
analyzeVIreg2(It, 1);
}
//------------------------------------------------------------------
// MR32 - MR32 Opcode
//------------------------------------------------------------------
// Flips xyzw stalls to yzwx
#define analyzeReg6(reg) { \
if (reg) { \
if (_X) { mVUstall = aMax(mVUstall, aReg(reg).y); } \
if (_Y) { mVUstall = aMax(mVUstall, aReg(reg).z); } \
if (_Z) { mVUstall = aMax(mVUstall, aReg(reg).w); } \
if (_W) { mVUstall = aMax(mVUstall, aReg(reg).x); } \
} \
}
microVUt(void) mVUanalyzeMR32(int Fs, int Ft) {
microVU* mVU = mVUx;
if (!Ft) { mVUinfo |= _isNOP; }
analyzeReg6(Fs);
analyzeReg2(Ft);
}
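analyzeReg6 encodes MR32's rotation: each enabled destination field stalls on the source field it actually reads (x on y, y on z, z on w, w on x). A small worked sketch of the same mapping, using hypothetical names:
#include <algorithm>
#include <cstdio>
struct RegCycles { int x, y, z, w; }; // remaining stall cycles per field, like microRegInfo::VF[reg]
// MR32 reads Fs as yzwx, so a write mask of xyzw on Ft stalls on y,z,w,x of Fs respectively.
int mr32Stall(const RegCycles& fs, bool X, bool Y, bool Z, bool W) {
    int stall = 0;
    if (X) stall = std::max(stall, fs.y);
    if (Y) stall = std::max(stall, fs.z);
    if (Z) stall = std::max(stall, fs.w);
    if (W) stall = std::max(stall, fs.x);
    return stall;
}
int main() {
    RegCycles fs = {0, 3, 0, 0};                                   // Fs.y still has 3 cycles to go
    std::printf("%d\n", mr32Stall(fs, true, false, false, false)); // prints 3: Ft.x reads Fs.y
    return 0;
}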
//------------------------------------------------------------------
// FDIV - DIV/SQRT/RSQRT Opcodes
//------------------------------------------------------------------
@ -144,11 +187,18 @@ microVUt(void) mVUanalyzeEFU2(int Fs, u8 xCycles) {
}
//------------------------------------------------------------------
// LQx - LQ/LQD/LQI Opcodes
// MFP - MFP Opcode
//------------------------------------------------------------------
#define analyzeVIreg1(reg) { if (reg) { mVUstall = aMax(mVUstall, mVUregs.VI[reg]); } }
#define analyzeVIreg2(reg, aCycles) { if (reg) { mVUregsTemp.VIreg = reg; mVUregsTemp.VI = aCycles; mVUinfo |= _writesVI; mVU->VIbackup[0] = reg; } }
microVUt(void) mVUanalyzeMFP(int Ft) {
microVU* mVU = mVUx; // ToDo: Needs special info for P reg?
if (!Ft) { mVUinfo |= _isNOP; }
analyzeReg2(Ft);
}
//------------------------------------------------------------------
// LQx - LQ/LQD/LQI Opcodes
//------------------------------------------------------------------
microVUt(void) mVUanalyzeLQ(int Ft, int Is, bool writeIs) {
microVU* mVU = mVUx;
@ -183,7 +233,7 @@ microVUt(void) mVUanalyzeR1(int Fs, int Fsf) {
microVUt(void) mVUanalyzeR2(int Ft, bool canBeNOP) {
microVU* mVU = mVUx;
if (!Ft) { mVUinfo |= ((canBeNOP) ? _isNOP : _noWriteVF); return; }
if (!Ft) { mVUinfo |= ((canBeNOP) ? _isNOP : _noWriteVF); }
analyzeReg2(Ft);
analyzeRreg();
}
@ -194,11 +244,22 @@ microVUt(void) mVUanalyzeR2(int Ft, bool canBeNOP) {
microVUt(void) mVUanalyzeSflag(int It) {
microVU* mVU = mVUx;
if (!It) { mVUinfo |= _isNOP; return; }
mVUinfo |= _isSflag;
if (!It) { mVUinfo |= _isNOP; }
else { mVUinfo |= _isSflag | _swapOps; } // ToDo: set s flag at right time
analyzeVIreg2(It, 1);
}
microVUt(void) mVUanalyzeFSSET() {
microVU* mVU = mVUx;
int i, curPC = iPC;
for (i = mVUcount; i > 0; i--) {
incPC2(-2);
if (isSflag) break;
mVUinfo &= ~_doStatus;
}
iPC = curPC;
}
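mVUanalyzeFSSET walks backwards over the instructions already analyzed in this block and clears their status-flag update bit until it hits an earlier Sflag writer. A simplified sketch, with the bit values as stand-ins for the real _doStatus/_isSflag flags:
#include <vector>
#include <cstdint>
const uint32_t kDoStatus = 1 << 5;  // stand-in for _doStatus
const uint32_t kIsSflag  = 1 << 4;  // stand-in for _isSflag
// info[] holds one flags word per instruction pair already analyzed in the block,
// count is how many there are (mVUcount in the real code), pc indexes the current one.
void analyzeFSSET(std::vector<uint32_t>& info, int pc, int count) {
    int i = pc;
    for (int n = count; n > 0; n--) {
        i--;                              // incPC2(-2) steps back one instruction pair
        if (info[i] & kIsSflag) break;    // stop at the last real status-flag writer
        info[i] &= ~kDoStatus;            // FSSET overrides their sticky-flag updates
    }
}
int main() {
    std::vector<uint32_t> info = { kDoStatus, kDoStatus, kDoStatus };
    analyzeFSSET(info, 3, 3);             // no earlier Sflag writer: all three get cleared
    return (info[0] & kDoStatus) ? 1 : 0;
}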
//------------------------------------------------------------------
// XGkick
//------------------------------------------------------------------
@ -218,7 +279,7 @@ microVUt(void) mVUanalyzeXGkick(int Fs, int xCycles) {
//------------------------------------------------------------------
#define analyzeBranchVI(reg, infoVal) { \
if (reg && (mVUcycles > 1)) { /* Ensures branch is not first opcode in block */ \
if (reg && (mVUcount > 0)) { /* Ensures branch is not first opcode in block */ \
incPC(-2); \
if (writesVI && (reg == mVU->VIbackup[0])) { /* If prev Op modified VI reg */ \
mVUinfo |= _backupVI; \

View File

@ -29,10 +29,25 @@
} \
}
#define branchCase(Xcmp) \
CMP16ItoM((uptr)mVU->branch, 0); \
ajmp = Xcmp((uptr)0); \
break
#define branchCase2() { \
incPC(-2); \
MOV32ItoR(gprT1, (xPC + (2 * 8)) & ((vuIndex) ? 0x3fff:0xfff)); \
mVUallocVIb<vuIndex>(gprT1, _Ft_); \
incPC(+2); \
}
#define startLoop() { mVUdebug1(); mVUstall = 0; memset(&mVUregsTemp, 0, sizeof(mVUregsTemp)); }
#define calcCycles(reg, x) { reg = ((reg > x) ? (reg - x) : 0); }
#define incP() { mVU->p = (mVU->p+1) & 1; }
#define incQ() { mVU->q = (mVU->q+1) & 1; }
microVUt(void) mVUincCycles(int x) {
microVU* mVU = mVUx;
mVUcycles += x;
for (int z = 31; z > 0; z--) {
calcCycles(mVUregs.VF[z].x, x);
@ -45,9 +60,12 @@ microVUt(void) mVUincCycles(int x) {
}
if (mVUregs.q) {
calcCycles(mVUregs.q, x);
if (!mVUregs.q) {} // Do Status Flag Merging Stuff?
if (!mVUregs.q) { incQ(); } // Do Status Flag Merging Stuff?
}
if (mVUregs.p) {
calcCycles(mVUregs.p, x);
if (!mVUregs.p) { incP(); }
}
calcCycles(mVUregs.p, x);
calcCycles(mVUregs.r, x);
calcCycles(mVUregs.xgkick, x);
}
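calcCycles is a saturating subtraction: advancing the pipeline by x cycles reduces every outstanding stall counter by x, clamped at zero, and when the Q or P counter reaches zero the corresponding instance index flips via incQ/incP. A minimal sketch of that bookkeeping:
#include <cstdio>
// Saturating decrement used for every stall counter (calcCycles in the real code).
static inline int calcCycles(int reg, int x) { return (reg > x) ? (reg - x) : 0; }
int main() {
    int q = 7;          // DIV result becomes visible in 7 cycles
    int qInstance = 0;  // which of the two Q instances new reads should see
    int advance = 10;   // cycles consumed by the next opcodes
    if (q) {
        q = calcCycles(q, advance);
        if (!q) qInstance ^= 1;   // incQ(): the pending result has now landed
    }
    std::printf("q=%d instance=%d\n", q, qInstance); // q=0 instance=1
    return 0;
}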
@ -57,8 +75,7 @@ microVUt(void) mVUsetCycles() {
incCycles(mVUstall);
if (mVUregsTemp.VFreg[0] == mVUregsTemp.VFreg[1] && !mVUregsTemp.VFreg[0]) { // If upper Op && lower Op write to same VF reg
mVUinfo |= (mVUregsTemp.r || mVUregsTemp.VI) ? _noWriteVF : _isNOP; // If lower Op doesn't modify anything else, then make it a NOP
//mVUregsTemp.VF[1].reg = mVUregsTemp.VF[0]; // Just use cycles from upper Op (incorrect?)
mVUregsTemp.VF[1].x = aMax(mVUregsTemp.VF[0].x, mVUregsTemp.VF[1].x); // Use max cycles from each vector (correct?)
mVUregsTemp.VF[1].x = aMax(mVUregsTemp.VF[0].x, mVUregsTemp.VF[1].x); // Use max cycles from each vector
mVUregsTemp.VF[1].y = aMax(mVUregsTemp.VF[0].y, mVUregsTemp.VF[1].y);
mVUregsTemp.VF[1].z = aMax(mVUregsTemp.VF[0].z, mVUregsTemp.VF[1].z);
mVUregsTemp.VF[1].w = aMax(mVUregsTemp.VF[0].w, mVUregsTemp.VF[1].w);
@ -72,20 +89,28 @@ microVUt(void) mVUsetCycles() {
mVUregs.xgkick = mVUregsTemp.xgkick;
}
microVUx(void) mVUcompile(u32 startPC, u32 pipelineState, microRegInfo* pState, u8* x86ptrStart) {
//------------------------------------------------------------------
// Recompiler
//------------------------------------------------------------------
microVUx(void*) mVUcompile(u32 startPC, u32 pipelineState, microRegInfo* pState, u8* x86ptrStart) {
microVU* mVU = mVUx;
microBlock block;
u8* thisPtr = mVUcurProg.x86ptr;
iPC = startPC / 4;
// Searches for Existing Compiled Block (if found, then returns; else, compile)
microBlock* pblock = mVUblock[iPC]->search(pipelineState, pState);
if (pblock) { x86SetPtr(pblock->x86ptrEnd); return; }
microBlock* pblock = mVUblock[iPC/2]->search(pipelineState, pState);
if (pblock) { return pblock->x86ptrStart; }
// First Pass
setCode();
mVUbranch = 0;
mVUstartPC = iPC;
mVUcount = 0;
mVUcycles = 1; // Skips "M" phase, and starts counting cycles at "T" stage
mVU->p = 0; // All blocks start at p index #0
mVU->q = 0; // All blocks start at q index #0
for (int branch = 0;; ) {
startLoop();
mVUopU<vuIndex, 0>();
@ -94,11 +119,15 @@ microVUx(void) mVUcompile(u32 startPC, u32 pipelineState, microRegInfo* pState,
if (curI & _Ibit_) { incPC(1); mVUinfo |= _isNOP; }
else { incPC(1); mVUopL<vuIndex, 0>(); }
mVUsetCycles<vuIndex>();
if (mVU->p) { mVUinfo |= _readP; }
if (mVU->q) { mVUinfo |= _readQ; }
else { mVUinfo |= _writeQ; }
if (branch >= 2) { mVUinfo |= _isEOB | ((branch == 3) ? _isBdelay : 0); if (mVUbranch) { Console::Error("microVU Warning: Branch in E-bit/Branch delay slot!"); mVUinfo |= _isNOP; } break; }
else if (branch == 1) { branch = 2; }
if (mVUbranch) { branch = 3; mVUbranch = 0; mVUinfo |= _isBranch; }
incPC(1);
incCycles(1);
mVUcount++;
}
// Second Pass
@ -109,7 +138,7 @@ microVUx(void) mVUcompile(u32 startPC, u32 pipelineState, microRegInfo* pState,
// ToDo: status/mac flag stuff?
//
if (isEOB) { x = 0; }
else if (isBranch) { mVUopU<vuIndex, 1>(); incPC(2); }
//if (isBranch2) { mVUopU<vuIndex, 1>(); incPC(2); }
if (isNOP) { mVUopU<vuIndex, 1>(); if (curI & _Ibit_) { incPC(1); mVU->iReg = curI; } else { incPC(1); } }
else if (!swapOps) { mVUopU<vuIndex, 1>(); incPC(1); mVUopL<vuIndex, 1>(); }
@ -117,16 +146,43 @@ microVUx(void) mVUcompile(u32 startPC, u32 pipelineState, microRegInfo* pState,
if (!isBdelay) { incPC(1); }
else {
incPC(-2); // Go back to Branch Opcode
mVUopL<vuIndex, 1>(); // Run Branch Opcode
u32* ajmp;
switch (mVUbranch) {
case 1: break;
case 2: break;
case 3: break;
case 3: branchCase(JZ32); // IBEQ
case 4: branchCase(JGE32); // IBGEZ
case 5: branchCase(JG32); // IBGTZ
case 6: branchCase(JLE32); // IBLEQ
case 7: branchCase(JL32); // IBLTZ
case 8: branchCase(JNZ32); // IBNEQ
case 2: branchCase2(); // BAL
case 1:
// search for block
ajmp = JMP32((uptr)0);
break; // B/BAL
case 9: branchCase2(); // JALR
case 10: break; // JR/JALR
//mVUcurProg.x86Ptr
}
break;
return thisPtr;
}
}
// Do E-bit end stuff here
incCycles(55); // Ensures Valid P/Q instances
mVUcycles -= 55;
if (mVU->q) { SSE2_PSHUFD_XMM_to_XMM(xmmPQ, xmmPQ, 0xe5); }
SSE_MOVSS_XMM_to_M32((uptr)&mVU->regs->VI[REG_Q], xmmPQ);
SSE2_PSHUFD_XMM_to_XMM(xmmPQ, xmmPQ, mVU->p ? 3 : 2);
SSE_MOVSS_XMM_to_M32((uptr)&mVU->regs->VI[REG_P], xmmPQ);
MOV32ItoM((uptr)&mVU->p, mVU->p);
MOV32ItoM((uptr)&mVU->q, mVU->q);
AND32ItoM((uptr)&microVU0.regs.VI[REG_VPU_STAT].UL, (vuIndex ? ~0x100 : ~0x001)); // VBS0/VBS1 flag
AND32ItoM((uptr)&mVU->regs->vifRegs->stat, ~0x4); // Not sure what this does but zerorecs do it...
MOV32ItoM((uptr)&mVU->regs->VI[REG_TPC], xPC);
JMP32((uptr)mVU->exitFunct - ((uptr)x86Ptr + 5));
return thisPtr;
}
#endif //PCSX2_MICROVU
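Going by the shuffle constants in the E-bit exit code above, xmmPQ seems to hold the two Q instances in its lower two lanes and the two P instances in the upper two: 0xE5 promotes lane 1 when the second Q instance is current, and (p ? 3 : 2) selects the current P lane before both are written back to REG_Q/REG_P. A small sketch of that lane selection (an assumption read off the constants, not confirmed elsewhere in the diff):
#include <cstdio>
// xmmPQ as implied by the shuffles: [Q0, Q1, P0, P1].
int main() {
    float xmmPQ[4] = { 1.0f, 2.0f, 3.0f, 4.0f };
    int q = 1, p = 0;                    // current instance indices at the end of the block
    float REG_Q = xmmPQ[q ? 1 : 0];      // PSHUFD 0xE5 + MOVSS picks lane 1 when q is set
    float REG_P = xmmPQ[p ? 3 : 2];      // PSHUFD (p ? 3 : 2) + MOVSS picks the current P lane
    std::printf("Q=%g P=%g\n", REG_Q, REG_P);
    return 0;
}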

View File

@ -27,6 +27,7 @@ microVUt(void) mVUdispatcherA() {
static u32 PCSX2_ALIGNED16(vuMXCSR);
microVU* mVU = mVUx;
x86SetPtr(mVU->ptr);
mVU->startFunct = mVU->ptr;
// __fastcall = The first two DWORD or smaller arguments are passed in ECX and EDX registers; all other arguments are passed right to left.
if (!vuIndex) { CALLFunc((uptr)mVUexecuteVU0); }
@ -74,6 +75,7 @@ microVUt(void) mVUdispatcherB() {
static u32 PCSX2_ALIGNED16(eeMXCSR);
microVU* mVU = mVUx;
x86SetPtr(mVU->ptr);
mVU->exitFunct = mVU->ptr;
// __fastcall = The first two DWORD or smaller arguments are passed in ECX and EDX registers; all other arguments are passed right to left.
if (!vuIndex) { CALLFunc((uptr)mVUcleanUpVU0); }
@ -96,9 +98,9 @@ microVUt(void) mVUdispatcherB() {
}
SSE_MOVAPS_XMM_to_M128((uptr)&mVU->regs->ACC, xmmACC);
SSE_MOVSS_XMM_to_M32((uptr)&mVU->regs->VI[REG_Q], xmmPQ); // ToDo: Ensure Correct Q/P instances
SSE2_PSHUFD_XMM_to_XMM(xmmPQ, xmmPQ, 0); // wzyx = PPPP
SSE_MOVSS_XMM_to_M32((uptr)&mVU->regs->VI[REG_P], xmmPQ);
//SSE_MOVSS_XMM_to_M32((uptr)&mVU->regs->VI[REG_Q], xmmPQ); // ToDo: Ensure Correct Q/P instances
//SSE2_PSHUFD_XMM_to_XMM(xmmPQ, xmmPQ, 0); // wzyx = PPPP
//SSE_MOVSS_XMM_to_M32((uptr)&mVU->regs->VI[REG_P], xmmPQ);
// Restore cpu state
POP32R(EDI);
@ -110,7 +112,7 @@ microVUt(void) mVUdispatcherB() {
RET();
mVU->ptr = x86Ptr;
mVUcachCheck(512);
mVUcachCheck(mVU->cache, 512);
}
//------------------------------------------------------------------
@ -144,16 +146,16 @@ microVUt(void*) __fastcall mVUexecute(u32 startPC, u32 cycles) {
microVUt(void) mVUcleanUp() {
microVU* mVU = mVUx;
mVU->ptr = x86Ptr;
mVUcachCheck(1024); // ToDo: Implement Program Cache Limits
mVU->ptr = mVUcurProg.x86ptr;
mVUcachCheck(mVUcurProg.x86start, (uptr)(mVUcurProg.x86end - mVUcurProg.x86start));
}
//------------------------------------------------------------------
// Caller Functions
//------------------------------------------------------------------
void __fastcall startVU0(u32 startPC, u32 cycles) { ((mVUrecCall)microVU0.cache)(startPC, cycles); }
void __fastcall startVU1(u32 startPC, u32 cycles) { ((mVUrecCall)microVU1.cache)(startPC, cycles); }
void __fastcall startVU0(u32 startPC, u32 cycles) { ((mVUrecCall)microVU0.startFunct)(startPC, cycles); }
void __fastcall startVU1(u32 startPC, u32 cycles) { ((mVUrecCall)microVU1.startFunct)(startPC, cycles); }
void* __fastcall mVUexecuteVU0(u32 startPC, u32 cycles) { return mVUexecute<0>(startPC, cycles); }
void* __fastcall mVUexecuteVU1(u32 startPC, u32 cycles) { return mVUexecute<1>(startPC, cycles); }
void mVUcleanUpVU0() { mVUcleanUp<0>(); }

View File

@ -545,14 +545,12 @@ microVUf(void) mVU_FSOR() {
microVUf(void) mVU_FSSET() {
microVU* mVU = mVUx;
if (!recPass) {}
if (!recPass) { mVUanalyzeFSSET<vuIndex>(); }
else {
int flagReg;
getFlagReg(flagReg, fsInstance);
MOV16ItoR(gprT1, (_Imm12_ & 0xfc0));
//if (_Imm12_ & 0xc00) { mVUdivFlag = _Imm12_ >> 9; }
//else { mVUdivFlag = 1; }
//mVUdivFlagT = 4;
AND32ItoR(flagReg, 0x03f);
OR32ItoR(flagReg, (_Imm12_ & 0xfc0));
}
}
@ -562,7 +560,7 @@ microVUf(void) mVU_FSSET() {
microVUf(void) mVU_IADD() {
microVU* mVU = mVUx;
if (!recPass) {}
if (!recPass) { mVUanalyzeIALU1<vuIndex>(_Fd_, _Fs_, _Ft_); }
else {
mVUallocVIa<vuIndex>(gprT1, _Fs_);
if (_Ft_ != _Fs_) {
@ -576,7 +574,7 @@ microVUf(void) mVU_IADD() {
microVUf(void) mVU_IADDI() {
microVU* mVU = mVUx;
if (!recPass) {}
if (!recPass) { mVUanalyzeIALU2<vuIndex>(_Fs_, _Ft_); }
else {
mVUallocVIa<vuIndex>(gprT1, _Fs_);
ADD16ItoR(gprT1, _Imm5_);
@ -586,7 +584,7 @@ microVUf(void) mVU_IADDI() {
microVUf(void) mVU_IADDIU() {
microVU* mVU = mVUx;
if (!recPass) {}
if (!recPass) { mVUanalyzeIALU2<vuIndex>(_Fs_, _Ft_); }
else {
mVUallocVIa<vuIndex>(gprT1, _Fs_);
ADD16ItoR(gprT1, _Imm12_);
@ -596,7 +594,7 @@ microVUf(void) mVU_IADDIU() {
microVUf(void) mVU_IAND() {
microVU* mVU = mVUx;
if (!recPass) {}
if (!recPass) { mVUanalyzeIALU1<vuIndex>(_Fd_, _Fs_, _Ft_); }
else {
mVUallocVIa<vuIndex>(gprT1, _Fs_);
if (_Ft_ != _Fs_) {
@ -609,7 +607,7 @@ microVUf(void) mVU_IAND() {
microVUf(void) mVU_IOR() {
microVU* mVU = mVUx;
if (!recPass) {}
if (!recPass) { mVUanalyzeIALU1<vuIndex>(_Fd_, _Fs_, _Ft_); }
else {
mVUallocVIa<vuIndex>(gprT1, _Fs_);
if (_Ft_ != _Fs_) {
@ -622,7 +620,7 @@ microVUf(void) mVU_IOR() {
microVUf(void) mVU_ISUB() {
microVU* mVU = mVUx;
if (!recPass) {}
if (!recPass) { mVUanalyzeIALU1<vuIndex>(_Fd_, _Fs_, _Ft_); }
else {
if (_Ft_ != _Fs_) {
mVUallocVIa<vuIndex>(gprT1, _Fs_);
@ -639,7 +637,7 @@ microVUf(void) mVU_ISUB() {
microVUf(void) mVU_ISUBIU() {
microVU* mVU = mVUx;
if (!recPass) {}
if (!recPass) { mVUanalyzeIALU2<vuIndex>(_Fs_, _Ft_); }
else {
mVUallocVIa<vuIndex>(gprT1, _Fs_);
SUB16ItoR(gprT1, _Imm12_);
@ -653,7 +651,7 @@ microVUf(void) mVU_ISUBIU() {
microVUf(void) mVU_MFIR() {
microVU* mVU = mVUx;
if (!recPass) { /*If (!_Ft_) nop();*/ }
if (!recPass) { if (!_Ft_) { mVUinfo |= _isNOP; } analyzeVIreg1(_Fs_); analyzeReg2(_Ft_); }
else {
mVUallocVIa<vuIndex>(gprT1, _Fs_);
MOVSX32R16toR(gprT1, gprT1);
@ -665,7 +663,7 @@ microVUf(void) mVU_MFIR() {
microVUf(void) mVU_MFP() {
microVU* mVU = mVUx;
if (!recPass) { /*If (!_Ft_) nop();*/ }
if (!recPass) { mVUanalyzeMFP<vuIndex>(_Ft_); }
else {
getPreg(xmmFt);
mVUsaveReg<vuIndex>(xmmFt, (uptr)&mVU->regs->VF[_Ft_].UL[0], _X_Y_Z_W);
@ -674,7 +672,7 @@ microVUf(void) mVU_MFP() {
microVUf(void) mVU_MOVE() {
microVU* mVU = mVUx;
if (!recPass) { /*If (!_Ft_ || (_Ft_ == _Fs_)) nop();*/ }
if (!recPass) { if (!_Ft_ || (_Ft_ == _Fs_)) { mVUinfo |= _isNOP; } analyzeReg1(_Fs_); analyzeReg2(_Ft_); }
else {
mVUloadReg<vuIndex>(xmmT1, (uptr)&mVU->regs->VF[_Fs_].UL[0], _X_Y_Z_W);
mVUsaveReg<vuIndex>(xmmT1, (uptr)&mVU->regs->VF[_Ft_].UL[0], _X_Y_Z_W);
@ -683,7 +681,7 @@ microVUf(void) mVU_MOVE() {
microVUf(void) mVU_MR32() {
microVU* mVU = mVUx;
if (!recPass) { /*If (!_Ft_) nop();*/ }
if (!recPass) { mVUanalyzeMR32<vuIndex>(_Fs_, _Ft_); }
else {
mVUloadReg<vuIndex>(xmmT1, (uptr)&mVU->regs->VF[_Fs_].UL[0], (_X_Y_Z_W == 8) ? 4 : 15);
if (_X_Y_Z_W != 8) { SSE2_PSHUFD_XMM_to_XMM(xmmT1, xmmT1, 0x39); }
@ -693,7 +691,7 @@ microVUf(void) mVU_MR32() {
microVUf(void) mVU_MTIR() {
microVU* mVU = mVUx;
if (!recPass) {}
if (!recPass) { if (!_Ft_) { mVUinfo |= _isNOP; } analyzeReg5(_Fs_, _Fsf_); analyzeVIreg2(_Ft_, 1); }
else {
MOVZX32M16toR(gprT1, (uptr)&mVU->regs->VF[_Fs_].UL[_Fsf_]);
mVUallocVIb<vuIndex>(gprT1, _Ft_);
@ -706,7 +704,7 @@ microVUf(void) mVU_MTIR() {
microVUf(void) mVU_ILW() {
microVU* mVU = mVUx;
if (!recPass) { /*If (!_Ft_) nop();*/ }
if (!recPass) { if (!_Ft_) { mVUinfo |= _isNOP; } analyzeVIreg1(_Fs_); analyzeVIreg2(_Ft_, 4); }
else {
if (!_Fs_) {
MOVZX32M16toR( gprT1, (uptr)mVU->regs->Mem + getVUmem(_Imm11_) + offsetSS );
@ -725,7 +723,7 @@ microVUf(void) mVU_ILW() {
microVUf(void) mVU_ILWR() {
microVU* mVU = mVUx;
if (!recPass) { /*If (!_Ft_) nop();*/ }
if (!recPass) { if (!_Ft_) { mVUinfo |= _isNOP; } analyzeVIreg1(_Fs_); analyzeVIreg2(_Ft_, 4); }
else {
if (!_Fs_) {
MOVZX32M16toR(gprT1, (uptr)mVU->regs->Mem + offsetSS);
@ -747,7 +745,7 @@ microVUf(void) mVU_ILWR() {
microVUf(void) mVU_ISW() {
microVU* mVU = mVUx;
if (!recPass) {}
if (!recPass) { analyzeVIreg1(_Fs_); analyzeVIreg1(_Ft_); }
else {
if (!_Fs_) {
int imm = getVUmem(_Imm11_);
@ -772,7 +770,7 @@ microVUf(void) mVU_ISW() {
microVUf(void) mVU_ISWR() {
microVU* mVU = mVUx;
if (!recPass) {}
if (!recPass) { analyzeVIreg1(_Fs_); analyzeVIreg1(_Ft_); }
else {
if (!_Fs_) {
mVUallocVIa<vuIndex>(gprT1, _Ft_);
@ -1006,7 +1004,7 @@ microVUf(void) mVU_WAITQ() {
microVUf(void) mVU_XTOP() {
microVU* mVU = mVUx;
if (!recPass) { if (!_Ft_) { mVUinfo |= _isNOP; return; } analyzeVIreg2(_Ft_, 1); }
if (!recPass) { if (!_Ft_) { mVUinfo |= _isNOP; } analyzeVIreg2(_Ft_, 1); }
else {
MOVZX32M16toR( gprT1, (uptr)&mVU->regs->vifRegs->top);
mVUallocVIb<vuIndex>(gprT1, _Ft_);
@ -1015,7 +1013,7 @@ microVUf(void) mVU_XTOP() {
microVUf(void) mVU_XITOP() {
microVU* mVU = mVUx;
if (!recPass) { if (!_Ft_) { mVUinfo |= _isNOP; return; } analyzeVIreg2(_Ft_, 1); }
if (!recPass) { if (!_Ft_) { mVUinfo |= _isNOP; } analyzeVIreg2(_Ft_, 1); }
else {
MOVZX32M16toR( gprT1, (uptr)&mVU->regs->vifRegs->itop );
mVUallocVIb<vuIndex>(gprT1, _Ft_);
@ -1056,63 +1054,89 @@ microVUf(void) mVU_XGKICK() {
microVUf(void) mVU_B() {
microVU* mVU = mVUx;
mVUbranch = 1;
if (!recPass) { /*mVUinfo |= _isBranch2;*/ }
}
microVUf(void) mVU_BAL() {
microVU* mVU = mVUx;
mVUbranch = 1;
if (!recPass) { analyzeVIreg2(_Ft_, 1); }
else {
MOV32ItoR(gprT1, (xPC + (2 * 8)) & 0xffff);
mVUallocVIb<vuIndex>(gprT1, _Ft_);
}
mVUbranch = 2;
if (!recPass) { /*mVUinfo |= _isBranch2;*/ analyzeVIreg2(_Ft_, 1); }
else {}
}
microVUf(void) mVU_IBEQ() {
microVU* mVU = mVUx;
mVUbranch = 2;
mVUbranch = 3;
if (!recPass) { mVUanalyzeBranch2<vuIndex>(_Fs_, _Ft_); }
else {}
else {
if (memReadIs) MOV32MtoR(gprT1, (uptr)mVU->VIbackup[0]);
else mVUallocVIa<vuIndex>(gprT1, _Fs_);
if (memReadIt) XOR32MtoR(gprT1, (uptr)mVU->VIbackup[0]);
else { mVUallocVIa<vuIndex>(gprT2, _Ft_); XOR32RtoR(gprT1, gprT2); }
MOV32RtoM((uptr)mVU->branch, gprT1);
}
}
microVUf(void) mVU_IBGEZ() {
microVU* mVU = mVUx;
mVUbranch = 2;
mVUbranch = 4;
if (!recPass) { mVUanalyzeBranch1<vuIndex>(_Fs_); }
else {}
else {
if (memReadIs) MOV32MtoR(gprT1, (uptr)mVU->VIbackup[0]);
else mVUallocVIa<vuIndex>(gprT1, _Fs_);
//SHR32ItoR(gprT1, 15);
MOV32RtoM((uptr)mVU->branch, gprT1);
}
}
microVUf(void) mVU_IBGTZ() {
microVU* mVU = mVUx;
mVUbranch = 2;
mVUbranch = 5;
if (!recPass) { mVUanalyzeBranch1<vuIndex>(_Fs_); }
else {}
else {
if (memReadIs) MOV32MtoR(gprT1, (uptr)mVU->VIbackup[0]);
else mVUallocVIa<vuIndex>(gprT1, _Fs_);
MOV32RtoM((uptr)mVU->branch, gprT1);
}
}
microVUf(void) mVU_IBLEZ() {
microVU* mVU = mVUx;
mVUbranch = 2;
mVUbranch = 6;
if (!recPass) { mVUanalyzeBranch1<vuIndex>(_Fs_); }
else {}
else {
if (memReadIs) MOV32MtoR(gprT1, (uptr)mVU->VIbackup[0]);
else mVUallocVIa<vuIndex>(gprT1, _Fs_);
MOV32RtoM((uptr)mVU->branch, gprT1);
}
}
microVUf(void) mVU_IBLTZ() {
microVU* mVU = mVUx;
mVUbranch = 2;
mVUbranch = 7;
if (!recPass) { mVUanalyzeBranch1<vuIndex>(_Fs_); }
else {}
else {
if (memReadIs) MOV32MtoR(gprT1, (uptr)mVU->VIbackup[0]);
else mVUallocVIa<vuIndex>(gprT1, _Fs_);
//SHR32ItoR(gprT1, 15);
MOV32RtoM((uptr)mVU->branch, gprT1);
}
}
microVUf(void) mVU_IBNE() {
microVU* mVU = mVUx;
mVUbranch = 2;
mVUbranch = 8;
if (!recPass) { mVUanalyzeBranch2<vuIndex>(_Fs_, _Ft_); }
else {}
else {
if (memReadIs) MOV32MtoR(gprT1, (uptr)mVU->VIbackup[0]);
else mVUallocVIa<vuIndex>(gprT1, _Fs_);
if (memReadIt) XOR32MtoR(gprT1, (uptr)mVU->VIbackup[0]);
else { mVUallocVIa<vuIndex>(gprT2, _Ft_); XOR32RtoR(gprT1, gprT2); }
MOV32RtoM((uptr)mVU->branch, gprT1);
}
}
microVUf(void) mVU_JR() {
microVU* mVU = mVUx;
mVUbranch = 3;
mVUbranch = 9;
if (!recPass) { mVUanalyzeBranch1<vuIndex>(_Fs_); }
else {}
}
microVUf(void) mVU_JALR() {
microVU* mVU = mVUx;
mVUbranch = 3;
mVUbranch = 10;
if (!recPass) { mVUanalyzeBranch1<vuIndex>(_Fs_); analyzeVIreg2(_Ft_, 1); }
else {}
}
#endif //PCSX2_MICROVU

View File

@ -143,6 +143,7 @@ declareAllVariables
#define mVUallocInfo mVU->prog.prog[mVU->prog.cur].allocInfo
#define mVUbranch mVUallocInfo.branch
#define mVUcycles mVUallocInfo.cycles
#define mVUcount mVUallocInfo.count
#define mVUstall mVUallocInfo.maxStall
#define mVUregs mVUallocInfo.regs
#define mVUregsTemp mVUallocInfo.regsTemp
@ -153,6 +154,7 @@ declareAllVariables
#define curI mVUcurProg.data[iPC]
#define setCode() { mVU->code = curI; }
#define incPC(x) { iPC = ((iPC + x) & (mVU->progSize-1)); setCode(); }
#define incPC2(x) { iPC = ((iPC + x) & (mVU->progSize-1)); }
#define incCycles(x) { mVUincCycles<vuIndex>(x); }
#define _isNOP (1<<0) // Skip Lower Instruction
@ -181,6 +183,7 @@ declareAllVariables
#define _memReadIt (1<<24) // Read If (VI reg) from memory (used by branches)
#define _writesVI (1<<25) // Current Instruction writes to VI
#define _swapOps (1<<26) // Runs Lower Instruction Before Upper Instruction
//#define _isBranch2 (1<<27) // Cur Instruction is a Branch that writes VI regs (BAL/JALR)
#define isNOP (mVUinfo & (1<<0))
#define isBranch (mVUinfo & (1<<1))
@ -208,6 +211,7 @@ declareAllVariables
#define memReadIt (mVUinfo & (1<<24))
#define writesVI (mVUinfo & (1<<25))
#define swapOps (mVUinfo & (1<<26))
//#define isBranch2 (mVUinfo & (1<<27))
#define isMMX(_VIreg_) (_VIreg_ >= 1 && _VIreg_ <=9)
#define mmVI(_VIreg_) (_VIreg_ - 1)
@ -226,7 +230,7 @@ declareAllVariables
#define mVUdebug1() {}
#endif
#define mVUcachCheck(x) { \
uptr diff = mVU->ptr - mVU->cache; \
if (diff > x) { Console::Error("microVU Error: Program went over its cache limit. Size = %x", params diff); } \
#define mVUcachCheck(start, limit) { \
uptr diff = mVU->ptr - start; \
if (diff >= limit) { Console::Error("microVU Error: Program went over its cache limit. Size = %x", params diff); } \
}
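The reworked macro now takes the slice base and its byte limit, so mVUcleanUp can validate each program against its own region instead of the whole cache. A standalone sketch of the same check:
#include <cstdio>
#include <cstdint>
// Plain C++ equivalent of the reworked mVUcachCheck(start, limit) above.
void cacheCheck(const uint8_t* ptr, const uint8_t* start, uintptr_t limit) {
    uintptr_t diff = static_cast<uintptr_t>(ptr - start);
    if (diff >= limit)
        std::printf("microVU Error: Program went over its cache limit. Size = %x\n",
                    static_cast<unsigned>(diff));
}
int main() {
    static uint8_t slice[0x1000];
    cacheCheck(slice + sizeof(slice), slice, sizeof(slice)); // exactly at the limit: warns
    return 0;
}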

View File

@ -251,7 +251,7 @@ microVUt(void) mVUaddrFix(int gprReg) {
u8 *jmpA, *jmpB;
CMP32ItoR(EAX, 0x400);
jmpA = JL8(0); // if addr >= 0x4000, reads VU1's VF regs and VI regs
AND32ItoR(EAX, 0x43f);
AND32ItoR(EAX, 0x43f); // ToDo: there's a potential problem if VU0 overrides VU1's VF0/VI0 regs!
jmpB = JMP8(0);
x86SetJ8(jmpA);
AND32ItoR(EAX, 0xff); // if addr < 0x4000, wrap around
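For context, the compare operates on a quadword index (0x400 quadwords equals 0x4000 bytes), so VU0 data addresses at or above 0x4000 fall through to VU1's register area while lower addresses wrap inside VU0's 4KB data memory. A hedged sketch of that mapping, assuming the masks shown above:
#include <cstdio>
#include <cstdint>
// Mirrors the masks in mVUaddrFix (addr is a quadword index, as the 0x400 compare suggests).
uint32_t addrFix(uint32_t addr) {
    if (addr >= 0x400) return addr & 0x43f; // maps into VU1's VF/VI register area
    return addr & 0xff;                     // otherwise wrap inside VU0's 4KB (0x100 qword) data mem
}
int main() {
    std::printf("0x420 -> 0x%x\n", addrFix(0x420)); // stays in the VU1 reg area
    std::printf("0x123 -> 0x%x\n", addrFix(0x123)); // wraps to 0x23
    return 0;
}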