From ed65172308a01e8725ed23f5a3836ac6e33b4008 Mon Sep 17 00:00:00 2001 From: Lioncash Date: Thu, 31 Jul 2014 20:23:50 -0400 Subject: [PATCH] Use bool constants instead of magic numbers in microVU --- pcsx2/x86/microVU.cpp | 4 +- pcsx2/x86/microVU_Analyze.inl | 38 +++++-- pcsx2/x86/microVU_Branch.inl | 22 ++-- pcsx2/x86/microVU_Compile.inl | 88 +++++++++------ pcsx2/x86/microVU_Flags.inl | 71 +++++++++--- pcsx2/x86/microVU_IR.h | 40 ++++--- pcsx2/x86/microVU_Log.inl | 36 +++--- pcsx2/x86/microVU_Lower.inl | 60 ++++++---- pcsx2/x86/microVU_Macro.inl | 18 +-- pcsx2/x86/microVU_Misc.h | 22 ++-- pcsx2/x86/microVU_Misc.inl | 28 ++--- pcsx2/x86/microVU_Upper.inl | 205 +++++++++++++++++----------------- 12 files changed, 367 insertions(+), 265 deletions(-) diff --git a/pcsx2/x86/microVU.cpp b/pcsx2/x86/microVU.cpp index 16e8676e87..2947eb431f 100644 --- a/pcsx2/x86/microVU.cpp +++ b/pcsx2/x86/microVU.cpp @@ -239,9 +239,9 @@ __fi bool mVUcmpProg(microVU& mVU, microProgram& prog, const bool cmpWholeProg) mVU.prog.cleared = 0; mVU.prog.cur = &prog; mVU.prog.isSame = cmpWholeProg ? 1 : -1; - return 1; + return true; } - return 0; + return false; } // Searches for Cached Micro Program and sets prog.cur to it (returns entry-point to program) diff --git a/pcsx2/x86/microVU_Analyze.inl b/pcsx2/x86/microVU_Analyze.inl index dbe6fcc685..6160cb267b 100644 --- a/pcsx2/x86/microVU_Analyze.inl +++ b/pcsx2/x86/microVU_Analyze.inl @@ -393,12 +393,18 @@ static void analyzeBranchVI(mV, int xReg, bool& infoVar) { DevCon.Warning("microVU%d: Branch VI-Delay with %d cycle stall (%d) [%04x]", getIndex, mVUstall, i, xPC); } if (i == mVUcount) { - bool warn = 0; - if (i == 1) warn = 1; + bool warn = false; + + if (i == 1) + warn = true; + if (mVUpBlock->pState.viBackUp == xReg) { DevCon.WriteLn(Color_Green, "microVU%d: Loading Branch VI value from previous block", getIndex); - if (i == 0) warn = 1; - infoVar = 1; + + if (i == 0) + warn = true; + + infoVar = true; j = i; i++; } if (warn) DevCon.Warning("microVU%d: Branch VI-Delay with small block (%d) [%04x]", getIndex, i, xPC); @@ -417,17 +423,20 @@ static void analyzeBranchVI(mV, int xReg, bool& infoVar) { cyc += mVUstall + 1; incPC2(-2); } + if (i) { if (!infoVar) { iPC = bPC; incPC2(-2*(j+1)); - mVUlow.backupVI = 1; - infoVar = 1; + mVUlow.backupVI = true; + infoVar = true; } iPC = bPC; DevCon.WriteLn(Color_Green, "microVU%d: Branch VI-Delay (%d) [%04x][%03d]", getIndex, j+1, xPC, mVU.prog.cur->idx); } - else iPC = bPC; + else { + iPC = bPC; + } } /* @@ -470,14 +479,17 @@ __fi void analyzeBranchVI(mV, int xReg, bool& infoVar) { // Branch in Branch Delay-Slots __ri int mVUbranchCheck(mV) { - if (!mVUcount) return 0; + if (!mVUcount) + return 0; + incPC(-2); + if (mVUlow.branch) { u32 branchType = mVUlow.branch; if (doBranchInDelaySlot) { - mVUlow.badBranch = 1; + mVUlow.badBranch = true; incPC(2); - mVUlow.evilBranch = 1; + mVUlow.evilBranch = true; if(mVUlow.branch == 2 || mVUlow.branch == 10) //Needs linking, we can only guess this if the next is not conditional { @@ -493,7 +505,9 @@ __ri int mVUbranchCheck(mV) { mVUregs.blockType = 2; } //Else it is conditional, so we need to do some nasty processing later in microVU_Branch.inl } - else mVUregs.blockType = 2; //Second branch doesn't need linking, so can let it run its evil block course (MGS2 for testing) + else { + mVUregs.blockType = 2; //Second branch doesn't need linking, so can let it run its evil block course (MGS2 for testing) + } mVUregs.needExactMatch |= 7; // This might not be necessary, but w/e... 
mVUregs.flagInfo = 0; @@ -505,7 +519,7 @@ __ri int mVUbranchCheck(mV) { } else { incPC(2); - mVUlow.isNOP = 1; + mVUlow.isNOP = true; DevCon.Warning("microVU%d: %s in %s delay slot! [%04x]", mVU.index, branchSTR[mVUlow.branch&0xf], branchSTR[branchType&0xf], xPC); return 0; diff --git a/pcsx2/x86/microVU_Branch.inl b/pcsx2/x86/microVU_Branch.inl index e2e2f92c67..267b858f20 100644 --- a/pcsx2/x86/microVU_Branch.inl +++ b/pcsx2/x86/microVU_Branch.inl @@ -48,12 +48,14 @@ void mVUDTendProgram(mV, microFlagCycles* mFC, int isEbit) { qInst = mVU.q; pInst = mVU.p; if (mVUinfo.doDivFlag) { - sFLAG.doFlag = 1; + sFLAG.doFlag = true; sFLAG.write = fStatus; mVUdivSet(mVU); } //Run any pending XGKick, providing we've got to it. - if (mVUinfo.doXGKICK && xPC >= mVUinfo.XGKICKPC) { mVU_XGKICK_DELAY(mVU, 1); } + if (mVUinfo.doXGKICK && xPC >= mVUinfo.XGKICKPC) { + mVU_XGKICK_DELAY(mVU, true); + } if (doEarlyExit(mVU)) { if (!isVU1) xCALL(mVU0clearlpStateJIT); else xCALL(mVU1clearlpStateJIT); @@ -106,14 +108,18 @@ void mVUendProgram(mV, microFlagCycles* mFC, int isEbit) { qInst = mVU.q; pInst = mVU.p; if (mVUinfo.doDivFlag) { - sFLAG.doFlag = 1; + sFLAG.doFlag = true; sFLAG.write = fStatus; mVUdivSet(mVU); } - if (mVUinfo.doXGKICK) { mVU_XGKICK_DELAY(mVU, 1); } + if (mVUinfo.doXGKICK) { + mVU_XGKICK_DELAY(mVU, true); + } if (doEarlyExit(mVU)) { - if (!isVU1) xCALL(mVU0clearlpStateJIT); - else xCALL(mVU1clearlpStateJIT); + if (!isVU1) + xCALL(mVU0clearlpStateJIT); + else + xCALL(mVU1clearlpStateJIT); } } @@ -451,5 +457,7 @@ void normJump(mV, microFlagCycles& mFC) { xMOV(ptr32[&mVU.regs().VI[REG_TPC].UL], gprT1); xJMP(mVU.exitFunct); } - else normJumpCompile(mVU, mFC, 0); + else { + normJumpCompile(mVU, mFC, false); + } } diff --git a/pcsx2/x86/microVU_Compile.inl b/pcsx2/x86/microVU_Compile.inl index 02f8a7a95d..d5624d034c 100644 --- a/pcsx2/x86/microVU_Compile.inl +++ b/pcsx2/x86/microVU_Compile.inl @@ -69,18 +69,18 @@ void mVUsetupRange(microVU& mVU, s32 pc, bool isStartPC) { } if (mVUrange.start <= pc) { mVUrange.end = pc; - bool mergedRange = 0; + bool mergedRange = false; s32 rStart = mVUrange.start; s32 rEnd = mVUrange.end; deque::iterator it(ranges->begin()); for (++it; it != ranges->end(); ++it) { if((it[0].start >= rStart) && (it[0].start <= rEnd)) { it[0].end = max(it[0].end, rEnd); - mergedRange = 1; + mergedRange = true; } else if ((it[0].end >= rStart) && (it[0].end <= rEnd)) { it[0].start = min(it[0].start, rStart); - mergedRange = 1; + mergedRange = true; } } if (mergedRange) { @@ -208,13 +208,13 @@ __ri void branchWarning(mV) { if (mVUup.eBit && mVUbranch) { incPC(2); Console.Error("microVU%d Warning: Branch in E-bit delay slot! 
[%04x]", mVU.index, xPC); - mVUlow.isNOP = 1; + mVUlow.isNOP = true; } else incPC(2); if (mVUinfo.isBdelay && !mVUlow.evilBranch) { // Check if VI Reg Written to on Branch Delay Slot Instruction if (mVUlow.VI_write.reg && mVUlow.VI_write.used && !mVUlow.readFlags) { - mVUlow.backupVI = 1; + mVUlow.backupVI = true; mVUregs.viBackUp = mVUlow.VI_write.reg; } } @@ -223,7 +223,7 @@ __ri void branchWarning(mV) { __fi void eBitPass1(mV, int& branch) { if (mVUregs.blockType != 1) { branch = 1; - mVUup.eBit = 1; + mVUup.eBit = true; } } @@ -306,8 +306,10 @@ void mVUsetCycles(mV) { mVUincCycles(mVU, mVUstall); // If upper Op && lower Op write to same VF reg: if ((mVUregsTemp.VFreg[0] == mVUregsTemp.VFreg[1]) && mVUregsTemp.VFreg[0]) { - if (mVUregsTemp.r || mVUregsTemp.VI) mVUlow.noWriteVF = 1; - else mVUlow.isNOP = 1; // If lower Op doesn't modify anything else, then make it a NOP + if (mVUregsTemp.r || mVUregsTemp.VI) + mVUlow.noWriteVF = true; + else + mVUlow.isNOP = true; // If lower Op doesn't modify anything else, then make it a NOP } // If lower op reads a VF reg that upper Op writes to: if ((mVUlow.VF_read[0].reg || mVUlow.VF_read[1].reg) && mVUup.VF_write.reg) { @@ -466,8 +468,8 @@ void* mVUcompileSingleInstruction(microVU& mVU, u32 startPC, uptr pState, microF mVUcheckBadOp(mVU); if (curI & _Ebit_) { eBitPass1(mVU, branch); DevCon.Warning("E Bit on single instruction");} if (curI & _DTbit_) { branch = 4; DevCon.Warning("D Bit on single instruction");} - if (curI & _Mbit_) { mVUup.mBit = 1; DevCon.Warning("M Bit on single instruction");} - if (curI & _Ibit_) { mVUlow.isNOP = 1; mVUup.iBit = 1; DevCon.Warning("I Bit on single instruction");} + if (curI & _Mbit_) { mVUup.mBit = true; DevCon.Warning("M Bit on single instruction");} + if (curI & _Ibit_) { mVUlow.isNOP = true; mVUup.iBit = true; DevCon.Warning("I Bit on single instruction");} else { incPC(-1); mVUopL(mVU, 0); incPC(1); } mVUsetCycles(mVU); mVUinfo.readQ = mVU.q; @@ -479,21 +481,25 @@ void* mVUcompileSingleInstruction(microVU& mVU, u32 startPC, uptr pState, microF incPC(1); - mVUsetFlags(mVU, mFC); // Sets Up Flag instances - mVUoptimizePipeState(mVU); // Optimize the End Pipeline State for nicer Block Linking - mVUdebugPrintBlocks(mVU,0);// Prints Start/End PC of blocks executed, for debugging... - mVUtestCycles(mVU); // Update VU Cycles and Exit Early if Necessary + mVUsetFlags(mVU, mFC); // Sets Up Flag instances + mVUoptimizePipeState(mVU); // Optimize the End Pipeline State for nicer Block Linking + mVUdebugPrintBlocks(mVU, false); // Prints Start/End PC of blocks executed, for debugging... 
+ mVUtestCycles(mVU); // Update VU Cycles and Exit Early if Necessary // Second Pass iPC = startPC / 4; setCode(); - if (mVUup.mBit) { xOR(ptr32[&mVU.regs().flags], VUFLAG_MFLAGSET); } + if (mVUup.mBit) { + xOR(ptr32[&mVU.regs().flags], VUFLAG_MFLAGSET); + } mVUexecuteInstruction(mVU); mVUincCycles(mVU, 1); //Just incase the is XGKick - if (mVUinfo.doXGKICK) { mVU_XGKICK_DELAY(mVU, 1); } - + if (mVUinfo.doXGKICK) { + mVU_XGKICK_DELAY(mVU, true); + } + return thisPtr; } @@ -547,12 +553,12 @@ void* mVUcompile(microVU& mVU, u32 startPC, uptr pState) { mVUcheckBadOp(mVU); if (curI & _Ebit_) { eBitPass1(mVU, branch); } - if (curI & _Mbit_) { mVUup.mBit = 1; } + if (curI & _Mbit_) { mVUup.mBit = true; } - if (curI & _Ibit_) { mVUlow.isNOP = 1; mVUup.iBit = 1; } - else { incPC(-1); mVUopL(mVU, 0); incPC(1); } - if (curI & _Dbit_) { mVUup.dBit = 1; } - if (curI & _Tbit_) { mVUup.tBit = 1; } + if (curI & _Ibit_) { mVUlow.isNOP = true; mVUup.iBit = true; } + else { incPC(-1); mVUopL(mVU, 0); incPC(1); } + if (curI & _Dbit_) { mVUup.dBit = true; } + if (curI & _Tbit_) { mVUup.tBit = true; } mVUsetCycles(mVU); mVUinfo.readQ = mVU.q; mVUinfo.writeQ = !mVU.q; @@ -591,10 +597,10 @@ void* mVUcompile(microVU& mVU, u32 startPC, uptr pState) { mVUregs.vi15 = (doConstProp && mVUconstReg[15].isValid) ? (u16)mVUconstReg[15].regValue : 0; mVUregs.vi15v = (doConstProp && mVUconstReg[15].isValid) ? 1 : 0; - mVUsetFlags(mVU, mFC); // Sets Up Flag instances - mVUoptimizePipeState(mVU); // Optimize the End Pipeline State for nicer Block Linking - mVUdebugPrintBlocks(mVU,0);// Prints Start/End PC of blocks executed, for debugging... - mVUtestCycles(mVU); // Update VU Cycles and Exit Early if Necessary + mVUsetFlags(mVU, mFC); // Sets Up Flag instances + mVUoptimizePipeState(mVU); // Optimize the End Pipeline State for nicer Block Linking + mVUdebugPrintBlocks(mVU, false); // Prints Start/End PC of blocks executed, for debugging... + mVUtestCycles(mVU); // Update VU Cycles and Exit Early if Necessary // Second Pass iPC = mVUstartPC; @@ -607,15 +613,29 @@ void* mVUcompile(microVU& mVU, u32 startPC, uptr pState) { mVUexecuteInstruction(mVU); if(!mVUinfo.isBdelay && !mVUlow.branch) //T/D Bit on branch is handled after the branch, branch delay slots are executed. 
{ - if(mVUup.tBit) { mVUDoTBit(mVU, &mFC); } - else if(mVUup.dBit && doDBitHandling) { mVUDoDBit(mVU, &mFC); } + if (mVUup.tBit) { + mVUDoTBit(mVU, &mFC); + } + else if (mVUup.dBit && doDBitHandling) { + mVUDoDBit(mVU, &mFC); + } + } + + if (mVUinfo.doXGKICK) { + mVU_XGKICK_DELAY(mVU, true); + } + + if (isEvilBlock) { + mVUsetupRange(mVU, xPC, false); + normJumpCompile(mVU, mFC, true); + return thisPtr; + } + else if (!mVUinfo.isBdelay) { + incPC(1); } - if (mVUinfo.doXGKICK) { mVU_XGKICK_DELAY(mVU, 1); } - if (isEvilBlock) { mVUsetupRange(mVU, xPC, 0); normJumpCompile(mVU, mFC, 1); return thisPtr; } - else if (!mVUinfo.isBdelay) { incPC(1); } else { - mVUsetupRange(mVU, xPC, 0); - mVUdebugPrintBlocks(mVU,1); + mVUsetupRange(mVU, xPC, false); + mVUdebugPrintBlocks(mVU, true); incPC(-3); // Go back to branch opcode switch (mVUlow.branch) { @@ -634,7 +654,7 @@ void* mVUcompile(microVU& mVU, u32 startPC, uptr pState) { if ((x == endCount) && (x!=1)) { Console.Error("microVU%d: Possible infinite compiling loop!", mVU.index); } // E-bit End - mVUsetupRange(mVU, xPC-8, 0); + mVUsetupRange(mVU, xPC-8, false); mVUendProgram(mVU, &mFC, 1); return thisPtr; } diff --git a/pcsx2/x86/microVU_Flags.inl b/pcsx2/x86/microVU_Flags.inl index e90014bee7..32dd33eb9e 100644 --- a/pcsx2/x86/microVU_Flags.inl +++ b/pcsx2/x86/microVU_Flags.inl @@ -29,20 +29,32 @@ __fi void mVUdivSet(mV) { __fi void mVUstatusFlagOp(mV) { int curPC = iPC; int i = mVUcount; - bool runLoop = 1; - if (sFLAG.doFlag) { sFLAG.doNonSticky = 1; } + bool runLoop = true; + + if (sFLAG.doFlag) { + sFLAG.doNonSticky = true; + } else { for (; i > 0; i--) { incPC2(-2); - if (sFLAG.doNonSticky) { runLoop = 0; break; } - else if (sFLAG.doFlag) { sFLAG.doNonSticky = 1; break; } + if (sFLAG.doNonSticky) { + runLoop = false; + break; + } + else if (sFLAG.doFlag) { + sFLAG.doNonSticky = true; + break; + } } } if (runLoop) { for (; i > 0; i--) { incPC2(-2); - if (sFLAG.doNonSticky) break; - sFLAG.doFlag = 0; + + if (sFLAG.doNonSticky) + break; + + sFLAG.doFlag = false; } } iPC = curPC; @@ -52,7 +64,9 @@ __fi void mVUstatusFlagOp(mV) { int findFlagInst(int* fFlag, int cycles) { int j = 0, jValue = -1; for(int i = 0; i < 4; i++) { - if ((fFlag[i] <= cycles) && (fFlag[i] > jValue)) { j = i; jValue = fFlag[i]; } + if ((fFlag[i] <= cycles) && (fFlag[i] > jValue)) { + j = i; jValue = fFlag[i]; + } } return j; } @@ -90,9 +104,14 @@ __fi void mVUsetFlags(mV, microFlagCycles& mFC) { // Ensure last ~4+ instructions update mac/status flags (if next block's first 4 instructions will read them) for(int i = mVUcount; i > 0; i--, aCount++) { if (sFLAG.doFlag) { - if (__Mac) mFLAG.doFlag = 1; - if (__Status) sFLAG.doNonSticky = 1; - if (aCount >= 4) break; + if (__Mac) + mFLAG.doFlag = true; + + if (__Status) + sFLAG.doNonSticky = true; + + if (aCount >= 4) + break; } incPC2(-2); } @@ -176,11 +195,31 @@ __fi void mVUsetFlags(mV, microFlagCycles& mFC) { mFLAG.lastWrite = doMFlagInsts ? (xM-1) & 3 : 0; cFLAG.lastWrite = doCFlagInsts ? 
(xC-1) & 3 : 0; - if (sHackCond) { sFLAG.doFlag = 0; } - if (sFLAG.doFlag) { if(noFlagOpts){sFLAG.doNonSticky=1;mFLAG.doFlag=1;}} - if (sFlagCond) { mFC.xStatus[xS] = mFC.cycles + 4; xS = (xS+1) & 3; } - if (mFLAG.doFlag) { mFC.xMac [xM] = mFC.cycles + 4; xM = (xM+1) & 3; } - if (cFLAG.doFlag) { mFC.xClip [xC] = mFC.cycles + 4; xC = (xC+1) & 3; } + if (sHackCond) { + sFLAG.doFlag = false; + } + + if (sFLAG.doFlag) { + if(noFlagOpts) { + sFLAG.doNonSticky = true; + mFLAG.doFlag = true; + } + } + + if (sFlagCond) { + mFC.xStatus[xS] = mFC.cycles + 4; + xS = (xS+1) & 3; + } + + if (mFLAG.doFlag) { + mFC.xMac[xM] = mFC.cycles + 4; + xM = (xM+1) & 3; + } + + if (cFLAG.doFlag) { + mFC.xClip[xC] = mFC.cycles + 4; + xC = (xC+1) & 3; + } mFC.cycles++; incPC2(2); @@ -221,7 +260,7 @@ __fi void mVUsetupFlags(mV, microFlagCycles& mFC) { if (mVUregs.needExactMatch) DevCon.Error("mVU ERROR!!!"); } - const bool pf = 0; // Print Flag Info + const bool pf = false; // Print Flag Info if (pf) DevCon.WriteLn("mVU%d - [#%d][sPC=%04x][bPC=%04x][mVUBranch=%d][branch=%d]", mVU.index, mVU.prog.cur->idx, mVUstartPC/2*8, xPC, mVUbranch, mVUlow.branch); diff --git a/pcsx2/x86/microVU_IR.h b/pcsx2/x86/microVU_IR.h index 023a256b6b..6e0ba1c837 100644 --- a/pcsx2/x86/microVU_IR.h +++ b/pcsx2/x86/microVU_IR.h @@ -253,14 +253,15 @@ public: // Flushes all allocated registers (i.e. writes-back to memory all modified registers). // If clearState is 0, then it keeps cached reg data valid // If clearState is 1, then it invalidates all cached reg data after write-back - void flushAll(bool clearState = 1) { + void flushAll(bool clearState = true) { for(int i = 0; i < xmmTotal; i++) { writeBackReg(xmm(i)); - if (clearState) clearReg(i); + if (clearState) + clearReg(i); } } - void TDwritebackAll(bool clearState = 0) { + void TDwritebackAll(bool clearState = false) { for(int i = 0; i < xmmTotal; i++) { microMapXMM& mapX = xmmMap[xmm(i).Id]; @@ -293,21 +294,24 @@ public: // Writes back modified reg to memory. // If all vectors modified, then keeps the VF reg cached in the xmm register. // If reg was not modified, then keeps the VF reg cached in the xmm register. 
- void writeBackReg(const xmm& reg, bool invalidateRegs = 1) { + void writeBackReg(const xmm& reg, bool invalidateRegs = true) { microMapXMM& mapX = xmmMap[reg.Id]; if ((mapX.VFreg > 0) && mapX.xyzw) { // Reg was modified and not Temp or vf0 if (mapX.VFreg == 33) xMOVSS(ptr32[&getVI(REG_I)], reg); else if (mapX.VFreg == 32) - mVUsaveReg(reg, ptr[®s().ACC], mapX.xyzw, 1); + mVUsaveReg(reg, ptr[®s().ACC], mapX.xyzw, true); else - mVUsaveReg(reg, ptr[&getVF(mapX.VFreg)], mapX.xyzw, 1); + mVUsaveReg(reg, ptr[&getVF(mapX.VFreg)], mapX.xyzw, true); if (invalidateRegs) { for(int i = 0; i < xmmTotal; i++) { microMapXMM& mapI = xmmMap[i]; - if ((i == reg.Id) || mapI.isNeeded) continue; + + if ((i == reg.Id) || mapI.isNeeded) + continue; + if (mapI.VFreg == mapX.VFreg) { if (mapI.xyzw && mapI.xyzw < 0xf) DevCon.Error("microVU Error: writeBackReg() [%d]", mapI.VFreg); clearReg(i); // Invalidate any Cached Regs of same vf Reg @@ -315,9 +319,9 @@ public: } } if (mapX.xyzw == 0xf) { // Make Cached Reg if All Vectors were Modified - mapX.count = counter; - mapX.xyzw = 0; - mapX.isNeeded = 0; + mapX.count = counter; + mapX.xyzw = 0; + mapX.isNeeded = false; return; } clearReg(reg); @@ -337,7 +341,7 @@ public: if ((reg.Id < 0) || (reg.Id >= xmmTotal)) return; // Sometimes xmmPQ hits this microMapXMM& clear = xmmMap[reg.Id]; - clear.isNeeded = 0; + clear.isNeeded = false; if (clear.xyzw) { // Reg was modified if (clear.VFreg > 0) { int mergeRegs = 0; @@ -350,7 +354,7 @@ public: DevCon.Error("microVU Error: clearNeeded() [%d]", mapI.VFreg); } if (mergeRegs == 1) { - mVUmergeRegs(xmm(i), reg, clear.xyzw, 1); + mVUmergeRegs(xmm(i), reg, clear.xyzw, true); mapI.xyzw = 0xf; mapI.count = counter; mergeRegs = 2; @@ -417,8 +421,8 @@ public: xmmMap[z].VFreg = vfWriteReg; xmmMap[z].xyzw = xyzw; } - xmmMap[z].count = counter; - xmmMap[z].isNeeded = 1; + xmmMap[z].count = counter; + xmmMap[z].isNeeded = true; return xmm::GetInstance(z); } } @@ -428,13 +432,13 @@ public: writeBackReg(xmmX); if (vfWriteReg >= 0) { // Reg Will Be Modified (allow partial reg loading) - if ((vfLoadReg == 0) && !(xyzw & 1)) + if ((vfLoadReg == 0) && !(xyzw & 1)) xPXOR(xmmX, xmmX); else if (vfLoadReg == 33) - loadIreg (xmmX, xyzw); + loadIreg(xmmX, xyzw); else if (vfLoadReg == 32) mVUloadReg(xmmX, ptr[®s().ACC], xyzw); - else if (vfLoadReg >= 0) + else if (vfLoadReg >= 0) mVUloadReg(xmmX, ptr[&getVF(vfLoadReg)], xyzw); xmmMap[x].VFreg = vfWriteReg; @@ -452,7 +456,7 @@ public: xmmMap[x].xyzw = 0; } xmmMap[x].count = counter; - xmmMap[x].isNeeded = 1; + xmmMap[x].isNeeded = true; return xmmX; } }; diff --git a/pcsx2/x86/microVU_Log.inl b/pcsx2/x86/microVU_Log.inl index 210ae20850..93295b6533 100644 --- a/pcsx2/x86/microVU_Log.inl +++ b/pcsx2/x86/microVU_Log.inl @@ -35,13 +35,13 @@ _mVUt void __mVULog(const char* fmt, ...) 
{ mVU.logFile->Flush(); } -#define commaIf() { if (bitX[6]) { mVUlog(","); bitX[6] = 0; } } +#define commaIf() { if (bitX[6]) { mVUlog(","); bitX[6] = false; } } #include "AppConfig.h" void __mVUdumpProgram(microVU& mVU, microProgram& prog) { bool bitX[7]; - int delay = 0; + int delay = 0; int bBranch = mVUbranch; int bCode = mVU.code; int bPC = iPC; @@ -67,19 +67,19 @@ void __mVUdumpProgram(microVU& mVU, microProgram& prog) { if (mVUbranch) { delay = 1; mVUbranch = 0; } mVU.code = prog.data[i+1]; - bitX[0] = 0; - bitX[1] = 0; - bitX[2] = 0; - bitX[3] = 0; - bitX[4] = 0; - bitX[5] = 0; - bitX[6] = 0; + bitX[0] = false; + bitX[1] = false; + bitX[2] = false; + bitX[3] = false; + bitX[4] = false; + bitX[5] = false; + bitX[6] = false; - if (mVU.code & _Ibit_) { bitX[0] = 1; bitX[5] = 1; } - if (mVU.code & _Ebit_) { bitX[1] = 1; bitX[5] = 1; delay = 2; } - if (mVU.code & _Mbit_) { bitX[2] = 1; bitX[5] = 1; } - if (mVU.code & _Dbit_) { bitX[3] = 1; bitX[5] = 1; } - if (mVU.code & _Tbit_) { bitX[4] = 1; bitX[5] = 1; } + if (mVU.code & _Ibit_) { bitX[0] = true; bitX[5] = true; } + if (mVU.code & _Ebit_) { bitX[1] = true; bitX[5] = true; delay = 2; } + if (mVU.code & _Mbit_) { bitX[2] = true; bitX[5] = true; } + if (mVU.code & _Dbit_) { bitX[3] = true; bitX[5] = true; } + if (mVU.code & _Tbit_) { bitX[4] = true; bitX[5] = true; } if (delay == 2) { mVUlog(""); } if (delay == 1) { mVUlog(""); } @@ -91,10 +91,10 @@ void __mVUdumpProgram(microVU& mVU, microProgram& prog) { if (bitX[5]) { mVUlog(" ("); - if (bitX[0]) { mVUlog("I"); bitX[6] = 1; } - if (bitX[1]) { commaIf(); mVUlog("E"); bitX[6] = 1; } - if (bitX[2]) { commaIf(); mVUlog("M"); bitX[6] = 1; } - if (bitX[3]) { commaIf(); mVUlog("D"); bitX[6] = 1; } + if (bitX[0]) { mVUlog("I"); bitX[6] = true; } + if (bitX[1]) { commaIf(); mVUlog("E"); bitX[6] = true; } + if (bitX[2]) { commaIf(); mVUlog("M"); bitX[6] = true; } + if (bitX[3]) { commaIf(); mVUlog("D"); bitX[6] = true; } if (bitX[4]) { commaIf(); mVUlog("T"); } mVUlog(")"); } diff --git a/pcsx2/x86/microVU_Lower.inl b/pcsx2/x86/microVU_Lower.inl index a899e6284d..3991ab2a8f 100644 --- a/pcsx2/x86/microVU_Lower.inl +++ b/pcsx2/x86/microVU_Lower.inl @@ -74,7 +74,7 @@ mVUop(mVU_DIV) { cjmp.SetTarget(); xMOV(ptr32[&mVU.divFlag], 0); // Clear I/D flags SSE_DIVSS(mVU, Fs, Ft); - mVUclamp1(Fs, t1, 8, 1); + mVUclamp1(Fs, t1, 8, true); djmp.SetTarget(); writeQreg(Fs, mVUinfo.writeQ); @@ -133,7 +133,7 @@ mVUop(mVU_RSQRT) { xForwardJump8 djmp; ajmp.SetTarget(); SSE_DIVSS(mVU, Fs, Ft); - mVUclamp1(Fs, t1, 8, 1); + mVUclamp1(Fs, t1, 8, true); djmp.SetTarget(); writeQreg(Fs, mVUinfo.writeQ); @@ -516,7 +516,7 @@ mVUop(mVU_FCOR) { } mVUop(mVU_FCSET) { - pass1 { cFLAG.doFlag = 1; } + pass1 { cFLAG.doFlag = true; } pass2 { xMOV(gprT1, _Imm24_); mVUallocCFLAGb(mVU, gprT1, cFLAG.write); @@ -758,7 +758,9 @@ mVUop(mVU_ISUBIU) { mVUop(mVU_MFIR) { pass1 { - if (!_Ft_) { mVUlow.isNOP = 1; } + if (!_Ft_) { + mVUlow.isNOP = true; + } analyzeVIreg1(mVU, _Is_, mVUlow.VI_read[0]); analyzeReg2 (mVU, _Ft_, mVUlow.VF_write, 1); } @@ -810,7 +812,9 @@ mVUop(mVU_MR32) { mVUop(mVU_MTIR) { pass1 { - if (!_It_) mVUlow.isNOP = 1; + if (!_It_) + mVUlow.isNOP = true; + analyzeReg5 (mVU, _Fs_, _Fsf_, mVUlow.VF_read[0]); analyzeVIreg2(mVU, _It_, mVUlow.VI_write, 1); } @@ -830,7 +834,9 @@ mVUop(mVU_MTIR) { mVUop(mVU_ILW) { pass1 { - if (!_It_) mVUlow.isNOP = 1; + if (!_It_) + mVUlow.isNOP = true; + analyzeVIreg1(mVU, _Is_, mVUlow.VI_read[0]); analyzeVIreg2(mVU, _It_, mVUlow.VI_write, 4); } @@ -842,8 +848,9 @@ mVUop(mVU_ILW) { mVUaddrFix 
(mVU, gprT2); ptr += gprT2; } - else + else { ptr += getVUmem(_Imm11_); + } xMOVZX(gprT1, ptr16[ptr]); mVUallocVIb(mVU, gprT1, _It_); mVU.profiler.EmitOp(opILW); @@ -853,7 +860,9 @@ mVUop(mVU_ILW) { mVUop(mVU_ILWR) { pass1 { - if (!_It_) mVUlow.isNOP = 1; + if (!_It_) + mVUlow.isNOP = true; + analyzeVIreg1(mVU, _Is_, mVUlow.VI_read[0]); analyzeVIreg2(mVU, _It_, mVUlow.VI_write, 4); } @@ -926,7 +935,7 @@ mVUop(mVU_ISWR) { //------------------------------------------------------------------ mVUop(mVU_LQ) { - pass1 { mVUanalyzeLQ(mVU, _Ft_, _Is_, 0); } + pass1 { mVUanalyzeLQ(mVU, _Ft_, _Is_, false); } pass2 { xAddressVoid ptr(mVU.regs().Mem); if (_Is_) { @@ -946,7 +955,7 @@ mVUop(mVU_LQ) { } mVUop(mVU_LQD) { - pass1 { mVUanalyzeLQ(mVU, _Ft_, _Is_, 1); } + pass1 { mVUanalyzeLQ(mVU, _Ft_, _Is_, true); } pass2 { xAddressVoid ptr(mVU.regs().Mem); if (_Is_ || isVU0) { // Access VU1 regs mem-map in !_Is_ case @@ -968,7 +977,7 @@ mVUop(mVU_LQD) { } mVUop(mVU_LQI) { - pass1 { mVUanalyzeLQ(mVU, _Ft_, _Is_, 1); } + pass1 { mVUanalyzeLQ(mVU, _Ft_, _Is_, true); } pass2 { xAddressVoid ptr(mVU.regs().Mem); if (_Is_) { @@ -994,7 +1003,7 @@ mVUop(mVU_LQI) { //------------------------------------------------------------------ mVUop(mVU_SQ) { - pass1 { mVUanalyzeSQ(mVU, _Fs_, _It_, 0); } + pass1 { mVUanalyzeSQ(mVU, _Fs_, _It_, false); } pass2 { xAddressVoid ptr(mVU.regs().Mem); if (_It_) { @@ -1014,7 +1023,7 @@ mVUop(mVU_SQ) { } mVUop(mVU_SQD) { - pass1 { mVUanalyzeSQ(mVU, _Fs_, _It_, 1); } + pass1 { mVUanalyzeSQ(mVU, _Fs_, _It_, true); } pass2 { xAddressVoid ptr(mVU.regs().Mem); if (_It_ || isVU0) {// Access VU1 regs mem-map in !_It_ case @@ -1034,7 +1043,7 @@ mVUop(mVU_SQD) { } mVUop(mVU_SQI) { - pass1 { mVUanalyzeSQ(mVU, _Fs_, _It_, 1); } + pass1 { mVUanalyzeSQ(mVU, _Fs_, _It_, true); } pass2 { xAddressVoid ptr(mVU.regs().Mem); if (_It_) { @@ -1084,7 +1093,7 @@ static __fi void mVU_RGET_(mV, const x32& Rreg) { } mVUop(mVU_RGET) { - pass1 { mVUanalyzeR2(mVU, _Ft_, 1); } + pass1 { mVUanalyzeR2(mVU, _Ft_, true); } pass2 { xMOV(gprT1, ptr32[Rmem]); mVU_RGET_(mVU, gprT1); @@ -1094,7 +1103,7 @@ mVUop(mVU_RGET) { } mVUop(mVU_RNEXT) { - pass1 { mVUanalyzeR2(mVU, _Ft_, 0); } + pass1 { mVUanalyzeR2(mVU, _Ft_, false); } pass2 { // algorithm from www.project-fao.org xMOV(gprT3, ptr32[Rmem]); @@ -1155,7 +1164,9 @@ mVUop(mVU_WAITQ) { mVUop(mVU_XTOP) { pass1 { - if (!_It_) mVUlow.isNOP = 1; + if (!_It_) + mVUlow.isNOP = true; + analyzeVIreg2(mVU, _It_, mVUlow.VI_write, 1); } pass2 { @@ -1168,8 +1179,11 @@ mVUop(mVU_XTOP) { mVUop(mVU_XITOP) { pass1 { - if (!_It_) mVUlow.isNOP = 1; - analyzeVIreg2(mVU, _It_, mVUlow.VI_write, 1); } + if (!_It_) + mVUlow.isNOP = true; + + analyzeVIreg2(mVU, _It_, mVUlow.VI_write, 1); + } pass2 { xMOVZX(gprT1, ptr16[&mVU.getVifRegs().itop]); xAND (gprT1, isVU1 ? 
0x3ff : 0xff); @@ -1238,7 +1252,7 @@ void setBranchA(mP, int x, int _x_) { pass1 { if (_Imm11_ == 1 && !_x_) { DevCon.WriteLn(Color_Green, "microVU%d: Branch Optimization", mVU.index); - mVUlow.isNOP = 1; + mVUlow.isNOP = true; return; } mVUbranch = x; @@ -1275,7 +1289,7 @@ void condEvilBranch(mV, int JMPcc) { mVUop(mVU_B) { setBranchA(mX, 1, 0); - pass1 { mVUanalyzeNormBranch(mVU, 0, 0); } + pass1 { mVUanalyzeNormBranch(mVU, 0, false); } pass2 { if (mVUlow.badBranch) { xMOV(ptr32[&mVU.badBranch], branchAddrN); } if (mVUlow.evilBranch) { xMOV(ptr32[&mVU.evilBranch], branchAddr); } @@ -1286,7 +1300,7 @@ mVUop(mVU_B) { mVUop(mVU_BAL) { setBranchA(mX, 2, _It_); - pass1 { mVUanalyzeNormBranch(mVU, _It_, 1); } + pass1 { mVUanalyzeNormBranch(mVU, _It_, true); } pass2 { if(!mVUlow.evilBranch) { @@ -1402,7 +1416,7 @@ void normJumpPass2(mV) { mVUop(mVU_JR) { mVUbranch = 9; - pass1 { mVUanalyzeJump(mVU, _Is_, 0, 0); } + pass1 { mVUanalyzeJump(mVU, _Is_, 0, false); } pass2 { normJumpPass2(mVU); mVU.profiler.EmitOp(opJR); } pass3 { mVUlog("JR [vi%02d]", _Fs_); } } diff --git a/pcsx2/x86/microVU_Macro.inl b/pcsx2/x86/microVU_Macro.inl index 021f894bb6..3ada688a59 100644 --- a/pcsx2/x86/microVU_Macro.inl +++ b/pcsx2/x86/microVU_Macro.inl @@ -45,12 +45,12 @@ void setupMacroOp(int mode, const char* opName) { microVU0.prog.IRinfo.info[0].cFlag.lastWrite = 0xff; } if (mode & 0x10) { // Update Status/Mac Flags - microVU0.prog.IRinfo.info[0].sFlag.doFlag = 1; - microVU0.prog.IRinfo.info[0].sFlag.doNonSticky = 1; - microVU0.prog.IRinfo.info[0].sFlag.write = 0; - microVU0.prog.IRinfo.info[0].sFlag.lastWrite = 0; - microVU0.prog.IRinfo.info[0].mFlag.doFlag = 1; - microVU0.prog.IRinfo.info[0].mFlag.write = 0xff; + microVU0.prog.IRinfo.info[0].sFlag.doFlag = true; + microVU0.prog.IRinfo.info[0].sFlag.doNonSticky = true; + microVU0.prog.IRinfo.info[0].sFlag.write = 0; + microVU0.prog.IRinfo.info[0].sFlag.lastWrite = 0; + microVU0.prog.IRinfo.info[0].mFlag.doFlag = true; + microVU0.prog.IRinfo.info[0].mFlag.write = 0xff; xMOV(gprF0, ptr32[&vu0Regs.VI[REG_STATUS_FLAG].UL]); } @@ -266,7 +266,7 @@ static void recCFC2() { printCOP2("CFC2"); - COP2_Interlock(0); + COP2_Interlock(false); if (!_Rt_) return; iFlushCall(FLUSH_EVERYTHING); @@ -345,7 +345,7 @@ static void recCTC2() { static void recQMFC2() { printCOP2("QMFC2"); - COP2_Interlock(0); + COP2_Interlock(false); if (!_Rt_) return; iFlushCall(FLUSH_EVERYTHING); @@ -359,7 +359,7 @@ static void recQMFC2() { static void recQMTC2() { printCOP2("QMTC2"); - COP2_Interlock(1); + COP2_Interlock(true); if (!_Rd_) return; iFlushCall(FLUSH_EVERYTHING); diff --git a/pcsx2/x86/microVU_Misc.h b/pcsx2/x86/microVU_Misc.h index 226c1abe2e..19f70c13a4 100644 --- a/pcsx2/x86/microVU_Misc.h +++ b/pcsx2/x86/microVU_Misc.h @@ -287,14 +287,14 @@ typedef u32 (__fastcall *mVUCall)(void*, void*); //------------------------------------------------------------------ // Reg Alloc -static const bool doRegAlloc = 1; // Set to 0 to flush every 32bit Instruction +static const bool doRegAlloc = true; // Set to false to flush every 32bit Instruction // This turns off reg alloc for the most part, but reg alloc will still // be done within instructions... 
Also on doSwapOp() regAlloc is needed between // Lower and Upper instructions, so in this case it flushes after the full // 64bit instruction (lower and upper) // No Flag Optimizations -static const bool noFlagOpts = 0; // Set to 1 to disable all flag setting optimizations +static const bool noFlagOpts = false; // Set to true to disable all flag setting optimizations // Note: The flag optimizations this disables should all be harmless, so // this option is mainly just for debugging... it effectively forces mVU // to always update Mac and Status Flags (both sticky and non-sticky) whenever @@ -302,16 +302,16 @@ static const bool noFlagOpts = 0; // Set to 1 to disable all flag setting optimi // flag instances between blocks... // Multiple Flag Instances -static const bool doSFlagInsts = 1; // Set to 1 to enable multiple status flag instances -static const bool doMFlagInsts = 1; // Set to 1 to enable multiple mac flag instances -static const bool doCFlagInsts = 1; // Set to 1 to enable multiple clip flag instances +static const bool doSFlagInsts = true; // Set to true to enable multiple status flag instances +static const bool doMFlagInsts = true; // Set to true to enable multiple mac flag instances +static const bool doCFlagInsts = true; // Set to true to enable multiple clip flag instances // This is the correct behavior of the VU's. Due to the pipeline of the VU's // there can be up to 4 different instances of values to keep track of // for the 3 different types of flags: Status, Mac, Clip flags. // Setting one of these to 0 acts as if there is only 1 instance of the // corresponding flag, which may be useful when debugging flag pipeline bugs. -static const int doFullFlagOpt = 0; // Set above 0 to enable full flag optimization +static const int doFullFlagOpt = false; // Set above 0 to enable full flag optimization // This attempts to eliminate some flag shuffling at the end of blocks, but // can end up creating more recompiled code. The max amount of times this optimization // is performed per block can be set by changing the doFullFlagOpt value to be that limit. @@ -320,27 +320,27 @@ static const int doFullFlagOpt = 0; // Set above 0 to enable full flag optimizat // Note: This optimization doesn't really seem to be benefitial and is buggy... // Branch in Branch Delay Slots -static const bool doBranchInDelaySlot = 1; // Set to 1 to enable evil-branches +static const bool doBranchInDelaySlot = true; // Set to true to enable evil-branches // This attempts to emulate the correct behavior for branches in branch delay // slots. It is evil that games do this, and handling the different possible // cases is tricky and bug prone. If this option is disabled then the second // branch is treated as a NOP and effectively ignored. // Constant Propagation -static const bool doConstProp = 0; // Set to 1 to turn on vi15 const propagation +static const bool doConstProp = false; // Set to true to turn on vi15 const propagation // Enables Constant Propagation for Jumps based on vi15 'link-register' // allowing us to know many indirect jump target addresses. // Makes GoW a lot slower due to extra recompilation time and extra code-gen! // Indirect Jump Caching -static const bool doJumpCaching = 1; // Set to 1 to enable jump caching +static const bool doJumpCaching = true; // Set to true to enable jump caching // Indirect jumps (JR/JALR) will remember the entry points to their previously // jumped-to addresses.
This allows us to skip the microBlockManager::search() // routine that is performed every indirect jump in order to find a block within a // program that matches the correct pipeline state. // Indirect Jumps are part of same cached microProgram -static const bool doJumpAsSameProgram = 0; // Set to 1 to treat jumps as same program +static const bool doJumpAsSameProgram = false; // Set to true to treat jumps as same program // Enabling this treats indirect jumps (JR/JALR) as part of the same microProgram // when determining the valid ranges for the microProgram cache. Disabling this // counts indirect jumps as separate cached microPrograms which generally leads @@ -350,7 +350,7 @@ static const bool doJumpAsSameProgram = 0; // Set to 1 to treat jumps as same pr // Note: You MUST disable doJumpCaching if you enable this option. // Handling of D-Bit in Micro Programs -static const bool doDBitHandling = 0; +static const bool doDBitHandling = false; // This flag shouldn't be enabled in released versions of games. Any games which // need this method of pausing the VU should be using the T-Bit instead, however // this could prove useful for VU debugging. diff --git a/pcsx2/x86/microVU_Misc.inl b/pcsx2/x86/microVU_Misc.inl index 83492a6396..411ad45375 100644 --- a/pcsx2/x86/microVU_Misc.inl +++ b/pcsx2/x86/microVU_Misc.inl @@ -474,65 +474,65 @@ void ADD_SS_TriAceHack(microVU& mVU, const xmm& to, const xmm& from) void SSE_MAXPS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg) { if (CHECK_VU_MINMAXHACK) { xMAX.PS(to, from); } - else { MIN_MAX_PS(mVU, to, from, t1, t2, 0); } + else { MIN_MAX_PS(mVU, to, from, t1, t2, false); } } void SSE_MINPS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg) { if (CHECK_VU_MINMAXHACK) { xMIN.PS(to, from); } - else { MIN_MAX_PS(mVU, to, from, t1, t2, 1); } + else { MIN_MAX_PS(mVU, to, from, t1, t2, true); } } void SSE_MAXSS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg) { if (CHECK_VU_MINMAXHACK) { xMAX.SS(to, from); } - else { MIN_MAX_SS(mVU, to, from, t1, 0); } + else { MIN_MAX_SS(mVU, to, from, t1, false); } } void SSE_MINSS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg) { if (CHECK_VU_MINMAXHACK) { xMIN.SS(to, from); } - else { MIN_MAX_SS(mVU, to, from, t1, 1); } + else { MIN_MAX_SS(mVU, to, from, t1, true); } } void SSE_ADD2SS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg) { - if (!CHECK_VUADDSUBHACK) { clampOp(xADD.SS, 0); } + if (!CHECK_VUADDSUBHACK) { clampOp(xADD.SS, false); } else { ADD_SS_TriAceHack(mVU, to, from); } } // Does same as SSE_ADDPS since tri-ace games only need SS implementation of VUADDSUBHACK... 
void SSE_ADD2PS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg) { - clampOp(xADD.PS, 1); + clampOp(xADD.PS, true); } void SSE_ADDPS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg) { - clampOp(xADD.PS, 1); + clampOp(xADD.PS, true); } void SSE_ADDSS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg) { - clampOp(xADD.SS, 0); + clampOp(xADD.SS, false); } void SSE_SUBPS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg) { - clampOp(xSUB.PS, 1); + clampOp(xSUB.PS, true); } void SSE_SUBSS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg) { - clampOp(xSUB.SS, 0); + clampOp(xSUB.SS, false); } void SSE_MULPS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg) { - clampOp(xMUL.PS, 1); + clampOp(xMUL.PS, true); } void SSE_MULSS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg) { - clampOp(xMUL.SS, 0); + clampOp(xMUL.SS, false); } void SSE_DIVPS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg) { - clampOp(xDIV.PS, 1); + clampOp(xDIV.PS, true); } void SSE_DIVSS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg) { - clampOp(xDIV.SS, 0); + clampOp(xDIV.SS, false); } //------------------------------------------------------------------ diff --git a/pcsx2/x86/microVU_Upper.inl b/pcsx2/x86/microVU_Upper.inl index 5b655fa114..86e976f0c4 100644 --- a/pcsx2/x86/microVU_Upper.inl +++ b/pcsx2/x86/microVU_Upper.inl @@ -124,7 +124,10 @@ static void setupPass1(microVU& mVU, int opCase, bool isACC, bool noFlagUpdate) opCase2 { mVUanalyzeFMAC3(mVU, ((isACC) ? 0 : _Fd_), _Fs_, _Ft_); } opCase3 { mVUanalyzeFMAC1(mVU, ((isACC) ? 0 : _Fd_), _Fs_, 0); } opCase4 { mVUanalyzeFMAC1(mVU, ((isACC) ? 
0 : _Fd_), _Fs_, 0); } - if (noFlagUpdate) { sFLAG.doFlag = 0; } + + if (noFlagUpdate) { + sFLAG.doFlag = false; + } } // Safer to force 0 as the result for X minus X than to do actual subtraction @@ -135,10 +138,10 @@ static bool doSafeSub(microVU& mVU, int opCase, int opType, bool isACC) { xPXOR(Fs, Fs); // Set to Positive 0 mVUupdateFlags(mVU, Fs); mVU.regAlloc->clearNeeded(Fs); - return 1; + return true; } } - return 0; + return false; } // Sets Up Ft Reg for Normal, BC, I, and Q Cases @@ -203,13 +206,13 @@ static void mVU_FMACa(microVU& mVU, int recPass, int opCase, int opType, bool is // MADDA/MSUBA Opcodes static void mVU_FMACb(microVU& mVU, int recPass, int opCase, int opType, microOpcode opEnum, int clampType) { - pass1 { setupPass1(mVU, opCase, 1, 0); } + pass1 { setupPass1(mVU, opCase, true, false); } pass2 { xmm Fs, Ft, ACC, tempFt; setupFtReg(mVU, Ft, tempFt, opCase); Fs = mVU.regAlloc->allocReg(_Fs_, 0, _X_Y_Z_W); - ACC = mVU.regAlloc->allocReg(32, 32, 0xf, 0); + ACC = mVU.regAlloc->allocReg(32, 32, 0xf, false); if (_XYZW_SS2) { xPSHUF.D(ACC, ACC, shuffleSS(_X_Y_Z_W)); } @@ -239,13 +242,13 @@ static void mVU_FMACb(microVU& mVU, int recPass, int opCase, int opType, microOp mVU.regAlloc->clearNeeded(Ft); mVU.profiler.EmitOp(opEnum); } - pass3 { mVU_printOP(mVU, opCase, opEnum, 1); } + pass3 { mVU_printOP(mVU, opCase, opEnum, true); } pass4 { mVUregs.needExactMatch |= 8; } } // MADD Opcodes static void mVU_FMACc(microVU& mVU, int recPass, int opCase, microOpcode opEnum, int clampType) { - pass1 { setupPass1(mVU, opCase, 0, 0); } + pass1 { setupPass1(mVU, opCase, false, false); } pass2 { xmm Fs, Ft, ACC, tempFt; setupFtReg(mVU, Ft, tempFt, opCase); @@ -271,13 +274,13 @@ static void mVU_FMACc(microVU& mVU, int recPass, int opCase, microOpcode opEnum, mVU.regAlloc->clearNeeded(ACC); mVU.profiler.EmitOp(opEnum); } - pass3 { mVU_printOP(mVU, opCase, opEnum, 0); } + pass3 { mVU_printOP(mVU, opCase, opEnum, false); } pass4 { mVUregs.needExactMatch |= 8; } } // MSUB Opcodes static void mVU_FMACd(microVU& mVU, int recPass, int opCase, microOpcode opEnum, int clampType) { - pass1 { setupPass1(mVU, opCase, 0, 0); } + pass1 { setupPass1(mVU, opCase, false, false); } pass2 { xmm Fs, Ft, Fd, tempFt; setupFtReg(mVU, Ft, tempFt, opCase); @@ -299,7 +302,7 @@ static void mVU_FMACd(microVU& mVU, int recPass, int opCase, microOpcode opEnum, mVU.regAlloc->clearNeeded(Fs); mVU.profiler.EmitOp(opEnum); } - pass3 { mVU_printOP(mVU, opCase, opEnum, 0); } + pass3 { mVU_printOP(mVU, opCase, opEnum, false); } pass4 { mVUregs.needExactMatch |= 8; } } @@ -448,94 +451,94 @@ mVUop(mVU_CLIP) { // Micro VU Micromode Upper instructions //------------------------------------------------------------------ -mVUop(mVU_ADD) { mVU_FMACa(mVU, recPass, 1, 0, 0, opADD, 0); } -mVUop(mVU_ADDi) { mVU_FMACa(mVU, recPass, 3, 5, 0, opADDi, 0); } -mVUop(mVU_ADDq) { mVU_FMACa(mVU, recPass, 4, 0, 0, opADDq, 0); } -mVUop(mVU_ADDx) { mVU_FMACa(mVU, recPass, 2, 0, 0, opADDx, 0); } -mVUop(mVU_ADDy) { mVU_FMACa(mVU, recPass, 2, 0, 0, opADDy, 0); } -mVUop(mVU_ADDz) { mVU_FMACa(mVU, recPass, 2, 0, 0, opADDz, 0); } -mVUop(mVU_ADDw) { mVU_FMACa(mVU, recPass, 2, 0, 0, opADDw, 0); } -mVUop(mVU_ADDA) { mVU_FMACa(mVU, recPass, 1, 0, 1, opADDA, 0); } -mVUop(mVU_ADDAi) { mVU_FMACa(mVU, recPass, 3, 0, 1, opADDAi, 0); } -mVUop(mVU_ADDAq) { mVU_FMACa(mVU, recPass, 4, 0, 1, opADDAq, 0); } -mVUop(mVU_ADDAx) { mVU_FMACa(mVU, recPass, 2, 0, 1, opADDAx, 0); } -mVUop(mVU_ADDAy) { mVU_FMACa(mVU, recPass, 2, 0, 1, opADDAy, 0); } -mVUop(mVU_ADDAz) { 
mVU_FMACa(mVU, recPass, 2, 0, 1, opADDAz, 0); } -mVUop(mVU_ADDAw) { mVU_FMACa(mVU, recPass, 2, 0, 1, opADDAw, 0); } -mVUop(mVU_SUB) { mVU_FMACa(mVU, recPass, 1, 1, 0, opSUB, (_XYZW_PS)?(cFs|cFt):0); } // Clamp (Kingdom Hearts I (VU0)) -mVUop(mVU_SUBi) { mVU_FMACa(mVU, recPass, 3, 1, 0, opSUBi, (_XYZW_PS)?(cFs|cFt):0); } // Clamp (Kingdom Hearts I (VU0)) -mVUop(mVU_SUBq) { mVU_FMACa(mVU, recPass, 4, 1, 0, opSUBq, (_XYZW_PS)?(cFs|cFt):0); } // Clamp (Kingdom Hearts I (VU0)) -mVUop(mVU_SUBx) { mVU_FMACa(mVU, recPass, 2, 1, 0, opSUBx, (_XYZW_PS)?(cFs|cFt):0); } // Clamp (Kingdom Hearts I (VU0)) -mVUop(mVU_SUBy) { mVU_FMACa(mVU, recPass, 2, 1, 0, opSUBy, (_XYZW_PS)?(cFs|cFt):0); } // Clamp (Kingdom Hearts I (VU0)) -mVUop(mVU_SUBz) { mVU_FMACa(mVU, recPass, 2, 1, 0, opSUBz, (_XYZW_PS)?(cFs|cFt):0); } // Clamp (Kingdom Hearts I (VU0)) -mVUop(mVU_SUBw) { mVU_FMACa(mVU, recPass, 2, 1, 0, opSUBw, (_XYZW_PS)?(cFs|cFt):0); } // Clamp (Kingdom Hearts I (VU0)) -mVUop(mVU_SUBA) { mVU_FMACa(mVU, recPass, 1, 1, 1, opSUBA, 0); } -mVUop(mVU_SUBAi) { mVU_FMACa(mVU, recPass, 3, 1, 1, opSUBAi, 0); } -mVUop(mVU_SUBAq) { mVU_FMACa(mVU, recPass, 4, 1, 1, opSUBAq, 0); } -mVUop(mVU_SUBAx) { mVU_FMACa(mVU, recPass, 2, 1, 1, opSUBAx, 0); } -mVUop(mVU_SUBAy) { mVU_FMACa(mVU, recPass, 2, 1, 1, opSUBAy, 0); } -mVUop(mVU_SUBAz) { mVU_FMACa(mVU, recPass, 2, 1, 1, opSUBAz, 0); } -mVUop(mVU_SUBAw) { mVU_FMACa(mVU, recPass, 2, 1, 1, opSUBAw, 0); } -mVUop(mVU_MUL) { mVU_FMACa(mVU, recPass, 1, 2, 0, opMUL, (_XYZW_PS)?(cFs|cFt):cFs); } // Clamp (TOTA, DoM, Ice Age (VU0)) -mVUop(mVU_MULi) { mVU_FMACa(mVU, recPass, 3, 2, 0, opMULi, (_XYZW_PS)?(cFs|cFt):cFs); } // Clamp (TOTA, DoM, Ice Age (VU0)) -mVUop(mVU_MULq) { mVU_FMACa(mVU, recPass, 4, 2, 0, opMULq, (_XYZW_PS)?(cFs|cFt):cFs); } // Clamp (TOTA, DoM, Ice Age (VU0)) -mVUop(mVU_MULx) { mVU_FMACa(mVU, recPass, 2, 2, 0, opMULx, (_XYZW_PS)?(cFs|cFt):cFs); } // Clamp (TOTA, DoM, Ice Age (vu0)) -mVUop(mVU_MULy) { mVU_FMACa(mVU, recPass, 2, 2, 0, opMULy, (_XYZW_PS)?(cFs|cFt):cFs); } // Clamp (TOTA, DoM, Ice Age (VU0)) -mVUop(mVU_MULz) { mVU_FMACa(mVU, recPass, 2, 2, 0, opMULz, (_XYZW_PS)?(cFs|cFt):cFs); } // Clamp (TOTA, DoM, Ice Age (VU0)) -mVUop(mVU_MULw) { mVU_FMACa(mVU, recPass, 2, 2, 0, opMULw, (_XYZW_PS)?(cFs|cFt):cFs); } // Clamp (TOTA, DoM, Ice Age (VU0)) -mVUop(mVU_MULA) { mVU_FMACa(mVU, recPass, 1, 2, 1, opMULA, 0); } -mVUop(mVU_MULAi) { mVU_FMACa(mVU, recPass, 3, 2, 1, opMULAi, 0); } -mVUop(mVU_MULAq) { mVU_FMACa(mVU, recPass, 4, 2, 1, opMULAq, 0); } -mVUop(mVU_MULAx) { mVU_FMACa(mVU, recPass, 2, 2, 1, opMULAx, cFs);} // Clamp (TOTA, DoM, ...) -mVUop(mVU_MULAy) { mVU_FMACa(mVU, recPass, 2, 2, 1, opMULAy, cFs);} // Clamp (TOTA, DoM, ...) -mVUop(mVU_MULAz) { mVU_FMACa(mVU, recPass, 2, 2, 1, opMULAz, cFs);} // Clamp (TOTA, DoM, ...) -mVUop(mVU_MULAw) { mVU_FMACa(mVU, recPass, 2, 2, 1, opMULAw, cFs);} // Clamp (TOTA, DoM, ...) -mVUop(mVU_MADD) { mVU_FMACc(mVU, recPass, 1, opMADD, 0); } -mVUop(mVU_MADDi) { mVU_FMACc(mVU, recPass, 3, opMADDi, 0); } -mVUop(mVU_MADDq) { mVU_FMACc(mVU, recPass, 4, opMADDq, 0); } -mVUop(mVU_MADDx) { mVU_FMACc(mVU, recPass, 2, opMADDx, cFs);} // Clamp (TOTA, DoM, ...) -mVUop(mVU_MADDy) { mVU_FMACc(mVU, recPass, 2, opMADDy, cFs);} // Clamp (TOTA, DoM, ...) -mVUop(mVU_MADDz) { mVU_FMACc(mVU, recPass, 2, opMADDz, cFs);} // Clamp (TOTA, DoM, ...) 
-mVUop(mVU_MADDw) { mVU_FMACc(mVU, recPass, 2, opMADDw, (isCOP2)?(cACC|cFt|cFs):cFs);} // Clamp (ICO (COP2), TOTA, DoM) -mVUop(mVU_MADDA) { mVU_FMACb(mVU, recPass, 1, 0, opMADDA, 0); } -mVUop(mVU_MADDAi) { mVU_FMACb(mVU, recPass, 3, 0, opMADDAi, 0); } -mVUop(mVU_MADDAq) { mVU_FMACb(mVU, recPass, 4, 0, opMADDAq, 0); } -mVUop(mVU_MADDAx) { mVU_FMACb(mVU, recPass, 2, 0, opMADDAx, cFs);} // Clamp (TOTA, DoM, ...) -mVUop(mVU_MADDAy) { mVU_FMACb(mVU, recPass, 2, 0, opMADDAy, cFs);} // Clamp (TOTA, DoM, ...) -mVUop(mVU_MADDAz) { mVU_FMACb(mVU, recPass, 2, 0, opMADDAz, cFs);} // Clamp (TOTA, DoM, ...) -mVUop(mVU_MADDAw) { mVU_FMACb(mVU, recPass, 2, 0, opMADDAw, cFs);} // Clamp (TOTA, DoM, ...) -mVUop(mVU_MSUB) { mVU_FMACd(mVU, recPass, 1, opMSUB, 0); } -mVUop(mVU_MSUBi) { mVU_FMACd(mVU, recPass, 3, opMSUBi, 0); } -mVUop(mVU_MSUBq) { mVU_FMACd(mVU, recPass, 4, opMSUBq, 0); } -mVUop(mVU_MSUBx) { mVU_FMACd(mVU, recPass, 2, opMSUBx, 0); } -mVUop(mVU_MSUBy) { mVU_FMACd(mVU, recPass, 2, opMSUBy, 0); } -mVUop(mVU_MSUBz) { mVU_FMACd(mVU, recPass, 2, opMSUBz, 0); } -mVUop(mVU_MSUBw) { mVU_FMACd(mVU, recPass, 2, opMSUBw, 0); } -mVUop(mVU_MSUBA) { mVU_FMACb(mVU, recPass, 1, 1, opMSUBA, 0); } -mVUop(mVU_MSUBAi) { mVU_FMACb(mVU, recPass, 3, 1, opMSUBAi, 0); } -mVUop(mVU_MSUBAq) { mVU_FMACb(mVU, recPass, 4, 1, opMSUBAq, 0); } -mVUop(mVU_MSUBAx) { mVU_FMACb(mVU, recPass, 2, 1, opMSUBAx, 0); } -mVUop(mVU_MSUBAy) { mVU_FMACb(mVU, recPass, 2, 1, opMSUBAy, 0); } -mVUop(mVU_MSUBAz) { mVU_FMACb(mVU, recPass, 2, 1, opMSUBAz, 0); } -mVUop(mVU_MSUBAw) { mVU_FMACb(mVU, recPass, 2, 1, opMSUBAw, 0); } -mVUop(mVU_MAX) { mVU_FMACa(mVU, recPass, 1, 3, 0, opMAX, 0); } -mVUop(mVU_MAXi) { mVU_FMACa(mVU, recPass, 3, 3, 0, opMAXi, 0); } -mVUop(mVU_MAXx) { mVU_FMACa(mVU, recPass, 2, 3, 0, opMAXx, 0); } -mVUop(mVU_MAXy) { mVU_FMACa(mVU, recPass, 2, 3, 0, opMAXy, 0); } -mVUop(mVU_MAXz) { mVU_FMACa(mVU, recPass, 2, 3, 0, opMAXz, 0); } -mVUop(mVU_MAXw) { mVU_FMACa(mVU, recPass, 2, 3, 0, opMAXw, 0); } -mVUop(mVU_MINI) { mVU_FMACa(mVU, recPass, 1, 4, 0, opMINI, 0); } -mVUop(mVU_MINIi) { mVU_FMACa(mVU, recPass, 3, 4, 0, opMINIi, 0); } -mVUop(mVU_MINIx) { mVU_FMACa(mVU, recPass, 2, 4, 0, opMINIx, 0); } -mVUop(mVU_MINIy) { mVU_FMACa(mVU, recPass, 2, 4, 0, opMINIy, 0); } -mVUop(mVU_MINIz) { mVU_FMACa(mVU, recPass, 2, 4, 0, opMINIz, 0); } -mVUop(mVU_MINIw) { mVU_FMACa(mVU, recPass, 2, 4, 0, opMINIw, 0); } -mVUop(mVU_FTOI0) { mVU_FTOIx(mX, NULL, opFTOI0); } -mVUop(mVU_FTOI4) { mVU_FTOIx(mX, mVUglob.FTOI_4, opFTOI4); } -mVUop(mVU_FTOI12) { mVU_FTOIx(mX, mVUglob.FTOI_12, opFTOI12); } -mVUop(mVU_FTOI15) { mVU_FTOIx(mX, mVUglob.FTOI_15, opFTOI15); } -mVUop(mVU_ITOF0) { mVU_ITOFx(mX, NULL, opITOF0); } -mVUop(mVU_ITOF4) { mVU_ITOFx(mX, mVUglob.ITOF_4, opITOF4); } -mVUop(mVU_ITOF12) { mVU_ITOFx(mX, mVUglob.ITOF_12, opITOF12); } -mVUop(mVU_ITOF15) { mVU_ITOFx(mX, mVUglob.ITOF_15, opITOF15); } -mVUop(mVU_NOP) { pass2 { mVU.profiler.EmitOp(opNOP); } pass3 { mVUlog("NOP"); } } +mVUop(mVU_ADD) { mVU_FMACa(mVU, recPass, 1, 0, false, opADD, 0); } +mVUop(mVU_ADDi) { mVU_FMACa(mVU, recPass, 3, 5, false, opADDi, 0); } +mVUop(mVU_ADDq) { mVU_FMACa(mVU, recPass, 4, 0, false, opADDq, 0); } +mVUop(mVU_ADDx) { mVU_FMACa(mVU, recPass, 2, 0, false, opADDx, 0); } +mVUop(mVU_ADDy) { mVU_FMACa(mVU, recPass, 2, 0, false, opADDy, 0); } +mVUop(mVU_ADDz) { mVU_FMACa(mVU, recPass, 2, 0, false, opADDz, 0); } +mVUop(mVU_ADDw) { mVU_FMACa(mVU, recPass, 2, 0, false, opADDw, 0); } +mVUop(mVU_ADDA) { mVU_FMACa(mVU, recPass, 1, 0, true, opADDA, 0); } +mVUop(mVU_ADDAi) { 
mVU_FMACa(mVU, recPass, 3, 0, true, opADDAi, 0); } +mVUop(mVU_ADDAq) { mVU_FMACa(mVU, recPass, 4, 0, true, opADDAq, 0); } +mVUop(mVU_ADDAx) { mVU_FMACa(mVU, recPass, 2, 0, true, opADDAx, 0); } +mVUop(mVU_ADDAy) { mVU_FMACa(mVU, recPass, 2, 0, true, opADDAy, 0); } +mVUop(mVU_ADDAz) { mVU_FMACa(mVU, recPass, 2, 0, true, opADDAz, 0); } +mVUop(mVU_ADDAw) { mVU_FMACa(mVU, recPass, 2, 0, true, opADDAw, 0); } +mVUop(mVU_SUB) { mVU_FMACa(mVU, recPass, 1, 1, false, opSUB, (_XYZW_PS)?(cFs|cFt):0); } // Clamp (Kingdom Hearts I (VU0)) +mVUop(mVU_SUBi) { mVU_FMACa(mVU, recPass, 3, 1, false, opSUBi, (_XYZW_PS)?(cFs|cFt):0); } // Clamp (Kingdom Hearts I (VU0)) +mVUop(mVU_SUBq) { mVU_FMACa(mVU, recPass, 4, 1, false, opSUBq, (_XYZW_PS)?(cFs|cFt):0); } // Clamp (Kingdom Hearts I (VU0)) +mVUop(mVU_SUBx) { mVU_FMACa(mVU, recPass, 2, 1, false, opSUBx, (_XYZW_PS)?(cFs|cFt):0); } // Clamp (Kingdom Hearts I (VU0)) +mVUop(mVU_SUBy) { mVU_FMACa(mVU, recPass, 2, 1, false, opSUBy, (_XYZW_PS)?(cFs|cFt):0); } // Clamp (Kingdom Hearts I (VU0)) +mVUop(mVU_SUBz) { mVU_FMACa(mVU, recPass, 2, 1, false, opSUBz, (_XYZW_PS)?(cFs|cFt):0); } // Clamp (Kingdom Hearts I (VU0)) +mVUop(mVU_SUBw) { mVU_FMACa(mVU, recPass, 2, 1, false, opSUBw, (_XYZW_PS)?(cFs|cFt):0); } // Clamp (Kingdom Hearts I (VU0)) +mVUop(mVU_SUBA) { mVU_FMACa(mVU, recPass, 1, 1, true, opSUBA, 0); } +mVUop(mVU_SUBAi) { mVU_FMACa(mVU, recPass, 3, 1, true, opSUBAi, 0); } +mVUop(mVU_SUBAq) { mVU_FMACa(mVU, recPass, 4, 1, true, opSUBAq, 0); } +mVUop(mVU_SUBAx) { mVU_FMACa(mVU, recPass, 2, 1, true, opSUBAx, 0); } +mVUop(mVU_SUBAy) { mVU_FMACa(mVU, recPass, 2, 1, true, opSUBAy, 0); } +mVUop(mVU_SUBAz) { mVU_FMACa(mVU, recPass, 2, 1, true, opSUBAz, 0); } +mVUop(mVU_SUBAw) { mVU_FMACa(mVU, recPass, 2, 1, true, opSUBAw, 0); } +mVUop(mVU_MUL) { mVU_FMACa(mVU, recPass, 1, 2, false, opMUL, (_XYZW_PS)?(cFs|cFt):cFs); } // Clamp (TOTA, DoM, Ice Age (VU0)) +mVUop(mVU_MULi) { mVU_FMACa(mVU, recPass, 3, 2, false, opMULi, (_XYZW_PS)?(cFs|cFt):cFs); } // Clamp (TOTA, DoM, Ice Age (VU0)) +mVUop(mVU_MULq) { mVU_FMACa(mVU, recPass, 4, 2, false, opMULq, (_XYZW_PS)?(cFs|cFt):cFs); } // Clamp (TOTA, DoM, Ice Age (VU0)) +mVUop(mVU_MULx) { mVU_FMACa(mVU, recPass, 2, 2, false, opMULx, (_XYZW_PS)?(cFs|cFt):cFs); } // Clamp (TOTA, DoM, Ice Age (vu0)) +mVUop(mVU_MULy) { mVU_FMACa(mVU, recPass, 2, 2, false, opMULy, (_XYZW_PS)?(cFs|cFt):cFs); } // Clamp (TOTA, DoM, Ice Age (VU0)) +mVUop(mVU_MULz) { mVU_FMACa(mVU, recPass, 2, 2, false, opMULz, (_XYZW_PS)?(cFs|cFt):cFs); } // Clamp (TOTA, DoM, Ice Age (VU0)) +mVUop(mVU_MULw) { mVU_FMACa(mVU, recPass, 2, 2, false, opMULw, (_XYZW_PS)?(cFs|cFt):cFs); } // Clamp (TOTA, DoM, Ice Age (VU0)) +mVUop(mVU_MULA) { mVU_FMACa(mVU, recPass, 1, 2, true, opMULA, 0); } +mVUop(mVU_MULAi) { mVU_FMACa(mVU, recPass, 3, 2, true, opMULAi, 0); } +mVUop(mVU_MULAq) { mVU_FMACa(mVU, recPass, 4, 2, true, opMULAq, 0); } +mVUop(mVU_MULAx) { mVU_FMACa(mVU, recPass, 2, 2, true, opMULAx, cFs);} // Clamp (TOTA, DoM, ...) +mVUop(mVU_MULAy) { mVU_FMACa(mVU, recPass, 2, 2, true, opMULAy, cFs);} // Clamp (TOTA, DoM, ...) +mVUop(mVU_MULAz) { mVU_FMACa(mVU, recPass, 2, 2, true, opMULAz, cFs);} // Clamp (TOTA, DoM, ...) +mVUop(mVU_MULAw) { mVU_FMACa(mVU, recPass, 2, 2, true, opMULAw, cFs);} // Clamp (TOTA, DoM, ...) +mVUop(mVU_MADD) { mVU_FMACc(mVU, recPass, 1, opMADD, 0); } +mVUop(mVU_MADDi) { mVU_FMACc(mVU, recPass, 3, opMADDi, 0); } +mVUop(mVU_MADDq) { mVU_FMACc(mVU, recPass, 4, opMADDq, 0); } +mVUop(mVU_MADDx) { mVU_FMACc(mVU, recPass, 2, opMADDx, cFs);} // Clamp (TOTA, DoM, ...) 
+mVUop(mVU_MADDy) { mVU_FMACc(mVU, recPass, 2, opMADDy, cFs);} // Clamp (TOTA, DoM, ...) +mVUop(mVU_MADDz) { mVU_FMACc(mVU, recPass, 2, opMADDz, cFs);} // Clamp (TOTA, DoM, ...) +mVUop(mVU_MADDw) { mVU_FMACc(mVU, recPass, 2, opMADDw, (isCOP2)?(cACC|cFt|cFs):cFs);} // Clamp (ICO (COP2), TOTA, DoM) +mVUop(mVU_MADDA) { mVU_FMACb(mVU, recPass, 1, 0, opMADDA, 0); } +mVUop(mVU_MADDAi) { mVU_FMACb(mVU, recPass, 3, 0, opMADDAi, 0); } +mVUop(mVU_MADDAq) { mVU_FMACb(mVU, recPass, 4, 0, opMADDAq, 0); } +mVUop(mVU_MADDAx) { mVU_FMACb(mVU, recPass, 2, 0, opMADDAx, cFs);} // Clamp (TOTA, DoM, ...) +mVUop(mVU_MADDAy) { mVU_FMACb(mVU, recPass, 2, 0, opMADDAy, cFs);} // Clamp (TOTA, DoM, ...) +mVUop(mVU_MADDAz) { mVU_FMACb(mVU, recPass, 2, 0, opMADDAz, cFs);} // Clamp (TOTA, DoM, ...) +mVUop(mVU_MADDAw) { mVU_FMACb(mVU, recPass, 2, 0, opMADDAw, cFs);} // Clamp (TOTA, DoM, ...) +mVUop(mVU_MSUB) { mVU_FMACd(mVU, recPass, 1, opMSUB, 0); } +mVUop(mVU_MSUBi) { mVU_FMACd(mVU, recPass, 3, opMSUBi, 0); } +mVUop(mVU_MSUBq) { mVU_FMACd(mVU, recPass, 4, opMSUBq, 0); } +mVUop(mVU_MSUBx) { mVU_FMACd(mVU, recPass, 2, opMSUBx, 0); } +mVUop(mVU_MSUBy) { mVU_FMACd(mVU, recPass, 2, opMSUBy, 0); } +mVUop(mVU_MSUBz) { mVU_FMACd(mVU, recPass, 2, opMSUBz, 0); } +mVUop(mVU_MSUBw) { mVU_FMACd(mVU, recPass, 2, opMSUBw, 0); } +mVUop(mVU_MSUBA) { mVU_FMACb(mVU, recPass, 1, 1, opMSUBA, 0); } +mVUop(mVU_MSUBAi) { mVU_FMACb(mVU, recPass, 3, 1, opMSUBAi, 0); } +mVUop(mVU_MSUBAq) { mVU_FMACb(mVU, recPass, 4, 1, opMSUBAq, 0); } +mVUop(mVU_MSUBAx) { mVU_FMACb(mVU, recPass, 2, 1, opMSUBAx, 0); } +mVUop(mVU_MSUBAy) { mVU_FMACb(mVU, recPass, 2, 1, opMSUBAy, 0); } +mVUop(mVU_MSUBAz) { mVU_FMACb(mVU, recPass, 2, 1, opMSUBAz, 0); } +mVUop(mVU_MSUBAw) { mVU_FMACb(mVU, recPass, 2, 1, opMSUBAw, 0); } +mVUop(mVU_MAX) { mVU_FMACa(mVU, recPass, 1, 3, false, opMAX, 0); } +mVUop(mVU_MAXi) { mVU_FMACa(mVU, recPass, 3, 3, false, opMAXi, 0); } +mVUop(mVU_MAXx) { mVU_FMACa(mVU, recPass, 2, 3, false, opMAXx, 0); } +mVUop(mVU_MAXy) { mVU_FMACa(mVU, recPass, 2, 3, false, opMAXy, 0); } +mVUop(mVU_MAXz) { mVU_FMACa(mVU, recPass, 2, 3, false, opMAXz, 0); } +mVUop(mVU_MAXw) { mVU_FMACa(mVU, recPass, 2, 3, false, opMAXw, 0); } +mVUop(mVU_MINI) { mVU_FMACa(mVU, recPass, 1, 4, false, opMINI, 0); } +mVUop(mVU_MINIi) { mVU_FMACa(mVU, recPass, 3, 4, false, opMINIi, 0); } +mVUop(mVU_MINIx) { mVU_FMACa(mVU, recPass, 2, 4, false, opMINIx, 0); } +mVUop(mVU_MINIy) { mVU_FMACa(mVU, recPass, 2, 4, false, opMINIy, 0); } +mVUop(mVU_MINIz) { mVU_FMACa(mVU, recPass, 2, 4, false, opMINIz, 0); } +mVUop(mVU_MINIw) { mVU_FMACa(mVU, recPass, 2, 4, false, opMINIw, 0); } +mVUop(mVU_FTOI0) { mVU_FTOIx(mX, NULL, opFTOI0); } +mVUop(mVU_FTOI4) { mVU_FTOIx(mX, mVUglob.FTOI_4, opFTOI4); } +mVUop(mVU_FTOI12) { mVU_FTOIx(mX, mVUglob.FTOI_12, opFTOI12); } +mVUop(mVU_FTOI15) { mVU_FTOIx(mX, mVUglob.FTOI_15, opFTOI15); } +mVUop(mVU_ITOF0) { mVU_ITOFx(mX, NULL, opITOF0); } +mVUop(mVU_ITOF4) { mVU_ITOFx(mX, mVUglob.ITOF_4, opITOF4); } +mVUop(mVU_ITOF12) { mVU_ITOFx(mX, mVUglob.ITOF_12, opITOF12); } +mVUop(mVU_ITOF15) { mVU_ITOFx(mX, mVUglob.ITOF_15, opITOF15); } +mVUop(mVU_NOP) { pass2 { mVU.profiler.EmitOp(opNOP); } pass3 { mVUlog("NOP"); } }
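
Illustrative sketch (not part of the patch): assigning 0/1 to a bool field or passing 0/1 as a bool argument compiles through implicit conversion, so the hunks above change no behavior; writing true/false simply documents intent at the assignment or call site, as with sFLAG.doFlag = true or the flushAll(bool clearState = true) default. The stand-alone example below uses hypothetical stand-in names (FlagInfo, writeBackReg), not the real microVU types, to show the same substitution in isolation.

#include <cstdio>

// Hypothetical stand-ins for a flag-state struct and a register write-back helper.
struct FlagInfo {
    bool doFlag      = false;
    bool doNonSticky = false;
};

// A bool parameter reads better with a named true/false at the call site than with 0/1.
static void writeBackReg(int reg, bool invalidateRegs = true) {
    std::printf("write back vf%02d%s\n", reg, invalidateRegs ? " (invalidate aliases)" : "");
}

int main() {
    FlagInfo sFlag;

    // Before the cleanup this would have read: sFlag.doFlag = 1; writeBackReg(5, 0);
    sFlag.doFlag      = true;   // a flag, not a count
    sFlag.doNonSticky = true;
    writeBackReg(5, false);     // explicit: keep cached copies valid

    return sFlag.doFlag ? 0 : 1;
}

Built with any C++11 compiler (e.g. g++ -std=c++11 example.cpp), this prints the write-back line and exits with status 0; the output is identical if 0/1 are used in place of false/true, which is why the patch is a readability-only change.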