MicroVU: Replace BranchAddr macro with an Inline function

Coverity does not accept the comma-operator assert trick and treats the whole macro as a single statement, so rework branchAddr and branchAddrN as inline functions to satisfy Coverity and improve code quality. This patch fixes 8 Coverity issues with the same root cause, CID 151736 - 151743.

(#1 of 1): Misused comma operator (NO_EFFECT) extra_comma: Part !!((mVU.prog.IRinfo.curPC & 1U) == 0U) of statement (!!((mVU.prog.IRinfo.curPC & 1U) == 0U)) , ((mVU.prog.IRinfo.curPC + 2U + (s32)((mVU.code & 0x400U) ? 0xfffffc00U | (mVU.code & 0x3ffU) : (mVU.code & 0x3ffU)) * 2 & mVU.progMemMask) * 4U) has no effect due to the comma.
This commit is contained in:
Akash 2015-10-01 11:41:01 +05:30
parent c7000355fe
commit 71dbe3e4c4
5 changed files with 44 additions and 39 deletions

View File

@ -209,7 +209,7 @@ void normBranch(mV, microFlagCycles& mFC) {
xForwardJump32 eJMP(Jcc_Zero);
xOR(ptr32[&VU0.VI[REG_VPU_STAT].UL], (isVU1 ? 0x200 : 0x2));
xOR(ptr32[&mVU.regs().flags], VUFLAG_INTCINTERRUPT);
iPC = branchAddr/4;
iPC = branchAddr(mVU)/4;
mVUDTendProgram(mVU, &mFC, 1);
eJMP.SetTarget();
iPC = tempPC;
@ -221,7 +221,7 @@ void normBranch(mV, microFlagCycles& mFC) {
xForwardJump32 eJMP(Jcc_Zero);
xOR(ptr32[&VU0.VI[REG_VPU_STAT].UL], (isVU1 ? 0x400 : 0x4));
xOR(ptr32[&mVU.regs().flags], VUFLAG_INTCINTERRUPT);
iPC = branchAddr/4;
iPC = branchAddr(mVU)/4;
mVUDTendProgram(mVU, &mFC, 1);
eJMP.SetTarget();
iPC = tempPC;
@ -230,14 +230,14 @@ void normBranch(mV, microFlagCycles& mFC) {
if(mVUlow.badBranch)
DevCon.Warning("End on evil Unconditional branch! - Not implemented! - If game broken report to PCSX2 Team");
iPC = branchAddr/4;
iPC = branchAddr(mVU)/4;
mVUendProgram(mVU, &mFC, 1);
return;
}
if(mVUlow.badBranch)
{
u32 badBranchAddr = branchAddr+8;
u32 badBranchAddr = branchAddr(mVU)+8;
incPC(3);
if(mVUlow.branch == 2 || mVUlow.branch == 10) //Delay slot branch needs linking
{
@ -252,7 +252,7 @@ void normBranch(mV, microFlagCycles& mFC) {
// Normal Branch
mVUsetupBranch(mVU, mFC);
normBranchCompile(mVU, branchAddr);
normBranchCompile(mVU, branchAddr(mVU));
}
//Messy handler warning!!
@ -266,7 +266,7 @@ void condJumpProcessingEvil(mV, microFlagCycles& mFC, int JMPcc) {
u32 badBranchAddr;
iPC = bPC-2;
setCode();
badBranchAddr = branchAddr;
badBranchAddr = branchAddr(mVU);
xCMP(ptr16[&mVU.branch], 0);
@ -314,7 +314,7 @@ void condBranch(mV, microFlagCycles& mFC, int JMPcc) {
xJMP(mVU.exitFunct);
tJMP.SetTarget();
incPC(-4); // Go Back to Branch Opcode to get branchAddr
iPC = branchAddr/4;
iPC = branchAddr(mVU)/4;
xMOV(ptr32[&mVU.regs().VI[REG_TPC].UL], xPC);
xJMP(mVU.exitFunct);
eJMP.SetTarget();
@ -335,7 +335,7 @@ void condBranch(mV, microFlagCycles& mFC, int JMPcc) {
xJMP(mVU.exitFunct);
dJMP.SetTarget();
incPC(-4); // Go Back to Branch Opcode to get branchAddr
iPC = branchAddr/4;
iPC = branchAddr(mVU)/4;
xMOV(ptr32[&mVU.regs().VI[REG_TPC].UL], xPC);
xJMP(mVU.exitFunct);
eJMP.SetTarget();
@ -355,7 +355,7 @@ void condBranch(mV, microFlagCycles& mFC, int JMPcc) {
xJMP(mVU.exitFunct);
eJMP.SetTarget();
incPC(-4); // Go Back to Branch Opcode to get branchAddr
iPC = branchAddr/4;
iPC = branchAddr(mVU)/4;
xMOV(ptr32[&mVU.regs().VI[REG_TPC].UL], xPC);
xJMP(mVU.exitFunct);
return;
@ -380,7 +380,7 @@ void condBranch(mV, microFlagCycles& mFC, int JMPcc) {
if (bBlock) { // Branch non-taken has already been compiled
xJcc(xInvertCond((JccComparisonType)JMPcc), bBlock->x86ptrStart);
incPC(-3); // Go back to branch opcode (to get branch imm addr)
normBranchCompile(mVU, branchAddr);
normBranchCompile(mVU, branchAddr(mVU));
}
else {
s32* ajmp = xJcc32((JccComparisonType)JMPcc);
@ -393,7 +393,7 @@ void condBranch(mV, microFlagCycles& mFC, int JMPcc) {
iPC = bPC;
incPC(-3); // Go back to branch opcode (to get branch imm addr)
uptr jumpAddr = (uptr)mVUblockFetch(mVU, branchAddr, (uptr)&pBlock->pStateEnd);
uptr jumpAddr = (uptr)mVUblockFetch(mVU, branchAddr(mVU), (uptr)&pBlock->pStateEnd);
*ajmp = (jumpAddr - ((uptr)ajmp + 4));
}
}

View File

@ -371,7 +371,7 @@ void _mVUflagPass(mV, u32 startPC, u32 sCount, u32 found, std::vector<u32>& v) {
if (branch >= 2) { shortBranch(); }
else if (branch == 1) { branch = 2; }
if (mVUbranch) { branch = ((mVUbranch>8)?(5):((mVUbranch<3)?3:4)); incPC(-1); aBranchAddr = branchAddr; incPC(1); mVUbranch = 0; }
if (mVUbranch) { branch = ((mVUbranch>8)?(5):((mVUbranch<3)?3:4)); incPC(-1); aBranchAddr = branchAddr(mVU); incPC(1); mVUbranch = 0; }
incPC(1);
if ((mVUregs.needExactMatch&7)==7) break;
}
@ -405,8 +405,8 @@ void mVUsetFlagInfo(mV) {
int ffOpt = doFullFlagOpt;
if (mVUbranch <= 2) { // B/BAL
incPC(-1);
mVUflagPass (mVU, branchAddr);
checkFFblock(mVU, branchAddr, ffOpt);
mVUflagPass (mVU, branchAddr(mVU));
checkFFblock(mVU, branchAddr(mVU), ffOpt);
incPC(1);
mVUregs.needExactMatch &= 0x7;
@ -416,8 +416,8 @@ void mVUsetFlagInfo(mV) {
}
else if (mVUbranch <= 8) { // Conditional Branch
incPC(-1); // Branch Taken
mVUflagPass (mVU, branchAddr);
checkFFblock(mVU, branchAddr, ffOpt);
mVUflagPass (mVU, branchAddr(mVU));
checkFFblock(mVU, branchAddr(mVU), ffOpt);
int backupFlagInfo = mVUregs.needExactMatch;
mVUregs.needExactMatch = 0;

View File

@ -1261,7 +1261,7 @@ void setBranchA(mP, int x, int _x_) {
void condEvilBranch(mV, int JMPcc) {
if (mVUlow.badBranch) {
xMOV(ptr32[&mVU.branch], gprT1);
xMOV(ptr32[&mVU.badBranch], branchAddrN);
xMOV(ptr32[&mVU.badBranch], branchAddrN(mVU));
xCMP(gprT1b, 0);
xForwardJump8 cJMP((JccComparisonType)JMPcc);
@ -1271,7 +1271,7 @@ void condEvilBranch(mV, int JMPcc) {
cJMP.SetTarget();
return;
}
xMOV(ptr32[&mVU.evilBranch], branchAddr);
xMOV(ptr32[&mVU.evilBranch], branchAddr(mVU));
xCMP(gprT1b, 0);
xForwardJump8 cJMP((JccComparisonType)JMPcc);
xMOV(gprT1, ptr32[&mVU.badBranch]); // Branch Not Taken
@ -1286,11 +1286,11 @@ mVUop(mVU_B) {
setBranchA(mX, 1, 0);
pass1 { mVUanalyzeNormBranch(mVU, 0, false); }
pass2 {
if (mVUlow.badBranch) { xMOV(ptr32[&mVU.badBranch], branchAddrN); }
if (mVUlow.evilBranch) { xMOV(ptr32[&mVU.evilBranch], branchAddr); }
if (mVUlow.badBranch) { xMOV(ptr32[&mVU.badBranch], branchAddrN(mVU)); }
if (mVUlow.evilBranch) { xMOV(ptr32[&mVU.evilBranch], branchAddr(mVU)); }
mVU.profiler.EmitOp(opB);
}
pass3 { mVUlog("B [<a href=\"#addr%04x\">%04x</a>]", branchAddr, branchAddr); }
pass3 { mVUlog("B [<a href=\"#addr%04x\">%04x</a>]", branchAddr(mVU), branchAddr(mVU)); }
}
mVUop(mVU_BAL) {
@ -1303,11 +1303,11 @@ mVUop(mVU_BAL) {
mVUallocVIb(mVU, gprT1, _It_);
}
if (mVUlow.badBranch) { xMOV(ptr32[&mVU.badBranch], branchAddrN); }
if (mVUlow.evilBranch) { xMOV(ptr32[&mVU.evilBranch], branchAddr);}
if (mVUlow.badBranch) { xMOV(ptr32[&mVU.badBranch], branchAddrN(mVU)); }
if (mVUlow.evilBranch) { xMOV(ptr32[&mVU.evilBranch], branchAddr(mVU));}
mVU.profiler.EmitOp(opBAL);
}
pass3 { mVUlog("BAL vi%02d [<a href=\"#addr%04x\">%04x</a>]", _Ft_, branchAddr, branchAddr); }
pass3 { mVUlog("BAL vi%02d [<a href=\"#addr%04x\">%04x</a>]", _Ft_, branchAddr(mVU), branchAddr(mVU)); }
}
mVUop(mVU_IBEQ) {
@ -1324,7 +1324,7 @@ mVUop(mVU_IBEQ) {
else condEvilBranch(mVU, Jcc_Equal);
mVU.profiler.EmitOp(opIBEQ);
}
pass3 { mVUlog("IBEQ vi%02d, vi%02d [<a href=\"#addr%04x\">%04x</a>]", _Ft_, _Fs_, branchAddr, branchAddr); }
pass3 { mVUlog("IBEQ vi%02d, vi%02d [<a href=\"#addr%04x\">%04x</a>]", _Ft_, _Fs_, branchAddr(mVU), branchAddr(mVU)); }
}
mVUop(mVU_IBGEZ) {
@ -1337,7 +1337,7 @@ mVUop(mVU_IBGEZ) {
else condEvilBranch(mVU, Jcc_GreaterOrEqual);
mVU.profiler.EmitOp(opIBGEZ);
}
pass3 { mVUlog("IBGEZ vi%02d [<a href=\"#addr%04x\">%04x</a>]", _Fs_, branchAddr, branchAddr); }
pass3 { mVUlog("IBGEZ vi%02d [<a href=\"#addr%04x\">%04x</a>]", _Fs_, branchAddr(mVU), branchAddr(mVU)); }
}
mVUop(mVU_IBGTZ) {
@ -1350,7 +1350,7 @@ mVUop(mVU_IBGTZ) {
else condEvilBranch(mVU, Jcc_Greater);
mVU.profiler.EmitOp(opIBGTZ);
}
pass3 { mVUlog("IBGTZ vi%02d [<a href=\"#addr%04x\">%04x</a>]", _Fs_, branchAddr, branchAddr); }
pass3 { mVUlog("IBGTZ vi%02d [<a href=\"#addr%04x\">%04x</a>]", _Fs_, branchAddr(mVU), branchAddr(mVU)); }
}
mVUop(mVU_IBLEZ) {
@ -1363,7 +1363,7 @@ mVUop(mVU_IBLEZ) {
else condEvilBranch(mVU, Jcc_LessOrEqual);
mVU.profiler.EmitOp(opIBLEZ);
}
pass3 { mVUlog("IBLEZ vi%02d [<a href=\"#addr%04x\">%04x</a>]", _Fs_, branchAddr, branchAddr); }
pass3 { mVUlog("IBLEZ vi%02d [<a href=\"#addr%04x\">%04x</a>]", _Fs_, branchAddr(mVU), branchAddr(mVU)); }
}
mVUop(mVU_IBLTZ) {
@ -1376,7 +1376,7 @@ mVUop(mVU_IBLTZ) {
else condEvilBranch(mVU, Jcc_Less);
mVU.profiler.EmitOp(opIBLTZ);
}
pass3 { mVUlog("IBLTZ vi%02d [<a href=\"#addr%04x\">%04x</a>]", _Fs_, branchAddr, branchAddr); }
pass3 { mVUlog("IBLTZ vi%02d [<a href=\"#addr%04x\">%04x</a>]", _Fs_, branchAddr(mVU), branchAddr(mVU)); }
}
mVUop(mVU_IBNE) {
@ -1393,7 +1393,7 @@ mVUop(mVU_IBNE) {
else condEvilBranch(mVU, Jcc_NotEqual);
mVU.profiler.EmitOp(opIBNE);
}
pass3 { mVUlog("IBNE vi%02d, vi%02d [<a href=\"#addr%04x\">%04x</a>]", _Ft_, _Fs_, branchAddr, branchAddr); }
pass3 { mVUlog("IBNE vi%02d, vi%02d [<a href=\"#addr%04x\">%04x</a>]", _Ft_, _Fs_, branchAddr(mVU), branchAddr(mVU)); }
}
void normJumpPass2(mV) {

View File

@ -232,20 +232,12 @@ typedef u32 (__fastcall *mVUCall)(void*, void*);
#define shuffleSS(x) ((x==1)?(0x27):((x==2)?(0xc6):((x==4)?(0xe1):(0xe4))))
#define clampE CHECK_VU_EXTRA_OVERFLOW
#define varPrint(x) DevCon.WriteLn(#x " = %d", (int)x)
#define islowerOP ((iPC & 1) == 0)
#define blockCreate(addr) { \
if (!mVUblocks[addr]) mVUblocks[addr] = new microBlockManager(); \
}
#define branchAddr ( \
pxAssertDev((iPC & 1) == 0, "microVU: Expected Lower Op for valid branch addr."), \
((((iPC + 2) + (_Imm11_ * 2)) & mVU.progMemMask) * 4) \
)
#define branchAddrN ( \
pxAssertDev((iPC & 1) == 0, "microVU: Expected Lower Op for valid branch addr."), \
((((iPC + 4) + (_Imm11_ * 2)) & mVU.progMemMask) * 4) \
)
// Fetches the PC and instruction opcode relative to the current PC. Used to rewind and
// fast-forward the IR state while calculating VU pipeline conditions (branches, writebacks, etc)
#define incPC(x) { iPC = ((iPC + (x)) & mVU.progMemMask); mVU.code = curI; }

View File

@ -255,6 +255,19 @@ static void __fc mVUwarningRegAccess(u32 prog, u32 pc) {
Console.Error("microVU0 Warning: Accessing VU1 Regs! [%04x] [%x]", pc, prog);
}
// Returns the byte address of the instruction pair two slots past the
// current branch opcode: (iPC + 4) plus the sign-extended 11-bit branch
// immediate (_Imm11_, in instruction-pair units, hence * 2), wrapped to
// program memory via progMemMask, then scaled from 32-bit words to bytes
// (* 4). The "+ 4" (vs "+ 2" in branchAddr) presumably accounts for the
// branch delay slot on the badBranch paths — TODO confirm against callers.
// Precondition (asserted): iPC must address a lower opcode (islowerOP),
// as branch immediates are only valid in the lower instruction slot.
static inline u32 branchAddrN(const mV)
{
pxAssumeDev(islowerOP, "MicroVU: Expected Lower OP code for valid branch addr.");
return ((((iPC + 4) + (_Imm11_ * 2)) & mVU.progMemMask) * 4);
}
// Returns the branch target as a byte address: (iPC + 2) — the next
// instruction pair — plus the sign-extended 11-bit branch immediate
// (_Imm11_, in instruction-pair units, hence * 2), wrapped to program
// memory via progMemMask, then scaled from 32-bit words to bytes (* 4).
// Callers that need a word-based PC divide the result by 4 (e.g.
// "iPC = branchAddr(mVU)/4").
// Precondition (asserted): iPC must address a lower opcode (islowerOP),
// as branch immediates are only valid in the lower instruction slot.
static inline u32 branchAddr(const mV)
{
pxAssumeDev(islowerOP, "MicroVU: Expected Lower OP code for valid branch addr.");
return ((((iPC + 2) + (_Imm11_ * 2)) & mVU.progMemMask) * 4);
}
static void __fc mVUwaitMTVU() {
if (IsDevBuild) DevCon.WriteLn("microVU0: Waiting on VU1 thread to access VU1 regs!");
if (THREAD_VU1) vu1Thread.WaitVU();