pcsx2: sign compare mismatch 1/3

commit 88f4d1e3a7 (parent c6d8b52d26)
Author: Gregory Hainaut
Date:   2014-04-14 20:11:23 +02:00

9 changed files with 39 additions and 38 deletions
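
All of the changes below address the compiler's -Wsign-compare warning: when a signed and an unsigned integer meet in a comparison, the signed operand is implicitly converted to unsigned, which silently changes the result for negative values. A minimal stand-alone illustration (not PCSX2 code):

    // Built with g++/clang++ -Wall, the commented-out line warns:
    //   "comparison of integer expressions of different signedness"
    bool in_range(int pos, unsigned int size)
    {
        // return pos < size;  // pos converts to unsigned, so -1 compares as a huge value
        return pos >= 0 && static_cast<unsigned int>(pos) < size;
    }

The hunks in this commit take the simpler route of giving both operands the same signedness up front, either by changing a variable's type or by making a literal or cast explicit.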

View File

@@ -348,7 +348,7 @@ static __fi bool mfifoGIFrbTransfer()
 return true; // Skip if can't do path3
 }
-bool needWrap = (gifch.madr + (mfifoqwc * 16)) > (dmacRegs.rbor.ADDR + dmacRegs.rbsr.RMSK + 16);
+bool needWrap = (gifch.madr + (mfifoqwc * 16u)) > (dmacRegs.rbor.ADDR + dmacRegs.rbsr.RMSK + 16u);
 uint s1 = ((dmacRegs.rbor.ADDR + dmacRegs.rbsr.RMSK + 16) - gifch.madr) >> 4;
 uint s2 = mfifoqwc - s1;
 uint s3 = needWrap ? s1 : mfifoqwc;
@@ -378,7 +378,7 @@ static __fi bool mfifoGIFchain()
 if (gifch.qwc == 0) return true;
 if (gifch.madr >= dmacRegs.rbor.ADDR &&
-gifch.madr <= (dmacRegs.rbor.ADDR + dmacRegs.rbsr.RMSK + 16))
+gifch.madr <= (dmacRegs.rbor.ADDR + dmacRegs.rbsr.RMSK + 16u))
 {
 bool ret = true;
 // if(gifch.madr == (dmacRegs.rbor.ADDR + dmacRegs.rbsr.RMSK + 16)) DevCon.Warning("Edge GIF");
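
The 16u suffixes force the whole right-hand side into unsigned arithmetic so that it matches gifch.madr on the left. The likely trigger (an assumption, not stated in the commit) is that the DMA register fields are narrow bitfields, which promote to signed int before the addition:

    // Sketch only; Reg stands in for the dmacRegs bitfield layout.
    struct Reg { unsigned int ADDR : 31; };

    bool needWrapSketch(unsigned int madr, unsigned int qwc, Reg rbor)
    {
        // (rbor.ADDR + 16) promotes to int, so comparing it with the unsigned left side warns:
        // return (madr + qwc * 16) > (rbor.ADDR + 16);
        return (madr + qwc * 16u) > (rbor.ADDR + 16u);  // unsigned on both sides
    }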

View File

@@ -27,7 +27,7 @@
 // - buffer_size must be power of 2
 // - ring-buffer has no complete pending packets when read_pos==write_pos
 class VU_Thread : public pxThread {
-static const u32 buffer_size = (_1mb * 16) / sizeof(u32);
+static const s32 buffer_size = (_1mb * 16) / sizeof(s32);
 static const u32 buffer_mask = buffer_size - 1;
 __aligned(4) u32 buffer[buffer_size];
 __aligned(4) volatile s32 read_pos; // Only modified by VU thread
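
Here buffer_size goes from u32 to s32, which lines it up with the s32 read_pos/write_pos cursors declared alongside it; the presumed motivation (not spelled out in the commit) is that position arithmetic and comparisons then stay signed-on-signed:

    // Assumed usage sketch, not the actual MTVU code.
    #include <cstdint>

    static const int32_t buffer_size = int32_t((1024 * 1024 * 16) / sizeof(int32_t));
    static volatile int32_t read_pos  = 0;   // only modified by the VU thread
    static volatile int32_t write_pos = 0;   // the other cursor

    int32_t pending_words()
    {
        int32_t diff = write_pos - read_pos;
        return diff >= 0 ? diff : diff + buffer_size;   // all-signed, no -Wsign-compare
    }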

View File

@@ -708,7 +708,7 @@ void Panels::PluginSelectorPanel::OnProgress( wxCommandEvent& evt )
 if( DisableThreading )
 {
-const int nextidx = evtidx+1;
+const u32 nextidx = evtidx+1;
 if( nextidx == m_FileList->Count() )
 {
 wxCommandEvent done( pxEvt_EnumerationFinished );
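
nextidx is compared against m_FileList->Count(), and wxWidgets container counts are unsigned, so the old signed local tripped the warning (that Count() returns an unsigned type is the assumption here). Reduced to a stand-in container:

    // Sketch with std::vector instead of the wx list.
    #include <string>
    #include <vector>

    bool isLastEntry(const std::vector<std::string>& fileList, unsigned int evtidx)
    {
        const unsigned int nextidx = evtidx + 1;   // was `const int nextidx`
        return nextidx == fileList.size();         // size() is unsigned, so the types now match
    }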

View File

@@ -1182,7 +1182,7 @@ static void __fastcall PreBlockCheck( u32 blockpc )
 #ifdef PCSX2_DEBUG
 extern void iDumpPsxRegisters(u32 startpc, u32 temp);
-static int lastrec = 0;
+static u32 lastrec = 0;
 static int curcount = 0;
 const int skip = 0;

View File

@@ -129,7 +129,7 @@ static u32 eeScaleBlockCycles();
 void _eeFlushAllUnused()
 {
-int i;
+u32 i;
 for(i = 0; i < 34; ++i) {
 if( pc < s_nEndBlock ) {
 if( (g_pCurInstInfo[1].regs[i]&EEINST_USED) )
@@ -241,7 +241,7 @@ void eeSignExtendTo(int gpr, bool onlyupper)
 int _flushXMMunused()
 {
-int i;
+u32 i;
 for (i=0; i<iREGCNT_XMM; i++) {
 if (!xmmregs[i].inuse || xmmregs[i].needed || !(xmmregs[i].mode&MODE_WRITE) ) continue;
@@ -260,7 +260,7 @@ int _flushXMMunused()
 int _flushMMXunused()
 {
-int i;
+u32 i;
 for (i=0; i<iREGCNT_MMX; i++) {
 if (!mmxregs[i].inuse || mmxregs[i].needed || !(mmxregs[i].mode&MODE_WRITE) ) continue;
@@ -1358,7 +1358,8 @@ void recMemcheck(u32 bits, bool store)
 void recompileNextInstruction(int delayslot)
 {
 static u8 s_bFlushReg = 1;
-int i, count;
+u32 i;
+int count;
 // add breakpoint
 if (CBreakPoints::IsAddressBreakPoint(pc))
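
The recurring change in this file is loop counters: iREGCNT_XMM and iREGCNT_MMX are register counts that are presumably declared unsigned, so an int counter warns on every `i < iREGCNT_*` check, and switching the counter to u32 matches the bound. A reduced sketch:

    // iREGCNT_XMM stands in for the real constant, assumed to be unsigned.
    static const unsigned int iREGCNT_XMM = 8;

    void flushUnusedSketch()
    {
        for (unsigned int i = 0; i < iREGCNT_XMM; i++) {   // was `int i`
            // ... free register i if it is unused ...
        }
    }

The last hunk splits `int i, count;` into `u32 i; int count;` because only i is measured against an unsigned bound; count can stay signed.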

View File

@@ -61,7 +61,7 @@ void recADD_const()
 g_cpuConstRegs[_Rd_].SD[0] = g_cpuConstRegs[_Rs_].SL[0] + g_cpuConstRegs[_Rt_].SL[0];
 }
-void recADD_constv(int info, int creg, int vreg)
+void recADD_constv(int info, int creg, u32 vreg)
 {
 pxAssert( !(info&PROCESS_EE_XMM) );
@@ -112,7 +112,7 @@ void recDADD_const(void)
 g_cpuConstRegs[_Rd_].SD[0] = g_cpuConstRegs[_Rs_].SD[0] + g_cpuConstRegs[_Rt_].SD[0];
 }
-void recDADD_constv(int info, int creg, int vreg)
+void recDADD_constv(int info, int creg, u32 vreg)
 {
 pxAssert( !(info&PROCESS_EE_XMM) );
@@ -149,7 +149,7 @@ void recDADD_(int info)
 {
 pxAssert( !(info&PROCESS_EE_XMM) );
-int rs = _Rs_, rt = _Rt_;
+u32 rs = _Rs_, rt = _Rt_;
 if (_Rd_ == _Rt_)
 rs = _Rt_, rt = _Rs_;
@@ -331,7 +331,7 @@ void recAND_const()
 g_cpuConstRegs[_Rd_].UD[0] = g_cpuConstRegs[_Rs_].UD[0] & g_cpuConstRegs[_Rt_].UD[0];
 }
-void recAND_constv(int info, int creg, int vreg)
+void recAND_constv(int info, int creg, u32 vreg)
 {
 pxAssert( !(info & PROCESS_EE_XMM) );
@@ -341,11 +341,11 @@ void recAND_constv(int info, int creg, int vreg)
 if (!cval.UL[i]) {
 xMOV(ptr32[&cpuRegs.GPR.r[_Rd_].UL[i]], 0);
 } else if (_Rd_ == vreg) {
-if (cval.UL[i] != -1)
+if (cval.SL[i] != -1)
 xAND(ptr32[&cpuRegs.GPR.r[_Rd_].UL[i]], cval.UL[i]);
 } else {
 xMOV(eax, ptr32[&cpuRegs.GPR.r[vreg].UL[i]]);
-if (cval.UL[i] != -1)
+if (cval.SL[i] != -1)
 xAND(eax, cval.UL[i]);
 xMOV(ptr32[&cpuRegs.GPR.r[_Rd_].UL[i]], eax);
 }
@@ -366,7 +366,7 @@ void recAND_(int info)
 {
 pxAssert( !(info & PROCESS_EE_XMM) );
-int rs = _Rs_, rt = _Rt_;
+u32 rs = _Rs_, rt = _Rt_;
 if (_Rd_ == _Rt_)
 rs = _Rt_, rt = _Rs_;
@@ -393,14 +393,14 @@ void recOR_const()
 g_cpuConstRegs[_Rd_].UD[0] = g_cpuConstRegs[_Rs_].UD[0] | g_cpuConstRegs[_Rt_].UD[0];
 }
-void recOR_constv(int info, int creg, int vreg)
+void recOR_constv(int info, int creg, u32 vreg)
 {
 pxAssert( !(info & PROCESS_EE_XMM) );
 GPR_reg64 cval = g_cpuConstRegs[creg];
 for (int i = 0; i < 2; i++) {
-if (cval.UL[i] == -1) {
+if (cval.SL[i] == -1) {
 xMOV(ptr32[&cpuRegs.GPR.r[_Rd_].UL[i]], -1);
 } else if (_Rd_ == vreg) {
 if (cval.UL[i])
@@ -428,7 +428,7 @@ void recOR_(int info)
 {
 pxAssert( !(info & PROCESS_EE_XMM) );
-int rs = _Rs_, rt = _Rt_;
+u32 rs = _Rs_, rt = _Rt_;
 if (_Rd_ == _Rt_)
 rs = _Rt_, rt = _Rs_;
@@ -455,7 +455,7 @@ void recXOR_const()
 g_cpuConstRegs[_Rd_].UD[0] = g_cpuConstRegs[_Rs_].UD[0] ^ g_cpuConstRegs[_Rt_].UD[0];
 }
-void recXOR_constv(int info, int creg, int vreg)
+void recXOR_constv(int info, int creg, u32 vreg)
 {
 pxAssert( !(info & PROCESS_EE_XMM) );
@@ -488,7 +488,7 @@ void recXOR_(int info)
 {
 pxAssert( !(info & PROCESS_EE_XMM) );
-int rs = _Rs_, rt = _Rt_;
+u32 rs = _Rs_, rt = _Rt_;
 if (_Rd_ == _Rt_)
 rs = _Rt_, rt = _Rs_;
@@ -514,7 +514,7 @@ void recNOR_const()
 g_cpuConstRegs[_Rd_].UD[0] =~(g_cpuConstRegs[_Rs_].UD[0] | g_cpuConstRegs[_Rt_].UD[0]);
 }
-void recNOR_constv(int info, int creg, int vreg)
+void recNOR_constv(int info, int creg, u32 vreg)
 {
 pxAssert( !(info & PROCESS_EE_XMM) );
@@ -549,7 +549,7 @@ void recNOR_(int info)
 {
 pxAssert( !(info & PROCESS_EE_XMM) );
-int rs = _Rs_, rt = _Rt_;
+u32 rs = _Rs_, rt = _Rt_;
 if (_Rd_ == _Rt_)
 rs = _Rt_, rt = _Rs_;
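
Two patterns repeat through this file. The vreg parameters become u32, presumably because they are compared against the opcode register fields (_Rd_, _Rt_, ...), which decode as unsigned. And the `cval.UL[i] != -1` / `== -1` tests switch to the signed SL view of the same lane: both forms test for an all-ones word, but mixing an unsigned value with the literal -1 is exactly what -Wsign-compare targets, while the SL view keeps the comparison signed without changing behaviour. A reduced sketch of the UL/SL switch:

    // One 32-bit lane of a GPR_reg64-style union (same bits, two views).
    #include <cstdint>

    union Lane {
        uint32_t UL;
        int32_t  SL;
    };

    bool isNotAllOnes(Lane v)
    {
        // return v.UL != -1;  // correct (-1 converts to 0xFFFFFFFF) but mixes signedness
        return v.SL != -1;     // signed view of the same bits: same result, no warning
    }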

View File

@@ -340,7 +340,7 @@ __fi bool doEarlyExit(microVU& mVU) {
 // Saves Pipeline State for resuming from early exits
 __fi void mVUsavePipelineState(microVU& mVU) {
 u32* lpS = (u32*)&mVU.prog.lpState;
-for(int i = 0; i < (sizeof(microRegInfo)-4)/4; i++, lpS++) {
+for(size_t i = 0; i < (sizeof(microRegInfo)-4)/4; i++, lpS++) {
 xMOV(ptr32[lpS], lpS[0]);
 }
 }
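
sizeof() yields size_t, so the loop bound `(sizeof(microRegInfo)-4)/4` is unsigned; a size_t counter matches it exactly where the old int one warned:

    // Sketch; RegInfoLike stands in for microRegInfo.
    #include <cstddef>
    #include <cstdint>

    struct RegInfoLike { uint32_t words[12]; };

    void copyStateSketch(uint32_t* dst, const uint32_t* src)
    {
        for (size_t i = 0; i < (sizeof(RegInfoLike) - 4) / 4; i++)  // was `int i`
            dst[i] = src[i];
    }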

View File

@@ -200,7 +200,7 @@ struct microMapXMM {
 class microRegAlloc {
 protected:
-static const u32 xmmTotal = 7; // Don't allocate PQ?
+static const int xmmTotal = 7; // Don't allocate PQ?
 microMapXMM xmmMap[xmmTotal];
 int counter; // Current allocation count
 int index; // VU0 or VU1

View File

@@ -2023,7 +2023,6 @@ static void SuperVUEliminateDeadCode()
 void VuBaseBlock::AssignVFRegs()
 {
-int i;
 VuBaseBlock::LISTBLOCKS::iterator itchild;
 list<VuBaseBlock*>::iterator itblock;
 list<VuInstruction>::iterator itinst, itnext, itinst2;
@@ -2034,6 +2033,7 @@ void VuBaseBlock::AssignVFRegs()
 if (type & BLOCKTYPE_ANALYZED)
 {
+u32 i;
 // check if changed
 for (i = 0; i < iREGCNT_XMM; ++i)
 {
@@ -2056,13 +2056,13 @@ void VuBaseBlock::AssignVFRegs()
 // reserve, go from upper to lower
 int lastwrite = -1;
-for (i = 1; i >= 0; --i)
+for (int i = 1; i >= 0; --i)
 {
 _VURegsNum* regs = itinst->regs + i;
 // redo the counters so that the proper regs are released
-for (int j = 0; j < iREGCNT_XMM; ++j)
+for (u32 j = 0; j < iREGCNT_XMM; ++j)
 {
 if (xmmregs[j].inuse)
 {
@@ -2795,7 +2795,7 @@ static void __fastcall svudispfn( int g_curdebugvu )
 // frees all regs taking into account the livevars
 void SuperVUFreeXMMregs(u32* livevars)
 {
-for (int i = 0; i < iREGCNT_XMM; ++i)
+for (u32 i = 0; i < iREGCNT_XMM; ++i)
 {
 if (xmmregs[i].inuse)
 {
@@ -3969,7 +3969,7 @@ void recVUMI_IBQ_prep()
 if (_Is_ == 0)
 {
 #ifdef SUPERVU_VIBRANCHDELAY
-if (s_pCurInst->vicached >= 0 && s_pCurInst->vicached == _It_)
+if (s_pCurInst->vicached >= 0 && s_pCurInst->vicached == (s8)_It_)
 {
 itreg = -1;
 }
@@ -3990,7 +3990,7 @@ void recVUMI_IBQ_prep()
 else if (_It_ == 0)
 {
 #ifdef SUPERVU_VIBRANCHDELAY
-if (s_pCurInst->vicached >= 0 && s_pCurInst->vicached == _Is_)
+if (s_pCurInst->vicached >= 0 && s_pCurInst->vicached == (s8)_Is_)
 {
 isreg = -1;
 }
@@ -4014,7 +4014,7 @@ void recVUMI_IBQ_prep()
 _addNeededX86reg(X86TYPE_VI | (VU == &VU1 ? X86TYPE_VU1 : 0), _It_);
 #ifdef SUPERVU_VIBRANCHDELAY
-if (s_pCurInst->vicached >= 0 && s_pCurInst->vicached == _Is_)
+if (s_pCurInst->vicached >= 0 && s_pCurInst->vicached == (s8)_Is_)
 {
 isreg = -1;
 }
@@ -4025,14 +4025,14 @@ void recVUMI_IBQ_prep()
 }
 #ifdef SUPERVU_VIBRANCHDELAY
-if (s_pCurInst->vicached >= 0 && s_pCurInst->vicached == _It_)
+if (s_pCurInst->vicached >= 0 && s_pCurInst->vicached == (s8)_It_)
 {
 itreg = -1;
 if (isreg <= 0)
 {
 // allocate fsreg
-if (s_pCurInst->vicached >= 0 && s_pCurInst->vicached == _Is_)
+if (s_pCurInst->vicached >= 0 && s_pCurInst->vicached == (s8)_Is_)
 {
 isreg = _allocX86reg(-1, X86TYPE_TEMP, 0, MODE_READ | MODE_WRITE);
 MOV32MtoR(isreg, SuperVUGetVIAddr(_Is_, 1));
@@ -4082,7 +4082,7 @@ void recVUMI_IBGEZ(VURegs* vuu, s32 info)
 s_JumpX86 = _allocX86reg(-1, X86TYPE_VUJUMP, 0, MODE_WRITE);
 #ifdef SUPERVU_VIBRANCHDELAY
-if (s_pCurInst->vicached >= 0 && s_pCurInst->vicached == _Is_)
+if (s_pCurInst->vicached >= 0 && s_pCurInst->vicached == (s8)_Is_)
 {
 isreg = -1;
 }
@@ -4112,7 +4112,7 @@ void recVUMI_IBGTZ(VURegs* vuu, s32 info)
 s_JumpX86 = _allocX86reg(-1, X86TYPE_VUJUMP, 0, MODE_WRITE);
 #ifdef SUPERVU_VIBRANCHDELAY
-if (s_pCurInst->vicached >= 0 && s_pCurInst->vicached == _Is_)
+if (s_pCurInst->vicached >= 0 && s_pCurInst->vicached == (s8)_Is_)
 {
 isreg = -1;
 }
@@ -4141,7 +4141,7 @@ void recVUMI_IBLEZ(VURegs* vuu, s32 info)
 s_JumpX86 = _allocX86reg(-1, X86TYPE_VUJUMP, 0, MODE_WRITE);
 #ifdef SUPERVU_VIBRANCHDELAY
-if (s_pCurInst->vicached >= 0 && s_pCurInst->vicached == _Is_)
+if (s_pCurInst->vicached >= 0 && s_pCurInst->vicached == (s8)_Is_)
 {
 isreg = -1;
 }
@@ -4170,7 +4170,7 @@ void recVUMI_IBLTZ(VURegs* vuu, s32 info)
 s_JumpX86 = _allocX86reg(-1, X86TYPE_VUJUMP, 0, MODE_WRITE);
 #ifdef SUPERVU_VIBRANCHDELAY
-if (s_pCurInst->vicached >= 0 && s_pCurInst->vicached == _Is_)
+if (s_pCurInst->vicached >= 0 && s_pCurInst->vicached == (s8)_Is_)
 {
 isreg = -1;
 }
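
The repeated (s8) casts close out the pattern: the _Is_/_It_ macros decode register fields from the opcode as unsigned values, while vicached is a signed field that uses -1 (hence the `>= 0` guard) to mean "nothing cached", so the cast makes the equality check signed-on-signed. The exact field widths below are an assumption:

    // Sketch with assumed types for vicached and an _Is_-style decode.
    #include <cstdint>

    struct InstSketch { int vicached; };   // -1 when no integer register is cached

    bool branchDelayHitSketch(const InstSketch* inst, uint32_t code)
    {
        uint32_t is_field = (code >> 11) & 0x1F;                           // hypothetical decode
        return inst->vicached >= 0 && inst->vicached == (int8_t)is_field;  // cast keeps both sides signed
    }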