mirror of https://github.com/PCSX2/pcsx2.git
parent b0496645f1
commit 1b80f3aea2

@@ -33,7 +33,7 @@ bool _VIF0chain()
 }

 pMem = (u32*)dmaGetAddr(vif0ch.madr, false);
-if (pMem == NULL)
+if (pMem == nullptr)
 {
 vif0.cmd = 0;
 vif0.tag.size = 0;

@@ -27,7 +27,7 @@ void vif1TransferToMemory()
 u128* pMem = (u128*)dmaGetAddr(vif1ch.madr, false);

 // VIF from gsMemory
-if (pMem == NULL)
+if (pMem == nullptr)
 { // Is vif0ptag empty?
 Console.WriteLn("Vif1 Tag BUSERR");
 dmacRegs.stat.BEIS = true; // Bus Error
@@ -117,7 +117,7 @@ bool _VIF1chain()
 }

 pMem = (u32*)dmaGetAddr(vif1ch.madr, !vif1ch.chcr.DIR);
-if (pMem == NULL)
+if (pMem == nullptr)
 {
 vif1.cmd = 0;
 vif1.tag.size = 0;
@@ -305,8 +305,8 @@ __fi void vif1Interrupt()
 // from the GS then we handle that separately (KH2 for testing)
 if (vif1ch.chcr.DIR)
 {
-bool isDirect = (vif1.cmd & 0x7f) == 0x50;
-bool isDirectHL = (vif1.cmd & 0x7f) == 0x51;
+const bool isDirect = (vif1.cmd & 0x7f) == 0x50;
+const bool isDirectHL = (vif1.cmd & 0x7f) == 0x51;
 if ((isDirect && !gifUnit.CanDoPath2()) || (isDirectHL && !gifUnit.CanDoPath2HL()))
 {
 GUNIT_WARN("vif1Interrupt() - Waiting for Path 2 to be ready");

@@ -23,7 +23,7 @@ static u32 QWCinVIFMFIFO(u32 DrainADDR, u32 qwc)
 }
 else
 {
-u32 limit = dmacRegs.rbor.ADDR + dmacRegs.rbsr.RMSK + 16;
+const u32 limit = dmacRegs.rbor.ADDR + dmacRegs.rbsr.RMSK + 16;
 //Drain is higher than SPR so it has looped round,
 //calculate from base to the SPR tag addr and what is left in the top of the ring
 ret = ((spr0ch.madr - dmacRegs.rbor.ADDR) + (limit - DrainADDR)) >> 4;
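
A minimal compile-time sketch of the wrap-around branch above, using hypothetical register values: the available data is the span from the ring base up to the SPR write pointer plus the span from the drain address up to the top of the ring, converted from bytes to quadwords.

#include <cstdint>
// Hypothetical MFIFO layout; only the arithmetic mirrors the code above.
constexpr uint32_t ringBase  = 0x3000;  // dmacRegs.rbor.ADDR (assumed)
constexpr uint32_t limit     = 0x4000;  // ringBase + rbsr.RMSK + 16 (assumed)
constexpr uint32_t sprMadr   = 0x3200;  // spr0ch.madr, where SPR has written up to
constexpr uint32_t drainAddr = 0x3E00;  // DrainADDR (vif1ch.madr), above sprMadr -> wrapped
constexpr uint32_t qwc = ((sprMadr - ringBase) + (limit - drainAddr)) >> 4;
static_assert(qwc == 0x40, "0x200 + 0x200 bytes = 64 quadwords available");
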
@@ -35,8 +35,8 @@ static u32 QWCinVIFMFIFO(u32 DrainADDR, u32 qwc)
 }
 static __fi bool mfifoVIF1rbTransfer()
 {
-u32 msize = dmacRegs.rbor.ADDR + dmacRegs.rbsr.RMSK + 16;
-u32 mfifoqwc = std::min(QWCinVIFMFIFO(vif1ch.madr, vif1ch.qwc), vif1ch.qwc);
+const u32 msize = dmacRegs.rbor.ADDR + dmacRegs.rbsr.RMSK + 16;
+const u32 mfifoqwc = std::min(QWCinVIFMFIFO(vif1ch.madr, vif1ch.qwc), vif1ch.qwc);
 u32* src;
 bool ret;

@@ -49,7 +49,7 @@ static __fi bool mfifoVIF1rbTransfer()
 /* Check if the transfer should wrap around the ring buffer */
 if ((vif1ch.madr + (mfifoqwc << 4)) > (msize))
 {
-int s1 = ((msize)-vif1ch.madr) >> 2;
+const int s1 = ((msize)-vif1ch.madr) >> 2;

 VIF_LOG("Split MFIFO");

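
A similar compile-time sketch (hypothetical values) of the split decision above: when madr plus the pending quadwords runs past the top of the ring, the first s1 32-bit words go up to the top and the remainder continues from the ring base, as the two PSM() reads further down show.

#include <cstdint>
constexpr uint32_t msize    = 0x4000;  // ring base + rbsr.RMSK + 16 (assumed)
constexpr uint32_t madr     = 0x3F00;  // current vif1ch.madr (assumed)
constexpr uint32_t mfifoqwc = 0x20;    // 32 quadwords pending
static_assert(madr + (mfifoqwc << 4) > msize, "this transfer wraps the ring");
constexpr int s1   = (msize - madr) >> 2;  // 64 words up to the top of the ring
constexpr int rest = (mfifoqwc << 2) - s1; // 64 words after wrapping back to the base
static_assert(s1 == 64 && rest == 64, "split sizes in 32-bit words");
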
@@ -57,7 +57,7 @@ static __fi bool mfifoVIF1rbTransfer()
 vif1ch.madr = qwctag(vif1ch.madr);

 src = (u32*)PSM(vif1ch.madr);
-if (src == NULL)
+if (src == nullptr)
 return false;

 if (vif1.irqoffset.enabled)
@@ -75,7 +75,7 @@ static __fi bool mfifoVIF1rbTransfer()
 vif1ch.madr = qwctag(vif1ch.madr);

 src = (u32*)PSM(vif1ch.madr);
-if (src == NULL)
+if (src == nullptr)
 return false;
 VIF1transfer(src, ((mfifoqwc << 2) - s1));
 }
@@ -86,7 +86,7 @@ static __fi bool mfifoVIF1rbTransfer()

 /* it doesn't, so just transfer 'qwc*4' words */
 src = (u32*)PSM(vif1ch.madr);
-if (src == NULL)
+if (src == nullptr)
 return false;

 if (vif1.irqoffset.enabled)
@@ -133,7 +133,7 @@ static __fi void mfifo_VIF1chain()

 //No need to exit on non-mfifo as it is indirect anyway, so it can be transferring this while spr refills the mfifo

-if (pMem == NULL)
+if (pMem == nullptr)
 return;

 if (vif1.irqoffset.enabled)
@@ -158,7 +158,7 @@ void mfifoVifMaskMem(int id)
 //DevCon.Warning("VIF MFIFO MADR below bottom of ring buffer, wrapping VIF MADR = %x Ring Bottom %x", vif1ch.madr, dmacRegs.rbor.ADDR);
 vif1ch.madr = qwctag(vif1ch.madr);
 }
-if (vif1ch.madr > (dmacRegs.rbor.ADDR + (u32)dmacRegs.rbsr.RMSK)) //Usual scenario is the tag is near the end (Front Mission 4)
+if (vif1ch.madr > (dmacRegs.rbor.ADDR + static_cast<u32>(dmacRegs.rbsr.RMSK))) //Usual scenario is the tag is near the end (Front Mission 4)
 {
 //DevCon.Warning("VIF MFIFO MADR outside top of ring buffer, wrapping VIF MADR = %x Ring Top %x", vif1ch.madr, (dmacRegs.rbor.ADDR + dmacRegs.rbsr.RMSK)+16);
 vif1ch.madr = qwctag(vif1ch.madr);
@@ -281,8 +281,8 @@ void vifMFIFOInterrupt()

 if (vif1ch.chcr.DIR)
 {
-bool isDirect = (vif1.cmd & 0x7f) == 0x50;
-bool isDirectHL = (vif1.cmd & 0x7f) == 0x51;
+const bool isDirect = (vif1.cmd & 0x7f) == 0x50;
+const bool isDirectHL = (vif1.cmd & 0x7f) == 0x51;
 if ((isDirect && !gifUnit.CanDoPath2()) || (isDirectHL && !gifUnit.CanDoPath2HL()))
 {
 GUNIT_WARN("vifMFIFOInterrupt() - Waiting for Path 2 to be ready");

@@ -109,7 +109,7 @@ static __fi void vuExecMicro(int idx, u32 addr, bool requires_wait)
 }

 GetVifX.queued_program = true;
-if ((s32)addr == -1)
+if (static_cast<s32>(addr) == -1)
 GetVifX.queued_pc = addr;
 else
 GetVifX.queued_pc = addr & (idx ? 0x7ffu : 0x1ffu);
@@ -144,7 +144,7 @@ __fi int _vifCode_Direct(int pass, const u8* data, bool isDirectHL)
 vif1Only();
 pass1
 {
-int vifImm = (u16)vif1Regs.code;
+const int vifImm = static_cast<u16>(vif1Regs.code);
 vif1.tag.size = vifImm ? (vifImm * 4) : (65536 * 4);
 vif1.pass = 1;
 return 1;
@@ -152,9 +152,9 @@ __fi int _vifCode_Direct(int pass, const u8* data, bool isDirectHL)
 pass2
 {
 const char* name = isDirectHL ? "DirectHL" : "Direct";
-GIF_TRANSFER_TYPE tranType = isDirectHL ? GIF_TRANS_DIRECTHL : GIF_TRANS_DIRECT;
-uint size = std::min(vif1.vifpacketsize, vif1.tag.size) * 4; // Get size in bytes
-uint ret = gifUnit.TransferGSPacketData(tranType, (u8*)data, size);
+const GIF_TRANSFER_TYPE tranType = isDirectHL ? GIF_TRANS_DIRECTHL : GIF_TRANS_DIRECT;
+const uint size = std::min(vif1.vifpacketsize, vif1.tag.size) * 4; // Get size in bytes
+const uint ret = gifUnit.TransferGSPacketData(tranType, (u8*)data, size);

 vif1.tag.size -= ret / 4; // Convert to u32's
 vif1Regs.stat.VGW = false;
@@ -202,7 +202,7 @@ vifOp(vifCode_Flush)
 //vifStruct& vifX = GetVifX;
 pass1or2
 {
-bool p1or2 = (gifRegs.stat.APATH != 0 && gifRegs.stat.APATH != 3);
+const bool p1or2 = (gifRegs.stat.APATH != 0 && gifRegs.stat.APATH != 3);
 vif1Regs.stat.VGW = false;
 vifFlush(idx);
 if (gifUnit.checkPaths(1, 1, 0) || p1or2)
@@ -234,7 +234,7 @@ vifOp(vifCode_FlushA)
 pass1or2
 {
 //Gif_Path& p3 = gifUnit.gifPath[GIF_PATH_3];
-u32 gifBusy = gifUnit.checkPaths(1, 1, 1) || (gifRegs.stat.APATH != 0);
+const u32 gifBusy = gifUnit.checkPaths(1, 1, 1) || (gifRegs.stat.APATH != 0);
 //bool doStall = false;
 vif1Regs.stat.VGW = false;
 vifFlush(idx);
@@ -298,7 +298,7 @@ vifOp(vifCode_Mark)
 vifStruct& vifX = GetVifX;
 pass1
 {
-vifXRegs.mark = (u16)vifXRegs.code;
+vifXRegs.mark = static_cast<u16>(vifXRegs.code);
 vifXRegs.stat.MRK = true;
 vifX.cmd = 0;
 vifX.pass = 0;
@@ -311,7 +311,7 @@ static __fi void _vifCode_MPG(int idx, u32 addr, const u32* data, int size)
 {
 VURegs& VUx = idx ? VU1 : VU0;
 vifStruct& vifX = GetVifX;
-u16 vuMemSize = idx ? 0x4000 : 0x1000;
+const u16 vuMemSize = idx ? 0x4000 : 0x1000;
 pxAssert(VUx.Micro);

 vifExecQueue(idx);
@@ -371,8 +371,8 @@ vifOp(vifCode_MPG)
 vifStruct& vifX = GetVifX;
 pass1
 {
-int vifNum = (u8)(vifXRegs.code >> 16);
-vifX.tag.addr = (u16)(vifXRegs.code << 3) & (idx ? 0x3fff : 0xfff);
+const int vifNum = static_cast<u8>(vifXRegs.code >> 16);
+vifX.tag.addr = static_cast<u16>(vifXRegs.code << 3) & (idx ? 0x3fff : 0xfff);
 vifX.tag.size = vifNum ? (vifNum * 2) : 512;
 vifFlush(idx);

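
For reference, a small compile-time decode of the field extraction above, using a hypothetical MPG VIFcode word: NUM lives in bits 16-23, the immediate (micro-memory address in 64-bit units) in bits 0-15, and a NUM of zero means 256 instructions.

#include <cstdint>
constexpr uint32_t code   = 0x4A200040u;                                // hypothetical word: CMD=0x4A (MPG), NUM=0x20, IMM=0x0040
constexpr int      vifNum = static_cast<uint8_t>(code >> 16);           // 0x20 double-word micro instructions
constexpr uint16_t addr   = static_cast<uint16_t>(code << 3) & 0x3fff;  // IMM * 8 bytes, wrapped to VU1 micro memory
constexpr int      size   = vifNum ? (vifNum * 2) : 512;                // payload length in 32-bit words
static_assert(vifNum == 0x20 && addr == 0x200 && size == 0x40, "MPG field decode");
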
@@ -406,7 +406,7 @@ vifOp(vifCode_MPG)
 //DevCon.Warning("Vif%d MPG Split Overflow full %x", idx, vifX.tag.addr + vifX.tag.size*4);
 }
 _vifCode_MPG(idx, vifX.tag.addr, data, vifX.tag.size);
-int ret = vifX.tag.size;
+const int ret = vifX.tag.size;
 vifX.tag.size = 0;
 vifX.cmd = 0;
 vifX.pass = 0;
@@ -430,7 +430,7 @@ vifOp(vifCode_MSCAL)
 return 0;
 }

-vuExecMicro(idx, (u16)(vifXRegs.code), false);
+vuExecMicro(idx, static_cast<u16>(vifXRegs.code), false);
 vifX.cmd = 0;
 vifX.pass = 0;

@@ -455,7 +455,7 @@ vifOp(vifCode_MSCALF)
 {
 vifXRegs.stat.VGW = false;
 vifFlush(idx);
-if (u32 a = gifUnit.checkPaths(1, 1, 0))
+if (const u32 a = gifUnit.checkPaths(1, 1, 0))
 {
 GUNIT_WARN("Vif MSCALF: Stall! [%d,%d]", !!(a & 1), !!(a & 2));
 vif1Regs.stat.VGW = true;
@@ -469,7 +469,7 @@ vifOp(vifCode_MSCALF)
 return 0;
 }

-vuExecMicro(idx, (u16)(vifXRegs.code), true);
+vuExecMicro(idx, static_cast<u16>(vifXRegs.code), true);
 vifX.cmd = 0;
 vifX.pass = 0;
 vifExecQueue(idx);
@@ -595,7 +595,7 @@ static __fi int _vifCode_STColRow(const u32* data, u32* pmem2)
 {
 vifStruct& vifX = GetVifX;

-int ret = std::min(4 - vifX.tag.addr, vifX.vifpacketsize);
+const int ret = std::min(4 - vifX.tag.addr, vifX.vifpacketsize);
 pxAssume(vifX.tag.addr < 4);
 pxAssume(ret > 0);

@@ -641,7 +641,7 @@ vifOp(vifCode_STCol)
 }
 pass2
 {
-u32 ret = _vifCode_STColRow<idx>(data, &vifX.MaskCol._u32[vifX.tag.addr]);
+const u32 ret = _vifCode_STColRow<idx>(data, &vifX.MaskCol._u32[vifX.tag.addr]);
 if (idx && vifX.tag.size == 0)
 vu1Thread.WriteCol(vifX);
 return ret;
@@ -662,7 +662,7 @@ vifOp(vifCode_STRow)
 }
 pass2
 {
-u32 ret = _vifCode_STColRow<idx>(data, &vifX.MaskRow._u32[vifX.tag.addr]);
+const u32 ret = _vifCode_STColRow<idx>(data, &vifX.MaskRow._u32[vifX.tag.addr]);
 if (idx && vifX.tag.size == 0)
 vu1Thread.WriteRow(vifX);
 return ret;
@@ -676,8 +676,8 @@ vifOp(vifCode_STCycl)
 vifStruct& vifX = GetVifX;
 pass1
 {
-vifXRegs.cycle.cl = (u8)(vifXRegs.code);
-vifXRegs.cycle.wl = (u8)(vifXRegs.code >> 8);
+vifXRegs.cycle.cl = static_cast<u8>(vifXRegs.code);
+vifXRegs.cycle.wl = static_cast<u8>(vifXRegs.code >> 8);
 vifX.cmd = 0;
 vifX.pass = 0;
 }
@@ -744,9 +744,9 @@ vifOp(vifCode_Unpack)
 {
 vifStruct& vifX = GetVifX;
 VIFregisters& vifRegs = vifXRegs;
-uint vl = vifX.cmd & 0x03;
-uint vn = (vifX.cmd >> 2) & 0x3;
-bool flg = (vifRegs.code >> 15) & 1;
+const uint vl = vifX.cmd & 0x03;
+const uint vn = (vifX.cmd >> 2) & 0x3;
+const bool flg = (vifRegs.code >> 15) & 1;
 static const char* const vntbl[] = {"S", "V2", "V3", "V4"};
 static const uint vltbl[] = {32, 16, 8, 5};

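A compile-time decode of the UNPACK format bits extracted above, for a hypothetical V4-32 command: vl indexes vltbl (element width), vn indexes vntbl (vector size), and flg is bit 15 of the full code word.

#include <cstdint>
constexpr uint8_t  cmd  = 0x6C;                 // hypothetical UNPACK V4-32 command byte
constexpr unsigned vl   = cmd & 0x03;           // 0 -> vltbl[0] = 32-bit elements
constexpr unsigned vn   = (cmd >> 2) & 0x3;     // 3 -> vntbl[3] = "V4"
constexpr uint32_t code = 0x6C008000u;          // same command with FLG (bit 15) set
constexpr bool     flg  = (code >> 15) & 1;
static_assert(vl == 0 && vn == 3 && flg, "V4-32 unpack with FLG set");
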
@@ -46,9 +46,9 @@ __fi void VifUnpackSSE_Dynarec::SetMasks(int cS) const
 const vifStruct& vif = MTVU_VifX;

 //This could have ended up copying the row when there was no row to write.1810080
-u32 m0 = vB.mask; //The actual mask example 0x03020100
-u32 m3 = ((m0 & 0xaaaaaaaa) >> 1) & ~m0; //all the upper bits, so our example 0x01010000 & 0xFCFDFEFF = 0x00010000 just the cols (shifted right for maskmerge)
-u32 m2 = (m0 & 0x55555555) & (~m0 >> 1); // 0x1000100 & 0xFE7EFF7F = 0x00000100 Just the row
+const u32 m0 = vB.mask; //The actual mask example 0x03020100
+const u32 m3 = ((m0 & 0xaaaaaaaa) >> 1) & ~m0; //all the upper bits, so our example 0x01010000 & 0xFCFDFEFF = 0x00010000 just the cols (shifted right for maskmerge)
+const u32 m2 = (m0 & 0x55555555) & (~m0 >> 1); // 0x1000100 & 0xFE7EFF7F = 0x00000100 Just the row

 if ((doMask && m2) || doMode)
 {
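
The two bit-twiddles above can be checked in isolation; a compile-time sketch using the 0x03020100 example from the comments (each 2-bit field of the mask selects write-data 0, row 1, col 2 or write-protect 3):

#include <cstdint>
constexpr uint32_t m0 = 0x03020100u;                     // example mask value from the comment above
constexpr uint32_t m3 = ((m0 & 0xaaaaaaaau) >> 1) & ~m0; // keeps only the '2' (col) fields, shifted right for maskmerge
constexpr uint32_t m2 = (m0 & 0x55555555u) & (~m0 >> 1); // keeps only the '1' (row) fields
static_assert(m3 == 0x00010000u, "one col field survives");
static_assert(m2 == 0x00000100u, "one row field survives");
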
@@ -73,7 +73,7 @@ void VifUnpackSSE_Dynarec::doMaskWrite(const xRegisterSSE& regX) const
 pxAssertMsg(regX.Id <= 1, "Reg Overflow! XMM2 thru XMM6 are reserved for masking.");

 const int cc = std::min(vCL, 3);
-u32 m0 = (vB.mask >> (cc * 8)) & 0xff; //The actual mask example 0xE4 (protect, col, row, clear)
+const u32 m0 = (vB.mask >> (cc * 8)) & 0xff; //The actual mask example 0xE4 (protect, col, row, clear)
 u32 m3 = ((m0 & 0xaa) >> 1) & ~m0; //all the upper bits (cols shifted right) cancelling out any write protects 0x10
 u32 m2 = (m0 & 0x55) & (~m0 >> 1); // all the lower bits (rows)cancelling out any write protects 0x04
 u32 m4 = (m0 & ~((m3 << 1) | m2)) & 0x55; // = 0xC0 & 0x55 = 0x40 (for merge mask)
@@ -317,8 +317,8 @@ static u16 dVifComputeLength(uint cl, uint wl, u8 num, bool isFill)

 if (!isFill)
 {
-uint skipSize = (cl - wl) * 16;
-uint blocks = (num + (wl - 1)) / wl; //Need to round up num's to calculate skip size correctly.
+const uint skipSize = (cl - wl) * 16;
+const uint blocks = (num + (wl - 1)) / wl; //Need to round up num's to calculate skip size correctly.
 length += (blocks - 1) * skipSize;
 }

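A quick compile-time check of the skipping-write adjustment above with hypothetical CYCLE settings: each full block of wl writes is followed by (cl - wl) skipped quadwords, num is rounded up to whole blocks, and the final block adds no trailing skip.

constexpr unsigned cl = 4, wl = 2, num = 5;          // hypothetical CYCLE/NUM values
constexpr unsigned skipSize = (cl - wl) * 16;        // 32 bytes skipped after each block
constexpr unsigned blocks   = (num + (wl - 1)) / wl; // 3 blocks once num is rounded up
constexpr unsigned extra    = (blocks - 1) * skipSize;
static_assert(extra == 64, "two inter-block skips of 32 bytes each");
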
@@ -370,15 +370,15 @@ _vifT __fi void dVifUnpack(const u8* data, bool isFill)
 // in u32 (aka x86 register).
 //
 // Warning the order of data in hash_key/key0/key1 depends on the nVifBlock struct
-u32 hash_key = (u32)(upkType & 0xFF) << 8 | (vifRegs.num & 0xFF);
+const u32 hash_key = static_cast<u32>(upkType & 0xFF) << 8 | (vifRegs.num & 0xFF);

-u32 key1 = ((u32)vifRegs.cycle.wl << 24) | ((u32)vifRegs.cycle.cl << 16) | ((u32)(vif.start_aligned & 0xFF) << 8) | ((u32)vifRegs.mode & 0xFF);
+u32 key1 = (static_cast<u32>(vifRegs.cycle.wl) << 24) | (static_cast<u32>(vifRegs.cycle.cl) << 16) | (static_cast<u32>(vif.start_aligned & 0xFF) << 8) | (static_cast<u32>(vifRegs.mode) & 0xFF);
 if ((upkType & 0xf) != 9)
 key1 &= 0xFFFF01FF;

 // Zero out the mask parameter if it's unused -- games leave random junk
 // values here which cause false recblock cache misses.
-u32 key0 = doMask ? vifRegs.mask : 0;
+const u32 key0 = doMask ? vifRegs.mask : 0;

 block.hash_key = hash_key;
 block.key0 = key0;
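A compile-time sketch of the key packing above with hypothetical register values; the point is that every parameter affecting the generated unpack routine is folded into hash_key/key0/key1, and the mask is zeroed when unused so stale junk in vifRegs.mask cannot cause spurious cache misses.

#include <cstdint>
constexpr uint32_t upkType = 0x1Cu;   // hypothetical encoded unpack type
constexpr uint32_t num = 0x10, wl = 1, cl = 1, start_aligned = 1, mode = 0;
constexpr bool     doMask = false;    // this block does not use MASK
constexpr uint32_t hash_key = (upkType & 0xFF) << 8 | (num & 0xFF);
constexpr uint32_t key1 = (wl << 24) | (cl << 16) | ((start_aligned & 0xFF) << 8) | (mode & 0xFF);
constexpr uint32_t key0 = doMask ? 0x03020100u : 0;  // vifRegs.mask when masking, else 0
static_assert(hash_key == 0x1C10u && key1 == 0x01010100u && key0 == 0, "packed lookup keys");
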
@@ -397,7 +397,7 @@ _vifT __fi void dVifUnpack(const u8* data, bool isFill)

 { // Execute the block
 const VURegs& VU = vuRegs[idx];
-const uint vuMemLimit = idx ? 0x4000 : 0x1000;
+constexpr uint vuMemLimit = idx ? 0x4000 : 0x1000;

 u8* startmem = VU.Mem + (vif.tag.addr & (vuMemLimit - 0x10));
 u8* endmem = VU.Mem + vuMemLimit;

@@ -222,7 +222,7 @@ void VifUnpackSSE_Base::xUPK_V3_16() const
 //However - IF the end of this iteration of the unpack falls on a quadword boundary, W becomes 0
 //IsAligned is the position through the current QW in the vif packet
 //Iteration counts where we are in the packet.
-int result = (((UnpkLoopIteration / 4) + 1 + (4 - IsAligned)) & 0x3);
+const int result = (((UnpkLoopIteration / 4) + 1 + (4 - IsAligned)) & 0x3);

 if ((UnpkLoopIteration & 0x1) == 0 && result == 0)
 xBLEND.PS(destReg, zeroReg, 0x8); //zero last word - tested on ps2
@@ -330,7 +330,7 @@ VifUnpackSSE_Simple::VifUnpackSSE_Simple(bool usn_, bool domask_, int curCycle_)
 void VifUnpackSSE_Simple::doMaskWrite(const xRegisterSSE& regX) const
 {
 xMOVAPS(xmm7, ptr[dstIndirect]);
-int offX = std::min(curCycle, 3);
+const int offX = std::min(curCycle, 3);
 xPAND(regX, ptr32[nVifMask[0][offX]]);
 xPAND(xmm7, ptr32[nVifMask[1][offX]]);
 xPOR (regX, ptr32[nVifMask[2][offX]]);
@@ -349,7 +349,7 @@ static void nVifGen(int usn, int mask, int curCycle)
 for (int i = 0; i < 16; ++i)
 {
 nVifCall& ucall(nVifUpk[((usnpart + maskpart + i) * 4) + curCycle]);
-ucall = NULL;
+ucall = nullptr;
 if (nVifT[i] == 0)
 continue;