DMA: Add backpressure to GIF as it has a 64bit bus

refractionpcsx2 2023-11-24 12:50:27 +00:00
parent 779b175567
commit f5d926fc08
4 changed files with 41 additions and 10 deletions
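For context: the cycle accounting in this change treats a 128-bit quadword crossing the GS's 64-bit bus as two bus transfers, so a direct PATH3 burst is charged 4 EE cycles per quadword, while data parked in the GIF FIFO keeps the previous flat 2-cycle (BIAS) charge. Below is a minimal sketch of that cost model, assuming this reading of the gscycles change further down; gifDmaCycleCost is a hypothetical helper for illustration, not part of the codebase.

#include <cstdint>

// Illustrative cost model only (not PCSX2 code): data absorbed by the GIF
// FIFO keeps the old 2-cycle-per-QW charge, while a direct transfer pays
// 4 cycles per QW because of the 64-bit GS bus bottleneck.
static uint32_t gifDmaCycleCost(uint32_t transferredQwc, bool wentThroughFifo)
{
    return wentThroughFifo ? transferredQwc * 2
                           : transferredQwc * 4;
}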

View File

@@ -84,6 +84,8 @@ int GIF_Fifo::write_fifo(u32* pMem, int size)
{
if (fifoSize == 16)
{
gifRegs.stat.FQC = fifoSize;
CalculateFIFOCSR();
//GIF_LOG("GIF FIFO Full");
return 0;
}
@@ -232,6 +234,7 @@ __fi void gifInterrupt()
GifDMAInt(16);
}
CPU_SET_DMASTALL(DMAC_GIF, gifUnit.Path3Masked() || !gifUnit.CanDoPath3());
return;
}
}
@@ -251,7 +254,6 @@ __fi void gifInterrupt()
if (gif_fifo.fifoSize == 16)
return;
}
// If there's something in the FIFO and we can do PATH3, empty the FIFO.
if (gif_fifo.fifoSize > 0)
{
@@ -326,6 +328,11 @@ static u32 WRITERING_DMA(u32* pMem, u32 qwc)
{
if (gif_fifo.fifoSize < 16)
{
if (vif1Regs.stat.VGW && gifUnit.Path3Masked())
{
DevCon.Warning("Check paths %d fifosize %d qwc %d", CheckPaths(), gif_fifo.fifoSize, qwc);
//return 0;
}
size = gif_fifo.write_fifo((u32*)pMem, originalQwc); // Use original QWC here, the intermediate mode is for the GIF unit, not DMA
incGifChAddr(size);
return size;
@@ -333,6 +340,7 @@ static u32 WRITERING_DMA(u32* pMem, u32 qwc)
return 4; // Arbitrary value, probably won't schedule a DMA anyway since the FIFO is full and GIF is paused
}
size = gifUnit.TransferGSPacketData(GIF_TRANS_DMA, (u8*)pMem, qwc * 16) / 16;
incGifChAddr(size);
return size;
@@ -353,7 +361,7 @@ static __fi void GIFchain()
}
const int transferred = WRITERING_DMA((u32*)pMem, gifch.qwc);
gif.gscycles += transferred * BIAS;
gif.gscycles += (gif_fifo.fifoSize > 0) ? transferred * 2 : transferred * 4;
if (!gifUnit.Path3Masked() || (gif_fifo.fifoSize < 16))
GifDMAInt(gif.gscycles);
@@ -420,8 +428,11 @@ void GIFdma()
return;
//DevCon.Warning("GIF Reading Tag MSK = %x", vif1Regs.mskpath3);
GIF_LOG("gifdmaChain %8.8x_%8.8x size=%d, id=%d, addr=%lx tadr=%lx", ptag[1]._u32, ptag[0]._u32, gifch.qwc, ptag->ID, gifch.madr, gifch.tadr);
gifRegs.stat.FQC = std::min((u32)0x10, gifch.qwc);
CalculateFIFOCSR();
if (!CHECK_GIFFIFOHACK)
{
gifRegs.stat.FQC = std::min((u32)0x10, gifch.qwc);
CalculateFIFOCSR();
}
if (dmacRegs.ctrl.STD == STD_GIF)
{
@@ -658,8 +669,11 @@ void mfifoGIFtransfer()
gifch.unsafeTransfer(ptag);
gifch.madr = ptag[1]._u32;
gifRegs.stat.FQC = std::min((u32)0x10, gifch.qwc);
CalculateFIFOCSR();
if (!CHECK_GIFFIFOHACK)
{
gifRegs.stat.FQC = std::min((u32)0x10, gifch.qwc);
CalculateFIFOCSR();
}
gif.mfifocycles += 2;
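The backpressure itself comes from write_fifo refusing data once the FIFO holds 16 quadwords (now also updating FQC and the CSR in that case), with the channel keeping its remaining QWC and retrying on a later GIF DMA interrupt. A rough, self-contained sketch of that pattern, with simplified hypothetical names:

#include <algorithm>

// Simplified sketch (not PCSX2 code): a full 16-slot FIFO accepts nothing,
// the caller keeps its remaining QWC, and the write is reattempted on the
// next GIF DMA interrupt.
struct SketchGifFifo
{
    int fifoSize = 0;

    // Returns how many quadwords were accepted; 0 means "full, retry later".
    int write(int qwc)
    {
        if (fifoSize == 16)
            return 0;
        const int accepted = std::min(qwc, 16 - fifoSize);
        fifoSize += accepted;
        return accepted;
    }
};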

View File

@@ -263,7 +263,7 @@ struct Gif_Path
bool isMTVU() const { return !idx && THREAD_VU1; }
s32 getReadAmount() { return readAmount.load(std::memory_order_acquire) + gsPack.readAmount; }
bool hasDataRemaining() const { return curOffset < curSize; }
bool isDone() const { return isMTVU() ? !mtvu.fakePackets : (!hasDataRemaining() && (state == GIF_PATH_IDLE || state == GIF_PATH_WAIT)); }
bool isDone() const { return isMTVU() ? !mtvu.fakePackets : (!hasDataRemaining() && (state == GIF_PATH_IDLE/* || state == GIF_PATH_WAIT*/)); }
// Waits on the MTGS to process gs packets
void mtgsReadWait()
@@ -429,7 +429,7 @@ struct Gif_Path
//Including breaking packets early (Rewind DMA to pick up where left off)
//but only do this when the path is masked, else we're pointlessly slowing things down.
dmaRewind = curSize - curOffset;
curSize = curOffset;
curSize -= dmaRewind;
}
}
else

View File

@@ -159,6 +159,8 @@ __fi int _vifCode_Direct(int pass, const u8* data, bool isDirectHL)
vif1.tag.size -= ret / 4; // Convert to u32's
vif1Regs.stat.VGW = false;
g_vif1Cycles += (ret / 16) * 2; // Need to add on the same amount of cycles again, as the GS has a 64bit bus bottleneck.
if (ret & 3)
DevCon.Warning("Vif %s: Ret wasn't a multiple of 4!", name); // Shouldn't happen
if (size == 0)
@@ -520,6 +522,7 @@ vifOp(vifCode_MskPath3)
GUNIT_WARN("VIF MSKPATH3 off Path3 triggering!");
gifInterrupt();
}
vif1.cmd = 0;
vif1.pass = 0;
}

View File

@@ -331,9 +331,12 @@ _vifT int nVifUnpack(const u8* data)
const uint ret = std::min(vif.vifpacketsize, vif.tag.size);
const bool isFill = (vifRegs.cycle.cl < wl);
s32 size = ret << 2;
int num_transferred = 0;
if (ret == vif.tag.size) // Full Transfer
{
num_transferred = vifRegs.num;
if (v.bSize) // Last transfer was partial
{
memcpy(&v.buffer[v.bSize], data, size);
@@ -357,6 +360,11 @@ _vifT int nVifUnpack(const u8* data)
else
vu1Thread.VifUnpack(vif, vifRegs, (u8*)data, (size + 4) & ~0x3);
if (idx)
g_vif1Cycles += num_transferred;
else
g_vif0Cycles += num_transferred;
vif.pass = 0;
vif.tag.size = 0;
vif.cmd = 0;
@@ -377,13 +385,19 @@ _vifT int nVifUnpack(const u8* data)
if (!isFill)
{
vifRegs.num -= (size / vSize);
num_transferred = (size / vSize);
}
else
{
int dataSize = (size / vSize);
vifRegs.num = vifRegs.num - (((dataSize / vifRegs.cycle.cl) * (vifRegs.cycle.wl - vifRegs.cycle.cl)) + dataSize);
num_transferred = (((dataSize / vifRegs.cycle.cl) * (vifRegs.cycle.wl - vifRegs.cycle.cl)) + dataSize);
}
if (idx)
g_vif1Cycles += num_transferred;
else
g_vif0Cycles += num_transferred;
vifRegs.num -= num_transferred;
}
return ret;
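The fill-mode arithmetic above mirrors the existing NUM update: for every cycle.cl registers taken from the packet, the unpack also writes cycle.wl - cycle.cl filled registers, and the new num_transferred charges VIF cycles for both. A small worked example under that reading (values are made up, the helper is illustrative only):

// Worked example of the fill-mode accounting (illustrative values):
// with CL = 2, WL = 4 and 6 registers' worth of packet data,
//   num_transferred = (6 / 2) * (4 - 2) + 6 = 12
// so 12 registers end up written and 12 cycles are added to the VIF
// cycle counter (g_vif1Cycles or g_vif0Cycles, depending on the unit).
static int fillModeTransferred(int dataSize, int cl, int wl)
{
    return ((dataSize / cl) * (wl - cl)) + dataSize;
}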