mirror of https://github.com/PCSX2/pcsx2.git
ReorderingMTGS:
* Fixes flickering screen in Soul Calibur 3 (caused by a VSYNC register bug).
* Optimized upload of queued Path1 transfers, such that all queued Path1 transfers are uploaded as a single MTGS packet.

git-svn-id: http://pcsx2.googlecode.com/svn/branches/ReorderingMTGS@3515 96395faa-99c1-11dd-bbfe-3dabce05a288
This commit is contained in:
parent ce2b9e30fc
commit 1c9cefd778
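The second bullet of the commit message describes the batching change: instead of preparing and sending an MTGS packet for each queued PATH1 tag, the whole backlog between the read and write cursors is reserved once, copied tag by tag, and submitted once. Below is a minimal, self-contained sketch of that flush pattern; PacketRing, FlushQueuedPath1, and the cursor reset are hypothetical names and assumptions for illustration, not the PCSX2 API.

	// Sketch only: PacketRing stands in for the MTGS ring buffer; all names are illustrative.
	#include <cstddef>
	#include <cstdio>

	struct PacketRing
	{
		void Prep(size_t qwc)                     { std::printf("reserve %zu qwc\n", qwc); }
		size_t CopyTag(const char*, size_t avail) { return avail < 4 ? avail : 4; } // pretend a tag covers up to 4 qwc
		void Send()                               { std::printf("send packet\n"); }
	};

	// Drain everything queued between readPos and writePos as ONE packet:
	// one Prep() for the whole backlog, tag-by-tag copies, one Send() at the end.
	void FlushQueuedPath1(PacketRing& ring, const char* staging, size_t& readPos, size_t& writePos)
	{
		if (size_t size = writePos - readPos)   // measure the whole backlog once
		{
			ring.Prep(size);                    // one reservation covers every queued tag
			while (size > 0)
			{
				size_t count = ring.CopyTag(staging + readPos * 16, size);
				readPos += count;
				size    -= count;
			}
			ring.Send();                        // single submission instead of one per tag
			if (readPos == writePos)
				readPos = writePos = 0;         // assumption: cursors reset once fully drained
		}
	}

The first hunk below is the corresponding change in gsPath1Interrupt.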
@@ -57,21 +57,24 @@ void gsPath1Interrupt()
	if((gifRegs->stat.APATH <= GIF_APATH1 || (gifRegs->stat.IP3 == true && gifRegs->stat.APATH == GIF_APATH3)) && Path1WritePos > 0 && !gifRegs->stat.PSE)
	{
		gifRegs->stat.P1Q = false;
		while(Path1WritePos > 0)

		if (uint size = (Path1WritePos - Path1ReadPos))
		{
			uint size = (Path1WritePos - Path1ReadPos);
			GetMTGS().PrepDataPacket(GIF_PATH_1, size);
			//DevCon.Warning("Flush Size = %x", size);

			while(size > 0)
			{
				uint count = GIFPath_CopyTag(GIF_PATH_1, ((u128*)Path1Buffer) + Path1ReadPos, size);
				GetMTGS().SendDataPacket();
				Path1ReadPos += count;
				size -= count;

				if(GSTransferStatus.PTH1 == STOPPED_MODE)
				{
					gifRegs->stat.OPH = false;
					gifRegs->stat.APATH = GIF_APATH_IDLE;
				}
			}
			GetMTGS().SendDataPacket();

			if(Path1ReadPos == Path1WritePos)
			{
@@ -409,13 +409,16 @@ void SysMtgsThread::ExecuteTaskInThread()
	MTGS_LOG( "(MTGS Packet Read) ringtype=Vsync, field=%u, skip=%s", !!(((u32&)RingBuffer.Regs[0x1000]) & 0x2000) ? 0 : 1, tag.data[1] ? "true" : "false" );

	// Mail in the important GS registers.
	// This seemingly obtuse system is needed in order to handle cases where the vsync data wraps
	// around the edge of the ringbuffer. If not for that I'd just use a struct. >_<

	uint datapos = (m_ReadPos+1) & RingBufferMask;
	MemCopy_WrappedSrc( RingBuffer.m_Ring, datapos, RingBufferSize, (u128*)RingBuffer.Regs, 0xf );

	u32* remainder = (u32*)&RingBuffer[datapos];
	GSCSRr = remainder[0];
	GSIMR = remainder[1];
	GSSIGLBLID = (GSRegSIGBLID&)remainder[2];
	((u32&)RingBuffer.Regs[0x1000]) = remainder[0];
	((u32&)RingBuffer.Regs[0x1010]) = remainder[1];
	((GSRegSIGBLID&)RingBuffer.Regs[0x1080]) = (GSRegSIGBLID&)remainder[2];

	// CSR & 0x2000; is the pageflip id.
	GSvsync(((u32&)RingBuffer.Regs[0x1000]) & 0x2000);
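The comments in the hunk above explain why the vsync registers are copied out of the ring rather than read through a struct: the register block may straddle the end of the ring buffer. The following is not PCSX2's MemCopy_WrappedSrc, only a generic sketch of a wrapped-source copy that splits into at most two memcpy calls when the source region crosses the edge of the ring; the function and parameter names are illustrative.

	#include <cstring>
	#include <cstddef>

	// Copy `count` elements starting at ring index `srcPos` from a ring of
	// `ringSize` elements into a linear destination, splitting the copy in two
	// when the region wraps past the end of the ring.
	template <typename T>
	void CopyWrappedSrc(const T* ring, size_t srcPos, size_t ringSize, T* dest, size_t count)
	{
		const size_t beforeWrap = ringSize - srcPos;      // elements left before the edge
		if (count <= beforeWrap) {
			std::memcpy(dest, ring + srcPos, count * sizeof(T));
		} else {
			std::memcpy(dest, ring + srcPos, beforeWrap * sizeof(T));                 // tail of the ring
			std::memcpy(dest + beforeWrap, ring, (count - beforeWrap) * sizeof(T));   // wrapped remainder
		}
	}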
@@ -515,6 +515,7 @@ __forceinline int GIFPath::ParseTagQuick(GIF_PATH pathidx, const u8* pMem, u32 s

				Console.Warning("GIFTAG error, size exceeded VU memory size %x", startSize);
				nloop = 0;
				const_cast<GIFTAG&>(tag).EOP = 1;
			}
		}
	}
@@ -1097,7 +1097,6 @@ void __fastcall mVU_XGKICK_(u32 addr) {
	u8* data = microVU1.regs->Mem + (addr*16);
	u32 diff = 0x400 - addr;
	u32 size;
	u8* pDest;

	if(gifRegs->stat.APATH <= GIF_APATH1 || (gifRegs->stat.APATH == GIF_APATH3 && gifRegs->stat.IP3 == true) && SIGNAL_IMR_Pending == false)
	{
@@ -1120,24 +1119,22 @@ void __fastcall mVU_XGKICK_(u32 addr) {
	{
		//DevCon.Warning("GIF APATH busy %x Holding for later W %x, R %x", gifRegs->stat.APATH, Path1WritePos, Path1ReadPos);
		size = GIFPath_ParseTagQuick(GIF_PATH_1, data, diff);
		pDest = &Path1Buffer[Path1WritePos*16];
		u8* pDest = &Path1Buffer[Path1WritePos*16];

		pxAssumeMsg((Path1WritePos+size < sizeof(Path1Buffer)), "XGKick Buffer Overflow detected on Path1Buffer!");
		Path1WritePos += size;

		pxAssumeMsg((Path1WritePos < sizeof(Path1Buffer)), "XGKick Buffer Overflow detected on Path1Buffer!");
		//DevCon.Warning("Storing size %x PATH 1", size);

		if (size > diff) {
			// fixme: one of these days the following *16's will get cleaned up when we introduce
			// a special qwc/simd16 optimized version of memcpy_aligned. :)
			//DevCon.Status("XGkick Wrap!");
			memcpy_qwc(pDest, microVU1.regs->Mem + (addr*16), diff);
			Path1WritePos += size;
			memcpy_aligned(pDest, microVU1.regs->Mem + (addr*16), diff*16);
			size -= diff;
			pDest += diff*16;
			memcpy_qwc(pDest, microVU1.regs->Mem, size);
			memcpy_aligned(pDest, microVU1.regs->Mem, size*16);
		}
		else {
			memcpy_qwc(pDest, microVU1.regs->Mem + (addr*16), size);
			Path1WritePos += size;
			memcpy_aligned(pDest, microVU1.regs->Mem + (addr*16), size*16);
		}
		//if(!gifRegs->stat.P1Q) CPU_INT(28, 128);
		gifRegs->stat.P1Q = true;
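The XGKICK hunk above is the producer side of the queue that gsPath1Interrupt later flushes: when the GIF is busy on another path, the kick is measured with ParseTagQuick, staged into Path1Buffer (splitting the copy when it wraps past the end of VU1 data memory), and P1Q is raised so the data can be flushed later as a single MTGS packet. Here is a self-contained sketch of that staging step; the queue capacity, struct, and function names are assumptions for the example, not the real PCSX2 ones.

	#include <cstring>
	#include <cstdint>
	#include <cstddef>
	#include <cassert>

	constexpr size_t kVuQwords    = 0x400;   // VU1 data memory size in 128-bit units (matches diff = 0x400 - addr)
	constexpr size_t kQueueQwords = 0x1000;  // staging queue capacity, illustrative only

	struct Path1Queue {
		uint8_t data[kQueueQwords * 16];
		size_t  writePos = 0;                // in quadwords
		bool    queued   = false;
	};

	// Copy `size` quadwords starting at `addr` out of VU memory into the queue,
	// splitting the copy when the region crosses the end of VU memory.
	inline void StageKick(Path1Queue& q, const uint8_t* vuMem, size_t addr, size_t size)
	{
		assert(q.writePos + size <= kQueueQwords);   // mirror of the overflow assertion above
		uint8_t* dest = &q.data[q.writePos * 16];
		size_t   diff = kVuQwords - addr;            // quadwords before the wrap point

		if (size > diff) {
			std::memcpy(dest, vuMem + addr * 16, diff * 16);            // tail of VU memory
			std::memcpy(dest + diff * 16, vuMem, (size - diff) * 16);   // wrapped remainder
		} else {
			std::memcpy(dest, vuMem + addr * 16, size * 16);
		}
		q.writePos += size;
		q.queued = true;      // a later flush drains the whole queue as one packet
	}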