ReorderingMTGS: Bugfix for possible corruption/crash if a vsync happens at the ends of the ringbuffer.

git-svn-id: http://pcsx2.googlecode.com/svn/branches/ReorderingMTGS@3464 96395faa-99c1-11dd-bbfe-3dabce05a288
Jake.Stine 2010-07-11 13:23:56 +00:00
parent 472358345a
commit ec7e1ed0a1
3 changed files with 32 additions and 9 deletions
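
The failure mode, as the hunks below show: PostVsyncEnd advanced m_packet_ringpos without wrapping it, and MemCopy_WrappedDest computed its wrap point from the global RingBufferSize/RingBufferMask instead of its own destSize argument, so a vsync packet landing at the tail of the ring wrote past the end. A minimal standalone sketch of the corrected wrap arithmetic (the sizes below are illustrative, not PCSX2's actual constants):

// Self-contained sketch of the corrected wrap arithmetic. Sizes are
// illustrative stand-ins, not PCSX2's ringbuffer constants.
#include <cstdio>

typedef unsigned int uint;

int main()
{
	const uint destSize  = 24;    // ring length in quadwords (hypothetical)
	uint       destStart = 20;    // write cursor sitting near the end of the ring
	const uint len       = 8;     // packet long enough to straddle the wrap point

	uint endpos = destStart + len;             // 28: exceeds the 24-entry ring by 4

	// Fixed arithmetic: split the copy at the wrap point.
	uint firstcopylen = destSize - destStart;  // 4 quadwords fit before the end
	destStart = endpos % destSize;             // remaining 4 land at the buffer start

	printf( "chunk 1: %u qwords at [20..24), chunk 2: %u qwords at [0..%u)\n",
		firstcopylen, destStart, destStart );

	// The old code used the global RingBufferSize/RingBufferMask here; any
	// caller whose destSize differed from the global ring size then copied
	// the wrong lengths and wrote out of bounds -- the corruption/crash
	// this commit fixes.
	return 0;
}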

@@ -443,3 +443,8 @@ struct MTGS_BufferedData
 };
 extern __aligned(32) MTGS_BufferedData RingBuffer;
+
+// FIXME: These belong in common with other memcpy tools. Will move them there later if no one
+// else beats me to it. --air
+extern void MemCopy_WrappedDest( const u128* src, u128* destBase, uint& destStart, uint destSize, uint len );
+extern void MemCopy_WrappedSrc( const u128* srcBase, uint& srcStart, uint srcSize, u128* dest, uint len );
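
A hypothetical call site to make the contract concrete (the names and sizes here are invented for illustration, assuming the declarations above are in scope): each helper performs the copy in one or two memcpy_aligned calls and leaves the caller's cursor wrapped to the correct follow-on position.

void example_roundtrip()
{
	__aligned(32) u128 ring[256];
	__aligned(32) u128 packet[32];
	__aligned(32) u128 readback[32];

	uint writepos = 240;   // near enough to the end that the copy must wrap
	MemCopy_WrappedDest( packet, ring, writepos, 256, 32 );
	// writepos is now (240 + 32) % 256 == 16

	uint readpos = 240;    // the consumer starts where the producer did
	MemCopy_WrappedSrc( ring, readpos, 256, readback, 32 );
	// readpos is likewise 16, and readback[] now holds the packet contiguously
}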

@@ -132,14 +132,14 @@ void SysMtgsThread::PostVsyncEnd()
 	uint packsize = sizeof(RingCmdPacket_Vsync) / 16;
 	PrepDataPacket(GS_RINGTYPE_VSYNC, packsize);
-	RingCmdPacket_Vsync& local( *(RingCmdPacket_Vsync*)GetDataPacketPtr() );
-	memcpy_fast( local.regset1, PS2MEM_GS, sizeof(local.regset1) );
-	local.csr = GSCSRr;
-	local.imr = GSIMR;
-	local.siglblid = GSSIGLBLID;
-	m_packet_ringpos += packsize;
+	MemCopy_WrappedDest( (u128*)PS2MEM_GS, RingBuffer.m_Ring, m_packet_ringpos, RingBufferSize, 0xf );
+	u32* remainder = (u32*)GetDataPacketPtr();
+	remainder[0] = GSCSRr;
+	remainder[1] = GSIMR;
+	(GSRegSIGBLID&)remainder[2] = GSSIGLBLID;
+	m_packet_ringpos = (m_packet_ringpos + 1) & RingBufferMask;
 	SendDataPacket();
 	// Alter-frame flushing! Restarts the ringbuffer (wraps) on every other frame. This is a

@@ -528,10 +528,10 @@ void MemCopy_WrappedDest( const u128* src, u128* destBase, uint& destStart, uint
 	uint endpos = destStart + len;
 	if( endpos >= destSize )
 	{
-		uint firstcopylen = RingBufferSize - destStart;
+		uint firstcopylen = destSize - destStart;
 		memcpy_aligned(&destBase[destStart], src, firstcopylen );
-		destStart = endpos & RingBufferMask;
+		destStart = endpos % destSize;
 		memcpy_aligned(destBase, src+firstcopylen, destStart );
 	}
 	else
@@ -541,6 +541,24 @@ void MemCopy_WrappedDest( const u128* src, u128* destBase, uint& destStart, uint
 	}
 }
+
+void MemCopy_WrappedSrc( const u128* srcBase, uint& srcStart, uint srcSize, u128* dest, uint len )
+{
+	uint endpos = srcStart + len;
+	if( endpos >= srcSize )
+	{
+		uint firstcopylen = srcSize - srcStart;
+		memcpy_aligned(dest, &srcBase[srcStart], firstcopylen );
+
+		srcStart = endpos % srcSize;
+		memcpy_aligned(dest+firstcopylen, srcBase, srcStart );
+	}
+	else
+	{
+		memcpy_aligned(dest, &srcBase[srcStart], len );
+		srcStart += len;
+	}
+}
 // [TODO] optimization: If later templated, we can have Paths 1 and 3 use aligned SSE movs,
 // since only PATH2 can feed us unaligned source data.
 #define copyTag() do { \
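
One subtlety in the pieces above: PostVsyncEnd wraps with '& RingBufferMask' while the helpers wrap with '% destSize'. Masking is equivalent to modulo only when the size is a power of two (mask == size - 1), which presumably holds for the global ringbuffer but cannot be assumed for an arbitrary destSize; that mismatch is exactly why the removed 'endpos & RingBufferMask' line had to become 'endpos % destSize'. A quick self-contained check (values are illustrative):

#include <cassert>

typedef unsigned int uint;

int main()
{
	// Power-of-two size: mask and modulo agree on both sides of the wrap.
	const uint size = 0x8000;
	const uint mask = size - 1;
	for( uint pos = size - 4; pos < size + 4; ++pos )
		assert( (pos & mask) == (pos % size) );

	// Arbitrary size (as MemCopy_WrappedDest's destSize may now be):
	// only modulo wraps correctly.
	const uint destSize = 24;
	assert( 28u % destSize == 4 );              // wraps to slot 4
	assert( (28u & (destSize - 1)) == 20 );     // masking picks slot 20: wrong
	return 0;
}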