A few minor counter changes to prevent scenarios where branch targets could be missed on counter resets/target changes, and to fix a situation where the vsync counter could cause repetitive branch tests to be triggered in quick succession on cycle counter overflow

git-svn-id: http://pcsx2.googlecode.com/svn/trunk@1140 96395faa-99c1-11dd-bbfe-3dabce05a288
refraction 2009-05-06 23:51:27 +00:00
parent 2bf04882e3
commit 9541e502c2
4 changed files with 82 additions and 16 deletions
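
The core of this change is a single scheduling rule applied in several places below: whenever a counter reset or target write makes the nearest counter event closer than the currently scheduled branch test, the branch cycle is pulled in to match; otherwise the event can be missed until the next (possibly much later) branch test. The vsync fix is related: the delta to the next vsync event is now computed in a form that stays valid when the cycle counter wraps, so a wrapped counter no longer triggers back-to-back branch tests. A minimal standalone sketch of both rules follows; the typedefs, the VsyncLikeCounter struct, the helper names, and main() are illustrative assumptions rather than PCSX2 code, though the variable names mirror the diff.

#include <cstdint>
#include <cstdio>

typedef uint32_t u32;
typedef int32_t  s32;

static u32 g_nextBranchCycle; // absolute cycle of the next scheduled branch test
static u32 nextsCounter;      // cycle stamp taken at the last counter update
static s32 nextCounter;       // delta from nextsCounter to the nearest counter event

// Rule 1: clamp the pending branch test to the nearest counter event.
// The subtraction is done in unsigned space so the comparison stays valid
// across cycle-counter wraparound, the same idiom the hunks below use.
static void clampNextBranch()
{
	if ((g_nextBranchCycle - nextsCounter) > (u32)nextCounter)
		g_nextBranchCycle = nextsCounter + nextCounter;
}

struct VsyncLikeCounter { u32 sCycle; u32 CycleT; };

// Rule 2: overflow-safe "cycles until the vsync event" delta
// (CycleT - (cycle - sCycle) rather than (sCycle + CycleT) - cycle).
static s32 vsyncDelta(const VsyncLikeCounter& v, u32 cycle)
{
	return (s32)(v.CycleT - (cycle - v.sCycle));
}

int main()
{
	// Example: the cycle counter has wrapped past zero since the vsync was scheduled.
	VsyncLikeCounter v = { 0xFFFFFF00u, 0x200u };
	u32 cycle = 0x00000020u;             // 0x120 cycles after sCycle, post-wrap
	nextsCounter = cycle;
	nextCounter  = vsyncDelta(v, cycle); // 0x200 - 0x120 = 0xE0 cycles remaining
	g_nextBranchCycle = cycle + 0x1000;  // branch test currently scheduled too far out
	clampNextBranch();                   // pulled in to cycle + 0xE0
	printf("nextCounter=0x%x nextBranch=0x%x\n", (unsigned)nextCounter, (unsigned)g_nextBranchCycle);
	return 0;
}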

View File

@@ -82,16 +82,39 @@ static __forceinline void _rcntSet( int cntidx )
 	c = ((0x10000 - counter.count) * counter.rate) - (cpuRegs.cycle - counter.sCycleT);
 	c += cpuRegs.cycle - nextsCounter; // adjust for time passed since last rcntUpdate();
-	if (c < nextCounter) nextCounter = c;
+	if (c < nextCounter)
+	{
+		nextCounter = c;
+		if((g_nextBranchCycle - nextsCounter) > (u32)nextCounter) //Need to update on counter resets/target changes
+		{
+			g_nextBranchCycle = nextsCounter + nextCounter;
+		}
+	}
 	// Ignore target diff if target is currently disabled.
 	// (the overflow is all we care about since it goes first, and then the
 	// target will be turned on afterward).
-	if( counter.target & EECNT_FUTURE_TARGET ) return;
-	c = ((counter.target - counter.count) * counter.rate) - (cpuRegs.cycle - counter.sCycleT);
-	c += cpuRegs.cycle - nextsCounter; // adjust for time passed since last rcntUpdate();
-	if (c < nextCounter) nextCounter = c;
+	if( counter.target & EECNT_FUTURE_TARGET )
+	{
+		return;
+	}
+	else
+	{
+		c = ((counter.target - counter.count) * counter.rate) - (cpuRegs.cycle - counter.sCycleT);
+		c += cpuRegs.cycle - nextsCounter; // adjust for time passed since last rcntUpdate();
+		if (c < nextCounter)
+		{
+			nextCounter = c;
+			if((g_nextBranchCycle - nextsCounter) > (u32)nextCounter) //Need to update on counter resets/target changes
+			{
+				g_nextBranchCycle = nextsCounter + nextCounter;
+			}
+		}
+	}
 	//cpuSetNextBranch( nextsCounter, nextCounter );
 }
@@ -100,7 +123,7 @@ static __forceinline void cpuRcntSet()
 	int i;
 	nextsCounter = cpuRegs.cycle;
-	nextCounter = (vsyncCounter.sCycle + vsyncCounter.CycleT) - cpuRegs.cycle;
+	nextCounter = vsyncCounter.CycleT - (cpuRegs.cycle - vsyncCounter.sCycle);
 	for (i = 0; i < 4; i++)
 		_rcntSet( i );
@@ -714,6 +737,19 @@ __forceinline void rcntWtarget(int index, u32 value)
 	// If the target is behind the current count, set it up so that the counter must
 	// overflow first before the target fires:
+	if(counters[index].mode.IsCounting) {
+		if(counters[index].mode.ClockSource != 0x3) {
+			u32 change = cpuRegs.cycle - counters[index].sCycleT;
+			if( change > 0 )
+			{
+				counters[index].count += change / counters[index].rate;
+				change -= (change / counters[index].rate) * counters[index].rate;
+				counters[index].sCycleT = cpuRegs.cycle - change;
+			}
+		}
+	}
 	if( counters[index].target <= rcntCycle(index) )
 		counters[index].target |= EECNT_FUTURE_TARGET;
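
The rcntWtarget hunk above folds the cycles elapsed since the last update into the counter before deciding whether the newly written target is already behind the count. A small standalone sketch of that re-basing arithmetic, assuming a hypothetical CounterLike struct in place of PCSX2's counter type:

#include <cstdint>
#include <cstdio>

typedef uint32_t u32;

struct CounterLike { u32 count; u32 rate; u32 sCycleT; };

// Illustrative only: fold whole elapsed ticks into 'count' and keep the
// leftover raw cycles by moving sCycleT forward, so a later comparison of
// the target against the current count uses up-to-date values.
// 'rate' (cycles per tick) is assumed non-zero, as in the emulator.
static void rebase(CounterLike& c, u32 cycle)
{
	u32 change = cycle - c.sCycleT;              // raw cycles since the last re-base
	if (change > 0)
	{
		c.count   += change / c.rate;                // whole ticks elapsed
		change    -= (change / c.rate) * c.rate;     // remainder in raw cycles
		c.sCycleT  = cycle - change;                 // carry the remainder forward
	}
}

int main()
{
	CounterLike c = { 10, 32, 1000 };  // 10 ticks counted, 32 cycles per tick
	rebase(c, 1100);                   // 100 raw cycles later: 3 ticks, 4 cycles left over
	printf("count=%u sCycleT=%u\n", c.count, c.sCycleT);  // count=13, sCycleT=1096
	return 0;
}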

View File

@@ -91,14 +91,32 @@ static void _rcntSet( int cntidx )
 	c = (u64)((overflowCap - counter.count) * counter.rate) - (psxRegs.cycle - counter.sCycleT);
 	c += psxRegs.cycle - psxNextsCounter; // adjust for time passed since last rcntUpdate();
-	if(c < (u64)psxNextCounter) psxNextCounter = (u32)c;
+	if(c < (u64)psxNextCounter)
+	{
+		psxNextCounter = (u32)c;
+		if((g_psxNextBranchCycle - psxNextsCounter) > (u32)psxNextCounter) //Need to update on counter resets/target changes
+		{
+			g_psxNextBranchCycle = psxNextsCounter + psxNextCounter;
+		}
+	}
 	//if((counter.mode & 0x10) == 0 || psxCounters[i].target > 0xffff) continue;
 	if( counter.target & IOPCNT_FUTURE_TARGET ) return;
-	c = (s64)((counter.target - counter.count) * counter.rate) - (psxRegs.cycle - counter.sCycleT);
-	c += psxRegs.cycle - psxNextsCounter; // adjust for time passed since last rcntUpdate();
-	if(c < (u64)psxNextCounter) psxNextCounter = (u32)c;
+	c = (s64)((counter.target - counter.count) * counter.rate) - (psxRegs.cycle - counter.sCycleT);
+	c += psxRegs.cycle - psxNextsCounter; // adjust for time passed since last rcntUpdate();
+	if(c < (u64)psxNextCounter)
+	{
+		psxNextCounter = (u32)c;
+		if((g_psxNextBranchCycle - psxNextsCounter) > (u32)psxNextCounter) //Need to update on counter resets/target changes
+		{
+			g_psxNextBranchCycle = psxNextsCounter + psxNextCounter;
+		}
+	}
 }
@@ -383,6 +401,11 @@ void psxRcntUpdate()
 	int i;
 	//u32 change = 0;
 	g_psxNextBranchCycle = psxRegs.cycle + 32;
+	psxNextCounter = 0x7fffffff;
+	psxNextsCounter = psxRegs.cycle;
 	for (i=0; i<=5; i++)
 	{
 		s32 change = psxRegs.cycle - psxCounters[i].sCycleT;
@@ -424,9 +447,7 @@ void psxRcntUpdate()
 		//if( psxCounters[i].count >= psxCounters[i].target ) _rcntTestTarget( i );
 	}
-	psxNextCounter = 0xffffff;
-	psxNextsCounter = psxRegs.cycle;
 	if(SPU2async)
 	{

View File

@@ -54,6 +54,12 @@ static void __fastcall psxDmaGeneric(u32 madr, u32 bcr, u32 chcr, u32 spuCore, _
 		psxNextsCounter = psxRegs.cycle;
 		if (psxCounters[6].CycleT < psxNextCounter)
 			psxNextCounter = psxCounters[6].CycleT;
+		if((g_psxNextBranchCycle - psxNextsCounter) > (u32)psxNextCounter)
+		{
+			DevCon::Notice("SPU2async Setting new counter branch, old %x new %x ((%x - %x = %x) > %x delta)", params g_psxNextBranchCycle, psxNextsCounter + psxNextCounter, g_psxNextBranchCycle, psxNextsCounter, (g_psxNextBranchCycle - psxNextsCounter), psxNextCounter);
+			g_psxNextBranchCycle = psxNextsCounter + psxNextCounter;
+		}
 	}
 	switch (chcr)

View File

@@ -231,10 +231,13 @@ __releaseinline void psxBranchTest()
 		psxRcntUpdate();
 		iopBranchAction = true;
 	}
+	else
+	{
 	// start the next branch at the next counter event by default
 	// the interrupt code below will assign nearer branches if needed.
-	g_psxNextBranchCycle = psxNextsCounter+psxNextCounter;
+		g_psxNextBranchCycle = psxNextsCounter+psxNextCounter;
+	}
 	if (psxRegs.interrupt)
 	{