IOP: Adjust cycle counts slightly more accurately in some situations

refractionpcsx2 2024-05-15 18:11:29 +01:00
parent 8887930fd5
commit 454d4c9fa0
4 changed files with 62 additions and 23 deletions
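In short: the change replaces the hard-coded * 8 IOP-to-EE cycle scaling with a ratio computed from the clock constants (PS2CLK / PSXCLK), and, whenever the IOP has been switched into PS1 mode via HW_ICFG bit 3, converts elapsed IOP cycles to EE cycles using the gcd-reduced fraction 1280/147 while carrying the division remainder forward, so fractional EE cycles are no longer dropped. A minimal standalone sketch of that carry scheme, using illustrative names and the constants implied by the diff's gcd comment (1280 * 230400 = 294,912,000 and 147 * 230400 = 33,868,800), not PCSX2's own types:

#include <cstdint>

// Illustrative constants: 1280/147 is the reduced clock ratio cited in the
// diff's "F = gcd(PS2CLK, PSXCLK) = 230400" comment.
constexpr uint32_t kNum = 1280;  // EE clock / 230400
constexpr uint32_t kDenom = 147; // IOP (PS1-mode) clock / 230400

struct IopToEEScaler
{
	uint32_t carry = 0; // remainder left over from the previous conversion

	// Converts an IOP cycle delta to EE cycles without losing the fractional
	// part; per-block deltas are small, so 32-bit math is enough here, as in
	// the diff itself.
	uint32_t Scale(uint32_t iopCycles)
	{
		const uint32_t t = iopCycles * kNum + carry;
		carry = t % kDenom;
		return t / kDenom;
	}
};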

View File

@@ -135,8 +135,8 @@ __fi void PSX_INT( IopEventId n, s32 ecycle )
psxRegs.eCycle[n] = ecycle;
psxSetNextBranchDelta(ecycle);
const s32 iopDelta = (psxRegs.iopNextEventCycle - psxRegs.cycle) * 8;
const float multiplier = static_cast<float>(PS2CLK) / static_cast<float>(PSXCLK);
const s32 iopDelta = (psxRegs.iopNextEventCycle - psxRegs.cycle) * multiplier;
if (psxRegs.iopCycleEE < iopDelta)
{

View File

@@ -219,15 +219,6 @@ static __fi void execI()
psxRegs.pc+= 4;
psxRegs.cycle++;
if ((psxHu32(HW_ICFG) & (1 << 3)))
{
//One of the Iop to EE delta clocks to be set in PS1 mode.
psxRegs.iopCycleEE -= 9;
}
else
{ //default ps2 mode value
psxRegs.iopCycleEE -= 8;
}
psxBSC[psxRegs.code >> 26]();
}
@@ -268,15 +259,36 @@ static s32 intExecuteBlock( s32 eeCycles )
{
psxRegs.iopBreak = 0;
psxRegs.iopCycleEE = eeCycles;
u32 lastIOPCycle = 0;
while (psxRegs.iopCycleEE > 0)
{
lastIOPCycle = psxRegs.cycle;
if ((psxHu32(HW_ICFG) & 8) && ((psxRegs.pc & 0x1fffffffU) == 0xa0 || (psxRegs.pc & 0x1fffffffU) == 0xb0 || (psxRegs.pc & 0x1fffffffU) == 0xc0))
psxBiosCall();
branch2 = 0;
while (!branch2)
execI();
if ((psxHu32(HW_ICFG) & (1 << 3)))
{
// F = gcd(PS2CLK, PSXCLK) = 230400
const u32 cnum = 1280; // PS2CLK / F
const u32 cdenom = 147; // PSXCLK / F
// Exact IOP-to-EE cycle scaling used while the IOP runs in PS1 mode.
static u32 carry;
const u32 t = ((cnum * (psxRegs.cycle - lastIOPCycle)) + carry);
psxRegs.iopCycleEE -= t / cdenom;
carry = t % cdenom;
}
else
{
//default ps2 mode value
psxRegs.iopCycleEE -= (psxRegs.cycle - lastIOPCycle) * 8;
}
}
return psxRegs.iopBreak + psxRegs.iopCycleEE;
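Why the carry matters: without it, each pass through the outer loop would truncate up to 146/147 of an EE cycle, and that error would compound block after block. With the carry, any 147 IOP cycles hand the EE exactly 1280 cycles regardless of how the work is split across iterations. A small standalone check of that property (illustrative only, not part of the diff):

#include <cassert>
#include <cstdint>

int main()
{
	const uint32_t cnum = 1280, cdenom = 147;
	uint32_t carry = 0, ee_total = 0;
	for (uint32_t i = 0; i < 147; ++i) // one IOP cycle at a time
	{
		const uint32_t t = cnum + carry;
		ee_total += t / cdenom;
		carry = t % cdenom;
	}
	assert(ee_total == 1280 && carry == 0);
	return 0;
}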

View File

@@ -432,8 +432,8 @@ __fi void _cpuEventTest_Shared()
CpuVU1->ExecuteBlock();
// ---- Schedule Next Event Test --------------
const int nextIopEventDeta = ((psxRegs.iopNextEventCycle - psxRegs.cycle) * 8);
const float multiplier = static_cast<float>(PS2CLK) / static_cast<float>(PSXCLK);
const int nextIopEventDeta = ((psxRegs.iopNextEventCycle - psxRegs.cycle) * multiplier);
// 8 or more cycles behind and there's an event scheduled
if (EEsCycle >= nextIopEventDeta)
{
@@ -446,7 +446,7 @@ __fi void _cpuEventTest_Shared()
else
{
// Otherwise IOP is caught up/not doing anything so we can wait for the next event.
cpuSetNextEventDelta(((psxRegs.iopNextEventCycle - psxRegs.cycle) * 8) - EEsCycle);
cpuSetNextEventDelta(((psxRegs.iopNextEventCycle - psxRegs.cycle) * multiplier) - EEsCycle);
}
// Apply vsync and other counter nextCycles

View File

@@ -1126,6 +1126,33 @@ static __fi u32 psxScaleBlockCycles()
return s_psxBlockCycles;
}
static void iPsxAddEECycles(u32 blockCycles)
{
if (!(psxHu32(HW_ICFG) & (1 << 3))) [[likely]]
{
if (blockCycles != 0xFFFFFFFF)
xSUB(ptr32[&psxRegs.iopCycleEE], blockCycles * 8);
else
xSUB(ptr32[&psxRegs.iopCycleEE], eax);
return;
}
// F = gcd(PS2CLK, PSXCLK) = 230400
const u32 cnum = 1280; // PS2CLK / F
const u32 cdenom = 147; // PSXCLK / F
static u32 ticks_carry = 0;
if (blockCycles != 0xFFFFFFFF)
xMOV(eax, blockCycles * cnum);
xADD(eax, ptr32[&ticks_carry]);
xMOV(ecx, cdenom);
xXOR(edx, edx);
xUDIV(ecx);
xMOV(ptr32[&ticks_carry], edx);
xSUB(ptr32[&psxRegs.iopCycleEE], eax);
}
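For reference, the PS1-mode path above relies on the x86 div semantics: div ecx divides the 64-bit value in edx:eax by ecx, leaving the quotient in eax and the remainder in edx, which is why edx is cleared before xUDIV. At run time the generated code therefore computes roughly the following (a sketch of the emitted sequence's effect, not part of the diff; when blockCycles is the 0xFFFFFFFF sentinel, the scaled value comes from whatever the caller last left in eax):

	const uint32_t t = eax + ticks_carry; // eax = blockCycles * cnum, or the caller-supplied value
	iopCycleEE -= t / cdenom;             // div: quotient lands in eax
	ticks_carry = t % cdenom;             // div: remainder lands in edx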
static void iPsxBranchTest(u32 newpc, u32 cpuBranch)
{
u32 blockCycles = psxScaleBlockCycles();
@@ -1143,7 +1170,7 @@ static void iPsxBranchTest(u32 newpc, u32 cpuBranch)
xMOV(ptr32[&psxRegs.cycle], eax);
xSUB(eax, ecx);
xSHL(eax, 3);
xSUB(ptr32[&psxRegs.iopCycleEE], eax);
iPsxAddEECycles(0xFFFFFFFF);
xJLE(iopExitRecompiledCode);
xFastCall((void*)iopEventTest);
@@ -1156,16 +1183,16 @@ static void iPsxBranchTest(u32 newpc, u32 cpuBranch)
}
else
{
xMOV(eax, ptr32[&psxRegs.cycle]);
xADD(eax, blockCycles);
xMOV(ptr32[&psxRegs.cycle], eax); // update cycles
xMOV(ebx, ptr32[&psxRegs.cycle]);
xADD(ebx, blockCycles);
xMOV(ptr32[&psxRegs.cycle], ebx); // update cycles
// jump if iopCycleEE <= 0 (iop's timeslice timed out, so time to return control to the EE)
xSUB(ptr32[&psxRegs.iopCycleEE], blockCycles * 8);
iPsxAddEECycles(blockCycles);
xJLE(iopExitRecompiledCode);
// check if an event is pending
xSUB(eax, ptr32[&psxRegs.iopNextEventCycle]);
xSUB(ebx, ptr32[&psxRegs.iopNextEventCycle]);
xForwardJS<u8> nointerruptpending;
xFastCall((void*)iopEventTest);
@@ -1212,7 +1239,7 @@ void rpsxSYSCALL()
j8Ptr[0] = JE8(0);
xADD(ptr32[&psxRegs.cycle], psxScaleBlockCycles());
xSUB(ptr32[&psxRegs.iopCycleEE], psxScaleBlockCycles() * 8);
iPsxAddEECycles(psxScaleBlockCycles());
JMP32((uptr)iopDispatcherReg - ((uptr)x86Ptr + 5));
// jump target for skipping blockCycle updates
@@ -1234,7 +1261,7 @@ void rpsxBREAK()
xCMP(ptr32[&psxRegs.pc], psxpc - 4);
j8Ptr[0] = JE8(0);
xADD(ptr32[&psxRegs.cycle], psxScaleBlockCycles());
xSUB(ptr32[&psxRegs.iopCycleEE], psxScaleBlockCycles() * 8);
iPsxAddEECycles(psxScaleBlockCycles());
JMP32((uptr)iopDispatcherReg - ((uptr)x86Ptr + 5));
x86SetJ8(j8Ptr[0]);
@@ -1719,7 +1746,7 @@ StartRecomp:
else
{
xADD(ptr32[&psxRegs.cycle], psxScaleBlockCycles());
xSUB(ptr32[&psxRegs.iopCycleEE], psxScaleBlockCycles() * 8);
iPsxAddEECycles(psxScaleBlockCycles());
}
if (willbranch3 || !psxbranch)