mirror of https://github.com/PCSX2/pcsx2.git
x86: Move cycle/writeback fields to CPU structs
[SAVEVERSION+] Potentially better locality, keeps everything we're accessing from the rec together.
parent ff7053c566
commit fd194124a9
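For orientation, here is a condensed sketch of the layout this diff converges on. It is not the actual PCSX2 headers (almost all members are elided, and u32/s32 below are stand-ins for PCSX2's fixed-width typedefs); the field names come straight from the hunks that follow, where they replace the former globals g_nextEventCycle, g_lastEventCycle, s_iLastCOP0Cycle, s_iLastPERFCycle, g_iopNextEventCycle, iopBreak, iopCycleEE and g_recWriteback.

#include <cstdint>
using u32 = std::uint32_t; // stand-ins for PCSX2's typedefs
using s32 = std::int32_t;

struct cpuRegisters {      // EE side; see the "struct cpuRegisters" hunk below
	// ... existing register state elided ...
	u32 pcWriteback;       // was g_recWriteback (EE recompiler)
	u32 nextEventCycle;    // was g_nextEventCycle
	u32 lastEventCycle;    // was g_lastEventCycle
	u32 lastCOP0Cycle;     // was s_iLastCOP0Cycle
	u32 lastPERFCycle[2];  // was s_iLastPERFCycle
};

struct psxRegisters {      // IOP side; see the "struct psxRegisters" hunk below
	// ... existing register state elided ...
	u32 pcWriteback;       // was g_recWriteback (IOP recompiler)
	u32 iopNextEventCycle; // was g_iopNextEventCycle
	s32 iopBreak;          // was iopBreak
	s32 iopCycleEE;        // was iopCycleEE
};

The recompiler hunks then address these fields through the structs, e.g. xMOV(ptr[&cpuRegs.nextEventCycle], eax), which is the locality the commit message is after: the values the generated code touches on every block now sit next to the rest of the register file it already accesses.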
@@ -18,9 +18,6 @@
 #include "Common.h"
 #include "COP0.h"
 
-u32 s_iLastCOP0Cycle = 0;
-u32 s_iLastPERFCycle[2] = { 0, 0 };
-
 // Updates the CPU's mode of operation (either, Kernel, Supervisor, or User modes).
 // Currently the different modes are not implemented.
 // Given this function is called so much, it's commented out for now. (rama)
@@ -127,8 +124,8 @@ __fi void COP0_UpdatePCCR()
 // or the counting function is not enabled (CTE)
 if (cpuRegs.CP0.n.Status.b.ERL || !cpuRegs.PERF.n.pccr.b.CTE)
 {
-s_iLastPERFCycle[0] = cpuRegs.cycle;
-s_iLastPERFCycle[1] = s_iLastPERFCycle[0];
+cpuRegs.lastPERFCycle[0] = cpuRegs.cycle;
+cpuRegs.lastPERFCycle[1] = cpuRegs.lastPERFCycle[0];
 return;
 }
 
@@ -142,13 +139,13 @@ __fi void COP0_UpdatePCCR()
 
 if( PERF_ShouldCountEvent( cpuRegs.PERF.n.pccr.b.Event0 ) )
 {
-u32 incr = cpuRegs.cycle - s_iLastPERFCycle[0];
+u32 incr = cpuRegs.cycle - cpuRegs.lastPERFCycle[0];
 if( incr == 0 ) incr++;
 
 // use prev/XOR method for one-time exceptions (but likely less correct)
 //u32 prev = cpuRegs.PERF.n.pcr0;
 cpuRegs.PERF.n.pcr0 += incr;
-s_iLastPERFCycle[0] = cpuRegs.cycle;
+cpuRegs.lastPERFCycle[0] = cpuRegs.cycle;
 
 //prev ^= (1UL<<31); // XOR is fun!
 //if( (prev & cpuRegs.PERF.n.pcr0) & (1UL<<31) )
@@ -193,11 +190,11 @@ __fi void COP0_UpdatePCCR()
 
 if( PERF_ShouldCountEvent( cpuRegs.PERF.n.pccr.b.Event1 ) )
 {
-u32 incr = cpuRegs.cycle - s_iLastPERFCycle[1];
+u32 incr = cpuRegs.cycle - cpuRegs.lastPERFCycle[1];
 if( incr == 0 ) incr++;
 
 cpuRegs.PERF.n.pcr1 += incr;
-s_iLastPERFCycle[1] = cpuRegs.cycle;
+cpuRegs.lastPERFCycle[1] = cpuRegs.cycle;
 
 if( (cpuRegs.PERF.n.pcr1 & 0x80000000))
 {
@@ -456,10 +453,10 @@ void MFC0()
 
 case 9:
 {
-u32 incr = cpuRegs.cycle-s_iLastCOP0Cycle;
+u32 incr = cpuRegs.cycle - cpuRegs.lastCOP0Cycle;
 if( incr == 0 ) incr++;
 cpuRegs.CP0.n.Count += incr;
-s_iLastCOP0Cycle = cpuRegs.cycle;
+cpuRegs.lastCOP0Cycle = cpuRegs.cycle;
 if( !_Rt_ ) break;
 }
 [[fallthrough]];
@@ -475,7 +472,7 @@ void MTC0()
 switch (_Rd_)
 {
 case 9:
-s_iLastCOP0Cycle = cpuRegs.cycle;
+cpuRegs.lastCOP0Cycle = cpuRegs.cycle;
 cpuRegs.CP0.r[9] = cpuRegs.GPR.r[_Rt_].UL[0];
 break;
 
@@ -506,12 +503,12 @@ void MTC0()
 else if (0 == (_Imm_ & 2)) // MTPC 0, only LSB of register matters
 {
 cpuRegs.PERF.n.pcr0 = cpuRegs.GPR.r[_Rt_].UL[0];
-s_iLastPERFCycle[0] = cpuRegs.cycle;
+cpuRegs.lastPERFCycle[0] = cpuRegs.cycle;
 }
 else // MTPC 1
 {
 cpuRegs.PERF.n.pcr1 = cpuRegs.GPR.r[_Rt_].UL[0];
-s_iLastPERFCycle[1] = cpuRegs.cycle;
+cpuRegs.lastPERFCycle[1] = cpuRegs.cycle;
 }
 break;
 
@@ -29,8 +29,8 @@ static __fi void IntCHackCheck()
 {
 // Sanity check: To protect from accidentally "rewinding" the cyclecount
 // on the few times nextBranchCycle can be behind our current cycle.
-s32 diff = g_nextEventCycle - cpuRegs.cycle;
-if (diff > 0 && (cpuRegs.cycle - g_lastEventCycle) > 8) cpuRegs.cycle = g_nextEventCycle;
+s32 diff = cpuRegs.nextEventCycle - cpuRegs.cycle;
+if (diff > 0 && (cpuRegs.cycle - cpuRegs.lastEventCycle) > 8) cpuRegs.cycle = cpuRegs.nextEventCycle;
 }
 
 template< uint page > RETURNS_R128 _hwRead128(u32 mem);
@@ -433,7 +433,7 @@ void psxRcntUpdate()
 {
 int i;
 
-g_iopNextEventCycle = psxRegs.cycle + 32;
+psxRegs.iopNextEventCycle = psxRegs.cycle + 32;
 
 psxNextCounter = 0x7fffffff;
 psxNextsCounter = psxRegs.cycle;
@@ -55,10 +55,10 @@ static void psxDmaGeneric(u32 madr, u32 bcr, u32 chcr, u32 spuCore)
 if (psxCounters[6].CycleT < psxNextCounter)
 psxNextCounter = psxCounters[6].CycleT;
 
-if ((g_iopNextEventCycle - psxNextsCounter) > (u32)psxNextCounter)
+if ((psxRegs.iopNextEventCycle - psxNextsCounter) > (u32)psxNextCounter)
 {
 //DevCon.Warning("SPU2async Setting new counter branch, old %x new %x ((%x - %x = %x) > %x delta)", g_iopNextEventCycle, psxNextsCounter + psxNextCounter, g_iopNextEventCycle, psxNextsCounter, (g_iopNextEventCycle - psxNextsCounter), psxNextCounter);
-g_iopNextEventCycle = psxNextsCounter + psxNextCounter;
+psxRegs.iopNextEventCycle = psxNextsCounter + psxNextCounter;
 }
 
 switch (chcr)
@@ -37,19 +37,6 @@ R3000Acpu *psxCpu;
 u32 g_psxConstRegs[32];
 u32 g_psxHasConstReg, g_psxFlushedConstReg;
 
-// Controls when branch tests are performed.
-u32 g_iopNextEventCycle = 0;
-
-// This value is used when the IOP execution is broken to return control to the EE.
-// (which happens when the IOP throws EE-bound interrupts). It holds the value of
-// iopCycleEE (which is set to zero to facilitate the code break), so that the unrun
-// cycles can be accounted for later.
-s32 iopBreak = 0;
-
-// tracks the IOP's current sync status with the EE. When it dips below zero,
-// control is returned to the EE.
-s32 iopCycleEE = -1;
-
 // Used to signal to the EE when important actions that need IOP-attention have
 // happened (hsyncs, vsyncs, IOP exceptions, etc). IOP runs code whenever this
 // is true, even if it's already running ahead a bit.
@@ -67,9 +54,9 @@ void psxReset()
 psxRegs.CP0.n.Status = 0x10900000; // COP0 enabled | BEV = 1 | TS = 1
 psxRegs.CP0.n.PRid = 0x0000001f; // PRevID = Revision ID, same as the IOP R3000A
 
-iopBreak = 0;
-iopCycleEE = -1;
-g_iopNextEventCycle = psxRegs.cycle + 4;
+psxRegs.iopBreak = 0;
+psxRegs.iopCycleEE = -1;
+psxRegs.iopNextEventCycle = psxRegs.cycle + 4;
 
 psxHwReset();
 PSXCLK = 36864000;
@@ -123,8 +110,8 @@ __fi void psxSetNextBranch( u32 startCycle, s32 delta )
 // typecast the conditional to signed so that things don't blow up
 // if startCycle is greater than our next branch cycle.
 
-if( (int)(g_iopNextEventCycle - startCycle) > delta )
-g_iopNextEventCycle = startCycle + delta;
+if( (int)(psxRegs.iopNextEventCycle - startCycle) > delta )
+psxRegs.iopNextEventCycle = startCycle + delta;
 }
 
 __fi void psxSetNextBranchDelta( s32 delta )
@@ -151,15 +138,15 @@ __fi void PSX_INT( IopEventId n, s32 ecycle )
 psxRegs.sCycle[n] = psxRegs.cycle;
 psxRegs.eCycle[n] = ecycle;
 
-psxSetNextBranchDelta( ecycle );
+psxSetNextBranchDelta(ecycle);
 
-if( iopCycleEE < 0 )
+if (psxRegs.iopCycleEE < 0)
 {
 // The EE called this int, so inform it to branch as needed:
 // fixme - this doesn't take into account EE/IOP sync (the IOP may be running
 // ahead or behind the EE as per the EEsCycles value)
-s32 iopDelta = (g_iopNextEventCycle-psxRegs.cycle)*8;
-cpuSetNextEventDelta( iopDelta );
+const s32 iopDelta = (psxRegs.iopNextEventCycle - psxRegs.cycle) * 8;
+cpuSetNextEventDelta(iopDelta);
 }
 }
 
@@ -222,16 +209,16 @@ static __fi void _psxTestInterrupts()
 
 __ri void iopEventTest()
 {
-if( psxTestCycle( psxNextsCounter, psxNextCounter ) )
+if (psxTestCycle(psxNextsCounter, psxNextCounter))
 {
 psxRcntUpdate();
 iopEventAction = true;
 }
 else
 {
 // start the next branch at the next counter event by default
 // the interrupt code below will assign nearer branches if needed.
-g_iopNextEventCycle = psxNextsCounter+psxNextCounter;
+psxRegs.iopNextEventCycle = psxNextsCounter + psxNextCounter;
 }
 
 if (psxRegs.interrupt)
@@ -241,9 +228,9 @@ __ri void iopEventTest()
 iopEventTestIsActive = false;
 }
 
-if( (psxHu32(0x1078) != 0) && ((psxHu32(0x1070) & psxHu32(0x1074)) != 0) )
+if ((psxHu32(0x1078) != 0) && ((psxHu32(0x1070) & psxHu32(0x1074)) != 0))
 {
-if( (psxRegs.CP0.n.Status & 0xFE01) >= 0x401 )
+if ((psxRegs.CP0.n.Status & 0xFE01) >= 0x401)
 {
 PSXCPU_LOG("Interrupt: %x %x", psxHu32(0x1070), psxHu32(0x1074));
 psxException(0, 0);
@@ -108,6 +108,21 @@ struct psxRegisters {
 u32 code; /* The instruction */
 u32 cycle;
 u32 interrupt;
+u32 pcWriteback;
+
+// Controls when branch tests are performed.
+u32 iopNextEventCycle;
+
+// This value is used when the IOP execution is broken to return control to the EE.
+// (which happens when the IOP throws EE-bound interrupts). It holds the value of
+// iopCycleEE (which is set to zero to facilitate the code break), so that the unrun
+// cycles can be accounted for later.
+s32 iopBreak;
+
+// tracks the IOP's current sync status with the EE. When it dips below zero,
+// control is returned to the EE.
+s32 iopCycleEE;
+
 u32 sCycle[32]; // start cycle for signaled ints
 s32 eCycle[32]; // cycle delta for signaled ints (sCycle + eCycle == branch cycle)
 //u32 _msflag[32];
@@ -116,10 +131,6 @@ struct psxRegisters {
 
 alignas(16) extern psxRegisters psxRegs;
 
-extern u32 g_iopNextEventCycle;
-extern s32 iopBreak; // used when the IOP execution is broken and control returned to the EE
-extern s32 iopCycleEE; // tracks IOP's current sych status with the EE
-
 #ifndef _PC_
 
 #define _i32(x) (s32)x //R3000A
@@ -243,11 +243,11 @@ static __fi void execI()
 if ((psxHu32(HW_ICFG) & (1 << 3)))
 {
 //One of the Iop to EE delta clocks to be set in PS1 mode.
-iopCycleEE-=9;
+psxRegs.iopCycleEE -= 9;
 }
 else
 { //default ps2 mode value
-iopCycleEE-=8;
+psxRegs.iopCycleEE -= 8;
 }
 psxBSC[psxRegs.code >> 26]();
 }
@@ -278,12 +278,12 @@ static void intReset() {
 
 static s32 intExecuteBlock( s32 eeCycles )
 {
-iopBreak = 0;
-iopCycleEE = eeCycles;
+psxRegs.iopBreak = 0;
+psxRegs.iopCycleEE = eeCycles;
 
 try
 {
-while (iopCycleEE > 0) {
+while (psxRegs.iopCycleEE > 0) {
 if ((psxHu32(HW_ICFG) & 8) && ((psxRegs.pc & 0x1fffffffU) == 0xa0 || (psxRegs.pc & 0x1fffffffU) == 0xb0 || (psxRegs.pc & 0x1fffffffU) == 0xc0))
 psxBiosCall();
 
@@ -299,7 +299,7 @@ static s32 intExecuteBlock( s32 eeCycles )
 Cpu->ExitExecution();
 }
 
-return iopBreak + iopCycleEE;
+return psxRegs.iopBreak + psxRegs.iopCycleEE;
 }
 
 static void intClear(u32 Addr, u32 Size) {
@@ -98,7 +98,7 @@ void cpuReset()
 fpuRegs.fprc[0] = 0x00002e30; // fpu Revision..
 fpuRegs.fprc[31] = 0x01000001; // fpu Status/Control
 
-g_nextEventCycle = cpuRegs.cycle + 4;
+cpuRegs.nextEventCycle = cpuRegs.cycle + 4;
 EEsCycle = 0;
 EEoCycle = cpuRegs.cycle;
 
@@ -237,9 +237,9 @@ __fi void cpuSetNextEvent( u32 startCycle, s32 delta )
 // typecast the conditional to signed so that things don't blow up
 // if startCycle is greater than our next branch cycle.
 
-if( (int)(g_nextEventCycle - startCycle) > delta )
+if( (int)(cpuRegs.nextEventCycle - startCycle) > delta )
 {
-g_nextEventCycle = startCycle + delta;
+cpuRegs.nextEventCycle = startCycle + delta;
 }
 }
 
@@ -274,7 +274,7 @@ __fi int cpuTestCycle( u32 startCycle, s32 delta )
 // tells the EE to run the branch test the next time it gets a chance.
 __fi void cpuSetEvent()
 {
-g_nextEventCycle = cpuRegs.cycle;
+cpuRegs.nextEventCycle = cpuRegs.cycle;
 }
 
 __fi void cpuClearInt( uint i )
@@ -346,8 +346,8 @@ static __fi bool _cpuTestInterrupts()
 
 static __fi void _cpuTestTIMR()
 {
-cpuRegs.CP0.n.Count += cpuRegs.cycle-s_iLastCOP0Cycle;
-s_iLastCOP0Cycle = cpuRegs.cycle;
+cpuRegs.CP0.n.Count += cpuRegs.cycle - cpuRegs.lastCOP0Cycle;
+cpuRegs.lastCOP0Cycle = cpuRegs.cycle;
 
 // fixme: this looks like a hack to make up for the fact that the TIMR
 // doesn't yet have a proper mechanism for setting itself up on a nextEventCycle.
@@ -385,23 +385,21 @@ static bool cpuIntsEnabled(int Interrupt)
 !cpuRegs.CP0.n.Status.b.EXL && (cpuRegs.CP0.n.Status.b.ERL == 0);
 }
 
-// if cpuRegs.cycle is greater than this cycle, should check cpuEventTest for updates
-u32 g_nextEventCycle = 0;
-u32 g_lastEventCycle = 0;
 // Shared portion of the branch test, called from both the Interpreter
 // and the recompiler. (moved here to help alleviate redundant code)
 __fi void _cpuEventTest_Shared()
 {
 eeEventTestIsActive = true;
-g_nextEventCycle = cpuRegs.cycle + eeWaitCycles;
-g_lastEventCycle = cpuRegs.cycle;
+cpuRegs.nextEventCycle = cpuRegs.cycle + eeWaitCycles;
+cpuRegs.lastEventCycle = cpuRegs.cycle;
 // ---- INTC / DMAC (CPU-level Exceptions) -----------------
 // Done first because exceptions raised during event tests need to be postponed a few
 // cycles (fixes Grandia II [PAL], which does a spin loop on a vsync and expects to
 // be able to read the value before the exception handler clears it).
 
 uint mask = intcInterrupt() | dmacInterrupt();
-if (cpuIntsEnabled(mask)) cpuException(mask, cpuRegs.branch);
+if (cpuIntsEnabled(mask))
+cpuException(mask, cpuRegs.branch);
 
 
 // ---- Counters -------------
@@ -409,7 +407,7 @@ __fi void _cpuEventTest_Shared()
 // escape/suspend hooks, and it's really a good idea to suspend/resume emulation before
 // doing any actual meaningful branchtest logic.
 
-if ( cpuTestCycle( nextsCounter, nextCounter ) )
+if (cpuTestCycle(nextsCounter, nextCounter))
 {
 rcntUpdate();
 _cpuTestPERF();
@@ -429,7 +427,7 @@ __fi void _cpuEventTest_Shared()
 // Only use the lower 17 bits of the cpuRegs.interrupt as the upper bits are for VU0/1 sync which can't be done in a tight loop
 if ((!g_GameStarted || CHECK_INSTANTDMAHACK) && dmacRegs.ctrl.DMAE && !(psHu8(DMAC_ENABLER + 2) & 1) && (cpuRegs.interrupt & 0x1FFFF))
 {
-while((cpuRegs.interrupt & 0x1FFFF) && _cpuTestInterrupts())
+while ((cpuRegs.interrupt & 0x1FFFF) && _cpuTestInterrupts())
 ;
 }
 else
@@ -447,17 +445,17 @@ __fi void _cpuEventTest_Shared()
 EEsCycle += cpuRegs.cycle - EEoCycle;
 EEoCycle = cpuRegs.cycle;
 
-if( EEsCycle > 0 )
+if (EEsCycle > 0)
 iopEventAction = true;
 
 iopEventTest();
 
-if( iopEventAction )
+if (iopEventAction)
 {
 //if( EEsCycle < -450 )
 // Console.WriteLn( " IOP ahead by: %d cycles", -EEsCycle );
 
-EEsCycle = psxCpu->ExecuteBlock( EEsCycle );
+EEsCycle = psxCpu->ExecuteBlock(EEsCycle);
 
 iopEventAction = false;
 }
@@ -470,24 +468,24 @@ __fi void _cpuEventTest_Shared()
 
 // ---- Schedule Next Event Test --------------
 
-if( EEsCycle > 192 )
+if (EEsCycle > 192)
 {
 // EE's running way ahead of the IOP still, so we should branch quickly to give the
 // IOP extra timeslices in short order.
 
-cpuSetNextEventDelta( 48 );
+cpuSetNextEventDelta(48);
 //Console.Warning( "EE ahead of the IOP -- Rapid Event! %d", EEsCycle );
 }
 
 // The IOP could be running ahead/behind of us, so adjust the iop's next branch by its
 // relative position to the EE (via EEsCycle)
-cpuSetNextEventDelta( ((g_iopNextEventCycle-psxRegs.cycle)*8) - EEsCycle );
+cpuSetNextEventDelta(((psxRegs.iopNextEventCycle - psxRegs.cycle) * 8) - EEsCycle);
 
 // Apply the hsync counter's nextCycle
-cpuSetNextEvent( hsyncCounter.sCycle, hsyncCounter.CycleT );
+cpuSetNextEvent(hsyncCounter.sCycle, hsyncCounter.CycleT);
 
 // Apply vsync and other counter nextCycles
-cpuSetNextEvent( nextsCounter, nextCounter );
+cpuSetNextEvent(nextsCounter, nextCounter);
 
 eeEventTestIsActive = false;
 }
@@ -496,15 +494,17 @@ __ri void cpuTestINTCInts()
 {
 // Check the COP0's Status register for general interrupt disables, and the 0x400
 // bit (which is INTC master toggle).
-if( !cpuIntsEnabled(0x400) ) return;
+if (!cpuIntsEnabled(0x400))
+return;
 
-if( (psHu32(INTC_STAT) & psHu32(INTC_MASK)) == 0 ) return;
+if ((psHu32(INTC_STAT) & psHu32(INTC_MASK)) == 0)
+return;
 
-cpuSetNextEventDelta( 4 );
-if(eeEventTestIsActive && (iopCycleEE > 0))
+cpuSetNextEventDelta(4);
+if (eeEventTestIsActive && (psxRegs.iopCycleEE > 0))
 {
-iopBreak += iopCycleEE; // record the number of cycles the IOP didn't run.
-iopCycleEE = 0;
+psxRegs.iopBreak += psxRegs.iopCycleEE; // record the number of cycles the IOP didn't run.
+psxRegs.iopCycleEE = 0;
 }
 }
 
@@ -512,27 +512,32 @@ __fi void cpuTestDMACInts()
 {
 // Check the COP0's Status register for general interrupt disables, and the 0x800
 // bit (which is the DMAC master toggle).
-if( !cpuIntsEnabled(0x800) ) return;
+if (!cpuIntsEnabled(0x800))
+return;
 
-if ( ( (psHu16(0xe012) & psHu16(0xe010)) == 0) &&
-( (psHu16(0xe010) & 0x8000) == 0) ) return;
+if (((psHu16(0xe012) & psHu16(0xe010)) == 0) &&
+((psHu16(0xe010) & 0x8000) == 0))
+return;
 
-cpuSetNextEventDelta( 4 );
-if(eeEventTestIsActive && (iopCycleEE > 0))
+cpuSetNextEventDelta(4);
+if (eeEventTestIsActive && (psxRegs.iopCycleEE > 0))
 {
-iopBreak += iopCycleEE; // record the number of cycles the IOP didn't run.
-iopCycleEE = 0;
+psxRegs.iopBreak += psxRegs.iopCycleEE; // record the number of cycles the IOP didn't run.
+psxRegs.iopCycleEE = 0;
 }
 }
 
-__fi void cpuTestTIMRInts() {
-if ((cpuRegs.CP0.n.Status.val & 0x10007) == 0x10001) {
+__fi void cpuTestTIMRInts()
+{
+if ((cpuRegs.CP0.n.Status.val & 0x10007) == 0x10001)
+{
 _cpuTestPERF();
 _cpuTestTIMR();
 }
 }
 
-__fi void cpuTestHwInts() {
+__fi void cpuTestHwInts()
+{
 cpuTestINTCInts();
 cpuTestDMACInts();
 cpuTestTIMRInts();
@@ -551,24 +556,25 @@ __fi void CPU_INT( EE_EventType n, s32 ecycle)
 // EE events happen 8 cycles in the future instead of whatever was requested.
 // This can be used on games with PATH3 masking issues for example, or when
 // some FMV look bad.
-if(CHECK_EETIMINGHACK && n < VIF_VU0_FINISH) ecycle = 8;
+if (CHECK_EETIMINGHACK && n < VIF_VU0_FINISH)
+ecycle = 8;
 
-cpuRegs.interrupt|= 1 << n;
+cpuRegs.interrupt |= 1 << n;
 cpuRegs.sCycle[n] = cpuRegs.cycle;
 cpuRegs.eCycle[n] = ecycle;
 
 // Interrupt is happening soon: make sure both EE and IOP are aware.
 
-if( ecycle <= 28 && iopCycleEE > 0 )
+if (ecycle <= 28 && psxRegs.iopCycleEE > 0)
 {
 // If running in the IOP, force it to break immediately into the EE.
 // the EE's branch test is due to run.
 
-iopBreak += iopCycleEE; // record the number of cycles the IOP didn't run.
-iopCycleEE = 0;
+psxRegs.iopBreak += psxRegs.iopCycleEE; // record the number of cycles the IOP didn't run.
+psxRegs.iopCycleEE = 0;
 }
 
-cpuSetNextEventDelta( cpuRegs.eCycle[n] );
+cpuSetNextEventDelta(cpuRegs.eCycle[n]);
 }
 
 // Called from recompilers; define is mandatory.
@@ -175,6 +175,13 @@ struct cpuRegisters {
 int opmode; // operating mode
 u32 tempcycles;
 u32 dmastall;
+u32 pcWriteback;
+
+// if cpuRegs.cycle is greater than this cycle, should check cpuEventTest for updates
+u32 nextEventCycle;
+u32 lastEventCycle;
+u32 lastCOP0Cycle;
+u32 lastPERFCycle[2];
 };
 
 // used for optimization
@@ -260,11 +267,7 @@ alignas(16) extern cpuRegisters cpuRegs;
 alignas(16) extern fpuRegisters fpuRegs;
 alignas(16) extern tlbs tlb[48];
 
-extern u32 g_nextEventCycle;
-extern u32 g_lastEventCycle;
 extern bool eeEventTestIsActive;
-extern u32 s_iLastCOP0Cycle;
-extern u32 s_iLastPERFCycle[2];
 
 void intSetBranch();
 
@@ -266,12 +266,6 @@ SaveStateBase& SaveStateBase::FreezeInternals()
 FreezeTag( "Cycles" );
 Freeze(EEsCycle);
 Freeze(EEoCycle);
-Freeze(iopCycleEE);
-Freeze(iopBreak);
-Freeze(g_nextEventCycle);
-Freeze(g_iopNextEventCycle);
-Freeze(s_iLastCOP0Cycle);
-Freeze(s_iLastPERFCycle);
 Freeze(nextCounter);
 Freeze(nextsCounter);
 Freeze(psxNextsCounter);
@@ -33,7 +33,7 @@ enum class FreezeAction
 // [SAVEVERSION+]
 // This informs the auto updater that the users savestates will be invalidated.
 
-static const u32 g_SaveVersion = (0x9A30 << 16) | 0x0000;
+static const u32 g_SaveVersion = (0x9A31 << 16) | 0x0000;
 
 
 // the freezing data between submodules and core
@@ -44,8 +44,8 @@ static const u32 g_SaveVersion = (0x9A30 << 16) | 0x0000;
 // necessarily portable; we might want to investigate this in the future -- govanify
 struct freezeData
 {
 int size;
-u8 *data;
+u8* data;
 };
 
 struct SaveStateScreenshotData
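The [SAVEVERSION+] tag in the commit message is what the bump above reflects: the per-field Freeze() calls removed in the previous hunk disappear because those values now live inside the cpuRegs/psxRegs blocks (which the savestate presumably captures wholesale), so the on-disk layout shifts and states written before this commit can no longer be restored. A rough sketch of the kind of gate this implies follows; it is illustrative only, not the actual SaveState.cpp logic, and the helper name is made up.

#include <cstdint>
using u32 = std::uint32_t;

static const u32 g_SaveVersion = (0x9A31 << 16) | 0x0000;

// Hypothetical helper: bumping the upper 16 bits (0x9A30 -> 0x9A31) marks a
// breaking layout change, so an older state is simply refused on load.
static bool SaveVersionIsCompatible(u32 savedVersion)
{
	return (savedVersion >> 16) == (g_SaveVersion >> 16);
}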
@@ -146,12 +146,12 @@ void recMFC0()
 // This case needs to be handled even if the write-back is ignored (_Rt_ == 0 )
 xMOV(ecx, ptr[&cpuRegs.cycle]);
 xMOV(eax, ecx);
-xSUB(eax, ptr[&s_iLastCOP0Cycle]);
+xSUB(eax, ptr[&cpuRegs.lastCOP0Cycle]);
 u8* skipInc = JNZ8(0);
 xINC(eax);
 x86SetJ8(skipInc);
 xADD(ptr[&cpuRegs.CP0.n.Count], eax);
-xMOV(ptr[&s_iLastCOP0Cycle], ecx);
+xMOV(ptr[&cpuRegs.lastCOP0Cycle], ecx);
 xMOV(eax, ptr[&cpuRegs.CP0.r[_Rd_]]);
 
 if (!_Rt_)
@@ -217,7 +217,7 @@ void recMTC0()
 
 case 9:
 xMOV(ecx, ptr[&cpuRegs.cycle]);
-xMOV(ptr[&s_iLastCOP0Cycle], ecx);
+xMOV(ptr[&cpuRegs.lastCOP0Cycle], ecx);
 xMOV(ptr32[&cpuRegs.CP0.r[9]], g_cpuConstRegs[_Rt_].UL[0]);
 break;
 
@@ -236,13 +236,13 @@ void recMTC0()
 {
 xMOV(eax, ptr[&cpuRegs.cycle]);
 xMOV(ptr32[&cpuRegs.PERF.n.pcr0], g_cpuConstRegs[_Rt_].UL[0]);
-xMOV(ptr[&s_iLastPERFCycle[0]], eax);
+xMOV(ptr[&cpuRegs.lastPERFCycle[0]], eax);
 }
 else // MTPC 1
 {
 xMOV(eax, ptr[&cpuRegs.cycle]);
 xMOV(ptr32[&cpuRegs.PERF.n.pcr1], g_cpuConstRegs[_Rt_].UL[0]);
-xMOV(ptr[&s_iLastPERFCycle[1]], eax);
+xMOV(ptr[&cpuRegs.lastPERFCycle[1]], eax);
 }
 break;
 
@@ -274,7 +274,7 @@ void recMTC0()
 case 9:
 xMOV(ecx, ptr[&cpuRegs.cycle]);
 _eeMoveGPRtoM((uptr)&cpuRegs.CP0.r[9], _Rt_);
-xMOV(ptr[&s_iLastCOP0Cycle], ecx);
+xMOV(ptr[&cpuRegs.lastCOP0Cycle], ecx);
 break;
 
 case 25:
@@ -291,13 +291,13 @@ void recMTC0()
 {
 xMOV(ecx, ptr[&cpuRegs.cycle]);
 _eeMoveGPRtoM((uptr)&cpuRegs.PERF.n.pcr0, _Rt_);
-xMOV(ptr[&s_iLastPERFCycle[0]], ecx);
+xMOV(ptr[&cpuRegs.lastPERFCycle[0]], ecx);
 }
 else // MTPC 1
 {
 xMOV(ecx, ptr[&cpuRegs.cycle]);
 _eeMoveGPRtoM((uptr)&cpuRegs.PERF.n.pcr1, _Rt_);
-xMOV(ptr[&s_iLastPERFCycle[1]], ecx);
+xMOV(ptr[&cpuRegs.lastPERFCycle[1]], ecx);
 }
 break;
 
@@ -32,10 +32,6 @@ u16 g_xmmAllocCounter = 0;
 
 EEINST* g_pCurInstInfo = NULL;
 
-// used to make sure regs don't get changed while in recompiler
-// use FreezeXMMRegs
-u32 g_recWriteback = 0;
-
 _xmmregs xmmregs[iREGCNT_XMM], s_saveXMMregs[iREGCNT_XMM];
 
 // X86 caching
@@ -91,7 +91,7 @@
 #define X86TYPE_VUPWRITE 8
 #define X86TYPE_PSX 9
 #define X86TYPE_PCWRITEBACK 10
-#define X86TYPE_VUJUMP 12 // jump from random mem (g_recWriteback)
+#define X86TYPE_PSX_PCWRITEBACK 12
 #define X86TYPE_VITEMP 13
 #define X86TYPE_FNARG 14 // function parameter, max is 4
 
@@ -253,8 +253,6 @@ static __fi bool EEINST_ISLIVE2(u32 reg) { return !!(g_pCurInstInfo->regs[reg]
 static __fi bool FPUINST_ISLIVE(u32 reg) { return !!(g_pCurInstInfo->fpuregs[reg] & EEINST_LIVE0); }
 static __fi bool FPUINST_LASTUSE(u32 reg) { return !!(g_pCurInstInfo->fpuregs[reg] & EEINST_LASTUSE); }
 
-extern u32 g_recWriteback; // used for jumps (VUrec mess!)
-
 extern _xmmregs xmmregs[iREGCNT_XMM], s_saveXMMregs[iREGCNT_XMM];
 
 extern thread_local u8* j8Ptr[32]; // depreciated item. use local u8* vars instead.
@@ -53,7 +53,6 @@
 
 using namespace x86Emitter;
 
-extern u32 g_iopNextEventCycle;
 extern void psxBREAK();
 
 u32 g_psxMaxRecMem = 0;
@@ -817,8 +816,8 @@ static void iopClearRecLUT(BASEBLOCK* base, int count)
 
 static __noinline s32 recExecuteBlock(s32 eeCycles)
 {
-iopBreak = 0;
-iopCycleEE = eeCycles;
+psxRegs.iopBreak = 0;
+psxRegs.iopCycleEE = eeCycles;
 
 #ifdef PCSX2_DEVBUILD
 //if (SysTrace.SIF.IsActive())
@@ -843,7 +842,7 @@ static __noinline s32 recExecuteBlock(s32 eeCycles)
 
 iopEnterRecompiledCode();
 
-return iopBreak + iopCycleEE;
+return psxRegs.iopBreak + psxRegs.iopCycleEE;
 }
 
 // Returns the offset to the next instruction after any cleared memory
@@ -917,14 +916,14 @@ void psxSetBranchReg(u32 reg)
 
 if (reg != 0xffffffff)
 {
-_allocX86reg(calleeSavedReg2d, X86TYPE_PCWRITEBACK, 0, MODE_WRITE);
+_allocX86reg(calleeSavedReg2d, X86TYPE_PSX_PCWRITEBACK, 0, MODE_WRITE);
 _psxMoveGPRtoR(calleeSavedReg2d, reg);
 
 psxRecompileNextInstruction(1);
 
 if (x86regs[calleeSavedReg2d.GetId()].inuse)
 {
-pxAssert(x86regs[calleeSavedReg2d.GetId()].type == X86TYPE_PCWRITEBACK);
+pxAssert(x86regs[calleeSavedReg2d.GetId()].type == X86TYPE_PSX_PCWRITEBACK);
 xMOV(ptr32[&psxRegs.pc], calleeSavedReg2d);
 x86regs[calleeSavedReg2d.GetId()].inuse = 0;
 #ifdef PCSX2_DEBUG
@@ -933,7 +932,7 @@ void psxSetBranchReg(u32 reg)
 }
 else
 {
-xMOV(eax, ptr32[&g_recWriteback]);
+xMOV(eax, ptr32[&psxRegs.pcWriteback]);
 xMOV(ptr32[&psxRegs.pc], eax);
 
 #ifdef PCSX2_DEBUG
@@ -980,16 +979,16 @@ static void iPsxBranchTest(u32 newpc, u32 cpuBranch)
 {
 xMOV(eax, ptr32[&psxRegs.cycle]);
 xMOV(ecx, eax);
-xMOV(edx, ptr32[&iopCycleEE]);
+xMOV(edx, ptr32[&psxRegs.iopCycleEE]);
 xADD(edx, 7);
 xSHR(edx, 3);
 xADD(eax, edx);
-xCMP(eax, ptr32[&g_iopNextEventCycle]);
-xCMOVNS(eax, ptr32[&g_iopNextEventCycle]);
+xCMP(eax, ptr32[&psxRegs.iopNextEventCycle]);
+xCMOVNS(eax, ptr32[&psxRegs.iopNextEventCycle]);
 xMOV(ptr32[&psxRegs.cycle], eax);
 xSUB(eax, ecx);
 xSHL(eax, 3);
-xSUB(ptr32[&iopCycleEE], eax);
+xSUB(ptr32[&psxRegs.iopCycleEE], eax);
 xJLE(iopExitRecompiledCode);
 
 xFastCall((void*)iopEventTest);
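The shift-by-3 pair above (xSHR(edx, 3) turns the remaining EE-cycle budget into IOP cycles, xSHL(eax, 3) charges it back) and the various * 8 factors in the earlier C++ hunks all encode the same ratio: iopCycleEE is a budget expressed in EE cycles, and the interpreter charges eight of them per executed IOP instruction in PS2 mode, nine in PS1 mode (the execI() hunk). PSXCLK = 36864000 appears in the psxReset() hunk; the 8x EE clock figure below is an assumption, not something this diff states. A small sketch of that bookkeeping:

#include <cstdint>
using u32 = std::uint32_t;
using s32 = std::int32_t;

static const u32 PSXCLK_HZ = 36864000;     // IOP clock, from the psxReset() hunk
static const u32 EECLK_HZ = 8 * PSXCLK_HZ; // 294912000 -- assumed, not in the diff

// iopCycleEE holds "how long the IOP may still run", measured in EE cycles.
inline s32 IopCyclesToEE(s32 iopCycles) { return iopCycles * 8; }              // the xSHL(eax, 3) above
inline s32 EECyclesToIopRoundedUp(s32 eeCycles) { return (eeCycles + 7) / 8; } // the xADD(edx, 7); xSHR(edx, 3) above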
@@ -1007,11 +1006,11 @@ static void iPsxBranchTest(u32 newpc, u32 cpuBranch)
 xMOV(ptr32[&psxRegs.cycle], eax); // update cycles
 
 // jump if iopCycleEE <= 0 (iop's timeslice timed out, so time to return control to the EE)
-xSUB(ptr32[&iopCycleEE], blockCycles * 8);
+xSUB(ptr32[&psxRegs.iopCycleEE], blockCycles * 8);
 xJLE(iopExitRecompiledCode);
 
 // check if an event is pending
-xSUB(eax, ptr32[&g_iopNextEventCycle]);
+xSUB(eax, ptr32[&psxRegs.iopNextEventCycle]);
 xForwardJS<u8> nointerruptpending;
 
 xFastCall((void*)iopEventTest);
@@ -1058,7 +1057,7 @@ void rpsxSYSCALL()
 j8Ptr[0] = JE8(0);
 
 xADD(ptr32[&psxRegs.cycle], psxScaleBlockCycles());
-xSUB(ptr32[&iopCycleEE], psxScaleBlockCycles() * 8);
+xSUB(ptr32[&psxRegs.iopCycleEE], psxScaleBlockCycles() * 8);
 JMP32((uptr)iopDispatcherReg - ((uptr)x86Ptr + 5));
 
 // jump target for skipping blockCycle updates
@@ -1080,7 +1079,7 @@ void rpsxBREAK()
 xCMP(ptr32[&psxRegs.pc], psxpc - 4);
 j8Ptr[0] = JE8(0);
 xADD(ptr32[&psxRegs.cycle], psxScaleBlockCycles());
-xSUB(ptr32[&iopCycleEE], psxScaleBlockCycles() * 8);
+xSUB(ptr32[&psxRegs.iopCycleEE], psxScaleBlockCycles() * 8);
 JMP32((uptr)iopDispatcherReg - ((uptr)x86Ptr + 5));
 x86SetJ8(j8Ptr[0]);
 
@@ -1516,7 +1515,7 @@ StartRecomp:
 else
 {
 xADD(ptr32[&psxRegs.cycle], psxScaleBlockCycles());
-xSUB(ptr32[&iopCycleEE], psxScaleBlockCycles() * 8);
+xSUB(ptr32[&psxRegs.iopCycleEE], psxScaleBlockCycles() * 8);
 }
 
 if (willbranch3 || !psxbranch)
@@ -1057,7 +1057,7 @@ void rpsxJR()
 void rpsxJALR()
 {
 // jalr Rs
-_allocX86reg(calleeSavedReg2d, X86TYPE_PCWRITEBACK, 0, MODE_WRITE);
+_allocX86reg(calleeSavedReg2d, X86TYPE_PSX_PCWRITEBACK, 0, MODE_WRITE);
 _psxMoveGPRtoR(calleeSavedReg2d, _Rs_);
 
 if (_Rd_)
|
||||||
|
|
||||||
if (x86regs[calleeSavedReg2d.GetId()].inuse)
|
if (x86regs[calleeSavedReg2d.GetId()].inuse)
|
||||||
{
|
{
|
||||||
pxAssert(x86regs[calleeSavedReg2d.GetId()].type == X86TYPE_PCWRITEBACK);
|
pxAssert(x86regs[calleeSavedReg2d.GetId()].type == X86TYPE_PSX_PCWRITEBACK);
|
||||||
xMOV(ptr32[&psxRegs.pc], calleeSavedReg2d);
|
xMOV(ptr32[&psxRegs.pc], calleeSavedReg2d);
|
||||||
x86regs[calleeSavedReg2d.GetId()].inuse = 0;
|
x86regs[calleeSavedReg2d.GetId()].inuse = 0;
|
||||||
#ifdef PCSX2_DEBUG
|
#ifdef PCSX2_DEBUG
|
||||||
|
@ -1080,7 +1080,7 @@ void rpsxJALR()
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
xMOV(eax, ptr32[&g_recWriteback]);
|
xMOV(eax, ptr32[&psxRegs.pcWriteback]);
|
||||||
xMOV(ptr32[&psxRegs.pc], eax);
|
xMOV(ptr32[&psxRegs.pc], eax);
|
||||||
#ifdef PCSX2_DEBUG
|
#ifdef PCSX2_DEBUG
|
||||||
xOR(eax, eax);
|
xOR(eax, eax);
|
||||||
|
|
|
@@ -98,11 +98,11 @@ uptr _x86GetAddr(int type, int reg)
 break;
 
 case X86TYPE_PCWRITEBACK:
-ret = (uptr)&g_recWriteback;
+ret = (uptr)&cpuRegs.pcWriteback;
 break;
 
-case X86TYPE_VUJUMP:
-ret = (uptr)&g_recWriteback;
+case X86TYPE_PSX_PCWRITEBACK:
+ret = (uptr)&psxRegs.pcWriteback;
 break;
 
 jNO_DEFAULT;
@@ -363,7 +363,7 @@ void recBranchCall(void (*func)())
 // to the current cpu cycle.
 
 xMOV(eax, ptr[&cpuRegs.cycle]);
-xMOV(ptr[&g_nextEventCycle], eax);
+xMOV(ptr[&cpuRegs.nextEventCycle], eax);
 
 recCall(func);
 g_branch = 2;
@@ -930,7 +930,7 @@ void SetBranchReg(u32 reg)
 }
 else
 {
-xMOV(eax, ptr[&g_recWriteback]);
+xMOV(eax, ptr[&cpuRegs.pcWriteback]);
 xMOV(ptr[&cpuRegs.pc], eax);
 }
 }
@@ -1125,7 +1125,7 @@ static void iBranchTest(u32 newpc)
 
 if (EmuConfig.Speedhacks.WaitLoop && s_nBlockFF && newpc == s_branchTo)
 {
-xMOV(eax, ptr32[&g_nextEventCycle]);
+xMOV(eax, ptr32[&cpuRegs.nextEventCycle]);
 xADD(ptr32[&cpuRegs.cycle], scaleblockcycles());
 xCMP(eax, ptr32[&cpuRegs.cycle]);
 xCMOVS(eax, ptr32[&cpuRegs.cycle]);
@@ -1138,7 +1138,7 @@ static void iBranchTest(u32 newpc)
 xMOV(eax, ptr[&cpuRegs.cycle]);
 xADD(eax, scaleblockcycles());
 xMOV(ptr[&cpuRegs.cycle], eax); // update cycles
-xSUB(eax, ptr[&g_nextEventCycle]);
+xSUB(eax, ptr[&cpuRegs.nextEventCycle]);
 
 if (newpc == 0xffffffff)
 xJS(DispatcherReg);
@@ -157,7 +157,7 @@ void recJALR()
 }
 else
 {
-xMOV(eax, ptr[&g_recWriteback]);
+xMOV(eax, ptr[&cpuRegs.pcWriteback]);
 xMOV(ptr[&cpuRegs.pc], eax);
 }
 