* Rename cpuBranch[...] functions and vars to cpuEvent[...], which should be clearer and more consistent with their true purpose. (To clarify: event tests typically run during cpu branch instructions, but most branches have nothing to do with whether events are pending or being run.) A sketch of the renamed scheduling helpers follows below.

* Add missing & ~0x0f address alignment to odd-size FIFO reads/writes (thanks gregory); a sketch of the aligned-read path follows below.
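
For reference, a minimal sketch of what the renamed event-scheduling helpers do, simplified from the R5900 changes further down. The standalone cpuCycle variable and the stripped-down function bodies are illustrative stand-ins only, not the real emulator state:

#include <cstdint>

// Stand-ins for the EE state the real helpers touch (cpuRegs.cycle and
// g_nextEventCycle in the actual source); simplified for illustration.
static uint32_t cpuCycle         = 0;   // current EE cycle counter
static uint32_t g_nextEventCycle = 0;   // cycle at which the next event test runs

// Schedule an event test 'delta' cycles after an arbitrary starting point.
// The signed cast keeps the comparison sane if startCycle is already past
// the currently scheduled event cycle (cycle counters wrap).
static void cpuSetNextEvent(uint32_t startCycle, int32_t delta)
{
    if ((int32_t)(g_nextEventCycle - startCycle) > delta)
        g_nextEventCycle = startCycle + delta;
}

// Schedule an event test 'delta' cycles from the current cycle.
static void cpuSetNextEventDelta(int32_t delta)
{
    cpuSetNextEvent(cpuCycle, delta);
}

// The interpreter/recompiler then runs the shared event test once the
// cycle counter reaches the scheduled point, roughly:
//   if ((int32_t)(cpuCycle - g_nextEventCycle) >= 0) _cpuEventTest_Shared();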
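
And a minimal sketch of the FIFO alignment fix: odd-size (32/64-bit) accesses to FIFO space are serviced by a full 128-bit access at the 16-byte-aligned address, then the requested word is picked out by its offset. The u128, hwRead128, fifoBacking, and fifoRead32 names below are simplified placeholders, not the real templated _hwRead128<page> API:

#include <cstdint>
#include <cstring>

// Simplified stand-in for PCSX2's u128 type.
union u128 { uint32_t _u32[4]; uint64_t _u64[2]; };

static uint8_t fifoBacking[16];  // pretend FIFO register block (one quadword)

// Stand-in for the 128-bit FIFO read; the real code dispatches by page.
static void hwRead128(uint32_t /*mem*/, u128* out)
{
    std::memcpy(out, fifoBacking, sizeof(u128));
}

// A 32-bit read of FIFO space is serviced by a 128-bit read at the
// 16-byte-aligned address (mem & ~0x0f); the requested word is then
// selected by its offset within that quadword.
static uint32_t fifoRead32(uint32_t mem)
{
    u128 out;
    hwRead128(mem & ~0x0f, &out);
    return out._u32[(mem >> 2) & 0x3];
}

// 64-bit reads work the same way, selecting _u64[(mem >> 3) & 0x1] instead.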

git-svn-id: http://pcsx2.googlecode.com/svn/trunk@3730 96395faa-99c1-11dd-bbfe-3dabce05a288
Jake.Stine 2010-09-05 15:38:14 +00:00
parent 1698382065
commit 3cfd0c68af
18 changed files with 131 additions and 131 deletions

View File

@ -38,7 +38,7 @@ __ri void cpuUpdateOperationMode() {
void __fastcall WriteCP0Status(u32 value) {
cpuRegs.CP0.n.Status.val = value;
cpuUpdateOperationMode();
cpuSetNextBranchDelta(4);
cpuSetNextEventDelta(4);
}
void MapTLB(int i)
@ -534,7 +534,7 @@ void ERET() {
cpuRegs.CP0.n.Status.b.EXL = 0;
}
cpuUpdateOperationMode();
cpuSetNextBranchDelta(4);
cpuSetNextEventDelta(4);
intSetBranch();
}
@ -543,7 +543,7 @@ void DI() {
cpuRegs.CP0.n.Status.b.ERL || (cpuRegs.CP0.n.Status.b.KSU == 0)) {
cpuRegs.CP0.n.Status.b.EIE = 0;
// IRQs are disabled so no need to do a cpu exception/event test...
//cpuSetNextBranchDelta();
//cpuSetNextEventDelta();
}
}
@ -552,7 +552,7 @@ void EI() {
cpuRegs.CP0.n.Status.b.ERL || (cpuRegs.CP0.n.Status.b.KSU == 0)) {
cpuRegs.CP0.n.Status.b.EIE = 1;
// schedule an event test, which will check for and raise pending IRQs.
cpuSetNextBranchDelta(4);
cpuSetNextEventDelta(4);
}
}

View File

@ -17,7 +17,7 @@
#define __COP0_H__
extern void __fastcall WriteCP0Status(u32 value);
extern void UpdateCP0Status();
extern void cpuUpdateOperationMode();
extern void WriteTLB(int i);
extern void UnmapTLB(int i);
extern void MapTLB(int i);

View File

@ -93,7 +93,7 @@ static __fi void _rcntSet( int cntidx )
if (c < nextCounter)
{
nextCounter = c;
cpuSetNextBranch( nextsCounter, nextCounter ); //Need to update on counter resets/target changes
cpuSetNextEvent( nextsCounter, nextCounter ); //Need to update on counter resets/target changes
}
// Ignore target diff if target is currently disabled.
@ -111,7 +111,7 @@ static __fi void _rcntSet( int cntidx )
if (c < nextCounter)
{
nextCounter = c;
cpuSetNextBranch( nextsCounter, nextCounter ); //Need to update on counter resets/target changes
cpuSetNextEvent( nextsCounter, nextCounter ); //Need to update on counter resets/target changes
}
}
}
@ -419,7 +419,7 @@ __fi void rcntUpdate_hScanline()
{
if( !cpuTestCycle( hsyncCounter.sCycle, hsyncCounter.CycleT ) ) return;
//iopBranchAction = 1;
//iopEventAction = 1;
if (hsyncCounter.Mode & MODE_HBLANK) { //HBLANK Start
rcntStartGate(false, hsyncCounter.sCycle);
psxCheckStartGate16(0);
@ -890,6 +890,6 @@ void SaveStateBase::rcntFreeze()
for( int i=0; i<4; i++ )
_rcntSetGate( i );
iopBranchAction = 1; // probably not needed but won't hurt anything either.
iopEventAction = 1; // probably not needed but won't hurt anything either.
}
}

View File

@ -55,7 +55,7 @@ void iDumpPsxRegisters(u32 startpc, u32 temp)
for(i = 0; i < 34; i+=2) __Log("%spsx%s: %x %x", pstr, disRNameGPR[i], psxRegs.GPR.r[i], psxRegs.GPR.r[i+1]);
DbgCon.WriteLn("%scycle: %x %x %x; counters %x %x", pstr, psxRegs.cycle, g_psxNextBranchCycle, EEsCycle,
DbgCon.WriteLn("%scycle: %x %x %x; counters %x %x", pstr, psxRegs.cycle, g_iopNextEventCycle, EEsCycle,
psxNextsCounter, psxNextCounter);
DbgCon.WriteLn(wxsFormat(L"psxdma%d ", 2) + hw_dma(2).desc());
@ -109,7 +109,7 @@ void iDumpRegisters(u32 startpc, u32 temp)
__Log("%svfACC: %x %x %x %x", pstr, VU0.ACC.UL[3], VU0.ACC.UL[2], VU0.ACC.UL[1], VU0.ACC.UL[0]);
__Log("%sLO: %x_%x_%x_%x, HI: %x_%x_%x_%x", pstr, cpuRegs.LO.UL[3], cpuRegs.LO.UL[2], cpuRegs.LO.UL[1], cpuRegs.LO.UL[0],
cpuRegs.HI.UL[3], cpuRegs.HI.UL[2], cpuRegs.HI.UL[1], cpuRegs.HI.UL[0]);
__Log("%sCycle: %x %x, Count: %x", pstr, cpuRegs.cycle, g_nextBranchCycle, cpuRegs.CP0.n.Count);
__Log("%sCycle: %x %x, Count: %x", pstr, cpuRegs.cycle, g_nextEventCycle, cpuRegs.CP0.n.Count);
iDumpPsxRegisters(psxRegs.pc, temp);

View File

@ -27,8 +27,8 @@ static __fi void IntCHackCheck()
{
// Sanity check: To protect from accidentally "rewinding" the cyclecount
// on the few times nextBranchCycle can be behind our current cycle.
s32 diff = g_nextBranchCycle - cpuRegs.cycle;
if( diff > 0 ) cpuRegs.cycle = g_nextBranchCycle;
s32 diff = g_nextEventCycle - cpuRegs.cycle;
if( diff > 0 ) cpuRegs.cycle = g_nextEventCycle;
}
static const uint HwF_VerboseConLog = 1<<0;
@ -65,7 +65,7 @@ mem32_t __fastcall _hwRead32(u32 mem)
DevCon.WriteLn( Color_Cyan, "Reading 32-bit FIFO data" );
u128 out128;
_hwRead128<page>(mem, &out128);
_hwRead128<page>(mem & ~0x0f, &out128);
return out128._u32[(mem >> 2) & 0x3];
}
break;
@ -221,7 +221,7 @@ static void _hwRead64(u32 mem, mem64_t* result )
DevCon.WriteLn( Color_Cyan, "Reading 64-bit FIFO data (%s 64 bits discarded)", wordpart ? "upper" : "lower" );
u128 out128;
_hwRead128<page>(mem, &out128);
_hwRead128<page>(mem & ~0x0f, &out128);
*result = out128._u64[wordpart];
}
return;

View File

@ -68,7 +68,7 @@ void __fastcall _hwWrite32( u32 mem, u32 value )
zerofill._u32[(mem >> 2) & 0x03] = value;
DevCon.WriteLn( Color_Cyan, "Writing 32-bit FIFO data (zero-extended to 128 bits)" );
_hwWrite128<page>(mem, &zerofill);
_hwWrite128<page>(mem & ~0x0f, &zerofill);
}
return;
@ -301,7 +301,7 @@ void __fastcall _hwWrite64( u32 mem, const mem64_t* srcval )
u128 zerofill = u128::From32(0);
zerofill._u64[(mem >> 3) & 0x01] = *srcval;
hwWrite128<page>(mem, &zerofill);
hwWrite128<page>(mem & ~0x0f, &zerofill);
}
return;

View File

@ -375,7 +375,7 @@ static void intReset()
static void intEventTest()
{
// Perform counters, ints, and IOP updates:
_cpuBranchTest_Shared();
_cpuEventTest_Shared();
}
static void intExecute()

View File

@ -391,7 +391,7 @@ void psxRcntUpdate()
int i;
//u32 change = 0;
g_psxNextBranchCycle = psxRegs.cycle + 32;
g_iopNextEventCycle = psxRegs.cycle + 32;
psxNextCounter = 0x7fffffff;
psxNextsCounter = psxRegs.cycle;

View File

@ -51,10 +51,10 @@ static void __fastcall psxDmaGeneric(u32 madr, u32 bcr, u32 chcr, u32 spuCore, _
if (psxCounters[6].CycleT < psxNextCounter)
psxNextCounter = psxCounters[6].CycleT;
if((g_psxNextBranchCycle - psxNextsCounter) > (u32)psxNextCounter)
if((g_iopNextEventCycle - psxNextsCounter) > (u32)psxNextCounter)
{
//DevCon.Warning("SPU2async Setting new counter branch, old %x new %x ((%x - %x = %x) > %x delta)", g_psxNextBranchCycle, psxNextsCounter + psxNextCounter, g_psxNextBranchCycle, psxNextsCounter, (g_psxNextBranchCycle - psxNextsCounter), psxNextCounter);
g_psxNextBranchCycle = psxNextsCounter + psxNextCounter;
//DevCon.Warning("SPU2async Setting new counter branch, old %x new %x ((%x - %x = %x) > %x delta)", g_iopNextEventCycle, psxNextsCounter + psxNextCounter, g_iopNextEventCycle, psxNextsCounter, (g_iopNextEventCycle - psxNextsCounter), psxNextCounter);
g_iopNextEventCycle = psxNextsCounter + psxNextCounter;
}
}

View File

@ -29,22 +29,22 @@ u32 g_psxConstRegs[32];
u32 g_psxHasConstReg, g_psxFlushedConstReg;
// Controls when branch tests are performed.
u32 g_psxNextBranchCycle = 0;
u32 g_iopNextEventCycle = 0;
// This value is used when the IOP execution is broken to return control to the EE.
// (which happens when the IOP throws EE-bound interrupts). It holds the value of
// psxCycleEE (which is set to zero to facilitate the code break), so that the unrun
// iopCycleEE (which is set to zero to facilitate the code break), so that the unrun
// cycles can be accounted for later.
s32 psxBreak = 0;
s32 iopBreak = 0;
// tracks the IOP's current sync status with the EE. When it dips below zero,
// control is returned to the EE.
s32 psxCycleEE = -1;
s32 iopCycleEE = -1;
// Used to signal to the EE when important actions that need IOP-attention have
// happened (hsyncs, vsyncs, IOP exceptions, etc). IOP runs code whenever this
// is true, even if it's already running ahead a bit.
bool iopBranchAction = false;
bool iopEventAction = false;
bool iopEventTestIsActive = false;
@ -58,9 +58,9 @@ void psxReset()
psxRegs.CP0.n.Status = 0x10900000; // COP0 enabled | BEV = 1 | TS = 1
psxRegs.CP0.n.PRid = 0x0000001f; // PRevID = Revision ID, same as the IOP R3000A
psxBreak = 0;
psxCycleEE = -1;
g_psxNextBranchCycle = psxRegs.cycle + 4;
iopBreak = 0;
iopCycleEE = -1;
g_iopNextEventCycle = psxRegs.cycle + 4;
psxHwReset();
@ -113,8 +113,8 @@ __fi void psxSetNextBranch( u32 startCycle, s32 delta )
// typecast the conditional to signed so that things don't blow up
// if startCycle is greater than our next branch cycle.
if( (int)(g_psxNextBranchCycle - startCycle) > delta )
g_psxNextBranchCycle = startCycle + delta;
if( (int)(g_iopNextEventCycle - startCycle) > delta )
g_iopNextEventCycle = startCycle + delta;
}
__fi void psxSetNextBranchDelta( s32 delta )
@ -151,13 +151,13 @@ __fi void PSX_INT( IopEventId n, s32 ecycle )
psxSetNextBranchDelta( ecycle );
if( psxCycleEE < 0 )
if( iopCycleEE < 0 )
{
// The EE called this int, so inform it to branch as needed:
// fixme - this doesn't take into account EE/IOP sync (the IOP may be running
// ahead or behind the EE as per the EEsCycles value)
s32 iopDelta = (g_psxNextBranchCycle-psxRegs.cycle)*8;
cpuSetNextBranchDelta( iopDelta );
s32 iopDelta = (g_iopNextEventCycle-psxRegs.cycle)*8;
cpuSetNextEventDelta( iopDelta );
}
}
@ -211,18 +211,18 @@ static __fi void _psxTestInterrupts()
}
}
__ri void psxBranchTest()
__ri void iopEventTest()
{
if( psxTestCycle( psxNextsCounter, psxNextCounter ) )
{
psxRcntUpdate();
iopBranchAction = true;
iopEventAction = true;
}
else
{
// start the next branch at the next counter event by default
// the interrupt code below will assign nearer branches if needed.
g_psxNextBranchCycle = psxNextsCounter+psxNextCounter;
g_iopNextEventCycle = psxNextsCounter+psxNextCounter;
}
@ -239,7 +239,7 @@ __ri void psxBranchTest()
{
PSXCPU_LOG("Interrupt: %x %x", psxHu32(0x1070), psxHu32(0x1074));
psxException(0, 0);
iopBranchAction = true;
iopEventAction = true;
// No need to execute the SIFhack after cpuExceptions, since these by nature break SIF's
// thread sleep hangs and allow the IOP to "come back to life."
@ -258,9 +258,9 @@ void iopTestIntc()
// An iop exception has occurred while the EE is running code.
// Inform the EE to branch so the IOP can handle it promptly:
cpuSetNextBranchDelta( 16 );
iopBranchAction = true;
//Console.Error( "** IOP Needs an EE EventText, kthx ** %d", psxCycleEE );
cpuSetNextEventDelta( 16 );
iopEventAction = true;
//Console.Error( "** IOP Needs an EE EventText, kthx ** %d", iopCycleEE );
// Note: No need to set the iop's branch delta here, since the EE
// will run an IOP branch test regardless.

View File

@ -117,9 +117,9 @@ struct psxRegisters {
extern __aligned16 psxRegisters psxRegs;
extern u32 g_psxNextBranchCycle;
extern s32 psxBreak; // used when the IOP execution is broken and control returned to the EE
extern s32 psxCycleEE; // tracks IOP's current sync status with the EE
extern u32 g_iopNextEventCycle;
extern s32 iopBreak; // used when the IOP execution is broken and control returned to the EE
extern s32 iopCycleEE; // tracks IOP's current sync status with the EE
#ifndef _PC_
@ -172,7 +172,7 @@ extern u32 EEoCycle;
extern s32 psxNextCounter;
extern u32 psxNextsCounter;
extern bool iopBranchAction;
extern bool iopEventAction;
extern bool iopEventTestIsActive;
// Branching status used when throwing exceptions.
@ -196,7 +196,7 @@ extern R3000Acpu psxRec;
extern void psxReset();
extern void __fastcall psxException(u32 code, u32 step);
extern void psxBranchTest();
extern void iopEventTest();
extern void psxMemReset();
// Subsets

View File

@ -133,7 +133,7 @@ static __fi void execI()
psxRegs.pc+= 4;
psxRegs.cycle++;
psxCycleEE-=8;
iopCycleEE-=8;
psxBSC[psxRegs.code >> 26]();
}
@ -147,7 +147,7 @@ static void doBranch(s32 tar) {
iopIsDelaySlot = false;
psxRegs.pc = branchPC;
psxBranchTest();
iopEventTest();
}
static void intAlloc() {
@ -162,16 +162,16 @@ static void intExecute() {
static s32 intExecuteBlock( s32 eeCycles )
{
psxBreak = 0;
psxCycleEE = eeCycles;
iopBreak = 0;
iopCycleEE = eeCycles;
while (psxCycleEE > 0){
while (iopCycleEE > 0){
branch2 = 0;
while (!branch2) {
execI();
}
}
return psxBreak + psxCycleEE;
return iopBreak + iopCycleEE;
}
static void intClear(u32 Addr, u32 Size) {

View File

@ -71,7 +71,7 @@ void cpuReset()
fpuRegs.fprc[0] = 0x00002e00; // fpu Revision..
fpuRegs.fprc[31] = 0x01000001; // fpu Status/Control
g_nextBranchCycle = cpuRegs.cycle + 4;
g_nextEventCycle = cpuRegs.cycle + 4;
EEsCycle = 0;
EEoCycle = cpuRegs.cycle;
@ -209,21 +209,21 @@ void cpuTlbMissW(u32 addr, u32 bd) {
}
// sets a branch test to occur some time from an arbitrary starting point.
__fi void cpuSetNextBranch( u32 startCycle, s32 delta )
__fi void cpuSetNextEvent( u32 startCycle, s32 delta )
{
// typecast the conditional to signed so that things don't blow up
// if startCycle is greater than our next branch cycle.
if( (int)(g_nextBranchCycle - startCycle) > delta )
if( (int)(g_nextEventCycle - startCycle) > delta )
{
g_nextBranchCycle = startCycle + delta;
g_nextEventCycle = startCycle + delta;
}
}
// sets a branch to occur some time from the current cycle
__fi void cpuSetNextBranchDelta( s32 delta )
__fi void cpuSetNextEventDelta( s32 delta )
{
cpuSetNextBranch( cpuRegs.cycle, delta );
cpuSetNextEvent( cpuRegs.cycle, delta );
}
// tests the cpu cycle against the given start and delta values.
@ -237,9 +237,9 @@ __fi int cpuTestCycle( u32 startCycle, s32 delta )
}
// tells the EE to run the branch test the next time it gets a chance.
__fi void cpuSetBranch()
__fi void cpuSetEvent()
{
g_nextBranchCycle = cpuRegs.cycle;
g_nextEventCycle = cpuRegs.cycle;
}
__fi void cpuClearInt( uint i )
@ -258,7 +258,7 @@ static __fi void TESTINT( u8 n, void (*callback)() )
callback();
}
else
cpuSetNextBranch( cpuRegs.sCycle[n], cpuRegs.eCycle[n] );
cpuSetNextEvent( cpuRegs.sCycle[n], cpuRegs.eCycle[n] );
}
// [TODO] move this function to LegacyDmac.cpp, and remove most of the DMAC-related headers from
@ -303,7 +303,7 @@ static __fi void _cpuTestTIMR()
s_iLastCOP0Cycle = cpuRegs.cycle;
// fixme: this looks like a hack to make up for the fact that the TIMR
// doesn't yet have a proper mechanism for setting itself up on a nextBranchCycle.
// doesn't yet have a proper mechanism for setting itself up on a nextEventCycle.
// A proper fix would schedule the TIMR to trigger at a specific cycle anytime
// the Count or Compare registers are modified.
@ -338,15 +338,15 @@ static bool cpuIntsEnabled(int Interrupt)
!cpuRegs.CP0.n.Status.b.EXL && (cpuRegs.CP0.n.Status.b.ERL == 0);
}
// if cpuRegs.cycle is greater than this cycle, should check cpuBranchTest for updates
u32 g_nextBranchCycle = 0;
// if cpuRegs.cycle is greater than this cycle, should check cpuEventTest for updates
u32 g_nextEventCycle = 0;
// Shared portion of the branch test, called from both the Interpreter
// and the recompiler. (moved here to help alleviate redundant code)
__fi void _cpuBranchTest_Shared()
__fi void _cpuEventTest_Shared()
{
ScopedBool etest(eeEventTestIsActive);
g_nextBranchCycle = cpuRegs.cycle + eeWaitCycles;
g_nextEventCycle = cpuRegs.cycle + eeWaitCycles;
// ---- INTC / DMAC (CPU-level Exceptions) -----------------
// Done first because exceptions raised during event tests need to be postponed a few
@ -379,34 +379,34 @@ __fi void _cpuBranchTest_Shared()
_cpuTestInterrupts();
// ---- IOP -------------
// * It's important to run a psxBranchTest before calling ExecuteBlock. This
// * It's important to run an iopEventTest before calling ExecuteBlock. This
// is because the IOP does not always perform branch tests before returning
// (during the prev branch) and also so it can act on the state the EE has
// given it before executing any code.
//
// * The IOP cannot always be run. If we run IOP code every time through the
// cpuBranchTest, the IOP generally starts to run way ahead of the EE.
// cpuEventTest, the IOP generally starts to run way ahead of the EE.
EEsCycle += cpuRegs.cycle - EEoCycle;
EEoCycle = cpuRegs.cycle;
if( EEsCycle > 0 )
iopBranchAction = true;
iopEventAction = true;
psxBranchTest();
iopEventTest();
if( iopBranchAction )
if( iopEventAction )
{
//if( EEsCycle < -450 )
// Console.WriteLn( " IOP ahead by: %d cycles", -EEsCycle );
EEsCycle = psxCpu->ExecuteBlock( EEsCycle );
iopBranchAction = false;
iopEventAction = false;
}
// ---- VU0 -------------
// We're in a BranchTest. All dynarec registers are flushed
// We're in an EventTest. All dynarec registers are flushed
// so there is no need to freeze registers here.
CpuVU0->ExecuteBlock();
@ -421,19 +421,19 @@ __fi void _cpuBranchTest_Shared()
// EE's running way ahead of the IOP still, so we should branch quickly to give the
// IOP extra timeslices in short order.
cpuSetNextBranchDelta( 48 );
//Console.Warning( "EE ahead of the IOP -- Rapid Branch! %d", EEsCycle );
cpuSetNextEventDelta( 48 );
//Console.Warning( "EE ahead of the IOP -- Rapid Event! %d", EEsCycle );
}
// The IOP could be running ahead/behind of us, so adjust the iop's next branch by its
// relative position to the EE (via EEsCycle)
cpuSetNextBranchDelta( ((g_psxNextBranchCycle-psxRegs.cycle)*8) - EEsCycle );
cpuSetNextEventDelta( ((g_iopNextEventCycle-psxRegs.cycle)*8) - EEsCycle );
// Apply the hsync counter's nextCycle
cpuSetNextBranch( hsyncCounter.sCycle, hsyncCounter.CycleT );
cpuSetNextEvent( hsyncCounter.sCycle, hsyncCounter.CycleT );
// Apply vsync and other counter nextCycles
cpuSetNextBranch( nextsCounter, nextCounter );
cpuSetNextEvent( nextsCounter, nextCounter );
}
__ri void cpuTestINTCInts()
@ -444,11 +444,11 @@ __ri void cpuTestINTCInts()
if( (psHu32(INTC_STAT) & psHu32(INTC_MASK)) == 0 ) return;
cpuSetNextBranchDelta( 4 );
if(eeEventTestIsActive && (psxCycleEE > 0))
cpuSetNextEventDelta( 4 );
if(eeEventTestIsActive && (iopCycleEE > 0))
{
psxBreak += psxCycleEE; // record the number of cycles the IOP didn't run.
psxCycleEE = 0;
iopBreak += iopCycleEE; // record the number of cycles the IOP didn't run.
iopCycleEE = 0;
}
}
@ -461,11 +461,11 @@ __fi void cpuTestDMACInts()
if ( ( (psHu16(0xe012) & psHu16(0xe010)) == 0) &&
( (psHu16(0xe010) & 0x8000) == 0) ) return;
cpuSetNextBranchDelta( 4 );
if(eeEventTestIsActive && (psxCycleEE > 0))
cpuSetNextEventDelta( 4 );
if(eeEventTestIsActive && (iopCycleEE > 0))
{
psxBreak += psxCycleEE; // record the number of cycles the IOP didn't run.
psxCycleEE = 0;
iopBreak += iopCycleEE; // record the number of cycles the IOP didn't run.
iopCycleEE = 0;
}
}
@ -499,16 +499,16 @@ __fi void CPU_INT( EE_EventType n, s32 ecycle)
// Interrupt is happening soon: make sure both EE and IOP are aware.
if( ecycle <= 28 && psxCycleEE > 0 )
if( ecycle <= 28 && iopCycleEE > 0 )
{
// If running in the IOP, force it to break immediately into the EE.
// the EE's branch test is due to run.
psxBreak += psxCycleEE; // record the number of cycles the IOP didn't run.
psxCycleEE = 0;
iopBreak += iopCycleEE; // record the number of cycles the IOP didn't run.
iopCycleEE = 0;
}
cpuSetNextBranchDelta( cpuRegs.eCycle[n] );
cpuSetNextEventDelta( cpuRegs.eCycle[n] );
}
// Called from recompilers; __fastcall define is mandatory.

View File

@ -244,7 +244,7 @@ extern __aligned16 cpuRegisters cpuRegs;
extern __aligned16 fpuRegisters fpuRegs;
extern __aligned16 tlbs tlb[48];
extern u32 g_nextBranchCycle;
extern u32 g_nextEventCycle;
extern bool eeEventTestIsActive;
extern u32 s_iLastCOP0Cycle;
extern u32 s_iLastPERFCycle[2];
@ -415,12 +415,12 @@ extern void cpuTlbMissW(u32 addr, u32 bd);
extern void cpuTestHwInts();
extern void cpuClearInt(uint n);
extern void cpuSetNextBranch( u32 startCycle, s32 delta );
extern void cpuSetNextBranchDelta( s32 delta );
extern void cpuSetNextEvent( u32 startCycle, s32 delta );
extern void cpuSetNextEventDelta( s32 delta );
extern int cpuTestCycle( u32 startCycle, s32 delta );
extern void cpuSetBranch();
extern void cpuSetEvent();
extern void _cpuBranchTest_Shared(); // for internal use by the Dynarecs and Ints inside R5900:
extern void _cpuEventTest_Shared(); // for internal use by the Dynarecs and Ints inside R5900:
extern void cpuTestINTCInts();
extern void cpuTestDMACInts();

View File

@ -179,8 +179,8 @@ void SaveStateBase::FreezeRegisters()
FreezeTag( "Cycles" );
Freeze(EEsCycle);
Freeze(EEoCycle);
Freeze(g_nextBranchCycle);
Freeze(g_psxNextBranchCycle);
Freeze(g_nextEventCycle);
Freeze(g_iopNextEventCycle);
Freeze(s_iLastCOP0Cycle);
Freeze(s_iLastPERFCycle);

View File

@ -33,7 +33,7 @@ void BaseVUmicroCPU::ExecuteBlock(bool startUp) {
// Let VUs run behind EE instead of ahead
if (stat & test) {
cpuSetNextBranchDelta((s+c)*2);
cpuSetNextEventDelta((s+c)*2);
m_lastEEcycles = cpuRegs.cycle + (s*2);
}
}
@ -43,11 +43,11 @@ void BaseVUmicroCPU::ExecuteBlock(bool startUp) {
delta >>= 1; // Divide by 2 (unsigned)
Execute(delta); // Execute the time since the last call
if (stat & test) {
cpuSetNextBranchDelta(c*2);
cpuSetNextEventDelta(c*2);
m_lastEEcycles = cpuRegs.cycle;
}
}
else cpuSetNextBranchDelta(-delta); // Haven't caught-up from kick start
else cpuSetNextEventDelta(-delta); // Haven't caught-up from kick start
}
}
@ -63,7 +63,7 @@ void __fastcall BaseVUmicroCPU::ExecuteBlockJIT(BaseVUmicroCPU* cpu) {
cpu->Execute(c); // Execute VU
if (stat & test) {
cpu->m_lastEEcycles+=(c*2);
cpuSetNextBranchDelta(c*2);
cpuSetNextEventDelta(c*2);
}
}
}
@ -80,7 +80,7 @@ void BaseVUmicroCPU::ExecuteBlock(bool startUp) {
// If the VU0 program didn't finish then we'll want to finish it up
// pretty soon. This fixes vmhacks in some games (Naruto Ultimate Ninja 2)
if(VU0.VI[REG_VPU_STAT].UL & vuRunning)
cpuSetNextBranchDelta( 192 ); // fixme : ideally this should be higher, like 512 or so.
cpuSetNextEventDelta( 192 ); // fixme : ideally this should be higher, like 512 or so.
}
else {
Execute(vu0RunCycles);
@ -89,7 +89,7 @@ void BaseVUmicroCPU::ExecuteBlock(bool startUp) {
// This helps keep the EE and VU0 in sync.
// Check Silver Surfer. Currently has SPS varying with different branch deltas set below.
if(VU0.VI[REG_VPU_STAT].UL & vuRunning)
cpuSetNextBranchDelta( 768 );
cpuSetNextEventDelta( 768 );
}
}

View File

@ -39,7 +39,7 @@
using namespace x86Emitter;
extern u32 g_psxNextBranchCycle;
extern u32 g_iopNextEventCycle;
extern void psxBREAK();
u32 g_psxMaxRecMem = 0;
@ -121,7 +121,7 @@ static DynGenFunc* iopExitRecompiledCode = NULL;
static void recEventTest()
{
_cpuBranchTest_Shared();
_cpuEventTest_Shared();
}
// parameters:
@ -876,28 +876,28 @@ static void recExecute()
static __noinline s32 recExecuteBlock( s32 eeCycles )
{
psxBreak = 0;
psxCycleEE = eeCycles;
iopBreak = 0;
iopCycleEE = eeCycles;
// [TODO] recExecuteBlock could be replaced by a direct call to the iopEnterRecompiledCode()
// (by assigning its address to the psxRec structure). But for that to happen, we need
// to move psxBreak/psxCycleEE update code to emitted assembly code. >_< --air
// to move iopBreak/iopCycleEE update code to emitted assembly code. >_< --air
// Likely Disasm, as borrowed from MSVC:
// Entry:
// mov eax,dword ptr [esp+4]
// mov dword ptr [psxBreak (0E88DCCh)],0
// mov dword ptr [psxCycleEE (832A84h)],eax
// mov dword ptr [iopBreak (0E88DCCh)],0
// mov dword ptr [iopCycleEE (832A84h)],eax
// Exit:
// mov ecx,dword ptr [psxBreak (0E88DCCh)]
// mov edx,dword ptr [psxCycleEE (832A84h)]
// mov ecx,dword ptr [iopBreak (0E88DCCh)]
// mov edx,dword ptr [iopCycleEE (832A84h)]
// lea eax,[edx+ecx]
iopEnterRecompiledCode();
return psxBreak + psxCycleEE;
return iopBreak + iopCycleEE;
}
// Returns the offset to the next instruction after any cleared memory
@ -1021,19 +1021,19 @@ static void iPsxBranchTest(u32 newpc, u32 cpuBranch)
{
xMOV(eax, ptr32[&psxRegs.cycle]);
xMOV(ecx, eax);
xMOV(edx, ptr32[&psxCycleEE]);
xMOV(edx, ptr32[&iopCycleEE]);
xADD(edx, 7);
xSHR(edx, 3);
xADD(eax, edx);
xCMP(eax, ptr32[&g_psxNextBranchCycle]);
xCMOVNS(eax, ptr32[&g_psxNextBranchCycle]);
xCMP(eax, ptr32[&g_iopNextEventCycle]);
xCMOVNS(eax, ptr32[&g_iopNextEventCycle]);
xMOV(ptr32[&psxRegs.cycle], eax);
xSUB(eax, ecx);
xSHL(eax, 3);
xSUB(ptr32[&psxCycleEE], eax);
xSUB(ptr32[&iopCycleEE], eax);
xJLE(iopExitRecompiledCode);
xCALL(psxBranchTest);
xCALL(iopEventTest);
if( newpc != 0xffffffff )
{
@ -1047,15 +1047,15 @@ static void iPsxBranchTest(u32 newpc, u32 cpuBranch)
xADD(eax, blockCycles);
xMOV(ptr32[&psxRegs.cycle], eax); // update cycles
// jump if psxCycleEE <= 0 (iop's timeslice timed out, so time to return control to the EE)
xSUB(ptr32[&psxCycleEE], blockCycles*8);
// jump if iopCycleEE <= 0 (iop's timeslice timed out, so time to return control to the EE)
xSUB(ptr32[&iopCycleEE], blockCycles*8);
xJLE(iopExitRecompiledCode);
// check if an event is pending
xSUB(eax, ptr32[&g_psxNextBranchCycle]);
xSUB(eax, ptr32[&g_iopNextEventCycle]);
xForwardJS<u8> nointerruptpending;
xCALL(psxBranchTest);
xCALL(iopEventTest);
if( newpc != 0xffffffff ) {
xCMP(ptr32[&psxRegs.pc], newpc);
@ -1098,7 +1098,7 @@ void rpsxSYSCALL()
j8Ptr[0] = JE8(0);
ADD32ItoM((uptr)&psxRegs.cycle, psxScaleBlockCycles() );
SUB32ItoM((uptr)&psxCycleEE, psxScaleBlockCycles()*8 );
SUB32ItoM((uptr)&iopCycleEE, psxScaleBlockCycles()*8 );
JMP32((uptr)iopDispatcherReg - ( (uptr)x86Ptr + 5 ));
// jump target for skipping blockCycle updates
@ -1120,7 +1120,7 @@ void rpsxBREAK()
CMP32ItoM((uptr)&psxRegs.pc, psxpc-4);
j8Ptr[0] = JE8(0);
ADD32ItoM((uptr)&psxRegs.cycle, psxScaleBlockCycles() );
SUB32ItoM((uptr)&psxCycleEE, psxScaleBlockCycles()*8 );
SUB32ItoM((uptr)&iopCycleEE, psxScaleBlockCycles()*8 );
JMP32((uptr)iopDispatcherReg - ( (uptr)x86Ptr + 5 ));
x86SetJ8(j8Ptr[0]);
@ -1373,7 +1373,7 @@ StartRecomp:
else
{
ADD32ItoM((uptr)&psxRegs.cycle, psxScaleBlockCycles() );
SUB32ItoM((uptr)&psxCycleEE, psxScaleBlockCycles()*8 );
SUB32ItoM((uptr)&iopCycleEE, psxScaleBlockCycles()*8 );
}
if (willbranch3 || !psxbranch) {

View File

@ -316,7 +316,7 @@ void recBranchCall( void (*func)() )
// to the current cpu cycle.
MOV32MtoR( EAX, (uptr)&cpuRegs.cycle );
MOV32RtoM( (uptr)&g_nextBranchCycle, EAX );
MOV32RtoM( (uptr)&g_nextEventCycle, EAX );
recCall(func);
branch = 2;
@ -350,7 +350,7 @@ static DynGenFunc* ExitRecompiledCode = NULL;
static void recEventTest()
{
_cpuBranchTest_Shared();
_cpuEventTest_Shared();
}
// parameters:
@ -1111,11 +1111,11 @@ static void iBranchTest(u32 newpc)
// Check the Event scheduler if our "cycle target" has been reached.
// Equiv code to:
// cpuRegs.cycle += blockcycles;
// if( cpuRegs.cycle > g_nextBranchCycle ) { DoEvents(); }
// if( cpuRegs.cycle > g_nextEventCycle ) { DoEvents(); }
if (EmuConfig.Speedhacks.WaitLoop && s_nBlockFF && newpc == s_branchTo)
{
xMOV(eax, ptr32[&g_nextBranchCycle]);
xMOV(eax, ptr32[&g_nextEventCycle]);
xADD(ptr32[&cpuRegs.cycle], eeScaleBlockCycles());
xCMP(eax, ptr32[&cpuRegs.cycle]);
xCMOVS(eax, ptr32[&cpuRegs.cycle]);
@ -1128,7 +1128,7 @@ static void iBranchTest(u32 newpc)
xMOV(eax, ptr[&cpuRegs.cycle]);
xADD(eax, eeScaleBlockCycles());
xMOV(ptr[&cpuRegs.cycle], eax); // update cycles
xSUB(eax, ptr[&g_nextBranchCycle]);
xSUB(eax, ptr[&g_nextEventCycle]);
if (newpc == 0xffffffff)
xJS( DispatcherReg );