mirror of https://github.com/PCSX2/pcsx2.git
COP0: Correctly update performance counter cycles
This commit is contained in:
parent 8e9fd91cee
commit a3eb1655c6
pcsx2/COP0.cpp (154 lines changed)
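The changes below make the EE's COP0 performance counters see an up-to-date cycle total: COP0_UpdatePCCR() credits each counter with cpuRegs.cycle minus the value recorded at the previous update, so the recompiled MFC0/MTC0 paths now fold the in-flight block's cycles (scaleblockcycles_clear()) into cpuRegs.cycle before reading or rewriting PCR0/PCR1, Count, or Status. The stand-alone C++ sketch below only models that bookkeeping; PerfCounterModel, flushBlockCycles, and pendingBlockCycles are illustrative names, not PCSX2 identifiers.

#include <cstdint>
#include <cstdio>

// Simplified model of the cycle accounting in COP0_UpdatePCCR().
// "cycle" plays the role of cpuRegs.cycle, "lastPERFCycle" the role of
// cpuRegs.lastPERFCycle[n], and "pcr" the role of PCR0/PCR1.
struct PerfCounterModel
{
    uint32_t cycle = 0;         // global cycle counter
    uint32_t lastPERFCycle = 0; // cycle value at the previous counter update
    uint32_t pcr = 0;           // performance counter register

    // Same delta rule as the diff: credit the elapsed cycles, never zero.
    void updatePCCR()
    {
        uint32_t incr = cycle - lastPERFCycle;
        if (incr == 0)
            incr++;
        pcr += incr;
        lastPERFCycle = cycle;
    }

    // What the recompiler now does first: commit the cycles of the block
    // currently executing (the role played by scaleblockcycles_clear()).
    void flushBlockCycles(uint32_t pendingBlockCycles)
    {
        cycle += pendingBlockCycles;
    }
};

int main()
{
    PerfCounterModel perf;

    // Without the flush, a counter read in the middle of a 60-cycle block
    // would only see cycles committed at the previous block boundary.
    perf.flushBlockCycles(60);
    perf.updatePCCR();

    std::printf("pcr = %u\n", static_cast<unsigned>(perf.pcr)); // prints 60
    return 0;
}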
@@ -21,7 +21,8 @@
// Updates the CPU's mode of operation (either, Kernel, Supervisor, or User modes).
// Currently the different modes are not implemented.
// Given this function is called so much, it's commented out for now. (rama)
__ri void cpuUpdateOperationMode() {
__ri void cpuUpdateOperationMode()
{

//u32 value = cpuRegs.CP0.n.Status.val;
@@ -33,15 +34,15 @@ __ri void cpuUpdateOperationMode() {
//}
}

void WriteCP0Status(u32 value) {

//DMA_LOG("COP0 Status write = 0x%08x", value);

void WriteCP0Status(u32 value)
{
COP0_UpdatePCCR();
cpuRegs.CP0.n.Status.val = value;
cpuSetNextEventDelta(4);
}

void WriteCP0Config(u32 value) {
void WriteCP0Config(u32 value)
{
// Protect the read-only ICacheSize (IC) and DataCacheSize (DC) bits
cpuRegs.CP0.n.Config = value & ~0xFC0;
cpuRegs.CP0.n.Config |= 0x440;
@@ -140,13 +141,13 @@ __fi void COP0_UpdatePCCR()
if (PERF_ShouldCountEvent(cpuRegs.PERF.n.pccr.b.Event0))
{
u32 incr = cpuRegs.cycle - cpuRegs.lastPERFCycle[0];
if( incr == 0 ) incr++;
if (incr == 0)
incr++;

// use prev/XOR method for one-time exceptions (but likely less correct)
//u32 prev = cpuRegs.PERF.n.pcr0;
cpuRegs.PERF.n.pcr0 += incr;
cpuRegs.lastPERFCycle[0] = cpuRegs.cycle;

//DevCon.Warning("PCR VAL %x", cpuRegs.PERF.n.pccr.val);
//prev ^= (1UL<<31); // XOR is fun!
//if( (prev & cpuRegs.PERF.n.pcr0) & (1UL<<31) )
if ((cpuRegs.PERF.n.pcr0 & 0x80000000))
@@ -191,10 +192,10 @@ __fi void COP0_UpdatePCCR()
if (PERF_ShouldCountEvent(cpuRegs.PERF.n.pccr.b.Event1))
{
u32 incr = cpuRegs.cycle - cpuRegs.lastPERFCycle[1];
if( incr == 0 ) incr++;
if (incr == 0)
incr++;

cpuRegs.PERF.n.pcr1 += incr;
cpuRegs.lastPERFCycle[1] = cpuRegs.cycle;

if ((cpuRegs.PERF.n.pcr1 & 0x80000000))
{
@@ -229,6 +230,8 @@ __fi void COP0_UpdatePCCR()
}
}
}
cpuRegs.lastPERFCycle[0] = cpuRegs.cycle;
cpuRegs.lastPERFCycle[1] = cpuRegs.cycle;
}

//////////////////////////////////////////////////////////////////////////////////////////
@@ -249,27 +252,34 @@ void MapTLB(const tlbs& t, int i)
vtlb_VMapBuffer(t.VPN2, eeMem->Scratch, Ps2MemSize::Scratch);
}

if (t.VPN2 == 0x70000000) return; //uh uhh right ...
if (t.EntryLo0 & 0x2) {
if (t.VPN2 == 0x70000000)
return; //uh uhh right ...
if (t.EntryLo0 & 0x2)
{
mask = ((~t.Mask) << 1) & 0xfffff;
saddr = t.VPN2 >> 12;
eaddr = saddr + t.Mask + 1;

for (addr=saddr; addr<eaddr; addr++) {
if ((addr & mask) == ((t.VPN2 >> 12) & mask)) { //match
for (addr = saddr; addr < eaddr; addr++)
{
if ((addr & mask) == ((t.VPN2 >> 12) & mask))
{ //match
memSetPageAddr(addr << 12, t.PFN0 + ((addr - saddr) << 12));
Cpu->Clear(addr << 12, 0x400);
}
}
}

if (t.EntryLo1 & 0x2) {
if (t.EntryLo1 & 0x2)
{
mask = ((~t.Mask) << 1) & 0xfffff;
saddr = (t.VPN2 >> 12) + t.Mask + 1;
eaddr = saddr + t.Mask + 1;

for (addr=saddr; addr<eaddr; addr++) {
if ((addr & mask) == ((t.VPN2 >> 12) & mask)) { //match
for (addr = saddr; addr < eaddr; addr++)
{
if ((addr & mask) == ((t.VPN2 >> 12) & mask))
{ //match
memSetPageAddr(addr << 12, t.PFN1 + ((addr - saddr) << 12));
Cpu->Clear(addr << 12, 0x400);
}
@@ -295,21 +305,26 @@ void UnmapTLB(const tlbs& t, int i)
saddr = t.VPN2 >> 12;
eaddr = saddr + t.Mask + 1;
// Console.WriteLn("Clear TLB: %08x ~ %08x",saddr,eaddr-1);
for (addr=saddr; addr<eaddr; addr++) {
if ((addr & mask) == ((t.VPN2 >> 12) & mask)) { //match
for (addr = saddr; addr < eaddr; addr++)
{
if ((addr & mask) == ((t.VPN2 >> 12) & mask))
{ //match
memClearPageAddr(addr << 12);
Cpu->Clear(addr << 12, 0x400);
}
}
}

if (t.EntryLo1 & 0x2) {
if (t.EntryLo1 & 0x2)
{
mask = ((~t.Mask) << 1) & 0xfffff;
saddr = (t.VPN2 >> 12) + t.Mask + 1;
eaddr = saddr + t.Mask + 1;
// Console.WriteLn("Clear TLB: %08x ~ %08x",saddr,eaddr-1);
for (addr=saddr; addr<eaddr; addr++) {
if ((addr & mask) == ((t.VPN2 >> 12) & mask)) { //match
for (addr = saddr; addr < eaddr; addr++)
{
if ((addr & mask) == ((t.VPN2 >> 12) & mask))
{ //match
memClearPageAddr(addr << 12);
Cpu->Clear(addr << 12, 0x400);
}
@@ -341,7 +356,8 @@ namespace Interpreter {
namespace OpcodeImpl {
namespace COP0 {

void TLBR() {
void TLBR()
{
COP0_LOG("COP0_TLBR %d:%x,%x,%x,%x",
cpuRegs.CP0.n.Index, cpuRegs.CP0.n.PageMask, cpuRegs.CP0.n.EntryHi,
cpuRegs.CP0.n.EntryLo0, cpuRegs.CP0.n.EntryLo1);
@@ -354,7 +370,8 @@ void TLBR() {
cpuRegs.CP0.n.EntryLo1 = (tlb[i].EntryLo1 & ~1) | ((tlb[i].EntryHi >> 12) & 1);
}

void TLBWI() {
void TLBWI()
{
int j = cpuRegs.CP0.n.Index & 0x3f;

//if (j > 48) return;
@@ -371,7 +388,8 @@ void TLBWI() {
WriteTLB(j);
}

void TLBWR() {
void TLBWR()
{
int j = cpuRegs.CP0.n.Random & 0x3f;

//if (j > 48) return;
@@ -390,11 +408,14 @@ DevCon.Warning("COP0_TLBWR %d:%x,%x,%x,%x\n",
WriteTLB(j);
}

void TLBP() {
void TLBP()
{
int i;

union {
struct {
union
{
struct
{
u32 VPN2 : 19;
u32 VPN2X : 2;
u32 G : 3;
@@ -406,20 +427,23 @@ void TLBP() {
EntryHi32.u = cpuRegs.CP0.n.EntryHi;

cpuRegs.CP0.n.Index = 0xFFFFFFFF;
for(i=0;i<48;i++){
if (tlb[i].VPN2 == ((~tlb[i].Mask) & (EntryHi32.s.VPN2))
&& ((tlb[i].G&1) || ((tlb[i].ASID & 0xff) == EntryHi32.s.ASID))) {
for (i = 0; i < 48; i++)
{
if (tlb[i].VPN2 == ((~tlb[i].Mask) & (EntryHi32.s.VPN2)) && ((tlb[i].G & 1) || ((tlb[i].ASID & 0xff) == EntryHi32.s.ASID)))
{
cpuRegs.CP0.n.Index = i;
break;
}
}
if(cpuRegs.CP0.n.Index == 0xFFFFFFFF) cpuRegs.CP0.n.Index = 0x80000000;
if (cpuRegs.CP0.n.Index == 0xFFFFFFFF)
cpuRegs.CP0.n.Index = 0x80000000;
}

void MFC0()
{
// Note on _Rd_ Condition 9: CP0.Count should be updated even if _Rt_ is 0.
if ((_Rd_ != 9) && !_Rt_ ) return;
if ((_Rd_ != 9) && !_Rt_)
return;

//if(bExecBIOS == FALSE && _Rd_ == 25) Console.WriteLn("MFC0 _Rd_ %x = %x", _Rd_, cpuRegs.CP0.r[_Rd_]);
switch (_Rd_)
@@ -454,10 +478,12 @@ void MFC0()
case 9:
{
u32 incr = cpuRegs.cycle - cpuRegs.lastCOP0Cycle;
if( incr == 0 ) incr++;
if (incr == 0)
incr++;
cpuRegs.CP0.n.Count += incr;
cpuRegs.lastCOP0Cycle = cpuRegs.cycle;
if( !_Rt_ ) break;
if (!_Rt_)
break;
}
[[fallthrough]];
@@ -518,36 +544,43 @@ void MTC0()
}
}

int CPCOND0() {
int CPCOND0()
{
return (((dmacRegs.stat.CIS | ~dmacRegs.pcr.CPC) & 0x3FF) == 0x3ff);
}

//#define CPCOND0 1

void BC0F() {
if (CPCOND0() == 0) intDoBranch(_BranchTarget_);
void BC0F()
{
if (CPCOND0() == 0)
intDoBranch(_BranchTarget_);
}

void BC0T() {
if (CPCOND0() == 1) intDoBranch(_BranchTarget_);
void BC0T()
{
if (CPCOND0() == 1)
intDoBranch(_BranchTarget_);
}

void BC0FL() {
void BC0FL()
{
if (CPCOND0() == 0)
intDoBranch(_BranchTarget_);
else
cpuRegs.pc += 4;

}

void BC0TL() {
void BC0TL()
{
if (CPCOND0() == 1)
intDoBranch(_BranchTarget_);
else
cpuRegs.pc += 4;
}

void ERET() {
void ERET()
{
#ifdef ENABLE_VTUNE
// Allow to stop vtune in a predictable way to compare runs
// Of course, the limit will depend on the game.
@@ -557,19 +590,25 @@ void ERET() {

// quick_exit vs exit: quick_exit won't call static storage destructor (OS will manage). It helps
// avoiding the race condition between threads destruction.
if (vtune > 30 * million) {
if (vtune > 30 * million)
{
Console.WriteLn("VTUNE: quick_exit");
std::quick_exit(EXIT_SUCCESS);
} else if (!(vtune % million)) {
}
else if (!(vtune % million))
{
Console.WriteLn("VTUNE: ERET was called %uM times", vtune / million);
}

#endif

if (cpuRegs.CP0.n.Status.b.ERL) {
if (cpuRegs.CP0.n.Status.b.ERL)
{
cpuRegs.pc = cpuRegs.CP0.n.ErrorEPC;
cpuRegs.CP0.n.Status.b.ERL = 0;
} else {
}
else
{
cpuRegs.pc = cpuRegs.CP0.n.EPC;
cpuRegs.CP0.n.Status.b.EXL = 0;
}
@@ -578,22 +617,29 @@ void ERET() {
intSetBranch();
}

void DI() {
void DI()
{
if (cpuRegs.CP0.n.Status.b._EDI || cpuRegs.CP0.n.Status.b.EXL ||
cpuRegs.CP0.n.Status.b.ERL || (cpuRegs.CP0.n.Status.b.KSU == 0)) {
cpuRegs.CP0.n.Status.b.ERL || (cpuRegs.CP0.n.Status.b.KSU == 0))
{
cpuRegs.CP0.n.Status.b.EIE = 0;
// IRQs are disabled so no need to do a cpu exception/event test...
//cpuSetNextEventDelta();
}
}

void EI() {
void EI()
{
if (cpuRegs.CP0.n.Status.b._EDI || cpuRegs.CP0.n.Status.b.EXL ||
cpuRegs.CP0.n.Status.b.ERL || (cpuRegs.CP0.n.Status.b.KSU == 0)) {
cpuRegs.CP0.n.Status.b.ERL || (cpuRegs.CP0.n.Status.b.KSU == 0))
{
cpuRegs.CP0.n.Status.b.EIE = 1;
// schedule an event test, which will check for and raise pending IRQs.
cpuSetNextEventDelta(4);
}
}

} } } } // end namespace R5900::Interpreter::OpcodeImpl
} // namespace COP0
} // namespace OpcodeImpl
} // namespace Interpreter
} // namespace R5900
@@ -150,7 +150,9 @@ void recMFC0()
if (_Rd_ == 9)
{
// This case needs to be handled even if the write-back is ignored (_Rt_ == 0 )
xMOV(ecx, ptr[&cpuRegs.cycle]);
xMOV(ecx, ptr32[&cpuRegs.cycle]);
xADD(ecx, scaleblockcycles_clear());
xMOV(ptr32[&cpuRegs.cycle], ecx); // update cycles
xMOV(eax, ecx);
xSUB(eax, ptr[&cpuRegs.lastCOP0Cycle]);
u8* skipInc = JNZ8(0);
@@ -180,6 +182,9 @@ void recMFC0()
else if (0 == (_Imm_ & 2)) // MFPC 0, only LSB of register matters
{
iFlushCall(FLUSH_INTERPRETER);
xMOV(eax, ptr32[&cpuRegs.cycle]);
xADD(eax, scaleblockcycles_clear());
xMOV(ptr32[&cpuRegs.cycle], eax); // update cycles
xFastCall((void*)COP0_UpdatePCCR);

const int regt = _allocX86reg(X86TYPE_GPR, _Rt_, MODE_WRITE);
@@ -188,6 +193,9 @@ void recMFC0()
else // MFPC 1
{
iFlushCall(FLUSH_INTERPRETER);
xMOV(eax, ptr32[&cpuRegs.cycle]);
xADD(eax, scaleblockcycles_clear());
xMOV(ptr32[&cpuRegs.cycle], eax); // update cycles
xFastCall((void*)COP0_UpdatePCCR);

const int regt = _allocX86reg(X86TYPE_GPR, _Rt_, MODE_WRITE);
@@ -214,6 +222,9 @@ void recMTC0()
{
case 12:
iFlushCall(FLUSH_INTERPRETER);
xMOV(eax, ptr32[&cpuRegs.cycle]);
xADD(eax, scaleblockcycles_clear());
xMOV(ptr32[&cpuRegs.cycle], eax); // update cycles
xFastCall((void*)WriteCP0Status, g_cpuConstRegs[_Rt_].UL[0]);
break;
@@ -223,7 +234,9 @@ void recMTC0()
break;

case 9:
xMOV(ecx, ptr[&cpuRegs.cycle]);
xMOV(ecx, ptr32[&cpuRegs.cycle]);
xADD(ecx, scaleblockcycles_clear());
xMOV(ptr32[&cpuRegs.cycle], ecx); // update cycles
xMOV(ptr[&cpuRegs.lastCOP0Cycle], ecx);
xMOV(ptr32[&cpuRegs.CP0.r[9]], g_cpuConstRegs[_Rt_].UL[0]);
break;
@@ -235,19 +248,26 @@ void recMTC0()
break;
// Updates PCRs and sets the PCCR.
iFlushCall(FLUSH_INTERPRETER);
xMOV(eax, ptr32[&cpuRegs.cycle]);
xADD(eax, scaleblockcycles_clear());
xMOV(ptr32[&cpuRegs.cycle], eax); // update cycles
xFastCall((void*)COP0_UpdatePCCR);
xMOV(ptr32[&cpuRegs.PERF.n.pccr], g_cpuConstRegs[_Rt_].UL[0]);
xFastCall((void*)COP0_DiagnosticPCCR);
}
else if (0 == (_Imm_ & 2)) // MTPC 0, only LSB of register matters
{
xMOV(eax, ptr[&cpuRegs.cycle]);
xMOV(eax, ptr32[&cpuRegs.cycle]);
xADD(eax, scaleblockcycles_clear());
xMOV(ptr32[&cpuRegs.cycle], eax); // update cycles
xMOV(ptr32[&cpuRegs.PERF.n.pcr0], g_cpuConstRegs[_Rt_].UL[0]);
xMOV(ptr[&cpuRegs.lastPERFCycle[0]], eax);
}
else // MTPC 1
{
xMOV(eax, ptr[&cpuRegs.cycle]);
xMOV(eax, ptr32[&cpuRegs.cycle]);
xADD(eax, scaleblockcycles_clear());
xMOV(ptr32[&cpuRegs.cycle], eax); // update cycles
xMOV(ptr32[&cpuRegs.PERF.n.pcr1], g_cpuConstRegs[_Rt_].UL[0]);
xMOV(ptr[&cpuRegs.lastPERFCycle[1]], eax);
}
@@ -269,6 +289,9 @@ void recMTC0()
case 12:
_eeMoveGPRtoR(arg1reg, _Rt_);
iFlushCall(FLUSH_INTERPRETER);
xMOV(eax, ptr32[&cpuRegs.cycle]);
xADD(eax, scaleblockcycles_clear());
xMOV(ptr32[&cpuRegs.cycle], eax); // update cycles
xFastCall((void*)WriteCP0Status);
break;
@@ -279,7 +302,9 @@ void recMTC0()
break;

case 9:
xMOV(ecx, ptr[&cpuRegs.cycle]);
xMOV(ecx, ptr32[&cpuRegs.cycle]);
xADD(ecx, scaleblockcycles_clear());
xMOV(ptr32[&cpuRegs.cycle], ecx); // update cycles
_eeMoveGPRtoM((uptr)&cpuRegs.CP0.r[9], _Rt_);
xMOV(ptr[&cpuRegs.lastCOP0Cycle], ecx);
break;
@@ -290,19 +315,26 @@ void recMTC0()
if (0 != (_Imm_ & 0x3E)) // only effective when the register is 0
break;
iFlushCall(FLUSH_INTERPRETER);
xMOV(eax, ptr32[&cpuRegs.cycle]);
xADD(eax, scaleblockcycles_clear());
xMOV(ptr32[&cpuRegs.cycle], eax); // update cycles
xFastCall((void*)COP0_UpdatePCCR);
_eeMoveGPRtoM((uptr)&cpuRegs.PERF.n.pccr, _Rt_);
xFastCall((void*)COP0_DiagnosticPCCR);
}
else if (0 == (_Imm_ & 2)) // MTPC 0, only LSB of register matters
{
xMOV(ecx, ptr[&cpuRegs.cycle]);
xMOV(ecx, ptr32[&cpuRegs.cycle]);
xADD(ecx, scaleblockcycles_clear());
xMOV(ptr32[&cpuRegs.cycle], ecx); // update cycles
_eeMoveGPRtoM((uptr)&cpuRegs.PERF.n.pcr0, _Rt_);
xMOV(ptr[&cpuRegs.lastPERFCycle[0]], ecx);
}
else // MTPC 1
{
xMOV(ecx, ptr[&cpuRegs.cycle]);
xMOV(ecx, ptr32[&cpuRegs.cycle]);
xADD(ecx, scaleblockcycles_clear());
xMOV(ptr32[&cpuRegs.cycle], ecx); // update cycles
_eeMoveGPRtoM((uptr)&cpuRegs.PERF.n.pcr1, _Rt_);
xMOV(ptr[&cpuRegs.lastPERFCycle[1]], ecx);
}
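As a side note on the interpreter hunks above, the BC0F/BC0T/BC0FL/BC0TL opcodes branch on CPCOND0, and the expression in the diff, (((dmacRegs.stat.CIS | ~dmacRegs.pcr.CPC) & 0x3FF) == 0x3ff), is true exactly when every DMA channel selected in pcr.CPC has its interrupt status bit set in stat.CIS (channels that are not selected are masked in through the complement). A small stand-alone check of that bit logic; cpcond0, statCIS, and pcrCPC are illustrative names, not PCSX2 identifiers.

#include <cstdint>
#include <cstdio>

// Stand-alone version of the CPCOND0 test from the diff. Bits 0..9 stand
// for the ten EE DMA channels: a channel deselected in pcrCPC contributes a
// 1 via ~pcrCPC, so the result is true only when every selected channel has
// its status bit set in statCIS.
static bool cpcond0(uint32_t statCIS, uint32_t pcrCPC)
{
    return ((statCIS | ~pcrCPC) & 0x3FF) == 0x3FF;
}

int main()
{
    // Channels 0 and 1 selected, only channel 0 finished -> false (0).
    std::printf("%d\n", cpcond0(0x001, 0x003));
    // Channels 0 and 1 selected, both finished -> true (1).
    std::printf("%d\n", cpcond0(0x003, 0x003));
    return 0;
}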