mirror of https://github.com/PCSX2/pcsx2.git
Cache Emulation: Updated the cache emulation for the new VTLB; Dead or Alive 2 (Japanese version only) is now playable. You can enable this under the Recompiler options by ticking the "Enable EE Cache" box; however, it only works with the EE in Interpreter mode. Also fixed some cache bugs from the old implementation.
Note: Once DoA2 is in-game (at the start of a fight), you can switch to the EE Rec until the fight is over with good speed! Hopefully one day someone will be brave enough to implement it on the rec side so you don't have to mess about :P git-svn-id: http://pcsx2.googlecode.com/svn/trunk@4309 96395faa-99c1-11dd-bbfe-3dabce05a288
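For reference, here is a minimal standalone sketch (not PCSX2 source) of the check order this commit introduces on the interpreter memory path: the emulated D-cache is only consulted when the EE recompiler is off and the new "Enable EE Cache" option is set. The helper names below (checkCacheHit, readFromCache, readFromRam) are illustrative stand-ins for the real CheckCache()/readCache*/VTLB lookups changed in pcsx2/vtlb.cpp and pcsx2/Cache.cpp.

#include <stdint.h>
#include <stdio.h>

static bool eeRecEnabled   = false; // cache path is interpreter-only; the EE rec bypasses it
static bool eeCacheEnabled = true;  // mirrors the new "Enable EE Cache" setting (CHECK_CACHE)

// Illustrative stand-ins, not the real PCSX2 helpers.
static bool    checkCacheHit(uint32_t addr) { return (addr & 0x70000000) == 0; }
static uint8_t readFromCache(uint32_t addr) { (void)addr; return 0xAA; }
static uint8_t readFromRam(uint32_t addr)   { (void)addr; return 0x55; }

static uint8_t memRead8(uint32_t addr)
{
    // Same gating order as the updated vtlb_memRead path:
    // recompiler off -> cache option enabled -> address marked cacheable.
    if (!eeRecEnabled && eeCacheEnabled && checkCacheHit(addr))
        return readFromCache(addr);
    return readFromRam(addr);
}

int main()
{
    printf("read = 0x%02X\n", memRead8(0x00100000));
    return 0;
}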
Parent 257d57ba52 · commit dde94adec3

pcsx2/COP0.cpp | 343
@@ -44,105 +44,6 @@ void __fastcall WriteCP0Status(u32 value) {
cpuSetNextEventDelta(4);
}

void MapTLB(int i)
{
u32 mask, addr;
u32 saddr, eaddr;

DevCon.WriteLn("MAP TLB %d: 0x%08X-> [0x%08X 0x%08X] S=0x%08X G=%d ASID=%d Mask=0x%03X",
i, tlb[i].VPN2, tlb[i].PFN0, tlb[i].PFN1, tlb[i].S, tlb[i].G, tlb[i].ASID, tlb[i].Mask);

if (tlb[i].S)
{
vtlb_VMapBuffer(tlb[i].VPN2, eeMem->Scratch, Ps2MemSize::Scratch);
}

if (tlb[i].VPN2 == 0x70000000) return; //uh uhh right ...
if (tlb[i].EntryLo0 & 0x2) {
mask = ((~tlb[i].Mask) << 1) & 0xfffff;
saddr = tlb[i].VPN2 >> 12;
eaddr = saddr + tlb[i].Mask + 1;

for (addr=saddr; addr<eaddr; addr++) {
if ((addr & mask) == ((tlb[i].VPN2 >> 12) & mask)) { //match
memSetPageAddr(addr << 12, tlb[i].PFN0 + ((addr - saddr) << 12));
Cpu->Clear(addr << 12, 0x400);
}
}
}

if (tlb[i].EntryLo1 & 0x2) {
mask = ((~tlb[i].Mask) << 1) & 0xfffff;
saddr = (tlb[i].VPN2 >> 12) + tlb[i].Mask + 1;
eaddr = saddr + tlb[i].Mask + 1;

for (addr=saddr; addr<eaddr; addr++) {
if ((addr & mask) == ((tlb[i].VPN2 >> 12) & mask)) { //match
memSetPageAddr(addr << 12, tlb[i].PFN1 + ((addr - saddr) << 12));
Cpu->Clear(addr << 12, 0x400);
}
}
}
}

void UnmapTLB(int i)
{
//Console.WriteLn("Clear TLB %d: %08x-> [%08x %08x] S=%d G=%d ASID=%d Mask= %03X", i,tlb[i].VPN2,tlb[i].PFN0,tlb[i].PFN1,tlb[i].S,tlb[i].G,tlb[i].ASID,tlb[i].Mask);
u32 mask, addr;
u32 saddr, eaddr;

if (tlb[i].S)
{
vtlb_VMapUnmap(tlb[i].VPN2,0x4000);
return;
}

if (tlb[i].EntryLo0 & 0x2)
{
mask = ((~tlb[i].Mask) << 1) & 0xfffff;
saddr = tlb[i].VPN2 >> 12;
eaddr = saddr + tlb[i].Mask + 1;
// Console.WriteLn("Clear TLB: %08x ~ %08x",saddr,eaddr-1);
for (addr=saddr; addr<eaddr; addr++) {
if ((addr & mask) == ((tlb[i].VPN2 >> 12) & mask)) { //match
memClearPageAddr(addr << 12);
Cpu->Clear(addr << 12, 0x400);
}
}
}

if (tlb[i].EntryLo1 & 0x2) {
mask = ((~tlb[i].Mask) << 1) & 0xfffff;
saddr = (tlb[i].VPN2 >> 12) + tlb[i].Mask + 1;
eaddr = saddr + tlb[i].Mask + 1;
// Console.WriteLn("Clear TLB: %08x ~ %08x",saddr,eaddr-1);
for (addr=saddr; addr<eaddr; addr++) {
if ((addr & mask) == ((tlb[i].VPN2 >> 12) & mask)) { //match
memClearPageAddr(addr << 12);
Cpu->Clear(addr << 12, 0x400);
}
}
}
}

void WriteTLB(int i)
{
tlb[i].PageMask = cpuRegs.CP0.n.PageMask;
tlb[i].EntryHi = cpuRegs.CP0.n.EntryHi;
tlb[i].EntryLo0 = cpuRegs.CP0.n.EntryLo0;
tlb[i].EntryLo1 = cpuRegs.CP0.n.EntryLo1;

tlb[i].Mask = (cpuRegs.CP0.n.PageMask >> 13) & 0xfff;
tlb[i].nMask = (~tlb[i].Mask) & 0xfff;
tlb[i].VPN2 = ((cpuRegs.CP0.n.EntryHi >> 13) & (~tlb[i].Mask)) << 13;
tlb[i].ASID = cpuRegs.CP0.n.EntryHi & 0xfff;
tlb[i].G = cpuRegs.CP0.n.EntryLo0 & cpuRegs.CP0.n.EntryLo1 & 0x1;
tlb[i].PFN0 = (((cpuRegs.CP0.n.EntryLo0 >> 6) & 0xFFFFF) & (~tlb[i].Mask)) << 12;
tlb[i].PFN1 = (((cpuRegs.CP0.n.EntryLo1 >> 6) & 0xFFFFF) & (~tlb[i].Mask)) << 12;
tlb[i].S = cpuRegs.CP0.n.EntryLo0&0x80000000;

MapTLB(i);
}

//////////////////////////////////////////////////////////////////////////////////////////
// Performance Counters Update Stuff!
@@ -325,11 +226,187 @@ __fi void COP0_UpdatePCCR()
//////////////////////////////////////////////////////////////////////////////////////////
//

void MapTLB(int i)
{
u32 mask, addr;
u32 saddr, eaddr;

DevCon.WriteLn("MAP TLB %d: 0x%08X-> [0x%08X 0x%08X] S=0x%08X G=%d ASID=%d Mask=0x%03X EntryLo0 PFN=%x EntryLo0 Cache=%x EntryLo1 PFN=%x EntryLo1 Cache=%x VPN2=%x",
i, tlb[i].VPN2, tlb[i].PFN0, tlb[i].PFN1, tlb[i].S, tlb[i].G, tlb[i].ASID, tlb[i].Mask, tlb[i].EntryLo0 >> 6, (tlb[i].EntryLo0 & 0x38) >> 3, tlb[i].EntryLo1 >> 6, (tlb[i].EntryLo1 & 0x38) >> 3, tlb[i].VPN2);

if (tlb[i].S)
{
vtlb_VMapBuffer(tlb[i].VPN2, eeMem->Scratch, Ps2MemSize::Scratch);
}

if (tlb[i].VPN2 == 0x70000000) return; //uh uhh right ...
if (tlb[i].EntryLo0 & 0x2) {
mask = ((~tlb[i].Mask) << 1) & 0xfffff;
saddr = tlb[i].VPN2 >> 12;
eaddr = saddr + tlb[i].Mask + 1;

for (addr=saddr; addr<eaddr; addr++) {
if ((addr & mask) == ((tlb[i].VPN2 >> 12) & mask)) { //match
memSetPageAddr(addr << 12, tlb[i].PFN0 + ((addr - saddr) << 12));
Cpu->Clear(addr << 12, 0x400);
}
}
}

if (tlb[i].EntryLo1 & 0x2) {
mask = ((~tlb[i].Mask) << 1) & 0xfffff;
saddr = (tlb[i].VPN2 >> 12) + tlb[i].Mask + 1;
eaddr = saddr + tlb[i].Mask + 1;

for (addr=saddr; addr<eaddr; addr++) {
if ((addr & mask) == ((tlb[i].VPN2 >> 12) & mask)) { //match
memSetPageAddr(addr << 12, tlb[i].PFN1 + ((addr - saddr) << 12));
Cpu->Clear(addr << 12, 0x400);
}
}
}
}

void UnmapTLB(int i)
{
//Console.WriteLn("Clear TLB %d: %08x-> [%08x %08x] S=%d G=%d ASID=%d Mask= %03X", i,tlb[i].VPN2,tlb[i].PFN0,tlb[i].PFN1,tlb[i].S,tlb[i].G,tlb[i].ASID,tlb[i].Mask);
u32 mask, addr;
u32 saddr, eaddr;

if (tlb[i].S)
{
vtlb_VMapUnmap(tlb[i].VPN2,0x4000);
return;
}

if (tlb[i].EntryLo0 & 0x2)
{
mask = ((~tlb[i].Mask) << 1) & 0xfffff;
saddr = tlb[i].VPN2 >> 12;
eaddr = saddr + tlb[i].Mask + 1;
// Console.WriteLn("Clear TLB: %08x ~ %08x",saddr,eaddr-1);
for (addr=saddr; addr<eaddr; addr++) {
if ((addr & mask) == ((tlb[i].VPN2 >> 12) & mask)) { //match
memClearPageAddr(addr << 12);
Cpu->Clear(addr << 12, 0x400);
}
}
}

if (tlb[i].EntryLo1 & 0x2) {
mask = ((~tlb[i].Mask) << 1) & 0xfffff;
saddr = (tlb[i].VPN2 >> 12) + tlb[i].Mask + 1;
eaddr = saddr + tlb[i].Mask + 1;
// Console.WriteLn("Clear TLB: %08x ~ %08x",saddr,eaddr-1);
for (addr=saddr; addr<eaddr; addr++) {
if ((addr & mask) == ((tlb[i].VPN2 >> 12) & mask)) { //match
memClearPageAddr(addr << 12);
Cpu->Clear(addr << 12, 0x400);
}
}
}
}

void WriteTLB(int i)
{
tlb[i].PageMask = cpuRegs.CP0.n.PageMask;
tlb[i].EntryHi = cpuRegs.CP0.n.EntryHi;
tlb[i].EntryLo0 = cpuRegs.CP0.n.EntryLo0;
tlb[i].EntryLo1 = cpuRegs.CP0.n.EntryLo1;

tlb[i].Mask = (cpuRegs.CP0.n.PageMask >> 13) & 0xfff;
tlb[i].nMask = (~tlb[i].Mask) & 0xfff;
tlb[i].VPN2 = ((cpuRegs.CP0.n.EntryHi >> 13) & (~tlb[i].Mask)) << 13;
tlb[i].ASID = cpuRegs.CP0.n.EntryHi & 0xfff;
tlb[i].G = cpuRegs.CP0.n.EntryLo0 & cpuRegs.CP0.n.EntryLo1 & 0x1;
tlb[i].PFN0 = (((cpuRegs.CP0.n.EntryLo0 >> 6) & 0xFFFFF) & (~tlb[i].Mask)) << 12;
tlb[i].PFN1 = (((cpuRegs.CP0.n.EntryLo1 >> 6) & 0xFFFFF) & (~tlb[i].Mask)) << 12;
tlb[i].S = cpuRegs.CP0.n.EntryLo0&0x80000000;

MapTLB(i);
}

namespace R5900 {
namespace Interpreter {
namespace OpcodeImpl {
namespace COP0 {

void TLBR() {
DevCon.Warning("COP0_TLBR %d:%x,%x,%x,%x\n",
cpuRegs.CP0.n.Random, cpuRegs.CP0.n.PageMask, cpuRegs.CP0.n.EntryHi,
cpuRegs.CP0.n.EntryLo0, cpuRegs.CP0.n.EntryLo1);

int i = cpuRegs.CP0.n.Index&0x1f;

cpuRegs.CP0.n.PageMask = tlb[i].PageMask;
cpuRegs.CP0.n.EntryHi = tlb[i].EntryHi&~(tlb[i].PageMask|0x1f00);
cpuRegs.CP0.n.EntryLo0 = (tlb[i].EntryLo0&~1)|((tlb[i].EntryHi>>12)&1);
cpuRegs.CP0.n.EntryLo1 =(tlb[i].EntryLo1&~1)|((tlb[i].EntryHi>>12)&1);
}

void TLBWI() {
int j = cpuRegs.CP0.n.Index & 0x3f;

//if (j > 48) return;

DevCon.Warning("COP0_TLBWI %d:%x,%x,%x,%x\n",
cpuRegs.CP0.n.Index, cpuRegs.CP0.n.PageMask, cpuRegs.CP0.n.EntryHi,
cpuRegs.CP0.n.EntryLo0, cpuRegs.CP0.n.EntryLo1);

UnmapTLB(j);
tlb[j].PageMask = cpuRegs.CP0.n.PageMask;
tlb[j].EntryHi = cpuRegs.CP0.n.EntryHi;
tlb[j].EntryLo0 = cpuRegs.CP0.n.EntryLo0;
tlb[j].EntryLo1 = cpuRegs.CP0.n.EntryLo1;
WriteTLB(j);
}

void TLBWR() {
int j = cpuRegs.CP0.n.Random & 0x3f;

//if (j > 48) return;

DevCon.Warning("COP0_TLBWR %d:%x,%x,%x,%x\n",
cpuRegs.CP0.n.Random, cpuRegs.CP0.n.PageMask, cpuRegs.CP0.n.EntryHi,
cpuRegs.CP0.n.EntryLo0, cpuRegs.CP0.n.EntryLo1);

//if (j > 48) return;

UnmapTLB(j);
tlb[j].PageMask = cpuRegs.CP0.n.PageMask;
tlb[j].EntryHi = cpuRegs.CP0.n.EntryHi;
tlb[j].EntryLo0 = cpuRegs.CP0.n.EntryLo0;
tlb[j].EntryLo1 = cpuRegs.CP0.n.EntryLo1;
WriteTLB(j);
}

void TLBP() {
int i;

union {
struct {
u32 VPN2:19;
u32 VPN2X:2;
u32 G:3;
u32 ASID:8;
} s;
u32 u;
} EntryHi32;

EntryHi32.u = cpuRegs.CP0.n.EntryHi;

cpuRegs.CP0.n.Index=0xFFFFFFFF;
for(i=0;i<48;i++){
if (tlb[i].VPN2 == ((~tlb[i].Mask) & (EntryHi32.s.VPN2))
&& ((tlb[i].G&1) || ((tlb[i].ASID & 0xff) == EntryHi32.s.ASID))) {
cpuRegs.CP0.n.Index = i;
break;
}
}
if(cpuRegs.CP0.n.Index == 0xFFFFFFFF) cpuRegs.CP0.n.Index = 0x80000000;
}

void MFC0()
{
// Note on _Rd_ Condition 9: CP0.Count should be updated even if _Rt_ is 0.
@@ -460,74 +537,6 @@ void BC0TL() {
cpuRegs.pc+= 4;
}

void TLBR() {
/* CPU_LOG("COP0_TLBR %d:%x,%x,%x,%x\n",
cpuRegs.CP0.n.Random, cpuRegs.CP0.n.PageMask, cpuRegs.CP0.n.EntryHi,
cpuRegs.CP0.n.EntryLo0, cpuRegs.CP0.n.EntryLo1);*/

int i = cpuRegs.CP0.n.Index&0x1f;

cpuRegs.CP0.n.PageMask = tlb[i].PageMask;
cpuRegs.CP0.n.EntryHi = tlb[i].EntryHi&~(tlb[i].PageMask|0x1f00);
cpuRegs.CP0.n.EntryLo0 = (tlb[i].EntryLo0&~1)|((tlb[i].EntryHi>>12)&1);
cpuRegs.CP0.n.EntryLo1 =(tlb[i].EntryLo1&~1)|((tlb[i].EntryHi>>12)&1);
}

void TLBWI() {
int j = cpuRegs.CP0.n.Index & 0x3f;

if (j > 48) return;

/* CPU_LOG("COP0_TLBWI %d:%x,%x,%x,%x\n",
cpuRegs.CP0.n.Index, cpuRegs.CP0.n.PageMask, cpuRegs.CP0.n.EntryHi,
cpuRegs.CP0.n.EntryLo0, cpuRegs.CP0.n.EntryLo1);*/

UnmapTLB(j);
WriteTLB(j);
}

void TLBWR() {
int j = cpuRegs.CP0.n.Random & 0x3f;

if (j > 48) return;

/* CPU_LOG("COP0_TLBWR %d:%x,%x,%x,%x\n",
cpuRegs.CP0.n.Random, cpuRegs.CP0.n.PageMask, cpuRegs.CP0.n.EntryHi,
cpuRegs.CP0.n.EntryLo0, cpuRegs.CP0.n.EntryLo1);*/

// if( !bExecBIOS )
// __Log("TLBWR %d\n", j);

UnmapTLB(j);
WriteTLB(j);
}

void TLBP() {
int i;

union {
struct {
u32 VPN2:19;
u32 VPN2X:2;
u32 G:3;
u32 ASID:8;
} s;
u32 u;
} EntryHi32;

EntryHi32.u = cpuRegs.CP0.n.EntryHi;

cpuRegs.CP0.n.Index=0xFFFFFFFF;
for(i=0;i<48;i++){
if (tlb[i].VPN2 == ((~tlb[i].Mask) & (EntryHi32.s.VPN2))
&& ((tlb[i].G&1) || ((tlb[i].ASID & 0xff) == EntryHi32.s.ASID))) {
cpuRegs.CP0.n.Index = i;
break;
}
}
if(cpuRegs.CP0.n.Index == 0xFFFFFFFF) cpuRegs.CP0.n.Index = 0x80000000;
}

void ERET() {
if (cpuRegs.CP0.n.Status.b.ERL) {
cpuRegs.pc = cpuRegs.CP0.n.ErrorEPC;
pcsx2/Cache.cpp | 501

@@ -17,382 +17,443 @@
#include "PrecompiledHeader.h"
#include "Common.h"
#include "Cache.h"

#include "vtlb.h"
_cacheS pCache[64];

namespace R5900{
namespace Interpreter
{
#ifdef PCSX2_CACHE_EMU_MEM
#define DIRTY_FLAG 0x40
#define VALID_FLAG 0x20
#define LRF_FLAG 0x10
#define LOCK_FLAG 0x8

int getFreeCache(u32 mem, int mode, int * way) {
u8 * out;
u32 paddr;
u32 taddr[2];
u8 * t;
using namespace R5900;
using namespace vtlb_private;

int getFreeCache(u32 mem, int mode, int * way ) {
int number;
int i = (mem >> 6) & 0x3F;
u32 vmv=vtlbdata.vmap[mem>>VTLB_PAGE_BITS];
s32 ppf=mem+vmv;

paddr = getMemR(mem);
taddr[0] = getMemW(pCache[i].tag[0]);
taddr[1] = getMemW(pCache[i].tag[1]);
u32 hand=(u8)vmv;
u32 paddr=ppf-hand+0x80000000;

if (taddr[0] == paddr && (pCache[i].tag[0] & 0x20))
if((cpuRegs.CP0.n.Config & 0x10000) == 0) DevCon.Warning("Cache off!");

if ((pCache[i].tag[0] & ~0xFFF) == (paddr & ~0xFFF) && (pCache[i].tag[0] & VALID_FLAG))
{
*way = 0;
if(pCache[i].tag[0] & LOCK_FLAG) DevCon.Warning("Index %x Way %x Locked!!", i, 0);
return i;
}
else if(taddr[1] == paddr && (pCache[i].tag[1] & 0x20))
else if((pCache[i].tag[1] & ~0xFFF) == (paddr & ~0xFFF) && (pCache[i].tag[1] & VALID_FLAG))
{
*way = 1;
if(pCache[i].tag[1] & LOCK_FLAG) DevCon.Warning("Index %x Way %x Locked!!", i, 1);
return i;
}

number = ((pCache[i].tag[0]>>4) & 1) ^ ((pCache[i].tag[1]>>4) & 1);

if(pCache[i].tag[number] & 0x60) // Valid Dirty
{
t = (u8*)(taddr[number]);
out = (u8*)(t + (mem & 0xFC0));
((u64*)out)[0] = ((u64*)pCache[i].data[number][0].b8._8)[0];
((u64*)out)[1] = ((u64*)pCache[i].data[number][0].b8._8)[1];
((u64*)out)[2] = ((u64*)pCache[i].data[number][1].b8._8)[0];
((u64*)out)[3] = ((u64*)pCache[i].data[number][1].b8._8)[1];
((u64*)out)[4] = ((u64*)pCache[i].data[number][2].b8._8)[0];
((u64*)out)[5] = ((u64*)pCache[i].data[number][2].b8._8)[1];
((u64*)out)[6] = ((u64*)pCache[i].data[number][3].b8._8)[0];
((u64*)out)[7] = ((u64*)pCache[i].data[number][3].b8._8)[1];
ppf = (ppf & ~0x3F) ;
if((pCache[i].tag[number] & (DIRTY_FLAG|VALID_FLAG)) == (DIRTY_FLAG|VALID_FLAG)) // Dirty
{
s32 oldppf = (pCache[i].tag[number] & ~0xf0000fff) + (mem & 0xFC0) + 0x20000000;

CACHE_LOG("Dirty cache fill! PPF %x", oldppf);
*reinterpret_cast<mem64_t*>(oldppf) = pCache[i].data[number][0].b8._u64[0];
*reinterpret_cast<mem64_t*>(oldppf+8) = pCache[i].data[number][0].b8._u64[1];
*reinterpret_cast<mem64_t*>(oldppf+16) = pCache[i].data[number][1].b8._u64[0];
*reinterpret_cast<mem64_t*>(oldppf+24) = pCache[i].data[number][1].b8._u64[1];
*reinterpret_cast<mem64_t*>(oldppf+32) = pCache[i].data[number][2].b8._u64[0];
*reinterpret_cast<mem64_t*>(oldppf+40) = pCache[i].data[number][2].b8._u64[1];
*reinterpret_cast<mem64_t*>(oldppf+48) = pCache[i].data[number][3].b8._u64[0];
*reinterpret_cast<mem64_t*>(oldppf+56) = pCache[i].data[number][3].b8._u64[1];
pCache[i].tag[number] &= ~DIRTY_FLAG;
}

pCache[i].data[number][0].b8._u64[0] = *(mem64_t*)(ppf);
pCache[i].data[number][0].b8._u64[1] = *(mem64_t*)(ppf+8);
pCache[i].data[number][1].b8._u64[0] = *(mem64_t*)(ppf+16);
pCache[i].data[number][1].b8._u64[1] = *(mem64_t*)(ppf+24);
pCache[i].data[number][2].b8._u64[0] = *(mem64_t*)(ppf+32);
pCache[i].data[number][2].b8._u64[1] = *(mem64_t*)(ppf+40);
pCache[i].data[number][3].b8._u64[0] = *(mem64_t*)(ppf+48);
pCache[i].data[number][3].b8._u64[1] = *(mem64_t*)(ppf+56);

if(mode == 1)
{
pCache[i].tag[number] |= 0x40; // Set Dirty Bit if mode == write
}

pCache[i].tag[number] &= ~(0xFFFFF000);
pCache[i].tag[number] |= ((mem>>12) & 0xFFFFF) << 12;

t = (u8 *)paddr;
out= (u8*)(t + (mem & 0xFC0));
((u64*)pCache[i].data[number][0].b8._8)[0] = ((u64*)out)[0];
((u64*)pCache[i].data[number][0].b8._8)[1] = ((u64*)out)[1];
((u64*)pCache[i].data[number][1].b8._8)[0] = ((u64*)out)[2];
((u64*)pCache[i].data[number][1].b8._8)[1] = ((u64*)out)[3];
((u64*)pCache[i].data[number][2].b8._8)[0] = ((u64*)out)[4];
((u64*)pCache[i].data[number][2].b8._8)[1] = ((u64*)out)[5];
((u64*)pCache[i].data[number][3].b8._8)[0] = ((u64*)out)[6];
((u64*)pCache[i].data[number][3].b8._8)[1] = ((u64*)out)[7];

if(pCache[i].tag[number] & 0x10)
pCache[i].tag[number] &= ~(0x10);
else
pCache[i].tag[number] |= 0x10;

pCache[i].tag[number] |= 0x20;
*way = number;
pCache[i].tag[number] |= VALID_FLAG;
pCache[i].tag[number] &= 0xFFF;
pCache[i].tag[number] |= paddr & ~0xFFF;

if(pCache[i].tag[number] & LRF_FLAG)
pCache[i].tag[number] &= ~LRF_FLAG;
else
pCache[i].tag[number] |= LRF_FLAG;

return i;

}
void writeCache8(u32 mem, u8 value) {
int i, number;

u32 vmv=vtlbdata.vmap[mem>>VTLB_PAGE_BITS];
s32 ppf=(mem+vmv) & ~0x3f;
i = getFreeCache(mem,1,&number);
// CACHE_LOG("writeCache8 %8.8x adding to %d, way %d, value %x", mem, i,number,value);

pCache[i].data[number][(mem>>4) & 0x3].b8._8[(mem&0xf)] = value;
CACHE_LOG("writeCache8 %8.8x adding to %d, way %d, value %x", mem, i,number,value);
pCache[i].tag[number] |= DIRTY_FLAG; // Set Dirty Bit if mode == write
pCache[i].data[number][(mem>>4) & 0x3].b8._u8[(mem&0xf)] = value;
}

void writeCache16(u32 mem, u16 value) {
int i, number;

u32 vmv=vtlbdata.vmap[mem>>VTLB_PAGE_BITS];
s32 ppf=(mem+vmv) & ~0x3f;
i = getFreeCache(mem,1,&number);
// CACHE_LOG("writeCache16 %8.8x adding to %d, way %d, value %x", mem, i,number,value);

*(u16*)(&pCache[i].data[number][(mem>>4) & 0x3].b8._8[(mem&0xf)]) = value;
CACHE_LOG("writeCache16 %8.8x adding to %d, way %d, value %x", mem, i,number,value);
pCache[i].tag[number] |= DIRTY_FLAG; // Set Dirty Bit if mode == write
pCache[i].data[number][(mem>>4) & 0x3].b8._u16[(mem&0xf)>>1] = value;
}

void writeCache32(u32 mem, u32 value) {
int i, number;

u32 vmv=vtlbdata.vmap[mem>>VTLB_PAGE_BITS];
s32 ppf=(mem+vmv) & ~0x3f;
i = getFreeCache(mem,1,&number);
// CACHE_LOG("writeCache32 %8.8x adding to %d, way %d, value %x", mem, i,number,value);
*(u32*)(&pCache[i].data[number][(mem>>4) & 0x3].b8._8[(mem&0xf)]) = value;
CACHE_LOG("writeCache32 %8.8x adding to %d, way %d, value %x", mem, i,number,value);
pCache[i].tag[number] |= DIRTY_FLAG; // Set Dirty Bit if mode == write
pCache[i].data[number][(mem>>4) & 0x3].b8._u32[(mem&0xf)>>2] = value;
}

void writeCache64(u32 mem, u64 value) {
void writeCache64(u32 mem, const u64 value) {
int i, number;

u32 vmv=vtlbdata.vmap[mem>>VTLB_PAGE_BITS];
s32 ppf=(mem+vmv) & ~0x3f;
i = getFreeCache(mem,1,&number);
// CACHE_LOG("writeCache64 %8.8x adding to %d, way %d, value %x", mem, i,number,value);
*(u64*)(&pCache[i].data[number][(mem>>4) & 0x3].b8._8[(mem&0xf)]) = value;
CACHE_LOG("writeCache64 %8.8x adding to %d, way %d, value %x", mem, i,number,value);
pCache[i].tag[number] |= DIRTY_FLAG; // Set Dirty Bit if mode == write
pCache[i].data[number][(mem>>4) & 0x3].b8._u64[(mem&0xf)>>3] = value;
}

void writeCache128(u32 mem, u64 *value) {
void writeCache128(u32 mem, const mem128_t* value){
int i, number;

u32 vmv=vtlbdata.vmap[mem>>VTLB_PAGE_BITS];
s32 ppf=(mem+vmv) & ~0x3f;
i = getFreeCache(mem,1,&number);
// CACHE_LOG("writeCache128 %8.8x adding to %d", mem, i);
((u64*)pCache[i].data[number][(mem>>4) & 0x3].b8._8)[0] = value[0];
((u64*)pCache[i].data[number][(mem>>4) & 0x3].b8._8)[1] = value[1];
CACHE_LOG("writeCache128 %8.8x adding to %d way %x tag %x vallo = %x_%x valhi = %x_%x", mem, i, number, pCache[i].tag[number], value->lo, value->hi);
pCache[i].tag[number] |= DIRTY_FLAG; // Set Dirty Bit if mode == write
pCache[i].data[number][(mem>>4) & 0x3].b8._u64[0] = value->lo;
pCache[i].data[number][(mem>>4) & 0x3].b8._u64[1] = value->hi;
}

u8 *readCache(u32 mem) {
u8 readCache8(u32 mem) {
int i, number;

i = getFreeCache(mem,0,&number);
// CACHE_LOG("readCache %8.8x from %d, way %d", mem, i,number);
CACHE_LOG("readCache %8.8x from %d, way %d QW %x u8 part %x Really Reading %x", mem, i,number, (mem >> 4) & 0x3, (mem&0xf)>>2, (u32)pCache[i].data[number][(mem >> 4) & 0x3].b8._u8[(mem&0xf)]);

return pCache[i].data[number][(mem>>4) & 0x3].b8._8;
return pCache[i].data[number][(mem >> 4) & 0x3].b8._u8[(mem&0xf)];
}

u16 readCache16(u32 mem) {
int i, number;

i = getFreeCache(mem,0,&number);
CACHE_LOG("readCache %8.8x from %d, way %d QW %x u16 part %x Really Reading %x", mem, i,number, (mem >> 4) & 0x3, (mem&0xf)>>2, (u32)pCache[i].data[number][(mem >> 4) & 0x3].b8._u16[(mem&0xf)>>1]);

return pCache[i].data[number][(mem >> 4) & 0x3].b8._u16[(mem&0xf)>>1];
}

u32 readCache32(u32 mem) {
int i, number;

i = getFreeCache(mem,0,&number);
CACHE_LOG("readCache %8.8x from %d, way %d QW %x u32 part %x Really Reading %x", mem, i,number, (mem >> 4) & 0x3, (mem&0xf)>>2, (u32)pCache[i].data[number][(mem >> 4) & 0x3].b8._u32[(mem&0xf)>>2]);

return pCache[i].data[number][(mem >> 4) & 0x3].b8._u32[(mem&0xf)>>2];
}

u64 readCache64(u32 mem) {
int i, number;

i = getFreeCache(mem,0,&number);
CACHE_LOG("readCache %8.8x from %d, way %d QW %x u64 part %x Really Reading %x_%x", mem, i,number, (mem >> 4) & 0x3, (mem&0xf)>>2, pCache[i].data[number][(mem >> 4) & 0x3].b8._u64[(mem&0xf)>>3]);

return pCache[i].data[number][(mem >> 4) & 0x3].b8._u64[(mem&0xf)>>3];
}

namespace R5900 {
namespace Interpreter
{
namespace OpcodeImpl
{

extern int Dcache;
void CACHE() {
u32 addr;
//if(Dcache == 0) return;

addr = cpuRegs.GPR.r[_Rs_].UL[0] + _Imm_;
switch (_Rt_) {
case 0x1a:
// DevCon.Warning("cpuRegs.GPR.r[_Rs_].UL[0] = %x, IMM = %x RT = %x", cpuRegs.GPR.r[_Rs_].UL[0], _Imm_, _Rt_);
switch (_Rt_)
{
case 0x1a: //DHIN (Data Cache Hit Invalidate)
{
int index = (addr >> 6) & 0x3F;
u32 paddr[2];
int way;
u32 taddr = getMemR(addr);
paddr[0] = getMemW(pCache[index].tag[0]);
paddr[1] = getMemW(pCache[index].tag[1]);
int way = 0;
u32 pfnaddr = addr;
u32 vmv=vtlbdata.vmap[pfnaddr>>VTLB_PAGE_BITS];
s32 ppf=pfnaddr+vmv;
u32 hand=(u8)vmv;
u32 paddr=ppf-hand+0x80000000;

if(paddr[0] == taddr && (pCache[index].tag[0] & 0x20))
if((paddr & ~0xFFF) == (pCache[index].tag[0] & ~0xfff) && (pCache[index].tag[0] & VALID_FLAG))
{
way = 0;
}
else if(paddr[1] == taddr && (pCache[index].tag[1] & 0x20))
else if((paddr & ~0xFFF) == (pCache[index].tag[1] & ~0xfff) && (pCache[index].tag[1] & VALID_FLAG))
{
way = 1;
}
else
{
CACHE_LOG("CACHE DHIN NO HIT addr %x, index %d, phys %x tag0 %x tag1 %x",addr,index, paddr, pCache[index].tag[0], pCache[index].tag[1]);
return;
}

CACHE_LOG("CACHE DHIN addr %x, index %d, way %d, Flags %x",addr,index,way,pCache[index].tag[way] & 0x78);
CACHE_LOG("CACHE DHIN addr %x, index %d, way %d, Flags %x OP %x",addr,index,way,pCache[index].tag[way] & 0x78, cpuRegs.code);

pCache[index].tag[way] &= LRF_FLAG;
pCache[index].data[way][0].b8._u64[0] = 0;
pCache[index].data[way][0].b8._u64[1] = 0;
pCache[index].data[way][1].b8._u64[0] = 0;
pCache[index].data[way][1].b8._u64[1] = 0;
pCache[index].data[way][2].b8._u64[0] = 0;
pCache[index].data[way][2].b8._u64[1] = 0;
pCache[index].data[way][3].b8._u64[0] = 0;
pCache[index].data[way][3].b8._u64[1] = 0;

pCache[index].tag[way] &= ~(0x6F);
((u64*)pCache[index].data[way][0].b8._8)[0] = 0;
((u64*)pCache[index].data[way][0].b8._8)[1] = 0;
((u64*)pCache[index].data[way][1].b8._8)[0] = 0;
((u64*)pCache[index].data[way][1].b8._8)[1] = 0;
((u64*)pCache[index].data[way][2].b8._8)[0] = 0;
((u64*)pCache[index].data[way][2].b8._8)[1] = 0;
((u64*)pCache[index].data[way][3].b8._8)[0] = 0;
((u64*)pCache[index].data[way][3].b8._8)[1] = 0;
break;
}
case 0x18:
case 0x18: //DHWBIN (Data Cache Hit WriteBack with Invalidate)
{
u8 * out;
int index = (addr >> 6) & 0x3F;
u32 paddr[2];
int way;
u32 taddr = getMemW(addr);
paddr[0] = getMemW(pCache[index].tag[0]);
paddr[1] = getMemW(pCache[index].tag[1]);
int way = 0;
u32 pfnaddr = addr;
u32 vmv=vtlbdata.vmap[pfnaddr>>VTLB_PAGE_BITS];
s32 ppf=(pfnaddr+vmv) & ~0x3F;
u32 hand=(u8)vmv;
u32 paddr=ppf-hand+0x80000000;

if(paddr[0] == taddr && (pCache[index].tag[0] & 0x20))
if ((pCache[index].tag[0] & ~0xFFF) == (paddr & ~0xFFF) && (pCache[index].tag[0] & VALID_FLAG))
{
way = 0;
}
else if(paddr[1] == taddr && (pCache[index].tag[1] & 0x20))
else if((pCache[index].tag[1] & ~0xFFF) == (paddr & ~0xFFF) && (pCache[index].tag[1] & VALID_FLAG))
{
way = 1;
}
else
{
CACHE_LOG("CACHE DHWBIN NO HIT addr %x, index %d, phys %x tag0 %x tag1 %x",addr,index, paddr, pCache[index].tag[0], pCache[index].tag[1]);
return;
}

CACHE_LOG("CACHE DHWBIN addr %x, index %d, phys %x tag0 %x tag1 %x way %x",addr,index, paddr, pCache[index].tag[0], pCache[index].tag[1], way );

CACHE_LOG("CACHE DHWBIN addr %x, index %d, way %d, Flags %x",addr,index,way,pCache[index].tag[way] & 0x78);

if(pCache[index].tag[way] & 0x60) // Valid Dirty
if((pCache[index].tag[way] & (DIRTY_FLAG|VALID_FLAG)) == (DIRTY_FLAG|VALID_FLAG)) // Dirty
{
char * t = (char *)(taddr);//paddr[way]);
out = (u8*)(t + (addr & 0xFC0));
((u64*)out)[0] = ((u64*)pCache[index].data[way][0].b8._8)[0];
((u64*)out)[1] = ((u64*)pCache[index].data[way][0].b8._8)[1];
((u64*)out)[2] = ((u64*)pCache[index].data[way][1].b8._8)[0];
((u64*)out)[3] = ((u64*)pCache[index].data[way][1].b8._8)[1];
((u64*)out)[4] = ((u64*)pCache[index].data[way][2].b8._8)[0];
((u64*)out)[5] = ((u64*)pCache[index].data[way][2].b8._8)[1];
((u64*)out)[6] = ((u64*)pCache[index].data[way][3].b8._8)[0];
((u64*)out)[7] = ((u64*)pCache[index].data[way][3].b8._8)[1];
CACHE_LOG("DHWBIN Dirty WriteBack PPF %x", ppf);

*reinterpret_cast<mem64_t*>(ppf) = pCache[index].data[way][0].b8._u64[0];
*reinterpret_cast<mem64_t*>(ppf+8) = pCache[index].data[way][0].b8._u64[1];
*reinterpret_cast<mem64_t*>(ppf+16) = pCache[index].data[way][1].b8._u64[0];
*reinterpret_cast<mem64_t*>(ppf+24) = pCache[index].data[way][1].b8._u64[1];
*reinterpret_cast<mem64_t*>(ppf+32) = pCache[index].data[way][2].b8._u64[0];
*reinterpret_cast<mem64_t*>(ppf+40) = pCache[index].data[way][2].b8._u64[1];
*reinterpret_cast<mem64_t*>(ppf+48) = pCache[index].data[way][3].b8._u64[0];
*reinterpret_cast<mem64_t*>(ppf+56) = pCache[index].data[way][3].b8._u64[1];
}

pCache[index].tag[way] &= ~(0x6F);
((u64*)pCache[index].data[way][0].b8._8)[0] = 0;
((u64*)pCache[index].data[way][0].b8._8)[1] = 0;
((u64*)pCache[index].data[way][1].b8._8)[0] = 0;
((u64*)pCache[index].data[way][1].b8._8)[1] = 0;
((u64*)pCache[index].data[way][2].b8._8)[0] = 0;
((u64*)pCache[index].data[way][2].b8._8)[1] = 0;
((u64*)pCache[index].data[way][3].b8._8)[0] = 0;
((u64*)pCache[index].data[way][3].b8._8)[1] = 0;
pCache[index].tag[way] &= LRF_FLAG;

pCache[index].data[way][0].b8._u64[0] = 0;
pCache[index].data[way][0].b8._u64[1] = 0;
pCache[index].data[way][1].b8._u64[0] = 0;
pCache[index].data[way][1].b8._u64[1] = 0;
pCache[index].data[way][2].b8._u64[0] = 0;
pCache[index].data[way][2].b8._u64[1] = 0;
pCache[index].data[way][3].b8._u64[0] = 0;
pCache[index].data[way][3].b8._u64[1] = 0;

break;
}
case 0x1c:
case 0x1c: //DHWOIN (Data Cache Hit WriteBack Without Invalidate)
{
u8 * out;
int index = (addr >> 6) & 0x3F;
u32 paddr[2];
int way;
u32 taddr = getMemW(addr);
paddr[0] = getMemW(pCache[index].tag[0]);
paddr[1] = getMemW(pCache[index].tag[1]);
int way = 0;
u32 pfnaddr = (pCache[index].tag[way] & ~0xf0000fff) | (addr & 0xfc0);
u32 vmv=vtlbdata.vmap[pfnaddr>>VTLB_PAGE_BITS];
s32 ppf=(pfnaddr+vmv) & ~0x3F;
u32 hand=(u8)vmv;
u32 paddr=ppf-hand+0x80000000;

CACHE_LOG("CACHE DHWOIN addr %x, index %d, way %d, Flags %x OP %x",addr,index,way,pCache[index].tag[way] & 0x78, cpuRegs.code);

if(paddr[0] == taddr && (pCache[index].tag[0] & 0x20))
if ((pCache[index].tag[0] & ~0xFFF) == (paddr & ~0xFFF) && (pCache[index].tag[0] & VALID_FLAG))
{
way = 0;
}
else if(paddr[1] == taddr && (pCache[index].tag[1] & 0x20))
else if((pCache[index].tag[1] & ~0xFFF) == (paddr & ~0xFFF) && (pCache[index].tag[1] & VALID_FLAG))
{
way = 1;
}
else
{
CACHE_LOG("CACHE DHWOIN NO HIT addr %x, index %d, phys %x tag0 %x tag1 %x",addr,index, paddr, pCache[index].tag[0], pCache[index].tag[1]);
return;
}
CACHE_LOG("CACHE DHWOIN addr %x, index %d, way %d, Flags %x",addr,index,way,pCache[index].tag[way] & 0x78);

if(pCache[index].tag[way] & 0x60) // Valid Dirty
if((pCache[index].tag[way] & (DIRTY_FLAG|VALID_FLAG)) == (DIRTY_FLAG|VALID_FLAG)) // Dirty
{
char * t = (char *)(taddr);
out = (u8*)(t + (addr & 0xFC0));
((u64*)out)[0] = ((u64*)pCache[index].data[way][0].b8._8)[0];
((u64*)out)[1] = ((u64*)pCache[index].data[way][0].b8._8)[1];
((u64*)out)[2] = ((u64*)pCache[index].data[way][1].b8._8)[0];
((u64*)out)[3] = ((u64*)pCache[index].data[way][1].b8._8)[1];
((u64*)out)[4] = ((u64*)pCache[index].data[way][2].b8._8)[0];
((u64*)out)[5] = ((u64*)pCache[index].data[way][2].b8._8)[1];
((u64*)out)[6] = ((u64*)pCache[index].data[way][3].b8._8)[0];
((u64*)out)[7] = ((u64*)pCache[index].data[way][3].b8._8)[1];
CACHE_LOG("DHWOIN Dirty WriteBack! PPF %x", ppf);
*reinterpret_cast<mem64_t*>(ppf) = pCache[index].data[way][0].b8._u64[0];
*reinterpret_cast<mem64_t*>(ppf+8) = pCache[index].data[way][0].b8._u64[1];
*reinterpret_cast<mem64_t*>(ppf+16) = pCache[index].data[way][1].b8._u64[0];
*reinterpret_cast<mem64_t*>(ppf+24) = pCache[index].data[way][1].b8._u64[1];
*reinterpret_cast<mem64_t*>(ppf+32) = pCache[index].data[way][2].b8._u64[0];
*reinterpret_cast<mem64_t*>(ppf+40) = pCache[index].data[way][2].b8._u64[1];
*reinterpret_cast<mem64_t*>(ppf+48) = pCache[index].data[way][3].b8._u64[0];
*reinterpret_cast<mem64_t*>(ppf+56) = pCache[index].data[way][3].b8._u64[1];

pCache[index].tag[way] &= ~DIRTY_FLAG;
}

pCache[index].tag[way] &= ~(0x40);
break;
}
case 0x16:
case 0x16: //DXIN (Data Cache Index Invalidate)
{
int index = (addr >> 6) & 0x3F;
int way = addr & 0x1;

CACHE_LOG("CACHE DXIN addr %x, index %d, way %d, flag %x\n",addr,index,way,pCache[index].tag[way] & 0x78);

pCache[index].tag[way] &= ~(0x6F);
pCache[index].tag[way] &= LRF_FLAG;

((u64*)pCache[index].data[way][0].b8._8)[0] = 0;
((u64*)pCache[index].data[way][0].b8._8)[1] = 0;
((u64*)pCache[index].data[way][1].b8._8)[0] = 0;
((u64*)pCache[index].data[way][1].b8._8)[1] = 0;
((u64*)pCache[index].data[way][2].b8._8)[0] = 0;
((u64*)pCache[index].data[way][2].b8._8)[1] = 0;
((u64*)pCache[index].data[way][3].b8._8)[0] = 0;
((u64*)pCache[index].data[way][3].b8._8)[1] = 0;
break;
pCache[index].data[way][0].b8._u64[0] = 0;
pCache[index].data[way][0].b8._u64[1] = 0;
pCache[index].data[way][1].b8._u64[0] = 0;
pCache[index].data[way][1].b8._u64[1] = 0;
pCache[index].data[way][2].b8._u64[0] = 0;
pCache[index].data[way][2].b8._u64[1] = 0;
pCache[index].data[way][3].b8._u64[0] = 0;
pCache[index].data[way][3].b8._u64[1] = 0;

break;
}
case 0x11:
{
int index = (addr >> 6) & 0x3F;
int way = addr & 0x1;
u8 * out = pCache[index].data[way][(addr>>4) & 0x3].b8._8;
cpuRegs.CP0.r[28] = *(u32 *)(out+(addr&0xf));

CACHE_LOG("CACHE DXLDT addr %x, index %d, way %d, DATA %x",addr,index,way,cpuRegs.CP0.r[28]);

break;
}
case 0x10:
case 0x11: //DXLDT (Data Cache Load Data into TagLo)
{
int index = (addr >> 6) & 0x3F;
int way = addr & 0x1;

cpuRegs.CP0.r[28] = 0;
cpuRegs.CP0.r[28] = pCache[index].tag[way];
cpuRegs.CP0.n.TagLo = pCache[index].data[way][(addr>>4) & 0x3].b8._u32[(addr&0xf)>>2];

CACHE_LOG("CACHE DXLTG addr %x, index %d, way %d, DATA %x",addr,index,way,cpuRegs.CP0.r[28]);
CACHE_LOG("CACHE DXLDT addr %x, index %d, way %d, DATA %x OP %x",addr,index,way,cpuRegs.CP0.r[28], cpuRegs.code);

break;
}
case 0x13:
case 0x10: //DXLTG (Data Cache Load Tag into TagLo)
{
int index = (addr >> 6) & 0x3F;
int way = addr & 0x1;
//u8 * out = pCache[index].data[way][(addr>>4) & 0x3].b8._8;
*(u32*)(&pCache[index].data[way][(addr>>4) & 0x3].b8._8[(addr&0xf)]) = cpuRegs.CP0.r[28];

cpuRegs.CP0.n.TagLo = pCache[index].tag[way];

CACHE_LOG("CACHE DXSDT addr %x, index %d, way %d, DATA %x",addr,index,way,cpuRegs.CP0.r[28]);
CACHE_LOG("CACHE DXLTG addr %x, index %d, way %d, DATA %x OP %x ",addr,index,way,cpuRegs.CP0.r[28], cpuRegs.code);

break;
}
case 0x12:
case 0x13: //DXSDT (Data Cache Store 32bits from TagLo)
{
int index = (addr >> 6) & 0x3F;
int way = addr & 0x1;
pCache[index].tag[way] = cpuRegs.CP0.r[28];

CACHE_LOG("CACHE DXSTG addr %x, index %d, way %d, DATA %x",addr,index,way,cpuRegs.CP0.r[28] & 0x6F);
pCache[index].data[way][(addr>>4) & 0x3].b8._u32[(addr&0xf)>>2] = cpuRegs.CP0.n.TagLo;

CACHE_LOG("CACHE DXSDT addr %x, index %d, way %d, DATA %x OP %x",addr,index,way,cpuRegs.CP0.r[28], cpuRegs.code);

break;
}
case 0x14:
case 0x12: //DXSTG (Data Cache Store Tag from TagLo)
{

u8 * out;
int index = (addr >> 6) & 0x3F;
int way = addr & 0x1;
pCache[index].tag[way] = cpuRegs.CP0.n.TagLo;

CACHE_LOG("CACHE DXSTG addr %x, index %d, way %d, DATA %x OP %x",addr,index,way,cpuRegs.CP0.r[28] & 0x6F, cpuRegs.code);

CACHE_LOG("CACHE DXWBIN addr %x, index %d, way %d, Flags %x",addr,index,way,pCache[index].tag[way] & 0x78);
break;
}
case 0x14: //DXWBIN (Data Cache Index WriteBack Invalidate)
{
int index = (addr >> 6) & 0x3F;
int way = addr & 0x1;
u32 pfnaddr = (pCache[index].tag[way] & ~0xf0000fff) + (addr & 0xFC0);
u32 vmv=vtlbdata.vmap[pfnaddr >>VTLB_PAGE_BITS];
s32 ppf=pfnaddr+vmv;
u32 hand=(u8)vmv;
u32 paddr=ppf-hand+0x80000000;

CACHE_LOG("CACHE DXWBIN addr %x, index %d, way %d, Flags %x Paddr %x tag %x",addr,index,way,pCache[index].tag[way] & 0x78, paddr, pCache[index].tag[way]);
if((pCache[index].tag[way] & (DIRTY_FLAG|VALID_FLAG)) == (DIRTY_FLAG|VALID_FLAG)) // Dirty
{
ppf = (ppf & 0x0fffffff) + 0x20000000;
CACHE_LOG("DXWBIN Dirty WriteBack! PPF %x", ppf);

*reinterpret_cast<mem64_t*>(ppf) = pCache[index].data[way][0].b8._u64[0];
*reinterpret_cast<mem64_t*>(ppf+8) = pCache[index].data[way][0].b8._u64[1];
*reinterpret_cast<mem64_t*>(ppf+16) = pCache[index].data[way][1].b8._u64[0];
*reinterpret_cast<mem64_t*>(ppf+24) = pCache[index].data[way][1].b8._u64[1];
*reinterpret_cast<mem64_t*>(ppf+32) = pCache[index].data[way][2].b8._u64[0];
*reinterpret_cast<mem64_t*>(ppf+40) = pCache[index].data[way][2].b8._u64[1];
*reinterpret_cast<mem64_t*>(ppf+48) = pCache[index].data[way][3].b8._u64[0];
*reinterpret_cast<mem64_t*>(ppf+56) = pCache[index].data[way][3].b8._u64[1];

if(pCache[index].tag[way] & 0x60) // Dirty
{
u32 paddr = getMemW(pCache[index].tag[way]);
char * t = (char *)(paddr);
out = (u8*)(t + (addr & 0xFC0));
((u64*)out)[0] = ((u64*)pCache[index].data[way][0].b8._8)[0];
((u64*)out)[1] = ((u64*)pCache[index].data[way][0].b8._8)[1];
((u64*)out)[2] = ((u64*)pCache[index].data[way][1].b8._8)[0];
((u64*)out)[3] = ((u64*)pCache[index].data[way][1].b8._8)[1];
((u64*)out)[4] = ((u64*)pCache[index].data[way][2].b8._8)[0];
((u64*)out)[5] = ((u64*)pCache[index].data[way][2].b8._8)[1];
((u64*)out)[6] = ((u64*)pCache[index].data[way][3].b8._8)[0];
((u64*)out)[7] = ((u64*)pCache[index].data[way][3].b8._8)[1];
}

pCache[index].tag[way] &= ~(0x6F);
((u64*)pCache[index].data[way][0].b8._8)[0] = 0;
((u64*)pCache[index].data[way][0].b8._8)[1] = 0;
((u64*)pCache[index].data[way][1].b8._8)[0] = 0;
((u64*)pCache[index].data[way][1].b8._8)[1] = 0;
((u64*)pCache[index].data[way][2].b8._8)[0] = 0;
((u64*)pCache[index].data[way][2].b8._8)[1] = 0;
((u64*)pCache[index].data[way][3].b8._8)[0] = 0;
((u64*)pCache[index].data[way][3].b8._8)[1] = 0;
pCache[index].tag[way] &= LRF_FLAG;

pCache[index].data[way][0].b8._u64[0] = 0;
pCache[index].data[way][0].b8._u64[1] = 0;
pCache[index].data[way][1].b8._u64[0] = 0;
pCache[index].data[way][1].b8._u64[1] = 0;
pCache[index].data[way][2].b8._u64[0] = 0;
pCache[index].data[way][2].b8._u64[1] = 0;
pCache[index].data[way][3].b8._u64[0] = 0;
pCache[index].data[way][3].b8._u64[1] = 0;
break;
}
case 0x7: //IXIN (Instruction Cache Index Invalidate)
{
//Not Implemented as we do not have instruction cache
break;
}
case 0xC: //BFH (BTAC Flush)
{
//Not Implemented as we do not cache Branch Target Addresses.
break;
}
default:
CACHE_LOG("Cache mode %x not impemented", _Rt_);
break;
}
}
} // end namespace OpcodeImpl
#else

namespace OpcodeImpl
{

void CACHE() {
}
}

#endif

}}
}}
@@ -18,9 +18,13 @@

#include "Common.h"

struct _u8bit_128 {
u8 _8[16];

union _u8bit_128
{
u8 _u8[16];
u16 _u16[8];
u32 _u32[4];
u64 _u64[2];
};

struct u8bit_128 {

@@ -38,19 +42,11 @@ extern _cacheS pCache[64];
void writeCache8(u32 mem, u8 value);
void writeCache16(u32 mem, u16 value);
void writeCache32(u32 mem, u32 value);
void writeCache64(u32 mem, u64 value);
void writeCache128(u32 mem, u64 *value);
u8 *readCache(u32 mem);

// Fixme - these two functions do nothing, and the cache code relies on these two functions.
static __fi u32 getMemR(s32 mem)
{
return 0;//memLUTR[mem >> 12];
}

static __fi u32 getMemW(s32 mem)
{
return 0;//memLUTW[mem>>12];
}
void writeCache64(u32 mem, const u64 value);
void writeCache128(u32 mem, const mem128_t* value);
u8 readCache8(u32 mem);
u16 readCache16(u32 mem);
u32 readCache32(u32 mem);
u64 readCache64(u32 mem);

#endif /* __CACHE_H__ */
@@ -223,6 +223,8 @@ struct Pcsx2Config
StackFrameChecks:1,
PreBlockCheckEE :1,
PreBlockCheckIOP:1;
bool
EnableEECache :1;
BITFIELD_END

RecompilerOptions();

@@ -467,6 +469,7 @@ TraceLogFilters& SetTraceConfig();
#define CHECK_MICROVU0 (EmuConfig.Cpu.Recompiler.UseMicroVU0)
#define CHECK_MICROVU1 (EmuConfig.Cpu.Recompiler.UseMicroVU1)
#define CHECK_EEREC (EmuConfig.Cpu.Recompiler.EnableEE && GetCpuProviders().IsRecAvailable_EE())
#define CHECK_CACHE (EmuConfig.Cpu.Recompiler.EnableEECache)
#define CHECK_IOPREC (EmuConfig.Cpu.Recompiler.EnableIOP && GetCpuProviders().IsRecAvailable_IOP())

//------------ SPECIAL GAME FIXES!!! ---------------
@@ -19,7 +19,6 @@
#include <signal.h>
#endif

//#define ENABLECACHE
#include "vtlb.h"

#include <xmmintrin.h>
@@ -88,6 +88,7 @@ Pcsx2Config::RecompilerOptions::RecompilerOptions()
// All recs are enabled by default.

EnableEE = true;
EnableEECache = false;
EnableIOP = true;
EnableVU0 = true;
EnableVU1 = true;

@@ -145,6 +146,7 @@ void Pcsx2Config::RecompilerOptions::LoadSave( IniInterface& ini )

IniBitBool( EnableEE );
IniBitBool( EnableIOP );
IniBitBool( EnableEECache );
IniBitBool( EnableVU0 );
IniBitBool( EnableVU1 );
@@ -105,7 +105,7 @@ static __fi bool ProcessEETag()
case TAG_CNT: break;

case TAG_CNTS:
if (dmacRegs.ctrl.STS != NO_STS)
if (dmacRegs.ctrl.STS == STS_SIF0)
dmacRegs.stadr.ADDR = sif0dma.madr + (sif0dma.qwc * 16);
break;

@@ -126,6 +126,7 @@ static __fi bool ProcessEETag()

case TAG_REF:
case TAG_REFS:
if(ptag->ID == TAG_REFS && dmacRegs.ctrl.STD == STD_SIF1) DevCon.Warning("SIF1 Drain Stall Control not implemented");
sif1dma.madr = ptag[1]._u32;
sif1dma.tadr += 16;
break;
@@ -175,6 +175,7 @@ namespace Panels
protected:
pxRadioPanel* m_panel_RecEE;
pxRadioPanel* m_panel_RecIOP;
pxCheckBox* m_check_EECacheEnable;
AdvancedOptionsFPU* m_advancedOptsFpu;

public:

@@ -151,15 +151,18 @@ Panels::CpuPanelEE::CpuPanelEE( wxWindow* parent )
wxStaticBoxSizer& s_iop ( *new wxStaticBoxSizer( wxVERTICAL, this, L"IOP" ) );

s_ee += m_panel_RecEE | StdExpand();
s_ee += m_check_EECacheEnable = new pxCheckBox( this, _("Enable EE Cache - Interpreter Only! (Slower)") );
s_iop += m_panel_RecIOP | StdExpand();

s_recs += s_ee | SubGroup();
s_recs += s_iop | SubGroup();

*this += &s_recs | StdExpand();

// move following line down so EE and VU panels look more uniform.
// Use an empty Label (std expanded) so it work with custom font sizes, too.
*this += Label(_("")) | StdExpand();
//*this += Label(_("")) | StdExpand();
*this += new wxStaticLine( this ) | pxExpand.Border(wxALL, 18);
*this += (m_advancedOptsFpu = new AdvancedOptionsFPU( this )) | StdExpand();

@@ -225,8 +228,9 @@ Panels::CpuPanelVU::CpuPanelVU( wxWindow* parent )
void Panels::CpuPanelEE::Apply()
{
Pcsx2Config::RecompilerOptions& recOps( g_Conf->EmuOptions.Cpu.Recompiler );
recOps.EnableEE = !!m_panel_RecEE->GetSelection();
recOps.EnableIOP = !!m_panel_RecIOP->GetSelection();
recOps.EnableEE = !!m_panel_RecEE->GetSelection();
recOps.EnableIOP = !!m_panel_RecIOP->GetSelection();
recOps.EnableEECache = m_check_EECacheEnable ->GetValue();
}

void Panels::CpuPanelEE::AppStatusEvent_OnSettingsApplied()

@@ -247,6 +251,8 @@ void Panels::CpuPanelEE::ApplyConfigToGui( AppConfig& configToApply, int flags )
m_panel_RecEE->Enable(!configToApply.EnablePresets);
m_panel_RecIOP->Enable(!configToApply.EnablePresets);

m_check_EECacheEnable ->SetValue(recOps.EnableEECache);

this->Enable(!configToApply.EnablePresets);

if( flags & AppConfig::APPLY_FLAG_MANUALLY_PROPAGATE )
pcsx2/vtlb.cpp | 126

@@ -33,6 +33,7 @@
#include "Common.h"
#include "vtlb.h"
#include "COP0.h"
#include "Cache.h"
#include "R5900Exceptions.h"

#include "Utilities/MemsetFast.inl"

@@ -55,7 +56,29 @@ static vtlbHandler UnmappedVirtHandler1;
static vtlbHandler UnmappedPhyHandler0;
static vtlbHandler UnmappedPhyHandler1;

__inline int CheckCache(u32 addr)
{
u32 mask;

if(((cpuRegs.CP0.n.Config >> 16) & 0x1) == 0)
{
//DevCon.Warning("Data Cache Disabled! %x", cpuRegs.CP0.n.Config);
return false;//
}

for(int i = 1; i < 48; i++)
{
if (((tlb[i].EntryLo1 & 0x38) >> 3) == 0x3 || ((tlb[i].EntryLo0 & 0x38) >> 3) == 0x3) {
mask = tlb[i].PageMask;

if ((addr >= tlb[i].PFN0) && (addr <= tlb[i].PFN0 + mask)) {
//DevCon.Warning("Yay! Cache check cache addr=%x, mask=%x, addr+mask=%x, VPN2=%x", addr, mask, (addr & mask), tlb[i].VPN2);
return true;
}
}
}
return false;
}
// --------------------------------------------------------------------------------------
// Interpreter Implementations of VTLB Memory Operations.
// --------------------------------------------------------------------------------------

@@ -69,7 +92,30 @@ DataType __fastcall vtlb_memRead(u32 addr)
s32 ppf=addr+vmv;

if (!(ppf<0))
{
if (!CHECK_EEREC)
{
if(CHECK_CACHE && CheckCache(addr))
{
switch( DataSize )
{
case 8:
return readCache8(addr);
break;
case 16:
return readCache16(addr);
break;
case 32:
return readCache32(addr);
break;

jNO_DEFAULT;
}
}
}

return *reinterpret_cast<DataType*>(ppf);
}

//has to: translate, find function, call function
u32 hand=(u8)vmv;

@@ -79,9 +125,15 @@ DataType __fastcall vtlb_memRead(u32 addr)

switch( DataSize )
{
case 8: return ((vtlbMemR8FP*)vtlbdata.RWFT[0][0][hand])(paddr);
case 16: return ((vtlbMemR16FP*)vtlbdata.RWFT[1][0][hand])(paddr);
case 32: return ((vtlbMemR32FP*)vtlbdata.RWFT[2][0][hand])(paddr);
case 8:

return ((vtlbMemR8FP*)vtlbdata.RWFT[0][0][hand])(paddr);
case 16:

return ((vtlbMemR16FP*)vtlbdata.RWFT[1][0][hand])(paddr);
case 32:

return ((vtlbMemR32FP*)vtlbdata.RWFT[2][0][hand])(paddr);

jNO_DEFAULT;
}

@@ -96,6 +148,14 @@ void __fastcall vtlb_memRead64(u32 mem, mem64_t *out)

if (!(ppf<0))
{
if (!CHECK_EEREC) {
if(CHECK_CACHE && CheckCache(mem))
{
*out = readCache64(mem);
return;
}
}

*out = *(mem64_t*)ppf;
}
else

@@ -115,6 +175,16 @@ void __fastcall vtlb_memRead128(u32 mem, mem128_t *out)

if (!(ppf<0))
{
if (!CHECK_EEREC)
{
if(CHECK_CACHE && CheckCache(mem))
{
out->lo = readCache64(mem);
out->hi = readCache64(mem+8);
return;
}
}

CopyQWC(out,(void*)ppf);
}
else

@@ -136,7 +206,26 @@ void __fastcall vtlb_memWrite(u32 addr, DataType data)
u32 vmv=vtlbdata.vmap[addr>>VTLB_PAGE_BITS];
s32 ppf=addr+vmv;
if (!(ppf<0))
{
{
if (!CHECK_EEREC)
{
if(CHECK_CACHE && CheckCache(addr))
{
switch( DataSize )
{
case 8:
writeCache8(addr, data);
return;
case 16:
writeCache16(addr, data);
return;
case 32:
writeCache32(addr, data);
return;
}
}
}

*reinterpret_cast<DataType*>(ppf)=data;
}
else

@@ -148,9 +237,12 @@ void __fastcall vtlb_memWrite(u32 addr, DataType data)

switch( DataSize )
{
case 8: return ((vtlbMemW8FP*)vtlbdata.RWFT[0][1][hand])(paddr, (u8)data);
case 16: return ((vtlbMemW16FP*)vtlbdata.RWFT[1][1][hand])(paddr, (u16)data);
case 32: return ((vtlbMemW32FP*)vtlbdata.RWFT[2][1][hand])(paddr, (u32)data);
case 8:
return ((vtlbMemW8FP*)vtlbdata.RWFT[0][1][hand])(paddr, (u8)data);
case 16:
return ((vtlbMemW16FP*)vtlbdata.RWFT[1][1][hand])(paddr, (u16)data);
case 32:
return ((vtlbMemW32FP*)vtlbdata.RWFT[2][1][hand])(paddr, (u32)data);

jNO_DEFAULT;
}

@@ -162,7 +254,16 @@ void __fastcall vtlb_memWrite64(u32 mem, const mem64_t* value)
u32 vmv=vtlbdata.vmap[mem>>VTLB_PAGE_BITS];
s32 ppf=mem+vmv;
if (!(ppf<0))
{
{
if (!CHECK_EEREC)
{
if(CHECK_CACHE && CheckCache(mem))
{
writeCache64(mem, *value);
return;
}
}

*(mem64_t*)ppf = *value;
}
else

@@ -182,6 +283,15 @@ void __fastcall vtlb_memWrite128(u32 mem, const mem128_t *value)
s32 ppf=mem+vmv;
if (!(ppf<0))
{
if (!CHECK_EEREC)
{
if(CHECK_CACHE && CheckCache(mem))
{
writeCache128(mem, value);
return;
}
}

CopyQWC((void*)ppf, value);
}
else
@@ -197,7 +197,7 @@ void recMTSAH( void )
**********************************************************/

// Suikoden 3 uses it a lot
void recCACHE()
void recCACHE() //Interpreter only!
{
//MOV32ItoM( (uptr)&cpuRegs.code, (u32)cpuRegs.code );
//MOV32ItoM( (uptr)&cpuRegs.pc, (u32)pc );