ee: update cache emulation for 64 bits

TellowKrinkle authored 2020-10-17 04:31:26 -05:00 · committed by refractionpcsx2
parent 82ea30cf3f
commit 6ad8563c16
3 changed files with 278 additions and 374 deletions

View File

@@ -22,7 +22,21 @@
 using namespace R5900;
 using namespace vtlb_private;
 
-_cacheS pCache[64];
+namespace
+{
+	union alignas(64) CacheData
+	{
+		u8 bytes[64];
+
+		constexpr CacheData(): bytes{0} {}
+	};
+
+	struct CacheTag
+	{
+		uptr rawValue = 0;
+
+		CacheTag() = default;
 
 		// The lower bits of a cache tag are laid out as follows:
 		// 31 - 12: The physical address cache tag.
@@ -33,242 +47,268 @@ _cacheS pCache[64];
 		// 3: Lock flag.
 		// 2-0: Unused.
 		// 0xFFF is 12 bits, so x & ~0xFFF = the physical address cache tag.
-const u32 DIRTY_FLAG = 0x40;
-const u32 VALID_FLAG = 0x20;
-const u32 LRF_FLAG = 0x10;
-const u32 LOCK_FLAG = 0x8;
+		enum Flags : decltype(rawValue)
+		{
+			DIRTY_FLAG = 0x40,
+			VALID_FLAG = 0x20,
+			LRF_FLAG = 0x10,
+			LOCK_FLAG = 0x8,
+			ALL_FLAGS = 0xFFF
+		};
+
+		int flags() const
+		{
+			return rawValue & ALL_FLAGS;
+		}
+
+		bool isValid() const { return rawValue & VALID_FLAG; }
+		bool isDirty() const { return rawValue & DIRTY_FLAG; }
+		bool lrf() const { return rawValue & LRF_FLAG; }
+		bool isLocked() const { return rawValue & LOCK_FLAG; }
+
+		bool isDirtyAndValid() const
+		{
+			return (rawValue & (DIRTY_FLAG | VALID_FLAG)) == (DIRTY_FLAG | VALID_FLAG);
+		}
+
+		void setValid() { rawValue |= VALID_FLAG; }
+		void setDirty() { rawValue |= DIRTY_FLAG; }
+		void setLocked() { rawValue |= LOCK_FLAG; }
+		void clearValid() { rawValue &= ~VALID_FLAG; }
+		void clearDirty() { rawValue &= ~DIRTY_FLAG; }
+		void clearLocked() { rawValue &= ~LOCK_FLAG; }
+		void toggleLRF() { rawValue ^= LRF_FLAG; }
+
+		uptr addr() const { return rawValue & ~ALL_FLAGS; }
+
+		void setAddr(uptr addr)
+		{
+			rawValue &= ALL_FLAGS;
+			rawValue |= (addr & ~ALL_FLAGS);
+		}
+
+		bool matches(uptr other) const
+		{
+			return isValid() && addr() == (other & ~ALL_FLAGS);
+		}
+
+		void clear()
+		{
+			rawValue &= LRF_FLAG;
+		}
+	};
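A note on the tag layout above: the tag now stores a host (x86) pointer whose low 12 bits are masked off and reused for the flags. Nothing is lost by this, because bits 0-5 of that pointer are just the byte offset within the line, and bits 6-11 always equal the set index, which is why CacheLine::addr() below can reconstruct them with set << 6. A small sketch of the round trip, with a made-up host address:

	CacheTag tag;
	tag.setAddr(0x7f0012345ec0); // hypothetical host pointer; the low 12 bits are dropped
	tag.setValid();
	tag.setDirty();
	// tag.addr()  == 0x7f0012345000  (flag bits cleared)
	// tag.flags() == DIRTY_FLAG | VALID_FLAG == 0x60
	// tag.matches(0x7f0012345678) == true (same 4 KB-aligned host page, and valid)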
+	struct CacheLine
+	{
+		CacheTag& tag;
+		CacheData& data;
+		int set;
+
+		uptr addr()
+		{
+			return tag.addr() | (set << 6);
+		}
+
+		void writeBackIfNeeded()
+		{
+			if (!tag.isDirtyAndValid())
+				return;
+
+			uptr target = addr();
+
+			CACHE_LOG("Write back at %zx", target);
+			*reinterpret_cast<CacheData*>(target) = data;
+			tag.clearDirty();
+		}
+
+		void load(uptr ppf)
+		{
+			pxAssertMsg(!tag.isDirtyAndValid(), "Loaded a value into cache without writing back the old one!");
+
+			tag.setAddr(ppf);
+			data = *reinterpret_cast<CacheData*>(ppf & ~0x3FULL);
+			tag.setValid();
+			tag.clearDirty();
+		}
+
+		void clear()
+		{
+			tag.clear();
+			data = CacheData();
+		}
+	};
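The single assignments in writeBackIfNeeded() and load() are where the old eight-store/eight-load sequences went: CacheData is a trivially copyable 64-byte union, so one struct assignment moves the whole line. An illustrative equivalence, taking std::memcpy as the reference behaviour:

	CacheData line, memory;
	line = memory;                              // what load() does: one 64-byte copy
	std::memcpy(line.bytes, memory.bytes, 64);  // equivalent byte-wise copy (needs <cstring>)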
+	struct CacheSet
+	{
+		CacheTag tags[2];
+		CacheData data[2];
+	};
+
+	struct Cache
+	{
+		CacheSet sets[64];
+
+		int setIdxFor(u32 vaddr) const
+		{
+			return (vaddr >> 6) & 0x3F;
+		}
+
+		CacheLine lineAt(int idx, int way)
+		{
+			return { sets[idx].tags[way], sets[idx].data[way], idx };
+		}
+	};
+
+	static Cache cache;
+}
+
+void resetCache()
+{
+	memzero(cache);
+}
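The geometry here matches the EE's 8 KB two-way data cache: 64 sets, 2 ways, 64-byte lines. How an address splits into fields, using the same shifts and masks as setIdxFor() (the example address is arbitrary):

	const u32 vaddr = 0x00123456;            // any cacheable address
	const int offset = vaddr & 0x3F;         // bits 0-5: byte within the 64-byte line
	const int setIdx = (vaddr >> 6) & 0x3F;  // bits 6-11: one of 64 sets (what setIdxFor computes)
	// bits 12 and up are translated through the vtlb and compared via CacheTag::matches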
+static bool findInCache(const CacheSet& set, uptr ppf, int* way)
+{
+	auto check = [&](int checkWay) -> bool
+	{
+		if (!set.tags[checkWay].matches(ppf))
+			return false;
+		*way = checkWay;
+		return true;
+	};
+
+	return check(0) || check(1);
+}
+
-// TODO: You can't assume the vtlb entry is in both states at once!
-int getFreeCache(u32 mem, int mode, int* way)
-{
-	int number = 0;
-	const int i = (mem >> 6) & 0x3F;
-	const auto vmv = vtlbdata.vmap[mem >> VTLB_PAGE_BITS];
-	uptr ppf = vmv.assumePtr(mem);
-	const u32 paddr = vmv.assumeHandlerGetPAddr(mem);
-
-	if((cpuRegs.CP0.n.Config & 0x10000) == 0) CACHE_LOG("Cache off!");
-
-	if ((pCache[i].tag[0] & ~0xFFF) == (paddr & ~0xFFF) && (pCache[i].tag[0] & VALID_FLAG))
-	{
-		*way = 0;
-		if (pCache[i].tag[0] & LOCK_FLAG) CACHE_LOG("Index %x Way %x Locked!!", i, 0);
-		return i;
-	}
-	else if((pCache[i].tag[1] & ~0xFFF) == (paddr & ~0xFFF) && (pCache[i].tag[1] & VALID_FLAG))
-	{
-		*way = 1;
-		if (pCache[i].tag[1] & LOCK_FLAG) CACHE_LOG("Index %x Way %x Locked!!", i, 1);
-		return i;
-	}
-
-	number = (((pCache[i].tag[0]) & LRF_FLAG) ^ ((pCache[i].tag[1]) & LRF_FLAG)) >> 4;
-	ppf = (ppf & ~0x3F);
-
-	if ((pCache[i].tag[number] & (DIRTY_FLAG | VALID_FLAG)) == (DIRTY_FLAG | VALID_FLAG)) // Dirty Write
-	{
-		s32 oldppf = (pCache[i].tag[number] & ~0x80000fff) + (mem & 0xFC0);
-
-		CACHE_LOG("Dirty cache fill! PPF %x", oldppf);
-		*reinterpret_cast<mem64_t*>(oldppf) = pCache[i].data[number][0].b8._u64[0];
-		*reinterpret_cast<mem64_t*>(oldppf+8) = pCache[i].data[number][0].b8._u64[1];
-		*reinterpret_cast<mem64_t*>(oldppf+16) = pCache[i].data[number][1].b8._u64[0];
-		*reinterpret_cast<mem64_t*>(oldppf+24) = pCache[i].data[number][1].b8._u64[1];
-		*reinterpret_cast<mem64_t*>(oldppf+32) = pCache[i].data[number][2].b8._u64[0];
-		*reinterpret_cast<mem64_t*>(oldppf+40) = pCache[i].data[number][2].b8._u64[1];
-		*reinterpret_cast<mem64_t*>(oldppf+48) = pCache[i].data[number][3].b8._u64[0];
-		*reinterpret_cast<mem64_t*>(oldppf+56) = pCache[i].data[number][3].b8._u64[1];
-
-		pCache[i].tag[number] &= ~DIRTY_FLAG;
-	}
-
-	pCache[i].data[number][0].b8._u64[0] = *reinterpret_cast<mem64_t*>(ppf);
-	pCache[i].data[number][0].b8._u64[1] = *reinterpret_cast<mem64_t*>(ppf+8);
-	pCache[i].data[number][1].b8._u64[0] = *reinterpret_cast<mem64_t*>(ppf+16);
-	pCache[i].data[number][1].b8._u64[1] = *reinterpret_cast<mem64_t*>(ppf+24);
-	pCache[i].data[number][2].b8._u64[0] = *reinterpret_cast<mem64_t*>(ppf+32);
-	pCache[i].data[number][2].b8._u64[1] = *reinterpret_cast<mem64_t*>(ppf+40);
-	pCache[i].data[number][3].b8._u64[0] = *reinterpret_cast<mem64_t*>(ppf+48);
-	pCache[i].data[number][3].b8._u64[1] = *reinterpret_cast<mem64_t*>(ppf+56);
-
-	*way = number;
-	pCache[i].tag[number] |= VALID_FLAG;
-	pCache[i].tag[number] &= 0xFFF;
-	pCache[i].tag[number] |= paddr & ~0xFFF;
-
-	if(pCache[i].tag[number] & LRF_FLAG)
-		pCache[i].tag[number] &= ~LRF_FLAG;
-	else
-		pCache[i].tag[number] |= LRF_FLAG;
-
-	return i;
-}
+static int getFreeCache(u32 mem, int* way)
+{
+	const int setIdx = cache.setIdxFor(mem);
+	CacheSet& set = cache.sets[setIdx];
+	VTLBVirtual vmv = vtlbdata.vmap[mem >> VTLB_PAGE_BITS];
+	pxAssertMsg(!vmv.isHandler(mem), "Cache currently only supports non-handler addresses!");
+	uptr ppf = vmv.assumePtr(mem);
+
+	if ((cpuRegs.CP0.n.Config & 0x10000) == 0)
+		CACHE_LOG("Cache off!");
+
+	if (findInCache(set, ppf, way))
+	{
+		if (set.tags[*way].isLocked())
+			CACHE_LOG("Index %x Way %x Locked!!", setIdx, *way);
+	}
+	else
+	{
+		int newWay = set.tags[0].lrf() ^ set.tags[1].lrf();
+		*way = newWay;
+		CacheLine line = cache.lineAt(setIdx, newWay);
+
+		line.writeBackIfNeeded();
+		line.load(ppf);
+		line.tag.toggleLRF();
+	}
+
+	return setIdx;
+}
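Victim selection on a miss is the LRF (least recently filled) scheme: lrf() returns a bool, so XOR-ing the two ways picks the way that was filled longer ago, and toggleLRF() then flips the bit of the way just filled. The behaviour of the expression used above, spelled out:

	// tags[0].lrf() ^ tags[1].lrf() -> way to fill next
	//    0 ^ 0 -> way 0   (way 0's LRF then toggles to 1)
	//    1 ^ 0 -> way 1   (way 1's LRF then toggles to 1)
	//    1 ^ 1 -> way 0   (and so on, alternating between the two ways)
	int victim = set.tags[0].lrf() ^ set.tags[1].lrf();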
+template <typename Int>
+void writeCache(u32 mem, Int value)
+{
+	int way = 0;
+	const int idx = getFreeCache(mem, &way);
+
+	CACHE_LOG("writeCache%d %8.8x adding to %d, way %d, value %llx", 8 * sizeof(value), mem, idx, way, value);
+
+	CacheLine line = cache.lineAt(idx, way);
+	line.tag.setDirty(); // Set dirty bit for writes
+	u32 aligned = mem & ~(sizeof(value) - 1);
+	*reinterpret_cast<Int*>(&line.data.bytes[aligned & 0x3f]) = value;
+}
+
 void writeCache8(u32 mem, u8 value)
 {
-	int number = 0;
-	const int i = getFreeCache(mem, 1, &number);
-
-	if (i == -1)
-	{
-		auto vmv = vtlbdata.vmap[mem >> VTLB_PAGE_BITS];
-		*reinterpret_cast<mem8_t*>(vmv.assumePtr(mem)) = value;
-		return;
-	}
-
-	CACHE_LOG("writeCache8 %8.8x adding to %d, way %d, value %x", mem, i, number, value);
-	pCache[i].tag[number] |= DIRTY_FLAG; // Set Dirty Bit if mode == write
-	pCache[i].data[number][(mem >> 4) & 0x3].b8._u8[(mem & 0xf)] = value;
+	writeCache<u8>(mem, value);
 }
 
 void writeCache16(u32 mem, u16 value)
 {
-	int number = 0;
-	const int i = getFreeCache(mem, 1, &number);
-
-	if (i == -1)
-	{
-		auto vmv = vtlbdata.vmap[mem >> VTLB_PAGE_BITS];
-		*reinterpret_cast<mem16_t*>(vmv.assumePtr(mem)) = value;
-		return;
-	}
-
-	CACHE_LOG("writeCache16 %8.8x adding to %d, way %d, value %x", mem, i, number, value);
-	pCache[i].tag[number] |= DIRTY_FLAG; // Set Dirty Bit if mode == write
-	pCache[i].data[number][(mem >> 4) & 0x3].b8._u16[(mem & 0xf) >> 1] = value;
+	writeCache<u16>(mem, value);
 }
 
 void writeCache32(u32 mem, u32 value)
 {
-	int number = 0;
-	const int i = getFreeCache(mem, 1, &number);
-
-	if (i == -1)
-	{
-		auto vmv = vtlbdata.vmap[mem >> VTLB_PAGE_BITS];
-		*reinterpret_cast<mem32_t*>(vmv.assumePtr(mem)) = value;
-		return;
-	}
-
-	CACHE_LOG("writeCache32 %8.8x adding to %d, way %d, value %x", mem, i, number, value);
-	pCache[i].tag[number] |= DIRTY_FLAG; // Set Dirty Bit if mode == write
-	pCache[i].data[number][(mem >> 4) & 0x3].b8._u32[(mem & 0xf) >> 2] = value;
+	writeCache<u32>(mem, value);
 }
 
 void writeCache64(u32 mem, const u64 value)
 {
-	int number = 0;
-	const int i = getFreeCache(mem, 1, &number);
-
-	if (i == -1)
-	{
-		auto vmv = vtlbdata.vmap[mem >> VTLB_PAGE_BITS];
-		*reinterpret_cast<mem64_t*>(vmv.assumePtr(mem)) = value;
-		return;
-	}
-
-	CACHE_LOG("writeCache64 %8.8x adding to %d, way %d, value %x", mem, i, number, value);
-	pCache[i].tag[number] |= DIRTY_FLAG; // Set Dirty Bit if mode == write
-	pCache[i].data[number][(mem >> 4) & 0x3].b8._u64[(mem & 0xf) >> 3] = value;
+	writeCache<u64>(mem, value);
 }
 
 void writeCache128(u32 mem, const mem128_t* value)
 {
-	int number = 0;
-	const int i = getFreeCache(mem, 1, &number);
-
-	if (i == -1)
-	{
-		auto vmv = vtlbdata.vmap[mem >> VTLB_PAGE_BITS];
-		auto ptr = reinterpret_cast<mem64_t*>(vmv.assumePtr(mem));
-		ptr[0] = value->lo;
-		ptr[1] = value->hi;
-		return;
-	}
-
-	CACHE_LOG("writeCache128 %8.8x adding to %d way %x tag %x vallo = %x_%x valhi = %x_%x", mem, i, number, pCache[i].tag[number], value->lo, value->hi);
-	pCache[i].tag[number] |= DIRTY_FLAG; // Set Dirty Bit if mode == write
-	pCache[i].data[number][(mem>>4) & 0x3].b8._u64[0] = value->lo;
-	pCache[i].data[number][(mem>>4) & 0x3].b8._u64[1] = value->hi;
+	int way = 0;
+	const int idx = getFreeCache(mem, &way);
+
+	CACHE_LOG("writeCache128 %8.8x adding to %d, way %x, lo %x, hi %x", mem, idx, way, value->lo, value->hi);
+
+	CacheLine line = cache.lineAt(idx, way);
+	line.tag.setDirty(); // Set dirty bit for writes
+	u32 aligned = mem & ~0xF;
+	*reinterpret_cast<mem128_t*>(&line.data.bytes[aligned & 0x3f]) = *value;
+}
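One subtlety in the templated write path: the address is aligned down to the operand size before the low six bits are taken, so a misaligned address is truncated to its slot boundary rather than straddling it. A worked example for a 32-bit store, with a hypothetical address:

	// writeCache<u32>(0x00100046, v):
	//   aligned = 0x00100046 & ~3   = 0x00100044
	//   offset  = 0x00100044 & 0x3F = 0x04   -> bytes 4-7 of the line
	// getFreeCache has already fetched the line (writing back the old one if dirty)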
+template <typename Int>
+Int readCache(u32 mem)
+{
+	int way = 0;
+	const int idx = getFreeCache(mem, &way);
+
+	CacheLine line = cache.lineAt(idx, way);
+	u32 aligned = mem & ~(sizeof(Int) - 1);
+	Int value = *reinterpret_cast<Int*>(&line.data.bytes[aligned & 0x3f]);
+
+	CACHE_LOG("readCache%d %8.8x from %d, way %d, value %llx", 8 * sizeof(value), mem, idx, way, value);
+
+	return value;
+}
+
 u8 readCache8(u32 mem)
 {
-	int number = 0;
-	const int i = getFreeCache(mem, 0, &number);
-
-	if (i == -1)
-	{
-		auto vmv = vtlbdata.vmap[mem >> VTLB_PAGE_BITS];
-		return *reinterpret_cast<u8*>(vmv.assumePtr(mem));
-	}
-
-	CACHE_LOG("readCache %8.8x from %d, way %d QW %x u8 part %x Really Reading %x", mem, i, number, (mem >> 4) & 0x3, (mem & 0xf) >> 2, (u32)pCache[i].data[number][(mem >> 4) & 0x3].b8._u8[(mem & 0xf)]);
-
-	return pCache[i].data[number][(mem >> 4) & 0x3].b8._u8[(mem & 0xf)];
+	return readCache<u8>(mem);
 }
 
 u16 readCache16(u32 mem)
 {
-	int number = 0;
-	const int i = getFreeCache(mem, 0, &number);
-
-	if (i == -1)
-	{
-		auto vmv = vtlbdata.vmap[mem >> VTLB_PAGE_BITS];
-		return *reinterpret_cast<u16*>(vmv.assumePtr(mem));
-	}
-
-	CACHE_LOG("readCache %8.8x from %d, way %d QW %x u16 part %x Really Reading %x", mem, i, number, (mem >> 4) & 0x3, (mem & 0xf) >> 2, (u32)pCache[i].data[number][(mem >> 4) & 0x3].b8._u16[(mem & 0xf) >> 1]);
-
-	return pCache[i].data[number][(mem >> 4) & 0x3].b8._u16[(mem & 0xf) >> 1];
+	return readCache<u16>(mem);
 }
 
 u32 readCache32(u32 mem)
 {
-	int number = 0;
-	const int i = getFreeCache(mem, 0, &number);
-
-	if (i == -1)
-	{
-		auto vmv = vtlbdata.vmap[mem >> VTLB_PAGE_BITS];
-		return *reinterpret_cast<u32*>(vmv.assumePtr(mem));
-	}
-
-	CACHE_LOG("readCache %8.8x from %d, way %d QW %x u32 part %x Really Reading %x", mem, i, number, (mem >> 4) & 0x3, (mem & 0xf) >> 2, (u32)pCache[i].data[number][(mem >> 4) & 0x3].b8._u32[(mem & 0xf) >> 2]);
-
-	return pCache[i].data[number][(mem >> 4) & 0x3].b8._u32[(mem & 0xf) >> 2];
+	return readCache<u32>(mem);
 }
 
 u64 readCache64(u32 mem)
 {
-	int number = 0;
-	int i = getFreeCache(mem, 0, &number);
-
-	if (i == -1)
-	{
-		auto vmv = vtlbdata.vmap[mem >> VTLB_PAGE_BITS];
-		return *reinterpret_cast<u64*>(vmv.assumePtr(mem));
-	}
-
-	CACHE_LOG("readCache %8.8x from %d, way %d QW %x u64 part %x Really Reading %x_%x", mem, i, number, (mem >> 4) & 0x3, (mem & 0xf) >> 2, pCache[i].data[number][(mem >> 4) & 0x3].b8._u64[(mem & 0xf) >> 3]);
-
-	return pCache[i].data[number][(mem >> 4) & 0x3].b8._u64[(mem & 0xf) >> 3];
+	return readCache<u64>(mem);
 }
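Because readCache() goes through the same getFreeCache() probe-or-fill as the write path, a read after a write to the same line is served from the cached copy while the backing memory stays stale until write-back. A sketch (the address is illustrative):

	writeCache32(0x00100040, 0x12345678); // line filled, marked valid + dirty
	u32 v = readCache32(0x00100040);      // hits the same line: v == 0x12345678
	// host-side backing memory keeps the old data until writeBackIfNeeded() runs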
-__forceinline void clear_cache(int index, int way)
-{
-	pCache[index].tag[way] &= LRF_FLAG;
-
-	pCache[index].data[way][0].b8._u64[0] = 0;
-	pCache[index].data[way][0].b8._u64[1] = 0;
-	pCache[index].data[way][1].b8._u64[0] = 0;
-	pCache[index].data[way][1].b8._u64[1] = 0;
-	pCache[index].data[way][2].b8._u64[0] = 0;
-	pCache[index].data[way][2].b8._u64[1] = 0;
-	pCache[index].data[way][3].b8._u64[0] = 0;
-	pCache[index].data[way][3].b8._u64[1] = 0;
-}
+template <typename Op>
+void doCacheHitOp(u32 addr, const char* name, Op op)
+{
+	const int index = cache.setIdxFor(addr);
+	CacheSet& set = cache.sets[index];
+	VTLBVirtual vmv = vtlbdata.vmap[addr >> VTLB_PAGE_BITS];
+	uptr ppf = vmv.assumePtr(addr);
+
+	int way;
+	if (!findInCache(set, ppf, &way))
+	{
+		CACHE_LOG("CACHE %s NO HIT addr %x, index %d, tag0 %zx tag1 %zx", name, addr, index, set.tags[0].rawValue, set.tags[1].rawValue);
+		return;
+	}
+
+	CACHE_LOG("CACHE %s addr %x, index %d, way %d, flags %x OP %x", name, addr, index, way, set.tags[way].flags(), cpuRegs.code);
+
+	op(cache.lineAt(index, way));
+}
 namespace R5900 {
 
@@ -286,137 +326,48 @@ void CACHE()
 	switch (_Rt_)
 	{
 		case 0x1a: //DHIN (Data Cache Hit Invalidate)
-		{
-			const int index = (addr >> 6) & 0x3F;
-			int way = 0;
-			const u32 pfnaddr = addr;
-			const auto vmv = vtlbdata.vmap[pfnaddr >> VTLB_PAGE_BITS];
-			const u32 paddr = vmv.assumeHandlerGetPAddr(pfnaddr);
-
-			if ((paddr & ~0xFFF) == (pCache[index].tag[0] & ~0xfff) && (pCache[index].tag[0] & VALID_FLAG))
-			{
-				way = 0;
-			}
-			else if ((paddr & ~0xFFF) == (pCache[index].tag[1] & ~0xfff) && (pCache[index].tag[1] & VALID_FLAG))
-			{
-				way = 1;
-			}
-			else
-			{
-				CACHE_LOG("CACHE DHIN NO HIT addr %x, index %d, phys %x tag0 %x tag1 %x", addr, index, paddr, pCache[index].tag[0], pCache[index].tag[1]);
-				return;
-			}
-
-			CACHE_LOG("CACHE DHIN addr %x, index %d, way %d, Flags %x OP %x", addr, index, way, pCache[index].tag[way] & 0x78, cpuRegs.code);
-
-			clear_cache(index, way);
+			doCacheHitOp(addr, "DHIN", [](CacheLine line)
+			{
+				line.clear();
+			});
 			break;
-		}
 		case 0x18: //DHWBIN (Data Cache Hit WriteBack with Invalidate)
-		{
-			const int index = (addr >> 6) & 0x3F;
-			int way = 0;
-			const u32 pfnaddr = addr;
-			const auto vmv = vtlbdata.vmap[pfnaddr >> VTLB_PAGE_BITS];
-			uptr ppf = vmv.assumePtr(pfnaddr) & ~0x3F;
-			const u32 paddr = vmv.assumeHandlerGetPAddr(pfnaddr) & ~0x3f;
-
-			if ((pCache[index].tag[0] & ~0xFFF) == (paddr & ~0xFFF) && (pCache[index].tag[0] & VALID_FLAG))
-			{
-				way = 0;
-			}
-			else if ((pCache[index].tag[1] & ~0xFFF) == (paddr & ~0xFFF) && (pCache[index].tag[1] & VALID_FLAG))
-			{
-				way = 1;
-			}
-			else
-			{
-				CACHE_LOG("CACHE DHWBIN NO HIT addr %x, index %d, phys %x tag0 %x tag1 %x", addr, index, paddr, pCache[index].tag[0], pCache[index].tag[1]);
-				return;
-			}
-
-			CACHE_LOG("CACHE DHWBIN addr %x, index %d, phys %x tag0 %x tag1 %x way %x", addr, index, paddr, pCache[index].tag[0], pCache[index].tag[1], way);
-
-			if ((pCache[index].tag[way] & (DIRTY_FLAG|VALID_FLAG)) == (DIRTY_FLAG|VALID_FLAG)) // Dirty
-			{
-				CACHE_LOG("DHWBIN Dirty WriteBack PPF %x", ppf);
-				*reinterpret_cast<mem64_t*>(ppf) = pCache[index].data[way][0].b8._u64[0];
-				*reinterpret_cast<mem64_t*>(ppf+8) = pCache[index].data[way][0].b8._u64[1];
-				*reinterpret_cast<mem64_t*>(ppf+16) = pCache[index].data[way][1].b8._u64[0];
-				*reinterpret_cast<mem64_t*>(ppf+24) = pCache[index].data[way][1].b8._u64[1];
-				*reinterpret_cast<mem64_t*>(ppf+32) = pCache[index].data[way][2].b8._u64[0];
-				*reinterpret_cast<mem64_t*>(ppf+40) = pCache[index].data[way][2].b8._u64[1];
-				*reinterpret_cast<mem64_t*>(ppf+48) = pCache[index].data[way][3].b8._u64[0];
-				*reinterpret_cast<mem64_t*>(ppf+56) = pCache[index].data[way][3].b8._u64[1];
-			}
-
-			clear_cache(index, way);
+			doCacheHitOp(addr, "DHWBIN", [](CacheLine line)
+			{
+				line.writeBackIfNeeded();
+				line.clear();
+			});
 			break;
-		}
 		case 0x1c: //DHWOIN (Data Cache Hit WriteBack Without Invalidate)
-		{
-			const int index = (addr >> 6) & 0x3F;
-			int way = 0;
-			const u32 pfnaddr = (pCache[index].tag[way] & ~0x80000fff) | (addr & 0xfc0);
-			const auto vmv = vtlbdata.vmap[pfnaddr >> VTLB_PAGE_BITS];
-			uptr ppf = vmv.assumePtr(pfnaddr);
-			const u32 paddr = vmv.assumeHandlerGetPAddr(pfnaddr);
-
-			CACHE_LOG("CACHE DHWOIN addr %x, index %d, way %d, Flags %x OP %x", addr, index, way, pCache[index].tag[way] & 0x78, cpuRegs.code);
-
-			if ((pCache[index].tag[0] & ~0xFFF) == (paddr & ~0xFFF) && (pCache[index].tag[0] & VALID_FLAG))
-			{
-				way = 0;
-			}
-			else if ((pCache[index].tag[1] & ~0xFFF) == (paddr & ~0xFFF) && (pCache[index].tag[1] & VALID_FLAG))
-			{
-				way = 1;
-			}
-			else
-			{
-				CACHE_LOG("CACHE DHWOIN NO HIT addr %x, index %d, phys %x tag0 %x tag1 %x", addr, index, paddr, pCache[index].tag[0], pCache[index].tag[1]);
-				return;
-			}
-
-			if ((pCache[index].tag[way] & (DIRTY_FLAG|VALID_FLAG)) == (DIRTY_FLAG|VALID_FLAG)) // Dirty
-			{
-				CACHE_LOG("DHWOIN Dirty WriteBack! PPF %x", ppf);
-				*reinterpret_cast<mem64_t*>(ppf) = pCache[index].data[way][0].b8._u64[0];
-				*reinterpret_cast<mem64_t*>(ppf+8) = pCache[index].data[way][0].b8._u64[1];
-				*reinterpret_cast<mem64_t*>(ppf+16) = pCache[index].data[way][1].b8._u64[0];
-				*reinterpret_cast<mem64_t*>(ppf+24) = pCache[index].data[way][1].b8._u64[1];
-				*reinterpret_cast<mem64_t*>(ppf+32) = pCache[index].data[way][2].b8._u64[0];
-				*reinterpret_cast<mem64_t*>(ppf+40) = pCache[index].data[way][2].b8._u64[1];
-				*reinterpret_cast<mem64_t*>(ppf+48) = pCache[index].data[way][3].b8._u64[0];
-				*reinterpret_cast<mem64_t*>(ppf+56) = pCache[index].data[way][3].b8._u64[1];
-
-				pCache[index].tag[way] &= ~DIRTY_FLAG;
-			}
+			doCacheHitOp(addr, "DHWOIN", [](CacheLine line)
+			{
+				line.writeBackIfNeeded();
+			});
 			break;
-		}
 		case 0x16: //DXIN (Data Cache Index Invalidate)
 		{
-			const int index = (addr >> 6) & 0x3F;
+			const int index = cache.setIdxFor(addr);
 			const int way = addr & 0x1;
+			CacheLine line = cache.lineAt(index, way);
 
-			CACHE_LOG("CACHE DXIN addr %x, index %d, way %d, flag %x\n", addr, index, way, pCache[index].tag[way] & 0x78);
+			CACHE_LOG("CACHE DXIN addr %x, index %d, way %d, flag %x", addr, index, way, line.tag.flags());
 
-			clear_cache(index, way);
+			line.clear();
 			break;
 		}
 		case 0x11: //DXLDT (Data Cache Load Data into TagLo)
 		{
-			const int index = (addr >> 6) & 0x3F;
+			const int index = cache.setIdxFor(addr);
 			const int way = addr & 0x1;
+			CacheLine line = cache.lineAt(index, way);
 
-			cpuRegs.CP0.n.TagLo = pCache[index].data[way][(addr >> 4) & 0x3].b8._u32[(addr & 0xf) >> 2];
+			cpuRegs.CP0.n.TagLo = *reinterpret_cast<u32*>(&line.data.bytes[addr & 0x3C]);
 
-			CACHE_LOG("CACHE DXLDT addr %x, index %d, way %d, DATA %x OP %x", addr, index, way, cpuRegs.CP0.r[28], cpuRegs.code);
+			CACHE_LOG("CACHE DXLDT addr %x, index %d, way %d, DATA %x OP %x", addr, index, way, cpuRegs.CP0.n.TagLo, cpuRegs.code);
 			break;
 		}
@@ -424,32 +375,17 @@ void CACHE()
 		{
 			const int index = (addr >> 6) & 0x3F;
 			const int way = addr & 0x1;
+			CacheLine line = cache.lineAt(index, way);
 
 			// DXLTG demands that SYNC.L is called before this command, which forces the cache to write back, so presumably games are checking the cache has updated the memory
 			// For speed, we will do it here.
-			const u32 pfnaddr = (pCache[index].tag[way] & ~0x80000fff) | (addr & 0xfc0);
-			const auto vmv = vtlbdata.vmap[pfnaddr >> VTLB_PAGE_BITS];
-			s32 ppf = vmv.assumePtr(pfnaddr);
-
-			if ((pCache[index].tag[way] & (DIRTY_FLAG | VALID_FLAG)) == (DIRTY_FLAG | VALID_FLAG)) // Dirty
-			{
-				CACHE_LOG("DXLTG Dirty WriteBack! PPF %x", ppf);
-				ppf = (ppf & 0x7fffffff);
-				*reinterpret_cast<mem64_t*>(ppf) = pCache[index].data[way][0].b8._u64[0];
-				*reinterpret_cast<mem64_t*>(ppf + 8) = pCache[index].data[way][0].b8._u64[1];
-				*reinterpret_cast<mem64_t*>(ppf + 16) = pCache[index].data[way][1].b8._u64[0];
-				*reinterpret_cast<mem64_t*>(ppf + 24) = pCache[index].data[way][1].b8._u64[1];
-				*reinterpret_cast<mem64_t*>(ppf + 32) = pCache[index].data[way][2].b8._u64[0];
-				*reinterpret_cast<mem64_t*>(ppf + 40) = pCache[index].data[way][2].b8._u64[1];
-				*reinterpret_cast<mem64_t*>(ppf + 48) = pCache[index].data[way][3].b8._u64[0];
-				*reinterpret_cast<mem64_t*>(ppf + 56) = pCache[index].data[way][3].b8._u64[1];
-
-				pCache[index].tag[way] &= ~DIRTY_FLAG;
-			}
-
-			//DevCon.Warning("DXLTG way %x index %x addr %x tagdata=%x", way, index, addr, pCache[index].tag[way]);
-			cpuRegs.CP0.n.TagLo = pCache[index].tag[way];
+			line.writeBackIfNeeded();
 
-			CACHE_LOG("CACHE DXLTG addr %x, index %d, way %d, DATA %x OP %x ", addr, index, way, cpuRegs.CP0.r[28], cpuRegs.code);
+			// Our tags don't contain PS2 paddrs (instead they contain x86 addrs)
+			cpuRegs.CP0.n.TagLo = line.tag.flags();
+
+			CACHE_LOG("CACHE DXLTG addr %x, index %d, way %d, DATA %x OP %x ", addr, index, way, cpuRegs.CP0.n.TagLo, cpuRegs.code);
+			CACHE_LOG("WARNING: DXLTG emulation supports flags only, things could break");
 			break;
 		}
@@ -457,10 +393,11 @@ void CACHE()
 		{
 			const int index = (addr >> 6) & 0x3F;
 			const int way = addr & 0x1;
+			CacheLine line = cache.lineAt(index, way);
 
-			pCache[index].data[way][(addr >> 4) & 0x3].b8._u32[(addr & 0xf) >> 2] = cpuRegs.CP0.n.TagLo;
+			*reinterpret_cast<u32*>(&line.data.bytes[addr & 0x3C]) = cpuRegs.CP0.n.TagLo;
 
-			CACHE_LOG("CACHE DXSDT addr %x, index %d, way %d, DATA %x OP %x", addr, index, way, cpuRegs.CP0.r[28], cpuRegs.code);
+			CACHE_LOG("CACHE DXSDT addr %x, index %d, way %d, DATA %x OP %x", addr, index, way, cpuRegs.CP0.n.TagLo, cpuRegs.code);
 			break;
 		}
@@ -468,9 +405,13 @@ void CACHE()
 		{
 			const int index = (addr >> 6) & 0x3F;
 			const int way = addr & 0x1;
+			CacheLine line = cache.lineAt(index, way);
 
-			pCache[index].tag[way] = cpuRegs.CP0.n.TagLo;
+			line.tag.rawValue &= ~CacheTag::ALL_FLAGS;
+			line.tag.rawValue |= (cpuRegs.CP0.n.TagLo & CacheTag::ALL_FLAGS);
 
-			CACHE_LOG("CACHE DXSTG addr %x, index %d, way %d, DATA %x OP %x", addr, index, way, cpuRegs.CP0.r[28] & 0x6F, cpuRegs.code);
+			CACHE_LOG("CACHE DXSTG addr %x, index %d, way %d, DATA %x OP %x", addr, index, way, cpuRegs.CP0.n.TagLo, cpuRegs.code);
+			CACHE_LOG("WARNING: DXSTG emulation supports flags only, things will probably break");
 			break;
 		}
@@ -478,28 +419,11 @@ void CACHE()
 		{
 			const int index = (addr >> 6) & 0x3F;
 			const int way = addr & 0x1;
+			CacheLine line = cache.lineAt(index, way);
 
-			const u32 pfnaddr = (pCache[index].tag[way] & ~0x80000fff) + (addr & 0xFC0);
-			const auto vmv = vtlbdata.vmap[pfnaddr >> VTLB_PAGE_BITS];
-			s32 ppf = vmv.assumePtr(pfnaddr);
-			const u32 paddr = vmv.assumeHandlerGetPAddr(pfnaddr);
-
-			CACHE_LOG("CACHE DXWBIN addr %x, index %d, way %d, Flags %x Paddr %x tag %x", addr, index, way, pCache[index].tag[way] & 0x78, paddr, pCache[index].tag[way]);
-
-			if ((pCache[index].tag[way] & (DIRTY_FLAG|VALID_FLAG)) == (DIRTY_FLAG|VALID_FLAG)) // Dirty
-			{
-				ppf = (ppf & 0x7fffffff);
-				CACHE_LOG("DXWBIN Dirty WriteBack! PPF %x", ppf);
-				*reinterpret_cast<mem64_t*>(ppf) = pCache[index].data[way][0].b8._u64[0];
-				*reinterpret_cast<mem64_t*>(ppf+8) = pCache[index].data[way][0].b8._u64[1];
-				*reinterpret_cast<mem64_t*>(ppf+16) = pCache[index].data[way][1].b8._u64[0];
-				*reinterpret_cast<mem64_t*>(ppf+24) = pCache[index].data[way][1].b8._u64[1];
-				*reinterpret_cast<mem64_t*>(ppf+32) = pCache[index].data[way][2].b8._u64[0];
-				*reinterpret_cast<mem64_t*>(ppf+40) = pCache[index].data[way][2].b8._u64[1];
-				*reinterpret_cast<mem64_t*>(ppf+48) = pCache[index].data[way][3].b8._u64[0];
-				*reinterpret_cast<mem64_t*>(ppf+56) = pCache[index].data[way][3].b8._u64[1];
-			}
-
-			clear_cache(index, way);
+			CACHE_LOG("CACHE DXWBIN addr %x, index %d, way %d, flags %x paddr %zx", addr, index, way, line.tag.flags(), line.addr());
+
+			line.writeBackIfNeeded();
+			line.clear();
 			break;
 		}
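The DX* index ops above address the cache geometrically rather than by tag lookup: bit 0 of the effective address selects the way and bits 6-11 select the set, so 128 iterations cover the whole cache. A sketch of a full walk (illustrative only, not code from the commit):

	for (int set = 0; set < 64; set++)
	{
		for (int way = 0; way < 2; way++)
		{
			const u32 addr = (set << 6) | way; // index in bits 6-11, way in bit 0
			// issuing e.g. DXIN with this addr clears cache.lineAt(set, way)
		}
	}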

View File

@@ -18,27 +18,7 @@
 #include "Common.h"
 
-union _u8bit_128
-{
-	u8 _u8[16];
-	u16 _u16[8];
-	u32 _u32[4];
-	u64 _u64[2];
-};
-
-struct u8bit_128 {
-	_u8bit_128 b8;
-};
-
-struct _cacheS {
-	u32 tag[2];
-	u8bit_128 data[2][4];
-};
-
-extern _cacheS pCache[64];
-
+void resetCache();
+
 void writeCache8(u32 mem, u8 value);
 void writeCache16(u32 mem, u16 value);
 void writeCache32(u32 mem, u32 value);

View File

@@ -41,7 +41,7 @@ static void PreLoadPrep()
 static void PostLoadPrep()
 {
-	memzero(pCache);
+	resetCache();
 //	WriteCP0Status(cpuRegs.CP0.n.Status.val);
 	for(int i=0; i<48; i++) MapTLB(i);
 	if (EmuConfig.Gamefixes.GoemonTlbHack) GoemonPreloadTlb();