#include "mmu.h"
|
|
#include "hw/sh4/sh4_if.h"
|
|
#include "hw/sh4/sh4_interrupts.h"
|
|
#include "hw/sh4/sh4_core.h"
|
|
#include "types.h"
|
|
|
|
TLB_Entry UTLB[64];
|
|
TLB_Entry ITLB[4];
|
|
|
|
//SQ fast remap , mainly hackish , assumes 1MB pages
|
|
//max 64MB can be remapped on SQ
|
|
// Used when FullMMU is off
|
|
u32 sq_remap[64];
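
// Illustrative example (not from the original source): with 1MB granularity,
// an SQ write-back to 0xE1500000 indexes sq_remap[(0xE1500000 >> 20) & 0x3F],
// i.e. sq_remap[0x15]. If a UTLB entry maps VA 0xE1500000 to PA 0x0C000000,
// UTLB_Sync below stores 0x0C000000 there, so the burst lands at 0x0C0xxxxx.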

#if defined(NO_MMU)

//Sync memory mapping to the MMU, suspending compiled blocks if needed.
//entry is a UTLB entry #; -1 means full sync.
bool UTLB_Sync(u32 entry)
{
	if ((UTLB[entry].Address.VPN & (0xFC000000 >> 10)) == (0xE0000000 >> 10))
	{
		u32 vpn_sq = ((UTLB[entry].Address.VPN & 0x7FFFF) >> 10) & 0x3F; //upper bits are always known [0xE0/E1/E2/E3]
		sq_remap[vpn_sq] = UTLB[entry].Data.PPN << 10;
		INFO_LOG(SH4, "SQ remap %d : 0x%X to 0x%X", entry, UTLB[entry].Address.VPN << 10, UTLB[entry].Data.PPN << 10);
	}
	else
	{
		INFO_LOG(SH4, "MEM remap %d : 0x%X to 0x%X", entry, UTLB[entry].Address.VPN << 10, UTLB[entry].Data.PPN << 10);
	}

	return true;
}

//Sync memory mapping to the MMU, suspending compiled blocks if needed.
//entry is an ITLB entry #; -1 means full sync.
void ITLB_Sync(u32 entry)
{
	INFO_LOG(SH4, "ITLB MEM remap %d : 0x%X to 0x%X", entry, ITLB[entry].Address.VPN << 10, ITLB[entry].Data.PPN << 10);
}

void mmu_set_state()
{
}

void MMU_init()
{
}

void MMU_reset()
{
	memset(UTLB, 0, sizeof(UTLB));
	memset(ITLB, 0, sizeof(ITLB));
}

void MMU_term()
{
}

#else

/*
	MMU support code
	This is mostly hacked-on, as the core was never meant to have MMU support.

	There are two modes: one with 'full' MMU emulation (for WinCE/bleem/whatever)
	and a fast-hack mode for 1MB SQ remaps (for Katana).

	Defining NO_MMU disables the full MMU emulation.
*/
#include "mmu.h"
#include "mmu_impl.h"
#include "hw/sh4/sh4_if.h"
#include "ccn.h"
#include "hw/sh4/sh4_interrupts.h"
#include "hw/sh4/sh4_mem.h"

#include "hw/mem/_vmem.h"

template<bool internal = false>
u32 mmu_full_lookup(u32 va, const TLB_Entry** tlb_entry_ret, u32& rv);

//#define TRACE_WINCE_SYSCALLS

#ifdef TRACE_WINCE_SYSCALLS
#include "wince.h"
u32 unresolved_ascii_string;
u32 unresolved_unicode_string;
#endif

#define printf_mmu(...) DEBUG_LOG(SH4, __VA_ARGS__)

ReadMem8Func ReadMem8;
ReadMem16Func ReadMem16;
ReadMem16Func IReadMem16;
ReadMem32Func ReadMem32;
ReadMem64Func ReadMem64;

WriteMem8Func WriteMem8;
WriteMem16Func WriteMem16;
WriteMem32Func WriteMem32;
WriteMem64Func WriteMem64;

extern const u32 mmu_mask[4] =
{
	((0xFFFFFFFF) >> 10) << 10,	//1 KB page
	((0xFFFFFFFF) >> 12) << 12,	//4 KB page
	((0xFFFFFFFF) >> 16) << 16,	//64 KB page
	((0xFFFFFFFF) >> 20) << 20	//1 MB page
};
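
// E.g. for a 4 KB page (SZ1:SZ0 == 01, index 1), mmu_mask[1] == 0xFFFFF000:
// translation keeps the top 20 bits from the PPN and the low 12 bits from the
// virtual address, i.e. rv = ((PPN << 10) & mask) | (va & ~mask).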

extern const u32 fast_reg_lut[8] =
{
	0, 0, 0, 0	//P0-U0
	, 1			//P1
	, 1			//P2
	, 0			//P3
	, 1			//P4
};
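
// Indexed with va >> 29. Nonzero entries mark the regions that are never
// translated (P1, P2, P4); for those, translation is the identity (rv = va).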

const u32 ITLB_LRU_OR[4] =
{
	0x00, //000xxx
	0x20, //1xx00x
	0x14, //x1x1x0
	0x0B, //xx1x11
};
const u32 ITLB_LRU_AND[4] =
{
	0x07, //000xxx
	0x39, //1xx00x
	0x3E, //x1x1x0
	0x3F, //xx1x11
};
u32 ITLB_LRU_USE[64];
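
// The 6 LRUI bits pairwise order the 4 ITLB entries (see the SH4 manual).
// Using entry e updates LRUI via (LRUI & ITLB_LRU_AND[e]) | ITLB_LRU_OR[e];
// e.g. after using entry 0 the value matches the 000xxx pattern above.
// ITLB_LRU_USE maps an LRUI value back to the entry to evict (built in
// MMU_init; 0xFFFFFFFF poisons LRUI patterns that cannot occur).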

#ifndef FAST_MMU
//Sync memory mapping to the MMU, suspending compiled blocks if needed.
//entry is a UTLB entry #; -1 means full sync.
bool UTLB_Sync(u32 entry)
{
	printf_mmu("UTLB MEM remap %d : 0x%X to 0x%X : %d asid %d size %d", entry, UTLB[entry].Address.VPN << 10, UTLB[entry].Data.PPN << 10, UTLB[entry].Data.V,
			UTLB[entry].Address.ASID, UTLB[entry].Data.SZ0 + UTLB[entry].Data.SZ1 * 2);
	if (UTLB[entry].Data.V == 0)
		return true;

	if ((UTLB[entry].Address.VPN & (0xFC000000 >> 10)) == (0xE0000000 >> 10))
	{
		// Used when FullMMU is off
		u32 vpn_sq = ((UTLB[entry].Address.VPN & 0x7FFFF) >> 10) & 0x3F; //upper bits are always known [0xE0/E1/E2/E3]
		sq_remap[vpn_sq] = UTLB[entry].Data.PPN << 10;

		return true;
	}
	else
	{
		return false;
	}
}

//Sync memory mapping to the MMU, suspending compiled blocks if needed.
//entry is an ITLB entry #; -1 means full sync.
void ITLB_Sync(u32 entry)
{
	printf_mmu("ITLB MEM remap %d : 0x%X to 0x%X : %d", entry, ITLB[entry].Address.VPN << 10, ITLB[entry].Data.PPN << 10, ITLB[entry].Data.V);
}
#endif

void RaiseException(u32 expEvnt, u32 callVect)
{
#if !defined(NO_MMU)
	SH4ThrownException ex = { next_pc - 2, expEvnt, callVect };
	throw ex;
#else
	msgboxf("Can't raise exceptions yet", MBX_ICONERROR);
#endif
}
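
// The (expEvnt, callVect) pairs below are the SH4 exception event codes and
// vector offsets from VBR: TLB misses vector to VBR+0x400, everything else
// here to VBR+0x100 (e.g. 0x40 RTLBMISS, 0x60 WTLBMISS, 0xE0 address error).
// SH4ThrownException carries next_pc - 2, the address of the faulting
// instruction (SH4 instructions are 2 bytes).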

void mmu_raise_exception(u32 mmu_error, u32 address, u32 am)
{
	printf_mmu("mmu_raise_exception -> pc = 0x%X : ", next_pc);
	CCN_TEA = address;
	CCN_PTEH.VPN = address >> 10;

	switch (mmu_error)
	{
		//No error
	case MMU_ERROR_NONE:
		die("Error : mmu_raise_exception(MMU_ERROR_NONE)");
		break;

		//TLB miss
	case MMU_ERROR_TLB_MISS:
		printf_mmu("MMU_ERROR_UTLB_MISS 0x%X, handled", address);
		if (am == MMU_TT_DWRITE)		//WTLBMISS - Write Data TLB Miss Exception
			RaiseException(0x60, 0x400);
		else if (am == MMU_TT_DREAD)	//RTLBMISS - Read Data TLB Miss Exception
			RaiseException(0x40, 0x400);
		else							//ITLBMISS - Instruction TLB Miss Exception
			RaiseException(0x40, 0x400);
		return;

		//TLB multihit
	case MMU_ERROR_TLB_MHIT:
		INFO_LOG(SH4, "MMU_ERROR_TLB_MHIT @ 0x%X", address);
		break;

		//Mem is read/write protected (depends on translation type)
	case MMU_ERROR_PROTECTED:
		printf_mmu("MMU_ERROR_PROTECTED 0x%X, handled", address);
		if (am == MMU_TT_DWRITE)		//WRITEPROT - Write Data TLB Protection Violation Exception
			RaiseException(0xC0, 0x100);
		else if (am == MMU_TT_DREAD)	//READPROT - Data TLB Protection Violation Exception
			RaiseException(0xA0, 0x100);
		else
		{
			verify(false);
		}
		return;

		//Mem is write protected, first write
	case MMU_ERROR_FIRSTWRITE:
		printf_mmu("MMU_ERROR_FIRSTWRITE");
		verify(am == MMU_TT_DWRITE);
		//FIRSTWRITE - Initial Page Write Exception
		RaiseException(0x80, 0x100);
		return;

		//Data read/write misaligned
	case MMU_ERROR_BADADDR:
		if (am == MMU_TT_DWRITE)		//WADDERR - Write Data Address Error
		{
			printf_mmu("MMU_ERROR_BADADDR(dw) 0x%X", address);
			RaiseException(0x100, 0x100);
		}
		else if (am == MMU_TT_DREAD)	//RADDERR - Read Data Address Error
		{
			printf_mmu("MMU_ERROR_BADADDR(dr) 0x%X", address);
			RaiseException(0xE0, 0x100);
		}
		else							//IADDERR - Instruction Address Error
		{
#ifdef TRACE_WINCE_SYSCALLS
			if (!print_wince_syscall(address))
#endif
				printf_mmu("MMU_ERROR_BADADDR(i) 0x%X", address);
			RaiseException(0xE0, 0x100);
		}
		return;

		//Can't execute
	case MMU_ERROR_EXECPROT:
		INFO_LOG(SH4, "MMU_ERROR_EXECPROT 0x%X", address);

		//EXECPROT - Instruction TLB Protection Violation Exception
		RaiseException(0xA0, 0x100);
		return;
	}

	die("Unknown mmu_error");
}


void DoMMUException(u32 address, u32 mmu_error, u32 access_type)
{
	printf_mmu("DoMMUException -> pc = 0x%X : %d ", next_pc, access_type);
	CCN_TEA = address;
	CCN_PTEH.VPN = address >> 10;

	switch (mmu_error)
	{
		//No error
	case MMU_ERROR_NONE:
		die("Error : DoMMUException(MMU_ERROR_NONE)");
		break;

		//TLB miss
	case MMU_ERROR_TLB_MISS:
		printf_mmu("MMU_ERROR_UTLB_MISS 0x%X, handled", address);
		if (access_type == MMU_TT_DWRITE)		//WTLBMISS - Write Data TLB Miss Exception
			Do_Exception(next_pc, 0x60, 0x400);
		else if (access_type == MMU_TT_DREAD)	//RTLBMISS - Read Data TLB Miss Exception
			Do_Exception(next_pc, 0x40, 0x400);
		else									//ITLBMISS - Instruction TLB Miss Exception
			Do_Exception(next_pc, 0x40, 0x400);
		return;

		//TLB multihit
	case MMU_ERROR_TLB_MHIT:
		INFO_LOG(SH4, "MMU_ERROR_TLB_MHIT @ 0x%X", address);
		break;

		//Mem is read/write protected (depends on translation type)
	case MMU_ERROR_PROTECTED:
		printf_mmu("MMU_ERROR_PROTECTED 0x%X, handled", address);
		if (access_type == MMU_TT_DWRITE)		//WRITEPROT - Write Data TLB Protection Violation Exception
			Do_Exception(next_pc, 0xC0, 0x100);
		else if (access_type == MMU_TT_DREAD)	//READPROT - Data TLB Protection Violation Exception
			Do_Exception(next_pc, 0xA0, 0x100);
		else
		{
			verify(false);
		}
		return;

		//Mem is write protected, first write
	case MMU_ERROR_FIRSTWRITE:
		printf_mmu("MMU_ERROR_FIRSTWRITE");
		verify(access_type == MMU_TT_DWRITE);
		//FIRSTWRITE - Initial Page Write Exception
		Do_Exception(next_pc, 0x80, 0x100);
		return;

		//Data read/write misaligned
	case MMU_ERROR_BADADDR:
		if (access_type == MMU_TT_DWRITE)		//WADDERR - Write Data Address Error
			Do_Exception(next_pc, 0x100, 0x100);
		else if (access_type == MMU_TT_DREAD)	//RADDERR - Read Data Address Error
			Do_Exception(next_pc, 0xE0, 0x100);
		else									//IADDERR - Instruction Address Error
		{
#ifdef TRACE_WINCE_SYSCALLS
			if (!print_wince_syscall(address))
#endif
				printf_mmu("MMU_ERROR_BADADDR(i) 0x%X", address);
			Do_Exception(next_pc, 0xE0, 0x100);
			return;
		}
		printf_mmu("MMU_ERROR_BADADDR(d) 0x%X, handled", address);
		return;

		//Can't execute
	case MMU_ERROR_EXECPROT:
		INFO_LOG(SH4, "MMU_ERROR_EXECPROT 0x%X", address);

		//EXECPROT - Instruction TLB Protection Violation Exception
		Do_Exception(next_pc, 0xA0, 0x100);
		return;
	}

	die("Unknown mmu_error");
}
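
// A UTLB entry matches when its VPN selects the same page as va and, for
// non-shared pages (SH == 0), the entry's ASID equals the current
// CCN_PTEH.ASID; the ASID check is skipped in privileged mode when
// MMUCR.SV == 1 (single virtual memory mode).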

bool mmu_match(u32 va, CCN_PTEH_type Address, CCN_PTEL_type Data)
{
	if (Data.V == 0)
		return false;

	u32 sz = Data.SZ1 * 2 + Data.SZ0;
	u32 mask = mmu_mask[sz];

	if (((Address.VPN << 10) & mask) == (va & mask))
	{
		bool asid_match = (Data.SH == 0) && ((sr.MD == 0) || (CCN_MMUCR.SV == 0));

		if ((asid_match == false) || (Address.ASID == CCN_PTEH.ASID))
		{
			return true;
		}
	}

	return false;
}

#ifndef FAST_MMU
//Do a full lookup on the UTLB entries
template<bool internal>
u32 mmu_full_lookup(u32 va, const TLB_Entry** tlb_entry_ret, u32& rv)
{
	if (!internal)
	{
		CCN_MMUCR.URC++;
		if (CCN_MMUCR.URB == CCN_MMUCR.URC)
			CCN_MMUCR.URC = 0;
	}

	u32 entry = -1;
	u32 nom = 0;

	for (u32 i = 0; i < 64; i++)
	{
		//verify(sz!=0);
		TLB_Entry *tlb_entry = &UTLB[i];
		if (mmu_match(va, tlb_entry->Address, tlb_entry->Data))
		{
			entry = i;
			nom++;
			u32 sz = tlb_entry->Data.SZ1 * 2 + tlb_entry->Data.SZ0;
			u32 mask = mmu_mask[sz];
			//VPN -> PPN | low bits
			rv = ((tlb_entry->Data.PPN << 10) & mask) | (va & (~mask));
		}
	}

	if (nom != 1)
	{
		if (nom)
		{
			return MMU_ERROR_TLB_MHIT;
		}
		else
		{
			return MMU_ERROR_TLB_MISS;
		}
	}

	*tlb_entry_ret = &UTLB[entry];

	return MMU_ERROR_NONE;
}
#endif
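
// When MMUCR.AT is off, the SQ write-back target comes from QACR0/QACR1 rather
// than the TLB. Illustrative example: for va = 0xE0000038, bit 5 is set, so
// SQ1 is selected and CCN_QACR_TR[1] (presumably precomputed from QACR1)
// supplies the area bits; the low 5 bits are cleared before the two are added.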

//Simple QACR translation for MMU (when AT is off)
u32 mmu_QACR_SQ(u32 va)
{
	u32 QACR;

	//default to SQ0
	QACR = CCN_QACR_TR[0];
	//SQ1? if so, use QACR1
	if (va & 0x20)
		QACR = CCN_QACR_TR[1];
	va &= ~0x1f;
	return QACR + va;
}

#ifndef FAST_MMU
template<u32 translation_type>
u32 mmu_full_SQ(u32 va, u32& rv)
{
	if ((va & 3) || (CCN_MMUCR.SQMD == 1 && sr.MD == 0))
	{
		//here, or after?
		return MMU_ERROR_BADADDR;
	}

	if (CCN_MMUCR.AT)
	{
		//Address=Dest&0xFFFFFFE0;

		const TLB_Entry *entry;
		u32 lookup = mmu_full_lookup(va, &entry, rv);

		rv &= ~31; //lower 5 bits are forced to 0

		if (lookup != MMU_ERROR_NONE)
			return lookup;

		u32 md = entry->Data.PR >> 1;

		//Priv mode protection
		if ((md == 0) && sr.MD == 0)
		{
			return MMU_ERROR_PROTECTED;
		}

		//Write protection (lock or first write)
		if (translation_type == MMU_TT_DWRITE)
		{
			if ((entry->Data.PR & 1) == 0)
				return MMU_ERROR_PROTECTED;
			else if (entry->Data.D == 0)
				return MMU_ERROR_FIRSTWRITE;
		}
	}
	else
	{
		rv = mmu_QACR_SQ(va);
	}
	return MMU_ERROR_NONE;
}
template u32 mmu_full_SQ<MMU_TT_DREAD>(u32 va, u32& rv);
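
// Data access translation, in order: alignment check, the SQ write fast path
// (0xE0000000-0xE3FFFFFF), the user-mode privileged-address check, the
// privileged on-chip RAM window (0x7C000000), the untranslated regions via
// fast_reg_lut, and finally a full UTLB lookup with protection checks.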

template<u32 translation_type, typename T>
u32 mmu_data_translation(u32 va, u32& rv)
{
	if (va & (sizeof(T) - 1))
		return MMU_ERROR_BADADDR;

	if (translation_type == MMU_TT_DWRITE)
	{
		if ((va & 0xFC000000) == 0xE0000000)
		{
			u32 lookup = mmu_full_SQ<translation_type>(va, rv);
			if (lookup != MMU_ERROR_NONE)
				return lookup;

			rv = va; //SQ writes are not translated, only write-backs are.
			return MMU_ERROR_NONE;
		}
	}

	if ((sr.MD == 0) && (va & 0x80000000) != 0)
	{
		//User-mode access to a privileged address (and not an SQ write) -> error
		return MMU_ERROR_BADADDR;
	}

	if (sr.MD == 1 && ((va & 0xFC000000) == 0x7C000000))
	{
		rv = va;
		return MMU_ERROR_NONE;
	}

	// Not called if CCN_MMUCR.AT == 0
	//if ((CCN_MMUCR.AT == 0) || (fast_reg_lut[va >> 29] != 0))
	if (fast_reg_lut[va >> 29] != 0)
	{
		rv = va;
		return MMU_ERROR_NONE;
	}

	const TLB_Entry *entry;
	u32 lookup = mmu_full_lookup(va, &entry, rv);

	if (lookup != MMU_ERROR_NONE)
		return lookup;

#ifdef TRACE_WINCE_SYSCALLS
	if (unresolved_unicode_string != 0)
	{
		if (va == unresolved_unicode_string)
		{
			unresolved_unicode_string = 0;
			INFO_LOG(SH4, "RESOLVED %s", get_unicode_string(va).c_str());
		}
	}
#endif

	u32 md = entry->Data.PR >> 1;

	//0X & user mode -> protection violation
	//Priv mode protection
	if ((md == 0) && sr.MD == 0)
	{
		return MMU_ERROR_PROTECTED;
	}

	//X0 -> read only
	//X1 -> read/write, can be first write

	//Write protection (lock or first write)
	if (translation_type == MMU_TT_DWRITE)
	{
		if ((entry->Data.PR & 1) == 0)
			return MMU_ERROR_PROTECTED;
		else if (entry->Data.D == 0)
			return MMU_ERROR_FIRSTWRITE;
	}
	return MMU_ERROR_NONE;
}
template u32 mmu_data_translation<MMU_TT_DREAD, u8>(u32 va, u32& rv);
template u32 mmu_data_translation<MMU_TT_DREAD, u16>(u32 va, u32& rv);
template u32 mmu_data_translation<MMU_TT_DREAD, u32>(u32 va, u32& rv);
template u32 mmu_data_translation<MMU_TT_DREAD, u64>(u32 va, u32& rv);

template u32 mmu_data_translation<MMU_TT_DWRITE, u8>(u32 va, u32& rv);
template u32 mmu_data_translation<MMU_TT_DWRITE, u16>(u32 va, u32& rv);
template u32 mmu_data_translation<MMU_TT_DWRITE, u32>(u32 va, u32& rv);
template u32 mmu_data_translation<MMU_TT_DWRITE, u64>(u32 va, u32& rv);
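
// Instruction fetch translation: the 4-entry ITLB is searched first; on a
// miss, the UTLB is consulted via mmu_full_lookup and the winning entry is
// copied into the ITLB slot chosen by ITLB_LRU_USE[LRUI], then the ITLB match
// is retried (at most once, enforced by the mmach flag).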

u32 mmu_instruction_translation(u32 va, u32& rv)
{
	if (va & 1)
	{
		return MMU_ERROR_BADADDR;
	}
	if ((sr.MD == 0) && (va & 0x80000000) != 0)
	{
		//In user mode, a fetch from 0xE0000000 and up (SQ area and beyond) is always a bad address
		if (va >= 0xE0000000)
			return MMU_ERROR_BADADDR;
	}

	if ((CCN_MMUCR.AT == 0) || (fast_reg_lut[va >> 29] != 0))
	{
		rv = va;
		return MMU_ERROR_NONE;
	}

	bool mmach = false;
retry_ITLB_Match:
	u32 entry = 4;
	u32 nom = 0;
	for (u32 i = 0; i < 4; i++)
	{
		if (ITLB[i].Data.V == 0)
			continue;
		u32 sz = ITLB[i].Data.SZ1 * 2 + ITLB[i].Data.SZ0;
		u32 mask = mmu_mask[sz];

		if (((ITLB[i].Address.VPN << 10) & mask) == (va & mask))
		{
			bool asid_match = (ITLB[i].Data.SH == 0) && ((sr.MD == 0) || (CCN_MMUCR.SV == 0));

			if ((asid_match == false) || (ITLB[i].Address.ASID == CCN_PTEH.ASID))
			{
				//verify(sz!=0);
				entry = i;
				nom++;
				//VPN -> PPN | low bits
				rv = ((ITLB[i].Data.PPN << 10) & mask) | (va & (~mask));
			}
		}
	}

	if (entry == 4)
	{
		verify(mmach == false);
		const TLB_Entry *tlb_entry;
		u32 lookup = mmu_full_lookup(va, &tlb_entry, rv);

		if (lookup != MMU_ERROR_NONE)
			return lookup;

		u32 replace_index = ITLB_LRU_USE[CCN_MMUCR.LRUI];
		verify(replace_index != 0xFFFFFFFF);
		ITLB[replace_index] = *tlb_entry;
		entry = replace_index;
		ITLB_Sync(entry);
		mmach = true;
		goto retry_ITLB_Match;
	}
	else if (nom != 1)
	{
		if (nom)
		{
			return MMU_ERROR_TLB_MHIT;
		}
		else
		{
			return MMU_ERROR_TLB_MISS;
		}
	}

	CCN_MMUCR.LRUI &= ITLB_LRU_AND[entry];
	CCN_MMUCR.LRUI |= ITLB_LRU_OR[entry];

	u32 md = ITLB[entry].Data.PR >> 1;

	//0X & user mode -> protection violation
	//Priv mode protection
	if ((md == 0) && sr.MD == 0)
	{
		return MMU_ERROR_PROTECTED;
	}
	return MMU_ERROR_NONE;
}
#endif

void mmu_set_state()
{
	if (CCN_MMUCR.AT == 1 && settings.dreamcast.FullMMU)
	{
		NOTICE_LOG(SH4, "Enabling Full MMU support");
		IReadMem16 = &mmu_IReadMem16;
		ReadMem8 = &mmu_ReadMem<u8>;
		ReadMem16 = &mmu_ReadMem<u16>;
		ReadMem32 = &mmu_ReadMem<u32>;
		ReadMem64 = &mmu_ReadMem<u64>;

		WriteMem8 = &mmu_WriteMem<u8>;
		WriteMem16 = &mmu_WriteMem<u16>;
		WriteMem32 = &mmu_WriteMem<u32>;
		WriteMem64 = &mmu_WriteMem<u64>;
		_vmem_enable_mmu(true);
		mmu_flush_table();
	}
	else
	{
		ReadMem8 = &_vmem_ReadMem8;
		ReadMem16 = &_vmem_ReadMem16;
		IReadMem16 = &_vmem_ReadMem16;
		ReadMem32 = &_vmem_ReadMem32;
		ReadMem64 = &_vmem_ReadMem64;

		WriteMem8 = &_vmem_WriteMem8;
		WriteMem16 = &_vmem_WriteMem16;
		WriteMem32 = &_vmem_WriteMem32;
		WriteMem64 = &_vmem_WriteMem64;
		_vmem_enable_mmu(false);
	}
}
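
// Build the reverse LRU map: for each ITLB entry e, every LRUI value whose
// pairwise bits all say "e was used least recently" maps to e; impossible
// LRUI patterns keep the 0xFFFFFFFF poison value that the ITLB refill
// verify()s against.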

void MMU_init()
{
	memset(ITLB_LRU_USE, 0xFF, sizeof(ITLB_LRU_USE));
	for (u32 e = 0; e < 4; e++)
	{
		u32 match_key = ((~ITLB_LRU_AND[e]) & 0x3F);
		u32 match_mask = match_key | ITLB_LRU_OR[e];
		for (u32 i = 0; i < 64; i++)
		{
			if ((i & match_mask) == match_key)
			{
				verify(ITLB_LRU_USE[i] == 0xFFFFFFFF);
				ITLB_LRU_USE[i] = e;
			}
		}
	}
	mmu_set_state();
}


void MMU_reset()
{
	memset(UTLB, 0, sizeof(UTLB));
	memset(ITLB, 0, sizeof(ITLB));
	mmu_set_state();
}

void MMU_term()
{
}

#ifndef FAST_MMU
void mmu_flush_table()
{
	//printf("MMU tables flushed\n");

	ITLB[0].Data.V = 0;
	ITLB[1].Data.V = 0;
	ITLB[2].Data.V = 0;
	ITLB[3].Data.V = 0;

	for (u32 i = 0; i < 64; i++)
		UTLB[i].Data.V = 0;
}
#endif

template<typename T>
T DYNACALL mmu_ReadMem(u32 adr)
{
	u32 addr;
	u32 rv = mmu_data_translation<MMU_TT_DREAD, T>(adr, addr);
	if (rv != MMU_ERROR_NONE)
		mmu_raise_exception(rv, adr, MMU_TT_DREAD);
	return _vmem_readt<T, T>(addr);
}

u16 DYNACALL mmu_IReadMem16(u32 vaddr)
{
	u32 addr;
	u32 rv = mmu_instruction_translation(vaddr, addr);
	if (rv != MMU_ERROR_NONE)
		mmu_raise_exception(rv, vaddr, MMU_TT_IREAD);
	return _vmem_ReadMem16(addr);
}

template<typename T>
void DYNACALL mmu_WriteMem(u32 adr, T data)
{
	u32 addr;
	u32 rv = mmu_data_translation<MMU_TT_DWRITE, T>(adr, addr);
	if (rv != MMU_ERROR_NONE)
		mmu_raise_exception(rv, adr, MMU_TT_DWRITE);
	_vmem_writet<T>(addr, data);
}
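
// Translate the target of an SQ write-back. Illustrative example of the fast
// path below: adr = 0xE2345678 -> sq_remap[0x23] | 0x45660, i.e. the remapped
// 1MB page base plus the 32-byte-aligned offset within that page.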

bool mmu_TranslateSQW(u32 adr, u32* out)
{
	if (!settings.dreamcast.FullMMU)
	{
		//This will only work for 1MB pages; hopefully nothing else is used
		//*FIXME* to work for all page sizes?

		*out = sq_remap[(adr >> 20) & 0x3F] | (adr & 0xFFFE0);
	}
	else
	{
		u32 addr;
		u32 tv = mmu_full_SQ<MMU_TT_DREAD>(adr, addr);
		if (tv != MMU_ERROR_NONE)
		{
			mmu_raise_exception(tv, adr, MMU_TT_DREAD);
			return false;
		}

		*out = addr;
	}

	return true;
}
#endif