mmu: max alignment requirement is 32-bit

A 64-bit access only needs to be 32-bit aligned.
Use an enum class for MMU errors.
Flyinghead 2023-02-16 18:33:00 +01:00
parent afcb3b7ad8
commit bf79183bd6
8 changed files with 197 additions and 193 deletions
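
For reference, a minimal stand-alone sketch of the alignment rule this commit adopts (the isAligned helper, main, and the sample addresses are illustrative, not taken from the commit): SH4 never requires more than 32-bit alignment, so the check mask is derived from std::min(sizeof(T), 4) rather than sizeof(T), exactly as in the mmu_ReadMem/mmu_WriteMem hunks below.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Illustrative helper mirroring the new checks in mmu_ReadMem/mmu_WriteMem:
// the alignment requirement is capped at 4 bytes, so a 64-bit access only
// needs to be 32-bit aligned.
template<typename T>
static bool isAligned(uint32_t addr)
{
    // mask is sizeof(T) - 1 for 8/16/32-bit accesses, but 3 for 64-bit ones
    return (addr & (std::min((int)sizeof(T), 4) - 1)) == 0;
}

int main()
{
    printf("%d\n", isAligned<uint64_t>(0x8C000004)); // 1: 32-bit alignment suffices for a 64-bit access
    printf("%d\n", isAligned<uint64_t>(0x8C000002)); // 0: still an address error
    printf("%d\n", isAligned<uint16_t>(0x8C000001)); // 0: a 16-bit access needs 2-byte alignment
    return 0;
}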


@@ -96,8 +96,8 @@ DynarecCodeEntryPtr DYNACALL bm_GetCodeByVAddr(u32 addr)
     }
     u32 paddr;
-    u32 rv = mmu_instruction_translation(addr, paddr);
-    if (rv != MMU_ERROR_NONE)
+    MmuError rv = mmu_instruction_translation(addr, paddr);
+    if (rv != MmuError::NONE)
     {
         DoMMUException(addr, rv, MMU_TT_IREAD);
         mmu_instruction_translation(next_pc, paddr);


@@ -145,8 +145,8 @@ bool RuntimeBlockInfo::Setup(u32 rpc,fpscr_t rfpu_cfg)
     }
     else if (mmu_enabled())
     {
-        u32 rv = mmu_instruction_translation(vaddr, addr);
-        if (rv != MMU_ERROR_NONE)
+        MmuError rv = mmu_instruction_translation(vaddr, addr);
+        if (rv != MmuError::NONE)
         {
             DoMMUException(vaddr, rv, MMU_TT_IREAD);
             return false;
@@ -463,10 +463,10 @@ static bool translateAddress(u32 addr, int size, u32 access, u32& outAddr, RuntimeBlockInfo* block)
         return false;
     u32 paddr;
-    u32 rv = access == MMU_TT_DREAD ?
+    MmuError rv = access == MMU_TT_DREAD ?
             mmu_data_translation<MMU_TT_DREAD>(addr, paddr)
             : mmu_data_translation<MMU_TT_DWRITE>(addr, paddr);
-    if (rv != MMU_ERROR_NONE)
+    if (rv != MmuError::NONE)
         return false;
     addr = paddr;
@@ -478,18 +478,20 @@ static bool translateAddress(u32 addr, int size, u32 access, u32& outAddr, RuntimeBlockInfo* block)
 bool rdv_readMemImmediate(u32 addr, int size, void*& ptr, bool& isRam, u32& physAddr, RuntimeBlockInfo* block)
 {
+    size = std::min(size, 4);
     if (!translateAddress(addr, size, MMU_TT_DREAD, physAddr, block))
         return false;
-    ptr = addrspace::readConst(physAddr, isRam, size > 4 ? 4 : size);
+    ptr = addrspace::readConst(physAddr, isRam, size);
     return true;
 }
 bool rdv_writeMemImmediate(u32 addr, int size, void*& ptr, bool& isRam, u32& physAddr, RuntimeBlockInfo* block)
 {
+    size = std::min(size, 4);
     if (!translateAddress(addr, size, MMU_TT_DWRITE, physAddr, block))
         return false;
-    ptr = addrspace::writeConst(physAddr, isRam, size > 4 ? 4 : size);
+    ptr = addrspace::writeConst(physAddr, isRam, size);
     return true;
 }


@@ -203,7 +203,7 @@ void ITLB_Sync(u32 entry)
 }
 //Do a full lookup on the UTLB entry's
-u32 mmu_full_lookup(u32 va, const TLB_Entry** tlb_entry_ret, u32& rv)
+MmuError mmu_full_lookup(u32 va, const TLB_Entry** tlb_entry_ret, u32& rv)
 {
     if (lru_entry != NULL)
     {
@@ -218,7 +218,7 @@ u32 mmu_full_lookup(u32 va, const TLB_Entry** tlb_entry_ret, u32& rv)
             if (tlb_entry_ret != nullptr)
                 *tlb_entry_ret = lru_entry;
-            return MMU_ERROR_NONE;
+            return MmuError::NONE;
         }
     }
     const TLB_Entry *localEntry;
@@ -233,7 +233,7 @@ u32 mmu_full_lookup(u32 va, const TLB_Entry** tlb_entry_ret, u32& rv)
         lru_mask = mask;
         lru_address = ((*tlb_entry_ret)->Address.VPN << 10);
-        return MMU_ERROR_NONE;
+        return MmuError::NONE;
     }
 #ifdef USE_WINCE_HACK
@@ -256,50 +256,50 @@ u32 mmu_full_lookup(u32 va, const TLB_Entry** tlb_entry_ret, u32& rv)
         cache_entry(entry);
-        return MMU_ERROR_NONE;
+        return MmuError::NONE;
     }
 #endif
-    return MMU_ERROR_TLB_MISS;
+    return MmuError::TLB_MISS;
 }
 template<u32 translation_type>
-u32 mmu_full_SQ(u32 va, u32& rv)
+MmuError mmu_full_SQ(u32 va, u32& rv)
 {
-    u32 lookup = mmu_full_lookup(va, nullptr, rv);
-    if (lookup != MMU_ERROR_NONE)
+    MmuError lookup = mmu_full_lookup(va, nullptr, rv);
+    if (lookup != MmuError::NONE)
         return lookup;
     rv &= ~31;//lower 5 bits are forced to 0
-    return MMU_ERROR_NONE;
+    return MmuError::NONE;
 }
-template u32 mmu_full_SQ<MMU_TT_DREAD>(u32 va, u32& rv);
-template u32 mmu_full_SQ<MMU_TT_DWRITE>(u32 va, u32& rv);
+template MmuError mmu_full_SQ<MMU_TT_DREAD>(u32 va, u32& rv);
+template MmuError mmu_full_SQ<MMU_TT_DWRITE>(u32 va, u32& rv);
 template<u32 translation_type>
-u32 mmu_data_translation(u32 va, u32& rv)
+MmuError mmu_data_translation(u32 va, u32& rv)
 {
     if (fast_reg_lut[va >> 29] != 0)
     {
         rv = va;
-        return MMU_ERROR_NONE;
+        return MmuError::NONE;
     }
     if ((va & 0xFC000000) == 0x7C000000)
     {
         // On-chip RAM area isn't translated
         rv = va;
-        return MMU_ERROR_NONE;
+        return MmuError::NONE;
     }
-    u32 lookup = mmu_full_lookup(va, nullptr, rv);
-    if (lookup == MMU_ERROR_NONE && (rv & 0x1C000000) == 0x1C000000)
+    MmuError lookup = mmu_full_lookup(va, nullptr, rv);
+    if (lookup == MmuError::NONE && (rv & 0x1C000000) == 0x1C000000)
         // map 1C000000-1FFFFFFF to P4 memory-mapped registers
         rv |= 0xF0000000;
 #ifdef TRACE_WINCE_SYSCALLS
-    if (unresolved_unicode_string != 0 && lookup == MMU_ERROR_NONE)
+    if (unresolved_unicode_string != 0 && lookup == MmuError::NONE)
     {
         if (va == unresolved_unicode_string)
         {
@@ -311,8 +311,8 @@ u32 mmu_data_translation(u32 va, u32& rv)
     return lookup;
 }
-template u32 mmu_data_translation<MMU_TT_DREAD>(u32 va, u32& rv);
-template u32 mmu_data_translation<MMU_TT_DWRITE>(u32 va, u32& rv);
+template MmuError mmu_data_translation<MMU_TT_DREAD>(u32 va, u32& rv);
+template MmuError mmu_data_translation<MMU_TT_DWRITE>(u32 va, u32& rv);
 void mmu_flush_table()
 {


@@ -83,7 +83,7 @@ void ITLB_Sync(u32 entry)
 #endif
 template<typename F>
-static void mmuException(u32 mmu_error, u32 address, u32 am, F raise)
+static void mmuException(MmuError mmu_error, u32 address, u32 am, F raise)
 {
     printf_mmu("MMU exception -> pc = 0x%X : ", next_pc);
     CCN_TEA = address;
@@ -91,26 +91,26 @@ static void mmuException(u32 mmu_error, u32 address, u32 am, F raise)
     switch (mmu_error)
     {
-    case MMU_ERROR_NONE:
-        die("Error: mmu_error == MMU_ERROR_NONE)");
+    case MmuError::NONE:
+        die("Error: mmu_error == MmuError::NONE)");
         return;
-    case MMU_ERROR_TLB_MISS:
-        printf_mmu("MMU_ERROR_UTLB_MISS 0x%X, handled", address);
+    case MmuError::TLB_MISS:
+        printf_mmu("MmuError::UTLB_MISS 0x%X, handled", address);
         if (am == MMU_TT_DWRITE)
             raise(Sh4Ex_TlbMissWrite);
         else
             raise(Sh4Ex_TlbMissRead);
         return;
-    case MMU_ERROR_TLB_MHIT:
-        ERROR_LOG(SH4, "MMU_ERROR_TLB_MHIT @ 0x%X", address);
+    case MmuError::TLB_MHIT:
+        ERROR_LOG(SH4, "MmuError::TLB_MHIT @ 0x%X", address);
         raise(Sh4Ex_TlbMultiHit);
         break;
     //Mem is read/write protected (depends on translation type)
-    case MMU_ERROR_PROTECTED:
-        printf_mmu("MMU_ERROR_PROTECTED 0x%X, handled", address);
+    case MmuError::PROTECTED:
+        printf_mmu("MmuError::PROTECTED 0x%X, handled", address);
         if (am == MMU_TT_DWRITE)
             raise(Sh4Ex_TlbProtViolWrite);
         else
@@ -118,22 +118,22 @@ static void mmuException(u32 mmu_error, u32 address, u32 am, F raise)
         return;
     //Mem is write protected , firstwrite
-    case MMU_ERROR_FIRSTWRITE:
-        printf_mmu("MMU_ERROR_FIRSTWRITE");
+    case MmuError::FIRSTWRITE:
+        printf_mmu("MmuError::FIRSTWRITE");
         verify(am == MMU_TT_DWRITE);
         raise(Sh4Ex_TlbInitPageWrite);
         return;
     //data read/write misaligned
-    case MMU_ERROR_BADADDR:
+    case MmuError::BADADDR:
         if (am == MMU_TT_DWRITE) //WADDERR - Write Data Address Error
         {
-            printf_mmu("MMU_ERROR_BADADDR(dw) 0x%X", address);
+            printf_mmu("MmuError::BADADDR(dw) 0x%X", address);
             raise(Sh4Ex_AddressErrorWrite);
         }
         else if (am == MMU_TT_DREAD) //RADDERR - Read Data Address Error
         {
-            printf_mmu("MMU_ERROR_BADADDR(dr) 0x%X", address);
+            printf_mmu("MmuError::BADADDR(dr) 0x%X", address);
             raise(Sh4Ex_AddressErrorRead);
         }
         else //IADDERR - Instruction Address Error
@@ -141,7 +141,7 @@ static void mmuException(u32 mmu_error, u32 address, u32 am, F raise)
 #ifdef TRACE_WINCE_SYSCALLS
             if (!print_wince_syscall(address))
 #endif
-                printf_mmu("MMU_ERROR_BADADDR(i) 0x%X", address);
+                printf_mmu("MmuError::BADADDR(i) 0x%X", address);
             raise(Sh4Ex_AddressErrorRead);
         }
         return;
@@ -151,7 +151,7 @@ static void mmuException(u32 mmu_error, u32 address, u32 am, F raise)
     }
 }
-[[noreturn]] void mmu_raise_exception(u32 mmu_error, u32 address, u32 am)
+[[noreturn]] void mmu_raise_exception(MmuError mmu_error, u32 address, u32 am)
 {
     mmuException(mmu_error, address, am, [](Sh4ExceptionCode event) {
         debugger::debugTrap(event); // FIXME CCN_TEA and CCN_PTEH have been updated already
@@ -162,7 +162,7 @@ static void mmuException(u32 mmu_error, u32 address, u32 am, F raise)
 }
-void DoMMUException(u32 address, u32 mmu_error, u32 access_type)
+void DoMMUException(u32 address, MmuError mmu_error, u32 access_type)
 {
     mmuException(mmu_error, address, access_type, [](Sh4ExceptionCode event) {
         Do_Exception(next_pc, event);
@@ -190,7 +190,7 @@ bool mmu_match(u32 va, CCN_PTEH_type Address, CCN_PTEL_type Data)
 #ifndef FAST_MMU
 //Do a full lookup on the UTLB entry's
-u32 mmu_full_lookup(u32 va, const TLB_Entry** tlb_entry_ret, u32& rv)
+MmuError mmu_full_lookup(u32 va, const TLB_Entry** tlb_entry_ret, u32& rv)
 {
     CCN_MMUCR.URC++;
     if (CCN_MMUCR.URB == CCN_MMUCR.URC)
@@ -202,7 +202,7 @@ u32 mmu_full_lookup(u32 va, const TLB_Entry** tlb_entry_ret, u32& rv)
         if (mmu_match(va, tlb_entry.Address, tlb_entry.Data))
         {
             if (*tlb_entry_ret != nullptr)
-                return MMU_ERROR_TLB_MHIT;
+                return MmuError::TLB_MHIT;
             *tlb_entry_ret = &tlb_entry;
             u32 sz = tlb_entry.Data.SZ1 * 2 + tlb_entry.Data.SZ0;
             u32 mask = mmu_mask[sz];
@@ -212,9 +212,9 @@ u32 mmu_full_lookup(u32 va, const TLB_Entry** tlb_entry_ret, u32& rv)
     }
     if (*tlb_entry_ret == nullptr)
-        return MMU_ERROR_TLB_MISS;
+        return MmuError::TLB_MISS;
     else
-        return MMU_ERROR_NONE;
+        return MmuError::NONE;
 }
 //Simple QACR translation for mmu (when AT is off)
@@ -228,87 +228,87 @@ static u32 mmu_QACR_SQ(u32 va)
 }
 template<u32 translation_type>
-u32 mmu_full_SQ(u32 va, u32& rv)
+MmuError mmu_full_SQ(u32 va, u32& rv)
 {
     if ((va & 3) || (CCN_MMUCR.SQMD == 1 && sr.MD == 0))
         //here, or after ?
-        return MMU_ERROR_BADADDR;
+        return MmuError::BADADDR;
     if (CCN_MMUCR.AT)
     {
         //Address=Dest&0xFFFFFFE0;
         const TLB_Entry *entry;
-        u32 lookup = mmu_full_lookup(va, &entry, rv);
+        MmuError lookup = mmu_full_lookup(va, &entry, rv);
         rv &= ~31;//lower 5 bits are forced to 0
-        if (lookup != MMU_ERROR_NONE)
+        if (lookup != MmuError::NONE)
             return lookup;
         u32 md = entry->Data.PR >> 1;
         //Priv mode protection
         if (md == 0 && sr.MD == 0)
-            return MMU_ERROR_PROTECTED;
+            return MmuError::PROTECTED;
         //Write Protection (Lock or FW)
         if (translation_type == MMU_TT_DWRITE)
         {
            if ((entry->Data.PR & 1) == 0)
-                return MMU_ERROR_PROTECTED;
+                return MmuError::PROTECTED;
            else if (entry->Data.D == 0)
-                return MMU_ERROR_FIRSTWRITE;
+                return MmuError::FIRSTWRITE;
        }
    }
    else
    {
        rv = mmu_QACR_SQ(va);
    }
-    return MMU_ERROR_NONE;
+    return MmuError::NONE;
 }
-template u32 mmu_full_SQ<MMU_TT_DREAD>(u32 va, u32& rv);
-template u32 mmu_full_SQ<MMU_TT_DWRITE>(u32 va, u32& rv);
+template MmuError mmu_full_SQ<MMU_TT_DREAD>(u32 va, u32& rv);
+template MmuError mmu_full_SQ<MMU_TT_DWRITE>(u32 va, u32& rv);
 template<u32 translation_type>
-u32 mmu_data_translation(u32 va, u32& rv)
+MmuError mmu_data_translation(u32 va, u32& rv)
 {
     if (translation_type == MMU_TT_DWRITE)
     {
         if ((va & 0xFC000000) == 0xE0000000)
         {
-            u32 lookup = mmu_full_SQ<MMU_TT_DWRITE>(va, rv);
-            if (lookup != MMU_ERROR_NONE)
+            MmuError lookup = mmu_full_SQ<MMU_TT_DWRITE>(va, rv);
+            if (lookup != MmuError::NONE)
                 return lookup;
             rv = va; //SQ writes are not translated, only write backs are.
-            return MMU_ERROR_NONE;
+            return MmuError::NONE;
         }
     }
     if (sr.MD == 0 && (va & 0x80000000) != 0)
         //if on kernel, and not SQ addr -> error
-        return MMU_ERROR_BADADDR;
+        return MmuError::BADADDR;
     if ((va & 0xFC000000) == 0x7C000000)
     {
         // 7C000000 to 7FFFFFFF in P0/U0 not translated
         rv = va;
-        return MMU_ERROR_NONE;
+        return MmuError::NONE;
     }
     if (fast_reg_lut[va >> 29] != 0)
     {
         // P1, P2 and P4 aren't translated
         rv = va;
-        return MMU_ERROR_NONE;
+        return MmuError::NONE;
     }
     const TLB_Entry *entry;
-    u32 lookup = mmu_full_lookup(va, &entry, rv);
-    if (lookup != MMU_ERROR_NONE)
+    MmuError lookup = mmu_full_lookup(va, &entry, rv);
+    if (lookup != MmuError::NONE)
         return lookup;
 #ifdef TRACE_WINCE_SYSCALLS
@@ -327,7 +327,7 @@ u32 mmu_data_translation(u32 va, u32& rv)
     //0X & User mode-> protection violation
     //Priv mode protection
     if (md == 0 && sr.MD == 0)
-        return MMU_ERROR_PROTECTED;
+        return MmuError::PROTECTED;
     //X0 -> read olny
     //X1 -> read/write , can be FW
@@ -336,39 +336,39 @@ u32 mmu_data_translation(u32 va, u32& rv)
     if (translation_type == MMU_TT_DWRITE)
     {
         if ((entry->Data.PR & 1) == 0)
-            return MMU_ERROR_PROTECTED;
+            return MmuError::PROTECTED;
         else if (entry->Data.D == 0)
-            return MMU_ERROR_FIRSTWRITE;
+            return MmuError::FIRSTWRITE;
     }
     if ((rv & 0x1C000000) == 0x1C000000)
         // map 1C000000-1FFFFFFF to P4 memory-mapped registers
         rv |= 0xF0000000;
-    return MMU_ERROR_NONE;
+    return MmuError::NONE;
 }
-template u32 mmu_data_translation<MMU_TT_DREAD>(u32 va, u32& rv);
-template u32 mmu_data_translation<MMU_TT_DWRITE>(u32 va, u32& rv);
+template MmuError mmu_data_translation<MMU_TT_DREAD>(u32 va, u32& rv);
+template MmuError mmu_data_translation<MMU_TT_DWRITE>(u32 va, u32& rv);
-u32 mmu_instruction_translation(u32 va, u32& rv)
+MmuError mmu_instruction_translation(u32 va, u32& rv)
 {
     if (sr.MD == 0 && (va & 0x80000000) != 0)
         // User mode on kernel address
-        return MMU_ERROR_BADADDR;
+        return MmuError::BADADDR;
     if ((va >> 29) == 7)
         // P4 not executable
-        return MMU_ERROR_BADADDR;
+        return MmuError::BADADDR;
     if (fast_reg_lut[va >> 29] != 0)
     {
         // P1 and P2 aren't translated
         rv = va;
-        return MMU_ERROR_NONE;
+        return MmuError::NONE;
     }
     const TLB_Entry *entry;
-    u32 lookup = mmu_instruction_lookup(va, &entry, rv);
-    if (lookup != MMU_ERROR_NONE)
+    MmuError lookup = mmu_instruction_lookup(va, &entry, rv);
+    if (lookup != MmuError::NONE)
         return lookup;
     u32 md = entry->Data.PR >> 1;
@@ -376,13 +376,13 @@ u32 mmu_instruction_translation(u32 va, u32& rv)
     //0X & User mode-> protection violation
     //Priv mode protection
     if (md == 0 && sr.MD == 0)
-        return MMU_ERROR_PROTECTED;
+        return MmuError::PROTECTED;
-    return MMU_ERROR_NONE;
+    return MmuError::NONE;
 }
 #endif
-u32 mmu_instruction_lookup(u32 va, const TLB_Entry** tlb_entry_ret, u32& rv)
+MmuError mmu_instruction_lookup(u32 va, const TLB_Entry** tlb_entry_ret, u32& rv)
 {
     bool mmach = false;
 retry_ITLB_Match:
@@ -401,7 +401,7 @@ retry_ITLB_Match:
         if (!needAsidMatch || entry.Address.ASID == CCN_PTEH.ASID)
         {
             if (*tlb_entry_ret != nullptr)
-                return MMU_ERROR_TLB_MHIT;
+                return MmuError::TLB_MHIT;
             *tlb_entry_ret = &entry;
             //VPN->PPN | low bits
             rv = ((entry.Data.PPN << 10) & mask) | (va & ~mask);
@@ -413,8 +413,8 @@ retry_ITLB_Match:
     {
         verify(mmach == false);
         const TLB_Entry *tlb_entry;
-        u32 lookup = mmu_full_lookup(va, &tlb_entry, rv);
-        if (lookup != MMU_ERROR_NONE)
+        MmuError lookup = mmu_full_lookup(va, &tlb_entry, rv);
+        if (lookup != MmuError::NONE)
             return lookup;
         u32 replace_index = ITLB_LRU_USE[CCN_MMUCR.LRUI];
@@ -428,7 +428,7 @@ retry_ITLB_Match:
     CCN_MMUCR.LRUI &= ITLB_LRU_AND[*tlb_entry_ret - ITLB];
     CCN_MMUCR.LRUI |= ITLB_LRU_OR[*tlb_entry_ret - ITLB];
-    return MMU_ERROR_NONE;
+    return MmuError::NONE;
 }
 void mmu_set_state()
@@ -505,12 +505,12 @@ void mmu_flush_table()
 template<typename T>
 T DYNACALL mmu_ReadMem(u32 adr)
 {
-    if (adr & (sizeof(T) - 1))
+    if (adr & (std::min((int)sizeof(T), 4) - 1))
         // Unaligned
-        mmu_raise_exception(MMU_ERROR_BADADDR, adr, MMU_TT_DREAD);
+        mmu_raise_exception(MmuError::BADADDR, adr, MMU_TT_DREAD);
     u32 addr;
-    u32 rv = mmu_data_translation<MMU_TT_DREAD>(adr, addr);
-    if (rv != MMU_ERROR_NONE)
+    MmuError rv = mmu_data_translation<MMU_TT_DREAD>(adr, addr);
+    if (rv != MmuError::NONE)
         mmu_raise_exception(rv, adr, MMU_TT_DREAD);
     return addrspace::readt<T>(addr);
 }
@@ -523,10 +523,10 @@ u16 DYNACALL mmu_IReadMem16(u32 vaddr)
 {
     if (vaddr & (sizeof(u16) - 1))
         // Unaligned
-        mmu_raise_exception(MMU_ERROR_BADADDR, vaddr, MMU_TT_DREAD);
+        mmu_raise_exception(MmuError::BADADDR, vaddr, MMU_TT_IREAD);
     u32 addr;
-    u32 rv = mmu_instruction_translation(vaddr, addr);
-    if (rv != MMU_ERROR_NONE)
+    MmuError rv = mmu_instruction_translation(vaddr, addr);
+    if (rv != MmuError::NONE)
         mmu_raise_exception(rv, vaddr, MMU_TT_IREAD);
     return addrspace::read16(addr);
 }
@@ -534,12 +534,12 @@ u16 DYNACALL mmu_IReadMem16(u32 vaddr)
 template<typename T>
 void DYNACALL mmu_WriteMem(u32 adr, T data)
 {
-    if (adr & (sizeof(T) - 1))
+    if (adr & (std::min((int)sizeof(T), 4) - 1))
         // Unaligned
-        mmu_raise_exception(MMU_ERROR_BADADDR, adr, MMU_TT_DWRITE);
+        mmu_raise_exception(MmuError::BADADDR, adr, MMU_TT_DWRITE);
     u32 addr;
-    u32 rv = mmu_data_translation<MMU_TT_DWRITE>(adr, addr);
-    if (rv != MMU_ERROR_NONE)
+    MmuError rv = mmu_data_translation<MMU_TT_DWRITE>(adr, addr);
+    if (rv != MmuError::NONE)
         mmu_raise_exception(rv, adr, MMU_TT_DWRITE);
     addrspace::writet<T>(addr, data);
 }
@@ -560,8 +560,8 @@ void mmu_TranslateSQW(u32 adr, u32 *out)
     else
     {
         u32 addr;
-        u32 tv = mmu_full_SQ<MMU_TT_DREAD>(adr, addr);
-        if (tv != MMU_ERROR_NONE)
+        MmuError tv = mmu_full_SQ<MMU_TT_DREAD>(adr, addr);
+        if (tv != MmuError::NONE)
            mmu_raise_exception(tv, adr, MMU_TT_DREAD);
        *out = addr;


@@ -12,19 +12,21 @@
 //Data write
 #define MMU_TT_DREAD 2
 //Return Values
-//Translation was successful
-#define MMU_ERROR_NONE 0
-//TLB miss
-#define MMU_ERROR_TLB_MISS 1
-//TLB Multihit
-#define MMU_ERROR_TLB_MHIT 2
-//Mem is read/write protected (depends on translation type)
-#define MMU_ERROR_PROTECTED 3
-//Mem is write protected , firstwrite
-#define MMU_ERROR_FIRSTWRITE 4
-//data-Opcode read/write misaligned
-#define MMU_ERROR_BADADDR 5
+enum class MmuError
+{
+    //Translation was successful
+    NONE,
+    //TLB miss
+    TLB_MISS,
+    //TLB Multihit
+    TLB_MHIT,
+    //Mem is read/write protected (depends on translation type)
+    PROTECTED,
+    //Mem is write protected , firstwrite
+    FIRSTWRITE,
+    //data-Opcode read/write misaligned
+    BADADDR
+};
 struct TLB_Entry
 {
@@ -61,25 +63,25 @@ void ITLB_Sync(u32 entry);
 bool mmu_match(u32 va, CCN_PTEH_type Address, CCN_PTEL_type Data);
 void mmu_set_state();
 void mmu_flush_table();
-[[noreturn]] void mmu_raise_exception(u32 mmu_error, u32 address, u32 am);
+[[noreturn]] void mmu_raise_exception(MmuError mmu_error, u32 address, u32 am);
 static inline bool mmu_enabled()
 {
     return mmuOn;
 }
-u32 mmu_full_lookup(u32 va, const TLB_Entry **entry, u32& rv);
-u32 mmu_instruction_lookup(u32 va, const TLB_Entry **entry, u32& rv);
+MmuError mmu_full_lookup(u32 va, const TLB_Entry **entry, u32& rv);
+MmuError mmu_instruction_lookup(u32 va, const TLB_Entry **entry, u32& rv);
 template<u32 translation_type>
-u32 mmu_full_SQ(u32 va, u32& rv);
+MmuError mmu_full_SQ(u32 va, u32& rv);
 #ifdef FAST_MMU
-static inline u32 mmu_instruction_translation(u32 va, u32& rv)
+static inline MmuError mmu_instruction_translation(u32 va, u32& rv)
 {
     if (fast_reg_lut[va >> 29] != 0)
     {
         rv = va;
-        return MMU_ERROR_NONE;
+        return MmuError::NONE;
     }
     return mmu_full_lookup(va, nullptr, rv);
@@ -89,13 +91,13 @@ u32 mmu_instruction_translation(u32 va, u32& rv);
 #endif
 template<u32 translation_type>
-u32 mmu_data_translation(u32 va, u32& rv);
-void DoMMUException(u32 addr, u32 mmu_error, u32 access_type);
+MmuError mmu_data_translation(u32 va, u32& rv);
+void DoMMUException(u32 addr, MmuError mmu_error, u32 access_type);
 inline static bool mmu_is_translated(u32 va, u32 size)
 {
 #ifndef FAST_MMU
-    if (va & (size - 1))
+    if (va & (std::min(size, 4u) - 1))
         return true;
 #endif
@@ -133,13 +135,13 @@ static inline void mmuAddressLUTFlush(bool full)
 static inline u32 DYNACALL mmuDynarecLookup(u32 vaddr, u32 write, u32 pc)
 {
     u32 paddr;
-    u32 rv;
+    MmuError rv;
     // TODO pass access size so that alignment errors are raised
     if (write)
         rv = mmu_data_translation<MMU_TT_DWRITE>(vaddr, paddr);
     else
         rv = mmu_data_translation<MMU_TT_DREAD>(vaddr, paddr);
-    if (unlikely(rv != MMU_ERROR_NONE))
+    if (unlikely(rv != MmuError::NONE))
     {
         Sh4cntx.pc = pc;
         DoMMUException(vaddr, rv, write ? MMU_TT_DWRITE : MMU_TT_DREAD);
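
The header hunks above (the MMU header) are the heart of the second change: the MMU_ERROR_* defines become a scoped enum. A self-contained sketch of what enum class buys here (translate() is a stub standing in for the real translation functions, not part of the commit): scoped enumerators don't convert implicitly to u32, so the compiler forces every return type and comparison in the call chain to be updated, which is exactly the churn visible in these diffs.

#include <cstdint>
using u32 = uint32_t;

enum class MmuError { NONE, TLB_MISS, TLB_MHIT, PROTECTED, FIRSTWRITE, BADADDR };

// Stub standing in for mmu_data_translation<>(); illustrative only.
static MmuError translate(u32 va, u32& pa)
{
    pa = va & 0x1FFFFFFF; // strip the region bits, as an untranslated area would
    return MmuError::NONE;
}

int main()
{
    u32 pa;
    MmuError err = translate(0x8C000000, pa); // 'u32 err = ...' no longer compiles
    //if (err != 0)            // rejected: no implicit conversion to an integer
    if (err != MmuError::NONE) // comparisons must name the scoped enumerator
        return 1;
    return 0;
}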


@@ -30,7 +30,7 @@ static bool read_mem32(u32 addr, u32& data)
 {
     u32 pa;
     const TLB_Entry *entry;
-    if (mmu_full_lookup(addr, &entry, pa) != MMU_ERROR_NONE)
+    if (mmu_full_lookup(addr, &entry, pa) != MmuError::NONE)
         return false;
     data = ReadMem32_nommu(pa);
     return true;
@@ -40,7 +40,7 @@ static bool read_mem16(u32 addr, u16& data)
 {
     u32 pa;
     const TLB_Entry *entry;
-    if (mmu_full_lookup(addr, &entry, pa) != MMU_ERROR_NONE)
+    if (mmu_full_lookup(addr, &entry, pa) != MmuError::NONE)
         return false;
     data = ReadMem16_nommu(pa);
     return true;
@@ -50,7 +50,7 @@ static bool read_mem8(u32 addr, u8& data)
 {
     u32 pa;
     const TLB_Entry *entry;
-    if (mmu_full_lookup(addr, &entry, pa) != MMU_ERROR_NONE)
+    if (mmu_full_lookup(addr, &entry, pa) != MmuError::NONE)
         return false;
     data = ReadMem8_nommu(pa);
     return true;


@@ -58,8 +58,8 @@ public:
     {
         bool cacheOn = false;
         u32 physAddr;
-        u32 err = translateAddress(address, physAddr, cacheOn);
-        if (err != MMU_ERROR_NONE)
+        MmuError err = translateAddress(address, physAddr, cacheOn);
+        if (err != MmuError::NONE)
             mmu_raise_exception(err, address, MMU_TT_IREAD);
         if (!cacheOn)
@@ -130,11 +130,11 @@ public:
         const u32 vaddr = data & ~0x3ff;
         bool cached;
         u32 physAddr;
-        u32 err = translateAddress(vaddr, physAddr, cached);
-        if (err == MMU_ERROR_TLB_MISS)
+        MmuError err = translateAddress(vaddr, physAddr, cached);
+        if (err == MmuError::TLB_MISS)
             // Ignore the write
             return;
-        if (err != MMU_ERROR_NONE)
+        if (err != MmuError::NONE)
             mmu_raise_exception(err, vaddr, MMU_TT_IREAD);
         u32 tag = (physAddr >> 10) & 0x7ffff;
@@ -167,11 +167,11 @@ private:
         u8 data[32];
     };
-    u32 translateAddress(u32 address, u32& physAddr, bool& cached)
+    MmuError translateAddress(u32 address, u32& physAddr, bool& cached)
     {
         // Alignment errors
         if (address & 1)
-            return MMU_ERROR_BADADDR;
+            return MmuError::BADADDR;
         const u32 area = address >> 29;
         const bool userMode = sr.MD == 0;
@@ -181,13 +181,13 @@ private:
             // kernel mem protected in user mode
             // FIXME this makes WinCE fail
             //if (address & 0x80000000)
-            //    return MMU_ERROR_BADADDR;
+            //    return MmuError::BADADDR;
         }
         else
         {
             // P4 not executable
             if (area == 7)
-                return MMU_ERROR_BADADDR;
+                return MmuError::BADADDR;
         }
         cached = CCN_CCR.ICE == 1 && cachedArea(area);
@@ -200,9 +200,9 @@ private:
         else
         {
             const TLB_Entry *entry;
-            u32 err = mmu_instruction_lookup(address, &entry, physAddr);
+            MmuError err = mmu_instruction_lookup(address, &entry, physAddr);
 
-            if (err != MMU_ERROR_NONE)
+            if (err != MmuError::NONE)
                 return err;
             //0X & User mode-> protection violation
@@ -211,11 +211,11 @@ private:
             {
                 u32 md = entry->Data.PR >> 1;
                 if (md == 0)
-                    return MMU_ERROR_PROTECTED;
+                    return MmuError::PROTECTED;
             }
             cached = cached && entry->Data.C == 1;
         }
-        return MMU_ERROR_NONE;
+        return MmuError::NONE;
     }
     std::array<cache_line, 256> lines;
@@ -235,8 +235,8 @@ public:
         u32 physAddr;
         bool cacheOn = false;
         bool copyBack;
-        u32 err = translateAddress<T, MMU_TT_DREAD>(address, physAddr, cacheOn, copyBack);
-        if (err != MMU_ERROR_NONE)
+        MmuError err = translateAddress<T, MMU_TT_DREAD>(address, physAddr, cacheOn, copyBack);
+        if (err != MmuError::NONE)
             mmu_raise_exception(err, address, MMU_TT_DREAD);
         if (!cacheOn)
@@ -264,8 +264,8 @@ public:
         u32 physAddr = 0;
         bool cacheOn = false;
         bool copyBack = false;
-        u32 err = translateAddress<T, MMU_TT_DWRITE>(address, physAddr, cacheOn, copyBack);
-        if (err != MMU_ERROR_NONE)
+        MmuError err = translateAddress<T, MMU_TT_DWRITE>(address, physAddr, cacheOn, copyBack);
+        if (err != MmuError::NONE)
             mmu_raise_exception(err, address, MMU_TT_DWRITE);
         if (!cacheOn)
@@ -312,8 +312,8 @@ public:
         u32 physAddr;
         bool cached = false;
         bool copyBack;
-        u32 err = translateAddress<u8, MMU_TT_DWRITE>(address, physAddr, cached, copyBack);
-        if (err != MMU_ERROR_NONE)
+        MmuError err = translateAddress<u8, MMU_TT_DWRITE>(address, physAddr, cached, copyBack);
+        if (err != MmuError::NONE)
             mmu_raise_exception(err, address, MMU_TT_DWRITE);
         if (!cached)
@@ -336,8 +336,8 @@ public:
         u32 physAddr;
         bool cached;
         bool copyBack;
-        u32 err = translateAddress<u8, MMU_TT_DREAD>(address, physAddr, cached, copyBack);
-        if (err != MMU_ERROR_NONE || !cached)
+        MmuError err = translateAddress<u8, MMU_TT_DREAD>(address, physAddr, cached, copyBack);
+        if (err != MmuError::NONE || !cached)
             // ignore address translation errors
             return;
@@ -396,11 +396,11 @@ public:
         u32 physAddr;
         bool cached = false;
         bool copyBack;
-        u32 err = translateAddress<u8, MMU_TT_DREAD>(data & ~0x3ff, physAddr, cached, copyBack);
-        if (err == MMU_ERROR_TLB_MISS)
+        MmuError err = translateAddress<u8, MMU_TT_DREAD>(data & ~0x3ff, physAddr, cached, copyBack);
+        if (err == MmuError::TLB_MISS)
             // Ignore the write
             return;
-        if (err != MMU_ERROR_NONE)
+        if (err != MmuError::NONE)
             mmu_raise_exception(err, data & ~0x3ff, MMU_TT_DREAD);
         u32 tag = (physAddr >> 10) & 0x7ffff;
@@ -491,16 +491,16 @@ private:
     }
     template<class T, u32 ACCESS>
-    u32 translateAddress(u32 address, u32& physAddr, bool& cached, bool& copyBack)
+    MmuError translateAddress(u32 address, u32& physAddr, bool& cached, bool& copyBack)
     {
         // Alignment errors
         if (address & (sizeof(T) - 1))
-            return MMU_ERROR_BADADDR;
+            return MmuError::BADADDR;
         if (ACCESS == MMU_TT_DWRITE && (address & 0xFC000000) == 0xE0000000)
         {
             // Store queues
             u32 rv;
-            u32 lookup = mmu_full_SQ<MMU_TT_DWRITE>(address, rv);
+            MmuError lookup = mmu_full_SQ<MMU_TT_DWRITE>(address, rv);
             physAddr = address;
             return lookup;
@@ -510,7 +510,7 @@ private:
         // kernel mem protected in user mode
         if (userMode && (address & 0x80000000))
-            return MMU_ERROR_BADADDR;
+            return MmuError::BADADDR;
         cached = CCN_CCR.OCE == 1 && cachedArea(area);
         if (ACCESS == MMU_TT_DWRITE)
@@ -526,9 +526,9 @@ private:
         else
         {
             const TLB_Entry *entry;
-            u32 lookup = mmu_full_lookup(address, &entry, physAddr);
-            if (lookup != MMU_ERROR_NONE)
+            MmuError lookup = mmu_full_lookup(address, &entry, physAddr);
+            if (lookup != MmuError::NONE)
                 return lookup;
             //0X & User mode-> protection violation
@@ -537,16 +537,16 @@ private:
             {
                 u32 md = entry->Data.PR >> 1;
                 if (md == 0)
-                    return MMU_ERROR_PROTECTED;
+                    return MmuError::PROTECTED;
             }
             //X0 -> read only
             //X1 -> read/write , can be FW
             if (ACCESS == MMU_TT_DWRITE)
             {
                 if ((entry->Data.PR & 1) == 0)
-                    return MMU_ERROR_PROTECTED;
+                    return MmuError::PROTECTED;
                 if (entry->Data.D == 0)
-                    return MMU_ERROR_FIRSTWRITE;
+                    return MmuError::FIRSTWRITE;
                 copyBack = copyBack && entry->Data.WT == 0;
             }
             cached = cached && entry->Data.C == 1;
@@ -555,7 +555,7 @@ private:
             physAddr |= 0xF0000000;
         }
-        return MMU_ERROR_NONE;
+        return MmuError::NONE;
     }
     std::array<cache_line, 512> lines;


@@ -41,26 +41,26 @@ TEST_F(MmuTest, TestUntranslated)
 {
     u32 pa;
     // P1
-    int err = mmu_data_translation<MMU_TT_DREAD>(0x80000000, pa);
-    ASSERT_EQ(MMU_ERROR_NONE, err);
+    MmuError err = mmu_data_translation<MMU_TT_DREAD>(0x80000000, pa);
+    ASSERT_EQ(MmuError::NONE, err);
     ASSERT_EQ(0x80000000u, pa);
     err = mmu_instruction_translation(0x80000002, pa);
-    ASSERT_EQ(MMU_ERROR_NONE, err);
+    ASSERT_EQ(MmuError::NONE, err);
     ASSERT_EQ(0x80000002u, pa);
     // P2
     err = mmu_data_translation<MMU_TT_DWRITE>(0xA0001234, pa);
-    ASSERT_EQ(MMU_ERROR_NONE, err);
+    ASSERT_EQ(MmuError::NONE, err);
     ASSERT_EQ(0xA0001234u, pa);
     // P4
     err = mmu_data_translation<MMU_TT_DREAD>(0xFF0000CC, pa);
-    ASSERT_EQ(MMU_ERROR_NONE, err);
+    ASSERT_EQ(MmuError::NONE, err);
     ASSERT_EQ(0xFF0000CCu, pa);
     // 7C000000 to 7FFFFFFF in P0/U0 not translated
     err = mmu_data_translation<MMU_TT_DREAD>(0x7D000088, pa);
-    ASSERT_EQ(MMU_ERROR_NONE, err);
+    ASSERT_EQ(MmuError::NONE, err);
     ASSERT_EQ(0x7D000088u, pa);
     // SQ write
@@ -71,7 +71,7 @@ TEST_F(MmuTest, TestUntranslated)
     UTLB[0].Data.D = 1;
     UTLB_Sync(0);
     err = mmu_data_translation<MMU_TT_DWRITE>(0xE2000004, pa);
-    ASSERT_EQ(MMU_ERROR_NONE, err);
+    ASSERT_EQ(MmuError::NONE, err);
     ASSERT_EQ(0xE2000004, pa);
 }
@@ -86,16 +86,16 @@ TEST_F(MmuTest, TestTranslated)
     UTLB[0].Data.D = 1;
     UTLB[0].Data.PPN = 0x0C000000 >> 10;
     UTLB_Sync(0);
-    int err = mmu_data_translation<MMU_TT_DREAD>(0x02000044, pa);
-    ASSERT_EQ(MMU_ERROR_NONE, err);
+    MmuError err = mmu_data_translation<MMU_TT_DREAD>(0x02000044, pa);
+    ASSERT_EQ(MmuError::NONE, err);
     ASSERT_EQ(0x0C000044u, pa);
     err = mmu_data_translation<MMU_TT_DWRITE>(0x02000045, pa);
-    ASSERT_EQ(MMU_ERROR_NONE, err);
+    ASSERT_EQ(MmuError::NONE, err);
     ASSERT_EQ(0x0C000045u, pa);
     err = mmu_instruction_translation(0x02000046, pa);
-    ASSERT_EQ(MMU_ERROR_NONE, err);
+    ASSERT_EQ(MmuError::NONE, err);
     ASSERT_EQ(0x0C000046u, pa);
     // ASID match
@@ -103,10 +103,10 @@ TEST_F(MmuTest, TestTranslated)
     CCN_PTEH.ASID = 13;
     UTLB_Sync(0);
     err = mmu_data_translation<MMU_TT_DWRITE>(0x02000222, pa);
-    ASSERT_EQ(MMU_ERROR_NONE, err);
+    ASSERT_EQ(MmuError::NONE, err);
     ASSERT_EQ(0x0C000222u, pa);
     err = mmu_instruction_translation(0x02000232, pa);
-    ASSERT_EQ(MMU_ERROR_NONE, err);
+    ASSERT_EQ(MmuError::NONE, err);
     ASSERT_EQ(0x0C000232u, pa);
     // Shared entry
@@ -114,14 +114,14 @@ TEST_F(MmuTest, TestTranslated)
     CCN_PTEH.ASID = 14;
     UTLB_Sync(0);
     err = mmu_data_translation<MMU_TT_DWRITE>(0x02000222, pa);
-    ASSERT_EQ(MMU_ERROR_NONE, err);
+    ASSERT_EQ(MmuError::NONE, err);
     ASSERT_EQ(0x0C000222u, pa);
     // 1C000000-1FFFFFF mapped to P4
     UTLB[0].Data.PPN = 0x1C000000 >> 10;
     UTLB_Sync(0);
     err = mmu_data_translation<MMU_TT_DWRITE>(0x02000222, pa);
-    ASSERT_EQ(MMU_ERROR_NONE, err);
+    ASSERT_EQ(MmuError::NONE, err);
     ASSERT_EQ(0xFC000222u, pa);
 }
@@ -136,15 +136,15 @@ TEST_F(MmuTest, TestMiss)
     UTLB[0].Data.PPN = 0x0C000000 >> 10;
     UTLB_Sync(0);
     // no match
-    int err = mmu_data_translation<MMU_TT_DREAD>(0x02100044, pa);
-    ASSERT_EQ(MMU_ERROR_TLB_MISS, err);
+    MmuError err = mmu_data_translation<MMU_TT_DREAD>(0x02100044, pa);
+    ASSERT_EQ(MmuError::TLB_MISS, err);
 #ifndef FAST_MMU
     // entry not valid
     UTLB[0].Data.V = 0;
     UTLB_Sync(0);
     err = mmu_data_translation<MMU_TT_DREAD>(0x02000044, pa);
-    ASSERT_EQ(MMU_ERROR_TLB_MISS, err);
+    ASSERT_EQ(MmuError::TLB_MISS, err);
 #endif
     // asid mismatch
@@ -153,20 +153,20 @@ TEST_F(MmuTest, TestMiss)
     CCN_PTEH.ASID = 14;
     UTLB_Sync(0);
     err = mmu_data_translation<MMU_TT_DREAD>(0x02000044, pa);
-    ASSERT_EQ(MMU_ERROR_TLB_MISS, err);
+    ASSERT_EQ(MmuError::TLB_MISS, err);
 }
 
 TEST_F(MmuTest, TestErrors)
 {
 #ifndef FAST_MMU
     u32 pa;
-    int err;
+    MmuError err;
     // P4 not executable
     err = mmu_instruction_translation(0xFF00008A, pa);
-    ASSERT_EQ(MMU_ERROR_BADADDR, err);
+    ASSERT_EQ(MmuError::BADADDR, err);
     err = mmu_instruction_translation(0xE0000004, pa);
-    ASSERT_EQ(MMU_ERROR_BADADDR, err);
+    ASSERT_EQ(MmuError::BADADDR, err);
 #endif
     // unaligned address
@@ -186,33 +186,33 @@ TEST_F(MmuTest, TestErrors)
     UTLB[0].Data.PPN = 0x0A000000 >> 10;
     // no access in user mode
     err = mmu_data_translation<MMU_TT_DREAD>(0x04000040, pa);
-    ASSERT_EQ(MMU_ERROR_PROTECTED, err);
+    ASSERT_EQ(MmuError::PROTECTED, err);
     err = mmu_data_translation<MMU_TT_DWRITE>(0x04000040, pa);
-    ASSERT_EQ(MMU_ERROR_PROTECTED, err);
+    ASSERT_EQ(MmuError::PROTECTED, err);
     err = mmu_instruction_translation(0x04000042, pa);
-    ASSERT_EQ(MMU_ERROR_PROTECTED, err);
+    ASSERT_EQ(MmuError::PROTECTED, err);
     // read-only access in priv mode
     p_sh4rcb->cntx.sr.MD = 1;
     err = mmu_data_translation<MMU_TT_DREAD>(0x04000040, pa);
-    ASSERT_EQ(MMU_ERROR_NONE, err);
+    ASSERT_EQ(MmuError::NONE, err);
     err = mmu_data_translation<MMU_TT_DWRITE>(0x04000040, pa);
-    ASSERT_EQ(MMU_ERROR_PROTECTED, err);
+    ASSERT_EQ(MmuError::PROTECTED, err);
     // read-only access in user & priv mode
     UTLB[0].Data.PR = 2;
     p_sh4rcb->cntx.sr.MD = 0;
     err = mmu_data_translation<MMU_TT_DWRITE>(0x04000040, pa);
-    ASSERT_EQ(MMU_ERROR_PROTECTED, err);
+    ASSERT_EQ(MmuError::PROTECTED, err);
     p_sh4rcb->cntx.sr.MD = 1;
     err = mmu_data_translation<MMU_TT_DWRITE>(0x04000040, pa);
-    ASSERT_EQ(MMU_ERROR_PROTECTED, err);
+    ASSERT_EQ(MmuError::PROTECTED, err);
     UTLB[0].Data.PR = 3;
     // kernel address in user mode
     p_sh4rcb->cntx.sr.MD = 0;
     err = mmu_data_translation<MMU_TT_DWRITE>(0xA4000004, pa);
-    ASSERT_EQ(MMU_ERROR_BADADDR, err);
+    ASSERT_EQ(MmuError::BADADDR, err);
     err = mmu_instruction_translation(0xA4000006, pa);
-    ASSERT_EQ(MMU_ERROR_BADADDR, err);
+    ASSERT_EQ(MmuError::BADADDR, err);
     // multiple hits
     memset(ITLB, 0, sizeof(ITLB));
@@ -223,14 +223,14 @@ TEST_F(MmuTest, TestErrors)
     UTLB[1].Data.D = 1;
     UTLB[1].Data.PPN = 0x0C000000 >> 10;
     err = mmu_data_translation<MMU_TT_DREAD>(0x04000040, pa);
-    ASSERT_EQ(MMU_ERROR_TLB_MHIT, err);
+    ASSERT_EQ(MmuError::TLB_MHIT, err);
     err = mmu_instruction_translation(0x04000042, pa);
-    ASSERT_EQ(MMU_ERROR_TLB_MHIT, err);
+    ASSERT_EQ(MmuError::TLB_MHIT, err);
     UTLB[1].Data.V = 0;
     // first write
     UTLB[0].Data.D = 0;
     err = mmu_data_translation<MMU_TT_DWRITE>(0x04000224, pa);
-    ASSERT_EQ(MMU_ERROR_FIRSTWRITE, err);
+    ASSERT_EQ(MmuError::FIRSTWRITE, err);
 #endif
 }