wince: enable code block protection

Flyinghead 2019-06-19 22:55:47 +02:00
parent cb338ae106
commit 6ed098a929
8 changed files with 25 additions and 35 deletions

View File

@@ -249,6 +249,7 @@ static u32 vmem32_map_mmu(u32 address, bool write)
     if (offset == -1)
         return VMEM32_ERROR_NOT_MAPPED;
+    bool allow_write = (entry->Data.PR & 1) != 0;
     if (offset >= MAP_VRAM_START_OFFSET && offset < MAP_VRAM_START_OFFSET + VRAM_SIZE)
     {
         // Check vram protected regions
@@ -262,7 +263,7 @@ static u32 vmem32_map_mmu(u32 address, bool write)
             return MMU_ERROR_NONE;
         }
-        verify(vmem32_map_buffer(vpn, page_size, offset, page_size, (entry->Data.PR & 1) != 0) != NULL);
+        verify(vmem32_map_buffer(vpn, page_size, offset, page_size, allow_write) != NULL);
         u32 end = start + page_size;
         const vector<vram_lock>& blocks = vram_blocks[start / VRAM_PROT_SEGMENT];
@@ -281,12 +282,14 @@ static u32 vmem32_map_mmu(u32 address, bool write)
         vramlist_lock.Unlock();
     }
-    else if (offset >= MAP_VRAM_START_OFFSET && offset < MAP_VRAM_START_OFFSET + VRAM_SIZE)
+    else if (offset >= MAP_RAM_START_OFFSET && offset < MAP_RAM_START_OFFSET + RAM_SIZE)
     {
         // Check system RAM protected pages
-        if (bm_IsRamPageProtected(ppn))
+        u32 start = offset - MAP_RAM_START_OFFSET;
+        if (bm_IsRamPageProtected(start) && allow_write)
         {
-            if (sram_mapped_pages[ppn >> 15] & (1 << ((ppn >> 12) & 7)))
+            if (sram_mapped_pages[start >> 15] & (1 << ((start >> 12) & 7)))
             {
                 // Already mapped => write access
                 vmem32_unprotect_buffer(address & ~PAGE_MASK, PAGE_SIZE);
@@ -294,16 +297,16 @@ static u32 vmem32_map_mmu(u32 address, bool write)
             }
             else
             {
-                sram_mapped_pages[ppn >> 15] |= (1 << ((ppn >> 12) & 7));
+                sram_mapped_pages[start >> 15] |= (1 << ((start >> 12) & 7));
                 verify(vmem32_map_buffer(vpn, page_size, offset, page_size, false) != NULL);
             }
         }
         else
-            verify(vmem32_map_buffer(vpn, page_size, offset, page_size, (entry->Data.PR & 1) != 0) != NULL);
+            verify(vmem32_map_buffer(vpn, page_size, offset, page_size, allow_write) != NULL);
     }
     else
         // Not vram or system ram
-        verify(vmem32_map_buffer(vpn, page_size, offset, page_size, (entry->Data.PR & 1) != 0) != NULL);
+        verify(vmem32_map_buffer(vpn, page_size, offset, page_size, allow_write) != NULL);
     return MMU_ERROR_NONE;
 }
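
Net effect of the hunks above: the TLB entry's PR bit is read once into allow_write, the second range check now tests the system RAM window instead of repeating the VRAM test, and a RAM page is write-protected only when it both holds translated code and is writable through this TLB entry. A condensed sketch of that decision, using an invented helper name:

    // Sketch only: summarizes the mapping decision above.
    // host_page_writable() is an invented helper, not flycast API.
    static bool host_page_writable(u32 ram_offset, bool allow_write)
    {
        // A page holding translated SH4 code is mapped read-only even when
        // the TLB permits writes: the first write faults, the handler
        // discards the affected blocks, and the page is then unprotected.
        if (bm_IsRamPageProtected(ram_offset) && allow_write)
            return false;
        // Otherwise the TLB entry's PR bit decides.
        return allow_write;
    }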

View File

@@ -255,7 +255,7 @@ bool rend_frame(TA_context* ctx, bool draw_osd) {
     bool proc = renderer->Process(ctx);
     if ((ctx->rend.isRTT || ctx->rend.isRenderFramebuffer) && swap_pending)
     {
-        // If there a frame swap pending, we want to do it now.
+        // If there is a frame swap pending, we want to do it now.
         // The current frame "swapping" detection mechanism (using FB_R_SOF1) doesn't work
         // if a RTT frame is rendered in between.
         renderer->Present();

View File

@@ -653,10 +653,13 @@ void bm_Sort()
 RuntimeBlockInfo::~RuntimeBlockInfo()
 {
-    if (read_only)
-        protected_blocks--;
-    else
-        unprotected_blocks--;
+    if (sh4_code_size != 0)
+    {
+        if (read_only)
+            protected_blocks--;
+        else
+            unprotected_blocks--;
+    }
 }
 
 void RuntimeBlockInfo::AddRef(RuntimeBlockInfoPtr other)
@@ -699,8 +702,7 @@ void RuntimeBlockInfo::Discard()
 void RuntimeBlockInfo::SetProtectedFlags()
 {
     // Don't write protect rom
-    // TODO Enable this for wince. hangs 4x4 EVO
-    if (mmu_enabled() || (!IsOnRam(addr) /*|| (vaddr & 0x1FFF0000) == 0x0C000000 */))
+    if (!IsOnRam(addr))
     {
         this->read_only = false;
         unprotected_blocks++;
@@ -746,7 +748,7 @@ void bm_RamWriteAccess(u32 addr)
     verify(block_list.empty());
 }
 
-bool bm_RamWriteAccess(void *p, unat pc)
+bool bm_RamWriteAccess(void *p)
 {
     if (_nvmem_4gb_space())
     {
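
These block-manager hunks carry the commit's main change: SetProtectedFlags() no longer bails out when the MMU is enabled, so WinCE titles get their RAM code pages write-protected as well, and the destructor only rolls the protected/unprotected counters back for blocks that actually completed setup. A sketch of that bookkeeping invariant, with BlockCounters invented for illustration:

    // Illustrative only: models the counter discipline above, not real code.
    #include <cstdint>

    struct BlockCounters {
        int protected_blocks = 0;
        int unprotected_blocks = 0;

        // SetProtectedFlags(): every fully built block is counted once.
        void add(bool read_only) {
            read_only ? ++protected_blocks : ++unprotected_blocks;
        }
        // ~RuntimeBlockInfo(): a block whose setup never completed
        // (sh4_code_size still 0) was never counted, so skip the decrement.
        void remove(bool read_only, uint32_t sh4_code_size) {
            if (sh4_code_size == 0)
                return;
            read_only ? --protected_blocks : --unprotected_blocks;
        }
    };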

View File

@@ -106,7 +106,7 @@ void bm_Init();
 void bm_Term();
 void bm_vmem_pagefill(void** ptr,u32 PAGE_SZ);
 
-bool bm_RamWriteAccess(void *p, unat pc);
+bool bm_RamWriteAccess(void *p);
 void bm_RamWriteAccess(u32 addr);
 static inline bool bm_IsRamPageProtected(u32 addr)
 {
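
The header mirrors the new single-argument signature used by the fault handlers; the unused pc parameter is gone. The body of bm_IsRamPageProtected() is cut off in this view, but conceptually it answers whether a RAM page contains translated code. A purely hypothetical sketch of such a query (ram_code_bits is an invented name, and the bitmap layout is assumed):

    // Hypothetical sketch, not the real body: assumes a one-bit-per-4KB-page
    // bitmap named ram_code_bits (invented) covering system RAM.
    extern uint32_t ram_code_bits[];   // bit set => page holds translated code

    static inline bool bm_IsRamPageProtected_sketch(uint32_t addr)
    {
        addr &= RAM_MASK;              // offset within system RAM
        uint32_t page = addr >> 12;    // 4 KB page index
        return (ram_code_bits[page >> 5] >> (page & 31)) & 1;
    }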

View File

@@ -237,6 +237,7 @@ bool RuntimeBlockInfo::Setup(u32 rpc,fpscr_t rfpu_cfg)
 {
     staging_runs=addr=lookups=runs=host_code_size=0;
     guest_cycles=guest_opcodes=host_opcodes=0;
+    sh4_code_size = 0;
     pBranchBlock=pNextBlock=0;
     code=0;
     has_jcond=false;

View File

@@ -71,7 +71,7 @@ void fault_handler (int sn, siginfo_t * si, void *segfault_ctx)
     if (vmem32_handle_signal(si->si_addr, write, exception_pc))
         return;
 #endif
-    if (bm_RamWriteAccess(si->si_addr, ctx.pc))
+    if (bm_RamWriteAccess(si->si_addr))
         return;
     if (VramLockedWrite((u8*)si->si_addr) || BM_LockedWrite((u8*)si->si_addr))
         return;
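
The POSIX handler here (and the Win32 one at the end of this commit) dispatches faults through the same chain: vmem32 first, then protected code pages, then locked VRAM and block memory. A condensed restatement with an invented wrapper name:

    // Illustrative wrapper over the dispatch order above (invented name);
    // both the POSIX and Win32 handlers follow this same chain.
    static bool handle_access_fault(void *fault_addr, bool write, uint32_t pc)
    {
        if (vmem32_handle_signal(fault_addr, write, pc))
            return true;   // resolved by the 4GB virtual memory mapper
        if (bm_RamWriteAccess(fault_addr))
            return true;   // write to a protected code page: blocks discarded
        if (VramLockedWrite((uint8_t*)fault_addr) || BM_LockedWrite((uint8_t*)fault_addr))
            return true;   // locked VRAM / block memory
        return false;      // genuine fault: fall through to default handling
    }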

View File

@@ -287,7 +287,7 @@ bool mem_region_release(void *start, size_t len);
 void *mem_region_map_file(void *file_handle, void *dest, size_t len, size_t offset, bool readwrite);
 bool mem_region_unmap_file(void *start, size_t len);
 
-// Locked memory class, used for texture invalidation purposes.
+// Locked memory class, was used for texture invalidation purposes.
 class VLockedMemory {
 public:
     u8* data;
@@ -300,23 +300,7 @@ public:
     void *getPtr() const { return data; }
     unsigned getSize() const { return size; }
 
-#ifdef TARGET_NO_EXCEPTIONS
-    void LockRegion(unsigned offset, unsigned size_bytes) {}
-    void UnLockRegion(unsigned offset, unsigned size_bytes) {}
-#else
-    void LockRegion(unsigned offset, unsigned size_bytes)
-    {
-        mem_region_lock(&data[offset], size_bytes);
-    }
-    void UnLockRegion(unsigned offset, unsigned size_bytes)
-    {
-        mem_region_unlock(&data[offset], size_bytes);
-    }
-#endif
-
     void Zero() {
-        UnLockRegion(0, size);
         memset(data, 0, size);
     }
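
With code-page protection now handled by vmem32 and the block manager, VLockedMemory loses its per-region lock helpers and its comment moves to the past tense. Assuming the removed lines shown are the whole story, the class presumably reduces to roughly:

    // Rough shape after the removal (assumption based on the lines shown).
    #include <cstdint>
    #include <cstring>
    using u8 = uint8_t;

    class VLockedMemory {
    public:
        u8* data;
        unsigned size;

        void *getPtr() const { return data; }
        unsigned getSize() const { return size; }
        void Zero() { memset(data, 0, size); }  // no UnLockRegion() needed any more
    };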

View File

@@ -150,7 +150,7 @@ LONG ExeptionHandler(EXCEPTION_POINTERS *ExceptionInfo)
     if (vmem32_handle_signal(address, write, 0))
         return EXCEPTION_CONTINUE_EXECUTION;
 #endif
-    if (bm_RamWriteAccess(address, 0))
+    if (bm_RamWriteAccess(address))
     {
         return EXCEPTION_CONTINUE_EXECUTION;
     }