rename _vmem to addrspace and move platform vmem stuff to virtmem ns

use namespaces
simplify names
no functional changes
Flyinghead 2023-01-29 18:48:33 +01:00
parent c472d60f80
commit 143073b61d
52 changed files with 1111 additions and 1091 deletions
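At a glance, the renames (a convenience summary assembled from the diffs below, not part of the commit itself):

_vmem_init / _vmem_term / _vmem_init_mappings   -> addrspace::init / term / initMappings
_vmem_register_handler / _vmem_map_handler      -> addrspace::registerHandler / mapHandler
_vmem_map_block / _vmem_mirror_mapping          -> addrspace::mapBlock / mirrorMapping
_vmem_readt / _vmem_writet / _vmem_ReadMem32 .. -> addrspace::readt / writet / read32 ..
_nvmem_enabled() / virt_ram_base                -> addrspace::virtmemEnabled() / addrspace::ram_base
_vmem_protect_vram / _vmem_unprotect_vram       -> addrspace::protectVram / unprotectVram
vmem_platform_* (platform layer)                -> virtmem::* in oslib/virtmem.h
mem_region_lock / mem_region_unlock             -> virtmem::region_lock / region_unlock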

View File

@ -783,8 +783,8 @@ target_sources(${PROJECT_NAME} PRIVATE
core/hw/maple/maple_if.cpp
core/hw/maple/maple_if.h
core/hw/maple/maple_jvs.cpp
core/hw/mem/_vmem.cpp
core/hw/mem/_vmem.h
core/hw/mem/addrspace.cpp
core/hw/mem/addrspace.h
core/hw/mem/mem_watch.cpp
core/hw/mem/mem_watch.h
core/hw/modem/modem.cpp
@ -1005,6 +1005,7 @@ target_sources(${PROJECT_NAME} PRIVATE
core/oslib/directory.h
core/oslib/host_context.h
core/oslib/oslib.h
core/oslib/virtmem.h
core/lua/lua.cpp
core/lua/lua.h)

View File

@ -371,7 +371,7 @@ void dc_reset(bool hard)
NetworkHandshake::term();
if (hard)
{
_vmem_unprotect_vram(0, VRAM_SIZE);
addrspace::unprotectVram(0, VRAM_SIZE);
memwatch::elanWatcher.unprotectMem(0, 0xffffffff);
}
sh4_sched_reset(hard);
@ -385,7 +385,7 @@ void dc_reset(bool hard)
static void setPlatform(int platform)
{
if (VRAM_SIZE != 0)
_vmem_unprotect_vram(0, VRAM_SIZE);
addrspace::unprotectVram(0, VRAM_SIZE);
elan::ERAM_SIZE = 0;
switch (platform)
{
@ -426,7 +426,7 @@ static void setPlatform(int platform)
settings.platform.ram_mask = settings.platform.ram_size - 1;
settings.platform.vram_mask = settings.platform.vram_size - 1;
settings.platform.aram_mask = settings.platform.aram_size - 1;
_vmem_init_mappings();
addrspace::initMappings();
}
void Emulator::init()
@ -666,7 +666,7 @@ void Emulator::term()
pvr::term();
mem_Term();
_vmem_release();
addrspace::release();
state = Terminated;
}
}

View File

@ -23,7 +23,7 @@
#include "dsp.h"
#include "aica.h"
#include "aica_if.h"
#include "hw/mem/_vmem.h"
#include "oslib/virtmem.h"
#include <aarch32/macro-assembler-aarch32.h>
using namespace vixl::aarch32;
@ -295,7 +295,7 @@ public:
FinalizeCode();
vmem_platform_flush_cache(
virtmem::flush_cache(
GetBuffer()->GetStartAddress<char*>(), GetBuffer()->GetEndAddress<char*>(),
GetBuffer()->GetStartAddress<void*>(), GetBuffer()->GetEndAddress<void*>());
}
@ -407,7 +407,7 @@ void recompile()
void recInit()
{
u8 *pCodeBuffer;
bool rc = vmem_platform_prepare_jit_block(DynCode, CodeSize, (void**)&pCodeBuffer);
bool rc = virtmem::prepare_jit_block(DynCode, CodeSize, (void**)&pCodeBuffer);
verify(rc);
}

View File

@ -24,7 +24,7 @@
#include "dsp.h"
#include "aica.h"
#include "aica_if.h"
#include "hw/mem/_vmem.h"
#include "oslib/virtmem.h"
#include <aarch64/macro-assembler-aarch64.h>
using namespace vixl::aarch64;
@ -337,7 +337,7 @@ public:
FinalizeCode();
vmem_platform_flush_cache(
virtmem::flush_cache(
GetBuffer()->GetStartAddress<char*>() + rx_offset, GetBuffer()->GetEndAddress<char*>() + rx_offset,
GetBuffer()->GetStartAddress<void*>(), GetBuffer()->GetEndAddress<void*>());
}
@ -451,9 +451,9 @@ void recompile()
void recInit()
{
#ifdef FEAT_NO_RWX_PAGES
bool rc = vmem_platform_prepare_jit_block(DynCode, CodeSize, (void**)&pCodeBuffer, &rx_offset);
bool rc = virtmem::prepare_jit_block(DynCode, CodeSize, (void**)&pCodeBuffer, &rx_offset);
#else
bool rc = vmem_platform_prepare_jit_block(DynCode, CodeSize, (void**)&pCodeBuffer);
bool rc = virtmem::prepare_jit_block(DynCode, CodeSize, (void**)&pCodeBuffer);
#endif
verify(rc);
#if defined(TARGET_IPHONE) || defined(TARGET_ARM_MAC)
@ -469,10 +469,10 @@ void recTerm()
#endif
#ifdef FEAT_NO_RWX_PAGES
if (pCodeBuffer != nullptr)
vmem_platform_release_jit_block(DynCode, pCodeBuffer, CodeSize);
virtmem::release_jit_block(DynCode, pCodeBuffer, CodeSize);
#else
if (pCodeBuffer != nullptr && pCodeBuffer != DynCode)
vmem_platform_release_jit_block(pCodeBuffer, CodeSize);
virtmem::release_jit_block(pCodeBuffer, CodeSize);
#endif
pCodeBuffer = nullptr;
}
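These recInit/recTerm pairs all follow the same convention: on platforms that forbid pages that are both writable and executable (FEAT_NO_RWX_PAGES), prepare_jit_block hands back a writable view of the buffer plus an rx_offset to its executable alias; code is emitted through the writable view, then the instruction cache is flushed at the executable address, as Finalize() does above. A minimal sketch of the pattern, reusing this file's DynCode/CodeSize (buf and rxOffset are illustrative local names):

u8 *buf;              // writable view of the JIT buffer
ptrdiff_t rxOffset;   // distance from the RW view to the RX alias
if (virtmem::prepare_jit_block(DynCode, CodeSize, (void **)&buf, &rxOffset))
{
	// ... emit instructions into buf[0..n) ...
	virtmem::flush_cache(buf + rxOffset, buf + rxOffset + CodeSize, // icache: RX alias
			buf, buf + CodeSize);                                   // dcache: RW view
	// execution then happens at buf + rxOffset
}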

View File

@ -25,7 +25,7 @@
#include "dsp.h"
#include "aica.h"
#include "aica_if.h"
#include "hw/mem/_vmem.h"
#include "oslib/virtmem.h"
namespace dsp
{
@ -412,30 +412,30 @@ private:
void recompile()
{
vmem_platform_jit_set_exec(pCodeBuffer, CodeBufferSize, false);
virtmem::jit_set_exec(pCodeBuffer, CodeBufferSize, false);
X64DSPAssembler assembler(pCodeBuffer, CodeBufferSize);
assembler.Compile(&state);
vmem_platform_jit_set_exec(pCodeBuffer, CodeBufferSize, true);
virtmem::jit_set_exec(pCodeBuffer, CodeBufferSize, true);
}
void recInit()
{
#ifdef FEAT_NO_RWX_PAGES
if (!vmem_platform_prepare_jit_block(CodeBuffer, CodeBufferSize, (void**)&pCodeBuffer, &rx_offset))
if (!virtmem::prepare_jit_block(CodeBuffer, CodeBufferSize, (void**)&pCodeBuffer, &rx_offset))
#else
if (!vmem_platform_prepare_jit_block(CodeBuffer, CodeBufferSize, (void**)&pCodeBuffer))
if (!virtmem::prepare_jit_block(CodeBuffer, CodeBufferSize, (void**)&pCodeBuffer))
#endif
die("vmem_platform_prepare_jit_block failed in x64 dsp");
die("virtmem::prepare_jit_block failed in x64 dsp");
}
void recTerm()
{
#ifdef FEAT_NO_RWX_PAGES
if (pCodeBuffer != nullptr)
vmem_platform_release_jit_block(CodeBuffer, pCodeBuffer, CodeBufferSize);
virtmem::release_jit_block(CodeBuffer, pCodeBuffer, CodeBufferSize);
#else
if (pCodeBuffer != nullptr && pCodeBuffer != CodeBuffer)
vmem_platform_release_jit_block(pCodeBuffer, CodeBufferSize);
virtmem::release_jit_block(pCodeBuffer, CodeBufferSize);
#endif
pCodeBuffer = nullptr;
}

View File

@ -25,7 +25,7 @@
#include "dsp.h"
#include "aica.h"
#include "aica_if.h"
#include "hw/mem/_vmem.h"
#include "oslib/virtmem.h"
namespace dsp
{
@ -379,7 +379,7 @@ void recompile()
void recInit()
{
if (!vmem_platform_prepare_jit_block(CodeBuffer, sizeof(CodeBuffer), (void**)&pCodeBuffer))
if (!virtmem::prepare_jit_block(CodeBuffer, sizeof(CodeBuffer), (void**)&pCodeBuffer))
die("mprotect failed in x86 dsp");
}

View File

@ -24,7 +24,7 @@
#include "arm7_rec.h"
#include "arm7.h"
#include "hw/aica/aica_if.h"
#include "hw/mem/_vmem.h"
#include "oslib/virtmem.h"
#include "arm_mem.h"
#if 0
@ -673,9 +673,9 @@ void flush()
void init()
{
#ifdef FEAT_NO_RWX_PAGES
bool rc = vmem_platform_prepare_jit_block(ARM7_TCB, ICacheSize, (void**)&ICache, &rx_offset);
bool rc = virtmem::prepare_jit_block(ARM7_TCB, ICacheSize, (void**)&ICache, &rx_offset);
#else
bool rc = vmem_platform_prepare_jit_block(ARM7_TCB, ICacheSize, (void**)&ICache);
bool rc = virtmem::prepare_jit_block(ARM7_TCB, ICacheSize, (void**)&ICache);
#endif
verify(rc);
@ -695,10 +695,10 @@ void term()
{
#ifdef FEAT_NO_RWX_PAGES
if (ICache != nullptr)
vmem_platform_release_jit_block(ARM7_TCB, ICache, ICacheSize);
virtmem::release_jit_block(ARM7_TCB, ICache, ICacheSize);
#else
if (ICache != nullptr && ICache != ARM7_TCB)
vmem_platform_release_jit_block(ICache, ICacheSize);
virtmem::release_jit_block(ICache, ICacheSize);
#endif
ICache = nullptr;
}

View File

@ -21,8 +21,8 @@
#if HOST_CPU == CPU_ARM && FEAT_AREC != DYNAREC_NONE
#include "arm7_rec.h"
#include "hw/mem/_vmem.h"
#include "rec-ARM/arm_unwind.h"
#include "oslib/virtmem.h"
#include <aarch32/macro-assembler-aarch32.h>
using namespace vixl::aarch32;
@ -39,7 +39,7 @@ public:
void Finalize() {
FinalizeCode();
vmem_platform_flush_cache(GetBuffer()->GetStartAddress<void *>(), GetCursorAddress<u8 *>() - 1,
virtmem::flush_cache(GetBuffer()->GetStartAddress<void *>(), GetCursorAddress<u8 *>() - 1,
GetBuffer()->GetStartAddress<void *>(), GetCursorAddress<u8 *>() - 1);
}
};

View File

@ -23,12 +23,11 @@
#include <sstream>
#include "arm7_rec.h"
#include "hw/mem/_vmem.h"
#include <aarch64/macro-assembler-aarch64.h>
using namespace vixl::aarch64;
//#include <aarch32/disasm-aarch32.h>
#include "rec-ARM64/arm64_unwind.h"
#include "stdclass.h"
#include "oslib/virtmem.h"
namespace aicaarm {
@ -624,7 +623,7 @@ public:
FinalizeCode();
verify((size_t)GetBuffer()->GetCursorOffset() <= GetBuffer()->GetCapacity());
vmem_platform_flush_cache(
virtmem::flush_cache(
recompiler::writeToExec(GetBuffer()->GetStartAddress<void*>()), recompiler::writeToExec(GetBuffer()->GetEndAddress<void*>()),
GetBuffer()->GetStartAddress<void*>(), GetBuffer()->GetEndAddress<void*>());
recompiler::advance(GetBuffer()->GetSizeInBytes());
@ -728,7 +727,7 @@ public:
size_t unwindSize = unwinder.end(recompiler::spaceLeft() - 128, (ptrdiff_t)recompiler::writeToExec(nullptr));
verify(unwindSize <= 128);
vmem_platform_flush_cache(
virtmem::flush_cache(
recompiler::writeToExec(GetBuffer()->GetStartAddress<void*>()), recompiler::writeToExec(GetBuffer()->GetEndAddress<void*>()),
GetBuffer()->GetStartAddress<void*>(), GetBuffer()->GetEndAddress<void*>());
recompiler::advance(GetBuffer()->GetSizeInBytes());

View File

@ -27,7 +27,7 @@ using namespace Xbyak::util;
#include "arm7_rec.h"
#include "oslib/oslib.h"
#include "hw/mem/_vmem.h"
#include "oslib/virtmem.h"
namespace aicaarm {
@ -998,25 +998,25 @@ void arm7backend_compile(const std::vector<ArmOp>& block_ops, u32 cycles)
{
void* protStart = recompiler::currentCode();
size_t protSize = recompiler::spaceLeft();
vmem_platform_jit_set_exec(protStart, protSize, false);
virtmem::jit_set_exec(protStart, protSize, false);
Arm7Compiler assembler;
assembler.compile(block_ops, cycles);
vmem_platform_jit_set_exec(protStart, protSize, true);
virtmem::jit_set_exec(protStart, protSize, true);
}
void arm7backend_flush()
{
void* protStart = recompiler::currentCode();
size_t protSize = recompiler::spaceLeft();
vmem_platform_jit_set_exec(protStart, protSize, false);
virtmem::jit_set_exec(protStart, protSize, false);
unwinder.clear();
Arm7Compiler assembler;
assembler.generateMainLoop();
vmem_platform_jit_set_exec(protStart, protSize, true);
virtmem::jit_set_exec(protStart, protSize, true);
}
}

View File

@ -12,7 +12,7 @@
#include "hw/modem/modem.h"
#include "hw/naomi/naomi.h"
#include "hw/pvr/pvr_mem.h"
#include "hw/mem/_vmem.h"
#include "hw/mem/addrspace.h"
#include "hw/bba/bba.h"
#include "cfg/option.h"
@ -278,12 +278,12 @@ void sh4_area0_Term()
//AREA 0
static _vmem_handler area0_handler;
static _vmem_handler area0_mirror_handler;
static addrspace::handler area0_handler;
static addrspace::handler area0_mirror_handler;
void map_area0_init()
{
#define registerHandler(system, mirror) _vmem_register_handler \
#define registerHandler(system, mirror) addrspace::registerHandler \
(ReadMem_area0<u8, system, mirror>, ReadMem_area0<u16, system, mirror>, ReadMem_area0<u32, system, mirror>, \
WriteMem_area0<u8, system, mirror>, WriteMem_area0<u16, system, mirror>, WriteMem_area0<u32, system, mirror>)
@ -313,8 +313,8 @@ void map_area0(u32 base)
{
verify(base<0xE0);
_vmem_map_handler(area0_handler, 0x00 | base, 0x01 | base);
_vmem_map_handler(area0_mirror_handler, 0x02 | base, 0x03 | base);
addrspace::mapHandler(area0_handler, 0x00 | base, 0x01 | base);
addrspace::mapHandler(area0_mirror_handler, 0x02 | base, 0x03 | base);
//0x0240 to 0x03FF mirrors 0x0040 to 0x01FF (no flashrom or bios)
//0x0200 to 0x023F are unused

View File

@ -1,590 +0,0 @@
#include "_vmem.h"
#include "hw/aica/aica_if.h"
#include "hw/pvr/pvr_mem.h"
#include "hw/pvr/elan.h"
#include "hw/sh4/dyna/blockmanager.h"
#include "hw/sh4/sh4_mem.h"
#include "oslib/oslib.h"
#define HANDLER_MAX 0x1F
#define HANDLER_COUNT (HANDLER_MAX+1)
//top registered handler
static _vmem_handler _vmem_lrp;
//handler tables
static _vmem_ReadMem8FP* _vmem_RF8[HANDLER_COUNT];
static _vmem_WriteMem8FP* _vmem_WF8[HANDLER_COUNT];
static _vmem_ReadMem16FP* _vmem_RF16[HANDLER_COUNT];
static _vmem_WriteMem16FP* _vmem_WF16[HANDLER_COUNT];
static _vmem_ReadMem32FP* _vmem_RF32[HANDLER_COUNT];
static _vmem_WriteMem32FP* _vmem_WF32[HANDLER_COUNT];
//upper 8b of the address
static void* _vmem_MemInfo_ptr[0x100];
#define MAP_RAM_START_OFFSET 0
#define MAP_VRAM_START_OFFSET (MAP_RAM_START_OFFSET+RAM_SIZE)
#define MAP_ARAM_START_OFFSET (MAP_VRAM_START_OFFSET+VRAM_SIZE)
#define MAP_ERAM_START_OFFSET (MAP_ARAM_START_OFFSET+ARAM_SIZE)
void* _vmem_read_const(u32 addr,bool& ismem,u32 sz)
{
u32 page=addr>>24;
unat iirf=(unat)_vmem_MemInfo_ptr[page];
void* ptr=(void*)(iirf&~HANDLER_MAX);
if (ptr==0)
{
ismem=false;
const unat id=iirf;
if (sz==1)
{
return (void*)_vmem_RF8[id];
}
else if (sz==2)
{
return (void*)_vmem_RF16[id];
}
else if (sz==4)
{
return (void*)_vmem_RF32[id];
}
else
{
die("Invalid size");
return nullptr;
}
}
else
{
ismem=true;
addr<<=iirf;
addr>>=iirf;
return &(((u8*)ptr)[addr]);
}
}
void* _vmem_write_const(u32 addr,bool& ismem,u32 sz)
{
u32 page=addr>>24;
unat iirf=(unat)_vmem_MemInfo_ptr[page];
void* ptr=(void*)(iirf&~HANDLER_MAX);
if (ptr==0)
{
ismem=false;
const unat id=iirf;
if (sz==1)
{
return (void*)_vmem_WF8[id];
}
else if (sz==2)
{
return (void*)_vmem_WF16[id];
}
else if (sz==4)
{
return (void*)_vmem_WF32[id];
}
else
{
die("Invalid size");
return nullptr;
}
}
else
{
ismem=true;
addr<<=iirf;
addr>>=iirf;
return &(((u8*)ptr)[addr]);
}
}
template<typename T, typename Trv>
Trv DYNACALL _vmem_readt(u32 addr)
{
constexpr u32 sz = sizeof(T);
u32 page=addr>>24; //1 op, shift/extract
unat iirf=(unat)_vmem_MemInfo_ptr[page]; //2 ops, insert + read [vmem table will be on reg ]
void* ptr=(void*)(iirf&~HANDLER_MAX); //2 ops, and // 1 op insert
if (likely(ptr!=0))
{
addr<<=iirf;
addr>>=iirf;
T data=(*((T*)&(((u8*)ptr)[addr])));
return data;
}
else
{
const u32 id=iirf;
if (sz==1)
{
return (T)_vmem_RF8[id](addr);
}
else if (sz==2)
{
return (T)_vmem_RF16[id](addr);
}
else if (sz==4)
{
return _vmem_RF32[id](addr);
}
else if (sz==8)
{
T rv=_vmem_RF32[id](addr);
rv|=(T)((u64)_vmem_RF32[id](addr+4)<<32);
return rv;
}
else
{
die("Invalid size");
return 0;
}
}
}
template u8 DYNACALL _vmem_readt<u8, u8>(u32 addr);
template u16 DYNACALL _vmem_readt<u16, u16>(u32 addr);
template u32 DYNACALL _vmem_readt<u32, u32>(u32 addr);
template u64 DYNACALL _vmem_readt<u64, u64>(u32 addr);
template<typename T>
void DYNACALL _vmem_writet(u32 addr, T data)
{
constexpr u32 sz = sizeof(T);
u32 page=addr>>24;
unat iirf=(unat)_vmem_MemInfo_ptr[page];
void* ptr=(void*)(iirf&~HANDLER_MAX);
if (likely(ptr!=0))
{
addr<<=iirf;
addr>>=iirf;
*((T*)&(((u8*)ptr)[addr]))=data;
}
else
{
const u32 id=iirf;
if (sz==1)
{
_vmem_WF8[id](addr,data);
}
else if (sz==2)
{
_vmem_WF16[id](addr,data);
}
else if (sz==4)
{
_vmem_WF32[id](addr,data);
}
else if (sz==8)
{
_vmem_WF32[id](addr,(u32)data);
_vmem_WF32[id](addr+4,(u32)((u64)data>>32));
}
else
{
die("Invalid size");
}
}
}
template void DYNACALL _vmem_writet<u8>(u32 addr, u8 data);
template void DYNACALL _vmem_writet<u16>(u32 addr, u16 data);
template void DYNACALL _vmem_writet<u32>(u32 addr, u32 data);
template void DYNACALL _vmem_writet<u64>(u32 addr, u64 data);
//ReadMem/WriteMem functions
//ReadMem
u32 DYNACALL _vmem_ReadMem8SX32(u32 Address) { return _vmem_readt<s8,s32>(Address); }
u32 DYNACALL _vmem_ReadMem16SX32(u32 Address) { return _vmem_readt<s16,s32>(Address); }
u8 DYNACALL _vmem_ReadMem8(u32 Address) { return _vmem_readt<u8,u8>(Address); }
u16 DYNACALL _vmem_ReadMem16(u32 Address) { return _vmem_readt<u16,u16>(Address); }
u32 DYNACALL _vmem_ReadMem32(u32 Address) { return _vmem_readt<u32,u32>(Address); }
u64 DYNACALL _vmem_ReadMem64(u32 Address) { return _vmem_readt<u64,u64>(Address); }
//WriteMem
void DYNACALL _vmem_WriteMem8(u32 Address,u8 data) { _vmem_writet<u8>(Address,data); }
void DYNACALL _vmem_WriteMem16(u32 Address,u16 data) { _vmem_writet<u16>(Address,data); }
void DYNACALL _vmem_WriteMem32(u32 Address,u32 data) { _vmem_writet<u32>(Address,data); }
void DYNACALL _vmem_WriteMem64(u32 Address,u64 data) { _vmem_writet<u64>(Address,data); }
#define MEM_ERROR_RETURN_VALUE 0
//default read handlers
static u8 DYNACALL _vmem_ReadMem8_not_mapped(u32 addresss)
{
INFO_LOG(MEMORY, "[sh4]Read8 from 0x%X, not mapped [_vmem default handler]", addresss);
return (u8)MEM_ERROR_RETURN_VALUE;
}
static u16 DYNACALL _vmem_ReadMem16_not_mapped(u32 addresss)
{
INFO_LOG(MEMORY, "[sh4]Read16 from 0x%X, not mapped [_vmem default handler]", addresss);
return (u16)MEM_ERROR_RETURN_VALUE;
}
static u32 DYNACALL _vmem_ReadMem32_not_mapped(u32 address)
{
INFO_LOG(MEMORY, "[sh4]Read32 from 0x%X, not mapped [_vmem default handler]", address);
return (u32)MEM_ERROR_RETURN_VALUE;
}
//default write handers
static void DYNACALL _vmem_WriteMem8_not_mapped(u32 addresss,u8 data)
{
INFO_LOG(MEMORY, "[sh4]Write8 to 0x%X=0x%X, not mapped [_vmem default handler]", addresss, data);
}
static void DYNACALL _vmem_WriteMem16_not_mapped(u32 addresss,u16 data)
{
INFO_LOG(MEMORY, "[sh4]Write16 to 0x%X=0x%X, not mapped [_vmem default handler]", addresss, data);
}
static void DYNACALL _vmem_WriteMem32_not_mapped(u32 addresss,u32 data)
{
INFO_LOG(MEMORY, "[sh4]Write32 to 0x%X=0x%X, not mapped [_vmem default handler]", addresss, data);
}
//code to register handlers
//0 is considered error :)
_vmem_handler _vmem_register_handler(
_vmem_ReadMem8FP* read8,
_vmem_ReadMem16FP* read16,
_vmem_ReadMem32FP* read32,
_vmem_WriteMem8FP* write8,
_vmem_WriteMem16FP* write16,
_vmem_WriteMem32FP* write32
)
{
_vmem_handler rv=_vmem_lrp++;
verify(rv<HANDLER_COUNT);
_vmem_RF8[rv] =read8==0 ? _vmem_ReadMem8_not_mapped : read8;
_vmem_RF16[rv]=read16==0 ? _vmem_ReadMem16_not_mapped : read16;
_vmem_RF32[rv]=read32==0 ? _vmem_ReadMem32_not_mapped : read32;
_vmem_WF8[rv] =write8==0 ? _vmem_WriteMem8_not_mapped : write8;
_vmem_WF16[rv]=write16==0? _vmem_WriteMem16_not_mapped: write16;
_vmem_WF32[rv]=write32==0? _vmem_WriteMem32_not_mapped: write32;
return rv;
}
static u32 FindMask(u32 msk)
{
u32 s=-1;
u32 rv=0;
while(msk!=s>>rv)
rv++;
return rv;
}
//map a registered handler to a mem region
void _vmem_map_handler(_vmem_handler Handler,u32 start,u32 end)
{
verify(start<0x100);
verify(end<0x100);
verify(start<=end);
for (u32 i=start;i<=end;i++)
{
_vmem_MemInfo_ptr[i] = (u8*)nullptr + Handler;
}
}
//map a memory block to a mem region
void _vmem_map_block(void* base,u32 start,u32 end,u32 mask)
{
verify(start<0x100);
verify(end<0x100);
verify(start<=end);
verify((0xFF & (unat)base)==0);
verify(base!=0);
u32 j=0;
for (u32 i=start;i<=end;i++)
{
_vmem_MemInfo_ptr[i]=&(((u8*)base)[j&mask]) + FindMask(mask) - (j & mask);
j+=0x1000000;
}
}
void _vmem_mirror_mapping(u32 new_region,u32 start,u32 size)
{
u32 end=start+size-1;
verify(start<0x100);
verify(end<0x100);
verify(start<=end);
verify(!((start>=new_region) && (end<=new_region)));
u32 j=new_region;
for (u32 i=start;i<=end;i++)
{
_vmem_MemInfo_ptr[j&0xFF]=_vmem_MemInfo_ptr[i&0xFF];
j++;
}
}
//init/reset/term
void _vmem_init()
{
//clear read tables
memset(_vmem_RF8,0,sizeof(_vmem_RF8));
memset(_vmem_RF16,0,sizeof(_vmem_RF16));
memset(_vmem_RF32,0,sizeof(_vmem_RF32));
//clear write tables
memset(_vmem_WF8,0,sizeof(_vmem_WF8));
memset(_vmem_WF16,0,sizeof(_vmem_WF16));
memset(_vmem_WF32,0,sizeof(_vmem_WF32));
//clear meminfo table
memset(_vmem_MemInfo_ptr,0,sizeof(_vmem_MemInfo_ptr));
//reset registration index
_vmem_lrp=0;
//register default functions (0) for slot 0
_vmem_handler defaultHandler = _vmem_register_handler(0,0,0,0,0,0);
verify(defaultHandler == 0);
}
void _vmem_term()
{
}
u8* virt_ram_base;
static bool vmemAvailable = false;
static void *malloc_pages(size_t size)
{
return allocAligned(PAGE_SIZE, size);
}
static void free_pages(void *p)
{
freeAligned(p);
}
#if FEAT_SHREC != DYNAREC_NONE
// Resets the FPCB table (by either clearing it to the default val
// or by flushing it and making it fault on access again.
void _vmem_bm_reset()
{
// If we allocated it via vmem:
if (virt_ram_base)
vmem_platform_reset_mem(p_sh4rcb->fpcb, sizeof(p_sh4rcb->fpcb));
else
// We allocated it via a regular malloc/new/whatever on the heap
bm_vmem_pagefill((void**)p_sh4rcb->fpcb, sizeof(p_sh4rcb->fpcb));
}
// This gets called whenever there is a pagefault, it is possible that it lands
// on the fpcb memory range, which is allocated on miss. Returning true tells the
// fault handler this was us, and that the page is resolved and can continue the execution.
bool BM_LockedWrite(u8* address) {
if (!virt_ram_base)
return false; // No vmem, therefore not us who caused this.
uintptr_t ptrint = (uintptr_t)address;
uintptr_t start = (uintptr_t)p_sh4rcb->fpcb;
uintptr_t end = start + sizeof(p_sh4rcb->fpcb);
if (ptrint >= start && ptrint < end) {
// Alloc the page then and initialize it to default values
void *aligned_addr = (void*)(ptrint & (~PAGE_MASK));
vmem_platform_ondemand_page(aligned_addr, PAGE_SIZE);
bm_vmem_pagefill((void**)aligned_addr, PAGE_SIZE);
return true;
}
return false;
}
#endif
bool _vmem_reserve()
{
static_assert((sizeof(Sh4RCB) % PAGE_SIZE) == 0, "sizeof(Sh4RCB) not multiple of PAGE_SIZE");
if (vmemAvailable)
return true;
// Use vmem only if settings mandate so, and if we have proper exception handlers.
#if !defined(TARGET_NO_EXCEPTIONS)
if (!settings.dynarec.disable_nvmem)
vmemAvailable = vmem_platform_init((void**)&virt_ram_base, (void**)&p_sh4rcb, RAM_SIZE_MAX + VRAM_SIZE_MAX + ARAM_SIZE_MAX + elan::ERAM_SIZE_MAX);
#endif
return true;
}
static void _vmem_term_mappings()
{
if (!vmemAvailable)
{
free_pages(p_sh4rcb);
p_sh4rcb = nullptr;
free_pages(mem_b.data);
mem_b.data = nullptr;
free_pages(vram.data);
vram.data = nullptr;
free_pages(aica_ram.data);
aica_ram.data = nullptr;
free_pages(elan::RAM);
elan::RAM = nullptr;
}
}
void _vmem_init_mappings()
{
_vmem_term_mappings();
// Fallback to statically allocated buffers, this results in slow-ops being generated.
if (!vmemAvailable)
{
WARN_LOG(VMEM, "Warning! nvmem is DISABLED (due to failure or not being built-in");
virt_ram_base = nullptr;
// Allocate it all and initialize it.
p_sh4rcb = (Sh4RCB*)malloc_pages(sizeof(Sh4RCB));
#if FEAT_SHREC != DYNAREC_NONE
bm_vmem_pagefill((void**)p_sh4rcb->fpcb, sizeof(p_sh4rcb->fpcb));
#endif
memset(&p_sh4rcb->cntx, 0, sizeof(p_sh4rcb->cntx));
mem_b.size = RAM_SIZE;
mem_b.data = (u8*)malloc_pages(RAM_SIZE);
vram.size = VRAM_SIZE;
vram.data = (u8*)malloc_pages(VRAM_SIZE);
aica_ram.size = ARAM_SIZE;
aica_ram.data = (u8*)malloc_pages(ARAM_SIZE);
elan::RAM = (u8*)malloc_pages(elan::ERAM_SIZE);
}
else {
NOTICE_LOG(VMEM, "Info: nvmem is enabled");
INFO_LOG(VMEM, "Info: p_sh4rcb: %p virt_ram_base: %p", p_sh4rcb, virt_ram_base);
// Map the different parts of the memory file into the new memory range we got.
const vmem_mapping mem_mappings[] = {
{0x00000000, 0x00800000, 0, 0, false}, // Area 0 -> unused
{0x00800000, 0x01000000, MAP_ARAM_START_OFFSET, ARAM_SIZE, false}, // Aica
{0x01000000, 0x04000000, 0, 0, false}, // More unused
{0x04000000, 0x05000000, MAP_VRAM_START_OFFSET, VRAM_SIZE, true}, // Area 1 (vram, 16MB, wrapped on DC as 2x8MB)
{0x05000000, 0x06000000, 0, 0, false}, // 32 bit path (unused)
{0x06000000, 0x07000000, MAP_VRAM_START_OFFSET, VRAM_SIZE, true}, // VRAM mirror
{0x07000000, 0x08000000, 0, 0, false}, // 32 bit path (unused) mirror
{0x08000000, 0x0A000000, 0, 0, false}, // Area 2
{0x0A000000, 0x0C000000, MAP_ERAM_START_OFFSET, elan::ERAM_SIZE, true}, // Area 2 (Elan RAM)
{0x0C000000, 0x10000000, MAP_RAM_START_OFFSET, RAM_SIZE, true}, // Area 3 (main RAM + 3 mirrors)
{0x10000000, 0x20000000, 0, 0, false}, // Area 4-7 (unused)
// This is outside of the 512MB addr space. We map 8MB in all cases to help some games read past the end of aica ram
{0x20000000, 0x20800000, MAP_ARAM_START_OFFSET, ARAM_SIZE, true}, // writable aica ram
};
vmem_platform_create_mappings(&mem_mappings[0], ARRAY_SIZE(mem_mappings));
// Point buffers to actual data pointers
aica_ram.data = &virt_ram_base[0x20000000]; // Points to the writable AICA addrspace
vram.data = &virt_ram_base[0x04000000]; // Points to first vram mirror (writable and lockable)
mem_b.data = &virt_ram_base[0x0C000000]; // Main memory, first mirror
elan::RAM = &virt_ram_base[0x0A000000];
aica_ram.size = ARAM_SIZE;
vram.size = VRAM_SIZE;
mem_b.size = RAM_SIZE;
}
// Clear out memory
aica_ram.Zero();
vram.Zero();
mem_b.Zero();
NOTICE_LOG(VMEM, "BASE %p RAM(%d MB) %p VRAM64(%d MB) %p ARAM(%d MB) %p",
virt_ram_base,
RAM_SIZE / 1024 / 1024, mem_b.data,
VRAM_SIZE / 1024 / 1024, vram.data,
ARAM_SIZE / 1024 / 1024, aica_ram.data);
}
void _vmem_release()
{
if (virt_ram_base)
{
vmem_platform_destroy();
virt_ram_base = nullptr;
}
else
{
_vmem_unprotect_vram(0, VRAM_SIZE);
_vmem_term_mappings();
}
vmemAvailable = false;
}
void _vmem_protect_vram(u32 addr, u32 size)
{
addr &= VRAM_MASK;
if (_nvmem_enabled())
{
mem_region_lock(virt_ram_base + 0x04000000 + addr, size); // P0
//mem_region_lock(virt_ram_base + 0x06000000 + addr, size); // P0 - mirror
if (VRAM_SIZE == 0x800000)
{
// wraps when only 8MB VRAM
mem_region_lock(virt_ram_base + 0x04000000 + addr + VRAM_SIZE, size); // P0 wrap
//mem_region_lock(virt_ram_base + 0x06000000 + addr + VRAM_SIZE, size); // P0 mirror wrap
}
}
else
{
mem_region_lock(&vram[addr], size);
}
}
void _vmem_unprotect_vram(u32 addr, u32 size)
{
addr &= VRAM_MASK;
if (_nvmem_enabled())
{
mem_region_unlock(virt_ram_base + 0x04000000 + addr, size); // P0
//mem_region_unlock(virt_ram_base + 0x06000000 + addr, size); // P0 - mirror
if (VRAM_SIZE == 0x800000)
{
// wraps when only 8MB VRAM
mem_region_unlock(virt_ram_base + 0x04000000 + addr + VRAM_SIZE, size); // P0 wrap
//mem_region_unlock(virt_ram_base + 0x06000000 + addr + VRAM_SIZE, size); // P0 mirror wrap
}
}
else
{
mem_region_unlock(&vram[addr], size);
}
}
u32 _vmem_get_vram_offset(void *addr)
{
if (_nvmem_enabled())
{
ptrdiff_t offset = (u8*)addr - virt_ram_base;
if (offset < 0 || offset >= 0x20000000)
return -1;
if ((offset >> 24) != 4)
return -1;
return offset & VRAM_MASK;
}
else
{
ptrdiff_t offset = (u8*)addr - &vram[0];
if (offset < 0 || offset >= VRAM_SIZE)
return -1;
return (u32)offset;
}
}

View File

@ -1,110 +0,0 @@
#pragma once
#include "types.h"
struct vmem_mapping {
u64 start_address, end_address;
u64 memoffset, memsize;
bool allow_writes;
};
// Platform specific vmemory API
// To initialize (maybe) the vmem subsystem
bool vmem_platform_init(void **vmem_base_addr, void **sh4rcb_addr, size_t ramSize);
// To reset the on-demand allocated pages.
void vmem_platform_reset_mem(void *ptr, unsigned size_bytes);
// To handle a fault&allocate an ondemand page.
void vmem_platform_ondemand_page(void *address, unsigned size_bytes);
// To create the mappings in the address space.
void vmem_platform_create_mappings(const vmem_mapping *vmem_maps, unsigned nummaps);
// Just tries to wipe as much as possible in the relevant area.
void vmem_platform_destroy();
// Given a block of data in the .text section, prepares it for JIT action.
// both code_area and size are page aligned. Returns success.
bool vmem_platform_prepare_jit_block(void *code_area, size_t size, void **code_area_rwx);
// Same as above but uses two address spaces one with RX and RW protections.
// Note: this function doesnt have to be implemented, it's a fallback for the above one.
bool vmem_platform_prepare_jit_block(void *code_area, size_t size, void **code_area_rw, ptrdiff_t *rx_offset);
// This might not need an implementation (ie x86/64 cpus).
void vmem_platform_flush_cache(void *icache_start, void *icache_end, void *dcache_start, void *dcache_end);
// Change a code buffer permissions from r-x to/from rw-
void vmem_platform_jit_set_exec(void* code, size_t size, bool enable);
// Release a jit block previously allocated by vmem_platform_prepare_jit_block
void vmem_platform_release_jit_block(void *code_area, size_t size);
// Release a jit block previously allocated by vmem_platform_prepare_jit_block (with dual RW and RX areas)
void vmem_platform_release_jit_block(void *code_area1, void *code_area2, size_t size);
// Note: if you want to disable vmem magic in any given platform, implement the
// above functions as empty functions and make vmem_platform_init return false.
//Typedef's
//ReadMem
typedef u8 DYNACALL _vmem_ReadMem8FP(u32 Address);
typedef u16 DYNACALL _vmem_ReadMem16FP(u32 Address);
typedef u32 DYNACALL _vmem_ReadMem32FP(u32 Address);
//WriteMem
typedef void DYNACALL _vmem_WriteMem8FP(u32 Address,u8 data);
typedef void DYNACALL _vmem_WriteMem16FP(u32 Address,u16 data);
typedef void DYNACALL _vmem_WriteMem32FP(u32 Address,u32 data);
//our own handle type :)
typedef u32 _vmem_handler;
//Functions
//init/reset/term
void _vmem_init();
void _vmem_term();
void _vmem_init_mappings();
//functions to register and map handlers/memory
_vmem_handler _vmem_register_handler(_vmem_ReadMem8FP* read8,_vmem_ReadMem16FP* read16,_vmem_ReadMem32FP* read32, _vmem_WriteMem8FP* write8,_vmem_WriteMem16FP* write16,_vmem_WriteMem32FP* write32);
#define _vmem_register_handler_Template(read,write) _vmem_register_handler \
(read<u8>,read<u16>,read<u32>, \
write<u8>,write<u16>,write<u32>)
void _vmem_map_handler(_vmem_handler Handler,u32 start,u32 end);
void _vmem_map_block(void* base,u32 start,u32 end,u32 mask);
void _vmem_mirror_mapping(u32 new_region,u32 start,u32 size);
#define _vmem_map_block_mirror(base, start, end, blck_size) { \
u32 block_size = (blck_size) >> 24; \
for (u32 _maip = (start); _maip <= (end); _maip += block_size) \
_vmem_map_block((base), _maip, _maip + block_size - 1, blck_size - 1); \
}
//ReadMem(s)
u32 DYNACALL _vmem_ReadMem8SX32(u32 Address);
u32 DYNACALL _vmem_ReadMem16SX32(u32 Address);
u8 DYNACALL _vmem_ReadMem8(u32 Address);
u16 DYNACALL _vmem_ReadMem16(u32 Address);
u32 DYNACALL _vmem_ReadMem32(u32 Address);
u64 DYNACALL _vmem_ReadMem64(u32 Address);
template<typename T, typename Trv> Trv DYNACALL _vmem_readt(u32 addr);
//WriteMem(s)
void DYNACALL _vmem_WriteMem8(u32 Address,u8 data);
void DYNACALL _vmem_WriteMem16(u32 Address,u16 data);
void DYNACALL _vmem_WriteMem32(u32 Address,u32 data);
void DYNACALL _vmem_WriteMem64(u32 Address,u64 data);
template<typename T> void DYNACALL _vmem_writet(u32 addr, T data);
//should be called at start up to ensure it will succeed :)
bool _vmem_reserve();
void _vmem_release();
//dynarec helpers
void* _vmem_read_const(u32 addr,bool& ismem,u32 sz);
void* _vmem_write_const(u32 addr,bool& ismem,u32 sz);
extern u8* virt_ram_base;
static inline bool _nvmem_enabled() {
return virt_ram_base != 0;
}
void _vmem_bm_reset();
void _vmem_protect_vram(u32 addr, u32 size);
void _vmem_unprotect_vram(u32 addr, u32 size);
u32 _vmem_get_vram_offset(void *addr);
bool BM_LockedWrite(u8* address);
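The platform-specific declarations above move to the virtmem namespace in oslib/virtmem.h, which is not itself shown in this diff. A sketch of what it plausibly contains, reconstructed from the deleted header and the call sites changed in this commit — names and parameter types are inferred, not authoritative:

namespace virtmem
{
struct Mapping {
	u64 start_address, end_address;
	u64 memoffset, memsize;
	bool allow_writes;
};

bool init(void **vmem_base_addr, void **sh4rcb_addr, size_t ramSize);
void destroy();
void reset_mem(void *ptr, unsigned size_bytes);
void ondemand_page(void *address, unsigned size_bytes);
void create_mappings(const Mapping *vmem_maps, unsigned nummaps);
// JIT helpers: single RWX mapping, or dual RW/RX mappings on no-RWX platforms
bool prepare_jit_block(void *code_area, size_t size, void **code_area_rwx);
bool prepare_jit_block(void *code_area, size_t size, void **code_area_rw, ptrdiff_t *rx_offset);
void release_jit_block(void *code_area, size_t size);
void release_jit_block(void *code_area1, void *code_area2, size_t size);
void jit_set_exec(void *code, size_t size, bool enable);
void flush_cache(void *icache_start, void *icache_end, void *dcache_start, void *dcache_end);
// page protection (replaces mem_region_lock/mem_region_unlock)
void region_lock(void *start, size_t len);
void region_unlock(void *start, size_t len);
} // namespace virtmem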

core/hw/mem/addrspace.cpp Normal file
View File

@ -0,0 +1,558 @@
#include "addrspace.h"
#include "hw/aica/aica_if.h"
#include "hw/pvr/pvr_mem.h"
#include "hw/pvr/elan.h"
#include "hw/sh4/dyna/blockmanager.h"
#include "hw/sh4/sh4_mem.h"
#include "oslib/oslib.h"
#include "oslib/virtmem.h"
#include <cassert>
namespace addrspace
{
#define HANDLER_MAX 0x1F
#define HANDLER_COUNT (HANDLER_MAX+1)
//top registered handler
static handler lastRegisteredHandler;
//handler tables
static ReadMem8FP* RF8[HANDLER_COUNT];
static WriteMem8FP* WF8[HANDLER_COUNT];
static ReadMem16FP* RF16[HANDLER_COUNT];
static WriteMem16FP* WF16[HANDLER_COUNT];
static ReadMem32FP* RF32[HANDLER_COUNT];
static WriteMem32FP* WF32[HANDLER_COUNT];
//upper 8b of the address
static void* memInfo_ptr[0x100];
#define MAP_RAM_START_OFFSET 0
#define MAP_VRAM_START_OFFSET (MAP_RAM_START_OFFSET+RAM_SIZE)
#define MAP_ARAM_START_OFFSET (MAP_VRAM_START_OFFSET+VRAM_SIZE)
#define MAP_ERAM_START_OFFSET (MAP_ARAM_START_OFFSET+ARAM_SIZE)
void *readConst(u32 addr, bool& ismem, u32 sz)
{
u32 page = addr >> 24;
uintptr_t iirf = (uintptr_t)memInfo_ptr[page];
void *ptr = (void *)(iirf & ~HANDLER_MAX);
if (ptr == nullptr)
{
ismem = false;
const uintptr_t id = iirf;
switch (sz)
{
case 1:
return (void *)RF8[id];
case 2:
return (void *)RF16[id];
case 4:
return (void *)RF32[id];
default:
die("Invalid size");
return nullptr;
}
}
else
{
ismem = true;
addr <<= iirf;
addr >>= iirf;
return &(((u8 *)ptr)[addr]);
}
}
void *writeConst(u32 addr, bool& ismem, u32 sz)
{
u32 page = addr >> 24;
uintptr_t iirf = (uintptr_t)memInfo_ptr[page];
void *ptr = (void *)(iirf & ~HANDLER_MAX);
if (ptr == nullptr)
{
ismem = false;
const uintptr_t id = iirf;
switch (sz)
{
case 1:
return (void *)WF8[id];
case 2:
return (void *)WF16[id];
case 4:
return (void *)WF32[id];
default:
die("Invalid size");
return nullptr;
}
}
else
{
ismem = true;
addr <<= iirf;
addr >>= iirf;
return &(((u8 *)ptr)[addr]);
}
}
template<typename T>
T DYNACALL readt(u32 addr)
{
constexpr u32 sz = sizeof(T);
u32 page = addr >> 24; //1 op, shift/extract
uintptr_t iirf = (uintptr_t)memInfo_ptr[page]; //2 ops, insert + read [vmem table will be on reg ]
void *ptr = (void *)(iirf & ~HANDLER_MAX); //2 ops, and // 1 op insert
if (likely(ptr != nullptr))
{
addr <<= iirf;
addr >>= iirf;
return *(T *)&((u8 *)ptr)[addr];
}
else
{
const u32 id = iirf;
switch (sz)
{
case 1:
return (T)RF8[id](addr);
case 2:
return (T)RF16[id](addr);
case 4:
return (T)RF32[id](addr);
case 8:
{
T rv = RF32[id](addr);
rv |= (T)((u64)RF32[id](addr + 4) << 32);
return rv;
}
default:
die("Invalid size");
return 0;
}
}
}
template u8 DYNACALL readt<u8>(u32 addr);
template u16 DYNACALL readt<u16>(u32 addr);
template u32 DYNACALL readt<u32>(u32 addr);
template u64 DYNACALL readt<u64>(u32 addr);
template<typename T>
void DYNACALL writet(u32 addr, T data)
{
constexpr u32 sz = sizeof(T);
u32 page = addr >> 24;
uintptr_t iirf = (uintptr_t)memInfo_ptr[page];
void *ptr = (void *)(iirf & ~HANDLER_MAX);
if (likely(ptr != nullptr))
{
addr <<= iirf;
addr >>= iirf;
*(T *)&((u8 *)ptr)[addr] = data;
}
else
{
const u32 id = iirf;
switch (sz)
{
case 1:
WF8[id](addr, data);
break;
case 2:
WF16[id](addr, data);
break;
case 4:
WF32[id](addr, data);
break;
case 8:
WF32[id](addr, (u32)data);
WF32[id](addr + 4, (u32)((u64)data >> 32));
break;
default:
die("Invalid size");
break;
}
}
}
template void DYNACALL writet<u8>(u32 addr, u8 data);
template void DYNACALL writet<u16>(u32 addr, u16 data);
template void DYNACALL writet<u32>(u32 addr, u32 data);
template void DYNACALL writet<u64>(u32 addr, u64 data);
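// How the fast path above decodes memInfo_ptr (see mapHandler/mapBlock below):
// an entry is either a bare handler id (0..HANDLER_MAX, so the pointer part
// masked with ~HANDLER_MAX is null), or "block base + shift", where the base
// is 256-byte aligned and the shift comes from FindMask(). "addr <<= iirf;
// addr >>= iirf" then strips the high address bits so the access wraps inside
// the block: e.g. a 16MB RAM block (mask 0x00FFFFFF) gives shift 8, so the
// top 8 bits are discarded and accesses wrap within 16MB.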
//ReadMem/WriteMem functions
//ReadMem
u8 DYNACALL read8(u32 Address) { return readt<u8>(Address); }
u16 DYNACALL read16(u32 Address) { return readt<u16>(Address); }
u32 DYNACALL read32(u32 Address) { return readt<u32>(Address); }
u64 DYNACALL read64(u32 Address) { return readt<u64>(Address); }
//WriteMem
void DYNACALL write8(u32 Address,u8 data) { writet<u8>(Address,data); }
void DYNACALL write16(u32 Address,u16 data) { writet<u16>(Address,data); }
void DYNACALL write32(u32 Address,u32 data) { writet<u32>(Address,data); }
void DYNACALL write64(u32 Address,u64 data) { writet<u64>(Address,data); }
#define MEM_ERROR_RETURN_VALUE 0
//default read handler
template<typename T>
static T DYNACALL readMemNotMapped(u32 address)
{
INFO_LOG(MEMORY, "[sh4]Read%d from %08x, not mapped (default handler)", (int)sizeof(T), address);
return (T)MEM_ERROR_RETURN_VALUE;
}
//default write handler
template<typename T>
static void DYNACALL writeMemNotMapped(u32 address, T data)
{
INFO_LOG(MEMORY, "[sh4]Write%d to %08x = %x, not mapped (default handler)", (int)sizeof(T), address, data);
}
//code to register handlers
//0 is considered error
handler registerHandler(
ReadMem8FP *read8,
ReadMem16FP *read16,
ReadMem32FP *read32,
WriteMem8FP *write8,
WriteMem16FP *write16,
WriteMem32FP *write32)
{
handler rv = lastRegisteredHandler++;
assert(rv < HANDLER_COUNT);
RF8[rv] = read8 == nullptr ? readMemNotMapped<u8> : read8;
RF16[rv] = read16 == nullptr ? readMemNotMapped<u16> : read16;
RF32[rv] = read32 == nullptr ? readMemNotMapped<u32> : read32;
WF8[rv] = write8 == nullptr ? writeMemNotMapped<u8> : write8;
WF16[rv] = write16 == nullptr? writeMemNotMapped<u16> : write16;
WF32[rv] = write32 == nullptr? writeMemNotMapped<u32> : write32;
return rv;
}
static u32 FindMask(u32 msk)
{
u32 s = -1;
u32 rv = 0;
while (msk != s >> rv)
rv++;
return rv;
}
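// e.g. FindMask(0x00FFFFFF) == 8: the smallest rv with 0xFFFFFFFF >> rv == msk,
// i.e. the number of high address bits to strip so accesses wrap in the block.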
//map a registered handler to a mem region
void mapHandler(handler Handler, u32 start, u32 end)
{
assert(start < 0x100);
assert(end < 0x100);
assert(start <= end);
for (u32 i = start; i <= end; i++)
memInfo_ptr[i] = (u8 *)nullptr + Handler;
}
//map a memory block to a mem region
void mapBlock(void *base, u32 start, u32 end, u32 mask)
{
assert(start < 0x100);
assert(end < 0x100);
assert(start <= end);
assert((0xFF & (uintptr_t)base) == 0);
assert(base != nullptr);
u32 j = 0;
for (u32 i = start; i <= end; i++)
{
memInfo_ptr[i] = &((u8 *)base)[j & mask] + FindMask(mask) - (j & mask);
j += 0x1000000;
}
}
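// e.g. map_area1 (sh4_mem.cpp, further down in this commit) maps the VRAM
// 64-bit interface with mapBlock(vram.data, 0x04 | base, 0x04 | base, VRAM_MASK).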
void mirrorMapping(u32 new_region, u32 start, u32 size)
{
u32 end = start + size - 1;
assert(start < 0x100);
assert(end < 0x100);
assert(start <= end);
assert(!(start >= new_region && end <= new_region));
u32 j = new_region;
for (u32 i = start; i <= end; i++)
{
memInfo_ptr[j & 0xFF] = memInfo_ptr[i & 0xFF];
j++;
}
}
//init/reset/term
void init()
{
//clear read tables
memset(RF8, 0, sizeof(RF8));
memset(RF16, 0, sizeof(RF16));
memset(RF32, 0, sizeof(RF32));
//clear write tables
memset(WF8, 0, sizeof(WF8));
memset(WF16, 0, sizeof(WF16));
memset(WF32, 0, sizeof(WF32));
//clear meminfo table
memset(memInfo_ptr, 0, sizeof(memInfo_ptr));
//reset registration index
lastRegisteredHandler = 0;
//register default functions (0) for slot 0
handler defaultHandler = registerHandler(nullptr, nullptr, nullptr, nullptr, nullptr, nullptr);
assert(defaultHandler == 0);
(void)defaultHandler;
}
void term()
{
}
u8* ram_base;
static void *malloc_pages(size_t size)
{
return allocAligned(PAGE_SIZE, size);
}
static void free_pages(void *p)
{
freeAligned(p);
}
#if FEAT_SHREC != DYNAREC_NONE
// Resets the FPCB table (by either clearing it to the default value
// or by flushing it and making it fault on access again).
void bm_reset()
{
// If we allocated it via vmem:
if (ram_base)
virtmem::reset_mem(p_sh4rcb->fpcb, sizeof(p_sh4rcb->fpcb));
else
// We allocated it via a regular malloc/new/whatever on the heap
bm_vmem_pagefill((void**)p_sh4rcb->fpcb, sizeof(p_sh4rcb->fpcb));
}
// This gets called whenever there is a page fault. It may land on the fpcb
// memory range, which is allocated on demand. Returning true tells the fault
// handler that this was us, that the page is resolved, and that execution can continue.
bool bm_lockedWrite(u8* address)
{
if (!ram_base)
return false; // No vmem, therefore not us who caused this.
uintptr_t ptrint = (uintptr_t)address;
uintptr_t start = (uintptr_t)p_sh4rcb->fpcb;
uintptr_t end = start + sizeof(p_sh4rcb->fpcb);
if (ptrint >= start && ptrint < end)
{
// Allocate the page and initialize it to default values
void *aligned_addr = (void*)(ptrint & (~PAGE_MASK));
virtmem::ondemand_page(aligned_addr, PAGE_SIZE);
bm_vmem_pagefill((void**)aligned_addr, PAGE_SIZE);
return true;
}
return false;
}
#endif
bool reserve()
{
static_assert((sizeof(Sh4RCB) % PAGE_SIZE) == 0, "sizeof(Sh4RCB) not multiple of PAGE_SIZE");
if (ram_base != nullptr)
return true;
// Use vmem only if the settings allow it, and if we have proper exception handlers.
#if !defined(TARGET_NO_EXCEPTIONS)
if (!settings.dynarec.disable_nvmem)
virtmem::init((void**)&ram_base, (void**)&p_sh4rcb, RAM_SIZE_MAX + VRAM_SIZE_MAX + ARAM_SIZE_MAX + elan::ERAM_SIZE_MAX);
#endif
return true;
}
static void termMappings()
{
if (ram_base == nullptr)
{
free_pages(p_sh4rcb);
p_sh4rcb = nullptr;
free_pages(mem_b.data);
mem_b.data = nullptr;
free_pages(vram.data);
vram.data = nullptr;
free_pages(aica_ram.data);
aica_ram.data = nullptr;
free_pages(elan::RAM);
elan::RAM = nullptr;
}
}
void initMappings()
{
termMappings();
// Fall back to statically allocated buffers; this results in slow-ops being generated.
if (ram_base == nullptr)
{
WARN_LOG(VMEM, "Warning! nvmem is DISABLED (due to failure or not being built in)");
// Allocate it all and initialize it.
p_sh4rcb = (Sh4RCB*)malloc_pages(sizeof(Sh4RCB));
#if FEAT_SHREC != DYNAREC_NONE
bm_vmem_pagefill((void**)p_sh4rcb->fpcb, sizeof(p_sh4rcb->fpcb));
#endif
memset(&p_sh4rcb->cntx, 0, sizeof(p_sh4rcb->cntx));
mem_b.size = RAM_SIZE;
mem_b.data = (u8*)malloc_pages(RAM_SIZE);
vram.size = VRAM_SIZE;
vram.data = (u8*)malloc_pages(VRAM_SIZE);
aica_ram.size = ARAM_SIZE;
aica_ram.data = (u8*)malloc_pages(ARAM_SIZE);
elan::RAM = (u8*)malloc_pages(elan::ERAM_SIZE);
}
else {
NOTICE_LOG(VMEM, "Info: nvmem is enabled");
INFO_LOG(VMEM, "Info: p_sh4rcb: %p ram_base: %p", p_sh4rcb, ram_base);
// Map the different parts of the memory file into the new memory range we got.
const virtmem::Mapping mem_mappings[] = {
{0x00000000, 0x00800000, 0, 0, false}, // Area 0 -> unused
{0x00800000, 0x01000000, MAP_ARAM_START_OFFSET, ARAM_SIZE, false}, // Aica
{0x01000000, 0x04000000, 0, 0, false}, // More unused
{0x04000000, 0x05000000, MAP_VRAM_START_OFFSET, VRAM_SIZE, true}, // Area 1 (vram, 16MB, wrapped on DC as 2x8MB)
{0x05000000, 0x06000000, 0, 0, false}, // 32 bit path (unused)
{0x06000000, 0x07000000, MAP_VRAM_START_OFFSET, VRAM_SIZE, true}, // VRAM mirror
{0x07000000, 0x08000000, 0, 0, false}, // 32 bit path (unused) mirror
{0x08000000, 0x0A000000, 0, 0, false}, // Area 2
{0x0A000000, 0x0C000000, MAP_ERAM_START_OFFSET, elan::ERAM_SIZE, true}, // Area 2 (Elan RAM)
{0x0C000000, 0x10000000, MAP_RAM_START_OFFSET, RAM_SIZE, true}, // Area 3 (main RAM + 3 mirrors)
{0x10000000, 0x20000000, 0, 0, false}, // Area 4-7 (unused)
// This is outside of the 512MB addr space. We map 8MB in all cases to help some games read past the end of aica ram
{0x20000000, 0x20800000, MAP_ARAM_START_OFFSET, ARAM_SIZE, true}, // writable aica ram
};
virtmem::create_mappings(&mem_mappings[0], ARRAY_SIZE(mem_mappings));
// Point buffers to actual data pointers
aica_ram.data = &ram_base[0x20000000]; // Points to the writable AICA addrspace
vram.data = &ram_base[0x04000000]; // Points to first vram mirror (writable and lockable)
mem_b.data = &ram_base[0x0C000000]; // Main memory, first mirror
elan::RAM = &ram_base[0x0A000000];
aica_ram.size = ARAM_SIZE;
vram.size = VRAM_SIZE;
mem_b.size = RAM_SIZE;
}
// Clear out memory
aica_ram.Zero();
vram.Zero();
mem_b.Zero();
NOTICE_LOG(VMEM, "BASE %p RAM(%d MB) %p VRAM64(%d MB) %p ARAM(%d MB) %p",
ram_base,
RAM_SIZE / 1024 / 1024, mem_b.data,
VRAM_SIZE / 1024 / 1024, vram.data,
ARAM_SIZE / 1024 / 1024, aica_ram.data);
}
void release()
{
if (ram_base != nullptr)
{
virtmem::destroy();
ram_base = nullptr;
}
else
{
unprotectVram(0, VRAM_SIZE);
termMappings();
}
}
void protectVram(u32 addr, u32 size)
{
addr &= VRAM_MASK;
if (virtmemEnabled())
{
virtmem::region_lock(ram_base + 0x04000000 + addr, size); // P0
//virtmem::region_lock(ram_base + 0x06000000 + addr, size); // P0 - mirror
if (VRAM_SIZE == 0x800000)
{
// wraps when only 8MB VRAM
virtmem::region_lock(ram_base + 0x04000000 + addr + VRAM_SIZE, size); // P0 wrap
//virtmem::region_lock(ram_base + 0x06000000 + addr + VRAM_SIZE, size); // P0 mirror wrap
}
}
else
{
virtmem::region_lock(&vram[addr], size);
}
}
void unprotectVram(u32 addr, u32 size)
{
addr &= VRAM_MASK;
if (virtmemEnabled())
{
virtmem::region_unlock(ram_base + 0x04000000 + addr, size); // P0
//virtmem::region_unlock(ram_base + 0x06000000 + addr, size); // P0 - mirror
if (VRAM_SIZE == 0x800000)
{
// wraps when only 8MB VRAM
virtmem::region_unlock(ram_base + 0x04000000 + addr + VRAM_SIZE, size); // P0 wrap
//virtmem::region_unlock(ram_base + 0x06000000 + addr + VRAM_SIZE, size); // P0 mirror wrap
}
}
else
{
virtmem::region_unlock(&vram[addr], size);
}
}
u32 getVramOffset(void *addr)
{
if (virtmemEnabled())
{
ptrdiff_t offset = (u8*)addr - ram_base;
if (offset < 0 || offset >= 0x20000000)
return -1;
if ((offset >> 24) != 4)
return -1;
return offset & VRAM_MASK;
}
else
{
ptrdiff_t offset = (u8*)addr - &vram[0];
if (offset < 0 || offset >= VRAM_SIZE)
return -1;
return (u32)offset;
}
}
} // namespace addrspace

core/hw/mem/addrspace.h Normal file
View File

@ -0,0 +1,84 @@
#pragma once
#include "types.h"
namespace addrspace
{
//Typedefs
//ReadMem
typedef u8 DYNACALL ReadMem8FP(u32 Address);
typedef u16 DYNACALL ReadMem16FP(u32 Address);
typedef u32 DYNACALL ReadMem32FP(u32 Address);
//WriteMem
typedef void DYNACALL WriteMem8FP(u32 Address,u8 data);
typedef void DYNACALL WriteMem16FP(u32 Address,u16 data);
typedef void DYNACALL WriteMem32FP(u32 Address,u32 data);
//our own handle type
typedef u32 handler;
//Functions
//init/reset/term
void init();
void term();
void initMappings();
//functions to register and map handlers/memory
handler registerHandler(ReadMem8FP *read8, ReadMem16FP *read16, ReadMem32FP *read32, WriteMem8FP *write8, WriteMem16FP *write16, WriteMem32FP *write32);
#define addrspaceRegisterHandlerTemplate(read, write) addrspace::registerHandler \
(read<u8>, read<u16>, read<u32>, \
write<u8>, write<u16>, write<u32>)
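// e.g. elan's vmem_init (below): elanRamHandler = addrspaceRegisterHandlerTemplate(read_elanram, write_elanram);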
void mapHandler(handler Handler, u32 start, u32 end);
void mapBlock(void* base, u32 start, u32 end, u32 mask);
void mirrorMapping(u32 new_region, u32 start, u32 size);
static inline void mapBlockMirror(void *base, u32 start, u32 end, u32 blck_size)
{
u32 block_size = blck_size >> 24;
for (u32 _maip = start; _maip <= end; _maip += block_size)
mapBlock(base, _maip, _maip + block_size - 1, blck_size - 1);
}
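// e.g. map_area3 (sh4_mem.cpp, below) maps main RAM and its three mirrors:
// mapBlockMirror(mem_b.data, 0x0C | base, 0x0F | base, RAM_SIZE);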
u8 DYNACALL read8(u32 address);
u16 DYNACALL read16(u32 address);
u32 DYNACALL read32(u32 address);
u64 DYNACALL read64(u32 address);
template<typename T> T DYNACALL readt(u32 addr);
static inline int32_t DYNACALL read8SX32(u32 address) {
return (int32_t)(int8_t)readt<u8>(address);
}
static inline int32_t DYNACALL read16SX32(u32 address) {
return (int32_t)(int16_t)readt<u16>(address);
}
void DYNACALL write8(u32 address, u8 data);
void DYNACALL write16(u32 address, u16 data);
void DYNACALL write32(u32 address, u32 data);
void DYNACALL write64(u32 address, u64 data);
template<typename T> void DYNACALL writet(u32 addr, T data);
//should be called at startup to ensure it will succeed
bool reserve();
void release();
//dynarec helpers
void *readConst(u32 addr, bool& ismem, u32 sz);
void *writeConst(u32 addr, bool& ismem, u32 sz);
extern u8* ram_base;
static inline bool virtmemEnabled() {
return ram_base != nullptr;
}
void bm_reset(); // FIXME rename? move?
bool bm_lockedWrite(u8* address); // FIXME rename?
void protectVram(u32 addr, u32 size);
void unprotectVram(u32 addr, u32 size);
u32 getVramOffset(void *addr);
} // namespace addrspace
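For callers the migration is mechanical. An illustrative before/after based on the call sites changed in this commit (the address is just an example in the main-RAM window):

u32 v = addrspace::read32(0x0C000010);   // was _vmem_ReadMem32(...)
addrspace::write32(0x0C000010, v);       // was _vmem_WriteMem32(...)
bool fast = addrspace::virtmemEnabled(); // was _nvmem_enabled()
u8 *base = addrspace::ram_base;          // was virt_ram_base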

View File

@ -17,6 +17,7 @@
along with Flycast. If not, see <https://www.gnu.org/licenses/>.
*/
#include "mem_watch.h"
#include "oslib/virtmem.h"
namespace memwatch
{
@ -29,13 +30,13 @@ ElanRamWatcher elanWatcher;
void AicaRamWatcher::protectMem(u32 addr, u32 size)
{
size = std::min(ARAM_SIZE - addr, size) & ~PAGE_MASK;
mem_region_lock(aica_ram.data + addr, size);
virtmem::region_lock(aica_ram.data + addr, size);
}
void AicaRamWatcher::unprotectMem(u32 addr, u32 size)
{
size = std::min(ARAM_SIZE - addr, size) & ~PAGE_MASK;
mem_region_unlock(aica_ram.data + addr, size);
virtmem::region_unlock(aica_ram.data + addr, size);
}
u32 AicaRamWatcher::getMemOffset(void *p)
@ -51,7 +52,7 @@ void ElanRamWatcher::protectMem(u32 addr, u32 size)
if (ERAM_SIZE != 0)
{
size = std::min(ERAM_SIZE - addr, size) & ~PAGE_MASK;
mem_region_lock(RAM + addr, size);
virtmem::region_lock(RAM + addr, size);
}
}
@ -61,7 +62,7 @@ void ElanRamWatcher::unprotectMem(u32 addr, u32 size)
if (ERAM_SIZE != 0)
{
size = std::min(ERAM_SIZE - addr, size) & ~PAGE_MASK;
mem_region_unlock(RAM + addr, size);
virtmem::region_unlock(RAM + addr, size);
}
}

View File

@ -17,7 +17,7 @@
along with Flycast. If not, see <https://www.gnu.org/licenses/>.
*/
#include "types.h"
#include "_vmem.h"
#include "addrspace.h"
#include "hw/aica/aica_if.h"
#include "hw/sh4/dyna/blockmanager.h"
#include "hw/sh4/sh4_mem.h"
@ -100,17 +100,17 @@ class VramWatcher : public Watcher<VramWatcher>
protected:
void protectMem(u32 addr, u32 size)
{
_vmem_protect_vram(addr, std::min(VRAM_SIZE - addr, size) & ~PAGE_MASK);
addrspace::protectVram(addr, std::min(VRAM_SIZE - addr, size) & ~PAGE_MASK);
}
void unprotectMem(u32 addr, u32 size)
{
_vmem_unprotect_vram(addr, std::min(VRAM_SIZE - addr, size) & ~PAGE_MASK);
addrspace::unprotectVram(addr, std::min(VRAM_SIZE - addr, size) & ~PAGE_MASK);
}
u32 getMemOffset(void *p)
{
return _vmem_get_vram_offset(p);
return addrspace::getVramOffset(p);
}
public:

View File

@ -49,7 +49,7 @@
* 0A000000 - 0bfffffff elan RAM
*/
#include "elan.h"
#include "hw/mem/_vmem.h"
#include "hw/mem/addrspace.h"
#include "pvr_mem.h"
#include "ta.h"
#include "ta_ctx.h"
@ -69,9 +69,9 @@ namespace elan {
constexpr u32 ELAN_RAM_MASK = ERAM_SIZE_MAX - 1;
static _vmem_handler elanRegHandler;
static _vmem_handler elanCmdHandler;
static _vmem_handler elanRamHandler;
static addrspace::handler elanRegHandler;
static addrspace::handler elanCmdHandler;
static addrspace::handler elanRamHandler;
u8 *RAM;
u32 ERAM_SIZE;
@ -1760,19 +1760,19 @@ void term()
void vmem_init()
{
elanRegHandler = _vmem_register_handler(nullptr, nullptr, read_elanreg, nullptr, nullptr, write_elanreg);
elanCmdHandler = _vmem_register_handler(nullptr, nullptr, nullptr, nullptr, nullptr, write_elancmd);
elanRamHandler = _vmem_register_handler_Template(read_elanram, write_elanram);
elanRegHandler = addrspace::registerHandler(nullptr, nullptr, read_elanreg, nullptr, nullptr, write_elanreg);
elanCmdHandler = addrspace::registerHandler(nullptr, nullptr, nullptr, nullptr, nullptr, write_elancmd);
elanRamHandler = addrspaceRegisterHandlerTemplate(read_elanram, write_elanram);
}
void vmem_map(u32 base)
{
if (!settings.platform.isNaomi2())
return;
_vmem_map_handler(elanRegHandler, base | 8, base | 8);
_vmem_map_handler(elanCmdHandler, base | 9, base | 9);
_vmem_map_handler(elanRamHandler, base | 0xA, base | 0xB);
_vmem_map_block(RAM, base | 0xA, base | 0xB, ELAN_RAM_MASK);
addrspace::mapHandler(elanRegHandler, base | 8, base | 8);
addrspace::mapHandler(elanCmdHandler, base | 9, base | 9);
addrspace::mapHandler(elanRamHandler, base | 0xA, base | 0xB);
addrspace::mapBlock(RAM, base | 0xA, base | 0xB, ELAN_RAM_MASK);
}
void serialize(Serializer& ser)

View File

@ -15,7 +15,7 @@
#include "hw/sh4/sh4_opcode_list.h"
#include "hw/sh4/sh4_sched.h"
#include "hw/sh4/modules/mmu.h"
#include "oslib/virtmem.h"
#if defined(__unix__) && defined(DYNA_OPROF)
#include <opagent.h>
@ -74,7 +74,7 @@ DynarecCodeEntryPtr DYNACALL bm_GetCodeByVAddr(u32 addr)
u32 paddr;
if (mmu_data_translation<MMU_TT_DWRITE, u64>(r[4], paddr) == MMU_ERROR_NONE)
{
_vmem_WriteMem64(paddr, sh4_sched_now64() >> 4);
addrspace::write64(paddr, sh4_sched_now64() >> 4);
r[0] = 1;
next_pc = pr;
}
@ -233,51 +233,51 @@ void bm_Reset()
protected_blocks = 0;
unprotected_blocks = 0;
if (_nvmem_enabled())
if (addrspace::virtmemEnabled())
{
// Windows cannot lock/unlock a region spanning more than one VirtualAlloc or MapViewOfFile
// so we have to unlock each region individually
if (settings.platform.ram_size == 16 * 1024 * 1024)
{
mem_region_unlock(virt_ram_base + 0x0C000000, RAM_SIZE);
mem_region_unlock(virt_ram_base + 0x0D000000, RAM_SIZE);
mem_region_unlock(virt_ram_base + 0x0E000000, RAM_SIZE);
mem_region_unlock(virt_ram_base + 0x0F000000, RAM_SIZE);
virtmem::region_unlock(addrspace::ram_base + 0x0C000000, RAM_SIZE);
virtmem::region_unlock(addrspace::ram_base + 0x0D000000, RAM_SIZE);
virtmem::region_unlock(addrspace::ram_base + 0x0E000000, RAM_SIZE);
virtmem::region_unlock(addrspace::ram_base + 0x0F000000, RAM_SIZE);
}
else
{
mem_region_unlock(virt_ram_base + 0x0C000000, RAM_SIZE);
mem_region_unlock(virt_ram_base + 0x0E000000, RAM_SIZE);
virtmem::region_unlock(addrspace::ram_base + 0x0C000000, RAM_SIZE);
virtmem::region_unlock(addrspace::ram_base + 0x0E000000, RAM_SIZE);
}
}
else
{
mem_region_unlock(&mem_b[0], RAM_SIZE);
virtmem::region_unlock(&mem_b[0], RAM_SIZE);
}
}
void bm_LockPage(u32 addr, u32 size)
{
addr = addr & (RAM_MASK - PAGE_MASK);
if (_nvmem_enabled())
mem_region_lock(virt_ram_base + 0x0C000000 + addr, size);
if (addrspace::virtmemEnabled())
virtmem::region_lock(addrspace::ram_base + 0x0C000000 + addr, size);
else
mem_region_lock(&mem_b[addr], size);
virtmem::region_lock(&mem_b[addr], size);
}
void bm_UnlockPage(u32 addr, u32 size)
{
addr = addr & (RAM_MASK - PAGE_MASK);
if (_nvmem_enabled())
mem_region_unlock(virt_ram_base + 0x0C000000 + addr, size);
if (addrspace::virtmemEnabled())
virtmem::region_unlock(addrspace::ram_base + 0x0C000000 + addr, size);
else
mem_region_unlock(&mem_b[addr], size);
virtmem::region_unlock(&mem_b[addr], size);
}
void bm_ResetCache()
{
ngen_ResetBlocks();
_vmem_bm_reset();
addrspace::bm_reset();
for (const auto& it : blkmap)
{
@ -592,11 +592,11 @@ void bm_RamWriteAccess(u32 addr)
u32 bm_getRamOffset(void *p)
{
if (_nvmem_enabled())
if (addrspace::virtmemEnabled())
{
if ((u8 *)p < virt_ram_base || (u8 *)p >= virt_ram_base + 0x20000000)
if ((u8 *)p < addrspace::ram_base || (u8 *)p >= addrspace::ram_base + 0x20000000)
return -1;
u32 addr = (u8*)p - virt_ram_base;
u32 addr = (u8*)p - addrspace::ram_base;
if (!IsOnRam(addr))
return -1;
return addr & RAM_MASK;

View File

@ -13,6 +13,7 @@
#include "blockmanager.h"
#include "ngen.h"
#include "decoder.h"
#include "oslib/virtmem.h"
#include <xxhash.h>
@ -396,15 +397,15 @@ static void recSh4_Init()
sh4Interp.Init();
bm_Init();
if (_nvmem_enabled())
if (addrspace::virtmemEnabled())
verify(mem_b.data == ((u8*)p_sh4rcb->sq_buffer + 512 + 0x0C000000));
// Call the platform-specific magic to make the pages RWX
CodeCache = nullptr;
#ifdef FEAT_NO_RWX_PAGES
bool rc = vmem_platform_prepare_jit_block(SH4_TCB, CODE_SIZE + TEMP_CODE_SIZE, (void**)&CodeCache, &cc_rx_offset);
bool rc = virtmem::prepare_jit_block(SH4_TCB, CODE_SIZE + TEMP_CODE_SIZE, (void**)&CodeCache, &cc_rx_offset);
#else
bool rc = vmem_platform_prepare_jit_block(SH4_TCB, CODE_SIZE + TEMP_CODE_SIZE, (void**)&CodeCache);
bool rc = virtmem::prepare_jit_block(SH4_TCB, CODE_SIZE + TEMP_CODE_SIZE, (void**)&CodeCache);
#endif
verify(rc);
// Ensure the pointer returned is non-null
@ -420,10 +421,10 @@ static void recSh4_Term()
INFO_LOG(DYNAREC, "recSh4 Term");
#ifdef FEAT_NO_RWX_PAGES
if (CodeCache != nullptr)
vmem_platform_release_jit_block(CodeCache, (u8 *)CodeCache + cc_rx_offset, CODE_SIZE + TEMP_CODE_SIZE);
virtmem::release_jit_block(CodeCache, (u8 *)CodeCache + cc_rx_offset, CODE_SIZE + TEMP_CODE_SIZE);
#else
if (CodeCache != nullptr && CodeCache != SH4_TCB)
vmem_platform_release_jit_block(CodeCache, CODE_SIZE + TEMP_CODE_SIZE);
virtmem::release_jit_block(CodeCache, CODE_SIZE + TEMP_CODE_SIZE);
#endif
CodeCache = nullptr;
TempCodeCache = nullptr;

View File

@ -3,7 +3,7 @@
#include "ccn.h"
#include "mmu.h"
#include "hw/mem/_vmem.h"
#include "hw/mem/addrspace.h"
#include "hw/pvr/pvr_mem.h"
#include "hw/sh4/sh4_if.h"
#include "hw/sh4/sh4_mmr.h"
@ -29,7 +29,7 @@ void CCN_QACR_write(u32 addr, u32 value)
switch (area)
{
case 3:
if (_nvmem_enabled())
if (addrspace::virtmemEnabled())
do_sqw_nommu = &do_sqw_nommu_area_3;
else
do_sqw_nommu = &do_sqw_nommu_area_3_nonvmem;

View File

@ -24,7 +24,6 @@
#ifdef FAST_MMU
#include "hw/mem/_vmem.h"
#include "hw/sh4/sh4_mem.h"
extern TLB_Entry UTLB[64];

View File

@ -1,5 +1,5 @@
#include "mmu.h"
#include "hw/mem/_vmem.h"
#include "hw/mem/addrspace.h"
#include "hw/sh4/sh4_if.h"
#include "hw/sh4/sh4_interrupts.h"
#include "hw/sh4/sh4_core.h"
@ -515,7 +515,7 @@ T DYNACALL mmu_ReadMem(u32 adr)
u32 rv = mmu_data_translation<MMU_TT_DREAD, T>(adr, addr);
if (rv != MMU_ERROR_NONE)
mmu_raise_exception(rv, adr, MMU_TT_DREAD);
return _vmem_readt<T, T>(addr);
return addrspace::readt<T>(addr);
}
template u8 mmu_ReadMem(u32 adr);
template u16 mmu_ReadMem(u32 adr);
@ -528,7 +528,7 @@ u16 DYNACALL mmu_IReadMem16(u32 vaddr)
u32 rv = mmu_instruction_translation(vaddr, addr);
if (rv != MMU_ERROR_NONE)
mmu_raise_exception(rv, vaddr, MMU_TT_IREAD);
return _vmem_ReadMem16(addr);
return addrspace::read16(addr);
}
template<typename T>
@ -538,7 +538,7 @@ void DYNACALL mmu_WriteMem(u32 adr, T data)
u32 rv = mmu_data_translation<MMU_TT_DWRITE, T>(adr, addr);
if (rv != MMU_ERROR_NONE)
mmu_raise_exception(rv, adr, MMU_TT_DWRITE);
_vmem_writet<T>(addr, data);
addrspace::writet<T>(addr, data);
}
template void mmu_WriteMem(u32 adr, u8 data);
template void mmu_WriteMem(u32 adr, u16 data);

View File

@ -63,7 +63,7 @@ public:
mmu_raise_exception(err, address, MMU_TT_IREAD);
if (!cacheOn)
return _vmem_readt<u16, u16>(physAddr);
return addrspace::readt<u16>(physAddr);
const u32 index = CCN_CCR.IIX ?
((address >> 5) & 0x7f) | ((address >> (25 - 7)) & 0x80)
@ -83,7 +83,7 @@ public:
{
u32 *p = (u32 *)line.data;
for (int i = 0; i < 32; i += 4)
*p++ = _vmem_ReadMem32(line_addr + i);
*p++ = addrspace::read32(line_addr + i);
}
}
@ -240,7 +240,7 @@ public:
mmu_raise_exception(err, address, MMU_TT_DREAD);
if (!cacheOn)
return _vmem_readt<T, T>(physAddr);
return addrspace::readt<T>(physAddr);
const u32 index = lineIndex(address);
cache_line& line = lines[index];
@ -270,7 +270,7 @@ public:
if (!cacheOn)
{
_vmem_writet<T>(physAddr, data);
addrspace::writet<T>(physAddr, data);
return;
}
@ -303,7 +303,7 @@ public:
else
{
// write-through => update main ram
_vmem_writet<T>(physAddr, data);
addrspace::writet<T>(physAddr, data);
}
}
@ -470,7 +470,7 @@ private:
{
u32 *p = (u32 *)line.data;
for (int i = 0; i < 32; i += 4)
*p++ = _vmem_ReadMem32(line_addr + i);
*p++ = addrspace::read32(line_addr + i);
}
}
@ -486,7 +486,7 @@ private:
{
u32 *p = (u32 *)line.data;
for (int i = 0; i < 32; i += 4)
_vmem_WriteMem32(line_addr + i, *p++);
addrspace::write32(line_addr + i, *p++);
}
}

View File

@ -10,7 +10,7 @@
#include "hw/pvr/elan.h"
#include "hw/pvr/pvr_mem.h"
#include "cfg/option.h"
#include "hw/mem/_vmem.h"
#include "hw/mem/addrspace.h"
#include "hw/sh4/modules/mmu.h"
#ifdef STRICT_MODE
@ -33,11 +33,11 @@ WriteMem32Func WriteMem32;
WriteMem64Func WriteMem64;
//AREA 1
static _vmem_handler area1_32b;
static addrspace::handler area1_32b;
static void map_area1_init()
{
area1_32b = _vmem_register_handler_Template(pvr_read32p, pvr_write32p);
area1_32b = addrspaceRegisterHandlerTemplate(pvr_read32p, pvr_write32p);
}
static void map_area1(u32 base)
@ -46,13 +46,13 @@ static void map_area1(u32 base)
//Lower 32 mb map
//64b interface
_vmem_map_block(vram.data, 0x04 | base, 0x04 | base, VRAM_MASK);
addrspace::mapBlock(vram.data, 0x04 | base, 0x04 | base, VRAM_MASK);
//32b interface
_vmem_map_handler(area1_32b, 0x05 | base, 0x05 | base);
addrspace::mapHandler(area1_32b, 0x05 | base, 0x05 | base);
//Upper 32 mb mirror
//0x0600 to 0x07FF
_vmem_mirror_mapping(0x06 | base, 0x04 | base, 0x02);
addrspace::mirrorMapping(0x06 | base, 0x04 | base, 0x02);
}
//AREA 2: Naomi2 elan
@ -65,27 +65,27 @@ static void map_area3_init()
static void map_area3(u32 base)
{
// System RAM
_vmem_map_block_mirror(mem_b.data, 0x0C | base,0x0F | base, RAM_SIZE);
addrspace::mapBlockMirror(mem_b.data, 0x0C | base,0x0F | base, RAM_SIZE);
}
//AREA 4
static _vmem_handler area4_handler_lower;
static _vmem_handler area4_handler_upper;
static addrspace::handler area4_handler_lower;
static addrspace::handler area4_handler_upper;
static void map_area4_init()
{
area4_handler_lower = _vmem_register_handler(pvr_read_area4<u8, false>, pvr_read_area4<u16, false>, pvr_read_area4<u32, false>,
area4_handler_lower = addrspace::registerHandler(pvr_read_area4<u8, false>, pvr_read_area4<u16, false>, pvr_read_area4<u32, false>,
pvr_write_area4<u8, false>, pvr_write_area4<u16, false>, pvr_write_area4<u32, false>);
area4_handler_upper = _vmem_register_handler(pvr_read_area4<u8, true>, pvr_read_area4<u16, true>, pvr_read_area4<u32, true>,
area4_handler_upper = addrspace::registerHandler(pvr_read_area4<u8, true>, pvr_read_area4<u16, true>, pvr_read_area4<u32, true>,
pvr_write_area4<u8, true>, pvr_write_area4<u16, true>, pvr_write_area4<u32, true>);
}
static void map_area4(u32 base)
{
// VRAM 64b/32b interface
_vmem_map_handler(area4_handler_lower, 0x11 | base, 0x11 | base);
addrspace::mapHandler(area4_handler_lower, 0x11 | base, 0x11 | base);
// upper mirror
_vmem_map_handler(area4_handler_upper, 0x13 | base, 0x13 | base);
addrspace::mapHandler(area4_handler_upper, 0x13 | base, 0x13 | base);
}
@ -105,16 +105,16 @@ void DYNACALL WriteMem_extdev_T(u32 addr,T data)
INFO_LOG(SH4, "Write ext. device (Area 5) undefined @ %08x: %x", addr, (u32)data);
}
_vmem_handler area5_handler;
addrspace::handler area5_handler;
static void map_area5_init()
{
area5_handler = _vmem_register_handler_Template(ReadMem_extdev_T,WriteMem_extdev_T);
area5_handler = addrspaceRegisterHandlerTemplate(ReadMem_extdev_T, WriteMem_extdev_T);
}
static void map_area5(u32 base)
{
//map whole region to plugin handler
_vmem_map_handler(area5_handler,base|0x14,base|0x17);
addrspace::mapHandler(area5_handler, base | 0x14, base | 0x17);
}
//AREA 6 -- Unassigned
@ -128,7 +128,7 @@ static void map_area6(u32 base)
//set vmem to default values
void mem_map_default()
{
_vmem_init();
addrspace::init();
//U0/P0
//0x0xxx xxxx -> normal memmap
@ -202,14 +202,14 @@ void mem_Term()
sh4_mmr_term();
sh4_area0_Term();
_vmem_term();
addrspace::term();
}
void WriteMemBlock_nommu_dma(u32 dst, u32 src, u32 size)
{
bool dst_ismem, src_ismem;
void* dst_ptr = _vmem_write_const(dst, dst_ismem, 4);
void* src_ptr = _vmem_read_const(src, src_ismem, 4);
void* dst_ptr = addrspace::writeConst(dst, dst_ismem, 4);
void* src_ptr = addrspace::readConst(src, src_ismem, 4);
if (dst_ismem && src_ismem)
{
@ -231,7 +231,7 @@ void WriteMemBlock_nommu_ptr(u32 dst, const u32 *src, u32 size)
{
bool dst_ismem;
void* dst_ptr = _vmem_write_const(dst, dst_ismem, 4);
void* dst_ptr = addrspace::writeConst(dst, dst_ismem, 4);
if (dst_ismem)
{
@ -265,7 +265,7 @@ void WriteMemBlock_nommu_sq(u32 dst, const SQBuffer *src)
{
// destination address is 32-byte aligned
bool dst_ismem;
SQBuffer *dst_ptr = (SQBuffer *)_vmem_write_const(dst, dst_ismem, 4);
SQBuffer *dst_ptr = (SQBuffer *)addrspace::writeConst(dst, dst_ismem, 4);
if (dst_ismem)
{
@ -338,15 +338,15 @@ void SetMemoryHandlers()
}
else
{
ReadMem8 = &_vmem_ReadMem8;
ReadMem16 = &_vmem_ReadMem16;
IReadMem16 = &_vmem_ReadMem16;
ReadMem32 = &_vmem_ReadMem32;
ReadMem64 = &_vmem_ReadMem64;
ReadMem8 = &addrspace::read8;
ReadMem16 = &addrspace::read16;
IReadMem16 = &addrspace::read16;
ReadMem32 = &addrspace::read32;
ReadMem64 = &addrspace::read64;
WriteMem8 = &_vmem_WriteMem8;
WriteMem16 = &_vmem_WriteMem16;
WriteMem32 = &_vmem_WriteMem32;
WriteMem64 = &_vmem_WriteMem64;
WriteMem8 = &addrspace::write8;
WriteMem16 = &addrspace::write16;
WriteMem32 = &addrspace::write32;
WriteMem64 = &addrspace::write64;
}
}

View File

@ -5,7 +5,7 @@
//main system mem
extern VArray2 mem_b;
#include "hw/mem/_vmem.h"
#include "hw/mem/addrspace.h"
#include "sh4_if.h"
#ifdef _MSC_VER
@ -41,14 +41,13 @@ extern WriteMem16Func WriteMem16;
extern WriteMem32Func WriteMem32;
extern WriteMem64Func WriteMem64;
#define ReadMem8_nommu _vmem_ReadMem8
#define ReadMem16_nommu _vmem_ReadMem16
#define IReadMem16_nommu _vmem_IReadMem16
#define ReadMem32_nommu _vmem_ReadMem32
#define ReadMem8_nommu addrspace::read8
#define ReadMem16_nommu addrspace::read16
#define ReadMem32_nommu addrspace::read32
#define WriteMem8_nommu _vmem_WriteMem8
#define WriteMem16_nommu _vmem_WriteMem16
#define WriteMem32_nommu _vmem_WriteMem32
#define WriteMem8_nommu addrspace::write8
#define WriteMem16_nommu addrspace::write16
#define WriteMem32_nommu addrspace::write32
void WriteMemBlock_nommu_ptr(u32 dst, const u32 *src, u32 size);
void WriteMemBlock_nommu_sq(u32 dst, const SQBuffer *src);

View File

@ -4,7 +4,7 @@
#include "types.h"
#include "sh4_mmr.h"
#include "hw/mem/_vmem.h"
#include "hw/mem/addrspace.h"
#include "modules/mmu.h"
#include "modules/ccn.h"
#include "modules/modules.h"
@ -868,38 +868,38 @@ void sh4_mmr_term()
}
// AREA 7--Sh4 Regs
static _vmem_handler p4mmr_handler;
static _vmem_handler area7_ocr_handler;
static addrspace::handler p4mmr_handler;
static addrspace::handler area7_ocr_handler;
void map_area7_init()
{
p4mmr_handler = _vmem_register_handler_Template(ReadMem_p4mmr, WriteMem_p4mmr);
area7_ocr_handler = _vmem_register_handler_Template(ReadMem_area7_OCR, WriteMem_area7_OCR);
p4mmr_handler = addrspaceRegisterHandlerTemplate(ReadMem_p4mmr, WriteMem_p4mmr);
area7_ocr_handler = addrspaceRegisterHandlerTemplate(ReadMem_area7_OCR, WriteMem_area7_OCR);
}
void map_area7(u32 base)
{
// on-chip RAM: 7C000000-7FFFFFFF
if (base == 0x60)
_vmem_map_handler(area7_ocr_handler, 0x7C, 0x7F);
addrspace::mapHandler(area7_ocr_handler, 0x7C, 0x7F);
}
//P4
void map_p4()
{
//P4 Region :
_vmem_handler p4_handler = _vmem_register_handler_Template(ReadMem_P4, WriteMem_P4);
addrspace::handler p4_handler = addrspaceRegisterHandlerTemplate(ReadMem_P4, WriteMem_P4);
//register this before MMR and SQ so they overwrite it and handle them
//default P4 handler
//0xE0000000-0xFFFFFFFF
_vmem_map_handler(p4_handler, 0xE0, 0xFF);
addrspace::mapHandler(p4_handler, 0xE0, 0xFF);
//Store Queues -- Write only 32bit
_vmem_map_block(sq_both, 0xE0, 0xE0, 63);
_vmem_map_block(sq_both, 0xE1, 0xE1, 63);
_vmem_map_block(sq_both, 0xE2, 0xE2, 63);
_vmem_map_block(sq_both, 0xE3, 0xE3, 63);
addrspace::mapBlock(sq_both, 0xE0, 0xE0, 63);
addrspace::mapBlock(sq_both, 0xE1, 0xE1, 63);
addrspace::mapBlock(sq_both, 0xE2, 0xE2, 63);
addrspace::mapBlock(sq_both, 0xE3, 0xE3, 63);
_vmem_map_handler(p4mmr_handler, 0xFF, 0xFF);
addrspace::mapHandler(p4mmr_handler, 0xFF, 0xFF);
}
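
Aside: the four mapBlock calls above each pass a mask of 63, so every address in 0xE0000000–0xE3FFFFFF aliases onto the same 64-byte sq_both buffer (two 32-byte store queues). A minimal sketch of the resulting decode, assuming sq_both is a plain 64-byte array (its real type lives in the SH4 headers, not shown in this diff) and with a hypothetical helper name:

```cpp
#include <cstdint>

// Assumption: sq_both is the 64-byte backing store mapped above
// (two 32-byte store queues, SQ0 and SQ1).
extern std::uint8_t sq_both[64];

inline std::uint8_t* sqSlot(std::uint32_t addr) {
    // addrspace::mapBlock(sq_both, 0xEx, 0xEx, 63) keeps only the low
    // 6 bits of the address, so any SQ write lands inside sq_both.
    return &sq_both[addr & 63];
}
```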

View File

@ -21,7 +21,7 @@
#include "hw/sh4/dyna/ngen.h"
#include "rend/TexCache.h"
#include "hw/mem/_vmem.h"
#include "hw/mem/addrspace.h"
#include "hw/mem/mem_watch.h"
#ifdef __SWITCH__
@ -54,7 +54,7 @@ void fault_handler(int sn, siginfo_t * si, void *segfault_ctx)
if (VramLockedWrite((u8*)si->si_addr))
return;
// FPCB jump table protection
if (BM_LockedWrite((u8*)si->si_addr))
if (addrspace::bm_lockedWrite((u8*)si->si_addr))
return;
#if FEAT_SHREC == DYNAREC_JIT

View File

@ -1,11 +1,14 @@
#if defined(__SWITCH__)
#include "hw/mem/_vmem.h"
#include "hw/sh4/sh4_if.h"
#include "stdclass.h"
#include "hw/mem/addrspace.h"
#include "virtmem.h"
#include <switch.h>
#include <malloc.h>
namespace virtmem
{
#define siginfo_t switch_siginfo_t
using mem_handle_t = uintptr_t;
@ -17,7 +20,7 @@ static void *reserved_base;
static size_t reserved_size;
static VirtmemReservation *virtmemReservation;
bool mem_region_lock(void *start, size_t len)
bool region_lock(void *start, size_t len)
{
size_t inpage = (uintptr_t)start & PAGE_MASK;
len += inpage;
@ -37,7 +40,7 @@ bool mem_region_lock(void *start, size_t len)
return true;
}
bool mem_region_unlock(void *start, size_t len)
bool region_unlock(void *start, size_t len)
{
size_t inpage = (uintptr_t)start & PAGE_MASK;
len += inpage;
@ -123,7 +126,7 @@ static mem_handle_t allocate_shared_filemem(unsigned size)
// In negative offsets of the pointer (up to FPCB size, usually 65/129MB) the context and jump table
// can be found. If the platform init returns error, the user is responsible for initializing the
// memory using a fallback (that is, regular mallocs and falling back to slow memory JIT).
bool vmem_platform_init(void **vmem_base_addr, void **sh4rcb_addr, size_t ramSize)
bool init(void **vmem_base_addr, void **sh4rcb_addr, size_t ramSize)
{
return false;
#if 0
@ -162,25 +165,25 @@ bool vmem_platform_init(void **vmem_base_addr, void **sh4rcb_addr, size_t ramSiz
}
// Just tries to wipe as much as possible in the relevant area.
void vmem_platform_destroy()
void destroy()
{
if (reserved_base != NULL)
mem_region_release(reserved_base, reserved_size);
}
// Resets a chunk of memory by deleting its data and setting its protection back.
void vmem_platform_reset_mem(void *ptr, unsigned size_bytes) {
void reset_mem(void *ptr, unsigned size_bytes) {
svcSetMemoryPermission(ptr, size_bytes, Perm_None);
}
// Allocates a bunch of memory (page aligned and page-sized)
void vmem_platform_ondemand_page(void *address, unsigned size_bytes) {
void ondemand_page(void *address, unsigned size_bytes) {
bool rc = region_unlock(address, size_bytes);
verify(rc);
}
// Creates mappings to the underlying file including mirroring sections
void vmem_platform_create_mappings(const vmem_mapping *vmem_maps, unsigned nummaps)
void create_mappings(const Mapping *vmem_maps, unsigned nummaps)
{
for (unsigned i = 0; i < nummaps; i++) {
// Ignore unmapped stuff, it is already reserved as PROT_NONE
@ -194,9 +197,9 @@ void vmem_platform_create_mappings(const vmem_mapping *vmem_maps, unsigned numma
for (unsigned j = 0; j < num_mirrors; j++) {
u64 offset = vmem_maps[i].start_address + j * vmem_maps[i].memsize;
bool rc = mem_region_unmap_file(&virt_ram_base[offset], vmem_maps[i].memsize);
bool rc = mem_region_unmap_file(&addrspace::ram_base[offset], vmem_maps[i].memsize);
verify(rc);
void *p = mem_region_map_file((void*)(uintptr_t)vmem_fd, &virt_ram_base[offset],
void *p = mem_region_map_file((void*)(uintptr_t)vmem_fd, &addrspace::ram_base[offset],
vmem_maps[i].memsize, vmem_maps[i].memoffset, vmem_maps[i].allow_writes);
verify(p != nullptr);
}
@ -204,20 +207,20 @@ void vmem_platform_create_mappings(const vmem_mapping *vmem_maps, unsigned numma
}
// Prepares the code region for JIT operations, thus marking it as RWX
bool vmem_platform_prepare_jit_block(void *code_area, size_t size, void **code_area_rwx)
bool prepare_jit_block(void *code_area, size_t size, void **code_area_rwx)
{
die("Not supported in libnx");
return false;
}
void vmem_platform_release_jit_block(void *code_area, size_t size)
void release_jit_block(void *code_area, size_t size)
{
die("Not supported in libnx");
}
// Use two addr spaces: need to remap something twice, therefore use allocate_shared_filemem()
bool vmem_platform_prepare_jit_block(void *code_area, size_t size, void **code_area_rw, ptrdiff_t *rx_offset)
bool prepare_jit_block(void *code_area, size_t size, void **code_area_rw, ptrdiff_t *rx_offset)
{
const size_t size_aligned = ((size + PAGE_SIZE) & (~(PAGE_SIZE-1)));
@ -239,7 +242,7 @@ bool vmem_platform_prepare_jit_block(void *code_area, size_t size, void **code_a
return true;
}
void vmem_platform_release_jit_block(void *code_area1, void *code_area2, size_t size)
void release_jit_block(void *code_area1, void *code_area2, size_t size)
{
const size_t size_aligned = ((size + PAGE_SIZE) & (~(PAGE_SIZE-1)));
virtmemLock();
@ -247,6 +250,8 @@ void vmem_platform_release_jit_block(void *code_area1, void *code_area2, size_t
virtmemUnlock();
}
} // namespace virtmem
#ifndef TARGET_NO_EXCEPTIONS
#include <ucontext.h>

View File

@ -12,9 +12,9 @@
#include <cerrno>
#include <unistd.h>
#include "hw/mem/_vmem.h"
#include "hw/mem/addrspace.h"
#include "hw/sh4/sh4_if.h"
#include "stdclass.h"
#include "oslib/virtmem.h"
#ifndef MAP_NOSYNC
#define MAP_NOSYNC 0
@ -27,7 +27,7 @@
extern "C" int __attribute__((weak)) ASharedMemory_create(const char*, size_t);
// Android specific ashmem-device stuff for creating shared memory regions
int ashmem_create_region(const char *name, size_t size)
static int ashmem_create_region(const char *name, size_t size)
{
int fd = -1;
if (ASharedMemory_create != nullptr)
@ -51,7 +51,10 @@ int ashmem_create_region(const char *name, size_t size)
}
#endif // #ifdef __ANDROID__
bool mem_region_lock(void *start, size_t len)
namespace virtmem
{
bool region_lock(void *start, size_t len)
{
size_t inpage = (uintptr_t)start & PAGE_MASK;
if (mprotect((u8*)start - inpage, len + inpage, PROT_READ))
@ -59,7 +62,7 @@ bool mem_region_lock(void *start, size_t len)
return true;
}
bool mem_region_unlock(void *start, size_t len)
bool region_unlock(void *start, size_t len)
{
size_t inpage = (uintptr_t)start & PAGE_MASK;
if (mprotect((u8*)start - inpage, len + inpage, PROT_READ | PROT_WRITE))
@ -68,7 +71,7 @@ bool mem_region_unlock(void *start, size_t len)
return true;
}
bool mem_region_set_exec(void *start, size_t len)
bool region_set_exec(void *start, size_t len)
{
size_t inpage = (uintptr_t)start & PAGE_MASK;
int protFlags = PROT_READ | PROT_EXEC;
@ -77,7 +80,7 @@ bool mem_region_set_exec(void *start, size_t len)
#endif
if (mprotect((u8*)start - inpage, len + inpage, protFlags))
{
WARN_LOG(VMEM, "mem_region_set_exec: mprotect failed. errno %d", errno);
WARN_LOG(VMEM, "region_set_exec: mprotect failed. errno %d", errno);
return false;
}
return true;
@ -114,7 +117,8 @@ static void *mem_region_map_file(void *file_handle, void *dest, size_t len, size
}
// Allocates memory via a fd on shmem/ahmem or even a file on disk
static int allocate_shared_filemem(unsigned size) {
static int allocate_shared_filemem(unsigned size)
{
int fd = -1;
#if defined(__ANDROID__)
// Use Android's specific shmem stuff.
@ -157,7 +161,8 @@ static size_t reserved_size;
// In negative offsets of the pointer (up to FPCB size, usually 65/129MB) the context and jump table
// can be found. If the platform init returns error, the user is responsible for initializing the
// memory using a fallback (that is, regular mallocs and falling back to slow memory JIT).
bool vmem_platform_init(void **vmem_base_addr, void **sh4rcb_addr, size_t ramSize) {
bool init(void **vmem_base_addr, void **sh4rcb_addr, size_t ramSize)
{
// First let's try to allocate the shm-backed memory
vmem_fd = allocate_shared_filemem(ramSize);
if (vmem_fd < 0)
@ -180,13 +185,13 @@ bool vmem_platform_init(void **vmem_base_addr, void **sh4rcb_addr, size_t ramSiz
void *sh4rcb_base_ptr = (void*)(ptrint + fpcb_size);
// Now map the memory for the SH4 context, do not include FPCB on purpose (paged on demand).
mem_region_unlock(sh4rcb_base_ptr, sizeof(Sh4RCB) - fpcb_size);
region_unlock(sh4rcb_base_ptr, sizeof(Sh4RCB) - fpcb_size);
return true;
}
// Just tries to wipe as much as possible in the relevant area.
void vmem_platform_destroy()
void destroy()
{
if (reserved_base != nullptr)
{
@ -201,7 +206,7 @@ void vmem_platform_destroy()
}
// Resets a chunk of memory by deleting its data and setting its protection back.
void vmem_platform_reset_mem(void *ptr, unsigned size_bytes) {
void reset_mem(void *ptr, unsigned size_bytes) {
// Mark them as non-accessible.
mprotect(ptr, size_bytes, PROT_NONE);
// Tell the kernel to flush'em all (FIXME: perhaps unmap+mmap 'd be better?)
@ -214,13 +219,13 @@ void vmem_platform_reset_mem(void *ptr, unsigned size_bytes) {
}
// Allocates a bunch of memory (page aligned and page-sized)
void vmem_platform_ondemand_page(void *address, unsigned size_bytes) {
bool rc = mem_region_unlock(address, size_bytes);
void ondemand_page(void *address, unsigned size_bytes) {
bool rc = region_unlock(address, size_bytes);
verify(rc);
}
// Creates mappings to the underlying file including mirroring sections
void vmem_platform_create_mappings(const vmem_mapping *vmem_maps, unsigned nummaps) {
void create_mappings(const Mapping *vmem_maps, unsigned nummaps) {
for (unsigned i = 0; i < nummaps; i++) {
// Ignore unmapped stuff, it is already reserved as PROT_NONE
if (!vmem_maps[i].memsize)
@ -233,7 +238,7 @@ void vmem_platform_create_mappings(const vmem_mapping *vmem_maps, unsigned numma
for (unsigned j = 0; j < num_mirrors; j++) {
u64 offset = vmem_maps[i].start_address + j * vmem_maps[i].memsize;
void *p = mem_region_map_file((void*)(uintptr_t)vmem_fd, &virt_ram_base[offset],
void *p = mem_region_map_file((void*)(uintptr_t)vmem_fd, &addrspace::ram_base[offset],
vmem_maps[i].memsize, vmem_maps[i].memoffset, vmem_maps[i].allow_writes);
verify(p != nullptr);
}
@ -241,10 +246,10 @@ void vmem_platform_create_mappings(const vmem_mapping *vmem_maps, unsigned numma
}
// Prepares the code region for JIT operations, thus marking it as RWX
bool vmem_platform_prepare_jit_block(void *code_area, size_t size, void **code_area_rwx)
bool prepare_jit_block(void *code_area, size_t size, void **code_area_rwx)
{
// Try to map it as RWX; this apparently fails on OSX (and perhaps other systems?)
if (code_area != nullptr && mem_region_set_exec(code_area, size))
if (code_area != nullptr && region_set_exec(code_area, size))
{
// Pointer location should be same:
*code_area_rwx = code_area;
@ -277,13 +282,13 @@ bool vmem_platform_prepare_jit_block(void *code_area, size_t size, void **code_a
return true;
}
void vmem_platform_release_jit_block(void *code_area, size_t size)
void release_jit_block(void *code_area, size_t size)
{
munmap(code_area, size);
}
// Use two addr spaces: need to remap something twice, therefore use allocate_shared_filemem()
bool vmem_platform_prepare_jit_block(void *code_area, size_t size, void **code_area_rw, ptrdiff_t *rx_offset)
bool prepare_jit_block(void *code_area, size_t size, void **code_area_rw, ptrdiff_t *rx_offset)
{
int fd = allocate_shared_filemem(size);
if (fd < 0)
@ -313,7 +318,7 @@ bool vmem_platform_prepare_jit_block(void *code_area, size_t size, void **code_a
return (ptr_rw != MAP_FAILED);
}
void vmem_platform_release_jit_block(void *code_area1, void *code_area2, size_t size)
void release_jit_block(void *code_area1, void *code_area2, size_t size)
{
// keep code_area1 (RX) mapped since it's statically allocated
munmap(code_area2, size);
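
For readers unfamiliar with the W^X fallback being renamed here: the variant of prepare_jit_block that returns an rx_offset maps one shared-memory object twice, a writable view for the emitter and an executable view for the CPU. A minimal sketch of that idea under POSIX, with hypothetical names and only basic error handling — not the commit's implementation:

```cpp
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <cstddef>

// Sketch only: map the same shared-memory object twice. Writes through
// the RW view become visible through the RX view at rw + *rxOffset.
bool dualMapSketch(std::size_t size, void** rw, void** rx, std::ptrdiff_t* rxOffset) {
    int fd = shm_open("/jit-sketch", O_RDWR | O_CREAT | O_EXCL, 0600);
    if (fd < 0)
        return false;
    shm_unlink("/jit-sketch");          // keep the object anonymous
    if (ftruncate(fd, size) != 0) { close(fd); return false; }
    *rw = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    *rx = mmap(nullptr, size, PROT_READ | PROT_EXEC,  MAP_SHARED, fd, 0);
    close(fd);                          // the live mappings keep it alive
    if (*rw == MAP_FAILED || *rx == MAP_FAILED)
        return false;
    *rxOffset = (char*)*rx - (char*)*rw;
    return true;
}
```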
@ -321,9 +326,11 @@ void vmem_platform_release_jit_block(void *code_area1, void *code_area2, size_t
#endif // !__SWITCH__
void vmem_platform_jit_set_exec(void* code, size_t size, bool enable) {
void jit_set_exec(void* code, size_t size, bool enable) {
}
} // namespace virtmem
// Some OSes restrict cache flushing, cause why not right? :D
#if HOST_CPU == CPU_ARM64
@ -332,6 +339,9 @@ void vmem_platform_jit_set_exec(void* code, size_t size, bool enable) {
#include <libkern/OSCacheControl.h>
#endif
namespace virtmem
{
// Code borrowed from Dolphin https://github.com/dolphin-emu/dolphin
static void Arm64_CacheFlush(void* start, void* end) {
if (start == end)
@ -373,7 +383,7 @@ static void Arm64_CacheFlush(void* start, void* end) {
}
void vmem_platform_flush_cache(void *icache_start, void *icache_end, void *dcache_start, void *dcache_end) {
void flush_cache(void *icache_start, void *icache_end, void *dcache_start, void *dcache_end) {
Arm64_CacheFlush(dcache_start, dcache_end);
// Don't risk it: flush and invalidate icache & dcache for both ranges just in case.
@ -381,11 +391,14 @@ void vmem_platform_flush_cache(void *icache_start, void *icache_end, void *dcach
Arm64_CacheFlush(icache_start, icache_end);
}
} // namespace virtmem
#elif HOST_CPU == CPU_ARM
#if defined(__APPLE__)
#include <libkern/OSCacheControl.h>
static void CacheFlush(void* code, void* pEnd)
{
sys_dcache_flush(code, (u8*)pEnd - (u8*)code + 1);
@ -507,9 +520,14 @@ asm static void CacheFlush(void* code, void* pEnd)
}
#endif
void vmem_platform_flush_cache(void *icache_start, void *icache_end, void *dcache_start, void *dcache_end)
namespace virtmem
{
void flush_cache(void *icache_start, void *icache_end, void *dcache_start, void *dcache_end)
{
CacheFlush(icache_start, icache_end);
}
#endif // #if HOST_CPU == CPU_ARM
} // namespace virtmem
#endif // #if HOST_CPU == CPU_ARM
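
Context for flush_cache: on ARM the JIT writes code through the data cache while the CPU fetches through the instruction cache, so freshly emitted code must be cleaned and invalidated explicitly (x86 keeps the two coherent, which is why the function can be a no-op there). A hedged, portable sketch of the same operation, not the hand-rolled routines above:

```cpp
// __builtin___clear_cache is a GCC/Clang builtin performing the same
// d-cache clean + i-cache invalidate as the assembly routines above.
// On x86 it compiles to nothing.
void flushSketch(char* start, char* end) {
    __builtin___clear_cache(start, end);
}
```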

View File

@ -22,7 +22,7 @@
#include <lua.hpp>
#include <LuaBridge/LuaBridge.h>
#include "rend/gui.h"
#include "hw/mem/_vmem.h"
#include "hw/mem/addrspace.h"
#include "cfg/option.h"
#include "emulator.h"
#include "input/gamepad_device.h"
@ -105,7 +105,7 @@ static LuaRef readMemoryTable(u32 address, int count, lua_State* L)
t = newTable(L);
while (count > 0)
{
t[address] = _vmem_readt<T, T>(address);
t[address] = addrspace::readt<T>(address);
address += sizeof(T);
count--;
}
@ -533,18 +533,18 @@ static void luaRegister(lua_State *L)
.endNamespace()
.beginNamespace("memory")
.addFunction("read8", _vmem_readt<u8, u8>)
.addFunction("read16", _vmem_readt<u16, u16>)
.addFunction("read32", _vmem_readt<u32, u32>)
.addFunction("read64", _vmem_readt<u64, u64>)
.addFunction("read8", addrspace::readt<u8>)
.addFunction("read16", addrspace::readt<u16>)
.addFunction("read32", addrspace::readt<u32>)
.addFunction("read64", addrspace::readt<u64>)
.addFunction("readTable8", readMemoryTable<u8>)
.addFunction("readTable16", readMemoryTable<u16>)
.addFunction("readTable32", readMemoryTable<u32>)
.addFunction("readTable64", readMemoryTable<u64>)
.addFunction("write8", _vmem_writet<u8>)
.addFunction("write16", _vmem_writet<u16>)
.addFunction("write32", _vmem_writet<u32>)
.addFunction("write64", _vmem_writet<u64>)
.addFunction("write8", addrspace::writet<u8>)
.addFunction("write16", addrspace::writet<u16>)
.addFunction("write32", addrspace::writet<u32>)
.addFunction("write64", addrspace::writet<u64>)
.endNamespace()
.beginNamespace("input")

View File

@ -1,7 +1,7 @@
#ifndef LIBRETRO
#include "types.h"
#include "emulator.h"
#include "hw/mem/_vmem.h"
#include "hw/mem/addrspace.h"
#include "cfg/cfg.h"
#include "cfg/option.h"
#include "log/LogManager.h"
@ -22,7 +22,7 @@ int flycast_init(int argc, char* argv[])
setbuf(stderr, 0);
settings.aica.muteAudio = true;
#endif
if (!_vmem_reserve())
if (!addrspace::reserve())
{
ERROR_LOG(VMEM, "Failed to alloc mem");
return -1;

core/oslib/virtmem.h Normal file (43 lines)
View File

@ -0,0 +1,43 @@
#include "types.h"
namespace virtmem
{
struct Mapping {
u64 start_address, end_address;
u64 memoffset, memsize;
bool allow_writes;
};
// Platform-specific virtual memory API
// To initialize (maybe) the vmem subsystem
bool init(void **vmem_base_addr, void **sh4rcb_addr, size_t ramSize);
// To reset the on-demand allocated pages.
void reset_mem(void *ptr, unsigned size_bytes);
// To handle a fault & allocate an on-demand page.
void ondemand_page(void *address, unsigned size_bytes);
// To create the mappings in the address space.
void create_mappings(const Mapping *vmem_maps, unsigned nummaps);
// Just tries to wipe as much as possible in the relevant area.
void destroy();
// Given a block of data in the .text section, prepares it for JIT action.
// Both code_area and size are page-aligned. Returns success.
bool prepare_jit_block(void *code_area, size_t size, void **code_area_rwx);
// Same as above but uses two address spaces one with RX and RW protections.
// Note: this function doesn't have to be implemented, it's a fallback for the above one.
bool prepare_jit_block(void *code_area, size_t size, void **code_area_rw, ptrdiff_t *rx_offset);
// This might not need an implementation (i.e. x86/64 CPUs).
void flush_cache(void *icache_start, void *icache_end, void *dcache_start, void *dcache_end);
// Change a code buffer's permissions between r-x and rw-
void jit_set_exec(void* code, size_t size, bool enable);
// Release a jit block previously allocated by prepare_jit_block
void release_jit_block(void *code_area, size_t size);
// Release a jit block previously allocated by prepare_jit_block (with dual RW and RX areas)
void release_jit_block(void *code_area1, void *code_area2, size_t size);
// from stdclass.h
bool region_lock(void *start, std::size_t len);
bool region_unlock(void *start, std::size_t len);
bool region_set_exec(void *start, std::size_t len);
} // namespace virtmem
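
Pieced together from the call sites elsewhere in this diff, a hedged sketch of how a recompiler is expected to drive this API (buffer name and size are placeholders; u8 and verify come from the project's types.h):

```cpp
#include "oslib/virtmem.h"

alignas(4096) static u8 DynCode[1024 * 1024]; // placeholder code area
static u8 *codeBuffer;

void recInit() {
    // RWX path; W^X platforms use the overload taking an rx_offset instead.
    bool rc = virtmem::prepare_jit_block(DynCode, sizeof(DynCode), (void**)&codeBuffer);
    verify(rc);
}

void commitBlock(u8 *start, u8 *end) {
    // Make freshly emitted code visible to the instruction stream.
    virtmem::flush_cache(start, end, start, end);
}

void recTerm() {
    virtmem::release_jit_block(DynCode, sizeof(DynCode));
}
```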

View File

@ -35,6 +35,7 @@ using namespace vixl::aarch32;
#include "hw/sh4/sh4_mem.h"
#include "cfg/option.h"
#include "arm_unwind.h"
#include "oslib/virtmem.h"
//#define CANONICALTEST
@ -95,7 +96,7 @@ public:
void Finalize() {
FinalizeCode();
vmem_platform_flush_cache(GetBuffer()->GetStartAddress<void *>(), GetCursorAddress<u8 *>() - 1,
virtmem::flush_cache(GetBuffer()->GetStartAddress<void *>(), GetCursorAddress<u8 *>() - 1,
GetBuffer()->GetStartAddress<void *>(), GetCursorAddress<u8 *>() - 1);
}
};
@ -583,8 +584,8 @@ static const void *_mem_hndl_SQ32[3][14];
static const void *_mem_hndl[2][3][14];
const void * const _mem_func[2][2] =
{
{ (void *)_vmem_WriteMem32, (void *)_vmem_WriteMem64 },
{ (void *)_vmem_ReadMem32, (void *)_vmem_ReadMem64 },
{ (void *)addrspace::write32, (void *)addrspace::write64 },
{ (void *)addrspace::read32, (void *)addrspace::read64 },
};
const struct
@ -889,7 +890,7 @@ static bool ngen_readm_immediate(RuntimeBlockInfo* block, shil_opcode* op, bool
mem_op_type optp = memop_type(op);
bool isram = false;
void* ptr = _vmem_read_const(addr, isram, std::min(4u, memop_bytes[optp]));
void* ptr = addrspace::readConst(addr, isram, std::min(4u, memop_bytes[optp]));
Register rd = (optp != SZ_32F && optp != SZ_64F) ? reg.mapReg(op->rd) : r0;
@ -1023,7 +1024,7 @@ static bool ngen_writemem_immediate(RuntimeBlockInfo* block, shil_opcode* op, bo
mem_op_type optp = memop_type(op);
bool isram = false;
void* ptr = _vmem_write_const(addr, isram, std::min(4u, memop_bytes[optp]));
void* ptr = addrspace::writeConst(addr, isram, std::min(4u, memop_bytes[optp]));
Register rs2 = r1;
SRegister rs2f = s0;
@ -1158,7 +1159,7 @@ static void ngen_compile_opcode(RuntimeBlockInfo* block, shil_opcode* op, bool o
Register raddr = GenMemAddr(op);
genMmuLookup(block, *op, 0, raddr);
if (_nvmem_enabled()) {
if (addrspace::virtmemEnabled()) {
ass.Bic(r1, raddr, optp == SZ_32F || optp == SZ_64F ? 0xE0000003 : 0xE0000000);
switch(optp)
@ -1272,7 +1273,7 @@ static void ngen_compile_opcode(RuntimeBlockInfo* block, shil_opcode* op, bool o
else
rs2 = reg.mapReg(op->rs2);
}
if (_nvmem_enabled())
if (addrspace::virtmemEnabled())
{
ass.Bic(r1, raddr, optp == SZ_32F || optp == SZ_64F ? 0xE0000003 : 0xE0000000);
@ -2385,12 +2386,12 @@ static void generate_mainloop()
// Memory handlers
for (int s=0;s<6;s++)
{
const void* fn=s==0?(void*)_vmem_ReadMem8SX32:
s==1?(void*)_vmem_ReadMem16SX32:
s==2?(void*)_vmem_ReadMem32:
s==3?(void*)_vmem_WriteMem8:
s==4?(void*)_vmem_WriteMem16:
s==5?(void*)_vmem_WriteMem32:
const void* fn=s==0?(void*)addrspace::read8SX32:
s==1?(void*)addrspace::read16SX32:
s==2?(void*)addrspace::read32:
s==3?(void*)addrspace::write8:
s==4?(void*)addrspace::write16:
s==5?(void*)addrspace::write32:
0;
bool read=s<=2;
@ -2433,7 +2434,7 @@ static void generate_mainloop()
ass.Cmp(r1, 0x38);
ass.And(r1, r0, 0x3F);
ass.Add(r1, r1, r8);
jump((void *)&_vmem_WriteMem64, ne);
jump((void *)&addrspace::write64, ne);
ass.Strd(r2, r3, MemOperand(r1, rcbOffset(sq_buffer)));
}
else
@ -2444,7 +2445,7 @@ static void generate_mainloop()
ass.Cmp(r2, 0x38);
if (reg != 0)
ass.Mov(ne, r0, Register(reg));
jump((void *)&_vmem_WriteMem32, ne);
jump((void *)&addrspace::write32, ne);
ass.Str(r1, MemOperand(r3, rcbOffset(sq_buffer)));
}
ass.Bx(lr);

View File

@ -38,8 +38,9 @@ using namespace vixl::aarch64;
#include "hw/sh4/sh4_mem.h"
#include "hw/sh4/sh4_rom.h"
#include "arm64_regalloc.h"
#include "hw/mem/_vmem.h"
#include "hw/mem/addrspace.h"
#include "arm64_unwind.h"
#include "oslib/virtmem.h"
#undef do_sqw_nommu
@ -1104,21 +1105,21 @@ public:
switch (size)
{
case 1:
GenCallRuntime(_vmem_ReadMem8);
GenCallRuntime(addrspace::read8);
Sxtb(w0, w0);
break;
case 2:
GenCallRuntime(_vmem_ReadMem16);
GenCallRuntime(addrspace::read16);
Sxth(w0, w0);
break;
case 4:
GenCallRuntime(_vmem_ReadMem32);
GenCallRuntime(addrspace::read32);
break;
case 8:
GenCallRuntime(_vmem_ReadMem64);
GenCallRuntime(addrspace::read64);
break;
default:
@ -1135,19 +1136,19 @@ public:
switch (size)
{
case 1:
GenCallRuntime(_vmem_WriteMem8);
GenCallRuntime(addrspace::write8);
break;
case 2:
GenCallRuntime(_vmem_WriteMem16);
GenCallRuntime(addrspace::write16);
break;
case 4:
GenCallRuntime(_vmem_WriteMem32);
GenCallRuntime(addrspace::write32);
break;
case 8:
GenCallRuntime(_vmem_WriteMem64);
GenCallRuntime(addrspace::write64);
break;
default:
@ -1335,7 +1336,7 @@ public:
}
// Flush and invalidate caches
vmem_platform_flush_cache(
virtmem::flush_cache(
CC_RW2RX(GetBuffer()->GetStartAddress<void*>()), CC_RW2RX(GetBuffer()->GetEndAddress<void*>()),
GetBuffer()->GetStartAddress<void*>(), GetBuffer()->GetEndAddress<void*>());
#if 0
@ -1570,7 +1571,7 @@ public:
Bind(&writeStoreQueue32Label);
Lsr(x7, x0, 26);
Cmp(x7, 0x38);
GenBranchRuntime(_vmem_WriteMem32, Condition::ne);
GenBranchRuntime(addrspace::write32, Condition::ne);
And(x0, x0, 0x3f);
Sub(x7, x0, sizeof(Sh4RCB::sq_buffer), LeaveFlags);
Str(w1, MemOperand(x28, x7));
@ -1580,7 +1581,7 @@ public:
Bind(&writeStoreQueue64Label);
Lsr(x7, x0, 26);
Cmp(x7, 0x38);
GenBranchRuntime(_vmem_WriteMem64, Condition::ne);
GenBranchRuntime(addrspace::write64, Condition::ne);
And(x0, x0, 0x3f);
Sub(x7, x0, sizeof(Sh4RCB::sq_buffer), LeaveFlags);
Str(x1, MemOperand(x28, x7));
@ -1598,7 +1599,7 @@ public:
writeStoreQueue64 = GetLabelAddress<DynaCode *>(&writeStoreQueue64Label);
// Flush and invalidate caches
vmem_platform_flush_cache(
virtmem::flush_cache(
CC_RW2RX(GetBuffer()->GetStartAddress<void*>()), CC_RW2RX(GetBuffer()->GetEndAddress<void*>()),
GetBuffer()->GetStartAddress<void*>(), GetBuffer()->GetEndAddress<void*>());
}
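
The Lsr x7, x0, 26 / Cmp x7, 0x38 pair above is the store-queue test: bits 31:26 of the address equal 0x38 exactly for 0xE0000000–0xE3FFFFFF. Roughly, in C++ (writeSQ is a hypothetical stand-in for the Str into Sh4RCB::sq_buffer):

```cpp
#include "hw/mem/addrspace.h"

void writeSQ(u32 offset, u32 value); // hypothetical stand-in for the SQ store

void writeDispatch(u32 addr, u32 value) {
    if ((addr >> 26) != 0x38)            // outside 0xE0000000-0xE3FFFFFF
        addrspace::write32(addr, value); // regular memory write
    else
        writeSQ(addr & 0x3f, value);     // lands in the 64-byte SQ buffer
}
```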
@ -1766,7 +1767,7 @@ private:
addr = paddr;
}
bool isram = false;
void* ptr = _vmem_read_const(addr, isram, op.size > 4 ? 4 : op.size);
void* ptr = addrspace::readConst(addr, isram, op.size > 4 ? 4 : op.size);
if (isram)
{
@ -1890,7 +1891,7 @@ private:
bool GenReadMemoryFast(const shil_opcode& op, size_t opid)
{
// Direct memory access. Need to handle SIGSEGV and rewrite block as needed. See ngen_Rewrite()
if (!_nvmem_enabled())
if (!addrspace::virtmemEnabled())
return false;
Instruction *start_instruction = GetCursorAddress<Instruction *>();
@ -1976,7 +1977,7 @@ private:
addr = paddr;
}
bool isram = false;
void* ptr = _vmem_write_const(addr, isram, op.size > 4 ? 4 : op.size);
void* ptr = addrspace::writeConst(addr, isram, op.size > 4 ? 4 : op.size);
if (isram)
{
@ -2054,7 +2055,7 @@ private:
bool GenWriteMemoryFast(const shil_opcode& op, size_t opid)
{
// Direct memory access. Need to handle SIGSEGV and rewrite block as needed. See ngen_Rewrite()
if (!_nvmem_enabled())
if (!addrspace::virtmemEnabled())
return false;
Instruction *start_instruction = GetCursorAddress<Instruction *>();

View File

@ -19,6 +19,7 @@ using namespace Xbyak::util;
#include "x64_regalloc.h"
#include "xbyak_base.h"
#include "oslib/oslib.h"
#include "oslib/virtmem.h"
struct DynaRBI : RuntimeBlockInfo
{
@ -732,7 +733,7 @@ public:
bool rewriteMemAccess(host_context_t &context)
{
if (!_nvmem_enabled())
if (!addrspace::virtmemEnabled())
return false;
//printf("ngen_Rewrite pc %p\n", context.pc);
@ -849,7 +850,7 @@ private:
addr = paddr;
}
bool isram = false;
void* ptr = _vmem_read_const(addr, isram, op.size > 4 ? 4 : op.size);
void* ptr = addrspace::readConst(addr, isram, op.size > 4 ? 4 : op.size);
if (isram)
{
@ -998,7 +999,7 @@ private:
addr = paddr;
}
bool isram = false;
void* ptr = _vmem_write_const(addr, isram, op.size > 4 ? 4 : op.size);
void* ptr = addrspace::writeConst(addr, isram, op.size > 4 ? 4 : op.size);
if (isram)
{
@ -1142,9 +1143,9 @@ private:
for (int op = 0; op < MemOp::Count; op++)
{
MemHandlers[type][size][op] = getCurr();
if (type == MemType::Fast && _nvmem_enabled())
if (type == MemType::Fast && addrspace::virtmemEnabled())
{
mov(rax, (uintptr_t)virt_ram_base);
mov(rax, (uintptr_t)addrspace::ram_base);
mov(r9, call_regs64[0]);
and_(call_regs[0], 0x1FFFFFFF);
@ -1199,9 +1200,9 @@ private:
ret();
L(no_sqw);
if (size == MemSize::S32)
jmp((const void *)_vmem_WriteMem32); // tail call
jmp((const void *)addrspace::write32); // tail call
else
jmp((const void *)_vmem_WriteMem64); // tail call
jmp((const void *)addrspace::write64); // tail call
continue;
}
else
@ -1212,21 +1213,21 @@ private:
switch (size) {
case MemSize::S8:
sub(rsp, STACK_ALIGN);
call((const void *)_vmem_ReadMem8);
call((const void *)addrspace::read8);
movsx(eax, al);
add(rsp, STACK_ALIGN);
break;
case MemSize::S16:
sub(rsp, STACK_ALIGN);
call((const void *)_vmem_ReadMem16);
call((const void *)addrspace::read16);
movsx(eax, ax);
add(rsp, STACK_ALIGN);
break;
case MemSize::S32:
jmp((const void *)_vmem_ReadMem32); // tail call
jmp((const void *)addrspace::read32); // tail call
continue;
case MemSize::S64:
jmp((const void *)_vmem_ReadMem64); // tail call
jmp((const void *)addrspace::read64); // tail call
continue;
default:
die("1..8 bytes");
@ -1236,16 +1237,16 @@ private:
{
switch (size) {
case MemSize::S8:
jmp((const void *)_vmem_WriteMem8); // tail call
jmp((const void *)addrspace::write8); // tail call
continue;
case MemSize::S16:
jmp((const void *)_vmem_WriteMem16); // tail call
jmp((const void *)addrspace::write16); // tail call
continue;
case MemSize::S32:
jmp((const void *)_vmem_WriteMem32); // tail call
jmp((const void *)addrspace::write32); // tail call
continue;
case MemSize::S64:
jmp((const void *)_vmem_WriteMem64); // tail call
jmp((const void *)addrspace::write64); // tail call
continue;
default:
die("1..8 bytes");
@ -1340,7 +1341,7 @@ void ngen_Compile(RuntimeBlockInfo* block, bool smc_checks, bool reset, bool sta
verify(emit_FreeSpace() >= 16 * 1024);
void* protStart = emit_GetCCPtr();
size_t protSize = emit_FreeSpace();
vmem_platform_jit_set_exec(protStart, protSize, false);
virtmem::jit_set_exec(protStart, protSize, false);
BlockCompiler compiler;
::ccCompiler = &compiler;
@ -1350,7 +1351,7 @@ void ngen_Compile(RuntimeBlockInfo* block, bool smc_checks, bool reset, bool sta
ERROR_LOG(DYNAREC, "Fatal xbyak error: %s", e.what());
}
::ccCompiler = nullptr;
vmem_platform_jit_set_exec(protStart, protSize, true);
virtmem::jit_set_exec(protStart, protSize, true);
}
void ngen_CC_Start(shil_opcode* op)
@ -1376,14 +1377,14 @@ bool ngen_Rewrite(host_context_t &context, void *faultAddress)
{
void* protStart = emit_GetCCPtr();
size_t protSize = emit_FreeSpace();
vmem_platform_jit_set_exec(protStart, protSize, false);
virtmem::jit_set_exec(protStart, protSize, false);
u8 *retAddr = *(u8 **)context.rsp - 5;
BlockCompiler compiler(retAddr);
bool rc = false;
try {
rc = compiler.rewriteMemAccess(context);
vmem_platform_jit_set_exec(protStart, protSize, true);
virtmem::jit_set_exec(protStart, protSize, true);
} catch (const Xbyak::Error& e) {
ERROR_LOG(DYNAREC, "Fatal xbyak error: %s", e.what());
}
@ -1404,7 +1405,7 @@ void ngen_ResetBlocks()
void* protStart = emit_GetCCPtr();
size_t protSize = emit_FreeSpace();
vmem_platform_jit_set_exec(protStart, protSize, false);
virtmem::jit_set_exec(protStart, protSize, false);
BlockCompiler compiler;
try {
@ -1412,7 +1413,7 @@ void ngen_ResetBlocks()
} catch (const Xbyak::Error& e) {
ERROR_LOG(DYNAREC, "Fatal xbyak error: %s", e.what());
}
vmem_platform_jit_set_exec(protStart, protSize, true);
virtmem::jit_set_exec(protStart, protSize, true);
}
#endif

View File

@ -25,7 +25,7 @@
#include "hw/sh4/sh4_interpreter.h"
#include "hw/sh4/sh4_interrupts.h"
#include "hw/sh4/sh4_mem.h"
#include "hw/mem/_vmem.h"
#include "hw/mem/addrspace.h"
#include "oslib/oslib.h"
static void (*mainloop)();
@ -640,7 +640,7 @@ bool X86Compiler::genReadMemImmediate(const shil_opcode& op, RuntimeBlockInfo* b
addr = paddr;
}
bool isram = false;
void* ptr = _vmem_read_const(addr, isram, op.size > 4 ? 4 : op.size);
void* ptr = addrspace::readConst(addr, isram, op.size > 4 ? 4 : op.size);
if (isram)
{
@ -778,7 +778,7 @@ bool X86Compiler::genWriteMemImmediate(const shil_opcode& op, RuntimeBlockInfo*
addr = paddr;
}
bool isram = false;
void* ptr = _vmem_write_const(addr, isram, op.size > 4 ? 4 : op.size);
void* ptr = addrspace::writeConst(addr, isram, op.size > 4 ? 4 : op.size);
if (isram)
{

View File

@ -26,7 +26,7 @@
#include "hw/sh4/sh4_opcode_list.h"
#include "hw/sh4/sh4_core.h"
#include "hw/sh4/sh4_interrupts.h"
#include "hw/mem/_vmem.h"
#include "hw/mem/addrspace.h"
#include "oslib/oslib.h"
extern UnwindInfo unwinder;
@ -73,7 +73,7 @@ void X86Compiler::genMemHandlers()
{
MemHandlers[type][size][op] = getCurr();
if (type == MemType::Fast && _nvmem_enabled())
if (type == MemType::Fast && addrspace::virtmemEnabled())
{
// save the original address in eax so it can be restored during rewriting
mov(eax, ecx);
@ -83,19 +83,19 @@ void X86Compiler::genMemHandlers()
switch (size)
{
case MemSize::S8:
address = byte[ecx + (size_t)virt_ram_base];
address = byte[ecx + (size_t)addrspace::ram_base];
reg = op == MemOp::R ? (Xbyak::Reg)eax : (Xbyak::Reg)dl;
break;
case MemSize::S16:
address = word[ecx + (size_t)virt_ram_base];
address = word[ecx + (size_t)addrspace::ram_base];
reg = op == MemOp::R ? (Xbyak::Reg)eax : (Xbyak::Reg)dx;
break;
case MemSize::S32:
address = dword[ecx + (size_t)virt_ram_base];
address = dword[ecx + (size_t)addrspace::ram_base];
reg = op == MemOp::R ? eax : edx;
break;
default:
address = dword[ecx + (size_t)virt_ram_base];
address = dword[ecx + (size_t)addrspace::ram_base];
break;
}
if (size >= MemSize::F32)
@ -106,7 +106,7 @@ void X86Compiler::genMemHandlers()
movss(address, xmm0);
if (size == MemSize::F64)
{
address = dword[ecx + (size_t)virt_ram_base + 4];
address = dword[ecx + (size_t)addrspace::ram_base + 4];
if (op == MemOp::R)
movss(xmm1, address);
else
@ -160,14 +160,14 @@ void X86Compiler::genMemHandlers()
#endif
movss(dword[esp], xmm0);
movss(dword[esp + 4], xmm1);
call((const void *)_vmem_WriteMem64); // dynacall adds 8 to esp
call((const void *)addrspace::write64); // dynacall adds 8 to esp
alignStack(4);
}
else
{
if (size == MemSize::F32)
movd(edx, xmm0);
jmp((const void *)_vmem_WriteMem32); // tail call
jmp((const void *)addrspace::write32); // tail call
continue;
}
}
@ -180,31 +180,31 @@ void X86Compiler::genMemHandlers()
case MemSize::S8:
// 16-byte alignment
alignStack(-12);
call((const void *)_vmem_ReadMem8);
call((const void *)addrspace::read8);
movsx(eax, al);
alignStack(12);
break;
case MemSize::S16:
// 16-byte alignment
alignStack(-12);
call((const void *)_vmem_ReadMem16);
call((const void *)addrspace::read16);
movsx(eax, ax);
alignStack(12);
break;
case MemSize::S32:
jmp((const void *)_vmem_ReadMem32); // tail call
jmp((const void *)addrspace::read32); // tail call
continue;
case MemSize::F32:
// 16-byte alignment
alignStack(-12);
call((const void *)_vmem_ReadMem32);
call((const void *)addrspace::read32);
movd(xmm0, eax);
alignStack(12);
break;
case MemSize::F64:
// 16-byte alignment
alignStack(-12);
call((const void *)_vmem_ReadMem64);
call((const void *)addrspace::read64);
movd(xmm0, eax);
movd(xmm1, edx);
alignStack(12);
@ -217,17 +217,17 @@ void X86Compiler::genMemHandlers()
{
switch (size) {
case MemSize::S8:
jmp((const void *)_vmem_WriteMem8); // tail call
jmp((const void *)addrspace::write8); // tail call
continue;
case MemSize::S16:
jmp((const void *)_vmem_WriteMem16); // tail call
jmp((const void *)addrspace::write16); // tail call
continue;
case MemSize::S32:
jmp((const void *)_vmem_WriteMem32); // tail call
jmp((const void *)addrspace::write32); // tail call
continue;
case MemSize::F32:
movd(edx, xmm0);
jmp((const void *)_vmem_WriteMem32); // tail call
jmp((const void *)addrspace::write32); // tail call
continue;
case MemSize::F64:
#ifndef _WIN32
@ -239,7 +239,7 @@ void X86Compiler::genMemHandlers()
#endif
movss(dword[esp], xmm0);
movss(dword[esp + 4], xmm1);
call((const void *)_vmem_WriteMem64); // dynacall adds 8 to esp
call((const void *)addrspace::write64); // dynacall adds 8 to esp
alignStack(4);
break;
default:

View File

@ -112,11 +112,11 @@ static bool reios_locate_bootfile(const char* bootfile)
u8 data[24] = {0};
// system id
for (u32 j = 0; j < 8; j++)
data[j] = _vmem_ReadMem8(0x0021a056 + j);
data[j] = addrspace::read8(0x0021a056 + j);
// system properties
for (u32 j = 0; j < 5; j++)
data[8 + j] = _vmem_ReadMem8(0x0021a000 + j);
data[8 + j] = addrspace::read8(0x0021a000 + j);
// system settings
flash_syscfg_block syscfg{};

View File

@ -2,7 +2,7 @@
#include "CustomTexture.h"
#include "deps/xbrz/xbrz.h"
#include "hw/pvr/pvr_mem.h"
#include "hw/mem/_vmem.h"
#include "hw/mem/addrspace.h"
#include <algorithm>
#include <mutex>
@ -200,7 +200,7 @@ static void vramlock_list_add(vram_block* block)
std::vector<vram_block*>& list = VramLocks[i];
// If the list is empty then we need to protect vram, otherwise it's already been done
if (list.empty() || std::all_of(list.begin(), list.end(), [](vram_block *block) { return block == nullptr; }))
_vmem_protect_vram(i * PAGE_SIZE, PAGE_SIZE);
addrspace::protectVram(i * PAGE_SIZE, PAGE_SIZE);
auto it = std::find(list.begin(), list.end(), nullptr);
if (it != list.end())
*it = block;
@ -237,7 +237,7 @@ bool VramLockedWriteOffset(size_t offset)
}
list.clear();
_vmem_unprotect_vram((u32)(offset & ~PAGE_MASK), PAGE_SIZE);
addrspace::unprotectVram((u32)(offset & ~PAGE_MASK), PAGE_SIZE);
}
return true;
@ -245,7 +245,7 @@ bool VramLockedWriteOffset(size_t offset)
bool VramLockedWrite(u8* address)
{
u32 offset = _vmem_get_vram_offset(address);
u32 offset = addrspace::getVramOffset(address);
if (offset == (u32)-1)
return false;
return VramLockedWriteOffset(offset);

View File

@ -391,7 +391,7 @@ static void dc_deserialize_libretro(Deserializer& deser)
{
case 0:
case 1:
if (_nvmem_enabled())
if (addrspace::virtmemEnabled())
do_sqw_nommu = &do_sqw_nommu_area_3;
else
do_sqw_nommu = &do_sqw_nommu_area_3_nonvmem;
@ -676,7 +676,7 @@ void dc_deserialize(Deserializer& deser)
{
case 0:
case 1:
if (_nvmem_enabled())
if (addrspace::virtmemEnabled())
do_sqw_nommu = &do_sqw_nommu_area_3;
else
do_sqw_nommu = &do_sqw_nommu_area_3_nonvmem;

View File

@ -73,10 +73,6 @@ std::string get_game_dir();
// returns the position of the last path separator, or string::npos if none
size_t get_last_slash_pos(const std::string& path);
bool mem_region_lock(void *start, std::size_t len);
bool mem_region_unlock(void *start, std::size_t len);
bool mem_region_set_exec(void *start, std::size_t len);
class VArray2 {
public:
u8* data;

View File

@ -18,7 +18,7 @@
#include "hw/sh4/dyna/blockmanager.h"
#include "hw/sh4/dyna/ngen.h"
#include "rend/TexCache.h"
#include "hw/mem/_vmem.h"
#include "hw/mem/addrspace.h"
#include "hw/mem/mem_watch.h"
#include <windows.h>
@ -89,7 +89,7 @@ static LONG WINAPI exceptionHandler(EXCEPTION_POINTERS *ep)
if (VramLockedWrite(address))
return EXCEPTION_CONTINUE_EXECUTION;
// FPCB jump table protection
if (BM_LockedWrite(address))
if (addrspace::bm_lockedWrite(address))
return EXCEPTION_CONTINUE_EXECUTION;
host_context_t context;

View File

@ -1,14 +1,18 @@
#include "hw/mem/_vmem.h"
#include "hw/sh4/sh4_if.h"
#include "hw/mem/addrspace.h"
#include "oslib/virtmem.h"
#include <windows.h>
namespace virtmem
{
// Implementation of the vmem-related functions for Windows platforms.
// For now this probably makes some assumptions about the CPU/platform.
// The implementation allows it to be empty (that is, to not lock memory).
bool mem_region_lock(void *start, size_t len)
bool region_lock(void *start, size_t len)
{
DWORD old;
if (!VirtualProtect(start, len, PAGE_READONLY, &old))
@ -16,7 +20,7 @@ bool mem_region_lock(void *start, size_t len)
return true;
}
bool mem_region_unlock(void *start, size_t len)
bool region_unlock(void *start, size_t len)
{
DWORD old;
if (!VirtualProtect(start, len, PAGE_READWRITE, &old))
@ -48,7 +52,7 @@ static std::vector<void *> mapped_regions;
// Please read the POSIX implementation for more information. On Windows this is
// rather straightforward.
bool vmem_platform_init(void **vmem_base_addr, void **sh4rcb_addr, size_t ramSize)
bool init(void **vmem_base_addr, void **sh4rcb_addr, size_t ramSize)
{
#ifdef TARGET_UWP
return false;
@ -84,24 +88,24 @@ bool vmem_platform_init(void **vmem_base_addr, void **sh4rcb_addr, size_t ramSiz
}
// Just tries to wipe as much as possible in the relevant area.
void vmem_platform_destroy() {
void destroy() {
VirtualFree(base_alloc, 0, MEM_RELEASE);
CloseHandle(mem_handle);
}
// Resets a chunk of memory by deleting its data and setting its protection back.
void vmem_platform_reset_mem(void *ptr, unsigned size_bytes) {
void reset_mem(void *ptr, unsigned size_bytes) {
VirtualFree(ptr, size_bytes, MEM_DECOMMIT);
}
// Allocates a bunch of memory (page aligned and page-sized)
void vmem_platform_ondemand_page(void *address, unsigned size_bytes) {
void ondemand_page(void *address, unsigned size_bytes) {
void *p = VirtualAlloc(address, size_bytes, MEM_COMMIT, PAGE_READWRITE);
verify(p != nullptr);
}
/// Creates mappings to the underlying file including mirroring sections
void vmem_platform_create_mappings(const vmem_mapping *vmem_maps, unsigned nummaps) {
void create_mappings(const Mapping *vmem_maps, unsigned nummaps) {
// Since this is tricky to get right in Windows (in POSIX one can just unmap sections and remap later)
// we unmap the whole thing only to remap it later.
#ifndef TARGET_UWP
@ -113,14 +117,15 @@ void vmem_platform_create_mappings(const vmem_mapping *vmem_maps, unsigned numma
mem_region_release(p, 0);
unmapped_regions.clear();
for (unsigned i = 0; i < nummaps; i++) {
for (unsigned i = 0; i < nummaps; i++)
{
size_t address_range_size = vmem_maps[i].end_address - vmem_maps[i].start_address;
DWORD protection = vmem_maps[i].allow_writes ? (FILE_MAP_READ | FILE_MAP_WRITE) : FILE_MAP_READ;
if (!vmem_maps[i].memsize) {
// Unmapped stuff goes with a protected area or memory. Prevent anything from allocating here
void *ptr = VirtualAlloc(&virt_ram_base[vmem_maps[i].start_address], address_range_size, MEM_RESERVE, PAGE_NOACCESS);
verify(ptr == &virt_ram_base[vmem_maps[i].start_address]);
void *ptr = VirtualAlloc(&addrspace::ram_base[vmem_maps[i].start_address], address_range_size, MEM_RESERVE, PAGE_NOACCESS);
verify(ptr == &addrspace::ram_base[vmem_maps[i].start_address]);
unmapped_regions.push_back(ptr);
}
else {
@ -133,8 +138,8 @@ void vmem_platform_create_mappings(const vmem_mapping *vmem_maps, unsigned numma
size_t offset = vmem_maps[i].start_address + j * vmem_maps[i].memsize;
void *ptr = MapViewOfFileEx(mem_handle, protection, 0, (DWORD)vmem_maps[i].memoffset,
vmem_maps[i].memsize, &virt_ram_base[offset]);
verify(ptr == &virt_ram_base[offset]);
vmem_maps[i].memsize, &addrspace::ram_base[offset]);
verify(ptr == &addrspace::ram_base[offset]);
mapped_regions.push_back(ptr);
}
}
@ -143,7 +148,7 @@ void vmem_platform_create_mappings(const vmem_mapping *vmem_maps, unsigned numma
}
template<typename Mapper>
static void *vmem_platform_prepare_jit_block_template(size_t size, Mapper mapper)
static void *prepare_jit_block_template(size_t size, Mapper mapper)
{
// Several issues on Windows: can't protect arbitrary pages due to (I guess) the way the
// kernel tracks mappings, so only stuff that has been allocated with VirtualAlloc can be
@ -154,7 +159,7 @@ static void *vmem_platform_prepare_jit_block_template(size_t size, Mapper mapper
// Remember that on x64 we have 4 byte jump/load offset immediates, no issues on x86 :D
// Take this function addr as reference.
uintptr_t base_addr = reinterpret_cast<uintptr_t>(&vmem_platform_init) & ~0xFFFFF;
uintptr_t base_addr = reinterpret_cast<uintptr_t>(&init) & ~0xFFFFF;
// Probably safe to assume reicast code is <200MB (today seems to be <16MB on every platform I've seen).
for (uintptr_t i = 0; i < 1800 * 1024 * 1024; i += 1024 * 1024) { // Some arbitrary step size.
@ -187,22 +192,22 @@ static void* mem_alloc(void *addr, size_t size)
}
// Prepares the code region for JIT operations, thus marking it as RWX
bool vmem_platform_prepare_jit_block(void *, size_t size, void **code_area_rwx)
bool prepare_jit_block(void *, size_t size, void **code_area_rwx)
{
// Get the RWX page close to the code_area
void *ptr = vmem_platform_prepare_jit_block_template(size, mem_alloc);
void *ptr = prepare_jit_block_template(size, mem_alloc);
if (!ptr)
return false;
*code_area_rwx = ptr;
INFO_LOG(DYNAREC, "Found code area at %p, not too far away from %p", *code_area_rwx, &vmem_platform_init);
INFO_LOG(DYNAREC, "Found code area at %p, not too far away from %p", *code_area_rwx, &init);
// We should have found some area in the addrspace, after all size is ~tens of megabytes.
// Pages are already RWX, all done
return true;
}
void vmem_platform_release_jit_block(void *code_area, size_t)
void release_jit_block(void *code_area, size_t)
{
VirtualFree(code_area, 0, MEM_RELEASE);
}
@ -225,12 +230,12 @@ static void* mem_file_map(void *addr, size_t size)
}
// Use two addr spaces: need to remap something twice, therefore use CreateFileMapping()
bool vmem_platform_prepare_jit_block(void *, size_t size, void** code_area_rw, ptrdiff_t* rx_offset)
bool prepare_jit_block(void *, size_t size, void** code_area_rw, ptrdiff_t* rx_offset)
{
mem_handle2 = CreateFileMapping(INVALID_HANDLE_VALUE, 0, PAGE_EXECUTE_READWRITE, 0, (DWORD)size, 0);
// Get the RX page close to the code_area
void* ptr_rx = vmem_platform_prepare_jit_block_template(size, mem_file_map);
void* ptr_rx = prepare_jit_block_template(size, mem_file_map);
if (!ptr_rx)
return false;
@ -248,7 +253,7 @@ bool vmem_platform_prepare_jit_block(void *, size_t size, void** code_area_rw, p
return (ptr_rw != NULL);
}
void vmem_platform_release_jit_block(void *code_area1, void *code_area2, size_t)
void release_jit_block(void *code_area1, void *code_area2, size_t)
{
UnmapViewOfFile(code_area1);
UnmapViewOfFile(code_area2);
@ -259,7 +264,7 @@ void vmem_platform_release_jit_block(void *code_area1, void *code_area2, size_t)
CloseHandle(mem_handle2);
}
void vmem_platform_jit_set_exec(void* code, size_t size, bool enable)
void jit_set_exec(void* code, size_t size, bool enable)
{
#ifdef TARGET_UWP
DWORD old;
@ -267,3 +272,5 @@ void vmem_platform_jit_set_exec(void* code, size_t size, bool enable)
die("VirtualProtect failed");
#endif
}
} // namespace virtmem
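
The probing loop in prepare_jit_block_template exists because x64 rel32 call/jmp displacements only reach ±2GB, so the JIT area must land near the executable image. A simplified sketch of the distance check the comments describe (withinRel32 and anchor are illustrative, not the commit's code):

```cpp
#include <cstdint>

// True if 'candidate' is reachable from 'anchor' with a 4-byte relative
// displacement; 'anchor' is any function inside the host executable.
static bool withinRel32(const void* anchor, const void* candidate) {
    std::intptr_t delta = (const char*)candidate - (const char*)anchor;
    if (delta < 0)
        delta = -delta;
    return delta < INT32_MAX; // within +/-2GB
}
```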

View File

@ -333,7 +333,7 @@ void retro_init()
set_user_data_dir(std::string(data_dir) + "/");
#endif
if (!_vmem_reserve())
if (!addrspace::reserve())
ERROR_LOG(VMEM, "Cannot reserve memory space");
os_InstallFaultHandler();
@ -359,7 +359,7 @@ void retro_deinit()
os_UninstallFaultHandler();
#if defined(__APPLE__) || (defined(__GNUC__) && defined(__linux__) && !defined(__ANDROID__))
_vmem_release();
addrspace::release();
#else
emu.term();
#endif

View File

@ -1,6 +1,6 @@
#include "gtest/gtest.h"
#include "types.h"
#include "hw/mem/_vmem.h"
#include "hw/mem/addrspace.h"
#include "hw/arm7/arm7.h"
#include "hw/aica/aica_if.h"
#include "hw/arm7/arm7_rec.h"
@ -21,9 +21,10 @@ extern void (*EntryPoints[])();
class AicaArmTest : public ::testing::Test {
protected:
void SetUp() override {
if (!_vmem_reserve())
die("_vmem_reserve failed");
void SetUp() override
{
if (!addrspace::reserve())
die("addrspace::reserve failed");
emu.init();
dc_reset(true);
Arm7Enabled = true;

View File

@ -8,9 +8,10 @@
class CheatManagerTest : public ::testing::Test {
protected:
void SetUp() override {
if (!_vmem_reserve())
die("_vmem_reserve failed");
void SetUp() override
{
if (!addrspace::reserve())
die("addrspace::reserve failed");
emu.init();
}
};

View File

@ -18,15 +18,16 @@
*/
#include "gtest/gtest.h"
#include "types.h"
#include "hw/mem/_vmem.h"
#include "hw/mem/addrspace.h"
#include "emulator.h"
#include "hw/sh4/modules/mmu.h"
class MmuTest : public ::testing::Test {
protected:
void SetUp() override {
if (!_vmem_reserve())
die("_vmem_reserve failed");
void SetUp() override
{
if (!addrspace::reserve())
die("addrspace::reserve failed");
emu.init();
dc_reset(true);
config::ForceWindowsCE = true;

View File

@ -22,9 +22,10 @@
class Sh4InterpreterTest : public Sh4OpTest {
protected:
void SetUp() override {
if (!_vmem_reserve())
die("_vmem_reserve failed");
void SetUp() override
{
if (!addrspace::reserve())
die("addrspace::reserve failed");
emu.init();
mem_map_default();
dc_reset(true);
@ -34,11 +35,11 @@ protected:
void PrepareOp(u16 op, u16 op2 = 0, u16 op3 = 0) override
{
ctx->pc = START_PC;
_vmem_WriteMem16(ctx->pc, op);
addrspace::write16(ctx->pc, op);
if (op2 != 0)
_vmem_WriteMem16(ctx->pc + 2, op2);
addrspace::write16(ctx->pc + 2, op2);
if (op3 != 0)
_vmem_WriteMem16(ctx->pc + 4, op3);
addrspace::write16(ctx->pc + 4, op3);
}
void RunOp(int numOp = 1) override
{

View File

@ -1,6 +1,6 @@
#include "gtest/gtest.h"
#include "types.h"
#include "hw/mem/_vmem.h"
#include "hw/mem/addrspace.h"
#include "emulator.h"
#include "hw/sh4/sh4_core.h"
@ -106,9 +106,10 @@ static void div32u_slow(u32& r1, u32 r2, u32& r3)
class Div32Test : public ::testing::Test {
protected:
void SetUp() override {
if (!_vmem_reserve())
die("_vmem_reserve failed");
void SetUp() override
{
if (!addrspace::reserve())
die("addrspace::reserve failed");
emu.init();
dc_reset(true);
}

View File

@ -1,6 +1,6 @@
#include "gtest/gtest.h"
#include "types.h"
#include "hw/mem/_vmem.h"
#include "hw/mem/addrspace.h"
#include "hw/maple/maple_cfg.h"
#include "hw/maple/maple_devs.h"
#include "emulator.h"
@ -8,9 +8,10 @@
class SerializeTest : public ::testing::Test {
protected:
void SetUp() override {
if (!_vmem_reserve())
die("_vmem_reserve failed");
void SetUp() override
{
if (!addrspace::reserve())
die("addrspace::reserve failed");
emu.init();
dc_reset(true);
}

View File

@ -22,7 +22,7 @@
#include "gtest/gtest.h"
#include "types.h"
#include "hw/sh4/sh4_if.h"
#include "hw/mem/_vmem.h"
#include "hw/mem/addrspace.h"
constexpr u32 REG_MAGIC = 0xbaadf00d;
@ -159,7 +159,7 @@ protected:
{
ClearRegs();
r(14) = 0x8C001000;
_vmem_WriteMem32(r(14), 0xffccaa88u);
addrspace::write32(r(14), 0xffccaa88u);
PrepareOp(0x6002 | Rm(14) | Rn(11)); // mov.l @Rm,Rn
RunOp();
ASSERT_EQ(r(11), 0xffccaa88u);
@ -181,7 +181,7 @@ protected:
ClearRegs();
r(8) = 0x8C001004;
_vmem_WriteMem32(r(8), 0x4433ff11);
addrspace::write32(r(8), 0x4433ff11);
PrepareOp(0x6006 | Rm(8) | Rn(7)); // mov.l @Rm+,Rn
RunOp();
ASSERT_EQ(r(7), 0x4433ff11u);
@ -190,7 +190,7 @@ protected:
ClearRegs();
r(7) = 0x8C001004;
_vmem_WriteMem32(r(7), 0x4433ff11);
addrspace::write32(r(7), 0x4433ff11);
PrepareOp(0x6006 | Rm(7) | Rn(7)); // mov.l @Rm+,Rn
RunOp();
ASSERT_EQ(r(7), 0x4433ff11u);
@ -213,7 +213,7 @@ protected:
AssertState();
ClearRegs();
_vmem_WriteMem32(0x8C001010, 0x50607080);
addrspace::write32(0x8C001010, 0x50607080);
r(8) = 0x8C001004;
PrepareOp(0x5000 | Rm(8) | Rn(7) | Imm4(3));// mov.l @(disp, Rm), Rn
RunOp();
@ -221,7 +221,7 @@ protected:
AssertState();
ClearRegs();
_vmem_WriteMem32(0x8C001010, 0x50607080);
addrspace::write32(0x8C001010, 0x50607080);
r(8) = 0x8C001004;
PrepareOp(0x8500 | Rm(8) | Imm4(6)); // mov.w @(disp, Rm), R0
RunOp();
@ -229,7 +229,7 @@ protected:
AssertState();
ClearRegs();
_vmem_WriteMem32(0x8C001010, 0x50607080);
addrspace::write32(0x8C001010, 0x50607080);
r(8) = 0x8C001004;
PrepareOp(0x8400 | Rm(8) | Imm4(12)); // mov.b @(disp, Rm), R0
RunOp();
@ -241,7 +241,7 @@ protected:
ClearRegs();
r(11) = 0x8C000800;
r(0) = 0x00000800;
_vmem_WriteMem32(r(11) + r(0), 0x88aaccffu);
addrspace::write32(r(11) + r(0), 0x88aaccffu);
PrepareOp(0x000e | Rm(11) | Rn(12)); // mov.l @(R0, Rm), Rn
RunOp();
ASSERT_EQ(r(12), 0x88aaccffu);
@ -250,7 +250,7 @@ protected:
ClearRegs();
r(11) = 0x8C000800;
r(0) = 0x00000800;
_vmem_WriteMem32(r(11) + r(0), 0x88aaccffu);
addrspace::write32(r(11) + r(0), 0x88aaccffu);
PrepareOp(0x000d | Rm(11) | Rn(12)); // mov.w @(R0, Rm), Rn
RunOp();
ASSERT_EQ(r(12), 0xffffccffu);
@ -259,7 +259,7 @@ protected:
ClearRegs();
r(11) = 0x8C000800;
r(0) = 0x00000800;
_vmem_WriteMem32(r(11) + r(0), 0x88aaccffu);
addrspace::write32(r(11) + r(0), 0x88aaccffu);
PrepareOp(0x000c | Rm(11) | Rn(12)); // mov.b @(R0, Rm), Rn
RunOp();
ASSERT_EQ(r(12), 0xffffffffu);
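The 0xffffccff and 0xffffffff expectations follow from the little-endian layout of the 0x88aaccff word (a 16-bit read at its base address sees 0xccff) plus the sign extension mov.w and mov.b perform on load. A compile-time restatement (illustrative sketch, not code from this commit):

    #include <cstdint>

    // mov.w loads the low 16 bits and sign-extends them to 32 bits:
    static_assert((uint32_t)(int32_t)(int16_t)0xccff == 0xffffccffu, "mov.w sign-extends");
    // mov.b loads the low byte and sign-extends it:
    static_assert((uint32_t)(int32_t)(int8_t)0xff == 0xffffffffu, "mov.b sign-extends");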
@ -267,7 +267,7 @@ protected:
ClearRegs();
gbr() = 0x8C000800;
_vmem_WriteMem32(gbr() + 0x10 * 4, 0x11223344u);
addrspace::write32(gbr() + 0x10 * 4, 0x11223344u);
PrepareOp(0xc600 | Imm8(0x10)); // mov.l @(disp, GBR), R0
RunOp();
ASSERT_EQ(r(0), 0x11223344u);
@ -275,7 +275,7 @@ protected:
ClearRegs();
gbr() = 0x8C000800;
_vmem_WriteMem32(gbr() + 0x18 * 2, 0x11223344u);
addrspace::write32(gbr() + 0x18 * 2, 0x11223344u);
PrepareOp(0xc500 | Imm8(0x18)); // mov.w @(disp, GBR), R0
RunOp();
ASSERT_EQ(r(0), 0x3344u);
@ -283,7 +283,7 @@ protected:
ClearRegs();
gbr() = 0x8C000800;
_vmem_WriteMem32(gbr() + 0x17, 0x112233c4u);
addrspace::write32(gbr() + 0x17, 0x112233c4u);
PrepareOp(0xc400 | Imm8(0x17)); // mov.b @(disp, GBR), R0
RunOp();
ASSERT_EQ(r(0), 0xffffffc4u);
@ -291,7 +291,7 @@ protected:
ClearRegs();
u32 disp = 0x11;
_vmem_WriteMem32(START_PC + 4 + disp * 4, 0x01020304u);
addrspace::write32(START_PC + 4 + disp * 4, 0x01020304u);
PrepareOp(0x9, // nop
0xd000 | Rn(6) | Imm8(disp)); // mov.l @(disp, PC), Rn
RunOp(2);
@ -300,7 +300,7 @@ protected:
ClearRegs();
disp = 0x12;
_vmem_WriteMem32(START_PC + 4 + disp * 2, 0x01020304u);
addrspace::write32(START_PC + 4 + disp * 2, 0x01020304u);
PrepareOp(0x9000 | Rn(5) | Imm8(disp)); // mov.w @(disp, PC), Rn
RunOp();
ASSERT_EQ(r(5), 0x0304u);
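The nop is load-bearing: it places the mov.l at START_PC + 2, and mov.l @(disp, PC) clears the low two bits of PC when forming its address, which is why the literal is planted at START_PC + 4 + disp * 4. mov.w does no such masking. A sketch of the two effective-address computations per the SH-4 ISA (helper names are illustrative; START_PC is assumed 4-byte aligned, as these expectations require):

    #include <cstdint>

    // mov.l @(disp, PC), Rn: PC is rounded down to a 4-byte boundary first
    constexpr uint32_t ea_movl_pc(uint32_t pc, uint32_t disp) { return (pc & ~3u) + 4 + disp * 4; }
    // mov.w @(disp, PC), Rn: no masking, 2-byte scaling
    constexpr uint32_t ea_movw_pc(uint32_t pc, uint32_t disp) { return pc + 4 + disp * 2; }

    // Executing at base + 2 (after the nop) still addresses relative to base:
    static_assert(ea_movl_pc(0x1002, 0x11) == 0x1000 + 4 + 0x11 * 4, "mask drops the +2");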
@ -314,7 +314,7 @@ protected:
r(11) = 0xbeeff00d;
PrepareOp(0x2002 | Rm(11) | Rn(14)); // mov.l Rm, @Rn
RunOp();
ASSERT_EQ(_vmem_ReadMem32(0x8C001000), 0xbeeff00du);
ASSERT_EQ(addrspace::read32(0x8C001000), 0xbeeff00du);
ASSERT_EQ(r(14), 0x8C001000u);
ASSERT_EQ(r(11), 0xbeeff00du);
AssertState();
@ -322,10 +322,10 @@ protected:
ClearRegs();
r(14) = 0x8C001000;
r(11) = 0xf00dbeef;
_vmem_WriteMem32(0x8C001000, 0xbaadbaad);
addrspace::write32(0x8C001000, 0xbaadbaad);
PrepareOp(0x2001 | Rm(11) | Rn(14)); // mov.w Rm, @Rn
RunOp();
ASSERT_EQ(_vmem_ReadMem32(0x8C001000), 0xbaadbeefu);
ASSERT_EQ(addrspace::read32(0x8C001000), 0xbaadbeefu);
ASSERT_EQ(r(14), 0x8C001000u);
ASSERT_EQ(r(11), 0xf00dbeefu);
AssertState();
@ -333,10 +333,10 @@ protected:
ClearRegs();
r(14) = 0x8C001000;
r(11) = 0xccccccf0;
_vmem_WriteMem32(0x8C001000, 0xbaadbaad);
addrspace::write32(0x8C001000, 0xbaadbaad);
PrepareOp(0x2000 | Rm(11) | Rn(14)); // mov.b Rm, @Rn
RunOp();
ASSERT_EQ(_vmem_ReadMem32(0x8C001000), 0xbaadbaf0u);
ASSERT_EQ(addrspace::read32(0x8C001000), 0xbaadbaf0u);
ASSERT_EQ(r(14), 0x8C001000u);
ASSERT_EQ(r(11), 0xccccccf0u);
AssertState();
@ -346,7 +346,7 @@ protected:
r(7) = 0xfeedf00d;
PrepareOp(0x2006 | Rm(7) | Rn(8)); // mov.l Rm, @-Rn
RunOp();
ASSERT_EQ(_vmem_ReadMem32(0x8C001000), 0xfeedf00du);
ASSERT_EQ(addrspace::read32(0x8C001000), 0xfeedf00du);
ASSERT_EQ(r(7), 0xfeedf00du);
ASSERT_EQ(r(8), 0x8C001000u);
AssertState();
@ -355,7 +355,7 @@ protected:
r(7) = 0x8C001004;
PrepareOp(0x2006 | Rm(7) | Rn(7)); // mov.l Rm, @-Rn
RunOp();
ASSERT_EQ(_vmem_ReadMem32(0x8C001000), 0x8C001004); // value before decrement is stored
ASSERT_EQ(addrspace::read32(0x8C001000), 0x8C001004); // value before decrement is stored
ASSERT_EQ(r(7), 0x8C001000u);
AssertState();
@ -364,7 +364,7 @@ protected:
r(7) = 0x1234cafe;
PrepareOp(0x2005 | Rm(7) | Rn(8)); // mov.w Rm, @-Rn
RunOp();
ASSERT_EQ(_vmem_ReadMem16(0x8C001000), 0xcafeu);
ASSERT_EQ(addrspace::read16(0x8C001000), 0xcafeu);
ASSERT_EQ(r(7), 0x1234cafeu);
ASSERT_EQ(r(8), 0x8C001000u);
AssertState();
@ -374,7 +374,7 @@ protected:
r(7) = 0x12345642;
PrepareOp(0x2004 | Rm(7) | Rn(8)); // mov.b Rm, @-Rn
RunOp();
ASSERT_EQ(_vmem_ReadMem8(0x8C001000), 0x42u);
ASSERT_EQ(addrspace::read8(0x8C001000), 0x42u);
ASSERT_EQ(r(7), 0x12345642u);
ASSERT_EQ(r(8), 0x8C001000u);
AssertState();
@ -384,7 +384,7 @@ protected:
r(7) = 0x50607080;
PrepareOp(0x1000 | Rm(7) | Rn(8) | Imm4(3));// mov.l Rm, @(disp, Rn)
RunOp();
ASSERT_EQ(_vmem_ReadMem32(0x8C001010), 0x50607080u);
ASSERT_EQ(addrspace::read32(0x8C001010), 0x50607080u);
ASSERT_EQ(r(7), 0x50607080u);
ASSERT_EQ(r(8), 0x8C001004u);
AssertState();
@ -394,7 +394,7 @@ protected:
r(0) = 0x10203040;
PrepareOp(0x8100 | Rm(8) | Imm4(3)); // mov.w R0, @(disp, Rn)
RunOp();
ASSERT_EQ(_vmem_ReadMem16(0x8C00100A), 0x3040u);
ASSERT_EQ(addrspace::read16(0x8C00100A), 0x3040u);
ASSERT_EQ(r(0), 0x10203040u);
ASSERT_EQ(r(8), 0x8C001004u);
AssertState();
@ -404,7 +404,7 @@ protected:
r(0) = 0x66666672;
PrepareOp(0x8000 | Rm(8) | Imm4(3)); // mov.b R0, @(disp, Rn)
RunOp();
ASSERT_EQ(_vmem_ReadMem8(0x8C001007), 0x72u);
ASSERT_EQ(addrspace::read8(0x8C001007), 0x72u);
ASSERT_EQ(r(0), 0x66666672u);
ASSERT_EQ(r(8), 0x8C001004u);
AssertState();
@ -417,7 +417,7 @@ protected:
r(12) = 0x87654321;
PrepareOp(0x0006 | Rm(12) | Rn(11)); // mov.l Rm, @(R0, Rn)
RunOp();
ASSERT_EQ(_vmem_ReadMem32(0x8C001000), 0x87654321u);
ASSERT_EQ(addrspace::read32(0x8C001000), 0x87654321u);
ASSERT_EQ(r(12), 0x87654321u);
ASSERT_EQ(r(11), 0x8C000800u);
ASSERT_EQ(r(0), 0x00000800u);
@ -429,7 +429,7 @@ protected:
r(12) = 0x12345678;
PrepareOp(0x0005 | Rm(12) | Rn(11)); // mov.w Rm, @(R0, Rn)
RunOp();
ASSERT_EQ(_vmem_ReadMem32(0x8C001000), 0x87655678u); // relies on value set in previous test
ASSERT_EQ(addrspace::read32(0x8C001000), 0x87655678u); // relies on value set in previous test
ASSERT_EQ(r(12), 0x12345678u);
ASSERT_EQ(r(11), 0x8C000800u);
ASSERT_EQ(r(0), 0x00000800u);
@ -441,7 +441,7 @@ protected:
r(12) = 0x99999999;
PrepareOp(0x0004 | Rm(12) | Rn(11)); // mov.b Rm, @(R0, Rn)
RunOp();
ASSERT_EQ(_vmem_ReadMem32(0x8C001000), 0x87655699u); // relies on value set in 2 previous tests
ASSERT_EQ(addrspace::read32(0x8C001000), 0x87655699u); // relies on value set in 2 previous tests
ASSERT_EQ(r(12), 0x99999999u);
ASSERT_EQ(r(11), 0x8C000800u);
ASSERT_EQ(r(0), 0x00000800u);
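The chained expectations 0x87654321 -> 0x87655678 -> 0x87655699 are partial stores merging into one little-endian word: mov.w replaces the low half, mov.b the low byte. A standalone restatement (sketch with an illustrative helper name, not emulator code):

    #include <cstdint>
    #include <cstring>

    // Mirrors the three stores above on a little-endian host (as the emulated SH-4 runs here):
    inline uint32_t mergedWord()
    {
        uint32_t word = 0x87654321;              // mov.l Rm, @(R0, Rn)
        const uint16_t half = 0x5678;            // low 16 bits of 0x12345678
        std::memcpy(&word, &half, sizeof half);  // word == 0x87655678
        const uint8_t byte = 0x99;               // low byte of 0x99999999
        std::memcpy(&word, &byte, sizeof byte);  // word == 0x87655699
        return word;
    }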
@ -452,7 +452,7 @@ protected:
r(0) = 0xabcdef01;
PrepareOp(0xc200 | Imm8(0x10)); // mov.l R0, @(disp, GBR)
RunOp();
ASSERT_EQ(_vmem_ReadMem32(0x8C000840), 0xabcdef01u);
ASSERT_EQ(addrspace::read32(0x8C000840), 0xabcdef01u);
ASSERT_EQ(gbr(), 0x8C000800u);
ASSERT_EQ(r(0), 0xabcdef01u);
AssertState();
@ -462,7 +462,7 @@ protected:
r(0) = 0x11117777;
PrepareOp(0xc100 | Imm8(0x20)); // mov.w R0, @(disp, GBR)
RunOp();
ASSERT_EQ(_vmem_ReadMem32(0x8C000840), 0xabcd7777u); // relies on value set in previous test
ASSERT_EQ(addrspace::read32(0x8C000840), 0xabcd7777u); // relies on value set in previous test
AssertState();
ClearRegs();
@ -470,7 +470,7 @@ protected:
r(0) = 0x22222266;
PrepareOp(0xc000 | Imm8(0x40)); // mov.b R0, @(disp, GBR)
RunOp();
ASSERT_EQ(_vmem_ReadMem32(0x8C000840), 0xabcd7766u); // relies on value set in 2 previous tests
ASSERT_EQ(addrspace::read32(0x8C000840), 0xabcd7766u); // relies on value set in 2 previous tests
AssertState();
}
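All three GBR-relative stores above land on the same word, 0x8C000840, because the 8-bit displacement is scaled by the operand size; that is what the "relies on value set in previous test" comments depend on. Spelled out (restating the asserted addresses):

    constexpr unsigned gbr = 0x8C000800;
    static_assert(gbr + 0x10 * 4 == 0x8C000840, "mov.l: disp scaled by 4");
    static_assert(gbr + 0x20 * 2 == 0x8C000840, "mov.w: disp scaled by 2");
    static_assert(gbr + 0x40 * 1 == 0x8C000840, "mov.b: disp scaled by 1");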
@ -865,17 +865,17 @@ protected:
ClearRegs();
r(7) = 0xAC001000;
_vmem_WriteMem32(r(7), 4);
addrspace::write32(r(7), 4);
r(8) = 0xAC002000;
_vmem_WriteMem32(r(8), 3);
addrspace::write32(r(8), 3);
PrepareOp(0x000f | Rn(7) | Rm(8)); // mac.l @Rm+, @Rn+
RunOp();
ASSERT_EQ(mac(), 12ull);
ASSERT_EQ(r(7), 0xAC001004u);
ASSERT_EQ(r(8), 0xAC002004u);
_vmem_WriteMem32(r(7), -5);
_vmem_WriteMem32(r(8), 7);
addrspace::write32(r(7), -5);
addrspace::write32(r(8), 7);
RunOp();
ASSERT_EQ(mac(), -23ull);
ASSERT_EQ(r(7), 0xAC001008u);
@ -884,17 +884,17 @@ protected:
ClearRegs();
r(7) = 0xAC001000;
_vmem_WriteMem32(r(7), (u16)-7);
addrspace::write32(r(7), (u16)-7);
r(8) = 0xAC002000;
_vmem_WriteMem32(r(8), 3);
addrspace::write32(r(8), 3);
PrepareOp(0x400f | Rn(7) | Rm(8)); // mac.w @Rm+, @Rn+
RunOp();
ASSERT_EQ(mac(), -21ull);
ASSERT_EQ(r(7), 0xAC001002u);
ASSERT_EQ(r(8), 0xAC002002u);
_vmem_WriteMem16(r(7), 5);
_vmem_WriteMem16(r(8), 7);
addrspace::write16(r(7), 5);
addrspace::write16(r(8), 7);
RunOp();
ASSERT_EQ(mac(), 14ull);
ASSERT_EQ(r(7), 0xAC001004u);
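The accumulator expectations are plain signed multiply-accumulate arithmetic; restated as compile-time checks (a sketch mirroring the assertions, not emulator code):

    #include <cstdint>

    // mac.l: 4*3 = 12, then 12 + (-5)*7 = -23, held in the 64-bit MACH:MACL pair
    static_assert(int64_t{4} * 3 == 12, "first mac.l step");
    static_assert(12 + int64_t{-5} * 7 == -23, "second mac.l step");
    static_assert(uint64_t(int64_t{-23}) == 0xFFFFFFFFFFFFFFE9u, "what -23ull compares equal to");
    // mac.w: the 16-bit read sees (u16)-7 as -7 again, so -7*3 = -21, then -21 + 5*7 = 14
    static_assert(int16_t(-7) * 3 == -21, "first mac.w step");
    static_assert(-21 + 5 * 7 == 14, "second mac.w step");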