Standard functions for virtual memory allocation/protection/mapping/unmapping.

This commit is contained in:
Flyinghead 2019-05-22 11:41:12 +02:00
parent d55d803c16
commit fb76efb08d
15 changed files with 139 additions and 104 deletions

View File

@ -186,13 +186,7 @@ void dsp_init()
dsp.RBP=0;
dsp.regs.MDEC_CT=1;
//os_MakeExecutable(dsp.DynCode,sizeof(dsp.DynCode));
#if HOST_OS == OS_WINDOWS
DWORD old;
VirtualProtect(dsp.DynCode, sizeof(dsp.DynCode), PAGE_EXECUTE_READWRITE, &old);
#endif
mem_region_set_exec(dsp.DynCode, sizeof(dsp.DynCode));
}
void dsp_recompile();

View File

@ -21,7 +21,6 @@
#if HOST_CPU == CPU_ARM64 && FEAT_DSPREC != DYNAREC_NONE
#include <sys/mman.h>
#include "dsp.h"
#include "hw/aica/aica_if.h"
#include "deps/vixl/aarch64/macro-assembler-aarch64.h"
@ -522,7 +521,7 @@ void dsp_init()
dsp.regs.MDEC_CT = 1;
dsp.dyndirty = true;
if (mprotect(dsp.DynCode, sizeof(dsp.DynCode), PROT_EXEC | PROT_READ | PROT_WRITE))
if (!mem_region_set_exec(dsp.DynCode, sizeof(dsp.DynCode)))
{
perror("Couldnt mprotect DSP code");
die("mprotect failed in arm64 dsp");

View File

@ -422,7 +422,7 @@ void arm_Run(u32 CycleCount) {
}
#else // FEAT_AREC != DYNAREC_NONE
#if HOST_OS == OS_LINUX || HOST_OS == OS_DARWIN
#if HOST_OS == OS_DARWIN
#include <sys/mman.h>
#endif
@ -1557,10 +1557,6 @@ naked void arm_exit()
*
*/
//mprotect and stuff ..
#include <sys/mman.h>
void armEmit32(u32 emit32)
{
if (icPtr >= (ICache+ICacheSize-1024))
@ -2183,25 +2179,12 @@ void armt_init()
ICache = (u8*)mmap(ICache, ICacheSize, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED | MAP_PRIVATE | MAP_ANON, 0, 0);
#endif
#if HOST_OS == OS_WINDOWS
DWORD old;
VirtualProtect(ICache,ICacheSize,PAGE_EXECUTE_READWRITE,&old);
#elif HOST_OS == OS_LINUX || HOST_OS == OS_DARWIN
printf("\n\t ARM7_TCB addr: %p | from: %p | addr here: %p\n", ICache, ARM7_TCB, armt_init);
if (mprotect(ICache, ICacheSize, PROT_EXEC|PROT_READ|PROT_WRITE))
{
perror("\n\tError - Couldnt mprotect ARM7_TCB!");
verify(false);
}
mem_region_set_exec(ICache, ICacheSize);
#if TARGET_IPHONE
memset((u8*)mmap(ICache, ICacheSize, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED | MAP_PRIVATE | MAP_ANON, 0, 0),0xFF,ICacheSize);
#else
memset(ICache,0xFF,ICacheSize);
#endif
#endif
icPtr=ICache;

View File

@ -24,7 +24,6 @@ bool bios_loaded = false;
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <errno.h>
#endif
@ -485,15 +484,9 @@ bool naomi_cart_LoadRom(char* file)
RomCacheMapCount = (u32)files.size();
RomCacheMap = new fd_t[files.size()]();
//Allocate space for the ram, so we are sure we have a segment of continius ram
#if HOST_OS == OS_WINDOWS
RomPtr = (u8*)VirtualAlloc(0, RomSize, MEM_RESERVE, PAGE_NOACCESS);
#else
RomPtr = (u8*)mmap(0, RomSize, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
#endif
verify(RomPtr != 0);
verify(RomPtr != (void*)-1);
//Allocate space for the ram, so we are sure we have a segment of continuous ram
RomPtr = (u8*)mem_region_reserve(NULL, RomSize);
verify(RomPtr != NULL);
bool load_error = false;
@ -550,11 +543,7 @@ bool naomi_cart_LoadRom(char* file)
}
//Release the segment we reserved so we can map the files there
#if HOST_OS == OS_WINDOWS
verify(VirtualFree(RomPtr, 0, MEM_RELEASE));
#else
munmap(RomPtr, RomSize);
#endif
mem_region_release(RomPtr, RomSize);
if (load_error)
{
@ -574,23 +563,13 @@ bool naomi_cart_LoadRom(char* file)
if (RomCacheMap[i] == INVALID_FD)
{
//printf("-Reserving ram at 0x%08X, size 0x%08X\n", fstart[i], fsize[i]);
#if HOST_OS == OS_WINDOWS
bool mapped = RomDest == VirtualAlloc(RomDest, fsize[i], MEM_RESERVE, PAGE_NOACCESS);
#else
bool mapped = RomDest == (u8*)mmap(RomDest, RomSize, PROT_NONE, MAP_PRIVATE, 0, 0);
#endif
bool mapped = RomDest == (u8 *)mem_region_reserve(RomDest, fsize[i]);
verify(mapped);
}
else
{
//printf("-Mapping \"%s\" at 0x%08X, size 0x%08X\n", files[i].c_str(), fstart[i], fsize[i]);
#if HOST_OS == OS_WINDOWS
bool mapped = RomDest == MapViewOfFileEx(RomCacheMap[i], FILE_MAP_READ, 0, 0, fsize[i], RomDest);
#else
bool mapped = RomDest == mmap(RomDest, fsize[i], PROT_READ, MAP_PRIVATE, RomCacheMap[i], 0 );
#endif
bool mapped = RomDest == (u8 *)mem_region_map_file((void *)(uintptr_t)RomCacheMap[i], RomDest, fsize[i], 0, false);
if (!mapped)
{
printf("-Mapping ROM FAILED: %s @ %08x size %x\n", files[i].c_str(), fstart[i], fsize[i]);
@ -653,7 +632,8 @@ Cartridge::Cartridge(u32 size)
Cartridge::~Cartridge()
{
	// free(NULL) is a no-op, but subclasses (e.g. DecryptedCartridge) null out
	// RomPtr when it was not malloc'd, so keep the explicit check.
	if (RomPtr != NULL)
		free(RomPtr);
}
bool Cartridge::Read(u32 offset, u32 size, void* dst)
@ -1036,3 +1016,11 @@ void M2Cartridge::Unserialize(void** data, unsigned int* total_size) {
REICAST_US(naomi_cart_ram);
NaomiCartridge::Unserialize(data, total_size);
}
// Releases the reserved/mapped ROM region. RomPtr is then cleared so that the
// base ~Cartridge (which calls free(RomPtr)) does not try to free() memory that
// was mapped rather than malloc'd.
DecryptedCartridge::~DecryptedCartridge()
{
	// TODO this won't work on windows -> need to unmap each file first
	mem_region_release(RomPtr, RomSize);
	// Avoid crash when freeing vmem
	RomPtr = NULL;
}

View File

@ -58,7 +58,7 @@ class DecryptedCartridge : public NaomiCartridge
{
public:
DecryptedCartridge(u8 *rom_ptr, u32 size) : NaomiCartridge(size) { RomPtr = rom_ptr; }
// FIXME Must do a munmap and close for each segment
virtual ~DecryptedCartridge() override;
};
class M2Cartridge : public NaomiCartridge

View File

@ -417,10 +417,6 @@ void recSh4_Reset(bool Manual)
Sh4_int_Reset(Manual);
}
#if HOST_OS == OS_DARWIN
#include <sys/mman.h>
#endif
void recSh4_Init()
{
printf("recSh4 Init\n");

View File

@ -4,13 +4,11 @@
#if HOST_OS==OS_LINUX
#include <poll.h>
#include <termios.h>
//#include <curses.h>
#include <fcntl.h>
#include <semaphore.h>
#include <stdarg.h>
#include <signal.h>
#include <sys/param.h>
#include <sys/mman.h>
#include <sys/time.h>
#include "hw/sh4/dyna/blockmanager.h"
#include "hw/maple/maple_cfg.h"

View File

@ -11,13 +11,11 @@
#include <poll.h>
#include <termios.h>
#endif
//#include <curses.h>
#include <fcntl.h>
#include <semaphore.h>
#include <stdarg.h>
#include <signal.h>
#include <sys/param.h>
#include <sys/mman.h>
#include <sys/time.h>
#if !defined(TARGET_BSD) && !defined(_ANDROID) && !defined(TARGET_IPHONE) && !defined(TARGET_NACL32) && !defined(TARGET_EMSCRIPTEN) && !defined(TARGET_OSX) && !defined(TARGET_OSX_X64)
#include <sys/personality.h>

View File

@ -46,19 +46,64 @@ int ashmem_create_region(const char *name, size_t size) {
}
#endif // #ifdef _ANDROID
void VLockedMemory::LockRegion(unsigned offset, unsigned size_bytes) {
size_t inpage = offset & PAGE_MASK;
if (mprotect(&data[offset - inpage], size_bytes + inpage, PROT_READ)) {
die("mprotect failed ..\n");
}
bool mem_region_lock(void *start, size_t len)
{
size_t inpage = (uintptr_t)start & PAGE_MASK;
if (mprotect((u8*)start - inpage, len + inpage, PROT_READ))
die("mprotect failed...");
return true;
}
void VLockedMemory::UnLockRegion(unsigned offset, unsigned size_bytes) {
size_t inpage = offset & PAGE_MASK;
if (mprotect(&data[offset - inpage], size_bytes + inpage, PROT_READ|PROT_WRITE)) {
bool mem_region_unlock(void *start, size_t len)
{
size_t inpage = (uintptr_t)start & PAGE_MASK;
if (mprotect((u8*)start - inpage, len + inpage, PROT_READ | PROT_WRITE))
// Add some way to see why it failed? gdb> info proc mappings
die("mprotect failed ..\n");
die("mprotect failed...");
return true;
}
bool mem_region_set_exec(void *start, size_t len)
{
size_t inpage = (uintptr_t)start & PAGE_MASK;
if (mprotect((u8*)start - inpage, len + inpage, PROT_READ | PROT_WRITE | PROT_EXEC))
die("mprotect failed...");
return true;
}
// Reserves address space without committing it (PROT_NONE anonymous mapping).
// Returns the mapped address, or NULL on failure (after printing the error).
void *mem_region_reserve(void *start, size_t len)
{
	void *mapping = mmap(start, len, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
	if (mapping != MAP_FAILED)
		return mapping;
	perror("mmap");
	return NULL;
}
// Releases a region previously obtained with mem_region_reserve().
// Returns true on success.
bool mem_region_release(void *start, size_t len)
{
	int rc = munmap(start, len);
	return rc == 0;
}
// Maps 'len' bytes of the file descriptor 'file_handle' at 'dest' (or at a
// kernel-chosen address when dest is NULL), starting at 'offset'.
// The view is shared, read-only unless 'readwrite' is set.
// Returns the mapped address, or NULL on failure (after printing the error).
void *mem_region_map_file(void *file_handle, void *dest, size_t len, size_t offset, bool readwrite)
{
	int prot = PROT_READ | (readwrite ? PROT_WRITE : 0);
	int flags = MAP_SHARED | MAP_NOSYNC;
	if (dest != NULL)
		flags |= MAP_FIXED;	// map exactly at dest, replacing any existing mapping
	void *mapping = mmap(dest, len, prot, flags, (int)(uintptr_t)file_handle, offset);
	if (mapping != MAP_FAILED)
		return mapping;
	perror("mmap");
	return NULL;
}
// Unmaps a file view created by mem_region_map_file().
// On POSIX this is plain munmap, the same operation as releasing a reservation.
bool mem_region_unmap_file(void *start, size_t len)
{
	return munmap(start, len) == 0;
}
// Allocates memory via a fd on shmem/ahmem or even a file on disk
@ -111,7 +156,7 @@ VMemType vmem_platform_init(void **vmem_base_addr, void **sh4rcb_addr) {
// Now try to allocate a contiguous piece of memory.
unsigned memsize = 512*1024*1024 + sizeof(Sh4RCB) + ARAM_SIZE_MAX + 0x10000;
void *first_ptr = mmap(0, memsize, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
void *first_ptr = mem_region_reserve(NULL, memsize);
if (!first_ptr) {
close(shmem_fd);
return MemTypeError;
@ -125,14 +170,14 @@ VMemType vmem_platform_init(void **vmem_base_addr, void **sh4rcb_addr) {
void *sh4rcb_base_ptr = (void*)(ptrint + FPCB_SIZE);
// Now map the memory for the SH4 context, do not include FPCB on purpose (paged on demand).
mprotect(sh4rcb_base_ptr, sizeof(Sh4RCB) - FPCB_SIZE, PROT_READ | PROT_WRITE);
mem_region_unlock(sh4rcb_base_ptr, sizeof(Sh4RCB) - FPCB_SIZE);
return MemType512MB;
}
// Just tries to wipe as much as possible in the relevant area.
void vmem_platform_destroy() {
	// Drop the whole 512MB virtual address range reserved at init.
	mem_region_release(virt_ram_base, 0x20000000);
}
// Resets a chunk of memory by deleting its data and setting its protection back.
@ -150,7 +195,7 @@ void vmem_platform_reset_mem(void *ptr, unsigned size_bytes) {
// Allocates a bunch of memory (page aligned and page-sized)
void vmem_platform_ondemand_page(void *address, unsigned size_bytes) {
	// Commit the faulted page(s) by restoring read/write access.
	verify(mem_region_unlock(address, size_bytes));
}
// Creates mappings to the underlying file including mirroring sections
@ -163,15 +208,13 @@ void vmem_platform_create_mappings(const vmem_mapping *vmem_maps, unsigned numma
// Calculate the number of mirrors
unsigned address_range_size = vmem_maps[i].end_address - vmem_maps[i].start_address;
unsigned num_mirrors = (address_range_size) / vmem_maps[i].memsize;
int protection = vmem_maps[i].allow_writes ? (PROT_READ | PROT_WRITE) : PROT_READ;
verify((address_range_size % vmem_maps[i].memsize) == 0 && num_mirrors >= 1);
for (unsigned j = 0; j < num_mirrors; j++) {
unsigned offset = vmem_maps[i].start_address + j * vmem_maps[i].memsize;
verify(!munmap(&virt_ram_base[offset], vmem_maps[i].memsize));
verify(MAP_FAILED != mmap(&virt_ram_base[offset], vmem_maps[i].memsize, protection,
MAP_SHARED | MAP_NOSYNC | MAP_FIXED, shmem_fd, vmem_maps[i].memoffset));
// ??? (mprotect(rv,size,prot)!=0)
verify(mem_region_unmap_file(&virt_ram_base[offset], vmem_maps[i].memsize));
verify(mem_region_map_file((void*)(uintptr_t)shmem_fd, &virt_ram_base[offset],
vmem_maps[i].memsize, vmem_maps[i].memoffset, vmem_maps[i].allow_writes) != NULL);
}
}
}
@ -179,7 +222,8 @@ void vmem_platform_create_mappings(const vmem_mapping *vmem_maps, unsigned numma
// Prepares the code region for JIT operations, thus marking it as RWX
bool vmem_platform_prepare_jit_block(void *code_area, unsigned size, void **code_area_rwx) {
// Try to map is as RWX, this fails apparently on OSX (and perhaps other systems?)
if (mprotect(code_area, size, PROT_READ | PROT_WRITE | PROT_EXEC)) {
if (!mem_region_set_exec(code_area, size))
{
// Well it failed, use another approach, unmap the memory area and remap it back.
// Seems it works well on Darwin according to reicast code :P
munmap(code_area, size);

View File

@ -2,7 +2,6 @@
#include "types.h"
void os_SetWindowText(const char* text);
void os_MakeExecutable(void* ptr, u32 sz);
double os_GetSeconds();
void os_DoEvents();

View File

@ -1,5 +1,4 @@
#include <unistd.h>
#include <sys/mman.h>
#include "types.h"
#if FEAT_SHREC == DYNAREC_JIT

View File

@ -22,7 +22,6 @@
#if FEAT_SHREC == DYNAREC_JIT
#include <unistd.h>
#include <sys/mman.h>
#include <map>
#include "deps/vixl/aarch64/macro-assembler-aarch64.h"

View File

@ -279,6 +279,13 @@ string get_game_save_prefix();
string get_game_basename();
string get_game_dir();
bool mem_region_lock(void *start, size_t len);
bool mem_region_unlock(void *start, size_t len);
bool mem_region_set_exec(void *start, size_t len);
void *mem_region_reserve(void *start, size_t len);
bool mem_region_release(void *start, size_t len);
void *mem_region_map_file(void *file_handle, void *dest, size_t len, size_t offset, bool readwrite);
bool mem_region_unmap_file(void *start, size_t len);
// Locked memory class, used for texture invalidation purposes.
class VLockedMemory {
@ -297,8 +304,15 @@ public:
void LockRegion(unsigned offset, unsigned size_bytes) {}
void UnLockRegion(unsigned offset, unsigned size_bytes) {}
#else
void LockRegion(unsigned offset, unsigned size_bytes);
void UnLockRegion(unsigned offset, unsigned size_bytes);
// Write-protects [data+offset, data+offset+size_bytes); mem_region_lock
// handles page alignment internally.
void LockRegion(unsigned offset, unsigned size_bytes)
{
	mem_region_lock(&data[offset], size_bytes);
}
// Restores read/write access to [data+offset, data+offset+size_bytes);
// mem_region_unlock handles page alignment internally.
void UnLockRegion(unsigned offset, unsigned size_bytes)
{
	mem_region_unlock(&data[offset], size_bytes);
}
#endif
void Zero() {
@ -318,7 +332,6 @@ public:
}
};
int msgboxf(const wchar* text,unsigned int type,...);

View File

@ -8,19 +8,50 @@
// Implementation of the vmem related function for Windows platforms.
// For now this probably does some assumptions on the CPU/platform.
// This implements the VLockedMemory interface, as defined in _vmem.h
// The implementation allows it to be empty (that is, to not lock memory).
void VLockedMemory::LockRegion(unsigned offset, unsigned size) {
//verify(offset + size < this->size && size != 0);
bool mem_region_lock(void *start, size_t len)
{
DWORD old;
VirtualProtect(&data[offset], size, PAGE_READONLY, &old);
if (!VirtualProtect(start, len, PAGE_READONLY, &old))
die("VirtualProtect failed ..\n");
return true;
}
void VLockedMemory::UnLockRegion(unsigned offset, unsigned size) {
//verify(offset + size <= this->size && size != 0);
bool mem_region_unlock(void *start, size_t len)
{
DWORD old;
VirtualProtect(&data[offset], size, PAGE_READWRITE, &old);
if (!VirtualProtect(start, len, PAGE_READWRITE, &old))
die("VirtualProtect failed ..\n");
return true;
}
bool mem_region_set_exec(void *start, size_t len)
{
DWORD old;
if (!VirtualProtect(start, len, PAGE_EXECUTE_READWRITE, &old))
die("VirtualProtect failed ..\n");
return true;
}
// Reserves address space without committing physical pages.
// Returns the base address, or NULL on failure.
void *mem_region_reserve(void *start, size_t len)
{
	return VirtualAlloc(start, len, MEM_RESERVE, PAGE_NOACCESS);
}
// Releases a reservation made by mem_region_reserve().
// MEM_RELEASE requires size 0 and frees the entire allocation at 'start',
// so 'len' is unused on Windows.
bool mem_region_release(void *start, size_t len)
{
	return VirtualFree(start, 0, MEM_RELEASE) != 0;
}
void *mem_region_map_file(void *file_handle, void *dest, size_t len, size_t offset, bool readwrite)
{
return MapViewOfFileEx((HANDLE)file_handle, readwrite ? FILE_MAP_WRITE : FILE_MAP_READ, (DWORD)(offset >> 32), (DWORD)offset, len, dest);
}
// Unmaps a file view created by mem_region_map_file().
// UnmapViewOfFile unmaps the whole view at 'start', so 'len' is unused.
bool mem_region_unmap_file(void *start, size_t len)
{
	return UnmapViewOfFile(start) != 0;
}
static HANDLE mem_handle = INVALID_HANDLE_VALUE, mem_handle2 = INVALID_HANDLE_VALUE;
@ -37,7 +68,7 @@ VMemType vmem_platform_init(void **vmem_base_addr, void **sh4rcb_addr) {
// Now allocate the actual address space (it will be 64KB aligned on windows).
unsigned memsize = 512*1024*1024 + sizeof(Sh4RCB) + ARAM_SIZE_MAX;
base_alloc = (char*)VirtualAlloc(0, memsize, MEM_RESERVE, PAGE_NOACCESS);
base_alloc = (char*)mem_region_reserve(NULL, memsize);
// Calculate pointers now
*sh4rcb_addr = &base_alloc[0];

View File

@ -465,12 +465,6 @@ void os_SetWindowText(const char* text)
}
}
void os_MakeExecutable(void* ptr, u32 sz)
{
DWORD old;
VirtualProtect(ptr, sz, PAGE_EXECUTE_READWRITE, &old); // sizeof(sz) really?
}
void ReserveBottomMemory()
{
#if defined(_WIN64) && defined(_DEBUG)