mirror of https://github.com/PCSX2/pcsx2.git
Common: Switch back to Linux allocation routines on macOS
We don't need MAP_FIXED anymore
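
With fixed-address allocations gone, macOS can reuse the generic mmap() path in Linux/LnxHostSys.cpp instead of keeping a separate mach_vm_allocate() implementation in Darwin/DarwinMisc.cpp. A rough sketch of the allocation style this converges on (not the exact patch; AllocatePages is a hypothetical helper used only for illustration), where the one macOS-specific wrinkle left is MAP_JIT for executable pages on Apple Silicon:

// Sketch only: a single POSIX allocation path shared by Linux and macOS,
// assuming callers no longer request a fixed base address.
#include <sys/mman.h>
#include <cstddef>

void* AllocatePages(std::size_t size, bool executable)
{
	int prot = PROT_READ | PROT_WRITE;
	if (executable)
		prot |= PROT_EXEC;

	int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#ifdef __APPLE__
	// On Apple Silicon, writable+executable pages must be mapped with MAP_JIT,
	// and MAP_JIT mappings cannot be placed at a fixed address, which is why
	// dropping MAP_FIXED lets macOS share the Linux code path.
	if (executable)
		flags |= MAP_JIT;
#endif

	void* ptr = mmap(nullptr, size, prot, flags, -1, 0);
	return (ptr == MAP_FAILED) ? nullptr : ptr;
}

On Linux the same call compiles without the __APPLE__ branch, which is the point of sharing one implementation.
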
This commit is contained in:
parent 5a6b3cba6e
commit d8b8af44a0
@ -149,6 +149,7 @@ elseif(APPLE)
		Darwin/DarwinThreads.cpp
		Darwin/DarwinMisc.cpp
		Darwin/DarwinMisc.h
		Linux/LnxHostSys.cpp
	)
	target_compile_options(common PRIVATE -fobjc-arc)
	target_link_options(common PRIVATE -fobjc-link-runtime)

@ -16,16 +16,10 @@
#include <cstring>
#include <cstdlib>
#include <optional>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <time.h>
#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/mach_time.h>
#include <mach/mach_vm.h>
#include <mach/task.h>
#include <mach/vm_map.h>
#include <mutex>
#include <IOKit/pwr_mgt/IOPMLib.h>

@ -201,200 +195,6 @@ size_t HostSys::GetRuntimeCacheLineSize()
	return static_cast<size_t>(std::max<s64>(sysctlbyname_T<s64>("hw.cachelinesize").value_or(0), 0));
}

static __ri vm_prot_t MachProt(const PageProtectionMode& mode)
{
	vm_prot_t machmode = (mode.CanWrite()) ? VM_PROT_WRITE : 0;
	machmode |= (mode.CanRead()) ? VM_PROT_READ : 0;
	machmode |= (mode.CanExecute()) ? (VM_PROT_EXECUTE | VM_PROT_READ) : 0;
	return machmode;
}

void* HostSys::Mmap(void* base, size_t size, const PageProtectionMode& mode)
{
	pxAssertMsg((size & (__pagesize - 1)) == 0, "Size is page aligned");
	if (mode.IsNone())
		return nullptr;

#ifdef __aarch64__
	// We can't allocate executable memory with mach_vm_allocate() on Apple Silicon.
	// Instead, we need to use MAP_JIT with mmap(), which does not support fixed mappings.
	if (mode.CanExecute())
	{
		if (base)
			return nullptr;

		const u32 mmap_prot = mode.CanWrite() ? (PROT_READ | PROT_WRITE | PROT_EXEC) : (PROT_READ | PROT_EXEC);
		const u32 flags = MAP_PRIVATE | MAP_ANON | MAP_JIT;
		void* const res = mmap(nullptr, size, mmap_prot, flags, -1, 0);
		return (res == MAP_FAILED) ? nullptr : res;
	}
#endif

	kern_return_t ret = mach_vm_allocate(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&base), size,
		base ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE);
	if (ret != KERN_SUCCESS)
	{
		DEV_LOG("mach_vm_allocate() returned {}", ret);
		return nullptr;
	}

	ret = mach_vm_protect(mach_task_self(), reinterpret_cast<mach_vm_address_t>(base), size, false, MachProt(mode));
	if (ret != KERN_SUCCESS)
	{
		DEV_LOG("mach_vm_protect() returned {}", ret);
		mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(base), size);
		return nullptr;
	}

	return base;
}

void HostSys::Munmap(void* base, size_t size)
{
	if (!base)
		return;

	mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(base), size);
}

void HostSys::MemProtect(void* baseaddr, size_t size, const PageProtectionMode& mode)
{
	pxAssertMsg((size & (__pagesize - 1)) == 0, "Size is page aligned");

	kern_return_t res = mach_vm_protect(mach_task_self(), reinterpret_cast<mach_vm_address_t>(baseaddr), size, false,
		MachProt(mode));
	if (res != KERN_SUCCESS) [[unlikely]]
	{
		ERROR_LOG("mach_vm_protect() failed: {}", res);
		pxFailRel("mach_vm_protect() failed");
	}
}

std::string HostSys::GetFileMappingName(const char* prefix)
{
	// name actually is not used.
	return {};
}

void* HostSys::CreateSharedMemory(const char* name, size_t size)
{
	mach_vm_size_t vm_size = size;
	mach_port_t port;
	const kern_return_t res = mach_make_memory_entry_64(
		mach_task_self(), &vm_size, 0, MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE, &port, MACH_PORT_NULL);
	if (res != KERN_SUCCESS)
	{
		ERROR_LOG("mach_make_memory_entry_64() failed: {}", res);
		return nullptr;
	}

	return reinterpret_cast<void*>(static_cast<uintptr_t>(port));
}

void HostSys::DestroySharedMemory(void* ptr)
{
	mach_port_deallocate(mach_task_self(), static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(ptr)));
}

void* HostSys::MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, const PageProtectionMode& mode)
{
	mach_vm_address_t ptr = reinterpret_cast<mach_vm_address_t>(baseaddr);
	const kern_return_t res = mach_vm_map(mach_task_self(), &ptr, size, 0, baseaddr ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE,
		static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(handle)), offset, FALSE,
		MachProt(mode), VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
	if (res != KERN_SUCCESS)
	{
		ERROR_LOG("mach_vm_map() failed: {}", res);
		return nullptr;
	}

	return reinterpret_cast<void*>(ptr);
}

void HostSys::UnmapSharedMemory(void* baseaddr, size_t size)
{
	const kern_return_t res = mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(baseaddr), size);
	if (res != KERN_SUCCESS)
		pxFailRel("Failed to unmap shared memory");
}

#ifdef _M_ARM64

void HostSys::FlushInstructionCache(void* address, u32 size)
{
	__builtin___clear_cache(reinterpret_cast<char*>(address), reinterpret_cast<char*>(address) + size);
}

#endif

SharedMemoryMappingArea::SharedMemoryMappingArea(u8* base_ptr, size_t size, size_t num_pages)
	: m_base_ptr(base_ptr)
	, m_size(size)
	, m_num_pages(num_pages)
{
}

SharedMemoryMappingArea::~SharedMemoryMappingArea()
{
	pxAssertRel(m_num_mappings == 0, "No mappings left");

	if (mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(m_base_ptr), m_size) != KERN_SUCCESS)
		pxFailRel("Failed to release shared memory area");
}


std::unique_ptr<SharedMemoryMappingArea> SharedMemoryMappingArea::Create(size_t size)
{
	pxAssertRel(Common::IsAlignedPow2(size, __pagesize), "Size is page aligned");

	mach_vm_address_t alloc;
	const kern_return_t res =
		mach_vm_map(mach_task_self(), &alloc, size, 0, VM_FLAGS_ANYWHERE,
			MEMORY_OBJECT_NULL, 0, false, VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_NONE);
	if (res != KERN_SUCCESS)
	{
		ERROR_LOG("mach_vm_map() failed: {}", res);
		return {};
	}

	return std::unique_ptr<SharedMemoryMappingArea>(new SharedMemoryMappingArea(reinterpret_cast<u8*>(alloc), size, size / __pagesize));
}

u8* SharedMemoryMappingArea::Map(void* file_handle, size_t file_offset, void* map_base, size_t map_size, const PageProtectionMode& mode)
{
	pxAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

	const kern_return_t res =
		mach_vm_map(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&map_base), map_size, 0, VM_FLAGS_OVERWRITE,
			static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(file_handle)), file_offset, false,
			MachProt(mode), VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
	if (res != KERN_SUCCESS) [[unlikely]]
	{
		ERROR_LOG("mach_vm_map() failed: {}", res);
		return nullptr;
	}

	m_num_mappings++;
	return static_cast<u8*>(map_base);
}

bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
{
	pxAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

	const kern_return_t res =
		mach_vm_map(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&map_base), map_size, 0, VM_FLAGS_OVERWRITE,
			MEMORY_OBJECT_NULL, 0, false, VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_NONE);
	if (res != KERN_SUCCESS) [[unlikely]]
	{
		ERROR_LOG("mach_vm_map() failed: {}", res);
		return false;
	}

	m_num_mappings--;
	return true;
}

#ifdef _M_ARM64

static thread_local int s_code_write_depth = 0;

@ -14,8 +14,10 @@
#include <fcntl.h>
#include <mutex>
#include <sys/mman.h>
#include <ucontext.h>
#include <unistd.h>
#ifndef __APPLE__
#include <ucontext.h>
#endif

#include "fmt/core.h"

@ -23,12 +25,6 @@
#include "cpuinfo.h"
#endif

// FreeBSD does not have MAP_FIXED_NOREPLACE, but does have MAP_EXCL.
// MAP_FIXED combined with MAP_EXCL behaves like MAP_FIXED_NOREPLACE.
#if defined(__FreeBSD__) && !defined(MAP_FIXED_NOREPLACE)
#define MAP_FIXED_NOREPLACE (MAP_FIXED | MAP_EXCL)
#endif

static __ri uint LinuxProt(const PageProtectionMode& mode)
{
	u32 lnxmode = 0;

@ -53,8 +49,6 @@ void* HostSys::Mmap(void* base, size_t size, const PageProtectionMode& mode)
	const u32 prot = LinuxProt(mode);

	u32 flags = MAP_PRIVATE | MAP_ANONYMOUS;
	if (base)
		flags |= MAP_FIXED_NOREPLACE;

	void* res = mmap(base, size, prot, flags, -1, 0);
	if (res == MAP_FAILED)

@ -124,8 +118,12 @@ void* HostSys::MapSharedMemory(void* handle, size_t offset, void* baseaddr, size
{
	const uint lnxmode = LinuxProt(mode);

	const int flags = (baseaddr != nullptr) ? (MAP_SHARED | MAP_FIXED_NOREPLACE) : MAP_SHARED;
	void* ptr = mmap(baseaddr, size, lnxmode, flags, static_cast<int>(reinterpret_cast<intptr_t>(handle)), static_cast<off_t>(offset));
	int flags = MAP_SHARED;
#ifdef __APPLE__
	if (mode.CanExecute())
		flags |= MAP_JIT;
#endif
	void* ptr = mmap(0, size, lnxmode, flags, static_cast<int>(reinterpret_cast<intptr_t>(handle)), static_cast<off_t>(offset));
	if (ptr == MAP_FAILED)
		return nullptr;

@ -138,6 +136,8 @@ void HostSys::UnmapSharedMemory(void* baseaddr, size_t size)
		pxFailRel("Failed to unmap shared memory");
}

#ifndef __APPLE__

size_t HostSys::GetRuntimePageSize()
{
	int res = sysconf(_SC_PAGESIZE);

@ -183,6 +183,8 @@ size_t HostSys::GetRuntimeCacheLineSize()
#endif
}

#endif

SharedMemoryMappingArea::SharedMemoryMappingArea(u8* base_ptr, size_t size, size_t num_pages)
	: m_base_ptr(base_ptr)
	, m_size(size)

@ -236,6 +238,8 @@ bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
	return true;
}

#ifndef __APPLE__ // These are done in DarwinMisc

namespace PageFaultHandler
{
	static std::recursive_mutex s_exception_handler_mutex;

@ -370,3 +374,4 @@ bool PageFaultHandler::Install(Error* error)
	s_installed = true;
	return true;
}
#endif // __APPLE__
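
The _M_ARM64 block that remains in the Darwin hunk (s_code_write_depth) points at the other half of the JIT story: MAP_JIT pages on Apple Silicon start out write-protected for the calling thread, so code writes are bracketed by per-thread toggles. A rough, illustrative sketch of that pattern follows; the helper names and structure are assumptions, not PCSX2's actual API, only pthread_jit_write_protect_np and sys_icache_invalidate are real macOS calls.

#if defined(__APPLE__) && defined(__aarch64__)
#include <pthread.h>
#include <libkern/OSCacheControl.h>
#include <cstddef>

// Illustrative helpers (hypothetical names): a depth counter allows nested
// write scopes; only the outermost scope flips the thread's JIT protection.
static thread_local int s_write_depth = 0;

void BeginCodeWrite()
{
	if (s_write_depth++ == 0)
		pthread_jit_write_protect_np(0); // make MAP_JIT pages writable on this thread
}

void EndCodeWrite(void* start, std::size_t size)
{
	if (--s_write_depth == 0)
		pthread_jit_write_protect_np(1); // back to executable
	sys_icache_invalidate(start, size);  // flush the instruction cache for the new code
}
#endif
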