diff --git a/common/Darwin/DarwinMisc.cpp b/common/Darwin/DarwinMisc.cpp
index 4f9d76ba74..662ef79aa3 100644
--- a/common/Darwin/DarwinMisc.cpp
+++ b/common/Darwin/DarwinMisc.cpp
@@ -1,4 +1,4 @@
-// SPDX-FileCopyrightText: 2002-2023 PCSX2 Dev Team
+// SPDX-FileCopyrightText: 2002-2024 PCSX2 Dev Team
 // SPDX-License-Identifier: LGPL-3.0+
 
 #if defined(__APPLE__)
@@ -9,14 +9,21 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
+#include
+#include
 #include
+#include
+#include
 #include
 
-#include "common/Pcsx2Types.h"
+#include "common/Assertions.h"
+#include "common/BitUtils.h"
 #include "common/Console.h"
+#include "common/Pcsx2Types.h"
 #include "common/HostSys.h"
 #include "common/Threading.h"
 #include "common/WindowInfo.h"
@@ -95,9 +102,9 @@ static std::optional<u32> sysctlbyname_u32(const char* name)
 
 std::string GetOSVersionString()
 {
-	std::string type    = sysctl_str(CTL_KERN, KERN_OSTYPE);
+	std::string type = sysctl_str(CTL_KERN, KERN_OSTYPE);
 	std::string release = sysctl_str(CTL_KERN, KERN_OSRELEASE);
-	std::string arch    = sysctl_str(CTL_HW, HW_MACHINE);
+	std::string arch = sysctl_str(CTL_HW, HW_MACHINE);
 
 	return type + " " + release + " " + arch;
 }
@@ -162,7 +169,7 @@ std::vector<DarwinMisc::CPUClass> DarwinMisc::GetCPUClasses()
 		if (!physicalcpu.has_value() || !logicalcpu.has_value())
 		{
 			Console.Warning("(DarwinMisc) Perf level %u is missing data on %s cpus!",
-			                i, !physicalcpu.has_value() ? "physical" : "logical");
+				i, !physicalcpu.has_value() ? "physical" : "logical");
 			continue;
 		}
 
@@ -181,4 +188,217 @@ std::vector<DarwinMisc::CPUClass> DarwinMisc::GetCPUClasses()
 	return out;
 }
 
+static __ri vm_prot_t MachProt(const PageProtectionMode& mode)
+{
+	vm_prot_t machmode = (mode.CanWrite()) ? VM_PROT_WRITE : 0;
+	machmode |= (mode.CanRead()) ? VM_PROT_READ : 0;
+	machmode |= (mode.CanExecute()) ? (VM_PROT_EXECUTE | VM_PROT_READ) : 0;
+	return machmode;
+}
+
+void* HostSys::Mmap(void* base, size_t size, const PageProtectionMode& mode)
+{
+	pxAssertMsg((size & (__pagesize - 1)) == 0, "Size is page aligned");
+	if (mode.IsNone())
+		return nullptr;
+
+#ifdef __aarch64__
+	// We can't allocate executable memory with mach_vm_allocate() on Apple Silicon.
+	// Instead, we need to use MAP_JIT with mmap(), which does not support fixed mappings.
+	if (mode.CanExecute())
+	{
+		if (base)
+			return nullptr;
+
+		const u32 mmap_prot = mode.CanWrite() ? (PROT_READ | PROT_WRITE | PROT_EXEC) : (PROT_READ | PROT_EXEC);
+		const u32 flags = MAP_PRIVATE | MAP_ANON | MAP_JIT;
+		void* const res = mmap(nullptr, size, mmap_prot, flags, -1, 0);
+		return (res == MAP_FAILED) ? nullptr : res;
+	}
+#endif
+
+	kern_return_t ret = mach_vm_allocate(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&base), size,
+		base ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE);
+	if (ret != KERN_SUCCESS)
+	{
+		DEV_LOG("mach_vm_allocate() returned {}", ret);
+		return nullptr;
+	}
+
+	ret = mach_vm_protect(mach_task_self(), reinterpret_cast<mach_vm_address_t>(base), size, false, MachProt(mode));
+	if (ret != KERN_SUCCESS)
+	{
+		DEV_LOG("mach_vm_protect() returned {}", ret);
+		mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(base), size);
+		return nullptr;
+	}
+
+	return base;
+}
+
+void HostSys::Munmap(void* base, size_t size)
+{
+	if (!base)
+		return;
+
+	mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(base), size);
+}
+
+void HostSys::MemProtect(void* baseaddr, size_t size, const PageProtectionMode& mode)
+{
+	pxAssertMsg((size & (__pagesize - 1)) == 0, "Size is page aligned");
+
+	kern_return_t res = mach_vm_protect(mach_task_self(), reinterpret_cast<mach_vm_address_t>(baseaddr), size, false,
+		MachProt(mode));
+	if (res != KERN_SUCCESS) [[unlikely]]
+	{
+		ERROR_LOG("mach_vm_protect() failed: {}", res);
+		pxFailRel("mach_vm_protect() failed");
+	}
+}
+
+std::string HostSys::GetFileMappingName(const char* prefix)
+{
+	// name actually is not used.
+	return {};
+}
+
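+// Shared memory "handles" on Darwin are Mach named memory entry ports created by
+// mach_make_memory_entry_64(); the port name is stashed in the returned void* and
+// later handed to mach_vm_map() by MapSharedMemory().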
+void* HostSys::CreateSharedMemory(const char* name, size_t size)
+{
+	mach_vm_size_t vm_size = size;
+	mach_port_t port;
+	const kern_return_t res = mach_make_memory_entry_64(
+		mach_task_self(), &vm_size, 0, MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE, &port, MACH_PORT_NULL);
+	if (res != KERN_SUCCESS)
+	{
+		ERROR_LOG("mach_make_memory_entry_64() failed: {}", res);
+		return nullptr;
+	}
+
+	return reinterpret_cast<void*>(static_cast<uintptr_t>(port));
+}
+
+void HostSys::DestroySharedMemory(void* ptr)
+{
+	mach_port_deallocate(mach_task_self(), static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(ptr)));
+}
+
+void* HostSys::MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, const PageProtectionMode& mode)
+{
+	mach_vm_address_t ptr = reinterpret_cast<mach_vm_address_t>(baseaddr);
+	const kern_return_t res = mach_vm_map(mach_task_self(), &ptr, size, 0, baseaddr ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE,
+		static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(handle)), offset, FALSE,
+		MachProt(mode), VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
+	if (res != KERN_SUCCESS)
+	{
+		ERROR_LOG("mach_vm_map() failed: {}", res);
+		return nullptr;
+	}
+
+	return reinterpret_cast<void*>(ptr);
+}
+
+void HostSys::UnmapSharedMemory(void* baseaddr, size_t size)
+{
+	const kern_return_t res = mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(baseaddr), size);
+	if (res != KERN_SUCCESS)
+		pxFailRel("Failed to unmap shared memory");
+}
+
+#ifdef _M_ARM64
+
+void HostSys::FlushInstructionCache(void* address, u32 size)
+{
+	__builtin___clear_cache(reinterpret_cast<char*>(address), reinterpret_cast<char*>(address) + size);
+}
+
+#endif
+
+SharedMemoryMappingArea::SharedMemoryMappingArea(u8* base_ptr, size_t size, size_t num_pages)
+	: m_base_ptr(base_ptr)
+	, m_size(size)
+	, m_num_pages(num_pages)
+{
+}
+
+SharedMemoryMappingArea::~SharedMemoryMappingArea()
+{
+	pxAssertRel(m_num_mappings == 0, "No mappings left");
+
+	if (mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(m_base_ptr), m_size) != KERN_SUCCESS)
+		pxFailRel("Failed to release shared memory area");
+}
+
+
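+// The mapping area is reserved up front as a single VM_PROT_NONE allocation; Map() and
+// Unmap() then replace pages in place with mach_vm_map(VM_FLAGS_OVERWRITE), so the
+// reservation itself is only released when the area is destroyed.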
+std::unique_ptr<SharedMemoryMappingArea> SharedMemoryMappingArea::Create(size_t size)
+{
+	pxAssertRel(Common::IsAlignedPow2(size, __pagesize), "Size is page aligned");
+
+	mach_vm_address_t alloc;
+	const kern_return_t res =
+		mach_vm_map(mach_task_self(), &alloc, size, 0, VM_FLAGS_ANYWHERE,
+			MEMORY_OBJECT_NULL, 0, false, VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_NONE);
+	if (res != KERN_SUCCESS)
+	{
+		ERROR_LOG("mach_vm_map() failed: {}", res);
+		return {};
+	}
+
+	return std::unique_ptr<SharedMemoryMappingArea>(new SharedMemoryMappingArea(reinterpret_cast<u8*>(alloc), size, size / __pagesize));
+}
+
+u8* SharedMemoryMappingArea::Map(void* file_handle, size_t file_offset, void* map_base, size_t map_size, const PageProtectionMode& mode)
+{
+	pxAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));
+
+	const kern_return_t res =
+		mach_vm_map(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&map_base), map_size, 0, VM_FLAGS_OVERWRITE,
+			static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(file_handle)), file_offset, false,
+			MachProt(mode), VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
+	if (res != KERN_SUCCESS) [[unlikely]]
+	{
+		ERROR_LOG("mach_vm_map() failed: {}", res);
+		return nullptr;
+	}
+
+	m_num_mappings++;
+	return static_cast<u8*>(map_base);
+}
+
+bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
+{
+	pxAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));
+
+	const kern_return_t res =
+		mach_vm_map(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&map_base), map_size, 0, VM_FLAGS_OVERWRITE,
+			MEMORY_OBJECT_NULL, 0, false, VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_NONE);
+	if (res != KERN_SUCCESS) [[unlikely]]
+	{
+		ERROR_LOG("mach_vm_map() failed: {}", res);
+		return false;
+	}
+
+	m_num_mappings--;
+	return true;
+}
+
+#ifdef _M_ARM64
+
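+// Per-thread nesting counter for Begin/EndCodeWrite(): only the outermost pair toggles
+// pthread_jit_write_protect_np(), so nested callers don't flip MAP_JIT pages back to
+// execute-only prematurely.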
+static thread_local int s_code_write_depth = 0;
+
+void HostSys::BeginCodeWrite()
+{
+	if ((s_code_write_depth++) == 0)
+		pthread_jit_write_protect_np(0);
+}
+
+void HostSys::EndCodeWrite()
+{
+	pxAssert(s_code_write_depth > 0);
+	if ((--s_code_write_depth) == 0)
+		pthread_jit_write_protect_np(1);
+}
+
+#endif
+
 #endif
diff --git a/common/Linux/LnxHostSys.cpp b/common/Linux/LnxHostSys.cpp
index 70ce27c391..f0c713b82f 100644
--- a/common/Linux/LnxHostSys.cpp
+++ b/common/Linux/LnxHostSys.cpp
@@ -29,14 +29,6 @@
 #define MAP_ANONYMOUS MAP_ANON
 #endif
 
-// MacOS does not have MAP_FIXED_NOREPLACE, which means our mappings are
-// vulnerable to races with the main/Qt threads. TODO: Investigate using
-// mach_vm_allocate()/mach_vm_map() instead of mmap(), but Apple's
-// documentation for these routines is non-existant...
-#if defined(__APPLE__) && !defined(MAP_FIXED_NOREPLACE)
-#define MAP_FIXED_NOREPLACE MAP_FIXED
-#endif
-
 // FreeBSD does not have MAP_FIXED_NOREPLACE, but does have MAP_EXCL.
 // MAP_FIXED combined with MAP_EXCL behaves like MAP_FIXED_NOREPLACE.
 #if defined(__FreeBSD__) && !defined(MAP_FIXED_NOREPLACE)
@@ -230,6 +222,8 @@ bool PageFaultHandler::Install(Error* error)
 	return true;
 }
 
+#ifndef __APPLE__
+
 static __ri uint LinuxProt(const PageProtectionMode& mode)
 {
 	u32 lnxmode = 0;
@@ -406,23 +400,6 @@ bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
 	return true;
 }
 
-#endif
-
-#if defined(_M_ARM64) && defined(__APPLE__)
-
-static thread_local int s_code_write_depth = 0;
-
-void HostSys::BeginCodeWrite()
-{
-	if ((s_code_write_depth++) == 0)
-		pthread_jit_write_protect_np(0);
-}
-
-void HostSys::EndCodeWrite()
-{
-	pxAssert(s_code_write_depth > 0);
-	if ((--s_code_write_depth) == 0)
-		pthread_jit_write_protect_np(1);
-}
+#endif // __APPLE__
 
 #endif
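Usage sketch (not part of the patch itself): how a JIT caller would be expected to drive the Apple Silicon W^X path added in DarwinMisc.cpp. EmitCode() and AllocateAndEmit() are hypothetical illustrations; only HostSys::Mmap(), BeginCodeWrite(), EndCodeWrite() and FlushInstructionCache() come from this change, and the caller is assumed to pass a read+write+execute PageProtectionMode.

#include <cstring>

#include "common/HostSys.h"

// Hypothetical emitter: writes a single AArch64 RET instruction into the buffer.
static void EmitCode(u8* buffer, size_t size)
{
	const u32 ret_insn = 0xD65F03C0; // RET
	if (size >= sizeof(ret_insn))
		std::memcpy(buffer, &ret_insn, sizeof(ret_insn));
}

// rwx_mode is assumed to request read+write+execute, so on __aarch64__ HostSys::Mmap()
// takes the mmap(MAP_JIT) branch and the base address must be nullptr.
static void* AllocateAndEmit(size_t size, const PageProtectionMode& rwx_mode)
{
	void* const code = HostSys::Mmap(nullptr, size, rwx_mode);
	if (!code)
		return nullptr;

	HostSys::BeginCodeWrite(); // outermost call makes MAP_JIT pages writable on this thread
	EmitCode(static_cast<u8*>(code), size);
	HostSys::EndCodeWrite(); // depth back to zero, pages return to execute-only
	HostSys::FlushInstructionCache(code, static_cast<u32>(size));
	return code;
}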