2023-12-22 11:57:49 +00:00
|
|
|
// SPDX-FileCopyrightText: 2002-2023 PCSX2 Dev Team
|
|
|
|
// SPDX-License-Identifier: LGPL-3.0+
|
2009-11-16 00:40:09 +00:00
|
|
|
|
2023-12-26 10:25:26 +00:00
|
|
|
#if defined(__APPLE__)
|
|
|
|
#define _XOPEN_SOURCE
|
|
|
|
#endif
|
|
|
|
|
2021-09-03 10:43:33 +00:00
|
|
|
#if !defined(_WIN32)
|
2022-05-18 13:27:23 +00:00
|
|
|
#include <cstdio>
|
2009-11-16 00:40:09 +00:00
|
|
|
#include <sys/mman.h>
|
|
|
|
#include <signal.h>
|
2010-10-27 23:58:10 +00:00
|
|
|
#include <errno.h>
|
2022-10-12 13:57:53 +00:00
|
|
|
#include <fcntl.h>
|
2012-03-17 11:21:51 +00:00
|
|
|
#include <unistd.h>
|
2009-11-16 00:40:09 +00:00
|
|
|
|
2022-12-28 06:53:37 +00:00
|
|
|
#include <mutex>
|
|
|
|
|
2022-05-18 13:27:23 +00:00
|
|
|
#include "fmt/core.h"
|
|
|
|
|
|
|
|
#include "common/Assertions.h"
|
2024-05-06 15:37:56 +00:00
|
|
|
#include "common/BitUtils.h"
|
2022-05-09 10:11:30 +00:00
|
|
|
#include "common/Console.h"
|
2024-05-06 15:37:56 +00:00
|
|
|
#include "common/Error.h"
|
2023-12-26 11:25:10 +00:00
|
|
|
#include "common/HostSys.h"
|
2021-09-01 20:31:46 +00:00
|
|
|
|
2015-11-17 17:38:26 +00:00
|
|
|
// Apple uses the MAP_ANON define instead of MAP_ANONYMOUS, but they mean
|
|
|
|
// the same thing.
|
|
|
|
#if defined(__APPLE__) && !defined(MAP_ANONYMOUS)
|
2016-11-12 15:28:37 +00:00
|
|
|
#define MAP_ANONYMOUS MAP_ANON
|
2015-11-17 17:38:26 +00:00
|
|
|
#endif
|
|
|
|
|
2024-05-22 00:25:52 +00:00
|
|
|
// FreeBSD does not have MAP_FIXED_NOREPLACE, but does have MAP_EXCL.
|
|
|
|
// MAP_FIXED combined with MAP_EXCL behaves like MAP_FIXED_NOREPLACE.
|
|
|
|
#if defined(__FreeBSD__) && !defined(MAP_FIXED_NOREPLACE)
|
|
|
|
#define MAP_FIXED_NOREPLACE MAP_FIXED | MAP_EXCL
|
|
|
|
#endif
|
|
|
|
|
2022-10-29 03:39:19 +00:00
|
|
|
#include <cerrno>
|
|
|
|
#include <fcntl.h>
|
|
|
|
#include <sys/mman.h>
|
|
|
|
#include <unistd.h>
|
|
|
|
|
|
|
|
#ifndef __APPLE__
|
|
|
|
#include <ucontext.h>
|
|
|
|
#endif
|
|
|
|
|
2022-12-28 06:53:37 +00:00
|
|
|
static std::recursive_mutex s_exception_handler_mutex;
|
2024-05-06 15:37:56 +00:00
|
|
|
static bool s_in_exception_handler = false;
|
2024-05-06 15:50:24 +00:00
|
|
|
static bool s_exception_handler_installed = false;
|
2010-11-15 14:05:02 +00:00
|
|
|
|
2022-12-28 06:53:37 +00:00
|
|
|
#ifdef __APPLE__
|
2023-12-26 10:25:26 +00:00
|
|
|
#include <mach/task.h>
|
|
|
|
#include <mach/mach_init.h>
|
|
|
|
#include <mach/mach_port.h>
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#if defined(__APPLE__) || defined(__aarch64__)
|
2022-10-29 03:39:19 +00:00
|
|
|
static struct sigaction s_old_sigbus_action;
|
2023-12-26 10:25:26 +00:00
|
|
|
#endif
|
|
|
|
#if !defined(__APPLE__) || defined(__aarch64__)
|
2022-11-19 13:18:58 +00:00
|
|
|
static struct sigaction s_old_sigsegv_action;
|
2022-10-29 03:39:19 +00:00
|
|
|
#endif
|
|
|
|
|
2024-05-06 15:37:56 +00:00
|
|
|
#ifdef __aarch64__
|
|
|
|
// Decodes the AArch64 instruction at `ptr` and reports whether it is a store.
// The mask/value pairs mirror vixl's disassembler Instruction::IsStore().
[[maybe_unused]] static bool IsStoreInstruction(uptr ptr)
{
	u32 insn;
	std::memcpy(&insn, reinterpret_cast<const void*>(ptr), sizeof(insn));

	// Must sit in the load/store encoding space at all.
	// if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed)
	if ((insn & 0x0a000000) != 0x08000000)
		return false;

	// Load/store pair encodings: bit 22 (the L bit) is clear for stores.
	// if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed)
	if ((insn & 0x3a000000) == 0x28000000)
	{
		// return Mask(LoadStorePairLBit) == 0
		return ((insn >> 22) & 1u) == 0;
	}

	// Single-register store encodings, by operand size/type.
	switch (insn & 0xC4C00000)
	{
		case 0x00000000: // STRB_w
		case 0x40000000: // STRH_w
		case 0x80000000: // STR_w
		case 0xC0000000: // STR_x
		case 0x04000000: // STR_b
		case 0x44000000: // STR_h
		case 0x84000000: // STR_s
		case 0xC4000000: // STR_d
		case 0x04800000: // STR_q
			return true;

		default:
			return false;
	}
}
|
|
|
|
#endif
|
|
|
|
|
2022-12-28 06:53:37 +00:00
|
|
|
static void CallExistingSignalHandler(int signal, siginfo_t* siginfo, void* ctx)
|
|
|
|
{
|
2023-12-26 10:25:26 +00:00
|
|
|
#if defined(__aarch64__)
|
|
|
|
const struct sigaction& sa = (signal == SIGBUS) ? s_old_sigbus_action : s_old_sigsegv_action;
|
|
|
|
#elif defined(__APPLE__)
|
2022-12-28 06:53:37 +00:00
|
|
|
const struct sigaction& sa = s_old_sigbus_action;
|
|
|
|
#else
|
|
|
|
const struct sigaction& sa = s_old_sigsegv_action;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
if (sa.sa_flags & SA_SIGINFO)
|
|
|
|
{
|
|
|
|
sa.sa_sigaction(signal, siginfo, ctx);
|
|
|
|
}
|
|
|
|
else if (sa.sa_handler == SIG_DFL)
|
|
|
|
{
|
|
|
|
// Re-raising the signal would just queue it, and since we'd restore the handler back to us,
|
|
|
|
// we'd end up right back here again. So just abort, because that's probably what it'd do anyway.
|
|
|
|
abort();
|
|
|
|
}
|
|
|
|
else if (sa.sa_handler != SIG_IGN)
|
|
|
|
{
|
|
|
|
sa.sa_handler(signal);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-11-15 14:05:02 +00:00
|
|
|
// Linux implementation of SIGSEGV handler. Bind it using sigaction().
|
2024-05-06 15:37:56 +00:00
|
|
|
// Signal filter for SIGSEGV/SIGBUS. Extracts the faulting address, the PC of
// the faulting instruction, and whether the access was a write, then hands
// the fault to PageFaultHandler::HandlePageFault(). Unhandled faults are
// forwarded to the previously-installed handler.
static void SysPageFaultSignalFilter(int signal, siginfo_t* info, void* ctx)
{
	// Executing the handler concurrently from multiple threads wouldn't go down well.
	std::unique_lock lock(s_exception_handler_mutex);

	// Prevent recursive exception filtering.
	if (s_in_exception_handler)
	{
		// Drop the lock before chaining, so the old handler doesn't run under it.
		lock.unlock();
		CallExistingSignalHandler(signal, info, ctx);
		return;
	}

	// Per-OS/per-architecture extraction of fault address, PC and access type
	// from the machine context. On x86-64 the page-fault error code's bit 1
	// distinguishes writes; on ARM64 we decode the faulting instruction instead.
#if defined(__linux__)
	const uptr exception_address = reinterpret_cast<uptr>(info->si_addr);
#if defined(__x86_64__)
	const uptr exception_pc = static_cast<uptr>(static_cast<ucontext_t*>(ctx)->uc_mcontext.gregs[REG_RIP]);
	const bool is_write = (static_cast<ucontext_t*>(ctx)->uc_mcontext.gregs[REG_ERR] & 2) != 0;
#elif defined(__aarch64__)
	const uptr exception_pc = static_cast<uptr>(static_cast<ucontext_t*>(ctx)->uc_mcontext.pc);
	const bool is_write = IsStoreInstruction(exception_pc);
#endif
#elif defined(__APPLE__)
#if defined(__x86_64__)
	const uptr exception_pc = static_cast<uptr>(static_cast<ucontext_t*>(ctx)->uc_mcontext->__ss.__rip);
	const uptr exception_address = static_cast<uptr>(static_cast<ucontext_t*>(ctx)->uc_mcontext->__es.__faultvaddr);
	const bool is_write = (static_cast<ucontext_t*>(ctx)->uc_mcontext->__es.__err & 2) != 0;
#elif defined(__aarch64__)
	const uptr exception_address = static_cast<uptr>(static_cast<ucontext_t*>(ctx)->uc_mcontext->__es.__far);
	const uptr exception_pc = static_cast<uptr>(static_cast<ucontext_t*>(ctx)->uc_mcontext->__ss.__pc);
	const bool is_write = IsStoreInstruction(exception_pc);
#endif
#elif defined(__FreeBSD__)
#if defined(__x86_64__)
	const uptr exception_address = static_cast<uptr>(static_cast<ucontext_t*>(ctx)->uc_mcontext.mc_addr);
	const uptr exception_pc = static_cast<uptr>(static_cast<ucontext_t*>(ctx)->uc_mcontext.mc_rip);
	const bool is_write = (static_cast<ucontext_t*>(ctx)->uc_mcontext.mc_err & 2) != 0;
#elif defined(__aarch64__)
	const uptr exception_address = static_cast<uptr>(static_cast<ucontext_t*>(ctx)->uc_mcontext->__es.__far);
	const uptr exception_pc = static_cast<uptr>(static_cast<ucontext_t*>(ctx)->uc_mcontext->__ss.__pc);
	const bool is_write = IsStoreInstruction(exception_pc);
#endif
#endif

	// Flag recursion: a fault raised inside HandlePageFault() takes the
	// early-out path above and chains to the previous handler.
	s_in_exception_handler = true;

	const bool handled = PageFaultHandler::HandlePageFault(exception_pc, exception_address, is_write);

	s_in_exception_handler = false;

	// Resumes execution right where we left off (re-executes instruction that caused the SIGSEGV).
	if (handled)
		return;

	// Call old signal handler, which will likely dump core.
	lock.unlock();
	CallExistingSignalHandler(signal, info, ctx);
}
|
|
|
|
|
2024-05-06 15:37:56 +00:00
|
|
|
// Installs the process-wide page-fault signal handler (SIGSEGV, and SIGBUS
// where applicable), saving any previously-registered handlers so unhandled
// faults can be chained to them.
// Returns false and fills `error` if a sigaction() registration fails.
bool PageFaultHandler::Install(Error* error)
{
	std::unique_lock lock(s_exception_handler_mutex);
	pxAssertRel(!s_exception_handler_installed, "Page fault handler has already been installed.");

	struct sigaction sa;

	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_SIGINFO;
	sa.sa_sigaction = SysPageFaultSignalFilter;
#ifdef __linux__
	// Don't block the signal from executing recursively, we want to fire the original handler.
	sa.sa_flags |= SA_NODEFER;
#endif
#if defined(__APPLE__) || defined(__aarch64__)
	// MacOS uses SIGBUS for memory permission violations, as well as SIGSEGV on ARM64.
	if (sigaction(SIGBUS, &sa, &s_old_sigbus_action) != 0)
	{
		// BUGFIX: this message previously reported SIGSEGV for the SIGBUS registration.
		Error::SetErrno(error, "sigaction() for SIGBUS failed: ", errno);
		return false;
	}
#endif
#if !defined(__APPLE__) || defined(__aarch64__)
	if (sigaction(SIGSEGV, &sa, &s_old_sigsegv_action) != 0)
	{
		// BUGFIX: this message previously reported SIGBUS for the SIGSEGV registration.
		Error::SetErrno(error, "sigaction() for SIGSEGV failed: ", errno);
		return false;
	}
#endif
#if defined(__APPLE__) && defined(__aarch64__)
	// Stops LLDB getting in a EXC_BAD_ACCESS loop when passing page faults to PCSX2.
	task_set_exception_ports(mach_task_self(), EXC_MASK_BAD_ACCESS, MACH_PORT_NULL, EXCEPTION_DEFAULT, 0);
#endif

	s_exception_handler_installed = true;
	return true;
}
|
2010-11-15 14:05:02 +00:00
|
|
|
|
2024-05-29 08:57:00 +00:00
|
|
|
#ifndef __APPLE__
|
|
|
|
|
2022-10-12 13:57:53 +00:00
|
|
|
// Translates a PageProtectionMode into the PROT_* flag set used by
// mmap()/mprotect(). Executable pages are always made readable as well.
static __ri uint LinuxProt(const PageProtectionMode& mode)
{
	u32 prot_flags = 0;

	if (mode.CanRead())
		prot_flags |= PROT_READ;
	if (mode.CanWrite())
		prot_flags |= PROT_WRITE;
	if (mode.CanExecute())
		prot_flags |= (PROT_READ | PROT_EXEC);

	return prot_flags;
}
|
|
|
|
|
2022-10-12 13:57:53 +00:00
|
|
|
void* HostSys::Mmap(void* base, size_t size, const PageProtectionMode& mode)
|
2010-10-22 16:23:52 +00:00
|
|
|
{
|
2023-12-22 10:30:31 +00:00
|
|
|
pxAssertMsg((size & (__pagesize - 1)) == 0, "Size is page aligned");
|
2009-11-16 00:40:09 +00:00
|
|
|
|
2022-10-12 13:57:53 +00:00
|
|
|
if (mode.IsNone())
|
|
|
|
return nullptr;
|
2022-05-19 14:01:56 +00:00
|
|
|
|
2022-10-12 13:57:53 +00:00
|
|
|
const u32 prot = LinuxProt(mode);
|
2022-05-19 14:01:56 +00:00
|
|
|
|
2022-10-12 13:57:53 +00:00
|
|
|
u32 flags = MAP_PRIVATE | MAP_ANONYMOUS;
|
|
|
|
if (base)
|
2024-05-21 14:05:24 +00:00
|
|
|
flags |= MAP_FIXED_NOREPLACE;
|
2009-11-16 00:40:09 +00:00
|
|
|
|
2023-12-26 10:25:26 +00:00
|
|
|
#if defined(__APPLE__) && defined(_M_ARM64)
|
|
|
|
if (mode.CanExecute())
|
|
|
|
flags |= MAP_JIT;
|
|
|
|
#endif
|
|
|
|
|
2022-10-12 13:57:53 +00:00
|
|
|
void* res = mmap(base, size, prot, flags, -1, 0);
|
|
|
|
if (res == MAP_FAILED)
|
|
|
|
return nullptr;
|
2010-11-03 14:18:30 +00:00
|
|
|
|
2022-10-12 13:57:53 +00:00
|
|
|
return res;
|
|
|
|
}
|
2010-11-05 01:33:01 +00:00
|
|
|
|
2022-10-12 13:57:53 +00:00
|
|
|
// Releases a mapping previously created by HostSys::Mmap().
// A null base is a no-op, mirroring free(nullptr) semantics.
void HostSys::Munmap(void* base, size_t size)
{
	if (!base)
		return;

	// base is already a void*; the old C-style (void*) cast was redundant.
	// The return value is deliberately ignored: the only realistic failure is
	// an invalid range, which would be a caller bug.
	munmap(base, size);
}
|
|
|
|
|
2022-10-12 13:57:53 +00:00
|
|
|
void HostSys::MemProtect(void* baseaddr, size_t size, const PageProtectionMode& mode)
|
2010-10-22 16:23:52 +00:00
|
|
|
{
|
2023-12-22 10:30:31 +00:00
|
|
|
pxAssertMsg((size & (__pagesize - 1)) == 0, "Size is page aligned");
|
2016-11-12 15:28:37 +00:00
|
|
|
|
2022-10-12 13:57:53 +00:00
|
|
|
const u32 lnxmode = LinuxProt(mode);
|
2016-11-12 15:28:37 +00:00
|
|
|
|
2022-10-12 13:57:53 +00:00
|
|
|
const int result = mprotect(baseaddr, size, lnxmode);
|
|
|
|
if (result != 0)
|
|
|
|
pxFail("mprotect() failed");
|
2010-10-22 16:23:52 +00:00
|
|
|
}
|
|
|
|
|
2022-10-12 13:57:53 +00:00
|
|
|
std::string HostSys::GetFileMappingName(const char* prefix)
|
2010-11-03 14:18:30 +00:00
|
|
|
{
|
2022-10-12 13:57:53 +00:00
|
|
|
const unsigned pid = static_cast<unsigned>(getpid());
|
2022-10-27 14:51:37 +00:00
|
|
|
#if defined(__FreeBSD__)
|
|
|
|
// FreeBSD's shm_open(3) requires name to be absolute
|
|
|
|
return fmt::format("/tmp/{}_{}", prefix, pid);
|
|
|
|
#else
|
2022-10-12 13:57:53 +00:00
|
|
|
return fmt::format("{}_{}", prefix, pid);
|
2022-10-27 14:51:37 +00:00
|
|
|
#endif
|
2010-11-03 14:18:30 +00:00
|
|
|
}
|
|
|
|
|
2022-10-12 13:57:53 +00:00
|
|
|
// Creates an anonymous-style shared memory object of `size` bytes and returns
// its file descriptor smuggled through a void* (see MapSharedMemory /
// DestroySharedMemory). Returns nullptr on failure.
void* HostSys::CreateSharedMemory(const char* name, size_t size)
{
	// O_EXCL ensures we don't silently reuse an object left behind by another run.
	const int fd = shm_open(name, O_CREAT | O_EXCL | O_RDWR, 0600);
	if (fd < 0)
	{
		std::fprintf(stderr, "shm_open failed: %d\n", errno);
		return nullptr;
	}

	// we're not going to be opening this mapping in other processes, so remove the file
	shm_unlink(name);

	// ensure it's the correct size
	if (ftruncate(fd, static_cast<off_t>(size)) < 0)
	{
		std::fprintf(stderr, "ftruncate(%zu) failed: %d\n", size, errno);

		// BUGFIX: previously returned without closing fd, leaking the descriptor.
		close(fd);
		return nullptr;
	}

	return reinterpret_cast<void*>(static_cast<intptr_t>(fd));
}
|
|
|
|
|
2022-10-12 13:57:53 +00:00
|
|
|
// Releases a shared memory handle from CreateSharedMemory(). The handle is a
// file descriptor stored in a pointer; recover and close it.
void HostSys::DestroySharedMemory(void* ptr)
{
	const int fd = static_cast<int>(reinterpret_cast<intptr_t>(ptr));
	close(fd);
}
|
|
|
|
|
2022-10-12 13:57:53 +00:00
|
|
|
void* HostSys::MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, const PageProtectionMode& mode)
|
2010-10-22 16:23:52 +00:00
|
|
|
{
|
2022-10-12 13:57:53 +00:00
|
|
|
const uint lnxmode = LinuxProt(mode);
|
2010-10-22 16:23:52 +00:00
|
|
|
|
2024-05-21 12:38:33 +00:00
|
|
|
const int flags = (baseaddr != nullptr) ? (MAP_SHARED | MAP_FIXED_NOREPLACE) : MAP_SHARED;
|
2022-10-12 13:57:53 +00:00
|
|
|
void* ptr = mmap(baseaddr, size, lnxmode, flags, static_cast<int>(reinterpret_cast<intptr_t>(handle)), static_cast<off_t>(offset));
|
|
|
|
if (ptr == MAP_FAILED)
|
|
|
|
return nullptr;
|
2010-10-22 16:23:52 +00:00
|
|
|
|
2022-10-12 13:57:53 +00:00
|
|
|
return ptr;
|
2010-10-22 16:23:52 +00:00
|
|
|
}
|
|
|
|
|
2022-10-12 13:57:53 +00:00
|
|
|
void HostSys::UnmapSharedMemory(void* baseaddr, size_t size)
|
2010-10-22 16:23:52 +00:00
|
|
|
{
|
2024-05-21 12:38:33 +00:00
|
|
|
if (munmap(baseaddr, size) != 0)
|
2022-10-12 13:57:53 +00:00
|
|
|
pxFailRel("Failed to unmap shared memory");
|
2010-10-22 16:23:52 +00:00
|
|
|
}
|
|
|
|
|
2023-12-26 10:25:26 +00:00
|
|
|
#ifdef _M_ARM64
|
|
|
|
|
|
|
|
// Invalidates the instruction cache over [address, address + size) so newly
// written code becomes visible to execution (GCC/Clang builtin).
void HostSys::FlushInstructionCache(void* address, u32 size)
{
	char* const begin = reinterpret_cast<char*>(address);
	char* const end = begin + size;
	__builtin___clear_cache(begin, end);
}
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
2022-10-29 03:39:19 +00:00
|
|
|
// Takes ownership of an already-reserved virtual address range of `size`
// bytes (`num_pages` pages) starting at `base_ptr`; the range is released
// with munmap() in the destructor.
SharedMemoryMappingArea::SharedMemoryMappingArea(u8* base_ptr, size_t size, size_t num_pages)
	: m_base_ptr(base_ptr)
	, m_size(size)
	, m_num_pages(num_pages)
{
}
|
|
|
|
|
|
|
|
// Releases the reserved address range. Every Map() must have been balanced
// by an Unmap() before destruction.
SharedMemoryMappingArea::~SharedMemoryMappingArea()
{
	pxAssertRel(m_num_mappings == 0, "No mappings left");

	// Tear down the whole reservation in one shot.
	const int result = munmap(m_base_ptr, m_size);
	if (result != 0)
		pxFailRel("Failed to release shared memory area");
}
|
|
|
|
|
|
|
|
|
|
|
|
std::unique_ptr<SharedMemoryMappingArea> SharedMemoryMappingArea::Create(size_t size)
|
|
|
|
{
|
|
|
|
pxAssertRel(Common::IsAlignedPow2(size, __pagesize), "Size is page aligned");
|
|
|
|
|
|
|
|
void* alloc = mmap(nullptr, size, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
|
|
|
|
if (alloc == MAP_FAILED)
|
|
|
|
return nullptr;
|
|
|
|
|
|
|
|
return std::unique_ptr<SharedMemoryMappingArea>(new SharedMemoryMappingArea(static_cast<u8*>(alloc), size, size / __pagesize));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Maps a window of `file_handle` (fd-in-pointer) into the reserved area at
// `map_base`. Returns the mapped address, or nullptr on failure.
u8* SharedMemoryMappingArea::Map(void* file_handle, size_t file_offset, void* map_base, size_t map_size, const PageProtectionMode& mode)
{
	pxAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

	// MAP_FIXED is okay here, since we've reserved the entire region, and *want* to overwrite the mapping.
	const int fd = static_cast<int>(reinterpret_cast<intptr_t>(file_handle));
	void* const mapped = mmap(map_base, map_size, LinuxProt(mode), MAP_SHARED | MAP_FIXED,
		fd, static_cast<off_t>(file_offset));
	if (mapped == MAP_FAILED)
		return nullptr;

	m_num_mappings++;
	return static_cast<u8*>(mapped);
}
|
|
|
|
|
|
|
|
bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
|
|
|
|
{
|
|
|
|
pxAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));
|
|
|
|
|
|
|
|
if (mmap(map_base, map_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
m_num_mappings--;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2024-05-29 08:57:00 +00:00
|
|
|
#endif // __APPLE__
|
2023-12-26 10:25:26 +00:00
|
|
|
|
|
|
|
#endif
|