CPU/CodeCache: Fix NewRec on Apple Silicon
commit 117cbeecaa (parent f786138175)
@@ -179,13 +179,13 @@ void CPU::CodeCache::ProcessStartup()
   }
 #endif

-  if (!Common::PageFaultHandler::InstallHandler(&s_block_lut, &ExceptionHandler))
+  if (!Common::PageFaultHandler::InstallHandler(ExceptionHandler))
     Panic("Failed to install page fault handler");
 }

 void CPU::CodeCache::ProcessShutdown()
 {
-  Common::PageFaultHandler::RemoveHandler(&s_block_lut);
+  Common::PageFaultHandler::RemoveHandler(ExceptionHandler);

 #ifdef ENABLE_RECOMPILER_SUPPORT
   s_code_buffer.Destroy();
@@ -1609,6 +1609,8 @@ Common::PageFaultHandler::HandlerResult CPU::CodeCache::HandleFastmemException(v
                 info.gpr_bitmask, static_cast<unsigned>(info.address_register), static_cast<unsigned>(info.data_register),
                 info.AccessSizeInBytes(), static_cast<unsigned>(info.is_signed));

+  MemMap::BeginCodeWrite();
+
   BackpatchLoadStore(exception_pc, info);

   // queue block for recompilation later
@@ -1628,6 +1630,8 @@ Common::PageFaultHandler::HandlerResult CPU::CodeCache::HandleFastmemException(v
     }
   }

+  MemMap::EndCodeWrite();
+
   // and store the pc in the faulting list, so that we don't emit another fastmem loadstore
   s_fastmem_faulting_pcs.insert(info.guest_pc);
   s_fastmem_backpatch_info.erase(iter);
@@ -1641,8 +1645,6 @@ bool CPU::CodeCache::HasPreviouslyFaultedOnPC(u32 guest_pc)

 void CPU::CodeCache::BackpatchLoadStore(void* host_pc, const LoadstoreBackpatchInfo& info)
 {
-  MemMap::BeginCodeWrite();
-
 #ifdef ENABLE_RECOMPILER
   if (g_settings.cpu_execution_mode == CPUExecutionMode::Recompiler)
     Recompiler::CodeGenerator::BackpatchLoadStore(host_pc, info);
@@ -1651,8 +1653,6 @@ void CPU::CodeCache::BackpatchLoadStore(void* host_pc, const LoadstoreBackpatchI
   if (g_settings.cpu_execution_mode == CPUExecutionMode::NewRec)
     NewRec::BackpatchLoadStore(host_pc, info);
 #endif
-
-  MemMap::EndCodeWrite();
 }

 #endif // ENABLE_RECOMPILER_SUPPORT
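(Aside, not part of the commit text: the MemMap::BeginCodeWrite()/EndCodeWrite() calls are hoisted out of BackpatchLoadStore() and into HandleFastmemException(), so the whole backpatch-and-requeue sequence runs inside a single writable window. My reading is that this matters on Apple Silicon because JIT pages there are write-xor-execute; a rough sketch of what such helpers typically look like on that platform, with the mechanism assumed rather than taken from this commit:)

// Sketch only (assumption): Begin/EndCodeWrite-style helpers on Apple Silicon usually
// toggle the per-thread write protection of MAP_JIT pages, since those pages cannot be
// writable and executable at the same time.
#include <pthread.h>

static void BeginCodeWrite()
{
#if defined(__APPLE__) && defined(__aarch64__)
  pthread_jit_write_protect_np(0); // make JIT pages writable for this thread
#endif
}

static void EndCodeWrite()
{
#if defined(__APPLE__) && defined(__aarch64__)
  pthread_jit_write_protect_np(1); // back to execute-only before running patched code
#endif
}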
@@ -2,11 +2,15 @@
 // SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)

 #include "page_fault_handler.h"
+
+#include "common/assert.h"
+#include "common/log.h"
+
 #include <algorithm>
 #include <cstring>
 #include <mutex>
 #include <vector>

 Log_SetChannel(Common::PageFaultHandler);

 #if defined(_WIN32)
@@ -22,16 +26,17 @@ Log_SetChannel(Common::PageFaultHandler);
 #define USE_SIGSEGV 1
 #endif

+#ifdef __APPLE__
+#include <mach/mach_init.h>
+#include <mach/mach_port.h>
+#include <mach/task.h>
+#endif
+
 namespace Common::PageFaultHandler {

-struct RegisteredHandler
-{
-  Callback callback;
-  const void* owner;
-};
-static std::vector<RegisteredHandler> m_handlers;
-static std::mutex m_handler_lock;
-static thread_local bool s_in_handler;
+static std::recursive_mutex s_exception_handler_mutex;
+static Handler s_exception_handler_callback;
+static bool s_in_exception_handler;

 #if defined(CPU_ARCH_ARM64)
 static bool IsStoreInstruction(const void* ptr)
@@ -83,10 +88,16 @@ static PVOID s_veh_handle;

 static LONG ExceptionHandler(PEXCEPTION_POINTERS exi)
 {
-  if (exi->ExceptionRecord->ExceptionCode != EXCEPTION_ACCESS_VIOLATION || s_in_handler)
+  // Executing the handler concurrently from multiple threads wouldn't go down well.
+  std::unique_lock lock(s_exception_handler_mutex);
+
+  // Prevent recursive exception filtering.
+  if (s_in_exception_handler)
     return EXCEPTION_CONTINUE_SEARCH;

-  s_in_handler = true;
+  // Only interested in page faults.
+  if (exi->ExceptionRecord->ExceptionCode != EXCEPTION_ACCESS_VIOLATION)
+    return EXCEPTION_CONTINUE_SEARCH;

 #if defined(_M_AMD64)
   void* const exception_pc = reinterpret_cast<void*>(exi->ContextRecord->Rip);
@@ -97,34 +108,64 @@ static LONG ExceptionHandler(PEXCEPTION_POINTERS exi)
 #endif

   void* const exception_address = reinterpret_cast<void*>(exi->ExceptionRecord->ExceptionInformation[1]);
-  bool const is_write = exi->ExceptionRecord->ExceptionInformation[0] == 1;
+  const bool is_write = exi->ExceptionRecord->ExceptionInformation[0] == 1;

-  std::lock_guard<std::mutex> guard(m_handler_lock);
-  for (const RegisteredHandler& rh : m_handlers)
-  {
-    if (rh.callback(exception_pc, exception_address, is_write) == HandlerResult::ContinueExecution)
-    {
-      s_in_handler = false;
-      return EXCEPTION_CONTINUE_EXECUTION;
-    }
-  }
+  s_in_exception_handler = true;

-  s_in_handler = false;
+  const HandlerResult handled = s_exception_handler_callback(exception_pc, exception_address, is_write);

-  return EXCEPTION_CONTINUE_SEARCH;
+  s_in_exception_handler = false;
+
+  return (handled == HandlerResult::ContinueExecution) ? EXCEPTION_CONTINUE_EXECUTION : EXCEPTION_CONTINUE_SEARCH;
 }

 #elif defined(USE_SIGSEGV)

-static struct sigaction s_old_sigsegv_action;
 #if defined(__APPLE__) || defined(__aarch64__)
 static struct sigaction s_old_sigbus_action;
 #endif
+#if !defined(__APPLE__) || defined(__aarch64__)
+static struct sigaction s_old_sigsegv_action;
+#endif

-static void SIGSEGVHandler(int sig, siginfo_t* info, void* ctx)
+static void CallExistingSignalHandler(int signal, siginfo_t* siginfo, void* ctx)
 {
-  if ((info->si_code != SEGV_MAPERR && info->si_code != SEGV_ACCERR) || s_in_handler)
-    return;
-
+#if defined(__aarch64__)
+  const struct sigaction& sa = (signal == SIGBUS) ? s_old_sigbus_action : s_old_sigsegv_action;
+#elif defined(__APPLE__)
+  const struct sigaction& sa = s_old_sigbus_action;
+#else
+  const struct sigaction& sa = s_old_sigsegv_action;
+#endif
+
+  if (sa.sa_flags & SA_SIGINFO)
+  {
+    sa.sa_sigaction(signal, siginfo, ctx);
+  }
+  else if (sa.sa_handler == SIG_DFL)
+  {
+    // Re-raising the signal would just queue it, and since we'd restore the handler back to us,
+    // we'd end up right back here again. So just abort, because that's probably what it'd do anyway.
+    abort();
+  }
+  else if (sa.sa_handler != SIG_IGN)
+  {
+    sa.sa_handler(signal);
+  }
+}
+
+static void SignalHandler(int sig, siginfo_t* info, void* ctx)
+{
+  // Executing the handler concurrently from multiple threads wouldn't go down well.
+  std::unique_lock lock(s_exception_handler_mutex);
+
+  // Prevent recursive exception filtering.
+  if (s_in_exception_handler)
+  {
+    lock.unlock();
+    CallExistingSignalHandler(sig, info, ctx);
+    return;
+  }
+
 #if defined(__linux__) || defined(__ANDROID__)
   void* const exception_address = reinterpret_cast<void*>(info->si_addr);
@@ -181,49 +222,28 @@ static void SIGSEGVHandler(int sig, siginfo_t* info, void* ctx)

 #endif

-  std::lock_guard<std::mutex> guard(m_handler_lock);
-  for (const RegisteredHandler& rh : m_handlers)
-  {
-    if (rh.callback(exception_pc, exception_address, is_write) == HandlerResult::ContinueExecution)
-    {
-      s_in_handler = false;
-      return;
-    }
-  }
+  s_in_exception_handler = true;

-  // call old signal handler
-#if !defined(__APPLE__) && !defined(__aarch64__)
-  const struct sigaction& sa = s_old_sigsegv_action;
-#else
-  const struct sigaction& sa = (sig == SIGBUS) ? s_old_sigbus_action : s_old_sigsegv_action;
-#endif
-  if (sa.sa_flags & SA_SIGINFO)
-    sa.sa_sigaction(sig, info, ctx);
-  else if (sa.sa_handler == SIG_DFL)
-    signal(sig, SIG_DFL);
-  else if (sa.sa_handler == SIG_IGN)
-    return;
-  else
-    sa.sa_handler(sig);
+  const HandlerResult result = s_exception_handler_callback(exception_pc, exception_address, is_write);
+
+  s_in_exception_handler = false;
+
+  // Resumes execution right where we left off (re-executes instruction that caused the SIGSEGV).
+  if (result == HandlerResult::ContinueExecution)
+    return;
+
+  // Call old signal handler, which will likely dump core.
+  lock.unlock();
+  CallExistingSignalHandler(sig, info, ctx);
 }

 #endif

-bool InstallHandler(const void* owner, Callback callback)
+bool InstallHandler(Handler handler)
 {
-  bool was_empty;
-  {
-    std::lock_guard<std::mutex> guard(m_handler_lock);
-    if (std::find_if(m_handlers.begin(), m_handlers.end(),
-                     [owner](const RegisteredHandler& rh) { return rh.owner == owner; }) != m_handlers.end())
-    {
-      return false;
-    }
-
-    was_empty = m_handlers.empty();
-  }
-
-  if (was_empty)
+  std::unique_lock lock(s_exception_handler_mutex);
+  AssertMsg(!s_exception_handler_callback, "A page fault handler is already registered.");
+  if (!s_exception_handler_callback)
   {
 #if defined(_WIN32) && (defined(CPU_ARCH_X64) || defined(CPU_ARCH_ARM64))
     s_veh_handle = AddVectoredExceptionHandler(1, ExceptionHandler);
@@ -233,69 +253,62 @@ bool InstallHandler(const void* owner, Callback callback)
       return false;
     }
 #elif defined(USE_SIGSEGV)
-    struct sigaction sa = {};
-    sa.sa_sigaction = SIGSEGVHandler;
-    sa.sa_flags = SA_SIGINFO;
-    sigemptyset(&sa.sa_mask);
-    if (sigaction(SIGSEGV, &sa, &s_old_sigsegv_action) < 0)
-    {
-      Log_ErrorPrintf("sigaction(SIGSEGV) failed: %d", errno);
-      return false;
-    }
-#if defined(__APPLE__) || defined(__aarch64__)
-    if (sigaction(SIGBUS, &sa, &s_old_sigbus_action) < 0)
-    {
-      Log_ErrorPrintf("sigaction(SIGBUS) failed: %d", errno);
-      return false;
-    }
-#endif
+    struct sigaction sa;
+
+    sigemptyset(&sa.sa_mask);
+    sa.sa_flags = SA_SIGINFO;
+    sa.sa_sigaction = SignalHandler;
+#ifdef __linux__
+    // Don't block the signal from executing recursively, we want to fire the original handler.
+    sa.sa_flags |= SA_NODEFER;
+#endif
+#if defined(__APPLE__) || defined(__aarch64__)
+    // MacOS uses SIGBUS for memory permission violations
+    if (sigaction(SIGBUS, &sa, &s_old_sigbus_action) != 0)
+      return false;
+#endif
+#if !defined(__APPLE__) || defined(__aarch64__)
+    if (sigaction(SIGSEGV, &sa, &s_old_sigsegv_action) != 0)
+      return false;
+#endif
+#if defined(__APPLE__) && defined(__aarch64__)
+    task_set_exception_ports(mach_task_self(), EXC_MASK_BAD_ACCESS, MACH_PORT_NULL, EXCEPTION_DEFAULT, 0);
+#endif
 #else
     return false;
 #endif
   }

-  m_handlers.push_back(RegisteredHandler{callback, owner});
+  s_exception_handler_callback = handler;
   return true;
 }

-bool RemoveHandler(const void* owner)
+bool RemoveHandler(Handler handler)
 {
-  std::lock_guard<std::mutex> guard(m_handler_lock);
-  auto it = std::find_if(m_handlers.begin(), m_handlers.end(),
-                         [owner](const RegisteredHandler& rh) { return rh.owner == owner; });
-  if (it == m_handlers.end())
+  std::unique_lock lock(s_exception_handler_mutex);
+  AssertMsg(!s_exception_handler_callback || s_exception_handler_callback == handler,
+            "Not removing the same handler previously registered.");
+  if (!s_exception_handler_callback)
     return false;

-  m_handlers.erase(it);
+  s_exception_handler_callback = nullptr;

-  if (m_handlers.empty())
-  {
 #if defined(_WIN32) && (defined(CPU_ARCH_X64) || defined(CPU_ARCH_ARM64))
-    RemoveVectoredExceptionHandler(s_veh_handle);
-    s_veh_handle = nullptr;
+  RemoveVectoredExceptionHandler(s_veh_handle);
+  s_veh_handle = nullptr;
 #elif defined(USE_SIGSEGV)
-    // restore old signal handler
+  struct sigaction sa;
 #if defined(__APPLE__) || defined(__aarch64__)
-    if (sigaction(SIGBUS, &s_old_sigbus_action, nullptr) < 0)
-    {
-      Log_ErrorPrintf("sigaction(SIGBUS) failed: %d", errno);
-      return false;
-    }
-    s_old_sigbus_action = {};
+  sigaction(SIGBUS, &s_old_sigbus_action, &sa);
+  s_old_sigbus_action = {};
 #endif
+#if !defined(__APPLE__) || defined(__aarch64__)
+  sigaction(SIGSEGV, &s_old_sigsegv_action, &sa);
+  s_old_sigsegv_action = {};
+#endif

-    if (sigaction(SIGSEGV, &s_old_sigsegv_action, nullptr) < 0)
-    {
-      Log_ErrorPrintf("sigaction(SIGSEGV) failed: %d", errno);
-      return false;
-    }
-
-    s_old_sigsegv_action = {};
 #else
-    return false;
+  return false;
 #endif
-  }

   return true;
 }
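A note on the Apple Silicon specific line above (my interpretation, not stated in the commit): task_set_exception_ports() with MACH_PORT_NULL clears the task-level handler for EXC_BAD_ACCESS, which typically lets bad-access faults fall through to the SIGBUS/SIGSEGV handlers installed just before it instead of being intercepted at the Mach layer (for example by an attached debugger). A minimal standalone sketch of that setup, with the handler body invented for illustration:

#include <csignal>
#include <cstdio>
#include <unistd.h>
#include <mach/mach.h> // mach_task_self, task_set_exception_ports, EXC_MASK_BAD_ACCESS

static void FaultHandler(int sig, siginfo_t* info, void* ctx)
{
  // A real handler would patch the faulting code and return so the access is retried;
  // this sketch just exits (only async-signal-safe calls belong in here).
  (void)info;
  (void)ctx;
  _exit(sig);
}

int main()
{
  struct sigaction sa = {};
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_SIGINFO;
  sa.sa_sigaction = FaultHandler;
  if (sigaction(SIGBUS, &sa, nullptr) != 0 || sigaction(SIGSEGV, &sa, nullptr) != 0)
    return 1;

  // Reset the task-level EXC_BAD_ACCESS port so the fault is delivered as a signal.
  task_set_exception_ports(mach_task_self(), EXC_MASK_BAD_ACCESS, MACH_PORT_NULL, EXCEPTION_DEFAULT, 0);

  std::puts("handlers installed");
  return 0;
}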
@@ -11,10 +11,9 @@ enum class HandlerResult
   ExecuteNextHandler,
 };

-using Callback = HandlerResult (*)(void* exception_pc, void* fault_address, bool is_write);
-using Handle = void*;
+using Handler = HandlerResult (*)(void* exception_pc, void* fault_address, bool is_write);

-bool InstallHandler(const void* owner, Callback callback);
-bool RemoveHandler(const void* owner);
+bool InstallHandler(Handler callback);
+bool RemoveHandler(Handler callback);

 } // namespace Common::PageFaultHandler
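For completeness, a minimal usage sketch of the single-handler API declared above; the handler body here is invented for illustration, while the real caller in this commit is CPU::CodeCache::ProcessStartup()/ProcessShutdown() in the first hunk:

#include "page_fault_handler.h"

// Illustrative callback: decide whether the fault is ours and whether execution may resume.
static Common::PageFaultHandler::HandlerResult MyHandler(void* exception_pc, void* fault_address, bool is_write)
{
  // A real handler (such as the fastmem one in CPU::CodeCache) would inspect exception_pc,
  // patch the offending instruction and return ContinueExecution so it is retried.
  (void)exception_pc;
  (void)fault_address;
  (void)is_write;
  return Common::PageFaultHandler::HandlerResult::ExecuteNextHandler;
}

int main()
{
  // Only one handler can be registered at a time with the new API.
  if (!Common::PageFaultHandler::InstallHandler(&MyHandler))
    return 1;

  // ... run code that may fault ...

  Common::PageFaultHandler::RemoveHandler(&MyHandler);
  return 0;
}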