CPU/CodeCache: Fix crash on Apple Silicon

This commit is contained in:
Stenzek 2023-10-24 18:23:55 +10:00
parent 06c4dc5e1b
commit f786138175
No known key found for this signature in database
5 changed files with 72 additions and 46 deletions

View File

@@ -18,6 +18,11 @@
#include <unistd.h>
#endif
#if defined(__APPLE__) && defined(__aarch64__)
// pthread_jit_write_protect_np()
#include <pthread.h>
#endif
Log_SetChannel(MemoryArena);
#ifdef _WIN32
@@ -398,3 +403,31 @@ bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
}
#endif
#if defined(__APPLE__) && defined(__aarch64__)
static thread_local int s_code_write_depth = 0;
// Makes the RWX JIT pages writable for the calling thread. Calls nest: only
// the outermost Begin (depth 0 -> 1) flips the hardware protection; inner
// calls just bump the per-thread depth counter.
void MemMap::BeginCodeWrite()
{
  // Log_DebugFmt("BeginCodeWrite(): {}", s_code_write_depth);
  const int prev_depth = s_code_write_depth;
  s_code_write_depth = prev_depth + 1;
  if (prev_depth == 0)
  {
    // Log_DebugPrint("  pthread_jit_write_protect_np(0)");
    pthread_jit_write_protect_np(0);
  }
}
// Closes one BeginCodeWrite() pair. Only when the outermost pair closes
// (depth returns to 0) is write protection re-enabled for this thread.
void MemMap::EndCodeWrite()
{
  // Log_DebugFmt("EndCodeWrite(): {}", s_code_write_depth);
  DebugAssert(s_code_write_depth > 0);
  s_code_write_depth -= 1;
  if (s_code_write_depth == 0)
  {
    // Log_DebugPrint("  pthread_jit_write_protect_np(1)");
    pthread_jit_write_protect_np(1);
  }
}
#endif

View File

@@ -41,6 +41,17 @@ void DestroySharedMemory(void* ptr);
void* MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, PageProtect mode);
void UnmapSharedMemory(void* baseaddr, size_t size);
bool MemProtect(void* baseaddr, size_t size, PageProtect mode);
/// JIT write protect for Apple Silicon. Needs to be called prior to writing to any RWX pages.
#if !defined(__APPLE__) || !defined(__aarch64__)
// clang-format off
ALWAYS_INLINE static void BeginCodeWrite() { }
ALWAYS_INLINE static void EndCodeWrite() { }
// clang-format on
#else
void BeginCodeWrite();
void EndCodeWrite();
#endif
} // namespace MemMap
class SharedMemoryMappingArea

View File

@@ -14,6 +14,7 @@
#include "common/assert.h"
#include "common/intrin.h"
#include "common/log.h"
#include "common/memmap.h"
Log_SetChannel(CPU::CodeCache);
@@ -628,6 +629,11 @@ void CPU::CodeCache::InvalidateBlocksWithPageIndex(u32 index)
new_block_state = BlockState::NeedsRecompile;
}
if (!ppi.first_block_in_page)
return;
MemMap::BeginCodeWrite();
Block* block = ppi.first_block_in_page;
while (block)
{
@@ -637,6 +643,8 @@ void CPU::CodeCache::InvalidateBlocksWithPageIndex(u32 index)
ppi.first_block_in_page = nullptr;
ppi.last_block_in_page = nullptr;
MemMap::EndCodeWrite();
}
CPU::CodeCache::PageProtectionMode CPU::CodeCache::GetProtectionModeForPC(u32 pc)
@@ -1279,6 +1287,7 @@ void CPU::CodeCache::CompileOrRevalidateBlock(u32 start_pc)
{
// TODO: this doesn't currently handle when the cache overflows...
DebugAssert(IsUsingAnyRecompiler());
MemMap::BeginCodeWrite();
Block* block = LookupBlock(start_pc);
if (block)
@@ -1290,6 +1299,7 @@ void CPU::CodeCache::CompileOrRevalidateBlock(u32 start_pc)
DebugAssert(block->host_code);
SetCodeLUT(start_pc, block->host_code);
BacklinkBlocks(start_pc, block->host_code);
MemMap::EndCodeWrite();
return;
}
@@ -1303,6 +1313,7 @@ void CPU::CodeCache::CompileOrRevalidateBlock(u32 start_pc)
Log_ErrorFmt("Failed to read block at 0x{:08X}, falling back to uncached interpreter", start_pc);
SetCodeLUT(start_pc, g_interpret_block);
BacklinkBlocks(start_pc, g_interpret_block);
MemMap::EndCodeWrite();
return;
}
@@ -1322,20 +1333,26 @@ void CPU::CodeCache::CompileOrRevalidateBlock(u32 start_pc)
Log_ErrorFmt("Failed to compile block at 0x{:08X}, falling back to uncached interpreter", start_pc);
SetCodeLUT(start_pc, g_interpret_block);
BacklinkBlocks(start_pc, g_interpret_block);
MemMap::EndCodeWrite();
return;
}
SetCodeLUT(start_pc, block->host_code);
BacklinkBlocks(start_pc, block->host_code);
MemMap::EndCodeWrite();
}
// Invalidates the valid block at start_pc and recompiles it immediately.
// The whole sequence is bracketed by Begin/EndCodeWrite because both the
// invalidation (unlinking) and the recompile patch executable JIT memory.
void CPU::CodeCache::DiscardAndRecompileBlock(u32 start_pc)
{
// Make RWX code pages writable before any generated code is touched.
MemMap::BeginCodeWrite();
Log_DevPrintf("Discard block %08X with manual protection", start_pc);
Block* block = LookupBlock(start_pc);
DebugAssert(block && block->state == BlockState::Valid);
InvalidateBlock(block, BlockState::NeedsRecompile);
CompileOrRevalidateBlock(start_pc);
// Safe to nest: CompileOrRevalidateBlock holds its own Begin/End pair.
MemMap::EndCodeWrite();
}
const void* CPU::CodeCache::CreateBlockLink(Block* block, void* code, u32 newpc)
@@ -1430,7 +1447,7 @@ void CPU::CodeCache::ClearASMFunctions()
void CPU::CodeCache::CompileASMFunctions()
{
s_code_buffer.WriteProtect(false);
MemMap::BeginCodeWrite();
const u32 asm_size = EmitASMFunctions(s_code_buffer.GetFreeCodePointer(), s_code_buffer.GetFreeCodeSpace());
@@ -1439,13 +1456,11 @@ void CPU::CodeCache::CompileASMFunctions()
#endif
s_code_buffer.CommitCode(asm_size);
s_code_buffer.WriteProtect(true);
MemMap::EndCodeWrite();
}
bool CPU::CodeCache::CompileBlock(Block* block)
{
s_code_buffer.WriteProtect(false);
const void* host_code = nullptr;
u32 host_code_size = 0;
u32 host_far_code_size = 0;
@@ -1462,8 +1477,6 @@ bool CPU::CodeCache::CompileBlock(Block* block)
host_code = NewRec::g_compiler->CompileBlock(block, &host_code_size, &host_far_code_size);
#endif
s_code_buffer.WriteProtect(true);
block->host_code = host_code;
if (!host_code)
@@ -1628,7 +1641,7 @@ bool CPU::CodeCache::HasPreviouslyFaultedOnPC(u32 guest_pc)
void CPU::CodeCache::BackpatchLoadStore(void* host_pc, const LoadstoreBackpatchInfo& info)
{
s_code_buffer.WriteProtect(false);
MemMap::BeginCodeWrite();
#ifdef ENABLE_RECOMPILER
if (g_settings.cpu_execution_mode == CPUExecutionMode::Recompiler)
@@ -1639,7 +1652,7 @@ void CPU::CodeCache::BackpatchLoadStore(void* host_pc, const LoadstoreBackpatchI
NewRec::BackpatchLoadStore(host_pc, info);
#endif
s_code_buffer.WriteProtect(true);
MemMap::EndCodeWrite();
}
#endif // ENABLE_RECOMPILER_SUPPORT

View File

@@ -1,11 +1,15 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-FileCopyrightText: 2019-2023 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "jit_code_buffer.h"
#include "common/align.h"
#include "common/assert.h"
#include "common/log.h"
#include "common/memmap.h"
#include <algorithm>
Log_SetChannel(JitCodeBuffer);
#if defined(_WIN32)
@@ -15,11 +19,6 @@ Log_SetChannel(JitCodeBuffer);
#include <sys/mman.h>
#endif
#if defined(__APPLE__) && defined(__aarch64__)
// pthread_jit_write_protect_np()
#include <pthread.h>
#endif
JitCodeBuffer::JitCodeBuffer() = default;
JitCodeBuffer::JitCodeBuffer(u32 size, u32 far_code_size)
@@ -235,7 +234,7 @@ void JitCodeBuffer::CommitFarCode(u32 length)
void JitCodeBuffer::Reset()
{
WriteProtect(false);
MemMap::BeginCodeWrite();
m_free_code_ptr = m_code_ptr + m_guard_size + m_code_reserve_size;
m_code_used = 0;
@@ -250,7 +249,7 @@ void JitCodeBuffer::Reset()
FlushInstructionCache(m_free_far_code_ptr, m_far_code_size);
}
WriteProtect(true);
MemMap::EndCodeWrite();
}
void JitCodeBuffer::Align(u32 alignment, u8 padding_value)
@@ -275,26 +274,3 @@ void JitCodeBuffer::FlushInstructionCache(void* address, u32 size)
#error Unknown platform.
#endif
}
#if defined(__APPLE__) && defined(__aarch64__)
// Toggles JIT write protection on the code buffer (Apple Silicon only).
// Support is queried — and the informational message logged — exactly once.
void JitCodeBuffer::WriteProtect(bool enabled)
{
  static const bool needs_write_protect = []() {
    const bool supported = (pthread_jit_write_protect_supported_np() != 0);
    if (supported)
      Log_InfoPrint("pthread_jit_write_protect_np() will be used before writing to JIT space.");
    return supported;
  }();

  if (needs_write_protect)
    pthread_jit_write_protect_np(enabled ? 1 : 0);
}
#endif

View File

@@ -47,13 +47,6 @@ public:
/// Flushes the instruction cache on the host for the specified range.
static void FlushInstructionCache(void* address, u32 size);
/// For Apple Silicon - Toggles write protection on the JIT space.
#if defined(__APPLE__) && defined(__aarch64__)
static void WriteProtect(bool enabled);
#else
ALWAYS_INLINE static void WriteProtect(bool enabled) {}
#endif
private:
u8* m_code_ptr = nullptr;
u8* m_free_code_ptr = nullptr;