Merge pull request #1840 from stenzek/map-jit
CPU/Recompiler: Use MAP_JIT for code space on Apple Silicon
Commit a6a3590722
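
The change in a nutshell: on Apple Silicon, macOS enforces W^X on JIT pages. An executable region must be mapped with MAP_JIT, and each thread has to flip the region between writable and executable with pthread_jit_write_protect_np() around code emission, then flush the instruction cache before running the new code. A minimal sketch of that pattern (not taken from this commit; the helper names are illustrative, assuming a macOS 11+ arm64 SDK):

#include <cstring>
#include <libkern/OSCacheControl.h> // sys_icache_invalidate()
#include <pthread.h>
#include <sys/mman.h>

// Illustrative: map an RWX region that supports per-thread W^X toggling.
static void* AllocateJitRegion(size_t size)
{
  void* ptr = mmap(nullptr, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_JIT, -1, 0);
  return (ptr == MAP_FAILED) ? nullptr : ptr;
}

// Illustrative: copy freshly generated code into the region, then make it
// executable again for the calling thread.
static void EmitCode(void* dst, const void* code, size_t size)
{
  pthread_jit_write_protect_np(0);  // region is writable for this thread
  std::memcpy(dst, code, size);
  pthread_jit_write_protect_np(1);  // region is executable again
  sys_icache_invalidate(dst, size); // discard stale instructions
}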
src/common/jit_code_buffer.cpp:

@@ -13,6 +13,11 @@ Log_SetChannel(JitCodeBuffer);
 #include <sys/mman.h>
 #endif
 
+#if defined(__APPLE__) && defined(__aarch64__)
+// pthread_jit_write_protect_np()
+#include <pthread.h>
+#endif
+
 JitCodeBuffer::JitCodeBuffer() = default;
 
 JitCodeBuffer::JitCodeBuffer(u32 size, u32 far_code_size)
@@ -46,8 +51,13 @@ bool JitCodeBuffer::Allocate(u32 size /* = 64 * 1024 * 1024 */, u32 far_code_siz
     return false;
   }
 #elif defined(__linux__) || defined(__ANDROID__) || defined(__APPLE__) || defined(__HAIKU__) || defined(__FreeBSD__)
-  m_code_ptr = static_cast<u8*>(
-    mmap(nullptr, m_total_size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
+  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+#if defined(__APPLE__) && defined(__aarch64__)
+  // MAP_JIT and toggleable write protection is required on Apple Silicon.
+  flags |= MAP_JIT;
+#endif
+
+  m_code_ptr = static_cast<u8*>(mmap(nullptr, m_total_size, PROT_READ | PROT_WRITE | PROT_EXEC, flags, -1, 0));
   if (!m_code_ptr)
   {
     Log_ErrorPrintf("mmap(RWX, %u) for internal buffer failed: %d", m_total_size, errno);
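
One caveat about the context lines around this hunk: POSIX mmap() reports failure by returning MAP_FAILED ((void*)-1), not nullptr, so the if (!m_code_ptr) check can miss a failed mapping. A stricter check would look like this (a sketch, not part of the commit):

  m_code_ptr = static_cast<u8*>(mmap(nullptr, m_total_size, PROT_READ | PROT_WRITE | PROT_EXEC, flags, -1, 0));
  if (m_code_ptr == reinterpret_cast<u8*>(MAP_FAILED))
    return false;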
@@ -195,6 +205,8 @@ void JitCodeBuffer::CommitFarCode(u32 length)
 
 void JitCodeBuffer::Reset()
 {
+  WriteProtect(false);
+
   m_free_code_ptr = m_code_ptr + m_guard_size;
   m_code_used = 0;
   std::memset(m_free_code_ptr, 0, m_code_size);
@@ -207,6 +219,8 @@ void JitCodeBuffer::Reset()
     std::memset(m_free_far_code_ptr, 0, m_far_code_size);
     FlushInstructionCache(m_free_far_code_ptr, m_far_code_size);
   }
+
+  WriteProtect(true);
 }
 
 void JitCodeBuffer::Align(u32 alignment, u8 padding_value)
@@ -231,3 +245,26 @@ void JitCodeBuffer::FlushInstructionCache(void* address, u32 size)
 #error Unknown platform.
 #endif
 }
+
+#if defined(__APPLE__) && defined(__aarch64__)
+
+void JitCodeBuffer::WriteProtect(bool enabled)
+{
+  static bool initialized = false;
+  static bool needs_write_protect = false;
+
+  if (!initialized)
+  {
+    initialized = true;
+    needs_write_protect = (pthread_jit_write_protect_supported_np() != 0);
+    if (needs_write_protect)
+      Log_InfoPrint("pthread_jit_write_protect_np() will be used before writing to JIT space.");
+  }
+
+  if (!needs_write_protect)
+    return;
+
+  pthread_jit_write_protect_np(enabled ? 1 : 0);
+}
+
+#endif
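
Two things worth noting about WriteProtect(): pthread_jit_write_protect_np() changes the writable/executable view only for the calling thread, and the one-time pthread_jit_write_protect_supported_np() probe lets the same binary run both where toggling is required and where it is not. Because every unprotect must be paired with a reprotect, an RAII guard is a natural alternative to the manual pairing used throughout this commit; a hypothetical sketch (not part of the change):

// Hypothetical guard: unprotects the JIT space for the lifetime of a scope
// and reprotects on every exit path, including early returns.
class ScopedJitWrite
{
public:
  ScopedJitWrite() { JitCodeBuffer::WriteProtect(false); }
  ~ScopedJitWrite() { JitCodeBuffer::WriteProtect(true); }

  ScopedJitWrite(const ScopedJitWrite&) = delete;
  ScopedJitWrite& operator=(const ScopedJitWrite&) = delete;
};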
src/common/jit_code_buffer.h:

@@ -29,6 +29,13 @@ public:
   /// Flushes the instruction cache on the host for the specified range.
   static void FlushInstructionCache(void* address, u32 size);
 
+  /// For Apple Silicon - Toggles write protection on the JIT space.
+#if defined(__APPLE__) && defined(__aarch64__)
+  static void WriteProtect(bool enabled);
+#else
+  ALWAYS_INLINE static void WriteProtect(bool enabled) {}
+#endif
+
 private:
   u8* m_code_ptr = nullptr;
   u8* m_free_code_ptr = nullptr;
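
Because the #else branch supplies an empty inline body, call sites can invoke WriteProtect() unconditionally on every platform and the compiler optimizes the call away where it is a no-op:

  s_code_buffer.WriteProtect(false); // compiles to nothing outside __APPLE__ && __aarch64__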
@@ -45,4 +52,3 @@ private:
   u32 m_old_protection = 0;
   bool m_owns_buffer = false;
 };
-
src/core/cpu_code_cache.cpp:

@@ -286,6 +286,8 @@ void Execute()
 
 void CompileDispatcher()
 {
+  s_code_buffer.WriteProtect(false);
+
   {
     Recompiler::CodeGenerator cg(&s_code_buffer);
     s_asm_dispatcher = cg.CompileDispatcher();
@@ -294,6 +296,8 @@ void CompileDispatcher()
     Recompiler::CodeGenerator cg(&s_code_buffer);
     s_single_block_asm_dispatcher = cg.CompileSingleBlockDispatcher();
   }
+
+  s_code_buffer.WriteProtect(true);
 }
 
 CodeBlock::HostCodePointer* GetFastMapPointer()
@@ -613,8 +617,12 @@ bool CompileBlock(CodeBlock* block)
     Flush();
   }
 
+  s_code_buffer.WriteProtect(false);
   Recompiler::CodeGenerator codegen(&s_code_buffer);
-  if (!codegen.CompileBlock(block, &block->host_code, &block->host_code_size))
+  const bool compile_result = codegen.CompileBlock(block, &block->host_code, &block->host_code_size);
+  s_code_buffer.WriteProtect(true);
+
+  if (!compile_result)
   {
     Log_ErrorPrintf("Failed to compile host code for block at 0x%08X", block->key.GetPC());
     return false;
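
Note the reshaped control flow: capturing the result in compile_result guarantees that WriteProtect(true) runs before both the success and the failure path return. With the hypothetical ScopedJitWrite guard sketched earlier, the hunk could keep the original early-return form:

  ScopedJitWrite jit_write; // hypothetical; unprotect now, reprotect on all returns
  Recompiler::CodeGenerator codegen(&s_code_buffer);
  if (!codegen.CompileBlock(block, &block->host_code, &block->host_code_size))
  {
    Log_ErrorPrintf("Failed to compile host code for block at 0x%08X", block->key.GetPC());
    return false;
  }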
@@ -839,7 +847,10 @@ Common::PageFaultHandler::HandlerResult MMapPageFaultHandler(void* exception_pc,
   }
 
   // found it, do fixup
-  if (Recompiler::CodeGenerator::BackpatchLoadStore(lbi))
+  s_code_buffer.WriteProtect(false);
+  const bool backpatch_result = Recompiler::CodeGenerator::BackpatchLoadStore(lbi);
+  s_code_buffer.WriteProtect(true);
+  if (backpatch_result)
   {
     // remove the backpatch entry since we won't be coming back to this one
     block->loadstore_backpatch_info.erase(bpi_iter);
@@ -880,7 +891,10 @@ Common::PageFaultHandler::HandlerResult LUTPageFaultHandler(void* exception_pc,
     if (lbi.host_pc == exception_pc)
     {
       // found it, do fixup
-      if (Recompiler::CodeGenerator::BackpatchLoadStore(lbi))
+      s_code_buffer.WriteProtect(false);
+      const bool backpatch_result = Recompiler::CodeGenerator::BackpatchLoadStore(lbi);
+      s_code_buffer.WriteProtect(true);
+      if (backpatch_result)
       {
         // remove the backpatch entry since we won't be coming back to this one
         block->loadstore_backpatch_info.erase(bpi_iter);
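
Both fault handlers now repeat the same unprotect/backpatch/reprotect sequence. Since pthread_jit_write_protect_np() only affects the calling thread, flipping protection inside the handler on the faulting thread leaves other threads executing JIT code undisturbed. A hypothetical helper (the backpatch-info type name here is an assumption) could factor out the duplication:

// Hypothetical wrapper; the parameter type name is assumed, not from the diff.
static bool BackpatchWithWriteAccess(Recompiler::LoadStoreBackpatchInfo& lbi)
{
  s_code_buffer.WriteProtect(false);
  const bool result = Recompiler::CodeGenerator::BackpatchLoadStore(lbi);
  s_code_buffer.WriteProtect(true);
  return result;
}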