// rpcs3/Utilities/JIT.h
#pragma once

#include "util/types.hpp"

// Include asmjit with warnings ignored
#define ASMJIT_EMBED
#define ASMJIT_STATIC
#define ASMJIT_BUILD_DEBUG
#undef Bool

#ifdef _MSC_VER
#pragma warning(push, 0)
#include <asmjit/asmjit.h>
#pragma warning(pop)
#else
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wall"
#pragma GCC diagnostic ignored "-Wextra"
#pragma GCC diagnostic ignored "-Wold-style-cast"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wredundant-decls"
#pragma GCC diagnostic ignored "-Wnon-virtual-dtor"
#pragma GCC diagnostic ignored "-Weffc++"
#ifdef __clang__
#pragma GCC diagnostic ignored "-Wdeprecated-anon-enum-enum-conversion"
#pragma GCC diagnostic ignored "-Wcast-qual"
#else
#pragma GCC diagnostic ignored "-Wduplicated-branches"
#pragma GCC diagnostic ignored "-Wdeprecated-enum-enum-conversion"
#endif
#include <asmjit/asmjit.h>
#if defined(ARCH_ARM64)
#include <asmjit/a64.h>
#endif
#pragma GCC diagnostic pop
#endif

#include <array>
#include <functional>
#include <memory>
#include <string>
#include <string_view>
#include <unordered_map>

#include <util/v128.hpp>

// Native assembler/argument-register aliases for the host architecture
#if defined(ARCH_X64)
using native_asm = asmjit::x86::Assembler;
using native_args = std::array<asmjit::x86::Gp, 4>;
#elif defined(ARCH_ARM64)
using native_asm = asmjit::a64::Assembler;
using native_args = std::array<asmjit::a64::Gp, 4>;
#endif
// Register a generated code region [func, func + size) under a name (logging/profiling)
void jit_announce(uptr func, usz size, std::string_view name);

// Convenience overload: accepts any pointer type and forwards its raw address
void jit_announce(auto* func, usz size, std::string_view name)
{
	jit_announce(uptr(func), size, name);
}

// Classification of JIT memory allocations (PPU/SPU, code vs data)
enum class jit_class
{
	ppu_code,
	ppu_data,
	spu_code,
	spu_data,
};
// Base interface for asmjit runtimes; derived classes supply the memory allocator
struct jit_runtime_base
{
	jit_runtime_base() noexcept = default;
	virtual ~jit_runtime_base() = default;

	// Non-copyable
	jit_runtime_base(const jit_runtime_base&) = delete;
	jit_runtime_base& operator=(const jit_runtime_base&) = delete;

	// Target environment used to initialize an asmjit::CodeHolder
	const asmjit::Environment& environment() const noexcept;

	// Copy assembled code into memory obtained via _alloc(); returns the code address
	void* _add(asmjit::CodeHolder* code) noexcept;

	// Allocate executable memory (implemented by derived runtimes)
	virtual uchar* _alloc(usz size, usz align) noexcept = 0;
};
// ASMJIT runtime for emitting code in a single 2G region
2021-12-28 19:25:36 +00:00
struct jit_runtime final : jit_runtime_base
{
jit_runtime();
~jit_runtime() override;
// Allocate executable memory
2021-12-28 19:25:36 +00:00
uchar* _alloc(usz size, usz align) noexcept override;
// Allocate memory
2020-12-18 07:39:54 +00:00
static u8* alloc(usz size, uint align, bool exec = true) noexcept;
// Should be called at least once after global initialization
static void initialize();
// Deallocate all memory
static void finalize() noexcept;
};
namespace asmjit
{
// Should only be used to build global functions
jit_runtime_base& get_global_runtime();
// Don't use directly
class inline_runtime : public jit_runtime_base
{
uchar* m_data;
usz m_size;
public:
2021-12-28 19:25:36 +00:00
inline_runtime(uchar* data, usz size);
~inline_runtime();
2021-12-28 19:25:36 +00:00
uchar* _alloc(usz size, usz align) noexcept override;
};
// Emit xbegin and adjacent loop, return label at xbegin (don't use xabort please)
template <typename F>
2021-12-28 19:25:36 +00:00
[[nodiscard]] inline asmjit::Label build_transaction_enter(asmjit::x86::Assembler& c, asmjit::Label fallback, F func)
{
Label fall = c.newLabel();
Label begin = c.newLabel();
c.jmp(begin);
c.bind(fall);
// Don't repeat on zero status (may indicate syscall or interrupt)
c.test(x86::eax, x86::eax);
c.jz(fallback);
// First invoked after failure (can fallback to proceed, or jump anywhere else)
func();
// Other bad statuses are ignored regardless of repeat flag (TODO)
2021-12-28 19:25:36 +00:00
c.align(AlignMode::kCode, 16);
c.bind(begin);
return fall;
// xbegin should be issued manually, allows to add more check before entering transaction
}
// Helper to spill RDX (EDX) register for RDTSC
2021-12-28 19:25:36 +00:00
inline void build_swap_rdx_with(asmjit::x86::Assembler& c, std::array<x86::Gp, 4>& args, const asmjit::x86::Gp& with)
{
#ifdef _WIN32
c.xchg(args[1], with);
args[1] = with;
#else
c.xchg(args[2], with);
args[2] = with;
#endif
}
// Get full RDTSC value into chosen register (clobbers rax/rdx or saves only rax with other target)
2021-12-28 19:25:36 +00:00
inline void build_get_tsc(asmjit::x86::Assembler& c, const asmjit::x86::Gp& to = asmjit::x86::rax)
{
if (&to != &x86::rax && &to != &x86::rdx)
{
// Swap to save its contents
c.xchg(x86::rax, to);
}
c.rdtsc();
c.shl(x86::rdx, 32);
if (&to == &x86::rax)
{
c.or_(x86::rax, x86::rdx);
}
else if (&to == &x86::rdx)
{
c.or_(x86::rdx, x86::rax);
}
else
{
// Swap back, maybe there is more effective way to do it
c.xchg(x86::rax, to);
c.mov(to.r32(), to.r32());
c.or_(to.r64(), x86::rdx);
}
}
// Copy the GHC calling convention registers (r13/rbp/r12/rbx) into the native ABI argument registers
inline void build_init_args_from_ghc(native_asm& c, native_args& args)
{
#if defined(ARCH_X64)
	// TODO: handle case when args don't overlap with r13/rbp/r12/rbx
	c.mov(args[0], x86::r13);
	c.mov(args[1], x86::rbp);
	c.mov(args[2], x86::r12);
	c.mov(args[3], x86::rbx);
#else
	// Non-x64: nothing to emit; silence unused-parameter warnings
	static_cast<void>(c);
	static_cast<void>(args);
#endif
}
// Copy the native ABI argument registers into the GHC calling convention registers (r13/rbp/r12/rbx)
inline void build_init_ghc_args(native_asm& c, native_args& args)
{
#if defined(ARCH_X64)
	// TODO: handle case when args don't overlap with r13/rbp/r12/rbx
	c.mov(x86::r13, args[0]);
	c.mov(x86::rbp, args[1]);
	c.mov(x86::r12, args[2]);
	c.mov(x86::rbx, args[3]);
#else
	// Non-x64: nothing to emit; silence unused-parameter warnings
	static_cast<void>(c);
	static_cast<void>(args);
#endif
}
#if defined(ARCH_X64)
// Assembler helper for emitting width-generic SIMD loops (SSE/VEX/EVEX encodings)
struct simd_builder : native_asm
{
	// Cache of emitted vector constants, keyed by value; label refers to the constant's data
	std::unordered_map<v128, Label> consts;

	// Scratch vector operands set up by _init() for the current vector width
	Operand v0, v1, v2, v3, v4, v5;

	// Current vector width in bytes
	uint vsize = 16;

	// When non-zero (and < 8): index of the opmask register applied to masked tail ops
	uint vmask = 0;

	simd_builder(CodeHolder* ch) noexcept;
	~simd_builder();

	// Finalization step (invoked by build_function_asm after the builder runs)
	void operator()() noexcept;

	void _init(uint new_vsize = 0);
	void vec_cleanup_ret();
	void vec_set_all_zeros(const Operand& v);
	void vec_set_all_ones(const Operand& v);
	void vec_set_const(const Operand& v, const v128& value);
	void vec_clobbering_test(u32 esize, const Operand& v, const Operand& rhs);
	void vec_broadcast_gpr(u32 esize, const Operand& v, const x86::Gp& r);

	// return x86::ptr(base, ctr, X, 0) where X is set for esize accordingly
	x86::Mem ptr_scale_for_vec(u32 esize, const x86::Gp& base, const x86::Gp& index);

	void vec_load_unaligned(u32 esize, const Operand& v, const x86::Mem& src);
	void vec_store_unaligned(u32 esize, const Operand& v, const x86::Mem& dst);
	void vec_partial_move(u32 esize, const Operand& dst, const Operand& src);

	// Emit a binary vector op, choosing SSE/VEX/EVEX instruction form as appropriate
	void _vec_binary_op(x86::Inst::Id sse_op, x86::Inst::Id vex_op, x86::Inst::Id evex_op, const Operand& dst, const Operand& lhs, const Operand& rhs);

	void vec_shuffle_xi8(const Operand& dst, const Operand& lhs, const Operand& rhs)
	{
		using enum x86::Inst::Id;
		_vec_binary_op(kIdPshufb, kIdVpshufb, kIdVpshufb, dst, lhs, rhs);
	}

	void vec_xor(u32, const Operand& dst, const Operand& lhs, const Operand& rhs)
	{
		using enum x86::Inst::Id;
		_vec_binary_op(kIdPxor, kIdVpxor, kIdVpxord, dst, lhs, rhs);
	}

	void vec_or(u32, const Operand& dst, const Operand& lhs, const Operand& rhs)
	{
		using enum x86::Inst::Id;
		_vec_binary_op(kIdPor, kIdVpor, kIdVpord, dst, lhs, rhs);
	}

	void vec_andn(u32, const Operand& dst, const Operand& lhs, const Operand& rhs)
	{
		using enum x86::Inst::Id;
		_vec_binary_op(kIdPandn, kIdVpandn, kIdVpandnd, dst, lhs, rhs);
	}

	void vec_umin(u32 esize, const Operand& dst, const Operand& lhs, const Operand& rhs);
	void vec_umax(u32 esize, const Operand& dst, const Operand& lhs, const Operand& rhs);
	void vec_cmp_eq(u32 esize, const Operand& dst, const Operand& lhs, const Operand& rhs);
	void vec_extract_high(u32 esize, const Operand& dst, const Operand& src);
	void vec_extract_gpr(u32 esize, const x86::Gp& dst, const Operand& src);

	// If a tail mask is active, apply it (merge semantics) to the next instruction
	simd_builder& keep_if_not_masked()
	{
		if (vmask && vmask < 8)
		{
			this->k(x86::KReg(vmask));
		}

		return *this;
	}

	// If a tail mask is active, apply it with zeroing semantics to the next instruction
	simd_builder& zero_if_not_masked()
	{
		if (vmask && vmask < 8)
		{
			this->k(x86::KReg(vmask));
			this->z();
		}

		return *this;
	}

	// Emit a loop over reg_cnt elements of esize bytes: full-width main body, then either
	// a single masked tail iteration (when vmask is set) or an unrolled tail at halved
	// widths; reduce() folds partial results at each width step. Resets state via _init(0).
	void build_loop(u32 esize, const x86::Gp& reg_ctr, const x86::Gp& reg_cnt, auto&& build, auto&& reduce)
	{
		ensure((esize & (esize - 1)) == 0);
		ensure(esize <= vsize);

		Label body = this->newLabel();
		Label next = this->newLabel();
		Label exit = this->newLabel();

		const u32 step = vsize / esize;

		this->xor_(reg_ctr.r32(), reg_ctr.r32()); // Reset counter reg
		this->cmp(reg_cnt, step);
		this->jb(next); // If count < step, skip main loop body
		this->align(AlignMode::kCode, 16);
		this->bind(body);
		this->sub(reg_cnt, step);
		build();
		this->add(reg_ctr, step);
		this->cmp(reg_cnt, step);
		this->jae(body);
		this->bind(next);

		if (vmask)
		{
			// Build single last iteration (masked)
			this->test(reg_cnt, reg_cnt);
			this->jz(exit);

			// Materialize a tail mask with the low reg_cnt bits set, into k7
			if (esize == 1 && vsize == 64)
			{
				this->bzhi(reg_cnt.r64(), x86::Mem(consts[~u128()], 0), reg_cnt.r64());
				this->kmovq(x86::k7, reg_cnt.r64());
			}
			else
			{
				this->bzhi(reg_cnt.r32(), x86::Mem(consts[~u128()], 0), reg_cnt.r32());
				this->kmovd(x86::k7, reg_cnt.r32());
			}

			vmask = 7;
			build();

			// Rollout reduction step
			this->bind(exit);

			while (true)
			{
				vsize /= 2;

				if (vsize < esize)
					break;

				this->_init(vsize);
				reduce();
			}
		}
		else
		{
			// Build unrolled loop tail (reduced vector width)
			while (true)
			{
				vsize /= 2;

				if (vsize < esize)
					break;

				// Shall not clobber flags
				this->_init(vsize);
				reduce();

				if (vsize == esize)
				{
					// Last "iteration"
					this->test(reg_cnt, reg_cnt);
					this->jz(exit);
					build();
				}
				else
				{
					const u32 step = vsize / esize;
					Label next = this->newLabel();
					this->cmp(reg_cnt, step);
					this->jb(next);
					build();
					this->add(reg_ctr, step);
					this->sub(reg_cnt, step);
					this->bind(next);
				}
			}

			this->bind(exit);
		}

		this->_init(0);
	}
};
// for (; count > 0; ctr++, count--)
inline void build_loop(native_asm& c, auto ctr, auto count, auto&& build)
{
	asmjit::Label body = c.newLabel();
	asmjit::Label exit = c.newLabel();

	// Skip the loop entirely when count is zero
	c.test(count, count);
	c.jz(exit);
	c.align(asmjit::AlignMode::kCode, 16);
	c.bind(body);
	build();
	c.inc(ctr);

	// sub (not dec) so CF is written; ja = loop while the unsigned result is non-zero
	c.sub(count, 1);
	c.ja(body);
	c.bind(exit);
}
inline void maybe_flush_lbr(native_asm& c, uint count = 2)
{
	// Workaround for bad LBR callstacks which happen in some situations (mainly TSX) - execute additional RETs
	Label next = c.newLabel();
	c.lea(x86::rcx, x86::qword_ptr(next));

	// Push `count` return addresses, stepping back 16 bytes each (matching the 16-aligned RETs below)
	for (u32 i = 0; i < count; i++)
	{
		c.push(x86::rcx);
		c.sub(x86::rcx, 16);
	}

	// Each RET consumes one pushed address, chaining forward until execution reaches `next`
	for (u32 i = 0; i < count; i++)
	{
		c.ret();
		c.align(asmjit::AlignMode::kCode, 16);
	}

	c.bind(next);
}
#endif
}
// Build runtime function with asmjit::X86Assembler
template <typename FT, typename Asm = native_asm, typename F>
2022-08-17 13:53:05 +00:00
inline FT build_function_asm(std::string_view name, F&& builder, ::jit_runtime* custom_runtime = nullptr)
{
PPU LLVM arm64+macOS port (#12115) * BufferUtils: use naive function pointer on Apple arm64 Use naive function pointer on Apple arm64 because ASLR breaks asmjit. See BufferUtils.cpp comment for explanation on why this happens and how to fix if you want to use asmjit. * build-macos: fix source maps for Mac Tell Qt not to strip debug symbols when we're in debug or relwithdebinfo modes. * LLVM PPU: fix aarch64 on macOS Force MachO on macOS to fix LLVM being unable to patch relocations during codegen. Adds Aarch64 NEON intrinsics for x86 intrinsics used by PPUTranslator/Recompiler. * virtual memory: use 16k pages on aarch64 macOS Temporary hack to get things working by using 16k pages instead of 4k pages in VM emulation. * PPU/SPU: fix NEON intrinsics and compilation for arm64 macOS Fixes some intrinsics usage and patches usages of asmjit to properly emit absolute jmps so ASLR doesn't cause out of bounds rel jumps. Also patches the SPU recompiler to properly work on arm64 by telling LLVM to target arm64. * virtual memory: fix W^X toggles on macOS aarch64 Fixes W^X on macOS aarch64 by setting all JIT mmap'd regions to default to RW mode. For both SPU and PPU execution threads, when initialization finishes we toggle to RX mode. This exploits Apple's per-thread setting for RW/RX to let us be technically compliant with the OS's W^X enforcement while not needing to actually separate the memory allocated for code/data. * PPU: implement aarch64 specific functions Implements ppu_gateway for arm64 and patches LLVM initialization to use the correct triple. Adds some fixes for macOS W^X JIT restrictions when entering/exiting JITed code. * PPU: Mark rpcs3 calls as non-tail Strictly speaking, rpcs3 JIT -> C++ calls are not tail calls. If you call a function inside e.g. an L2 syscall, it will clobber LR on arm64 and subtly break returns in emulated code. Only JIT -> JIT "calls" should be tail. 
* macOS/arm64: compatibility fixes * vm: patch virtual memory for arm64 macOS Tag mmap calls with MAP_JIT to allow W^X on macOS. Fix mmap calls to existing mmap'd addresses that were tagged with MAP_JIT on macOS. Fix memory unmapping on 16K page machines with a hack to mark "unmapped" pages as RW. * PPU: remove wrong comment * PPU: fix a merge regression * vm: remove 16k page hacks * PPU: formatting fixes * PPU: fix arm64 null function assembly * ppu: clean up arch-specific instructions
2022-06-14 12:28:38 +00:00
#ifdef __APPLE__
pthread_jit_write_protect_np(false);
#endif
using namespace asmjit;
2022-08-17 13:53:05 +00:00
auto& rt = custom_runtime ? *custom_runtime : get_global_runtime();
CodeHolder code;
2021-12-28 19:25:36 +00:00
code.init(rt.environment());
#if defined(ARCH_X64)
native_args args;
#ifdef _WIN32
args[0] = x86::rcx;
args[1] = x86::rdx;
args[2] = x86::r8;
args[3] = x86::r9;
#else
args[0] = x86::rdi;
args[1] = x86::rsi;
args[2] = x86::rdx;
args[3] = x86::rcx;
#endif
#elif defined(ARCH_ARM64)
native_args args;
args[0] = a64::x0;
args[1] = a64::x1;
args[2] = a64::x2;
args[3] = a64::x3;
#endif
Asm compiler(&code);
2021-12-28 19:25:36 +00:00
compiler.addEncodingOptions(EncodingOptions::kOptimizedAlign);
if constexpr (std::is_invocable_r_v<bool, F, Asm&, native_args&>)
{
if (!builder(compiler, args))
return nullptr;
}
else
{
builder(compiler, args);
}
2022-08-29 00:55:59 +00:00
if constexpr (std::is_invocable_r_v<void, Asm>)
{
// Finalization
compiler();
}
2021-12-28 19:25:36 +00:00
const auto result = rt._add(&code);
jit_announce(result, code.codeSize(), name);
return reinterpret_cast<FT>(uptr(result));
}
#ifdef LLVM_AVAILABLE
namespace llvm
{
class LLVMContext;
class ExecutionEngine;
class Module;
}
// Temporary compiler interface
class jit_compiler final
{
2017-06-24 15:36:49 +00:00
// Local LLVM context
std::unique_ptr<llvm::LLVMContext> m_context{};
2017-06-24 15:36:49 +00:00
// Execution instance
2021-04-03 16:38:02 +00:00
std::unique_ptr<llvm::ExecutionEngine> m_engine{};
2017-02-26 15:56:31 +00:00
// Arch
2021-04-03 16:38:02 +00:00
std::string m_cpu{};
2017-02-26 15:56:31 +00:00
public:
jit_compiler(const std::unordered_map<std::string, u64>& _link, const std::string& _cpu, u32 flags = 0);
~jit_compiler();
2017-06-24 15:36:49 +00:00
// Get LLVM context
auto& get_context()
{
return *m_context;
2017-06-24 15:36:49 +00:00
}
2018-05-01 10:20:36 +00:00
auto& get_engine() const
{
return *m_engine;
}
2017-07-15 09:20:40 +00:00
// Add module (path to obj cache dir)
void add(std::unique_ptr<llvm::Module> _module, const std::string& path);
2018-05-01 10:20:36 +00:00
// Add module (not cached)
void add(std::unique_ptr<llvm::Module> _module);
2018-05-01 10:20:36 +00:00
2017-07-15 09:20:40 +00:00
// Add object (path to obj file)
void add(const std::string& path);
2022-08-17 13:53:05 +00:00
// Update global mapping for a single value
void update_global_mapping(const std::string& name, u64 addr);
// Check object file
static bool check(const std::string& path);
// Finalize
2017-06-24 15:36:49 +00:00
void fin();
2017-02-26 15:56:31 +00:00
// Get compiled function address
2017-06-24 15:36:49 +00:00
u64 get(const std::string& name);
2017-02-26 15:56:31 +00:00
// Get CPU info
static std::string cpu(const std::string& _cpu);
// Get system triple (PPU)
static std::string triple1();
// Get system triple (SPU)
static std::string triple2();
};
#endif