CPU: Preparations for AArch64 recompiler

This commit is contained in:
Connor McLaughlin 2019-12-03 01:29:56 +10:00
parent efb8033d48
commit bbe1cb5fe9
7 changed files with 1905 additions and 27 deletions

View File

@ -90,6 +90,13 @@ if(${CPU_ARCH} STREQUAL "x64")
cpu_recompiler_code_generator_x64.cpp
)
message("Building x64 recompiler")
# AArch64 host: enable the recompiler, compile the shared recompiler sources plus the
# AArch64-specific code generator, and link the `vixl` assembler library (presumably
# provided as a target elsewhere in the build -- not visible in this fragment).
elseif(${CPU_ARCH} STREQUAL "aarch64")
target_compile_definitions(core PRIVATE "WITH_RECOMPILER=1")
target_sources(core PRIVATE ${RECOMPILER_SRCS}
cpu_recompiler_code_generator_aarch64.cpp
)
target_link_libraries(core PRIVATE vixl)
message("Building AArch64 recompiler")
else()
# No recompiler backend for this CPU architecture; WITH_RECOMPILER stays undefined.
message("Not building recompiler")
endif()

View File

@ -5,16 +5,6 @@ Log_SetChannel(CPU::Recompiler);
namespace CPU::Recompiler {
// Sets up a code generator over the given core and JIT code buffer.
// Two emitters are created over the buffer: a "near" emitter on the primary code space and a
// "far" emitter on the far code space; m_emit initially points at the near emitter.
// InitHostRegs() is presumably implemented per host architecture -- not visible in this chunk.
CodeGenerator::CodeGenerator(Core* cpu, JitCodeBuffer* code_buffer, const ASMFunctions& asm_functions)
: m_cpu(cpu), m_code_buffer(code_buffer), m_asm_functions(asm_functions), m_register_cache(*this),
m_near_emitter(code_buffer->GetFreeCodeSpace(), code_buffer->GetFreeCodePointer()),
m_far_emitter(code_buffer->GetFreeFarCodeSpace(), code_buffer->GetFreeFarCodePointer()), m_emit(&m_near_emitter)
{
InitHostRegs();
}
// Out-of-line default destructor (members handle their own cleanup).
CodeGenerator::~CodeGenerator() = default;
u32 CodeGenerator::CalculateRegisterOffset(Reg reg)
{
return uint32(offsetof(Core, m_regs.r[0]) + (static_cast<u32>(reg) * sizeof(u32)));
@ -255,6 +245,16 @@ Value CodeGenerator::ConvertValueSize(const Value& value, RegSize size, bool sig
return new_value;
}
// Returns a Value guaranteed to reside in a host register.
// If the input is not yet register-resident, a scratch register is allocated and the value is
// copied into it. Otherwise a view of the existing register is returned, carrying only the
// Valid | InHostRegister flags (i.e. without any ownership/scratch flags the source may hold).
Value CodeGenerator::GetValueInHostRegister(const Value& value)
{
  if (!value.IsInHostRegister())
  {
    Value reg_value = m_register_cache.AllocateScratch(value.size);
    EmitCopyValue(reg_value.host_reg, value);
    return reg_value;
  }

  return Value(value.regcache, value.host_reg, value.size, ValueFlags::Valid | ValueFlags::InHostRegister);
}
void CodeGenerator::ConvertValueSizeInPlace(Value* value, RegSize size, bool sign_extend)
{
DebugAssert(value->size != size);

View File

@ -156,6 +156,8 @@ private:
Value ConvertValueSize(const Value& value, RegSize size, bool sign_extend);
void ConvertValueSizeInPlace(Value* value, RegSize size, bool sign_extend);
Value GetValueInHostRegister(const Value& value);
void SwitchToFarCode();
void SwitchToNearCode();
void* GetCurrentNearCodePointer() const;

File diff suppressed because it is too large. Load Diff

View File

@ -2,10 +2,6 @@
namespace CPU::Recompiler {
#if !defined(Y_CPU_X64)
// Generic no-op fallback for hosts without a backend-specific alignment requirement;
// the x64 backend presumably supplies its own definition elsewhere.
void CodeGenerator::AlignCodeBuffer(JitCodeBuffer* code_buffer) {}
#endif
void CodeGenerator::EmitLoadGuestRegister(HostReg host_reg, Reg guest_reg)
{
EmitLoadCPUStructField(host_reg, RegSize_32, CalculateRegisterOffset(guest_reg));
@ -25,15 +21,4 @@ void CodeGenerator::EmitStoreInterpreterLoadDelay(Reg reg, const Value& value)
m_load_delay_dirty = true;
}
#if !defined(Y_CPU_X64)
// Placeholder for non-x64 hosts: branch emission is not implemented yet, so reaching
// this at runtime aborts via Panic().
void CodeGenerator::EmitBranch(Condition condition, Reg lr_reg, bool relative, const Value& branch_address)
{
Panic("Not implemented");
}
// Placeholder for non-x64 hosts: guest-exception raising is not implemented yet.
void CodeGenerator::EmitRaiseException(Exception excode, Condition condition /* = Condition::Always */)
{
Panic("Not implemented");
}
#endif
} // namespace CPU::Recompiler
} // namespace CPU::Recompiler

View File

@ -74,6 +74,16 @@ static const Xbyak::Reg64 GetCPUPtrReg()
return GetHostReg64(RCPUPTR);
}
// x64 backend constructor: identical wiring to the shared variant -- a near emitter over the
// primary code space, a far emitter over the far code space, with m_emit starting on the near
// emitter -- followed by per-backend host register initialization.
CodeGenerator::CodeGenerator(Core* cpu, JitCodeBuffer* code_buffer, const ASMFunctions& asm_functions)
: m_cpu(cpu), m_code_buffer(code_buffer), m_asm_functions(asm_functions), m_register_cache(*this),
m_near_emitter(code_buffer->GetFreeCodeSpace(), code_buffer->GetFreeCodePointer()),
m_far_emitter(code_buffer->GetFreeFarCodeSpace(), code_buffer->GetFreeFarCodePointer()), m_emit(&m_near_emitter)
{
InitHostRegs();
}
// Out-of-line default destructor (members handle their own cleanup).
CodeGenerator::~CodeGenerator() = default;
const char* CodeGenerator::GetHostRegName(HostReg reg, RegSize size /*= HostPointerSize*/)
{
static constexpr std::array<const char*, HostReg_Count> reg8_names = {

View File

@ -4,6 +4,9 @@
#if defined(Y_CPU_X64)
#define XBYAK_NO_OP_NAMES 1
#include "xbyak.h"
#elif defined(Y_CPU_AARCH64)
#include <vixl/aarch64/constants-aarch64.h>
#include <vixl/aarch64/macro-assembler-aarch64.h>
#endif
namespace CPU {
@ -63,6 +66,24 @@ constexpr u32 MAX_FAR_HOST_BYTES_PER_INSTRUCTION = 128;
// Are shifts implicitly masked to 0..31?
constexpr bool SHIFTS_ARE_IMPLICITLY_MASKED = true;
#elif defined(Y_CPU_AARCH64)

// AArch64 host: registers are plain indices into vixl's general-purpose register file,
// and code is emitted through vixl's MacroAssembler.
using HostReg = unsigned;
using CodeEmitter = vixl::aarch64::MacroAssembler;

enum : u32
{
HostReg_Count = vixl::aarch64::kNumberOfRegisters
};

// One-past-the-last register index doubles as the "no register" sentinel.
constexpr HostReg HostReg_Invalid = static_cast<HostReg>(HostReg_Count);
constexpr RegSize HostPointerSize = RegSize_64;

// A reasonable "maximum" number of bytes per instruction.
constexpr u32 MAX_NEAR_HOST_BYTES_PER_INSTRUCTION = 64;
constexpr u32 MAX_FAR_HOST_BYTES_PER_INSTRUCTION = 128;

// Are shifts implicitly masked to 0..31?
constexpr bool SHIFTS_ARE_IMPLICITLY_MASKED = true;
#else
using HostReg = int;
@ -84,4 +105,4 @@ constexpr bool SHIFTS_ARE_IMPLICITLY_MASKED = false;
} // namespace Recompiler
} // namespace CPU
} // namespace CPU