[x64] Allow the JIT to use volatile registers

This commit is contained in:
Dr. Chat 2018-11-18 15:22:56 -06:00
parent b57bb74965
commit 384ec98a42
3 changed files with 49 additions and 55 deletions

View File

@ -446,14 +446,13 @@ GuestToHostThunk X64ThunkEmitter::EmitGuestToHostThunk() {
sub(rsp, stack_size);
// Save off volatile registers.
// TODO(DrChat): Enable when necessary.
// EmitSaveVolatileRegs();
EmitSaveVolatileRegs();
mov(rax, rcx); // function
mov(rcx, GetContextReg()); // context
call(rax);
// EmitLoadVolatileRegs();
EmitLoadVolatileRegs();
add(rsp, stack_size);
ret();
@ -468,21 +467,22 @@ extern "C" uint64_t ResolveFunction(void* raw_context, uint32_t target_address);
ResolveFunctionThunk X64ThunkEmitter::EmitResolveFunctionThunk() {
// ebx = target PPC address
// rcx = context
uint32_t stack_size = 0x18;
const size_t stack_size = StackLayout::THUNK_STACK_SIZE;
// rsp + 0 = return address
mov(qword[rsp + 8 * 2], rdx);
mov(qword[rsp + 8 * 1], rcx);
sub(rsp, stack_size);
// Save volatile registers
EmitSaveVolatileRegs();
mov(rcx, rsi); // context
mov(rdx, rbx);
mov(rax, uint64_t(&ResolveFunction));
call(rax);
EmitLoadVolatileRegs();
add(rsp, stack_size);
mov(rcx, qword[rsp + 8 * 1]);
mov(rdx, qword[rsp + 8 * 2]);
jmp(rax);
void* fn = Emplace(stack_size);
@ -491,34 +491,38 @@ ResolveFunctionThunk X64ThunkEmitter::EmitResolveFunctionThunk() {
void X64ThunkEmitter::EmitSaveVolatileRegs() {
// Save off volatile registers.
mov(qword[rsp + offsetof(StackLayout::Thunk, r[0])], rcx);
mov(qword[rsp + offsetof(StackLayout::Thunk, r[1])], rdx);
mov(qword[rsp + offsetof(StackLayout::Thunk, r[2])], r8);
mov(qword[rsp + offsetof(StackLayout::Thunk, r[3])], r9);
mov(qword[rsp + offsetof(StackLayout::Thunk, r[4])], r10);
mov(qword[rsp + offsetof(StackLayout::Thunk, r[5])], r11);
// mov(qword[rsp + offsetof(StackLayout::Thunk, r[0])], rax);
mov(qword[rsp + offsetof(StackLayout::Thunk, r[1])], rcx);
mov(qword[rsp + offsetof(StackLayout::Thunk, r[2])], rdx);
mov(qword[rsp + offsetof(StackLayout::Thunk, r[3])], r8);
mov(qword[rsp + offsetof(StackLayout::Thunk, r[4])], r9);
mov(qword[rsp + offsetof(StackLayout::Thunk, r[5])], r10);
mov(qword[rsp + offsetof(StackLayout::Thunk, r[6])], r11);
movaps(qword[rsp + offsetof(StackLayout::Thunk, xmm[0])], xmm1);
movaps(qword[rsp + offsetof(StackLayout::Thunk, xmm[1])], xmm2);
movaps(qword[rsp + offsetof(StackLayout::Thunk, xmm[2])], xmm3);
movaps(qword[rsp + offsetof(StackLayout::Thunk, xmm[3])], xmm4);
movaps(qword[rsp + offsetof(StackLayout::Thunk, xmm[4])], xmm5);
// movaps(qword[rsp + offsetof(StackLayout::Thunk, xmm[0])], xmm0);
movaps(qword[rsp + offsetof(StackLayout::Thunk, xmm[1])], xmm1);
movaps(qword[rsp + offsetof(StackLayout::Thunk, xmm[2])], xmm2);
movaps(qword[rsp + offsetof(StackLayout::Thunk, xmm[3])], xmm3);
movaps(qword[rsp + offsetof(StackLayout::Thunk, xmm[4])], xmm4);
movaps(qword[rsp + offsetof(StackLayout::Thunk, xmm[5])], xmm5);
}
void X64ThunkEmitter::EmitLoadVolatileRegs() {
// Load volatile registers from our stack frame.
movaps(xmm1, qword[rsp + offsetof(StackLayout::Thunk, xmm[0])]);
movaps(xmm2, qword[rsp + offsetof(StackLayout::Thunk, xmm[1])]);
movaps(xmm3, qword[rsp + offsetof(StackLayout::Thunk, xmm[2])]);
movaps(xmm4, qword[rsp + offsetof(StackLayout::Thunk, xmm[3])]);
movaps(xmm5, qword[rsp + offsetof(StackLayout::Thunk, xmm[4])]);
// movaps(xmm0, qword[rsp + offsetof(StackLayout::Thunk, xmm[0])]);
movaps(xmm1, qword[rsp + offsetof(StackLayout::Thunk, xmm[1])]);
movaps(xmm2, qword[rsp + offsetof(StackLayout::Thunk, xmm[2])]);
movaps(xmm3, qword[rsp + offsetof(StackLayout::Thunk, xmm[3])]);
movaps(xmm4, qword[rsp + offsetof(StackLayout::Thunk, xmm[4])]);
movaps(xmm5, qword[rsp + offsetof(StackLayout::Thunk, xmm[5])]);
mov(rcx, qword[rsp + offsetof(StackLayout::Thunk, r[0])]);
mov(rdx, qword[rsp + offsetof(StackLayout::Thunk, r[1])]);
mov(r8, qword[rsp + offsetof(StackLayout::Thunk, r[2])]);
mov(r9, qword[rsp + offsetof(StackLayout::Thunk, r[3])]);
mov(r10, qword[rsp + offsetof(StackLayout::Thunk, r[4])]);
mov(r11, qword[rsp + offsetof(StackLayout::Thunk, r[5])]);
// mov(rax, qword[rsp + offsetof(StackLayout::Thunk, r[0])]);
mov(rcx, qword[rsp + offsetof(StackLayout::Thunk, r[1])]);
mov(rdx, qword[rsp + offsetof(StackLayout::Thunk, r[2])]);
mov(r8, qword[rsp + offsetof(StackLayout::Thunk, r[3])]);
mov(r9, qword[rsp + offsetof(StackLayout::Thunk, r[4])]);
mov(r10, qword[rsp + offsetof(StackLayout::Thunk, r[5])]);
mov(r11, qword[rsp + offsetof(StackLayout::Thunk, r[6])]);
}
void X64ThunkEmitter::EmitSaveNonvolatileRegs() {

View File

@ -56,12 +56,13 @@ static const size_t kStashOffset = 32;
// static const size_t kStashOffsetHigh = 32 + 32;
// Physical GPRs handed to the register allocator, indexed by virtual register
// index. Now includes the volatile r10/r11 (saved across host calls by the
// thunks) in addition to the nonvolatile rbx/r12-r15. Entry count must equal
// GPR_COUNT (7).
const uint32_t X64Emitter::gpr_reg_map_[X64Emitter::GPR_COUNT] = {
    Xbyak::Operand::RBX, Xbyak::Operand::R10, Xbyak::Operand::R11,
    Xbyak::Operand::R12, Xbyak::Operand::R13, Xbyak::Operand::R14,
    Xbyak::Operand::R15,
};
// Physical XMM registers handed to the register allocator, indexed by virtual
// register index. Now includes the volatile xmm4/xmm5 (preserved across host
// calls by the thunks). Entry count must equal XMM_COUNT (12).
const uint32_t X64Emitter::xmm_reg_map_[X64Emitter::XMM_COUNT] = {
    4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
};
X64Emitter::X64Emitter(X64Backend* backend, XbyakAllocator* allocator)
@ -494,30 +495,20 @@ void X64Emitter::CallExtern(const hir::Instr* instr, const Function* function) {
}
}
// Emits a call to a host function taking the guest context as its only
// argument. Routed through CallNativeSafe so volatile registers are
// saved/restored around the call.
void X64Emitter::CallNative(void* fn) { CallNativeSafe(fn); }
// Typed overload: host function of (context) -> uint64_t. Delegates to
// CallNativeSafe, which handles volatile register save/restore.
void X64Emitter::CallNative(uint64_t (*fn)(void* raw_context)) {
  CallNativeSafe(reinterpret_cast<void*>(fn));
}
// Typed overload: host function of (context, arg0) -> uint64_t with no
// immediate argument value supplied (caller has already placed arg0).
// Delegates to CallNativeSafe for volatile register preservation.
void X64Emitter::CallNative(uint64_t (*fn)(void* raw_context, uint64_t arg0)) {
  CallNativeSafe(reinterpret_cast<void*>(fn));
}
// Typed overload: host function of (context, arg0) -> uint64_t with an
// immediate arg0. Loads arg0 into the first native parameter register
// (via GetNativeParam, not a hard-coded rdx) before delegating to
// CallNativeSafe for volatile register preservation.
void X64Emitter::CallNative(uint64_t (*fn)(void* raw_context, uint64_t arg0),
                            uint64_t arg0) {
  mov(GetNativeParam(0), arg0);
  CallNativeSafe(reinterpret_cast<void*>(fn));
}
void X64Emitter::CallNativeSafe(void* fn) {
@ -537,8 +528,7 @@ void X64Emitter::SetReturnAddress(uint64_t value) {
mov(qword[rsp + StackLayout::GUEST_CALL_RET_ADDR], rax);
}
Xbyak::Reg64 X64Emitter::GetNativeParam(uint32_t param)
{
Xbyak::Reg64 X64Emitter::GetNativeParam(uint32_t param) {
if (param == 0)
return rdx;
else if (param == 1)

View File

@ -139,13 +139,13 @@ class X64Emitter : public Xbyak::CodeGenerator {
std::vector<SourceMapEntry>* out_source_map);
public:
// Register budget visible to the JIT's register allocator.
// Reserved: rsp, rsi, rdi
// Scratch: rax/rcx/rdx
//          xmm0-2
// Available: rbx, r10-r15
//            xmm4-xmm15 (save to get xmm3)
// Counts must match the gpr_reg_map_ / xmm_reg_map_ initializers.
static const int GPR_COUNT = 7;
static const int XMM_COUNT = 12;
static void SetupReg(const hir::Value* v, Xbyak::Reg8& r) {
auto idx = gpr_reg_map_[v->reg.index];