Adding bl (unconditional only).

Ben Vanik 2013-05-24 02:21:12 -07:00
parent 2153256109
commit eb9b9b0321
3 changed files with 179 additions and 154 deletions


@@ -63,117 +63,124 @@ namespace x64 {
// return e.GenerateIndirectionBranch(cia, target, lk, likely_local);
// }
// int XeEmitBranchTo(
// X64Emitter& e, X86Compiler& c, const char* src, uint32_t cia,
// bool lk, jit_value_t condition) {
// FunctionBlock* fn_block = e.fn_block();
int XeEmitBranchTo(
X64Emitter& e, X86Compiler& c, const char* src, uint32_t cia,
bool lk, void* condition = NULL) {
FunctionBlock* fn_block = e.fn_block();
// // Fast-path for branches to other blocks.
// // Only valid when not tracing branches.
// if (!FLAGS_trace_branches &&
// fn_block->outgoing_type == FunctionBlock::kTargetBlock) {
// e.branch_to_block_if(fn_block->outgoing_address, condition);
// return 0;
// }
// Fast-path for branches to other blocks.
// Only valid when not tracing branches.
if (fn_block->outgoing_type == FunctionBlock::kTargetBlock) {
if (condition) {
XEASSERTALWAYS();
} else {
//e.TraceBranch(cia);
//c.jmp(e.GetBlockLabel(fn_block->outgoing_address));
}
return 0;
}
// // Only branch of conditionals when we have one.
// jit_label_t post_jump_label = jit_label_undefined;
// if (condition) {
// // TODO(benvanik): add debug info for this?
// // char name[32];
// // xesnprintfa(name, XECOUNT(name), "loc_%.8X_bcx", i.address);
// jit_insn_branch_if_not(f, condition, &post_jump_label);
// }
// Only branch on conditionals when we have one.
//jit_label_t post_jump_label = jit_label_undefined;
if (condition) {
// TODO(benvanik): add debug info for this?
// char name[32];
// xesnprintfa(name, XECOUNT(name), "loc_%.8X_bcx", i.address);
//jit_insn_branch_if_not(f, condition, &post_jump_label);
XEASSERTALWAYS();
}
// if (FLAGS_trace_branches) {
// e.TraceBranch(cia);
// }
e.TraceBranch(cia);
// // Get the basic block and switch behavior based on outgoing type.
// int result = 0;
// switch (fn_block->outgoing_type) {
// case FunctionBlock::kTargetBlock:
// // Taken care of above usually.
// e.branch_to_block(fn_block->outgoing_address);
// break;
// case FunctionBlock::kTargetFunction:
// {
// // Spill all registers to memory.
// // TODO(benvanik): only spill ones used by the target function? Use
// // calling convention flags on the function to not spill temp
// // registers?
// e.SpillRegisters();
// Get the basic block and switch behavior based on outgoing type.
int result = 0;
switch (fn_block->outgoing_type) {
case FunctionBlock::kTargetBlock:
// Taken care of above.
XEASSERTALWAYS();
result = 1;
break;
case FunctionBlock::kTargetFunction:
{
// Spill all registers to memory.
// TODO(benvanik): only spill ones used by the target function? Use
// calling convention flags on the function to not spill temp
// registers?
e.SpillRegisters();
// XEASSERTNOTNULL(fn_block->outgoing_function);
// // TODO(benvanik): check to see if this is the last block in the function.
// // This would enable tail calls/etc.
// bool is_end = false;
// if (!lk || is_end) {
// // Tail. No need to refill the local register values, just return.
// // We optimize this by passing in the LR from our parent instead of the
// // next instruction. This allows the return from our callee to pop
// // all the way up.
// e.call_function(fn_block->outgoing_function,
// jit_value_get_param(f, 1), true);
// jit_insn_return(f, NULL);
// } else {
// // Will return here eventually.
// // Refill registers from state.
// e.call_function(fn_block->outgoing_function,
// e.get_uint64(cia + 4), false);
// e.FillRegisters();
// }
// break;
// }
// case FunctionBlock::kTargetLR:
// {
// // An indirect jump.
// printf("INDIRECT JUMP VIA LR: %.8X\n", cia);
// result = XeEmitIndirectBranchTo(e, f, src, cia, lk, kXEPPCRegLR);
// break;
// }
// case FunctionBlock::kTargetCTR:
// {
// // An indirect jump.
// printf("INDIRECT JUMP VIA CTR: %.8X\n", cia);
// result = XeEmitIndirectBranchTo(e, f, src, cia, lk, kXEPPCRegCTR);
// break;
// }
// default:
// case FunctionBlock::kTargetNone:
// XEASSERTALWAYS();
// result = 1;
// break;
// }
XEASSERTNOTNULL(fn_block->outgoing_function);
// TODO(benvanik): check to see if this is the last block in the function.
// This would enable tail calls/etc.
bool is_end = false;
if (!lk || is_end) {
// Tail. No need to refill the local register values, just return.
// We optimize this by passing in the LR from our parent instead of the
// next instruction. This allows the return from our callee to pop
// all the way up.
e.CallFunction(fn_block->outgoing_function, c.getGpArg(1), true);
// No ret needed - we jumped!
} else {
// Will return here eventually.
// Refill registers from state.
GpVar lr(c.newGpVar());
c.mov(lr, imm(cia + 4));
e.CallFunction(fn_block->outgoing_function, lr, false);
e.FillRegisters();
}
break;
}
case FunctionBlock::kTargetLR:
{
// An indirect jump.
printf("INDIRECT JUMP VIA LR: %.8X\n", cia);
XEASSERTALWAYS();
//result = XeEmitIndirectBranchTo(e, c, src, cia, lk, kXEPPCRegLR);
break;
}
case FunctionBlock::kTargetCTR:
{
// An indirect jump.
printf("INDIRECT JUMP VIA CTR: %.8X\n", cia);
XEASSERTALWAYS();
//result = XeEmitIndirectBranchTo(e, c, src, cia, lk, kXEPPCRegCTR);
break;
}
default:
case FunctionBlock::kTargetNone:
XEASSERTALWAYS();
result = 1;
break;
}
// if (condition) {
// jit_insn_label(f, &post_jump_label);
// }
if (condition) {
XEASSERTALWAYS();
//jit_insn_label(f, &post_jump_label);
}
// return result;
// }
return result;
}
// XEEMITTER(bx, 0x48000000, I )(X64Emitter& e, X86Compiler& c, InstrData& i) {
// // if AA then
// // NIA <- EXTS(LI || 0b00)
// // else
// // NIA <- CIA + EXTS(LI || 0b00)
// // if LK then
// // LR <- CIA + 4
XEEMITTER(bx, 0x48000000, I )(X64Emitter& e, X86Compiler& c, InstrData& i) {
// if AA then
// NIA <- EXTS(LI || 0b00)
// else
// NIA <- CIA + EXTS(LI || 0b00)
// if LK then
// LR <- CIA + 4
// uint32_t nia;
// if (i.I.AA) {
// nia = XEEXTS26(i.I.LI << 2);
// } else {
// nia = i.address + XEEXTS26(i.I.LI << 2);
// }
// if (i.I.LK) {
// e.update_lr_value(e.get_uint64(i.address + 4));
// }
uint32_t nia;
if (i.I.AA) {
nia = XEEXTS26(i.I.LI << 2);
} else {
nia = i.address + XEEXTS26(i.I.LI << 2);
}
if (i.I.LK) {
e.update_lr_value(imm(i.address + 4));
}
// return XeEmitBranchTo(e, f, "bx", i.address, i.I.LK, NULL);
// }
return XeEmitBranchTo(e, c, "bx", i.address, i.I.LK);
}
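
For reference, here is a standalone, interpreter-style sketch of the I-form branch semantics the bx emitter above compiles. It is illustrative only: the State struct and exts26 helper are stand-ins (not xe_ppc_state_t or XEEXTS26); only the LI/AA/LK handling mirrors the pseudocode in the comments.

#include <cstdint>

struct State {
  uint64_t lr;   // link register
  uint32_t nia;  // next instruction address
};

// Sign-extend the 26-bit (LI || 0b00) displacement to 32 bits.
static int32_t exts26(uint32_t v) {
  return static_cast<int32_t>(v << 6) >> 6;
}

// Interpreter-style equivalent of bx: b/bl (AA=0) and ba/bla (AA=1).
static void interpret_bx(State& s, uint32_t cia, uint32_t li, bool aa, bool lk) {
  if (lk) {
    s.lr = cia + 4;                         // bl: LR <- CIA + 4
  }
  int32_t disp = exts26(li << 2);
  s.nia = aa ? static_cast<uint32_t>(disp)  // absolute target
             : cia + disp;                  // CIA-relative target
}
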
// XEEMITTER(bcx, 0x40000000, B )(X64Emitter& e, X86Compiler& c, InstrData& i) {
// // if ¬BO[2] then
@@ -244,7 +251,7 @@ namespace x64 {
// } else {
// nia = i.address + XEEXTS26(i.B.BD << 2);
// }
// if (XeEmitBranchTo(e, f, "bcx", i.address, i.B.LK, ok)) {
// if (XeEmitBranchTo(e, c, "bcx", i.address, i.B.LK, ok)) {
// return 1;
// }
@@ -287,7 +294,7 @@ namespace x64 {
// ok = cond_ok;
// }
// if (XeEmitBranchTo(e, f, "bcctrx", i.address, i.XL.LK, ok)) {
// if (XeEmitBranchTo(e, c, "bcctrx", i.address, i.XL.LK, ok)) {
// return 1;
// }
@@ -353,7 +360,7 @@ namespace x64 {
// ok = cond_ok;
// }
// if (XeEmitBranchTo(e, f, "bclrx", i.address, i.XL.LK, ok)) {
// if (XeEmitBranchTo(e, c, "bclrx", i.address, i.XL.LK, ok)) {
// return 1;
// }
@@ -657,7 +664,7 @@ XEEMITTER(mtspr, 0x7C0003A6, XFX)(X64Emitter& e, X86Compiler& c, InstrDat
void X64RegisterEmitCategoryControl() {
// XEREGISTERINSTR(bx, 0x48000000);
XEREGISTERINSTR(bx, 0x48000000);
// XEREGISTERINSTR(bcx, 0x40000000);
// XEREGISTERINSTR(bcctrx, 0x4C000420);
// XEREGISTERINSTR(bclrx, 0x4C000020);


@@ -599,25 +599,10 @@ void X64Emitter::GenerateBasicBlock(FunctionBlock* block) {
// TODO(benvanik): finish up BB
}
// int X64Emitter::branch_to_block(uint32_t address) {
// std::map<uint32_t, jit_label_t>::iterator it = bbs_.find(address);
// return jit_insn_branch(fn_, &it->second);
// }
// int X64Emitter::branch_to_block_if(uint32_t address, jit_value_t value) {
// std::map<uint32_t, jit_label_t>::iterator it = bbs_.find(address);
// if (value) {
// return jit_insn_branch_if(fn_, value, &it->second);
// } else {
// return jit_insn_branch(fn_, &it->second);
// }
// }
// int X64Emitter::branch_to_block_if_not(uint32_t address, jit_value_t value) {
// XEASSERTNOTNULL(value);
// std::map<uint32_t, jit_label_t>::iterator it = bbs_.find(address);
// return jit_insn_branch_if_not(fn_, value, &it->second);
// }
Label& X64Emitter::GetBlockLabel(uint32_t address) {
std::map<uint32_t, Label>::iterator it = bbs_.find(address);
return it->second;
}
// int X64Emitter::branch_to_return() {
// return jit_insn_branch(fn_, &return_block_);
@@ -631,20 +616,44 @@ void X64Emitter::GenerateBasicBlock(FunctionBlock* block) {
// return jit_insn_branch_if_not(fn_, value, &return_block_);
// }
// int X64Emitter::call_function(FunctionSymbol* target_symbol,
// jit_value_t lr, bool tail) {
// PrepareFunction(target_symbol);
// jit_function_t target_fn = (jit_function_t)target_symbol->impl_value;
// XEASSERTNOTNULL(target_fn);
// int flags = 0;
// if (tail) {
// flags |= JIT_CALL_TAIL;
// }
// jit_value_t args[] = {jit_value_get_param(fn_, 0), lr};
// jit_insn_call(fn_, target_symbol->name(), target_fn, fn_signature_,
// args, XECOUNT(args), flags);
// return 1;
// }
int X64Emitter::CallFunction(FunctionSymbol* target_symbol,
GpVar& lr, bool tail) {
X86Compiler& c = compiler_;
// Prep the target function.
// If the target function was small we could try to make the whole thing now.
PrepareFunction(target_symbol);
void* target_ptr = target_symbol->impl_value;
XEASSERTNOTNULL(target_ptr);
if (tail) {
// Tail calls are just jumps.
#if defined(ASMJIT_WINDOWS)
// Calling convention: kX86FuncConvX64W
// Arguments passed as RCX, RDX, R8, R9
c.alloc(c.getGpArg(0), rcx);
c.alloc(lr, rdx);
c.jmp(imm((uint64_t)target_ptr));
#else
// Calling convention: kX86FuncConvX64U
// Arguments passed as RDI, RSI, RDX, RCX, R8, R9
c.alloc(c.getGpArg(0), rdi);
c.alloc(lr, rsi);
c.jmp(imm((uint64_t)target_ptr));
#endif // ASMJIT_WINDOWS
} else {
// void fn(ppc_state*, uint64_t)
X86CompilerFuncCall* call = c.call(target_ptr);
call->setComment(target_symbol->name());
call->setPrototype(kX86FuncConvDefault,
FuncBuilder2<void, void*, uint64_t>());
call->setArgument(0, c.getGpArg(0));
call->setArgument(1, lr);
}
return 0;
}
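
The two paths above mirror how a tail call and a normal call differ against the generated functions' prototype, void fn(ppc_state*, uint64_t lr): the tail path pre-places the arguments in the ABI argument registers (RCX/RDX on Win64, RDI/RSI on System V) and jumps, so the callee's return pops straight back to our own caller. A rough C-level equivalent, purely illustrative (GuestFn and the opaque ppc_state are stand-ins, and whether a compiler really emits jmp for the tail position is up to its optimizer):

#include <cstdint>

struct ppc_state;  // opaque guest register state
typedef void (*GuestFn)(ppc_state*, uint64_t lr);

// Non-tail path: call and fall through; the emitter refills its cached
// registers (FillRegisters) once the callee returns here.
static void call_and_continue(GuestFn target, ppc_state* state, uint64_t next_cia) {
  target(state, next_cia);
}

// Tail path: forward our own LR argument and "jump"; the callee's return
// pops all the way up, which is why the emitter loads RCX/RDX (Win64) or
// RDI/RSI (System V) and emits jmp instead of call.
static void tail_call(GuestFn target, ppc_state* state, uint64_t inherited_lr) {
  return target(state, inherited_lr);
}
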
void X64Emitter::TraceKernelCall() {
X86Compiler& c = compiler_;
@@ -952,21 +961,24 @@ void X64Emitter::FillRegisters() {
if (FLAGS_annotate_disassembly) {
c.comment("Filling XER");
}
c.mov(locals_.xer, ptr(c.getGpArg(0), offsetof(xe_ppc_state_t, xer), 8));
c.mov(locals_.xer,
qword_ptr(c.getGpArg(0), offsetof(xe_ppc_state_t, xer)));
}
if (locals_.lr.getId() != kInvalidValue) {
if (FLAGS_annotate_disassembly) {
c.comment("Filling LR");
}
c.mov(locals_.lr, ptr(c.getGpArg(0), offsetof(xe_ppc_state_t, lr), 8));
c.mov(locals_.lr,
qword_ptr(c.getGpArg(0), offsetof(xe_ppc_state_t, lr)));
}
if (locals_.ctr.getId() != kInvalidValue) {
if (FLAGS_annotate_disassembly) {
c.comment("Filling CTR");
}
c.mov(locals_.ctr, ptr(c.getGpArg(0), offsetof(xe_ppc_state_t, ctr), 8));
c.mov(locals_.ctr,
qword_ptr(c.getGpArg(0), offsetof(xe_ppc_state_t, ctr)));
}
// Fill the split CR values by extracting each one from the CR.
@@ -986,7 +998,7 @@ void X64Emitter::FillRegisters() {
c.comment("Filling CR");
}
cr = c.newGpVar();
c.mov(cr, ptr(c.getGpArg(0), offsetof(xe_ppc_state_t, cr), 8));
c.mov(cr, qword_ptr(c.getGpArg(0), offsetof(xe_ppc_state_t, cr)));
cr_tmp = c.newGpVar();
}
// (cr >> 28 - n * 4) & 0xF
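
A quick worked example of that extraction: the 32-bit CR packs eight 4-bit fields with CR0 in the most significant nibble, so field n falls out of (cr >> (28 - n * 4)) & 0xF. A self-contained check (the sample value is arbitrary):

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t cr = 0x28000005ull;  // arbitrary sample CR value
  for (int n = 0; n < 8; n++) {
    unsigned crn = (cr >> (28 - n * 4)) & 0xF;  // CRn nibble
    printf("cr%d = 0x%X\n", n, crn);  // -> cr0=0x2, cr1=0x8, cr2..cr6=0, cr7=0x5
  }
  return 0;
}
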
@@ -1004,7 +1016,7 @@ void X64Emitter::FillRegisters() {
c.comment("Filling r%d", n);
}
c.mov(locals_.gpr[n],
ptr(c.getGpArg(0), offsetof(xe_ppc_state_t, r) + 8 * n, 8));
qword_ptr(c.getGpArg(0), offsetof(xe_ppc_state_t, r) + 8 * n));
}
}
@@ -1014,7 +1026,7 @@ void X64Emitter::FillRegisters() {
c.comment("Filling f%d", n);
}
c.mov(locals_.fpr[n],
ptr(c.getGpArg(0), offsetof(xe_ppc_state_t, f) + 8 * n, 8));
qword_ptr(c.getGpArg(0), offsetof(xe_ppc_state_t, f) + 8 * n));
}
}
}
@@ -1031,7 +1043,7 @@ void X64Emitter::SpillRegisters() {
if (FLAGS_annotate_disassembly) {
c.comment("Spilling XER");
}
c.mov(ptr(c.getGpArg(0), offsetof(xe_ppc_state_t, xer)),
c.mov(qword_ptr(c.getGpArg(0), offsetof(xe_ppc_state_t, xer)),
locals_.xer);
}
@@ -1039,7 +1051,7 @@ void X64Emitter::SpillRegisters() {
if (FLAGS_annotate_disassembly) {
c.comment("Spilling LR");
}
c.mov(ptr(c.getGpArg(0), offsetof(xe_ppc_state_t, lr)),
c.mov(qword_ptr(c.getGpArg(0), offsetof(xe_ppc_state_t, lr)),
locals_.lr);
}
@@ -1047,7 +1059,7 @@ void X64Emitter::SpillRegisters() {
if (FLAGS_annotate_disassembly) {
c.comment("Spilling CTR");
}
c.mov(ptr(c.getGpArg(0), offsetof(xe_ppc_state_t, ctr)),
c.mov(qword_ptr(c.getGpArg(0), offsetof(xe_ppc_state_t, ctr)),
locals_.ctr);
}
@@ -1079,7 +1091,7 @@ void X64Emitter::SpillRegisters() {
}
}
if (cr.getId() != kInvalidValue) {
c.mov(ptr(c.getGpArg(0), offsetof(xe_ppc_state_t, cr)),
c.mov(qword_ptr(c.getGpArg(0), offsetof(xe_ppc_state_t, cr)),
cr);
}
@@ -1089,7 +1101,7 @@ void X64Emitter::SpillRegisters() {
if (FLAGS_annotate_disassembly) {
c.comment("Spilling r%d", n);
}
c.mov(ptr(c.getGpArg(0), offsetof(xe_ppc_state_t, r) + 8 * n),
c.mov(qword_ptr(c.getGpArg(0), offsetof(xe_ppc_state_t, r) + 8 * n),
v);
}
}
@@ -1100,7 +1112,7 @@ void X64Emitter::SpillRegisters() {
if (FLAGS_annotate_disassembly) {
c.comment("Spilling f%d", n);
}
c.mov(ptr(c.getGpArg(0), offsetof(xe_ppc_state_t, f) + 8 * n),
c.mov(qword_ptr(c.getGpArg(0), offsetof(xe_ppc_state_t, f) + 8 * n),
v);
}
}
@@ -1184,6 +1196,12 @@ void X64Emitter::update_lr_value(GpVar& value) {
c.mov(locals_.lr, zero_extend(value, 8));
}
void X64Emitter::update_lr_value(AsmJit::Imm& imm) {
X86Compiler& c = compiler_;
XEASSERT(locals_.lr.getId() != kInvalidValue);
c.mov(locals_.lr, imm);
}
GpVar& X64Emitter::ctr_value() {
X86Compiler& c = compiler_;
XEASSERT(locals_.ctr.getId() != kInvalidValue);


@@ -41,14 +41,13 @@ public:
sdb::FunctionSymbol* symbol();
sdb::FunctionBlock* fn_block();
// int branch_to_block(uint32_t address);
// int branch_to_block_if(uint32_t address, jit_value_t value);
// int branch_to_block_if_not(uint32_t address, jit_value_t value);
AsmJit::Label& GetBlockLabel(uint32_t address);
int CallFunction(sdb::FunctionSymbol* target_symbol, AsmJit::GpVar& lr,
bool tail);
// int branch_to_return();
// int branch_to_return_if(jit_value_t value);
// int branch_to_return_if_not(jit_value_t value);
// int call_function(sdb::FunctionSymbol* target_symbol, jit_value_t lr,
// bool tail);
void TraceKernelCall();
void TraceUserCall();
@@ -70,6 +69,7 @@ public:
AsmJit::GpVar& lr_value();
void update_lr_value(AsmJit::GpVar& value);
void update_lr_value(AsmJit::Imm& imm);
AsmJit::GpVar& ctr_value();
void update_ctr_value(AsmJit::GpVar& value);