Only create the shared return block if required.

Ben Vanik 2013-05-25 00:45:16 -07:00
parent da62ec56f9
commit ed401449b7
1 changed file with 19 additions and 10 deletions


@@ -231,7 +231,7 @@ int X64Emitter::MakeFunction(FunctionSymbol* symbol) {
   symbol_ = symbol;
   fn_block_ = NULL;
-  return_block_ = c.newLabel();
+  return_block_ = Label();
   internal_indirection_block_ = Label();
   external_indirection_block_ = Label();
@@ -425,7 +425,8 @@ FunctionBlock* X64Emitter::fn_block() {
 void X64Emitter::GenerateSharedBlocks() {
   X86Compiler& c = compiler_;
-  // Create a return block.
+  // Create a return block, if it was used.
+  if (return_block_.getId() != kInvalidValue) {
   // This spills registers and returns. All non-tail returns should branch
   // here to do the return and ensure registers are spilled.
   // This will be moved to the end after all the other blocks are created.
@@ -435,6 +436,7 @@ void X64Emitter::GenerateSharedBlocks() {
   c.bind(return_block_);
   SpillRegisters();
   c.ret();
+  }
   // Build indirection block on demand.
   // We have already prepped all basic blocks, so we can build these tables now.
@@ -616,6 +618,11 @@ void X64Emitter::GenerateBasicBlock(FunctionBlock* block) {
 }
 Label& X64Emitter::GetReturnLabel() {
+  X86Compiler& c = compiler_;
+  // Implicit creation on first use.
+  if (return_block_.getId() == kInvalidValue) {
+    return_block_ = c.newLabel();
+  }
   return return_block_;
 }
@@ -632,6 +639,8 @@ int X64Emitter::CallFunction(FunctionSymbol* target_symbol,
   // If the target function was small we could try to make the whole thing now.
   PrepareFunction(target_symbol);
   SpillRegisters();
+  uint64_t target_ptr = (uint64_t)target_symbol->impl_value;
+  XEASSERTNOTNULL(target_ptr);
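
For reference, a minimal self-contained sketch of the lazy-label pattern the diff above applies. ToyLabel, ToyCompiler, and ToyEmitter are illustrative stand-ins, not AsmJit or xenia types; only the sentinel check against kInvalidValue, the create-on-first-use accessor, and the guarded emission in GenerateSharedBlocks mirror the actual change.

#include <cstdint>
#include <cstdio>

// Sentinel for an unbound label, mirroring the check in the diff above.
static const uint32_t kInvalidValue = 0xFFFFFFFF;

struct ToyLabel {
  uint32_t id = kInvalidValue;  // default-constructed labels are "not created yet"
  uint32_t getId() const { return id; }
};

struct ToyCompiler {
  uint32_t next_id = 0;
  ToyLabel newLabel() { ToyLabel l; l.id = next_id++; return l; }
  void bind(const ToyLabel& l) { std::printf("bind L%u\n", (unsigned)l.getId()); }
  void ret() { std::printf("ret\n"); }
};

struct ToyEmitter {
  ToyCompiler c;
  ToyLabel return_block_;  // starts invalid, like return_block_ = Label();

  // Implicit creation on first use, as in GetReturnLabel() above.
  ToyLabel& GetReturnLabel() {
    if (return_block_.getId() == kInvalidValue) {
      return_block_ = c.newLabel();
    }
    return return_block_;
  }

  // Only emit the shared return block if some caller actually asked for it.
  void GenerateSharedBlocks() {
    if (return_block_.getId() != kInvalidValue) {
      c.bind(return_block_);
      c.ret();
    }
  }
};

int main() {
  ToyEmitter leaf;
  leaf.GenerateSharedBlocks();          // label never requested: nothing emitted

  ToyEmitter with_return;
  with_return.GetReturnLabel();         // first use creates the label
  with_return.GenerateSharedBlocks();   // now binds it and emits the return
  return 0;
}

The same guard in the real GenerateSharedBlocks lets functions that never branch to the shared return skip the label allocation, the bind, and the SpillRegisters/ret block entirely.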