Hooking up code emission.

Ben Vanik 2014-01-02 20:41:13 -08:00
parent 7969349126
commit e14d3379cb
12 changed files with 280 additions and 17 deletions

View File

@@ -72,7 +72,7 @@ void LIRBuilder::Dump(StringBuffer* str) {
continue;
}
if (i->opcode == &LIR_OPCODE_COMMENT_info) {
str->Append(" ; %s\n", (char*)i->arg[0].i64);
str->Append(" ; %s\n", i->arg[0].string);
i = i->next;
continue;
}
@@ -189,7 +189,19 @@ LIRInstr* LIRBuilder::AppendInstr(
}
void LIRBuilder::Comment(const char* format, ...) {
char buffer[1024];
va_list args;
va_start(args, format);
xevsnprintfa(buffer, 1024, format, args);
va_end(args);
size_t len = xestrlena(buffer);
if (!len) {
return;
}
void* p = arena_->Alloc(len + 1);
xe_copy_struct(p, buffer, len + 1);
auto instr = AppendInstr(LIR_OPCODE_COMMENT_info);
instr->arg[0].string = (char*)p;
}
void LIRBuilder::Nop() {
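The new Comment() path follows a format-then-arena-copy pattern: format into a stack buffer, copy the result into the builder's arena so the string outlives the call, then stash the pointer in the instruction's string operand for Dump() to print. Below is an illustrative, self-contained sketch of that pattern using standard C calls and a toy arena; ToyArena and ArenaComment are hypothetical names, not part of this commit.

// Illustrative sketch (not part of the diff): the same format-then-copy
// pattern Comment() uses, written with standard C calls and a toy arena.
#include <cstdarg>
#include <cstdio>
#include <cstring>
#include <vector>

struct ToyArena {
  std::vector<char*> blocks;  // hypothetical stand-in for the builder's arena
  void* Alloc(size_t size) {
    char* p = new char[size];
    blocks.push_back(p);
    return p;
  }
  ~ToyArena() { for (char* p : blocks) delete[] p; }
};

// Formats into a stack buffer, then copies into the arena so the string
// outlives the call; the caller stashes the pointer in an instruction operand.
const char* ArenaComment(ToyArena* arena, const char* format, ...) {
  char buffer[1024];
  va_list args;
  va_start(args, format);
  vsnprintf(buffer, sizeof(buffer), format, args);
  va_end(args);
  size_t len = strlen(buffer);
  if (!len) return nullptr;
  void* p = arena->Alloc(len + 1);
  memcpy(p, buffer, len + 1);
  return (const char*)p;
}

int main() {
  ToyArena arena;
  const char* s = ArenaComment(&arena, "lowering block %d", 7);
  printf(" ; %s\n", s);  // mirrors how Dump() prints arg[0].string
  return 0;
}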

View File

@@ -76,6 +76,7 @@ typedef union {
float f32;
double f64;
uint64_t offset;
char* string;
} LIROperand;

View File

@@ -55,8 +55,13 @@ int LoweringTable::Process(
while (hir_block) {
auto hir_label = hir_block->label_head;
while (hir_label) {
// TODO(benvanik): copy name to LIR label.
hir_label->tag = lir_builder->NewLabel();
auto lir_label = lir_builder->NewLabel();
if (hir_label->name) {
size_t label_len = xestrlena(hir_label->name);
lir_label->name = (char*)lir_builder->arena()->Alloc(label_len + 1);
xe_copy_struct(lir_label->name, hir_label->name, label_len + 1);
}
hir_label->tag = lir_label;
hir_label = hir_label->next;
}
hir_block = hir_block->next;
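The label hunk above copies the HIR label name into the LIR arena and stashes the new LIR label in hir_label->tag. The sketch below is an assumption about how a later lowering sequence would read that tag back to resolve a branch target; lir::LIRLabel and the Jump() helper are illustrative names, not confirmed by this commit.

// Hypothetical sketch: resolving a branch target from the tag set in Process().
// lir::LIRLabel and Jump() are assumed names for illustration only.
void LowerBranchExample(lir::LIRBuilder* lir_builder, hir::Label* hir_target) {
  auto lir_target = (lir::LIRLabel*)hir_target->tag;  // LIR label stashed above
  lir_builder->Jump(lir_target);                      // assumed jump-emitting helper
}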

View File

@@ -6,6 +6,8 @@
'x64_assembler.h',
'x64_backend.cc',
'x64_backend.h',
'x64_code_cache.cc',
'x64_code_cache.h',
'x64_emitter.cc',
'x64_emitter.h',
'x64_function.cc',

View File

@@ -73,7 +73,6 @@ int X64Assembler::Initialize() {
void X64Assembler::Reset() {
builder_->Reset();
optimizer_->Reset();
emitter_->Reset();
string_buffer_.Reset();
Assembler::Reset();
}
@@ -107,13 +106,14 @@ int X64Assembler::Assemble(
}
// Emit machine code.
// TODO(benvanik): machine code.
//result = emitter_->Emit(builder_, &machine_code, &length);
void* machine_code = 0;
size_t code_size = 0;
result = emitter_->Emit(builder_, machine_code, code_size);
XEEXPECTZERO(result);
// Stash generated machine code.
if (debug_info) {
DumpMachineCode(&string_buffer_);
DumpMachineCode(machine_code, code_size, &string_buffer_);
debug_info->set_machine_code_disasm(string_buffer_.ToString());
string_buffer_.Reset();
}
@@ -132,19 +132,20 @@ XECLEANUP:
return result;
}
void X64Assembler::DumpMachineCode(StringBuffer* str) {
void X64Assembler::DumpMachineCode(
void* machine_code, size_t code_size, StringBuffer* str) {
BE::DISASM disasm;
xe_zero_struct(&disasm, sizeof(disasm));
disasm.Archi = 64;
disasm.Options = BE::Tabulation + BE::MasmSyntax + BE::PrefixedNumeral;
disasm.EIP = 0;// (BE::UIntPtr)assembler_.getCode();
BE::UIntPtr eip_end = 0;// assembler_.getCode() + assembler_.getCodeSize();
disasm.EIP = (BE::UIntPtr)machine_code;
BE::UIntPtr eip_end = disasm.EIP + code_size;
while (disasm.EIP < eip_end) {
size_t len = BE::Disasm(&disasm);
if (len == BE::UNKNOWN_OPCODE) {
break;
}
str->Append("%p %s", disasm.EIP, disasm.CompleteInstr);
str->Append("%p %s\n", disasm.EIP, disasm.CompleteInstr);
disasm.EIP += len;
}
}
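With the emitter now returning the real buffer, DumpMachineCode() walks actual bytes with BeaEngine instead of a stubbed-out range. As a rough illustration (not part of the diff), feeding it a tiny hand-written buffer from inside X64Assembler would look like the lines below; the addresses in the output are made up and depend on where the buffer lives.

// Illustrative only: "xor eax, eax; ret" as raw bytes.
uint8_t stub[] = { 0x31, 0xC0, 0xC3 };
StringBuffer sb;
DumpMachineCode(stub, sizeof(stub), &sb);
// sb now holds one "%p %s\n" line per decoded instruction, roughly:
//   00000000DEADBEEF xor eax, eax
//   00000000DEADBEF1 ret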

View File

@@ -39,7 +39,7 @@ public:
runtime::DebugInfo* debug_info, runtime::Function** out_function);
private:
void DumpMachineCode(StringBuffer* str);
void DumpMachineCode(void* machine_code, size_t code_size, StringBuffer* str);
private:
X64Backend* x64_backend_;

View File

@@ -11,6 +11,7 @@
#include <alloy/backend/x64/tracing.h>
#include <alloy/backend/x64/x64_assembler.h>
#include <alloy/backend/x64/x64_code_cache.h>
#include <alloy/backend/x64/lowering/lowering_table.h>
#include <alloy/backend/x64/lowering/lowering_sequences.h>
@@ -22,7 +23,7 @@ using namespace alloy::runtime;
X64Backend::X64Backend(Runtime* runtime) :
lowering_table_(0),
code_cache_(0), lowering_table_(0),
Backend(runtime) {
}
@@ -30,6 +31,7 @@ X64Backend::~X64Backend() {
alloy::tracing::WriteEvent(EventType::Deinit({
}));
delete lowering_table_;
delete code_cache_;
}
int X64Backend::Initialize() {
@@ -38,6 +40,12 @@ int X64Backend::Initialize() {
return result;
}
code_cache_ = new X64CodeCache();
result = code_cache_->Initialize();
if (result) {
return result;
}
lowering_table_ = new LoweringTable(this);
RegisterSequences(lowering_table_);

View File

@@ -19,6 +19,7 @@ namespace alloy {
namespace backend {
namespace x64 {
class X64CodeCache;
namespace lowering { class LoweringTable; }
@@ -30,6 +31,7 @@ public:
X64Backend(runtime::Runtime* runtime);
virtual ~X64Backend();
X64CodeCache* code_cache() const { return code_cache_; }
lowering::LoweringTable* lowering_table() const { return lowering_table_; }
virtual int Initialize();
@@ -37,6 +39,7 @@ public:
virtual Assembler* CreateAssembler();
private:
X64CodeCache* code_cache_;
lowering::LoweringTable* lowering_table_;
};

View File

@@ -0,0 +1,88 @@
/**
******************************************************************************
* Xenia : Xbox 360 Emulator Research Project *
******************************************************************************
* Copyright 2013 Ben Vanik. All rights reserved. *
* Released under the BSD license - see LICENSE in the root for more details. *
******************************************************************************
*/
#include <alloy/backend/x64/x64_code_cache.h>
#include <alloy/backend/x64/tracing.h>
using namespace alloy;
using namespace alloy::backend;
using namespace alloy::backend::x64;
X64CodeCache::X64CodeCache(size_t chunk_size) :
chunk_size_(chunk_size),
head_chunk_(NULL), active_chunk_(NULL) {
lock_ = AllocMutex();
}
X64CodeCache::~X64CodeCache() {
LockMutex(lock_);
auto chunk = head_chunk_;
while (chunk) {
auto next = chunk->next;
delete chunk;
chunk = next;
}
head_chunk_ = NULL;
UnlockMutex(lock_);
FreeMutex(lock_);
}
int X64CodeCache::Initialize() {
return 0;
}
void* X64CodeCache::PlaceCode(void* machine_code, size_t code_size) {
// Always move the code to land on 16b alignment. We do this by rounding up
// to 16b so that all offsets are aligned.
code_size = XEROUNDUP(code_size, 16);
LockMutex(lock_);
if (active_chunk_) {
if (active_chunk_->capacity - active_chunk_->offset < code_size) {
auto next = active_chunk_->next;
if (!next) {
XEASSERT(code_size < chunk_size_); // need to support larger chunks
next = new CodeChunk(chunk_size_);
active_chunk_->next = next;
}
active_chunk_ = next;
}
} else {
head_chunk_ = active_chunk_ = new CodeChunk(chunk_size_);
}
void* final_address = active_chunk_->buffer + active_chunk_->offset;
active_chunk_->offset += code_size;
UnlockMutex(lock_);
xe_copy_struct(final_address, machine_code, code_size);
// This isn't needed on x64 (probably), but is convention.
FlushInstructionCache(GetCurrentProcess(), final_address, code_size);
return final_address;
}
X64CodeCache::CodeChunk::CodeChunk(size_t chunk_size) :
next(NULL),
capacity(chunk_size), buffer(0), offset(0) {
buffer = (uint8_t*)VirtualAlloc(
NULL, capacity,
MEM_RESERVE | MEM_COMMIT,
PAGE_EXECUTE_READWRITE);
}
X64CodeCache::CodeChunk::~CodeChunk() {
if (buffer) {
VirtualFree(buffer, 0, MEM_RELEASE);
}
}
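PlaceCode() is a simple bump allocator over RWX chunks: sizes are rounded up to 16 bytes so every placement stays 16-byte aligned, the chunk offset is reserved under the mutex, and the actual copy and instruction-cache flush happen after the lock is released. Below is a minimal usage sketch (illustrative, not part of the commit) that places a hand-assembled "mov eax, 1; ret" into the cache and calls it.

// Minimal usage sketch (illustrative, not part of the commit).
#include <alloy/backend/x64/x64_code_cache.h>

int RunTinyStub() {
  alloy::backend::x64::X64CodeCache cache;
  if (cache.Initialize()) {
    return -1;
  }
  uint8_t stub[] = {
    0xB8, 0x01, 0x00, 0x00, 0x00,  // mov eax, 1
    0xC3,                          // ret
  };
  // Copies the bytes into an executable chunk (16-byte aligned) and flushes
  // the instruction cache before handing back the final address.
  void* fn = cache.PlaceCode(stub, sizeof(stub));
  return ((int (*)())fn)();  // returns 1
}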

View File

@@ -0,0 +1,60 @@
/**
******************************************************************************
* Xenia : Xbox 360 Emulator Research Project *
******************************************************************************
* Copyright 2013 Ben Vanik. All rights reserved. *
* Released under the BSD license - see LICENSE in the root for more details. *
******************************************************************************
*/
#ifndef ALLOY_BACKEND_X64_X64_CODE_CACHE_H_
#define ALLOY_BACKEND_X64_X64_CODE_CACHE_H_
#include <alloy/core.h>
namespace alloy {
namespace backend {
namespace x64 {
class X64CodeCache {
public:
X64CodeCache(size_t chunk_size = DEFAULT_CHUNK_SIZE);
virtual ~X64CodeCache();
int Initialize();
// TODO(benvanik): ELF serialization/etc
// TODO(benvanik): keep track of code blocks
// TODO(benvanik): padding/guards/etc
void* PlaceCode(void* machine_code, size_t code_size);
private:
class CodeChunk {
public:
CodeChunk(size_t chunk_size);
~CodeChunk();
public:
CodeChunk* next;
size_t capacity;
uint8_t* buffer;
size_t offset;
};
private:
static const size_t DEFAULT_CHUNK_SIZE = 4 * 1024 * 1024;
Mutex* lock_;
size_t chunk_size_;
CodeChunk* head_chunk_;
CodeChunk* active_chunk_;
};
} // namespace x64
} // namespace backend
} // namespace alloy
#endif // ALLOY_BACKEND_X64_X64_CODE_CACHE_H_

View File

@@ -10,23 +10,98 @@
#include <alloy/backend/x64/x64_emitter.h>
#include <alloy/backend/x64/x64_backend.h>
#include <alloy/backend/x64/x64_code_cache.h>
#include <alloy/backend/x64/lir/lir_builder.h>
#include <third_party/xbyak/xbyak/xbyak.h>
using namespace alloy;
using namespace alloy::backend;
using namespace alloy::backend::x64;
using namespace alloy::backend::x64::lir;
using namespace alloy::runtime;
using namespace Xbyak;
namespace alloy {
namespace backend {
namespace x64 {
class XbyakAllocator : public Allocator {
public:
virtual bool useProtect() const { return false; }
};
class XbyakGenerator : public CodeGenerator {
public:
XbyakGenerator(XbyakAllocator* allocator);
virtual ~XbyakGenerator();
void* Emplace(X64CodeCache* code_cache);
int Emit(LIRBuilder* builder);
private:
};
} // namespace x64
} // namespace backend
} // namespace alloy
X64Emitter::X64Emitter(X64Backend* backend) :
backend_(backend) {
backend_(backend),
code_cache_(backend->code_cache()) {
allocator_ = new XbyakAllocator();
generator_ = new XbyakGenerator(allocator_);
}
X64Emitter::~X64Emitter() {
delete generator_;
delete allocator_;
}
int X64Emitter::Initialize() {
return 0;
}
void X64Emitter::Reset() {
int X64Emitter::Emit(
LIRBuilder* builder, void*& out_code_address, size_t& out_code_size) {
// Fill the generator with code.
int result = generator_->Emit(builder);
if (result) {
return result;
}
// Copy the final code to the cache and relocate it.
out_code_size = generator_->getSize();
out_code_address = generator_->Emplace(code_cache_);
return 0;
}
XbyakGenerator::XbyakGenerator(XbyakAllocator* allocator) :
CodeGenerator(1 * 1024 * 1024, AutoGrow, allocator) {
}
XbyakGenerator::~XbyakGenerator() {
}
void* XbyakGenerator::Emplace(X64CodeCache* code_cache) {
// To avoid changing xbyak, we do a switcharoo here.
// top_ points to the Xbyak buffer, and since we are in AutoGrow mode
// it has pending relocations. We copy the top_ to our buffer, swap the
// pointer, relocate, then return the original scratch pointer for use.
uint8_t* old_address = top_;
void* new_address = code_cache->PlaceCode(top_, size_);
top_ = (uint8_t*)new_address;
ready();
top_ = old_address;
reset();
return new_address;
}
int XbyakGenerator::Emit(LIRBuilder* builder) {
//
xor(rax, rax);
ret();
return 0;
}
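The placeholder Emit() only produces a "xor rax, rax; ret" stub; the interesting part is Emplace(), which temporarily points Xbyak's top_ at the cache allocation so that ready() applies the AutoGrow relocations against the final address. The sketch below (not part of the commit) generates the same kind of stub with Xbyak directly and runs it in place, skipping the code cache and the relocation switcharoo; depending on Xbyak's XBYAK_NO_OP_NAMES setting the mnemonic is spelled xor_ or plain xor.

// Illustrative stand-alone sketch: the stub Emit() currently produces,
// generated and called without going through X64CodeCache::PlaceCode.
#include <third_party/xbyak/xbyak/xbyak.h>

struct ReturnZeroGenerator : Xbyak::CodeGenerator {
  ReturnZeroGenerator() {
    xor_(rax, rax);  // zero the integer return register (spelled xor() above)
    ret();
  }
};

int main() {
  ReturnZeroGenerator gen;
  auto fn = (uint64_t (*)())gen.getCode();  // default buffer is already executable
  return (int)fn();  // returns 0
}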

View File

@@ -18,7 +18,11 @@ namespace backend {
namespace x64 {
class X64Backend;
class X64CodeCache;
namespace lir { class LIRBuilder; }
class XbyakAllocator;
class XbyakGenerator;
class X64Emitter {
public:
@@ -27,10 +31,14 @@ public:
int Initialize();
void Reset();
int Emit(lir::LIRBuilder* builder,
void*& out_code_address, size_t& out_code_size);
private:
X64Backend* backend_;
X64Backend* backend_;
X64CodeCache* code_cache_;
XbyakAllocator* allocator_;
XbyakGenerator* generator_;
};