Oh my. Basic CFA/DFA, local variable support, misc fixes, etc.

Ben Vanik 2014-02-02 00:33:57 -08:00
parent b29276e167
commit bca349b302
37 changed files with 3048 additions and 28 deletions

View File

@ -18,6 +18,8 @@
DECLARE_bool(debug);
DECLARE_bool(always_disasm);
DECLARE_bool(validate_hir);
DECLARE_uint64(break_on_instruction);
DECLARE_uint64(break_on_memory);

View File

@ -21,10 +21,12 @@ using namespace alloy;
DEFINE_bool(debug, DEFAULT_DEBUG_FLAG,
"Allow debugging and retain debug information.");
DEFINE_bool(always_disasm, false,
"Always add debug info to functions, even when no debugger is attached.");
DEFINE_bool(validate_hir, false,
"Perform validation checks on the HIR during compilation.");
// Breakpoints:
DEFINE_uint64(break_on_instruction, 0,
"int3 before the given guest address is executed.");

View File

@ -74,6 +74,15 @@ int IVMAssembler::Assemble(
builder->ResetLabelTags();
// Function prologue.
size_t stack_size = 0;
auto locals = builder->locals();
for (auto it = locals.begin(); it != locals.end(); ++it) {
auto slot = *it;
size_t stack_offset = stack_size;
slot->set_constant(stack_offset);
stack_size += GetTypeSize(slot->type);
}
ctx.stack_size = stack_size;
auto block = builder->first_block();
while (block) {
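For contrast with the x64 backend further down: the IVM packs locals back to back with no natural-size alignment, so each slot's byte offset is just the running sum of the preceding type sizes. A minimal standalone sketch of the same packing (the enum and Slot type here are hypothetical stand-ins, not the actual Alloy API):
#include <cstddef>
#include <vector>
enum TypeName { INT8_TYPE, INT16_TYPE, INT32_TYPE, INT64_TYPE,
FLOAT32_TYPE, FLOAT64_TYPE, VEC128_TYPE };
struct Slot { TypeName type; size_t offset; };
static size_t GetTypeSize(TypeName t) {
static const size_t sizes[] = { 1, 2, 4, 8, 4, 8, 16 };
return sizes[t];
}
// Offsets accumulate in declaration order and the total becomes
// ctx.stack_size, mirroring the loop in IVMAssembler::Assemble above.
static size_t PackLocals(std::vector<Slot>& locals) {
size_t stack_size = 0;
for (auto& slot : locals) {
slot.offset = stack_size; // cf. slot->set_constant(stack_offset)
stack_size += GetTypeSize(slot.type);
}
return stack_size; // e.g. {i32, v128} -> 4 + 16 = 20
}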

View File

@ -33,6 +33,7 @@ IVMFunction::~IVMFunction() {
void IVMFunction::Setup(TranslationContext& ctx) {
register_count_ = ctx.register_count;
stack_size_ = ctx.stack_size;
intcode_count_ = ctx.intcode_count;
intcodes_ = (IntCode*)ctx.intcode_arena->CloneContents();
source_map_count_ = ctx.source_map_count;
@ -108,11 +109,13 @@ int IVMFunction::CallImpl(ThreadState* thread_state) {
// Setup register file on stack.
auto stack = (IVMStack*)thread_state->backend_data();
auto register_file = (Register*)stack->Alloc(register_count_);
auto local_stack = (uint8_t*)alloca(stack_size_);
Memory* memory = thread_state->memory();
IntCodeState ics;
ics.rf = register_file;
ics.locals = local_stack;
ics.context = (uint8_t*)thread_state->raw_context();
ics.membase = memory->membase();
ics.did_carry = 0;
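// n.b. the locals block is alloca'd afresh on every call, so each invocation
// gets its own locals; guest recursion needs no extra bookkeeping, at the
// cost of stack_size_ bytes of host stack per active frame.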

View File

@ -38,9 +38,10 @@ private:
void OnBreakpointHit(runtime::ThreadState* thread_state, IntCode* i);
private:
size_t register_count_;
size_t stack_size_;
size_t intcode_count_;
IntCode* intcodes_;
size_t source_map_count_;
SourceMapEntry* source_map_;
};

View File

@ -1342,6 +1342,88 @@ int Translate_LOAD_CLOCK(TranslationContext& ctx, Instr* i) {
return DispatchToC(ctx, i, IntCode_LOAD_CLOCK);
}
uint32_t IntCode_LOAD_LOCAL_I8(IntCodeState& ics, const IntCode* i) {
ics.rf[i->dest_reg].i8 = *((int8_t*)(ics.locals + ics.rf[i->src1_reg].u64));
return IA_NEXT;
}
uint32_t IntCode_LOAD_LOCAL_I16(IntCodeState& ics, const IntCode* i) {
ics.rf[i->dest_reg].i16 = *((int16_t*)(ics.locals + ics.rf[i->src1_reg].u64));
return IA_NEXT;
}
uint32_t IntCode_LOAD_LOCAL_I32(IntCodeState& ics, const IntCode* i) {
ics.rf[i->dest_reg].i32 = *((int32_t*)(ics.locals + ics.rf[i->src1_reg].u64));
return IA_NEXT;
}
uint32_t IntCode_LOAD_LOCAL_I64(IntCodeState& ics, const IntCode* i) {
ics.rf[i->dest_reg].i64 = *((int64_t*)(ics.locals + ics.rf[i->src1_reg].u64));
return IA_NEXT;
}
uint32_t IntCode_LOAD_LOCAL_F32(IntCodeState& ics, const IntCode* i) {
ics.rf[i->dest_reg].f32 = *((float*)(ics.locals + ics.rf[i->src1_reg].u64));
return IA_NEXT;
}
uint32_t IntCode_LOAD_LOCAL_F64(IntCodeState& ics, const IntCode* i) {
ics.rf[i->dest_reg].f64 = *((double*)(ics.locals + ics.rf[i->src1_reg].u64));
return IA_NEXT;
}
uint32_t IntCode_LOAD_LOCAL_V128(IntCodeState& ics, const IntCode* i) {
ics.rf[i->dest_reg].v128 = *((vec128_t*)(ics.locals + ics.rf[i->src1_reg].u64));
return IA_NEXT;
}
int Translate_LOAD_LOCAL(TranslationContext& ctx, Instr* i) {
static IntCodeFn fns[] = {
IntCode_LOAD_LOCAL_I8,
IntCode_LOAD_LOCAL_I16,
IntCode_LOAD_LOCAL_I32,
IntCode_LOAD_LOCAL_I64,
IntCode_LOAD_LOCAL_F32,
IntCode_LOAD_LOCAL_F64,
IntCode_LOAD_LOCAL_V128,
};
return DispatchToC(ctx, i, fns[i->dest->type]);
}
uint32_t IntCode_STORE_LOCAL_I8(IntCodeState& ics, const IntCode* i) {
*((int8_t*)(ics.locals + ics.rf[i->src1_reg].u64)) = ics.rf[i->src2_reg].i8;
return IA_NEXT;
}
uint32_t IntCode_STORE_LOCAL_I16(IntCodeState& ics, const IntCode* i) {
*((int16_t*)(ics.locals + ics.rf[i->src1_reg].u64)) = ics.rf[i->src2_reg].i16;
return IA_NEXT;
}
uint32_t IntCode_STORE_LOCAL_I32(IntCodeState& ics, const IntCode* i) {
*((int32_t*)(ics.locals + ics.rf[i->src1_reg].u64)) = ics.rf[i->src2_reg].i32;
return IA_NEXT;
}
uint32_t IntCode_STORE_LOCAL_I64(IntCodeState& ics, const IntCode* i) {
*((int64_t*)(ics.locals + ics.rf[i->src1_reg].u64)) = ics.rf[i->src2_reg].i64;
return IA_NEXT;
}
uint32_t IntCode_STORE_LOCAL_F32(IntCodeState& ics, const IntCode* i) {
*((float*)(ics.locals + ics.rf[i->src1_reg].u64)) = ics.rf[i->src2_reg].f32;
return IA_NEXT;
}
uint32_t IntCode_STORE_LOCAL_F64(IntCodeState& ics, const IntCode* i) {
*((double*)(ics.locals + ics.rf[i->src1_reg].u64)) = ics.rf[i->src2_reg].f64;
return IA_NEXT;
}
uint32_t IntCode_STORE_LOCAL_V128(IntCodeState& ics, const IntCode* i) {
*((vec128_t*)(ics.locals + ics.rf[i->src1_reg].u64)) = ics.rf[i->src2_reg].v128;
return IA_NEXT;
}
int Translate_STORE_LOCAL(TranslationContext& ctx, Instr* i) {
static IntCodeFn fns[] = {
IntCode_STORE_LOCAL_I8,
IntCode_STORE_LOCAL_I16,
IntCode_STORE_LOCAL_I32,
IntCode_STORE_LOCAL_I64,
IntCode_STORE_LOCAL_F32,
IntCode_STORE_LOCAL_F64,
IntCode_STORE_LOCAL_V128,
};
return DispatchToC(ctx, i, fns[i->src2.value->type]);
}
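// Both fns[] tables above are indexed directly by TypeName, so they assume
// the enum stays contiguous in this I8..V128 order. A guard one could place
// inside each Translate_* (a sketch; assumes C++11 static_assert and that
// VEC128_TYPE is the last enumerator):
//   static_assert(sizeof(fns) / sizeof(fns[0]) == VEC128_TYPE + 1,
//                 "fns[] must cover every TypeName, in enum order");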
uint32_t IntCode_LOAD_CONTEXT_I8(IntCodeState& ics, const IntCode* i) {
ics.rf[i->dest_reg].i8 = *((int8_t*)(ics.context + ics.rf[i->src1_reg].u64));
DPRINT("%d (%X) = ctx i8 +%d\n", ics.rf[i->dest_reg].i8, ics.rf[i->dest_reg].u8, ics.rf[i->src1_reg].u64);
@ -4039,6 +4121,9 @@ static const TranslateFn dispatch_table[] = {
Translate_LOAD_CLOCK,
Translate_LOAD_LOCAL,
Translate_STORE_LOCAL,
Translate_LOAD_CONTEXT,
Translate_STORE_CONTEXT,

View File

@ -41,6 +41,7 @@ typedef union {
typedef struct {
Register* rf;
uint8_t* locals;
uint8_t* context;
uint8_t* membase;
int8_t did_carry;
@ -103,6 +104,7 @@ typedef struct {
Arena* source_map_arena;
Arena* scratch_arena;
LabelRef* label_ref_head;
size_t stack_size;
} TranslationContext;

View File

@ -815,6 +815,117 @@ table->AddSequence(OPCODE_LOAD_CLOCK, [](X64Emitter& e, Instr*& i) {
return true;
});
// --------------------------------------------------------------------------
// Stack Locals
// --------------------------------------------------------------------------
table->AddSequence(OPCODE_LOAD_LOCAL, [](X64Emitter& e, Instr*& i) {
auto addr = e.rsp + i->src1.value->AsUint32();
if (i->Match(SIG_TYPE_I8, SIG_TYPE_IGNORE)) {
Reg8 dest;
e.BeginOp(i->dest, dest, REG_DEST);
e.mov(dest, e.byte[addr]);
e.EndOp(dest);
} else if (i->Match(SIG_TYPE_I16, SIG_TYPE_IGNORE)) {
Reg16 dest;
e.BeginOp(i->dest, dest, REG_DEST);
e.mov(dest, e.word[addr]);
e.EndOp(dest);
} else if (i->Match(SIG_TYPE_I32, SIG_TYPE_IGNORE)) {
Reg32 dest;
e.BeginOp(i->dest, dest, REG_DEST);
e.mov(dest, e.dword[addr]);
e.EndOp(dest);
} else if (i->Match(SIG_TYPE_I64, SIG_TYPE_IGNORE)) {
Reg64 dest;
e.BeginOp(i->dest, dest, REG_DEST);
e.mov(dest, e.qword[addr]);
e.EndOp(dest);
} else if (i->Match(SIG_TYPE_F32, SIG_TYPE_IGNORE)) {
Xmm dest;
e.BeginOp(i->dest, dest, REG_DEST);
e.movss(dest, e.dword[addr]);
e.EndOp(dest);
} else if (i->Match(SIG_TYPE_F64, SIG_TYPE_IGNORE)) {
Xmm dest;
e.BeginOp(i->dest, dest, REG_DEST);
e.movsd(dest, e.qword[addr]);
e.EndOp(dest);
} else if (i->Match(SIG_TYPE_V128, SIG_TYPE_IGNORE)) {
Xmm dest;
e.BeginOp(i->dest, dest, REG_DEST);
// NOTE: we always know we are aligned.
e.movaps(dest, e.ptr[addr]);
e.EndOp(dest);
} else {
ASSERT_INVALID_TYPE();
}
i = e.Advance(i);
return true;
});
table->AddSequence(OPCODE_STORE_LOCAL, [](X64Emitter& e, Instr*& i) {
auto addr = e.rsp + i->src1.value->AsUint32();
if (i->Match(SIG_TYPE_X, SIG_TYPE_IGNORE, SIG_TYPE_I8)) {
Reg8 src;
e.BeginOp(i->src2.value, src, 0);
e.mov(e.byte[addr], src);
e.EndOp(src);
} else if (i->Match(SIG_TYPE_X, SIG_TYPE_IGNORE, SIG_TYPE_I8C)) {
e.mov(e.byte[addr], i->src2.value->constant.i8);
} else if (i->Match(SIG_TYPE_X, SIG_TYPE_IGNORE, SIG_TYPE_I16)) {
Reg16 src;
e.BeginOp(i->src2.value, src, 0);
e.mov(e.word[addr], src);
e.EndOp(src);
} else if (i->Match(SIG_TYPE_X, SIG_TYPE_IGNORE, SIG_TYPE_I16C)) {
e.mov(e.word[addr], i->src2.value->constant.i16);
} else if (i->Match(SIG_TYPE_X, SIG_TYPE_IGNORE, SIG_TYPE_I32)) {
Reg32 src;
e.BeginOp(i->src2.value, src, 0);
e.mov(e.dword[addr], src);
e.EndOp(src);
} else if (i->Match(SIG_TYPE_X, SIG_TYPE_IGNORE, SIG_TYPE_I32C)) {
e.mov(e.dword[addr], i->src2.value->constant.i32);
} else if (i->Match(SIG_TYPE_X, SIG_TYPE_IGNORE, SIG_TYPE_I64)) {
Reg64 src;
e.BeginOp(i->src2.value, src, 0);
e.mov(e.qword[addr], src);
e.EndOp(src);
} else if (i->Match(SIG_TYPE_X, SIG_TYPE_IGNORE, SIG_TYPE_I64C)) {
MovMem64(e, addr, i->src2.value->constant.i64);
} else if (i->Match(SIG_TYPE_X, SIG_TYPE_IGNORE, SIG_TYPE_F32)) {
Xmm src;
e.BeginOp(i->src2.value, src, 0);
e.movss(e.dword[addr], src);
e.EndOp(src);
} else if (i->Match(SIG_TYPE_X, SIG_TYPE_IGNORE, SIG_TYPE_F32C)) {
e.mov(e.dword[addr], i->src2.value->constant.i32);
} else if (i->Match(SIG_TYPE_X, SIG_TYPE_IGNORE, SIG_TYPE_F64)) {
Xmm src;
e.BeginOp(i->src2.value, src, 0);
e.movsd(e.qword[addr], src);
e.EndOp(src);
} else if (i->Match(SIG_TYPE_X, SIG_TYPE_IGNORE, SIG_TYPE_F64C)) {
MovMem64(e, addr, i->src2.value->constant.i64);
} else if (i->Match(SIG_TYPE_X, SIG_TYPE_IGNORE, SIG_TYPE_V128)) {
Xmm src;
e.BeginOp(i->src2.value, src, 0);
// NOTE: we always know we are aligned.
e.movaps(e.ptr[addr], src);
e.EndOp(src);
} else if (i->Match(SIG_TYPE_X, SIG_TYPE_IGNORE, SIG_TYPE_V128C)) {
// TODO(benvanik): check zero
// TODO(benvanik): correct order?
MovMem64(e, addr, i->src2.value->constant.v128.low);
MovMem64(e, addr + 8, i->src2.value->constant.v128.high);
} else {
ASSERT_INVALID_TYPE();
}
i = e.Advance(i);
return true;
});
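// Note: all of the sequences above address locals as [rsp + constant
// offset], relying on X64Emitter::Emit (below) having assigned each slot an
// offset and reserved the space in the prologue. The movaps forms further
// assume v128 offsets are 16-byte aligned, which holds as long as offsets
// are naturally aligned and the prologue keeps rsp 16-byte aligned.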
// --------------------------------------------------------------------------
// Context
// --------------------------------------------------------------------------
@ -2892,6 +3003,7 @@ table->AddSequence(OPCODE_ATOMIC_EXCHANGE, [](X64Emitter& e, Instr*& i) {
i->src1.value, src1, 0,
i->src2.value, src2, 0);
e.mov(dest, src2);
e.lock();
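// n.b. x86 xchg with a memory operand asserts LOCK implicitly, so the
// explicit lock prefix above is redundant (though harmless).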
e.xchg(e.dword[src1], dest);
e.EndOp(dest, src1, src2);
} else {

View File

@ -97,6 +97,8 @@ void* X64Emitter::Emplace(size_t stack_size) {
return new_address;
}
#define XEALIGN(value, align) (((value) + (align) - 1) & ~((align) - 1))
int X64Emitter::Emit(HIRBuilder* builder) {
// These are the registers we will not be using. All others are fair game.
const uint32_t reserved_regs =
@ -120,6 +122,19 @@ int X64Emitter::Emit(HIRBuilder* builder) {
GetRegBit(xmm4) |
GetRegBit(xmm5);
// Calculate stack size. We need to align things to their natural sizes.
// This could be much better (sort by type/etc).
auto locals = builder->locals();
size_t stack_offset = 0;
for (auto it = locals.begin(); it != locals.end(); ++it) {
auto slot = *it;
size_t type_size = GetTypeSize(slot->type);
// Align to natural size.
stack_offset = XEALIGN(stack_offset, type_size);
slot->set_constant(stack_offset);
stack_offset += type_size;
}
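// Worked example of the packing above (illustrative only): for locals
// {i8, i32, f64, v128} the offsets come out as
//   i8   -> XEALIGN(0, 1)  = 0,  next offset 1
//   i32  -> XEALIGN(1, 4)  = 4,  next offset 8
//   f64  -> XEALIGN(8, 8)  = 8,  next offset 16
//   v128 -> XEALIGN(16, 16) = 16, next offset 32 (32 bytes of locals total).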
// Function prolog.
// Must be 16b aligned.
// Windows is very strict about the form of this and the epilog:

View File

@ -11,11 +11,14 @@
#define ALLOY_COMPILER_COMPILER_PASSES_H_
#include <alloy/compiler/passes/constant_propagation_pass.h>
#include <alloy/compiler/passes/control_flow_analysis_pass.h>
#include <alloy/compiler/passes/context_promotion_pass.h>
#include <alloy/compiler/passes/data_flow_analysis_pass.h>
#include <alloy/compiler/passes/dead_code_elimination_pass.h>
#include <alloy/compiler/passes/finalization_pass.h>
//#include <alloy/compiler/passes/dead_store_elimination_pass.h>
#include <alloy/compiler/passes/simplification_pass.h>
#include <alloy/compiler/passes/validation_pass.h>
#include <alloy/compiler/passes/value_reduction_pass.h>
// TODO:

View File

@ -0,0 +1,72 @@
/**
******************************************************************************
* Xenia : Xbox 360 Emulator Research Project *
******************************************************************************
* Copyright 2014 Ben Vanik. All rights reserved. *
* Released under the BSD license - see LICENSE in the root for more details. *
******************************************************************************
*/
#include <alloy/compiler/passes/control_flow_analysis_pass.h>
#include <alloy/backend/backend.h>
#include <alloy/compiler/compiler.h>
#include <alloy/runtime/runtime.h>
#pragma warning(push)
#pragma warning(disable : 4244)
#pragma warning(disable : 4267)
#include <llvm/ADT/BitVector.h>
#pragma warning(pop)
using namespace alloy;
using namespace alloy::backend;
using namespace alloy::compiler;
using namespace alloy::compiler::passes;
using namespace alloy::frontend;
using namespace alloy::hir;
using namespace alloy::runtime;
ControlFlowAnalysisPass::ControlFlowAnalysisPass() :
CompilerPass() {
}
ControlFlowAnalysisPass::~ControlFlowAnalysisPass() {
}
int ControlFlowAnalysisPass::Run(HIRBuilder* builder) {
// TODO(benvanik): reset edges for all blocks? Needed to be re-runnable.
// Add edges.
auto block = builder->first_block();
while (block) {
auto instr = block->instr_head;
while (instr) {
if (instr->opcode->flags & OPCODE_FLAG_BRANCH) {
if (instr->opcode == &OPCODE_BRANCH_info) {
auto label = instr->src1.label;
builder->AddEdge(block, label->block, Edge::UNCONDITIONAL);
} else if (instr->opcode == &OPCODE_BRANCH_TRUE_info ||
instr->opcode == &OPCODE_BRANCH_FALSE_info) {
auto label = instr->src2.label;
builder->AddEdge(block, label->block, 0);
}
}
instr = instr->next;
}
block = block->next;
}
// Mark dominators.
block = builder->first_block();
while (block) {
if (block->incoming_edge_head &&
!block->incoming_edge_head->incoming_next) {
block->incoming_edge_head->flags |= Edge::DOMINATES;
}
block = block->next;
}
return 0;
}
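The dominator marking here is a deliberately cheap approximation: an edge is flagged DOMINATES only when it is the block's sole incoming edge, in which case its source block trivially dominates the destination. Join points with multiple predecessors get no dominance information from this pass; a full analysis (e.g. the Cooper-Harvey-Kennedy iterative algorithm) would be needed for that.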

View File

@ -0,0 +1,37 @@
/**
******************************************************************************
* Xenia : Xbox 360 Emulator Research Project *
******************************************************************************
* Copyright 2014 Ben Vanik. All rights reserved. *
* Released under the BSD license - see LICENSE in the root for more details. *
******************************************************************************
*/
#ifndef ALLOY_COMPILER_PASSES_CONTROL_FLOW_ANALYSIS_PASS_H_
#define ALLOY_COMPILER_PASSES_CONTROL_FLOW_ANALYSIS_PASS_H_
#include <alloy/compiler/compiler_pass.h>
namespace alloy {
namespace compiler {
namespace passes {
class ControlFlowAnalysisPass : public CompilerPass {
public:
ControlFlowAnalysisPass();
virtual ~ControlFlowAnalysisPass();
virtual int Run(hir::HIRBuilder* builder);
private:
};
} // namespace passes
} // namespace compiler
} // namespace alloy
#endif // ALLOY_COMPILER_PASSES_CONTROL_FLOW_ANALYSIS_PASS_H_

View File

@ -0,0 +1,203 @@
/**
******************************************************************************
* Xenia : Xbox 360 Emulator Research Project *
******************************************************************************
* Copyright 2014 Ben Vanik. All rights reserved. *
* Released under the BSD license - see LICENSE in the root for more details. *
******************************************************************************
*/
#include <alloy/compiler/passes/data_flow_analysis_pass.h>
#include <alloy/backend/backend.h>
#include <alloy/compiler/compiler.h>
#include <alloy/runtime/runtime.h>
#pragma warning(push)
#pragma warning(disable : 4244)
#pragma warning(disable : 4267)
#include <llvm/ADT/BitVector.h>
#pragma warning(pop)
using namespace alloy;
using namespace alloy::backend;
using namespace alloy::compiler;
using namespace alloy::compiler::passes;
using namespace alloy::frontend;
using namespace alloy::hir;
using namespace alloy::runtime;
DataFlowAnalysisPass::DataFlowAnalysisPass() :
CompilerPass() {
}
DataFlowAnalysisPass::~DataFlowAnalysisPass() {
}
int DataFlowAnalysisPass::Run(HIRBuilder* builder) {
auto arena = builder->arena();
// Linearize blocks so that we can detect cycles and propagate dependencies.
uint32_t block_count = LinearizeBlocks(builder);
// Analyze value flow and add locals as needed.
AnalyzeFlow(builder, block_count);
return 0;
}
uint32_t DataFlowAnalysisPass::LinearizeBlocks(HIRBuilder* builder) {
// TODO(benvanik): actually do this - we cheat now knowing that they are in
// sequential order.
uint32_t block_ordinal = 0;
auto block = builder->first_block();
while (block) {
block->ordinal = block_ordinal++;
block = block->next;
}
return block_ordinal;
}
void DataFlowAnalysisPass::AnalyzeFlow(HIRBuilder* builder,
uint32_t block_count) {
uint32_t max_value_estimate =
builder->max_value_ordinal() + 1 + block_count * 4;
// Stash for value map. We may want to maintain this during building.
auto arena = builder->arena();
Value** value_map = (Value**)arena->Alloc(
sizeof(Value*) * max_value_estimate);
// Allocate incoming bitvectors for use by blocks. We don't need outgoing
// because they are only used during the block iteration.
// Mapped by block ordinal.
// TODO(benvanik): cache this list, grow as needed, etc.
auto incoming_bitvectors = (llvm::BitVector**)arena->Alloc(
sizeof(llvm::BitVector*) * block_count);
for (auto n = 0u; n < block_count; n++) {
incoming_bitvectors[n] = new llvm::BitVector(max_value_estimate);
}
// Walk blocks in reverse and calculate incoming/outgoing values.
auto block = builder->last_block();
while (block) {
// Allocate bitsets based on the max value number.
block->incoming_values = incoming_bitvectors[block->ordinal];
auto& incoming_values = *block->incoming_values;
// Walk instructions and gather up incoming values.
auto instr = block->instr_head;
while (instr) {
uint32_t signature = instr->opcode->signature;
#define SET_INCOMING_VALUE(v) \
if (v->def && v->def->block != block) { \
incoming_values.set(v->ordinal); \
} \
XEASSERT(v->ordinal < max_value_estimate); \
value_map[v->ordinal] = v;
if (GET_OPCODE_SIG_TYPE_SRC1(signature) == OPCODE_SIG_TYPE_V) {
SET_INCOMING_VALUE(instr->src1.value);
}
if (GET_OPCODE_SIG_TYPE_SRC2(signature) == OPCODE_SIG_TYPE_V) {
SET_INCOMING_VALUE(instr->src2.value);
}
if (GET_OPCODE_SIG_TYPE_SRC3(signature) == OPCODE_SIG_TYPE_V) {
SET_INCOMING_VALUE(instr->src3.value);
}
#undef SET_INCOMING_VALUE
instr = instr->next;
}
// Add all successor incoming values to our outgoing, as we need to
// pass them through.
llvm::BitVector outgoing_values(max_value_estimate);
auto outgoing_edge = block->outgoing_edge_head;
while (outgoing_edge) {
if (outgoing_edge->dest->ordinal > block->ordinal) {
outgoing_values |= *outgoing_edge->dest->incoming_values;
}
outgoing_edge = outgoing_edge->outgoing_next;
}
incoming_values |= outgoing_values;
// Add stores for all outgoing values.
auto outgoing_ordinal = outgoing_values.find_first();
while (outgoing_ordinal != -1) {
Value* src_value = value_map[outgoing_ordinal];
XEASSERTNOTNULL(src_value);
if (!src_value->local_slot) {
src_value->local_slot = builder->AllocLocal(src_value->type);
}
builder->StoreLocal(src_value->local_slot, src_value);
// If we are in the block the value was defined in:
if (src_value->def->block == block) {
// Move the store to right after the def, or as soon after
// as we can (respecting PAIRED flags).
auto def_next = src_value->def->next;
while (def_next && def_next->opcode->flags & OPCODE_FLAG_PAIRED_PREV) {
def_next = def_next->next;
}
XEASSERTNOTNULL(def_next);
builder->last_instr()->MoveBefore(def_next);
// We don't need it in the incoming list.
incoming_values.reset(outgoing_ordinal);
} else {
// Eh, just throw at the end, before the first branch.
auto tail = block->instr_tail;
while (tail && tail->opcode->flags & OPCODE_FLAG_BRANCH) {
tail = tail->prev;
}
XEASSERTNOTZERO(tail);
builder->last_instr()->MoveBefore(tail->next);
}
outgoing_ordinal = outgoing_values.find_next(outgoing_ordinal);
}
// Add loads for all incoming values and rename them in the block.
auto incoming_ordinal = incoming_values.find_first();
while (incoming_ordinal != -1) {
Value* src_value = value_map[incoming_ordinal];
XEASSERTNOTNULL(src_value);
if (!src_value->local_slot) {
src_value->local_slot = builder->AllocLocal(src_value->type);
}
Value* local_value = builder->LoadLocal(src_value->local_slot);
builder->last_instr()->MoveBefore(block->instr_head);
// Swap uses of original value with the local value.
auto instr = block->instr_head;
while (instr) {
uint32_t signature = instr->opcode->signature;
if (GET_OPCODE_SIG_TYPE_SRC1(signature) == OPCODE_SIG_TYPE_V) {
if (instr->src1.value == src_value) {
instr->set_src1(local_value);
}
}
if (GET_OPCODE_SIG_TYPE_SRC2(signature) == OPCODE_SIG_TYPE_V) {
if (instr->src2.value == src_value) {
instr->set_src2(local_value);
}
}
if (GET_OPCODE_SIG_TYPE_SRC3(signature) == OPCODE_SIG_TYPE_V) {
if (instr->src3.value == src_value) {
instr->set_src3(local_value);
}
}
instr = instr->next;
}
incoming_ordinal = incoming_values.find_next(incoming_ordinal);
}
block = block->prev;
}
// Cleanup bitvectors.
for (auto n = 0u; n < block_count; n++) {
delete incoming_bitvectors[n];
}
}
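The net effect on the HIR: any value whose definition and uses land in different blocks is spilled to a local just after its def and reloaded at the head of each consuming block, with the uses renamed to the reloaded copy. A hypothetical before/after in the Dump syntax (value numbers invented):
before:
v1 = load_context +288
branch_true v0, label1
label1:
v2 = add v1, v1 ; v1 crosses the block edge
after:
v1 = load_context +288
store_local v8, v1 ; inserted right after the def (respecting PAIRED_PREV)
branch_true v0, label1
label1:
v9 = load_local v8 ; inserted at the block head
v2 = add v9, v9 ; uses renamed to the reloaded value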

View File

@ -0,0 +1,39 @@
/**
******************************************************************************
* Xenia : Xbox 360 Emulator Research Project *
******************************************************************************
* Copyright 2014 Ben Vanik. All rights reserved. *
* Released under the BSD license - see LICENSE in the root for more details. *
******************************************************************************
*/
#ifndef ALLOY_COMPILER_PASSES_DATA_FLOW_ANALYSIS_PASS_H_
#define ALLOY_COMPILER_PASSES_DATA_FLOW_ANALYSIS_PASS_H_
#include <alloy/compiler/compiler_pass.h>
namespace alloy {
namespace compiler {
namespace passes {
class DataFlowAnalysisPass : public CompilerPass {
public:
DataFlowAnalysisPass();
virtual ~DataFlowAnalysisPass();
virtual int Run(hir::HIRBuilder* builder);
private:
uint32_t LinearizeBlocks(hir::HIRBuilder* builder);
void AnalyzeFlow(hir::HIRBuilder* builder, uint32_t block_count);
};
} // namespace passes
} // namespace compiler
} // namespace alloy
#endif // ALLOY_COMPILER_PASSES_DATA_FLOW_ANALYSIS_PASS_H_

View File

@ -59,20 +59,21 @@ int DeadCodeEliminationPass::Run(HIRBuilder* builder) {
// all removed ops with NOP and then do a single pass that removes them
// all.
bool any_instr_removed = false;
bool any_locals_removed = false;
Block* block = builder->first_block();
while (block) {
// Walk instructions in reverse.
Instr* i = block->instr_tail;
while (i) {
auto prev = i->prev;
auto opcode = i->opcode;
if (!(opcode->flags & OPCODE_FLAG_VOLATILE) &&
i->dest && !i->dest->use_head) {
// Has no uses and is not volatile. This instruction can die!
MakeNopRecursive(i);
any_instr_removed = true;
} else if (opcode == &OPCODE_ASSIGN_info) {
// Assignment. These are useless, so just try to remove by completely
// replacing the value.
@ -82,11 +83,31 @@ int DeadCodeEliminationPass::Run(HIRBuilder* builder) {
i = prev;
}
// Walk instructions forward.
i = block->instr_head;
while (i) {
auto next = i->next;
auto opcode = i->opcode;
if (opcode == &OPCODE_STORE_LOCAL_info) {
// Check to see if the store has any intervening uses after the load.
// If not, it can be removed (as the local is just passing through the
// function).
// We do this after the previous pass so that removed code doesn't keep
// the local alive.
if (!CheckLocalUse(i)) {
any_locals_removed = true;
}
}
i = next;
}
block = block->next;
}
// Remove all nops.
if (any_instr_removed) {
Block* block = builder->first_block();
while (block) {
Instr* i = block->instr_head;
@ -102,6 +123,21 @@ int DeadCodeEliminationPass::Run(HIRBuilder* builder) {
}
}
// Remove any locals that no longer have uses.
if (any_locals_removed) {
// TODO(benvanik): local removal/dealloc.
auto& locals = builder->locals();
for (auto it = locals.begin(); it != locals.end();) {
auto value = *it;
if (!value->use_head) {
// Unused, can be removed; erase() hands back the next iterator.
it = locals.erase(it);
} else {
++it;
}
}
}
return 0;
}
@ -150,3 +186,24 @@ void DeadCodeEliminationPass::ReplaceAssignment(Instr* i) {
i->Remove();
}
bool DeadCodeEliminationPass::CheckLocalUse(Instr* i) {
auto slot = i->src1.value;
auto src = i->src2.value;
auto use = src->use_head;
if (use) {
auto use_instr = use->instr;
if (use_instr->opcode != &OPCODE_LOAD_LOCAL_info) {
// A valid use (probably). Keep it.
return true;
}
// Load/store are paired. They can both be removed.
use_instr->Remove();
}
i->Remove();
return false;
}

View File

@ -28,6 +28,7 @@ public:
private:
void MakeNopRecursive(hir::Instr* i);
void ReplaceAssignment(hir::Instr* i);
bool CheckLocalUse(hir::Instr* i);
};

View File

@ -5,6 +5,10 @@
'constant_propagation_pass.h',
'context_promotion_pass.cc',
'context_promotion_pass.h',
'control_flow_analysis_pass.cc',
'control_flow_analysis_pass.h',
'data_flow_analysis_pass.cc',
'data_flow_analysis_pass.h',
'dead_code_elimination_pass.cc',
'dead_code_elimination_pass.h',
'finalization_pass.cc',
@ -13,6 +17,8 @@
#'dead_store_elimination_pass.h',
'simplification_pass.cc',
'simplification_pass.h',
'validation_pass.cc',
'validation_pass.h',
'value_reduction_pass.cc',
'value_reduction_pass.h',
],

View File

@ -0,0 +1,99 @@
/**
******************************************************************************
* Xenia : Xbox 360 Emulator Research Project *
******************************************************************************
* Copyright 2014 Ben Vanik. All rights reserved. *
* Released under the BSD license - see LICENSE in the root for more details. *
******************************************************************************
*/
#include <alloy/compiler/passes/validation_pass.h>
#include <alloy/backend/backend.h>
#include <alloy/compiler/compiler.h>
#include <alloy/runtime/runtime.h>
using namespace alloy;
using namespace alloy::backend;
using namespace alloy::compiler;
using namespace alloy::compiler::passes;
using namespace alloy::frontend;
using namespace alloy::hir;
using namespace alloy::runtime;
ValidationPass::ValidationPass() :
CompilerPass() {
}
ValidationPass::~ValidationPass() {
}
int ValidationPass::Run(HIRBuilder* builder) {
StringBuffer str;
builder->Dump(&str);
printf("%s", str.GetString());
fflush(stdout);
str.Reset();
auto block = builder->first_block();
while (block) {
auto label = block->label_head;
while (label) {
XEASSERT(label->block == block);
if (label->block != block) {
return 1;
}
label = label->next;
}
auto instr = block->instr_head;
while (instr) {
if (ValidateInstruction(block, instr)) {
return 1;
}
instr = instr->next;
}
block = block->next;
}
return 0;
}
int ValidationPass::ValidateInstruction(Block* block, Instr* instr) {
XEASSERT(instr->block == block);
if (instr->block != block) {
return 1;
}
uint32_t signature = instr->opcode->signature;
if (GET_OPCODE_SIG_TYPE_SRC1(signature) == OPCODE_SIG_TYPE_V) {
if (ValidateValue(block, instr, instr->src1.value)) {
return 1;
}
}
if (GET_OPCODE_SIG_TYPE_SRC2(signature) == OPCODE_SIG_TYPE_V) {
if (ValidateValue(block, instr, instr->src2.value)) {
return 1;
}
}
if (GET_OPCODE_SIG_TYPE_SRC3(signature) == OPCODE_SIG_TYPE_V) {
if (ValidateValue(block, instr, instr->src3.value)) {
return 1;
}
}
return 0;
}
int ValidationPass::ValidateValue(Block* block, Instr* instr, Value* value) {
if (value->def) {
/*auto def = value->def;
XEASSERT(def->block == block);
if (def->block != block) {
return 1;
}*/
}
return 0;
}

View File

@ -0,0 +1,39 @@
/**
******************************************************************************
* Xenia : Xbox 360 Emulator Research Project *
******************************************************************************
* Copyright 2014 Ben Vanik. All rights reserved. *
* Released under the BSD license - see LICENSE in the root for more details. *
******************************************************************************
*/
#ifndef ALLOY_COMPILER_PASSES_VALIDATION_PASS_H_
#define ALLOY_COMPILER_PASSES_VALIDATION_PASS_H_
#include <alloy/compiler/compiler_pass.h>
namespace alloy {
namespace compiler {
namespace passes {
class ValidationPass : public CompilerPass {
public:
ValidationPass();
virtual ~ValidationPass();
virtual int Run(hir::HIRBuilder* builder);
private:
int ValidateInstruction(hir::Block* block, hir::Instr* instr);
int ValidateValue(hir::Block* block, hir::Instr* instr, hir::Value* value);
};
} // namespace passes
} // namespace compiler
} // namespace alloy
#endif // ALLOY_COMPILER_PASSES_VALIDATION_PASS_H_

View File

@ -13,7 +13,11 @@
#include <alloy/compiler/compiler.h>
#include <alloy/runtime/runtime.h>
#pragma warning(push)
#pragma warning(disable : 4244)
#pragma warning(disable : 4267)
#include <llvm/ADT/BitVector.h>
#pragma warning(pop)
using namespace alloy;
using namespace alloy::backend;
@ -51,8 +55,7 @@ void ValueReductionPass::ComputeLastUse(Value* value) {
int ValueReductionPass::Run(HIRBuilder* builder) {
// Walk each block and reuse variable ordinals as much as possible.
// Let's hope this is enough.
llvm::BitVector ordinals(builder->max_value_ordinal());
auto block = builder->first_block();
while (block) {
@ -82,7 +85,7 @@ int ValueReductionPass::Run(HIRBuilder* builder) {
if (v->last_use == instr) {
// Available.
if (!instr->src1.value->IsConstant()) {
ordinals.reset(v->ordinal);
}
}
}
@ -94,7 +97,7 @@ int ValueReductionPass::Run(HIRBuilder* builder) {
if (v->last_use == instr) {
// Available.
if (!instr->src2.value->IsConstant()) {
ordinals.reset(v->ordinal);
}
}
}
@ -106,7 +109,7 @@ int ValueReductionPass::Run(HIRBuilder* builder) {
if (v->last_use == instr) {
// Available.
if (!instr->src3.value->IsConstant()) {
ordinals.reset(v->ordinal);
}
}
}
@ -115,7 +118,7 @@ int ValueReductionPass::Run(HIRBuilder* builder) {
// source value ordinal.
auto v = instr->dest;
// Find a lower ordinal.
for (auto n = 0u; n < ordinals.size(); n++) {
if (!ordinals.test(n)) {
ordinals.set(n);
v->ordinal = n;

View File

@ -38,20 +38,40 @@ PPCTranslator::PPCTranslator(PPCFrontend* frontend) :
assembler_ = backend->CreateAssembler();
assembler_->Initialize();
bool validate = FLAGS_validate_hir;
// Build the CFG first.
compiler_->AddPass(new passes::ControlFlowAnalysisPass());
// Passes are executed in the order they are added. Multiple of the same
// pass type may be used.
if (validate) compiler_->AddPass(new passes::ValidationPass());
//compiler_->AddPass(new passes::ContextPromotionPass());
if (validate) compiler_->AddPass(new passes::ValidationPass());
compiler_->AddPass(new passes::SimplificationPass());
// TODO(benvanik): run repeatedly?
if (validate) compiler_->AddPass(new passes::ValidationPass());
compiler_->AddPass(new passes::ConstantPropagationPass());
//compiler_->AddPass(new passes::TypePropagationPass());
//compiler_->AddPass(new passes::ByteSwapEliminationPass());
if (validate) compiler_->AddPass(new passes::ValidationPass());
compiler_->AddPass(new passes::SimplificationPass());
if (validate) compiler_->AddPass(new passes::ValidationPass());
//compiler_->AddPass(new passes::DeadStoreEliminationPass());
//if (validate) compiler_->AddPass(new passes::ValidationPass());
compiler_->AddPass(new passes::DeadCodeEliminationPass());
if (validate) compiler_->AddPass(new passes::ValidationPass());
// Adds local load/stores.
compiler_->AddPass(new passes::DataFlowAnalysisPass());
if (validate) compiler_->AddPass(new passes::ValidationPass());
compiler_->AddPass(new passes::SimplificationPass());
if (validate) compiler_->AddPass(new passes::ValidationPass());
// Run DCE one more time to cleanup any local manipulation.
compiler_->AddPass(new passes::DeadCodeEliminationPass());
if (validate) compiler_->AddPass(new passes::ValidationPass());
// Removes all unneeded variables. Try not to add new ones after this.
compiler_->AddPass(new passes::ValueReductionPass());
if (validate) compiler_->AddPass(new passes::ValidationPass());
// Must come last. The HIR is not really HIR after this.
compiler_->AddPass(new passes::FinalizationPass());

View File

@ -12,15 +12,37 @@
#include <alloy/core.h>
XEDECLARECLASS1(llvm, BitVector);
namespace alloy {
namespace hir {
class Block;
class HIRBuilder;
class Instr;
class Label;
class Edge {
public:
enum EdgeFlags {
UNCONDITIONAL = (1 << 0),
DOMINATES = (1 << 1),
};
public:
Edge* outgoing_next;
Edge* outgoing_prev;
Edge* incoming_next;
Edge* incoming_prev;
Block* src;
Block* dest;
uint32_t flags;
};
class Block {
public:
Arena* arena;
@ -28,6 +50,10 @@ public:
Block* next;
Block* prev;
Edge* incoming_edge_head;
Edge* outgoing_edge_head;
llvm::BitVector* incoming_values;
Label* label_head;
Label* label_tail;

View File

@ -41,6 +41,7 @@ void HIRBuilder::Reset() {
attributes_ = 0;
next_label_id_ = 0;
next_value_ordinal_ = 0;
locals_.clear();
block_head_ = block_tail_ = NULL;
current_block_ = NULL;
#if XE_DEBUG
@ -141,6 +142,13 @@ void HIRBuilder::Dump(StringBuffer* str) {
str->Append("; attributes = %.8X\n", attributes_);
}
for (auto it = locals_.begin(); it != locals_.end(); ++it) {
auto local = *it;
str->Append(" ; local ");
DumpValue(str, local);
str->Append("\n");
}
uint32_t block_ordinal = 0;
Block* block = block_head_;
while (block) {
@ -161,6 +169,39 @@ void HIRBuilder::Dump(StringBuffer* str) {
label = label->next;
}
Edge* incoming_edge = block->incoming_edge_head;
while (incoming_edge) {
auto src_label = incoming_edge->src->label_head;
if (src_label && src_label->name) {
str->Append(" ; in: %s", src_label->name);
} else if (src_label) {
str->Append(" ; in: label%d", src_label->id);
} else {
str->Append(" ; in: <block%d>",
incoming_edge->src->ordinal);
}
str->Append(", dom:%d, uncond:%d\n",
(incoming_edge->flags & Edge::DOMINATES) ? 1 : 0,
(incoming_edge->flags & Edge::UNCONDITIONAL) ? 1 : 0);
incoming_edge = incoming_edge->incoming_next;
}
Edge* outgoing_edge = block->outgoing_edge_head;
while (outgoing_edge) {
auto dest_label = outgoing_edge->dest->label_head;
if (dest_label && dest_label->name) {
str->Append(" ; out: %s", dest_label->name);
} else if (dest_label) {
str->Append(" ; out: label%d", dest_label->id);
} else {
str->Append(" ; out: <block%d>",
outgoing_edge->dest->ordinal);
}
str->Append(", dom:%d, uncond:%d\n",
(outgoing_edge->flags & Edge::DOMINATES) ? 1 : 0,
(outgoing_edge->flags & Edge::UNCONDITIONAL) ? 1 : 0);
outgoing_edge = outgoing_edge->outgoing_next;
}
Instr* i = block->instr_head;
while (i) {
if (i->opcode->flags & OPCODE_FLAG_HIDE) {
@ -303,6 +344,7 @@ void HIRBuilder::InsertLabel(Label* label, Instr* prev_instr) {
block_tail_ = new_block;
}
new_block->label_head = new_block->label_tail = label;
new_block->incoming_edge_head = new_block->outgoing_edge_head = NULL;
label->block = new_block;
label->prev = label->next = NULL;
@ -319,8 +361,7 @@ void HIRBuilder::InsertLabel(Label* label, Instr* prev_instr) {
new_block->instr_tail = old_prev_tail;
}
for (auto instr = new_block->instr_head; instr; instr = instr->next) {
instr->block = new_block;
}
@ -342,6 +383,19 @@ void HIRBuilder::ResetLabelTags() {
}
}
void HIRBuilder::AddEdge(Block* src, Block* dest, uint32_t flags) {
Edge* edge = arena_->Alloc<Edge>();
edge->src = src;
edge->dest = dest;
edge->flags = flags;
edge->outgoing_prev = NULL;
edge->outgoing_next = src->outgoing_edge_head;
src->outgoing_edge_head = edge;
edge->incoming_prev = NULL;
edge->incoming_next = dest->incoming_edge_head;
dest->incoming_edge_head = edge;
}
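// n.b. AddEdge pushes onto the head of both lists without fixing up the
// displaced head's prev pointer, so incoming_prev/outgoing_prev stay NULL;
// the lists are only reliably walkable forward via incoming_next and
// outgoing_next, which is how every traversal in this commit uses them.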
Block* HIRBuilder::AppendBlock() {
Block* block = arena_->Alloc<Block>();
block->arena = arena_;
@ -356,6 +410,7 @@ Block* HIRBuilder::AppendBlock() {
}
current_block_ = block;
block->label_head = block->label_tail = NULL;
block->incoming_edge_head = block->outgoing_edge_head = NULL;
block->instr_head = block->instr_tail = NULL;
return block;
}
@ -420,6 +475,7 @@ Value* HIRBuilder::AllocValue(TypeName type) {
value->def = NULL;
value->use_head = NULL;
value->last_use = NULL;
value->local_slot = NULL;
value->tag = NULL;
value->reg = -1;
return value;
@ -434,6 +490,7 @@ Value* HIRBuilder::CloneValue(Value* source) {
value->def = NULL;
value->use_head = NULL;
value->last_use = NULL;
value->local_slot = NULL;
value->tag = NULL;
value->reg = -1;
return value;
@ -877,6 +934,28 @@ Value* HIRBuilder::LoadClock() {
return i->dest;
}
Value* HIRBuilder::AllocLocal(TypeName type) {
Value* slot = AllocValue(type);
locals_.push_back(slot);
return slot;
}
Value* HIRBuilder::LoadLocal(Value* slot) {
Instr* i = AppendInstr(
OPCODE_LOAD_LOCAL_info, 0,
AllocValue(slot->type));
i->set_src1(slot);
i->src2.value = i->src3.value = NULL;
return i->dest;
}
void HIRBuilder::StoreLocal(Value* slot, Value* value) {
Instr* i = AppendInstr(OPCODE_STORE_LOCAL_info, 0);
i->set_src1(slot);
i->set_src2(value);
i->src3.value = NULL;
}
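// Minimal usage sketch of the local-variable API above (builder and a
// Value* v assumed in scope):
//   Value* slot = builder->AllocLocal(INT64_TYPE);
//   builder->StoreLocal(slot, v);          // spill v into the slot
//   Value* v2 = builder->LoadLocal(slot);  // later: reload as a new value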
Value* HIRBuilder::LoadContext(size_t offset, TypeName type) {
Instr* i = AppendInstr(
OPCODE_LOAD_CONTEXT_info, 0,

View File

@ -41,7 +41,12 @@ public:
uint32_t attributes() const { return attributes_; }
void set_attributes(uint32_t value) { attributes_ = value; }
std::vector<Value*>& locals() { return locals_; }
uint32_t max_value_ordinal() const { return next_value_ordinal_; }
Block* first_block() const { return block_head_; }
Block* last_block() const { return block_tail_; }
Block* current_block() const;
Instr* last_instr() const;
@ -50,12 +55,11 @@ public:
void InsertLabel(Label* label, Instr* prev_instr);
void ResetLabelTags();
void AddEdge(Block* src, Block* dest, uint32_t flags);
// static allocations:
// Value* AllocStatic(size_t length);
void Comment(const char* format, ...);
void Nop();
@ -116,6 +120,10 @@ public:
Value* LoadClock();
Value* AllocLocal(TypeName type);
Value* LoadLocal(Value* slot);
void StoreLocal(Value* slot, Value* value);
Value* LoadContext(size_t offset, TypeName type);
void StoreContext(size_t offset, Value* value);
@ -230,6 +238,8 @@ protected:
uint32_t next_label_id_;
uint32_t next_value_ordinal_;
std::vector<Value*> locals_;
Block* block_head_;
Block* block_tail_;
Block* current_block_;

View File

@ -61,6 +61,36 @@ bool Instr::Match(SignatureType dest_req,
((src3_req == SIG_TYPE_IGNORE) || (src3_req == TO_SIG_TYPE(src3.value)));
}
void Instr::MoveBefore(Instr* other) {
if (next == other) {
return;
}
// Remove from current location.
if (prev) {
prev->next = next;
} else {
block->instr_head = next;
}
if (next) {
next->prev = prev;
} else {
block->instr_tail = prev;
}
// Insert into new location.
block = other->block;
next = other;
prev = other->prev;
other->prev = this;
if (prev) {
prev->next = this;
}
if (other == block->instr_head) {
block->instr_head = this;
}
}
void Instr::Replace(const OpcodeInfo* opcode, uint16_t flags) {
this->opcode = opcode;
this->flags = flags;

View File

@ -79,6 +79,7 @@ public:
SignatureType src2 = SIG_TYPE_X,
SignatureType src3 = SIG_TYPE_X) const;
void MoveBefore(Instr* other);
void Replace(const OpcodeInfo* opcode, uint16_t flags);
void Remove();
};

View File

@ -117,6 +117,9 @@ enum Opcode {
OPCODE_LOAD_CLOCK,
OPCODE_LOAD_LOCAL,
OPCODE_STORE_LOCAL,
OPCODE_LOAD_CONTEXT,
OPCODE_STORE_CONTEXT,
@ -202,6 +205,7 @@ enum OpcodeFlags {
OPCODE_FLAG_VOLATILE = (1 << 4),
OPCODE_FLAG_IGNORE = (1 << 5),
OPCODE_FLAG_HIDE = (1 << 6),
OPCODE_FLAG_PAIRED_PREV = (1 << 7),
};
enum OpcodeSignatureType {

View File

@ -182,6 +182,18 @@ DEFINE_OPCODE(
OPCODE_SIG_V,
0);
DEFINE_OPCODE(
OPCODE_LOAD_LOCAL,
"load_local",
OPCODE_SIG_V_V,
0);
DEFINE_OPCODE(
OPCODE_STORE_LOCAL,
"store_local",
OPCODE_SIG_X_V_V,
0);
DEFINE_OPCODE(
OPCODE_LOAD_CONTEXT,
"load_context",
@ -297,17 +309,17 @@ DEFINE_OPCODE(
OPCODE_DID_CARRY,
"did_carry",
OPCODE_SIG_V_V,
0);
OPCODE_FLAG_PAIRED_PREV);
DEFINE_OPCODE(
OPCODE_DID_OVERFLOW,
"did_overflow",
OPCODE_SIG_V_V,
0);
OPCODE_FLAG_PAIRED_PREV);
DEFINE_OPCODE(
OPCODE_DID_SATURATE,
"did_saturate",
OPCODE_SIG_V_V,
0);
OPCODE_FLAG_PAIRED_PREV);
DEFINE_OPCODE(
OPCODE_VECTOR_COMPARE_EQ,

View File

@ -42,6 +42,25 @@ static bool IsFloatType(TypeName type_name) {
static bool IsVecType(TypeName type_name) {
return type_name == VEC128_TYPE;
}
static size_t GetTypeSize(TypeName type_name) {
switch (type_name) {
case INT8_TYPE:
return 1;
case INT16_TYPE:
return 2;
case INT32_TYPE:
return 4;
case INT64_TYPE:
return 8;
case FLOAT32_TYPE:
return 4;
case FLOAT64_TYPE:
return 8;
default:
case VEC128_TYPE:
return 16;
}
}
enum ValueFlags {
VALUE_IS_CONSTANT = (1 << 1),
@ -78,6 +97,7 @@ public:
Use* use_head;
// NOTE: for performance reasons this is not maintained during construction.
Instr* last_use;
Value* local_slot;
// TODO(benvanik): remove to shrink size.
void* tag;

third_party/llvm.gypi vendored Normal file
View File

@ -0,0 +1,35 @@
# Copyright 2014 Ben Vanik. All Rights Reserved.
{
'targets': [
{
'target_name': 'llvm',
'type': '<(library)',
'direct_dependent_settings': {
'include_dirs': [
'llvm/include/',
],
'defines': [
],
},
'msvs_disabled_warnings': [4267],
'defines': [
],
'include_dirs': [
'llvm/include/',
],
'sources': [
'llvm/dummy.cc',
'llvm/include/llvm/ADT/BitVector.h',
'llvm/include/llvm/Support/Compiler.h',
'llvm/include/llvm/Support/MathExtras.h',
'llvm/include/llvm/Support/type_traits.h',
],
}
]
}

third_party/llvm/LICENSE.txt vendored Normal file
View File

@ -0,0 +1,71 @@
==============================================================================
LLVM Release License
==============================================================================
University of Illinois/NCSA
Open Source License
Copyright (c) 2003-2014 University of Illinois at Urbana-Champaign.
All rights reserved.
Developed by:
LLVM Team
University of Illinois at Urbana-Champaign
http://llvm.org
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal with
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimers.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimers in the
documentation and/or other materials provided with the distribution.
* Neither the names of the LLVM Team, University of Illinois at
Urbana-Champaign, nor the names of its contributors may be used to
endorse or promote products derived from this Software without specific
prior written permission.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
SOFTWARE.
==============================================================================
Copyrights and Licenses for Third Party Software Distributed with LLVM:
==============================================================================
The LLVM software contains code written by third parties. Such software will
have its own individual LICENSE.TXT file in the directory in which it appears.
This file will describe the copyrights, license, and restrictions which apply
to that code.
The disclaimer of warranty in the University of Illinois Open Source License
applies to all code in the LLVM Distribution, and nothing in any of the
other licenses gives permission to use the names of the LLVM Team or the
University of Illinois to endorse or promote products derived from this
Software.
The following pieces of software have additional or alternate copyrights,
licenses, and/or restrictions:
Program Directory
------- ---------
Autoconf llvm/autoconf
llvm/projects/ModuleMaker/autoconf
llvm/projects/sample/autoconf
Google Test llvm/utils/unittest/googletest
OpenBSD regex llvm/lib/Support/{reg*, COPYRIGHT.regex}
pyyaml tests llvm/test/YAMLParser/{*.data, LICENSE.TXT}
ARM contributions llvm/lib/Target/ARM/LICENSE.TXT
md5 contributions llvm/lib/Support/MD5.cpp llvm/include/llvm/Support/MD5.h

third_party/llvm/dummy.cc vendored Normal file
View File

@ -0,0 +1 @@
// here just to keep gyp happy

View File

@ -0,0 +1,602 @@
//===- llvm/ADT/BitVector.h - Bit vectors -----------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the BitVector class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_BITVECTOR_H
#define LLVM_ADT_BITVECTOR_H
#include "llvm/Support/Compiler.h"
#ifdef LLVM_IGNORE_XENIA
#include "llvm/Support/ErrorHandling.h"
#else
#define llvm_unreachable(msg) assert(false)
#endif // LLVM_IGNORE_XENIA
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdlib>
namespace llvm {
class BitVector {
typedef unsigned long BitWord;
enum { BITWORD_SIZE = (unsigned)sizeof(BitWord) * CHAR_BIT };
BitWord *Bits; // Actual bits.
unsigned Size; // Size of bitvector in bits.
unsigned Capacity; // Size of allocated memory in BitWord.
public:
// Encapsulation of a single bit.
class reference {
friend class BitVector;
BitWord *WordRef;
unsigned BitPos;
reference(); // Undefined
public:
reference(BitVector &b, unsigned Idx) {
WordRef = &b.Bits[Idx / BITWORD_SIZE];
BitPos = Idx % BITWORD_SIZE;
}
~reference() {}
reference &operator=(reference t) {
*this = bool(t);
return *this;
}
reference& operator=(bool t) {
if (t)
*WordRef |= 1L << BitPos;
else
*WordRef &= ~(1L << BitPos);
return *this;
}
operator bool() const {
return ((*WordRef) & (1L << BitPos)) ? true : false;
}
};
/// BitVector default ctor - Creates an empty bitvector.
BitVector() : Size(0), Capacity(0) {
Bits = 0;
}
/// BitVector ctor - Creates a bitvector of specified number of bits. All
/// bits are initialized to the specified value.
explicit BitVector(unsigned s, bool t = false) : Size(s) {
Capacity = NumBitWords(s);
Bits = (BitWord *)std::malloc(Capacity * sizeof(BitWord));
init_words(Bits, Capacity, t);
if (t)
clear_unused_bits();
}
/// BitVector copy ctor.
BitVector(const BitVector &RHS) : Size(RHS.size()) {
if (Size == 0) {
Bits = 0;
Capacity = 0;
return;
}
Capacity = NumBitWords(RHS.size());
Bits = (BitWord *)std::malloc(Capacity * sizeof(BitWord));
std::memcpy(Bits, RHS.Bits, Capacity * sizeof(BitWord));
}
#if LLVM_HAS_RVALUE_REFERENCES
BitVector(BitVector &&RHS)
: Bits(RHS.Bits), Size(RHS.Size), Capacity(RHS.Capacity) {
RHS.Bits = 0;
}
#endif
~BitVector() {
std::free(Bits);
}
/// empty - Tests whether there are no bits in this bitvector.
bool empty() const { return Size == 0; }
/// size - Returns the number of bits in this bitvector.
unsigned size() const { return Size; }
/// count - Returns the number of bits which are set.
unsigned count() const {
unsigned NumBits = 0;
for (unsigned i = 0; i < NumBitWords(size()); ++i)
if (sizeof(BitWord) == 4)
NumBits += CountPopulation_32((uint32_t)Bits[i]);
else if (sizeof(BitWord) == 8)
NumBits += CountPopulation_64(Bits[i]);
else
llvm_unreachable("Unsupported!");
return NumBits;
}
/// any - Returns true if any bit is set.
bool any() const {
for (unsigned i = 0; i < NumBitWords(size()); ++i)
if (Bits[i] != 0)
return true;
return false;
}
/// all - Returns true if all bits are set.
bool all() const {
for (unsigned i = 0; i < Size / BITWORD_SIZE; ++i)
if (Bits[i] != ~0UL)
return false;
// If bits remain check that they are ones. The unused bits are always zero.
if (unsigned Remainder = Size % BITWORD_SIZE)
return Bits[Size / BITWORD_SIZE] == (1UL << Remainder) - 1;
return true;
}
/// none - Returns true if none of the bits are set.
bool none() const {
return !any();
}
/// find_first - Returns the index of the first set bit, -1 if none
/// of the bits are set.
int find_first() const {
for (unsigned i = 0; i < NumBitWords(size()); ++i)
if (Bits[i] != 0) {
if (sizeof(BitWord) == 4)
return i * BITWORD_SIZE + countTrailingZeros((uint32_t)Bits[i]);
if (sizeof(BitWord) == 8)
return i * BITWORD_SIZE + countTrailingZeros(Bits[i]);
llvm_unreachable("Unsupported!");
}
return -1;
}
/// find_next - Returns the index of the next set bit following the
/// "Prev" bit. Returns -1 if the next set bit is not found.
int find_next(unsigned Prev) const {
++Prev;
if (Prev >= Size)
return -1;
unsigned WordPos = Prev / BITWORD_SIZE;
unsigned BitPos = Prev % BITWORD_SIZE;
BitWord Copy = Bits[WordPos];
// Mask off previous bits.
Copy &= ~0UL << BitPos;
if (Copy != 0) {
if (sizeof(BitWord) == 4)
return WordPos * BITWORD_SIZE + countTrailingZeros((uint32_t)Copy);
if (sizeof(BitWord) == 8)
return WordPos * BITWORD_SIZE + countTrailingZeros(Copy);
llvm_unreachable("Unsupported!");
}
// Check subsequent words.
for (unsigned i = WordPos+1; i < NumBitWords(size()); ++i)
if (Bits[i] != 0) {
if (sizeof(BitWord) == 4)
return i * BITWORD_SIZE + countTrailingZeros((uint32_t)Bits[i]);
if (sizeof(BitWord) == 8)
return i * BITWORD_SIZE + countTrailingZeros(Bits[i]);
llvm_unreachable("Unsupported!");
}
return -1;
}
/// clear - Clear all bits.
void clear() {
Size = 0;
}
/// resize - Grow or shrink the bitvector.
void resize(unsigned N, bool t = false) {
if (N > Capacity * BITWORD_SIZE) {
unsigned OldCapacity = Capacity;
grow(N);
init_words(&Bits[OldCapacity], (Capacity-OldCapacity), t);
}
// Set any old unused bits that are now included in the BitVector. This
// may set bits that are not included in the new vector, but we will clear
// them back out below.
if (N > Size)
set_unused_bits(t);
// Update the size, and clear out any bits that are now unused
unsigned OldSize = Size;
Size = N;
if (t || N < OldSize)
clear_unused_bits();
}
void reserve(unsigned N) {
if (N > Capacity * BITWORD_SIZE)
grow(N);
}
// Set, reset, flip
BitVector &set() {
init_words(Bits, Capacity, true);
clear_unused_bits();
return *this;
}
BitVector &set(unsigned Idx) {
Bits[Idx / BITWORD_SIZE] |= 1L << (Idx % BITWORD_SIZE);
return *this;
}
/// set - Efficiently set a range of bits in [I, E)
BitVector &set(unsigned I, unsigned E) {
assert(I <= E && "Attempted to set backwards range!");
assert(E <= size() && "Attempted to set out-of-bounds range!");
if (I == E) return *this;
if (I / BITWORD_SIZE == E / BITWORD_SIZE) {
BitWord EMask = 1UL << (E % BITWORD_SIZE);
BitWord IMask = 1UL << (I % BITWORD_SIZE);
BitWord Mask = EMask - IMask;
Bits[I / BITWORD_SIZE] |= Mask;
return *this;
}
BitWord PrefixMask = ~0UL << (I % BITWORD_SIZE);
Bits[I / BITWORD_SIZE] |= PrefixMask;
I = RoundUpToAlignment(I, BITWORD_SIZE);
for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
Bits[I / BITWORD_SIZE] = ~0UL;
BitWord PostfixMask = (1UL << (E % BITWORD_SIZE)) - 1;
if (I < E)
Bits[I / BITWORD_SIZE] |= PostfixMask;
return *this;
}
BitVector &reset() {
init_words(Bits, Capacity, false);
return *this;
}
BitVector &reset(unsigned Idx) {
Bits[Idx / BITWORD_SIZE] &= ~(1L << (Idx % BITWORD_SIZE));
return *this;
}
/// reset - Efficiently reset a range of bits in [I, E)
BitVector &reset(unsigned I, unsigned E) {
assert(I <= E && "Attempted to reset backwards range!");
assert(E <= size() && "Attempted to reset out-of-bounds range!");
if (I == E) return *this;
if (I / BITWORD_SIZE == E / BITWORD_SIZE) {
BitWord EMask = 1UL << (E % BITWORD_SIZE);
BitWord IMask = 1UL << (I % BITWORD_SIZE);
BitWord Mask = EMask - IMask;
Bits[I / BITWORD_SIZE] &= ~Mask;
return *this;
}
BitWord PrefixMask = ~0UL << (I % BITWORD_SIZE);
Bits[I / BITWORD_SIZE] &= ~PrefixMask;
I = RoundUpToAlignment(I, BITWORD_SIZE);
for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
Bits[I / BITWORD_SIZE] = 0UL;
BitWord PostfixMask = (1UL << (E % BITWORD_SIZE)) - 1;
if (I < E)
Bits[I / BITWORD_SIZE] &= ~PostfixMask;
return *this;
}
BitVector &flip() {
for (unsigned i = 0; i < NumBitWords(size()); ++i)
Bits[i] = ~Bits[i];
clear_unused_bits();
return *this;
}
BitVector &flip(unsigned Idx) {
Bits[Idx / BITWORD_SIZE] ^= 1L << (Idx % BITWORD_SIZE);
return *this;
}
// Indexing.
reference operator[](unsigned Idx) {
assert (Idx < Size && "Out-of-bounds Bit access.");
return reference(*this, Idx);
}
bool operator[](unsigned Idx) const {
assert (Idx < Size && "Out-of-bounds Bit access.");
BitWord Mask = 1L << (Idx % BITWORD_SIZE);
return (Bits[Idx / BITWORD_SIZE] & Mask) != 0;
}
bool test(unsigned Idx) const {
return (*this)[Idx];
}
/// Test if any common bits are set.
bool anyCommon(const BitVector &RHS) const {
unsigned ThisWords = NumBitWords(size());
unsigned RHSWords = NumBitWords(RHS.size());
for (unsigned i = 0, e = std::min(ThisWords, RHSWords); i != e; ++i)
if (Bits[i] & RHS.Bits[i])
return true;
return false;
}
// Comparison operators.
bool operator==(const BitVector &RHS) const {
unsigned ThisWords = NumBitWords(size());
unsigned RHSWords = NumBitWords(RHS.size());
unsigned i;
for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
if (Bits[i] != RHS.Bits[i])
return false;
// Verify that any extra words are all zeros.
if (i != ThisWords) {
for (; i != ThisWords; ++i)
if (Bits[i])
return false;
} else if (i != RHSWords) {
for (; i != RHSWords; ++i)
if (RHS.Bits[i])
return false;
}
return true;
}
bool operator!=(const BitVector &RHS) const {
return !(*this == RHS);
}
/// Intersection, union, disjoint union.
BitVector &operator&=(const BitVector &RHS) {
unsigned ThisWords = NumBitWords(size());
unsigned RHSWords = NumBitWords(RHS.size());
unsigned i;
for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
Bits[i] &= RHS.Bits[i];
// Any bits that are just in this bitvector become zero, because they aren't
// in the RHS bit vector. Any words only in RHS are ignored because they
// are already zero in the LHS.
for (; i != ThisWords; ++i)
Bits[i] = 0;
return *this;
}
/// reset - Reset bits that are set in RHS. Same as *this &= ~RHS.
BitVector &reset(const BitVector &RHS) {
unsigned ThisWords = NumBitWords(size());
unsigned RHSWords = NumBitWords(RHS.size());
unsigned i;
for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
Bits[i] &= ~RHS.Bits[i];
return *this;
}
/// test - Check if (This - RHS) is zero.
/// This is the same as reset(RHS) and any().
bool test(const BitVector &RHS) const {
unsigned ThisWords = NumBitWords(size());
unsigned RHSWords = NumBitWords(RHS.size());
unsigned i;
for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
if ((Bits[i] & ~RHS.Bits[i]) != 0)
return true;
for (; i != ThisWords ; ++i)
if (Bits[i] != 0)
return true;
return false;
}
BitVector &operator|=(const BitVector &RHS) {
if (size() < RHS.size())
resize(RHS.size());
for (size_t i = 0, e = NumBitWords(RHS.size()); i != e; ++i)
Bits[i] |= RHS.Bits[i];
return *this;
}
BitVector &operator^=(const BitVector &RHS) {
if (size() < RHS.size())
resize(RHS.size());
for (size_t i = 0, e = NumBitWords(RHS.size()); i != e; ++i)
Bits[i] ^= RHS.Bits[i];
return *this;
}
// Assignment operator.
const BitVector &operator=(const BitVector &RHS) {
if (this == &RHS) return *this;
Size = RHS.size();
unsigned RHSWords = NumBitWords(Size);
if (Size <= Capacity * BITWORD_SIZE) {
if (Size)
std::memcpy(Bits, RHS.Bits, RHSWords * sizeof(BitWord));
clear_unused_bits();
return *this;
}
// Grow the bitvector to have enough elements.
Capacity = RHSWords;
BitWord *NewBits = (BitWord *)std::malloc(Capacity * sizeof(BitWord));
std::memcpy(NewBits, RHS.Bits, Capacity * sizeof(BitWord));
// Destroy the old bits.
std::free(Bits);
Bits = NewBits;
return *this;
}
#if LLVM_HAS_RVALUE_REFERENCES
const BitVector &operator=(BitVector &&RHS) {
if (this == &RHS) return *this;
std::free(Bits);
Bits = RHS.Bits;
Size = RHS.Size;
Capacity = RHS.Capacity;
RHS.Bits = 0;
return *this;
}
#endif
void swap(BitVector &RHS) {
std::swap(Bits, RHS.Bits);
std::swap(Size, RHS.Size);
std::swap(Capacity, RHS.Capacity);
}
//===--------------------------------------------------------------------===//
// Portable bit mask operations.
//===--------------------------------------------------------------------===//
//
// These methods all operate on arrays of uint32_t, each holding 32 bits. The
// fixed word size makes it easier to work with literal bit vector constants
// in portable code.
//
// The LSB in each word is the lowest numbered bit. The size of a portable
// bit mask is always a whole multiple of 32 bits. If no bit mask size is
// given, the bit mask is assumed to cover the entire BitVector.
/// setBitsInMask - Add '1' bits from Mask to this vector. Don't resize.
/// This computes "*this |= Mask".
void setBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
applyMask<true, false>(Mask, MaskWords);
}
/// clearBitsInMask - Clear any bits in this vector that are set in Mask.
/// Don't resize. This computes "*this &= ~Mask".
void clearBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
applyMask<false, false>(Mask, MaskWords);
}
/// setBitsNotInMask - Add a bit to this vector for every '0' bit in Mask.
/// Don't resize. This computes "*this |= ~Mask".
void setBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
applyMask<true, true>(Mask, MaskWords);
}
/// clearBitsNotInMask - Clear a bit in this vector for every '0' bit in Mask.
/// Don't resize. This computes "*this &= Mask".
void clearBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
applyMask<false, true>(Mask, MaskWords);
}
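/// Usage sketch for the mask operations above (illustrative only; the mask
/// values and variable names are hypothetical):
///   BitVector Live(64);
///   static const uint32_t CalleeSaved[2] = { 0x0000FF00u, 0u };
///   Live.setBitsInMask(CalleeSaved, 2);    // Live |= CalleeSaved
///   Live.clearBitsInMask(CalleeSaved, 2);  // Live &= ~CalleeSaved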
private:
unsigned NumBitWords(unsigned S) const {
return (S + BITWORD_SIZE-1) / BITWORD_SIZE;
}
// Set the unused bits in the high words.
void set_unused_bits(bool t = true) {
// Set high words first.
unsigned UsedWords = NumBitWords(Size);
if (Capacity > UsedWords)
init_words(&Bits[UsedWords], (Capacity-UsedWords), t);
// Then set any stray high bits of the last used word.
unsigned ExtraBits = Size % BITWORD_SIZE;
if (ExtraBits) {
BitWord ExtraBitMask = ~BitWord(0) << ExtraBits;
if (t)
Bits[UsedWords-1] |= ExtraBitMask;
else
Bits[UsedWords-1] &= ~ExtraBitMask;
}
}
// Clear the unused bits in the high words.
void clear_unused_bits() {
set_unused_bits(false);
}
void grow(unsigned NewSize) {
Capacity = std::max(NumBitWords(NewSize), Capacity * 2);
Bits = (BitWord *)std::realloc(Bits, Capacity * sizeof(BitWord));
clear_unused_bits();
}
void init_words(BitWord *B, unsigned NumWords, bool t) {
memset(B, 0 - (int)t, NumWords*sizeof(BitWord));
}
template<bool AddBits, bool InvertMask>
void applyMask(const uint32_t *Mask, unsigned MaskWords) {
assert(BITWORD_SIZE % 32 == 0 && "Unsupported BitWord size.");
MaskWords = std::min(MaskWords, (size() + 31) / 32);
const unsigned Scale = BITWORD_SIZE / 32;
unsigned i;
for (i = 0; MaskWords >= Scale; ++i, MaskWords -= Scale) {
BitWord BW = Bits[i];
// This inner loop should unroll completely when BITWORD_SIZE > 32.
for (unsigned b = 0; b != BITWORD_SIZE; b += 32) {
uint32_t M = *Mask++;
if (InvertMask) M = ~M;
if (AddBits) BW |= BitWord(M) << b;
else BW &= ~(BitWord(M) << b);
}
Bits[i] = BW;
}
for (unsigned b = 0; MaskWords; b += 32, --MaskWords) {
uint32_t M = *Mask++;
if (InvertMask) M = ~M;
if (AddBits) Bits[i] |= BitWord(M) << b;
else Bits[i] &= ~(BitWord(M) << b);
}
if (AddBits)
clear_unused_bits();
}
};
} // End llvm namespace
namespace std {
/// Implement std::swap in terms of BitVector swap.
inline void
swap(llvm::BitVector &LHS, llvm::BitVector &RHS) {
LHS.swap(RHS);
}
}
#endif
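// A minimal usage sketch for the BitVector above (illustrative only; the
// include path and all names below are assumptions, not part of this commit):
#include "llvm/ADT/BitVector.h"
static bool UseEscapesDef() {
  llvm::BitVector Def(128), Use(128);
  Def.set(3);
  Use.set(3);
  Use.set(7);            // bit 7 is used but never defined
  return Use.test(Def);  // true: Use has a bit (7) that Def lacks
}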

View File

@ -0,0 +1,446 @@
//===-- llvm/Support/Compiler.h - Compiler abstraction support --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines several macros, based on the current compiler. This allows
// use of compiler-specific features in a way that remains portable.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_COMPILER_H
#define LLVM_SUPPORT_COMPILER_H
//#include "llvm/Config/llvm-config.h"
#ifndef __has_feature
# define __has_feature(x) 0
#endif
#ifndef __has_extension
# define __has_extension(x) 0
#endif
#ifndef __has_attribute
# define __has_attribute(x) 0
#endif
#ifndef __has_builtin
# define __has_builtin(x) 0
#endif
/// \macro __GNUC_PREREQ
/// \brief Defines __GNUC_PREREQ if glibc's features.h isn't available.
#ifndef __GNUC_PREREQ
# if defined(__GNUC__) && defined(__GNUC_MINOR__)
# define __GNUC_PREREQ(maj, min) \
((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min))
# else
# define __GNUC_PREREQ(maj, min) 0
# endif
#endif
/// \macro LLVM_MSC_PREREQ
/// \brief Is the compiler MSVC of at least the specified version?
/// The common \param version values to check for are:
/// * 1600: Microsoft Visual Studio 2010 / 10.0
/// * 1700: Microsoft Visual Studio 2012 / 11.0
/// * 1800: Microsoft Visual Studio 2013 / 12.0
#ifdef _MSC_VER
#define LLVM_MSC_PREREQ(version) (_MSC_VER >= (version))
#else
#define LLVM_MSC_PREREQ(version) 0
#endif
/// \brief Does the compiler support r-value references?
/// This implies that <utility> provides the one-argument std::move; it
/// does not imply the existence of any other C++ library features.
#if __has_feature(cxx_rvalue_references) || \
defined(__GXX_EXPERIMENTAL_CXX0X__) || LLVM_MSC_PREREQ(1600)
#define LLVM_HAS_RVALUE_REFERENCES 1
#else
#define LLVM_HAS_RVALUE_REFERENCES 0
#endif
/// \brief Does the compiler support r-value reference *this?
///
/// Sadly, this is separate from just r-value reference support because GCC
/// implemented everything but this thus far. No release of GCC yet has support
/// for this feature so it is enabled with Clang only.
/// FIXME: This should change to a version check when GCC grows support for it.
#if __has_feature(cxx_rvalue_references)
#define LLVM_HAS_RVALUE_REFERENCE_THIS 1
#else
#define LLVM_HAS_RVALUE_REFERENCE_THIS 0
#endif
/// \macro LLVM_HAS_CXX11_TYPETRAITS
/// \brief Does the compiler have the C++11 type traits.
///
/// #include <type_traits>
///
/// * enable_if
/// * {true,false}_type
/// * is_constructible
/// * etc...
#if defined(__GXX_EXPERIMENTAL_CXX0X__) || LLVM_MSC_PREREQ(1700)
#define LLVM_HAS_CXX11_TYPETRAITS 1
#else
#define LLVM_HAS_CXX11_TYPETRAITS 0
#endif
/// \macro LLVM_HAS_CXX11_STDLIB
/// \brief Does the compiler have the C++11 standard library.
///
/// Implies LLVM_HAS_RVALUE_REFERENCES, LLVM_HAS_CXX11_TYPETRAITS
#if defined(__GXX_EXPERIMENTAL_CXX0X__) || LLVM_MSC_PREREQ(1700)
#define LLVM_HAS_CXX11_STDLIB 1
#else
#define LLVM_HAS_CXX11_STDLIB 0
#endif
/// \macro LLVM_HAS_VARIADIC_TEMPLATES
/// \brief Does this compiler support variadic templates.
///
/// Implies LLVM_HAS_RVALUE_REFERENCES and the existence of std::forward.
#if __has_feature(cxx_variadic_templates) || LLVM_MSC_PREREQ(1800)
# define LLVM_HAS_VARIADIC_TEMPLATES 1
#else
# define LLVM_HAS_VARIADIC_TEMPLATES 0
#endif
/// llvm_move - Expands to ::std::move if the compiler supports
/// r-value references; otherwise, expands to the argument.
#if LLVM_HAS_RVALUE_REFERENCES
#define llvm_move(value) (::std::move(value))
#else
#define llvm_move(value) (value)
#endif
/// Expands to '&' if r-value references are supported.
///
/// This can be used to provide l-value/r-value overrides of member functions.
/// The r-value override should be guarded by LLVM_HAS_RVALUE_REFERENCE_THIS
#if LLVM_HAS_RVALUE_REFERENCE_THIS
#define LLVM_LVALUE_FUNCTION &
#else
#define LLVM_LVALUE_FUNCTION
#endif
/// LLVM_DELETED_FUNCTION - Expands to = delete if the compiler supports it.
/// Use to mark functions as uncallable. Member functions with this should
/// be declared private so that some behavior is kept in C++03 mode.
///
/// class DontCopy {
/// private:
/// DontCopy(const DontCopy&) LLVM_DELETED_FUNCTION;
/// DontCopy &operator =(const DontCopy&) LLVM_DELETED_FUNCTION;
/// public:
/// ...
/// };
#if __has_feature(cxx_deleted_functions) || \
defined(__GXX_EXPERIMENTAL_CXX0X__) || LLVM_MSC_PREREQ(1800)
#define LLVM_DELETED_FUNCTION = delete
#else
#define LLVM_DELETED_FUNCTION
#endif
/// LLVM_FINAL - Expands to 'final' if the compiler supports it.
/// Use to mark classes or virtual methods as final.
#if __has_feature(cxx_override_control) || \
defined(__GXX_EXPERIMENTAL_CXX0X__) || LLVM_MSC_PREREQ(1700)
#define LLVM_FINAL final
#else
#define LLVM_FINAL
#endif
/// LLVM_OVERRIDE - Expands to 'override' if the compiler supports it.
/// Use to mark virtual methods as overriding a base class method.
#if __has_feature(cxx_override_control) || \
defined(__GXX_EXPERIMENTAL_CXX0X__) || LLVM_MSC_PREREQ(1700)
#define LLVM_OVERRIDE override
#else
#define LLVM_OVERRIDE
#endif
#if __has_feature(cxx_constexpr) || defined(__GXX_EXPERIMENTAL_CXX0X__)
# define LLVM_CONSTEXPR constexpr
#else
# define LLVM_CONSTEXPR
#endif
/// LLVM_LIBRARY_VISIBILITY - If a class marked with this attribute is linked
/// into a shared library, then the class should be private to the library and
/// not accessible from outside it. Can also be used to mark variables and
/// functions, making them private to any shared library they are linked into.
/// On PE/COFF targets, library visibility is the default, so this isn't needed.
#if (__has_attribute(visibility) || __GNUC_PREREQ(4, 0)) && \
!defined(__MINGW32__) && !defined(__CYGWIN__) && !defined(LLVM_ON_WIN32)
#define LLVM_LIBRARY_VISIBILITY __attribute__ ((visibility("hidden")))
#else
#define LLVM_LIBRARY_VISIBILITY
#endif
#if __has_attribute(used) || __GNUC_PREREQ(3, 1)
#define LLVM_ATTRIBUTE_USED __attribute__((__used__))
#else
#define LLVM_ATTRIBUTE_USED
#endif
#if __has_attribute(warn_unused_result) || __GNUC_PREREQ(3, 4)
#define LLVM_ATTRIBUTE_UNUSED_RESULT __attribute__((__warn_unused_result__))
#else
#define LLVM_ATTRIBUTE_UNUSED_RESULT
#endif
// Some compilers warn about unused functions. When a function is sometimes
// used or not depending on build settings (e.g. a function only called from
// within "assert"), this attribute can be used to suppress such warnings.
//
// However, it shouldn't be used for unused *variables*, as those have a much
// more portable solution:
// (void)unused_var_name;
// Prefer cast-to-void wherever it is sufficient.
#if __has_attribute(unused) || __GNUC_PREREQ(3, 1)
#define LLVM_ATTRIBUTE_UNUSED __attribute__((__unused__))
#else
#define LLVM_ATTRIBUTE_UNUSED
#endif
// FIXME: Provide this for PE/COFF targets.
#if (__has_attribute(weak) || __GNUC_PREREQ(4, 0)) && \
(!defined(__MINGW32__) && !defined(__CYGWIN__) && !defined(LLVM_ON_WIN32))
#define LLVM_ATTRIBUTE_WEAK __attribute__((__weak__))
#else
#define LLVM_ATTRIBUTE_WEAK
#endif
// Prior to clang 3.2, clang did not accept any spelling of
// __has_attribute(const), so assume it is supported.
#if defined(__clang__) || defined(__GNUC__)
// aka 'CONST' but following LLVM Conventions.
#define LLVM_READNONE __attribute__((__const__))
#else
#define LLVM_READNONE
#endif
#if __has_attribute(pure) || defined(__GNUC__)
// aka 'PURE' but following LLVM Conventions.
#define LLVM_READONLY __attribute__((__pure__))
#else
#define LLVM_READONLY
#endif
#if __has_builtin(__builtin_expect) || __GNUC_PREREQ(4, 0)
#define LLVM_LIKELY(EXPR) __builtin_expect((bool)(EXPR), true)
#define LLVM_UNLIKELY(EXPR) __builtin_expect((bool)(EXPR), false)
#else
#define LLVM_LIKELY(EXPR) (EXPR)
#define LLVM_UNLIKELY(EXPR) (EXPR)
#endif
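// Usage sketch (illustrative only; HandleRareError is a hypothetical helper):
//   if (LLVM_UNLIKELY(buffer == NULL))
//     HandleRareError();
//   // The fast path below is laid out for the likely branch.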
// C++ doesn't support 'extern template' of template specializations. GCC does,
// but requires __extension__ before it. In the header, use this:
// EXTERN_TEMPLATE_INSTANTIATION(class foo<bar>);
// in the .cpp file, use this:
// TEMPLATE_INSTANTIATION(class foo<bar>);
#ifdef __GNUC__
#define EXTERN_TEMPLATE_INSTANTIATION(X) __extension__ extern template X
#define TEMPLATE_INSTANTIATION(X) template X
#else
#define EXTERN_TEMPLATE_INSTANTIATION(X)
#define TEMPLATE_INSTANTIATION(X)
#endif
/// LLVM_ATTRIBUTE_NOINLINE - On compilers where we have a directive to do so,
/// mark a method "not for inlining".
#if __has_attribute(noinline) || __GNUC_PREREQ(3, 4)
#define LLVM_ATTRIBUTE_NOINLINE __attribute__((noinline))
#elif defined(_MSC_VER)
#define LLVM_ATTRIBUTE_NOINLINE __declspec(noinline)
#else
#define LLVM_ATTRIBUTE_NOINLINE
#endif
/// LLVM_ATTRIBUTE_ALWAYS_INLINE - On compilers where we have a directive to do
/// so, mark a method "always inline" because it is performance sensitive. GCC
/// 3.4 supported this but is buggy in various cases and produces
/// "unimplemented" errors, so it is only used with GCC 4.0 and later.
#if __has_attribute(always_inline) || __GNUC_PREREQ(4, 0)
#define LLVM_ATTRIBUTE_ALWAYS_INLINE inline __attribute__((always_inline))
#elif defined(_MSC_VER)
#define LLVM_ATTRIBUTE_ALWAYS_INLINE __forceinline
#else
#define LLVM_ATTRIBUTE_ALWAYS_INLINE
#endif
#ifdef __GNUC__
#define LLVM_ATTRIBUTE_NORETURN __attribute__((noreturn))
#elif defined(_MSC_VER)
#define LLVM_ATTRIBUTE_NORETURN __declspec(noreturn)
#else
#define LLVM_ATTRIBUTE_NORETURN
#endif
/// LLVM_EXTENSION - Support compilers where we have a keyword to suppress
/// pedantic diagnostics.
#ifdef __GNUC__
#define LLVM_EXTENSION __extension__
#else
#define LLVM_EXTENSION
#endif
// LLVM_ATTRIBUTE_DEPRECATED(decl, "message")
#if __has_feature(attribute_deprecated_with_message)
# define LLVM_ATTRIBUTE_DEPRECATED(decl, message) \
decl __attribute__((deprecated(message)))
#elif defined(__GNUC__)
# define LLVM_ATTRIBUTE_DEPRECATED(decl, message) \
decl __attribute__((deprecated))
#elif defined(_MSC_VER)
# define LLVM_ATTRIBUTE_DEPRECATED(decl, message) \
__declspec(deprecated(message)) decl
#else
# define LLVM_ATTRIBUTE_DEPRECATED(decl, message) \
decl
#endif
/// LLVM_BUILTIN_UNREACHABLE - On compilers which support it, expands
/// to an expression which states that it is undefined behavior for the
/// compiler to reach this point. Otherwise is not defined.
#if __has_builtin(__builtin_unreachable) || __GNUC_PREREQ(4, 5)
# define LLVM_BUILTIN_UNREACHABLE __builtin_unreachable()
#elif defined(_MSC_VER)
# define LLVM_BUILTIN_UNREACHABLE __assume(false)
#endif
/// LLVM_BUILTIN_TRAP - On compilers which support it, expands to an expression
/// which causes the program to exit abnormally.
#if __has_builtin(__builtin_trap) || __GNUC_PREREQ(4, 3)
# define LLVM_BUILTIN_TRAP __builtin_trap()
#else
# define LLVM_BUILTIN_TRAP *(volatile int*)0x11 = 0
#endif
/// \macro LLVM_ASSUME_ALIGNED
/// \brief Returns a pointer with an assumed alignment.
#if __has_builtin(__builtin_assume_aligned) || __GNUC_PREREQ(4, 7)
# define LLVM_ASSUME_ALIGNED(p, a) __builtin_assume_aligned(p, a)
#elif defined(LLVM_BUILTIN_UNREACHABLE)
// As of today, clang does not support __builtin_assume_aligned.
# define LLVM_ASSUME_ALIGNED(p, a) \
(((uintptr_t(p) % (a)) == 0) ? (p) : (LLVM_BUILTIN_UNREACHABLE, (p)))
#else
# define LLVM_ASSUME_ALIGNED(p, a) (p)
#endif
/// \macro LLVM_FUNCTION_NAME
/// \brief Expands to __func__ on compilers which support it. Otherwise,
/// expands to a compiler-dependent replacement.
#if defined(_MSC_VER)
# define LLVM_FUNCTION_NAME __FUNCTION__
#else
# define LLVM_FUNCTION_NAME __func__
#endif
#if defined(HAVE_SANITIZER_MSAN_INTERFACE_H)
# include <sanitizer/msan_interface.h>
#else
# define __msan_allocated_memory(p, size)
# define __msan_unpoison(p, size)
#endif
/// \macro LLVM_MEMORY_SANITIZER_BUILD
/// \brief Whether LLVM itself is built with MemorySanitizer instrumentation.
#if __has_feature(memory_sanitizer)
# define LLVM_MEMORY_SANITIZER_BUILD 1
#else
# define LLVM_MEMORY_SANITIZER_BUILD 0
#endif
/// \macro LLVM_ADDRESS_SANITIZER_BUILD
/// \brief Whether LLVM itself is built with AddressSanitizer instrumentation.
#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
# define LLVM_ADDRESS_SANITIZER_BUILD 1
#else
# define LLVM_ADDRESS_SANITIZER_BUILD 0
#endif
/// \macro LLVM_IS_UNALIGNED_ACCESS_FAST
/// \brief Is unaligned memory access fast on the host machine.
///
/// Don't specialize on alignment for platforms where unaligned memory accesses
/// generates the same code as aligned memory accesses for common types.
#if defined(_M_AMD64) || defined(_M_IX86) || defined(__amd64) || \
defined(__amd64__) || defined(__x86_64) || defined(__x86_64__) || \
defined(_X86_) || defined(__i386) || defined(__i386__)
# define LLVM_IS_UNALIGNED_ACCESS_FAST 1
#else
# define LLVM_IS_UNALIGNED_ACCESS_FAST 0
#endif
/// \macro LLVM_EXPLICIT
/// \brief Expands to explicit on compilers which support explicit conversion
/// operators. Otherwise expands to nothing.
#if __has_feature(cxx_explicit_conversions) || \
defined(__GXX_EXPERIMENTAL_CXX0X__) || LLVM_MSC_PREREQ(1800)
#define LLVM_EXPLICIT explicit
#else
#define LLVM_EXPLICIT
#endif
/// \macro LLVM_STATIC_ASSERT
/// \brief Expands to C/C++'s static_assert on compilers which support it.
#if __has_feature(cxx_static_assert) || \
defined(__GXX_EXPERIMENTAL_CXX0X__) || LLVM_MSC_PREREQ(1600)
# define LLVM_STATIC_ASSERT(expr, msg) static_assert(expr, msg)
#elif __has_feature(c_static_assert)
# define LLVM_STATIC_ASSERT(expr, msg) _Static_assert(expr, msg)
#elif __has_extension(c_static_assert)
# define LLVM_STATIC_ASSERT(expr, msg) LLVM_EXTENSION _Static_assert(expr, msg)
#else
# define LLVM_STATIC_ASSERT(expr, msg)
#endif
/// \macro LLVM_ENUM_INT_TYPE
/// \brief Expands to colon followed by the given integral type on compilers
/// which support C++11 strong enums. This can be used to make enums unsigned
/// with MSVC.
#if __has_feature(cxx_strong_enums) || LLVM_MSC_PREREQ(1600)
# define LLVM_ENUM_INT_TYPE(intty) : intty
#else
# define LLVM_ENUM_INT_TYPE(intty)
#endif
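/// Usage sketch (illustrative only): keeps the enum unsigned on MSVC.
///   enum Flags LLVM_ENUM_INT_TYPE(uint32_t) { kNone = 0, kRead = 1, kWrite = 2 };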
/// \brief Does the compiler support C++11 semantics for strongly typed forward
/// declared enums?
#if __has_feature(cxx_strong_enums) || LLVM_MSC_PREREQ(1700)
#define LLVM_HAS_STRONG_ENUMS 1
#else
#define LLVM_HAS_STRONG_ENUMS 0
#endif
/// \brief Does the compiler support generalized initializers (using braced
/// lists and std::initializer_list). While clang may claim it supports general
/// initializers, if we're using MSVC's headers, we might not have a usable
/// std::initializer_list type from the STL. Disable this for now.
#if __has_feature(cxx_generalized_initializers) && !defined(_MSC_VER)
#define LLVM_HAS_INITIALIZER_LISTS 1
#else
#define LLVM_HAS_INITIALIZER_LISTS 0
#endif
/// \brief Mark debug helper function definitions like dump() that should not be
/// stripped from debug builds.
// FIXME: Move this to a private config.h as it's not usable in public headers.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
#define LLVM_DUMP_METHOD LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED
#else
#define LLVM_DUMP_METHOD LLVM_ATTRIBUTE_NOINLINE
#endif
#endif
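// A sketch showing a few of these macros together (illustrative only; the
// class and include path are assumptions, not part of this commit):
#include "llvm/Support/Compiler.h"
class ScratchBuffer {
private:
  ScratchBuffer(const ScratchBuffer&) LLVM_DELETED_FUNCTION;  // non-copyable
  ScratchBuffer& operator=(const ScratchBuffer&) LLVM_DELETED_FUNCTION;
public:
  ScratchBuffer() : size_(0) {}
  LLVM_ATTRIBUTE_ALWAYS_INLINE unsigned size() const { return size_; }
  void Grow(unsigned n) {
    if (LLVM_UNLIKELY(n < size_))
      LLVM_BUILTIN_TRAP;  // shrinking is treated as a caller bug in this sketch
    size_ = n;
  }
private:
  unsigned size_;
};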

View File

@ -0,0 +1,626 @@
//===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains some functions that are useful for math stuff.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_MATHEXTRAS_H
#define LLVM_SUPPORT_MATHEXTRAS_H
#include "llvm/Support/Compiler.h"
#ifdef IGNORED_LLVM_XENIA
#include "llvm/Support/SwapByteOrder.h"
#endif // IGNORED_LLVM_XENIA
#include "llvm/Support/type_traits.h"
#include <cstring>
#ifdef _MSC_VER
#include <intrin.h>
#include <limits>
#endif
namespace llvm {
/// \brief The behavior an operation has on an input of 0.
enum ZeroBehavior {
/// \brief The returned value is undefined.
ZB_Undefined,
/// \brief The returned value is numeric_limits<T>::max()
ZB_Max,
/// \brief The returned value is numeric_limits<T>::digits
ZB_Width
};
/// \brief Count the number of 0's from the least significant bit to the most
/// significant bit, stopping at the first 1.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
/// valid arguments.
template <typename T>
typename enable_if_c<std::numeric_limits<T>::is_integer &&
!std::numeric_limits<T>::is_signed, std::size_t>::type
countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
(void)ZB;
if (!Val)
return std::numeric_limits<T>::digits;
if (Val & 0x1)
return 0;
// Bisection method.
std::size_t ZeroBits = 0;
T Shift = std::numeric_limits<T>::digits >> 1;
T Mask = std::numeric_limits<T>::max() >> Shift;
while (Shift) {
if ((Val & Mask) == 0) {
Val >>= Shift;
ZeroBits |= Shift;
}
Shift >>= 1;
Mask >>= Shift;
}
return ZeroBits;
}
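// Example: countTrailingZeros(0x0FA0u) == 5; under ZB_Width a zero input
// yields the bit width of T (e.g. 32 for uint32_t).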
// Disable signed.
template <typename T>
typename enable_if_c<std::numeric_limits<T>::is_integer &&
std::numeric_limits<T>::is_signed, std::size_t>::type
countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) LLVM_DELETED_FUNCTION;
#if __GNUC__ >= 4 || _MSC_VER
template <>
inline std::size_t countTrailingZeros<uint32_t>(uint32_t Val, ZeroBehavior ZB) {
if (ZB != ZB_Undefined && Val == 0)
return 32;
#if __has_builtin(__builtin_ctz) || __GNUC_PREREQ(4, 0)
return __builtin_ctz(Val);
#elif _MSC_VER
unsigned long Index;
_BitScanForward(&Index, Val);
return Index;
#endif
}
#if !defined(_MSC_VER) || defined(_M_X64)
template <>
inline std::size_t countTrailingZeros<uint64_t>(uint64_t Val, ZeroBehavior ZB) {
if (ZB != ZB_Undefined && Val == 0)
return 64;
#if __has_builtin(__builtin_ctzll) || __GNUC_PREREQ(4, 0)
return __builtin_ctzll(Val);
#elif _MSC_VER
unsigned long Index;
_BitScanForward64(&Index, Val);
return Index;
#endif
}
#endif
#endif
/// \brief Count the number of 0's from the most significant bit to the least
/// significant bit, stopping at the first 1.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
/// valid arguments.
template <typename T>
typename enable_if_c<std::numeric_limits<T>::is_integer &&
!std::numeric_limits<T>::is_signed, std::size_t>::type
countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
(void)ZB;
if (!Val)
return std::numeric_limits<T>::digits;
// Bisection method.
std::size_t ZeroBits = 0;
for (T Shift = std::numeric_limits<T>::digits >> 1; Shift; Shift >>= 1) {
T Tmp = Val >> Shift;
if (Tmp)
Val = Tmp;
else
ZeroBits |= Shift;
}
return ZeroBits;
}
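// Example: countLeadingZeros(0x00F000FFu) == 8 (the top byte is zero).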
// Disable signed.
template <typename T>
typename enable_if_c<std::numeric_limits<T>::is_integer &&
std::numeric_limits<T>::is_signed, std::size_t>::type
countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width) LLVM_DELETED_FUNCTION;
#if __GNUC__ >= 4 || _MSC_VER
template <>
inline std::size_t countLeadingZeros<uint32_t>(uint32_t Val, ZeroBehavior ZB) {
if (ZB != ZB_Undefined && Val == 0)
return 32;
#if __has_builtin(__builtin_clz) || __GNUC_PREREQ(4, 0)
return __builtin_clz(Val);
#elif _MSC_VER
unsigned long Index;
_BitScanReverse(&Index, Val);
return Index ^ 31;
#endif
}
#if !defined(_MSC_VER) || defined(_M_X64)
template <>
inline std::size_t countLeadingZeros<uint64_t>(uint64_t Val, ZeroBehavior ZB) {
if (ZB != ZB_Undefined && Val == 0)
return 64;
#if __has_builtin(__builtin_clzll) || __GNUC_PREREQ(4, 0)
return __builtin_clzll(Val);
#elif _MSC_VER
unsigned long Index;
_BitScanReverse64(&Index, Val);
return Index ^ 63;
#endif
}
#endif
#endif
/// \brief Get the index of the first set bit starting from the least
/// significant bit.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
/// valid arguments.
template <typename T>
typename enable_if_c<std::numeric_limits<T>::is_integer &&
!std::numeric_limits<T>::is_signed, T>::type
findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) {
if (ZB == ZB_Max && Val == 0)
return std::numeric_limits<T>::max();
return countTrailingZeros(Val, ZB_Undefined);
}
// Disable signed.
template <typename T>
typename enable_if_c<std::numeric_limits<T>::is_integer &&
std::numeric_limits<T>::is_signed, T>::type
findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) LLVM_DELETED_FUNCTION;
/// \brief Get the index of the last set bit starting from the least
/// significant bit.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
/// valid arguments.
template <typename T>
typename enable_if_c<std::numeric_limits<T>::is_integer &&
!std::numeric_limits<T>::is_signed, T>::type
findLastSet(T Val, ZeroBehavior ZB = ZB_Max) {
if (ZB == ZB_Max && Val == 0)
return std::numeric_limits<T>::max();
// Use ^ instead of - because both gcc and llvm can remove the associated ^
// in the __builtin_clz intrinsic on x86.
return countLeadingZeros(Val, ZB_Undefined) ^
(std::numeric_limits<T>::digits - 1);
}
// Disable signed.
template <typename T>
typename enable_if_c<std::numeric_limits<T>::is_integer &&
std::numeric_limits<T>::is_signed, T>::type
findLastSet(T Val, ZeroBehavior ZB = ZB_Max) LLVM_DELETED_FUNCTION;
/// \brief Macro compressed bit reversal table for 256 bits.
///
/// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
static const unsigned char BitReverseTable256[256] = {
#define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64
#define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16)
#define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4)
R6(0), R6(2), R6(1), R6(3)
#undef R2
#undef R4
#undef R6
};
/// \brief Reverse the bits in \p Val.
template <typename T>
T reverseBits(T Val) {
unsigned char in[sizeof(Val)];
unsigned char out[sizeof(Val)];
std::memcpy(in, &Val, sizeof(Val));
for (unsigned i = 0; i < sizeof(Val); ++i)
out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]];
std::memcpy(&Val, out, sizeof(Val));
return Val;
}
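// Example: reverseBits<uint8_t>(0x01) == 0x80. Bytes are mirrored as well as
// the bits within each byte, giving a full bit-reversal of T.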
// NOTE: The following support functions use the _32/_64 extensions instead of
// type overloading so that signed and unsigned integers can be used without
// ambiguity.
/// Hi_32 - This function returns the high 32 bits of a 64 bit value.
inline uint32_t Hi_32(uint64_t Value) {
return static_cast<uint32_t>(Value >> 32);
}
/// Lo_32 - This function returns the low 32 bits of a 64 bit value.
inline uint32_t Lo_32(uint64_t Value) {
return static_cast<uint32_t>(Value);
}
/// isInt - Checks if an integer fits into the given bit width.
template<unsigned N>
inline bool isInt(int64_t x) {
return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
}
// Template specializations to get better code for common cases.
template<>
inline bool isInt<8>(int64_t x) {
return static_cast<int8_t>(x) == x;
}
template<>
inline bool isInt<16>(int64_t x) {
return static_cast<int16_t>(x) == x;
}
template<>
inline bool isInt<32>(int64_t x) {
return static_cast<int32_t>(x) == x;
}
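// Example: isInt<8>(-128) == true, isInt<8>(128) == false.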
/// isShiftedInt<N,S> - Checks if a signed integer is an N bit number shifted
/// left by S.
template<unsigned N, unsigned S>
inline bool isShiftedInt(int64_t x) {
return isInt<N+S>(x) && (x % (1<<S) == 0);
}
/// isUInt - Checks if an unsigned integer fits into the given bit width.
template<unsigned N>
inline bool isUInt(uint64_t x) {
return N >= 64 || x < (UINT64_C(1)<<(N));
}
// Template specializations to get better code for common cases.
template<>
inline bool isUInt<8>(uint64_t x) {
return static_cast<uint8_t>(x) == x;
}
template<>
inline bool isUInt<16>(uint64_t x) {
return static_cast<uint16_t>(x) == x;
}
template<>
inline bool isUInt<32>(uint64_t x) {
return static_cast<uint32_t>(x) == x;
}
/// isShiftedUInt<N,S> - Checks if an unsigned integer is an N bit number shifted
/// left by S.
template<unsigned N, unsigned S>
inline bool isShiftedUInt(uint64_t x) {
return isUInt<N+S>(x) && (x % (1<<S) == 0);
}
/// isUIntN - Checks if an unsigned integer fits into the given (dynamic)
/// bit width.
inline bool isUIntN(unsigned N, uint64_t x) {
return x == (x & (~0ULL >> (64 - N)));
}
/// isIntN - Checks if a signed integer fits into the given (dynamic)
/// bit width.
inline bool isIntN(unsigned N, int64_t x) {
return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
}
/// isMask_32 - This function returns true if the argument is a sequence of ones
/// starting at the least significant bit with the remainder zero (32 bit
/// version). Ex. isMask_32(0x0000FFFFU) == true.
inline bool isMask_32(uint32_t Value) {
return Value && ((Value + 1) & Value) == 0;
}
/// isMask_64 - This function returns true if the argument is a sequence of ones
/// starting at the least significant bit with the remainder zero (64 bit
/// version).
inline bool isMask_64(uint64_t Value) {
return Value && ((Value + 1) & Value) == 0;
}
/// isShiftedMask_32 - This function returns true if the argument contains a
/// sequence of ones with the remainder zero (32 bit version.)
/// Ex. isShiftedMask_32(0x0000FF00U) == true.
inline bool isShiftedMask_32(uint32_t Value) {
return isMask_32((Value - 1) | Value);
}
/// isShiftedMask_64 - This function returns true if the argument contains a
/// sequence of ones with the remainder zero (64 bit version.)
inline bool isShiftedMask_64(uint64_t Value) {
return isMask_64((Value - 1) | Value);
}
/// isPowerOf2_32 - This function returns true if the argument is a power of
/// two > 0. Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
inline bool isPowerOf2_32(uint32_t Value) {
return Value && !(Value & (Value - 1));
}
/// isPowerOf2_64 - This function returns true if the argument is a power of two
/// > 0 (64 bit edition.)
inline bool isPowerOf2_64(uint64_t Value) {
return Value && !(Value & (Value - int64_t(1L)));
}
#ifdef IGNORED_LLVM_XENIA
/// ByteSwap_16 - This function returns a byte-swapped representation of the
/// 16-bit argument, Value.
inline uint16_t ByteSwap_16(uint16_t Value) {
return sys::SwapByteOrder_16(Value);
}
/// ByteSwap_32 - This function returns a byte-swapped representation of the
/// 32-bit argument, Value.
inline uint32_t ByteSwap_32(uint32_t Value) {
return sys::SwapByteOrder_32(Value);
}
/// ByteSwap_64 - This function returns a byte-swapped representation of the
/// 64-bit argument, Value.
inline uint64_t ByteSwap_64(uint64_t Value) {
return sys::SwapByteOrder_64(Value);
}
#endif // IGNORED_LLVM_XENIA
/// CountLeadingOnes_32 - This function performs the operation of
/// counting the number of ones from the most significant bit to the first zero
/// bit. Ex. CountLeadingOnes_32(0xFF0FFF00) == 8.
/// Returns 32 if the word is all ones.
inline unsigned CountLeadingOnes_32(uint32_t Value) {
return countLeadingZeros(~Value);
}
/// CountLeadingOnes_64 - This function performs the operation
/// of counting the number of ones from the most significant bit to the first
/// zero bit (64 bit edition.)
/// Returns 64 if the word is all ones.
inline unsigned CountLeadingOnes_64(uint64_t Value) {
return countLeadingZeros(~Value);
}
/// CountTrailingOnes_32 - This function performs the operation of
/// counting the number of ones from the least significant bit to the first zero
/// bit. Ex. CountTrailingOnes_32(0x00FF00FF) == 8.
/// Returns 32 if the word is all ones.
inline unsigned CountTrailingOnes_32(uint32_t Value) {
return countTrailingZeros(~Value);
}
/// CountTrailingOnes_64 - This function performs the operation
/// of counting the number of ones from the least significant bit to the first
/// zero bit (64 bit edition.)
/// Returns 64 if the word is all ones.
inline unsigned CountTrailingOnes_64(uint64_t Value) {
return countTrailingZeros(~Value);
}
/// CountPopulation_32 - This function counts the number of set bits in a value.
/// Ex. CountPopulation_32(0xF000F000) == 8
/// Returns 0 if the word is zero.
inline unsigned CountPopulation_32(uint32_t Value) {
#if __GNUC__ >= 4
return __builtin_popcount(Value);
#else
uint32_t v = Value - ((Value >> 1) & 0x55555555);
v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
return ((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24;
#endif
}
/// CountPopulation_64 - This function counts the number of set bits in a value
/// (64 bit edition.)
inline unsigned CountPopulation_64(uint64_t Value) {
#if __GNUC__ >= 4
return __builtin_popcountll(Value);
#else
uint64_t v = Value - ((Value >> 1) & 0x5555555555555555ULL);
v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56);
#endif
}
/// Log2_32 - This function returns the floor log base 2 of the specified value,
/// -1 if the value is zero. (32 bit edition.)
/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
inline unsigned Log2_32(uint32_t Value) {
return 31 - countLeadingZeros(Value);
}
/// Log2_64 - This function returns the floor log base 2 of the specified value,
/// -1 if the value is zero. (64 bit edition.)
inline unsigned Log2_64(uint64_t Value) {
return 63 - countLeadingZeros(Value);
}
/// Log2_32_Ceil - This function returns the ceil log base 2 of the specified
/// value, 32 if the value is zero. (32 bit edition).
/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
inline unsigned Log2_32_Ceil(uint32_t Value) {
return 32 - countLeadingZeros(Value - 1);
}
/// Log2_64_Ceil - This function returns the ceil log base 2 of the specified
/// value, 64 if the value is zero. (64 bit edition.)
inline unsigned Log2_64_Ceil(uint64_t Value) {
return 64 - countLeadingZeros(Value - 1);
}
/// GreatestCommonDivisor64 - Return the greatest common divisor of the two
/// values using Euclid's algorithm.
inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) {
while (B) {
uint64_t T = B;
B = A % B;
A = T;
}
return A;
}
/// BitsToDouble - This function takes a 64-bit integer and returns the bit
/// equivalent double.
inline double BitsToDouble(uint64_t Bits) {
union {
uint64_t L;
double D;
} T;
T.L = Bits;
return T.D;
}
/// BitsToFloat - This function takes a 32-bit integer and returns the bit
/// equivalent float.
inline float BitsToFloat(uint32_t Bits) {
union {
uint32_t I;
float F;
} T;
T.I = Bits;
return T.F;
}
/// DoubleToBits - This function takes a double and returns the bit
/// equivalent 64-bit integer. Note that copying doubles around
/// changes the bits of NaNs on some hosts, notably x86, so this
/// routine cannot be used if these bits are needed.
inline uint64_t DoubleToBits(double Double) {
union {
uint64_t L;
double D;
} T;
T.D = Double;
return T.L;
}
/// FloatToBits - This function takes a float and returns the bit
/// equivalent 32-bit integer. Note that copying floats around
/// changes the bits of NaNs on some hosts, notably x86, so this
/// routine cannot be used if these bits are needed.
inline uint32_t FloatToBits(float Float) {
union {
uint32_t I;
float F;
} T;
T.F = Float;
return T.I;
}
/// Platform-independent wrappers for the C99 isnan() function.
int IsNAN(float f);
int IsNAN(double d);
/// Platform-independent wrappers for the C99 isinf() function.
int IsInf(float f);
int IsInf(double d);
/// MinAlign - A and B are either alignments or offsets. Return the minimum
/// alignment that may be assumed after adding the two together.
inline uint64_t MinAlign(uint64_t A, uint64_t B) {
// The largest power of 2 that divides both A and B.
//
// Replace "-Value" by "1+~Value" in the following commented code to avoid
// MSVC warning C4146
// return (A | B) & -(A | B);
return (A | B) & (1 + ~(A | B));
}
/// NextPowerOf2 - Returns the next power of two (in 64-bits)
/// that is strictly greater than A. Returns zero on overflow.
inline uint64_t NextPowerOf2(uint64_t A) {
A |= (A >> 1);
A |= (A >> 2);
A |= (A >> 4);
A |= (A >> 8);
A |= (A >> 16);
A |= (A >> 32);
return A + 1;
}
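// Example: NextPowerOf2(5) == 8 and NextPowerOf2(8) == 16; the result is
// always strictly greater than the input.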
/// Returns the power of two which is less than or equal to the given value.
/// Essentially, it is a floor operation across the domain of powers of two.
inline uint64_t PowerOf2Floor(uint64_t A) {
if (!A) return 0;
return 1ull << (63 - countLeadingZeros(A, ZB_Undefined));
}
/// Returns the next integer (mod 2**64) that is greater than or equal to
/// \p Value and is a multiple of \p Align. \p Align must be non-zero.
///
/// Examples:
/// \code
/// RoundUpToAlignment(5, 8) = 8
/// RoundUpToAlignment(17, 8) = 24
/// RoundUpToAlignment(~0LL, 8) = 0
/// \endcode
inline uint64_t RoundUpToAlignment(uint64_t Value, uint64_t Align) {
return ((Value + Align - 1) / Align) * Align;
}
/// Returns the offset to the next integer (mod 2**64) that is greater than
/// or equal to \p Value and is a multiple of \p Align. \p Align must be
/// non-zero.
inline uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) {
return RoundUpToAlignment(Value, Align) - Value;
}
/// abs64 - absolute value of a 64-bit int. Not all environments support
/// "abs" on whatever their name for the 64-bit int type is. The absolute
/// value of the largest negative number is undefined, as with "abs".
inline int64_t abs64(int64_t x) {
return (x < 0) ? -x : x;
}
/// SignExtend32 - Sign extend B-bit number x to 32-bit int.
/// Usage int32_t r = SignExtend32<5>(x);
template <unsigned B> inline int32_t SignExtend32(uint32_t x) {
return int32_t(x << (32 - B)) >> (32 - B);
}
/// \brief Sign extend number in the bottom B bits of X to a 32-bit int.
/// Requires 0 < B <= 32.
inline int32_t SignExtend32(uint32_t X, unsigned B) {
return int32_t(X << (32 - B)) >> (32 - B);
}
/// SignExtend64 - Sign extend B-bit number x to 64-bit int.
/// Usage int64_t r = SignExtend64<5>(x);
template <unsigned B> inline int64_t SignExtend64(uint64_t x) {
return int64_t(x << (64 - B)) >> (64 - B);
}
/// \brief Sign extend number in the bottom B bits of X to a 64-bit int.
/// Requires 0 < B <= 64.
inline int64_t SignExtend64(uint64_t X, unsigned B) {
return int64_t(X << (64 - B)) >> (64 - B);
}
#if defined(_MSC_VER)
// Visual Studio defines the HUGE_VAL class of macros using purposeful
// constant arithmetic overflow, which it then warns on when encountered.
const float huge_valf = std::numeric_limits<float>::infinity();
#else
const float huge_valf = HUGE_VALF;
#endif
} // End llvm namespace
#endif
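// Quick sanity checks for the helpers above (illustrative only; assumes the
// header is reachable at this include path):
#include "llvm/Support/MathExtras.h"
#include <cassert>
static void MathExtrasExamples() {
  assert(llvm::isPowerOf2_32(64u));
  assert(llvm::Log2_32(32u) == 5);
  assert(llvm::NextPowerOf2(5u) == 8);
  assert(llvm::RoundUpToAlignment(17, 8) == 24);
  assert(llvm::OffsetToAlignment(17, 8) == 7);
  assert(llvm::SignExtend32<5>(0x1Fu) == -1);  // 5-bit all-ones sign-extends to -1
}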

View File

@ -0,0 +1,244 @@
//===- llvm/Support/type_traits.h - Simplified type traits ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides a template class that determines if a type is a class or
// not. The basic mechanism, based on passing a pointer to a member function
// taking zero arguments to an overloaded helper, was "boosted" from the boost
// type_traits library. See http://www.boost.org/ for all the gory details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_TYPE_TRAITS_H
#define LLVM_SUPPORT_TYPE_TRAITS_H
//#include "llvm/Support/DataTypes.h"
#include <cstddef>
#include <utility>
#ifndef __has_feature
#define LLVM_DEFINED_HAS_FEATURE
#define __has_feature(x) 0
#endif
// This is actually the conforming implementation which works with abstract
// classes. However, enough compilers have trouble with it that most will use
// the one in boost/type_traits/object_traits.hpp. This implementation actually
// works with VC7.0, but other interactions seem to fail when we use it.
namespace llvm {
namespace dont_use
{
// These two functions should never be used. They are helpers to
// the is_class template below. They cannot be located inside
// is_class because doing so causes at least GCC to think that
// the value of the "value" enumerator is not constant. Placing
// them out here (for some strange reason) allows the sizeof
// operator against them to magically be constant. This is
// important to make the is_class<T>::value idiom zero cost. It
// evaluates to a constant 1 or 0 depending on whether the
// parameter T is a class or not (respectively).
template<typename T> char is_class_helper(void(T::*)());
template<typename T> double is_class_helper(...);
}
template <typename T>
struct is_class
{
// is_class<> metafunction due to Paul Mensonides (leavings@attbi.com). For
// more details:
// http://groups.google.com/groups?hl=en&selm=000001c1cc83%24e154d5e0%247772e50c%40c161550a&rnum=1
public:
static const bool value =
sizeof(char) == sizeof(dont_use::is_class_helper<T>(0));
};
/// isPodLike - This is a type trait that is used to determine whether a given
/// type can be copied around with memcpy instead of running ctors etc.
template <typename T>
struct isPodLike {
#if __has_feature(is_trivially_copyable)
// If the compiler supports the is_trivially_copyable trait use it, as it
// matches the definition of isPodLike closely.
static const bool value = __is_trivially_copyable(T);
#else
// If we don't know anything else, we can (at least) assume that all non-class
// types are PODs.
static const bool value = !is_class<T>::value;
#endif
};
// std::pair's are pod-like if their elements are.
template<typename T, typename U>
struct isPodLike<std::pair<T, U> > {
static const bool value = isPodLike<T>::value && isPodLike<U>::value;
};
template <class T, T v>
struct integral_constant {
typedef T value_type;
static const value_type value = v;
typedef integral_constant<T,v> type;
operator value_type() { return value; }
};
typedef integral_constant<bool, true> true_type;
typedef integral_constant<bool, false> false_type;
/// \brief Metafunction that determines whether the two given types are
/// equivalent.
template<typename T, typename U> struct is_same : public false_type {};
template<typename T> struct is_same<T, T> : public true_type {};
/// \brief Metafunction that removes const qualification from a type.
template <typename T> struct remove_const { typedef T type; };
template <typename T> struct remove_const<const T> { typedef T type; };
/// \brief Metafunction that removes volatile qualification from a type.
template <typename T> struct remove_volatile { typedef T type; };
template <typename T> struct remove_volatile<volatile T> { typedef T type; };
/// \brief Metafunction that removes both const and volatile qualification from
/// a type.
template <typename T> struct remove_cv {
typedef typename remove_const<typename remove_volatile<T>::type>::type type;
};
/// \brief Helper to implement is_integral metafunction.
template <typename T> struct is_integral_impl : false_type {};
template <> struct is_integral_impl< bool> : true_type {};
template <> struct is_integral_impl< char> : true_type {};
template <> struct is_integral_impl< signed char> : true_type {};
template <> struct is_integral_impl<unsigned char> : true_type {};
template <> struct is_integral_impl< wchar_t> : true_type {};
template <> struct is_integral_impl< short> : true_type {};
template <> struct is_integral_impl<unsigned short> : true_type {};
template <> struct is_integral_impl< int> : true_type {};
template <> struct is_integral_impl<unsigned int> : true_type {};
template <> struct is_integral_impl< long> : true_type {};
template <> struct is_integral_impl<unsigned long> : true_type {};
template <> struct is_integral_impl< long long> : true_type {};
template <> struct is_integral_impl<unsigned long long> : true_type {};
/// \brief Metafunction that determines whether the given type is an integral
/// type.
template <typename T>
struct is_integral : is_integral_impl<T> {};
/// \brief Metafunction to remove reference from a type.
template <typename T> struct remove_reference { typedef T type; };
template <typename T> struct remove_reference<T&> { typedef T type; };
/// \brief Metafunction that determines whether the given type is a pointer
/// type.
template <typename T> struct is_pointer : false_type {};
template <typename T> struct is_pointer<T*> : true_type {};
template <typename T> struct is_pointer<T* const> : true_type {};
template <typename T> struct is_pointer<T* volatile> : true_type {};
template <typename T> struct is_pointer<T* const volatile> : true_type {};
/// \brief Metafunction that determines whether the given type is a reference.
template <typename T> struct is_reference : false_type {};
template <typename T> struct is_reference<T&> : true_type {};
/// \brief Metafunction that determines whether the given type is either an
/// integral type or an enumeration type.
///
/// Note that this accepts potentially more integral types than we whitelist
/// above for is_integral because it is based on merely being convertible
/// implicitly to an integral type.
template <typename T> class is_integral_or_enum {
// Provide an overload which can be called with anything implicitly
// convertible to an unsigned long long. This should catch integer types and
// enumeration types at least. We blacklist classes with conversion operators
// below.
static double check_int_convertible(unsigned long long);
static char check_int_convertible(...);
typedef typename remove_reference<T>::type UnderlyingT;
static UnderlyingT &nonce_instance;
public:
static const bool
value = (!is_class<UnderlyingT>::value && !is_pointer<UnderlyingT>::value &&
!is_same<UnderlyingT, float>::value &&
!is_same<UnderlyingT, double>::value &&
sizeof(char) != sizeof(check_int_convertible(nonce_instance)));
};
// enable_if_c - Enable/disable a template based on a metafunction
template<bool Cond, typename T = void>
struct enable_if_c {
typedef T type;
};
template<typename T> struct enable_if_c<false, T> { };
// enable_if - Enable/disable a template based on a metafunction
template<typename Cond, typename T = void>
struct enable_if : public enable_if_c<Cond::value, T> { };
namespace dont_use {
template<typename Base> char base_of_helper(const volatile Base*);
template<typename Base> double base_of_helper(...);
}
/// is_base_of - Metafunction to determine whether one type is a base class of
/// (or identical to) another type.
template<typename Base, typename Derived>
struct is_base_of {
static const bool value
= is_class<Base>::value && is_class<Derived>::value &&
sizeof(char) == sizeof(dont_use::base_of_helper<Base>((Derived*)0));
};
// remove_pointer - Metafunction to turn Foo* into Foo. Defined in
// C++0x [meta.trans.ptr].
template <typename T> struct remove_pointer { typedef T type; };
template <typename T> struct remove_pointer<T*> { typedef T type; };
template <typename T> struct remove_pointer<T*const> { typedef T type; };
template <typename T> struct remove_pointer<T*volatile> { typedef T type; };
template <typename T> struct remove_pointer<T*const volatile> {
typedef T type; };
// If T is a pointer, just return it. If it is not, return T&.
template<typename T, typename Enable = void>
struct add_lvalue_reference_if_not_pointer { typedef T &type; };
template<typename T>
struct add_lvalue_reference_if_not_pointer<T,
typename enable_if<is_pointer<T> >::type> {
typedef T type;
};
// If T is a pointer to X, return a pointer to const X. If it is not, return
// const T.
template<typename T, typename Enable = void>
struct add_const_past_pointer { typedef const T type; };
template<typename T>
struct add_const_past_pointer<T, typename enable_if<is_pointer<T> >::type> {
typedef const typename remove_pointer<T>::type *type;
};
template <bool, typename T, typename F>
struct conditional { typedef T type; };
template <typename T, typename F>
struct conditional<false, T, F> { typedef F type; };
}
#ifdef LLVM_DEFINED_HAS_FEATURE
#undef __has_feature
#endif
#endif
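// A compile-time sketch of these traits (illustrative only; the names below
// are assumptions, not part of this commit):
#include "llvm/Support/type_traits.h"
// Overload enabled only for integral T (SFINAE through enable_if_c).
template <typename T>
typename llvm::enable_if_c<llvm::is_integral<T>::value, T>::type
Twice(T x) { return x + x; }
// C++03-friendly compile-time checks via the negative-array-size trick.
typedef char AssertIntIsIntegral[llvm::is_integral<int>::value ? 1 : -1];
typedef char AssertPtrIsPodLike[llvm::isPodLike<int*>::value ? 1 : -1];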

View File

@ -5,6 +5,7 @@
'third_party/beaengine.gypi',
'third_party/gflags.gypi',
'third_party/jansson.gypi',
'third_party/llvm.gypi',
'third_party/sparsehash.gypi',
'third_party/wslay.gypi',
],
@ -187,10 +188,12 @@
'dependencies': [
'beaengine',
'gflags',
'llvm',
],
'export_dependent_settings': [
'beaengine',
'gflags',
'llvm',
],
'direct_dependent_settings': {