Mostly fixing up alloy clang build.

This commit is contained in:
Ben Vanik 2014-08-18 22:01:55 -07:00
parent 2356164525
commit d578f5d0f0
16 changed files with 64 additions and 56 deletions

View File

@@ -83,7 +83,7 @@ int IVMAssembler::Assemble(FunctionInfo* symbol_info, HIRBuilder* builder,
     stack_offset += type_size;
   }
   // Ensure 16b alignment.
-  stack_offset = poly::align(stack_offset, 16ull);
+  stack_offset = poly::align(stack_offset, static_cast<size_t>(16));
   ctx.stack_size = stack_offset;
 
   auto block = builder->first_block();
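
The 16ull → static_cast<size_t>(16) change matters because poly::align presumably deduces a single type from both arguments, and on LP64 clang size_t is unsigned long, not unsigned long long, so the mixed call fails to deduce. A minimal sketch of the failure, assuming a single-type template (the real helper lives in poly/math.h and may differ):

    #include <cstddef>

    namespace poly {
    // Hypothetical single-type align: round up to a power-of-two boundary.
    template <typename T>
    T align(T value, T alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }
    }  // namespace poly

    int main() {
      size_t stack_offset = 40;
      // poly::align(stack_offset, 16ull);  // LP64 clang: T deduced as both
      //                                    // unsigned long and unsigned long long
      stack_offset = poly::align(stack_offset, static_cast<size_t>(16));
      return stack_offset == 48 ? 0 : 1;
    }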

View File

@@ -102,7 +102,8 @@ uint32_t AllocConstant(TranslationContext& ctx, Value* value) {
 uint32_t AllocLabel(TranslationContext& ctx, Label* label) {
   // If it's a back-branch to an already tagged label avoid setting up
   // a reference.
-  uint32_t value = reinterpret_cast<uint32_t>(label->tag);
+  uint32_t value =
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(label->tag));
   if (value & 0x80000000) {
     // Already set.
     return AllocConstant(ctx, value & ~0x80000000);
@@ -125,7 +126,7 @@ uint32_t AllocLabel(TranslationContext& ctx, Label* label) {
 uint32_t AllocDynamicRegister(TranslationContext& ctx, Value* value) {
   if (value->flags & VALUE_IS_ALLOCATED) {
-    return reinterpret_cast<uint32_t>(value->tag);
+    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(value->tag));
   } else {
     value->flags |= VALUE_IS_ALLOCATED;
     auto reg = ctx.register_count++;
@@ -2334,13 +2335,13 @@ int Translate_DID_SATURATE(TranslationContext& ctx, Instr* i) {
   }                                                            \
   return IA_NEXT;
 
-uint32_t IntCode_VECTOR_COMPARE_EQ_I8(IntCodeState& ics, const IntCode* i){
+uint32_t IntCode_VECTOR_COMPARE_EQ_I8(IntCodeState& ics, const IntCode* i) {
     VECTOR_COMPARER(uint8_t, b16, b16, 16, == )};
-uint32_t IntCode_VECTOR_COMPARE_EQ_I16(IntCodeState& ics, const IntCode* i){
+uint32_t IntCode_VECTOR_COMPARE_EQ_I16(IntCodeState& ics, const IntCode* i) {
    VECTOR_COMPARER(uint16_t, s8, s8, 8, == )};
-uint32_t IntCode_VECTOR_COMPARE_EQ_I32(IntCodeState& ics, const IntCode* i){
+uint32_t IntCode_VECTOR_COMPARE_EQ_I32(IntCodeState& ics, const IntCode* i) {
    VECTOR_COMPARER(uint32_t, i4, i4, 4, == )};
-uint32_t IntCode_VECTOR_COMPARE_EQ_F32(IntCodeState& ics, const IntCode* i){
+uint32_t IntCode_VECTOR_COMPARE_EQ_F32(IntCodeState& ics, const IntCode* i) {
    VECTOR_COMPARER(float, f4, i4, 4, == )};
 int Translate_VECTOR_COMPARE_EQ(TranslationContext& ctx, Instr* i) {
   static IntCodeFn fns[] = {
@@ -2352,13 +2353,13 @@ int Translate_VECTOR_COMPARE_EQ(TranslationContext& ctx, Instr* i) {
   return DispatchToC(ctx, i, fns[i->flags]);
 }
 
-uint32_t IntCode_VECTOR_COMPARE_SGT_I8(IntCodeState& ics, const IntCode* i){
+uint32_t IntCode_VECTOR_COMPARE_SGT_I8(IntCodeState& ics, const IntCode* i) {
    VECTOR_COMPARER(int8_t, b16, b16, 16, > )};
-uint32_t IntCode_VECTOR_COMPARE_SGT_I16(IntCodeState& ics, const IntCode* i){
+uint32_t IntCode_VECTOR_COMPARE_SGT_I16(IntCodeState& ics, const IntCode* i) {
    VECTOR_COMPARER(int16_t, s8, s8, 8, > )};
-uint32_t IntCode_VECTOR_COMPARE_SGT_I32(IntCodeState& ics, const IntCode* i){
+uint32_t IntCode_VECTOR_COMPARE_SGT_I32(IntCodeState& ics, const IntCode* i) {
    VECTOR_COMPARER(int32_t, i4, i4, 4, > )};
-uint32_t IntCode_VECTOR_COMPARE_SGT_F32(IntCodeState& ics, const IntCode* i){
+uint32_t IntCode_VECTOR_COMPARE_SGT_F32(IntCodeState& ics, const IntCode* i) {
    VECTOR_COMPARER(float, f4, i4, 4, > )};
 int Translate_VECTOR_COMPARE_SGT(TranslationContext& ctx, Instr* i) {
   static IntCodeFn fns[] = {
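
Both cast changes above work around the same clang error: reinterpret_cast from a 64-bit pointer straight to uint32_t is rejected ("cast from pointer to smaller type loses information"), where MSVC merely warned. Going through uintptr_t and narrowing with static_cast is the portable idiom. A small sketch with a hypothetical Label stand-in, mirroring the 0x80000000 "already tagged" convention in AllocLabel:

    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-in for the HIR Label; only the tag field matters here.
    struct Label {
      void* tag;
    };

    uint32_t TagToIndex(const Label* label) {
      // Legal everywhere: pointer -> uintptr_t -> explicit narrowing.
      return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(label->tag));
    }

    int main() {
      Label label{};
      // Store a small ordinal with the high bit set as the "already tagged"
      // marker, as AllocLabel does above.
      label.tag = reinterpret_cast<void*>(uintptr_t{42} | 0x80000000u);
      uint32_t value = TagToIndex(&label);
      assert(value & 0x80000000);
      assert((value & ~0x80000000) == 42);
      return 0;
    }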

View File

@@ -9,10 +9,11 @@
 #include <alloy/backend/x64/x64_code_cache.h>
 
-#include <poly/assert.h>
 #include <sys/mman.h>
 
+#include <poly/assert.h>
+#include <poly/math.h>
+
 namespace alloy {
 namespace backend {
 namespace x64 {
@@ -51,7 +52,7 @@ void* X64CodeCache::PlaceCode(void* machine_code, size_t code_size,
   // Always move the code to land on 16b alignment. We do this by rounding up
   // to 16b so that all offsets are aligned.
-  code_size = XEROUNDUP(code_size, 16);
+  code_size = poly::round_up(code_size, 16);
 
   lock_.lock();
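
XEROUNDUP was one of the old xenia macros; poly::round_up is its replacement now that poly/math.h is included here. A plausible implementation, shown only to illustrate the call (the real helper may differ):

    #include <cassert>
    #include <cstddef>

    // Hypothetical round_up: pad value to the next multiple of `multiple`.
    template <typename T>
    T round_up(T value, T multiple) {
      return ((value + multiple - 1) / multiple) * multiple;
    }

    int main() {
      // A 100-byte function is padded to 112 bytes so the next placement in
      // the code cache starts on a 16b boundary.
      assert(round_up<size_t>(100, 16) == 112);
      return 0;
    }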

View File

@@ -127,7 +127,7 @@ int X64Emitter::Emit(HIRBuilder* builder, size_t& out_stack_size) {
   }
 
   // Ensure 16b alignment.
   stack_offset -= StackLayout::GUEST_STACK_SIZE;
-  stack_offset = poly::align(stack_offset, 16ull);
+  stack_offset = poly::align(stack_offset, static_cast<size_t>(16));
 
   // Function prolog.
   // Must be 16b aligned.
@@ -242,7 +242,7 @@ void X64Emitter::EmitTraceSource(const Instr* instr) {
   uint8_t dest_reg_1 = instr->flags >> 8;
   xdb::protocol::EventType event_type;
-  size_t event_size;
+  size_t event_size = 0;
   if (dest_reg_0 == 100) {
     event_type = xdb::protocol::EventType::INSTR;
     event_size = sizeof(xdb::protocol::InstrEvent);
@@ -269,6 +269,7 @@ void X64Emitter::EmitTraceSource(const Instr* instr) {
       event_size = sizeof(xdb::protocol::InstrEventR8R8);
     }
   }
+  assert_not_zero(event_size);
 
   mov(rax, trace_base);
   mov(r8d, static_cast<uint32_t>(event_size));
@@ -289,6 +290,7 @@ void X64Emitter::EmitTraceSource(const Instr* instr) {
   mov(word[r8 + 2], ax);
 
   switch (event_type) {
+    default:
     case xdb::protocol::EventType::INSTR:
       break;
     case xdb::protocol::EventType::INSTR_R8:
@@ -330,8 +332,7 @@ void X64Emitter::EmitTraceSourceAppendValue(const Value* value,
 
 void X64Emitter::EmitGetCurrentThreadId() {
   // rcx must point to context. We could fetch from the stack if needed.
-  mov(ax,
-      word[rcx + runtime_->frontend()->context_info()->thread_id_offset()]);
+  mov(ax, word[rcx + runtime_->frontend()->context_info()->thread_id_offset()]);
 }
 
 void X64Emitter::EmitTraceUserCallReturn() {
@@ -345,7 +346,7 @@ void X64Emitter::EmitTraceUserCallReturn() {
   lock();
   xadd(qword[rax], r8);
   mov(rax, static_cast<uint64_t>(xdb::protocol::EventType::USER_CALL_RETURN) |
-           (static_cast<uint64_t>(0) << 8) | (0ull << 32));
+               (static_cast<uint64_t>(0) << 8) | (0ull << 32));
   mov(qword[r8], rax);
   EmitGetCurrentThreadId();
   mov(word[r8 + 2], ax);
@@ -645,8 +646,6 @@ void X64Emitter::CallExtern(const hir::Instr* instr,
           static_cast<uint32_t>(sizeof(xdb::protocol::KernelCallReturnEvent)));
   lock();
   xadd(qword[rax], r8);
-  uint32_t module_id = 0;
-  uint32_t ordinal = 0;
   mov(rax,
       static_cast<uint64_t>(xdb::protocol::EventType::KERNEL_CALL_RETURN) |
          (static_cast<uint64_t>(0) << 8) | (0));
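
The size_t event_size = 0; default plus the assert_not_zero guard addresses clang's -Wsometimes-uninitialized: if no branch of the if/else chain matched, event_size was read unset. A minimal sketch of the pattern with illustrative names (not the real xdb protocol types):

    #include <cassert>
    #include <cstddef>

    // If dest_reg_0 matches no branch, event_size would otherwise be read
    // uninitialized; clang's -Wsometimes-uninitialized flags exactly this.
    size_t EventSizeFor(int dest_reg_0) {
      size_t event_size = 0;          // default, as in the diff above
      if (dest_reg_0 == 100) {
        event_size = sizeof(int);     // stand-in for sizeof(InstrEvent)
      } else if (dest_reg_0 < 32) {
        event_size = sizeof(long);    // stand-in for sizeof(InstrEventR8)
      }
      assert(event_size != 0);        // mirrors assert_not_zero(event_size)
      return event_size;
    }

    int main() { return EventSizeFor(100) == sizeof(int) ? 0 : 1; }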

View File

@@ -4491,26 +4491,24 @@ EMITTER_OPCODE_TABLE(
 // TODO(benvanik): AVX512 has a native variable rotate (rolv).
 EMITTER(VECTOR_ROTATE_LEFT_V128, MATCH(I<OPCODE_VECTOR_ROTATE_LEFT, V128<>, V128<>, V128<>>)) {
   static __m128i EmulateVectorRotateLeftI8(__m128i src1, __m128i src2) {
-    alignas(16) __m128i value;
-    alignas(16) __m128i shamt;
-    _mm_store_si128(&value, src1);
-    _mm_store_si128(&shamt, src2);
+    alignas(16) uint8_t value[16];
+    alignas(16) uint8_t shamt[16];
+    _mm_store_si128(reinterpret_cast<__m128i*>(&value), src1);
+    _mm_store_si128(reinterpret_cast<__m128i*>(&shamt), src2);
     for (size_t i = 0; i < 16; ++i) {
-      value.m128i_u8[i] = poly::rotate_left<uint8_t>(
-          value.m128i_u8[i], shamt.m128i_u8[i] & 0x3);
+      value[i] = poly::rotate_left<uint8_t>(value[i], shamt[i] & 0x3);
     }
-    return _mm_load_si128(&value);
+    return _mm_load_si128(reinterpret_cast<__m128i*>(&value));
   }
   static __m128i EmulateVectorRotateLeftI16(__m128i src1, __m128i src2) {
-    alignas(16) __m128i value;
-    alignas(16) __m128i shamt;
-    _mm_store_si128(&value, src1);
-    _mm_store_si128(&shamt, src2);
+    alignas(16) uint16_t value[8];
+    alignas(16) uint16_t shamt[8];
+    _mm_store_si128(reinterpret_cast<__m128i*>(&value), src1);
+    _mm_store_si128(reinterpret_cast<__m128i*>(&shamt), src2);
     for (size_t i = 0; i < 8; ++i) {
-      value.m128i_u16[i] = poly::rotate_left<uint16_t>(
-          value.m128i_u16[i], shamt.m128i_u16[i] & 0xF);
+      value[i] = poly::rotate_left<uint16_t>(value[i], shamt[i] & 0xF);
     }
-    return _mm_load_si128(&value);
+    return _mm_load_si128(reinterpret_cast<__m128i*>(&value));
   }
   static void Emit(X64Emitter& e, const EmitArgType& i) {
     switch (i.instr->flags) {
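
The .m128i_u8/.m128i_u16 members are MSVC-specific; clang's __m128i has no named lanes, hence the spill to a plain aligned array, the scalar rotate loop, and the load back. For reference, a plausible poly::rotate_left, assuming the usual portable formulation (the real one lives in poly/math.h):

    #include <cassert>
    #include <cstdint>

    // Hypothetical rotate_left; guards against shift-by-width, which is UB.
    template <typename T>
    T rotate_left(T value, uint8_t count) {
      constexpr unsigned bits = sizeof(T) * 8;
      count %= bits;
      return count == 0
                 ? value
                 : static_cast<T>((value << count) | (value >> (bits - count)));
    }

    int main() {
      assert(rotate_left<uint8_t>(0x81, 1) == 0x03);
      assert(rotate_left<uint16_t>(0x8001, 4) == 0x0018);
      return 0;
    }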

View File

@@ -19,6 +19,7 @@
 #include <llvm/ADT/BitVector.h>
 #pragma warning(pop)
 #else
+#include <cmath>
 #include <llvm/ADT/BitVector.h>
 #endif  // XE_COMPILER_MSVC

View File

@@ -9,6 +9,8 @@
 #include <alloy/frontend/ppc/ppc_context.h>
 
+#include <cstdlib>
+
 namespace alloy {
 namespace frontend {
 namespace ppc {

View File

@@ -14,7 +14,7 @@
 namespace alloy {
 
 StringBuffer::StringBuffer(size_t initial_capacity) {
-  buffer_.reserve(std::max(initial_capacity, 1024ull));
+  buffer_.reserve(std::max(initial_capacity, static_cast<size_t>(1024)));
 }
 
 StringBuffer::~StringBuffer() = default;
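
Same deduction problem as the poly::align calls: std::max requires both arguments to share one type, and 1024ull is unsigned long long while size_t is unsigned long under LP64 clang. A quick check:

    #include <algorithm>
    #include <cstddef>

    int main() {
      size_t initial_capacity = 512;
      // std::max(initial_capacity, 1024ull);  // no matching overload on
      //                                       // LP64 clang: T is ambiguous
      size_t capacity = std::max(initial_capacity, static_cast<size_t>(1024));
      return capacity == 1024 ? 0 : 1;
    }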

View File

@@ -19,7 +19,7 @@ namespace poly {
 #define static_assert_size(type, size) \
   static_assert(sizeof(type) == size,  \
-                "bad definition for "## #type##": must be "## #size##" bytes")
+                "bad definition for " #type ": must be " #size " bytes")
 
 // We rely on assert being compiled out in NDEBUG.
 #define poly_assert assert
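
`##` pastes preprocessor tokens and is ill-formed when a string literal is involved; adjacent string literals concatenate on their own, which is all the message needs. A quick check of the fixed macro (assuming this is its full definition):

    #include <cstdint>

    #define static_assert_size(type, size) \
      static_assert(sizeof(type) == size,  \
                    "bad definition for " #type ": must be " #size " bytes")

    struct Header {
      uint32_t magic;
      uint32_t length;
    };
    static_assert_size(Header, 8);  // fires with a readable message if padded

    int main() { return 0; }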

View File

@@ -40,10 +40,12 @@ inline int64_t atomic_exchange(int64_t new_value, volatile int64_t* value) {
   return OSAtomicCompareAndSwap64Barrier(*value, new_value, value);
 }
 
-//inline int32_t atomic_exchange_add(int32_t amount, volatile int32_t* value) {
-//}
-//inline int64_t atomic_exchange_add(int64_t amount, volatile int64_t* value) {
-//}
+inline int32_t atomic_exchange_add(int32_t amount, volatile int32_t* value) {
+  return OSAtomicAdd32Barrier(amount, value) - amount;
+}
+inline int64_t atomic_exchange_add(int64_t amount, volatile int64_t* value) {
+  return OSAtomicAdd64Barrier(amount, value) - amount;
+}
 
 inline bool atomic_cas(int32_t old_value, int32_t new_value,
                        volatile int32_t* value) {
@@ -51,7 +53,7 @@ inline bool atomic_cas(int32_t old_value, int32_t new_value,
       old_value, new_value, reinterpret_cast<volatile int32_t*>(value));
 }
 inline bool atomic_cas(int64_t old_value, int64_t new_value,
-                       volatile int32_t* value) {
+                       volatile int64_t* value) {
   return OSAtomicCompareAndSwap64Barrier(
       old_value, new_value, reinterpret_cast<volatile int64_t*>(value));
 }
@@ -110,10 +112,12 @@ inline int64_t atomic_exchange(int64_t new_value, volatile int64_t* value) {
   return __sync_val_compare_and_swap(*value, value, new_value);
 }
 
-//inline int32_t atomic_exchange_add(int32_t amount, volatile int32_t* value) {
-//}
-//inline int64_t atomic_exchange_add(int64_t amount, volatile int64_t* value) {
-//}
+inline int32_t atomic_exchange_add(int32_t amount, volatile int32_t* value) {
+  return __sync_fetch_and_add(value, amount);
+}
+inline int64_t atomic_exchange_add(int64_t amount, volatile int64_t* value) {
+  return __sync_fetch_and_add(value, amount);
+}
 
 inline bool atomic_cas(int32_t old_value, int32_t new_value,
                        volatile int32_t* value) {
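
Note the OS X flavor subtracts amount because OSAtomicAdd32Barrier/OSAtomicAdd64Barrier return the new value, while exchange-add semantics require the old one; __sync_fetch_and_add already returns the prior value and takes the pointer as its first argument. A quick semantic check (gcc/clang builtins):

    #include <cassert>
    #include <cstdint>

    inline int32_t atomic_exchange_add(int32_t amount, volatile int32_t* value) {
      return __sync_fetch_and_add(value, amount);
    }

    int main() {
      volatile int32_t counter = 10;
      int32_t old = atomic_exchange_add(5, &counter);
      assert(old == 10);       // old value comes back...
      assert(counter == 15);   // ...and the addition landed
      return 0;
    }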

View File

@@ -36,12 +36,14 @@
 // C++1y make_unique.
 // http://herbsutter.com/2013/05/29/gotw-89-solution-smart-pointers/
 // This is present in clang with -std=c++1y, but not otherwise.
+#if __clang_major__ < 3 || (__clang_major__ == 3 && __clang_minor__ < 4)
 namespace std {
 template <typename T, typename... Args>
 unique_ptr<T> make_unique(Args&&... args) {
   return unique_ptr<T>(new T(forward<Args>(args)...));
 }
 }  // namespace std
+#endif  // clang < 3.4
 #endif  // !XE_COMPILER_MSVC

View File

@@ -9,22 +9,20 @@
 #include <poly/main.h>
 
+#include <gflags/gflags.h>
+
 #include <poly/string.h>
 
 namespace poly {
 
-bool has_console_attached() {
-  return true;
-}
+bool has_console_attached() { return true; }
 
 }  // namespace poly
 
 extern "C" int main(int argc, char** argv) {
   auto entry_info = poly::GetEntryInfo();
   google::SetUsageMessage(std::string("usage: ") +
                           poly::to_string(entry_info.usage));
   google::SetVersionString("1.0");
   google::ParseCommandLineFlags(&argc, &argv, true);

View File

@@ -36,22 +36,22 @@ class PosixMappedMemory : public MappedMemory {
 
 std::unique_ptr<MappedMemory> MappedMemory::Open(const std::wstring& path,
                                                  Mode mode, size_t offset,
                                                  size_t length) {
-  const char* mode;
+  const char* mode_str;
   int prot;
   switch (mode) {
     case Mode::READ:
-      mode = "rb";
+      mode_str = "rb";
       prot = PROT_READ;
       break;
     case Mode::READ_WRITE:
-      mode = "r+b";
+      mode_str = "r+b";
       prot = PROT_READ | PROT_WRITE;
       break;
   }
 
   auto mm = std::make_unique<PosixMappedMemory>(path, mode);
-  mm->file_handle = fopen(poly::to_string(path).c_str(), mode);
+  mm->file_handle = fopen(poly::to_string(path).c_str(), mode_str);
   if (!mm->file_handle) {
     return nullptr;
   }
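
The rename fixes a shadowing bug: the local const char* mode hid the Mode mode parameter, so switch (mode) dispatched on an uninitialized pointer, which clang rejects outright. A minimal reproduction with a hypothetical Mode enum:

    // const char* mode would shadow the parameter below; switch (mode) then
    // sees a pointer, not the enum, and the code no longer compiles.
    enum class Mode { READ, READ_WRITE };

    const char* open_flags(Mode mode) {
      const char* mode_str = nullptr;  // renamed local, no shadowing
      switch (mode) {
        case Mode::READ:
          mode_str = "rb";
          break;
        case Mode::READ_WRITE:
          mode_str = "r+b";
          break;
      }
      return mode_str;
    }

    int main() { return open_flags(Mode::READ) != nullptr ? 0 : 1; }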

View File

@@ -14,6 +14,7 @@
 #include <cstdint>
 #include <cstring>
+#include <type_traits>
 
 #include <poly/config.h>
 #include <poly/platform.h>

View File

@@ -10,6 +10,7 @@
 #include <poly/string.h>
 
 #include <codecvt>
+#include <locale>
 
 namespace poly {

View File

@@ -110,7 +110,7 @@
     'SYMROOT': '<(DEPTH)/build/xenia/',
     'ALWAYS_SEARCH_USER_PATHS': 'NO',
     'ARCHS': ['x86_64'],
-    'CLANG_CXX_LANGUAGE_STANDARD': 'c++11',
+    'CLANG_CXX_LANGUAGE_STANDARD': 'c++1y',
     'COMBINE_HIDPI_IMAGES': 'YES',
     'GCC_C_LANGUAGE_STANDARD': 'gnu99',
     'GCC_SYMBOLS_PRIVATE_EXTERN': 'YES',