commit d578f5d0f0
parent 2356164525

    Mostly fixing up alloy clang build.

@@ -83,7 +83,7 @@ int IVMAssembler::Assemble(FunctionInfo* symbol_info, HIRBuilder* builder,
     stack_offset += type_size;
   }
   // Ensure 16b alignment.
-  stack_offset = poly::align(stack_offset, 16ull);
+  stack_offset = poly::align(stack_offset, static_cast<size_t>(16));
   ctx.stack_size = stack_offset;
 
   auto block = builder->first_block();

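The 16ull literal presumably broke template argument deduction under clang on LP64 targets, where size_t is unsigned long rather than unsigned long long, so the two arguments deduce different types. A minimal sketch of the failure, assuming poly::align deduces a single type parameter (the real helper, presumably in poly/math.h, may be declared differently):

    #include <cstddef>

    // Hypothetical single-T signature, for illustration only.
    template <typename T>
    T align(T value, T alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    size_t stack_offset = 36;
    // align(stack_offset, 16ull);  // error on LP64 clang: T deduced as both
    //                              // 'unsigned long' and 'unsigned long long'
    size_t aligned = align(stack_offset, static_cast<size_t>(16));  // -> 48
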
@@ -102,7 +102,8 @@ uint32_t AllocConstant(TranslationContext& ctx, Value* value) {
 uint32_t AllocLabel(TranslationContext& ctx, Label* label) {
   // If it's a back-branch to an already tagged label avoid setting up
   // a reference.
-  uint32_t value = reinterpret_cast<uint32_t>(label->tag);
+  uint32_t value =
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(label->tag));
   if (value & 0x80000000) {
     // Already set.
     return AllocConstant(ctx, value & ~0x80000000);

@@ -125,7 +126,7 @@ uint32_t AllocLabel(TranslationContext& ctx, Label* label) {
 
 uint32_t AllocDynamicRegister(TranslationContext& ctx, Value* value) {
   if (value->flags & VALUE_IS_ALLOCATED) {
-    return reinterpret_cast<uint32_t>(value->tag);
+    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(value->tag));
   } else {
     value->flags |= VALUE_IS_ALLOCATED;
     auto reg = ctx.register_count++;

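Both hunks above fix the same clang diagnostic: reinterpret_cast from a 64-bit pointer straight to uint32_t is rejected as a cast that loses precision. Casting through uintptr_t and then narrowing explicitly is the portable spelling; a stand-alone sketch (the tag argument stands in for the Label/Value tags above):

    #include <cstdint>

    uint32_t TagToIndex(void* tag) {
      // uint32_t v = reinterpret_cast<uint32_t>(tag);  // error with 64-bit pointers
      return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(tag));
    }
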
@@ -2334,13 +2335,13 @@ int Translate_DID_SATURATE(TranslationContext& ctx, Instr* i) {
   }  \
   return IA_NEXT;
 
-uint32_t IntCode_VECTOR_COMPARE_EQ_I8(IntCodeState& ics, const IntCode* i){
+uint32_t IntCode_VECTOR_COMPARE_EQ_I8(IntCodeState& ics, const IntCode* i) {
   VECTOR_COMPARER(uint8_t, b16, b16, 16, == )};
-uint32_t IntCode_VECTOR_COMPARE_EQ_I16(IntCodeState& ics, const IntCode* i){
+uint32_t IntCode_VECTOR_COMPARE_EQ_I16(IntCodeState& ics, const IntCode* i) {
   VECTOR_COMPARER(uint16_t, s8, s8, 8, == )};
-uint32_t IntCode_VECTOR_COMPARE_EQ_I32(IntCodeState& ics, const IntCode* i){
+uint32_t IntCode_VECTOR_COMPARE_EQ_I32(IntCodeState& ics, const IntCode* i) {
   VECTOR_COMPARER(uint32_t, i4, i4, 4, == )};
-uint32_t IntCode_VECTOR_COMPARE_EQ_F32(IntCodeState& ics, const IntCode* i){
+uint32_t IntCode_VECTOR_COMPARE_EQ_F32(IntCodeState& ics, const IntCode* i) {
   VECTOR_COMPARER(float, f4, i4, 4, == )};
 int Translate_VECTOR_COMPARE_EQ(TranslationContext& ctx, Instr* i) {
   static IntCodeFn fns[] = {

@@ -2352,13 +2353,13 @@ int Translate_VECTOR_COMPARE_EQ(TranslationContext& ctx, Instr* i) {
   return DispatchToC(ctx, i, fns[i->flags]);
 }
 
-uint32_t IntCode_VECTOR_COMPARE_SGT_I8(IntCodeState& ics, const IntCode* i){
+uint32_t IntCode_VECTOR_COMPARE_SGT_I8(IntCodeState& ics, const IntCode* i) {
   VECTOR_COMPARER(int8_t, b16, b16, 16, > )};
-uint32_t IntCode_VECTOR_COMPARE_SGT_I16(IntCodeState& ics, const IntCode* i){
+uint32_t IntCode_VECTOR_COMPARE_SGT_I16(IntCodeState& ics, const IntCode* i) {
   VECTOR_COMPARER(int16_t, s8, s8, 8, > )};
-uint32_t IntCode_VECTOR_COMPARE_SGT_I32(IntCodeState& ics, const IntCode* i){
+uint32_t IntCode_VECTOR_COMPARE_SGT_I32(IntCodeState& ics, const IntCode* i) {
   VECTOR_COMPARER(int32_t, i4, i4, 4, > )};
-uint32_t IntCode_VECTOR_COMPARE_SGT_F32(IntCodeState& ics, const IntCode* i){
+uint32_t IntCode_VECTOR_COMPARE_SGT_F32(IntCodeState& ics, const IntCode* i) {
   VECTOR_COMPARER(float, f4, i4, 4, > )};
 int Translate_VECTOR_COMPARE_SGT(TranslationContext& ctx, Instr* i) {
   static IntCodeFn fns[] = {

@@ -9,10 +9,11 @@
 #include <alloy/backend/x64/x64_code_cache.h>
 
-#include <poly/assert.h>
 
 #include <sys/mman.h>
 
+#include <poly/assert.h>
+#include <poly/math.h>
 
 namespace alloy {
 namespace backend {
 namespace x64 {

@@ -51,7 +52,7 @@ void* X64CodeCache::PlaceCode(void* machine_code, size_t code_size,
 
   // Always move the code to land on 16b alignment. We do this by rounding up
   // to 16b so that all offsets are aligned.
-  code_size = XEROUNDUP(code_size, 16);
+  code_size = poly::round_up(code_size, 16);
 
   lock_.lock();
 

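XEROUNDUP was one of the old xenia macros; poly::round_up (from poly/math.h, added to this file's includes in the previous hunk) replaces it. A plausible shape for such a helper, offered as an assumption rather than the project's exact definition:

    #include <cstddef>

    // Assumed signature; the real poly::round_up may be written differently.
    template <typename T>
    constexpr T round_up(T value, size_t multiple) {
      return ((value + multiple - 1) / multiple) * multiple;
    }

    static_assert(round_up<size_t>(100, 16) == 112, "rounds up to the next multiple");
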
@@ -127,7 +127,7 @@ int X64Emitter::Emit(HIRBuilder* builder, size_t& out_stack_size) {
   }
   // Ensure 16b alignment.
   stack_offset -= StackLayout::GUEST_STACK_SIZE;
-  stack_offset = poly::align(stack_offset, 16ull);
+  stack_offset = poly::align(stack_offset, static_cast<size_t>(16));
 
   // Function prolog.
   // Must be 16b aligned.

@@ -242,7 +242,7 @@ void X64Emitter::EmitTraceSource(const Instr* instr) {
   uint8_t dest_reg_1 = instr->flags >> 8;
 
   xdb::protocol::EventType event_type;
-  size_t event_size;
+  size_t event_size = 0;
   if (dest_reg_0 == 100) {
     event_type = xdb::protocol::EventType::INSTR;
     event_size = sizeof(xdb::protocol::InstrEvent);

@@ -269,6 +269,7 @@ void X64Emitter::EmitTraceSource(const Instr* instr) {
       event_size = sizeof(xdb::protocol::InstrEventR8R8);
     }
   }
+  assert_not_zero(event_size);
 
   mov(rax, trace_base);
   mov(r8d, static_cast<uint32_t>(event_size));

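Together with the previous hunk, this looks like a fix for clang's -Wsometimes-uninitialized: when none of the dest_reg branches matched, event_size was read uninitialized. Zero-initializing plus the assert gives a defined value and a loud debug-time failure. A schematic of the pattern with illustrative names (not the emitter's real code):

    #include <cassert>
    #include <cstddef>

    size_t PickEventSize(int dest_reg) {
      size_t event_size = 0;  // was uninitialized -> warning when no branch assigns it
      if (dest_reg == 100) {
        event_size = 24;
      } else if (dest_reg == 200) {
        event_size = 32;
      }
      assert(event_size != 0);  // catch unexpected dest_reg values in debug builds
      return event_size;
    }
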
@@ -289,6 +290,7 @@ void X64Emitter::EmitTraceSource(const Instr* instr) {
   mov(word[r8 + 2], ax);
 
   switch (event_type) {
+    default:
     case xdb::protocol::EventType::INSTR:
       break;
     case xdb::protocol::EventType::INSTR_R8:

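Placing a default: label ahead of the handled cases gives unlisted EventType values a defined fall-through path and quiets clang's -Wswitch when only a subset of enumerators is covered. A schematic example with a made-up enum:

    // Schematic enum; the real xdb::protocol::EventType has more members.
    enum class EventType { INSTR, INSTR_R8, INSTR_R8R8, INSTR_R16 };

    int OperandCount(EventType t) {
      switch (t) {
        default:  // unlisted enumerators take the INSTR path
        case EventType::INSTR:
          return 0;
        case EventType::INSTR_R8:
          return 1;
        case EventType::INSTR_R8R8:
          return 2;
      }
    }
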
@@ -330,8 +332,7 @@ void X64Emitter::EmitTraceSourceAppendValue(const Value* value,
 
 void X64Emitter::EmitGetCurrentThreadId() {
   // rcx must point to context. We could fetch from the stack if needed.
-  mov(ax,
-      word[rcx + runtime_->frontend()->context_info()->thread_id_offset()]);
+  mov(ax, word[rcx + runtime_->frontend()->context_info()->thread_id_offset()]);
 }
 
 void X64Emitter::EmitTraceUserCallReturn() {

@@ -345,7 +346,7 @@ void X64Emitter::EmitTraceUserCallReturn() {
   lock();
   xadd(qword[rax], r8);
   mov(rax, static_cast<uint64_t>(xdb::protocol::EventType::USER_CALL_RETURN) |
-               (static_cast<uint64_t>(0) << 8) | (0ull << 32));
+           (static_cast<uint64_t>(0) << 8) | (0ull << 32));
   mov(qword[r8], rax);
   EmitGetCurrentThreadId();
   mov(word[r8 + 2], ax);

@@ -645,8 +646,6 @@ void X64Emitter::CallExtern(const hir::Instr* instr,
         static_cast<uint32_t>(sizeof(xdb::protocol::KernelCallReturnEvent)));
     lock();
     xadd(qword[rax], r8);
-    uint32_t module_id = 0;
-    uint32_t ordinal = 0;
     mov(rax,
         static_cast<uint64_t>(xdb::protocol::EventType::KERNEL_CALL_RETURN) |
             (static_cast<uint64_t>(0) << 8) | (0));

@@ -4491,26 +4491,24 @@ EMITTER_OPCODE_TABLE(
 // TODO(benvanik): AVX512 has a native variable rotate (rolv).
 EMITTER(VECTOR_ROTATE_LEFT_V128, MATCH(I<OPCODE_VECTOR_ROTATE_LEFT, V128<>, V128<>, V128<>>)) {
   static __m128i EmulateVectorRotateLeftI8(__m128i src1, __m128i src2) {
-    alignas(16) __m128i value;
-    alignas(16) __m128i shamt;
-    _mm_store_si128(&value, src1);
-    _mm_store_si128(&shamt, src2);
+    alignas(16) uint8_t value[16];
+    alignas(16) uint8_t shamt[16];
+    _mm_store_si128(reinterpret_cast<__m128i*>(&value), src1);
+    _mm_store_si128(reinterpret_cast<__m128i*>(&shamt), src2);
     for (size_t i = 0; i < 16; ++i) {
-      value.m128i_u8[i] = poly::rotate_left<uint8_t>(
-          value.m128i_u8[i], shamt.m128i_u8[i] & 0x3);
+      value[i] = poly::rotate_left<uint8_t>(value[i], shamt[i] & 0x3);
     }
-    return _mm_load_si128(&value);
+    return _mm_load_si128(reinterpret_cast<__m128i*>(&value));
   }
   static __m128i EmulateVectorRotateLeftI16(__m128i src1, __m128i src2) {
-    alignas(16) __m128i value;
-    alignas(16) __m128i shamt;
-    _mm_store_si128(&value, src1);
-    _mm_store_si128(&shamt, src2);
+    alignas(16) uint16_t value[8];
+    alignas(16) uint16_t shamt[8];
+    _mm_store_si128(reinterpret_cast<__m128i*>(&value), src1);
+    _mm_store_si128(reinterpret_cast<__m128i*>(&shamt), src2);
     for (size_t i = 0; i < 8; ++i) {
-      value.m128i_u16[i] = poly::rotate_left<uint16_t>(
-          value.m128i_u16[i], shamt.m128i_u16[i] & 0xF);
+      value[i] = poly::rotate_left<uint16_t>(value[i], shamt[i] & 0xF);
     }
-    return _mm_load_si128(&value);
+    return _mm_load_si128(reinterpret_cast<__m128i*>(&value));
   }
   static void Emit(X64Emitter& e, const EmitArgType& i) {
     switch (i.instr->flags) {

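The m128i_u8 / m128i_u16 members used before are MSVC-specific union fields of __m128i; clang's __m128i exposes no named members. Spilling the vector to an aligned array and reloading it is the portable pattern the hunk switches to. A self-contained sketch of the same idea:

    #include <emmintrin.h>

    #include <cstdint>

    // Rotate each byte lane left by one bit, without MSVC's m128i_u8 accessor.
    __m128i RotateBytesLeftOne(__m128i v) {
      alignas(16) uint8_t lanes[16];
      _mm_store_si128(reinterpret_cast<__m128i*>(lanes), v);
      for (int i = 0; i < 16; ++i) {
        lanes[i] = static_cast<uint8_t>((lanes[i] << 1) | (lanes[i] >> 7));
      }
      return _mm_load_si128(reinterpret_cast<const __m128i*>(lanes));
    }
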
@@ -19,6 +19,7 @@
 #include <llvm/ADT/BitVector.h>
 #pragma warning(pop)
 #else
+#include <cmath>
 #include <llvm/ADT/BitVector.h>
 #endif  // XE_COMPILER_MSVC
 

@@ -9,6 +9,8 @@
 #include <alloy/frontend/ppc/ppc_context.h>
 
+#include <cstdlib>
+
 namespace alloy {
 namespace frontend {
 namespace ppc {
 

@@ -14,7 +14,7 @@
 namespace alloy {
 
 StringBuffer::StringBuffer(size_t initial_capacity) {
-  buffer_.reserve(std::max(initial_capacity, 1024ull));
+  buffer_.reserve(std::max(initial_capacity, static_cast<size_t>(1024)));
 }
 
 StringBuffer::~StringBuffer() = default;

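Same class of problem as the poly::align change: std::max deduces a single template parameter, so mixing a size_t argument with a ull literal fails to compile where size_t is unsigned long. A minimal illustration:

    #include <algorithm>
    #include <cstddef>

    size_t InitialCapacity(size_t requested) {
      // return std::max(requested, 1024ull);  // deduction conflict on LP64 clang/gcc
      return std::max(requested, static_cast<size_t>(1024));
      // std::max<size_t>(requested, 1024) would work just as well.
    }
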
@@ -19,7 +19,7 @@ namespace poly {
 
 #define static_assert_size(type, size) \
   static_assert(sizeof(type) == size,  \
-                "bad definition for "## #type##": must be "## #size##" bytes")
+                "bad definition for " #type ": must be " #size " bytes")
 
 // We rely on assert being compiled out in NDEBUG.
 #define poly_assert assert

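MSVC tolerates ## token-pasting against string literals, but standard C++ (and clang) rejects it; adjacent string literals concatenate automatically, so the # stringizing alone is enough. The corrected macro in use:

    #include <cstdint>

    #define static_assert_size(type, size) \
      static_assert(sizeof(type) == size,  \
                    "bad definition for " #type ": must be " #size " bytes")

    struct FileHeader {
      uint32_t magic;
      uint32_t length;
    };
    // The adjacent literals concatenate to:
    //   "bad definition for FileHeader: must be 8 bytes"
    static_assert_size(FileHeader, 8);
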
@@ -40,10 +40,12 @@ inline int64_t atomic_exchange(int64_t new_value, volatile int64_t* value) {
   return OSAtomicCompareAndSwap64Barrier(*value, new_value, value);
 }
 
-//inline int32_t atomic_exchange_add(int32_t amount, volatile int32_t* value) {
-//}
-//inline int64_t atomic_exchange_add(int64_t amount, volatile int64_t* value) {
-//}
+inline int32_t atomic_exchange_add(int32_t amount, volatile int32_t* value) {
+  return OSAtomicAdd32Barrier(amount, value) - amount;
+}
+inline int64_t atomic_exchange_add(int64_t amount, volatile int64_t* value) {
+  return OSAtomicAdd64Barrier(amount, value) - amount;
+}
 
 inline bool atomic_cas(int32_t old_value, int32_t new_value,
                        volatile int32_t* value) {

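OSAtomicAdd32Barrier / OSAtomicAdd64Barrier return the value after the addition, so subtracting the amount recovers the fetch-and-add ("return the old value") semantics the name atomic_exchange_add implies. A quick macOS-only usage sketch against the same pre-<os/atomic> API this header targets:

    #include <libkern/OSAtomic.h>

    #include <cassert>
    #include <cstdint>

    void Example() {
      volatile int32_t counter = 40;
      int32_t new_value = OSAtomicAdd32Barrier(2, &counter);  // returns the new value, 42
      int32_t old_value = new_value - 2;  // the fetch-and-add result callers expect
      assert(new_value == 42 && old_value == 40);
    }
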
@@ -51,7 +53,7 @@ inline bool atomic_cas(int32_t old_value, int32_t new_value,
       old_value, new_value, reinterpret_cast<volatile int32_t*>(value));
 }
 inline bool atomic_cas(int64_t old_value, int64_t new_value,
-                       volatile int32_t* value) {
+                       volatile int64_t* value) {
   return OSAtomicCompareAndSwap64Barrier(
       old_value, new_value, reinterpret_cast<volatile int64_t*>(value));
 }

@@ -110,10 +112,12 @@ inline int64_t atomic_exchange(int64_t new_value, volatile int64_t* value) {
   return __sync_val_compare_and_swap(*value, value, new_value);
 }
 
-//inline int32_t atomic_exchange_add(int32_t amount, volatile int32_t* value) {
-//}
-//inline int64_t atomic_exchange_add(int64_t amount, volatile int64_t* value) {
-//}
+inline int32_t atomic_exchange_add(int32_t amount, volatile int32_t* value) {
+  return __sync_fetch_and_add(amount, value);
+}
+inline int64_t atomic_exchange_add(int64_t amount, volatile int64_t* value) {
+  return __sync_fetch_and_add(amount, value);
+}
 
 inline bool atomic_cas(int32_t old_value, int32_t new_value,
                        volatile int32_t* value) {

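One caution on the GCC/clang path: the builtin's signature is __sync_fetch_and_add(type* ptr, type value), pointer first, and it already returns the old value. As written in the hunk the arguments are reversed, so a corrected sketch of the intended wrapper would read (an observation about the builtin, not a change present in this commit):

    #include <cstdint>

    inline int32_t atomic_exchange_add(int32_t amount, volatile int32_t* value) {
      // __sync_fetch_and_add returns the value held *before* the add.
      return __sync_fetch_and_add(value, amount);
    }
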
@@ -36,12 +36,14 @@
 // C++1y make_unique.
 // http://herbsutter.com/2013/05/29/gotw-89-solution-smart-pointers/
 // This is present in clang with -std=c++1y, but not otherwise.
+#if __clang_major__ < 3 || (__clang_major__ == 3 && __clang_minor__ < 4)
 namespace std {
 template <typename T, typename... Args>
 unique_ptr<T> make_unique(Args&&... args) {
   return unique_ptr<T>(new T(forward<Args>(args)...));
 }
 }  // namespace std
+#endif  // clang < 3.4
 #endif  // !XE_COMPILER_MSVC
 
 namespace poly {}  // namespace poly

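Guarding the shim to clang older than 3.4 avoids redefining std::make_unique where libc++ already provides it in -std=c++1y mode; call sites keep the same spelling either way. A small usage sketch with a hypothetical type:

    #include <cstddef>
    #include <memory>

    struct TraceEvent {  // hypothetical type, purely for illustration
      TraceEvent(int id, size_t size) : id(id), size(size) {}
      int id;
      size_t size;
    };

    std::unique_ptr<TraceEvent> MakeEvent() {
      // Compiles against the shim on old clang and against libc++'s own
      // std::make_unique on newer toolchains.
      return std::make_unique<TraceEvent>(7, sizeof(TraceEvent));
    }
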
@@ -9,22 +9,20 @@
 #include <poly/main.h>
 
 #include <gflags/gflags.h>
 #include <poly/string.h>
 
 namespace poly {
 
-bool has_console_attached() {
-  return true;
-}
+bool has_console_attached() { return true; }
 
 }  // namespace poly
 
 
 extern "C" int main(int argc, char** argv) {
   auto entry_info = poly::GetEntryInfo();
 
   google::SetUsageMessage(std::string("usage: ") +
-                              poly::to_string(entry_info.usage));
+                          poly::to_string(entry_info.usage));
   google::SetVersionString("1.0");
   google::ParseCommandLineFlags(&argc, &argv, true);
 

@@ -36,22 +36,22 @@ class PosixMappedMemory : public MappedMemory {
 std::unique_ptr<MappedMemory> MappedMemory::Open(const std::wstring& path,
                                                  Mode mode, size_t offset,
                                                  size_t length) {
-  const char* mode;
+  const char* mode_str;
   int prot;
   switch (mode) {
     case Mode::READ:
-      mode = "rb";
+      mode_str = "rb";
       prot = PROT_READ;
       break;
     case Mode::READ_WRITE:
-      mode = "r+b";
+      mode_str = "r+b";
       prot = PROT_READ | PROT_WRITE;
       break;
   }
 
   auto mm = std::make_unique<PosixMappedMemory>(path, mode);
 
-  mm->file_handle = fopen(poly::to_string(path).c_str(), mode);
+  mm->file_handle = fopen(poly::to_string(path).c_str(), mode_str);
   if (!mm->file_handle) {
     return nullptr;
   }

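The local const char* mode shadowed the Mode mode parameter, so switch (mode) ended up operating on a pointer (which does not compile) and fopen received the wrong argument; renaming the local to mode_str resolves both. A stripped-down illustration (types simplified, not the real MappedMemory API):

    enum class Mode { READ, READ_WRITE };

    const char* OpenModeString(Mode mode) {
      // const char* mode;  // would shadow the parameter; `switch (mode)` then
      //                    // sees a char*, which clang rejects
      const char* mode_str = nullptr;
      switch (mode) {
        case Mode::READ:
          mode_str = "rb";
          break;
        case Mode::READ_WRITE:
          mode_str = "r+b";
          break;
      }
      return mode_str;
    }
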
@@ -14,6 +14,7 @@
 #include <cstdint>
+#include <cstring>
 #include <type_traits>
 
 #include <poly/config.h>
 #include <poly/platform.h>
 

@@ -10,6 +10,7 @@
 #include <poly/string.h>
 
 #include <codecvt>
+#include <locale>
 
 namespace poly {
 

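Conversions like poly::to_string are typically built on std::wstring_convert, whose pieces are split across <locale> (wstring_convert) and <codecvt> (codecvt_utf8); libc++ apparently wants both headers spelled out rather than pulled in transitively. A hedged sketch of such a helper (the real poly implementation may differ):

    #include <codecvt>
    #include <locale>
    #include <string>

    std::string ToString(const std::wstring& value) {
      // std::wstring_convert is declared in <locale>, std::codecvt_utf8 in <codecvt>.
      std::wstring_convert<std::codecvt_utf8<wchar_t>> converter;
      return converter.to_bytes(value);
    }
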
@@ -110,7 +110,7 @@
       'SYMROOT': '<(DEPTH)/build/xenia/',
       'ALWAYS_SEARCH_USER_PATHS': 'NO',
       'ARCHS': ['x86_64'],
-      'CLANG_CXX_LANGUAGE_STANDARD': 'c++11',
+      'CLANG_CXX_LANGUAGE_STANDARD': 'c++1y',
       'COMBINE_HIDPI_IMAGES': 'YES',
       'GCC_C_LANGUAGE_STANDARD': 'gnu99',
       'GCC_SYMBOLS_PRIVATE_EXTERN': 'YES',