Cleaning up asserts and file/line macros.

Ben Vanik 2014-07-12 16:51:52 -07:00
parent 840357413c
commit bf882714d0
92 changed files with 636 additions and 613 deletions
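
The diff hunks below are a mechanical rename of the old XE-prefixed assert macros to a lowercase assert_* family, with an optional message argument replacing what used to be a trailing comment (for example, assert_true(size < chunk_size_, "need to support larger chunks")). As a reading aid, the mapping observed in this commit is summarized below, followed by a minimal hypothetical sketch of how such a macro family could be defined. The sketch is an assumption for illustration only: the helper sketch::assert_fail, the macro bodies, and the reliance on empty __VA_ARGS__ (C++20 or the common GCC/Clang/MSVC behavior) are not taken from the project's actual assert header, which is not part of this diff.

// Mapping observed in this commit (old macro -> new macro):
//   XEASSERT(expr)            -> assert_true(expr[, "message"])
//   XEASSERTALWAYS()          -> assert_always()
//   XEASSERTNOTNULL(value)    -> assert_not_null(value)
//   XEASSERTNULL(value)       -> assert_null(value)
//   XEASSERTZERO(value)       -> assert_zero(value)
//   XEASSERTNOTZERO(value)    -> assert_not_zero(value)
//   XEASSERTUNHANDLEDCASE(v)  -> assert_unhandled_case(v)
//
// Hypothetical sketch of such a macro family (illustration only; the real
// definitions live in the project's assert header, not in this diff).
#include <cstdio>
#include <cstdlib>

namespace sketch {
// Prints the failed expression, an optional message, and the file/line of the
// call site, then aborts.
inline void assert_fail(const char* expr, const char* msg, const char* file,
                        int line) {
  std::fprintf(stderr, "Assertion failed: %s %s (%s:%d)\n", expr, msg, file,
               line);
  std::abort();
}
}  // namespace sketch

// The message argument is optional; "" __VA_ARGS__ concatenates an optional
// string literal onto an empty one. Empty __VA_ARGS__ assumes C++20 or the
// usual compiler extension.
#define assert_true(expr, ...)                                             \
  ((expr) ? (void)0                                                        \
          : sketch::assert_fail(#expr, "" __VA_ARGS__, __FILE__, __LINE__))
#define assert_always() \
  sketch::assert_fail("always", "", __FILE__, __LINE__)
#define assert_not_null(value) assert_true((value) != nullptr)
#define assert_null(value) assert_true((value) == nullptr)
#define assert_zero(value) assert_true((value) == 0)
#define assert_not_zero(value) assert_true((value) != 0)
#define assert_unhandled_case(value) \
  sketch::assert_fail("unhandled case", "", __FILE__, __LINE__)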

View File

@ -45,7 +45,7 @@ void* Arena::Alloc(size_t size) {
if (active_chunk_->capacity - active_chunk_->offset < size + 4096) {
Chunk* next = active_chunk_->next;
if (!next) {
XEASSERT(size < chunk_size_); // need to support larger chunks
assert_true(size < chunk_size_, "need to support larger chunks");
next = new Chunk(chunk_size_);
active_chunk_->next = next;
}

View File

@ -159,8 +159,8 @@ uint32_t AllocOpRegister(TranslationContext& ctx, OpcodeSignatureType sig_type,
uint32_t IntCode_INVALID(IntCodeState& ics, const IntCode* i);
uint32_t IntCode_INVALID_TYPE(IntCodeState& ics, const IntCode* i);
int DispatchToC(TranslationContext& ctx, Instr* i, IntCodeFn fn) {
XEASSERT(fn != IntCode_INVALID);
XEASSERT(fn != IntCode_INVALID_TYPE);
assert_true(fn != IntCode_INVALID);
assert_true(fn != IntCode_INVALID_TYPE);
const OpcodeInfo* op = i->opcode;
uint32_t sig = op->signature;
@ -194,11 +194,11 @@ int DispatchToC(TranslationContext& ctx, Instr* i, IntCodeFn fn) {
}
uint32_t IntCode_INVALID(IntCodeState& ics, const IntCode* i) {
XEASSERTALWAYS();
assert_always();
return IA_NEXT;
}
uint32_t IntCode_INVALID_TYPE(IntCodeState& ics, const IntCode* i) {
XEASSERTALWAYS();
assert_always();
return IA_NEXT;
}
int TranslateInvalid(TranslationContext& ctx, Instr* i) {
@ -368,7 +368,7 @@ uint32_t IntCode_CALL_XX(IntCodeState& ics, const IntCode* i, uint32_t reg) {
if (!fn) {
ics.thread_state->runtime()->ResolveFunction(symbol_info->address(), &fn);
}
XEASSERTNOTNULL(fn);
assert_not_null(fn);
// TODO(benvanik): proper tail call support, somehow.
uint64_t return_address =
(i->flags & CALL_TAIL) ? ics.return_address : ics.call_return_address;
@ -444,7 +444,7 @@ uint32_t IntCode_CALL_INDIRECT_XX(IntCodeState& ics, const IntCode* i,
// Real call.
Function* fn = NULL;
ics.thread_state->runtime()->ResolveFunction(target, &fn);
XEASSERTNOTNULL(fn);
assert_not_null(fn);
// TODO(benvanik): proper tail call support, somehow.
uint64_t return_address =
(i->flags & CALL_TAIL) ? ics.return_address : ics.call_return_address;
@ -2309,17 +2309,17 @@ uint32_t IntCode_ADD_I64_I64(IntCodeState& ics, const IntCode* i) {
return IA_NEXT;
}
uint32_t IntCode_ADD_F32_F32(IntCodeState& ics, const IntCode* i) {
XEASSERT(!i->flags);
assert_true(!i->flags);
ics.rf[i->dest_reg].f32 = ics.rf[i->src1_reg].f32 + ics.rf[i->src2_reg].f32;
return IA_NEXT;
}
uint32_t IntCode_ADD_F64_F64(IntCodeState& ics, const IntCode* i) {
XEASSERT(!i->flags);
assert_true(!i->flags);
ics.rf[i->dest_reg].f64 = ics.rf[i->src1_reg].f64 + ics.rf[i->src2_reg].f64;
return IA_NEXT;
}
uint32_t IntCode_ADD_V128_V128(IntCodeState& ics, const IntCode* i) {
XEASSERT(!i->flags);
assert_true(!i->flags);
const vec128_t& src1 = ics.rf[i->src1_reg].v128;
const vec128_t& src2 = ics.rf[i->src2_reg].v128;
vec128_t& dest = ics.rf[i->dest_reg].v128;
@ -2380,13 +2380,13 @@ uint32_t IntCode_ADD_CARRY_I64_I64(IntCodeState& ics, const IntCode* i) {
return IA_NEXT;
}
uint32_t IntCode_ADD_CARRY_F32_F32(IntCodeState& ics, const IntCode* i) {
XEASSERT(!i->flags);
assert_true(!i->flags);
ics.rf[i->dest_reg].f32 = ics.rf[i->src1_reg].f32 + ics.rf[i->src2_reg].f32 +
ics.rf[i->src3_reg].i8;
return IA_NEXT;
}
uint32_t IntCode_ADD_CARRY_F64_F64(IntCodeState& ics, const IntCode* i) {
XEASSERT(!i->flags);
assert_true(!i->flags);
ics.rf[i->dest_reg].f64 = ics.rf[i->src1_reg].f64 + ics.rf[i->src2_reg].f64 +
ics.rf[i->src3_reg].i8;
return IA_NEXT;
@ -2570,12 +2570,12 @@ uint32_t IntCode_SUB_I64_I64(IntCodeState& ics, const IntCode* i) {
return IA_NEXT;
}
uint32_t IntCode_SUB_F32_F32(IntCodeState& ics, const IntCode* i) {
XEASSERT(!i->flags);
assert_true(!i->flags);
ics.rf[i->dest_reg].f32 = ics.rf[i->src1_reg].f32 - ics.rf[i->src2_reg].f32;
return IA_NEXT;
}
uint32_t IntCode_SUB_F64_F64(IntCodeState& ics, const IntCode* i) {
XEASSERT(!i->flags);
assert_true(!i->flags);
ics.rf[i->dest_reg].f64 = ics.rf[i->src1_reg].f64 - ics.rf[i->src2_reg].f64;
return IA_NEXT;
}
@ -3564,7 +3564,7 @@ int Translate_BYTE_SWAP(TranslationContext& ctx, Instr* i) {
uint32_t IntCode_CNTLZ_I8(IntCodeState& ics, const IntCode* i) {
// CHECK
XEASSERTALWAYS();
assert_always();
DWORD index;
DWORD mask = ics.rf[i->src1_reg].i8;
BOOLEAN is_nonzero = _BitScanReverse(&index, mask);
@ -3573,7 +3573,7 @@ uint32_t IntCode_CNTLZ_I8(IntCodeState& ics, const IntCode* i) {
}
uint32_t IntCode_CNTLZ_I16(IntCodeState& ics, const IntCode* i) {
// CHECK
XEASSERTALWAYS();
assert_always();
DWORD index;
DWORD mask = ics.rf[i->src1_reg].i16;
BOOLEAN is_nonzero = _BitScanReverse(&index, mask);

View File

@ -32,7 +32,7 @@ Register* IVMStack::Alloc(size_t register_count) {
if (active_chunk_->capacity - active_chunk_->offset < size) {
Chunk* next = active_chunk_->next;
if (!next) {
XEASSERT(size < chunk_size_); // need to support larger chunks
assert_true(size < chunk_size_, "need to support larger chunks");
next = new Chunk(chunk_size_);
next->prev = active_chunk_;
active_chunk_->next = next;

View File

@ -73,7 +73,7 @@ void* X64CodeCache::PlaceCode(void* machine_code, size_t code_size,
if (active_chunk_->capacity - active_chunk_->offset < code_size) {
auto next = active_chunk_->next;
if (!next) {
XEASSERT(code_size < chunk_size_); // need to support larger chunks
assert_true(code_size < chunk_size_, "need to support larger chunks");
next = new X64CodeChunk(chunk_size_);
active_chunk_->next = next;
}
@ -197,7 +197,7 @@ void X64CodeChunk::AddTableEntry(uint8_t* code, size_t code_size,
size_t new_size = old_size * 2;
auto new_table =
(RUNTIME_FUNCTION*)xe_realloc(fn_table, old_size, new_size);
XEASSERTNOTNULL(new_table);
assert_not_null(new_table);
if (!new_table) {
return;
}

View File

@ -140,7 +140,7 @@ int X64Emitter::Emit(HIRBuilder* builder, size_t& out_stack_size) {
// Adding or changing anything here must be matched!
const bool emit_prolog = true;
const size_t stack_size = StackLayout::GUEST_STACK_SIZE + stack_offset;
XEASSERT((stack_size + 8) % 16 == 0);
assert_true((stack_size + 8) % 16 == 0);
out_stack_size = stack_size;
stack_size_ = stack_size;
if (emit_prolog) {
@ -167,7 +167,7 @@ int X64Emitter::Emit(HIRBuilder* builder, size_t& out_stack_size) {
const Instr* new_tail = instr;
if (!SelectSequence(*this, instr, &new_tail)) {
// No sequence found!
XEASSERTALWAYS();
assert_always();
XELOGE("Unable to process HIR opcode %s", instr->opcode->name);
break;
}
@ -231,7 +231,7 @@ void X64Emitter::Trap(uint16_t trap_type) {
void X64Emitter::UnimplementedInstr(const hir::Instr* i) {
// TODO(benvanik): notify debugger.
db(0xCC);
XEASSERTALWAYS();
assert_always();
}
// Total size of ResolveFunctionSymbol call site in bytes.
@ -259,7 +259,7 @@ uint64_t ResolveFunctionSymbol(void* raw_context, uint64_t symbol_info_ptr) {
// Resolve function. This will demand compile as required.
Function* fn = NULL;
thread_state->runtime()->ResolveFunction(symbol_info->address(), &fn);
XEASSERTNOTNULL(fn);
assert_not_null(fn);
auto x64_fn = static_cast<X64Function*>(fn);
uint64_t addr = reinterpret_cast<uint64_t>(x64_fn->machine_code());
@ -307,7 +307,7 @@ void X64Emitter::Call(const hir::Instr* instr,
// 5b
ReloadECX();
size_t total_size = getSize() - start;
XEASSERT(total_size == TOTAL_RESOLVE_SIZE);
assert_true(total_size == TOTAL_RESOLVE_SIZE);
// EDX overwritten, don't bother reloading.
}
@ -334,7 +334,7 @@ uint64_t ResolveFunctionAddress(void* raw_context, uint64_t target_address) {
Function* fn = NULL;
thread_state->runtime()->ResolveFunction(target_address, &fn);
XEASSERTNOTNULL(fn);
assert_not_null(fn);
auto x64_fn = static_cast<X64Function*>(fn);
return reinterpret_cast<uint64_t>(x64_fn->machine_code());
}
@ -375,7 +375,7 @@ uint64_t UndefinedCallExtern(void* raw_context, uint64_t symbol_info_ptr) {
}
void X64Emitter::CallExtern(const hir::Instr* instr,
const FunctionInfo* symbol_info) {
XEASSERT(symbol_info->behavior() == FunctionInfo::BEHAVIOR_EXTERN);
assert_true(symbol_info->behavior() == FunctionInfo::BEHAVIOR_EXTERN);
if (!symbol_info->extern_handler()) {
CallNative(UndefinedCallExtern, reinterpret_cast<uint64_t>(symbol_info));
} else {

View File

@ -133,7 +133,7 @@ struct ValueOp : Op<ValueOp<T, KEY_TYPE, REG_TYPE, CONST_TYPE, TAG>, KEY_TYPE> {
bool is_constant;
virtual bool ConstantFitsIn32Reg() const { return true; }
const REG_TYPE& reg() const {
XEASSERT(!is_constant);
assert_true(!is_constant);
return reg_;
}
operator const REG_TYPE&() const {
@ -184,28 +184,28 @@ protected:
template <int TAG = -1>
struct I8 : ValueOp<I8<TAG>, KEY_TYPE_V_I8, Reg8, int8_t, TAG> {
const int8_t constant() const {
XEASSERT(is_constant);
assert_true(is_constant);
return value->constant.i8;
}
};
template <int TAG = -1>
struct I16 : ValueOp<I16<TAG>, KEY_TYPE_V_I16, Reg16, int16_t, TAG> {
const int16_t constant() const {
XEASSERT(is_constant);
assert_true(is_constant);
return value->constant.i16;
}
};
template <int TAG = -1>
struct I32 : ValueOp<I32<TAG>, KEY_TYPE_V_I32, Reg32, int32_t, TAG> {
const int32_t constant() const {
XEASSERT(is_constant);
assert_true(is_constant);
return value->constant.i32;
}
};
template <int TAG = -1>
struct I64 : ValueOp<I64<TAG>, KEY_TYPE_V_I64, Reg64, int64_t, TAG> {
const int64_t constant() const {
XEASSERT(is_constant);
assert_true(is_constant);
return value->constant.i64;
}
bool ConstantFitsIn32Reg() const override {
@ -223,21 +223,21 @@ struct I64 : ValueOp<I64<TAG>, KEY_TYPE_V_I64, Reg64, int64_t, TAG> {
template <int TAG = -1>
struct F32 : ValueOp<F32<TAG>, KEY_TYPE_V_F32, Xmm, float, TAG> {
const float constant() const {
XEASSERT(is_constant);
assert_true(is_constant);
return value->constant.f32;
}
};
template <int TAG = -1>
struct F64 : ValueOp<F64<TAG>, KEY_TYPE_V_F64, Xmm, double, TAG> {
const double constant() const {
XEASSERT(is_constant);
assert_true(is_constant);
return value->constant.f64;
}
};
template <int TAG = -1>
struct V128 : ValueOp<V128<TAG>, KEY_TYPE_V_V128, Xmm, vec128_t, TAG> {
const vec128_t& constant() const {
XEASSERT(is_constant);
assert_true(is_constant);
return value->constant.v128;
}
};
@ -542,7 +542,7 @@ struct SingleSequence : public Sequence<SingleSequence<SEQ, T>, T> {
X64Emitter& e, const EmitArgType& i,
const REG_REG_FN& reg_reg_fn, const REG_CONST_FN& reg_const_fn) {
if (i.src1.is_constant) {
XEASSERT(!i.src2.is_constant);
assert_true(!i.src2.is_constant);
if (i.dest == i.src2) {
if (i.src1.ConstantFitsIn32Reg()) {
reg_const_fn(e, i.dest, static_cast<int32_t>(i.src1.constant()));
@ -584,7 +584,7 @@ struct SingleSequence : public Sequence<SingleSequence<SEQ, T>, T> {
X64Emitter& e, const EmitArgType& i,
const REG_REG_FN& reg_reg_fn, const REG_CONST_FN& reg_const_fn) {
if (i.src1.is_constant) {
XEASSERT(!i.src2.is_constant);
assert_true(!i.src2.is_constant);
if (i.dest == i.src2) {
auto temp = GetTempReg<decltype(i.src2)::reg_type>(e);
e.mov(temp, i.src2);
@ -632,7 +632,7 @@ struct SingleSequence : public Sequence<SingleSequence<SEQ, T>, T> {
static void EmitCommutativeBinaryXmmOp(
X64Emitter& e, const EmitArgType& i, const FN& fn) {
if (i.src1.is_constant) {
XEASSERT(!i.src2.is_constant);
assert_true(!i.src2.is_constant);
e.LoadConstantXmm(e.xmm0, i.src1.constant());
fn(e, i.dest, e.xmm0, i.src2);
} else if (i.src2.is_constant) {
@ -647,7 +647,7 @@ struct SingleSequence : public Sequence<SingleSequence<SEQ, T>, T> {
static void EmitAssociativeBinaryXmmOp(
X64Emitter& e, const EmitArgType& i, const FN& fn) {
if (i.src1.is_constant) {
XEASSERT(!i.src2.is_constant);
assert_true(!i.src2.is_constant);
e.LoadConstantXmm(e.xmm0, i.src1.constant());
fn(e, i.dest, e.xmm0, i.src2);
} else if (i.src2.is_constant) {
@ -663,7 +663,7 @@ struct SingleSequence : public Sequence<SingleSequence<SEQ, T>, T> {
X64Emitter& e, const EmitArgType& i,
const REG_REG_FN& reg_reg_fn, const REG_CONST_FN& reg_const_fn) {
if (i.src1.is_constant) {
XEASSERT(!i.src2.is_constant);
assert_true(!i.src2.is_constant);
if (i.src1.ConstantFitsIn32Reg()) {
reg_const_fn(e, i.src2, static_cast<int32_t>(i.src1.constant()));
} else {
@ -688,7 +688,7 @@ struct SingleSequence : public Sequence<SingleSequence<SEQ, T>, T> {
X64Emitter& e, const EmitArgType& i,
const REG_REG_FN& reg_reg_fn, const REG_CONST_FN& reg_const_fn) {
if (i.src1.is_constant) {
XEASSERT(!i.src2.is_constant);
assert_true(!i.src2.is_constant);
if (i.src1.ConstantFitsIn32Reg()) {
reg_const_fn(e, i.dest, i.src2, static_cast<int32_t>(i.src1.constant()), true);
} else {

View File

@ -1016,7 +1016,7 @@ EMITTER(LOAD_VECTOR_SHL_I8, MATCH(I<OPCODE_LOAD_VECTOR_SHL, V128<>, I8<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
if (i.src1.is_constant) {
auto sh = i.src1.constant();
XEASSERT(sh < XECOUNT(lvsl_table));
assert_true(sh < XECOUNT(lvsl_table));
e.mov(e.rax, (uintptr_t)&lvsl_table[sh]);
e.vmovaps(i.dest, e.ptr[e.rax]);
} else {
@ -1068,7 +1068,7 @@ EMITTER(LOAD_VECTOR_SHR_I8, MATCH(I<OPCODE_LOAD_VECTOR_SHR, V128<>, I8<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
if (i.src1.is_constant) {
auto sh = i.src1.constant();
XEASSERT(sh < XECOUNT(lvsr_table));
assert_true(sh < XECOUNT(lvsr_table));
e.mov(e.rax, (uintptr_t)&lvsr_table[sh]);
e.vmovaps(i.dest, e.ptr[e.rax]);
} else {
@ -2176,28 +2176,28 @@ EMITTER_ASSOCIATIVE_COMPARE_FLT_XX(UGE, setae);
// https://code.google.com/p/corkami/wiki/x86oddities
EMITTER(DID_CARRY_I8, MATCH(I<OPCODE_DID_CARRY, I8<>, I8<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
XEASSERT(!i.src1.is_constant);
assert_true(!i.src1.is_constant);
e.LoadEflags();
e.setc(i.dest);
}
};
EMITTER(DID_CARRY_I16, MATCH(I<OPCODE_DID_CARRY, I8<>, I16<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
XEASSERT(!i.src1.is_constant);
assert_true(!i.src1.is_constant);
e.LoadEflags();
e.setc(i.dest);
}
};
EMITTER(DID_CARRY_I32, MATCH(I<OPCODE_DID_CARRY, I8<>, I32<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
XEASSERT(!i.src1.is_constant);
assert_true(!i.src1.is_constant);
e.LoadEflags();
e.setc(i.dest);
}
};
EMITTER(DID_CARRY_I64, MATCH(I<OPCODE_DID_CARRY, I8<>, I64<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
XEASSERT(!i.src1.is_constant);
assert_true(!i.src1.is_constant);
e.LoadEflags();
e.setc(i.dest);
}
@ -2629,8 +2629,8 @@ EMITTER(VECTOR_ADD, MATCH(I<OPCODE_VECTOR_ADD, V128<>, V128<>, V128<>>)) {
if (saturate) {
if (is_unsigned) {
// We reuse all these temps...
XEASSERT(src1 != e.xmm0 && src1 != e.xmm1 && src1 != e.xmm2);
XEASSERT(src2 != e.xmm0 && src2 != e.xmm1 && src2 != e.xmm2);
assert_true(src1 != e.xmm0 && src1 != e.xmm1 && src1 != e.xmm2);
assert_true(src2 != e.xmm0 && src2 != e.xmm1 && src2 != e.xmm2);
// Clamp to 0xFFFFFFFF.
// Wish there was a vpaddusd...
// | A | B | C | D |
@ -2655,7 +2655,7 @@ EMITTER(VECTOR_ADD, MATCH(I<OPCODE_VECTOR_ADD, V128<>, V128<>, V128<>>)) {
// dest.f[n] = xmm1.f[n] ? xmm1.f[n] : dest.f[n];
e.vblendvps(dest, dest, e.xmm1, e.xmm1);
} else {
XEASSERTALWAYS();
assert_always();
}
} else {
e.vpaddd(dest, src1, src2);
@ -2664,7 +2664,7 @@ EMITTER(VECTOR_ADD, MATCH(I<OPCODE_VECTOR_ADD, V128<>, V128<>, V128<>>)) {
case FLOAT32_TYPE:
e.vaddps(dest, src1, src2);
break;
default: XEASSERTUNHANDLEDCASE(part_type); break;
default: assert_unhandled_case(part_type); break;
}
});
}
@ -2728,7 +2728,7 @@ EMITTER(SUB_I64, MATCH(I<OPCODE_SUB, I64<>, I64<>, I64<>>)) {
};
EMITTER(SUB_F32, MATCH(I<OPCODE_SUB, F32<>, F32<>, F32<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
XEASSERT(!i.instr->flags);
assert_true(!i.instr->flags);
EmitAssociativeBinaryXmmOp(e, i,
[](X64Emitter& e, Xmm dest, Xmm src1, Xmm src2) {
e.vsubss(dest, src1, src2);
@ -2737,7 +2737,7 @@ EMITTER(SUB_F32, MATCH(I<OPCODE_SUB, F32<>, F32<>, F32<>>)) {
};
EMITTER(SUB_F64, MATCH(I<OPCODE_SUB, F64<>, F64<>, F64<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
XEASSERT(!i.instr->flags);
assert_true(!i.instr->flags);
EmitAssociativeBinaryXmmOp(e, i,
[](X64Emitter& e, Xmm dest, Xmm src1, Xmm src2) {
e.vsubsd(dest, src1, src2);
@ -2746,7 +2746,7 @@ EMITTER(SUB_F64, MATCH(I<OPCODE_SUB, F64<>, F64<>, F64<>>)) {
};
EMITTER(SUB_V128, MATCH(I<OPCODE_SUB, V128<>, V128<>, V128<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
XEASSERT(!i.instr->flags);
assert_true(!i.instr->flags);
EmitAssociativeBinaryXmmOp(e, i,
[](X64Emitter& e, Xmm dest, Xmm src1, Xmm src2) {
e.vsubps(dest, src1, src2);
@ -2774,7 +2774,7 @@ EMITTER(MUL_I8, MATCH(I<OPCODE_MUL, I8<>, I8<>, I8<>>)) {
// dest hi, dest low = src * edx
// TODO(benvanik): place src2 in edx?
if (i.src1.is_constant) {
XEASSERT(!i.src2.is_constant);
assert_true(!i.src2.is_constant);
e.movzx(e.edx, i.src2);
e.mov(e.eax, static_cast<uint8_t>(i.src1.constant()));
e.mulx(e.edx, i.dest.reg().cvt32(), e.eax);
@ -2793,7 +2793,7 @@ EMITTER(MUL_I16, MATCH(I<OPCODE_MUL, I16<>, I16<>, I16<>>)) {
// dest hi, dest low = src * edx
// TODO(benvanik): place src2 in edx?
if (i.src1.is_constant) {
XEASSERT(!i.src2.is_constant);
assert_true(!i.src2.is_constant);
e.movzx(e.edx, i.src2);
e.mov(e.ax, static_cast<uint16_t>(i.src1.constant()));
e.mulx(e.edx, i.dest.reg().cvt32(), e.eax);
@ -2813,7 +2813,7 @@ EMITTER(MUL_I32, MATCH(I<OPCODE_MUL, I32<>, I32<>, I32<>>)) {
// dest hi, dest low = src * edx
// TODO(benvanik): place src2 in edx?
if (i.src1.is_constant) {
XEASSERT(!i.src2.is_constant);
assert_true(!i.src2.is_constant);
e.mov(e.edx, i.src2);
e.mov(e.eax, i.src1.constant());
e.mulx(e.edx, i.dest, e.eax);
@ -2833,7 +2833,7 @@ EMITTER(MUL_I64, MATCH(I<OPCODE_MUL, I64<>, I64<>, I64<>>)) {
// dest hi, dest low = src * rdx
// TODO(benvanik): place src2 in edx?
if (i.src1.is_constant) {
XEASSERT(!i.src2.is_constant);
assert_true(!i.src2.is_constant);
e.mov(e.rdx, i.src2);
e.mov(e.rax, i.src1.constant());
e.mulx(e.rdx, i.dest, e.rax);
@ -2850,7 +2850,7 @@ EMITTER(MUL_I64, MATCH(I<OPCODE_MUL, I64<>, I64<>, I64<>>)) {
};
EMITTER(MUL_F32, MATCH(I<OPCODE_MUL, F32<>, F32<>, F32<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
XEASSERT(!i.instr->flags);
assert_true(!i.instr->flags);
EmitCommutativeBinaryXmmOp(e, i,
[](X64Emitter& e, Xmm dest, Xmm src1, Xmm src2) {
e.vmulss(dest, src1, src2);
@ -2859,7 +2859,7 @@ EMITTER(MUL_F32, MATCH(I<OPCODE_MUL, F32<>, F32<>, F32<>>)) {
};
EMITTER(MUL_F64, MATCH(I<OPCODE_MUL, F64<>, F64<>, F64<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
XEASSERT(!i.instr->flags);
assert_true(!i.instr->flags);
EmitCommutativeBinaryXmmOp(e, i,
[](X64Emitter& e, Xmm dest, Xmm src1, Xmm src2) {
e.vmulsd(dest, src1, src2);
@ -2868,7 +2868,7 @@ EMITTER(MUL_F64, MATCH(I<OPCODE_MUL, F64<>, F64<>, F64<>>)) {
};
EMITTER(MUL_V128, MATCH(I<OPCODE_MUL, V128<>, V128<>, V128<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
XEASSERT(!i.instr->flags);
assert_true(!i.instr->flags);
EmitCommutativeBinaryXmmOp(e, i,
[](X64Emitter& e, Xmm dest, Xmm src1, Xmm src2) {
e.vmulps(dest, src1, src2);
@ -2993,7 +2993,7 @@ EMITTER(DIV_I8, MATCH(I<OPCODE_DIV, I8<>, I8<>, I8<>>)) {
// NOTE: RDX clobbered.
bool clobbered_rcx = false;
if (i.src2.is_constant) {
XEASSERT(!i.src1.is_constant);
assert_true(!i.src1.is_constant);
clobbered_rcx = true;
e.mov(e.cl, i.src2.constant());
if (i.instr->flags & ARITHMETIC_UNSIGNED) {
@ -3032,7 +3032,7 @@ EMITTER(DIV_I16, MATCH(I<OPCODE_DIV, I16<>, I16<>, I16<>>)) {
// NOTE: RDX clobbered.
bool clobbered_rcx = false;
if (i.src2.is_constant) {
XEASSERT(!i.src1.is_constant);
assert_true(!i.src1.is_constant);
clobbered_rcx = true;
e.mov(e.cx, i.src2.constant());
if (i.instr->flags & ARITHMETIC_UNSIGNED) {
@ -3081,7 +3081,7 @@ EMITTER(DIV_I32, MATCH(I<OPCODE_DIV, I32<>, I32<>, I32<>>)) {
// NOTE: RDX clobbered.
bool clobbered_rcx = false;
if (i.src2.is_constant) {
XEASSERT(!i.src1.is_constant);
assert_true(!i.src1.is_constant);
clobbered_rcx = true;
e.mov(e.ecx, i.src2.constant());
if (i.instr->flags & ARITHMETIC_UNSIGNED) {
@ -3130,7 +3130,7 @@ EMITTER(DIV_I64, MATCH(I<OPCODE_DIV, I64<>, I64<>, I64<>>)) {
// NOTE: RDX clobbered.
bool clobbered_rcx = false;
if (i.src2.is_constant) {
XEASSERT(!i.src1.is_constant);
assert_true(!i.src1.is_constant);
clobbered_rcx = true;
e.mov(e.rcx, i.src2.constant());
if (i.instr->flags & ARITHMETIC_UNSIGNED) {
@ -3176,7 +3176,7 @@ EMITTER(DIV_I64, MATCH(I<OPCODE_DIV, I64<>, I64<>, I64<>>)) {
};
EMITTER(DIV_F32, MATCH(I<OPCODE_DIV, F32<>, F32<>, F32<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
XEASSERT(!i.instr->flags);
assert_true(!i.instr->flags);
EmitAssociativeBinaryXmmOp(e, i,
[](X64Emitter& e, Xmm dest, Xmm src1, Xmm src2) {
e.vdivss(dest, src1, src2);
@ -3185,7 +3185,7 @@ EMITTER(DIV_F32, MATCH(I<OPCODE_DIV, F32<>, F32<>, F32<>>)) {
};
EMITTER(DIV_F64, MATCH(I<OPCODE_DIV, F64<>, F64<>, F64<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
XEASSERT(!i.instr->flags);
assert_true(!i.instr->flags);
EmitAssociativeBinaryXmmOp(e, i,
[](X64Emitter& e, Xmm dest, Xmm src1, Xmm src2) {
e.vdivsd(dest, src1, src2);
@ -3194,7 +3194,7 @@ EMITTER(DIV_F64, MATCH(I<OPCODE_DIV, F64<>, F64<>, F64<>>)) {
};
EMITTER(DIV_V128, MATCH(I<OPCODE_DIV, V128<>, V128<>, V128<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
XEASSERT(!i.instr->flags);
assert_true(!i.instr->flags);
EmitAssociativeBinaryXmmOp(e, i,
[](X64Emitter& e, Xmm dest, Xmm src1, Xmm src2) {
e.vdivps(dest, src1, src2);
@ -3380,7 +3380,7 @@ EMITTER(NEG_F64, MATCH(I<OPCODE_NEG, F64<>, F64<>>)) {
};
EMITTER(NEG_V128, MATCH(I<OPCODE_NEG, V128<>, V128<>>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
XEASSERT(!i.instr->flags);
assert_true(!i.instr->flags);
e.vxorps(i.dest, i.src1, e.GetXmmConstPtr(XMMSignMaskPS));
}
};
@ -3483,7 +3483,7 @@ EMITTER(POW2_F32, MATCH(I<OPCODE_POW2, F32<>, F32<>>)) {
return _mm_load_ss(&result);
}
static void Emit(X64Emitter& e, const EmitArgType& i) {
XEASSERTALWAYS();
assert_always();
e.lea(e.r8, e.StashXmm(i.src1));
e.CallNativeSafe(EmulatePow2);
e.vmovaps(i.dest, e.xmm0);
@ -3495,7 +3495,7 @@ EMITTER(POW2_F64, MATCH(I<OPCODE_POW2, F64<>, F64<>>)) {
return _mm_load_sd(&result);
}
static void Emit(X64Emitter& e, const EmitArgType& i) {
XEASSERTALWAYS();
assert_always();
e.lea(e.r8, e.StashXmm(i.src1));
e.CallNativeSafe(EmulatePow2);
e.vmovaps(i.dest, e.xmm0);
@ -3534,7 +3534,7 @@ EMITTER(LOG2_F32, MATCH(I<OPCODE_LOG2, F32<>, F32<>>)) {
return _mm_load_ss(&result);
}
static void Emit(X64Emitter& e, const EmitArgType& i) {
XEASSERTALWAYS();
assert_always();
e.lea(e.r8, e.StashXmm(i.src1));
e.CallNativeSafe(EmulateLog2);
e.vmovaps(i.dest, e.xmm0);
@ -3546,7 +3546,7 @@ EMITTER(LOG2_F64, MATCH(I<OPCODE_LOG2, F64<>, F64<>>)) {
return _mm_load_sd(&result);
}
static void Emit(X64Emitter& e, const EmitArgType& i) {
XEASSERTALWAYS();
assert_always();
e.lea(e.r8, e.StashXmm(i.src1));
e.CallNativeSafe(EmulateLog2);
e.vmovaps(i.dest, e.xmm0);
@ -3958,7 +3958,7 @@ EMITTER(VECTOR_SHL_V128, MATCH(I<OPCODE_VECTOR_SHL, V128<>, V128<>, V128<>>)) {
EmitInt32(e, i);
break;
default:
XEASSERTALWAYS();
assert_always();
break;
}
}
@ -3990,7 +3990,7 @@ EMITTER(VECTOR_SHL_V128, MATCH(I<OPCODE_VECTOR_SHL, V128<>, V128<>, V128<>>)) {
}
} else {
// Counts differ, so pre-mask and load constant.
XEASSERTALWAYS();
assert_always();
}
} else {
// Fully variable shift.
@ -4046,11 +4046,11 @@ EMITTER(VECTOR_SHL_V128, MATCH(I<OPCODE_VECTOR_SHL, V128<>, V128<>, V128<>>)) {
e.vpsllw(i.dest, i.src1, shamt.s8[0] & 0xF);
} else {
// Counts differ, so pre-mask and load constant.
XEASSERTALWAYS();
assert_always();
}
} else {
// Fully variable shift.
XEASSERTALWAYS();
assert_always();
}
}
static void EmitInt32(X64Emitter& e, const EmitArgType& i) {
@ -4105,7 +4105,7 @@ EMITTER(VECTOR_SHR_V128, MATCH(I<OPCODE_VECTOR_SHR, V128<>, V128<>, V128<>>)) {
EmitInt32(e, i);
break;
default:
XEASSERTALWAYS();
assert_always();
break;
}
}
@ -4137,11 +4137,11 @@ EMITTER(VECTOR_SHR_V128, MATCH(I<OPCODE_VECTOR_SHR, V128<>, V128<>, V128<>>)) {
}
} else {
// Counts differ, so pre-mask and load constant.
XEASSERTALWAYS();
assert_always();
}
} else {
// Fully variable shift.
XEASSERTALWAYS();
assert_always();
}
}
static void EmitInt16(X64Emitter& e, const EmitArgType& i) {
@ -4159,11 +4159,11 @@ EMITTER(VECTOR_SHR_V128, MATCH(I<OPCODE_VECTOR_SHR, V128<>, V128<>, V128<>>)) {
e.vpsrlw(i.dest, i.src1, shamt.s8[0] & 0xF);
} else {
// Counts differ, so pre-mask and load constant.
XEASSERTALWAYS();
assert_always();
}
} else {
// Fully variable shift.
XEASSERTALWAYS();
assert_always();
}
}
static void EmitInt32(X64Emitter& e, const EmitArgType& i) {
@ -4215,7 +4215,7 @@ EMITTER(VECTOR_SHA_V128, MATCH(I<OPCODE_VECTOR_SHA, V128<>, V128<>, V128<>>)) {
e.vpsravd(i.dest, i.src1, e.xmm0);
break;
default:
XEASSERTALWAYS();
assert_always();
break;
}
}
@ -4375,14 +4375,14 @@ EMITTER(EXTRACT_I8, MATCH(I<OPCODE_EXTRACT, I8<>, V128<>, I8<>>)) {
if (i.src2.is_constant) {
e.vpextrb(i.dest.reg().cvt32(), i.src1, VEC128_B(i.src2.constant()));
} else {
XEASSERTALWAYS();
assert_always();
// TODO(benvanik): try out hlide's version:
// e.mov(e.eax, 0x80808003);
// e.xor(e.al, i.src2);
// e.and(e.al, 15);
// e.vmovd(e.xmm0, e.eax);
// e.vpshufb(e.xmm0, i.src1, e.xmm0);
// e.vmovd(i.dest.reg().cvt32(), e.xmm0);
}
}
};
@ -4443,7 +4443,7 @@ EMITTER(EXTRACT_F32, MATCH(I<OPCODE_EXTRACT, F32<>, V128<>, I8<>>)) {
if (i.src2.is_constant) {
e.vextractps(i.dest, i.src1, VEC128_F(i.src2.constant()));
} else {
XEASSERTALWAYS();
assert_always();
// TODO(benvanik): try out hlide's version:
// e.mov(e.eax, 3);
// e.and(e.al, i.src2); // eax = [(i&3), 0, 0, 0]
@ -4573,7 +4573,7 @@ EMITTER(PERMUTE_I32, MATCH(I<OPCODE_PERMUTE, V128<>, I32<>, V128<>, V128<>>)) {
}
} else {
// Permute by non-constant.
XEASSERTALWAYS();
assert_always();
}
}
};
@ -4650,9 +4650,9 @@ EMITTER(SWIZZLE, MATCH(I<OPCODE_SWIZZLE, V128<>, V128<>, OffsetOp>)) {
static void Emit(X64Emitter& e, const EmitArgType& i) {
auto element_type = i.instr->flags;
if (element_type == INT8_TYPE) {
XEASSERTALWAYS();
assert_always();
} else if (element_type == INT16_TYPE) {
XEASSERTALWAYS();
assert_always();
} else if (element_type == INT32_TYPE || element_type == FLOAT32_TYPE) {
uint8_t swizzle_mask = static_cast<uint8_t>(i.src2.value);
swizzle_mask =
@ -4662,9 +4662,9 @@ EMITTER(SWIZZLE, MATCH(I<OPCODE_SWIZZLE, V128<>, V128<>, OffsetOp>)) {
(((swizzle_mask >> 0) & 0x3) << 6);
e.vpshufd(i.dest, i.src1, swizzle_mask);
} else if (element_type == INT64_TYPE || element_type == FLOAT64_TYPE) {
XEASSERTALWAYS();
assert_always();
} else {
XEASSERTALWAYS();
assert_always();
}
}
};
@ -4703,7 +4703,7 @@ EMITTER(PACK, MATCH(I<OPCODE_PACK, V128<>, V128<>>)) {
case PACK_TYPE_S16_IN_32_HI:
EmitS16_IN_32_HI(e, i);
break;
default: XEASSERTUNHANDLEDCASE(i.instr->flags); break;
default: assert_unhandled_case(i.instr->flags); break;
}
}
static void EmitD3DCOLOR(X64Emitter& e, const EmitArgType& i) {
@ -4755,19 +4755,19 @@ EMITTER(PACK, MATCH(I<OPCODE_PACK, V128<>, V128<>>)) {
e.vpblendw(i.dest, e.xmm0, B11110000);
}
static void EmitSHORT_2(X64Emitter& e, const EmitArgType& i) {
XEASSERTALWAYS();
assert_always();
}
static void EmitS8_IN_16_LO(X64Emitter& e, const EmitArgType& i) {
XEASSERTALWAYS();
assert_always();
}
static void EmitS8_IN_16_HI(X64Emitter& e, const EmitArgType& i) {
XEASSERTALWAYS();
assert_always();
}
static void EmitS16_IN_32_LO(X64Emitter& e, const EmitArgType& i) {
XEASSERTALWAYS();
assert_always();
}
static void EmitS16_IN_32_HI(X64Emitter& e, const EmitArgType& i) {
XEASSERTALWAYS();
assert_always();
}
};
EMITTER_OPCODE_TABLE(
@ -4805,7 +4805,7 @@ EMITTER(UNPACK, MATCH(I<OPCODE_UNPACK, V128<>, V128<>>)) {
case PACK_TYPE_S16_IN_32_HI:
EmitS16_IN_32_HI(e, i);
break;
default: XEASSERTUNHANDLEDCASE(i.instr->flags); break;
default: assert_unhandled_case(i.instr->flags); break;
}
}
static void EmitD3DCOLOR(X64Emitter& e, const EmitArgType& i) {

View File

@ -277,13 +277,13 @@ int ConstantPropagationPass::Run(HIRBuilder* builder) {
break;
case OPCODE_DID_CARRY:
XEASSERT(!i->src1.value->IsConstant());
assert_true(!i->src1.value->IsConstant());
break;
case OPCODE_DID_OVERFLOW:
XEASSERT(!i->src1.value->IsConstant());
assert_true(!i->src1.value->IsConstant());
break;
case OPCODE_DID_SATURATE:
XEASSERT(!i->src1.value->IsConstant());
assert_true(!i->src1.value->IsConstant());
break;
case OPCODE_ADD:

View File

@ -97,7 +97,7 @@ void DataFlowAnalysisPass::AnalyzeFlow(HIRBuilder* builder,
if (v->def && v->def->block != block) { \
incoming_values.set(v->ordinal); \
} \
XEASSERT(v->ordinal < max_value_estimate); \
assert_true(v->ordinal < max_value_estimate); \
value_map[v->ordinal] = v;
if (GET_OPCODE_SIG_TYPE_SRC1(signature) == OPCODE_SIG_TYPE_V) {
SET_INCOMING_VALUE(instr->src1.value);
@ -128,7 +128,7 @@ void DataFlowAnalysisPass::AnalyzeFlow(HIRBuilder* builder,
auto outgoing_ordinal = outgoing_values.find_first();
while (outgoing_ordinal != -1) {
Value* src_value = value_map[outgoing_ordinal];
XEASSERTNOTNULL(src_value);
assert_not_null(src_value);
if (!src_value->local_slot) {
src_value->local_slot = builder->AllocLocal(src_value->type);
}
@ -142,7 +142,7 @@ void DataFlowAnalysisPass::AnalyzeFlow(HIRBuilder* builder,
while (def_next && def_next->opcode->flags & OPCODE_FLAG_PAIRED_PREV) {
def_next = def_next->next;
}
XEASSERTNOTNULL(def_next);
assert_not_null(def_next);
builder->last_instr()->MoveBefore(def_next);
// We don't need it in the incoming list.
@ -153,7 +153,7 @@ void DataFlowAnalysisPass::AnalyzeFlow(HIRBuilder* builder,
while (tail && tail->opcode->flags & OPCODE_FLAG_BRANCH) {
tail = tail->prev;
}
XEASSERTNOTZERO(tail);
assert_not_zero(tail);
builder->last_instr()->MoveBefore(tail->next);
}
@ -164,7 +164,7 @@ void DataFlowAnalysisPass::AnalyzeFlow(HIRBuilder* builder,
auto incoming_ordinal = incoming_values.find_first();
while (incoming_ordinal != -1) {
Value* src_value = value_map[incoming_ordinal];
XEASSERTNOTNULL(src_value);
assert_not_null(src_value);
if (!src_value->local_slot) {
src_value->local_slot = builder->AllocLocal(src_value->type);
}

View File

@ -114,13 +114,13 @@ int RegisterAllocationPass::Run(HIRBuilder* builder) {
// dest.
has_preferred_reg = true;
preferred_reg = instr->src1.value->reg;
XEASSERTNOTNULL(preferred_reg.set);
assert_not_null(preferred_reg.set);
}
}
if (GET_OPCODE_SIG_TYPE_DEST(signature) == OPCODE_SIG_TYPE_V) {
// Must not have been set already.
XEASSERTNULL(instr->dest->reg.set);
assert_null(instr->dest->reg.set);
// Sort the usage list. We depend on this in future uses of this
// variable.
@ -144,7 +144,7 @@ int RegisterAllocationPass::Run(HIRBuilder* builder) {
if (!SpillOneRegister(builder, instr->dest->type)) {
// Unable to spill anything - this shouldn't happen.
XELOGE("Unable to spill any registers");
XEASSERTALWAYS();
assert_always();
return 1;
}
@ -152,7 +152,7 @@ int RegisterAllocationPass::Run(HIRBuilder* builder) {
if (!TryAllocateRegister(instr->dest)) {
// Boned.
XELOGE("Register allocation failed");
XEASSERTALWAYS();
assert_always();
return 1;
}
}
@ -330,14 +330,14 @@ bool RegisterAllocationPass::SpillOneRegister(HIRBuilder* builder,
DumpUsage("SpillOneRegister (pre)");
// Pick the one with the furthest next use.
XEASSERT(!usage_set->upcoming_uses.empty());
assert_true(!usage_set->upcoming_uses.empty());
auto furthest_usage = std::max_element(usage_set->upcoming_uses.begin(),
usage_set->upcoming_uses.end(),
RegisterUsage::Comparer());
auto spill_value = furthest_usage->value;
Value::Use* prev_use = furthest_usage->use->prev;
Value::Use* next_use = furthest_usage->use;
XEASSERTNOTNULL(next_use);
assert_not_null(next_use);
usage_set->upcoming_uses.erase(furthest_usage);
DumpUsage("SpillOneRegister (post)");
const auto reg = spill_value->reg;
@ -361,11 +361,11 @@ bool RegisterAllocationPass::SpillOneRegister(HIRBuilder* builder,
builder->StoreLocal(spill_value->local_slot, spill_value);
auto spill_store = builder->last_instr();
auto spill_store_use = spill_store->src2_use;
XEASSERTNULL(spill_store_use->prev);
assert_null(spill_store_use->prev);
if (prev_use && prev_use->instr->opcode->flags & OPCODE_FLAG_PAIRED_PREV) {
// Instruction is paired. This is bad. We will insert the spill after the
// paired instruction.
XEASSERTNOTNULL(prev_use->instr->next);
assert_not_null(prev_use->instr->next);
spill_store->MoveBefore(prev_use->instr->next);
// Update last use.

View File

@ -45,7 +45,7 @@ int ValidationPass::Run(HIRBuilder* builder) {
while (block) {
auto label = block->label_head;
while (label) {
XEASSERT(label->block == block);
assert_true(label->block == block);
if (label->block != block) {
return 1;
}
@ -67,7 +67,7 @@ int ValidationPass::Run(HIRBuilder* builder) {
}
int ValidationPass::ValidateInstruction(Block* block, Instr* instr) {
XEASSERT(instr->block == block);
assert_true(instr->block == block);
if (instr->block != block) {
return 1;
}
@ -95,7 +95,7 @@ int ValidationPass::ValidateInstruction(Block* block, Instr* instr) {
int ValidationPass::ValidateValue(Block* block, Instr* instr, Value* value) {
// if (value->def) {
// auto def = value->def;
// XEASSERT(def->block == block);
// assert_true(def->block == block);
// if (def->block != block) {
// return 1;
// }

View File

@ -11,7 +11,15 @@
#define ALLOY_CORE_H_
// TODO(benvanik): move the common stuff into here?
#include <xenia/common.h>
#include <xenia/atomic.h>
#include <xenia/byte_order.h>
#include <xenia/config.h>
#include <xenia/logging.h>
#include <xenia/malloc.h>
#include <xenia/platform.h>
#include <xenia/profiling.h>
#include <xenia/string.h>
#include <xenia/types.h>
#include <poly/poly.h>

View File

@ -412,7 +412,7 @@ void Disasm_rld(InstrData& i, StringBuffer* str) {
i.MD.Rc ? "." : "", i.MD.RA, i.MD.RT, (i.MD.SH5 << 5) | i.MD.SH,
(i.MD.MB5 << 5) | i.MD.MB);
} else {
XEASSERTALWAYS();
assert_always();
}
}
void Disasm_rlwim(InstrData& i, StringBuffer* str) {

View File

@ -23,7 +23,7 @@ namespace ppc {
RegisterInstrEmit(opcode, (InstrEmitFn)InstrEmit_##name);
#define XEINSTRNOTIMPLEMENTED()
//#define XEINSTRNOTIMPLEMENTED XEASSERTALWAYS
//#define XEINSTRNOTIMPLEMENTED assert_always
//#define XEINSTRNOTIMPLEMENTED() __debugbreak()
} // namespace ppc

View File

@ -62,7 +62,7 @@ Value* CalculateEA_0(PPCHIRBuilder& f, uint32_t ra, uint32_t rb);
#define VX128_R_VB128 (i.VX128_R.VB128l | (i.VX128_R.VB128h << 5))
unsigned int xerotl(unsigned int value, unsigned int shift) {
XEASSERT(shift < 32);
assert_true(shift < 32);
return shift == 0 ? value : ((value << shift) | (value >> (32 - shift)));
}
@ -588,7 +588,7 @@ int InstrEmit_vcmpxxfp_(PPCHIRBuilder& f, InstrData& i, vcmpxxfp_op cmpop,
v = f.VectorCompareSGE(f.LoadVR(va), f.LoadVR(vb), FLOAT32_TYPE);
break;
default:
XEASSERTUNHANDLEDCASE(cmpop);
assert_unhandled_case(cmpop);
return 1;
}
if (rc) {
@ -648,7 +648,7 @@ int InstrEmit_vcmpxxi_(PPCHIRBuilder& f, InstrData& i, vcmpxxi_op cmpop,
v = f.VectorCompareEQ(f.LoadVR(va), f.LoadVR(vb), INT32_TYPE);
break;
default:
XEASSERTUNHANDLEDCASE(width);
assert_unhandled_case(width);
return 1;
}
break;
@ -664,7 +664,7 @@ int InstrEmit_vcmpxxi_(PPCHIRBuilder& f, InstrData& i, vcmpxxi_op cmpop,
v = f.VectorCompareSGT(f.LoadVR(va), f.LoadVR(vb), INT32_TYPE);
break;
default:
XEASSERTUNHANDLEDCASE(width);
assert_unhandled_case(width);
return 1;
}
break;
@ -680,12 +680,12 @@ int InstrEmit_vcmpxxi_(PPCHIRBuilder& f, InstrData& i, vcmpxxi_op cmpop,
v = f.VectorCompareUGT(f.LoadVR(va), f.LoadVR(vb), INT32_TYPE);
break;
default:
XEASSERTUNHANDLEDCASE(width);
assert_unhandled_case(width);
return 1;
}
break;
default:
XEASSERTUNHANDLEDCASE(cmpop);
assert_unhandled_case(cmpop);
return 1;
}
if (rc) {
@ -1233,7 +1233,7 @@ XEEMITTER(vrlimi128, VX128_4(6, 1808), VX128_4)(PPCHIRBuilder& f,
swizzle_mask = SWIZZLE_XYZW_TO_WXYZ;
break;
default:
XEASSERTALWAYS();
assert_always();
return 1;
}
v = f.Swizzle(f.LoadVR(vb), FLOAT32_TYPE, swizzle_mask);
@ -1707,7 +1707,7 @@ XEEMITTER(vupkhsh, 0x1000024E, VX)(PPCHIRBuilder& f, InstrData& i) {
}
XEEMITTER(vupkhsh128, 0x100002CE, VX)(PPCHIRBuilder& f, InstrData& i) {
uint32_t va = VX128_VA128;
XEASSERTZERO(va);
assert_zero(va);
return InstrEmit_vupkhsh_(f, VX128_VD128, VX128_VB128);
}
@ -1722,7 +1722,7 @@ XEEMITTER(vupklsh, 0x100002CE, VX)(PPCHIRBuilder& f, InstrData& i) {
}
XEEMITTER(vupklsh128, 0x100002CE, VX)(PPCHIRBuilder& f, InstrData& i) {
uint32_t va = VX128_VA128;
XEASSERTZERO(va);
assert_zero(va);
return InstrEmit_vupklsh_(f, VX128_VD128, VX128_VB128);
}
@ -1784,7 +1784,7 @@ XEEMITTER(vpkd3d128, VX128_4(6, 1552), VX128_4)(PPCHIRBuilder& f,
v = f.Pack(v, PACK_TYPE_FLOAT16_4);
break;
default:
XEASSERTUNHANDLEDCASE(type);
assert_unhandled_case(type);
return 1;
}
// http://hlssmod.net/he_code/public/pixelwriter.h
@ -1819,7 +1819,7 @@ XEEMITTER(vpkd3d128, VX128_4(6, 1552), VX128_4)(PPCHIRBuilder& f,
control = (control & ~mask) | (src & mask);
break;
default:
XEASSERTUNHANDLEDCASE(pack);
assert_unhandled_case(pack);
return 1;
}
v = f.Permute(f.LoadConstant(control), f.LoadVR(vd), v, INT32_TYPE);
@ -1851,7 +1851,7 @@ XEEMITTER(vupkd3d128, VX128_3(6, 2032), VX128_3)(PPCHIRBuilder& f,
v = f.Unpack(v, PACK_TYPE_FLOAT16_4);
break;
default:
XEASSERTUNHANDLEDCASE(type);
assert_unhandled_case(type);
return 1;
}
f.StoreVR(vd, v);

View File

@ -28,7 +28,7 @@ XEEMITTER(addx, 0x7C000214, XO)(PPCHIRBuilder& f, InstrData& i) {
Value* v = f.Add(f.LoadGPR(i.XO.RA), f.LoadGPR(i.XO.RB));
f.StoreGPR(i.XO.RT, v);
if (i.XO.OE) {
XEASSERTALWAYS();
assert_always();
// e.update_xer_with_overflow(EFLAGS OF?);
}
if (i.XO.Rc) {
@ -45,7 +45,7 @@ XEEMITTER(addcx, 0x7C000014, XO)(PPCHIRBuilder& f, InstrData& i) {
f.StoreCA(f.DidCarry(v));
f.StoreGPR(i.XO.RT, v);
if (i.XO.OE) {
XEASSERTALWAYS();
assert_always();
// e.update_xer_with_overflow(EFLAGS OF?);
}
if (i.XO.Rc) {
@ -61,7 +61,7 @@ XEEMITTER(addex, 0x7C000114, XO)(PPCHIRBuilder& f, InstrData& i) {
f.StoreCA(f.DidCarry(v));
f.StoreGPR(i.XO.RT, v);
if (i.XO.OE) {
XEASSERTALWAYS();
assert_always();
// e.update_xer_with_overflow(EFLAGS OF?);
}
if (i.XO.Rc) {
@ -124,7 +124,7 @@ XEEMITTER(addmex, 0x7C0001D4, XO)(PPCHIRBuilder& f, InstrData& i) {
if (i.XO.OE) {
// With XER[SO] update too.
// e.update_xer_with_overflow_and_carry(b.CreateExtractValue(v, 1));
XEASSERTALWAYS();
assert_always();
} else {
// Just CA update.
f.StoreCA(f.DidCarry(v));
@ -143,7 +143,7 @@ XEEMITTER(addzex, 0x7C000194, XO)(PPCHIRBuilder& f, InstrData& i) {
if (i.XO.OE) {
// With XER[SO] update too.
// e.update_xer_with_overflow_and_carry(b.CreateExtractValue(v, 1));
XEASSERTALWAYS();
assert_always();
} else {
// Just CA update.
f.StoreCA(f.DidCarry(v));
@ -172,7 +172,7 @@ XEEMITTER(divdx, 0x7C0003D2, XO)(PPCHIRBuilder& f, InstrData& i) {
if (i.XO.OE) {
// If we are OE=1 we need to clear the overflow bit.
// e.update_xer_with_overflow(e.get_uint64(0));
XEASSERTALWAYS();
assert_always();
return 1;
}
if (i.XO.Rc) {
@ -198,7 +198,7 @@ XEEMITTER(divdux, 0x7C000392, XO)(PPCHIRBuilder& f, InstrData& i) {
if (i.XO.OE) {
// If we are OE=1 we need to clear the overflow bit.
// e.update_xer_with_overflow(e.get_uint64(0));
XEASSERTALWAYS();
assert_always();
return 1;
}
if (i.XO.Rc) {
@ -226,7 +226,7 @@ XEEMITTER(divwx, 0x7C0003D6, XO)(PPCHIRBuilder& f, InstrData& i) {
if (i.XO.OE) {
// If we are OE=1 we need to clear the overflow bit.
// e.update_xer_with_overflow(e.get_uint64(0));
XEASSERTALWAYS();
assert_always();
return 1;
}
if (i.XO.Rc) {
@ -255,7 +255,7 @@ XEEMITTER(divwux, 0x7C000396, XO)(PPCHIRBuilder& f, InstrData& i) {
if (i.XO.OE) {
// If we are OE=1 we need to clear the overflow bit.
// e.update_xer_with_overflow(e.get_uint64(0));
XEASSERTALWAYS();
assert_always();
return 1;
}
if (i.XO.Rc) {
@ -379,7 +379,7 @@ XEEMITTER(negx, 0x7C0000D0, XO)(PPCHIRBuilder& f, InstrData& i) {
// if RA == 0x8000000000000000 then no-op and set OV=1
// This may just magically do that...
XEASSERTALWAYS();
assert_always();
// Function* ssub_with_overflow = Intrinsic::getDeclaration(
// e.gen_module(), Intrinsic::ssub_with_overflow, jit_type_nint);
// jit_value_t v = b.CreateCall2(ssub_with_overflow,
@ -408,7 +408,7 @@ XEEMITTER(subfx, 0x7C000050, XO)(PPCHIRBuilder& f, InstrData& i) {
Value* v = f.Sub(f.LoadGPR(i.XO.RB), f.LoadGPR(i.XO.RA));
f.StoreGPR(i.XO.RT, v);
if (i.XO.OE) {
XEASSERTALWAYS();
assert_always();
// e.update_xer_with_overflow(EFLAGS??);
}
if (i.XO.Rc) {
@ -424,7 +424,7 @@ XEEMITTER(subfcx, 0x7C000010, XO)(PPCHIRBuilder& f, InstrData& i) {
f.StoreCA(f.DidCarry(v));
f.StoreGPR(i.XO.RT, v);
if (i.XO.OE) {
XEASSERTALWAYS();
assert_always();
// e.update_xer_with_overflow(EFLAGS??);
}
if (i.XO.Rc) {
@ -449,7 +449,7 @@ XEEMITTER(subfex, 0x7C000110, XO)(PPCHIRBuilder& f, InstrData& i) {
f.StoreCA(f.DidCarry(v));
f.StoreGPR(i.XO.RT, v);
if (i.XO.OE) {
XEASSERTALWAYS();
assert_always();
// e.update_xer_with_overflow_and_carry(b.CreateExtractValue(v, 1));
}
if (i.XO.Rc) {
@ -463,7 +463,7 @@ XEEMITTER(subfmex, 0x7C0001D0, XO)(PPCHIRBuilder& f, InstrData& i) {
Value* v = f.AddWithCarry(f.Not(f.LoadGPR(i.XO.RA)),
f.LoadConstant((int64_t)-1), f.LoadCA());
if (i.XO.OE) {
XEASSERTALWAYS();
assert_always();
// e.update_xer_with_overflow_and_carry(b.CreateExtractValue(v, 1));
} else {
f.StoreCA(f.DidCarry(v));
@ -480,7 +480,7 @@ XEEMITTER(subfzex, 0x7C000190, XO)(PPCHIRBuilder& f, InstrData& i) {
Value* v = f.AddWithCarry(f.Not(f.LoadGPR(i.XO.RA)), f.LoadZero(INT64_TYPE),
f.LoadCA());
if (i.XO.OE) {
XEASSERTALWAYS();
assert_always();
// e.update_xer_with_overflow_and_carry(b.CreateExtractValue(v, 1));
} else {
f.StoreCA(f.DidCarry(v));
@ -1112,7 +1112,7 @@ XEEMITTER(sradix, 0x7C000674, XS)(PPCHIRBuilder& f, InstrData& i) {
// CA is set if any bits are shifted out of the right and if the result
// is negative.
XEASSERT(sh);
assert_true(sh);
uint64_t mask = XEMASK(64 - sh, 63);
Value* ca = f.And(f.Truncate(f.Shr(v, 63), INT8_TYPE),
f.IsTrue(f.And(v, f.LoadConstant(mask))));

View File

@ -206,7 +206,7 @@ Value* PPCHIRBuilder::LoadLR() {
}
void PPCHIRBuilder::StoreLR(Value* value) {
XEASSERT(value->type == INT64_TYPE);
assert_true(value->type == INT64_TYPE);
StoreContext(offsetof(PPCContext, lr), value);
}
@ -215,12 +215,12 @@ Value* PPCHIRBuilder::LoadCTR() {
}
void PPCHIRBuilder::StoreCTR(Value* value) {
XEASSERT(value->type == INT64_TYPE);
assert_true(value->type == INT64_TYPE);
StoreContext(offsetof(PPCContext, ctr), value);
}
Value* PPCHIRBuilder::LoadCR(uint32_t n) {
XEASSERTALWAYS();
assert_always();
return 0;
}
@ -230,7 +230,7 @@ Value* PPCHIRBuilder::LoadCRField(uint32_t n, uint32_t bit) {
void PPCHIRBuilder::StoreCR(uint32_t n, Value* value) {
// TODO(benvanik): split bits out and store in values.
XEASSERTALWAYS();
assert_always();
}
void PPCHIRBuilder::UpdateCR(uint32_t n, Value* lhs, bool is_signed) {
@ -271,23 +271,23 @@ Value* PPCHIRBuilder::LoadFPSCR() {
}
void PPCHIRBuilder::StoreFPSCR(Value* value) {
XEASSERT(value->type == INT64_TYPE);
assert_true(value->type == INT64_TYPE);
StoreContext(offsetof(PPCContext, fpscr), value);
}
Value* PPCHIRBuilder::LoadXER() {
XEASSERTALWAYS();
assert_always();
return NULL;
}
void PPCHIRBuilder::StoreXER(Value* value) { XEASSERTALWAYS(); }
void PPCHIRBuilder::StoreXER(Value* value) { assert_always(); }
Value* PPCHIRBuilder::LoadCA() {
return LoadContext(offsetof(PPCContext, xer_ca), INT8_TYPE);
}
void PPCHIRBuilder::StoreCA(Value* value) {
XEASSERT(value->type == INT8_TYPE);
assert_true(value->type == INT8_TYPE);
StoreContext(offsetof(PPCContext, xer_ca), value);
}
@ -305,7 +305,7 @@ Value* PPCHIRBuilder::LoadGPR(uint32_t reg) {
}
void PPCHIRBuilder::StoreGPR(uint32_t reg, Value* value) {
XEASSERT(value->type == INT64_TYPE);
assert_true(value->type == INT64_TYPE);
StoreContext(offsetof(PPCContext, r) + reg * 8, value);
}
@ -314,7 +314,7 @@ Value* PPCHIRBuilder::LoadFPR(uint32_t reg) {
}
void PPCHIRBuilder::StoreFPR(uint32_t reg, Value* value) {
XEASSERT(value->type == FLOAT64_TYPE);
assert_true(value->type == FLOAT64_TYPE);
StoreContext(offsetof(PPCContext, f) + reg * 8, value);
}
@ -323,7 +323,7 @@ Value* PPCHIRBuilder::LoadVR(uint32_t reg) {
}
void PPCHIRBuilder::StoreVR(uint32_t reg, Value* value) {
XEASSERT(value->type == VEC128_TYPE);
assert_true(value->type == VEC128_TYPE);
StoreContext(offsetof(PPCContext, v) + reg * 16, value);
}

View File

@ -146,7 +146,7 @@ void InstrAccessBits::MarkAccess(InstrRegister& reg) {
}
break;
default:
XEASSERTUNHANDLEDCASE(reg.set);
assert_unhandled_case(reg.set);
break;
}
}
@ -384,11 +384,11 @@ InstrType* GetInstrType(uint32_t code) {
int RegisterInstrEmit(uint32_t code, InstrEmitFn emit) {
InstrType* instr_type = GetInstrType(code);
XEASSERTNOTNULL(instr_type);
assert_not_null(instr_type);
if (!instr_type) {
return 1;
}
XEASSERTNULL(instr_type->emit);
assert_null(instr_type->emit);
instr_type->emit = emit;
return 0;
}

View File

@ -23,12 +23,12 @@ void Block::AssertNoCycles() {
while ((hare = hare->next)) {
if (hare == tortoise) {
// Cycle!
XEASSERTALWAYS();
assert_always();
}
hare = hare->next;
if (hare == tortoise) {
// Cycle!
XEASSERTALWAYS();
assert_always();
}
tortoise = tortoise->next;
if (!hare || !tortoise) {

View File

@ -114,7 +114,7 @@ void HIRBuilder::DumpValue(StringBuffer* str, Value* value) {
value->constant.v128.w);
break;
default:
XEASSERTALWAYS();
assert_always();
break;
}
} else {
@ -276,12 +276,12 @@ void HIRBuilder::AssertNoCycles() {
while ((hare = hare->next)) {
if (hare == tortoise) {
// Cycle!
XEASSERTALWAYS();
assert_always();
}
hare = hare->next;
if (hare == tortoise) {
// Cycle!
XEASSERTALWAYS();
assert_always();
}
tortoise = tortoise->next;
if (!hare || !tortoise) {
@ -937,7 +937,7 @@ Value* HIRBuilder::LoadConstant(const vec128_t& value) {
}
Value* HIRBuilder::LoadVectorShl(Value* sh) {
XEASSERT(sh->type == INT8_TYPE);
assert_true(sh->type == INT8_TYPE);
Instr* i =
AppendInstr(OPCODE_LOAD_VECTOR_SHL_info, 0, AllocValue(VEC128_TYPE));
i->set_src1(sh);
@ -946,7 +946,7 @@ Value* HIRBuilder::LoadVectorShl(Value* sh) {
}
Value* HIRBuilder::LoadVectorShr(Value* sh) {
XEASSERT(sh->type == INT8_TYPE);
assert_true(sh->type == INT8_TYPE);
Instr* i =
AppendInstr(OPCODE_LOAD_VECTOR_SHR_info, 0, AllocValue(VEC128_TYPE));
i->set_src1(sh);
@ -1050,7 +1050,7 @@ Value* HIRBuilder::Min(Value* value1, Value* value2) {
}
Value* HIRBuilder::Select(Value* cond, Value* value1, Value* value2) {
XEASSERT(cond->type == INT8_TYPE); // for now
assert_true(cond->type == INT8_TYPE); // for now
ASSERT_TYPES_EQUAL(value1, value2);
if (cond->IsConstant()) {
@ -1233,7 +1233,7 @@ Value* HIRBuilder::Add(Value* value1, Value* value2,
Value* HIRBuilder::AddWithCarry(Value* value1, Value* value2, Value* value3,
uint32_t arithmetic_flags) {
ASSERT_TYPES_EQUAL(value1, value2);
XEASSERT(value3->type == INT8_TYPE);
assert_true(value3->type == INT8_TYPE);
Instr* i = AppendInstr(OPCODE_ADD_CARRY_info, arithmetic_flags,
AllocValue(value1->type));
@ -1250,7 +1250,7 @@ Value* HIRBuilder::VectorAdd(Value* value1, Value* value2, TypeName part_type,
// This is shady.
uint32_t flags = part_type | (arithmetic_flags << 8);
XEASSERTZERO(flags >> 16);
assert_zero(flags >> 16);
Instr* i = AppendInstr(OPCODE_VECTOR_ADD_info, (uint16_t)flags,
AllocValue(value1->type));
@ -1701,7 +1701,7 @@ Value* HIRBuilder::Permute(Value* control, Value* value1, Value* value2,
Value* HIRBuilder::Swizzle(Value* value, TypeName part_type,
uint32_t swizzle_mask) {
// For now.
XEASSERT(part_type == INT32_TYPE || part_type == FLOAT32_TYPE);
assert_true(part_type == INT32_TYPE || part_type == FLOAT32_TYPE);
if (swizzle_mask == SWIZZLE_XYZW_TO_XYZW) {
return Assign(value);

View File

@ -36,7 +36,7 @@ void Value::RemoveUse(Use* use) {
}
uint32_t Value::AsUint32() {
XEASSERT(IsConstant());
assert_true(IsConstant());
switch (type) {
case INT8_TYPE:
return constant.i8;
@ -47,13 +47,13 @@ uint32_t Value::AsUint32() {
case INT64_TYPE:
return (uint32_t)constant.i64;
default:
XEASSERTUNHANDLEDCASE(type);
assert_unhandled_case(type);
return 0;
}
}
uint64_t Value::AsUint64() {
XEASSERT(IsConstant());
assert_true(IsConstant());
switch (type) {
case INT8_TYPE:
return constant.i8;
@ -64,14 +64,14 @@ uint64_t Value::AsUint64() {
case INT64_TYPE:
return constant.i64;
default:
XEASSERTUNHANDLEDCASE(type);
assert_unhandled_case(type);
return 0;
}
}
void Value::Cast(TypeName target_type) {
// TODO(benvanik): big matrix.
XEASSERTALWAYS();
assert_always();
}
void Value::ZeroExtend(TypeName target_type) {
@ -89,7 +89,7 @@ void Value::ZeroExtend(TypeName target_type) {
constant.i64 = constant.i64 & 0xFFFFFFFF;
return;
default:
XEASSERTUNHANDLEDCASE(type);
assert_unhandled_case(type);
break;
}
}
@ -109,7 +109,7 @@ void Value::SignExtend(TypeName target_type) {
constant.i64 = constant.i8;
return;
default:
XEASSERTUNHANDLEDCASE(target_type);
assert_unhandled_case(target_type);
return;
}
case INT16_TYPE:
@ -122,7 +122,7 @@ void Value::SignExtend(TypeName target_type) {
constant.i64 = constant.i16;
return;
default:
XEASSERTUNHANDLEDCASE(target_type);
assert_unhandled_case(target_type);
return;
}
case INT32_TYPE:
@ -132,11 +132,11 @@ void Value::SignExtend(TypeName target_type) {
constant.i64 = constant.i32;
return;
default:
XEASSERTUNHANDLEDCASE(target_type);
assert_unhandled_case(target_type);
return;
}
default:
XEASSERTUNHANDLEDCASE(type);
assert_unhandled_case(type);
return;
}
}
@ -150,7 +150,7 @@ void Value::Truncate(TypeName target_type) {
constant.i64 = constant.i64 & 0xFF;
return;
default:
XEASSERTUNHANDLEDCASE(target_type);
assert_unhandled_case(target_type);
return;
}
case INT32_TYPE:
@ -164,7 +164,7 @@ void Value::Truncate(TypeName target_type) {
constant.i64 = constant.i64 & 0xFFFF;
return;
default:
XEASSERTUNHANDLEDCASE(target_type);
assert_unhandled_case(target_type);
return;
}
case INT64_TYPE:
@ -182,29 +182,29 @@ void Value::Truncate(TypeName target_type) {
constant.i64 = constant.i64 & 0xFFFFFFFF;
return;
default:
XEASSERTUNHANDLEDCASE(target_type);
assert_unhandled_case(target_type);
return;
}
default:
XEASSERTUNHANDLEDCASE(type);
assert_unhandled_case(type);
return;
}
}
void Value::Convert(TypeName target_type, RoundMode round_mode) {
// TODO(benvanik): big matrix.
XEASSERTALWAYS();
assert_always();
}
void Value::Round(RoundMode round_mode) {
// TODO(benvanik): big matrix.
XEASSERTALWAYS();
assert_always();
}
bool Value::Add(Value* other) {
#define CHECK_DID_CARRY(v1, v2) (((uint64_t)v2) > ~((uint64_t)v1))
#define ADD_DID_CARRY(a, b) CHECK_DID_CARRY(a, b)
XEASSERT(type == other->type);
assert_true(type == other->type);
bool did_carry = false;
switch (type) {
case INT8_TYPE:
@ -230,7 +230,7 @@ bool Value::Add(Value* other) {
constant.f64 += other->constant.f64;
break;
default:
XEASSERTUNHANDLEDCASE(type);
assert_unhandled_case(type);
break;
}
return did_carry;
@ -238,7 +238,7 @@ bool Value::Add(Value* other) {
bool Value::Sub(Value* other) {
#define SUB_DID_CARRY(a, b) (b > a)
XEASSERT(type == other->type);
assert_true(type == other->type);
bool did_carry = false;
switch (type) {
case INT8_TYPE:
@ -264,14 +264,14 @@ bool Value::Sub(Value* other) {
constant.f64 -= other->constant.f64;
break;
default:
XEASSERTUNHANDLEDCASE(type);
assert_unhandled_case(type);
break;
}
return did_carry;
}
void Value::Mul(Value* other) {
XEASSERT(type == other->type);
assert_true(type == other->type);
switch (type) {
case INT8_TYPE:
constant.i8 *= other->constant.i8;
@ -292,13 +292,13 @@ void Value::Mul(Value* other) {
constant.f64 *= other->constant.f64;
break;
default:
XEASSERTUNHANDLEDCASE(type);
assert_unhandled_case(type);
break;
}
}
void Value::Div(Value* other) {
XEASSERT(type == other->type);
assert_true(type == other->type);
switch (type) {
case INT8_TYPE:
constant.i8 /= other->constant.i8;
@ -319,19 +319,19 @@ void Value::Div(Value* other) {
constant.f64 /= other->constant.f64;
break;
default:
XEASSERTUNHANDLEDCASE(type);
assert_unhandled_case(type);
break;
}
}
void Value::MulAdd(Value* dest, Value* value1, Value* value2, Value* value3) {
// TODO(benvanik): big matrix.
XEASSERTALWAYS();
assert_always();
}
void Value::MulSub(Value* dest, Value* value1, Value* value2, Value* value3) {
// TODO(benvanik): big matrix.
XEASSERTALWAYS();
assert_always();
}
void Value::Neg() {
@ -355,7 +355,7 @@ void Value::Neg() {
constant.f64 = -constant.f64;
break;
default:
XEASSERTUNHANDLEDCASE(type);
assert_unhandled_case(type);
break;
}
}
@ -381,7 +381,7 @@ void Value::Abs() {
constant.f64 = abs(constant.f64);
break;
default:
XEASSERTUNHANDLEDCASE(type);
assert_unhandled_case(type);
break;
}
}
@ -395,7 +395,7 @@ void Value::Sqrt() {
constant.f64 = 1.0 / sqrt(constant.f64);
break;
default:
XEASSERTUNHANDLEDCASE(type);
assert_unhandled_case(type);
break;
}
}
@ -409,13 +409,13 @@ void Value::RSqrt() {
constant.f64 = sqrt(constant.f64);
break;
default:
XEASSERTUNHANDLEDCASE(type);
assert_unhandled_case(type);
break;
}
}
void Value::And(Value* other) {
XEASSERT(type == other->type);
assert_true(type == other->type);
switch (type) {
case INT8_TYPE:
constant.i8 &= other->constant.i8;
@ -430,13 +430,13 @@ void Value::And(Value* other) {
constant.i64 &= other->constant.i64;
break;
default:
XEASSERTUNHANDLEDCASE(type);
assert_unhandled_case(type);
break;
}
}
void Value::Or(Value* other) {
XEASSERT(type == other->type);
assert_true(type == other->type);
switch (type) {
case INT8_TYPE:
constant.i8 |= other->constant.i8;
@ -451,13 +451,13 @@ void Value::Or(Value* other) {
constant.i64 |= other->constant.i64;
break;
default:
XEASSERTUNHANDLEDCASE(type);
assert_unhandled_case(type);
break;
}
}
void Value::Xor(Value* other) {
XEASSERT(type == other->type);
assert_true(type == other->type);
switch (type) {
case INT8_TYPE:
constant.i8 ^= other->constant.i8;
@ -472,7 +472,7 @@ void Value::Xor(Value* other) {
constant.i64 ^= other->constant.i64;
break;
default:
XEASSERTUNHANDLEDCASE(type);
assert_unhandled_case(type);
break;
}
}
@ -496,13 +496,13 @@ void Value::Not() {
constant.v128.high = ~constant.v128.high;
break;
default:
XEASSERTUNHANDLEDCASE(type);
assert_unhandled_case(type);
break;
}
}
void Value::Shl(Value* other) {
XEASSERT(other->type == INT8_TYPE);
assert_true(other->type == INT8_TYPE);
switch (type) {
case INT8_TYPE:
constant.i8 <<= other->constant.i8;
@ -517,13 +517,13 @@ void Value::Shl(Value* other) {
constant.i64 <<= other->constant.i8;
break;
default:
XEASSERTUNHANDLEDCASE(type);
assert_unhandled_case(type);
break;
}
}
void Value::Shr(Value* other) {
XEASSERT(other->type == INT8_TYPE);
assert_true(other->type == INT8_TYPE);
switch (type) {
case INT8_TYPE:
constant.i8 = (uint8_t)constant.i8 >> other->constant.i8;
@ -538,13 +538,13 @@ void Value::Shr(Value* other) {
constant.i64 = (uint16_t)constant.i64 >> other->constant.i8;
break;
default:
XEASSERTUNHANDLEDCASE(type);
assert_unhandled_case(type);
break;
}
}
void Value::Sha(Value* other) {
XEASSERT(other->type == INT8_TYPE);
assert_true(other->type == INT8_TYPE);
switch (type) {
case INT8_TYPE:
constant.i8 = constant.i8 >> other->constant.i8;
@ -559,7 +559,7 @@ void Value::Sha(Value* other) {
constant.i64 = constant.i64 >> other->constant.i8;
break;
default:
XEASSERTUNHANDLEDCASE(type);
assert_unhandled_case(type);
break;
}
}
@ -584,7 +584,7 @@ void Value::ByteSwap() {
}
break;
default:
XEASSERTUNHANDLEDCASE(type);
assert_unhandled_case(type);
break;
}
}
@ -604,14 +604,14 @@ void Value::CountLeadingZeros(const Value* other) {
constant.i8 = poly::lzcnt(constant.i64);
break;
default:
XEASSERTUNHANDLEDCASE(type);
assert_unhandled_case(type);
break;
}
}
bool Value::Compare(Opcode opcode, Value* other) {
// TODO(benvanik): big matrix.
XEASSERTALWAYS();
assert_always();
return false;
}

View File

@ -48,7 +48,7 @@ static size_t GetTypeSize(TypeName type_name) {
case VEC128_TYPE:
return 16;
default:
XEASSERTUNHANDLEDCASE(type_name);
assert_unhandled_case(type_name);
return 0;
}
}
@ -177,13 +177,13 @@ class Value {
inline bool IsConstant() const { return !!(flags & VALUE_IS_CONSTANT); }
bool IsConstantTrue() const {
if (type == VEC128_TYPE) {
XEASSERTALWAYS();
assert_always();
}
return (flags & VALUE_IS_CONSTANT) && !!constant.i64;
}
bool IsConstantFalse() const {
if (type == VEC128_TYPE) {
XEASSERTALWAYS();
assert_always();
}
return (flags & VALUE_IS_CONSTANT) && !constant.i64;
}
@ -196,20 +196,20 @@ class Value {
}
bool IsConstantEQ(Value* other) const {
if (type == VEC128_TYPE) {
XEASSERTALWAYS();
assert_always();
}
return (flags & VALUE_IS_CONSTANT) && (other->flags & VALUE_IS_CONSTANT) &&
constant.i64 == other->constant.i64;
}
bool IsConstantNE(Value* other) const {
if (type == VEC128_TYPE) {
XEASSERTALWAYS();
assert_always();
}
return (flags & VALUE_IS_CONSTANT) && (other->flags & VALUE_IS_CONSTANT) &&
constant.i64 != other->constant.i64;
}
bool IsConstantSLT(Value* other) const {
XEASSERT(flags & VALUE_IS_CONSTANT && other->flags & VALUE_IS_CONSTANT);
assert_true(flags & VALUE_IS_CONSTANT && other->flags & VALUE_IS_CONSTANT);
switch (type) {
case INT8_TYPE:
return constant.i8 < other->constant.i8;
@ -224,12 +224,12 @@ class Value {
case FLOAT64_TYPE:
return constant.f64 < other->constant.f64;
default:
XEASSERTALWAYS();
assert_always();
return false;
}
}
bool IsConstantSLE(Value* other) const {
XEASSERT(flags & VALUE_IS_CONSTANT && other->flags & VALUE_IS_CONSTANT);
assert_true(flags & VALUE_IS_CONSTANT && other->flags & VALUE_IS_CONSTANT);
switch (type) {
case INT8_TYPE:
return constant.i8 <= other->constant.i8;
@ -244,12 +244,12 @@ class Value {
case FLOAT64_TYPE:
return constant.f64 <= other->constant.f64;
default:
XEASSERTALWAYS();
assert_always();
return false;
}
}
bool IsConstantSGT(Value* other) const {
XEASSERT(flags & VALUE_IS_CONSTANT && other->flags & VALUE_IS_CONSTANT);
assert_true(flags & VALUE_IS_CONSTANT && other->flags & VALUE_IS_CONSTANT);
switch (type) {
case INT8_TYPE:
return constant.i8 > other->constant.i8;
@ -264,12 +264,12 @@ class Value {
case FLOAT64_TYPE:
return constant.f64 > other->constant.f64;
default:
XEASSERTALWAYS();
assert_always();
return false;
}
}
bool IsConstantSGE(Value* other) const {
XEASSERT(flags & VALUE_IS_CONSTANT && other->flags & VALUE_IS_CONSTANT);
assert_true(flags & VALUE_IS_CONSTANT && other->flags & VALUE_IS_CONSTANT);
switch (type) {
case INT8_TYPE:
return constant.i8 >= other->constant.i8;
@ -284,12 +284,12 @@ class Value {
case FLOAT64_TYPE:
return constant.f64 >= other->constant.f64;
default:
XEASSERTALWAYS();
assert_always();
return false;
}
}
bool IsConstantULT(Value* other) const {
XEASSERT(flags & VALUE_IS_CONSTANT && other->flags & VALUE_IS_CONSTANT);
assert_true(flags & VALUE_IS_CONSTANT && other->flags & VALUE_IS_CONSTANT);
switch (type) {
case INT8_TYPE:
return (uint8_t)constant.i8 < (uint8_t)other->constant.i8;
@ -304,12 +304,12 @@ class Value {
case FLOAT64_TYPE:
return constant.f64 < other->constant.f64;
default:
XEASSERTALWAYS();
assert_always();
return false;
}
}
bool IsConstantULE(Value* other) const {
XEASSERT(flags & VALUE_IS_CONSTANT && other->flags & VALUE_IS_CONSTANT);
assert_true(flags & VALUE_IS_CONSTANT && other->flags & VALUE_IS_CONSTANT);
switch (type) {
case INT8_TYPE:
return (uint8_t)constant.i8 <= (uint8_t)other->constant.i8;
@ -324,12 +324,12 @@ class Value {
case FLOAT64_TYPE:
return constant.f64 <= other->constant.f64;
default:
XEASSERTALWAYS();
assert_always();
return false;
}
}
bool IsConstantUGT(Value* other) const {
XEASSERT(flags & VALUE_IS_CONSTANT && other->flags & VALUE_IS_CONSTANT);
assert_true(flags & VALUE_IS_CONSTANT && other->flags & VALUE_IS_CONSTANT);
switch (type) {
case INT8_TYPE:
return (uint8_t)constant.i8 > (uint8_t)other->constant.i8;
@ -344,12 +344,12 @@ class Value {
case FLOAT64_TYPE:
return constant.f64 > other->constant.f64;
default:
XEASSERTALWAYS();
assert_always();
return false;
}
}
bool IsConstantUGE(Value* other) const {
XEASSERT(flags & VALUE_IS_CONSTANT && other->flags & VALUE_IS_CONSTANT);
assert_true(flags & VALUE_IS_CONSTANT && other->flags & VALUE_IS_CONSTANT);
switch (type) {
case INT8_TYPE:
return (uint8_t)constant.i8 >= (uint8_t)other->constant.i8;
@ -364,7 +364,7 @@ class Value {
case FLOAT64_TYPE:
return constant.f64 >= other->constant.f64;
default:
XEASSERTALWAYS();
assert_always();
return false;
}
}

View File

@ -48,7 +48,7 @@ void Memory::Copy(uint64_t dest, uint64_t src, size_t size) {
uint64_t Memory::SearchAligned(uint64_t start, uint64_t end,
const uint32_t* values, size_t value_count) {
XEASSERT(start <= end);
assert_true(start <= end);
const uint32_t* p = (const uint32_t*)(membase_ + start);
const uint32_t* pe = (const uint32_t*)(membase_ + end);
while (p != pe) {

View File

@ -50,7 +50,7 @@ Runtime::~Runtime() {
int Runtime::Initialize(Frontend* frontend, Backend* backend) {
// Must be initialized by subclass before calling into this.
XEASSERTNOTNULL(memory_);
assert_not_null(memory_);
// Create debugger first. Other types hook up to it.
debugger_ = new Debugger(this);

77
src/poly/assert.h Normal file
View File

@ -0,0 +1,77 @@
/**
******************************************************************************
* Xenia : Xbox 360 Emulator Research Project *
******************************************************************************
* Copyright 2014 Ben Vanik. All rights reserved. *
* Released under the BSD license - see LICENSE in the root for more details. *
******************************************************************************
*/
#ifndef POLY_ASSERT_H_
#define POLY_ASSERT_H_
#include <assert.h>
#include <poly/config.h>
#include <poly/platform.h>
namespace poly {
#define static_assert_size(type, size) \
static_assert(sizeof(type) == size, \
"bad definition for "## #type##": must be "## #size##" bytes")
// We rely on assert being compiled out in NDEBUG.
#define poly_assert assert
#define __POLY_EXPAND(x) x
#define __POLY_ARGC(...) \
__POLY_EXPAND(__POLY_ARGC_IMPL(__VA_ARGS__, 15, 14, 13, 12, 11, 10, 9, 8, 7, \
6, 5, 4, 3, 2, 1, 0))
#define __POLY_ARGC_IMPL(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, \
x13, x14, x15, N, ...) \
N
#define __POLY_MACRO_DISPATCH(func, ...) \
__POLY_MACRO_DISPATCH_(func, __POLY_ARGC(__VA_ARGS__))
#define __POLY_MACRO_DISPATCH_(func, nargs) __POLY_MACRO_DISPATCH__(func, nargs)
#define __POLY_MACRO_DISPATCH__(func, nargs) func##nargs
#define assert_always(...) poly_assert(false)
#define assert_true(...) \
__POLY_MACRO_DISPATCH(assert_true, __VA_ARGS__)(__VA_ARGS__)
#define assert_true1(expr) poly_assert(expr)
#define assert_true2(expr, message) poly_assert((expr) || !message)
#define assert_false(...) \
__POLY_MACRO_DISPATCH(assert_false, __VA_ARGS__)(__VA_ARGS__)
#define assert_false1(expr) poly_assert(!(expr))
#define assert_false2(expr, message) poly_assert(!(expr) || !message)
#define assert_zero(...) \
__POLY_MACRO_DISPATCH(assert_zero, __VA_ARGS__)(__VA_ARGS__)
#define assert_zero1(expr) poly_assert((expr) == 0)
#define assert_zero2(expr, message) poly_assert((expr) == 0 || !message)
#define assert_not_zero(...) \
__POLY_MACRO_DISPATCH(assert_not_zero, __VA_ARGS__)(__VA_ARGS__)
#define assert_not_zero1(expr) poly_assert((expr) != 0)
#define assert_not_zero2(expr, message) poly_assert((expr) != 0 || !message)
#define assert_null(...) \
__POLY_MACRO_DISPATCH(assert_null, __VA_ARGS__)(__VA_ARGS__)
#define assert_null1(expr) poly_assert((expr) == nullptr)
#define assert_null2(expr, message) poly_assert((expr) == nullptr || !message)
#define assert_not_null(...) \
__POLY_MACRO_DISPATCH(assert_not_null, __VA_ARGS__)(__VA_ARGS__)
#define assert_not_null1(expr) poly_assert((expr) != nullptr)
#define assert_not_null2(expr, message) \
poly_assert((expr) != nullptr || !message)
#define assert_unhandled_case(variable) \
assert_always("unhandled switch("## #variable##") case")
} // namespace poly
#endif // POLY_ASSERT_H_
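The argument-counting dispatch above is what lets every assert take an optional message. A minimal sketch of how the two forms expand, assuming a hypothetical CheckBuffer() helper (names and values are illustrative, not from the commit):
// Hypothetical usage sketch; not part of the commit.
#include <cstddef>
#include <poly/assert.h>
void CheckBuffer(size_t buffer_size) {
  // One argument: __POLY_ARGC(...) counts 1, so the call dispatches to
  // assert_true1 and expands to poly_assert(buffer_size <= 64).
  assert_true(buffer_size <= 64);
  // Two arguments: __POLY_ARGC(...) counts 2, so the call dispatches to
  // assert_true2 and expands to
  //   poly_assert((buffer_size <= 64) || !"buffer too large");
  // The string literal rides along inside the asserted expression, so it
  // appears in assert()'s failure output; in NDEBUG builds the statement
  // compiles away along with assert itself.
  assert_true(buffer_size <= 64, "buffer too large");
}
The same one-or-two-argument pattern gives assert_false, assert_zero, assert_not_zero, assert_null, and assert_not_null their optional-message forms.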

View File

@ -11,7 +11,7 @@
#define POLY_CONFIG_H_
#if defined(DEBUG) || defined(_DEBUG)
#define XE_DEBUG 1
#define XE_DEBUG 1
#endif // DEBUG
#endif // POLY_CONFIG_H_

View File

@ -16,6 +16,7 @@
// http://en.cppreference.com/w/cpp/language/storage_duration
#if XE_COMPILER_MSVC
// VC++2014 may have this.
#define _ALLOW_KEYWORD_MACROS 1
#define thread_local __declspec(thread)
#elif XE_LIKE_OSX
// Clang supports it on OSX but the runtime doesn't.

View File

@ -25,15 +25,25 @@ namespace poly {
// return value is the size of the input operand (8, 16, 32, or 64). If the most
// significant bit of value is one, the return value is zero.
#if XE_COMPILER_MSVC
inline uint8_t lzcnt(uint8_t v) { return static_cast<uint8_t>(__lzcnt16(v) - 8); }
inline uint8_t lzcnt(uint8_t v) {
return static_cast<uint8_t>(__lzcnt16(v) - 8);
}
inline uint8_t lzcnt(uint16_t v) { return static_cast<uint8_t>(__lzcnt16(v)); }
inline uint8_t lzcnt(uint32_t v) { return static_cast<uint8_t>(__lzcnt(v)); }
inline uint8_t lzcnt(uint64_t v) { return static_cast<uint8_t>(__lzcnt64(v)); }
#else
inline uint8_t lzcnt(uint8_t v) { return static_cast<uint8_t>(__builtin_clzs(v) - 8); }
inline uint8_t lzcnt(uint16_t v) { return static_cast<uint8_t>(__builtin_clzs(v)); }
inline uint8_t lzcnt(uint32_t v) { return static_cast<uint8_t>(__builtin_clz(v)); }
inline uint8_t lzcnt(uint64_t v) { return static_cast<uint8_t>(__builtin_clzll(v)); }
inline uint8_t lzcnt(uint8_t v) {
return static_cast<uint8_t>(__builtin_clzs(v) - 8);
}
inline uint8_t lzcnt(uint16_t v) {
return static_cast<uint8_t>(__builtin_clzs(v));
}
inline uint8_t lzcnt(uint32_t v) {
return static_cast<uint8_t>(__builtin_clz(v));
}
inline uint8_t lzcnt(uint64_t v) {
return static_cast<uint8_t>(__builtin_clzll(v));
}
#endif // XE_COMPILER_MSVC
inline uint8_t lzcnt(int8_t v) { return lzcnt(static_cast<uint8_t>(v)); }
inline uint8_t lzcnt(int16_t v) { return lzcnt(static_cast<uint16_t>(v)); }
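A quick sanity sketch of what the lzcnt helpers above return, with illustrative inputs (assumed values, not taken from the sources):
// Illustrative expectations only; assumes <poly/math.h> and <poly/assert.h>.
#include <cstdint>
#include <poly/assert.h>
#include <poly/math.h>
void LzcntExamples() {
  assert_true(poly::lzcnt(uint8_t(0x01)) == 7);          // 0b00000001 -> 7 leading zeros
  assert_true(poly::lzcnt(uint8_t(0x80)) == 0);          // MSB set -> 0
  assert_true(poly::lzcnt(uint16_t(0x0100)) == 7);       // bit 8 set in a 16-bit value
  assert_true(poly::lzcnt(uint32_t(0x00010000)) == 15);  // bit 16 set in a 32-bit value
  assert_true(poly::lzcnt(uint64_t(1)) == 63);           // lowest bit of a 64-bit value
}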
@ -49,7 +59,8 @@ inline bool bit_scan_forward(uint32_t v, uint32_t* out_first_set_index) {
return _BitScanForward(reinterpret_cast<DWORD*>(out_first_set_index), v) != 0;
}
inline bool bit_scan_forward(uint64_t v, uint32_t* out_first_set_index) {
return _BitScanForward64(reinterpret_cast<DWORD*>(out_first_set_index), v) != 0;
return _BitScanForward64(reinterpret_cast<DWORD*>(out_first_set_index), v) !=
0;
}
#else
inline bool bit_scan_forward(uint32_t v, uint32_t* out_first_set_index) {

View File

@ -10,6 +10,7 @@
#ifndef POLY_POLY_H_
#define POLY_POLY_H_
#include <poly/assert.h>
#include <poly/config.h>
#include <poly/cxx_compat.h>
#include <poly/math.h>

View File

@ -1,9 +1,11 @@
# Copyright 2014 Ben Vanik. All Rights Reserved.
{
'sources': [
'assert.h',
'config.h',
'cxx_compat.h',
'math.h',
'platform.h',
'poly-private.h',
'poly.cc',
'poly.h',

View File

@ -82,7 +82,7 @@ void AudioSystem::ThreadStart() {
auto result = WaitForMultipleObjectsEx(maximum_client_count_, client_wait_handles_, FALSE, INFINITE, FALSE);
if (result == WAIT_FAILED) {
DWORD err = GetLastError();
XEASSERTALWAYS();
assert_always();
break;
}
@ -131,7 +131,7 @@ void AudioSystem::Shutdown() {
X_STATUS AudioSystem::RegisterClient(
uint32_t callback, uint32_t callback_arg, size_t* out_index) {
XEASSERTTRUE(unused_clients_.size());
assert_true(unused_clients_.size());
xe_mutex_lock(lock_);
auto index = unused_clients_.front();
@ -143,7 +143,7 @@ X_STATUS AudioSystem::RegisterClient(
if (XFAILED(result)) {
return result;
}
XEASSERTNOTNULL(driver != NULL);
assert_not_null(driver);
unused_clients_.pop();
@ -165,8 +165,8 @@ void AudioSystem::SubmitFrame(size_t index, uint32_t samples_ptr) {
SCOPE_profile_cpu_f("apu");
xe_mutex_lock(lock_);
XEASSERTTRUE(index < maximum_client_count_);
XEASSERTTRUE(clients_[index].driver != NULL);
assert_true(index < maximum_client_count_);
assert_true(clients_[index].driver != NULL);
(clients_[index].driver)->SubmitFrame(samples_ptr);
ResetEvent(client_wait_handles_[index]);
xe_mutex_unlock(lock_);
@ -176,7 +176,7 @@ void AudioSystem::UnregisterClient(size_t index) {
SCOPE_profile_cpu_f("apu");
xe_mutex_lock(lock_);
XEASSERTTRUE(index < maximum_client_count_);
assert_true(index < maximum_client_count_);
DestroyDriver(clients_[index].driver);
clients_[index] = { 0 };
unused_clients_.push(index);
@ -199,4 +199,4 @@ void AudioSystem::WriteRegister(uint64_t addr, uint64_t value) {
uint32_t r = addr & 0xFFFF;
XELOGAPU("WriteRegister(%.4X, %.8X)", r, value);
// 1804h is written to with 0x02000000 and 0x03000000 around a lock operation
}
}

View File

@ -29,5 +29,5 @@ X_STATUS NopAudioSystem::CreateDriver(size_t index, HANDLE wait_handle, AudioDri
}
void NopAudioSystem::DestroyDriver(AudioDriver* driver) {
XEASSERTALWAYS();
assert_always();
}

View File

@ -67,7 +67,7 @@ void XAudio2AudioDriver::Initialize() {
hr = XAudio2Create(&audio_, 0, XAUDIO2_DEFAULT_PROCESSOR);
if (FAILED(hr)) {
XELOGE("XAudio2Create failed with %.8X", hr);
XEASSERTALWAYS();
assert_always();
return;
}
@ -83,7 +83,7 @@ void XAudio2AudioDriver::Initialize() {
hr = audio_->CreateMasteringVoice(&mastering_voice_);
if (FAILED(hr)) {
XELOGE("CreateMasteringVoice failed with %.8X", hr);
XEASSERTALWAYS();
assert_always();
return;
}
@ -106,14 +106,14 @@ void XAudio2AudioDriver::Initialize() {
voice_callback_);
if (FAILED(hr)) {
XELOGE("CreateSourceVoice failed with %.8X", hr);
XEASSERTALWAYS();
assert_always();
return;
}
hr = pcm_voice_->Start();
if (FAILED(hr)) {
XELOGE("Start failed with %.8X", hr);
XEASSERTALWAYS();
assert_always();
return;
}
@ -148,7 +148,7 @@ void XAudio2AudioDriver::SubmitFrame(uint32_t frame_ptr) {
hr = pcm_voice_->SubmitSourceBuffer(&buffer);
if (FAILED(hr)) {
XELOGE("SubmitSourceBuffer failed with %.8X", hr);
XEASSERTALWAYS();
assert_always();
return;
}
}

View File

@ -31,7 +31,7 @@ void XAudio2AudioSystem::Initialize() {
}
X_STATUS XAudio2AudioSystem::CreateDriver(size_t index, HANDLE wait, AudioDriver** out_driver) {
XEASSERTNOTNULL(out_driver);
assert_not_null(out_driver);
auto driver = new XAudio2AudioDriver(emulator_, wait);
driver->Initialize();
*out_driver = driver;
@ -39,9 +39,9 @@ X_STATUS XAudio2AudioSystem::CreateDriver(size_t index, HANDLE wait, AudioDriver
}
void XAudio2AudioSystem::DestroyDriver(AudioDriver* driver) {
XEASSERTNOTNULL(driver);
assert_not_null(driver);
auto xdriver = static_cast<XAudio2AudioDriver*>(driver);
xdriver->Shutdown();
XEASSERTNOTNULL(xdriver);
assert_not_null(xdriver);
delete xdriver;
}

View File

@ -1,74 +0,0 @@
/**
******************************************************************************
* Xenia : Xbox 360 Emulator Research Project *
******************************************************************************
* Copyright 2013 Ben Vanik. All rights reserved. *
* Released under the BSD license - see LICENSE in the root for more details. *
******************************************************************************
*/
#ifndef XENIA_ASSERT_H_
#define XENIA_ASSERT_H_
#include <assert.h>
#include <xenia/assert.h>
#include <xenia/config.h>
#include <xenia/platform.h>
#include <xenia/string.h>
#include <xenia/types.h>
#if 0 && XE_COMPILER_MSVC && defined(UNICODE) && UNICODE
// http://msdn.microsoft.com/en-us/library/b0084kay.aspx
#if !defined(__WFILE__)
#define WIDEN2(x) L##x
#define WIDEN(x) WIDEN2(x)
#define __WFILE__ WIDEN(__FILE__)
#define __WFUNCTION__ WIDEN(__FUNCTION__)
#endif
#define XE_CURRENT_FILE __WFILE__
#define XE_CURRENT_FUNCTION __WFUNCTION__
#else
#define XE_CURRENT_FILE __FILE__
#define XE_CURRENT_FUNCTION __FUNCTION__
#endif // MSVC
#define XE_CURRENT_LINE __LINE__
#define __XE_ASSERT(expr) assert(expr)
#if XE_OPTION_ENABLE_ASSERTS
#define XEASSERTCORE(expr) __XE_ASSERT(expr)
#else
#define XEASSERTCORE(expr) XE_EMPTY_MACRO
#endif // ENABLE_ASSERTS
#define XEASSERTALWAYS() XEASSERTCORE( 0 )
#define XEASSERT(expr) XEASSERTCORE( (expr) )
#define XEASSERTTRUE(expr) XEASSERTCORE( (expr) )
#define XEASSERTFALSE(expr) XEASSERTCORE(!(expr) )
#define XEASSERTZERO(expr) XEASSERTCORE( (expr) == 0 )
#define XEASSERTNOTZERO(expr) XEASSERTCORE( (expr) != 0 )
#define XEASSERTNULL(expr) XEASSERTCORE( (expr) == NULL )
#define XEASSERTNOTNULL(expr) XEASSERTCORE( (expr) != NULL )
#define XEASSERTUNHANDLEDCASE(var) XEASSERTALWAYS()
#if XE_COMPILER_MSVC
// http://msdn.microsoft.com/en-us/library/bb918086.aspx
// TODO(benvanik): if 2010+, use static_assert?
// http://msdn.microsoft.com/en-us/library/dd293588.aspx
#define XESTATICASSERT(expr, message) _STATIC_ASSERT(expr)
//#elif XE_COMPILER_GNUC
// http://stackoverflow.com/questions/3385515/static-assert-in-c
//#define XESTATICASSERT(expr, message) ({ extern int __attribute__((error("assertion failure: '" #expr "' not true - " #message))) compile_time_check(); ((expr)?0:compile_time_check()),0; })
#else
// http://stackoverflow.com/questions/3385515/static-assert-in-c
#define XESTATICASSERT3(expr, L) typedef char static_assertion_##L[(expr)?1:-1]
#define XESTATICASSERT2(expr, L) XESTATICASSERT3(expr, L)
#define XESTATICASSERT(expr, message) XESTATICASSERT2(expr, __LINE__)
#endif // MSVC
#define XEASSERTSTRUCTSIZE(target, size) XESTATICASSERT(sizeof(target) == size, "bad definition for " ## target ## ": must be " ## size ## " bytes")
#endif // XENIA_ASSERT_H_

View File

@ -13,6 +13,7 @@
#include <cstdint>
#include <xenia/platform.h>
#include <xenia/types.h>
// These functions are modeled off of the Apple OSAtomic routines

View File

@ -10,7 +10,8 @@
#ifndef XENIA_COMMON_H_
#define XENIA_COMMON_H_
#include <xenia/assert.h>
#include <poly/assert.h>
#include <xenia/atomic.h>
#include <xenia/byte_order.h>
#include <xenia/config.h>

View File

@ -12,5 +12,5 @@
const xechar_t* xe_path_get_tmp(const xechar_t* prefix) {
//
XEASSERTALWAYS();
assert_always();
}

View File

@ -11,7 +11,7 @@
const xechar_t* xe_path_get_tmp(const xechar_t* prefix) {
//
XEASSERTALWAYS();
//
assert_always();
return NULL;
}

View File

@ -68,7 +68,7 @@ Processor::~Processor() {
}
int Processor::Setup() {
XEASSERTNULL(runtime_);
assert_null(runtime_);
runtime_ = new XenonRuntime(memory_, export_resolver_);
if (!runtime_) {
@ -127,7 +127,7 @@ uint64_t Processor::Execute(
SCOPE_profile_cpu_f("cpu");
PPCContext* context = thread_state->context();
XEASSERT(arg_count <= 5);
assert_true(arg_count <= 5);
for (size_t i = 0; i < arg_count; ++i) {
context->r[3 + i] = args[i];
}

View File

@ -174,7 +174,7 @@ LONG CALLBACK CheckMMIOHandler(PEXCEPTION_POINTERS ex_info) {
auto action = ex_info->ExceptionRecord->ExceptionInformation[0];
if (action == 0) {
uint64_t value = range.read(range.context, address & 0xFFFFFFFF);
XEASSERT((disasm.Argument1.ArgType & BE::REGISTER_TYPE) ==
assert_true((disasm.Argument1.ArgType & BE::REGISTER_TYPE) ==
BE::REGISTER_TYPE);
uint64_t* reg_ptr = GetContextRegPtr(disasm.Argument1.ArgType,
ex_info->ContextRecord);
@ -203,7 +203,7 @@ LONG CALLBACK CheckMMIOHandler(PEXCEPTION_POINTERS ex_info) {
} else if ((disasm.Argument2.ArgType & BE::CONSTANT_TYPE) == BE::CONSTANT_TYPE) {
value = disasm.Instruction.Immediat;
} else {
XEASSERTALWAYS();
assert_always();
}
switch (disasm.Argument2.ArgSize) {
case 8:
@ -290,7 +290,7 @@ int XenonMemory::Initialize() {
NULL);
if (!mapping_) {
XELOGE("Unable to reserve the 4gb guest address space.");
XEASSERTNOTNULL(mapping_);
assert_not_null(mapping_);
XEFAIL();
}
@ -306,7 +306,7 @@ int XenonMemory::Initialize() {
}
if (!mapping_base_) {
XELOGE("Unable to find a continuous block in the 64bit address space.");
XEASSERTALWAYS();
assert_always();
XEFAIL();
}
membase_ = mapping_base_;
@ -362,7 +362,7 @@ int XenonMemory::MapViews(uint8_t* mapping_base) {
0xC0000000, 0xDFFFFFFF, 0x00000000, // - physical 16mb pages
0xE0000000, 0xFFFFFFFF, 0x00000000, // - physical 4k pages
};
XEASSERT(XECOUNT(map_info) == XECOUNT(views_.all_views));
assert_true(XECOUNT(map_info) == XECOUNT(views_.all_views));
for (size_t n = 0; n < XECOUNT(map_info); n++) {
views_.all_views[n] = (uint8_t*)MapViewOfFileEx(
mapping_,
@ -398,14 +398,14 @@ bool XenonMemory::AddMappedRange(uint64_t address, uint64_t mask,
protect = PAGE_READONLY;
} else {
// Write-only memory is not supported.
XEASSERTALWAYS();
assert_always();
}
if (!VirtualAlloc(Translate(address),
size,
MEM_COMMIT, protect)) {
return false;
}
XEASSERT(g_mapped_range_count_ + 1 < XECOUNT(g_mapped_ranges_));
assert_true(g_mapped_range_count_ + 1 < XECOUNT(g_mapped_ranges_));
g_mapped_ranges_[g_mapped_range_count_++] = {
reinterpret_cast<uint64_t>(mapping_base_) | address,
0xFFFFFFFF00000000 | mask,
@ -518,13 +518,13 @@ uint64_t XenonMemory::HeapAlloc(
if (base_address >= XENON_MEMORY_VIRTUAL_HEAP_LOW &&
base_address < XENON_MEMORY_VIRTUAL_HEAP_HIGH) {
// Overlapping managed heap.
XEASSERTALWAYS();
assert_always();
return 0;
}
if (base_address >= XENON_MEMORY_PHYSICAL_HEAP_LOW &&
base_address < XENON_MEMORY_PHYSICAL_HEAP_HIGH) {
// Overlapping managed heap.
XEASSERTALWAYS();
assert_always();
return 0;
}
@ -534,7 +534,7 @@ uint64_t XenonMemory::HeapAlloc(
void* pv = VirtualAlloc(p, size, MEM_COMMIT, PAGE_READWRITE);
if (!pv) {
// Failed.
XEASSERTALWAYS();
assert_always();
return 0;
}

View File

@ -32,7 +32,7 @@ XenonThreadState::XenonThreadState(
// Allocate with 64b alignment.
context_ = (PPCContext*)xe_malloc_aligned(sizeof(PPCContext));
XEASSERT(((uint64_t)context_ & 0xF) == 0);
assert_true(((uint64_t)context_ & 0xF) == 0);
xe_zero_struct(context_, sizeof(PPCContext));
// Stash pointers to common structures that callbacks may need.

View File

@ -127,7 +127,7 @@ XECLEANUP:
}
void Emulator::set_main_window(Window* window) {
XEASSERTNULL(main_window_);
assert_null(main_window_);
main_window_ = window;
window->closed.AddListener([](UIEvent& e) {

View File

@ -78,7 +78,7 @@ KernelExport* ExportResolver::GetExportByOrdinal(const char* library_name,
KernelExport* ExportResolver::GetExportByName(const char* library_name,
const char* name) {
// TODO(benvanik): lookup by name.
XEASSERTALWAYS();
assert_always();
return NULL;
}
@ -86,7 +86,7 @@ void ExportResolver::SetVariableMapping(const char* library_name,
const uint32_t ordinal,
uint32_t value) {
KernelExport* kernel_export = GetExportByOrdinal(library_name, ordinal);
XEASSERTNOTNULL(kernel_export);
assert_not_null(kernel_export);
kernel_export->is_implemented = true;
kernel_export->variable_ptr = value;
}
@ -96,7 +96,7 @@ void ExportResolver::SetFunctionMapping(
void* shim_data, xe_kernel_export_shim_fn shim,
xe_kernel_export_impl_fn impl) {
KernelExport* kernel_export = GetExportByOrdinal(library_name, ordinal);
XEASSERTNOTNULL(kernel_export);
assert_not_null(kernel_export);
kernel_export->is_implemented = true;
kernel_export->function_data.shim_data = shim_data;
kernel_export->function_data.shim = shim;

View File

@ -144,10 +144,10 @@ void CommandProcessor::ExecutePrimaryBuffer(
uint32_t n = 0;
while (args.ptr != end_ptr) {
n += ExecutePacket(args);
XEASSERT(args.ptr < args.max_address);
assert_true(args.ptr < args.max_address);
}
if (end_index > start_index) {
XEASSERT(n == (end_index - start_index));
assert_true(n == (end_index - start_index));
}
XETRACECP(" ExecutePrimaryBuffer End");
@ -164,7 +164,7 @@ void CommandProcessor::ExecuteIndirectBuffer(uint32_t ptr, uint32_t length) {
args.ptr_mask = 0;
for (uint32_t n = 0; n < length;) {
n += ExecutePacket(args);
XEASSERT(n <= length);
assert_true(n <= length);
}
XETRACECP(" ExecuteIndirectBuffer End");
@ -343,7 +343,7 @@ uint32_t CommandProcessor::ExecutePacket(PacketArgs& args) {
value = GpuSwap(value, endianness);
} else {
// Register.
XEASSERT(poll_reg_addr < RegisterFile::kRegisterCount);
assert_true(poll_reg_addr < RegisterFile::kRegisterCount);
value = regs->values[poll_reg_addr].u32;
if (poll_reg_addr == XE_GPU_REG_COHER_STATUS_HOST) {
MakeCoherent();
@ -438,7 +438,7 @@ uint32_t CommandProcessor::ExecutePacket(PacketArgs& args) {
value = GpuSwap(value, endianness);
} else {
// Register.
XEASSERT(poll_reg_addr < RegisterFile::kRegisterCount);
assert_true(poll_reg_addr < RegisterFile::kRegisterCount);
value = regs->values[poll_reg_addr].u32;
}
bool matched = false;
@ -496,7 +496,7 @@ uint32_t CommandProcessor::ExecutePacket(PacketArgs& args) {
// Just an event flag? Where does this write?
} else {
// Write to an address.
XEASSERTALWAYS();
assert_always();
ADVANCE_PTR(count - 1);
}
}
@ -564,7 +564,7 @@ uint32_t CommandProcessor::ExecutePacket(PacketArgs& args) {
draw_command_.index_buffer = nullptr;
} else {
// Unknown source select.
XEASSERTALWAYS();
assert_always();
}
driver_->Draw(draw_command_);
} else {
@ -584,7 +584,7 @@ uint32_t CommandProcessor::ExecutePacket(PacketArgs& args) {
uint32_t index_count = d0 >> 16;
uint32_t prim_type = d0 & 0x3F;
uint32_t src_sel = (d0 >> 6) & 0x3;
XEASSERT(src_sel == 0x2); // 'SrcSel=AutoIndex'
assert_true(src_sel == 0x2); // 'SrcSel=AutoIndex'
if (!driver_->PrepareDraw(draw_command_)) {
draw_command_.prim_type = (XE_GPU_PRIMITIVE_TYPE)prim_type;
draw_command_.start_index = 0;
@ -619,7 +619,7 @@ uint32_t CommandProcessor::ExecutePacket(PacketArgs& args) {
}
break;
default:
XEASSERTALWAYS();
assert_always();
break;
}
}
@ -660,7 +660,7 @@ uint32_t CommandProcessor::ExecutePacket(PacketArgs& args) {
uint32_t start_size = READ_PTR();
uint32_t start = start_size >> 16;
uint32_t size = start_size & 0xFFFF; // dwords
XEASSERT(start == 0);
assert_true(start == 0);
driver_->LoadShader((XE_GPU_SHADER_TYPE)type,
GpuToCpu(packet_ptr, addr), size * 4, start);
}
@ -675,9 +675,9 @@ uint32_t CommandProcessor::ExecutePacket(PacketArgs& args) {
uint32_t start_size = READ_PTR();
uint32_t start = start_size >> 16;
uint32_t size = start_size & 0xFFFF; // dwords
XEASSERT(start == 0);
assert_true(start == 0);
// TODO(benvanik): figure out if this could wrap.
XEASSERT(args.ptr + size * 4 < args.max_address);
assert_true(args.ptr + size * 4 < args.max_address);
driver_->LoadShader((XE_GPU_SHADER_TYPE)type,
args.ptr, size * 4, start);
ADVANCE_PTR(size);
@ -751,7 +751,7 @@ uint32_t CommandProcessor::ExecutePacket(PacketArgs& args) {
void CommandProcessor::WriteRegister(
uint32_t packet_ptr, uint32_t index, uint32_t value) {
RegisterFile* regs = driver_->register_file();
XEASSERT(index < RegisterFile::kRegisterCount);
assert_true(index < RegisterFile::kRegisterCount);
regs->values[index].u32 = value;
// If this is a COHER register, set the dirty flag.

View File

@ -53,7 +53,7 @@ int D3D11IndexBufferResource::InvalidateRegion(
SCOPE_profile_cpu_f("gpu");
// All that's done so far:
XEASSERT(info_.endianness == 0x2);
assert_true(info_.endianness == 0x2);
D3D11_MAPPED_SUBRESOURCE res;
HRESULT hr = resource_cache_->context()->Map(

View File

@ -33,8 +33,8 @@ D3D11GraphicsSystem::~D3D11GraphicsSystem() {
void D3D11GraphicsSystem::Initialize() {
GraphicsSystem::Initialize();
XEASSERTNULL(timer_queue_);
XEASSERTNULL(vsync_timer_);
assert_null(timer_queue_);
assert_null(vsync_timer_);
timer_queue_ = CreateTimerQueue();
CreateTimerQueueTimer(
@ -111,7 +111,7 @@ void D3D11GraphicsSystem::Initialize() {
// Create the window.
// This will pump through the run-loop and be where our swapping
// will take place.
XEASSERTNULL(window_);
assert_null(window_);
window_ = new D3D11Window(run_loop_, dxgi_factory_, device_);
if (window_->Initialize("Xenia D3D11", 1280, 720)) {
XELOGE("Failed to create D3D11Window");
@ -126,7 +126,7 @@ void D3D11GraphicsSystem::Initialize() {
// Create the driver.
// This runs in the worker thread and builds command lines to present
// in the window.
XEASSERTNULL(driver_);
assert_null(driver_);
driver_ = new D3D11GraphicsDriver(
memory_, window_->swap_chain(), device_);
if (driver_->Initialize()) {
@ -180,7 +180,7 @@ void __stdcall D3D11GraphicsSystem::VsyncCallback(D3D11GraphicsSystem* gs,
void D3D11GraphicsSystem::Shutdown() {
GraphicsSystem::Shutdown();
if (vsync_timer_) {
DeleteTimerQueueTimer(timer_queue_, vsync_timer_, NULL);
}

View File

@ -140,7 +140,7 @@ D3D11ProfilerDisplay::D3D11ProfilerDisplay(D3D11Window* window) : window_(window
!SetupShaders() ||
!SetupFont()) {
// Hrm.
XEASSERTALWAYS();
assert_always();
}
// Pass through mouse events.
@ -183,7 +183,7 @@ bool D3D11ProfilerDisplay::SetupState() {
blend_desc.RenderTarget[0].DestBlendAlpha = D3D11_BLEND_ZERO;
blend_desc.RenderTarget[0].RenderTargetWriteMask = 0x0F;
hr = device->CreateBlendState(&blend_desc, &blend_state_);
XEASSERT(SUCCEEDED(hr));
assert_true(SUCCEEDED(hr));
D3D11_DEPTH_STENCIL_DESC depth_stencil_desc;
xe_zero_struct(&depth_stencil_desc, sizeof(depth_stencil_desc));
@ -191,7 +191,7 @@ bool D3D11ProfilerDisplay::SetupState() {
depth_stencil_desc.StencilEnable = false;
depth_stencil_desc.DepthWriteMask = D3D11_DEPTH_WRITE_MASK_ZERO;
hr = device->CreateDepthStencilState(&depth_stencil_desc, &depth_stencil_state_);
XEASSERT(SUCCEEDED(hr));
assert_true(SUCCEEDED(hr));
return true;
}
@ -484,7 +484,7 @@ D3D11ProfilerDisplay::Vertex* D3D11ProfilerDisplay::AllocateVertices(
if (draw_state_.vertex_index + count > XECOUNT(draw_state_.vertex_buffer)) {
Flush();
}
XEASSERT(draw_state_.vertex_index + count <= XECOUNT(draw_state_.vertex_buffer));
assert_true(draw_state_.vertex_index + count <= XECOUNT(draw_state_.vertex_buffer));
size_t head = draw_state_.vertex_index;
draw_state_.vertex_index += count;
@ -493,7 +493,7 @@ D3D11ProfilerDisplay::Vertex* D3D11ProfilerDisplay::AllocateVertices(
draw_state_.commands[draw_state_.command_index - 1].primitive == primitive) {
draw_state_.commands[draw_state_.command_index - 1].vertex_count += count;
} else {
XEASSERT(draw_state_.command_index < XECOUNT(draw_state_.commands));
assert_true(draw_state_.command_index < XECOUNT(draw_state_.commands));
draw_state_.commands[draw_state_.command_index].primitive = primitive;
draw_state_.commands[draw_state_.command_index].vertex_count = count;
++draw_state_.command_index;
@ -511,7 +511,7 @@ void D3D11ProfilerDisplay::Flush() {
context->Map(vertex_buffer_, 0, D3D11_MAP_WRITE_DISCARD, 0, &res);
memcpy(res.pData, draw_state_.vertex_buffer, sizeof(Vertex) * draw_state_.vertex_index);
context->Unmap(vertex_buffer_, 0);
uint32_t stride = 20;
uint32_t offset = 0;
context->IASetVertexBuffers(0, 1, &vertex_buffer_, &stride, &offset);

View File

@ -252,7 +252,7 @@ int D3D11VertexShaderResource::CreateInputLayout(const void* byte_code,
vtx_format = DXGI_FORMAT_R32G32B32A32_FLOAT;
break;
default:
XEASSERTALWAYS();
assert_always();
break;
}
element_descs[el_index].SemanticName = "XE_VF";
@ -299,7 +299,7 @@ int D3D11VertexShaderResource::DemandGeometryShader(
shader = new D3D11QuadListGeometryShader(device);
break;
default:
XEASSERTALWAYS();
assert_always();
return 1;
}
if (!shader) {

View File

@ -58,7 +58,7 @@ const char* GetFormatTypeName(const VertexBufferResource::DeclElement& el) {
return "float4";
default:
XELOGE("Unknown vertex format: %d", el.format);
XEASSERTALWAYS();
assert_always();
return "float4";
}
}
@ -141,7 +141,7 @@ int D3D11ShaderTranslator::TranslateVertexShader(
append(
"struct VS_OUTPUT {\n");
if (alloc_counts.positions) {
XEASSERT(alloc_counts.positions == 1);
assert_true(alloc_counts.positions == 1);
append(
" float4 oPos : SV_POSITION;\n");
}
@ -244,7 +244,7 @@ int D3D11ShaderTranslator::TranslatePixelShader(
append(
"struct VS_OUTPUT {\n");
if (alloc_counts.positions) {
XEASSERT(alloc_counts.positions == 1);
assert_true(alloc_counts.positions == 1);
append(
" float4 oPos : SV_POSITION;\n");
}
@ -417,7 +417,7 @@ void D3D11ShaderTranslator::AppendDestRegName(uint32_t num, uint32_t dst_exp) {
default:
// TODO(benvanik): other render targets?
// TODO(benvanik): depth?
XEASSERTALWAYS();
assert_always();
break;
}
break;
@ -1272,7 +1272,7 @@ int D3D11ShaderTranslator::GetFormatComponentCount(uint32_t format) {
return 4;
default:
XELOGE("Unknown vertex format: %d", format);
XEASSERTALWAYS();
assert_always();
return 4;
}
}
@ -1348,7 +1348,7 @@ int D3D11ShaderTranslator::TranslateExec(const instr_cf_exec_t& cf) {
case TEX_SET_GRADIENTS_H:
case TEX_SET_GRADIENTS_V:
default:
XEASSERTALWAYS();
assert_always();
break;
}
} else {

View File

@ -136,13 +136,13 @@ int D3D11TextureResource::CreateHandle2D() {
int D3D11TextureResource::CreateHandle3D() {
XELOGE("D3D11: CreateTexture3D not yet implemented");
XEASSERTALWAYS();
assert_always();
return 1;
}
int D3D11TextureResource::CreateHandleCube() {
XELOGE("D3D11: CreateTextureCube not yet implemented");
XEASSERTALWAYS();
assert_always();
return 1;
}

View File

@ -157,7 +157,7 @@ int GraphicsDriver::PopulateShaders(DrawCommand& command) {
XELOGE("No pixel shader bound; ignoring");
return 1;
}
xe_gpu_program_cntl_t program_cntl;
program_cntl.dword_0 = register_file_[XE_GPU_REG_SQ_PROGRAM_CNTL].u32;
if (!vertex_shader_->is_prepared()) {
@ -181,7 +181,7 @@ int GraphicsDriver::PopulateShaders(DrawCommand& command) {
int GraphicsDriver::PopulateInputAssembly(DrawCommand& command) {
SCOPE_profile_cpu_f("gpu");
const auto& buffer_inputs = command.vertex_shader->buffer_inputs();
command.vertex_buffer_count = buffer_inputs.count;
for (size_t n = 0; n < buffer_inputs.count; n++) {
@ -201,10 +201,10 @@ int GraphicsDriver::PopulateInputAssembly(DrawCommand& command) {
fetch = &group->vertex_fetch_2;
break;
}
XEASSERTNOTNULL(fetch);
assert_not_null(fetch);
// If this assert doesn't hold, maybe we just abort?
XEASSERT(fetch->type == 0x3);
XEASSERTNOTZERO(fetch->size);
assert_true(fetch->type == 0x3);
assert_not_zero(fetch->size);
const auto& info = desc.info;
@ -278,7 +278,7 @@ int GraphicsDriver::PopulateSamplerSet(
XELOGW("D3D11: unknown texture format %d", info.format);
return 0; // invalid texture used
}
// TODO(benvanik): quick validate without refetching intraframe.
// Fetch texture from the cache.
MemoryRange memory_range;

View File

@ -72,7 +72,7 @@ void GraphicsSystem::ThreadStart() {
// Initialize driver and ringbuffer.
Initialize();
XEASSERTNOTNULL(driver_);
assert_not_null(driver_);
SetEvent(thread_wait_);
// Main run loop.
@ -128,7 +128,7 @@ void GraphicsSystem::InitializeRingBuffer(uint32_t ptr, uint32_t page_count) {
while (!driver_) {
Sleep(0);
}
XEASSERTNOTNULL(driver_);
assert_not_null(driver_);
command_processor_->Initialize(driver_, ptr, page_count);
}
@ -154,7 +154,7 @@ uint64_t GraphicsSystem::ReadRegister(uint64_t addr) {
return 1;
}
XEASSERT(r >= 0 && r < RegisterFile::kRegisterCount);
assert_true(r >= 0 && r < RegisterFile::kRegisterCount);
return regs->values[r].u32;
}
@ -175,7 +175,7 @@ void GraphicsSystem::WriteRegister(uint64_t addr, uint64_t value) {
break;
}
XEASSERT(r >= 0 && r < RegisterFile::kRegisterCount);
assert_true(r >= 0 && r < RegisterFile::kRegisterCount);
regs->values[r].u32 = (uint32_t)value;
}

View File

@ -40,11 +40,11 @@ NopGraphicsSystem::~NopGraphicsSystem() {
void NopGraphicsSystem::Initialize() {
GraphicsSystem::Initialize();
XEASSERTNULL(driver_);
assert_null(driver_);
driver_ = new NopGraphicsDriver(memory_);
XEASSERTNULL(timer_queue_);
XEASSERTNULL(vsync_timer_);
assert_null(timer_queue_);
assert_null(vsync_timer_);
timer_queue_ = CreateTimerQueue();
CreateTimerQueueTimer(

View File

@ -29,7 +29,7 @@ ShaderResource::ShaderResource(const MemoryRange& memory_range,
// Verify.
dword_count_ = memory_range.length / 4;
XEASSERT(dword_count_ <= 512);
assert_true(dword_count_ <= 512);
// Copy bytes and swap.
size_t byte_size = dword_count_ * sizeof(uint32_t);
@ -122,7 +122,7 @@ void ShaderResource::GatherExec(const instr_cf_exec_t* cf) {
case TEX_SET_GRADIENTS_H:
case TEX_SET_GRADIENTS_V:
default:
XEASSERTALWAYS();
assert_always();
break;
}
} else {
@ -145,7 +145,7 @@ void ShaderResource::GatherExec(const instr_cf_exec_t* cf) {
}
void ShaderResource::GatherVertexFetch(const instr_fetch_vtx_t* vtx) {
XEASSERT(type_ == XE_GPU_SHADER_TYPE_VERTEX);
assert_true(type_ == XE_GPU_SHADER_TYPE_VERTEX);
// dst_reg/dst_swiz
// src_reg/src_swiz
@ -188,16 +188,16 @@ void ShaderResource::GatherVertexFetch(const instr_fetch_vtx_t* vtx) {
auto& desc = inputs.descs[n];
auto& info = desc.info;
if (desc.fetch_slot == fetch_slot) {
XEASSERT(info.element_count <= XECOUNT(info.elements));
assert_true(info.element_count <= XECOUNT(info.elements));
// It may not hold that all strides are equal, but I hope it does.
XEASSERT(!vtx->stride || info.stride_words == vtx->stride);
assert_true(!vtx->stride || info.stride_words == vtx->stride);
el = &info.elements[info.element_count++];
break;
}
}
if (!el) {
XEASSERTNOTZERO(vtx->stride);
XEASSERT(inputs.count + 1 < XECOUNT(inputs.descs));
assert_not_zero(vtx->stride);
assert_true(inputs.count + 1 < XECOUNT(inputs.descs));
auto& desc = inputs.descs[inputs.count++];
desc.input_index = inputs.count - 1;
desc.fetch_slot = fetch_slot;
@ -243,7 +243,7 @@ void ShaderResource::GatherVertexFetch(const instr_fetch_vtx_t* vtx) {
break;
default:
XELOGE("Unknown vertex format: %d", el->format);
XEASSERTALWAYS();
assert_always();
break;
}
}
@ -251,7 +251,7 @@ void ShaderResource::GatherVertexFetch(const instr_fetch_vtx_t* vtx) {
void ShaderResource::GatherTextureFetch(const xenos::instr_fetch_tex_t* tex) {
// TODO(benvanik): check dest_swiz to see if we are writing anything.
XEASSERT(sampler_inputs_.count + 1 < XECOUNT(sampler_inputs_.descs));
assert_true(sampler_inputs_.count + 1 < XECOUNT(sampler_inputs_.descs));
auto& input = sampler_inputs_.descs[sampler_inputs_.count++];
input.input_index = sampler_inputs_.count - 1;
input.fetch_slot = tex->const_idx & 0xF; // ?

View File

@ -98,11 +98,11 @@ Entry* DiscImageDevice::ResolvePath(const char* path) {
}
X_STATUS DiscImageDevice::QueryVolume(XVolumeInfo* out_info, size_t length) {
XEASSERTALWAYS();
assert_always();
return X_STATUS_NOT_IMPLEMENTED;
}
X_STATUS DiscImageDevice::QueryFileSystemAttributes(XFileSystemAttributeInfo* out_info, size_t length) {
XEASSERTALWAYS();
assert_always();
return X_STATUS_NOT_IMPLEMENTED;
}

View File

@ -53,7 +53,7 @@ DiscImageEntry::~DiscImageEntry() {
}
X_STATUS DiscImageEntry::QueryInfo(XFileInfo* out_info) {
XEASSERTNOTNULL(out_info);
assert_not_null(out_info);
out_info->creation_time = 0;
out_info->last_access_time = 0;
out_info->last_write_time = 0;
@ -66,7 +66,7 @@ X_STATUS DiscImageEntry::QueryInfo(XFileInfo* out_info) {
X_STATUS DiscImageEntry::QueryDirectory(
XDirectoryInfo* out_info, size_t length, const char* file_name, bool restart) {
XEASSERTNOTNULL(out_info);
assert_not_null(out_info);
if (restart == true && gdfx_entry_iterator_ != gdfx_entry_->children.end()) {
gdfx_entry_iterator_ = gdfx_entry_->children.end();

View File

@ -67,7 +67,7 @@ Entry* HostPathDevice::ResolvePath(const char* path) {
// TODO(gibbed): call into HostPathDevice?
X_STATUS HostPathDevice::QueryVolume(XVolumeInfo* out_info, size_t length) {
XEASSERTNOTNULL(out_info);
assert_not_null(out_info);
const char* name = "test"; // TODO(gibbed): actual value
auto end = (uint8_t*)out_info + length;
@ -86,7 +86,7 @@ X_STATUS HostPathDevice::QueryVolume(XVolumeInfo* out_info, size_t length) {
// TODO(gibbed): call into HostPathDevice?
X_STATUS HostPathDevice::QueryFileSystemAttributes(XFileSystemAttributeInfo* out_info, size_t length) {
XEASSERTNOTNULL(out_info);
assert_not_null(out_info);
const char* name = "test"; // TODO(gibbed): actual value
auto end = (uint8_t*)out_info + length;

View File

@ -54,7 +54,7 @@ HostPathEntry::~HostPathEntry() {
#define COMBINE_TIME(t) (((uint64_t)t.dwHighDateTime << 32) | t.dwLowDateTime)
X_STATUS HostPathEntry::QueryInfo(XFileInfo* out_info) {
XEASSERTNOTNULL(out_info);
assert_not_null(out_info);
WIN32_FILE_ATTRIBUTE_DATA data;
if (!GetFileAttributesEx(
@ -75,7 +75,7 @@ X_STATUS HostPathEntry::QueryInfo(XFileInfo* out_info) {
X_STATUS HostPathEntry::QueryDirectory(
XDirectoryInfo* out_info, size_t length, const char* file_name, bool restart) {
XEASSERTNOTNULL(out_info);
assert_not_null(out_info);
WIN32_FIND_DATA ffd;

View File

@ -100,11 +100,11 @@ Entry* STFSContainerDevice::ResolvePath(const char* path) {
X_STATUS STFSContainerDevice::QueryVolume(XVolumeInfo* out_info, size_t length) {
XEASSERTALWAYS();
assert_always();
return X_STATUS_NOT_IMPLEMENTED;
}
X_STATUS STFSContainerDevice::QueryFileSystemAttributes(XFileSystemAttributeInfo* out_info, size_t length) {
XEASSERTALWAYS();
assert_always();
return X_STATUS_NOT_IMPLEMENTED;
}

View File

@ -32,7 +32,7 @@ STFSContainerEntry::~STFSContainerEntry() {
}
X_STATUS STFSContainerEntry::QueryInfo(XFileInfo* out_info) {
XEASSERTNOTNULL(out_info);
assert_not_null(out_info);
out_info->creation_time = stfs_entry_->update_timestamp;
out_info->last_access_time = stfs_entry_->access_timestamp;
out_info->last_write_time = stfs_entry_->update_timestamp;
@ -45,7 +45,7 @@ X_STATUS STFSContainerEntry::QueryInfo(XFileInfo* out_info) {
X_STATUS STFSContainerEntry::QueryDirectory(
XDirectoryInfo* out_info, size_t length, const char* file_name, bool restart) {
XEASSERTNOTNULL(out_info);
assert_not_null(out_info);
if (restart && stfs_entry_iterator_ != stfs_entry_->children.end()) {
stfs_entry_iterator_ = stfs_entry_->children.end();

View File

@ -26,7 +26,7 @@ MemoryMapping::~MemoryMapping() {
Entry::Entry(Type type, Device* device, const char* path) :
type_(type),
device_(device) {
XEASSERTNOTNULL(device);
assert_not_null(device);
path_ = xestrdupa(path);
// TODO(benvanik): *shudder*
absolute_path_ = xestrdupa((std::string(device->path()) + std::string(path)).c_str());

View File

@ -42,7 +42,7 @@ KernelState::KernelState(Emulator* emulator) :
object_table_ = new ObjectTable();
object_mutex_ = xe_mutex_alloc(10000);
XEASSERTNULL(shared_kernel_state_);
assert_null(shared_kernel_state_);
shared_kernel_state_ = this;
}
@ -55,7 +55,7 @@ KernelState::~KernelState() {
delete dispatcher_;
XEASSERT(shared_kernel_state_ == this);
assert_true(shared_kernel_state_ == this);
shared_kernel_state_ = NULL;
}
@ -81,7 +81,7 @@ XModule* KernelState::GetModule(const char* name) {
return NULL;
} else {
// TODO(benvanik): support user modules/loading/etc.
XEASSERTALWAYS();
assert_always();
return NULL;
}
}

View File

@ -22,7 +22,7 @@ ObjectTable::ObjectTable() :
table_(NULL),
last_free_entry_(0) {
table_mutex_ = xe_mutex_alloc(0);
XEASSERTNOTNULL(table_mutex_);
assert_not_null(table_mutex_);
}
ObjectTable::~ObjectTable() {
@ -88,7 +88,7 @@ X_STATUS ObjectTable::FindFreeSlot(uint32_t* out_slot) {
}
X_STATUS ObjectTable::AddHandle(XObject* object, X_HANDLE* out_handle) {
XEASSERTNOTNULL(out_handle);
assert_not_null(out_handle);
X_STATUS result = X_STATUS_SUCCESS;
@ -156,7 +156,7 @@ X_STATUS ObjectTable::RemoveHandle(X_HANDLE handle) {
}
X_STATUS ObjectTable::GetObject(X_HANDLE handle, XObject** out_object) {
XEASSERTNOTNULL(out_object);
assert_not_null(out_object);
X_STATUS result = X_STATUS_SUCCESS;
@ -197,7 +197,7 @@ X_STATUS ObjectTable::GetObject(X_HANDLE handle, XObject** out_object) {
X_HANDLE ObjectTable::TranslateHandle(X_HANDLE handle) {
if (handle == 0xFFFFFFFF) {
// CurrentProcess
//XEASSERTALWAYS();
//assert_always();
return 0;
} else if (handle == 0xFFFFFFFE) {
// CurrentThread

View File

@ -26,13 +26,13 @@ XEvent::~XEvent() {
}
void XEvent::Initialize(bool manual_reset, bool initial_state) {
XEASSERTNULL(handle_);
assert_null(handle_);
handle_ = CreateEvent(NULL, manual_reset, initial_state, NULL);
}
void XEvent::InitializeNative(void* native_ptr, DISPATCH_HEADER& header) {
XEASSERTNULL(handle_);
assert_null(handle_);
bool manual_reset;
switch (header.type_flags >> 24) {
@ -43,7 +43,7 @@ void XEvent::InitializeNative(void* native_ptr, DISPATCH_HEADER& header) {
manual_reset = false;
break;
default:
XEASSERTALWAYS();
assert_always();
return;
}

View File

@ -81,7 +81,7 @@ public:
} while (info->next_entry_offset != 0);
}
};
XEASSERTSTRUCTSIZE(XDirectoryInfo, 72);
static_assert_size(XDirectoryInfo, 72);
// http://msdn.microsoft.com/en-us/library/windows/hardware/ff540287(v=vs.85).aspx
class XVolumeInfo {
@ -102,7 +102,7 @@ public:
xe_copy_memory(dst + 20, this->label_length, this->label, this->label_length);
}
};
XEASSERTSTRUCTSIZE(XVolumeInfo, 24);
static_assert_size(XVolumeInfo, 24);
// http://msdn.microsoft.com/en-us/library/windows/hardware/ff540251(v=vs.85).aspx
class XFileSystemAttributeInfo {
@ -121,7 +121,7 @@ public:
xe_copy_memory(dst + 12, this->fs_name_length, this->fs_name, this->fs_name_length);
}
};
XEASSERTSTRUCTSIZE(XFileSystemAttributeInfo, 16);
static_assert_size(XFileSystemAttributeInfo, 16);
class XFile : public XObject {
public:

View File

@ -26,22 +26,22 @@ XMutant::~XMutant() {
}
void XMutant::Initialize(bool initial_owner) {
XEASSERTNULL(handle_);
assert_null(handle_);
handle_ = CreateMutex(NULL, initial_owner ? TRUE : FALSE, NULL);
}
void XMutant::InitializeNative(void* native_ptr, DISPATCH_HEADER& header) {
XEASSERTNULL(handle_);
assert_null(handle_);
// Haven't seen this yet, but it's possible.
XEASSERTALWAYS();
assert_always();
}
X_STATUS XMutant::ReleaseMutant(
uint32_t priority_increment, bool abandon, bool wait) {
// TODO(benvanik): abandoning.
XEASSERTFALSE(abandon);
assert_false(abandon);
BOOL result = ReleaseMutex(handle_);
if (result) {
return X_STATUS_SUCCESS;

View File

@ -28,7 +28,7 @@ XNotifyListener::~XNotifyListener() {
}
void XNotifyListener::Initialize(uint64_t mask) {
XEASSERTNULL(wait_handle_);
assert_null(wait_handle_);
lock_ = xe_mutex_alloc();
wait_handle_ = CreateEvent(NULL, TRUE, FALSE, NULL);

View File

@ -26,13 +26,13 @@ XSemaphore::~XSemaphore() {
}
void XSemaphore::Initialize(int32_t initial_count, int32_t maximum_count) {
XEASSERTNULL(handle_);
assert_null(handle_);
handle_ = CreateSemaphore(NULL, initial_count, maximum_count, NULL);
}
void XSemaphore::InitializeNative(void* native_ptr, DISPATCH_HEADER& header) {
XEASSERTNULL(handle_);
assert_null(handle_);
// NOT IMPLEMENTED
// We expect Initialize to be called shortly.

View File

@ -334,7 +334,7 @@ X_STATUS XThread::PlatformCreate() {
this);
#else
// TODO(benvanik): pthread_create_suspended_np on linux
XEASSERTALWAYS();
assert_always();
#endif // OSX
} else {
result_code = pthread_create(
@ -552,7 +552,7 @@ X_STATUS XThread::Delay(
if (timeout_ticks > 0) {
// Absolute time, based on January 1, 1601.
// TODO(benvanik): convert time to relative time.
XEASSERTALWAYS();
assert_always();
timeout_ms = 0;
} else if (timeout_ticks < 0) {
// Relative time.

View File

@ -28,7 +28,7 @@ XTimer::~XTimer() {
}
void XTimer::Initialize(uint32_t timer_type) {
XEASSERTNULL(handle_);
assert_null(handle_);
bool manual_reset = false;
switch (timer_type) {
@ -39,7 +39,7 @@ void XTimer::Initialize(uint32_t timer_type) {
manual_reset = false;
break;
default:
XEASSERTALWAYS();
assert_always();
break;
}
@ -71,7 +71,7 @@ X_STATUS XTimer::SetTimer(
void XTimer::CompletionRoutine(
XTimer* timer, DWORD timer_low, DWORD timer_high) {
XEASSERT(timer->current_routine_);
assert_true(timer->current_routine_);
// Queue APC to call back routine with (arg, low, high).
// TODO(benvanik): APC dispatch.

View File

@ -238,7 +238,7 @@ int xe_xex2_read_header(const uint8_t *addr, const size_t length,
{
const size_t max_count = XECOUNT(header->import_libraries);
size_t count = XEGETUINT32BE(pp + 0x08);
XEASSERT(count <= max_count);
assert_true(count <= max_count);
if (count > max_count) {
XELOGW("ignoring %zu extra entries in XEX_HEADER_IMPORT_LIBRARIES",
(max_count - count));
@ -260,7 +260,7 @@ int xe_xex2_read_header(const uint8_t *addr, const size_t length,
const uint16_t name_index = XEGETUINT16BE(pp + 0x24) & 0xFF;
for (size_t i = 0, j = 0; i < string_table_size;) {
XEASSERT(j <= 0xFF);
assert_true(j <= 0xFF);
if (j == name_index) {
XEIGNORE(xestrcpya(library->name, XECOUNT(library->name),
string_table + i));
@ -293,7 +293,7 @@ int xe_xex2_read_header(const uint8_t *addr, const size_t length,
{
const size_t max_count = XECOUNT(header->static_libraries);
size_t count = (opt_header->length - 4) / 16;
XEASSERT(count <= max_count);
assert_true(count <= max_count);
if (count > max_count) {
XELOGW("ignoring %zu extra entries in XEX_HEADER_STATIC_LIBRARIES",
(max_count - count));
@ -326,7 +326,7 @@ int xe_xex2_read_header(const uint8_t *addr, const size_t length,
switch (fmt->compression_type) {
case XEX_COMPRESSION_NONE:
// TODO: XEX_COMPRESSION_NONE
XEASSERTALWAYS();
assert_always();
break;
case XEX_COMPRESSION_BASIC:
{
@ -368,7 +368,7 @@ int xe_xex2_read_header(const uint8_t *addr, const size_t length,
break;
case XEX_COMPRESSION_DELTA:
// TODO: XEX_COMPRESSION_DELTA
XEASSERTALWAYS();
assert_always();
break;
}
}
@ -460,7 +460,7 @@ typedef struct mspack_memory_file_t {
} mspack_memory_file;
mspack_memory_file *mspack_memory_open(struct mspack_system *sys,
void* buffer, const size_t buffer_size) {
XEASSERT(buffer_size < INT_MAX);
assert_true(buffer_size < INT_MAX);
if (buffer_size >= INT_MAX) {
return NULL;
}
@ -575,7 +575,7 @@ int xe_xex2_read_image_uncompressed(const xe_xex2_header_t *header,
uncompressed_size);
return 0;
default:
XEASSERTALWAYS();
assert_always();
return 1;
}
@ -642,7 +642,7 @@ int xe_xex2_read_image_basic_compressed(const xe_xex2_header_t *header,
}
break;
default:
XEASSERTALWAYS();
assert_always();
return 1;
}
@ -701,7 +701,7 @@ int xe_xex2_read_image_compressed(const xe_xex2_header_t *header,
(uint8_t*)input_buffer, input_size);
break;
default:
XEASSERTALWAYS();
assert_always();
return false;
}
@ -809,7 +809,7 @@ int xe_xex2_read_image(xe_xex2_ref xex, const uint8_t *xex_addr,
return xe_xex2_read_image_compressed(
header, xex_addr, xex_length, memory);
default:
XEASSERTALWAYS();
assert_always();
return 1;
}
}
@ -922,7 +922,7 @@ int xe_xex2_find_import_infos(xe_xex2_ref xex,
break;
}
}
XEASSERT(library_index != (size_t)-1);
assert_true(library_index != (size_t)-1);
// Records:
// The number of records does not correspond to the number of imports!
@ -947,9 +947,9 @@ int xe_xex2_find_import_infos(xe_xex2_ref xex,
// Allocate storage.
xe_xex2_import_info_t *infos = (xe_xex2_import_info_t*)xe_calloc(
info_count * sizeof(xe_xex2_import_info_t));
XEEXPECTNOTNULL(infos);
assert_not_null(infos);
XEASSERTNOTZERO(info_count);
assert_not_zero(info_count);
// Construct infos.
for (size_t n = 0, i = 0; n < library->record_count; n++) {
@ -958,7 +958,7 @@ int xe_xex2_find_import_infos(xe_xex2_ref xex,
const uint32_t type = (value & 0xFF000000) >> 24;
// Verify library index matches given library.
//XEASSERT(library_index == ((value >> 16) & 0xFF));
//assert_true(library_index == ((value >> 16) & 0xFF));
switch (type) {
case 0x00:
@ -971,14 +971,14 @@ int xe_xex2_find_import_infos(xe_xex2_ref xex,
case 0x01:
{
// Thunk for previous record.
XEASSERT(i > 0);
assert_true(i > 0);
xe_xex2_import_info_t* info = &infos[i - 1];
XEASSERT(info->ordinal == (value & 0xFFFF));
assert_true(info->ordinal == (value & 0xFFFF));
info->thunk_address = record;
}
break;
default:
//XEASSERTALWAYS();
//assert_always();
break;
}
}
@ -986,10 +986,6 @@ int xe_xex2_find_import_infos(xe_xex2_ref xex,
xex->library_imports[library_index].count = info_count;
xex->library_imports[library_index].infos = infos;
return 0;
XECLEANUP:
xe_free(infos);
return 1;
}
int xe_xex2_get_import_infos(xe_xex2_ref xex,

View File

@ -33,7 +33,7 @@ SHIM_CALL XamContentGetLicenseMask_shim(
mask_ptr,
overlapped_ptr);
XEASSERTZERO(overlapped_ptr);
assert_zero(overlapped_ptr);
// Arcade games seem to call this and check the result mask for random bits.
// If we fail, the games seem to use a hardcoded mask, which is likely trial.

View File

@ -44,8 +44,8 @@ SHIM_CALL XMsgInProcessCall_shim(
uint32_t a = SHIM_MEM_32(arg1 + 0); // 0x00000002
uint32_t b = SHIM_MEM_32(arg1 + 4); // out ptr to 4b - expect 0
XELOGD("XMPGetStatusEx(%.8X, %.8X)", a, b);
XEASSERTZERO(arg2);
XEASSERT(a == 2);
assert_zero(arg2);
assert_true(a == 2);
SHIM_SET_MEM_32(b, 0);
handled = true;
} else if (message == 0x0007001A) {
@ -59,8 +59,8 @@ SHIM_CALL XMsgInProcessCall_shim(
uint32_t a = SHIM_MEM_32(arg1 + 0); // 0x00000002
uint32_t b = SHIM_MEM_32(arg1 + 4); // out ptr to 4b - expect 0
XELOGD("XMPGetStatus(%.8X, %.8X)", a, b);
XEASSERTZERO(arg2);
XEASSERT(a == 2);
assert_zero(arg2);
assert_true(a == 2);
SHIM_SET_MEM_32(b, 0);
handled = true;
}

View File

@ -136,7 +136,7 @@ SHIM_CALL XamUserReadProfileSettings_shim(
// 0xfffe07d1 = profile?
// TODO(benvanik): implement overlapped support
XEASSERTZERO(overlapped_ptr);
assert_zero(overlapped_ptr);
// First call asks for size (fill buffer_size_ptr).
// Second call asks for buffer contents with that size.

View File

@ -75,7 +75,7 @@ SHIM_CALL XAudioGetVoiceCategoryVolumeChangeMask_shim(
"XAudioGetVoiceCategoryVolumeChangeMask(%.8X, %.8X)",
driver_ptr, out_ptr);
XEASSERT((driver_ptr & 0xFFFF0000) == 0x41550000);
assert_true((driver_ptr & 0xFFFF0000) == 0x41550000);
auto audio_system = state->emulator()->audio_system();
@ -124,7 +124,7 @@ SHIM_CALL XAudioRegisterRenderDriverClient_shim(
return;
}
XEASSERTTRUE(!(index & ~0x0000FFFF));
assert_true(!(index & ~0x0000FFFF));
SHIM_SET_MEM_32(driver_ptr, 0x41550000 | (index & 0x0000FFFF));
SHIM_SET_RETURN_32(X_ERROR_SUCCESS);
}
@ -138,7 +138,7 @@ SHIM_CALL XAudioUnregisterRenderDriverClient_shim(
"XAudioUnregisterRenderDriverClient(%.8X)",
driver_ptr);
XEASSERT((driver_ptr & 0xFFFF0000) == 0x41550000);
assert_true((driver_ptr & 0xFFFF0000) == 0x41550000);
auto audio_system = state->emulator()->audio_system();
audio_system->UnregisterClient(driver_ptr & 0x0000FFFF);
@ -155,7 +155,7 @@ SHIM_CALL XAudioSubmitRenderDriverFrame_shim(
"XAudioSubmitRenderDriverFrame(%.8X, %.8X)",
driver_ptr, samples_ptr);
XEASSERT((driver_ptr & 0xFFFF0000) == 0x41550000);
assert_true((driver_ptr & 0xFFFF0000) == 0x41550000);
auto audio_system = state->emulator()->audio_system();
audio_system->SubmitFrame(driver_ptr & 0x0000FFFF, samples_ptr);

View File

@ -168,7 +168,7 @@ SHIM_CALL DbgPrint_shim(
local[0] = '\0';
strncat(local, start, end + 1 - start);
XEASSERT(arg_size == 8 || arg_size == 4);
assert_true(arg_size == 8 || arg_size == 4);
if (arg_size == 8) {
if (arg_extras == 0) {
uint64_t value = arg_index < 7
@ -179,7 +179,7 @@ SHIM_CALL DbgPrint_shim(
arg_index++;
}
else {
XEASSERT(false);
assert_true(false);
}
}
else if (arg_size == 4) {
@ -192,13 +192,13 @@ SHIM_CALL DbgPrint_shim(
arg_index++;
}
else {
XEASSERT(false);
assert_true(false);
}
}
}
else if (*end == 'n')
{
XEASSERT(arg_size == 4);
assert_true(arg_size == 4);
if (arg_extras == 0) {
uint32_t value = arg_index < 7
? SHIM_GET_ARG_32(1 + arg_index)
@ -207,7 +207,7 @@ SHIM_CALL DbgPrint_shim(
arg_index++;
}
else {
XEASSERT(false);
assert_true(false);
}
}
else if (*end == 's' ||
@ -216,7 +216,7 @@ SHIM_CALL DbgPrint_shim(
local[0] = '\0';
strncat(local, start, end + 1 - start);
XEASSERT(arg_size == 4);
assert_true(arg_size == 4);
if (arg_extras == 0) {
uint32_t value = arg_index < 7
? SHIM_GET_ARG_32(1 + arg_index)
@ -227,11 +227,11 @@ SHIM_CALL DbgPrint_shim(
arg_index++;
}
else {
XEASSERT(false);
assert_true(false);
}
}
else {
XEASSERT(false);
assert_true(false);
break;
}
@ -271,7 +271,7 @@ SHIM_CALL RtlRaiseException_shim(
// SetThreadName. FFS.
uint32_t thread_info_ptr = record_ptr + 20;
uint32_t type = SHIM_MEM_32(thread_info_ptr + 0);
XEASSERT(type == 0x1000);
assert_true(type == 0x1000);
uint32_t name_ptr = SHIM_MEM_32(thread_info_ptr + 4);
uint32_t thread_id = SHIM_MEM_32(thread_info_ptr + 8);

View File

@ -25,13 +25,13 @@ namespace kernel {
void xeHalReturnToFirmware(uint32_t routine) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
// void
// IN FIRMWARE_REENTRY Routine
// Routine must be 1 'HalRebootRoutine'
XEASSERT(routine == 1);
assert_true(routine == 1);
// TODO(benvanik): diediedie much more gracefully
// Not sure how to blast back up the stack in LLVM without exceptions, though.

View File

@ -70,8 +70,8 @@ SHIM_CALL NtCreateFile_shim(
attrs.root_directory != 0) {
result = state->object_table()->GetObject(
attrs.root_directory, (XObject**)&root_file);
XEASSERT(XSUCCEEDED(result));
XEASSERT(root_file->type() == XObject::Type::kTypeFile);
assert_true(XSUCCEEDED(result));
assert_true(root_file->type() == XObject::Type::kTypeFile);
auto root_path = root_file->absolute_path();
auto target_path = xestrdupa((std::string(root_path) + std::string(object_name)).c_str());
@ -147,9 +147,9 @@ SHIM_CALL NtOpenFile_shim(
if (attrs.root_directory != 0xFFFFFFFD) { // ObDosDevices
result = state->object_table()->GetObject(
attrs.root_directory, (XObject**)&root_file);
XEASSERT(XSUCCEEDED(result));
XEASSERT(root_file->type() == XObject::Type::kTypeFile);
XEASSERTALWAYS();
assert_true(XSUCCEEDED(result));
assert_true(root_file->type() == XObject::Type::kTypeFile);
assert_always();
}
// Resolve the file using the virtual file system.
@ -226,7 +226,7 @@ SHIM_CALL NtReadFile_shim(
byte_offset);
// Async not supported yet.
XEASSERTNULL(apc_routine_ptr);
assert_zero(apc_routine_ptr);
X_STATUS result = X_STATUS_SUCCESS;
uint32_t info = 0;
@ -338,13 +338,13 @@ SHIM_CALL NtSetInformationFile_shim(
// struct FILE_POSITION_INFORMATION {
// LARGE_INTEGER CurrentByteOffset;
// };
XEASSERT(length == 8);
assert_true(length == 8);
info = 8;
file->set_position(SHIM_MEM_64(file_info_ptr));
break;
default:
// Unsupported, for now.
XEASSERTALWAYS();
assert_always();
info = 0;
break;
}
@ -394,7 +394,7 @@ SHIM_CALL NtQueryInformationFile_shim(
switch (file_info_class) {
case XFileInternalInformation:
// Internal unique file pointer. Not sure why anyone would want this.
XEASSERT(length == 8);
assert_true(length == 8);
info = 8;
// TODO(benvanik): use pointer to fs:: entry?
SHIM_SET_MEM_64(file_info_ptr, hash_combine(0, file->absolute_path()));
@ -403,7 +403,7 @@ SHIM_CALL NtQueryInformationFile_shim(
// struct FILE_POSITION_INFORMATION {
// LARGE_INTEGER CurrentByteOffset;
// };
XEASSERT(length == 8);
assert_true(length == 8);
info = 8;
SHIM_SET_MEM_64(file_info_ptr, file->position());
break;
@ -418,7 +418,7 @@ SHIM_CALL NtQueryInformationFile_shim(
// ULONG FileAttributes;
// ULONG Unknown;
// };
XEASSERT(length == 56);
assert_true(length == 56);
XFileInfo file_info;
result = file->QueryInfo(&file_info);
if (XSUCCEEDED(result)) {
@ -447,7 +447,7 @@ SHIM_CALL NtQueryInformationFile_shim(
break;
default:
// Unsupported, for now.
XEASSERTALWAYS();
assert_always();
info = 0;
break;
}
@ -489,9 +489,9 @@ SHIM_CALL NtQueryFullAttributesFile_shim(
if (attrs.root_directory != 0xFFFFFFFD) { // ObDosDevices
result = state->object_table()->GetObject(
attrs.root_directory, (XObject**)&root_file);
XEASSERT(XSUCCEEDED(result));
XEASSERT(root_file->type() == XObject::Type::kTypeFile);
XEASSERTALWAYS();
assert_true(XSUCCEEDED(result));
assert_true(root_file->type() == XObject::Type::kTypeFile);
assert_always();
}
// Resolve the file using the virtual file system.
@ -560,7 +560,7 @@ SHIM_CALL NtQueryVolumeInformationFile_shim(
}
default:
// Unsupported, for now.
XEASSERTALWAYS();
assert_always();
info = 0;
break;
}

View File

@ -29,7 +29,7 @@ X_STATUS xeNtAllocateVirtualMemory(
uint32_t allocation_type, uint32_t protect_bits,
uint32_t unknown) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
// NTSTATUS
// _Inout_ PVOID *BaseAddress,
@ -39,7 +39,7 @@ X_STATUS xeNtAllocateVirtualMemory(
// ? handle?
// I've only seen zero.
XEASSERT(unknown == 0);
assert_true(unknown == 0);
// This allocates memory from the kernel heap, which is initialized on startup
// and shared by both the kernel implementation and user code.
@ -73,7 +73,7 @@ X_STATUS xeNtAllocateVirtualMemory(
// already happened.
if (*base_addr_ptr) {
// Having a pointer already means that this is likely a follow-on COMMIT.
XEASSERT(!(allocation_type & X_MEM_RESERVE) &&
assert_true(!(allocation_type & X_MEM_RESERVE) &&
(allocation_type & X_MEM_COMMIT));
return X_STATUS_SUCCESS;
}
@ -127,7 +127,7 @@ X_STATUS xeNtFreeVirtualMemory(
uint32_t* base_addr_ptr, uint32_t* region_size_ptr,
uint32_t free_type, uint32_t unknown) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
// NTSTATUS
// _Inout_ PVOID *BaseAddress,
@ -136,7 +136,7 @@ X_STATUS xeNtFreeVirtualMemory(
// ? handle?
// I've only seen zero.
XEASSERT(unknown == 0);
assert_true(unknown == 0);
if (!*base_addr_ptr) {
return X_STATUS_MEMORY_NOT_ALLOCATED;
@ -193,7 +193,7 @@ uint32_t xeMmAllocatePhysicalMemoryEx(
uint32_t type, uint32_t region_size, uint32_t protect_bits,
uint32_t min_addr_range, uint32_t max_addr_range, uint32_t alignment) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
// Type will usually be 0 (user request?), while types 1 and 2 are sometimes
// used by D3D/etc.
@ -230,8 +230,8 @@ uint32_t xeMmAllocatePhysicalMemoryEx(
// and the memory must be allocated there. I haven't seen a game do this,
// and instead they all do min=0 / max=-1 to indicate the system should pick.
// If we have to support arbitrary placement, things will get nasty.
XEASSERT(min_addr_range == 0);
XEASSERT(max_addr_range == 0xFFFFFFFF);
assert_true(min_addr_range == 0);
assert_true(max_addr_range == 0xFFFFFFFF);
// Allocate.
uint32_t flags = MEMORY_FLAG_PHYSICAL;
@ -280,7 +280,7 @@ SHIM_CALL MmAllocatePhysicalMemoryEx_shim(
void xeMmFreePhysicalMemory(uint32_t type, uint32_t base_address) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
// base_address = result of MmAllocatePhysicalMemory.
@ -310,7 +310,7 @@ SHIM_CALL MmFreePhysicalMemory_shim(
uint32_t xeMmQueryAddressProtect(uint32_t base_address) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
uint32_t access = state->memory()->QueryProtect(base_address);
@ -334,7 +334,7 @@ SHIM_CALL MmQueryAddressProtect_shim(
uint32_t xeMmQueryAllocationSize(uint32_t base_address) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
size_t size = state->memory()->QuerySize(base_address);
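As the MmAllocatePhysicalMemoryEx comments above note, titles pass min=0 / max=0xFFFFFFFF so the system picks the placement. A hypothetical guest-side call shape that satisfies those asserts; the size and the protect constant name are illustrative only:

// Hypothetical call matching the asserted min/max address-range pattern.
uint32_t base = xeMmAllocatePhysicalMemoryEx(
    0,                 // type: plain user request
    64 * 1024,         // region_size in bytes
    X_PAGE_READWRITE,  // protect_bits (constant name assumed)
    0,                 // min_addr_range: no constraint
    0xFFFFFFFF,        // max_addr_range: no constraint, system picks
    0);                // alignment: default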

View File

@ -28,7 +28,7 @@ void xeKeBugCheckEx(uint32_t code, uint32_t param1, uint32_t param2, uint32_t pa
XELOGD("*** STOP: 0x%.8X (0x%.8X, 0x%.8X, 0x%.8X, 0x%.8X)", code, param1, param2, param3, param4);
fflush(stdout);
DebugBreak();
XEASSERTALWAYS();
assert_always();
}
SHIM_CALL KeBugCheck_shim(

View File

@ -43,7 +43,7 @@ X_STATUS xeExGetXConfigSetting(
value = 0x00001000; // USA/Canada
break;
default:
XEASSERTUNHANDLEDCASE(setting);
assert_unhandled_case(setting);
return X_STATUS_INVALID_PARAMETER_2;
}
break;
@ -80,12 +80,12 @@ X_STATUS xeExGetXConfigSetting(
value = 0;
break;
default:
XEASSERTUNHANDLEDCASE(setting);
assert_unhandled_case(setting);
return X_STATUS_INVALID_PARAMETER_2;
}
break;
default:
XEASSERTUNHANDLEDCASE(category);
assert_unhandled_case(category);
return X_STATUS_INVALID_PARAMETER_1;
}
@ -134,7 +134,7 @@ SHIM_CALL ExGetXConfigSetting_shim(
int xeXexCheckExecutablePriviledge(uint32_t privilege) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
// BOOL
// DWORD Privilege
@ -176,7 +176,7 @@ SHIM_CALL XexCheckExecutablePrivilege_shim(
int xeXexGetModuleHandle(const char* module_name,
X_HANDLE* module_handle_ptr) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
// BOOL
// LPCSZ ModuleName

View File

@ -30,7 +30,7 @@ namespace kernel {
uint32_t xeRtlCompareMemory(uint32_t source1_ptr, uint32_t source2_ptr,
uint32_t length) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
// SIZE_T
// _In_ const VOID *Source1,
@ -74,7 +74,7 @@ SHIM_CALL RtlCompareMemory_shim(
uint32_t xeRtlCompareMemoryUlong(uint32_t source_ptr, uint32_t length,
uint32_t pattern) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
// SIZE_T
// _In_ PVOID Source,
@ -124,7 +124,7 @@ SHIM_CALL RtlCompareMemoryUlong_shim(
void xeRtlFillMemoryUlong(uint32_t destination_ptr, uint32_t length,
uint32_t pattern) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
// VOID
// _Out_ PVOID Destination,
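For context, a host-side sketch of the contract this shim implements, assuming the standard RtlFillMemoryUlong semantics (not the shim code itself): the 32-bit pattern is stamped across the given number of bytes.

#include <cstdint>

// Illustration only: length is expected to be a multiple of sizeof(uint32_t).
void FillMemoryUlong(uint32_t* destination, uint32_t length_bytes, uint32_t pattern) {
  for (uint32_t i = 0; i < length_bytes / sizeof(uint32_t); ++i) {
    destination[i] = pattern;
  }
}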
@ -172,7 +172,7 @@ SHIM_CALL RtlFillMemoryUlong_shim(
// http://msdn.microsoft.com/en-us/library/ff561918
void xeRtlInitAnsiString(uint32_t destination_ptr, uint32_t source_ptr) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
// VOID
// _Out_ PANSI_STRING DestinationString,
@ -207,7 +207,7 @@ SHIM_CALL RtlInitAnsiString_shim(
// http://msdn.microsoft.com/en-us/library/ff561899
void xeRtlFreeAnsiString(uint32_t string_ptr) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
// VOID
// _Inout_ PANSI_STRING AnsiString
@ -242,7 +242,7 @@ SHIM_CALL RtlFreeAnsiString_shim(
// http://msdn.microsoft.com/en-us/library/ff561934
void xeRtlInitUnicodeString(uint32_t destination_ptr, uint32_t source_ptr) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
// VOID
// _Out_ PUNICODE_STRING DestinationString,
@ -281,7 +281,7 @@ SHIM_CALL RtlInitUnicodeString_shim(
// http://msdn.microsoft.com/en-us/library/ff561903
void xeRtlFreeUnicodeString(uint32_t string_ptr) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
// VOID
// _Inout_ PUNICODE_STRING UnicodeString
@ -310,7 +310,7 @@ SHIM_CALL RtlFreeUnicodeString_shim(
X_STATUS xeRtlUnicodeStringToAnsiString(
uint32_t destination_ptr, uint32_t source_ptr, uint32_t alloc_dest) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
// NTSTATUS
// _Inout_ PANSI_STRING DestinationString,
@ -318,7 +318,7 @@ X_STATUS xeRtlUnicodeStringToAnsiString(
// _In_ BOOLEAN AllocateDestinationString
XELOGE("RtlUnicodeStringToAnsiString not yet implemented");
XEASSERTALWAYS();
assert_always();
if (alloc_dest) {
// Allocate a new buffer to place the string into.
@ -438,7 +438,7 @@ SHIM_CALL RtlNtStatusToDosError_shim(
uint32_t xeRtlImageXexHeaderField(uint32_t xex_header_base_ptr,
uint32_t image_field) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
// PVOID
// PVOID XexHeaderBase
@ -525,11 +525,11 @@ typedef struct {
#pragma pack(pop)
}
XEASSERTSTRUCTSIZE(X_RTL_CRITICAL_SECTION, 28);
static_assert_size(X_RTL_CRITICAL_SECTION, 28);
void xeRtlInitializeCriticalSection(uint32_t cs_ptr) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
// VOID
// _Out_ LPCRITICAL_SECTION lpCriticalSection
@ -556,7 +556,7 @@ SHIM_CALL RtlInitializeCriticalSection_shim(
X_STATUS xeRtlInitializeCriticalSectionAndSpinCount(
uint32_t cs_ptr, uint32_t spin_count) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
// NTSTATUS
// _Out_ LPCRITICAL_SECTION lpCriticalSection,
@ -597,7 +597,7 @@ SHIM_CALL RtlInitializeCriticalSectionAndSpinCount_shim(
// TODO(benvanik): remove the need for passing in thread_id.
void xeRtlEnterCriticalSection(uint32_t cs_ptr, uint32_t thread_id) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
// VOID
// _Inout_ LPCRITICAL_SECTION lpCriticalSection
@ -651,7 +651,7 @@ SHIM_CALL RtlEnterCriticalSection_shim(
// TODO(benvanik): remove the need for passing in thread_id.
uint32_t xeRtlTryEnterCriticalSection(uint32_t cs_ptr, uint32_t thread_id) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
// DWORD
// _Inout_ LPCRITICAL_SECTION lpCriticalSection
@ -689,7 +689,7 @@ SHIM_CALL RtlTryEnterCriticalSection_shim(
void xeRtlLeaveCriticalSection(uint32_t cs_ptr) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
// VOID
// _Inout_ LPCRITICAL_SECTION lpCriticalSection

View File

@ -173,7 +173,7 @@ SHIM_CALL vsprintf_shim(
local[0] = '\0';
strncat(local, start, end + 1 - start);
XEASSERT(arg_size == 8 || arg_size == 4);
assert_true(arg_size == 8 || arg_size == 4);
if (arg_size == 8) {
if (arg_extras == 0) {
uint64_t value = SHIM_MEM_64(arg_ptr + (arg_index * 8)); // TODO: check if this is correct...
@ -182,7 +182,7 @@ SHIM_CALL vsprintf_shim(
arg_index++;
}
else {
XEASSERT(false);
assert_true(false);
}
}
else if (arg_size == 4) {
@ -193,20 +193,20 @@ SHIM_CALL vsprintf_shim(
arg_index++;
}
else {
XEASSERT(false);
assert_true(false);
}
}
}
else if (*end == 'n')
{
XEASSERT(arg_size == 4);
assert_true(arg_size == 4);
if (arg_extras == 0) {
uint32_t value = (uint32_t)SHIM_MEM_64(arg_ptr + (arg_index * 8)); // TODO: check if this is correct...
SHIM_SET_MEM_32(value, (uint32_t)((b - buffer) / sizeof(char)));
arg_index++;
}
else {
XEASSERT(false);
assert_true(false);
}
}
else if (*end == 's' ||
@ -215,7 +215,7 @@ SHIM_CALL vsprintf_shim(
local[0] = '\0';
strncat(local, start, end + 1 - start);
XEASSERT(arg_size == 4);
assert_true(arg_size == 4);
if (arg_extras == 0) {
uint32_t value = (uint32_t)SHIM_MEM_64(arg_ptr + (arg_index * 8)); // TODO: check if this is correct...
const void* pointer = (const void*)SHIM_MEM_ADDR(value);
@ -224,11 +224,11 @@ SHIM_CALL vsprintf_shim(
arg_index++;
}
else {
XEASSERT(false);
assert_true(false);
}
}
else {
XEASSERT(false);
assert_true(false);
break;
}
format = end;
@ -386,7 +386,7 @@ SHIM_CALL _vsnprintf_shim(
local[0] = '\0';
strncat(local, start, end + 1 - start);
XEASSERT(arg_size == 8 || arg_size == 4);
assert_true(arg_size == 8 || arg_size == 4);
if (arg_size == 8) {
if (arg_extras == 0) {
uint64_t value = SHIM_MEM_64(arg_ptr + (arg_index * 8)); // TODO: check if this is correct...
@ -395,7 +395,7 @@ SHIM_CALL _vsnprintf_shim(
arg_index++;
}
else {
XEASSERT(false);
assert_true(false);
}
}
else if (arg_size == 4) {
@ -406,20 +406,20 @@ SHIM_CALL _vsnprintf_shim(
arg_index++;
}
else {
XEASSERT(false);
assert_true(false);
}
}
}
else if (*end == 'n')
{
XEASSERT(arg_size == 4);
assert_true(arg_size == 4);
if (arg_extras == 0) {
uint32_t value = (uint32_t)SHIM_MEM_64(arg_ptr + (arg_index * 8)); // TODO: check if this is correct...
SHIM_SET_MEM_32(value, (uint32_t)((b - buffer) / sizeof(char)));
arg_index++;
}
else {
XEASSERT(false);
assert_true(false);
}
}
else if (*end == 's' ||
@ -428,7 +428,7 @@ SHIM_CALL _vsnprintf_shim(
local[0] = '\0';
strncat(local, start, end + 1 - start);
XEASSERT(arg_size == 4);
assert_true(arg_size == 4);
if (arg_extras == 0) {
uint32_t value = (uint32_t)SHIM_MEM_64(arg_ptr + (arg_index * 8)); // TODO: check if this is correct...
const void* pointer = (const void*)SHIM_MEM_ADDR(value);
@ -437,11 +437,11 @@ SHIM_CALL _vsnprintf_shim(
arg_index++;
}
else {
XEASSERT(false);
assert_true(false);
}
}
else {
XEASSERT(false);
assert_true(false);
break;
}
format = end;
@ -612,7 +612,7 @@ SHIM_CALL _vswprintf_shim(
local[0] = '\0';
wcsncat(local, start, end + 1 - start);
XEASSERT(arg_size == 8 || arg_size == 4);
assert_true(arg_size == 8 || arg_size == 4);
if (arg_size == 8) {
if (arg_extras == 0) {
uint64_t value = SHIM_MEM_64(arg_ptr + (arg_index * 8)); // TODO: check if this is correct...
@ -621,7 +621,7 @@ SHIM_CALL _vswprintf_shim(
arg_index++;
}
else {
XEASSERT(false);
assert_true(false);
}
}
else if (arg_size == 4) {
@ -632,20 +632,20 @@ SHIM_CALL _vswprintf_shim(
arg_index++;
}
else {
XEASSERT(false);
assert_true(false);
}
}
}
else if (*end == 'n')
{
XEASSERT(arg_size == 4);
assert_true(arg_size == 4);
if (arg_extras == 0) {
uint32_t value = (uint32_t)SHIM_MEM_64(arg_ptr + (arg_index * 8)); // TODO: check if this is correct...
SHIM_SET_MEM_32(value, (uint32_t)((b - buffer) / sizeof(wchar_t)));
arg_index++;
}
else {
XEASSERT(false);
assert_true(false);
}
}
else if (*end == 'p') {
@ -653,7 +653,7 @@ SHIM_CALL _vswprintf_shim(
local[0] = '\0';
wcsncat(local, start, end + 1 - start);
XEASSERT(arg_size == 4);
assert_true(arg_size == 4);
if (arg_extras == 0) {
uint32_t value = (uint32_t)SHIM_MEM_64(arg_ptr + (arg_index * 8)); // TODO: check if this is correct...
const void* pointer = (void*)SHIM_MEM_ADDR(value);
@ -662,7 +662,7 @@ SHIM_CALL _vswprintf_shim(
arg_index++;
}
else {
XEASSERT(false);
assert_true(false);
}
}
else if (*end == 's') {
@ -670,7 +670,7 @@ SHIM_CALL _vswprintf_shim(
local[0] = '\0';
wcsncat(local, start, end + 1 - start);
XEASSERT(arg_size == 4);
assert_true(arg_size == 4);
if (arg_extras == 0) {
uint32_t value = (uint32_t)SHIM_MEM_64(arg_ptr + (arg_index * 8)); // TODO: check if this is correct...
const wchar_t* data = (const wchar_t*)SHIM_MEM_ADDR(value);
@ -687,11 +687,11 @@ SHIM_CALL _vswprintf_shim(
arg_index++;
}
else {
XEASSERT(false);
assert_true(false);
}
}
else {
XEASSERT(false);
assert_true(false);
break;
}
format = end;
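All three shims above fetch variadic guest arguments the same way: each argument occupies its own 8-byte slot, read with SHIM_MEM_64 and truncated for 32-bit specifiers. A sketch of that convention; the helper name is hypothetical and it relies on the SHIM macros from this file:

// Hypothetical helper naming the 8-byte-slot convention used above.
static uint32_t ReadGuestArg32(uint32_t arg_ptr, uint32_t arg_index) {
  // Every argument gets a 64-bit slot; 32-bit values take the low bits.
  return (uint32_t)SHIM_MEM_64(arg_ptr + (arg_index * 8));
}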

View File

@ -69,7 +69,7 @@ X_STATUS xeExCreateThread(
uint32_t xapi_thread_startup,
uint32_t start_address, uint32_t start_context, uint32_t creation_flags) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
// DWORD
// LPHANDLE Handle,
@ -158,7 +158,7 @@ SHIM_CALL ExTerminateThread_shim(
X_STATUS xeNtResumeThread(uint32_t handle, uint32_t* out_suspend_count) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
X_STATUS result = X_STATUS_SUCCESS;
@ -198,7 +198,7 @@ SHIM_CALL NtResumeThread_shim(
X_STATUS xeKeResumeThread(void* thread_ptr, uint32_t* out_suspend_count) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
X_STATUS result = X_STATUS_SUCCESS;
@ -236,7 +236,7 @@ SHIM_CALL KeResumeThread_shim(
X_STATUS xeNtSuspendThread(uint32_t handle, uint32_t* out_suspend_count) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
X_STATUS result = X_STATUS_SUCCESS;
@ -276,7 +276,7 @@ SHIM_CALL NtSuspendThread_shim(
uint32_t xeKeSetAffinityThread(void* thread_ptr, uint32_t affinity) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
XThread* thread = (XThread*)XObject::GetObject(state, thread_ptr);
if (thread) {
@ -326,7 +326,7 @@ SHIM_CALL KeQueryBasePriorityThread_shim(
uint32_t xeKeSetBasePriorityThread(void* thread_ptr, int32_t increment) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
int32_t prev_priority = 0;
@ -358,7 +358,7 @@ SHIM_CALL KeSetBasePriorityThread_shim(
uint32_t xeKeGetCurrentProcessType() {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
// DWORD
@ -595,7 +595,7 @@ SHIM_CALL KeTlsSetValue_shim(
X_STATUS xeNtCreateEvent(uint32_t* handle_ptr, void* obj_attributes,
uint32_t event_type, uint32_t initial_state) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
XEvent* ev = new XEvent(state);
ev->Initialize(!event_type, !!initial_state);
@ -638,10 +638,10 @@ SHIM_CALL NtCreateEvent_shim(
int32_t xeKeSetEvent(void* event_ptr, uint32_t increment, uint32_t wait) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
XEvent* ev = (XEvent*)XObject::GetObject(state, event_ptr);
XEASSERTNOTNULL(ev);
assert_not_null(ev);
if (!ev) {
return 0;
}
@ -708,7 +708,7 @@ SHIM_CALL KePulseEvent_shim(
void* event_ptr = SHIM_MEM_ADDR(event_ref);
XEvent* ev = (XEvent*)XObject::GetObject(state, event_ptr);
XEASSERTNOTNULL(ev);
assert_not_null(ev);
if (ev) {
result = ev->Pulse(increment, !!wait);
}
@ -746,10 +746,10 @@ SHIM_CALL NtPulseEvent_shim(
int32_t xeKeResetEvent(void* event_ptr) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
XEvent* ev = (XEvent*)XEvent::GetObject(state, event_ptr);
XEASSERTNOTNULL(ev);
assert_not_null(ev);
if (!ev) {
return 0;
}
@ -825,11 +825,11 @@ SHIM_CALL NtCreateSemaphore_shim(
void xeKeInitializeSemaphore(
void* semaphore_ptr, int32_t count, int32_t limit) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
XSemaphore* sem = (XSemaphore*)XSemaphore::GetObject(
state, semaphore_ptr, 5 /* SemaphoreObject */);
XEASSERTNOTNULL(sem);
assert_not_null(sem);
if (!sem) {
return;
}
@ -856,10 +856,10 @@ SHIM_CALL KeInitializeSemaphore_shim(
int32_t xeKeReleaseSemaphore(
void* semaphore_ptr, int32_t increment, int32_t adjustment, bool wait) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
XSemaphore* sem = (XSemaphore*)XSemaphore::GetObject(state, semaphore_ptr);
XEASSERTNOTNULL(sem);
assert_not_null(sem);
if (!sem) {
return 0;
}
@ -954,7 +954,7 @@ SHIM_CALL NtReleaseMutant_shim(
// Whatever arg 1 is, all games seem to set it to 0, so whether it's
// abandon or wait we just say false. Which is good, because they are
// both ignored.
XEASSERTZERO(unknown);
assert_zero(unknown);
uint32_t priority_increment = 0;
bool abandon = false;
bool wait = false;
@ -1016,8 +1016,8 @@ SHIM_CALL NtSetTimerEx_shim(
uint32_t period_ms = SHIM_GET_ARG_32(6);
uint32_t unk_zero = SHIM_GET_ARG_32(7);
XEASSERT(unk_one == 1);
XEASSERT(unk_zero == 0);
assert_true(unk_one == 1);
assert_true(unk_zero == 0);
uint64_t due_time = SHIM_MEM_64(due_time_ptr);
@ -1075,7 +1075,7 @@ X_STATUS xeKeWaitForSingleObject(
void* object_ptr, uint32_t wait_reason, uint32_t processor_mode,
uint32_t alertable, uint64_t* opt_timeout) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
XObject* object = XObject::GetObject(state, object_ptr);
if (!object) {
@ -1153,7 +1153,7 @@ SHIM_CALL KeWaitForMultipleObjects_shim(
count, objects_ptr, wait_type, wait_reason, processor_mode,
alertable, timeout_ptr, wait_block_array_ptr);
XEASSERT(wait_type >= 0 && wait_type <= 1);
assert_true(wait_type >= 0 && wait_type <= 1);
X_STATUS result = X_STATUS_SUCCESS;
@ -1192,7 +1192,7 @@ SHIM_CALL NtWaitForMultipleObjectsEx_shim(
count, handles_ptr, wait_type, wait_mode,
alertable, timeout_ptr);
XEASSERT(wait_type >= 0 && wait_type <= 1);
assert_true(wait_type >= 0 && wait_type <= 1);
X_STATUS result = X_STATUS_SUCCESS;
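The wait_type range checks above reflect the usual NT encoding. A small sketch, assuming the standard WaitAll/WaitAny values:

// Assumed mapping for the wait_type parameter validated above.
enum X_WAIT_TYPE : uint32_t {
  kWaitAll = 0,  // satisfied only when every object is signaled
  kWaitAny = 1,  // satisfied as soon as any one object is signaled
};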

View File

@ -146,7 +146,7 @@ SHIM_CALL VdQueryVideoMode_shim(
void xeVdInitializeEngines(uint32_t unk0, uint32_t callback, uint32_t unk1,
uint32_t unk2_ptr, uint32_t unk3_ptr) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
GraphicsSystem* gs = state->emulator()->graphics_system();
if (!gs) {
return;
@ -188,7 +188,7 @@ SHIM_CALL VdShutdownEngines_shim(
void xeVdSetGraphicsInterruptCallback(uint32_t callback, uint32_t user_data) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
GraphicsSystem* gs = state->emulator()->graphics_system();
if (!gs) {
return;
@ -217,7 +217,7 @@ SHIM_CALL VdSetGraphicsInterruptCallback_shim(
void xeVdInitializeRingBuffer(uint32_t ptr, uint32_t page_count) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
GraphicsSystem* gs = state->emulator()->graphics_system();
if (!gs) {
return;
@ -249,7 +249,7 @@ SHIM_CALL VdInitializeRingBuffer_shim(
void xeVdEnableRingBufferRPtrWriteBack(uint32_t ptr, uint32_t block_size) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
GraphicsSystem* gs = state->emulator()->graphics_system();
if (!gs) {
return;
@ -317,7 +317,7 @@ SHIM_CALL VdGetSystemCommandBuffer_shim(
void xeVdSetSystemCommandBufferGpuIdentifierAddress(uint32_t unk) {
KernelState* state = shared_kernel_state_;
XEASSERTNOTNULL(state);
assert_not_null(state);
GraphicsSystem* gs = state->emulator()->graphics_system();
if (!gs) {
return;
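Every Vd shim above opens with the same guard. A condensed sketch; the helper name is hypothetical:

// Hypothetical helper capturing the shared guard in the Vd entry points above.
GraphicsSystem* TryGetGraphicsSystem() {
  KernelState* state = shared_kernel_state_;
  assert_not_null(state);
  GraphicsSystem* gs = state->emulator()->graphics_system();
  return gs;  // may be NULL; callers return early in that case
}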

View File

@ -36,7 +36,7 @@ typedef struct {
}
X_VIDEO_MODE;
#pragma pack(pop)
XEASSERTSTRUCTSIZE(X_VIDEO_MODE, 48);
static_assert_size(X_VIDEO_MODE, 48);
void xeVdGetCurrentDisplayGamma(uint32_t* arg0, float* arg1);
uint32_t xeVdQueryVideoFlags();

View File

@ -29,8 +29,8 @@ XObject::XObject(KernelState* kernel_state, Type type) :
}
XObject::~XObject() {
XEASSERTZERO(handle_ref_count_);
XEASSERTZERO(pointer_ref_count_);
assert_zero(handle_ref_count_);
assert_zero(pointer_ref_count_);
}
Memory* XObject::memory() const {
@ -62,7 +62,7 @@ void XObject::Retain() {
void XObject::Release() {
if (!xe_atomic_dec_32(&pointer_ref_count_)) {
XEASSERT(pointer_ref_count_ >= handle_ref_count_);
assert_true(pointer_ref_count_ >= handle_ref_count_);
delete this;
}
}
@ -75,7 +75,7 @@ uint32_t XObject::TimeoutTicksToMs(int64_t timeout_ticks) {
if (timeout_ticks > 0) {
// Absolute time, based on January 1, 1601.
// TODO(benvanik): convert time to relative time.
XEASSERTALWAYS();
assert_always();
return 0;
} else if (timeout_ticks < 0) {
// Relative time.
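Assuming the usual NT convention of 100-nanosecond ticks, with negative values meaning a relative interval, the relative branch reduces to a divide by 10,000 ticks per millisecond. A sketch of that arithmetic:

#include <cstdint>

// Sketch only: e.g. -50,000,000 ticks of 100 ns is a 5000 ms timeout.
uint32_t RelativeTicksToMs(int64_t timeout_ticks) {
  return (uint32_t)(-timeout_ticks / 10000);
}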
@ -135,7 +135,7 @@ X_STATUS XObject::WaitMultiple(
void** wait_handles = (void**)alloca(sizeof(void*) * count);
for (uint32_t n = 0; n < count; n++) {
wait_handles[n] = objects[n]->GetWaitHandle();
XEASSERTNOTNULL(wait_handles[n]);
assert_not_null(wait_handles[n]);
}
DWORD timeout_ms = opt_timeout ?
@ -167,7 +167,7 @@ void XObject::SetNativePointer(uint32_t native_ptr) {
header.wait_list_flink = XESWAP32(header_be->wait_list_flink);
header.wait_list_blink = XESWAP32(header_be->wait_list_blink);
XEASSERT(!(header.wait_list_blink & 0x1));
assert_true(!(header.wait_list_blink & 0x1));
// Stash pointer in struct.
uint64_t object_ptr = reinterpret_cast<uint64_t>(this);
@ -252,7 +252,7 @@ XObject* XObject::GetObject(KernelState* kernel_state, void* native_ptr,
case 23: // ProfileObject
case 24: // ThreadedDpcObject
default:
XEASSERTALWAYS();
assert_always();
XObject::UnlockType();
return NULL;
}

View File

@ -32,14 +32,14 @@ void xe_handle_fatal(
#if XE_OPTION_ENABLE_LOGGING
#define XELOGCORE(level, fmt, ...) xe_log_line( \
XE_CURRENT_FILE, XE_CURRENT_LINE, XE_CURRENT_FUNCTION, level, \
__FILE__, __LINE__, __FUNCTION__, level, \
fmt, ##__VA_ARGS__)
#else
#define XELOGCORE(level, fmt, ...) XE_EMPTY_MACRO
#endif // ENABLE_LOGGING
#define XEFATAL(fmt, ...) do { \
xe_handle_fatal(XE_CURRENT_FILE, XE_CURRENT_LINE, XE_CURRENT_FUNCTION, \
xe_handle_fatal(__FILE__, __LINE__, __FUNCTION__, \
fmt, ##__VA_ARGS__); \
} while (false);
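The point of expanding __FILE__/__LINE__/__FUNCTION__ inside the macro is that they capture the caller's location rather than a helper's. A minimal sketch; the macro name, level value, and xe_log_line parameter types are assumptions:

// Assumed declaration, for illustration only.
void xe_log_line(const char* file, int line, const char* function,
                 char level, const char* fmt, ...);

// Because the macro expands at the call site, the logged file/line/function
// belong to ExampleCaller, not to xe_log_line.
#define XELOG_EXAMPLE(fmt, ...) \
    xe_log_line(__FILE__, __LINE__, __FUNCTION__, 'i', fmt, ##__VA_ARGS__)

void ExampleCaller() {
  XELOG_EXAMPLE("frame %d presented", 42);
}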

View File

@ -55,7 +55,7 @@ namespace xe {
// Enters a CPU profiling scope by function name, active for the duration of
// the containing block. No previous definition required.
#define SCOPE_profile_cpu_f(group_name) \
MICROPROFILE_SCOPEI(group_name, XE_CURRENT_FUNCTION, xe::Profiler::GetColor(XE_CURRENT_FUNCTION))
MICROPROFILE_SCOPEI(group_name, __FUNCTION__, xe::Profiler::GetColor(__FUNCTION__))
// Enters a previously defined GPU profiling scope, active for the duration
// of the containing block.
@ -70,7 +70,7 @@ namespace xe {
// Enters a GPU profiling scope by function name, active for the duration of
// the containing block. No previous definition required.
#define SCOPE_profile_gpu_f(group_name) \
MICROPROFILE_SCOPEGPUI(group_name, XE_CURRENT_FUNCTION, xe::Profiler::GetColor(XE_CURRENT_FUNCTION))
MICROPROFILE_SCOPEGPUI(group_name, __FUNCTION__, xe::Profiler::GetColor(__FUNCTION__))
// Tracks a CPU value counter.
#define COUNT_profile_cpu(name, count) MICROPROFILE_META_CPU(name, count)
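A quick usage sketch of the function-named CPU scope; the function and group names here are made up. With __FUNCTION__ baked into the macro, the scope label and color follow the enclosing function automatically.

// Hypothetical usage: the scope is labeled "ProcessFrame" via __FUNCTION__.
void ProcessFrame() {
  SCOPE_profile_cpu_f("gpu");
  // ... per-frame work measured under this scope ...
}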

View File

@ -1,7 +1,6 @@
# Copyright 2013 Ben Vanik. All Rights Reserved.
{
'sources': [
'assert.h',
'atomic.h',
'byte_order.h',
'common.h',

View File

@ -100,7 +100,7 @@ bool Window::OnResize(uint32_t width, uint32_t height) {
}
void Window::EndResizing() {
XEASSERT(resizing_);
assert_true(resizing_);
resizing_ = false;
auto e = UIEvent(this);
resized(e);