diff --git a/Source/Core/AudioCommon/OpenSLESStream.cpp b/Source/Core/AudioCommon/OpenSLESStream.cpp index b7359418ee..6e1d210407 100644 --- a/Source/Core/AudioCommon/OpenSLESStream.cpp +++ b/Source/Core/AudioCommon/OpenSLESStream.cpp @@ -46,7 +46,7 @@ static void bqPlayerCallback(SLAndroidSimpleBufferQueueItf bq, void* context) // Comment from sample code: // the most likely other result is SL_RESULT_BUFFER_INSUFFICIENT, // which for this code example would indicate a programming error - _assert_msg_(AUDIO, SL_RESULT_SUCCESS == result, "Couldn't enqueue audio stream."); + ASSERT_MSG(AUDIO, SL_RESULT_SUCCESS == result, "Couldn't enqueue audio stream."); } bool OpenSLESStream::Init() diff --git a/Source/Core/Common/Arm64Emitter.cpp b/Source/Core/Common/Arm64Emitter.cpp index fe8993b354..db48d5c16f 100644 --- a/Source/Core/Common/Arm64Emitter.cpp +++ b/Source/Core/Common/Arm64Emitter.cpp @@ -210,7 +210,7 @@ bool IsImmLogical(uint64_t value, unsigned int width, unsigned int* n, unsigned int multiplier_idx = CountLeadingZeros(d, kXRegSizeInBits) - 57; // Ensure that the index to the multipliers array is within bounds. 
- _dbg_assert_(DYNA_REC, + DEBUG_ASSERT(DYNA_REC, (multiplier_idx >= 0) && (static_cast(multiplier_idx) < multipliers.size())); uint64_t multiplier = multipliers[multiplier_idx]; @@ -481,13 +481,13 @@ void ARM64XEmitter::EncodeCompareBranchInst(u32 op, ARM64Reg Rt, const void* ptr bool b64Bit = Is64Bit(Rt); s64 distance = (s64)ptr - (s64)m_code; - _assert_msg_(DYNA_REC, !(distance & 0x3), "%s: distance must be a multiple of 4: %" PRIx64, - __FUNCTION__, distance); + ASSERT_MSG(DYNA_REC, !(distance & 0x3), "%s: distance must be a multiple of 4: %" PRIx64, + __FUNCTION__, distance); distance >>= 2; - _assert_msg_(DYNA_REC, distance >= -0x40000 && distance <= 0x3FFFF, - "%s: Received too large distance: %" PRIx64, __FUNCTION__, distance); + ASSERT_MSG(DYNA_REC, distance >= -0x40000 && distance <= 0x3FFFF, + "%s: Received too large distance: %" PRIx64, __FUNCTION__, distance); Rt = DecodeReg(Rt); Write32((b64Bit << 31) | (0x34 << 24) | (op << 24) | (((u32)distance << 5) & 0xFFFFE0) | Rt); @@ -498,13 +498,13 @@ void ARM64XEmitter::EncodeTestBranchInst(u32 op, ARM64Reg Rt, u8 bits, const voi bool b64Bit = Is64Bit(Rt); s64 distance = (s64)ptr - (s64)m_code; - _assert_msg_(DYNA_REC, !(distance & 0x3), "%s: distance must be a multiple of 4: %" PRIx64, - __FUNCTION__, distance); + ASSERT_MSG(DYNA_REC, !(distance & 0x3), "%s: distance must be a multiple of 4: %" PRIx64, + __FUNCTION__, distance); distance >>= 2; - _assert_msg_(DYNA_REC, distance >= -0x3FFF && distance < 0x3FFF, - "%s: Received too large distance: %" PRIx64, __FUNCTION__, distance); + ASSERT_MSG(DYNA_REC, distance >= -0x3FFF && distance < 0x3FFF, + "%s: Received too large distance: %" PRIx64, __FUNCTION__, distance); Rt = DecodeReg(Rt); Write32((b64Bit << 31) | (0x36 << 24) | (op << 24) | (bits << 19) | @@ -515,13 +515,13 @@ void ARM64XEmitter::EncodeUnconditionalBranchInst(u32 op, const void* ptr) { s64 distance = (s64)ptr - s64(m_code); - _assert_msg_(DYNA_REC, !(distance & 0x3), "%s: distance must be a 
multiple of 4: %" PRIx64, - __FUNCTION__, distance); + ASSERT_MSG(DYNA_REC, !(distance & 0x3), "%s: distance must be a multiple of 4: %" PRIx64, + __FUNCTION__, distance); distance >>= 2; - _assert_msg_(DYNA_REC, distance >= -0x2000000LL && distance <= 0x1FFFFFFLL, - "%s: Received too large distance: %" PRIx64, __FUNCTION__, distance); + ASSERT_MSG(DYNA_REC, distance >= -0x2000000LL && distance <= 0x1FFFFFFLL, + "%s: Received too large distance: %" PRIx64, __FUNCTION__, distance); Write32((op << 31) | (0x5 << 26) | (distance & 0x3FFFFFF)); } @@ -534,8 +534,8 @@ void ARM64XEmitter::EncodeUnconditionalBranchInst(u32 opc, u32 op2, u32 op3, u32 void ARM64XEmitter::EncodeExceptionInst(u32 instenc, u32 imm) { - _assert_msg_(DYNA_REC, !(imm & ~0xFFFF), "%s: Exception instruction too large immediate: %d", - __FUNCTION__, imm); + ASSERT_MSG(DYNA_REC, !(imm & ~0xFFFF), "%s: Exception instruction too large immediate: %d", + __FUNCTION__, imm); Write32((0xD4 << 24) | (ExcEnc[instenc][0] << 21) | (imm << 5) | (ExcEnc[instenc][1] << 2) | ExcEnc[instenc][2]); @@ -574,10 +574,10 @@ void ARM64XEmitter::EncodeCondCompareImmInst(u32 op, ARM64Reg Rn, u32 imm, u32 n { bool b64Bit = Is64Bit(Rn); - _assert_msg_(DYNA_REC, !(imm & ~0x1F), "%s: too large immediate: %d", __FUNCTION__, imm) - _assert_msg_(DYNA_REC, !(nzcv & ~0xF), "%s: Flags out of range: %d", __FUNCTION__, nzcv) + ASSERT_MSG(DYNA_REC, !(imm & ~0x1F), "%s: too large immediate: %d", __FUNCTION__, imm); + ASSERT_MSG(DYNA_REC, !(nzcv & ~0xF), "%s: Flags out of range: %d", __FUNCTION__, nzcv); - Rn = DecodeReg(Rn); + Rn = DecodeReg(Rn); Write32((b64Bit << 31) | (op << 30) | (1 << 29) | (0xD2 << 21) | (imm << 16) | (cond << 12) | (1 << 11) | (Rn << 5) | nzcv); } @@ -587,9 +587,9 @@ void ARM64XEmitter::EncodeCondCompareRegInst(u32 op, ARM64Reg Rn, ARM64Reg Rm, u { bool b64Bit = Is64Bit(Rm); - _assert_msg_(DYNA_REC, !(nzcv & ~0xF), "%s: Flags out of range: %d", __FUNCTION__, nzcv) + ASSERT_MSG(DYNA_REC, !(nzcv & ~0xF), "%s: Flags 
out of range: %d", __FUNCTION__, nzcv); - Rm = DecodeReg(Rm); + Rm = DecodeReg(Rm); Rn = DecodeReg(Rn); Write32((b64Bit << 31) | (op << 30) | (1 << 29) | (0xD2 << 21) | (Rm << 16) | (cond << 12) | (Rn << 5) | nzcv); @@ -658,7 +658,7 @@ void ARM64XEmitter::EncodeLoadRegisterInst(u32 bitop, ARM64Reg Rt, u32 imm) bool b64Bit = Is64Bit(Rt); bool bVec = IsVector(Rt); - _assert_msg_(DYNA_REC, !(imm & 0xFFFFF), "%s: offset too large %d", __FUNCTION__, imm); + ASSERT_MSG(DYNA_REC, !(imm & 0xFFFFF), "%s: offset too large %d", __FUNCTION__, imm); Rt = DecodeReg(Rt); if (b64Bit && bitop != 0x2) // LDRSW(0x2) uses 64bit reg, doesn't have 64bit bit set @@ -692,7 +692,7 @@ void ARM64XEmitter::EncodeLoadStorePairedInst(u32 op, ARM64Reg Rt, ARM64Reg Rt2, else imm >>= 2; - _assert_msg_(DYNA_REC, !(imm & ~0xF), "%s: offset too large %d", __FUNCTION__, imm); + ASSERT_MSG(DYNA_REC, !(imm & ~0xF), "%s: offset too large %d", __FUNCTION__, imm); u32 opc = 0; if (b128Bit) @@ -715,7 +715,7 @@ void ARM64XEmitter::EncodeLoadStoreIndexedInst(u32 op, u32 op2, ARM64Reg Rt, ARM u32 offset = imm & 0x1FF; - _assert_msg_(DYNA_REC, !(imm < -256 || imm > 255), "%s: offset too large %d", __FUNCTION__, imm); + ASSERT_MSG(DYNA_REC, !(imm < -256 || imm > 255), "%s: offset too large %d", __FUNCTION__, imm); Rt = DecodeReg(Rt); Rn = DecodeReg(Rn); @@ -735,10 +735,10 @@ void ARM64XEmitter::EncodeLoadStoreIndexedInst(u32 op, ARM64Reg Rt, ARM64Reg Rn, else if (size == 16) imm >>= 1; - _assert_msg_(DYNA_REC, imm >= 0, "%s(INDEX_UNSIGNED): offset must be positive %d", __FUNCTION__, - imm); - _assert_msg_(DYNA_REC, !(imm & ~0xFFF), "%s(INDEX_UNSIGNED): offset too large %d", __FUNCTION__, - imm); + ASSERT_MSG(DYNA_REC, imm >= 0, "%s(INDEX_UNSIGNED): offset must be positive %d", __FUNCTION__, + imm); + ASSERT_MSG(DYNA_REC, !(imm & ~0xFFF), "%s(INDEX_UNSIGNED): offset too large %d", __FUNCTION__, + imm); Rt = DecodeReg(Rt); Rn = DecodeReg(Rn); @@ -749,7 +749,7 @@ void ARM64XEmitter::EncodeMOVWideInst(u32 op, 
ARM64Reg Rd, u32 imm, ShiftAmount { bool b64Bit = Is64Bit(Rd); - _assert_msg_(DYNA_REC, !(imm & ~0xFFFF), "%s: immediate out of range: %d", __FUNCTION__, imm); + ASSERT_MSG(DYNA_REC, !(imm & ~0xFFFF), "%s: immediate out of range: %d", __FUNCTION__, imm); Rd = DecodeReg(Rd); Write32((b64Bit << 31) | (op << 29) | (0x25 << 23) | (pos << 21) | (imm << 5) | Rd); @@ -781,7 +781,7 @@ void ARM64XEmitter::EncodeAddSubImmInst(u32 op, bool flags, u32 shift, u32 imm, { bool b64Bit = Is64Bit(Rd); - _assert_msg_(DYNA_REC, !(imm & ~0xFFF), "%s: immediate too large: %x", __FUNCTION__, imm); + ASSERT_MSG(DYNA_REC, !(imm & ~0xFFF), "%s: immediate too large: %x", __FUNCTION__, imm); Rd = DecodeReg(Rd); Rn = DecodeReg(Rn); @@ -821,7 +821,7 @@ void ARM64XEmitter::EncodeLoadStorePair(u32 op, u32 load, IndexType type, ARM64R type_encode = 0b011; break; case INDEX_UNSIGNED: - _assert_msg_(DYNA_REC, false, "%s doesn't support INDEX_UNSIGNED!", __FUNCTION__); + ASSERT_MSG(DYNA_REC, false, "%s doesn't support INDEX_UNSIGNED!", __FUNCTION__); break; } @@ -851,8 +851,8 @@ void ARM64XEmitter::EncodeAddressInst(u32 op, ARM64Reg Rd, s32 imm) void ARM64XEmitter::EncodeLoadStoreUnscaled(u32 size, u32 op, ARM64Reg Rt, ARM64Reg Rn, s32 imm) { - _assert_msg_(DYNA_REC, !(imm < -256 || imm > 255), "%s received too large offset: %d", - __FUNCTION__, imm); + ASSERT_MSG(DYNA_REC, !(imm < -256 || imm > 255), "%s received too large offset: %d", __FUNCTION__, + imm); Rt = DecodeReg(Rt); Rn = DecodeReg(Rn); @@ -903,42 +903,37 @@ void ARM64XEmitter::SetJumpTarget(FixupBranch const& branch) Not = true; case 0: // CBZ { - _assert_msg_(DYNA_REC, IsInRangeImm19(distance), - "%s(%d): Received too large distance: %" PRIx64, __FUNCTION__, branch.type, - distance); + ASSERT_MSG(DYNA_REC, IsInRangeImm19(distance), "%s(%d): Received too large distance: %" PRIx64, + __FUNCTION__, branch.type, distance); bool b64Bit = Is64Bit(branch.reg); ARM64Reg reg = DecodeReg(branch.reg); inst = (b64Bit << 31) | (0x1A << 25) | (Not << 
24) | (MaskImm19(distance) << 5) | reg; } break; case 2: // B (conditional) - _assert_msg_(DYNA_REC, IsInRangeImm19(distance), - "%s(%d): Received too large distance: %" PRIx64, __FUNCTION__, branch.type, - distance); + ASSERT_MSG(DYNA_REC, IsInRangeImm19(distance), "%s(%d): Received too large distance: %" PRIx64, + __FUNCTION__, branch.type, distance); inst = (0x2A << 25) | (MaskImm19(distance) << 5) | branch.cond; break; case 4: // TBNZ Not = true; case 3: // TBZ { - _assert_msg_(DYNA_REC, IsInRangeImm14(distance), - "%s(%d): Received too large distance: %" PRIx64, __FUNCTION__, branch.type, - distance); + ASSERT_MSG(DYNA_REC, IsInRangeImm14(distance), "%s(%d): Received too large distance: %" PRIx64, + __FUNCTION__, branch.type, distance); ARM64Reg reg = DecodeReg(branch.reg); inst = ((branch.bit & 0x20) << 26) | (0x1B << 25) | (Not << 24) | ((branch.bit & 0x1F) << 19) | (MaskImm14(distance) << 5) | reg; } break; case 5: // B (uncoditional) - _assert_msg_(DYNA_REC, IsInRangeImm26(distance), - "%s(%d): Received too large distance: %" PRIx64, __FUNCTION__, branch.type, - distance); + ASSERT_MSG(DYNA_REC, IsInRangeImm26(distance), "%s(%d): Received too large distance: %" PRIx64, + __FUNCTION__, branch.type, distance); inst = (0x5 << 26) | MaskImm26(distance); break; case 6: // BL (unconditional) - _assert_msg_(DYNA_REC, IsInRangeImm26(distance), - "%s(%d): Received too large distance: %" PRIx64, __FUNCTION__, branch.type, - distance); + ASSERT_MSG(DYNA_REC, IsInRangeImm26(distance), "%s(%d): Received too large distance: %" PRIx64, + __FUNCTION__, branch.type, distance); inst = (0x25 << 26) | MaskImm26(distance); break; } @@ -1026,9 +1021,9 @@ void ARM64XEmitter::B(CCFlags cond, const void* ptr) distance >>= 2; - _assert_msg_(DYNA_REC, IsInRangeImm19(distance), - "%s: Received too large distance: %p->%p %" PRIi64 " %" PRIx64, __FUNCTION__, m_code, - ptr, distance, distance); + ASSERT_MSG(DYNA_REC, IsInRangeImm19(distance), + "%s: Received too large distance: %p->%p 
%" PRIi64 " %" PRIx64, __FUNCTION__, m_code, + ptr, distance, distance); Write32((0x54 << 24) | (MaskImm19(distance) << 5) | cond); } @@ -1151,7 +1146,7 @@ void ARM64XEmitter::_MSR(PStateField field, u8 imm) op2 = 7; break; default: - _assert_msg_(DYNA_REC, false, "Invalid PStateField to do a imm move to"); + ASSERT_MSG(DYNA_REC, false, "Invalid PStateField to do a imm move to"); break; } EncodeSystemInst(0, op1, 4, imm, op2, WSP); @@ -1197,7 +1192,7 @@ static void GetSystemReg(PStateField field, int& o0, int& op1, int& CRn, int& CR op2 = 0; break; default: - _assert_msg_(DYNA_REC, false, "Invalid PStateField to do a register move from/to"); + ASSERT_MSG(DYNA_REC, false, "Invalid PStateField to do a register move from/to"); break; } } @@ -1205,7 +1200,7 @@ static void GetSystemReg(PStateField field, int& o0, int& op1, int& CRn, int& CR void ARM64XEmitter::_MSR(PStateField field, ARM64Reg Rt) { int o0 = 0, op1 = 0, CRn = 0, CRm = 0, op2 = 0; - _assert_msg_(DYNA_REC, Is64Bit(Rt), "MSR: Rt must be 64-bit"); + ASSERT_MSG(DYNA_REC, Is64Bit(Rt), "MSR: Rt must be 64-bit"); GetSystemReg(field, o0, op1, CRn, CRm, op2); EncodeSystemInst(o0, op1, CRn, CRm, op2, DecodeReg(Rt)); } @@ -1213,14 +1208,14 @@ void ARM64XEmitter::_MSR(PStateField field, ARM64Reg Rt) void ARM64XEmitter::MRS(ARM64Reg Rt, PStateField field) { int o0 = 0, op1 = 0, CRn = 0, CRm = 0, op2 = 0; - _assert_msg_(DYNA_REC, Is64Bit(Rt), "MRS: Rt must be 64-bit"); + ASSERT_MSG(DYNA_REC, Is64Bit(Rt), "MRS: Rt must be 64-bit"); GetSystemReg(field, o0, op1, CRn, CRm, op2); EncodeSystemInst(o0 | 4, op1, CRn, CRm, op2, DecodeReg(Rt)); } void ARM64XEmitter::CNTVCT(Arm64Gen::ARM64Reg Rt) { - _assert_msg_(DYNA_REC, Is64Bit(Rt), "CNTVCT: Rt must be 64-bit"); + ASSERT_MSG(DYNA_REC, Is64Bit(Rt), "CNTVCT: Rt must be 64-bit"); // MRS , CNTVCT_EL0 ; Read CNTVCT_EL0 into Xt EncodeSystemInst(3 | 4, 3, 0xe, 0, 2, DecodeReg(Rt)); @@ -1542,7 +1537,7 @@ void ARM64XEmitter::MOV(ARM64Reg Rd, ARM64Reg Rm) if (IsGPR(Rd) && IsGPR(Rm)) 
ORR(Rd, Is64Bit(Rd) ? ZR : WZR, Rm, ArithOption(Rm, ST_LSL, 0)); else - _assert_msg_(DYNA_REC, false, "Non-GPRs not supported in MOV"); + ASSERT_MSG(DYNA_REC, false, "Non-GPRs not supported in MOV"); } void ARM64XEmitter::MVN(ARM64Reg Rd, ARM64Reg Rm) { @@ -1643,17 +1638,17 @@ void ARM64XEmitter::UBFM(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms) void ARM64XEmitter::BFI(ARM64Reg Rd, ARM64Reg Rn, u32 lsb, u32 width) { u32 size = Is64Bit(Rn) ? 64 : 32; - _assert_msg_(DYNA_REC, (lsb + width) <= size, - "%s passed lsb %d and width %d which is greater than the register size!", - __FUNCTION__, lsb, width); + ASSERT_MSG(DYNA_REC, (lsb + width) <= size, + "%s passed lsb %d and width %d which is greater than the register size!", __FUNCTION__, + lsb, width); EncodeBitfieldMOVInst(1, Rd, Rn, (size - lsb) % size, width - 1); } void ARM64XEmitter::UBFIZ(ARM64Reg Rd, ARM64Reg Rn, u32 lsb, u32 width) { u32 size = Is64Bit(Rn) ? 64 : 32; - _assert_msg_(DYNA_REC, (lsb + width) <= size, - "%s passed lsb %d and width %d which is greater than the register size!", - __FUNCTION__, lsb, width); + ASSERT_MSG(DYNA_REC, (lsb + width) <= size, + "%s passed lsb %d and width %d which is greater than the register size!", __FUNCTION__, + lsb, width); EncodeBitfieldMOVInst(2, Rd, Rn, (size - lsb) % size, width - 1); } void ARM64XEmitter::EXTR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, u32 shift) @@ -1675,7 +1670,7 @@ void ARM64XEmitter::SXTH(ARM64Reg Rd, ARM64Reg Rn) } void ARM64XEmitter::SXTW(ARM64Reg Rd, ARM64Reg Rn) { - _assert_msg_(DYNA_REC, Is64Bit(Rd), "%s requires 64bit register as destination", __FUNCTION__); + ASSERT_MSG(DYNA_REC, Is64Bit(Rd), "%s requires 64bit register as destination", __FUNCTION__); SBFM(Rd, Rn, 0, 31); } void ARM64XEmitter::UXTB(ARM64Reg Rd, ARM64Reg Rn) @@ -1966,7 +1961,7 @@ void ARM64XEmitter::LDUR(ARM64Reg Rt, ARM64Reg Rn, s32 imm) } void ARM64XEmitter::LDURSW(ARM64Reg Rt, ARM64Reg Rn, s32 imm) { - _assert_msg_(DYNA_REC, !Is64Bit(Rt), "%s must have a 64bit destination 
register!", __FUNCTION__); + ASSERT_MSG(DYNA_REC, !Is64Bit(Rt), "%s must have a 64bit destination register!", __FUNCTION__); EncodeLoadStoreUnscaled(2, 2, Rt, Rn, imm); } @@ -2117,7 +2112,7 @@ void ARM64XEmitter::ABI_PushRegisters(BitSet32 registers) for (int i = 0; i < (num_regs - 1) / 2; i++) STP(INDEX_SIGNED, (ARM64Reg)(X0 + *it++), (ARM64Reg)(X0 + *it++), SP, 16 * (i + 1)); - _assert_msg_(DYNA_REC, it == registers.end(), "%s registers don't match.", __FUNCTION__); + ASSERT_MSG(DYNA_REC, it == registers.end(), "%s registers don't match.", __FUNCTION__); } void ARM64XEmitter::ABI_PopRegisters(BitSet32 registers, BitSet32 ignore_mask) @@ -2148,7 +2143,7 @@ void ARM64XEmitter::ABI_PopRegisters(BitSet32 registers, BitSet32 ignore_mask) else LDP(INDEX_POST, first, second, SP, stack_size); - _assert_msg_(DYNA_REC, it == registers.end(), "%s registers don't match.", __FUNCTION__); + ASSERT_MSG(DYNA_REC, it == registers.end(), "%s registers don't match.", __FUNCTION__); } // Float Emitter @@ -2173,11 +2168,11 @@ void ARM64FloatEmitter::EmitLoadStoreImmediate(u8 size, u32 opc, IndexType type, if (type == INDEX_UNSIGNED) { - _assert_msg_(DYNA_REC, !(imm & ((size - 1) >> 3)), - "%s(INDEX_UNSIGNED) immediate offset must be aligned to size! (%d) (%p)", - __FUNCTION__, imm, m_emit->GetCodePtr()); - _assert_msg_(DYNA_REC, imm >= 0, "%s(INDEX_UNSIGNED) immediate offset must be positive!", - __FUNCTION__); + ASSERT_MSG(DYNA_REC, !(imm & ((size - 1) >> 3)), + "%s(INDEX_UNSIGNED) immediate offset must be aligned to size! 
(%d) (%p)", + __FUNCTION__, imm, m_emit->GetCodePtr()); + ASSERT_MSG(DYNA_REC, imm >= 0, "%s(INDEX_UNSIGNED) immediate offset must be positive!", + __FUNCTION__); if (size == 16) imm >>= 1; else if (size == 32) @@ -2190,8 +2185,8 @@ void ARM64FloatEmitter::EmitLoadStoreImmediate(u8 size, u32 opc, IndexType type, } else { - _assert_msg_(DYNA_REC, !(imm < -256 || imm > 255), - "%s immediate offset must be within range of -256 to 256!", __FUNCTION__); + ASSERT_MSG(DYNA_REC, !(imm < -256 || imm > 255), + "%s immediate offset must be within range of -256 to 256!", __FUNCTION__); encoded_imm = (imm & 0x1FF) << 2; if (type == INDEX_POST) encoded_imm |= 1; @@ -2206,8 +2201,7 @@ void ARM64FloatEmitter::EmitLoadStoreImmediate(u8 size, u32 opc, IndexType type, void ARM64FloatEmitter::EmitScalar2Source(bool M, bool S, u32 type, u32 opcode, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { - _assert_msg_(DYNA_REC, !IsQuad(Rd), "%s only supports double and single registers!", - __FUNCTION__); + ASSERT_MSG(DYNA_REC, !IsQuad(Rd), "%s only supports double and single registers!", __FUNCTION__); Rd = DecodeReg(Rd); Rn = DecodeReg(Rn); Rm = DecodeReg(Rm); @@ -2219,7 +2213,7 @@ void ARM64FloatEmitter::EmitScalar2Source(bool M, bool S, u32 type, u32 opcode, void ARM64FloatEmitter::EmitThreeSame(bool U, u32 size, u32 opcode, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { - _assert_msg_(DYNA_REC, !IsSingle(Rd), "%s doesn't support singles!", __FUNCTION__); + ASSERT_MSG(DYNA_REC, !IsSingle(Rd), "%s doesn't support singles!", __FUNCTION__); bool quad = IsQuad(Rd); Rd = DecodeReg(Rd); Rn = DecodeReg(Rn); @@ -2240,7 +2234,7 @@ void ARM64FloatEmitter::EmitCopy(bool Q, u32 op, u32 imm5, u32 imm4, ARM64Reg Rd void ARM64FloatEmitter::Emit2RegMisc(bool Q, bool U, u32 size, u32 opcode, ARM64Reg Rd, ARM64Reg Rn) { - _assert_msg_(DYNA_REC, !IsSingle(Rd), "%s doesn't support singles!", __FUNCTION__); + ASSERT_MSG(DYNA_REC, !IsSingle(Rd), "%s doesn't support singles!", __FUNCTION__); Rd = DecodeReg(Rd); Rn = 
DecodeReg(Rn); @@ -2251,7 +2245,7 @@ void ARM64FloatEmitter::Emit2RegMisc(bool Q, bool U, u32 size, u32 opcode, ARM64 void ARM64FloatEmitter::EmitLoadStoreSingleStructure(bool L, bool R, u32 opcode, bool S, u32 size, ARM64Reg Rt, ARM64Reg Rn) { - _assert_msg_(DYNA_REC, !IsSingle(Rt), "%s doesn't support singles!", __FUNCTION__); + ASSERT_MSG(DYNA_REC, !IsSingle(Rt), "%s doesn't support singles!", __FUNCTION__); bool quad = IsQuad(Rt); Rt = DecodeReg(Rt); Rn = DecodeReg(Rn); @@ -2263,7 +2257,7 @@ void ARM64FloatEmitter::EmitLoadStoreSingleStructure(bool L, bool R, u32 opcode, void ARM64FloatEmitter::EmitLoadStoreSingleStructure(bool L, bool R, u32 opcode, bool S, u32 size, ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm) { - _assert_msg_(DYNA_REC, !IsSingle(Rt), "%s doesn't support singles!", __FUNCTION__); + ASSERT_MSG(DYNA_REC, !IsSingle(Rt), "%s doesn't support singles!", __FUNCTION__); bool quad = IsQuad(Rt); Rt = DecodeReg(Rt); Rn = DecodeReg(Rn); @@ -2275,7 +2269,7 @@ void ARM64FloatEmitter::EmitLoadStoreSingleStructure(bool L, bool R, u32 opcode, void ARM64FloatEmitter::Emit1Source(bool M, bool S, u32 type, u32 opcode, ARM64Reg Rd, ARM64Reg Rn) { - _assert_msg_(DYNA_REC, !IsQuad(Rd), "%s doesn't support vector!", __FUNCTION__); + ASSERT_MSG(DYNA_REC, !IsQuad(Rd), "%s doesn't support vector!", __FUNCTION__); Rd = DecodeReg(Rd); Rn = DecodeReg(Rn); @@ -2286,7 +2280,7 @@ void ARM64FloatEmitter::Emit1Source(bool M, bool S, u32 type, u32 opcode, ARM64R void ARM64FloatEmitter::EmitConversion(bool sf, bool S, u32 type, u32 rmode, u32 opcode, ARM64Reg Rd, ARM64Reg Rn) { - _assert_msg_(DYNA_REC, Rn <= SP, "%s only supports GPR as source!", __FUNCTION__); + ASSERT_MSG(DYNA_REC, Rn <= SP, "%s only supports GPR as source!", __FUNCTION__); Rd = DecodeReg(Rd); Rn = DecodeReg(Rn); @@ -2297,7 +2291,7 @@ void ARM64FloatEmitter::EmitConversion(bool sf, bool S, u32 type, u32 rmode, u32 void ARM64FloatEmitter::EmitConvertScalarToInt(ARM64Reg Rd, ARM64Reg Rn, RoundingMode round, bool sign) 
{ - _dbg_assert_msg_(DYNA_REC, IsScalar(Rn), "fcvts: Rn must be floating point"); + DEBUG_ASSERT_MSG(DYNA_REC, IsScalar(Rn), "fcvts: Rn must be floating point"); if (IsGPR(Rd)) { // Use the encoding that transfers the result to a GPR. @@ -2382,7 +2376,7 @@ void ARM64FloatEmitter::EmitConversion2(bool sf, bool S, bool direction, u32 typ void ARM64FloatEmitter::EmitCompare(bool M, bool S, u32 op, u32 opcode2, ARM64Reg Rn, ARM64Reg Rm) { - _assert_msg_(DYNA_REC, !IsQuad(Rn), "%s doesn't support vector!", __FUNCTION__); + ASSERT_MSG(DYNA_REC, !IsQuad(Rn), "%s doesn't support vector!", __FUNCTION__); bool is_double = IsDouble(Rn); Rn = DecodeReg(Rn); @@ -2395,7 +2389,7 @@ void ARM64FloatEmitter::EmitCompare(bool M, bool S, u32 op, u32 opcode2, ARM64Re void ARM64FloatEmitter::EmitCondSelect(bool M, bool S, CCFlags cond, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { - _assert_msg_(DYNA_REC, !IsQuad(Rd), "%s doesn't support vector!", __FUNCTION__); + ASSERT_MSG(DYNA_REC, !IsQuad(Rd), "%s doesn't support vector!", __FUNCTION__); bool is_double = IsDouble(Rd); Rd = DecodeReg(Rd); @@ -2408,7 +2402,7 @@ void ARM64FloatEmitter::EmitCondSelect(bool M, bool S, CCFlags cond, ARM64Reg Rd void ARM64FloatEmitter::EmitPermute(u32 size, u32 op, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { - _assert_msg_(DYNA_REC, !IsSingle(Rd), "%s doesn't support singles!", __FUNCTION__); + ASSERT_MSG(DYNA_REC, !IsSingle(Rd), "%s doesn't support singles!", __FUNCTION__); bool quad = IsQuad(Rd); @@ -2430,7 +2424,7 @@ void ARM64FloatEmitter::EmitPermute(u32 size, u32 op, ARM64Reg Rd, ARM64Reg Rn, void ARM64FloatEmitter::EmitScalarImm(bool M, bool S, u32 type, u32 imm5, ARM64Reg Rd, u32 imm8) { - _assert_msg_(DYNA_REC, !IsQuad(Rd), "%s doesn't support vector!", __FUNCTION__); + ASSERT_MSG(DYNA_REC, !IsQuad(Rd), "%s doesn't support vector!", __FUNCTION__); bool is_double = !IsSingle(Rd); @@ -2443,7 +2437,7 @@ void ARM64FloatEmitter::EmitScalarImm(bool M, bool S, u32 type, u32 imm5, ARM64R void 
ARM64FloatEmitter::EmitShiftImm(bool Q, bool U, u32 immh, u32 immb, u32 opcode, ARM64Reg Rd, ARM64Reg Rn) { - _assert_msg_(DYNA_REC, immh, "%s bad encoding! Can't have zero immh", __FUNCTION__); + ASSERT_MSG(DYNA_REC, immh, "%s bad encoding! Can't have zero immh", __FUNCTION__); Rd = DecodeReg(Rd); Rn = DecodeReg(Rn); @@ -2506,7 +2500,7 @@ void ARM64FloatEmitter::EmitLoadStoreMultipleStructurePost(u32 size, bool L, u32 void ARM64FloatEmitter::EmitScalar1Source(bool M, bool S, u32 type, u32 opcode, ARM64Reg Rd, ARM64Reg Rn) { - _assert_msg_(DYNA_REC, !IsQuad(Rd), "%s doesn't support vector!", __FUNCTION__); + ASSERT_MSG(DYNA_REC, !IsQuad(Rd), "%s doesn't support vector!", __FUNCTION__); Rd = DecodeReg(Rd); Rn = DecodeReg(Rn); @@ -2530,8 +2524,8 @@ void ARM64FloatEmitter::EmitVectorxElement(bool U, u32 size, bool L, u32 opcode, void ARM64FloatEmitter::EmitLoadStoreUnscaled(u32 size, u32 op, ARM64Reg Rt, ARM64Reg Rn, s32 imm) { - _assert_msg_(DYNA_REC, !(imm < -256 || imm > 255), "%s received too large offset: %d", - __FUNCTION__, imm); + ASSERT_MSG(DYNA_REC, !(imm < -256 || imm > 255), "%s received too large offset: %d", __FUNCTION__, + imm); Rt = DecodeReg(Rt); Rn = DecodeReg(Rn); @@ -2556,25 +2550,25 @@ void ARM64FloatEmitter::EncodeLoadStorePair(u32 size, bool load, IndexType type, type_encode = 0b011; break; case INDEX_UNSIGNED: - _assert_msg_(DYNA_REC, false, "%s doesn't support INDEX_UNSIGNED!", __FUNCTION__); + ASSERT_MSG(DYNA_REC, false, "%s doesn't support INDEX_UNSIGNED!", __FUNCTION__); break; } if (size == 128) { - _assert_msg_(DYNA_REC, !(imm & 0xF), "%s received invalid offset 0x%x!", __FUNCTION__, imm); + ASSERT_MSG(DYNA_REC, !(imm & 0xF), "%s received invalid offset 0x%x!", __FUNCTION__, imm); opc = 2; imm >>= 4; } else if (size == 64) { - _assert_msg_(DYNA_REC, !(imm & 0x7), "%s received invalid offset 0x%x!", __FUNCTION__, imm); + ASSERT_MSG(DYNA_REC, !(imm & 0x7), "%s received invalid offset 0x%x!", __FUNCTION__, imm); opc = 1; imm >>= 3; } else if 
(size == 32) { - _assert_msg_(DYNA_REC, !(imm & 0x3), "%s received invalid offset 0x%x!", __FUNCTION__, imm); + ASSERT_MSG(DYNA_REC, !(imm & 0x3), "%s received invalid offset 0x%x!", __FUNCTION__, imm); opc = 0; imm >>= 2; } @@ -2590,8 +2584,8 @@ void ARM64FloatEmitter::EncodeLoadStorePair(u32 size, bool load, IndexType type, void ARM64FloatEmitter::EncodeLoadStoreRegisterOffset(u32 size, bool load, ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm) { - _assert_msg_(DYNA_REC, Rm.GetType() == ArithOption::TYPE_EXTENDEDREG, - "%s must contain an extended reg as Rm!", __FUNCTION__); + ASSERT_MSG(DYNA_REC, Rm.GetType() == ArithOption::TYPE_EXTENDEDREG, + "%s must contain an extended reg as Rm!", __FUNCTION__); u32 encoded_size = 0; u32 encoded_op = 0; @@ -2952,8 +2946,8 @@ void ARM64FloatEmitter::ST1(u8 size, ARM64Reg Rt, u8 index, ARM64Reg Rn, ARM64Re // Loadstore multiple structure void ARM64FloatEmitter::LD1(u8 size, u8 count, ARM64Reg Rt, ARM64Reg Rn) { - _assert_msg_(DYNA_REC, !(count == 0 || count > 4), "%s must have a count of 1 to 4 registers!", - __FUNCTION__); + ASSERT_MSG(DYNA_REC, !(count == 0 || count > 4), "%s must have a count of 1 to 4 registers!", + __FUNCTION__); u32 opcode = 0; if (count == 1) opcode = 0b111; @@ -2968,9 +2962,9 @@ void ARM64FloatEmitter::LD1(u8 size, u8 count, ARM64Reg Rt, ARM64Reg Rn) void ARM64FloatEmitter::LD1(u8 size, u8 count, IndexType type, ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm) { - _assert_msg_(DYNA_REC, !(count == 0 || count > 4), "%s must have a count of 1 to 4 registers!", - __FUNCTION__); - _assert_msg_(DYNA_REC, type == INDEX_POST, "%s only supports post indexing!", __FUNCTION__); + ASSERT_MSG(DYNA_REC, !(count == 0 || count > 4), "%s must have a count of 1 to 4 registers!", + __FUNCTION__); + ASSERT_MSG(DYNA_REC, type == INDEX_POST, "%s only supports post indexing!", __FUNCTION__); u32 opcode = 0; if (count == 1) @@ -2985,8 +2979,8 @@ void ARM64FloatEmitter::LD1(u8 size, u8 count, IndexType type, ARM64Reg Rt, ARM6 } void 
ARM64FloatEmitter::ST1(u8 size, u8 count, ARM64Reg Rt, ARM64Reg Rn) { - _assert_msg_(DYNA_REC, !(count == 0 || count > 4), "%s must have a count of 1 to 4 registers!", - __FUNCTION__); + ASSERT_MSG(DYNA_REC, !(count == 0 || count > 4), "%s must have a count of 1 to 4 registers!", + __FUNCTION__); u32 opcode = 0; if (count == 1) opcode = 0b111; @@ -3001,9 +2995,9 @@ void ARM64FloatEmitter::ST1(u8 size, u8 count, ARM64Reg Rt, ARM64Reg Rn) void ARM64FloatEmitter::ST1(u8 size, u8 count, IndexType type, ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm) { - _assert_msg_(DYNA_REC, !(count == 0 || count > 4), "%s must have a count of 1 to 4 registers!", - __FUNCTION__); - _assert_msg_(DYNA_REC, type == INDEX_POST, "%s only supports post indexing!", __FUNCTION__); + ASSERT_MSG(DYNA_REC, !(count == 0 || count > 4), "%s must have a count of 1 to 4 registers!", + __FUNCTION__); + ASSERT_MSG(DYNA_REC, type == INDEX_POST, "%s only supports post indexing!", __FUNCTION__); u32 opcode = 0; if (count == 1) @@ -3026,7 +3020,7 @@ void ARM64FloatEmitter::FMOV(ARM64Reg Rd, ARM64Reg Rn, bool top) } else { - _assert_msg_(DYNA_REC, !IsQuad(Rd) && !IsQuad(Rn), "FMOV can't move to/from quads"); + ASSERT_MSG(DYNA_REC, !IsQuad(Rd) && !IsQuad(Rn), "FMOV can't move to/from quads"); int rmode = 0; int opcode = 6; int sf = 0; @@ -3042,7 +3036,7 @@ void ARM64FloatEmitter::FMOV(ARM64Reg Rd, ARM64Reg Rn, bool top) else { // TODO - _assert_msg_(DYNA_REC, 0, "FMOV: Unhandled case"); + ASSERT_MSG(DYNA_REC, 0, "FMOV: Unhandled case"); } Rd = DecodeReg(Rd); Rn = DecodeReg(Rn); @@ -3405,9 +3399,9 @@ void ARM64FloatEmitter::INS(u8 size, ARM64Reg Rd, u8 index1, ARM64Reg Rn, u8 ind void ARM64FloatEmitter::UMOV(u8 size, ARM64Reg Rd, ARM64Reg Rn, u8 index) { bool b64Bit = Is64Bit(Rd); - _assert_msg_(DYNA_REC, Rd < SP, "%s destination must be a GPR!", __FUNCTION__); - _assert_msg_(DYNA_REC, !(b64Bit && size != 64), - "%s must have a size of 64 when destination is 64bit!", __FUNCTION__); + ASSERT_MSG(DYNA_REC, Rd < SP, "%s 
destination must be a GPR!", __FUNCTION__); + ASSERT_MSG(DYNA_REC, !(b64Bit && size != 64), + "%s must have a size of 64 when destination is 64bit!", __FUNCTION__); u32 imm5 = 0; if (size == 8) @@ -3436,9 +3430,8 @@ void ARM64FloatEmitter::UMOV(u8 size, ARM64Reg Rd, ARM64Reg Rn, u8 index) void ARM64FloatEmitter::SMOV(u8 size, ARM64Reg Rd, ARM64Reg Rn, u8 index) { bool b64Bit = Is64Bit(Rd); - _assert_msg_(DYNA_REC, Rd < SP, "%s destination must be a GPR!", __FUNCTION__); - _assert_msg_(DYNA_REC, size != 64, "%s doesn't support 64bit destination. Use UMOV!", - __FUNCTION__); + ASSERT_MSG(DYNA_REC, Rd < SP, "%s destination must be a GPR!", __FUNCTION__); + ASSERT_MSG(DYNA_REC, size != 64, "%s doesn't support 64bit destination. Use UMOV!", __FUNCTION__); u32 imm5 = 0; if (size == 8) @@ -3670,8 +3663,8 @@ void ARM64FloatEmitter::UXTL2(u8 src_size, ARM64Reg Rd, ARM64Reg Rn) void ARM64FloatEmitter::SSHLL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift, bool upper) { - _assert_msg_(DYNA_REC, shift < src_size, "%s shift amount must less than the element size!", - __FUNCTION__); + ASSERT_MSG(DYNA_REC, shift < src_size, "%s shift amount must less than the element size!", + __FUNCTION__); u32 immh = 0; u32 immb = shift & 0xFFF; @@ -3693,8 +3686,8 @@ void ARM64FloatEmitter::SSHLL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift, void ARM64FloatEmitter::USHLL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift, bool upper) { - _assert_msg_(DYNA_REC, shift < src_size, "%s shift amount must less than the element size!", - __FUNCTION__); + ASSERT_MSG(DYNA_REC, shift < src_size, "%s shift amount must less than the element size!", + __FUNCTION__); u32 immh = 0; u32 immb = shift & 0xFFF; @@ -3716,8 +3709,8 @@ void ARM64FloatEmitter::USHLL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift, void ARM64FloatEmitter::SHRN(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift, bool upper) { - _assert_msg_(DYNA_REC, shift < dest_size, "%s shift amount must less than the element size!", - 
__FUNCTION__); + ASSERT_MSG(DYNA_REC, shift < dest_size, "%s shift amount must less than the element size!", + __FUNCTION__); u32 immh = 0; u32 immb = shift & 0xFFF; @@ -3750,8 +3743,8 @@ void ARM64FloatEmitter::UXTL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, bool upper) // vector x indexed element void ARM64FloatEmitter::FMUL(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, u8 index) { - _assert_msg_(DYNA_REC, size == 32 || size == 64, "%s only supports 32bit or 64bit size!", - __FUNCTION__); + ASSERT_MSG(DYNA_REC, size == 32 || size == 64, "%s only supports 32bit or 64bit size!", + __FUNCTION__); bool L = false; bool H = false; @@ -3770,8 +3763,8 @@ void ARM64FloatEmitter::FMUL(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, u8 void ARM64FloatEmitter::FMLA(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, u8 index) { - _assert_msg_(DYNA_REC, size == 32 || size == 64, "%s only supports 32bit or 64bit size!", - __FUNCTION__); + ASSERT_MSG(DYNA_REC, size == 32 || size == 64, "%s only supports 32bit or 64bit size!", + __FUNCTION__); bool L = false; bool H = false; @@ -3797,26 +3790,24 @@ void ARM64FloatEmitter::MOVI(u8 size, ARM64Reg Rd, u64 imm, u8 shift) u8 abcdefgh = imm & 0xFF; if (size == 8) { - _assert_msg_(DYNA_REC, shift == 0, "%s(size8) doesn't support shift!", __FUNCTION__); - _assert_msg_(DYNA_REC, !(imm & ~0xFFULL), "%s(size8) only supports 8bit values!", __FUNCTION__); + ASSERT_MSG(DYNA_REC, shift == 0, "%s(size8) doesn't support shift!", __FUNCTION__); + ASSERT_MSG(DYNA_REC, !(imm & ~0xFFULL), "%s(size8) only supports 8bit values!", __FUNCTION__); } else if (size == 16) { - _assert_msg_(DYNA_REC, shift == 0 || shift == 8, "%s(size16) only supports shift of {0, 8}!", - __FUNCTION__); - _assert_msg_(DYNA_REC, !(imm & ~0xFFULL), "%s(size16) only supports 8bit values!", - __FUNCTION__); + ASSERT_MSG(DYNA_REC, shift == 0 || shift == 8, "%s(size16) only supports shift of {0, 8}!", + __FUNCTION__); + ASSERT_MSG(DYNA_REC, !(imm & ~0xFFULL), "%s(size16) only supports 
8bit values!", __FUNCTION__); if (shift == 8) cmode |= 2; } else if (size == 32) { - _assert_msg_(DYNA_REC, shift == 0 || shift == 8 || shift == 16 || shift == 24, - "%s(size32) only supports shift of {0, 8, 16, 24}!", __FUNCTION__); + ASSERT_MSG(DYNA_REC, shift == 0 || shift == 8 || shift == 16 || shift == 24, + "%s(size32) only supports shift of {0, 8, 16, 24}!", __FUNCTION__); // XXX: Implement support for MOVI - shifting ones variant - _assert_msg_(DYNA_REC, !(imm & ~0xFFULL), "%s(size32) only supports 8bit values!", - __FUNCTION__); + ASSERT_MSG(DYNA_REC, !(imm & ~0xFFULL), "%s(size32) only supports 8bit values!", __FUNCTION__); switch (shift) { case 8: @@ -3834,7 +3825,7 @@ void ARM64FloatEmitter::MOVI(u8 size, ARM64Reg Rd, u64 imm, u8 shift) } else // 64 { - _assert_msg_(DYNA_REC, shift == 0, "%s(size64) doesn't support shift!", __FUNCTION__); + ASSERT_MSG(DYNA_REC, shift == 0, "%s(size64) doesn't support shift!", __FUNCTION__); op = 1; cmode = 0xE; @@ -3842,8 +3833,7 @@ void ARM64FloatEmitter::MOVI(u8 size, ARM64Reg Rd, u64 imm, u8 shift) for (int i = 0; i < 8; ++i) { u8 tmp = (imm >> (i << 3)) & 0xFF; - _assert_msg_(DYNA_REC, tmp == 0xFF || tmp == 0, "%s(size64) Invalid immediate!", - __FUNCTION__); + ASSERT_MSG(DYNA_REC, tmp == 0xFF || tmp == 0, "%s(size64) Invalid immediate!", __FUNCTION__); if (tmp == 0xFF) abcdefgh |= (1 << i); } @@ -3858,16 +3848,16 @@ void ARM64FloatEmitter::BIC(u8 size, ARM64Reg Rd, u8 imm, u8 shift) u8 op = 1; if (size == 16) { - _assert_msg_(DYNA_REC, shift == 0 || shift == 8, "%s(size16) only supports shift of {0, 8}!", - __FUNCTION__); + ASSERT_MSG(DYNA_REC, shift == 0 || shift == 8, "%s(size16) only supports shift of {0, 8}!", + __FUNCTION__); if (shift == 8) cmode |= 2; } else if (size == 32) { - _assert_msg_(DYNA_REC, shift == 0 || shift == 8 || shift == 16 || shift == 24, - "%s(size32) only supports shift of {0, 8, 16, 24}!", __FUNCTION__); + ASSERT_MSG(DYNA_REC, shift == 0 || shift == 8 || shift == 16 || shift == 24, + 
"%s(size32) only supports shift of {0, 8, 16, 24}!", __FUNCTION__); // XXX: Implement support for MOVI - shifting ones variant switch (shift) { @@ -3885,7 +3875,9 @@ void ARM64FloatEmitter::BIC(u8 size, ARM64Reg Rd, u8 imm, u8 shift) } } else - _assert_msg_(DYNA_REC, false, "%s only supports size of {16, 32}!", __FUNCTION__); + { + ASSERT_MSG(DYNA_REC, false, "%s only supports size of {16, 32}!", __FUNCTION__); + } EncodeModImm(Q, op, cmode, 0, Rd, imm); } @@ -4065,9 +4057,9 @@ void ARM64XEmitter::ANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) } else { - _assert_msg_(DYNA_REC, scratch != INVALID_REG, - "ANDI2R - failed to construct logical immediate value from %08x, need scratch", - (u32)imm); + ASSERT_MSG(DYNA_REC, scratch != INVALID_REG, + "ANDI2R - failed to construct logical immediate value from %08x, need scratch", + (u32)imm); MOVI2R(scratch, imm); AND(Rd, Rn, scratch); } @@ -4082,9 +4074,9 @@ void ARM64XEmitter::ORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) } else { - _assert_msg_(DYNA_REC, scratch != INVALID_REG, - "ORRI2R - failed to construct logical immediate value from %08x, need scratch", - (u32)imm); + ASSERT_MSG(DYNA_REC, scratch != INVALID_REG, + "ORRI2R - failed to construct logical immediate value from %08x, need scratch", + (u32)imm); MOVI2R(scratch, imm); ORR(Rd, Rn, scratch); } @@ -4099,9 +4091,9 @@ void ARM64XEmitter::EORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) } else { - _assert_msg_(DYNA_REC, scratch != INVALID_REG, - "EORI2R - failed to construct logical immediate value from %08x, need scratch", - (u32)imm); + ASSERT_MSG(DYNA_REC, scratch != INVALID_REG, + "EORI2R - failed to construct logical immediate value from %08x, need scratch", + (u32)imm); MOVI2R(scratch, imm); EOR(Rd, Rn, scratch); } @@ -4116,9 +4108,9 @@ void ARM64XEmitter::ANDSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) } else { - _assert_msg_(DYNA_REC, scratch != INVALID_REG, - "ANDSI2R - failed to construct logical 
immediate value from %08x, need scratch", - (u32)imm); + ASSERT_MSG(DYNA_REC, scratch != INVALID_REG, + "ANDSI2R - failed to construct logical immediate value from %08x, need scratch", + (u32)imm); MOVI2R(scratch, imm); ANDS(Rd, Rn, scratch); } @@ -4190,9 +4182,9 @@ void ARM64XEmitter::ADDI2R_internal(ARM64Reg Rd, ARM64Reg Rn, u64 imm, bool nega return; } - _assert_msg_(DYNA_REC, has_scratch, - "ADDI2R - failed to construct arithmetic immediate value from %08x, need scratch", - (u32)imm); + ASSERT_MSG(DYNA_REC, has_scratch, + "ADDI2R - failed to construct arithmetic immediate value from %08x, need scratch", + (u32)imm); negative ^= MOVI2R2(scratch, imm, imm_neg); switch ((negative << 1) | flags) @@ -4338,7 +4330,7 @@ bool FPImm8FromFloat(float value, uint8_t* immOut) void ARM64FloatEmitter::MOVI2F(ARM64Reg Rd, float value, ARM64Reg scratch, bool negate) { - _assert_msg_(DYNA_REC, !IsDouble(Rd), "MOVI2F does not yet support double precision"); + ASSERT_MSG(DYNA_REC, !IsDouble(Rd), "MOVI2F does not yet support double precision"); uint8_t imm8; if (value == 0.0) { @@ -4354,8 +4346,8 @@ void ARM64FloatEmitter::MOVI2F(ARM64Reg Rd, float value, ARM64Reg scratch, bool } else { - _assert_msg_(DYNA_REC, scratch != INVALID_REG, - "Failed to find a way to generate FP immediate %f without scratch", value); + ASSERT_MSG(DYNA_REC, scratch != INVALID_REG, + "Failed to find a way to generate FP immediate %f without scratch", value); u32 ival; if (negate) value = -value; diff --git a/Source/Core/Common/Arm64Emitter.h b/Source/Core/Common/Arm64Emitter.h index 2f49795519..8e9ceaa842 100644 --- a/Source/Core/Common/Arm64Emitter.h +++ b/Source/Core/Common/Arm64Emitter.h @@ -490,7 +490,7 @@ public: return (m_shifttype << 22) | (m_shift << 10); break; default: - _dbg_assert_msg_(DYNA_REC, false, "Invalid type in GetData"); + DEBUG_ASSERT_MSG(DYNA_REC, false, "Invalid type in GetData"); break; } return 0; @@ -846,7 +846,7 @@ public: template void MOVP2R(ARM64Reg Rd, P* ptr) { - 
_assert_msg_(DYNA_REC, Is64Bit(Rd), "Can't store pointers in 32-bit registers"); + ASSERT_MSG(DYNA_REC, Is64Bit(Rd), "Can't store pointers in 32-bit registers"); MOVI2R(Rd, (uintptr_t)ptr); } diff --git a/Source/Core/Common/Assert.h b/Source/Core/Common/Assert.h index aecfbdd6d1..4ea2b50352 100644 --- a/Source/Core/Common/Assert.h +++ b/Source/Core/Common/Assert.h @@ -10,14 +10,14 @@ #include "Common/MsgHandler.h" #ifdef _WIN32 -#define _assert_msg_(_t_, _a_, _fmt_, ...) \ +#define ASSERT_MSG(_t_, _a_, _fmt_, ...) \ if (!(_a_)) \ { \ if (!PanicYesNo(_fmt_ "\n\nIgnore and continue?", __VA_ARGS__)) \ Crash(); \ } -#define _dbg_assert_msg_(_t_, _a_, _msg_, ...) \ +#define DEBUG_ASSERT_MSG(_t_, _a_, _msg_, ...) \ if (MAX_LOGLEVEL >= LogTypes::LOG_LEVELS::LDEBUG && !(_a_)) \ { \ ERROR_LOG(_t_, _msg_, __VA_ARGS__); \ @@ -25,14 +25,14 @@ Crash(); \ } #else -#define _assert_msg_(_t_, _a_, _fmt_, ...) \ +#define ASSERT_MSG(_t_, _a_, _fmt_, ...) \ if (!(_a_)) \ { \ if (!PanicYesNo(_fmt_, ##__VA_ARGS__)) \ Crash(); \ } -#define _dbg_assert_msg_(_t_, _a_, _msg_, ...) \ +#define DEBUG_ASSERT_MSG(_t_, _a_, _msg_, ...) 
\ if (MAX_LOGLEVEL >= LogTypes::LOG_LEVELS::LDEBUG && !(_a_)) \ { \ ERROR_LOG(_t_, _msg_, ##__VA_ARGS__); \ @@ -41,11 +41,11 @@ } #endif -#define _assert_(_a_) \ - _assert_msg_(MASTER_LOG, _a_, \ - _trans("An error occurred.\n\n Line: %d\n File: %s\n\nIgnore and continue?"), \ - __LINE__, __FILE__) +#define ASSERT(_a_) \ + ASSERT_MSG(MASTER_LOG, _a_, \ + _trans("An error occurred.\n\n Line: %d\n File: %s\n\nIgnore and continue?"), \ + __LINE__, __FILE__) -#define _dbg_assert_(_t_, _a_) \ +#define DEBUG_ASSERT(_t_, _a_) \ if (MAX_LOGLEVEL >= LogTypes::LOG_LEVELS::LDEBUG) \ - _assert_(_a_) + ASSERT(_a_) diff --git a/Source/Core/Common/ChunkFile.h b/Source/Core/Common/ChunkFile.h index ea4bb2cd54..80247849ab 100644 --- a/Source/Core/Common/ChunkFile.h +++ b/Source/Core/Common/ChunkFile.h @@ -285,7 +285,7 @@ private: break; case MODE_VERIFY: - _dbg_assert_msg_(COMMON, !memcmp(data, *ptr, size), + DEBUG_ASSERT_MSG(COMMON, !memcmp(data, *ptr, size), "Savestate verification failure: buf %p != %p (size %u).\n", data, *ptr, size); break; diff --git a/Source/Core/Common/CodeBlock.h b/Source/Core/Common/CodeBlock.h index 9cbe3ffc5b..97a2c06ad0 100644 --- a/Source/Core/Common/CodeBlock.h +++ b/Source/Core/Common/CodeBlock.h @@ -67,7 +67,7 @@ public: // Call this when shutting down. Don't rely on the destructor, even though it'll do the job. 
void FreeCodeSpace() { - _assert_(!m_is_child); + ASSERT(!m_is_child); Common::FreeMemoryPages(region, total_region_size); region = nullptr; region_size = 0; @@ -87,7 +87,7 @@ public: void ResetCodePtr() { T::SetCodePtr(region); } size_t GetSpaceLeft() const { - _assert_(static_cast(T::GetCodePtr() - region) < region_size); + ASSERT(static_cast(T::GetCodePtr() - region) < region_size); return region_size - (T::GetCodePtr() - region); } @@ -100,7 +100,7 @@ public: bool HasChildren() const { return region_size != total_region_size; } u8* AllocChildCodeSpace(size_t child_size) { - _assert_msg_(DYNA_REG, child_size < GetSpaceLeft(), "Insufficient space for child allocation."); + ASSERT_MSG(DYNA_REC, child_size < GetSpaceLeft(), "Insufficient space for child allocation."); u8* child_region = region + region_size - child_size; region_size -= child_size; return child_region; diff --git a/Source/Core/Common/FileUtil.cpp b/Source/Core/Common/FileUtil.cpp index e798826cf5..b86b9f9dd4 100644 --- a/Source/Core/Common/FileUtil.cpp +++ b/Source/Core/Common/FileUtil.cpp @@ -715,7 +715,7 @@ std::string GetSysDirectory() sysDir = GetExeDirectory() + DIR_SEP + SYSDATA_DIR; #elif defined ANDROID sysDir = s_android_sys_directory; - _assert_msg_(COMMON, !sysDir.empty(), "Sys directory has not been set"); + ASSERT_MSG(COMMON, !sysDir.empty(), "Sys directory has not been set"); #else sysDir = SYSDATA_DIR; #endif diff --git a/Source/Core/Common/SysConf.cpp b/Source/Core/Common/SysConf.cpp index 34f406bc6b..22e902d042 100644 --- a/Source/Core/Common/SysConf.cpp +++ b/Source/Core/Common/SysConf.cpp @@ -31,7 +31,7 @@ static size_t GetNonArrayEntrySize(SysConf::Entry::Type type) case SysConf::Entry::Type::LongLong: return 8; default: - _assert_(false); + ASSERT(false); return 0; } } diff --git a/Source/Core/Common/SysConf.h b/Source/Core/Common/SysConf.h index 3306a6445b..92ff1c6718 100644 --- a/Source/Core/Common/SysConf.h +++ b/Source/Core/Common/SysConf.h @@ -55,7 +55,7 @@ public: template 
void SetData(T value) { - _assert_(sizeof(value) == bytes.size()); + ASSERT(sizeof(value) == bytes.size()); std::memcpy(bytes.data(), &value, bytes.size()); } diff --git a/Source/Core/Common/x64Emitter.cpp b/Source/Core/Common/x64Emitter.cpp index 7db8beac51..2654ca0cdc 100644 --- a/Source/Core/Common/x64Emitter.cpp +++ b/Source/Core/Common/x64Emitter.cpp @@ -122,8 +122,8 @@ void XEmitter::ReserveCodeSpace(int bytes) const u8* XEmitter::AlignCodeTo(size_t alignment) { - _assert_msg_(DYNA_REC, alignment != 0 && (alignment & (alignment - 1)) == 0, - "Alignment must be power of two"); + ASSERT_MSG(DYNA_REC, alignment != 0 && (alignment & (alignment - 1)) == 0, + "Alignment must be power of two"); u64 c = reinterpret_cast(code) & (alignment - 1); if (c) ReserveCodeSpace(static_cast(alignment - c)); @@ -150,7 +150,7 @@ const u8* XEmitter::AlignCodePage() // causing a subtle JIT bug. void XEmitter::CheckFlags() { - _assert_msg_(DYNA_REC, !flags_locked, "Attempt to modify flags while flags locked!"); + ASSERT_MSG(DYNA_REC, !flags_locked, "Attempt to modify flags while flags locked!"); } void XEmitter::WriteModRM(int mod, int reg, int rm) @@ -187,8 +187,8 @@ void OpArg::WriteREX(XEmitter* emit, int opBits, int bits, int customOp) const { emit->Write8(op); // Check the operation doesn't access AH, BH, CH, or DH. 
- _dbg_assert_(DYNA_REC, (offsetOrBaseReg & 0x100) == 0); - _dbg_assert_(DYNA_REC, (customOp & 0x100) == 0); + DEBUG_ASSERT(DYNA_REC, (offsetOrBaseReg & 0x100) == 0); + DEBUG_ASSERT(DYNA_REC, (customOp & 0x100) == 0); } } @@ -236,9 +236,9 @@ void OpArg::WriteRest(XEmitter* emit, int extraBytes, X64Reg _operandReg, // TODO : add some checks u64 ripAddr = (u64)emit->GetCodePtr() + 4 + extraBytes; s64 distance = (s64)offset - (s64)ripAddr; - _assert_msg_(DYNA_REC, - (distance < 0x80000000LL && distance >= -0x80000000LL) || !warn_64bit_offset, - "WriteRest: op out of range (0x%" PRIx64 " uses 0x%" PRIx64 ")", ripAddr, offset); + ASSERT_MSG(DYNA_REC, + (distance < 0x80000000LL && distance >= -0x80000000LL) || !warn_64bit_offset, + "WriteRest: op out of range (0x%" PRIx64 " uses 0x%" PRIx64 ")", ripAddr, offset); s32 offs = (s32)distance; emit->Write32((u32)offs); return; @@ -351,7 +351,7 @@ void OpArg::WriteRest(XEmitter* emit, int extraBytes, X64Reg _operandReg, ss = 0; break; default: - _assert_msg_(DYNA_REC, 0, "Invalid scale for SIB byte"); + ASSERT_MSG(DYNA_REC, 0, "Invalid scale for SIB byte"); ss = 0; break; } @@ -389,8 +389,8 @@ void XEmitter::JMP(const u8* addr, bool force5Bytes) if (!force5Bytes) { s64 distance = (s64)(fn - ((u64)code + 2)); - _assert_msg_(DYNA_REC, distance >= -0x80 && distance < 0x80, - "Jump target too far away, needs force5Bytes = true"); + ASSERT_MSG(DYNA_REC, distance >= -0x80 && distance < 0x80, + "Jump target too far away, needs force5Bytes = true"); // 8 bits will do Write8(0xEB); Write8((u8)(s8)distance); @@ -399,8 +399,8 @@ void XEmitter::JMP(const u8* addr, bool force5Bytes) { s64 distance = (s64)(fn - ((u64)code + 5)); - _assert_msg_(DYNA_REC, distance >= -0x80000000LL && distance < 0x80000000LL, - "Jump target too far away, needs indirect register"); + ASSERT_MSG(DYNA_REC, distance >= -0x80000000LL && distance < 0x80000000LL, + "Jump target too far away, needs indirect register"); Write8(0xE9); Write32((u32)(s32)distance); } @@ 
-410,7 +410,7 @@ void XEmitter::JMPptr(const OpArg& arg2) { OpArg arg = arg2; if (arg.IsImm()) - _assert_msg_(DYNA_REC, 0, "JMPptr - Imm argument"); + ASSERT_MSG(DYNA_REC, 0, "JMPptr - Imm argument"); arg.operandReg = 4; arg.WriteREX(this, 0, 0); Write8(0xFF); @@ -428,7 +428,7 @@ void XEmitter::JMPself() void XEmitter::CALLptr(OpArg arg) { if (arg.IsImm()) - _assert_msg_(DYNA_REC, 0, "CALLptr - Imm argument"); + ASSERT_MSG(DYNA_REC, 0, "CALLptr - Imm argument"); arg.operandReg = 2; arg.WriteREX(this, 0, 0); Write8(0xFF); @@ -438,8 +438,8 @@ void XEmitter::CALLptr(OpArg arg) void XEmitter::CALL(const void* fnptr) { u64 distance = u64(fnptr) - (u64(code) + 5); - _assert_msg_(DYNA_REC, distance < 0x0000000080000000ULL || distance >= 0xFFFFFFFF80000000ULL, - "CALL out of range (%p calls %p)", code, fnptr); + ASSERT_MSG(DYNA_REC, distance < 0x0000000080000000ULL || distance >= 0xFFFFFFFF80000000ULL, + "CALL out of range (%p calls %p)", code, fnptr); Write8(0xE8); Write32(u32(distance)); } @@ -500,8 +500,8 @@ void XEmitter::J_CC(CCFlags conditionCode, const u8* addr) if (distance < -0x80 || distance >= 0x80) { distance = (s64)(fn - ((u64)code + 6)); - _assert_msg_(DYNA_REC, distance >= -0x80000000LL && distance < 0x80000000LL, - "Jump target too far away, needs indirect register"); + ASSERT_MSG(DYNA_REC, distance >= -0x80000000LL && distance < 0x80000000LL, + "Jump target too far away, needs indirect register"); Write8(0x0F); Write8(0x80 + conditionCode); Write32((u32)(s32)distance); @@ -518,15 +518,15 @@ void XEmitter::SetJumpTarget(const FixupBranch& branch) if (branch.type == 0) { s64 distance = (s64)(code - branch.ptr); - _assert_msg_(DYNA_REC, distance >= -0x80 && distance < 0x80, - "Jump target too far away, needs force5Bytes = true"); + ASSERT_MSG(DYNA_REC, distance >= -0x80 && distance < 0x80, + "Jump target too far away, needs force5Bytes = true"); branch.ptr[-1] = (u8)(s8)distance; } else if (branch.type == 1) { s64 distance = (s64)(code - branch.ptr); - 
_assert_msg_(DYNA_REC, distance >= -0x80000000LL && distance < 0x80000000LL, - "Jump target too far away, needs indirect register"); + ASSERT_MSG(DYNA_REC, distance >= -0x80000000LL && distance < 0x80000000LL, + "Jump target too far away, needs indirect register"); s32 valid_distance = static_cast(distance); std::memcpy(&branch.ptr[-4], &valid_distance, sizeof(s32)); @@ -553,7 +553,7 @@ void XEmitter::RET_FAST() // The first sign of decadence: optimized NOPs. void XEmitter::NOP(size_t size) { - _dbg_assert_(DYNA_REC, (int)size > 0); + DEBUG_ASSERT(DYNA_REC, (int)size > 0); while (true) { switch (size) @@ -792,7 +792,7 @@ void XEmitter::PUSH(int bits, const OpArg& reg) Write32((u32)reg.offset); break; default: - _assert_msg_(DYNA_REC, 0, "PUSH - Bad imm bits"); + ASSERT_MSG(DYNA_REC, 0, "PUSH - Bad imm bits"); break; } } @@ -811,7 +811,7 @@ void XEmitter::POP(int /*bits*/, const OpArg& reg) if (reg.IsSimpleReg()) POP(reg.GetSimpleReg()); else - _assert_msg_(DYNA_REC, 0, "POP - Unsupported encoding"); + ASSERT_MSG(DYNA_REC, 0, "POP - Unsupported encoding"); } void XEmitter::BSWAP(int bits, X64Reg reg) @@ -830,7 +830,7 @@ void XEmitter::BSWAP(int bits, X64Reg reg) } else { - _assert_msg_(DYNA_REC, 0, "BSWAP - Wrong number of bits"); + ASSERT_MSG(DYNA_REC, 0, "BSWAP - Wrong number of bits"); } } @@ -844,7 +844,7 @@ void XEmitter::UD2() void XEmitter::PREFETCH(PrefetchLevel level, OpArg arg) { - _assert_msg_(DYNA_REC, !arg.IsImm(), "PREFETCH - Imm argument"); + ASSERT_MSG(DYNA_REC, !arg.IsImm(), "PREFETCH - Imm argument"); arg.operandReg = (u8)level; arg.WriteREX(this, 0, 0); Write8(0x0F); @@ -854,7 +854,7 @@ void XEmitter::PREFETCH(PrefetchLevel level, OpArg arg) void XEmitter::SETcc(CCFlags flag, OpArg dest) { - _assert_msg_(DYNA_REC, !dest.IsImm(), "SETcc - Imm argument"); + ASSERT_MSG(DYNA_REC, !dest.IsImm(), "SETcc - Imm argument"); dest.operandReg = 0; dest.WriteREX(this, 0, 8); Write8(0x0F); @@ -864,8 +864,8 @@ void XEmitter::SETcc(CCFlags flag, OpArg dest) void 
XEmitter::CMOVcc(int bits, X64Reg dest, OpArg src, CCFlags flag) { - _assert_msg_(DYNA_REC, !src.IsImm(), "CMOVcc - Imm argument"); - _assert_msg_(DYNA_REC, bits != 8, "CMOVcc - 8 bits unsupported"); + ASSERT_MSG(DYNA_REC, !src.IsImm(), "CMOVcc - Imm argument"); + ASSERT_MSG(DYNA_REC, bits != 8, "CMOVcc - 8 bits unsupported"); if (bits == 16) Write8(0x66); src.operandReg = dest; @@ -877,7 +877,7 @@ void XEmitter::CMOVcc(int bits, X64Reg dest, OpArg src, CCFlags flag) void XEmitter::WriteMulDivType(int bits, OpArg src, int ext) { - _assert_msg_(DYNA_REC, !src.IsImm(), "WriteMulDivType - Imm argument"); + ASSERT_MSG(DYNA_REC, !src.IsImm(), "WriteMulDivType - Imm argument"); CheckFlags(); src.operandReg = ext; if (bits == 16) @@ -921,7 +921,7 @@ void XEmitter::NOT(int bits, const OpArg& src) void XEmitter::WriteBitSearchType(int bits, X64Reg dest, OpArg src, u8 byte2, bool rep) { - _assert_msg_(DYNA_REC, !src.IsImm(), "WriteBitSearchType - Imm argument"); + ASSERT_MSG(DYNA_REC, !src.IsImm(), "WriteBitSearchType - Imm argument"); CheckFlags(); src.operandReg = (u8)dest; if (bits == 16) @@ -937,7 +937,7 @@ void XEmitter::WriteBitSearchType(int bits, X64Reg dest, OpArg src, u8 byte2, bo void XEmitter::MOVNTI(int bits, const OpArg& dest, X64Reg src) { if (bits <= 16) - _assert_msg_(DYNA_REC, 0, "MOVNTI - bits<=16"); + ASSERT_MSG(DYNA_REC, 0, "MOVNTI - bits<=16"); WriteBitSearchType(bits, src, dest, 0xC3); } @@ -967,7 +967,7 @@ void XEmitter::LZCNT(int bits, X64Reg dest, const OpArg& src) void XEmitter::MOVSX(int dbits, int sbits, X64Reg dest, OpArg src) { - _assert_msg_(DYNA_REC, !src.IsImm(), "MOVSX - Imm argument"); + ASSERT_MSG(DYNA_REC, !src.IsImm(), "MOVSX - Imm argument"); if (dbits == sbits) { MOV(dbits, R(dest), src); @@ -1000,7 +1000,7 @@ void XEmitter::MOVSX(int dbits, int sbits, X64Reg dest, OpArg src) void XEmitter::MOVZX(int dbits, int sbits, X64Reg dest, OpArg src) { - _assert_msg_(DYNA_REC, !src.IsImm(), "MOVZX - Imm argument"); + ASSERT_MSG(DYNA_REC, 
!src.IsImm(), "MOVZX - Imm argument"); if (dbits == sbits) { MOV(dbits, R(dest), src); @@ -1027,14 +1027,14 @@ void XEmitter::MOVZX(int dbits, int sbits, X64Reg dest, OpArg src) } else { - _assert_msg_(DYNA_REC, 0, "MOVZX - Invalid size"); + ASSERT_MSG(DYNA_REC, 0, "MOVZX - Invalid size"); } src.WriteRest(this); } void XEmitter::WriteMOVBE(int bits, u8 op, X64Reg reg, const OpArg& arg) { - _assert_msg_(DYNA_REC, cpu_info.bMOVBE, "Generating MOVBE on a system that does not support it."); + ASSERT_MSG(DYNA_REC, cpu_info.bMOVBE, "Generating MOVBE on a system that does not support it."); if (bits == 8) { MOV(8, op & 1 ? arg : R(reg), op & 1 ? R(reg) : arg); @@ -1042,7 +1042,7 @@ void XEmitter::WriteMOVBE(int bits, u8 op, X64Reg reg, const OpArg& arg) } if (bits == 16) Write8(0x66); - _assert_msg_(DYNA_REC, !arg.IsSimpleReg() && !arg.IsImm(), "MOVBE: need r<-m or m<-r!"); + ASSERT_MSG(DYNA_REC, !arg.IsSimpleReg() && !arg.IsImm(), "MOVBE: need r<-m or m<-r!"); arg.WriteREX(this, bits, bits, reg); Write8(0x0F); Write8(0x38); @@ -1127,7 +1127,7 @@ void XEmitter::SwapAndStore(int size, const OpArg& dst, X64Reg src, MovInfo* inf void XEmitter::LEA(int bits, X64Reg dest, OpArg src) { - _assert_msg_(DYNA_REC, !src.IsImm(), "LEA - Imm argument"); + ASSERT_MSG(DYNA_REC, !src.IsImm(), "LEA - Imm argument"); src.operandReg = (u8)dest; if (bits == 16) Write8(0x66); // TODO: performance warning @@ -1143,12 +1143,12 @@ void XEmitter::WriteShift(int bits, OpArg dest, const OpArg& shift, int ext) bool writeImm = false; if (dest.IsImm()) { - _assert_msg_(DYNA_REC, 0, "WriteShift - can't shift imms"); + ASSERT_MSG(DYNA_REC, 0, "WriteShift - can't shift imms"); } if ((shift.IsSimpleReg() && shift.GetSimpleReg() != ECX) || (shift.IsImm() && shift.GetImmBits() != 8)) { - _assert_msg_(DYNA_REC, 0, "WriteShift - illegal argument"); + ASSERT_MSG(DYNA_REC, 0, "WriteShift - illegal argument"); } dest.operandReg = ext; if (bits == 16) @@ -1214,11 +1214,11 @@ void XEmitter::WriteBitTest(int bits, 
const OpArg& dest, const OpArg& index, int CheckFlags(); if (dest.IsImm()) { - _assert_msg_(DYNA_REC, 0, "WriteBitTest - can't test imms"); + ASSERT_MSG(DYNA_REC, 0, "WriteBitTest - can't test imms"); } if ((index.IsImm() && index.GetImmBits() != 8)) { - _assert_msg_(DYNA_REC, 0, "WriteBitTest - illegal argument"); + ASSERT_MSG(DYNA_REC, 0, "WriteBitTest - illegal argument"); } if (bits == 16) Write8(0x66); @@ -1263,16 +1263,16 @@ void XEmitter::SHRD(int bits, const OpArg& dest, const OpArg& src, const OpArg& CheckFlags(); if (dest.IsImm()) { - _assert_msg_(DYNA_REC, 0, "SHRD - can't use imms as destination"); + ASSERT_MSG(DYNA_REC, 0, "SHRD - can't use imms as destination"); } if (!src.IsSimpleReg()) { - _assert_msg_(DYNA_REC, 0, "SHRD - must use simple register as source"); + ASSERT_MSG(DYNA_REC, 0, "SHRD - must use simple register as source"); } if ((shift.IsSimpleReg() && shift.GetSimpleReg() != ECX) || (shift.IsImm() && shift.GetImmBits() != 8)) { - _assert_msg_(DYNA_REC, 0, "SHRD - illegal shift"); + ASSERT_MSG(DYNA_REC, 0, "SHRD - illegal shift"); } if (bits == 16) Write8(0x66); @@ -1298,16 +1298,16 @@ void XEmitter::SHLD(int bits, const OpArg& dest, const OpArg& src, const OpArg& CheckFlags(); if (dest.IsImm()) { - _assert_msg_(DYNA_REC, 0, "SHLD - can't use imms as destination"); + ASSERT_MSG(DYNA_REC, 0, "SHLD - can't use imms as destination"); } if (!src.IsSimpleReg()) { - _assert_msg_(DYNA_REC, 0, "SHLD - must use simple register as source"); + ASSERT_MSG(DYNA_REC, 0, "SHLD - must use simple register as source"); } if ((shift.IsSimpleReg() && shift.GetSimpleReg() != ECX) || (shift.IsImm() && shift.GetImmBits() != 8)) { - _assert_msg_(DYNA_REC, 0, "SHLD - illegal shift"); + ASSERT_MSG(DYNA_REC, 0, "SHLD - illegal shift"); } if (bits == 16) Write8(0x66); @@ -1346,7 +1346,7 @@ void OpArg::WriteNormalOp(XEmitter* emit, bool toRM, NormalOp op, const OpArg& o X64Reg _operandReg; if (IsImm()) { - _assert_msg_(DYNA_REC, 0, "WriteNormalOp - Imm argument, wrong 
order"); + ASSERT_MSG(DYNA_REC, 0, "WriteNormalOp - Imm argument, wrong order"); } if (bits == 16) @@ -1360,7 +1360,7 @@ void OpArg::WriteNormalOp(XEmitter* emit, bool toRM, NormalOp op, const OpArg& o if (!toRM) { - _assert_msg_(DYNA_REC, 0, "WriteNormalOp - Writing to Imm (!toRM)"); + ASSERT_MSG(DYNA_REC, 0, "WriteNormalOp - Writing to Imm (!toRM)"); } if (operand.scale == SCALE_IMM8 && bits == 8) @@ -1436,8 +1436,8 @@ void OpArg::WriteNormalOp(XEmitter* emit, bool toRM, NormalOp op, const OpArg& o { if (scale) { - _assert_msg_(DYNA_REC, 0, - "WriteNormalOp - MOV with 64-bit imm requres register destination"); + ASSERT_MSG(DYNA_REC, 0, + "WriteNormalOp - MOV with 64-bit imm requires register destination"); } // mov reg64, imm64 else if (op == nrmMOV) @@ -1446,11 +1446,11 @@ void OpArg::WriteNormalOp(XEmitter* emit, bool toRM, NormalOp op, const OpArg& o emit->Write64((u64)operand.offset); return; } - _assert_msg_(DYNA_REC, 0, "WriteNormalOp - Only MOV can take 64-bit imm"); + ASSERT_MSG(DYNA_REC, 0, "WriteNormalOp - Only MOV can take 64-bit imm"); } else { - _assert_msg_(DYNA_REC, 0, "WriteNormalOp - Unhandled case %d %d", operand.scale, bits); + ASSERT_MSG(DYNA_REC, 0, "WriteNormalOp - Unhandled case %d %d", operand.scale, bits); } _operandReg = (X64Reg)normalops[op].ext; // pass extension in REG of ModRM } @@ -1484,7 +1484,7 @@ void OpArg::WriteNormalOp(XEmitter* emit, bool toRM, NormalOp op, const OpArg& o emit->Write32((u32)operand.offset); break; default: - _assert_msg_(DYNA_REC, 0, "WriteNormalOp - Unhandled case"); + ASSERT_MSG(DYNA_REC, 0, "WriteNormalOp - Unhandled case"); } } @@ -1493,7 +1493,7 @@ void XEmitter::WriteNormalOp(int bits, NormalOp op, const OpArg& a1, const OpArg if (a1.IsImm()) { // Booh! 
Can't write to an imm - _assert_msg_(DYNA_REC, 0, "WriteNormalOp - a1 cannot be imm"); + ASSERT_MSG(DYNA_REC, 0, "WriteNormalOp - a1 cannot be imm"); return; } if (a2.IsImm()) @@ -1508,8 +1508,8 @@ void XEmitter::WriteNormalOp(int bits, NormalOp op, const OpArg& a1, const OpArg } else { - _assert_msg_(DYNA_REC, a2.IsSimpleReg() || a2.IsImm(), - "WriteNormalOp - a1 and a2 cannot both be memory"); + ASSERT_MSG(DYNA_REC, a2.IsSimpleReg() || a2.IsImm(), + "WriteNormalOp - a1 and a2 cannot both be memory"); a1.WriteNormalOp(this, true, op, a2, bits); } } @@ -1587,7 +1587,7 @@ void XEmitter::CMP_or_TEST(int bits, const OpArg& a1, const OpArg& a2) void XEmitter::MOV_sum(int bits, X64Reg dest, const OpArg& a1, const OpArg& a2) { // This stomps on flags, so ensure they aren't locked - _dbg_assert_(DYNA_REC, !flags_locked); + DEBUG_ASSERT(DYNA_REC, !flags_locked); // Zero shortcuts (note that this can generate no code in the case where a1 == dest && a2 == zero // or a2 == dest && a1 == zero) @@ -1659,19 +1659,19 @@ void XEmitter::IMUL(int bits, X64Reg regOp, const OpArg& a1, const OpArg& a2) CheckFlags(); if (bits == 8) { - _assert_msg_(DYNA_REC, 0, "IMUL - illegal bit size!"); + ASSERT_MSG(DYNA_REC, 0, "IMUL - illegal bit size!"); return; } if (a1.IsImm()) { - _assert_msg_(DYNA_REC, 0, "IMUL - second arg cannot be imm!"); + ASSERT_MSG(DYNA_REC, 0, "IMUL - second arg cannot be imm!"); return; } if (!a2.IsImm()) { - _assert_msg_(DYNA_REC, 0, "IMUL - third arg must be imm!"); + ASSERT_MSG(DYNA_REC, 0, "IMUL - third arg must be imm!"); return; } @@ -1701,7 +1701,7 @@ void XEmitter::IMUL(int bits, X64Reg regOp, const OpArg& a1, const OpArg& a2) } else { - _assert_msg_(DYNA_REC, 0, "IMUL - unhandled case!"); + ASSERT_MSG(DYNA_REC, 0, "IMUL - unhandled case!"); } } } @@ -1711,7 +1711,7 @@ void XEmitter::IMUL(int bits, X64Reg regOp, const OpArg& a) CheckFlags(); if (bits == 8) { - _assert_msg_(DYNA_REC, 0, "IMUL - illegal bit size!"); + ASSERT_MSG(DYNA_REC, 0, "IMUL - illegal bit 
size!"); return; } @@ -1890,7 +1890,7 @@ void XEmitter::MOVQ_xmm(OpArg arg, X64Reg src) void XEmitter::WriteMXCSR(OpArg arg, int ext) { if (arg.IsImm() || arg.IsSimpleReg()) - _assert_msg_(DYNA_REC, 0, "MXCSR - invalid operand"); + ASSERT_MSG(DYNA_REC, 0, "MXCSR - invalid operand"); arg.operandReg = ext; arg.WriteREX(this, 0, 0); @@ -3248,8 +3248,8 @@ void XEmitter::FWAIT() void XEmitter::WriteFloatLoadStore(int bits, FloatOp op, FloatOp op_80b, const OpArg& arg) { int mf = 0; - _assert_msg_(DYNA_REC, !(bits == 80 && op_80b == floatINVALID), - "WriteFloatLoadStore: 80 bits not supported for this instruction"); + ASSERT_MSG(DYNA_REC, !(bits == 80 && op_80b == floatINVALID), + "WriteFloatLoadStore: 80 bits not supported for this instruction"); switch (bits) { case 32: @@ -3262,7 +3262,7 @@ void XEmitter::WriteFloatLoadStore(int bits, FloatOp op, FloatOp op_80b, const O mf = 2; break; default: - _assert_msg_(DYNA_REC, 0, "WriteFloatLoadStore: invalid bits (should be 32/64/80)"); + ASSERT_MSG(DYNA_REC, 0, "WriteFloatLoadStore: invalid bits (should be 32/64/80)"); } Write8(0xd9 | mf); // x87 instructions use the reg field of the ModR/M byte as opcode: diff --git a/Source/Core/Common/x64Emitter.h b/Source/Core/Common/x64Emitter.h index 1879d6b16c..bfaac81e17 100644 --- a/Source/Core/Common/x64Emitter.h +++ b/Source/Core/Common/x64Emitter.h @@ -156,64 +156,64 @@ struct OpArg u64 Imm64() const { - _dbg_assert_(DYNA_REC, scale == SCALE_IMM64); + DEBUG_ASSERT(DYNA_REC, scale == SCALE_IMM64); return (u64)offset; } u32 Imm32() const { - _dbg_assert_(DYNA_REC, scale == SCALE_IMM32); + DEBUG_ASSERT(DYNA_REC, scale == SCALE_IMM32); return (u32)offset; } u16 Imm16() const { - _dbg_assert_(DYNA_REC, scale == SCALE_IMM16); + DEBUG_ASSERT(DYNA_REC, scale == SCALE_IMM16); return (u16)offset; } u8 Imm8() const { - _dbg_assert_(DYNA_REC, scale == SCALE_IMM8); + DEBUG_ASSERT(DYNA_REC, scale == SCALE_IMM8); return (u8)offset; } s64 SImm64() const { - _dbg_assert_(DYNA_REC, scale == 
SCALE_IMM64); + DEBUG_ASSERT(DYNA_REC, scale == SCALE_IMM64); return (s64)offset; } s32 SImm32() const { - _dbg_assert_(DYNA_REC, scale == SCALE_IMM32); + DEBUG_ASSERT(DYNA_REC, scale == SCALE_IMM32); return (s32)offset; } s16 SImm16() const { - _dbg_assert_(DYNA_REC, scale == SCALE_IMM16); + DEBUG_ASSERT(DYNA_REC, scale == SCALE_IMM16); return (s16)offset; } s8 SImm8() const { - _dbg_assert_(DYNA_REC, scale == SCALE_IMM8); + DEBUG_ASSERT(DYNA_REC, scale == SCALE_IMM8); return (s8)offset; } OpArg AsImm64() const { - _dbg_assert_(DYNA_REC, IsImm()); + DEBUG_ASSERT(DYNA_REC, IsImm()); return OpArg((u64)offset, SCALE_IMM64); } OpArg AsImm32() const { - _dbg_assert_(DYNA_REC, IsImm()); + DEBUG_ASSERT(DYNA_REC, IsImm()); return OpArg((u32)offset, SCALE_IMM32); } OpArg AsImm16() const { - _dbg_assert_(DYNA_REC, IsImm()); + DEBUG_ASSERT(DYNA_REC, IsImm()); return OpArg((u16)offset, SCALE_IMM16); } OpArg AsImm8() const { - _dbg_assert_(DYNA_REC, IsImm()); + DEBUG_ASSERT(DYNA_REC, IsImm()); return OpArg((u8)offset, SCALE_IMM8); } @@ -253,7 +253,7 @@ struct OpArg void AddMemOffset(int val) { - _dbg_assert_msg_(DYNA_REC, scale == SCALE_RIP || (scale <= SCALE_ATREG && scale > SCALE_NONE), + DEBUG_ASSERT_MSG(DYNA_REC, scale == SCALE_RIP || (scale <= SCALE_ATREG && scale > SCALE_NONE), "Tried to increment an OpArg which doesn't have an offset"); offset += val; } @@ -329,7 +329,7 @@ inline u32 PtrOffset(const void* ptr, const void* base = nullptr) s64 distance = (s64)ptr - (s64)base; if (distance >= 0x80000000LL || distance < -0x80000000LL) { - _assert_msg_(DYNA_REC, 0, "pointer offset out of range"); + ASSERT_MSG(DYNA_REC, 0, "pointer offset out of range"); return 0; } diff --git a/Source/Core/Core/ConfigManager.cpp b/Source/Core/Core/ConfigManager.cpp index 220f9e0a0b..a59dc8a126 100644 --- a/Source/Core/Core/ConfigManager.cpp +++ b/Source/Core/Core/ConfigManager.cpp @@ -843,7 +843,7 @@ const char* SConfig::GetDirectoryForRegion(DiscIO::Region region) return EUR_DIR; case 
DiscIO::Region::NTSC_K: - _assert_msg_(BOOT, false, "NTSC-K is not a valid GameCube region"); + ASSERT_MSG(BOOT, false, "NTSC-K is not a valid GameCube region"); return nullptr; default: diff --git a/Source/Core/Core/CoreTiming.cpp b/Source/Core/Core/CoreTiming.cpp index 2b76fbf7f2..c462789b67 100644 --- a/Source/Core/Core/CoreTiming.cpp +++ b/Source/Core/Core/CoreTiming.cpp @@ -104,10 +104,10 @@ EventType* RegisterEvent(const std::string& name, TimedCallback callback) { // check for existing type with same name. // we want event type names to remain unique so that we can use them for serialization. - _assert_msg_(POWERPC, s_event_types.find(name) == s_event_types.end(), - "CoreTiming Event \"%s\" is already registered. Events should only be registered " - "during Init to avoid breaking save states.", - name.c_str()); + ASSERT_MSG(POWERPC, s_event_types.find(name) == s_event_types.end(), + "CoreTiming Event \"%s\" is already registered. Events should only be registered " + "during Init to avoid breaking save states.", + name.c_str()); auto info = s_event_types.emplace(name, EventType{callback, nullptr}); EventType* event_type = &info.first->second; @@ -117,7 +117,7 @@ EventType* RegisterEvent(const std::string& name, TimedCallback callback) void UnregisterAllEvents() { - _assert_msg_(POWERPC, s_event_queue.empty(), "Cannot unregister events with events pending"); + ASSERT_MSG(POWERPC, s_event_queue.empty(), "Cannot unregister events with events pending"); s_event_types.clear(); } @@ -230,7 +230,7 @@ void ClearPendingEvents() void ScheduleEvent(s64 cycles_into_future, EventType* event_type, u64 userdata, FromThread from) { - _assert_msg_(POWERPC, event_type, "Event type is nullptr, will crash now."); + ASSERT_MSG(POWERPC, event_type, "Event type is nullptr, will crash now."); bool from_cpu_thread; if (from == FromThread::ANY) @@ -240,9 +240,9 @@ void ScheduleEvent(s64 cycles_into_future, EventType* event_type, u64 userdata, else { from_cpu_thread = from == 
FromThread::CPU; - _assert_msg_(POWERPC, from_cpu_thread == Core::IsCPUThread(), - "A \"%s\" event was scheduled from the wrong thread (%s)", - event_type->name->c_str(), from_cpu_thread ? "CPU" : "non-CPU"); + ASSERT_MSG(POWERPC, from_cpu_thread == Core::IsCPUThread(), + "A \"%s\" event was scheduled from the wrong thread (%s)", event_type->name->c_str(), + from_cpu_thread ? "CPU" : "non-CPU"); } if (from_cpu_thread) diff --git a/Source/Core/Core/DSP/DSPCore.cpp b/Source/Core/Core/DSP/DSPCore.cpp index 2a0727ff41..7b0a4a2244 100644 --- a/Source/Core/Core/DSP/DSPCore.cpp +++ b/Source/Core/Core/DSP/DSPCore.cpp @@ -373,7 +373,7 @@ u16 DSPCore_ReadRegister(size_t reg) case DSP_REG_ACM1: return g_dsp.r.ac[reg - DSP_REG_ACM0].m; default: - _assert_msg_(DSP_CORE, 0, "cannot happen"); + ASSERT_MSG(DSP_CORE, 0, "cannot happen"); return 0; } } diff --git a/Source/Core/Core/DSP/Interpreter/DSPIntUtil.h b/Source/Core/Core/DSP/Interpreter/DSPIntUtil.h index 2462e61e62..3791001309 100644 --- a/Source/Core/Core/DSP/Interpreter/DSPIntUtil.h +++ b/Source/Core/Core/DSP/Interpreter/DSPIntUtil.h @@ -161,7 +161,7 @@ static inline u16 dsp_op_read_reg(int _reg) case DSP_REG_ACM1: return g_dsp.r.ac[reg - DSP_REG_ACM0].m; default: - _assert_msg_(DSP_INT, 0, "cannot happen"); + ASSERT_MSG(DSP_INT, 0, "cannot happen"); return 0; } } diff --git a/Source/Core/Core/DSP/Jit/x64/DSPEmitter.cpp b/Source/Core/Core/DSP/Jit/x64/DSPEmitter.cpp index 642b229bbd..b795007675 100644 --- a/Source/Core/Core/DSP/Jit/x64/DSPEmitter.cpp +++ b/Source/Core/Core/DSP/Jit/x64/DSPEmitter.cpp @@ -140,7 +140,7 @@ void DSPEmitter::FallBackToInterpreter(UDSPInstruction inst) // Fall back to interpreter m_gpr.PushRegs(); - _assert_msg_(DSPLLE, op_template->intFunc, "No function for %04x", inst); + ASSERT_MSG(DSPLLE, op_template->intFunc, "No function for %04x", inst); ABI_CallFunctionC16(op_template->intFunc, inst); m_gpr.PopRegs(); } diff --git a/Source/Core/Core/DSP/Jit/x64/DSPJitCCUtil.cpp 
b/Source/Core/Core/DSP/Jit/x64/DSPJitCCUtil.cpp index c8d8522d0b..9cae4e2fd2 100644 --- a/Source/Core/Core/DSP/Jit/x64/DSPJitCCUtil.cpp +++ b/Source/Core/Core/DSP/Jit/x64/DSPJitCCUtil.cpp @@ -15,7 +15,7 @@ namespace DSP::JIT::x64 // Clobbers scratch void DSPEmitter::Update_SR_Register(Gen::X64Reg val, Gen::X64Reg scratch) { - _assert_(val != scratch); + ASSERT(val != scratch); const OpArg sr_reg = m_gpr.GetReg(DSP_REG_SR); // // 0x04 diff --git a/Source/Core/Core/DSP/Jit/x64/DSPJitRegCache.cpp b/Source/Core/Core/DSP/Jit/x64/DSPJitRegCache.cpp index 936f7ed41d..979b924733 100644 --- a/Source/Core/Core/DSP/Jit/x64/DSPJitRegCache.cpp +++ b/Source/Core/Core/DSP/Jit/x64/DSPJitRegCache.cpp @@ -83,7 +83,7 @@ static Gen::OpArg GetRegisterPointer(size_t reg) case DSP_REG_PROD_64: return MDisp(R15, static_cast(offsetof(SDSP, r.prod.val))); default: - _assert_msg_(DSPLLE, 0, "cannot happen"); + ASSERT_MSG(DSPLLE, 0, "cannot happen"); return M(static_cast(nullptr)); } } @@ -191,8 +191,8 @@ DSPJitRegCache::DSPJitRegCache(const DSPJitRegCache& cache) DSPJitRegCache& DSPJitRegCache::operator=(const DSPJitRegCache& cache) { - _assert_msg_(DSPLLE, &m_emitter == &cache.m_emitter, "emitter does not match"); - _assert_msg_(DSPLLE, m_is_temporary, "register cache not temporary??"); + ASSERT_MSG(DSPLLE, &m_emitter == &cache.m_emitter, "emitter does not match"); + ASSERT_MSG(DSPLLE, m_is_temporary, "register cache not temporary??"); m_is_merged = false; m_xregs = cache.m_xregs; @@ -203,7 +203,7 @@ DSPJitRegCache& DSPJitRegCache::operator=(const DSPJitRegCache& cache) DSPJitRegCache::~DSPJitRegCache() { - _assert_msg_(DSPLLE, !m_is_temporary || m_is_merged, "temporary cache not merged"); + ASSERT_MSG(DSPLLE, !m_is_temporary || m_is_merged, "temporary cache not merged"); } void DSPJitRegCache::Drop() @@ -292,22 +292,22 @@ void DSPJitRegCache::FlushRegs(DSPJitRegCache& cache, bool emit) // consistency checks for (size_t i = 0; i < m_xregs.size(); i++) { - _assert_msg_(DSPLLE, 
m_xregs[i].guest_reg == cache.m_xregs[i].guest_reg, - "cache and current xreg guest_reg mismatch for %u", static_cast(i)); + ASSERT_MSG(DSPLLE, m_xregs[i].guest_reg == cache.m_xregs[i].guest_reg, + "cache and current xreg guest_reg mismatch for %u", static_cast(i)); } for (size_t i = 0; i < m_regs.size(); i++) { - _assert_msg_(DSPLLE, m_regs[i].loc.IsImm() == cache.m_regs[i].loc.IsImm(), - "cache and current reg loc mismatch for %i", static_cast(i)); - _assert_msg_(DSPLLE, m_regs[i].loc.GetSimpleReg() == cache.m_regs[i].loc.GetSimpleReg(), - "cache and current reg loc mismatch for %i", static_cast(i)); - _assert_msg_(DSPLLE, m_regs[i].dirty || !cache.m_regs[i].dirty, - "cache and current reg dirty mismatch for %i", static_cast(i)); - _assert_msg_(DSPLLE, m_regs[i].used == cache.m_regs[i].used, - "cache and current reg used mismatch for %i", static_cast(i)); - _assert_msg_(DSPLLE, m_regs[i].shift == cache.m_regs[i].shift, - "cache and current reg shift mismatch for %i", static_cast(i)); + ASSERT_MSG(DSPLLE, m_regs[i].loc.IsImm() == cache.m_regs[i].loc.IsImm(), + "cache and current reg loc mismatch for %i", static_cast(i)); + ASSERT_MSG(DSPLLE, m_regs[i].loc.GetSimpleReg() == cache.m_regs[i].loc.GetSimpleReg(), + "cache and current reg loc mismatch for %i", static_cast(i)); + ASSERT_MSG(DSPLLE, m_regs[i].dirty || !cache.m_regs[i].dirty, + "cache and current reg dirty mismatch for %i", static_cast(i)); + ASSERT_MSG(DSPLLE, m_regs[i].used == cache.m_regs[i].used, + "cache and current reg used mismatch for %i", static_cast(i)); + ASSERT_MSG(DSPLLE, m_regs[i].shift == cache.m_regs[i].shift, + "cache and current reg shift mismatch for %i", static_cast(i)); } m_use_ctr = cache.m_use_ctr; @@ -321,7 +321,7 @@ void DSPJitRegCache::FlushMemBackedRegs() for (size_t i = 0; i < m_regs.size(); i++) { - _assert_msg_(DSPLLE, !m_regs[i].used, "register %u still in use", static_cast(i)); + ASSERT_MSG(DSPLLE, !m_regs[i].used, "register %u still in use", static_cast(i)); if 
(m_regs[i].used) { @@ -351,27 +351,27 @@ void DSPJitRegCache::FlushRegs() MovToMemory(i); } - _assert_msg_(DSPLLE, !m_regs[i].loc.IsSimpleReg(), "register %zu is still a simple reg", i); + ASSERT_MSG(DSPLLE, !m_regs[i].loc.IsSimpleReg(), "register %zu is still a simple reg", i); } - _assert_msg_(DSPLLE, m_xregs[RSP].guest_reg == DSP_REG_STATIC, "wrong xreg state for %d", RSP); - _assert_msg_(DSPLLE, m_xregs[RBX].guest_reg == DSP_REG_STATIC, "wrong xreg state for %d", RBX); - _assert_msg_(DSPLLE, m_xregs[RBP].guest_reg == DSP_REG_NONE, "wrong xreg state for %d", RBP); - _assert_msg_(DSPLLE, m_xregs[RSI].guest_reg == DSP_REG_NONE, "wrong xreg state for %d", RSI); - _assert_msg_(DSPLLE, m_xregs[RDI].guest_reg == DSP_REG_NONE, "wrong xreg state for %d", RDI); + ASSERT_MSG(DSPLLE, m_xregs[RSP].guest_reg == DSP_REG_STATIC, "wrong xreg state for %d", RSP); + ASSERT_MSG(DSPLLE, m_xregs[RBX].guest_reg == DSP_REG_STATIC, "wrong xreg state for %d", RBX); + ASSERT_MSG(DSPLLE, m_xregs[RBP].guest_reg == DSP_REG_NONE, "wrong xreg state for %d", RBP); + ASSERT_MSG(DSPLLE, m_xregs[RSI].guest_reg == DSP_REG_NONE, "wrong xreg state for %d", RSI); + ASSERT_MSG(DSPLLE, m_xregs[RDI].guest_reg == DSP_REG_NONE, "wrong xreg state for %d", RDI); #ifdef STATIC_REG_ACCS - _assert_msg_(DSPLLE, m_xregs[R8].guest_reg == DSP_REG_STATIC, "wrong xreg state for %d", R8); - _assert_msg_(DSPLLE, m_xregs[R9].guest_reg == DSP_REG_STATIC, "wrong xreg state for %d", R9); + ASSERT_MSG(DSPLLE, m_xregs[R8].guest_reg == DSP_REG_STATIC, "wrong xreg state for %d", R8); + ASSERT_MSG(DSPLLE, m_xregs[R9].guest_reg == DSP_REG_STATIC, "wrong xreg state for %d", R9); #else - _assert_msg_(DSPLLE, m_xregs[R8].guest_reg == DSP_REG_NONE, "wrong xreg state for %d", R8); - _assert_msg_(DSPLLE, m_xregs[R9].guest_reg == DSP_REG_NONE, "wrong xreg state for %d", R9); + ASSERT_MSG(DSPLLE, m_xregs[R8].guest_reg == DSP_REG_NONE, "wrong xreg state for %d", R8); + ASSERT_MSG(DSPLLE, m_xregs[R9].guest_reg == DSP_REG_NONE, "wrong 
xreg state for %d", R9); #endif - _assert_msg_(DSPLLE, m_xregs[R10].guest_reg == DSP_REG_NONE, "wrong xreg state for %d", R10); - _assert_msg_(DSPLLE, m_xregs[R11].guest_reg == DSP_REG_NONE, "wrong xreg state for %d", R11); - _assert_msg_(DSPLLE, m_xregs[R12].guest_reg == DSP_REG_NONE, "wrong xreg state for %d", R12); - _assert_msg_(DSPLLE, m_xregs[R13].guest_reg == DSP_REG_NONE, "wrong xreg state for %d", R13); - _assert_msg_(DSPLLE, m_xregs[R14].guest_reg == DSP_REG_NONE, "wrong xreg state for %d", R14); - _assert_msg_(DSPLLE, m_xregs[R15].guest_reg == DSP_REG_STATIC, "wrong xreg state for %d", R15); + ASSERT_MSG(DSPLLE, m_xregs[R10].guest_reg == DSP_REG_NONE, "wrong xreg state for %d", R10); + ASSERT_MSG(DSPLLE, m_xregs[R11].guest_reg == DSP_REG_NONE, "wrong xreg state for %d", R11); + ASSERT_MSG(DSPLLE, m_xregs[R12].guest_reg == DSP_REG_NONE, "wrong xreg state for %d", R12); + ASSERT_MSG(DSPLLE, m_xregs[R13].guest_reg == DSP_REG_NONE, "wrong xreg state for %d", R13); + ASSERT_MSG(DSPLLE, m_xregs[R14].guest_reg == DSP_REG_NONE, "wrong xreg state for %d", R14); + ASSERT_MSG(DSPLLE, m_xregs[R15].guest_reg == DSP_REG_STATIC, "wrong xreg state for %d", R15); m_use_ctr = 0; } @@ -398,7 +398,7 @@ void DSPJitRegCache::SaveRegs() MovToMemory(i); } - _assert_msg_(DSPLLE, !m_regs[i].loc.IsSimpleReg(), "register %zu is still a simple reg", i); + ASSERT_MSG(DSPLLE, !m_regs[i].loc.IsSimpleReg(), "register %zu is still a simple reg", i); } } @@ -413,7 +413,7 @@ void DSPJitRegCache::PushRegs() MovToMemory(i); } - _assert_msg_(DSPLLE, !m_regs[i].loc.IsSimpleReg(), "register %zu is still a simple reg", i); + ASSERT_MSG(DSPLLE, !m_regs[i].loc.IsSimpleReg(), "register %zu is still a simple reg", i); } int push_count = 0; @@ -438,9 +438,9 @@ void DSPJitRegCache::PushRegs() m_xregs[i].guest_reg = DSP_REG_NONE; } - _assert_msg_(DSPLLE, - m_xregs[i].guest_reg == DSP_REG_NONE || m_xregs[i].guest_reg == DSP_REG_STATIC, - "register %zu is still used", i); + ASSERT_MSG(DSPLLE, + 
m_xregs[i].guest_reg == DSP_REG_NONE || m_xregs[i].guest_reg == DSP_REG_STATIC, + "register %zu is still used", i); } } @@ -481,10 +481,10 @@ X64Reg DSPJitRegCache::MakeABICallSafe(X64Reg reg) void DSPJitRegCache::MovToHostReg(size_t reg, X64Reg host_reg, bool load) { - _assert_msg_(DSPLLE, reg < m_regs.size(), "bad register name %zu", reg); - _assert_msg_(DSPLLE, m_regs[reg].parentReg == DSP_REG_NONE, "register %zu is proxy for %d", reg, - m_regs[reg].parentReg); - _assert_msg_(DSPLLE, !m_regs[reg].used, "moving to host reg in use guest reg %zu", reg); + ASSERT_MSG(DSPLLE, reg < m_regs.size(), "bad register name %zu", reg); + ASSERT_MSG(DSPLLE, m_regs[reg].parentReg == DSP_REG_NONE, "register %zu is proxy for %d", reg, + m_regs[reg].parentReg); + ASSERT_MSG(DSPLLE, !m_regs[reg].used, "moving to host reg in use guest reg %zu", reg); X64Reg old_reg = m_regs[reg].loc.GetSimpleReg(); if (old_reg == host_reg) { @@ -510,7 +510,7 @@ void DSPJitRegCache::MovToHostReg(size_t reg, X64Reg host_reg, bool load) m_emitter.MOV(64, R(host_reg), m_regs[reg].loc); break; default: - _assert_msg_(DSPLLE, 0, "unsupported memory size"); + ASSERT_MSG(DSPLLE, 0, "unsupported memory size"); break; } } @@ -524,10 +524,10 @@ void DSPJitRegCache::MovToHostReg(size_t reg, X64Reg host_reg, bool load) void DSPJitRegCache::MovToHostReg(size_t reg, bool load) { - _assert_msg_(DSPLLE, reg < m_regs.size(), "bad register name %zu", reg); - _assert_msg_(DSPLLE, m_regs[reg].parentReg == DSP_REG_NONE, "register %zu is proxy for %d", reg, - m_regs[reg].parentReg); - _assert_msg_(DSPLLE, !m_regs[reg].used, "moving to host reg in use guest reg %zu", reg); + ASSERT_MSG(DSPLLE, reg < m_regs.size(), "bad register name %zu", reg); + ASSERT_MSG(DSPLLE, m_regs[reg].parentReg == DSP_REG_NONE, "register %zu is proxy for %d", reg, + m_regs[reg].parentReg); + ASSERT_MSG(DSPLLE, !m_regs[reg].used, "moving to host reg in use guest reg %zu", reg); if (m_regs[reg].loc.IsSimpleReg()) { @@ -554,11 +554,11 @@ void 
DSPJitRegCache::MovToHostReg(size_t reg, bool load) void DSPJitRegCache::RotateHostReg(size_t reg, int shift, bool emit) { - _assert_msg_(DSPLLE, reg < m_regs.size(), "bad register name %zu", reg); - _assert_msg_(DSPLLE, m_regs[reg].parentReg == DSP_REG_NONE, "register %zu is proxy for %d", reg, - m_regs[reg].parentReg); - _assert_msg_(DSPLLE, m_regs[reg].loc.IsSimpleReg(), "register %zu is not a simple reg", reg); - _assert_msg_(DSPLLE, !m_regs[reg].used, "rotating in use guest reg %zu", reg); + ASSERT_MSG(DSPLLE, reg < m_regs.size(), "bad register name %zu", reg); + ASSERT_MSG(DSPLLE, m_regs[reg].parentReg == DSP_REG_NONE, "register %zu is proxy for %d", reg, + m_regs[reg].parentReg); + ASSERT_MSG(DSPLLE, m_regs[reg].loc.IsSimpleReg(), "register %zu is not a simple reg", reg); + ASSERT_MSG(DSPLLE, !m_regs[reg].used, "rotating in use guest reg %zu", reg); if (shift > m_regs[reg].shift && emit) { @@ -595,10 +595,10 @@ void DSPJitRegCache::RotateHostReg(size_t reg, int shift, bool emit) void DSPJitRegCache::MovToMemory(size_t reg) { - _assert_msg_(DSPLLE, reg < m_regs.size(), "bad register name %zu", reg); - _assert_msg_(DSPLLE, m_regs[reg].parentReg == DSP_REG_NONE, "register %zu is proxy for %d", reg, - m_regs[reg].parentReg); - _assert_msg_(DSPLLE, !m_regs[reg].used, "moving to memory in use guest reg %zu", reg); + ASSERT_MSG(DSPLLE, reg < m_regs.size(), "bad register name %zu", reg); + ASSERT_MSG(DSPLLE, m_regs[reg].parentReg == DSP_REG_NONE, "register %zu is proxy for %d", reg, + m_regs[reg].parentReg); + ASSERT_MSG(DSPLLE, !m_regs[reg].used, "moving to memory in use guest reg %zu", reg); if (m_regs[reg].used) { @@ -620,7 +620,7 @@ void DSPJitRegCache::MovToMemory(size_t reg) // TODO: Immediates? 
} - _assert_msg_(DSPLLE, m_regs[reg].shift == 0, "still shifted??"); + ASSERT_MSG(DSPLLE, m_regs[reg].shift == 0, "still shifted??"); // move to mem OpArg tmp = m_regs[reg].mem; @@ -639,7 +639,7 @@ void DSPJitRegCache::MovToMemory(size_t reg) m_emitter.MOV(64, tmp, m_regs[reg].loc); break; default: - _assert_msg_(DSPLLE, 0, "unsupported memory size"); + ASSERT_MSG(DSPLLE, 0, "unsupported memory size"); break; } m_regs[reg].dirty = false; @@ -678,7 +678,7 @@ OpArg DSPJitRegCache::GetReg(int reg, bool load) shift = 0; } - _assert_msg_(DSPLLE, !m_regs[real_reg].used, "register %d already in use", real_reg); + ASSERT_MSG(DSPLLE, !m_regs[real_reg].used, "register %d already in use", real_reg); if (m_regs[real_reg].used) { @@ -689,7 +689,7 @@ OpArg DSPJitRegCache::GetReg(int reg, bool load) MovToHostReg(real_reg, load); // TODO: actually handle INVALID_REG - _assert_msg_(DSPLLE, m_regs[real_reg].loc.IsSimpleReg(), "did not get host reg for %d", reg); + ASSERT_MSG(DSPLLE, m_regs[real_reg].loc.IsSimpleReg(), "did not get host reg for %d", reg); RotateHostReg(real_reg, shift, load); const OpArg oparg = m_regs[real_reg].loc; @@ -815,7 +815,7 @@ void DSPJitRegCache::ReadReg(int sreg, X64Reg host_dreg, RegisterExtension exten m_emitter.MOV(64, R(host_dreg), reg); break; default: - _assert_msg_(DSPLLE, 0, "unsupported memory size"); + ASSERT_MSG(DSPLLE, 0, "unsupported memory size"); break; } PutReg(sreg, false); @@ -845,7 +845,7 @@ void DSPJitRegCache::WriteReg(int dreg, OpArg arg) } break; default: - _assert_msg_(DSPLLE, 0, "unsupported memory size"); + ASSERT_MSG(DSPLLE, 0, "unsupported memory size"); break; } } @@ -863,7 +863,7 @@ void DSPJitRegCache::WriteReg(int dreg, OpArg arg) m_emitter.MOV(64, reg, arg); break; default: - _assert_msg_(DSPLLE, 0, "unsupported memory size"); + ASSERT_MSG(DSPLLE, 0, "unsupported memory size"); break; } } @@ -910,16 +910,16 @@ void DSPJitRegCache::SpillXReg(X64Reg reg) { if (m_xregs[reg].guest_reg <= DSP_REG_MAX_MEM_BACKED) { - 
_assert_msg_(DSPLLE, !m_regs[m_xregs[reg].guest_reg].used, - "to be spilled host reg %x(guest reg %zx) still in use!", reg, - m_xregs[reg].guest_reg); + ASSERT_MSG(DSPLLE, !m_regs[m_xregs[reg].guest_reg].used, + "to be spilled host reg %x(guest reg %zx) still in use!", reg, + m_xregs[reg].guest_reg); MovToMemory(m_xregs[reg].guest_reg); } else { - _assert_msg_(DSPLLE, m_xregs[reg].guest_reg == DSP_REG_NONE, - "to be spilled host reg %x still in use!", reg); + ASSERT_MSG(DSPLLE, m_xregs[reg].guest_reg == DSP_REG_NONE, + "to be spilled host reg %x still in use!", reg); } } @@ -950,7 +950,7 @@ X64Reg DSPJitRegCache::GetFreeXReg() { X64Reg reg = FindSpillFreeXReg(); - _assert_msg_(DSPLLE, reg != INVALID_REG, "could not find register"); + ASSERT_MSG(DSPLLE, reg != INVALID_REG, "could not find register"); if (reg == INVALID_REG) { m_emitter.INT3(); @@ -972,7 +972,7 @@ void DSPJitRegCache::GetXReg(X64Reg reg) { SpillXReg(reg); } - _assert_msg_(DSPLLE, m_xregs[reg].guest_reg == DSP_REG_NONE, "register already in use"); + ASSERT_MSG(DSPLLE, m_xregs[reg].guest_reg == DSP_REG_NONE, "register already in use"); m_xregs[reg].guest_reg = DSP_REG_USED; } @@ -984,7 +984,7 @@ void DSPJitRegCache::PutXReg(X64Reg reg) return; } - _assert_msg_(DSPLLE, m_xregs[reg].guest_reg == DSP_REG_USED, "PutXReg without get(Free)XReg"); + ASSERT_MSG(DSPLLE, m_xregs[reg].guest_reg == DSP_REG_USED, "PutXReg without get(Free)XReg"); m_xregs[reg].guest_reg = DSP_REG_NONE; } diff --git a/Source/Core/Core/FifoPlayer/FifoAnalyzer.cpp b/Source/Core/Core/FifoPlayer/FifoAnalyzer.cpp index 4773d7ab38..d1302f24fc 100644 --- a/Source/Core/Core/FifoPlayer/FifoAnalyzer.cpp +++ b/Source/Core/Core/FifoPlayer/FifoAnalyzer.cpp @@ -102,7 +102,7 @@ u32 AnalyzeCommand(const u8* data, DecodeMode mode) // The recorder should have expanded display lists into the fifo stream and skipped the call to // start them // That is done to make it easier to track where memory is updated - _assert_(false); + ASSERT(false); data += 8; 
break; @@ -170,17 +170,17 @@ void LoadCPReg(u32 subCmd, u32 value, CPMemory& cpMem) break; case 0x70: - _assert_((subCmd & 0x0F) < 8); + ASSERT((subCmd & 0x0F) < 8); cpMem.vtxAttr[subCmd & 7].g0.Hex = value; break; case 0x80: - _assert_((subCmd & 0x0F) < 8); + ASSERT((subCmd & 0x0F) < 8); cpMem.vtxAttr[subCmd & 7].g1.Hex = value; break; case 0x90: - _assert_((subCmd & 0x0F) < 8); + ASSERT((subCmd & 0x0F) < 8); cpMem.vtxAttr[subCmd & 7].g2.Hex = value; break; @@ -267,7 +267,7 @@ void CalculateVertexElementSizes(int sizes[], int vatIndex, const CPMemory& cpMe size = 4; break; default: - _assert_(0); + ASSERT(0); break; } break; diff --git a/Source/Core/Core/FifoPlayer/FifoPlayer.cpp b/Source/Core/Core/FifoPlayer/FifoPlayer.cpp index 7936951f31..3e6e8d367d 100644 --- a/Source/Core/Core/FifoPlayer/FifoPlayer.cpp +++ b/Source/Core/Core/FifoPlayer/FifoPlayer.cpp @@ -328,7 +328,7 @@ void FifoPlayer::WriteFramePart(u32 dataStart, u32 dataEnd, u32& nextMemUpdate, void FifoPlayer::WriteAllMemoryUpdates() { - _assert_(m_File); + ASSERT(m_File); for (u32 frameNum = 0; frameNum < m_File->GetFrameCount(); ++frameNum) { diff --git a/Source/Core/Core/HLE/HLE.cpp b/Source/Core/Core/HLE/HLE.cpp index d3291a9a4d..51699ffd21 100644 --- a/Source/Core/Core/HLE/HLE.cpp +++ b/Source/Core/Core/HLE/HLE.cpp @@ -181,9 +181,6 @@ void Execute(u32 _CurrentPC, u32 _Instruction) { PanicAlert("HLE system tried to call an undefined HLE function %i.", FunctionIndex); } - - // _dbg_assert_msg_(HLE,NPC == LR, "Broken HLE function (doesn't set NPC)", - // OSPatches[pos].m_szPatchName); } u32 GetFunctionIndex(u32 address) diff --git a/Source/Core/Core/HW/DVD/DVDInterface.cpp b/Source/Core/Core/HW/DVD/DVDInterface.cpp index 5c3704667b..14a6318e7a 100644 --- a/Source/Core/Core/HW/DVD/DVDInterface.cpp +++ b/Source/Core/Core/HW/DVD/DVDInterface.cpp @@ -382,7 +382,7 @@ static void DTKStreamingCallback(const std::vector& audio_data, s64 cycles_l void Init() { - _assert_(!IsDiscInside()); + 
ASSERT(!IsDiscInside()); DVDThread::Start(); @@ -535,7 +535,7 @@ void RegisterMMIO(MMIO::Mapping* mmio, u32 base) if (s_DISR.BREAK) { - _dbg_assert_(DVDINTERFACE, 0); + DEBUG_ASSERT(DVDINTERFACE, 0); } UpdateInterrupts(); diff --git a/Source/Core/Core/HW/DVD/DVDThread.cpp b/Source/Core/Core/HW/DVD/DVDThread.cpp index f695d122a1..762b07c01d 100644 --- a/Source/Core/Core/HW/DVD/DVDThread.cpp +++ b/Source/Core/Core/HW/DVD/DVDThread.cpp @@ -107,7 +107,7 @@ void Start() static void StartDVDThread() { - _assert_(!s_dvd_thread.joinable()); + ASSERT(!s_dvd_thread.joinable()); s_dvd_thread_exiting.Clear(); s_dvd_thread = std::thread(DVDThread); } @@ -120,7 +120,7 @@ void Stop() static void StopDVDThread() { - _assert_(s_dvd_thread.joinable()); + ASSERT(s_dvd_thread.joinable()); // By setting s_DVD_thread_exiting, we ask the DVD thread to cleanly exit. // In case the request queue is empty, we need to set s_request_queue_expanded @@ -224,7 +224,7 @@ bool UpdateRunningGameMetadata(const DiscIO::Partition& partition, std::optional void WaitUntilIdle() { - _assert_(Core::IsCPUThread()); + ASSERT(Core::IsCPUThread()); while (!s_request_queue.Empty()) s_result_queue_expanded.Wait(); @@ -251,7 +251,7 @@ static void StartReadInternal(bool copy_to_ram, u32 output_address, u64 dvd_offs const DiscIO::Partition& partition, DVDInterface::ReplyType reply_type, s64 ticks_until_completion) { - _assert_(Core::IsCPUThread()); + ASSERT(Core::IsCPUThread()); ReadRequest request; diff --git a/Source/Core/Core/HW/EXI/EXI.cpp b/Source/Core/Core/HW/EXI/EXI.cpp index a983e98eba..20d2e7b6f7 100644 --- a/Source/Core/Core/HW/EXI/EXI.cpp +++ b/Source/Core/Core/HW/EXI/EXI.cpp @@ -107,7 +107,7 @@ void RegisterMMIO(MMIO::Mapping* mmio, u32 base) { for (int i = 0; i < MAX_EXI_CHANNELS; ++i) { - _dbg_assert_(EXPANSIONINTERFACE, g_Channels[i] != nullptr); + DEBUG_ASSERT(EXPANSIONINTERFACE, g_Channels[i] != nullptr); // Each channel has 5 32 bit registers assigned to it. 
We offset the // base that we give to each channel for registration. // diff --git a/Source/Core/Core/HW/EXI/EXI_Channel.cpp b/Source/Core/Core/HW/EXI/EXI_Channel.cpp index 0232a99198..df37e19521 100644 --- a/Source/Core/Core/HW/EXI/EXI_Channel.cpp +++ b/Source/Core/Core/HW/EXI/EXI_Channel.cpp @@ -118,7 +118,7 @@ void CEXIChannel::RegisterMMIO(MMIO::Mapping* mmio, u32 base) device->ImmReadWrite(m_imm_data, m_control.TLEN + 1); break; default: - _dbg_assert_msg_(EXPANSIONINTERFACE, 0, + DEBUG_ASSERT_MSG(EXPANSIONINTERFACE, 0, "EXI Imm: Unknown transfer type %i", m_control.RW); } } @@ -134,7 +134,7 @@ void CEXIChannel::RegisterMMIO(MMIO::Mapping* mmio, u32 base) device->DMAWrite(m_dma_memory_address, m_dma_length); break; default: - _dbg_assert_msg_(EXPANSIONINTERFACE, 0, + DEBUG_ASSERT_MSG(EXPANSIONINTERFACE, 0, "EXI DMA: Unknown transfer type %i", m_control.RW); } } @@ -172,7 +172,7 @@ void CEXIChannel::AddDevice(const TEXIDevices device_type, const int device_num) void CEXIChannel::AddDevice(std::unique_ptr device, const int device_num, bool notify_presence_changed) { - _dbg_assert_(EXPANSIONINTERFACE, device_num < NUM_DEVICES); + DEBUG_ASSERT(EXPANSIONINTERFACE, device_num < NUM_DEVICES); // Replace it with the new one m_devices[device_num] = std::move(device); diff --git a/Source/Core/Core/HW/EXI/EXI_DeviceAD16.cpp b/Source/Core/Core/HW/EXI/EXI_DeviceAD16.cpp index 6154563b50..ca75ca8fcd 100644 --- a/Source/Core/Core/HW/EXI/EXI_DeviceAD16.cpp +++ b/Source/Core/Core/HW/EXI/EXI_DeviceAD16.cpp @@ -39,7 +39,7 @@ void CEXIAD16::TransferByte(u8& byte) switch (m_position) { case 1: - _dbg_assert_(EXPANSIONINTERFACE, byte == 0x00); + DEBUG_ASSERT(EXPANSIONINTERFACE, byte == 0x00); break; // just skip case 2: byte = m_ad16_register.U8[0]; diff --git a/Source/Core/Core/HW/EXI/EXI_DeviceIPL.cpp b/Source/Core/Core/HW/EXI/EXI_DeviceIPL.cpp index 69f2b6fc91..79a5a8027b 100644 --- a/Source/Core/Core/HW/EXI/EXI_DeviceIPL.cpp +++ b/Source/Core/Core/HW/EXI/EXI_DeviceIPL.cpp @@ 
-302,7 +302,7 @@ void CEXIIPL::TransferByte(u8& _uByte) else { device_name = "illegal address"; - _dbg_assert_msg_(EXPANSIONINTERFACE, 0, "EXI IPL-DEV: %s %08x", device_name.c_str(), + DEBUG_ASSERT_MSG(EXPANSIONINTERFACE, 0, "EXI IPL-DEV: %s %08x", device_name.c_str(), m_uAddress); } break; @@ -431,7 +431,7 @@ u32 CEXIIPL::GetEmulatedTime(u32 epoch) } else { - _assert_(!Core::WantsDeterminism()); + ASSERT(!Core::WantsDeterminism()); ltime = Common::Timer::GetLocalTimeSinceJan1970() - SystemTimers::GetLocalTimeRTCOffset(); } diff --git a/Source/Core/Core/HW/EXI/EXI_DeviceMemoryCard.cpp b/Source/Core/Core/HW/EXI/EXI_DeviceMemoryCard.cpp index 0addf79c7d..f18b45b5ec 100644 --- a/Source/Core/Core/HW/EXI/EXI_DeviceMemoryCard.cpp +++ b/Source/Core/Core/HW/EXI/EXI_DeviceMemoryCard.cpp @@ -104,8 +104,8 @@ void CEXIMemoryCard::Shutdown() CEXIMemoryCard::CEXIMemoryCard(const int index, bool gciFolder) : card_index(index) { - _assert_msg_(EXPANSIONINTERFACE, static_cast(index) < s_et_cmd_done.size(), - "Trying to create invalid memory card index %d.", index); + ASSERT_MSG(EXPANSIONINTERFACE, static_cast(index) < s_et_cmd_done.size(), + "Trying to create invalid memory card index %d.", index); // NOTE: When loading a save state, DMA completion callbacks (s_et_transfer_complete) and such // may have been restored, we need to anticipate those arriving. 
diff --git a/Source/Core/Core/HW/GCMemcard/GCMemcardDirectory.cpp b/Source/Core/Core/HW/GCMemcard/GCMemcardDirectory.cpp index ae90bcd5d3..38e24eca42 100644 --- a/Source/Core/Core/HW/GCMemcard/GCMemcardDirectory.cpp +++ b/Source/Core/Core/HW/GCMemcard/GCMemcardDirectory.cpp @@ -216,7 +216,7 @@ s32 GCMemcardDirectory::Read(u32 src_address, s32 length, u8* dest_address) length -= extra; // verify that we haven't calculated a length beyond BLOCK_SIZE - _dbg_assert_msg_(EXPANSIONINTERFACE, (src_address + length) % BLOCK_SIZE == 0, + DEBUG_ASSERT_MSG(EXPANSIONINTERFACE, (src_address + length) % BLOCK_SIZE == 0, "Memcard directory Read Logic Error"); } @@ -276,7 +276,7 @@ s32 GCMemcardDirectory::Write(u32 dest_address, s32 length, const u8* src_addres length -= extra; // verify that we haven't calculated a length beyond BLOCK_SIZE - _dbg_assert_msg_(EXPANSIONINTERFACE, (dest_address + length) % BLOCK_SIZE == 0, + DEBUG_ASSERT_MSG(EXPANSIONINTERFACE, (dest_address + length) % BLOCK_SIZE == 0, "Memcard directory Write Logic Error"); } if (m_last_block != block) diff --git a/Source/Core/Core/HW/MMIO.cpp b/Source/Core/Core/HW/MMIO.cpp index 5f52ef3392..e8f3bd88d9 100644 --- a/Source/Core/Core/HW/MMIO.cpp +++ b/Source/Core/Core/HW/MMIO.cpp @@ -148,7 +148,7 @@ private: std::function InvalidReadLambda() const { return [](u32) { - _dbg_assert_msg_(MEMMAP, 0, "Called the read lambda on a write " + DEBUG_ASSERT_MSG(MEMMAP, 0, "Called the read lambda on a write " "complex handler."); return 0; }; @@ -157,7 +157,7 @@ private: std::function InvalidWriteLambda() const { return [](u32, T) { - _dbg_assert_msg_(MEMMAP, 0, "Called the write lambda on a read " + DEBUG_ASSERT_MSG(MEMMAP, 0, "Called the write lambda on a read " "complex handler."); }; } diff --git a/Source/Core/Core/HW/MMIO.h b/Source/Core/Core/HW/MMIO.h index 8ee706a1ab..8eec0443e2 100644 --- a/Source/Core/Core/HW/MMIO.h +++ b/Source/Core/Core/HW/MMIO.h @@ -62,7 +62,7 @@ inline bool IsMMIOAddress(u32 address) // The block 
ID can easily be computed by simply checking bit 24 (CC vs. CD). inline u32 UniqueID(u32 address) { - _dbg_assert_msg_(MEMMAP, ((address & 0xFFFF0000) == 0x0C000000) || + DEBUG_ASSERT_MSG(MEMMAP, ((address & 0xFFFF0000) == 0x0C000000) || ((address & 0xFFFF0000) == 0x0D000000) || ((address & 0xFFFF0000) == 0x0D800000), "Trying to get the ID of a non-existing MMIO address."); @@ -210,13 +210,13 @@ private: template <> inline u64 Mapping::Read(u32 addr) { - _dbg_assert_(MEMMAP, 0); + DEBUG_ASSERT(MEMMAP, 0); return 0; } template <> inline void Mapping::Write(u32 addr, u64 val) { - _dbg_assert_(MEMMAP, 0); + DEBUG_ASSERT(MEMMAP, 0); } } diff --git a/Source/Core/Core/HW/ProcessorInterface.cpp b/Source/Core/Core/HW/ProcessorInterface.cpp index 30b5fad71c..42e2924157 100644 --- a/Source/Core/Core/HW/ProcessorInterface.cpp +++ b/Source/Core/Core/HW/ProcessorInterface.cpp @@ -178,7 +178,7 @@ static const char* Debug_GetInterruptName(u32 _causemask) void SetInterrupt(u32 _causemask, bool _bSet) { - _dbg_assert_msg_(POWERPC, Core::IsCPUThread(), "SetInterrupt from wrong thread"); + DEBUG_ASSERT_MSG(POWERPC, Core::IsCPUThread(), "SetInterrupt from wrong thread"); if (_bSet && !(m_InterruptCause & _causemask)) { diff --git a/Source/Core/Core/HW/WiimoteReal/IOhidapi.cpp b/Source/Core/Core/HW/WiimoteReal/IOhidapi.cpp index f4f3e065cd..a72f4e16c3 100644 --- a/Source/Core/Core/HW/WiimoteReal/IOhidapi.cpp +++ b/Source/Core/Core/HW/WiimoteReal/IOhidapi.cpp @@ -38,7 +38,7 @@ namespace WiimoteReal WiimoteScannerHidapi::WiimoteScannerHidapi() { int ret = hid_init(); - _assert_msg_(WIIMOTE, ret == 0, "Couldn't initialise hidapi."); + ASSERT_MSG(WIIMOTE, ret == 0, "Couldn't initialise hidapi."); } WiimoteScannerHidapi::~WiimoteScannerHidapi() @@ -134,7 +134,7 @@ int WiimoteHidapi::IORead(u8* buf) int WiimoteHidapi::IOWrite(const u8* buf, size_t len) { - _dbg_assert_(WIIMOTE, buf[0] == (WR_SET_REPORT | BT_OUTPUT)); + DEBUG_ASSERT(WIIMOTE, buf[0] == (WR_SET_REPORT | BT_OUTPUT)); int result 
= hid_write(m_handle, buf + 1, len - 1); if (result == -1) { diff --git a/Source/Core/Core/IOS/DI/DI.cpp b/Source/Core/Core/IOS/DI/DI.cpp index af8b5343bd..e8808f4df5 100644 --- a/Source/Core/Core/IOS/DI/DI.cpp +++ b/Source/Core/Core/IOS/DI/DI.cpp @@ -98,8 +98,8 @@ IPCCommandResult DI::IOCtlV(const IOCtlVRequest& request) { case DVDInterface::DVDLowOpenPartition: { - _dbg_assert_msg_(IOS_DI, request.in_vectors[1].address == 0, "DVDLowOpenPartition with ticket"); - _dbg_assert_msg_(IOS_DI, request.in_vectors[2].address == 0, + DEBUG_ASSERT_MSG(IOS_DI, request.in_vectors[1].address == 0, "DVDLowOpenPartition with ticket"); + DEBUG_ASSERT_MSG(IOS_DI, request.in_vectors[2].address == 0, "DVDLowOpenPartition with cert chain"); const u64 partition_offset = diff --git a/Source/Core/Core/IOS/Device.cpp b/Source/Core/Core/IOS/Device.cpp index f0f81b0692..b0301205aa 100644 --- a/Source/Core/Core/IOS/Device.cpp +++ b/Source/Core/Core/IOS/Device.cpp @@ -80,7 +80,7 @@ IOCtlVRequest::IOCtlVRequest(const u32 address_) : Request(address_) const IOCtlVRequest::IOVector* IOCtlVRequest::GetVector(size_t index) const { - _assert_(index < (in_vectors.size() + io_vectors.size())); + ASSERT(index < (in_vectors.size() + io_vectors.size())); if (index < in_vectors.size()) return &in_vectors[index]; return &io_vectors[index - in_vectors.size()]; diff --git a/Source/Core/Core/IOS/ES/Formats.cpp b/Source/Core/Core/IOS/ES/Formats.cpp index 0f5af9833c..d5f5ee17f0 100644 --- a/Source/Core/Core/IOS/ES/Formats.cpp +++ b/Source/Core/Core/IOS/ES/Formats.cpp @@ -376,7 +376,7 @@ std::vector TicketReader::GetRawTicketView(u32 ticket_num) const // Copy the rest of the ticket view structure from the ticket. 
view.insert(view.end(), view_start, view_start + (sizeof(TicketView) - sizeof(version))); - _assert_(view.size() == sizeof(TicketView)); + ASSERT(view.size() == sizeof(TicketView)); return view; } diff --git a/Source/Core/Core/IOS/FS/FS.cpp b/Source/Core/Core/IOS/FS/FS.cpp index 6b1f696215..a30ef7d9dc 100644 --- a/Source/Core/Core/IOS/FS/FS.cpp +++ b/Source/Core/Core/IOS/FS/FS.cpp @@ -230,7 +230,7 @@ IPCCommandResult FS::GetStats(const IOCtlRequest& request) IPCCommandResult FS::CreateDirectory(const IOCtlRequest& request) { - _dbg_assert_(IOS_FILEIO, request.buffer_out_size == 0); + DEBUG_ASSERT(IOS_FILEIO, request.buffer_out_size == 0); u32 Addr = request.buffer_in; u32 OwnerID = Memory::Read_U32(Addr); @@ -255,7 +255,7 @@ IPCCommandResult FS::CreateDirectory(const IOCtlRequest& request) DirName += DIR_SEP; File::CreateFullPath(DirName); - _dbg_assert_msg_(IOS_FILEIO, File::IsDirectory(DirName), "FS: CREATE_DIR %s failed", + DEBUG_ASSERT_MSG(IOS_FILEIO, File::IsDirectory(DirName), "FS: CREATE_DIR %s failed", DirName.c_str()); return GetFSReply(IPC_SUCCESS); @@ -301,7 +301,7 @@ IPCCommandResult FS::SetAttribute(const IOCtlRequest& request) IPCCommandResult FS::GetAttribute(const IOCtlRequest& request) { - _dbg_assert_msg_(IOS_FILEIO, request.buffer_out_size == 76, + DEBUG_ASSERT_MSG(IOS_FILEIO, request.buffer_out_size == 76, " GET_ATTR needs an 76 bytes large output buffer but it is %i bytes large", request.buffer_out_size); @@ -377,7 +377,7 @@ IPCCommandResult FS::GetAttribute(const IOCtlRequest& request) IPCCommandResult FS::DeleteFile(const IOCtlRequest& request) { - _dbg_assert_(IOS_FILEIO, request.buffer_out_size == 0); + DEBUG_ASSERT(IOS_FILEIO, request.buffer_out_size == 0); int Offset = 0; const std::string wii_path = Memory::GetString(request.buffer_in + Offset, 64); @@ -407,7 +407,7 @@ IPCCommandResult FS::DeleteFile(const IOCtlRequest& request) IPCCommandResult FS::RenameFile(const IOCtlRequest& request) { - _dbg_assert_(IOS_FILEIO, 
request.buffer_out_size == 0); + DEBUG_ASSERT(IOS_FILEIO, request.buffer_out_size == 0); int Offset = 0; const std::string wii_path = Memory::GetString(request.buffer_in + Offset, 64); @@ -454,7 +454,7 @@ IPCCommandResult FS::RenameFile(const IOCtlRequest& request) IPCCommandResult FS::CreateFile(const IOCtlRequest& request) { - _dbg_assert_(IOS_FILEIO, request.buffer_out_size == 0); + DEBUG_ASSERT(IOS_FILEIO, request.buffer_out_size == 0); u32 Addr = request.buffer_in; u32 OwnerID = Memory::Read_U32(Addr); @@ -600,9 +600,9 @@ IPCCommandResult FS::ReadDirectory(const IOCtlVRequest& request) IPCCommandResult FS::GetUsage(const IOCtlVRequest& request) { - _dbg_assert_(IOS_FILEIO, request.io_vectors.size() == 2); - _dbg_assert_(IOS_FILEIO, request.io_vectors[0].size == 4); - _dbg_assert_(IOS_FILEIO, request.io_vectors[1].size == 4); + DEBUG_ASSERT(IOS_FILEIO, request.io_vectors.size() == 2); + DEBUG_ASSERT(IOS_FILEIO, request.io_vectors[0].size == 4); + DEBUG_ASSERT(IOS_FILEIO, request.io_vectors[1].size == 4); // this command sucks because it asks of the number of used // fsBlocks and inodes diff --git a/Source/Core/Core/IOS/FS/FileIO.cpp b/Source/Core/Core/IOS/FS/FileIO.cpp index ac56846df4..d7e3cb3f44 100644 --- a/Source/Core/Core/IOS/FS/FileIO.cpp +++ b/Source/Core/Core/IOS/FS/FileIO.cpp @@ -33,7 +33,7 @@ std::string BuildFilename(const std::string& wii_path) if (wii_path.compare(0, 1, "/") == 0) return nand_path + Common::EscapePath(wii_path); - _assert_(false); + ASSERT(false); return nand_path; } diff --git a/Source/Core/Core/IOS/IOS.cpp b/Source/Core/Core/IOS/IOS.cpp index a41effe7b7..975bdd8268 100644 --- a/Source/Core/Core/IOS/IOS.cpp +++ b/Source/Core/Core/IOS/IOS.cpp @@ -180,7 +180,7 @@ Kernel::Kernel() { // Until the Wii root and NAND path stuff is entirely managed by IOS and made non-static, // using more than one IOS instance at a time is not supported. 
- _assert_(GetIOS() == nullptr); + ASSERT(GetIOS() == nullptr); Core::InitializeWiiRoot(false); m_is_responsible_for_nand_root = true; AddCoreDevices(); @@ -362,7 +362,7 @@ bool Kernel::BootIOS(const u64 ios_title_id, const std::string& boot_content_pat void Kernel::AddDevice(std::unique_ptr device) { - _assert_(device->GetDeviceType() == Device::Device::DeviceType::Static); + ASSERT(device->GetDeviceType() == Device::Device::DeviceType::Static); m_device_map[device->GetDeviceName()] = std::move(device); } @@ -546,7 +546,7 @@ IPCCommandResult Kernel::HandleIPCCommand(const Request& request) ret = device->IOCtlV(IOCtlVRequest{request.address}); break; default: - _assert_msg_(IOS, false, "Unexpected command: %x", request.command); + ASSERT_MSG(IOS, false, "Unexpected command: %x", request.command); ret = Device::Device::GetDefaultReply(IPC_EINVAL); break; } diff --git a/Source/Core/Core/IOS/IOSC.cpp b/Source/Core/Core/IOS/IOSC.cpp index bfe7024ddf..ec17caf0de 100644 --- a/Source/Core/Core/IOS/IOSC.cpp +++ b/Source/Core/Core/IOS/IOSC.cpp @@ -217,7 +217,7 @@ ReturnCode IOSC::ImportPublicKey(Handle dest_handle, const u8* public_key, if (dest_entry->subtype == SUBTYPE_RSA2048 || dest_entry->subtype == SUBTYPE_RSA4096) { - _assert_(public_key_exponent); + ASSERT(public_key_exponent); std::memcpy(&dest_entry->misc_data, public_key_exponent, 4); } return IPC_SUCCESS; @@ -310,7 +310,7 @@ ReturnCode IOSC::VerifyPublicKeySign(const std::array& sha1, Handle sign case SUBTYPE_RSA4096: { const size_t expected_key_size = entry->subtype == SUBTYPE_RSA2048 ? 
0x100 : 0x200; - _assert_(entry->data.size() == expected_key_size); + ASSERT(entry->data.size() == expected_key_size); mbedtls_rsa_context rsa; mbedtls_rsa_init(&rsa, MBEDTLS_RSA_PKCS_V15, 0); @@ -569,7 +569,7 @@ void IOSC::LoadDefaultEntries(ConsoleType console_type) 3}; break; default: - _assert_msg_(IOS, false, "Unknown console type"); + ASSERT_MSG(IOS, false, "Unknown console type"); break; } @@ -658,7 +658,7 @@ const IOSC::KeyEntry* IOSC::FindEntry(Handle handle, SearchMode mode) const IOSC::Handle IOSC::GetHandleFromIterator(IOSC::KeyEntries::iterator iterator) const { - _assert_(iterator != m_key_entries.end()); + ASSERT(iterator != m_key_entries.end()); return static_cast(iterator - m_key_entries.begin()); } diff --git a/Source/Core/Core/IOS/Network/IP/Top.cpp b/Source/Core/Core/IOS/Network/IP/Top.cpp index b11800e12d..53eaab6510 100644 --- a/Source/Core/Core/IOS/Network/IP/Top.cpp +++ b/Source/Core/Core/IOS/Network/IP/Top.cpp @@ -691,8 +691,8 @@ IPCCommandResult NetIPTop::HandleGetHostByNameRequest(const IOCtlRequest& reques request.buffer_out + 4); // Returned struct must be ipv4. 
- _assert_msg_(IOS_NET, remoteHost->h_addrtype == AF_INET && remoteHost->h_length == sizeof(u32), - "returned host info is not IPv4"); + ASSERT_MSG(IOS_NET, remoteHost->h_addrtype == AF_INET && remoteHost->h_length == sizeof(u32), + "returned host info is not IPv4"); Memory::Write_U16(AF_INET, request.buffer_out + 8); Memory::Write_U16(sizeof(u32), request.buffer_out + 10); diff --git a/Source/Core/Core/IOS/USB/Bluetooth/BTEmu.cpp b/Source/Core/Core/IOS/USB/Bluetooth/BTEmu.cpp index 2eaaeed78a..b9ca570d58 100644 --- a/Source/Core/Core/IOS/USB/Bluetooth/BTEmu.cpp +++ b/Source/Core/Core/IOS/USB/Bluetooth/BTEmu.cpp @@ -171,8 +171,8 @@ IPCCommandResult BluetoothEmu::IOCtlV(const IOCtlVRequest& request) const auto* acl_header = reinterpret_cast(Memory::GetPointer(ctrl.data_address)); - _dbg_assert_(IOS_WIIMOTE, HCI_BC_FLAG(acl_header->con_handle) == HCI_POINT2POINT); - _dbg_assert_(IOS_WIIMOTE, HCI_PB_FLAG(acl_header->con_handle) == HCI_PACKET_START); + DEBUG_ASSERT(IOS_WIIMOTE, HCI_BC_FLAG(acl_header->con_handle) == HCI_POINT2POINT); + DEBUG_ASSERT(IOS_WIIMOTE, HCI_PB_FLAG(acl_header->con_handle) == HCI_PACKET_START); SendToDevice(HCI_CON_HANDLE(acl_header->con_handle), Memory::GetPointer(ctrl.data_address + sizeof(hci_acldata_hdr_t)), @@ -187,7 +187,7 @@ IPCCommandResult BluetoothEmu::IOCtlV(const IOCtlVRequest& request) break; } default: - _dbg_assert_msg_(IOS_WIIMOTE, 0, "Unknown USB::IOCTLV_USBV0_BLKMSG: %x", ctrl.endpoint); + DEBUG_ASSERT_MSG(IOS_WIIMOTE, 0, "Unknown USB::IOCTLV_USBV0_BLKMSG: %x", ctrl.endpoint); } break; } @@ -203,7 +203,7 @@ IPCCommandResult BluetoothEmu::IOCtlV(const IOCtlVRequest& request) } else { - _dbg_assert_msg_(IOS_WIIMOTE, 0, "Unknown USB::IOCTLV_USBV0_INTRMSG: %x", ctrl.endpoint); + DEBUG_ASSERT_MSG(IOS_WIIMOTE, 0, "Unknown USB::IOCTLV_USBV0_INTRMSG: %x", ctrl.endpoint); } break; } @@ -382,7 +382,7 @@ void BluetoothEmu::ACLPool::Store(const u8* data, const u16 size, const u16 conn return; } - _dbg_assert_msg_(IOS_WIIMOTE, size < 
ACL_PKT_SIZE, "ACL packet too large for pool"); + DEBUG_ASSERT_MSG(IOS_WIIMOTE, size < ACL_PKT_SIZE, "ACL packet too large for pool"); m_queue.push_back(Packet()); auto& packet = m_queue.back(); @@ -437,7 +437,7 @@ bool BluetoothEmu::SendEventInquiryResponse() if (m_WiiMotes.empty()) return false; - _dbg_assert_(IOS_WIIMOTE, sizeof(SHCIEventInquiryResult) - 2 + + DEBUG_ASSERT(IOS_WIIMOTE, sizeof(SHCIEventInquiryResult) - 2 + (m_WiiMotes.size() * sizeof(hci_inquiry_response)) < 256); @@ -701,7 +701,7 @@ bool BluetoothEmu::SendEventReadRemoteVerInfo(u16 _connectionHandle) void BluetoothEmu::SendEventCommandComplete(u16 opcode, const void* data, u32 data_size) { - _dbg_assert_(IOS_WIIMOTE, (sizeof(SHCIEventCommand) - 2 + data_size) < 256); + DEBUG_ASSERT(IOS_WIIMOTE, (sizeof(SHCIEventCommand) - 2 + data_size) < 256); SQueuedEvent event(sizeof(SHCIEventCommand) + data_size, 0); @@ -1117,7 +1117,7 @@ void BluetoothEmu::ExecuteHCICommandMessage(const USB::V0CtrlMessage& ctrl_messa } else { - _dbg_assert_msg_(IOS_WIIMOTE, 0, "Unknown USB_IOCTL_CTRLMSG: 0x%04X (ocf: 0x%x ogf 0x%x)", + DEBUG_ASSERT_MSG(IOS_WIIMOTE, 0, "Unknown USB_IOCTL_CTRLMSG: 0x%04X (ocf: 0x%x ogf 0x%x)", pMsg->Opcode, ocf, ogf); } break; diff --git a/Source/Core/Core/IOS/USB/Bluetooth/WiimoteDevice.cpp b/Source/Core/Core/IOS/USB/Bluetooth/WiimoteDevice.cpp index e2ba50e42f..2db637a170 100644 --- a/Source/Core/Core/IOS/USB/Bluetooth/WiimoteDevice.cpp +++ b/Source/Core/Core/IOS/USB/Bluetooth/WiimoteDevice.cpp @@ -255,7 +255,7 @@ void WiimoteDevice::ExecuteL2capCmd(u8* _pData, u32 _Size) default: { - _dbg_assert_msg_(IOS_WIIMOTE, DoesChannelExist(pHeader->dcid), + DEBUG_ASSERT_MSG(IOS_WIIMOTE, DoesChannelExist(pHeader->dcid), "L2CAP: SendACLPacket to unknown channel %i", pHeader->dcid); CChannelMap::iterator itr = m_Channel.find(pHeader->dcid); @@ -393,7 +393,7 @@ void WiimoteDevice::ReceiveConnectionResponse(u8 _Ident, u8* _pData, u32 _Size) { l2cap_con_rsp_cp* rsp = (l2cap_con_rsp_cp*)_pData; - 
_dbg_assert_(IOS_WIIMOTE, _Size == sizeof(l2cap_con_rsp_cp)); + DEBUG_ASSERT(IOS_WIIMOTE, _Size == sizeof(l2cap_con_rsp_cp)); DEBUG_LOG(IOS_WIIMOTE, "[L2CAP] ReceiveConnectionResponse"); DEBUG_LOG(IOS_WIIMOTE, " DCID: 0x%04x", rsp->dcid); @@ -401,9 +401,9 @@ void WiimoteDevice::ReceiveConnectionResponse(u8 _Ident, u8* _pData, u32 _Size) DEBUG_LOG(IOS_WIIMOTE, " Result: 0x%04x", rsp->result); DEBUG_LOG(IOS_WIIMOTE, " Status: 0x%04x", rsp->status); - _dbg_assert_(IOS_WIIMOTE, rsp->result == L2CAP_SUCCESS); - _dbg_assert_(IOS_WIIMOTE, rsp->status == L2CAP_NO_INFO); - _dbg_assert_(IOS_WIIMOTE, DoesChannelExist(rsp->scid)); + DEBUG_ASSERT(IOS_WIIMOTE, rsp->result == L2CAP_SUCCESS); + DEBUG_ASSERT(IOS_WIIMOTE, rsp->status == L2CAP_NO_INFO); + DEBUG_ASSERT(IOS_WIIMOTE, DoesChannelExist(rsp->scid)); SChannel& rChannel = m_Channel[rsp->scid]; rChannel.DCID = rsp->dcid; @@ -420,9 +420,9 @@ void WiimoteDevice::ReceiveConfigurationReq(u8 _Ident, u8* _pData, u32 _Size) u32 Offset = 0; l2cap_cfg_req_cp* pCommandConfigReq = (l2cap_cfg_req_cp*)_pData; - _dbg_assert_(IOS_WIIMOTE, pCommandConfigReq->flags == + DEBUG_ASSERT(IOS_WIIMOTE, pCommandConfigReq->flags == 0x00); // 1 means that the options are send in multi-packets - _dbg_assert_(IOS_WIIMOTE, DoesChannelExist(pCommandConfigReq->dcid)); + DEBUG_ASSERT(IOS_WIIMOTE, DoesChannelExist(pCommandConfigReq->dcid)); SChannel& rChannel = m_Channel[pCommandConfigReq->dcid]; @@ -453,7 +453,7 @@ void WiimoteDevice::ReceiveConfigurationReq(u8 _Ident, u8* _pData, u32 _Size) { case L2CAP_OPT_MTU: { - _dbg_assert_(IOS_WIIMOTE, pOptions->length == L2CAP_OPT_MTU_SIZE); + DEBUG_ASSERT(IOS_WIIMOTE, pOptions->length == L2CAP_OPT_MTU_SIZE); l2cap_cfg_opt_val_t* pMTU = (l2cap_cfg_opt_val_t*)&_pData[Offset]; rChannel.MTU = pMTU->mtu; DEBUG_LOG(IOS_WIIMOTE, " MTU: 0x%04x", pMTU->mtu); @@ -462,7 +462,7 @@ void WiimoteDevice::ReceiveConfigurationReq(u8 _Ident, u8* _pData, u32 _Size) case L2CAP_OPT_FLUSH_TIMO: { - _dbg_assert_(IOS_WIIMOTE, 
pOptions->length == L2CAP_OPT_FLUSH_TIMO_SIZE); + DEBUG_ASSERT(IOS_WIIMOTE, pOptions->length == L2CAP_OPT_FLUSH_TIMO_SIZE); l2cap_cfg_opt_val_t* pFlushTimeOut = (l2cap_cfg_opt_val_t*)&_pData[Offset]; rChannel.FlushTimeOut = pFlushTimeOut->flush_timo; DEBUG_LOG(IOS_WIIMOTE, " FlushTimeOut: 0x%04x", pFlushTimeOut->flush_timo); @@ -470,7 +470,7 @@ void WiimoteDevice::ReceiveConfigurationReq(u8 _Ident, u8* _pData, u32 _Size) break; default: - _dbg_assert_msg_(IOS_WIIMOTE, 0, "Unknown Option: 0x%02x", pOptions->type); + DEBUG_ASSERT_MSG(IOS_WIIMOTE, 0, "Unknown Option: 0x%02x", pOptions->type); break; } @@ -500,7 +500,7 @@ void WiimoteDevice::ReceiveConfigurationResponse(u8 _Ident, u8* _pData, u32 _Siz DEBUG_LOG(IOS_WIIMOTE, " Flags: 0x%04x", rsp->flags); DEBUG_LOG(IOS_WIIMOTE, " Result: 0x%04x", rsp->result); - _dbg_assert_(IOS_WIIMOTE, rsp->result == L2CAP_SUCCESS); + DEBUG_ASSERT(IOS_WIIMOTE, rsp->result == L2CAP_SUCCESS); // update state machine SChannel& rChannel = m_Channel[rsp->scid]; @@ -579,7 +579,7 @@ void WiimoteDevice::SendDisconnectRequest(u16 scid) void WiimoteDevice::SendConfigurationRequest(u16 scid, u16 MTU, u16 FlushTimeOut) { - _dbg_assert_(IOS_WIIMOTE, DoesChannelExist(scid)); + DEBUG_ASSERT(IOS_WIIMOTE, DoesChannelExist(scid)); SChannel& rChannel = m_Channel[scid]; u8 Buffer[1024]; @@ -653,12 +653,12 @@ void WiimoteDevice::SDPSendServiceSearchResponse(u16 cid, u16 TransactionID, // verify block... 
we handle search pattern for HID service only { CBigEndianBuffer buffer(pServiceSearchPattern); - _dbg_assert_(IOS_WIIMOTE, buffer.Read8(0) == SDP_SEQ8); // data sequence - _dbg_assert_(IOS_WIIMOTE, buffer.Read8(1) == 0x03); // sequence size + DEBUG_ASSERT(IOS_WIIMOTE, buffer.Read8(0) == SDP_SEQ8); // data sequence + DEBUG_ASSERT(IOS_WIIMOTE, buffer.Read8(1) == 0x03); // sequence size // HIDClassID - _dbg_assert_(IOS_WIIMOTE, buffer.Read8(2) == 0x19); - _dbg_assert_(IOS_WIIMOTE, buffer.Read16(3) == 0x1124); + DEBUG_ASSERT(IOS_WIIMOTE, buffer.Read8(2) == 0x19); + DEBUG_ASSERT(IOS_WIIMOTE, buffer.Read16(3) == 0x1124); } u8 DataFrame[1000]; @@ -722,7 +722,7 @@ static int ParseAttribList(u8* pAttribIDList, u16& _startID, u16& _endID) if (MAX_LOGLEVEL >= LogTypes::LOG_LEVELS::LDEBUG) { - _dbg_assert_(IOS_WIIMOTE, sequence == SDP_SEQ8); + DEBUG_ASSERT(IOS_WIIMOTE, sequence == SDP_SEQ8); (void)seqSize; } @@ -756,8 +756,6 @@ void WiimoteDevice::SDPSendServiceAttributeResponse(u16 cid, u16 TransactionID, PanicAlert("Unknown service handle %x", ServiceHandle); } - // _dbg_assert_(IOS_WIIMOTE, ServiceHandle == 0x10000); - u32 contState = ParseCont(pContinuationState); u32 packetSize = 0; @@ -800,7 +798,7 @@ void WiimoteDevice::HandleSDP(u16 cid, u8* _pData, u32 _Size) { WARN_LOG(IOS_WIIMOTE, "!!! 
SDP_ServiceSearchRequest !!!"); - _dbg_assert_(IOS_WIIMOTE, _Size == 13); + DEBUG_ASSERT(IOS_WIIMOTE, _Size == 13); u16 TransactionID = buffer.Read16(1); u8* pServiceSearchPattern = buffer.GetPointer(5); @@ -891,7 +889,7 @@ void WiimoteDevice::ReceiveL2capData(u16 scid, const void* _pData, u32 _Size) Offset += sizeof(l2cap_hdr_t); // Check if we are already reporting on this channel - _dbg_assert_(IOS_WIIMOTE, DoesChannelExist(scid)); + DEBUG_ASSERT(IOS_WIIMOTE, DoesChannelExist(scid)); SChannel& rChannel = m_Channel[scid]; // Add an additional 4 byte header to the Wiimote report diff --git a/Source/Core/Core/IOS/USB/Bluetooth/WiimoteHIDAttr.cpp b/Source/Core/Core/IOS/USB/Bluetooth/WiimoteHIDAttr.cpp index c493131fb1..e80899ae2c 100644 --- a/Source/Core/Core/IOS/USB/Bluetooth/WiimoteHIDAttr.cpp +++ b/Source/Core/Core/IOS/USB/Bluetooth/WiimoteHIDAttr.cpp @@ -94,7 +94,7 @@ const u8* GetAttribPacket(u32 serviceHandle, u32 cont, u32& _size) if (serviceHandle == 0x10001) { - _dbg_assert_(IOS_WIIMOTE, cont == 0x00); + DEBUG_ASSERT(IOS_WIIMOTE, cont == 0x00); _size = sizeof(packet4_0x10001); return packet4_0x10001; } diff --git a/Source/Core/Core/IOS/USB/Common.cpp b/Source/Core/Core/IOS/USB/Common.cpp index a91ac518b1..4f1112c9ae 100644 --- a/Source/Core/Core/IOS/USB/Common.cpp +++ b/Source/Core/Core/IOS/USB/Common.cpp @@ -20,7 +20,7 @@ namespace USB { std::unique_ptr TransferCommand::MakeBuffer(const size_t size) const { - _assert_msg_(IOS_USB, data_address != 0, "Invalid data_address"); + ASSERT_MSG(IOS_USB, data_address != 0, "Invalid data_address"); auto buffer = std::make_unique(size); Memory::CopyFromEmu(buffer.get(), data_address, size); return buffer; @@ -28,7 +28,7 @@ std::unique_ptr TransferCommand::MakeBuffer(const size_t size) const void TransferCommand::FillBuffer(const u8* src, const size_t size) const { - _assert_msg_(IOS_USB, size == 0 || data_address != 0, "Invalid data_address"); + ASSERT_MSG(IOS_USB, size == 0 || data_address != 0, "Invalid 
data_address"); Memory::CopyToEmu(data_address, src, size); } diff --git a/Source/Core/Core/IOS/USB/Host.cpp b/Source/Core/Core/IOS/USB/Host.cpp index db12d5e71c..237bff8bad 100644 --- a/Source/Core/Core/IOS/USB/Host.cpp +++ b/Source/Core/Core/IOS/USB/Host.cpp @@ -35,7 +35,7 @@ USBHost::USBHost(Kernel& ios, const std::string& device_name) : Device(ios, devi { #ifdef __LIBUSB__ const int ret = libusb_init(&m_libusb_context); - _dbg_assert_msg_(IOS_USB, ret == 0, "Failed to init libusb for USB passthrough."); + DEBUG_ASSERT_MSG(IOS_USB, ret == 0, "Failed to init libusb for USB passthrough."); #endif } diff --git a/Source/Core/Core/IOS/USB/LibusbDevice.cpp b/Source/Core/Core/IOS/USB/LibusbDevice.cpp index ca4cdfec33..812176008d 100644 --- a/Source/Core/Core/IOS/USB/LibusbDevice.cpp +++ b/Source/Core/Core/IOS/USB/LibusbDevice.cpp @@ -109,9 +109,9 @@ LibusbDevice::GetEndpoints(const u8 config, const u8 interface_number, const u8 ERROR_LOG(IOS_USB, "Invalid config descriptor %u for %04x:%04x", config, m_vid, m_pid); return descriptors; } - _assert_(interface_number < m_config_descriptors[config]->Get()->bNumInterfaces); + ASSERT(interface_number < m_config_descriptors[config]->Get()->bNumInterfaces); const auto& interface = m_config_descriptors[config]->Get()->interface[interface_number]; - _assert_(alt_setting < interface.num_altsetting); + ASSERT(alt_setting < interface.num_altsetting); const libusb_interface_descriptor& interface_descriptor = interface.altsetting[alt_setting]; for (u8 i = 0; i < interface_descriptor.bNumEndpoints; ++i) { diff --git a/Source/Core/Core/Movie.cpp b/Source/Core/Core/Movie.cpp index 8c292e53b1..bf3b0c099f 100644 --- a/Source/Core/Core/Movie.cpp +++ b/Source/Core/Core/Movie.cpp @@ -1256,7 +1256,7 @@ void EndPlayInput(bool cont) if (cont) { // If !IsMovieActive(), changing s_playMode requires calling UpdateWantDeterminism - _assert_(IsMovieActive()); + ASSERT(IsMovieActive()); s_playMode = MODE_RECORDING; Core::DisplayMessage("Reached movie 
end. Resuming recording.", 2000); diff --git a/Source/Core/Core/PatchEngine.cpp b/Source/Core/Core/PatchEngine.cpp index 91975528ab..84bd43b043 100644 --- a/Source/Core/Core/PatchEngine.cpp +++ b/Source/Core/Core/PatchEngine.cpp @@ -194,7 +194,7 @@ static void ApplyPatches(const std::vector& patches) // We require at least 2 stack frames, if the stack is shallower than that then it won't work. static bool IsStackSane() { - _dbg_assert_(ACTIONREPLAY, UReg_MSR(MSR).DR && UReg_MSR(MSR).IR); + DEBUG_ASSERT(ACTIONREPLAY, UReg_MSR(MSR).DR && UReg_MSR(MSR).IR); // Check the stack pointer u32 SP = GPR(1); diff --git a/Source/Core/Core/PowerPC/Interpreter/Interpreter.cpp b/Source/Core/Core/PowerPC/Interpreter/Interpreter.cpp index 928ec241b9..e0bfdffe17 100644 --- a/Source/Core/Core/PowerPC/Interpreter/Interpreter.cpp +++ b/Source/Core/Core/PowerPC/Interpreter/Interpreter.cpp @@ -320,9 +320,9 @@ void Interpreter::unknown_instruction(UGeckoInstruction inst) for (int i = 0; i < 32; i += 4) NOTICE_LOG(POWERPC, "r%d: 0x%08x r%d: 0x%08x r%d:0x%08x r%d: 0x%08x", i, rGPR[i], i + 1, rGPR[i + 1], i + 2, rGPR[i + 2], i + 3, rGPR[i + 3]); - _assert_msg_(POWERPC, 0, - "\nIntCPU: Unknown instruction %08x at PC = %08x last_PC = %08x LR = %08x\n", - inst.hex, PC, last_pc, LR); + ASSERT_MSG(POWERPC, 0, + "\nIntCPU: Unknown instruction %08x at PC = %08x last_PC = %08x LR = %08x\n", + inst.hex, PC, last_pc, LR); } void Interpreter::ClearCache() diff --git a/Source/Core/Core/PowerPC/Interpreter/Interpreter_Branch.cpp b/Source/Core/Core/PowerPC/Interpreter/Interpreter_Branch.cpp index d253715181..b81d26380e 100644 --- a/Source/Core/Core/PowerPC/Interpreter/Interpreter_Branch.cpp +++ b/Source/Core/Core/PowerPC/Interpreter/Interpreter_Branch.cpp @@ -75,7 +75,7 @@ void Interpreter::bcx(UGeckoInstruction inst) void Interpreter::bcctrx(UGeckoInstruction inst) { - _dbg_assert_msg_(POWERPC, inst.BO_2 & BO_DONT_DECREMENT_FLAG, + DEBUG_ASSERT_MSG(POWERPC, inst.BO_2 & BO_DONT_DECREMENT_FLAG, "bcctrx 
with decrement and test CTR option is invalid!"); int condition = ((inst.BO_2 >> 4) | (GetCRBit(inst.BI_2) == ((inst.BO_2 >> 3) & 1))) & 1; diff --git a/Source/Core/Core/PowerPC/Interpreter/Interpreter_LoadStore.cpp b/Source/Core/Core/PowerPC/Interpreter/Interpreter_LoadStore.cpp index 5313d2eda2..b6d22e8b2b 100644 --- a/Source/Core/Core/PowerPC/Interpreter/Interpreter_LoadStore.cpp +++ b/Source/Core/Core/PowerPC/Interpreter/Interpreter_LoadStore.cpp @@ -313,7 +313,7 @@ void Interpreter::stwu(UGeckoInstruction inst) void Interpreter::dcba(UGeckoInstruction inst) { - _assert_msg_(POWERPC, 0, "dcba - Not implemented - not a Gekko instruction"); + ASSERT_MSG(POWERPC, 0, "dcba - Not implemented - not a Gekko instruction"); } void Interpreter::dcbf(UGeckoInstruction inst) @@ -391,9 +391,6 @@ void Interpreter::eciwx(UGeckoInstruction inst) if (EA & 3) PowerPC::ppcState.Exceptions |= EXCEPTION_ALIGNMENT; - // _assert_msg_(POWERPC,0,"eciwx - fill r%i with word @ %08x from device %02x", - // inst.RS, EA, PowerPC::ppcState.spr[SPR_EAR] & 0x1f); - rGPR[inst.RD] = PowerPC::Read_U32(EA); } @@ -408,9 +405,6 @@ void Interpreter::ecowx(UGeckoInstruction inst) if (EA & 3) PowerPC::ppcState.Exceptions |= EXCEPTION_ALIGNMENT; - // _assert_msg_(POWERPC,0,"ecowx - send stw request (%08x@%08x) to device %02x", - // rGPR[_inst.RS], EA, PowerPC::ppcState.spr[SPR_EAR] & 0x1f); - PowerPC::Write_U32(rGPR[inst.RS], EA); } diff --git a/Source/Core/Core/PowerPC/Interpreter/Interpreter_LoadStorePaired.cpp b/Source/Core/Core/PowerPC/Interpreter/Interpreter_LoadStorePaired.cpp index d889b9d867..2661804c71 100644 --- a/Source/Core/Core/PowerPC/Interpreter/Interpreter_LoadStorePaired.cpp +++ b/Source/Core/Core/PowerPC/Interpreter/Interpreter_LoadStorePaired.cpp @@ -210,7 +210,7 @@ void Interpreter::Helper_Quantize(u32 addr, u32 instI, u32 instRS, u32 instW) case QUANTIZE_INVALID1: case QUANTIZE_INVALID2: case QUANTIZE_INVALID3: - _assert_msg_(POWERPC, 0, "PS dequantize - unknown type to read"); + 
ASSERT_MSG(POWERPC, 0, "PS dequantize - unknown type to read"); break; } } @@ -280,7 +280,7 @@ void Interpreter::Helper_Dequantize(u32 addr, u32 instI, u32 instRD, u32 instW) case QUANTIZE_INVALID1: case QUANTIZE_INVALID2: case QUANTIZE_INVALID3: - _assert_msg_(POWERPC, 0, "PS dequantize - unknown type to read"); + ASSERT_MSG(POWERPC, 0, "PS dequantize - unknown type to read"); ps0 = 0.f; ps1 = 0.f; break; diff --git a/Source/Core/Core/PowerPC/Interpreter/Interpreter_SystemRegisters.cpp b/Source/Core/Core/PowerPC/Interpreter/Interpreter_SystemRegisters.cpp index 12c4b57dc3..ff6abe522c 100644 --- a/Source/Core/Core/PowerPC/Interpreter/Interpreter_SystemRegisters.cpp +++ b/Source/Core/Core/PowerPC/Interpreter/Interpreter_SystemRegisters.cpp @@ -195,7 +195,7 @@ void Interpreter::mtsrin(UGeckoInstruction inst) void Interpreter::mftb(UGeckoInstruction inst) { int iIndex = (inst.TBR >> 5) | ((inst.TBR & 0x1F) << 5); - _dbg_assert_msg_(POWERPC, (iIndex == SPR_TL) || (iIndex == SPR_TU), "Invalid mftb"); + DEBUG_ASSERT_MSG(POWERPC, (iIndex == SPR_TL) || (iIndex == SPR_TU), "Invalid mftb"); (void)iIndex; mfspr(inst); } @@ -313,7 +313,7 @@ void Interpreter::mtspr(UGeckoInstruction inst) break; case SPR_WPAR: - _assert_msg_(POWERPC, rGPR[inst.RD] == 0x0C008000, "Gather pipe @ %08x", PC); + ASSERT_MSG(POWERPC, rGPR[inst.RD] == 0x0C008000, "Gather pipe @ %08x", PC); GPFifo::ResetGatherPipe(); break; diff --git a/Source/Core/Core/PowerPC/Jit64/Jit.cpp b/Source/Core/Core/PowerPC/Jit64/Jit.cpp index a895d48544..14c5d9ca75 100644 --- a/Source/Core/Core/PowerPC/Jit64/Jit.cpp +++ b/Source/Core/Core/PowerPC/Jit64/Jit.cpp @@ -895,9 +895,9 @@ const u8* Jit64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer* code_buf, JitBloc // If we have a fastmem loadstore, we can omit the exception check and let fastmem handle // it. 
FixupBranch memException; - _assert_msg_(DYNA_REC, !(js.fastmemLoadStore && js.fixupExceptionHandler), - "Fastmem loadstores shouldn't have exception handler fixups (PC=%x)!", - ops[i].address); + ASSERT_MSG(DYNA_REC, !(js.fastmemLoadStore && js.fixupExceptionHandler), + "Fastmem loadstores shouldn't have exception handler fixups (PC=%x)!", + ops[i].address); if (!js.fastmemLoadStore && !js.fixupExceptionHandler) { TEST(32, PPCSTATE(Exceptions), Imm32(EXCEPTION_DSI)); diff --git a/Source/Core/Core/PowerPC/Jit64/JitRegCache.cpp b/Source/Core/Core/PowerPC/Jit64/JitRegCache.cpp index b932db49b0..e272969ffc 100644 --- a/Source/Core/Core/PowerPC/Jit64/JitRegCache.cpp +++ b/Source/Core/Core/PowerPC/Jit64/JitRegCache.cpp @@ -98,7 +98,7 @@ void RegCache::Flush(FlushMode mode, BitSet32 regsToFlush) } else { - _assert_msg_(DYNA_REC, 0, "Jit64 - Flush unhandled case, reg %u PC: %08x", i, PC); + ASSERT_MSG(DYNA_REC, 0, "Jit64 - Flush unhandled case, reg %u PC: %08x", i, PC); } } } @@ -313,7 +313,7 @@ X64Reg RegCache::GetFreeXReg() } // Still no dice? Die! 
- _assert_msg_(DYNA_REC, 0, "Regcache ran out of regs"); + ASSERT_MSG(DYNA_REC, 0, "Regcache ran out of regs"); return INVALID_REG; } diff --git a/Source/Core/Core/PowerPC/Jit64/Jit_Branch.cpp b/Source/Core/Core/PowerPC/Jit64/Jit_Branch.cpp index eb4b5eabd6..7451042100 100644 --- a/Source/Core/Core/PowerPC/Jit64/Jit_Branch.cpp +++ b/Source/Core/Core/PowerPC/Jit64/Jit_Branch.cpp @@ -183,7 +183,7 @@ void Jit64::bcctrx(UGeckoInstruction inst) JITDISABLE(bJITBranchOff); // bcctrx doesn't decrement and/or test CTR - _dbg_assert_msg_(POWERPC, inst.BO_2 & BO_DONT_DECREMENT_FLAG, + DEBUG_ASSERT_MSG(POWERPC, inst.BO_2 & BO_DONT_DECREMENT_FLAG, "bcctrx with decrement and test CTR option is invalid!"); if (inst.BO_2 & BO_DONT_CHECK_CONDITION) diff --git a/Source/Core/Core/PowerPC/Jit64/Jit_FloatingPoint.cpp b/Source/Core/Core/PowerPC/Jit64/Jit_FloatingPoint.cpp index 0edf485055..da0bff319e 100644 --- a/Source/Core/Core/PowerPC/Jit64/Jit_FloatingPoint.cpp +++ b/Source/Core/Core/PowerPC/Jit64/Jit_FloatingPoint.cpp @@ -83,7 +83,7 @@ void Jit64::HandleNaNs(UGeckoInstruction inst, X64Reg xmm_out, X64Reg xmm, X64Re return; } - _assert_(xmm != clobber); + ASSERT(xmm != clobber); std::vector inputs; u32 a = inst.FA, b = inst.FB, c = inst.FC; @@ -126,7 +126,7 @@ void Jit64::HandleNaNs(UGeckoInstruction inst, X64Reg xmm_out, X64Reg xmm, X64Re FixupBranch handle_nan = J_CC(CC_NZ, true); SwitchToFarCode(); SetJumpTarget(handle_nan); - _assert_msg_(DYNA_REC, clobber == XMM0, "BLENDVPD implicitly uses XMM0"); + ASSERT_MSG(DYNA_REC, clobber == XMM0, "BLENDVPD implicitly uses XMM0"); BLENDVPD(xmm, MConst(psGeneratedQNaN)); for (u32 x : inputs) { @@ -221,7 +221,7 @@ void Jit64::fp_arith(UGeckoInstruction inst) round_input); break; default: - _assert_msg_(DYNA_REC, 0, "fp_arith WTF!!!"); + ASSERT_MSG(DYNA_REC, 0, "fp_arith WTF!!!"); } HandleNaNs(inst, fpr.RX(d), dest); if (single) diff --git a/Source/Core/Core/PowerPC/Jit64/Jit_Integer.cpp b/Source/Core/Core/PowerPC/Jit64/Jit_Integer.cpp index 
15f4965e63..d7fea23464 100644 --- a/Source/Core/Core/PowerPC/Jit64/Jit_Integer.cpp +++ b/Source/Core/Core/PowerPC/Jit64/Jit_Integer.cpp @@ -142,7 +142,7 @@ void Jit64::FinalizeCarryOverflow(bool oe, bool inv) // LT/GT either. void Jit64::ComputeRC(const OpArg& arg, bool needs_test, bool needs_sext) { - _assert_msg_(DYNA_REC, arg.IsSimpleReg() || arg.IsImm(), "Invalid ComputeRC operand"); + ASSERT_MSG(DYNA_REC, arg.IsSimpleReg() || arg.IsImm(), "Invalid ComputeRC operand"); if (arg.IsImm()) { MOV(64, PPCSTATE(cr_val[0]), Imm32(arg.SImm32())); @@ -269,7 +269,7 @@ void Jit64::regimmop(int d, int a, bool binary, u32 value, Operation doop, } else { - _assert_msg_(DYNA_REC, 0, "WTF regimmop"); + ASSERT_MSG(DYNA_REC, 0, "WTF regimmop"); } if (Rc) ComputeRC(gpr.R(d), needs_test, doop != And || (value & 0x80000000)); @@ -598,7 +598,7 @@ void Jit64::boolX(UGeckoInstruction inst) JITDISABLE(bJITIntegerOff); int a = inst.RA, s = inst.RS, b = inst.RB; bool needs_test = false; - _dbg_assert_msg_(DYNA_REC, inst.OPCD == 31, "Invalid boolX"); + DEBUG_ASSERT_MSG(DYNA_REC, inst.OPCD == 31, "Invalid boolX"); if (gpr.R(s).IsImm() && gpr.R(b).IsImm()) { diff --git a/Source/Core/Core/PowerPC/Jit64/Jit_LoadStore.cpp b/Source/Core/Core/PowerPC/Jit64/Jit_LoadStore.cpp index fc6e1f262a..02ecc8ce3d 100644 --- a/Source/Core/Core/PowerPC/Jit64/Jit_LoadStore.cpp +++ b/Source/Core/Core/PowerPC/Jit64/Jit_LoadStore.cpp @@ -401,7 +401,7 @@ void Jit64::stX(UGeckoInstruction inst) accessSize = 8; break; default: - _assert_msg_(DYNA_REC, 0, "stX: Invalid access size."); + ASSERT_MSG(DYNA_REC, 0, "stX: Invalid access size."); return; } diff --git a/Source/Core/Core/PowerPC/Jit64/Jit_Paired.cpp b/Source/Core/Core/PowerPC/Jit64/Jit_Paired.cpp index 7b129d5470..456a7d644a 100644 --- a/Source/Core/Core/PowerPC/Jit64/Jit_Paired.cpp +++ b/Source/Core/Core/PowerPC/Jit64/Jit_Paired.cpp @@ -134,7 +134,7 @@ void Jit64::ps_mergeXX(UGeckoInstruction inst) avx_op(&XEmitter::VUNPCKHPD, &XEmitter::UNPCKHPD, fpr.RX(d), 
fpr.R(a), fpr.R(b)); break; // 11 default: - _assert_msg_(DYNA_REC, 0, "ps_merge - invalid op"); + ASSERT_MSG(DYNA_REC, 0, "ps_merge - invalid op"); } fpr.UnlockAll(); } diff --git a/Source/Core/Core/PowerPC/Jit64/Jit_SystemRegisters.cpp b/Source/Core/Core/PowerPC/Jit64/Jit_SystemRegisters.cpp index e3a694b71b..5ba168147e 100644 --- a/Source/Core/Core/PowerPC/Jit64/Jit_SystemRegisters.cpp +++ b/Source/Core/Core/PowerPC/Jit64/Jit_SystemRegisters.cpp @@ -40,7 +40,7 @@ void Jit64::GetCRFieldBit(int field, int bit, X64Reg out, bool negate) break; default: - _assert_msg_(DYNA_REC, false, "Invalid CR bit"); + ASSERT_MSG(DYNA_REC, false, "Invalid CR bit"); } } @@ -173,7 +173,7 @@ FixupBranch Jit64::JumpIfCRFieldBit(int field, int bit, bool jump_if_set) return J_CC(jump_if_set ? CC_C : CC_NC, true); default: - _assert_msg_(DYNA_REC, false, "Invalid CR bit"); + ASSERT_MSG(DYNA_REC, false, "Invalid CR bit"); } // Should never happen. @@ -548,7 +548,7 @@ void Jit64::crXXX(UGeckoInstruction inst) { INSTRUCTION_START JITDISABLE(bJITSystemRegistersOff); - _dbg_assert_msg_(DYNA_REC, inst.OPCD == 19, "Invalid crXXX"); + DEBUG_ASSERT_MSG(DYNA_REC, inst.OPCD == 19, "Invalid crXXX"); // Special case: crclr if (inst.CRBA == inst.CRBB && inst.CRBA == inst.CRBD && inst.SUBOP10 == 193) diff --git a/Source/Core/Core/PowerPC/Jit64Common/ConstantPool.cpp b/Source/Core/Core/PowerPC/Jit64Common/ConstantPool.cpp index 72d11bcf85..bf5fc5311e 100644 --- a/Source/Core/Core/PowerPC/Jit64Common/ConstantPool.cpp +++ b/Source/Core/Core/PowerPC/Jit64Common/ConstantPool.cpp @@ -46,7 +46,7 @@ const void* ConstantPool::GetConstant(const void* value, size_t element_size, si if (iter == m_const_info.end()) { void* ptr = std::align(ALIGNMENT, value_size, m_current_ptr, m_remaining_size); - _assert_msg_(DYNA_REC, ptr, "Constant pool has run out of space."); + ASSERT_MSG(DYNA_REC, ptr, "Constant pool has run out of space."); m_current_ptr = static_cast(m_current_ptr) + value_size; m_remaining_size -= 
value_size; @@ -56,8 +56,7 @@ const void* ConstantPool::GetConstant(const void* value, size_t element_size, si } const ConstantInfo& info = iter->second; - _assert_msg_(DYNA_REC, info.m_size == value_size, - "Constant has incorrect size in constant pool."); + ASSERT_MSG(DYNA_REC, info.m_size == value_size, "Constant has incorrect size in constant pool."); u8* location = static_cast(info.m_location); return location + element_size * index; } diff --git a/Source/Core/Core/PowerPC/Jit64Common/EmuCodeBlock.cpp b/Source/Core/Core/PowerPC/Jit64Common/EmuCodeBlock.cpp index 9c88cc8726..a7536a77a6 100644 --- a/Source/Core/Core/PowerPC/Jit64Common/EmuCodeBlock.cpp +++ b/Source/Core/Core/PowerPC/Jit64Common/EmuCodeBlock.cpp @@ -354,8 +354,8 @@ void EmuCodeBlock::SafeLoadToReg(X64Reg reg_value, const Gen::OpArg& opAddress, return; } - _assert_msg_(DYNA_REC, opAddress.IsSimpleReg(), - "Incorrect use of SafeLoadToReg (address isn't register or immediate)"); + ASSERT_MSG(DYNA_REC, opAddress.IsSimpleReg(), + "Incorrect use of SafeLoadToReg (address isn't register or immediate)"); X64Reg reg_addr = opAddress.GetSimpleReg(); if (offset) { diff --git a/Source/Core/Core/PowerPC/Jit64Common/Jit64Base.cpp b/Source/Core/Core/PowerPC/Jit64Common/Jit64Base.cpp index 118b6e553a..10813c4d81 100644 --- a/Source/Core/Core/PowerPC/Jit64Common/Jit64Base.cpp +++ b/Source/Core/Core/PowerPC/Jit64Common/Jit64Base.cpp @@ -109,7 +109,7 @@ bool Jitx86Base::BackPatch(u32 emAddress, SContext* ctx) *ptr = Common::swap64(static_cast(*ptr)); break; default: - _dbg_assert_(DYNA_REC, 0); + DEBUG_ASSERT(DYNA_REC, 0); break; } } diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_Branch.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_Branch.cpp index da49e0a3bf..9ac99abf67 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_Branch.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_Branch.cpp @@ -197,8 +197,8 @@ void JitArm64::bcctrx(UGeckoInstruction inst) FALLBACK_IF(!(inst.BO_2 & 
BO_DONT_CHECK_CONDITION)); // bcctrx doesn't decrement and/or test CTR - _assert_msg_(DYNA_REC, inst.BO_2 & BO_DONT_DECREMENT_FLAG, - "bcctrx with decrement and test CTR option is invalid!"); + ASSERT_MSG(DYNA_REC, inst.BO_2 & BO_DONT_DECREMENT_FLAG, + "bcctrx with decrement and test CTR option is invalid!"); // BO_2 == 1z1zz -> b always diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_FloatingPoint.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_FloatingPoint.cpp index 2e55f7001b..82d495e265 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_FloatingPoint.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_FloatingPoint.cpp @@ -65,7 +65,7 @@ void JitArm64::fp_arith(UGeckoInstruction inst) m_float_emit.FMUL(size, VD, VA, VC); break; default: - _assert_msg_(DYNA_REC, 0, "fp_arith"); + ASSERT_MSG(DYNA_REC, 0, "fp_arith"); break; } } @@ -110,7 +110,7 @@ void JitArm64::fp_arith(UGeckoInstruction inst) m_float_emit.FNMADD(VD, VA, VC, VB); break; // fnmadd: "D = -(A*C + B)" vs "Vd = (-Va) + (-Vn)*Vm" default: - _assert_msg_(DYNA_REC, 0, "fp_arith"); + ASSERT_MSG(DYNA_REC, 0, "fp_arith"); break; } } @@ -161,7 +161,7 @@ void JitArm64::fp_logic(UGeckoInstruction inst) m_float_emit.FABS(size, VD, VB); break; default: - _assert_msg_(DYNA_REC, 0, "fp_logic"); + ASSERT_MSG(DYNA_REC, 0, "fp_logic"); break; } } @@ -189,7 +189,7 @@ void JitArm64::fp_logic(UGeckoInstruction inst) m_float_emit.FABS(reg_encoder(VD), reg_encoder(VB)); break; default: - _assert_msg_(DYNA_REC, 0, "fp_logic"); + ASSERT_MSG(DYNA_REC, 0, "fp_logic"); break; } } diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_Paired.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_Paired.cpp index 3aba8aae4b..405227cfc1 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_Paired.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_Paired.cpp @@ -61,7 +61,7 @@ void JitArm64::ps_mergeXX(UGeckoInstruction inst) m_float_emit.TRN2(size, VD, VA, VB); break; default: - _assert_msg_(DYNA_REC, 0, "ps_merge - 
invalid op"); + ASSERT_MSG(DYNA_REC, 0, "ps_merge - invalid op"); break; } } @@ -144,7 +144,7 @@ void JitArm64::ps_maddXX(UGeckoInstruction inst) m_float_emit.FNEG(size, VD, VD); break; default: - _assert_msg_(DYNA_REC, 0, "ps_madd - invalid op"); + ASSERT_MSG(DYNA_REC, 0, "ps_madd - invalid op"); break; } fpr.FixSinglePrecision(d); diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.cpp index 051b416f30..ede381a32b 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.cpp @@ -57,16 +57,16 @@ u32 Arm64RegCache::GetUnlockedRegisterCount() void Arm64RegCache::LockRegister(ARM64Reg host_reg) { auto reg = std::find(m_host_registers.begin(), m_host_registers.end(), host_reg); - _assert_msg_(DYNA_REC, reg != m_host_registers.end(), - "Don't try locking a register that isn't in the cache. Reg %d", host_reg); + ASSERT_MSG(DYNA_REC, reg != m_host_registers.end(), + "Don't try locking a register that isn't in the cache. Reg %d", host_reg); reg->Lock(); } void Arm64RegCache::UnlockRegister(ARM64Reg host_reg) { auto reg = std::find(m_host_registers.begin(), m_host_registers.end(), host_reg); - _assert_msg_(DYNA_REC, reg != m_host_registers.end(), - "Don't try unlocking a register that isn't in the cache. Reg %d", host_reg); + ASSERT_MSG(DYNA_REC, reg != m_host_registers.end(), + "Don't try unlocking a register that isn't in the cache. 
Reg %d", host_reg); reg->Unlock(); } @@ -116,19 +116,19 @@ bool Arm64GPRCache::IsCalleeSaved(ARM64Reg reg) const OpArg& Arm64GPRCache::GetGuestGPROpArg(size_t preg) const { - _assert_(preg < GUEST_GPR_COUNT); + ASSERT(preg < GUEST_GPR_COUNT); return m_guest_registers[preg]; } Arm64GPRCache::GuestRegInfo Arm64GPRCache::GetGuestGPR(size_t preg) { - _assert_(preg < GUEST_GPR_COUNT); + ASSERT(preg < GUEST_GPR_COUNT); return {32, PPCSTATE_OFF(gpr[preg]), m_guest_registers[GUEST_GPR_OFFSET + preg]}; } Arm64GPRCache::GuestRegInfo Arm64GPRCache::GetGuestCR(size_t preg) { - _assert_(preg < GUEST_CR_COUNT); + ASSERT(preg < GUEST_CR_COUNT); return {64, PPCSTATE_OFF(cr_val[preg]), m_guest_registers[GUEST_CR_OFFSET + preg]}; } @@ -138,7 +138,7 @@ Arm64GPRCache::GuestRegInfo Arm64GPRCache::GetGuestByIndex(size_t index) return GetGuestGPR(index - GUEST_GPR_OFFSET); if (index >= GUEST_CR_OFFSET && index < GUEST_CR_OFFSET + GUEST_CR_COUNT) return GetGuestCR(index - GUEST_CR_OFFSET); - _assert_msg_(DYNA_REC, false, "Invalid index for guest register"); + ASSERT_MSG(DYNA_REC, false, "Invalid index for guest register"); } void Arm64GPRCache::FlushRegister(size_t index, bool maintain_state) @@ -474,7 +474,7 @@ ARM64Reg Arm64FPRCache::R(size_t preg, RegType type) return host_reg; } default: - _dbg_assert_msg_(DYNA_REC, false, "Invalid OpArg Type!"); + DEBUG_ASSERT_MSG(DYNA_REC, false, "Invalid OpArg Type!"); break; } // We've got an issue if we end up here diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_SystemRegisters.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_SystemRegisters.cpp index f5071e71bc..50d429febb 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_SystemRegisters.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_SystemRegisters.cpp @@ -29,7 +29,7 @@ FixupBranch JitArm64::JumpIfCRFieldBit(int field, int bit, bool jump_if_set) case CR_LT_BIT: // check bit 62 set return jump_if_set ? 
TBNZ(XA, 62) : TBZ(XA, 62); default: - _assert_msg_(DYNA_REC, false, "Invalid CR bit"); + ASSERT_MSG(DYNA_REC, false, "Invalid CR bit"); } } @@ -532,7 +532,7 @@ void JitArm64::crXXX(UGeckoInstruction inst) break; default: - _assert_msg_(DYNA_REC, false, "Invalid CR bit"); + ASSERT_MSG(DYNA_REC, false, "Invalid CR bit"); } } diff --git a/Source/Core/Core/PowerPC/JitArm64/Jit_Util.cpp b/Source/Core/Core/PowerPC/JitArm64/Jit_Util.cpp index 49f0ac2172..eb77a28ae8 100644 --- a/Source/Core/Core/PowerPC/JitArm64/Jit_Util.cpp +++ b/Source/Core/Core/PowerPC/JitArm64/Jit_Util.cpp @@ -45,7 +45,7 @@ private: m_emit->STR(INDEX_UNSIGNED, reg, X0, 0); break; default: - _assert_msg_(DYNA_REC, false, "Unknown size %d passed to MMIOWriteCodeGenerator!", sbits); + ASSERT_MSG(DYNA_REC, false, "Unknown size %d passed to MMIOWriteCodeGenerator!", sbits); break; } } @@ -138,7 +138,7 @@ private: m_emit->LDR(INDEX_UNSIGNED, m_dst_reg, X0, 0); break; default: - _assert_msg_(DYNA_REC, false, "Unknown size %d passed to MMIOReadCodeGenerator!", sbits); + ASSERT_MSG(DYNA_REC, false, "Unknown size %d passed to MMIOReadCodeGenerator!", sbits); break; } } diff --git a/Source/Core/Core/PowerPC/MMU.cpp b/Source/Core/Core/PowerPC/MMU.cpp index ad218b4244..6498282025 100644 --- a/Source/Core/Core/PowerPC/MMU.cpp +++ b/Source/Core/Core/PowerPC/MMU.cpp @@ -741,7 +741,7 @@ void DMA_MemoryToLC(const u32 cacheAddr, const u32 memAddr, const u32 numBlocks) void ClearCacheLine(u32 address) { - _dbg_assert_(POWERPC, (address & 0x1F) == 0); + DEBUG_ASSERT(POWERPC, (address & 0x1F) == 0); if (UReg_MSR(MSR).DR) { auto translated_address = TranslateAddress(address); diff --git a/Source/Core/Core/PowerPC/PPCAnalyst.cpp b/Source/Core/Core/PowerPC/PPCAnalyst.cpp index 578f32d1eb..fd995c01bc 100644 --- a/Source/Core/Core/PowerPC/PPCAnalyst.cpp +++ b/Source/Core/Core/PowerPC/PPCAnalyst.cpp @@ -194,7 +194,7 @@ bool AnalyzeFunction(u32 startAddr, Symbol& func, int max_size) bool ReanalyzeFunction(u32 start_addr, Symbol& 
func, int max_size) { - _assert_msg_(OSHLE, func.analyzed, "The function wasn't previously analyzed!"); + ASSERT_MSG(OSHLE, func.analyzed, "The function wasn't previously analyzed!"); func.analyzed = false; return AnalyzeFunction(start_addr, func, max_size); diff --git a/Source/Core/Core/PowerPC/PPCTables.cpp b/Source/Core/Core/PowerPC/PPCTables.cpp index 0237d10786..66b3d675e1 100644 --- a/Source/Core/Core/PowerPC/PPCTables.cpp +++ b/Source/Core/Core/PowerPC/PPCTables.cpp @@ -56,7 +56,7 @@ GekkoOPInfo* GetOpInfo(UGeckoInstruction _inst) case 63: return m_infoTable63[_inst.SUBOP10]; default: - _assert_msg_(POWERPC, 0, "GetOpInfo - invalid subtable op %08x @ %08x", _inst.hex, PC); + ASSERT_MSG(POWERPC, 0, "GetOpInfo - invalid subtable op %08x @ %08x", _inst.hex, PC); return nullptr; } } @@ -64,7 +64,7 @@ GekkoOPInfo* GetOpInfo(UGeckoInstruction _inst) { if (info->type == OPTYPE_INVALID) { - _assert_msg_(POWERPC, 0, "GetOpInfo - invalid op %08x @ %08x", _inst.hex, PC); + ASSERT_MSG(POWERPC, 0, "GetOpInfo - invalid op %08x @ %08x", _inst.hex, PC); return nullptr; } return m_infoTable[_inst.OPCD]; @@ -89,7 +89,7 @@ Interpreter::Instruction GetInterpreterOp(UGeckoInstruction _inst) case 63: return Interpreter::m_op_table63[_inst.SUBOP10]; default: - _assert_msg_(POWERPC, 0, "GetInterpreterOp - invalid subtable op %08x @ %08x", _inst.hex, PC); + ASSERT_MSG(POWERPC, 0, "GetInterpreterOp - invalid subtable op %08x @ %08x", _inst.hex, PC); return nullptr; } } @@ -97,7 +97,7 @@ Interpreter::Instruction GetInterpreterOp(UGeckoInstruction _inst) { if (info->type == OPTYPE_INVALID) { - _assert_msg_(POWERPC, 0, "GetInterpreterOp - invalid op %08x @ %08x", _inst.hex, PC); + ASSERT_MSG(POWERPC, 0, "GetInterpreterOp - invalid op %08x @ %08x", _inst.hex, PC); return nullptr; } return Interpreter::m_op_table[_inst.OPCD]; diff --git a/Source/Core/Core/PowerPC/PowerPC.cpp b/Source/Core/Core/PowerPC/PowerPC.cpp index fec282e2d6..bb62c11b31 100644 --- 
a/Source/Core/Core/PowerPC/PowerPC.cpp +++ b/Source/Core/Core/PowerPC/PowerPC.cpp @@ -514,7 +514,7 @@ void CheckExternalExceptions() DEBUG_LOG(POWERPC, "EXCEPTION_EXTERNAL_INT"); ppcState.Exceptions &= ~EXCEPTION_EXTERNAL_INT; - _dbg_assert_msg_(POWERPC, (SRR1 & 0x02) != 0, "EXTERNAL_INT unrecoverable???"); + DEBUG_ASSERT_MSG(POWERPC, (SRR1 & 0x02) != 0, "EXTERNAL_INT unrecoverable???"); } else if (exceptions & EXCEPTION_PERFORMANCE_MONITOR) { @@ -540,7 +540,7 @@ void CheckExternalExceptions() } else { - _dbg_assert_msg_(POWERPC, 0, "Unknown EXT interrupt: Exceptions == %08x", exceptions); + DEBUG_ASSERT_MSG(POWERPC, 0, "Unknown EXT interrupt: Exceptions == %08x", exceptions); ERROR_LOG(POWERPC, "Unknown EXTERNAL INTERRUPT exception: Exceptions == %08x", exceptions); } } diff --git a/Source/Core/Core/WiiUtils.cpp b/Source/Core/Core/WiiUtils.cpp index 84ae9a3d20..5a141b2467 100644 --- a/Source/Core/Core/WiiUtils.cpp +++ b/Source/Core/Core/WiiUtils.cpp @@ -340,16 +340,16 @@ OnlineSystemUpdater::Response OnlineSystemUpdater::GetSystemTitles() // Construct the request by loading the template first, then updating some fields. pugi::xml_document doc; pugi::xml_parse_result result = doc.load_string(GET_SYSTEM_TITLES_REQUEST_PAYLOAD); - _assert_(result); + ASSERT(result); // Nintendo does not really care about the device ID or verify that we *are* that device, // as long as it is a valid Wii device ID. const std::string device_id = GetDeviceId(); - _assert_(doc.select_node("//DeviceId").node().text().set(device_id.c_str())); + ASSERT(doc.select_node("//DeviceId").node().text().set(device_id.c_str())); // Write the correct device region. const std::string region = m_requested_region.empty() ? 
GetDeviceRegion() : m_requested_region; - _assert_(doc.select_node("//RegionId").node().text().set(region.c_str())); + ASSERT(doc.select_node("//RegionId").node().text().set(region.c_str())); std::ostringstream stream; doc.save(stream); diff --git a/Source/Core/DiscIO/DirectoryBlob.cpp b/Source/Core/DiscIO/DirectoryBlob.cpp index 696877b528..5cbdc08210 100644 --- a/Source/Core/DiscIO/DirectoryBlob.cpp +++ b/Source/Core/DiscIO/DirectoryBlob.cpp @@ -92,7 +92,7 @@ bool DiscContent::Read(u64* offset, u64* length, u8** buffer) const if (m_size == 0) return true; - _dbg_assert_(DISCIO, *offset >= m_offset); + DEBUG_ASSERT(DISCIO, *offset >= m_offset); const u64 offset_in_content = *offset - m_offset; if (offset_in_content < m_size) @@ -160,7 +160,7 @@ bool DiscContentContainer::Read(u64 offset, u64 length, u8* buffer) const return false; ++it; - _dbg_assert_(DISCIO, it == m_contents.end() || it->GetOffset() >= offset); + DEBUG_ASSERT(DISCIO, it == m_contents.end() || it->GetOffset() >= offset); } // Zero fill if we went beyond the last DiscContent @@ -688,7 +688,7 @@ void DirectoryBlobPartition::BuildFST(u64 fst_address) name_table_offset); // overflow check, compare the aligned name offset with the aligned name table size - _assert_(Common::AlignUp(name_offset, 1ull << m_address_shift) == name_table_size); + ASSERT(Common::AlignUp(name_offset, 1ull << m_address_shift) == name_table_size); // write FST size and location Write32((u32)(fst_address >> m_address_shift), 0x0424, &m_disc_header); diff --git a/Source/Core/DiscIO/VolumeGC.cpp b/Source/Core/DiscIO/VolumeGC.cpp index 64c0daefca..1de11d3545 100644 --- a/Source/Core/DiscIO/VolumeGC.cpp +++ b/Source/Core/DiscIO/VolumeGC.cpp @@ -30,7 +30,7 @@ namespace DiscIO { VolumeGC::VolumeGC(std::unique_ptr reader) : m_pReader(std::move(reader)) { - _assert_(m_pReader); + ASSERT(m_pReader); m_file_system = [this]() -> std::unique_ptr { auto file_system = std::make_unique(this, PARTITION_NONE); diff --git 
a/Source/Core/DiscIO/VolumeWad.cpp b/Source/Core/DiscIO/VolumeWad.cpp index 2d98986f81..ce58f400bb 100644 --- a/Source/Core/DiscIO/VolumeWad.cpp +++ b/Source/Core/DiscIO/VolumeWad.cpp @@ -28,7 +28,7 @@ namespace DiscIO { VolumeWAD::VolumeWAD(std::unique_ptr reader) : m_reader(std::move(reader)) { - _assert_(m_reader); + ASSERT(m_reader); // Source: http://wiibrew.org/wiki/WAD_files m_hdr_size = m_reader->ReadSwapped(0x00).value_or(0); diff --git a/Source/Core/DiscIO/VolumeWii.cpp b/Source/Core/DiscIO/VolumeWii.cpp index bc0c410c13..135afcb603 100644 --- a/Source/Core/DiscIO/VolumeWii.cpp +++ b/Source/Core/DiscIO/VolumeWii.cpp @@ -39,7 +39,7 @@ VolumeWii::VolumeWii(std::unique_ptr reader) : m_pReader(std::move(reader)), m_game_partition(PARTITION_NONE), m_last_decrypted_block(UINT64_MAX) { - _assert_(m_pReader); + ASSERT(m_pReader); if (m_pReader->ReadSwapped(0x60) != u32(0)) { diff --git a/Source/Core/DiscIO/WbfsBlob.cpp b/Source/Core/DiscIO/WbfsBlob.cpp index ae379e6e51..0227a692c2 100644 --- a/Source/Core/DiscIO/WbfsBlob.cpp +++ b/Source/Core/DiscIO/WbfsBlob.cpp @@ -58,7 +58,7 @@ void WbfsFileReader::OpenAdditionalFiles(const std::string& path) if (path.length() < 4) return; - _assert_(m_files.size() > 0); // The code below gives .wbf0 for index 0, but it should be .wbfs + ASSERT(m_files.size() > 0); // The code below gives .wbf0 for index 0, but it should be .wbfs while (true) { diff --git a/Source/Core/DiscIO/WiiWad.cpp b/Source/Core/DiscIO/WiiWad.cpp index 8d6272d3a8..e980455779 100644 --- a/Source/Core/DiscIO/WiiWad.cpp +++ b/Source/Core/DiscIO/WiiWad.cpp @@ -74,7 +74,7 @@ bool WiiWAD::ParseWAD() } if (MAX_LOGLEVEL >= LogTypes::LOG_LEVELS::LDEBUG) - _dbg_assert_msg_(BOOT, *reserved == 0x00, "WiiWAD: Reserved must be 0x00"); + DEBUG_ASSERT_MSG(BOOT, *reserved == 0x00, "WiiWAD: Reserved must be 0x00"); u32 offset = 0x40; m_certificate_chain = CreateWADEntry(*m_reader, *certificate_chain_size, offset); diff --git a/Source/Core/DolphinWX/FifoPlayerDlg.cpp 
b/Source/Core/DolphinWX/FifoPlayerDlg.cpp index 18ced62d58..bf1a90467b 100644 --- a/Source/Core/DolphinWX/FifoPlayerDlg.cpp +++ b/Source/Core/DolphinWX/FifoPlayerDlg.cpp @@ -714,7 +714,7 @@ void FifoPlayerDlg::OnObjectListSelectionChanged(wxCommandEvent& event) // The recorder should have expanded display lists into the fifo stream and skipped the // call to start them // That is done to make it easier to track where memory is updated - _assert_(false); + ASSERT(false); objectdata += 8; newLabel = wxString::Format("CALL DL"); break; diff --git a/Source/Core/DolphinWX/TASInputDlg.cpp b/Source/Core/DolphinWX/TASInputDlg.cpp index 64744f91f3..f778236a81 100644 --- a/Source/Core/DolphinWX/TASInputDlg.cpp +++ b/Source/Core/DolphinWX/TASInputDlg.cpp @@ -1200,8 +1200,8 @@ void TASInputDlg::InvalidateExtension() void TASInputDlg::UpdateFromInvalidatedButton(wxCommandEvent& event) { auto* button = static_cast(event.GetClientData()); - _assert_msg_(PAD, button->id == button->checkbox->GetId(), "Button ids do not match: %i != %i", - button->id, button->checkbox->GetId()); + ASSERT_MSG(PAD, button->id == button->checkbox->GetId(), "Button ids do not match: %i != %i", + button->id, button->checkbox->GetId()); button->checkbox->SetValue(button->value); button->is_checked = button->value; } @@ -1209,8 +1209,8 @@ void TASInputDlg::UpdateFromInvalidatedButton(wxCommandEvent& event) void TASInputDlg::UpdateFromInvalidatedControl(wxCommandEvent& event) { auto* control = static_cast(event.GetClientData()); - _assert_msg_(PAD, control->text_id == control->text->GetId(), - "Control ids do not match: %i != %i", control->text_id, control->text->GetId()); + ASSERT_MSG(PAD, control->text_id == control->text->GetId(), "Control ids do not match: %i != %i", + control->text_id, control->text->GetId()); control->text->SetValue(std::to_string(control->value)); } diff --git a/Source/Core/DolphinWX/VideoConfigDiag.cpp b/Source/Core/DolphinWX/VideoConfigDiag.cpp index f1a57f8b8c..cdebfd27e9 100644 
--- a/Source/Core/DolphinWX/VideoConfigDiag.cpp +++ b/Source/Core/DolphinWX/VideoConfigDiag.cpp @@ -1245,7 +1245,7 @@ void VideoConfigDiag::PopulateAAList() if (mode == 1) { choice_aamode->AppendString(_("None")); - _assert_msg_(VIDEO, !supports_ssaa || m_msaa_modes == 0, "SSAA setting won't work correctly"); + ASSERT_MSG(VIDEO, !supports_ssaa || m_msaa_modes == 0, "SSAA setting won't work correctly"); } else { diff --git a/Source/Core/InputCommon/ControllerInterface/evdev/evdev.cpp b/Source/Core/InputCommon/ControllerInterface/evdev/evdev.cpp index 2049c85237..b6ed84f772 100644 --- a/Source/Core/InputCommon/ControllerInterface/evdev/evdev.cpp +++ b/Source/Core/InputCommon/ControllerInterface/evdev/evdev.cpp @@ -56,7 +56,7 @@ static void HotplugThreadFunc() NOTICE_LOG(SERIALINTERFACE, "evdev hotplug thread started"); udev* udev = udev_new(); - _assert_msg_(PAD, udev != nullptr, "Couldn't initialize libudev."); + ASSERT_MSG(PAD, udev != nullptr, "Couldn't initialize libudev."); // Set up monitoring udev_monitor* monitor = udev_monitor_new_from_netlink(udev, "udev"); @@ -120,7 +120,7 @@ static void StartHotplugThread() return; s_wakeup_eventfd = eventfd(0, 0); - _assert_msg_(PAD, s_wakeup_eventfd != -1, "Couldn't create eventfd."); + ASSERT_MSG(PAD, s_wakeup_eventfd != -1, "Couldn't create eventfd."); s_hotplug_thread = std::thread(HotplugThreadFunc); } @@ -152,7 +152,7 @@ void PopulateDevices() // this ever changes, hopefully udev will take care of this. 
udev* udev = udev_new(); - _assert_msg_(PAD, udev != nullptr, "Couldn't initialize libudev."); + ASSERT_MSG(PAD, udev != nullptr, "Couldn't initialize libudev."); // List all input devices udev_enumerate* enumerate = udev_enumerate_new(udev); diff --git a/Source/Core/UICommon/GameFile.cpp b/Source/Core/UICommon/GameFile.cpp index 1a6b0717c2..e28ef3b274 100644 --- a/Source/Core/UICommon/GameFile.cpp +++ b/Source/Core/UICommon/GameFile.cpp @@ -305,7 +305,7 @@ std::string GameFile::GetUniqueIdentifier() const std::string GameFile::GetWiiFSPath() const { - _assert_(DiscIO::IsWii(m_platform)); + ASSERT(DiscIO::IsWii(m_platform)); return Common::GetTitleDataPath(m_title_id, Common::FROM_CONFIGURED_ROOT); } diff --git a/Source/Core/UICommon/VideoUtils.cpp b/Source/Core/UICommon/VideoUtils.cpp index c21cb93b8f..3656f53209 100644 --- a/Source/Core/UICommon/VideoUtils.cpp +++ b/Source/Core/UICommon/VideoUtils.cpp @@ -54,7 +54,7 @@ std::vector GetAvailableAntialiasingModes(int& msaa_modes) if (mode == 1) { modes.push_back("None"); - _assert_msg_(VIDEO, !supports_ssaa || msaa_modes == 0, "SSAA setting won't work correctly"); + ASSERT_MSG(VIDEO, !supports_ssaa || msaa_modes == 0, "SSAA setting won't work correctly"); } else { diff --git a/Source/Core/VideoBackends/D3D/D3DUtil.cpp b/Source/Core/VideoBackends/D3D/D3DUtil.cpp index 138db0554b..e0a473be83 100644 --- a/Source/Core/VideoBackends/D3D/D3DUtil.cpp +++ b/Source/Core/VideoBackends/D3D/D3DUtil.cpp @@ -62,7 +62,7 @@ public: int BeginAppendData(void** write_ptr, unsigned int size, unsigned int vertex_size) { - _dbg_assert_(VIDEO, size < max_size); + DEBUG_ASSERT(VIDEO, size < max_size); D3D11_MAPPED_SUBRESOURCE map; unsigned int aligned_offset = Common::AlignUp(offset, vertex_size); diff --git a/Source/Core/VideoBackends/D3D/DXPipeline.cpp b/Source/Core/VideoBackends/D3D/DXPipeline.cpp index 278030a66b..df827e1fba 100644 --- a/Source/Core/VideoBackends/D3D/DXPipeline.cpp +++ b/Source/Core/VideoBackends/D3D/DXPipeline.cpp @@ 
-81,7 +81,7 @@ std::unique_ptr DXPipeline::Create(const AbstractPipelineConfig& con const DXShader* vertex_shader = static_cast(config.vertex_shader); const DXShader* geometry_shader = static_cast(config.geometry_shader); const DXShader* pixel_shader = static_cast(config.pixel_shader); - _assert_(vertex_shader != nullptr && pixel_shader != nullptr); + ASSERT(vertex_shader != nullptr && pixel_shader != nullptr); ID3D11InputLayout* input_layout = const_cast(static_cast(config.vertex_format)) diff --git a/Source/Core/VideoBackends/D3D/DXShader.cpp b/Source/Core/VideoBackends/D3D/DXShader.cpp index 8ceda15e2b..bba5188575 100644 --- a/Source/Core/VideoBackends/D3D/DXShader.cpp +++ b/Source/Core/VideoBackends/D3D/DXShader.cpp @@ -43,31 +43,31 @@ D3DBlob* DXShader::GetByteCode() const ID3D11VertexShader* DXShader::GetD3DVertexShader() const { - _dbg_assert_(VIDEO, m_stage == ShaderStage::Vertex); + DEBUG_ASSERT(VIDEO, m_stage == ShaderStage::Vertex); return static_cast(m_shader); } ID3D11GeometryShader* DXShader::GetD3DGeometryShader() const { - _dbg_assert_(VIDEO, m_stage == ShaderStage::Geometry); + DEBUG_ASSERT(VIDEO, m_stage == ShaderStage::Geometry); return static_cast(m_shader); } ID3D11PixelShader* DXShader::GetD3DPixelShader() const { - _dbg_assert_(VIDEO, m_stage == ShaderStage::Pixel); + DEBUG_ASSERT(VIDEO, m_stage == ShaderStage::Pixel); return static_cast(m_shader); } ID3D11ComputeShader* DXShader::GetD3DComputeShader() const { - _dbg_assert_(VIDEO, m_stage == ShaderStage::Compute); + DEBUG_ASSERT(VIDEO, m_stage == ShaderStage::Compute); return static_cast(m_shader); } bool DXShader::HasBinary() const { - _assert_(m_bytecode); + ASSERT(m_bytecode); return true; } diff --git a/Source/Core/VideoBackends/D3D/DXTexture.cpp b/Source/Core/VideoBackends/D3D/DXTexture.cpp index 51a017b741..4953c0c3a7 100644 --- a/Source/Core/VideoBackends/D3D/DXTexture.cpp +++ b/Source/Core/VideoBackends/D3D/DXTexture.cpp @@ -140,8 +140,8 @@ void 
DXTexture::CopyRectangleFromTexture(const AbstractTexture* src, u32 dst_layer, u32 dst_level) { const DXTexture* srcentry = static_cast(src); - _assert_(src_rect.GetWidth() == dst_rect.GetWidth() && - src_rect.GetHeight() == dst_rect.GetHeight()); + ASSERT(src_rect.GetWidth() == dst_rect.GetWidth() && + src_rect.GetHeight() == dst_rect.GetHeight()); D3D11_BOX src_box; src_box.left = src_rect.left; @@ -162,7 +162,7 @@ void DXTexture::ScaleRectangleFromTexture(const AbstractTexture* source, const MathUtil::Rectangle& dstrect) { const DXTexture* srcentry = static_cast(source); - _assert_(m_config.rendertarget); + ASSERT(m_config.rendertarget); g_renderer->ResetAPIState(); // reset any game specific settings @@ -193,9 +193,9 @@ void DXTexture::ResolveFromTexture(const AbstractTexture* src, const MathUtil::R u32 layer, u32 level) { const DXTexture* srcentry = static_cast(src); - _dbg_assert_(VIDEO, m_config.samples > 1 && m_config.width == srcentry->m_config.width && + DEBUG_ASSERT(VIDEO, m_config.samples > 1 && m_config.width == srcentry->m_config.width && m_config.height == srcentry->m_config.height && m_config.samples == 1); - _dbg_assert_(VIDEO, + DEBUG_ASSERT(VIDEO, rect.left + rect.GetWidth() <= static_cast(srcentry->m_config.width) && rect.top + rect.GetHeight() <= static_cast(srcentry->m_config.height)); @@ -263,13 +263,13 @@ void DXStagingTexture::CopyFromTexture(const AbstractTexture* src, const MathUtil::Rectangle& src_rect, u32 src_layer, u32 src_level, const MathUtil::Rectangle& dst_rect) { - _assert_(m_type == StagingTextureType::Readback); - _assert_(src_rect.GetWidth() == dst_rect.GetWidth() && - src_rect.GetHeight() == dst_rect.GetHeight()); - _assert_(src_rect.left >= 0 && static_cast(src_rect.right) <= src->GetConfig().width && - src_rect.top >= 0 && static_cast(src_rect.bottom) <= src->GetConfig().height); - _assert_(dst_rect.left >= 0 && static_cast(dst_rect.right) <= m_config.width && - dst_rect.top >= 0 && static_cast(dst_rect.bottom) <= 
m_config.height); + ASSERT(m_type == StagingTextureType::Readback); + ASSERT(src_rect.GetWidth() == dst_rect.GetWidth() && + src_rect.GetHeight() == dst_rect.GetHeight()); + ASSERT(src_rect.left >= 0 && static_cast(src_rect.right) <= src->GetConfig().width && + src_rect.top >= 0 && static_cast(src_rect.bottom) <= src->GetConfig().height); + ASSERT(dst_rect.left >= 0 && static_cast(dst_rect.right) <= m_config.width && + dst_rect.top >= 0 && static_cast(dst_rect.bottom) <= m_config.height); if (IsMapped()) DXStagingTexture::Unmap(); @@ -287,13 +287,13 @@ void DXStagingTexture::CopyToTexture(const MathUtil::Rectangle& src_rect, A const MathUtil::Rectangle& dst_rect, u32 dst_layer, u32 dst_level) { - _assert_(m_type == StagingTextureType::Upload); - _assert_(src_rect.GetWidth() == dst_rect.GetWidth() && - src_rect.GetHeight() == dst_rect.GetHeight()); - _assert_(src_rect.left >= 0 && static_cast(src_rect.right) <= m_config.width && - src_rect.top >= 0 && static_cast(src_rect.bottom) <= m_config.height); - _assert_(dst_rect.left >= 0 && static_cast(dst_rect.right) <= dst->GetConfig().width && - dst_rect.top >= 0 && static_cast(dst_rect.bottom) <= dst->GetConfig().height); + ASSERT(m_type == StagingTextureType::Upload); + ASSERT(src_rect.GetWidth() == dst_rect.GetWidth() && + src_rect.GetHeight() == dst_rect.GetHeight()); + ASSERT(src_rect.left >= 0 && static_cast(src_rect.right) <= m_config.width && + src_rect.top >= 0 && static_cast(src_rect.bottom) <= m_config.height); + ASSERT(dst_rect.left >= 0 && static_cast(dst_rect.right) <= dst->GetConfig().width && + dst_rect.top >= 0 && static_cast(dst_rect.bottom) <= dst->GetConfig().height); if (IsMapped()) DXStagingTexture::Unmap(); diff --git a/Source/Core/VideoBackends/D3D/PSTextureEncoder.cpp b/Source/Core/VideoBackends/D3D/PSTextureEncoder.cpp index ad13894331..8d48332af7 100644 --- a/Source/Core/VideoBackends/D3D/PSTextureEncoder.cpp +++ b/Source/Core/VideoBackends/D3D/PSTextureEncoder.cpp @@ -48,7 +48,7 @@ void 
PSTextureEncoder::Init() m_encoding_render_texture = g_renderer->CreateTexture(encoding_texture_config); m_encoding_readback_texture = g_renderer->CreateStagingTexture(StagingTextureType::Readback, encoding_texture_config); - _assert_(m_encoding_render_texture && m_encoding_readback_texture); + ASSERT(m_encoding_render_texture && m_encoding_readback_texture); // Create constant buffer for uploading data to shaders D3D11_BUFFER_DESC bd = CD3D11_BUFFER_DESC(sizeof(EFBEncodeParams), D3D11_BIND_CONSTANT_BUFFER); diff --git a/Source/Core/VideoBackends/D3D/Render.cpp b/Source/Core/VideoBackends/D3D/Render.cpp index 91475b89d9..d8eac0ff18 100644 --- a/Source/Core/VideoBackends/D3D/Render.cpp +++ b/Source/Core/VideoBackends/D3D/Render.cpp @@ -270,7 +270,7 @@ std::unique_ptr Renderer::CreatePipeline(const AbstractPipelin void Renderer::UpdateUtilityUniformBuffer(const void* uniforms, u32 uniforms_size) { - _dbg_assert_(VIDEO, uniforms_size > 0 && uniforms_size < UTILITY_UBO_SIZE); + DEBUG_ASSERT(VIDEO, uniforms_size > 0 && uniforms_size < UTILITY_UBO_SIZE); D3D11_MAPPED_SUBRESOURCE mapped; HRESULT hr = D3D::context->Map(m_utility_uniform_buffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &mapped); CHECK(SUCCEEDED(hr), "Map utility UBO"); @@ -323,7 +323,7 @@ void Renderer::DrawUtilityPipeline(const void* uniforms, u32 uniforms_size, cons if (vertices_ptr) { vertices_this_draw = std::min(vertices_this_draw, UTILITY_VBO_SIZE / vertex_stride); - _dbg_assert_(VIDEO, vertices_this_draw > 0); + DEBUG_ASSERT(VIDEO, vertices_this_draw > 0); UpdateUtilityVertexBuffer(vertices_ptr, vertex_stride, vertices_this_draw); D3D::stateman->SetVertexBuffer(m_utility_vertex_buffer, vertex_stride, 0); } diff --git a/Source/Core/VideoBackends/OGL/OGLTexture.cpp b/Source/Core/VideoBackends/OGL/OGLTexture.cpp index c57506d233..bf77db0f6d 100644 --- a/Source/Core/VideoBackends/OGL/OGLTexture.cpp +++ b/Source/Core/VideoBackends/OGL/OGLTexture.cpp @@ -108,7 +108,7 @@ bool UsePersistentStagingBuffers() 
OGLTexture::OGLTexture(const TextureConfig& tex_config) : AbstractTexture(tex_config) { - _dbg_assert_msg_(VIDEO, !tex_config.IsMultisampled() || tex_config.levels == 1, + DEBUG_ASSERT_MSG(VIDEO, !tex_config.IsMultisampled() || tex_config.levels == 1, "OpenGL does not support multisampled textures with mip levels"); GLenum target = @@ -138,7 +138,7 @@ OGLTexture::OGLTexture(const TextureConfig& tex_config) : AbstractTexture(tex_co if (m_config.rendertarget) { // We can't render to compressed formats. - _assert_(!IsCompressedFormat(m_config.format)); + ASSERT(!IsCompressedFormat(m_config.format)); if (!g_ogl_config.bSupportsTextureStorage && !tex_config.IsMultisampled()) { for (u32 level = 0; level < m_config.levels; level++) @@ -185,8 +185,8 @@ void OGLTexture::CopyRectangleFromTexture(const AbstractTexture* src, u32 dst_layer, u32 dst_level) { const OGLTexture* srcentry = static_cast(src); - _assert_(src_rect.GetWidth() == dst_rect.GetWidth() && - src_rect.GetHeight() == dst_rect.GetHeight()); + ASSERT(src_rect.GetWidth() == dst_rect.GetWidth() && + src_rect.GetHeight() == dst_rect.GetHeight()); if (g_ogl_config.bSupportsCopySubImage) { glCopyImageSubData(srcentry->m_texId, GL_TEXTURE_2D_ARRAY, src_level, src_rect.left, @@ -279,9 +279,9 @@ void OGLTexture::ResolveFromTexture(const AbstractTexture* src, const MathUtil::Rectangle& rect, u32 layer, u32 level) { const OGLTexture* srcentry = static_cast(src); - _dbg_assert_(VIDEO, m_config.samples > 1 && m_config.width == srcentry->m_config.width && + DEBUG_ASSERT(VIDEO, m_config.samples > 1 && m_config.width == srcentry->m_config.width && m_config.height == srcentry->m_config.height && m_config.samples == 1); - _dbg_assert_(VIDEO, + DEBUG_ASSERT(VIDEO, rect.left + rect.GetWidth() <= static_cast(srcentry->m_config.width) && rect.top + rect.GetHeight() <= static_cast(srcentry->m_config.height)); BlitFramebuffer(const_cast(srcentry), rect, layer, level, rect, layer, level); @@ -398,7 +398,7 @@ std::unique_ptr 
OGLStagingTexture::Create(StagingTextureType glBufferStorage(target, buffer_size, nullptr, buffer_flags); buffer_ptr = reinterpret_cast(glMapBufferRange(GL_PIXEL_PACK_BUFFER, 0, buffer_size, map_flags)); - _assert_(buffer_ptr != nullptr); + ASSERT(buffer_ptr != nullptr); } else { @@ -417,13 +417,13 @@ void OGLStagingTexture::CopyFromTexture(const AbstractTexture* src, const MathUtil::Rectangle& src_rect, u32 src_layer, u32 src_level, const MathUtil::Rectangle& dst_rect) { - _assert_(m_type == StagingTextureType::Readback); - _assert_(src_rect.GetWidth() == dst_rect.GetWidth() && - src_rect.GetHeight() == dst_rect.GetHeight()); - _assert_(src_rect.left >= 0 && static_cast(src_rect.right) <= src->GetConfig().width && - src_rect.top >= 0 && static_cast(src_rect.bottom) <= src->GetConfig().height); - _assert_(dst_rect.left >= 0 && static_cast(dst_rect.right) <= m_config.width && - dst_rect.top >= 0 && static_cast(dst_rect.bottom) <= m_config.height); + ASSERT(m_type == StagingTextureType::Readback); + ASSERT(src_rect.GetWidth() == dst_rect.GetWidth() && + src_rect.GetHeight() == dst_rect.GetHeight()); + ASSERT(src_rect.left >= 0 && static_cast(src_rect.right) <= src->GetConfig().width && + src_rect.top >= 0 && static_cast(src_rect.bottom) <= src->GetConfig().height); + ASSERT(dst_rect.left >= 0 && static_cast(dst_rect.right) <= m_config.width && + dst_rect.top >= 0 && static_cast(dst_rect.bottom) <= m_config.height); // Unmap the buffer before writing when not using persistent mappings. 
if (!UsePersistentStagingBuffers()) @@ -492,13 +492,13 @@ void OGLStagingTexture::CopyToTexture(const MathUtil::Rectangle& src_rect, const MathUtil::Rectangle& dst_rect, u32 dst_layer, u32 dst_level) { - _assert_(m_type == StagingTextureType::Upload); - _assert_(src_rect.GetWidth() == dst_rect.GetWidth() && - src_rect.GetHeight() == dst_rect.GetHeight()); - _assert_(src_rect.left >= 0 && static_cast(src_rect.right) <= m_config.width && - src_rect.top >= 0 && static_cast(src_rect.bottom) <= m_config.height); - _assert_(dst_rect.left >= 0 && static_cast(dst_rect.right) <= dst->GetConfig().width && - dst_rect.top >= 0 && static_cast(dst_rect.bottom) <= dst->GetConfig().height); + ASSERT(m_type == StagingTextureType::Upload); + ASSERT(src_rect.GetWidth() == dst_rect.GetWidth() && + src_rect.GetHeight() == dst_rect.GetHeight()); + ASSERT(src_rect.left >= 0 && static_cast(src_rect.right) <= m_config.width && + src_rect.top >= 0 && static_cast(src_rect.bottom) <= m_config.height); + ASSERT(dst_rect.left >= 0 && static_cast(dst_rect.right) <= dst->GetConfig().width && + dst_rect.top >= 0 && static_cast(dst_rect.bottom) <= dst->GetConfig().height); size_t src_offset = src_rect.top * m_config.GetStride() + src_rect.left * m_texel_size; size_t copy_size = src_rect.GetHeight() * m_config.GetStride(); @@ -655,7 +655,7 @@ std::unique_ptr OGLFramebuffer::Create(const OGLTexture* color_a } } - _dbg_assert_(VIDEO, glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE); + DEBUG_ASSERT(VIDEO, glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE); FramebufferManager::SetFramebuffer(0); return std::make_unique(color_format, depth_format, width, height, layers, samples, fbo); diff --git a/Source/Core/VideoBackends/OGL/ProgramShaderCache.cpp b/Source/Core/VideoBackends/OGL/ProgramShaderCache.cpp index 95d5b0a9cd..0990f01fb6 100644 --- a/Source/Core/VideoBackends/OGL/ProgramShaderCache.cpp +++ b/Source/Core/VideoBackends/OGL/ProgramShaderCache.cpp @@ -507,7 
+507,7 @@ void ProgramShaderCache::Shutdown() s_last_VAO = 0; // All pipeline programs should have been released. - _dbg_assert_(VIDEO, s_pipeline_programs.empty()); + DEBUG_ASSERT(VIDEO, s_pipeline_programs.empty()); s_pipeline_programs.clear(); } @@ -575,14 +575,14 @@ const PipelineProgram* ProgramShaderCache::GetPipelineProgram(const GLVertexForm prog->key = key; // Attach shaders. - _assert_(vertex_shader && vertex_shader->GetStage() == ShaderStage::Vertex); - _assert_(pixel_shader && pixel_shader->GetStage() == ShaderStage::Pixel); + ASSERT(vertex_shader && vertex_shader->GetStage() == ShaderStage::Vertex); + ASSERT(pixel_shader && pixel_shader->GetStage() == ShaderStage::Pixel); prog->shader.glprogid = glCreateProgram(); glAttachShader(prog->shader.glprogid, vertex_shader->GetGLShaderID()); glAttachShader(prog->shader.glprogid, pixel_shader->GetGLShaderID()); if (geometry_shader) { - _assert_(geometry_shader->GetStage() == ShaderStage::Geometry); + ASSERT(geometry_shader->GetStage() == ShaderStage::Geometry); glAttachShader(prog->shader.glprogid, geometry_shader->GetGLShaderID()); } @@ -627,7 +627,7 @@ const PipelineProgram* ProgramShaderCache::GetPipelineProgram(const GLVertexForm void ProgramShaderCache::ReleasePipelineProgram(const PipelineProgram* prog) { auto iter = s_pipeline_programs.find(prog->key); - _assert_(iter != s_pipeline_programs.end() && prog == iter->second.get()); + ASSERT(iter != s_pipeline_programs.end() && prog == iter->second.get()); if (--iter->second->reference_count == 0) { diff --git a/Source/Core/VideoBackends/OGL/Render.cpp b/Source/Core/VideoBackends/OGL/Render.cpp index a0c3bb6c8d..9d640661a0 100644 --- a/Source/Core/VideoBackends/OGL/Render.cpp +++ b/Source/Core/VideoBackends/OGL/Render.cpp @@ -1690,7 +1690,7 @@ void Renderer::DrawUtilityPipeline(const void* uniforms, u32 uniforms_size, cons void Renderer::UploadUtilityUniforms(const void* uniforms, u32 uniforms_size) { - _dbg_assert_(VIDEO, uniforms_size > 0); + 
DEBUG_ASSERT(VIDEO, uniforms_size > 0); auto buf = ProgramShaderCache::GetUniformBuffer()->Map( uniforms_size, ProgramShaderCache::GetUniformBufferAlignment()); diff --git a/Source/Core/VideoBackends/OGL/TextureCache.cpp b/Source/Core/VideoBackends/OGL/TextureCache.cpp index bd2e1cfcd1..24f94e39db 100644 --- a/Source/Core/VideoBackends/OGL/TextureCache.cpp +++ b/Source/Core/VideoBackends/OGL/TextureCache.cpp @@ -132,7 +132,7 @@ GLuint TextureCache::GetColorCopyPositionUniform() const bool TextureCache::CompilePaletteShader(TLUTFormat tlutfmt, const std::string& vcode, const std::string& pcode, const std::string& gcode) { - _assert_(IsValidTLUTFormat(tlutfmt)); + ASSERT(IsValidTLUTFormat(tlutfmt)); PaletteShader& shader = m_palette_shaders[static_cast(tlutfmt)]; if (!ProgramShaderCache::CompileShader(shader.shader, vcode, pcode, gcode)) @@ -289,7 +289,7 @@ void TextureCache::ConvertTexture(TCacheEntry* destination, TCacheEntry* source, if (!g_ActiveConfig.backend_info.bSupportsPaletteConversion) return; - _assert_(IsValidTLUTFormat(tlutfmt)); + ASSERT(IsValidTLUTFormat(tlutfmt)); const PaletteShader& palette_shader = m_palette_shaders[static_cast(tlutfmt)]; g_renderer->ResetAPIState(); diff --git a/Source/Core/VideoBackends/Software/Clipper.cpp b/Source/Core/VideoBackends/Software/Clipper.cpp index f527156a27..8fb66ab070 100644 --- a/Source/Core/VideoBackends/Software/Clipper.cpp +++ b/Source/Core/VideoBackends/Software/Clipper.cpp @@ -318,7 +318,7 @@ void ProcessTriangle(OutputVertexData* v0, OutputVertexData* v1, OutputVertexDat for (int i = 0; i + 3 <= numIndices; i += 3) { - _assert_(i < NUM_INDICES); + ASSERT(i < NUM_INDICES); if (indices[i] != SKIP_FLAG) { PerspectiveDivide(Vertices[indices[i]]); diff --git a/Source/Core/VideoBackends/Software/SWTexture.cpp b/Source/Core/VideoBackends/Software/SWTexture.cpp index 78f083f768..eb450d642f 100644 --- a/Source/Core/VideoBackends/Software/SWTexture.cpp +++ b/Source/Core/VideoBackends/Software/SWTexture.cpp @@ -57,7 
+57,7 @@ void SWTexture::CopyRectangleFromTexture(const AbstractTexture* src, u32 src_level, const MathUtil::Rectangle& dst_rect, u32 dst_layer, u32 dst_level) { - _assert_(src_level == 0 && src_layer == 0 && dst_layer == 0 && dst_level == 0); + ASSERT(src_level == 0 && src_layer == 0 && dst_layer == 0 && dst_level == 0); CopyTextureData(src->GetConfig(), static_cast(src)->m_data.data(), src_rect.left, src_rect.top, src_rect.GetWidth(), src_rect.GetHeight(), m_config, m_data.data(), dst_rect.left, dst_rect.top); @@ -121,7 +121,7 @@ void SWStagingTexture::CopyFromTexture(const AbstractTexture* src, const MathUtil::Rectangle& src_rect, u32 src_layer, u32 src_level, const MathUtil::Rectangle& dst_rect) { - _assert_(src_level == 0 && src_layer == 0); + ASSERT(src_level == 0 && src_layer == 0); CopyTextureData(src->GetConfig(), static_cast(src)->GetData(), src_rect.left, src_rect.top, src_rect.GetWidth(), src_rect.GetHeight(), m_config, m_data.data(), dst_rect.left, dst_rect.top); @@ -132,7 +132,7 @@ void SWStagingTexture::CopyToTexture(const MathUtil::Rectangle& src_rect, A const MathUtil::Rectangle& dst_rect, u32 dst_layer, u32 dst_level) { - _assert_(dst_level == 0 && dst_layer == 0); + ASSERT(dst_level == 0 && dst_layer == 0); CopyTextureData(m_config, m_data.data(), src_rect.left, src_rect.top, src_rect.GetWidth(), src_rect.GetHeight(), dst->GetConfig(), static_cast(dst)->GetData(), dst_rect.left, dst_rect.top); diff --git a/Source/Core/VideoBackends/Software/SWVertexLoader.cpp b/Source/Core/VideoBackends/Software/SWVertexLoader.cpp index a00fc057d8..07053fee26 100644 --- a/Source/Core/VideoBackends/Software/SWVertexLoader.cpp +++ b/Source/Core/VideoBackends/Software/SWVertexLoader.cpp @@ -196,8 +196,8 @@ static void ReadVertexAttribute(T* dst, DataReader src, const AttributeFormat& f break; } - _assert_msg_(VIDEO, !format.integer || format.type != VAR_FLOAT, - "only non-float values are allowed to be streamed as integer"); + ASSERT_MSG(VIDEO, !format.integer || 
format.type != VAR_FLOAT, + "only non-float values are allowed to be streamed as integer"); } for (; i < components; i++) { diff --git a/Source/Core/VideoBackends/Software/Tev.cpp b/Source/Core/VideoBackends/Software/Tev.cpp index 572da58f9c..a9f2c61338 100644 --- a/Source/Core/VideoBackends/Software/Tev.cpp +++ b/Source/Core/VideoBackends/Software/Tev.cpp @@ -564,8 +564,8 @@ void Tev::Indirect(unsigned int stageNum, s32 s, s32 t) void Tev::Draw() { - _assert_(Position[0] >= 0 && Position[0] < EFB_WIDTH); - _assert_(Position[1] >= 0 && Position[1] < EFB_HEIGHT); + ASSERT(Position[0] >= 0 && Position[0] < EFB_WIDTH); + ASSERT(Position[1] >= 0 && Position[1] < EFB_HEIGHT); INCSTAT(stats.thisFrame.tevPixelsIn); diff --git a/Source/Core/VideoBackends/Software/TransformUnit.cpp b/Source/Core/VideoBackends/Software/TransformUnit.cpp index 5de48f4f05..8a2b4e7fb8 100644 --- a/Source/Core/VideoBackends/Software/TransformUnit.cpp +++ b/Source/Core/VideoBackends/Software/TransformUnit.cpp @@ -129,7 +129,7 @@ static void TransformTexCoordRegular(const TexMtxInfo& texinfo, int coordNum, bo src = srcVertex->normal[2]; break; default: - _assert_(texinfo.sourcerow >= XF_SRCTEX0_INROW && texinfo.sourcerow <= XF_SRCTEX7_INROW); + ASSERT(texinfo.sourcerow >= XF_SRCTEX0_INROW && texinfo.sourcerow <= XF_SRCTEX7_INROW); src.x = srcVertex->texCoords[texinfo.sourcerow - XF_SRCTEX0_INROW][0]; src.y = srcVertex->texCoords[texinfo.sourcerow - XF_SRCTEX0_INROW][1]; src.z = 1.0f; @@ -148,7 +148,7 @@ static void TransformTexCoordRegular(const TexMtxInfo& texinfo, int coordNum, bo } else // texinfo.projection == XF_TEXPROJ_STQ { - _assert_(!specialCase); + ASSERT(!specialCase); if (texinfo.inputform == XF_TEXINPUT_AB11) MultiplyVec2Mat34(src, mat, *dst); @@ -289,7 +289,7 @@ static void LightColor(const Vec3& pos, const Vec3& normal, u8 lightNum, const L AddScaledIntegerColor(light->color, attn * difAttn, lightCol); break; default: - _assert_(0); + ASSERT(0); } } @@ -315,7 +315,7 @@ static void 
LightAlpha(const Vec3& pos, const Vec3& normal, u8 lightNum, const L lightCol += light->color[0] * attn * difAttn; break; default: - _assert_(0); + ASSERT(0); } } @@ -432,15 +432,15 @@ void TransformTexCoord(const InputVertexData* src, OutputVertexData* dst, bool s } break; case XF_TEXGEN_COLOR_STRGBC0: - _assert_(texinfo.sourcerow == XF_SRCCOLORS_INROW); - _assert_(texinfo.inputform == XF_TEXINPUT_AB11); + ASSERT(texinfo.sourcerow == XF_SRCCOLORS_INROW); + ASSERT(texinfo.inputform == XF_TEXINPUT_AB11); dst->texCoords[coordNum].x = (float)dst->color[0][0] / 255.0f; dst->texCoords[coordNum].y = (float)dst->color[0][1] / 255.0f; dst->texCoords[coordNum].z = 1.0f; break; case XF_TEXGEN_COLOR_STRGBC1: - _assert_(texinfo.sourcerow == XF_SRCCOLORS_INROW); - _assert_(texinfo.inputform == XF_TEXINPUT_AB11); + ASSERT(texinfo.sourcerow == XF_SRCCOLORS_INROW); + ASSERT(texinfo.inputform == XF_TEXINPUT_AB11); dst->texCoords[coordNum].x = (float)dst->color[1][0] / 255.0f; dst->texCoords[coordNum].y = (float)dst->color[1][1] / 255.0f; dst->texCoords[coordNum].z = 1.0f; diff --git a/Source/Core/VideoBackends/Vulkan/BoundingBox.cpp b/Source/Core/VideoBackends/Vulkan/BoundingBox.cpp index 5cfc174bf8..169404d7ca 100644 --- a/Source/Core/VideoBackends/Vulkan/BoundingBox.cpp +++ b/Source/Core/VideoBackends/Vulkan/BoundingBox.cpp @@ -115,7 +115,7 @@ void BoundingBox::Invalidate() s32 BoundingBox::Get(size_t index) { - _assert_(index < NUM_VALUES); + ASSERT(index < NUM_VALUES); if (!m_valid) Readback(); @@ -127,7 +127,7 @@ s32 BoundingBox::Get(size_t index) void BoundingBox::Set(size_t index, s32 value) { - _assert_(index < NUM_VALUES); + ASSERT(index < NUM_VALUES); // If we're currently valid, update the stored value in both our cache and the GPU buffer. 
if (m_valid) diff --git a/Source/Core/VideoBackends/Vulkan/CommandBufferManager.cpp b/Source/Core/VideoBackends/Vulkan/CommandBufferManager.cpp index 4b13320476..b60e6d16b6 100644 --- a/Source/Core/VideoBackends/Vulkan/CommandBufferManager.cpp +++ b/Source/Core/VideoBackends/Vulkan/CommandBufferManager.cpp @@ -226,7 +226,7 @@ void CommandBufferManager::WaitForFence(VkFence fence) if (m_frame_resources[command_buffer_index].fence == fence) break; } - _assert_(command_buffer_index < m_frame_resources.size()); + ASSERT(command_buffer_index < m_frame_resources.size()); // Has this command buffer already been waited for? if (!m_frame_resources[command_buffer_index].needs_fence_wait) @@ -342,7 +342,7 @@ void CommandBufferManager::SubmitCommandBuffer(size_t index, VkSemaphore wait_se if (present_swap_chain != VK_NULL_HANDLE) { // Should have a signal semaphore. - _assert_(signal_semaphore != VK_NULL_HANDLE); + ASSERT(signal_semaphore != VK_NULL_HANDLE); VkPresentInfoKHR present_info = {VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, nullptr, 1, @@ -489,14 +489,14 @@ void CommandBufferManager::AddFencePointCallback( const CommandBufferExecutedCallback& executed_callback) { // Shouldn't be adding twice. 
- _assert_(m_fence_point_callbacks.find(key) == m_fence_point_callbacks.end()); + ASSERT(m_fence_point_callbacks.find(key) == m_fence_point_callbacks.end()); m_fence_point_callbacks.emplace(key, std::make_pair(queued_callback, executed_callback)); } void CommandBufferManager::RemoveFencePointCallback(const void* key) { auto iter = m_fence_point_callbacks.find(key); - _assert_(iter != m_fence_point_callbacks.end()); + ASSERT(iter != m_fence_point_callbacks.end()); m_fence_point_callbacks.erase(iter); } diff --git a/Source/Core/VideoBackends/Vulkan/FramebufferManager.cpp b/Source/Core/VideoBackends/Vulkan/FramebufferManager.cpp index 27a1fd30f8..1df851cb7c 100644 --- a/Source/Core/VideoBackends/Vulkan/FramebufferManager.cpp +++ b/Source/Core/VideoBackends/Vulkan/FramebufferManager.cpp @@ -393,9 +393,9 @@ Texture2D* FramebufferManager::ResolveEFBColorTexture(const VkRect2D& region) // It's not valid to resolve out-of-bounds coordinates. // Ensuring the region is within the image is the caller's responsibility. - _assert_(region.offset.x >= 0 && region.offset.y >= 0 && - (static_cast(region.offset.x) + region.extent.width) <= GetEFBWidth() && - (static_cast(region.offset.y) + region.extent.height) <= GetEFBHeight()); + ASSERT(region.offset.x >= 0 && region.offset.y >= 0 && + (static_cast(region.offset.x) + region.extent.width) <= GetEFBWidth() && + (static_cast(region.offset.y) + region.extent.height) <= GetEFBHeight()); // Resolving is considered to be a transfer operation. 
m_efb_color_texture->TransitionToLayout(g_command_buffer_mgr->GetCurrentCommandBuffer(), diff --git a/Source/Core/VideoBackends/Vulkan/PerfQuery.cpp b/Source/Core/VideoBackends/Vulkan/PerfQuery.cpp index 11ba898bf6..ef47cb9ba2 100644 --- a/Source/Core/VideoBackends/Vulkan/PerfQuery.cpp +++ b/Source/Core/VideoBackends/Vulkan/PerfQuery.cpp @@ -76,7 +76,7 @@ void PerfQuery::EnableQuery(PerfQueryGroup type) { u32 index = (m_query_read_pos + m_query_count) % PERF_QUERY_BUFFER_SIZE; ActiveQuery& entry = m_query_buffer[index]; - _assert_(!entry.active && !entry.available); + ASSERT(!entry.active && !entry.available); entry.active = true; m_query_count++; @@ -245,12 +245,12 @@ void PerfQuery::OnCommandBufferQueued(VkCommandBuffer command_buffer, VkFence fe if (entry.available) { // These should be grouped together, and at the start. - _assert_(copy_count == 0); + ASSERT(copy_count == 0); continue; } // If this wrapped around, we need to flush the entries before the end of the buffer. - _assert_(entry.active); + ASSERT(entry.active); if (index < copy_start_index) { QueueCopyQueryResults(command_buffer, fence, copy_start_index, copy_count); @@ -311,7 +311,7 @@ void PerfQuery::ProcessResults(u32 start_index, u32 query_count) query_count * sizeof(PerfQueryDataType)); // Should be at maximum query_count queries pending. - _assert_(query_count <= m_query_count); + ASSERT(query_count <= m_query_count); DEBUG_LOG(VIDEO, "process queries %u-%u", start_index, start_index + query_count - 1); // Remove pending queries. @@ -321,7 +321,7 @@ void PerfQuery::ProcessResults(u32 start_index, u32 query_count) ActiveQuery& entry = m_query_buffer[index]; // Should have a fence associated with it (waiting for a result). 
- _assert_(entry.pending_fence != VK_NULL_HANDLE); + ASSERT(entry.pending_fence != VK_NULL_HANDLE); entry.pending_fence = VK_NULL_HANDLE; entry.available = false; entry.active = false; diff --git a/Source/Core/VideoBackends/Vulkan/PostProcessing.cpp b/Source/Core/VideoBackends/Vulkan/PostProcessing.cpp index f2a8b3e0d0..983567b2ac 100644 --- a/Source/Core/VideoBackends/Vulkan/PostProcessing.cpp +++ b/Source/Core/VideoBackends/Vulkan/PostProcessing.cpp @@ -117,13 +117,13 @@ void VulkanPostProcessing::FillUniformBuffer(u8* buf, const TargetRectangle& src break; case PostProcessingShaderConfiguration::ConfigurationOption::OptionType::OPTION_INTEGER: - _assert_(it.second.m_integer_values.size() < 4); + ASSERT(it.second.m_integer_values.size() < 4); std::copy_n(it.second.m_integer_values.begin(), it.second.m_integer_values.size(), value.as_int); break; case PostProcessingShaderConfiguration::ConfigurationOption::OptionType::OPTION_FLOAT: - _assert_(it.second.m_float_values.size() < 4); + ASSERT(it.second.m_float_values.size() < 4); std::copy_n(it.second.m_float_values.begin(), it.second.m_float_values.size(), value.as_float); break; diff --git a/Source/Core/VideoBackends/Vulkan/Renderer.cpp b/Source/Core/VideoBackends/Vulkan/Renderer.cpp index 1beb4f3461..13bd72ff2a 100644 --- a/Source/Core/VideoBackends/Vulkan/Renderer.cpp +++ b/Source/Core/VideoBackends/Vulkan/Renderer.cpp @@ -1123,7 +1123,7 @@ void Renderer::SetTexture(u32 index, const AbstractTexture* texture) // Texture should always be in SHADER_READ_ONLY layout prior to use. // This is so we don't need to transition during render passes. auto* tex = texture ? static_cast(texture)->GetRawTexIdentifier() : nullptr; - _dbg_assert_(VIDEO, !tex || tex->GetLayout() == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); + DEBUG_ASSERT(VIDEO, !tex || tex->GetLayout() == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); StateTracker::GetInstance()->SetTexture(index, tex ? 
tex->GetView() : VK_NULL_HANDLE); } diff --git a/Source/Core/VideoBackends/Vulkan/StagingBuffer.cpp b/Source/Core/VideoBackends/Vulkan/StagingBuffer.cpp index 6924f41eca..7a2c238489 100644 --- a/Source/Core/VideoBackends/Vulkan/StagingBuffer.cpp +++ b/Source/Core/VideoBackends/Vulkan/StagingBuffer.cpp @@ -38,8 +38,8 @@ bool StagingBuffer::Map(VkDeviceSize offset, VkDeviceSize size) else m_map_size = size; - _assert_(!m_map_pointer); - _assert_(m_map_offset + m_map_size <= m_size); + ASSERT(!m_map_pointer); + ASSERT(m_map_offset + m_map_size <= m_size); void* map_pointer; VkResult res = vkMapMemory(g_vulkan_context->GetDevice(), m_memory, m_map_offset, m_map_size, 0, @@ -56,7 +56,7 @@ bool StagingBuffer::Map(VkDeviceSize offset, VkDeviceSize size) void StagingBuffer::Unmap() { - _assert_(m_map_pointer); + ASSERT(m_map_pointer); vkUnmapMemory(g_vulkan_context->GetDevice(), m_memory); m_map_pointer = nullptr; @@ -66,7 +66,7 @@ void StagingBuffer::Unmap() void StagingBuffer::FlushCPUCache(VkDeviceSize offset, VkDeviceSize size) { - _assert_(offset >= m_map_offset); + ASSERT(offset >= m_map_offset); if (m_coherent) return; @@ -83,7 +83,7 @@ void StagingBuffer::InvalidateGPUCache(VkCommandBuffer command_buffer, if (m_coherent) return; - _assert_((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE)); + ASSERT((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE)); Util::BufferMemoryBarrier(command_buffer, m_buffer, VK_ACCESS_HOST_WRITE_BIT, dest_access_flags, offset, size, VK_PIPELINE_STAGE_HOST_BIT, dest_pipeline_stage); } @@ -96,7 +96,7 @@ void StagingBuffer::PrepareForGPUWrite(VkCommandBuffer command_buffer, if (m_coherent) return; - _assert_((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE)); + ASSERT((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE)); Util::BufferMemoryBarrier(command_buffer, m_buffer, 0, dst_access_flags, offset, size, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 
dst_pipeline_stage); } @@ -108,14 +108,14 @@ void StagingBuffer::FlushGPUCache(VkCommandBuffer command_buffer, VkAccessFlagBi if (m_coherent) return; - _assert_((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE)); + ASSERT((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE)); Util::BufferMemoryBarrier(command_buffer, m_buffer, src_access_flags, VK_ACCESS_HOST_READ_BIT, offset, size, src_pipeline_stage, VK_PIPELINE_STAGE_HOST_BIT); } void StagingBuffer::InvalidateCPUCache(VkDeviceSize offset, VkDeviceSize size) { - _assert_(offset >= m_map_offset); + ASSERT(offset >= m_map_offset); if (m_coherent) return; @@ -126,8 +126,8 @@ void StagingBuffer::InvalidateCPUCache(VkDeviceSize offset, VkDeviceSize size) void StagingBuffer::Read(VkDeviceSize offset, void* data, size_t size, bool invalidate_caches) { - _assert_((offset + size) <= m_size); - _assert_(offset >= m_map_offset && size <= (m_map_size + (offset - m_map_offset))); + ASSERT((offset + size) <= m_size); + ASSERT(offset >= m_map_offset && size <= (m_map_size + (offset - m_map_offset))); if (invalidate_caches) InvalidateCPUCache(offset, size); @@ -137,8 +137,8 @@ void StagingBuffer::Read(VkDeviceSize offset, void* data, size_t size, bool inva void StagingBuffer::Write(VkDeviceSize offset, const void* data, size_t size, bool invalidate_caches) { - _assert_((offset + size) <= m_size); - _assert_(offset >= m_map_offset && size <= (m_map_size + (offset - m_map_offset))); + ASSERT((offset + size) <= m_size); + ASSERT(offset >= m_map_offset && size <= (m_map_size + (offset - m_map_offset))); memcpy(m_map_pointer + (offset - m_map_offset), data, size); if (invalidate_caches) diff --git a/Source/Core/VideoBackends/Vulkan/StateTracker.cpp b/Source/Core/VideoBackends/Vulkan/StateTracker.cpp index a3961daf48..38a91c61ad 100644 --- a/Source/Core/VideoBackends/Vulkan/StateTracker.cpp +++ b/Source/Core/VideoBackends/Vulkan/StateTracker.cpp @@ -37,7 +37,7 @@ StateTracker* 
StateTracker::GetInstance() bool StateTracker::CreateInstance() { - _assert_(!s_state_tracker); + ASSERT(!s_state_tracker); s_state_tracker = std::make_unique(); if (!s_state_tracker->Initialize()) { @@ -116,7 +116,7 @@ void StateTracker::SetIndexBuffer(VkBuffer buffer, VkDeviceSize offset, VkIndexT void StateTracker::SetRenderPass(VkRenderPass load_render_pass, VkRenderPass clear_render_pass) { // Should not be changed within a render pass. - _assert_(!InRenderPass()); + ASSERT(!InRenderPass()); m_load_render_pass = load_render_pass; m_clear_render_pass = clear_render_pass; } @@ -124,7 +124,7 @@ void StateTracker::SetRenderPass(VkRenderPass load_render_pass, VkRenderPass cle void StateTracker::SetFramebuffer(VkFramebuffer framebuffer, const VkRect2D& render_area) { // Should not be changed within a render pass. - _assert_(!InRenderPass()); + ASSERT(!InRenderPass()); m_framebuffer = framebuffer; m_framebuffer_size = render_area; } @@ -395,7 +395,7 @@ void StateTracker::EndRenderPass() void StateTracker::BeginClearRenderPass(const VkRect2D& area, const VkClearValue* clear_values, u32 num_clear_values) { - _assert_(!InRenderPass()); + ASSERT(!InRenderPass()); m_current_render_pass = m_clear_render_pass; m_framebuffer_render_area = area; diff --git a/Source/Core/VideoBackends/Vulkan/StreamBuffer.cpp b/Source/Core/VideoBackends/Vulkan/StreamBuffer.cpp index 25aa9e3d07..b659eef0da 100644 --- a/Source/Core/VideoBackends/Vulkan/StreamBuffer.cpp +++ b/Source/Core/VideoBackends/Vulkan/StreamBuffer.cpp @@ -209,8 +209,8 @@ bool StreamBuffer::ReserveMemory(size_t num_bytes, size_t alignment, bool allow_ // Can we find a fence to wait on that will give us enough memory? 
if (allow_reuse && WaitForClearSpace(required_bytes)) { - _assert_(m_current_offset == m_current_gpu_position || - (m_current_offset + required_bytes) < m_current_gpu_position); + ASSERT(m_current_offset == m_current_gpu_position || + (m_current_offset + required_bytes) < m_current_gpu_position); m_current_offset = Util::AlignBufferOffset(m_current_offset, alignment); m_last_allocation_size = num_bytes; return true; @@ -232,8 +232,8 @@ bool StreamBuffer::ReserveMemory(size_t num_bytes, size_t alignment, bool allow_ void StreamBuffer::CommitMemory(size_t final_num_bytes) { - _assert_((m_current_offset + final_num_bytes) <= m_current_size); - _assert_(final_num_bytes <= m_last_allocation_size); + ASSERT((m_current_offset + final_num_bytes) <= m_current_size); + ASSERT(final_num_bytes <= m_last_allocation_size); // For non-coherent mappings, flush the memory range if (!m_coherent_mapping) diff --git a/Source/Core/VideoBackends/Vulkan/SwapChain.cpp b/Source/Core/VideoBackends/Vulkan/SwapChain.cpp index 88bb00ee29..3294b944ef 100644 --- a/Source/Core/VideoBackends/Vulkan/SwapChain.cpp +++ b/Source/Core/VideoBackends/Vulkan/SwapChain.cpp @@ -155,7 +155,7 @@ bool SwapChain::SelectSurfaceFormat() std::vector surface_formats(format_count); res = vkGetPhysicalDeviceSurfaceFormatsKHR(g_vulkan_context->GetPhysicalDevice(), m_surface, &format_count, surface_formats.data()); - _assert_(res == VK_SUCCESS); + ASSERT(res == VK_SUCCESS); // If there is a single undefined surface format, the device doesn't care, so we'll just use RGBA if (surface_formats[0].format == VK_FORMAT_UNDEFINED) @@ -189,7 +189,7 @@ bool SwapChain::SelectPresentMode() std::vector present_modes(mode_count); res = vkGetPhysicalDeviceSurfacePresentModesKHR(g_vulkan_context->GetPhysicalDevice(), m_surface, &mode_count, present_modes.data()); - _assert_(res == VK_SUCCESS); + ASSERT(res == VK_SUCCESS); // Checks if a particular mode is supported, if it is, returns that mode. 
auto CheckForMode = [&present_modes](VkPresentModeKHR check_mode) { @@ -341,7 +341,7 @@ bool SwapChain::CreateSwapChain() bool SwapChain::SetupSwapChainImages() { - _assert_(m_swap_chain_images.empty()); + ASSERT(m_swap_chain_images.empty()); uint32_t image_count; VkResult res = @@ -355,7 +355,7 @@ bool SwapChain::SetupSwapChainImages() std::vector images(image_count); res = vkGetSwapchainImagesKHR(g_vulkan_context->GetDevice(), m_swap_chain, &image_count, images.data()); - _assert_(res == VK_SUCCESS); + ASSERT(res == VK_SUCCESS); m_swap_chain_images.reserve(image_count); for (uint32_t i = 0; i < image_count; i++) diff --git a/Source/Core/VideoBackends/Vulkan/Texture2D.cpp b/Source/Core/VideoBackends/Vulkan/Texture2D.cpp index a15f8f8d04..39eb9bfcd8 100644 --- a/Source/Core/VideoBackends/Vulkan/Texture2D.cpp +++ b/Source/Core/VideoBackends/Vulkan/Texture2D.cpp @@ -302,7 +302,7 @@ void Texture2D::TransitionToLayout(VkCommandBuffer command_buffer, VkImageLayout void Texture2D::TransitionToLayout(VkCommandBuffer command_buffer, ComputeImageLayout new_layout) { - _assert_(new_layout != ComputeImageLayout::Undefined); + ASSERT(new_layout != ComputeImageLayout::Undefined); if (m_compute_layout == new_layout) return; diff --git a/Source/Core/VideoBackends/Vulkan/TextureCache.cpp b/Source/Core/VideoBackends/Vulkan/TextureCache.cpp index af9b39fa73..cb61cd4de4 100644 --- a/Source/Core/VideoBackends/Vulkan/TextureCache.cpp +++ b/Source/Core/VideoBackends/Vulkan/TextureCache.cpp @@ -222,7 +222,7 @@ void TextureCache::CopyEFBToCacheEntry(TCacheEntry* entry, bool is_depth_copy, framebuffer_mgr->FlushEFBPokes(); // Has to be flagged as a render target. - _assert_(texture->GetFramebuffer() != VK_NULL_HANDLE); + ASSERT(texture->GetFramebuffer() != VK_NULL_HANDLE); // Can't be done in a render pass, since we're doing our own render pass! 
VkCommandBuffer command_buffer = g_command_buffer_mgr->GetCurrentCommandBuffer(); diff --git a/Source/Core/VideoBackends/Vulkan/TextureConverter.cpp b/Source/Core/VideoBackends/Vulkan/TextureConverter.cpp index 74e7e12baf..eb9cde54ba 100644 --- a/Source/Core/VideoBackends/Vulkan/TextureConverter.cpp +++ b/Source/Core/VideoBackends/Vulkan/TextureConverter.cpp @@ -158,8 +158,8 @@ void TextureConverter::ConvertTexture(TextureCacheBase::TCacheEntry* dst_entry, VKTexture* source_texture = static_cast(src_entry->texture.get()); VKTexture* destination_texture = static_cast(dst_entry->texture.get()); - _assert_(static_cast(palette_format) < NUM_PALETTE_CONVERSION_SHADERS); - _assert_(destination_texture->GetConfig().rendertarget); + ASSERT(static_cast(palette_format) < NUM_PALETTE_CONVERSION_SHADERS); + ASSERT(destination_texture->GetConfig().rendertarget); // We want to align to 2 bytes (R16) or the device's texel buffer alignment, whichever is greater. size_t palette_size = src_entry->format == TextureFormat::I4 ? 32 : 512; diff --git a/Source/Core/VideoBackends/Vulkan/Util.cpp b/Source/Core/VideoBackends/Vulkan/Util.cpp index facae2ac14..07c0311da1 100644 --- a/Source/Core/VideoBackends/Vulkan/Util.cpp +++ b/Source/Core/VideoBackends/Vulkan/Util.cpp @@ -397,7 +397,7 @@ void UtilityShaderDraw::CommitPSUniforms(size_t size) void UtilityShaderDraw::SetPushConstants(const void* data, size_t data_size) { - _assert_(static_cast(data_size) < PUSH_CONSTANT_BUFFER_SIZE); + ASSERT(static_cast(data_size) < PUSH_CONSTANT_BUFFER_SIZE); vkCmdPushConstants(m_command_buffer, m_pipeline_info.pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, 0, @@ -414,8 +414,8 @@ void UtilityShaderDraw::SetPSSampler(size_t index, VkImageView view, VkSampler s void UtilityShaderDraw::SetPSTexelBuffer(VkBufferView view) { // Should only be used with the texture conversion pipeline layout. 
- _assert_(m_pipeline_info.pipeline_layout == - g_object_cache->GetPipelineLayout(PIPELINE_LAYOUT_TEXTURE_CONVERSION)); + ASSERT(m_pipeline_info.pipeline_layout == + g_object_cache->GetPipelineLayout(PIPELINE_LAYOUT_TEXTURE_CONVERSION)); m_ps_texel_buffer = view; } @@ -765,7 +765,7 @@ void ComputeShaderDispatcher::CommitUniformBuffer(size_t size) void ComputeShaderDispatcher::SetPushConstants(const void* data, size_t data_size) { - _assert_(static_cast(data_size) < PUSH_CONSTANT_BUFFER_SIZE); + ASSERT(static_cast(data_size) < PUSH_CONSTANT_BUFFER_SIZE); vkCmdPushConstants(m_command_buffer, m_pipeline_info.pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, static_cast(data_size), data); diff --git a/Source/Core/VideoBackends/Vulkan/VKPipeline.cpp b/Source/Core/VideoBackends/Vulkan/VKPipeline.cpp index c6c0aa0c85..31dcc72a6a 100644 --- a/Source/Core/VideoBackends/Vulkan/VKPipeline.cpp +++ b/Source/Core/VideoBackends/Vulkan/VKPipeline.cpp @@ -27,7 +27,7 @@ VKPipeline::~VKPipeline() std::unique_ptr VKPipeline::Create(const AbstractPipelineConfig& config) { - _dbg_assert_(VIDEO, config.vertex_shader && config.pixel_shader); + DEBUG_ASSERT(VIDEO, config.vertex_shader && config.pixel_shader); // Get render pass for config. 
VkRenderPass render_pass = g_object_cache->GetRenderPass( diff --git a/Source/Core/VideoBackends/Vulkan/VKShader.cpp b/Source/Core/VideoBackends/Vulkan/VKShader.cpp index ed723235db..5b44ed99a4 100644 --- a/Source/Core/VideoBackends/Vulkan/VKShader.cpp +++ b/Source/Core/VideoBackends/Vulkan/VKShader.cpp @@ -34,7 +34,7 @@ VKShader::~VKShader() bool VKShader::HasBinary() const { - _assert_(!m_spv.empty()); + ASSERT(!m_spv.empty()); return true; } diff --git a/Source/Core/VideoBackends/Vulkan/VKTexture.cpp b/Source/Core/VideoBackends/Vulkan/VKTexture.cpp index a812e87602..cd09bf6dd5 100644 --- a/Source/Core/VideoBackends/Vulkan/VKTexture.cpp +++ b/Source/Core/VideoBackends/Vulkan/VKTexture.cpp @@ -131,13 +131,13 @@ void VKTexture::CopyRectangleFromTexture(const AbstractTexture* src, { Texture2D* src_texture = static_cast(src)->GetRawTexIdentifier(); - _assert_msg_(VIDEO, static_cast(src_rect.GetWidth()) <= src_texture->GetWidth() && - static_cast(src_rect.GetHeight()) <= src_texture->GetHeight(), - "Source rect is too large for CopyRectangleFromTexture"); + ASSERT_MSG(VIDEO, static_cast(src_rect.GetWidth()) <= src_texture->GetWidth() && + static_cast(src_rect.GetHeight()) <= src_texture->GetHeight(), + "Source rect is too large for CopyRectangleFromTexture"); - _assert_msg_(VIDEO, static_cast(dst_rect.GetWidth()) <= m_config.width && - static_cast(dst_rect.GetHeight()) <= m_config.height, - "Dest rect is too large for CopyRectangleFromTexture"); + ASSERT_MSG(VIDEO, static_cast(dst_rect.GetWidth()) <= m_config.width && + static_cast(dst_rect.GetHeight()) <= m_config.height, + "Dest rect is too large for CopyRectangleFromTexture"); VkImageCopy image_copy = { {VK_IMAGE_ASPECT_COLOR_BIT, src_level, src_layer, src_texture->GetLayers()}, @@ -176,8 +176,8 @@ void VKTexture::ScaleRectangleFromTexture(const AbstractTexture* source, StateTracker::GetInstance()->SetPendingRebind(); // Can't render to a non-rendertarget (no framebuffer). 
- _assert_msg_(VIDEO, m_config.rendertarget, - "Destination texture for partial copy is not a rendertarget"); + ASSERT_MSG(VIDEO, m_config.rendertarget, + "Destination texture for partial copy is not a rendertarget"); // Render pass expects dst_texture to be in COLOR_ATTACHMENT_OPTIMAL state. // src_texture should already be in SHADER_READ_ONLY state, but transition in case (XFB). @@ -216,10 +216,10 @@ void VKTexture::ResolveFromTexture(const AbstractTexture* src, const MathUtil::R u32 layer, u32 level) { const VKTexture* srcentry = static_cast(src); - _dbg_assert_(VIDEO, m_config.samples == 1 && m_config.width == srcentry->m_config.width && + DEBUG_ASSERT(VIDEO, m_config.samples == 1 && m_config.width == srcentry->m_config.width && m_config.height == srcentry->m_config.height && srcentry->m_config.samples > 1); - _dbg_assert_(VIDEO, + DEBUG_ASSERT(VIDEO, rect.left + rect.GetWidth() <= static_cast(srcentry->m_config.width) && rect.top + rect.GetHeight() <= static_cast(srcentry->m_config.height)); @@ -407,13 +407,13 @@ void VKStagingTexture::CopyFromTexture(const AbstractTexture* src, const MathUtil::Rectangle& src_rect, u32 src_layer, u32 src_level, const MathUtil::Rectangle& dst_rect) { - _assert_(m_type == StagingTextureType::Readback); - _assert_(src_rect.GetWidth() == dst_rect.GetWidth() && - src_rect.GetHeight() == dst_rect.GetHeight()); - _assert_(src_rect.left >= 0 && static_cast(src_rect.right) <= src->GetConfig().width && - src_rect.top >= 0 && static_cast(src_rect.bottom) <= src->GetConfig().height); - _assert_(dst_rect.left >= 0 && static_cast(dst_rect.right) <= m_config.width && - dst_rect.top >= 0 && static_cast(dst_rect.bottom) <= m_config.height); + ASSERT(m_type == StagingTextureType::Readback); + ASSERT(src_rect.GetWidth() == dst_rect.GetWidth() && + src_rect.GetHeight() == dst_rect.GetHeight()); + ASSERT(src_rect.left >= 0 && static_cast(src_rect.right) <= src->GetConfig().width && + src_rect.top >= 0 && static_cast(src_rect.bottom) <= 
src->GetConfig().height); + ASSERT(dst_rect.left >= 0 && static_cast(dst_rect.right) <= m_config.width && + dst_rect.top >= 0 && static_cast(dst_rect.bottom) <= m_config.height); Texture2D* src_tex = static_cast(src)->GetRawTexIdentifier(); CopyFromTexture(src_tex, src_rect, src_layer, src_level, dst_rect); @@ -458,7 +458,7 @@ void VKStagingTexture::CopyFromTexture(Texture2D* src, const MathUtil::Rectangle m_needs_flush = true; g_command_buffer_mgr->AddFencePointCallback(this, [this](VkCommandBuffer buf, VkFence fence) { - _assert_(m_needs_flush); + ASSERT(m_needs_flush); m_flush_fence = fence; }, [this](VkFence fence) { @@ -473,13 +473,13 @@ void VKStagingTexture::CopyToTexture(const MathUtil::Rectangle& src_rect, A const MathUtil::Rectangle& dst_rect, u32 dst_layer, u32 dst_level) { - _assert_(m_type == StagingTextureType::Upload); - _assert_(src_rect.GetWidth() == dst_rect.GetWidth() && - src_rect.GetHeight() == dst_rect.GetHeight()); - _assert_(src_rect.left >= 0 && static_cast(src_rect.right) <= m_config.width && - src_rect.top >= 0 && static_cast(src_rect.bottom) <= m_config.height); - _assert_(dst_rect.left >= 0 && static_cast(dst_rect.right) <= dst->GetConfig().width && - dst_rect.top >= 0 && static_cast(dst_rect.bottom) <= dst->GetConfig().height); + ASSERT(m_type == StagingTextureType::Upload); + ASSERT(src_rect.GetWidth() == dst_rect.GetWidth() && + src_rect.GetHeight() == dst_rect.GetHeight()); + ASSERT(src_rect.left >= 0 && static_cast(src_rect.right) <= m_config.width && + src_rect.top >= 0 && static_cast(src_rect.bottom) <= m_config.height); + ASSERT(dst_rect.left >= 0 && static_cast(dst_rect.right) <= dst->GetConfig().width && + dst_rect.top >= 0 && static_cast(dst_rect.bottom) <= dst->GetConfig().height); if (m_needs_flush) { @@ -518,7 +518,7 @@ void VKStagingTexture::CopyToTexture(const MathUtil::Rectangle& src_rect, A m_needs_flush = true; g_command_buffer_mgr->AddFencePointCallback(this, [this](VkCommandBuffer buf, VkFence fence) { - 
_assert_(m_needs_flush); + ASSERT(m_needs_flush); m_flush_fence = fence; }, [this](VkFence fence) { diff --git a/Source/Core/VideoBackends/Vulkan/VertexFormat.cpp b/Source/Core/VideoBackends/Vulkan/VertexFormat.cpp index c28110c844..3ea824e530 100644 --- a/Source/Core/VideoBackends/Vulkan/VertexFormat.cpp +++ b/Source/Core/VideoBackends/Vulkan/VertexFormat.cpp @@ -42,7 +42,7 @@ static VkFormat VarToVkFormat(VarType t, uint32_t components, bool integer) VK_FORMAT_R32G32B32A32_SFLOAT} // VAR_FLOAT }; - _assert_(components > 0 && components <= 4); + ASSERT(components > 0 && components <= 4); return integer ? integer_type_lookup[t][components - 1] : float_type_lookup[t][components - 1]; } @@ -120,7 +120,7 @@ void VertexFormat::SetupInputState() void VertexFormat::AddAttribute(uint32_t location, uint32_t binding, VkFormat format, uint32_t offset) { - _assert_(m_num_attributes < MAX_VERTEX_ATTRIBUTES); + ASSERT(m_num_attributes < MAX_VERTEX_ATTRIBUTES); m_attribute_descriptions[m_num_attributes].location = location; m_attribute_descriptions[m_num_attributes].binding = binding; diff --git a/Source/Core/VideoBackends/Vulkan/VulkanContext.cpp b/Source/Core/VideoBackends/Vulkan/VulkanContext.cpp index 495bdaf519..77702f436b 100644 --- a/Source/Core/VideoBackends/Vulkan/VulkanContext.cpp +++ b/Source/Core/VideoBackends/Vulkan/VulkanContext.cpp @@ -59,7 +59,7 @@ bool VulkanContext::CheckValidationLayerAvailablility() std::vector extension_list(extension_count); res = vkEnumerateInstanceExtensionProperties(nullptr, &extension_count, extension_list.data()); - _assert_(res == VK_SUCCESS); + ASSERT(res == VK_SUCCESS); u32 layer_count = 0; res = vkEnumerateInstanceLayerProperties(&layer_count, nullptr); @@ -71,7 +71,7 @@ bool VulkanContext::CheckValidationLayerAvailablility() std::vector layer_list(layer_count); res = vkEnumerateInstanceLayerProperties(&layer_count, layer_list.data()); - _assert_(res == VK_SUCCESS); + ASSERT(res == VK_SUCCESS); // Check for both VK_EXT_debug_report 
and VK_LAYER_LUNARG_standard_validation return (std::find_if(extension_list.begin(), extension_list.end(), @@ -148,7 +148,7 @@ bool VulkanContext::SelectInstanceExtensions(ExtensionList* extension_list, bool std::vector available_extension_list(extension_count); res = vkEnumerateInstanceExtensionProperties(nullptr, &extension_count, available_extension_list.data()); - _assert_(res == VK_SUCCESS); + ASSERT(res == VK_SUCCESS); for (const auto& extension_properties : available_extension_list) INFO_LOG(VIDEO, "Available extension: %s", extension_properties.extensionName); @@ -391,7 +391,7 @@ bool VulkanContext::SelectDeviceExtensions(ExtensionList* extension_list, bool e std::vector available_extension_list(extension_count); res = vkEnumerateDeviceExtensionProperties(m_physical_device, nullptr, &extension_count, available_extension_list.data()); - _assert_(res == VK_SUCCESS); + ASSERT(res == VK_SUCCESS); for (const auto& extension_properties : available_extension_list) INFO_LOG(VIDEO, "Available extension: %s", extension_properties.extensionName); diff --git a/Source/Core/VideoCommon/AbstractStagingTexture.cpp b/Source/Core/VideoCommon/AbstractStagingTexture.cpp index 6d4e973761..4cb1b2f4bc 100644 --- a/Source/Core/VideoCommon/AbstractStagingTexture.cpp +++ b/Source/Core/VideoCommon/AbstractStagingTexture.cpp @@ -35,12 +35,12 @@ void AbstractStagingTexture::CopyToTexture(AbstractTexture* dst, u32 dst_layer, void AbstractStagingTexture::ReadTexels(const MathUtil::Rectangle& rect, void* out_ptr, u32 out_stride) { - _assert_(m_type != StagingTextureType::Upload); + ASSERT(m_type != StagingTextureType::Upload); if (!PrepareForAccess()) return; - _assert_(rect.left >= 0 && static_cast(rect.right) <= m_config.width && rect.top >= 0 && - static_cast(rect.bottom) <= m_config.height); + ASSERT(rect.left >= 0 && static_cast(rect.right) <= m_config.width && rect.top >= 0 && + static_cast(rect.bottom) <= m_config.height); // Offset pointer to point to start of region being copied 
out. const char* current_ptr = m_map_pointer; @@ -68,11 +68,11 @@ void AbstractStagingTexture::ReadTexels(const MathUtil::Rectangle& rect, vo void AbstractStagingTexture::ReadTexel(u32 x, u32 y, void* out_ptr) { - _assert_(m_type != StagingTextureType::Upload); + ASSERT(m_type != StagingTextureType::Upload); if (!PrepareForAccess()) return; - _assert_(x < m_config.width && y < m_config.height); + ASSERT(x < m_config.width && y < m_config.height); const char* src_ptr = m_map_pointer + y * m_map_stride + x * m_texel_size; std::memcpy(out_ptr, src_ptr, m_texel_size); } @@ -80,12 +80,12 @@ void AbstractStagingTexture::ReadTexel(u32 x, u32 y, void* out_ptr) void AbstractStagingTexture::WriteTexels(const MathUtil::Rectangle& rect, const void* in_ptr, u32 in_stride) { - _assert_(m_type != StagingTextureType::Readback); + ASSERT(m_type != StagingTextureType::Readback); if (!PrepareForAccess()) return; - _assert_(rect.left >= 0 && static_cast(rect.right) <= m_config.width && rect.top >= 0 && - static_cast(rect.bottom) <= m_config.height); + ASSERT(rect.left >= 0 && static_cast(rect.right) <= m_config.width && rect.top >= 0 && + static_cast(rect.bottom) <= m_config.height); // Offset pointer to point to start of region being copied to. 
char* current_ptr = m_map_pointer; @@ -112,11 +112,11 @@ void AbstractStagingTexture::WriteTexels(const MathUtil::Rectangle& rect, c void AbstractStagingTexture::WriteTexel(u32 x, u32 y, const void* in_ptr) { - _assert_(m_type != StagingTextureType::Readback); + ASSERT(m_type != StagingTextureType::Readback); if (!PrepareForAccess()) return; - _assert_(x < m_config.width && y < m_config.height); + ASSERT(x < m_config.width && y < m_config.height); char* dest_ptr = m_map_pointer + y * m_map_stride + x * m_texel_size; std::memcpy(dest_ptr, in_ptr, m_texel_size); } diff --git a/Source/Core/VideoCommon/AbstractTexture.cpp b/Source/Core/VideoCommon/AbstractTexture.cpp index 8a870de4bb..b01fbce085 100644 --- a/Source/Core/VideoCommon/AbstractTexture.cpp +++ b/Source/Core/VideoCommon/AbstractTexture.cpp @@ -20,8 +20,8 @@ bool AbstractTexture::Save(const std::string& filename, unsigned int level) // We can't dump compressed textures currently (it would mean drawing them to a RGBA8 // framebuffer, and saving that). TextureCache does not call Save for custom textures // anyway, so this is fine for now. - _assert_(!IsCompressedFormat(m_config.format)); - _assert_(level < m_config.levels); + ASSERT(!IsCompressedFormat(m_config.format)); + ASSERT(level < m_config.levels); // Determine dimensions of image we want to save. u32 level_width = std::max(1u, m_config.width >> level); diff --git a/Source/Core/VideoCommon/AsyncShaderCompiler.cpp b/Source/Core/VideoCommon/AsyncShaderCompiler.cpp index 2f14c1621b..30c78a1ef3 100644 --- a/Source/Core/VideoCommon/AsyncShaderCompiler.cpp +++ b/Source/Core/VideoCommon/AsyncShaderCompiler.cpp @@ -17,7 +17,7 @@ AsyncShaderCompiler::~AsyncShaderCompiler() { // Pending work can be left at shutdown. // The work item classes are expected to clean up after themselves. 
- _assert_(!HasWorkerThreads()); + ASSERT(!HasWorkerThreads()); } void AsyncShaderCompiler::QueueWorkItem(WorkItemPtr item) diff --git a/Source/Core/VideoCommon/CommandProcessor.cpp b/Source/Core/VideoCommon/CommandProcessor.cpp index 424435a178..b5e8d68c5b 100644 --- a/Source/Core/VideoCommon/CommandProcessor.cpp +++ b/Source/Core/VideoCommon/CommandProcessor.cpp @@ -333,16 +333,16 @@ void GatherPipeBursted() Fifo::RunGpu(); - _assert_msg_(COMMANDPROCESSOR, fifo.CPReadWriteDistance <= fifo.CPEnd - fifo.CPBase, - "FIFO is overflowed by GatherPipe !\nCPU thread is too fast!"); + ASSERT_MSG(COMMANDPROCESSOR, fifo.CPReadWriteDistance <= fifo.CPEnd - fifo.CPBase, + "FIFO is overflowed by GatherPipe !\nCPU thread is too fast!"); // check if we are in sync - _assert_msg_(COMMANDPROCESSOR, fifo.CPWritePointer == ProcessorInterface::Fifo_CPUWritePointer, - "FIFOs linked but out of sync"); - _assert_msg_(COMMANDPROCESSOR, fifo.CPBase == ProcessorInterface::Fifo_CPUBase, - "FIFOs linked but out of sync"); - _assert_msg_(COMMANDPROCESSOR, fifo.CPEnd == ProcessorInterface::Fifo_CPUEnd, - "FIFOs linked but out of sync"); + ASSERT_MSG(COMMANDPROCESSOR, fifo.CPWritePointer == ProcessorInterface::Fifo_CPUWritePointer, + "FIFOs linked but out of sync"); + ASSERT_MSG(COMMANDPROCESSOR, fifo.CPBase == ProcessorInterface::Fifo_CPUBase, + "FIFOs linked but out of sync"); + ASSERT_MSG(COMMANDPROCESSOR, fifo.CPEnd == ProcessorInterface::Fifo_CPUEnd, + "FIFOs linked but out of sync"); } void UpdateInterrupts(u64 userdata) diff --git a/Source/Core/VideoCommon/Fifo.cpp b/Source/Core/VideoCommon/Fifo.cpp index 7b6a412c2c..34f8def354 100644 --- a/Source/Core/VideoCommon/Fifo.cpp +++ b/Source/Core/VideoCommon/Fifo.cpp @@ -342,10 +342,10 @@ void RunGpuLoop() else readPtr += 32; - _assert_msg_(COMMANDPROCESSOR, (s32)fifo.CPReadWriteDistance - 32 >= 0, - "Negative fifo.CPReadWriteDistance = %i in FIFO Loop !\nThat can produce " - "instability in the game. 
Please report it.", - fifo.CPReadWriteDistance - 32); + ASSERT_MSG(COMMANDPROCESSOR, (s32)fifo.CPReadWriteDistance - 32 >= 0, + "Negative fifo.CPReadWriteDistance = %i in FIFO Loop !\nThat can produce " + "instability in the game. Please report it.", + fifo.CPReadWriteDistance - 32); u8* write_ptr = s_video_buffer_write_ptr; s_video_buffer_read_ptr = OpcodeDecoder::Run( diff --git a/Source/Core/VideoCommon/LightingShaderGen.cpp b/Source/Core/VideoCommon/LightingShaderGen.cpp index 4139018552..5fa2c5b977 100644 --- a/Source/Core/VideoCommon/LightingShaderGen.cpp +++ b/Source/Core/VideoCommon/LightingShaderGen.cpp @@ -67,7 +67,7 @@ static void GenerateLightShader(ShaderCode& object, const LightingUidData& uid_d swizzle_components, LIGHT_COL_PARAMS(index, swizzle)); break; default: - _assert_(0); + ASSERT(0); } object.Write("\n"); diff --git a/Source/Core/VideoCommon/PixelShaderGen.cpp b/Source/Core/VideoCommon/PixelShaderGen.cpp index 5e2d8e60ad..ddd3aa1015 100644 --- a/Source/Core/VideoCommon/PixelShaderGen.cpp +++ b/Source/Core/VideoCommon/PixelShaderGen.cpp @@ -947,7 +947,7 @@ static void WriteStage(ShaderCode& out, const pixel_shader_uid_data* uid_data, i } else if (tevind.mid <= 7 && bHasTexCoord) { // s matrix - _assert_(tevind.mid >= 5); + ASSERT(tevind.mid >= 5); int mtxidx = 2 * (tevind.mid - 5); out.SetConstantsUsed(C_INDTEXMTX + mtxidx, C_INDTEXMTX + mtxidx); @@ -969,7 +969,7 @@ static void WriteStage(ShaderCode& out, const pixel_shader_uid_data* uid_data, i } else if (tevind.mid <= 11 && bHasTexCoord) { // t matrix - _assert_(tevind.mid >= 9); + ASSERT(tevind.mid >= 9); int mtxidx = 2 * (tevind.mid - 9); out.SetConstantsUsed(C_INDTEXMTX + mtxidx, C_INDTEXMTX + mtxidx); diff --git a/Source/Core/VideoCommon/RenderBase.cpp b/Source/Core/VideoCommon/RenderBase.cpp index a5e5272651..3e81312541 100644 --- a/Source/Core/VideoCommon/RenderBase.cpp +++ b/Source/Core/VideoCommon/RenderBase.cpp @@ -761,7 +761,7 @@ void Renderer::RenderFrameDump() TextureConfig 
config(target_width, target_height, 1, 1, 1, AbstractTextureFormat::RGBA8, true); m_frame_dump_render_texture.reset(); m_frame_dump_render_texture = CreateTexture(config); - _assert_(m_frame_dump_render_texture); + ASSERT(m_frame_dump_render_texture); } // Scaling is likely to occur here, but if possible, do a bit-for-bit copy. diff --git a/Source/Core/VideoCommon/TextureCacheBase.cpp b/Source/Core/VideoCommon/TextureCacheBase.cpp index 4556318ddf..278d2486b9 100644 --- a/Source/Core/VideoCommon/TextureCacheBase.cpp +++ b/Source/Core/VideoCommon/TextureCacheBase.cpp @@ -1971,7 +1971,7 @@ void TextureCacheBase::TCacheEntry::SetXfbCopy(u32 stride) is_xfb_copy = true; memory_stride = stride; - _assert_msg_(VIDEO, memory_stride >= BytesPerRow(), "Memory stride is too small"); + ASSERT_MSG(VIDEO, memory_stride >= BytesPerRow(), "Memory stride is too small"); size_in_bytes = memory_stride * NumBlocksY(); } @@ -1982,7 +1982,7 @@ void TextureCacheBase::TCacheEntry::SetEfbCopy(u32 stride) is_xfb_copy = false; memory_stride = stride; - _assert_msg_(VIDEO, memory_stride >= BytesPerRow(), "Memory stride is too small"); + ASSERT_MSG(VIDEO, memory_stride >= BytesPerRow(), "Memory stride is too small"); size_in_bytes = memory_stride * NumBlocksY(); } diff --git a/Source/Core/VideoCommon/VertexLoader.cpp b/Source/Core/VideoCommon/VertexLoader.cpp index 1db8deec0c..f89089049d 100644 --- a/Source/Core/VideoCommon/VertexLoader.cpp +++ b/Source/Core/VideoCommon/VertexLoader.cpp @@ -244,7 +244,7 @@ void VertexLoader::CompileVertexTranslator() WriteCall(Color_ReadDirect_32b_8888); break; default: - _assert_(0); + ASSERT(0); break; } break; @@ -271,7 +271,7 @@ void VertexLoader::CompileVertexTranslator() WriteCall(Color_ReadIndex8_32b_8888); break; default: - _assert_(0); + ASSERT(0); break; } break; @@ -298,7 +298,7 @@ void VertexLoader::CompileVertexTranslator() WriteCall(Color_ReadIndex16_32b_8888); break; default: - _assert_(0); + ASSERT(0); break; } break; @@ -325,12 +325,12 @@ void 
VertexLoader::CompileVertexTranslator() if (tc[i] != NOT_PRESENT) { - _assert_msg_(VIDEO, DIRECT <= tc[i] && tc[i] <= INDEX16, - "Invalid texture coordinates!\n(tc[i] = %d)", (u32)tc[i]); - _assert_msg_(VIDEO, FORMAT_UBYTE <= format && format <= FORMAT_FLOAT, - "Invalid texture coordinates format!\n(format = %d)", format); - _assert_msg_(VIDEO, 0 <= elements && elements <= 1, - "Invalid number of texture coordinates elements!\n(elements = %d)", elements); + ASSERT_MSG(VIDEO, DIRECT <= tc[i] && tc[i] <= INDEX16, + "Invalid texture coordinates!\n(tc[i] = %d)", (u32)tc[i]); + ASSERT_MSG(VIDEO, FORMAT_UBYTE <= format && format <= FORMAT_FLOAT, + "Invalid texture coordinates format!\n(format = %d)", format); + ASSERT_MSG(VIDEO, 0 <= elements && elements <= 1, + "Invalid number of texture coordinates elements!\n(elements = %d)", elements); components |= VB_HAS_UV0 << i; WriteCall(VertexLoader_TextCoord::GetFunction(tc[i], format, elements)); diff --git a/Source/Core/VideoCommon/VertexLoaderManager.cpp b/Source/Core/VideoCommon/VertexLoaderManager.cpp index bb52197585..9cb7d23089 100644 --- a/Source/Core/VideoCommon/VertexLoaderManager.cpp +++ b/Source/Core/VideoCommon/VertexLoaderManager.cpp @@ -329,19 +329,19 @@ void LoadCPReg(u32 sub_cmd, u32 value, bool is_preprocess) break; case 0x70: - _assert_((sub_cmd & 0x0F) < 8); + ASSERT((sub_cmd & 0x0F) < 8); state->vtx_attr[sub_cmd & 7].g0.Hex = value; state->attr_dirty[sub_cmd & 7] = true; break; case 0x80: - _assert_((sub_cmd & 0x0F) < 8); + ASSERT((sub_cmd & 0x0F) < 8); state->vtx_attr[sub_cmd & 7].g1.Hex = value; state->attr_dirty[sub_cmd & 7] = true; break; case 0x90: - _assert_((sub_cmd & 0x0F) < 8); + ASSERT((sub_cmd & 0x0F) < 8); state->vtx_attr[sub_cmd & 7].g2.Hex = value; state->attr_dirty[sub_cmd & 7] = true; break; diff --git a/Source/Core/VideoCommon/VertexShaderGen.cpp b/Source/Core/VideoCommon/VertexShaderGen.cpp index d6f65b12b4..ccec60731c 100644 --- a/Source/Core/VideoCommon/VertexShaderGen.cpp +++ 
b/Source/Core/VideoCommon/VertexShaderGen.cpp @@ -21,8 +21,8 @@ VertexShaderUid GetVertexShaderUid() vertex_shader_uid_data* uid_data = out.GetUidData(); memset(uid_data, 0, sizeof(*uid_data)); - _assert_(bpmem.genMode.numtexgens == xfmem.numTexGen.numTexGens); - _assert_(bpmem.genMode.numcolchans == xfmem.numChan.numColorChans); + ASSERT(bpmem.genMode.numtexgens == xfmem.numTexGen.numTexGens); + ASSERT(bpmem.genMode.numcolchans == xfmem.numChan.numColorChans); uid_data->numTexGens = xfmem.numTexGen.numTexGens; uid_data->components = VertexLoaderManager::g_current_components; @@ -262,8 +262,8 @@ ShaderCode GenerateVertexShaderCode(APIType api_type, const ShaderHostConfig& ho } break; case XF_SRCCOLORS_INROW: - _assert_(texinfo.texgentype == XF_TEXGEN_COLOR_STRGBC0 || - texinfo.texgentype == XF_TEXGEN_COLOR_STRGBC1); + ASSERT(texinfo.texgentype == XF_TEXGEN_COLOR_STRGBC0 || + texinfo.texgentype == XF_TEXGEN_COLOR_STRGBC1); break; case XF_SRCBINORMAL_T_INROW: if (uid_data->components & VB_HAS_NRM1) @@ -278,7 +278,7 @@ ShaderCode GenerateVertexShaderCode(APIType api_type, const ShaderHostConfig& ho } break; default: - _assert_(texinfo.sourcerow <= XF_SRCTEX7_INROW); + ASSERT(texinfo.sourcerow <= XF_SRCTEX7_INROW); if (uid_data->components & (VB_HAS_UV0 << (texinfo.sourcerow - XF_SRCTEX0_INROW))) out.Write("coord = float4(rawtex%d.x, rawtex%d.y, 1.0, 1.0);\n", texinfo.sourcerow - XF_SRCTEX0_INROW, texinfo.sourcerow - XF_SRCTEX0_INROW); @@ -307,7 +307,7 @@ ShaderCode GenerateVertexShaderCode(APIType api_type, const ShaderHostConfig& ho { // The following assert was triggered in House of the Dead Overkill and Star Wars Rogue // Squadron 2 - //_assert_(0); // should have normals + // ASSERT(0); // should have normals out.Write("o.tex%d.xyz = o.tex%d.xyz;\n", i, texinfo.embosssourceshift); } diff --git a/Source/Core/VideoCommon/XFStructs.cpp b/Source/Core/VideoCommon/XFStructs.cpp index 41a3a3e447..6403ccf6a4 100644 --- a/Source/Core/VideoCommon/XFStructs.cpp +++ 
b/Source/Core/VideoCommon/XFStructs.cpp @@ -99,11 +99,9 @@ static void XFRegWritten(int transferSize, u32 baseAddress, DataReader src) break; case XFMEM_SETMATRIXINDA: - //_assert_msg_(GX_XF, 0, "XF matrixindex0"); VertexShaderManager::SetTexMatrixChangedA(newValue); break; case XFMEM_SETMATRIXINDB: - //_assert_msg_(GX_XF, 0, "XF matrixindex1"); VertexShaderManager::SetTexMatrixChangedB(newValue); break;