From 1295bc4272bb1c2f1456f7121c3fd86e05387a45 Mon Sep 17 00:00:00 2001
From: JosJuice
Date: Fri, 24 Dec 2021 19:09:12 +0100
Subject: [PATCH] Jit64: Make paired loads always safe

It's always a good sign when the comments say "this will definitely
crash" and "I don't know if this is for a good reason".

Fixes https://bugs.dolphin-emu.org/issues/12762.
---
 .../Core/PowerPC/Jit64Common/EmuCodeBlock.cpp | 16 ----
 .../Core/PowerPC/Jit64Common/EmuCodeBlock.h   |  4 -
 .../PowerPC/Jit64Common/Jit64AsmCommon.cpp    | 90 ++++---------------
 3 files changed, 16 insertions(+), 94 deletions(-)

diff --git a/Source/Core/Core/PowerPC/Jit64Common/EmuCodeBlock.cpp b/Source/Core/Core/PowerPC/Jit64Common/EmuCodeBlock.cpp
index 80d1d0f7fc..ccdb448663 100644
--- a/Source/Core/Core/PowerPC/Jit64Common/EmuCodeBlock.cpp
+++ b/Source/Core/Core/PowerPC/Jit64Common/EmuCodeBlock.cpp
@@ -130,22 +130,6 @@ FixupBranch EmuCodeBlock::CheckIfSafeAddress(const OpArg& reg_value, X64Reg reg_
   return J_CC(CC_Z, m_far_code.Enabled());
 }
 
-void EmuCodeBlock::UnsafeLoadRegToReg(X64Reg reg_addr, X64Reg reg_value, int accessSize, s32 offset,
-                                      bool signExtend)
-{
-  OpArg src = MComplex(RMEM, reg_addr, SCALE_1, offset);
-  LoadAndSwap(accessSize, reg_value, src, signExtend);
-}
-
-void EmuCodeBlock::UnsafeLoadRegToRegNoSwap(X64Reg reg_addr, X64Reg reg_value, int accessSize,
-                                            s32 offset, bool signExtend)
-{
-  if (signExtend)
-    MOVSX(32, accessSize, reg_value, MComplex(RMEM, reg_addr, SCALE_1, offset));
-  else
-    MOVZX(32, accessSize, reg_value, MComplex(RMEM, reg_addr, SCALE_1, offset));
-}
-
 void EmuCodeBlock::UnsafeWriteRegToReg(OpArg reg_value, X64Reg reg_addr, int accessSize, s32 offset,
                                        bool swap, MovInfo* info)
 {
diff --git a/Source/Core/Core/PowerPC/Jit64Common/EmuCodeBlock.h b/Source/Core/Core/PowerPC/Jit64Common/EmuCodeBlock.h
index 1340633578..5522f920d3 100644
--- a/Source/Core/Core/PowerPC/Jit64Common/EmuCodeBlock.h
+++ b/Source/Core/Core/PowerPC/Jit64Common/EmuCodeBlock.h
@@ -55,10 +55,6 @@ public:
   Gen::FixupBranch CheckIfSafeAddress(const Gen::OpArg& reg_value, Gen::X64Reg reg_addr,
                                       BitSet32 registers_in_use);
 
-  void UnsafeLoadRegToReg(Gen::X64Reg reg_addr, Gen::X64Reg reg_value, int accessSize,
-                          s32 offset = 0, bool signExtend = false);
-  void UnsafeLoadRegToRegNoSwap(Gen::X64Reg reg_addr, Gen::X64Reg reg_value, int accessSize,
-                                s32 offset, bool signExtend = false);
   // these return the address of the MOV, for backpatching
   void UnsafeWriteRegToReg(Gen::OpArg reg_value, Gen::X64Reg reg_addr, int accessSize,
                            s32 offset = 0, bool swap = true, Gen::MovInfo* info = nullptr);
diff --git a/Source/Core/Core/PowerPC/Jit64Common/Jit64AsmCommon.cpp b/Source/Core/Core/PowerPC/Jit64Common/Jit64AsmCommon.cpp
index 98f9bfadaf..b6fc24e89b 100644
--- a/Source/Core/Core/PowerPC/Jit64Common/Jit64AsmCommon.cpp
+++ b/Source/Core/Core/PowerPC/Jit64Common/Jit64AsmCommon.cpp
@@ -573,7 +573,6 @@ void QuantizedMemoryRoutines::GenQuantizedLoad(bool single, EQuantizeType type,
 
   int size = sizes[type] * (single ? 1 : 2);
   bool isInline = quantize != -1;
-  bool safe_access = m_jit.jo.memcheck || !m_jit.jo.fastmem;
 
   // illegal
   if (type == QUANTIZE_INVALID1 || type == QUANTIZE_INVALID2 || type == QUANTIZE_INVALID3)
@@ -591,34 +590,15 @@ void QuantizedMemoryRoutines::GenQuantizedLoad(bool single, EQuantizeType type,
 
   bool extend = single && (type == QUANTIZE_S8 || type == QUANTIZE_S16);
 
-  if (safe_access)
+  BitSet32 regsToSave = QUANTIZED_REGS_TO_SAVE_LOAD;
+  int flags = isInline ? 0 :
+                         SAFE_LOADSTORE_NO_FASTMEM | SAFE_LOADSTORE_NO_PROLOG |
+                             SAFE_LOADSTORE_DR_ON | SAFE_LOADSTORE_NO_UPDATE_PC;
+  SafeLoadToReg(RSCRATCH_EXTRA, R(RSCRATCH_EXTRA), size, 0, regsToSave, extend, flags);
+  if (!single && (type == QUANTIZE_U8 || type == QUANTIZE_S8))
   {
-    BitSet32 regsToSave = QUANTIZED_REGS_TO_SAVE_LOAD;
-    int flags = isInline ? 0 :
-                           SAFE_LOADSTORE_NO_FASTMEM | SAFE_LOADSTORE_NO_PROLOG |
-                               SAFE_LOADSTORE_DR_ON | SAFE_LOADSTORE_NO_UPDATE_PC;
-    SafeLoadToReg(RSCRATCH_EXTRA, R(RSCRATCH_EXTRA), size, 0, regsToSave, extend, flags);
-    if (!single && (type == QUANTIZE_U8 || type == QUANTIZE_S8))
-    {
-      // TODO: Support not swapping in safeLoadToReg to avoid bswapping twice
-      ROR(16, R(RSCRATCH_EXTRA), Imm8(8));
-    }
-  }
-  else
-  {
-    switch (type)
-    {
-    case QUANTIZE_U8:
-    case QUANTIZE_S8:
-      UnsafeLoadRegToRegNoSwap(RSCRATCH_EXTRA, RSCRATCH_EXTRA, size, 0, extend);
-      break;
-    case QUANTIZE_U16:
-    case QUANTIZE_S16:
-      UnsafeLoadRegToReg(RSCRATCH_EXTRA, RSCRATCH_EXTRA, size, 0, extend);
-      break;
-    default:
-      break;
-    }
+    // TODO: Support not swapping in safeLoadToReg to avoid bswapping twice
+    ROR(16, R(RSCRATCH_EXTRA), Imm8(8));
   }
 
   if (single)
@@ -717,59 +697,21 @@ void QuantizedMemoryRoutines::GenQuantizedLoadFloat(bool single, bool isInline)
 {
   int size = single ? 32 : 64;
   bool extend = false;
-  bool safe_access = m_jit.jo.memcheck || !m_jit.jo.fastmem;
 
-  if (safe_access)
-  {
-    BitSet32 regsToSave = QUANTIZED_REGS_TO_SAVE;
-    int flags = isInline ? 0 :
-                           SAFE_LOADSTORE_NO_FASTMEM | SAFE_LOADSTORE_NO_PROLOG |
-                               SAFE_LOADSTORE_DR_ON | SAFE_LOADSTORE_NO_UPDATE_PC;
-    SafeLoadToReg(RSCRATCH_EXTRA, R(RSCRATCH_EXTRA), size, 0, regsToSave, extend, flags);
-  }
+  BitSet32 regsToSave = QUANTIZED_REGS_TO_SAVE;
+  int flags = isInline ? 0 :
+                         SAFE_LOADSTORE_NO_FASTMEM | SAFE_LOADSTORE_NO_PROLOG |
+                             SAFE_LOADSTORE_DR_ON | SAFE_LOADSTORE_NO_UPDATE_PC;
+  SafeLoadToReg(RSCRATCH_EXTRA, R(RSCRATCH_EXTRA), size, 0, regsToSave, extend, flags);
 
   if (single)
   {
-    if (safe_access)
-    {
-      MOVD_xmm(XMM0, R(RSCRATCH_EXTRA));
-    }
-    else if (cpu_info.bSSSE3)
-    {
-      MOVD_xmm(XMM0, MRegSum(RMEM, RSCRATCH_EXTRA));
-      PSHUFB(XMM0, MConst(pbswapShuffle1x4));
-    }
-    else
-    {
-      LoadAndSwap(32, RSCRATCH_EXTRA, MRegSum(RMEM, RSCRATCH_EXTRA));
-      MOVD_xmm(XMM0, R(RSCRATCH_EXTRA));
-    }
-
+    MOVD_xmm(XMM0, R(RSCRATCH_EXTRA));
     UNPCKLPS(XMM0, MConst(m_one));
   }
   else
   {
-    // FIXME? This code (in non-MMU mode) assumes all accesses are directly to RAM, i.e.
-    // don't need hardware access handling. This will definitely crash if paired loads occur
-    // from non-RAM areas, but as far as I know, this never happens. I don't know if this is
-    // for a good reason, or merely because no game does this.
-    // If we find something that actually does do this, maybe this should be changed. How
-    // much of a performance hit would it be?
-    if (safe_access)
-    {
-      ROL(64, R(RSCRATCH_EXTRA), Imm8(32));
-      MOVQ_xmm(XMM0, R(RSCRATCH_EXTRA));
-    }
-    else if (cpu_info.bSSSE3)
-    {
-      MOVQ_xmm(XMM0, MRegSum(RMEM, RSCRATCH_EXTRA));
-      PSHUFB(XMM0, MConst(pbswapShuffle2x4));
-    }
-    else
-    {
-      LoadAndSwap(64, RSCRATCH_EXTRA, MRegSum(RMEM, RSCRATCH_EXTRA));
-      ROL(64, R(RSCRATCH_EXTRA), Imm8(32));
-      MOVQ_xmm(XMM0, R(RSCRATCH_EXTRA));
-    }
+    ROL(64, R(RSCRATCH_EXTRA), Imm8(32));
+    MOVQ_xmm(XMM0, R(RSCRATCH_EXTRA));
   }
 }
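
Illustrative note (not part of the patch): the sketch below is a minimal
standalone C++ model of the control-flow change in GenQuantizedLoadFloat,
showing the before/after shape outside of JIT emitter code. SafeLoad and
UnsafeLoad are hypothetical stand-ins for SafeLoadToReg and the removed
fastmem path; they are not Dolphin's real API.

#include <cinttypes>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for SafeLoadToReg: dispatches through the emulator's
// slow path, which can service MMIO accesses and raise memchecks.
static uint64_t SafeLoad(uint32_t guest_addr, int size_in_bits)
{
  std::printf("safe %d-bit load from %08" PRIx32 "\n", size_in_bits, guest_addr);
  return 0;  // the value would come from the emulated memory subsystem
}

// Hypothetical stand-in for the removed fastmem-only path, which dereferenced
// host memory directly and crashed when a paired load hit a non-RAM address.
static uint64_t UnsafeLoad(uint32_t guest_addr, int size_in_bits)
{
  std::printf("unsafe %d-bit load from %08" PRIx32 "\n", size_in_bits, guest_addr);
  return 0;
}

// Before the patch: the path was picked from jo.memcheck / jo.fastmem, so with
// fastmem on and memcheck off, paired loads skipped hardware access handling.
static uint64_t LoadPairedBefore(uint32_t addr, bool memcheck, bool fastmem)
{
  const bool safe_access = memcheck || !fastmem;
  return safe_access ? SafeLoad(addr, 64) : UnsafeLoad(addr, 64);
}

// After the patch: the unsafe branch is gone; every paired load is safe.
static uint64_t LoadPairedAfter(uint32_t addr)
{
  return SafeLoad(addr, 64);
}

int main()
{
  LoadPairedBefore(0x80001000, /*memcheck=*/false, /*fastmem=*/true);  // took the unsafe path
  LoadPairedAfter(0x80001000);                                         // always safe now
  return 0;
}

As far as one can tell from the flags in the diff, the performance cost the
old FIXME worried about should be modest: SAFE_LOADSTORE_NO_FASTMEM is only
passed for the shared (non-inline) routines, so inline paired loads can
presumably still use a fastmem access that gets backpatched on fault.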