From 73b6166f18d2909f68dd1b382aab4402c87359ab Mon Sep 17 00:00:00 2001
From: MerryMage
Date: Sun, 27 Dec 2020 15:08:45 +0000
Subject: [PATCH] Jit_Integer: rlwinmx: Use BEXTR where possible

---
 Source/Core/Core/PowerPC/Jit64/Jit_Integer.cpp | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/Source/Core/Core/PowerPC/Jit64/Jit_Integer.cpp b/Source/Core/Core/PowerPC/Jit64/Jit_Integer.cpp
index b0a9cf4cf1..5acee790dc 100644
--- a/Source/Core/Core/PowerPC/Jit64/Jit_Integer.cpp
+++ b/Source/Core/Core/PowerPC/Jit64/Jit_Integer.cpp
@@ -1534,6 +1534,7 @@ void Jit64::rlwinmx(UGeckoInstruction inst)
   {
     const bool left_shift = inst.SH && inst.MB == 0 && inst.ME == 31 - inst.SH;
     const bool right_shift = inst.SH && inst.ME == 31 && inst.MB == 32 - inst.SH;
+    const bool field_extract = inst.SH && inst.ME == 31 && inst.MB > 32 - inst.SH;
     const u32 mask = MakeRotationMask(inst.MB, inst.ME);
     const bool simple_mask = mask == 0xff || mask == 0xffff;
     // In case of a merged branch, track whether or not we've set flags.
@@ -1565,6 +1566,13 @@ void Jit64::rlwinmx(UGeckoInstruction inst)
       SHL(32, Ra, Imm8(inst.SH));
       needs_sext = inst.SH + mask_size >= 32;
     }
+    // Use BEXTR where possible: Only AMD implements this in one uop
+    else if (field_extract && cpu_info.bBMI1 && cpu_info.vendor == CPUVendor::AMD)
+    {
+      MOV(32, R(RSCRATCH), Imm32((mask_size << 8) | (32 - inst.SH)));
+      BEXTR(32, Ra, Rs, RSCRATCH);
+      needs_sext = false;
+    }
     else
     {
       if (a != s)
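
Reference note (not part of the patch): the new field_extract path relies on the fact that when ME == 31 and MB > 32 - SH, rlwinm's rotate-and-mask collapses to a plain bit-field extract, which BEXTR performs directly with a control word of (length << 8) | start. That is exactly the immediate the patch builds: length = mask_size, start = 32 - SH. Below is a minimal standalone C++ sketch of that equivalence; the helper names (rotl32, rlwinm_me31, bextr32) are illustrative only and do not exist in Dolphin.

    #include <cassert>
    #include <cstdint>

    // Rotate a 32-bit value left by n bits (n in 0..31).
    static uint32_t rotl32(uint32_t x, unsigned n)
    {
      return n == 0 ? x : (x << n) | (x >> (32 - n));
    }

    // Reference semantics of rlwinm rd, rs, SH, MB, 31:
    // rotate left by SH, then keep the low (32 - MB) bits.
    static uint32_t rlwinm_me31(uint32_t rs, unsigned sh, unsigned mb)
    {
      const unsigned mask_size = 32 - mb;  // ME - MB + 1 with ME == 31
      const uint32_t mask = mask_size >= 32 ? 0xFFFFFFFFu : (1u << mask_size) - 1;
      return rotl32(rs, sh) & mask;
    }

    // Software model of BEXTR: control = (length << 8) | start; the result is
    // the 'length'-bit field of 'src' beginning at bit 'start'.
    static uint32_t bextr32(uint32_t src, uint32_t control)
    {
      const unsigned start = control & 0xFF;
      const unsigned length = (control >> 8) & 0xFF;
      if (start >= 32 || length == 0)
        return 0;
      const uint32_t shifted = src >> start;
      return length >= 32 ? shifted : shifted & ((1u << length) - 1);
    }

    int main()
    {
      // For the field_extract case (ME == 31, MB > 32 - SH), the rotate-and-mask
      // matches BEXTR with control = (mask_size << 8) | (32 - SH), which is what
      // the patch loads into RSCRATCH.
      const uint32_t rs = 0xDEADBEEF;
      for (unsigned sh = 1; sh < 32; ++sh)
      {
        for (unsigned mb = 33 - sh; mb < 32; ++mb)  // MB > 32 - SH
        {
          const unsigned mask_size = 32 - mb;
          const uint32_t control = (mask_size << 8) | (32 - sh);
          assert(rlwinm_me31(rs, sh, mb) == bextr32(rs, control));
        }
      }
      return 0;
    }

The result of such an extract always has bit 31 clear (start >= 1, so fewer than 32 bits survive), which is why the patch can set needs_sext = false.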