Merge pull request #11304 from JosJuice/jit64-nan-c

Jit64: Correctly handle NaNs for ps_mulsX/ps_sumX
commit 7cd9a78ebf
Mai, 2022-11-29 12:57:43 +00:00 (committed by GitHub)
3 changed files with 102 additions and 82 deletions


@@ -17,6 +17,8 @@
 // ----------
 #pragma once
 
+#include <optional>
+
 #include <rangeset/rangesizeset.h>
 
 #include "Common/CommonTypes.h"
@@ -127,7 +129,8 @@ public:
                               bool duplicate = false);
   void FinalizeDoubleResult(Gen::X64Reg output, const Gen::OpArg& input);
   void HandleNaNs(UGeckoInstruction inst, Gen::X64Reg xmm, Gen::X64Reg clobber,
-                  std::vector<int> inputs);
+                  std::optional<Gen::OpArg> Ra, std::optional<Gen::OpArg> Rb,
+                  std::optional<Gen::OpArg> Rc);
   void MultiplyImmediate(u32 imm, int a, int d, bool overflow);
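
Note: the header change replaces the old std::vector<int> inputs parameter with three std::optional<Gen::OpArg> operands, so duplicate inputs are now filtered at the call sites instead of by erasing from a vector. A minimal standalone sketch of that pattern follows (plain ints stand in for Gen::OpArg, and ForEachUniqueInput is a hypothetical name, not a Dolphin function):

#include <optional>

// Hypothetical helper, not part of Dolphin; ints stand in for Gen::OpArg.
// Each present input is visited once, and later slots are skipped when they
// compare equal to an earlier one - the same checks the new HandleNaNs call sites emit.
template <typename F>
void ForEachUniqueInput(std::optional<int> ra, std::optional<int> rb, std::optional<int> rc,
                        F&& check)
{
  if (ra)
    check(*ra);
  if (rb && ra != rb)
    check(*rb);
  if (rc && ra != rc && rb != rc)
    check(*rc);
}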


@@ -6,6 +6,7 @@
 #include <algorithm>
 #include <cmath>
 #include <limits>
+#include <optional>
 #include <vector>
 
 #include "Common/Assert.h"
@@ -92,7 +93,8 @@ void Jit64::FinalizeDoubleResult(X64Reg output, const OpArg& input)
   SetFPRFIfNeeded(input, false);
 }
 
-void Jit64::HandleNaNs(UGeckoInstruction inst, X64Reg xmm, X64Reg clobber, std::vector<int> inputs)
+void Jit64::HandleNaNs(UGeckoInstruction inst, X64Reg xmm, X64Reg clobber, std::optional<OpArg> Ra,
+                       std::optional<OpArg> Rb, std::optional<OpArg> Rc)
 {
   //                      | PowerPC  | x86
   // ---------------------+----------+---------
@@ -107,15 +109,6 @@ void Jit64::HandleNaNs(UGeckoInstruction inst, X64Reg xmm, X64Reg clobber, std::
   ASSERT(xmm != clobber);
 
-  // Remove duplicates from inputs
-  for (auto it = inputs.begin(); it != inputs.end();)
-  {
-    if (std::find(inputs.begin(), it, *it) != it)
-      it = inputs.erase(it);
-    else
-      ++it;
-  }
-
   if (inst.OPCD != 4)
   {
     // not paired-single
@@ -127,14 +120,17 @@ void Jit64::HandleNaNs(UGeckoInstruction inst, X64Reg xmm, X64Reg clobber, std::
     // If any inputs are NaNs, pick the first NaN of them
     std::vector<FixupBranch> fixups;
-    for (int x : inputs)
-    {
-      RCOpArg Rx = fpr.Use(x, RCMode::Read);
-      RegCache::Realize(Rx);
+    const auto check_input = [&](const OpArg& Rx) {
       MOVDDUP(xmm, Rx);
       UCOMISD(xmm, R(xmm));
       fixups.push_back(J_CC(CC_P));
-    }
+    };
+
+    if (Ra)
+      check_input(*Ra);
+    if (Rb && Ra != Rb)
+      check_input(*Rb);
+    if (Rc && Ra != Rc && Rb != Rc)
+      check_input(*Rc);
 
     // Otherwise, pick the PPC default NaN (will be finished below)
     XORPD(xmm, R(xmm));
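
For orientation: UCOMISD of a value with itself sets the parity flag exactly when the value is a NaN, so each check_input call loads the operand with MOVDDUP and branches to the NaN handler on PF. The value-level rule this scalar path implements is sketched below; SelectScalarNaNResult is an illustrative name only, and quiet_NaN() stands in for the JIT's default-NaN constant.

#include <cmath>
#include <initializer_list>
#include <limits>
#include <optional>

// Sketch of the scalar NaN rule: the first NaN among the (deduplicated) inputs wins,
// checked in the order Ra, Rb, Rc; otherwise a default quiet NaN is produced.
double SelectScalarNaNResult(std::optional<double> ra, std::optional<double> rb,
                             std::optional<double> rc)
{
  for (const std::optional<double>& input : {ra, rb, rc})
  {
    if (input && std::isnan(*input))
      return *input;
  }
  return std::numeric_limits<double>::quiet_NaN();
}
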
@@ -152,8 +148,6 @@ void Jit64::HandleNaNs(UGeckoInstruction inst, X64Reg xmm, X64Reg clobber, std::
   {
     // paired-single
 
-    std::reverse(inputs.begin(), inputs.end());
-
     if (cpu_info.bSSE4_1)
     {
       avx_op(&XEmitter::VCMPPD, &XEmitter::CMPPD, clobber, R(xmm), R(xmm), CMP_UNORD);
@@ -167,13 +161,16 @@ void Jit64::HandleNaNs(UGeckoInstruction inst, X64Reg xmm, X64Reg clobber, std::
       BLENDVPD(xmm, MConst(psGeneratedQNaN));
 
       // If any inputs are NaNs, use those instead
-      for (int x : inputs)
-      {
-        RCOpArg Rx = fpr.Use(x, RCMode::Read);
-        RegCache::Realize(Rx);
+      const auto check_input = [&](const OpArg& Rx) {
         avx_op(&XEmitter::VCMPPD, &XEmitter::CMPPD, clobber, Rx, Rx, CMP_UNORD);
         BLENDVPD(xmm, Rx);
-      }
+      };
+
+      if (Rc)
+        check_input(*Rc);
+      if (Rb && Rb != Rc)
+        check_input(*Rb);
+      if (Ra && Ra != Rb && Ra != Rc)
+        check_input(*Ra);
     }
     else
     {
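
Each CMPPD/BLENDVPD pair above overwrites the NaN lanes of the running result with the corresponding lanes of the input being checked, so whichever input is checked last takes precedence. Checking Rc, then Rb, then Ra therefore gives Ra the highest priority, matching the scalar path (the removed std::reverse arranged the same ordering in the old vector-based code). A per-lane sketch, with a two-element array standing in for an XMM register and BlendNaNLanes/ApplyInputNaNs as illustrative names only:

#include <array>
#include <cmath>
#include <cstddef>
#include <optional>

using Pair = std::array<double, 2>;  // stand-in for one XMM register holding two doubles

// Replace each lane of result where input holds a NaN (what CMPPD unordered + BLENDVPD does).
void BlendNaNLanes(Pair& result, const Pair& input)
{
  for (std::size_t i = 0; i < 2; ++i)
  {
    if (std::isnan(input[i]))
      result[i] = input[i];
  }
}

// Rc is blended first and Ra last, so Ra's NaNs win when several inputs are NaN in the same lane.
void ApplyInputNaNs(Pair& result, const std::optional<Pair>& ra, const std::optional<Pair>& rb,
                    const std::optional<Pair>& rc)
{
  if (rc)
    BlendNaNLanes(result, *rc);
  if (rb)
    BlendNaNLanes(result, *rb);
  if (ra)
    BlendNaNLanes(result, *ra);
}
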
@@ -197,17 +194,20 @@ void Jit64::HandleNaNs(UGeckoInstruction inst, X64Reg xmm, X64Reg clobber, std::
       MOVAPD(xmm, tmp);
 
       // If any inputs are NaNs, use those instead
-      for (int x : inputs)
-      {
-        RCOpArg Rx = fpr.Use(x, RCMode::Read);
-        RegCache::Realize(Rx);
+      const auto check_input = [&](const OpArg& Rx) {
         MOVAPD(clobber, Rx);
         CMPPD(clobber, R(clobber), CMP_ORD);
         MOVAPD(tmp, R(clobber));
         ANDNPD(clobber, Rx);
         ANDPD(xmm, tmp);
         ORPD(xmm, R(clobber));
-      }
+      };
+
+      if (Rc)
+        check_input(*Rc);
+      if (Rb && Rb != Rc)
+        check_input(*Rb);
+      if (Ra && Ra != Rb && Ra != Rc)
+        check_input(*Ra);
     }
 
     // Turn SNaNs into QNaNs
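
Without SSE4.1 the same lane selection is built from a mask: CMPPD with CMP_ORD sets a lane to all ones when the input lane is not NaN, and the ANDNPD/ANDPD/ORPD triple then merges the current result with the input through that mask. A bit-level sketch of the merge for one 64-bit lane (MergeLane is an illustrative name):

#include <cstdint>

// Keep the running result where the input lane is ordered (mask all ones),
// take the input where it is NaN (mask all zeros) - the ANDPD/ANDNPD/ORPD pattern above.
uint64_t MergeLane(uint64_t result_bits, uint64_t input_bits, uint64_t ordered_mask)
{
  return (result_bits & ordered_mask) | (input_bits & ~ordered_mask);
}
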
@@ -236,8 +236,7 @@ void Jit64::fp_arith(UGeckoInstruction inst)
   bool single = inst.OPCD == 4 || inst.OPCD == 59;
   // If both the inputs are known to have identical top and bottom halves, we can skip the MOVDDUP
-  // at the end by
-  // using packed arithmetic instead.
+  // at the end by using packed arithmetic instead.
   bool packed = inst.OPCD == 4 ||
                 (inst.OPCD == 59 && js.op->fprIsDuplicated[a] && js.op->fprIsDuplicated[arg2]);
 
   // Packed divides are slower than scalar divides on basically all x86, so this optimization isn't
@@ -246,64 +245,76 @@ void Jit64::fp_arith(UGeckoInstruction inst)
   if (inst.OPCD == 59 && (inst.SUBOP5 == 18 || cpu_info.bAtom))
     packed = false;
 
-  bool round_input = single && !js.op->fprIsSingle[inst.FC];
-  bool preserve_inputs = m_accurate_nans;
-
-  const auto fp_tri_op = [&](int op1, int op2, bool reversible,
-                             void (XEmitter::*avxOp)(X64Reg, X64Reg, const OpArg&),
-                             void (XEmitter::*sseOp)(X64Reg, const OpArg&), bool roundRHS = false) {
-    RCX64Reg Rd = fpr.Bind(d, !single ? RCMode::ReadWrite : RCMode::Write);
-    RCOpArg Rop1 = fpr.Use(op1, RCMode::Read);
-    RCOpArg Rop2 = fpr.Use(op2, RCMode::Read);
-    RegCache::Realize(Rd, Rop1, Rop2);
-
-    X64Reg dest = preserve_inputs ? XMM1 : static_cast<X64Reg>(Rd);
-    if (roundRHS)
-    {
-      if (d == op1 && !preserve_inputs)
-      {
-        Force25BitPrecision(XMM0, Rop2, XMM1);
-        (this->*sseOp)(Rd, R(XMM0));
-      }
-      else
-      {
-        Force25BitPrecision(dest, Rop2, XMM0);
-        (this->*sseOp)(dest, Rop1);
-      }
-    }
-    else
-    {
-      avx_op(avxOp, sseOp, dest, Rop1, Rop2, packed, reversible);
-    }
-
-    HandleNaNs(inst, dest, XMM0, {op1, op2});
-
-    if (single)
-      FinalizeSingleResult(Rd, R(dest), packed, true);
-    else
-      FinalizeDoubleResult(Rd, R(dest));
-  };
+  void (XEmitter::*avxOp)(X64Reg, X64Reg, const OpArg&) = nullptr;
+  void (XEmitter::*sseOp)(X64Reg, const OpArg&) = nullptr;
+  bool reversible = false;
+  bool round_rhs = false;
+  bool preserve_inputs = false;
 
   switch (inst.SUBOP5)
   {
   case 18:
-    fp_tri_op(a, b, false, packed ? &XEmitter::VDIVPD : &XEmitter::VDIVSD,
-              packed ? &XEmitter::DIVPD : &XEmitter::DIVSD);
+    preserve_inputs = m_accurate_nans;
+    avxOp = packed ? &XEmitter::VDIVPD : &XEmitter::VDIVSD;
+    sseOp = packed ? &XEmitter::DIVPD : &XEmitter::DIVSD;
     break;
   case 20:
-    fp_tri_op(a, b, false, packed ? &XEmitter::VSUBPD : &XEmitter::VSUBSD,
-              packed ? &XEmitter::SUBPD : &XEmitter::SUBSD);
+    avxOp = packed ? &XEmitter::VSUBPD : &XEmitter::VSUBSD;
+    sseOp = packed ? &XEmitter::SUBPD : &XEmitter::SUBSD;
     break;
   case 21:
-    fp_tri_op(a, b, true, packed ? &XEmitter::VADDPD : &XEmitter::VADDSD,
-              packed ? &XEmitter::ADDPD : &XEmitter::ADDSD);
+    reversible = !m_accurate_nans;
+    avxOp = packed ? &XEmitter::VADDPD : &XEmitter::VADDSD;
+    sseOp = packed ? &XEmitter::ADDPD : &XEmitter::ADDSD;
     break;
   case 25:
-    fp_tri_op(a, c, true, packed ? &XEmitter::VMULPD : &XEmitter::VMULSD,
-              packed ? &XEmitter::MULPD : &XEmitter::MULSD, round_input);
+    reversible = true;
+    round_rhs = single && !js.op->fprIsSingle[c];
+    preserve_inputs = m_accurate_nans;
+    avxOp = packed ? &XEmitter::VMULPD : &XEmitter::VMULSD;
+    sseOp = packed ? &XEmitter::MULPD : &XEmitter::MULSD;
    break;
  default:
    ASSERT_MSG(DYNA_REC, 0, "fp_arith WTF!!!");
  }
+
+  RCX64Reg Rd = fpr.Bind(d, !single ? RCMode::ReadWrite : RCMode::Write);
+  RCOpArg Ra = fpr.Use(a, RCMode::Read);
+  RCOpArg Rarg2 = fpr.Use(arg2, RCMode::Read);
+  RegCache::Realize(Rd, Ra, Rarg2);
+
+  X64Reg dest = preserve_inputs ? XMM1 : static_cast<X64Reg>(Rd);
+  if (round_rhs)
+  {
+    if (a == d && !preserve_inputs)
+    {
+      Force25BitPrecision(XMM0, Rarg2, XMM1);
+      (this->*sseOp)(Rd, R(XMM0));
+    }
+    else
+    {
+      Force25BitPrecision(dest, Rarg2, XMM0);
+      (this->*sseOp)(dest, Ra);
+    }
+  }
+  else
+  {
+    avx_op(avxOp, sseOp, dest, Ra, Rarg2, packed, reversible);
+  }
+
+  switch (inst.SUBOP5)
+  {
+  case 18:
+    HandleNaNs(inst, dest, XMM0, Ra, Rarg2, std::nullopt);
+    break;
+  case 25:
+    HandleNaNs(inst, dest, XMM0, Ra, std::nullopt, Rarg2);
+    break;
+  }
+
+  if (single)
+    FinalizeSingleResult(Rd, R(dest), packed, true);
+  else
+    FinalizeDoubleResult(Rd, R(dest));
 }
 
 void Jit64::fmaddXX(UGeckoInstruction inst)
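
Background on the round_rhs path kept by this refactor: the emulated hardware uses only 25 bits of frC's mantissa for multiplies, so when the right-hand operand is not already known to be single precision it is rounded first via Force25BitPrecision. A sketch of that rounding on the raw double bits, assuming the usual clear-low-27-bits-plus-rounding-bit formulation (the helper's actual XMM implementation lives elsewhere in the JIT, and ForceTo25BitPrecision is an illustrative name):

#include <bit>
#include <cstdint>

// Sketch only: keep the top 25 mantissa bits and add bit 27 of the original value,
// so the result is the input rounded to 25 bits of effective mantissa precision.
double ForceTo25BitPrecision(double value)
{
  std::uint64_t bits = std::bit_cast<std::uint64_t>(value);
  bits = (bits & 0xFFFFFFFFF8000000ULL) + (bits & 0x0000000008000000ULL);
  return std::bit_cast<double>(bits);
}
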
@@ -499,7 +510,7 @@ void Jit64::fmaddXX(UGeckoInstruction inst)
     result_xmm = Rd;
   }
 
-  HandleNaNs(inst, result_xmm, XMM0, {a, b, c});
+  HandleNaNs(inst, result_xmm, XMM0, Ra, Rb, Rc);
 
   if (single)
     FinalizeSingleResult(Rd, R(result_xmm), packed, true);


@@ -3,6 +3,8 @@
 #include "Core/PowerPC/Jit64/Jit.h"
 
+#include <optional>
+
 #include "Common/CPUDetect.h"
 #include "Common/CommonTypes.h"
 #include "Common/MsgHandler.h"
@@ -77,7 +79,8 @@ void Jit64::ps_sum(UGeckoInstruction inst)
   default:
     PanicAlertFmt("ps_sum WTF!!!");
   }
 
-  HandleNaNs(inst, tmp, tmp == XMM1 ? XMM0 : XMM1, {a, b, c});
+  // We're intentionally not calling HandleNaNs here.
+  // For addition and subtraction specifically, x86's NaN behavior matches PPC's.
   FinalizeSingleResult(Rd, R(tmp));
 }
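
The new comment relies on the NaN-propagation rules already lining up for addition and subtraction: PPC returns frA's NaN when frA is NaN and otherwise frB's, and the SSE add/sub instructions likewise propagate a single NaN operand and prefer the first (destination) operand's NaN when both are NaN, assuming frA is mapped to that first operand. A value-level sketch of the shared rule (PropagateAdditionNaN is illustrative only, and SNaN quieting is ignored here):

#include <cmath>

// Shared propagation rule for addition when frA maps to the first x86 operand:
// a NaN in frA wins, otherwise a NaN in frB is propagated, otherwise the sum is returned.
double PropagateAdditionNaN(double fra, double frb)
{
  if (std::isnan(fra))
    return fra;
  if (std::isnan(frb))
    return frb;
  return fra + frb;
}
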
@@ -96,23 +99,26 @@ void Jit64::ps_muls(UGeckoInstruction inst)
   RCOpArg Ra = fpr.Use(a, RCMode::Read);
   RCOpArg Rc = fpr.Use(c, RCMode::Read);
   RCX64Reg Rd = fpr.Bind(d, RCMode::Write);
-  RegCache::Realize(Ra, Rc, Rd);
+  RCX64Reg Rc_duplicated = m_accurate_nans ? fpr.Scratch() : fpr.Scratch(XMM1);
+  RegCache::Realize(Ra, Rc, Rd, Rc_duplicated);
 
   switch (inst.SUBOP5)
   {
   case 12: // ps_muls0
-    MOVDDUP(XMM1, Rc);
+    MOVDDUP(Rc_duplicated, Rc);
     break;
   case 13: // ps_muls1
-    avx_op(&XEmitter::VSHUFPD, &XEmitter::SHUFPD, XMM1, Rc, Rc, 3);
+    avx_op(&XEmitter::VSHUFPD, &XEmitter::SHUFPD, Rc_duplicated, Rc, Rc, 3);
     break;
   default:
     PanicAlertFmt("ps_muls WTF!!!");
   }
 
   if (round_input)
-    Force25BitPrecision(XMM1, R(XMM1), XMM0);
+    Force25BitPrecision(XMM1, R(Rc_duplicated), XMM0);
+  else if (XMM1 != Rc_duplicated)
+    MOVAPD(XMM1, Rc_duplicated);
 
   MULPD(XMM1, Ra);
-  HandleNaNs(inst, XMM1, XMM0, {a, c});
+  HandleNaNs(inst, XMM1, XMM0, Ra, std::nullopt, Rc_duplicated);
   FinalizeSingleResult(Rd, R(XMM1));
 }
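
For reference, ps_muls0 and ps_muls1 multiply both lanes of frA by a single lane of frC, which is why frC is splatted first (MOVDDUP for lane 0, SHUFPD with immediate 3 for lane 1) and why the splatted copy in Rc_duplicated, rather than the original frC, is what HandleNaNs has to inspect when accurate NaNs are enabled. A value-level sketch of the two operations, ignoring the 25-bit and single-precision rounding steps (PairedSingle/PsMuls0/PsMuls1 are illustrative names):

struct PairedSingle
{
  double ps0;
  double ps1;
};

// ps_muls0: both lanes of frA are multiplied by frC.ps0 (the lane the MOVDDUP splat selects).
PairedSingle PsMuls0(const PairedSingle& fra, const PairedSingle& frc)
{
  return {fra.ps0 * frc.ps0, fra.ps1 * frc.ps0};
}

// ps_muls1: both lanes of frA are multiplied by frC.ps1 (the lane the SHUFPD splat selects).
PairedSingle PsMuls1(const PairedSingle& fra, const PairedSingle& frc)
{
  return {fra.ps0 * frc.ps1, fra.ps1 * frc.ps1};
}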