JitArm64: Use the updated wrappers.

They are faster; there is no need to take the slow path through the CPU emitter.
This commit is contained in:
Markus Wick 2017-08-11 23:52:45 +02:00
parent d78009877b
commit d791e5d3a8
4 changed files with 8 additions and 8 deletions

View File

@@ -196,14 +196,14 @@ void JitArm64::EmitBackpatchRoutine(u32 flags, bool fastmem, bool do_farcode, AR
     {
       m_float_emit.FCVTN(32, D0, RS);
       m_float_emit.UMOV(64, X0, D0, 0);
-      ORR(X0, SP, X0, ArithOption(X0, ST_ROR, 32));
+      ROR(X0, X0, 32);
       MOVP2R(X30, &PowerPC::Write_U64);
       BLR(X30);
     }
     else if (flags & BackPatchInfo::FLAG_SIZE_F32X2I)
     {
       m_float_emit.UMOV(64, X0, RS, 0);
-      ORR(X0, SP, X0, ArithOption(X0, ST_ROR, 32));
+      ROR(X0, X0, 32);
       MOVP2R(X30, &PowerPC::Write_U64);
       BLR(X30);
     }

View File

@@ -499,8 +499,8 @@ void JitArm64::lmw(UGeckoInstruction inst)
       LDP(INDEX_POST, EncodeRegTo64(RX1), EncodeRegTo64(RX3), XA, 16);
       REV32(EncodeRegTo64(RX1), EncodeRegTo64(RX1));
       REV32(EncodeRegTo64(RX3), EncodeRegTo64(RX3));
-      ORR(EncodeRegTo64(RX2), ZR, EncodeRegTo64(RX1), ArithOption(EncodeRegTo64(RX1), ST_LSR, 32));
-      ORR(EncodeRegTo64(RX4), ZR, EncodeRegTo64(RX3), ArithOption(EncodeRegTo64(RX3), ST_LSR, 32));
+      LSR(EncodeRegTo64(RX2), EncodeRegTo64(RX1), 32);
+      LSR(EncodeRegTo64(RX4), EncodeRegTo64(RX3), 32);
       i += 3;
     }
     else if (remaining >= 2)

View File

@@ -300,12 +300,12 @@ void JitArm64::mfspr(UGeckoInstruction inst)
       if (iIndex == SPR_TL)
         MOV(gpr.R(d), Wresult);
       else
-        ORR(EncodeRegTo64(gpr.R(d)), ZR, Xresult, ArithOption(Xresult, ST_LSR, 32));
+        LSR(EncodeRegTo64(gpr.R(d)), Xresult, 32);
       if (nextIndex == SPR_TL)
         MOV(gpr.R(n), Wresult);
       else
-        ORR(EncodeRegTo64(gpr.R(n)), ZR, Xresult, ArithOption(Xresult, ST_LSR, 32));
+        LSR(EncodeRegTo64(gpr.R(n)), Xresult, 32);
       gpr.Unlock(Wg, Wresult, WA, WB);
       fpr.Unlock(VC, VD);
@@ -314,7 +314,7 @@ void JitArm64::mfspr(UGeckoInstruction inst)
     }
     gpr.BindToRegister(d, false);
     if (iIndex == SPR_TU)
-      ORR(EncodeRegTo64(gpr.R(d)), ZR, Xresult, ArithOption(Xresult, ST_LSR, 32));
+      LSR(EncodeRegTo64(gpr.R(d)), Xresult, 32);
     else
       MOV(gpr.R(d), Wresult);

View File

@@ -374,7 +374,7 @@ void JitArm64::GenerateCommonAsm()
   storePairedFloatSlow = GetCodePtr();
   float_emit.UMOV(64, X0, Q0, 0);
-  ORR(X0, SP, X0, ArithOption(X0, ST_ROR, 32));
+  ROR(X0, X0, 32);
   MOVP2R(X2, &PowerPC::Write_U64);
   BR(X2);
 }