Jit64: use MRegSum where appropriate

Tillmann Karras 2015-03-05 19:20:50 +01:00
parent e27fae22d8
commit 45dbcf0ed2
6 changed files with 18 additions and 18 deletions
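MRegSum(base, index) builds a plain [base + index] operand, equivalent to MComplex(base, index, SCALE_1, 0), so every substitution below encodes the same address and only shortens the source. A minimal sketch of what the helper amounts to, assuming the usual Gen::OpArg and X64Reg types from the x64 emitter rather than quoting the actual Dolphin definition:

    // Sketch only: a register-sum operand is a scaled-index operand
    // with scale 1 and no displacement.
    inline OpArg MRegSum(X64Reg base, X64Reg index)
    {
        return MComplex(base, index, SCALE_1, 0);
    }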

@@ -124,7 +124,7 @@ void Jit64AsmRoutineManager::Generate()
else
{
MOV(64, R(RSCRATCH2), Imm64(icache));
-MOV(32, R(RSCRATCH), MComplex(RSCRATCH2, RSCRATCH, SCALE_1, 0));
+MOV(32, R(RSCRATCH), MRegSum(RSCRATCH2, RSCRATCH));
}
exit_mem = J();
@@ -139,7 +139,7 @@ void Jit64AsmRoutineManager::Generate()
else
{
MOV(64, R(RSCRATCH2), Imm64(icacheVmem));
-MOV(32, R(RSCRATCH), MComplex(RSCRATCH2, RSCRATCH, SCALE_1, 0));
+MOV(32, R(RSCRATCH), MRegSum(RSCRATCH2, RSCRATCH));
}
if (SConfig::GetInstance().m_LocalCoreStartupParameter.bWii) exit_vmem = J();
@@ -157,7 +157,7 @@ void Jit64AsmRoutineManager::Generate()
else
{
MOV(64, R(RSCRATCH2), Imm64(icacheEx));
-MOV(32, R(RSCRATCH), MComplex(RSCRATCH2, RSCRATCH, SCALE_1, 0));
+MOV(32, R(RSCRATCH), MRegSum(RSCRATCH2, RSCRATCH));
}
SetJumpTarget(no_exram);
@@ -172,12 +172,12 @@ void Jit64AsmRoutineManager::Generate()
u64 codePointers = (u64)jit->GetBlockCache()->GetCodePointers();
if (codePointers <= INT_MAX)
{
-JMPptr(MScaled(RSCRATCH, 8, (s32)codePointers));
+JMPptr(MScaled(RSCRATCH, SCALE_8, (s32)codePointers));
}
else
{
MOV(64, R(RSCRATCH2), Imm64(codePointers));
-JMPptr(MComplex(RSCRATCH2, RSCRATCH, 8, 0));
+JMPptr(MComplex(RSCRATCH2, RSCRATCH, SCALE_8, 0));
}
SetJumpTarget(notfound);
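Besides the MRegSum substitutions, this hunk also swaps the literal scale factor 8 for the SCALE_8 enumerator in MScaled and MComplex. Assuming the enumerator carries the same value, the encoded operand is unchanged and the name simply documents the eight-byte stride of the code-pointer table; a hedged illustration of the two equivalent spellings:

    // Assuming SCALE_8 == 8, both lines produce the same
    // [RSCRATCH2 + RSCRATCH * 8] operand.
    JMPptr(MComplex(RSCRATCH2, RSCRATCH, 8, 0));       // literal scale
    JMPptr(MComplex(RSCRATCH2, RSCRATCH, SCALE_8, 0)); // named scale enumerator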

@@ -1273,7 +1273,7 @@ void Jit64::addx(UGeckoInstruction inst)
{
gpr.Lock(a, b, d);
gpr.BindToRegister(d, false);
-LEA(32, gpr.RX(d), MComplex(gpr.RX(a), gpr.RX(b), 1, 0));
+LEA(32, gpr.RX(d), MRegSum(gpr.RX(a), gpr.RX(b)));
needs_test = true;
}
else

@@ -234,7 +234,7 @@ void Jit64::lXXx(UGeckoInstruction inst)
}
else if (gpr.R(a).IsSimpleReg() && gpr.R(b).IsSimpleReg())
{
-LEA(32, RSCRATCH2, MComplex(gpr.RX(a), gpr.RX(b), SCALE_1, 0));
+LEA(32, RSCRATCH2, MRegSum(gpr.RX(a), gpr.RX(b)));
}
else
{
@@ -451,7 +451,7 @@ void Jit64::stXx(UGeckoInstruction inst)
if (gpr.R(a).IsSimpleReg() && gpr.R(b).IsSimpleReg())
{
-LEA(32, RSCRATCH2, MComplex(gpr.RX(a), gpr.RX(b), SCALE_1, 0));
+LEA(32, RSCRATCH2, MRegSum(gpr.RX(a), gpr.RX(b)));
}
else
{

@@ -48,7 +48,7 @@ void Jit64::lfXXX(UGeckoInstruction inst)
{
addr = R(RSCRATCH2);
if (a && gpr.R(a).IsSimpleReg() && gpr.R(b).IsSimpleReg())
-LEA(32, RSCRATCH2, MComplex(gpr.RX(a), gpr.RX(b), SCALE_1, 0));
+LEA(32, RSCRATCH2, MRegSum(gpr.RX(a), gpr.RX(b)));
else
{
MOV(32, addr, gpr.R(b));
@@ -160,7 +160,7 @@ void Jit64::stfXXX(UGeckoInstruction inst)
if (indexed)
{
if (a && gpr.R(a).IsSimpleReg() && gpr.R(b).IsSimpleReg())
-LEA(32, RSCRATCH2, MComplex(gpr.RX(a), gpr.RX(b), SCALE_1, 0));
+LEA(32, RSCRATCH2, MRegSum(gpr.RX(a), gpr.RX(b)));
else
{
MOV(32, R(RSCRATCH2), gpr.R(b));

@@ -57,7 +57,7 @@ void Jit64::psq_stXX(UGeckoInstruction inst)
addr = RSCRATCH2;
if (a && gpr.R(a).IsSimpleReg() && gpr.R(b).IsSimpleReg())
{
-LEA(32, addr, MComplex(gpr.RX(a), gpr.RX(b), SCALE_1, 0));
+LEA(32, addr, MRegSum(gpr.RX(a), gpr.RX(b)));
}
else
{
@@ -105,7 +105,7 @@ void Jit64::psq_stXX(UGeckoInstruction inst)
if (gpr.R(a).IsSimpleReg() && gpr.R(b).IsSimpleReg() && (indexed || offset))
{
if (indexed)
-LEA(32, RSCRATCH_EXTRA, MComplex(gpr.RX(a), gpr.RX(b), SCALE_1, 0));
+LEA(32, RSCRATCH_EXTRA, MRegSum(gpr.RX(a), gpr.RX(b)));
else
LEA(32, RSCRATCH_EXTRA, MDisp(gpr.RX(a), offset));
}
@@ -190,7 +190,7 @@ void Jit64::psq_lXX(UGeckoInstruction inst)
addr = RSCRATCH2;
if (a && gpr.R(a).IsSimpleReg() && gpr.R(b).IsSimpleReg())
{
-LEA(32, addr, MComplex(gpr.RX(a), gpr.RX(b), SCALE_1, 0));
+LEA(32, addr, MRegSum(gpr.RX(a), gpr.RX(b)));
}
else
{
@@ -282,7 +282,7 @@ void Jit64::psq_lXX(UGeckoInstruction inst)
if (gpr.R(a).IsSimpleReg() && gpr.R(b).IsSimpleReg() && (indexed || offset))
{
if (indexed)
-LEA(32, RSCRATCH_EXTRA, MComplex(gpr.RX(a), gpr.RX(b), SCALE_1, 0));
+LEA(32, RSCRATCH_EXTRA, MRegSum(gpr.RX(a), gpr.RX(b)));
else
LEA(32, RSCRATCH_EXTRA, MDisp(gpr.RX(a), offset));
}

@@ -428,12 +428,12 @@ void CommonAsmRoutines::GenQuantizedLoads()
}
else if (cpu_info.bSSSE3)
{
-MOVQ_xmm(XMM0, MComplex(RMEM, RSCRATCH_EXTRA, 1, 0));
+MOVQ_xmm(XMM0, MRegSum(RMEM, RSCRATCH_EXTRA));
PSHUFB(XMM0, M(pbswapShuffle2x4));
}
else
{
-LoadAndSwap(64, RSCRATCH_EXTRA, MComplex(RMEM, RSCRATCH_EXTRA, 1, 0));
+LoadAndSwap(64, RSCRATCH_EXTRA, MRegSum(RMEM, RSCRATCH_EXTRA));
ROL(64, R(RSCRATCH_EXTRA), Imm8(32));
MOVQ_xmm(XMM0, R(RSCRATCH_EXTRA));
}
@@ -448,13 +448,13 @@ void CommonAsmRoutines::GenQuantizedLoads()
}
else if (cpu_info.bSSSE3)
{
-MOVD_xmm(XMM0, MComplex(RMEM, RSCRATCH_EXTRA, 1, 0));
+MOVD_xmm(XMM0, MRegSum(RMEM, RSCRATCH_EXTRA));
PSHUFB(XMM0, M(pbswapShuffle1x4));
UNPCKLPS(XMM0, M(m_one));
}
else
{
-LoadAndSwap(32, RSCRATCH_EXTRA, MComplex(RMEM, RSCRATCH_EXTRA, 1, 0));
+LoadAndSwap(32, RSCRATCH_EXTRA, MRegSum(RMEM, RSCRATCH_EXTRA));
MOVD_xmm(XMM0, R(RSCRATCH_EXTRA));
UNPCKLPS(XMM0, M(m_one));
}