Merge pull request #4419 from degasus/master
Jit64: Fix a few trivial PIE issues.
commit d64c9dd5c9
@@ -324,7 +324,7 @@ inline OpArg ImmPtr(const void* imm)
   return Imm64((u64)imm);
 }
 
-inline u32 PtrOffset(const void* ptr, const void* base)
+inline u32 PtrOffset(const void* ptr, const void* base = nullptr)
 {
   s64 distance = (s64)ptr - (s64)base;
   if (distance >= 0x80000000LL || distance < -0x80000000LL)
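The whole merge is variations on one theme: generated code used to embed absolute addresses of globals either as 64-bit immediates via Imm64((u64)ptr) or, worse, as 32-bit displacements via a bare (u32)(u64)ptr cast. Under a position-independent executable the data segment can land anywhere in the 64-bit address space, so the 32-bit form silently truncates. The hunk above gives PtrOffset a defaulted base so callers can write PtrOffset(ptr) for "offset from address zero", with the conversion range-checked. A minimal sketch of the two helpers; the body of the out-of-range branch is cut off in the diff, so the handling shown here is an assumption:

// ImmPtr: a typed wrapper for loading a full 64-bit pointer immediate.
inline OpArg ImmPtr(const void* imm)
{
  return Imm64((u64)imm);
}

// PtrOffset: distance from base (default nullptr, i.e. the absolute address),
// valid only if it fits in a signed 32-bit displacement.
inline u32 PtrOffset(const void* ptr, const void* base = nullptr)
{
  s64 distance = (s64)ptr - (s64)base;
  if (distance >= 0x80000000LL || distance < -0x80000000LL)
  {
    // Assumed error handling: fail loudly instead of silently truncating,
    // which is what the old (u32)(u64)ptr casts did.
    _assert_msg_(DYNA_REC, 0, "Pointer offset out of range");
    return 0;
  }
  return (u32)distance;
}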
@@ -21,11 +21,14 @@ void DSPEmitter::dsp_reg_stack_push(int stack_reg)
   MOV(8, M(&g_dsp.reg_stack_ptr[stack_reg]), R(AL));
 
   X64Reg tmp1 = gpr.GetFreeXReg();
+  X64Reg tmp2 = gpr.GetFreeXReg();
   // g_dsp.reg_stack[stack_reg][g_dsp.reg_stack_ptr[stack_reg]] = g_dsp.r[DSP_REG_ST0 + stack_reg];
   MOV(16, R(tmp1), M(&g_dsp.r.st[stack_reg]));
   MOVZX(64, 8, RAX, R(AL));
-  MOV(16, MComplex(EAX, EAX, SCALE_1, PtrOffset(&g_dsp.reg_stack[stack_reg][0], nullptr)), R(tmp1));
+  MOV(64, R(tmp2), ImmPtr(g_dsp.reg_stack[stack_reg]));
+  MOV(16, MComplex(tmp2, EAX, SCALE_2, 0), R(tmp1));
   gpr.PutXReg(tmp1);
+  gpr.PutXReg(tmp2);
 }
 
 // clobbers:
@@ -36,10 +39,13 @@ void DSPEmitter::dsp_reg_stack_pop(int stack_reg)
   // g_dsp.r[DSP_REG_ST0 + stack_reg] = g_dsp.reg_stack[stack_reg][g_dsp.reg_stack_ptr[stack_reg]];
   MOV(8, R(AL), M(&g_dsp.reg_stack_ptr[stack_reg]));
   X64Reg tmp1 = gpr.GetFreeXReg();
+  X64Reg tmp2 = gpr.GetFreeXReg();
   MOVZX(64, 8, RAX, R(AL));
-  MOV(16, R(tmp1), MComplex(EAX, EAX, SCALE_1, PtrOffset(&g_dsp.reg_stack[stack_reg][0], nullptr)));
+  MOV(64, R(tmp2), ImmPtr(g_dsp.reg_stack[stack_reg]));
+  MOV(16, R(tmp1), MComplex(tmp2, EAX, SCALE_2, 0));
   MOV(16, M(&g_dsp.r.st[stack_reg]), R(tmp1));
   gpr.PutXReg(tmp1);
+  gpr.PutXReg(tmp2);
 
   // g_dsp.reg_stack_ptr[stack_reg]--;
   // g_dsp.reg_stack_ptr[stack_reg] &= DSP_STACK_MASK;
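Both DSP stack hunks apply the same rewrite. The old code encoded the absolute address of g_dsp.reg_stack as a 32-bit displacement and doubled the byte offset for the 16-bit elements by using EAX as both base and index with SCALE_1; the new code puts the 64-bit table address in a second scratch register and lets the addressing mode scale the index. A commented restatement of the push case, assuming RAX already holds the zero-extended stack pointer as in the hunk:

// Old: absolute table address as disp32, index doubled via EAX + EAX*1.
// Breaks as soon as PIE places g_dsp above the low 2 GB.
MOV(16, MComplex(EAX, EAX, SCALE_1, PtrOffset(&g_dsp.reg_stack[stack_reg][0], nullptr)), R(tmp1));

// New: full 64-bit base in tmp2, 16-bit elements addressed as base + index*2.
// Valid wherever the executable is loaded.
MOV(64, R(tmp2), ImmPtr(g_dsp.reg_stack[stack_reg]));
MOV(16, MComplex(tmp2, EAX, SCALE_2, 0), R(tmp1));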
@@ -631,7 +631,7 @@ const u8* Jit64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer* code_buf, JitBloc
   // Conditionally add profiling code.
   if (Profiler::g_ProfileBlocks)
   {
-    MOV(64, R(RSCRATCH), Imm64((u64)&b->runCount));
+    MOV(64, R(RSCRATCH), ImmPtr(&b->runCount));
     ADD(32, MatR(RSCRATCH), Imm8(1));
     b->ticCounter = 0;
     b->ticStart = 0;
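The profiling hunk is the simplest case: the pointer was already loaded as a full 64-bit immediate, so the encoding does not change; ImmPtr(x) is just the typed spelling of Imm64((u64)x) from the first hunk, which funnels every pointer immediate through one helper:

MOV(64, R(RSCRATCH), Imm64((u64)&b->runCount));  // old: manual cast
MOV(64, R(RSCRATCH), ImmPtr(&b->runCount));      // new: same instruction, clearer intent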
@@ -34,7 +34,7 @@ void Jit64AsmRoutineManager::Generate()
   {
     // Pivot the stack to our custom one.
     MOV(64, R(RSCRATCH), R(RSP));
-    MOV(64, R(RSP), Imm64((u64)m_stack_top - 0x20));
+    MOV(64, R(RSP), ImmPtr(m_stack_top - 0x20));
     MOV(64, MDisp(RSP, 0x18), R(RSCRATCH));
   }
   else
@@ -96,10 +96,10 @@ void Jit64AsmRoutineManager::Generate()
   // need to do this for indirect jumps, just exceptions etc.
   TEST(32, PPCSTATE(msr), Imm32(1 << (31 - 27)));
   FixupBranch physmem = J_CC(CC_NZ);
-  MOV(64, R(RMEM), Imm64((u64)Memory::physical_base));
+  MOV(64, R(RMEM), ImmPtr(Memory::physical_base));
   FixupBranch membaseend = J();
   SetJumpTarget(physmem);
-  MOV(64, R(RMEM), Imm64((u64)Memory::logical_base));
+  MOV(64, R(RMEM), ImmPtr(Memory::logical_base));
   SetJumpTarget(membaseend);
 
   // The following is a translation of JitBaseBlockCache::Dispatch into assembly.
@@ -51,7 +51,8 @@ void Jit64::GenerateOverflow()
   // rare).
   static const u8 ovtable[4] = {0, 0, XER_SO_MASK, XER_SO_MASK};
   MOVZX(32, 8, RSCRATCH, PPCSTATE(xer_so_ov));
-  MOV(8, R(RSCRATCH), MDisp(RSCRATCH, (u32)(u64)ovtable));
+  MOV(64, R(RSCRATCH2), ImmPtr(ovtable));
+  MOV(8, R(RSCRATCH), MRegSum(RSCRATCH, RSCRATCH2));
   MOV(8, PPCSTATE(xer_so_ov), R(RSCRATCH));
   SetJumpTarget(exit);
 }
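GenerateOverflow shows the pattern used when the index is already in a register and the table address no longer fits in the displacement field: load the table base with ImmPtr into a second scratch register and combine the two registers with MRegSum (base + index, no scaling):

// Old: table address squeezed into the 32-bit displacement.
MOV(8, R(RSCRATCH), MDisp(RSCRATCH, (u32)(u64)ovtable));

// New: table address in RSCRATCH2, byte lookup at RSCRATCH + RSCRATCH2.
MOV(64, R(RSCRATCH2), ImmPtr(ovtable));
MOV(8, R(RSCRATCH), MRegSum(RSCRATCH, RSCRATCH2));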
@@ -344,7 +344,7 @@ void Jit64::dcbz(UGeckoInstruction inst)
   // Perform lookup to see if we can use fast path.
   MOV(32, R(RSCRATCH2), R(RSCRATCH));
   SHR(32, R(RSCRATCH2), Imm8(PowerPC::BAT_INDEX_SHIFT));
-  TEST(32, MScaled(RSCRATCH2, SCALE_4, (u32)(u64)&PowerPC::dbat_table[0]), Imm32(2));
+  TEST(32, MScaled(RSCRATCH2, SCALE_4, PtrOffset(&PowerPC::dbat_table[0])), Imm32(2));
   FixupBranch slow = J_CC(CC_Z, true);
 
   // Fast path: compute full address, then zero out 32 bytes of memory.
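The dcbz hunk is the first of a family that also covers the psq_stXX/psq_lXX dispatch and the frsqrte/fres/quantization table lookups below. These sites still encode the table address as a 32-bit displacement, so the data still has to be reachable that way; the gain is that the conversion now goes through PtrOffset(), which (per the helper sketched above) checks the range at JIT time instead of letting the cast truncate silently:

// Old: silent truncation of a 64-bit address to a 32-bit displacement.
TEST(32, MScaled(RSCRATCH2, SCALE_4, (u32)(u64)&PowerPC::dbat_table[0]), Imm32(2));

// New: same encoding when the table is in range, a loud failure when it is not.
TEST(32, MScaled(RSCRATCH2, SCALE_4, PtrOffset(&PowerPC::dbat_table[0])), Imm32(2));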
@@ -93,9 +93,9 @@ void Jit64::psq_stXX(UGeckoInstruction inst)
     MOVZX(32, 8, RSCRATCH, R(RSCRATCH2));
 
     if (w)
-      CALLptr(MScaled(RSCRATCH, SCALE_8, (u32)(u64)asm_routines.singleStoreQuantized));
+      CALLptr(MScaled(RSCRATCH, SCALE_8, PtrOffset(asm_routines.singleStoreQuantized)));
     else
-      CALLptr(MScaled(RSCRATCH, SCALE_8, (u32)(u64)asm_routines.pairedStoreQuantized));
+      CALLptr(MScaled(RSCRATCH, SCALE_8, PtrOffset(asm_routines.pairedStoreQuantized)));
   }
 
   if (update && jo.memcheck)
@@ -158,7 +158,7 @@ void Jit64::psq_lXX(UGeckoInstruction inst)
     AND(32, R(RSCRATCH2), gqr);
     MOVZX(32, 8, RSCRATCH, R(RSCRATCH2));
 
-    CALLptr(MScaled(RSCRATCH, SCALE_8, (u32)(u64)(&asm_routines.pairedLoadQuantized[w * 8])));
+    CALLptr(MScaled(RSCRATCH, SCALE_8, PtrOffset(&asm_routines.pairedLoadQuantized[w * 8])));
   }
 
   CVTPS2PD(fpr.RX(s), R(XMM0));
@@ -483,6 +483,7 @@ void Jit64::mtcrf(UGeckoInstruction inst)
   }
   else
   {
+    MOV(64, R(RSCRATCH2), ImmPtr(m_crTable));
     gpr.Lock(inst.RS);
     gpr.BindToRegister(inst.RS, true, false);
     for (int i = 0; i < 8; i++)
@@ -494,7 +495,7 @@ void Jit64::mtcrf(UGeckoInstruction inst)
       SHR(32, R(RSCRATCH), Imm8(28 - (i * 4)));
       if (i != 0)
         AND(32, R(RSCRATCH), Imm8(0xF));
-      MOV(64, R(RSCRATCH), MScaled(RSCRATCH, SCALE_8, (u32)(u64)m_crTable));
+      MOV(64, R(RSCRATCH), MComplex(RSCRATCH2, RSCRATCH, SCALE_8, 0));
       MOV(64, PPCSTATE(cr_val[i]), R(RSCRATCH));
     }
   }
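In mtcrf the lookup sits inside the loop over the eight CR fields, so the table base is loaded once before the loop (the line added in the first mtcrf hunk) and each iteration becomes a plain base + index*8 access:

// Hoisted out of the loop:
MOV(64, R(RSCRATCH2), ImmPtr(m_crTable));

// Per CR field inside the loop (replaces MScaled(RSCRATCH, SCALE_8, (u32)(u64)m_crTable)):
MOV(64, R(RSCRATCH), MComplex(RSCRATCH2, RSCRATCH, SCALE_8, 0));
MOV(64, PPCSTATE(cr_val[i]), R(RSCRATCH));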
@@ -529,7 +530,8 @@ void Jit64::mcrxr(UGeckoInstruction inst)
   // [SO OV CA 0] << 3
   SHL(32, R(RSCRATCH), Imm8(4));
 
-  MOV(64, R(RSCRATCH), MDisp(RSCRATCH, (u32)(u64)m_crTable));
+  MOV(64, R(RSCRATCH2), ImmPtr(m_crTable));
+  MOV(64, R(RSCRATCH), MRegSum(RSCRATCH, RSCRATCH2));
   MOV(64, PPCSTATE(cr_val[inst.CRFD]), R(RSCRATCH));
 
   // Clear XER[0-3]
@@ -27,10 +27,8 @@ void CommonAsmRoutines::GenFifoWrite(int size)
   const void* start = GetCodePtr();
 
   // Assume value in RSCRATCH
-  u32 gather_pipe = (u32)(u64)GPFifo::m_gatherPipe;
-  _assert_msg_(DYNA_REC, gather_pipe <= 0x7FFFFFFF, "Gather pipe not in low 2GB of memory!");
   MOV(32, R(RSCRATCH2), M(&GPFifo::m_gatherPipeCount));
-  SwapAndStore(size, MDisp(RSCRATCH2, gather_pipe), RSCRATCH);
+  SwapAndStore(size, MDisp(RSCRATCH2, PtrOffset(GPFifo::m_gatherPipe)), RSCRATCH);
   ADD(32, R(RSCRATCH2), Imm8(size >> 3));
   MOV(32, M(&GPFifo::m_gatherPipeCount), R(RSCRATCH2));
   RET();
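GenFifoWrite can drop its hand-rolled low-2 GB assert because PtrOffset() now performs the equivalent range check when it converts the gather-pipe address (see the first hunk); the store itself is unchanged:

// Old: manual cast plus a separate assert.
u32 gather_pipe = (u32)(u64)GPFifo::m_gatherPipe;
_assert_msg_(DYNA_REC, gather_pipe <= 0x7FFFFFFF, "Gather pipe not in low 2GB of memory!");
SwapAndStore(size, MDisp(RSCRATCH2, gather_pipe), RSCRATCH);

// New: the conversion and the check live in one place.
SwapAndStore(size, MDisp(RSCRATCH2, PtrOffset(GPFifo::m_gatherPipe)), RSCRATCH);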
@@ -72,9 +70,9 @@ void CommonAsmRoutines::GenFrsqrte()
 
   SHR(64, R(RSCRATCH), Imm8(37));
   AND(32, R(RSCRATCH), Imm32(0x7FF));
-  IMUL(32, RSCRATCH, MScaled(RSCRATCH_EXTRA, SCALE_4, (u32)(u64)MathUtil::frsqrte_expected_dec));
+  IMUL(32, RSCRATCH, MScaled(RSCRATCH_EXTRA, SCALE_4, PtrOffset(MathUtil::frsqrte_expected_dec)));
   MOV(32, R(RSCRATCH_EXTRA),
-      MScaled(RSCRATCH_EXTRA, SCALE_4, (u32)(u64)MathUtil::frsqrte_expected_base));
+      MScaled(RSCRATCH_EXTRA, SCALE_4, PtrOffset(MathUtil::frsqrte_expected_base)));
   SUB(32, R(RSCRATCH_EXTRA), R(RSCRATCH));
   SHL(64, R(RSCRATCH_EXTRA), Imm8(26));
   OR(64, R(RSCRATCH2), R(RSCRATCH_EXTRA)); // vali |= (s64)(frsqrte_expected_base[index] -
@@ -141,11 +139,11 @@ void CommonAsmRoutines::GenFres()
   AND(32, R(RSCRATCH), Imm32(0x3FF)); // i % 1024
   AND(32, R(RSCRATCH2), Imm8(0x1F)); // i / 1024
 
-  IMUL(32, RSCRATCH, MScaled(RSCRATCH2, SCALE_4, (u32)(u64)MathUtil::fres_expected_dec));
+  IMUL(32, RSCRATCH, MScaled(RSCRATCH2, SCALE_4, PtrOffset(MathUtil::fres_expected_dec)));
   ADD(32, R(RSCRATCH), Imm8(1));
   SHR(32, R(RSCRATCH), Imm8(1));
 
-  MOV(32, R(RSCRATCH2), MScaled(RSCRATCH2, SCALE_4, (u32)(u64)MathUtil::fres_expected_base));
+  MOV(32, R(RSCRATCH2), MScaled(RSCRATCH2, SCALE_4, PtrOffset(MathUtil::fres_expected_base)));
   SUB(32, R(RSCRATCH2), R(RSCRATCH));
   SHL(64, R(RSCRATCH2), Imm8(29));
   OR(64, R(RSCRATCH2), R(RSCRATCH_EXTRA)); // vali |= (s64)(fres_expected_base[i / 1024] -
@@ -205,7 +203,7 @@ void CommonAsmRoutines::GenMfcr()
     // SO: Bit 61 set; set flag bit 0
     // LT: Bit 62 set; set flag bit 3
     SHR(64, R(cr_val), Imm8(61));
-    OR(32, R(dst), MScaled(cr_val, SCALE_4, (u32)(u64)m_flagTable));
+    OR(32, R(dst), MScaled(cr_val, SCALE_4, PtrOffset(m_flagTable)));
   }
   RET();
 
@@ -298,7 +296,7 @@ void QuantizedMemoryRoutines::GenQuantizedStore(bool single, EQuantizeType type,
   if (quantize == -1)
   {
     SHR(32, R(RSCRATCH2), Imm8(5));
-    MULSS(XMM0, MDisp(RSCRATCH2, (u32)(u64)m_quantizeTableS));
+    MULSS(XMM0, MDisp(RSCRATCH2, PtrOffset(m_quantizeTableS)));
   }
   else if (quantize > 0)
   {
@@ -336,7 +334,7 @@ void QuantizedMemoryRoutines::GenQuantizedStore(bool single, EQuantizeType type,
   if (quantize == -1)
   {
     SHR(32, R(RSCRATCH2), Imm8(5));
-    MOVQ_xmm(XMM1, MDisp(RSCRATCH2, (u32)(u64)m_quantizeTableS));
+    MOVQ_xmm(XMM1, MDisp(RSCRATCH2, PtrOffset(m_quantizeTableS)));
     MULPS(XMM0, R(XMM1));
   }
   else if (quantize > 0)
@@ -493,7 +491,7 @@ void QuantizedMemoryRoutines::GenQuantizedLoad(bool single, EQuantizeType type,
   if (quantize == -1)
   {
     SHR(32, R(RSCRATCH2), Imm8(5));
-    MULSS(XMM0, MDisp(RSCRATCH2, (u32)(u64)m_dequantizeTableS));
+    MULSS(XMM0, MDisp(RSCRATCH2, PtrOffset(m_dequantizeTableS)));
   }
   else if (quantize > 0)
   {
@@ -565,7 +563,7 @@ void QuantizedMemoryRoutines::GenQuantizedLoad(bool single, EQuantizeType type,
   if (quantize == -1)
   {
     SHR(32, R(RSCRATCH2), Imm8(5));
-    MOVQ_xmm(XMM1, MDisp(RSCRATCH2, (u32)(u64)m_dequantizeTableS));
+    MOVQ_xmm(XMM1, MDisp(RSCRATCH2, PtrOffset(m_dequantizeTableS)));
     MULPS(XMM0, R(XMM1));
   }
   else if (quantize > 0)
@@ -234,7 +234,7 @@ FixupBranch EmuCodeBlock::CheckIfSafeAddress(const OpArg& reg_value, X64Reg reg_
 
   // Perform lookup to see if we can use fast path.
   SHR(32, R(scratch), Imm8(PowerPC::BAT_INDEX_SHIFT));
-  TEST(32, MScaled(scratch, SCALE_4, (u32)(u64)&PowerPC::dbat_table[0]), Imm32(2));
+  TEST(32, MScaled(scratch, SCALE_4, PtrOffset(&PowerPC::dbat_table[0])), Imm32(2));
 
   if (scratch == reg_addr)
     POP(scratch);
@@ -32,12 +32,12 @@ static const u8* memory_base_ptr = (u8*)&g_main_cp_state.array_strides;
 
 static OpArg MPIC(const void* ptr, X64Reg scale_reg, int scale = SCALE_1)
 {
-  return MComplex(base_reg, scale_reg, scale, (s32)((u8*)ptr - memory_base_ptr));
+  return MComplex(base_reg, scale_reg, scale, PtrOffset(ptr, memory_base_ptr));
 }
 
 static OpArg MPIC(const void* ptr)
 {
-  return MDisp(base_reg, (s32)((u8*)ptr - memory_base_ptr));
+  return MDisp(base_reg, PtrOffset(ptr, memory_base_ptr));
 }
 
 VertexLoaderX64::VertexLoaderX64(const TVtxDesc& vtx_desc, const VAT& vtx_att)
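The vertex loader's MPIC helpers were already position-independent in spirit: they address relative to memory_base_ptr, whose runtime address is presumably loaded into base_reg when the loader code is generated. The change routes the pointer subtraction through PtrOffset(ptr, memory_base_ptr), so an offset that does not fit in 32 bits is caught instead of being truncated by the old (s32) cast. A hypothetical use, with the register name purely illustrative:

// Any global near g_main_cp_state stays reachable through base_reg with a
// small displacement that does not depend on where the executable is loaded.
MOV(32, R(scratch1), MPIC(&g_main_cp_state.array_strides));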