Merge pull request #4624 from leoetlino/jit-warning-fixes
Add the g_ prefix to the jit global
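This is a mechanical rename: the process-wide JIT pointer keeps its type and lifetime and only gains the g_ prefix used for globals, so every access site now reads as a global. A minimal sketch of the pattern, drawn from the hunks below (the affected file names are not shown on this page):

    // Before: the bare name reads like a local at call sites.
    extern JitBase* jit;    // declaration in the shared JIT header
    JitBase* jit;           // definition
    jit->Jit(em_address);   // typical use

    // After: the g_ prefix marks the global.
    extern JitBase* g_jit;
    JitBase* g_jit;
    g_jit->Jit(em_address);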
This commit is contained in:
commit 7a5fe4b7ed
@@ -65,8 +65,8 @@ void BreakPoints::Add(const TBreakPoint& bp)
   if (!IsAddressBreakPoint(bp.iAddress))
   {
     m_BreakPoints.push_back(bp);
-    if (jit)
-      jit->GetBlockCache()->InvalidateICache(bp.iAddress, 4, true);
+    if (g_jit)
+      g_jit->GetBlockCache()->InvalidateICache(bp.iAddress, 4, true);
   }
 }
@@ -81,8 +81,8 @@ void BreakPoints::Add(u32 em_address, bool temp)
 
     m_BreakPoints.push_back(pt);
 
-    if (jit)
-      jit->GetBlockCache()->InvalidateICache(em_address, 4, true);
+    if (g_jit)
+      g_jit->GetBlockCache()->InvalidateICache(em_address, 4, true);
   }
 }
@@ -93,8 +93,8 @@ void BreakPoints::Remove(u32 em_address)
     if (i->iAddress == em_address)
     {
       m_BreakPoints.erase(i);
-      if (jit)
-        jit->GetBlockCache()->InvalidateICache(em_address, 4, true);
+      if (g_jit)
+        g_jit->GetBlockCache()->InvalidateICache(em_address, 4, true);
       return;
     }
   }
@@ -102,11 +102,11 @@ void BreakPoints::Remove(u32 em_address)
 
 void BreakPoints::Clear()
 {
-  if (jit)
+  if (g_jit)
   {
     for (const TBreakPoint& bp : m_BreakPoints)
     {
-      jit->GetBlockCache()->InvalidateICache(bp.iAddress, 4, true);
+      g_jit->GetBlockCache()->InvalidateICache(bp.iAddress, 4, true);
     }
   }
@@ -120,8 +120,8 @@ void BreakPoints::ClearAllTemporary()
   {
     if (bp->bTemporary)
     {
-      if (jit)
-        jit->GetBlockCache()->InvalidateICache(bp->iAddress, 4, true);
+      if (g_jit)
+        g_jit->GetBlockCache()->InvalidateICache(bp->iAddress, 4, true);
       bp = m_BreakPoints.erase(bp);
     }
     else
@@ -175,8 +175,8 @@ void MemChecks::Add(const TMemCheck& _rMemoryCheck)
   m_MemChecks.push_back(_rMemoryCheck);
   // If this is the first one, clear the JIT cache so it can switch to
   // watchpoint-compatible code.
-  if (!had_any && jit)
-    jit->GetBlockCache()->SchedulateClearCacheThreadSafe();
+  if (!had_any && g_jit)
+    g_jit->GetBlockCache()->SchedulateClearCacheThreadSafe();
 }
 
 void MemChecks::Remove(u32 _Address)
@@ -186,8 +186,8 @@ void MemChecks::Remove(u32 _Address)
     if (i->StartAddress == _Address)
     {
       m_MemChecks.erase(i);
-      if (!HasAny() && jit)
-        jit->GetBlockCache()->SchedulateClearCacheThreadSafe();
+      if (!HasAny() && g_jit)
+        g_jit->GetBlockCache()->SchedulateClearCacheThreadSafe();
       return;
     }
   }
@@ -362,7 +362,7 @@ namespace Jit64Tables
 {
 void CompileInstruction(PPCAnalyst::CodeOp& op)
 {
-  Jit64* jit64 = (Jit64*)jit;
+  Jit64* jit64 = (Jit64*)g_jit;
   (jit64->*dynaOpTable[op.inst.OPCD])(op.inst);
   GekkoOPInfo* info = op.opinfo;
   if (info)
@@ -370,11 +370,11 @@ void CompileInstruction(PPCAnalyst::CodeOp& op)
 #ifdef OPLOG
     if (!strcmp(info->opname, OP_TO_LOG))  // "mcrfs"
     {
-      rsplocations.push_back(jit.js.compilerPC);
+      rsplocations.push_back(g_jit.js.compilerPC);
     }
 #endif
     info->compileCount++;
-    info->lastUse = jit->js.compilerPC;
+    info->lastUse = g_jit->js.compilerPC;
   }
 }
@@ -107,7 +107,7 @@ void Jit64AsmRoutineManager::Generate()
 
   // Fast block number lookup.
   MOV(32, R(RSCRATCH), PPCSTATE(pc));
-  u64 icache = reinterpret_cast<u64>(jit->GetBlockCache()->GetICache());
+  u64 icache = reinterpret_cast<u64>(g_jit->GetBlockCache()->GetICache());
   AND(32, R(RSCRATCH), Imm32(JitBaseBlockCache::iCache_Mask << 2));
   if (icache <= INT_MAX)
   {
@@ -120,7 +120,7 @@ void Jit64AsmRoutineManager::Generate()
   }
 
   // Check whether the block we found matches the current state.
-  u64 blocks = reinterpret_cast<u64>(jit->GetBlockCache()->GetBlocks());
+  u64 blocks = reinterpret_cast<u64>(g_jit->GetBlockCache()->GetBlocks());
   IMUL(32, RSCRATCH, R(RSCRATCH), Imm32(sizeof(JitBlock)));
   if (blocks <= INT_MAX)
   {
@@ -11,7 +11,7 @@
 void JitBlockCache::WriteLinkBlock(const JitBlock::LinkData& source, const JitBlock* dest)
 {
   u8* location = source.exitPtrs;
-  const u8* address = dest ? dest->checkedEntry : jit->GetAsmRoutines()->dispatcher;
+  const u8* address = dest ? dest->checkedEntry : g_jit->GetAsmRoutines()->dispatcher;
   Gen::XEmitter emit(location);
   if (*location == 0xE8)
   {
@@ -49,12 +49,12 @@ void EmuCodeBlock::MemoryExceptionCheck()
   // load/store, the trampoline generator will have stashed the exception
   // handler (that we previously generated after the fastmem instruction) in
   // trampolineExceptionHandler.
-  if (jit->js.generatingTrampoline)
+  if (g_jit->js.generatingTrampoline)
   {
-    if (jit->js.trampolineExceptionHandler)
+    if (g_jit->js.trampolineExceptionHandler)
     {
       TEST(32, PPCSTATE(Exceptions), Gen::Imm32(EXCEPTION_DSI));
-      J_CC(CC_NZ, jit->js.trampolineExceptionHandler);
+      J_CC(CC_NZ, g_jit->js.trampolineExceptionHandler);
     }
     return;
   }
@@ -62,11 +62,11 @@ void EmuCodeBlock::MemoryExceptionCheck()
   // If memcheck (ie: MMU) mode is enabled and we haven't generated an
   // exception handler for this instruction yet, we will generate an
   // exception check.
-  if (jit->jo.memcheck && !jit->js.fastmemLoadStore && !jit->js.fixupExceptionHandler)
+  if (g_jit->jo.memcheck && !g_jit->js.fastmemLoadStore && !g_jit->js.fixupExceptionHandler)
   {
     TEST(32, PPCSTATE(Exceptions), Gen::Imm32(EXCEPTION_DSI));
-    jit->js.exceptionHandler = J_CC(Gen::CC_NZ, true);
-    jit->js.fixupExceptionHandler = true;
+    g_jit->js.exceptionHandler = J_CC(Gen::CC_NZ, true);
+    g_jit->js.fixupExceptionHandler = true;
   }
 }
@@ -209,19 +209,19 @@ void EmuCodeBlock::UnsafeWriteGatherPipe(int accessSize)
   switch (accessSize)
   {
   case 8:
-    CALL(jit->GetAsmRoutines()->fifoDirectWrite8);
+    CALL(g_jit->GetAsmRoutines()->fifoDirectWrite8);
     break;
   case 16:
-    CALL(jit->GetAsmRoutines()->fifoDirectWrite16);
+    CALL(g_jit->GetAsmRoutines()->fifoDirectWrite16);
     break;
   case 32:
-    CALL(jit->GetAsmRoutines()->fifoDirectWrite32);
+    CALL(g_jit->GetAsmRoutines()->fifoDirectWrite32);
     break;
   case 64:
-    CALL(jit->GetAsmRoutines()->fifoDirectWrite64);
+    CALL(g_jit->GetAsmRoutines()->fifoDirectWrite64);
     break;
   }
-  jit->js.fifoBytesSinceCheck += accessSize >> 3;
+  g_jit->js.fifoBytesSinceCheck += accessSize >> 3;
 }
 
 // Visitor that generates code to read a MMIO value.
@@ -335,17 +335,17 @@ void EmuCodeBlock::MMIOLoadToReg(MMIO::Mapping* mmio, Gen::X64Reg reg_value,
 void EmuCodeBlock::SafeLoadToReg(X64Reg reg_value, const Gen::OpArg& opAddress, int accessSize,
                                  s32 offset, BitSet32 registersInUse, bool signExtend, int flags)
 {
-  bool slowmem = (flags & SAFE_LOADSTORE_FORCE_SLOWMEM) != 0 || jit->jo.alwaysUseMemFuncs;
+  bool slowmem = (flags & SAFE_LOADSTORE_FORCE_SLOWMEM) != 0 || g_jit->jo.alwaysUseMemFuncs;
 
   registersInUse[reg_value] = false;
-  if (jit->jo.fastmem && !(flags & SAFE_LOADSTORE_NO_FASTMEM) && !slowmem)
+  if (g_jit->jo.fastmem && !(flags & SAFE_LOADSTORE_NO_FASTMEM) && !slowmem)
   {
     u8* backpatchStart = GetWritableCodePtr();
     MovInfo mov;
     bool offsetAddedToAddress =
         UnsafeLoadToReg(reg_value, opAddress, accessSize, offset, signExtend, &mov);
     TrampolineInfo& info = m_back_patch_info[mov.address];
-    info.pc = jit->js.compilerPC;
+    info.pc = g_jit->js.compilerPC;
     info.nonAtomicSwapStoreSrc = mov.nonAtomicSwapStore ? mov.nonAtomicSwapStoreSrc : INVALID_REG;
     info.start = backpatchStart;
     info.read = true;
@@ -364,7 +364,7 @@ void EmuCodeBlock::SafeLoadToReg(X64Reg reg_value, const Gen::OpArg& opAddress,
     }
     info.len = static_cast<u32>(GetCodePtr() - info.start);
 
-    jit->js.fastmemLoadStore = mov.address;
+    g_jit->js.fastmemLoadStore = mov.address;
     return;
   }
@@ -492,18 +492,18 @@ void EmuCodeBlock::SafeWriteRegToReg(OpArg reg_value, X64Reg reg_addr, int acces
                                      BitSet32 registersInUse, int flags)
 {
   bool swap = !(flags & SAFE_LOADSTORE_NO_SWAP);
-  bool slowmem = (flags & SAFE_LOADSTORE_FORCE_SLOWMEM) != 0 || jit->jo.alwaysUseMemFuncs;
+  bool slowmem = (flags & SAFE_LOADSTORE_FORCE_SLOWMEM) != 0 || g_jit->jo.alwaysUseMemFuncs;
 
   // set the correct immediate format
   reg_value = FixImmediate(accessSize, reg_value);
 
-  if (jit->jo.fastmem && !(flags & SAFE_LOADSTORE_NO_FASTMEM) && !slowmem)
+  if (g_jit->jo.fastmem && !(flags & SAFE_LOADSTORE_NO_FASTMEM) && !slowmem)
   {
     u8* backpatchStart = GetWritableCodePtr();
     MovInfo mov;
     UnsafeWriteRegToReg(reg_value, reg_addr, accessSize, offset, swap, &mov);
     TrampolineInfo& info = m_back_patch_info[mov.address];
-    info.pc = jit->js.compilerPC;
+    info.pc = g_jit->js.compilerPC;
     info.nonAtomicSwapStoreSrc = mov.nonAtomicSwapStore ? mov.nonAtomicSwapStoreSrc : INVALID_REG;
     info.start = backpatchStart;
     info.read = false;
@@ -521,7 +521,7 @@ void EmuCodeBlock::SafeWriteRegToReg(OpArg reg_value, X64Reg reg_addr, int acces
     }
     info.len = static_cast<u32>(GetCodePtr() - info.start);
 
-    jit->js.fastmemLoadStore = mov.address;
+    g_jit->js.fastmemLoadStore = mov.address;
 
     return;
   }
@@ -554,7 +554,7 @@ void EmuCodeBlock::SafeWriteRegToReg(OpArg reg_value, X64Reg reg_addr, int acces
   }
 
   // PC is used by memory watchpoints (if enabled) or to print accurate PC locations in debug logs
-  MOV(32, PPCSTATE(pc), Imm32(jit->js.compilerPC));
+  MOV(32, PPCSTATE(pc), Imm32(g_jit->js.compilerPC));
 
   size_t rsp_alignment = (flags & SAFE_LOADSTORE_NO_PROLOG) ? 8 : 0;
   ABI_PushRegistersAndAdjustStack(registersInUse, rsp_alignment);
@@ -619,7 +619,7 @@ bool EmuCodeBlock::WriteToConstAddress(int accessSize, OpArg arg, u32 address,
 
   // If we already know the address through constant folding, we can do some
   // fun tricks...
-  if (jit->jo.optimizeGatherPipe && PowerPC::IsOptimizableGatherPipeWrite(address))
+  if (g_jit->jo.optimizeGatherPipe && PowerPC::IsOptimizableGatherPipeWrite(address))
   {
     if (!arg.IsSimpleReg(RSCRATCH))
       MOV(accessSize, R(RSCRATCH), arg);
@@ -635,7 +635,7 @@ bool EmuCodeBlock::WriteToConstAddress(int accessSize, OpArg arg, u32 address,
   else
   {
     // Helps external systems know which instruction triggered the write
-    MOV(32, PPCSTATE(pc), Imm32(jit->js.compilerPC));
+    MOV(32, PPCSTATE(pc), Imm32(g_jit->js.compilerPC));
 
     ABI_PushRegistersAndAdjustStack(registersInUse, 0);
     switch (accessSize)
@@ -714,7 +714,7 @@ void EmuCodeBlock::ForceSinglePrecision(X64Reg output, const OpArg& input, bool
                                         bool duplicate)
 {
   // Most games don't need these. Zelda requires it though - some platforms get stuck without them.
-  if (jit->jo.accurateSinglePrecision)
+  if (g_jit->jo.accurateSinglePrecision)
   {
     if (packed)
     {
@@ -830,7 +830,7 @@ alignas(16) static const u64 psRoundBit[2] = {0x8000000, 0x8000000};
 // It needs a temp, so let the caller pass that in.
 void EmuCodeBlock::Force25BitPrecision(X64Reg output, const OpArg& input, X64Reg tmp)
 {
-  if (jit->jo.accurateSinglePrecision)
+  if (g_jit->jo.accurateSinglePrecision)
   {
     // mantissa = (mantissa & ~0xFFFFFFF) + ((mantissa & (1ULL << 27)) << 1);
     if (input.IsSimpleReg() && cpu_info.bAVX)
@@ -456,7 +456,7 @@ void QuantizedMemoryRoutines::GenQuantizedLoad(bool single, EQuantizeType type,
 
   bool extend = single && (type == QUANTIZE_S8 || type == QUANTIZE_S16);
 
-  if (jit->jo.memcheck)
+  if (g_jit->jo.memcheck)
   {
     BitSet32 regsToSave = QUANTIZED_REGS_TO_SAVE_LOAD;
     int flags =
@@ -580,7 +580,7 @@ void QuantizedMemoryRoutines::GenQuantizedLoadFloat(bool single, bool isInline)
   int size = single ? 32 : 64;
   bool extend = false;
 
-  if (jit->jo.memcheck)
+  if (g_jit->jo.memcheck)
   {
     BitSet32 regsToSave = QUANTIZED_REGS_TO_SAVE;
     int flags =
@@ -590,7 +590,7 @@ void QuantizedMemoryRoutines::GenQuantizedLoadFloat(bool single, bool isInline)
 
   if (single)
   {
-    if (jit->jo.memcheck)
+    if (g_jit->jo.memcheck)
     {
       MOVD_xmm(XMM0, R(RSCRATCH_EXTRA));
     }
@@ -615,7 +615,7 @@ void QuantizedMemoryRoutines::GenQuantizedLoadFloat(bool single, bool isInline)
     // for a good reason, or merely because no game does this.
     // If we find something that actually does do this, maybe this should be changed. How
     // much of a performance hit would it be?
-    if (jit->jo.memcheck)
+    if (g_jit->jo.memcheck)
     {
       ROL(64, R(RSCRATCH_EXTRA), Imm8(32));
       MOVQ_xmm(XMM0, R(RSCRATCH_EXTRA));
@@ -380,7 +380,7 @@ namespace JitILTables
 {
 void CompileInstruction(PPCAnalyst::CodeOp& op)
 {
-  JitIL* jitil = (JitIL*)jit;
+  JitIL* jitil = (JitIL*)g_jit;
   (jitil->*dynaOpTable[op.inst.OPCD])(op.inst);
   GekkoOPInfo* info = op.opinfo;
   if (info)
@@ -388,16 +388,16 @@ void CompileInstruction(PPCAnalyst::CodeOp& op)
 #ifdef OPLOG
     if (!strcmp(info->opname, OP_TO_LOG))  // "mcrfs"
     {
-      rsplocations.push_back(jit.js.compilerPC);
+      rsplocations.push_back(g_jit.js.compilerPC);
     }
 #endif
     info->compileCount++;
-    info->lastUse = jit->js.compilerPC;
+    info->lastUse = g_jit->js.compilerPC;
   }
   else
   {
     PanicAlert("Tried to compile illegal (or unknown) instruction %08x, at %08x", op.inst.hex,
-               jit->js.compilerPC);
+               g_jit->js.compilerPC);
   }
 }
@@ -503,7 +503,7 @@ const u8* JitArm64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer* code_buf, JitB
 
     // Gather pipe writes using a non-immediate address are discovered by profiling.
     bool gatherPipeIntCheck =
-        jit->js.fifoWriteAddresses.find(ops[i].address) != jit->js.fifoWriteAddresses.end();
+        g_jit->js.fifoWriteAddresses.find(ops[i].address) != g_jit->js.fifoWriteAddresses.end();
 
     if (jo.optimizeGatherPipe && (js.fifoBytesSinceCheck >= 32 || js.mustCheckFifo))
     {
@@ -28,7 +28,7 @@ void JitArm64BlockCache::WriteLinkBlock(const JitBlock::LinkData& source, const
   else
   {
     emit.MOVI2R(DISPATCHER_PC, source.exitAddress);
-    emit.B(jit->GetAsmRoutines()->dispatcher);
+    emit.B(g_jit->GetAsmRoutines()->dispatcher);
   }
   emit.FlushIcache();
 }
@@ -597,7 +597,7 @@ void JitArm64::dcbx(UGeckoInstruction inst)
   AND(value, addr, 32 - 10, 28 - 10);  // upper three bits and last 10 bit are masked for the bitset
                                        // of cachelines, 0x1ffffc00
   LSR(value, value, 5 + 5);  // >> 5 for cache line size, >> 5 for width of bitset
-  MOVP2R(EncodeRegTo64(WA), jit->GetBlockCache()->GetBlockBitSet());
+  MOVP2R(EncodeRegTo64(WA), g_jit->GetBlockCache()->GetBlockBitSet());
   LDR(value, EncodeRegTo64(WA), ArithOption(EncodeRegTo64(value), true));
 
   LSR(addr, addr, 5);  // mask sizeof cacheline, & 0x1f is the position within the bitset
@@ -326,7 +326,7 @@ void JitArm64::stfXX(UGeckoInstruction inst)
   ARM64Reg XA = EncodeRegTo64(addr_reg);
 
   if (is_immediate &&
-      !(jit->jo.optimizeGatherPipe && PowerPC::IsOptimizableGatherPipeWrite(imm_addr)))
+      !(g_jit->jo.optimizeGatherPipe && PowerPC::IsOptimizableGatherPipeWrite(imm_addr)))
   {
     MOVI2R(XA, imm_addr);
 
@@ -350,7 +350,7 @@ void JitArm64::stfXX(UGeckoInstruction inst)
 
   if (is_immediate)
   {
-    if (jit->jo.optimizeGatherPipe && PowerPC::IsOptimizableGatherPipeWrite(imm_addr))
+    if (g_jit->jo.optimizeGatherPipe && PowerPC::IsOptimizableGatherPipeWrite(imm_addr))
     {
       int accessSize;
       if (flags & BackPatchInfo::FLAG_SIZE_F64)
@@ -367,7 +367,7 @@ namespace JitArm64Tables
 {
 void CompileInstruction(PPCAnalyst::CodeOp& op)
 {
-  JitArm64* jitarm = (JitArm64*)jit;
+  JitArm64* jitarm = (JitArm64*)g_jit;
   (jitarm->*dynaOpTable[op.inst.OPCD])(op.inst);
   GekkoOPInfo* info = op.opinfo;
   if (info)
@@ -375,11 +375,11 @@ void CompileInstruction(PPCAnalyst::CodeOp& op)
 #ifdef OPLOG
     if (!strcmp(info->opname, OP_TO_LOG))
     { ///"mcrfs"
-      rsplocations.push_back(jit.js.compilerPC);
+      rsplocations.push_back(g_jit.js.compilerPC);
     }
 #endif
     info->compileCount++;
-    info->lastUse = jit->js.compilerPC;
+    info->lastUse = g_jit->js.compilerPC;
   }
 }
@@ -75,7 +75,7 @@ void JitArm64::GenerateAsm()
   ARM64Reg cache_base = X27;
   ARM64Reg block_num = W27;
   ANDI2R(pc_masked, DISPATCHER_PC, JitBaseBlockCache::iCache_Mask << 2);
-  MOVP2R(cache_base, jit->GetBlockCache()->GetICache());
+  MOVP2R(cache_base, g_jit->GetBlockCache()->GetICache());
   LDR(block_num, cache_base, EncodeRegTo64(pc_masked));
 
   // blocks[block_num]
@@ -83,7 +83,7 @@ void JitArm64::GenerateAsm()
   ARM64Reg jit_block_size = W24;
   MOVI2R(jit_block_size, sizeof(JitBlock));
   MUL(block_num, block_num, jit_block_size);
-  MOVP2R(block, jit->GetBlockCache()->GetBlocks());
+  MOVP2R(block, g_jit->GetBlockCache()->GetBlocks());
   ADD(block, block, EncodeRegTo64(block_num));
 
   // b.effectiveAddress != addr || b.msrBits != msr
@@ -10,11 +10,11 @@
 #include "Core/PowerPC/PPCAnalyst.h"
 #include "Core/PowerPC/PowerPC.h"
 
-JitBase* jit;
+JitBase* g_jit;
 
 void Jit(u32 em_address)
 {
-  jit->Jit(em_address);
+  g_jit->Jit(em_address);
 }
 
 u32 Helper_Mask(u8 mb, u8 me)
@@ -40,7 +40,7 @@
 
 class JitBase;
 
-extern JitBase* jit;
+extern JitBase* g_jit;
 
 class JitBase : public CPUCoreBase
 {
@@ -114,7 +114,7 @@ public:
   JitOptions jo;
   JitState js;
 
-  static const u8* Dispatch() { return jit->GetBlockCache()->Dispatch(); };
+  static const u8* Dispatch() { return g_jit->GetBlockCache()->Dispatch(); };
   virtual JitBaseBlockCache* GetBlockCache() = 0;
 
   virtual void Jit(u32 em_address) = 0;
@@ -66,8 +66,8 @@ void JitBaseBlockCache::Clear()
   else
     Core::DisplayMessage("Clearing code cache.", 3000);
 #endif
-  jit->js.fifoWriteAddresses.clear();
-  jit->js.pairedQuantizeAddresses.clear();
+  g_jit->js.fifoWriteAddresses.clear();
+  g_jit->js.pairedQuantizeAddresses.clear();
   for (int i = 1; i < num_blocks; i++)
   {
     DestroyBlock(i, false);
@@ -346,8 +346,8 @@ void JitBaseBlockCache::InvalidateICache(u32 address, const u32 length, bool for
   {
     for (u32 i = address; i < address + length; i += 4)
     {
-      jit->js.fifoWriteAddresses.erase(i);
-      jit->js.pairedQuantizeAddresses.erase(i);
+      g_jit->js.fifoWriteAddresses.erase(i);
+      g_jit->js.pairedQuantizeAddresses.erase(i);
     }
   }
 }
@@ -36,8 +36,8 @@ namespace JitInterface
 {
 void DoState(PointerWrap& p)
 {
-  if (jit && p.GetMode() == PointerWrap::MODE_READ)
-    jit->ClearCache();
+  if (g_jit && p.GetMode() == PointerWrap::MODE_READ)
+    g_jit->ClearCache();
 }
 CPUCoreBase* InitJitCore(int core)
 {
@@ -63,11 +63,11 @@ CPUCoreBase* InitJitCore(int core)
 
   default:
     PanicAlert("Unrecognizable cpu_core: %d", core);
-    jit = nullptr;
+    g_jit = nullptr;
     return nullptr;
   }
-  jit = static_cast<JitBase*>(ptr);
-  jit->Init();
+  g_jit = static_cast<JitBase*>(ptr);
+  g_jit->Init();
   return ptr;
 }
 void InitTables(int core)
@@ -97,7 +97,7 @@ void InitTables(int core)
 }
 CPUCoreBase* GetCore()
 {
-  return jit;
+  return g_jit;
 }
 
 void WriteProfileResults(const std::string& filename)
@@ -127,23 +127,23 @@ void WriteProfileResults(const std::string& filename)
 
 void GetProfileResults(ProfileStats* prof_stats)
 {
-  // Can't really do this with no jit core available
-  if (!jit)
+  // Can't really do this with no g_jit core available
+  if (!g_jit)
     return;
 
   prof_stats->cost_sum = 0;
   prof_stats->timecost_sum = 0;
   prof_stats->block_stats.clear();
-  prof_stats->block_stats.reserve(jit->GetBlockCache()->GetNumBlocks());
+  prof_stats->block_stats.reserve(g_jit->GetBlockCache()->GetNumBlocks());
 
   Core::EState old_state = Core::GetState();
   if (old_state == Core::CORE_RUN)
     Core::SetState(Core::CORE_PAUSE);
 
   QueryPerformanceFrequency((LARGE_INTEGER*)&prof_stats->countsPerSec);
-  for (int i = 0; i < jit->GetBlockCache()->GetNumBlocks(); i++)
+  for (int i = 0; i < g_jit->GetBlockCache()->GetNumBlocks(); i++)
   {
-    const JitBlock* block = jit->GetBlockCache()->GetBlock(i);
+    const JitBlock* block = g_jit->GetBlockCache()->GetBlock(i);
     // Rough heuristic. Mem instructions should cost more.
     u64 cost = block->originalSize * (block->runCount / 4);
     u64 timecost = block->ticCounter;
@@ -162,25 +162,25 @@ void GetProfileResults(ProfileStats* prof_stats)
 
 int GetHostCode(u32* address, const u8** code, u32* code_size)
 {
-  if (!jit)
+  if (!g_jit)
   {
     *code_size = 0;
     return 1;
   }
 
-  int block_num = jit->GetBlockCache()->GetBlockNumberFromStartAddress(*address, MSR);
+  int block_num = g_jit->GetBlockCache()->GetBlockNumberFromStartAddress(*address, MSR);
   if (block_num < 0)
   {
     for (int i = 0; i < 500; i++)
     {
-      block_num = jit->GetBlockCache()->GetBlockNumberFromStartAddress(*address - 4 * i, MSR);
+      block_num = g_jit->GetBlockCache()->GetBlockNumberFromStartAddress(*address - 4 * i, MSR);
       if (block_num >= 0)
         break;
     }
 
     if (block_num >= 0)
     {
-      JitBlock* block = jit->GetBlockCache()->GetBlock(block_num);
+      JitBlock* block = g_jit->GetBlockCache()->GetBlock(block_num);
       if (!(block->effectiveAddress <= *address &&
             block->originalSize + block->effectiveAddress >= *address))
         block_num = -1;
@@ -194,7 +194,7 @@ int GetHostCode(u32* address, const u8** code, u32* code_size)
     }
   }
 
-  JitBlock* block = jit->GetBlockCache()->GetBlock(block_num);
+  JitBlock* block = g_jit->GetBlockCache()->GetBlock(block_num);
 
   *code = block->checkedEntry;
   *code_size = block->codeSize;
@@ -205,28 +205,28 @@ int GetHostCode(u32* address, const u8** code, u32* code_size)
 bool HandleFault(uintptr_t access_address, SContext* ctx)
 {
   // Prevent nullptr dereference on a crash with no JIT present
-  if (!jit)
+  if (!g_jit)
   {
     return false;
   }
 
-  return jit->HandleFault(access_address, ctx);
+  return g_jit->HandleFault(access_address, ctx);
 }
 
 bool HandleStackFault()
 {
-  if (!jit)
+  if (!g_jit)
   {
     return false;
   }
 
-  return jit->HandleStackFault();
+  return g_jit->HandleStackFault();
 }
 
 void ClearCache()
 {
-  if (jit)
-    jit->ClearCache();
+  if (g_jit)
+    g_jit->ClearCache();
 }
 void ClearSafe()
 {
@@ -234,19 +234,19 @@ void ClearSafe()
   // inside a JIT'ed block: it clears the instruction cache, but not
   // the JIT'ed code.
   // TODO: There's probably a better way to handle this situation.
-  if (jit)
-    jit->GetBlockCache()->Clear();
+  if (g_jit)
+    g_jit->GetBlockCache()->Clear();
 }
 
 void InvalidateICache(u32 address, u32 size, bool forced)
 {
-  if (jit)
-    jit->GetBlockCache()->InvalidateICache(address, size, forced);
+  if (g_jit)
+    g_jit->GetBlockCache()->InvalidateICache(address, size, forced);
 }
 
 void CompileExceptionCheck(ExceptionType type)
 {
-  if (!jit)
+  if (!g_jit)
     return;
 
   std::unordered_set<u32>* exception_addresses = nullptr;
@@ -254,13 +254,13 @@ void CompileExceptionCheck(ExceptionType type)
   switch (type)
   {
   case ExceptionType::EXCEPTIONS_FIFO_WRITE:
-    exception_addresses = &jit->js.fifoWriteAddresses;
+    exception_addresses = &g_jit->js.fifoWriteAddresses;
     break;
   case ExceptionType::EXCEPTIONS_PAIRED_QUANTIZE:
-    exception_addresses = &jit->js.pairedQuantizeAddresses;
+    exception_addresses = &g_jit->js.pairedQuantizeAddresses;
     break;
   case ExceptionType::EXCEPTIONS_SPECULATIVE_CONSTANTS:
-    exception_addresses = &jit->js.noSpeculativeConstantsAddresses;
+    exception_addresses = &g_jit->js.noSpeculativeConstantsAddresses;
     break;
   }
@@ -277,17 +277,17 @@ void CompileExceptionCheck(ExceptionType type)
 
     // Invalidate the JIT block so that it gets recompiled with the external exception check
     // included.
-    jit->GetBlockCache()->InvalidateICache(PC, 4, true);
+    g_jit->GetBlockCache()->InvalidateICache(PC, 4, true);
   }
 }
 
 void Shutdown()
 {
-  if (jit)
+  if (g_jit)
   {
-    jit->Shutdown();
-    delete jit;
-    jit = nullptr;
+    g_jit->Shutdown();
+    delete g_jit;
+    g_jit = nullptr;
   }
 }
 }
@@ -124,8 +124,8 @@ void CCodeWindow::OnProfilerMenu(wxCommandEvent& event)
   {
   case IDM_PROFILE_BLOCKS:
     Core::SetState(Core::CORE_PAUSE);
-    if (jit != nullptr)
-      jit->ClearCache();
+    if (g_jit != nullptr)
+      g_jit->ClearCache();
     Profiler::g_ProfileBlocks = GetParentMenuBar()->IsChecked(IDM_PROFILE_BLOCKS);
     Core::SetState(Core::CORE_RUN);
     break;
@@ -135,7 +135,7 @@ void CCodeWindow::OnProfilerMenu(wxCommandEvent& event)
 
   if (Core::GetState() == Core::CORE_PAUSE && PowerPC::GetMode() == PowerPC::MODE_JIT)
   {
-    if (jit != nullptr)
+    if (g_jit != nullptr)
     {
       std::string filename = File::GetUserPath(D_DUMP_IDX) + "Debug/profiler.txt";
       File::CreateFullPath(filename);
@@ -56,7 +56,7 @@ TEST(PageFault, PageFault)
   Common::WriteProtectMemory(data, PAGE_GRAN, false);
 
   PageFaultFakeJit pfjit;
-  jit = &pfjit;
+  g_jit = &pfjit;
   pfjit.m_data = data;
 
   auto start = std::chrono::high_resolution_clock::now();
@@ -67,7 +67,7 @@ TEST(PageFault, PageFault)
   ((unsigned long long)std::chrono::duration_cast<std::chrono::nanoseconds>(diff).count())
 
   EMM::UninstallExceptionHandler();
-  jit = nullptr;
+  g_jit = nullptr;
 
   printf("page fault timing:\n");
   printf("start->HandleFault %llu ns\n", AS_NS(pfjit.m_pre_unprotect_time - start));