Merge pull request #4624 from leoetlino/jit-warning-fixes
Add the g_ prefix to the jit global

Commit: 7a5fe4b7ed
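In short, this commit renames the global JIT pointer from jit to g_jit so it follows the g_ prefix convention for globals; given the source branch name jit-warning-fixes, the motive is presumably to silence warnings about locals and parameters shadowing the global. Every hunk below is the same mechanical rename at a use site, always behind the existing null checks. The following is a minimal, self-contained sketch of that pattern only; BlockCache, the demo main(), and the u32 alias are stand-ins for illustration, not Dolphin's real types:

// Minimal, self-contained illustration of the g_-prefixed global pattern
// (stand-in types, not Dolphin's real classes).
#include <cstdint>
#include <iostream>

using u32 = std::uint32_t;  // stand-in for Dolphin's fixed-width typedef

struct BlockCache
{
  void InvalidateICache(u32 address, u32 length, bool forced)
  {
    std::cout << "invalidate 0x" << std::hex << address << " len=" << std::dec << length
              << " forced=" << forced << '\n';
  }
};

struct JitBase
{
  BlockCache cache;
  BlockCache* GetBlockCache() { return &cache; }
};

// The global gets the g_ prefix; every caller null-checks it before use.
JitBase* g_jit = nullptr;  // previously named `jit`

void InvalidateICache(u32 address, u32 size, bool forced)
{
  if (g_jit)
    g_jit->GetBlockCache()->InvalidateICache(address, size, forced);
}

int main()
{
  InvalidateICache(0x80003100, 4, true);  // no-op: g_jit is null

  JitBase jit_instance;
  g_jit = &jit_instance;
  InvalidateICache(0x80003100, 4, true);  // now dispatches through the global
  g_jit = nullptr;
}

The design point, as the hunks show, is that the pointer stays a nullable global owned by JitInterface (set in InitJitCore, cleared in Shutdown), so every call site keeps its null check after the rename.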
@@ -65,8 +65,8 @@ void BreakPoints::Add(const TBreakPoint& bp)
   if (!IsAddressBreakPoint(bp.iAddress))
   {
     m_BreakPoints.push_back(bp);
-    if (jit)
-      jit->GetBlockCache()->InvalidateICache(bp.iAddress, 4, true);
+    if (g_jit)
+      g_jit->GetBlockCache()->InvalidateICache(bp.iAddress, 4, true);
   }
 }
 
@@ -81,8 +81,8 @@ void BreakPoints::Add(u32 em_address, bool temp)
 
     m_BreakPoints.push_back(pt);
 
-    if (jit)
-      jit->GetBlockCache()->InvalidateICache(em_address, 4, true);
+    if (g_jit)
+      g_jit->GetBlockCache()->InvalidateICache(em_address, 4, true);
   }
 }
 
@@ -93,8 +93,8 @@ void BreakPoints::Remove(u32 em_address)
     if (i->iAddress == em_address)
     {
       m_BreakPoints.erase(i);
-      if (jit)
-        jit->GetBlockCache()->InvalidateICache(em_address, 4, true);
+      if (g_jit)
+        g_jit->GetBlockCache()->InvalidateICache(em_address, 4, true);
       return;
     }
   }
@@ -102,11 +102,11 @@ void BreakPoints::Remove(u32 em_address)
 
 void BreakPoints::Clear()
 {
-  if (jit)
+  if (g_jit)
   {
     for (const TBreakPoint& bp : m_BreakPoints)
     {
-      jit->GetBlockCache()->InvalidateICache(bp.iAddress, 4, true);
+      g_jit->GetBlockCache()->InvalidateICache(bp.iAddress, 4, true);
     }
   }
 
@@ -120,8 +120,8 @@ void BreakPoints::ClearAllTemporary()
   {
     if (bp->bTemporary)
     {
-      if (jit)
-        jit->GetBlockCache()->InvalidateICache(bp->iAddress, 4, true);
+      if (g_jit)
+        g_jit->GetBlockCache()->InvalidateICache(bp->iAddress, 4, true);
       bp = m_BreakPoints.erase(bp);
     }
     else
@@ -175,8 +175,8 @@ void MemChecks::Add(const TMemCheck& _rMemoryCheck)
     m_MemChecks.push_back(_rMemoryCheck);
   // If this is the first one, clear the JIT cache so it can switch to
   // watchpoint-compatible code.
-  if (!had_any && jit)
-    jit->GetBlockCache()->SchedulateClearCacheThreadSafe();
+  if (!had_any && g_jit)
+    g_jit->GetBlockCache()->SchedulateClearCacheThreadSafe();
 }
 
 void MemChecks::Remove(u32 _Address)
@@ -186,8 +186,8 @@ void MemChecks::Remove(u32 _Address)
     if (i->StartAddress == _Address)
     {
       m_MemChecks.erase(i);
-      if (!HasAny() && jit)
-        jit->GetBlockCache()->SchedulateClearCacheThreadSafe();
+      if (!HasAny() && g_jit)
+        g_jit->GetBlockCache()->SchedulateClearCacheThreadSafe();
       return;
     }
   }
@@ -362,7 +362,7 @@ namespace Jit64Tables
 {
 void CompileInstruction(PPCAnalyst::CodeOp& op)
 {
-  Jit64* jit64 = (Jit64*)jit;
+  Jit64* jit64 = (Jit64*)g_jit;
   (jit64->*dynaOpTable[op.inst.OPCD])(op.inst);
   GekkoOPInfo* info = op.opinfo;
   if (info)
@@ -370,11 +370,11 @@ void CompileInstruction(PPCAnalyst::CodeOp& op)
 #ifdef OPLOG
     if (!strcmp(info->opname, OP_TO_LOG)) // "mcrfs"
     {
-      rsplocations.push_back(jit.js.compilerPC);
+      rsplocations.push_back(g_jit.js.compilerPC);
     }
 #endif
     info->compileCount++;
-    info->lastUse = jit->js.compilerPC;
+    info->lastUse = g_jit->js.compilerPC;
   }
 }
 
@@ -107,7 +107,7 @@ void Jit64AsmRoutineManager::Generate()
 
     // Fast block number lookup.
     MOV(32, R(RSCRATCH), PPCSTATE(pc));
-    u64 icache = reinterpret_cast<u64>(jit->GetBlockCache()->GetICache());
+    u64 icache = reinterpret_cast<u64>(g_jit->GetBlockCache()->GetICache());
     AND(32, R(RSCRATCH), Imm32(JitBaseBlockCache::iCache_Mask << 2));
     if (icache <= INT_MAX)
     {
@@ -120,7 +120,7 @@ void Jit64AsmRoutineManager::Generate()
     }
 
     // Check whether the block we found matches the current state.
-    u64 blocks = reinterpret_cast<u64>(jit->GetBlockCache()->GetBlocks());
+    u64 blocks = reinterpret_cast<u64>(g_jit->GetBlockCache()->GetBlocks());
     IMUL(32, RSCRATCH, R(RSCRATCH), Imm32(sizeof(JitBlock)));
     if (blocks <= INT_MAX)
     {
@@ -11,7 +11,7 @@
 void JitBlockCache::WriteLinkBlock(const JitBlock::LinkData& source, const JitBlock* dest)
 {
   u8* location = source.exitPtrs;
-  const u8* address = dest ? dest->checkedEntry : jit->GetAsmRoutines()->dispatcher;
+  const u8* address = dest ? dest->checkedEntry : g_jit->GetAsmRoutines()->dispatcher;
   Gen::XEmitter emit(location);
   if (*location == 0xE8)
   {
@@ -49,12 +49,12 @@ void EmuCodeBlock::MemoryExceptionCheck()
   // load/store, the trampoline generator will have stashed the exception
   // handler (that we previously generated after the fastmem instruction) in
   // trampolineExceptionHandler.
-  if (jit->js.generatingTrampoline)
+  if (g_jit->js.generatingTrampoline)
   {
-    if (jit->js.trampolineExceptionHandler)
+    if (g_jit->js.trampolineExceptionHandler)
     {
       TEST(32, PPCSTATE(Exceptions), Gen::Imm32(EXCEPTION_DSI));
-      J_CC(CC_NZ, jit->js.trampolineExceptionHandler);
+      J_CC(CC_NZ, g_jit->js.trampolineExceptionHandler);
     }
     return;
   }
@@ -62,11 +62,11 @@ void EmuCodeBlock::MemoryExceptionCheck()
   // If memcheck (ie: MMU) mode is enabled and we haven't generated an
   // exception handler for this instruction yet, we will generate an
   // exception check.
-  if (jit->jo.memcheck && !jit->js.fastmemLoadStore && !jit->js.fixupExceptionHandler)
+  if (g_jit->jo.memcheck && !g_jit->js.fastmemLoadStore && !g_jit->js.fixupExceptionHandler)
   {
     TEST(32, PPCSTATE(Exceptions), Gen::Imm32(EXCEPTION_DSI));
-    jit->js.exceptionHandler = J_CC(Gen::CC_NZ, true);
-    jit->js.fixupExceptionHandler = true;
+    g_jit->js.exceptionHandler = J_CC(Gen::CC_NZ, true);
+    g_jit->js.fixupExceptionHandler = true;
   }
 }
 
@@ -209,19 +209,19 @@ void EmuCodeBlock::UnsafeWriteGatherPipe(int accessSize)
   switch (accessSize)
   {
   case 8:
-    CALL(jit->GetAsmRoutines()->fifoDirectWrite8);
+    CALL(g_jit->GetAsmRoutines()->fifoDirectWrite8);
     break;
   case 16:
-    CALL(jit->GetAsmRoutines()->fifoDirectWrite16);
+    CALL(g_jit->GetAsmRoutines()->fifoDirectWrite16);
     break;
   case 32:
-    CALL(jit->GetAsmRoutines()->fifoDirectWrite32);
+    CALL(g_jit->GetAsmRoutines()->fifoDirectWrite32);
     break;
   case 64:
-    CALL(jit->GetAsmRoutines()->fifoDirectWrite64);
+    CALL(g_jit->GetAsmRoutines()->fifoDirectWrite64);
     break;
   }
-  jit->js.fifoBytesSinceCheck += accessSize >> 3;
+  g_jit->js.fifoBytesSinceCheck += accessSize >> 3;
 }
 
 // Visitor that generates code to read a MMIO value.
@@ -335,17 +335,17 @@ void EmuCodeBlock::MMIOLoadToReg(MMIO::Mapping* mmio, Gen::X64Reg reg_value,
 void EmuCodeBlock::SafeLoadToReg(X64Reg reg_value, const Gen::OpArg& opAddress, int accessSize,
                                  s32 offset, BitSet32 registersInUse, bool signExtend, int flags)
 {
-  bool slowmem = (flags & SAFE_LOADSTORE_FORCE_SLOWMEM) != 0 || jit->jo.alwaysUseMemFuncs;
+  bool slowmem = (flags & SAFE_LOADSTORE_FORCE_SLOWMEM) != 0 || g_jit->jo.alwaysUseMemFuncs;
 
   registersInUse[reg_value] = false;
-  if (jit->jo.fastmem && !(flags & SAFE_LOADSTORE_NO_FASTMEM) && !slowmem)
+  if (g_jit->jo.fastmem && !(flags & SAFE_LOADSTORE_NO_FASTMEM) && !slowmem)
   {
     u8* backpatchStart = GetWritableCodePtr();
     MovInfo mov;
     bool offsetAddedToAddress =
         UnsafeLoadToReg(reg_value, opAddress, accessSize, offset, signExtend, &mov);
     TrampolineInfo& info = m_back_patch_info[mov.address];
-    info.pc = jit->js.compilerPC;
+    info.pc = g_jit->js.compilerPC;
     info.nonAtomicSwapStoreSrc = mov.nonAtomicSwapStore ? mov.nonAtomicSwapStoreSrc : INVALID_REG;
     info.start = backpatchStart;
     info.read = true;
@@ -364,7 +364,7 @@ void EmuCodeBlock::SafeLoadToReg(X64Reg reg_value, const Gen::OpArg& opAddress,
     }
     info.len = static_cast<u32>(GetCodePtr() - info.start);
 
-    jit->js.fastmemLoadStore = mov.address;
+    g_jit->js.fastmemLoadStore = mov.address;
     return;
   }
 
@@ -492,18 +492,18 @@ void EmuCodeBlock::SafeWriteRegToReg(OpArg reg_value, X64Reg reg_addr, int acces
                                      BitSet32 registersInUse, int flags)
 {
   bool swap = !(flags & SAFE_LOADSTORE_NO_SWAP);
-  bool slowmem = (flags & SAFE_LOADSTORE_FORCE_SLOWMEM) != 0 || jit->jo.alwaysUseMemFuncs;
+  bool slowmem = (flags & SAFE_LOADSTORE_FORCE_SLOWMEM) != 0 || g_jit->jo.alwaysUseMemFuncs;
 
   // set the correct immediate format
   reg_value = FixImmediate(accessSize, reg_value);
 
-  if (jit->jo.fastmem && !(flags & SAFE_LOADSTORE_NO_FASTMEM) && !slowmem)
+  if (g_jit->jo.fastmem && !(flags & SAFE_LOADSTORE_NO_FASTMEM) && !slowmem)
   {
     u8* backpatchStart = GetWritableCodePtr();
     MovInfo mov;
     UnsafeWriteRegToReg(reg_value, reg_addr, accessSize, offset, swap, &mov);
     TrampolineInfo& info = m_back_patch_info[mov.address];
-    info.pc = jit->js.compilerPC;
+    info.pc = g_jit->js.compilerPC;
     info.nonAtomicSwapStoreSrc = mov.nonAtomicSwapStore ? mov.nonAtomicSwapStoreSrc : INVALID_REG;
     info.start = backpatchStart;
     info.read = false;
@@ -521,7 +521,7 @@ void EmuCodeBlock::SafeWriteRegToReg(OpArg reg_value, X64Reg reg_addr, int acces
     }
     info.len = static_cast<u32>(GetCodePtr() - info.start);
 
-    jit->js.fastmemLoadStore = mov.address;
+    g_jit->js.fastmemLoadStore = mov.address;
 
     return;
   }
@@ -554,7 +554,7 @@ void EmuCodeBlock::SafeWriteRegToReg(OpArg reg_value, X64Reg reg_addr, int acces
   }
 
   // PC is used by memory watchpoints (if enabled) or to print accurate PC locations in debug logs
-  MOV(32, PPCSTATE(pc), Imm32(jit->js.compilerPC));
+  MOV(32, PPCSTATE(pc), Imm32(g_jit->js.compilerPC));
 
   size_t rsp_alignment = (flags & SAFE_LOADSTORE_NO_PROLOG) ? 8 : 0;
   ABI_PushRegistersAndAdjustStack(registersInUse, rsp_alignment);
@@ -619,7 +619,7 @@ bool EmuCodeBlock::WriteToConstAddress(int accessSize, OpArg arg, u32 address,
 
   // If we already know the address through constant folding, we can do some
   // fun tricks...
-  if (jit->jo.optimizeGatherPipe && PowerPC::IsOptimizableGatherPipeWrite(address))
+  if (g_jit->jo.optimizeGatherPipe && PowerPC::IsOptimizableGatherPipeWrite(address))
   {
     if (!arg.IsSimpleReg(RSCRATCH))
       MOV(accessSize, R(RSCRATCH), arg);
@@ -635,7 +635,7 @@ bool EmuCodeBlock::WriteToConstAddress(int accessSize, OpArg arg, u32 address,
   else
   {
     // Helps external systems know which instruction triggered the write
-    MOV(32, PPCSTATE(pc), Imm32(jit->js.compilerPC));
+    MOV(32, PPCSTATE(pc), Imm32(g_jit->js.compilerPC));
 
     ABI_PushRegistersAndAdjustStack(registersInUse, 0);
     switch (accessSize)
@@ -714,7 +714,7 @@ void EmuCodeBlock::ForceSinglePrecision(X64Reg output, const OpArg& input, bool
                                         bool duplicate)
 {
   // Most games don't need these. Zelda requires it though - some platforms get stuck without them.
-  if (jit->jo.accurateSinglePrecision)
+  if (g_jit->jo.accurateSinglePrecision)
   {
     if (packed)
     {
@@ -830,7 +830,7 @@ alignas(16) static const u64 psRoundBit[2] = {0x8000000, 0x8000000};
 // It needs a temp, so let the caller pass that in.
 void EmuCodeBlock::Force25BitPrecision(X64Reg output, const OpArg& input, X64Reg tmp)
 {
-  if (jit->jo.accurateSinglePrecision)
+  if (g_jit->jo.accurateSinglePrecision)
   {
     // mantissa = (mantissa & ~0xFFFFFFF) + ((mantissa & (1ULL << 27)) << 1);
     if (input.IsSimpleReg() && cpu_info.bAVX)
@@ -456,7 +456,7 @@ void QuantizedMemoryRoutines::GenQuantizedLoad(bool single, EQuantizeType type,
 
   bool extend = single && (type == QUANTIZE_S8 || type == QUANTIZE_S16);
 
-  if (jit->jo.memcheck)
+  if (g_jit->jo.memcheck)
   {
     BitSet32 regsToSave = QUANTIZED_REGS_TO_SAVE_LOAD;
     int flags =
@@ -580,7 +580,7 @@ void QuantizedMemoryRoutines::GenQuantizedLoadFloat(bool single, bool isInline)
   int size = single ? 32 : 64;
   bool extend = false;
 
-  if (jit->jo.memcheck)
+  if (g_jit->jo.memcheck)
  {
     BitSet32 regsToSave = QUANTIZED_REGS_TO_SAVE;
     int flags =
@@ -590,7 +590,7 @@ void QuantizedMemoryRoutines::GenQuantizedLoadFloat(bool single, bool isInline)
 
   if (single)
   {
-    if (jit->jo.memcheck)
+    if (g_jit->jo.memcheck)
     {
       MOVD_xmm(XMM0, R(RSCRATCH_EXTRA));
     }
@@ -615,7 +615,7 @@ void QuantizedMemoryRoutines::GenQuantizedLoadFloat(bool single, bool isInline)
     // for a good reason, or merely because no game does this.
     // If we find something that actually does do this, maybe this should be changed. How
    // much of a performance hit would it be?
-    if (jit->jo.memcheck)
+    if (g_jit->jo.memcheck)
    {
       ROL(64, R(RSCRATCH_EXTRA), Imm8(32));
       MOVQ_xmm(XMM0, R(RSCRATCH_EXTRA));
@@ -380,7 +380,7 @@ namespace JitILTables
 {
 void CompileInstruction(PPCAnalyst::CodeOp& op)
 {
-  JitIL* jitil = (JitIL*)jit;
+  JitIL* jitil = (JitIL*)g_jit;
   (jitil->*dynaOpTable[op.inst.OPCD])(op.inst);
   GekkoOPInfo* info = op.opinfo;
   if (info)
@@ -388,16 +388,16 @@ void CompileInstruction(PPCAnalyst::CodeOp& op)
 #ifdef OPLOG
     if (!strcmp(info->opname, OP_TO_LOG)) // "mcrfs"
     {
-      rsplocations.push_back(jit.js.compilerPC);
+      rsplocations.push_back(g_jit.js.compilerPC);
     }
 #endif
     info->compileCount++;
-    info->lastUse = jit->js.compilerPC;
+    info->lastUse = g_jit->js.compilerPC;
   }
   else
   {
     PanicAlert("Tried to compile illegal (or unknown) instruction %08x, at %08x", op.inst.hex,
-               jit->js.compilerPC);
+               g_jit->js.compilerPC);
   }
 }
 
@@ -503,7 +503,7 @@ const u8* JitArm64::DoJit(u32 em_address, PPCAnalyst::CodeBuffer* code_buf, JitB
 
     // Gather pipe writes using a non-immediate address are discovered by profiling.
     bool gatherPipeIntCheck =
-        jit->js.fifoWriteAddresses.find(ops[i].address) != jit->js.fifoWriteAddresses.end();
+        g_jit->js.fifoWriteAddresses.find(ops[i].address) != g_jit->js.fifoWriteAddresses.end();
 
     if (jo.optimizeGatherPipe && (js.fifoBytesSinceCheck >= 32 || js.mustCheckFifo))
     {
@@ -28,7 +28,7 @@ void JitArm64BlockCache::WriteLinkBlock(const JitBlock::LinkData& source, const
   else
   {
     emit.MOVI2R(DISPATCHER_PC, source.exitAddress);
-    emit.B(jit->GetAsmRoutines()->dispatcher);
+    emit.B(g_jit->GetAsmRoutines()->dispatcher);
   }
   emit.FlushIcache();
 }
@@ -597,7 +597,7 @@ void JitArm64::dcbx(UGeckoInstruction inst)
   AND(value, addr, 32 - 10, 28 - 10); // upper three bits and last 10 bit are masked for the bitset
                                       // of cachelines, 0x1ffffc00
   LSR(value, value, 5 + 5); // >> 5 for cache line size, >> 5 for width of bitset
-  MOVP2R(EncodeRegTo64(WA), jit->GetBlockCache()->GetBlockBitSet());
+  MOVP2R(EncodeRegTo64(WA), g_jit->GetBlockCache()->GetBlockBitSet());
   LDR(value, EncodeRegTo64(WA), ArithOption(EncodeRegTo64(value), true));
 
   LSR(addr, addr, 5); // mask sizeof cacheline, & 0x1f is the position within the bitset
@@ -326,7 +326,7 @@ void JitArm64::stfXX(UGeckoInstruction inst)
   ARM64Reg XA = EncodeRegTo64(addr_reg);
 
   if (is_immediate &&
-      !(jit->jo.optimizeGatherPipe && PowerPC::IsOptimizableGatherPipeWrite(imm_addr)))
+      !(g_jit->jo.optimizeGatherPipe && PowerPC::IsOptimizableGatherPipeWrite(imm_addr)))
   {
     MOVI2R(XA, imm_addr);
 
@@ -350,7 +350,7 @@ void JitArm64::stfXX(UGeckoInstruction inst)
 
   if (is_immediate)
   {
-    if (jit->jo.optimizeGatherPipe && PowerPC::IsOptimizableGatherPipeWrite(imm_addr))
+    if (g_jit->jo.optimizeGatherPipe && PowerPC::IsOptimizableGatherPipeWrite(imm_addr))
    {
       int accessSize;
       if (flags & BackPatchInfo::FLAG_SIZE_F64)
@@ -367,7 +367,7 @@ namespace JitArm64Tables
 {
 void CompileInstruction(PPCAnalyst::CodeOp& op)
 {
-  JitArm64* jitarm = (JitArm64*)jit;
+  JitArm64* jitarm = (JitArm64*)g_jit;
   (jitarm->*dynaOpTable[op.inst.OPCD])(op.inst);
   GekkoOPInfo* info = op.opinfo;
   if (info)
@@ -375,11 +375,11 @@ void CompileInstruction(PPCAnalyst::CodeOp& op)
 #ifdef OPLOG
     if (!strcmp(info->opname, OP_TO_LOG))
     { ///"mcrfs"
-      rsplocations.push_back(jit.js.compilerPC);
+      rsplocations.push_back(g_jit.js.compilerPC);
     }
 #endif
     info->compileCount++;
-    info->lastUse = jit->js.compilerPC;
+    info->lastUse = g_jit->js.compilerPC;
   }
 }
 
@@ -75,7 +75,7 @@ void JitArm64::GenerateAsm()
   ARM64Reg cache_base = X27;
   ARM64Reg block_num = W27;
   ANDI2R(pc_masked, DISPATCHER_PC, JitBaseBlockCache::iCache_Mask << 2);
-  MOVP2R(cache_base, jit->GetBlockCache()->GetICache());
+  MOVP2R(cache_base, g_jit->GetBlockCache()->GetICache());
   LDR(block_num, cache_base, EncodeRegTo64(pc_masked));
 
   // blocks[block_num]
@@ -83,7 +83,7 @@ void JitArm64::GenerateAsm()
   ARM64Reg jit_block_size = W24;
   MOVI2R(jit_block_size, sizeof(JitBlock));
   MUL(block_num, block_num, jit_block_size);
-  MOVP2R(block, jit->GetBlockCache()->GetBlocks());
+  MOVP2R(block, g_jit->GetBlockCache()->GetBlocks());
   ADD(block, block, EncodeRegTo64(block_num));
 
   // b.effectiveAddress != addr || b.msrBits != msr
@@ -10,11 +10,11 @@
 #include "Core/PowerPC/PPCAnalyst.h"
 #include "Core/PowerPC/PowerPC.h"
 
-JitBase* jit;
+JitBase* g_jit;
 
 void Jit(u32 em_address)
 {
-  jit->Jit(em_address);
+  g_jit->Jit(em_address);
 }
 
 u32 Helper_Mask(u8 mb, u8 me)
@@ -40,7 +40,7 @@
 
 class JitBase;
 
-extern JitBase* jit;
+extern JitBase* g_jit;
 
 class JitBase : public CPUCoreBase
 {
@@ -114,7 +114,7 @@ public:
   JitOptions jo;
   JitState js;
 
-  static const u8* Dispatch() { return jit->GetBlockCache()->Dispatch(); };
+  static const u8* Dispatch() { return g_jit->GetBlockCache()->Dispatch(); };
   virtual JitBaseBlockCache* GetBlockCache() = 0;
 
   virtual void Jit(u32 em_address) = 0;
@@ -66,8 +66,8 @@ void JitBaseBlockCache::Clear()
   else
     Core::DisplayMessage("Clearing code cache.", 3000);
 #endif
-  jit->js.fifoWriteAddresses.clear();
-  jit->js.pairedQuantizeAddresses.clear();
+  g_jit->js.fifoWriteAddresses.clear();
+  g_jit->js.pairedQuantizeAddresses.clear();
   for (int i = 1; i < num_blocks; i++)
   {
     DestroyBlock(i, false);
@@ -346,8 +346,8 @@ void JitBaseBlockCache::InvalidateICache(u32 address, const u32 length, bool for
   {
     for (u32 i = address; i < address + length; i += 4)
    {
-      jit->js.fifoWriteAddresses.erase(i);
-      jit->js.pairedQuantizeAddresses.erase(i);
+      g_jit->js.fifoWriteAddresses.erase(i);
+      g_jit->js.pairedQuantizeAddresses.erase(i);
     }
   }
 }
@@ -36,8 +36,8 @@ namespace JitInterface
 {
 void DoState(PointerWrap& p)
 {
-  if (jit && p.GetMode() == PointerWrap::MODE_READ)
-    jit->ClearCache();
+  if (g_jit && p.GetMode() == PointerWrap::MODE_READ)
+    g_jit->ClearCache();
 }
 CPUCoreBase* InitJitCore(int core)
 {
@@ -63,11 +63,11 @@ CPUCoreBase* InitJitCore(int core)
 
   default:
     PanicAlert("Unrecognizable cpu_core: %d", core);
-    jit = nullptr;
+    g_jit = nullptr;
     return nullptr;
   }
-  jit = static_cast<JitBase*>(ptr);
-  jit->Init();
+  g_jit = static_cast<JitBase*>(ptr);
+  g_jit->Init();
   return ptr;
 }
 void InitTables(int core)
@@ -97,7 +97,7 @@ void InitTables(int core)
 }
 CPUCoreBase* GetCore()
 {
-  return jit;
+  return g_jit;
 }
 
 void WriteProfileResults(const std::string& filename)
@@ -127,23 +127,23 @@ void WriteProfileResults(const std::string& filename)
 
 void GetProfileResults(ProfileStats* prof_stats)
 {
-  // Can't really do this with no jit core available
-  if (!jit)
+  // Can't really do this with no g_jit core available
+  if (!g_jit)
     return;
 
   prof_stats->cost_sum = 0;
   prof_stats->timecost_sum = 0;
   prof_stats->block_stats.clear();
-  prof_stats->block_stats.reserve(jit->GetBlockCache()->GetNumBlocks());
+  prof_stats->block_stats.reserve(g_jit->GetBlockCache()->GetNumBlocks());
 
   Core::EState old_state = Core::GetState();
   if (old_state == Core::CORE_RUN)
     Core::SetState(Core::CORE_PAUSE);
 
   QueryPerformanceFrequency((LARGE_INTEGER*)&prof_stats->countsPerSec);
-  for (int i = 0; i < jit->GetBlockCache()->GetNumBlocks(); i++)
+  for (int i = 0; i < g_jit->GetBlockCache()->GetNumBlocks(); i++)
  {
-    const JitBlock* block = jit->GetBlockCache()->GetBlock(i);
+    const JitBlock* block = g_jit->GetBlockCache()->GetBlock(i);
     // Rough heuristic. Mem instructions should cost more.
     u64 cost = block->originalSize * (block->runCount / 4);
     u64 timecost = block->ticCounter;
@@ -162,25 +162,25 @@ void GetProfileResults(ProfileStats* prof_stats)
 
 int GetHostCode(u32* address, const u8** code, u32* code_size)
 {
-  if (!jit)
+  if (!g_jit)
  {
     *code_size = 0;
     return 1;
   }
 
-  int block_num = jit->GetBlockCache()->GetBlockNumberFromStartAddress(*address, MSR);
+  int block_num = g_jit->GetBlockCache()->GetBlockNumberFromStartAddress(*address, MSR);
   if (block_num < 0)
   {
     for (int i = 0; i < 500; i++)
     {
-      block_num = jit->GetBlockCache()->GetBlockNumberFromStartAddress(*address - 4 * i, MSR);
+      block_num = g_jit->GetBlockCache()->GetBlockNumberFromStartAddress(*address - 4 * i, MSR);
       if (block_num >= 0)
         break;
     }
 
     if (block_num >= 0)
     {
-      JitBlock* block = jit->GetBlockCache()->GetBlock(block_num);
+      JitBlock* block = g_jit->GetBlockCache()->GetBlock(block_num);
       if (!(block->effectiveAddress <= *address &&
             block->originalSize + block->effectiveAddress >= *address))
         block_num = -1;
@@ -194,7 +194,7 @@ int GetHostCode(u32* address, const u8** code, u32* code_size)
     }
   }
 
-  JitBlock* block = jit->GetBlockCache()->GetBlock(block_num);
+  JitBlock* block = g_jit->GetBlockCache()->GetBlock(block_num);
 
   *code = block->checkedEntry;
   *code_size = block->codeSize;
@@ -205,28 +205,28 @@ int GetHostCode(u32* address, const u8** code, u32* code_size)
 bool HandleFault(uintptr_t access_address, SContext* ctx)
 {
   // Prevent nullptr dereference on a crash with no JIT present
-  if (!jit)
+  if (!g_jit)
   {
     return false;
   }
 
-  return jit->HandleFault(access_address, ctx);
+  return g_jit->HandleFault(access_address, ctx);
 }
 
 bool HandleStackFault()
 {
-  if (!jit)
+  if (!g_jit)
   {
     return false;
   }
 
-  return jit->HandleStackFault();
+  return g_jit->HandleStackFault();
 }
 
 void ClearCache()
 {
-  if (jit)
-    jit->ClearCache();
+  if (g_jit)
+    g_jit->ClearCache();
 }
 void ClearSafe()
 {
@@ -234,19 +234,19 @@ void ClearSafe()
   // inside a JIT'ed block: it clears the instruction cache, but not
   // the JIT'ed code.
   // TODO: There's probably a better way to handle this situation.
-  if (jit)
-    jit->GetBlockCache()->Clear();
+  if (g_jit)
+    g_jit->GetBlockCache()->Clear();
 }
 
 void InvalidateICache(u32 address, u32 size, bool forced)
 {
-  if (jit)
-    jit->GetBlockCache()->InvalidateICache(address, size, forced);
+  if (g_jit)
+    g_jit->GetBlockCache()->InvalidateICache(address, size, forced);
 }
 
 void CompileExceptionCheck(ExceptionType type)
 {
-  if (!jit)
+  if (!g_jit)
     return;
 
   std::unordered_set<u32>* exception_addresses = nullptr;
@@ -254,13 +254,13 @@ void CompileExceptionCheck(ExceptionType type)
   switch (type)
   {
   case ExceptionType::EXCEPTIONS_FIFO_WRITE:
-    exception_addresses = &jit->js.fifoWriteAddresses;
+    exception_addresses = &g_jit->js.fifoWriteAddresses;
     break;
  case ExceptionType::EXCEPTIONS_PAIRED_QUANTIZE:
-    exception_addresses = &jit->js.pairedQuantizeAddresses;
+    exception_addresses = &g_jit->js.pairedQuantizeAddresses;
     break;
  case ExceptionType::EXCEPTIONS_SPECULATIVE_CONSTANTS:
-    exception_addresses = &jit->js.noSpeculativeConstantsAddresses;
+    exception_addresses = &g_jit->js.noSpeculativeConstantsAddresses;
     break;
   }
 
@@ -277,17 +277,17 @@ void CompileExceptionCheck(ExceptionType type)
 
     // Invalidate the JIT block so that it gets recompiled with the external exception check
     // included.
-    jit->GetBlockCache()->InvalidateICache(PC, 4, true);
+    g_jit->GetBlockCache()->InvalidateICache(PC, 4, true);
   }
 }
 
 void Shutdown()
 {
-  if (jit)
+  if (g_jit)
   {
-    jit->Shutdown();
-    delete jit;
-    jit = nullptr;
+    g_jit->Shutdown();
+    delete g_jit;
+    g_jit = nullptr;
   }
 }
 }
@@ -124,8 +124,8 @@ void CCodeWindow::OnProfilerMenu(wxCommandEvent& event)
   {
   case IDM_PROFILE_BLOCKS:
     Core::SetState(Core::CORE_PAUSE);
-    if (jit != nullptr)
-      jit->ClearCache();
+    if (g_jit != nullptr)
+      g_jit->ClearCache();
     Profiler::g_ProfileBlocks = GetParentMenuBar()->IsChecked(IDM_PROFILE_BLOCKS);
     Core::SetState(Core::CORE_RUN);
     break;
@@ -135,7 +135,7 @@ void CCodeWindow::OnProfilerMenu(wxCommandEvent& event)
 
     if (Core::GetState() == Core::CORE_PAUSE && PowerPC::GetMode() == PowerPC::MODE_JIT)
     {
-      if (jit != nullptr)
+      if (g_jit != nullptr)
       {
         std::string filename = File::GetUserPath(D_DUMP_IDX) + "Debug/profiler.txt";
        File::CreateFullPath(filename);
@@ -56,7 +56,7 @@ TEST(PageFault, PageFault)
   Common::WriteProtectMemory(data, PAGE_GRAN, false);
 
   PageFaultFakeJit pfjit;
-  jit = &pfjit;
+  g_jit = &pfjit;
   pfjit.m_data = data;
 
   auto start = std::chrono::high_resolution_clock::now();
@@ -67,7 +67,7 @@ TEST(PageFault, PageFault)
       ((unsigned long long)std::chrono::duration_cast<std::chrono::nanoseconds>(diff).count())
 
   EMM::UninstallExceptionHandler();
-  jit = nullptr;
+  g_jit = nullptr;
 
   printf("page fault timing:\n");
   printf("start->HandleFault %llu ns\n", AS_NS(pfjit.m_pre_unprotect_time - start));