stop using g_jit outside of JitInterface

Replace g_jit in the x86-64 ASM routine code with an m_jit member reference
CrystalGamma 2018-12-02 14:16:17 +01:00
parent 3fa81f39fb
commit 2f490e44fb
14 changed files with 57 additions and 41 deletions
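
The change follows a standard dependency-injection refactor: rather than having every emitter helper dereference a mutable global pointer, the owning JIT is handed to each code-emitting object once, at construction, and stored as a reference. A minimal sketch of the pattern in isolation (EmitterBefore/EmitterAfter are invented names for illustration, not taken from the Dolphin sources):

class JitBase;

// Before: the dependency is hidden inside a process-wide global.
extern JitBase* g_jit;

class EmitterBefore
{
public:
  void Emit();  // dereferences g_jit internally; callers cannot see the dependency
};

// After: the dependency is explicit, non-null by construction, and per-instance.
class EmitterAfter
{
public:
  explicit EmitterAfter(JitBase& jit) : m_jit{jit} {}
  void Emit();  // uses m_jit internally

private:
  JitBase& m_jit;
};

Besides testability, the reference also documents lifetime: the emitter is only valid while its JIT outlives it, which the old global left implicit.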

View File

@@ -18,7 +18,7 @@
 using namespace Gen;

-Jit64AsmRoutineManager::Jit64AsmRoutineManager(JitBase& jit) : m_jit{jit}
+Jit64AsmRoutineManager::Jit64AsmRoutineManager(Jitx86Base& jit) : CommonAsmRoutines(jit), m_jit{jit}
 {
 }

View File

@@ -35,7 +35,7 @@ public:
   // want to ensure this number is big enough.
   static constexpr size_t CODE_SIZE = 16384;

-  explicit Jit64AsmRoutineManager(JitBase& jit);
+  explicit Jit64AsmRoutineManager(Jitx86Base& jit);
   void Init(u8* stack_top);

View File

@@ -54,12 +54,13 @@ void EmuCodeBlock::MemoryExceptionCheck()
   // load/store, the trampoline generator will have stashed the exception
   // handler (that we previously generated after the fastmem instruction) in
   // trampolineExceptionHandler.
-  if (g_jit->js.generatingTrampoline)
+  auto& js = m_jit.js;
+  if (js.generatingTrampoline)
   {
-    if (g_jit->js.trampolineExceptionHandler)
+    if (js.trampolineExceptionHandler)
     {
       TEST(32, PPCSTATE(Exceptions), Gen::Imm32(EXCEPTION_DSI));
-      J_CC(CC_NZ, g_jit->js.trampolineExceptionHandler);
+      J_CC(CC_NZ, js.trampolineExceptionHandler);
     }
     return;
   }
@@ -67,11 +68,11 @@ void EmuCodeBlock::MemoryExceptionCheck()
   // If memcheck (ie: MMU) mode is enabled and we haven't generated an
   // exception handler for this instruction yet, we will generate an
   // exception check.
-  if (g_jit->jo.memcheck && !g_jit->js.fastmemLoadStore && !g_jit->js.fixupExceptionHandler)
+  if (m_jit.jo.memcheck && !js.fastmemLoadStore && !js.fixupExceptionHandler)
   {
     TEST(32, PPCSTATE(Exceptions), Gen::Imm32(EXCEPTION_DSI));
-    g_jit->js.exceptionHandler = J_CC(Gen::CC_NZ, true);
-    g_jit->js.fixupExceptionHandler = true;
+    js.exceptionHandler = J_CC(Gen::CC_NZ, true);
+    js.fixupExceptionHandler = true;
   }
 }
@@ -318,8 +319,9 @@ void EmuCodeBlock::SafeLoadToReg(X64Reg reg_value, const Gen::OpArg& opAddress,
 {
   bool slowmem = (flags & SAFE_LOADSTORE_FORCE_SLOWMEM) != 0;
+  auto& js = m_jit.js;

   registersInUse[reg_value] = false;
-  if (g_jit->jo.fastmem && !(flags & (SAFE_LOADSTORE_NO_FASTMEM | SAFE_LOADSTORE_NO_UPDATE_PC)) &&
+  if (m_jit.jo.fastmem && !(flags & (SAFE_LOADSTORE_NO_FASTMEM | SAFE_LOADSTORE_NO_UPDATE_PC)) &&
       !slowmem)
   {
     u8* backpatchStart = GetWritableCodePtr();
@@ -327,7 +329,7 @@ void EmuCodeBlock::SafeLoadToReg(X64Reg reg_value, const Gen::OpArg& opAddress,
     bool offsetAddedToAddress =
         UnsafeLoadToReg(reg_value, opAddress, accessSize, offset, signExtend, &mov);
     TrampolineInfo& info = m_back_patch_info[mov.address];
-    info.pc = g_jit->js.compilerPC;
+    info.pc = js.compilerPC;
     info.nonAtomicSwapStoreSrc = mov.nonAtomicSwapStore ? mov.nonAtomicSwapStoreSrc : INVALID_REG;
     info.start = backpatchStart;
     info.read = true;
@@ -346,7 +348,7 @@ void EmuCodeBlock::SafeLoadToReg(X64Reg reg_value, const Gen::OpArg& opAddress,
     }
     info.len = static_cast<u32>(GetCodePtr() - info.start);

-    g_jit->js.fastmemLoadStore = mov.address;
+    js.fastmemLoadStore = mov.address;
     return;
   }
@@ -384,7 +386,7 @@ void EmuCodeBlock::SafeLoadToReg(X64Reg reg_value, const Gen::OpArg& opAddress,
   // Invalid for calls from Jit64AsmCommon routines
   if (!(flags & SAFE_LOADSTORE_NO_UPDATE_PC))
   {
-    MOV(32, PPCSTATE(pc), Imm32(g_jit->js.compilerPC));
+    MOV(32, PPCSTATE(pc), Imm32(js.compilerPC));
   }

   size_t rsp_alignment = (flags & SAFE_LOADSTORE_NO_PROLOG) ? 8 : 0;
@@ -448,7 +450,7 @@ void EmuCodeBlock::SafeLoadToRegImmediate(X64Reg reg_value, u32 address, int acc
   }

   // Helps external systems know which instruction triggered the read.
-  MOV(32, PPCSTATE(pc), Imm32(g_jit->js.compilerPC));
+  MOV(32, PPCSTATE(pc), Imm32(m_jit.js.compilerPC));

   // Fall back to general-case code.
   ABI_PushRegistersAndAdjustStack(registersInUse, 0);
@@ -490,14 +492,15 @@ void EmuCodeBlock::SafeWriteRegToReg(OpArg reg_value, X64Reg reg_addr, int acces
   // set the correct immediate format
   reg_value = FixImmediate(accessSize, reg_value);

-  if (g_jit->jo.fastmem && !(flags & (SAFE_LOADSTORE_NO_FASTMEM | SAFE_LOADSTORE_NO_UPDATE_PC)) &&
+  auto& js = m_jit.js;
+  if (m_jit.jo.fastmem && !(flags & (SAFE_LOADSTORE_NO_FASTMEM | SAFE_LOADSTORE_NO_UPDATE_PC)) &&
       !slowmem)
   {
     u8* backpatchStart = GetWritableCodePtr();
     MovInfo mov;
     UnsafeWriteRegToReg(reg_value, reg_addr, accessSize, offset, swap, &mov);
     TrampolineInfo& info = m_back_patch_info[mov.address];
-    info.pc = g_jit->js.compilerPC;
+    info.pc = js.compilerPC;
     info.nonAtomicSwapStoreSrc = mov.nonAtomicSwapStore ? mov.nonAtomicSwapStoreSrc : INVALID_REG;
     info.start = backpatchStart;
     info.read = false;
@@ -515,7 +518,7 @@ void EmuCodeBlock::SafeWriteRegToReg(OpArg reg_value, X64Reg reg_addr, int acces
     }
     info.len = static_cast<u32>(GetCodePtr() - info.start);

-    g_jit->js.fastmemLoadStore = mov.address;
+    js.fastmemLoadStore = mov.address;
     return;
   }
@@ -551,7 +554,7 @@ void EmuCodeBlock::SafeWriteRegToReg(OpArg reg_value, X64Reg reg_addr, int acces
   // Invalid for calls from Jit64AsmCommon routines
   if (!(flags & SAFE_LOADSTORE_NO_UPDATE_PC))
   {
-    MOV(32, PPCSTATE(pc), Imm32(g_jit->js.compilerPC));
+    MOV(32, PPCSTATE(pc), Imm32(js.compilerPC));
   }

   size_t rsp_alignment = (flags & SAFE_LOADSTORE_NO_PROLOG) ? 8 : 0;
@@ -617,7 +620,7 @@ bool EmuCodeBlock::WriteToConstAddress(int accessSize, OpArg arg, u32 address,
   // If we already know the address through constant folding, we can do some
   // fun tricks...
-  if (g_jit->jo.optimizeGatherPipe && PowerPC::IsOptimizableGatherPipeWrite(address))
+  if (m_jit.jo.optimizeGatherPipe && PowerPC::IsOptimizableGatherPipeWrite(address))
   {
     X64Reg arg_reg = RSCRATCH;
@@ -634,7 +637,7 @@ bool EmuCodeBlock::WriteToConstAddress(int accessSize, OpArg arg, u32 address,
     ADD(64, R(RSCRATCH2), Imm8(accessSize >> 3));
     MOV(64, PPCSTATE(gather_pipe_ptr), R(RSCRATCH2));

-    g_jit->js.fifoBytesSinceCheck += accessSize >> 3;
+    m_jit.js.fifoBytesSinceCheck += accessSize >> 3;
     return false;
   }
   else if (PowerPC::IsOptimizableRAMAddress(address))
@@ -645,7 +648,7 @@ bool EmuCodeBlock::WriteToConstAddress(int accessSize, OpArg arg, u32 address,
   else
   {
     // Helps external systems know which instruction triggered the write
-    MOV(32, PPCSTATE(pc), Imm32(g_jit->js.compilerPC));
+    MOV(32, PPCSTATE(pc), Imm32(m_jit.js.compilerPC));
     ABI_PushRegistersAndAdjustStack(registersInUse, 0);

     switch (accessSize)
@@ -724,7 +727,7 @@ void EmuCodeBlock::ForceSinglePrecision(X64Reg output, const OpArg& input, bool
                                         bool duplicate)
 {
   // Most games don't need these. Zelda requires it though - some platforms get stuck without them.
-  if (g_jit->jo.accurateSinglePrecision)
+  if (m_jit.jo.accurateSinglePrecision)
   {
     if (packed)
     {
@@ -840,7 +843,7 @@ alignas(16) static const u64 psRoundBit[2] = {0x8000000, 0x8000000};
 // It needs a temp, so let the caller pass that in.
 void EmuCodeBlock::Force25BitPrecision(X64Reg output, const OpArg& input, X64Reg tmp)
 {
-  if (g_jit->jo.accurateSinglePrecision)
+  if (m_jit.jo.accurateSinglePrecision)
   {
     // mantissa = (mantissa & ~0xFFFFFFF) + ((mantissa & (1ULL << 27)) << 1);
     if (input.IsSimpleReg() && cpu_info.bAVX)
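
The recurring auto& js = m_jit.js; lines introduced above are a small alias idiom rather than part of the refactor itself: bind a local reference to the frequently accessed member once, then use the short name for the rest of the function. A self-contained sketch (JitState and Jit are stand-in types, not the real ones):

struct JitState
{
  bool generatingTrampoline = false;
  unsigned compilerPC = 0;
};

struct Jit
{
  JitState js;
};

void CompileStep(Jit& jit)
{
  auto& js = jit.js;  // one member lookup; 'js' aliases jit.js, it is not a copy
  if (js.generatingTrampoline)
    js.compilerPC += 4;  // writes through the alias modify jit.js directly
}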

View File

@@ -19,10 +19,13 @@ namespace MMIO
 class Mapping;
 }

+class Jitx86Base;
+
 // Like XCodeBlock but has some utilities for memory access.
 class EmuCodeBlock : public Gen::X64CodeBlock
 {
 public:
+  explicit EmuCodeBlock(Jitx86Base& jit) : m_jit{jit} {}
   void MemoryExceptionCheck();

   // Simple functions to switch between near and far code emitting
@@ -125,6 +128,7 @@ public:
   void Clear();

 protected:
+  Jitx86Base& m_jit;
   ConstantPool m_const_pool;
   FarCodeCache m_far_code;
   u8* m_near_code;  // Backed up when we switch to far code.
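
Note that the header only gains a forward declaration of Jitx86Base: declaring a reference member (and a constructor parameter) requires just an incomplete type, so the full JIT definition stays out of this widely included header. A sketch of why that compiles (NeedsJit is a hypothetical class for illustration):

class Jitx86Base;  // incomplete type: declared here, defined elsewhere

class NeedsJit
{
public:
  // Binding a reference to an incomplete type is fine; only code that
  // actually dereferences m_jit needs the full class definition.
  explicit NeedsJit(Jitx86Base& jit) : m_jit{jit} {}

private:
  Jitx86Base& m_jit;
};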

View File

@@ -505,7 +505,7 @@ void QuantizedMemoryRoutines::GenQuantizedLoad(bool single, EQuantizeType type,
   bool extend = single && (type == QUANTIZE_S8 || type == QUANTIZE_S16);

-  if (g_jit->jo.memcheck)
+  if (m_jit.jo.memcheck)
   {
     BitSet32 regsToSave = QUANTIZED_REGS_TO_SAVE_LOAD;
     int flags = isInline ? 0 :
@@ -632,7 +632,7 @@ void QuantizedMemoryRoutines::GenQuantizedLoadFloat(bool single, bool isInline)
   int size = single ? 32 : 64;
   bool extend = false;

-  if (g_jit->jo.memcheck)
+  if (m_jit.jo.memcheck)
   {
     BitSet32 regsToSave = QUANTIZED_REGS_TO_SAVE;
     int flags = isInline ? 0 :
@@ -643,7 +643,7 @@ void QuantizedMemoryRoutines::GenQuantizedLoadFloat(bool single, bool isInline)
   if (single)
   {
-    if (g_jit->jo.memcheck)
+    if (m_jit.jo.memcheck)
     {
       MOVD_xmm(XMM0, R(RSCRATCH_EXTRA));
     }
@@ -668,7 +668,7 @@ void QuantizedMemoryRoutines::GenQuantizedLoadFloat(bool single, bool isInline)
     // for a good reason, or merely because no game does this.
     // If we find something that actually does do this, maybe this should be changed. How
     // much of a performance hit would it be?
-    if (g_jit->jo.memcheck)
+    if (m_jit.jo.memcheck)
     {
       ROL(64, R(RSCRATCH_EXTRA), Imm8(32));
       MOVQ_xmm(XMM0, R(RSCRATCH_EXTRA));

View File

@@ -13,6 +13,7 @@ enum EQuantizeType : u32;
 class QuantizedMemoryRoutines : public EmuCodeBlock
 {
 public:
+  explicit QuantizedMemoryRoutines(Jitx86Base& jit) : EmuCodeBlock(jit) {}
   void GenQuantizedLoad(bool single, EQuantizeType type, int quantize);
   void GenQuantizedStore(bool single, EQuantizeType type, int quantize);
@@ -24,6 +25,7 @@ private:
 class CommonAsmRoutines : public CommonAsmRoutinesBase, public QuantizedMemoryRoutines
 {
 public:
+  explicit CommonAsmRoutines(Jitx86Base& jit) : QuantizedMemoryRoutines(jit) {}
   void GenFrsqrte();
   void GenFres();
   void GenMfcr();

View File

@@ -32,14 +32,15 @@ constexpr size_t CODE_SIZE = 1024 * 1024 * 32;
 class Jitx86Base : public JitBase, public QuantizedMemoryRoutines
 {
+public:
+  Jitx86Base() : QuantizedMemoryRoutines(*this) {}
+  JitBlockCache* GetBlockCache() override { return &blocks; }
+  bool HandleFault(uintptr_t access_address, SContext* ctx) override;
+
 protected:
   bool BackPatch(u32 emAddress, SContext* ctx);
   JitBlockCache blocks{*this};
-  TrampolineCache trampolines;
-
-public:
-  JitBlockCache* GetBlockCache() override { return &blocks; }
-  bool HandleFault(uintptr_t access_address, SContext* ctx) override;
+  TrampolineCache trampolines{*this};
 };

 void LogGeneratedX86(size_t size, const PPCAnalyst::CodeBuffer& code_buffer, const u8* normalEntry,
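
The new default constructor Jitx86Base() : QuantizedMemoryRoutines(*this) {} hands the object to one of its own base classes while the rest of the object is still unconstructed. Storing the reference is fine; what the base constructor must not do is call back into the derived object. A reduced sketch of the constraint (Owner/Helper are hypothetical names; MSVC flags this pattern with warning C4355):

struct Owner;

struct Helper
{
  explicit Helper(Owner& owner) : m_owner{owner} {}  // only binds the reference
  Owner& m_owner;  // safe to use once construction has finished
};

struct Owner : Helper
{
  Owner() : Helper(*this) {}  // *this already has a valid address, but members
                              // declared in Owner are not yet initialized here
};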

View File

@@ -24,6 +24,7 @@ class TrampolineCache : public EmuCodeBlock
   const u8* GenerateWriteTrampoline(const TrampolineInfo& info);

 public:
+  explicit TrampolineCache(Jitx86Base& jit) : EmuCodeBlock(jit) {}
   const u8* GenerateTrampoline(const TrampolineInfo& info);
   void ClearCodeSpace();
 };

View File

@@ -10,8 +10,6 @@
 #include "Core/PowerPC/PPCAnalyst.h"
 #include "Core/PowerPC/PowerPC.h"

-JitBase* g_jit;
-
 const u8* JitBase::Dispatch(JitBase& jit)
 {
   return jit.GetBlockCache()->Dispatch();

View File

@@ -39,10 +39,6 @@
 #define JITDISABLE(setting) \
   FALLBACK_IF(SConfig::GetInstance().bJITOff || SConfig::GetInstance().setting)

-class JitBase;
-
-extern JitBase* g_jit;
-
 class JitBase : public CPUCoreBase
 {
 protected:

View File

@@ -40,6 +40,11 @@
 namespace JitInterface
 {
+static JitBase* g_jit = nullptr;
+void SetJit(JitBase* jit)
+{
+  g_jit = jit;
+}
 void DoState(PointerWrap& p)
 {
   if (g_jit && p.GetMode() == PointerWrap::MODE_READ)
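
Making the pointer a namespace-scope static gives it internal linkage: the symbol is now private to JitInterface.cpp, so the compiler and linker enforce the rule in the commit title instead of code review. A sketch of the effect from another translation unit (my_jit stands in for whatever JitBase instance the caller owns):

// In any other .cpp file:
// extern JitBase* g_jit;        // would no longer link: the definition above
//                               // is internal to JitInterface.cpp
JitInterface::SetJit(&my_jit);   // the one sanctioned way to install a JIT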

View File

@@ -11,6 +11,7 @@
 class CPUCoreBase;
 class PointerWrap;
+class JitBase;

 namespace PowerPC
 {
@@ -65,5 +66,8 @@ void InvalidateICache(u32 address, u32 size, bool forced);
 void CompileExceptionCheck(ExceptionType type);

+/// used for the page fault unit test, don't use outside of tests!
+void SetJit(JitBase* jit);
+
 void Shutdown();
 }

View File

@@ -8,6 +8,7 @@
 #include "Common/Timer.h"
 #include "Core/MemTools.h"
 #include "Core/PowerPC/JitCommon/JitBase.h"
+#include "Core/PowerPC/JitInterface.h"

 // include order is important
 #include <gtest/gtest.h>  // NOLINT
@@ -56,7 +57,7 @@ TEST(PageFault, PageFault)
   Common::WriteProtectMemory(data, PAGE_GRAN, false);

   PageFaultFakeJit pfjit;
-  g_jit = &pfjit;
+  JitInterface::SetJit(&pfjit);
   pfjit.m_data = data;

   auto start = std::chrono::high_resolution_clock::now();
@@ -67,7 +68,7 @@ TEST(PageFault, PageFault)
   ((unsigned long long)std::chrono::duration_cast<std::chrono::nanoseconds>(diff).count())

   EMM::UninstallExceptionHandler();
-  g_jit = nullptr;
+  JitInterface::SetJit(nullptr);

   printf("page fault timing:\n");
   printf("start->HandleFault %llu ns\n", AS_NS(pfjit.m_pre_unprotect_time - start));

View File

@@ -10,8 +10,8 @@
 #include "Common/FloatUtils.h"
 #include "Common/x64ABI.h"
 #include "Core/PowerPC/Gekko.h"
+#include "Core/PowerPC/Jit64/Jit.h"
 #include "Core/PowerPC/Jit64Common/Jit64AsmCommon.h"
 #include "Core/PowerPC/Jit64Common/Jit64Base.h"
 #include "Core/PowerPC/Jit64Common/Jit64PowerPCState.h"

 #include <gtest/gtest.h>
@@ -19,7 +19,7 @@
 class TestCommonAsmRoutines : public CommonAsmRoutines
 {
 public:
-  TestCommonAsmRoutines()
+  TestCommonAsmRoutines() : CommonAsmRoutines(jit)
   {
     using namespace Gen;
@@ -49,6 +49,7 @@ public:
   }

   u64 (*wrapped_frsqrte)(u64, UReg_FPSCR&);
+  Jit64 jit;
 };

 TEST(Jit64, Frsqrte)
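
One subtlety in the test fixture above: the initializer CommonAsmRoutines(jit) passes the jit member to a base class before jit itself is constructed, since base classes are always initialized before non-static members. Binding the reference early is fine as long as the base constructor only stores it. A sketch of the ordering (Fixture/Routines/Engine are hypothetical names):

struct Engine
{
};

struct Routines
{
  explicit Routines(Engine& e) : m_engine{e} {}  // only binds the reference
  Engine& m_engine;
};

struct Fixture : Routines
{
  Fixture() : Routines(engine) {}  // 'engine' is raw storage at this point;
                                   // Routines must not read through it yet
  Engine engine;                   // constructed after the Routines base
};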