Merge pull request #7460 from MerryMage/regcache-old

JitRegCache: Cleanup
Tilka 2018-10-07 11:52:57 +01:00 committed by GitHub
commit e630218587
8 changed files with 275 additions and 225 deletions


@@ -14,14 +14,14 @@ FPURegCache::FPURegCache(Jit64& jit) : RegCache{jit}
{
}
void FPURegCache::StoreRegister(size_t preg, const OpArg& new_loc)
void FPURegCache::StoreRegister(preg_t preg, const OpArg& new_loc)
{
m_emitter->MOVAPD(new_loc, m_regs[preg].location.GetSimpleReg());
m_emitter->MOVAPD(new_loc, m_regs[preg].Location().GetSimpleReg());
}
void FPURegCache::LoadRegister(size_t preg, X64Reg new_loc)
void FPURegCache::LoadRegister(preg_t preg, X64Reg new_loc)
{
m_emitter->MOVAPD(new_loc, m_regs[preg].location);
m_emitter->MOVAPD(new_loc, m_regs[preg].Location());
}
const X64Reg* FPURegCache::GetAllocationOrder(size_t* count) const
@@ -32,9 +32,9 @@ const X64Reg* FPURegCache::GetAllocationOrder(size_t* count) const
return allocation_order;
}
OpArg FPURegCache::GetDefaultLocation(size_t reg) const
OpArg FPURegCache::GetDefaultLocation(preg_t preg) const
{
return PPCSTATE(ps[reg][0]);
return PPCSTATE(ps[preg][0]);
}
BitSet32 FPURegCache::GetRegUtilization() const
@@ -42,7 +42,7 @@ BitSet32 FPURegCache::GetRegUtilization() const
return m_jit.js.op->gprInReg;
}
BitSet32 FPURegCache::CountRegsIn(size_t preg, u32 lookahead) const
BitSet32 FPURegCache::CountRegsIn(preg_t preg, u32 lookahead) const
{
BitSet32 regs_used;
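Two mechanical renames repeat throughout the diff: guest register indices change from size_t to the new preg_t alias, and the formerly public location field is now read through the Location() accessor. A minimal sketch of both, with a dummy type standing in for Gen::OpArg (which lives in Dolphin's x64 emitter):

#include <cstddef>

using preg_t = std::size_t;  // index of a guest (PowerPC) register, as introduced in JitRegCache.h

struct OpArgSketch {};  // stand-in for Gen::OpArg

class CachedRegSketch
{
public:
  const OpArgSketch& Location() const { return m_location; }  // was the public 'location' member
private:
  OpArgSketch m_location;
};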


@@ -12,11 +12,12 @@ class FPURegCache final : public RegCache
{
public:
explicit FPURegCache(Jit64& jit);
Gen::OpArg GetDefaultLocation(preg_t preg) const override;
void StoreRegister(size_t preg, const Gen::OpArg& newLoc) override;
void LoadRegister(size_t preg, Gen::X64Reg newLoc) override;
protected:
void StoreRegister(preg_t preg, const Gen::OpArg& newLoc) override;
void LoadRegister(preg_t preg, Gen::X64Reg newLoc) override;
const Gen::X64Reg* GetAllocationOrder(size_t* count) const override;
Gen::OpArg GetDefaultLocation(size_t reg) const override;
BitSet32 GetRegUtilization() const override;
BitSet32 CountRegsIn(size_t preg, u32 lookahead) const override;
BitSet32 CountRegsIn(preg_t preg, u32 lookahead) const override;
};
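The header change is the same in both backends: GetDefaultLocation stays public, while the load/store hooks and the other RegCache overrides move behind a protected: section so only the base class drives them. A rough sketch of the resulting shape, with int standing in for Gen::OpArg and Gen::X64Reg:

#include <cstddef>

using preg_t = std::size_t;

class RegCacheBaseSketch
{
public:
  virtual ~RegCacheBaseSketch() = default;
  virtual int GetDefaultLocation(preg_t preg) const = 0;  // still part of the public interface
protected:
  // Only the base class's Flush/Bind machinery should call these.
  virtual void StoreRegister(preg_t preg, int new_loc) = 0;
  virtual void LoadRegister(preg_t preg, int new_loc) = 0;
};

class FPURegCacheSketch final : public RegCacheBaseSketch
{
public:
  int GetDefaultLocation(preg_t preg) const override { return static_cast<int>(preg); }
protected:
  void StoreRegister(preg_t, int) override {}
  void LoadRegister(preg_t, int) override {}
};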


@@ -14,19 +14,19 @@ GPRRegCache::GPRRegCache(Jit64& jit) : RegCache{jit}
{
}
void GPRRegCache::StoreRegister(size_t preg, const OpArg& new_loc)
void GPRRegCache::StoreRegister(preg_t preg, const OpArg& new_loc)
{
m_emitter->MOV(32, new_loc, m_regs[preg].location);
m_emitter->MOV(32, new_loc, m_regs[preg].Location());
}
void GPRRegCache::LoadRegister(size_t preg, X64Reg new_loc)
void GPRRegCache::LoadRegister(preg_t preg, X64Reg new_loc)
{
m_emitter->MOV(32, ::Gen::R(new_loc), m_regs[preg].location);
m_emitter->MOV(32, ::Gen::R(new_loc), m_regs[preg].Location());
}
OpArg GPRRegCache::GetDefaultLocation(size_t reg) const
OpArg GPRRegCache::GetDefaultLocation(preg_t preg) const
{
return PPCSTATE(gpr[reg]);
return PPCSTATE(gpr[preg]);
}
const X64Reg* GPRRegCache::GetAllocationOrder(size_t* count) const
@@ -46,13 +46,12 @@ const X64Reg* GPRRegCache::GetAllocationOrder(size_t* count) const
return allocation_order;
}
void GPRRegCache::SetImmediate32(size_t preg, u32 imm_value, bool dirty)
void GPRRegCache::SetImmediate32(preg_t preg, u32 imm_value, bool dirty)
{
// "dirty" can be false to avoid redundantly flushing an immediate when
// processing speculative constants.
DiscardRegContentsIfCached(preg);
m_regs[preg].away |= dirty;
m_regs[preg].location = Imm32(imm_value);
m_regs[preg].SetToImm32(imm_value, dirty);
}
BitSet32 GPRRegCache::GetRegUtilization() const
@@ -60,7 +59,7 @@ BitSet32 GPRRegCache::GetRegUtilization() const
return m_jit.js.op->gprInReg;
}
BitSet32 GPRRegCache::CountRegsIn(size_t preg, u32 lookahead) const
BitSet32 GPRRegCache::CountRegsIn(preg_t preg, u32 lookahead) const
{
BitSet32 regs_used;
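SetImmediate32 now delegates the state change to PPCCachedReg::SetToImm32, and the dirty flag is what separates a real immediate (must eventually be stored back to PPCSTATE) from a speculative constant (PPCSTATE already holds the value). A self-contained sketch of that helper, with the Gen::OpArg bookkeeping reduced to plain fields:

#include <cstdint>

using u32 = std::uint32_t;

struct CachedGuestRegSketch
{
  u32 imm = 0;
  bool has_imm = false;
  bool away = false;  // true: cached value is newer than PPCSTATE and needs a write-back

  // Mirrors PPCCachedReg::SetToImm32: dirty stays false for speculative constants,
  // i.e. immediates that already match the value stored at the default location.
  void SetToImm32(u32 value, bool dirty = true)
  {
    imm = value;
    has_imm = true;
    away |= dirty;
  }
};

With dirty = false the register classifies as SpeculativeImmediate, so Flush() only resets the cached location instead of emitting a store.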


@@ -12,12 +12,13 @@ class GPRRegCache final : public RegCache
{
public:
explicit GPRRegCache(Jit64& jit);
Gen::OpArg GetDefaultLocation(preg_t preg) const override;
void SetImmediate32(preg_t preg, u32 imm_value, bool dirty = true);
void StoreRegister(size_t preg, const Gen::OpArg& new_loc) override;
void LoadRegister(size_t preg, Gen::X64Reg new_loc) override;
Gen::OpArg GetDefaultLocation(size_t reg) const override;
protected:
void StoreRegister(preg_t preg, const Gen::OpArg& new_loc) override;
void LoadRegister(preg_t preg, Gen::X64Reg new_loc) override;
const Gen::X64Reg* GetAllocationOrder(size_t* count) const override;
void SetImmediate32(size_t preg, u32 imm_value, bool dirty = true);
BitSet32 GetRegUtilization() const override;
BitSet32 CountRegsIn(size_t preg, u32 lookahead) const override;
BitSet32 CountRegsIn(preg_t preg, u32 lookahead) const override;
};


@@ -935,10 +935,10 @@ u8* Jit64::DoJit(u32 em_address, JitBlock* b, u32 nextPC)
}
#if defined(_DEBUG) || defined(DEBUGFAST)
if (gpr.SanityCheck() || fpr.SanityCheck())
if (!gpr.SanityCheck() || !fpr.SanityCheck())
{
std::string ppc_inst = Common::GekkoDisassembler::Disassemble(op.inst.hex, em_address);
// NOTICE_LOG(DYNA_REC, "Unflushed register: %s", ppc_inst.c_str());
NOTICE_LOG(DYNA_REC, "Unflushed register: %s", ppc_inst.c_str());
}
#endif
i += js.skipInstructions;
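The Jit.cpp hunk is a small behavioural change riding along: SanityCheck() now returns a bool where true means the caches are consistent, so the debug-only guard is negated, and the previously commented-out NOTICE_LOG is restored. A tiny stand-in showing the intended logic (the real gpr/fpr objects and NOTICE_LOG come from Dolphin):

#include <cstdio>

struct CacheSketch
{
  bool consistent = true;
  bool SanityCheck() const { return consistent; }  // true == no unflushed/locked registers left
};

void DebugCheckSketch(const CacheSketch& gpr, const CacheSketch& fpr, const char* ppc_inst)
{
  // Enter the branch only when either cache reports an inconsistency.
  if (!gpr.SanityCheck() || !fpr.SanityCheck())
    std::printf("Unflushed register: %s\n", ppc_inst);
}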


@@ -26,47 +26,20 @@ RegCache::RegCache(Jit64& jit) : m_jit{jit}
void RegCache::Start()
{
for (auto& xreg : m_xregs)
{
xreg.free = true;
xreg.dirty = false;
xreg.locked = false;
xreg.ppcReg = static_cast<size_t>(INVALID_REG);
}
m_xregs.fill({});
for (size_t i = 0; i < m_regs.size(); i++)
{
m_regs[i].location = GetDefaultLocation(i);
m_regs[i].away = false;
m_regs[i].locked = false;
m_regs[i] = PPCCachedReg{GetDefaultLocation(i)};
}
// todo: sort to find the most popular regs
/*
int maxPreload = 2;
for (int i = 0; i < 32; i++)
{
if (stats.numReads[i] > 2 || stats.numWrites[i] >= 2)
{
LoadToX64(i, true, false); //stats.firstRead[i] <= stats.firstWrite[i], false);
maxPreload--;
if (!maxPreload)
break;
}
}*/
// Find top regs - preload them (load bursts ain't bad)
// But only preload IF written OR reads >= 3
}
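Start() no longer resets each field by hand: both cached-register types now carry default member initializers, so value-initialization already describes the flushed state; the host side can simply do m_xregs.fill({}), and each guest entry is rebuilt as PPCCachedReg{GetDefaultLocation(i)}. A reduced sketch of that idea:

#include <array>

struct HostRegStateSketch
{
  // The defaults are the "free" state, so a value-initialized element is already reset.
  bool free = true;
  bool dirty = false;
  bool locked = false;
};

struct CacheStartSketch
{
  std::array<HostRegStateSketch, 16> m_xregs;

  void Start()
  {
    m_xregs.fill({});  // replaces the old per-field reset loop
    // Guest registers are rebuilt with their default PPCSTATE location in the real code.
  }
};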
void RegCache::DiscardRegContentsIfCached(size_t preg)
void RegCache::DiscardRegContentsIfCached(preg_t preg)
{
if (IsBound(preg))
if (m_regs[preg].IsBound())
{
X64Reg xr = m_regs[preg].location.GetSimpleReg();
m_xregs[xr].free = true;
m_xregs[xr].dirty = false;
m_xregs[xr].ppcReg = static_cast<size_t>(INVALID_REG);
m_regs[preg].away = false;
m_regs[preg].location = GetDefaultLocation(preg);
X64Reg xr = m_regs[preg].Location().GetSimpleReg();
m_xregs[xr].SetFlushed();
m_regs[preg].SetFlushed();
}
}
@@ -77,207 +50,180 @@ void RegCache::SetEmitter(XEmitter* emitter)
void RegCache::Flush(FlushMode mode, BitSet32 regsToFlush)
{
for (size_t i = 0; i < m_xregs.size(); i++)
{
if (m_xregs[i].locked)
PanicAlert("Someone forgot to unlock X64 reg %zu", i);
}
ASSERT_MSG(
DYNA_REC,
std::none_of(m_xregs.begin(), m_xregs.end(), [](const auto& x) { return x.IsLocked(); }),
"Someone forgot to unlock a X64 reg");
for (unsigned int i : regsToFlush)
{
if (m_regs[i].locked)
{
PanicAlert("Someone forgot to unlock PPC reg %u (X64 reg %i).", i, RX(i));
}
ASSERT_MSG(DYNA_REC, !m_regs[i].IsLocked(), "Someone forgot to unlock PPC reg %u (X64 reg %i).",
i, RX(i));
if (m_regs[i].away)
{
if (m_regs[i].location.IsSimpleReg() || m_regs[i].location.IsImm())
{
StoreFromRegister(i, mode);
}
else
{
ASSERT_MSG(DYNA_REC, 0, "Jit64 - Flush unhandled case, reg %u PC: %08x", i, PC);
}
}
else if (m_regs[i].location.IsImm())
switch (m_regs[i].GetLocationType())
{
case PPCCachedReg::LocationType::Default:
break;
case PPCCachedReg::LocationType::SpeculativeImmediate:
// We can have a cached value without a host register through speculative constants.
// It must be cleared when flushing, otherwise it may be out of sync with PPCSTATE,
// if PPCSTATE is modified externally (e.g. fallback to interpreter).
m_regs[i].location = GetDefaultLocation(i);
m_regs[i].SetFlushed();
break;
case PPCCachedReg::LocationType::Bound:
case PPCCachedReg::LocationType::Immediate:
StoreFromRegister(i, mode);
break;
}
}
}
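Flush() now classifies each guest register by PPCCachedReg::GetLocationType() instead of probing away/IsImm/IsSimpleReg by hand. A compilable sketch of the per-register decision, with the store call reduced to a comment:

enum class LocationType { Default, SpeculativeImmediate, Bound, Immediate };

void FlushOneSketch(LocationType type)
{
  switch (type)
  {
  case LocationType::Default:
    break;  // already at its PPCSTATE slot, nothing to do
  case LocationType::SpeculativeImmediate:
    // Value already matches PPCSTATE; just drop the cached constant so an external
    // write (e.g. an interpreter fallback) cannot leave the cache out of sync.
    break;
  case LocationType::Bound:
  case LocationType::Immediate:
    // StoreFromRegister() writes the value back if needed and releases the binding.
    break;
  }
}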
void RegCache::FlushR(X64Reg reg)
{
if (reg >= m_xregs.size())
PanicAlert("Flushing non existent reg");
if (!m_xregs[reg].free)
{
StoreFromRegister(m_xregs[reg].ppcReg);
}
}
void RegCache::FlushR(X64Reg reg, X64Reg reg2)
{
FlushR(reg);
FlushR(reg2);
}
void RegCache::FlushLockX(X64Reg reg)
{
FlushR(reg);
FlushX(reg);
LockX(reg);
}
void RegCache::FlushLockX(X64Reg reg1, X64Reg reg2)
{
FlushR(reg1);
FlushR(reg2);
FlushX(reg1);
FlushX(reg2);
LockX(reg1);
LockX(reg2);
}
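FlushR is gone; FlushLockX is now built directly from FlushX (added further down in this file) followed by a lock. A self-contained approximation of that pairing, with StoreFromRegister reduced to a reset:

#include <array>
#include <cassert>

struct HostRegSketch { bool free = true; bool locked = false; };

struct FlushLockSketch
{
  std::array<HostRegSketch, 16> xregs;

  void FlushX(int reg)
  {
    assert(reg >= 0 && reg < static_cast<int>(xregs.size()));  // "Flushing non-existent reg"
    assert(!xregs[reg].locked);
    if (!xregs[reg].free)
      xregs[reg] = {};  // the real code calls StoreFromRegister(m_xregs[reg].Contents())
  }

  void FlushLockX(int reg)
  {
    FlushX(reg);
    xregs[reg].locked = true;  // LockX(reg) in the real code
  }
};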
int RegCache::SanityCheck() const
bool RegCache::SanityCheck() const
{
for (size_t i = 0; i < m_regs.size(); i++)
{
if (m_regs[i].away)
switch (m_regs[i].GetLocationType())
{
if (m_regs[i].location.IsSimpleReg())
{
Gen::X64Reg simple = m_regs[i].location.GetSimpleReg();
if (m_xregs[simple].locked)
return 1;
if (m_xregs[simple].ppcReg != i)
return 2;
}
else if (m_regs[i].location.IsImm())
{
return 3;
}
case PPCCachedReg::LocationType::Default:
case PPCCachedReg::LocationType::SpeculativeImmediate:
case PPCCachedReg::LocationType::Immediate:
break;
case PPCCachedReg::LocationType::Bound:
{
if (m_regs[i].IsLocked())
return false;
Gen::X64Reg xr = m_regs[i].Location().GetSimpleReg();
if (m_xregs[xr].IsLocked())
return false;
if (m_xregs[xr].Contents() != i)
return false;
break;
}
}
}
return 0;
return true;
}
void RegCache::KillImmediate(size_t preg, bool doLoad, bool makeDirty)
void RegCache::KillImmediate(preg_t preg, bool doLoad, bool makeDirty)
{
if (m_regs[preg].away)
switch (m_regs[preg].GetLocationType())
{
if (m_regs[preg].location.IsImm())
BindToRegister(preg, doLoad, makeDirty);
else if (m_regs[preg].location.IsSimpleReg())
m_xregs[RX(preg)].dirty |= makeDirty;
case PPCCachedReg::LocationType::Default:
case PPCCachedReg::LocationType::SpeculativeImmediate:
break;
case PPCCachedReg::LocationType::Bound:
if (makeDirty)
m_xregs[RX(preg)].MakeDirty();
break;
case PPCCachedReg::LocationType::Immediate:
BindToRegister(preg, doLoad, makeDirty);
break;
}
}
void RegCache::BindToRegister(size_t i, bool doLoad, bool makeDirty)
void RegCache::BindToRegister(preg_t i, bool doLoad, bool makeDirty)
{
if (!m_regs[i].away || m_regs[i].location.IsImm())
if (!m_regs[i].IsBound())
{
X64Reg xr = GetFreeXReg();
if (m_xregs[xr].dirty)
PanicAlert("Xreg already dirty");
if (m_xregs[xr].locked)
PanicAlert("GetFreeXReg returned locked register");
m_xregs[xr].free = false;
m_xregs[xr].ppcReg = i;
m_xregs[xr].dirty = makeDirty || m_regs[i].away;
ASSERT_MSG(DYNA_REC, !m_xregs[xr].IsDirty(), "Xreg %i already dirty", xr);
ASSERT_MSG(DYNA_REC, !m_xregs[xr].IsLocked(), "GetFreeXReg returned locked register");
m_xregs[xr].SetBoundTo(i, makeDirty || m_regs[i].IsAway());
if (doLoad)
LoadRegister(i, xr);
for (size_t j = 0; j < m_regs.size(); j++)
{
if (i != j && m_regs[j].location.IsSimpleReg(xr))
{
Crash();
}
LoadRegister(i, xr);
}
m_regs[i].away = true;
m_regs[i].location = ::Gen::R(xr);
ASSERT_MSG(DYNA_REC,
std::none_of(m_regs.begin(), m_regs.end(),
[xr](const auto& r) { return r.Location().IsSimpleReg(xr); }),
"Xreg %i already bound", xr);
m_regs[i].SetBoundTo(xr);
}
else
{
// reg location must be simplereg; memory locations
// and immediates are taken care of above.
m_xregs[RX(i)].dirty |= makeDirty;
if (makeDirty)
m_xregs[RX(i)].MakeDirty();
}
if (m_xregs[RX(i)].locked)
{
PanicAlert("Seriously WTF, this reg should have been flushed");
}
ASSERT_MSG(DYNA_REC, !m_xregs[RX(i)].IsLocked(), "WTF, this reg should have been flushed");
}
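BindToRegister keeps the same behaviour but is now phrased through the helper objects: a free (or evicted) host register is claimed with SetBoundTo, loaded only when doLoad is set, and the guest entry records the binding; if the register is already bound, only the dirty bit may change. A simplified, self-contained stand-in for that control flow (the real types are Gen::OpArg/Gen::X64Reg and the load is an emitted MOV/MOVAPD):

#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>

using preg_t = std::size_t;

struct GuestRegSketch { int host = -1; bool away = false; bool IsBound() const { return host >= 0; } };
struct HostRegSketch  { bool free = true; bool dirty = false; preg_t guest = SIZE_MAX; };

struct BindSketch
{
  std::array<GuestRegSketch, 32> regs;
  std::array<HostRegSketch, 16> xregs;

  int GetFreeXReg()
  {
    for (int x = 0; x < 16; x++)
      if (xregs[x].free)
        return x;
    return -1;  // the real code would evict the lowest-scoring register instead
  }

  void BindToRegister(preg_t i, bool doLoad = true, bool makeDirty = true)
  {
    if (!regs[i].IsBound())
    {
      int xr = GetFreeXReg();
      assert(xr >= 0 && !xregs[xr].dirty);  // mirrors the ASSERT_MSGs in the diff
      xregs[xr] = HostRegSketch{/*free=*/false, /*dirty=*/makeDirty || regs[i].away, /*guest=*/i};
      if (doLoad)
      {
        // LoadRegister(i, xr); -- the backend emits the actual load here
      }
      regs[i].host = xr;
      regs[i].away = true;  // m_regs[i].SetBoundTo(xr) in the real code
    }
    else if (makeDirty)
    {
      xregs[regs[i].host].dirty = true;  // already bound: only the dirty bit changes
    }
  }
};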
void RegCache::StoreFromRegister(size_t i, FlushMode mode)
void RegCache::StoreFromRegister(preg_t i, FlushMode mode)
{
if (m_regs[i].away)
bool doStore = false;
switch (m_regs[i].GetLocationType())
{
bool doStore;
if (m_regs[i].location.IsSimpleReg())
{
X64Reg xr = RX(i);
doStore = m_xregs[xr].dirty;
if (mode == FlushMode::All)
{
m_xregs[xr].free = true;
m_xregs[xr].ppcReg = static_cast<size_t>(INVALID_REG);
m_xregs[xr].dirty = false;
}
}
else
{
// must be immediate - do nothing
doStore = true;
}
OpArg newLoc = GetDefaultLocation(i);
if (doStore)
StoreRegister(i, newLoc);
case PPCCachedReg::LocationType::Default:
case PPCCachedReg::LocationType::SpeculativeImmediate:
return;
case PPCCachedReg::LocationType::Bound:
{
X64Reg xr = RX(i);
doStore = m_xregs[xr].IsDirty();
if (mode == FlushMode::All)
{
m_regs[i].location = newLoc;
m_regs[i].away = false;
}
m_xregs[xr].SetFlushed();
break;
}
case PPCCachedReg::LocationType::Immediate:
doStore = true;
break;
}
if (doStore)
StoreRegister(i, GetDefaultLocation(i));
if (mode == FlushMode::All)
m_regs[i].SetFlushed();
}
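StoreFromRegister follows the same classification: nothing happens for Default or SpeculativeImmediate, a bound register is stored only if its host register is dirty, and an immediate is always stored; the binding itself is dropped only when mode == FlushMode::All, so FlushMode::MaintainState keeps the register live. A small sketch of the store decision:

enum class StoreLocationType { Default, SpeculativeImmediate, Bound, Immediate };

// Returns whether StoreFromRegister must emit a store for this register.
// Releasing the host register and resetting the guest entry additionally
// requires FlushMode::All in the real code.
bool NeedsStoreSketch(StoreLocationType type, bool host_reg_dirty)
{
  switch (type)
  {
  case StoreLocationType::Default:
  case StoreLocationType::SpeculativeImmediate:
    return false;  // PPCSTATE already holds the value
  case StoreLocationType::Bound:
    return host_reg_dirty;  // write back only if the host register was modified
  case StoreLocationType::Immediate:
    return true;  // immediates are always newer than PPCSTATE
  }
  return false;
}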
const OpArg& RegCache::R(size_t preg) const
const OpArg& RegCache::R(preg_t preg) const
{
return m_regs[preg].location;
return m_regs[preg].Location();
}
X64Reg RegCache::RX(size_t preg) const
X64Reg RegCache::RX(preg_t preg) const
{
if (IsBound(preg))
return m_regs[preg].location.GetSimpleReg();
PanicAlert("Unbound register - %zu", preg);
return Gen::INVALID_REG;
ASSERT_MSG(DYNA_REC, m_regs[preg].IsBound(), "Unbound register - %zu", preg);
return m_regs[preg].Location().GetSimpleReg();
}
void RegCache::UnlockAll()
{
for (auto& reg : m_regs)
reg.locked = false;
reg.Unlock();
}
void RegCache::UnlockAllX()
{
for (auto& xreg : m_xregs)
xreg.locked = false;
xreg.Unlock();
}
bool RegCache::IsFreeX(size_t xreg) const
{
return m_xregs[xreg].free && !m_xregs[xreg].locked;
}
bool RegCache::IsBound(size_t preg) const
{
return m_regs[preg].away && m_regs[preg].location.IsSimpleReg();
return m_xregs[xreg].IsFree();
}
X64Reg RegCache::GetFreeXReg()
@@ -287,7 +233,7 @@ X64Reg RegCache::GetFreeXReg()
for (size_t i = 0; i < aCount; i++)
{
X64Reg xr = aOrder[i];
if (!m_xregs[xr].locked && m_xregs[xr].free)
if (m_xregs[xr].IsFree())
{
return xr;
}
@@ -301,8 +247,8 @@ X64Reg RegCache::GetFreeXReg()
for (size_t i = 0; i < aCount; i++)
{
X64Reg xreg = (X64Reg)aOrder[i];
size_t preg = m_xregs[xreg].ppcReg;
if (m_xregs[xreg].locked || m_regs[preg].locked)
preg_t preg = m_xregs[xreg].Contents();
if (m_xregs[xreg].IsLocked() || m_regs[preg].IsLocked())
continue;
float score = ScoreRegister(xreg);
if (score < min_score)
@@ -320,7 +266,7 @@ X64Reg RegCache::GetFreeXReg()
}
// Still no dice? Die!
ASSERT_MSG(DYNA_REC, 0, "Regcache ran out of regs");
ASSERT_MSG(DYNA_REC, false, "Regcache ran out of regs");
return INVALID_REG;
}
@@ -330,23 +276,33 @@ int RegCache::NumFreeRegisters() const
size_t aCount;
const X64Reg* aOrder = GetAllocationOrder(&aCount);
for (size_t i = 0; i < aCount; i++)
if (!m_xregs[aOrder[i]].locked && m_xregs[aOrder[i]].free)
if (m_xregs[aOrder[i]].IsFree())
count++;
return count;
}
void RegCache::FlushX(X64Reg reg)
{
ASSERT_MSG(DYNA_REC, reg < m_xregs.size(), "Flushing non-existent reg %i", reg);
ASSERT(!m_xregs[reg].IsLocked());
if (!m_xregs[reg].IsFree())
{
StoreFromRegister(m_xregs[reg].Contents());
}
}
// Estimate roughly how bad it would be to de-allocate this register. Higher score
// means more bad.
float RegCache::ScoreRegister(X64Reg xreg) const
{
size_t preg = m_xregs[xreg].ppcReg;
preg_t preg = m_xregs[xreg].Contents();
float score = 0;
// If it's not dirty, we don't need a store to write it back to the register file, so
// bias a bit against dirty registers. Testing shows that a bias of 2 seems roughly
// right: 3 causes too many extra clobbers, while 1 saves very few clobbers relative
// to the number of extra stores it causes.
if (m_xregs[xreg].dirty)
if (m_xregs[xreg].IsDirty())
score += 2;
// If the register isn't actually needed in a physical register for a later instruction,
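GetFreeXReg first looks for a free register in allocation order; when none exists it evicts the unlocked register with the lowest ScoreRegister value. The scoring is unchanged here apart from the accessor rename, but as a rough, hedged illustration of the heuristic (the lookahead weighting below is a stand-in for the real CountRegsIn()/GetRegUtilization() logic):

float EvictionScoreSketch(bool dirty, int estimated_uses_ahead)
{
  float score = 0.0f;
  // A dirty register costs an extra store to PPCSTATE when evicted; the diff's
  // comment explains why a bias of 2 was picked empirically.
  if (dirty)
    score += 2.0f;
  // Registers that are needed again soon should score higher so they stay resident.
  score += static_cast<float>(estimated_uses_ahead);
  return score;
}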


@@ -7,24 +7,118 @@
#include <array>
#include <cinttypes>
#include "Common/Assert.h"
#include "Common/x64Emitter.h"
#include "Core/PowerPC/PPCAnalyst.h"
class Jit64;
struct PPCCachedReg
using preg_t = size_t;
class PPCCachedReg
{
Gen::OpArg location;
bool away; // value not in source register
bool locked;
public:
enum class LocationType
{
/// Value is currently at its default location
Default,
/// Value is currently bound to a x64 register
Bound,
/// Value is known as an immediate and has not been written back to its default location
Immediate,
/// Value is known as an immediate and is already present at its default location
SpeculativeImmediate,
};
PPCCachedReg() = default;
explicit PPCCachedReg(Gen::OpArg default_location_)
: default_location(default_location_), location(default_location_)
{
}
const Gen::OpArg& Location() const { return location; }
LocationType GetLocationType() const
{
if (!away)
{
if (location.IsImm())
return LocationType::SpeculativeImmediate;
ASSERT(location == default_location);
return LocationType::Default;
}
ASSERT(location.IsImm() || location.IsSimpleReg());
return location.IsImm() ? LocationType::Immediate : LocationType::Bound;
}
bool IsAway() const { return away; }
bool IsBound() const { return GetLocationType() == LocationType::Bound; }
void SetBoundTo(Gen::X64Reg xreg)
{
away = true;
location = Gen::R(xreg);
}
void SetFlushed()
{
away = false;
location = default_location;
}
void SetToImm32(u32 imm32, bool dirty = true)
{
away |= dirty;
location = Gen::Imm32(imm32);
}
bool IsLocked() const { return locked; }
void Lock() { locked = true; }
void Unlock() { locked = false; }
private:
Gen::OpArg default_location{};
Gen::OpArg location{};
bool away = false; // value not in source register
bool locked = false;
};
struct X64CachedReg
class X64CachedReg
{
size_t ppcReg;
bool dirty;
bool free;
bool locked;
public:
preg_t Contents() const { return ppcReg; }
void SetBoundTo(preg_t ppcReg_, bool dirty_)
{
free = false;
ppcReg = ppcReg_;
dirty = dirty_;
}
void SetFlushed()
{
ppcReg = static_cast<preg_t>(Gen::INVALID_REG);
free = true;
dirty = false;
}
bool IsFree() const { return free && !locked; }
bool IsDirty() const { return dirty; }
void MakeDirty() { dirty = true; }
bool IsLocked() const { return locked; }
void Lock() { locked = true; }
void Unlock() { locked = false; }
private:
preg_t ppcReg = static_cast<preg_t>(Gen::INVALID_REG);
bool free = true;
bool dirty = false;
bool locked = false;
};
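The new PPCCachedReg derives its four LocationType states from just two facts: whether the value is away from PPCSTATE, and what kind of operand currently describes it. A compilable restatement of GetLocationType() with the Gen::OpArg queries reduced to booleans:

#include <cassert>

enum class GuestLocationType { Default, SpeculativeImmediate, Bound, Immediate };

GuestLocationType ClassifySketch(bool away, bool is_imm, bool is_simple_reg)
{
  if (!away)
    return is_imm ? GuestLocationType::SpeculativeImmediate : GuestLocationType::Default;
  assert(is_imm || is_simple_reg);  // an "away" value is either an immediate or a host register
  return is_imm ? GuestLocationType::Immediate : GuestLocationType::Bound;
}

For example, ClassifySketch(false, true, false) yields SpeculativeImmediate: a constant the JIT has folded in but that already matches what PPCSTATE holds, which is why Flush() never needs to store it.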
class RegCache
@@ -41,33 +135,28 @@ public:
explicit RegCache(Jit64& jit);
virtual ~RegCache() = default;
virtual void StoreRegister(size_t preg, const Gen::OpArg& new_loc) = 0;
virtual void LoadRegister(size_t preg, Gen::X64Reg new_loc) = 0;
virtual Gen::OpArg GetDefaultLocation(size_t reg) const = 0;
virtual Gen::OpArg GetDefaultLocation(preg_t preg) const = 0;
void Start();
void DiscardRegContentsIfCached(size_t preg);
void DiscardRegContentsIfCached(preg_t preg);
void SetEmitter(Gen::XEmitter* emitter);
void Flush(FlushMode mode = FlushMode::All, BitSet32 regsToFlush = BitSet32::AllTrue(32));
void FlushR(Gen::X64Reg reg);
void FlushR(Gen::X64Reg reg, Gen::X64Reg reg2);
void FlushLockX(Gen::X64Reg reg);
void FlushLockX(Gen::X64Reg reg1, Gen::X64Reg reg2);
int SanityCheck() const;
void KillImmediate(size_t preg, bool doLoad, bool makeDirty);
bool SanityCheck() const;
void KillImmediate(preg_t preg, bool doLoad, bool makeDirty);
// TODO - instead of doload, use "read", "write"
// read only will not set dirty flag
void BindToRegister(size_t preg, bool doLoad = true, bool makeDirty = true);
void StoreFromRegister(size_t preg, FlushMode mode = FlushMode::All);
void BindToRegister(preg_t preg, bool doLoad = true, bool makeDirty = true);
void StoreFromRegister(preg_t preg, FlushMode mode = FlushMode::All);
const Gen::OpArg& R(size_t preg) const;
Gen::X64Reg RX(size_t preg) const;
const Gen::OpArg& R(preg_t preg) const;
Gen::X64Reg RX(preg_t preg) const;
// Register locking.
@@ -75,7 +164,7 @@ public:
template <typename T>
void Lock(T p)
{
m_regs[p].locked = true;
m_regs[p].Lock();
}
template <typename T, typename... Args>
void Lock(T first, Args... args)
@@ -88,9 +177,9 @@ public:
template <typename T>
void LockX(T x)
{
if (m_xregs[x].locked)
if (m_xregs[x].IsLocked())
PanicAlert("RegCache: x %i already locked!", x);
m_xregs[x].locked = true;
m_xregs[x].Lock();
}
template <typename T, typename... Args>
void LockX(T first, Args... args)
@@ -102,9 +191,9 @@ public:
template <typename T>
void UnlockX(T x)
{
if (!m_xregs[x].locked)
if (!m_xregs[x].IsLocked())
PanicAlert("RegCache: x %i already unlocked!", x);
m_xregs[x].locked = false;
m_xregs[x].Unlock();
}
template <typename T, typename... Args>
void UnlockX(T first, Args... args)
@@ -117,16 +206,20 @@ public:
void UnlockAllX();
bool IsFreeX(size_t xreg) const;
bool IsBound(size_t preg) const;
Gen::X64Reg GetFreeXReg();
int NumFreeRegisters() const;
protected:
virtual void StoreRegister(preg_t preg, const Gen::OpArg& new_loc) = 0;
virtual void LoadRegister(preg_t preg, Gen::X64Reg new_loc) = 0;
virtual const Gen::X64Reg* GetAllocationOrder(size_t* count) const = 0;
virtual BitSet32 GetRegUtilization() const = 0;
virtual BitSet32 CountRegsIn(size_t preg, u32 lookahead) const = 0;
virtual BitSet32 CountRegsIn(preg_t preg, u32 lookahead) const = 0;
void FlushX(Gen::X64Reg reg);
float ScoreRegister(Gen::X64Reg xreg) const;


@@ -460,7 +460,7 @@ void Jit64::fmrx(UGeckoInstruction inst)
fpr.Lock(b, d);
if (fpr.IsBound(d))
if (fpr.R(d).IsSimpleReg())
{
// We don't need to load d, but if it is loaded, we need to mark it as dirty.
fpr.BindToRegister(d);
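Because IsBound(preg) was dropped from RegCache's public interface, call sites such as fmrx now ask the cached operand directly; the two checks are equivalent, since a register is bound exactly when its cached location is a simple host register. A tiny stand-in for the replacement test:

struct OpArgQuerySketch
{
  bool simple_reg = false;
  bool IsSimpleReg() const { return simple_reg; }
};

// Old call site: if (fpr.IsBound(d)) ...
// New call site: if (fpr.R(d).IsSimpleReg()) ...
bool IsBoundEquivalentSketch(const OpArgQuerySketch& cached_location)
{
  return cached_location.IsSimpleReg();
}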