CPU/Recompiler: Move branch codegen to base class
This commit is contained in:
parent aabe5b9287
commit 29355bc44d
@@ -747,7 +747,6 @@ Value CodeGenerator::XorValues(const Value& lhs, const Value& rhs)
  if (lhs.IsInHostRegister())
  {
    EmitXor(res.host_reg, lhs.host_reg, rhs);

  }
  else
  {

@@ -789,6 +788,38 @@ Value CodeGenerator::NotValue(const Value& val)
  return res;
}

void CodeGenerator::GenerateExceptionExit(Exception excode, Condition condition /* = Condition::Always */)
{
  if (condition == Condition::Always)
  {
    // no need to use far code if we're always raising the exception
    m_register_cache.InvalidateGuestRegister(Reg::pc);
    m_register_cache.FlushAllGuestRegisters(true, true);
    m_register_cache.FlushLoadDelay(true);

    EmitFunctionCall(nullptr, &Thunks::RaiseException, m_register_cache.GetCPUPtr(),
                     Value::FromConstantU8(static_cast<u8>(excode)));
    return;
  }

  LabelType skip_exception;
  EmitConditionalBranch(condition, true, &skip_exception);

  m_register_cache.PushState();

  EmitBranch(GetCurrentFarCodePointer());

  SwitchToFarCode();
  EmitFunctionCall(nullptr, &Thunks::RaiseException, m_register_cache.GetCPUPtr(),
                   Value::FromConstantU8(static_cast<u8>(excode)));
  EmitExceptionExit();
  SwitchToNearCode();

  m_register_cache.PopState();

  EmitBindLabel(&skip_exception);
}
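The conditional path of GenerateExceptionExit keeps the hot path short: an inverted test skips over a jump into the far-code buffer, and the RaiseException call plus block exit are emitted in that far buffer. Below is a standalone sketch of that layout; NearFarEmitter and its string "ops" are made up for illustration and are not the project's emitter API.

```cpp
#include <cstdio>
#include <string>
#include <vector>

// Toy model of the near/far code split: the common path stays in the "near"
// stream, the exception raise and block exit go to the "far" stream.
struct NearFarEmitter
{
  std::vector<std::string> near_code;
  std::vector<std::string> far_code;

  void GenerateExceptionExit(bool always, const std::string& condition, const std::string& excode)
  {
    if (always)
    {
      // No far code needed when the exception is raised unconditionally.
      near_code.push_back("call RaiseException(" + excode + ")");
      return;
    }

    // Inverted test skips the raise on the common path.
    near_code.push_back("branch_if_not " + condition + " -> skip_exception");
    near_code.push_back("jump far_" + excode);
    far_code.push_back("far_" + excode + ":");
    far_code.push_back("  call RaiseException(" + excode + ")");
    far_code.push_back("  exit_block");
    near_code.push_back("skip_exception:");
  }
};

int main()
{
  NearFarEmitter e;
  e.GenerateExceptionExit(false, "overflow", "Ov");
  std::puts("-- near --");
  for (const std::string& s : e.near_code)
    std::puts(s.c_str());
  std::puts("-- far --");
  for (const std::string& s : e.far_code)
    std::puts(s.c_str());
  return 0;
}
```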

void CodeGenerator::BlockPrologue()
{
  EmitStoreCPUStructField(offsetof(Core, m_exception_raised), Value::FromConstantU8(0));

@@ -1203,7 +1234,7 @@ bool CodeGenerator::Compile_Add(const CodeBlockInstruction& cbi)

  Value result = AddValues(lhs, rhs, check_overflow);
  if (check_overflow)
    EmitRaiseException(Exception::Ov, Condition::Overflow);
    GenerateExceptionExit(Exception::Ov, Condition::Overflow);

  m_register_cache.WriteGuestRegister(dest, std::move(result));

@@ -1223,7 +1254,7 @@ bool CodeGenerator::Compile_Subtract(const CodeBlockInstruction& cbi)

  Value result = SubValues(lhs, rhs, check_overflow);
  if (check_overflow)
    EmitRaiseException(Exception::Ov, Condition::Overflow);
    GenerateExceptionExit(Exception::Ov, Condition::Overflow);

  m_register_cache.WriteGuestRegister(cbi.instruction.r.rd, std::move(result));

@@ -1291,6 +1322,92 @@ bool CodeGenerator::Compile_Branch(const CodeBlockInstruction& cbi)
{
  InstructionPrologue(cbi, 1);

  auto DoBranch = [this](Condition condition, const Value& lhs, const Value& rhs, Reg lr_reg, Value&& branch_target) {
    // ensure the lr register is flushed, since we want its correct value after the branch
    // we don't want to invalidate it yet because of "jalr r0, r0", branch_target could be the lr_reg.
    if (lr_reg != Reg::count && lr_reg != Reg::zero)
      m_register_cache.FlushGuestRegister(lr_reg, false, true);

    // compute return address, which is also set as the new pc when the branch isn't taken
    Value new_pc;
    if (condition != Condition::Always || lr_reg != Reg::count)
    {
      new_pc = AddValues(m_register_cache.ReadGuestRegister(Reg::pc), Value::FromConstantU32(4), false);
      if (!new_pc.IsInHostRegister())
        new_pc = GetValueInHostRegister(new_pc);
    }

    LabelType skip_branch;
    if (condition != Condition::Always)
    {
      // condition is inverted because we want the case for skipping it
      if (lhs.IsValid() && rhs.IsValid())
        EmitConditionalBranch(condition, true, lhs.host_reg, rhs, &skip_branch);
      else if (lhs.IsValid())
        EmitConditionalBranch(condition, true, lhs.host_reg, lhs.size, &skip_branch);
      else
        EmitConditionalBranch(condition, true, &skip_branch);
    }

    // save the old PC if we want to
    if (lr_reg != Reg::count && lr_reg != Reg::zero)
    {
      // Can't cache because we have two branches. Load delay cancel is due to the immediate flush afterwards;
      // if we don't cancel it, at the end of the instruction the value we write can be overridden.
      EmitCancelInterpreterLoadDelayForReg(lr_reg);
      EmitStoreGuestRegister(lr_reg, new_pc);
    }

    // we don't need to test the address of constant branches unless they're definitely misaligned, which would be
    // strange.
    if (!branch_target.IsConstant() || (branch_target.constant_value & 0x3) != 0)
    {
      LabelType branch_okay;

      if (branch_target.IsConstant())
      {
        Log_WarningPrintf("Misaligned constant target branch 0x%08X, this is strange",
                          Truncate32(branch_target.constant_value));
      }
      else
      {
        // check the alignment of the target
        EmitTest(branch_target.host_reg, Value::FromConstantU32(0x3));
        EmitConditionalBranch(Condition::Zero, false, &branch_okay);
      }

      // exception exit for misaligned target
      m_register_cache.PushState();
      EmitBranch(GetCurrentFarCodePointer());
      EmitBindLabel(&branch_okay);

      SwitchToFarCode();
      EmitFunctionCall(nullptr, &Thunks::RaiseAddressException, m_register_cache.GetCPUPtr(), branch_target,
                       Value::FromConstantU8(0), Value::FromConstantU8(1));
      EmitExceptionExit();
      SwitchToNearCode();

      m_register_cache.PopState();
    }

    // branch taken path - change the return address/new pc
    if (condition != Condition::Always)
      EmitCopyValue(new_pc.GetHostRegister(), branch_target);

    // converge point
    EmitBindLabel(&skip_branch);

    // update pc
    if (condition != Condition::Always)
      m_register_cache.WriteGuestRegister(Reg::pc, std::move(new_pc));
    else
      m_register_cache.WriteGuestRegister(Reg::pc, std::move(branch_target));

    // now invalidate lr because it was possibly written in the branch, and we don't need branch_target anymore
    if (lr_reg != Reg::count && lr_reg != Reg::zero)
      m_register_cache.InvalidateGuestRegister(lr_reg);
  };
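DoBranch is the host-independent replacement for the per-backend EmitBranch: compute the return address, conditionally skip, check target alignment, then converge and update pc. The following is a minimal guest-level model of those semantics in plain C++, with no code generation; it assumes Reg::pc already refers to the delay-slot instruction at this point, and all names are illustrative.

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <optional>

// Guest-level model of the branch: given the pc of the delay slot, the branch
// outcome and an optional link, compute the next pc and the linked value.
struct BranchResult
{
  uint32_t next_pc;
  std::optional<uint32_t> link_value; // written to the link register when linking
  bool address_error;                 // misaligned taken target raises an address exception
};

static BranchResult DoBranch(uint32_t delay_slot_pc, bool taken, bool link, uint32_t branch_target)
{
  BranchResult r{};
  const uint32_t not_taken_pc = delay_slot_pc + 4; // also the return address
  if (link)
    r.link_value = not_taken_pc; // for the and-link forms this is written regardless of the outcome

  r.next_pc = taken ? branch_target : not_taken_pc;
  r.address_error = taken && (branch_target & 0x3u) != 0;
  return r;
}

int main()
{
  // bne taken to 0x80001000, no link
  const BranchResult a = DoBranch(0x80000004u, true, false, 0x80001000u);
  assert(a.next_pc == 0x80001000u && !a.link_value && !a.address_error);

  // jal: always taken, links the address after the delay slot
  const BranchResult b = DoBranch(0x80000004u, true, true, 0x80002000u);
  assert(b.next_pc == 0x80002000u && b.link_value.value() == 0x80000008u);

  std::puts("branch model ok");
  return 0;
}
```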

  // Compute the branch target.
  // This depends on the form of the instruction.
  switch (cbi.instruction.op)

@@ -1303,8 +1420,8 @@ bool CodeGenerator::Compile_Branch(const CodeBlockInstruction& cbi)
        OrValues(AndValues(m_register_cache.ReadGuestRegister(Reg::pc), Value::FromConstantU32(0xF0000000)),
                 Value::FromConstantU32(cbi.instruction.j.target << 2));

      EmitBranch(Condition::Always, (cbi.instruction.op == InstructionOp::jal) ? Reg::ra : Reg::count,
                 std::move(branch_target));
      DoBranch(Condition::Always, Value(), Value(), (cbi.instruction.op == InstructionOp::jal) ? Reg::ra : Reg::count,
               std::move(branch_target));
    }
    break;
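For j/jal, the OrValues/AndValues expression keeps the top four bits of the current pc and substitutes the 26-bit target field shifted left by two. A tiny standalone check of that formula (the helper name is hypothetical):

```cpp
#include <cassert>
#include <cstdint>

// J-type target: the top 4 bits come from the pc in the jump's region, the low
// 28 bits from the 26-bit target field shifted left by 2 (word alignment).
static uint32_t JumpTarget(uint32_t pc, uint32_t target_field_26)
{
  return (pc & 0xF0000000u) | (target_field_26 << 2);
}

int main()
{
  assert(JumpTarget(0x80000004u, 0x0000400u) == 0x80001000u);
  assert(JumpTarget(0xBFC00004u, 0x3F00000u) == 0xBFC00000u);
  return 0;
}
```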

@@ -1314,16 +1431,16 @@ bool CodeGenerator::Compile_Branch(const CodeBlockInstruction& cbi)
    {
      // npc = rs, link to rt
      Value branch_target = m_register_cache.ReadGuestRegister(cbi.instruction.r.rs);
      EmitBranch(Condition::Always,
                 (cbi.instruction.r.funct == InstructionFunct::jalr) ? cbi.instruction.r.rd : Reg::count,
                 std::move(branch_target));
      DoBranch(Condition::Always, Value(), Value(),
               (cbi.instruction.r.funct == InstructionFunct::jalr) ? cbi.instruction.r.rd : Reg::count,
               std::move(branch_target));
    }
    else if (cbi.instruction.r.funct == InstructionFunct::syscall ||
             cbi.instruction.r.funct == InstructionFunct::break_)
    {
      const Exception excode =
        (cbi.instruction.r.funct == InstructionFunct::syscall) ? Exception::Syscall : Exception::BP;
      EmitRaiseException(excode);
      GenerateExceptionExit(excode);
    }
    else
    {

@@ -1342,10 +1459,8 @@ bool CodeGenerator::Compile_Branch(const CodeBlockInstruction& cbi)
      // branch <- rs op rt
      Value lhs = m_register_cache.ReadGuestRegister(cbi.instruction.i.rs, true, true);
      Value rhs = m_register_cache.ReadGuestRegister(cbi.instruction.i.rt);
      EmitCmp(lhs.host_reg, rhs);

      const Condition condition = (cbi.instruction.op == InstructionOp::beq) ? Condition::Equal : Condition::NotEqual;
      EmitBranch(condition, Reg::count, std::move(branch_target));
      DoBranch(condition, lhs, rhs, Reg::count, std::move(branch_target));
    }
    break;

@@ -1358,11 +1473,10 @@ bool CodeGenerator::Compile_Branch(const CodeBlockInstruction& cbi)

      // branch <- rs op 0
      Value lhs = m_register_cache.ReadGuestRegister(cbi.instruction.i.rs, true, true);
      EmitCmp(lhs.host_reg, Value::FromConstantU32(0));

      const Condition condition =
        (cbi.instruction.op == InstructionOp::bgtz) ? Condition::Greater : Condition::LessEqual;
      EmitBranch(condition, Reg::count, std::move(branch_target));
      DoBranch(condition, lhs, Value::FromConstantU32(0), Reg::count, std::move(branch_target));
    }
    break;

@@ -1378,9 +1492,7 @@ bool CodeGenerator::Compile_Branch(const CodeBlockInstruction& cbi)
      const bool link = (rt & u8(0x1E)) == u8(0x10);

      // Read has to happen before the link as the compare can use ra.
      // This is a little dangerous since lhs can get freed, but there aren't any allocations in between here and the
      // test so it shouldn't be an issue.
      Value lhs = m_register_cache.ReadGuestRegister(cbi.instruction.i.rs, true, true);
      Value lhs = m_register_cache.ReadGuestRegisterToScratch(cbi.instruction.i.rs);

      // The return address is always written if link is set, regardless of whether the branch is taken.
      if (link)

@@ -1390,8 +1502,7 @@ bool CodeGenerator::Compile_Branch(const CodeBlockInstruction& cbi)
          Reg::ra, AddValues(m_register_cache.ReadGuestRegister(Reg::pc), Value::FromConstantU32(4), false));
      }

      EmitTest(lhs.host_reg, lhs);
      EmitBranch(condition, Reg::count, std::move(branch_target));
      DoBranch(condition, lhs, Value(), Reg::count, std::move(branch_target));
    }
    break;


@@ -82,13 +82,12 @@ public:
  void EmitBranch(const void* address, bool allow_scratch = true);

  // Branching, generates two paths.
  void EmitBranch(Condition condition, Reg lr_reg, Value&& branch_target);
  void EmitConditionalBranch(Condition condition, bool invert, HostReg value, RegSize size, LabelType* label);
  void EmitConditionalBranch(Condition condition, bool invert, HostReg lhs, const Value& rhs, LabelType* label);
  void EmitConditionalBranch(Condition condition, bool invert, LabelType* label);
  void EmitBranchIfBitClear(HostReg reg, RegSize size, u8 bit, LabelType* label);
  void EmitBindLabel(LabelType* label);

  // Raising exception if condition is true.
  void EmitRaiseException(Exception excode, Condition condition = Condition::Always);

  u32 PrepareStackForCall();
  void RestoreStackAfterCall(u32 adjust_size);

@@ -148,6 +147,9 @@ public:
  Value XorValues(const Value& lhs, const Value& rhs);
  Value NotValue(const Value& val);

  // Raising exception if condition is true.
  void GenerateExceptionExit(Exception excode, Condition condition = Condition::Always);

private:
  // Host register setup
  void InitHostRegs();


@@ -1408,155 +1408,211 @@ void CodeGenerator::EmitBranch(const void* address, bool allow_scratch)
  m_emit->br(GetHostReg64(temp));
}

template<typename T>
static void EmitConditionalJump(Condition condition, bool invert, a64::MacroAssembler* emit, const T& label)
static a64::Condition TranslateCondition(Condition condition, bool invert)
{
  switch (condition)
  {
    case Condition::Always:
      emit->b(label);
      break;
      return a64::nv;

    case Condition::NotEqual:
      invert ? emit->b(label, a64::eq) : emit->b(label, a64::ne);
      break;
    case Condition::NotZero:
      return invert ? a64::eq : a64::ne;

    case Condition::Equal:
      invert ? emit->b(label, a64::ne) : emit->b(label, a64::eq);
      break;
    case Condition::Zero:
      return invert ? a64::ne : a64::eq;

    case Condition::Overflow:
      invert ? emit->b(label, a64::vc) : emit->b(label, a64::vs);
      break;
      return invert ? a64::vc : a64::vs;

    case Condition::Greater:
      invert ? emit->b(label, a64::le) : emit->b(label, a64::gt);
      break;
      return invert ? a64::le : a64::gt;

    case Condition::GreaterEqual:
      invert ? emit->b(label, a64::lt) : emit->b(label, a64::ge);
      break;
      return invert ? a64::lt : a64::ge;

    case Condition::Less:
      invert ? emit->b(label, a64::ge) : emit->b(label, a64::lt);
      break;
      return invert ? a64::ge : a64::lt;

    case Condition::LessEqual:
      invert ? emit->b(label, a64::gt) : emit->b(label, a64::le);
      break;
      return invert ? a64::gt : a64::le;

    case Condition::Negative:
      invert ? emit->b(label, a64::pl) : emit->b(label, a64::mi);
      break;
      return invert ? a64::pl : a64::mi;

    case Condition::PositiveOrZero:
      invert ? emit->b(label, a64::mi) : emit->b(label, a64::pl);
      break;
      return invert ? a64::mi : a64::pl;

    case Condition::Above:
      invert ? emit->b(label, a64::ls) : emit->b(label, a64::hi);
      break;
      return invert ? a64::ls : a64::hi;

    case Condition::AboveEqual:
      invert ? emit->b(label, a64::cc) : emit->b(label, a64::cs);
      break;
      return invert ? a64::cc : a64::cs;

    case Condition::Below:
      invert ? emit->b(label, a64::cs) : emit->b(label, a64::cc);
      break;
      return invert ? a64::cs : a64::cc;

    case Condition::BelowEqual:
      invert ? emit->b(label, a64::hi) : emit->b(label, a64::ls);
      break;
      return invert ? a64::hi : a64::ls;

    default:
      UnreachableCode();
      break;
      return a64::nv;
  }
}
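TranslateCondition collapses the old per-case branch emission into a single condition-code lookup, with the inverted form being the complementary AArch64 condition. The sketch below mirrors part of that table using mnemonic strings instead of the a64 constants so it compiles without the emitter; it is an illustration, not the backend code.

```cpp
#include <cassert>
#include <cstring>

enum class Condition { NotZero, Zero, Overflow, Greater, GreaterEqual, Less, LessEqual,
                       Negative, PositiveOrZero, Above, AboveEqual, Below, BelowEqual };

// Each condition maps to an A64 condition mnemonic; "invert" picks the complement.
static const char* TranslateCondition(Condition cc, bool invert)
{
  switch (cc)
  {
    case Condition::NotZero:        return invert ? "eq" : "ne";
    case Condition::Zero:           return invert ? "ne" : "eq";
    case Condition::Overflow:       return invert ? "vc" : "vs";
    case Condition::Greater:        return invert ? "le" : "gt";
    case Condition::GreaterEqual:   return invert ? "lt" : "ge";
    case Condition::Less:           return invert ? "ge" : "lt";
    case Condition::LessEqual:      return invert ? "gt" : "le";
    case Condition::Negative:       return invert ? "pl" : "mi";
    case Condition::PositiveOrZero: return invert ? "mi" : "pl";
    case Condition::Above:          return invert ? "ls" : "hi";
    case Condition::AboveEqual:     return invert ? "cc" : "cs";
    case Condition::Below:          return invert ? "cs" : "cc";
    case Condition::BelowEqual:     return invert ? "hi" : "ls";
  }
  return "nv";
}

int main()
{
  assert(std::strcmp(TranslateCondition(Condition::Overflow, false), "vs") == 0);
  assert(std::strcmp(TranslateCondition(Condition::Overflow, true), "vc") == 0);
  assert(std::strcmp(TranslateCondition(Condition::Below, true), "cs") == 0);
  return 0;
}
```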

void CodeGenerator::EmitBranch(Condition condition, Reg lr_reg, Value&& branch_target)
void CodeGenerator::EmitConditionalBranch(Condition condition, bool invert, HostReg value, RegSize size,
                                          LabelType* label)
{
  // ensure the lr register is flushed, since we want its correct value after the branch
  // we don't want to invalidate it yet because of "jalr r0, r0", branch_target could be the lr_reg.
  if (lr_reg != Reg::count && lr_reg != Reg::zero)
    m_register_cache.FlushGuestRegister(lr_reg, false, true);

  // compute return address, which is also set as the new pc when the branch isn't taken
  Value new_pc;
  if (condition != Condition::Always || lr_reg != Reg::count)
  switch (condition)
  {
    new_pc = AddValues(m_register_cache.ReadGuestRegister(Reg::pc), Value::FromConstantU32(4), false);
    if (!new_pc.IsInHostRegister())
      new_pc = GetValueInHostRegister(new_pc);
  }
    case Condition::NotEqual:
    case Condition::Equal:
    case Condition::Overflow:
    case Condition::Greater:
    case Condition::GreaterEqual:
    case Condition::LessEqual:
    case Condition::Less:
    case Condition::Above:
    case Condition::AboveEqual:
    case Condition::Below:
    case Condition::BelowEqual:
      Panic("Needs a comparison value");
      return;

  a64::Label skip_branch;
  if (condition != Condition::Always)
  {
    // condition is inverted because we want the case for skipping it
    EmitConditionalJump(condition, true, m_emit, &skip_branch);
  }

  // save the old PC if we want to
  if (lr_reg != Reg::count && lr_reg != Reg::zero)
  {
    // Can't cache because we have two branches. Load delay cancel is due to the immediate flush afterwards;
    // if we don't cancel it, at the end of the instruction the value we write can be overridden.
    EmitCancelInterpreterLoadDelayForReg(lr_reg);
    EmitStoreGuestRegister(lr_reg, new_pc);
  }

  // we don't need to test the address of constant branches unless they're definitely misaligned, which would be
  // strange.
  if (!branch_target.IsConstant() || (branch_target.constant_value & 0x3) != 0)
  {
    m_register_cache.PushState();

    if (branch_target.IsConstant())
    case Condition::Negative:
    case Condition::PositiveOrZero:
    {
      Log_WarningPrintf("Misaligned constant target branch 0x%08X, this is strange",
                        Truncate32(branch_target.constant_value));
    }
    else
    {
      // check the alignment of the target
      a64::Label branch_target_okay;
      m_emit->tst(GetHostReg32(branch_target), 0x3);
      m_emit->B(a64::eq, &branch_target_okay);
      switch (size)
      {
        case RegSize_8:
          m_emit->tst(GetHostReg8(value), GetHostReg8(value));
          break;
        case RegSize_16:
          m_emit->tst(GetHostReg16(value), GetHostReg16(value));
          break;
        case RegSize_32:
          m_emit->tst(GetHostReg32(value), GetHostReg32(value));
          break;
        case RegSize_64:
          m_emit->tst(GetHostReg64(value), GetHostReg64(value));
          break;
        default:
          UnreachableCode();
          break;
      }

      Value far_code_addr = m_register_cache.AllocateScratch(RegSize_64);
      m_emit->Mov(GetHostReg64(far_code_addr), reinterpret_cast<intptr_t>(GetCurrentFarCodePointer()));
      m_emit->br(GetHostReg64(far_code_addr));
      m_emit->Bind(&branch_target_okay);
      EmitConditionalBranch(condition, invert, label);
      return;
    }

    // exception exit for misaligned target
    SwitchToFarCode();
    EmitFunctionCall(nullptr, &Thunks::RaiseAddressException, m_register_cache.GetCPUPtr(), branch_target,
                     Value::FromConstantU8(0), Value::FromConstantU8(1));
    EmitExceptionExit();
    SwitchToNearCode();
    case Condition::NotZero:
    {
      switch (size)
      {
        case RegSize_8:
          m_emit->cbnz(GetHostReg8(value), label);
          break;
        case RegSize_16:
          m_emit->cbnz(GetHostReg16(value), label);
          break;
        case RegSize_32:
          m_emit->cbnz(GetHostReg32(value), label);
          break;
        case RegSize_64:
          m_emit->cbnz(GetHostReg64(value), label);
          break;
        default:
          UnreachableCode();
          break;
      }

      m_register_cache.PopState();
      return;
    }

    case Condition::Zero:
    {
      switch (size)
      {
        case RegSize_8:
          m_emit->cbz(GetHostReg8(value), label);
          break;
        case RegSize_16:
          m_emit->cbz(GetHostReg16(value), label);
          break;
        case RegSize_32:
          m_emit->cbz(GetHostReg32(value), label);
          break;
        case RegSize_64:
          m_emit->cbz(GetHostReg64(value), label);
          break;
        default:
          UnreachableCode();
          break;
      }

      return;
    }

    case Condition::Always:
      m_emit->b(label);
      return;

    default:
      UnreachableCode();
      return;
  }
}

  // branch taken path - change the return address/new pc
  if (condition != Condition::Always)
    EmitCopyValue(new_pc.GetHostRegister(), branch_target);
void CodeGenerator::EmitConditionalBranch(Condition condition, bool invert, HostReg lhs, const Value& rhs,
                                          LabelType* label)
{
  switch (condition)
  {
    case Condition::NotEqual:
    case Condition::Equal:
    case Condition::Overflow:
    case Condition::Greater:
    case Condition::GreaterEqual:
    case Condition::LessEqual:
    case Condition::Less:
    case Condition::Above:
    case Condition::AboveEqual:
    case Condition::Below:
    case Condition::BelowEqual:
    {
      EmitCmp(lhs, rhs);
      EmitConditionalBranch(condition, invert, label);
      return;
    }

  // converge point
  m_emit->Bind(&skip_branch);
    case Condition::Negative:
    case Condition::PositiveOrZero:
    case Condition::NotZero:
    case Condition::Zero:
    {
      Assert(!rhs.IsValid() || (rhs.IsConstant() && rhs.GetS64ConstantValue() == 0));
      EmitConditionalBranch(condition, invert, lhs, rhs.size, label);
      return;
    }

  // update pc
  if (condition != Condition::Always)
    m_register_cache.WriteGuestRegister(Reg::pc, std::move(new_pc));
    case Condition::Always:
      m_emit->b(label);
      return;

    default:
      UnreachableCode();
      return;
  }
}

void CodeGenerator::EmitConditionalBranch(Condition condition, bool invert, LabelType* label)
{
  if (condition == Condition::Always)
    m_emit->b(label);
  else
    m_register_cache.WriteGuestRegister(Reg::pc, std::move(branch_target));

  // now invalidate lr because it was possibly written in the branch, and we don't need branch_target anymore
  if (lr_reg != Reg::count && lr_reg != Reg::zero)
    m_register_cache.InvalidateGuestRegister(lr_reg);
    m_emit->b(label, TranslateCondition(condition, invert));
}

void CodeGenerator::EmitBranchIfBitClear(HostReg reg, RegSize size, u8 bit, LabelType* label)

@@ -1580,42 +1636,6 @@ void CodeGenerator::EmitBindLabel(LabelType* label)
  m_emit->Bind(label);
}

void CodeGenerator::EmitRaiseException(Exception excode, Condition condition /* = Condition::Always */)
{
  if (condition == Condition::Always)
  {
    // no need to use far code if we're always raising the exception
    m_register_cache.InvalidateGuestRegister(Reg::pc);
    m_register_cache.FlushAllGuestRegisters(true, true);
    m_register_cache.FlushLoadDelay(true);

    EmitFunctionCall(nullptr, &Thunks::RaiseException, m_register_cache.GetCPUPtr(),
                     Value::FromConstantU8(static_cast<u8>(excode)));
    return;
  }

  a64::Label skip_raise_exception;
  EmitConditionalJump(condition, true, m_emit, &skip_raise_exception);

  m_register_cache.PushState();

  {
    Value far_code_addr = m_register_cache.AllocateScratch(RegSize_64);
    m_emit->Mov(GetHostReg64(far_code_addr), reinterpret_cast<intptr_t>(GetCurrentFarCodePointer()));
    m_emit->Br(GetHostReg64(far_code_addr));
  }

  m_emit->Bind(&skip_raise_exception);

  SwitchToFarCode();
  EmitFunctionCall(nullptr, &Thunks::RaiseException, m_register_cache.GetCPUPtr(),
                   Value::FromConstantU8(static_cast<u8>(excode)));
  EmitExceptionExit();
  SwitchToNearCode();

  m_register_cache.PopState();
}

void ASMFunctions::Generate(JitCodeBuffer* code_buffer) {}

} // namespace CPU::Recompiler


@@ -1794,7 +1794,9 @@ void CodeGenerator::EmitCancelInterpreterLoadDelayForReg(Reg reg)

void CodeGenerator::EmitBranch(const void* address, bool allow_scratch)
{
  if (Xbyak::inner::IsInInt32(reinterpret_cast<uintptr_t>(address)))
  const s64 jump_distance =
    static_cast<s64>(reinterpret_cast<intptr_t>(address) - reinterpret_cast<intptr_t>(GetCurrentCodePointer()));
  if (Xbyak::inner::IsInInt32(static_cast<u64>(jump_distance)))
  {
    m_emit->jmp(address);
    return;
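The rewritten check measures the displacement from the current emit position rather than testing the absolute address, because a rel32 jmp encodes a signed 32-bit offset from the instruction, not an absolute target. A self-contained version of that test with plain pointer arithmetic (no Xbyak; the real encoder measures from the end of the jmp, a few bytes further on):

```cpp
#include <cstdint>
#include <cstdio>

// A rel32 jump can only reach targets within a signed 32-bit displacement of
// the emit position, so the decision must be based on the distance, not the
// raw target address.
static bool IsRel32Reachable(const void* target, const void* emit_ptr)
{
  const int64_t distance =
    static_cast<int64_t>(reinterpret_cast<intptr_t>(target) - reinterpret_cast<intptr_t>(emit_ptr));
  return distance >= INT32_MIN && distance <= INT32_MAX;
}

int main()
{
  char buffer[16];
  std::printf("near target reachable: %d\n", IsRel32Reachable(buffer + 8, buffer));
  // A distant 64-bit address is generally out of rel32 range and needs an
  // indirect jump through a scratch register instead.
  std::printf("far target reachable:  %d\n",
              IsRel32Reachable(reinterpret_cast<void*>(0x7FFF00000000ull), buffer));
  return 0;
}
```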

@@ -1807,65 +1809,171 @@ void CodeGenerator::EmitBranch(const void* address, bool allow_scratch)
  m_emit->jmp(GetHostReg64(temp));
}

template<typename T>
static void EmitConditionalJump(Condition condition, bool invert, Xbyak::CodeGenerator* emit, const T& label)
void CodeGenerator::EmitConditionalBranch(Condition condition, bool invert, HostReg value, RegSize size,
                                          LabelType* label)
{
  switch (condition)
  {
    case Condition::NotEqual:
    case Condition::Equal:
    case Condition::Overflow:
    case Condition::Greater:
    case Condition::GreaterEqual:
    case Condition::LessEqual:
    case Condition::Less:
    case Condition::Above:
    case Condition::AboveEqual:
    case Condition::Below:
    case Condition::BelowEqual:
      Panic("Needs a comparison value");
      return;

    case Condition::Negative:
    case Condition::PositiveOrZero:
    case Condition::NotZero:
    case Condition::Zero:
    {
      switch (size)
      {
        case RegSize_8:
          m_emit->test(GetHostReg8(value), GetHostReg8(value));
          break;
        case RegSize_16:
          m_emit->test(GetHostReg16(value), GetHostReg16(value));
          break;
        case RegSize_32:
          m_emit->test(GetHostReg32(value), GetHostReg32(value));
          break;
        case RegSize_64:
          m_emit->test(GetHostReg64(value), GetHostReg64(value));
          break;
        default:
          UnreachableCode();
          break;
      }

      EmitConditionalBranch(condition, invert, label);
      return;
    }

    case Condition::Always:
      m_emit->jmp(*label);
      return;

    default:
      UnreachableCode();
      return;
  }
}

void CodeGenerator::EmitConditionalBranch(Condition condition, bool invert, HostReg lhs, const Value& rhs,
                                          LabelType* label)
{
  switch (condition)
  {
    case Condition::NotEqual:
    case Condition::Equal:
    case Condition::Overflow:
    case Condition::Greater:
    case Condition::GreaterEqual:
    case Condition::LessEqual:
    case Condition::Less:
    case Condition::Above:
    case Condition::AboveEqual:
    case Condition::Below:
    case Condition::BelowEqual:
    {
      EmitCmp(lhs, rhs);
      EmitConditionalBranch(condition, invert, label);
      return;
    }

    case Condition::Negative:
    case Condition::PositiveOrZero:
    case Condition::NotZero:
    case Condition::Zero:
    {
      Assert(!rhs.IsValid() || (rhs.IsConstant() && rhs.GetS64ConstantValue() == 0));
      EmitConditionalBranch(condition, invert, lhs, rhs.size, label);
      return;
    }

    case Condition::Always:
      m_emit->jmp(*label);
      return;

    default:
      UnreachableCode();
      return;
  }
}

void CodeGenerator::EmitConditionalBranch(Condition condition, bool invert, LabelType* label)
{
  switch (condition)
  {
    case Condition::Always:
      emit->jmp(label);
      m_emit->jmp(*label);
      break;

    case Condition::NotEqual:
      invert ? emit->je(label) : emit->jne(label);
      invert ? m_emit->je(*label) : m_emit->jne(*label);
      break;

    case Condition::Equal:
      invert ? emit->jne(label) : emit->je(label);
      invert ? m_emit->jne(*label) : m_emit->je(*label);
      break;

    case Condition::Overflow:
      invert ? emit->jno(label) : emit->jo(label);
      invert ? m_emit->jno(*label) : m_emit->jo(*label);
      break;

    case Condition::Greater:
      invert ? emit->jng(label) : emit->jg(label);
      invert ? m_emit->jng(*label) : m_emit->jg(*label);
      break;

    case Condition::GreaterEqual:
      invert ? emit->jnge(label) : emit->jge(label);
      invert ? m_emit->jnge(*label) : m_emit->jge(*label);
      break;

    case Condition::Less:
      invert ? emit->jnl(label) : emit->jl(label);
      invert ? m_emit->jnl(*label) : m_emit->jl(*label);
      break;

    case Condition::LessEqual:
      invert ? emit->jnle(label) : emit->jle(label);
      invert ? m_emit->jnle(*label) : m_emit->jle(*label);
      break;

    case Condition::Negative:
      invert ? emit->jns(label) : emit->js(label);
      invert ? m_emit->jns(*label) : m_emit->js(*label);
      break;

    case Condition::PositiveOrZero:
      invert ? emit->js(label) : emit->jns(label);
      invert ? m_emit->js(*label) : m_emit->jns(*label);
      break;

    case Condition::Above:
      invert ? emit->jna(label) : emit->ja(label);
      invert ? m_emit->jna(*label) : m_emit->ja(*label);
      break;

    case Condition::AboveEqual:
      invert ? emit->jnae(label) : emit->jae(label);
      invert ? m_emit->jnae(*label) : m_emit->jae(*label);
      break;

    case Condition::Below:
      invert ? emit->jnb(label) : emit->jb(label);
      invert ? m_emit->jnb(*label) : m_emit->jb(*label);
      break;

    case Condition::BelowEqual:
      invert ? emit->jnbe(label) : emit->jbe(label);
      invert ? m_emit->jnbe(*label) : m_emit->jbe(*label);
      break;

    case Condition::NotZero:
      invert ? m_emit->jz(*label) : m_emit->jnz(*label);
      break;

    case Condition::Zero:
      invert ? m_emit->jnz(*label) : m_emit->jz(*label);
      break;

    default:

@@ -1874,84 +1982,6 @@ static void EmitConditionalJump(Condition condition, bool invert, Xbyak::CodeGen
  }
}

void CodeGenerator::EmitBranch(Condition condition, Reg lr_reg, Value&& branch_target)
{
  // ensure the lr register is flushed, since we want its correct value after the branch
  // we don't want to invalidate it yet because of "jalr r0, r0", branch_target could be the lr_reg.
  if (lr_reg != Reg::count && lr_reg != Reg::zero)
    m_register_cache.FlushGuestRegister(lr_reg, false, true);

  // compute return address, which is also set as the new pc when the branch isn't taken
  Value new_pc;
  if (condition != Condition::Always || lr_reg != Reg::count)
  {
    new_pc = AddValues(m_register_cache.ReadGuestRegister(Reg::pc), Value::FromConstantU32(4), false);
    if (!new_pc.IsInHostRegister())
      new_pc = GetValueInHostRegister(new_pc);
  }

  Xbyak::Label skip_branch;
  if (condition != Condition::Always)
  {
    // condition is inverted because we want the case for skipping it
    EmitConditionalJump(condition, true, m_emit, skip_branch);
  }

  // save the old PC if we want to
  if (lr_reg != Reg::count && lr_reg != Reg::zero)
  {
    // Can't cache because we have two branches. Load delay cancel is due to the immediate flush afterwards;
    // if we don't cancel it, at the end of the instruction the value we write can be overridden.
    EmitCancelInterpreterLoadDelayForReg(lr_reg);
    EmitStoreGuestRegister(lr_reg, new_pc);
  }

  // we don't need to test the address of constant branches unless they're definitely misaligned, which would be
  // strange.
  if (!branch_target.IsConstant() || (branch_target.constant_value & 0x3) != 0)
  {
    if (branch_target.IsConstant())
    {
      Log_WarningPrintf("Misaligned constant target branch 0x%08X, this is strange",
                        Truncate32(branch_target.constant_value));
    }
    else
    {
      // check the alignment of the target
      m_emit->test(GetHostReg32(branch_target), 0x3);
      m_emit->jnz(GetCurrentFarCodePointer());
    }

    m_register_cache.PushState();

    // exception exit for misaligned target
    SwitchToFarCode();
    EmitFunctionCall(nullptr, &Thunks::RaiseAddressException, m_register_cache.GetCPUPtr(), branch_target,
                     Value::FromConstantU8(0), Value::FromConstantU8(1));
    EmitExceptionExit();
    SwitchToNearCode();

    m_register_cache.PopState();
  }

  // branch taken path - change the return address/new pc
  if (condition != Condition::Always)
    EmitCopyValue(new_pc.GetHostRegister(), branch_target);

  // converge point
  m_emit->L(skip_branch);

  // update pc
  if (condition != Condition::Always)
    m_register_cache.WriteGuestRegister(Reg::pc, std::move(new_pc));
  else
    m_register_cache.WriteGuestRegister(Reg::pc, std::move(branch_target));

  // now invalidate lr because it was possibly written in the branch, and we don't need branch_target anymore
  if (lr_reg != Reg::count && lr_reg != Reg::zero)
    m_register_cache.InvalidateGuestRegister(lr_reg);
}

void CodeGenerator::EmitBranchIfBitClear(HostReg reg, RegSize size, u8 bit, LabelType* label)
{
  switch (size)

@@ -1982,34 +2012,6 @@ void CodeGenerator::EmitBindLabel(LabelType* label)
  m_emit->L(*label);
}

void CodeGenerator::EmitRaiseException(Exception excode, Condition condition /* = Condition::Always */)
{
  if (condition == Condition::Always)
  {
    // no need to use far code if we're always raising the exception
    m_register_cache.InvalidateGuestRegister(Reg::pc);
    m_register_cache.FlushAllGuestRegisters(true, true);
    m_register_cache.FlushLoadDelay(true);

    EmitFunctionCall(nullptr, &Thunks::RaiseException, m_register_cache.GetCPUPtr(),
                     Value::FromConstantU8(static_cast<u8>(excode)));
    return;
  }

  m_register_cache.PushState();

  const void* far_code_ptr = GetCurrentFarCodePointer();
  EmitConditionalJump(condition, false, m_emit, far_code_ptr);

  SwitchToFarCode();
  EmitFunctionCall(nullptr, &Thunks::RaiseException, m_register_cache.GetCPUPtr(),
                   Value::FromConstantU8(static_cast<u8>(excode)));
  EmitExceptionExit();
  SwitchToNearCode();

  m_register_cache.PopState();
}

void ASMFunctions::Generate(JitCodeBuffer* code_buffer) {}

} // namespace CPU::Recompiler


@@ -485,6 +485,38 @@ Value RegisterCache::ReadGuestRegister(Reg guest_reg, bool cache /* = true */, b
  }
}

Value RegisterCache::ReadGuestRegisterToScratch(Reg guest_reg)
{
  HostReg host_reg = AllocateHostReg();

  Value& cache_value = m_state.guest_reg_state[static_cast<u8>(guest_reg)];
  if (cache_value.IsValid())
  {
    m_code_generator.EmitCopyValue(host_reg, cache_value);

    if (cache_value.IsConstant())
    {
      Log_DebugPrintf("Copying guest register %s from constant 0x%08X to scratch host register %s", GetRegName(guest_reg),
                      static_cast<u32>(cache_value.constant_value),
                      m_code_generator.GetHostRegName(host_reg, RegSize_32));
    }
    else
    {
      Log_DebugPrintf("Copying guest register %s from %s to scratch host register %s", GetRegName(guest_reg),
                      m_code_generator.GetHostRegName(cache_value.host_reg, RegSize_32),
                      m_code_generator.GetHostRegName(host_reg, RegSize_32));
    }
  }
  else
  {
    m_code_generator.EmitLoadGuestRegister(host_reg, guest_reg);

    Log_DebugPrintf("Loading guest register %s to scratch host register %s", GetRegName(guest_reg),
                    m_code_generator.GetHostRegName(host_reg, RegSize_32));
  }

  return Value::FromScratch(this, host_reg, RegSize_32);
}
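ReadGuestRegisterToScratch hands the caller a private copy of the register, so the branch-and-link path can write the link register without disturbing the compare operand it just read. A toy model of that hazard is sketched below; ToyRegCache and its methods are invented for illustration and do not reflect the real RegisterCache internals.

```cpp
#include <cassert>
#include <cstdint>
#include <unordered_map>

// Toy cache: a plain cached read aliases the guest register's storage, while a
// scratch read copies the value out before anything else can touch it.
struct ToyRegCache
{
  std::unordered_map<int, uint32_t> cache;

  uint32_t& ReadCached(int guest_reg) { return cache[guest_reg]; }   // aliases the cache slot
  uint32_t ReadToScratch(int guest_reg) { return cache[guest_reg]; } // private copy

  void WriteGuestRegister(int guest_reg, uint32_t value) { cache[guest_reg] = value; }
};

int main()
{
  ToyRegCache rc;
  rc.WriteGuestRegister(31, 0x80001000u); // $ra holds the compare operand (e.g. bltzal $ra, ...)

  uint32_t& aliased = rc.ReadCached(31);
  const uint32_t scratch = rc.ReadToScratch(31);

  // Linking overwrites $ra before the compare is emitted.
  rc.WriteGuestRegister(31, 0x80000008u);

  assert(aliased == 0x80000008u); // the cached view now sees the new link value
  assert(scratch == 0x80001000u); // the scratch copy still holds the original operand
  return 0;
}
```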

Value RegisterCache::WriteGuestRegister(Reg guest_reg, Value&& value)
{
  // ignore writes to register zero

@@ -273,6 +273,10 @@ public:
  Value ReadGuestRegister(Reg guest_reg, bool cache = true, bool force_host_register = false,
                          HostReg forced_host_reg = HostReg_Invalid);

  /// Reads the guest register into a caller-owned scratch register. This ensures the value cannot be invalidated by a
  /// later write through the cache.
  Value ReadGuestRegisterToScratch(Reg guest_reg);

  /// Creates a copy of value, and stores it to guest_reg.
  Value WriteGuestRegister(Reg guest_reg, Value&& value);

@@ -44,8 +44,8 @@ enum class Condition : u8
  Below,      // unsigned variant of Less
  BelowEqual, // unsigned variant of LessEqual

  NotZero = NotEqual,
  Zero = Equal
  NotZero,
  Zero
};
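Previously NotZero and Zero were aliases of NotEqual and Equal, so a switch could not give them their own handling (listing both labels would be a duplicate-case error), which the new register-test overloads need in order to pick a direct register test instead of a compare. A small illustration of that constraint, with assumed instruction-choice strings:

```cpp
#include <cstdio>

namespace aliased {
// The old layout: NotZero/Zero are just other names for NotEqual/Equal, so
// "case NotEqual:" and "case NotZero:" would be the same label and could not
// select different code paths.
enum class Condition { NotEqual, Equal, NotZero = NotEqual, Zero = Equal };
}

namespace distinct {
enum class Condition { NotEqual, Equal, NotZero, Zero };

const char* Select(Condition cc)
{
  switch (cc)
  {
    case Condition::NotEqual: return "cmp + b.ne"; // needs a prior comparison
    case Condition::NotZero:  return "cbnz";       // tests a register directly
    case Condition::Equal:    return "cmp + b.eq";
    case Condition::Zero:     return "cbz";
  }
  return "?";
}
} // namespace distinct

int main()
{
  std::printf("%s\n", distinct::Select(distinct::Condition::NotZero));  // cbnz
  std::printf("%s\n", distinct::Select(distinct::Condition::NotEqual)); // cmp + b.ne
  return 0;
}
```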

#if defined(Y_CPU_X64)