CPU/CodeCache: Fetch second delay slot from first branch for double branches
This commit is contained in:
parent
3890a539ee
commit
042bdd9c0c
|
@ -460,6 +460,7 @@ bool CompileBlock(CodeBlock* block)
|
|||
{
|
||||
u32 pc = block->GetPC();
|
||||
bool is_branch_delay_slot = false;
|
||||
bool is_unconditional_branch_delay_slot = false;
|
||||
bool is_load_delay_slot = false;
|
||||
|
||||
#if 0
|
||||
|
@ -479,6 +480,7 @@ bool CompileBlock(CodeBlock* block)
|
|||
cbi.is_branch_delay_slot = is_branch_delay_slot;
|
||||
cbi.is_load_delay_slot = is_load_delay_slot;
|
||||
cbi.is_branch_instruction = IsBranchInstruction(cbi.instruction);
|
||||
cbi.is_unconditional_branch_instruction = IsUnconditionalBranchInstruction(cbi.instruction);
|
||||
cbi.is_load_instruction = IsMemoryLoadInstruction(cbi.instruction);
|
||||
cbi.is_store_instruction = IsMemoryStoreInstruction(cbi.instruction);
|
||||
cbi.has_load_delay = InstructionHasLoadDelay(cbi.instruction);
|
||||
|
@ -498,9 +500,24 @@ bool CompileBlock(CodeBlock* block)
|
|||
block->contains_loadstore_instructions |= cbi.is_load_instruction;
|
||||
block->contains_loadstore_instructions |= cbi.is_store_instruction;
|
||||
|
||||
pc += sizeof(cbi.instruction.bits);
|
||||
|
||||
if (is_branch_delay_slot && cbi.is_branch_instruction)
|
||||
{
|
||||
if (!is_unconditional_branch_delay_slot)
|
||||
{
|
||||
Log_WarningPrintf("Conditional branch delay slot at %08X, skipping block", cbi.pc);
|
||||
return false;
|
||||
}
|
||||
|
||||
// change the pc for the second branch's delay slot, it comes from the first branch
|
||||
const CodeBlockInstruction& prev_cbi = block->instructions.back();
|
||||
pc = GetBranchInstructionTarget(prev_cbi.instruction, prev_cbi.pc);
|
||||
Log_DevPrintf("Double branch at %08X, using delay slot from %08X -> %08X", cbi.pc, prev_cbi.pc, pc);
|
||||
}
|
||||
|
||||
// instruction is decoded now
|
||||
block->instructions.push_back(cbi);
|
||||
pc += sizeof(cbi.instruction.bits);
|
||||
|
||||
// if we're in a branch delay slot, the block is now done
|
||||
// except if this is a branch in a branch delay slot, then we grab the one after that, and so on...
|
||||
|
@ -509,6 +526,7 @@ bool CompileBlock(CodeBlock* block)
|
|||
|
||||
// if this is a branch, we grab the next instruction (delay slot), and then exit
|
||||
is_branch_delay_slot = cbi.is_branch_instruction;
|
||||
is_unconditional_branch_delay_slot = cbi.is_unconditional_branch_instruction;
|
||||
|
||||
// same for load delay
|
||||
is_load_delay_slot = cbi.has_load_delay;
|
||||
|
|
|
@ -52,6 +52,7 @@ struct CodeBlockInstruction
|
|||
u32 pc;
|
||||
|
||||
bool is_branch_instruction : 1;
|
||||
bool is_unconditional_branch_instruction : 1;
|
||||
bool is_branch_delay_slot : 1;
|
||||
bool is_load_instruction : 1;
|
||||
bool is_store_instruction : 1;
|
||||
|
@ -83,6 +84,7 @@ struct CodeBlock
|
|||
#endif
|
||||
|
||||
bool contains_loadstore_instructions = false;
|
||||
bool contains_double_branches = false;
|
||||
bool invalidated = false;
|
||||
|
||||
const u32 GetPC() const { return key.GetPC(); }
|
||||
|
|
|
@ -1557,7 +1557,8 @@ void InterpretUncachedBlock()
|
|||
g_state.exception_raised = false;
|
||||
|
||||
// Fetch the next instruction, except if we're in a branch delay slot. The "fetch" is done in the next block.
|
||||
if (!g_state.current_instruction_in_branch_delay_slot)
|
||||
const bool branch = IsBranchInstruction(g_state.current_instruction);
|
||||
if (!g_state.current_instruction_in_branch_delay_slot || branch)
|
||||
{
|
||||
if (!FetchInstruction())
|
||||
break;
|
||||
|
@ -1573,7 +1574,6 @@ void InterpretUncachedBlock()
|
|||
// next load delay
|
||||
UpdateLoadDelay();
|
||||
|
||||
const bool branch = IsBranchInstruction(g_state.current_instruction);
|
||||
if (g_state.exception_raised || (!branch && in_branch_delay_slot) ||
|
||||
IsExitBlockInstruction(g_state.current_instruction))
|
||||
{
|
||||
|
|
|
@ -44,6 +44,62 @@ bool IsBranchInstruction(const Instruction& instruction)
|
|||
}
|
||||
}
|
||||
|
||||
bool IsUnconditionalBranchInstruction(const Instruction& instruction)
|
||||
{
|
||||
switch (instruction.op)
|
||||
{
|
||||
case InstructionOp::j:
|
||||
case InstructionOp::jal:
|
||||
case InstructionOp::b:
|
||||
return true;
|
||||
|
||||
case InstructionOp::beq:
|
||||
{
|
||||
if (instruction.i.rs == Reg::zero && instruction.i.rt == Reg::zero)
|
||||
return true;
|
||||
else
|
||||
return false;
|
||||
}
|
||||
break;
|
||||
|
||||
case InstructionOp::funct:
|
||||
{
|
||||
switch (instruction.r.funct)
|
||||
{
|
||||
case InstructionFunct::jr:
|
||||
case InstructionFunct::jalr:
|
||||
return true;
|
||||
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Computes the branch/jump destination for a direct-target instruction.
// @param instruction     the decoded instruction word.
// @param instruction_pc  the address the instruction was fetched from.
// @return the target address for j/jal and the PC-relative branches; for any
//         other opcode (including register-indirect jr/jalr, whose target is
//         not encoded in the instruction) the instruction's own PC is
//         returned unchanged.
u32 GetBranchInstructionTarget(const Instruction& instruction, u32 instruction_pc)
{
  const InstructionOp op = instruction.op;

  // j/jal: 26-bit region target, combined with the upper bits of the address
  // of the delay slot (pc + 4).
  if (op == InstructionOp::j || op == InstructionOp::jal)
  {
    const u32 region = (instruction_pc + 4) & UINT32_C(0xF0000000);
    return region | (instruction.j.target << 2);
  }

  // PC-relative conditional branches and the bcond group: signed 16-bit word
  // offset from the delay slot address.
  if (op == InstructionOp::b || op == InstructionOp::beq || op == InstructionOp::bgtz ||
      op == InstructionOp::blez || op == InstructionOp::bne)
  {
    return instruction_pc + 4 + (instruction.i.imm_sext32() << 2);
  }

  // Not a direct-target branch; caller gets the original PC back.
  return instruction_pc;
}
|
||||
|
||||
bool IsMemoryLoadInstruction(const Instruction& instruction)
|
||||
{
|
||||
switch (instruction.op)
|
||||
|
|
|
@ -212,6 +212,8 @@ union Instruction
|
|||
|
||||
// Instruction helpers.
|
||||
bool IsBranchInstruction(const Instruction& instruction);
|
||||
bool IsUnconditionalBranchInstruction(const Instruction& instruction);
|
||||
u32 GetBranchInstructionTarget(const Instruction& instruction, u32 instruction_pc);
|
||||
bool IsMemoryLoadInstruction(const Instruction& instruction);
|
||||
bool IsMemoryStoreInstruction(const Instruction& instruction);
|
||||
bool InstructionHasLoadDelay(const Instruction& instruction);
|
||||
|
|
Loading…
Reference in New Issue