CPU: Implement breakpoints in the recompiler
Covers both cop0 execution breakpoints and debugger execute breakpoints.
commit f17782bcc0 (parent 1d138da3ff)

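At a high level, toggling an execute breakpoint under a recompiler backend now invalidates the affected blocks and re-evaluates whether the debug dispatcher is needed. A rough standalone sketch of that flow, with simplified stand-ins rather than the real signatures (those appear in the hunks below):

#include <cstdint>

using u32 = std::uint32_t;

namespace sketch {

// Simplified stand-ins for the engine pieces touched by this commit.
void InvalidateOverlappingBlocks(u32 pc, bool force_recompile) {}  // CodeCache
bool UpdateDebugDispatcherFlag() { return false; }                 // CPU
void InterruptExecution() {}                                       // System

// Adding an execute breakpoint while the recompiler is active
// (mirrors the order used by CPU::AddBreakpoint later in this diff):
void OnExecuteBreakpointAdded(u32 address)
{
  // 1. Any block containing the address is forced to recompile, so the refreshed
  //    block can embed an inline breakpoint check.
  InvalidateOverlappingBlocks(address, /*force_recompile=*/true);

  // 2. The debug-dispatcher requirement is re-evaluated; if it changed, execution
  //    is interrupted so the new dispatcher takes effect.
  if (UpdateDebugDispatcherFlag())
    InterruptExecution();
}

} // namespace sketch
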
@@ -57,6 +57,7 @@ static void DeallocateLUTs();
 static void ResetCodeLUT();
 static void SetCodeLUT(u32 pc, const void* function);
 static void InvalidateBlock(Block* block, BlockState new_state);
+static void ResetBlockCompileCount(Block* block);
 static void ClearBlocks();

 static Block* LookupBlock(u32 pc);

@@ -637,6 +638,12 @@ void CPU::CodeCache::InvalidateBlock(Block* block, BlockState new_state)
   block->state = new_state;
 }

+void CPU::CodeCache::ResetBlockCompileCount(Block* block)
+{
+  block->compile_frame = System::GetFrameNumber();
+  block->compile_count = 1;
+}
+
 void CPU::CodeCache::InvalidateAllRAMBlocks()
 {
   // TODO: maybe combine the backlink into one big instruction flush cache?

@@ -661,6 +668,35 @@ void CPU::CodeCache::InvalidateAllRAMBlocks()
   Bus::ClearRAMCodePageFlags();
 }

+void CPU::CodeCache::InvalidateOverlappingBlocks(u32 pc, bool force_recompile)
+{
+  const u32 table = pc >> LUT_TABLE_SHIFT;
+  Block** const blocks = s_block_lut[table];
+  if (!blocks)
+    return;
+
+  // optionally force recompilation
+  const BlockState new_block_state = force_recompile ? BlockState::NeedsRecompile : BlockState::Invalidated;
+
+  // loop through all blocks in the page
+  for (u32 i = 0; i < LUT_TABLE_SIZE; i++)
+  {
+    Block* const block = blocks[i];
+    if (!block)
+      continue;
+
+    if (pc >= block->pc && pc < (block->pc + block->size))
+    {
+      // this is pretty gross, if it's a RAM block we need to unlink it from the page list
+      RemoveBlockFromPageList(block);
+
+      // don't get trolled into the interpreter if bps are toggled
+      InvalidateBlock(block, new_block_state);
+      ResetBlockCompileCount(block);
+    }
+  }
+}
+
 void CPU::CodeCache::ClearBlocks()
 {
   for (u32 i = 0; i < Bus::RAM_8MB_CODE_PAGE_COUNT; i++)

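For orientation, a minimal standalone sketch of the indexing and containment test the function above performs. The LUT_TABLE_SHIFT value here is an assumption for illustration only; the real constant is defined elsewhere in the code cache and is not part of this diff.

#include <cstdint>

using u32 = std::uint32_t;

// Assumed split point for the first-level table (illustrative only).
constexpr u32 kLutTableShift = 16;

struct Block
{
  u32 pc;
  u32 size;
};

// Which first-level table a PC falls into.
inline u32 TableIndexForPC(u32 pc)
{
  return pc >> kLutTableShift;
}

// Same containment test as the hunk above: pc lies in [block.pc, block.pc + block.size).
inline bool BlockContainsPC(const Block& block, u32 pc)
{
  return pc >= block.pc && pc < (block.pc + block.size);
}
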
@@ -859,6 +895,9 @@ bool CPU::CodeCache::ReadBlockInstructions(u32 start_pc, BlockInstructionList* i
   const bool use_icache = CPU::IsCachedAddress(start_pc);
   const bool dynamic_fetch_ticks = (!use_icache && Bus::GetMemoryAccessTimePtr(start_pc & PHYSICAL_MEMORY_ADDRESS_MASK,
                                                                                MemoryAccessSize::Word) != nullptr);
+  const bool debug_breakpoints_enabled = CPU::HasAnyBreakpoints(BreakpointType::Execute);
+  const bool cop0_breakpoints_enabled = CPU::g_state.cop0_regs.dcic.ExecutionBreakpointsEnabled();
+
   u32 pc = start_pc;
   bool is_branch_delay_slot = false;
   bool is_load_delay_slot = false;

@@ -926,6 +965,8 @@ bool CPU::CodeCache::ReadBlockInstructions(u32 start_pc, BlockInstructionList* i
     info.is_load_instruction = IsMemoryLoadInstruction(instruction);
     info.is_store_instruction = IsMemoryStoreInstruction(instruction);
     info.has_load_delay = InstructionHasLoadDelay(instruction);
+    info.is_cop0_breakpoint = (cop0_breakpoints_enabled && CPU::Cop0BreakpointMatchesPC(pc));
+    info.is_debug_breakpoint = (debug_breakpoints_enabled && CPU::HasBreakpointAtAddress(BreakpointType::Execute, pc));

     if (use_icache)
     {

@@ -972,6 +1013,19 @@ bool CPU::CodeCache::ReadBlockInstructions(u32 start_pc, BlockInstructionList* i
     // instruction is decoded now
     instructions->emplace_back(instruction, info);

+    if (info.is_cop0_breakpoint)
+    {
+      // end block early at breakpoints, the instruction doesn't actually get executed
+      WARNING_LOG("Ending block 0x{:08X} at 0x{:08X} due to cop0 breakpoint", start_pc, pc);
+      break;
+    }
+    else if (info.is_debug_breakpoint && is_branch_delay_slot)
+    {
+      // can't handle debug breakpoints in branch delay slots currently, since it can't resume
+      ERROR_LOG("Can't handle debug breakpoint at 0x{:08X} in branch delay slot, skipping block", pc);
+      return false;
+    }
+
     // if we're in a branch delay slot, the block is now done
     // except if this is a branch in a branch delay slot, then we grab the one after that, and so on...
     if (is_branch_delay_slot && !info.is_branch_instruction)

@@ -1539,6 +1593,13 @@ const void* CPU::CodeCache::GetInterpretUncachedBlockFunction()
   }
 }

+void CPU::CodeCache::ResetAndExitExecution()
+{
+  DEV_LOG("Resetting code cache and exiting execution");
+  Reset();
+  CPU::ExitExecution();
+}
+
 void CPU::CodeCache::CompileASMFunctions()
 {
   MemMap::BeginCodeWrite();

@@ -1710,13 +1771,11 @@ PageFaultHandler::HandlerResult CPU::CodeCache::HandleFastmemException(void* exc
   if (block)
   {
     // This is a bit annoying, we have to remove it from the page list if it's a RAM block.
+    // Need to reset the recompile count, otherwise it'll get trolled into an interpreter fallback.
     DEV_LOG("Queuing block {:08X} for recompilation due to backpatch", block->pc);
     RemoveBlockFromPageList(block);
     InvalidateBlock(block, BlockState::NeedsRecompile);
-
-    // Need to reset the recompile count, otherwise it'll get trolled into an interpreter fallback.
-    block->compile_frame = System::GetFrameNumber();
-    block->compile_count = 1;
+    ResetBlockCompileCount(block);
   }

   MemMap::EndCodeWrite();

@@ -37,4 +37,7 @@ void InvalidateBlocksWithPageIndex(u32 page_index);
 /// Invalidates all blocks in the cache.
 void InvalidateAllRAMBlocks();

+/// Invalidates blocks with the specified PC, forcing recompilation.
+void InvalidateOverlappingBlocks(u32 pc, bool force_recompile);
+
 } // namespace CPU::CodeCache

@@ -44,6 +44,8 @@ struct InstructionInfo
   bool is_load_instruction : 1;
   bool is_store_instruction : 1;
   bool is_load_delay_slot : 1;
+  bool is_cop0_breakpoint : 1;
+  bool is_debug_breakpoint : 1;
   bool is_last_instruction : 1;
   bool has_load_delay : 1;

@@ -233,6 +235,7 @@ void CommitFarCode(u32 length);
 void AlignCode(u32 alignment);

 const void* GetInterpretUncachedBlockFunction();
+void ResetAndExitExecution();

 void CompileOrRevalidateBlock(u32 start_pc);
 void DiscardAndRecompileBlock(u32 start_pc);

@@ -530,15 +530,18 @@ ALWAYS_INLINE_RELEASE void CPU::Cop0ExecutionBreakpointCheck()
   if (!g_state.cop0_regs.dcic.ExecutionBreakpointsEnabled())
     return;

-  const u32 pc = g_state.current_instruction_pc;
-  const u32 bpc = g_state.cop0_regs.BPC;
-  const u32 bpcm = g_state.cop0_regs.BPCM;
+  if (Cop0BreakpointMatchesPC(g_state.current_instruction_pc))
+    DispatchCop0ExecutionBreakpoint();
+}

-  // Break condition is "((PC XOR BPC) AND BPCM)=0".
-  if (bpcm == 0 || ((pc ^ bpc) & bpcm) != 0u)
-    return;
+bool CPU::AreCop0ExecutionBreakpointsActive()
+{
+  return (g_state.cop0_regs.dcic.ExecutionBreakpointsEnabled() && IsCop0ExecutionBreakpointUnmasked());
+}

-  DEV_LOG("Cop0 execution breakpoint at {:08X}", pc);
+void CPU::DispatchCop0ExecutionBreakpoint()
+{
+  DEV_LOG("Cop0 execution breakpoint at {:08X}", g_state.current_instruction_pc);
   g_state.cop0_regs.dcic.status_any_break = true;
   g_state.cop0_regs.dcic.status_bpc_code_break = true;
   DispatchCop0Breakpoint();

@@ -2013,15 +2016,19 @@ CPUExecutionMode CPU::GetCurrentExecutionMode()

 bool CPU::UpdateDebugDispatcherFlag()
 {
-  const bool has_any_breakpoints = (HasAnyBreakpoints() || s_break_type == ExecutionBreakType::SingleStep);
-
   const auto& dcic = g_state.cop0_regs.dcic;
   const bool has_cop0_breakpoints = dcic.super_master_enable_1 && dcic.super_master_enable_2 &&
                                     dcic.execution_breakpoint_enable && IsCop0ExecutionBreakpointUnmasked();
+  const bool has_execution_breakpoints = HasAnyBreakpoints(BreakpointType::Execute);
+  const bool requires_interpreter =
+    (HasAnyBreakpoints(BreakpointType::Read) || HasAnyBreakpoints(BreakpointType::Write) ||
+     s_break_type == ExecutionBreakType::SingleStep);

   // TODO: Don't force the int just for cop0 breakpoints, they're cheap. Also need it for cop0 data breakpoints.
   const bool use_debug_dispatcher =
-    has_any_breakpoints || has_cop0_breakpoints || s_trace_to_log ||
-    (g_settings.cpu_execution_mode == CPUExecutionMode::Interpreter && g_settings.bios_tty_logging);
+    s_trace_to_log || requires_interpreter ||
+    (g_settings.cpu_execution_mode == CPUExecutionMode::Interpreter &&
+     (has_cop0_breakpoints || has_execution_breakpoints || g_settings.bios_tty_logging));
   if (use_debug_dispatcher == g_state.using_debug_dispatcher)
     return false;

@@ -2103,10 +2110,9 @@ void CPU::CheckForExecutionModeChange()
   fastjmp_jmp(&s_jmp_buf, 1);
 }

-bool CPU::HasAnyBreakpoints()
+bool CPU::HasAnyBreakpoints(BreakpointType type)
 {
-  return (GetBreakpointList(BreakpointType::Execute).size() + GetBreakpointList(BreakpointType::Read).size() +
-          GetBreakpointList(BreakpointType::Write).size()) > 0;
+  return (GetBreakpointList(type).size() > 0);
 }

 ALWAYS_INLINE CPU::BreakpointList& CPU::GetBreakpointList(BreakpointType type)

@@ -2129,10 +2135,7 @@ bool CPU::HasBreakpointAtAddress(BreakpointType type, VirtualMemoryAddress addre
   for (Breakpoint& bp : GetBreakpointList(type))
   {
     if (bp.enabled && (bp.address & 0x0FFFFFFFu) == (address & 0x0FFFFFFFu))
-    {
-      bp.hit_count++;
       return true;
-    }
   }

   return false;

@@ -2174,12 +2177,16 @@ bool CPU::AddBreakpoint(BreakpointType type, VirtualMemoryAddress address, bool

   Breakpoint bp{address, nullptr, auto_clear ? 0 : s_breakpoint_counter++, 0, type, auto_clear, enabled};
   GetBreakpointList(type).push_back(std::move(bp));
-  if (UpdateDebugDispatcherFlag())
-    System::InterruptExecution();

   if (!auto_clear)
     Host::ReportDebuggerMessage(fmt::format("Added breakpoint at 0x{:08X}.", address));

+  if (type == BreakpointType::Execute && s_current_execution_mode != CPUExecutionMode::Interpreter && enabled)
+    CodeCache::InvalidateOverlappingBlocks(address, true);
+
+  if (UpdateDebugDispatcherFlag())
+    System::InterruptExecution();
+
   return true;
 }

@@ -2192,8 +2199,13 @@ bool CPU::AddBreakpointWithCallback(BreakpointType type, VirtualMemoryAddress ad

   Breakpoint bp{address, callback, 0, 0, type, false, true};
   GetBreakpointList(type).push_back(std::move(bp));

+  if (type == BreakpointType::Execute && s_current_execution_mode != CPUExecutionMode::Interpreter)
+    CodeCache::InvalidateOverlappingBlocks(address, true);
+
   if (UpdateDebugDispatcherFlag())
     System::InterruptExecution();

   return true;
 }

@@ -2209,12 +2221,15 @@ bool CPU::SetBreakpointEnabled(BreakpointType type, VirtualMemoryAddress address
                                                GetBreakpointTypeName(type), address));
   it->enabled = enabled;

-  if (UpdateDebugDispatcherFlag())
-    System::InterruptExecution();
-
   if (address == s_last_breakpoint_check_pc && !enabled)
     s_last_breakpoint_check_pc = INVALID_BREAKPOINT_PC;

+  if (type == BreakpointType::Execute && s_current_execution_mode != CPUExecutionMode::Interpreter)
+    CodeCache::InvalidateOverlappingBlocks(address, true);
+
+  if (UpdateDebugDispatcherFlag())
+    System::InterruptExecution();
+
   return true;
 }

@@ -2229,21 +2244,36 @@ bool CPU::RemoveBreakpoint(BreakpointType type, VirtualMemoryAddress address)
   Host::ReportDebuggerMessage(fmt::format("Removed {} breakpoint at 0x{:08X}.", GetBreakpointTypeName(type), address));

   bplist.erase(it);
-  if (UpdateDebugDispatcherFlag())
-    System::InterruptExecution();

   if (address == s_last_breakpoint_check_pc)
     s_last_breakpoint_check_pc = INVALID_BREAKPOINT_PC;

+  if (type == BreakpointType::Execute && s_current_execution_mode != CPUExecutionMode::Interpreter)
+    CodeCache::InvalidateOverlappingBlocks(address, true);
+
+  if (UpdateDebugDispatcherFlag())
+    System::InterruptExecution();
+
   return true;
 }

 void CPU::ClearBreakpoints()
 {
+  if (s_current_execution_mode != CPUExecutionMode::Interpreter)
+  {
+    for (const Breakpoint& bp : s_breakpoints[static_cast<u32>(BreakpointType::Execute)])
+    {
+      if (bp.enabled)
+        CodeCache::InvalidateOverlappingBlocks(bp.address, true);
+    }
+  }
+
   for (BreakpointList& bplist : s_breakpoints)
     bplist.clear();

   s_breakpoint_counter = 0;
   s_last_breakpoint_check_pc = INVALID_BREAKPOINT_PC;

   if (UpdateDebugDispatcherFlag())
     System::InterruptExecution();
 }

@@ -2390,6 +2420,21 @@ ALWAYS_INLINE_RELEASE void CPU::ExecutionBreakpointCheck()
   }
 }

+u32 CPU::DispatchDebugBreakpoint()
+{
+  const u32 prev_npc = g_state.npc;
+  DEV_LOG("Debug execution breakpoint at {:08X}", g_state.current_instruction_pc);
+
+  if (CheckBreakpointList(BreakpointType::Execute, g_state.current_instruction_pc))
+  {
+    s_break_type = ExecutionBreakType::None;
+    ExitExecution();
+  }
+
+  // return true if pc has changed and the rec needs to bail out
+  return BoolToUInt32(g_state.npc != prev_npc);
+}
+
 template<MemoryAccessType type>
 ALWAYS_INLINE_RELEASE void CPU::MemoryBreakpointCheck(VirtualMemoryAddress address)
 {

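The u32 return value is the contract between this helper and generated code: non-zero means npc was redirected while the breakpoint was serviced, so the recompiled block must stop executing. A self-contained model of that contract; only DispatchDebugBreakpoint, BoolToUInt32, and g_state.npc come from the diff, everything else below is a hypothetical stand-in.

#include <cstdint>

using u32 = std::uint32_t;

namespace sketch {

// Minimal model of the relevant CPU state (name mirrors the diff, value is fake).
struct State { u32 npc; } g_state{0x80001004u};

inline u32 BoolToUInt32(bool b) { return b ? 1u : 0u; }

// Hypothetical breakpoint servicing hook: may redirect npc (e.g. a debugger "set pc").
void ServiceBreakpoint() { /* may assign g_state.npc = somewhere_else; */ }

// Same shape as CPU::DispatchDebugBreakpoint() above: remember npc, service the
// breakpoint, then report whether npc moved so the generated block can bail out.
u32 DispatchDebugBreakpointModel()
{
  const u32 prev_npc = g_state.npc;
  ServiceBreakpoint();
  return BoolToUInt32(g_state.npc != prev_npc);
}

} // namespace sketch
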
@@ -2587,6 +2632,8 @@ void CPU::CodeCache::InterpretUncachedBlock()
   if (!FetchInstructionForInterpreterFallback())
     return;

+  // TODO: needs to check debug breakpoints, for when the breakpoint is in a branch delay slot
+
   // At this point, pc contains the last address executed (in the previous block). The instruction has not been fetched
   // yet. pc shouldn't be updated until the fetch occurs, that way the exception occurs in the delay slot.
   bool in_branch_delay_slot = false;

@@ -229,7 +229,7 @@ using BreakpointList = std::vector<Breakpoint>;

 // Breakpoints
 const char* GetBreakpointTypeName(BreakpointType type);
-bool HasAnyBreakpoints();
+bool HasAnyBreakpoints(BreakpointType type);
 bool HasBreakpointAtAddress(BreakpointType type, VirtualMemoryAddress address);
 BreakpointList CopyBreakpointList(bool include_auto_clear = false, bool include_callbacks = false);
 bool AddBreakpoint(BreakpointType type, VirtualMemoryAddress address, bool auto_clear = false, bool enabled = true);

@@ -132,6 +132,22 @@ ALWAYS_INLINE static void StallUntilGTEComplete()
     (g_state.gte_completion_tick > g_state.pending_ticks) ? g_state.gte_completion_tick : g_state.pending_ticks;
 }

+// cop0 breakpoint check
+ALWAYS_INLINE static bool Cop0BreakpointMatchesPC(u32 pc)
+{
+  const u32 bpc = g_state.cop0_regs.BPC;
+  const u32 bpcm = g_state.cop0_regs.BPCM;
+
+  // Break condition is "((PC XOR BPC) AND BPCM)=0".
+  // TODO: is the != 0 here correct?
+  return (bpcm != 0 && ((pc ^ bpc) & bpcm) == 0u);
+}
+
+// cop0 breakpoint dispatch
+bool AreCop0ExecutionBreakpointsActive();
+void DispatchCop0ExecutionBreakpoint();
+u32 DispatchDebugBreakpoint();
+
 // kernel call interception
 void HandleA0Syscall();
 void HandleB0Syscall();

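A standalone, runnable illustration of the match rule above. The BPC/BPCM values are made up for the example; only the predicate itself mirrors the diff.

#include <cstdint>
#include <cstdio>

using u32 = std::uint32_t;

// Same predicate as Cop0BreakpointMatchesPC above, minus the global register state.
static bool MatchesPC(u32 pc, u32 bpc, u32 bpcm)
{
  // Break condition is "((PC XOR BPC) AND BPCM) == 0", with BPCM == 0 treated as "never".
  return bpcm != 0 && ((pc ^ bpc) & bpcm) == 0u;
}

int main()
{
  // Hypothetical setup: break anywhere in 0xBFC0xxxx by masking off the low 16 bits.
  const u32 bpc = 0xBFC00000u;
  const u32 bpcm = 0xFFFF0000u;

  std::printf("%d\n", MatchesPC(0xBFC00180u, bpc, bpcm)); // 1: same upper half, matches
  std::printf("%d\n", MatchesPC(0x80001000u, bpc, bpcm)); // 0: different region
  std::printf("%d\n", MatchesPC(0xBFC00180u, bpc, 0u));   // 0: mask of zero never matches
  return 0;
}
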
@@ -350,6 +350,7 @@ bool CPU::Recompiler::Recompiler::TrySwapDelaySlot(Reg rs, Reg rt, Reg rd)
     return false;

   const Instruction* next_instruction = inst + 1;
+  const CodeCache::InstructionInfo* next_iinfo = iinfo + 1;
   DebugAssert(next_instruction < (m_block->Instructions() + m_block->size));

   const Reg opcode_rs = next_instruction->r.rs;

@@ -366,6 +367,10 @@ bool CPU::Recompiler::Recompiler::TrySwapDelaySlot(Reg rs, Reg rt, Reg rd)
   const u32 backup_instruction_pc = m_current_instruction_pc;
   const bool backup_instruction_delay_slot = m_current_instruction_branch_delay_slot;

+  // branch delay slots with a breakpoint can't be swapped
+  if (next_iinfo->is_cop0_breakpoint || next_iinfo->is_debug_breakpoint)
+    goto is_unsafe;
+
   if (next_instruction->bits == 0)
   {
     // nop

@@ -1188,6 +1193,13 @@ void CPU::Recompiler::Recompiler::CompileInstruction()
            m_current_instruction_pc, str);
 #endif

+  if (iinfo->is_cop0_breakpoint || iinfo->is_debug_breakpoint)
+  {
+    CompileExecutionBreakpointCheck();
+    if (m_block_ended)
+      return;
+  }
+
   m_cycles++;

   if (IsNopInstruction(*inst))

@@ -119,6 +119,8 @@ protected:
       (FLUSH_CYCLES | FLUSH_GTE_DONE_CYCLE), // GTE cycles needed because it stalls when a GTE instruction is next.
     FLUSH_FOR_EARLY_BLOCK_EXIT =
       (FLUSH_FLUSH_MIPS_REGISTERS | FLUSH_CYCLES | FLUSH_GTE_DONE_CYCLE | FLUSH_PC | FLUSH_LOAD_DELAY),
+    FLUSH_FOR_BREAKPOINT =
+      (FLUSH_FLUSH_MIPS_REGISTERS | FLUSH_CYCLES | FLUSH_GTE_DONE_CYCLE | FLUSH_INSTRUCTION_BITS | FLUSH_FOR_C_CALL),
     FLUSH_FOR_INTERPRETER = (FLUSH_FLUSH_MIPS_REGISTERS | FLUSH_INVALIDATE_MIPS_REGISTERS |
                              FLUSH_FREE_CALLER_SAVED_REGISTERS | FLUSH_PC | FLUSH_CYCLES | FLUSH_INSTRUCTION_BITS |
                              FLUSH_LOAD_DELAY | FLUSH_GTE_DONE_CYCLE | FLUSH_INVALIDATE_SPECULATIVE_CONSTANTS),

@@ -338,6 +340,9 @@ protected:

   virtual void Compile_Fallback() = 0;

+  // returns true if further compilation should be skipped
+  virtual bool CompileExecutionBreakpointCheck() = 0;
+
   void Compile_j();
   virtual void Compile_jr(CompileFlags cf) = 0;
   void Compile_jr_const(CompileFlags cf);

@@ -579,6 +579,12 @@ void CPU::X64Recompiler::GenerateCall(const void* func, s32 arg1reg /*= -1*/, s3

 void CPU::X64Recompiler::EndBlock(const std::optional<u32>& newpc, bool do_event_test)
 {
+  if (iinfo->is_cop0_breakpoint && m_block_ended)
+  {
+    // block already ended by breakpoint
+    return;
+  }
+
   if (newpc.has_value())
   {
     if (m_dirty_pc || m_compiler_pc != newpc)

@@ -980,6 +986,39 @@ void CPU::X64Recompiler::Compile_Fallback()
   m_load_delay_dirty = EMULATE_LOAD_DELAYS;
 }

+bool CPU::X64Recompiler::CompileExecutionBreakpointCheck()
+{
+  // flush regs and instruction bits, since cop0 needs it for the delay slot/cop bits
+  Flush(FLUSH_FOR_BREAKPOINT);
+
+  // cop0 has to come first, because debug can exit execution
+  if (iinfo->is_cop0_breakpoint)
+  {
+    // cop0 always exits regardless of what the debug bp does
+    cg->call(&CPU::DispatchCop0ExecutionBreakpoint);
+    if (iinfo->is_debug_breakpoint)
+      cg->call(&CPU::DispatchDebugBreakpoint);
+    EndBlock(std::nullopt, true);
+    return true;
+  }
+
+  // only a debug bp?
+  if (iinfo->is_debug_breakpoint)
+  {
+    // debug may or may not exit execution, or change pc
+    cg->call(&CPU::DispatchDebugBreakpoint);
+    cg->test(cg->eax, cg->eax);
+    SwitchToFarCode(true, &CodeGenerator::jnz);
+    BackupHostState();
+    EndBlock(std::nullopt, true);
+    RestoreHostState();
+    SwitchToNearCode(false);
+  }
+
+  // we can continue
+  return true;
+}
+
 void CPU::X64Recompiler::CheckBranchTarget(const Xbyak::Reg32& pcreg)
 {
   if (!g_settings.cpu_recompiler_memory_exceptions)

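In plain C++ terms, the sequence emitted above behaves roughly like the sketch below at run time. The helper bodies are stand-ins; only the call order and the bail-out rule are taken from this diff.

#include <cstdint>

using u32 = std::uint32_t;

// Stand-ins for the helpers the generated code calls (real ones live in CPU::).
void DispatchCop0ExecutionBreakpoint() {}
u32 DispatchDebugBreakpoint() { return 0; }

// What one compiled instruction's breakpoint preamble amounts to (sketch only).
// Returns true when the block must stop executing at this instruction.
bool ExecutionBreakpointPreamble(bool is_cop0_breakpoint, bool is_debug_breakpoint)
{
  if (is_cop0_breakpoint)
  {
    // cop0 runs first and the block always ends here, regardless of the debug bp.
    DispatchCop0ExecutionBreakpoint();
    if (is_debug_breakpoint)
      DispatchDebugBreakpoint();
    return true;
  }

  if (is_debug_breakpoint)
  {
    // Debug breakpoint only: a non-zero return means npc was redirected, so the
    // block takes the far-code exit; otherwise it keeps running as compiled.
    if (DispatchDebugBreakpoint() != 0)
      return true;
  }

  return false;
}
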
@@ -2289,16 +2328,26 @@ void CPU::X64Recompiler::Compile_mtc0(CompileFlags cf)
     cg->mov(RWARG1, cg->dword[PTR(&g_state.cop0_regs.sr.bits)]);
     TestInterrupts(RWARG1);
   }
-  else if (reg == Cop0Reg::DCIC || reg == Cop0Reg::BPCM)
+  else if (reg == Cop0Reg::DCIC || reg == Cop0Reg::BPC || reg == Cop0Reg::BPCM)
   {
     // need to check whether we're switching to debug mode
     Flush(FLUSH_FOR_C_CALL);
-    cg->call(&CPU::UpdateDebugDispatcherFlag);
+
+    // dcic can enable data breakpoints
+    if (reg == Cop0Reg::DCIC)
+    {
+      cg->call(&CPU::UpdateDebugDispatcherFlag);
+      cg->test(cg->al, cg->al);
+      SwitchToFarCode(true, &Xbyak::CodeGenerator::jnz);
+      SwitchToNearCode(false);
+    }
+
+    cg->call(&CPU::AreCop0ExecutionBreakpointsActive);
     cg->test(cg->al, cg->al);
     SwitchToFarCode(true, &Xbyak::CodeGenerator::jnz);
     BackupHostState();
     Flush(FLUSH_FOR_EARLY_BLOCK_EXIT);
-    cg->call(&CPU::ExitExecution); // does not return
+    cg->call(&CodeCache::ResetAndExitExecution); // does not return
     RestoreHostState();
     SwitchToNearCode(false);
   }

@@ -51,6 +51,8 @@ protected:

   void Compile_Fallback() override;

+  bool CompileExecutionBreakpointCheck() override;
+
   void CheckBranchTarget(const Xbyak::Reg32& pcreg);
   void Compile_jr(CompileFlags cf) override;
   void Compile_jalr(CompileFlags cf) override;