TimingEvents: Switch to 64-bit counters
commit ce83e52c78 (parent b3c18ff8c7)
@@ -383,7 +383,7 @@ static RequestRegister s_request_register = {};
 static u8 s_interrupt_enable_register = INTERRUPT_REGISTER_MASK;
 static u8 s_interrupt_flag_register = 0;
 static u8 s_pending_async_interrupt = 0;
-static u32 s_last_interrupt_time = 0;
+static GlobalTicks s_last_interrupt_time = 0;
 
 static CDImage::Position s_setloc_position = {};
 static CDImage::LBA s_requested_lba{};

@@ -391,7 +391,7 @@ static CDImage::LBA s_current_lba{}; // this is the hold position
 static CDImage::LBA s_seek_start_lba{};
 static CDImage::LBA s_seek_end_lba{};
 static CDImage::LBA s_physical_lba{}; // current position of the disc with respect to time
-static u32 s_physical_lba_update_tick = 0;
+static GlobalTicks s_physical_lba_update_tick = 0;
 static u32 s_physical_lba_update_carry = 0;
 static bool s_setloc_pending = false;
 static bool s_read_after_seek = false;

@@ -664,14 +664,36 @@ bool CDROM::DoState(StateWrapper& sw)
 
   sw.Do(&s_interrupt_enable_register);
   sw.Do(&s_interrupt_flag_register);
-  sw.DoEx(&s_last_interrupt_time, 57, System::GetGlobalTickCounter() - MINIMUM_INTERRUPT_DELAY);
+
+  if (sw.GetVersion() < 71) [[unlikely]]
+  {
+    u32 last_interrupt_time32 = 0;
+    sw.DoEx(&last_interrupt_time32, 57, static_cast<u32>(System::GetGlobalTickCounter() - MINIMUM_INTERRUPT_DELAY));
+    s_last_interrupt_time = last_interrupt_time32;
+  }
+  else
+  {
+    sw.Do(&s_last_interrupt_time);
+  }
+
   sw.Do(&s_pending_async_interrupt);
   sw.DoPOD(&s_setloc_position);
   sw.Do(&s_current_lba);
   sw.Do(&s_seek_start_lba);
   sw.Do(&s_seek_end_lba);
   sw.DoEx(&s_physical_lba, 49, s_current_lba);
-  sw.DoEx(&s_physical_lba_update_tick, 49, static_cast<u32>(0));
+
+  if (sw.GetVersion() < 71) [[unlikely]]
+  {
+    u32 physical_lba_update_tick32 = 0;
+    sw.DoEx(&physical_lba_update_tick32, 49, static_cast<u32>(0));
+    s_physical_lba_update_tick = physical_lba_update_tick32;
+  }
+  else
+  {
+    sw.Do(&s_physical_lba_update_tick);
+  }
+
   sw.DoEx(&s_physical_lba_update_carry, 54, static_cast<u32>(0));
   sw.Do(&s_setloc_pending);
   sw.Do(&s_read_after_seek);

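For readers unfamiliar with the pattern above: old save states (version < 71) serialized the counter as a u32, so the loader widens it after reading, while new states serialize the full 64-bit value. A standalone sketch of that widening idea, with a hypothetical byte-stream reader standing in for StateWrapper (whose API is not shown in this diff):

#include <cstddef>
#include <cstdint>

// Hypothetical stand-in for the save-state stream; not the real StateWrapper.
struct Stream
{
  uint32_t version;
  const uint8_t* data;
  size_t pos = 0;
};

template <typename T>
T ReadLE(Stream& s) // little-endian scalar read
{
  T value{};
  for (size_t i = 0; i < sizeof(T); i++)
    value |= static_cast<T>(s.data[s.pos++]) << (i * 8);
  return value;
}

uint64_t ReadTickCounter(Stream& s)
{
  if (s.version < 71) // old states stored 32 bits; widen on load
    return static_cast<uint64_t>(ReadLE<uint32_t>(s));
  return ReadLE<uint64_t>(s); // new states store the full 64-bit counter
}
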
@@ -1292,8 +1314,7 @@ void CDROM::QueueDeliverAsyncInterrupt()
   // something similar anyway, the INT1 task won't run immediately after the INT3 is cleared.
   DebugAssert(HasPendingAsyncInterrupt());
 
-  // underflows here are okay
-  const u32 diff = System::GetGlobalTickCounter() - s_last_interrupt_time;
+  const u32 diff = static_cast<u32>(System::GetGlobalTickCounter() - s_last_interrupt_time);
   if (diff >= MINIMUM_INTERRUPT_DELAY)
   {
     DeliverAsyncInterrupt(nullptr, 0, 0);

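The new diff line relies on a property worth spelling out: subtracting two u64 timestamps and then truncating to u32 yields the exact elapsed tick count whenever the real gap is below 2^32, which MINIMUM_INTERRUPT_DELAY comfortably is. A standalone check (illustrative values only):

#include <cassert>
#include <cstdint>

int main()
{
  const uint64_t then = 0x0000'0000'FFFF'FFF0ULL; // low word about to wrap
  const uint64_t now = 0x0000'0001'0000'0010ULL;

  // Subtract in 64 bits first, then truncate: exact for any gap < 2^32.
  const uint32_t diff = static_cast<uint32_t>(now - then);
  assert(diff == 0x20);
  return 0;
}
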
@@ -2657,7 +2678,7 @@ void CDROM::UpdatePositionWhileSeeking()
 
 void CDROM::UpdatePhysicalPosition(bool update_logical)
 {
-  const u32 ticks = System::GetGlobalTickCounter();
+  const GlobalTicks ticks = System::GetGlobalTickCounter();
   if (IsSeeking() || IsReadingOrPlaying() || !IsMotorOn())
   {
     // If we're seeking+reading the first sector (no stat bits set), we need to return the set/current lba, not the last

@@ -2676,7 +2697,7 @@ void CDROM::UpdatePhysicalPosition(bool update_logical)
   }
 
   const u32 ticks_per_read = GetTicksForRead();
-  const u32 diff = ticks - s_physical_lba_update_tick + s_physical_lba_update_carry;
+  const u32 diff = static_cast<u32>((ticks - s_physical_lba_update_tick) + s_physical_lba_update_carry);
   const u32 sector_diff = diff / ticks_per_read;
   const u32 carry = diff % ticks_per_read;
   if (sector_diff > 0)

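The carry variable exists so the remainder of the division is not thrown away between calls; each update consumes whole sectors and banks the leftover ticks. A self-contained model of that div/mod pattern (the 451,584 figure is 33,868,800 ticks/s divided by 75 sectors/s, i.e. a 1x read; the update times are made up):

#include <cstdint>
#include <cstdio>

int main()
{
  const uint32_t ticks_per_read = 451584; // one sector at 1x speed
  uint64_t last_update_tick = 0;
  uint32_t carry = 0;
  uint32_t lba = 0;

  for (const uint64_t now : {300000ULL, 700000ULL, 1400000ULL})
  {
    // 64-bit subtraction, truncated, plus the remainder banked last time.
    const uint32_t diff = static_cast<uint32_t>(now - last_update_tick) + carry;
    lba += diff / ticks_per_read;  // whole sectors that have passed
    carry = diff % ticks_per_read; // keep the sub-sector remainder
    last_update_tick = now;
    std::printf("tick=%llu lba=%u carry=%u\n", static_cast<unsigned long long>(now), lba, carry);
  }
  return 0;
}
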
@@ -887,9 +887,10 @@ void CPU::CodeCache::LogCurrentState()
 
   const auto& regs = g_state.regs;
   WriteToExecutionLog(
-    "tick=%u dc=%u/%u pc=%08X at=%08X v0=%08X v1=%08X a0=%08X a1=%08X a2=%08X a3=%08X t0=%08X t1=%08X t2=%08X t3=%08X "
-    "t4=%08X t5=%08X t6=%08X t7=%08X s0=%08X s1=%08X s2=%08X s3=%08X s4=%08X s5=%08X s6=%08X s7=%08X t8=%08X t9=%08X "
-    "k0=%08X k1=%08X gp=%08X sp=%08X fp=%08X ra=%08X hi=%08X lo=%08X ldr=%s ldv=%08X cause=%08X sr=%08X gte=%08X\n",
+    "tick=%" PRIu64
+    " dc=%u/%u pc=%08X at=%08X v0=%08X v1=%08X a0=%08X a1=%08X a2=%08X a3=%08X t0=%08X t1=%08X t2=%08X t3=%08X t4=%08X "
+    "t5=%08X t6=%08X t7=%08X s0=%08X s1=%08X s2=%08X s3=%08X s4=%08X s5=%08X s6=%08X s7=%08X t8=%08X t9=%08X k0=%08X "
+    "k1=%08X gp=%08X sp=%08X fp=%08X ra=%08X hi=%08X lo=%08X ldr=%s ldv=%08X cause=%08X sr=%08X gte=%08X\n",
     System::GetGlobalTickCounter(), g_state.pending_ticks, g_state.downcount, g_state.pc, regs.at, regs.v0, regs.v1,
     regs.a0, regs.a1, regs.a2, regs.a3, regs.t0, regs.t1, regs.t2, regs.t3, regs.t4, regs.t5, regs.t6, regs.t7, regs.s0,
     regs.s1, regs.s2, regs.s3, regs.s4, regs.s5, regs.s6, regs.s7, regs.t8, regs.t9, regs.k0, regs.k1, regs.gp, regs.sp,

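A detail that is easy to miss in the format-string change: %u would now be undefined behaviour, since the first vararg is 64 bits wide. PRIu64 from <cinttypes> expands to the correct length modifier on every platform. Minimal usage:

#include <cinttypes>
#include <cstdio>

int main()
{
  const uint64_t tick = 5'000'000'000ULL; // does not fit in 32 bits
  std::printf("tick=%" PRIu64 "\n", tick);
  return 0;
}
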
@@ -78,9 +78,9 @@ struct PGXP_value
 struct State
 {
   // ticks the CPU has executed
-  TickCount downcount = 0;
-  TickCount pending_ticks = 0;
-  TickCount gte_completion_tick = 0;
+  u32 downcount = 0;
+  u32 pending_ticks = 0;
+  u32 gte_completion_tick = 0;
 
   Registers regs = {};
   Cop0Registers cop0_regs = {};

@@ -152,7 +152,7 @@ ALWAYS_INLINE static Registers& GetRegs()
   return g_state.regs;
 }
 
-ALWAYS_INLINE static TickCount GetPendingTicks()
+ALWAYS_INLINE static u32 GetPendingTicks()
 {
   return g_state.pending_ticks;
 }

@@ -164,7 +164,7 @@ ALWAYS_INLINE static void ResetPendingTicks()
 }
 ALWAYS_INLINE static void AddPendingTicks(TickCount ticks)
 {
-  g_state.pending_ticks += ticks;
+  g_state.pending_ticks += static_cast<u32>(ticks);
 }
 
 // state helpers

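pending_ticks is now u32 while AddPendingTicks still takes a signed TickCount; the explicit cast documents that any negative adjustment is intended to wrap, which for unsigned arithmetic is equivalent to subtraction modulo 2^32. A quick demonstration:

#include <cassert>
#include <cstdint>

int main()
{
  using TickCount = int32_t;
  uint32_t pending_ticks = 100;

  // Adding a negative count through the cast subtracts, because unsigned
  // arithmetic is defined to wrap modulo 2^32.
  const TickCount adjustment = -30;
  pending_ticks += static_cast<uint32_t>(adjustment);
  assert(pending_ticks == 70);
  return 0;
}
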
@@ -6,7 +6,7 @@
 #include "common/types.h"
 
 static constexpr u32 SAVE_STATE_MAGIC = 0x43435544;
-static constexpr u32 SAVE_STATE_VERSION = 70;
+static constexpr u32 SAVE_STATE_VERSION = 71;
 static constexpr u32 SAVE_STATE_MINIMUM_VERSION = 42;
 
 static_assert(SAVE_STATE_VERSION >= SAVE_STATE_MINIMUM_VERSION);

@@ -298,7 +298,7 @@ static System::FrameTimeHistory s_frame_time_history;
 static u32 s_frame_time_history_pos = 0;
 static u32 s_last_frame_number = 0;
 static u32 s_last_internal_frame_number = 0;
-static u32 s_last_global_tick_counter = 0;
+static GlobalTicks s_last_global_tick_counter = 0;
 static u64 s_last_cpu_time = 0;
 static u64 s_last_sw_time = 0;
 static u32 s_presents_since_last_update = 0;

@@ -633,7 +633,7 @@ void System::UpdateOverclock()
   UpdateThrottlePeriod();
 }
 
-u32 System::GetGlobalTickCounter()
+GlobalTicks System::GetGlobalTickCounter()
 {
   // When running events, the counter actually goes backwards, because the pending ticks are added in chunks.
   // So, we need to return the counter with all pending ticks added in such cases.

@@ -1957,7 +1957,7 @@ void System::Execute()
 
       // TODO: Purge reset/restore
       g_gpu->RestoreDeviceContext();
-      TimingEvents::UpdateCPUDowncount();
+      TimingEvents::CommitLeftoverTicks();
 
       if (s_rewind_load_counter >= 0)
         DoRewind();

@@ -3167,7 +3167,7 @@ void System::UpdatePerformanceCounters()
 
   const u32 frames_run = s_frame_number - s_last_frame_number;
   const float frames_runf = static_cast<float>(frames_run);
-  const u32 global_tick_counter = GetGlobalTickCounter();
+  const GlobalTicks global_tick_counter = GetGlobalTickCounter();
 
   // TODO: Make the math here less rubbish
   const double pct_divider =

@@ -192,7 +192,7 @@ ALWAYS_INLINE_RELEASE TickCount UnscaleTicksToOverclock(TickCount ticks, TickCou
 TickCount GetMaxSliceTicks();
 void UpdateOverclock();
 
-u32 GetGlobalTickCounter();
+GlobalTicks GetGlobalTickCounter();
 u32 GetFrameNumber();
 u32 GetInternalFrameNumber();
 void IncrementInternalFrameNumber();

@@ -16,11 +16,14 @@ Log_SetChannel(TimingEvents);
 
 namespace TimingEvents {
 
+static GlobalTicks GetTimestampForNewEvent();
+
 static void SortEvent(TimingEvent* event);
 static void AddActiveEvent(TimingEvent* event);
 static void RemoveActiveEvent(TimingEvent* event);
 static void SortEvents();
 static TimingEvent* FindActiveEvent(const std::string_view name);
+static void CommitGlobalTicks(const GlobalTicks new_global_ticks);
 
 namespace {
 struct TimingEventsState

@@ -28,11 +31,11 @@ struct TimingEventsState
   TimingEvent* active_events_head = nullptr;
   TimingEvent* active_events_tail = nullptr;
   TimingEvent* current_event = nullptr;
-  TickCount current_event_new_downcount = 0;
   u32 active_event_count = 0;
-  u32 global_tick_counter = 0;
-  u32 event_run_tick_counter = 0;
   bool frame_done = false;
+  GlobalTicks current_event_next_run_time = 0;
+  GlobalTicks global_tick_counter = 0;
+  GlobalTicks event_run_tick_counter = 0;
 };
 } // namespace
 

@@ -40,12 +43,19 @@ ALIGN_TO_CACHE_LINE static TimingEventsState s_state;
 
 } // namespace TimingEvents
 
-u32 TimingEvents::GetGlobalTickCounter()
+GlobalTicks TimingEvents::GetGlobalTickCounter()
 {
   return s_state.global_tick_counter;
 }
 
-u32 TimingEvents::GetEventRunTickCounter()
+GlobalTicks TimingEvents::GetTimestampForNewEvent()
+{
+  // we want to schedule relative to the currently-being processed event, but if we haven't run events in a while, it
+  // needs to include the pending time. so explicitly add the two.
+  return s_state.global_tick_counter + CPU::GetPendingTicks();
+}
+
+GlobalTicks TimingEvents::GetEventRunTickCounter()
 {
   return s_state.event_run_tick_counter;
 }

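GetTimestampForNewEvent() exists because the committed counter only advances inside the event loop; between runs, time the CPU has already executed sits in pending ticks. A toy model of why both parts are needed (not the emulator's types):

#include <cassert>
#include <cstdint>

// Toy model: committed event time plus the ticks the CPU has executed
// since events last ran.
struct Core
{
  uint64_t global_tick_counter = 1000; // advanced only when events run
  uint32_t pending_ticks = 250;        // accumulated since then
};

uint64_t TimestampForNewEvent(const Core& c)
{
  // Scheduling from the committed counter alone would fire new events
  // up to pending_ticks too early.
  return c.global_tick_counter + c.pending_ticks;
}

int main()
{
  const Core c{};
  assert(TimestampForNewEvent(c) == 1250);
  return 0;
}
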
@@ -58,6 +68,7 @@ void TimingEvents::Initialize()
 void TimingEvents::Reset()
 {
   s_state.global_tick_counter = 0;
+  s_state.event_run_tick_counter = 0;
 }
 
 void TimingEvents::Shutdown()

@@ -67,7 +78,9 @@ void TimingEvents::Shutdown()
 
 void TimingEvents::UpdateCPUDowncount()
 {
-  const u32 event_downcount = s_state.active_events_head->GetDowncount();
+  DebugAssert(s_state.active_events_head->m_next_run_time >= s_state.global_tick_counter);
+  const u32 event_downcount =
+    static_cast<u32>(s_state.active_events_head->m_next_run_time - s_state.global_tick_counter);
   CPU::g_state.downcount = CPU::HasPendingInterrupt() ? 0 : event_downcount;
 }
 

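With absolute deadlines, the head event's downcount becomes derived state: the distance from the committed counter to its next_run_time, truncated to 32 bits after the assert establishes it is non-negative. The truncation is safe as long as no event is scheduled more than 2^32 ticks ahead. In isolation:

#include <cassert>
#include <cstdint>

int main()
{
  const uint64_t global_tick_counter = 10'000'000'000ULL;
  const uint64_t head_next_run_time = 10'000'000'500ULL;

  // Deadlines are never in the past here, so the difference fits u32
  // for any realistic scheduling horizon.
  assert(head_next_run_time >= global_tick_counter);
  const uint32_t downcount = static_cast<uint32_t>(head_next_run_time - global_tick_counter);
  assert(downcount == 500);
  return 0;
}
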
@@ -78,13 +91,13 @@ TimingEvent** TimingEvents::GetHeadEventPtr()
 
 void TimingEvents::SortEvent(TimingEvent* event)
 {
-  const TickCount event_downcount = event->m_downcount;
+  const GlobalTicks event_runtime = event->m_next_run_time;
 
-  if (event->prev && event->prev->m_downcount > event_downcount)
+  if (event->prev && event->prev->m_next_run_time > event_runtime)
   {
     // move backwards
     TimingEvent* current = event->prev;
-    while (current && current->m_downcount > event_downcount)
+    while (current && current->m_next_run_time > event_runtime)
       current = current->prev;
 
     // unlink

@@ -120,11 +133,11 @@ void TimingEvents::SortEvent(TimingEvent* event)
       UpdateCPUDowncount();
     }
   }
-  else if (event->next && event_downcount > event->next->m_downcount)
+  else if (event->next && event_runtime > event->next->m_next_run_time)
   {
     // move forwards
     TimingEvent* current = event->next;
-    while (current && event_downcount > current->m_downcount)
+    while (current && event_runtime > current->m_next_run_time)
       current = current->next;
 
     // unlink

@@ -135,6 +148,7 @@ void TimingEvents::SortEvent(TimingEvent* event)
     else
     {
       s_state.active_events_head = event->next;
+      if (!s_state.current_event)
         UpdateCPUDowncount();
     }
     if (event->next)

@@ -155,6 +169,7 @@ void TimingEvents::SortEvent(TimingEvent* event)
     else
     {
       s_state.active_events_head = event;
+      if (!s_state.current_event)
         UpdateCPUDowncount();
     }
 

@@ -177,9 +192,10 @@ void TimingEvents::AddActiveEvent(TimingEvent* event)
   DebugAssert(!event->prev && !event->next);
   s_state.active_event_count++;
 
+  const GlobalTicks event_runtime = event->m_next_run_time;
   TimingEvent* current = nullptr;
   TimingEvent* next = s_state.active_events_head;
-  while (next && event->m_downcount > next->m_downcount)
+  while (next && event_runtime > next->m_next_run_time)
   {
     current = next;
     next = next->next;

@@ -240,7 +256,7 @@ void TimingEvents::RemoveActiveEvent(TimingEvent* event)
   else
   {
     s_state.active_events_head = event->next;
-    if (s_state.active_events_head)
+    if (s_state.active_events_head && !s_state.current_event)
       UpdateCPUDowncount();
   }
 

@@ -295,62 +311,70 @@ void TimingEvents::SetFrameDone()
   CPU::g_state.downcount = 0;
 }
 
-void TimingEvents::RunEvents()
-{
-  DebugAssert(!s_state.current_event);
-
-  do
-  {
-    TickCount pending_ticks = CPU::GetPendingTicks();
-    if (pending_ticks >= s_state.active_events_head->GetDowncount())
-    {
-      CPU::ResetPendingTicks();
-      s_state.event_run_tick_counter = s_state.global_tick_counter + static_cast<u32>(pending_ticks);
-
-      do
-      {
-        const TickCount time = std::min(pending_ticks, s_state.active_events_head->GetDowncount());
-        s_state.global_tick_counter += static_cast<u32>(time);
-        pending_ticks -= time;
-
-        // Apply downcount to all events.
-        // This will result in a negative downcount for those events which are late.
-        for (TimingEvent* event = s_state.active_events_head; event; event = event->next)
-        {
-          event->m_downcount -= time;
-          event->m_time_since_last_run += time;
-        }
-
-        // Now we can actually run the callbacks.
-        while (s_state.active_events_head->m_downcount <= 0)
-        {
-          // move it to the end, since that'll likely be its new position
-          TimingEvent* event = s_state.active_events_head;
-          s_state.current_event = event;
-
-          // Factor late time into the time for the next invocation.
-          const TickCount ticks_late = -event->m_downcount;
-          const TickCount ticks_to_execute = event->m_time_since_last_run;
-
-          // Why don't we modify event->m_downcount directly? Because otherwise the event list won't be sorted.
-          // Adding the interval may cause this event to have a greater downcount than the next, and a new event
-          // may be inserted at the front, despite having a higher downcount than the next.
-          s_state.current_event_new_downcount = event->m_downcount + event->m_interval;
-          event->m_time_since_last_run = 0;
-
-          // The cycles_late is only an indicator, it doesn't modify the cycles to execute.
-          event->m_callback(event->m_callback_param, ticks_to_execute, ticks_late);
-          if (event->m_active)
-          {
-            event->m_downcount = s_state.current_event_new_downcount;
-            SortEvent(event);
-          }
-        }
-      } while (pending_ticks > 0);
-
-      s_state.current_event = nullptr;
-    }
+void TimingEvents::CancelRunningEvent()
+{
+  if (!s_state.current_event)
+    return;
+
+  // Might need to sort it, since we're bailing out.
+  if (s_state.current_event->IsActive())
+    SortEvent(s_state.current_event);
+  s_state.current_event = nullptr;
+}
+
+ALWAYS_INLINE_RELEASE void TimingEvents::CommitGlobalTicks(const GlobalTicks new_global_ticks)
+{
+  s_state.event_run_tick_counter = new_global_ticks;
+
+  do
+  {
+    // move it to the end, since that'll likely be its new position
+    TimingEvent* event = s_state.active_events_head;
+    s_state.global_tick_counter = std::min(new_global_ticks, event->m_next_run_time);
+
+    // Now we can actually run the callbacks.
+    while (s_state.global_tick_counter >= event->m_next_run_time)
+    {
+      s_state.current_event = event;
+
+      // Factor late time into the time for the next invocation.
+      const TickCount ticks_late = static_cast<TickCount>(s_state.global_tick_counter - event->m_next_run_time);
+      const TickCount ticks_to_execute = static_cast<TickCount>(s_state.global_tick_counter - event->m_last_run_time);
+
+      // Why don't we modify event->m_downcount directly? Because otherwise the event list won't be sorted.
+      // Adding the interval may cause this event to have a greater downcount than the next, and a new event
+      // may be inserted at the front, despite having a higher downcount than the next.
+      s_state.current_event_next_run_time = event->m_next_run_time + static_cast<u32>(event->m_interval);
+      event->m_last_run_time = s_state.global_tick_counter;
+
+      // The cycles_late is only an indicator, it doesn't modify the cycles to execute.
+      event->m_callback(event->m_callback_param, ticks_to_execute, ticks_late);
+      if (event->m_active)
+      {
+        event->m_next_run_time = s_state.current_event_next_run_time;
+        SortEvent(event);
+      }
+
+      event = s_state.active_events_head;
+    }
+  } while (new_global_ticks > s_state.global_tick_counter);
+  s_state.current_event = nullptr;
+}
+
+void TimingEvents::RunEvents()
+{
+  DebugAssert(!s_state.current_event);
+  DebugAssert(CPU::GetPendingTicks() >= CPU::g_state.downcount);
+
+  do
+  {
+    const GlobalTicks new_global_ticks =
+      s_state.event_run_tick_counter + static_cast<GlobalTicks>(CPU::GetPendingTicks());
+    if (new_global_ticks >= s_state.active_events_head->m_next_run_time)
+    {
+      CPU::ResetPendingTicks();
+      CommitGlobalTicks(new_global_ticks);
+    }
 
     if (s_state.frame_done)
     {
       s_state.frame_done = false;

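Stripped of the emulator specifics, CommitGlobalTicks() above is a timestamp-ordered event loop: advance committed time to the earliest deadline, run everything that is due, reschedule, repeat until the target time is reached. A miniature with the same shape, using a priority queue in place of the sorted intrusive list (hypothetical events, not the DuckStation API):

#include <cstdint>
#include <cstdio>
#include <functional>
#include <queue>
#include <vector>

struct Event
{
  uint64_t next_run_time;
  uint32_t interval;
  const char* name;
  bool operator>(const Event& rhs) const { return next_run_time > rhs.next_run_time; }
};

int main()
{
  std::priority_queue<Event, std::vector<Event>, std::greater<Event>> events;
  events.push({30, 30, "timer"});
  events.push({100, 100, "vblank"});

  uint64_t global_ticks = 0;
  const uint64_t new_global_ticks = 200; // commit up to here

  while (events.top().next_run_time <= new_global_ticks)
  {
    Event ev = events.top();
    events.pop();
    global_ticks = ev.next_run_time; // committed time moves to the deadline
    std::printf("t=%llu: %s\n", static_cast<unsigned long long>(global_ticks), ev.name);
    ev.next_run_time += ev.interval; // periodic reschedule, as with m_interval
    events.push(ev);
  }
  global_ticks = new_global_ticks; // then commit the remainder
  return 0;
}
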
@@ -364,12 +388,30 @@ void TimingEvents::RunEvents()
   } while (CPU::GetPendingTicks() >= CPU::g_state.downcount);
 }
 
+void TimingEvents::CommitLeftoverTicks()
+{
+#ifdef _DEBUG
+  if (s_state.event_run_tick_counter > s_state.global_tick_counter)
+    WARNING_LOG("Late-running {} ticks before execution", s_state.event_run_tick_counter - s_state.global_tick_counter);
+#endif
+
+  CommitGlobalTicks(s_state.event_run_tick_counter);
+
+  if (CPU::HasPendingInterrupt())
+    CPU::DispatchInterrupt();
+
+  UpdateCPUDowncount();
+}
+
 bool TimingEvents::DoState(StateWrapper& sw)
 {
-  sw.Do(&s_state.global_tick_counter);
-
-  if (sw.IsReading())
+  if (sw.GetVersion() < 71) [[unlikely]]
   {
+    u32 old_global_tick_counter = 0;
+    sw.Do(&old_global_tick_counter);
+    s_state.global_tick_counter = static_cast<GlobalTicks>(old_global_tick_counter);
+    s_state.event_run_tick_counter = s_state.global_tick_counter;
+
     // Load timestamps for the clock events.
     // Any oneshot events should be recreated by the load state method, so we can fix up their times here.
     u32 event_count = 0;

@@ -394,9 +436,8 @@ bool TimingEvents::DoState(StateWrapper& sw)
         continue;
       }
 
-      // Using reschedule is safe here since we call sort afterwards.
-      event->m_downcount = downcount;
-      event->m_time_since_last_run = time_since_last_run;
+      event->m_next_run_time = s_state.global_tick_counter + static_cast<u32>(downcount);
+      event->m_last_run_time = s_state.global_tick_counter - static_cast<u32>(time_since_last_run);
       event->m_period = period;
       event->m_interval = interval;
     }

@@ -407,34 +448,84 @@ bool TimingEvents::DoState(StateWrapper& sw)
       sw.Do(&last_event_run_time);
     }
 
     DEBUG_LOG("Loaded {} events from save state.", event_count);
 
+    // Add pending ticks to the CPU, this'll happen if we saved state when we weren't paused.
+    const TickCount pending_ticks =
+      static_cast<TickCount>(s_state.event_run_tick_counter - s_state.global_tick_counter);
+    DebugAssert(pending_ticks >= 0);
+    CPU::AddPendingTicks(pending_ticks);
     SortEvents();
     UpdateCPUDowncount();
   }
   else
   {
-    sw.Do(&s_state.active_event_count);
-
-    for (TimingEvent* event = s_state.active_events_head; event; event = event->next)
-    {
-      sw.Do(&event->m_name);
-      sw.Do(&event->m_downcount);
-      sw.Do(&event->m_time_since_last_run);
-      sw.Do(&event->m_period);
-      sw.Do(&event->m_interval);
-    }
-
-    DEBUG_LOG("Wrote {} events to save state.", s_state.active_event_count);
+    sw.Do(&s_state.global_tick_counter);
+    sw.Do(&s_state.event_run_tick_counter);
+
+    if (sw.IsReading())
+    {
+      // Load timestamps for the clock events.
+      // Any oneshot events should be recreated by the load state method, so we can fix up their times here.
+      u32 event_count = 0;
+      sw.Do(&event_count);
+
+      for (u32 i = 0; i < event_count; i++)
+      {
+        TinyString event_name;
+        GlobalTicks next_run_time, last_run_time;
+        TickCount period, interval;
+        sw.Do(&event_name);
+        sw.Do(&next_run_time);
+        sw.Do(&last_run_time);
+        sw.Do(&period);
+        sw.Do(&interval);
+        if (sw.HasError())
+          return false;
+
+        TimingEvent* event = FindActiveEvent(event_name);
+        if (!event)
+        {
+          WARNING_LOG("Save state has event '{}', but couldn't find this event when loading.", event_name);
+          continue;
+        }
+
+        event->m_next_run_time = next_run_time;
+        event->m_last_run_time = last_run_time;
+        event->m_period = period;
+        event->m_interval = interval;
+      }
+
+      DEBUG_LOG("Loaded {} events from save state.", event_count);
+      SortEvents();
+    }
+    else
+    {
+      sw.Do(&s_state.active_event_count);
+
+      for (TimingEvent* event = s_state.active_events_head; event; event = event->next)
+      {
+        sw.Do(&event->m_name);
+        sw.Do(&event->m_next_run_time);
+        sw.Do(&event->m_last_run_time);
+        sw.Do(&event->m_period);
+        sw.Do(&event->m_interval);
+      }
+
+      DEBUG_LOG("Wrote {} events to save state.", s_state.active_event_count);
+    }
   }
 
   return !sw.HasError();
 }
 
 TimingEvent::TimingEvent(const std::string_view name, TickCount period, TickCount interval,
                          TimingEventCallback callback, void* callback_param)
-  : m_callback(callback), m_callback_param(callback_param), m_downcount(interval), m_time_since_last_run(0),
-    m_period(period), m_interval(interval), m_name(name)
+  : m_callback(callback), m_callback_param(callback_param), m_period(period), m_interval(interval), m_name(name)
 {
+  const GlobalTicks ts = TimingEvents::GetTimestampForNewEvent();
+  m_last_run_time = ts;
+  m_next_run_time = ts + static_cast<u32>(interval);
 }
 
 TimingEvent::~TimingEvent()

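Worth noting in the load path above: events are keyed by name rather than by position, which is what lets a state written by one build load in another where an event was removed or renamed. The lookup-and-fixup idea in miniature, with a plain map standing in for the active-event list:

#include <cstdint>
#include <cstdio>
#include <map>
#include <string>

struct SavedEvent
{
  std::string name;
  uint64_t next_run_time;
  uint64_t last_run_time;
};

int main()
{
  // Events registered by the running core (name -> deadline).
  std::map<std::string, uint64_t> active = {{"timer", 0}, {"vblank", 0}};

  // Events found in the save state; "old_event" no longer exists.
  const SavedEvent saved[] = {{"timer", 1050, 1000}, {"old_event", 99, 0}};

  for (const SavedEvent& se : saved)
  {
    const auto it = active.find(se.name);
    if (it == active.end())
    {
      // Same policy as the WARNING_LOG + continue above: skip, don't fail.
      std::printf("save state has event '%s', skipping\n", se.name.c_str());
      continue;
    }
    it->second = se.next_run_time;
  }
  return 0;
}
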
@@ -444,46 +535,51 @@ TimingEvent::~TimingEvent()
 
 TickCount TimingEvent::GetTicksSinceLastExecution() const
 {
-  return CPU::GetPendingTicks() + m_time_since_last_run;
+  // Can be negative if event A->B invoked B early while in the event loop.
+  const GlobalTicks ts = TimingEvents::GetTimestampForNewEvent();
+  return (ts >= m_last_run_time) ? static_cast<TickCount>(ts - m_last_run_time) : 0;
 }
 
 TickCount TimingEvent::GetTicksUntilNextExecution() const
 {
-  return std::max(m_downcount - CPU::GetPendingTicks(), static_cast<TickCount>(0));
+  const GlobalTicks ts = TimingEvents::GetTimestampForNewEvent();
+  return (ts >= m_next_run_time) ? 0 : static_cast<TickCount>(m_next_run_time - ts);
 }
 
 void TimingEvent::Delay(TickCount ticks)
 {
+  using namespace TimingEvents;
+
   if (!m_active)
   {
     Panic("Trying to delay an inactive event");
     return;
   }
 
-  m_downcount += ticks;
-
   DebugAssert(TimingEvents::s_state.current_event != this);
-  TimingEvents::SortEvent(this);
-  if (TimingEvents::s_state.active_events_head == this)
-    TimingEvents::UpdateCPUDowncount();
+
+  m_next_run_time += static_cast<u32>(ticks);
+  SortEvent(this);
+  if (s_state.active_events_head == this)
+    UpdateCPUDowncount();
 }
 
 void TimingEvent::Schedule(TickCount ticks)
 {
   using namespace TimingEvents;
 
-  const TickCount pending_ticks = CPU::GetPendingTicks();
-  const TickCount new_downcount = pending_ticks + ticks;
+  const GlobalTicks ts = GetTimestampForNewEvent();
+  const GlobalTicks next_run_time = ts + static_cast<u32>(ticks);
 
   // See note in RunEvents().
-  s_state.current_event_new_downcount =
-    (s_state.current_event == this) ? new_downcount : s_state.current_event_new_downcount;
+  s_state.current_event_next_run_time =
+    (s_state.current_event == this) ? next_run_time : s_state.current_event_next_run_time;
 
   if (!m_active)
   {
     // Event is going active, so we want it to only execute ticks from the current timestamp.
-    m_downcount = new_downcount;
-    m_time_since_last_run = -pending_ticks;
+    m_next_run_time = next_run_time;
+    m_last_run_time = ts;
     m_active = true;
     AddActiveEvent(this);
   }

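Under the timestamp scheme the two mutators above have simple meanings: Delay() pushes an existing deadline further out, while Schedule() re-bases it on the current timestamp. A compact model of just that semantics (not the real API, which also re-sorts the queue):

#include <cassert>
#include <cstdint>

struct ToyEvent
{
  uint64_t next_run_time = 0;
  uint64_t last_run_time = 0;
};

void Delay(ToyEvent& e, uint32_t ticks) { e.next_run_time += ticks; }

void Schedule(ToyEvent& e, uint64_t now, uint32_t ticks)
{
  e.next_run_time = now + ticks; // deadline measured from "now"
}

int main()
{
  ToyEvent e;
  Schedule(e, 1000, 50); // fires at 1050
  assert(e.next_run_time == 1050);
  Delay(e, 25); // pushed back to 1075
  assert(e.next_run_time == 1075);
  return 0;
}
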
@@ -493,7 +589,7 @@ void TimingEvent::Schedule(TickCount ticks)
   // If this is a call from an IO handler for example, re-sort the event queue.
   if (s_state.current_event != this)
   {
-    m_downcount = new_downcount;
+    m_next_run_time = next_run_time;
     SortEvent(this);
     if (s_state.active_events_head == this)
       UpdateCPUDowncount();

@@ -503,6 +599,7 @@ void TimingEvent::Schedule(TickCount ticks)
 
 void TimingEvent::SetIntervalAndSchedule(TickCount ticks)
 {
+  DebugAssert(ticks > 0);
   SetInterval(ticks);
   Schedule(ticks);
 }

@@ -516,51 +613,61 @@ void TimingEvent::SetPeriodAndSchedule(TickCount ticks)
 
 void TimingEvent::InvokeEarly(bool force /* = false */)
 {
   using namespace TimingEvents;
 
   if (!m_active)
     return;
 
-  const TickCount pending_ticks = CPU::GetPendingTicks();
-  const TickCount ticks_to_execute = m_time_since_last_run + pending_ticks;
-  if ((!force && ticks_to_execute < m_period) || ticks_to_execute <= 0)
+  // Might happen due to other InvokeEarly()'s mid event loop.
+  const GlobalTicks ts = GetTimestampForNewEvent();
+  if (ts <= m_last_run_time)
     return;
 
   // Shouldn't be invoking early when we're the current event running.
-  DebugAssert(TimingEvents::s_state.current_event != this);
+  // TODO: Make DebugAssert instead.
+  Assert(s_state.current_event != this);
 
-  m_downcount = pending_ticks + m_interval;
-  m_time_since_last_run -= ticks_to_execute;
-  m_callback(m_callback_param, ticks_to_execute, 0);
+  const TickCount ticks_to_execute = static_cast<TickCount>(ts - m_last_run_time);
+  if (!force && ticks_to_execute < m_period)
+    return;
+
+  m_next_run_time = ts + static_cast<u32>(m_interval);
+  m_last_run_time = ts;
 
   // Since we've changed the downcount, we need to re-sort the events.
-  TimingEvents::SortEvent(this);
-  if (TimingEvents::s_state.active_events_head == this)
-    TimingEvents::UpdateCPUDowncount();
+  SortEvent(this);
+  if (s_state.active_events_head == this)
+    UpdateCPUDowncount();
+
+  m_callback(m_callback_param, ticks_to_execute, 0);
 }
 
 void TimingEvent::Activate()
 {
   using namespace TimingEvents;
 
   if (m_active)
     return;
 
-  // leave the downcount intact
-  // if we're running events, this is going to be zero, so no effect
-  const TickCount pending_ticks = CPU::GetPendingTicks();
-  m_downcount += pending_ticks;
-  m_time_since_last_run -= pending_ticks;
+  const GlobalTicks ts = GetTimestampForNewEvent();
+  const GlobalTicks next_run_time = ts + static_cast<u32>(m_interval);
+  m_next_run_time = next_run_time;
+  m_last_run_time = ts;
+
+  s_state.current_event_next_run_time =
+    (s_state.current_event == this) ? next_run_time : s_state.current_event_next_run_time;
 
   m_active = true;
-  TimingEvents::AddActiveEvent(this);
+  AddActiveEvent(this);
 }
 
 void TimingEvent::Deactivate()
 {
   using namespace TimingEvents;
 
   if (!m_active)
     return;
 
-  const TickCount pending_ticks = CPU::GetPendingTicks();
-  m_downcount -= pending_ticks;
-  m_time_since_last_run += pending_ticks;
-
   m_active = false;
-  TimingEvents::RemoveActiveEvent(this);
+  RemoveActiveEvent(this);
 }

@@ -25,7 +25,6 @@ public:
   // Returns the number of ticks between each event.
   ALWAYS_INLINE TickCount GetPeriod() const { return m_period; }
   ALWAYS_INLINE TickCount GetInterval() const { return m_interval; }
-  ALWAYS_INLINE TickCount GetDowncount() const { return m_downcount; }
 
   // Includes pending time.
   TickCount GetTicksSinceLastExecution() const;

@@ -65,8 +64,9 @@ public:
   TimingEventCallback m_callback;
   void* m_callback_param;
 
-  TickCount m_downcount;
-  TickCount m_time_since_last_run;
+  GlobalTicks m_next_run_time = 0;
+  GlobalTicks m_last_run_time = 0;
 
   TickCount m_period;
   TickCount m_interval;
   bool m_active = false;

@@ -76,8 +76,8 @@ public:
 
 namespace TimingEvents {
 
-u32 GetGlobalTickCounter();
-u32 GetEventRunTickCounter();
+GlobalTicks GetGlobalTickCounter();
+GlobalTicks GetEventRunTickCounter();
 
 void Initialize();
 void Reset();

@@ -87,7 +87,9 @@ bool DoState(StateWrapper& sw);
 
 bool IsRunningEvents();
 void SetFrameDone();
+void CancelRunningEvent();
 void RunEvents();
+void CommitLeftoverTicks();
 
 void UpdateCPUDowncount();

@@ -21,6 +21,7 @@ enum class MemoryAccessSize : u32
 };
 
 using TickCount = s32;
+using GlobalTicks = u64;
 
 enum class ConsoleRegion
 {

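The one-line typedef carries the whole motivation for this commit: at the PSX master clock of 33,868,800 Hz, a 32-bit tick counter wraps after roughly two minutes of emulated time, while 64 bits outlasts any session. The arithmetic:

#include <cstdio>

int main()
{
  const double clock_hz = 33868800.0; // PSX master clock

  const double u32_seconds = 4294967296.0 / clock_hz;                      // 2^32 ticks
  const double u64_years = 18446744073709551616.0 / clock_hz / 31536000.0; // 2^64 ticks

  std::printf("u32 wraps after %.1f seconds\n", u32_seconds); // ~126.8
  std::printf("u64 wraps after %.0f years\n", u64_years);     // ~17,000
  return 0;
}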