mirror of https://git.suyu.dev/suyu/suyu
CoreTiming: Ported the CoreTiming namespace from PPSSPP

Implemented the required calls to make it work. CoreTiming: Added a new logging class Core_Timing.
This commit is contained in:
parent 6fad32ea98
commit 9bf82beb4c
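For orientation before the diff: a minimal sketch of how the CoreTiming interface introduced by this commit is meant to be driven. The event name "ExampleTick", the 10000-cycle period, and the two helper functions are illustrative only and not part of the commit; the calls (Init, RegisterEvent, ScheduleEvent) and the TimedCallback signature match the new core_timing.h shown further down.

#include "core/core_timing.h"

// Hypothetical periodic event: it reschedules itself each time it fires,
// subtracting cycles_late so the period stays stable (the same pattern the
// header comment in the diff below describes).
static void ExampleTick(u64 userdata, int cycles_late) {
    int event_type = static_cast<int>(userdata);
    CoreTiming::ScheduleEvent(10000 - cycles_late, event_type, userdata);
}

static void ExampleSetup() {
    CoreTiming::Init();
    int event_type = CoreTiming::RegisterEvent("ExampleTick", ExampleTick);
    CoreTiming::ScheduleEvent(10000, event_type, static_cast<u64>(event_type));
}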
@@ -22,6 +22,7 @@ static std::shared_ptr<Logger> global_logger;
SUB(Common, Memory) \
CLS(Core) \
SUB(Core, ARM11) \
SUB(Core, Timing) \
CLS(Config) \
CLS(Debug) \
SUB(Debug, Emulated) \

@@ -41,6 +41,7 @@ enum class Class : ClassType {
Common_Memory, ///< Memory mapping and management functions
Core, ///< LLE emulation core
Core_ARM11, ///< ARM11 CPU core
Core_Timing, ///< CoreTiming functions
Config, ///< Emulator configuration (including commandline)
Debug, ///< Debugging tools
Debug_Emulated, ///< Debug messages from the emulated programs

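The Core_Timing entries added above register a dedicated logging subclass under Core; the ported CoreTiming sources then use it through the existing logging macros, for example (taken from the core_timing.cpp hunk further down): LOG_CRITICAL(Core_Timing, "Savestate broken: an unregistered event was called.");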
@@ -103,6 +103,8 @@ public:
return num_instructions;
}

s64 down_count; ///< A decreasing counter of remaining cycles before the next event, decreased by the cpu run loop

protected:

/**

@@ -9,6 +9,8 @@
#include "core/arm/dyncom/arm_dyncom.h"
#include "core/arm/dyncom/arm_dyncom_interpreter.h"

#include "core/core_timing.h"

const static cpu_config_t s_arm11_cpu_info = {
"armv6", "arm11", 0x0007b000, 0x0007f000, NONCACHE
};

@@ -77,6 +79,9 @@ u64 ARM_DynCom::GetTicks() const {

void ARM_DynCom::AddTicks(u64 ticks) {
this->ticks += ticks;
down_count -= ticks;
if (down_count < 0)
CoreTiming::Advance();
}

void ARM_DynCom::ExecuteInstructions(int num_instructions) {

@@ -85,7 +90,8 @@ void ARM_DynCom::ExecuteInstructions(int num_instructions) {
// Dyncom only breaks on instruction dispatch. This only happens on every instruction when
// executing one instruction at a time. Otherwise, if a block is being executed, more
// instructions may actually be executed than specified.
ticks += InterpreterMainLoop(state.get());
unsigned ticks_executed = InterpreterMainLoop(state.get());
AddTicks(ticks_executed);
}

void ARM_DynCom::SaveContext(ThreadContext& ctx) {

@@ -1,16 +1,14 @@
// Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project
// Copyright (c) 2012- PPSSPP Project / Dolphin Project.
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <vector>
#include <cstdio>
#include <atomic>
#include <cstdio>
#include <mutex>
#include <vector>

#include "common/chunk_file.h"
#include "common/msg_handler.h"
#include "common/string_util.h"

#include "common/log.h"
#include "core/core.h"
#include "core/core_timing.h"

@@ -22,7 +20,6 @@ int g_clock_rate_arm11 = 268123480;

namespace CoreTiming
{

struct EventType
{
EventType() {}

@@ -41,262 +38,247 @@ struct BaseEvent
s64 time;
u64 userdata;
int type;
// Event *next;
};

typedef LinkedListItem<BaseEvent> Event;

Event* first;
Event *tsFirst;
Event *tsLast;
Event* ts_first;
Event* ts_last;

// event pools
Event *eventPool = 0;
Event *eventTsPool = 0;
int allocatedTsEvents = 0;
Event* event_pool = 0;
Event* event_ts_pool = 0;
int allocated_ts_events = 0;
// Optimization to skip MoveEvents when possible.
std::atomic<u32> hasTsEvents;
std::atomic<bool> has_ts_events(false);

// Downcount has been moved to currentMIPS, to save a couple of clocks in every ARM JIT block
// as we can already reach that structure through a register.
int slicelength;
int g_slice_length;

MEMORY_ALIGNED16(s64) globalTimer;
s64 idledCycles;
s64 global_timer;
s64 idled_cycles;
s64 last_global_time_ticks;
s64 last_global_time_us;

static std::recursive_mutex externalEventSection;
static std::recursive_mutex external_event_section;

// Warning: not included in save state.
void(*advanceCallback)(int cyclesExecuted) = nullptr;
using AdvanceCallback = void(int cycles_executed);
AdvanceCallback* advance_callback = nullptr;
std::vector<MHzChangeCallback> mhz_change_callbacks;

void SetClockFrequencyMHz(int cpuMhz)
{
g_clock_rate_arm11 = cpuMhz * 1000000;
// TODO: Rescale times of scheduled events?
void FireMhzChange() {
for (auto callback : mhz_change_callbacks)
callback();
}

int GetClockFrequencyMHz()
{
void SetClockFrequencyMHz(int cpu_mhz) {
// When the mhz changes, we keep track of what "time" it was before hand.
// This way, time always moves forward, even if mhz is changed.
last_global_time_us = GetGlobalTimeUs();
last_global_time_ticks = GetTicks();

g_clock_rate_arm11 = cpu_mhz * 1000000;
// TODO: Rescale times of scheduled events?

FireMhzChange();
}

int GetClockFrequencyMHz() {
return g_clock_rate_arm11 / 1000000;
}

u64 GetGlobalTimeUs() {
s64 ticks_since_last = GetTicks() - last_global_time_ticks;
int freq = GetClockFrequencyMHz();
s64 us_since_last = ticks_since_last / freq;
return last_global_time_us + us_since_last;
}

Event* GetNewEvent()
{
if (!eventPool)
Event* GetNewEvent() {
if (!event_pool)
return new Event;

Event* ev = eventPool;
eventPool = ev->next;
return ev;
Event* event = event_pool;
event_pool = event->next;
return event;
}

Event* GetNewTsEvent()
{
allocatedTsEvents++;
Event* GetNewTsEvent() {
allocated_ts_events++;

if (!eventTsPool)
if (!event_ts_pool)
return new Event;

Event* ev = eventTsPool;
eventTsPool = ev->next;
return ev;
Event* event = event_ts_pool;
event_ts_pool = event->next;
return event;
}

void FreeEvent(Event* ev)
{
ev->next = eventPool;
eventPool = ev;
void FreeEvent(Event* event) {
event->next = event_pool;
event_pool = event;
}

void FreeTsEvent(Event* ev)
{
ev->next = eventTsPool;
eventTsPool = ev;
allocatedTsEvents--;
void FreeTsEvent(Event* event) {
event->next = event_ts_pool;
event_ts_pool = event;
allocated_ts_events--;
}

int RegisterEvent(const char *name, TimedCallback callback)
{
int RegisterEvent(const char* name, TimedCallback callback) {
event_types.push_back(EventType(callback, name));
return (int)event_types.size() - 1;
}

void AntiCrashCallback(u64 userdata, int cyclesLate)
{
LOG_CRITICAL(Core, "Savestate broken: an unregistered event was called.");
void AntiCrashCallback(u64 userdata, int cycles_late) {
LOG_CRITICAL(Core_Timing, "Savestate broken: an unregistered event was called.");
Core::Halt("invalid timing events");
}

void RestoreRegisterEvent(int event_type, const char *name, TimedCallback callback)
{
void RestoreRegisterEvent(int event_type, const char* name, TimedCallback callback) {
if (event_type >= (int)event_types.size())
event_types.resize(event_type + 1, EventType(AntiCrashCallback, "INVALID EVENT"));

event_types[event_type] = EventType(callback, name);
}

void UnregisterAllEvents()
{
void UnregisterAllEvents() {
if (first)
PanicAlert("Cannot unregister events with events pending");
event_types.clear();
}

void Init()
{
//currentMIPS->downcount = INITIAL_SLICE_LENGTH;
//slicelength = INITIAL_SLICE_LENGTH;
globalTimer = 0;
idledCycles = 0;
hasTsEvents = 0;
void Init() {
Core::g_app_core->down_count = INITIAL_SLICE_LENGTH;
g_slice_length = INITIAL_SLICE_LENGTH;
global_timer = 0;
idled_cycles = 0;
last_global_time_ticks = 0;
last_global_time_us = 0;
has_ts_events = 0;
mhz_change_callbacks.clear();
}

void Shutdown()
{
void Shutdown() {
MoveEvents();
ClearPendingEvents();
UnregisterAllEvents();

while (eventPool)
{
Event *ev = eventPool;
eventPool = ev->next;
delete ev;
while (event_pool) {
Event* event = event_pool;
event_pool = event->next;
delete event;
}

std::lock_guard<std::recursive_mutex> lk(externalEventSection);
while (eventTsPool)
{
Event *ev = eventTsPool;
eventTsPool = ev->next;
delete ev;
std::lock_guard<std::recursive_mutex> lock(external_event_section);
while (event_ts_pool) {
Event* event = event_ts_pool;
event_ts_pool = event->next;
delete event;
}
}

u64 GetTicks()
{
LOG_ERROR(Core, "Unimplemented function!");
return 0;
//return (u64)globalTimer + slicelength - currentMIPS->downcount;
u64 GetTicks() {
return (u64)global_timer + g_slice_length - Core::g_app_core->down_count;
}

u64 GetIdleTicks()
{
return (u64)idledCycles;
u64 GetIdleTicks() {
return (u64)idled_cycles;
}

// This is to be called when outside threads, such as the graphics thread, wants to
// schedule things to be executed on the main thread.
void ScheduleEvent_Threadsafe(s64 cyclesIntoFuture, int event_type, u64 userdata)
{
std::lock_guard<std::recursive_mutex> lk(externalEventSection);
Event *ne = GetNewTsEvent();
ne->time = GetTicks() + cyclesIntoFuture;
ne->type = event_type;
ne->next = 0;
ne->userdata = userdata;
if (!tsFirst)
tsFirst = ne;
if (tsLast)
tsLast->next = ne;
tsLast = ne;
void ScheduleEvent_Threadsafe(s64 cycles_into_future, int event_type, u64 userdata) {
std::lock_guard<std::recursive_mutex> lock(external_event_section);
Event* new_event = GetNewTsEvent();
new_event->time = GetTicks() + cycles_into_future;
new_event->type = event_type;
new_event->next = 0;
new_event->userdata = userdata;
if (!ts_first)
ts_first = new_event;
if (ts_last)
ts_last->next = new_event;
ts_last = new_event;

hasTsEvents.store(1, std::memory_order_release);
has_ts_events = true;
}

// Same as ScheduleEvent_Threadsafe(0, ...) EXCEPT if we are already on the CPU thread
// in which case the event will get handled immediately, before returning.
void ScheduleEvent_Threadsafe_Immediate(int event_type, u64 userdata)
{
void ScheduleEvent_Threadsafe_Immediate(int event_type, u64 userdata) {
if (false) //Core::IsCPUThread())
{
std::lock_guard<std::recursive_mutex> lk(externalEventSection);
std::lock_guard<std::recursive_mutex> lock(external_event_section);
event_types[event_type].callback(userdata, 0);
}
else
ScheduleEvent_Threadsafe(0, event_type, userdata);
}

void ClearPendingEvents()
{
while (first)
{
Event *e = first->next;
void ClearPendingEvents() {
while (first) {
Event* event = first->next;
FreeEvent(first);
first = e;
first = event;
}
}

void AddEventToQueue(Event* ne)
{
Event* prev = nullptr;
Event** pNext = &first;
for (;;)
{
Event*& next = *pNext;
if (!next || ne->time < next->time)
{
ne->next = next;
next = ne;
void AddEventToQueue(Event* new_event) {
Event* prev_event = nullptr;
Event** next_event = &first;
for (;;) {
Event*& next = *next_event;
if (!next || new_event->time < next->time) {
new_event->next = next;
next = new_event;
break;
}
prev = next;
pNext = &prev->next;
prev_event = next;
next_event = &prev_event->next;
}
}

// This must be run ONLY from within the cpu thread
// cyclesIntoFuture may be VERY inaccurate if called from anything else
// than Advance
void ScheduleEvent(s64 cyclesIntoFuture, int event_type, u64 userdata)
{
Event *ne = GetNewEvent();
ne->userdata = userdata;
ne->type = event_type;
ne->time = GetTicks() + cyclesIntoFuture;
AddEventToQueue(ne);
void ScheduleEvent(s64 cycles_into_future, int event_type, u64 userdata) {
Event* new_event = GetNewEvent();
new_event->userdata = userdata;
new_event->type = event_type;
new_event->time = GetTicks() + cycles_into_future;
AddEventToQueue(new_event);
}

// Returns cycles left in timer.
s64 UnscheduleEvent(int event_type, u64 userdata)
{
s64 UnscheduleEvent(int event_type, u64 userdata) {
s64 result = 0;
if (!first)
return result;
while (first)
{
if (first->type == event_type && first->userdata == userdata)
{
result = first->time - globalTimer;
while (first) {
if (first->type == event_type && first->userdata == userdata) {
result = first->time - GetTicks();

Event* next = first->next;
FreeEvent(first);
first = next;
}
else
{
} else {
break;
}
}
if (!first)
return result;
Event *prev = first;
Event *ptr = prev->next;
while (ptr)
{
if (ptr->type == event_type && ptr->userdata == userdata)
{
result = ptr->time - globalTimer;

prev->next = ptr->next;
Event* prev_event = first;
Event* ptr = prev_event->next;

while (ptr) {
if (ptr->type == event_type && ptr->userdata == userdata) {
result = ptr->time - GetTicks();

prev_event->next = ptr->next;
FreeEvent(ptr);
ptr = prev->next;
}
else
{
prev = ptr;
ptr = prev_event->next;
} else {
prev_event = ptr;
ptr = ptr->next;
}
}

@@ -304,51 +286,44 @@ s64 UnscheduleEvent(int event_type, u64 userdata)
return result;
}

s64 UnscheduleThreadsafeEvent(int event_type, u64 userdata)
{
s64 UnscheduleThreadsafeEvent(int event_type, u64 userdata) {
s64 result = 0;
std::lock_guard<std::recursive_mutex> lk(externalEventSection);
if (!tsFirst)
std::lock_guard<std::recursive_mutex> lock(external_event_section);
if (!ts_first)
return result;
while (tsFirst)
{
if (tsFirst->type == event_type && tsFirst->userdata == userdata)
{
result = tsFirst->time - globalTimer;

Event *next = tsFirst->next;
FreeTsEvent(tsFirst);
tsFirst = next;
}
else
{
while (ts_first) {
if (ts_first->type == event_type && ts_first->userdata == userdata) {
result = ts_first->time - GetTicks();

Event* next = ts_first->next;
FreeTsEvent(ts_first);
ts_first = next;
} else {
break;
}
}
if (!tsFirst)

if (!ts_first)
{
tsLast = nullptr;
ts_last = nullptr;
return result;
}

Event *prev = tsFirst;
Event *ptr = prev->next;
while (ptr)
{
if (ptr->type == event_type && ptr->userdata == userdata)
{
result = ptr->time - globalTimer;
Event* prev_event = ts_first;
Event* next = prev_event->next;
while (next) {
if (next->type == event_type && next->userdata == userdata) {
result = next->time - GetTicks();

prev->next = ptr->next;
if (ptr == tsLast)
tsLast = prev;
FreeTsEvent(ptr);
ptr = prev->next;
}
else
{
prev = ptr;
ptr = ptr->next;
prev_event->next = next->next;
if (next == ts_last)
ts_last = prev_event;
FreeTsEvent(next);
next = prev_event->next;
} else {
prev_event = next;
next = next->next;
}
}

@@ -356,271 +331,217 @@ s64 UnscheduleThreadsafeEvent(int event_type, u64 userdata)
}

// Warning: not included in save state.
void RegisterAdvanceCallback(void(*callback)(int cyclesExecuted))
{
advanceCallback = callback;
void RegisterAdvanceCallback(AdvanceCallback* callback) {
advance_callback = callback;
}

bool IsScheduled(int event_type)
{
void RegisterMHzChangeCallback(MHzChangeCallback callback) {
mhz_change_callbacks.push_back(callback);
}

bool IsScheduled(int event_type) {
if (!first)
return false;
Event *e = first;
while (e) {
if (e->type == event_type)
Event* event = first;
while (event) {
if (event->type == event_type)
return true;
e = e->next;
event = event->next;
}
return false;
}

void RemoveEvent(int event_type)
{
void RemoveEvent(int event_type) {
if (!first)
return;
while (first)
{
if (first->type == event_type)
{
while (first) {
if (first->type == event_type) {
Event *next = first->next;
FreeEvent(first);
first = next;
}
else
{
} else {
break;
}
}
if (!first)
return;
Event* prev = first;
Event *ptr = prev->next;
while (ptr)
{
if (ptr->type == event_type)
{
prev->next = ptr->next;
FreeEvent(ptr);
ptr = prev->next;
}
else
{
prev = ptr;
ptr = ptr->next;
Event* next = prev->next;
while (next) {
if (next->type == event_type) {
prev->next = next->next;
FreeEvent(next);
next = prev->next;
} else {
prev = next;
next = next->next;
}
}
}

void RemoveThreadsafeEvent(int event_type)
{
std::lock_guard<std::recursive_mutex> lk(externalEventSection);
if (!tsFirst)
{
void RemoveThreadsafeEvent(int event_type) {
std::lock_guard<std::recursive_mutex> lock(external_event_section);
if (!ts_first)
return;
}
while (tsFirst)
{
if (tsFirst->type == event_type)
{
Event *next = tsFirst->next;
FreeTsEvent(tsFirst);
tsFirst = next;
}
else
{

while (ts_first) {
if (ts_first->type == event_type) {
Event* next = ts_first->next;
FreeTsEvent(ts_first);
ts_first = next;
} else {
break;
}
}
if (!tsFirst)
{
tsLast = nullptr;

if (!ts_first) {
ts_last = nullptr;
return;
}
Event *prev = tsFirst;
Event *ptr = prev->next;
while (ptr)
{
if (ptr->type == event_type)
{
prev->next = ptr->next;
if (ptr == tsLast)
tsLast = prev;
FreeTsEvent(ptr);
ptr = prev->next;
}
else
{
prev = ptr;
ptr = ptr->next;

Event* prev = ts_first;
Event* next = prev->next;
while (next) {
if (next->type == event_type) {
prev->next = next->next;
if (next == ts_last)
ts_last = prev;
FreeTsEvent(next);
next = prev->next;
} else {
prev = next;
next = next->next;
}
}
}

void RemoveAllEvents(int event_type)
{
void RemoveAllEvents(int event_type) {
RemoveThreadsafeEvent(event_type);
RemoveEvent(event_type);
}

// This raise only the events required while the fifo is processing data
void ProcessFifoWaitEvents()
{
while (first)
{
if (first->time <= globalTimer)
{
//LOG(TIMER, "[Scheduler] %s (%lld, %lld) ",
// first->name ? first->name : "?", (u64)globalTimer, (u64)first->time);
void ProcessFifoWaitEvents() {
while (first) {
if (first->time <= (s64)GetTicks()) {
Event* evt = first;
first = first->next;
event_types[evt->type].callback(evt->userdata, (int)(globalTimer - evt->time));
event_types[evt->type].callback(evt->userdata, (int)(GetTicks() - evt->time));
FreeEvent(evt);
}
else
{
} else {
break;
}
}
}

void MoveEvents()
{
hasTsEvents.store(0, std::memory_order_release);
void MoveEvents() {
has_ts_events = false;

std::lock_guard<std::recursive_mutex> lk(externalEventSection);
std::lock_guard<std::recursive_mutex> lock(external_event_section);
// Move events from async queue into main queue
while (tsFirst)
{
Event *next = tsFirst->next;
AddEventToQueue(tsFirst);
tsFirst = next;
while (ts_first) {
Event* next = ts_first->next;
AddEventToQueue(ts_first);
ts_first = next;
}
tsLast = nullptr;
ts_last = nullptr;

// Move free events to threadsafe pool
while (allocatedTsEvents > 0 && eventPool)
{
Event *ev = eventPool;
eventPool = ev->next;
ev->next = eventTsPool;
eventTsPool = ev;
allocatedTsEvents--;
while (allocated_ts_events > 0 && event_pool) {
Event* event = event_pool;
event_pool = event->next;
event->next = event_ts_pool;
event_ts_pool = event;
allocated_ts_events--;
}
}

void Advance()
{
LOG_ERROR(Core, "Unimplemented function!");
//int cyclesExecuted = slicelength - currentMIPS->downcount;
//globalTimer += cyclesExecuted;
//currentMIPS->downcount = slicelength;

//if (Common::AtomicLoadAcquire(hasTsEvents))
// MoveEvents();
//ProcessFifoWaitEvents();

//if (!first)
//{
// // WARN_LOG(TIMER, "WARNING - no events in queue. Setting currentMIPS->downcount to 10000");
// currentMIPS->downcount += 10000;
//}
//else
//{
// slicelength = (int)(first->time - globalTimer);
// if (slicelength > MAX_SLICE_LENGTH)
// slicelength = MAX_SLICE_LENGTH;
// currentMIPS->downcount = slicelength;
//}
//if (advanceCallback)
// advanceCallback(cyclesExecuted);
void ForceCheck() {
int cycles_executed = g_slice_length - Core::g_app_core->down_count;
global_timer += cycles_executed;
// This will cause us to check for new events immediately.
Core::g_app_core->down_count = 0;
// But let's not eat a bunch more time in Advance() because of this.
g_slice_length = 0;
}

void LogPendingEvents()
{
Event *ptr = first;
while (ptr)
{
//INFO_LOG(TIMER, "PENDING: Now: %lld Pending: %lld Type: %d", globalTimer, ptr->time, ptr->type);
ptr = ptr->next;
void Advance() {
int cycles_executed = g_slice_length - Core::g_app_core->down_count;
global_timer += cycles_executed;
Core::g_app_core->down_count = g_slice_length;

if (has_ts_events)
MoveEvents();
ProcessFifoWaitEvents();

if (!first) {
if (g_slice_length < 10000) {
g_slice_length += 10000;
Core::g_app_core->down_count += g_slice_length;
}
} else {
// Note that events can eat cycles as well.
int target = (int)(first->time - global_timer);
if (target > MAX_SLICE_LENGTH)
target = MAX_SLICE_LENGTH;

const int diff = target - g_slice_length;
g_slice_length += diff;
Core::g_app_core->down_count += diff;
}
if (advance_callback)
advance_callback(cycles_executed);
}

void LogPendingEvents() {
Event* event = first;
while (event) {
//LOG_TRACE(Core_Timing, "PENDING: Now: %lld Pending: %lld Type: %d", globalTimer, next->time, next->type);
event = event->next;
}
}

void Idle(int maxIdle)
{
LOG_ERROR(Core, "Unimplemented function!");
//int cyclesDown = currentMIPS->downcount;
//if (maxIdle != 0 && cyclesDown > maxIdle)
// cyclesDown = maxIdle;
void Idle(int max_idle) {
int cycles_down = Core::g_app_core->down_count;
if (max_idle != 0 && cycles_down > max_idle)
cycles_down = max_idle;

//if (first && cyclesDown > 0)
//{
// int cyclesExecuted = slicelength - currentMIPS->downcount;
// int cyclesNextEvent = (int) (first->time - globalTimer);
if (first && cycles_down > 0) {
int cycles_executed = g_slice_length - Core::g_app_core->down_count;
int cycles_next_event = (int)(first->time - global_timer);

// if (cyclesNextEvent < cyclesExecuted + cyclesDown)
// {
// cyclesDown = cyclesNextEvent - cyclesExecuted;
// // Now, now... no time machines, please.
// if (cyclesDown < 0)
// cyclesDown = 0;
// }
//}

//INFO_LOG(TIME, "Idle for %i cycles! (%f ms)", cyclesDown, cyclesDown / (float)(g_clock_rate_arm11 * 0.001f));

//idledCycles += cyclesDown;
//currentMIPS->downcount -= cyclesDown;
//if (currentMIPS->downcount == 0)
// currentMIPS->downcount = -1;
if (cycles_next_event < cycles_executed + cycles_down) {
cycles_down = cycles_next_event - cycles_executed;
// Now, now... no time machines, please.
if (cycles_down < 0)
cycles_down = 0;
}
}

std::string GetScheduledEventsSummary()
{
Event *ptr = first;
LOG_TRACE(Core_Timing, "Idle for %i cycles! (%f ms)", cycles_down, cycles_down / (float)(g_clock_rate_arm11 * 0.001f));

idled_cycles += cycles_down;
Core::g_app_core->down_count -= cycles_down;
if (Core::g_app_core->down_count == 0)
Core::g_app_core->down_count = -1;
}

std::string GetScheduledEventsSummary() {
Event* event = first;
std::string text = "Scheduled events\n";
text.reserve(1000);
while (ptr)
{
unsigned int t = ptr->type;
while (event) {
unsigned int t = event->type;
if (t >= event_types.size())
PanicAlert("Invalid event type"); // %i", t);
const char *name = event_types[ptr->type].name;
const char* name = event_types[event->type].name;
if (!name)
name = "[unknown]";

text += Common::StringFromFormat("%s : %i %08x%08x\n", name, (int)ptr->time,
(u32)(ptr->userdata >> 32), (u32)(ptr->userdata));

ptr = ptr->next;
text += Common::StringFromFormat("%s : %i %08x%08x\n", name, (int)event->time,
(u32)(event->userdata >> 32), (u32)(event->userdata));
event = event->next;
}
return text;
}

void Event_DoState(PointerWrap &p, BaseEvent *ev)
{
p.Do(*ev);
}

void DoState(PointerWrap &p)
{
std::lock_guard<std::recursive_mutex> lk(externalEventSection);

auto s = p.Section("CoreTiming", 1);
if (!s)
return;

int n = (int)event_types.size();
p.Do(n);
// These (should) be filled in later by the modules.
event_types.resize(n, EventType(AntiCrashCallback, "INVALID EVENT"));

p.DoLinkedList<BaseEvent, GetNewEvent, FreeEvent, Event_DoState>(first, (Event **)nullptr);
p.DoLinkedList<BaseEvent, GetNewTsEvent, FreeTsEvent, Event_DoState>(tsFirst, &tsLast);

p.Do(g_clock_rate_arm11);
p.Do(slicelength);
p.Do(globalTimer);
p.Do(idledCycles);
}

} // namespace

@@ -1,9 +1,11 @@
// Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project
// Copyright (c) 2012- PPSSPP Project / Dolphin Project.
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <string>

// This is a system to schedule events into the emulated machine's future. Time is measured
// in main CPU clock cycles.

@@ -12,15 +14,15 @@

// See HW/SystemTimers.cpp for the main part of Dolphin's usage of this scheduler.

// The int cyclesLate that the callbacks get is how many cycles late it was.
// The int cycles_late that the callbacks get is how many cycles late it was.
// So to schedule a new event on a regular basis:
// inside callback:
// ScheduleEvent(periodInCycles - cyclesLate, callback, "whatever")
// ScheduleEvent(periodInCycles - cycles_late, callback, "whatever")

#include <functional>

#include "common/common.h"

class PointerWrap;

extern int g_clock_rate_arm11;

inline s64 msToCycles(int ms) {

@@ -55,55 +57,84 @@ inline s64 cyclesToUs(s64 cycles) {
return cycles / (g_clock_rate_arm11 / 1000000);
}

namespace CoreTiming {
inline u64 cyclesToMs(s64 cycles) {
return cycles / (g_clock_rate_arm11 / 1000);
}

namespace CoreTiming
{
void Init();
void Shutdown();

typedef void(*TimedCallback)(u64 userdata, int cyclesLate);
typedef void(*MHzChangeCallback)();
typedef std::function<void(u64 userdata, int cycles_late)> TimedCallback;

u64 GetTicks();
u64 GetIdleTicks();
u64 GetGlobalTimeUs();

// Returns the event_type identifier.
/**
* Registers an event type with the specified name and callback
* @param name Name of the event type
* @param callback Function that will execute when this event fires
* @returns An identifier for the event type that was registered
*/
int RegisterEvent(const char* name, TimedCallback callback);
// For save states.
/// For save states.
void RestoreRegisterEvent(int event_type, const char *name, TimedCallback callback);
void UnregisterAllEvents();

// userdata MAY NOT CONTAIN POINTERS. userdata might get written and reloaded from disk,
// when we implement state saves.
void ScheduleEvent(s64 cyclesIntoFuture, int event_type, u64 userdata = 0);
void ScheduleEvent_Threadsafe(s64 cyclesIntoFuture, int event_type, u64 userdata = 0);
/// userdata MAY NOT CONTAIN POINTERS. userdata might get written and reloaded from disk,
/// when we implement state saves.
/**
* Schedules an event to run after the specified number of cycles,
* with an optional parameter to be passed to the callback handler.
* This must be run ONLY from within the cpu thread.
* @param cycles_into_future The number of cycles after which this event will be fired
* @param event_type The event type to fire, as returned from RegisterEvent
* @param userdata Optional parameter to pass to the callback when fired
*/
void ScheduleEvent(s64 cycles_into_future, int event_type, u64 userdata = 0);

void ScheduleEvent_Threadsafe(s64 cycles_into_future, int event_type, u64 userdata = 0);
void ScheduleEvent_Threadsafe_Immediate(int event_type, u64 userdata = 0);

/**
* Unschedules an event with the specified type and userdata
* @param event_type The type of event to unschedule, as returned from RegisterEvent
* @param userdata The userdata that identifies this event, as passed to ScheduleEvent
* @returns The remaining ticks until the next invocation of the event callback
*/
s64 UnscheduleEvent(int event_type, u64 userdata);

s64 UnscheduleThreadsafeEvent(int event_type, u64 userdata);

void RemoveEvent(int event_type);
void RemoveThreadsafeEvent(int event_type);
void RemoveAllEvents(int event_type);
bool IsScheduled(int event_type);
/// Runs any pending events and updates downcount for the next slice of cycles
void Advance();
void MoveEvents();
void ProcessFifoWaitEvents();
void ForceCheck();

// Pretend that the main CPU has executed enough cycles to reach the next event.
/// Pretend that the main CPU has executed enough cycles to reach the next event.
void Idle(int maxIdle = 0);

// Clear all pending events. This should ONLY be done on exit or state load.
/// Clear all pending events. This should ONLY be done on exit or state load.
void ClearPendingEvents();

void LogPendingEvents();

// Warning: not included in save states.
void RegisterAdvanceCallback(void(*callback)(int cyclesExecuted));
/// Warning: not included in save states.
void RegisterAdvanceCallback(void(*callback)(int cycles_executed));
void RegisterMHzChangeCallback(MHzChangeCallback callback);

std::string GetScheduledEventsSummary();

void DoState(PointerWrap &p);

void SetClockFrequencyMHz(int cpuMhz);
void SetClockFrequencyMHz(int cpu_mhz);
int GetClockFrequencyMHz();
extern int slicelength;
extern int g_slice_length;

} // namespace
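A closing hedged sketch of the cross-thread path this port exposes: a thread other than the CPU thread (the comments above mention the graphics thread) queues work with ScheduleEvent_Threadsafe, and the CPU thread picks it up the next time Advance() runs, since Advance() calls MoveEvents() when has_ts_events is set. The cycle count and parameter names below are placeholders, not values from the commit.

#include "core/core_timing.h"

// Hypothetical helper called from a worker thread. It only touches the
// threadsafe (ts_*) queue, which is guarded by external_event_section, so it
// never reads or writes CPU-thread scheduler state directly.
void QueueFromWorkerThread(int event_type, u64 userdata) {
    CoreTiming::ScheduleEvent_Threadsafe(/*cycles_into_future=*/5000, event_type, userdata);
}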