Make CoreTiming's threadsafe events lock-free.

Not sure if this actually helps in practice, but might help in
pathological cases, and almost certainly can't hurt.
comex 2013-08-19 15:16:01 -04:00
parent 7fe440340f
commit c3b9f3556f
2 changed files with 83 additions and 127 deletions
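The diff below removes the shared recursive mutex between the threads that schedule "threadsafe" events and the CPU thread that consumes them. Common::FifoQueue becomes a single-producer/single-consumer linked queue: the producer only ever touches the write pointer, the consumer only the read pointer, and a node's next pointer is the one field that crosses threads, written last by the producer. A minimal sketch of that handoff, using std::atomic in place of the volatile pointers and Common::Atomic* helpers the actual code uses (names here are illustrative, not Dolphin's):

// Illustrative sketch only, not the committed code: the same dummy-node
// handoff, expressed with std::atomic instead of volatile.
#include <atomic>

template <typename T>
class SpscQueue
{
public:
    SpscQueue() { m_write = m_read = new Node(); }

    ~SpscQueue()
    {
        while (m_read)
        {
            Node* next = m_read->next.load(std::memory_order_relaxed);
            delete m_read;
            m_read = next;
        }
    }

    // Producer only: fill the current dummy node, then publish a fresh dummy
    // by storing 'next' last (release), which makes the value visible.
    void Push(const T& value)
    {
        m_write->value = value;
        Node* fresh = new Node();
        m_write->next.store(fresh, std::memory_order_release);
        m_write = fresh;
    }

    // Consumer only: once m_read->next is non-null, m_read's value is
    // complete, so it can be consumed without taking any lock.
    bool Pop(T& out)
    {
        Node* next = m_read->next.load(std::memory_order_acquire);
        if (!next)
            return false;
        out = m_read->value;
        delete m_read;
        m_read = next;
        return true;
    }

private:
    struct Node
    {
        Node() : next(nullptr) {}
        T value;
        std::atomic<Node*> next;
    };

    Node* m_write;  // touched only by the producer
    Node* m_read;   // touched only by the consumer
};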

FifoQueue.h

@@ -10,7 +10,7 @@
 namespace Common
 {
 
-template <typename T>
+template <typename T, bool NeedSize = true>
 class FifoQueue
 {
 public:
@@ -27,37 +27,39 @@ public:
 
     u32 Size() const
     {
+        static_assert(NeedSize, "using Size() on FifoQueue without NeedSize");
         return m_size;
     }
 
     bool Empty() const
     {
-        //return (m_read_ptr == m_write_ptr);
-        return (0 == m_size);
+        return !m_read_ptr->next;
     }
 
     T& Front() const
     {
-        return *m_read_ptr->current;
+        return m_read_ptr->current;
     }
 
     template <typename Arg>
     void Push(Arg&& t)
     {
         // create the element, add it to the queue
-        m_write_ptr->current = new T(std::forward<Arg>(t));
+        m_write_ptr->current = std::move(t);
         // set the next pointer to a new element ptr
         // then advance the write pointer
         m_write_ptr = m_write_ptr->next = new ElementPtr();
-        Common::AtomicIncrement(m_size);
+        if (NeedSize)
+            Common::AtomicIncrement(m_size);
     }
 
     void Pop()
     {
-        Common::AtomicDecrement(m_size);
-        ElementPtr *const tmpptr = m_read_ptr;
+        if (NeedSize)
+            Common::AtomicDecrement(m_size);
+        ElementPtr *tmpptr = m_read_ptr;
         // advance the read pointer
-        m_read_ptr = m_read_ptr->next;
+        m_read_ptr = tmpptr->next;
         // set the next element to NULL to stop the recursive deletion
         tmpptr->next = NULL;
         delete tmpptr;  // this also deletes the element
@@ -82,31 +84,63 @@ public:
         m_write_ptr = m_read_ptr = new ElementPtr();
     }
 
+private:
+    class ElementPtr;
+
+public:
+    class iterator
+    {
+    public:
+        iterator() {}
+        bool operator==(iterator other) { return other.m_pp == m_pp; }
+        bool operator!=(iterator other) { return !(*this == other); }
+        T *operator->() { return &**this; }
+        T& operator*() { return (*m_pp)->current; }
+        void operator++() { m_pp = &(*m_pp)->next; }
+    protected:
+        iterator(ElementPtr *volatile *pp) : m_pp(pp) {}
+        ElementPtr *volatile *m_pp;
+        friend class FifoQueue<T, NeedSize>;
+    };
+
+    iterator begin()
+    {
+        return iterator(&m_read_ptr);
+    }
+
+    iterator end()
+    {
+        return iterator(&m_write_ptr->next);
+    }
+
+    iterator erase(iterator itr)
+    {
+        ElementPtr *elp = *itr.m_pp;
+        *itr.m_pp = elp->next;
+        delete elp;
+        return itr;
+    }
+
 private:
     // stores a pointer to element
     // and a pointer to the next ElementPtr
     class ElementPtr
     {
     public:
-        ElementPtr() : current(NULL), next(NULL) {}
+        ElementPtr() : next(NULL) {}
 
         ~ElementPtr()
         {
-            if (current)
-            {
-                delete current;
-                // recusion ftw
-                if (next)
-                    delete next;
-            }
+            if (next)
+                delete next;
         }
 
-        T *volatile current;
+        T current;
         ElementPtr *volatile next;
     };
 
-    ElementPtr *volatile m_write_ptr;
-    ElementPtr *volatile m_read_ptr;
+    ElementPtr *m_write_ptr;
+    ElementPtr *m_read_ptr;
     volatile u32 m_size;
 };
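The second template parameter lets a caller that never needs Size() skip the atomic counter, and begin()/end()/erase() let the writer prune queued entries while holding its own lock, which is how RemoveThreadsafeEvent uses them below. A small usage sketch, assuming Dolphin's Common headers are available; the functions here are hypothetical, and Pop(T&) is the pre-existing overload that MoveEvents() calls below:

// Hypothetical caller, mirroring the CoreTiming.cpp pattern below:
// writers serialize against each other on their own mutex, while a single
// reader drains the queue without taking any lock.
#include <mutex>
#include "FifoQueue.h"

static Common::FifoQueue<int, false> s_queue;  // NeedSize = false: Size() is now a compile error
static std::mutex s_write_lock;

void PushValue(int v)   // any thread
{
    std::lock_guard<std::mutex> lk(s_write_lock);
    s_queue.Push(v);
}

void DrainValues()      // single consumer thread, no lock
{
    int v;
    while (s_queue.Pop(v))
    {
        // handle v
    }
}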

CoreTiming.cpp

@@ -10,6 +10,7 @@
 #include "Core.h"
 #include "StringUtil.h"
 #include "VideoBackendBase.h"
+#include "FifoQueue.h"
 
 #define MAX_SLICE_LENGTH 20000
@@ -29,20 +30,17 @@ struct BaseEvent
     s64 time;
     u64 userdata;
     int type;
-//  Event *next;
 };
 
 typedef LinkedListItem<BaseEvent> Event;
 
 // STATE_TO_SAVE
-Event *first;
-Event *tsFirst;
-Event *tsLast;
+static Event *first;
+static std::mutex tsWriteLock;
+Common::FifoQueue<BaseEvent, false> tsQueue;
 
 // event pools
 Event *eventPool = 0;
-Event *eventTsPool = 0;
-int allocatedTsEvents = 0;
 
 int downcount, slicelength;
 int maxSliceLength = MAX_SLICE_LENGTH;
@@ -57,7 +55,6 @@ u64 fakeTBStartTicks;
 
 int ev_lost;
 
-static std::recursive_mutex externalEventSection;
 
 void (*advanceCallback)(int cyclesExecuted) = NULL;
@@ -71,31 +68,12 @@ Event* GetNewEvent()
     return ev;
 }
 
-Event* GetNewTsEvent()
-{
-    allocatedTsEvents++;
-
-    if(!eventTsPool)
-        return new Event;
-
-    Event* ev = eventTsPool;
-    eventTsPool = ev->next;
-    return ev;
-}
-
 void FreeEvent(Event* ev)
 {
     ev->next = eventPool;
     eventPool = ev;
 }
 
-void FreeTsEvent(Event* ev)
-{
-    ev->next = eventTsPool;
-    eventTsPool = ev;
-    allocatedTsEvents--;
-}
-
 static void EmptyTimedCallback(u64 userdata, int cyclesLate) {}
 
 int RegisterEvent(const char *name, TimedCallback callback)
@@ -141,6 +119,7 @@ void Init()
 
 void Shutdown()
 {
+    std::lock_guard<std::mutex> lk(tsWriteLock);
     MoveEvents();
     ClearPendingEvents();
     UnregisterAllEvents();
@@ -151,14 +130,6 @@ void Shutdown()
         eventPool = ev->next;
         delete ev;
     }
-
-    std::lock_guard<std::recursive_mutex> lk(externalEventSection);
-    while(eventTsPool)
-    {
-        Event *ev = eventTsPool;
-        eventTsPool = ev->next;
-        delete ev;
-    }
 }
 
 void EventDoState(PointerWrap &p, BaseEvent* ev)
@@ -197,7 +168,7 @@ void EventDoState(PointerWrap &p, BaseEvent* ev)
 
 void DoState(PointerWrap &p)
 {
-    std::lock_guard<std::recursive_mutex> lk(externalEventSection);
+    std::lock_guard<std::mutex> lk(tsWriteLock);
     p.Do(downcount);
     p.Do(slicelength);
     p.Do(globalTimer);
@@ -208,11 +179,10 @@ void DoState(PointerWrap &p)
     p.Do(fakeTBStartTicks);
     p.DoMarker("CoreTimingData");
 
+    MoveEvents();
     p.DoLinkedList<BaseEvent, GetNewEvent, FreeEvent, EventDoState>(first);
     p.DoMarker("CoreTimingEvents");
 
-    p.DoLinkedList<BaseEvent, GetNewTsEvent, FreeTsEvent, EventDoState>(tsFirst, &tsLast);
-    p.DoMarker("CoreTimingTsEvents");
 }
 
 u64 GetTicks()
@@ -229,17 +199,12 @@ u64 GetIdleTicks()
 // schedule things to be executed on the main thread.
 void ScheduleEvent_Threadsafe(int cyclesIntoFuture, int event_type, u64 userdata)
 {
-    std::lock_guard<std::recursive_mutex> lk(externalEventSection);
-    Event *ne = GetNewTsEvent();
-    ne->time = globalTimer + cyclesIntoFuture;
-    ne->type = event_type;
-    ne->next = 0;
-    ne->userdata = userdata;
-    if(!tsFirst)
-        tsFirst = ne;
-    if(tsLast)
-        tsLast->next = ne;
-    tsLast = ne;
+    std::lock_guard<std::mutex> lk(tsWriteLock);
+    Event ne;
+    ne.time = globalTimer + cyclesIntoFuture;
+    ne.type = event_type;
+    ne.userdata = userdata;
+    tsQueue.Push(ne);
 }
 
 // Same as ScheduleEvent_Threadsafe(0, ...) EXCEPT if we are already on the CPU thread
@@ -248,7 +213,6 @@ void ScheduleEvent_Threadsafe_Immediate(int event_type, u64 userdata)
 {
     if(Core::IsCPUThread())
     {
-        std::lock_guard<std::recursive_mutex> lk(externalEventSection);
        event_types[event_type].callback(userdata, 0);
     }
     else
@@ -357,45 +321,12 @@ void RemoveEvent(int event_type)
 
 void RemoveThreadsafeEvent(int event_type)
 {
-    std::lock_guard<std::recursive_mutex> lk(externalEventSection);
-    if (!tsFirst)
+    std::lock_guard<std::mutex> lk(tsWriteLock);
+    for (Common::FifoQueue<BaseEvent, false>::iterator itr = tsQueue.begin(); itr != tsQueue.end(); ++itr)
     {
-        return;
-    }
-
-    while(tsFirst)
-    {
-        if (tsFirst->type == event_type)
+        if (itr->type == event_type)
         {
-            Event *next = tsFirst->next;
-            FreeTsEvent(tsFirst);
-            tsFirst = next;
-        }
-        else
-        {
-            break;
-        }
-    }
-
-    if (!tsFirst)
-    {
-        return;
-    }
-
-    Event *prev = tsFirst;
-    Event *ptr = prev->next;
-    while (ptr)
-    {
-        if (ptr->type == event_type)
-        {
-            prev->next = ptr->next;
-            FreeTsEvent(ptr);
-            ptr = prev->next;
-        }
-        else
-        {
-            prev = ptr;
-            ptr = ptr->next;
+            itr = tsQueue.erase(itr);
         }
     }
 }
@@ -452,24 +383,14 @@ void ProcessFifoWaitEvents()
 
 void MoveEvents()
 {
-    std::lock_guard<std::recursive_mutex> lk(externalEventSection);
-    // Move events from async queue into main queue
-    while (tsFirst)
+    BaseEvent sevt;
+    while (tsQueue.Pop(sevt))
     {
-        Event *next = tsFirst->next;
-        AddEventToQueue(tsFirst);
-        tsFirst = next;
-    }
-    tsLast = NULL;
-
-    // Move free events to threadsafe pool
-    while(allocatedTsEvents > 0 && eventPool)
-    {
-        Event *ev = eventPool;
-        eventPool = ev->next;
-        ev->next = eventTsPool;
-        eventTsPool = ev;
-        allocatedTsEvents--;
+        Event *evt = GetNewEvent();
+        evt->time = sevt.time;
+        evt->userdata = sevt.userdata;
+        evt->type = sevt.type;
+        AddEventToQueue(evt);
     }
 }
@@ -606,3 +527,4 @@ void SetFakeTBStartTicks(u64 val)
 }
 
 } // namespace