Make CoreTiming's threadsafe events lock-free.

Not sure if this actually helps in practice, but might help in
pathological cases, and almost certainly can't hurt.
comex 2013-08-19 15:16:01 -04:00
parent 7fe440340f
commit c3b9f3556f
2 changed files with 83 additions and 127 deletions
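
The idea behind the change: the old threadsafe-event path kept a singly linked list (tsFirst/tsLast) plus a separate event pool, all guarded by the recursive externalEventSection mutex; the new path pushes events into a Common::FifoQueue, a linked FIFO in which the writer only ever advances the write pointer and the reader only ever advances the read pointer, so the reader never needs a lock. Below is a minimal sketch of that queue shape; it is not Dolphin code: the class and member names are mine, and it uses std::atomic where the actual FifoQueue keeps volatile pointers.

// SPSC (single-producer/single-consumer) linked FIFO sketch.
#include <atomic>
#include <cstdio>

template <typename T>
class SpscQueue
{
public:
	SpscQueue() { m_write = m_read = new Node(); }
	~SpscQueue()
	{
		while (Pop()) {}
		delete m_read.load();  // the remaining sentinel node
	}

	// Producer only: fill the current sentinel node, publish a fresh empty
	// node as its successor, then advance the write pointer.
	void Push(const T& value)
	{
		Node* w = m_write.load(std::memory_order_relaxed);
		w->value = value;
		Node* next = new Node();
		w->next.store(next, std::memory_order_release);
		m_write.store(next, std::memory_order_relaxed);
	}

	// Consumer only: the queue is empty while the read node has no successor
	// (compare Empty() in the patched FifoQueue).
	bool Pop(T* out = nullptr)
	{
		Node* r = m_read.load(std::memory_order_relaxed);
		Node* next = r->next.load(std::memory_order_acquire);
		if (!next)
			return false;
		if (out)
			*out = r->value;
		m_read.store(next, std::memory_order_relaxed);
		delete r;
		return true;
	}

private:
	struct Node
	{
		T value{};
		std::atomic<Node*> next{nullptr};
	};
	std::atomic<Node*> m_write;
	std::atomic<Node*> m_read;
};

int main()
{
	SpscQueue<int> q;
	q.Push(1);
	q.Push(2);
	int v;
	while (q.Pop(&v))
		std::printf("%d\n", v);
}

As in the patched FifoQueue, the write pointer always designates an empty sentinel node, and the queue is empty exactly when the read node has no successor. A multi-threaded usage sketch in the CoreTiming style follows the CoreTiming.cpp diff below.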

FifoQueue.h

@@ -10,7 +10,7 @@
namespace Common
{
template <typename T>
template <typename T, bool NeedSize = true>
class FifoQueue
{
public:
@@ -27,37 +27,39 @@ public:
u32 Size() const
{
static_assert(NeedSize, "using Size() on FifoQueue without NeedSize");
return m_size;
}
bool Empty() const
{
//return (m_read_ptr == m_write_ptr);
return (0 == m_size);
return !m_read_ptr->next;
}
T& Front() const
{
return *m_read_ptr->current;
return m_read_ptr->current;
}
template <typename Arg>
void Push(Arg&& t)
{
// create the element, add it to the queue
m_write_ptr->current = new T(std::forward<Arg>(t));
m_write_ptr->current = std::move(t);
// set the next pointer to a new element ptr
// then advance the write pointer
m_write_ptr = m_write_ptr->next = new ElementPtr();
Common::AtomicIncrement(m_size);
if (NeedSize)
Common::AtomicIncrement(m_size);
}
void Pop()
{
Common::AtomicDecrement(m_size);
ElementPtr *const tmpptr = m_read_ptr;
if (NeedSize)
Common::AtomicDecrement(m_size);
ElementPtr *tmpptr = m_read_ptr;
// advance the read pointer
m_read_ptr = m_read_ptr->next;
m_read_ptr = tmpptr->next;
// set the next element to NULL to stop the recursive deletion
tmpptr->next = NULL;
delete tmpptr; // this also deletes the element
@@ -82,31 +84,63 @@ public:
m_write_ptr = m_read_ptr = new ElementPtr();
}
private:
class ElementPtr;
public:
class iterator
{
public:
iterator() {}
bool operator==(iterator other) { return other.m_pp == m_pp; }
bool operator!=(iterator other) { return !(*this == other); }
T *operator->() { return &**this; }
T& operator*() { return (*m_pp)->current; }
void operator++() { m_pp = &(*m_pp)->next; }
protected:
iterator(ElementPtr *volatile *pp) : m_pp(pp) {}
ElementPtr *volatile *m_pp;
friend class FifoQueue<T, NeedSize>;
};
iterator begin()
{
return iterator(&m_read_ptr);
}
iterator end()
{
return iterator(&m_write_ptr->next);
}
iterator erase(iterator itr)
{
ElementPtr *elp = *itr.m_pp;
*itr.m_pp = elp->next;
delete elp;
return itr;
}
private:
// stores a pointer to element
// and a pointer to the next ElementPtr
class ElementPtr
{
public:
ElementPtr() : current(NULL), next(NULL) {}
ElementPtr() : next(NULL) {}
~ElementPtr()
{
if (current)
{
delete current;
// recusion ftw
if (next)
delete next;
}
if (next)
delete next;
}
T *volatile current;
T current;
ElementPtr *volatile next;
};
ElementPtr *volatile m_write_ptr;
ElementPtr *volatile m_read_ptr;
ElementPtr *m_write_ptr;
ElementPtr *m_read_ptr;
volatile u32 m_size;
};
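
How the modified FifoQueue is meant to be used: declaring it with NeedSize = false drops the atomic size counter (calling Size() on such a queue trips the static_assert), elements are stored in the node by value instead of behind a heap-allocated pointer, and the new iterator interface lets pending entries be inspected or erased in place, which is what RemoveThreadsafeEvent does in CoreTiming.cpp below. A small usage sketch, assuming the patched Common/FifoQueue.h and Dolphin's surrounding headers; the Item struct and the helper functions are illustrative only, not part of the commit.

// Usage sketch only; assumes the patched Common/FifoQueue.h is on the include path.
#include <cstdio>
#include "FifoQueue.h"

struct Item
{
	int type;
	int payload;
};

// NeedSize = false: no atomic size counter, as with the CoreTiming queue below.
static Common::FifoQueue<Item, false> s_queue;

static void Producer()
{
	// In CoreTiming, calls like this are serialized by tsWriteLock so that
	// only one thread pushes at a time.
	Item it;
	it.type = 1;
	it.payload = 42;
	s_queue.Push(it);  // the element is stored by value in the write node
}

static void Consumer()
{
	// Drain in FIFO order; Pop(T&) returns false once the queue is empty,
	// the same way MoveEvents() drains tsQueue below.
	Item it;
	while (s_queue.Pop(it))
		std::printf("popped type %d payload %d\n", it.type, it.payload);

	// Read-only walk over whatever is still queued, using the new iterator.
	for (Common::FifoQueue<Item, false>::iterator itr = s_queue.begin(); itr != s_queue.end(); ++itr)
		std::printf("still queued: type %d\n", itr->type);
}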

CoreTiming.cpp

@@ -10,6 +10,7 @@
#include "Core.h"
#include "StringUtil.h"
#include "VideoBackendBase.h"
#include "FifoQueue.h"
#define MAX_SLICE_LENGTH 20000
@@ -29,20 +30,17 @@ struct BaseEvent
s64 time;
u64 userdata;
int type;
// Event *next;
};
typedef LinkedListItem<BaseEvent> Event;
// STATE_TO_SAVE
Event *first;
Event *tsFirst;
Event *tsLast;
static Event *first;
static std::mutex tsWriteLock;
Common::FifoQueue<BaseEvent, false> tsQueue;
// event pools
Event *eventPool = 0;
Event *eventTsPool = 0;
int allocatedTsEvents = 0;
int downcount, slicelength;
int maxSliceLength = MAX_SLICE_LENGTH;
@@ -57,7 +55,6 @@ u64 fakeTBStartTicks;
int ev_lost;
static std::recursive_mutex externalEventSection;
void (*advanceCallback)(int cyclesExecuted) = NULL;
@@ -71,31 +68,12 @@ Event* GetNewEvent()
return ev;
}
Event* GetNewTsEvent()
{
allocatedTsEvents++;
if(!eventTsPool)
return new Event;
Event* ev = eventTsPool;
eventTsPool = ev->next;
return ev;
}
void FreeEvent(Event* ev)
{
ev->next = eventPool;
eventPool = ev;
}
void FreeTsEvent(Event* ev)
{
ev->next = eventTsPool;
eventTsPool = ev;
allocatedTsEvents--;
}
static void EmptyTimedCallback(u64 userdata, int cyclesLate) {}
int RegisterEvent(const char *name, TimedCallback callback)
@@ -141,6 +119,7 @@ void Init()
void Shutdown()
{
std::lock_guard<std::mutex> lk(tsWriteLock);
MoveEvents();
ClearPendingEvents();
UnregisterAllEvents();
@@ -151,14 +130,6 @@ void Shutdown()
eventPool = ev->next;
delete ev;
}
std::lock_guard<std::recursive_mutex> lk(externalEventSection);
while(eventTsPool)
{
Event *ev = eventTsPool;
eventTsPool = ev->next;
delete ev;
}
}
void EventDoState(PointerWrap &p, BaseEvent* ev)
@@ -197,7 +168,7 @@ void EventDoState(PointerWrap &p, BaseEvent* ev)
void DoState(PointerWrap &p)
{
std::lock_guard<std::recursive_mutex> lk(externalEventSection);
std::lock_guard<std::mutex> lk(tsWriteLock);
p.Do(downcount);
p.Do(slicelength);
p.Do(globalTimer);
@@ -208,11 +179,10 @@ void DoState(PointerWrap &p)
p.Do(fakeTBStartTicks);
p.DoMarker("CoreTimingData");
MoveEvents();
p.DoLinkedList<BaseEvent, GetNewEvent, FreeEvent, EventDoState>(first);
p.DoMarker("CoreTimingEvents");
p.DoLinkedList<BaseEvent, GetNewTsEvent, FreeTsEvent, EventDoState>(tsFirst, &tsLast);
p.DoMarker("CoreTimingTsEvents");
}
u64 GetTicks()
@@ -229,17 +199,12 @@ u64 GetIdleTicks()
// schedule things to be executed on the main thread.
void ScheduleEvent_Threadsafe(int cyclesIntoFuture, int event_type, u64 userdata)
{
std::lock_guard<std::recursive_mutex> lk(externalEventSection);
Event *ne = GetNewTsEvent();
ne->time = globalTimer + cyclesIntoFuture;
ne->type = event_type;
ne->next = 0;
ne->userdata = userdata;
if(!tsFirst)
tsFirst = ne;
if(tsLast)
tsLast->next = ne;
tsLast = ne;
std::lock_guard<std::mutex> lk(tsWriteLock);
Event ne;
ne.time = globalTimer + cyclesIntoFuture;
ne.type = event_type;
ne.userdata = userdata;
tsQueue.Push(ne);
}
// Same as ScheduleEvent_Threadsafe(0, ...) EXCEPT if we are already on the CPU thread
@@ -248,7 +213,6 @@ void ScheduleEvent_Threadsafe_Immediate(int event_type, u64 userdata)
{
if(Core::IsCPUThread())
{
std::lock_guard<std::recursive_mutex> lk(externalEventSection);
event_types[event_type].callback(userdata, 0);
}
else
@@ -357,51 +321,18 @@ void RemoveEvent(int event_type)
void RemoveThreadsafeEvent(int event_type)
{
std::lock_guard<std::recursive_mutex> lk(externalEventSection);
if (!tsFirst)
std::lock_guard<std::mutex> lk(tsWriteLock);
for (Common::FifoQueue<BaseEvent, false>::iterator itr = tsQueue.begin(); itr != tsQueue.end(); ++itr)
{
return;
}
while(tsFirst)
{
if (tsFirst->type == event_type)
if (itr->type == event_type)
{
Event *next = tsFirst->next;
FreeTsEvent(tsFirst);
tsFirst = next;
}
else
{
break;
}
}
if (!tsFirst)
{
return;
}
Event *prev = tsFirst;
Event *ptr = prev->next;
while (ptr)
{
if (ptr->type == event_type)
{
prev->next = ptr->next;
FreeTsEvent(ptr);
ptr = prev->next;
}
else
{
prev = ptr;
ptr = ptr->next;
itr = tsQueue.erase(itr);
}
}
}
void RemoveAllEvents(int event_type)
{
{
RemoveThreadsafeEvent(event_type);
RemoveEvent(event_type);
}
@@ -447,34 +378,24 @@ void ProcessFifoWaitEvents()
{
break;
}
}
}
}
void MoveEvents()
{
std::lock_guard<std::recursive_mutex> lk(externalEventSection);
// Move events from async queue into main queue
while (tsFirst)
BaseEvent sevt;
while (tsQueue.Pop(sevt))
{
Event *next = tsFirst->next;
AddEventToQueue(tsFirst);
tsFirst = next;
}
tsLast = NULL;
// Move free events to threadsafe pool
while(allocatedTsEvents > 0 && eventPool)
{
Event *ev = eventPool;
eventPool = ev->next;
ev->next = eventTsPool;
eventTsPool = ev;
allocatedTsEvents--;
Event *evt = GetNewEvent();
evt->time = sevt.time;
evt->userdata = sevt.userdata;
evt->type = sevt.type;
AddEventToQueue(evt);
}
}
void Advance()
{
{
MoveEvents();
int cyclesExecuted = slicelength - downcount;
@@ -606,3 +527,4 @@ void SetFakeTBStartTicks(u64 val)
}
} // namespace
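
Net effect on CoreTiming: ScheduleEvent_Threadsafe now only takes tsWriteLock, which serializes producers against one another, while MoveEvents drains tsQueue on the CPU thread without taking any lock, and the recursive externalEventSection mutex is no longer needed on this path. A sketch of that producer/consumer shape with hypothetical names, reusing the SpscQueue sketch from above in place of the real FifoQueue:

// Producer/consumer shape after the patch (hypothetical names; SpscQueue is
// the sketch shown earlier, standing in for Common::FifoQueue).
#include <mutex>
#include <thread>
#include <vector>
#include <cstdio>

struct PendingEvent
{
	long long time;
	unsigned long long userdata;
	int type;
};

static SpscQueue<PendingEvent> s_tsQueue;  // the consumer side never locks
static std::mutex s_tsWriteLock;           // taken by producers only

// Analogue of ScheduleEvent_Threadsafe(): may be called from any thread.
static void ScheduleFromAnyThread(long long time, int type, unsigned long long userdata)
{
	std::lock_guard<std::mutex> lk(s_tsWriteLock);  // keeps the queue single-producer
	PendingEvent ev = { time, userdata, type };
	s_tsQueue.Push(ev);
}

// Analogue of MoveEvents(): called only from the CPU (consumer) thread.
static void DrainOnCpuThread()
{
	PendingEvent ev;
	while (s_tsQueue.Pop(&ev))
		std::printf("event type %d scheduled for %lld\n", ev.type, ev.time);
}

int main()
{
	std::vector<std::thread> producers;
	for (int i = 0; i < 4; ++i)
		producers.emplace_back([i] { ScheduleFromAnyThread(100 + i, i, 0); });
	for (std::thread& t : producers)
		t.join();
	DrainOnCpuThread();
}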