mirror of https://github.com/PCSX2/pcsx2.git
gsdx: Make GSJobQueue non-inheritable
In the previous code, the threads were created and destroyed in the base class constructor and destructor, so the threads could potentially be active while the object was in a partially constructed or destroyed state. The thread, however, relies on a virtual function to process the queue items, and the vtable might not be in the desired state while the object is partially constructed or destroyed.

This probably only matters during object destruction: no items are in the queue during object construction, so the virtual function won't be called, but items may still be queued up when the destructor is called, so the virtual function can be called. It wasn't an issue in practice because all users of the thread explicitly waited for the queues to be empty before invoking the destructor.

Adjust the constructor to take a std::function parameter, which the thread will use instead to process queue items, and avoid inheriting from the GSJobQueue class. This also eliminates the need to explicitly wait for all jobs to finish before destruction (unless there are other external factors, of course), which should make future code safer.
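For context, here is a minimal, hypothetical sketch of the pattern this commit moves to: the worker thread calls a std::function supplied at construction time instead of a virtual Process() method, so no vtable lookup can happen while the object is partially constructed or destroyed, and pending jobs can still be drained safely from the destructor. The JobQueue/Push/ThreadProc names and the mutex-plus-std::queue internals are illustrative only; GSdx's actual GSJobQueue uses a lock-free ring buffer (boost_spsc_queue.hpp) and an atomic counter.

// Sketch only, not the actual GSdx code: a job queue whose worker thread
// invokes a callable supplied at construction time rather than a virtual method.
#include <condition_variable>
#include <cstdio>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>

template <class T>
class JobQueue {
public:
	explicit JobQueue(std::function<void(T&)> func) : m_func(std::move(func))
	{
		// Start the thread last, after every member (including m_func) is ready.
		m_thread = std::thread(&JobQueue::ThreadProc, this);
	}

	~JobQueue()
	{
		{
			std::lock_guard<std::mutex> lock(m_mutex);
			m_exit = true;
		}
		m_notEmpty.notify_one();
		// Safe even if jobs are still queued: m_func is a plain member that
		// stays valid while the worker drains the remaining items.
		m_thread.join();
	}

	void Push(T item)
	{
		{
			std::lock_guard<std::mutex> lock(m_mutex);
			m_queue.push(std::move(item));
		}
		m_notEmpty.notify_one();
	}

private:
	void ThreadProc()
	{
		std::unique_lock<std::mutex> lock(m_mutex);
		for (;;) {
			m_notEmpty.wait(lock, [this] { return m_exit || !m_queue.empty(); });
			if (m_queue.empty()) {
				if (m_exit) return; // exit only once the queue is drained
				continue;
			}
			T item = std::move(m_queue.front());
			m_queue.pop();
			lock.unlock();
			m_func(item); // plain callable, no vtable lookup involved
			lock.lock();
		}
	}

	std::function<void(T&)> m_func;
	std::queue<T> m_queue;
	std::mutex m_mutex;
	std::condition_variable m_notEmpty;
	bool m_exit = false;
	std::thread m_thread; // worker thread; joined in the destructor
};

int main()
{
	// Usage mirrors the new GSPng::Worker / GSRasterizerList::GSWorker aliases:
	// the processing step is passed in as a callable instead of being overridden.
	JobQueue<int> queue([](int& v) { std::printf("processed %d\n", v); });
	for (int i = 0; i < 4; i++)
		queue.Push(i);
	// No explicit Wait() is needed before destruction; the destructor drains
	// the queue before the thread exits.
}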
parent cdeed349e3
commit ac78688a32
@@ -492,7 +492,7 @@ bool GSCapture::BeginCapture(float fps, GSVector2i recomendedResolution, float a
 	m_size.y = theApp.GetConfigI("CaptureHeight");

 	for(int i = 0; i < m_threads; i++) {
-		m_workers.push_back(std::unique_ptr<GSPng::Worker>(new GSPng::Worker()));
+		m_workers.push_back(std::unique_ptr<GSPng::Worker>(new GSPng::Worker(&GSPng::Process)));
 	}

 #endif
@@ -555,10 +555,6 @@ bool GSCapture::EndCapture()
 	}

 #elif defined(__unix__)
-	// XXX Might not be necessary to wait
-	for(size_t i = 0; i < m_workers.size(); i++) {
-		m_workers[i]->Wait();
-	}
 	m_workers.clear();

 	m_frame = 0;
@@ -140,7 +140,7 @@ namespace GSPng {
 		_aligned_free(m_image);
 	}

-	void Worker::Process(shared_ptr<Transaction>& item)
+	void Process(std::shared_ptr<Transaction>& item)
 	{
 		Save(item->m_fmt, item->m_file, item->m_image, item->m_w, item->m_h, item->m_pitch, item->m_compression);
 	}
@@ -52,12 +52,7 @@ namespace GSPng {

 	bool Save(GSPng::Format fmt, const string& file, uint8* image, int w, int h, int pitch, int compression, bool rb_swapped = false);

-	class Worker : public GSJobQueue<shared_ptr<Transaction>, 16 >
-	{
-	public:
-		Worker() {};
-		virtual ~Worker() {};
+	void Process(std::shared_ptr<Transaction> &item);

-		void Process(shared_ptr<Transaction>& item);
-	};
+	using Worker = GSJobQueue<std::shared_ptr<Transaction>, 16>;
 }
@@ -1158,11 +1158,6 @@ GSRasterizerList::GSRasterizerList(int threads, GSPerfMon* perfmon)

 GSRasterizerList::~GSRasterizerList()
 {
-	for(auto i = m_workers.begin(); i != m_workers.end(); i++)
-	{
-		delete *i;
-	}
-
 	_aligned_free(m_scanline);
 }

@@ -1213,33 +1208,8 @@ int GSRasterizerList::GetPixels(bool reset)

 	for(size_t i = 0; i < m_workers.size(); i++)
 	{
-		pixels += m_workers[i]->GetPixels(reset);
+		pixels += m_r[i]->GetPixels(reset);
 	}

 	return pixels;
 }
-
-// GSRasterizerList::GSWorker
-
-GSRasterizerList::GSWorker::GSWorker(GSRasterizer* r)
-	: GSJobQueue<shared_ptr<GSRasterizerData>, 65536>()
-	, m_r(r)
-{
-}
-
-GSRasterizerList::GSWorker::~GSWorker()
-{
-	Wait();
-
-	delete m_r;
-}
-
-int GSRasterizerList::GSWorker::GetPixels(bool reset)
-{
-	return m_r->GetPixels(reset);
-}
-
-void GSRasterizerList::GSWorker::Process(shared_ptr<GSRasterizerData>& item)
-{
-	m_r->Draw(item.get());
-}
@@ -181,23 +181,12 @@ public:
 class GSRasterizerList : public IRasterizer
 {
 protected:
-	class GSWorker : public GSJobQueue<shared_ptr<GSRasterizerData>, 65536 >
-	{
-		GSRasterizer* m_r;
-
-	public:
-		GSWorker(GSRasterizer* r);
-		virtual ~GSWorker();
-
-		int GetPixels(bool reset);
-
-		// GSJobQueue
-
-		void Process(shared_ptr<GSRasterizerData>& item);
-	};
+	using GSWorker = GSJobQueue<std::shared_ptr<GSRasterizerData>, 65536>;

 	GSPerfMon* m_perfmon;
-	vector<GSWorker*> m_workers;
+	// Worker threads depend on the rasterizers, so don't change the order.
+	std::vector<std::unique_ptr<GSRasterizer>> m_r;
+	std::vector<std::unique_ptr<GSWorker>> m_workers;
 	uint8* m_scanline;
 	int m_thread_height;
@@ -214,18 +203,19 @@ public:
 		{
 			return new GSRasterizer(new DS(), 0, 1, perfmon);
 		}
 		else
 		{
 			GSRasterizerList* rl = new GSRasterizerList(threads, perfmon);

 			for(int i = 0; i < threads; i++)
 			{
-				rl->m_workers.push_back(new GSWorker(new GSRasterizer(new DS(), i, threads, perfmon)));
+				rl->m_r.push_back(std::unique_ptr<GSRasterizer>(new GSRasterizer(new DS(), i, threads, perfmon)));
+				auto &r = *rl->m_r[i];
+				rl->m_workers.push_back(std::unique_ptr<GSWorker>(new GSWorker(
+					[&r](std::shared_ptr<GSRasterizerData> &item) { r.Draw(item.get()); })));
 			}

 			return rl;
 		}
 	}

 	// IRasterizer
@@ -24,23 +24,11 @@
 #include "GSdx.h"
 #include "boost_spsc_queue.hpp"

-template<class T> class IGSJobQueue
-{
-public:
-	IGSJobQueue() {}
-	virtual ~IGSJobQueue() {}
-
-	virtual bool IsEmpty() const = 0;
-	virtual void Push(const T& item) = 0;
-	virtual void Wait() = 0;
-
-	virtual void Process(T& item) = 0;
-};
-
-template<class T, int CAPACITY> class GSJobQueue : public IGSJobQueue<T>
+template<class T, int CAPACITY> class GSJobQueue final
 {
 private:
 	std::thread m_thread;
+	std::function<void(T&)> m_func;
 	std::atomic<int16_t> m_count;
 	std::atomic<bool> m_exit;
 	ringbuffer_base<T, CAPACITY> m_queue;
@@ -81,14 +69,15 @@ private:
 	}

 public:
-	GSJobQueue() :
+	GSJobQueue(std::function<void(T&)> func) :
+		m_func(func),
 		m_count(0),
 		m_exit(false)
 	{
 		m_thread = std::thread(&GSJobQueue::ThreadProc, this);
 	}

-	virtual ~GSJobQueue()
+	~GSJobQueue()
 	{
 		m_exit = true;
 		do {
@@ -129,6 +118,6 @@ public:
 	}

 	void operator() (T& item) {
-		this->Process(item);
+		m_func(item);
 	}
 };
@@ -107,6 +107,7 @@ typedef int64 sint64;
 #include <atomic>
 #include <mutex>
 #include <condition_variable>
+#include <functional>

 using namespace std;
