MTVU: small thread scheduling improvement

* Use yield to avoid a spin loop while waiting for ring-buffer space
* Don't flush the full buffer when we miss space
This commit is contained in:
Gregory Hainaut 2017-01-18 19:10:05 +01:00
parent fff5230631
commit e3d1871f86
2 changed files with 8 additions and 2 deletions

View File

@ -225,6 +225,7 @@ public:
#include <algorithm>
#include <memory>
#include <atomic>
#include <thread>
#include "Pcsx2Defs.h"

View File

@ -170,7 +170,11 @@ __ri void VU_Thread::WaitOnSize(s32 size)
{ // Let MTVU run to free up buffer space
KickStart();
if (IsDevBuild) DevCon.WriteLn("WaitOnSize()");
ScopedLock lock(mtxBusy);
// Locking might trigger a full flush of the ring buffer. Yield
// will be more aggressive, and only flush the minimal size.
// Performance will be smoother, but it will consume extra CPU cycles
// on the EE thread (not an issue on 4 cores).
std::this_thread::yield();
}
}
}
@ -292,7 +296,7 @@ void VU_Thread::KickStart(bool forceKick)
bool VU_Thread::IsDone()
{
return !isBusy.load(std::memory_order_acquire) && GetReadPos() == GetWritePos();
return GetReadPos() == GetWritePos();
}
void VU_Thread::WaitVU()
@ -303,6 +307,7 @@ void VU_Thread::WaitVU()
//DevCon.WriteLn("WaitVU()");
pxAssert(THREAD_VU1);
KickStart();
std::this_thread::yield(); // Give the MTVU thread a chance to actually start
ScopedLock lock(mtxBusy);
}
}