From e3d1871f866a157152a2e0f4e356663952fd103e Mon Sep 17 00:00:00 2001
From: Gregory Hainaut <gregory.hainaut@gmail.com>
Date: Wed, 18 Jan 2017 19:10:05 +0100
Subject: [PATCH] MTVU: small thread scheduling improvement

* Use yield to avoid a spin loop during WAIT
* Don't flush the full buffer when we miss space
---
 common/include/Utilities/Dependencies.h | 1 +
 pcsx2/MTVU.cpp                          | 9 +++++++--
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/common/include/Utilities/Dependencies.h b/common/include/Utilities/Dependencies.h
index 714a7f8c0d..539f2990b7 100644
--- a/common/include/Utilities/Dependencies.h
+++ b/common/include/Utilities/Dependencies.h
@@ -225,6 +225,7 @@ public:
 #include
 #include
 #include
+#include <thread>
 
 #include "Pcsx2Defs.h"
 
diff --git a/pcsx2/MTVU.cpp b/pcsx2/MTVU.cpp
index 72ffc678bd..6e5c46fe0a 100644
--- a/pcsx2/MTVU.cpp
+++ b/pcsx2/MTVU.cpp
@@ -170,7 +170,11 @@ __ri void VU_Thread::WaitOnSize(s32 size)
 		// Let MTVU run to free up buffer space
 		KickStart();
 		if (IsDevBuild) DevCon.WriteLn("WaitOnSize()");
-		ScopedLock lock(mtxBusy);
+		// Locking might trigger a full flush of the ring buffer. Yield
+		// will be more aggressive, and only flush the minimal size.
+		// Performance will be smoother but it will consume extra CPU cycles
+		// on the EE thread (not an issue on 4 cores).
+		std::this_thread::yield();
 	}
 }
@@ -292,7 +296,7 @@ void VU_Thread::KickStart(bool forceKick)
 
 bool VU_Thread::IsDone()
 {
-	return !isBusy.load(std::memory_order_acquire) && GetReadPos() == GetWritePos();
+	return GetReadPos() == GetWritePos();
 }
 
 void VU_Thread::WaitVU()
@@ -303,6 +307,7 @@ void VU_Thread::WaitVU()
 		//DevCon.WriteLn("WaitVU()");
 		pxAssert(THREAD_VU1);
 		KickStart();
+		std::this_thread::yield(); // Give a chance to the MTVU thread to actually start
 		ScopedLock lock(mtxBusy);
 	}
 }