Thread.cpp refinement

Hide thread mutex
Safe notify() method
Other refactoring
Nekotekina 2016-09-07 01:38:52 +03:00
parent da878c36bd
commit a5a2d43d7c
35 changed files with 532 additions and 591 deletions
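
Note (not part of the diff): the gist of the change is that each thread_ctrl now hides its own semaphore-based mutex and condition variable, and notify() merely sets a signal bit under that hidden lock, so callers no longer lock the thread themselves. A minimal usage sketch, assuming the thread_ctrl interface declared further down; g_ready and the thread name are illustrative:

```cpp
// Illustrative only: relies on the thread_ctrl interface shown in this commit.
#include <atomic>
#include <memory>

std::atomic<bool> g_ready{false}; // hypothetical externally visible condition

void example()
{
	auto worker = std::make_shared<thread_ctrl>("worker");

	thread_ctrl::start(worker, []
	{
		// Consumer: block until the predicate holds (abortable, may throw)
		thread_ctrl::wait([] { return g_ready.load(); });
	});

	// Producer: publish the change, then wake the thread.
	// notify() now only sets a signal bit under the hidden mutex,
	// so the old lock()/lock_notify() dance is gone.
	g_ready = true;
	worker->notify();

	worker->join();
}
```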


@ -8,6 +8,7 @@
#ifdef _WIN32 #ifdef _WIN32
#include <Windows.h> #include <Windows.h>
#include <Psapi.h> #include <Psapi.h>
#include <process.h>
#else #else
#ifdef __APPLE__ #ifdef __APPLE__
#define _XOPEN_SOURCE #define _XOPEN_SOURCE
@ -16,8 +17,16 @@
#include <errno.h> #include <errno.h>
#include <signal.h> #include <signal.h>
#include <ucontext.h> #include <ucontext.h>
#include <pthread.h>
#include <sys/time.h>
#include <sys/resource.h>
#endif #endif
#include "sync.h"
thread_local u64 g_tls_fault_rsx = 0;
thread_local u64 g_tls_fault_spu = 0;
static void report_fatal_error(const std::string& msg) static void report_fatal_error(const std::string& msg)
{ {
std::string _msg = msg + "\n" std::string _msg = msg + "\n"
@ -1009,6 +1018,7 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)
{ {
if (rsx::g_access_violation_handler && rsx::g_access_violation_handler(addr, is_writing)) if (rsx::g_access_violation_handler && rsx::g_access_violation_handler(addr, is_writing))
{ {
g_tls_fault_rsx++;
return true; return true;
} }
@ -1139,6 +1149,7 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)
// skip processed instruction // skip processed instruction
RIP(context) += i_size; RIP(context) += i_size;
g_tls_fault_spu++;
return true; return true;
} }
@ -1878,103 +1889,65 @@ const bool s_self_test = []() -> bool
return true; return true;
}(); }();
#include <thread>
#include <mutex>
#include <condition_variable>
#include <exception>
#include <chrono>
thread_local DECLARE(thread_ctrl::g_tls_this_thread) = nullptr;
struct thread_ctrl::internal
{
std::mutex mutex;
std::condition_variable cond;
std::condition_variable jcv; // Allows simultaneous joining
std::condition_variable icv;
task_stack atexit;
std::exception_ptr exception; // Stored exception
std::chrono::high_resolution_clock::time_point time_limit;
#ifdef _WIN32
DWORD thread_id = 0;
#endif
x64_context _context{};
x64_context* const thread_ctx = &this->_context;
atomic_t<void(*)()> interrupt{}; // Interrupt function
};
thread_local thread_ctrl::internal* g_tls_internal = nullptr;
extern std::condition_variable& get_current_thread_cv()
{
return g_tls_internal->cond;
}
// TODO // TODO
extern atomic_t<u32> g_thread_count(0); extern atomic_t<u32> g_thread_count(0);
thread_local DECLARE(thread_ctrl::g_tls_this_thread) = nullptr;
extern thread_local std::string(*g_tls_log_prefix)(); extern thread_local std::string(*g_tls_log_prefix)();
void thread_ctrl::start(const std::shared_ptr<thread_ctrl>& ctrl, task_stack task) void thread_ctrl::start(const std::shared_ptr<thread_ctrl>& ctrl, task_stack task)
{ {
reinterpret_cast<std::thread&>(ctrl->m_thread) = std::thread([ctrl, task = std::move(task)] #ifdef _WIN32
using thread_result = uint;
using thread_type = thread_result(__stdcall*)(void* arg);
#else
using thread_result = void*;
using thread_type = thread_result(*)(void* arg);
#endif
// Thread entry point
const thread_type entry = [](void* arg) -> thread_result
{ {
// Recover shared_ptr from short-circuited thread_ctrl object pointer
const std::shared_ptr<thread_ctrl> ctrl = static_cast<thread_ctrl*>(arg)->m_self;
try try
{ {
ctrl->initialize(); ctrl->initialize();
task.exec(); task_stack{std::move(ctrl->m_task)}.invoke();
} }
catch (...) catch (...)
{ {
ctrl->m_data->exception = std::current_exception(); // Capture exception
ctrl->finalize(std::current_exception());
return 0;
} }
ctrl->finalize(); ctrl->finalize(nullptr);
}); return 0;
} };
void thread_ctrl::wait_start(u64 timeout) ctrl->m_self = ctrl;
{ ctrl->m_task = std::move(task);
m_data->time_limit = std::chrono::high_resolution_clock::now() + std::chrono::microseconds(timeout);
}
bool thread_ctrl::wait_wait(u64 timeout) // TODO: implement simple thread pool
{ #ifdef _WIN32
std::unique_lock<std::mutex> lock(m_data->mutex, std::adopt_lock); std::uintptr_t thread = _beginthreadex(nullptr, 0, entry, ctrl.get(), 0, nullptr);
verify("thread_ctrl::start" HERE), thread != 0;
#else
pthread_t thread;
verify("thread_ctrl::start" HERE), pthread_create(&thread, nullptr, entry, ctrl.get());
#endif
if (timeout && m_data->cond.wait_until(lock, m_data->time_limit) == std::cv_status::timeout) // TODO: this is unsafe and must be duplicated in thread_ctrl::initialize
{ ctrl->m_thread = thread;
lock.release();
return false;
}
m_data->cond.wait(lock);
lock.release();
return true;
}
void thread_ctrl::test()
{
if (m_data && m_data->exception)
{
std::rethrow_exception(m_data->exception);
}
} }
void thread_ctrl::initialize() void thread_ctrl::initialize()
{ {
// Initialize TLS variable // Initialize TLS variable
g_tls_this_thread = this; g_tls_this_thread = this;
g_tls_internal = this->m_data;
#ifdef _WIN32
m_data->thread_id = GetCurrentThreadId();
#endif
g_tls_log_prefix = [] g_tls_log_prefix = []
{ {
@ -1983,8 +1956,7 @@ void thread_ctrl::initialize()
++g_thread_count; ++g_thread_count;
#if defined(_MSC_VER) #ifdef _MSC_VER
struct THREADNAME_INFO struct THREADNAME_INFO
{ {
DWORD dwType; DWORD dwType;
@ -2010,11 +1982,10 @@ void thread_ctrl::initialize()
{ {
} }
} }
#endif #endif
} }
void thread_ctrl::finalize() noexcept void thread_ctrl::finalize(std::exception_ptr eptr) noexcept
{ {
// Disable and discard possible interrupts // Disable and discard possible interrupts
interrupt_disable(); interrupt_disable();
@ -2023,135 +1994,213 @@ void thread_ctrl::finalize() noexcept
// TODO // TODO
vm::reservation_free(); vm::reservation_free();
// Call atexit functions // Run atexit functions
if (m_data) m_data->atexit.exec(); m_task.invoke();
m_task.reset();
#ifdef _WIN32
ULONG64 cycles{};
QueryThreadCycleTime(GetCurrentThread(), &cycles);
FILETIME ctime, etime, ktime, utime;
GetThreadTimes(GetCurrentThread(), &ctime, &etime, &ktime, &utime);
const u64 time = ((ktime.dwLowDateTime | (u64)ktime.dwHighDateTime << 32) + (utime.dwLowDateTime | (u64)utime.dwHighDateTime << 32)) * 100ull;
#elif __linux__
const u64 cycles = 0; // Not supported
struct ::rusage stats{};
::getrusage(RUSAGE_THREAD, &stats);
const u64 time = (stats.ru_utime.tv_sec + stats.ru_stime.tv_sec) * 1000000000ull + (stats.ru_utime.tv_usec + stats.ru_stime.tv_usec) * 1000ull;
#else
const u64 cycles = 0;
const u64 time = 0;
#endif
LOG_NOTICE(GENERAL, "Thread time: %fs (%fGc); Faults: %u [rsx:%u, spu:%u];",
time / 1000000000.,
cycles / 1000000000.,
vm::g_tls_fault_count,
g_tls_fault_rsx,
g_tls_fault_spu);
--g_thread_count; --g_thread_count;
#ifdef _WIN32 // Untangle circular reference, set exception
ULONG64 time; semaphore_lock{m_mutex}, m_self.reset(), m_exception = eptr;
QueryThreadCycleTime(GetCurrentThread(), &time);
LOG_NOTICE(GENERAL, "Thread time: %f Gc", time / 1000000000.); // Signal joining waiters
#endif m_jcv.notify_all();
} }
void thread_ctrl::push_atexit(task_stack task) void thread_ctrl::_push(task_stack task)
{ {
m_data->atexit.push(std::move(task)); g_tls_this_thread->m_task.push(std::move(task));
}
bool thread_ctrl::_wait_for(u64 usec)
{
auto _this = g_tls_this_thread;
struct half_lock
{
semaphore<>& ref;
void lock()
{
// Used to avoid additional lock + unlock
}
void unlock()
{
ref.post();
}
}
_lock{_this->m_mutex};
if (u32 sig = _this->m_signal.load())
{
thread_ctrl::test();
if (sig & 1)
{
_this->m_signal &= ~1;
return true;
}
}
_this->m_mutex.wait();
while (_this->m_cond.wait(_lock, usec))
{
if (u32 sig = _this->m_signal.load())
{
thread_ctrl::test();
if (sig & 1)
{
_this->m_signal &= ~1;
return true;
}
}
if (usec != -1)
{
return false;
}
_this->m_mutex.wait();
if (u32 sig = _this->m_signal.load())
{
if (sig & 2 && _this->m_exception)
{
_this->_throw();
}
if (sig & 1)
{
_this->m_signal &= ~1;
_this->m_mutex.post();
return true;
}
}
}
// Timeout
return false;
}
[[noreturn]] void thread_ctrl::_throw()
{
std::exception_ptr ex = std::exchange(m_exception, std::exception_ptr{});
m_signal &= ~3;
m_mutex.post();
std::rethrow_exception(std::move(ex));
}
void thread_ctrl::_notify(cond_variable thread_ctrl::* ptr)
{
// Optimized lock + unlock
if (!m_mutex.get())
{
m_mutex.wait();
m_mutex.post();
}
(this->*ptr).notify_one();
} }
thread_ctrl::thread_ctrl(std::string&& name) thread_ctrl::thread_ctrl(std::string&& name)
: m_name(std::move(name)) : m_name(std::move(name))
{ {
static_assert(sizeof(std::thread) <= sizeof(m_thread), "Small storage");
#pragma push_macro("new")
#undef new
new (&m_thread) std::thread;
#pragma pop_macro("new")
initialize_once();
} }
thread_ctrl::~thread_ctrl() thread_ctrl::~thread_ctrl()
{ {
if (reinterpret_cast<std::thread&>(m_thread).joinable()) if (m_thread)
{ {
reinterpret_cast<std::thread&>(m_thread).detach(); #ifdef _WIN32
CloseHandle((HANDLE)m_thread.raw());
#else
pthread_detach(m_thread.raw());
#endif
} }
delete m_data;
reinterpret_cast<std::thread&>(m_thread).~thread();
} }
void thread_ctrl::initialize_once() std::exception_ptr thread_ctrl::get_exception() const
{ {
if (UNLIKELY(!m_data)) semaphore_lock lock(m_mutex);
{ return m_exception;
auto ptr = new thread_ctrl::internal; }
if (!m_data.compare_and_swap_test(nullptr, ptr)) void thread_ctrl::set_exception(std::exception_ptr ptr)
{
semaphore_lock lock(m_mutex);
m_exception = ptr;
if (m_exception)
{ {
delete ptr; m_signal |= 2;
m_cond.notify_one();
} }
else
{
m_signal &= ~2;
} }
} }
void thread_ctrl::join() void thread_ctrl::join()
{ {
// Increase contention counter #ifdef _WIN32
const u32 _j = m_joining++; //verify("thread_ctrl::join" HERE), WaitForSingleObjectEx((HANDLE)m_thread.load(), -1, false) == WAIT_OBJECT_0;
#endif
if (LIKELY(_j >= 0x80000000)) semaphore_lock lock(m_mutex);
{
// Already joined (signal condition)
m_joining = 0x80000000;
}
else if (LIKELY(_j == 0))
{
// Winner joins the thread
reinterpret_cast<std::thread&>(m_thread).join();
// Notify others if necessary while (m_self)
if (UNLIKELY(m_joining.exchange(0x80000000) != 1))
{ {
// Serialize for reliable notification m_jcv.wait(lock);
m_data->mutex.lock();
m_data->mutex.unlock();
m_data->jcv.notify_all();
}
}
else
{
// Hard way
std::unique_lock<std::mutex> lock(m_data->mutex);
m_data->jcv.wait(lock, [&] { return m_joining >= 0x80000000; });
} }
if (UNLIKELY(m_data && m_data->exception && !std::uncaught_exception())) if (UNLIKELY(m_exception && !std::uncaught_exception()))
{ {
std::rethrow_exception(m_data->exception); std::rethrow_exception(m_exception);
} }
} }
void thread_ctrl::lock()
{
m_data->mutex.lock();
}
void thread_ctrl::unlock()
{
m_data->mutex.unlock();
}
void thread_ctrl::lock_notify()
{
if (UNLIKELY(g_tls_this_thread == this))
{
return;
}
// Serialize for reliable notification, condition is assumed to be changed externally
m_data->mutex.lock();
m_data->mutex.unlock();
m_data->cond.notify_one();
}
void thread_ctrl::notify() void thread_ctrl::notify()
{ {
m_data->cond.notify_one(); if (!(m_signal & 1))
{
m_signal |= 1;
_notify(&thread_ctrl::m_cond);
}
} }
void thread_ctrl::set_exception(std::exception_ptr e) static thread_local x64_context s_tls_context{};
{
m_data->exception = e;
}
static void _handle_interrupt(x64_context* ctx) static void _handle_interrupt(x64_context* ctx)
{ {
// Copy context for further use (TODO: is it safe on all platforms?) // Copy context for further use (TODO: is it safe on all platforms?)
g_tls_internal->_context = *ctx; s_tls_context = *ctx;
thread_ctrl::handle_interrupt(); thread_ctrl::handle_interrupt();
} }
@ -2166,7 +2215,7 @@ static thread_local void(*s_tls_handler)() = nullptr;
s_tls_handler(); s_tls_handler();
// Restore context in the case of return // Restore context in the case of return
const auto ctx = g_tls_internal->thread_ctx; const auto ctx = &s_tls_context;
if (s_tls_ret_pos) if (s_tls_ret_pos)
{ {
@ -2188,26 +2237,22 @@ static thread_local void(*s_tls_handler)() = nullptr;
void thread_ctrl::handle_interrupt() void thread_ctrl::handle_interrupt()
{ {
const auto _this = g_tls_this_thread; const auto _this = g_tls_this_thread;
const auto ctx = g_tls_internal->thread_ctx; const auto ctx = &s_tls_context;
if (_this->m_guard & 0x80000000) if (_this->m_guard & 0x80000000)
{ {
// Discard interrupt if interrupts are disabled // Discard interrupt if interrupts are disabled
if (g_tls_internal->interrupt.exchange(nullptr)) if (_this->m_iptr.exchange(nullptr))
{ {
_this->lock(); _this->_notify(&thread_ctrl::m_icv);
_this->unlock();
g_tls_internal->icv.notify_one();
} }
} }
else if (_this->m_guard == 0) else if (_this->m_guard == 0)
{ {
// Set interrupt immediately if no guard set // Set interrupt immediately if no guard set
if (const auto handler = g_tls_internal->interrupt.exchange(nullptr)) if (const auto handler = _this->m_iptr.exchange(nullptr))
{ {
_this->lock(); _this->_notify(&thread_ctrl::m_icv);
_this->unlock();
g_tls_internal->icv.notify_one();
#ifdef _WIN32 #ifdef _WIN32
// Install function call // Install function call
@ -2234,13 +2279,15 @@ void thread_ctrl::handle_interrupt()
void thread_ctrl::interrupt(void(*handler)()) void thread_ctrl::interrupt(void(*handler)())
{ {
semaphore_lock lock(m_mutex);
verify(HERE), this != g_tls_this_thread; // TODO: self-interrupt verify(HERE), this != g_tls_this_thread; // TODO: self-interrupt
verify(HERE), m_data->interrupt.compare_and_swap_test(nullptr, handler); // TODO: multiple interrupts verify(HERE), m_iptr.compare_and_swap_test(nullptr, handler); // TODO: multiple interrupts
#ifdef _WIN32 #ifdef _WIN32
const auto ctx = m_data->thread_ctx; const auto ctx = &s_tls_context;
const HANDLE nt = OpenThread(THREAD_ALL_ACCESS, FALSE, m_data->thread_id); const HANDLE nt = (HANDLE)m_thread.load();//OpenThread(THREAD_ALL_ACCESS, FALSE, m_data->thread_id);
verify(HERE), nt; verify(HERE), nt;
verify(HERE), SuspendThread(nt) != -1; verify(HERE), SuspendThread(nt) != -1;
@ -2254,28 +2301,24 @@ void thread_ctrl::interrupt(void(*handler)())
RIP(ctx) = _rip; RIP(ctx) = _rip;
verify(HERE), ResumeThread(nt) != -1; verify(HERE), ResumeThread(nt) != -1;
CloseHandle(nt); //CloseHandle(nt);
#else #else
pthread_kill(reinterpret_cast<std::thread&>(m_thread).native_handle(), SIGUSR1); pthread_kill(m_thread.load(), SIGUSR1);
#endif #endif
std::unique_lock<std::mutex> lock(m_data->mutex, std::adopt_lock); while (m_iptr)
while (m_data->interrupt)
{ {
m_data->icv.wait(lock); m_icv.wait(lock);
} }
lock.release();
} }
void thread_ctrl::test_interrupt() void thread_ctrl::test_interrupt()
{ {
if (m_guard & 0x80000000) if (m_guard & 0x80000000)
{ {
if (m_data->interrupt.exchange(nullptr)) if (m_iptr.exchange(nullptr))
{ {
lock(), unlock(), m_data->icv.notify_one(); _notify(&thread_ctrl::m_icv);
} }
return; return;
@ -2286,18 +2329,30 @@ void thread_ctrl::test_interrupt()
m_guard = 0; m_guard = 0;
// Execute delayed interrupt handler // Execute delayed interrupt handler
if (const auto handler = m_data->interrupt.exchange(nullptr)) if (const auto handler = m_iptr.exchange(nullptr))
{ {
lock(), unlock(), m_data->icv.notify_one(); _notify(&thread_ctrl::m_icv);
return handler(); return handler();
} }
} }
} }
void thread_ctrl::sleep(u64 useconds) void thread_ctrl::test()
{ {
std::this_thread::sleep_for(std::chrono::microseconds(useconds)); const auto _this = g_tls_this_thread;
if (_this->m_signal & 2)
{
_this->m_mutex.wait();
if (_this->m_exception)
{
_this->_throw();
}
_this->m_mutex.post();
}
} }
@ -2341,3 +2396,7 @@ void named_thread::start_thread(const std::shared_ptr<void>& _this)
on_exit(); on_exit();
}); });
} }
task_stack::task_base::~task_base()
{
}
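
Note (illustration, not from the commit): the rewritten waiting path packs per-thread state into two bits of m_signal, bit 0 for a pending wakeup consumed by _wait_for() and bit 1 for a stored exception rethrown by test()/_throw(). A simplified standalone sketch of that protocol, using std::atomic in place of atomic_t:

```cpp
// Simplified; the real code holds the hidden semaphore while testing the bits.
#include <atomic>

constexpr unsigned SIG_NOTIFY    = 1; // set by notify(), consumed by _wait_for()
constexpr unsigned SIG_EXCEPTION = 2; // set by set_exception(), checked by test()

std::atomic<unsigned> signal_bits{0};

bool try_consume_notification()
{
	const unsigned sig = signal_bits.load();

	if (sig & SIG_EXCEPTION)
	{
		// The real code rethrows the stored std::exception_ptr here
	}

	if (sig & SIG_NOTIFY)
	{
		signal_bits &= ~SIG_NOTIFY; // one wakeup consumed per wait
		return true;
	}

	return false; // caller goes back to sleeping on the condition variable
}
```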


@ -7,6 +7,9 @@
#include <string> #include <string>
#include <memory> #include <memory>
#include "sema.h"
#include "cond.h"
// Will report exception and call std::abort() if put in catch(...) // Will report exception and call std::abort() if put in catch(...)
[[noreturn]] void catch_all_exceptions(); [[noreturn]] void catch_all_exceptions();
@ -17,19 +20,19 @@ class task_stack
{ {
std::unique_ptr<task_base> next; std::unique_ptr<task_base> next;
virtual ~task_base() = default; virtual ~task_base();
virtual void exec() virtual void invoke()
{ {
if (next) if (next)
{ {
next->exec(); next->invoke();
} }
} }
}; };
template<typename F> template <typename F>
struct task_type : task_base struct task_type final : task_base
{ {
std::remove_reference_t<F> func; std::remove_reference_t<F> func;
@ -38,10 +41,10 @@ class task_stack
{ {
} }
void exec() override void invoke() final override
{ {
func(); func();
task_base::exec(); task_base::invoke();
} }
}; };
@ -50,7 +53,7 @@ class task_stack
public: public:
task_stack() = default; task_stack() = default;
template<typename F> template <typename F>
task_stack(F&& func) task_stack(F&& func)
: m_stack(new task_type<F>(std::forward<F>(func))) : m_stack(new task_type<F>(std::forward<F>(func)))
{ {
@ -70,11 +73,11 @@ public:
m_stack.reset(); m_stack.reset();
} }
void exec() const void invoke() const
{ {
if (m_stack) if (m_stack)
{ {
m_stack->exec(); m_stack->invoke();
} }
} }
}; };
@ -82,23 +85,41 @@ public:
// Thread control class // Thread control class
class thread_ctrl final class thread_ctrl final
{ {
public: // TODO // Current thread
struct internal;
private:
static thread_local thread_ctrl* g_tls_this_thread; static thread_local thread_ctrl* g_tls_this_thread;
// Thread handle storage // Self pointer
std::aligned_storage_t<16> m_thread; std::shared_ptr<thread_ctrl> m_self;
// Thread join contention counter // Thread handle (platform-specific)
atomic_t<u32> m_joining{}; atomic_t<std::uintptr_t> m_thread{0};
// Thread mutex
mutable semaphore<> m_mutex;
// Thread condition variable
cond_variable m_cond;
// Thread flags
atomic_t<u32> m_signal{0};
// Thread joining condition variable
cond_variable m_jcv;
// Remotely set or caught exception
std::exception_ptr m_exception;
// Thread initial task or atexit task
task_stack m_task;
// Thread interrupt guard counter // Thread interrupt guard counter
volatile u32 m_guard = 0x80000000; volatile u32 m_guard = 0x80000000;
// Thread internals // Thread interrupt condition variable
atomic_t<internal*> m_data{}; cond_variable m_icv;
// Interrupt function
atomic_t<void(*)()> m_iptr{nullptr};
// Fixed name // Fixed name
std::string m_name; std::string m_name;
@ -110,19 +131,19 @@ private:
void initialize(); void initialize();
// Called at the thread end // Called at the thread end
void finalize() noexcept; void finalize(std::exception_ptr) noexcept;
// Get atexit function // Add task (atexit)
void push_atexit(task_stack); static void _push(task_stack);
// Start waiting // Internal waiting function, may throw. Infinite value is -1.
void wait_start(u64 timeout); static bool _wait_for(u64 usec);
// Proceed waiting // Internal throwing function. Mutex must be locked and will be unlocked.
bool wait_wait(u64 timeout); [[noreturn]] void _throw();
// Check exception // Internal notification function
void test(); void _notify(cond_variable thread_ctrl::*);
public: public:
thread_ctrl(std::string&& name); thread_ctrl(std::string&& name);
@ -137,63 +158,22 @@ public:
return m_name; return m_name;
} }
// Initialize internal data // Get exception
void initialize_once(); std::exception_ptr get_exception() const;
// Set exception
void set_exception(std::exception_ptr ptr);
// Get thread result (may throw, simultaneous joining allowed) // Get thread result (may throw, simultaneous joining allowed)
void join(); void join();
// Lock thread mutex // Notify the thread
void lock();
// Lock conditionally (double-checked)
template<typename F>
bool lock_if(F&& pred)
{
if (pred())
{
lock();
try
{
if (LIKELY(pred()))
{
return true;
}
else
{
unlock();
return false;
}
}
catch (...)
{
unlock();
throw;
}
}
else
{
return false;
}
}
// Unlock thread mutex (internal data must be initialized)
void unlock();
// Lock, unlock, notify the thread (required if the condition changed locklessly)
void lock_notify();
// Notify the thread (internal data must be initialized)
void notify(); void notify();
// Set exception (internal data must be initialized, thread mutex must be locked)
void set_exception(std::exception_ptr);
// Internal // Internal
static void handle_interrupt(); static void handle_interrupt();
// Interrupt thread with specified handler call (thread mutex must be locked) // Interrupt thread with specified handler call
void interrupt(void(*handler)()); void interrupt(void(*handler)());
// Interrupt guard recursive enter // Interrupt guard recursive enter
@ -226,90 +206,45 @@ public:
// Check interrupt if delayed by guard scope // Check interrupt if delayed by guard scope
void test_interrupt(); void test_interrupt();
// Current thread sleeps for specified amount of microseconds. // Wait once with timeout. Abortable, may throw. May spuriously return false.
// Wrapper for std::this_thread::sleep, doesn't require valid thread_ctrl. static inline bool wait_for(u64 usec)
[[deprecated]] static void sleep(u64 useconds);
// Wait until pred(). Abortable, may throw. Thread must be locked.
// Timeout in microseconds (zero means infinite).
template<typename F>
static inline auto wait_for(u64 useconds, F&& pred)
{ {
if (useconds) return _wait_for(usec);
{
g_tls_this_thread->wait_start(useconds);
} }
while (true) // Wait. Abortable, may throw.
{
g_tls_this_thread->test();
if (auto&& result = pred())
{
return result;
}
else if (!g_tls_this_thread->wait_wait(useconds) && useconds)
{
return result;
}
}
}
// Wait once. Abortable, may throw. Thread must be locked.
// Timeout in microseconds (zero means infinite).
static inline bool wait_for(u64 useconds = 0)
{
if (useconds)
{
g_tls_this_thread->wait_start(useconds);
}
g_tls_this_thread->test();
if (!g_tls_this_thread->wait_wait(useconds) && useconds)
{
return false;
}
g_tls_this_thread->test();
return true;
}
// Wait until pred(). Abortable, may throw. Thread must be locked.
template<typename F>
static inline auto wait(F&& pred)
{
while (true)
{
g_tls_this_thread->test();
if (auto&& result = pred())
{
return result;
}
g_tls_this_thread->wait_wait(0);
}
}
// Wait once. Abortable, may throw. Thread must be locked.
static inline void wait() static inline void wait()
{ {
g_tls_this_thread->test(); _wait_for(-1);
g_tls_this_thread->wait_wait(0);
g_tls_this_thread->test();
} }
// Wait eternally. Abortable, may throw. Thread must be locked. // Wait until pred(). Abortable, may throw.
template<typename F, typename RT = std::result_of_t<F()>>
static inline RT wait(F&& pred)
{
while (true)
{
if (RT result = pred())
{
return result;
}
_wait_for(-1);
}
}
// Wait eternally until aborted.
[[noreturn]] static inline void eternalize() [[noreturn]] static inline void eternalize()
{ {
while (true) while (true)
{ {
g_tls_this_thread->test(); _wait_for(-1);
g_tls_this_thread->wait_wait(0);
} }
} }
// Test exception (may throw).
static void test();
// Get current thread (may be nullptr) // Get current thread (may be nullptr)
static thread_ctrl* get_current() static thread_ctrl* get_current()
{ {
@ -320,14 +255,14 @@ public:
template<typename F> template<typename F>
static inline void atexit(F&& func) static inline void atexit(F&& func)
{ {
return g_tls_this_thread->push_atexit(std::forward<F>(func)); _push(std::forward<F>(func));
} }
// Named thread factory // Create detached named thread
template<typename N, typename F> template<typename N, typename F>
static inline void spawn(N&& name, F&& func) static inline void spawn(N&& name, F&& func)
{ {
auto&& out = std::make_shared<thread_ctrl>(std::forward<N>(name)); auto out = std::make_shared<thread_ctrl>(std::forward<N>(name));
thread_ctrl::start(out, std::forward<F>(func)); thread_ctrl::start(out, std::forward<F>(func));
} }
@ -382,7 +317,7 @@ public:
} }
// Access thread_ctrl // Access thread_ctrl
thread_ctrl* operator->() const thread_ctrl* get() const
{ {
return m_thread.get(); return m_thread.get();
} }
@ -392,60 +327,12 @@ public:
return m_thread->join(); return m_thread->join();
} }
void lock() const
{
return m_thread->lock();
}
void unlock() const
{
return m_thread->unlock();
}
void lock_notify() const
{
return m_thread->lock_notify();
}
void notify() const void notify() const
{ {
return m_thread->notify(); return m_thread->notify();
} }
}; };
// Simple thread mutex locker
class thread_lock final
{
thread_ctrl* m_thread;
public:
thread_lock(const thread_lock&) = delete;
// Lock specified thread
thread_lock(thread_ctrl* thread)
: m_thread(thread)
{
m_thread->lock();
}
// Lock specified named_thread
thread_lock(named_thread& thread)
: thread_lock(thread.operator->())
{
}
// Lock current thread
thread_lock()
: thread_lock(thread_ctrl::get_current())
{
}
~thread_lock()
{
m_thread->unlock();
}
};
// Interrupt guard scope // Interrupt guard scope
class thread_guard final class thread_guard final
{ {
@ -455,24 +342,24 @@ public:
thread_guard(const thread_guard&) = delete; thread_guard(const thread_guard&) = delete;
thread_guard(thread_ctrl* thread) thread_guard(thread_ctrl* thread)
: m_thread(thread) //: m_thread(thread)
{ {
m_thread->guard_enter(); //m_thread->guard_enter();
} }
thread_guard(named_thread& thread) thread_guard(named_thread& thread)
: thread_guard(thread.operator->()) //: thread_guard(thread.get())
{ {
} }
thread_guard() thread_guard()
: thread_guard(thread_ctrl::get_current()) //: thread_guard(thread_ctrl::get_current())
{ {
} }
~thread_guard() noexcept(false) ~thread_guard() noexcept(false)
{ {
m_thread->guard_leave(); //m_thread->guard_leave();
} }
}; };
@ -498,7 +385,7 @@ public:
} }
// Access thread_ctrl // Access thread_ctrl
thread_ctrl* operator->() const thread_ctrl* get() const
{ {
return m_thread.get(); return m_thread.get();
} }


@ -20,13 +20,13 @@ public:
constexpr cond_variable() = default; constexpr cond_variable() = default;
// Intrusive wait algorithm for lockable objects // Intrusive wait algorithm for lockable objects
template <typename T, void (T::*Unlock)() = &T::unlock, void (T::*Lock)() = &T::lock> template <typename T>
explicit_bool_t wait(T& object, u64 usec_timeout = -1) explicit_bool_t wait(T& object, u64 usec_timeout = -1)
{ {
const u32 _old = m_value.fetch_add(1); // Increment waiter counter const u32 _old = m_value.fetch_add(1); // Increment waiter counter
(object.*Unlock)(); object.unlock();
const bool res = imp_wait(_old, usec_timeout); const bool res = imp_wait(_old, usec_timeout);
(object.*Lock)(); object.lock();
return res; return res;
} }
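
Note (sketch, not from the commit): with the member-pointer template parameters gone, cond_variable::wait() simply calls object.unlock() and object.lock(), so any type exposing those two members can be passed, including adapters like the half_lock helper in Thread.cpp above. A minimal adapter under that assumption:

```cpp
// Any type with lock()/unlock() can now be handed to cond_variable::wait().
// Mirrors the half_lock helper: it only releases an already-held semaphore
// before sleeping and leaves re-acquisition to the caller.
struct half_lock_example
{
	semaphore<>& ref; // assumed: the semaphore type from sema.h

	void lock()
	{
		// intentionally empty: the semaphore is already held by the caller
	}

	void unlock()
	{
		ref.post(); // release it for the duration of the wait
	}
};

// usage:
//   half_lock_example hl{m_mutex};
//   cond.wait(hl, usec_timeout);
```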


@ -99,27 +99,39 @@ public:
} }
}; };
// Simplified shared (reader) lock implementation, std::shared_lock compatible. // Simplified shared (reader) lock implementation.
class reader_lock final class reader_lock final
{ {
shared_mutex& m_mutex; shared_mutex& m_mutex;
void lock()
{
m_mutex.lock_shared();
}
void unlock()
{
m_mutex.unlock_shared();
}
friend class cond_variable;
public: public:
reader_lock(const reader_lock&) = delete; reader_lock(const reader_lock&) = delete;
explicit reader_lock(shared_mutex& mutex) explicit reader_lock(shared_mutex& mutex)
: m_mutex(mutex) : m_mutex(mutex)
{ {
m_mutex.lock_shared(); lock();
} }
~reader_lock() ~reader_lock()
{ {
m_mutex.unlock_shared(); unlock();
} }
}; };
// Simplified exclusive (writer) lock implementation, std::lock_guard compatible. // Simplified exclusive (writer) lock implementation.
class writer_lock final class writer_lock final
{ {
shared_mutex& m_mutex; shared_mutex& m_mutex;


@ -13,6 +13,8 @@ class semaphore_base
void imp_post(s32 _old); void imp_post(s32 _old);
friend class semaphore_lock;
protected: protected:
explicit constexpr semaphore_base(s32 value) explicit constexpr semaphore_base(s32 value)
: m_value{value} : m_value{value}
@ -108,3 +110,34 @@ public:
return Max; return Max;
} }
}; };
class semaphore_lock
{
semaphore_base& m_base;
void lock()
{
m_base.wait();
}
void unlock()
{
m_base.post(INT32_MAX);
}
friend class cond_variable;
public:
explicit semaphore_lock(const semaphore_lock&) = delete;
semaphore_lock(semaphore_base& sema)
: m_base(sema)
{
lock();
}
~semaphore_lock()
{
unlock();
}
};
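
Note (sketch, not from the commit): semaphore_lock is the scope guard thread_ctrl uses for its hidden mutex, and because cond_variable is a friend it can be handed straight to cond_variable::wait(). Roughly how thread_ctrl::join() combines the two; the names below are illustrative stand-ins for m_mutex, m_jcv and m_self:

```cpp
semaphore<> mutex_;      // hidden thread mutex
cond_variable joined_;   // joining condition variable
bool finished_ = false;  // stands in for "m_self has been reset"

void wait_until_finished()
{
	semaphore_lock lock(mutex_); // wait() on construction, post() on destruction

	while (!finished_)
	{
		// cond_variable::wait() releases the semaphore while sleeping
		// and re-acquires it before returning
		joined_.wait(lock);
	}
}
```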


@ -43,8 +43,6 @@ void cpu_thread::on_task()
Emu.SendDbgCommand(DID_CREATE_THREAD, this); Emu.SendDbgCommand(DID_CREATE_THREAD, this);
std::unique_lock<named_thread> lock(*this);
// Check thread status // Check thread status
while (!test(state & cpu_flag::exit)) while (!test(state & cpu_flag::exit))
{ {
@ -53,8 +51,6 @@ void cpu_thread::on_task()
// check stop status // check stop status
if (!test(state & cpu_flag::stop)) if (!test(state & cpu_flag::stop))
{ {
if (lock) lock.unlock();
try try
{ {
cpu_task(); cpu_task();
@ -73,12 +69,6 @@ void cpu_thread::on_task()
continue; continue;
} }
if (!lock)
{
lock.lock();
continue;
}
thread_ctrl::wait(); thread_ctrl::wait();
} }
} }
@ -86,7 +76,7 @@ void cpu_thread::on_task()
void cpu_thread::on_stop() void cpu_thread::on_stop()
{ {
state += cpu_flag::exit; state += cpu_flag::exit;
lock_notify(); notify();
} }
cpu_thread::~cpu_thread() cpu_thread::~cpu_thread()
@ -100,8 +90,6 @@ cpu_thread::cpu_thread(u32 id)
bool cpu_thread::check_state() bool cpu_thread::check_state()
{ {
std::unique_lock<named_thread> lock(*this, std::defer_lock);
while (true) while (true)
{ {
CHECK_EMU_STATUS; // check at least once CHECK_EMU_STATUS; // check at least once
@ -116,12 +104,6 @@ bool cpu_thread::check_state()
break; break;
} }
if (!lock)
{
lock.lock();
continue;
}
thread_ctrl::wait(); thread_ctrl::wait();
} }
@ -144,7 +126,7 @@ bool cpu_thread::check_state()
void cpu_thread::run() void cpu_thread::run()
{ {
state -= cpu_flag::stop; state -= cpu_flag::stop;
lock_notify(); notify();
} }
void cpu_thread::set_signal() void cpu_thread::set_signal()


@ -799,7 +799,7 @@ s32 cellFsAioRead(vm::ptr<CellFsAio> aio, vm::ptr<s32> id, fs_aio_cb_t func)
{ aio, func }, { aio, func },
}); });
m->thread->lock_notify(); m->thread->notify();
return CELL_OK; return CELL_OK;
} }
@ -825,7 +825,7 @@ s32 cellFsAioWrite(vm::ptr<CellFsAio> aio, vm::ptr<s32> id, fs_aio_cb_t func)
{ aio, func }, { aio, func },
}); });
m->thread->lock_notify(); m->thread->notify();
return CELL_OK; return CELL_OK;
} }


@ -773,8 +773,6 @@ void spursSysServiceIdleHandler(SPUThread& spu, SpursKernelContext* ctxt)
{ {
bool shouldExit; bool shouldExit;
std::unique_lock<named_thread> lock(spu, std::defer_lock);
while (true) while (true)
{ {
vm::reservation_acquire(vm::base(spu.offset + 0x100), vm::cast(ctxt->spurs.addr(), HERE), 128); vm::reservation_acquire(vm::base(spu.offset + 0x100), vm::cast(ctxt->spurs.addr(), HERE), 128);
@ -862,8 +860,6 @@ void spursSysServiceIdleHandler(SPUThread& spu, SpursKernelContext* ctxt)
if (spuIdling && shouldExit == false && foundReadyWorkload == false) if (spuIdling && shouldExit == false && foundReadyWorkload == false)
{ {
// The system service blocks by making a reservation and waiting on the lock line reservation lost event. // The system service blocks by making a reservation and waiting on the lock line reservation lost event.
CHECK_EMU_STATUS;
if (!lock) { lock.lock(); continue; }
thread_ctrl::wait_for(1000); thread_ctrl::wait_for(1000);
continue; continue;
} }


@ -75,6 +75,7 @@ struct vdec_thread : ppu_thread
u64 next_pts{}; u64 next_pts{};
u64 next_dts{}; u64 next_dts{};
std::mutex mutex;
std::queue<vdec_frame> out; std::queue<vdec_frame> out;
std::queue<u64> user_data; // TODO std::queue<u64> user_data; // TODO
@ -325,7 +326,7 @@ struct vdec_thread : ppu_thread
cellVdec.trace("Got picture (pts=0x%llx[0x%llx], dts=0x%llx[0x%llx])", frame.pts, frame->pkt_pts, frame.dts, frame->pkt_dts); cellVdec.trace("Got picture (pts=0x%llx[0x%llx], dts=0x%llx[0x%llx])", frame.pts, frame->pkt_pts, frame.dts, frame->pkt_dts);
thread_lock{*this}, out.push(std::move(frame)); std::lock_guard<std::mutex>{mutex}, out.push(std::move(frame));
cb_func(*this, id, CELL_VDEC_MSG_TYPE_PICOUT, CELL_OK, cb_arg); cb_func(*this, id, CELL_VDEC_MSG_TYPE_PICOUT, CELL_OK, cb_arg);
} }
@ -437,7 +438,7 @@ s32 cellVdecClose(u32 handle)
} }
vdec->cmd_push({vdec_cmd::close, 0}); vdec->cmd_push({vdec_cmd::close, 0});
vdec->lock_notify(); vdec->notify();
vdec->join(); vdec->join();
idm::remove<ppu_thread>(handle); idm::remove<ppu_thread>(handle);
return CELL_OK; return CELL_OK;
@ -455,7 +456,7 @@ s32 cellVdecStartSeq(u32 handle)
} }
vdec->cmd_push({vdec_cmd::start_seq, 0}); vdec->cmd_push({vdec_cmd::start_seq, 0});
vdec->lock_notify(); vdec->notify();
return CELL_OK; return CELL_OK;
} }
@ -471,7 +472,7 @@ s32 cellVdecEndSeq(u32 handle)
} }
vdec->cmd_push({vdec_cmd::end_seq, 0}); vdec->cmd_push({vdec_cmd::end_seq, 0});
vdec->lock_notify(); vdec->notify();
return CELL_OK; return CELL_OK;
} }
@ -497,7 +498,7 @@ s32 cellVdecDecodeAu(u32 handle, CellVdecDecodeMode mode, vm::cptr<CellVdecAuInf
auInfo->codecSpecificData, auInfo->codecSpecificData,
}); });
vdec->lock_notify(); vdec->notify();
return CELL_OK; return CELL_OK;
} }
@ -514,7 +515,7 @@ s32 cellVdecGetPicture(u32 handle, vm::cptr<CellVdecPicFormat> format, vm::ptr<u
vdec_frame frame; vdec_frame frame;
{ {
thread_lock lock(*vdec); std::lock_guard<std::mutex> lock(vdec->mutex);
if (vdec->out.empty()) if (vdec->out.empty())
{ {
@ -639,7 +640,7 @@ s32 cellVdecGetPicItem(u32 handle, vm::pptr<CellVdecPicItem> picItem)
u64 usrd; u64 usrd;
u32 frc; u32 frc;
{ {
thread_lock lock(*vdec); std::lock_guard<std::mutex> lock(vdec->mutex);
if (vdec->out.empty()) if (vdec->out.empty())
{ {
@ -830,7 +831,7 @@ s32 cellVdecSetFrameRate(u32 handle, CellVdecFrameRate frc)
// TODO: check frc value // TODO: check frc value
vdec->cmd_push({vdec_cmd::set_frc, frc}); vdec->cmd_push({vdec_cmd::set_frc, frc});
vdec->lock_notify(); vdec->notify();
return CELL_OK; return CELL_OK;
} }


@ -50,7 +50,7 @@ s32 sys_ppu_thread_create(vm::ptr<u64> thread_id, u32 entry, u64 arg, s32 prio,
return eq.name == "_mxr000\0"_u64; return eq.name == "_mxr000\0"_u64;
})) }))
{ {
thread_ctrl::sleep(50000); thread_ctrl::wait_for(50000);
} }
} }


@ -312,33 +312,22 @@ void ppu_thread::cmd_pop(u32 count)
cmd64 ppu_thread::cmd_wait() cmd64 ppu_thread::cmd_wait()
{ {
std::unique_lock<named_thread> lock(*this, std::defer_lock);
while (true) while (true)
{ {
if (UNLIKELY(test(state))) if (UNLIKELY(test(state)))
{ {
if (lock) lock.unlock(); if (check_state())
if (check_state()) // check_status() requires unlocked mutex
{ {
return cmd64{}; return cmd64{};
} }
} }
// Lightweight queue doesn't care about mutex state
if (cmd64 result = cmd_queue[cmd_queue.peek()].exchange(cmd64{})) if (cmd64 result = cmd_queue[cmd_queue.peek()].exchange(cmd64{}))
{ {
return result; return result;
} }
if (!lock) thread_ctrl::wait();
{
lock.lock();
continue;
}
thread_ctrl::wait(); // Waiting requires locked mutex
} }
} }


@ -503,7 +503,7 @@ void SPUThread::process_mfc_cmd(u32 cmd)
u32 SPUThread::get_events(bool waiting) u32 SPUThread::get_events(bool waiting)
{ {
// check reservation status and set SPU_EVENT_LR if lost // check reservation status and set SPU_EVENT_LR if lost
if (last_raddr != 0 && !vm::reservation_test(operator->())) if (last_raddr != 0 && !vm::reservation_test(this->get()))
{ {
ch_event_stat |= SPU_EVENT_LR; ch_event_stat |= SPU_EVENT_LR;
@ -546,7 +546,7 @@ void SPUThread::set_events(u32 mask)
// Notify if some events were set // Notify if some events were set
if (~old_stat & mask && old_stat & SPU_EVENT_WAITING && ch_event_stat & SPU_EVENT_WAITING) if (~old_stat & mask && old_stat & SPU_EVENT_WAITING && ch_event_stat & SPU_EVENT_WAITING)
{ {
lock_notify(); notify();
} }
} }
@ -600,7 +600,7 @@ bool SPUThread::get_ch_value(u32 ch, u32& out)
{ {
if (!channel.try_pop(out)) if (!channel.try_pop(out))
{ {
thread_lock{*this}, thread_ctrl::wait([&] { return test(state & cpu_flag::stop) || channel.try_pop(out); }); thread_ctrl::wait([&] { return test(state & cpu_flag::stop) || channel.try_pop(out); });
return !test(state & cpu_flag::stop); return !test(state & cpu_flag::stop);
} }
@ -615,8 +615,6 @@ bool SPUThread::get_ch_value(u32 ch, u32& out)
// break; // break;
case SPU_RdInMbox: case SPU_RdInMbox:
{ {
std::unique_lock<named_thread> lock(*this, std::defer_lock);
while (true) while (true)
{ {
if (const uint old_count = ch_in_mbox.try_pop(out)) if (const uint old_count = ch_in_mbox.try_pop(out))
@ -636,12 +634,6 @@ bool SPUThread::get_ch_value(u32 ch, u32& out)
return false; return false;
} }
if (!lock)
{
lock.lock();
continue;
}
thread_ctrl::wait(); thread_ctrl::wait();
} }
} }
@ -691,8 +683,6 @@ bool SPUThread::get_ch_value(u32 ch, u32& out)
case SPU_RdEventStat: case SPU_RdEventStat:
{ {
std::unique_lock<named_thread> lock(*this, std::defer_lock);
// start waiting or return immediately // start waiting or return immediately
if (u32 res = get_events(true)) if (u32 res = get_events(true))
{ {
@ -707,8 +697,6 @@ bool SPUThread::get_ch_value(u32 ch, u32& out)
} }
else else
{ {
lock.lock();
// simple waiting loop otherwise // simple waiting loop otherwise
while (!get_events(true) && !test(state & cpu_flag::stop)) while (!get_events(true) && !test(state & cpu_flag::stop))
{ {
@ -754,8 +742,6 @@ bool SPUThread::set_ch_value(u32 ch, u32 value)
{ {
if (offset >= RAW_SPU_BASE_ADDR) if (offset >= RAW_SPU_BASE_ADDR)
{ {
std::unique_lock<named_thread> lock(*this, std::defer_lock);
while (!ch_out_intr_mbox.try_push(value)) while (!ch_out_intr_mbox.try_push(value))
{ {
CHECK_EMU_STATUS; CHECK_EMU_STATUS;
@ -765,12 +751,6 @@ bool SPUThread::set_ch_value(u32 ch, u32 value)
return false; return false;
} }
if (!lock)
{
lock.lock();
continue;
}
thread_ctrl::wait(); thread_ctrl::wait();
} }
@ -961,8 +941,6 @@ bool SPUThread::set_ch_value(u32 ch, u32 value)
case SPU_WrOutMbox: case SPU_WrOutMbox:
{ {
std::unique_lock<named_thread> lock(*this, std::defer_lock);
while (!ch_out_mbox.try_push(value)) while (!ch_out_mbox.try_push(value))
{ {
CHECK_EMU_STATUS; CHECK_EMU_STATUS;
@ -972,12 +950,6 @@ bool SPUThread::set_ch_value(u32 ch, u32 value)
return false; return false;
} }
if (!lock)
{
lock.lock();
continue;
}
thread_ctrl::wait(); thread_ctrl::wait();
} }
@ -1237,7 +1209,7 @@ bool SPUThread::stop_and_signal(u32 code)
return false; return false;
} }
group->cv.wait_for(lv2_lock, 1ms); group->cv.wait(lv2_lock, 1000);
} }
// change group status // change group status
@ -1278,7 +1250,7 @@ bool SPUThread::stop_and_signal(u32 code)
return false; return false;
} }
get_current_thread_cv().wait(lv2_lock); LV2_UNLOCK, thread_ctrl::wait();
} }
// event data must be set by push() // event data must be set by push()
@ -1303,7 +1275,7 @@ bool SPUThread::stop_and_signal(u32 code)
if (thread && thread.get() != this) if (thread && thread.get() != this)
{ {
thread->state -= cpu_flag::suspend; thread->state -= cpu_flag::suspend;
thread->lock_notify(); thread->notify();
} }
} }
@ -1342,7 +1314,7 @@ bool SPUThread::stop_and_signal(u32 code)
if (thread && thread.get() != this) if (thread && thread.get() != this)
{ {
thread->state += cpu_flag::stop; thread->state += cpu_flag::stop;
thread->lock_notify(); thread->notify();
} }
} }


@ -180,7 +180,7 @@ public:
data.value |= value; data.value |= value;
}); });
if (old.wait) spu.lock_notify(); if (old.wait) spu.notify();
} }
// push unconditionally (overwriting previous value), may require notification // push unconditionally (overwriting previous value), may require notification
@ -193,7 +193,7 @@ public:
data.value = value; data.value = value;
}); });
if (old.wait) spu.lock_notify(); if (old.wait) spu.notify();
} }
// returns true on success // returns true on success
@ -228,7 +228,7 @@ public:
// value is not cleared and may be read again // value is not cleared and may be read again
}); });
if (old.wait) spu.lock_notify(); if (old.wait) spu.notify();
return old.value; return old.value;
} }
@ -295,7 +295,7 @@ public:
return false; return false;
})) }))
{ {
spu.lock_notify(); spu.notify();
} }
} }


@ -1013,4 +1013,4 @@ extern ppu_function_t ppu_get_syscall(u64 code)
return nullptr; return nullptr;
} }
DECLARE(lv2_lock_t::mutex); DECLARE(lv2_lock_guard::g_sema);


@ -222,11 +222,11 @@ s32 sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
continue; continue;
} }
get_current_thread_cv().wait_for(lv2_lock, std::chrono::microseconds(timeout - passed)); LV2_UNLOCK, thread_ctrl::wait_for(timeout - passed);
} }
else else
{ {
get_current_thread_cv().wait(lv2_lock); LV2_UNLOCK, thread_ctrl::wait();
} }
} }


@ -282,11 +282,11 @@ s32 sys_event_queue_receive(ppu_thread& ppu, u32 equeue_id, vm::ptr<sys_event_t>
return CELL_ETIMEDOUT; return CELL_ETIMEDOUT;
} }
get_current_thread_cv().wait_for(lv2_lock, std::chrono::microseconds(timeout - passed)); LV2_UNLOCK, thread_ctrl::wait_for(timeout - passed);
} }
else else
{ {
get_current_thread_cv().wait(lv2_lock); LV2_UNLOCK, thread_ctrl::wait();
} }
} }


@ -162,11 +162,11 @@ s32 sys_event_flag_wait(ppu_thread& ppu, u32 id, u64 bitptn, u32 mode, vm::ptr<u
return CELL_ETIMEDOUT; return CELL_ETIMEDOUT;
} }
get_current_thread_cv().wait_for(lv2_lock, std::chrono::microseconds(timeout - passed)); LV2_UNLOCK, thread_ctrl::wait_for(timeout - passed);
} }
else else
{ {
get_current_thread_cv().wait(lv2_lock); LV2_UNLOCK, thread_ctrl::wait();
} }
} }


@ -28,7 +28,7 @@ void lv2_int_serv_t::exec()
{ ppu_cmd::lle_call, 2 }, { ppu_cmd::lle_call, 2 },
}); });
thread->lock_notify(); thread->notify();
} }
void lv2_int_serv_t::join(ppu_thread& ppu, lv2_lock_t lv2_lock) void lv2_int_serv_t::join(ppu_thread& ppu, lv2_lock_t lv2_lock)
@ -41,14 +41,14 @@ void lv2_int_serv_t::join(ppu_thread& ppu, lv2_lock_t lv2_lock)
{ ppu_cmd::opcode, ppu_instructions::SC(0) }, { ppu_cmd::opcode, ppu_instructions::SC(0) },
}); });
thread->lock_notify(); thread->notify();
// Join thread (TODO) // Join thread (TODO)
while (!test(thread->state & cpu_flag::exit)) while (!test(thread->state & cpu_flag::exit))
{ {
CHECK_EMU_STATUS; CHECK_EMU_STATUS;
get_current_thread_cv().wait_for(lv2_lock, 1ms); LV2_UNLOCK, thread_ctrl::wait_for(1000);
} }
// Cleanup // Cleanup
@ -155,7 +155,7 @@ void sys_interrupt_thread_eoi(ppu_thread& ppu) // Low-level PPU function example
if (ppu.lr == 0 || ppu.gpr[11] != 88) if (ppu.lr == 0 || ppu.gpr[11] != 88)
{ {
// Low-level function must disable interrupts before throwing (not related to sys_interrupt_*, it's rather coincidence) // Low-level function must disable interrupts before throwing (not related to sys_interrupt_*, it's rather coincidence)
ppu->interrupt_disable(); ppu.get()->interrupt_disable();
throw cpu_flag::ret; throw cpu_flag::ret;
} }
} }


@ -202,11 +202,11 @@ s32 _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u64 t
} }
} }
get_current_thread_cv().wait_for(lv2_lock, std::chrono::microseconds(timeout - passed)); LV2_UNLOCK, thread_ctrl::wait_for(timeout - passed);
} }
else else
{ {
get_current_thread_cv().wait(lv2_lock); LV2_UNLOCK, thread_ctrl::wait();
} }
} }


@ -114,11 +114,11 @@ s32 _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)
return CELL_ETIMEDOUT; return CELL_ETIMEDOUT;
} }
get_current_thread_cv().wait_for(lv2_lock, std::chrono::microseconds(timeout - passed)); LV2_UNLOCK, thread_ctrl::wait_for(timeout - passed);
} }
else else
{ {
get_current_thread_cv().wait(lv2_lock); LV2_UNLOCK, thread_ctrl::wait();
} }
} }


@ -148,11 +148,11 @@ s32 sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout)
return CELL_ETIMEDOUT; return CELL_ETIMEDOUT;
} }
get_current_thread_cv().wait_for(lv2_lock, std::chrono::microseconds(timeout - passed)); LV2_UNLOCK, thread_ctrl::wait_for(timeout - passed);
} }
else else
{ {
get_current_thread_cv().wait(lv2_lock); LV2_UNLOCK, thread_ctrl::wait();
} }
} }


@ -74,7 +74,7 @@ s32 sys_ppu_thread_join(ppu_thread& ppu, u32 thread_id, vm::ptr<u64> vptr)
{ {
CHECK_EMU_STATUS; CHECK_EMU_STATUS;
get_current_thread_cv().wait_for(lv2_lock, 1ms); LV2_UNLOCK, thread_ctrl::wait_for(1000);
} }
// get exit status from the register // get exit status from the register


@ -130,11 +130,11 @@ s32 sys_rwlock_rlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
return CELL_ETIMEDOUT; return CELL_ETIMEDOUT;
} }
get_current_thread_cv().wait_for(lv2_lock, std::chrono::microseconds(timeout - passed)); LV2_UNLOCK, thread_ctrl::wait_for(timeout - passed);
} }
else else
{ {
get_current_thread_cv().wait(lv2_lock); LV2_UNLOCK, thread_ctrl::wait();
} }
} }
@ -253,11 +253,11 @@ s32 sys_rwlock_wlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
return CELL_ETIMEDOUT; return CELL_ETIMEDOUT;
} }
get_current_thread_cv().wait_for(lv2_lock, std::chrono::microseconds(timeout - passed)); LV2_UNLOCK, thread_ctrl::wait_for(timeout - passed);
} }
else else
{ {
get_current_thread_cv().wait(lv2_lock); LV2_UNLOCK, thread_ctrl::wait();
} }
} }


@ -108,11 +108,11 @@ s32 sys_semaphore_wait(ppu_thread& ppu, u32 sem_id, u64 timeout)
return CELL_ETIMEDOUT; return CELL_ETIMEDOUT;
} }
get_current_thread_cv().wait_for(lv2_lock, std::chrono::microseconds(timeout - passed)); LV2_UNLOCK, thread_ctrl::wait_for(timeout - passed);
} }
else else
{ {
get_current_thread_cv().wait(lv2_lock); LV2_UNLOCK, thread_ctrl::wait();
} }
} }


@ -421,7 +421,7 @@ s32 sys_spu_thread_group_resume(u32 id)
if (thread) if (thread)
{ {
thread->state -= cpu_flag::suspend; thread->state -= cpu_flag::suspend;
thread->lock_notify(); thread->notify();
} }
} }
@ -504,7 +504,7 @@ s32 sys_spu_thread_group_terminate(u32 id, s32 value)
if (thread) if (thread)
{ {
thread->state += cpu_flag::stop; thread->state += cpu_flag::stop;
thread->lock_notify(); thread->notify();
} }
} }
@ -563,7 +563,7 @@ s32 sys_spu_thread_group_join(u32 id, vm::ptr<u32> cause, vm::ptr<u32> status)
CHECK_EMU_STATUS; CHECK_EMU_STATUS;
group->cv.wait_for(lv2_lock, 1ms); group->cv.wait(lv2_lock, 1000);
} }
switch (group->join_state & ~SPU_TGJSF_IS_JOINING) switch (group->join_state & ~SPU_TGJSF_IS_JOINING)


@ -157,7 +157,7 @@ struct lv2_spu_group_t
s32 exit_status; // SPU Thread Group Exit Status s32 exit_status; // SPU Thread Group Exit Status
atomic_t<u32> join_state; // flags used to detect exit cause atomic_t<u32> join_state; // flags used to detect exit cause
std::condition_variable cv; // used to signal waiting PPU thread cond_variable cv; // used to signal waiting PPU thread
std::weak_ptr<lv2_event_queue_t> ep_run; // port for SYS_SPU_THREAD_GROUP_EVENT_RUN events std::weak_ptr<lv2_event_queue_t> ep_run; // port for SYS_SPU_THREAD_GROUP_EVENT_RUN events
std::weak_ptr<lv2_event_queue_t> ep_exception; // TODO: SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION std::weak_ptr<lv2_event_queue_t> ep_exception; // TODO: SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION


@ -1,8 +1,10 @@
#pragma once #pragma once
#include "Utilities/SleepQueue.h" #include "Utilities/SleepQueue.h"
#include <mutex> #include "Utilities/Thread.h"
#include <condition_variable> #include "Utilities/mutex.h"
#include "Utilities/sema.h"
#include "Utilities/cond.h"
// attr_protocol (waiting scheduling policy) // attr_protocol (waiting scheduling policy)
enum enum
@ -43,27 +45,26 @@ enum
SYS_SYNC_NOT_ADAPTIVE = 0x2000, SYS_SYNC_NOT_ADAPTIVE = 0x2000,
}; };
extern std::condition_variable& get_current_thread_cv(); // Temporary implementation for LV2_UNLOCK (TODO: remove it)
struct lv2_lock_guard
// Simple class for global mutex to pass unique_lock and check it
struct lv2_lock_t
{ {
using type = std::unique_lock<std::mutex>; static semaphore<> g_sema;
type& ref; lv2_lock_guard(const lv2_lock_guard&) = delete;
lv2_lock_t(type& lv2_lock) lv2_lock_guard()
: ref(lv2_lock)
{ {
verify(HERE), ref.owns_lock(), ref.mutex() == &mutex; g_sema.post();
} }
operator type&() const ~lv2_lock_guard()
{ {
return ref; g_sema.wait();
} }
static type::mutex_type mutex;
}; };
#define LV2_LOCK lv2_lock_t::type lv2_lock(lv2_lock_t::mutex) using lv2_lock_t = semaphore_lock&;
#define LV2_LOCK semaphore_lock lv2_lock(lv2_lock_guard::g_sema)
#define LV2_UNLOCK lv2_lock_guard{}
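
Note (explanation, not from the commit): LV2_UNLOCK relies on temporary lifetime rules. In "LV2_UNLOCK, thread_ctrl::wait_for(1000);" the lv2_lock_guard temporary posts the global semaphore before the wait runs and re-acquires it when the temporary is destroyed at the end of the full expression. Expanded, the pattern is roughly:

```cpp
// Rough expansion of the comma pattern (g_sema as declared above).
void wait_with_lv2_unlocked_example(u64 usec)
{
	lv2_lock_guard::g_sema.post(); // leave the global LV2 lock
	thread_ctrl::wait_for(usec);   // sleep without holding it
	lv2_lock_guard::g_sema.wait(); // take it back before continuing
}
```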


@ -16,7 +16,8 @@ extern u64 get_system_time();
void lv2_timer_t::on_task() void lv2_timer_t::on_task()
{ {
thread_lock lock(*this); //thread_lock lock(*this);
LV2_LOCK;
while (state <= SYS_TIMER_STATE_RUN) while (state <= SYS_TIMER_STATE_RUN)
{ {
@ -24,7 +25,7 @@ void lv2_timer_t::on_task()
if (state == SYS_TIMER_STATE_RUN) if (state == SYS_TIMER_STATE_RUN)
{ {
LV2_LOCK; //LV2_LOCK;
while (get_system_time() >= expire) while (get_system_time() >= expire)
{ {
@ -52,7 +53,7 @@ void lv2_timer_t::on_task()
continue; continue;
} }
thread_ctrl::wait_for(1000); LV2_UNLOCK, thread_ctrl::wait_for(1000);
} }
} }
@ -65,7 +66,7 @@ void lv2_timer_t::on_stop()
{ {
// Signal thread using invalid state and join // Signal thread using invalid state and join
state = -1; state = -1;
this->lock_notify(); this->notify();
named_thread::on_stop(); named_thread::on_stop();
} }
@ -170,7 +171,7 @@ s32 _sys_timer_start(u32 timer_id, u64 base_time, u64 period)
timer->expire = base_time ? base_time : start_time + period; timer->expire = base_time ? base_time : start_time + period;
timer->period = period; timer->period = period;
timer->state = SYS_TIMER_STATE_RUN; timer->state = SYS_TIMER_STATE_RUN;
timer->lock_notify(); timer->notify();
return CELL_OK; return CELL_OK;
} }


@ -32,19 +32,18 @@ namespace vm
, m_thread(ptr->thread) , m_thread(ptr->thread)
{ {
// Initialize waiter // Initialize waiter
writer_lock{s_mutex}, s_waiters.emplace(m_ptr); writer_lock lock(s_mutex);
s_waiters.emplace(m_ptr);
m_thread->lock();
} }
~waiter() ~waiter()
{ {
// Reset thread // Reset thread
atomic_storage<thread_ctrl*>::store(m_ptr->thread, nullptr); m_ptr->thread = nullptr;
m_thread->unlock();
// Remove waiter // Remove waiter
writer_lock{s_mutex}, s_waiters.erase(m_ptr); writer_lock lock(s_mutex);
s_waiters.erase(m_ptr);
} }
}; };
@ -54,23 +53,13 @@ namespace vm
bool waiter_base::try_notify() bool waiter_base::try_notify()
{ {
const auto _t = atomic_storage<thread_ctrl*>::load(thread); const auto _t = thread.load();
if (UNLIKELY(!_t))
{
// Return if thread not found
return false;
}
// Lock the thread
_t->lock();
try try
{ {
// Test predicate // Test predicate
if (UNLIKELY(!thread || !test())) if (UNLIKELY(!_t || !test()))
{ {
_t->unlock();
return false; return false;
} }
} }
@ -81,9 +70,11 @@ namespace vm
} }
// Signal the thread with nullptr // Signal the thread with nullptr
atomic_storage<thread_ctrl*>::store(thread, nullptr); if (auto _t = thread.exchange(nullptr))
_t->unlock(); {
_t->notify(); _t->notify();
}
return true; return true;
} }
@ -128,10 +119,10 @@ namespace vm
// Poll waiters periodically (TODO) // Poll waiters periodically (TODO)
while (notify_all() && !Emu.IsPaused() && !Emu.IsStopped()) while (notify_all() && !Emu.IsPaused() && !Emu.IsStopped())
{ {
thread_ctrl::sleep(50); thread_ctrl::wait_for(50);
} }
thread_ctrl::sleep(1000); thread_ctrl::wait_for(1000);
} }
}); });
} }
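
Note (sketch, not from the commit): the waiter now stores its thread_ctrl pointer in an atomic slot, and try_notify() claims it with exchange() before notifying, so a wakeup is delivered at most once and never to a waiter that has already detached itself:

```cpp
// Claim-then-notify idiom, with the slot type as declared in the header below.
void notify_waiter(atomic_t<thread_ctrl*>& slot)
{
	if (thread_ctrl* t = slot.exchange(nullptr))
	{
		t->notify(); // safe now: notify() no longer needs the thread's mutex
	}
}
```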


@ -1,6 +1,7 @@
#pragma once #pragma once
#include "Utilities/types.h" #include "Utilities/types.h"
#include "Utilities/Atomic.h"
class thread_ctrl; class thread_ctrl;
@ -10,7 +11,7 @@ namespace vm
{ {
u32 addr; u32 addr;
u32 mask; u32 mask;
thread_ctrl* thread{}; atomic_t<thread_ctrl*> thread{};
void initialize(u32 addr, u32 size); void initialize(u32 addr, u32 size);
bool try_notify(); bool try_notify();


@ -928,7 +928,7 @@ struct psp2_event_flag final
idm::check<ARMv7Thread>(cmd.arg, [](auto& cpu) idm::check<ARMv7Thread>(cmd.arg, [](auto& cpu)
{ {
cpu.state += cpu_flag::signal; cpu.state += cpu_flag::signal;
cpu.lock_notify(); cpu.notify();
}); });
break; break;
@ -955,7 +955,7 @@ struct psp2_event_flag final
{ {
if (!exec(task::signal, cpu.id)) if (!exec(task::signal, cpu.id))
{ {
thread_lock{cpu}, thread_ctrl::wait([&] { return cpu.state.test_and_reset(cpu_flag::signal); }); thread_ctrl::wait([&] { return cpu.state.test_and_reset(cpu_flag::signal); });
} }
else else
{ {
@ -980,7 +980,7 @@ private:
cpu.GPR[0] = SCE_KERNEL_ERROR_EVF_MULTI; cpu.GPR[0] = SCE_KERNEL_ERROR_EVF_MULTI;
cpu.GPR[1] = pattern; cpu.GPR[1] = pattern;
cpu.state += cpu_flag::signal; cpu.state += cpu_flag::signal;
cpu->lock_notify(); cpu.notify();
return; return;
} }
@ -1002,7 +1002,7 @@ private:
cpu.GPR[0] = SCE_OK; cpu.GPR[0] = SCE_OK;
cpu.GPR[1] = old_pattern; cpu.GPR[1] = old_pattern;
cpu.state += cpu_flag::signal; cpu.state += cpu_flag::signal;
cpu->lock_notify(); cpu.notify();
} }
else else
{ {
@ -1100,7 +1100,7 @@ private:
cpu.state += cpu_flag::signal; cpu.state += cpu_flag::signal;
cpu.owner = nullptr; cpu.owner = nullptr;
waiters -= attr & SCE_KERNEL_ATTR_MULTI ? 1 : cpu.id; waiters -= attr & SCE_KERNEL_ATTR_MULTI ? 1 : cpu.id;
cpu->lock_notify(); cpu.notify();
} }
} }
} }
@ -1131,7 +1131,7 @@ private:
cpu.GPR[1] = _pattern; cpu.GPR[1] = _pattern;
cpu.state += cpu_flag::signal; cpu.state += cpu_flag::signal;
cpu.owner = nullptr; cpu.owner = nullptr;
cpu.lock_notify(); cpu.notify();
} }
pattern = _pattern; pattern = _pattern;
@ -1268,9 +1268,13 @@ error_code sceKernelWaitEventFlag(ARMv7Thread& cpu, s32 evfId, u32 bitPattern, u
// Second chance // Second chance
if (!evf->exec(psp2_event_flag::task::wait, cpu.id) || !cpu.state.test_and_reset(cpu_flag::signal)) if (!evf->exec(psp2_event_flag::task::wait, cpu.id) || !cpu.state.test_and_reset(cpu_flag::signal))
{ {
thread_lock lock(cpu); while (!cpu.state.test_and_reset(cpu_flag::signal))
{
if (timeout)
{
const u64 passed = get_system_time() - start_time;
if (!thread_ctrl::wait_for(timeout, [&] { return cpu.state.test_and_reset(cpu_flag::signal); })) if (passed >= timeout)
{ {
if (!evf->exec(psp2_event_flag::task::timeout, cpu.id)) if (!evf->exec(psp2_event_flag::task::timeout, cpu.id))
{ {
@ -1283,6 +1287,16 @@ error_code sceKernelWaitEventFlag(ARMv7Thread& cpu, s32 evfId, u32 bitPattern, u
cpu.state -= cpu_flag::signal; cpu.state -= cpu_flag::signal;
} }
} }
break;
}
thread_ctrl::wait_for(timeout - passed);
}
else
{
thread_ctrl::wait();
}
} }
} }


@ -377,7 +377,7 @@ namespace rsx
{ ppu_cmd::lle_call, vblank_handler }, { ppu_cmd::lle_call, vblank_handler },
}); });
intr_thread->lock_notify(); intr_thread->notify();
} }
continue; continue;
@ -697,7 +697,7 @@ namespace rsx
//void thread::invoke(std::function<bool()> callback) //void thread::invoke(std::function<bool()> callback)
//{ //{
// if (operator->() == thread_ctrl::get_current()) // if (get() == thread_ctrl::get_current())
// { // {
// while (true) // while (true)
// { // {


@ -779,7 +779,7 @@ namespace rsx
{ ppu_cmd::lle_call, rsx->flip_handler }, { ppu_cmd::lle_call, rsx->flip_handler },
}); });
rsx->intr_thread->lock_notify(); rsx->intr_thread->notify();
} }
} }
@ -793,7 +793,7 @@ namespace rsx
{ ppu_cmd::lle_call, rsx->user_handler }, { ppu_cmd::lle_call, rsx->user_handler },
}); });
rsx->intr_thread->lock_notify(); rsx->intr_thread->notify();
} }
} }


@ -408,11 +408,15 @@ void Emulator::Resume()
SendDbgCommand(DID_RESUME_EMU); SendDbgCommand(DID_RESUME_EMU);
{
LV2_LOCK;
idm::select<ppu_thread, SPUThread, RawSPUThread, ARMv7Thread>([](u32, cpu_thread& cpu) idm::select<ppu_thread, SPUThread, RawSPUThread, ARMv7Thread>([](u32, cpu_thread& cpu)
{ {
cpu.state -= cpu_flag::dbg_global_pause; cpu.state -= cpu_flag::dbg_global_pause;
cpu.lock_notify(); cpu.notify();
}); });
}
rpcs3::on_resume()(); rpcs3::on_resume()();
@ -437,10 +441,8 @@ void Emulator::Stop()
idm::select<ppu_thread, SPUThread, RawSPUThread, ARMv7Thread>([](u32, cpu_thread& cpu) idm::select<ppu_thread, SPUThread, RawSPUThread, ARMv7Thread>([](u32, cpu_thread& cpu)
{ {
cpu.state += cpu_flag::dbg_global_stop; cpu.state += cpu_flag::dbg_global_stop;
cpu->lock(); cpu.get()->set_exception(std::make_exception_ptr(EmulationStopped()));
cpu->set_exception(std::make_exception_ptr(EmulationStopped())); cpu.notify();
cpu->unlock();
cpu->notify();
}); });
} }


@ -440,7 +440,7 @@ void InterpreterDisAsmFrame::DoRun(wxCommandEvent& WXUNUSED(event))
if (cpu && test(cpu->state & cpu_state_pause)) if (cpu && test(cpu->state & cpu_state_pause))
{ {
cpu->state -= cpu_flag::dbg_pause; cpu->state -= cpu_flag::dbg_pause;
(*cpu)->lock_notify(); cpu->notify();
} }
} }
@ -462,7 +462,7 @@ void InterpreterDisAsmFrame::DoStep(wxCommandEvent& WXUNUSED(event))
state -= cpu_flag::dbg_pause; state -= cpu_flag::dbg_pause;
}))) })))
{ {
(*cpu)->lock_notify(); cpu->notify();
} }
} }
} }