From 790ce8aee1d5089ecc17f9a54dfefa57fb75ccea Mon Sep 17 00:00:00 2001
From: Ben Vanik <ben.vanik@gmail.com>
Date: Sun, 6 Sep 2015 13:34:08 -0700
Subject: [PATCH] Switch back to std:: muteces. mutices. mutexen.

---
 src/xenia/apu/xma_context.cc             | 10 +++++-----
 src/xenia/apu/xma_context.h              |  4 ++--
 src/xenia/apu/xma_decoder.cc             |  6 +++---
 src/xenia/apu/xma_decoder.h              |  4 ++--
 src/xenia/base/mutex.cc                  |  4 ++--
 src/xenia/base/mutex.h                   | 23 ++++++++---------------
 src/xenia/base/type_pool.h               | 10 ++++------
 src/xenia/cpu/frontend/ppc_context.h     |  4 ++--
 src/xenia/cpu/frontend/ppc_frontend.cc   |  8 ++++----
 src/xenia/cpu/frontend/ppc_translator.cc |  6 +++---
 src/xenia/gpu/gl4/command_processor.cc   |  8 ++++----
 src/xenia/gpu/gl4/command_processor.h    |  3 ++-
 src/xenia/gpu/gl4/gl4_graphics_system.cc |  2 +-
 src/xenia/gpu/gl4/texture_cache.cc       |  2 +-
 src/xenia/gpu/gl4/texture_cache.h        |  4 ++--
 src/xenia/ui/loop_win.cc                 |  6 +++---
 src/xenia/ui/loop_win.h                  |  3 +--
 17 files changed, 49 insertions(+), 58 deletions(-)

diff --git a/src/xenia/apu/xma_context.cc b/src/xenia/apu/xma_context.cc
index 42eed6eda..bb615045d 100644
--- a/src/xenia/apu/xma_context.cc
+++ b/src/xenia/apu/xma_context.cc
@@ -99,7 +99,7 @@ int XmaContext::Setup(uint32_t id, Memory* memory, uint32_t guest_ptr) {
 }
 
 void XmaContext::Work() {
-  std::lock_guard<xe::mutex> lock(lock_);
+  std::lock_guard<std::mutex> lock(lock_);
   if (!is_allocated() || !is_enabled()) {
     return;
   }
@@ -113,7 +113,7 @@ void XmaContext::Work() {
 }
 
 void XmaContext::Enable() {
-  std::lock_guard<xe::mutex> lock(lock_);
+  std::lock_guard<std::mutex> lock(lock_);
 
   auto context_ptr = memory()->TranslateVirtual(guest_ptr());
   XMA_CONTEXT_DATA data(context_ptr);
@@ -141,7 +141,7 @@ bool XmaContext::Block(bool poll) {
 }
 
 void XmaContext::Clear() {
-  std::lock_guard<xe::mutex> lock(lock_);
+  std::lock_guard<std::mutex> lock(lock_);
   XELOGAPU("XmaContext: reset context %d", id());
 
   auto context_ptr = memory()->TranslateVirtual(guest_ptr());
@@ -158,14 +158,14 @@ void XmaContext::Clear() {
 }
 
 void XmaContext::Disable() {
-  std::lock_guard<xe::mutex> lock(lock_);
+  std::lock_guard<std::mutex> lock(lock_);
   XELOGAPU("XmaContext: disabling context %d", id());
   set_is_enabled(false);
 }
 
 void XmaContext::Release() {
   // Lock it in case the decoder thread is working on it now.
-  std::lock_guard<xe::mutex> lock(lock_);
+  std::lock_guard<std::mutex> lock(lock_);
   assert_true(is_allocated_ == true);
   set_is_allocated(false);
 
diff --git a/src/xenia/apu/xma_context.h b/src/xenia/apu/xma_context.h
index 113129e36..8bcf543b7 100644
--- a/src/xenia/apu/xma_context.h
+++ b/src/xenia/apu/xma_context.h
@@ -11,9 +11,9 @@
 #define XENIA_APU_XMA_CONTEXT_H_
 
 #include <atomic>
+#include <mutex>
 #include <queue>
 
-#include "xenia/base/mutex.h"
 #include "xenia/memory.h"
 #include "xenia/xbox.h"
 
@@ -192,7 +192,7 @@ class XmaContext {
 
   uint32_t id_ = 0;
   uint32_t guest_ptr_ = 0;
-  xe::mutex lock_;
+  std::mutex lock_;
   bool is_allocated_ = false;
   bool is_enabled_ = false;
 
diff --git a/src/xenia/apu/xma_decoder.cc b/src/xenia/apu/xma_decoder.cc
index 4d7a7bf11..e3184b8c4 100644
--- a/src/xenia/apu/xma_decoder.cc
+++ b/src/xenia/apu/xma_decoder.cc
@@ -163,7 +163,7 @@ int XmaDecoder::GetContextId(uint32_t guest_ptr) {
 }
 
 uint32_t XmaDecoder::AllocateContext() {
-  std::lock_guard<xe::mutex> lock(lock_);
+  std::lock_guard<std::mutex> lock(lock_);
 
   for (uint32_t n = 0; n < kContextCount; n++) {
     XmaContext& context = contexts_[n];
@@ -177,7 +177,7 @@ uint32_t XmaDecoder::AllocateContext() {
 }
 
 void XmaDecoder::ReleaseContext(uint32_t guest_ptr) {
-  std::lock_guard<xe::mutex> lock(lock_);
+  std::lock_guard<std::mutex> lock(lock_);
 
   auto context_id = GetContextId(guest_ptr);
   assert_true(context_id >= 0);
@@ -187,7 +187,7 @@ void XmaDecoder::ReleaseContext(uint32_t guest_ptr) {
 }
 
 bool XmaDecoder::BlockOnContext(uint32_t guest_ptr, bool poll) {
-  std::lock_guard<xe::mutex> lock(lock_);
+  std::lock_guard<std::mutex> lock(lock_);
 
   auto context_id = GetContextId(guest_ptr);
   assert_true(context_id >= 0);
diff --git a/src/xenia/apu/xma_decoder.h b/src/xenia/apu/xma_decoder.h
index 8166217cb..98877a176 100644
--- a/src/xenia/apu/xma_decoder.h
+++ b/src/xenia/apu/xma_decoder.h
@@ -11,10 +11,10 @@
 #define XENIA_APU_XMA_DECODER_H_
 
 #include <atomic>
+#include <mutex>
 #include <queue>
 
 #include "xenia/apu/xma_context.h"
-#include "xenia/base/mutex.h"
 #include "xenia/kernel/objects/xthread.h"
 #include "xenia/xbox.h"
 
@@ -66,7 +66,7 @@ class XmaDecoder {
   kernel::object_ref<kernel::XHostThread> worker_thread_;
   xe::threading::Fence worker_fence_;
 
-  xe::mutex lock_;
+  std::mutex lock_;
 
   // Stored little endian, accessed through 0x7FEA....
   union {
diff --git a/src/xenia/base/mutex.cc b/src/xenia/base/mutex.cc
index a0e9a4f6c..80bdb8411 100644
--- a/src/xenia/base/mutex.cc
+++ b/src/xenia/base/mutex.cc
@@ -11,8 +11,8 @@
 
 namespace xe {
 
-xe::recursive_mutex& global_critical_region::mutex() {
-  static xe::recursive_mutex global_mutex;
+std::recursive_mutex& global_critical_region::mutex() {
+  static std::recursive_mutex global_mutex;
   return global_mutex;
 }
 
diff --git a/src/xenia/base/mutex.h b/src/xenia/base/mutex.h
index 6b1080c73..98fb6c548 100644
--- a/src/xenia/base/mutex.h
+++ b/src/xenia/base/mutex.h
@@ -14,13 +14,6 @@
 
 namespace xe {
 
-// This exists to allow us to swap the mutex implementation with one that
-// we can mess with in the debugger (such as allowing the debugger to ignore
-// locks while all threads are suspended). std::mutex should not be used.
-
-using mutex = std::mutex;
-using recursive_mutex = std::recursive_mutex;
-
 // The global critical region mutex singleton.
 // This must guard any operation that may suspend threads or be sensitive to
 // being suspended such as global table locks and such.
@@ -44,7 +37,7 @@ using recursive_mutex = std::recursive_mutex;
 // [thread 0]:
 //   DoKernelStuff():
 //     auto global_lock = global_critical_region_.Acquire();
-//     std::lock_guard<xe::mutex> table_lock(table_mutex_);
+//     std::lock_guard<std::mutex> table_lock(table_mutex_);
 //     table_->InsertStuff();
 // [thread 1]:
 //   MySuspendThread():
@@ -61,25 +54,25 @@ using recursive_mutex = std::recursive_mutex;
 // };
 class global_critical_region {
  public:
-  static xe::recursive_mutex& mutex();
+  static std::recursive_mutex& mutex();
 
   // Acquires a lock on the global critical section.
   // Use this when keeping an instance is not possible. Otherwise, prefer
   // to keep an instance of global_critical_region near the members requiring
   // it to keep things readable.
-  static std::unique_lock<xe::recursive_mutex> AcquireDirect() {
-    return std::unique_lock<xe::recursive_mutex>(mutex());
+  static std::unique_lock<std::recursive_mutex> AcquireDirect() {
+    return std::unique_lock<std::recursive_mutex>(mutex());
   }
 
   // Acquires a lock on the global critical section.
-  inline std::unique_lock<xe::recursive_mutex> Acquire() {
-    return std::unique_lock<xe::recursive_mutex>(mutex());
+  inline std::unique_lock<std::recursive_mutex> Acquire() {
+    return std::unique_lock<std::recursive_mutex>(mutex());
   }
 
   // Tries to acquire a lock on the global critical section.
   // Check owns_lock() to see if the lock was successfully acquired.
-  inline std::unique_lock<xe::recursive_mutex> TryAcquire() {
-    return std::unique_lock<xe::recursive_mutex>(mutex(), std::try_to_lock);
+  inline std::unique_lock<std::recursive_mutex> TryAcquire() {
+    return std::unique_lock<std::recursive_mutex>(mutex(), std::try_to_lock);
   }
 };
 
diff --git a/src/xenia/base/type_pool.h b/src/xenia/base/type_pool.h
index 55ded9812..81f0ba77f 100644
--- a/src/xenia/base/type_pool.h
+++ b/src/xenia/base/type_pool.h
@@ -13,8 +13,6 @@
 #include <mutex>
 #include <vector>
 
-#include "xenia/base/mutex.h"
-
 namespace xe {
 
 template <typename T, typename A>
@@ -23,7 +21,7 @@ class TypePool {
   ~TypePool() { Reset(); }
 
   void Reset() {
-    std::lock_guard<xe::mutex> guard(lock_);
+    std::lock_guard<std::mutex> guard(lock_);
     for (auto it = list_.begin(); it != list_.end(); ++it) {
       T* value = *it;
       delete value;
@@ -34,7 +32,7 @@ class TypePool {
   T* Allocate(A arg0) {
     T* result = 0;
     {
-      std::lock_guard<xe::mutex> guard(lock_);
+      std::lock_guard<std::mutex> guard(lock_);
       if (list_.size()) {
         result = list_.back();
         list_.pop_back();
@@ -47,12 +45,12 @@ class TypePool {
   }
 
   void Release(T* value) {
-    std::lock_guard<xe::mutex> guard(lock_);
+    std::lock_guard<std::mutex> guard(lock_);
     list_.push_back(value);
   }
 
  private:
-  xe::mutex lock_;
+  std::mutex lock_;
   std::vector<T*> list_;
 };
diff --git a/src/xenia/cpu/frontend/ppc_context.h b/src/xenia/cpu/frontend/ppc_context.h
index 4974e59fb..12f78bd07 100644
--- a/src/xenia/cpu/frontend/ppc_context.h
+++ b/src/xenia/cpu/frontend/ppc_context.h
@@ -11,9 +11,9 @@
 #define XENIA_CPU_FRONTEND_PPC_CONTEXT_H_
 
 #include <cstdint>
+#include <mutex>
 #include <string>
 
-#include "xenia/base/mutex.h"
 #include "xenia/base/vec128.h"
 
 namespace xe {
@@ -407,7 +407,7 @@ typedef struct alignas(64) PPCContext_s {
 
   // Global interrupt lock, held while interrupts are disabled or interrupts are
   // executing. This is shared among all threads and comes from the processor.
-  xe::recursive_mutex* global_mutex;
+  std::recursive_mutex* global_mutex;
 
   // Used to shuttle data into externs. Contents volatile.
   uint64_t scratch;
diff --git a/src/xenia/cpu/frontend/ppc_frontend.cc b/src/xenia/cpu/frontend/ppc_frontend.cc
index 7e82e573b..22a0ed9ca 100644
--- a/src/xenia/cpu/frontend/ppc_frontend.cc
+++ b/src/xenia/cpu/frontend/ppc_frontend.cc
@@ -61,15 +61,15 @@ Memory* PPCFrontend::memory() const { return processor_->memory(); }
 // Checks the state of the global lock and sets scratch to the current MSR
 // value.
 void CheckGlobalLock(PPCContext* ppc_context, void* arg0, void* arg1) {
-  auto global_mutex = reinterpret_cast<xe::recursive_mutex*>(arg0);
+  auto global_mutex = reinterpret_cast<std::recursive_mutex*>(arg0);
   auto global_lock_count = reinterpret_cast<int32_t*>(arg1);
-  std::lock_guard<xe::recursive_mutex> lock(*global_mutex);
+  std::lock_guard<std::recursive_mutex> lock(*global_mutex);
   ppc_context->scratch = *global_lock_count ? 0 : 0x8000;
 }
 
 // Enters the global lock. Safe for recursion.
 void EnterGlobalLock(PPCContext* ppc_context, void* arg0, void* arg1) {
-  auto global_mutex = reinterpret_cast<xe::recursive_mutex*>(arg0);
+  auto global_mutex = reinterpret_cast<std::recursive_mutex*>(arg0);
   auto global_lock_count = reinterpret_cast<int32_t*>(arg1);
   global_mutex->lock();
   xe::atomic_inc(global_lock_count);
@@ -77,7 +77,7 @@ void EnterGlobalLock(PPCContext* ppc_context, void* arg0, void* arg1) {
 
 // Leaves the global lock. Safe for recursion.
 void LeaveGlobalLock(PPCContext* ppc_context, void* arg0, void* arg1) {
-  auto global_mutex = reinterpret_cast<xe::recursive_mutex*>(arg0);
+  auto global_mutex = reinterpret_cast<std::recursive_mutex*>(arg0);
   auto global_lock_count = reinterpret_cast<int32_t*>(arg1);
   auto new_lock_count = xe::atomic_dec(global_lock_count);
   assert_true(new_lock_count >= 0);
diff --git a/src/xenia/cpu/frontend/ppc_translator.cc b/src/xenia/cpu/frontend/ppc_translator.cc
index cbde6d8fc..862abb725 100644
--- a/src/xenia/cpu/frontend/ppc_translator.cc
+++ b/src/xenia/cpu/frontend/ppc_translator.cc
@@ -172,9 +172,9 @@ bool PPCTranslator::Translate(GuestFunction* function,
 
   // Emit function.
   uint32_t emit_flags = 0;
-  if (debug_info) {
-    emit_flags |= PPCHIRBuilder::EMIT_DEBUG_COMMENTS;
-  }
+  // if (debug_info) {
+  emit_flags |= PPCHIRBuilder::EMIT_DEBUG_COMMENTS;
+  //}
   if (!builder_->Emit(function, emit_flags)) {
     return false;
   }
diff --git a/src/xenia/gpu/gl4/command_processor.cc b/src/xenia/gpu/gl4/command_processor.cc
index ef1e74674..c06fc83fa 100644
--- a/src/xenia/gpu/gl4/command_processor.cc
+++ b/src/xenia/gpu/gl4/command_processor.cc
@@ -587,7 +587,7 @@ void CommandProcessor::IssueSwap(uint32_t frontbuffer_ptr,
   // If we skip a lot then we may need to buffer more, but as the display
   // thread should be fairly idle that shouldn't happen.
   if (!FLAGS_vsync) {
-    std::lock_guard<xe::mutex> lock(swap_state_.mutex);
+    std::lock_guard<std::mutex> lock(swap_state_.mutex);
     if (swap_state_.pending) {
       swap_state_.pending = false;
       // TODO(benvanik): frame skip counter.
@@ -597,7 +597,7 @@ void CommandProcessor::IssueSwap(uint32_t frontbuffer_ptr,
     // Spin until no more pending swap.
     while (true) {
       {
-        std::lock_guard<xe::mutex> lock(swap_state_.mutex);
+        std::lock_guard<std::mutex> lock(swap_state_.mutex);
         if (!swap_state_.pending) {
           break;
         }
@@ -609,7 +609,7 @@ void CommandProcessor::IssueSwap(uint32_t frontbuffer_ptr,
   // One-time initialization.
   // TODO(benvanik): move someplace more sane?
   if (!swap_state_.front_buffer_texture) {
-    std::lock_guard<xe::mutex> lock(swap_state_.mutex);
+    std::lock_guard<std::mutex> lock(swap_state_.mutex);
     swap_state_.width = frontbuffer_width;
     swap_state_.height = frontbuffer_height;
     glCreateTextures(GL_TEXTURE_2D, 1, &swap_state_.front_buffer_texture);
@@ -647,7 +647,7 @@ void CommandProcessor::IssueSwap(uint32_t frontbuffer_ptr,
 
   {
     // Set pending so that the display will swap the next time it can.
-    std::lock_guard<xe::mutex> lock(swap_state_.mutex);
+    std::lock_guard<std::mutex> lock(swap_state_.mutex);
     swap_state_.pending = true;
   }
 
diff --git a/src/xenia/gpu/gl4/command_processor.h b/src/xenia/gpu/gl4/command_processor.h
index 0073851cf..a609d3549 100644
--- a/src/xenia/gpu/gl4/command_processor.h
+++ b/src/xenia/gpu/gl4/command_processor.h
@@ -14,6 +14,7 @@
 #include <atomic>
 #include <functional>
 #include <memory>
+#include <mutex>
 #include <queue>
 #include <string>
 #include <unordered_map>
@@ -46,7 +47,7 @@ class GL4GraphicsSystem;
 
 struct SwapState {
   // Lock must be held when changing data in this structure.
-  xe::mutex mutex;
+  std::mutex mutex;
   // Dimensions of the framebuffer textures. Should match window size.
   uint32_t width = 0;
   uint32_t height = 0;
diff --git a/src/xenia/gpu/gl4/gl4_graphics_system.cc b/src/xenia/gpu/gl4/gl4_graphics_system.cc
index 845ddb91d..f97793471 100644
--- a/src/xenia/gpu/gl4/gl4_graphics_system.cc
+++ b/src/xenia/gpu/gl4/gl4_graphics_system.cc
@@ -295,7 +295,7 @@ void GL4GraphicsSystem::Swap(xe::ui::UIEvent* e) {
   // Check for pending swap.
   auto& swap_state = command_processor_->swap_state();
   {
-    std::lock_guard<xe::mutex> lock(swap_state.mutex);
+    std::lock_guard<std::mutex> lock(swap_state.mutex);
     if (swap_state.pending) {
       swap_state.pending = false;
       std::swap(swap_state.front_buffer_texture,
diff --git a/src/xenia/gpu/gl4/texture_cache.cc b/src/xenia/gpu/gl4/texture_cache.cc
index e33fe7ec7..65ba9c7da 100644
--- a/src/xenia/gpu/gl4/texture_cache.cc
+++ b/src/xenia/gpu/gl4/texture_cache.cc
@@ -196,7 +196,7 @@ void TextureCache::EvictAllTextures() {
   }
 
   {
-    std::lock_guard<xe::mutex> lock(invalidated_textures_mutex_);
+    std::lock_guard<std::mutex> lock(invalidated_textures_mutex_);
     invalidated_textures_sets_[0].clear();
     invalidated_textures_sets_[1].clear();
   }
diff --git a/src/xenia/gpu/gl4/texture_cache.h b/src/xenia/gpu/gl4/texture_cache.h
index fe9334c25..d214dac53 100644
--- a/src/xenia/gpu/gl4/texture_cache.h
+++ b/src/xenia/gpu/gl4/texture_cache.h
@@ -10,10 +10,10 @@
 #ifndef XENIA_GPU_GL4_TEXTURE_CACHE_H_
 #define XENIA_GPU_GL4_TEXTURE_CACHE_H_
 
+#include <mutex>
 #include <unordered_map>
 #include <vector>
 
-#include "xenia/base/mutex.h"
 #include "xenia/gpu/sampler_info.h"
 #include "xenia/gpu/texture_info.h"
 #include "xenia/memory.h"
@@ -104,7 +104,7 @@ class TextureCache {
 
   std::vector<ReadBufferTexture*> read_buffer_textures_;
 
-  xe::mutex invalidated_textures_mutex_;
+  std::mutex invalidated_textures_mutex_;
   std::vector<Texture*>* invalidated_textures_;
   std::vector<Texture*> invalidated_textures_sets_[2];
 };
diff --git a/src/xenia/ui/loop_win.cc b/src/xenia/ui/loop_win.cc
index e21d61c61..b0317c48f 100644
--- a/src/xenia/ui/loop_win.cc
+++ b/src/xenia/ui/loop_win.cc
@@ -54,7 +54,7 @@ Win32Loop::~Win32Loop() {
   thread_.join();
   DeleteTimerQueueEx(timer_queue_, INVALID_HANDLE_VALUE);
 
-  std::lock_guard<xe::mutex> lock(pending_timers_mutex_);
+  std::lock_guard<std::mutex> lock(pending_timers_mutex_);
   while (!pending_timers_.empty()) {
     auto timer = pending_timers_.back();
     pending_timers_.pop_back();
@@ -102,7 +102,7 @@ void Win32Loop::TimerQueueCallback(void* context, uint8_t) {
   auto fn = std::move(timer->fn);
   DeleteTimerQueueTimer(timer->timer_queue, timer->timer_handle, NULL);
   {
-    std::lock_guard<xe::mutex> lock(loop->pending_timers_mutex_);
+    std::lock_guard<std::mutex> lock(loop->pending_timers_mutex_);
     loop->pending_timers_.remove(timer);
   }
   delete timer;
@@ -119,7 +119,7 @@ void Win32Loop::PostDelayed(std::function<void()> fn, uint64_t delay_millis) {
   timer->timer_queue = timer_queue_;
   timer->fn = std::move(fn);
   {
-    std::lock_guard<xe::mutex> lock(pending_timers_mutex_);
+    std::lock_guard<std::mutex> lock(pending_timers_mutex_);
     pending_timers_.push_back(timer);
   }
   CreateTimerQueueTimer(&timer->timer_handle, timer_queue_,
diff --git a/src/xenia/ui/loop_win.h b/src/xenia/ui/loop_win.h
index 236aaa13a..893dfabe4 100644
--- a/src/xenia/ui/loop_win.h
+++ b/src/xenia/ui/loop_win.h
@@ -14,7 +14,6 @@
 #include <list>
 #include <thread>
 
-#include "xenia/base/mutex.h"
 #include "xenia/base/platform_win.h"
 #include "xenia/base/threading.h"
 #include "xenia/ui/loop.h"
@@ -50,7 +49,7 @@ class Win32Loop : public Loop {
   xe::threading::Fence quit_fence_;
 
   HANDLE timer_queue_;
-  xe::mutex pending_timers_mutex_;
+  std::mutex pending_timers_mutex_;
   std::list<PendingTimer*> pending_timers_;
 };
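
Note on the pattern this change preserves: global_critical_region hands out
std::unique_lock<std::recursive_mutex> guards over a single process-wide
recursive mutex, and subsystems take the global lock before their own local
lock. Below is a minimal compilable sketch of that ordering; the KernelTable,
table_mutex_, and DoKernelStuff names are illustrative only, echoing the
example comment in src/xenia/base/mutex.h, and this is not code from the
patch itself.

#include <mutex>

namespace xe {

// Same shape as src/xenia/base/mutex.h after this patch: one global
// recursive mutex behind a function-local static, handed out as a
// std::unique_lock so it releases on scope exit. A recursive mutex is
// required because guest code can re-enter the lock through
// EnterGlobalLock/LeaveGlobalLock in ppc_frontend.cc.
class global_critical_region {
 public:
  static std::recursive_mutex& mutex() {
    static std::recursive_mutex global_mutex;
    return global_mutex;
  }
  std::unique_lock<std::recursive_mutex> Acquire() {
    return std::unique_lock<std::recursive_mutex>(mutex());
  }
};

}  // namespace xe

// Hypothetical subsystem showing the documented lock order: global lock
// first, then the local table lock. A thread suspended while holding
// table_mutex_ can then never deadlock a suspender that takes only the
// global lock.
class KernelTable {
 public:
  void DoKernelStuff() {
    auto global_lock = global_critical_region_.Acquire();
    std::lock_guard<std::mutex> table_lock(table_mutex_);
    // ... mutate shared table state here ...
  }

 private:
  xe::global_critical_region global_critical_region_;
  std::mutex table_mutex_;
};

With the xe::mutex and xe::recursive_mutex aliases deleted, call sites name
the standard types directly, which is why every std::lock_guard and
std::unique_lock declaration in the diff changes only its template argument,
not any locking behavior.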