Switch back to std:: muteces. mutices. mutexen.

Ben Vanik 2015-09-06 13:34:08 -07:00
parent cb3dbcccbc
commit 790ce8aee1
17 changed files with 49 additions and 58 deletions

View File

@@ -99,7 +99,7 @@ int XmaContext::Setup(uint32_t id, Memory* memory, uint32_t guest_ptr) {
}
void XmaContext::Work() {
- std::lock_guard<xe::mutex> lock(lock_);
+ std::lock_guard<std::mutex> lock(lock_);
if (!is_allocated() || !is_enabled()) {
return;
}
@@ -113,7 +113,7 @@ void XmaContext::Work() {
}
void XmaContext::Enable() {
- std::lock_guard<xe::mutex> lock(lock_);
+ std::lock_guard<std::mutex> lock(lock_);
auto context_ptr = memory()->TranslateVirtual(guest_ptr());
XMA_CONTEXT_DATA data(context_ptr);
@@ -141,7 +141,7 @@ bool XmaContext::Block(bool poll) {
}
void XmaContext::Clear() {
- std::lock_guard<xe::mutex> lock(lock_);
+ std::lock_guard<std::mutex> lock(lock_);
XELOGAPU("XmaContext: reset context %d", id());
auto context_ptr = memory()->TranslateVirtual(guest_ptr());
@@ -158,14 +158,14 @@ void XmaContext::Clear() {
}
void XmaContext::Disable() {
- std::lock_guard<xe::mutex> lock(lock_);
+ std::lock_guard<std::mutex> lock(lock_);
XELOGAPU("XmaContext: disabling context %d", id());
set_is_enabled(false);
}
void XmaContext::Release() {
// Lock it in case the decoder thread is working on it now.
- std::lock_guard<xe::mutex> lock(lock_);
+ std::lock_guard<std::mutex> lock(lock_);
assert_true(is_allocated_ == true);
set_is_allocated(false);
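
Every hunk in this file has the same shape: each public entry point takes the per-context lock for its entire body, so a scoped std::lock_guard suffices and no manual unlock path is needed. A minimal sketch of that shape (hypothetical names, not Xenia code):

#include <mutex>

class Context {
 public:
  void Enable() {
    std::lock_guard<std::mutex> lock(lock_);
    enabled_ = true;
  }
  void Disable() {
    std::lock_guard<std::mutex> lock(lock_);
    enabled_ = false;
  }
  void Work() {
    std::lock_guard<std::mutex> lock(lock_);
    if (!enabled_) {
      return;  // Early return is safe: the guard unlocks via RAII.
    }
    // ... do the actual work under the lock ...
  }

 private:
  std::mutex lock_;  // Serializes all state transitions on this object.
  bool enabled_ = false;
};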

View File

@@ -11,9 +11,9 @@
#define XENIA_APU_XMA_CONTEXT_H_
#include <atomic>
+ #include <mutex>
#include <queue>
- #include "xenia/base/mutex.h"
#include "xenia/memory.h"
#include "xenia/xbox.h"
@@ -192,7 +192,7 @@ class XmaContext {
uint32_t id_ = 0;
uint32_t guest_ptr_ = 0;
- xe::mutex lock_;
+ std::mutex lock_;
bool is_allocated_ = false;
bool is_enabled_ = false;
View File

@@ -163,7 +163,7 @@ int XmaDecoder::GetContextId(uint32_t guest_ptr) {
}
uint32_t XmaDecoder::AllocateContext() {
- std::lock_guard<xe::mutex> lock(lock_);
+ std::lock_guard<std::mutex> lock(lock_);
for (uint32_t n = 0; n < kContextCount; n++) {
XmaContext& context = contexts_[n];
@@ -177,7 +177,7 @@ uint32_t XmaDecoder::AllocateContext() {
}
void XmaDecoder::ReleaseContext(uint32_t guest_ptr) {
- std::lock_guard<xe::mutex> lock(lock_);
+ std::lock_guard<std::mutex> lock(lock_);
auto context_id = GetContextId(guest_ptr);
assert_true(context_id >= 0);
@@ -187,7 +187,7 @@ void XmaDecoder::ReleaseContext(uint32_t guest_ptr) {
}
bool XmaDecoder::BlockOnContext(uint32_t guest_ptr, bool poll) {
- std::lock_guard<xe::mutex> lock(lock_);
+ std::lock_guard<std::mutex> lock(lock_);
auto context_id = GetContextId(guest_ptr);
assert_true(context_id >= 0);
View File

@@ -11,10 +11,10 @@
#define XENIA_APU_XMA_DECODER_H_
#include <atomic>
+ #include <mutex>
#include <queue>
#include "xenia/apu/xma_context.h"
- #include "xenia/base/mutex.h"
#include "xenia/kernel/objects/xthread.h"
#include "xenia/xbox.h"
@@ -66,7 +66,7 @@ class XmaDecoder {
kernel::object_ref<kernel::XHostThread> worker_thread_;
xe::threading::Fence worker_fence_;
- xe::mutex lock_;
+ std::mutex lock_;
// Stored little endian, accessed through 0x7FEA....
union {
View File

@@ -11,8 +11,8 @@
namespace xe {
- xe::recursive_mutex& global_critical_region::mutex() {
- static xe::recursive_mutex global_mutex;
+ std::recursive_mutex& global_critical_region::mutex() {
+ static std::recursive_mutex global_mutex;
return global_mutex;
}
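
The replacement keeps the property that matters here: the global mutex is a function-local static, so it is constructed on first use, and C++11 guarantees that initialization is thread-safe even if several threads race into the function. The idiom in isolation (hypothetical name):

#include <mutex>

// Constructed exactly once, on first call; safe to call from any thread,
// including during static initialization of other translation units.
std::recursive_mutex& global_mutex() {
  static std::recursive_mutex m;
  return m;
}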

View File

@@ -14,13 +14,6 @@
namespace xe {
- // This exists to allow us to swap the mutex implementation with one that
- // we can mess with in the debugger (such as allowing the debugger to ignore
- // locks while all threads are suspended). std::mutex should not be used.
- using mutex = std::mutex;
- using recursive_mutex = std::recursive_mutex;
// The global critical region mutex singleton.
// This must guard any operation that may suspend threads or be sensitive to
// being suspended such as global table locks and such.
@@ -44,7 +37,7 @@ using recursive_mutex = std::recursive_mutex;
// [thread 0]:
// DoKernelStuff():
// auto global_lock = global_critical_region_.Acquire();
- // std::lock_guard<xe::mutex> table_lock(table_mutex_);
+ // std::lock_guard<std::mutex> table_lock(table_mutex_);
// table_->InsertStuff();
// [thread 1]:
// MySuspendThread():
@@ -61,25 +54,25 @@ using recursive_mutex = std::recursive_mutex;
// };
class global_critical_region {
public:
- static xe::recursive_mutex& mutex();
+ static std::recursive_mutex& mutex();
// Acquires a lock on the global critical section.
// Use this when keeping an instance is not possible. Otherwise, prefer
// to keep an instance of global_critical_region near the members requiring
// it to keep things readable.
- static std::unique_lock<xe::recursive_mutex> AcquireDirect() {
- return std::unique_lock<xe::recursive_mutex>(mutex());
+ static std::unique_lock<std::recursive_mutex> AcquireDirect() {
+ return std::unique_lock<std::recursive_mutex>(mutex());
}
// Acquires a lock on the global critical section.
- inline std::unique_lock<xe::recursive_mutex> Acquire() {
- return std::unique_lock<xe::recursive_mutex>(mutex());
+ inline std::unique_lock<std::recursive_mutex> Acquire() {
+ return std::unique_lock<std::recursive_mutex>(mutex());
}
// Tries to acquire a lock on the global critical section.
// Check owns_lock() to see if the lock was successfully acquired.
- inline std::unique_lock<xe::recursive_mutex> TryAcquire() {
- return std::unique_lock<xe::recursive_mutex>(mutex(), std::try_to_lock);
+ inline std::unique_lock<std::recursive_mutex> TryAcquire() {
+ return std::unique_lock<std::recursive_mutex>(mutex(), std::try_to_lock);
}
};
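
The header's comment already spells out the deadlock this type exists to prevent; in practice a subsystem keeps an instance next to the state it guards and scopes the lock with RAII. A usage sketch (hypothetical ObjectTable, assuming this header):

#include <cstdint>
#include "xenia/base/mutex.h"

class ObjectTable {
 public:
  void Insert(uint32_t handle) {
    // Holds the single global recursive mutex for the whole call, so a
    // concurrent suspend-all-threads cannot interleave with the mutation.
    auto global_lock = global_critical_region_.Acquire();
    // ... mutate the table ...
  }
  bool TryInsert(uint32_t handle) {
    auto global_lock = global_critical_region_.TryAcquire();
    if (!global_lock.owns_lock()) {
      return false;  // Critical region busy; caller can retry later.
    }
    // ... mutate the table ...
    return true;
  }

 private:
  xe::global_critical_region global_critical_region_;
};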

View File

@@ -13,8 +13,6 @@
#include <mutex>
#include <vector>
- #include "xenia/base/mutex.h"
namespace xe {
template <class T, typename A>
@@ -23,7 +21,7 @@ class TypePool {
~TypePool() { Reset(); }
void Reset() {
- std::lock_guard<xe::mutex> guard(lock_);
+ std::lock_guard<std::mutex> guard(lock_);
for (auto it = list_.begin(); it != list_.end(); ++it) {
T* value = *it;
delete value;
@@ -34,7 +32,7 @@ class TypePool {
T* Allocate(A arg0) {
T* result = 0;
{
- std::lock_guard<xe::mutex> guard(lock_);
+ std::lock_guard<std::mutex> guard(lock_);
if (list_.size()) {
result = list_.back();
list_.pop_back();
@@ -47,12 +45,12 @@ class TypePool {
}
void Release(T* value) {
- std::lock_guard<xe::mutex> guard(lock_);
+ std::lock_guard<std::mutex> guard(lock_);
list_.push_back(value);
}
private:
- xe::mutex lock_;
+ std::mutex lock_;
std::vector<T*> list_;
};
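
TypePool's contract is untouched by the swap: every public method serializes on lock_, so objects can be allocated on one thread and released on another. Rough usage (hypothetical Instr type; the pool appears to construct fresh objects from arg0 when its free list is empty):

// TypePool<T, A> needs a T constructible from A.
struct Instr {
  explicit Instr(int opcode) : opcode(opcode) {}
  int opcode;
};

void Example() {
  xe::TypePool<Instr, int> pool;
  Instr* i = pool.Allocate(42);  // Pops a recycled Instr if one exists.
  // ... use i ...
  pool.Release(i);               // Back to the free list, not the heap.
  // pool.Reset() (also run by the destructor) deletes pooled objects.
}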

View File

@@ -11,9 +11,9 @@
#define XENIA_CPU_FRONTEND_PPC_CONTEXT_H_
#include <cstdint>
+ #include <mutex>
#include <string>
- #include "xenia/base/mutex.h"
#include "xenia/base/vec128.h"
namespace xe {
@@ -407,7 +407,7 @@ typedef struct alignas(64) PPCContext_s {
// Global interrupt lock, held while interrupts are disabled or interrupts are
// executing. This is shared among all threads and comes from the processor.
- xe::recursive_mutex* global_mutex;
+ std::recursive_mutex* global_mutex;
// Used to shuttle data into externs. Contents volatile.
uint64_t scratch;
View File

@@ -61,15 +61,15 @@ Memory* PPCFrontend::memory() const { return processor_->memory(); }
// Checks the state of the global lock and sets scratch to the current MSR
// value.
void CheckGlobalLock(PPCContext* ppc_context, void* arg0, void* arg1) {
- auto global_mutex = reinterpret_cast<xe::recursive_mutex*>(arg0);
+ auto global_mutex = reinterpret_cast<std::recursive_mutex*>(arg0);
auto global_lock_count = reinterpret_cast<int32_t*>(arg1);
- std::lock_guard<xe::recursive_mutex> lock(*global_mutex);
+ std::lock_guard<std::recursive_mutex> lock(*global_mutex);
ppc_context->scratch = *global_lock_count ? 0 : 0x8000;
}
// Enters the global lock. Safe for recursion.
void EnterGlobalLock(PPCContext* ppc_context, void* arg0, void* arg1) {
- auto global_mutex = reinterpret_cast<xe::recursive_mutex*>(arg0);
+ auto global_mutex = reinterpret_cast<std::recursive_mutex*>(arg0);
auto global_lock_count = reinterpret_cast<int32_t*>(arg1);
global_mutex->lock();
xe::atomic_inc(global_lock_count);
@@ -77,7 +77,7 @@ void EnterGlobalLock(PPCContext* ppc_context, void* arg0, void* arg1) {
// Leaves the global lock. Safe for recursion.
void LeaveGlobalLock(PPCContext* ppc_context, void* arg0, void* arg1) {
- auto global_mutex = reinterpret_cast<xe::recursive_mutex*>(arg0);
+ auto global_mutex = reinterpret_cast<std::recursive_mutex*>(arg0);
auto global_lock_count = reinterpret_cast<int32_t*>(arg1);
auto new_lock_count = xe::atomic_dec(global_lock_count);
assert_true(new_lock_count >= 0);
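
These helpers pair a recursive mutex with a separate counter because guest code needs to ask "is the global lock held?" without blocking on it: CheckGlobalLock takes the mutex only long enough to read the counter coherently. The same shape in standalone standard C++ (hypothetical free functions; std::atomic stands in for xe::atomic_inc/atomic_dec):

#include <atomic>
#include <cassert>
#include <cstdint>
#include <mutex>

std::recursive_mutex global_mutex;
std::atomic<int32_t> global_lock_count{0};

// Recursion-safe: a thread may enter twice; the counter tracks depth.
void EnterGlobalLock() {
  global_mutex.lock();
  ++global_lock_count;
}

void LeaveGlobalLock() {
  int32_t new_count = --global_lock_count;
  assert(new_count >= 0);
  global_mutex.unlock();
}

// Taking the mutex here makes the answer coherent with a concurrent
// Enter/Leave pair instead of a torn read of the counter.
bool GlobalLockHeld() {
  std::lock_guard<std::recursive_mutex> lock(global_mutex);
  return global_lock_count.load() != 0;
}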

View File

@@ -172,9 +172,9 @@ bool PPCTranslator::Translate(GuestFunction* function,
// Emit function.
uint32_t emit_flags = 0;
- if (debug_info) {
- emit_flags |= PPCHIRBuilder::EMIT_DEBUG_COMMENTS;
- }
+ // if (debug_info) {
+ emit_flags |= PPCHIRBuilder::EMIT_DEBUG_COMMENTS;
+ //}
if (!builder_->Emit(function, emit_flags)) {
return false;
}

View File

@@ -587,7 +587,7 @@ void CommandProcessor::IssueSwap(uint32_t frontbuffer_ptr,
// If we skip a lot then we may need to buffer more, but as the display
// thread should be fairly idle that shouldn't happen.
if (!FLAGS_vsync) {
- std::lock_guard<xe::mutex> lock(swap_state_.mutex);
+ std::lock_guard<std::mutex> lock(swap_state_.mutex);
if (swap_state_.pending) {
swap_state_.pending = false;
// TODO(benvanik): frame skip counter.
@@ -597,7 +597,7 @@ void CommandProcessor::IssueSwap(uint32_t frontbuffer_ptr,
// Spin until no more pending swap.
while (true) {
{
- std::lock_guard<xe::mutex> lock(swap_state_.mutex);
+ std::lock_guard<std::mutex> lock(swap_state_.mutex);
if (!swap_state_.pending) {
break;
}
@@ -609,7 +609,7 @@ void CommandProcessor::IssueSwap(uint32_t frontbuffer_ptr,
// One-time initialization.
// TODO(benvanik): move someplace more sane?
if (!swap_state_.front_buffer_texture) {
- std::lock_guard<xe::mutex> lock(swap_state_.mutex);
+ std::lock_guard<std::mutex> lock(swap_state_.mutex);
swap_state_.width = frontbuffer_width;
swap_state_.height = frontbuffer_height;
glCreateTextures(GL_TEXTURE_2D, 1, &swap_state_.front_buffer_texture);
@@ -647,7 +647,7 @@ void CommandProcessor::IssueSwap(uint32_t frontbuffer_ptr,
{
// Set pending so that the display will swap the next time it can.
- std::lock_guard<xe::mutex> lock(swap_state_.mutex);
+ std::lock_guard<std::mutex> lock(swap_state_.mutex);
swap_state_.pending = true;
}
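
The swap path is a flag-under-mutex handoff: one side sets pending, the other clears it, and waits are spins with short critical sections so neither side starves the other. Condensed to its skeleton (hypothetical names; the real code also manages GL textures and vsync policy):

#include <mutex>
#include <thread>

struct SwapState {
  std::mutex mutex;  // Must be held when touching any field below.
  bool pending = false;
};

SwapState swap_state;

// Producer side: publish a frame, overwriting one not yet consumed.
void IssueSwap() {
  std::lock_guard<std::mutex> lock(swap_state.mutex);
  swap_state.pending = true;
}

// Consumer side: spin until a frame is pending; the lock is held only
// for the test-and-clear so the producer can always get in between polls.
void ConsumeSwap() {
  while (true) {
    {
      std::lock_guard<std::mutex> lock(swap_state.mutex);
      if (swap_state.pending) {
        swap_state.pending = false;
        break;
      }
    }
    std::this_thread::yield();
  }
}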

View File

@@ -14,6 +14,7 @@
#include <cstring>
#include <functional>
#include <memory>
+ #include <mutex>
#include <queue>
#include <string>
#include <unordered_map>
@@ -46,7 +47,7 @@ class GL4GraphicsSystem;
struct SwapState {
// Lock must be held when changing data in this structure.
- xe::mutex mutex;
+ std::mutex mutex;
// Dimensions of the framebuffer textures. Should match window size.
uint32_t width = 0;
uint32_t height = 0;

View File

@@ -295,7 +295,7 @@ void GL4GraphicsSystem::Swap(xe::ui::UIEvent* e) {
// Check for pending swap.
auto& swap_state = command_processor_->swap_state();
{
- std::lock_guard<xe::mutex> lock(swap_state.mutex);
+ std::lock_guard<std::mutex> lock(swap_state.mutex);
if (swap_state.pending) {
swap_state.pending = false;
std::swap(swap_state.front_buffer_texture,

View File

@@ -196,7 +196,7 @@ void TextureCache::EvictAllTextures() {
}
{
- std::lock_guard<xe::mutex> lock(invalidated_textures_mutex_);
+ std::lock_guard<std::mutex> lock(invalidated_textures_mutex_);
invalidated_textures_sets_[0].clear();
invalidated_textures_sets_[1].clear();
}

View File

@@ -10,10 +10,10 @@
#ifndef XENIA_GPU_GL4_TEXTURE_CACHE_H_
#define XENIA_GPU_GL4_TEXTURE_CACHE_H_
+ #include <mutex>
#include <unordered_map>
#include <vector>
- #include "xenia/base/mutex.h"
#include "xenia/gpu/sampler_info.h"
#include "xenia/gpu/texture_info.h"
#include "xenia/memory.h"
@@ -104,7 +104,7 @@ class TextureCache {
std::vector<ReadBufferTexture*> read_buffer_textures_;
- xe::mutex invalidated_textures_mutex_;
+ std::mutex invalidated_textures_mutex_;
std::vector<TextureEntry*>* invalidated_textures_;
std::vector<TextureEntry*> invalidated_textures_sets_[2];
};

View File

@@ -54,7 +54,7 @@ Win32Loop::~Win32Loop() {
thread_.join();
DeleteTimerQueueEx(timer_queue_, INVALID_HANDLE_VALUE);
- std::lock_guard<xe::mutex> lock(pending_timers_mutex_);
+ std::lock_guard<std::mutex> lock(pending_timers_mutex_);
while (!pending_timers_.empty()) {
auto timer = pending_timers_.back();
pending_timers_.pop_back();
@@ -102,7 +102,7 @@ void Win32Loop::TimerQueueCallback(void* context, uint8_t) {
auto fn = std::move(timer->fn);
DeleteTimerQueueTimer(timer->timer_queue, timer->timer_handle, NULL);
{
- std::lock_guard<xe::mutex> lock(loop->pending_timers_mutex_);
+ std::lock_guard<std::mutex> lock(loop->pending_timers_mutex_);
loop->pending_timers_.remove(timer);
}
delete timer;
@@ -119,7 +119,7 @@ void Win32Loop::PostDelayed(std::function<void()> fn, uint64_t delay_millis) {
timer->timer_queue = timer_queue_;
timer->fn = std::move(fn);
{
- std::lock_guard<xe::mutex> lock(pending_timers_mutex_);
+ std::lock_guard<std::mutex> lock(pending_timers_mutex_);
pending_timers_.push_back(timer);
}
CreateTimerQueueTimer(&timer->timer_handle, timer_queue_,
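
Win32Loop's timers fire on OS thread-pool threads, so the pending list is shared between the loop thread and the callbacks and every touch of it is guarded; note how the callback moves the closure out of the timer before unlinking and deleting it. The bookkeeping reduced to standard C++ (hypothetical sketch; Win32 timer-queue calls omitted):

#include <functional>
#include <list>
#include <mutex>

struct PendingTimer {
  std::function<void()> fn;
};

std::mutex pending_timers_mutex;
std::list<PendingTimer*> pending_timers;

PendingTimer* PostDelayed(std::function<void()> fn) {
  auto timer = new PendingTimer{std::move(fn)};
  {
    // Registration races with firing, so even push_back is guarded.
    std::lock_guard<std::mutex> lock(pending_timers_mutex);
    pending_timers.push_back(timer);
  }
  return timer;  // The OS timer callback would receive this pointer.
}

void TimerCallback(PendingTimer* timer) {
  // Move the closure out first: after unlink + delete, nothing may
  // touch *timer, but the callable still has to run.
  auto fn = std::move(timer->fn);
  {
    std::lock_guard<std::mutex> lock(pending_timers_mutex);
    pending_timers.remove(timer);
  }
  delete timer;
  fn();
}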

View File

@@ -14,7 +14,6 @@
#include <list>
#include <thread>
- #include "xenia/base/mutex.h"
#include "xenia/base/platform_win.h"
#include "xenia/base/threading.h"
#include "xenia/ui/loop.h"
@@ -50,7 +49,7 @@ class Win32Loop : public Loop {
xe::threading::Fence quit_fence_;
HANDLE timer_queue_;
- xe::mutex pending_timers_mutex_;
+ std::mutex pending_timers_mutex_;
std::list<PendingTimer*> pending_timers_;
};