From 3c96b6fa0af572b5b30a1e799bca6a5b97dad60c Mon Sep 17 00:00:00 2001 From: Ben Vanik Date: Sun, 6 Sep 2015 09:30:54 -0700 Subject: [PATCH] DANGER DANGER. Switching to global critical region. This changes almost all locks held by guest threads to use a single global critical region. This emulates the behavior on the PPC of disabling interrupts (by calls like KeRaiseIrqlToDpcLevel or masking interrupts), and prevents deadlocks from occurring when threads are suspended or otherwise blocked. This has performance implications and a pass is needed to ensure the locking is as granular as possible. It could also break everything because it's fundamentally unsound. We'll see. --- src/xenia/apu/audio_system.cc | 10 ++-- src/xenia/apu/audio_system.h | 6 +- src/xenia/apu/xma_context.h | 2 +- src/xenia/apu/xma_decoder.h | 2 +- src/xenia/base/mutex.cc | 19 ++++++ src/xenia/base/mutex.h | 62 ++++++++++++++++++++ src/xenia/base/reset_scope.h | 2 - src/xenia/cpu/backend/x64/x64_code_cache.cc | 4 +- src/xenia/cpu/backend/x64/x64_code_cache.h | 7 +-- src/xenia/cpu/entry_table.cc | 14 ++--- src/xenia/cpu/entry_table.h | 3 +- src/xenia/cpu/frontend/ppc_frontend.cc | 9 +-- src/xenia/cpu/frontend/ppc_frontend.h | 2 - src/xenia/cpu/function.h | 1 - src/xenia/cpu/mmio_handler.cc | 12 ++-- src/xenia/cpu/mmio_handler.h | 3 +- src/xenia/cpu/module.cc | 32 +++++----- src/xenia/cpu/module.h | 3 +- src/xenia/cpu/processor.cc | 12 ++-- src/xenia/cpu/processor.h | 4 +- src/xenia/cpu/thread_state.cc | 2 +- src/xenia/debug/debugger.cc | 2 +- src/xenia/debug/debugger.h | 2 + src/xenia/gpu/gl4/texture_cache.h | 1 - src/xenia/hid/winkey/winkey_input_driver.cc | 23 ++++---- src/xenia/hid/winkey/winkey_input_driver.h | 5 +- src/xenia/kernel/apps/xmp_app.cc | 6 +- src/xenia/kernel/apps/xmp_app.h | 3 +- src/xenia/kernel/content_manager.cc | 14 ++--- src/xenia/kernel/content_manager.h | 3 +- src/xenia/kernel/dispatcher.cc | 4 -- src/xenia/kernel/dispatcher.h | 10 +--- src/xenia/kernel/kernel_state.cc | 34
+++++------ src/xenia/kernel/kernel_state.h | 11 ++-- src/xenia/kernel/object_table.cc | 24 ++++---- src/xenia/kernel/object_table.h | 3 +- src/xenia/kernel/objects/xnotify_listener.cc | 6 +- src/xenia/kernel/objects/xnotify_listener.h | 2 +- src/xenia/kernel/objects/xthread.cc | 20 ++++--- src/xenia/kernel/objects/xthread.h | 4 +- src/xenia/kernel/xboxkrnl_threading.cc | 37 +++++------- src/xenia/kernel/xobject.cc | 6 +- src/xenia/memory.cc | 31 +++++----- src/xenia/memory.h | 3 +- src/xenia/vfs/device.cc | 2 +- src/xenia/vfs/device.h | 3 +- src/xenia/vfs/entry.cc | 8 +-- src/xenia/vfs/entry.h | 2 + src/xenia/vfs/virtual_file_system.cc | 8 +-- src/xenia/vfs/virtual_file_system.h | 2 +- 50 files changed, 271 insertions(+), 219 deletions(-) create mode 100644 src/xenia/base/mutex.cc diff --git a/src/xenia/apu/audio_system.cc b/src/xenia/apu/audio_system.cc index f75d3ef85..45b1f4709 100644 --- a/src/xenia/apu/audio_system.cc +++ b/src/xenia/apu/audio_system.cc @@ -124,10 +124,10 @@ void AudioSystem::WorkerThreadMain() { if (result.first == xe::threading::WaitResult::kSuccess) { size_t index = result.second; do { - lock_.lock(); + auto global_lock = global_critical_region_.Acquire(); uint32_t client_callback = clients_[index].callback; uint32_t client_callback_arg = clients_[index].wrapped_callback_arg; - lock_.unlock(); + global_lock.unlock(); if (client_callback) { SCOPE_profile_cpu_i("apu", "xe::apu::AudioSystem->client_callback"); @@ -169,7 +169,7 @@ void AudioSystem::Shutdown() { X_STATUS AudioSystem::RegisterClient(uint32_t callback, uint32_t callback_arg, size_t* out_index) { assert_true(unused_clients_.size()); - std::lock_guard lock(lock_); + auto global_lock = global_critical_region_.Acquire(); auto index = unused_clients_.front(); @@ -201,7 +201,7 @@ X_STATUS AudioSystem::RegisterClient(uint32_t callback, uint32_t callback_arg, void AudioSystem::SubmitFrame(size_t index, uint32_t samples_ptr) { SCOPE_profile_cpu_f("apu"); - std::lock_guard lock(lock_); + 
auto global_lock = global_critical_region_.Acquire(); assert_true(index < kMaximumClientCount); assert_true(clients_[index].driver != NULL); (clients_[index].driver)->SubmitFrame(samples_ptr); @@ -210,7 +210,7 @@ void AudioSystem::SubmitFrame(size_t index, uint32_t samples_ptr) { void AudioSystem::UnregisterClient(size_t index) { SCOPE_profile_cpu_f("apu"); - std::lock_guard lock(lock_); + auto global_lock = global_critical_region_.Acquire(); assert_true(index < kMaximumClientCount); DestroyDriver(clients_[index].driver); clients_[index] = {0}; diff --git a/src/xenia/apu/audio_system.h b/src/xenia/apu/audio_system.h index 6803ea0aa..de8901c37 100644 --- a/src/xenia/apu/audio_system.h +++ b/src/xenia/apu/audio_system.h @@ -11,9 +11,9 @@ #define XENIA_APU_AUDIO_SYSTEM_H_ #include -#include #include +#include "xenia/base/mutex.h" #include "xenia/base/threading.h" #include "xenia/cpu/processor.h" #include "xenia/kernel/objects/xthread.h" @@ -67,10 +67,8 @@ class AudioSystem { std::atomic worker_running_ = {false}; kernel::object_ref worker_thread_; - xe::mutex lock_; - + xe::global_critical_region global_critical_region_; static const size_t kMaximumClientCount = 8; - struct { AudioDriver* driver; uint32_t callback; diff --git a/src/xenia/apu/xma_context.h b/src/xenia/apu/xma_context.h index 53b7fd78f..113129e36 100644 --- a/src/xenia/apu/xma_context.h +++ b/src/xenia/apu/xma_context.h @@ -11,9 +11,9 @@ #define XENIA_APU_XMA_CONTEXT_H_ #include -#include #include +#include "xenia/base/mutex.h" #include "xenia/memory.h" #include "xenia/xbox.h" diff --git a/src/xenia/apu/xma_decoder.h b/src/xenia/apu/xma_decoder.h index 81aa77e56..8166217cb 100644 --- a/src/xenia/apu/xma_decoder.h +++ b/src/xenia/apu/xma_decoder.h @@ -11,10 +11,10 @@ #define XENIA_APU_XMA_DECODER_H_ #include -#include #include #include "xenia/apu/xma_context.h" +#include "xenia/base/mutex.h" #include "xenia/kernel/objects/xthread.h" #include "xenia/xbox.h" diff --git a/src/xenia/base/mutex.cc 
b/src/xenia/base/mutex.cc new file mode 100644 index 000000000..a0e9a4f6c --- /dev/null +++ b/src/xenia/base/mutex.cc @@ -0,0 +1,19 @@ +/** + ****************************************************************************** + * Xenia : Xbox 360 Emulator Research Project * + ****************************************************************************** + * Copyright 2015 Ben Vanik. All rights reserved. * + * Released under the BSD license - see LICENSE in the root for more details. * + ****************************************************************************** + */ + +#include "xenia/base/mutex.h" + +namespace xe { + +xe::recursive_mutex& global_critical_region::mutex() { + static xe::recursive_mutex global_mutex; + return global_mutex; +} + +} // namespace xe diff --git a/src/xenia/base/mutex.h b/src/xenia/base/mutex.h index 2b72b97f1..6b1080c73 100644 --- a/src/xenia/base/mutex.h +++ b/src/xenia/base/mutex.h @@ -21,6 +21,68 @@ namespace xe { using mutex = std::mutex; using recursive_mutex = std::recursive_mutex; +// The global critical region mutex singleton. +// This must guard any operation that may suspend threads or be sensitive to +// being suspended such as global table locks and such. +// To prevent deadlocks this should be the first lock acquired and be held +// for the entire duration of the critical region (longer than any other lock). +// +// As a general rule if some code can only be accessed from the guest you can +// guard it with only the global critical region and be assured nothing else +// will touch it. If it will be accessed from non-guest threads you may need +// some additional protection. +// +// You can think of this as disabling interrupts in the guest. The thread in the +// global critical region has exclusive access to the entire system and cannot +// be preempted. This also means that all activity done while in the critical +// region must be extremely fast (no IO!), as it has the chance to block any +// other thread until it's done.
+// +// For example, in the following situation thread 1 will not be able to suspend +// thread 0 until it has exited its critical region, preventing it from being +// suspended while holding the table lock: +// [thread 0]: +// DoKernelStuff(): +// auto global_lock = global_critical_region_.Acquire(); +// std::lock_guard table_lock(table_mutex_); +// table_->InsertStuff(); +// [thread 1]: +// MySuspendThread(): +// auto global_lock = global_critical_region_.Acquire(); +// ::SuspendThread(thread0); +// +// To use the region it's strongly recommended that you keep an instance near +// the data requiring it. This makes it clear to those reading that the data +// is protected by the global critical region. For example: +// class MyType { +// // Implies my_list_ is protected: +// xe::global_critical_region global_critical_region_; +// std::list<...> my_list_; +// }; +class global_critical_region { + public: + static xe::recursive_mutex& mutex(); + + // Acquires a lock on the global critical section. + // Use this when keeping an instance is not possible. Otherwise, prefer + // to keep an instance of global_critical_region near the members requiring + // it to keep things readable. + static std::unique_lock AcquireDirect() { + return std::unique_lock(mutex()); + } + + // Acquires a lock on the global critical section. + inline std::unique_lock Acquire() { + return std::unique_lock(mutex()); + } + + // Tries to acquire a lock on the global critical section. + // Check owns_lock() to see if the lock was successfully acquired.
+ inline std::unique_lock TryAcquire() { + return std::unique_lock(mutex(), std::try_to_lock); + } +}; + } // namespace xe #endif // XENIA_BASE_MUTEX_H_ diff --git a/src/xenia/base/reset_scope.h b/src/xenia/base/reset_scope.h index 870057107..6230083bc 100644 --- a/src/xenia/base/reset_scope.h +++ b/src/xenia/base/reset_scope.h @@ -10,8 +10,6 @@ #ifndef XENIA_BASE_RESET_SCOPE_H_ #define XENIA_BASE_RESET_SCOPE_H_ -#include - namespace xe { template diff --git a/src/xenia/cpu/backend/x64/x64_code_cache.cc b/src/xenia/cpu/backend/x64/x64_code_cache.cc index e94561dd4..8a2a51cde 100644 --- a/src/xenia/cpu/backend/x64/x64_code_cache.cc +++ b/src/xenia/cpu/backend/x64/x64_code_cache.cc @@ -129,7 +129,7 @@ void* X64CodeCache::PlaceGuestCode(uint32_t guest_address, void* machine_code, uint8_t* code_address = nullptr; UnwindReservation unwind_reservation; { - std::lock_guard allocation_lock(allocation_mutex_); + auto global_lock = global_critical_region_.Acquire(); low_mark = generated_code_offset_; @@ -192,7 +192,7 @@ uint32_t X64CodeCache::PlaceData(const void* data, size_t length) { size_t high_mark; uint8_t* data_address = nullptr; { - std::lock_guard allocation_lock(allocation_mutex_); + auto global_lock = global_critical_region_.Acquire(); // Reserve code. // Always move the code to land on 16b alignment. diff --git a/src/xenia/cpu/backend/x64/x64_code_cache.h b/src/xenia/cpu/backend/x64/x64_code_cache.h index 2853bca04..31f001e48 100644 --- a/src/xenia/cpu/backend/x64/x64_code_cache.h +++ b/src/xenia/cpu/backend/x64/x64_code_cache.h @@ -12,7 +12,6 @@ #include #include -#include #include #include #include @@ -91,9 +90,9 @@ class X64CodeCache : public CodeCache { std::wstring file_name_; xe::memory::FileMappingHandle mapping_ = nullptr; - // Must be held when manipulating the offsets or counts of anything, to keep - // the tables consistent and ordered. 
- xe::mutex allocation_mutex_; + // NOTE: the global critical region must be held when manipulating the offsets + // or counts of anything, to keep the tables consistent and ordered. + xe::global_critical_region global_critical_region_; // Value that the indirection table will be initialized with upon commit. uint32_t indirection_default_value_ = 0xFEEDF00D; diff --git a/src/xenia/cpu/entry_table.cc b/src/xenia/cpu/entry_table.cc index da438ba7f..7d2f08e05 100644 --- a/src/xenia/cpu/entry_table.cc +++ b/src/xenia/cpu/entry_table.cc @@ -18,7 +18,7 @@ namespace cpu { EntryTable::EntryTable() = default; EntryTable::~EntryTable() { - std::lock_guard guard(lock_); + auto global_lock = global_critical_region_.Acquire(); for (auto it : map_) { Entry* entry = it.second; delete entry; @@ -26,7 +26,7 @@ EntryTable::~EntryTable() { } Entry* EntryTable::Get(uint32_t address) { - std::lock_guard guard(lock_); + auto global_lock = global_critical_region_.Acquire(); const auto& it = map_.find(address); Entry* entry = it != map_.end() ? it->second : nullptr; if (entry) { @@ -42,7 +42,7 @@ Entry::Status EntryTable::GetOrCreate(uint32_t address, Entry** out_entry) { // TODO(benvanik): replace with a map with wait-free for find. // https://github.com/facebook/folly/blob/master/folly/AtomicHashMap.h - lock_.lock(); + auto global_lock = global_critical_region_.Acquire(); const auto& it = map_.find(address); Entry* entry = it != map_.end() ? it->second : nullptr; Entry::Status status; @@ -51,10 +51,10 @@ Entry::Status EntryTable::GetOrCreate(uint32_t address, Entry** out_entry) { if (entry->status == Entry::STATUS_COMPILING) { // Still compiling, so spin. do { - lock_.unlock(); + global_lock.unlock(); // TODO(benvanik): sleep for less time? 
xe::threading::Sleep(std::chrono::microseconds(10)); - lock_.lock(); + global_lock.lock(); } while (entry->status == Entry::STATUS_COMPILING); } status = entry->status; @@ -68,13 +68,13 @@ Entry::Status EntryTable::GetOrCreate(uint32_t address, Entry** out_entry) { map_[address] = entry; status = Entry::STATUS_NEW; } - lock_.unlock(); + global_lock.unlock(); *out_entry = entry; return status; } std::vector EntryTable::FindWithAddress(uint32_t address) { - std::lock_guard guard(lock_); + auto global_lock = global_critical_region_.Acquire(); std::vector fns; for (auto& it : map_) { Entry* entry = it.second; diff --git a/src/xenia/cpu/entry_table.h b/src/xenia/cpu/entry_table.h index 3e5a9548f..e3c4bc271 100644 --- a/src/xenia/cpu/entry_table.h +++ b/src/xenia/cpu/entry_table.h @@ -10,7 +10,6 @@ #ifndef XENIA_CPU_ENTRY_TABLE_H_ #define XENIA_CPU_ENTRY_TABLE_H_ -#include #include #include @@ -46,8 +45,8 @@ class EntryTable { std::vector FindWithAddress(uint32_t address); private: + xe::global_critical_region global_critical_region_; // TODO(benvanik): replace with a better data structure. - xe::mutex lock_; std::unordered_map map_; }; diff --git a/src/xenia/cpu/frontend/ppc_frontend.cc b/src/xenia/cpu/frontend/ppc_frontend.cc index e4522a58b..7e82e573b 100644 --- a/src/xenia/cpu/frontend/ppc_frontend.cc +++ b/src/xenia/cpu/frontend/ppc_frontend.cc @@ -9,6 +9,7 @@ #include "xenia/cpu/frontend/ppc_frontend.h" +#include "xenia/base/atomic.h" #include "xenia/cpu/frontend/ppc_context.h" #include "xenia/cpu/frontend/ppc_disasm.h" #include "xenia/cpu/frontend/ppc_emit.h" @@ -71,20 +72,20 @@ void EnterGlobalLock(PPCContext* ppc_context, void* arg0, void* arg1) { auto global_mutex = reinterpret_cast(arg0); auto global_lock_count = reinterpret_cast(arg1); global_mutex->lock(); - *global_lock_count = *global_lock_count + 1; + xe::atomic_inc(global_lock_count); } // Leaves the global lock. Safe to recursion. 
void LeaveGlobalLock(PPCContext* ppc_context, void* arg0, void* arg1) { auto global_mutex = reinterpret_cast(arg0); auto global_lock_count = reinterpret_cast(arg1); - *global_lock_count = *global_lock_count - 1; - assert_true(*global_lock_count >= 0); + auto new_lock_count = xe::atomic_dec(global_lock_count); + assert_true(new_lock_count >= 0); global_mutex->unlock(); } bool PPCFrontend::Initialize() { - void* arg0 = reinterpret_cast(processor_->global_mutex()); + void* arg0 = reinterpret_cast(&xe::global_critical_region::mutex()); void* arg1 = reinterpret_cast(&builtins_.global_lock_count); builtins_.check_global_lock = processor_->DefineBuiltin("CheckGlobalLock", CheckGlobalLock, arg0, arg1); diff --git a/src/xenia/cpu/frontend/ppc_frontend.h b/src/xenia/cpu/frontend/ppc_frontend.h index 1e1663607..e4d7d8529 100644 --- a/src/xenia/cpu/frontend/ppc_frontend.h +++ b/src/xenia/cpu/frontend/ppc_frontend.h @@ -11,9 +11,7 @@ #define XENIA_CPU_FRONTEND_PPC_FRONTEND_H_ #include -#include -#include "xenia/base/mutex.h" #include "xenia/base/type_pool.h" #include "xenia/cpu/frontend/context_info.h" #include "xenia/cpu/function.h" diff --git a/src/xenia/cpu/function.h b/src/xenia/cpu/function.h index ad1d799b6..0cfd225e0 100644 --- a/src/xenia/cpu/function.h +++ b/src/xenia/cpu/function.h @@ -13,7 +13,6 @@ #include #include -#include "xenia/base/mutex.h" #include "xenia/cpu/debug_info.h" #include "xenia/cpu/frontend/ppc_context.h" #include "xenia/cpu/symbol.h" diff --git a/src/xenia/cpu/mmio_handler.cc b/src/xenia/cpu/mmio_handler.cc index 27ff93f33..5b704cf4d 100644 --- a/src/xenia/cpu/mmio_handler.cc +++ b/src/xenia/cpu/mmio_handler.cc @@ -113,9 +113,9 @@ uintptr_t MMIOHandler::AddPhysicalWriteWatch(uint32_t guest_address, entry->callback = callback; entry->callback_context = callback_context; entry->callback_data = callback_data; - write_watch_mutex_.lock(); + global_critical_region_.mutex().lock(); write_watches_.push_back(entry); - write_watch_mutex_.unlock(); + 
global_critical_region_.mutex().unlock(); // Make the desired range read only under all address spaces. xe::memory::Protect(physical_membase_ + entry->address, entry->length, @@ -154,12 +154,12 @@ void MMIOHandler::CancelWriteWatch(uintptr_t watch_handle) { ClearWriteWatch(entry); // Remove from table. - write_watch_mutex_.lock(); + global_critical_region_.mutex().lock(); auto it = std::find(write_watches_.begin(), write_watches_.end(), entry); if (it != write_watches_.end()) { write_watches_.erase(it); } - write_watch_mutex_.unlock(); + global_critical_region_.mutex().unlock(); delete entry; } @@ -170,7 +170,7 @@ bool MMIOHandler::CheckWriteWatch(void* thread_state, uint64_t fault_address) { physical_address &= 0x1FFFFFFF; } std::list pending_invalidates; - write_watch_mutex_.lock(); + global_critical_region_.mutex().lock(); for (auto it = write_watches_.begin(); it != write_watches_.end();) { auto entry = *it; if (entry->address <= physical_address && @@ -186,7 +186,7 @@ bool MMIOHandler::CheckWriteWatch(void* thread_state, uint64_t fault_address) { } ++it; } - write_watch_mutex_.unlock(); + global_critical_region_.mutex().unlock(); if (pending_invalidates.empty()) { // Rethrow access violation - range was not being watched. return false; diff --git a/src/xenia/cpu/mmio_handler.h b/src/xenia/cpu/mmio_handler.h index 476b20988..c8517fbb0 100644 --- a/src/xenia/cpu/mmio_handler.h +++ b/src/xenia/cpu/mmio_handler.h @@ -12,7 +12,6 @@ #include #include -#include #include #include "xenia/base/mutex.h" @@ -90,8 +89,8 @@ class MMIOHandler { std::vector mapped_ranges_; + xe::global_critical_region global_critical_region_; // TODO(benvanik): data structure magic. 
- xe::mutex write_watch_mutex_; std::list write_watches_; static MMIOHandler* global_handler_; diff --git a/src/xenia/cpu/module.cc b/src/xenia/cpu/module.cc index 8b17a7825..d672e233d 100644 --- a/src/xenia/cpu/module.cc +++ b/src/xenia/cpu/module.cc @@ -29,7 +29,7 @@ Module::~Module() = default; bool Module::ContainsAddress(uint32_t address) { return true; } Symbol* Module::LookupSymbol(uint32_t address, bool wait) { - lock_.lock(); + auto global_lock = global_critical_region_.Acquire(); const auto it = map_.find(address); Symbol* symbol = it != map_.end() ? it->second : nullptr; if (symbol) { @@ -37,10 +37,10 @@ Symbol* Module::LookupSymbol(uint32_t address, bool wait) { // Some other thread is declaring the symbol - wait. if (wait) { do { - lock_.unlock(); + global_lock.unlock(); // TODO(benvanik): sleep for less time? xe::threading::Sleep(std::chrono::microseconds(100)); - lock_.lock(); + global_lock.lock(); } while (symbol->status() == Symbol::Status::kDeclaring); } else { // Immediate request, just return. @@ -48,31 +48,31 @@ Symbol* Module::LookupSymbol(uint32_t address, bool wait) { } } } - lock_.unlock(); + global_lock.unlock(); return symbol; } Symbol::Status Module::DeclareSymbol(Symbol::Type type, uint32_t address, Symbol** out_symbol) { *out_symbol = nullptr; - lock_.lock(); + auto global_lock = global_critical_region_.Acquire(); auto it = map_.find(address); Symbol* symbol = it != map_.end() ? it->second : nullptr; Symbol::Status status; if (symbol) { // If we exist but are the wrong type, die. if (symbol->type() != type) { - lock_.unlock(); + global_lock.unlock(); return Symbol::Status::kFailed; } // If we aren't ready yet spin and wait. if (symbol->status() == Symbol::Status::kDeclaring) { // Still declaring, so spin. do { - lock_.unlock(); + global_lock.unlock(); // TODO(benvanik): sleep for less time? 
xe::threading::Sleep(std::chrono::microseconds(100)); - lock_.lock(); + global_lock.lock(); } while (symbol->status() == Symbol::Status::kDeclaring); } status = symbol->status(); @@ -90,7 +90,7 @@ Symbol::Status Module::DeclareSymbol(Symbol::Type type, uint32_t address, list_.emplace_back(symbol); status = Symbol::Status::kNew; } - lock_.unlock(); + global_lock.unlock(); *out_symbol = symbol; // Get debug info from providers, if this is new. @@ -117,7 +117,7 @@ Symbol::Status Module::DeclareVariable(uint32_t address, Symbol** out_symbol) { } Symbol::Status Module::DefineSymbol(Symbol* symbol) { - lock_.lock(); + auto global_lock = global_critical_region_.Acquire(); Symbol::Status status; if (symbol->status() == Symbol::Status::kDeclared) { // Declared but undefined, so request caller define it. @@ -126,16 +126,16 @@ Symbol::Status Module::DefineSymbol(Symbol* symbol) { } else if (symbol->status() == Symbol::Status::kDefining) { // Still defining, so spin. do { - lock_.unlock(); + global_lock.unlock(); // TODO(benvanik): sleep for less time? 
xe::threading::Sleep(std::chrono::microseconds(100)); - lock_.lock(); + global_lock.lock(); } while (symbol->status() == Symbol::Status::kDefining); status = symbol->status(); } else { status = symbol->status(); } - lock_.unlock(); + global_lock.unlock(); return status; } @@ -148,7 +148,7 @@ Symbol::Status Module::DefineVariable(Symbol* symbol) { } void Module::ForEachFunction(std::function callback) { - std::lock_guard guard(lock_); + auto global_lock = global_critical_region_.Acquire(); for (auto& symbol : list_) { if (symbol->type() == Symbol::Type::kFunction) { Function* info = static_cast(symbol.get()); @@ -159,7 +159,7 @@ void Module::ForEachFunction(std::function callback) { void Module::ForEachSymbol(size_t start_index, size_t end_index, std::function callback) { - std::lock_guard guard(lock_); + auto global_lock = global_critical_region_.Acquire(); start_index = std::min(start_index, list_.size()); end_index = std::min(end_index, list_.size()); for (size_t i = start_index; i <= end_index; ++i) { @@ -169,7 +169,7 @@ void Module::ForEachSymbol(size_t start_index, size_t end_index, } size_t Module::QuerySymbolCount() { - std::lock_guard guard(lock_); + auto global_lock = global_critical_region_.Acquire(); return list_.size(); } diff --git a/src/xenia/cpu/module.h b/src/xenia/cpu/module.h index 0a3adfc95..ba7ae2477 100644 --- a/src/xenia/cpu/module.h +++ b/src/xenia/cpu/module.h @@ -12,7 +12,6 @@ #include #include -#include #include #include #include @@ -64,8 +63,8 @@ class Module { Symbol** out_symbol); Symbol::Status DefineSymbol(Symbol* symbol); + xe::global_critical_region global_critical_region_; // TODO(benvanik): replace with a better data structure. 
- xe::mutex lock_; std::unordered_map map_; std::vector> list_; }; diff --git a/src/xenia/cpu/processor.cc b/src/xenia/cpu/processor.cc index 2cd7db16d..bb109de09 100644 --- a/src/xenia/cpu/processor.cc +++ b/src/xenia/cpu/processor.cc @@ -60,7 +60,7 @@ Processor::Processor(xe::Memory* memory, ExportResolver* export_resolver, Processor::~Processor() { { - std::lock_guard guard(modules_lock_); + auto global_lock = global_critical_region_.Acquire(); modules_.clear(); } @@ -126,13 +126,13 @@ bool Processor::Setup() { } bool Processor::AddModule(std::unique_ptr module) { - std::lock_guard guard(modules_lock_); + auto global_lock = global_critical_region_.Acquire(); modules_.push_back(std::move(module)); return true; } Module* Processor::GetModule(const char* name) { - std::lock_guard guard(modules_lock_); + auto global_lock = global_critical_region_.Acquire(); for (const auto& module : modules_) { if (module->name() == name) { return module.get(); @@ -142,7 +142,7 @@ Module* Processor::GetModule(const char* name) { } std::vector Processor::GetModules() { - std::lock_guard guard(modules_lock_); + auto global_lock = global_critical_region_.Acquire(); std::vector clone(modules_.size()); for (const auto& module : modules_) { clone.push_back(module.get()); @@ -215,7 +215,7 @@ Function* Processor::LookupFunction(uint32_t address) { // Find the module that contains the address. Module* code_module = nullptr; { - std::lock_guard guard(modules_lock_); + auto global_lock = global_critical_region_.Acquire(); // TODO(benvanik): sort by code address (if contiguous) so can bsearch. // TODO(benvanik): cache last module low/high, as likely to be in there. for (const auto& module : modules_) { @@ -335,7 +335,7 @@ uint64_t Processor::ExecuteInterrupt(ThreadState* thread_state, // Hold the global lock during interrupt dispatch. // This will block if any code is in a critical region (has interrupts // disabled) or if any other interrupt is executing. 
- std::lock_guard lock(global_mutex_); + auto global_lock = global_critical_region_.Acquire(); PPCContext* context = thread_state->context(); assert_true(arg_count <= 5); diff --git a/src/xenia/cpu/processor.h b/src/xenia/cpu/processor.h index 73601b20e..17bb321bc 100644 --- a/src/xenia/cpu/processor.h +++ b/src/xenia/cpu/processor.h @@ -86,7 +86,6 @@ class Processor { uint64_t ExecuteInterrupt(ThreadState* thread_state, uint32_t address, uint64_t args[], size_t arg_count); - xe::recursive_mutex* global_mutex() { return &global_mutex_; } Irql RaiseIrql(Irql new_value); void LowerIrql(Irql old_value); @@ -104,13 +103,12 @@ class Processor { ExportResolver* export_resolver_ = nullptr; EntryTable entry_table_; - xe::mutex modules_lock_; + xe::global_critical_region global_critical_region_; std::vector> modules_; Module* builtin_module_ = nullptr; uint32_t next_builtin_address_ = 0xFFFF0000u; Irql irql_; - xe::recursive_mutex global_mutex_; }; } // namespace cpu diff --git a/src/xenia/cpu/thread_state.cc b/src/xenia/cpu/thread_state.cc index dd24c24f3..7c19d6053 100644 --- a/src/xenia/cpu/thread_state.cc +++ b/src/xenia/cpu/thread_state.cc @@ -90,7 +90,7 @@ ThreadState::ThreadState(Processor* processor, uint32_t thread_id, std::memset(context_, 0, sizeof(PPCContext)); // Stash pointers to common structures that callbacks may need. 
- context_->global_mutex = processor_->global_mutex(); + context_->global_mutex = &xe::global_critical_region::mutex(); context_->virtual_membase = memory_->virtual_membase(); context_->physical_membase = memory_->physical_membase(); context_->processor = processor_; diff --git a/src/xenia/debug/debugger.cc b/src/xenia/debug/debugger.cc index a3bdd6a9e..dbd7a0231 100644 --- a/src/xenia/debug/debugger.cc +++ b/src/xenia/debug/debugger.cc @@ -11,7 +11,6 @@ #include -#include #include #include "xenia/base/filesystem.h" @@ -254,6 +253,7 @@ void Debugger::FindBreakpoints(uint32_t address, } bool Debugger::SuspendAllThreads() { + auto global_lock = global_critical_region_.Acquire(); auto threads = emulator_->kernel_state()->object_table()->GetObjectsByType( XObject::kTypeThread); diff --git a/src/xenia/debug/debugger.h b/src/xenia/debug/debugger.h index d23fa8191..889c1e06c 100644 --- a/src/xenia/debug/debugger.h +++ b/src/xenia/debug/debugger.h @@ -20,6 +20,7 @@ #include #include "xenia/base/mapped_memory.h" +#include "xenia/base/mutex.h" #include "xenia/base/threading.h" #include "xenia/cpu/processor.h" #include "xenia/cpu/thread_state.h" @@ -106,6 +107,7 @@ class Debugger { std::recursive_mutex mutex_; ExecutionState execution_state_ = ExecutionState::kStopped; + xe::global_critical_region global_critical_region_; std::multimap breakpoints_; }; diff --git a/src/xenia/gpu/gl4/texture_cache.h b/src/xenia/gpu/gl4/texture_cache.h index 15177a3be..fe9334c25 100644 --- a/src/xenia/gpu/gl4/texture_cache.h +++ b/src/xenia/gpu/gl4/texture_cache.h @@ -10,7 +10,6 @@ #ifndef XENIA_GPU_GL4_TEXTURE_CACHE_H_ #define XENIA_GPU_GL4_TEXTURE_CACHE_H_ -#include #include #include diff --git a/src/xenia/hid/winkey/winkey_input_driver.cc b/src/xenia/hid/winkey/winkey_input_driver.cc index 16d4b4bf6..7be1a5b26 100644 --- a/src/xenia/hid/winkey/winkey_input_driver.cc +++ b/src/xenia/hid/winkey/winkey_input_driver.cc @@ -24,7 +24,7 @@ WinKeyInputDriver::WinKeyInputDriver(InputSystem* 
input_system) // Register a key listener. input_system_->emulator()->display_window()->on_key_down.AddListener( [this](ui::KeyEvent* evt) { - std::lock_guard lock(key_event_mutex_); + auto global_lock = global_critical_region_.Acquire(); KeyEvent key; key.vkey = evt->key_code(); @@ -35,7 +35,7 @@ WinKeyInputDriver::WinKeyInputDriver(InputSystem* input_system) }); input_system_->emulator()->display_window()->on_key_up.AddListener( [this](ui::KeyEvent* evt) { - std::lock_guard lock(key_event_mutex_); + auto global_lock = global_critical_region_.Acquire(); KeyEvent key; key.vkey = evt->key_code(); @@ -210,18 +210,17 @@ X_RESULT WinKeyInputDriver::GetKeystroke(uint32_t user_index, uint32_t flags, uint8_t hid_code = 0; // Pop from the queue. - key_event_mutex_.lock(); - if (key_events_.size() == 0) { - key_event_mutex_.unlock(); - - // No keys! - return X_ERROR_EMPTY; + KeyEvent evt; + { + auto global_lock = global_critical_region_.Acquire(); + if (key_events_.empty()) { + // No keys! + return X_ERROR_EMPTY; + } + evt = key_events_.front(); + key_events_.pop(); } - KeyEvent evt = key_events_.front(); - key_events_.pop(); - key_event_mutex_.unlock(); - // TODO: Some other way to toggle this... 
if (IS_KEY_TOGGLED(VK_CAPITAL)) { // dpad toggled diff --git a/src/xenia/hid/winkey/winkey_input_driver.h b/src/xenia/hid/winkey/winkey_input_driver.h index ba8ec9518..65d2cd476 100644 --- a/src/xenia/hid/winkey/winkey_input_driver.h +++ b/src/xenia/hid/winkey/winkey_input_driver.h @@ -10,9 +10,9 @@ #ifndef XENIA_HID_WINKEY_WINKEY_INPUT_DRIVER_H_ #define XENIA_HID_WINKEY_WINKEY_INPUT_DRIVER_H_ -#include #include +#include "xenia/base/mutex.h" #include "xenia/hid/input_driver.h" namespace xe { @@ -40,8 +40,9 @@ class WinKeyInputDriver : public InputDriver { bool transition = false; // going up(false) or going down(true) bool prev_state = false; // down(true) or up(false) }; + + xe::global_critical_region global_critical_region_; std::queue key_events_; - std::mutex key_event_mutex_; uint32_t packet_number_; }; diff --git a/src/xenia/kernel/apps/xmp_app.cc b/src/xenia/kernel/apps/xmp_app.cc index 5a8fb8ca5..3be4fdbb7 100644 --- a/src/xenia/kernel/apps/xmp_app.cc +++ b/src/xenia/kernel/apps/xmp_app.cc @@ -81,7 +81,7 @@ X_RESULT XXMPApp::XMPCreateTitlePlaylist( xe::store_and_swap(memory_->TranslateVirtual(out_playlist_handle), playlist->handle); - std::lock_guard lock(mutex_); + auto global_lock = global_critical_region_.Acquire(); playlists_.insert({playlist->handle, playlist.get()}); playlist.release(); return X_ERROR_SUCCESS; @@ -89,7 +89,7 @@ X_RESULT XXMPApp::XMPCreateTitlePlaylist( X_RESULT XXMPApp::XMPDeleteTitlePlaylist(uint32_t playlist_handle) { XELOGD("XMPDeleteTitlePlaylist(%.8X)", playlist_handle); - std::lock_guard lock(mutex_); + auto global_lock = global_critical_region_.Acquire(); auto it = playlists_.find(playlist_handle); if (it == playlists_.end()) { XELOGE("Playlist %.8X not found", playlist_handle); @@ -109,7 +109,7 @@ X_RESULT XXMPApp::XMPPlayTitlePlaylist(uint32_t playlist_handle, XELOGD("XMPPlayTitlePlaylist(%.8X, %.8X)", playlist_handle, song_handle); Playlist* playlist = nullptr; { - std::lock_guard lock(mutex_); + auto global_lock = 
global_critical_region_.Acquire(); auto it = playlists_.find(playlist_handle); if (it == playlists_.end()) { XELOGE("Playlist %.8X not found", playlist_handle); diff --git a/src/xenia/kernel/apps/xmp_app.h b/src/xenia/kernel/apps/xmp_app.h index 03420c009..6d135d752 100644 --- a/src/xenia/kernel/apps/xmp_app.h +++ b/src/xenia/kernel/apps/xmp_app.h @@ -11,7 +11,6 @@ #define XENIA_KERNEL_APPS_XMP_APP_H_ #include -#include #include #include #include @@ -102,7 +101,7 @@ class XXMPApp : public XApp { Playlist* active_playlist_; int active_song_index_; - xe::mutex mutex_; + xe::global_critical_region global_critical_region_; std::unordered_map playlists_; uint32_t next_playlist_handle_; uint32_t next_song_handle_; diff --git a/src/xenia/kernel/content_manager.cc b/src/xenia/kernel/content_manager.cc index 9b098853a..2cf3713d3 100644 --- a/src/xenia/kernel/content_manager.cc +++ b/src/xenia/kernel/content_manager.cc @@ -125,7 +125,7 @@ std::unique_ptr ContentManager::ResolvePackage( return nullptr; } - std::lock_guard lock(content_mutex_); + auto global_lock = global_critical_region_.Acquire(); auto package = std::make_unique(kernel_state_, root_name, data, package_path); @@ -139,7 +139,7 @@ bool ContentManager::ContentExists(const XCONTENT_DATA& data) { X_RESULT ContentManager::CreateContent(std::string root_name, const XCONTENT_DATA& data) { - std::lock_guard lock(content_mutex_); + auto global_lock = global_critical_region_.Acquire(); if (open_packages_.count(root_name)) { // Already content open with this root name. @@ -166,7 +166,7 @@ X_RESULT ContentManager::CreateContent(std::string root_name, X_RESULT ContentManager::OpenContent(std::string root_name, const XCONTENT_DATA& data) { - std::lock_guard lock(content_mutex_); + auto global_lock = global_critical_region_.Acquire(); if (open_packages_.count(root_name)) { // Already content open with this root name. 
@@ -189,7 +189,7 @@ X_RESULT ContentManager::OpenContent(std::string root_name, } X_RESULT ContentManager::CloseContent(std::string root_name) { - std::lock_guard lock(content_mutex_); + auto global_lock = global_critical_region_.Acquire(); auto it = open_packages_.find(root_name); if (it == open_packages_.end()) { @@ -205,7 +205,7 @@ X_RESULT ContentManager::CloseContent(std::string root_name) { X_RESULT ContentManager::GetContentThumbnail(const XCONTENT_DATA& data, std::vector* buffer) { - std::lock_guard lock(content_mutex_); + auto global_lock = global_critical_region_.Acquire(); auto package_path = ResolvePackagePath(data); auto thumb_path = xe::join_paths(package_path, kThumbnailFileName); if (xe::filesystem::PathExists(thumb_path)) { @@ -224,7 +224,7 @@ X_RESULT ContentManager::GetContentThumbnail(const XCONTENT_DATA& data, X_RESULT ContentManager::SetContentThumbnail(const XCONTENT_DATA& data, std::vector buffer) { - std::lock_guard lock(content_mutex_); + auto global_lock = global_critical_region_.Acquire(); auto package_path = ResolvePackagePath(data); xe::filesystem::CreateFolder(package_path); if (xe::filesystem::PathExists(package_path)) { @@ -239,7 +239,7 @@ X_RESULT ContentManager::SetContentThumbnail(const XCONTENT_DATA& data, } X_RESULT ContentManager::DeleteContent(const XCONTENT_DATA& data) { - std::lock_guard lock(content_mutex_); + auto global_lock = global_critical_region_.Acquire(); auto package_path = ResolvePackagePath(data); if (xe::filesystem::PathExists(package_path)) { diff --git a/src/xenia/kernel/content_manager.h b/src/xenia/kernel/content_manager.h index a142a8eca..550a1c188 100644 --- a/src/xenia/kernel/content_manager.h +++ b/src/xenia/kernel/content_manager.h @@ -11,7 +11,6 @@ #define XENIA_KERNEL_CONTENT_MANAGER_H_ #include -#include #include #include #include @@ -88,7 +87,7 @@ class ContentManager { KernelState* kernel_state_; std::wstring root_path_; - xe::recursive_mutex content_mutex_; + xe::global_critical_region 
global_critical_region_; std::unordered_map open_packages_; }; diff --git a/src/xenia/kernel/dispatcher.cc b/src/xenia/kernel/dispatcher.cc index a57d7a218..0a9867881 100644 --- a/src/xenia/kernel/dispatcher.cc +++ b/src/xenia/kernel/dispatcher.cc @@ -22,9 +22,5 @@ Dispatcher::Dispatcher(KernelState* kernel_state) Dispatcher::~Dispatcher() { delete dpc_list_; } -void Dispatcher::Lock() { lock_.lock(); } - -void Dispatcher::Unlock() { lock_.unlock(); } - } // namespace kernel } // namespace xe diff --git a/src/xenia/kernel/dispatcher.h b/src/xenia/kernel/dispatcher.h index 991653a2b..b8cb07c2c 100644 --- a/src/xenia/kernel/dispatcher.h +++ b/src/xenia/kernel/dispatcher.h @@ -10,9 +10,6 @@ #ifndef XENIA_KERNEL_DISPATCHER_H_ #define XENIA_KERNEL_DISPATCHER_H_ -#include - -#include "xenia/base/mutex.h" #include "xenia/xbox.h" namespace xe { @@ -21,6 +18,7 @@ namespace kernel { class KernelState; class NativeList; +// All access must be guarded by the global critical section. class Dispatcher { public: explicit Dispatcher(KernelState* kernel_state); @@ -28,16 +26,10 @@ class Dispatcher { KernelState* kernel_state() const { return kernel_state_; } - void Lock(); - void Unlock(); - NativeList* dpc_list() const { return dpc_list_; } - private: private: KernelState* kernel_state_; - - xe::mutex lock_; NativeList* dpc_list_; }; diff --git a/src/xenia/kernel/kernel_state.cc b/src/xenia/kernel/kernel_state.cc index ce136bf75..94e59f7a1 100644 --- a/src/xenia/kernel/kernel_state.cc +++ b/src/xenia/kernel/kernel_state.cc @@ -174,7 +174,7 @@ bool KernelState::IsKernelModule(const char* name) { // Executing module isn't a kernel module. return false; } - std::lock_guard lock(object_mutex_); + auto global_lock = global_critical_region_.Acquire(); for (auto kernel_module : kernel_modules_) { if (kernel_module->Matches(name)) { return true; @@ -204,7 +204,7 @@ object_ref KernelState::GetModule(const char* name) { // Some games request this, for some reason. wtf. 
return nullptr; } - std::lock_guard lock(object_mutex_); + auto global_lock = global_critical_region_.Acquire(); for (auto kernel_module : kernel_modules_) { if (kernel_module->Matches(name)) { return retain_object(kernel_module.get()); @@ -262,9 +262,9 @@ void KernelState::SetExecutableModule(object_ref module) { dispatch_thread_ = object_ref(new XHostThread(this, 128 * 1024, 0, [this]() { while (dispatch_thread_running_) { - std::unique_lock lock(dispatch_mutex_); + auto global_lock = global_critical_region_.Acquire(); if (dispatch_queue_.empty()) { - dispatch_cond_.wait(lock); + dispatch_cond_.wait(global_lock); if (!dispatch_thread_running_) { break; } @@ -281,7 +281,7 @@ void KernelState::SetExecutableModule(object_ref module) { } void KernelState::LoadKernelModule(object_ref kernel_module) { - std::lock_guard lock(object_mutex_); + auto global_lock = global_critical_region_.Acquire(); kernel_modules_.push_back(std::move(kernel_module)); } @@ -296,7 +296,7 @@ object_ref KernelState::LoadUserModule(const char* raw_name) { object_ref module; { - std::lock_guard lock(object_mutex_); + auto global_lock = global_critical_region_.Acquire(); // See if we've already loaded it for (auto& existing_module : user_modules_) { @@ -337,7 +337,7 @@ object_ref KernelState::LoadUserModule(const char* raw_name) { } void KernelState::TerminateTitle(bool from_guest_thread) { - std::lock_guard lock(object_mutex_); + auto global_lock = global_critical_region_.Acquire(); // First: call terminate routines. // TODO(benvanik): these might take arguments. @@ -391,13 +391,13 @@ void KernelState::TerminateTitle(bool from_guest_thread) { // code anymore) // Also, manually invoke the lock guard's destructor, because Terminate // does not return. 
- lock.~lock_guard(); + global_lock.unlock(); XThread::GetCurrentThread()->Terminate(0); } } void KernelState::RegisterThread(XThread* thread) { - std::lock_guard lock(object_mutex_); + auto global_lock = global_critical_region_.Acquire(); threads_by_id_[thread->thread_id()] = thread; auto pib = @@ -406,7 +406,7 @@ void KernelState::RegisterThread(XThread* thread) { } void KernelState::UnregisterThread(XThread* thread) { - std::lock_guard lock(object_mutex_); + auto global_lock = global_critical_region_.Acquire(); auto it = threads_by_id_.find(thread->thread_id()); if (it != threads_by_id_.end()) { threads_by_id_.erase(it); @@ -418,7 +418,7 @@ void KernelState::UnregisterThread(XThread* thread) { } void KernelState::OnThreadExecute(XThread* thread) { - std::lock_guard lock(object_mutex_); + auto global_lock = global_critical_region_.Acquire(); // Must be called on executing thread. assert_true(XThread::GetCurrentThread() == thread); @@ -440,7 +440,7 @@ void KernelState::OnThreadExecute(XThread* thread) { } void KernelState::OnThreadExit(XThread* thread) { - std::lock_guard lock(object_mutex_); + auto global_lock = global_critical_region_.Acquire(); // Must be called on executing thread. 
assert_true(XThread::GetCurrentThread() == thread); @@ -466,7 +466,7 @@ void KernelState::OnThreadExit(XThread* thread) { } object_ref KernelState::GetThreadByID(uint32_t thread_id) { - std::lock_guard lock(object_mutex_); + auto global_lock = global_critical_region_.Acquire(); XThread* thread = nullptr; auto it = threads_by_id_.find(thread_id); if (it != threads_by_id_.end()) { @@ -476,7 +476,7 @@ object_ref KernelState::GetThreadByID(uint32_t thread_id) { } void KernelState::RegisterNotifyListener(XNotifyListener* listener) { - std::lock_guard lock(object_mutex_); + auto global_lock = global_critical_region_.Acquire(); notify_listeners_.push_back(retain_object(listener)); // Games seem to expect a few notifications on startup, only for the first @@ -500,7 +500,7 @@ void KernelState::RegisterNotifyListener(XNotifyListener* listener) { } void KernelState::UnregisterNotifyListener(XNotifyListener* listener) { - std::lock_guard lock(object_mutex_); + auto global_lock = global_critical_region_.Acquire(); for (auto it = notify_listeners_.begin(); it != notify_listeners_.end(); ++it) { if ((*it).get() == listener) { @@ -511,7 +511,7 @@ void KernelState::UnregisterNotifyListener(XNotifyListener* listener) { } void KernelState::BroadcastNotification(XNotificationID id, uint32_t data) { - std::lock_guard lock(object_mutex_); + auto global_lock = global_critical_region_.Acquire(); for (auto it = notify_listeners_.begin(); it != notify_listeners_.end(); ++it) { (*it)->EnqueueNotification(id, data); @@ -574,7 +574,7 @@ void KernelState::CompleteOverlappedDeferredEx( auto ptr = memory()->TranslateVirtual(overlapped_ptr); XOverlappedSetResult(ptr, X_ERROR_IO_PENDING); XOverlappedSetContext(ptr, XThread::GetCurrentThreadHandle()); - std::unique_lock lock(dispatch_mutex_); + auto global_lock = global_critical_region_.Acquire(); dispatch_queue_.push_back([this, completion_callback, overlapped_ptr, result, extended_error, length]() { xe::threading::Sleep( diff --git 
a/src/xenia/kernel/kernel_state.h b/src/xenia/kernel/kernel_state.h index dadfdb38c..4a42c308d 100644 --- a/src/xenia/kernel/kernel_state.h +++ b/src/xenia/kernel/kernel_state.h @@ -17,7 +17,6 @@ #include #include #include -#include #include #include "xenia/base/mutex.h" @@ -105,8 +104,8 @@ class KernelState { UserProfile* user_profile() const { return user_profile_.get(); } ContentManager* content_manager() const { return content_manager_.get(); } + // Access must be guarded by the global critical region. ObjectTable* object_table() const { return object_table_; } - xe::recursive_mutex& object_mutex() { return object_mutex_; } uint32_t process_type() const; void set_process_type(uint32_t value); @@ -178,8 +177,10 @@ class KernelState { std::unique_ptr user_profile_; std::unique_ptr content_manager_; + xe::global_critical_region global_critical_region_; + + // Must be guarded by the global critical region. ObjectTable* object_table_; - xe::recursive_mutex object_mutex_; std::unordered_map threads_by_id_; std::vector> notify_listeners_; bool has_notified_startup_; @@ -194,8 +195,8 @@ class KernelState { std::atomic dispatch_thread_running_; object_ref dispatch_thread_; - std::mutex dispatch_mutex_; - std::condition_variable dispatch_cond_; + // Must be guarded by the global critical region. + std::condition_variable_any dispatch_cond_; std::list> dispatch_queue_; friend class XObject; diff --git a/src/xenia/kernel/object_table.cc b/src/xenia/kernel/object_table.cc index 817d329a3..8531aa258 100644 --- a/src/xenia/kernel/object_table.cc +++ b/src/xenia/kernel/object_table.cc @@ -22,7 +22,7 @@ ObjectTable::ObjectTable() : table_capacity_(0), table_(nullptr), last_free_entry_(0) {} ObjectTable::~ObjectTable() { - std::lock_guard lock(table_mutex_); + auto global_lock = global_critical_region_.Acquire(); // Release all objects. 
for (uint32_t n = 0; n < table_capacity_; n++) { @@ -89,7 +89,7 @@ X_STATUS ObjectTable::AddHandle(XObject* object, X_HANDLE* out_handle) { uint32_t slot = 0; { - std::lock_guard lock(table_mutex_); + auto global_lock = global_critical_region_.Acquire(); // Find a free slot. result = FindFreeSlot(&slot); @@ -128,7 +128,7 @@ X_STATUS ObjectTable::DuplicateHandle(X_HANDLE handle, X_HANDLE* out_handle) { } X_STATUS ObjectTable::RetainHandle(X_HANDLE handle) { - std::lock_guard lock(table_mutex_); + auto global_lock = global_critical_region_.Acquire(); ObjectTableEntry* entry = LookupTable(handle); if (!entry) { @@ -140,7 +140,7 @@ X_STATUS ObjectTable::RetainHandle(X_HANDLE handle) { } X_STATUS ObjectTable::ReleaseHandle(X_HANDLE handle) { - std::lock_guard lock(table_mutex_); + auto global_lock = global_critical_region_.Acquire(); ObjectTableEntry* entry = LookupTable(handle); if (!entry) { @@ -170,7 +170,7 @@ X_STATUS ObjectTable::RemoveHandle(X_HANDLE handle) { return X_STATUS_INVALID_HANDLE; } - std::lock_guard lock(table_mutex_); + auto global_lock = global_critical_region_.Acquire(); if (entry->object) { auto object = entry->object; entry->object = nullptr; @@ -189,7 +189,7 @@ ObjectTable::ObjectTableEntry* ObjectTable::LookupTable(X_HANDLE handle) { return nullptr; } - std::lock_guard lock(table_mutex_); + auto global_lock = global_critical_region_.Acquire(); // Lower 2 bits are ignored. uint32_t slot = handle >> 2; @@ -208,7 +208,7 @@ XObject* ObjectTable::LookupObject(X_HANDLE handle, bool already_locked) { XObject* object = nullptr; if (!already_locked) { - table_mutex_.lock(); + global_critical_region_.mutex().lock(); } // Lower 2 bits are ignored. 
@@ -228,7 +228,7 @@ XObject* ObjectTable::LookupObject(X_HANDLE handle, bool already_locked) { } if (!already_locked) { - table_mutex_.unlock(); + global_critical_region_.mutex().unlock(); } return object; @@ -236,7 +236,7 @@ XObject* ObjectTable::LookupObject(X_HANDLE handle, bool already_locked) { void ObjectTable::GetObjectsByType(XObject::Type type, std::vector>* results) { - std::lock_guard lock(table_mutex_); + auto global_lock = global_critical_region_.Acquire(); for (uint32_t slot = 0; slot < table_capacity_; ++slot) { auto& entry = table_[slot]; if (entry.object) { @@ -267,7 +267,7 @@ X_STATUS ObjectTable::AddNameMapping(const std::string& name, X_HANDLE handle) { std::transform(lower_name.begin(), lower_name.end(), lower_name.begin(), tolower); - std::lock_guard lock(table_mutex_); + auto global_lock = global_critical_region_.Acquire(); if (name_table_.count(lower_name)) { return X_STATUS_OBJECT_NAME_COLLISION; } @@ -281,7 +281,7 @@ void ObjectTable::RemoveNameMapping(const std::string& name) { std::transform(lower_name.begin(), lower_name.end(), lower_name.begin(), tolower); - std::lock_guard lock(table_mutex_); + auto global_lock = global_critical_region_.Acquire(); auto it = name_table_.find(lower_name); if (it != name_table_.end()) { name_table_.erase(it); @@ -295,7 +295,7 @@ X_STATUS ObjectTable::GetObjectByName(const std::string& name, std::transform(lower_name.begin(), lower_name.end(), lower_name.begin(), tolower); - std::lock_guard lock(table_mutex_); + auto global_lock = global_critical_region_.Acquire(); auto it = name_table_.find(lower_name); if (it == name_table_.end()) { *out_handle = X_INVALID_HANDLE_VALUE; diff --git a/src/xenia/kernel/object_table.h b/src/xenia/kernel/object_table.h index d006a4ca5..0747bb16f 100644 --- a/src/xenia/kernel/object_table.h +++ b/src/xenia/kernel/object_table.h @@ -10,7 +10,6 @@ #ifndef XENIA_KERNEL_OBJECT_TABLE_H_ #define XENIA_KERNEL_OBJECT_TABLE_H_ -#include #include #include #include @@ -65,7 +64,7 @@ 
class ObjectTable { X_HANDLE TranslateHandle(X_HANDLE handle); X_STATUS FindFreeSlot(uint32_t* out_slot); - xe::recursive_mutex table_mutex_; + xe::global_critical_region global_critical_region_; uint32_t table_capacity_; ObjectTableEntry* table_; uint32_t last_free_entry_; diff --git a/src/xenia/kernel/objects/xnotify_listener.cc b/src/xenia/kernel/objects/xnotify_listener.cc index db93a327b..b84a9d8e3 100644 --- a/src/xenia/kernel/objects/xnotify_listener.cc +++ b/src/xenia/kernel/objects/xnotify_listener.cc @@ -36,7 +36,7 @@ void XNotifyListener::EnqueueNotification(XNotificationID id, uint32_t data) { return; } - std::lock_guard lock(lock_); + auto global_lock = global_critical_region_.Acquire(); if (notifications_.count(id)) { // Already exists. Overwrite. notifications_[id] = data; @@ -50,7 +50,7 @@ void XNotifyListener::EnqueueNotification(XNotificationID id, uint32_t data) { bool XNotifyListener::DequeueNotification(XNotificationID* out_id, uint32_t* out_data) { - std::lock_guard lock(lock_); + auto global_lock = global_critical_region_.Acquire(); bool dequeued = false; if (notification_count_) { dequeued = true; @@ -68,7 +68,7 @@ bool XNotifyListener::DequeueNotification(XNotificationID* out_id, bool XNotifyListener::DequeueNotification(XNotificationID id, uint32_t* out_data) { - std::lock_guard lock(lock_); + auto global_lock = global_critical_region_.Acquire(); bool dequeued = false; if (notification_count_) { auto it = notifications_.find(id); diff --git a/src/xenia/kernel/objects/xnotify_listener.h b/src/xenia/kernel/objects/xnotify_listener.h index 7a9638771..1b16b5a70 100644 --- a/src/xenia/kernel/objects/xnotify_listener.h +++ b/src/xenia/kernel/objects/xnotify_listener.h @@ -40,7 +40,7 @@ class XNotifyListener : public XObject { private: std::unique_ptr wait_handle_; - xe::mutex lock_; + xe::global_critical_region global_critical_region_; std::unordered_map notifications_; size_t notification_count_ = 0; uint64_t mask_ = 0; diff --git 
a/src/xenia/kernel/objects/xthread.cc b/src/xenia/kernel/objects/xthread.cc index 32800df6b..4218d338a 100644 --- a/src/xenia/kernel/objects/xthread.cc +++ b/src/xenia/kernel/objects/xthread.cc @@ -16,7 +16,6 @@ #include "xenia/base/clock.h" #include "xenia/base/logging.h" #include "xenia/base/math.h" -#include "xenia/base/mutex.h" #include "xenia/base/threading.h" #include "xenia/cpu/processor.h" #include "xenia/emulator.h" @@ -36,7 +35,6 @@ namespace kernel { uint32_t next_xthread_id_ = 0; thread_local XThread* current_thread_tls_ = nullptr; -xe::mutex critical_region_; XThread::XThread(KernelState* kernel_state, uint32_t stack_size, uint32_t xapi_thread_startup, uint32_t start_address, @@ -444,11 +442,12 @@ void XThread::Execute() { } void XThread::EnterCriticalRegion() { - // Global critical region. This isn't right, but is easy. - critical_region_.lock(); + xe::global_critical_region::mutex().lock(); } -void XThread::LeaveCriticalRegion() { critical_region_.unlock(); } +void XThread::LeaveCriticalRegion() { + xe::global_critical_region::mutex().unlock(); +} uint32_t XThread::RaiseIrql(uint32_t new_irql) { return irql_.exchange(new_irql); @@ -458,11 +457,11 @@ void XThread::LowerIrql(uint32_t new_irql) { irql_ = new_irql; } void XThread::CheckApcs() { DeliverAPCs(); } -void XThread::LockApc() { apc_lock_.lock(); } +void XThread::LockApc() { EnterCriticalRegion(); } void XThread::UnlockApc(bool queue_delivery) { bool needs_apc = apc_list_->HasPending(); - apc_lock_.unlock(); + LeaveCriticalRegion(); if (needs_apc && queue_delivery) { thread_->QueueUserCallback([this]() { DeliverAPCs(); }); } @@ -652,8 +651,15 @@ X_STATUS XThread::Resume(uint32_t* out_suspend_count) { } X_STATUS XThread::Suspend(uint32_t* out_suspend_count) { + auto global_lock = global_critical_region_.Acquire(); + ++guest_object()->suspend_count; + // If we are suspending ourselves, we can't hold the lock. 
+ if (XThread::GetCurrentThread() == this) { + global_lock.unlock(); + } + if (thread_->Suspend(out_suspend_count)) { return X_STATUS_SUCCESS; } else { diff --git a/src/xenia/kernel/objects/xthread.h b/src/xenia/kernel/objects/xthread.h index 290a503b1..fe35d2195 100644 --- a/src/xenia/kernel/objects/xthread.h +++ b/src/xenia/kernel/objects/xthread.h @@ -11,9 +11,9 @@ #define XENIA_KERNEL_OBJECTS_XTHREAD_H_ #include -#include #include +#include "xenia/base/mutex.h" #include "xenia/base/threading.h" #include "xenia/cpu/thread_state.h" #include "xenia/kernel/xobject.h" @@ -189,8 +189,8 @@ class XThread : public XObject { int32_t priority_ = 0; uint32_t affinity_ = 0; + xe::global_critical_region global_critical_region_; std::atomic irql_ = {0}; - xe::mutex apc_lock_; NativeList* apc_list_ = nullptr; }; diff --git a/src/xenia/kernel/xboxkrnl_threading.cc b/src/xenia/kernel/xboxkrnl_threading.cc index dab69e190..b4af69042 100644 --- a/src/xenia/kernel/xboxkrnl_threading.cc +++ b/src/xenia/kernel/xboxkrnl_threading.cc @@ -1274,15 +1274,13 @@ SHIM_CALL KeInsertQueueDpc_shim(PPCContext* ppc_context, uint32_t list_entry_ptr = dpc_ptr + 4; // Lock dispatcher. + auto global_lock = xe::global_critical_region::AcquireDirect(); auto dispatcher = kernel_state->dispatcher(); - dispatcher->Lock(); - auto dpc_list = dispatcher->dpc_list(); // If already in a queue, abort. 
if (dpc_list->IsQueued(list_entry_ptr)) { SHIM_SET_RETURN_32(0); - dispatcher->Unlock(); return; } @@ -1292,8 +1290,6 @@ SHIM_CALL KeInsertQueueDpc_shim(PPCContext* ppc_context, dpc_list->Insert(list_entry_ptr); - dispatcher->Unlock(); - SHIM_SET_RETURN_32(1); } @@ -1307,8 +1303,8 @@ SHIM_CALL KeRemoveQueueDpc_shim(PPCContext* ppc_context, uint32_t list_entry_ptr = dpc_ptr + 4; + auto global_lock = xe::global_critical_region::AcquireDirect(); auto dispatcher = kernel_state->dispatcher(); - dispatcher->Lock(); auto dpc_list = dispatcher->dpc_list(); if (dpc_list->IsQueued(list_entry_ptr)) { @@ -1316,8 +1312,6 @@ SHIM_CALL KeRemoveQueueDpc_shim(PPCContext* ppc_context, result = true; } - dispatcher->Unlock(); - SHIM_SET_RETURN_32(result ? 1 : 0); } @@ -1329,8 +1323,7 @@ pointer_result_t InterlockedPushEntrySList( // Hold a global lock during this method. Once in the lock we assume we have // exclusive access to the structure. - std::lock_guard lock( - *kernel_state()->processor()->global_mutex()); + auto global_lock = xe::global_critical_region::AcquireDirect(); alignas(8) X_SLIST_HEADER old_hdr = *plist_ptr; alignas(8) X_SLIST_HEADER new_hdr = {0}; @@ -1341,9 +1334,9 @@ pointer_result_t InterlockedPushEntrySList( entry->next = old_hdr.next.next; new_hdr.next.next = entry.guest_address(); - xe::atomic_cas(*reinterpret_cast(&old_hdr), - *reinterpret_cast(&new_hdr), - reinterpret_cast(plist_ptr.host_address())); + *reinterpret_cast(plist_ptr.host_address()) = + *reinterpret_cast(&new_hdr); + xe::threading::SyncMemory(); return old_head; } @@ -1355,8 +1348,7 @@ pointer_result_t InterlockedPopEntrySList(pointer_t plist_ptr) { // Hold a global lock during this method. Once in the lock we assume we have // exclusive access to the structure. 
- std::lock_guard lock( - *kernel_state()->processor()->global_mutex()); + auto global_lock = xe::global_critical_region::AcquireDirect(); uint32_t popped = 0; @@ -1373,9 +1365,9 @@ pointer_result_t InterlockedPopEntrySList(pointer_t plist_ptr) { new_hdr.next.next = next->next; new_hdr.sequence = old_hdr.sequence; - xe::atomic_cas(*reinterpret_cast(&old_hdr), - *reinterpret_cast(&new_hdr), - reinterpret_cast(plist_ptr.host_address())); + *reinterpret_cast(plist_ptr.host_address()) = + *reinterpret_cast(&new_hdr); + xe::threading::SyncMemory(); return popped; } @@ -1387,8 +1379,7 @@ pointer_result_t InterlockedFlushSList(pointer_t plist_ptr) { // Hold a global lock during this method. Once in the lock we assume we have // exclusive access to the structure. - std::lock_guard lock( - *kernel_state()->processor()->global_mutex()); + auto global_lock = xe::global_critical_region::AcquireDirect(); alignas(8) X_SLIST_HEADER old_hdr = *plist_ptr; alignas(8) X_SLIST_HEADER new_hdr = {0}; @@ -1397,9 +1388,9 @@ pointer_result_t InterlockedFlushSList(pointer_t plist_ptr) { new_hdr.depth = 0; new_hdr.sequence = 0; - xe::atomic_cas(*reinterpret_cast(&old_hdr), - *reinterpret_cast(&new_hdr), - reinterpret_cast(plist_ptr.host_address())); + *reinterpret_cast(plist_ptr.host_address()) = + *reinterpret_cast(&new_hdr); + xe::threading::SyncMemory(); return first; } diff --git a/src/xenia/kernel/xobject.cc b/src/xenia/kernel/xobject.cc index 93e1938fe..14f9bf97f 100644 --- a/src/xenia/kernel/xobject.cc +++ b/src/xenia/kernel/xobject.cc @@ -227,7 +227,7 @@ X_STATUS XObject::WaitMultiple(uint32_t count, XObject** objects, } uint8_t* XObject::CreateNative(uint32_t size) { - std::lock_guard lock(kernel_state_->object_mutex()); + auto global_lock = xe::global_critical_region::AcquireDirect(); uint32_t total_size = size + sizeof(X_OBJECT_HEADER); @@ -255,7 +255,7 @@ uint8_t* XObject::CreateNative(uint32_t size) { } void XObject::SetNativePointer(uint32_t native_ptr, bool uninitialized) { - 
std::lock_guard lock(kernel_state_->object_mutex()); + auto global_lock = xe::global_critical_region::AcquireDirect(); // If hit: We've already setup the native ptr with CreateNative! assert_zero(guest_object_ptr_); @@ -289,7 +289,7 @@ object_ref XObject::GetNativeObject(KernelState* kernel_state, // We identify this by checking the low bit of wait_list_blink - if it's 1, // we have already put our pointer in there. - std::lock_guard lock(kernel_state->object_mutex()); + auto global_lock = xe::global_critical_region::AcquireDirect(); auto header = reinterpret_cast(native_ptr); diff --git a/src/xenia/memory.cc b/src/xenia/memory.cc index c7c56cf12..1a3cebb45 100644 --- a/src/xenia/memory.cc +++ b/src/xenia/memory.cc @@ -13,7 +13,6 @@ #include #include -#include #include "xenia/base/clock.h" #include "xenia/base/logging.h" @@ -461,7 +460,7 @@ void BaseHeap::Dispose() { } void BaseHeap::DumpMap() { - std::lock_guard lock(heap_mutex_); + auto global_lock = global_critical_region_.Acquire(); XELOGE("------------------------------------------------------------------"); XELOGE("Heap: %.8X-%.8X", heap_base_, heap_base_ + heap_size_); XELOGE("------------------------------------------------------------------"); @@ -535,7 +534,7 @@ bool BaseHeap::AllocFixed(uint32_t base_address, uint32_t size, return false; } - std::lock_guard lock(heap_mutex_); + auto global_lock = global_critical_region_.Acquire(); // - If we are reserving the entire range requested must not be already // reserved. @@ -620,7 +619,7 @@ bool BaseHeap::AllocRange(uint32_t low_address, uint32_t high_address, return false; } - std::lock_guard lock(heap_mutex_); + auto global_lock = global_critical_region_.Acquire(); // Find a free page range. 
// The base page must match the requested alignment, so we first scan for @@ -751,7 +750,7 @@ bool BaseHeap::Decommit(uint32_t address, uint32_t size) { std::min(uint32_t(page_table_.size()) - 1, start_page_number); end_page_number = std::min(uint32_t(page_table_.size()) - 1, end_page_number); - std::lock_guard lock(heap_mutex_); + auto global_lock = global_critical_region_.Acquire(); // Release from host. // TODO(benvanik): find a way to actually decommit memory; @@ -775,7 +774,7 @@ bool BaseHeap::Decommit(uint32_t address, uint32_t size) { } bool BaseHeap::Release(uint32_t base_address, uint32_t* out_region_size) { - std::lock_guard lock(heap_mutex_); + auto global_lock = global_critical_region_.Acquire(); // Given address must be a region base address. uint32_t base_page_number = (base_address - heap_base_) / page_size_; @@ -831,7 +830,7 @@ bool BaseHeap::Protect(uint32_t address, uint32_t size, uint32_t protect) { std::min(uint32_t(page_table_.size()) - 1, start_page_number); end_page_number = std::min(uint32_t(page_table_.size()) - 1, end_page_number); - std::lock_guard lock(heap_mutex_); + auto global_lock = global_critical_region_.Acquire(); // Ensure all pages are in the same reserved region and all are committed. 
uint32_t first_base_address = UINT_MAX; @@ -883,7 +882,7 @@ bool BaseHeap::QueryRegionInfo(uint32_t base_address, return false; } - std::lock_guard lock(heap_mutex_); + auto global_lock = global_critical_region_.Acquire(); auto start_page_entry = page_table_[start_page_number]; out_info->base_address = base_address; @@ -934,7 +933,7 @@ bool BaseHeap::QuerySize(uint32_t address, uint32_t* out_size) { *out_size = 0; return false; } - std::lock_guard lock(heap_mutex_); + auto global_lock = global_critical_region_.Acquire(); auto page_entry = page_table_[page_number]; *out_size = (page_entry.region_page_count * page_size_); return true; @@ -947,7 +946,7 @@ bool BaseHeap::QueryProtect(uint32_t address, uint32_t* out_protect) { *out_protect = 0; return false; } - std::lock_guard lock(heap_mutex_); + auto global_lock = global_critical_region_.Acquire(); auto page_entry = page_table_[page_number]; *out_protect = page_entry.current_protect; return true; @@ -995,7 +994,7 @@ bool PhysicalHeap::Alloc(uint32_t size, uint32_t alignment, size = xe::round_up(size, page_size_); alignment = xe::round_up(alignment, page_size_); - std::lock_guard lock(heap_mutex_); + auto global_lock = global_critical_region_.Acquire(); // Allocate from parent heap (gets our physical address in 0-512mb). uint32_t parent_low_address = GetPhysicalAddress(heap_base_); @@ -1033,7 +1032,7 @@ bool PhysicalHeap::AllocFixed(uint32_t base_address, uint32_t size, size = xe::round_up(size, page_size_); alignment = xe::round_up(alignment, page_size_); - std::lock_guard lock(heap_mutex_); + auto global_lock = global_critical_region_.Acquire(); // Allocate from parent heap (gets our physical address in 0-512mb). 
// NOTE: this can potentially overwrite heap contents if there are already @@ -1074,7 +1073,7 @@ bool PhysicalHeap::AllocRange(uint32_t low_address, uint32_t high_address, size = xe::round_up(size, page_size_); alignment = xe::round_up(alignment, page_size_); - std::lock_guard lock(heap_mutex_); + auto global_lock = global_critical_region_.Acquire(); // Allocate from parent heap (gets our physical address in 0-512mb). low_address = std::max(heap_base_, low_address); @@ -1108,7 +1107,7 @@ bool PhysicalHeap::AllocRange(uint32_t low_address, uint32_t high_address, } bool PhysicalHeap::Decommit(uint32_t address, uint32_t size) { - std::lock_guard lock(heap_mutex_); + auto global_lock = global_critical_region_.Acquire(); uint32_t parent_address = GetPhysicalAddress(address); if (!parent_heap_->Decommit(parent_address, size)) { XELOGE("PhysicalHeap::Decommit failed due to parent heap failure"); @@ -1118,7 +1117,7 @@ bool PhysicalHeap::Decommit(uint32_t address, uint32_t size) { } bool PhysicalHeap::Release(uint32_t base_address, uint32_t* out_region_size) { - std::lock_guard lock(heap_mutex_); + auto global_lock = global_critical_region_.Acquire(); uint32_t parent_base_address = GetPhysicalAddress(base_address); if (!parent_heap_->Release(parent_base_address, out_region_size)) { XELOGE("PhysicalHeap::Release failed due to parent heap failure"); @@ -1128,7 +1127,7 @@ bool PhysicalHeap::Release(uint32_t base_address, uint32_t* out_region_size) { } bool PhysicalHeap::Protect(uint32_t address, uint32_t size, uint32_t protect) { - std::lock_guard lock(heap_mutex_); + auto global_lock = global_critical_region_.Acquire(); uint32_t parent_address = GetPhysicalAddress(address); bool parent_result = parent_heap_->Protect(parent_address, size, protect); if (!parent_result) { diff --git a/src/xenia/memory.h b/src/xenia/memory.h index e68c3e49e..0e51f6ca6 100644 --- a/src/xenia/memory.h +++ b/src/xenia/memory.h @@ -12,7 +12,6 @@ #include #include -#include #include #include @@ -115,8 
+114,8 @@ class BaseHeap { uint32_t heap_base_; uint32_t heap_size_; uint32_t page_size_; + xe::global_critical_region global_critical_region_; std::vector page_table_; - xe::recursive_mutex heap_mutex_; }; class VirtualHeap : public BaseHeap { diff --git a/src/xenia/vfs/device.cc b/src/xenia/vfs/device.cc index 09625213b..0f5d3aa08 100644 --- a/src/xenia/vfs/device.cc +++ b/src/xenia/vfs/device.cc @@ -19,7 +19,7 @@ Device::Device(const std::string& mount_path) : mount_path_(mount_path) {} Device::~Device() = default; void Device::Dump(StringBuffer* string_buffer) { - std::lock_guard lock(mutex_); + auto global_lock = global_critical_region_.Acquire(); root_entry_->Dump(string_buffer, 0); } diff --git a/src/xenia/vfs/device.h b/src/xenia/vfs/device.h index 36cfd04a6..b317891d7 100644 --- a/src/xenia/vfs/device.h +++ b/src/xenia/vfs/device.h @@ -28,7 +28,6 @@ class Device { virtual bool Initialize() = 0; void Dump(StringBuffer* string_buffer); - xe::recursive_mutex& mutex() { return mutex_; } const std::string& mount_path() const { return mount_path_; } virtual bool is_read_only() const { return true; } @@ -41,7 +40,7 @@ class Device { virtual uint32_t bytes_per_sector() const = 0; protected: - xe::recursive_mutex mutex_; + xe::global_critical_region global_critical_region_; std::string mount_path_; std::unique_ptr root_entry_; }; diff --git a/src/xenia/vfs/entry.cc b/src/xenia/vfs/entry.cc index d1c81ec6f..d07795520 100644 --- a/src/xenia/vfs/entry.cc +++ b/src/xenia/vfs/entry.cc @@ -47,7 +47,7 @@ void Entry::Dump(xe::StringBuffer* string_buffer, int indent) { bool Entry::is_read_only() const { return device_->is_read_only(); } Entry* Entry::GetChild(std::string name) { - std::lock_guard lock(device_->mutex()); + auto global_lock = global_critical_region_.Acquire(); // TODO(benvanik): a faster search for (auto& child : children_) { if (strcasecmp(child->name().c_str(), name.c_str()) == 0) { @@ -59,7 +59,7 @@ Entry* Entry::GetChild(std::string name) { Entry* 
Entry::IterateChildren(const xe::filesystem::WildcardEngine& engine, size_t* current_index) { - std::lock_guard lock(device_->mutex()); + auto global_lock = global_critical_region_.Acquire(); while (*current_index < children_.size()) { auto& child = children_[*current_index]; *current_index = *current_index + 1; @@ -71,10 +71,10 @@ Entry* Entry::IterateChildren(const xe::filesystem::WildcardEngine& engine, } Entry* Entry::CreateEntry(std::string name, uint32_t attributes) { + auto global_lock = global_critical_region_.Acquire(); if (is_read_only()) { return nullptr; } - std::lock_guard lock(device_->mutex()); if (GetChild(name)) { // Already exists. return nullptr; @@ -90,10 +90,10 @@ Entry* Entry::CreateEntry(std::string name, uint32_t attributes) { } bool Entry::Delete(Entry* entry) { + auto global_lock = global_critical_region_.Acquire(); if (is_read_only()) { return false; } - std::lock_guard lock(device_->mutex()); if (entry->parent() != this) { return false; } diff --git a/src/xenia/vfs/entry.h b/src/xenia/vfs/entry.h index 55fcfd194..bc7852a86 100644 --- a/src/xenia/vfs/entry.h +++ b/src/xenia/vfs/entry.h @@ -16,6 +16,7 @@ #include "xenia/base/filesystem.h" #include "xenia/base/mapped_memory.h" +#include "xenia/base/mutex.h" #include "xenia/base/string_buffer.h" #include "xenia/kernel/xobject.h" #include "xenia/xbox.h" @@ -125,6 +126,7 @@ class Entry { } virtual bool DeleteEntryInternal(Entry* entry) { return false; } + xe::global_critical_region global_critical_region_; Device* device_; Entry* parent_; std::string path_; diff --git a/src/xenia/vfs/virtual_file_system.cc b/src/xenia/vfs/virtual_file_system.cc index 4aeb126a1..879ac1480 100644 --- a/src/xenia/vfs/virtual_file_system.cc +++ b/src/xenia/vfs/virtual_file_system.cc @@ -27,20 +27,20 @@ VirtualFileSystem::~VirtualFileSystem() { } bool VirtualFileSystem::RegisterDevice(std::unique_ptr device) { - std::lock_guard lock(mutex_); + auto global_lock = global_critical_region_.Acquire(); 
devices_.emplace_back(std::move(device)); return true; } bool VirtualFileSystem::RegisterSymbolicLink(std::string path, std::string target) { - std::lock_guard lock(mutex_); + auto global_lock = global_critical_region_.Acquire(); symlinks_.insert({path, target}); return true; } bool VirtualFileSystem::UnregisterSymbolicLink(std::string path) { - std::lock_guard lock(mutex_); + auto global_lock = global_critical_region_.Acquire(); auto it = symlinks_.find(path); if (it == symlinks_.end()) { return false; @@ -50,7 +50,7 @@ bool VirtualFileSystem::UnregisterSymbolicLink(std::string path) { } Entry* VirtualFileSystem::ResolvePath(std::string path) { - std::lock_guard lock(mutex_); + auto global_lock = global_critical_region_.Acquire(); // Resolve relative paths std::string normalized_path(xe::filesystem::CanonicalizePath(path)); diff --git a/src/xenia/vfs/virtual_file_system.h b/src/xenia/vfs/virtual_file_system.h index 68b54d1eb..cd854870b 100644 --- a/src/xenia/vfs/virtual_file_system.h +++ b/src/xenia/vfs/virtual_file_system.h @@ -46,7 +46,7 @@ class VirtualFileSystem { FileAction* out_action); private: - xe::mutex mutex_; + xe::global_critical_region global_critical_region_; std::vector> devices_; std::unordered_map symlinks_; };