DANGER DANGER. Switching to global critical region.
This changes almost all locks held by guest threads to use a single global critical region. This emulates the behavior on the PPC of disabling interrupts (by calls like KeRaiseIrqlToDpcLevel or masking interrupts), and prevents deadlocks from occuring when threads are suspended or otherwise blocked. This has performance implications and a pass is needed to ensure the locking is as granular as possible. It could also break everything because it's fundamentally unsound. We'll see.
This commit is contained in:
parent
33270cd2a0
commit
3c96b6fa0a
|
@ -124,10 +124,10 @@ void AudioSystem::WorkerThreadMain() {
|
|||
if (result.first == xe::threading::WaitResult::kSuccess) {
|
||||
size_t index = result.second;
|
||||
do {
|
||||
lock_.lock();
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
uint32_t client_callback = clients_[index].callback;
|
||||
uint32_t client_callback_arg = clients_[index].wrapped_callback_arg;
|
||||
lock_.unlock();
|
||||
global_lock.unlock();
|
||||
|
||||
if (client_callback) {
|
||||
SCOPE_profile_cpu_i("apu", "xe::apu::AudioSystem->client_callback");
|
||||
|
@ -169,7 +169,7 @@ void AudioSystem::Shutdown() {
|
|||
X_STATUS AudioSystem::RegisterClient(uint32_t callback, uint32_t callback_arg,
|
||||
size_t* out_index) {
|
||||
assert_true(unused_clients_.size());
|
||||
std::lock_guard<xe::mutex> lock(lock_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
|
||||
auto index = unused_clients_.front();
|
||||
|
||||
|
@ -201,7 +201,7 @@ X_STATUS AudioSystem::RegisterClient(uint32_t callback, uint32_t callback_arg,
|
|||
void AudioSystem::SubmitFrame(size_t index, uint32_t samples_ptr) {
|
||||
SCOPE_profile_cpu_f("apu");
|
||||
|
||||
std::lock_guard<xe::mutex> lock(lock_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
assert_true(index < kMaximumClientCount);
|
||||
assert_true(clients_[index].driver != NULL);
|
||||
(clients_[index].driver)->SubmitFrame(samples_ptr);
|
||||
|
@ -210,7 +210,7 @@ void AudioSystem::SubmitFrame(size_t index, uint32_t samples_ptr) {
|
|||
void AudioSystem::UnregisterClient(size_t index) {
|
||||
SCOPE_profile_cpu_f("apu");
|
||||
|
||||
std::lock_guard<xe::mutex> lock(lock_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
assert_true(index < kMaximumClientCount);
|
||||
DestroyDriver(clients_[index].driver);
|
||||
clients_[index] = {0};
|
||||
|
|
|
@ -11,9 +11,9 @@
|
|||
#define XENIA_APU_AUDIO_SYSTEM_H_
|
||||
|
||||
#include <atomic>
|
||||
#include <mutex>
|
||||
#include <queue>
|
||||
|
||||
#include "xenia/base/mutex.h"
|
||||
#include "xenia/base/threading.h"
|
||||
#include "xenia/cpu/processor.h"
|
||||
#include "xenia/kernel/objects/xthread.h"
|
||||
|
@ -67,10 +67,8 @@ class AudioSystem {
|
|||
std::atomic<bool> worker_running_ = {false};
|
||||
kernel::object_ref<kernel::XHostThread> worker_thread_;
|
||||
|
||||
xe::mutex lock_;
|
||||
|
||||
xe::global_critical_region global_critical_region_;
|
||||
static const size_t kMaximumClientCount = 8;
|
||||
|
||||
struct {
|
||||
AudioDriver* driver;
|
||||
uint32_t callback;
|
||||
|
|
|
@ -11,9 +11,9 @@
|
|||
#define XENIA_APU_XMA_CONTEXT_H_
|
||||
|
||||
#include <atomic>
|
||||
#include <mutex>
|
||||
#include <queue>
|
||||
|
||||
#include "xenia/base/mutex.h"
|
||||
#include "xenia/memory.h"
|
||||
#include "xenia/xbox.h"
|
||||
|
||||
|
|
|
@ -11,10 +11,10 @@
|
|||
#define XENIA_APU_XMA_DECODER_H_
|
||||
|
||||
#include <atomic>
|
||||
#include <mutex>
|
||||
#include <queue>
|
||||
|
||||
#include "xenia/apu/xma_context.h"
|
||||
#include "xenia/base/mutex.h"
|
||||
#include "xenia/kernel/objects/xthread.h"
|
||||
#include "xenia/xbox.h"
|
||||
|
||||
|
|
|
@ -0,0 +1,19 @@
|
|||
/**
|
||||
******************************************************************************
|
||||
* Xenia : Xbox 360 Emulator Research Project *
|
||||
******************************************************************************
|
||||
* Copyright 2015 Ben Vanik. All rights reserved. *
|
||||
* Released under the BSD license - see LICENSE in the root for more details. *
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#include "xenia/base/mutex.h"
|
||||
|
||||
namespace xe {
|
||||
|
||||
xe::recursive_mutex& global_critical_region::mutex() {
|
||||
static xe::recursive_mutex global_mutex;
|
||||
return global_mutex;
|
||||
}
|
||||
|
||||
} // namespace xe
|
|
@ -21,6 +21,68 @@ namespace xe {
|
|||
using mutex = std::mutex;
|
||||
using recursive_mutex = std::recursive_mutex;
|
||||
|
||||
// The global critical region mutex singleton.
|
||||
// This must guard any operation that may suspend threads or be sensitive to
|
||||
// being suspended such as global table locks and such.
|
||||
// To prevent deadlocks this should be the first lock acquired and be held
|
||||
// for the entire duration of the critical region (longer than any other lock).
|
||||
//
|
||||
// As a general rule if some code can only be accessed from the guest you can
|
||||
// guard it with only the global critical region and be assured nothing else
|
||||
// will touch it. If it will be accessed from non-guest threads you may need
|
||||
// some additional protection.
|
||||
//
|
||||
// You can think of this as disabling interrupts in the guest. The thread in the
|
||||
// global critical region has exclusive access to the entire system and cannot
|
||||
// be preempted. This also means that all activity done while in the critical
|
||||
// region must be extremely fast (no IO!), as it has the chance to block any
|
||||
// other thread until its done.
|
||||
//
|
||||
// For example, in the following situation thread 1 will not be able to suspend
|
||||
// thread 0 until it has exited its critical region, preventing it from being
|
||||
// suspended while holding the table lock:
|
||||
// [thread 0]:
|
||||
// DoKernelStuff():
|
||||
// auto global_lock = global_critical_region_.Acquire();
|
||||
// std::lock_guard<xe::mutex> table_lock(table_mutex_);
|
||||
// table_->InsertStuff();
|
||||
// [thread 1]:
|
||||
// MySuspendThread():
|
||||
// auto global_lock = global_critical_region_.Acquire();
|
||||
// ::SuspendThread(thread0);
|
||||
//
|
||||
// To use the region it's strongly recommended that you keep an instance near
|
||||
// the data requiring it. This makes it clear to those reading that the data
|
||||
// is protected by the global critical region. For example:
|
||||
// class MyType {
|
||||
// // Implies my_list_ is protected:
|
||||
// xe::global_critical_region global_critical_region_;
|
||||
// std::list<...> my_list_;
|
||||
// };
|
||||
class global_critical_region {
|
||||
public:
|
||||
static xe::recursive_mutex& mutex();
|
||||
|
||||
// Acquires a lock on the global critical section.
|
||||
// Use this when keeping an instance is not possible. Otherwise, prefer
|
||||
// to keep an instance of global_critical_region near the members requiring
|
||||
// it to keep things readable.
|
||||
static std::unique_lock<xe::recursive_mutex> AcquireDirect() {
|
||||
return std::unique_lock<xe::recursive_mutex>(mutex());
|
||||
}
|
||||
|
||||
// Acquires a lock on the global critical section.
|
||||
inline std::unique_lock<xe::recursive_mutex> Acquire() {
|
||||
return std::unique_lock<xe::recursive_mutex>(mutex());
|
||||
}
|
||||
|
||||
// Tries to acquire a lock on the glboal critical section.
|
||||
// Check owns_lock() to see if the lock was successfully acquired.
|
||||
inline std::unique_lock<xe::recursive_mutex> TryAcquire() {
|
||||
return std::unique_lock<xe::recursive_mutex>(mutex(), std::try_to_lock);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace xe
|
||||
|
||||
#endif // XENIA_BASE_MUTEX_H_
|
||||
|
|
|
@ -10,8 +10,6 @@
|
|||
#ifndef XENIA_BASE_RESET_SCOPE_H_
|
||||
#define XENIA_BASE_RESET_SCOPE_H_
|
||||
|
||||
#include <mutex>
|
||||
|
||||
namespace xe {
|
||||
|
||||
template <typename T>
|
||||
|
|
|
@ -129,7 +129,7 @@ void* X64CodeCache::PlaceGuestCode(uint32_t guest_address, void* machine_code,
|
|||
uint8_t* code_address = nullptr;
|
||||
UnwindReservation unwind_reservation;
|
||||
{
|
||||
std::lock_guard<xe::mutex> allocation_lock(allocation_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
|
||||
low_mark = generated_code_offset_;
|
||||
|
||||
|
@ -192,7 +192,7 @@ uint32_t X64CodeCache::PlaceData(const void* data, size_t length) {
|
|||
size_t high_mark;
|
||||
uint8_t* data_address = nullptr;
|
||||
{
|
||||
std::lock_guard<xe::mutex> allocation_lock(allocation_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
|
||||
// Reserve code.
|
||||
// Always move the code to land on 16b alignment.
|
||||
|
|
|
@ -12,7 +12,6 @@
|
|||
|
||||
#include <atomic>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
@ -91,9 +90,9 @@ class X64CodeCache : public CodeCache {
|
|||
std::wstring file_name_;
|
||||
xe::memory::FileMappingHandle mapping_ = nullptr;
|
||||
|
||||
// Must be held when manipulating the offsets or counts of anything, to keep
|
||||
// the tables consistent and ordered.
|
||||
xe::mutex allocation_mutex_;
|
||||
// NOTE: the global critical region must be held when manipulating the offsets
|
||||
// or counts of anything, to keep the tables consistent and ordered.
|
||||
xe::global_critical_region global_critical_region_;
|
||||
|
||||
// Value that the indirection table will be initialized with upon commit.
|
||||
uint32_t indirection_default_value_ = 0xFEEDF00D;
|
||||
|
|
|
@ -18,7 +18,7 @@ namespace cpu {
|
|||
EntryTable::EntryTable() = default;
|
||||
|
||||
EntryTable::~EntryTable() {
|
||||
std::lock_guard<xe::mutex> guard(lock_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
for (auto it : map_) {
|
||||
Entry* entry = it.second;
|
||||
delete entry;
|
||||
|
@ -26,7 +26,7 @@ EntryTable::~EntryTable() {
|
|||
}
|
||||
|
||||
Entry* EntryTable::Get(uint32_t address) {
|
||||
std::lock_guard<xe::mutex> guard(lock_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
const auto& it = map_.find(address);
|
||||
Entry* entry = it != map_.end() ? it->second : nullptr;
|
||||
if (entry) {
|
||||
|
@ -42,7 +42,7 @@ Entry::Status EntryTable::GetOrCreate(uint32_t address, Entry** out_entry) {
|
|||
// TODO(benvanik): replace with a map with wait-free for find.
|
||||
// https://github.com/facebook/folly/blob/master/folly/AtomicHashMap.h
|
||||
|
||||
lock_.lock();
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
const auto& it = map_.find(address);
|
||||
Entry* entry = it != map_.end() ? it->second : nullptr;
|
||||
Entry::Status status;
|
||||
|
@ -51,10 +51,10 @@ Entry::Status EntryTable::GetOrCreate(uint32_t address, Entry** out_entry) {
|
|||
if (entry->status == Entry::STATUS_COMPILING) {
|
||||
// Still compiling, so spin.
|
||||
do {
|
||||
lock_.unlock();
|
||||
global_lock.unlock();
|
||||
// TODO(benvanik): sleep for less time?
|
||||
xe::threading::Sleep(std::chrono::microseconds(10));
|
||||
lock_.lock();
|
||||
global_lock.lock();
|
||||
} while (entry->status == Entry::STATUS_COMPILING);
|
||||
}
|
||||
status = entry->status;
|
||||
|
@ -68,13 +68,13 @@ Entry::Status EntryTable::GetOrCreate(uint32_t address, Entry** out_entry) {
|
|||
map_[address] = entry;
|
||||
status = Entry::STATUS_NEW;
|
||||
}
|
||||
lock_.unlock();
|
||||
global_lock.unlock();
|
||||
*out_entry = entry;
|
||||
return status;
|
||||
}
|
||||
|
||||
std::vector<Function*> EntryTable::FindWithAddress(uint32_t address) {
|
||||
std::lock_guard<xe::mutex> guard(lock_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
std::vector<Function*> fns;
|
||||
for (auto& it : map_) {
|
||||
Entry* entry = it.second;
|
||||
|
|
|
@ -10,7 +10,6 @@
|
|||
#ifndef XENIA_CPU_ENTRY_TABLE_H_
|
||||
#define XENIA_CPU_ENTRY_TABLE_H_
|
||||
|
||||
#include <mutex>
|
||||
#include <unordered_map>
|
||||
#include <vector>
|
||||
|
||||
|
@ -46,8 +45,8 @@ class EntryTable {
|
|||
std::vector<Function*> FindWithAddress(uint32_t address);
|
||||
|
||||
private:
|
||||
xe::global_critical_region global_critical_region_;
|
||||
// TODO(benvanik): replace with a better data structure.
|
||||
xe::mutex lock_;
|
||||
std::unordered_map<uint32_t, Entry*> map_;
|
||||
};
|
||||
|
||||
|
|
|
@ -9,6 +9,7 @@
|
|||
|
||||
#include "xenia/cpu/frontend/ppc_frontend.h"
|
||||
|
||||
#include "xenia/base/atomic.h"
|
||||
#include "xenia/cpu/frontend/ppc_context.h"
|
||||
#include "xenia/cpu/frontend/ppc_disasm.h"
|
||||
#include "xenia/cpu/frontend/ppc_emit.h"
|
||||
|
@ -71,20 +72,20 @@ void EnterGlobalLock(PPCContext* ppc_context, void* arg0, void* arg1) {
|
|||
auto global_mutex = reinterpret_cast<xe::recursive_mutex*>(arg0);
|
||||
auto global_lock_count = reinterpret_cast<int32_t*>(arg1);
|
||||
global_mutex->lock();
|
||||
*global_lock_count = *global_lock_count + 1;
|
||||
xe::atomic_inc(global_lock_count);
|
||||
}
|
||||
|
||||
// Leaves the global lock. Safe to recursion.
|
||||
void LeaveGlobalLock(PPCContext* ppc_context, void* arg0, void* arg1) {
|
||||
auto global_mutex = reinterpret_cast<xe::recursive_mutex*>(arg0);
|
||||
auto global_lock_count = reinterpret_cast<int32_t*>(arg1);
|
||||
*global_lock_count = *global_lock_count - 1;
|
||||
assert_true(*global_lock_count >= 0);
|
||||
auto new_lock_count = xe::atomic_dec(global_lock_count);
|
||||
assert_true(new_lock_count >= 0);
|
||||
global_mutex->unlock();
|
||||
}
|
||||
|
||||
bool PPCFrontend::Initialize() {
|
||||
void* arg0 = reinterpret_cast<void*>(processor_->global_mutex());
|
||||
void* arg0 = reinterpret_cast<void*>(&xe::global_critical_region::mutex());
|
||||
void* arg1 = reinterpret_cast<void*>(&builtins_.global_lock_count);
|
||||
builtins_.check_global_lock =
|
||||
processor_->DefineBuiltin("CheckGlobalLock", CheckGlobalLock, arg0, arg1);
|
||||
|
|
|
@ -11,9 +11,7 @@
|
|||
#define XENIA_CPU_FRONTEND_PPC_FRONTEND_H_
|
||||
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
|
||||
#include "xenia/base/mutex.h"
|
||||
#include "xenia/base/type_pool.h"
|
||||
#include "xenia/cpu/frontend/context_info.h"
|
||||
#include "xenia/cpu/function.h"
|
||||
|
|
|
@ -13,7 +13,6 @@
|
|||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "xenia/base/mutex.h"
|
||||
#include "xenia/cpu/debug_info.h"
|
||||
#include "xenia/cpu/frontend/ppc_context.h"
|
||||
#include "xenia/cpu/symbol.h"
|
||||
|
|
|
@ -113,9 +113,9 @@ uintptr_t MMIOHandler::AddPhysicalWriteWatch(uint32_t guest_address,
|
|||
entry->callback = callback;
|
||||
entry->callback_context = callback_context;
|
||||
entry->callback_data = callback_data;
|
||||
write_watch_mutex_.lock();
|
||||
global_critical_region_.mutex().lock();
|
||||
write_watches_.push_back(entry);
|
||||
write_watch_mutex_.unlock();
|
||||
global_critical_region_.mutex().unlock();
|
||||
|
||||
// Make the desired range read only under all address spaces.
|
||||
xe::memory::Protect(physical_membase_ + entry->address, entry->length,
|
||||
|
@ -154,12 +154,12 @@ void MMIOHandler::CancelWriteWatch(uintptr_t watch_handle) {
|
|||
ClearWriteWatch(entry);
|
||||
|
||||
// Remove from table.
|
||||
write_watch_mutex_.lock();
|
||||
global_critical_region_.mutex().lock();
|
||||
auto it = std::find(write_watches_.begin(), write_watches_.end(), entry);
|
||||
if (it != write_watches_.end()) {
|
||||
write_watches_.erase(it);
|
||||
}
|
||||
write_watch_mutex_.unlock();
|
||||
global_critical_region_.mutex().unlock();
|
||||
|
||||
delete entry;
|
||||
}
|
||||
|
@ -170,7 +170,7 @@ bool MMIOHandler::CheckWriteWatch(void* thread_state, uint64_t fault_address) {
|
|||
physical_address &= 0x1FFFFFFF;
|
||||
}
|
||||
std::list<WriteWatchEntry*> pending_invalidates;
|
||||
write_watch_mutex_.lock();
|
||||
global_critical_region_.mutex().lock();
|
||||
for (auto it = write_watches_.begin(); it != write_watches_.end();) {
|
||||
auto entry = *it;
|
||||
if (entry->address <= physical_address &&
|
||||
|
@ -186,7 +186,7 @@ bool MMIOHandler::CheckWriteWatch(void* thread_state, uint64_t fault_address) {
|
|||
}
|
||||
++it;
|
||||
}
|
||||
write_watch_mutex_.unlock();
|
||||
global_critical_region_.mutex().unlock();
|
||||
if (pending_invalidates.empty()) {
|
||||
// Rethrow access violation - range was not being watched.
|
||||
return false;
|
||||
|
|
|
@ -12,7 +12,6 @@
|
|||
|
||||
#include <list>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <vector>
|
||||
|
||||
#include "xenia/base/mutex.h"
|
||||
|
@ -90,8 +89,8 @@ class MMIOHandler {
|
|||
|
||||
std::vector<MMIORange> mapped_ranges_;
|
||||
|
||||
xe::global_critical_region global_critical_region_;
|
||||
// TODO(benvanik): data structure magic.
|
||||
xe::mutex write_watch_mutex_;
|
||||
std::list<WriteWatchEntry*> write_watches_;
|
||||
|
||||
static MMIOHandler* global_handler_;
|
||||
|
|
|
@ -29,7 +29,7 @@ Module::~Module() = default;
|
|||
bool Module::ContainsAddress(uint32_t address) { return true; }
|
||||
|
||||
Symbol* Module::LookupSymbol(uint32_t address, bool wait) {
|
||||
lock_.lock();
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
const auto it = map_.find(address);
|
||||
Symbol* symbol = it != map_.end() ? it->second : nullptr;
|
||||
if (symbol) {
|
||||
|
@ -37,10 +37,10 @@ Symbol* Module::LookupSymbol(uint32_t address, bool wait) {
|
|||
// Some other thread is declaring the symbol - wait.
|
||||
if (wait) {
|
||||
do {
|
||||
lock_.unlock();
|
||||
global_lock.unlock();
|
||||
// TODO(benvanik): sleep for less time?
|
||||
xe::threading::Sleep(std::chrono::microseconds(100));
|
||||
lock_.lock();
|
||||
global_lock.lock();
|
||||
} while (symbol->status() == Symbol::Status::kDeclaring);
|
||||
} else {
|
||||
// Immediate request, just return.
|
||||
|
@ -48,31 +48,31 @@ Symbol* Module::LookupSymbol(uint32_t address, bool wait) {
|
|||
}
|
||||
}
|
||||
}
|
||||
lock_.unlock();
|
||||
global_lock.unlock();
|
||||
return symbol;
|
||||
}
|
||||
|
||||
Symbol::Status Module::DeclareSymbol(Symbol::Type type, uint32_t address,
|
||||
Symbol** out_symbol) {
|
||||
*out_symbol = nullptr;
|
||||
lock_.lock();
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
auto it = map_.find(address);
|
||||
Symbol* symbol = it != map_.end() ? it->second : nullptr;
|
||||
Symbol::Status status;
|
||||
if (symbol) {
|
||||
// If we exist but are the wrong type, die.
|
||||
if (symbol->type() != type) {
|
||||
lock_.unlock();
|
||||
global_lock.unlock();
|
||||
return Symbol::Status::kFailed;
|
||||
}
|
||||
// If we aren't ready yet spin and wait.
|
||||
if (symbol->status() == Symbol::Status::kDeclaring) {
|
||||
// Still declaring, so spin.
|
||||
do {
|
||||
lock_.unlock();
|
||||
global_lock.unlock();
|
||||
// TODO(benvanik): sleep for less time?
|
||||
xe::threading::Sleep(std::chrono::microseconds(100));
|
||||
lock_.lock();
|
||||
global_lock.lock();
|
||||
} while (symbol->status() == Symbol::Status::kDeclaring);
|
||||
}
|
||||
status = symbol->status();
|
||||
|
@ -90,7 +90,7 @@ Symbol::Status Module::DeclareSymbol(Symbol::Type type, uint32_t address,
|
|||
list_.emplace_back(symbol);
|
||||
status = Symbol::Status::kNew;
|
||||
}
|
||||
lock_.unlock();
|
||||
global_lock.unlock();
|
||||
*out_symbol = symbol;
|
||||
|
||||
// Get debug info from providers, if this is new.
|
||||
|
@ -117,7 +117,7 @@ Symbol::Status Module::DeclareVariable(uint32_t address, Symbol** out_symbol) {
|
|||
}
|
||||
|
||||
Symbol::Status Module::DefineSymbol(Symbol* symbol) {
|
||||
lock_.lock();
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
Symbol::Status status;
|
||||
if (symbol->status() == Symbol::Status::kDeclared) {
|
||||
// Declared but undefined, so request caller define it.
|
||||
|
@ -126,16 +126,16 @@ Symbol::Status Module::DefineSymbol(Symbol* symbol) {
|
|||
} else if (symbol->status() == Symbol::Status::kDefining) {
|
||||
// Still defining, so spin.
|
||||
do {
|
||||
lock_.unlock();
|
||||
global_lock.unlock();
|
||||
// TODO(benvanik): sleep for less time?
|
||||
xe::threading::Sleep(std::chrono::microseconds(100));
|
||||
lock_.lock();
|
||||
global_lock.lock();
|
||||
} while (symbol->status() == Symbol::Status::kDefining);
|
||||
status = symbol->status();
|
||||
} else {
|
||||
status = symbol->status();
|
||||
}
|
||||
lock_.unlock();
|
||||
global_lock.unlock();
|
||||
return status;
|
||||
}
|
||||
|
||||
|
@ -148,7 +148,7 @@ Symbol::Status Module::DefineVariable(Symbol* symbol) {
|
|||
}
|
||||
|
||||
void Module::ForEachFunction(std::function<void(Function*)> callback) {
|
||||
std::lock_guard<xe::mutex> guard(lock_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
for (auto& symbol : list_) {
|
||||
if (symbol->type() == Symbol::Type::kFunction) {
|
||||
Function* info = static_cast<Function*>(symbol.get());
|
||||
|
@ -159,7 +159,7 @@ void Module::ForEachFunction(std::function<void(Function*)> callback) {
|
|||
|
||||
void Module::ForEachSymbol(size_t start_index, size_t end_index,
|
||||
std::function<void(Symbol*)> callback) {
|
||||
std::lock_guard<xe::mutex> guard(lock_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
start_index = std::min(start_index, list_.size());
|
||||
end_index = std::min(end_index, list_.size());
|
||||
for (size_t i = start_index; i <= end_index; ++i) {
|
||||
|
@ -169,7 +169,7 @@ void Module::ForEachSymbol(size_t start_index, size_t end_index,
|
|||
}
|
||||
|
||||
size_t Module::QuerySymbolCount() {
|
||||
std::lock_guard<xe::mutex> guard(lock_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
return list_.size();
|
||||
}
|
||||
|
||||
|
|
|
@ -12,7 +12,6 @@
|
|||
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
#include <vector>
|
||||
|
@ -64,8 +63,8 @@ class Module {
|
|||
Symbol** out_symbol);
|
||||
Symbol::Status DefineSymbol(Symbol* symbol);
|
||||
|
||||
xe::global_critical_region global_critical_region_;
|
||||
// TODO(benvanik): replace with a better data structure.
|
||||
xe::mutex lock_;
|
||||
std::unordered_map<uint32_t, Symbol*> map_;
|
||||
std::vector<std::unique_ptr<Symbol>> list_;
|
||||
};
|
||||
|
|
|
@ -60,7 +60,7 @@ Processor::Processor(xe::Memory* memory, ExportResolver* export_resolver,
|
|||
|
||||
Processor::~Processor() {
|
||||
{
|
||||
std::lock_guard<xe::mutex> guard(modules_lock_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
modules_.clear();
|
||||
}
|
||||
|
||||
|
@ -126,13 +126,13 @@ bool Processor::Setup() {
|
|||
}
|
||||
|
||||
bool Processor::AddModule(std::unique_ptr<Module> module) {
|
||||
std::lock_guard<xe::mutex> guard(modules_lock_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
modules_.push_back(std::move(module));
|
||||
return true;
|
||||
}
|
||||
|
||||
Module* Processor::GetModule(const char* name) {
|
||||
std::lock_guard<xe::mutex> guard(modules_lock_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
for (const auto& module : modules_) {
|
||||
if (module->name() == name) {
|
||||
return module.get();
|
||||
|
@ -142,7 +142,7 @@ Module* Processor::GetModule(const char* name) {
|
|||
}
|
||||
|
||||
std::vector<Module*> Processor::GetModules() {
|
||||
std::lock_guard<xe::mutex> guard(modules_lock_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
std::vector<Module*> clone(modules_.size());
|
||||
for (const auto& module : modules_) {
|
||||
clone.push_back(module.get());
|
||||
|
@ -215,7 +215,7 @@ Function* Processor::LookupFunction(uint32_t address) {
|
|||
// Find the module that contains the address.
|
||||
Module* code_module = nullptr;
|
||||
{
|
||||
std::lock_guard<xe::mutex> guard(modules_lock_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
// TODO(benvanik): sort by code address (if contiguous) so can bsearch.
|
||||
// TODO(benvanik): cache last module low/high, as likely to be in there.
|
||||
for (const auto& module : modules_) {
|
||||
|
@ -335,7 +335,7 @@ uint64_t Processor::ExecuteInterrupt(ThreadState* thread_state,
|
|||
// Hold the global lock during interrupt dispatch.
|
||||
// This will block if any code is in a critical region (has interrupts
|
||||
// disabled) or if any other interrupt is executing.
|
||||
std::lock_guard<xe::recursive_mutex> lock(global_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
|
||||
PPCContext* context = thread_state->context();
|
||||
assert_true(arg_count <= 5);
|
||||
|
|
|
@ -86,7 +86,6 @@ class Processor {
|
|||
uint64_t ExecuteInterrupt(ThreadState* thread_state, uint32_t address,
|
||||
uint64_t args[], size_t arg_count);
|
||||
|
||||
xe::recursive_mutex* global_mutex() { return &global_mutex_; }
|
||||
Irql RaiseIrql(Irql new_value);
|
||||
void LowerIrql(Irql old_value);
|
||||
|
||||
|
@ -104,13 +103,12 @@ class Processor {
|
|||
ExportResolver* export_resolver_ = nullptr;
|
||||
|
||||
EntryTable entry_table_;
|
||||
xe::mutex modules_lock_;
|
||||
xe::global_critical_region global_critical_region_;
|
||||
std::vector<std::unique_ptr<Module>> modules_;
|
||||
Module* builtin_module_ = nullptr;
|
||||
uint32_t next_builtin_address_ = 0xFFFF0000u;
|
||||
|
||||
Irql irql_;
|
||||
xe::recursive_mutex global_mutex_;
|
||||
};
|
||||
|
||||
} // namespace cpu
|
||||
|
|
|
@ -90,7 +90,7 @@ ThreadState::ThreadState(Processor* processor, uint32_t thread_id,
|
|||
std::memset(context_, 0, sizeof(PPCContext));
|
||||
|
||||
// Stash pointers to common structures that callbacks may need.
|
||||
context_->global_mutex = processor_->global_mutex();
|
||||
context_->global_mutex = &xe::global_critical_region::mutex();
|
||||
context_->virtual_membase = memory_->virtual_membase();
|
||||
context_->physical_membase = memory_->physical_membase();
|
||||
context_->processor = processor_;
|
||||
|
|
|
@ -11,7 +11,6 @@
|
|||
|
||||
#include <gflags/gflags.h>
|
||||
|
||||
#include <mutex>
|
||||
#include <utility>
|
||||
|
||||
#include "xenia/base/filesystem.h"
|
||||
|
@ -254,6 +253,7 @@ void Debugger::FindBreakpoints(uint32_t address,
|
|||
}
|
||||
|
||||
bool Debugger::SuspendAllThreads() {
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
auto threads =
|
||||
emulator_->kernel_state()->object_table()->GetObjectsByType<XThread>(
|
||||
XObject::kTypeThread);
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
#include <vector>
|
||||
|
||||
#include "xenia/base/mapped_memory.h"
|
||||
#include "xenia/base/mutex.h"
|
||||
#include "xenia/base/threading.h"
|
||||
#include "xenia/cpu/processor.h"
|
||||
#include "xenia/cpu/thread_state.h"
|
||||
|
@ -106,6 +107,7 @@ class Debugger {
|
|||
std::recursive_mutex mutex_;
|
||||
ExecutionState execution_state_ = ExecutionState::kStopped;
|
||||
|
||||
xe::global_critical_region global_critical_region_;
|
||||
std::multimap<uint32_t, Breakpoint*> breakpoints_;
|
||||
};
|
||||
|
||||
|
|
|
@ -10,7 +10,6 @@
|
|||
#ifndef XENIA_GPU_GL4_TEXTURE_CACHE_H_
|
||||
#define XENIA_GPU_GL4_TEXTURE_CACHE_H_
|
||||
|
||||
#include <mutex>
|
||||
#include <unordered_map>
|
||||
#include <vector>
|
||||
|
||||
|
|
|
@ -24,7 +24,7 @@ WinKeyInputDriver::WinKeyInputDriver(InputSystem* input_system)
|
|||
// Register a key listener.
|
||||
input_system_->emulator()->display_window()->on_key_down.AddListener(
|
||||
[this](ui::KeyEvent* evt) {
|
||||
std::lock_guard<std::mutex> lock(key_event_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
|
||||
KeyEvent key;
|
||||
key.vkey = evt->key_code();
|
||||
|
@ -35,7 +35,7 @@ WinKeyInputDriver::WinKeyInputDriver(InputSystem* input_system)
|
|||
});
|
||||
input_system_->emulator()->display_window()->on_key_up.AddListener(
|
||||
[this](ui::KeyEvent* evt) {
|
||||
std::lock_guard<std::mutex> lock(key_event_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
|
||||
KeyEvent key;
|
||||
key.vkey = evt->key_code();
|
||||
|
@ -210,18 +210,17 @@ X_RESULT WinKeyInputDriver::GetKeystroke(uint32_t user_index, uint32_t flags,
|
|||
uint8_t hid_code = 0;
|
||||
|
||||
// Pop from the queue.
|
||||
key_event_mutex_.lock();
|
||||
if (key_events_.size() == 0) {
|
||||
key_event_mutex_.unlock();
|
||||
|
||||
// No keys!
|
||||
return X_ERROR_EMPTY;
|
||||
KeyEvent evt;
|
||||
{
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
if (key_events_.empty()) {
|
||||
// No keys!
|
||||
return X_ERROR_EMPTY;
|
||||
}
|
||||
evt = key_events_.front();
|
||||
key_events_.pop();
|
||||
}
|
||||
|
||||
KeyEvent evt = key_events_.front();
|
||||
key_events_.pop();
|
||||
key_event_mutex_.unlock();
|
||||
|
||||
// TODO: Some other way to toggle this...
|
||||
if (IS_KEY_TOGGLED(VK_CAPITAL)) {
|
||||
// dpad toggled
|
||||
|
|
|
@ -10,9 +10,9 @@
|
|||
#ifndef XENIA_HID_WINKEY_WINKEY_INPUT_DRIVER_H_
|
||||
#define XENIA_HID_WINKEY_WINKEY_INPUT_DRIVER_H_
|
||||
|
||||
#include <mutex>
|
||||
#include <queue>
|
||||
|
||||
#include "xenia/base/mutex.h"
|
||||
#include "xenia/hid/input_driver.h"
|
||||
|
||||
namespace xe {
|
||||
|
@ -40,8 +40,9 @@ class WinKeyInputDriver : public InputDriver {
|
|||
bool transition = false; // going up(false) or going down(true)
|
||||
bool prev_state = false; // down(true) or up(false)
|
||||
};
|
||||
|
||||
xe::global_critical_region global_critical_region_;
|
||||
std::queue<KeyEvent> key_events_;
|
||||
std::mutex key_event_mutex_;
|
||||
|
||||
uint32_t packet_number_;
|
||||
};
|
||||
|
|
|
@ -81,7 +81,7 @@ X_RESULT XXMPApp::XMPCreateTitlePlaylist(
|
|||
xe::store_and_swap<uint32_t>(memory_->TranslateVirtual(out_playlist_handle),
|
||||
playlist->handle);
|
||||
|
||||
std::lock_guard<xe::mutex> lock(mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
playlists_.insert({playlist->handle, playlist.get()});
|
||||
playlist.release();
|
||||
return X_ERROR_SUCCESS;
|
||||
|
@ -89,7 +89,7 @@ X_RESULT XXMPApp::XMPCreateTitlePlaylist(
|
|||
|
||||
X_RESULT XXMPApp::XMPDeleteTitlePlaylist(uint32_t playlist_handle) {
|
||||
XELOGD("XMPDeleteTitlePlaylist(%.8X)", playlist_handle);
|
||||
std::lock_guard<xe::mutex> lock(mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
auto it = playlists_.find(playlist_handle);
|
||||
if (it == playlists_.end()) {
|
||||
XELOGE("Playlist %.8X not found", playlist_handle);
|
||||
|
@ -109,7 +109,7 @@ X_RESULT XXMPApp::XMPPlayTitlePlaylist(uint32_t playlist_handle,
|
|||
XELOGD("XMPPlayTitlePlaylist(%.8X, %.8X)", playlist_handle, song_handle);
|
||||
Playlist* playlist = nullptr;
|
||||
{
|
||||
std::lock_guard<xe::mutex> lock(mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
auto it = playlists_.find(playlist_handle);
|
||||
if (it == playlists_.end()) {
|
||||
XELOGE("Playlist %.8X not found", playlist_handle);
|
||||
|
|
|
@ -11,7 +11,6 @@
|
|||
#define XENIA_KERNEL_APPS_XMP_APP_H_
|
||||
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
#include <vector>
|
||||
|
@ -102,7 +101,7 @@ class XXMPApp : public XApp {
|
|||
Playlist* active_playlist_;
|
||||
int active_song_index_;
|
||||
|
||||
xe::mutex mutex_;
|
||||
xe::global_critical_region global_critical_region_;
|
||||
std::unordered_map<uint32_t, Playlist*> playlists_;
|
||||
uint32_t next_playlist_handle_;
|
||||
uint32_t next_song_handle_;
|
||||
|
|
|
@ -125,7 +125,7 @@ std::unique_ptr<ContentPackage> ContentManager::ResolvePackage(
|
|||
return nullptr;
|
||||
}
|
||||
|
||||
std::lock_guard<xe::recursive_mutex> lock(content_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
|
||||
auto package = std::make_unique<ContentPackage>(kernel_state_, root_name,
|
||||
data, package_path);
|
||||
|
@ -139,7 +139,7 @@ bool ContentManager::ContentExists(const XCONTENT_DATA& data) {
|
|||
|
||||
X_RESULT ContentManager::CreateContent(std::string root_name,
|
||||
const XCONTENT_DATA& data) {
|
||||
std::lock_guard<xe::recursive_mutex> lock(content_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
|
||||
if (open_packages_.count(root_name)) {
|
||||
// Already content open with this root name.
|
||||
|
@ -166,7 +166,7 @@ X_RESULT ContentManager::CreateContent(std::string root_name,
|
|||
|
||||
X_RESULT ContentManager::OpenContent(std::string root_name,
|
||||
const XCONTENT_DATA& data) {
|
||||
std::lock_guard<xe::recursive_mutex> lock(content_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
|
||||
if (open_packages_.count(root_name)) {
|
||||
// Already content open with this root name.
|
||||
|
@ -189,7 +189,7 @@ X_RESULT ContentManager::OpenContent(std::string root_name,
|
|||
}
|
||||
|
||||
X_RESULT ContentManager::CloseContent(std::string root_name) {
|
||||
std::lock_guard<xe::recursive_mutex> lock(content_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
|
||||
auto it = open_packages_.find(root_name);
|
||||
if (it == open_packages_.end()) {
|
||||
|
@ -205,7 +205,7 @@ X_RESULT ContentManager::CloseContent(std::string root_name) {
|
|||
|
||||
X_RESULT ContentManager::GetContentThumbnail(const XCONTENT_DATA& data,
|
||||
std::vector<uint8_t>* buffer) {
|
||||
std::lock_guard<xe::recursive_mutex> lock(content_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
auto package_path = ResolvePackagePath(data);
|
||||
auto thumb_path = xe::join_paths(package_path, kThumbnailFileName);
|
||||
if (xe::filesystem::PathExists(thumb_path)) {
|
||||
|
@ -224,7 +224,7 @@ X_RESULT ContentManager::GetContentThumbnail(const XCONTENT_DATA& data,
|
|||
|
||||
X_RESULT ContentManager::SetContentThumbnail(const XCONTENT_DATA& data,
|
||||
std::vector<uint8_t> buffer) {
|
||||
std::lock_guard<xe::recursive_mutex> lock(content_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
auto package_path = ResolvePackagePath(data);
|
||||
xe::filesystem::CreateFolder(package_path);
|
||||
if (xe::filesystem::PathExists(package_path)) {
|
||||
|
@ -239,7 +239,7 @@ X_RESULT ContentManager::SetContentThumbnail(const XCONTENT_DATA& data,
|
|||
}
|
||||
|
||||
X_RESULT ContentManager::DeleteContent(const XCONTENT_DATA& data) {
|
||||
std::lock_guard<xe::recursive_mutex> lock(content_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
|
||||
auto package_path = ResolvePackagePath(data);
|
||||
if (xe::filesystem::PathExists(package_path)) {
|
||||
|
|
|
@ -11,7 +11,6 @@
|
|||
#define XENIA_KERNEL_CONTENT_MANAGER_H_
|
||||
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
#include <vector>
|
||||
|
@ -88,7 +87,7 @@ class ContentManager {
|
|||
KernelState* kernel_state_;
|
||||
std::wstring root_path_;
|
||||
|
||||
xe::recursive_mutex content_mutex_;
|
||||
xe::global_critical_region global_critical_region_;
|
||||
std::unordered_map<std::string, ContentPackage*> open_packages_;
|
||||
};
|
||||
|
||||
|
|
|
@ -22,9 +22,5 @@ Dispatcher::Dispatcher(KernelState* kernel_state)
|
|||
|
||||
Dispatcher::~Dispatcher() { delete dpc_list_; }
|
||||
|
||||
void Dispatcher::Lock() { lock_.lock(); }
|
||||
|
||||
void Dispatcher::Unlock() { lock_.unlock(); }
|
||||
|
||||
} // namespace kernel
|
||||
} // namespace xe
|
||||
|
|
|
@ -10,9 +10,6 @@
|
|||
#ifndef XENIA_KERNEL_DISPATCHER_H_
|
||||
#define XENIA_KERNEL_DISPATCHER_H_
|
||||
|
||||
#include <mutex>
|
||||
|
||||
#include "xenia/base/mutex.h"
|
||||
#include "xenia/xbox.h"
|
||||
|
||||
namespace xe {
|
||||
|
@ -21,6 +18,7 @@ namespace kernel {
|
|||
class KernelState;
|
||||
class NativeList;
|
||||
|
||||
// All access must be guarded by the global critical section.
|
||||
class Dispatcher {
|
||||
public:
|
||||
explicit Dispatcher(KernelState* kernel_state);
|
||||
|
@ -28,16 +26,10 @@ class Dispatcher {
|
|||
|
||||
KernelState* kernel_state() const { return kernel_state_; }
|
||||
|
||||
void Lock();
|
||||
void Unlock();
|
||||
|
||||
NativeList* dpc_list() const { return dpc_list_; }
|
||||
|
||||
private:
|
||||
private:
|
||||
KernelState* kernel_state_;
|
||||
|
||||
xe::mutex lock_;
|
||||
NativeList* dpc_list_;
|
||||
};
|
||||
|
||||
|
|
|
@ -174,7 +174,7 @@ bool KernelState::IsKernelModule(const char* name) {
|
|||
// Executing module isn't a kernel module.
|
||||
return false;
|
||||
}
|
||||
std::lock_guard<xe::recursive_mutex> lock(object_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
for (auto kernel_module : kernel_modules_) {
|
||||
if (kernel_module->Matches(name)) {
|
||||
return true;
|
||||
|
@ -204,7 +204,7 @@ object_ref<XModule> KernelState::GetModule(const char* name) {
|
|||
// Some games request this, for some reason. wtf.
|
||||
return nullptr;
|
||||
}
|
||||
std::lock_guard<xe::recursive_mutex> lock(object_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
for (auto kernel_module : kernel_modules_) {
|
||||
if (kernel_module->Matches(name)) {
|
||||
return retain_object(kernel_module.get());
|
||||
|
@ -262,9 +262,9 @@ void KernelState::SetExecutableModule(object_ref<XUserModule> module) {
|
|||
dispatch_thread_ =
|
||||
object_ref<XHostThread>(new XHostThread(this, 128 * 1024, 0, [this]() {
|
||||
while (dispatch_thread_running_) {
|
||||
std::unique_lock<std::mutex> lock(dispatch_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
if (dispatch_queue_.empty()) {
|
||||
dispatch_cond_.wait(lock);
|
||||
dispatch_cond_.wait(global_lock);
|
||||
if (!dispatch_thread_running_) {
|
||||
break;
|
||||
}
|
||||
|
@ -281,7 +281,7 @@ void KernelState::SetExecutableModule(object_ref<XUserModule> module) {
|
|||
}
|
||||
|
||||
void KernelState::LoadKernelModule(object_ref<XKernelModule> kernel_module) {
|
||||
std::lock_guard<xe::recursive_mutex> lock(object_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
kernel_modules_.push_back(std::move(kernel_module));
|
||||
}
|
||||
|
||||
|
@ -296,7 +296,7 @@ object_ref<XUserModule> KernelState::LoadUserModule(const char* raw_name) {
|
|||
|
||||
object_ref<XUserModule> module;
|
||||
{
|
||||
std::lock_guard<xe::recursive_mutex> lock(object_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
|
||||
// See if we've already loaded it
|
||||
for (auto& existing_module : user_modules_) {
|
||||
|
@ -337,7 +337,7 @@ object_ref<XUserModule> KernelState::LoadUserModule(const char* raw_name) {
|
|||
}
|
||||
|
||||
void KernelState::TerminateTitle(bool from_guest_thread) {
|
||||
std::lock_guard<xe::recursive_mutex> lock(object_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
|
||||
// First: call terminate routines.
|
||||
// TODO(benvanik): these might take arguments.
|
||||
|
@ -391,13 +391,13 @@ void KernelState::TerminateTitle(bool from_guest_thread) {
|
|||
// code anymore)
|
||||
// Also, manually invoke the lock guard's destructor, because Terminate
|
||||
// does not return.
|
||||
lock.~lock_guard();
|
||||
global_lock.unlock();
|
||||
XThread::GetCurrentThread()->Terminate(0);
|
||||
}
|
||||
}
|
||||
|
||||
void KernelState::RegisterThread(XThread* thread) {
|
||||
std::lock_guard<xe::recursive_mutex> lock(object_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
threads_by_id_[thread->thread_id()] = thread;
|
||||
|
||||
auto pib =
|
||||
|
@ -406,7 +406,7 @@ void KernelState::RegisterThread(XThread* thread) {
|
|||
}
|
||||
|
||||
void KernelState::UnregisterThread(XThread* thread) {
|
||||
std::lock_guard<xe::recursive_mutex> lock(object_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
auto it = threads_by_id_.find(thread->thread_id());
|
||||
if (it != threads_by_id_.end()) {
|
||||
threads_by_id_.erase(it);
|
||||
|
@ -418,7 +418,7 @@ void KernelState::UnregisterThread(XThread* thread) {
|
|||
}
|
||||
|
||||
void KernelState::OnThreadExecute(XThread* thread) {
|
||||
std::lock_guard<xe::recursive_mutex> lock(object_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
|
||||
// Must be called on executing thread.
|
||||
assert_true(XThread::GetCurrentThread() == thread);
|
||||
|
@ -440,7 +440,7 @@ void KernelState::OnThreadExecute(XThread* thread) {
|
|||
}
|
||||
|
||||
void KernelState::OnThreadExit(XThread* thread) {
|
||||
std::lock_guard<xe::recursive_mutex> lock(object_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
|
||||
// Must be called on executing thread.
|
||||
assert_true(XThread::GetCurrentThread() == thread);
|
||||
|
@ -466,7 +466,7 @@ void KernelState::OnThreadExit(XThread* thread) {
|
|||
}
|
||||
|
||||
object_ref<XThread> KernelState::GetThreadByID(uint32_t thread_id) {
|
||||
std::lock_guard<xe::recursive_mutex> lock(object_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
XThread* thread = nullptr;
|
||||
auto it = threads_by_id_.find(thread_id);
|
||||
if (it != threads_by_id_.end()) {
|
||||
|
@ -476,7 +476,7 @@ object_ref<XThread> KernelState::GetThreadByID(uint32_t thread_id) {
|
|||
}
|
||||
|
||||
void KernelState::RegisterNotifyListener(XNotifyListener* listener) {
|
||||
std::lock_guard<xe::recursive_mutex> lock(object_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
notify_listeners_.push_back(retain_object(listener));
|
||||
|
||||
// Games seem to expect a few notifications on startup, only for the first
|
||||
|
@ -500,7 +500,7 @@ void KernelState::RegisterNotifyListener(XNotifyListener* listener) {
|
|||
}
|
||||
|
||||
void KernelState::UnregisterNotifyListener(XNotifyListener* listener) {
|
||||
std::lock_guard<xe::recursive_mutex> lock(object_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
for (auto it = notify_listeners_.begin(); it != notify_listeners_.end();
|
||||
++it) {
|
||||
if ((*it).get() == listener) {
|
||||
|
@ -511,7 +511,7 @@ void KernelState::UnregisterNotifyListener(XNotifyListener* listener) {
|
|||
}
|
||||
|
||||
void KernelState::BroadcastNotification(XNotificationID id, uint32_t data) {
|
||||
std::lock_guard<xe::recursive_mutex> lock(object_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
for (auto it = notify_listeners_.begin(); it != notify_listeners_.end();
|
||||
++it) {
|
||||
(*it)->EnqueueNotification(id, data);
|
||||
|
@ -574,7 +574,7 @@ void KernelState::CompleteOverlappedDeferredEx(
|
|||
auto ptr = memory()->TranslateVirtual(overlapped_ptr);
|
||||
XOverlappedSetResult(ptr, X_ERROR_IO_PENDING);
|
||||
XOverlappedSetContext(ptr, XThread::GetCurrentThreadHandle());
|
||||
std::unique_lock<std::mutex> lock(dispatch_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
dispatch_queue_.push_back([this, completion_callback, overlapped_ptr, result,
|
||||
extended_error, length]() {
|
||||
xe::threading::Sleep(
|
||||
|
|
|
@ -17,7 +17,6 @@
|
|||
#include <functional>
|
||||
#include <list>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <vector>
|
||||
|
||||
#include "xenia/base/mutex.h"
|
||||
|
@ -105,8 +104,8 @@ class KernelState {
|
|||
UserProfile* user_profile() const { return user_profile_.get(); }
|
||||
ContentManager* content_manager() const { return content_manager_.get(); }
|
||||
|
||||
// Access must be guarded by the global critical region.
|
||||
ObjectTable* object_table() const { return object_table_; }
|
||||
xe::recursive_mutex& object_mutex() { return object_mutex_; }
|
||||
|
||||
uint32_t process_type() const;
|
||||
void set_process_type(uint32_t value);
|
||||
|
@ -178,8 +177,10 @@ class KernelState {
|
|||
std::unique_ptr<UserProfile> user_profile_;
|
||||
std::unique_ptr<ContentManager> content_manager_;
|
||||
|
||||
xe::global_critical_region global_critical_region_;
|
||||
|
||||
// Must be guarded by the global critical region.
|
||||
ObjectTable* object_table_;
|
||||
xe::recursive_mutex object_mutex_;
|
||||
std::unordered_map<uint32_t, XThread*> threads_by_id_;
|
||||
std::vector<object_ref<XNotifyListener>> notify_listeners_;
|
||||
bool has_notified_startup_;
|
||||
|
@ -194,8 +195,8 @@ class KernelState {
|
|||
|
||||
std::atomic<bool> dispatch_thread_running_;
|
||||
object_ref<XHostThread> dispatch_thread_;
|
||||
std::mutex dispatch_mutex_;
|
||||
std::condition_variable dispatch_cond_;
|
||||
// Must be guarded by the global critical region.
|
||||
std::condition_variable_any dispatch_cond_;
|
||||
std::list<std::function<void()>> dispatch_queue_;
|
||||
|
||||
friend class XObject;
|
||||
|
|
|
@ -22,7 +22,7 @@ ObjectTable::ObjectTable()
|
|||
: table_capacity_(0), table_(nullptr), last_free_entry_(0) {}
|
||||
|
||||
ObjectTable::~ObjectTable() {
|
||||
std::lock_guard<xe::recursive_mutex> lock(table_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
|
||||
// Release all objects.
|
||||
for (uint32_t n = 0; n < table_capacity_; n++) {
|
||||
|
@ -89,7 +89,7 @@ X_STATUS ObjectTable::AddHandle(XObject* object, X_HANDLE* out_handle) {
|
|||
|
||||
uint32_t slot = 0;
|
||||
{
|
||||
std::lock_guard<xe::recursive_mutex> lock(table_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
|
||||
// Find a free slot.
|
||||
result = FindFreeSlot(&slot);
|
||||
|
@ -128,7 +128,7 @@ X_STATUS ObjectTable::DuplicateHandle(X_HANDLE handle, X_HANDLE* out_handle) {
|
|||
}
|
||||
|
||||
X_STATUS ObjectTable::RetainHandle(X_HANDLE handle) {
|
||||
std::lock_guard<xe::recursive_mutex> lock(table_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
|
||||
ObjectTableEntry* entry = LookupTable(handle);
|
||||
if (!entry) {
|
||||
|
@ -140,7 +140,7 @@ X_STATUS ObjectTable::RetainHandle(X_HANDLE handle) {
|
|||
}
|
||||
|
||||
X_STATUS ObjectTable::ReleaseHandle(X_HANDLE handle) {
|
||||
std::lock_guard<xe::recursive_mutex> lock(table_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
|
||||
ObjectTableEntry* entry = LookupTable(handle);
|
||||
if (!entry) {
|
||||
|
@ -170,7 +170,7 @@ X_STATUS ObjectTable::RemoveHandle(X_HANDLE handle) {
|
|||
return X_STATUS_INVALID_HANDLE;
|
||||
}
|
||||
|
||||
std::lock_guard<xe::recursive_mutex> lock(table_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
if (entry->object) {
|
||||
auto object = entry->object;
|
||||
entry->object = nullptr;
|
||||
|
@ -189,7 +189,7 @@ ObjectTable::ObjectTableEntry* ObjectTable::LookupTable(X_HANDLE handle) {
|
|||
return nullptr;
|
||||
}
|
||||
|
||||
std::lock_guard<xe::recursive_mutex> lock(table_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
|
||||
// Lower 2 bits are ignored.
|
||||
uint32_t slot = handle >> 2;
|
||||
|
@ -208,7 +208,7 @@ XObject* ObjectTable::LookupObject(X_HANDLE handle, bool already_locked) {
|
|||
|
||||
XObject* object = nullptr;
|
||||
if (!already_locked) {
|
||||
table_mutex_.lock();
|
||||
global_critical_region_.mutex().lock();
|
||||
}
|
||||
|
||||
// Lower 2 bits are ignored.
|
||||
|
@ -228,7 +228,7 @@ XObject* ObjectTable::LookupObject(X_HANDLE handle, bool already_locked) {
|
|||
}
|
||||
|
||||
if (!already_locked) {
|
||||
table_mutex_.unlock();
|
||||
global_critical_region_.mutex().unlock();
|
||||
}
|
||||
|
||||
return object;
|
||||
|
@ -236,7 +236,7 @@ XObject* ObjectTable::LookupObject(X_HANDLE handle, bool already_locked) {
|
|||
|
||||
void ObjectTable::GetObjectsByType(XObject::Type type,
|
||||
std::vector<object_ref<XObject>>* results) {
|
||||
std::lock_guard<xe::recursive_mutex> lock(table_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
for (uint32_t slot = 0; slot < table_capacity_; ++slot) {
|
||||
auto& entry = table_[slot];
|
||||
if (entry.object) {
|
||||
|
@ -267,7 +267,7 @@ X_STATUS ObjectTable::AddNameMapping(const std::string& name, X_HANDLE handle) {
|
|||
std::transform(lower_name.begin(), lower_name.end(), lower_name.begin(),
|
||||
tolower);
|
||||
|
||||
std::lock_guard<xe::recursive_mutex> lock(table_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
if (name_table_.count(lower_name)) {
|
||||
return X_STATUS_OBJECT_NAME_COLLISION;
|
||||
}
|
||||
|
@ -281,7 +281,7 @@ void ObjectTable::RemoveNameMapping(const std::string& name) {
|
|||
std::transform(lower_name.begin(), lower_name.end(), lower_name.begin(),
|
||||
tolower);
|
||||
|
||||
std::lock_guard<xe::recursive_mutex> lock(table_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
auto it = name_table_.find(lower_name);
|
||||
if (it != name_table_.end()) {
|
||||
name_table_.erase(it);
|
||||
|
@ -295,7 +295,7 @@ X_STATUS ObjectTable::GetObjectByName(const std::string& name,
|
|||
std::transform(lower_name.begin(), lower_name.end(), lower_name.begin(),
|
||||
tolower);
|
||||
|
||||
std::lock_guard<xe::recursive_mutex> lock(table_mutex_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
auto it = name_table_.find(lower_name);
|
||||
if (it == name_table_.end()) {
|
||||
*out_handle = X_INVALID_HANDLE_VALUE;
|
||||
|
|
|
@ -10,7 +10,6 @@
|
|||
#ifndef XENIA_KERNEL_OBJECT_TABLE_H_
|
||||
#define XENIA_KERNEL_OBJECT_TABLE_H_
|
||||
|
||||
#include <mutex>
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
#include <vector>
|
||||
|
@ -65,7 +64,7 @@ class ObjectTable {
|
|||
X_HANDLE TranslateHandle(X_HANDLE handle);
|
||||
X_STATUS FindFreeSlot(uint32_t* out_slot);
|
||||
|
||||
xe::recursive_mutex table_mutex_;
|
||||
xe::global_critical_region global_critical_region_;
|
||||
uint32_t table_capacity_;
|
||||
ObjectTableEntry* table_;
|
||||
uint32_t last_free_entry_;
|
||||
|
|
|
@ -36,7 +36,7 @@ void XNotifyListener::EnqueueNotification(XNotificationID id, uint32_t data) {
|
|||
return;
|
||||
}
|
||||
|
||||
std::lock_guard<xe::mutex> lock(lock_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
if (notifications_.count(id)) {
|
||||
// Already exists. Overwrite.
|
||||
notifications_[id] = data;
|
||||
|
@ -50,7 +50,7 @@ void XNotifyListener::EnqueueNotification(XNotificationID id, uint32_t data) {
|
|||
|
||||
bool XNotifyListener::DequeueNotification(XNotificationID* out_id,
|
||||
uint32_t* out_data) {
|
||||
std::lock_guard<xe::mutex> lock(lock_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
bool dequeued = false;
|
||||
if (notification_count_) {
|
||||
dequeued = true;
|
||||
|
@ -68,7 +68,7 @@ bool XNotifyListener::DequeueNotification(XNotificationID* out_id,
|
|||
|
||||
bool XNotifyListener::DequeueNotification(XNotificationID id,
|
||||
uint32_t* out_data) {
|
||||
std::lock_guard<xe::mutex> lock(lock_);
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
bool dequeued = false;
|
||||
if (notification_count_) {
|
||||
auto it = notifications_.find(id);
|
||||
|
|
|
@ -40,7 +40,7 @@ class XNotifyListener : public XObject {
|
|||
|
||||
private:
|
||||
std::unique_ptr<xe::threading::Event> wait_handle_;
|
||||
xe::mutex lock_;
|
||||
xe::global_critical_region global_critical_region_;
|
||||
std::unordered_map<XNotificationID, uint32_t> notifications_;
|
||||
size_t notification_count_ = 0;
|
||||
uint64_t mask_ = 0;
|
||||
|
|
|
@ -16,7 +16,6 @@
|
|||
#include "xenia/base/clock.h"
|
||||
#include "xenia/base/logging.h"
|
||||
#include "xenia/base/math.h"
|
||||
#include "xenia/base/mutex.h"
|
||||
#include "xenia/base/threading.h"
|
||||
#include "xenia/cpu/processor.h"
|
||||
#include "xenia/emulator.h"
|
||||
|
@ -36,7 +35,6 @@ namespace kernel {
|
|||
|
||||
uint32_t next_xthread_id_ = 0;
|
||||
thread_local XThread* current_thread_tls_ = nullptr;
|
||||
xe::mutex critical_region_;
|
||||
|
||||
XThread::XThread(KernelState* kernel_state, uint32_t stack_size,
|
||||
uint32_t xapi_thread_startup, uint32_t start_address,
|
||||
|
@ -444,11 +442,12 @@ void XThread::Execute() {
|
|||
}
|
||||
|
||||
void XThread::EnterCriticalRegion() {
|
||||
// Global critical region. This isn't right, but is easy.
|
||||
critical_region_.lock();
|
||||
xe::global_critical_region::mutex().lock();
|
||||
}
|
||||
|
||||
void XThread::LeaveCriticalRegion() { critical_region_.unlock(); }
|
||||
void XThread::LeaveCriticalRegion() {
|
||||
xe::global_critical_region::mutex().unlock();
|
||||
}
|
||||
|
||||
uint32_t XThread::RaiseIrql(uint32_t new_irql) {
|
||||
return irql_.exchange(new_irql);
|
||||
|
@ -458,11 +457,11 @@ void XThread::LowerIrql(uint32_t new_irql) { irql_ = new_irql; }
|
|||
|
||||
void XThread::CheckApcs() { DeliverAPCs(); }
|
||||
|
||||
void XThread::LockApc() { apc_lock_.lock(); }
|
||||
void XThread::LockApc() { EnterCriticalRegion(); }
|
||||
|
||||
void XThread::UnlockApc(bool queue_delivery) {
|
||||
bool needs_apc = apc_list_->HasPending();
|
||||
apc_lock_.unlock();
|
||||
LeaveCriticalRegion();
|
||||
if (needs_apc && queue_delivery) {
|
||||
thread_->QueueUserCallback([this]() { DeliverAPCs(); });
|
||||
}
|
||||
|
@ -652,8 +651,15 @@ X_STATUS XThread::Resume(uint32_t* out_suspend_count) {
|
|||
}
|
||||
|
||||
X_STATUS XThread::Suspend(uint32_t* out_suspend_count) {
|
||||
auto global_lock = global_critical_region_.Acquire();
|
||||
|
||||
++guest_object<X_KTHREAD>()->suspend_count;
|
||||
|
||||
// If we are suspending ourselves, we can't hold the lock.
|
||||
if (XThread::GetCurrentThread() == this) {
|
||||
global_lock.unlock();
|
||||
}
|
||||
|
||||
if (thread_->Suspend(out_suspend_count)) {
|
||||
return X_STATUS_SUCCESS;
|
||||
} else {
|
||||
|
|
|
@ -11,9 +11,9 @@
|
|||
#define XENIA_KERNEL_OBJECTS_XTHREAD_H_
|
||||
|
||||
#include <atomic>
|
||||
#include <mutex>
|
||||
#include <string>
|
||||
|
||||
#include "xenia/base/mutex.h"
|
||||
#include "xenia/base/threading.h"
|
||||
#include "xenia/cpu/thread_state.h"
|
||||
#include "xenia/kernel/xobject.h"
|
||||
|
@ -189,8 +189,8 @@ class XThread : public XObject {
|
|||
int32_t priority_ = 0;
|
||||
uint32_t affinity_ = 0;
|
||||
|
||||
xe::global_critical_region global_critical_region_;
|
||||
std::atomic<uint32_t> irql_ = {0};
|
||||
xe::mutex apc_lock_;
|
||||
NativeList* apc_list_ = nullptr;
|
||||
};
|
||||
|
||||
|
|
|
@ -1274,15 +1274,13 @@ SHIM_CALL KeInsertQueueDpc_shim(PPCContext* ppc_context,
|
|||
uint32_t list_entry_ptr = dpc_ptr + 4;
|
||||
|
||||
// Lock dispatcher.
|
||||
auto global_lock = xe::global_critical_region::AcquireDirect();
|
||||
auto dispatcher = kernel_state->dispatcher();
|
||||
dispatcher->Lock();
|
||||
|
||||
auto dpc_list = dispatcher->dpc_list();
|
||||
|
||||
// If already in a queue, abort.
|
||||
if (dpc_list->IsQueued(list_entry_ptr)) {
|
||||
SHIM_SET_RETURN_32(0);
|
||||
dispatcher->Unlock();
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1292,8 +1290,6 @@ SHIM_CALL KeInsertQueueDpc_shim(PPCContext* ppc_context,
|
|||
|
||||
dpc_list->Insert(list_entry_ptr);
|
||||
|
||||
dispatcher->Unlock();
|
||||
|
||||
SHIM_SET_RETURN_32(1);
|
||||
}
|
||||
|
||||
|
@ -1307,8 +1303,8 @@ SHIM_CALL KeRemoveQueueDpc_shim(PPCContext* ppc_context,
|
|||
|
||||
uint32_t list_entry_ptr = dpc_ptr + 4;
|
||||
|
||||
auto global_lock = xe::global_critical_region::AcquireDirect();
|
||||
auto dispatcher = kernel_state->dispatcher();
|
||||
dispatcher->Lock();
|
||||
|
||||
auto dpc_list = dispatcher->dpc_list();
|
||||
if (dpc_list->IsQueued(list_entry_ptr)) {
|
||||
|
@ -1316,8 +1312,6 @@ SHIM_CALL KeRemoveQueueDpc_shim(PPCContext* ppc_context,
|
|||
result = true;
|
||||
}
|
||||
|
||||
dispatcher->Unlock();
|
||||
|
||||
SHIM_SET_RETURN_32(result ? 1 : 0);
|
||||
}
|
||||
|
||||
|
@ -1329,8 +1323,7 @@ pointer_result_t InterlockedPushEntrySList(

  // Hold a global lock during this method. Once in the lock we assume we have
  // exclusive access to the structure.
  std::lock_guard<xe::recursive_mutex> lock(
      *kernel_state()->processor()->global_mutex());
  auto global_lock = xe::global_critical_region::AcquireDirect();

  alignas(8) X_SLIST_HEADER old_hdr = *plist_ptr;
  alignas(8) X_SLIST_HEADER new_hdr = {0};

@ -1341,9 +1334,9 @@ pointer_result_t InterlockedPushEntrySList(
  entry->next = old_hdr.next.next;
  new_hdr.next.next = entry.guest_address();

  xe::atomic_cas(*reinterpret_cast<uint64_t*>(&old_hdr),
                 *reinterpret_cast<uint64_t*>(&new_hdr),
                 reinterpret_cast<uint64_t*>(plist_ptr.host_address()));
  *reinterpret_cast<uint64_t*>(plist_ptr.host_address()) =
      *reinterpret_cast<uint64_t*>(&new_hdr);
  xe::threading::SyncMemory();

  return old_head;
}

@ -1355,8 +1348,7 @@ pointer_result_t InterlockedPopEntrySList(pointer_t<X_SLIST_HEADER> plist_ptr) {

  // Hold a global lock during this method. Once in the lock we assume we have
  // exclusive access to the structure.
  std::lock_guard<xe::recursive_mutex> lock(
      *kernel_state()->processor()->global_mutex());
  auto global_lock = xe::global_critical_region::AcquireDirect();

  uint32_t popped = 0;

@ -1373,9 +1365,9 @@ pointer_result_t InterlockedPopEntrySList(pointer_t<X_SLIST_HEADER> plist_ptr) {
  new_hdr.next.next = next->next;
  new_hdr.sequence = old_hdr.sequence;

  xe::atomic_cas(*reinterpret_cast<uint64_t*>(&old_hdr),
                 *reinterpret_cast<uint64_t*>(&new_hdr),
                 reinterpret_cast<uint64_t*>(plist_ptr.host_address()));
  *reinterpret_cast<uint64_t*>(plist_ptr.host_address()) =
      *reinterpret_cast<uint64_t*>(&new_hdr);
  xe::threading::SyncMemory();

  return popped;
}

@ -1387,8 +1379,7 @@ pointer_result_t InterlockedFlushSList(pointer_t<X_SLIST_HEADER> plist_ptr) {

  // Hold a global lock during this method. Once in the lock we assume we have
  // exclusive access to the structure.
  std::lock_guard<xe::recursive_mutex> lock(
      *kernel_state()->processor()->global_mutex());
  auto global_lock = xe::global_critical_region::AcquireDirect();

  alignas(8) X_SLIST_HEADER old_hdr = *plist_ptr;
  alignas(8) X_SLIST_HEADER new_hdr = {0};

@ -1397,9 +1388,9 @@ pointer_result_t InterlockedFlushSList(pointer_t<X_SLIST_HEADER> plist_ptr) {
  new_hdr.depth = 0;
  new_hdr.sequence = 0;

  xe::atomic_cas(*reinterpret_cast<uint64_t*>(&old_hdr),
                 *reinterpret_cast<uint64_t*>(&new_hdr),
                 reinterpret_cast<uint64_t*>(plist_ptr.host_address()));
  *reinterpret_cast<uint64_t*>(plist_ptr.host_address()) =
      *reinterpret_cast<uint64_t*>(&new_hdr);
  xe::threading::SyncMemory();

  return first;
}
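The second change in these three functions matters as much as the first: once the whole body runs with exclusive access under the global lock, the compare-and-swap on the list header is redundant, so the header is written back with a plain store and only the memory barrier (xe::threading::SyncMemory()) is kept. A host-side sketch of the push under those assumptions, using plain C++ types rather than the guest X_SLIST_HEADER layout (depth/sequence bookkeeping elided):

#include <atomic>
#include <cstdint>
#include <mutex>

struct SListHeader {
  uint32_t next = 0;  // guest-style "address" of the first entry
};

std::recursive_mutex g_global_region;

// Push one entry (identified by its address) onto the list. Exclusive access
// makes a direct store safe where lock-free code would need a CAS retry loop;
// the trailing fence plays the role of SyncMemory() for other observers.
uint32_t PushEntryExample(SListHeader* header, uint32_t entry_address,
                          uint32_t* entry_next_field) {
  std::unique_lock<std::recursive_mutex> global_lock(g_global_region);
  SListHeader old_hdr = *header;
  SListHeader new_hdr = old_hdr;
  *entry_next_field = old_hdr.next;  // entry->next = old head
  new_hdr.next = entry_address;      // the header now points at the new entry
  *header = new_hdr;                 // plain store instead of atomic_cas
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return old_hdr.next;               // previous head, as the guest API returns
}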
@ -227,7 +227,7 @@ X_STATUS XObject::WaitMultiple(uint32_t count, XObject** objects,
}

uint8_t* XObject::CreateNative(uint32_t size) {
  std::lock_guard<xe::recursive_mutex> lock(kernel_state_->object_mutex());
  auto global_lock = xe::global_critical_region::AcquireDirect();

  uint32_t total_size = size + sizeof(X_OBJECT_HEADER);

@ -255,7 +255,7 @@ uint8_t* XObject::CreateNative(uint32_t size) {
}

void XObject::SetNativePointer(uint32_t native_ptr, bool uninitialized) {
  std::lock_guard<xe::recursive_mutex> lock(kernel_state_->object_mutex());
  auto global_lock = xe::global_critical_region::AcquireDirect();

  // If hit: We've already setup the native ptr with CreateNative!
  assert_zero(guest_object_ptr_);

@ -289,7 +289,7 @@ object_ref<XObject> XObject::GetNativeObject(KernelState* kernel_state,
  // We identify this by checking the low bit of wait_list_blink - if it's 1,
  // we have already put our pointer in there.

  std::lock_guard<xe::recursive_mutex> lock(kernel_state->object_mutex());
  auto global_lock = xe::global_critical_region::AcquireDirect();

  auto header = reinterpret_cast<X_DISPATCH_HEADER*>(native_ptr);
@ -13,7 +13,6 @@

#include <algorithm>
#include <cstring>
#include <mutex>

#include "xenia/base/clock.h"
#include "xenia/base/logging.h"
@ -461,7 +460,7 @@ void BaseHeap::Dispose() {
}

void BaseHeap::DumpMap() {
  std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
  auto global_lock = global_critical_region_.Acquire();
  XELOGE("------------------------------------------------------------------");
  XELOGE("Heap: %.8X-%.8X", heap_base_, heap_base_ + heap_size_);
  XELOGE("------------------------------------------------------------------");

@ -535,7 +534,7 @@ bool BaseHeap::AllocFixed(uint32_t base_address, uint32_t size,
    return false;
  }

  std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
  auto global_lock = global_critical_region_.Acquire();

  // - If we are reserving the entire range requested must not be already
  // reserved.

@ -620,7 +619,7 @@ bool BaseHeap::AllocRange(uint32_t low_address, uint32_t high_address,
    return false;
  }

  std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
  auto global_lock = global_critical_region_.Acquire();

  // Find a free page range.
  // The base page must match the requested alignment, so we first scan for

@ -751,7 +750,7 @@ bool BaseHeap::Decommit(uint32_t address, uint32_t size) {
      std::min(uint32_t(page_table_.size()) - 1, start_page_number);
  end_page_number = std::min(uint32_t(page_table_.size()) - 1, end_page_number);

  std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
  auto global_lock = global_critical_region_.Acquire();

  // Release from host.
  // TODO(benvanik): find a way to actually decommit memory;

@ -775,7 +774,7 @@ bool BaseHeap::Decommit(uint32_t address, uint32_t size) {
}

bool BaseHeap::Release(uint32_t base_address, uint32_t* out_region_size) {
  std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
  auto global_lock = global_critical_region_.Acquire();

  // Given address must be a region base address.
  uint32_t base_page_number = (base_address - heap_base_) / page_size_;
@ -831,7 +830,7 @@ bool BaseHeap::Protect(uint32_t address, uint32_t size, uint32_t protect) {
      std::min(uint32_t(page_table_.size()) - 1, start_page_number);
  end_page_number = std::min(uint32_t(page_table_.size()) - 1, end_page_number);

  std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
  auto global_lock = global_critical_region_.Acquire();

  // Ensure all pages are in the same reserved region and all are committed.
  uint32_t first_base_address = UINT_MAX;

@ -883,7 +882,7 @@ bool BaseHeap::QueryRegionInfo(uint32_t base_address,
    return false;
  }

  std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
  auto global_lock = global_critical_region_.Acquire();

  auto start_page_entry = page_table_[start_page_number];
  out_info->base_address = base_address;

@ -934,7 +933,7 @@ bool BaseHeap::QuerySize(uint32_t address, uint32_t* out_size) {
    *out_size = 0;
    return false;
  }
  std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
  auto global_lock = global_critical_region_.Acquire();
  auto page_entry = page_table_[page_number];
  *out_size = (page_entry.region_page_count * page_size_);
  return true;

@ -947,7 +946,7 @@ bool BaseHeap::QueryProtect(uint32_t address, uint32_t* out_protect) {
    *out_protect = 0;
    return false;
  }
  std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
  auto global_lock = global_critical_region_.Acquire();
  auto page_entry = page_table_[page_number];
  *out_protect = page_entry.current_protect;
  return true;
@ -995,7 +994,7 @@ bool PhysicalHeap::Alloc(uint32_t size, uint32_t alignment,
  size = xe::round_up(size, page_size_);
  alignment = xe::round_up(alignment, page_size_);

  std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
  auto global_lock = global_critical_region_.Acquire();

  // Allocate from parent heap (gets our physical address in 0-512mb).
  uint32_t parent_low_address = GetPhysicalAddress(heap_base_);

@ -1033,7 +1032,7 @@ bool PhysicalHeap::AllocFixed(uint32_t base_address, uint32_t size,
  size = xe::round_up(size, page_size_);
  alignment = xe::round_up(alignment, page_size_);

  std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
  auto global_lock = global_critical_region_.Acquire();

  // Allocate from parent heap (gets our physical address in 0-512mb).
  // NOTE: this can potentially overwrite heap contents if there are already

@ -1074,7 +1073,7 @@ bool PhysicalHeap::AllocRange(uint32_t low_address, uint32_t high_address,
  size = xe::round_up(size, page_size_);
  alignment = xe::round_up(alignment, page_size_);

  std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
  auto global_lock = global_critical_region_.Acquire();

  // Allocate from parent heap (gets our physical address in 0-512mb).
  low_address = std::max(heap_base_, low_address);

@ -1108,7 +1107,7 @@ bool PhysicalHeap::AllocRange(uint32_t low_address, uint32_t high_address,
}

bool PhysicalHeap::Decommit(uint32_t address, uint32_t size) {
  std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
  auto global_lock = global_critical_region_.Acquire();
  uint32_t parent_address = GetPhysicalAddress(address);
  if (!parent_heap_->Decommit(parent_address, size)) {
    XELOGE("PhysicalHeap::Decommit failed due to parent heap failure");

@ -1118,7 +1117,7 @@ bool PhysicalHeap::Decommit(uint32_t address, uint32_t size) {
}

bool PhysicalHeap::Release(uint32_t base_address, uint32_t* out_region_size) {
  std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
  auto global_lock = global_critical_region_.Acquire();
  uint32_t parent_base_address = GetPhysicalAddress(base_address);
  if (!parent_heap_->Release(parent_base_address, out_region_size)) {
    XELOGE("PhysicalHeap::Release failed due to parent heap failure");

@ -1128,7 +1127,7 @@ bool PhysicalHeap::Release(uint32_t base_address, uint32_t* out_region_size) {
}

bool PhysicalHeap::Protect(uint32_t address, uint32_t size, uint32_t protect) {
  std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
  auto global_lock = global_critical_region_.Acquire();
  uint32_t parent_address = GetPhysicalAddress(address);
  bool parent_result = parent_heap_->Protect(parent_address, size, protect);
  if (!parent_result) {
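One detail worth noting across the PhysicalHeap hunks: each method takes the global region and then calls into parent_heap_, whose methods (per the BaseHeap hunks above) take the same region again on the same thread. That only works if the region is re-entrant, that is, backed by a recursive lock. A small sketch of why the recursive flavour matters, with hypothetical heap types standing in for PhysicalHeap and its parent:

#include <cstdint>
#include <mutex>

std::recursive_mutex g_global_region;

struct ParentHeapExample {
  uint32_t next_free = 0x1000;
  uint32_t Alloc(uint32_t size) {
    // Second acquisition by the same thread: fine with a recursive_mutex,
    // an instant self-deadlock with a plain std::mutex.
    std::unique_lock<std::recursive_mutex> lock(g_global_region);
    uint32_t address = next_free;
    next_free += size;
    return address;
  }
};

struct PhysicalHeapExample {
  ParentHeapExample* parent = nullptr;
  uint32_t Alloc(uint32_t size) {
    std::unique_lock<std::recursive_mutex> lock(g_global_region);
    return parent->Alloc(size);  // nested call re-enters the same lock
  }
};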
@ -12,7 +12,6 @@

#include <cstdint>
#include <memory>
#include <mutex>
#include <string>
#include <vector>
@ -115,8 +114,8 @@ class BaseHeap {
  uint32_t heap_base_;
  uint32_t heap_size_;
  uint32_t page_size_;
  xe::global_critical_region global_critical_region_;
  std::vector<PageEntry> page_table_;
  xe::recursive_mutex heap_mutex_;
};

class VirtualHeap : public BaseHeap {
@ -19,7 +19,7 @@ Device::Device(const std::string& mount_path) : mount_path_(mount_path) {}
Device::~Device() = default;

void Device::Dump(StringBuffer* string_buffer) {
  std::lock_guard<xe::recursive_mutex> lock(mutex_);
  auto global_lock = global_critical_region_.Acquire();
  root_entry_->Dump(string_buffer, 0);
}
@ -28,7 +28,6 @@ class Device {
  virtual bool Initialize() = 0;
  void Dump(StringBuffer* string_buffer);

  xe::recursive_mutex& mutex() { return mutex_; }
  const std::string& mount_path() const { return mount_path_; }

  virtual bool is_read_only() const { return true; }

@ -41,7 +40,7 @@ class Device {
  virtual uint32_t bytes_per_sector() const = 0;

 protected:
  xe::recursive_mutex mutex_;
  xe::global_critical_region global_critical_region_;
  std::string mount_path_;
  std::unique_ptr<Entry> root_entry_;
};
@ -47,7 +47,7 @@ void Entry::Dump(xe::StringBuffer* string_buffer, int indent) {
bool Entry::is_read_only() const { return device_->is_read_only(); }

Entry* Entry::GetChild(std::string name) {
  std::lock_guard<xe::recursive_mutex> lock(device_->mutex());
  auto global_lock = global_critical_region_.Acquire();
  // TODO(benvanik): a faster search
  for (auto& child : children_) {
    if (strcasecmp(child->name().c_str(), name.c_str()) == 0) {

@ -59,7 +59,7 @@ Entry* Entry::GetChild(std::string name) {

Entry* Entry::IterateChildren(const xe::filesystem::WildcardEngine& engine,
                              size_t* current_index) {
  std::lock_guard<xe::recursive_mutex> lock(device_->mutex());
  auto global_lock = global_critical_region_.Acquire();
  while (*current_index < children_.size()) {
    auto& child = children_[*current_index];
    *current_index = *current_index + 1;
@ -71,10 +71,10 @@ Entry* Entry::IterateChildren(const xe::filesystem::WildcardEngine& engine,
}

Entry* Entry::CreateEntry(std::string name, uint32_t attributes) {
  auto global_lock = global_critical_region_.Acquire();
  if (is_read_only()) {
    return nullptr;
  }
  std::lock_guard<xe::recursive_mutex> lock(device_->mutex());
  if (GetChild(name)) {
    // Already exists.
    return nullptr;

@ -90,10 +90,10 @@ Entry* Entry::CreateEntry(std::string name, uint32_t attributes) {
}

bool Entry::Delete(Entry* entry) {
  auto global_lock = global_critical_region_.Acquire();
  if (is_read_only()) {
    return false;
  }
  std::lock_guard<xe::recursive_mutex> lock(device_->mutex());
  if (entry->parent() != this) {
    return false;
  }
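The early-outs in CreateEntry and Delete lean on the guard being scoped: every return path releases the region automatically, so none of them needs an explicit unlock. A tiny sketch of that shape, with an illustrative read-only flag standing in for is_read_only():

#include <mutex>

std::recursive_mutex g_global_region;
bool g_read_only = true;  // stand-in for device-level is_read_only()

// All return paths drop the lock when the guard goes out of scope.
bool DeleteExample() {
  std::unique_lock<std::recursive_mutex> global_lock(g_global_region);
  if (g_read_only) {
    return false;  // guard released here
  }
  // ... unlink the child entry here ...
  return true;  // and here
}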
@ -16,6 +16,7 @@

#include "xenia/base/filesystem.h"
#include "xenia/base/mapped_memory.h"
#include "xenia/base/mutex.h"
#include "xenia/base/string_buffer.h"
#include "xenia/kernel/xobject.h"
#include "xenia/xbox.h"
@ -125,6 +126,7 @@ class Entry {
  }
  virtual bool DeleteEntryInternal(Entry* entry) { return false; }

  xe::global_critical_region global_critical_region_;
  Device* device_;
  Entry* parent_;
  std::string path_;
@ -27,20 +27,20 @@ VirtualFileSystem::~VirtualFileSystem() {
}

bool VirtualFileSystem::RegisterDevice(std::unique_ptr<Device> device) {
  std::lock_guard<xe::mutex> lock(mutex_);
  auto global_lock = global_critical_region_.Acquire();
  devices_.emplace_back(std::move(device));
  return true;
}

bool VirtualFileSystem::RegisterSymbolicLink(std::string path,
                                             std::string target) {
  std::lock_guard<xe::mutex> lock(mutex_);
  auto global_lock = global_critical_region_.Acquire();
  symlinks_.insert({path, target});
  return true;
}

bool VirtualFileSystem::UnregisterSymbolicLink(std::string path) {
  std::lock_guard<xe::mutex> lock(mutex_);
  auto global_lock = global_critical_region_.Acquire();
  auto it = symlinks_.find(path);
  if (it == symlinks_.end()) {
    return false;
@ -50,7 +50,7 @@ bool VirtualFileSystem::UnregisterSymbolicLink(std::string path) {
}

Entry* VirtualFileSystem::ResolvePath(std::string path) {
  std::lock_guard<xe::mutex> lock(mutex_);
  auto global_lock = global_critical_region_.Acquire();

  // Resolve relative paths
  std::string normalized_path(xe::filesystem::CanonicalizePath(path));
@ -46,7 +46,7 @@ class VirtualFileSystem {
                    FileAction* out_action);

 private:
  xe::mutex mutex_;
  xe::global_critical_region global_critical_region_;
  std::vector<std::unique_ptr<Device>> devices_;
  std::unordered_map<std::string, std::string> symlinks_;
};