Switching to xe::mutex.

Ben Vanik 2015-05-24 23:16:43 -07:00
parent 814ec82ad4
commit f5a2b85d42
31 changed files with 133 additions and 108 deletions
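The new synchronization types are not shown in this diff, so as a reading aid, here is a minimal sketch of what xenia/base/mutex.h is assumed to provide: drop-in replacements for the standard mutexes that still satisfy the BasicLockable requirements, which is why every call site below only changes the std::lock_guard template argument. This is an assumption, not the commit's actual implementation; the header could equally define custom lock types.

// Hypothetical sketch only; the real xenia/base/mutex.h is not part of this
// diff and may define these differently (for example as custom spin locks).
#include <mutex>

namespace xe {
// Assumed aliases meeting the BasicLockable requirements that
// std::lock_guard relies on at the call sites changed in this commit.
using mutex = std::mutex;
using recursive_mutex = std::recursive_mutex;
}  // namespace xe

// A call site then changes only the template argument:
//   std::lock_guard<xe::mutex> lock(lock_);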

View File

@ -56,8 +56,10 @@ const uint32_t kXmaContextSize = 64;
const uint32_t kXmaContextCount = 320;
AudioSystem::AudioSystem(Emulator* emulator)
: emulator_(emulator), memory_(emulator->memory()), worker_running_(false),
decoder_running_(false) {
: emulator_(emulator),
memory_(emulator->memory()),
worker_running_(false),
decoder_running_(false) {
memset(clients_, 0, sizeof(clients_));
for (size_t i = 0; i < maximum_client_count_; ++i) {
unused_clients_.push(i);
@ -249,7 +251,8 @@ void AudioSystem::DecoderThreadMain() {
// TODO - Probably need to move this, I think it might skip the very
// last packet (see the call to PreparePacket)
size_t input_size = (data.input_buffer_0_block_count +
data.input_buffer_1_block_count) * 2048;
data.input_buffer_1_block_count) *
2048;
size_t input_offset = (data.input_buffer_read_offset / 8 - 4);
size_t input_remaining = input_size - input_offset;
if (input_offset > input_size) {
@ -339,7 +342,7 @@ void AudioSystem::Shutdown() {
}
uint32_t AudioSystem::AllocateXmaContext() {
std::lock_guard<std::mutex> lock(lock_);
std::lock_guard<xe::mutex> lock(lock_);
for (uint32_t n = 0; n < kXmaContextCount; n++) {
XMAContext& context = xma_context_array_[n];
@ -353,7 +356,7 @@ uint32_t AudioSystem::AllocateXmaContext() {
}
void AudioSystem::ReleaseXmaContext(uint32_t guest_ptr) {
std::lock_guard<std::mutex> lock(lock_);
std::lock_guard<xe::mutex> lock(lock_);
// Find it in the list.
for (uint32_t n = 0; n < kXmaContextCount; n++) {
@ -365,9 +368,9 @@ void AudioSystem::ReleaseXmaContext(uint32_t guest_ptr) {
context.in_use = false;
auto context_ptr = memory()->TranslateVirtual(guest_ptr);
std::memset(context_ptr, 0, kXmaContextSize); // Zero it.
std::memset(context_ptr, 0, kXmaContextSize); // Zero it.
context.decoder->DiscardPacket();
context.lock.unlock();
}
}
@ -376,7 +379,7 @@ void AudioSystem::ReleaseXmaContext(uint32_t guest_ptr) {
X_STATUS AudioSystem::RegisterClient(uint32_t callback, uint32_t callback_arg,
size_t* out_index) {
assert_true(unused_clients_.size());
std::lock_guard<std::mutex> lock(lock_);
std::lock_guard<xe::mutex> lock(lock_);
auto index = unused_clients_.front();
@ -406,7 +409,7 @@ X_STATUS AudioSystem::RegisterClient(uint32_t callback, uint32_t callback_arg,
void AudioSystem::SubmitFrame(size_t index, uint32_t samples_ptr) {
SCOPE_profile_cpu_f("apu");
std::lock_guard<std::mutex> lock(lock_);
std::lock_guard<xe::mutex> lock(lock_);
assert_true(index < maximum_client_count_);
assert_true(clients_[index].driver != NULL);
(clients_[index].driver)->SubmitFrame(samples_ptr);
@ -416,7 +419,7 @@ void AudioSystem::SubmitFrame(size_t index, uint32_t samples_ptr) {
void AudioSystem::UnregisterClient(size_t index) {
SCOPE_profile_cpu_f("apu");
std::lock_guard<std::mutex> lock(lock_);
std::lock_guard<xe::mutex> lock(lock_);
assert_true(index < maximum_client_count_);
DestroyDriver(clients_[index].driver);
clients_[index] = {0};
@ -478,9 +481,10 @@ void AudioSystem::WriteRegister(uint32_t addr, uint64_t value) {
auto context_ptr = memory()->TranslateVirtual(context.guest_ptr);
XMAContextData data(context_ptr);
XELOGAPU("AudioSystem: kicking context %d (%d/%d bytes)", context_id,
data.input_buffer_read_offset, data.input_buffer_0_block_count
* XMAContextData::kBytesPerBlock);
XELOGAPU(
"AudioSystem: kicking context %d (%d/%d bytes)", context_id,
data.input_buffer_read_offset,
data.input_buffer_0_block_count * XMAContextData::kBytesPerBlock);
// Reset valid flags so our audio decoder knows to process this one
data.input_buffer_0_valid = data.input_buffer_0_ptr != 0;

View File

@ -51,25 +51,27 @@ struct XMAContextData {
uint32_t input_buffer_0_valid : 1; // +20bit, XMAIsInputBuffer0Valid
uint32_t input_buffer_1_valid : 1; // +21bit, XMAIsInputBuffer1Valid
uint32_t output_buffer_block_count : 5; // +22bit SizeWrite 256byte blocks
uint32_t output_buffer_write_offset : 5; // +27bit, XMAGetOutputBufferWriteOffset
// AKA OffsetWrite
uint32_t
output_buffer_write_offset : 5; // +27bit, XMAGetOutputBufferWriteOffset
// AKA OffsetWrite
// DWORD 1
uint32_t input_buffer_1_block_count : 12; // XMASetInputBuffer1, number of
// 2KB blocks.
uint32_t loop_subframe_end : 2; // +12bit, XMASetLoopData
uint32_t unk_dword_1_a : 3; // ? might be loop_subframe_skip
uint32_t loop_subframe_skip : 3; // +17bit, XMASetLoopData might be subframe_decode_count
uint32_t subframe_decode_count : 4; // +20bit might be subframe_skip_count
uint32_t unk_dword_1_b : 3; // ? NumSubframesToSkip/NumChannels(?)
uint32_t sample_rate : 2; // +27bit enum of sample rates
uint32_t is_stereo : 1; // +29bit might be NumChannels
uint32_t unk_dword_1_c : 1; // ? part of NumChannels?
uint32_t output_buffer_valid : 1; // +31bit, XMAIsOutputBufferValid
uint32_t loop_subframe_skip : 3; // +17bit, XMASetLoopData might be
// subframe_decode_count
uint32_t subframe_decode_count : 4; // +20bit might be subframe_skip_count
uint32_t unk_dword_1_b : 3; // ? NumSubframesToSkip/NumChannels(?)
uint32_t sample_rate : 2; // +27bit enum of sample rates
uint32_t is_stereo : 1; // +29bit might be NumChannels
uint32_t unk_dword_1_c : 1; // ? part of NumChannels?
uint32_t output_buffer_valid : 1; // +31bit, XMAIsOutputBufferValid
// DWORD 2
uint32_t input_buffer_read_offset : 30; // XMAGetInputBufferReadOffset
uint32_t unk_dword_2 : 2; // ErrorStatus/ErrorSet (?)
uint32_t input_buffer_read_offset : 30; // XMAGetInputBufferReadOffset
uint32_t unk_dword_2 : 2; // ErrorStatus/ErrorSet (?)
// DWORD 3
uint32_t loop_start : 26; // XMASetLoopData LoopStartOffset
@ -85,25 +87,25 @@ struct XMAContextData {
// DWORD 6
uint32_t input_buffer_1_ptr; // physical address
// DWORD 7
uint32_t output_buffer_ptr; // physical address
uint32_t output_buffer_ptr; // physical address
// DWORD 8
uint32_t overlap_add_ptr; // PtrOverlapAdd(?)
uint32_t overlap_add_ptr; // PtrOverlapAdd(?)
// DWORD 9
// +0bit, XMAGetOutputBufferReadOffset AKA WriteBufferOffsetRead
uint32_t output_buffer_read_offset : 5;
uint32_t unk_dword_9 : 27; // StopWhenDone/InterruptWhenDone(?)
uint32_t output_buffer_read_offset : 5;
uint32_t unk_dword_9 : 27; // StopWhenDone/InterruptWhenDone(?)
XMAContextData(const void* ptr) {
xe::copy_and_swap_32_aligned(reinterpret_cast<uint32_t*>(this),
reinterpret_cast<const uint32_t*>(ptr),
sizeof(XMAContextData) / 4);
reinterpret_cast<const uint32_t*>(ptr),
sizeof(XMAContextData) / 4);
}
void Store(void* ptr) {
xe::copy_and_swap_32_aligned(reinterpret_cast<uint32_t*>(ptr),
reinterpret_cast<const uint32_t*>(this),
sizeof(XMAContextData) / 4);
reinterpret_cast<const uint32_t*>(this),
sizeof(XMAContextData) / 4);
}
};
static_assert(sizeof(XMAContextData) == 4 * 10, "Must be packed");
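As an aside (not part of the commit), the constructor/Store() pair above gives a byte-swapped round trip between guest memory and this host-side view; a minimal usage sketch, assuming context_ptr is a host pointer to the guest context (e.g. obtained via memory()->TranslateVirtual() as in audio_system.cc above):

void TouchContext(uint8_t* context_ptr) {
  XMAContextData data(context_ptr);  // swaps big-endian guest data to host order
  data.input_buffer_0_valid = 0;     // bit fields can now be edited natively
  data.Store(context_ptr);           // swaps back and writes to guest memory
}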
@ -166,7 +168,7 @@ class AudioSystem {
kernel::object_ref<kernel::XHostThread> decoder_thread_;
xe::threading::Fence decoder_fence_;
std::mutex lock_;
xe::mutex lock_;
// Stored little endian, accessed through 0x7FEA....
union {
@ -189,16 +191,16 @@ class AudioSystem {
uint32_t register_file_[0xFFFF / 4];
};
struct XMAContext {
uint32_t guest_ptr;
std::mutex lock;
bool in_use;
uint32_t guest_ptr;
xe::mutex lock;
bool in_use;
AudioDecoder* decoder;
};
XMAContext xma_context_array_[320];
std::vector<uint32_t> xma_context_free_list_;
std::vector<uint32_t> xma_context_used_list_; // XMA contexts in use
std::vector<uint32_t> xma_context_used_list_; // XMA contexts in use
static const size_t maximum_client_count_ = 8;

View File

@ -13,6 +13,8 @@
#include <mutex>
#include <vector>
#include "xenia/base/mutex.h"
namespace xe {
template <class T, typename A>
@ -21,7 +23,7 @@ class TypePool {
~TypePool() { Reset(); }
void Reset() {
std::lock_guard<std::mutex> guard(lock_);
std::lock_guard<xe::mutex> guard(lock_);
for (auto it = list_.begin(); it != list_.end(); ++it) {
T* value = *it;
delete value;
@ -32,7 +34,7 @@ class TypePool {
T* Allocate(A arg0) {
T* result = 0;
{
std::lock_guard<std::mutex> guard(lock_);
std::lock_guard<xe::mutex> guard(lock_);
if (list_.size()) {
result = list_.back();
list_.pop_back();
@ -45,12 +47,12 @@ class TypePool {
}
void Release(T* value) {
std::lock_guard<std::mutex> guard(lock_);
std::lock_guard<xe::mutex> guard(lock_);
list_.push_back(value);
}
private:
std::mutex lock_;
xe::mutex lock_;
std::vector<T*> list_;
};
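For context (not part of the commit), a hypothetical usage sketch of TypePool; it assumes the elided branch of Allocate() constructs T(arg0) when the free list is empty, and that the header shown above lives at xenia/base/type_pool.h:

#include <cstddef>
#include "xenia/base/type_pool.h"  // assumed path for the header shown above

struct Buffer {
  explicit Buffer(size_t size) : size(size) {}
  size_t size;
};

void Example() {
  xe::TypePool<Buffer, size_t> pool;
  Buffer* b = pool.Allocate(4096);  // reuses a pooled Buffer or builds a new one
  pool.Release(b);                  // hands it back under the xe::mutex guard
  pool.Reset();                     // deletes everything still in the free list
}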

View File

@ -122,7 +122,7 @@ void* X64CodeCache::PlaceCode(uint32_t guest_address, void* machine_code,
uint8_t* unwind_entry_address = nullptr;
size_t unwind_table_slot = 0;
{
std::lock_guard<std::mutex> allocation_lock(allocation_mutex_);
std::lock_guard<xe::mutex> allocation_lock(allocation_mutex_);
low_mark = generated_code_offset_;

View File

@ -17,6 +17,8 @@
#include <mutex>
#include <vector>
#include "xenia/base/mutex.h"
namespace xe {
namespace cpu {
namespace backend {
@ -54,7 +56,7 @@ class X64CodeCache {
// Must be held when manipulating the offsets or counts of anything, to keep
// the tables consistent and ordered.
std::mutex allocation_mutex_;
xe::mutex allocation_mutex_;
// Value that the indirection table will be initialized with upon commit.
uint32_t indirection_default_value_;

View File

@ -18,7 +18,7 @@ namespace cpu {
EntryTable::EntryTable() = default;
EntryTable::~EntryTable() {
std::lock_guard<std::mutex> guard(lock_);
std::lock_guard<xe::mutex> guard(lock_);
for (auto it : map_) {
Entry* entry = it.second;
delete entry;
@ -26,7 +26,7 @@ EntryTable::~EntryTable() {
}
Entry* EntryTable::Get(uint32_t address) {
std::lock_guard<std::mutex> guard(lock_);
std::lock_guard<xe::mutex> guard(lock_);
const auto& it = map_.find(address);
Entry* entry = it != map_.end() ? it->second : nullptr;
if (entry) {
@ -74,7 +74,7 @@ Entry::Status EntryTable::GetOrCreate(uint32_t address, Entry** out_entry) {
}
std::vector<Function*> EntryTable::FindWithAddress(uint32_t address) {
std::lock_guard<std::mutex> guard(lock_);
std::lock_guard<xe::mutex> guard(lock_);
std::vector<Function*> fns;
for (auto& it : map_) {
Entry* entry = it.second;

View File

@ -14,6 +14,8 @@
#include <unordered_map>
#include <vector>
#include "xenia/base/mutex.h"
namespace xe {
namespace cpu {
@ -45,7 +47,7 @@ class EntryTable {
private:
// TODO(benvanik): replace with a better data structure.
std::mutex lock_;
xe::mutex lock_;
std::unordered_map<uint32_t, Entry*> map_;
};

View File

@ -61,7 +61,7 @@ void CheckGlobalLock(PPCContext* ppc_state, void* arg0, void* arg1) {
ppc_state->scratch = 0x8000;
}
void HandleGlobalLock(PPCContext* ppc_state, void* arg0, void* arg1) {
std::mutex* global_lock = reinterpret_cast<std::mutex*>(arg0);
auto global_lock = reinterpret_cast<xe::mutex*>(arg0);
volatile bool* global_lock_taken = reinterpret_cast<bool*>(arg1);
uint64_t value = ppc_state->scratch;
if (value == 0x8000) {

View File

@ -13,6 +13,7 @@
#include <memory>
#include <mutex>
#include "xenia/base/mutex.h"
#include "xenia/base/type_pool.h"
#include "xenia/cpu/frontend/context_info.h"
#include "xenia/cpu/function.h"
@ -32,7 +33,7 @@ namespace frontend {
class PPCTranslator;
struct PPCBuiltins {
std::mutex global_lock;
xe::mutex global_lock;
bool global_lock_taken;
FunctionInfo* check_global_lock;
FunctionInfo* handle_global_lock;

View File

@ -24,7 +24,7 @@ Function::Function(FunctionInfo* symbol_info)
Function::~Function() = default;
bool Function::AddBreakpoint(Breakpoint* breakpoint) {
std::lock_guard<std::mutex> guard(lock_);
std::lock_guard<xe::mutex> guard(lock_);
bool found = false;
for (auto other : breakpoints_) {
if (other == breakpoint) {
@ -41,7 +41,7 @@ bool Function::AddBreakpoint(Breakpoint* breakpoint) {
}
bool Function::RemoveBreakpoint(Breakpoint* breakpoint) {
std::lock_guard<std::mutex> guard(lock_);
std::lock_guard<xe::mutex> guard(lock_);
for (auto it = breakpoints_.begin(); it != breakpoints_.end(); ++it) {
if (*it == breakpoint) {
if (!RemoveBreakpointImpl(breakpoint)) {
@ -54,7 +54,7 @@ bool Function::RemoveBreakpoint(Breakpoint* breakpoint) {
}
Breakpoint* Function::FindBreakpoint(uint32_t address) {
std::lock_guard<std::mutex> guard(lock_);
std::lock_guard<xe::mutex> guard(lock_);
Breakpoint* result = nullptr;
for (auto breakpoint : breakpoints_) {
if (breakpoint->address() == address) {

View File

@ -14,6 +14,7 @@
#include <mutex>
#include <vector>
#include "xenia/base/mutex.h"
#include "xenia/cpu/debug_info.h"
#include "xenia/cpu/thread_state.h"
#include "xenia/debug/breakpoint.h"
@ -53,7 +54,7 @@ class Function {
std::unique_ptr<DebugInfo> debug_info_;
// TODO(benvanik): move elsewhere? DebugData?
std::mutex lock_;
xe::mutex lock_;
std::vector<debug::Breakpoint*> breakpoints_;
};

View File

@ -15,6 +15,8 @@
#include <mutex>
#include <vector>
#include "xenia/base/mutex.h"
namespace xe {
namespace cpu {
@ -85,7 +87,7 @@ class MMIOHandler {
std::vector<MMIORange> mapped_ranges_;
// TODO(benvanik): data structure magic.
std::mutex write_watch_mutex_;
xe::mutex write_watch_mutex_;
std::list<WriteWatchEntry*> write_watches_;
static MMIOHandler* global_handler_;

View File

@ -151,7 +151,7 @@ SymbolInfo::Status Module::DefineVariable(VariableInfo* symbol_info) {
void Module::ForEachFunction(std::function<void(FunctionInfo*)> callback) {
SCOPE_profile_cpu_f("cpu");
std::lock_guard<std::mutex> guard(lock_);
std::lock_guard<xe::mutex> guard(lock_);
for (auto& symbol_info : list_) {
if (symbol_info->type() == SymbolInfo::TYPE_FUNCTION) {
FunctionInfo* info = static_cast<FunctionInfo*>(symbol_info.get());
@ -163,7 +163,7 @@ void Module::ForEachFunction(std::function<void(FunctionInfo*)> callback) {
void Module::ForEachFunction(size_t since, size_t& version,
std::function<void(FunctionInfo*)> callback) {
SCOPE_profile_cpu_f("cpu");
std::lock_guard<std::mutex> guard(lock_);
std::lock_guard<xe::mutex> guard(lock_);
size_t count = list_.size();
version = count;
for (size_t n = since; n < count; n++) {

View File

@ -16,8 +16,9 @@
#include <unordered_map>
#include <vector>
#include "xenia/memory.h"
#include "xenia/base/mutex.h"
#include "xenia/cpu/symbol_info.h"
#include "xenia/memory.h"
namespace xe {
namespace cpu {
@ -62,7 +63,7 @@ class Module {
private:
// TODO(benvanik): replace with a better data structure.
std::mutex lock_;
xe::mutex lock_;
std::unordered_map<uint32_t, SymbolInfo*> map_;
std::vector<std::unique_ptr<SymbolInfo>> list_;
};

View File

@ -89,7 +89,7 @@ Processor::~Processor() {
}
{
std::lock_guard<std::mutex> guard(modules_lock_);
std::lock_guard<xe::mutex> guard(modules_lock_);
modules_.clear();
}
@ -159,13 +159,13 @@ bool Processor::Setup() {
}
bool Processor::AddModule(std::unique_ptr<Module> module) {
std::lock_guard<std::mutex> guard(modules_lock_);
std::lock_guard<xe::mutex> guard(modules_lock_);
modules_.push_back(std::move(module));
return true;
}
Module* Processor::GetModule(const char* name) {
std::lock_guard<std::mutex> guard(modules_lock_);
std::lock_guard<xe::mutex> guard(modules_lock_);
for (const auto& module : modules_) {
if (module->name() == name) {
return module.get();
@ -175,7 +175,7 @@ Module* Processor::GetModule(const char* name) {
}
std::vector<Module*> Processor::GetModules() {
std::lock_guard<std::mutex> guard(modules_lock_);
std::lock_guard<xe::mutex> guard(modules_lock_);
std::vector<Module*> clone(modules_.size());
for (const auto& module : modules_) {
clone.push_back(module.get());
@ -242,7 +242,7 @@ bool Processor::LookupFunctionInfo(uint32_t address,
// Find the module that contains the address.
Module* code_module = nullptr;
{
std::lock_guard<std::mutex> guard(modules_lock_);
std::lock_guard<xe::mutex> guard(modules_lock_);
// TODO(benvanik): sort by code address (if contiguous) so can bsearch.
// TODO(benvanik): cache last module low/high, as likely to be in there.
for (const auto& module : modules_) {
@ -378,7 +378,7 @@ uint64_t Processor::ExecuteInterrupt(uint32_t cpu, uint32_t address,
SCOPE_profile_cpu_f("cpu");
// Acquire lock on interrupt thread (we can only dispatch one at a time).
std::lock_guard<std::mutex> lock(interrupt_thread_lock_);
std::lock_guard<xe::mutex> lock(interrupt_thread_lock_);
// Set 0x10C(r13) to the current CPU ID.
xe::store_and_swap<uint8_t>(

View File

@ -13,6 +13,7 @@
#include <mutex>
#include <vector>
#include "xenia/base/mutex.h"
#include "xenia/cpu/backend/backend.h"
#include "xenia/cpu/entry_table.h"
#include "xenia/cpu/export_resolver.h"
@ -90,13 +91,13 @@ class Processor {
ExportResolver* export_resolver_;
EntryTable entry_table_;
std::mutex modules_lock_;
xe::mutex modules_lock_;
std::vector<std::unique_ptr<Module>> modules_;
Module* builtin_module_;
uint32_t next_builtin_address_;
Irql irql_;
std::mutex interrupt_thread_lock_;
xe::mutex interrupt_thread_lock_;
ThreadState* interrupt_thread_state_;
uint32_t interrupt_thread_block_;
};

View File

@ -196,7 +196,7 @@ void TextureCache::EvictAllTextures() {
}
{
std::lock_guard<std::mutex> lock(invalidated_textures_mutex_);
std::lock_guard<xe::mutex> lock(invalidated_textures_mutex_);
invalidated_textures_sets_[0].clear();
invalidated_textures_sets_[1].clear();
}

View File

@ -14,6 +14,7 @@
#include <unordered_map>
#include <vector>
#include "xenia/base/mutex.h"
#include "xenia/gpu/gl4/blitter.h"
#include "xenia/gpu/gl4/circular_buffer.h"
#include "xenia/gpu/gl4/gl_context.h"
@ -100,7 +101,7 @@ class TextureCache {
std::vector<ReadBufferTexture*> read_buffer_textures_;
std::mutex invalidated_textures_mutex_;
xe::mutex invalidated_textures_mutex_;
std::vector<TextureEntry*>* invalidated_textures_;
std::vector<TextureEntry*> invalidated_textures_sets_[2];
};

View File

@ -81,7 +81,7 @@ X_RESULT XXMPApp::XMPCreateTitlePlaylist(
xe::store_and_swap<uint32_t>(memory_->TranslateVirtual(out_playlist_handle),
playlist->handle);
std::lock_guard<std::mutex> lock(mutex_);
std::lock_guard<xe::mutex> lock(mutex_);
playlists_.insert({playlist->handle, playlist.get()});
playlist.release();
return X_ERROR_SUCCESS;
@ -89,7 +89,7 @@ X_RESULT XXMPApp::XMPCreateTitlePlaylist(
X_RESULT XXMPApp::XMPDeleteTitlePlaylist(uint32_t playlist_handle) {
XELOGD("XMPDeleteTitlePlaylist(%.8X)", playlist_handle);
std::lock_guard<std::mutex> lock(mutex_);
std::lock_guard<xe::mutex> lock(mutex_);
auto it = playlists_.find(playlist_handle);
if (it == playlists_.end()) {
XELOGE("Playlist %.8X not found", playlist_handle);
@ -109,7 +109,7 @@ X_RESULT XXMPApp::XMPPlayTitlePlaylist(uint32_t playlist_handle,
XELOGD("XMPPlayTitlePlaylist(%.8X, %.8X)", playlist_handle, song_handle);
Playlist* playlist = nullptr;
{
std::lock_guard<std::mutex> lock(mutex_);
std::lock_guard<xe::mutex> lock(mutex_);
auto it = playlists_.find(playlist_handle);
if (it == playlists_.end()) {
XELOGE("Playlist %.8X not found", playlist_handle);

View File

@ -16,6 +16,7 @@
#include <unordered_map>
#include <vector>
#include "xenia/base/mutex.h"
#include "xenia/kernel/app.h"
#include "xenia/kernel/kernel_state.h"
@ -101,7 +102,7 @@ class XXMPApp : public XApp {
Playlist* active_playlist_;
int active_song_index_;
std::mutex mutex_;
xe::mutex mutex_;
std::unordered_map<uint32_t, Playlist*> playlists_;
uint32_t next_playlist_handle_;
uint32_t next_song_handle_;

View File

@ -120,7 +120,7 @@ std::unique_ptr<ContentPackage> ContentManager::ResolvePackage(
return nullptr;
}
std::lock_guard<std::recursive_mutex> lock(content_mutex_);
std::lock_guard<xe::recursive_mutex> lock(content_mutex_);
auto package = std::make_unique<ContentPackage>(kernel_state_, root_name,
data, package_path);
@ -134,7 +134,7 @@ bool ContentManager::ContentExists(const XCONTENT_DATA& data) {
X_RESULT ContentManager::CreateContent(std::string root_name,
const XCONTENT_DATA& data) {
std::lock_guard<std::recursive_mutex> lock(content_mutex_);
std::lock_guard<xe::recursive_mutex> lock(content_mutex_);
if (open_packages_.count(root_name)) {
// Already content open with this root name.
@ -161,7 +161,7 @@ X_RESULT ContentManager::CreateContent(std::string root_name,
X_RESULT ContentManager::OpenContent(std::string root_name,
const XCONTENT_DATA& data) {
std::lock_guard<std::recursive_mutex> lock(content_mutex_);
std::lock_guard<xe::recursive_mutex> lock(content_mutex_);
if (open_packages_.count(root_name)) {
// Already content open with this root name.
@ -184,7 +184,7 @@ X_RESULT ContentManager::OpenContent(std::string root_name,
}
X_RESULT ContentManager::CloseContent(std::string root_name) {
std::lock_guard<std::recursive_mutex> lock(content_mutex_);
std::lock_guard<xe::recursive_mutex> lock(content_mutex_);
auto it = open_packages_.find(root_name);
if (it == open_packages_.end()) {
@ -200,7 +200,7 @@ X_RESULT ContentManager::CloseContent(std::string root_name) {
X_RESULT ContentManager::GetContentThumbnail(const XCONTENT_DATA& data,
std::vector<uint8_t>* buffer) {
std::lock_guard<std::recursive_mutex> lock(content_mutex_);
std::lock_guard<xe::recursive_mutex> lock(content_mutex_);
auto package_path = ResolvePackagePath(data);
auto thumb_path = xe::join_paths(package_path, kThumbnailFileName);
if (xe::fs::PathExists(thumb_path)) {
@ -219,7 +219,7 @@ X_RESULT ContentManager::GetContentThumbnail(const XCONTENT_DATA& data,
X_RESULT ContentManager::SetContentThumbnail(const XCONTENT_DATA& data,
std::vector<uint8_t> buffer) {
std::lock_guard<std::recursive_mutex> lock(content_mutex_);
std::lock_guard<xe::recursive_mutex> lock(content_mutex_);
auto package_path = ResolvePackagePath(data);
if (xe::fs::PathExists(package_path)) {
auto thumb_path = xe::join_paths(package_path, kThumbnailFileName);
@ -233,7 +233,7 @@ X_RESULT ContentManager::SetContentThumbnail(const XCONTENT_DATA& data,
}
X_RESULT ContentManager::DeleteContent(const XCONTENT_DATA& data) {
std::lock_guard<std::recursive_mutex> lock(content_mutex_);
std::lock_guard<xe::recursive_mutex> lock(content_mutex_);
auto package_path = ResolvePackagePath(data);
if (xe::fs::PathExists(package_path)) {

View File

@ -17,6 +17,7 @@
#include <vector>
#include "xenia/base/memory.h"
#include "xenia/base/mutex.h"
#include "xenia/xbox.h"
namespace xe {
@ -87,7 +88,7 @@ class ContentManager {
KernelState* kernel_state_;
std::wstring root_path_;
std::recursive_mutex content_mutex_;
xe::recursive_mutex content_mutex_;
std::unordered_map<std::string, ContentPackage*> open_packages_;
};

View File

@ -12,6 +12,7 @@
#include <mutex>
#include "xenia/base/mutex.h"
#include "xenia/xbox.h"
namespace xe {
@ -36,7 +37,7 @@ class Dispatcher {
private:
KernelState* kernel_state_;
std::mutex lock_;
xe::mutex lock_;
NativeList* dpc_list_;
};

View File

@ -42,7 +42,7 @@ void XNotifyListener::EnqueueNotification(XNotificationID id, uint32_t data) {
return;
}
std::lock_guard<std::mutex> lock(lock_);
std::lock_guard<xe::mutex> lock(lock_);
if (notifications_.count(id)) {
// Already exists. Overwrite.
notifications_[id] = data;
@ -56,7 +56,7 @@ void XNotifyListener::EnqueueNotification(XNotificationID id, uint32_t data) {
bool XNotifyListener::DequeueNotification(XNotificationID* out_id,
uint32_t* out_data) {
std::lock_guard<std::mutex> lock(lock_);
std::lock_guard<xe::mutex> lock(lock_);
bool dequeued = false;
if (notification_count_) {
dequeued = true;
@ -74,7 +74,7 @@ bool XNotifyListener::DequeueNotification(XNotificationID* out_id,
bool XNotifyListener::DequeueNotification(XNotificationID id,
uint32_t* out_data) {
std::lock_guard<std::mutex> lock(lock_);
std::lock_guard<xe::mutex> lock(lock_);
bool dequeued = false;
if (notification_count_) {
dequeued = true;

View File

@ -13,6 +13,7 @@
#include <mutex>
#include <unordered_map>
#include "xenia/base/mutex.h"
#include "xenia/kernel/xobject.h"
#include "xenia/xbox.h"
@ -36,7 +37,7 @@ class XNotifyListener : public XObject {
private:
HANDLE wait_handle_;
std::mutex lock_;
xe::mutex lock_;
std::unordered_map<XNotificationID, uint32_t> notifications_;
size_t notification_count_;
uint64_t mask_;

View File

@ -13,6 +13,7 @@
#include "xenia/base/logging.h"
#include "xenia/base/math.h"
#include "xenia/base/mutex.h"
#include "xenia/base/threading.h"
#include "xenia/cpu/cpu.h"
#include "xenia/kernel/kernel_state.h"
@ -33,7 +34,7 @@ using namespace xe::cpu;
uint32_t next_xthread_id = 0;
thread_local XThread* current_thread_tls;
std::mutex critical_region_;
xe::mutex critical_region_;
XThread* shared_kernel_thread_ = 0;
XThread::XThread(KernelState* kernel_state, uint32_t stack_size,

View File

@ -97,7 +97,7 @@ class XThread : public XObject {
std::string name_;
std::atomic<uint32_t> irql_;
std::mutex apc_lock_;
xe::mutex apc_lock_;
NativeList* apc_list_;
object_ref<XEvent> event_;

View File

@ -150,6 +150,7 @@ SHIM_CALL NtDuplicateObject_shim(PPCContext* ppc_state, KernelState* state) {
auto object = state->object_table()->LookupObject<XObject>(handle);
if (object) {
object->Retain();
object->RetainHandle();
uint32_t new_handle = object->handle();
if (new_handle_ptr) {

View File

@ -9,6 +9,7 @@
#include "xenia/base/atomic.h"
#include "xenia/base/logging.h"
#include "xenia/base/mutex.h"
#include "xenia/cpu/processor.h"
#include "xenia/kernel/dispatcher.h"
#include "xenia/kernel/kernel_state.h"
@ -1290,7 +1291,7 @@ SHIM_CALL KeRemoveQueueDpc_shim(PPCContext* ppc_state, KernelState* state) {
SHIM_SET_RETURN_64(result ? 1 : 0);
}
std::mutex global_list_mutex_;
xe::mutex global_list_mutex_;
// http://www.nirsoft.net/kernel_struct/vista/SLIST_HEADER.html
SHIM_CALL InterlockedPopEntrySList_shim(PPCContext* ppc_state,
@ -1299,7 +1300,7 @@ SHIM_CALL InterlockedPopEntrySList_shim(PPCContext* ppc_state,
XELOGD("InterlockedPopEntrySList(%.8X)", plist_ptr);
std::lock_guard<std::mutex> lock(global_list_mutex_);
std::lock_guard<xe::mutex> lock(global_list_mutex_);
uint8_t* p = state->memory()->TranslateVirtual(plist_ptr);
auto first = xe::load_and_swap<uint32_t>(p);

View File

@ -69,9 +69,7 @@ uint32_t get_page_count(uint32_t value, uint32_t page_size) {
static Memory* active_memory_ = nullptr;
void CrashDump() {
active_memory_->DumpMap();
}
void CrashDump() { active_memory_->DumpMap(); }
Memory::Memory()
: virtual_membase_(nullptr),
@ -491,7 +489,7 @@ void BaseHeap::Dispose() {
}
void BaseHeap::DumpMap() {
std::lock_guard<std::recursive_mutex> lock(heap_mutex_);
std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
XELOGE("------------------------------------------------------------------");
XELOGE("Heap: %.8X-%.8X", heap_base_, heap_base_ + heap_size_);
XELOGE("------------------------------------------------------------------");
@ -565,7 +563,7 @@ bool BaseHeap::AllocFixed(uint32_t base_address, uint32_t size,
return false;
}
std::lock_guard<std::recursive_mutex> lock(heap_mutex_);
std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
// - If we are reserving the entire range requested must not be already
// reserved.
@ -643,7 +641,7 @@ bool BaseHeap::AllocRange(uint32_t low_address, uint32_t high_address,
high_page_number =
std::min(uint32_t(page_table_.size()) - 1, high_page_number);
std::lock_guard<std::recursive_mutex> lock(heap_mutex_);
std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
// Find a free page range.
// The base page must match the requested alignment, so we first scan for
@ -765,7 +763,7 @@ bool BaseHeap::Decommit(uint32_t address, uint32_t size) {
std::min(uint32_t(page_table_.size()) - 1, start_page_number);
end_page_number = std::min(uint32_t(page_table_.size()) - 1, end_page_number);
std::lock_guard<std::recursive_mutex> lock(heap_mutex_);
std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
// Release from host.
// TODO(benvanik): find a way to actually decommit memory;
@ -789,7 +787,7 @@ bool BaseHeap::Decommit(uint32_t address, uint32_t size) {
}
bool BaseHeap::Release(uint32_t base_address, uint32_t* out_region_size) {
std::lock_guard<std::recursive_mutex> lock(heap_mutex_);
std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
// Given address must be a region base address.
uint32_t base_page_number = (base_address - heap_base_) / page_size_;
@ -844,7 +842,7 @@ bool BaseHeap::Protect(uint32_t address, uint32_t size, uint32_t protect) {
std::min(uint32_t(page_table_.size()) - 1, start_page_number);
end_page_number = std::min(uint32_t(page_table_.size()) - 1, end_page_number);
std::lock_guard<std::recursive_mutex> lock(heap_mutex_);
std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
// Ensure all pages are in the same reserved region and all are committed.
uint32_t first_base_address = UINT_MAX;
@ -897,7 +895,7 @@ bool BaseHeap::QueryRegionInfo(uint32_t base_address,
return false;
}
std::lock_guard<std::recursive_mutex> lock(heap_mutex_);
std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
auto start_page_entry = page_table_[start_page_number];
out_info->base_address = base_address;
@ -948,7 +946,7 @@ bool BaseHeap::QuerySize(uint32_t address, uint32_t* out_size) {
*out_size = 0;
return false;
}
std::lock_guard<std::recursive_mutex> lock(heap_mutex_);
std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
auto page_entry = page_table_[page_number];
*out_size = page_entry.region_page_count * page_size_;
return true;
@ -961,7 +959,7 @@ bool BaseHeap::QueryProtect(uint32_t address, uint32_t* out_protect) {
*out_protect = 0;
return false;
}
std::lock_guard<std::recursive_mutex> lock(heap_mutex_);
std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
auto page_entry = page_table_[page_number];
*out_protect = page_entry.current_protect;
return true;
@ -1009,7 +1007,7 @@ bool PhysicalHeap::Alloc(uint32_t size, uint32_t alignment,
size = xe::round_up(size, page_size_);
alignment = xe::round_up(alignment, page_size_);
std::lock_guard<std::recursive_mutex> lock(heap_mutex_);
std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
// Allocate from parent heap (gets our physical address in 0-512mb).
uint32_t parent_low_address = GetPhysicalAddress(heap_base_);
@ -1047,7 +1045,7 @@ bool PhysicalHeap::AllocFixed(uint32_t base_address, uint32_t size,
size = xe::round_up(size, page_size_);
alignment = xe::round_up(alignment, page_size_);
std::lock_guard<std::recursive_mutex> lock(heap_mutex_);
std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
// Allocate from parent heap (gets our physical address in 0-512mb).
// NOTE: this can potentially overwrite heap contents if there are already
@ -1088,7 +1086,7 @@ bool PhysicalHeap::AllocRange(uint32_t low_address, uint32_t high_address,
size = xe::round_up(size, page_size_);
alignment = xe::round_up(alignment, page_size_);
std::lock_guard<std::recursive_mutex> lock(heap_mutex_);
std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
// Allocate from parent heap (gets our physical address in 0-512mb).
low_address = std::max(heap_base_, low_address);
@ -1122,7 +1120,7 @@ bool PhysicalHeap::AllocRange(uint32_t low_address, uint32_t high_address,
}
bool PhysicalHeap::Decommit(uint32_t address, uint32_t size) {
std::lock_guard<std::recursive_mutex> lock(heap_mutex_);
std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
uint32_t parent_address = GetPhysicalAddress(address);
if (!parent_heap_->Decommit(parent_address, size)) {
XELOGE("PhysicalHeap::Decommit failed due to parent heap failure");
@ -1132,7 +1130,7 @@ bool PhysicalHeap::Decommit(uint32_t address, uint32_t size) {
}
bool PhysicalHeap::Release(uint32_t base_address, uint32_t* out_region_size) {
std::lock_guard<std::recursive_mutex> lock(heap_mutex_);
std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
uint32_t parent_base_address = GetPhysicalAddress(base_address);
if (!parent_heap_->Release(parent_base_address, out_region_size)) {
XELOGE("PhysicalHeap::Release failed due to parent heap failure");
@ -1142,7 +1140,7 @@ bool PhysicalHeap::Release(uint32_t base_address, uint32_t* out_region_size) {
}
bool PhysicalHeap::Protect(uint32_t address, uint32_t size, uint32_t protect) {
std::lock_guard<std::recursive_mutex> lock(heap_mutex_);
std::lock_guard<xe::recursive_mutex> lock(heap_mutex_);
uint32_t parent_address = GetPhysicalAddress(address);
bool parent_result = parent_heap_->Protect(parent_address, size, protect);
if (!parent_result) {

View File

@ -16,6 +16,7 @@
#include <string>
#include <vector>
#include "xenia/base/mutex.h"
#include "xenia/base/platform.h"
#include "xenia/cpu/mmio_handler.h"
@ -115,7 +116,7 @@ class BaseHeap {
uint32_t heap_size_;
uint32_t page_size_;
std::vector<PageEntry> page_table_;
std::recursive_mutex heap_mutex_;
xe::recursive_mutex heap_mutex_;
};
class VirtualHeap : public BaseHeap {