From 8fc6e92ef187d9be177e05cdb94ed55d5339a308 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Mon, 21 Dec 2020 17:02:04 -0800
Subject: [PATCH 01/20] hle: service: nfp: Remove incorrect signaling behavior in GetDeviceState.

---
 src/core/hle/service/nfp/nfp.cpp | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/src/core/hle/service/nfp/nfp.cpp b/src/core/hle/service/nfp/nfp.cpp
index 5557da72e5..641bcadeae 100644
--- a/src/core/hle/service/nfp/nfp.cpp
+++ b/src/core/hle/service/nfp/nfp.cpp
@@ -190,12 +190,6 @@ private:
     void GetDeviceState(Kernel::HLERequestContext& ctx) {
         LOG_DEBUG(Service_NFP, "called");
 
-        auto nfc_event = nfp_interface.GetNFCEvent();
-        if (!nfc_event->ShouldWait(&ctx.GetThread()) && !has_attached_handle) {
-            device_state = DeviceState::TagFound;
-            nfc_event->Clear();
-        }
-
         IPC::ResponseBuilder rb{ctx, 3};
         rb.Push(RESULT_SUCCESS);
         rb.Push<u32>(static_cast<u32>(device_state));

From 1ae883435d429d7e2d5b6bcc35d6d73a17411544 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Sun, 27 Dec 2020 20:56:57 -0800
Subject: [PATCH 02/20] core: hle: kernel: Begin moving common SVC results to its own header.

---
 src/core/CMakeLists.txt           |  1 +
 src/core/hle/kernel/svc_results.h | 20 ++++++++++++++++++++
 2 files changed, 21 insertions(+)
 create mode 100644 src/core/hle/kernel/svc_results.h

diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 893df433a8..d0c1beaf7e 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -210,6 +210,7 @@ add_library(core STATIC
     hle/kernel/shared_memory.h
     hle/kernel/svc.cpp
     hle/kernel/svc.h
+    hle/kernel/svc_results.h
     hle/kernel/svc_types.h
     hle/kernel/svc_wrap.h
diff --git a/src/core/hle/kernel/svc_results.h b/src/core/hle/kernel/svc_results.h
new file mode 100644
index 0000000000..78282f0213
--- /dev/null
+++ b/src/core/hle/kernel/svc_results.h
@@ -0,0 +1,20 @@
+// Copyright 2020 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "core/hle/result.h"
+
+namespace Kernel::Svc {
+
+constexpr ResultCode ResultTerminationRequested{ErrorModule::Kernel, 59};
+constexpr ResultCode ResultInvalidAddress{ErrorModule::Kernel, 102};
+constexpr ResultCode ResultInvalidCurrentMemory{ErrorModule::Kernel, 106};
+constexpr ResultCode ResultInvalidHandle{ErrorModule::Kernel, 114};
+constexpr ResultCode ResultTimedOut{ErrorModule::Kernel, 117};
+constexpr ResultCode ResultCancelled{ErrorModule::Kernel, 118};
+constexpr ResultCode ResultInvalidEnumValue{ErrorModule::Kernel, 120};
+constexpr ResultCode ResultInvalidState{ErrorModule::Kernel, 125};
+
+} // namespace Kernel::Svc

From 35c3c078e3c079c0a9192b411e20c71b122ff057 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Mon, 21 Dec 2020 22:36:53 -0800
Subject: [PATCH 03/20] core: hle: kernel: Update KSynchronizationObject.
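This patch reworks the kernel's wait/signal machinery around a new KSynchronizationObject base class: the per-object ShouldWait()/Acquire() virtuals and the separate Synchronization manager are dropped in favor of a single IsSignaled() query plus a static KSynchronizationObject::Wait() that owns the entire sleep/wake sequence. Condensed from the svcWaitSynchronization hunk later in this patch, the new call pattern looks like the following sketch; the angle-bracket template arguments were lost in extraction and are restored here from context, and error logging is omitted:

    // Resolve each handle to a raw KSynchronizationObject pointer.
    std::vector<KSynchronizationObject*> objects(handle_count);
    const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
    for (u64 i = 0; i < handle_count; ++i) {
        const Handle handle = memory.Read32(handles_address + i * sizeof(Handle));
        const auto object = handle_table.Get<KSynchronizationObject>(handle);
        if (object == nullptr) {
            return ERR_INVALID_HANDLE;
        }
        objects[i] = object.get();
    }

    // Blocks until one object signals, the timeout elapses (Svc::ResultTimedOut),
    // or the wait is cancelled (Svc::ResultCancelled); *index receives the index
    // of the object that woke the thread.
    s32 index = -1;
    const ResultCode result = KSynchronizationObject::Wait(
        kernel, &index, objects.data(), static_cast<s32>(objects.size()), nano_seconds);

Centralizing the wait in one static function is what lets the diff below delete synchronization.cpp/.h and synchronization_object.cpp/.h outright, and replace each object's ShouldWait()/Acquire() pair with a single IsSignaled() override.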
--- src/core/CMakeLists.txt | 6 +- src/core/hle/kernel/address_arbiter.cpp | 10 +- src/core/hle/kernel/client_port.cpp | 3 - src/core/hle/kernel/client_session.cpp | 11 +- src/core/hle/kernel/client_session.h | 8 +- src/core/hle/kernel/errors.h | 3 + src/core/hle/kernel/k_scheduler.cpp | 5 +- .../hle/kernel/k_synchronization_object.cpp | 171 ++++++++++++++++++ .../hle/kernel/k_synchronization_object.h | 58 ++++++ src/core/hle/kernel/kernel.cpp | 15 +- src/core/hle/kernel/kernel.h | 6 - src/core/hle/kernel/mutex.cpp | 4 +- src/core/hle/kernel/process.cpp | 21 +-- src/core/hle/kernel/process.h | 14 +- src/core/hle/kernel/readable_event.cpp | 18 +- src/core/hle/kernel/readable_event.h | 12 +- src/core/hle/kernel/server_port.cpp | 14 +- src/core/hle/kernel/server_port.h | 7 +- src/core/hle/kernel/server_session.cpp | 23 +-- src/core/hle/kernel/server_session.h | 12 +- src/core/hle/kernel/session.cpp | 11 +- src/core/hle/kernel/session.h | 8 +- src/core/hle/kernel/svc.cpp | 47 ++--- src/core/hle/kernel/svc_wrap.h | 9 +- src/core/hle/kernel/synchronization.cpp | 116 ------------ src/core/hle/kernel/synchronization.h | 44 ----- .../hle/kernel/synchronization_object.cpp | 49 ----- src/core/hle/kernel/synchronization_object.h | 77 -------- src/core/hle/kernel/thread.cpp | 66 ++----- src/core/hle/kernel/thread.h | 131 +++++--------- src/core/hle/service/sm/sm.cpp | 3 - src/yuzu/debugger/wait_tree.cpp | 19 +- src/yuzu/debugger/wait_tree.h | 17 +- 33 files changed, 397 insertions(+), 621 deletions(-) create mode 100644 src/core/hle/kernel/k_synchronization_object.cpp create mode 100644 src/core/hle/kernel/k_synchronization_object.h delete mode 100644 src/core/hle/kernel/synchronization.cpp delete mode 100644 src/core/hle/kernel/synchronization.h delete mode 100644 src/core/hle/kernel/synchronization_object.cpp delete mode 100644 src/core/hle/kernel/synchronization_object.h diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index d0c1beaf7e..548b3911e7 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -164,6 +164,8 @@ add_library(core STATIC hle/kernel/k_scheduler_lock.h hle/kernel/k_scoped_lock.h hle/kernel/k_scoped_scheduler_lock_and_sleep.h + hle/kernel/k_synchronization_object.cpp + hle/kernel/k_synchronization_object.h hle/kernel/kernel.cpp hle/kernel/kernel.h hle/kernel/memory/address_space_info.cpp @@ -213,10 +215,6 @@ add_library(core STATIC hle/kernel/svc_results.h hle/kernel/svc_types.h hle/kernel/svc_wrap.h - hle/kernel/synchronization_object.cpp - hle/kernel/synchronization_object.h - hle/kernel/synchronization.cpp - hle/kernel/synchronization.h hle/kernel/thread.cpp hle/kernel/thread.h hle/kernel/time_manager.cpp diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp index 20ffa7d476..23e1ef032f 100644 --- a/src/core/hle/kernel/address_arbiter.cpp +++ b/src/core/hle/kernel/address_arbiter.cpp @@ -37,7 +37,7 @@ void AddressArbiter::WakeThreads(const std::vector>& wai waiting_threads[i]->SetSynchronizationResults(nullptr, RESULT_SUCCESS); RemoveThread(waiting_threads[i]); waiting_threads[i]->WaitForArbitration(false); - waiting_threads[i]->ResumeFromWait(); + waiting_threads[i]->Wakeup(); } } @@ -160,7 +160,7 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6 { KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout); - if (current_thread->IsPendingTermination()) { + if (current_thread->IsTerminationRequested()) { lock.CancelSleep(); return ERR_THREAD_TERMINATING; 
} @@ -201,7 +201,7 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6 current_thread->SetArbiterWaitAddress(address); InsertThread(SharedFrom(current_thread)); - current_thread->SetStatus(ThreadStatus::WaitArb); + current_thread->SetState(ThreadStatus::WaitArb); current_thread->WaitForArbitration(true); } @@ -230,7 +230,7 @@ ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 t { KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout); - if (current_thread->IsPendingTermination()) { + if (current_thread->IsTerminationRequested()) { lock.CancelSleep(); return ERR_THREAD_TERMINATING; } @@ -256,7 +256,7 @@ ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 t current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT); current_thread->SetArbiterWaitAddress(address); InsertThread(SharedFrom(current_thread)); - current_thread->SetStatus(ThreadStatus::WaitArb); + current_thread->SetState(ThreadStatus::WaitArb); current_thread->WaitForArbitration(true); } diff --git a/src/core/hle/kernel/client_port.cpp b/src/core/hle/kernel/client_port.cpp index 8aff2227a0..f8f005f155 100644 --- a/src/core/hle/kernel/client_port.cpp +++ b/src/core/hle/kernel/client_port.cpp @@ -33,9 +33,6 @@ ResultVal> ClientPort::Connect() { server_port->AppendPendingSession(std::move(server)); } - // Wake the threads waiting on the ServerPort - server_port->Signal(); - return MakeResult(std::move(client)); } diff --git a/src/core/hle/kernel/client_session.cpp b/src/core/hle/kernel/client_session.cpp index be9eba5196..e8e52900dd 100644 --- a/src/core/hle/kernel/client_session.cpp +++ b/src/core/hle/kernel/client_session.cpp @@ -12,7 +12,7 @@ namespace Kernel { -ClientSession::ClientSession(KernelCore& kernel) : SynchronizationObject{kernel} {} +ClientSession::ClientSession(KernelCore& kernel) : KSynchronizationObject{kernel} {} ClientSession::~ClientSession() { // This destructor will be called automatically when the last ClientSession handle is closed by @@ -22,15 +22,6 @@ ClientSession::~ClientSession() { } } -bool ClientSession::ShouldWait(const Thread* thread) const { - UNIMPLEMENTED(); - return {}; -} - -void ClientSession::Acquire(Thread* thread) { - UNIMPLEMENTED(); -} - bool ClientSession::IsSignaled() const { UNIMPLEMENTED(); return true; diff --git a/src/core/hle/kernel/client_session.h b/src/core/hle/kernel/client_session.h index e5e0690c2b..d5c9ebee8c 100644 --- a/src/core/hle/kernel/client_session.h +++ b/src/core/hle/kernel/client_session.h @@ -7,7 +7,7 @@ #include #include -#include "core/hle/kernel/synchronization_object.h" +#include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/result.h" union ResultCode; @@ -26,7 +26,7 @@ class KernelCore; class Session; class Thread; -class ClientSession final : public SynchronizationObject { +class ClientSession final : public KSynchronizationObject { public: explicit ClientSession(KernelCore& kernel); ~ClientSession() override; @@ -49,10 +49,6 @@ public: ResultCode SendSyncRequest(std::shared_ptr thread, Core::Memory::Memory& memory, Core::Timing::CoreTiming& core_timing); - bool ShouldWait(const Thread* thread) const override; - - void Acquire(Thread* thread) override; - bool IsSignaled() const override; private: diff --git a/src/core/hle/kernel/errors.h b/src/core/hle/kernel/errors.h index d4e5d88cff..7d32a39f06 100644 --- a/src/core/hle/kernel/errors.h +++ b/src/core/hle/kernel/errors.h @@ -13,12 +13,14 @@ namespace Kernel { constexpr 
ResultCode ERR_MAX_CONNECTIONS_REACHED{ErrorModule::Kernel, 7}; constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14}; constexpr ResultCode ERR_THREAD_TERMINATING{ErrorModule::Kernel, 59}; +constexpr ResultCode ERR_TERMINATION_REQUESTED{ErrorModule::Kernel, 59}; constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101}; constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102}; constexpr ResultCode ERR_OUT_OF_RESOURCES{ErrorModule::Kernel, 103}; constexpr ResultCode ERR_OUT_OF_MEMORY{ErrorModule::Kernel, 104}; constexpr ResultCode ERR_HANDLE_TABLE_FULL{ErrorModule::Kernel, 105}; constexpr ResultCode ERR_INVALID_ADDRESS_STATE{ErrorModule::Kernel, 106}; +constexpr ResultCode ERR_INVALID_CURRENT_MEMORY{ErrorModule::Kernel, 106}; constexpr ResultCode ERR_INVALID_MEMORY_PERMISSIONS{ErrorModule::Kernel, 108}; constexpr ResultCode ERR_INVALID_MEMORY_RANGE{ErrorModule::Kernel, 110}; constexpr ResultCode ERR_INVALID_PROCESSOR_ID{ErrorModule::Kernel, 113}; @@ -28,6 +30,7 @@ constexpr ResultCode ERR_INVALID_POINTER{ErrorModule::Kernel, 115}; constexpr ResultCode ERR_INVALID_COMBINATION{ErrorModule::Kernel, 116}; constexpr ResultCode RESULT_TIMEOUT{ErrorModule::Kernel, 117}; constexpr ResultCode ERR_SYNCHRONIZATION_CANCELED{ErrorModule::Kernel, 118}; +constexpr ResultCode ERR_CANCELLED{ErrorModule::Kernel, 118}; constexpr ResultCode ERR_OUT_OF_RANGE{ErrorModule::Kernel, 119}; constexpr ResultCode ERR_INVALID_ENUM_VALUE{ErrorModule::Kernel, 120}; constexpr ResultCode ERR_NOT_FOUND{ErrorModule::Kernel, 121}; diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index c5fd82a6b9..f44d319926 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp @@ -645,8 +645,7 @@ void KScheduler::Unload(Thread* thread) { void KScheduler::Reload(Thread* thread) { if (thread) { - ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable, - "Thread must be runnable."); + ASSERT_MSG(thread->GetState() == ThreadSchedStatus::Runnable, "Thread must be runnable."); // Cancel any outstanding wakeup events for this thread thread->SetIsRunning(true); @@ -772,7 +771,7 @@ void KScheduler::Initialize() { { KScopedSchedulerLock lock{system.Kernel()}; - idle_thread->SetStatus(ThreadStatus::Ready); + idle_thread->SetState(ThreadStatus::Ready); } } diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp new file mode 100644 index 0000000000..e7fd119d8c --- /dev/null +++ b/src/core/hle/kernel/k_synchronization_object.cpp @@ -0,0 +1,171 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "common/common_types.h" +#include "core/hle/kernel/k_scheduler.h" +#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" +#include "core/hle/kernel/k_synchronization_object.h" +#include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/svc_results.h" +#include "core/hle/kernel/thread.h" + +namespace Kernel { + +ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index, + KSynchronizationObject** objects, const s32 num_objects, + s64 timeout) { + // Allocate space on stack for thread nodes. + std::vector thread_nodes(num_objects); + + // Prepare for wait. + Thread* thread = kernel.CurrentScheduler()->GetCurrentThread(); + Handle timer = InvalidHandle; + + { + // Setup the scheduling lock and sleep. 
+ KScopedSchedulerLockAndSleep slp(kernel, timer, thread, timeout); + + // Check if any of the objects are already signaled. + for (auto i = 0; i < num_objects; ++i) { + ASSERT(objects[i] != nullptr); + + if (objects[i]->IsSignaled()) { + *out_index = i; + slp.CancelSleep(); + return RESULT_SUCCESS; + } + } + + // Check if the timeout is zero. + if (timeout == 0) { + slp.CancelSleep(); + return Svc::ResultTimedOut; + } + + // Check if the thread should terminate. + if (thread->IsTerminationRequested()) { + slp.CancelSleep(); + return Svc::ResultTerminationRequested; + } + + // Check if waiting was canceled. + if (thread->IsWaitCancelled()) { + slp.CancelSleep(); + thread->ClearWaitCancelled(); + return Svc::ResultCancelled; + } + + // Add the waiters. + for (auto i = 0; i < num_objects; ++i) { + thread_nodes[i].thread = thread; + thread_nodes[i].next = nullptr; + + if (objects[i]->thread_list_tail == nullptr) { + objects[i]->thread_list_head = std::addressof(thread_nodes[i]); + } else { + objects[i]->thread_list_tail->next = std::addressof(thread_nodes[i]); + } + + objects[i]->thread_list_tail = std::addressof(thread_nodes[i]); + } + + // For debugging only + thread->SetWaitObjectsForDebugging(objects, num_objects); + + // Mark the thread as waiting. + thread->SetCancellable(); + thread->SetSyncedObject(nullptr, Svc::ResultTimedOut); + thread->SetState(ThreadState::WaitSynch); + } + + // The lock/sleep is done, so we should be able to get our result. + + // Thread is no longer cancellable. + thread->ClearCancellable(); + + // For debugging only + thread->SetWaitObjectsForDebugging(nullptr, 0); + + // Cancel the timer as needed. + if (timer != InvalidHandle) { + auto& time_manager = kernel.TimeManager(); + time_manager.UnscheduleTimeEvent(timer); + } + + // Get the wait result. + ResultCode wait_result{RESULT_SUCCESS}; + s32 sync_index = -1; + { + KScopedSchedulerLock lock(kernel); + KSynchronizationObject* synced_obj; + wait_result = thread->GetWaitResult(std::addressof(synced_obj)); + + for (auto i = 0; i < num_objects; ++i) { + // Unlink the object from the list. + ThreadListNode* prev_ptr = + reinterpret_cast(std::addressof(objects[i]->thread_list_head)); + ThreadListNode* prev_val = nullptr; + ThreadListNode *prev, *tail_prev; + + do { + prev = prev_ptr; + prev_ptr = prev_ptr->next; + tail_prev = prev_val; + prev_val = prev_ptr; + } while (prev_ptr != std::addressof(thread_nodes[i])); + + if (objects[i]->thread_list_tail == std::addressof(thread_nodes[i])) { + objects[i]->thread_list_tail = tail_prev; + } + + prev->next = thread_nodes[i].next; + + if (objects[i] == synced_obj) { + sync_index = i; + } + } + } + + // Set output. + *out_index = sync_index; + return wait_result; +} + +KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : Object{kernel} {} + +KSynchronizationObject ::~KSynchronizationObject() = default; + +void KSynchronizationObject::NotifyAvailable(ResultCode result) { + KScopedSchedulerLock lock(kernel); + + // If we're not signaled, we've nothing to notify. + if (!this->IsSignaled()) { + return; + } + + // Iterate over each thread. + for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) { + Thread* thread = cur_node->thread; + if (thread->GetState() == ThreadSchedStatus::Paused) { + thread->SetSyncedObject(this, result); + thread->SetState(ThreadStatus::Ready); + } + } +} + +std::vector KSynchronizationObject::GetWaitingThreadsForDebugging() const { + std::vector threads; + + // If debugging, dump the list of waiters. 
+ { + KScopedSchedulerLock lock(kernel); + for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) { + threads.emplace_back(cur_node->thread); + } + } + + return threads; +} +} // namespace Kernel diff --git a/src/core/hle/kernel/k_synchronization_object.h b/src/core/hle/kernel/k_synchronization_object.h new file mode 100644 index 0000000000..14d80ebf19 --- /dev/null +++ b/src/core/hle/kernel/k_synchronization_object.h @@ -0,0 +1,58 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include + +#include "core/hle/kernel/object.h" +#include "core/hle/result.h" + +namespace Kernel { + +class KernelCore; +class Synchronization; +class Thread; + +/// Class that represents a Kernel object that a thread can be waiting on +class KSynchronizationObject : public Object { +public: + struct ThreadListNode { + ThreadListNode* next{}; + Thread* thread{}; + }; + + [[nodiscard]] static ResultCode Wait(KernelCore& kernel, s32* out_index, + KSynchronizationObject** objects, const s32 num_objects, + s64 timeout); + + [[nodiscard]] virtual bool IsSignaled() const = 0; + + [[nodiscard]] std::vector GetWaitingThreadsForDebugging() const; + +protected: + explicit KSynchronizationObject(KernelCore& kernel); + virtual ~KSynchronizationObject(); + + void NotifyAvailable(ResultCode result); + void NotifyAvailable() { + return this->NotifyAvailable(RESULT_SUCCESS); + } + +private: + ThreadListNode* thread_list_head{}; + ThreadListNode* thread_list_tail{}; +}; + +// Specialization of DynamicObjectCast for KSynchronizationObjects +template <> +inline std::shared_ptr DynamicObjectCast( + std::shared_ptr object) { + if (object != nullptr && object->IsWaitable()) { + return std::static_pointer_cast(object); + } + return nullptr; +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index e8ece81647..f1dcbe2eb6 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -38,7 +38,6 @@ #include "core/hle/kernel/resource_limit.h" #include "core/hle/kernel/service_thread.h" #include "core/hle/kernel/shared_memory.h" -#include "core/hle/kernel/synchronization.h" #include "core/hle/kernel/thread.h" #include "core/hle/kernel/time_manager.h" #include "core/hle/lock.h" @@ -51,8 +50,7 @@ namespace Kernel { struct KernelCore::Impl { explicit Impl(Core::System& system, KernelCore& kernel) - : synchronization{system}, time_manager{system}, global_handle_table{kernel}, system{ - system} {} + : time_manager{system}, global_handle_table{kernel}, system{system} {} void SetMulticore(bool is_multicore) { this->is_multicore = is_multicore; @@ -307,7 +305,6 @@ struct KernelCore::Impl { std::vector> process_list; Process* current_process = nullptr; std::unique_ptr global_scheduler_context; - Kernel::Synchronization synchronization; Kernel::TimeManager time_manager; std::shared_ptr system_resource_limit; @@ -461,14 +458,6 @@ const std::array& Kern return impl->interrupts; } -Kernel::Synchronization& KernelCore::Synchronization() { - return impl->synchronization; -} - -const Kernel::Synchronization& KernelCore::Synchronization() const { - return impl->synchronization; -} - Kernel::TimeManager& KernelCore::TimeManager() { return impl->time_manager; } @@ -615,7 +604,7 @@ void KernelCore::Suspend(bool in_suspention) { KScopedSchedulerLock lock(*this); ThreadStatus status = should_suspend ? 
ThreadStatus::Ready : ThreadStatus::WaitSleep; for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { - impl->suspend_threads[i]->SetStatus(status); + impl->suspend_threads[i]->SetState(status); } } } diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index e3169f5a7e..9046b5a8a9 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h @@ -129,12 +129,6 @@ public: /// Gets the an instance of the current physical CPU core. const Kernel::PhysicalCore& CurrentPhysicalCore() const; - /// Gets the an instance of the Synchronization Interface. - Kernel::Synchronization& Synchronization(); - - /// Gets the an instance of the Synchronization Interface. - const Kernel::Synchronization& Synchronization() const; - /// Gets the an instance of the TimeManager Interface. Kernel::TimeManager& TimeManager(); diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp index 4f8075e0e9..badd883aa8 100644 --- a/src/core/hle/kernel/mutex.cpp +++ b/src/core/hle/kernel/mutex.cpp @@ -107,7 +107,7 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle, current_thread->SetMutexWaitAddress(address); current_thread->SetWaitHandle(requesting_thread_handle); - current_thread->SetStatus(ThreadStatus::WaitMutex); + current_thread->SetState(ThreadStatus::WaitMutex); // Update the lock holder thread's priority to prevent priority inversion. holding_thread->AddMutexWaiter(current_thread); @@ -145,7 +145,7 @@ std::pair> Mutex::Unlock(std::shared_ptrSetSynchronizationResults(nullptr, RESULT_SUCCESS); new_owner->SetLockOwner(nullptr); - new_owner->ResumeFromWait(); + new_owner->Wakeup(); system.Memory().Write32(address, mutex_value); return {RESULT_SUCCESS, new_owner}; diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index b905b486a7..92e877c3e2 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp @@ -55,7 +55,7 @@ void SetupMainThread(Core::System& system, Process& owner_process, u32 priority, // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires { KScopedSchedulerLock lock{kernel}; - thread->SetStatus(ThreadStatus::Ready); + thread->SetState(ThreadStatus::Ready); } } } // Anonymous namespace @@ -406,21 +406,18 @@ void Process::LoadModule(CodeSet code_set, VAddr base_addr) { ReprotectSegment(code_set.DataSegment(), Memory::MemoryPermission::ReadAndWrite); } +bool Process::IsSignaled() const { + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + return is_signaled; +} + Process::Process(Core::System& system) - : SynchronizationObject{system.Kernel()}, page_table{std::make_unique( - system)}, + : KSynchronizationObject{system.Kernel()}, page_table{std::make_unique( + system)}, handle_table{system.Kernel()}, address_arbiter{system}, mutex{system}, system{system} {} Process::~Process() = default; -void Process::Acquire(Thread* thread) { - ASSERT_MSG(!ShouldWait(thread), "Object unavailable!"); -} - -bool Process::ShouldWait(const Thread* thread) const { - return !is_signaled; -} - void Process::ChangeStatus(ProcessStatus new_status) { if (status == new_status) { return; @@ -428,7 +425,7 @@ void Process::ChangeStatus(ProcessStatus new_status) { status = new_status; is_signaled = true; - Signal(); + NotifyAvailable(); } ResultCode Process::AllocateMainThreadStack(std::size_t stack_size) { diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h index e412e58aac..901f1ff27b 100644 --- a/src/core/hle/kernel/process.h +++ 
b/src/core/hle/kernel/process.h @@ -13,9 +13,9 @@ #include "common/common_types.h" #include "core/hle/kernel/address_arbiter.h" #include "core/hle/kernel/handle_table.h" +#include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/mutex.h" #include "core/hle/kernel/process_capability.h" -#include "core/hle/kernel/synchronization_object.h" #include "core/hle/result.h" namespace Core { @@ -63,7 +63,7 @@ enum class ProcessStatus { DebugBreak, }; -class Process final : public SynchronizationObject { +class Process final : public KSynchronizationObject { public: explicit Process(Core::System& system); ~Process() override; @@ -304,6 +304,8 @@ public: void LoadModule(CodeSet code_set, VAddr base_addr); + bool IsSignaled() const override; + /////////////////////////////////////////////////////////////////////////////////////////////// // Thread-local storage management @@ -314,12 +316,6 @@ public: void FreeTLSRegion(VAddr tls_address); private: - /// Checks if the specified thread should wait until this process is available. - bool ShouldWait(const Thread* thread) const override; - - /// Acquires/locks this process for the specified thread if it's available. - void Acquire(Thread* thread) override; - /// Changes the process status. If the status is different /// from the current process status, then this will trigger /// a process signal. @@ -410,6 +406,8 @@ private: /// Schedule count of this process s64 schedule_count{}; + bool is_signaled{}; + /// System context Core::System& system; }; diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp index cea262ce02..99ed0857e7 100644 --- a/src/core/hle/kernel/readable_event.cpp +++ b/src/core/hle/kernel/readable_event.cpp @@ -14,24 +14,22 @@ namespace Kernel { -ReadableEvent::ReadableEvent(KernelCore& kernel) : SynchronizationObject{kernel} {} +ReadableEvent::ReadableEvent(KernelCore& kernel) : KSynchronizationObject{kernel} {} ReadableEvent::~ReadableEvent() = default; -bool ReadableEvent::ShouldWait(const Thread* thread) const { - return !is_signaled; -} - -void ReadableEvent::Acquire(Thread* thread) { - ASSERT_MSG(IsSignaled(), "object unavailable!"); -} - void ReadableEvent::Signal() { if (is_signaled) { return; } is_signaled = true; - SynchronizationObject::Signal(); + NotifyAvailable(); +} + +bool ReadableEvent::IsSignaled() const { + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + + return is_signaled; } void ReadableEvent::Clear() { diff --git a/src/core/hle/kernel/readable_event.h b/src/core/hle/kernel/readable_event.h index 3264dd066b..34e477274a 100644 --- a/src/core/hle/kernel/readable_event.h +++ b/src/core/hle/kernel/readable_event.h @@ -4,8 +4,8 @@ #pragma once +#include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/object.h" -#include "core/hle/kernel/synchronization_object.h" union ResultCode; @@ -14,7 +14,7 @@ namespace Kernel { class KernelCore; class WritableEvent; -class ReadableEvent final : public SynchronizationObject { +class ReadableEvent final : public KSynchronizationObject { friend class WritableEvent; public: @@ -32,9 +32,6 @@ public: return HANDLE_TYPE; } - bool ShouldWait(const Thread* thread) const override; - void Acquire(Thread* thread) override; - /// Unconditionally clears the readable event's state. void Clear(); @@ -46,11 +43,14 @@ public: /// then ERR_INVALID_STATE will be returned. 
ResultCode Reset(); - void Signal() override; + void Signal(); + + bool IsSignaled() const override; private: explicit ReadableEvent(KernelCore& kernel); + bool is_signaled{}; std::string name; ///< Name of event (optional) }; diff --git a/src/core/hle/kernel/server_port.cpp b/src/core/hle/kernel/server_port.cpp index a549ae9d77..82857f93b3 100644 --- a/src/core/hle/kernel/server_port.cpp +++ b/src/core/hle/kernel/server_port.cpp @@ -13,7 +13,7 @@ namespace Kernel { -ServerPort::ServerPort(KernelCore& kernel) : SynchronizationObject{kernel} {} +ServerPort::ServerPort(KernelCore& kernel) : KSynchronizationObject{kernel} {} ServerPort::~ServerPort() = default; ResultVal> ServerPort::Accept() { @@ -28,15 +28,9 @@ ResultVal> ServerPort::Accept() { void ServerPort::AppendPendingSession(std::shared_ptr pending_session) { pending_sessions.push_back(std::move(pending_session)); -} - -bool ServerPort::ShouldWait(const Thread* thread) const { - // If there are no pending sessions, we wait until a new one is added. - return pending_sessions.empty(); -} - -void ServerPort::Acquire(Thread* thread) { - ASSERT_MSG(!ShouldWait(thread), "object unavailable!"); + if (pending_sessions.size() == 1) { + NotifyAvailable(); + } } bool ServerPort::IsSignaled() const { diff --git a/src/core/hle/kernel/server_port.h b/src/core/hle/kernel/server_port.h index 41b191b86b..6470df993d 100644 --- a/src/core/hle/kernel/server_port.h +++ b/src/core/hle/kernel/server_port.h @@ -9,8 +9,8 @@ #include #include #include "common/common_types.h" +#include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/object.h" -#include "core/hle/kernel/synchronization_object.h" #include "core/hle/result.h" namespace Kernel { @@ -20,7 +20,7 @@ class KernelCore; class ServerSession; class SessionRequestHandler; -class ServerPort final : public SynchronizationObject { +class ServerPort final : public KSynchronizationObject { public: explicit ServerPort(KernelCore& kernel); ~ServerPort() override; @@ -79,9 +79,6 @@ public: /// waiting to be accepted by this port. void AppendPendingSession(std::shared_ptr pending_session); - bool ShouldWait(const Thread* thread) const override; - void Acquire(Thread* thread) override; - bool IsSignaled() const override; private: diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp index b40fe39163..4f2bb7822f 100644 --- a/src/core/hle/kernel/server_session.cpp +++ b/src/core/hle/kernel/server_session.cpp @@ -24,7 +24,7 @@ namespace Kernel { -ServerSession::ServerSession(KernelCore& kernel) : SynchronizationObject{kernel} {} +ServerSession::ServerSession(KernelCore& kernel) : KSynchronizationObject{kernel} {} ServerSession::~ServerSession() { kernel.ReleaseServiceThread(service_thread); @@ -42,16 +42,6 @@ ResultVal> ServerSession::Create(KernelCore& kern return MakeResult(std::move(session)); } -bool ServerSession::ShouldWait(const Thread* thread) const { - // Closed sessions should never wait, an error will be returned from svcReplyAndReceive. - if (!parent->Client()) { - return false; - } - - // Wait if we have no pending requests, or if we're currently handling a request. - return pending_requesting_threads.empty() || currently_handling != nullptr; -} - bool ServerSession::IsSignaled() const { // Closed sessions should never wait, an error will be returned from svcReplyAndReceive. 
if (!parent->Client()) { @@ -62,15 +52,6 @@ bool ServerSession::IsSignaled() const { return !pending_requesting_threads.empty() && currently_handling == nullptr; } -void ServerSession::Acquire(Thread* thread) { - ASSERT_MSG(!ShouldWait(thread), "object unavailable!"); - // We are now handling a request, pop it from the stack. - // TODO(Subv): What happens if the client endpoint is closed before any requests are made? - ASSERT(!pending_requesting_threads.empty()); - currently_handling = pending_requesting_threads.back(); - pending_requesting_threads.pop_back(); -} - void ServerSession::ClientDisconnected() { // We keep a shared pointer to the hle handler to keep it alive throughout // the call to ClientDisconnected, as ClientDisconnected invalidates the @@ -172,7 +153,7 @@ ResultCode ServerSession::CompleteSyncRequest(HLERequestContext& context) { { KScopedSchedulerLock lock(kernel); if (!context.IsThreadWaiting()) { - context.GetThread().ResumeFromWait(); + context.GetThread().Wakeup(); context.GetThread().SetSynchronizationResults(nullptr, result); } } diff --git a/src/core/hle/kernel/server_session.h b/src/core/hle/kernel/server_session.h index e8d1d99ea9..9155cf7f5f 100644 --- a/src/core/hle/kernel/server_session.h +++ b/src/core/hle/kernel/server_session.h @@ -10,8 +10,8 @@ #include #include "common/threadsafe_queue.h" +#include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/service_thread.h" -#include "core/hle/kernel/synchronization_object.h" #include "core/hle/result.h" namespace Core::Memory { @@ -43,7 +43,7 @@ class Thread; * After the server replies to the request, the response is marshalled back to the caller's * TLS buffer and control is transferred back to it. */ -class ServerSession final : public SynchronizationObject { +class ServerSession final : public KSynchronizationObject { friend class ServiceThread; public: @@ -77,8 +77,6 @@ public: return parent.get(); } - bool IsSignaled() const override; - /** * Sets the HLE handler for the session. This handler will be called to service IPC requests * instead of the regular IPC machinery. (The regular IPC machinery is currently not @@ -100,10 +98,6 @@ public: ResultCode HandleSyncRequest(std::shared_ptr thread, Core::Memory::Memory& memory, Core::Timing::CoreTiming& core_timing); - bool ShouldWait(const Thread* thread) const override; - - void Acquire(Thread* thread) override; - /// Called when a client disconnection occurs. void ClientDisconnected(); @@ -130,6 +124,8 @@ public: convert_to_domain = true; } + bool IsSignaled() const override; + private: /// Queues a sync request from the emulated application. 
ResultCode QueueSyncRequest(std::shared_ptr thread, Core::Memory::Memory& memory); diff --git a/src/core/hle/kernel/session.cpp b/src/core/hle/kernel/session.cpp index e4dd53e241..75304b9619 100644 --- a/src/core/hle/kernel/session.cpp +++ b/src/core/hle/kernel/session.cpp @@ -9,7 +9,7 @@ namespace Kernel { -Session::Session(KernelCore& kernel) : SynchronizationObject{kernel} {} +Session::Session(KernelCore& kernel) : KSynchronizationObject{kernel} {} Session::~Session() = default; Session::SessionPair Session::Create(KernelCore& kernel, std::string name) { @@ -24,18 +24,9 @@ Session::SessionPair Session::Create(KernelCore& kernel, std::string name) { return std::make_pair(std::move(client_session), std::move(server_session)); } -bool Session::ShouldWait(const Thread* thread) const { - UNIMPLEMENTED(); - return {}; -} - bool Session::IsSignaled() const { UNIMPLEMENTED(); return true; } -void Session::Acquire(Thread* thread) { - UNIMPLEMENTED(); -} - } // namespace Kernel diff --git a/src/core/hle/kernel/session.h b/src/core/hle/kernel/session.h index 7cd9c0d770..f6dd2c1d25 100644 --- a/src/core/hle/kernel/session.h +++ b/src/core/hle/kernel/session.h @@ -8,7 +8,7 @@ #include #include -#include "core/hle/kernel/synchronization_object.h" +#include "core/hle/kernel/k_synchronization_object.h" namespace Kernel { @@ -19,7 +19,7 @@ class ServerSession; * Parent structure to link the client and server endpoints of a session with their associated * client port. */ -class Session final : public SynchronizationObject { +class Session final : public KSynchronizationObject { public: explicit Session(KernelCore& kernel); ~Session() override; @@ -37,12 +37,8 @@ public: return HANDLE_TYPE; } - bool ShouldWait(const Thread* thread) const override; - bool IsSignaled() const override; - void Acquire(Thread* thread) override; - std::shared_ptr Client() { if (auto result{client.lock()}) { return result; diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index de3ed25daf..0a3064c7db 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -26,6 +26,7 @@ #include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" +#include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/memory/memory_block.h" #include "core/hle/kernel/memory/page_table.h" @@ -38,7 +39,6 @@ #include "core/hle/kernel/svc.h" #include "core/hle/kernel/svc_types.h" #include "core/hle/kernel/svc_wrap.h" -#include "core/hle/kernel/synchronization.h" #include "core/hle/kernel/thread.h" #include "core/hle/kernel/time_manager.h" #include "core/hle/kernel/transfer_memory.h" @@ -343,25 +343,14 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) { auto thread = kernel.CurrentScheduler()->GetCurrentThread(); { KScopedSchedulerLock lock(kernel); - thread->InvalidateHLECallback(); - thread->SetStatus(ThreadStatus::WaitIPC); + thread->SetState(ThreadStatus::WaitIPC); session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming()); } - if (thread->HasHLECallback()) { - Handle event_handle = thread->GetHLETimeEvent(); - if (event_handle != InvalidHandle) { - auto& time_manager = kernel.TimeManager(); - time_manager.UnscheduleTimeEvent(event_handle); - } - - { - KScopedSchedulerLock lock(kernel); - auto* sync_object = thread->GetHLESyncObject(); - sync_object->RemoveWaitingThread(SharedFrom(thread)); - } - - 
thread->InvokeHLECallback(SharedFrom(thread)); + Handle event_handle = thread->GetHLETimeEvent(); + if (event_handle != InvalidHandle) { + auto& time_manager = kernel.TimeManager(); + time_manager.UnscheduleTimeEvent(event_handle); } return thread->GetSignalingResult(); @@ -436,7 +425,7 @@ static ResultCode GetProcessId32(Core::System& system, u32* process_id_low, u32* } /// Wait for the given handles to synchronize, timeout after the specified nanoseconds -static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr handles_address, +static ResultCode WaitSynchronization(Core::System& system, s32* index, VAddr handles_address, u64 handle_count, s64 nano_seconds) { LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, handle_count={}, nano_seconds={}", handles_address, handle_count, nano_seconds); @@ -458,28 +447,26 @@ static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr } auto& kernel = system.Kernel(); - Thread::ThreadSynchronizationObjects objects(handle_count); + std::vector objects(handle_count); const auto& handle_table = kernel.CurrentProcess()->GetHandleTable(); for (u64 i = 0; i < handle_count; ++i) { const Handle handle = memory.Read32(handles_address + i * sizeof(Handle)); - const auto object = handle_table.Get(handle); + const auto object = handle_table.Get(handle); if (object == nullptr) { LOG_ERROR(Kernel_SVC, "Object is a nullptr"); return ERR_INVALID_HANDLE; } - objects[i] = object; + objects[i] = object.get(); } - auto& synchronization = kernel.Synchronization(); - const auto [result, handle_result] = synchronization.WaitFor(objects, nano_seconds); - *index = handle_result; - return result; + return KSynchronizationObject::Wait(kernel, index, objects.data(), + static_cast(objects.size()), nano_seconds); } static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address, - s32 handle_count, u32 timeout_high, Handle* index) { + s32 handle_count, u32 timeout_high, s32* index) { const s64 nano_seconds{(static_cast(timeout_high) << 32) | static_cast(timeout_low)}; return WaitSynchronization(system, index, handles_address, handle_count, nano_seconds); } @@ -1655,7 +1642,7 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT); - if (thread->IsPendingTermination()) { + if (thread->IsTerminationRequested()) { lock.CancelSleep(); return ERR_THREAD_TERMINATING; } @@ -1674,7 +1661,7 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add current_thread->SetCondVarWaitAddress(condition_variable_addr); current_thread->SetMutexWaitAddress(mutex_addr); current_thread->SetWaitHandle(thread_handle); - current_thread->SetStatus(ThreadStatus::WaitCondVar); + current_thread->SetState(ThreadStatus::WaitCondVar); current_process->InsertConditionVariableThread(SharedFrom(current_thread)); } @@ -1761,7 +1748,7 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_ thread->SetLockOwner(nullptr); thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS); - thread->ResumeFromWait(); + thread->Wakeup(); } else { // The mutex is already owned by some other thread, make this thread wait on it. 
const Handle owner_handle = static_cast(mutex_val & Mutex::MutexOwnerMask); @@ -1769,7 +1756,7 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_ auto owner = handle_table.Get(owner_handle); ASSERT(owner); if (thread->GetStatus() == ThreadStatus::WaitCondVar) { - thread->SetStatus(ThreadStatus::WaitMutex); + thread->SetState(ThreadStatus::WaitMutex); } owner->AddMutexWaiter(thread); diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h index 0b6dd9df0b..f94c487bac 100644 --- a/src/core/hle/kernel/svc_wrap.h +++ b/src/core/hle/kernel/svc_wrap.h @@ -215,9 +215,10 @@ void SvcWrap64(Core::System& system) { func(system, static_cast(Param(system, 0)), Param(system, 1), Param(system, 2)).raw); } -template +// Used by WaitSynchronization +template void SvcWrap64(Core::System& system) { - u32 param_1 = 0; + s32 param_1 = 0; const u32 retval = func(system, ¶m_1, Param(system, 1), static_cast(Param(system, 2)), static_cast(Param(system, 3))) .raw; @@ -539,9 +540,9 @@ void SvcWrap32(Core::System& system) { } // Used by WaitSynchronization32 -template +template void SvcWrap32(Core::System& system) { - u32 param_1 = 0; + s32 param_1 = 0; const u32 retval = func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2), Param32(system, 3), ¶m_1) .raw; diff --git a/src/core/hle/kernel/synchronization.cpp b/src/core/hle/kernel/synchronization.cpp deleted file mode 100644 index d3f520ea2d..0000000000 --- a/src/core/hle/kernel/synchronization.cpp +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2020 yuzu Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#include "core/core.h" -#include "core/hle/kernel/errors.h" -#include "core/hle/kernel/handle_table.h" -#include "core/hle/kernel/k_scheduler.h" -#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" -#include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/synchronization.h" -#include "core/hle/kernel/synchronization_object.h" -#include "core/hle/kernel/thread.h" -#include "core/hle/kernel/time_manager.h" - -namespace Kernel { - -Synchronization::Synchronization(Core::System& system) : system{system} {} - -void Synchronization::SignalObject(SynchronizationObject& obj) const { - auto& kernel = system.Kernel(); - KScopedSchedulerLock lock(kernel); - if (obj.IsSignaled()) { - for (auto thread : obj.GetWaitingThreads()) { - if (thread->GetSchedulingStatus() == ThreadSchedStatus::Paused) { - if (thread->GetStatus() != ThreadStatus::WaitHLEEvent) { - ASSERT(thread->GetStatus() == ThreadStatus::WaitSynch); - ASSERT(thread->IsWaitingSync()); - } - thread->SetSynchronizationResults(&obj, RESULT_SUCCESS); - thread->ResumeFromWait(); - } - } - obj.ClearWaitingThreads(); - } -} - -std::pair Synchronization::WaitFor( - std::vector>& sync_objects, s64 nano_seconds) { - auto& kernel = system.Kernel(); - auto* const thread = kernel.CurrentScheduler()->GetCurrentThread(); - Handle event_handle = InvalidHandle; - { - KScopedSchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds); - const auto itr = - std::find_if(sync_objects.begin(), sync_objects.end(), - [thread](const std::shared_ptr& object) { - return object->IsSignaled(); - }); - - if (itr != sync_objects.end()) { - // We found a ready object, acquire it and set the result value - SynchronizationObject* object = itr->get(); - object->Acquire(thread); - const u32 index = static_cast(std::distance(sync_objects.begin(), itr)); - lock.CancelSleep(); - return {RESULT_SUCCESS, 
index}; - } - - if (nano_seconds == 0) { - lock.CancelSleep(); - return {RESULT_TIMEOUT, InvalidHandle}; - } - - if (thread->IsPendingTermination()) { - lock.CancelSleep(); - return {ERR_THREAD_TERMINATING, InvalidHandle}; - } - - if (thread->IsSyncCancelled()) { - thread->SetSyncCancelled(false); - lock.CancelSleep(); - return {ERR_SYNCHRONIZATION_CANCELED, InvalidHandle}; - } - - for (auto& object : sync_objects) { - object->AddWaitingThread(SharedFrom(thread)); - } - - thread->SetSynchronizationObjects(&sync_objects); - thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT); - thread->SetStatus(ThreadStatus::WaitSynch); - thread->SetWaitingSync(true); - } - thread->SetWaitingSync(false); - - if (event_handle != InvalidHandle) { - auto& time_manager = kernel.TimeManager(); - time_manager.UnscheduleTimeEvent(event_handle); - } - - { - KScopedSchedulerLock lock(kernel); - ResultCode signaling_result = thread->GetSignalingResult(); - SynchronizationObject* signaling_object = thread->GetSignalingObject(); - thread->SetSynchronizationObjects(nullptr); - auto shared_thread = SharedFrom(thread); - for (auto& obj : sync_objects) { - obj->RemoveWaitingThread(shared_thread); - } - if (signaling_object != nullptr) { - const auto itr = std::find_if( - sync_objects.begin(), sync_objects.end(), - [signaling_object](const std::shared_ptr& object) { - return object.get() == signaling_object; - }); - ASSERT(itr != sync_objects.end()); - signaling_object->Acquire(thread); - const u32 index = static_cast(std::distance(sync_objects.begin(), itr)); - return {signaling_result, index}; - } - return {signaling_result, -1}; - } -} - -} // namespace Kernel diff --git a/src/core/hle/kernel/synchronization.h b/src/core/hle/kernel/synchronization.h deleted file mode 100644 index 379f4b1d30..0000000000 --- a/src/core/hle/kernel/synchronization.h +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2020 yuzu Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#pragma once - -#include -#include -#include - -#include "core/hle/kernel/object.h" -#include "core/hle/result.h" - -namespace Core { -class System; -} // namespace Core - -namespace Kernel { - -class SynchronizationObject; - -/** - * The 'Synchronization' class is an interface for handling synchronization methods - * used by Synchronization objects and synchronization SVCs. This centralizes processing of - * such - */ -class Synchronization { -public: - explicit Synchronization(Core::System& system); - - /// Signals a synchronization object, waking up all its waiting threads - void SignalObject(SynchronizationObject& obj) const; - - /// Tries to see if waiting for any of the sync_objects is necessary, if not - /// it returns Success and the handle index of the signaled sync object. In - /// case not, the current thread will be locked and wait for nano_seconds or - /// for a synchronization object to signal. - std::pair WaitFor( - std::vector>& sync_objects, s64 nano_seconds); - -private: - Core::System& system; -}; -} // namespace Kernel diff --git a/src/core/hle/kernel/synchronization_object.cpp b/src/core/hle/kernel/synchronization_object.cpp deleted file mode 100644 index ba4d39157e..0000000000 --- a/src/core/hle/kernel/synchronization_object.cpp +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2014 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. 
- -#include -#include "common/assert.h" -#include "common/common_types.h" -#include "common/logging/log.h" -#include "core/core.h" -#include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/object.h" -#include "core/hle/kernel/process.h" -#include "core/hle/kernel/synchronization.h" -#include "core/hle/kernel/synchronization_object.h" -#include "core/hle/kernel/thread.h" - -namespace Kernel { - -SynchronizationObject::SynchronizationObject(KernelCore& kernel) : Object{kernel} {} -SynchronizationObject::~SynchronizationObject() = default; - -void SynchronizationObject::Signal() { - kernel.Synchronization().SignalObject(*this); -} - -void SynchronizationObject::AddWaitingThread(std::shared_ptr thread) { - auto itr = std::find(waiting_threads.begin(), waiting_threads.end(), thread); - if (itr == waiting_threads.end()) - waiting_threads.push_back(std::move(thread)); -} - -void SynchronizationObject::RemoveWaitingThread(std::shared_ptr thread) { - auto itr = std::find(waiting_threads.begin(), waiting_threads.end(), thread); - // If a thread passed multiple handles to the same object, - // the kernel might attempt to remove the thread from the object's - // waiting threads list multiple times. - if (itr != waiting_threads.end()) - waiting_threads.erase(itr); -} - -void SynchronizationObject::ClearWaitingThreads() { - waiting_threads.clear(); -} - -const std::vector>& SynchronizationObject::GetWaitingThreads() const { - return waiting_threads; -} - -} // namespace Kernel diff --git a/src/core/hle/kernel/synchronization_object.h b/src/core/hle/kernel/synchronization_object.h deleted file mode 100644 index 7408ed51fa..0000000000 --- a/src/core/hle/kernel/synchronization_object.h +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2014 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#pragma once - -#include -#include -#include - -#include "core/hle/kernel/object.h" - -namespace Kernel { - -class KernelCore; -class Synchronization; -class Thread; - -/// Class that represents a Kernel object that a thread can be waiting on -class SynchronizationObject : public Object { -public: - explicit SynchronizationObject(KernelCore& kernel); - ~SynchronizationObject() override; - - /** - * Check if the specified thread should wait until the object is available - * @param thread The thread about which we're deciding. - * @return True if the current thread should wait due to this object being unavailable - */ - virtual bool ShouldWait(const Thread* thread) const = 0; - - /// Acquire/lock the object for the specified thread if it is available - virtual void Acquire(Thread* thread) = 0; - - /// Signal this object - virtual void Signal(); - - virtual bool IsSignaled() const { - return is_signaled; - } - - /** - * Add a thread to wait on this object - * @param thread Pointer to thread to add - */ - void AddWaitingThread(std::shared_ptr thread); - - /** - * Removes a thread from waiting on this object (e.g. 
if it was resumed already) - * @param thread Pointer to thread to remove - */ - void RemoveWaitingThread(std::shared_ptr thread); - - /// Get a const reference to the waiting threads list for debug use - const std::vector>& GetWaitingThreads() const; - - void ClearWaitingThreads(); - -protected: - std::atomic_bool is_signaled{}; // Tells if this sync object is signaled - -private: - /// Threads waiting for this object to become available - std::vector> waiting_threads; -}; - -// Specialization of DynamicObjectCast for SynchronizationObjects -template <> -inline std::shared_ptr DynamicObjectCast( - std::shared_ptr object) { - if (object != nullptr && object->IsWaitable()) { - return std::static_pointer_cast(object); - } - return nullptr; -} - -} // namespace Kernel diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index a4f9e0d97a..ac19e29970 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp @@ -34,26 +34,19 @@ namespace Kernel { -bool Thread::ShouldWait(const Thread* thread) const { - return status != ThreadStatus::Dead; -} - bool Thread::IsSignaled() const { - return status == ThreadStatus::Dead; + return signaled; } -void Thread::Acquire(Thread* thread) { - ASSERT_MSG(!ShouldWait(thread), "object unavailable!"); -} - -Thread::Thread(KernelCore& kernel) : SynchronizationObject{kernel} {} +Thread::Thread(KernelCore& kernel) : KSynchronizationObject{kernel} {} Thread::~Thread() = default; void Thread::Stop() { { KScopedSchedulerLock lock(kernel); - SetStatus(ThreadStatus::Dead); - Signal(); + SetState(ThreadStatus::Dead); + signaled = true; + NotifyAvailable(); kernel.GlobalHandleTable().Close(global_handle); if (owner_process) { @@ -67,7 +60,7 @@ void Thread::Stop() { global_handle = 0; } -void Thread::ResumeFromWait() { +void Thread::Wakeup() { KScopedSchedulerLock lock(kernel); switch (status) { case ThreadStatus::Paused: @@ -82,9 +75,6 @@ void Thread::ResumeFromWait() { break; case ThreadStatus::Ready: - // The thread's wakeup callback must have already been cleared when the thread was first - // awoken. - ASSERT(hle_callback == nullptr); // If the thread is waiting on multiple wait objects, it might be awoken more than once // before actually resuming. We can ignore subsequent wakeups if the thread status has // already been set to ThreadStatus::Ready. 
@@ -96,30 +86,30 @@ void Thread::ResumeFromWait() { return; } - SetStatus(ThreadStatus::Ready); + SetState(ThreadStatus::Ready); } void Thread::OnWakeUp() { KScopedSchedulerLock lock(kernel); - SetStatus(ThreadStatus::Ready); + SetState(ThreadStatus::Ready); } ResultCode Thread::Start() { KScopedSchedulerLock lock(kernel); - SetStatus(ThreadStatus::Ready); + SetState(ThreadStatus::Ready); return RESULT_SUCCESS; } void Thread::CancelWait() { KScopedSchedulerLock lock(kernel); - if (GetSchedulingStatus() != ThreadSchedStatus::Paused || !is_waiting_on_sync) { + if (GetState() != ThreadSchedStatus::Paused || !is_cancellable) { is_sync_cancelled = true; return; } // TODO(Blinkhawk): Implement cancel of server session is_sync_cancelled = false; SetSynchronizationResults(nullptr, ERR_SYNCHRONIZATION_CANCELED); - SetStatus(ThreadStatus::Ready); + SetState(ThreadStatus::Ready); } static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top, @@ -194,7 +184,6 @@ ResultVal> Thread::Create(Core::System& system, ThreadTy thread->processor_id = processor_id; thread->ideal_core = processor_id; thread->affinity_mask.SetAffinity(processor_id, true); - thread->wait_objects = nullptr; thread->mutex_wait_address = 0; thread->condvar_wait_address = 0; thread->wait_handle = 0; @@ -202,6 +191,7 @@ ResultVal> Thread::Create(Core::System& system, ThreadTy thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap(); thread->owner_process = owner_process; thread->type = type_flags; + thread->signaled = false; if ((type_flags & THREADTYPE_IDLE) == 0) { auto& scheduler = kernel.GlobalSchedulerContext(); scheduler.AddThread(thread); @@ -234,24 +224,18 @@ void Thread::SetPriority(u32 priority) { UpdatePriority(); } -void Thread::SetSynchronizationResults(SynchronizationObject* object, ResultCode result) { +void Thread::SetSynchronizationResults(KSynchronizationObject* object, ResultCode result) { signaling_object = object; signaling_result = result; } -s32 Thread::GetSynchronizationObjectIndex(std::shared_ptr object) const { - ASSERT_MSG(!wait_objects->empty(), "Thread is not waiting for anything"); - const auto match = std::find(wait_objects->rbegin(), wait_objects->rend(), object); - return static_cast(std::distance(match, wait_objects->rend()) - 1); -} - VAddr Thread::GetCommandBufferAddress() const { // Offset from the start of TLS at which the IPC command buffer begins. 
constexpr u64 command_header_offset = 0x80; return GetTLSAddress() + command_header_offset; } -void Thread::SetStatus(ThreadStatus new_status) { +void Thread::SetState(ThreadStatus new_status) { if (new_status == status) { return; } @@ -351,28 +335,16 @@ void Thread::UpdatePriority() { lock_owner->UpdatePriority(); } -bool Thread::AllSynchronizationObjectsReady() const { - return std::none_of(wait_objects->begin(), wait_objects->end(), - [this](const std::shared_ptr& object) { - return object->ShouldWait(this); - }); -} - -bool Thread::InvokeHLECallback(std::shared_ptr thread) { - ASSERT(hle_callback); - return hle_callback(std::move(thread)); -} - ResultCode Thread::SetActivity(ThreadActivity value) { KScopedSchedulerLock lock(kernel); - auto sched_status = GetSchedulingStatus(); + auto sched_status = GetState(); if (sched_status != ThreadSchedStatus::Runnable && sched_status != ThreadSchedStatus::Paused) { return ERR_INVALID_STATE; } - if (IsPendingTermination()) { + if (IsTerminationRequested()) { return RESULT_SUCCESS; } @@ -394,7 +366,7 @@ ResultCode Thread::Sleep(s64 nanoseconds) { Handle event_handle{}; { KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds); - SetStatus(ThreadStatus::WaitSleep); + SetState(ThreadStatus::WaitSleep); } if (event_handle != InvalidHandle) { @@ -407,7 +379,7 @@ ResultCode Thread::Sleep(s64 nanoseconds) { void Thread::AddSchedulingFlag(ThreadSchedFlags flag) { const u32 old_state = scheduling_state; pausing_state |= static_cast(flag); - const u32 base_scheduling = static_cast(GetSchedulingStatus()); + const u32 base_scheduling = static_cast(GetState()); scheduling_state = base_scheduling | pausing_state; KScheduler::OnThreadStateChanged(kernel, this, old_state); } @@ -415,7 +387,7 @@ void Thread::AddSchedulingFlag(ThreadSchedFlags flag) { void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) { const u32 old_state = scheduling_state; pausing_state &= ~static_cast(flag); - const u32 base_scheduling = static_cast(GetSchedulingStatus()); + const u32 base_scheduling = static_cast(GetState()); scheduling_state = base_scheduling | pausing_state; KScheduler::OnThreadStateChanged(kernel, this, old_state); } diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h index 11ef298883..69458548bf 100644 --- a/src/core/hle/kernel/thread.h +++ b/src/core/hle/kernel/thread.h @@ -14,8 +14,8 @@ #include "common/spin_lock.h" #include "core/arm/arm_interface.h" #include "core/hle/kernel/k_affinity_mask.h" +#include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/object.h" -#include "core/hle/kernel/synchronization_object.h" #include "core/hle/result.h" namespace Common { @@ -117,7 +117,7 @@ enum class ThreadSchedMasks : u32 { ForcePauseMask = 0x0070, }; -class Thread final : public SynchronizationObject { +class Thread final : public KSynchronizationObject { public: explicit Thread(KernelCore& kernel); ~Thread() override; @@ -127,10 +127,6 @@ public: using ThreadContext32 = Core::ARM_Interface::ThreadContext32; using ThreadContext64 = Core::ARM_Interface::ThreadContext64; - using ThreadSynchronizationObjects = std::vector>; - - using HLECallback = std::function thread)>; - /** * Creates and returns a new thread. 
The new thread is immediately scheduled * @param system The instance of the whole system @@ -186,10 +182,6 @@ public: return HANDLE_TYPE; } - bool ShouldWait(const Thread* thread) const override; - void Acquire(Thread* thread) override; - bool IsSignaled() const override; - /** * Gets the thread's current priority * @return The current thread's priority @@ -233,12 +225,14 @@ public: } /// Resumes a thread from waiting - void ResumeFromWait(); + void Wakeup(); void OnWakeUp(); ResultCode Start(); + virtual bool IsSignaled() const override; + /// Cancels a waiting operation that this thread may or may not be within. /// /// When the thread is within a waiting state, this will set the thread's @@ -247,30 +241,21 @@ public: /// void CancelWait(); - void SetSynchronizationResults(SynchronizationObject* object, ResultCode result); + void SetSynchronizationResults(KSynchronizationObject* object, ResultCode result); - SynchronizationObject* GetSignalingObject() const { - return signaling_object; + void SetSyncedObject(KSynchronizationObject* object, ResultCode result) { + SetSynchronizationResults(object, result); + } + + ResultCode GetWaitResult(KSynchronizationObject** out) const { + *out = this->signaling_object; + return signaling_result; } ResultCode GetSignalingResult() const { return signaling_result; } - /** - * Retrieves the index that this particular object occupies in the list of objects - * that the thread passed to WaitSynchronization, starting the search from the last element. - * - * It is used to set the output index of WaitSynchronization when the thread is awakened. - * - * When a thread wakes up due to an object signal, the kernel will use the index of the last - * matching object in the wait objects list in case of having multiple instances of the same - * object in the list. - * - * @param object Object to query the index of. - */ - s32 GetSynchronizationObjectIndex(std::shared_ptr object) const; - /** * Stops a thread, invalidating it from further use */ @@ -345,7 +330,7 @@ public: return status; } - void SetStatus(ThreadStatus new_status); + void SetState(ThreadStatus new_status); s64 GetLastScheduledTick() const { return this->last_scheduled_tick; @@ -387,24 +372,6 @@ public: return owner_process; } - const ThreadSynchronizationObjects& GetSynchronizationObjects() const { - return *wait_objects; - } - - void SetSynchronizationObjects(ThreadSynchronizationObjects* objects) { - wait_objects = objects; - } - - void ClearSynchronizationObjects() { - for (const auto& waiting_object : *wait_objects) { - waiting_object->RemoveWaitingThread(SharedFrom(this)); - } - wait_objects->clear(); - } - - /// Determines whether all the objects this thread is waiting on are ready. 
- bool AllSynchronizationObjectsReady() const; - const MutexWaitingThreads& GetMutexWaitingThreads() const { return wait_mutex_threads; } @@ -449,34 +416,14 @@ public: arb_wait_address = address; } - bool HasHLECallback() const { - return hle_callback != nullptr; - } - - void SetHLECallback(HLECallback callback) { - hle_callback = std::move(callback); - } - void SetHLETimeEvent(Handle time_event) { hle_time_event = time_event; } - void SetHLESyncObject(SynchronizationObject* object) { - hle_object = object; - } - Handle GetHLETimeEvent() const { return hle_time_event; } - SynchronizationObject* GetHLESyncObject() const { - return hle_object; - } - - void InvalidateHLECallback() { - SetHLECallback(nullptr); - } - bool InvokeHLECallback(std::shared_ptr thread); u32 GetIdealCore() const { @@ -500,7 +447,7 @@ public: this->schedule_count = count; } - ThreadSchedStatus GetSchedulingStatus() const { + ThreadSchedStatus GetState() const { return static_cast(scheduling_state & static_cast(ThreadSchedMasks::LowMask)); } @@ -517,12 +464,12 @@ public: is_running = value; } - bool IsSyncCancelled() const { + bool IsWaitCancelled() const { return is_sync_cancelled; } - void SetSyncCancelled(bool value) { - is_sync_cancelled = value; + void ClearWaitCancelled() { + is_sync_cancelled = false; } Handle GetGlobalHandle() const { @@ -537,16 +484,20 @@ public: waiting_for_arbitration = set; } - bool IsWaitingSync() const { - return is_waiting_on_sync; + bool IsCancellable() const { + return is_cancellable; } - void SetWaitingSync(bool is_waiting) { - is_waiting_on_sync = is_waiting; + void SetCancellable() { + is_cancellable = true; } - bool IsPendingTermination() const { - return will_be_terminated || GetSchedulingStatus() == ThreadSchedStatus::Exited; + void ClearCancellable() { + is_cancellable = false; + } + + bool IsTerminationRequested() const { + return will_be_terminated || GetState() == ThreadSchedStatus::Exited; } bool IsPaused() const { @@ -622,6 +573,18 @@ public: disable_count--; } + void SetWaitObjectsForDebugging(KSynchronizationObject** objects, s32 num_objects) { + wait_objects_for_debugging.clear(); + wait_objects_for_debugging.reserve(num_objects); + for (auto i = 0; i < num_objects; ++i) { + wait_objects_for_debugging.emplace_back(objects[i]); + } + } + + const std::vector& GetWaitObjectsForDebugging() const { + return wait_objects_for_debugging; + } + private: friend class GlobalSchedulerContext; friend class KScheduler; @@ -630,7 +593,6 @@ private: void SetSchedulingStatus(ThreadSchedStatus new_status); void AddSchedulingFlag(ThreadSchedFlags flag); void RemoveSchedulingFlag(ThreadSchedFlags flag); - void SetCurrentPriority(u32 new_priority); Common::SpinLock context_guard{}; @@ -671,10 +633,10 @@ private: Process* owner_process; /// Objects that the thread is waiting on, in the same order as they were - /// passed to WaitSynchronization. - ThreadSynchronizationObjects* wait_objects; + /// passed to WaitSynchronization. This is used for debugging only. + std::vector wait_objects_for_debugging; - SynchronizationObject* signaling_object; + KSynchronizationObject* signaling_object; ResultCode signaling_result{RESULT_SUCCESS}; /// List of threads that are waiting for a mutex that is held by this thread. @@ -697,10 +659,7 @@ private: /// Handle used as userdata to reference this object when inserting into the CoreTiming queue. 
Handle global_handle = 0; - /// Callback for HLE Events - HLECallback hle_callback; Handle hle_time_event; - SynchronizationObject* hle_object; KScheduler* scheduler = nullptr; @@ -714,7 +673,7 @@ private: u32 pausing_state = 0; bool is_running = false; - bool is_waiting_on_sync = false; + bool is_cancellable = false; bool is_sync_cancelled = false; bool is_continuous_on_svc = false; @@ -725,6 +684,8 @@ private: bool was_running = false; + bool signaled{}; + std::string name; }; diff --git a/src/core/hle/service/sm/sm.cpp b/src/core/hle/service/sm/sm.cpp index 4da69f5033..2b91a89d1b 100644 --- a/src/core/hle/service/sm/sm.cpp +++ b/src/core/hle/service/sm/sm.cpp @@ -139,9 +139,6 @@ void SM::GetService(Kernel::HLERequestContext& ctx) { server_port->AppendPendingSession(server); } - // Wake the threads waiting on the ServerPort - server_port->Signal(); - LOG_DEBUG(Service_SM, "called service={} -> session={}", name, client->GetObjectId()); IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles}; rb.Push(RESULT_SUCCESS); diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp index 0925c10b45..8d91d600a2 100644 --- a/src/yuzu/debugger/wait_tree.cpp +++ b/src/yuzu/debugger/wait_tree.cpp @@ -14,10 +14,10 @@ #include "core/core.h" #include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/k_scheduler.h" +#include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/mutex.h" #include "core/hle/kernel/process.h" #include "core/hle/kernel/readable_event.h" -#include "core/hle/kernel/synchronization_object.h" #include "core/hle/kernel/thread.h" #include "core/memory.h" @@ -169,7 +169,8 @@ std::vector> WaitTreeCallstack::GetChildren() cons return list; } -WaitTreeSynchronizationObject::WaitTreeSynchronizationObject(const Kernel::SynchronizationObject& o) +WaitTreeSynchronizationObject::WaitTreeSynchronizationObject( + const Kernel::KSynchronizationObject& o) : object(o) {} WaitTreeSynchronizationObject::~WaitTreeSynchronizationObject() = default; @@ -188,7 +189,7 @@ QString WaitTreeSynchronizationObject::GetText() const { } std::unique_ptr WaitTreeSynchronizationObject::make( - const Kernel::SynchronizationObject& object) { + const Kernel::KSynchronizationObject& object) { switch (object.GetHandleType()) { case Kernel::HandleType::ReadableEvent: return std::make_unique(static_cast(object)); @@ -202,7 +203,7 @@ std::unique_ptr WaitTreeSynchronizationObject::ma std::vector> WaitTreeSynchronizationObject::GetChildren() const { std::vector> list; - const auto& threads = object.GetWaitingThreads(); + const auto& threads = object.GetWaitingThreadsForDebugging(); if (threads.empty()) { list.push_back(std::make_unique(tr("waited by no thread"))); } else { @@ -211,8 +212,8 @@ std::vector> WaitTreeSynchronizationObject::GetChi return list; } -WaitTreeObjectList::WaitTreeObjectList( - const std::vector>& list, bool w_all) +WaitTreeObjectList::WaitTreeObjectList(const std::vector& list, + bool w_all) : object_list(list), wait_all(w_all) {} WaitTreeObjectList::~WaitTreeObjectList() = default; @@ -367,8 +368,8 @@ std::vector> WaitTreeThread::GetChildren() const { } if (thread.GetStatus() == Kernel::ThreadStatus::WaitSynch) { - list.push_back(std::make_unique(thread.GetSynchronizationObjects(), - thread.IsWaitingSync())); + list.push_back(std::make_unique(thread.GetWaitObjectsForDebugging(), + thread.IsCancellable())); } list.push_back(std::make_unique(thread)); @@ -380,7 +381,7 @@ WaitTreeEvent::WaitTreeEvent(const 
Kernel::ReadableEvent& object) : WaitTreeSynchronizationObject(object) {} WaitTreeEvent::~WaitTreeEvent() = default; -WaitTreeThreadList::WaitTreeThreadList(const std::vector>& list) +WaitTreeThreadList::WaitTreeThreadList(const std::vector& list) : thread_list(list) {} WaitTreeThreadList::~WaitTreeThreadList() = default; diff --git a/src/yuzu/debugger/wait_tree.h b/src/yuzu/debugger/wait_tree.h index 8e3bc4b242..cf96911ea8 100644 --- a/src/yuzu/debugger/wait_tree.h +++ b/src/yuzu/debugger/wait_tree.h @@ -18,8 +18,8 @@ class EmuThread; namespace Kernel { class HandleTable; +class KSynchronizationObject; class ReadableEvent; -class SynchronizationObject; class Thread; } // namespace Kernel @@ -102,30 +102,29 @@ private: class WaitTreeSynchronizationObject : public WaitTreeExpandableItem { Q_OBJECT public: - explicit WaitTreeSynchronizationObject(const Kernel::SynchronizationObject& object); + explicit WaitTreeSynchronizationObject(const Kernel::KSynchronizationObject& object); ~WaitTreeSynchronizationObject() override; static std::unique_ptr make( - const Kernel::SynchronizationObject& object); + const Kernel::KSynchronizationObject& object); QString GetText() const override; std::vector> GetChildren() const override; protected: - const Kernel::SynchronizationObject& object; + const Kernel::KSynchronizationObject& object; }; class WaitTreeObjectList : public WaitTreeExpandableItem { Q_OBJECT public: - WaitTreeObjectList(const std::vector>& list, - bool wait_all); + WaitTreeObjectList(const std::vector& list, bool wait_all); ~WaitTreeObjectList() override; QString GetText() const override; std::vector> GetChildren() const override; private: - const std::vector>& object_list; + const std::vector& object_list; bool wait_all; }; @@ -150,14 +149,14 @@ public: class WaitTreeThreadList : public WaitTreeExpandableItem { Q_OBJECT public: - explicit WaitTreeThreadList(const std::vector>& list); + explicit WaitTreeThreadList(const std::vector& list); ~WaitTreeThreadList() override; QString GetText() const override; std::vector> GetChildren() const override; private: - const std::vector>& thread_list; + const std::vector& thread_list; }; class WaitTreeModel : public QAbstractItemModel { From fb43b8efd22eaf0eaccf0c9ddc70cf2e06deafeb Mon Sep 17 00:00:00 2001 From: bunnei Date: Sun, 27 Dec 2020 01:58:16 -0800 Subject: [PATCH 04/20] common: Introduce useful tree structures. 
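These containers store their link nodes inside the elements themselves, so insertion and removal never allocate or copy. As a usage sketch only (not part of this patch; Example, ExampleComparator, and the key field are hypothetical), a client type embeds a node and supplies a three-way comparator:

    struct Example {
        Common::IntrusiveRedBlackTreeNode node; // embedded link; the tree never allocates
        int key{};
    };

    struct ExampleComparator {
        // Returns <0, 0, or >0, matching the RB_* compare convention.
        static int Compare(const Example& lhs, const Example& rhs) {
            if (lhs.key != rhs.key) {
                return lhs.key < rhs.key ? -1 : 1;
            }
            return 0;
        }
    };

    using ExampleTraits = Common::IntrusiveRedBlackTreeMemberTraits<&Example::node>;
    using ExampleTree = ExampleTraits::TreeType<ExampleComparator>;

    ExampleTree tree;
    Example a;
    a.key = 1;
    tree.insert(a);         // links the caller-owned object in place
    auto it = tree.find(a); // O(lg n) lookup via ExampleComparator

Because elements own their links, the caller must keep each element alive for as long as it is linked into the tree.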
--- src/common/CMakeLists.txt | 3 + src/common/intrusive_red_black_tree.h | 627 ++++++++++++++++++++ src/common/parent_of_member.h | 189 ++++++ src/common/tree.h | 822 ++++++++++++++++++++++++++ 4 files changed, 1641 insertions(+) create mode 100644 src/common/intrusive_red_black_tree.h create mode 100644 src/common/parent_of_member.h create mode 100644 src/common/tree.h diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt index 2c2bd2ee8e..5d781cd772 100644 --- a/src/common/CMakeLists.txt +++ b/src/common/CMakeLists.txt @@ -123,6 +123,7 @@ add_library(common STATIC hash.h hex_util.cpp hex_util.h + intrusive_red_black_tree.h logging/backend.cpp logging/backend.h logging/filter.cpp @@ -143,6 +144,7 @@ add_library(common STATIC page_table.h param_package.cpp param_package.h + parent_of_member.h quaternion.h ring_buffer.h scm_rev.cpp @@ -167,6 +169,7 @@ add_library(common STATIC time_zone.h timer.cpp timer.h + tree.h uint128.cpp uint128.h uuid.cpp diff --git a/src/common/intrusive_red_black_tree.h b/src/common/intrusive_red_black_tree.h new file mode 100644 index 0000000000..929b5497e3 --- /dev/null +++ b/src/common/intrusive_red_black_tree.h @@ -0,0 +1,627 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "common/parent_of_member.h" +#include "common/tree.h" + +namespace Common { + +namespace impl { + +class IntrusiveRedBlackTreeImpl; + +} + +struct IntrusiveRedBlackTreeNode { + +private: + RB_ENTRY(IntrusiveRedBlackTreeNode) entry{}; + + friend class impl::IntrusiveRedBlackTreeImpl; + + template + friend class IntrusiveRedBlackTree; + +public: + constexpr IntrusiveRedBlackTreeNode() = default; +}; + +template +class IntrusiveRedBlackTree; + +namespace impl { + +class IntrusiveRedBlackTreeImpl { + +private: + template + friend class ::Common::IntrusiveRedBlackTree; + +private: + RB_HEAD(IntrusiveRedBlackTreeRoot, IntrusiveRedBlackTreeNode); + using RootType = IntrusiveRedBlackTreeRoot; + +private: + IntrusiveRedBlackTreeRoot root; + +public: + template + class Iterator; + + using value_type = IntrusiveRedBlackTreeNode; + using size_type = size_t; + using difference_type = ptrdiff_t; + using pointer = value_type*; + using const_pointer = const value_type*; + using reference = value_type&; + using const_reference = const value_type&; + using iterator = Iterator; + using const_iterator = Iterator; + + template + class Iterator { + public: + using iterator_category = std::bidirectional_iterator_tag; + using value_type = typename IntrusiveRedBlackTreeImpl::value_type; + using difference_type = typename IntrusiveRedBlackTreeImpl::difference_type; + using pointer = std::conditional_t; + using reference = std::conditional_t; + + private: + pointer node; + + public: + explicit Iterator(pointer n) : node(n) {} + + bool operator==(const Iterator& rhs) const { + return this->node == rhs.node; + } + + bool operator!=(const Iterator& rhs) const { + return !(*this == rhs); + } + + pointer operator->() const { + return this->node; + } + + reference operator*() const { + return *this->node; + } + + Iterator& operator++() { + this->node = GetNext(this->node); + return *this; + } + + Iterator& operator--() { + this->node = GetPrev(this->node); + return *this; + } + + Iterator operator++(int) { + const Iterator it{*this}; + ++(*this); + return it; + } + + Iterator operator--(int) { + const Iterator it{*this}; + --(*this); + return it; + } + + operator Iterator() const { + return 
Iterator(this->node); + } + }; + +protected: + // Generate static implementations for non-comparison operations for IntrusiveRedBlackTreeRoot. + RB_GENERATE_WITHOUT_COMPARE_STATIC(IntrusiveRedBlackTreeRoot, IntrusiveRedBlackTreeNode, entry); + +private: + // Define accessors using RB_* functions. + constexpr void InitializeImpl() { + RB_INIT(&this->root); + } + + bool EmptyImpl() const { + return RB_EMPTY(&this->root); + } + + IntrusiveRedBlackTreeNode* GetMinImpl() const { + return RB_MIN(IntrusiveRedBlackTreeRoot, + const_cast(&this->root)); + } + + IntrusiveRedBlackTreeNode* GetMaxImpl() const { + return RB_MAX(IntrusiveRedBlackTreeRoot, + const_cast(&this->root)); + } + + IntrusiveRedBlackTreeNode* RemoveImpl(IntrusiveRedBlackTreeNode* node) { + return RB_REMOVE(IntrusiveRedBlackTreeRoot, &this->root, node); + } + +public: + static IntrusiveRedBlackTreeNode* GetNext(IntrusiveRedBlackTreeNode* node) { + return RB_NEXT(IntrusiveRedBlackTreeRoot, nullptr, node); + } + + static IntrusiveRedBlackTreeNode* GetPrev(IntrusiveRedBlackTreeNode* node) { + return RB_PREV(IntrusiveRedBlackTreeRoot, nullptr, node); + } + + static IntrusiveRedBlackTreeNode const* GetNext(const IntrusiveRedBlackTreeNode* node) { + return static_cast( + GetNext(const_cast(node))); + } + + static IntrusiveRedBlackTreeNode const* GetPrev(const IntrusiveRedBlackTreeNode* node) { + return static_cast( + GetPrev(const_cast(node))); + } + +public: + constexpr IntrusiveRedBlackTreeImpl() : root() { + this->InitializeImpl(); + } + + // Iterator accessors. + iterator begin() { + return iterator(this->GetMinImpl()); + } + + const_iterator begin() const { + return const_iterator(this->GetMinImpl()); + } + + iterator end() { + return iterator(static_cast(nullptr)); + } + + const_iterator end() const { + return const_iterator(static_cast(nullptr)); + } + + const_iterator cbegin() const { + return this->begin(); + } + + const_iterator cend() const { + return this->end(); + } + + iterator iterator_to(reference ref) { + return iterator(&ref); + } + + const_iterator iterator_to(const_reference ref) const { + return const_iterator(&ref); + } + + // Content management. 
+ bool empty() const { + return this->EmptyImpl(); + } + + reference back() { + return *this->GetMaxImpl(); + } + + const_reference back() const { + return *this->GetMaxImpl(); + } + + reference front() { + return *this->GetMinImpl(); + } + + const_reference front() const { + return *this->GetMinImpl(); + } + + iterator erase(iterator it) { + auto cur = std::addressof(*it); + auto next = GetNext(cur); + this->RemoveImpl(cur); + return iterator(next); + } +}; + +} // namespace impl + +template +concept HasLightCompareType = requires { + { std::is_same::value } + ->std::convertible_to; +}; + +namespace impl { + +template +consteval auto* GetLightCompareType() { + if constexpr (HasLightCompareType) { + return static_cast(nullptr); + } else { + return static_cast(nullptr); + } +} + +} // namespace impl + +template +using LightCompareType = std::remove_pointer_t())>; + +template +class IntrusiveRedBlackTree { + +public: + using ImplType = impl::IntrusiveRedBlackTreeImpl; + +private: + ImplType impl{}; + +public: + struct IntrusiveRedBlackTreeRootWithCompare : ImplType::IntrusiveRedBlackTreeRoot {}; + + template + class Iterator; + + using value_type = T; + using size_type = size_t; + using difference_type = ptrdiff_t; + using pointer = T*; + using const_pointer = const T*; + using reference = T&; + using const_reference = const T&; + using iterator = Iterator; + using const_iterator = Iterator; + + using light_value_type = LightCompareType; + using const_light_pointer = const light_value_type*; + using const_light_reference = const light_value_type&; + + template + class Iterator { + public: + friend class IntrusiveRedBlackTree; + + using ImplIterator = + std::conditional_t; + + using iterator_category = std::bidirectional_iterator_tag; + using value_type = typename IntrusiveRedBlackTree::value_type; + using difference_type = typename IntrusiveRedBlackTree::difference_type; + using pointer = std::conditional_t; + using reference = std::conditional_t; + + private: + ImplIterator iterator; + + private: + explicit Iterator(ImplIterator it) : iterator(it) {} + + explicit Iterator(typename std::conditional::type::pointer ptr) + : iterator(ptr) {} + + ImplIterator GetImplIterator() const { + return this->iterator; + } + + public: + bool operator==(const Iterator& rhs) const { + return this->iterator == rhs.iterator; + } + + bool operator!=(const Iterator& rhs) const { + return !(*this == rhs); + } + + pointer operator->() const { + return Traits::GetParent(std::addressof(*this->iterator)); + } + + reference operator*() const { + return *Traits::GetParent(std::addressof(*this->iterator)); + } + + Iterator& operator++() { + ++this->iterator; + return *this; + } + + Iterator& operator--() { + --this->iterator; + return *this; + } + + Iterator operator++(int) { + const Iterator it{*this}; + ++this->iterator; + return it; + } + + Iterator operator--(int) { + const Iterator it{*this}; + --this->iterator; + return it; + } + + operator Iterator() const { + return Iterator(this->iterator); + } + }; + +private: + // Generate static implementations for comparison operations for IntrusiveRedBlackTreeRoot. 
+ RB_GENERATE_WITH_COMPARE_STATIC(IntrusiveRedBlackTreeRootWithCompare, IntrusiveRedBlackTreeNode, + entry, CompareImpl, LightCompareImpl); + +private: + static int CompareImpl(const IntrusiveRedBlackTreeNode* lhs, + const IntrusiveRedBlackTreeNode* rhs) { + return Comparator::Compare(*Traits::GetParent(lhs), *Traits::GetParent(rhs)); + } + + static int LightCompareImpl(const void* elm, const IntrusiveRedBlackTreeNode* rhs) { + return Comparator::Compare(*static_cast(elm), *Traits::GetParent(rhs)); + } + + // Define accessors using RB_* functions. + IntrusiveRedBlackTreeNode* InsertImpl(IntrusiveRedBlackTreeNode* node) { + return RB_INSERT(IntrusiveRedBlackTreeRootWithCompare, + static_cast(&this->impl.root), + node); + } + + IntrusiveRedBlackTreeNode* FindImpl(const IntrusiveRedBlackTreeNode* node) const { + return RB_FIND( + IntrusiveRedBlackTreeRootWithCompare, + const_cast( + static_cast(&this->impl.root)), + const_cast(node)); + } + + IntrusiveRedBlackTreeNode* NFindImpl(const IntrusiveRedBlackTreeNode* node) const { + return RB_NFIND( + IntrusiveRedBlackTreeRootWithCompare, + const_cast( + static_cast(&this->impl.root)), + const_cast(node)); + } + + IntrusiveRedBlackTreeNode* FindLightImpl(const_light_pointer lelm) const { + return RB_FIND_LIGHT( + IntrusiveRedBlackTreeRootWithCompare, + const_cast( + static_cast(&this->impl.root)), + static_cast(lelm)); + } + + IntrusiveRedBlackTreeNode* NFindLightImpl(const_light_pointer lelm) const { + return RB_NFIND_LIGHT( + IntrusiveRedBlackTreeRootWithCompare, + const_cast( + static_cast(&this->impl.root)), + static_cast(lelm)); + } + +public: + constexpr IntrusiveRedBlackTree() = default; + + // Iterator accessors. + iterator begin() { + return iterator(this->impl.begin()); + } + + const_iterator begin() const { + return const_iterator(this->impl.begin()); + } + + iterator end() { + return iterator(this->impl.end()); + } + + const_iterator end() const { + return const_iterator(this->impl.end()); + } + + const_iterator cbegin() const { + return this->begin(); + } + + const_iterator cend() const { + return this->end(); + } + + iterator iterator_to(reference ref) { + return iterator(this->impl.iterator_to(*Traits::GetNode(std::addressof(ref)))); + } + + const_iterator iterator_to(const_reference ref) const { + return const_iterator(this->impl.iterator_to(*Traits::GetNode(std::addressof(ref)))); + } + + // Content management. 
+ bool empty() const { + return this->impl.empty(); + } + + reference back() { + return *Traits::GetParent(std::addressof(this->impl.back())); + } + + const_reference back() const { + return *Traits::GetParent(std::addressof(this->impl.back())); + } + + reference front() { + return *Traits::GetParent(std::addressof(this->impl.front())); + } + + const_reference front() const { + return *Traits::GetParent(std::addressof(this->impl.front())); + } + + iterator erase(iterator it) { + return iterator(this->impl.erase(it.GetImplIterator())); + } + + iterator insert(reference ref) { + ImplType::pointer node = Traits::GetNode(std::addressof(ref)); + this->InsertImpl(node); + return iterator(node); + } + + iterator find(const_reference ref) const { + return iterator(this->FindImpl(Traits::GetNode(std::addressof(ref)))); + } + + iterator nfind(const_reference ref) const { + return iterator(this->NFindImpl(Traits::GetNode(std::addressof(ref)))); + } + + iterator find_light(const_light_reference ref) const { + return iterator(this->FindLightImpl(std::addressof(ref))); + } + + iterator nfind_light(const_light_reference ref) const { + return iterator(this->NFindLightImpl(std::addressof(ref))); + } +}; + +template > +class IntrusiveRedBlackTreeMemberTraits; + +template +class IntrusiveRedBlackTreeMemberTraits { +public: + template + using TreeType = IntrusiveRedBlackTree; + using TreeTypeImpl = impl::IntrusiveRedBlackTreeImpl; + +private: + template + friend class IntrusiveRedBlackTree; + + friend class impl::IntrusiveRedBlackTreeImpl; + + static constexpr IntrusiveRedBlackTreeNode* GetNode(Derived* parent) { + return std::addressof(parent->*Member); + } + + static constexpr IntrusiveRedBlackTreeNode const* GetNode(Derived const* parent) { + return std::addressof(parent->*Member); + } + + static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) { + return GetParentPointer(node); + } + + static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) { + return GetParentPointer(node); + } + +private: + static constexpr TYPED_STORAGE(Derived) DerivedStorage = {}; + static_assert(GetParent(GetNode(GetPointer(DerivedStorage))) == GetPointer(DerivedStorage)); +}; + +template > +class IntrusiveRedBlackTreeMemberTraitsDeferredAssert; + +template +class IntrusiveRedBlackTreeMemberTraitsDeferredAssert { +public: + template + using TreeType = + IntrusiveRedBlackTree; + using TreeTypeImpl = impl::IntrusiveRedBlackTreeImpl; + + static constexpr bool IsValid() { + TYPED_STORAGE(Derived) DerivedStorage = {}; + return GetParent(GetNode(GetPointer(DerivedStorage))) == GetPointer(DerivedStorage); + } + +private: + template + friend class IntrusiveRedBlackTree; + + friend class impl::IntrusiveRedBlackTreeImpl; + + static constexpr IntrusiveRedBlackTreeNode* GetNode(Derived* parent) { + return std::addressof(parent->*Member); + } + + static constexpr IntrusiveRedBlackTreeNode const* GetNode(Derived const* parent) { + return std::addressof(parent->*Member); + } + + static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) { + return GetParentPointer(node); + } + + static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) { + return GetParentPointer(node); + } +}; + +template +class IntrusiveRedBlackTreeBaseNode : public IntrusiveRedBlackTreeNode { +public: + constexpr Derived* GetPrev() { + return static_cast(impl::IntrusiveRedBlackTreeImpl::GetPrev(this)); + } + constexpr const Derived* GetPrev() const { + return 
static_cast(impl::IntrusiveRedBlackTreeImpl::GetPrev(this)); + } + + constexpr Derived* GetNext() { + return static_cast(impl::IntrusiveRedBlackTreeImpl::GetNext(this)); + } + constexpr const Derived* GetNext() const { + return static_cast(impl::IntrusiveRedBlackTreeImpl::GetNext(this)); + } +}; + +template +class IntrusiveRedBlackTreeBaseTraits { +public: + template + using TreeType = IntrusiveRedBlackTree; + using TreeTypeImpl = impl::IntrusiveRedBlackTreeImpl; + +private: + template + friend class IntrusiveRedBlackTree; + + friend class impl::IntrusiveRedBlackTreeImpl; + + static constexpr IntrusiveRedBlackTreeNode* GetNode(Derived* parent) { + return static_cast(parent); + } + + static constexpr IntrusiveRedBlackTreeNode const* GetNode(Derived const* parent) { + return static_cast(parent); + } + + static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) { + return static_cast(node); + } + + static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) { + return static_cast(node); + } +}; + +} // namespace Common diff --git a/src/common/parent_of_member.h b/src/common/parent_of_member.h new file mode 100644 index 0000000000..1af31ee443 --- /dev/null +++ b/src/common/parent_of_member.h @@ -0,0 +1,189 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include + +#include "common/assert.h" +#include "common/common_types.h" + +namespace Common { + +template +struct TypedStorage { + std::aligned_storage_t storage_; +}; + +#define TYPED_STORAGE(...) TypedStorage<__VA_ARGS__, sizeof(__VA_ARGS__), alignof(__VA_ARGS__)> + +template +static constexpr T* GetPointer(TYPED_STORAGE(T) & ts) { + return static_cast(static_cast(std::addressof(ts.storage_))); +} + +template +static constexpr const T* GetPointer(const TYPED_STORAGE(T) & ts) { + return static_cast(static_cast(std::addressof(ts.storage_))); +} + +namespace impl { + +template +struct OffsetOfUnionHolder { + template + union UnionImpl { + using PaddingMember = char; + static constexpr size_t GetOffset() { + return Offset; + } + +#pragma pack(push, 1) + struct { + PaddingMember padding[Offset]; + MemberType members[(sizeof(ParentType) / sizeof(MemberType)) + 1]; + } data; +#pragma pack(pop) + UnionImpl next_union; + }; + + template + union UnionImpl { + static constexpr size_t GetOffset() { + return 0; + } + + struct { + MemberType members[(sizeof(ParentType) / sizeof(MemberType)) + 1]; + } data; + UnionImpl next_union; + }; + + template + union UnionImpl {}; +}; + +template +struct OffsetOfCalculator { + using UnionHolder = + typename OffsetOfUnionHolder::template UnionImpl; + union Union { + char c{}; + UnionHolder first_union; + TYPED_STORAGE(ParentType) parent; + + constexpr Union() : c() {} + }; + static constexpr Union U = {}; + + static constexpr const MemberType* GetNextAddress(const MemberType* start, + const MemberType* target) { + while (start < target) { + start++; + } + return start; + } + + static constexpr std::ptrdiff_t GetDifference(const MemberType* start, + const MemberType* target) { + return (target - start) * sizeof(MemberType); + } + + template + static constexpr std::ptrdiff_t OffsetOfImpl(MemberType ParentType::*member, + CurUnion& cur_union) { + constexpr size_t Offset = CurUnion::GetOffset(); + const auto target = std::addressof(GetPointer(U.parent)->*member); + const auto start = std::addressof(cur_union.data.members[0]); + const auto next = GetNextAddress(start, target); + + if 
(next != target) { + if constexpr (Offset < sizeof(MemberType) - 1) { + return OffsetOfImpl(member, cur_union.next_union); + } else { + UNREACHABLE(); + } + } + + return (next - start) * sizeof(MemberType) + Offset; + } + + static constexpr std::ptrdiff_t OffsetOf(MemberType ParentType::*member) { + return OffsetOfImpl(member, U.first_union); + } +}; + +template +struct GetMemberPointerTraits; + +template +struct GetMemberPointerTraits { + using Parent = P; + using Member = M; +}; + +template +using GetParentType = typename GetMemberPointerTraits::Parent; + +template +using GetMemberType = typename GetMemberPointerTraits::Member; + +template > +static inline std::ptrdiff_t OffsetOf = [] { + using DeducedParentType = GetParentType; + using MemberType = GetMemberType; + static_assert(std::is_base_of::value || + std::is_same::value); + + return OffsetOfCalculator::OffsetOf(MemberPtr); +}(); + +} // namespace impl + +template > +constexpr RealParentType& GetParentReference(impl::GetMemberType* member) { + std::ptrdiff_t Offset = impl::OffsetOf; + return *static_cast( + static_cast(static_cast(static_cast(member)) - Offset)); +} + +template > +constexpr RealParentType const& GetParentReference(impl::GetMemberType const* member) { + std::ptrdiff_t Offset = impl::OffsetOf; + return *static_cast(static_cast( + static_cast(static_cast(member)) - Offset)); +} + +template > +constexpr RealParentType* GetParentPointer(impl::GetMemberType* member) { + return std::addressof(GetParentReference(member)); +} + +template > +constexpr RealParentType const* GetParentPointer(impl::GetMemberType const* member) { + return std::addressof(GetParentReference(member)); +} + +template > +constexpr RealParentType& GetParentReference(impl::GetMemberType& member) { + return GetParentReference(std::addressof(member)); +} + +template > +constexpr RealParentType const& GetParentReference(impl::GetMemberType const& member) { + return GetParentReference(std::addressof(member)); +} + +template > +constexpr RealParentType* GetParentPointer(impl::GetMemberType& member) { + return std::addressof(GetParentReference(member)); +} + +template > +constexpr RealParentType const* GetParentPointer(impl::GetMemberType const& member) { + return std::addressof(GetParentReference(member)); +} + +} // namespace Common diff --git a/src/common/tree.h b/src/common/tree.h new file mode 100644 index 0000000000..a6b6366467 --- /dev/null +++ b/src/common/tree.h @@ -0,0 +1,822 @@ +/* $NetBSD: tree.h,v 1.8 2004/03/28 19:38:30 provos Exp $ */ +/* $OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $ */ +/* $FreeBSD$ */ + +/*- + * Copyright 2002 Niels Provos + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _SYS_TREE_H_ +#define _SYS_TREE_H_ + +/* FreeBSD has a lot of defines we don't really want. */ +/* tree.h only actually uses __inline and __unused, so we'll just define those. */ + +/* #include */ + +#ifndef __inline +#define __inline inline +#endif + +/* + * This file defines data structures for different types of trees: + * splay trees and red-black trees. + * + * A splay tree is a self-organizing data structure. Every operation + * on the tree causes a splay to happen. The splay moves the requested + * node to the root of the tree and partly rebalances it. + * + * This has the benefit that request locality causes faster lookups as + * the requested nodes move to the top of the tree. On the other hand, + * every lookup causes memory writes. + * + * The Balance Theorem bounds the total access time for m operations + * and n inserts on an initially empty tree as O((m + n)lg n). The + * amortized cost for a sequence of m accesses to a splay tree is O(lg n); + * + * A red-black tree is a binary search tree with the node color as an + * extra attribute. It fulfills a set of conditions: + * - every search path from the root to a leaf consists of the + * same number of black nodes, + * - each red node (except for the root) has a black parent, + * - each leaf node is black. + * + * Every operation on a red-black tree is bounded as O(lg n). + * The maximum height of a red-black tree is 2lg (n+1). 
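+ *
+ * Typical red-black tree usage, as a sketch (the names here are
+ * placeholders, not part of this header): declare a root type with
+ * RB_HEAD(name, type), embed RB_ENTRY(type) as a field of each
+ * element, and instantiate the operations with the RB_GENERATE_*
+ * macros for a given comparator. RB_INSERT, RB_REMOVE, RB_FIND and
+ * RB_NFIND then each run in O(lg n), and RB_FOREACH walks the tree
+ * in sorted order in O(n).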
+ */ + +#define SPLAY_HEAD(name, type) \ + struct name { \ + struct type* sph_root; /* root of the tree */ \ + } + +#define SPLAY_INITIALIZER(root) \ + { NULL } + +#define SPLAY_INIT(root) \ + do { \ + (root)->sph_root = NULL; \ + } while (/*CONSTCOND*/ 0) + +#define SPLAY_ENTRY(type) \ + struct { \ + struct type* spe_left; /* left element */ \ + struct type* spe_right; /* right element */ \ + } + +#define SPLAY_LEFT(elm, field) (elm)->field.spe_left +#define SPLAY_RIGHT(elm, field) (elm)->field.spe_right +#define SPLAY_ROOT(head) (head)->sph_root +#define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL) + +/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */ +#define SPLAY_ROTATE_RIGHT(head, tmp, field) \ + do { \ + SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \ + SPLAY_RIGHT(tmp, field) = (head)->sph_root; \ + (head)->sph_root = tmp; \ + } while (/*CONSTCOND*/ 0) + +#define SPLAY_ROTATE_LEFT(head, tmp, field) \ + do { \ + SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \ + SPLAY_LEFT(tmp, field) = (head)->sph_root; \ + (head)->sph_root = tmp; \ + } while (/*CONSTCOND*/ 0) + +#define SPLAY_LINKLEFT(head, tmp, field) \ + do { \ + SPLAY_LEFT(tmp, field) = (head)->sph_root; \ + tmp = (head)->sph_root; \ + (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \ + } while (/*CONSTCOND*/ 0) + +#define SPLAY_LINKRIGHT(head, tmp, field) \ + do { \ + SPLAY_RIGHT(tmp, field) = (head)->sph_root; \ + tmp = (head)->sph_root; \ + (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \ + } while (/*CONSTCOND*/ 0) + +#define SPLAY_ASSEMBLE(head, node, left, right, field) \ + do { \ + SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \ + SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field); \ + SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \ + SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \ + } while (/*CONSTCOND*/ 0) + +/* Generates prototypes and inline functions */ + +#define SPLAY_PROTOTYPE(name, type, field, cmp) \ + void name##_SPLAY(struct name*, struct type*); \ + void name##_SPLAY_MINMAX(struct name*, int); \ + struct type* name##_SPLAY_INSERT(struct name*, struct type*); \ + struct type* name##_SPLAY_REMOVE(struct name*, struct type*); \ + \ + /* Finds the node with the same key as elm */ \ + static __inline struct type* name##_SPLAY_FIND(struct name* head, struct type* elm) { \ + if (SPLAY_EMPTY(head)) \ + return (NULL); \ + name##_SPLAY(head, elm); \ + if ((cmp)(elm, (head)->sph_root) == 0) \ + return (head->sph_root); \ + return (NULL); \ + } \ + \ + static __inline struct type* name##_SPLAY_NEXT(struct name* head, struct type* elm) { \ + name##_SPLAY(head, elm); \ + if (SPLAY_RIGHT(elm, field) != NULL) { \ + elm = SPLAY_RIGHT(elm, field); \ + while (SPLAY_LEFT(elm, field) != NULL) { \ + elm = SPLAY_LEFT(elm, field); \ + } \ + } else \ + elm = NULL; \ + return (elm); \ + } \ + \ + static __inline struct type* name##_SPLAY_MIN_MAX(struct name* head, int val) { \ + name##_SPLAY_MINMAX(head, val); \ + return (SPLAY_ROOT(head)); \ + } + +/* Main splay operation. 
+ * Moves node close to the key of elm to top + */ +#define SPLAY_GENERATE(name, type, field, cmp) \ + struct type* name##_SPLAY_INSERT(struct name* head, struct type* elm) { \ + if (SPLAY_EMPTY(head)) { \ + SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \ + } else { \ + int __comp; \ + name##_SPLAY(head, elm); \ + __comp = (cmp)(elm, (head)->sph_root); \ + if (__comp < 0) { \ + SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field); \ + SPLAY_RIGHT(elm, field) = (head)->sph_root; \ + SPLAY_LEFT((head)->sph_root, field) = NULL; \ + } else if (__comp > 0) { \ + SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field); \ + SPLAY_LEFT(elm, field) = (head)->sph_root; \ + SPLAY_RIGHT((head)->sph_root, field) = NULL; \ + } else \ + return ((head)->sph_root); \ + } \ + (head)->sph_root = (elm); \ + return (NULL); \ + } \ + \ + struct type* name##_SPLAY_REMOVE(struct name* head, struct type* elm) { \ + struct type* __tmp; \ + if (SPLAY_EMPTY(head)) \ + return (NULL); \ + name##_SPLAY(head, elm); \ + if ((cmp)(elm, (head)->sph_root) == 0) { \ + if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \ + (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \ + } else { \ + __tmp = SPLAY_RIGHT((head)->sph_root, field); \ + (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \ + name##_SPLAY(head, elm); \ + SPLAY_RIGHT((head)->sph_root, field) = __tmp; \ + } \ + return (elm); \ + } \ + return (NULL); \ + } \ + \ + void name##_SPLAY(struct name* head, struct type* elm) { \ + struct type __node, *__left, *__right, *__tmp; \ + int __comp; \ + \ + SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL; \ + __left = __right = &__node; \ + \ + while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) { \ + if (__comp < 0) { \ + __tmp = SPLAY_LEFT((head)->sph_root, field); \ + if (__tmp == NULL) \ + break; \ + if ((cmp)(elm, __tmp) < 0) { \ + SPLAY_ROTATE_RIGHT(head, __tmp, field); \ + if (SPLAY_LEFT((head)->sph_root, field) == NULL) \ + break; \ + } \ + SPLAY_LINKLEFT(head, __right, field); \ + } else if (__comp > 0) { \ + __tmp = SPLAY_RIGHT((head)->sph_root, field); \ + if (__tmp == NULL) \ + break; \ + if ((cmp)(elm, __tmp) > 0) { \ + SPLAY_ROTATE_LEFT(head, __tmp, field); \ + if (SPLAY_RIGHT((head)->sph_root, field) == NULL) \ + break; \ + } \ + SPLAY_LINKRIGHT(head, __left, field); \ + } \ + } \ + SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \ + } \ + \ + /* Splay with either the minimum or the maximum element \ + * Used to find minimum or maximum element in tree. 
\ + */ \ + void name##_SPLAY_MINMAX(struct name* head, int __comp) { \ + struct type __node, *__left, *__right, *__tmp; \ + \ + SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL; \ + __left = __right = &__node; \ + \ + while (1) { \ + if (__comp < 0) { \ + __tmp = SPLAY_LEFT((head)->sph_root, field); \ + if (__tmp == NULL) \ + break; \ + if (__comp < 0) { \ + SPLAY_ROTATE_RIGHT(head, __tmp, field); \ + if (SPLAY_LEFT((head)->sph_root, field) == NULL) \ + break; \ + } \ + SPLAY_LINKLEFT(head, __right, field); \ + } else if (__comp > 0) { \ + __tmp = SPLAY_RIGHT((head)->sph_root, field); \ + if (__tmp == NULL) \ + break; \ + if (__comp > 0) { \ + SPLAY_ROTATE_LEFT(head, __tmp, field); \ + if (SPLAY_RIGHT((head)->sph_root, field) == NULL) \ + break; \ + } \ + SPLAY_LINKRIGHT(head, __left, field); \ + } \ + } \ + SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \ + } + +#define SPLAY_NEGINF -1 +#define SPLAY_INF 1 + +#define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y) +#define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y) +#define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y) +#define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y) +#define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF)) +#define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL : name##_SPLAY_MIN_MAX(x, SPLAY_INF)) + +#define SPLAY_FOREACH(x, name, head) \ + for ((x) = SPLAY_MIN(name, head); (x) != NULL; (x) = SPLAY_NEXT(name, head, x)) + +/* Macros that define a red-black tree */ +#define RB_HEAD(name, type) \ + struct name { \ + struct type* rbh_root; /* root of the tree */ \ + } + +#define RB_INITIALIZER(root) \ + { NULL } + +#define RB_INIT(root) \ + do { \ + (root)->rbh_root = NULL; \ + } while (/*CONSTCOND*/ 0) + +#define RB_BLACK 0 +#define RB_RED 1 +#define RB_ENTRY(type) \ + struct { \ + struct type* rbe_left; /* left element */ \ + struct type* rbe_right; /* right element */ \ + struct type* rbe_parent; /* parent element */ \ + int rbe_color; /* node color */ \ + } + +#define RB_LEFT(elm, field) (elm)->field.rbe_left +#define RB_RIGHT(elm, field) (elm)->field.rbe_right +#define RB_PARENT(elm, field) (elm)->field.rbe_parent +#define RB_COLOR(elm, field) (elm)->field.rbe_color +#define RB_ROOT(head) (head)->rbh_root +#define RB_EMPTY(head) (RB_ROOT(head) == NULL) + +#define RB_SET(elm, parent, field) \ + do { \ + RB_PARENT(elm, field) = parent; \ + RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \ + RB_COLOR(elm, field) = RB_RED; \ + } while (/*CONSTCOND*/ 0) + +#define RB_SET_BLACKRED(black, red, field) \ + do { \ + RB_COLOR(black, field) = RB_BLACK; \ + RB_COLOR(red, field) = RB_RED; \ + } while (/*CONSTCOND*/ 0) + +#ifndef RB_AUGMENT +#define RB_AUGMENT(x) \ + do { \ + } while (0) +#endif + +#define RB_ROTATE_LEFT(head, elm, tmp, field) \ + do { \ + (tmp) = RB_RIGHT(elm, field); \ + if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field)) != NULL) { \ + RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \ + } \ + RB_AUGMENT(elm); \ + if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \ + if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \ + RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \ + else \ + RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \ + } else \ + (head)->rbh_root = (tmp); \ + RB_LEFT(tmp, field) = (elm); \ + RB_PARENT(elm, field) = (tmp); \ + RB_AUGMENT(tmp); \ + if ((RB_PARENT(tmp, field))) \ + RB_AUGMENT(RB_PARENT(tmp, field)); \ + } while (/*CONSTCOND*/ 0) + +#define RB_ROTATE_RIGHT(head, elm, tmp, field) \ + do { \ + (tmp) = 
RB_LEFT(elm, field); \ + if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field)) != NULL) { \ + RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \ + } \ + RB_AUGMENT(elm); \ + if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \ + if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \ + RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \ + else \ + RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \ + } else \ + (head)->rbh_root = (tmp); \ + RB_RIGHT(tmp, field) = (elm); \ + RB_PARENT(elm, field) = (tmp); \ + RB_AUGMENT(tmp); \ + if ((RB_PARENT(tmp, field))) \ + RB_AUGMENT(RB_PARENT(tmp, field)); \ + } while (/*CONSTCOND*/ 0) + +/* Generates prototypes and inline functions */ +#define RB_PROTOTYPE(name, type, field, cmp) RB_PROTOTYPE_INTERNAL(name, type, field, cmp, ) +#define RB_PROTOTYPE_STATIC(name, type, field, cmp) \ + RB_PROTOTYPE_INTERNAL(name, type, field, cmp, static) +#define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \ + RB_PROTOTYPE_INSERT_COLOR(name, type, attr); \ + RB_PROTOTYPE_REMOVE_COLOR(name, type, attr); \ + RB_PROTOTYPE_INSERT(name, type, attr); \ + RB_PROTOTYPE_REMOVE(name, type, attr); \ + RB_PROTOTYPE_FIND(name, type, attr); \ + RB_PROTOTYPE_NFIND(name, type, attr); \ + RB_PROTOTYPE_FIND_LIGHT(name, type, attr); \ + RB_PROTOTYPE_NFIND_LIGHT(name, type, attr); \ + RB_PROTOTYPE_NEXT(name, type, attr); \ + RB_PROTOTYPE_PREV(name, type, attr); \ + RB_PROTOTYPE_MINMAX(name, type, attr); +#define RB_PROTOTYPE_INSERT_COLOR(name, type, attr) \ + attr void name##_RB_INSERT_COLOR(struct name*, struct type*) +#define RB_PROTOTYPE_REMOVE_COLOR(name, type, attr) \ + attr void name##_RB_REMOVE_COLOR(struct name*, struct type*, struct type*) +#define RB_PROTOTYPE_REMOVE(name, type, attr) \ + attr struct type* name##_RB_REMOVE(struct name*, struct type*) +#define RB_PROTOTYPE_INSERT(name, type, attr) \ + attr struct type* name##_RB_INSERT(struct name*, struct type*) +#define RB_PROTOTYPE_FIND(name, type, attr) \ + attr struct type* name##_RB_FIND(struct name*, struct type*) +#define RB_PROTOTYPE_NFIND(name, type, attr) \ + attr struct type* name##_RB_NFIND(struct name*, struct type*) +#define RB_PROTOTYPE_FIND_LIGHT(name, type, attr) \ + attr struct type* name##_RB_FIND_LIGHT(struct name*, const void*) +#define RB_PROTOTYPE_NFIND_LIGHT(name, type, attr) \ + attr struct type* name##_RB_NFIND_LIGHT(struct name*, const void*) +#define RB_PROTOTYPE_NEXT(name, type, attr) attr struct type* name##_RB_NEXT(struct type*) +#define RB_PROTOTYPE_PREV(name, type, attr) attr struct type* name##_RB_PREV(struct type*) +#define RB_PROTOTYPE_MINMAX(name, type, attr) attr struct type* name##_RB_MINMAX(struct name*, int) + +/* Main rb operation. 
+ * Moves node close to the key of elm to top + */ +#define RB_GENERATE_WITHOUT_COMPARE(name, type, field) \ + RB_GENERATE_WITHOUT_COMPARE_INTERNAL(name, type, field, ) +#define RB_GENERATE_WITHOUT_COMPARE_STATIC(name, type, field) \ + RB_GENERATE_WITHOUT_COMPARE_INTERNAL(name, type, field, static) +#define RB_GENERATE_WITHOUT_COMPARE_INTERNAL(name, type, field, attr) \ + RB_GENERATE_REMOVE_COLOR(name, type, field, attr) \ + RB_GENERATE_REMOVE(name, type, field, attr) \ + RB_GENERATE_NEXT(name, type, field, attr) \ + RB_GENERATE_PREV(name, type, field, attr) \ + RB_GENERATE_MINMAX(name, type, field, attr) + +#define RB_GENERATE_WITH_COMPARE(name, type, field, cmp, lcmp) \ + RB_GENERATE_WITH_COMPARE_INTERNAL(name, type, field, cmp, lcmp, ) +#define RB_GENERATE_WITH_COMPARE_STATIC(name, type, field, cmp, lcmp) \ + RB_GENERATE_WITH_COMPARE_INTERNAL(name, type, field, cmp, lcmp, static) +#define RB_GENERATE_WITH_COMPARE_INTERNAL(name, type, field, cmp, lcmp, attr) \ + RB_GENERATE_INSERT_COLOR(name, type, field, attr) \ + RB_GENERATE_INSERT(name, type, field, cmp, attr) \ + RB_GENERATE_FIND(name, type, field, cmp, attr) \ + RB_GENERATE_NFIND(name, type, field, cmp, attr) \ + RB_GENERATE_FIND_LIGHT(name, type, field, lcmp, attr) \ + RB_GENERATE_NFIND_LIGHT(name, type, field, lcmp, attr) + +#define RB_GENERATE_ALL(name, type, field, cmp) RB_GENERATE_ALL_INTERNAL(name, type, field, cmp, ) +#define RB_GENERATE_ALL_STATIC(name, type, field, cmp) \ + RB_GENERATE_ALL_INTERNAL(name, type, field, cmp, static) +#define RB_GENERATE_ALL_INTERNAL(name, type, field, cmp, attr) \ + RB_GENERATE_WITHOUT_COMPARE_INTERNAL(name, type, field, attr) \ + RB_GENERATE_WITH_COMPARE_INTERNAL(name, type, field, cmp, attr) + +#define RB_GENERATE_INSERT_COLOR(name, type, field, attr) \ + attr void name##_RB_INSERT_COLOR(struct name* head, struct type* elm) { \ + struct type *parent, *gparent, *tmp; \ + while ((parent = RB_PARENT(elm, field)) != NULL && RB_COLOR(parent, field) == RB_RED) { \ + gparent = RB_PARENT(parent, field); \ + if (parent == RB_LEFT(gparent, field)) { \ + tmp = RB_RIGHT(gparent, field); \ + if (tmp && RB_COLOR(tmp, field) == RB_RED) { \ + RB_COLOR(tmp, field) = RB_BLACK; \ + RB_SET_BLACKRED(parent, gparent, field); \ + elm = gparent; \ + continue; \ + } \ + if (RB_RIGHT(parent, field) == elm) { \ + RB_ROTATE_LEFT(head, parent, tmp, field); \ + tmp = parent; \ + parent = elm; \ + elm = tmp; \ + } \ + RB_SET_BLACKRED(parent, gparent, field); \ + RB_ROTATE_RIGHT(head, gparent, tmp, field); \ + } else { \ + tmp = RB_LEFT(gparent, field); \ + if (tmp && RB_COLOR(tmp, field) == RB_RED) { \ + RB_COLOR(tmp, field) = RB_BLACK; \ + RB_SET_BLACKRED(parent, gparent, field); \ + elm = gparent; \ + continue; \ + } \ + if (RB_LEFT(parent, field) == elm) { \ + RB_ROTATE_RIGHT(head, parent, tmp, field); \ + tmp = parent; \ + parent = elm; \ + elm = tmp; \ + } \ + RB_SET_BLACKRED(parent, gparent, field); \ + RB_ROTATE_LEFT(head, gparent, tmp, field); \ + } \ + } \ + RB_COLOR(head->rbh_root, field) = RB_BLACK; \ + } + +#define RB_GENERATE_REMOVE_COLOR(name, type, field, attr) \ + attr void name##_RB_REMOVE_COLOR(struct name* head, struct type* parent, struct type* elm) { \ + struct type* tmp; \ + while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && elm != RB_ROOT(head)) { \ + if (RB_LEFT(parent, field) == elm) { \ + tmp = RB_RIGHT(parent, field); \ + if (RB_COLOR(tmp, field) == RB_RED) { \ + RB_SET_BLACKRED(tmp, parent, field); \ + RB_ROTATE_LEFT(head, parent, tmp, field); \ + tmp = RB_RIGHT(parent, field); \ + } \ 
+ if ((RB_LEFT(tmp, field) == NULL || \ + RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) && \ + (RB_RIGHT(tmp, field) == NULL || \ + RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) { \ + RB_COLOR(tmp, field) = RB_RED; \ + elm = parent; \ + parent = RB_PARENT(elm, field); \ + } else { \ + if (RB_RIGHT(tmp, field) == NULL || \ + RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) { \ + struct type* oleft; \ + if ((oleft = RB_LEFT(tmp, field)) != NULL) \ + RB_COLOR(oleft, field) = RB_BLACK; \ + RB_COLOR(tmp, field) = RB_RED; \ + RB_ROTATE_RIGHT(head, tmp, oleft, field); \ + tmp = RB_RIGHT(parent, field); \ + } \ + RB_COLOR(tmp, field) = RB_COLOR(parent, field); \ + RB_COLOR(parent, field) = RB_BLACK; \ + if (RB_RIGHT(tmp, field)) \ + RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK; \ + RB_ROTATE_LEFT(head, parent, tmp, field); \ + elm = RB_ROOT(head); \ + break; \ + } \ + } else { \ + tmp = RB_LEFT(parent, field); \ + if (RB_COLOR(tmp, field) == RB_RED) { \ + RB_SET_BLACKRED(tmp, parent, field); \ + RB_ROTATE_RIGHT(head, parent, tmp, field); \ + tmp = RB_LEFT(parent, field); \ + } \ + if ((RB_LEFT(tmp, field) == NULL || \ + RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) && \ + (RB_RIGHT(tmp, field) == NULL || \ + RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) { \ + RB_COLOR(tmp, field) = RB_RED; \ + elm = parent; \ + parent = RB_PARENT(elm, field); \ + } else { \ + if (RB_LEFT(tmp, field) == NULL || \ + RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) { \ + struct type* oright; \ + if ((oright = RB_RIGHT(tmp, field)) != NULL) \ + RB_COLOR(oright, field) = RB_BLACK; \ + RB_COLOR(tmp, field) = RB_RED; \ + RB_ROTATE_LEFT(head, tmp, oright, field); \ + tmp = RB_LEFT(parent, field); \ + } \ + RB_COLOR(tmp, field) = RB_COLOR(parent, field); \ + RB_COLOR(parent, field) = RB_BLACK; \ + if (RB_LEFT(tmp, field)) \ + RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK; \ + RB_ROTATE_RIGHT(head, parent, tmp, field); \ + elm = RB_ROOT(head); \ + break; \ + } \ + } \ + } \ + if (elm) \ + RB_COLOR(elm, field) = RB_BLACK; \ + } + +#define RB_GENERATE_REMOVE(name, type, field, attr) \ + attr struct type* name##_RB_REMOVE(struct name* head, struct type* elm) { \ + struct type *child, *parent, *old = elm; \ + int color; \ + if (RB_LEFT(elm, field) == NULL) \ + child = RB_RIGHT(elm, field); \ + else if (RB_RIGHT(elm, field) == NULL) \ + child = RB_LEFT(elm, field); \ + else { \ + struct type* left; \ + elm = RB_RIGHT(elm, field); \ + while ((left = RB_LEFT(elm, field)) != NULL) \ + elm = left; \ + child = RB_RIGHT(elm, field); \ + parent = RB_PARENT(elm, field); \ + color = RB_COLOR(elm, field); \ + if (child) \ + RB_PARENT(child, field) = parent; \ + if (parent) { \ + if (RB_LEFT(parent, field) == elm) \ + RB_LEFT(parent, field) = child; \ + else \ + RB_RIGHT(parent, field) = child; \ + RB_AUGMENT(parent); \ + } else \ + RB_ROOT(head) = child; \ + if (RB_PARENT(elm, field) == old) \ + parent = elm; \ + (elm)->field = (old)->field; \ + if (RB_PARENT(old, field)) { \ + if (RB_LEFT(RB_PARENT(old, field), field) == old) \ + RB_LEFT(RB_PARENT(old, field), field) = elm; \ + else \ + RB_RIGHT(RB_PARENT(old, field), field) = elm; \ + RB_AUGMENT(RB_PARENT(old, field)); \ + } else \ + RB_ROOT(head) = elm; \ + RB_PARENT(RB_LEFT(old, field), field) = elm; \ + if (RB_RIGHT(old, field)) \ + RB_PARENT(RB_RIGHT(old, field), field) = elm; \ + if (parent) { \ + left = parent; \ + do { \ + RB_AUGMENT(left); \ + } while ((left = RB_PARENT(left, field)) != NULL); \ + } \ + goto color; \ + } \ + parent = RB_PARENT(elm, 
field); \ + color = RB_COLOR(elm, field); \ + if (child) \ + RB_PARENT(child, field) = parent; \ + if (parent) { \ + if (RB_LEFT(parent, field) == elm) \ + RB_LEFT(parent, field) = child; \ + else \ + RB_RIGHT(parent, field) = child; \ + RB_AUGMENT(parent); \ + } else \ + RB_ROOT(head) = child; \ + color: \ + if (color == RB_BLACK) \ + name##_RB_REMOVE_COLOR(head, parent, child); \ + return (old); \ + } + +#define RB_GENERATE_INSERT(name, type, field, cmp, attr) \ + /* Inserts a node into the RB tree */ \ + attr struct type* name##_RB_INSERT(struct name* head, struct type* elm) { \ + struct type* tmp; \ + struct type* parent = NULL; \ + int comp = 0; \ + tmp = RB_ROOT(head); \ + while (tmp) { \ + parent = tmp; \ + comp = (cmp)(elm, parent); \ + if (comp < 0) \ + tmp = RB_LEFT(tmp, field); \ + else if (comp > 0) \ + tmp = RB_RIGHT(tmp, field); \ + else \ + return (tmp); \ + } \ + RB_SET(elm, parent, field); \ + if (parent != NULL) { \ + if (comp < 0) \ + RB_LEFT(parent, field) = elm; \ + else \ + RB_RIGHT(parent, field) = elm; \ + RB_AUGMENT(parent); \ + } else \ + RB_ROOT(head) = elm; \ + name##_RB_INSERT_COLOR(head, elm); \ + return (NULL); \ + } + +#define RB_GENERATE_FIND(name, type, field, cmp, attr) \ + /* Finds the node with the same key as elm */ \ + attr struct type* name##_RB_FIND(struct name* head, struct type* elm) { \ + struct type* tmp = RB_ROOT(head); \ + int comp; \ + while (tmp) { \ + comp = cmp(elm, tmp); \ + if (comp < 0) \ + tmp = RB_LEFT(tmp, field); \ + else if (comp > 0) \ + tmp = RB_RIGHT(tmp, field); \ + else \ + return (tmp); \ + } \ + return (NULL); \ + } + +#define RB_GENERATE_NFIND(name, type, field, cmp, attr) \ + /* Finds the first node greater than or equal to the search key */ \ + attr struct type* name##_RB_NFIND(struct name* head, struct type* elm) { \ + struct type* tmp = RB_ROOT(head); \ + struct type* res = NULL; \ + int comp; \ + while (tmp) { \ + comp = cmp(elm, tmp); \ + if (comp < 0) { \ + res = tmp; \ + tmp = RB_LEFT(tmp, field); \ + } else if (comp > 0) \ + tmp = RB_RIGHT(tmp, field); \ + else \ + return (tmp); \ + } \ + return (res); \ + } + +#define RB_GENERATE_FIND_LIGHT(name, type, field, lcmp, attr) \ + /* Finds the node with the same key as elm */ \ + attr struct type* name##_RB_FIND_LIGHT(struct name* head, const void* lelm) { \ + struct type* tmp = RB_ROOT(head); \ + int comp; \ + while (tmp) { \ + comp = lcmp(lelm, tmp); \ + if (comp < 0) \ + tmp = RB_LEFT(tmp, field); \ + else if (comp > 0) \ + tmp = RB_RIGHT(tmp, field); \ + else \ + return (tmp); \ + } \ + return (NULL); \ + } + +#define RB_GENERATE_NFIND_LIGHT(name, type, field, lcmp, attr) \ + /* Finds the first node greater than or equal to the search key */ \ + attr struct type* name##_RB_NFIND_LIGHT(struct name* head, const void* lelm) { \ + struct type* tmp = RB_ROOT(head); \ + struct type* res = NULL; \ + int comp; \ + while (tmp) { \ + comp = lcmp(lelm, tmp); \ + if (comp < 0) { \ + res = tmp; \ + tmp = RB_LEFT(tmp, field); \ + } else if (comp > 0) \ + tmp = RB_RIGHT(tmp, field); \ + else \ + return (tmp); \ + } \ + return (res); \ + } + +#define RB_GENERATE_NEXT(name, type, field, attr) \ + /* ARGSUSED */ \ + attr struct type* name##_RB_NEXT(struct type* elm) { \ + if (RB_RIGHT(elm, field)) { \ + elm = RB_RIGHT(elm, field); \ + while (RB_LEFT(elm, field)) \ + elm = RB_LEFT(elm, field); \ + } else { \ + if (RB_PARENT(elm, field) && (elm == RB_LEFT(RB_PARENT(elm, field), field))) \ + elm = RB_PARENT(elm, field); \ + else { \ + while (RB_PARENT(elm, field) && (elm == 
RB_RIGHT(RB_PARENT(elm, field), field))) \ + elm = RB_PARENT(elm, field); \ + elm = RB_PARENT(elm, field); \ + } \ + } \ + return (elm); \ + } + +#define RB_GENERATE_PREV(name, type, field, attr) \ + /* ARGSUSED */ \ + attr struct type* name##_RB_PREV(struct type* elm) { \ + if (RB_LEFT(elm, field)) { \ + elm = RB_LEFT(elm, field); \ + while (RB_RIGHT(elm, field)) \ + elm = RB_RIGHT(elm, field); \ + } else { \ + if (RB_PARENT(elm, field) && (elm == RB_RIGHT(RB_PARENT(elm, field), field))) \ + elm = RB_PARENT(elm, field); \ + else { \ + while (RB_PARENT(elm, field) && (elm == RB_LEFT(RB_PARENT(elm, field), field))) \ + elm = RB_PARENT(elm, field); \ + elm = RB_PARENT(elm, field); \ + } \ + } \ + return (elm); \ + } + +#define RB_GENERATE_MINMAX(name, type, field, attr) \ + attr struct type* name##_RB_MINMAX(struct name* head, int val) { \ + struct type* tmp = RB_ROOT(head); \ + struct type* parent = NULL; \ + while (tmp) { \ + parent = tmp; \ + if (val < 0) \ + tmp = RB_LEFT(tmp, field); \ + else \ + tmp = RB_RIGHT(tmp, field); \ + } \ + return (parent); \ + } + +#define RB_NEGINF -1 +#define RB_INF 1 + +#define RB_INSERT(name, x, y) name##_RB_INSERT(x, y) +#define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y) +#define RB_FIND(name, x, y) name##_RB_FIND(x, y) +#define RB_NFIND(name, x, y) name##_RB_NFIND(x, y) +#define RB_FIND_LIGHT(name, x, y) name##_RB_FIND_LIGHT(x, y) +#define RB_NFIND_LIGHT(name, x, y) name##_RB_NFIND_LIGHT(x, y) +#define RB_NEXT(name, x, y) name##_RB_NEXT(y) +#define RB_PREV(name, x, y) name##_RB_PREV(y) +#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF) +#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF) + +#define RB_FOREACH(x, name, head) \ + for ((x) = RB_MIN(name, head); (x) != NULL; (x) = name##_RB_NEXT(x)) + +#define RB_FOREACH_FROM(x, name, y) \ + for ((x) = (y); ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); (x) = (y)) + +#define RB_FOREACH_SAFE(x, name, head, y) \ + for ((x) = RB_MIN(name, head); ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \ + (x) = (y)) + +#define RB_FOREACH_REVERSE(x, name, head) \ + for ((x) = RB_MAX(name, head); (x) != NULL; (x) = name##_RB_PREV(x)) + +#define RB_FOREACH_REVERSE_FROM(x, name, y) \ + for ((x) = (y); ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); (x) = (y)) + +#define RB_FOREACH_REVERSE_SAFE(x, name, head, y) \ + for ((x) = RB_MAX(name, head); ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \ + (x) = (y)) + +#endif /* _SYS_TREE_H_ */ From 4bbf173fc1c518d6189c1c7fae0ecf71a555530e Mon Sep 17 00:00:00 2001 From: bunnei Date: Sun, 27 Dec 2020 21:04:33 -0800 Subject: [PATCH 05/20] core: hle: kernel: svc_types: Add type definitions for KAddressArbiter. --- src/core/hle/kernel/svc_types.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h index 11e1d8e2da..d623f7a50d 100644 --- a/src/core/hle/kernel/svc_types.h +++ b/src/core/hle/kernel/svc_types.h @@ -65,4 +65,16 @@ struct MemoryInfo { u32 padding{}; }; +enum class SignalType : u32 { + Signal = 0, + SignalAndIncrementIfEqual = 1, + SignalAndModifyByWaitingCountIfEqual = 2, +}; + +enum class ArbitrationType : u32 { + WaitIfLessThan = 0, + DecrementAndWaitIfLessThan = 1, + WaitIfEqual = 2, +}; + } // namespace Kernel::Svc From 7420a717e6b69d223ea021ae3515538b325a54a4 Mon Sep 17 00:00:00 2001 From: bunnei Date: Sun, 27 Dec 2020 21:08:31 -0800 Subject: [PATCH 06/20] core: hle: kernel: Add some useful functions for checking kernel addresses. 
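A note on the memory_layout.h additions in the patch below: IsKernelAddressKey and IsKernelAddress cover the same address range, written once with an inclusive last byte and once with an exclusive end. A standalone sketch of the boundary behavior (constants copied from the diff; the static_asserts are illustrative and not part of the patch):

#include <cstddef>
#include <cstdint>

using VAddr = uint64_t;

constexpr std::size_t KernelAslrAlignment = 2 * 1024 * 1024;
constexpr std::size_t KernelVirtualAddressSpaceWidth = 1ULL << 39;
constexpr std::size_t KernelVirtualAddressSpaceBase = 0ULL - KernelVirtualAddressSpaceWidth;
constexpr std::size_t KernelVirtualAddressSpaceEnd =
    KernelVirtualAddressSpaceBase + (KernelVirtualAddressSpaceWidth - KernelAslrAlignment);
constexpr std::size_t KernelVirtualAddressSpaceLast = KernelVirtualAddressSpaceEnd - 1;

constexpr bool IsKernelAddressKey(VAddr key) {
    return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast;
}

// The last byte of the space is a kernel address key; one-past-the-end is not.
static_assert(IsKernelAddressKey(KernelVirtualAddressSpaceLast));
static_assert(!IsKernelAddressKey(KernelVirtualAddressSpaceEnd));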
--- src/core/hle/kernel/memory/memory_layout.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/src/core/hle/kernel/memory/memory_layout.h b/src/core/hle/kernel/memory/memory_layout.h index 9b3d6267a0..c7c0b2f491 100644 --- a/src/core/hle/kernel/memory/memory_layout.h +++ b/src/core/hle/kernel/memory/memory_layout.h @@ -5,9 +5,28 @@ #pragma once #include "common/common_types.h" +#include "core/device_memory.h" namespace Kernel::Memory { +constexpr std::size_t KernelAslrAlignment = 2 * 1024 * 1024; +constexpr std::size_t KernelVirtualAddressSpaceWidth = 1ULL << 39; +constexpr std::size_t KernelPhysicalAddressSpaceWidth = 1ULL << 48; +constexpr std::size_t KernelVirtualAddressSpaceBase = 0ULL - KernelVirtualAddressSpaceWidth; +constexpr std::size_t KernelVirtualAddressSpaceEnd = + KernelVirtualAddressSpaceBase + (KernelVirtualAddressSpaceWidth - KernelAslrAlignment); +constexpr std::size_t KernelVirtualAddressSpaceLast = KernelVirtualAddressSpaceEnd - 1; +constexpr std::size_t KernelVirtualAddressSpaceSize = + KernelVirtualAddressSpaceEnd - KernelVirtualAddressSpaceBase; + +constexpr bool IsKernelAddressKey(VAddr key) { + return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast; +} + +constexpr bool IsKernelAddress(VAddr address) { + return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd; +} + class MemoryRegion final { friend class MemoryLayout; From c3c43e32fcf198444acb493483e03fcb193156df Mon Sep 17 00:00:00 2001 From: bunnei Date: Mon, 28 Dec 2020 13:16:43 -0800 Subject: [PATCH 07/20] hle: kernel: thread: Replace ThreadStatus/ThreadSchedStatus with a single ThreadState. - This is how the real kernel works, and is more accurate and simpler. --- src/core/hle/kernel/address_arbiter.cpp | 4 +- src/core/hle/kernel/k_scheduler.cpp | 32 ++++---- src/core/hle/kernel/k_scheduler.h | 4 +- .../hle/kernel/k_synchronization_object.cpp | 6 +- src/core/hle/kernel/kernel.cpp | 2 +- src/core/hle/kernel/mutex.cpp | 2 +- src/core/hle/kernel/process.cpp | 4 +- src/core/hle/kernel/svc.cpp | 11 ++- src/core/hle/kernel/thread.cpp | 81 +++++++------------ src/core/hle/kernel/thread.h | 75 ++++++++--------- src/core/hle/kernel/time_manager.cpp | 3 +- src/yuzu/debugger/wait_tree.cpp | 59 ++++---------- 12 files changed, 111 insertions(+), 172 deletions(-) diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp index 23e1ef032f..fe8675186a 100644 --- a/src/core/hle/kernel/address_arbiter.cpp +++ b/src/core/hle/kernel/address_arbiter.cpp @@ -201,7 +201,7 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6 current_thread->SetArbiterWaitAddress(address); InsertThread(SharedFrom(current_thread)); - current_thread->SetState(ThreadStatus::WaitArb); + current_thread->SetState(ThreadState::Waiting); current_thread->WaitForArbitration(true); } @@ -256,7 +256,7 @@ ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 t current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT); current_thread->SetArbiterWaitAddress(address); InsertThread(SharedFrom(current_thread)); - current_thread->SetState(ThreadStatus::WaitArb); + current_thread->SetState(ThreadState::Waiting); current_thread->WaitForArbitration(true); } diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index f44d319926..cdcb89f682 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp @@ -180,22 +180,22 @@ u64 
KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { return cores_needing_scheduling; } -void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state) { +void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, ThreadState old_state) { ASSERT(kernel.GlobalSchedulerContext().IsLocked()); // Check if the state has changed, because if it hasn't there's nothing to do. - const auto cur_state = thread->scheduling_state; + const auto cur_state = thread->GetRawState(); if (cur_state == old_state) { return; } // Update the priority queues. - if (old_state == static_cast(ThreadSchedStatus::Runnable)) { + if (old_state == ThreadState::Runnable) { // If we were previously runnable, then we're not runnable now, and we should remove. GetPriorityQueue(kernel).Remove(thread); IncrementScheduledCount(thread); SetSchedulerUpdateNeeded(kernel); - } else if (cur_state == static_cast(ThreadSchedStatus::Runnable)) { + } else if (cur_state == ThreadState::Runnable) { // If we're now runnable, then we weren't previously, and we should add. GetPriorityQueue(kernel).PushBack(thread); IncrementScheduledCount(thread); @@ -204,12 +204,12 @@ void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 ol } void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread, - u32 old_priority) { + s32 old_priority) { ASSERT(kernel.GlobalSchedulerContext().IsLocked()); // If the thread is runnable, we want to change its priority in the queue. - if (thread->scheduling_state == static_cast(ThreadSchedStatus::Runnable)) { + if (thread->GetRawState() == ThreadState::Runnable) { GetPriorityQueue(kernel).ChangePriority( old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread); IncrementScheduledCount(thread); @@ -222,7 +222,7 @@ void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread, ASSERT(kernel.GlobalSchedulerContext().IsLocked()); // If the thread is runnable, we want to change its affinity in the queue. - if (thread->scheduling_state == static_cast(ThreadSchedStatus::Runnable)) { + if (thread->GetRawState() == ThreadState::Runnable) { GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread); IncrementScheduledCount(thread); SetSchedulerUpdateNeeded(kernel); @@ -395,8 +395,8 @@ void KScheduler::YieldWithoutCoreMigration() { { KScopedSchedulerLock lock(kernel); - const auto cur_state = cur_thread.scheduling_state; - if (cur_state == static_cast(ThreadSchedStatus::Runnable)) { + const auto cur_state = cur_thread.GetRawState(); + if (cur_state == ThreadState::Runnable) { // Put the current thread at the back of the queue. Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread)); IncrementScheduledCount(std::addressof(cur_thread)); @@ -436,8 +436,8 @@ void KScheduler::YieldWithCoreMigration() { { KScopedSchedulerLock lock(kernel); - const auto cur_state = cur_thread.scheduling_state; - if (cur_state == static_cast(ThreadSchedStatus::Runnable)) { + const auto cur_state = cur_thread.GetRawState(); + if (cur_state == ThreadState::Runnable) { // Get the current active core. 
const s32 core_id = cur_thread.GetActiveCore(); @@ -526,8 +526,8 @@ void KScheduler::YieldToAnyThread() { { KScopedSchedulerLock lock(kernel); - const auto cur_state = cur_thread.scheduling_state; - if (cur_state == static_cast(ThreadSchedStatus::Runnable)) { + const auto cur_state = cur_thread.GetRawState(); + if (cur_state == ThreadState::Runnable) { // Get the current active core. const s32 core_id = cur_thread.GetActiveCore(); @@ -645,7 +645,7 @@ void KScheduler::Unload(Thread* thread) { void KScheduler::Reload(Thread* thread) { if (thread) { - ASSERT_MSG(thread->GetState() == ThreadSchedStatus::Runnable, "Thread must be runnable."); + ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable."); // Cancel any outstanding wakeup events for this thread thread->SetIsRunning(true); @@ -724,7 +724,7 @@ void KScheduler::SwitchToCurrent() { do { if (current_thread != nullptr && !current_thread->IsHLEThread()) { current_thread->context_guard.lock(); - if (!current_thread->IsRunnable()) { + if (current_thread->GetRawState() != ThreadState::Runnable) { current_thread->context_guard.unlock(); break; } @@ -771,7 +771,7 @@ void KScheduler::Initialize() { { KScopedSchedulerLock lock{system.Kernel()}; - idle_thread->SetState(ThreadStatus::Ready); + idle_thread->SetState(ThreadState::Runnable); } } diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h index e84abc84c5..677375d1ad 100644 --- a/src/core/hle/kernel/k_scheduler.h +++ b/src/core/hle/kernel/k_scheduler.h @@ -100,11 +100,11 @@ public: void YieldToAnyThread(); /// Notify the scheduler a thread's status has changed. - static void OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state); + static void OnThreadStateChanged(KernelCore& kernel, Thread* thread, ThreadState old_state); /// Notify the scheduler a thread's priority has changed. static void OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread, - u32 old_priority); + s32 old_priority); /// Notify the scheduler a thread's core and/or affinity mask has changed. static void OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread, diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp index e7fd119d8c..64c566caa8 100644 --- a/src/core/hle/kernel/k_synchronization_object.cpp +++ b/src/core/hle/kernel/k_synchronization_object.cpp @@ -77,7 +77,7 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index, // Mark the thread as waiting. thread->SetCancellable(); thread->SetSyncedObject(nullptr, Svc::ResultTimedOut); - thread->SetState(ThreadState::WaitSynch); + thread->SetState(ThreadState::Waiting); } // The lock/sleep is done, so we should be able to get our result. @@ -148,9 +148,9 @@ void KSynchronizationObject::NotifyAvailable(ResultCode result) { // Iterate over each thread. 
for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) { Thread* thread = cur_node->thread; - if (thread->GetState() == ThreadSchedStatus::Paused) { + if (thread->GetState() == ThreadState::Waiting) { thread->SetSyncedObject(this, result); - thread->SetState(ThreadStatus::Ready); + thread->SetState(ThreadState::Runnable); } } } diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index f1dcbe2eb6..af4a5e33d0 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -602,7 +602,7 @@ void KernelCore::Suspend(bool in_suspention) { const bool should_suspend = exception_exited || in_suspention; { KScopedSchedulerLock lock(*this); - ThreadStatus status = should_suspend ? ThreadStatus::Ready : ThreadStatus::WaitSleep; + const auto status = should_suspend ? ThreadState::Runnable : ThreadState::Waiting; for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { impl->suspend_threads[i]->SetState(status); } diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp index badd883aa8..8a0faacf80 100644 --- a/src/core/hle/kernel/mutex.cpp +++ b/src/core/hle/kernel/mutex.cpp @@ -107,7 +107,7 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle, current_thread->SetMutexWaitAddress(address); current_thread->SetWaitHandle(requesting_thread_handle); - current_thread->SetState(ThreadStatus::WaitMutex); + current_thread->SetState(ThreadState::Waiting); // Update the lock holder thread's priority to prevent priority inversion. holding_thread->AddMutexWaiter(current_thread); diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index 92e877c3e2..a306c7c732 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp @@ -55,7 +55,7 @@ void SetupMainThread(Core::System& system, Process& owner_process, u32 priority, // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires { KScopedSchedulerLock lock{kernel}; - thread->SetState(ThreadStatus::Ready); + thread->SetState(ThreadState::Runnable); } } } // Anonymous namespace @@ -318,7 +318,7 @@ void Process::PrepareForTermination() { continue; // TODO(Subv): When are the other running/ready threads terminated? 
-        ASSERT_MSG(thread->GetStatus() == ThreadStatus::WaitSynch,
+        ASSERT_MSG(thread->GetState() == ThreadState::Waiting,
                    "Exiting processes with non-waiting threads is currently unimplemented");

         thread->Stop();
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 0a3064c7db..304b8727d6 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -343,7 +343,7 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
     auto thread = kernel.CurrentScheduler()->GetCurrentThread();
     {
         KScopedSchedulerLock lock(kernel);
-        thread->SetState(ThreadStatus::WaitIPC);
+        thread->SetState(ThreadState::Waiting);
         session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming());
     }

@@ -1546,7 +1546,7 @@ static ResultCode StartThread(Core::System& system, Handle thread_handle) {
         return ERR_INVALID_HANDLE;
     }

-    ASSERT(thread->GetStatus() == ThreadStatus::Dormant);
+    ASSERT(thread->GetState() == ThreadState::Initialized);

     return thread->Start();
 }
@@ -1661,7 +1661,8 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
         current_thread->SetCondVarWaitAddress(condition_variable_addr);
         current_thread->SetMutexWaitAddress(mutex_addr);
         current_thread->SetWaitHandle(thread_handle);
-        current_thread->SetState(ThreadStatus::WaitCondVar);
+        current_thread->SetState(ThreadState::Waiting);
+        current_thread->SetWaitingCondVar(true);
         current_process->InsertConditionVariableThread(SharedFrom(current_thread));
     }

@@ -1755,9 +1756,7 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_
             const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
             auto owner = handle_table.Get(owner_handle);
             ASSERT(owner);
-            if (thread->GetStatus() == ThreadStatus::WaitCondVar) {
-                thread->SetState(ThreadStatus::WaitMutex);
-            }
+            thread->SetWaitingCondVar(false);

             owner->AddMutexWaiter(thread);
         }
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index ac19e29970..33a4e1fa30 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -44,7 +44,7 @@ Thread::~Thread() = default;
 void Thread::Stop() {
     {
         KScopedSchedulerLock lock(kernel);
-        SetState(ThreadStatus::Dead);
+        SetState(ThreadState::Terminated);
         signaled = true;
         NotifyAvailable();
         kernel.GlobalHandleTable().Close(global_handle);
@@ -62,54 +62,43 @@ void Thread::Stop() {

 void Thread::Wakeup() {
     KScopedSchedulerLock lock(kernel);
-    switch (status) {
-    case ThreadStatus::Paused:
-    case ThreadStatus::WaitSynch:
-    case ThreadStatus::WaitHLEEvent:
-    case ThreadStatus::WaitSleep:
-    case ThreadStatus::WaitIPC:
-    case ThreadStatus::WaitMutex:
-    case ThreadStatus::WaitCondVar:
-    case ThreadStatus::WaitArb:
-    case ThreadStatus::Dormant:
-        break;
-
-    case ThreadStatus::Ready:
+    switch (thread_state) {
+    case ThreadState::Runnable:
         // If the thread is waiting on multiple wait objects, it might be awoken more than once
         // before actually resuming. We can ignore subsequent wakeups if the thread state has
         // already been set to ThreadState::Runnable.
         return;
-    case ThreadStatus::Dead:
+    case ThreadState::Terminated:
         // This should never happen, as threads must complete before being stopped.
DEBUG_ASSERT_MSG(false, "Thread with object id {} cannot be resumed because it's DEAD.", GetObjectId()); return; } - SetState(ThreadStatus::Ready); + SetState(ThreadState::Runnable); } void Thread::OnWakeUp() { KScopedSchedulerLock lock(kernel); - SetState(ThreadStatus::Ready); + SetState(ThreadState::Runnable); } ResultCode Thread::Start() { KScopedSchedulerLock lock(kernel); - SetState(ThreadStatus::Ready); + SetState(ThreadState::Runnable); return RESULT_SUCCESS; } void Thread::CancelWait() { KScopedSchedulerLock lock(kernel); - if (GetState() != ThreadSchedStatus::Paused || !is_cancellable) { + if (GetState() != ThreadState::Waiting || !is_cancellable) { is_sync_cancelled = true; return; } // TODO(Blinkhawk): Implement cancel of server session is_sync_cancelled = false; SetSynchronizationResults(nullptr, ERR_SYNCHRONIZATION_CANCELED); - SetState(ThreadStatus::Ready); + SetState(ThreadState::Runnable); } static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top, @@ -173,7 +162,7 @@ ResultVal> Thread::Create(Core::System& system, ThreadTy std::shared_ptr thread = std::make_shared(kernel); thread->thread_id = kernel.CreateNewThreadID(); - thread->status = ThreadStatus::Dormant; + thread->thread_state = ThreadState::Initialized; thread->entry_point = entry_point; thread->stack_top = stack_top; thread->disable_count = 1; @@ -235,27 +224,18 @@ VAddr Thread::GetCommandBufferAddress() const { return GetTLSAddress() + command_header_offset; } -void Thread::SetState(ThreadStatus new_status) { - if (new_status == status) { +void Thread::SetState(ThreadState new_status) { + if (new_status == thread_state) { return; } - switch (new_status) { - case ThreadStatus::Ready: - SetSchedulingStatus(ThreadSchedStatus::Runnable); - break; - case ThreadStatus::Dormant: - SetSchedulingStatus(ThreadSchedStatus::None); - break; - case ThreadStatus::Dead: - SetSchedulingStatus(ThreadSchedStatus::Exited); - break; - default: - SetSchedulingStatus(ThreadSchedStatus::Paused); - break; + if (new_status != ThreadState::Waiting) { + SetWaitingCondVar(false); } - status = new_status; + SetSchedulingStatus(new_status); + + thread_state = new_status; } void Thread::AddMutexWaiter(std::shared_ptr thread) { @@ -312,13 +292,13 @@ void Thread::UpdatePriority() { return; } - if (GetStatus() == ThreadStatus::WaitCondVar) { + if (GetState() == ThreadState::Waiting && is_waiting_on_condvar) { owner_process->RemoveConditionVariableThread(SharedFrom(this)); } SetCurrentPriority(new_priority); - if (GetStatus() == ThreadStatus::WaitCondVar) { + if (GetState() == ThreadState::Waiting && is_waiting_on_condvar) { owner_process->InsertConditionVariableThread(SharedFrom(this)); } @@ -340,7 +320,7 @@ ResultCode Thread::SetActivity(ThreadActivity value) { auto sched_status = GetState(); - if (sched_status != ThreadSchedStatus::Runnable && sched_status != ThreadSchedStatus::Paused) { + if (sched_status != ThreadState::Runnable && sched_status != ThreadState::Waiting) { return ERR_INVALID_STATE; } @@ -366,7 +346,7 @@ ResultCode Thread::Sleep(s64 nanoseconds) { Handle event_handle{}; { KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds); - SetState(ThreadStatus::WaitSleep); + SetState(ThreadState::Waiting); } if (event_handle != InvalidHandle) { @@ -377,25 +357,24 @@ ResultCode Thread::Sleep(s64 nanoseconds) { } void Thread::AddSchedulingFlag(ThreadSchedFlags flag) { - const u32 old_state = scheduling_state; + const auto old_state = GetRawState(); pausing_state |= static_cast(flag); - 
const u32 base_scheduling = static_cast(GetState()); - scheduling_state = base_scheduling | pausing_state; + const auto base_scheduling = GetState(); + thread_state = base_scheduling | static_cast(pausing_state); KScheduler::OnThreadStateChanged(kernel, this, old_state); } void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) { - const u32 old_state = scheduling_state; + const auto old_state = GetRawState(); pausing_state &= ~static_cast(flag); - const u32 base_scheduling = static_cast(GetState()); - scheduling_state = base_scheduling | pausing_state; + const auto base_scheduling = GetState(); + thread_state = base_scheduling | static_cast(pausing_state); KScheduler::OnThreadStateChanged(kernel, this, old_state); } -void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) { - const u32 old_state = scheduling_state; - scheduling_state = (scheduling_state & static_cast(ThreadSchedMasks::HighMask)) | - static_cast(new_status); +void Thread::SetSchedulingStatus(ThreadState new_status) { + const auto old_state = GetRawState(); + thread_state = (thread_state & ThreadState::HighMask) | new_status; KScheduler::OnThreadStateChanged(kernel, this, old_state); } diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h index 69458548bf..06dd2ef2d8 100644 --- a/src/core/hle/kernel/thread.h +++ b/src/core/hle/kernel/thread.h @@ -73,19 +73,26 @@ enum ThreadProcessorId : s32 { (1 << THREADPROCESSORID_2) | (1 << THREADPROCESSORID_3) }; -enum class ThreadStatus { - Ready, ///< Ready to run - Paused, ///< Paused by SetThreadActivity or debug - WaitHLEEvent, ///< Waiting for hle event to finish - WaitSleep, ///< Waiting due to a SleepThread SVC - WaitIPC, ///< Waiting for the reply from an IPC request - WaitSynch, ///< Waiting due to WaitSynchronization - WaitMutex, ///< Waiting due to an ArbitrateLock svc - WaitCondVar, ///< Waiting due to an WaitProcessWideKey svc - WaitArb, ///< Waiting due to a SignalToAddress/WaitForAddress svc - Dormant, ///< Created but not yet made ready - Dead ///< Run to completion, or forcefully terminated +enum class ThreadState : u16 { + Initialized = 0, + Waiting = 1, + Runnable = 2, + Terminated = 3, + + SuspendShift = 4, + Mask = (1 << SuspendShift) - 1, + + ProcessSuspended = (1 << (0 + SuspendShift)), + ThreadSuspended = (1 << (1 + SuspendShift)), + DebugSuspended = (1 << (2 + SuspendShift)), + BacktraceSuspended = (1 << (3 + SuspendShift)), + InitSuspended = (1 << (4 + SuspendShift)), + + SuspendFlagMask = ((1 << 5) - 1) << SuspendShift, + + HighMask = 0xfff0, }; +DECLARE_ENUM_FLAG_OPERATORS(ThreadState); enum class ThreadWakeupReason { Signal, // The thread was woken up by WakeupAllWaitingThreads due to an object signal. 
@@ -97,13 +104,6 @@ enum class ThreadActivity : u32 { Paused = 1, }; -enum class ThreadSchedStatus : u32 { - None = 0, - Paused = 1, - Runnable = 2, - Exited = 3, -}; - enum class ThreadSchedFlags : u32 { ProcessPauseFlag = 1 << 4, ThreadPauseFlag = 1 << 5, @@ -111,12 +111,6 @@ enum class ThreadSchedFlags : u32 { KernelInitPauseFlag = 1 << 8, }; -enum class ThreadSchedMasks : u32 { - LowMask = 0x000f, - HighMask = 0xfff0, - ForcePauseMask = 0x0070, -}; - class Thread final : public KSynchronizationObject { public: explicit Thread(KernelCore& kernel); @@ -326,11 +320,19 @@ public: std::shared_ptr& GetHostContext(); - ThreadStatus GetStatus() const { - return status; + ThreadState GetState() const { + return thread_state & ThreadState::Mask; } - void SetState(ThreadStatus new_status); + ThreadState GetRawState() const { + return thread_state; + } + + void SetState(ThreadState new_state); + + void SetWaitingCondVar(bool value) { + is_waiting_on_condvar = value; + } s64 GetLastScheduledTick() const { return this->last_scheduled_tick; @@ -447,15 +449,6 @@ public: this->schedule_count = count; } - ThreadSchedStatus GetState() const { - return static_cast(scheduling_state & - static_cast(ThreadSchedMasks::LowMask)); - } - - bool IsRunnable() const { - return scheduling_state == static_cast(ThreadSchedStatus::Runnable); - } - bool IsRunning() const { return is_running; } @@ -497,7 +490,7 @@ public: } bool IsTerminationRequested() const { - return will_be_terminated || GetState() == ThreadSchedStatus::Exited; + return will_be_terminated || GetState() == ThreadState::Terminated; } bool IsPaused() const { @@ -590,7 +583,7 @@ private: friend class KScheduler; friend class Process; - void SetSchedulingStatus(ThreadSchedStatus new_status); + void SetSchedulingStatus(ThreadState new_status); void AddSchedulingFlag(ThreadSchedFlags flag); void RemoveSchedulingFlag(ThreadSchedFlags flag); void SetCurrentPriority(u32 new_priority); @@ -600,8 +593,7 @@ private: ThreadContext64 context_64{}; std::shared_ptr host_context{}; - ThreadStatus status = ThreadStatus::Dormant; - u32 scheduling_state = 0; + ThreadState thread_state = ThreadState::Initialized; u64 thread_id = 0; @@ -647,6 +639,7 @@ private: /// If waiting on a ConditionVariable, this is the ConditionVariable address VAddr condvar_wait_address = 0; + bool is_waiting_on_condvar{}; /// If waiting on a Mutex, this is the mutex address VAddr mutex_wait_address = 0; /// The handle used to wait for the mutex. 
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp index 79628e2b42..b58a76dbab 100644 --- a/src/core/hle/kernel/time_manager.cpp +++ b/src/core/hle/kernel/time_manager.cpp @@ -42,8 +42,7 @@ void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64 event_handle = timetask->GetGlobalHandle(); if (nanoseconds > 0) { ASSERT(timetask); - ASSERT(timetask->GetStatus() != ThreadStatus::Ready); - ASSERT(timetask->GetStatus() != ThreadStatus::WaitMutex); + ASSERT(timetask->GetState() != ThreadState::Runnable); system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{nanoseconds}, time_manager_event_type, event_handle); } else { diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp index 8d91d600a2..acf6b7ab5f 100644 --- a/src/yuzu/debugger/wait_tree.cpp +++ b/src/yuzu/debugger/wait_tree.cpp @@ -238,8 +238,8 @@ WaitTreeThread::~WaitTreeThread() = default; QString WaitTreeThread::GetText() const { const auto& thread = static_cast(object); QString status; - switch (thread.GetStatus()) { - case Kernel::ThreadStatus::Ready: + switch (thread.GetState()) { + case Kernel::ThreadState::Runnable: if (!thread.IsPaused()) { if (thread.WasRunning()) { status = tr("running"); @@ -250,35 +250,14 @@ QString WaitTreeThread::GetText() const { status = tr("paused"); } break; - case Kernel::ThreadStatus::Paused: - status = tr("paused"); + case Kernel::ThreadState::Waiting: + status = tr("waiting"); break; - case Kernel::ThreadStatus::WaitHLEEvent: - status = tr("waiting for HLE return"); + case Kernel::ThreadState::Initialized: + status = tr("initialized"); break; - case Kernel::ThreadStatus::WaitSleep: - status = tr("sleeping"); - break; - case Kernel::ThreadStatus::WaitIPC: - status = tr("waiting for IPC reply"); - break; - case Kernel::ThreadStatus::WaitSynch: - status = tr("waiting for objects"); - break; - case Kernel::ThreadStatus::WaitMutex: - status = tr("waiting for mutex"); - break; - case Kernel::ThreadStatus::WaitCondVar: - status = tr("waiting for condition variable"); - break; - case Kernel::ThreadStatus::WaitArb: - status = tr("waiting for address arbiter"); - break; - case Kernel::ThreadStatus::Dormant: - status = tr("dormant"); - break; - case Kernel::ThreadStatus::Dead: - status = tr("dead"); + case Kernel::ThreadState::Terminated: + status = tr("terminated"); break; } @@ -294,8 +273,8 @@ QColor WaitTreeThread::GetColor() const { const std::size_t color_index = IsDarkTheme() ? 
1 : 0; const auto& thread = static_cast(object); - switch (thread.GetStatus()) { - case Kernel::ThreadStatus::Ready: + switch (thread.GetState()) { + case Kernel::ThreadState::Runnable: if (!thread.IsPaused()) { if (thread.WasRunning()) { return QColor(WaitTreeColors[0][color_index]); @@ -305,21 +284,11 @@ QColor WaitTreeThread::GetColor() const { } else { return QColor(WaitTreeColors[2][color_index]); } - case Kernel::ThreadStatus::Paused: + case Kernel::ThreadState::Waiting: return QColor(WaitTreeColors[3][color_index]); - case Kernel::ThreadStatus::WaitHLEEvent: - case Kernel::ThreadStatus::WaitIPC: - return QColor(WaitTreeColors[4][color_index]); - case Kernel::ThreadStatus::WaitSleep: - return QColor(WaitTreeColors[5][color_index]); - case Kernel::ThreadStatus::WaitSynch: - case Kernel::ThreadStatus::WaitMutex: - case Kernel::ThreadStatus::WaitCondVar: - case Kernel::ThreadStatus::WaitArb: - return QColor(WaitTreeColors[6][color_index]); - case Kernel::ThreadStatus::Dormant: + case Kernel::ThreadState::Initialized: return QColor(WaitTreeColors[7][color_index]); - case Kernel::ThreadStatus::Dead: + case Kernel::ThreadState::Terminated: return QColor(WaitTreeColors[8][color_index]); default: return WaitTreeItem::GetColor(); @@ -367,7 +336,7 @@ std::vector> WaitTreeThread::GetChildren() const { list.push_back(std::make_unique(tr("not waiting for mutex"))); } - if (thread.GetStatus() == Kernel::ThreadStatus::WaitSynch) { + if (thread.GetState() == Kernel::ThreadState::Waiting) { list.push_back(std::make_unique(thread.GetWaitObjectsForDebugging(), thread.IsCancellable())); } From d1309fb275ee478fcdbf036976cce3dafa09206e Mon Sep 17 00:00:00 2001 From: bunnei Date: Mon, 28 Dec 2020 13:36:24 -0800 Subject: [PATCH 08/20] hle: kernel: Rename thread "status" to "state". --- src/core/hle/kernel/kernel.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index af4a5e33d0..8d03f16fb8 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -602,9 +602,9 @@ void KernelCore::Suspend(bool in_suspention) { const bool should_suspend = exception_exited || in_suspention; { KScopedSchedulerLock lock(*this); - const auto status = should_suspend ? ThreadState::Runnable : ThreadState::Waiting; + const auto state = should_suspend ? ThreadState::Runnable : ThreadState::Waiting; for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { - impl->suspend_threads[i]->SetState(status); + impl->suspend_threads[i]->SetState(state); } } } From f12701b303dc5b994f9969db21cf1c319e94bdf3 Mon Sep 17 00:00:00 2001 From: bunnei Date: Mon, 28 Dec 2020 20:41:01 -0800 Subject: [PATCH 09/20] hle: kernel: k_scheduler: Cleanup OnThreadPriorityChanged. 
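Background for the ThreadState rework in patches 07 and 08 above: the low four bits carry the core state (Initialized, Waiting, Runnable, Terminated) while the bits at and above SuspendShift carry suspend flags, so GetState() masks with ThreadState::Mask and GetRawState() returns the unmasked value. A self-contained sketch of that masking (enum values copied from the thread.h diff; GetCoreState is a simplified stand-in for Thread::GetState):

#include <cstdint>

enum class ThreadState : uint16_t {
    Initialized = 0,
    Waiting = 1,
    Runnable = 2,
    Terminated = 3,

    SuspendShift = 4,
    Mask = (1 << SuspendShift) - 1,

    ThreadSuspended = (1 << (1 + SuspendShift)),
};

constexpr ThreadState GetCoreState(ThreadState raw) {
    // Strip the suspend flags, keeping only the low "core state" bits.
    return static_cast<ThreadState>(static_cast<uint16_t>(raw) &
                                    static_cast<uint16_t>(ThreadState::Mask));
}

// A runnable thread that is also suspended still reports Runnable as its core state.
constexpr auto suspended_runnable = static_cast<ThreadState>(
    static_cast<uint16_t>(ThreadState::Runnable) |
    static_cast<uint16_t>(ThreadState::ThreadSuspended));
static_assert(GetCoreState(suspended_runnable) == ThreadState::Runnable);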
--- src/core/hle/kernel/k_scheduler.cpp | 6 ++---- src/core/hle/kernel/k_scheduler.h | 3 +-- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index cdcb89f682..42f0ea4834 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp @@ -203,9 +203,7 @@ void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, Thread } } -void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread, - s32 old_priority) { - +void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, s32 old_priority) { ASSERT(kernel.GlobalSchedulerContext().IsLocked()); // If the thread is runnable, we want to change its priority in the queue. @@ -292,7 +290,7 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { // If the best thread we can choose has a priority the same or worse than ours, try to // migrate a higher priority thread. - if (best_thread != nullptr && best_thread->GetPriority() >= static_cast(priority)) { + if (best_thread != nullptr && best_thread->GetPriority() >= priority) { Thread* suggested = priority_queue.GetSuggestedFront(core_id); while (suggested != nullptr) { // If the suggestion's priority is the same as ours, don't bother. diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h index 677375d1ad..7836651233 100644 --- a/src/core/hle/kernel/k_scheduler.h +++ b/src/core/hle/kernel/k_scheduler.h @@ -103,8 +103,7 @@ public: static void OnThreadStateChanged(KernelCore& kernel, Thread* thread, ThreadState old_state); /// Notify the scheduler a thread's priority has changed. - static void OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread, - s32 old_priority); + static void OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, s32 old_priority); /// Notify the scheduler a thread's core and/or affinity mask has changed. static void OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread, From 92d5c63f0172aa222dca0dbca3aefb4e7b4bec06 Mon Sep 17 00:00:00 2001 From: bunnei Date: Mon, 28 Dec 2020 22:24:05 -0800 Subject: [PATCH 10/20] common: common_funcs: Add R_UNLESS macro. --- src/common/common_funcs.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/common/common_funcs.h b/src/common/common_funcs.h index 367b6bf6e7..c90978f9c0 100644 --- a/src/common/common_funcs.h +++ b/src/common/common_funcs.h @@ -93,6 +93,14 @@ __declspec(dllimport) void __stdcall DebugBreak(void); return static_cast(key) == 0; \ } +/// Evaluates a boolean expression, and returns a result unless that expression is true. +#define R_UNLESS(expr, res) \ + { \ + if (!(expr)) { \ + return res; \ + } \ + } + namespace Common { [[nodiscard]] constexpr u32 MakeMagic(char a, char b, char c, char d) { From 8a155c4058dd324da13cc3c69ec4311eb3319fd4 Mon Sep 17 00:00:00 2001 From: bunnei Date: Mon, 28 Dec 2020 22:32:11 -0800 Subject: [PATCH 11/20] hle: kernel: Remove unnecessary AddressArbiter definition. 
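Usage note for the R_UNLESS macro added in patch 10 above: it returns the supplied result code from the enclosing function whenever the condition is false, which is the guard-clause style the KConditionVariable and KAddressArbiter code later in this series is written in. A hypothetical example (the function and its precondition are illustrative only, not from the patches):

#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/kernel/svc_results.h"

// Hypothetical helper, not from the patches: bail out early with a kernel
// result code unless the precondition holds.
ResultCode CopyUserValue(bool address_is_readable, u32& out) {
    R_UNLESS(address_is_readable, Kernel::Svc::ResultInvalidCurrentMemory);
    out = 0; // Illustrative success path.
    return RESULT_SUCCESS;
}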
---
 src/core/hle/kernel/kernel.h | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 9046b5a8a9..933d9a7d61 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -33,7 +33,6 @@ template class SlabHeap;
 } // namespace Memory

-class AddressArbiter;
 class ClientPort;
 class GlobalSchedulerContext;
 class HandleTable;

From 1212fa60b68a4b92fe7715f1bc87c42bc09cba8c Mon Sep 17 00:00:00 2001
From: bunnei
Date: Mon, 28 Dec 2020 22:42:16 -0800
Subject: [PATCH 12/20] core: hle: kernel: Begin moving common SVC definitions to its own header.

---
 src/core/CMakeLists.txt | 1 +
 src/core/hle/kernel/svc_common.h | 13 +++++++++++++
 2 files changed, 14 insertions(+)
 create mode 100644 src/core/hle/kernel/svc_common.h

diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 548b3911e7..cae80bfbf7 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -212,6 +212,7 @@ add_library(core STATIC
     hle/kernel/shared_memory.h
     hle/kernel/svc.cpp
     hle/kernel/svc.h
+    hle/kernel/svc_common.h
     hle/kernel/svc_results.h
     hle/kernel/svc_types.h
     hle/kernel/svc_wrap.h
diff --git a/src/core/hle/kernel/svc_common.h b/src/core/hle/kernel/svc_common.h
new file mode 100644
index 0000000000..7734bb2360
--- /dev/null
+++ b/src/core/hle/kernel/svc_common.h
@@ -0,0 +1,13 @@
+// Copyright 2020 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_types.h"
+
+namespace Kernel::Svc {
+
+constexpr u32 HandleWaitMask{1u << 30};
+
+} // namespace Kernel::Svc

From b4e6d6c38586eaa5aab7f7df3f9d958755a517c2 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Mon, 28 Dec 2020 23:45:28 -0800
Subject: [PATCH 13/20] core: hle: kernel: Update KConditionVariable.

---
 src/core/CMakeLists.txt | 2 +
 src/core/hle/kernel/k_condition_variable.cpp | 347 +++++++++++++++++++
 src/core/hle/kernel/k_condition_variable.h | 59 ++++
 src/core/hle/kernel/object.h | 5 +
 4 files changed, 413 insertions(+)
 create mode 100644 src/core/hle/kernel/k_condition_variable.cpp
 create mode 100644 src/core/hle/kernel/k_condition_variable.h

diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index cae80bfbf7..a870cd8feb 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -158,6 +158,8 @@ add_library(core STATIC
     hle/kernel/hle_ipc.cpp
     hle/kernel/hle_ipc.h
     hle/kernel/k_affinity_mask.h
+    hle/kernel/k_condition_variable.cpp
+    hle/kernel/k_condition_variable.h
     hle/kernel/k_priority_queue.h
     hle/kernel/k_scheduler.cpp
     hle/kernel/k_scheduler.h
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
new file mode 100644
index 0000000000..ef5c174095
--- /dev/null
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -0,0 +1,347 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+ +#include + +#include "core/arm/exclusive_monitor.h" +#include "core/core.h" +#include "core/hle/kernel/k_condition_variable.h" +#include "core/hle/kernel/k_scheduler.h" +#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" +#include "core/hle/kernel/k_synchronization_object.h" +#include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/process.h" +#include "core/hle/kernel/svc_common.h" +#include "core/hle/kernel/svc_results.h" +#include "core/hle/kernel/thread.h" +#include "core/memory.h" + +namespace Kernel { + +namespace { + +bool ReadFromUser(Core::System& system, u32* out, VAddr address) { + *out = system.Memory().Read32(address); + return true; +} + +bool WriteToUser(Core::System& system, VAddr address, const u32* p) { + system.Memory().Write32(address, *p); + return true; +} + +bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero, + u32 new_orr_mask) { + auto& monitor = system.Monitor(); + const auto current_core = system.CurrentCoreIndex(); + + // Load the value from the address. + const auto expected = monitor.ExclusiveRead32(current_core, address); + + // Orr in the new mask. + u32 value = expected | new_orr_mask; + + // If the value is zero, use the if_zero value, otherwise use the newly orr'd value. + if (!expected) { + value = if_zero; + } + + // Try to store. + if (!monitor.ExclusiveWrite32(current_core, address, value)) { + // If we failed to store, try again. + return UpdateLockAtomic(system, out, address, if_zero, new_orr_mask); + } + + // We're done. + *out = expected; + return true; +} + +} // namespace + +KConditionVariable::KConditionVariable(Core::System& system_) + : system{system_}, kernel{system.Kernel()} {} + +KConditionVariable::~KConditionVariable() = default; + +ResultCode KConditionVariable::SignalToAddress(VAddr addr) { + Thread* owner_thread = kernel.CurrentScheduler()->GetCurrentThread(); + + // Signal the address. + { + KScopedSchedulerLock sl(kernel); + + // Remove waiter thread. + s32 num_waiters{}; + Thread* next_owner_thread = + owner_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr); + + // Determine the next tag. + u32 next_value{}; + if (next_owner_thread) { + next_value = next_owner_thread->GetAddressKeyValue(); + if (num_waiters > 1) { + next_value |= Svc::HandleWaitMask; + } + + next_owner_thread->SetSyncedObject(nullptr, RESULT_SUCCESS); + next_owner_thread->Wakeup(); + } + + // Write the value to userspace. + if (!WriteToUser(system, addr, std::addressof(next_value))) { + if (next_owner_thread) { + next_owner_thread->SetSyncedObject(nullptr, Svc::ResultInvalidCurrentMemory); + } + + return Svc::ResultInvalidCurrentMemory; + } + } + + return RESULT_SUCCESS; +} + +ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) { + Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread(); + + // Wait for the address. + { + std::shared_ptr owner_thread; + ASSERT(!owner_thread); + { + KScopedSchedulerLock sl(kernel); + cur_thread->SetSyncedObject(nullptr, RESULT_SUCCESS); + + // Check if the thread should terminate. + R_UNLESS(!cur_thread->IsTerminationRequested(), Svc::ResultTerminationRequested); + + { + // Read the tag from userspace. + u32 test_tag{}; + R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr), + Svc::ResultInvalidCurrentMemory); + + // If the tag isn't the handle (with wait mask), we're done. + R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), RESULT_SUCCESS); + + // Get the lock owner thread. 
+ owner_thread = kernel.CurrentProcess()->GetHandleTable().Get(handle); + R_UNLESS(owner_thread, Svc::ResultInvalidHandle); + + // Update the lock. + cur_thread->SetAddressKey(addr, value); + owner_thread->AddWaiter(cur_thread); + cur_thread->SetState(ThreadState::Waiting); + cur_thread->SetMutexWaitAddressForDebugging(addr); + } + } + ASSERT(owner_thread); + } + + // Remove the thread as a waiter from the lock owner. + { + KScopedSchedulerLock sl(kernel); + Thread* owner_thread = cur_thread->GetLockOwner(); + if (owner_thread != nullptr) { + owner_thread->RemoveWaiter(cur_thread); + } + } + + // Get the wait result. + KSynchronizationObject* dummy{}; + return cur_thread->GetWaitResult(std::addressof(dummy)); +} + +Thread* KConditionVariable::SignalImpl(Thread* thread) { + // Check pre-conditions. + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + + // Update the tag. + VAddr address = thread->GetAddressKey(); + u32 own_tag = thread->GetAddressKeyValue(); + + u32 prev_tag{}; + bool can_access{}; + { + // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. + // TODO(bunnei): We should call CanAccessAtomic(..) here. + can_access = true; + if (can_access) { + UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag, + Svc::HandleWaitMask); + } + } + + Thread* thread_to_close = nullptr; + if (can_access) { + if (prev_tag == InvalidHandle) { + // If nobody held the lock previously, we're all good. + thread->SetSyncedObject(nullptr, RESULT_SUCCESS); + thread->Wakeup(); + } else { + // Get the previous owner. + auto owner_thread = kernel.CurrentProcess()->GetHandleTable().Get( + prev_tag & ~Svc::HandleWaitMask); + + if (owner_thread) { + // Add the thread as a waiter on the owner. + owner_thread->AddWaiter(thread); + thread_to_close = owner_thread.get(); + } else { + // The lock was tagged with a thread that doesn't exist. + thread->SetSyncedObject(nullptr, Svc::ResultInvalidState); + thread->Wakeup(); + } + } + } else { + // If the address wasn't accessible, note so. + thread->SetSyncedObject(nullptr, Svc::ResultInvalidCurrentMemory); + thread->Wakeup(); + } + + return thread_to_close; +} + +void KConditionVariable::Signal(u64 cv_key, s32 count) { + // Prepare for signaling. + constexpr int MaxThreads = 16; + + // TODO(bunnei): This should just be Thread once we implement KAutoObject instead of using + // std::shared_ptr. + std::vector> thread_list; + std::array thread_array; + s32 num_to_close{}; + + // Perform signaling. + s32 num_waiters{}; + { + KScopedSchedulerLock sl(kernel); + + auto it = thread_tree.nfind_light({cv_key, -1}); + while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && + (it->GetConditionVariableKey() == cv_key)) { + Thread* target_thread = std::addressof(*it); + + if (Thread* thread = SignalImpl(target_thread); thread != nullptr) { + if (num_to_close < MaxThreads) { + thread_array[num_to_close++] = thread; + } else { + thread_list.push_back(SharedFrom(thread)); + } + } + + it = thread_tree.erase(it); + target_thread->ClearConditionVariable(); + ++num_waiters; + } + + // If we have no waiters, clear the has waiter flag. + if (it == thread_tree.end() || it->GetConditionVariableKey() != cv_key) { + const u32 has_waiter_flag{}; + WriteToUser(system, cv_key, std::addressof(has_waiter_flag)); + } + } + + // Close threads in the array. + for (auto i = 0; i < num_to_close; ++i) { + thread_array[i]->Close(); + } + + // Close threads in the list. 
+ for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) { + (*it)->Close(); + } +} + +ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) { + // Prepare to wait. + Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread(); + Handle timer = InvalidHandle; + + { + KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout); + + // Set the synced object. + cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut); + + // Check that the thread isn't terminating. + if (cur_thread->IsTerminationRequested()) { + slp.CancelSleep(); + return Svc::ResultTerminationRequested; + } + + // Update the value and process for the next owner. + { + // Remove waiter thread. + s32 num_waiters{}; + Thread* next_owner_thread = + cur_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr); + + // Update for the next owner thread. + u32 next_value{}; + if (next_owner_thread != nullptr) { + // Get the next tag value. + next_value = next_owner_thread->GetAddressKeyValue(); + if (num_waiters > 1) { + next_value |= Svc::HandleWaitMask; + } + + // Wake up the next owner. + next_owner_thread->SetSyncedObject(nullptr, RESULT_SUCCESS); + next_owner_thread->Wakeup(); + } + + // Write to the cv key. + { + const u32 has_waiter_flag = 1; + WriteToUser(system, key, std::addressof(has_waiter_flag)); + // TODO(bunnei): We should call DataMemoryBarrier(..) here. + } + + // Write the value to userspace. + if (!WriteToUser(system, addr, std::addressof(next_value))) { + slp.CancelSleep(); + return Svc::ResultInvalidCurrentMemory; + } + } + + // Update condition variable tracking. + { + cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value); + thread_tree.insert(*cur_thread); + } + + // If the timeout is non-zero, set the thread as waiting. + if (timeout != 0) { + cur_thread->SetState(ThreadState::Waiting); + cur_thread->SetMutexWaitAddressForDebugging(addr); + } + } + + // Cancel the timer wait. + if (timer != InvalidHandle) { + auto& time_manager = kernel.TimeManager(); + time_manager.UnscheduleTimeEvent(timer); + } + + // Remove from the condition variable. + { + KScopedSchedulerLock sl(kernel); + + if (Thread* owner = cur_thread->GetLockOwner(); owner != nullptr) { + owner->RemoveWaiter(cur_thread); + } + + if (cur_thread->IsWaitingForConditionVariable()) { + thread_tree.erase(thread_tree.iterator_to(*cur_thread)); + cur_thread->ClearConditionVariable(); + } + } + + // Get the result. + KSynchronizationObject* dummy{}; + return cur_thread->GetWaitResult(std::addressof(dummy)); +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_condition_variable.h b/src/core/hle/kernel/k_condition_variable.h new file mode 100644 index 0000000000..98ed5b323a --- /dev/null +++ b/src/core/hle/kernel/k_condition_variable.h @@ -0,0 +1,59 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
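A note on the lock words manipulated throughout k_condition_variable.cpp above: the 32-bit value at the mutex address encodes the owner thread's handle in the low bits, with Svc::HandleWaitMask (bit 30) set while other threads remain queued behind the owner. A minimal standalone sketch of that encoding (the handle value in the asserts is an assumption for illustration):

#include <cstdint>

using Handle = uint32_t;
constexpr uint32_t HandleWaitMask = 1u << 30; // mirrors Kernel::Svc::HandleWaitMask

// Compose the value written back to the lock word: the next owner's handle,
// with the wait bit set if other threads remain queued behind it.
constexpr uint32_t MakeLockWord(Handle next_owner, int num_waiters) {
    uint32_t value = next_owner;
    if (num_waiters > 1) {
        value |= HandleWaitMask;
    }
    return value;
}

static_assert(MakeLockWord(0x8003, 1) == 0x8003);
static_assert(MakeLockWord(0x8003, 2) == (0x8003 | HandleWaitMask));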
+ +#pragma once + +#include "common/assert.h" +#include "common/common_types.h" + +#include "core/hle/kernel/k_scheduler.h" +#include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/thread.h" +#include "core/hle/result.h" + +namespace Core { +class System; +} + +namespace Kernel { + +class KConditionVariable { +public: + using ThreadTree = typename Thread::ConditionVariableThreadTreeType; + + explicit KConditionVariable(Core::System& system_); + ~KConditionVariable(); + + // Arbitration + [[nodiscard]] ResultCode SignalToAddress(VAddr addr); + [[nodiscard]] ResultCode WaitForAddress(Handle handle, VAddr addr, u32 value); + + // Condition variable + void Signal(u64 cv_key, s32 count); + [[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout); + +private: + [[nodiscard]] Thread* SignalImpl(Thread* thread); + + ThreadTree thread_tree; + + Core::System& system; + KernelCore& kernel; +}; + +inline void BeforeUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree, + Thread* thread) { + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + + tree->erase(tree->iterator_to(*thread)); +} + +inline void AfterUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree, + Thread* thread) { + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + + tree->insert(*thread); +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/object.h b/src/core/hle/kernel/object.h index e3391e2af8..27124ef677 100644 --- a/src/core/hle/kernel/object.h +++ b/src/core/hle/kernel/object.h @@ -50,6 +50,11 @@ public: } virtual HandleType GetHandleType() const = 0; + void Close() { + // TODO(bunnei): This is a placeholder to decrement the reference count, which we will use + // when we implement KAutoObject instead of using shared_ptr. + } + /** * Check if a thread can wait on the object * @return True if a thread can wait on the object, otherwise false From 952d1ac4879b8c597acbe23dcb58cc69d68a84e7 Mon Sep 17 00:00:00 2001 From: bunnei Date: Tue, 29 Dec 2020 23:18:06 -0800 Subject: [PATCH 14/20] core: hle: kernel: Update KAddressArbiter. --- src/core/CMakeLists.txt | 2 + src/core/hle/kernel/k_address_arbiter.cpp | 365 ++++++++++++++++++++++ src/core/hle/kernel/k_address_arbiter.h | 70 +++++ 3 files changed, 437 insertions(+) create mode 100644 src/core/hle/kernel/k_address_arbiter.cpp create mode 100644 src/core/hle/kernel/k_address_arbiter.h diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index a870cd8feb..d29d4573e8 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -157,6 +157,8 @@ add_library(core STATIC hle/kernel/handle_table.h hle/kernel/hle_ipc.cpp hle/kernel/hle_ipc.h + hle/kernel/k_address_arbiter.cpp + hle/kernel/k_address_arbiter.h hle/kernel/k_affinity_mask.h hle/kernel/k_condition_variable.cpp hle/kernel/k_condition_variable.h diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp new file mode 100644 index 0000000000..7b712d31a0 --- /dev/null +++ b/src/core/hle/kernel/k_address_arbiter.cpp @@ -0,0 +1,365 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#include "core/arm/exclusive_monitor.h" +#include "core/core.h" +#include "core/hle/kernel/k_address_arbiter.h" +#include "core/hle/kernel/k_scheduler.h" +#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" +#include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/svc_results.h" +#include "core/hle/kernel/thread.h" +#include "core/hle/kernel/time_manager.h" +#include "core/memory.h" + +namespace Kernel { + +KAddressArbiter::KAddressArbiter(Core::System& system_) + : system{system_}, kernel{system.Kernel()} {} +KAddressArbiter::~KAddressArbiter() = default; + +namespace { + +bool ReadFromUser(Core::System& system, s32* out, VAddr address) { + *out = system.Memory().Read32(address); + return true; +} + +bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) { + auto& monitor = system.Monitor(); + const auto current_core = system.CurrentCoreIndex(); + + // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. + // TODO(bunnei): We should call CanAccessAtomic(..) here. + + // Load the value from the address. + const s32 current_value = static_cast(monitor.ExclusiveRead32(current_core, address)); + + // Compare it to the desired one. + if (current_value < value) { + // If less than, we want to try to decrement. + const s32 decrement_value = current_value - 1; + + // Decrement and try to store. + if (!monitor.ExclusiveWrite32(current_core, address, static_cast(decrement_value))) { + // If we failed to store, try again. + DecrementIfLessThan(system, out, address, value); + } + } else { + // Otherwise, clear our exclusive hold and finish + monitor.ClearExclusive(); + } + + // We're done. + *out = current_value; + return true; +} + +bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) { + auto& monitor = system.Monitor(); + const auto current_core = system.CurrentCoreIndex(); + + // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. + // TODO(bunnei): We should call CanAccessAtomic(..) here. + + // Load the value from the address. + const s32 current_value = static_cast(monitor.ExclusiveRead32(current_core, address)); + + // Compare it to the desired one. + if (current_value == value) { + // If equal, we want to try to write the new value. + + // Try to store. + if (!monitor.ExclusiveWrite32(current_core, address, static_cast(new_value))) { + // If we failed to store, try again. + UpdateIfEqual(system, out, address, value, new_value); + } + } else { + // Otherwise, clear our exclusive hold and finish. + monitor.ClearExclusive(); + } + + // We're done. + *out = current_value; + return true; +} + +} // namespace + +ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) { + // Perform signaling. + s32 num_waiters{}; + { + KScopedSchedulerLock sl(kernel); + + auto it = thread_tree.nfind_light({addr, -1}); + while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && + (it->GetAddressArbiterKey() == addr)) { + Thread* target_thread = std::addressof(*it); + target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS); + + ASSERT(target_thread->IsWaitingForAddressArbiter()); + target_thread->Wakeup(); + + it = thread_tree.erase(it); + target_thread->ClearAddressArbiter(); + ++num_waiters; + } + } + return RESULT_SUCCESS; +} + +ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) { + // Perform signaling. + s32 num_waiters{}; + { + KScopedSchedulerLock sl(kernel); + + // Check the userspace value. 
+ s32 user_value{}; + R_UNLESS(UpdateIfEqual(system, std::addressof(user_value), addr, value, value + 1), + Svc::ResultInvalidCurrentMemory); + R_UNLESS(user_value == value, Svc::ResultInvalidState); + + auto it = thread_tree.nfind_light({addr, -1}); + while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && + (it->GetAddressArbiterKey() == addr)) { + Thread* target_thread = std::addressof(*it); + target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS); + + ASSERT(target_thread->IsWaitingForAddressArbiter()); + target_thread->Wakeup(); + + it = thread_tree.erase(it); + target_thread->ClearAddressArbiter(); + ++num_waiters; + } + } + return RESULT_SUCCESS; +} + +ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) { + // Perform signaling. + s32 num_waiters{}; + { + KScopedSchedulerLock sl(kernel); + + auto it = thread_tree.nfind_light({addr, -1}); + // Determine the updated value. + s32 new_value{}; + if (/*GetTargetFirmware() >= TargetFirmware_7_0_0*/ true) { + if (count <= 0) { + if ((it != thread_tree.end()) && (it->GetAddressArbiterKey() == addr)) { + new_value = value - 2; + } else { + new_value = value + 1; + } + } else { + if ((it != thread_tree.end()) && (it->GetAddressArbiterKey() == addr)) { + auto tmp_it = it; + s32 tmp_num_waiters{}; + while ((++tmp_it != thread_tree.end()) && + (tmp_it->GetAddressArbiterKey() == addr)) { + if ((tmp_num_waiters++) >= count) { + break; + } + } + + if (tmp_num_waiters < count) { + new_value = value - 1; + } else { + new_value = value; + } + } else { + new_value = value + 1; + } + } + } else { + if (count <= 0) { + if ((it != thread_tree.end()) && (it->GetAddressArbiterKey() == addr)) { + new_value = value - 1; + } else { + new_value = value + 1; + } + } else { + auto tmp_it = it; + s32 tmp_num_waiters{}; + while ((tmp_it != thread_tree.end()) && (tmp_it->GetAddressArbiterKey() == addr) && + (tmp_num_waiters < count + 1)) { + ++tmp_num_waiters; + ++tmp_it; + } + + if (tmp_num_waiters == 0) { + new_value = value + 1; + } else if (tmp_num_waiters <= count) { + new_value = value - 1; + } else { + new_value = value; + } + } + } + + // Check the userspace value. + s32 user_value{}; + bool succeeded{}; + if (value != new_value) { + succeeded = UpdateIfEqual(system, std::addressof(user_value), addr, value, new_value); + } else { + succeeded = ReadFromUser(system, std::addressof(user_value), addr); + } + + R_UNLESS(succeeded, Svc::ResultInvalidCurrentMemory); + R_UNLESS(user_value == value, Svc::ResultInvalidState); + + while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && + (it->GetAddressArbiterKey() == addr)) { + Thread* target_thread = std::addressof(*it); + target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS); + + ASSERT(target_thread->IsWaitingForAddressArbiter()); + target_thread->Wakeup(); + + it = thread_tree.erase(it); + target_thread->ClearAddressArbiter(); + ++num_waiters; + } + } + return RESULT_SUCCESS; +} + +ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) { + // Prepare to wait. + Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread(); + Handle timer = InvalidHandle; + + { + KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout); + + // Check that the thread isn't terminating. + if (cur_thread->IsTerminationRequested()) { + slp.CancelSleep(); + return Svc::ResultTerminationRequested; + } + + // Set the synced object. 
+ cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut); + + // Read the value from userspace. + s32 user_value{}; + bool succeeded{}; + if (decrement) { + succeeded = DecrementIfLessThan(system, std::addressof(user_value), addr, value); + } else { + succeeded = ReadFromUser(system, std::addressof(user_value), addr); + } + + if (!succeeded) { + slp.CancelSleep(); + return Svc::ResultInvalidCurrentMemory; + } + + // Check that the value is less than the specified one. + if (user_value >= value) { + slp.CancelSleep(); + return Svc::ResultInvalidState; + } + + // Check that the timeout is non-zero. + if (timeout == 0) { + slp.CancelSleep(); + return Svc::ResultTimedOut; + } + + // Set the arbiter. + cur_thread->SetAddressArbiter(std::addressof(thread_tree), addr); + thread_tree.insert(*cur_thread); + cur_thread->SetState(ThreadState::Waiting); + } + + // Cancel the timer wait. + if (timer != InvalidHandle) { + auto& time_manager = kernel.TimeManager(); + time_manager.UnscheduleTimeEvent(timer); + } + + // Remove from the address arbiter. + { + KScopedSchedulerLock sl(kernel); + + if (cur_thread->IsWaitingForAddressArbiter()) { + thread_tree.erase(thread_tree.iterator_to(*cur_thread)); + cur_thread->ClearAddressArbiter(); + } + } + + // Get the result. + KSynchronizationObject* dummy{}; + return cur_thread->GetWaitResult(std::addressof(dummy)); +} + +ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) { + // Prepare to wait. + Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread(); + Handle timer = InvalidHandle; + + { + KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout); + + // Check that the thread isn't terminating. + if (cur_thread->IsTerminationRequested()) { + slp.CancelSleep(); + return Svc::ResultTerminationRequested; + } + + // Set the synced object. + cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut); + + // Read the value from userspace. + s32 user_value{}; + if (!ReadFromUser(system, std::addressof(user_value), addr)) { + slp.CancelSleep(); + return Svc::ResultInvalidCurrentMemory; + } + + // Check that the value is equal. + if (value != user_value) { + slp.CancelSleep(); + return Svc::ResultInvalidState; + } + + // Check that the timeout is non-zero. + if (timeout == 0) { + slp.CancelSleep(); + return Svc::ResultTimedOut; + } + + // Set the arbiter. + cur_thread->SetAddressArbiter(std::addressof(thread_tree), addr); + thread_tree.insert(*cur_thread); + cur_thread->SetState(ThreadState::Waiting); + } + + // Cancel the timer wait. + if (timer != InvalidHandle) { + auto& time_manager = kernel.TimeManager(); + time_manager.UnscheduleTimeEvent(timer); + } + + // Remove from the address arbiter. + { + KScopedSchedulerLock sl(kernel); + + if (cur_thread->IsWaitingForAddressArbiter()) { + thread_tree.erase(thread_tree.iterator_to(*cur_thread)); + cur_thread->ClearAddressArbiter(); + } + } + + // Get the result. + KSynchronizationObject* dummy{}; + return cur_thread->GetWaitResult(std::addressof(dummy)); +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_address_arbiter.h b/src/core/hle/kernel/k_address_arbiter.h new file mode 100644 index 0000000000..8d379b524c --- /dev/null +++ b/src/core/hle/kernel/k_address_arbiter.h @@ -0,0 +1,70 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#pragma once + +#include "common/assert.h" +#include "common/common_types.h" +#include "core/hle/kernel/k_condition_variable.h" +#include "core/hle/kernel/svc_types.h" + +union ResultCode; + +namespace Core { +class System; +} + +namespace Kernel { + +class KernelCore; + +class KAddressArbiter { +public: + using ThreadTree = KConditionVariable::ThreadTree; + + explicit KAddressArbiter(Core::System& system_); + ~KAddressArbiter(); + + [[nodiscard]] ResultCode SignalToAddress(VAddr addr, Svc::SignalType type, s32 value, + s32 count) { + switch (type) { + case Svc::SignalType::Signal: + return Signal(addr, count); + case Svc::SignalType::SignalAndIncrementIfEqual: + return SignalAndIncrementIfEqual(addr, value, count); + case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual: + return SignalAndModifyByWaitingCountIfEqual(addr, value, count); + } + UNREACHABLE(); + return RESULT_UNKNOWN; + } + + [[nodiscard]] ResultCode WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value, + s64 timeout) { + switch (type) { + case Svc::ArbitrationType::WaitIfLessThan: + return WaitIfLessThan(addr, value, false, timeout); + case Svc::ArbitrationType::DecrementAndWaitIfLessThan: + return WaitIfLessThan(addr, value, true, timeout); + case Svc::ArbitrationType::WaitIfEqual: + return WaitIfEqual(addr, value, timeout); + } + UNREACHABLE(); + return RESULT_UNKNOWN; + } + +private: + [[nodiscard]] ResultCode Signal(VAddr addr, s32 count); + [[nodiscard]] ResultCode SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count); + [[nodiscard]] ResultCode SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count); + [[nodiscard]] ResultCode WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout); + [[nodiscard]] ResultCode WaitIfEqual(VAddr addr, s32 value, s64 timeout); + + ThreadTree thread_tree; + + Core::System& system; + KernelCore& kernel; +}; + +} // namespace Kernel From 912dd501465ffaabd149cc3532839e346982b337 Mon Sep 17 00:00:00 2001 From: bunnei Date: Wed, 30 Dec 2020 01:14:02 -0800 Subject: [PATCH 15/20] core: hle: Integrate new KConditionVariable and KAddressArbiter implementations. 
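
Both rewritten wait SVCs (WaitProcessWideKeyAtomic and WaitForAddress)
convert their nanosecond timeout inline before dispatching to the
process's KConditionVariable or KAddressArbiter. The shared conversion
reads as the following helper (a restatement for review purposes; the
patch keeps the logic inline and does not introduce ConvertTimeout):

    #include <cstdint>
    #include <limits>

    using s64 = std::int64_t; // matching yuzu's common_types.h alias

    // Positive timeouts gain a two-tick margin and saturate rather than
    // overflow; zero (poll) and negative (wait forever) pass through.
    s64 ConvertTimeout(s64 timeout_ns) {
        if (timeout_ns <= 0) {
            return timeout_ns;
        }
        const s64 timeout = timeout_ns + 2;
        return timeout <= 0 ? std::numeric_limits<s64>::max() : timeout;
    }
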
--- src/core/CMakeLists.txt | 4 - src/core/hle/kernel/address_arbiter.cpp | 317 ---------------- src/core/hle/kernel/address_arbiter.h | 91 ----- .../hle/kernel/k_synchronization_object.cpp | 4 +- src/core/hle/kernel/mutex.cpp | 170 --------- src/core/hle/kernel/mutex.h | 42 --- src/core/hle/kernel/process.cpp | 48 +-- src/core/hle/kernel/process.h | 50 ++- src/core/hle/kernel/svc.cpp | 356 ++++++------------ src/core/hle/kernel/svc_common.h | 1 + src/core/hle/kernel/svc_wrap.h | 38 +- src/core/hle/kernel/thread.cpp | 247 ++++++------ src/core/hle/kernel/thread.h | 320 ++++++++++------ src/core/hle/kernel/time_manager.cpp | 6 +- src/yuzu/debugger/wait_tree.cpp | 10 +- 15 files changed, 515 insertions(+), 1189 deletions(-) delete mode 100644 src/core/hle/kernel/address_arbiter.cpp delete mode 100644 src/core/hle/kernel/address_arbiter.h delete mode 100644 src/core/hle/kernel/mutex.cpp delete mode 100644 src/core/hle/kernel/mutex.h diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index d29d4573e8..1b8ad476e4 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -142,8 +142,6 @@ add_library(core STATIC hardware_interrupt_manager.h hle/ipc.h hle/ipc_helpers.h - hle/kernel/address_arbiter.cpp - hle/kernel/address_arbiter.h hle/kernel/client_port.cpp hle/kernel/client_port.h hle/kernel/client_session.cpp @@ -189,8 +187,6 @@ add_library(core STATIC hle/kernel/memory/slab_heap.h hle/kernel/memory/system_control.cpp hle/kernel/memory/system_control.h - hle/kernel/mutex.cpp - hle/kernel/mutex.h hle/kernel/object.cpp hle/kernel/object.h hle/kernel/physical_core.cpp diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp deleted file mode 100644 index fe8675186a..0000000000 --- a/src/core/hle/kernel/address_arbiter.cpp +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2018 yuzu emulator team -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#include -#include - -#include "common/assert.h" -#include "common/common_types.h" -#include "core/arm/exclusive_monitor.h" -#include "core/core.h" -#include "core/hle/kernel/address_arbiter.h" -#include "core/hle/kernel/errors.h" -#include "core/hle/kernel/handle_table.h" -#include "core/hle/kernel/k_scheduler.h" -#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" -#include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/thread.h" -#include "core/hle/kernel/time_manager.h" -#include "core/hle/result.h" -#include "core/memory.h" - -namespace Kernel { - -// Wake up num_to_wake (or all) threads in a vector. -void AddressArbiter::WakeThreads(const std::vector>& waiting_threads, - s32 num_to_wake) { - // Only process up to 'target' threads, unless 'target' is <= 0, in which case process - // them all. - std::size_t last = waiting_threads.size(); - if (num_to_wake > 0) { - last = std::min(last, static_cast(num_to_wake)); - } - - // Signal the waiting threads. 
- for (std::size_t i = 0; i < last; i++) { - waiting_threads[i]->SetSynchronizationResults(nullptr, RESULT_SUCCESS); - RemoveThread(waiting_threads[i]); - waiting_threads[i]->WaitForArbitration(false); - waiting_threads[i]->Wakeup(); - } -} - -AddressArbiter::AddressArbiter(Core::System& system) : system{system} {} -AddressArbiter::~AddressArbiter() = default; - -ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 value, - s32 num_to_wake) { - switch (type) { - case SignalType::Signal: - return SignalToAddressOnly(address, num_to_wake); - case SignalType::IncrementAndSignalIfEqual: - return IncrementAndSignalToAddressIfEqual(address, value, num_to_wake); - case SignalType::ModifyByWaitingCountAndSignalIfEqual: - return ModifyByWaitingCountAndSignalToAddressIfEqual(address, value, num_to_wake); - default: - return ERR_INVALID_ENUM_VALUE; - } -} - -ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) { - KScopedSchedulerLock lock(system.Kernel()); - const std::vector> waiting_threads = - GetThreadsWaitingOnAddress(address); - WakeThreads(waiting_threads, num_to_wake); - return RESULT_SUCCESS; -} - -ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, - s32 num_to_wake) { - KScopedSchedulerLock lock(system.Kernel()); - auto& memory = system.Memory(); - - // Ensure that we can write to the address. - if (!memory.IsValidVirtualAddress(address)) { - return ERR_INVALID_ADDRESS_STATE; - } - - const std::size_t current_core = system.CurrentCoreIndex(); - auto& monitor = system.Monitor(); - u32 current_value; - do { - current_value = monitor.ExclusiveRead32(current_core, address); - - if (current_value != static_cast(value)) { - return ERR_INVALID_STATE; - } - current_value++; - } while (!monitor.ExclusiveWrite32(current_core, address, current_value)); - - return SignalToAddressOnly(address, num_to_wake); -} - -ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value, - s32 num_to_wake) { - KScopedSchedulerLock lock(system.Kernel()); - auto& memory = system.Memory(); - - // Ensure that we can write to the address. - if (!memory.IsValidVirtualAddress(address)) { - return ERR_INVALID_ADDRESS_STATE; - } - - // Get threads waiting on the address. - const std::vector> waiting_threads = - GetThreadsWaitingOnAddress(address); - - const std::size_t current_core = system.CurrentCoreIndex(); - auto& monitor = system.Monitor(); - s32 updated_value; - do { - updated_value = monitor.ExclusiveRead32(current_core, address); - - if (updated_value != value) { - return ERR_INVALID_STATE; - } - // Determine the modified value depending on the waiting count. 
- if (num_to_wake <= 0) { - if (waiting_threads.empty()) { - updated_value = value + 1; - } else { - updated_value = value - 1; - } - } else { - if (waiting_threads.empty()) { - updated_value = value + 1; - } else if (waiting_threads.size() <= static_cast(num_to_wake)) { - updated_value = value - 1; - } else { - updated_value = value; - } - } - } while (!monitor.ExclusiveWrite32(current_core, address, updated_value)); - - WakeThreads(waiting_threads, num_to_wake); - return RESULT_SUCCESS; -} - -ResultCode AddressArbiter::WaitForAddress(VAddr address, ArbitrationType type, s32 value, - s64 timeout_ns) { - switch (type) { - case ArbitrationType::WaitIfLessThan: - return WaitForAddressIfLessThan(address, value, timeout_ns, false); - case ArbitrationType::DecrementAndWaitIfLessThan: - return WaitForAddressIfLessThan(address, value, timeout_ns, true); - case ArbitrationType::WaitIfEqual: - return WaitForAddressIfEqual(address, value, timeout_ns); - default: - return ERR_INVALID_ENUM_VALUE; - } -} - -ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout, - bool should_decrement) { - auto& memory = system.Memory(); - auto& kernel = system.Kernel(); - Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread(); - - Handle event_handle = InvalidHandle; - { - KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout); - - if (current_thread->IsTerminationRequested()) { - lock.CancelSleep(); - return ERR_THREAD_TERMINATING; - } - - // Ensure that we can read the address. - if (!memory.IsValidVirtualAddress(address)) { - lock.CancelSleep(); - return ERR_INVALID_ADDRESS_STATE; - } - - s32 current_value = static_cast(memory.Read32(address)); - if (current_value >= value) { - lock.CancelSleep(); - return ERR_INVALID_STATE; - } - - current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT); - - s32 decrement_value; - - const std::size_t current_core = system.CurrentCoreIndex(); - auto& monitor = system.Monitor(); - do { - current_value = static_cast(monitor.ExclusiveRead32(current_core, address)); - if (should_decrement) { - decrement_value = current_value - 1; - } else { - decrement_value = current_value; - } - } while ( - !monitor.ExclusiveWrite32(current_core, address, static_cast(decrement_value))); - - // Short-circuit without rescheduling, if timeout is zero. - if (timeout == 0) { - lock.CancelSleep(); - return RESULT_TIMEOUT; - } - - current_thread->SetArbiterWaitAddress(address); - InsertThread(SharedFrom(current_thread)); - current_thread->SetState(ThreadState::Waiting); - current_thread->WaitForArbitration(true); - } - - if (event_handle != InvalidHandle) { - auto& time_manager = kernel.TimeManager(); - time_manager.UnscheduleTimeEvent(event_handle); - } - - { - KScopedSchedulerLock lock(kernel); - if (current_thread->IsWaitingForArbitration()) { - RemoveThread(SharedFrom(current_thread)); - current_thread->WaitForArbitration(false); - } - } - - return current_thread->GetSignalingResult(); -} - -ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) { - auto& memory = system.Memory(); - auto& kernel = system.Kernel(); - Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread(); - - Handle event_handle = InvalidHandle; - { - KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout); - - if (current_thread->IsTerminationRequested()) { - lock.CancelSleep(); - return ERR_THREAD_TERMINATING; - } - - // Ensure that we can read the address. 
- if (!memory.IsValidVirtualAddress(address)) { - lock.CancelSleep(); - return ERR_INVALID_ADDRESS_STATE; - } - - s32 current_value = static_cast(memory.Read32(address)); - if (current_value != value) { - lock.CancelSleep(); - return ERR_INVALID_STATE; - } - - // Short-circuit without rescheduling, if timeout is zero. - if (timeout == 0) { - lock.CancelSleep(); - return RESULT_TIMEOUT; - } - - current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT); - current_thread->SetArbiterWaitAddress(address); - InsertThread(SharedFrom(current_thread)); - current_thread->SetState(ThreadState::Waiting); - current_thread->WaitForArbitration(true); - } - - if (event_handle != InvalidHandle) { - auto& time_manager = kernel.TimeManager(); - time_manager.UnscheduleTimeEvent(event_handle); - } - - { - KScopedSchedulerLock lock(kernel); - if (current_thread->IsWaitingForArbitration()) { - RemoveThread(SharedFrom(current_thread)); - current_thread->WaitForArbitration(false); - } - } - - return current_thread->GetSignalingResult(); -} - -void AddressArbiter::InsertThread(std::shared_ptr thread) { - const VAddr arb_addr = thread->GetArbiterWaitAddress(); - std::list>& thread_list = arb_threads[arb_addr]; - - const auto iter = - std::find_if(thread_list.cbegin(), thread_list.cend(), [&thread](const auto& entry) { - return entry->GetPriority() >= thread->GetPriority(); - }); - - if (iter == thread_list.cend()) { - thread_list.push_back(std::move(thread)); - } else { - thread_list.insert(iter, std::move(thread)); - } -} - -void AddressArbiter::RemoveThread(std::shared_ptr thread) { - const VAddr arb_addr = thread->GetArbiterWaitAddress(); - std::list>& thread_list = arb_threads[arb_addr]; - - const auto iter = std::find_if(thread_list.cbegin(), thread_list.cend(), - [&thread](const auto& entry) { return thread == entry; }); - - if (iter != thread_list.cend()) { - thread_list.erase(iter); - } -} - -std::vector> AddressArbiter::GetThreadsWaitingOnAddress( - VAddr address) const { - const auto iter = arb_threads.find(address); - if (iter == arb_threads.cend()) { - return {}; - } - - const std::list>& thread_list = iter->second; - return {thread_list.cbegin(), thread_list.cend()}; -} -} // namespace Kernel diff --git a/src/core/hle/kernel/address_arbiter.h b/src/core/hle/kernel/address_arbiter.h deleted file mode 100644 index b91edc67da..0000000000 --- a/src/core/hle/kernel/address_arbiter.h +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2018 yuzu emulator team -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#pragma once - -#include -#include -#include -#include - -#include "common/common_types.h" - -union ResultCode; - -namespace Core { -class System; -} - -namespace Kernel { - -class Thread; - -class AddressArbiter { -public: - enum class ArbitrationType { - WaitIfLessThan = 0, - DecrementAndWaitIfLessThan = 1, - WaitIfEqual = 2, - }; - - enum class SignalType { - Signal = 0, - IncrementAndSignalIfEqual = 1, - ModifyByWaitingCountAndSignalIfEqual = 2, - }; - - explicit AddressArbiter(Core::System& system); - ~AddressArbiter(); - - AddressArbiter(const AddressArbiter&) = delete; - AddressArbiter& operator=(const AddressArbiter&) = delete; - - AddressArbiter(AddressArbiter&&) = default; - AddressArbiter& operator=(AddressArbiter&&) = delete; - - /// Signals an address being waited on with a particular signaling type. 
- ResultCode SignalToAddress(VAddr address, SignalType type, s32 value, s32 num_to_wake); - - /// Waits on an address with a particular arbitration type. - ResultCode WaitForAddress(VAddr address, ArbitrationType type, s32 value, s64 timeout_ns); - -private: - /// Signals an address being waited on. - ResultCode SignalToAddressOnly(VAddr address, s32 num_to_wake); - - /// Signals an address being waited on and increments its value if equal to the value argument. - ResultCode IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake); - - /// Signals an address being waited on and modifies its value based on waiting thread count if - /// equal to the value argument. - ResultCode ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value, - s32 num_to_wake); - - /// Waits on an address if the value passed is less than the argument value, - /// optionally decrementing. - ResultCode WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout, - bool should_decrement); - - /// Waits on an address if the value passed is equal to the argument value. - ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout); - - /// Wake up num_to_wake (or all) threads in a vector. - void WakeThreads(const std::vector>& waiting_threads, s32 num_to_wake); - - /// Insert a thread into the address arbiter container - void InsertThread(std::shared_ptr thread); - - /// Removes a thread from the address arbiter container - void RemoveThread(std::shared_ptr thread); - - // Gets the threads waiting on an address. - std::vector> GetThreadsWaitingOnAddress(VAddr address) const; - - /// List of threads waiting for a address arbiter - std::unordered_map>> arb_threads; - - Core::System& system; -}; - -} // namespace Kernel diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp index 64c566caa8..11b989ecdc 100644 --- a/src/core/hle/kernel/k_synchronization_object.cpp +++ b/src/core/hle/kernel/k_synchronization_object.cpp @@ -72,7 +72,7 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index, } // For debugging only - thread->SetWaitObjectsForDebugging(objects, num_objects); + thread->SetWaitObjectsForDebugging({objects, static_cast(num_objects)}); // Mark the thread as waiting. thread->SetCancellable(); @@ -86,7 +86,7 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index, thread->ClearCancellable(); // For debugging only - thread->SetWaitObjectsForDebugging(nullptr, 0); + thread->SetWaitObjectsForDebugging({}); // Cancel the timer as needed. if (timer != InvalidHandle) { diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp deleted file mode 100644 index 8a0faacf80..0000000000 --- a/src/core/hle/kernel/mutex.cpp +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2014 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. 
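
The per-address map of lists deleted above is what the intrusive ThreadTree replaces: waiters now live in one red-black tree ordered by (address key, priority), which is why nfind_light({addr, -1}) lands on the highest-priority waiter for an address. A simplified sketch of that ordering (Waiter and WaiterCompare are illustrative names, not types from the patch):

    #include <cstdint>

    using VAddr = std::uint64_t; // matching yuzu's common_types.h aliases
    using s32 = std::int32_t;

    struct Waiter {
        VAddr key;    // Address the thread is waiting on.
        s32 priority; // Smaller value = more urgent in Horizon.
    };

    // Order by address key first, then priority, so {addr, -1} sorts before
    // every real waiter for 'addr' and a forward walk visits them by urgency.
    struct WaiterCompare {
        bool operator()(const Waiter& lhs, const Waiter& rhs) const {
            if (lhs.key != rhs.key) {
                return lhs.key < rhs.key;
            }
            return lhs.priority < rhs.priority;
        }
    };
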
- -#include -#include -#include - -#include "common/assert.h" -#include "common/logging/log.h" -#include "core/core.h" -#include "core/hle/kernel/errors.h" -#include "core/hle/kernel/handle_table.h" -#include "core/hle/kernel/k_scheduler.h" -#include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/mutex.h" -#include "core/hle/kernel/object.h" -#include "core/hle/kernel/process.h" -#include "core/hle/kernel/thread.h" -#include "core/hle/result.h" -#include "core/memory.h" - -namespace Kernel { - -/// Returns the number of threads that are waiting for a mutex, and the highest priority one among -/// those. -static std::pair, u32> GetHighestPriorityMutexWaitingThread( - const std::shared_ptr& current_thread, VAddr mutex_addr) { - - std::shared_ptr highest_priority_thread; - u32 num_waiters = 0; - - for (const auto& thread : current_thread->GetMutexWaitingThreads()) { - if (thread->GetMutexWaitAddress() != mutex_addr) - continue; - - ++num_waiters; - if (highest_priority_thread == nullptr || - thread->GetPriority() < highest_priority_thread->GetPriority()) { - highest_priority_thread = thread; - } - } - - return {highest_priority_thread, num_waiters}; -} - -/// Update the mutex owner field of all threads waiting on the mutex to point to the new owner. -static void TransferMutexOwnership(VAddr mutex_addr, std::shared_ptr current_thread, - std::shared_ptr new_owner) { - current_thread->RemoveMutexWaiter(new_owner); - const auto threads = current_thread->GetMutexWaitingThreads(); - for (const auto& thread : threads) { - if (thread->GetMutexWaitAddress() != mutex_addr) - continue; - - ASSERT(thread->GetLockOwner() == current_thread.get()); - current_thread->RemoveMutexWaiter(thread); - if (new_owner != thread) - new_owner->AddMutexWaiter(thread); - } -} - -Mutex::Mutex(Core::System& system) : system{system} {} -Mutex::~Mutex() = default; - -ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle, - Handle requesting_thread_handle) { - // The mutex address must be 4-byte aligned - if ((address % sizeof(u32)) != 0) { - LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address); - return ERR_INVALID_ADDRESS; - } - - auto& kernel = system.Kernel(); - std::shared_ptr current_thread = - SharedFrom(kernel.CurrentScheduler()->GetCurrentThread()); - { - KScopedSchedulerLock lock(kernel); - // The mutex address must be 4-byte aligned - if ((address % sizeof(u32)) != 0) { - return ERR_INVALID_ADDRESS; - } - - const auto& handle_table = kernel.CurrentProcess()->GetHandleTable(); - std::shared_ptr holding_thread = handle_table.Get(holding_thread_handle); - std::shared_ptr requesting_thread = - handle_table.Get(requesting_thread_handle); - - // TODO(Subv): It is currently unknown if it is possible to lock a mutex in behalf of - // another thread. - ASSERT(requesting_thread == current_thread); - - current_thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS); - - const u32 addr_value = system.Memory().Read32(address); - - // If the mutex isn't being held, just return success. - if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) { - return RESULT_SUCCESS; - } - - if (holding_thread == nullptr) { - return ERR_INVALID_HANDLE; - } - - // Wait until the mutex is released - current_thread->SetMutexWaitAddress(address); - current_thread->SetWaitHandle(requesting_thread_handle); - - current_thread->SetState(ThreadState::Waiting); - - // Update the lock holder thread's priority to prevent priority inversion. 
- holding_thread->AddMutexWaiter(current_thread); - } - - { - KScopedSchedulerLock lock(kernel); - auto* owner = current_thread->GetLockOwner(); - if (owner != nullptr) { - owner->RemoveMutexWaiter(current_thread); - } - } - return current_thread->GetSignalingResult(); -} - -std::pair> Mutex::Unlock(std::shared_ptr owner, - VAddr address) { - // The mutex address must be 4-byte aligned - if ((address % sizeof(u32)) != 0) { - LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address); - return {ERR_INVALID_ADDRESS, nullptr}; - } - - auto [new_owner, num_waiters] = GetHighestPriorityMutexWaitingThread(owner, address); - if (new_owner == nullptr) { - system.Memory().Write32(address, 0); - return {RESULT_SUCCESS, nullptr}; - } - // Transfer the ownership of the mutex from the previous owner to the new one. - TransferMutexOwnership(address, owner, new_owner); - u32 mutex_value = new_owner->GetWaitHandle(); - if (num_waiters >= 2) { - // Notify the guest that there are still some threads waiting for the mutex - mutex_value |= Mutex::MutexHasWaitersFlag; - } - new_owner->SetSynchronizationResults(nullptr, RESULT_SUCCESS); - new_owner->SetLockOwner(nullptr); - new_owner->Wakeup(); - - system.Memory().Write32(address, mutex_value); - return {RESULT_SUCCESS, new_owner}; -} - -ResultCode Mutex::Release(VAddr address) { - auto& kernel = system.Kernel(); - KScopedSchedulerLock lock(kernel); - - std::shared_ptr current_thread = - SharedFrom(kernel.CurrentScheduler()->GetCurrentThread()); - - auto [result, new_owner] = Unlock(current_thread, address); - - if (result != RESULT_SUCCESS && new_owner != nullptr) { - new_owner->SetSynchronizationResults(nullptr, result); - } - - return result; -} - -} // namespace Kernel diff --git a/src/core/hle/kernel/mutex.h b/src/core/hle/kernel/mutex.h deleted file mode 100644 index 3b81dc3dfa..0000000000 --- a/src/core/hle/kernel/mutex.h +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2014 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#pragma once - -#include "common/common_types.h" - -union ResultCode; - -namespace Core { -class System; -} - -namespace Kernel { - -class Mutex final { -public: - explicit Mutex(Core::System& system); - ~Mutex(); - - /// Flag that indicates that a mutex still has threads waiting for it. - static constexpr u32 MutexHasWaitersFlag = 0x40000000; - /// Mask of the bits in a mutex address value that contain the mutex owner. - static constexpr u32 MutexOwnerMask = 0xBFFFFFFF; - - /// Attempts to acquire a mutex at the specified address. - ResultCode TryAcquire(VAddr address, Handle holding_thread_handle, - Handle requesting_thread_handle); - - /// Unlocks a mutex for owner at address - std::pair> Unlock(std::shared_ptr owner, - VAddr address); - - /// Releases the mutex at the specified address. 
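
The tag-word layout defined by the constants above survives the rewrite: Svc::HandleWaitMask in the new code is the same bit 30. A sketch of how a userspace mutex word decodes under that scheme (MutexOwner and MutexHasWaiters are illustrative names):

    #include <cstdint>

    using u32 = std::uint32_t; // matching yuzu's common_types.h aliases
    using Handle = u32;

    constexpr u32 MutexHasWaitersFlag = 0x40000000; // Bit 30, as above.

    // The owner's handle occupies the remaining bits of the word.
    constexpr Handle MutexOwner(u32 mutex_value) {
        return static_cast<Handle>(mutex_value & ~MutexHasWaitersFlag);
    }

    constexpr bool MutexHasWaiters(u32 mutex_value) {
        return (mutex_value & MutexHasWaitersFlag) != 0;
    }
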
- ResultCode Release(VAddr address); - -private: - Core::System& system; -}; - -} // namespace Kernel diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index a306c7c732..37b77fa6e7 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp @@ -162,48 +162,6 @@ u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const { return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage(); } -void Process::InsertConditionVariableThread(std::shared_ptr thread) { - VAddr cond_var_addr = thread->GetCondVarWaitAddress(); - std::list>& thread_list = cond_var_threads[cond_var_addr]; - auto it = thread_list.begin(); - while (it != thread_list.end()) { - const std::shared_ptr current_thread = *it; - if (current_thread->GetPriority() > thread->GetPriority()) { - thread_list.insert(it, thread); - return; - } - ++it; - } - thread_list.push_back(thread); -} - -void Process::RemoveConditionVariableThread(std::shared_ptr thread) { - VAddr cond_var_addr = thread->GetCondVarWaitAddress(); - std::list>& thread_list = cond_var_threads[cond_var_addr]; - auto it = thread_list.begin(); - while (it != thread_list.end()) { - const std::shared_ptr current_thread = *it; - if (current_thread.get() == thread.get()) { - thread_list.erase(it); - return; - } - ++it; - } -} - -std::vector> Process::GetConditionVariableThreads( - const VAddr cond_var_addr) { - std::vector> result{}; - std::list>& thread_list = cond_var_threads[cond_var_addr]; - auto it = thread_list.begin(); - while (it != thread_list.end()) { - std::shared_ptr current_thread = *it; - result.push_back(current_thread); - ++it; - } - return result; -} - void Process::RegisterThread(const Thread* thread) { thread_list.push_back(thread); } @@ -412,9 +370,9 @@ bool Process::IsSignaled() const { } Process::Process(Core::System& system) - : KSynchronizationObject{system.Kernel()}, page_table{std::make_unique( - system)}, - handle_table{system.Kernel()}, address_arbiter{system}, mutex{system}, system{system} {} + : KSynchronizationObject{system.Kernel()}, + page_table{std::make_unique(system)}, handle_table{system.Kernel()}, + address_arbiter{system}, condition_var{system}, system{system} {} Process::~Process() = default; diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h index 901f1ff27b..564e1f27dd 100644 --- a/src/core/hle/kernel/process.h +++ b/src/core/hle/kernel/process.h @@ -11,10 +11,10 @@ #include #include #include "common/common_types.h" -#include "core/hle/kernel/address_arbiter.h" #include "core/hle/kernel/handle_table.h" +#include "core/hle/kernel/k_address_arbiter.h" +#include "core/hle/kernel/k_condition_variable.h" #include "core/hle/kernel/k_synchronization_object.h" -#include "core/hle/kernel/mutex.h" #include "core/hle/kernel/process_capability.h" #include "core/hle/result.h" @@ -123,24 +123,30 @@ public: return handle_table; } - /// Gets a reference to the process' address arbiter. - AddressArbiter& GetAddressArbiter() { - return address_arbiter; + ResultCode SignalToAddress(VAddr address) { + return condition_var.SignalToAddress(address); } - /// Gets a const reference to the process' address arbiter. - const AddressArbiter& GetAddressArbiter() const { - return address_arbiter; + ResultCode WaitForAddress(Handle handle, VAddr address, u32 tag) { + return condition_var.WaitForAddress(handle, address, tag); } - /// Gets a reference to the process' mutex lock. 
- Mutex& GetMutex() { - return mutex; + void SignalConditionVariable(u64 cv_key, int32_t count) { + return condition_var.Signal(cv_key, count); } - /// Gets a const reference to the process' mutex lock - const Mutex& GetMutex() const { - return mutex; + ResultCode WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) { + return condition_var.Wait(address, cv_key, tag, ns); + } + + ResultCode SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, + s32 count) { + return address_arbiter.SignalToAddress(address, signal_type, value, count); + } + + ResultCode WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value, + s64 timeout) { + return address_arbiter.WaitForAddress(address, arb_type, value, timeout); } /// Gets the address to the process' dedicated TLS region. @@ -250,15 +256,6 @@ public: return thread_list; } - /// Insert a thread into the condition variable wait container - void InsertConditionVariableThread(std::shared_ptr thread); - - /// Remove a thread from the condition variable wait container - void RemoveConditionVariableThread(std::shared_ptr thread); - - /// Obtain all condition variable threads waiting for some address - std::vector> GetConditionVariableThreads(VAddr cond_var_addr); - /// Registers a thread as being created under this process, /// adding it to this process' thread list. void RegisterThread(const Thread* thread); @@ -369,12 +366,12 @@ private: HandleTable handle_table; /// Per-process address arbiter. - AddressArbiter address_arbiter; + KAddressArbiter address_arbiter; /// The per-process mutex lock instance used for handling various /// forms of services, such as lock arbitration, and condition /// variable related facilities. - Mutex mutex; + KConditionVariable condition_var; /// Address indicating the location of the process' dedicated TLS region. VAddr tls_region_address = 0; @@ -385,9 +382,6 @@ private: /// List of threads that are running with this process as their owner. 
     std::list<const Thread*> thread_list;
 
-    /// List of threads waiting for a condition variable
-    std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> cond_var_threads;
-
     /// Address of the top of the main thread's stack
     VAddr main_thread_stack_top{};
 
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 304b8727d6..99bb4ea20d 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -10,6 +10,7 @@
 
 #include "common/alignment.h"
 #include "common/assert.h"
+#include "common/common_funcs.h"
 #include "common/fiber.h"
 #include "common/logging/log.h"
 #include "common/microprofile.h"
@@ -19,24 +20,26 @@
 #include "core/core_timing.h"
 #include "core/core_timing_util.h"
 #include "core/cpu_manager.h"
-#include "core/hle/kernel/address_arbiter.h"
 #include "core/hle/kernel/client_port.h"
 #include "core/hle/kernel/client_session.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_address_arbiter.h"
+#include "core/hle/kernel/k_condition_variable.h"
 #include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
 #include "core/hle/kernel/k_synchronization_object.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/memory/memory_block.h"
+#include "core/hle/kernel/memory/memory_layout.h"
 #include "core/hle/kernel/memory/page_table.h"
-#include "core/hle/kernel/mutex.h"
 #include "core/hle/kernel/physical_core.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/readable_event.h"
 #include "core/hle/kernel/resource_limit.h"
 #include "core/hle/kernel/shared_memory.h"
 #include "core/hle/kernel/svc.h"
+#include "core/hle/kernel/svc_results.h"
 #include "core/hle/kernel/svc_types.h"
 #include "core/hle/kernel/svc_wrap.h"
 #include "core/hle/kernel/thread.h"
@@ -347,12 +350,6 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
         session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming());
     }
 
-    Handle event_handle = thread->GetHLETimeEvent();
-    if (event_handle != InvalidHandle) {
-        auto& time_manager = kernel.TimeManager();
-        time_manager.UnscheduleTimeEvent(event_handle);
-    }
-
     return thread->GetSignalingResult();
 }
 
@@ -491,56 +488,37 @@ static ResultCode CancelSynchronization32(Core::System& system, Handle thread_ha
     return CancelSynchronization(system, thread_handle);
 }
 
-/// Attempts to lock a mutex, creating it if it does not already exist
-static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_handle,
-                                VAddr mutex_addr, Handle requesting_thread_handle) {
-    LOG_TRACE(Kernel_SVC,
-              "called holding_thread_handle=0x{:08X}, mutex_addr=0x{:X}, "
-              "requesting_current_thread_handle=0x{:08X}",
-              holding_thread_handle, mutex_addr, requesting_thread_handle);
+/// Attempts to lock a mutex
+static ResultCode ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address,
+                                u32 tag) {
+    LOG_TRACE(Kernel_SVC, "called thread_handle=0x{:08X}, address=0x{:X}, tag=0x{:08X}",
+              thread_handle, address, tag);
 
-    if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) {
-        LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}",
-                  mutex_addr);
-        return ERR_INVALID_ADDRESS_STATE;
-    }
+    // Validate the input address.
+ R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory); + R_UNLESS(Common::IsAligned(address, sizeof(u32)), Svc::ResultInvalidAddress); - if (!Common::IsWordAligned(mutex_addr)) { - LOG_ERROR(Kernel_SVC, "Mutex Address is not word aligned, mutex_addr={:016X}", mutex_addr); - return ERR_INVALID_ADDRESS; - } - - auto* const current_process = system.Kernel().CurrentProcess(); - return current_process->GetMutex().TryAcquire(mutex_addr, holding_thread_handle, - requesting_thread_handle); + return system.Kernel().CurrentProcess()->WaitForAddress(thread_handle, address, tag); } -static ResultCode ArbitrateLock32(Core::System& system, Handle holding_thread_handle, - u32 mutex_addr, Handle requesting_thread_handle) { - return ArbitrateLock(system, holding_thread_handle, mutex_addr, requesting_thread_handle); +static ResultCode ArbitrateLock32(Core::System& system, Handle thread_handle, u32 address, + u32 tag) { + return ArbitrateLock(system, thread_handle, address, tag); } /// Unlock a mutex -static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) { - LOG_TRACE(Kernel_SVC, "called mutex_addr=0x{:X}", mutex_addr); +static ResultCode ArbitrateUnlock(Core::System& system, VAddr address) { + LOG_TRACE(Kernel_SVC, "called address=0x{:X}", address); - if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) { - LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}", - mutex_addr); - return ERR_INVALID_ADDRESS_STATE; - } + // Validate the input address. + R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory); + R_UNLESS(Common::IsAligned(address, sizeof(u32)), Svc::ResultInvalidAddress); - if (!Common::IsWordAligned(mutex_addr)) { - LOG_ERROR(Kernel_SVC, "Mutex Address is not word aligned, mutex_addr={:016X}", mutex_addr); - return ERR_INVALID_ADDRESS; - } - - auto* const current_process = system.Kernel().CurrentProcess(); - return current_process->GetMutex().Release(mutex_addr); + return system.Kernel().CurrentProcess()->SignalToAddress(address); } -static ResultCode ArbitrateUnlock32(Core::System& system, u32 mutex_addr) { - return ArbitrateUnlock(system, mutex_addr); +static ResultCode ArbitrateUnlock32(Core::System& system, u32 address) { + return ArbitrateUnlock(system, address); } enum class BreakType : u32 { @@ -1167,7 +1145,7 @@ static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 pri return ERR_INVALID_HANDLE; } - thread->SetPriority(priority); + thread->SetBasePriority(priority); return RESULT_SUCCESS; } @@ -1607,223 +1585,135 @@ static void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanosec } /// Wait process wide key atomic -static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_addr, - VAddr condition_variable_addr, Handle thread_handle, - s64 nano_seconds) { - LOG_TRACE( - Kernel_SVC, - "called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle=0x{:08X}, timeout={}", - mutex_addr, condition_variable_addr, thread_handle, nano_seconds); +static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key, + u32 tag, s64 timeout_ns) { + LOG_TRACE(Kernel_SVC, "called address={:X}, cv_key={:X}, tag=0x{:08X}, timeout_ns={}", address, + cv_key, tag, timeout_ns); - if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) { - LOG_ERROR( - Kernel_SVC, - "Given mutex address must not be within the kernel address space. address=0x{:016X}", - mutex_addr); - return ERR_INVALID_ADDRESS_STATE; - } + // Validate input. 
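
R_UNLESS, pulled in through common_funcs.h, is the guard style these rewritten SVCs use for the validation lines that follow: return the given ResultCode unless the condition holds. In the spirit of (a sketch of the idea, not necessarily the exact macro text):

    // Leave the enclosing function with 'res' whenever 'expr' does not hold.
    #define R_UNLESS(expr, res)   \
        do {                      \
            if (!(expr)) {        \
                return res;       \
            }                     \
        } while (0)
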
+    R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory);
+    R_UNLESS(Common::IsAligned(address, sizeof(int32_t)), Svc::ResultInvalidAddress);
 
-    if (!Common::IsWordAligned(mutex_addr)) {
-        LOG_ERROR(Kernel_SVC, "Given mutex address must be word-aligned. address=0x{:016X}",
-                  mutex_addr);
-        return ERR_INVALID_ADDRESS;
-    }
-
-    ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
-    auto& kernel = system.Kernel();
-    Handle event_handle;
-    Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
-    auto* const current_process = kernel.CurrentProcess();
-    {
-        KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds);
-        const auto& handle_table = current_process->GetHandleTable();
-        std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
-        ASSERT(thread);
-
-        current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
-
-        if (thread->IsTerminationRequested()) {
-            lock.CancelSleep();
-            return ERR_THREAD_TERMINATING;
+    // Convert timeout from nanoseconds to ticks.
+    s64 timeout{};
+    if (timeout_ns > 0) {
+        const s64 offset_tick(timeout_ns);
+        if (offset_tick > 0) {
+            timeout = offset_tick + 2;
+            if (timeout <= 0) {
+                timeout = std::numeric_limits<s64>::max();
+            }
+        } else {
+            timeout = std::numeric_limits<s64>::max();
         }
-
-        const auto release_result = current_process->GetMutex().Release(mutex_addr);
-        if (release_result.IsError()) {
-            lock.CancelSleep();
-            return release_result;
-        }
-
-        if (nano_seconds == 0) {
-            lock.CancelSleep();
-            return RESULT_TIMEOUT;
-        }
-
-        current_thread->SetCondVarWaitAddress(condition_variable_addr);
-        current_thread->SetMutexWaitAddress(mutex_addr);
-        current_thread->SetWaitHandle(thread_handle);
-        current_thread->SetState(ThreadState::Waiting);
-        current_thread->SetWaitingCondVar(true);
-        current_process->InsertConditionVariableThread(SharedFrom(current_thread));
+    } else {
+        timeout = timeout_ns;
     }
 
-    if (event_handle != InvalidHandle) {
-        auto& time_manager = kernel.TimeManager();
-        time_manager.UnscheduleTimeEvent(event_handle);
-    }
-
-    {
-        KScopedSchedulerLock lock(kernel);
-
-        auto* owner = current_thread->GetLockOwner();
-        if (owner != nullptr) {
-            owner->RemoveMutexWaiter(SharedFrom(current_thread));
-        }
-
-        current_process->RemoveConditionVariableThread(SharedFrom(current_thread));
-    }
-    // Note: Deliberately don't attempt to inherit the lock owner's priority.
-
-    return current_thread->GetSignalingResult();
+    // Wait on the condition variable.
+ return system.Kernel().CurrentProcess()->WaitConditionVariable( + address, Common::AlignDown(cv_key, sizeof(u32)), tag, timeout); } -static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 mutex_addr, - u32 condition_variable_addr, Handle thread_handle, - u32 nanoseconds_low, u32 nanoseconds_high) { - const auto nanoseconds = static_cast(nanoseconds_low | (u64{nanoseconds_high} << 32)); - return WaitProcessWideKeyAtomic(system, mutex_addr, condition_variable_addr, thread_handle, - nanoseconds); +static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 address, u32 cv_key, u32 tag, + u32 timeout_ns_low, u32 timeout_ns_high) { + const auto timeout_ns = static_cast(timeout_ns_low | (u64{timeout_ns_high} << 32)); + return WaitProcessWideKeyAtomic(system, address, cv_key, tag, timeout_ns); } /// Signal process wide key -static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_addr, s32 target) { - LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}", - condition_variable_addr, target); +static void SignalProcessWideKey(Core::System& system, VAddr cv_key, s32 count) { + LOG_TRACE(Kernel_SVC, "called, cv_key=0x{:X}, count=0x{:08X}", cv_key, count); - ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4)); + // Signal the condition variable. + return system.Kernel().CurrentProcess()->SignalConditionVariable( + Common::AlignDown(cv_key, sizeof(u32)), count); +} - // Retrieve a list of all threads that are waiting for this condition variable. - auto& kernel = system.Kernel(); - KScopedSchedulerLock lock(kernel); - auto* const current_process = kernel.CurrentProcess(); - std::vector> waiting_threads = - current_process->GetConditionVariableThreads(condition_variable_addr); +static void SignalProcessWideKey32(Core::System& system, u32 cv_key, s32 count) { + SignalProcessWideKey(system, cv_key, count); +} - // Only process up to 'target' threads, unless 'target' is less equal 0, in which case process - // them all. - std::size_t last = waiting_threads.size(); - if (target > 0) { - last = std::min(waiting_threads.size(), static_cast(target)); - } - for (std::size_t index = 0; index < last; ++index) { - auto& thread = waiting_threads[index]; +namespace { - ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr); - - // liberate Cond Var Thread. - current_process->RemoveConditionVariableThread(thread); - - const std::size_t current_core = system.CurrentCoreIndex(); - auto& monitor = system.Monitor(); - - // Atomically read the value of the mutex. - u32 mutex_val = 0; - u32 update_val = 0; - const VAddr mutex_address = thread->GetMutexWaitAddress(); - do { - // If the mutex is not yet acquired, acquire it. - mutex_val = monitor.ExclusiveRead32(current_core, mutex_address); - - if (mutex_val != 0) { - update_val = mutex_val | Mutex::MutexHasWaitersFlag; - } else { - update_val = thread->GetWaitHandle(); - } - } while (!monitor.ExclusiveWrite32(current_core, mutex_address, update_val)); - monitor.ClearExclusive(); - if (mutex_val == 0) { - // We were able to acquire the mutex, resume this thread. - auto* const lock_owner = thread->GetLockOwner(); - if (lock_owner != nullptr) { - lock_owner->RemoveMutexWaiter(thread); - } - - thread->SetLockOwner(nullptr); - thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS); - thread->Wakeup(); - } else { - // The mutex is already owned by some other thread, make this thread wait on it. 
- const Handle owner_handle = static_cast(mutex_val & Mutex::MutexOwnerMask); - const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - auto owner = handle_table.Get(owner_handle); - ASSERT(owner); - thread->SetWaitingCondVar(false); - - owner->AddMutexWaiter(thread); - } +constexpr bool IsValidSignalType(Svc::SignalType type) { + switch (type) { + case Svc::SignalType::Signal: + case Svc::SignalType::SignalAndIncrementIfEqual: + case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual: + return true; + default: + return false; } } -static void SignalProcessWideKey32(Core::System& system, u32 condition_variable_addr, s32 target) { - SignalProcessWideKey(system, condition_variable_addr, target); +constexpr bool IsValidArbitrationType(Svc::ArbitrationType type) { + switch (type) { + case Svc::ArbitrationType::WaitIfLessThan: + case Svc::ArbitrationType::DecrementAndWaitIfLessThan: + case Svc::ArbitrationType::WaitIfEqual: + return true; + default: + return false; + } } +} // namespace + // Wait for an address (via Address Arbiter) -static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type, s32 value, - s64 timeout) { - LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, timeout={}", address, - type, value, timeout); +static ResultCode WaitForAddress(Core::System& system, VAddr address, Svc::ArbitrationType arb_type, + s32 value, s64 timeout_ns) { + LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, arb_type=0x{:X}, value=0x{:X}, timeout_ns={}", + address, arb_type, value, timeout_ns); - // If the passed address is a kernel virtual address, return invalid memory state. - if (Core::Memory::IsKernelVirtualAddress(address)) { - LOG_ERROR(Kernel_SVC, "Address is a kernel virtual address, address={:016X}", address); - return ERR_INVALID_ADDRESS_STATE; + // Validate input. + R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory); + R_UNLESS(Common::IsAligned(address, sizeof(int32_t)), Svc::ResultInvalidAddress); + R_UNLESS(IsValidArbitrationType(arb_type), Svc::ResultInvalidEnumValue); + + // Convert timeout from nanoseconds to ticks. + s64 timeout{}; + if (timeout_ns > 0) { + const s64 offset_tick(timeout_ns); + if (offset_tick > 0) { + timeout = offset_tick + 2; + if (timeout <= 0) { + timeout = std::numeric_limits::max(); + } + } else { + timeout = std::numeric_limits::max(); + } + } else { + timeout = timeout_ns; } - // If the address is not properly aligned to 4 bytes, return invalid address. 
- if (!Common::IsWordAligned(address)) { - LOG_ERROR(Kernel_SVC, "Address is not word aligned, address={:016X}", address); - return ERR_INVALID_ADDRESS; - } - - const auto arbitration_type = static_cast(type); - auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter(); - const ResultCode result = - address_arbiter.WaitForAddress(address, arbitration_type, value, timeout); - return result; + return system.Kernel().CurrentProcess()->WaitAddressArbiter(address, arb_type, value, timeout); } -static ResultCode WaitForAddress32(Core::System& system, u32 address, u32 type, s32 value, - u32 timeout_low, u32 timeout_high) { - const auto timeout = static_cast(timeout_low | (u64{timeout_high} << 32)); - return WaitForAddress(system, address, type, value, timeout); +static ResultCode WaitForAddress32(Core::System& system, u32 address, Svc::ArbitrationType arb_type, + s32 value, u32 timeout_ns_low, u32 timeout_ns_high) { + const auto timeout = static_cast(timeout_ns_low | (u64{timeout_ns_high} << 32)); + return WaitForAddress(system, address, arb_type, value, timeout); } // Signals to an address (via Address Arbiter) -static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type, s32 value, - s32 num_to_wake) { - LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, num_to_wake=0x{:X}", - address, type, value, num_to_wake); +static ResultCode SignalToAddress(Core::System& system, VAddr address, Svc::SignalType signal_type, + s32 value, s32 count) { + LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, signal_type=0x{:X}, value=0x{:X}, count=0x{:X}", + address, signal_type, value, count); - // If the passed address is a kernel virtual address, return invalid memory state. - if (Core::Memory::IsKernelVirtualAddress(address)) { - LOG_ERROR(Kernel_SVC, "Address is a kernel virtual address, address={:016X}", address); - return ERR_INVALID_ADDRESS_STATE; - } + // Validate input. + R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory); + R_UNLESS(Common::IsAligned(address, sizeof(s32)), Svc::ResultInvalidAddress); + R_UNLESS(IsValidSignalType(signal_type), Svc::ResultInvalidEnumValue); - // If the address is not properly aligned to 4 bytes, return invalid address. 
- if (!Common::IsWordAligned(address)) { - LOG_ERROR(Kernel_SVC, "Address is not word aligned, address={:016X}", address); - return ERR_INVALID_ADDRESS; - } - - const auto signal_type = static_cast(type); - auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter(); - return address_arbiter.SignalToAddress(address, signal_type, value, num_to_wake); + return system.Kernel().CurrentProcess()->SignalAddressArbiter(address, signal_type, value, + count); } -static ResultCode SignalToAddress32(Core::System& system, u32 address, u32 type, s32 value, - s32 num_to_wake) { - return SignalToAddress(system, address, type, value, num_to_wake); +static ResultCode SignalToAddress32(Core::System& system, u32 address, Svc::SignalType signal_type, + s32 value, s32 count) { + return SignalToAddress(system, address, signal_type, value, count); } static void KernelDebug([[maybe_unused]] Core::System& system, diff --git a/src/core/hle/kernel/svc_common.h b/src/core/hle/kernel/svc_common.h index 7734bb2360..4af0495510 100644 --- a/src/core/hle/kernel/svc_common.h +++ b/src/core/hle/kernel/svc_common.h @@ -8,6 +8,7 @@ namespace Kernel::Svc { +constexpr s32 ArgumentHandleCountMax = 0x40; constexpr u32 HandleWaitMask{1u << 30}; } // namespace Kernel::Svc diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h index f94c487bac..a32750ed75 100644 --- a/src/core/hle/kernel/svc_wrap.h +++ b/src/core/hle/kernel/svc_wrap.h @@ -7,6 +7,7 @@ #include "common/common_types.h" #include "core/arm/arm_interface.h" #include "core/core.h" +#include "core/hle/kernel/svc_types.h" #include "core/hle/result.h" namespace Kernel { @@ -277,18 +278,22 @@ void SvcWrap64(Core::System& system) { FuncReturn(system, retval); } -template +// Used by WaitForAddress +template void SvcWrap64(Core::System& system) { - FuncReturn(system, func(system, Param(system, 0), static_cast(Param(system, 1)), - static_cast(Param(system, 2)), static_cast(Param(system, 3))) - .raw); + FuncReturn(system, + func(system, Param(system, 0), static_cast(Param(system, 1)), + static_cast(Param(system, 2)), static_cast(Param(system, 3))) + .raw); } -template +// Used by SignalToAddress +template void SvcWrap64(Core::System& system) { - FuncReturn(system, func(system, Param(system, 0), static_cast(Param(system, 1)), - static_cast(Param(system, 2)), static_cast(Param(system, 3))) - .raw); + FuncReturn(system, + func(system, Param(system, 0), static_cast(Param(system, 1)), + static_cast(Param(system, 2)), static_cast(Param(system, 3))) + .raw); } //////////////////////////////////////////////////////////////////////////////////////////////////// @@ -504,22 +509,23 @@ void SvcWrap32(Core::System& system) { } // Used by WaitForAddress32 -template +template void SvcWrap32(Core::System& system) { const u32 retval = func(system, static_cast(Param(system, 0)), - static_cast(Param(system, 1)), static_cast(Param(system, 2)), - static_cast(Param(system, 3)), static_cast(Param(system, 4))) + static_cast(Param(system, 1)), + static_cast(Param(system, 2)), static_cast(Param(system, 3)), + static_cast(Param(system, 4))) .raw; FuncReturn(system, retval); } // Used by SignalToAddress32 -template +template void SvcWrap32(Core::System& system) { - const u32 retval = - func(system, static_cast(Param(system, 0)), static_cast(Param(system, 1)), - static_cast(Param(system, 2)), static_cast(Param(system, 3))) - .raw; + const u32 retval = func(system, static_cast(Param(system, 0)), + static_cast(Param(system, 1)), + static_cast(Param(system, 2)), 
static_cast(Param(system, 3))) + .raw; FuncReturn(system, retval); } diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index 33a4e1fa30..eda56c31c6 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp @@ -17,9 +17,11 @@ #include "core/hardware_properties.h" #include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" +#include "core/hle/kernel/k_condition_variable.h" #include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" #include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/memory/memory_layout.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/process.h" #include "core/hle/kernel/thread.h" @@ -61,24 +63,6 @@ void Thread::Stop() { } void Thread::Wakeup() { - KScopedSchedulerLock lock(kernel); - switch (thread_state) { - case ThreadState::Runnable: - // If the thread is waiting on multiple wait objects, it might be awoken more than once - // before actually resuming. We can ignore subsequent wakeups if the thread status has - // already been set to ThreadStatus::Ready. - return; - case ThreadState::Terminated: - // This should never happen, as threads must complete before being stopped. - DEBUG_ASSERT_MSG(false, "Thread with object id {} cannot be resumed because it's DEAD.", - GetObjectId()); - return; - } - - SetState(ThreadState::Runnable); -} - -void Thread::OnWakeUp() { KScopedSchedulerLock lock(kernel); SetState(ThreadState::Runnable); } @@ -167,15 +151,14 @@ ResultVal> Thread::Create(Core::System& system, ThreadTy thread->stack_top = stack_top; thread->disable_count = 1; thread->tpidr_el0 = 0; - thread->nominal_priority = thread->current_priority = priority; + thread->current_priority = priority; + thread->base_priority = priority; + thread->lock_owner = nullptr; thread->schedule_count = -1; thread->last_scheduled_tick = 0; thread->processor_id = processor_id; thread->ideal_core = processor_id; thread->affinity_mask.SetAffinity(processor_id, true); - thread->mutex_wait_address = 0; - thread->condvar_wait_address = 0; - thread->wait_handle = 0; thread->name = std::move(name); thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap(); thread->owner_process = owner_process; @@ -205,12 +188,17 @@ ResultVal> Thread::Create(Core::System& system, ThreadTy return MakeResult>(std::move(thread)); } -void Thread::SetPriority(u32 priority) { - KScopedSchedulerLock lock(kernel); +void Thread::SetBasePriority(u32 priority) { ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST, "Invalid priority value."); - nominal_priority = priority; - UpdatePriority(); + + KScopedSchedulerLock lock(kernel); + + // Change our base priority. + base_priority = priority; + + // Perform a priority restoration. 
+ RestorePriority(kernel, this); } void Thread::SetSynchronizationResults(KSynchronizationObject* object, ResultCode result) { @@ -224,95 +212,146 @@ VAddr Thread::GetCommandBufferAddress() const { return GetTLSAddress() + command_header_offset; } -void Thread::SetState(ThreadState new_status) { - if (new_status == thread_state) { - return; +void Thread::SetState(ThreadState state) { + KScopedSchedulerLock sl(kernel); + + SetMutexWaitAddressForDebugging(0); + const ThreadState old_state = thread_state; + thread_state = + static_cast((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask)); + if (thread_state != old_state) { + KScheduler::OnThreadStateChanged(kernel, this, old_state); } - - if (new_status != ThreadState::Waiting) { - SetWaitingCondVar(false); - } - - SetSchedulingStatus(new_status); - - thread_state = new_status; } -void Thread::AddMutexWaiter(std::shared_ptr thread) { - if (thread->lock_owner.get() == this) { - // If the thread is already waiting for this thread to release the mutex, ensure that the - // waiters list is consistent and return without doing anything. - const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); - ASSERT(iter != wait_mutex_threads.end()); - return; +void Thread::AddWaiterImpl(Thread* thread) { + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + + // Find the right spot to insert the waiter. + auto it = waiter_list.begin(); + while (it != waiter_list.end()) { + if (it->GetPriority() > thread->GetPriority()) { + break; + } + it++; } - // A thread can't wait on two different mutexes at the same time. - ASSERT(thread->lock_owner == nullptr); + // Keep track of how many kernel waiters we have. + if (Memory::IsKernelAddressKey(thread->GetAddressKey())) { + ASSERT((num_kernel_waiters++) >= 0); + } - // Ensure that the thread is not already in the list of mutex waiters - const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); - ASSERT(iter == wait_mutex_threads.end()); - - // Keep the list in an ordered fashion - const auto insertion_point = std::find_if( - wait_mutex_threads.begin(), wait_mutex_threads.end(), - [&thread](const auto& entry) { return entry->GetPriority() > thread->GetPriority(); }); - wait_mutex_threads.insert(insertion_point, thread); - thread->lock_owner = SharedFrom(this); - - UpdatePriority(); + // Insert the waiter. + waiter_list.insert(it, *thread); + thread->SetLockOwner(this); } -void Thread::RemoveMutexWaiter(std::shared_ptr thread) { - ASSERT(thread->lock_owner.get() == this); +void Thread::RemoveWaiterImpl(Thread* thread) { + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); - // Ensure that the thread is in the list of mutex waiters - const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); - ASSERT(iter != wait_mutex_threads.end()); + // Keep track of how many kernel waiters we have. + if (Memory::IsKernelAddressKey(thread->GetAddressKey())) { + ASSERT((num_kernel_waiters--) > 0); + } - wait_mutex_threads.erase(iter); - - thread->lock_owner = nullptr; - UpdatePriority(); + // Remove the waiter. + waiter_list.erase(waiter_list.iterator_to(*thread)); + thread->SetLockOwner(nullptr); } -void Thread::UpdatePriority() { - // If any of the threads waiting on the mutex have a higher priority - // (taking into account priority inheritance), then this thread inherits - // that thread's priority. 
 
-void Thread::AddMutexWaiter(std::shared_ptr<Thread> thread) {
-    if (thread->lock_owner.get() == this) {
-        // If the thread is already waiting for this thread to release the mutex, ensure that the
-        // waiters list is consistent and return without doing anything.
-        const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
-        ASSERT(iter != wait_mutex_threads.end());
-        return;
+void Thread::AddWaiterImpl(Thread* thread) {
+    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+    // Find the right spot to insert the waiter.
+    auto it = waiter_list.begin();
+    while (it != waiter_list.end()) {
+        if (it->GetPriority() > thread->GetPriority()) {
+            break;
+        }
+        it++;
     }
 
-    // A thread can't wait on two different mutexes at the same time.
-    ASSERT(thread->lock_owner == nullptr);
+    // Keep track of how many kernel waiters we have.
+    if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
+        ASSERT((num_kernel_waiters++) >= 0);
+    }
 
-    // Ensure that the thread is not already in the list of mutex waiters
-    const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
-    ASSERT(iter == wait_mutex_threads.end());
-
-    // Keep the list in an ordered fashion
-    const auto insertion_point = std::find_if(
-        wait_mutex_threads.begin(), wait_mutex_threads.end(),
-        [&thread](const auto& entry) { return entry->GetPriority() > thread->GetPriority(); });
-    wait_mutex_threads.insert(insertion_point, thread);
-    thread->lock_owner = SharedFrom(this);
-
-    UpdatePriority();
+    // Insert the waiter.
+    waiter_list.insert(it, *thread);
+    thread->SetLockOwner(this);
 }
 
-void Thread::RemoveMutexWaiter(std::shared_ptr<Thread> thread) {
-    ASSERT(thread->lock_owner.get() == this);
+void Thread::RemoveWaiterImpl(Thread* thread) {
+    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
-    // Ensure that the thread is in the list of mutex waiters
-    const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
-    ASSERT(iter != wait_mutex_threads.end());
+    // Keep track of how many kernel waiters we have.
+    if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
+        ASSERT((num_kernel_waiters--) > 0);
+    }
 
-    wait_mutex_threads.erase(iter);
-
-    thread->lock_owner = nullptr;
-    UpdatePriority();
+    // Remove the waiter.
+    waiter_list.erase(waiter_list.iterator_to(*thread));
+    thread->SetLockOwner(nullptr);
 }
 
-void Thread::UpdatePriority() {
-    // If any of the threads waiting on the mutex have a higher priority
-    // (taking into account priority inheritance), then this thread inherits
-    // that thread's priority.
-    u32 new_priority = nominal_priority;
-    if (!wait_mutex_threads.empty()) {
-        if (wait_mutex_threads.front()->current_priority < new_priority) {
-            new_priority = wait_mutex_threads.front()->current_priority;
+void Thread::RestorePriority(KernelCore& kernel, Thread* thread) {
+    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+    while (true) {
+        // We want to inherit priority where possible.
+        s32 new_priority = thread->GetBasePriority();
+        if (thread->HasWaiters()) {
+            new_priority = std::min(new_priority, thread->waiter_list.front().GetPriority());
+        }
+
+        // If the priority we would inherit is not different from ours, don't do anything.
+        if (new_priority == thread->GetPriority()) {
+            return;
+        }
+
+        // Ensure we don't violate condition variable red black tree invariants.
+        if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
+            BeforeUpdatePriority(kernel, cv_tree, thread);
+        }
+
+        // Change the priority.
+        const s32 old_priority = thread->GetPriority();
+        thread->SetPriority(new_priority);
+
+        // Restore the condition variable, if relevant.
+        if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
+            AfterUpdatePriority(kernel, cv_tree, thread);
+        }
+
+        // Update the scheduler.
+        KScheduler::OnThreadPriorityChanged(kernel, thread, old_priority);
+
+        // Keep the lock owner up to date.
+        Thread* lock_owner = thread->GetLockOwner();
+        if (lock_owner == nullptr) {
+            return;
+        }
+
+        // Update the thread in the lock owner's sorted list, and continue inheriting.
+        lock_owner->RemoveWaiterImpl(thread);
+        lock_owner->AddWaiterImpl(thread);
+        thread = lock_owner;
+    }
+}
+
+void Thread::AddWaiter(Thread* thread) {
+    AddWaiterImpl(thread);
+    RestorePriority(kernel, this);
+}
+
+void Thread::RemoveWaiter(Thread* thread) {
+    RemoveWaiterImpl(thread);
+    RestorePriority(kernel, this);
+}
+
+Thread* Thread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
+    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+    s32 num_waiters{};
+    Thread* next_lock_owner{};
+    auto it = waiter_list.begin();
+    while (it != waiter_list.end()) {
+        if (it->GetAddressKey() == key) {
+            Thread* thread = std::addressof(*it);
+
+            // Keep track of how many kernel waiters we have.
+            if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
+                ASSERT((num_kernel_waiters--) > 0);
+            }
+            it = waiter_list.erase(it);
+
+            // Update the next lock owner.
+            if (next_lock_owner == nullptr) {
+                next_lock_owner = thread;
+                next_lock_owner->SetLockOwner(nullptr);
+            } else {
+                next_lock_owner->AddWaiterImpl(thread);
+            }
+            num_waiters++;
+        } else {
+            it++;
         }
     }
 
-    if (new_priority == current_priority) {
-        return;
+    // Do priority updates, if we have a next owner.
+    if (next_lock_owner) {
+        RestorePriority(kernel, this);
+        RestorePriority(kernel, next_lock_owner);
     }
 
-    if (GetState() == ThreadState::Waiting && is_waiting_on_condvar) {
-        owner_process->RemoveConditionVariableThread(SharedFrom(this));
-    }
-
-    SetCurrentPriority(new_priority);
-
-    if (GetState() == ThreadState::Waiting && is_waiting_on_condvar) {
-        owner_process->InsertConditionVariableThread(SharedFrom(this));
-    }
-
-    if (!lock_owner) {
-        return;
-    }
-
-    // Ensure that the thread is within the correct location in the waiting list.
-    auto old_owner = lock_owner;
-    lock_owner->RemoveMutexWaiter(SharedFrom(this));
-    old_owner->AddMutexWaiter(SharedFrom(this));
-
-    // Recursively update the priority of the thread that depends on the priority of this one.
-    lock_owner->UpdatePriority();
+    // Return output.
+    *out_num_waiters = num_waiters;
+    return next_lock_owner;
 }
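RemoveWaiterByKey() is the unlock half of the scheme: every waiter blocked on the released address is pulled out of the owner's sorted list, the first match (the highest-priority waiter) becomes the next owner, and the rest are re-queued behind it. A self-contained simulation of that selection logic using plain standard-library types, illustrative only:

    #include <cstdint>
    #include <iostream>
    #include <list>
    #include <optional>

    struct Waiter {
        uint64_t key;  // lock address the waiter is blocked on
        int priority;  // lower value = higher priority; list is kept sorted
    };

    std::optional<Waiter> RemoveWaiterByKey(std::list<Waiter>& owner_waiters,
                                            std::list<Waiter>& next_owner_waiters,
                                            uint64_t key, int* out_num_waiters) {
        std::optional<Waiter> next_owner;
        int num = 0;
        for (auto it = owner_waiters.begin(); it != owner_waiters.end();) {
            if (it->key != key) {
                ++it;
                continue;
            }
            if (!next_owner) {
                next_owner = *it;  // highest-priority waiter takes the lock
            } else {
                next_owner_waiters.push_back(*it);  // the rest follow the new owner
            }
            it = owner_waiters.erase(it);
            ++num;
        }
        *out_num_waiters = num;
        return next_owner;
    }

    int main() {
        std::list<Waiter> owner{{0x1000, 1}, {0x1000, 5}, {0x2000, 3}};
        std::list<Waiter> next;
        int n = 0;
        const auto new_owner = RemoveWaiterByKey(owner, next, 0x1000, &n);
        std::cout << "new owner prio=" << new_owner->priority << ", waiters moved=" << n - 1
                  << ", unrelated waiters left=" << owner.size() << '\n';
        // Prints: new owner prio=1, waiters moved=1, unrelated waiters left=1
    }

In the kernel, the two RestorePriority() calls at the end then drop this thread's inherited boost and grant the new owner whatever boost its freshly inherited waiters imply.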
 
 ResultCode Thread::SetActivity(ThreadActivity value) {
@@ -372,18 +411,6 @@ void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
     KScheduler::OnThreadStateChanged(kernel, this, old_state);
 }
 
-void Thread::SetSchedulingStatus(ThreadState new_status) {
-    const auto old_state = GetRawState();
-    thread_state = (thread_state & ThreadState::HighMask) | new_status;
-    KScheduler::OnThreadStateChanged(kernel, this, old_state);
-}
-
-void Thread::SetCurrentPriority(u32 new_priority) {
-    const u32 old_priority = std::exchange(current_priority, new_priority);
-    KScheduler::OnThreadPriorityChanged(kernel, this, kernel.CurrentScheduler()->GetCurrentThread(),
-                                        old_priority);
-}
-
 ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
     KScopedSchedulerLock lock(kernel);
     const auto HighestSetCore = [](u64 mask, u32 max_cores) {
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index 06dd2ef2d8..820ea524fc 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -6,16 +6,21 @@
 #include <array>
 #include <functional>
+#include <span>
 #include <string>
 #include <utility>
 #include <vector>
+
+#include <boost/intrusive/list.hpp>
 
 #include "common/common_types.h"
+#include "common/intrusive_red_black_tree.h"
 #include "common/spin_lock.h"
 #include "core/arm/arm_interface.h"
 #include "core/hle/kernel/k_affinity_mask.h"
 #include "core/hle/kernel/k_synchronization_object.h"
 #include "core/hle/kernel/object.h"
+#include "core/hle/kernel/svc_common.h"
 #include "core/hle/result.h"
 
 namespace Common {
@@ -89,8 +94,6 @@ enum class ThreadState : u16 {
     InitSuspended = (1 << (4 + SuspendShift)),
 
     SuspendFlagMask = ((1 << 5) - 1) << SuspendShift,
-
-    HighMask = 0xfff0,
 };
 DECLARE_ENUM_FLAG_OPERATORS(ThreadState);
 
@@ -111,7 +114,10 @@ enum class ThreadSchedFlags : u32 {
     KernelInitPauseFlag = 1 << 8,
 };
 
-class Thread final : public KSynchronizationObject {
+class Thread final : public KSynchronizationObject, public boost::intrusive::list_base_hook<> {
+    friend class KScheduler;
+    friend class Process;
+
 public:
     explicit Thread(KernelCore& kernel);
     ~Thread() override;
@@ -180,49 +186,46 @@ public:
     /**
      * Gets the thread's current priority
      * @return The current thread's priority
     */
-    u32 GetPriority() const {
+    [[nodiscard]] s32 GetPriority() const {
         return current_priority;
     }
 
+    /**
+     * Sets the thread's current priority.
+     * @param priority The new priority.
+     */
+    void SetPriority(s32 priority) {
+        current_priority = priority;
+    }
+
     /**
      * Gets the thread's nominal priority.
      * @return The current thread's nominal priority.
     */
-    u32 GetNominalPriority() const {
-        return nominal_priority;
+    [[nodiscard]] s32 GetBasePriority() const {
+        return base_priority;
     }
 
     /**
-     * Sets the thread's current priority
-     * @param priority The new priority
+     * Sets the thread's nominal priority.
+     * @param priority The new priority.
     */
-    void SetPriority(u32 priority);
-
-    /// Adds a thread to the list of threads that are waiting for a lock held by this thread.
-    void AddMutexWaiter(std::shared_ptr<Thread> thread);
-
-    /// Removes a thread from the list of threads that are waiting for a lock held by this thread.
-    void RemoveMutexWaiter(std::shared_ptr<Thread> thread);
-
-    /// Recalculates the current priority taking into account priority inheritance.
-    void UpdatePriority();
+    void SetBasePriority(u32 priority);
 
     /// Changes the core that the thread is running or scheduled to run on.
-    ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask);
+    [[nodiscard]] ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask);
 
     /**
      * Gets the thread's thread ID
      * @return The thread's ID
     */
-    u64 GetThreadID() const {
+    [[nodiscard]] u64 GetThreadID() const {
         return thread_id;
     }
 
     /// Resumes a thread from waiting
     void Wakeup();
 
-    void OnWakeUp();
-
     ResultCode Start();
 
     virtual bool IsSignaled() const override;
@@ -242,7 +245,7 @@
     }
 
     ResultCode GetWaitResult(KSynchronizationObject** out) const {
-        *out = this->signaling_object;
+        *out = signaling_object;
         return signaling_result;
     }
 
@@ -328,18 +331,14 @@
         return thread_state;
     }
 
-    void SetState(ThreadState new_state);
-
-    void SetWaitingCondVar(bool value) {
-        is_waiting_on_condvar = value;
-    }
+    void SetState(ThreadState state);
 
     s64 GetLastScheduledTick() const {
-        return this->last_scheduled_tick;
+        return last_scheduled_tick;
     }
 
     void SetLastScheduledTick(s64 tick) {
-        this->last_scheduled_tick = tick;
+        last_scheduled_tick = tick;
     }
 
     u64 GetTotalCPUTimeTicks() const {
@@ -379,55 +378,13 @@
     }
 
     Thread* GetLockOwner() const {
-        return lock_owner.get();
+        return lock_owner;
     }
 
-    void SetLockOwner(std::shared_ptr<Thread> owner) {
-        lock_owner = std::move(owner);
+    void SetLockOwner(Thread* owner) {
+        lock_owner = owner;
     }
 
-    VAddr GetCondVarWaitAddress() const {
-        return condvar_wait_address;
-    }
-
-    void SetCondVarWaitAddress(VAddr address) {
-        condvar_wait_address = address;
-    }
-
-    VAddr GetMutexWaitAddress() const {
-        return mutex_wait_address;
-    }
-
-    void SetMutexWaitAddress(VAddr address) {
-        mutex_wait_address = address;
-    }
-
-    Handle GetWaitHandle() const {
-        return wait_handle;
-    }
-
-    void SetWaitHandle(Handle handle) {
-        wait_handle = handle;
-    }
-
-    VAddr GetArbiterWaitAddress() const {
-        return arb_wait_address;
-    }
-
-    void SetArbiterWaitAddress(VAddr address) {
-        arb_wait_address = address;
-    }
-
-    void SetHLETimeEvent(Handle time_event) {
-        hle_time_event = time_event;
-    }
-
-    Handle GetHLETimeEvent() const {
-        return hle_time_event;
-    }
-
-    bool InvokeHLECallback(std::shared_ptr<Thread> thread);
-
     u32 GetIdealCore() const {
         return ideal_core;
     }
@@ -442,11 +399,11 @@
     ResultCode Sleep(s64 nanoseconds);
 
     s64 GetYieldScheduleCount() const {
-        return this->schedule_count;
+        return schedule_count;
     }
 
     void SetYieldScheduleCount(s64 count) {
-        this->schedule_count = count;
+        schedule_count = count;
     }
 
     bool IsRunning() const {
@@ -469,14 +426,6 @@
         return global_handle;
     }
 
-    bool IsWaitingForArbitration() const {
-        return waiting_for_arbitration;
-    }
-
-    void WaitForArbitration(bool set) {
-        waiting_for_arbitration = set;
-    }
-
     bool IsCancellable() const {
         return is_cancellable;
     }
@@ -490,7 +439,7 @@
     }
 
     bool IsTerminationRequested() const {
-        return will_be_terminated || GetState() == ThreadState::Terminated;
+        return will_be_terminated || GetRawState() == ThreadState::Terminated;
     }
 
     bool IsPaused() const {
@@ -522,21 +471,21 @@
         constexpr QueueEntry() = default;
 
         constexpr void Initialize() {
-            this->prev = nullptr;
-            this->next = nullptr;
+            prev = nullptr;
+            next = nullptr;
         }
 
         constexpr Thread* GetPrev() const {
-            return this->prev;
+            return prev;
         }
         constexpr Thread* GetNext() const {
-            return this->next;
+            return next;
         }
         constexpr void SetPrev(Thread* thread) {
-            this->prev = thread;
+            prev = thread;
         }
         constexpr void SetNext(Thread* thread) {
-            this->next = thread;
+            next = thread;
         }
 
     private:
@@ -545,11 +494,11 @@
     };
 
     QueueEntry& GetPriorityQueueEntry(s32 core) {
-        return this->per_core_priority_queue_entry[core];
+        return per_core_priority_queue_entry[core];
     }
 
     const QueueEntry& GetPriorityQueueEntry(s32 core) const {
-        return this->per_core_priority_queue_entry[core];
+        return per_core_priority_queue_entry[core];
     }
 
     s32 GetDisableDispatchCount() const {
@@ -566,27 +515,155 @@
         disable_count--;
     }
 
-    void SetWaitObjectsForDebugging(KSynchronizationObject** objects, s32 num_objects) {
+    void SetWaitObjectsForDebugging(const std::span<KSynchronizationObject*>& objects) {
         wait_objects_for_debugging.clear();
-        wait_objects_for_debugging.reserve(num_objects);
-        for (auto i = 0; i < num_objects; ++i) {
-            wait_objects_for_debugging.emplace_back(objects[i]);
+        wait_objects_for_debugging.reserve(objects.size());
+        for (const auto& object : objects) {
+            wait_objects_for_debugging.emplace_back(object);
         }
     }
 
-    const std::vector<KSynchronizationObject*>& GetWaitObjectsForDebugging() const {
+    [[nodiscard]] const std::vector<KSynchronizationObject*>& GetWaitObjectsForDebugging() const {
         return wait_objects_for_debugging;
     }
 
-private:
-    friend class GlobalSchedulerContext;
-    friend class KScheduler;
-    friend class Process;
+    void SetMutexWaitAddressForDebugging(VAddr address) {
+        mutex_wait_address_for_debugging = address;
+    }
 
-    void SetSchedulingStatus(ThreadState new_status);
+    [[nodiscard]] VAddr GetMutexWaitAddressForDebugging() const {
+        return mutex_wait_address_for_debugging;
+    }
+
+    void AddWaiter(Thread* thread);
+
+    void RemoveWaiter(Thread* thread);
+
+    [[nodiscard]] Thread* RemoveWaiterByKey(s32* out_num_waiters, VAddr key);
+
+    [[nodiscard]] VAddr GetAddressKey() const {
+        return address_key;
+    }
+
+    [[nodiscard]] u32 GetAddressKeyValue() const {
+        return address_key_value;
+    }
+
+    void SetAddressKey(VAddr key) {
+        address_key = key;
+    }
+
+    void SetAddressKey(VAddr key, u32 val) {
+        address_key = key;
+        address_key_value = val;
+    }
+
+private:
+    static constexpr size_t PriorityInheritanceCountMax = 10;
+    union SyncObjectBuffer {
+        std::array<KSynchronizationObject*, Svc::ArgumentHandleCountMax> sync_objects{};
+        std::array<Handle,
+                   Svc::ArgumentHandleCountMax * (sizeof(KSynchronizationObject*) / sizeof(Handle))>
+            handles;
+        constexpr SyncObjectBuffer() {}
+    };
+    static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles));
+
+    struct ConditionVariableComparator {
+        struct LightCompareType {
+            u64 cv_key{};
+            s32 priority{};
+
+            [[nodiscard]] constexpr u64 GetConditionVariableKey() const {
+                return cv_key;
+            }
+
+            [[nodiscard]] constexpr s32 GetPriority() const {
+                return priority;
+            }
+        };
+
+        template <typename T>
+        requires(
+            std::same_as<T, Thread> ||
+            std::same_as<T, LightCompareType>) static constexpr int Compare(const T& lhs,
+                                                                            const Thread& rhs) {
+            const uintptr_t l_key = lhs.GetConditionVariableKey();
+            const uintptr_t r_key = rhs.GetConditionVariableKey();
+
+            if (l_key < r_key) {
+                // Sort first by key
+                return -1;
+            } else if (l_key == r_key && lhs.GetPriority() < rhs.GetPriority()) {
+                // And then by priority.
+                return -1;
+            } else {
+                return 1;
+            }
+        }
+    };
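ConditionVariableComparator orders the intrusive red-black tree by condition-variable key first and thread priority second, so the first tree entry matching a key is always the highest-priority waiter to wake. A sketch of the same ordering with a std::multiset, illustrative only:

    #include <cstdint>
    #include <iostream>
    #include <set>

    struct Waiter {
        uint64_t cv_key;
        int priority;  // lower value = higher priority
    };

    struct Cmp {
        bool operator()(const Waiter& a, const Waiter& b) const {
            if (a.cv_key != b.cv_key) {
                return a.cv_key < b.cv_key;  // sort first by key
            }
            return a.priority < b.priority;  // and then by priority
        }
    };

    int main() {
        std::multiset<Waiter, Cmp> tree{{0x2000, 3}, {0x1000, 7}, {0x1000, 2}};
        // Wake one waiter on key 0x1000: the first match is the priority-2 thread.
        const auto it = tree.lower_bound(Waiter{0x1000, -1});
        std::cout << std::hex << it->cv_key << " prio " << std::dec << it->priority << '\n';
        // Prints: 1000 prio 2
    }

This is also why RestorePriority() brackets a priority change with BeforeUpdatePriority()/AfterUpdatePriority(): mutating the priority of a node already in the tree would silently break this ordering invariant.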
+
+    Common::IntrusiveRedBlackTreeNode condvar_arbiter_tree_node{};
+
+    using ConditionVariableThreadTreeTraits =
+        Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&Thread::condvar_arbiter_tree_node>;
+    using ConditionVariableThreadTree =
+        ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>;
+
+public:
+    using ConditionVariableThreadTreeType = ConditionVariableThreadTree;
+
+    [[nodiscard]] uintptr_t GetConditionVariableKey() const {
+        return condvar_key;
+    }
+
+    [[nodiscard]] uintptr_t GetAddressArbiterKey() const {
+        return condvar_key;
+    }
+
+    void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, uintptr_t cv_key,
+                              u32 value) {
+        condvar_tree = tree;
+        condvar_key = cv_key;
+        address_key = address;
+        address_key_value = value;
+    }
+
+    void ClearConditionVariable() {
+        condvar_tree = nullptr;
+    }
+
+    [[nodiscard]] bool IsWaitingForConditionVariable() const {
+        return condvar_tree != nullptr;
+    }
+
+    void SetAddressArbiter(ConditionVariableThreadTree* tree, uintptr_t address) {
+        condvar_tree = tree;
+        condvar_key = address;
+    }
+
+    void ClearAddressArbiter() {
+        condvar_tree = nullptr;
+    }
+
+    [[nodiscard]] bool IsWaitingForAddressArbiter() const {
+        return condvar_tree != nullptr;
+    }
+
+    [[nodiscard]] ConditionVariableThreadTree* GetConditionVariableTree() const {
+        return condvar_tree;
+    }
+
+    [[nodiscard]] bool HasWaiters() const {
+        return !waiter_list.empty();
+    }
+
+private:
     void AddSchedulingFlag(ThreadSchedFlags flag);
     void RemoveSchedulingFlag(ThreadSchedFlags flag);
-    void SetCurrentPriority(u32 new_priority);
+    void AddWaiterImpl(Thread* thread);
+    void RemoveWaiterImpl(Thread* thread);
+    static void RestorePriority(KernelCore& kernel, Thread* thread);
 
     Common::SpinLock context_guard{};
     ThreadContext32 context_32{};
@@ -606,11 +683,11 @@
     /// Nominal thread priority, as set by the emulated application.
     /// The nominal priority is the thread priority without priority
     /// inheritance taken into account.
-    u32 nominal_priority = 0;
+    s32 base_priority{};
 
     /// Current thread priority. This may change over the course of the
     /// thread's lifetime in order to facilitate priority inheritance.
-    u32 current_priority = 0;
+    s32 current_priority{};
 
     u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
     s64 schedule_count{};
@@ -628,6 +705,9 @@
     /// passed to WaitSynchronization. This is used for debugging only.
     std::vector<KSynchronizationObject*> wait_objects_for_debugging;
 
+    /// The current mutex wait address. This is used for debugging only.
+    VAddr mutex_wait_address_for_debugging{};
+
     KSynchronizationObject* signaling_object;
     ResultCode signaling_result{RESULT_SUCCESS};
 
@@ -635,25 +715,11 @@
     MutexWaitingThreads wait_mutex_threads;
 
     /// Thread that owns the lock that this thread is waiting for.
-    std::shared_ptr<Thread> lock_owner;
-
-    /// If waiting on a ConditionVariable, this is the ConditionVariable address
-    VAddr condvar_wait_address = 0;
-    bool is_waiting_on_condvar{};
-    /// If waiting on a Mutex, this is the mutex address
-    VAddr mutex_wait_address = 0;
-    /// The handle used to wait for the mutex.
-    Handle wait_handle = 0;
-
-    /// If waiting for an AddressArbiter, this is the address being waited on.
-    VAddr arb_wait_address{0};
-    bool waiting_for_arbitration{};
+    Thread* lock_owner{};
 
     /// Handle used as userdata to reference this object when inserting into the CoreTiming queue.
     Handle global_handle = 0;
 
-    Handle hle_time_event;
-
     KScheduler* scheduler = nullptr;
 
     std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
@@ -679,6 +745,16 @@
     bool signaled{};
 
+    ConditionVariableThreadTree* condvar_tree{};
+    uintptr_t condvar_key{};
+    VAddr address_key{};
+    u32 address_key_value{};
+    s32 num_kernel_waiters{};
+
+    using WaiterList = boost::intrusive::list<Thread>;
+    WaiterList waiter_list{};
+    WaiterList pinned_waiter_list{};
+
     std::string name;
 };
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index b58a76dbab..832edd6295 100644
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -18,12 +18,10 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
     time_manager_event_type = Core::Timing::CreateEvent(
         "Kernel::TimeManagerCallback",
         [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
-            const KScopedSchedulerLock lock(system.Kernel());
-            const auto proper_handle = static_cast<Handle>(thread_handle);
-
             std::shared_ptr<Thread> thread;
             {
                 std::lock_guard lock{mutex};
+                const auto proper_handle = static_cast<Handle>(thread_handle);
                 if (cancelled_events[proper_handle]) {
                     return;
                 }
@@ -32,7 +30,7 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
 
             if (thread) {
                 // Thread can be null if process has exited
-                thread->OnWakeUp();
+                thread->Wakeup();
             }
         });
 }
diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp
index acf6b7ab5f..3ded857201 100644
--- a/src/yuzu/debugger/wait_tree.cpp
+++ b/src/yuzu/debugger/wait_tree.cpp
@@ -15,9 +15,9 @@
 #include "core/hle/kernel/handle_table.h"
 #include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_synchronization_object.h"
-#include "core/hle/kernel/mutex.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/readable_event.h"
+#include "core/hle/kernel/svc_common.h"
 #include "core/hle/kernel/thread.h"
 #include "core/memory.h"
 
@@ -116,7 +116,7 @@ QString WaitTreeText::GetText() const {
 WaitTreeMutexInfo::WaitTreeMutexInfo(VAddr mutex_address, const Kernel::HandleTable& handle_table)
     : mutex_address(mutex_address) {
     mutex_value = Core::System::GetInstance().Memory().Read32(mutex_address);
-    owner_handle = static_cast<Kernel::Handle>(mutex_value & Kernel::Mutex::MutexOwnerMask);
+    owner_handle = static_cast<Kernel::Handle>(mutex_value & Kernel::Svc::HandleWaitMask);
     owner = handle_table.Get<Kernel::Thread>(owner_handle);
 }
 
@@ -127,7 +127,7 @@ QString WaitTreeMutexInfo::GetText() const {
 }
 
 std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeMutexInfo::GetChildren() const {
-    const bool has_waiters = (mutex_value & Kernel::Mutex::MutexHasWaitersFlag) != 0;
+    const bool has_waiters = (mutex_value & Kernel::Svc::HandleWaitMask) != 0;
 
     std::vector<std::unique_ptr<WaitTreeItem>> list;
     list.push_back(std::make_unique<WaitTreeText>(tr("has waiters: %1").arg(has_waiters)));
@@ -324,11 +324,11 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
 
     list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadID())));
     list.push_back(std::make_unique<WaitTreeText>(tr("priority = %1(current) / %2(normal)")
                                                       .arg(thread.GetPriority())
-                                                      .arg(thread.GetNominalPriority())));
+                                                      .arg(thread.GetBasePriority())));
     list.push_back(std::make_unique<WaitTreeText>(
         tr("last running ticks = %1").arg(thread.GetLastScheduledTick())));
 
-    const VAddr mutex_wait_address = thread.GetMutexWaitAddress();
+    const VAddr mutex_wait_address = thread.GetMutexWaitAddressForDebugging();
     if (mutex_wait_address != 0) {
         const auto& handle_table = thread.GetOwnerProcess()->GetHandleTable();
         list.push_back(std::make_unique<WaitTreeMutexInfo>(mutex_wait_address, handle_table));
From 82f6037ec2c56788f7a387a45cccc8bda995619a Mon Sep 17 00:00:00 2001
From: bunnei
Date: Wed, 30 Dec 2020 01:34:50 -0800
Subject: [PATCH 16/20] core: hle: Add missing calls to MicroProfileOnThreadExit.

---
 src/core/core_timing.cpp                     | 1 +
 src/core/hle/service/nvflinger/nvflinger.cpp | 4 ++++
 2 files changed, 5 insertions(+)

diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index e6c8461a5d..874b5673a8 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -49,6 +49,7 @@ void CoreTiming::ThreadEntry(CoreTiming& instance) {
     Common::SetCurrentThreadPriority(Common::ThreadPriority::VeryHigh);
     instance.on_thread_init();
     instance.ThreadLoop();
+    MicroProfileOnThreadExit();
 }
 
 void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp
index 4b35819498..ceaa93d282 100644
--- a/src/core/hle/service/nvflinger/nvflinger.cpp
+++ b/src/core/hle/service/nvflinger/nvflinger.cpp
@@ -38,6 +38,10 @@ void NVFlinger::SplitVSync() {
     system.RegisterHostThread();
     std::string name = "yuzu:VSyncThread";
     MicroProfileOnThreadCreate(name.c_str());
+
+    // Cleanup
+    SCOPE_EXIT({ MicroProfileOnThreadExit(); });
+
     Common::SetCurrentThreadName(name.c_str());
     Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
     s64 delay = 0;
From 354130cd84b41ade9541fabc73f22144e6b521ce Mon Sep 17 00:00:00 2001
From: bunnei
Date: Wed, 30 Dec 2020 23:05:01 -0800
Subject: [PATCH 17/20] core: arm: arm_interface: Fix shadowing errors.

---
 src/core/arm/arm_interface.h | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h
index 70098c5266..9a0151736a 100644
--- a/src/core/arm/arm_interface.h
+++ b/src/core/arm/arm_interface.h
@@ -26,9 +26,10 @@
 using CPUInterrupts = std::array<CPUInterrupt, Core::Hardware::NUM_CPU_CORES>;

From: bunnei
Date: Wed, 30 Dec 2020 23:05:30 -0800
Subject: [PATCH 18/20] hle: kernel: k_scheduler_lock: Fix shadowing errors.

---
 src/core/hle/kernel/k_scheduler_lock.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
index 2f1c1f691a..9b40bd22c8 100644
--- a/src/core/hle/kernel/k_scheduler_lock.h
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -19,7 +19,7 @@ class KernelCore;
 template <typename SchedulerType>
 class KAbstractSchedulerLock {
 public:
-    explicit KAbstractSchedulerLock(KernelCore& kernel) : kernel{kernel} {}
+    explicit KAbstractSchedulerLock(KernelCore& kernel_) : kernel{kernel_} {}
 
     bool IsLockedByCurrentThread() const {
         return this->owner_thread == kernel.GetCurrentEmuThreadID();
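Patches 17 and 18 are the same one-line pattern: a constructor parameter spelled exactly like the member it initializes shadows that member for the rest of the constructor, which -Wshadow rightly flags; suffixing the parameter with an underscore removes the ambiguity. A minimal before/after illustration with a hypothetical struct, not yuzu code:

    struct Before {
        int value;
        explicit Before(int value) : value{value} {} // 'value' shadows the member
    };

    struct After {
        int value;
        explicit After(int value_) : value{value_} {} // unambiguous
    };

    int main() {
        return After{42}.value - Before{42}.value; // both compile; After reads clearly
    }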
From 81c1bfafeac88d0651d146056e9008c22bea87d2 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Sun, 10 Jan 2021 01:02:46 -0800
Subject: [PATCH 19/20] yuzu: debugger: wait_tree: Handle unknown ThreadState.

---
 src/yuzu/debugger/wait_tree.cpp | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp
index 3ded857201..deefb0ba0d 100644
--- a/src/yuzu/debugger/wait_tree.cpp
+++ b/src/yuzu/debugger/wait_tree.cpp
@@ -259,6 +259,9 @@ QString WaitTreeThread::GetText() const {
     case Kernel::ThreadState::Terminated:
         status = tr("terminated");
         break;
+    default:
+        status = tr("unknown");
+        break;
     }
 
     const auto& context = thread.GetContext64();
From 03dfc8d8e74910d447b755e00848a623ec65cd93 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Sun, 10 Jan 2021 14:29:02 -0800
Subject: [PATCH 20/20] hle: kernel: thread: Preserve thread wait reason for
 debugging only.

- This is decoupled from core functionality and used for debugging only.
---
 src/core/hle/kernel/k_address_arbiter.cpp     |  2 +
 src/core/hle/kernel/k_condition_variable.cpp  |  2 +
 .../hle/kernel/k_synchronization_object.cpp   |  1 +
 src/core/hle/kernel/kernel.cpp                |  2 +
 src/core/hle/kernel/svc.cpp                   |  1 +
 src/core/hle/kernel/thread.cpp                |  6 ++-
 src/core/hle/kernel/thread.h                  | 21 +++++++++
 src/yuzu/debugger/wait_tree.cpp               | 43 +++++++++++++++++--
 8 files changed, 74 insertions(+), 4 deletions(-)

diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
index 7b712d31a0..d9e702f132 100644
--- a/src/core/hle/kernel/k_address_arbiter.cpp
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -276,6 +276,7 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
         cur_thread->SetAddressArbiter(std::addressof(thread_tree), addr);
         thread_tree.insert(*cur_thread);
         cur_thread->SetState(ThreadState::Waiting);
+        cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
     }
 
     // Cancel the timer wait.
@@ -339,6 +340,7 @@ ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
         cur_thread->SetAddressArbiter(std::addressof(thread_tree), addr);
         thread_tree.insert(*cur_thread);
         cur_thread->SetState(ThreadState::Waiting);
+        cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
     }
 
     // Cancel the timer wait.
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index ef5c174095..49a0683109 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -133,6 +133,7 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val
             cur_thread->SetAddressKey(addr, value);
             owner_thread->AddWaiter(cur_thread);
             cur_thread->SetState(ThreadState::Waiting);
+            cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
             cur_thread->SetMutexWaitAddressForDebugging(addr);
         }
     }
@@ -315,6 +316,7 @@ ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout)
         // If the timeout is non-zero, set the thread as waiting.
         if (timeout != 0) {
             cur_thread->SetState(ThreadState::Waiting);
+            cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
             cur_thread->SetMutexWaitAddressForDebugging(addr);
         }
     }
diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp
index 11b989ecdc..1c508cb557 100644
--- a/src/core/hle/kernel/k_synchronization_object.cpp
+++ b/src/core/hle/kernel/k_synchronization_object.cpp
@@ -78,6 +78,7 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
         thread->SetCancellable();
         thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
         thread->SetState(ThreadState::Waiting);
+        thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Synchronization);
     }
 
     // The lock/sleep is done, so we should be able to get our result.
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 8d03f16fb8..c0ff287a67 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -605,6 +605,8 @@ void KernelCore::Suspend(bool in_suspention) {
         const auto state = should_suspend ? ThreadState::Runnable : ThreadState::Waiting;
         for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
             impl->suspend_threads[i]->SetState(state);
+            impl->suspend_threads[i]->SetWaitReasonForDebugging(
+                ThreadWaitReasonForDebugging::Suspended);
         }
     }
 }
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 99bb4ea20d..cc8b661afa 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -347,6 +347,7 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
     {
         KScopedSchedulerLock lock(kernel);
         thread->SetState(ThreadState::Waiting);
+        thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
        session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming());
     }
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index eda56c31c6..d973232550 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -215,7 +215,10 @@ VAddr Thread::GetCommandBufferAddress() const {
 void Thread::SetState(ThreadState state) {
     KScopedSchedulerLock sl(kernel);
 
-    SetMutexWaitAddressForDebugging(0);
+    // Clear debugging state
+    SetMutexWaitAddressForDebugging({});
+    SetWaitReasonForDebugging({});
+
     const ThreadState old_state = thread_state;
     thread_state =
         static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask));
@@ -386,6 +389,7 @@ ResultCode Thread::Sleep(s64 nanoseconds) {
     {
         KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
         SetState(ThreadState::Waiting);
+        SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
     }
 
     if (event_handle != InvalidHandle) {
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index 820ea524fc..6b66c9a0ea 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -114,6 +114,16 @@ enum class ThreadSchedFlags : u32 {
     KernelInitPauseFlag = 1 << 8,
 };
 
+enum class ThreadWaitReasonForDebugging : u32 {
+    None,            ///< Thread is not waiting
+    Sleep,           ///< Thread is waiting due to a SleepThread SVC
+    IPC,             ///< Thread is waiting for the reply from an IPC request
+    Synchronization, ///< Thread is waiting due to a WaitSynchronization SVC
+    ConditionVar,    ///< Thread is waiting due to a WaitProcessWideKey SVC
+    Arbitration,     ///< Thread is waiting due to a SignalToAddress/WaitForAddress SVC
+    Suspended,       ///< Thread is waiting due to process suspension
+};
+
 class Thread final : public KSynchronizationObject, public boost::intrusive::list_base_hook<> {
     friend class KScheduler;
     friend class Process;
@@ -515,6 +525,14 @@
         disable_count--;
     }
 
+    void SetWaitReasonForDebugging(ThreadWaitReasonForDebugging reason) {
+        wait_reason_for_debugging = reason;
+    }
+
+    [[nodiscard]] ThreadWaitReasonForDebugging GetWaitReasonForDebugging() const {
+        return wait_reason_for_debugging;
+    }
+
     void SetWaitObjectsForDebugging(const std::span<KSynchronizationObject*>& objects) {
         wait_objects_for_debugging.clear();
         wait_objects_for_debugging.reserve(objects.size());
@@ -708,6 +726,9 @@ private:
     /// The current mutex wait address. This is used for debugging only.
     VAddr mutex_wait_address_for_debugging{};
 
+    /// The reason the thread is waiting. This is used for debugging only.
+    ThreadWaitReasonForDebugging wait_reason_for_debugging{};
+
     KSynchronizationObject* signaling_object;
     ResultCode signaling_result{RESULT_SUCCESS};
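The debugger consumes the new field by translating each reason into a label, which the wait_tree.cpp hunk below does with Qt's tr(). The same mapping in plain C++ for reference, with strings taken from the diff that follows:

    #include <string_view>

    enum class ThreadWaitReasonForDebugging {
        None, Sleep, IPC, Synchronization, ConditionVar, Arbitration, Suspended,
    };

    constexpr std::string_view Describe(ThreadWaitReasonForDebugging reason) {
        switch (reason) {
        case ThreadWaitReasonForDebugging::Sleep:
            return "sleeping";
        case ThreadWaitReasonForDebugging::IPC:
            return "waiting for IPC reply";
        case ThreadWaitReasonForDebugging::Synchronization:
            return "waiting for objects";
        case ThreadWaitReasonForDebugging::ConditionVar:
            return "waiting for condition variable";
        case ThreadWaitReasonForDebugging::Arbitration:
            return "waiting for address arbiter";
        case ThreadWaitReasonForDebugging::Suspended:
            return "waiting for suspend resume";
        default:
            return "waiting";
        }
    }

Note that SetState() clears the reason on every transition, so the field is only meaningful while the thread is actually in ThreadState::Waiting.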
diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp
index deefb0ba0d..a93b5d3c28 100644
--- a/src/yuzu/debugger/wait_tree.cpp
+++ b/src/yuzu/debugger/wait_tree.cpp
@@ -251,7 +251,29 @@ QString WaitTreeThread::GetText() const {
         }
         break;
     case Kernel::ThreadState::Waiting:
-        status = tr("waiting");
+        switch (thread.GetWaitReasonForDebugging()) {
+        case Kernel::ThreadWaitReasonForDebugging::Sleep:
+            status = tr("sleeping");
+            break;
+        case Kernel::ThreadWaitReasonForDebugging::IPC:
+            status = tr("waiting for IPC reply");
+            break;
+        case Kernel::ThreadWaitReasonForDebugging::Synchronization:
+            status = tr("waiting for objects");
+            break;
+        case Kernel::ThreadWaitReasonForDebugging::ConditionVar:
+            status = tr("waiting for condition variable");
+            break;
+        case Kernel::ThreadWaitReasonForDebugging::Arbitration:
+            status = tr("waiting for address arbiter");
+            break;
+        case Kernel::ThreadWaitReasonForDebugging::Suspended:
+            status = tr("waiting for suspend resume");
+            break;
+        default:
+            status = tr("waiting");
+            break;
+        }
         break;
     case Kernel::ThreadState::Initialized:
         status = tr("initialized");
         break;
@@ -288,7 +310,20 @@ QColor WaitTreeThread::GetColor() const {
         return QColor(WaitTreeColors[2][color_index]);
     }
     case Kernel::ThreadState::Waiting:
-        return QColor(WaitTreeColors[3][color_index]);
+        switch (thread.GetWaitReasonForDebugging()) {
+        case Kernel::ThreadWaitReasonForDebugging::IPC:
+            return QColor(WaitTreeColors[4][color_index]);
+        case Kernel::ThreadWaitReasonForDebugging::Sleep:
+            return QColor(WaitTreeColors[5][color_index]);
+        case Kernel::ThreadWaitReasonForDebugging::Synchronization:
+        case Kernel::ThreadWaitReasonForDebugging::ConditionVar:
+        case Kernel::ThreadWaitReasonForDebugging::Arbitration:
+        case Kernel::ThreadWaitReasonForDebugging::Suspended:
+            return QColor(WaitTreeColors[6][color_index]);
+            break;
+        default:
+            return QColor(WaitTreeColors[3][color_index]);
+        }
     case Kernel::ThreadState::Initialized:
         return QColor(WaitTreeColors[7][color_index]);
     case Kernel::ThreadState::Terminated:
@@ -339,7 +374,9 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
         list.push_back(std::make_unique<WaitTreeText>(tr("not waiting for mutex")));
     }
 
-    if (thread.GetState() == Kernel::ThreadState::Waiting) {
+    if (thread.GetState() == Kernel::ThreadState::Waiting &&
+        thread.GetWaitReasonForDebugging() ==
+            Kernel::ThreadWaitReasonForDebugging::Synchronization) {
         list.push_back(std::make_unique<WaitTreeObjectList>(thread.GetWaitObjectsForDebugging(),
                                                             thread.IsCancellable()));
     }
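One pattern from patch 16 worth a closing note: SCOPE_EXIT({ MicroProfileOnThreadExit(); }); guarantees the profiler is detached on every path out of SplitVSync(), including early returns. A minimal sketch of that scope-guard idiom using only the standard library; the puts() calls stand in for the real MicroProfile functions, and this is not yuzu's actual SCOPE_EXIT implementation:

    #include <cstdio>
    #include <utility>

    template <typename F>
    class ScopeExit {
    public:
        explicit ScopeExit(F&& f) : func{std::forward<F>(f)} {}
        ~ScopeExit() {
            func(); // runs on normal return and on exceptions alike
        }
        ScopeExit(const ScopeExit&) = delete;
        ScopeExit& operator=(const ScopeExit&) = delete;

    private:
        F func;
    };

    void VSyncThreadSketch() {
        std::puts("MicroProfileOnThreadCreate");
        ScopeExit guard{[] { std::puts("MicroProfileOnThreadExit"); }};
        // ... vsync loop; any early return still runs the guard ...
    }

    int main() {
        VSyncThreadSketch();
    }

Without the guard, the core_timing.cpp fix in the same patch (a direct call after ThreadLoop()) would be the fallback style; the guard form is the more robust of the two when a function has multiple exits.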