mirror of https://git.suyu.dev/suyu/suyu
core: hle: Integrate new KConditionVariable and KAddressArbiter implementations.
parent 952d1ac487
commit 912dd50146
@@ -142,8 +142,6 @@ add_library(core STATIC
    hardware_interrupt_manager.h
    hle/ipc.h
    hle/ipc_helpers.h
    hle/kernel/address_arbiter.cpp
    hle/kernel/address_arbiter.h
    hle/kernel/client_port.cpp
    hle/kernel/client_port.h
    hle/kernel/client_session.cpp
@@ -189,8 +187,6 @@ add_library(core STATIC
    hle/kernel/memory/slab_heap.h
    hle/kernel/memory/system_control.cpp
    hle/kernel/memory/system_control.h
    hle/kernel/mutex.cpp
    hle/kernel/mutex.h
    hle/kernel/object.cpp
    hle/kernel/object.h
    hle/kernel/physical_core.cpp
@@ -1,317 +0,0 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <vector>

#include "common/assert.h"
#include "common/common_types.h"
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/result.h"
#include "core/memory.h"

namespace Kernel {

// Wake up num_to_wake (or all) threads in a vector.
void AddressArbiter::WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads,
                                 s32 num_to_wake) {
    // Only process up to 'target' threads, unless 'target' is <= 0, in which case process
    // them all.
    std::size_t last = waiting_threads.size();
    if (num_to_wake > 0) {
        last = std::min(last, static_cast<std::size_t>(num_to_wake));
    }

    // Signal the waiting threads.
    for (std::size_t i = 0; i < last; i++) {
        waiting_threads[i]->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
        RemoveThread(waiting_threads[i]);
        waiting_threads[i]->WaitForArbitration(false);
        waiting_threads[i]->Wakeup();
    }
}

AddressArbiter::AddressArbiter(Core::System& system) : system{system} {}
AddressArbiter::~AddressArbiter() = default;

ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 value,
                                           s32 num_to_wake) {
    switch (type) {
    case SignalType::Signal:
        return SignalToAddressOnly(address, num_to_wake);
    case SignalType::IncrementAndSignalIfEqual:
        return IncrementAndSignalToAddressIfEqual(address, value, num_to_wake);
    case SignalType::ModifyByWaitingCountAndSignalIfEqual:
        return ModifyByWaitingCountAndSignalToAddressIfEqual(address, value, num_to_wake);
    default:
        return ERR_INVALID_ENUM_VALUE;
    }
}

ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
    KScopedSchedulerLock lock(system.Kernel());
    const std::vector<std::shared_ptr<Thread>> waiting_threads =
        GetThreadsWaitingOnAddress(address);
    WakeThreads(waiting_threads, num_to_wake);
    return RESULT_SUCCESS;
}

ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value,
                                                              s32 num_to_wake) {
    KScopedSchedulerLock lock(system.Kernel());
    auto& memory = system.Memory();

    // Ensure that we can write to the address.
    if (!memory.IsValidVirtualAddress(address)) {
        return ERR_INVALID_ADDRESS_STATE;
    }

    const std::size_t current_core = system.CurrentCoreIndex();
    auto& monitor = system.Monitor();
    u32 current_value;
    do {
        current_value = monitor.ExclusiveRead32(current_core, address);

        if (current_value != static_cast<u32>(value)) {
            return ERR_INVALID_STATE;
        }
        current_value++;
    } while (!monitor.ExclusiveWrite32(current_core, address, current_value));

    return SignalToAddressOnly(address, num_to_wake);
}

ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
                                                                         s32 num_to_wake) {
    KScopedSchedulerLock lock(system.Kernel());
    auto& memory = system.Memory();

    // Ensure that we can write to the address.
    if (!memory.IsValidVirtualAddress(address)) {
        return ERR_INVALID_ADDRESS_STATE;
    }

    // Get threads waiting on the address.
    const std::vector<std::shared_ptr<Thread>> waiting_threads =
        GetThreadsWaitingOnAddress(address);

    const std::size_t current_core = system.CurrentCoreIndex();
    auto& monitor = system.Monitor();
    s32 updated_value;
    do {
        updated_value = monitor.ExclusiveRead32(current_core, address);

        if (updated_value != value) {
            return ERR_INVALID_STATE;
        }
        // Determine the modified value depending on the waiting count.
        if (num_to_wake <= 0) {
            if (waiting_threads.empty()) {
                updated_value = value + 1;
            } else {
                updated_value = value - 1;
            }
        } else {
            if (waiting_threads.empty()) {
                updated_value = value + 1;
            } else if (waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
                updated_value = value - 1;
            } else {
                updated_value = value;
            }
        }
    } while (!monitor.ExclusiveWrite32(current_core, address, updated_value));

    WakeThreads(waiting_threads, num_to_wake);
    return RESULT_SUCCESS;
}

ResultCode AddressArbiter::WaitForAddress(VAddr address, ArbitrationType type, s32 value,
                                          s64 timeout_ns) {
    switch (type) {
    case ArbitrationType::WaitIfLessThan:
        return WaitForAddressIfLessThan(address, value, timeout_ns, false);
    case ArbitrationType::DecrementAndWaitIfLessThan:
        return WaitForAddressIfLessThan(address, value, timeout_ns, true);
    case ArbitrationType::WaitIfEqual:
        return WaitForAddressIfEqual(address, value, timeout_ns);
    default:
        return ERR_INVALID_ENUM_VALUE;
    }
}

ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
                                                    bool should_decrement) {
    auto& memory = system.Memory();
    auto& kernel = system.Kernel();
    Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();

    Handle event_handle = InvalidHandle;
    {
        KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);

        if (current_thread->IsTerminationRequested()) {
            lock.CancelSleep();
            return ERR_THREAD_TERMINATING;
        }

        // Ensure that we can read the address.
        if (!memory.IsValidVirtualAddress(address)) {
            lock.CancelSleep();
            return ERR_INVALID_ADDRESS_STATE;
        }

        s32 current_value = static_cast<s32>(memory.Read32(address));
        if (current_value >= value) {
            lock.CancelSleep();
            return ERR_INVALID_STATE;
        }

        current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);

        s32 decrement_value;

        const std::size_t current_core = system.CurrentCoreIndex();
        auto& monitor = system.Monitor();
        do {
            current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
            if (should_decrement) {
                decrement_value = current_value - 1;
            } else {
                decrement_value = current_value;
            }
        } while (
            !monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value)));

        // Short-circuit without rescheduling, if timeout is zero.
        if (timeout == 0) {
            lock.CancelSleep();
            return RESULT_TIMEOUT;
        }

        current_thread->SetArbiterWaitAddress(address);
        InsertThread(SharedFrom(current_thread));
        current_thread->SetState(ThreadState::Waiting);
        current_thread->WaitForArbitration(true);
    }

    if (event_handle != InvalidHandle) {
        auto& time_manager = kernel.TimeManager();
        time_manager.UnscheduleTimeEvent(event_handle);
    }

    {
        KScopedSchedulerLock lock(kernel);
        if (current_thread->IsWaitingForArbitration()) {
            RemoveThread(SharedFrom(current_thread));
            current_thread->WaitForArbitration(false);
        }
    }

    return current_thread->GetSignalingResult();
}

ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
    auto& memory = system.Memory();
    auto& kernel = system.Kernel();
    Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();

    Handle event_handle = InvalidHandle;
    {
        KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);

        if (current_thread->IsTerminationRequested()) {
            lock.CancelSleep();
            return ERR_THREAD_TERMINATING;
        }

        // Ensure that we can read the address.
        if (!memory.IsValidVirtualAddress(address)) {
            lock.CancelSleep();
            return ERR_INVALID_ADDRESS_STATE;
        }

        s32 current_value = static_cast<s32>(memory.Read32(address));
        if (current_value != value) {
            lock.CancelSleep();
            return ERR_INVALID_STATE;
        }

        // Short-circuit without rescheduling, if timeout is zero.
        if (timeout == 0) {
            lock.CancelSleep();
            return RESULT_TIMEOUT;
        }

        current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
        current_thread->SetArbiterWaitAddress(address);
        InsertThread(SharedFrom(current_thread));
        current_thread->SetState(ThreadState::Waiting);
        current_thread->WaitForArbitration(true);
    }

    if (event_handle != InvalidHandle) {
        auto& time_manager = kernel.TimeManager();
        time_manager.UnscheduleTimeEvent(event_handle);
    }

    {
        KScopedSchedulerLock lock(kernel);
        if (current_thread->IsWaitingForArbitration()) {
            RemoveThread(SharedFrom(current_thread));
            current_thread->WaitForArbitration(false);
        }
    }

    return current_thread->GetSignalingResult();
}

void AddressArbiter::InsertThread(std::shared_ptr<Thread> thread) {
    const VAddr arb_addr = thread->GetArbiterWaitAddress();
    std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];

    const auto iter =
        std::find_if(thread_list.cbegin(), thread_list.cend(), [&thread](const auto& entry) {
            return entry->GetPriority() >= thread->GetPriority();
        });

    if (iter == thread_list.cend()) {
        thread_list.push_back(std::move(thread));
    } else {
        thread_list.insert(iter, std::move(thread));
    }
}

void AddressArbiter::RemoveThread(std::shared_ptr<Thread> thread) {
    const VAddr arb_addr = thread->GetArbiterWaitAddress();
    std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];

    const auto iter = std::find_if(thread_list.cbegin(), thread_list.cend(),
                                   [&thread](const auto& entry) { return thread == entry; });

    if (iter != thread_list.cend()) {
        thread_list.erase(iter);
    }
}

std::vector<std::shared_ptr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(
    VAddr address) const {
    const auto iter = arb_threads.find(address);
    if (iter == arb_threads.cend()) {
        return {};
    }

    const std::list<std::shared_ptr<Thread>>& thread_list = iter->second;
    return {thread_list.cbegin(), thread_list.cend()};
}
} // namespace Kernel
@@ -1,91 +0,0 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <list>
#include <memory>
#include <unordered_map>
#include <vector>

#include "common/common_types.h"

union ResultCode;

namespace Core {
class System;
}

namespace Kernel {

class Thread;

class AddressArbiter {
public:
    enum class ArbitrationType {
        WaitIfLessThan = 0,
        DecrementAndWaitIfLessThan = 1,
        WaitIfEqual = 2,
    };

    enum class SignalType {
        Signal = 0,
        IncrementAndSignalIfEqual = 1,
        ModifyByWaitingCountAndSignalIfEqual = 2,
    };

    explicit AddressArbiter(Core::System& system);
    ~AddressArbiter();

    AddressArbiter(const AddressArbiter&) = delete;
    AddressArbiter& operator=(const AddressArbiter&) = delete;

    AddressArbiter(AddressArbiter&&) = default;
    AddressArbiter& operator=(AddressArbiter&&) = delete;

    /// Signals an address being waited on with a particular signaling type.
    ResultCode SignalToAddress(VAddr address, SignalType type, s32 value, s32 num_to_wake);

    /// Waits on an address with a particular arbitration type.
    ResultCode WaitForAddress(VAddr address, ArbitrationType type, s32 value, s64 timeout_ns);

private:
    /// Signals an address being waited on.
    ResultCode SignalToAddressOnly(VAddr address, s32 num_to_wake);

    /// Signals an address being waited on and increments its value if equal to the value argument.
    ResultCode IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake);

    /// Signals an address being waited on and modifies its value based on waiting thread count if
    /// equal to the value argument.
    ResultCode ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
                                                             s32 num_to_wake);

    /// Waits on an address if the value passed is less than the argument value,
    /// optionally decrementing.
    ResultCode WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
                                        bool should_decrement);

    /// Waits on an address if the value passed is equal to the argument value.
    ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout);

    /// Wake up num_to_wake (or all) threads in a vector.
    void WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads, s32 num_to_wake);

    /// Insert a thread into the address arbiter container
    void InsertThread(std::shared_ptr<Thread> thread);

    /// Removes a thread from the address arbiter container
    void RemoveThread(std::shared_ptr<Thread> thread);

    // Gets the threads waiting on an address.
    std::vector<std::shared_ptr<Thread>> GetThreadsWaitingOnAddress(VAddr address) const;

    /// List of threads waiting for a address arbiter
    std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> arb_threads;

    Core::System& system;
};

} // namespace Kernel
@@ -72,7 +72,7 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
    }

    // For debugging only
    thread->SetWaitObjectsForDebugging(objects, num_objects);
    thread->SetWaitObjectsForDebugging({objects, static_cast<std::size_t>(num_objects)});

    // Mark the thread as waiting.
    thread->SetCancellable();
@@ -86,7 +86,7 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
    thread->ClearCancellable();

    // For debugging only
    thread->SetWaitObjectsForDebugging(nullptr, 0);
    thread->SetWaitObjectsForDebugging({});

    // Cancel the timer as needed.
    if (timer != InvalidHandle) {
@@ -1,170 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <memory>
#include <utility>
#include <vector>

#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/result.h"
#include "core/memory.h"

namespace Kernel {

/// Returns the number of threads that are waiting for a mutex, and the highest priority one among
/// those.
static std::pair<std::shared_ptr<Thread>, u32> GetHighestPriorityMutexWaitingThread(
    const std::shared_ptr<Thread>& current_thread, VAddr mutex_addr) {

    std::shared_ptr<Thread> highest_priority_thread;
    u32 num_waiters = 0;

    for (const auto& thread : current_thread->GetMutexWaitingThreads()) {
        if (thread->GetMutexWaitAddress() != mutex_addr)
            continue;

        ++num_waiters;
        if (highest_priority_thread == nullptr ||
            thread->GetPriority() < highest_priority_thread->GetPriority()) {
            highest_priority_thread = thread;
        }
    }

    return {highest_priority_thread, num_waiters};
}

/// Update the mutex owner field of all threads waiting on the mutex to point to the new owner.
static void TransferMutexOwnership(VAddr mutex_addr, std::shared_ptr<Thread> current_thread,
                                   std::shared_ptr<Thread> new_owner) {
    current_thread->RemoveMutexWaiter(new_owner);
    const auto threads = current_thread->GetMutexWaitingThreads();
    for (const auto& thread : threads) {
        if (thread->GetMutexWaitAddress() != mutex_addr)
            continue;

        ASSERT(thread->GetLockOwner() == current_thread.get());
        current_thread->RemoveMutexWaiter(thread);
        if (new_owner != thread)
            new_owner->AddMutexWaiter(thread);
    }
}

Mutex::Mutex(Core::System& system) : system{system} {}
Mutex::~Mutex() = default;

ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
                             Handle requesting_thread_handle) {
    // The mutex address must be 4-byte aligned
    if ((address % sizeof(u32)) != 0) {
        LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
        return ERR_INVALID_ADDRESS;
    }

    auto& kernel = system.Kernel();
    std::shared_ptr<Thread> current_thread =
        SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
    {
        KScopedSchedulerLock lock(kernel);
        // The mutex address must be 4-byte aligned
        if ((address % sizeof(u32)) != 0) {
            return ERR_INVALID_ADDRESS;
        }

        const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
        std::shared_ptr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle);
        std::shared_ptr<Thread> requesting_thread =
            handle_table.Get<Thread>(requesting_thread_handle);

        // TODO(Subv): It is currently unknown if it is possible to lock a mutex in behalf of
        // another thread.
        ASSERT(requesting_thread == current_thread);

        current_thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS);

        const u32 addr_value = system.Memory().Read32(address);

        // If the mutex isn't being held, just return success.
        if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) {
            return RESULT_SUCCESS;
        }

        if (holding_thread == nullptr) {
            return ERR_INVALID_HANDLE;
        }

        // Wait until the mutex is released
        current_thread->SetMutexWaitAddress(address);
        current_thread->SetWaitHandle(requesting_thread_handle);

        current_thread->SetState(ThreadState::Waiting);

        // Update the lock holder thread's priority to prevent priority inversion.
        holding_thread->AddMutexWaiter(current_thread);
    }

    {
        KScopedSchedulerLock lock(kernel);
        auto* owner = current_thread->GetLockOwner();
        if (owner != nullptr) {
            owner->RemoveMutexWaiter(current_thread);
        }
    }
    return current_thread->GetSignalingResult();
}

std::pair<ResultCode, std::shared_ptr<Thread>> Mutex::Unlock(std::shared_ptr<Thread> owner,
                                                             VAddr address) {
    // The mutex address must be 4-byte aligned
    if ((address % sizeof(u32)) != 0) {
        LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
        return {ERR_INVALID_ADDRESS, nullptr};
    }

    auto [new_owner, num_waiters] = GetHighestPriorityMutexWaitingThread(owner, address);
    if (new_owner == nullptr) {
        system.Memory().Write32(address, 0);
        return {RESULT_SUCCESS, nullptr};
    }
    // Transfer the ownership of the mutex from the previous owner to the new one.
    TransferMutexOwnership(address, owner, new_owner);
    u32 mutex_value = new_owner->GetWaitHandle();
    if (num_waiters >= 2) {
        // Notify the guest that there are still some threads waiting for the mutex
        mutex_value |= Mutex::MutexHasWaitersFlag;
    }
    new_owner->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
    new_owner->SetLockOwner(nullptr);
    new_owner->Wakeup();

    system.Memory().Write32(address, mutex_value);
    return {RESULT_SUCCESS, new_owner};
}

ResultCode Mutex::Release(VAddr address) {
    auto& kernel = system.Kernel();
    KScopedSchedulerLock lock(kernel);

    std::shared_ptr<Thread> current_thread =
        SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());

    auto [result, new_owner] = Unlock(current_thread, address);

    if (result != RESULT_SUCCESS && new_owner != nullptr) {
        new_owner->SetSynchronizationResults(nullptr, result);
    }

    return result;
}

} // namespace Kernel
@@ -1,42 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_types.h"

union ResultCode;

namespace Core {
class System;
}

namespace Kernel {

class Mutex final {
public:
    explicit Mutex(Core::System& system);
    ~Mutex();

    /// Flag that indicates that a mutex still has threads waiting for it.
    static constexpr u32 MutexHasWaitersFlag = 0x40000000;
    /// Mask of the bits in a mutex address value that contain the mutex owner.
    static constexpr u32 MutexOwnerMask = 0xBFFFFFFF;

    /// Attempts to acquire a mutex at the specified address.
    ResultCode TryAcquire(VAddr address, Handle holding_thread_handle,
                          Handle requesting_thread_handle);

    /// Unlocks a mutex for owner at address
    std::pair<ResultCode, std::shared_ptr<Thread>> Unlock(std::shared_ptr<Thread> owner,
                                                          VAddr address);

    /// Releases the mutex at the specified address.
    ResultCode Release(VAddr address);

private:
    Core::System& system;
};

} // namespace Kernel
@@ -162,48 +162,6 @@ u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
    return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
}

void Process::InsertConditionVariableThread(std::shared_ptr<Thread> thread) {
    VAddr cond_var_addr = thread->GetCondVarWaitAddress();
    std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
    auto it = thread_list.begin();
    while (it != thread_list.end()) {
        const std::shared_ptr<Thread> current_thread = *it;
        if (current_thread->GetPriority() > thread->GetPriority()) {
            thread_list.insert(it, thread);
            return;
        }
        ++it;
    }
    thread_list.push_back(thread);
}

void Process::RemoveConditionVariableThread(std::shared_ptr<Thread> thread) {
    VAddr cond_var_addr = thread->GetCondVarWaitAddress();
    std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
    auto it = thread_list.begin();
    while (it != thread_list.end()) {
        const std::shared_ptr<Thread> current_thread = *it;
        if (current_thread.get() == thread.get()) {
            thread_list.erase(it);
            return;
        }
        ++it;
    }
}

std::vector<std::shared_ptr<Thread>> Process::GetConditionVariableThreads(
    const VAddr cond_var_addr) {
    std::vector<std::shared_ptr<Thread>> result{};
    std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
    auto it = thread_list.begin();
    while (it != thread_list.end()) {
        std::shared_ptr<Thread> current_thread = *it;
        result.push_back(current_thread);
        ++it;
    }
    return result;
}

void Process::RegisterThread(const Thread* thread) {
    thread_list.push_back(thread);
}

@@ -412,9 +370,9 @@ bool Process::IsSignaled() const {
}

Process::Process(Core::System& system)
    : KSynchronizationObject{system.Kernel()}, page_table{std::make_unique<Memory::PageTable>(
                                                   system)},
      handle_table{system.Kernel()}, address_arbiter{system}, mutex{system}, system{system} {}
    : KSynchronizationObject{system.Kernel()},
      page_table{std::make_unique<Memory::PageTable>(system)}, handle_table{system.Kernel()},
      address_arbiter{system}, condition_var{system}, system{system} {}

Process::~Process() = default;
@@ -11,10 +11,10 @@
#include <unordered_map>
#include <vector>
#include "common/common_types.h"
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/process_capability.h"
#include "core/hle/result.h"

@@ -123,24 +123,30 @@ public:
        return handle_table;
    }

    /// Gets a reference to the process' address arbiter.
    AddressArbiter& GetAddressArbiter() {
        return address_arbiter;
    ResultCode SignalToAddress(VAddr address) {
        return condition_var.SignalToAddress(address);
    }

    /// Gets a const reference to the process' address arbiter.
    const AddressArbiter& GetAddressArbiter() const {
        return address_arbiter;
    ResultCode WaitForAddress(Handle handle, VAddr address, u32 tag) {
        return condition_var.WaitForAddress(handle, address, tag);
    }

    /// Gets a reference to the process' mutex lock.
    Mutex& GetMutex() {
        return mutex;
    void SignalConditionVariable(u64 cv_key, int32_t count) {
        return condition_var.Signal(cv_key, count);
    }

    /// Gets a const reference to the process' mutex lock
    const Mutex& GetMutex() const {
        return mutex;
    ResultCode WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
        return condition_var.Wait(address, cv_key, tag, ns);
    }

    ResultCode SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value,
                                    s32 count) {
        return address_arbiter.SignalToAddress(address, signal_type, value, count);
    }

    ResultCode WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
                                  s64 timeout) {
        return address_arbiter.WaitForAddress(address, arb_type, value, timeout);
    }

    /// Gets the address to the process' dedicated TLS region.
@@ -250,15 +256,6 @@ public:
        return thread_list;
    }

    /// Insert a thread into the condition variable wait container
    void InsertConditionVariableThread(std::shared_ptr<Thread> thread);

    /// Remove a thread from the condition variable wait container
    void RemoveConditionVariableThread(std::shared_ptr<Thread> thread);

    /// Obtain all condition variable threads waiting for some address
    std::vector<std::shared_ptr<Thread>> GetConditionVariableThreads(VAddr cond_var_addr);

    /// Registers a thread as being created under this process,
    /// adding it to this process' thread list.
    void RegisterThread(const Thread* thread);

@@ -369,12 +366,12 @@ private:
    HandleTable handle_table;

    /// Per-process address arbiter.
    AddressArbiter address_arbiter;
    KAddressArbiter address_arbiter;

    /// The per-process mutex lock instance used for handling various
    /// forms of services, such as lock arbitration, and condition
    /// variable related facilities.
    Mutex mutex;
    KConditionVariable condition_var;

    /// Address indicating the location of the process' dedicated TLS region.
    VAddr tls_region_address = 0;

@@ -385,9 +382,6 @@ private:
    /// List of threads that are running with this process as their owner.
    std::list<const Thread*> thread_list;

    /// List of threads waiting for a condition variable
    std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> cond_var_threads;

    /// Address of the top of the main thread's stack
    VAddr main_thread_stack_top{};
@@ -10,6 +10,7 @@

#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/fiber.h"
#include "common/logging/log.h"
#include "common/microprofile.h"
@@ -19,24 +20,26 @@
#include "core/core_timing.h"
#include "core/core_timing_util.h"
#include "core/cpu_manager.h"
#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/memory_block.h"
#include "core/hle/kernel/memory/memory_layout.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/kernel/svc.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/kernel/svc_wrap.h"
#include "core/hle/kernel/thread.h"
@@ -347,12 +350,6 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
        session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming());
    }

    Handle event_handle = thread->GetHLETimeEvent();
    if (event_handle != InvalidHandle) {
        auto& time_manager = kernel.TimeManager();
        time_manager.UnscheduleTimeEvent(event_handle);
    }

    return thread->GetSignalingResult();
}

@@ -491,56 +488,37 @@ static ResultCode CancelSynchronization32(Core::System& system, Handle thread_ha
    return CancelSynchronization(system, thread_handle);
}

/// Attempts to locks a mutex, creating it if it does not already exist
static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_handle,
                                VAddr mutex_addr, Handle requesting_thread_handle) {
    LOG_TRACE(Kernel_SVC,
              "called holding_thread_handle=0x{:08X}, mutex_addr=0x{:X}, "
              "requesting_current_thread_handle=0x{:08X}",
              holding_thread_handle, mutex_addr, requesting_thread_handle);
/// Attempts to locks a mutex
static ResultCode ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address,
                                u32 tag) {
    LOG_TRACE(Kernel_SVC, "called thread_handle=0x{:08X}, address=0x{:X}, tag=0x{:08X}",
              thread_handle, address, tag);

    if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) {
        LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}",
                  mutex_addr);
        return ERR_INVALID_ADDRESS_STATE;
    }
    // Validate the input address.
    R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory);
    R_UNLESS(Common::IsAligned(address, sizeof(u32)), Svc::ResultInvalidAddress);

    if (!Common::IsWordAligned(mutex_addr)) {
        LOG_ERROR(Kernel_SVC, "Mutex Address is not word aligned, mutex_addr={:016X}", mutex_addr);
        return ERR_INVALID_ADDRESS;
    }

    auto* const current_process = system.Kernel().CurrentProcess();
    return current_process->GetMutex().TryAcquire(mutex_addr, holding_thread_handle,
                                                  requesting_thread_handle);
    return system.Kernel().CurrentProcess()->WaitForAddress(thread_handle, address, tag);
}

static ResultCode ArbitrateLock32(Core::System& system, Handle holding_thread_handle,
                                  u32 mutex_addr, Handle requesting_thread_handle) {
    return ArbitrateLock(system, holding_thread_handle, mutex_addr, requesting_thread_handle);
static ResultCode ArbitrateLock32(Core::System& system, Handle thread_handle, u32 address,
                                  u32 tag) {
    return ArbitrateLock(system, thread_handle, address, tag);
}

/// Unlock a mutex
static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) {
    LOG_TRACE(Kernel_SVC, "called mutex_addr=0x{:X}", mutex_addr);
static ResultCode ArbitrateUnlock(Core::System& system, VAddr address) {
    LOG_TRACE(Kernel_SVC, "called address=0x{:X}", address);

    if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) {
        LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}",
                  mutex_addr);
        return ERR_INVALID_ADDRESS_STATE;
    }
    // Validate the input address.
    R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory);
    R_UNLESS(Common::IsAligned(address, sizeof(u32)), Svc::ResultInvalidAddress);

    if (!Common::IsWordAligned(mutex_addr)) {
        LOG_ERROR(Kernel_SVC, "Mutex Address is not word aligned, mutex_addr={:016X}", mutex_addr);
        return ERR_INVALID_ADDRESS;
    }

    auto* const current_process = system.Kernel().CurrentProcess();
    return current_process->GetMutex().Release(mutex_addr);
    return system.Kernel().CurrentProcess()->SignalToAddress(address);
}

static ResultCode ArbitrateUnlock32(Core::System& system, u32 mutex_addr) {
    return ArbitrateUnlock(system, mutex_addr);
static ResultCode ArbitrateUnlock32(Core::System& system, u32 address) {
    return ArbitrateUnlock(system, address);
}

enum class BreakType : u32 {
@@ -1167,7 +1145,7 @@ static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 pri
        return ERR_INVALID_HANDLE;
    }

    thread->SetPriority(priority);
    thread->SetBasePriority(priority);

    return RESULT_SUCCESS;
}
@@ -1607,223 +1585,135 @@ static void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanosec
}

/// Wait process wide key atomic
static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_addr,
                                           VAddr condition_variable_addr, Handle thread_handle,
                                           s64 nano_seconds) {
    LOG_TRACE(
        Kernel_SVC,
        "called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle=0x{:08X}, timeout={}",
        mutex_addr, condition_variable_addr, thread_handle, nano_seconds);
static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key,
                                           u32 tag, s64 timeout_ns) {
    LOG_TRACE(Kernel_SVC, "called address={:X}, cv_key={:X}, tag=0x{:08X}, timeout_ns={}", address,
              cv_key, tag, timeout_ns);

    if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) {
        LOG_ERROR(
            Kernel_SVC,
            "Given mutex address must not be within the kernel address space. address=0x{:016X}",
            mutex_addr);
        return ERR_INVALID_ADDRESS_STATE;
    }
    // Validate input.
    R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory);
    R_UNLESS(Common::IsAligned(address, sizeof(int32_t)), Svc::ResultInvalidAddress);

    if (!Common::IsWordAligned(mutex_addr)) {
        LOG_ERROR(Kernel_SVC, "Given mutex address must be word-aligned. address=0x{:016X}",
                  mutex_addr);
        return ERR_INVALID_ADDRESS;
    }

    ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
    auto& kernel = system.Kernel();
    Handle event_handle;
    Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
    auto* const current_process = kernel.CurrentProcess();
    {
        KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds);
        const auto& handle_table = current_process->GetHandleTable();
        std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
        ASSERT(thread);

        current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);

        if (thread->IsTerminationRequested()) {
            lock.CancelSleep();
            return ERR_THREAD_TERMINATING;
    // Convert timeout from nanoseconds to ticks.
    s64 timeout{};
    if (timeout_ns > 0) {
        const s64 offset_tick(timeout_ns);
        if (offset_tick > 0) {
            timeout = offset_tick + 2;
            if (timeout <= 0) {
                timeout = std::numeric_limits<s64>::max();
            }
        } else {
            timeout = std::numeric_limits<s64>::max();
        }

        const auto release_result = current_process->GetMutex().Release(mutex_addr);
        if (release_result.IsError()) {
            lock.CancelSleep();
            return release_result;
        }

        if (nano_seconds == 0) {
            lock.CancelSleep();
            return RESULT_TIMEOUT;
        }

        current_thread->SetCondVarWaitAddress(condition_variable_addr);
        current_thread->SetMutexWaitAddress(mutex_addr);
        current_thread->SetWaitHandle(thread_handle);
        current_thread->SetState(ThreadState::Waiting);
        current_thread->SetWaitingCondVar(true);
        current_process->InsertConditionVariableThread(SharedFrom(current_thread));
    } else {
        timeout = timeout_ns;
    }

    if (event_handle != InvalidHandle) {
        auto& time_manager = kernel.TimeManager();
        time_manager.UnscheduleTimeEvent(event_handle);
    }

    {
        KScopedSchedulerLock lock(kernel);

        auto* owner = current_thread->GetLockOwner();
        if (owner != nullptr) {
            owner->RemoveMutexWaiter(SharedFrom(current_thread));
        }

        current_process->RemoveConditionVariableThread(SharedFrom(current_thread));
    }
    // Note: Deliberately don't attempt to inherit the lock owner's priority.

    return current_thread->GetSignalingResult();
    // Wait on the condition variable.
    return system.Kernel().CurrentProcess()->WaitConditionVariable(
        address, Common::AlignDown(cv_key, sizeof(u32)), tag, timeout);
}

static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 mutex_addr,
                                             u32 condition_variable_addr, Handle thread_handle,
                                             u32 nanoseconds_low, u32 nanoseconds_high) {
    const auto nanoseconds = static_cast<s64>(nanoseconds_low | (u64{nanoseconds_high} << 32));
    return WaitProcessWideKeyAtomic(system, mutex_addr, condition_variable_addr, thread_handle,
                                    nanoseconds);
static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 address, u32 cv_key, u32 tag,
                                             u32 timeout_ns_low, u32 timeout_ns_high) {
    const auto timeout_ns = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
    return WaitProcessWideKeyAtomic(system, address, cv_key, tag, timeout_ns);
}

/// Signal process wide key
static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_addr, s32 target) {
    LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}",
              condition_variable_addr, target);
static void SignalProcessWideKey(Core::System& system, VAddr cv_key, s32 count) {
    LOG_TRACE(Kernel_SVC, "called, cv_key=0x{:X}, count=0x{:08X}", cv_key, count);

    ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
    // Signal the condition variable.
    return system.Kernel().CurrentProcess()->SignalConditionVariable(
        Common::AlignDown(cv_key, sizeof(u32)), count);
}

    // Retrieve a list of all threads that are waiting for this condition variable.
    auto& kernel = system.Kernel();
    KScopedSchedulerLock lock(kernel);
    auto* const current_process = kernel.CurrentProcess();
    std::vector<std::shared_ptr<Thread>> waiting_threads =
        current_process->GetConditionVariableThreads(condition_variable_addr);
static void SignalProcessWideKey32(Core::System& system, u32 cv_key, s32 count) {
    SignalProcessWideKey(system, cv_key, count);
}

    // Only process up to 'target' threads, unless 'target' is less equal 0, in which case process
    // them all.
    std::size_t last = waiting_threads.size();
    if (target > 0) {
        last = std::min(waiting_threads.size(), static_cast<std::size_t>(target));
    }
    for (std::size_t index = 0; index < last; ++index) {
        auto& thread = waiting_threads[index];
namespace {

        ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr);

        // liberate Cond Var Thread.
        current_process->RemoveConditionVariableThread(thread);

        const std::size_t current_core = system.CurrentCoreIndex();
        auto& monitor = system.Monitor();

        // Atomically read the value of the mutex.
        u32 mutex_val = 0;
        u32 update_val = 0;
        const VAddr mutex_address = thread->GetMutexWaitAddress();
        do {
            // If the mutex is not yet acquired, acquire it.
            mutex_val = monitor.ExclusiveRead32(current_core, mutex_address);

            if (mutex_val != 0) {
                update_val = mutex_val | Mutex::MutexHasWaitersFlag;
            } else {
                update_val = thread->GetWaitHandle();
            }
        } while (!monitor.ExclusiveWrite32(current_core, mutex_address, update_val));
        monitor.ClearExclusive();
        if (mutex_val == 0) {
            // We were able to acquire the mutex, resume this thread.
            auto* const lock_owner = thread->GetLockOwner();
            if (lock_owner != nullptr) {
                lock_owner->RemoveMutexWaiter(thread);
            }

            thread->SetLockOwner(nullptr);
            thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
            thread->Wakeup();
        } else {
            // The mutex is already owned by some other thread, make this thread wait on it.
            const Handle owner_handle = static_cast<Handle>(mutex_val & Mutex::MutexOwnerMask);
            const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
            auto owner = handle_table.Get<Thread>(owner_handle);
            ASSERT(owner);
            thread->SetWaitingCondVar(false);

            owner->AddMutexWaiter(thread);
        }
constexpr bool IsValidSignalType(Svc::SignalType type) {
    switch (type) {
    case Svc::SignalType::Signal:
    case Svc::SignalType::SignalAndIncrementIfEqual:
    case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
        return true;
    default:
        return false;
    }
}

static void SignalProcessWideKey32(Core::System& system, u32 condition_variable_addr, s32 target) {
    SignalProcessWideKey(system, condition_variable_addr, target);
constexpr bool IsValidArbitrationType(Svc::ArbitrationType type) {
    switch (type) {
    case Svc::ArbitrationType::WaitIfLessThan:
    case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
    case Svc::ArbitrationType::WaitIfEqual:
        return true;
    default:
        return false;
    }
}

} // namespace

// Wait for an address (via Address Arbiter)
static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type, s32 value,
                                 s64 timeout) {
    LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, timeout={}", address,
              type, value, timeout);
static ResultCode WaitForAddress(Core::System& system, VAddr address, Svc::ArbitrationType arb_type,
                                 s32 value, s64 timeout_ns) {
    LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, arb_type=0x{:X}, value=0x{:X}, timeout_ns={}",
              address, arb_type, value, timeout_ns);

    // If the passed address is a kernel virtual address, return invalid memory state.
    if (Core::Memory::IsKernelVirtualAddress(address)) {
        LOG_ERROR(Kernel_SVC, "Address is a kernel virtual address, address={:016X}", address);
        return ERR_INVALID_ADDRESS_STATE;
    // Validate input.
    R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory);
    R_UNLESS(Common::IsAligned(address, sizeof(int32_t)), Svc::ResultInvalidAddress);
    R_UNLESS(IsValidArbitrationType(arb_type), Svc::ResultInvalidEnumValue);

    // Convert timeout from nanoseconds to ticks.
    s64 timeout{};
    if (timeout_ns > 0) {
        const s64 offset_tick(timeout_ns);
        if (offset_tick > 0) {
            timeout = offset_tick + 2;
            if (timeout <= 0) {
                timeout = std::numeric_limits<s64>::max();
            }
        } else {
            timeout = std::numeric_limits<s64>::max();
        }
    } else {
        timeout = timeout_ns;
    }

    // If the address is not properly aligned to 4 bytes, return invalid address.
    if (!Common::IsWordAligned(address)) {
        LOG_ERROR(Kernel_SVC, "Address is not word aligned, address={:016X}", address);
        return ERR_INVALID_ADDRESS;
    }

    const auto arbitration_type = static_cast<AddressArbiter::ArbitrationType>(type);
    auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter();
    const ResultCode result =
        address_arbiter.WaitForAddress(address, arbitration_type, value, timeout);
    return result;
    return system.Kernel().CurrentProcess()->WaitAddressArbiter(address, arb_type, value, timeout);
}

static ResultCode WaitForAddress32(Core::System& system, u32 address, u32 type, s32 value,
                                   u32 timeout_low, u32 timeout_high) {
    const auto timeout = static_cast<s64>(timeout_low | (u64{timeout_high} << 32));
    return WaitForAddress(system, address, type, value, timeout);
static ResultCode WaitForAddress32(Core::System& system, u32 address, Svc::ArbitrationType arb_type,
                                   s32 value, u32 timeout_ns_low, u32 timeout_ns_high) {
    const auto timeout = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
    return WaitForAddress(system, address, arb_type, value, timeout);
}

// Signals to an address (via Address Arbiter)
static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type, s32 value,
                                  s32 num_to_wake) {
    LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, num_to_wake=0x{:X}",
              address, type, value, num_to_wake);
static ResultCode SignalToAddress(Core::System& system, VAddr address, Svc::SignalType signal_type,
                                  s32 value, s32 count) {
    LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, signal_type=0x{:X}, value=0x{:X}, count=0x{:X}",
              address, signal_type, value, count);

    // If the passed address is a kernel virtual address, return invalid memory state.
    if (Core::Memory::IsKernelVirtualAddress(address)) {
        LOG_ERROR(Kernel_SVC, "Address is a kernel virtual address, address={:016X}", address);
        return ERR_INVALID_ADDRESS_STATE;
    }
    // Validate input.
    R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory);
    R_UNLESS(Common::IsAligned(address, sizeof(s32)), Svc::ResultInvalidAddress);
    R_UNLESS(IsValidSignalType(signal_type), Svc::ResultInvalidEnumValue);

    // If the address is not properly aligned to 4 bytes, return invalid address.
    if (!Common::IsWordAligned(address)) {
        LOG_ERROR(Kernel_SVC, "Address is not word aligned, address={:016X}", address);
        return ERR_INVALID_ADDRESS;
    }

    const auto signal_type = static_cast<AddressArbiter::SignalType>(type);
    auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter();
    return address_arbiter.SignalToAddress(address, signal_type, value, num_to_wake);
    return system.Kernel().CurrentProcess()->SignalAddressArbiter(address, signal_type, value,
                                                                  count);
}

static ResultCode SignalToAddress32(Core::System& system, u32 address, u32 type, s32 value,
                                    s32 num_to_wake) {
    return SignalToAddress(system, address, type, value, num_to_wake);
static ResultCode SignalToAddress32(Core::System& system, u32 address, Svc::SignalType signal_type,
                                    s32 value, s32 count) {
    return SignalToAddress(system, address, signal_type, value, count);
}

static void KernelDebug([[maybe_unused]] Core::System& system,
@@ -8,6 +8,7 @@

namespace Kernel::Svc {

constexpr s32 ArgumentHandleCountMax = 0x40;
constexpr u32 HandleWaitMask{1u << 30};

} // namespace Kernel::Svc
@@ -7,6 +7,7 @@
#include "common/common_types.h"
#include "core/arm/arm_interface.h"
#include "core/core.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"

namespace Kernel {
@@ -277,18 +278,22 @@ void SvcWrap64(Core::System& system) {
    FuncReturn(system, retval);
}

template <ResultCode func(Core::System&, u64, u32, s32, s64)>
// Used by WaitForAddress
template <ResultCode func(Core::System&, u64, Svc::ArbitrationType, s32, s64)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1)),
                            static_cast<s32>(Param(system, 2)), static_cast<s64>(Param(system, 3)))
                           .raw);
    FuncReturn(system,
               func(system, Param(system, 0), static_cast<Svc::ArbitrationType>(Param(system, 1)),
                    static_cast<s32>(Param(system, 2)), static_cast<s64>(Param(system, 3)))
                   .raw);
}

template <ResultCode func(Core::System&, u64, u32, s32, s32)>
// Used by SignalToAddress
template <ResultCode func(Core::System&, u64, Svc::SignalType, s32, s32)>
void SvcWrap64(Core::System& system) {
    FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1)),
                            static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
                           .raw);
    FuncReturn(system,
               func(system, Param(system, 0), static_cast<Svc::SignalType>(Param(system, 1)),
                    static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
                   .raw);
}

////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -504,22 +509,23 @@ void SvcWrap32(Core::System& system) {
}

// Used by WaitForAddress32
template <ResultCode func(Core::System&, u32, u32, s32, u32, u32)>
template <ResultCode func(Core::System&, u32, Svc::ArbitrationType, s32, u32, u32)>
void SvcWrap32(Core::System& system) {
    const u32 retval = func(system, static_cast<u32>(Param(system, 0)),
                            static_cast<u32>(Param(system, 1)), static_cast<s32>(Param(system, 2)),
                            static_cast<u32>(Param(system, 3)), static_cast<u32>(Param(system, 4)))
                            static_cast<Svc::ArbitrationType>(Param(system, 1)),
                            static_cast<s32>(Param(system, 2)), static_cast<u32>(Param(system, 3)),
                            static_cast<u32>(Param(system, 4)))
                           .raw;
    FuncReturn(system, retval);
}

// Used by SignalToAddress32
template <ResultCode func(Core::System&, u32, u32, s32, s32)>
template <ResultCode func(Core::System&, u32, Svc::SignalType, s32, s32)>
void SvcWrap32(Core::System& system) {
    const u32 retval =
        func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1)),
             static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
            .raw;
    const u32 retval = func(system, static_cast<u32>(Param(system, 0)),
                            static_cast<Svc::SignalType>(Param(system, 1)),
                            static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
                           .raw;
    FuncReturn(system, retval);
}

@@ -17,9 +17,11 @@
#include "core/hardware_properties.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/memory_layout.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
@@ -61,24 +63,6 @@ void Thread::Stop() {
}

void Thread::Wakeup() {
    KScopedSchedulerLock lock(kernel);
    switch (thread_state) {
    case ThreadState::Runnable:
        // If the thread is waiting on multiple wait objects, it might be awoken more than once
        // before actually resuming. We can ignore subsequent wakeups if the thread status has
        // already been set to ThreadStatus::Ready.
        return;
    case ThreadState::Terminated:
        // This should never happen, as threads must complete before being stopped.
        DEBUG_ASSERT_MSG(false, "Thread with object id {} cannot be resumed because it's DEAD.",
                         GetObjectId());
        return;
    }

    SetState(ThreadState::Runnable);
}

void Thread::OnWakeUp() {
    KScopedSchedulerLock lock(kernel);
    SetState(ThreadState::Runnable);
}
@@ -167,15 +151,14 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
    thread->stack_top = stack_top;
    thread->disable_count = 1;
    thread->tpidr_el0 = 0;
    thread->nominal_priority = thread->current_priority = priority;
    thread->current_priority = priority;
    thread->base_priority = priority;
    thread->lock_owner = nullptr;
    thread->schedule_count = -1;
    thread->last_scheduled_tick = 0;
    thread->processor_id = processor_id;
    thread->ideal_core = processor_id;
    thread->affinity_mask.SetAffinity(processor_id, true);
    thread->mutex_wait_address = 0;
    thread->condvar_wait_address = 0;
    thread->wait_handle = 0;
    thread->name = std::move(name);
    thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap();
    thread->owner_process = owner_process;
@ -205,12 +188,17 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
|
|||
return MakeResult<std::shared_ptr<Thread>>(std::move(thread));
|
||||
}
|
||||
|
||||
void Thread::SetPriority(u32 priority) {
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
void Thread::SetBasePriority(u32 priority) {
|
||||
ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST,
|
||||
"Invalid priority value.");
|
||||
nominal_priority = priority;
|
||||
UpdatePriority();
|
||||
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
|
||||
// Change our base priority.
|
||||
base_priority = priority;
|
||||
|
||||
// Perform a priority restoration.
|
||||
RestorePriority(kernel, this);
|
||||
}
|
||||
|
||||
void Thread::SetSynchronizationResults(KSynchronizationObject* object, ResultCode result) {
|
||||
|
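The SetBasePriority hunk above validates the requested value against THREADPRIO_LOWEST and THREADPRIO_HIGHEST before storing it and leaving the rest to RestorePriority. Horizon-style priorities are numerically inverted (a smaller number is more urgent); the constants below use the commonly cited 0..63 range, which is an assumption of this sketch rather than something shown in the diff.

// Sketch of the inverted priority range checked by the assert above (values assumed).
#include <cassert>
#include <cstdint>

constexpr int32_t THREADPRIO_HIGHEST = 0;  // numerically lowest, most urgent
constexpr int32_t THREADPRIO_LOWEST = 63;  // numerically highest, least urgent

// Mirrors the bounds check performed before the base priority is stored.
inline bool IsValidBasePriority(int32_t priority) {
    return priority >= THREADPRIO_HIGHEST && priority <= THREADPRIO_LOWEST;
}

int main() {
    assert(IsValidBasePriority(44));  // within the valid range
    assert(!IsValidBasePriority(64)); // out of range
    return 0;
}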
@@ -224,95 +212,146 @@ VAddr Thread::GetCommandBufferAddress() const {
    return GetTLSAddress() + command_header_offset;
}

void Thread::SetState(ThreadState new_status) {
    if (new_status == thread_state) {
        return;
void Thread::SetState(ThreadState state) {
    KScopedSchedulerLock sl(kernel);

    SetMutexWaitAddressForDebugging(0);
    const ThreadState old_state = thread_state;
    thread_state =
        static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask));
    if (thread_state != old_state) {
        KScheduler::OnThreadStateChanged(kernel, this, old_state);
    }

    if (new_status != ThreadState::Waiting) {
        SetWaitingCondVar(false);
    }

    SetSchedulingStatus(new_status);

    thread_state = new_status;
}

void Thread::AddMutexWaiter(std::shared_ptr<Thread> thread) {
    if (thread->lock_owner.get() == this) {
        // If the thread is already waiting for this thread to release the mutex, ensure that the
        // waiters list is consistent and return without doing anything.
        const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
        ASSERT(iter != wait_mutex_threads.end());
        return;
void Thread::AddWaiterImpl(Thread* thread) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    // Find the right spot to insert the waiter.
    auto it = waiter_list.begin();
    while (it != waiter_list.end()) {
        if (it->GetPriority() > thread->GetPriority()) {
            break;
        }
        it++;
    }

    // A thread can't wait on two different mutexes at the same time.
    ASSERT(thread->lock_owner == nullptr);
    // Keep track of how many kernel waiters we have.
    if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
        ASSERT((num_kernel_waiters++) >= 0);
    }

    // Ensure that the thread is not already in the list of mutex waiters
    const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
    ASSERT(iter == wait_mutex_threads.end());

    // Keep the list in an ordered fashion
    const auto insertion_point = std::find_if(
        wait_mutex_threads.begin(), wait_mutex_threads.end(),
        [&thread](const auto& entry) { return entry->GetPriority() > thread->GetPriority(); });
    wait_mutex_threads.insert(insertion_point, thread);
    thread->lock_owner = SharedFrom(this);

    UpdatePriority();
    // Insert the waiter.
    waiter_list.insert(it, *thread);
    thread->SetLockOwner(this);
}

void Thread::RemoveMutexWaiter(std::shared_ptr<Thread> thread) {
    ASSERT(thread->lock_owner.get() == this);
void Thread::RemoveWaiterImpl(Thread* thread) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    // Ensure that the thread is in the list of mutex waiters
    const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
    ASSERT(iter != wait_mutex_threads.end());
    // Keep track of how many kernel waiters we have.
    if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
        ASSERT((num_kernel_waiters--) > 0);
    }

    wait_mutex_threads.erase(iter);

    thread->lock_owner = nullptr;
    UpdatePriority();
    // Remove the waiter.
    waiter_list.erase(waiter_list.iterator_to(*thread));
    thread->SetLockOwner(nullptr);
}

void Thread::UpdatePriority() {
    // If any of the threads waiting on the mutex have a higher priority
    // (taking into account priority inheritance), then this thread inherits
    // that thread's priority.
    u32 new_priority = nominal_priority;
    if (!wait_mutex_threads.empty()) {
        if (wait_mutex_threads.front()->current_priority < new_priority) {
            new_priority = wait_mutex_threads.front()->current_priority;
void Thread::RestorePriority(KernelCore& kernel, Thread* thread) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    while (true) {
        // We want to inherit priority where possible.
        s32 new_priority = thread->GetBasePriority();
        if (thread->HasWaiters()) {
            new_priority = std::min(new_priority, thread->waiter_list.front().GetPriority());
        }

        // If the priority we would inherit is not different from ours, don't do anything.
        if (new_priority == thread->GetPriority()) {
            return;
        }

        // Ensure we don't violate condition variable red black tree invariants.
        if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
            BeforeUpdatePriority(kernel, cv_tree, thread);
        }

        // Change the priority.
        const s32 old_priority = thread->GetPriority();
        thread->SetPriority(new_priority);

        // Restore the condition variable, if relevant.
        if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
            AfterUpdatePriority(kernel, cv_tree, thread);
        }

        // Update the scheduler.
        KScheduler::OnThreadPriorityChanged(kernel, thread, old_priority);

        // Keep the lock owner up to date.
        Thread* lock_owner = thread->GetLockOwner();
        if (lock_owner == nullptr) {
            return;
        }

        // Update the thread in the lock owner's sorted list, and continue inheriting.
        lock_owner->RemoveWaiterImpl(thread);
        lock_owner->AddWaiterImpl(thread);
        thread = lock_owner;
    }
}

void Thread::AddWaiter(Thread* thread) {
    AddWaiterImpl(thread);
    RestorePriority(kernel, this);
}

void Thread::RemoveWaiter(Thread* thread) {
    RemoveWaiterImpl(thread);
    RestorePriority(kernel, this);
}

Thread* Thread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    s32 num_waiters{};
    Thread* next_lock_owner{};
    auto it = waiter_list.begin();
    while (it != waiter_list.end()) {
        if (it->GetAddressKey() == key) {
            Thread* thread = std::addressof(*it);

            // Keep track of how many kernel waiters we have.
            if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
                ASSERT((num_kernel_waiters--) > 0);
            }
            it = waiter_list.erase(it);

            // Update the next lock owner.
            if (next_lock_owner == nullptr) {
                next_lock_owner = thread;
                next_lock_owner->SetLockOwner(nullptr);
            } else {
                next_lock_owner->AddWaiterImpl(thread);
            }
            num_waiters++;
        } else {
            it++;
        }
    }

    if (new_priority == current_priority) {
        return;
    // Do priority updates, if we have a next owner.
    if (next_lock_owner) {
        RestorePriority(kernel, this);
        RestorePriority(kernel, next_lock_owner);
    }

    if (GetState() == ThreadState::Waiting && is_waiting_on_condvar) {
        owner_process->RemoveConditionVariableThread(SharedFrom(this));
    }

    SetCurrentPriority(new_priority);

    if (GetState() == ThreadState::Waiting && is_waiting_on_condvar) {
        owner_process->InsertConditionVariableThread(SharedFrom(this));
    }

    if (!lock_owner) {
        return;
    }

    // Ensure that the thread is within the correct location in the waiting list.
    auto old_owner = lock_owner;
    lock_owner->RemoveMutexWaiter(SharedFrom(this));
    old_owner->AddMutexWaiter(SharedFrom(this));

    // Recursively update the priority of the thread that depends on the priority of this one.
    lock_owner->UpdatePriority();
    // Return output.
    *out_num_waiters = num_waiters;
    return next_lock_owner;
}

ResultCode Thread::SetActivity(ThreadActivity value) {
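Taken together, AddWaiterImpl, RemoveWaiterImpl, RestorePriority, and RemoveWaiterByKey above replace the old shared_ptr-based mutex waiter bookkeeping: each thread keeps its waiters sorted by priority, and whenever the list changes the owner's effective priority is re-derived as the minimum of its base priority and its most urgent waiter, repeating up the chain of lock owners. The self-contained model below condenses that walk; FakeThread is illustrative, and scheduler notification, condition-variable tree fixups, and kernel waiter counting are deliberately omitted.

// Self-contained model of the priority-inheritance walk performed by RestorePriority above.
// "FakeThread" is illustrative; it is not the emulator's Thread class.
#include <algorithm>
#include <list>

struct FakeThread {
    int base_priority;              // priority requested by the application (lower = more urgent)
    int current_priority;           // effective priority after inheritance
    FakeThread* lock_owner = nullptr;
    std::list<FakeThread*> waiters; // kept sorted by current_priority, most urgent first

    void AddWaiter(FakeThread* waiter) {
        auto it = waiters.begin();
        while (it != waiters.end() && (*it)->current_priority <= waiter->current_priority) {
            ++it;
        }
        waiters.insert(it, waiter);
        waiter->lock_owner = this;
        RestorePriority(this);
    }

    // Re-derive effective priorities up the chain of lock owners.
    static void RestorePriority(FakeThread* thread) {
        while (thread != nullptr) {
            int new_priority = thread->base_priority;
            if (!thread->waiters.empty()) {
                new_priority = std::min(new_priority, thread->waiters.front()->current_priority);
            }
            if (new_priority == thread->current_priority) {
                return; // nothing changed; stop propagating
            }
            thread->current_priority = new_priority;

            // Re-sort this thread within its own lock owner's waiter list, then continue.
            FakeThread* owner = thread->lock_owner;
            if (owner != nullptr) {
                owner->waiters.remove(thread);
                auto it = owner->waiters.begin();
                while (it != owner->waiters.end() &&
                       (*it)->current_priority <= thread->current_priority) {
                    ++it;
                }
                owner->waiters.insert(it, thread);
            }
            thread = owner;
        }
    }
};

The early return when the derived priority is unchanged is what keeps the walk bounded in practice.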
@@ -372,18 +411,6 @@ void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
    KScheduler::OnThreadStateChanged(kernel, this, old_state);
}

void Thread::SetSchedulingStatus(ThreadState new_status) {
    const auto old_state = GetRawState();
    thread_state = (thread_state & ThreadState::HighMask) | new_status;
    KScheduler::OnThreadStateChanged(kernel, this, old_state);
}

void Thread::SetCurrentPriority(u32 new_priority) {
    const u32 old_priority = std::exchange(current_priority, new_priority);
    KScheduler::OnThreadPriorityChanged(kernel, this, kernel.CurrentScheduler()->GetCurrentThread(),
                                        old_priority);
}

ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
    KScopedSchedulerLock lock(kernel);
    const auto HighestSetCore = [](u64 mask, u32 max_cores) {
@@ -6,16 +6,21 @@
#include <array>
#include <functional>
#include <span>
#include <string>
#include <utility>
#include <vector>

#include <boost/intrusive/list.hpp>

#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
#include "common/spin_lock.h"
#include "core/arm/arm_interface.h"
#include "core/hle/kernel/k_affinity_mask.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/svc_common.h"
#include "core/hle/result.h"

namespace Common {
@@ -89,8 +94,6 @@ enum class ThreadState : u16 {
    InitSuspended = (1 << (4 + SuspendShift)),

    SuspendFlagMask = ((1 << 5) - 1) << SuspendShift,

    HighMask = 0xfff0,
};
DECLARE_ENUM_FLAG_OPERATORS(ThreadState);
@@ -111,7 +114,10 @@ enum class ThreadSchedFlags : u32 {
    KernelInitPauseFlag = 1 << 8,
};

class Thread final : public KSynchronizationObject {
class Thread final : public KSynchronizationObject, public boost::intrusive::list_base_hook<> {
    friend class KScheduler;
    friend class Process;

public:
    explicit Thread(KernelCore& kernel);
    ~Thread() override;
@@ -180,49 +186,46 @@ public:
     * Gets the thread's current priority
     * @return The current thread's priority
     */
    u32 GetPriority() const {
    [[nodiscard]] s32 GetPriority() const {
        return current_priority;
    }

    /**
     * Sets the thread's current priority.
     * @param priority The new priority.
     */
    void SetPriority(s32 priority) {
        current_priority = priority;
    }

    /**
     * Gets the thread's nominal priority.
     * @return The current thread's nominal priority.
     */
    u32 GetNominalPriority() const {
        return nominal_priority;
    [[nodiscard]] s32 GetBasePriority() const {
        return base_priority;
    }

    /**
     * Sets the thread's current priority
     * @param priority The new priority
     * Sets the thread's nominal priority.
     * @param priority The new priority.
     */
    void SetPriority(u32 priority);

    /// Adds a thread to the list of threads that are waiting for a lock held by this thread.
    void AddMutexWaiter(std::shared_ptr<Thread> thread);

    /// Removes a thread from the list of threads that are waiting for a lock held by this thread.
    void RemoveMutexWaiter(std::shared_ptr<Thread> thread);

    /// Recalculates the current priority taking into account priority inheritance.
    void UpdatePriority();
    void SetBasePriority(u32 priority);

    /// Changes the core that the thread is running or scheduled to run on.
    ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask);
    [[nodiscard]] ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask);

    /**
     * Gets the thread's thread ID
     * @return The thread's ID
     */
    u64 GetThreadID() const {
    [[nodiscard]] u64 GetThreadID() const {
        return thread_id;
    }

    /// Resumes a thread from waiting
    void Wakeup();

    void OnWakeUp();

    ResultCode Start();

    virtual bool IsSignaled() const override;
@@ -242,7 +245,7 @@ public:
    }

    ResultCode GetWaitResult(KSynchronizationObject** out) const {
        *out = this->signaling_object;
        *out = signaling_object;
        return signaling_result;
    }
@@ -328,18 +331,14 @@ public:
        return thread_state;
    }

    void SetState(ThreadState new_state);

    void SetWaitingCondVar(bool value) {
        is_waiting_on_condvar = value;
    }
    void SetState(ThreadState state);

    s64 GetLastScheduledTick() const {
        return this->last_scheduled_tick;
        return last_scheduled_tick;
    }

    void SetLastScheduledTick(s64 tick) {
        this->last_scheduled_tick = tick;
        last_scheduled_tick = tick;
    }

    u64 GetTotalCPUTimeTicks() const {
@@ -379,55 +378,13 @@ public:
    }

    Thread* GetLockOwner() const {
        return lock_owner.get();
        return lock_owner;
    }

    void SetLockOwner(std::shared_ptr<Thread> owner) {
        lock_owner = std::move(owner);
    void SetLockOwner(Thread* owner) {
        lock_owner = owner;
    }

    VAddr GetCondVarWaitAddress() const {
        return condvar_wait_address;
    }

    void SetCondVarWaitAddress(VAddr address) {
        condvar_wait_address = address;
    }

    VAddr GetMutexWaitAddress() const {
        return mutex_wait_address;
    }

    void SetMutexWaitAddress(VAddr address) {
        mutex_wait_address = address;
    }

    Handle GetWaitHandle() const {
        return wait_handle;
    }

    void SetWaitHandle(Handle handle) {
        wait_handle = handle;
    }

    VAddr GetArbiterWaitAddress() const {
        return arb_wait_address;
    }

    void SetArbiterWaitAddress(VAddr address) {
        arb_wait_address = address;
    }

    void SetHLETimeEvent(Handle time_event) {
        hle_time_event = time_event;
    }

    Handle GetHLETimeEvent() const {
        return hle_time_event;
    }

    bool InvokeHLECallback(std::shared_ptr<Thread> thread);

    u32 GetIdealCore() const {
        return ideal_core;
    }
@@ -442,11 +399,11 @@ public:
    ResultCode Sleep(s64 nanoseconds);

    s64 GetYieldScheduleCount() const {
        return this->schedule_count;
        return schedule_count;
    }

    void SetYieldScheduleCount(s64 count) {
        this->schedule_count = count;
        schedule_count = count;
    }

    bool IsRunning() const {
@@ -469,14 +426,6 @@ public:
        return global_handle;
    }

    bool IsWaitingForArbitration() const {
        return waiting_for_arbitration;
    }

    void WaitForArbitration(bool set) {
        waiting_for_arbitration = set;
    }

    bool IsCancellable() const {
        return is_cancellable;
    }
@@ -490,7 +439,7 @@ public:
    }

    bool IsTerminationRequested() const {
        return will_be_terminated || GetState() == ThreadState::Terminated;
        return will_be_terminated || GetRawState() == ThreadState::Terminated;
    }

    bool IsPaused() const {
@@ -522,21 +471,21 @@ public:
        constexpr QueueEntry() = default;

        constexpr void Initialize() {
            this->prev = nullptr;
            this->next = nullptr;
            prev = nullptr;
            next = nullptr;
        }

        constexpr Thread* GetPrev() const {
            return this->prev;
            return prev;
        }
        constexpr Thread* GetNext() const {
            return this->next;
            return next;
        }
        constexpr void SetPrev(Thread* thread) {
            this->prev = thread;
            prev = thread;
        }
        constexpr void SetNext(Thread* thread) {
            this->next = thread;
            next = thread;
        }

    private:
@@ -545,11 +494,11 @@ public:
    };

    QueueEntry& GetPriorityQueueEntry(s32 core) {
        return this->per_core_priority_queue_entry[core];
        return per_core_priority_queue_entry[core];
    }

    const QueueEntry& GetPriorityQueueEntry(s32 core) const {
        return this->per_core_priority_queue_entry[core];
        return per_core_priority_queue_entry[core];
    }

    s32 GetDisableDispatchCount() const {
@@ -566,27 +515,155 @@ public:
        disable_count--;
    }

    void SetWaitObjectsForDebugging(KSynchronizationObject** objects, s32 num_objects) {
    void SetWaitObjectsForDebugging(const std::span<KSynchronizationObject*>& objects) {
        wait_objects_for_debugging.clear();
        wait_objects_for_debugging.reserve(num_objects);
        for (auto i = 0; i < num_objects; ++i) {
            wait_objects_for_debugging.emplace_back(objects[i]);
        wait_objects_for_debugging.reserve(objects.size());
        for (const auto& object : objects) {
            wait_objects_for_debugging.emplace_back(object);
        }
    }

    const std::vector<KSynchronizationObject*>& GetWaitObjectsForDebugging() const {
    [[nodiscard]] const std::vector<KSynchronizationObject*>& GetWaitObjectsForDebugging() const {
        return wait_objects_for_debugging;
    }

private:
    friend class GlobalSchedulerContext;
    friend class KScheduler;
    friend class Process;
    void SetMutexWaitAddressForDebugging(VAddr address) {
        mutex_wait_address_for_debugging = address;
    }

    void SetSchedulingStatus(ThreadState new_status);
    [[nodiscard]] VAddr GetMutexWaitAddressForDebugging() const {
        return mutex_wait_address_for_debugging;
    }

    void AddWaiter(Thread* thread);

    void RemoveWaiter(Thread* thread);

    [[nodiscard]] Thread* RemoveWaiterByKey(s32* out_num_waiters, VAddr key);

    [[nodiscard]] VAddr GetAddressKey() const {
        return address_key;
    }

    [[nodiscard]] u32 GetAddressKeyValue() const {
        return address_key_value;
    }

    void SetAddressKey(VAddr key) {
        address_key = key;
    }

    void SetAddressKey(VAddr key, u32 val) {
        address_key = key;
        address_key_value = val;
    }

private:
    static constexpr size_t PriorityInheritanceCountMax = 10;
    union SyncObjectBuffer {
        std::array<KSynchronizationObject*, Svc::ArgumentHandleCountMax> sync_objects{};
        std::array<Handle,
                   Svc::ArgumentHandleCountMax*(sizeof(KSynchronizationObject*) / sizeof(Handle))>
            handles;
        constexpr SyncObjectBuffer() {}
    };
    static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles));

    struct ConditionVariableComparator {
        struct LightCompareType {
            u64 cv_key{};
            s32 priority{};

            [[nodiscard]] constexpr u64 GetConditionVariableKey() const {
                return cv_key;
            }

            [[nodiscard]] constexpr s32 GetPriority() const {
                return priority;
            }
        };

        template <typename T>
        requires(
            std::same_as<T, Thread> ||
            std::same_as<T, LightCompareType>) static constexpr int Compare(const T& lhs,
                                                                            const Thread& rhs) {
            const uintptr_t l_key = lhs.GetConditionVariableKey();
            const uintptr_t r_key = rhs.GetConditionVariableKey();

            if (l_key < r_key) {
                // Sort first by key
                return -1;
            } else if (l_key == r_key && lhs.GetPriority() < rhs.GetPriority()) {
                // And then by priority.
                return -1;
            } else {
                return 1;
            }
        }
    };

    Common::IntrusiveRedBlackTreeNode condvar_arbiter_tree_node{};

    using ConditionVariableThreadTreeTraits =
        Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&Thread::condvar_arbiter_tree_node>;
    using ConditionVariableThreadTree =
        ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>;

public:
    using ConditionVariableThreadTreeType = ConditionVariableThreadTree;

    [[nodiscard]] uintptr_t GetConditionVariableKey() const {
        return condvar_key;
    }

    [[nodiscard]] uintptr_t GetAddressArbiterKey() const {
        return condvar_key;
    }

    void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, uintptr_t cv_key,
                              u32 value) {
        condvar_tree = tree;
        condvar_key = cv_key;
        address_key = address;
        address_key_value = value;
    }

    void ClearConditionVariable() {
        condvar_tree = nullptr;
    }

    [[nodiscard]] bool IsWaitingForConditionVariable() const {
        return condvar_tree != nullptr;
    }

    void SetAddressArbiter(ConditionVariableThreadTree* tree, uintptr_t address) {
        condvar_tree = tree;
        condvar_key = address;
    }

    void ClearAddressArbiter() {
        condvar_tree = nullptr;
    }

    [[nodiscard]] bool IsWaitingForAddressArbiter() const {
        return condvar_tree != nullptr;
    }

    [[nodiscard]] ConditionVariableThreadTree* GetConditionVariableTree() const {
        return condvar_tree;
    }

    [[nodiscard]] bool HasWaiters() const {
        return !waiter_list.empty();
    }

private:
    void AddSchedulingFlag(ThreadSchedFlags flag);
    void RemoveSchedulingFlag(ThreadSchedFlags flag);
    void SetCurrentPriority(u32 new_priority);
    void AddWaiterImpl(Thread* thread);
    void RemoveWaiterImpl(Thread* thread);
    static void RestorePriority(KernelCore& kernel, Thread* thread);

    Common::SpinLock context_guard{};
    ThreadContext32 context_32{};
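ConditionVariableComparator above orders threads in the intrusive red-black tree first by condvar key (the guest address being waited on) and then by priority, so the most urgent waiter for a given address sits first among the entries for that key. The stand-alone example below reproduces that two-level ordering with std::multiset standing in for Common::IntrusiveRedBlackTree; the Waiter type and the probe trick are illustrative only.

// Stand-alone illustration of the (key, priority) ordering used by the comparator above.
// std::multiset stands in for the intrusive red-black tree; "Waiter" is illustrative.
#include <cstdint>
#include <iostream>
#include <set>

struct Waiter {
    uintptr_t cv_key;  // address of the condition variable the thread waits on
    int32_t priority;  // lower value = more urgent
    int32_t thread_id;
};

struct WaiterLess {
    bool operator()(const Waiter& lhs, const Waiter& rhs) const {
        if (lhs.cv_key != rhs.cv_key) {
            return lhs.cv_key < rhs.cv_key; // sort first by key
        }
        if (lhs.priority != rhs.priority) {
            return lhs.priority < rhs.priority; // then by priority
        }
        return lhs.thread_id < rhs.thread_id; // tie-break to keep elements distinct
    }
};

int main() {
    std::multiset<Waiter, WaiterLess> tree{
        {0x2000, 30, 1}, {0x1000, 44, 2}, {0x1000, 12, 3}, {0x2000, 12, 4}};

    // Signalling address 0x1000 picks the most urgent waiter registered for that key:
    const Waiter probe{0x1000, -1, -1};
    auto it = tree.lower_bound(probe);
    if (it != tree.end() && it->cv_key == 0x1000) {
        std::cout << "wake thread " << it->thread_id << '\n'; // prints thread 3
    }
    return 0;
}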
@@ -606,11 +683,11 @@ private:
    /// Nominal thread priority, as set by the emulated application.
    /// The nominal priority is the thread priority without priority
    /// inheritance taken into account.
    u32 nominal_priority = 0;
    s32 base_priority{};

    /// Current thread priority. This may change over the course of the
    /// thread's lifetime in order to facilitate priority inheritance.
    u32 current_priority = 0;
    s32 current_priority{};

    u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
    s64 schedule_count{};
@@ -628,6 +705,9 @@ private:
    /// passed to WaitSynchronization. This is used for debugging only.
    std::vector<KSynchronizationObject*> wait_objects_for_debugging;

    /// The current mutex wait address. This is used for debugging only.
    VAddr mutex_wait_address_for_debugging{};

    KSynchronizationObject* signaling_object;
    ResultCode signaling_result{RESULT_SUCCESS};
@@ -635,25 +715,11 @@ private:
    MutexWaitingThreads wait_mutex_threads;

    /// Thread that owns the lock that this thread is waiting for.
    std::shared_ptr<Thread> lock_owner;

    /// If waiting on a ConditionVariable, this is the ConditionVariable address
    VAddr condvar_wait_address = 0;
    bool is_waiting_on_condvar{};
    /// If waiting on a Mutex, this is the mutex address
    VAddr mutex_wait_address = 0;
    /// The handle used to wait for the mutex.
    Handle wait_handle = 0;

    /// If waiting for an AddressArbiter, this is the address being waited on.
    VAddr arb_wait_address{0};
    bool waiting_for_arbitration{};
    Thread* lock_owner{};

    /// Handle used as userdata to reference this object when inserting into the CoreTiming queue.
    Handle global_handle = 0;

    Handle hle_time_event;

    KScheduler* scheduler = nullptr;

    std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
@@ -679,6 +745,16 @@ private:

    bool signaled{};

    ConditionVariableThreadTree* condvar_tree{};
    uintptr_t condvar_key{};
    VAddr address_key{};
    u32 address_key_value{};
    s32 num_kernel_waiters{};

    using WaiterList = boost::intrusive::list<Thread>;
    WaiterList waiter_list{};
    WaiterList pinned_waiter_list{};

    std::string name;
};
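The new waiter_list and pinned_waiter_list members are boost::intrusive::list containers, which is why Thread now also derives from boost::intrusive::list_base_hook<>: linking a thread into a waiter list requires no heap allocation and no shared_ptr, only that the element outlives its membership. A minimal demonstration of the same hook pattern follows; Node is an invented stand-in for the real Thread class.

// Minimal demonstration of the intrusive-list pattern adopted above.
// "Node" is illustrative; in the diff the hooked class is Kernel::Thread.
#include <boost/intrusive/list.hpp>
#include <iostream>

class Node : public boost::intrusive::list_base_hook<> {
public:
    explicit Node(int priority_) : priority{priority_} {}
    int priority;
};

using NodeList = boost::intrusive::list<Node>;

int main() {
    Node a{12}, b{30}, c{44}; // storage owned elsewhere; the list only links the nodes
    NodeList waiters;
    waiters.push_back(b);
    waiters.push_back(c);
    waiters.push_front(a); // keep the most urgent waiter at the front

    std::cout << "front priority = " << waiters.front().priority << '\n'; // 12

    // Unlinking is O(1) and allocation-free, mirroring waiter_list.erase(iterator_to(*thread)).
    waiters.erase(waiters.iterator_to(b));
    std::cout << "size = " << waiters.size() << '\n'; // 2

    waiters.clear(); // unlink everything before the nodes go out of scope
    return 0;
}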
@@ -18,12 +18,10 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
    time_manager_event_type = Core::Timing::CreateEvent(
        "Kernel::TimeManagerCallback",
        [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
            const KScopedSchedulerLock lock(system.Kernel());
            const auto proper_handle = static_cast<Handle>(thread_handle);

            std::shared_ptr<Thread> thread;
            {
                std::lock_guard lock{mutex};
                const auto proper_handle = static_cast<Handle>(thread_handle);
                if (cancelled_events[proper_handle]) {
                    return;
                }
@@ -32,7 +30,7 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {

            if (thread) {
                // Thread can be null if process has exited
                thread->OnWakeUp();
                thread->Wakeup();
            }
        });
}
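The TimeManager hunks above take the scheduler lock at the top of the timeout callback and route the wake-up through the unified Thread::Wakeup() path. The condensed model below keeps only the shape of that callback: look up the thread, skip it if the timeout was cancelled, and wake it otherwise. Every type here is a stand-in rather than the emulator's Core::Timing interface.

// Condensed model of a timeout callback that checks cancellation before waking a thread.
// All types are stand-ins invented for this sketch.
#include <cstdint>
#include <iostream>
#include <memory>
#include <mutex>
#include <unordered_map>

struct FakeThread {
    void Wakeup() { std::cout << "thread woken\n"; }
};

class FakeTimeManager {
public:
    void ScheduleTimeout(uint64_t handle, std::shared_ptr<FakeThread> thread) {
        std::lock_guard lk{mutex};
        threads[handle] = std::move(thread);
        cancelled[handle] = false;
    }

    void CancelTimeout(uint64_t handle) {
        std::lock_guard lk{mutex};
        cancelled[handle] = true;
    }

    // What the timing backend would invoke when the deadline fires.
    void OnTimeout(uint64_t handle) {
        // In the real code a KScopedSchedulerLock is acquired here, before any lookup.
        std::shared_ptr<FakeThread> thread;
        {
            std::lock_guard lk{mutex};
            if (cancelled[handle]) {
                return; // the wait completed or was aborted before the deadline
            }
            thread = threads[handle];
        }
        if (thread) { // can be null if the owning process already exited
            thread->Wakeup();
        }
    }

private:
    std::mutex mutex;
    std::unordered_map<uint64_t, std::shared_ptr<FakeThread>> threads;
    std::unordered_map<uint64_t, bool> cancelled;
};

int main() {
    FakeTimeManager tm;
    auto t = std::make_shared<FakeThread>();
    tm.ScheduleTimeout(1, t);
    tm.OnTimeout(1); // prints "thread woken"
    tm.ScheduleTimeout(2, t);
    tm.CancelTimeout(2);
    tm.OnTimeout(2); // does nothing
    return 0;
}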
@@ -15,9 +15,9 @@
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/readable_event.h"
#include "core/hle/kernel/svc_common.h"
#include "core/hle/kernel/thread.h"
#include "core/memory.h"
@@ -116,7 +116,7 @@ QString WaitTreeText::GetText() const {
WaitTreeMutexInfo::WaitTreeMutexInfo(VAddr mutex_address, const Kernel::HandleTable& handle_table)
    : mutex_address(mutex_address) {
    mutex_value = Core::System::GetInstance().Memory().Read32(mutex_address);
    owner_handle = static_cast<Kernel::Handle>(mutex_value & Kernel::Mutex::MutexOwnerMask);
    owner_handle = static_cast<Kernel::Handle>(mutex_value & Kernel::Svc::HandleWaitMask);
    owner = handle_table.Get<Kernel::Thread>(owner_handle);
}
@@ -127,7 +127,7 @@ QString WaitTreeMutexInfo::GetText() const {
}

std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeMutexInfo::GetChildren() const {
    const bool has_waiters = (mutex_value & Kernel::Mutex::MutexHasWaitersFlag) != 0;
    const bool has_waiters = (mutex_value & Kernel::Svc::HandleWaitMask) != 0;

    std::vector<std::unique_ptr<WaitTreeItem>> list;
    list.push_back(std::make_unique<WaitTreeText>(tr("has waiters: %1").arg(has_waiters)));
@@ -324,11 +324,11 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
    list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadID())));
    list.push_back(std::make_unique<WaitTreeText>(tr("priority = %1(current) / %2(normal)")
                                                      .arg(thread.GetPriority())
                                                      .arg(thread.GetNominalPriority())));
                                                      .arg(thread.GetBasePriority())));
    list.push_back(std::make_unique<WaitTreeText>(
        tr("last running ticks = %1").arg(thread.GetLastScheduledTick())));

    const VAddr mutex_wait_address = thread.GetMutexWaitAddress();
    const VAddr mutex_wait_address = thread.GetMutexWaitAddressForDebugging();
    if (mutex_wait_address != 0) {
        const auto& handle_table = thread.GetOwnerProcess()->GetHandleTable();
        list.push_back(std::make_unique<WaitTreeMutexInfo>(mutex_wait_address, handle_table));
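The debugger hunks above (apparently the Qt wait-tree widget, src/yuzu/debugger/wait_tree.cpp) drop the old Kernel::Mutex constants and read the same information from the raw mutex word using Kernel::Svc::HandleWaitMask. A guest mutex word packs the owner's handle together with a has-waiters bit; the sketch below shows one way to decode such a word, with the constant value and helper names assumed for illustration rather than taken from the patch.

// Hedged sketch of decoding a guest mutex word: an owner handle plus a "has waiters" bit.
// The constant value mirrors the usual Horizon convention and is an assumption here.
#include <cstdint>
#include <iostream>

constexpr uint32_t HandleWaitMask = 0x40000000; // set when other threads are queued on the mutex

struct MutexInfo {
    uint32_t owner_handle;
    bool has_waiters;
};

inline MutexInfo DecodeMutexValue(uint32_t mutex_value) {
    return MutexInfo{
        mutex_value & ~HandleWaitMask, // strip the flag bit to recover the owner's handle
        (mutex_value & HandleWaitMask) != 0,
    };
}

int main() {
    const auto info = DecodeMutexValue(0x40001234);
    std::cout << std::hex << "owner handle = 0x" << info.owner_handle << ", has waiters = "
              << std::boolalpha << info.has_waiters << '\n';
    return 0;
}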