mirror of https://git.suyu.dev/suyu/suyu
Merge pull request #6428 from bunnei/service-thread-crash-fix
hle: kernel: Remove service thread manager and use weak_ptr.
This commit is contained in:
commit 3c621d37f0
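
Note: the fix replaces the dedicated service-thread manager with plain shared ownership. The kernel keeps each ServiceThread alive through a shared_ptr stored in a set, sessions hold only a weak_ptr, and every use goes through lock(). Below is a minimal self-contained sketch of that ownership pattern; the Kernel and ServiceThread types here are simplified stand-ins, not the yuzu code.

#include <iostream>
#include <memory>
#include <string>
#include <unordered_set>
#include <utility>

// Stand-in for Kernel::ServiceThread (illustrative only).
struct ServiceThread {
    explicit ServiceThread(std::string name_) : name(std::move(name_)) {}
    void QueueSyncRequest() { std::cout << name << ": request queued\n"; }
    std::string name;
};

// Stand-in for KernelCore: owns threads via shared_ptr, hands out weak_ptr.
class Kernel {
public:
    std::weak_ptr<ServiceThread> CreateServiceThread(const std::string& name) {
        auto thread = std::make_shared<ServiceThread>(name);
        service_threads.emplace(thread);
        return thread;
    }
    void ReleaseServiceThread(std::weak_ptr<ServiceThread> thread) {
        if (auto strong = thread.lock()) {
            service_threads.erase(strong);
        }
    }

private:
    std::unordered_set<std::shared_ptr<ServiceThread>> service_threads;
};

int main() {
    Kernel kernel;
    auto weak = kernel.CreateServiceThread("audio");

    // Normal path: the thread is alive, lock() succeeds.
    if (auto strong = weak.lock()) {
        strong->QueueSyncRequest();
    }

    // After release, lock() fails instead of dereferencing a dead object,
    // which is the crash this change guards against.
    kernel.ReleaseServiceThread(weak);
    if (auto strong = weak.lock()) {
        strong->QueueSyncRequest();
    } else {
        std::cout << "service thread already released; request stubbed\n";
    }
}

The point of the pattern: a session that outlives its service thread observes a failed lock() it can handle gracefully, rather than a dangling pointer.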
src/core/hle/kernel/hle_ipc.cpp
@@ -41,6 +41,21 @@ SessionRequestManager::SessionRequestManager(KernelCore& kernel_) : kernel{kerne
 
 SessionRequestManager::~SessionRequestManager() = default;
 
+bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& context) const {
+    if (IsDomain() && context.HasDomainMessageHeader()) {
+        const auto& message_header = context.GetDomainMessageHeader();
+        const auto object_id = message_header.object_id;
+
+        if (object_id > DomainHandlerCount()) {
+            LOG_CRITICAL(IPC, "object_id {} is too big!", object_id);
+            return false;
+        }
+        return DomainHandler(object_id - 1) != nullptr;
+    } else {
+        return session_handler != nullptr;
+    }
+}
+
 void SessionRequestHandler::ClientConnected(KServerSession* session) {
     session->SetSessionHandler(shared_from_this());
 }
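
The new HasSessionRequestHandler above refuses to dispatch a domain request whose object_id falls outside the domain handler table; object_ids are 1-based, hence the object_id - 1 lookup. Here is a standalone sketch of that bounds check under assumed names (DomainTable and HasHandler are hypothetical, not yuzu types):

#include <cstddef>
#include <iostream>
#include <memory>
#include <vector>

// Stand-in for the domain handler table consulted by HasSessionRequestHandler.
struct DomainTable {
    std::vector<std::shared_ptr<int>> handlers;

    bool HasHandler(std::size_t object_id) const {
        // The diff only checks the upper bound; the == 0 check is added here
        // so this sketch is safe for any input.
        if (object_id == 0 || object_id > handlers.size()) {
            std::cerr << "object_id " << object_id << " is too big!\n";
            return false;
        }
        // object_ids are 1-based, so slot lookup uses object_id - 1.
        return handlers[object_id - 1] != nullptr;
    }
};

int main() {
    DomainTable table;
    table.handlers = {std::make_shared<int>(0), nullptr};
    std::cout << table.HasHandler(1) << ' '   // 1: valid handler
              << table.HasHandler(2) << ' '   // 0: slot exists but was closed
              << table.HasHandler(9) << '\n'; // 0: out of range
}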
src/core/hle/kernel/hle_ipc.h
@@ -85,8 +85,8 @@ public:
      */
     void ClientDisconnected(KServerSession* session);
 
-    std::shared_ptr<ServiceThread> GetServiceThread() const {
-        return service_thread.lock();
+    std::weak_ptr<ServiceThread> GetServiceThread() const {
+        return service_thread;
     }
 
 protected:
@@ -152,10 +152,12 @@ public:
         session_handler = std::move(handler);
     }
 
-    std::shared_ptr<ServiceThread> GetServiceThread() const {
+    std::weak_ptr<ServiceThread> GetServiceThread() const {
         return session_handler->GetServiceThread();
     }
 
+    bool HasSessionRequestHandler(const HLERequestContext& context) const;
+
 private:
     bool is_domain{};
     SessionRequestHandlerPtr session_handler;
@@ -163,7 +165,6 @@ private:
 
 private:
     KernelCore& kernel;
-    std::weak_ptr<ServiceThread> service_thread;
 };
 
 /**
src/core/hle/kernel/k_server_session.cpp
@@ -8,6 +8,7 @@
 #include "common/assert.h"
 #include "common/common_types.h"
 #include "common/logging/log.h"
+#include "common/scope_exit.h"
 #include "core/core_timing.h"
 #include "core/hle/ipc_helpers.h"
 #include "core/hle/kernel/hle_ipc.h"
@@ -119,11 +120,25 @@ ResultCode KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memor
 
     context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf);
 
-    if (auto strong_ptr = manager->GetServiceThread(); strong_ptr) {
-        strong_ptr->QueueSyncRequest(*parent, std::move(context));
-        return ResultSuccess;
-    } else {
-        ASSERT_MSG(false, "strong_ptr was nullptr!");
-    }
+    // In the event that something fails here, stub a result to prevent the game from crashing.
+    // This is a work-around in the event that somehow we process a service request after the
+    // session has been closed by the game. This has been observed to happen rarely in Pokemon
+    // Sword/Shield and is likely a result of us using host threads/scheduling for services.
+    // TODO(bunnei): Find a better solution here.
+    auto error_guard = SCOPE_GUARD({ CompleteSyncRequest(*context); });
+
+    // Ensure we have a session request handler
+    if (manager->HasSessionRequestHandler(*context)) {
+        if (auto strong_ptr = manager->GetServiceThread().lock()) {
+            strong_ptr->QueueSyncRequest(*parent, std::move(context));
+
+            // We succeeded.
+            error_guard.Cancel();
+        } else {
+            ASSERT_MSG(false, "strong_ptr is nullptr!");
+        }
+    } else {
+        ASSERT_MSG(false, "handler is invalid!");
+    }
 
     return ResultSuccess;
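
The error_guard above makes every failure path respond: if the request is never queued, the guard calls CompleteSyncRequest on scope exit so the game receives a stubbed response instead of crashing, and on success the guard is cancelled. yuzu's real SCOPE_GUARD macro lives in common/scope_exit.h; the following hand-rolled equivalent only illustrates the mechanism.

#include <iostream>
#include <utility>

// Cancellable scope guard: runs the callable on destruction unless Cancel()ed.
template <typename F>
class ScopeGuard {
public:
    explicit ScopeGuard(F func_) : func(std::move(func_)) {}
    ~ScopeGuard() {
        if (active) {
            func();
        }
    }
    void Cancel() { active = false; }

    ScopeGuard(const ScopeGuard&) = delete;
    ScopeGuard& operator=(const ScopeGuard&) = delete;

private:
    F func;
    bool active = true;
};

bool QueueRequest(bool handler_is_valid) {
    // If anything below fails, the guard stubs a response on scope exit.
    ScopeGuard error_guard{[] { std::cout << "stubbing response\n"; }};

    if (!handler_is_valid) {
        return false; // guard fires: the caller still gets a response
    }

    std::cout << "request queued\n";
    error_guard.Cancel(); // success: suppress the fallback
    return true;
}

int main() {
    QueueRequest(true);  // prints "request queued"
    QueueRequest(false); // prints "stubbing response"
}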
@@ -131,7 +146,9 @@ ResultCode KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memor
 
 ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) {
     ResultCode result = ResultSuccess;
+
     // If the session has been converted to a domain, handle the domain request
-    if (IsDomain() && context.HasDomainMessageHeader()) {
-        result = HandleDomainSyncRequest(context);
+    if (manager->HasSessionRequestHandler(context)) {
+        if (IsDomain() && context.HasDomainMessageHeader()) {
+            result = HandleDomainSyncRequest(context);
         // If there is no domain header, the regular session handler is used
@@ -139,6 +156,11 @@ ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) {
         // If this ServerSession has an associated HLE handler, forward the request to it.
         result = manager->SessionHandler().HandleSyncRequest(*this, context);
     }
+    } else {
+        ASSERT_MSG(false, "Session handler is invalid, stubbing response!");
+        IPC::ResponseBuilder rb(context, 2);
+        rb.Push(ResultSuccess);
+    }
 
     if (convert_to_domain) {
         ASSERT_MSG(!IsDomain(), "ServerSession is already a domain instance.");
src/core/hle/kernel/kernel.cpp
@@ -63,8 +63,6 @@ struct KernelCore::Impl {
         global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
         global_handle_table = std::make_unique<Kernel::KHandleTable>(kernel);
 
-        service_thread_manager =
-            std::make_unique<Common::ThreadWorker>(1, "yuzu:ServiceThreadManager");
         is_phantom_mode_for_singlecore = false;
 
         InitializePhysicalCores();
@@ -96,7 +94,6 @@ struct KernelCore::Impl {
         process_list.clear();
 
         // Ensures all service threads gracefully shutdown
-        service_thread_manager.reset();
         service_threads.clear();
 
         next_object_id = 0;
@@ -680,10 +677,6 @@ struct KernelCore::Impl {
     // Threads used for services
     std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads;
 
-    // Service threads are managed by a worker thread, so that a calling service thread can queue up
-    // the release of itself
-    std::unique_ptr<Common::ThreadWorker> service_thread_manager;
-
     std::array<KThread*, Core::Hardware::NUM_CPU_CORES> suspend_threads;
    std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
     std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
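
The removed comment records why the worker existed at all: a service thread could queue up the release of itself, so registry mutations were bounced to a dedicated single-thread worker instead of running on the caller. For context, here is a rough standalone sketch of that deferred-release shape (illustrative only; Common::ThreadWorker's real interface differs):

#include <condition_variable>
#include <functional>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>

// One-thread work queue in the spirit of the removed manager: callers hand
// off registry mutations instead of performing them on their own thread.
class ThreadWorker {
public:
    ThreadWorker() : thread([this] { Loop(); }) {}
    ~ThreadWorker() {
        {
            std::lock_guard lock{mutex};
            stop = true;
        }
        cv.notify_one();
        thread.join(); // drains remaining work: the "graceful shutdown"
    }
    void QueueWork(std::function<void()> work) {
        {
            std::lock_guard lock{mutex};
            queue.push(std::move(work));
        }
        cv.notify_one();
    }

private:
    void Loop() {
        std::unique_lock lock{mutex};
        while (true) {
            cv.wait(lock, [this] { return stop || !queue.empty(); });
            if (queue.empty() && stop) {
                return;
            }
            auto work = std::move(queue.front());
            queue.pop();
            lock.unlock();
            work(); // run outside the lock so work may queue more work
            lock.lock();
        }
    }

    std::mutex mutex;
    std::condition_variable cv;
    std::queue<std::function<void()>> queue;
    bool stop = false;
    std::thread thread;
};

int main() {
    ThreadWorker worker;
    worker.QueueWork([] { std::cout << "release queued from elsewhere\n"; });
} // destructor runs pending work, then joins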
@@ -986,17 +979,14 @@ void KernelCore::ExitSVCProfile() {
 
 std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {
     auto service_thread = std::make_shared<Kernel::ServiceThread>(*this, 1, name);
-    impl->service_thread_manager->QueueWork(
-        [this, service_thread] { impl->service_threads.emplace(service_thread); });
+    impl->service_threads.emplace(service_thread);
     return service_thread;
 }
 
 void KernelCore::ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) {
-    impl->service_thread_manager->QueueWork([this, service_thread] {
-        if (auto strong_ptr = service_thread.lock()) {
-            impl->service_threads.erase(strong_ptr);
-        }
-    });
+    if (auto strong_ptr = service_thread.lock()) {
+        impl->service_threads.erase(strong_ptr);
+    }
 }
 
 Init::KSlabResourceCounts& KernelCore::SlabResourceCounts() {