hle: kernel: Refactor several threads/events/sharedmemory to use slab heaps.
commit 89edbe8aa2
parent b6156e735c
@@ -233,7 +233,7 @@ struct System::Impl {
         }

         telemetry_session->AddInitialInfo(*app_loader, fs_controller, *content_provider);
-        auto main_process = Kernel::Process::CreateWithKernel(system.Kernel());
+        auto main_process = Kernel::Process::Create(system.Kernel());
         ASSERT(Kernel::Process::Initialize(main_process, system, "main",
                                            Kernel::Process::ProcessType::Userland)
                    .IsSuccess());
@@ -618,14 +618,17 @@ KScheduler::KScheduler(Core::System& system, s32 core_id) : system(system), core
 }

 KScheduler::~KScheduler() {
-    idle_thread->Close();
+    if (idle_thread) {
+        idle_thread->Close();
+        idle_thread = nullptr;
+    }
 }

 KThread* KScheduler::GetCurrentThread() const {
     if (auto result = current_thread.load(); result) {
         return result;
     }
-    return idle_thread.get();
+    return idle_thread;
 }

 u64 KScheduler::GetLastContextSwitchTicks() const {
@@ -710,7 +713,7 @@ void KScheduler::ScheduleImpl() {

     // We never want to schedule a null thread, so use the idle thread if we don't have a next.
     if (next_thread == nullptr) {
-        next_thread = idle_thread.get();
+        next_thread = idle_thread;
     }

     // If we're not actually switching thread, there's nothing to do.
@@ -771,7 +774,7 @@ void KScheduler::SwitchToCurrent() {
                 break;
             }
         }
-        auto thread = next_thread ? next_thread : idle_thread.get();
+        auto thread = next_thread ? next_thread : idle_thread;
         Common::Fiber::YieldTo(switch_fiber, *thread->GetHostContext());
     } while (!is_switch_pending());
 }
@@ -794,9 +797,8 @@ void KScheduler::UpdateLastContextSwitchTime(KThread* thread, Process* process)
 }

 void KScheduler::Initialize() {
-    idle_thread = std::make_unique<KThread>(system.Kernel());
-    KAutoObject::Create(idle_thread.get());
-    ASSERT(KThread::InitializeIdleThread(system, idle_thread.get(), core_id).IsSuccess());
+    idle_thread = KThread::Create(system.Kernel());
+    ASSERT(KThread::InitializeIdleThread(system, idle_thread, core_id).IsSuccess());
     idle_thread->SetName(fmt::format("IdleThread:{}", core_id));
 }

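The scheduler hunks above hand ownership of the idle thread to the kernel's per-type slab heap: idle_thread becomes a raw KThread* obtained from KThread::Create, and the destructor drops the reference with Close() and nulls the pointer instead of letting a std::unique_ptr destroy the object. As a rough illustration of what a fixed-capacity slab heap of this shape does (a simplified, hypothetical sketch; the real KSlabHeap in this tree is more involved):

#include <array>
#include <cstddef>
#include <new>

// Simplified, hypothetical slab heap: every object of type T lives in preallocated
// storage, and Allocate()/Free() just pop/push a free list. This is why callers now
// receive raw pointers and must pair Create() with an explicit Close().
template <typename T, std::size_t Capacity>
class SimpleSlabHeap {
public:
    SimpleSlabHeap() {
        // Thread all slots onto the free list.
        for (auto& slot : slots) {
            slot.next = free_head;
            free_head = &slot;
        }
    }

    T* Allocate() {
        if (free_head == nullptr) {
            return nullptr; // Slab exhausted; SVC callers surface this as an out-of-resource error.
        }
        Slot* slot = free_head;
        free_head = slot->next;
        return new (slot->storage) T(); // Construct the object in place (real kernel objects take more arguments).
    }

    void Free(T* obj) {
        obj->~T();
        // storage is the first member, so the object address is also the slot address.
        auto* slot = reinterpret_cast<Slot*>(obj);
        slot->next = free_head;
        free_head = slot;
    }

private:
    struct Slot {
        alignas(T) std::byte storage[sizeof(T)];
        Slot* next = nullptr;
    };
    std::array<Slot, Capacity> slots{};
    Slot* free_head = nullptr;
};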
@@ -51,7 +51,7 @@ public:

     /// Returns true if the scheduler is idle
     [[nodiscard]] bool IsIdle() const {
-        return GetCurrentThread() == idle_thread.get();
+        return GetCurrentThread() == idle_thread;
     }

     /// Gets the timestamp for the last context switch in ticks.
@@ -173,7 +173,7 @@ private:
     KThread* prev_thread{};
     std::atomic<KThread*> current_thread{};

-    std::unique_ptr<KThread> idle_thread;
+    KThread* idle_thread{};

     std::shared_ptr<Common::Fiber> switch_fiber{};

@@ -102,15 +102,21 @@ struct KernelCore::Impl {
         next_user_process_id = Process::ProcessIDMin;
         next_thread_id = 1;

-        for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
-            if (suspend_threads[i]) {
-                suspend_threads[i]->Close();
+        for (s32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+            if (suspend_threads[core_id]) {
+                suspend_threads[core_id]->Close();
+                suspend_threads[core_id] = nullptr;
             }

+            schedulers[core_id].reset();
         }

         cores.clear();

-        current_process = nullptr;
+        if (current_process) {
+            current_process->Close();
+            current_process = nullptr;
+        }

         global_handle_table.Clear();
@@ -195,10 +201,9 @@ struct KernelCore::Impl {

     void InitializeSuspendThreads() {
         for (s32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
-            suspend_threads[core_id] = std::make_unique<KThread>(system.Kernel());
-            KAutoObject::Create(suspend_threads[core_id].get());
-            ASSERT(KThread::InitializeHighPriorityThread(system, suspend_threads[core_id].get(), {},
-                                                         {}, core_id)
+            suspend_threads[core_id] = KThread::Create(system.Kernel());
+            ASSERT(KThread::InitializeHighPriorityThread(system, suspend_threads[core_id], {}, {},
+                                                         core_id)
                        .IsSuccess());
             suspend_threads[core_id]->SetName(fmt::format("SuspendThread:{}", core_id));
         }
@@ -577,15 +582,10 @@ struct KernelCore::Impl {
         const PAddr irs_phys_addr{system_pool.GetAddress() + hid_size + font_size};
         const PAddr time_phys_addr{system_pool.GetAddress() + hid_size + font_size + irs_size};

-        hid_shared_mem = std::make_unique<KSharedMemory>(system.Kernel());
-        font_shared_mem = std::make_unique<KSharedMemory>(system.Kernel());
-        irs_shared_mem = std::make_unique<KSharedMemory>(system.Kernel());
-        time_shared_mem = std::make_unique<KSharedMemory>(system.Kernel());
-
-        KAutoObject::Create(hid_shared_mem.get());
-        KAutoObject::Create(font_shared_mem.get());
-        KAutoObject::Create(irs_shared_mem.get());
-        KAutoObject::Create(time_shared_mem.get());
+        hid_shared_mem = KSharedMemory::Create(system.Kernel());
+        font_shared_mem = KSharedMemory::Create(system.Kernel());
+        irs_shared_mem = KSharedMemory::Create(system.Kernel());
+        time_shared_mem = KSharedMemory::Create(system.Kernel());

         hid_shared_mem->Initialize(system.Kernel(), system.DeviceMemory(), nullptr,
                                    {hid_phys_addr, hid_size / PageSize}, KMemoryPermission::None,
@@ -656,10 +656,10 @@ struct KernelCore::Impl {
     std::unique_ptr<KSlabHeap<Page>> user_slab_heap_pages;

     // Shared memory for services
-    std::unique_ptr<Kernel::KSharedMemory> hid_shared_mem;
-    std::unique_ptr<Kernel::KSharedMemory> font_shared_mem;
-    std::unique_ptr<Kernel::KSharedMemory> irs_shared_mem;
-    std::unique_ptr<Kernel::KSharedMemory> time_shared_mem;
+    Kernel::KSharedMemory* hid_shared_mem{};
+    Kernel::KSharedMemory* font_shared_mem{};
+    Kernel::KSharedMemory* irs_shared_mem{};
+    Kernel::KSharedMemory* time_shared_mem{};

     // Threads used for services
     std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads;
@@ -668,7 +668,7 @@ struct KernelCore::Impl {
     // the release of itself
     std::unique_ptr<Common::ThreadWorker> service_thread_manager;

-    std::array<std::unique_ptr<KThread>, Core::Hardware::NUM_CPU_CORES> suspend_threads;
+    std::array<KThread*, Core::Hardware::NUM_CPU_CORES> suspend_threads;
     std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
     std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};

@@ -938,9 +938,9 @@ void KernelCore::Suspend(bool in_suspention) {
     {
         KScopedSchedulerLock lock(*this);
         const auto state = should_suspend ? ThreadState::Runnable : ThreadState::Waiting;
-        for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
-            impl->suspend_threads[i]->SetState(state);
-            impl->suspend_threads[i]->SetWaitReasonForDebugging(
+        for (s32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+            impl->suspend_threads[core_id]->SetState(state);
+            impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
                 ThreadWaitReasonForDebugging::Suspended);
         }
     }
@@ -41,7 +41,7 @@ void SetupMainThread(Core::System& system, Process& owner_process, u32 priority,
     const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart();
     ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::Threads, 1));

-    KThread* thread = KThread::CreateWithKernel(system.Kernel());
+    KThread* thread = KThread::Create(system.Kernel());
     ASSERT(KThread::InitializeUserThread(system, thread, entry_point, 0, stack_top, priority,
                                          owner_process.GetIdealCoreId(), &owner_process)
                .IsSuccess());
@@ -67,10 +67,6 @@ class KAutoObjectWithSlabHeapAndContainer : public Base {

private:
    static Derived* Allocate(KernelCore& kernel) {
-        return kernel.SlabHeap<Derived>().Allocate();
-    }
-
-    static Derived* AllocateWithKernel(KernelCore& kernel) {
        return kernel.SlabHeap<Derived>().AllocateWithKernel(kernel);
    }

@@ -120,16 +116,8 @@ public:
         kernel.ObjectListContainer().Initialize();
     }

-    static Derived* Create() {
-        Derived* obj = Allocate();
-        if (obj != nullptr) {
-            KAutoObject::Create(obj);
-        }
-        return obj;
-    }
-
-    static Derived* CreateWithKernel(KernelCore& kernel) {
-        Derived* obj = AllocateWithKernel(kernel);
+    static Derived* Create(KernelCore& kernel) {
+        Derived* obj = Allocate(kernel);
         if (obj != nullptr) {
             KAutoObject::Create(obj);
         }
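With the two allocation paths merged above, every slab-backed kernel object is created the same way: Create(kernel) allocates from the per-type slab heap via AllocateWithKernel and then opens the object with KAutoObject::Create, returning a raw pointer (or nullptr once the slab is exhausted). A hedged caller-side sketch of the resulting lifecycle, modeled on the CreateEvent change further down; the wrapper function and its name are illustrative, not code from the tree:

// Illustrative wrapper showing the lifecycle of a slab-backed object after this
// refactor. The helper function itself is hypothetical.
Kernel::KEvent* MakeNamedEvent(Kernel::KernelCore& kernel, std::string name) {
    // Create() allocates from the KEvent slab heap and opens the object.
    Kernel::KEvent* event = Kernel::KEvent::Create(kernel);
    if (event == nullptr) {
        return nullptr; // Slab exhausted; SVC callers map this to ResultOutOfResource.
    }

    // Give the event its debug name, as the npad/nvdrv call sites do.
    event->Initialize(std::move(name));
    return event;
}

// The owner now drops its reference explicitly when done, instead of relying on
// a std::unique_ptr destructor:
//     event->Close();
//     event = nullptr;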
@@ -1431,7 +1431,7 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
     }

     // Create the thread.
-    KThread* thread = KThread::CreateWithKernel(kernel);
+    KThread* thread = KThread::Create(kernel);
     if (!thread) {
         LOG_ERROR(Kernel_SVC, "Unable to create new threads. Thread creation limit reached.");
         return ResultOutOfResource;
@@ -1953,7 +1953,7 @@ static ResultCode CreateEvent(Core::System& system, Handle* out_write, Handle* o
     HandleTable& handle_table = kernel.CurrentProcess()->GetHandleTable();

     // Create a new event.
-    KEvent* event = KEvent::CreateWithKernel(kernel);
+    KEvent* event = KEvent::Create(kernel);
     R_UNLESS(event != nullptr, ResultOutOfResource);

     // Initialize the event.
@@ -253,8 +253,7 @@ void Controller_NPad::InitNewlyAddedController(std::size_t controller_idx) {
 void Controller_NPad::OnInit() {
     auto& kernel = system.Kernel();
     for (std::size_t i = 0; i < styleset_changed_events.size(); ++i) {
-        styleset_changed_events[i] = std::make_unique<Kernel::KEvent>(kernel);
-        Kernel::KAutoObject::Create(styleset_changed_events[i].get());
+        styleset_changed_events[i] = Kernel::KEvent::Create(kernel);
         styleset_changed_events[i]->Initialize(fmt::format("npad:NpadStyleSetChanged_{}", i));
     }

@@ -341,6 +340,11 @@ void Controller_NPad::OnRelease() {
             VibrateControllerAtIndex(npad_idx, device_idx, {});
         }
     }
+
+    for (std::size_t i = 0; i < styleset_changed_events.size(); ++i) {
+        styleset_changed_events[i]->Close();
+        styleset_changed_events[i] = nullptr;
+    }
 }

 void Controller_NPad::RequestPadStateUpdate(u32 npad_id) {
@@ -573,8 +573,9 @@ private:
     NpadHandheldActivationMode handheld_activation_mode{NpadHandheldActivationMode::Dual};
     NpadCommunicationMode communication_mode{NpadCommunicationMode::Default};
     // Each controller should have their own styleset changed event
-    std::array<std::unique_ptr<Kernel::KEvent>, 10> styleset_changed_events;
-    std::array<std::array<std::chrono::steady_clock::time_point, 2>, 10> last_vibration_timepoints;
+    std::array<Kernel::KEvent*, 10> styleset_changed_events{};
+    std::array<std::array<std::chrono::steady_clock::time_point, 2>, 10>
+        last_vibration_timepoints{};
     std::array<std::array<VibrationValue, 2>, 10> latest_vibration_values{};
     bool permit_vibration_session_enabled{false};
     std::array<std::array<bool, 2>, 10> vibration_devices_mounted{};
@@ -42,8 +42,7 @@ void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger
 Module::Module(Core::System& system) : syncpoint_manager{system.GPU()} {
     auto& kernel = system.Kernel();
     for (u32 i = 0; i < MaxNvEvents; i++) {
-        events_interface.events[i].event = std::make_unique<Kernel::KEvent>(kernel);
-        Kernel::KAutoObject::Create(events_interface.events[i].event.get());
+        events_interface.events[i].event = Kernel::KEvent::Create(kernel);
         events_interface.events[i].event->Initialize(fmt::format("NVDRV::NvEvent_{}", i));
         events_interface.status[i] = EventState::Free;
         events_interface.registered[i] = false;
@@ -35,7 +35,7 @@ class nvdevice;

 /// Represents an Nvidia event
 struct NvEvent {
-    std::unique_ptr<Kernel::KEvent> event;
+    Kernel::KEvent* event{};
     Fence fence{};
 };

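The HID and nvdrv hunks share one service-side pattern: event members become raw Kernel::KEvent* values, created and named once during initialization, then explicitly closed and nulled when the owner is released. A condensed sketch of that pattern, with a hypothetical class and member names that are not from the tree:

#include <array>
#include <cstddef>
#include <fmt/format.h>

// Condensed, hypothetical example of the service-side lifecycle this commit
// converges on (see the npad OnInit/OnRelease and nvdrv Module hunks above).
class ExampleEventOwner {
public:
    explicit ExampleEventOwner(Core::System& system_) : system{system_} {}

    void OnInit() {
        auto& kernel = system.Kernel();
        for (std::size_t i = 0; i < events.size(); ++i) {
            // Allocate from the KEvent slab heap and name the event.
            events[i] = Kernel::KEvent::Create(kernel);
            events[i]->Initialize(fmt::format("Example:Event_{}", i));
        }
    }

    void OnRelease() {
        for (std::size_t i = 0; i < events.size(); ++i) {
            if (events[i]) {
                // Drop the reference and clear the pointer so a stale KEvent*
                // can never be touched after shutdown.
                events[i]->Close();
                events[i] = nullptr;
            }
        }
    }

private:
    Core::System& system;
    std::array<Kernel::KEvent*, 10> events{};
};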