forked from ShuriZma/suyu
Merge pull request #5953 from bunnei/memory-refactor-1
Kernel Rework: Memory updates and refactoring (Part 1)
commit 09f7c355c6
@@ -167,6 +167,7 @@ add_library(common STATIC
    threadsafe_queue.h
    time_zone.cpp
    time_zone.h
    tiny_mt.h
    tree.h
    uint128.h
    uuid.cpp

@@ -42,6 +42,11 @@ requires std::is_integral_v<T>[[nodiscard]] constexpr bool IsAligned(T value, si
    return (value & mask) == 0;
}

template <typename T, typename U>
requires std::is_integral_v<T>
[[nodiscard]] constexpr T DivideUp(T x, U y) {
    return (x + (y - 1)) / y;
}

template <typename T, size_t Align = 16>
class AlignmentAllocator {
public:

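The hunk above adds a DivideUp helper next to IsAligned; it rounds an integer division up, which the kernel memory code later in this commit uses for page and block counts. A minimal sketch of the intended behaviour (not part of the commit; it assumes the helper sits in namespace Common like the rest of alignment.h):

    // Illustrative only: DivideUp(x, y) == ceil(x / y) for non-negative x and positive y.
    static_assert(Common::DivideUp(4096, 4096) == 1);
    static_assert(Common::DivideUp(4097, 4096) == 2);
    static_assert(Common::DivideUp(0, 4096) == 0);
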
@@ -0,0 +1,250 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>

#include "common/alignment.h"
#include "common/common_types.h"

namespace Common {

// Implementation of TinyMT (mersenne twister RNG).
// Like Nintendo, we will use the sample parameters.
class TinyMT {
public:
    static constexpr std::size_t NumStateWords = 4;

    struct State {
        std::array<u32, NumStateWords> data{};
    };

private:
    static constexpr u32 ParamMat1 = 0x8F7011EE;
    static constexpr u32 ParamMat2 = 0xFC78FF1F;
    static constexpr u32 ParamTmat = 0x3793FDFF;

    static constexpr u32 ParamMult = 0x6C078965;
    static constexpr u32 ParamPlus = 0x0019660D;
    static constexpr u32 ParamXor = 0x5D588B65;

    static constexpr u32 TopBitmask = 0x7FFFFFFF;

    static constexpr int MinimumInitIterations = 8;
    static constexpr int NumDiscardedInitOutputs = 8;

    static constexpr u32 XorByShifted27(u32 value) {
        return value ^ (value >> 27);
    }

    static constexpr u32 XorByShifted30(u32 value) {
        return value ^ (value >> 30);
    }

private:
    State state{};

private:
    // Internal API.
    void FinalizeInitialization() {
        const u32 state0 = this->state.data[0] & TopBitmask;
        const u32 state1 = this->state.data[1];
        const u32 state2 = this->state.data[2];
        const u32 state3 = this->state.data[3];

        if (state0 == 0 && state1 == 0 && state2 == 0 && state3 == 0) {
            this->state.data[0] = 'T';
            this->state.data[1] = 'I';
            this->state.data[2] = 'N';
            this->state.data[3] = 'Y';
        }

        for (int i = 0; i < NumDiscardedInitOutputs; i++) {
            this->GenerateRandomU32();
        }
    }

    u32 GenerateRandomU24() {
        return (this->GenerateRandomU32() >> 8);
    }

    static void GenerateInitialValuePlus(TinyMT::State* state, int index, u32 value) {
        u32& state0 = state->data[(index + 0) % NumStateWords];
        u32& state1 = state->data[(index + 1) % NumStateWords];
        u32& state2 = state->data[(index + 2) % NumStateWords];
        u32& state3 = state->data[(index + 3) % NumStateWords];

        const u32 x = XorByShifted27(state0 ^ state1 ^ state3) * ParamPlus;
        const u32 y = x + index + value;

        state0 = y;
        state1 += x;
        state2 += y;
    }

    static void GenerateInitialValueXor(TinyMT::State* state, int index) {
        u32& state0 = state->data[(index + 0) % NumStateWords];
        u32& state1 = state->data[(index + 1) % NumStateWords];
        u32& state2 = state->data[(index + 2) % NumStateWords];
        u32& state3 = state->data[(index + 3) % NumStateWords];

        const u32 x = XorByShifted27(state0 + state1 + state3) * ParamXor;
        const u32 y = x - index;

        state0 = y;
        state1 ^= x;
        state2 ^= y;
    }

public:
    constexpr TinyMT() = default;

    // Public API.

    // Initialization.
    void Initialize(u32 seed) {
        this->state.data[0] = seed;
        this->state.data[1] = ParamMat1;
        this->state.data[2] = ParamMat2;
        this->state.data[3] = ParamTmat;

        for (int i = 1; i < MinimumInitIterations; i++) {
            const u32 mixed = XorByShifted30(this->state.data[(i - 1) % NumStateWords]);
            this->state.data[i % NumStateWords] ^= mixed * ParamMult + i;
        }

        this->FinalizeInitialization();
    }

    void Initialize(const u32* seed, int seed_count) {
        this->state.data[0] = 0;
        this->state.data[1] = ParamMat1;
        this->state.data[2] = ParamMat2;
        this->state.data[3] = ParamTmat;

        {
            const int num_init_iterations = std::max(seed_count + 1, MinimumInitIterations) - 1;

            GenerateInitialValuePlus(&this->state, 0, seed_count);

            for (int i = 0; i < num_init_iterations; i++) {
                GenerateInitialValuePlus(&this->state, (i + 1) % NumStateWords,
                                         (i < seed_count) ? seed[i] : 0);
            }

            for (int i = 0; i < static_cast<int>(NumStateWords); i++) {
                GenerateInitialValueXor(&this->state,
                                        (i + 1 + num_init_iterations) % NumStateWords);
            }
        }

        this->FinalizeInitialization();
    }

    // State management.
    void GetState(TinyMT::State& out) const {
        out.data = this->state.data;
    }

    void SetState(const TinyMT::State& state_) {
        this->state.data = state_.data;
    }

    // Random generation.
    void GenerateRandomBytes(void* dst, std::size_t size) {
        const uintptr_t start = reinterpret_cast<uintptr_t>(dst);
        const uintptr_t end = start + size;
        const uintptr_t aligned_start = Common::AlignUp(start, 4);
        const uintptr_t aligned_end = Common::AlignDown(end, 4);

        // Make sure we're aligned.
        if (start < aligned_start) {
            const u32 rnd = this->GenerateRandomU32();
            std::memcpy(dst, &rnd, aligned_start - start);
        }

        // Write as many aligned u32s as we can.
        {
            u32* cur_dst = reinterpret_cast<u32*>(aligned_start);
            u32* const end_dst = reinterpret_cast<u32*>(aligned_end);

            while (cur_dst < end_dst) {
                *(cur_dst++) = this->GenerateRandomU32();
            }
        }

        // Handle any leftover unaligned data.
        if (aligned_end < end) {
            const u32 rnd = this->GenerateRandomU32();
            std::memcpy(reinterpret_cast<void*>(aligned_end), &rnd, end - aligned_end);
        }
    }

    u32 GenerateRandomU32() {
        // Advance state.
        const u32 x0 =
            (this->state.data[0] & TopBitmask) ^ this->state.data[1] ^ this->state.data[2];
        const u32 y0 = this->state.data[3];
        const u32 x1 = x0 ^ (x0 << 1);
        const u32 y1 = y0 ^ (y0 >> 1) ^ x1;

        const u32 state0 = this->state.data[1];
        u32 state1 = this->state.data[2];
        u32 state2 = x1 ^ (y1 << 10);
        const u32 state3 = y1;

        if ((y1 & 1) != 0) {
            state1 ^= ParamMat1;
            state2 ^= ParamMat2;
        }

        this->state.data[0] = state0;
        this->state.data[1] = state1;
        this->state.data[2] = state2;
        this->state.data[3] = state3;

        // Temper.
        const u32 t1 = state0 + (state2 >> 8);
        u32 t0 = state3 ^ t1;

        if ((t1 & 1) != 0) {
            t0 ^= ParamTmat;
        }

        return t0;
    }

    u64 GenerateRandomU64() {
        const u32 lo = this->GenerateRandomU32();
        const u32 hi = this->GenerateRandomU32();
        return (u64{hi} << 32) | u64{lo};
    }

    float GenerateRandomF32() {
        // Floats have 24 bits of mantissa.
        constexpr u32 MantissaBits = 24;
        return static_cast<float>(GenerateRandomU24()) * (1.0f / (1U << MantissaBits));
    }

    double GenerateRandomF64() {
        // Doubles have 53 bits of mantissa.
        // The smart way to generate 53 bits of random would be to use 32 bits
        // from the first rnd32() call, and then 21 from the second.
        // Nintendo does not. They use (32 - 5) = 27 bits from the first rnd32()
        // call, and (32 - 6) bits from the second. We'll do what they do, but
        // There's not a clear reason why.
        constexpr u32 MantissaBits = 53;
        constexpr u32 Shift1st = (64 - MantissaBits) / 2;
        constexpr u32 Shift2nd = (64 - MantissaBits) - Shift1st;

        const u32 first = (this->GenerateRandomU32() >> Shift1st);
        const u32 second = (this->GenerateRandomU32() >> Shift2nd);

        return (1.0 * first * (u64{1} << (32 - Shift2nd)) + second) *
               (1.0 / (u64{1} << MantissaBits));
    }
};

} // namespace Common

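The new header above is a self-contained TinyMT (tiny Mersenne Twister) generator. A short usage sketch (not part of the commit; the seed value and buffer size are arbitrary):

    #include "common/tiny_mt.h"

    void TinyMtExample() {
        Common::TinyMT rng;
        rng.Initialize(0x12345678);                      // deterministic seed
        const u32 word = rng.GenerateRandomU32();        // one 32-bit output
        const double unit = rng.GenerateRandomF64();     // uniform value in [0, 1)
        u8 buffer[13];
        rng.GenerateRandomBytes(buffer, sizeof(buffer)); // handles unaligned head/tail
        (void)word;
        (void)unit;
    }
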
@@ -156,6 +156,8 @@ add_library(core STATIC
    hle/kernel/hle_ipc.h
    hle/kernel/k_address_arbiter.cpp
    hle/kernel/k_address_arbiter.h
    hle/kernel/k_address_space_info.cpp
    hle/kernel/k_address_space_info.h
    hle/kernel/k_affinity_mask.h
    hle/kernel/k_condition_variable.cpp
    hle/kernel/k_condition_variable.h

@@ -164,6 +166,18 @@ add_library(core STATIC
    hle/kernel/k_light_condition_variable.h
    hle/kernel/k_light_lock.cpp
    hle/kernel/k_light_lock.h
    hle/kernel/k_memory_block.h
    hle/kernel/k_memory_block_manager.cpp
    hle/kernel/k_memory_block_manager.h
    hle/kernel/k_memory_layout.h
    hle/kernel/k_memory_manager.cpp
    hle/kernel/k_memory_manager.h
    hle/kernel/k_page_bitmap.h
    hle/kernel/k_page_heap.cpp
    hle/kernel/k_page_heap.h
    hle/kernel/k_page_linked_list.h
    hle/kernel/k_page_table.cpp
    hle/kernel/k_page_table.h
    hle/kernel/k_priority_queue.h
    hle/kernel/k_readable_event.cpp
    hle/kernel/k_readable_event.h

@@ -175,8 +189,15 @@ add_library(core STATIC
    hle/kernel/k_scoped_lock.h
    hle/kernel/k_scoped_resource_reservation.h
    hle/kernel/k_scoped_scheduler_lock_and_sleep.h
    hle/kernel/k_shared_memory.cpp
    hle/kernel/k_shared_memory.h
    hle/kernel/k_slab_heap.h
    hle/kernel/k_spin_lock.cpp
    hle/kernel/k_spin_lock.h
    hle/kernel/k_synchronization_object.cpp
    hle/kernel/k_synchronization_object.h
    hle/kernel/k_system_control.cpp
    hle/kernel/k_system_control.h
    hle/kernel/k_thread.cpp
    hle/kernel/k_thread.h
    hle/kernel/k_thread_queue.h

@@ -184,23 +205,7 @@ add_library(core STATIC
    hle/kernel/k_writable_event.h
    hle/kernel/kernel.cpp
    hle/kernel/kernel.h
    hle/kernel/memory/address_space_info.cpp
    hle/kernel/memory/address_space_info.h
    hle/kernel/memory/memory_block.h
    hle/kernel/memory/memory_block_manager.cpp
    hle/kernel/memory/memory_block_manager.h
    hle/kernel/memory/memory_layout.h
    hle/kernel/memory/memory_manager.cpp
    hle/kernel/memory/memory_manager.h
    hle/kernel/memory/memory_types.h
    hle/kernel/memory/page_linked_list.h
    hle/kernel/memory/page_heap.cpp
    hle/kernel/memory/page_heap.h
    hle/kernel/memory/page_table.cpp
    hle/kernel/memory/page_table.h
    hle/kernel/memory/slab_heap.h
    hle/kernel/memory/system_control.cpp
    hle/kernel/memory/system_control.h
    hle/kernel/memory_types.h
    hle/kernel/object.cpp
    hle/kernel/object.h
    hle/kernel/physical_core.cpp

@@ -218,8 +223,6 @@ add_library(core STATIC
    hle/kernel/service_thread.h
    hle/kernel/session.cpp
    hle/kernel/session.h
    hle/kernel/shared_memory.cpp
    hle/kernel/shared_memory.h
    hle/kernel/svc.cpp
    hle/kernel/svc.h
    hle/kernel/svc_common.h

@@ -2,15 +2,12 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#include <array>

#include "common/assert.h"
#include "core/hle/kernel/memory/address_space_info.h"
#include "core/hle/kernel/k_address_space_info.h"

namespace Kernel::Memory {
namespace Kernel {

namespace {

@@ -28,20 +25,20 @@ enum : u64 {
};

// clang-format off
constexpr std::array<AddressSpaceInfo, 13> AddressSpaceInfos{{
    { .bit_width = 32, .address = Size_2_MB , .size = Size_1_GB - Size_2_MB , .type = AddressSpaceInfo::Type::Is32Bit, },
    { .bit_width = 32, .address = Size_1_GB , .size = Size_4_GB - Size_1_GB , .type = AddressSpaceInfo::Type::Small64Bit, },
    { .bit_width = 32, .address = Invalid , .size = Size_1_GB , .type = AddressSpaceInfo::Type::Heap, },
    { .bit_width = 32, .address = Invalid , .size = Size_1_GB , .type = AddressSpaceInfo::Type::Alias, },
    { .bit_width = 36, .address = Size_128_MB, .size = Size_2_GB - Size_128_MB, .type = AddressSpaceInfo::Type::Is32Bit, },
    { .bit_width = 36, .address = Size_2_GB , .size = Size_64_GB - Size_2_GB , .type = AddressSpaceInfo::Type::Small64Bit, },
    { .bit_width = 36, .address = Invalid , .size = Size_6_GB , .type = AddressSpaceInfo::Type::Heap, },
    { .bit_width = 36, .address = Invalid , .size = Size_6_GB , .type = AddressSpaceInfo::Type::Alias, },
    { .bit_width = 39, .address = Size_128_MB, .size = Size_512_GB - Size_128_MB, .type = AddressSpaceInfo::Type::Large64Bit, },
    { .bit_width = 39, .address = Invalid , .size = Size_64_GB , .type = AddressSpaceInfo::Type::Is32Bit },
    { .bit_width = 39, .address = Invalid , .size = Size_6_GB , .type = AddressSpaceInfo::Type::Heap, },
    { .bit_width = 39, .address = Invalid , .size = Size_64_GB , .type = AddressSpaceInfo::Type::Alias, },
    { .bit_width = 39, .address = Invalid , .size = Size_2_GB , .type = AddressSpaceInfo::Type::Stack, },
constexpr std::array<KAddressSpaceInfo, 13> AddressSpaceInfos{{
    { .bit_width = 32, .address = Size_2_MB , .size = Size_1_GB - Size_2_MB , .type = KAddressSpaceInfo::Type::MapSmall, },
    { .bit_width = 32, .address = Size_1_GB , .size = Size_4_GB - Size_1_GB , .type = KAddressSpaceInfo::Type::MapLarge, },
    { .bit_width = 32, .address = Invalid , .size = Size_1_GB , .type = KAddressSpaceInfo::Type::Heap, },
    { .bit_width = 32, .address = Invalid , .size = Size_1_GB , .type = KAddressSpaceInfo::Type::Alias, },
    { .bit_width = 36, .address = Size_128_MB, .size = Size_2_GB - Size_128_MB, .type = KAddressSpaceInfo::Type::MapSmall, },
    { .bit_width = 36, .address = Size_2_GB , .size = Size_64_GB - Size_2_GB , .type = KAddressSpaceInfo::Type::MapLarge, },
    { .bit_width = 36, .address = Invalid , .size = Size_6_GB , .type = KAddressSpaceInfo::Type::Heap, },
    { .bit_width = 36, .address = Invalid , .size = Size_6_GB , .type = KAddressSpaceInfo::Type::Alias, },
    { .bit_width = 39, .address = Size_128_MB, .size = Size_512_GB - Size_128_MB, .type = KAddressSpaceInfo::Type::Map39Bit, },
    { .bit_width = 39, .address = Invalid , .size = Size_64_GB , .type = KAddressSpaceInfo::Type::MapSmall },
    { .bit_width = 39, .address = Invalid , .size = Size_6_GB , .type = KAddressSpaceInfo::Type::Heap, },
    { .bit_width = 39, .address = Invalid , .size = Size_64_GB , .type = KAddressSpaceInfo::Type::Alias, },
    { .bit_width = 39, .address = Invalid , .size = Size_2_GB , .type = KAddressSpaceInfo::Type::Stack, },
}};
// clang-format on

@@ -49,7 +46,8 @@ constexpr bool IsAllowedIndexForAddress(std::size_t index) {
    return index < AddressSpaceInfos.size() && AddressSpaceInfos[index].address != Invalid;
}

using IndexArray = std::array<std::size_t, static_cast<std::size_t>(AddressSpaceInfo::Type::Count)>;
using IndexArray =
    std::array<std::size_t, static_cast<std::size_t>(KAddressSpaceInfo::Type::Count)>;

constexpr IndexArray AddressSpaceIndices32Bit{
    0, 1, 0, 2, 0, 3,

@@ -63,23 +61,23 @@ constexpr IndexArray AddressSpaceIndices39Bit{
    9, 8, 8, 10, 12, 11,
};

constexpr bool IsAllowed32BitType(AddressSpaceInfo::Type type) {
    return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Large64Bit &&
           type != AddressSpaceInfo::Type::Stack;
constexpr bool IsAllowed32BitType(KAddressSpaceInfo::Type type) {
    return type < KAddressSpaceInfo::Type::Count && type != KAddressSpaceInfo::Type::Map39Bit &&
           type != KAddressSpaceInfo::Type::Stack;
}

constexpr bool IsAllowed36BitType(AddressSpaceInfo::Type type) {
    return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Large64Bit &&
           type != AddressSpaceInfo::Type::Stack;
constexpr bool IsAllowed36BitType(KAddressSpaceInfo::Type type) {
    return type < KAddressSpaceInfo::Type::Count && type != KAddressSpaceInfo::Type::Map39Bit &&
           type != KAddressSpaceInfo::Type::Stack;
}

constexpr bool IsAllowed39BitType(AddressSpaceInfo::Type type) {
    return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Small64Bit;
constexpr bool IsAllowed39BitType(KAddressSpaceInfo::Type type) {
    return type < KAddressSpaceInfo::Type::Count && type != KAddressSpaceInfo::Type::MapLarge;
}

} // namespace

u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
u64 KAddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
    const std::size_t index{static_cast<std::size_t>(type)};
    switch (width) {
    case 32:

@@ -99,7 +97,7 @@ u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
    return 0;
}

std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) {
std::size_t KAddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) {
    const std::size_t index{static_cast<std::size_t>(type)};
    switch (width) {
    case 32:

@@ -116,4 +114,4 @@ std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type)
    return 0;
}

} // namespace Kernel::Memory
} // namespace Kernel

@@ -2,20 +2,17 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include "common/common_types.h"

namespace Kernel::Memory {
namespace Kernel {

struct AddressSpaceInfo final {
struct KAddressSpaceInfo final {
    enum class Type : u32 {
        Is32Bit = 0,
        Small64Bit = 1,
        Large64Bit = 2,
        MapSmall = 0,
        MapLarge = 1,
        Map39Bit = 2,
        Heap = 3,
        Stack = 4,
        Alias = 5,

@@ -31,4 +28,4 @@ struct AddressSpaceInfo final {
    const Type type{};
};

} // namespace Kernel::Memory
} // namespace Kernel

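The renamed KAddressSpaceInfo is looked up by address-space bit width and region type. A hedged example of the lookup (illustrative, not from the commit; it assumes GetAddressSpaceStart and GetAddressSpaceSize are static members, as their instance-free definitions in the .cpp hunk above suggest):

    // Illustrative only: query the 39-bit map region used by typical 64-bit titles.
    const u64 map_start = Kernel::KAddressSpaceInfo::GetAddressSpaceStart(
        39, Kernel::KAddressSpaceInfo::Type::Map39Bit);
    const std::size_t map_size = Kernel::KAddressSpaceInfo::GetAddressSpaceSize(
        39, Kernel::KAddressSpaceInfo::Type::Map39Bit);
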
@@ -2,20 +2,17 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

// This file references various implementation details from Atmosphere, an open-source firmware for
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.

#pragma once

#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory/memory_types.h"
#include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/svc_types.h"

namespace Kernel::Memory {
namespace Kernel {

enum class MemoryState : u32 {
enum class KMemoryState : u32 {
    None = 0,
    Mask = 0xFF,
    All = ~None,

@@ -97,31 +94,31 @@ enum class MemoryState : u32 {
                FlagReferenceCounted | FlagCanDebug,
    CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted,
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryState);
DECLARE_ENUM_FLAG_OPERATORS(KMemoryState);

static_assert(static_cast<u32>(MemoryState::Free) == 0x00000000);
static_assert(static_cast<u32>(MemoryState::Io) == 0x00002001);
static_assert(static_cast<u32>(MemoryState::Static) == 0x00042002);
static_assert(static_cast<u32>(MemoryState::Code) == 0x00DC7E03);
static_assert(static_cast<u32>(MemoryState::CodeData) == 0x03FEBD04);
static_assert(static_cast<u32>(MemoryState::Normal) == 0x037EBD05);
static_assert(static_cast<u32>(MemoryState::Shared) == 0x00402006);
static_assert(static_cast<u32>(MemoryState::AliasCode) == 0x00DD7E08);
static_assert(static_cast<u32>(MemoryState::AliasCodeData) == 0x03FFBD09);
static_assert(static_cast<u32>(MemoryState::Ipc) == 0x005C3C0A);
static_assert(static_cast<u32>(MemoryState::Stack) == 0x005C3C0B);
static_assert(static_cast<u32>(MemoryState::ThreadLocal) == 0x0040200C);
static_assert(static_cast<u32>(MemoryState::Transferred) == 0x015C3C0D);
static_assert(static_cast<u32>(MemoryState::SharedTransferred) == 0x005C380E);
static_assert(static_cast<u32>(MemoryState::SharedCode) == 0x0040380F);
static_assert(static_cast<u32>(MemoryState::Inaccessible) == 0x00000010);
static_assert(static_cast<u32>(MemoryState::NonSecureIpc) == 0x005C3811);
static_assert(static_cast<u32>(MemoryState::NonDeviceIpc) == 0x004C2812);
static_assert(static_cast<u32>(MemoryState::Kernel) == 0x00002013);
static_assert(static_cast<u32>(MemoryState::GeneratedCode) == 0x00402214);
static_assert(static_cast<u32>(MemoryState::CodeOut) == 0x00402015);
static_assert(static_cast<u32>(KMemoryState::Free) == 0x00000000);
static_assert(static_cast<u32>(KMemoryState::Io) == 0x00002001);
static_assert(static_cast<u32>(KMemoryState::Static) == 0x00042002);
static_assert(static_cast<u32>(KMemoryState::Code) == 0x00DC7E03);
static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x03FEBD04);
static_assert(static_cast<u32>(KMemoryState::Normal) == 0x037EBD05);
static_assert(static_cast<u32>(KMemoryState::Shared) == 0x00402006);
static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x00DD7E08);
static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x03FFBD09);
static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x005C3C0A);
static_assert(static_cast<u32>(KMemoryState::Stack) == 0x005C3C0B);
static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0040200C);
static_assert(static_cast<u32>(KMemoryState::Transferred) == 0x015C3C0D);
static_assert(static_cast<u32>(KMemoryState::SharedTransferred) == 0x005C380E);
static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0040380F);
static_assert(static_cast<u32>(KMemoryState::Inaccessible) == 0x00000010);
static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x005C3811);
static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x004C2812);
static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00002013);
static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x00402214);
static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x00402015);

enum class MemoryPermission : u8 {
enum class KMemoryPermission : u8 {
    None = 0,
    Mask = static_cast<u8>(~None),

@@ -135,9 +132,9 @@ enum class MemoryPermission : u8 {
    UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write |
                               Svc::MemoryPermission::Execute),
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryPermission);
DECLARE_ENUM_FLAG_OPERATORS(KMemoryPermission);

enum class MemoryAttribute : u8 {
enum class KMemoryAttribute : u8 {
    None = 0x00,
    Mask = 0x7F,
    All = Mask,

@@ -152,18 +149,18 @@ enum class MemoryAttribute : u8 {
    LockedAndIpcLocked = Locked | IpcLocked,
    DeviceSharedAndUncached = DeviceShared | Uncached
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryAttribute);
DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute);

static_assert((static_cast<u8>(MemoryAttribute::Mask) &
               static_cast<u8>(MemoryAttribute::DontCareMask)) == 0);
static_assert((static_cast<u8>(KMemoryAttribute::Mask) &
               static_cast<u8>(KMemoryAttribute::DontCareMask)) == 0);

struct MemoryInfo {
struct KMemoryInfo {
    VAddr addr{};
    std::size_t size{};
    MemoryState state{};
    MemoryPermission perm{};
    MemoryAttribute attribute{};
    MemoryPermission original_perm{};
    KMemoryState state{};
    KMemoryPermission perm{};
    KMemoryAttribute attribute{};
    KMemoryPermission original_perm{};
    u16 ipc_lock_count{};
    u16 device_use_count{};

@@ -171,9 +168,9 @@ struct MemoryInfo {
        return {
            addr,
            size,
            static_cast<Svc::MemoryState>(state & MemoryState::Mask),
            static_cast<Svc::MemoryAttribute>(attribute & MemoryAttribute::Mask),
            static_cast<Svc::MemoryPermission>(perm & MemoryPermission::UserMask),
            static_cast<Svc::MemoryState>(state & KMemoryState::Mask),
            static_cast<Svc::MemoryAttribute>(attribute & KMemoryAttribute::Mask),
            static_cast<Svc::MemoryPermission>(perm & KMemoryPermission::UserMask),
            ipc_lock_count,
            device_use_count,
        };

@@ -196,21 +193,21 @@ struct MemoryInfo {
    }
};

class MemoryBlock final {
    friend class MemoryBlockManager;
class KMemoryBlock final {
    friend class KMemoryBlockManager;

private:
    VAddr addr{};
    std::size_t num_pages{};
    MemoryState state{MemoryState::None};
    KMemoryState state{KMemoryState::None};
    u16 ipc_lock_count{};
    u16 device_use_count{};
    MemoryPermission perm{MemoryPermission::None};
    MemoryPermission original_perm{MemoryPermission::None};
    MemoryAttribute attribute{MemoryAttribute::None};
    KMemoryPermission perm{KMemoryPermission::None};
    KMemoryPermission original_perm{KMemoryPermission::None};
    KMemoryAttribute attribute{KMemoryAttribute::None};

public:
    static constexpr int Compare(const MemoryBlock& lhs, const MemoryBlock& rhs) {
    static constexpr int Compare(const KMemoryBlock& lhs, const KMemoryBlock& rhs) {
        if (lhs.GetAddress() < rhs.GetAddress()) {
            return -1;
        } else if (lhs.GetAddress() <= rhs.GetLastAddress()) {

@@ -221,9 +218,9 @@ public:
    }

public:
    constexpr MemoryBlock() = default;
    constexpr MemoryBlock(VAddr addr_, std::size_t num_pages_, MemoryState state_,
                          MemoryPermission perm_, MemoryAttribute attribute_)
    constexpr KMemoryBlock() = default;
    constexpr KMemoryBlock(VAddr addr_, std::size_t num_pages_, KMemoryState state_,
                           KMemoryPermission perm_, KMemoryAttribute attribute_)
        : addr{addr_}, num_pages(num_pages_), state{state_}, perm{perm_}, attribute{attribute_} {}

    constexpr VAddr GetAddress() const {

@@ -246,40 +243,40 @@ public:
        return GetEndAddress() - 1;
    }

    constexpr MemoryInfo GetMemoryInfo() const {
    constexpr KMemoryInfo GetMemoryInfo() const {
        return {
            GetAddress(), GetSize(),     state,          perm,
            attribute,    original_perm, ipc_lock_count, device_use_count,
        };
    }

    void ShareToDevice(MemoryPermission /*new_perm*/) {
        ASSERT((attribute & MemoryAttribute::DeviceShared) == MemoryAttribute::DeviceShared ||
    void ShareToDevice(KMemoryPermission /*new_perm*/) {
        ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared ||
               device_use_count == 0);
        attribute |= MemoryAttribute::DeviceShared;
        attribute |= KMemoryAttribute::DeviceShared;
        const u16 new_use_count{++device_use_count};
        ASSERT(new_use_count > 0);
    }

    void UnshareToDevice(MemoryPermission /*new_perm*/) {
        ASSERT((attribute & MemoryAttribute::DeviceShared) == MemoryAttribute::DeviceShared);
    void UnshareToDevice(KMemoryPermission /*new_perm*/) {
        ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
        const u16 prev_use_count{device_use_count--};
        ASSERT(prev_use_count > 0);
        if (prev_use_count == 1) {
            attribute &= ~MemoryAttribute::DeviceShared;
            attribute &= ~KMemoryAttribute::DeviceShared;
        }
    }

private:
    constexpr bool HasProperties(MemoryState s, MemoryPermission p, MemoryAttribute a) const {
        constexpr MemoryAttribute AttributeIgnoreMask{MemoryAttribute::DontCareMask |
                                                      MemoryAttribute::IpcLocked |
                                                      MemoryAttribute::DeviceShared};
    constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const {
        constexpr KMemoryAttribute AttributeIgnoreMask{KMemoryAttribute::DontCareMask |
                                                       KMemoryAttribute::IpcLocked |
                                                       KMemoryAttribute::DeviceShared};
        return state == s && perm == p &&
               (attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
    }

    constexpr bool HasSameProperties(const MemoryBlock& rhs) const {
    constexpr bool HasSameProperties(const KMemoryBlock& rhs) const {
        return state == rhs.state && perm == rhs.perm && original_perm == rhs.original_perm &&
               attribute == rhs.attribute && ipc_lock_count == rhs.ipc_lock_count &&
               device_use_count == rhs.device_use_count;

@@ -296,25 +293,25 @@ private:
        num_pages += count;
    }

    constexpr void Update(MemoryState new_state, MemoryPermission new_perm,
                          MemoryAttribute new_attribute) {
        ASSERT(original_perm == MemoryPermission::None);
        ASSERT((attribute & MemoryAttribute::IpcLocked) == MemoryAttribute::None);
    constexpr void Update(KMemoryState new_state, KMemoryPermission new_perm,
                          KMemoryAttribute new_attribute) {
        ASSERT(original_perm == KMemoryPermission::None);
        ASSERT((attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None);

        state = new_state;
        perm = new_perm;

        attribute = static_cast<MemoryAttribute>(
        attribute = static_cast<KMemoryAttribute>(
            new_attribute |
            (attribute & (MemoryAttribute::IpcLocked | MemoryAttribute::DeviceShared)));
            (attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));
    }

    constexpr MemoryBlock Split(VAddr split_addr) {
    constexpr KMemoryBlock Split(VAddr split_addr) {
        ASSERT(GetAddress() < split_addr);
        ASSERT(Contains(split_addr));
        ASSERT(Common::IsAligned(split_addr, PageSize));

        MemoryBlock block;
        KMemoryBlock block;
        block.addr = addr;
        block.num_pages = (split_addr - GetAddress()) / PageSize;
        block.state = state;

@@ -330,6 +327,6 @@ private:
        return block;
    }
};
static_assert(std::is_trivially_destructible<MemoryBlock>::value);
static_assert(std::is_trivially_destructible<KMemoryBlock>::value);

} // namespace Kernel::Memory
} // namespace Kernel

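Note that KMemoryAttribute::DeviceShared above is reference counted through device_use_count: ShareToDevice sets the flag and bumps the count, and UnshareToDevice only clears the flag once the count returns to zero. A sketch of the intended pairing (illustrative, not from the commit; block is a hypothetical KMemoryBlock instance):

    // Illustrative only: the attribute stays set while any device mapping is outstanding.
    block.ShareToDevice(Kernel::KMemoryPermission::None);   // count 0 -> 1, DeviceShared set
    block.ShareToDevice(Kernel::KMemoryPermission::None);   // count 1 -> 2
    block.UnshareToDevice(Kernel::KMemoryPermission::None); // count 2 -> 1, flag still set
    block.UnshareToDevice(Kernel::KMemoryPermission::None); // count 1 -> 0, flag cleared
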
@@ -2,19 +2,19 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/hle/kernel/memory/memory_block_manager.h"
#include "core/hle/kernel/memory/memory_types.h"
#include "core/hle/kernel/k_memory_block_manager.h"
#include "core/hle/kernel/memory_types.h"

namespace Kernel::Memory {
namespace Kernel {

MemoryBlockManager::MemoryBlockManager(VAddr start_addr, VAddr end_addr)
KMemoryBlockManager::KMemoryBlockManager(VAddr start_addr, VAddr end_addr)
    : start_addr{start_addr}, end_addr{end_addr} {
    const u64 num_pages{(end_addr - start_addr) / PageSize};
    memory_block_tree.emplace_back(start_addr, num_pages, MemoryState::Free, MemoryPermission::None,
                                   MemoryAttribute::None);
    memory_block_tree.emplace_back(start_addr, num_pages, KMemoryState::Free,
                                   KMemoryPermission::None, KMemoryAttribute::None);
}

MemoryBlockManager::iterator MemoryBlockManager::FindIterator(VAddr addr) {
KMemoryBlockManager::iterator KMemoryBlockManager::FindIterator(VAddr addr) {
    auto node{memory_block_tree.begin()};
    while (node != end()) {
        const VAddr end_addr{node->GetNumPages() * PageSize + node->GetAddress()};

@@ -26,9 +26,9 @@ MemoryBlockManager::iterator MemoryBlockManager::FindIterator(VAddr addr) {
    return end();
}

VAddr MemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
                                       std::size_t num_pages, std::size_t align, std::size_t offset,
                                       std::size_t guard_pages) {
VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
                                        std::size_t num_pages, std::size_t align,
                                        std::size_t offset, std::size_t guard_pages) {
    if (num_pages == 0) {
        return {};
    }

@@ -41,7 +41,7 @@ VAddr MemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_nu
            break;
        }

        if (info.state != MemoryState::Free) {
        if (info.state != KMemoryState::Free) {
            continue;
        }

@@ -63,17 +63,17 @@ VAddr MemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_nu
    return {};
}

void MemoryBlockManager::Update(VAddr addr, std::size_t num_pages, MemoryState prev_state,
                                MemoryPermission prev_perm, MemoryAttribute prev_attribute,
                                MemoryState state, MemoryPermission perm,
                                MemoryAttribute attribute) {
void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state,
                                 KMemoryPermission prev_perm, KMemoryAttribute prev_attribute,
                                 KMemoryState state, KMemoryPermission perm,
                                 KMemoryAttribute attribute) {
    const VAddr end_addr{addr + num_pages * PageSize};
    iterator node{memory_block_tree.begin()};

    prev_attribute |= MemoryAttribute::IpcAndDeviceMapped;
    prev_attribute |= KMemoryAttribute::IpcAndDeviceMapped;

    while (node != memory_block_tree.end()) {
        MemoryBlock* block{&(*node)};
        KMemoryBlock* block{&(*node)};
        iterator next_node{std::next(node)};
        const VAddr cur_addr{block->GetAddress()};
        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};

@@ -106,13 +106,13 @@ void MemoryBlockManager::Update(VAddr addr, std::size_t num_pages, MemoryState p
    }
}

void MemoryBlockManager::Update(VAddr addr, std::size_t num_pages, MemoryState state,
                                MemoryPermission perm, MemoryAttribute attribute) {
void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState state,
                                 KMemoryPermission perm, KMemoryAttribute attribute) {
    const VAddr end_addr{addr + num_pages * PageSize};
    iterator node{memory_block_tree.begin()};

    while (node != memory_block_tree.end()) {
        MemoryBlock* block{&(*node)};
        KMemoryBlock* block{&(*node)};
        iterator next_node{std::next(node)};
        const VAddr cur_addr{block->GetAddress()};
        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};

@@ -141,13 +141,13 @@ void MemoryBlockManager::Update(VAddr addr, std::size_t num_pages, MemoryState s
    }
}

void MemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
                                    MemoryPermission perm) {
void KMemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
                                     KMemoryPermission perm) {
    const VAddr end_addr{addr + num_pages * PageSize};
    iterator node{memory_block_tree.begin()};

    while (node != memory_block_tree.end()) {
        MemoryBlock* block{&(*node)};
        KMemoryBlock* block{&(*node)};
        iterator next_node{std::next(node)};
        const VAddr cur_addr{block->GetAddress()};
        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};

@@ -176,9 +176,9 @@ void MemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&
    }
}

void MemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) {
void KMemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) {
    const_iterator it{FindIterator(start)};
    MemoryInfo info{};
    KMemoryInfo info{};
    do {
        info = it->GetMemoryInfo();
        func(info);

@@ -186,8 +186,8 @@ void MemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& f
    } while (info.addr + info.size - 1 < end - 1 && it != cend());
}

void MemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
    MemoryBlock* block{&(*it)};
void KMemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
    KMemoryBlock* block{&(*it)};

    auto EraseIt = [&](const iterator it_to_erase) {
        if (next_it == it_to_erase) {

@@ -197,7 +197,7 @@ void MemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
    };

    if (it != memory_block_tree.begin()) {
        MemoryBlock* prev{&(*std::prev(it))};
        KMemoryBlock* prev{&(*std::prev(it))};

        if (block->HasSameProperties(*prev)) {
            const iterator prev_it{std::prev(it)};

@@ -211,7 +211,7 @@ void MemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
    }

    if (it != cend()) {
        const MemoryBlock* const next{&(*std::next(it))};
        const KMemoryBlock* const next{&(*std::next(it))};

        if (block->HasSameProperties(*next)) {
            block->Add(next->GetNumPages());

@@ -220,4 +220,4 @@ void MemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
    }
}

} // namespace Kernel::Memory
} // namespace Kernel

@@ -8,18 +8,18 @@
#include <list>

#include "common/common_types.h"
#include "core/hle/kernel/memory/memory_block.h"
#include "core/hle/kernel/k_memory_block.h"

namespace Kernel::Memory {
namespace Kernel {

class MemoryBlockManager final {
class KMemoryBlockManager final {
public:
    using MemoryBlockTree = std::list<MemoryBlock>;
    using MemoryBlockTree = std::list<KMemoryBlock>;
    using iterator = MemoryBlockTree::iterator;
    using const_iterator = MemoryBlockTree::const_iterator;

public:
    MemoryBlockManager(VAddr start_addr, VAddr end_addr);
    KMemoryBlockManager(VAddr start_addr, VAddr end_addr);

    iterator end() {
        return memory_block_tree.end();

@@ -36,21 +36,22 @@ public:
    VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
                       std::size_t align, std::size_t offset, std::size_t guard_pages);

    void Update(VAddr addr, std::size_t num_pages, MemoryState prev_state,
                MemoryPermission prev_perm, MemoryAttribute prev_attribute, MemoryState state,
                MemoryPermission perm, MemoryAttribute attribute);
    void Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state,
                KMemoryPermission prev_perm, KMemoryAttribute prev_attribute, KMemoryState state,
                KMemoryPermission perm, KMemoryAttribute attribute);

    void Update(VAddr addr, std::size_t num_pages, MemoryState state,
                MemoryPermission perm = MemoryPermission::None,
                MemoryAttribute attribute = MemoryAttribute::None);
    void Update(VAddr addr, std::size_t num_pages, KMemoryState state,
                KMemoryPermission perm = KMemoryPermission::None,
                KMemoryAttribute attribute = KMemoryAttribute::None);

    using LockFunc = std::function<void(iterator, MemoryPermission)>;
    void UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func, MemoryPermission perm);
    using LockFunc = std::function<void(iterator, KMemoryPermission)>;
    void UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
                    KMemoryPermission perm);

    using IterateFunc = std::function<void(const MemoryInfo&)>;
    using IterateFunc = std::function<void(const KMemoryInfo&)>;
    void IterateForRange(VAddr start, VAddr end, IterateFunc&& func);

    MemoryBlock& FindBlock(VAddr addr) {
    KMemoryBlock& FindBlock(VAddr addr) {
        return *FindIterator(addr);
    }

@@ -63,4 +64,4 @@ private:
    MemoryBlockTree memory_block_tree;
};

} // namespace Kernel::Memory
} // namespace Kernel

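KMemoryBlockManager exposes IterateForRange for walking every block that overlaps a virtual range. A minimal sketch (illustrative, not from the commit; manager, start and end are hypothetical values):

    // Illustrative only: sum the sizes of all blocks overlapping [start, end).
    std::size_t total = 0;
    manager.IterateForRange(start, end,
                            [&](const Kernel::KMemoryInfo& info) { total += info.size; });
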
@@ -7,7 +7,7 @@
#include "common/common_types.h"
#include "core/device_memory.h"

namespace Kernel::Memory {
namespace Kernel {

constexpr std::size_t KernelAslrAlignment = 2 * 1024 * 1024;
constexpr std::size_t KernelVirtualAddressSpaceWidth = 1ULL << 39;

@@ -27,8 +27,8 @@ constexpr bool IsKernelAddress(VAddr address) {
    return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd;
}

class MemoryRegion final {
    friend class MemoryLayout;
class KMemoryRegion final {
    friend class KMemoryLayout;

public:
    constexpr PAddr StartAddress() const {

@@ -40,29 +40,29 @@ public:
    }

private:
    constexpr MemoryRegion() = default;
    constexpr MemoryRegion(PAddr start_address, PAddr end_address)
    constexpr KMemoryRegion() = default;
    constexpr KMemoryRegion(PAddr start_address, PAddr end_address)
        : start_address{start_address}, end_address{end_address} {}

    const PAddr start_address{};
    const PAddr end_address{};
};

class MemoryLayout final {
class KMemoryLayout final {
public:
    constexpr const MemoryRegion& Application() const {
    constexpr const KMemoryRegion& Application() const {
        return application;
    }

    constexpr const MemoryRegion& Applet() const {
    constexpr const KMemoryRegion& Applet() const {
        return applet;
    }

    constexpr const MemoryRegion& System() const {
    constexpr const KMemoryRegion& System() const {
        return system;
    }

    static constexpr MemoryLayout GetDefaultLayout() {
    static constexpr KMemoryLayout GetDefaultLayout() {
        constexpr std::size_t application_size{0xcd500000};
        constexpr std::size_t applet_size{0x1fb00000};
        constexpr PAddr application_start_address{Core::DramMemoryMap::End - application_size};

@@ -76,15 +76,15 @@ public:
    }

private:
    constexpr MemoryLayout(PAddr application_start_address, std::size_t application_size,
                           PAddr applet_start_address, std::size_t applet_size,
                           PAddr system_start_address, std::size_t system_size)
    constexpr KMemoryLayout(PAddr application_start_address, std::size_t application_size,
                            PAddr applet_start_address, std::size_t applet_size,
                            PAddr system_start_address, std::size_t system_size)
        : application{application_start_address, application_size},
          applet{applet_start_address, applet_size}, system{system_start_address, system_size} {}

    const MemoryRegion application;
    const MemoryRegion applet;
    const MemoryRegion system;
    const KMemoryRegion application;
    const KMemoryRegion applet;
    const KMemoryRegion system;
};

} // namespace Kernel::Memory
} // namespace Kernel

@@ -8,20 +8,20 @@
#include "common/assert.h"
#include "common/common_types.h"
#include "common/scope_exit.h"
#include "core/hle/kernel/memory/memory_manager.h"
#include "core/hle/kernel/memory/page_linked_list.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_page_linked_list.h"
#include "core/hle/kernel/svc_results.h"

namespace Kernel::Memory {
namespace Kernel {

std::size_t MemoryManager::Impl::Initialize(Pool new_pool, u64 start_address, u64 end_address) {
std::size_t KMemoryManager::Impl::Initialize(Pool new_pool, u64 start_address, u64 end_address) {
    const auto size{end_address - start_address};

    // Calculate metadata sizes
    const auto ref_count_size{(size / PageSize) * sizeof(u16)};
    const auto optimize_map_size{(Common::AlignUp((size / PageSize), 64) / 64) * sizeof(u64)};
    const auto manager_size{Common::AlignUp(optimize_map_size + ref_count_size, PageSize)};
    const auto page_heap_size{PageHeap::CalculateMetadataOverheadSize(size)};
    const auto page_heap_size{KPageHeap::CalculateManagementOverheadSize(size)};
    const auto total_metadata_size{manager_size + page_heap_size};
    ASSERT(manager_size <= total_metadata_size);
    ASSERT(Common::IsAligned(total_metadata_size, PageSize));

@@ -41,29 +41,30 @@ std::size_t MemoryManager::Impl::Initialize(Pool new_pool, u64 start_address, u6
    return total_metadata_size;
}

void MemoryManager::InitializeManager(Pool pool, u64 start_address, u64 end_address) {
void KMemoryManager::InitializeManager(Pool pool, u64 start_address, u64 end_address) {
    ASSERT(pool < Pool::Count);
    managers[static_cast<std::size_t>(pool)].Initialize(pool, start_address, end_address);
}

VAddr MemoryManager::AllocateContinuous(std::size_t num_pages, std::size_t align_pages, Pool pool,
                                        Direction dir) {
VAddr KMemoryManager::AllocateAndOpenContinuous(std::size_t num_pages, std::size_t align_pages,
                                                u32 option) {
    // Early return if we're allocating no pages
    if (num_pages == 0) {
        return {};
    }

    // Lock the pool that we're allocating from
    const auto [pool, dir] = DecodeOption(option);
    const auto pool_index{static_cast<std::size_t>(pool)};
    std::lock_guard lock{pool_locks[pool_index]};

    // Choose a heap based on our page size request
    const s32 heap_index{PageHeap::GetAlignedBlockIndex(num_pages, align_pages)};
    const s32 heap_index{KPageHeap::GetAlignedBlockIndex(num_pages, align_pages)};

    // Loop, trying to iterate from each block
    // TODO (bunnei): Support multiple managers
    Impl& chosen_manager{managers[pool_index]};
    VAddr allocated_block{chosen_manager.AllocateBlock(heap_index)};
    VAddr allocated_block{chosen_manager.AllocateBlock(heap_index, false)};

    // If we failed to allocate, quit now
    if (!allocated_block) {

@@ -71,7 +72,7 @@ VAddr MemoryManager::AllocateContinuous(std::size_t num_pages, std::size_t align
    }

    // If we allocated more than we need, free some
    const auto allocated_pages{PageHeap::GetBlockNumPages(heap_index)};
    const auto allocated_pages{KPageHeap::GetBlockNumPages(heap_index)};
    if (allocated_pages > num_pages) {
        chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
    }

@@ -79,8 +80,8 @@ VAddr MemoryManager::AllocateContinuous(std::size_t num_pages, std::size_t align
    return allocated_block;
}

ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
                                   Direction dir) {
ResultCode KMemoryManager::Allocate(KPageLinkedList& page_list, std::size_t num_pages, Pool pool,
                                    Direction dir) {
    ASSERT(page_list.GetNumPages() == 0);

    // Early return if we're allocating no pages

@@ -93,7 +94,7 @@ ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pa
    std::lock_guard lock{pool_locks[pool_index]};

    // Choose a heap based on our page size request
    const s32 heap_index{PageHeap::GetBlockIndex(num_pages)};
    const s32 heap_index{KPageHeap::GetBlockIndex(num_pages)};
    if (heap_index < 0) {
        return ResultOutOfMemory;
    }

@@ -112,11 +113,11 @@ ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pa

    // Keep allocating until we've allocated all our pages
    for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) {
        const auto pages_per_alloc{PageHeap::GetBlockNumPages(index)};
        const auto pages_per_alloc{KPageHeap::GetBlockNumPages(index)};

        while (num_pages >= pages_per_alloc) {
            // Allocate a block
            VAddr allocated_block{chosen_manager.AllocateBlock(index)};
            VAddr allocated_block{chosen_manager.AllocateBlock(index, false)};
            if (!allocated_block) {
                break;
            }

@@ -148,8 +149,8 @@ ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pa
    return RESULT_SUCCESS;
}

ResultCode MemoryManager::Free(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
                               Direction dir) {
ResultCode KMemoryManager::Free(KPageLinkedList& page_list, std::size_t num_pages, Pool pool,
                                Direction dir) {
    // Early return if we're freeing no pages
    if (!num_pages) {
        return RESULT_SUCCESS;

@@ -172,4 +173,4 @@ ResultCode MemoryManager::Free(PageLinkedList& page_list, std::size_t num_pages,
    return RESULT_SUCCESS;
}

} // namespace Kernel::Memory
} // namespace Kernel

@@ -6,16 +6,18 @@

#include <array>
#include <mutex>
#include <tuple>

#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory/page_heap.h"
#include "core/hle/kernel/k_page_heap.h"
#include "core/hle/result.h"

namespace Kernel::Memory {
namespace Kernel {

class PageLinkedList;
class KPageLinkedList;

class MemoryManager final : NonCopyable {
class KMemoryManager final : NonCopyable {
public:
    enum class Pool : u32 {
        Application = 0,

@@ -37,29 +39,50 @@ public:
        Mask = (0xF << Shift),
    };

    MemoryManager() = default;
    KMemoryManager() = default;

    constexpr std::size_t GetSize(Pool pool) const {
        return managers[static_cast<std::size_t>(pool)].GetSize();
    }

    void InitializeManager(Pool pool, u64 start_address, u64 end_address);
    VAddr AllocateContinuous(std::size_t num_pages, std::size_t align_pages, Pool pool,
                             Direction dir = Direction::FromFront);
    ResultCode Allocate(PageLinkedList& page_list, std::size_t num_pages, Pool pool,

    VAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
    ResultCode Allocate(KPageLinkedList& page_list, std::size_t num_pages, Pool pool,
                        Direction dir = Direction::FromFront);
    ResultCode Free(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
    ResultCode Free(KPageLinkedList& page_list, std::size_t num_pages, Pool pool,
                    Direction dir = Direction::FromFront);

    static constexpr std::size_t MaxManagerCount = 10;

public:
    static constexpr u32 EncodeOption(Pool pool, Direction dir) {
        return (static_cast<u32>(pool) << static_cast<u32>(Pool::Shift)) |
               (static_cast<u32>(dir) << static_cast<u32>(Direction::Shift));
    }

    static constexpr Pool GetPool(u32 option) {
        return static_cast<Pool>((static_cast<u32>(option) & static_cast<u32>(Pool::Mask)) >>
                                 static_cast<u32>(Pool::Shift));
    }

    static constexpr Direction GetDirection(u32 option) {
        return static_cast<Direction>(
            (static_cast<u32>(option) & static_cast<u32>(Direction::Mask)) >>
            static_cast<u32>(Direction::Shift));
    }

    static constexpr std::tuple<Pool, Direction> DecodeOption(u32 option) {
        return std::make_tuple(GetPool(option), GetDirection(option));
    }

private:
    class Impl final : NonCopyable {
    private:
        using RefCount = u16;

    private:
        PageHeap heap;
        KPageHeap heap;
        Pool pool{};

    public:

@@ -67,8 +90,8 @@ private:

        std::size_t Initialize(Pool new_pool, u64 start_address, u64 end_address);

        VAddr AllocateBlock(s32 index) {
            return heap.AllocateBlock(index);
        VAddr AllocateBlock(s32 index, bool random) {
            return heap.AllocateBlock(index, random);
        }

        void Free(VAddr addr, std::size_t num_pages) {

@@ -93,4 +116,4 @@ private:
    std::array<Impl, MaxManagerCount> managers;
};

} // namespace Kernel::Memory
} // namespace Kernel

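The new option word packs a Pool and a Direction together; AllocateAndOpenContinuous later unpacks it with DecodeOption. A small round-trip sketch (illustrative, not part of the commit):

    // Illustrative only: encode a (pool, direction) pair and read it back.
    using Kernel::KMemoryManager;
    constexpr u32 option = KMemoryManager::EncodeOption(KMemoryManager::Pool::Application,
                                                        KMemoryManager::Direction::FromFront);
    static_assert(KMemoryManager::GetPool(option) == KMemoryManager::Pool::Application);
    static_assert(KMemoryManager::GetDirection(option) == KMemoryManager::Direction::FromFront);
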
@@ -0,0 +1,279 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <bit>

#include "common/alignment.h"
#include "common/assert.h"
#include "common/bit_util.h"
#include "common/common_types.h"
#include "common/tiny_mt.h"
#include "core/hle/kernel/k_system_control.h"

namespace Kernel {

class KPageBitmap {
private:
    class RandomBitGenerator {
    private:
        Common::TinyMT rng{};
        u32 entropy{};
        u32 bits_available{};

    private:
        void RefreshEntropy() {
            entropy = rng.GenerateRandomU32();
            bits_available = static_cast<u32>(Common::BitSize<decltype(entropy)>());
        }

        bool GenerateRandomBit() {
            if (bits_available == 0) {
                this->RefreshEntropy();
            }

            const bool rnd_bit = (entropy & 1) != 0;
            entropy >>= 1;
            --bits_available;
            return rnd_bit;
        }

    public:
        RandomBitGenerator() {
            rng.Initialize(static_cast<u32>(KSystemControl::GenerateRandomU64()));
        }

        std::size_t SelectRandomBit(u64 bitmap) {
            u64 selected = 0;

            u64 cur_num_bits = Common::BitSize<decltype(bitmap)>() / 2;
            u64 cur_mask = (1ULL << cur_num_bits) - 1;

            while (cur_num_bits) {
                const u64 low = (bitmap >> 0) & cur_mask;
                const u64 high = (bitmap >> cur_num_bits) & cur_mask;

                bool choose_low;
                if (high == 0) {
                    // If only low val is set, choose low.
                    choose_low = true;
                } else if (low == 0) {
                    // If only high val is set, choose high.
                    choose_low = false;
                } else {
                    // If both are set, choose random.
                    choose_low = this->GenerateRandomBit();
                }

                // If we chose low, proceed with low.
                if (choose_low) {
                    bitmap = low;
                    selected += 0;
                } else {
                    bitmap = high;
                    selected += cur_num_bits;
                }

                // Proceed.
                cur_num_bits /= 2;
                cur_mask >>= cur_num_bits;
            }

            return selected;
        }
    };

public:
    static constexpr std::size_t MaxDepth = 4;

private:
    std::array<u64*, MaxDepth> bit_storages{};
    RandomBitGenerator rng{};
    std::size_t num_bits{};
    std::size_t used_depths{};

public:
    KPageBitmap() = default;

    constexpr std::size_t GetNumBits() const {
        return num_bits;
    }
    constexpr s32 GetHighestDepthIndex() const {
        return static_cast<s32>(used_depths) - 1;
    }

    u64* Initialize(u64* storage, std::size_t size) {
        // Initially, everything is un-set.
        num_bits = 0;

        // Calculate the needed bitmap depth.
        used_depths = static_cast<std::size_t>(GetRequiredDepth(size));
        ASSERT(used_depths <= MaxDepth);

        // Set the bitmap pointers.
        for (s32 depth = this->GetHighestDepthIndex(); depth >= 0; depth--) {
            bit_storages[depth] = storage;
            size = Common::AlignUp(size, Common::BitSize<u64>()) / Common::BitSize<u64>();
            storage += size;
        }

        return storage;
    }

    s64 FindFreeBlock(bool random) {
        uintptr_t offset = 0;
        s32 depth = 0;

        if (random) {
            do {
                const u64 v = bit_storages[depth][offset];
                if (v == 0) {
                    // If depth is bigger than zero, then a previous level indicated a block was
                    // free.
                    ASSERT(depth == 0);
                    return -1;
                }
                offset = offset * Common::BitSize<u64>() + rng.SelectRandomBit(v);
                ++depth;
            } while (depth < static_cast<s32>(used_depths));
        } else {
            do {
                const u64 v = bit_storages[depth][offset];
                if (v == 0) {
                    // If depth is bigger than zero, then a previous level indicated a block was
                    // free.
                    ASSERT(depth == 0);
                    return -1;
                }
                offset = offset * Common::BitSize<u64>() + std::countr_zero(v);
                ++depth;
            } while (depth < static_cast<s32>(used_depths));
        }

        return static_cast<s64>(offset);
    }

    void SetBit(std::size_t offset) {
        this->SetBit(this->GetHighestDepthIndex(), offset);
        num_bits++;
    }

    void ClearBit(std::size_t offset) {
        this->ClearBit(this->GetHighestDepthIndex(), offset);
        num_bits--;
    }

    bool ClearRange(std::size_t offset, std::size_t count) {
        s32 depth = this->GetHighestDepthIndex();
        u64* bits = bit_storages[depth];
        std::size_t bit_ind = offset / Common::BitSize<u64>();
        if (count < Common::BitSize<u64>()) {
            const std::size_t shift = offset % Common::BitSize<u64>();
            ASSERT(shift + count <= Common::BitSize<u64>());
            // Check that all the bits are set.
            const u64 mask = ((u64(1) << count) - 1) << shift;
            u64 v = bits[bit_ind];
            if ((v & mask) != mask) {
                return false;
            }

            // Clear the bits.
            v &= ~mask;
            bits[bit_ind] = v;
            if (v == 0) {
                this->ClearBit(depth - 1, bit_ind);
            }
        } else {
            ASSERT(offset % Common::BitSize<u64>() == 0);
            ASSERT(count % Common::BitSize<u64>() == 0);
            // Check that all the bits are set.
            std::size_t remaining = count;
            std::size_t i = 0;
            do {
                if (bits[bit_ind + i++] != ~u64(0)) {
                    return false;
                }
                remaining -= Common::BitSize<u64>();
            } while (remaining > 0);

            // Clear the bits.
            remaining = count;
            i = 0;
            do {
                bits[bit_ind + i] = 0;
                this->ClearBit(depth - 1, bit_ind + i);
                i++;
                remaining -= Common::BitSize<u64>();
            } while (remaining > 0);
        }

        num_bits -= count;
        return true;
    }

private:
    void SetBit(s32 depth, std::size_t offset) {
        while (depth >= 0) {
            std::size_t ind = offset / Common::BitSize<u64>();
            std::size_t which = offset % Common::BitSize<u64>();
            const u64 mask = u64(1) << which;

            u64* bit = std::addressof(bit_storages[depth][ind]);
|
||||
u64 v = *bit;
|
||||
ASSERT((v & mask) == 0);
|
||||
*bit = v | mask;
|
||||
if (v) {
|
||||
break;
|
||||
}
|
||||
offset = ind;
|
||||
depth--;
|
||||
}
|
||||
}
|
||||
|
||||
void ClearBit(s32 depth, std::size_t offset) {
|
||||
while (depth >= 0) {
|
||||
std::size_t ind = offset / Common::BitSize<u64>();
|
||||
std::size_t which = offset % Common::BitSize<u64>();
|
||||
const u64 mask = u64(1) << which;
|
||||
|
||||
u64* bit = std::addressof(bit_storages[depth][ind]);
|
||||
u64 v = *bit;
|
||||
ASSERT((v & mask) != 0);
|
||||
v &= ~mask;
|
||||
*bit = v;
|
||||
if (v) {
|
||||
break;
|
||||
}
|
||||
offset = ind;
|
||||
depth--;
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
static constexpr s32 GetRequiredDepth(std::size_t region_size) {
|
||||
s32 depth = 0;
|
||||
while (true) {
|
||||
region_size /= Common::BitSize<u64>();
|
||||
depth++;
|
||||
if (region_size == 0) {
|
||||
return depth;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
static constexpr std::size_t CalculateManagementOverheadSize(std::size_t region_size) {
|
||||
std::size_t overhead_bits = 0;
|
||||
for (s32 depth = GetRequiredDepth(region_size) - 1; depth >= 0; depth--) {
|
||||
region_size =
|
||||
Common::AlignUp(region_size, Common::BitSize<u64>()) / Common::BitSize<u64>();
|
||||
overhead_bits += region_size;
|
||||
}
|
||||
return overhead_bits * sizeof(u64);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Kernel
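A minimal usage sketch of the KPageBitmap above, for illustration only: the backing storage is sized with CalculateManagementOverheadSize, pages 5 and 1000 are marked free, and FindFreeBlock is queried once deterministically and once through the TinyMT-backed random path. The names PageBitmapSketch, region_pages and storage are illustrative and not part of this change.

// Hedged sketch, illustration only.
#include <vector>
#include "core/hle/kernel/k_page_bitmap.h"

void PageBitmapSketch() {
    constexpr std::size_t region_pages = 0x800000 / 0x1000; // 2048 pages in an 8 MiB region
    std::vector<u64> storage(
        Kernel::KPageBitmap::CalculateManagementOverheadSize(region_pages) / sizeof(u64));

    Kernel::KPageBitmap bitmap;
    bitmap.Initialize(storage.data(), region_pages);

    // Mark two pages as free, then look one up deterministically and one at random.
    bitmap.SetBit(5);
    bitmap.SetBit(1000);
    const s64 lowest = bitmap.FindFreeBlock(false); // walks countr_zero down the levels -> 5
    const s64 any = bitmap.FindFreeBlock(true);     // 5 or 1000, chosen bit by bit via TinyMT
    if (lowest >= 0) {
        bitmap.ClearBit(static_cast<std::size_t>(lowest));
    }
}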
|
|
@ -2,16 +2,13 @@
|
|||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
// This file references various implementation details from Atmosphere, an open-source firmware for
|
||||
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
|
||||
|
||||
#include "core/core.h"
|
||||
#include "core/hle/kernel/memory/page_heap.h"
|
||||
#include "core/hle/kernel/k_page_heap.h"
|
||||
#include "core/memory.h"
|
||||
|
||||
namespace Kernel::Memory {
|
||||
namespace Kernel {
|
||||
|
||||
void PageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_size) {
|
||||
void KPageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_size) {
|
||||
// Check our assumptions
|
||||
ASSERT(Common::IsAligned((address), PageSize));
|
||||
ASSERT(Common::IsAligned(size, PageSize));
|
||||
|
@ -32,11 +29,11 @@ void PageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_
|
|||
}
|
||||
}
|
||||
|
||||
VAddr PageHeap::AllocateBlock(s32 index) {
|
||||
VAddr KPageHeap::AllocateBlock(s32 index, bool random) {
|
||||
const std::size_t needed_size{blocks[index].GetSize()};
|
||||
|
||||
for (s32 i{index}; i < static_cast<s32>(MemoryBlockPageShifts.size()); i++) {
|
||||
if (const VAddr addr{blocks[i].PopBlock()}; addr) {
|
||||
if (const VAddr addr{blocks[i].PopBlock(random)}; addr) {
|
||||
if (const std::size_t allocated_size{blocks[i].GetSize()};
|
||||
allocated_size > needed_size) {
|
||||
Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
|
||||
|
@ -48,13 +45,13 @@ VAddr PageHeap::AllocateBlock(s32 index) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
void PageHeap::FreeBlock(VAddr block, s32 index) {
|
||||
void KPageHeap::FreeBlock(VAddr block, s32 index) {
|
||||
do {
|
||||
block = blocks[index++].PushBlock(block);
|
||||
} while (block != 0);
|
||||
}
|
||||
|
||||
void PageHeap::Free(VAddr addr, std::size_t num_pages) {
|
||||
void KPageHeap::Free(VAddr addr, std::size_t num_pages) {
|
||||
// Freeing no pages is a no-op
|
||||
if (num_pages == 0) {
|
||||
return;
|
||||
|
@ -104,16 +101,16 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) {
|
|||
}
|
||||
}
|
||||
|
||||
std::size_t PageHeap::CalculateMetadataOverheadSize(std::size_t region_size) {
|
||||
std::size_t KPageHeap::CalculateManagementOverheadSize(std::size_t region_size) {
|
||||
std::size_t overhead_size = 0;
|
||||
for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) {
|
||||
const std::size_t cur_block_shift{MemoryBlockPageShifts[i]};
|
||||
const std::size_t next_block_shift{
|
||||
(i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0};
|
||||
overhead_size += PageHeap::Block::CalculateMetadataOverheadSize(
|
||||
overhead_size += KPageHeap::Block::CalculateManagementOverheadSize(
|
||||
region_size, cur_block_shift, next_block_shift);
|
||||
}
|
||||
return Common::AlignUp(overhead_size, PageSize);
|
||||
}
|
||||
|
||||
} // namespace Kernel::Memory
|
||||
} // namespace Kernel
|
|
@ -0,0 +1,193 @@
|
|||
// Copyright 2020 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
#include <bit>
|
||||
#include <vector>
|
||||
|
||||
#include "common/alignment.h"
|
||||
#include "common/assert.h"
|
||||
#include "common/common_funcs.h"
|
||||
#include "common/common_types.h"
|
||||
#include "core/hle/kernel/k_page_bitmap.h"
|
||||
#include "core/hle/kernel/memory_types.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
class KPageHeap final : NonCopyable {
|
||||
public:
|
||||
static constexpr s32 GetAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) {
|
||||
const auto target_pages{std::max(num_pages, align_pages)};
|
||||
for (std::size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
|
||||
if (target_pages <=
|
||||
(static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
|
||||
return static_cast<s32>(i);
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
static constexpr s32 GetBlockIndex(std::size_t num_pages) {
|
||||
for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) {
|
||||
if (num_pages >= (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
static constexpr std::size_t GetBlockSize(std::size_t index) {
|
||||
return static_cast<std::size_t>(1) << MemoryBlockPageShifts[index];
|
||||
}
|
||||
|
||||
static constexpr std::size_t GetBlockNumPages(std::size_t index) {
|
||||
return GetBlockSize(index) / PageSize;
|
||||
}
|
||||
|
||||
private:
|
||||
static constexpr std::size_t NumMemoryBlockPageShifts{7};
|
||||
static constexpr std::array<std::size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
|
||||
0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E,
|
||||
};
|
||||
|
||||
class Block final : NonCopyable {
|
||||
private:
|
||||
KPageBitmap bitmap;
|
||||
VAddr heap_address{};
|
||||
uintptr_t end_offset{};
|
||||
std::size_t block_shift{};
|
||||
std::size_t next_block_shift{};
|
||||
|
||||
public:
|
||||
Block() = default;
|
||||
|
||||
constexpr std::size_t GetShift() const {
|
||||
return block_shift;
|
||||
}
|
||||
constexpr std::size_t GetNextShift() const {
|
||||
return next_block_shift;
|
||||
}
|
||||
constexpr std::size_t GetSize() const {
|
||||
return static_cast<std::size_t>(1) << GetShift();
|
||||
}
|
||||
constexpr std::size_t GetNumPages() const {
|
||||
return GetSize() / PageSize;
|
||||
}
|
||||
constexpr std::size_t GetNumFreeBlocks() const {
|
||||
return bitmap.GetNumBits();
|
||||
}
|
||||
constexpr std::size_t GetNumFreePages() const {
|
||||
return GetNumFreeBlocks() * GetNumPages();
|
||||
}
|
||||
|
||||
u64* Initialize(VAddr addr, std::size_t size, std::size_t bs, std::size_t nbs,
|
||||
u64* bit_storage) {
|
||||
// Set shifts
|
||||
block_shift = bs;
|
||||
next_block_shift = nbs;
|
||||
|
||||
// Align up the address
|
||||
VAddr end{addr + size};
|
||||
const auto align{(next_block_shift != 0) ? (1ULL << next_block_shift)
|
||||
: (1ULL << block_shift)};
|
||||
addr = Common::AlignDown((addr), align);
|
||||
end = Common::AlignUp((end), align);
|
||||
|
||||
heap_address = addr;
|
||||
end_offset = (end - addr) / (1ULL << block_shift);
|
||||
return bitmap.Initialize(bit_storage, end_offset);
|
||||
}
|
||||
|
||||
VAddr PushBlock(VAddr address) {
|
||||
// Set the bit for the free block
|
||||
std::size_t offset{(address - heap_address) >> GetShift()};
|
||||
bitmap.SetBit(offset);
|
||||
|
||||
// If we have a next shift, try to clear the blocks below and return the address
|
||||
if (GetNextShift()) {
|
||||
const auto diff{1ULL << (GetNextShift() - GetShift())};
|
||||
offset = Common::AlignDown(offset, diff);
|
||||
if (bitmap.ClearRange(offset, diff)) {
|
||||
return heap_address + (offset << GetShift());
|
||||
}
|
||||
}
|
||||
|
||||
// We couldn't coalesce, or we're already as big as possible
|
||||
return 0;
|
||||
}
|
||||
|
||||
VAddr PopBlock(bool random) {
|
||||
// Find a free block
|
||||
const s64 soffset{bitmap.FindFreeBlock(random)};
|
||||
if (soffset < 0) {
|
||||
return 0;
|
||||
}
|
||||
const auto offset{static_cast<std::size_t>(soffset)};
|
||||
|
||||
// Update our tracking and return it
|
||||
bitmap.ClearBit(offset);
|
||||
return heap_address + (offset << GetShift());
|
||||
}
|
||||
|
||||
public:
|
||||
static constexpr std::size_t CalculateManagementOverheadSize(std::size_t region_size,
|
||||
std::size_t cur_block_shift,
|
||||
std::size_t next_block_shift) {
|
||||
const auto cur_block_size{(1ULL << cur_block_shift)};
|
||||
const auto next_block_size{(1ULL << next_block_shift)};
|
||||
const auto align{(next_block_shift != 0) ? next_block_size : cur_block_size};
|
||||
return KPageBitmap::CalculateManagementOverheadSize(
|
||||
(align * 2 + Common::AlignUp(region_size, align)) / cur_block_size);
|
||||
}
|
||||
};
|
||||
|
||||
public:
|
||||
KPageHeap() = default;
|
||||
|
||||
constexpr VAddr GetAddress() const {
|
||||
return heap_address;
|
||||
}
|
||||
constexpr std::size_t GetSize() const {
|
||||
return heap_size;
|
||||
}
|
||||
constexpr VAddr GetEndAddress() const {
|
||||
return GetAddress() + GetSize();
|
||||
}
|
||||
constexpr std::size_t GetPageOffset(VAddr block) const {
|
||||
return (block - GetAddress()) / PageSize;
|
||||
}
|
||||
|
||||
void Initialize(VAddr heap_address, std::size_t heap_size, std::size_t metadata_size);
|
||||
VAddr AllocateBlock(s32 index, bool random);
|
||||
void Free(VAddr addr, std::size_t num_pages);
|
||||
|
||||
void UpdateUsedSize() {
|
||||
used_size = heap_size - (GetNumFreePages() * PageSize);
|
||||
}
|
||||
|
||||
static std::size_t CalculateManagementOverheadSize(std::size_t region_size);
|
||||
|
||||
private:
|
||||
constexpr std::size_t GetNumFreePages() const {
|
||||
std::size_t num_free{};
|
||||
|
||||
for (const auto& block : blocks) {
|
||||
num_free += block.GetNumFreePages();
|
||||
}
|
||||
|
||||
return num_free;
|
||||
}
|
||||
|
||||
void FreeBlock(VAddr block, s32 index);
|
||||
|
||||
VAddr heap_address{};
|
||||
std::size_t heap_size{};
|
||||
std::size_t used_size{};
|
||||
std::array<Block, NumMemoryBlockPageShifts> blocks{};
|
||||
std::vector<u64> metadata;
|
||||
};
|
||||
|
||||
} // namespace Kernel
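The block-index helpers above map a request in pages onto the fixed shift table {0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E}. A few compile-time checks, assuming PageSize == 0x1000, illustrate the mapping; this is a sketch, not part of the change set.

// Hedged sketch: expected block-index behaviour, assuming PageSize == 0x1000.
#include "core/hle/kernel/k_page_heap.h"

namespace {
using Kernel::KPageHeap;

// Shift 0xC -> 4 KiB blocks (1 page); shift 0x10 -> 64 KiB blocks (16 pages).
static_assert(KPageHeap::GetBlockNumPages(0) == 1);
static_assert(KPageHeap::GetBlockNumPages(1) == 16);

// GetBlockIndex returns the largest block size the request can fully populate...
static_assert(KPageHeap::GetBlockIndex(1) == 0);
static_assert(KPageHeap::GetBlockIndex(16) == 1);
static_assert(KPageHeap::GetBlockIndex(17) == 1);

// ...while GetAlignedBlockIndex returns the smallest block covering both size and alignment.
static_assert(KPageHeap::GetAlignedBlockIndex(17, 1) == 2);
} // namespace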
|
|
@ -8,12 +8,12 @@
|
|||
|
||||
#include "common/assert.h"
|
||||
#include "common/common_types.h"
|
||||
#include "core/hle/kernel/memory/memory_types.h"
|
||||
#include "core/hle/kernel/memory_types.h"
|
||||
#include "core/hle/result.h"
|
||||
|
||||
namespace Kernel::Memory {
|
||||
namespace Kernel {
|
||||
|
||||
class PageLinkedList final {
|
||||
class KPageLinkedList final {
|
||||
public:
|
||||
class Node final {
|
||||
public:
|
||||
|
@ -33,8 +33,8 @@ public:
|
|||
};
|
||||
|
||||
public:
|
||||
PageLinkedList() = default;
|
||||
PageLinkedList(u64 address, u64 num_pages) {
|
||||
KPageLinkedList() = default;
|
||||
KPageLinkedList(u64 address, u64 num_pages) {
|
||||
ASSERT(AddBlock(address, num_pages).IsSuccess());
|
||||
}
|
||||
|
||||
|
@ -54,7 +54,7 @@ public:
|
|||
return num_pages;
|
||||
}
|
||||
|
||||
bool IsEqual(PageLinkedList& other) const {
|
||||
bool IsEqual(KPageLinkedList& other) const {
|
||||
auto this_node = nodes.begin();
|
||||
auto other_node = other.nodes.begin();
|
||||
while (this_node != nodes.end() && other_node != other.nodes.end()) {
|
||||
|
@ -89,4 +89,4 @@ private:
|
|||
std::list<Node> nodes;
|
||||
};
|
||||
|
||||
} // namespace Kernel::Memory
|
||||
} // namespace Kernel
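KPageLinkedList describes a group of physically contiguous page runs, one node per run. A short, hedged sketch of building a two-run group, assuming AddBlock appends a run and GetNumPages sums them; the addresses and counts are illustrative.

// Hedged sketch, illustration only.
#include "core/hle/kernel/k_page_linked_list.h"

void PageGroupSketch() {
    Kernel::KPageLinkedList group(0x80000000ULL, 4);      // one 4-page run
    if (group.AddBlock(0x80004000ULL, 2).IsSuccess()) {    // a second 2-page run
        ASSERT(group.GetNumPages() == 6);
    }
}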
|
File diff suppressed because it is too large
|
@ -10,27 +10,27 @@
|
|||
#include "common/common_types.h"
|
||||
#include "common/page_table.h"
|
||||
#include "core/file_sys/program_metadata.h"
|
||||
#include "core/hle/kernel/memory/memory_block.h"
|
||||
#include "core/hle/kernel/memory/memory_manager.h"
|
||||
#include "core/hle/kernel/k_memory_block.h"
|
||||
#include "core/hle/kernel/k_memory_manager.h"
|
||||
#include "core/hle/result.h"
|
||||
|
||||
namespace Core {
|
||||
class System;
|
||||
}
|
||||
|
||||
namespace Kernel::Memory {
|
||||
namespace Kernel {
|
||||
|
||||
class MemoryBlockManager;
|
||||
class KMemoryBlockManager;
|
||||
|
||||
class PageTable final : NonCopyable {
|
||||
class KPageTable final : NonCopyable {
|
||||
public:
|
||||
explicit PageTable(Core::System& system);
|
||||
explicit KPageTable(Core::System& system);
|
||||
|
||||
ResultCode InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
|
||||
VAddr code_addr, std::size_t code_size,
|
||||
Memory::MemoryManager::Pool pool);
|
||||
ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, MemoryState state,
|
||||
MemoryPermission perm);
|
||||
KMemoryManager::Pool pool);
|
||||
ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
|
||||
KMemoryPermission perm);
|
||||
ResultCode MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
|
||||
ResultCode UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
|
||||
ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
|
||||
|
@ -38,20 +38,20 @@ public:
|
|||
ResultCode UnmapMemory(VAddr addr, std::size_t size);
|
||||
ResultCode Map(VAddr dst_addr, VAddr src_addr, std::size_t size);
|
||||
ResultCode Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size);
|
||||
ResultCode MapPages(VAddr addr, PageLinkedList& page_linked_list, MemoryState state,
|
||||
MemoryPermission perm);
|
||||
ResultCode SetCodeMemoryPermission(VAddr addr, std::size_t size, MemoryPermission perm);
|
||||
MemoryInfo QueryInfo(VAddr addr);
|
||||
ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, MemoryPermission perm);
|
||||
ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state,
|
||||
KMemoryPermission perm);
|
||||
ResultCode SetCodeMemoryPermission(VAddr addr, std::size_t size, KMemoryPermission perm);
|
||||
KMemoryInfo QueryInfo(VAddr addr);
|
||||
ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
|
||||
ResultCode ResetTransferMemory(VAddr addr, std::size_t size);
|
||||
ResultCode SetMemoryAttribute(VAddr addr, std::size_t size, MemoryAttribute mask,
|
||||
MemoryAttribute value);
|
||||
ResultCode SetMemoryAttribute(VAddr addr, std::size_t size, KMemoryAttribute mask,
|
||||
KMemoryAttribute value);
|
||||
ResultCode SetHeapCapacity(std::size_t new_heap_capacity);
|
||||
ResultVal<VAddr> SetHeapSize(std::size_t size);
|
||||
ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
|
||||
bool is_map_only, VAddr region_start,
|
||||
std::size_t region_num_pages, MemoryState state,
|
||||
MemoryPermission perm, PAddr map_addr = 0);
|
||||
std::size_t region_num_pages, KMemoryState state,
|
||||
KMemoryPermission perm, PAddr map_addr = 0);
|
||||
ResultCode LockForDeviceAddressSpace(VAddr addr, std::size_t size);
|
||||
ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
|
||||
|
||||
|
@ -72,47 +72,49 @@ private:
|
|||
ChangePermissionsAndRefresh,
|
||||
};
|
||||
|
||||
static constexpr MemoryAttribute DefaultMemoryIgnoreAttr =
|
||||
MemoryAttribute::DontCareMask | MemoryAttribute::IpcLocked | MemoryAttribute::DeviceShared;
|
||||
static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = KMemoryAttribute::DontCareMask |
|
||||
KMemoryAttribute::IpcLocked |
|
||||
KMemoryAttribute::DeviceShared;
|
||||
|
||||
ResultCode InitializeMemoryLayout(VAddr start, VAddr end);
|
||||
ResultCode MapPages(VAddr addr, const PageLinkedList& page_linked_list, MemoryPermission perm);
|
||||
void MapPhysicalMemory(PageLinkedList& page_linked_list, VAddr start, VAddr end);
|
||||
ResultCode MapPages(VAddr addr, const KPageLinkedList& page_linked_list,
|
||||
KMemoryPermission perm);
|
||||
void MapPhysicalMemory(KPageLinkedList& page_linked_list, VAddr start, VAddr end);
|
||||
bool IsRegionMapped(VAddr address, u64 size);
|
||||
bool IsRegionContiguous(VAddr addr, u64 size) const;
|
||||
void AddRegionToPages(VAddr start, std::size_t num_pages, PageLinkedList& page_linked_list);
|
||||
MemoryInfo QueryInfoImpl(VAddr addr);
|
||||
void AddRegionToPages(VAddr start, std::size_t num_pages, KPageLinkedList& page_linked_list);
|
||||
KMemoryInfo QueryInfoImpl(VAddr addr);
|
||||
VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages,
|
||||
std::size_t align);
|
||||
ResultCode Operate(VAddr addr, std::size_t num_pages, const PageLinkedList& page_group,
|
||||
ResultCode Operate(VAddr addr, std::size_t num_pages, const KPageLinkedList& page_group,
|
||||
OperationType operation);
|
||||
ResultCode Operate(VAddr addr, std::size_t num_pages, MemoryPermission perm,
|
||||
ResultCode Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
|
||||
OperationType operation, PAddr map_addr = 0);
|
||||
constexpr VAddr GetRegionAddress(MemoryState state) const;
|
||||
constexpr std::size_t GetRegionSize(MemoryState state) const;
|
||||
constexpr bool CanContain(VAddr addr, std::size_t size, MemoryState state) const;
|
||||
constexpr VAddr GetRegionAddress(KMemoryState state) const;
|
||||
constexpr std::size_t GetRegionSize(KMemoryState state) const;
|
||||
constexpr bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const;
|
||||
|
||||
constexpr ResultCode CheckMemoryState(const MemoryInfo& info, MemoryState state_mask,
|
||||
MemoryState state, MemoryPermission perm_mask,
|
||||
MemoryPermission perm, MemoryAttribute attr_mask,
|
||||
MemoryAttribute attr) const;
|
||||
ResultCode CheckMemoryState(MemoryState* out_state, MemoryPermission* out_perm,
|
||||
MemoryAttribute* out_attr, VAddr addr, std::size_t size,
|
||||
MemoryState state_mask, MemoryState state,
|
||||
MemoryPermission perm_mask, MemoryPermission perm,
|
||||
MemoryAttribute attr_mask, MemoryAttribute attr,
|
||||
MemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr);
|
||||
ResultCode CheckMemoryState(VAddr addr, std::size_t size, MemoryState state_mask,
|
||||
MemoryState state, MemoryPermission perm_mask,
|
||||
MemoryPermission perm, MemoryAttribute attr_mask,
|
||||
MemoryAttribute attr,
|
||||
MemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) {
|
||||
constexpr ResultCode CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
|
||||
KMemoryState state, KMemoryPermission perm_mask,
|
||||
KMemoryPermission perm, KMemoryAttribute attr_mask,
|
||||
KMemoryAttribute attr) const;
|
||||
ResultCode CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
|
||||
KMemoryAttribute* out_attr, VAddr addr, std::size_t size,
|
||||
KMemoryState state_mask, KMemoryState state,
|
||||
KMemoryPermission perm_mask, KMemoryPermission perm,
|
||||
KMemoryAttribute attr_mask, KMemoryAttribute attr,
|
||||
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr);
|
||||
ResultCode CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask,
|
||||
KMemoryState state, KMemoryPermission perm_mask,
|
||||
KMemoryPermission perm, KMemoryAttribute attr_mask,
|
||||
KMemoryAttribute attr,
|
||||
KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) {
|
||||
return CheckMemoryState(nullptr, nullptr, nullptr, addr, size, state_mask, state, perm_mask,
|
||||
perm, attr_mask, attr, ignore_attr);
|
||||
}
|
||||
|
||||
std::recursive_mutex page_table_lock;
|
||||
std::unique_ptr<MemoryBlockManager> block_manager;
|
||||
std::unique_ptr<KMemoryBlockManager> block_manager;
|
||||
|
||||
public:
|
||||
constexpr VAddr GetAddressSpaceStart() const {
|
||||
|
@ -212,7 +214,7 @@ public:
|
|||
return !IsOutsideASLRRegion(address, size);
|
||||
}
|
||||
constexpr PAddr GetPhysicalAddr(VAddr addr) {
|
||||
return page_table_impl.backing_addr[addr >> Memory::PageBits] + addr;
|
||||
return page_table_impl.backing_addr[addr >> PageBits] + addr;
|
||||
}
|
||||
|
||||
private:
|
||||
|
@ -267,11 +269,11 @@ private:
|
|||
bool is_kernel{};
|
||||
bool is_aslr_enabled{};
|
||||
|
||||
MemoryManager::Pool memory_pool{MemoryManager::Pool::Application};
|
||||
KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application};
|
||||
|
||||
Common::PageTable page_table_impl;
|
||||
|
||||
Core::System& system;
|
||||
};
|
||||
|
||||
} // namespace Kernel::Memory
|
||||
} // namespace Kernel
|
|
@ -4,33 +4,32 @@
|
|||
|
||||
#include "common/assert.h"
|
||||
#include "core/core.h"
|
||||
#include "core/hle/kernel/k_page_table.h"
|
||||
#include "core/hle/kernel/k_scoped_resource_reservation.h"
|
||||
#include "core/hle/kernel/k_shared_memory.h"
|
||||
#include "core/hle/kernel/kernel.h"
|
||||
#include "core/hle/kernel/memory/page_table.h"
|
||||
#include "core/hle/kernel/shared_memory.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
SharedMemory::SharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory)
|
||||
KSharedMemory::KSharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory)
|
||||
: Object{kernel}, device_memory{device_memory} {}
|
||||
|
||||
SharedMemory::~SharedMemory() {
|
||||
KSharedMemory::~KSharedMemory() {
|
||||
kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemory, size);
|
||||
}
|
||||
|
||||
std::shared_ptr<SharedMemory> SharedMemory::Create(
|
||||
std::shared_ptr<KSharedMemory> KSharedMemory::Create(
|
||||
KernelCore& kernel, Core::DeviceMemory& device_memory, Process* owner_process,
|
||||
Memory::PageLinkedList&& page_list, Memory::MemoryPermission owner_permission,
|
||||
Memory::MemoryPermission user_permission, PAddr physical_address, std::size_t size,
|
||||
std::string name) {
|
||||
KPageLinkedList&& page_list, KMemoryPermission owner_permission,
|
||||
KMemoryPermission user_permission, PAddr physical_address, std::size_t size, std::string name) {
|
||||
|
||||
const auto resource_limit = kernel.GetSystemResourceLimit();
|
||||
KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory,
|
||||
size);
|
||||
ASSERT(memory_reservation.Succeeded());
|
||||
|
||||
std::shared_ptr<SharedMemory> shared_memory{
|
||||
std::make_shared<SharedMemory>(kernel, device_memory)};
|
||||
std::shared_ptr<KSharedMemory> shared_memory{
|
||||
std::make_shared<KSharedMemory>(kernel, device_memory)};
|
||||
|
||||
shared_memory->owner_process = owner_process;
|
||||
shared_memory->page_list = std::move(page_list);
|
||||
|
@ -44,22 +43,22 @@ std::shared_ptr<SharedMemory> SharedMemory::Create(
|
|||
return shared_memory;
|
||||
}
|
||||
|
||||
ResultCode SharedMemory::Map(Process& target_process, VAddr address, std::size_t size,
|
||||
Memory::MemoryPermission permissions) {
|
||||
const u64 page_count{(size + Memory::PageSize - 1) / Memory::PageSize};
|
||||
ResultCode KSharedMemory::Map(Process& target_process, VAddr address, std::size_t size,
|
||||
KMemoryPermission permissions) {
|
||||
const u64 page_count{(size + PageSize - 1) / PageSize};
|
||||
|
||||
if (page_list.GetNumPages() != page_count) {
|
||||
UNIMPLEMENTED_MSG("Page count does not match");
|
||||
}
|
||||
|
||||
const Memory::MemoryPermission expected =
|
||||
const KMemoryPermission expected =
|
||||
&target_process == owner_process ? owner_permission : user_permission;
|
||||
|
||||
if (permissions != expected) {
|
||||
UNIMPLEMENTED_MSG("Permission does not match");
|
||||
}
|
||||
|
||||
return target_process.PageTable().MapPages(address, page_list, Memory::MemoryState::Shared,
|
||||
return target_process.PageTable().MapPages(address, page_list, KMemoryState::Shared,
|
||||
permissions);
|
||||
}
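The page count above is a ceiling division, so any partial trailing page still maps as a full page. A tiny illustrative check, assuming PageSize == 0x1000; the helper name is hypothetical.

// Hedged sketch of the rounding used for page_count above; not part of the change set.
#include "common/common_types.h"

constexpr u64 CeilPages(u64 size) {
    return (size + 0x1000 - 1) / 0x1000;
}
static_assert(CeilPages(0x1000) == 1); // an exact page stays one page
static_assert(CeilPages(0x1001) == 2); // one byte past the boundary costs a whole extra page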
|
||||
|
|
@ -9,8 +9,8 @@
|
|||
|
||||
#include "common/common_types.h"
|
||||
#include "core/device_memory.h"
|
||||
#include "core/hle/kernel/memory/memory_block.h"
|
||||
#include "core/hle/kernel/memory/page_linked_list.h"
|
||||
#include "core/hle/kernel/k_memory_block.h"
|
||||
#include "core/hle/kernel/k_page_linked_list.h"
|
||||
#include "core/hle/kernel/object.h"
|
||||
#include "core/hle/kernel/process.h"
|
||||
#include "core/hle/result.h"
|
||||
|
@ -19,15 +19,15 @@ namespace Kernel {
|
|||
|
||||
class KernelCore;
|
||||
|
||||
class SharedMemory final : public Object {
|
||||
class KSharedMemory final : public Object {
|
||||
public:
|
||||
explicit SharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory);
|
||||
~SharedMemory() override;
|
||||
explicit KSharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory);
|
||||
~KSharedMemory() override;
|
||||
|
||||
static std::shared_ptr<SharedMemory> Create(
|
||||
static std::shared_ptr<KSharedMemory> Create(
|
||||
KernelCore& kernel, Core::DeviceMemory& device_memory, Process* owner_process,
|
||||
Memory::PageLinkedList&& page_list, Memory::MemoryPermission owner_permission,
|
||||
Memory::MemoryPermission user_permission, PAddr physical_address, std::size_t size,
|
||||
KPageLinkedList&& page_list, KMemoryPermission owner_permission,
|
||||
KMemoryPermission user_permission, PAddr physical_address, std::size_t size,
|
||||
std::string name);
|
||||
|
||||
std::string GetTypeName() const override {
|
||||
|
@ -51,7 +51,7 @@ public:
|
|||
* @param permissions Memory block map permissions (specified by SVC field)
|
||||
*/
|
||||
ResultCode Map(Process& target_process, VAddr address, std::size_t size,
|
||||
Memory::MemoryPermission permissions);
|
||||
KMemoryPermission permissions);
|
||||
|
||||
/**
|
||||
* Gets a pointer to the shared memory block
|
||||
|
@ -76,9 +76,9 @@ public:
|
|||
private:
|
||||
Core::DeviceMemory& device_memory;
|
||||
Process* owner_process{};
|
||||
Memory::PageLinkedList page_list;
|
||||
Memory::MemoryPermission owner_permission{};
|
||||
Memory::MemoryPermission user_permission{};
|
||||
KPageLinkedList page_list;
|
||||
KMemoryPermission owner_permission{};
|
||||
KMemoryPermission user_permission{};
|
||||
PAddr physical_address{};
|
||||
std::size_t size{};
|
||||
std::string name;
|
|
@ -2,9 +2,6 @@
|
|||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
// This file references various implementation details from Atmosphere, an open-source firmware for
|
||||
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <atomic>
|
||||
|
@ -12,17 +9,17 @@
|
|||
#include "common/assert.h"
|
||||
#include "common/common_types.h"
|
||||
|
||||
namespace Kernel::Memory {
|
||||
namespace Kernel {
|
||||
|
||||
namespace impl {
|
||||
|
||||
class SlabHeapImpl final : NonCopyable {
|
||||
class KSlabHeapImpl final : NonCopyable {
|
||||
public:
|
||||
struct Node {
|
||||
Node* next{};
|
||||
};
|
||||
|
||||
constexpr SlabHeapImpl() = default;
|
||||
constexpr KSlabHeapImpl() = default;
|
||||
|
||||
void Initialize(std::size_t size) {
|
||||
ASSERT(head == nullptr);
|
||||
|
@ -65,9 +62,9 @@ private:
|
|||
|
||||
} // namespace impl
|
||||
|
||||
class SlabHeapBase : NonCopyable {
|
||||
class KSlabHeapBase : NonCopyable {
|
||||
public:
|
||||
constexpr SlabHeapBase() = default;
|
||||
constexpr KSlabHeapBase() = default;
|
||||
|
||||
constexpr bool Contains(uintptr_t addr) const {
|
||||
return start <= addr && addr < end;
|
||||
|
@ -126,7 +123,7 @@ public:
|
|||
}
|
||||
|
||||
private:
|
||||
using Impl = impl::SlabHeapImpl;
|
||||
using Impl = impl::KSlabHeapImpl;
|
||||
|
||||
Impl impl;
|
||||
uintptr_t peak{};
|
||||
|
@ -135,9 +132,9 @@ private:
|
|||
};
|
||||
|
||||
template <typename T>
|
||||
class SlabHeap final : public SlabHeapBase {
|
||||
class KSlabHeap final : public KSlabHeapBase {
|
||||
public:
|
||||
constexpr SlabHeap() : SlabHeapBase() {}
|
||||
constexpr KSlabHeap() : KSlabHeapBase() {}
|
||||
|
||||
void Initialize(void* memory, std::size_t memory_size) {
|
||||
InitializeImpl(sizeof(T), memory, memory_size);
|
||||
|
@ -160,4 +157,4 @@ public:
|
|||
}
|
||||
};
|
||||
|
||||
} // namespace Kernel::Memory
|
||||
} // namespace Kernel
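KSlabHeap carves a fixed backing region into sizeof(T)-sized cells threaded onto the intrusive free list in KSlabHeapImpl. A minimal sketch of initializing one over a static buffer, assuming Kernel::Page and Kernel::PageSize come from memory_types.h as used elsewhere in this change; allocation itself goes through the free-list Impl and is not shown.

// Hedged sketch, illustration only.
#include <array>
#include "common/assert.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_slab_heap.h"
#include "core/hle/kernel/memory_types.h"

namespace {
alignas(Kernel::PageSize) std::array<u8, 16 * Kernel::PageSize> slab_backing{};

void SlabHeapSketch() {
    Kernel::KSlabHeap<Kernel::Page> heap;
    heap.Initialize(slab_backing.data(), slab_backing.size());

    // Addresses inside the backing region belong to the heap; others do not.
    ASSERT(heap.Contains(reinterpret_cast<uintptr_t>(slab_backing.data())));
    ASSERT(!heap.Contains(0));
}
} // namespace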
|
|
@ -0,0 +1,54 @@
|
|||
// Copyright 2021 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "core/hle/kernel/k_spin_lock.h"
|
||||
|
||||
#if _MSC_VER
|
||||
#include <intrin.h>
|
||||
#if _M_AMD64
|
||||
#define __x86_64__ 1
|
||||
#endif
|
||||
#if _M_ARM64
|
||||
#define __aarch64__ 1
|
||||
#endif
|
||||
#else
|
||||
#if __x86_64__
|
||||
#include <xmmintrin.h>
|
||||
#endif
|
||||
#endif
|
||||
|
||||
namespace {
|
||||
|
||||
void ThreadPause() {
|
||||
#if __x86_64__
|
||||
_mm_pause();
|
||||
#elif __aarch64__ && _MSC_VER
|
||||
__yield();
|
||||
#elif __aarch64__
|
||||
asm("yield");
|
||||
#endif
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
void KSpinLock::Lock() {
|
||||
while (lck.test_and_set(std::memory_order_acquire)) {
|
||||
ThreadPause();
|
||||
}
|
||||
}
|
||||
|
||||
void KSpinLock::Unlock() {
|
||||
lck.clear(std::memory_order_release);
|
||||
}
|
||||
|
||||
bool KSpinLock::TryLock() {
|
||||
if (lck.test_and_set(std::memory_order_acquire)) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace Kernel
|
|
@ -0,0 +1,33 @@
|
|||
// Copyright 2021 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <atomic>
|
||||
|
||||
#include "core/hle/kernel/k_scoped_lock.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
class KSpinLock {
|
||||
public:
|
||||
KSpinLock() = default;
|
||||
|
||||
KSpinLock(const KSpinLock&) = delete;
|
||||
KSpinLock& operator=(const KSpinLock&) = delete;
|
||||
|
||||
KSpinLock(KSpinLock&&) = delete;
|
||||
KSpinLock& operator=(KSpinLock&&) = delete;
|
||||
|
||||
void Lock();
|
||||
void Unlock();
|
||||
[[nodiscard]] bool TryLock();
|
||||
|
||||
private:
|
||||
std::atomic_flag lck = ATOMIC_FLAG_INIT;
|
||||
};
|
||||
|
||||
using KScopedSpinLock = KScopedLock<KSpinLock>;
|
||||
|
||||
} // namespace Kernel
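KScopedSpinLock combines the spin lock with the existing KScopedLock RAII wrapper. A hedged sketch of guarding a shared counter, assuming KScopedLock accepts a lock reference and locks in its constructor and unlocks in its destructor; shared_counter and Increment are illustrative names.

// Hedged sketch, illustration only.
#include "common/common_types.h"
#include "core/hle/kernel/k_spin_lock.h"

namespace {
Kernel::KSpinLock counter_lock;
u64 shared_counter{};

void Increment() {
    // Lock() spins on test_and_set with a pause/yield hint; the guard calls Unlock() on scope exit.
    Kernel::KScopedSpinLock lk{counter_lock};
    ++shared_counter;
}
} // namespace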
|
|
@ -1,12 +1,13 @@
|
|||
// Copyright 2020 yuzu Emulator Project
|
||||
// Copyright 2021 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <random>
|
||||
|
||||
#include "core/hle/kernel/memory/system_control.h"
|
||||
#include "core/hle/kernel/k_system_control.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
namespace Kernel::Memory::SystemControl {
|
||||
namespace {
|
||||
template <typename F>
|
||||
u64 GenerateUniformRange(u64 min, u64 max, F f) {
|
||||
|
@ -25,16 +26,17 @@ u64 GenerateUniformRange(u64 min, u64 max, F f) {
|
|||
}
|
||||
}
|
||||
|
||||
u64 GenerateRandomU64ForInit() {
|
||||
} // Anonymous namespace
|
||||
|
||||
u64 KSystemControl::GenerateRandomU64() {
|
||||
static std::random_device device;
|
||||
static std::mt19937 gen(device());
|
||||
static std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max());
|
||||
return distribution(gen);
|
||||
}
|
||||
} // Anonymous namespace
|
||||
|
||||
u64 GenerateRandomRange(u64 min, u64 max) {
|
||||
return GenerateUniformRange(min, max, GenerateRandomU64ForInit);
|
||||
u64 KSystemControl::GenerateRandomRange(u64 min, u64 max) {
|
||||
return GenerateUniformRange(min, max, GenerateRandomU64);
|
||||
}
|
||||
|
||||
} // namespace Kernel::Memory::SystemControl
|
||||
} // namespace Kernel
|
|
@ -0,0 +1,19 @@
|
|||
// Copyright 2021 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "common/common_types.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
class KSystemControl {
|
||||
public:
|
||||
KSystemControl() = default;
|
||||
|
||||
static u64 GenerateRandomRange(u64 min, u64 max);
|
||||
static u64 GenerateRandomU64();
|
||||
};
|
||||
|
||||
} // namespace Kernel
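GenerateRandomRange draws uniformly between min and max; a typical use for such a helper is ASLR-style placement. A hedged sketch, assuming the usual inclusive [min, max] contract; all names below are illustrative.

// Hedged sketch: choose a random page-aligned start for an allocation inside a region.
#include "common/common_types.h"
#include "core/hle/kernel/k_system_control.h"

u64 PickRandomRegionStart(u64 region_start, u64 region_size, u64 alloc_size, u64 page_size) {
    // Number of page-aligned positions that keep the allocation inside the region.
    const u64 slots = (region_size - alloc_size) / page_size;
    if (slots == 0) {
        return region_start;
    }
    return region_start + Kernel::KSystemControl::GenerateRandomRange(0, slots) * page_size;
}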
|
|
@ -20,13 +20,13 @@
|
|||
#include "core/hardware_properties.h"
|
||||
#include "core/hle/kernel/handle_table.h"
|
||||
#include "core/hle/kernel/k_condition_variable.h"
|
||||
#include "core/hle/kernel/k_memory_layout.h"
|
||||
#include "core/hle/kernel/k_resource_limit.h"
|
||||
#include "core/hle/kernel/k_scheduler.h"
|
||||
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
|
||||
#include "core/hle/kernel/k_thread.h"
|
||||
#include "core/hle/kernel/k_thread_queue.h"
|
||||
#include "core/hle/kernel/kernel.h"
|
||||
#include "core/hle/kernel/memory/memory_layout.h"
|
||||
#include "core/hle/kernel/object.h"
|
||||
#include "core/hle/kernel/process.h"
|
||||
#include "core/hle/kernel/svc_results.h"
|
||||
|
@ -782,7 +782,7 @@ void KThread::AddWaiterImpl(KThread* thread) {
|
|||
}
|
||||
|
||||
// Keep track of how many kernel waiters we have.
|
||||
if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
|
||||
if (IsKernelAddressKey(thread->GetAddressKey())) {
|
||||
ASSERT((num_kernel_waiters++) >= 0);
|
||||
}
|
||||
|
||||
|
@ -795,7 +795,7 @@ void KThread::RemoveWaiterImpl(KThread* thread) {
|
|||
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
|
||||
|
||||
// Keep track of how many kernel waiters we have.
|
||||
if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
|
||||
if (IsKernelAddressKey(thread->GetAddressKey())) {
|
||||
ASSERT((num_kernel_waiters--) > 0);
|
||||
}
|
||||
|
||||
|
@ -870,7 +870,7 @@ KThread* KThread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
|
|||
KThread* thread = std::addressof(*it);
|
||||
|
||||
// Keep track of how many kernel waiters we have.
|
||||
if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
|
||||
if (IsKernelAddressKey(thread->GetAddressKey())) {
|
||||
ASSERT((num_kernel_waiters--) > 0);
|
||||
}
|
||||
it = waiter_list.erase(it);
|
||||
|
|
|
@ -27,17 +27,17 @@
|
|||
#include "core/hardware_properties.h"
|
||||
#include "core/hle/kernel/client_port.h"
|
||||
#include "core/hle/kernel/handle_table.h"
|
||||
#include "core/hle/kernel/k_memory_layout.h"
|
||||
#include "core/hle/kernel/k_memory_manager.h"
|
||||
#include "core/hle/kernel/k_resource_limit.h"
|
||||
#include "core/hle/kernel/k_scheduler.h"
|
||||
#include "core/hle/kernel/k_shared_memory.h"
|
||||
#include "core/hle/kernel/k_slab_heap.h"
|
||||
#include "core/hle/kernel/k_thread.h"
|
||||
#include "core/hle/kernel/kernel.h"
|
||||
#include "core/hle/kernel/memory/memory_layout.h"
|
||||
#include "core/hle/kernel/memory/memory_manager.h"
|
||||
#include "core/hle/kernel/memory/slab_heap.h"
|
||||
#include "core/hle/kernel/physical_core.h"
|
||||
#include "core/hle/kernel/process.h"
|
||||
#include "core/hle/kernel/service_thread.h"
|
||||
#include "core/hle/kernel/shared_memory.h"
|
||||
#include "core/hle/kernel/svc_results.h"
|
||||
#include "core/hle/kernel/time_manager.h"
|
||||
#include "core/hle/lock.h"
|
||||
|
@ -271,7 +271,7 @@ struct KernelCore::Impl {
|
|||
|
||||
void InitializeMemoryLayout() {
|
||||
// Initialize memory layout
|
||||
constexpr Memory::MemoryLayout layout{Memory::MemoryLayout::GetDefaultLayout()};
|
||||
constexpr KMemoryLayout layout{KMemoryLayout::GetDefaultLayout()};
|
||||
constexpr std::size_t hid_size{0x40000};
|
||||
constexpr std::size_t font_size{0x1100000};
|
||||
constexpr std::size_t irs_size{0x8000};
|
||||
|
@ -282,36 +282,36 @@ struct KernelCore::Impl {
|
|||
constexpr PAddr time_addr{layout.System().StartAddress() + hid_size + font_size + irs_size};
|
||||
|
||||
// Initialize memory manager
|
||||
memory_manager = std::make_unique<Memory::MemoryManager>();
|
||||
memory_manager->InitializeManager(Memory::MemoryManager::Pool::Application,
|
||||
memory_manager = std::make_unique<KMemoryManager>();
|
||||
memory_manager->InitializeManager(KMemoryManager::Pool::Application,
|
||||
layout.Application().StartAddress(),
|
||||
layout.Application().EndAddress());
|
||||
memory_manager->InitializeManager(Memory::MemoryManager::Pool::Applet,
|
||||
memory_manager->InitializeManager(KMemoryManager::Pool::Applet,
|
||||
layout.Applet().StartAddress(),
|
||||
layout.Applet().EndAddress());
|
||||
memory_manager->InitializeManager(Memory::MemoryManager::Pool::System,
|
||||
memory_manager->InitializeManager(KMemoryManager::Pool::System,
|
||||
layout.System().StartAddress(),
|
||||
layout.System().EndAddress());
|
||||
|
||||
hid_shared_mem = Kernel::SharedMemory::Create(
|
||||
system.Kernel(), system.DeviceMemory(), nullptr,
|
||||
{hid_addr, hid_size / Memory::PageSize}, Memory::MemoryPermission::None,
|
||||
Memory::MemoryPermission::Read, hid_addr, hid_size, "HID:SharedMemory");
|
||||
font_shared_mem = Kernel::SharedMemory::Create(
|
||||
system.Kernel(), system.DeviceMemory(), nullptr,
|
||||
{font_pa, font_size / Memory::PageSize}, Memory::MemoryPermission::None,
|
||||
Memory::MemoryPermission::Read, font_pa, font_size, "Font:SharedMemory");
|
||||
irs_shared_mem = Kernel::SharedMemory::Create(
|
||||
system.Kernel(), system.DeviceMemory(), nullptr,
|
||||
{irs_addr, irs_size / Memory::PageSize}, Memory::MemoryPermission::None,
|
||||
Memory::MemoryPermission::Read, irs_addr, irs_size, "IRS:SharedMemory");
|
||||
time_shared_mem = Kernel::SharedMemory::Create(
|
||||
system.Kernel(), system.DeviceMemory(), nullptr,
|
||||
{time_addr, time_size / Memory::PageSize}, Memory::MemoryPermission::None,
|
||||
Memory::MemoryPermission::Read, time_addr, time_size, "Time:SharedMemory");
|
||||
hid_shared_mem = Kernel::KSharedMemory::Create(
|
||||
system.Kernel(), system.DeviceMemory(), nullptr, {hid_addr, hid_size / PageSize},
|
||||
KMemoryPermission::None, KMemoryPermission::Read, hid_addr, hid_size,
|
||||
"HID:SharedMemory");
|
||||
font_shared_mem = Kernel::KSharedMemory::Create(
|
||||
system.Kernel(), system.DeviceMemory(), nullptr, {font_pa, font_size / PageSize},
|
||||
KMemoryPermission::None, KMemoryPermission::Read, font_pa, font_size,
|
||||
"Font:SharedMemory");
|
||||
irs_shared_mem = Kernel::KSharedMemory::Create(
|
||||
system.Kernel(), system.DeviceMemory(), nullptr, {irs_addr, irs_size / PageSize},
|
||||
KMemoryPermission::None, KMemoryPermission::Read, irs_addr, irs_size,
|
||||
"IRS:SharedMemory");
|
||||
time_shared_mem = Kernel::KSharedMemory::Create(
|
||||
system.Kernel(), system.DeviceMemory(), nullptr, {time_addr, time_size / PageSize},
|
||||
KMemoryPermission::None, KMemoryPermission::Read, time_addr, time_size,
|
||||
"Time:SharedMemory");
|
||||
|
||||
// Allocate slab heaps
|
||||
user_slab_heap_pages = std::make_unique<Memory::SlabHeap<Memory::Page>>();
|
||||
user_slab_heap_pages = std::make_unique<KSlabHeap<Page>>();
|
||||
|
||||
constexpr u64 user_slab_heap_size{0x1ef000};
|
||||
// Reserve slab heaps
|
||||
|
@ -353,14 +353,14 @@ struct KernelCore::Impl {
|
|||
std::atomic<u32> next_host_thread_id{Core::Hardware::NUM_CPU_CORES};
|
||||
|
||||
// Kernel memory management
|
||||
std::unique_ptr<Memory::MemoryManager> memory_manager;
|
||||
std::unique_ptr<Memory::SlabHeap<Memory::Page>> user_slab_heap_pages;
|
||||
std::unique_ptr<KMemoryManager> memory_manager;
|
||||
std::unique_ptr<KSlabHeap<Page>> user_slab_heap_pages;
|
||||
|
||||
// Shared memory for services
|
||||
std::shared_ptr<Kernel::SharedMemory> hid_shared_mem;
|
||||
std::shared_ptr<Kernel::SharedMemory> font_shared_mem;
|
||||
std::shared_ptr<Kernel::SharedMemory> irs_shared_mem;
|
||||
std::shared_ptr<Kernel::SharedMemory> time_shared_mem;
|
||||
std::shared_ptr<Kernel::KSharedMemory> hid_shared_mem;
|
||||
std::shared_ptr<Kernel::KSharedMemory> font_shared_mem;
|
||||
std::shared_ptr<Kernel::KSharedMemory> irs_shared_mem;
|
||||
std::shared_ptr<Kernel::KSharedMemory> time_shared_mem;
|
||||
|
||||
// Threads used for services
|
||||
std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads;
|
||||
|
@ -578,51 +578,51 @@ KThread* KernelCore::GetCurrentEmuThread() const {
|
|||
return impl->GetCurrentEmuThread();
|
||||
}
|
||||
|
||||
Memory::MemoryManager& KernelCore::MemoryManager() {
|
||||
KMemoryManager& KernelCore::MemoryManager() {
|
||||
return *impl->memory_manager;
|
||||
}
|
||||
|
||||
const Memory::MemoryManager& KernelCore::MemoryManager() const {
|
||||
const KMemoryManager& KernelCore::MemoryManager() const {
|
||||
return *impl->memory_manager;
|
||||
}
|
||||
|
||||
Memory::SlabHeap<Memory::Page>& KernelCore::GetUserSlabHeapPages() {
|
||||
KSlabHeap<Page>& KernelCore::GetUserSlabHeapPages() {
|
||||
return *impl->user_slab_heap_pages;
|
||||
}
|
||||
|
||||
const Memory::SlabHeap<Memory::Page>& KernelCore::GetUserSlabHeapPages() const {
|
||||
const KSlabHeap<Page>& KernelCore::GetUserSlabHeapPages() const {
|
||||
return *impl->user_slab_heap_pages;
|
||||
}
|
||||
|
||||
Kernel::SharedMemory& KernelCore::GetHidSharedMem() {
|
||||
Kernel::KSharedMemory& KernelCore::GetHidSharedMem() {
|
||||
return *impl->hid_shared_mem;
|
||||
}
|
||||
|
||||
const Kernel::SharedMemory& KernelCore::GetHidSharedMem() const {
|
||||
const Kernel::KSharedMemory& KernelCore::GetHidSharedMem() const {
|
||||
return *impl->hid_shared_mem;
|
||||
}
|
||||
|
||||
Kernel::SharedMemory& KernelCore::GetFontSharedMem() {
|
||||
Kernel::KSharedMemory& KernelCore::GetFontSharedMem() {
|
||||
return *impl->font_shared_mem;
|
||||
}
|
||||
|
||||
const Kernel::SharedMemory& KernelCore::GetFontSharedMem() const {
|
||||
const Kernel::KSharedMemory& KernelCore::GetFontSharedMem() const {
|
||||
return *impl->font_shared_mem;
|
||||
}
|
||||
|
||||
Kernel::SharedMemory& KernelCore::GetIrsSharedMem() {
|
||||
Kernel::KSharedMemory& KernelCore::GetIrsSharedMem() {
|
||||
return *impl->irs_shared_mem;
|
||||
}
|
||||
|
||||
const Kernel::SharedMemory& KernelCore::GetIrsSharedMem() const {
|
||||
const Kernel::KSharedMemory& KernelCore::GetIrsSharedMem() const {
|
||||
return *impl->irs_shared_mem;
|
||||
}
|
||||
|
||||
Kernel::SharedMemory& KernelCore::GetTimeSharedMem() {
|
||||
Kernel::KSharedMemory& KernelCore::GetTimeSharedMem() {
|
||||
return *impl->time_shared_mem;
|
||||
}
|
||||
|
||||
const Kernel::SharedMemory& KernelCore::GetTimeSharedMem() const {
|
||||
const Kernel::KSharedMemory& KernelCore::GetTimeSharedMem() const {
|
||||
return *impl->time_shared_mem;
|
||||
}
|
||||
|
||||
|
|
|
@ -11,7 +11,7 @@
|
|||
#include <vector>
|
||||
#include "core/arm/cpu_interrupt_handler.h"
|
||||
#include "core/hardware_properties.h"
|
||||
#include "core/hle/kernel/memory/memory_types.h"
|
||||
#include "core/hle/kernel/memory_types.h"
|
||||
#include "core/hle/kernel/object.h"
|
||||
|
||||
namespace Core {
|
||||
|
@ -27,25 +27,23 @@ struct EventType;
|
|||
|
||||
namespace Kernel {
|
||||
|
||||
namespace Memory {
|
||||
class MemoryManager;
|
||||
template <typename T>
|
||||
class SlabHeap;
|
||||
} // namespace Memory
|
||||
|
||||
class ClientPort;
|
||||
class GlobalSchedulerContext;
|
||||
class HandleTable;
|
||||
class PhysicalCore;
|
||||
class Process;
|
||||
class KMemoryManager;
|
||||
class KResourceLimit;
|
||||
class KScheduler;
|
||||
class SharedMemory;
|
||||
class KSharedMemory;
|
||||
class KThread;
|
||||
class PhysicalCore;
|
||||
class Process;
|
||||
class ServiceThread;
|
||||
class Synchronization;
|
||||
class KThread;
|
||||
class TimeManager;
|
||||
|
||||
template <typename T>
|
||||
class KSlabHeap;
|
||||
|
||||
using EmuThreadHandle = uintptr_t;
|
||||
constexpr EmuThreadHandle EmuThreadHandleInvalid{};
|
||||
constexpr EmuThreadHandle EmuThreadHandleReserved{1ULL << 63};
|
||||
|
@ -178,40 +176,40 @@ public:
|
|||
void RegisterHostThread();
|
||||
|
||||
/// Gets the virtual memory manager for the kernel.
|
||||
Memory::MemoryManager& MemoryManager();
|
||||
KMemoryManager& MemoryManager();
|
||||
|
||||
/// Gets the virtual memory manager for the kernel.
|
||||
const Memory::MemoryManager& MemoryManager() const;
|
||||
const KMemoryManager& MemoryManager() const;
|
||||
|
||||
/// Gets the slab heap allocated for user space pages.
|
||||
Memory::SlabHeap<Memory::Page>& GetUserSlabHeapPages();
|
||||
KSlabHeap<Page>& GetUserSlabHeapPages();
|
||||
|
||||
/// Gets the slab heap allocated for user space pages.
|
||||
const Memory::SlabHeap<Memory::Page>& GetUserSlabHeapPages() const;
|
||||
const KSlabHeap<Page>& GetUserSlabHeapPages() const;
|
||||
|
||||
/// Gets the shared memory object for HID services.
|
||||
Kernel::SharedMemory& GetHidSharedMem();
|
||||
Kernel::KSharedMemory& GetHidSharedMem();
|
||||
|
||||
/// Gets the shared memory object for HID services.
|
||||
const Kernel::SharedMemory& GetHidSharedMem() const;
|
||||
const Kernel::KSharedMemory& GetHidSharedMem() const;
|
||||
|
||||
/// Gets the shared memory object for font services.
|
||||
Kernel::SharedMemory& GetFontSharedMem();
|
||||
Kernel::KSharedMemory& GetFontSharedMem();
|
||||
|
||||
/// Gets the shared memory object for font services.
|
||||
const Kernel::SharedMemory& GetFontSharedMem() const;
|
||||
const Kernel::KSharedMemory& GetFontSharedMem() const;
|
||||
|
||||
/// Gets the shared memory object for IRS services.
|
||||
Kernel::SharedMemory& GetIrsSharedMem();
|
||||
Kernel::KSharedMemory& GetIrsSharedMem();
|
||||
|
||||
/// Gets the shared memory object for IRS services.
|
||||
const Kernel::SharedMemory& GetIrsSharedMem() const;
|
||||
const Kernel::KSharedMemory& GetIrsSharedMem() const;
|
||||
|
||||
/// Gets the shared memory object for Time services.
|
||||
Kernel::SharedMemory& GetTimeSharedMem();
|
||||
Kernel::KSharedMemory& GetTimeSharedMem();
|
||||
|
||||
/// Gets the shared memory object for Time services.
|
||||
const Kernel::SharedMemory& GetTimeSharedMem() const;
|
||||
const Kernel::KSharedMemory& GetTimeSharedMem() const;
|
||||
|
||||
/// Suspend/unsuspend the OS.
|
||||
void Suspend(bool in_suspention);
|
||||
|
|
|
@ -1,370 +0,0 @@
|
|||
// Copyright 2020 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
// This file references various implementation details from Atmosphere, an open-source firmware for
|
||||
// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
#include <bit>
|
||||
#include <vector>
|
||||
|
||||
#include "common/alignment.h"
|
||||
#include "common/assert.h"
|
||||
#include "common/common_funcs.h"
|
||||
#include "common/common_types.h"
|
||||
#include "core/hle/kernel/memory/memory_types.h"
|
||||
|
||||
namespace Kernel::Memory {
|
||||
|
||||
class PageHeap final : NonCopyable {
|
||||
public:
|
||||
static constexpr s32 GetAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) {
|
||||
const auto target_pages{std::max(num_pages, align_pages)};
|
||||
for (std::size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
|
||||
if (target_pages <=
|
||||
(static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
|
||||
return static_cast<s32>(i);
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
static constexpr s32 GetBlockIndex(std::size_t num_pages) {
|
||||
for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) {
|
||||
if (num_pages >= (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
static constexpr std::size_t GetBlockSize(std::size_t index) {
|
||||
return static_cast<std::size_t>(1) << MemoryBlockPageShifts[index];
|
||||
}
|
||||
|
||||
static constexpr std::size_t GetBlockNumPages(std::size_t index) {
|
||||
return GetBlockSize(index) / PageSize;
|
||||
}
|
||||
|
||||
private:
|
||||
static constexpr std::size_t NumMemoryBlockPageShifts{7};
|
||||
static constexpr std::array<std::size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
|
||||
0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E,
|
||||
};
|
||||
|
||||
class Block final : NonCopyable {
|
||||
private:
|
||||
class Bitmap final : NonCopyable {
|
||||
public:
|
||||
static constexpr std::size_t MaxDepth{4};
|
||||
|
||||
private:
|
||||
std::array<u64*, MaxDepth> bit_storages{};
|
||||
std::size_t num_bits{};
|
||||
std::size_t used_depths{};
|
||||
|
||||
public:
|
||||
constexpr Bitmap() = default;
|
||||
|
||||
constexpr std::size_t GetNumBits() const {
|
||||
return num_bits;
|
||||
}
|
||||
constexpr s32 GetHighestDepthIndex() const {
|
||||
return static_cast<s32>(used_depths) - 1;
|
||||
}
|
||||
|
||||
constexpr u64* Initialize(u64* storage, std::size_t size) {
|
||||
//* Initially, everything is un-set
|
||||
num_bits = 0;
|
||||
|
||||
// Calculate the needed bitmap depth
|
||||
used_depths = static_cast<std::size_t>(GetRequiredDepth(size));
|
||||
ASSERT(used_depths <= MaxDepth);
|
||||
|
||||
// Set the bitmap pointers
|
||||
for (s32 depth{GetHighestDepthIndex()}; depth >= 0; depth--) {
|
||||
bit_storages[depth] = storage;
|
||||
size = Common::AlignUp(size, 64) / 64;
|
||||
storage += size;
|
||||
}
|
||||
|
||||
return storage;
|
||||
}
|
||||
|
||||
s64 FindFreeBlock() const {
|
||||
uintptr_t offset{};
|
||||
s32 depth{};
|
||||
|
||||
do {
|
||||
const u64 v{bit_storages[depth][offset]};
|
||||
if (v == 0) {
|
||||
// Non-zero depth indicates that a previous level had a free block
|
||||
ASSERT(depth == 0);
|
||||
return -1;
|
||||
}
|
||||
offset = offset * 64 + static_cast<u32>(std::countr_zero(v));
|
||||
++depth;
|
||||
} while (depth < static_cast<s32>(used_depths));
|
||||
|
||||
return static_cast<s64>(offset);
|
||||
}
|
||||
|
||||
constexpr void SetBit(std::size_t offset) {
|
||||
SetBit(GetHighestDepthIndex(), offset);
|
||||
num_bits++;
|
||||
}
|
||||
|
||||
constexpr void ClearBit(std::size_t offset) {
|
||||
ClearBit(GetHighestDepthIndex(), offset);
|
||||
num_bits--;
|
||||
}
|
||||
|
||||
constexpr bool ClearRange(std::size_t offset, std::size_t count) {
|
||||
const s32 depth{GetHighestDepthIndex()};
|
||||
const auto bit_ind{offset / 64};
|
||||
u64* bits{bit_storages[depth]};
|
||||
if (count < 64) {
|
||||
const auto shift{offset % 64};
|
||||
ASSERT(shift + count <= 64);
|
||||
// Check that all the bits are set
|
||||
const u64 mask{((1ULL << count) - 1) << shift};
|
||||
u64 v{bits[bit_ind]};
|
||||
if ((v & mask) != mask) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Clear the bits
|
||||
v &= ~mask;
|
||||
bits[bit_ind] = v;
|
||||
if (v == 0) {
|
||||
ClearBit(depth - 1, bit_ind);
|
||||
}
|
||||
} else {
|
||||
ASSERT(offset % 64 == 0);
|
||||
ASSERT(count % 64 == 0);
|
||||
// Check that all the bits are set
|
||||
std::size_t remaining{count};
|
||||
std::size_t i = 0;
|
||||
do {
|
||||
if (bits[bit_ind + i++] != ~u64(0)) {
|
||||
return false;
|
||||
}
|
||||
remaining -= 64;
|
||||
} while (remaining > 0);
|
||||
|
||||
// Clear the bits
|
||||
remaining = count;
|
||||
i = 0;
|
||||
do {
|
||||
bits[bit_ind + i] = 0;
|
||||
ClearBit(depth - 1, bit_ind + i);
|
||||
i++;
|
||||
remaining -= 64;
|
||||
} while (remaining > 0);
|
||||
}
|
||||
|
||||
num_bits -= count;
|
||||
return true;
|
||||
}
|
||||
|
||||
private:
|
||||
constexpr void SetBit(s32 depth, std::size_t offset) {
|
||||
while (depth >= 0) {
|
||||
const auto ind{offset / 64};
|
||||
const auto which{offset % 64};
|
||||
const u64 mask{1ULL << which};
|
||||
|
||||
u64* bit{std::addressof(bit_storages[depth][ind])};
|
||||
const u64 v{*bit};
|
||||
ASSERT((v & mask) == 0);
|
||||
*bit = v | mask;
|
||||
if (v) {
|
||||
break;
|
||||
}
|
||||
offset = ind;
|
||||
depth--;
|
||||
}
|
||||
}
|
||||
|
||||
constexpr void ClearBit(s32 depth, std::size_t offset) {
|
||||
while (depth >= 0) {
|
||||
const auto ind{offset / 64};
|
||||
const auto which{offset % 64};
|
||||
const u64 mask{1ULL << which};
|
||||
|
||||
u64* bit{std::addressof(bit_storages[depth][ind])};
|
||||
u64 v{*bit};
|
||||
ASSERT((v & mask) != 0);
|
||||
v &= ~mask;
|
||||
*bit = v;
|
||||
if (v) {
|
||||
break;
|
||||
}
|
||||
offset = ind;
|
||||
depth--;
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
static constexpr s32 GetRequiredDepth(std::size_t region_size) {
|
||||
s32 depth = 0;
|
||||
while (true) {
|
||||
region_size /= 64;
|
||||
depth++;
|
||||
if (region_size == 0) {
|
||||
return depth;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
static constexpr std::size_t CalculateMetadataOverheadSize(std::size_t region_size) {
|
||||
std::size_t overhead_bits = 0;
|
||||
for (s32 depth{GetRequiredDepth(region_size) - 1}; depth >= 0; depth--) {
|
||||
region_size = Common::AlignUp(region_size, 64) / 64;
|
||||
overhead_bits += region_size;
|
||||
}
|
||||
return overhead_bits * sizeof(u64);
|
||||
}
|
||||
};

private:
    Bitmap bitmap;
    VAddr heap_address{};
    uintptr_t end_offset{};
    std::size_t block_shift{};
    std::size_t next_block_shift{};

public:
    constexpr Block() = default;

    constexpr std::size_t GetShift() const {
        return block_shift;
    }
    constexpr std::size_t GetNextShift() const {
        return next_block_shift;
    }
    constexpr std::size_t GetSize() const {
        return static_cast<std::size_t>(1) << GetShift();
    }
    constexpr std::size_t GetNumPages() const {
        return GetSize() / PageSize;
    }
    constexpr std::size_t GetNumFreeBlocks() const {
        return bitmap.GetNumBits();
    }
    constexpr std::size_t GetNumFreePages() const {
        return GetNumFreeBlocks() * GetNumPages();
    }

    constexpr u64* Initialize(VAddr addr, std::size_t size, std::size_t bs, std::size_t nbs,
                              u64* bit_storage) {
        // Set shifts
        block_shift = bs;
        next_block_shift = nbs;

        // Align up the address
        VAddr end{addr + size};
        const auto align{(next_block_shift != 0) ? (1ULL << next_block_shift)
                                                 : (1ULL << block_shift)};
        addr = Common::AlignDown((addr), align);
        end = Common::AlignUp((end), align);

        heap_address = addr;
        end_offset = (end - addr) / (1ULL << block_shift);
        return bitmap.Initialize(bit_storage, end_offset);
    }

    constexpr VAddr PushBlock(VAddr address) {
        // Set the bit for the free block
        std::size_t offset{(address - heap_address) >> GetShift()};
        bitmap.SetBit(offset);

        // If we have a next shift, try to clear the blocks below and return the address
        if (GetNextShift()) {
            const auto diff{1ULL << (GetNextShift() - GetShift())};
            offset = Common::AlignDown(offset, diff);
            if (bitmap.ClearRange(offset, diff)) {
                return heap_address + (offset << GetShift());
            }
        }

        // We couldn't coalesce, or we're already as big as possible
        return 0;
    }

    VAddr PopBlock() {
        // Find a free block
        const s64 soffset{bitmap.FindFreeBlock()};
        if (soffset < 0) {
            return 0;
        }
        const auto offset{static_cast<std::size_t>(soffset)};

        // Update our tracking and return it
        bitmap.ClearBit(offset);
        return heap_address + (offset << GetShift());
    }

public:
    static constexpr std::size_t CalculateMetadataOverheadSize(std::size_t region_size,
                                                               std::size_t cur_block_shift,
                                                               std::size_t next_block_shift) {
        const auto cur_block_size{(1ULL << cur_block_shift)};
        const auto next_block_size{(1ULL << next_block_shift)};
        const auto align{(next_block_shift != 0) ? next_block_size : cur_block_size};
        return Bitmap::CalculateMetadataOverheadSize(
            (align * 2 + Common::AlignUp(region_size, align)) / cur_block_size);
    }
};
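
The PushBlock()/ClearRange() pair above implements a buddy-style promotion: once every sub-block of an aligned group is free at this level, the group is cleared here and handed to the next (larger) block size. A standalone sketch of that rule, with assumed 4 KiB/16 KiB levels and a std::set standing in for the bitmap:

#include <cstdint>
#include <cstdio>
#include <set>

constexpr std::uint64_t BlockShift = 12;     // 4 KiB blocks at this level (assumption)
constexpr std::uint64_t NextBlockShift = 14; // 16 KiB blocks at the next level (assumption)

// Returns the base address of the promoted block, or 0 if no promotion happened.
std::uint64_t PushBlock(std::set<std::uint64_t>& free_offsets, std::uint64_t heap_address,
                        std::uint64_t address) {
    const std::uint64_t group = 1ULL << (NextBlockShift - BlockShift); // 4 sub-blocks
    const std::uint64_t offset = (address - heap_address) >> BlockShift;
    free_offsets.insert(offset); // "set the bit for the free block"

    // Check whether the whole aligned group [base, base + group) is now free.
    const std::uint64_t base = offset & ~(group - 1);
    for (std::uint64_t i = 0; i < group; i++) {
        if (free_offsets.count(base + i) == 0) {
            return 0; // cannot coalesce yet
        }
    }
    // Clear the sub-blocks at this level, like Bitmap::ClearRange(), and promote.
    for (std::uint64_t i = 0; i < group; i++) {
        free_offsets.erase(base + i);
    }
    return heap_address + (base << BlockShift);
}

int main() {
    std::set<std::uint64_t> free_offsets;
    const std::uint64_t heap = 0x80000000ULL;
    for (std::uint64_t addr : {heap + 0x0000, heap + 0x1000, heap + 0x2000, heap + 0x3000}) {
        if (const std::uint64_t promoted = PushBlock(free_offsets, heap, addr)) {
            std::printf("coalesced into 16 KiB block at 0x%llx\n",
                        static_cast<unsigned long long>(promoted));
        }
    }
}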

public:
    PageHeap() = default;

    constexpr VAddr GetAddress() const {
        return heap_address;
    }
    constexpr std::size_t GetSize() const {
        return heap_size;
    }
    constexpr VAddr GetEndAddress() const {
        return GetAddress() + GetSize();
    }
    constexpr std::size_t GetPageOffset(VAddr block) const {
        return (block - GetAddress()) / PageSize;
    }

    void Initialize(VAddr heap_address, std::size_t heap_size, std::size_t metadata_size);
    VAddr AllocateBlock(s32 index);
    void Free(VAddr addr, std::size_t num_pages);

    void UpdateUsedSize() {
        used_size = heap_size - (GetNumFreePages() * PageSize);
    }

    static std::size_t CalculateMetadataOverheadSize(std::size_t region_size);

private:
    constexpr std::size_t GetNumFreePages() const {
        std::size_t num_free{};

        for (const auto& block : blocks) {
            num_free += block.GetNumFreePages();
        }

        return num_free;
    }

    void FreeBlock(VAddr block, s32 index);

    VAddr heap_address{};
    std::size_t heap_size{};
    std::size_t used_size{};
    std::array<Block, NumMemoryBlockPageShifts> blocks{};
    std::vector<u64> metadata;
};

} // namespace Kernel::Memory
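
A standalone sketch of the accounting behind GetNumFreePages() and UpdateUsedSize(): each block level contributes its remaining free blocks converted to pages, and the used size is derived from the total. The block sizes and counts below are illustrative only.

#include <array>
#include <cstddef>
#include <cstdio>

int main() {
    constexpr std::size_t PageSize = 0x1000;
    constexpr std::size_t heap_size = 64 * 1024 * 1024; // 64 MiB heap (made-up figure)

    struct BlockLevel {
        std::size_t pages_per_block;
        std::size_t num_free_blocks;
    };
    // e.g. 4 KiB, 16 KiB and 64 KiB levels with some free blocks left in each.
    constexpr std::array<BlockLevel, 3> blocks{{{1, 37}, {4, 5}, {16, 2}}};

    std::size_t num_free_pages = 0;
    for (const auto& block : blocks) {
        num_free_pages += block.pages_per_block * block.num_free_blocks;
    }
    const std::size_t used_size = heap_size - num_free_pages * PageSize;
    std::printf("free pages: %zu, used bytes: %zu\n", num_free_pages, used_size);
}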

@@ -1,13 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_types.h"

namespace Kernel::Memory::SystemControl {

u64 GenerateRandomRange(u64 min, u64 max);

} // namespace Kernel::Memory::SystemControl

@@ -8,11 +8,11 @@

#include "common/common_types.h"

namespace Kernel::Memory {
namespace Kernel {

constexpr std::size_t PageBits{12};
constexpr std::size_t PageSize{1 << PageBits};

using Page = std::array<u8, PageSize>;

} // namespace Kernel::Memory
} // namespace Kernel
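
A quick, self-contained illustration of the page arithmetic these constants enable; the example size is arbitrary.

#include <cstddef>
#include <cstdio>

int main() {
    constexpr std::size_t PageBits = 12;
    constexpr std::size_t PageSize = 1 << PageBits; // 0x1000

    constexpr std::size_t code_size = 0x1234;
    // AlignUp to the page size, then count pages via the shift.
    constexpr std::size_t aligned = (code_size + PageSize - 1) & ~(PageSize - 1);
    constexpr std::size_t num_pages = aligned >> PageBits;

    static_assert(aligned == 0x2000 && num_pages == 2, "page rounding example");
    std::printf("0x%zx bytes -> %zu page(s) (0x%zx aligned)\n", code_size, num_pages, aligned);
}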

@@ -14,14 +14,14 @@
#include "core/device_memory.h"
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/k_memory_block_manager.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_slab_heap.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/memory_block_manager.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/memory/slab_heap.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/lock.h"

@@ -274,7 +274,7 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
// Set initial resource limits
resource_limit->SetLimitValue(
LimitableResource::PhysicalMemory,
kernel.MemoryManager().GetSize(Memory::MemoryManager::Pool::Application));
kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application));
KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory,
code_size + system_resource_size);
if (!memory_reservation.Succeeded()) {

@@ -285,15 +285,15 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
// Initialize proces address space
if (const ResultCode result{
page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false, 0x8000000,
code_size, Memory::MemoryManager::Pool::Application)};
code_size, KMemoryManager::Pool::Application)};
result.IsError()) {
return result;
}

// Map process code region
if (const ResultCode result{page_table->MapProcessCode(
page_table->GetCodeRegionStart(), code_size / Memory::PageSize,
Memory::MemoryState::Code, Memory::MemoryPermission::None)};
if (const ResultCode result{page_table->MapProcessCode(page_table->GetCodeRegionStart(),
code_size / PageSize, KMemoryState::Code,
KMemoryPermission::None)};
result.IsError()) {
return result;
}

@@ -323,6 +323,11 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
UNREACHABLE();
}

// Set initial resource limits
resource_limit->SetLimitValue(
LimitableResource::PhysicalMemory,
kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application));

resource_limit->SetLimitValue(LimitableResource::Threads, 608);
resource_limit->SetLimitValue(LimitableResource::Events, 700);
resource_limit->SetLimitValue(LimitableResource::TransferMemory, 128);

@@ -400,22 +405,22 @@ VAddr Process::CreateTLSRegion() {
return *tls_page_iter->ReserveSlot();
}

Memory::Page* const tls_page_ptr{kernel.GetUserSlabHeapPages().Allocate()};
Page* const tls_page_ptr{kernel.GetUserSlabHeapPages().Allocate()};
ASSERT(tls_page_ptr);

const VAddr start{page_table->GetKernelMapRegionStart()};
const VAddr size{page_table->GetKernelMapRegionEnd() - start};
const PAddr tls_map_addr{system.DeviceMemory().GetPhysicalAddr(tls_page_ptr)};
const VAddr tls_page_addr{
page_table
->AllocateAndMapMemory(1, Memory::PageSize, true, start, size / Memory::PageSize,
Memory::MemoryState::ThreadLocal,
Memory::MemoryPermission::ReadAndWrite, tls_map_addr)
.ValueOr(0)};
const VAddr tls_page_addr{page_table
->AllocateAndMapMemory(1, PageSize, true, start, size / PageSize,
KMemoryState::ThreadLocal,
KMemoryPermission::ReadAndWrite,
tls_map_addr)
.ValueOr(0)};

ASSERT(tls_page_addr);

std::memset(tls_page_ptr, 0, Memory::PageSize);
std::memset(tls_page_ptr, 0, PageSize);
tls_pages.emplace_back(tls_page_addr);

const auto reserve_result{tls_pages.back().ReserveSlot()};

@@ -442,15 +447,15 @@ void Process::FreeTLSRegion(VAddr tls_address) {
void Process::LoadModule(CodeSet code_set, VAddr base_addr) {
std::lock_guard lock{HLE::g_hle_lock};
const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
Memory::MemoryPermission permission) {
KMemoryPermission permission) {
page_table->SetCodeMemoryPermission(segment.addr + base_addr, segment.size, permission);
};

system.Memory().WriteBlock(*this, base_addr, code_set.memory.data(), code_set.memory.size());

ReprotectSegment(code_set.CodeSegment(), Memory::MemoryPermission::ReadAndExecute);
ReprotectSegment(code_set.RODataSegment(), Memory::MemoryPermission::Read);
ReprotectSegment(code_set.DataSegment(), Memory::MemoryPermission::ReadAndWrite);
ReprotectSegment(code_set.CodeSegment(), KMemoryPermission::ReadAndExecute);
ReprotectSegment(code_set.RODataSegment(), KMemoryPermission::Read);
ReprotectSegment(code_set.DataSegment(), KMemoryPermission::ReadAndWrite);
}

bool Process::IsSignaled() const {

@@ -459,9 +464,9 @@ bool Process::IsSignaled() const {
}

Process::Process(Core::System& system)
: KSynchronizationObject{system.Kernel()},
page_table{std::make_unique<Memory::PageTable>(system)}, handle_table{system.Kernel()},
address_arbiter{system}, condition_var{system}, state_lock{system.Kernel()}, system{system} {}
: KSynchronizationObject{system.Kernel()}, page_table{std::make_unique<KPageTable>(system)},
handle_table{system.Kernel()}, address_arbiter{system}, condition_var{system},
state_lock{system.Kernel()}, system{system} {}

Process::~Process() = default;

@@ -479,16 +484,15 @@ ResultCode Process::AllocateMainThreadStack(std::size_t stack_size) {
ASSERT(stack_size);

// The kernel always ensures that the given stack size is page aligned.
main_thread_stack_size = Common::AlignUp(stack_size, Memory::PageSize);
main_thread_stack_size = Common::AlignUp(stack_size, PageSize);

const VAddr start{page_table->GetStackRegionStart()};
const std::size_t size{page_table->GetStackRegionEnd() - start};

CASCADE_RESULT(main_thread_stack_top,
page_table->AllocateAndMapMemory(
main_thread_stack_size / Memory::PageSize, Memory::PageSize, false, start,
size / Memory::PageSize, Memory::MemoryState::Stack,
Memory::MemoryPermission::ReadAndWrite));
main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize,
KMemoryState::Stack, KMemoryPermission::ReadAndWrite));

main_thread_stack_top += main_thread_stack_size;

@@ -29,16 +29,13 @@ class ProgramMetadata;
namespace Kernel {

class KernelCore;
class KPageTable;
class KResourceLimit;
class KThread;
class TLSPage;

struct CodeSet;

namespace Memory {
class PageTable;
}

enum class MemoryRegion : u16 {
APPLICATION = 1,
SYSTEM = 2,

@@ -104,12 +101,12 @@ public:
}

/// Gets a reference to the process' page table.
Memory::PageTable& PageTable() {
KPageTable& PageTable() {
return *page_table;
}

/// Gets const a reference to the process' page table.
const Memory::PageTable& PageTable() const {
const KPageTable& PageTable() const {
return *page_table;
}

@@ -385,7 +382,7 @@ private:
ResultCode AllocateMainThreadStack(std::size_t stack_size);

/// Memory manager for this process
std::unique_ptr<Memory::PageTable> page_table;
std::unique_ptr<KPageTable> page_table;

/// Current status of the process
ProcessStatus status{};

@@ -7,7 +7,7 @@
#include "common/bit_util.h"
#include "common/logging/log.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/process_capability.h"
#include "core/hle/kernel/svc_results.h"

@@ -69,7 +69,7 @@ u32 GetFlagBitOffset(CapabilityType type) {

ResultCode ProcessCapabilities::InitializeForKernelProcess(const u32* capabilities,
std::size_t num_capabilities,
Memory::PageTable& page_table) {
KPageTable& page_table) {
Clear();

// Allow all cores and priorities.

@@ -82,7 +82,7 @@ ResultCode ProcessCapabilities::InitializeForKernelProcess(const u32* capabiliti

ResultCode ProcessCapabilities::InitializeForUserProcess(const u32* capabilities,
std::size_t num_capabilities,
Memory::PageTable& page_table) {
KPageTable& page_table) {
Clear();

return ParseCapabilities(capabilities, num_capabilities, page_table);

@@ -108,7 +108,7 @@ void ProcessCapabilities::InitializeForMetadatalessProcess() {

ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities,
std::size_t num_capabilities,
Memory::PageTable& page_table) {
KPageTable& page_table) {
u32 set_flags = 0;
u32 set_svc_bits = 0;

@@ -155,7 +155,7 @@ ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities,
}

ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits,
u32 flag, Memory::PageTable& page_table) {
u32 flag, KPageTable& page_table) {
const auto type = GetCapabilityType(flag);

if (type == CapabilityType::Unset) {

@@ -293,12 +293,12 @@ ResultCode ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags)
}

ResultCode ProcessCapabilities::HandleMapPhysicalFlags(u32 flags, u32 size_flags,
Memory::PageTable& page_table) {
KPageTable& page_table) {
// TODO(Lioncache): Implement once the memory manager can handle this.
return RESULT_SUCCESS;
}

ResultCode ProcessCapabilities::HandleMapIOFlags(u32 flags, Memory::PageTable& page_table) {
ResultCode ProcessCapabilities::HandleMapIOFlags(u32 flags, KPageTable& page_table) {
// TODO(Lioncache): Implement once the memory manager can handle this.
return RESULT_SUCCESS;
}

@@ -12,9 +12,7 @@ union ResultCode;

namespace Kernel {

namespace Memory {
class PageTable;
}
class KPageTable;

/// The possible types of programs that may be indicated
/// by the program type capability descriptor.

@@ -90,7 +88,7 @@ public:
/// otherwise, an error code upon failure.
///
ResultCode InitializeForKernelProcess(const u32* capabilities, std::size_t num_capabilities,
Memory::PageTable& page_table);
KPageTable& page_table);

/// Initializes this process capabilities instance for a userland process.
///

@@ -103,7 +101,7 @@ public:
/// otherwise, an error code upon failure.
///
ResultCode InitializeForUserProcess(const u32* capabilities, std::size_t num_capabilities,
Memory::PageTable& page_table);
KPageTable& page_table);

/// Initializes this process capabilities instance for a process that does not
/// have any metadata to parse.

@@ -189,7 +187,7 @@ private:
/// @return RESULT_SUCCESS if no errors occur, otherwise an error code.
///
ResultCode ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
Memory::PageTable& page_table);
KPageTable& page_table);

/// Attempts to parse a capability descriptor that is only represented by a
/// single flag set.

@@ -204,7 +202,7 @@ private:
/// @return RESULT_SUCCESS if no errors occurred, otherwise an error code.
///
ResultCode ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
Memory::PageTable& page_table);
KPageTable& page_table);

/// Clears the internal state of this process capability instance. Necessary,
/// to have a sane starting point due to us allowing running executables without

@@ -228,10 +226,10 @@ private:
ResultCode HandleSyscallFlags(u32& set_svc_bits, u32 flags);

/// Handles flags related to mapping physical memory pages.
ResultCode HandleMapPhysicalFlags(u32 flags, u32 size_flags, Memory::PageTable& page_table);
ResultCode HandleMapPhysicalFlags(u32 flags, u32 size_flags, KPageTable& page_table);

/// Handles flags related to mapping IO pages.
ResultCode HandleMapIOFlags(u32 flags, Memory::PageTable& page_table);
ResultCode HandleMapIOFlags(u32 flags, KPageTable& page_table);

/// Handles flags related to the interrupt capability flags.
ResultCode HandleInterruptFlags(u32 flags);

@@ -27,21 +27,21 @@
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/memory_block.h"
#include "core/hle/kernel/memory/memory_layout.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/kernel/svc.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/svc_types.h"

@@ -67,8 +67,8 @@ constexpr bool IsValidAddressRange(VAddr address, u64 size) {
// Helper function that performs the common sanity checks for svcMapMemory
// and svcUnmapMemory. This is doable, as both functions perform their sanitizing
// in the same order.
ResultCode MapUnmapMemorySanityChecks(const Memory::PageTable& manager, VAddr dst_addr,
VAddr src_addr, u64 size) {
ResultCode MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAddr src_addr,
u64 size) {
if (!Common::Is4KBAligned(dst_addr)) {
LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
return ResultInvalidAddress;

@@ -230,9 +230,9 @@ static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 si
return ResultInvalidCurrentMemory;
}

const auto attributes{static_cast<Memory::MemoryAttribute>(mask | attribute)};
if (attributes != static_cast<Memory::MemoryAttribute>(mask) ||
(attributes | Memory::MemoryAttribute::Uncached) != Memory::MemoryAttribute::Uncached) {
const auto attributes{static_cast<MemoryAttribute>(mask | attribute)};
if (attributes != static_cast<MemoryAttribute>(mask) ||
(attributes | MemoryAttribute::Uncached) != MemoryAttribute::Uncached) {
LOG_ERROR(Kernel_SVC,
"Memory attribute doesn't match the given mask (Attribute: 0x{:X}, Mask: {:X}",
attribute, mask);

@@ -241,8 +241,8 @@ static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 si

auto& page_table{system.Kernel().CurrentProcess()->PageTable()};

return page_table.SetMemoryAttribute(address, size, static_cast<Memory::MemoryAttribute>(mask),
static_cast<Memory::MemoryAttribute>(attribute));
return page_table.SetMemoryAttribute(address, size, static_cast<KMemoryAttribute>(mask),
static_cast<KMemoryAttribute>(attribute));
}

static ResultCode SetMemoryAttribute32(Core::System& system, u32 address, u32 size, u32 mask,

@@ -508,7 +508,7 @@ static ResultCode ArbitrateLock(Core::System& system, Handle thread_handle, VAdd
thread_handle, address, tag);

// Validate the input address.
if (Memory::IsKernelAddress(address)) {
if (IsKernelAddress(address)) {
LOG_ERROR(Kernel_SVC, "Attempting to arbitrate a lock on a kernel address (address={:08X})",
address);
return ResultInvalidCurrentMemory;

@@ -531,8 +531,7 @@ static ResultCode ArbitrateUnlock(Core::System& system, VAddr address) {
LOG_TRACE(Kernel_SVC, "called address=0x{:X}", address);

// Validate the input address.

if (Memory::IsKernelAddress(address)) {
if (IsKernelAddress(address)) {
LOG_ERROR(Kernel_SVC,
"Attempting to arbitrate an unlock on a kernel address (address={:08X})",
address);

@@ -1232,9 +1231,8 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_han
return ResultInvalidCurrentMemory;
}

const auto permission_type = static_cast<Memory::MemoryPermission>(permissions);
if ((permission_type | Memory::MemoryPermission::Write) !=
Memory::MemoryPermission::ReadAndWrite) {
const auto permission_type = static_cast<MemoryPermission>(permissions);
if ((permission_type | MemoryPermission::Write) != MemoryPermission::ReadWrite) {
LOG_ERROR(Kernel_SVC, "Expected Read or ReadWrite permission but got permissions=0x{:08X}",
permissions);
return ResultInvalidMemoryPermissions;
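
The check above relies on a small bit trick: OR-ing Write into the requested permission and comparing against ReadWrite accepts exactly Read and ReadWrite, rejecting everything else. A standalone sketch with assumed enum values (R = 1, W = 2):

#include <cstdio>

enum class MemoryPermission : unsigned { None = 0, Read = 1, Write = 2, ReadWrite = 3, Execute = 4 };

constexpr MemoryPermission operator|(MemoryPermission a, MemoryPermission b) {
    return static_cast<MemoryPermission>(static_cast<unsigned>(a) | static_cast<unsigned>(b));
}

// True only for Read and ReadWrite, mirroring the svcMapSharedMemory validation.
constexpr bool IsReadOrReadWrite(MemoryPermission perm) {
    return (perm | MemoryPermission::Write) == MemoryPermission::ReadWrite;
}

int main() {
    for (auto perm : {MemoryPermission::None, MemoryPermission::Read, MemoryPermission::Write,
                      MemoryPermission::ReadWrite, MemoryPermission::Execute}) {
        std::printf("perm=%u accepted=%d\n", static_cast<unsigned>(perm), IsReadOrReadWrite(perm));
    }
}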

@@ -1267,14 +1265,15 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_han
return ResultInvalidMemoryRange;
}

auto shared_memory{current_process->GetHandleTable().Get<SharedMemory>(shared_memory_handle)};
auto shared_memory{current_process->GetHandleTable().Get<KSharedMemory>(shared_memory_handle)};
if (!shared_memory) {
LOG_ERROR(Kernel_SVC, "Shared memory does not exist, shared_memory_handle=0x{:08X}",
shared_memory_handle);
return ResultInvalidHandle;
}

return shared_memory->Map(*current_process, addr, size, permission_type);
return shared_memory->Map(*current_process, addr, size,
static_cast<KMemoryPermission>(permission_type));
}

static ResultCode MapSharedMemory32(Core::System& system, Handle shared_memory_handle, u32 addr,

@@ -1638,7 +1637,7 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr address,
cv_key, tag, timeout_ns);

// Validate input.
if (Memory::IsKernelAddress(address)) {
if (IsKernelAddress(address)) {
LOG_ERROR(Kernel_SVC, "Attempted to wait on kernel address (address={:08X})", address);
return ResultInvalidCurrentMemory;
}

@@ -1720,7 +1719,7 @@ static ResultCode WaitForAddress(Core::System& system, VAddr address, Svc::Arbit
address, arb_type, value, timeout_ns);

// Validate input.
if (Memory::IsKernelAddress(address)) {
if (IsKernelAddress(address)) {
LOG_ERROR(Kernel_SVC, "Attempting to wait on kernel address (address={:08X})", address);
return ResultInvalidCurrentMemory;
}

@@ -1765,7 +1764,7 @@ static ResultCode SignalToAddress(Core::System& system, VAddr address, Svc::Sign
address, signal_type, value, count);

// Validate input.
if (Memory::IsKernelAddress(address)) {
if (IsKernelAddress(address)) {
LOG_ERROR(Kernel_SVC, "Attempting to signal to a kernel address (address={:08X})", address);
return ResultInvalidCurrentMemory;
}

@@ -1887,9 +1886,8 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAd
return ResultInvalidCurrentMemory;
}

const auto perms{static_cast<Memory::MemoryPermission>(permissions)};
if (perms > Memory::MemoryPermission::ReadAndWrite ||
perms == Memory::MemoryPermission::Write) {
const auto perms{static_cast<MemoryPermission>(permissions)};
if (perms > MemoryPermission::ReadWrite || perms == MemoryPermission::Write) {
LOG_ERROR(Kernel_SVC, "Invalid memory permissions for transfer memory! (perms={:08X})",
permissions);
return ResultInvalidMemoryPermissions;

@@ -1903,7 +1901,8 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAd
LOG_ERROR(Kernel_SVC, "Could not reserve a new transfer memory");
return ResultResourceLimitedExceeded;
}
auto transfer_mem_handle = TransferMemory::Create(kernel, system.Memory(), addr, size, perms);
auto transfer_mem_handle = TransferMemory::Create(kernel, system.Memory(), addr, size,
static_cast<KMemoryPermission>(perms));

if (const auto reserve_result{transfer_mem_handle->Reserve()}; reserve_result.IsError()) {
return reserve_result;

@@ -2,9 +2,9 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/transfer_memory.h"
#include "core/hle/result.h"

@@ -24,7 +24,7 @@ TransferMemory::~TransferMemory() {
std::shared_ptr<TransferMemory> TransferMemory::Create(KernelCore& kernel,
Core::Memory::Memory& memory,
VAddr base_address, std::size_t size,
Memory::MemoryPermission permissions) {
KMemoryPermission permissions) {
std::shared_ptr<TransferMemory> transfer_memory{
std::make_shared<TransferMemory>(kernel, memory)};

@@ -6,7 +6,7 @@

#include <memory>

#include "core/hle/kernel/memory/memory_block.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/physical_memory.h"

@@ -36,7 +36,7 @@ public:

static std::shared_ptr<TransferMemory> Create(KernelCore& kernel, Core::Memory::Memory& memory,
VAddr base_address, std::size_t size,
Memory::MemoryPermission permissions);
KMemoryPermission permissions);

TransferMemory(const TransferMemory&) = delete;
TransferMemory& operator=(const TransferMemory&) = delete;

@@ -82,7 +82,7 @@ private:
std::size_t size{};

/// The memory permissions that are applied to this instance.
Memory::MemoryPermission owner_permissions{};
KMemoryPermission owner_permissions{};

/// The process that this transfer memory instance was created under.
Process* owner_process{};

@@ -15,9 +15,9 @@
#include "core/hle/kernel/client_port.h"
#include "core/hle/kernel/client_session.h"
#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/service/hid/errors.h"
#include "core/hle/service/hid/hid.h"
#include "core/hle/service/hid/irs.h"

@@ -14,7 +14,7 @@ struct EventType;
}

namespace Kernel {
class SharedMemory;
class KSharedMemory;
}

namespace Service::SM {

@@ -69,7 +69,7 @@ private:
void UpdateControllers(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
void UpdateMotion(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);

std::shared_ptr<Kernel::SharedMemory> shared_mem;
std::shared_ptr<Kernel::KSharedMemory> shared_mem;

std::shared_ptr<Core::Timing::EventType> pad_update_event;
std::shared_ptr<Core::Timing::EventType> motion_update_event;

@@ -6,8 +6,8 @@
#include "core/core.h"
#include "core/core_timing.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/service/hid/irs.h"

namespace Service::HID {

@@ -12,7 +12,7 @@ class System;
}

namespace Kernel {
class SharedMemory;
class KSharedMemory;
}

namespace Service::HID {

@@ -42,7 +42,7 @@ private:
void StopImageProcessorAsync(Kernel::HLERequestContext& ctx);
void ActivateIrsensorWithFunctionLevel(Kernel::HLERequestContext& ctx);

std::shared_ptr<Kernel::SharedMemory> shared_mem;
std::shared_ptr<Kernel::KSharedMemory> shared_mem;
const u32 device_handle{0xABCD};
};

@@ -11,8 +11,8 @@
#include "common/scope_exit.h"
#include "core/core.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/memory/system_control.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_system_control.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/service/ldr/ldr.h"

@@ -287,12 +287,11 @@ public:
rb.Push(RESULT_SUCCESS);
}

bool ValidateRegionForMap(Kernel::Memory::PageTable& page_table, VAddr start,
std::size_t size) const {
constexpr std::size_t padding_size{4 * Kernel::Memory::PageSize};
bool ValidateRegionForMap(Kernel::KPageTable& page_table, VAddr start, std::size_t size) const {
constexpr std::size_t padding_size{4 * Kernel::PageSize};
const auto start_info{page_table.QueryInfo(start - 1)};

if (start_info.state != Kernel::Memory::MemoryState::Free) {
if (start_info.state != Kernel::KMemoryState::Free) {
return {};
}

@@ -302,21 +301,20 @@ public:

const auto end_info{page_table.QueryInfo(start + size)};

if (end_info.state != Kernel::Memory::MemoryState::Free) {
if (end_info.state != Kernel::KMemoryState::Free) {
return {};
}

return (start + size + padding_size) <= (end_info.GetAddress() + end_info.GetSize());
}

VAddr GetRandomMapRegion(const Kernel::Memory::PageTable& page_table, std::size_t size) const {
VAddr GetRandomMapRegion(const Kernel::KPageTable& page_table, std::size_t size) const {
VAddr addr{};
const std::size_t end_pages{(page_table.GetAliasCodeRegionSize() - size) >>
Kernel::Memory::PageBits};
Kernel::PageBits};
do {
addr = page_table.GetAliasCodeRegionStart() +
(Kernel::Memory::SystemControl::GenerateRandomRange(0, end_pages)
<< Kernel::Memory::PageBits);
(Kernel::KSystemControl::GenerateRandomRange(0, end_pages) << Kernel::PageBits);
} while (!page_table.IsInsideAddressSpace(addr, size) ||
page_table.IsInsideHeapRegion(addr, size) ||
page_table.IsInsideAliasRegion(addr, size));
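
GetRandomMapRegion() above is a rejection-sampling loop: pick a random page-aligned candidate inside the alias-code region and retry until it collides with nothing. A standalone sketch of the same idea, with a stand-in RNG and a single hard-coded forbidden window instead of the page-table queries:

#include <cstdint>
#include <cstdio>
#include <random>

int main() {
    constexpr std::uint64_t PageBits = 12;
    constexpr std::uint64_t region_start = 0x800000000ULL; // assumed region layout
    constexpr std::uint64_t region_size = 0x10000000ULL;   // 256 MiB window
    constexpr std::uint64_t map_size = 0x20000ULL;         // 128 KiB mapping

    std::mt19937_64 rng{0x1234}; // stand-in for GenerateRandomRange()
    const std::uint64_t end_pages = (region_size - map_size) >> PageBits;

    auto collides = [](std::uint64_t candidate, std::uint64_t size) {
        // Stand-in for IsInsideHeapRegion/IsInsideAliasRegion: forbid one fixed window.
        constexpr std::uint64_t forbidden_start = 0x804000000ULL;
        constexpr std::uint64_t forbidden_end = 0x808000000ULL;
        return candidate < forbidden_end && candidate + size > forbidden_start;
    };

    std::uint64_t addr = 0;
    do {
        const std::uint64_t page = std::uniform_int_distribution<std::uint64_t>{0, end_pages}(rng);
        addr = region_start + (page << PageBits);
    } while (collides(addr, map_size));

    std::printf("selected 0x%016llx\n", static_cast<unsigned long long>(addr));
}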

@@ -387,7 +385,7 @@ public:
const VAddr data_start{start + nro_header.segment_headers[DATA_INDEX].memory_offset};
const VAddr bss_start{data_start + nro_header.segment_headers[DATA_INDEX].memory_size};
const VAddr bss_end_addr{
Common::AlignUp(bss_start + nro_header.bss_size, Kernel::Memory::PageSize)};
Common::AlignUp(bss_start + nro_header.bss_size, Kernel::PageSize)};

auto CopyCode{[&](VAddr src_addr, VAddr dst_addr, u64 size) {
std::vector<u8> source_data(size);

@@ -402,12 +400,12 @@ public:
nro_header.segment_headers[DATA_INDEX].memory_size);

CASCADE_CODE(process->PageTable().SetCodeMemoryPermission(
text_start, ro_start - text_start, Kernel::Memory::MemoryPermission::ReadAndExecute));
CASCADE_CODE(process->PageTable().SetCodeMemoryPermission(
ro_start, data_start - ro_start, Kernel::Memory::MemoryPermission::Read));
text_start, ro_start - text_start, Kernel::KMemoryPermission::ReadAndExecute));
CASCADE_CODE(process->PageTable().SetCodeMemoryPermission(ro_start, data_start - ro_start,
Kernel::KMemoryPermission::Read));

return process->PageTable().SetCodeMemoryPermission(
data_start, bss_end_addr - data_start, Kernel::Memory::MemoryPermission::ReadAndWrite);
data_start, bss_end_addr - data_start, Kernel::KMemoryPermission::ReadAndWrite);
}

void LoadNro(Kernel::HLERequestContext& ctx) {

@@ -19,9 +19,9 @@
#include "core/file_sys/romfs.h"
#include "core/file_sys/system_archive/system_archive.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_memory.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/service/filesystem/filesystem.h"
#include "core/hle/service/ns/pl_u.h"

@@ -131,7 +131,7 @@ struct PL_U::Impl {
}

/// Handle to shared memory region designated for a shared font
std::shared_ptr<Kernel::SharedMemory> shared_font_mem;
std::shared_ptr<Kernel::KSharedMemory> shared_font_mem;

/// Backing memory for the shared font data
std::shared_ptr<Kernel::PhysicalMemory> shared_font;

@@ -22,7 +22,7 @@ SharedMemory::SharedMemory(Core::System& system) : system(system) {

SharedMemory::~SharedMemory() = default;

std::shared_ptr<Kernel::SharedMemory> SharedMemory::GetSharedMemoryHolder() const {
std::shared_ptr<Kernel::KSharedMemory> SharedMemory::GetSharedMemoryHolder() const {
return shared_memory_holder;
}

@@ -6,8 +6,8 @@

#include "common/common_types.h"
#include "common/uuid.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/service/time/clock_types.h"

namespace Service::Time {

@@ -18,7 +18,7 @@ public:
~SharedMemory();

// Return the shared memory handle
std::shared_ptr<Kernel::SharedMemory> GetSharedMemoryHolder() const;
std::shared_ptr<Kernel::KSharedMemory> GetSharedMemoryHolder() const;

// TODO(ogniK): We have to properly simulate memory barriers, how are we going to do this?
template <typename T, std::size_t Offset>

@@ -63,7 +63,7 @@ public:
void SetAutomaticCorrectionEnabled(bool is_enabled);

private:
std::shared_ptr<Kernel::SharedMemory> shared_memory_holder;
std::shared_ptr<Kernel::KSharedMemory> shared_memory_holder;
Core::System& system;
Format shared_memory_format{};
};

@@ -12,8 +12,8 @@
#include "core/file_sys/control_metadata.h"
#include "core/file_sys/patch_manager.h"
#include "core/file_sys/romfs_factory.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/process.h"
#include "core/hle/service/filesystem/filesystem.h"
#include "core/loader/deconstructed_rom_directory.h"

@@ -10,7 +10,7 @@
#include "common/file_util.h"
#include "common/logging/log.h"
#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/process.h"
#include "core/loader/elf.h"
#include "core/memory.h"

@@ -6,7 +6,7 @@
#include "core/file_sys/kernel_executable.h"
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/process.h"
#include "core/loader/kip.h"
#include "core/memory.h"

@@ -15,8 +15,8 @@
#include "core/file_sys/romfs_factory.h"
#include "core/file_sys/vfs_offset.h"
#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/process.h"
#include "core/hle/service/filesystem/filesystem.h"
#include "core/loader/nro.h"

@@ -15,8 +15,8 @@
#include "core/core.h"
#include "core/file_sys/patch_manager.h"
#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/process.h"
#include "core/loader/nso.h"
#include "core/memory.h"

@@ -16,7 +16,7 @@
#include "core/arm/arm_interface.h"
#include "core/core.h"
#include "core/device_memory.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/physical_memory.h"
#include "core/hle/kernel/process.h"
#include "core/memory.h"

@@ -116,6 +116,11 @@ public:
*/
u8* GetPointer(VAddr vaddr);

template <typename T>
T* GetPointer(VAddr vaddr) {
return reinterpret_cast<T*>(GetPointer(vaddr));
}

/**
* Gets a pointer to the given address.
*

@@ -126,6 +131,11 @@
*/
const u8* GetPointer(VAddr vaddr) const;

template <typename T>
const T* GetPointer(VAddr vaddr) const {
return reinterpret_cast<T*>(GetPointer(vaddr));
}

/**
* Reads an 8-bit unsigned value from the current process' address space
* at the given virtual address.
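
The templated GetPointer() overloads added above simply reinterpret the raw u8* for callers that want a typed view of guest memory. A minimal standalone illustration using a plain buffer instead of the emulated address space:

#include <array>
#include <cstdint>
#include <cstdio>
#include <cstring>

struct FakeMemory {
    std::array<std::uint8_t, 0x1000> backing{};

    // Raw accessor, analogous to the u8* overload.
    std::uint8_t* GetPointer(std::uint64_t vaddr) {
        return backing.data() + (vaddr & 0xFFF);
    }

    // Typed accessor, analogous to the templated overload added in this change.
    template <typename T>
    T* GetPointer(std::uint64_t vaddr) {
        return reinterpret_cast<T*>(GetPointer(vaddr));
    }
};

int main() {
    FakeMemory memory;
    const std::uint32_t value = 0xCAFEBABE;
    std::memcpy(memory.GetPointer(0x40), &value, sizeof(value));

    // Same bytes, viewed through the typed overload.
    std::printf("0x%08X\n", *memory.GetPointer<std::uint32_t>(0x40));
}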

@@ -10,7 +10,7 @@
#include "core/core_timing.h"
#include "core/core_timing_util.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/process.h"
#include "core/hle/service/hid/controllers/npad.h"
#include "core/hle/service/hid/hid.h"

@@ -17,7 +17,7 @@
#include "core/arm/arm_interface.h"
#include "core/core.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/process.h"
#include "core/hle/result.h"
#include "core/memory.h"

@@ -6,7 +6,7 @@
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/hle/kernel/memory/page_table.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/process.h"
#include "core/memory.h"
#include "video_core/gpu.h"