Partial commit: sys_memory, sys_mmapper

Nekotekina 2016-05-25 21:04:08 +03:00
parent f5e65e4ad9
commit 96728a83f6
13 changed files with 366 additions and 375 deletions

View File

@@ -12,6 +12,7 @@
#define _XOPEN_SOURCE
#define __USE_GNU
#endif
#include <errno.h>
#include <signal.h>
#include <ucontext.h>
#endif

View File

@@ -1,15 +1,48 @@
#include "stdafx.h"
#include "Emu/System.h"
#include "Emu/Cell/PPUModule.h"
#include "Emu/Cell/lv2/sys_mmapper.h"
#include "sysPrxForUser.h"
namespace vm { using namespace ps3; }
extern logs::channel sysPrxForUser;
void sysPrxForUser_sys_mmapper_init()
s32 sys_mmapper_allocate_memory(u32 size, u64 flags, vm::ptr<u32> mem_id)
{
sysPrxForUser.notice("sys_mmapper_allocate_memory(size=0x%x, flags=0x%llx, mem_id=*0x%x)", size, flags, mem_id);
return sys_mmapper_allocate_shared_memory(0xffff000000000000ull, size, flags, mem_id);
}
s32 sys_mmapper_allocate_memory_from_container(u32 size, u32 cid, u64 flags, vm::ptr<u32> mem_id)
{
sysPrxForUser.notice("sys_mmapper_allocate_memory_from_container(size=0x%x, cid=0x%x, flags=0x%llx, mem_id=*0x%x)", size, cid, flags, mem_id);
return sys_mmapper_allocate_shared_memory_from_container(0xffff000000000000ull, size, cid, flags, mem_id);
}
s32 sys_mmapper_map_memory(u32 addr, u32 mem_id, u64 flags)
{
sysPrxForUser.notice("sys_mmapper_map_memory(addr=0x%x, mem_id=0x%x, flags=0x%llx)", addr, mem_id, flags);
return sys_mmapper_map_shared_memory(addr, mem_id, flags);
}
s32 sys_mmapper_unmap_memory(u32 addr, vm::ptr<u32> mem_id)
{
sysPrxForUser.notice("sys_mmapper_unmap_memory(addr=0x%x, mem_id=*0x%x)", addr, mem_id);
return sys_mmapper_unmap_shared_memory(addr, mem_id);
}
s32 sys_mmapper_free_memory(u32 mem_id)
{
sysPrxForUser.notice("sys_mmapper_free_memory(mem_id=0x%x)", mem_id);
return sys_mmapper_free_shared_memory(mem_id);
}
extern void sysPrxForUser_sys_mmapper_init()
{
// TODO: split syscalls and liblv2 functions
REG_FUNC(sysPrxForUser, sys_mmapper_allocate_memory);
REG_FUNC(sysPrxForUser, sys_mmapper_allocate_memory_from_container);
REG_FUNC(sysPrxForUser, sys_mmapper_map_memory);
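For reference, the wrappers above just log and forward to the lv2 shared-memory syscalls, passing 0xffff000000000000ull as the still-unexplained first argument (now exposed as the unk parameter). A typical guest-side sequence through them, as a sketch with hypothetical addresses and sizes (vm::var is assumed from the emulator's HLE helpers):

// Illustrative only: allocate an ID, map it, then tear everything down.
vm::var<u32> mem_id;
sys_mmapper_allocate_memory(0x100000, SYS_MEMORY_PAGE_SIZE_1M, mem_id);
sys_mmapper_map_memory(0x30100000, *mem_id, 0); // forwards to sys_mmapper_map_shared_memory
sys_mmapper_unmap_memory(0x30100000, mem_id);   // writes the ID back through mem_id
sys_mmapper_free_memory(*mem_id);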

View File

@@ -229,7 +229,7 @@ extern std::string ppu_get_syscall_name(u64 code)
case 353: return "sys_memory_get_user_memory_stat";
case 356: return "sys_memory_allocate_colored";
case 361: return "sys_memory_allocate_from_container_colored";
case 362: return "sys_mmapper_allocate_memory_from_container";
case 362: return "sys_mmapper_allocate_shared_memory_from_container";
case 367: return "sys_uart_initialize";
case 368: return "sys_uart_receive";
case 369: return "sys_uart_send";

View File

@@ -333,13 +333,13 @@ std::array<ppu_function_t, 1024> g_ppu_syscall_table
BIND_FUNC(sys_mmapper_allocate_fixed_address), //326 (0x146)
BIND_FUNC(sys_mmapper_enable_page_fault_notification), //327 (0x147)
null_func,//BIND_FUNC(sys_mmapper_...) //328 (0x148)
null_func,//BIND_FUNC(sys_mmapper_free_shared_memory) //329 (0x149)
BIND_FUNC(sys_mmapper_free_shared_memory), //329 (0x149)
BIND_FUNC(sys_mmapper_allocate_address), //330 (0x14A)
BIND_FUNC(sys_mmapper_free_address), //331 (0x14B)
null_func,//BIND_FUNC(sys_mmapper_allocate_shared_memory)//332(0x14C)
BIND_FUNC(sys_mmapper_allocate_shared_memory), //332 (0x14C)
null_func,//BIND_FUNC(sys_mmapper_set_shared_memory_flag)//333(0x14D)
null_func,//BIND_FUNC(sys_mmapper_map_shared_memory) //334 (0x14E)
null_func,//BIND_FUNC(sys_mmapper_unmap_shared_memory) //335 (0x14F)
BIND_FUNC(sys_mmapper_map_shared_memory), //334 (0x14E)
BIND_FUNC(sys_mmapper_unmap_shared_memory), //335 (0x14F)
BIND_FUNC(sys_mmapper_change_address_access_right), //336 (0x150)
BIND_FUNC(sys_mmapper_search_and_map), //337 (0x151)
null_func,//BIND_FUNC(sys_mmapper_get_shared_memory_attribute) //338 (0x152)
@@ -366,7 +366,7 @@ std::array<ppu_function_t, 1024> g_ppu_syscall_table
null_func,//BIND_FUNC(sys_memory_...) //359 (0x167)
null_func,//BIND_FUNC(sys_memory_...) //360 (0x168)
null_func,//BIND_FUNC(sys_memory_allocate_from_container_colored) //361 (0x169)
null_func,//BIND_FUNC(sys_mmapper_allocate_memory_from_container) //362 (0x16A)
BIND_FUNC(sys_mmapper_allocate_shared_memory_from_container),//362 (0x16A)
null_func,//BIND_FUNC(sys_mmapper_...) //363 (0x16B)
null_func,//BIND_FUNC(sys_mmapper_...) //364 (0x16C)
null_func, //365 (0x16D) UNS
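The table itself is only an index from syscall number to handler. Roughly, the dispatch it feeds looks like the sketch below; the dispatcher and thread type names are placeholders, and note that null_func is itself a callable handler that merely reports an unimplemented syscall, so newly bound entries such as sys_mmapper_allocate_shared_memory simply replace that placeholder.

// Sketch (names other than g_ppu_syscall_table and BIND_FUNC are assumed):
// BIND_FUNC adapts a typed C++ handler to ppu_function_t, reading the
// arguments from PPU registers and writing the result back.
void dispatch(ppu_thread& ppu, u64 code)
{
    if (code < g_ppu_syscall_table.size())
    {
        g_ppu_syscall_table[code](ppu);
    }
}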

View File

@@ -1,21 +1,16 @@
#include "stdafx.h"
#include "Emu/Memory/Memory.h"
#include "Emu/System.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "sys_memory.h"
namespace vm { using namespace ps3; }
logs::channel sys_memory("sys_memory", logs::level::notice);
s32 sys_memory_allocate(u32 size, u64 flags, vm::ptr<u32> alloc_addr)
ppu_error_code sys_memory_allocate(u32 size, u64 flags, vm::ptr<u32> alloc_addr)
{
sys_memory.warning("sys_memory_allocate(size=0x%x, flags=0x%llx, alloc_addr=*0x%x)", size, flags, alloc_addr);
LV2_LOCK;
// Check allocation size
switch(flags)
switch (flags)
{
case SYS_MEMORY_PAGE_SIZE_1M:
{
@@ -43,36 +38,24 @@ s32 sys_memory_allocate(u32 size, u64 flags, vm::ptr<u32> alloc_addr)
}
}
// Allocate memory
const u32 addr =
flags == SYS_MEMORY_PAGE_SIZE_1M ? vm::alloc(size, vm::user_space, 0x100000) :
flags == SYS_MEMORY_PAGE_SIZE_64K ? vm::alloc(size, vm::user_space, 0x10000) :
throw EXCEPTION("Unexpected flags");
// Get "default" memory container
const auto dct = fxm::get_always<lv2_memory_container>();
if (!addr)
// Try to get "physical memory"
if (!dct->take(size))
{
return CELL_ENOMEM;
}
// Write back the start address of the allocated area
*alloc_addr = addr;
// Allocate memory, write back the start address of the allocated area
VERIFY(*alloc_addr = vm::alloc(size, vm::user_space, flags == SYS_MEMORY_PAGE_SIZE_1M ? 0x100000 : 0x10000));
return CELL_OK;
}
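VERIFY asserts that vm::alloc cannot fail here: the default container has already granted the "physical" budget, so an address-space failure would indicate emulator state corruption rather than a guest error. Its rough semantics, as an assumption rather than the macro's exact definition:

// Assumed, simplified: evaluate the expression and throw on a falsy
// result, turning an "impossible" failure into a hard error.
#define VERIFY(expr) do { if (!(expr)) throw std::runtime_error("Verification failed: " #expr); } while (0)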
s32 sys_memory_allocate_from_container(u32 size, u32 cid, u64 flags, vm::ptr<u32> alloc_addr)
ppu_error_code sys_memory_allocate_from_container(u32 size, u32 cid, u64 flags, vm::ptr<u32> alloc_addr)
{
sys_memory.warning("sys_memory_allocate_from_container(size=0x%x, cid=0x%x, flags=0x%llx, alloc_addr=*0x%x)", size, cid, flags, alloc_addr);
LV2_LOCK;
// Check if this container ID is valid
const auto ct = idm::get<lv2_memory_container_t>(cid);
if (!ct)
{
return CELL_ESRCH;
}
// Check allocation size
switch (flags)
@@ -103,89 +86,69 @@ s32 sys_memory_allocate_from_container(u32 size, u32 cid, u64 flags, vm::ptr<u32
}
}
if (ct->used > ct->size)
ppu_error_code result{};
const auto ct = idm::get<lv2_memory_container>(cid, [&](u32, lv2_memory_container& ct)
{
throw EXCEPTION("Unexpected amount of memory taken (0x%x, size=0x%x)", ct->used.load(), ct->size);
// Try to get "physical memory"
if (!ct.take(size))
{
result = CELL_ENOMEM;
return false;
}
return true;
});
if (!ct && !result)
{
return CELL_ESRCH;
}
// Check memory availability
if (size > ct->size - ct->used)
if (!ct)
{
return CELL_ENOMEM;
return result;
}
const auto area = vm::get(vm::user_space);
// Return "physical" memory required for allocation
area->used -= size;
// Allocate memory
const u32 addr =
flags == SYS_MEMORY_PAGE_SIZE_1M ? area->alloc(size, 0x100000) :
flags == SYS_MEMORY_PAGE_SIZE_64K ? area->alloc(size, 0x10000) :
throw EXCEPTION("Unexpected flags");
if (!addr)
{
throw EXCEPTION("Memory not allocated (ct=0x%x, size=0x%x)", cid, size);
}
// Store the address and size in the container
ct->allocs.emplace(addr, size);
ct->used += size;
// Write back the start address of the allocated area.
*alloc_addr = addr;
// Allocate memory, write back the start address of the allocated area, use cid as the supplementary info
VERIFY(*alloc_addr = vm::alloc(size, vm::user_space, flags == SYS_MEMORY_PAGE_SIZE_1M ? 0x100000 : 0x10000, cid));
return CELL_OK;
}
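The idm::get overload used here runs the lambda under the ID manager's internal lock, so the ESRCH lookup and the take() happen as one atomic step, and the caller disambiguates afterwards via the separate result variable. A standalone analogue of the pattern (a sketch, not the real idm internals):

#include <map>
#include <memory>
#include <mutex>

// Minimal analogue of idm::get(id, pred): look the object up by ID and
// run a predicate under the same lock; return it only if the predicate
// passed, so check-and-reserve cannot race with other idm users.
template <typename T>
struct registry
{
    std::mutex mutex;
    std::map<unsigned, std::shared_ptr<T>> objects;

    template <typename F>
    std::shared_ptr<T> get(unsigned id, F&& pred)
    {
        std::lock_guard<std::mutex> lock(mutex);

        const auto found = objects.find(id);

        if (found == objects.end())
        {
            return nullptr; // caller maps this to CELL_ESRCH
        }

        if (!pred(id, *found->second))
        {
            return nullptr; // caller reports its own error via 'result'
        }

        return found->second;
    }
};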
s32 sys_memory_free(u32 addr)
ppu_error_code sys_memory_free(u32 addr)
{
sys_memory.warning("sys_memory_free(addr=0x%x)", addr);
LV2_LOCK;
const auto area = vm::get(vm::user_space);
// Check all memory containers
const auto ct = idm::select<lv2_memory_container_t>([&](u32, lv2_memory_container_t& ct)
{
return ct.allocs.count(addr) != 0;
});
VERIFY(area);
if (ct)
{
const u32 size = ct->allocs.at(addr);
// Deallocate memory
u32 cid, size = area->dealloc(addr, &cid);
if (!area->dealloc(addr))
{
throw EXCEPTION("Memory not deallocated (cid=0x%x, addr=0x%x, size=0x%x)", ct->id, addr, size);
}
ct->allocs.erase(addr);
// Return "memory"
ct->used -= size;
area->used += size;
return CELL_OK;
}
if (!area->dealloc(addr))
if (!size)
{
return CELL_EINVAL;
}
// Return "physical memory"
if (cid == 0)
{
fxm::get<lv2_memory_container>()->used -= size;
}
else if (const auto ct = idm::get<lv2_memory_container>(cid))
{
ct->used -= size;
}
return CELL_OK;
}
s32 sys_memory_get_page_attribute(u32 addr, vm::ptr<sys_page_attr_t> attr)
ppu_error_code sys_memory_get_page_attribute(u32 addr, vm::ptr<sys_page_attr_t> attr)
{
sys_memory.error("sys_memory_get_page_attribute(addr=0x%x, attr=*0x%x)", addr, attr);
LV2_LOCK;
// TODO: Implement per thread page attribute setting.
attr->attribute = 0x40000ull; // SYS_MEMORY_PROT_READ_WRITE
attr->access_right = 0xFull; // SYS_MEMORY_ACCESS_RIGHT_ANY
@@ -194,35 +157,29 @@ s32 sys_memory_get_page_attribute(u32 addr, vm::ptr<sys_page_attr_t> attr)
return CELL_OK;
}
s32 sys_memory_get_user_memory_size(vm::ptr<sys_memory_info_t> mem_info)
ppu_error_code sys_memory_get_user_memory_size(vm::ptr<sys_memory_info_t> mem_info)
{
sys_memory.warning("sys_memory_get_user_memory_size(mem_info=*0x%x)", mem_info);
LV2_LOCK;
// Get "default" memory container
const auto dct = fxm::get_always<lv2_memory_container>();
u32 reserved = 0;
mem_info->total_user_memory = dct->size;
mem_info->available_user_memory = dct->size - dct->used;
// Check all memory containers
idm::select<lv2_memory_container_t>([&](u32, lv2_memory_container_t& ct)
// Scan other memory containers
idm::select<lv2_memory_container>([&](u32, lv2_memory_container& ct)
{
reserved += ct.size;
mem_info->total_user_memory -= ct.size;
});
const auto area = vm::get(vm::user_space);
// Fetch the user memory available
mem_info->total_user_memory = area->size - reserved;
mem_info->available_user_memory = area->size - area->used;
return CELL_OK;
}
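A worked example with hypothetical numbers, following the accounting above:

// Default container size = 0x10000000. One user container of 0x1000000
// was created (its reservation already counts towards dct->used) and a
// further 0x2000000 was allocated directly, so dct->used == 0x3000000:
//   mem_info->total_user_memory     = 0x10000000 - 0x1000000 = 0xF000000
//   mem_info->available_user_memory = 0x10000000 - 0x3000000 = 0xD000000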
s32 sys_memory_container_create(vm::ptr<u32> cid, u32 size)
ppu_error_code sys_memory_container_create(vm::ptr<u32> cid, u32 size)
{
sys_memory.warning("sys_memory_container_create(cid=*0x%x, size=0x%x)", cid, size);
LV2_LOCK;
// Round down to 1 MB granularity
size &= ~0xfffff;
@@ -231,66 +188,67 @@ s32 sys_memory_container_create(vm::ptr<u32> cid, u32 size)
return CELL_ENOMEM;
}
u32 reserved = 0;
const auto dct = fxm::get_always<lv2_memory_container>();
// Check all memory containers
idm::select<lv2_memory_container_t>([&](u32, lv2_memory_container_t& ct)
{
reserved += ct.size;
});
const auto area = vm::get(vm::user_space);
if (area->size < reserved + size || area->size - area->used < size)
// Try to obtain "physical memory" from the default container
if (!dct->take(size))
{
return CELL_ENOMEM;
}
// Create the memory container
*cid = idm::make<lv2_memory_container_t>(size);
*cid = idm::make<lv2_memory_container>(size);
return CELL_OK;
}
s32 sys_memory_container_destroy(u32 cid)
ppu_error_code sys_memory_container_destroy(u32 cid)
{
sys_memory.warning("sys_memory_container_destroy(cid=0x%x)", cid);
LV2_LOCK;
ppu_error_code result{};
const auto ct = idm::get<lv2_memory_container_t>(cid);
const auto ct = idm::withdraw<lv2_memory_container>(cid, [&](u32, lv2_memory_container& ct)
{
// Check if some memory is not deallocated (the container cannot be destroyed in this case)
if (!ct.used.compare_and_swap_test(0, ct.size))
{
result = CELL_EBUSY;
return false;
}
if (!ct)
return true;
});
if (!ct && !result)
{
return CELL_ESRCH;
}
// Check if some memory is not deallocated (the container cannot be destroyed in this case)
if (ct->used)
if (!ct)
{
return CELL_EBUSY;
return result;
}
idm::remove<lv2_memory_container_t>(cid);
// Return "physical memory" to the default container
fxm::get<lv2_memory_container>()->used -= ct->size;
return CELL_OK;
}
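The compare_and_swap_test(0, ct.size) in the withdraw lambda does two jobs at once: it verifies that nothing is still allocated and simultaneously marks the whole container as used, so a racing allocation fails with ENOMEM instead of slipping in while the container is being destroyed. The same trick in isolation with std::atomic (a sketch):

#include <atomic>

std::atomic<unsigned> used{0};
const unsigned size = 0x1000000; // hypothetical container size

// Succeeds only if used == 0; on success leaves used == size, so any
// concurrent take() sees an exhausted budget and fails.
bool try_seal()
{
    unsigned expected = 0;
    return used.compare_exchange_strong(expected, size);
}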
s32 sys_memory_container_get_size(vm::ptr<sys_memory_info_t> mem_info, u32 cid)
ppu_error_code sys_memory_container_get_size(vm::ptr<sys_memory_info_t> mem_info, u32 cid)
{
sys_memory.warning("sys_memory_container_get_size(mem_info=*0x%x, cid=0x%x)", mem_info, cid);
LV2_LOCK;
const auto ct = idm::get<lv2_memory_container_t>(cid);
const auto ct = idm::get<lv2_memory_container>(cid);
if (!ct)
{
return CELL_ESRCH;
}
mem_info->total_user_memory = ct->size; // total container memory
mem_info->available_user_memory = ct->size - ct->used; // available container memory
mem_info->total_user_memory = ct->size; // Total container memory
mem_info->available_user_memory = ct->size - ct->used; // Available container memory
return CELL_OK;
}

View File

@ -1,6 +1,8 @@
#pragma once
#include "sys_sync.h"
#include "Emu/Memory/Memory.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/IdManager.h"
enum : u32
{
@@ -42,33 +44,45 @@ struct sys_page_attr_t
be_t<u32> pad;
};
#include <map>
struct lv2_memory_container_t
struct lv2_memory_container
{
// Amount of "physical" memory in this container
const u32 size;
const u32 size = 0x10000000; // Amount of "physical" memory in this container
const id_value<> id{};
// Amount of memory allocated
atomic_t<u32> used{ 0 };
atomic_t<u32> used{}; // Amount of "physical" memory currently used
// Allocations (addr -> size)
std::map<u32, u32> allocs;
lv2_memory_container() = default;
lv2_memory_container_t(u32 size)
lv2_memory_container(u32 size)
: size(size)
{
}
// Try to get specified amount of "physical" memory
u32 take(u32 amount)
{
const u32 old_value = used.fetch_op([&](u32& value)
{
if (size - value >= amount)
{
value += amount;
}
});
if (size - old_value >= amount)
{
return amount;
}
return 0;
}
};
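take() leans on atomic_t::fetch_op to apply a conditional increment as one atomic step. With plain std::atomic, the same lock-free budget reservation is a compare-exchange loop; a standalone sketch:

#include <atomic>

std::atomic<unsigned> used{0};
constexpr unsigned size = 0x10000000; // container budget

// Reserve 'amount' out of the fixed budget; returns the amount granted,
// or 0 if the remaining budget is too small. Never blocks.
unsigned take(unsigned amount)
{
    unsigned old = used.load();

    do
    {
        if (size - old < amount)
        {
            return 0;
        }
    }
    while (!used.compare_exchange_weak(old, old + amount)); // 'old' is refreshed on failure

    return amount;
}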
// SysCalls
s32 sys_memory_allocate(u32 size, u64 flags, vm::ptr<u32> alloc_addr);
s32 sys_memory_allocate_from_container(u32 size, u32 cid, u64 flags, vm::ptr<u32> alloc_addr);
s32 sys_memory_free(u32 start_addr);
s32 sys_memory_get_page_attribute(u32 addr, vm::ptr<sys_page_attr_t> attr);
s32 sys_memory_get_user_memory_size(vm::ptr<sys_memory_info_t> mem_info);
s32 sys_memory_container_create(vm::ptr<u32> cid, u32 size);
s32 sys_memory_container_destroy(u32 cid);
s32 sys_memory_container_get_size(vm::ptr<sys_memory_info_t> mem_info, u32 cid);
ppu_error_code sys_memory_allocate(u32 size, u64 flags, vm::ps3::ptr<u32> alloc_addr);
ppu_error_code sys_memory_allocate_from_container(u32 size, u32 cid, u64 flags, vm::ps3::ptr<u32> alloc_addr);
ppu_error_code sys_memory_free(u32 start_addr);
ppu_error_code sys_memory_get_page_attribute(u32 addr, vm::ps3::ptr<sys_page_attr_t> attr);
ppu_error_code sys_memory_get_user_memory_size(vm::ps3::ptr<sys_memory_info_t> mem_info);
ppu_error_code sys_memory_container_create(vm::ps3::ptr<u32> cid, u32 size);
ppu_error_code sys_memory_container_destroy(u32 cid);
ppu_error_code sys_memory_container_get_size(vm::ps3::ptr<sys_memory_info_t> mem_info, u32 cid);

View File

@@ -1,19 +1,14 @@
#include "stdafx.h"
#include "Emu/Memory/Memory.h"
#include "Emu/System.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "sys_mmapper.h"
namespace vm { using namespace ps3; }
logs::channel sys_mmapper("sys_mmapper", logs::level::notice);
s32 sys_mmapper_allocate_address(u64 size, u64 flags, u64 alignment, vm::ptr<u32> alloc_addr)
ppu_error_code sys_mmapper_allocate_address(u64 size, u64 flags, u64 alignment, vm::ptr<u32> alloc_addr)
{
sys_mmapper.error("sys_mmapper_allocate_address(size=0x%llx, flags=0x%llx, alignment=0x%llx, alloc_addr=*0x%x)", size, flags, alignment, alloc_addr);
LV2_LOCK;
if (size % 0x10000000)
{
return CELL_EALIGN;
@@ -24,9 +19,8 @@ s32 sys_mmapper_allocate_address(u64 size, u64 flags, u64 alignment, vm::ptr<u32
return CELL_ENOMEM;
}
// This is a 'hack' / workaround for psl1ght, which gives us an alignment of 0, which is technically invalid,
// but apparently is allowed on actual ps3
// https://github.com/ps3dev/PSL1GHT/blob/534e58950732c54dc6a553910b653c99ba6e9edc/ppu/librt/sbrk.c#L71
// This is a workaround for psl1ght, which gives us an alignment of 0, which is technically invalid, but apparently is allowed on actual ps3
// https://github.com/ps3dev/PSL1GHT/blob/534e58950732c54dc6a553910b653c99ba6e9edc/ppu/librt/sbrk.c#L71
if (!alignment)
{
alignment = 0x10000000;
@@ -43,8 +37,7 @@ s32 sys_mmapper_allocate_address(u64 size, u64 flags, u64 alignment, vm::ptr<u32
{
if (const auto area = vm::map(static_cast<u32>(addr), static_cast<u32>(size), flags))
{
*alloc_addr = addr;
*alloc_addr = static_cast<u32>(addr);
return CELL_OK;
}
}
@@ -56,12 +49,10 @@ s32 sys_mmapper_allocate_address(u64 size, u64 flags, u64 alignment, vm::ptr<u32
return CELL_EALIGN;
}
s32 sys_mmapper_allocate_fixed_address()
ppu_error_code sys_mmapper_allocate_fixed_address()
{
sys_mmapper.error("sys_mmapper_allocate_fixed_address()");
LV2_LOCK;
if (!vm::map(0xB0000000, 0x10000000)) // TODO: set correct flags (they aren't used currently though)
{
return CELL_EEXIST;
@@ -70,12 +61,9 @@ s32 sys_mmapper_allocate_fixed_address()
return CELL_OK;
}
// Allocate physical memory (create lv2_memory_t object)
s32 sys_mmapper_allocate_memory(u64 size, u64 flags, vm::ptr<u32> mem_id)
ppu_error_code sys_mmapper_allocate_shared_memory(u64 unk, u32 size, u64 flags, vm::ptr<u32> mem_id)
{
sys_mmapper.warning("sys_mmapper_allocate_memory(size=0x%llx, flags=0x%llx, mem_id=*0x%x)", size, flags, mem_id);
LV2_LOCK;
sys_mmapper.warning("sys_mmapper_allocate_shared_memory(0x%llx, size=0x%x, flags=0x%llx, mem_id=*0x%x)", unk, size, flags, mem_id);
// Check page granularity
switch (flags & SYS_MEMORY_PAGE_SIZE_MASK)
@@ -106,35 +94,23 @@ s32 sys_mmapper_allocate_memory(u64 size, u64 flags, vm::ptr<u32> mem_id)
}
}
if (size > UINT32_MAX)
// Get "default" memory container
const auto dct = fxm::get_always<lv2_memory_container>();
if (!dct->take(size))
{
return CELL_ENOMEM;
}
const u32 align =
flags & SYS_MEMORY_PAGE_SIZE_1M ? 0x100000 :
flags & SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 :
throw EXCEPTION("Unexpected");
// Generate a new mem ID
*mem_id = idm::make<lv2_memory_t>(static_cast<u32>(size), align, flags, nullptr);
*mem_id = idm::make<lv2_memory>(size, flags & SYS_MEMORY_PAGE_SIZE_1M ? 0x100000 : 0x10000, flags, dct);
return CELL_OK;
}
s32 sys_mmapper_allocate_memory_from_container(u32 size, u32 cid, u64 flags, vm::ptr<u32> mem_id)
ppu_error_code sys_mmapper_allocate_shared_memory_from_container(u64 unk, u32 size, u32 cid, u64 flags, vm::ptr<u32> mem_id)
{
sys_mmapper.error("sys_mmapper_allocate_memory_from_container(size=0x%x, cid=0x%x, flags=0x%llx, mem_id=*0x%x)", size, cid, flags, mem_id);
LV2_LOCK;
// Check if this container ID is valid.
const auto ct = idm::get<lv2_memory_container_t>(cid);
if (!ct)
{
return CELL_ESRCH;
}
sys_mmapper.error("sys_mmapper_allocate_shared_memory_from_container(0x%llx, size=0x%x, cid=0x%x, flags=0x%llx, mem_id=*0x%x)", unk, size, cid, flags, mem_id);
// Check page granularity.
switch (flags & SYS_MEMORY_PAGE_SIZE_MASK)
@@ -165,93 +141,100 @@ s32 sys_mmapper_allocate_memory_from_container(u32 size, u32 cid, u64 flags, vm:
}
}
if (ct->size - ct->used < size)
ppu_error_code result{};
const auto ct = idm::get<lv2_memory_container>(cid, [&](u32, lv2_memory_container& ct)
{
return CELL_ENOMEM;
// Try to get "physical memory"
if (!ct.take(size))
{
result = CELL_ENOMEM;
return false;
}
return true;
});
if (!ct && !result)
{
return CELL_ESRCH;
}
const u32 align =
flags & SYS_MEMORY_PAGE_SIZE_1M ? 0x100000 :
flags & SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 :
throw EXCEPTION("Unexpected");
ct->used += size;
if (!ct)
{
return result;
}
// Generate a new mem ID
*mem_id = idm::make<lv2_memory_t>(size, align, flags, ct);
*mem_id = idm::make<lv2_memory>(size, flags & SYS_MEMORY_PAGE_SIZE_1M ? 0x100000 : 0x10000, flags, ct);
return CELL_OK;
}
s32 sys_mmapper_change_address_access_right(u32 addr, u64 flags)
ppu_error_code sys_mmapper_change_address_access_right(u32 addr, u64 flags)
{
sys_mmapper.todo("sys_mmapper_change_address_access_right(addr=0x%x, flags=0x%llx)", addr, flags);
return CELL_OK;
}
s32 sys_mmapper_free_address(u32 addr)
ppu_error_code sys_mmapper_free_address(u32 addr)
{
sys_mmapper.error("sys_mmapper_free_address(addr=0x%x)", addr);
LV2_LOCK;
// Try to unmap area
const auto area = vm::unmap(addr, true);
const auto area = vm::get(vm::any, addr);
if (!area || addr != area->addr)
if (!area)
{
return CELL_EINVAL;
}
if (area->used)
if (!area.unique())
{
return CELL_EBUSY;
}
if (!vm::unmap(addr))
{
throw EXCEPTION("Unexpected (failed to unmap memory ad 0x%x)", addr);
}
return CELL_OK;
}
s32 sys_mmapper_free_memory(u32 mem_id)
ppu_error_code sys_mmapper_free_shared_memory(u32 mem_id)
{
sys_mmapper.warning("sys_mmapper_free_memory(mem_id=0x%x)", mem_id);
sys_mmapper.warning("sys_mmapper_free_shared_memory(mem_id=0x%x)", mem_id);
LV2_LOCK;
ppu_error_code result{};
// Check if this mem ID is valid.
const auto mem = idm::get<lv2_memory_t>(mem_id);
// Conditionally remove memory ID
const auto mem = idm::withdraw<lv2_memory>(mem_id, [&](u32, lv2_memory& mem)
{
if (!mem.addr.compare_and_swap_test(0, -1))
{
result = CELL_EBUSY;
return false;
}
if (!mem)
return true;
});
if (!mem && !result)
{
return CELL_ESRCH;
}
if (mem->addr)
if (!mem)
{
return CELL_EBUSY;
return result;
}
// Return physical memory to the container if necessary
if (mem->ct)
{
mem->ct->used -= mem->size;
}
// Release the allocated memory and remove the ID
idm::remove<lv2_memory_t>(mem_id);
// Return "physical memory" to the memory container
mem->ct->used -= mem->size;
return CELL_OK;
}
s32 sys_mmapper_map_memory(u32 addr, u32 mem_id, u64 flags)
ppu_error_code sys_mmapper_map_shared_memory(u32 addr, u32 mem_id, u64 flags)
{
sys_mmapper.error("sys_mmapper_map_memory(addr=0x%x, mem_id=0x%x, flags=0x%llx)", addr, mem_id, flags);
LV2_LOCK;
sys_mmapper.error("sys_mmapper_map_shared_memory(addr=0x%x, mem_id=0x%x, flags=0x%llx)", addr, mem_id, flags);
const auto area = vm::get(vm::any, addr);
@@ -260,7 +243,7 @@ s32 sys_mmapper_map_memory(u32 addr, u32 mem_id, u64 flags)
return CELL_EINVAL;
}
const auto mem = idm::get<lv2_memory_t>(mem_id);
const auto mem = idm::get<lv2_memory>(mem_id);
if (!mem)
{
@@ -272,28 +255,26 @@ s32 sys_mmapper_map_memory(u32 addr, u32 mem_id, u64 flags)
return CELL_EALIGN;
}
if (const u32 old_addr = mem->addr)
if (const u32 old_addr = mem->addr.compare_and_swap(0, -1))
{
sys_mmapper.warning("sys_mmapper_map_memory: Already mapped (mem_id=0x%x, addr=0x%x)", mem_id, old_addr);
sys_mmapper.warning("sys_mmapper_map_shared_memory(): Already mapped (mem_id=0x%x, addr=0x%x)", mem_id, old_addr);
return CELL_OK;
}
if (!area->falloc(addr, mem->size))
{
mem->addr = 0;
return CELL_EBUSY;
}
mem->addr = addr;
return CELL_OK;
}
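The addr field now acts as a small state machine: 0 means unmapped, the transient -1 claims the object while a map or free is in flight, and any other value is the mapped address. In isolation, with std::atomic (a sketch):

#include <atomic>
#include <cstdint>

// 0 = unmapped, 0xffffffff = transiently claimed, else = mapped address.
std::atomic<std::uint32_t> addr{0};

bool begin_map()
{
    std::uint32_t expected = 0;
    // Claim the object; fails if it is already mapped or being mapped/freed.
    return addr.compare_exchange_strong(expected, UINT32_MAX);
}

void commit_map(std::uint32_t a) { addr = a; } // publish the real address
void abort_map() { addr = 0; }                 // roll back, e.g. when falloc fails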
s32 sys_mmapper_search_and_map(u32 start_addr, u32 mem_id, u64 flags, vm::ptr<u32> alloc_addr)
ppu_error_code sys_mmapper_search_and_map(u32 start_addr, u32 mem_id, u64 flags, vm::ptr<u32> alloc_addr)
{
sys_mmapper.error("sys_mmapper_search_and_map(start_addr=0x%x, mem_id=0x%x, flags=0x%llx, alloc_addr=*0x%x)", start_addr, mem_id, flags, alloc_addr);
LV2_LOCK;
const auto area = vm::get(vm::any, start_addr);
if (!area || start_addr != area->addr || start_addr < 0x30000000 || start_addr >= 0xC0000000)
@@ -301,30 +282,34 @@ s32 sys_mmapper_search_and_map(u32 start_addr, u32 mem_id, u64 flags, vm::ptr<u3
return CELL_EINVAL;
}
const auto mem = idm::get<lv2_memory_t>(mem_id);
const auto mem = idm::get<lv2_memory>(mem_id);
if (!mem)
{
return CELL_ESRCH;
}
if (const u32 old_addr = mem->addr.compare_and_swap(0, -1))
{
sys_mmapper.warning("sys_mmapper_search_and_map(): Already mapped (mem_id=0x%x, addr=0x%x)", mem_id, old_addr);
return CELL_OK;
}
const u32 addr = area->alloc(mem->size, mem->align);
if (!addr)
{
mem->addr = 0;
return CELL_ENOMEM;
}
*alloc_addr = addr;
*alloc_addr = mem->addr = addr;
return CELL_OK;
}
s32 sys_mmapper_unmap_memory(u32 addr, vm::ptr<u32> mem_id)
ppu_error_code sys_mmapper_unmap_shared_memory(u32 addr, vm::ptr<u32> mem_id)
{
sys_mmapper.error("sys_mmapper_unmap_memory(addr=0x%x, mem_id=*0x%x)", addr, mem_id);
LV2_LOCK;
sys_mmapper.error("sys_mmapper_unmap_shared_memory(addr=0x%x, mem_id=*0x%x)", addr, mem_id);
const auto area = vm::get(vm::any, addr);
@@ -333,9 +318,15 @@ s32 sys_mmapper_unmap_memory(u32 addr, vm::ptr<u32> mem_id)
return CELL_EINVAL;
}
const auto mem = idm::select<lv2_memory_t>([&](u32, lv2_memory_t& mem)
const auto mem = idm::select<lv2_memory>([&](u32 id, lv2_memory& mem)
{
return mem.addr == addr;
if (mem.addr == addr)
{
*mem_id = id;
return true;
}
return false;
});
if (!mem)
@@ -343,19 +334,13 @@ s32 sys_mmapper_unmap_memory(u32 addr, vm::ptr<u32> mem_id)
return CELL_EINVAL;
}
if (!area->dealloc(addr))
{
throw EXCEPTION("Deallocation failed (mem_id=0x%x, addr=0x%x)", mem->id, addr);
}
mem->addr = 0;
*mem_id = mem->id;
VERIFY(area->dealloc(addr));
VERIFY(mem->addr.exchange(0) == addr);
return CELL_OK;
}
s32 sys_mmapper_enable_page_fault_notification(u32 addr, u32 eq)
ppu_error_code sys_mmapper_enable_page_fault_notification(u32 addr, u32 eq)
{
sys_mmapper.todo("sys_mmapper_enable_page_fault_notification(addr=0x%x, eq=0x%x)", addr, eq);

View File

@@ -2,18 +2,16 @@
#include "sys_memory.h"
struct lv2_memory_t
struct lv2_memory
{
const u32 size; // memory size
const u32 align; // required alignment
const u32 size; // Memory size
const u32 align; // Alignment required
const u64 flags;
const std::shared_ptr<lv2_memory_container_t> ct; // memory container the physical memory is taken from
const std::shared_ptr<lv2_memory_container> ct; // Associated memory container
const id_value<> id{};
atomic_t<u32> addr{}; // Actual mapping address
atomic_t<u32> addr{ 0 }; // actual mapping address
lv2_memory_t(u32 size, u32 align, u64 flags, const std::shared_ptr<lv2_memory_container_t> ct)
lv2_memory(u32 size, u32 align, u64 flags, const std::shared_ptr<lv2_memory_container>& ct)
: size(size)
, align(align)
, flags(flags)
@@ -23,14 +21,14 @@ struct lv2_memory_t
};
// SysCalls
s32 sys_mmapper_allocate_address(u64 size, u64 flags, u64 alignment, vm::ptr<u32> alloc_addr);
s32 sys_mmapper_allocate_fixed_address();
s32 sys_mmapper_allocate_memory(u64 size, u64 flags, vm::ptr<u32> mem_id);
s32 sys_mmapper_allocate_memory_from_container(u32 size, u32 cid, u64 flags, vm::ptr<u32> mem_id);
s32 sys_mmapper_change_address_access_right(u32 addr, u64 flags);
s32 sys_mmapper_free_address(u32 addr);
s32 sys_mmapper_free_memory(u32 mem_id);
s32 sys_mmapper_map_memory(u32 addr, u32 mem_id, u64 flags);
s32 sys_mmapper_search_and_map(u32 start_addr, u32 mem_id, u64 flags, vm::ptr<u32> alloc_addr);
s32 sys_mmapper_unmap_memory(u32 addr, vm::ptr<u32> mem_id);
s32 sys_mmapper_enable_page_fault_notification(u32 addr, u32 eq);
ppu_error_code sys_mmapper_allocate_address(u64 size, u64 flags, u64 alignment, vm::ps3::ptr<u32> alloc_addr);
ppu_error_code sys_mmapper_allocate_fixed_address();
ppu_error_code sys_mmapper_allocate_shared_memory(u64 unk, u32 size, u64 flags, vm::ps3::ptr<u32> mem_id);
ppu_error_code sys_mmapper_allocate_shared_memory_from_container(u64 unk, u32 size, u32 cid, u64 flags, vm::ps3::ptr<u32> mem_id);
ppu_error_code sys_mmapper_change_address_access_right(u32 addr, u64 flags);
ppu_error_code sys_mmapper_free_address(u32 addr);
ppu_error_code sys_mmapper_free_shared_memory(u32 mem_id);
ppu_error_code sys_mmapper_map_shared_memory(u32 addr, u32 mem_id, u64 flags);
ppu_error_code sys_mmapper_search_and_map(u32 start_addr, u32 mem_id, u64 flags, vm::ps3::ptr<u32> alloc_addr);
ppu_error_code sys_mmapper_unmap_shared_memory(u32 addr, vm::ps3::ptr<u32> mem_id);
ppu_error_code sys_mmapper_enable_page_fault_notification(u32 addr, u32 eq);

View File

@@ -74,7 +74,7 @@ s32 sys_process_get_number_of_object(u32 object, vm::ptr<u32> nump)
switch(object)
{
case SYS_MEM_OBJECT: *nump = idm::get_count<lv2_memory_t>(); break;
case SYS_MEM_OBJECT: *nump = idm::get_count<lv2_memory>(); break;
case SYS_MUTEX_OBJECT: *nump = idm::get_count<lv2_mutex_t>(); break;
case SYS_COND_OBJECT: *nump = idm::get_count<lv2_cond_t>(); break;
case SYS_RWLOCK_OBJECT: *nump = idm::get_count<lv2_rwlock_t>(); break;
@@ -121,7 +121,7 @@ s32 sys_process_get_id(u32 object, vm::ptr<u32> buffer, u32 size, vm::ptr<u32> s
switch (object)
{
case SYS_MEM_OBJECT: idm_get_set<lv2_memory_t>(objects); break;
case SYS_MEM_OBJECT: idm_get_set<lv2_memory>(objects); break;
case SYS_MUTEX_OBJECT: idm_get_set<lv2_mutex_t>(objects); break;
case SYS_COND_OBJECT: idm_get_set<lv2_cond_t>(objects); break;
case SYS_RWLOCK_OBJECT: idm_get_set<lv2_rwlock_t>(objects); break;

View File

@@ -15,7 +15,7 @@ ppu_error_code sys_vm_memory_map(u32 vsize, u32 psize, u32 cid, u64 flag, u64 po
return CELL_EINVAL;
}
if (cid != SYS_MEMORY_CONTAINER_ID_INVALID && !idm::check<lv2_memory_container_t>(cid))
if (cid != SYS_MEMORY_CONTAINER_ID_INVALID && !idm::check<lv2_memory_container>(cid))
{
return CELL_ESRCH;
}

View File

@@ -24,6 +24,8 @@
#include "wait_engine.h"
#include <mutex>
namespace vm
{
thread_local u64 g_tls_fault_count{};
@@ -482,7 +484,7 @@ namespace vm
return true;
}
u32 alloc(u32 size, memory_location_t location, u32 align)
u32 alloc(u32 size, memory_location_t location, u32 align, u32 sup)
{
const auto block = get(location);
@@ -491,10 +493,10 @@ namespace vm
throw EXCEPTION("Invalid memory location (%d)", location);
}
return block->alloc(size, align);
return block->alloc(size, align, sup);
}
u32 falloc(u32 addr, u32 size, memory_location_t location)
u32 falloc(u32 addr, u32 size, memory_location_t location, u32 sup)
{
const auto block = get(location, addr);
@@ -503,10 +505,10 @@ namespace vm
throw EXCEPTION("Invalid memory location (%d, addr=0x%x)", location, addr);
}
return block->falloc(addr, size);
return block->falloc(addr, size, sup);
}
bool dealloc(u32 addr, memory_location_t location)
u32 dealloc(u32 addr, memory_location_t location, u32* sup_out)
{
const auto block = get(location, addr);
@@ -515,7 +517,7 @@ namespace vm
throw EXCEPTION("Invalid memory location (%d, addr=0x%x)", location, addr);
}
return block->dealloc(addr);
return block->dealloc(addr, sup_out);
}
void dealloc_verbose_nothrow(u32 addr, memory_location_t location) noexcept
@@ -535,9 +537,9 @@ namespace vm
}
}
bool block_t::try_alloc(u32 addr, u32 size)
bool block_t::try_alloc(u32 addr, u32 size, u32 sup)
{
// check if memory area is already mapped
// Check if memory area is already mapped
for (u32 i = addr / 4096; i <= (addr + size - 1) / 4096; i++)
{
if (g_pages[i])
@@ -546,86 +548,70 @@ namespace vm
}
}
// try to reserve "physical" memory
if (!used.atomic_op([=](u32& used) -> bool
{
if (used > this->size)
{
throw EXCEPTION("Unexpected memory amount used (0x%x)", used);
}
if (used + size > this->size)
{
return false;
}
used += size;
return true;
}))
{
return false;
}
// map memory pages
// Map "real" memory pages
_page_map(addr, size, page_readable | page_writable);
// add entry
// Add entry
m_map[addr] = size;
// Add supplementary info if necessary
if (sup) m_sup[addr] = sup;
return true;
}
block_t::block_t(u32 addr, u32 size, u64 flags)
: addr(addr)
, size(size)
, flags(flags)
{
}
block_t::~block_t()
{
std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
// deallocate all memory
// Deallocate all memory
for (auto& entry : m_map)
{
_page_unmap(entry.first, entry.second);
}
}
u32 block_t::alloc(u32 size, u32 align)
u32 block_t::alloc(u32 size, u32 align, u32 sup)
{
std::lock_guard<std::mutex> lock(m_mutex);
std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
// align to minimal page size
// Align to minimal page size
size = ::align(size, 4096);
// check alignment (it's page allocation, so passing small values there is just silly)
// Check alignment (it's page allocation, so passing small values there is just silly)
if (align < 4096 || align != (0x80000000u >> cntlz32(align)))
{
throw EXCEPTION("Invalid alignment (size=0x%x, align=0x%x)", size, align);
}
// return if size is invalid
// Return if size is invalid
if (!size || size > this->size)
{
return 0;
}
// search for an appropriate place (unoptimized)
// Search for an appropriate place (unoptimized)
for (u32 addr = ::align(this->addr, align); addr < this->addr + this->size - 1; addr += align)
{
if (try_alloc(addr, size))
if (try_alloc(addr, size, sup))
{
return addr;
}
if (used + size > this->size)
{
return 0;
}
}
return 0;
}
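The alignment guard accepts only powers of two: 0x80000000u >> cntlz32(align) isolates the highest set bit of align, and the comparison holds exactly when that is the only bit set. An equivalent check without the count-leading-zeros intrinsic (a sketch):

#include <cstdint>

// True only for powers of two: clearing the lowest set bit of a power
// of two yields zero, e.g. is_pow2(0x10000) but not is_pow2(0x18000).
bool is_pow2(std::uint32_t x)
{
    return x != 0 && (x & (x - 1)) == 0;
}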
u32 block_t::falloc(u32 addr, u32 size)
u32 block_t::falloc(u32 addr, u32 size, u32 sup)
{
std::lock_guard<std::mutex> lock(m_mutex);
std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
// align to minimal page size
size = ::align(size, 4096);
@@ -636,7 +622,7 @@ namespace vm
return 0;
}
if (!try_alloc(addr, size))
if (!try_alloc(addr, size, sup))
{
return 0;
}
@@ -644,9 +630,9 @@ namespace vm
return addr;
}
bool block_t::dealloc(u32 addr)
u32 block_t::dealloc(u32 addr, u32* sup_out)
{
std::lock_guard<std::mutex> lock(m_mutex);
std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
const auto found = m_map.find(addr);
@@ -654,19 +640,36 @@ namespace vm
{
const u32 size = found->second;
// remove entry
// Remove entry
m_map.erase(found);
// return "physical" memory
used -= size;
// Unmap "real" memory pages
_page_unmap(addr, size);
// unmap memory pages
std::lock_guard<reservation_mutex_t>{ g_reservation_mutex }, _page_unmap(addr, size);
// Write supplementary info if necessary
if (sup_out) *sup_out = m_sup[addr];
return true;
// Remove supplementary info
m_sup.erase(addr);
return size;
}
return false;
return 0;
}
u32 block_t::used()
{
std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
u32 result = 0;
for (auto& entry : m_map)
{
result += entry.second;
}
return result;
}
std::shared_ptr<block_t> map(u32 addr, u32 size, u64 flags)
@@ -706,7 +709,7 @@ namespace vm
return block;
}
std::shared_ptr<block_t> unmap(u32 addr)
std::shared_ptr<block_t> unmap(u32 addr, bool must_be_empty)
{
std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
@@ -714,6 +717,11 @@ namespace vm
{
if (*it && (*it)->addr == addr)
{
if (must_be_empty && (!it->unique() || (*it)->used()))
{
return *it;
}
auto block = std::move(*it);
g_locations.erase(it);
return block;

View File

@@ -1,7 +1,6 @@
#pragma once
#include <map>
#include <mutex>
class thread_ctrl;
@@ -80,13 +79,13 @@ namespace vm
bool check_addr(u32 addr, u32 size = 1);
// Search and map memory in specified memory location (don't pass alignment smaller than 4096)
u32 alloc(u32 size, memory_location_t location, u32 align = 4096);
u32 alloc(u32 size, memory_location_t location, u32 align = 4096, u32 sup = 0);
// Map memory at specified address (in optionally specified memory location)
u32 falloc(u32 addr, u32 size, memory_location_t location = any);
u32 falloc(u32 addr, u32 size, memory_location_t location = any, u32 sup = 0);
// Unmap memory at specified address (in optionally specified memory location)
bool dealloc(u32 addr, memory_location_t location = any);
// Unmap memory at specified address (in optionally specified memory location), return size
u32 dealloc(u32 addr, memory_location_t location = any, u32* sup_out = nullptr);
// dealloc() with no return value and no exceptions
void dealloc_verbose_nothrow(u32 addr, memory_location_t location = any) noexcept;
@@ -94,46 +93,41 @@ namespace vm
// Object that handles memory allocations inside specific constant bounds ("location")
class block_t final
{
std::map<u32, u32> m_map; // addr -> size mapping of mapped locations
std::mutex m_mutex;
std::map<u32, u32> m_map; // Mapped memory: addr -> size
std::unordered_map<u32, u32> m_sup; // Supplementary info for allocations
bool try_alloc(u32 addr, u32 size);
bool try_alloc(u32 addr, u32 size, u32 sup);
public:
block_t(u32 addr, u32 size, u64 flags = 0)
: addr(addr)
, size(size)
, flags(flags)
, used(0)
{
}
block_t(u32 addr, u32 size, u64 flags = 0);
~block_t();
public:
const u32 addr; // start address
const u32 size; // total size
const u64 flags; // currently unused
atomic_t<u32> used; // amount of memory used, may be increased manually to prevent some memory from allocating
const u32 addr; // Start address
const u32 size; // Total size
const u64 flags; // Currently unused
// Search and map memory (don't pass alignment smaller than 4096)
u32 alloc(u32 size, u32 align = 4096);
u32 alloc(u32 size, u32 align = 4096, u32 sup = 0);
// Try to map memory at fixed location
u32 falloc(u32 addr, u32 size);
u32 falloc(u32 addr, u32 size, u32 sup = 0);
// Unmap memory at specified location previously returned by alloc()
bool dealloc(u32 addr);
// Unmap memory at specified location previously returned by alloc(), return size
u32 dealloc(u32 addr, u32* sup_out = nullptr);
// Get allocated memory count
u32 used();
};
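The new sup parameter lets a caller stash one u32 of supplementary data per allocation and read it back at deallocation; sys_memory uses it to remember which memory container to credit when the region is freed. The round trip, as a usage sketch with hypothetical values:

// 'block' is a std::shared_ptr<block_t>, e.g. vm::get(vm::user_space).
const u32 cid = 0x42;                                   // hypothetical container ID
const u32 addr = block->alloc(0x100000, 0x100000, cid); // stash cid as sup

u32 cid_out = 0;
const u32 size = block->dealloc(addr, &cid_out);        // size == 0x100000
// cid_out == 0x42: the caller now knows which container to credit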
// create new memory block with specified parameters and return it
// Create new memory block with specified parameters and return it
std::shared_ptr<block_t> map(u32 addr, u32 size, u64 flags = 0);
// delete existing memory block with specified start address
std::shared_ptr<block_t> unmap(u32 addr);
// Delete existing memory block with specified start address, return it
std::shared_ptr<block_t> unmap(u32 addr, bool must_be_empty = false);
// get memory block associated with optionally specified memory location or optionally specified address
// Get memory block associated with optionally specified memory location or optionally specified address
std::shared_ptr<block_t> get(memory_location_t location, u32 addr = 0);
// Get PS3/PSV virtual memory address from the provided pointer (nullptr always converted to 0)

View File

@@ -57,7 +57,7 @@ KernelExplorer::KernelExplorer(wxWindow* parent)
void KernelExplorer::Update()
{
m_tree->DeleteAllItems();
const u32 total_memory_usage = vm::get(vm::user_space)->used.load();
const u32 total_memory_usage = vm::get(vm::user_space)->used();
const auto& root = m_tree->AddRoot(fmt::format("Process, ID = 0x00000001, Total Memory Usage = 0x%x (%0.2f MB)", total_memory_usage, (float)total_memory_usage / (1024 * 1024)));
@@ -198,22 +198,22 @@ void KernelExplorer::Update()
}
// Memory Containers
if (const u32 count = idm::get_count<lv2_memory_container_t>())
if (const u32 count = idm::get_count<lv2_memory_container>())
{
const auto& node = m_tree->AppendItem(root, fmt::format("Memory Containers (%zu)", count));
idm::select<lv2_memory_container_t>([&](u32 id, lv2_memory_container_t&)
idm::select<lv2_memory_container>([&](u32 id, lv2_memory_container&)
{
m_tree->AppendItem(node, fmt::format("Memory Container: ID = 0x%08x", id));
});
}
// Memory Objects
if (const u32 count = idm::get_count<lv2_memory_t>())
if (const u32 count = idm::get_count<lv2_memory>())
{
const auto& node = m_tree->AppendItem(root, fmt::format("Memory Objects (%zu)", count));
idm::select<lv2_memory_t>([&](u32 id, lv2_memory_t&)
idm::select<lv2_memory>([&](u32 id, lv2_memory&)
{
m_tree->AppendItem(node, fmt::format("Memory Object: ID = 0x%08x", id));
});