vm: memory locking rewritten

Added vm::lock_sudo method (a wrapper for utils::memory_lock).
Moved locking outside of the vm::g_mutex scope.
Prelock sudo memory for RSX, vm::stack and vm::main.
Prelock sudo memory for shared memory objects.
Don't check for the TSX path.
Nekotekina 2020-11-15 03:26:43 +03:00
parent eaf0bbc108
commit ea5f5aea5f
6 changed files with 52 additions and 35 deletions
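
For a rough picture of what the new locking path boils down to, here is a minimal stand-alone sketch (an assumption-level illustration, not code from this commit): memory_lock stands in for utils::memory_lock and maps to the platform primitives (VirtualLock on Windows, mlock elsewhere), while lock_sudo_sketch and sudo_base are made-up names mirroring vm::lock_sudo and g_sudo_addr.

#include <cstddef>
#include <cstdint>
#include <cstdio>

#ifdef _WIN32
#include <windows.h>
#else
#include <sys/mman.h>
#endif

// Pin a range of pages in physical memory so the OS cannot page it out.
static bool memory_lock(void* ptr, std::size_t size)
{
#ifdef _WIN32
	return VirtualLock(ptr, size) != 0;
#else
	return ::mlock(ptr, size) == 0;
#endif
}

// In the spirit of vm::lock_sudo: check 4k alignment, lock, and only log on failure
// (locking can fail when RLIMIT_MEMLOCK or the working-set quota is too small).
static void lock_sudo_sketch(std::uint8_t* sudo_base, std::uint32_t addr, std::uint32_t size)
{
	if (addr % 4096 || size % 4096)
	{
		std::fprintf(stderr, "lock_sudo: unaligned range (addr=0x%x, size=0x%x)\n",
			static_cast<unsigned>(addr), static_cast<unsigned>(size));
		return;
	}

	if (!memory_lock(sudo_base + addr, size))
	{
		std::fprintf(stderr, "Failed to lock sudo memory (addr=0x%x, size=0x%x)\n",
			static_cast<unsigned>(addr), static_cast<unsigned>(size));
	}
}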


@ -434,7 +434,7 @@ error_code cellCameraOpenEx(s32 dev_num, vm::ptr<CellCameraInfoEx> info)
if (info->read_mode != CELL_CAMERA_READ_DIRECT && !info->buffer)
{
info->buffer = vm::cast(vm::alloc(vbuf_size, vm::memory_location_t::main));
info->buffer = vm::cast(vm::alloc(vbuf_size, vm::main));
info->bytesize = vbuf_size;
}
@ -480,7 +480,7 @@ error_code cellCameraClose(s32 dev_num)
return CELL_CAMERA_ERROR_NOT_OPEN;
}
vm::dealloc(g_camera->info.buffer.addr(), vm::memory_location_t::main);
vm::dealloc(g_camera->info.buffer.addr(), vm::main);
g_camera->is_open = false;
return CELL_OK;


@ -58,12 +58,13 @@ error_code sys_memory_allocate(cpu_thread& cpu, u32 size, u64 flags, vm::ptr<u32
if (const auto area = vm::reserve_map(align == 0x10000 ? vm::user64k : vm::user1m, 0, ::align(size, 0x10000000), 0x401))
{
if (u32 addr = area->alloc(size, align))
if (u32 addr = area->alloc(size, nullptr, align))
{
verify(HERE), !g_fxo->get<sys_memory_address_table>()->addrs[addr >> 16].exchange(dct);
if (alloc_addr)
{
vm::lock_sudo(addr, size);
*alloc_addr = addr;
return CELL_OK;
}
@ -134,6 +135,7 @@ error_code sys_memory_allocate_from_container(cpu_thread& cpu, u32 size, u32 cid
if (alloc_addr)
{
vm::lock_sudo(addr, size);
*alloc_addr = addr;
return CELL_OK;
}


@ -20,6 +20,10 @@ lv2_memory::lv2_memory(u32 size, u32 align, u64 flags, lv2_memory_container* ct)
, ct(ct)
, shm(std::make_shared<utils::shm>(size, 1 /* shareable flag */))
{
#ifndef _WIN32
// Optimization that's useless on Windows :puke:
utils::memory_lock(shm->map_self(), size);
#endif
}
template<> DECLARE(ipc_manager<lv2_memory, u64>::g_ipc) {};
@ -588,6 +592,7 @@ error_code sys_mmapper_map_shared_memory(ppu_thread& ppu, u32 addr, u32 mem_id,
return CELL_EBUSY;
}
vm::lock_sudo(addr, mem->size);
return CELL_OK;
}
@ -627,7 +632,7 @@ error_code sys_mmapper_search_and_map(ppu_thread& ppu, u32 start_addr, u32 mem_i
return mem.ret;
}
const u32 addr = area->alloc(mem->size, mem->align, &mem->shm, mem->align == 0x10000 ? SYS_MEMORY_PAGE_SIZE_64K : SYS_MEMORY_PAGE_SIZE_1M);
const u32 addr = area->alloc(mem->size, &mem->shm, mem->align, mem->align == 0x10000 ? SYS_MEMORY_PAGE_SIZE_64K : SYS_MEMORY_PAGE_SIZE_1M);
if (!addr)
{
@ -635,6 +640,7 @@ error_code sys_mmapper_search_and_map(ppu_thread& ppu, u32 start_addr, u32 mem_i
return CELL_ENOMEM;
}
vm::lock_sudo(addr, mem->size);
*alloc_addr = addr;
return CELL_OK;
}
@ -652,7 +658,7 @@ error_code sys_mmapper_unmap_shared_memory(ppu_thread& ppu, u32 addr, vm::ptr<u3
return {CELL_EINVAL, addr};
}
const auto shm = area->get(addr);
const auto shm = area->peek(addr);
if (!shm.second)
{


@ -77,6 +77,7 @@ error_code sys_vm_memory_map(ppu_thread& ppu, u32 vsize, u32 psize, u32 cid, u64
{
// Alloc all memory (shall not fail)
verify(HERE), area->alloc(vsize);
vm::lock_sudo(area->addr, vsize);
idm::make<sys_vm_t>(area->addr, vsize, ct, psize);


@ -763,18 +763,6 @@ namespace vm
// Unlock
g_range_lock.release(0);
perf_meter<"PAGE_LCK"_u64> perf1;
if (!g_use_rtm)
{
perf1.reset();
}
else if (!utils::memory_lock(g_sudo_addr + addr, size))
{
vm_log.error("Failed to lock memory. Consider increasing your system limits.\n"
"addr=0x%x, size=0x%x, shm=%d, shm:[f=%d,l=%u]", addr, size, +!!shm, shm ? shm->flags() : 0, shm ? shm->info : 0);
}
}
bool page_protect(u32 addr, u32 size, u8 flags_test, u8 flags_set, u8 flags_clear)
@ -1009,7 +997,7 @@ namespace vm
fmt::throw_exception("Invalid memory location (%u)" HERE, +location);
}
return block->alloc(size, align);
return block->alloc(size, nullptr, align);
}
u32 falloc(u32 addr, u32 size, memory_location_t location)
@ -1053,6 +1041,19 @@ namespace vm
}
}
void lock_sudo(u32 addr, u32 size)
{
perf_meter<"PAGE_LCK"_u64> perf;
verify("lock_sudo" HERE), addr % 4096 == 0;
verify("lock_sudo" HERE), size % 4096 == 0;
if (!utils::memory_lock(g_sudo_addr + addr, size))
{
vm_log.error("Failed to lock sudo memory (addr=0x%x, size=0x%x). Consider increasing your system limits.", addr, size);
}
}
bool block_t::try_alloc(u32 addr, u8 flags, u32 size, std::shared_ptr<utils::shm>&& shm)
{
// Check if memory area is already mapped
@ -1108,12 +1109,13 @@ namespace vm
, size(size)
, flags(flags)
{
if (flags & 0x100)
if (flags & 0x100 || flags & 0x20)
{
// Special path for 4k-aligned pages
// Special path for whole-allocated areas allowing 4k granularity
m_common = std::make_shared<utils::shm>(size);
verify(HERE), m_common->map_critical(vm::base(addr), utils::protection::no) == vm::base(addr);
verify(HERE), m_common->map_critical(vm::get_super_ptr(addr)) == vm::get_super_ptr(addr);
m_common->map_critical(vm::base(addr), utils::protection::no);
m_common->map_critical(vm::get_super_ptr(addr));
lock_sudo(addr, size);
}
}
@ -1131,7 +1133,6 @@ namespace vm
it = next;
}
// Special path for 4k-aligned pages
if (m_common)
{
m_common->unmap_critical(vm::base(addr));
@ -1140,7 +1141,7 @@ namespace vm
}
}
u32 block_t::alloc(const u32 orig_size, u32 align, const std::shared_ptr<utils::shm>* src, u64 flags)
u32 block_t::alloc(const u32 orig_size, const std::shared_ptr<utils::shm>* src, u32 align, u64 flags)
{
if (!src)
{
@ -1148,8 +1149,6 @@ namespace vm
flags = this->flags;
}
vm::writer_lock lock(0);
// Determine minimal alignment
const u32 min_page_size = flags & 0x100 ? 0x1000 : 0x10000;
@ -1187,7 +1186,11 @@ namespace vm
else if (src)
shm = *src;
else
{
shm = std::make_shared<utils::shm>(size);
}
vm::writer_lock lock(0);
// Search for an appropriate place (unoptimized)
for (u32 addr = ::align(this->addr, align); u64{addr} + size <= u64{this->addr} + this->size; addr += align)
@ -1209,8 +1212,6 @@ namespace vm
flags = this->flags;
}
vm::writer_lock lock(0);
// Determine minimal alignment
const u32 min_page_size = flags & 0x100 ? 0x1000 : 0x10000;
@ -1242,7 +1243,11 @@ namespace vm
else if (src)
shm = *src;
else
{
shm = std::make_shared<utils::shm>(size);
}
vm::writer_lock lock(0);
if (!try_alloc(addr, pflags, size, std::move(shm)))
{
@ -1289,7 +1294,7 @@ namespace vm
}
}
std::pair<u32, std::shared_ptr<utils::shm>> block_t::get(u32 addr, u32 size)
std::pair<u32, std::shared_ptr<utils::shm>> block_t::peek(u32 addr, u32 size)
{
if (addr < this->addr || addr + u64{size} > this->addr + u64{this->size})
{
@ -1602,12 +1607,12 @@ namespace vm
g_locations =
{
std::make_shared<block_t>(0x00010000, 0x1FFF0000, 0x200), // main
std::make_shared<block_t>(0x00010000, 0x1FFF0000, 0x220), // main
std::make_shared<block_t>(0x20000000, 0x10000000, 0x201), // user 64k pages
nullptr, // user 1m pages
nullptr, // rsx context
std::make_shared<block_t>(0xC0000000, 0x10000000), // video
std::make_shared<block_t>(0xD0000000, 0x10000000, 0x111), // stack
std::make_shared<block_t>(0xC0000000, 0x10000000, 0x220), // video
std::make_shared<block_t>(0xD0000000, 0x10000000, 0x131), // stack
std::make_shared<block_t>(0xE0000000, 0x20000000), // SPU reserved
};


@ -76,7 +76,7 @@ namespace vm
return check_addr(addr, flags, Size);
}
return !(~g_pages[addr / 4096].flags & (flags | page_allocated));
return !(~g_pages[addr / 4096].flags & (flags | page_allocated));
}
// Search and map memory in specified memory location (min alignment is 0x10000)
@ -91,6 +91,9 @@ namespace vm
// dealloc() with no return value and no exceptions
void dealloc_verbose_nothrow(u32 addr, memory_location_t location = any) noexcept;
// utils::memory_lock wrapper for locking sudo memory
void lock_sudo(u32 addr, u32 size);
// Object that handles memory allocations inside specific constant bounds ("location")
class block_t final
{
@ -113,7 +116,7 @@ namespace vm
const u64 flags; // Currently unused
// Search and map memory (min alignment is 0x10000)
u32 alloc(u32 size, u32 align = 0x10000, const std::shared_ptr<utils::shm>* = nullptr, u64 flags = 0);
u32 alloc(u32 size, const std::shared_ptr<utils::shm>* = nullptr, u32 align = 0x10000, u64 flags = 0);
// Try to map memory at fixed location
u32 falloc(u32 addr, u32 size, const std::shared_ptr<utils::shm>* = nullptr, u64 flags = 0);
@ -122,7 +125,7 @@ namespace vm
u32 dealloc(u32 addr, const std::shared_ptr<utils::shm>* = nullptr);
// Get memory at specified address (if size = 0, addr assumed exact)
std::pair<u32, std::shared_ptr<utils::shm>> get(u32 addr, u32 size = 0);
std::pair<u32, std::shared_ptr<utils::shm>> peek(u32 addr, u32 size = 0);
// Get allocated memory count
u32 used();