Always check page_allocated in vm::check_addr

Nekotekina 2019-08-11 23:31:49 +03:00
parent d45c743877
commit f8f3067deb
7 changed files with 13 additions and 10 deletions
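The change folds page_allocated into the flags argument inside vm::check_addr itself (see the vm.cpp and vm.h hunks below), so call sites only pass the access bits they actually care about. The following is a minimal standalone sketch of that idea, not part of the diff: flag values, the fake page table and the old_check/new_check helpers are made up for illustration and are not RPCS3's real vm types.

#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>

// Simplified stand-ins for vm::page_* bits (illustrative values only)
enum : std::uint8_t
{
	page_readable  = 1 << 0,
	page_writable  = 1 << 1,
	page_allocated = 1 << 2,
};

// Fake per-page flag table standing in for the real page table
std::array<std::uint8_t, 16> g_fake_pages{};

// Old behaviour: the caller had to remember to OR in page_allocated
bool old_check(std::size_t page, std::uint8_t flags)
{
	return (g_fake_pages[page] & flags) == flags;
}

// New behaviour: page_allocated is always added to the required mask
bool new_check(std::size_t page, std::uint8_t flags = page_readable)
{
	flags |= page_allocated; // "Always check this flag"
	return (g_fake_pages[page] & flags) == flags;
}

int main()
{
	g_fake_pages[1] = page_allocated | page_readable; // mapped, read-only page
	g_fake_pages[2] = page_readable | page_writable;  // stale entry: writable bits set but not allocated

	// Same result, but the new call site no longer has to name page_allocated
	assert(old_check(1, page_allocated | page_readable) == new_check(1, page_readable));

	// The old short form silently skipped the allocation check; the new one cannot
	assert(old_check(2, page_writable) == true);
	assert(new_check(2, page_writable) == false);
}

With the allocation bit implied, the one-argument form keeps its meaning (allocated and readable), while callers that only pass page_writable can no longer skip the allocation check by accident.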


@@ -567,7 +567,7 @@ bool GDBDebugServer::cmd_write_memory(gdb_cmd & cmd)
 	u32 len = hex_to_u32(cmd.data.substr(s + 1, s2 - s - 1));
 	const char* data_ptr = (cmd.data.c_str()) + s2 + 1;
 	for (u32 i = 0; i < len; ++i) {
-		if (vm::check_addr(addr + i, 1, vm::page_allocated | vm::page_writable)) {
+		if (vm::check_addr(addr + i, 1, vm::page_writable)) {
 			u8 val;
 			int res = sscanf_s(data_ptr, "%02hhX", &val);
 			if (!res) {


@@ -1273,7 +1273,7 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)
 		return true;
 	}

-	if (vm::check_addr(addr, std::max<std::size_t>(1, d_size), vm::page_allocated | (is_writing ? vm::page_writable : vm::page_readable)))
+	if (vm::check_addr(addr, std::max<std::size_t>(1, d_size), is_writing ? vm::page_writable : vm::page_readable))
 	{
 		if (cpu && cpu->test_stopped())
 		{
@@ -1328,12 +1328,12 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)
 	u64 data3;
 	{
 		vm::reader_lock rlock;
-		if (vm::check_addr(addr, std::max<std::size_t>(1, d_size), vm::page_allocated | (is_writing ? vm::page_writable : vm::page_readable)))
+		if (vm::check_addr(addr, std::max<std::size_t>(1, d_size), is_writing ? vm::page_writable : vm::page_readable))
 		{
 			// Memory was allocated inbetween, retry
 			return true;
 		}
-		else if (vm::check_addr(addr, std::max<std::size_t>(1, d_size), vm::page_allocated | vm::page_readable))
+		else if (vm::check_addr(addr, std::max<std::size_t>(1, d_size)))
 		{
 			data3 = SYS_MEMORY_PAGE_FAULT_CAUSE_READ_ONLY; // TODO
 		}
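The else-if above now leans on the new default argument: vm::check_addr(addr, size) with no flags means page_readable, with page_allocated implied inside the function, so the branch still distinguishes a write to a mapped read-only page from a fault on unmapped memory. Below is a rough standalone sketch of that decision order, not part of the diff; the simplified flags and the classify() helper are hypothetical (single page, no locking).

#include <cstdint>
#include <cstdio>

enum : std::uint8_t
{
	page_readable  = 1 << 0,
	page_writable  = 1 << 1,
	page_allocated = 1 << 2,
};

enum class fault_kind { retry, read_only, unmapped };

// Mirrors the if / else-if order in the hunk above
fault_kind classify(std::uint8_t page_flags, bool is_writing)
{
	auto check = [&](std::uint8_t flags)
	{
		flags |= page_allocated; // what vm::check_addr now does internally
		return (page_flags & flags) == flags;
	};

	if (check(is_writing ? page_writable : page_readable))
	{
		return fault_kind::retry; // memory became accessible in between, retry the access
	}
	if (check(page_readable)) // same requirement as calling check_addr with its defaults
	{
		return fault_kind::read_only; // mapped and readable, so a write hit a read-only page
	}
	return fault_kind::unmapped;
}

int main()
{
	std::printf("%d\n", static_cast<int>(classify(page_allocated | page_readable, true)));                  // read_only
	std::printf("%d\n", static_cast<int>(classify(0, true)));                                               // unmapped
	std::printf("%d\n", static_cast<int>(classify(page_allocated | page_readable | page_writable, true)));  // retry
}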
@@ -2086,7 +2086,7 @@ u64 thread_ctrl::get_affinity_mask(thread_class group)
 		else
 		{
-			// zen(+)
+			// Ryzen 7, Threadripper
 			// Assign threads 3-16
 			ppu_mask = 0b1111111100000000;
 			spu_mask = ppu_mask;


@@ -501,12 +501,12 @@ std::string ppu_thread::dump() const
 	u32 stack_min = stack_ptr & ~0xfff;
 	u32 stack_max = stack_min + 4096;

-	while (stack_min && vm::check_addr(stack_min - 4096, 4096, vm::page_allocated | vm::page_writable))
+	while (stack_min && vm::check_addr(stack_min - 4096, 4096, vm::page_writable))
 	{
 		stack_min -= 4096;
 	}

-	while (stack_max + 4096 && vm::check_addr(stack_max, 4096, vm::page_allocated | vm::page_writable))
+	while (stack_max + 4096 && vm::check_addr(stack_max, 4096, vm::page_writable))
 	{
 		stack_max += 4096;
 	}
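The stack walk above grows the [stack_min, stack_max) range over contiguous writable 4 KiB pages; only vm::page_writable needs to be passed now, since the allocation check is implicit. A toy standalone version of the same scan, not part of the diff, using a fake page table and a check_writable() stand-in for vm::check_addr:

#include <array>
#include <cstdint>
#include <cstdio>

constexpr std::uint32_t page_size = 4096;

// Toy page table: true means "allocated and writable" for that 4 KiB page
std::array<bool, 64> writable_page{};

// Stand-in for vm::check_addr(addr, 4096, vm::page_writable)
bool check_writable(std::uint32_t addr)
{
	return writable_page[addr / page_size];
}

int main()
{
	// Mark pages 8..15 as the thread's stack
	for (std::uint32_t i = 8; i < 16; i++)
		writable_page[i] = true;

	const std::uint32_t stack_ptr = 10 * page_size + 0x30;

	// Same scan as the hunk above: widen the range while adjacent pages stay writable
	std::uint32_t stack_min = stack_ptr & ~0xfffu;
	std::uint32_t stack_max = stack_min + page_size;

	while (stack_min && check_writable(stack_min - page_size))
	{
		stack_min -= page_size;
	}

	while (stack_max + page_size && check_writable(stack_max))
	{
		stack_max += page_size;
	}

	// With this toy table: [0x8000, 0x10000)
	std::printf("stack range: [0x%x, 0x%x)\n", stack_min, stack_max);
}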


@@ -14,7 +14,7 @@ error_code sys_gpio_get(u64 device_id, vm::ptr<u64> value)
 		return CELL_ESRCH;
 	}

-	if (!vm::check_addr(value.addr(), sizeof(u64), vm::page_allocated | vm::page_writable))
+	if (!vm::check_addr(value.addr(), sizeof(u64), vm::page_writable))
 	{
 		return CELL_EFAULT;
 	}


@@ -187,7 +187,7 @@ error_code sys_rsx_context_iomap(u32 context_id, u32 io, u32 ea, u32 size, u64 f
 	for (u32 addr = ea, end = ea + size; addr < end; addr += 0x100000)
 	{
-		if (!vm::check_addr(addr, 1, vm::page_allocated | vm::page_readable | (addr < 0x20000000 ? 0 : vm::page_1m_size)))
+		if (!vm::check_addr(addr, 1, vm::page_readable | (addr < 0x20000000 ? 0 : vm::page_1m_size)))
 		{
 			return CELL_EINVAL;
 		}


@@ -528,6 +528,9 @@ namespace vm
 			return false;
 		}

+		// Always check this flag
+		flags |= page_allocated;
+
 		for (u32 i = addr / 4096, max = (addr + size - 1) / 4096; i <= max; i++)
 		{
 			if (UNLIKELY((g_pages[i].flags & flags) != flags))


@@ -52,7 +52,7 @@ namespace vm
 	bool page_protect(u32 addr, u32 size, u8 flags_test = 0, u8 flags_set = 0, u8 flags_clear = 0);

 	// Check flags for specified memory range (unsafe)
-	bool check_addr(u32 addr, u32 size = 1, u8 flags = page_allocated | page_readable);
+	bool check_addr(u32 addr, u32 size = 1, u8 flags = page_readable);

 	// Search and map memory in specified memory location (min alignment is 0x10000)
 	u32 alloc(u32 size, memory_location_t location, u32 align = 0x10000);