/**
 ******************************************************************************
 * Xenia : Xbox 360 Emulator Research Project                                 *
 ******************************************************************************
 * Copyright 2013 Ben Vanik. All rights reserved.                             *
 * Released under the BSD license - see LICENSE in the root for more details. *
 ******************************************************************************
 */

#include "xenia/memory.h"

#include <gflags/gflags.h>

#include <algorithm>
#include <cstring>

#include "xenia/base/byte_stream.h"
#include "xenia/base/clock.h"
#include "xenia/base/logging.h"
#include "xenia/base/math.h"
#include "xenia/base/threading.h"
#include "xenia/cpu/mmio_handler.h"

// TODO(benvanik): move xbox.h out
#include "xenia/xbox.h"

DEFINE_bool(protect_zero, false,
            "Protect the zero page from reads and writes.");
DEFINE_bool(protect_on_release, false,
            "Protect released memory to prevent accesses.");
DEFINE_bool(scribble_heap, false,
            "Scribble 0xCD into all allocated heap memory.");

namespace xe {

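// Returns the number of |page_size|-sized pages needed to cover |value|
// bytes, rounding up to the next page boundary.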
uint32_t get_page_count(uint32_t value, uint32_t page_size) {
  return xe::round_up(value, page_size) / page_size;
}

/**
 * Memory map:
 * 0x00000000 - 0x3FFFFFFF (1024mb) - virtual 4k pages
 * 0x40000000 - 0x7FFFFFFF (1024mb) - virtual 64k pages
 * 0x80000000 - 0x8BFFFFFF ( 192mb) - xex 64k pages
 * 0x8C000000 - 0x8FFFFFFF (  64mb) - xex 64k pages (encrypted)
 * 0x90000000 - 0x9FFFFFFF ( 256mb) - xex 4k pages
 * 0xA0000000 - 0xBFFFFFFF ( 512mb) - physical 64k pages
 * 0xC0000000 - 0xDFFFFFFF          - physical 16mb pages
 * 0xE0000000 - 0xFFFFFFFF          - physical 4k pages
 *
 * We use the host OS to create an entire addressable range for this. That way
 * we don't have to emulate a TLB. It'd be really cool to pass through page
 * sizes or use madvise to let the OS know what to expect.
 *
 * We create our own heap of committed memory that lives at
 * memory_HEAP_LOW to memory_HEAP_HIGH - all normal user allocations
 * come from there. Since the Xbox has no paging, we know that the size of this
 * heap will never need to be larger than ~512MB (realistically, smaller than
 * that). We place it far away from the XEX data and keep the memory around it
 * uncommitted so that we have some warning if things go astray.
 *
 * For XEX/GPU/etc data we allow placement allocations (base_address != 0) and
 * commit the requested memory as needed. This bypasses the standard heap, but
 * XEXs should never be overwriting anything so that's fine. We can also query
 * for previous commits and assert that we really aren't committing twice.
 *
 * GPU memory is mapped onto the lower 512mb of the virtual 4k range (0).
 * So 0xA0000000 = 0x00000000. A more sophisticated allocator could handle
 * this.
 */

static Memory* active_memory_ = nullptr;

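// Emergency diagnostic hook: dumps the active memory map. Guards against
// re-entry so a crash while dumping doesn't recurse into another dump.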
void CrashDump() {
  static std::atomic<int> in_crash_dump(0);
  if (in_crash_dump.fetch_add(1)) {
    xe::FatalError(
        "Hard crash: the memory system crashed while dumping a crash dump.");
    return;
  }
  active_memory_->DumpMap();
  --in_crash_dump;
}

Memory::Memory() {
  system_page_size_ = uint32_t(xe::memory::page_size());
  assert_zero(active_memory_);
  active_memory_ = this;
}

Memory::~Memory() {
  assert_true(active_memory_ == this);
  active_memory_ = nullptr;

  // Uninstall the MMIO handler, as we won't be able to service more
  // requests.
  mmio_handler_.reset();

  heaps_.v00000000.Dispose();
  heaps_.v40000000.Dispose();
  heaps_.v80000000.Dispose();
  heaps_.v90000000.Dispose();
  heaps_.vA0000000.Dispose();
  heaps_.vC0000000.Dispose();
  heaps_.vE0000000.Dispose();
  heaps_.physical.Dispose();

  // Unmap all views and close mapping.
  if (mapping_) {
    UnmapViews();
    xe::memory::CloseFileMappingHandle(mapping_);
    mapping_base_ = nullptr;
    mapping_ = nullptr;
  }

  virtual_membase_ = nullptr;
  physical_membase_ = nullptr;
}

bool Memory::Initialize() {
  file_name_ = std::wstring(L"Local\\xenia_memory_") +
               std::to_wstring(Clock::QueryHostTickCount());

  // Create main page file-backed mapping. This is all reserved but
  // uncommitted (so it shouldn't expand page file).
  mapping_ = xe::memory::CreateFileMappingHandle(
      file_name_,
      // entire 4gb space + 512mb physical:
      0x11FFFFFFF, xe::memory::PageAccess::kReadWrite, false);
  if (!mapping_) {
    XELOGE("Unable to reserve the 4gb guest address space.");
    assert_not_null(mapping_);
    return false;
  }

  // Attempt to create our views. This may fail at the first address
  // we pick, so try a few times.
  mapping_base_ = 0;
  for (size_t n = 32; n < 64; n++) {
    auto mapping_base = reinterpret_cast<uint8_t*>(1ull << n);
    if (!MapViews(mapping_base)) {
      mapping_base_ = mapping_base;
      break;
    }
  }
  if (!mapping_base_) {
    XELOGE("Unable to find a continuous block in the 64bit address space.");
    assert_always();
    return false;
  }
  virtual_membase_ = mapping_base_;
  physical_membase_ = mapping_base_ + 0x100000000ull;

  // Prepare virtual heaps.
  heaps_.v00000000.Initialize(virtual_membase_, 0x00000000, 0x40000000, 4096);
  heaps_.v40000000.Initialize(virtual_membase_, 0x40000000,
                              0x40000000 - 0x01000000, 64 * 1024);
  heaps_.v80000000.Initialize(virtual_membase_, 0x80000000, 0x10000000,
                              64 * 1024);
  heaps_.v90000000.Initialize(virtual_membase_, 0x90000000, 0x10000000, 4096);

  // Prepare physical heaps.
  heaps_.physical.Initialize(physical_membase_, 0x00000000, 0x20000000, 4096);
  // HACK: should be 64k, but with us overlaying A and E it needs to be 4.
  /*heaps_.vA0000000.Initialize(virtual_membase_, 0xA0000000, 0x20000000,
                              64 * 1024, &heaps_.physical);*/
  heaps_.vA0000000.Initialize(virtual_membase_, 0xA0000000, 0x20000000,
                              4 * 1024, &heaps_.physical);
  heaps_.vC0000000.Initialize(virtual_membase_, 0xC0000000, 0x20000000,
                              16 * 1024 * 1024, &heaps_.physical);
  heaps_.vE0000000.Initialize(virtual_membase_, 0xE0000000, 0x1FD00000, 4096,
                              &heaps_.physical);

  // Take the first page at 0 so we can check for writes.
  heaps_.v00000000.AllocFixed(
      0x00000000, 64 * 1024, 64 * 1024,
      kMemoryAllocationReserve | kMemoryAllocationCommit,
      !FLAGS_protect_zero ? kMemoryProtectRead | kMemoryProtectWrite
                          : kMemoryProtectNoAccess);

  // GPU writeback.
  // 0xC... is physical, 0x7F... is virtual. We may need to overlay these.
  heaps_.vC0000000.AllocFixed(
      0xC0000000, 0x01000000, 32,
      kMemoryAllocationReserve | kMemoryAllocationCommit,
      kMemoryProtectRead | kMemoryProtectWrite);

  // Add handlers for MMIO.
  mmio_handler_ = cpu::MMIOHandler::Install(virtual_membase_, physical_membase_,
                                            physical_membase_ + 0x1FFFFFFF);
  if (!mmio_handler_) {
    XELOGE("Unable to install MMIO handlers");
    assert_always();
    return false;
  }

  // ?
  uint32_t unk_phys_alloc;
  heaps_.vA0000000.Alloc(0x340000, 64 * 1024, kMemoryAllocationReserve,
                         kMemoryProtectNoAccess, true, &unk_phys_alloc);

  return true;
}

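// Table describing how each guest virtual range maps onto the shared file
// mapping. Several ranges alias the same target offset so that different
// guest views see the same physical backing.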
static const struct {
  uint64_t virtual_address_start;
  uint64_t virtual_address_end;
  uint64_t target_address;
} map_info[] = {
    // (1024mb) - virtual 4k pages
    {0x00000000, 0x3FFFFFFF, 0x0000000000000000ull},
    // (1024mb) - virtual 64k pages (cont)
    {0x40000000, 0x7EFFFFFF, 0x0000000040000000ull},
    // (16mb) - GPU writeback + 15mb of XPS?
    {0x7F000000, 0x7FFFFFFF, 0x0000000100000000ull},
    // (256mb) - xex 64k pages
    {0x80000000, 0x8FFFFFFF, 0x0000000080000000ull},
    // (256mb) - xex 4k pages
    {0x90000000, 0x9FFFFFFF, 0x0000000080000000ull},
    // (512mb) - physical 64k pages
    {0xA0000000, 0xBFFFFFFF, 0x0000000100000000ull},
    // - physical 16mb pages
    {0xC0000000, 0xDFFFFFFF, 0x0000000100000000ull},
    // - physical 4k pages
    {0xE0000000, 0xFFFFFFFF, 0x0000000100000000ull},
    // - physical raw
    {0x100000000, 0x11FFFFFFF, 0x0000000100000000ull},
};

int Memory::MapViews(uint8_t* mapping_base) {
  assert_true(xe::countof(map_info) == xe::countof(views_.all_views));
  for (size_t n = 0; n < xe::countof(map_info); n++) {
    views_.all_views[n] = reinterpret_cast<uint8_t*>(xe::memory::MapFileView(
        mapping_, mapping_base + map_info[n].virtual_address_start,
        map_info[n].virtual_address_end - map_info[n].virtual_address_start + 1,
        xe::memory::PageAccess::kReadWrite, map_info[n].target_address));
    if (!views_.all_views[n]) {
      // Failed, so bail and try again.
      UnmapViews();
      return 1;
    }
  }
  return 0;
}

void Memory::UnmapViews() {
  for (size_t n = 0; n < xe::countof(views_.all_views); n++) {
    if (views_.all_views[n]) {
      size_t length = map_info[n].virtual_address_end -
                      map_info[n].virtual_address_start + 1;
      xe::memory::UnmapFileView(mapping_, views_.all_views[n], length);
    }
  }
}

void Memory::Reset() {
  heaps_.v00000000.Reset();
  heaps_.v40000000.Reset();
  heaps_.v80000000.Reset();
  heaps_.v90000000.Reset();
  heaps_.physical.Reset();
}

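// Maps a guest address to the heap that owns it. Returns nullptr for the
// unmapped gap at 0x7F000000-0x7FFFFFFF and for addresses past 0xFFD00000.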
BaseHeap* Memory::LookupHeap(uint32_t address) {
  if (address < 0x40000000) {
    return &heaps_.v00000000;
  } else if (address < 0x7F000000) {
    return &heaps_.v40000000;
  } else if (address < 0x80000000) {
    return nullptr;
  } else if (address < 0x90000000) {
    return &heaps_.v80000000;
  } else if (address < 0xA0000000) {
    return &heaps_.v90000000;
  } else if (address < 0xC0000000) {
    return &heaps_.vA0000000;
  } else if (address < 0xE0000000) {
    return &heaps_.vC0000000;
  } else if (address < 0xFFD00000) {
    return &heaps_.vE0000000;
  } else {
    return nullptr;
  }
}

BaseHeap* Memory::LookupHeapByType(bool physical, uint32_t page_size) {
  if (physical) {
    if (page_size <= 4096) {
      // HACK: should be vE0000000
      return &heaps_.vA0000000;
    } else if (page_size <= 64 * 1024) {
      return &heaps_.vA0000000;
    } else {
      return &heaps_.vC0000000;
    }
  } else {
    if (page_size <= 4096) {
      return &heaps_.v00000000;
    } else {
      return &heaps_.v40000000;
    }
  }
}

void Memory::Zero(uint32_t address, uint32_t size) {
  std::memset(TranslateVirtual(address), 0, size);
}

void Memory::Fill(uint32_t address, uint32_t size, uint8_t value) {
  std::memset(TranslateVirtual(address), value, size);
}

void Memory::Copy(uint32_t dest, uint32_t src, uint32_t size) {
  uint8_t* pdest = TranslateVirtual(dest);
  const uint8_t* psrc = TranslateVirtual(src);
  std::memcpy(pdest, psrc, size);
}

uint32_t Memory::SearchAligned(uint32_t start, uint32_t end,
                               const uint32_t* values, size_t value_count) {
  assert_true(start <= end);
  auto p = TranslateVirtual<const uint32_t*>(start);
  auto pe = TranslateVirtual<const uint32_t*>(end);
  while (p != pe) {
    if (*p == values[0]) {
      const uint32_t* pc = p + 1;
      size_t matched = 1;
      for (size_t n = 1; n < value_count; n++, pc++) {
        if (*pc != values[n]) {
          break;
        }
        matched++;
      }
      if (matched == value_count) {
        return uint32_t(reinterpret_cast<const uint8_t*>(p) - virtual_membase_);
      }
    }
    p++;
  }
  return 0;
}

bool Memory::AddVirtualMappedRange(uint32_t virtual_address, uint32_t mask,
                                   uint32_t size, void* context,
                                   cpu::MMIOReadCallback read_callback,
                                   cpu::MMIOWriteCallback write_callback) {
  if (!xe::memory::AllocFixed(TranslateVirtual(virtual_address), size,
                              xe::memory::AllocationType::kCommit,
                              xe::memory::PageAccess::kNoAccess)) {
    XELOGE("Unable to map range; commit/protect failed");
    return false;
  }
  return mmio_handler_->RegisterRange(virtual_address, mask, size, context,
                                      read_callback, write_callback);
}

cpu::MMIORange* Memory::LookupVirtualMappedRange(uint32_t virtual_address) {
  return mmio_handler_->LookupRange(virtual_address);
}

uintptr_t Memory::AddPhysicalAccessWatch(uint32_t physical_address,
                                         uint32_t length,
                                         cpu::MMIOHandler::WatchType type,
                                         cpu::AccessWatchCallback callback,
                                         void* callback_context,
                                         void* callback_data) {
  return mmio_handler_->AddPhysicalAccessWatch(physical_address, length, type,
                                               callback, callback_context,
                                               callback_data);
}

void Memory::CancelAccessWatch(uintptr_t watch_handle) {
  mmio_handler_->CancelAccessWatch(watch_handle);
}

uint32_t Memory::SystemHeapAlloc(uint32_t size, uint32_t alignment,
                                 uint32_t system_heap_flags) {
  // TODO(benvanik): lightweight pool.
  bool is_physical = !!(system_heap_flags & kSystemHeapPhysical);
  auto heap = LookupHeapByType(is_physical, 4096);
  uint32_t address;
  if (!heap->Alloc(size, alignment,
                   kMemoryAllocationReserve | kMemoryAllocationCommit,
                   kMemoryProtectRead | kMemoryProtectWrite, false, &address)) {
    return 0;
  }
  Zero(address, size);
  return address;
}

void Memory::SystemHeapFree(uint32_t address) {
  if (!address) {
    return;
  }
  // TODO(benvanik): lightweight pool.
  auto heap = LookupHeap(address);
  heap->Release(address);
}

void Memory::DumpMap() {
  XELOGE("==================================================================");
  XELOGE("Memory Dump");
  XELOGE("==================================================================");
  XELOGE("  System Page Size: %d (%.8X)", system_page_size_, system_page_size_);
  XELOGE("   Virtual Membase: %.16llX", virtual_membase_);
  XELOGE("  Physical Membase: %.16llX", physical_membase_);
  XELOGE("");
  XELOGE("------------------------------------------------------------------");
  XELOGE("Virtual Heaps");
  XELOGE("------------------------------------------------------------------");
  XELOGE("");
  heaps_.v00000000.DumpMap();
  heaps_.v40000000.DumpMap();
  heaps_.v80000000.DumpMap();
  heaps_.v90000000.DumpMap();
  XELOGE("");
  XELOGE("------------------------------------------------------------------");
  XELOGE("Physical Heaps");
  XELOGE("------------------------------------------------------------------");
  XELOGE("");
  heaps_.physical.DumpMap();
  heaps_.vA0000000.DumpMap();
  heaps_.vC0000000.DumpMap();
  heaps_.vE0000000.DumpMap();
  XELOGE("");
}

bool Memory::Save(ByteStream* stream) {
  XELOGD("Serializing memory...");
  heaps_.v00000000.Save(stream);
  heaps_.v40000000.Save(stream);
  heaps_.v80000000.Save(stream);
  heaps_.v90000000.Save(stream);
  heaps_.physical.Save(stream);

  return true;
}

bool Memory::Restore(ByteStream* stream) {
  XELOGD("Restoring memory...");
  heaps_.v00000000.Restore(stream);
  heaps_.v40000000.Restore(stream);
  heaps_.v80000000.Restore(stream);
  heaps_.v90000000.Restore(stream);
  heaps_.physical.Restore(stream);

  return true;
}

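// Translates guest kMemoryProtect* bits into a host page access value.
// Write-only requests have no host equivalent here and collapse to no-access.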
xe::memory::PageAccess ToPageAccess(uint32_t protect) {
  if ((protect & kMemoryProtectRead) && !(protect & kMemoryProtectWrite)) {
    return xe::memory::PageAccess::kReadOnly;
  } else if ((protect & kMemoryProtectRead) &&
             (protect & kMemoryProtectWrite)) {
    return xe::memory::PageAccess::kReadWrite;
  } else {
    return xe::memory::PageAccess::kNoAccess;
  }
}

BaseHeap::BaseHeap()
    : membase_(nullptr), heap_base_(0), heap_size_(0), page_size_(0) {}

BaseHeap::~BaseHeap() = default;

void BaseHeap::Initialize(uint8_t* membase, uint32_t heap_base,
                          uint32_t heap_size, uint32_t page_size) {
  membase_ = membase;
  heap_base_ = heap_base;
  heap_size_ = heap_size - 1;
  page_size_ = page_size;
  page_table_.resize(heap_size / page_size);
}

void BaseHeap::Dispose() {
  // Walk table and release all regions.
  for (uint32_t page_number = 0; page_number < page_table_.size();
       ++page_number) {
    auto& page_entry = page_table_[page_number];
    if (page_entry.state) {
      xe::memory::DeallocFixed(membase_ + heap_base_ + page_number * page_size_,
                               0, xe::memory::DeallocationType::kRelease);
      page_number += page_entry.region_page_count;
    }
  }
}

void BaseHeap::DumpMap() {
  auto global_lock = global_critical_region_.Acquire();
  XELOGE("------------------------------------------------------------------");
  XELOGE("Heap: %.8X-%.8X", heap_base_, heap_base_ + heap_size_);
  XELOGE("------------------------------------------------------------------");
  XELOGE("   Heap Base: %.8X", heap_base_);
  XELOGE("   Heap Size: %d (%.8X)", heap_size_, heap_size_);
  XELOGE("   Page Size: %d (%.8X)", page_size_, page_size_);
  XELOGE("  Page Count: %lld", page_table_.size());
  bool is_empty_span = false;
  uint32_t empty_span_start = 0;
  for (uint32_t i = 0; i < uint32_t(page_table_.size()); ++i) {
    auto& page = page_table_[i];
    if (!page.state) {
      if (!is_empty_span) {
        is_empty_span = true;
        empty_span_start = i;
      }
      continue;
    }
    if (is_empty_span) {
      XELOGE("  %.8X-%.8X %6dp %10db unreserved",
             heap_base_ + empty_span_start * page_size_,
             heap_base_ + i * page_size_, i - empty_span_start,
             (i - empty_span_start) * page_size_);
      is_empty_span = false;
    }
    const char* state_name = "   ";
    if (page.state & kMemoryAllocationCommit) {
      state_name = "COM";
    } else if (page.state & kMemoryAllocationReserve) {
      state_name = "RES";
    }
    char access_r = (page.current_protect & kMemoryProtectRead) ? 'R' : ' ';
    char access_w = (page.current_protect & kMemoryProtectWrite) ? 'W' : ' ';
    XELOGE("  %.8X-%.8X %6dp %10db %s %c%c", heap_base_ + i * page_size_,
           heap_base_ + (i + page.region_page_count) * page_size_,
           page.region_page_count, page.region_page_count * page_size_,
           state_name, access_r, access_w);
    i += page.region_page_count - 1;
  }
  if (is_empty_span) {
    XELOGE("  %.8X-%.8X - %d unreserved pages)",
           heap_base_ + empty_span_start * page_size_, heap_base_ + heap_size_,
           page_table_.size() - empty_span_start);
  }
}

uint32_t BaseHeap::GetTotalPageCount() { return uint32_t(page_table_.size()); }

uint32_t BaseHeap::GetUnreservedPageCount() {
  auto global_lock = global_critical_region_.Acquire();
  uint32_t count = 0;
  bool is_empty_span = false;
  uint32_t empty_span_start = 0;
  uint32_t size = uint32_t(page_table_.size());
  for (uint32_t i = 0; i < size; ++i) {
    auto& page = page_table_[i];
    if (!page.state) {
      if (!is_empty_span) {
        is_empty_span = true;
        empty_span_start = i;
      }
      continue;
    }
    if (is_empty_span) {
      is_empty_span = false;
      count += i - empty_span_start;
    }
    i += page.region_page_count - 1;
  }
  if (is_empty_span) {
    count += size - empty_span_start;
  }
  return count;
}

bool BaseHeap::Save(ByteStream* stream) {
  XELOGD("Heap %.8X-%.8X", heap_base_, heap_base_ + heap_size_);

  for (size_t i = 0; i < page_table_.size(); i++) {
    auto& page = page_table_[i];
    stream->Write(page.qword);
    if (!page.state) {
      // Unallocated.
      continue;
    }

    // TODO(DrChat): write compressed with snappy.
    if (page.state & kMemoryAllocationCommit) {
      void* addr = membase_ + heap_base_ + i * page_size_;

      memory::PageAccess old_access;
      memory::Protect(addr, page_size_, memory::PageAccess::kReadWrite,
                      &old_access);

      stream->Write(addr, page_size_);

      memory::Protect(addr, page_size_, old_access, nullptr);
    }
  }

  return true;
}

bool BaseHeap::Restore(ByteStream* stream) {
  XELOGD("Heap %.8X-%.8X", heap_base_, heap_base_ + heap_size_);

  for (size_t i = 0; i < page_table_.size(); i++) {
    auto& page = page_table_[i];
    page.qword = stream->Read<uint64_t>();
    if (!page.state) {
      // Unallocated.
      continue;
    }

    memory::PageAccess page_access = memory::PageAccess::kNoAccess;
    if ((page.current_protect & kMemoryProtectRead) &&
        (page.current_protect & kMemoryProtectWrite)) {
      page_access = memory::PageAccess::kReadWrite;
    } else if (page.current_protect & kMemoryProtectRead) {
      page_access = memory::PageAccess::kReadOnly;
    }

    // Commit the memory if it isn't already. We do not need to reserve any
    // memory, as the mapping has already taken care of that.
    if (page.state & kMemoryAllocationCommit) {
      xe::memory::AllocFixed(membase_ + heap_base_ + i * page_size_, page_size_,
                             memory::AllocationType::kCommit,
                             memory::PageAccess::kReadWrite);
    }

    // Now read into memory. We'll set R/W protection first, then set the
    // protection back to its previous state.
    // TODO(DrChat): read compressed with snappy.
    if (page.state & kMemoryAllocationCommit) {
      void* addr = membase_ + heap_base_ + i * page_size_;
      xe::memory::Protect(addr, page_size_, memory::PageAccess::kReadWrite,
                          nullptr);

      stream->Read(addr, page_size_);

      xe::memory::Protect(addr, page_size_, page_access, nullptr);
    }
  }

  return true;
}

void BaseHeap::Reset() {
  // TODO(DrChat): protect pages.
  std::memset(page_table_.data(), 0, sizeof(PageEntry) * page_table_.size());
}

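// Allocates anywhere in the heap: rounds size and alignment up to the heap
// page size and defers to AllocRange over the full heap span.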
bool BaseHeap::Alloc(uint32_t size, uint32_t alignment,
                     uint32_t allocation_type, uint32_t protect, bool top_down,
                     uint32_t* out_address) {
  *out_address = 0;
  size = xe::round_up(size, page_size_);
  alignment = xe::round_up(alignment, page_size_);
  uint32_t low_address = heap_base_;
  uint32_t high_address = heap_base_ + heap_size_;
  return AllocRange(low_address, high_address, size, alignment, allocation_type,
                    protect, top_down, out_address);
}

bool BaseHeap::AllocFixed(uint32_t base_address, uint32_t size,
                          uint32_t alignment, uint32_t allocation_type,
                          uint32_t protect) {
  alignment = xe::round_up(alignment, page_size_);
  size = xe::align(size, alignment);
  assert_true(base_address % alignment == 0);
  uint32_t page_count = get_page_count(size, page_size_);
  uint32_t start_page_number = (base_address - heap_base_) / page_size_;
  uint32_t end_page_number = start_page_number + page_count - 1;
  if (start_page_number >= page_table_.size() ||
      end_page_number > page_table_.size()) {
    XELOGE("BaseHeap::AllocFixed passed out of range address range");
    return false;
  }

  auto global_lock = global_critical_region_.Acquire();

  // - If we are reserving, the entire requested range must not already be
  //   reserved.
  // - If we are committing, it's ok for pages within the range to already be
  //   committed.
  for (uint32_t page_number = start_page_number; page_number <= end_page_number;
       ++page_number) {
    uint32_t state = page_table_[page_number].state;
    if ((allocation_type == kMemoryAllocationReserve) && state) {
      // Already reserved.
      XELOGE(
          "BaseHeap::AllocFixed attempting to reserve an already reserved "
          "range");
      return false;
    }
    if ((allocation_type == kMemoryAllocationCommit) &&
        !(state & kMemoryAllocationReserve)) {
      // Attempting a commit-only op on an unreserved page.
      // This may be OK.
      XELOGW("BaseHeap::AllocFixed attempting commit on unreserved page");
      allocation_type |= kMemoryAllocationReserve;
      break;
    }
  }

  // Allocate from host.
  if (allocation_type == kMemoryAllocationReserve) {
    // Reserve is not needed, as we are mapped already.
  } else {
    auto alloc_type = (allocation_type & kMemoryAllocationCommit)
                          ? xe::memory::AllocationType::kCommit
                          : xe::memory::AllocationType::kReserve;
    void* result = xe::memory::AllocFixed(
        membase_ + heap_base_ + start_page_number * page_size_,
        page_count * page_size_, alloc_type, ToPageAccess(protect));
    if (!result) {
      XELOGE("BaseHeap::AllocFixed failed to alloc range from host");
      return false;
    }

    if (FLAGS_scribble_heap && protect & kMemoryProtectWrite) {
      std::memset(result, 0xCD, page_count * page_size_);
    }
  }

  // Set page state.
  for (uint32_t page_number = start_page_number; page_number <= end_page_number;
       ++page_number) {
    auto& page_entry = page_table_[page_number];
    if (allocation_type & kMemoryAllocationReserve) {
      // Region is based on reservation.
      page_entry.base_address = start_page_number;
      page_entry.region_page_count = page_count;
    }
    page_entry.allocation_protect = protect;
    page_entry.current_protect = protect;
    page_entry.state = kMemoryAllocationReserve | allocation_type;
  }

  return true;
}

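// Finds a run of free pages large enough for |size| between |low_address| and
// |high_address|. Candidate base pages are stepped by the requested alignment
// and the scan walks downward when |top_down| is set.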
bool BaseHeap::AllocRange(uint32_t low_address, uint32_t high_address,
                          uint32_t size, uint32_t alignment,
                          uint32_t allocation_type, uint32_t protect,
                          bool top_down, uint32_t* out_address) {
  *out_address = 0;

  alignment = xe::round_up(alignment, page_size_);
  uint32_t page_count = get_page_count(size, page_size_);
  low_address = std::max(heap_base_, xe::align(low_address, alignment));
  high_address =
      std::min(heap_base_ + heap_size_, xe::align(high_address, alignment));
  uint32_t low_page_number = (low_address - heap_base_) / page_size_;
  uint32_t high_page_number = (high_address - heap_base_) / page_size_;
  low_page_number = std::min(uint32_t(page_table_.size()) - 1, low_page_number);
  high_page_number =
      std::min(uint32_t(page_table_.size()) - 1, high_page_number);

  if (page_count > (high_page_number - low_page_number)) {
    XELOGE("BaseHeap::Alloc page count too big for requested range");
    return false;
  }

  auto global_lock = global_critical_region_.Acquire();

  // Find a free page range.
  // The base page must match the requested alignment, so we first scan for
  // a free aligned page and only then check for continuous free pages.
  // TODO(benvanik): optimized searching (free list buckets, bitmap, etc).
  uint32_t start_page_number = UINT_MAX;
  uint32_t end_page_number = UINT_MAX;
  uint32_t page_scan_stride = alignment / page_size_;
  high_page_number = high_page_number - (high_page_number % page_scan_stride);
  if (top_down) {
    for (int64_t base_page_number =
             high_page_number - xe::round_up(page_count, page_scan_stride);
         base_page_number >= low_page_number;
         base_page_number -= page_scan_stride) {
      if (page_table_[base_page_number].state != 0) {
        // Base page not free, skip to next usable page.
        continue;
      }
      // Check requested range to ensure free.
      start_page_number = uint32_t(base_page_number);
      end_page_number = uint32_t(base_page_number) + page_count - 1;
      assert_true(end_page_number < page_table_.size());
      bool any_taken = false;
      for (uint32_t page_number = uint32_t(base_page_number);
           !any_taken && page_number <= end_page_number; ++page_number) {
        bool is_free = page_table_[page_number].state == 0;
        if (!is_free) {
          // At least one page in the range is used, skip to next.
          // We know we'll be starting at least before this page.
          any_taken = true;
          if (page_count > page_number) {
            // Not enough space left to fit entire page range. Breaks outer
            // loop.
            base_page_number = -1;
          } else {
            base_page_number = page_number - page_count;
            base_page_number -= base_page_number % page_scan_stride;
            base_page_number += page_scan_stride;  // cancel out loop logic
          }
          break;
        }
      }
      if (!any_taken) {
        // Found our place.
        break;
      }
      // Retry.
      start_page_number = end_page_number = UINT_MAX;
    }
  } else {
    for (uint32_t base_page_number = low_page_number;
         base_page_number <= high_page_number - page_count;
         base_page_number += page_scan_stride) {
      if (page_table_[base_page_number].state != 0) {
        // Base page not free, skip to next usable page.
        continue;
      }
      // Check requested range to ensure free.
      start_page_number = base_page_number;
      end_page_number = base_page_number + page_count - 1;
      bool any_taken = false;
      for (uint32_t page_number = base_page_number;
           !any_taken && page_number <= end_page_number; ++page_number) {
        bool is_free = page_table_[page_number].state == 0;
        if (!is_free) {
          // At least one page in the range is used, skip to next.
          // We know we'll be starting at least after this page.
          any_taken = true;
          base_page_number = xe::round_up(page_number + 1, page_scan_stride);
          base_page_number -= page_scan_stride;  // cancel out loop logic
          break;
        }
      }
      if (!any_taken) {
        // Found our place.
        break;
      }
      // Retry.
      start_page_number = end_page_number = UINT_MAX;
    }
  }
  if (start_page_number == UINT_MAX || end_page_number == UINT_MAX) {
    // Out of memory.
    XELOGE("BaseHeap::Alloc failed to find contiguous range");
    assert_always("Heap exhausted!");
    return false;
  }

  // Allocate from host.
  if (allocation_type == kMemoryAllocationReserve) {
    // Reserve is not needed, as we are mapped already.
  } else {
    auto alloc_type = (allocation_type & kMemoryAllocationCommit)
                          ? xe::memory::AllocationType::kCommit
                          : xe::memory::AllocationType::kReserve;
    void* result = xe::memory::AllocFixed(
        membase_ + heap_base_ + start_page_number * page_size_,
        page_count * page_size_, alloc_type, ToPageAccess(protect));
    if (!result) {
      XELOGE("BaseHeap::Alloc failed to alloc range from host");
      return false;
    }

    if (FLAGS_scribble_heap && (protect & kMemoryProtectWrite)) {
      std::memset(result, 0xCD, page_count * page_size_);
    }
  }

  // Set page state.
  for (uint32_t page_number = start_page_number; page_number <= end_page_number;
       ++page_number) {
    auto& page_entry = page_table_[page_number];
    page_entry.base_address = start_page_number;
    page_entry.region_page_count = page_count;
    page_entry.allocation_protect = protect;
    page_entry.current_protect = protect;
    page_entry.state = kMemoryAllocationReserve | allocation_type;
  }

  *out_address = heap_base_ + (start_page_number * page_size_);
  return true;
}

bool BaseHeap::Decommit(uint32_t address, uint32_t size) {
  uint32_t page_count = get_page_count(size, page_size_);
  uint32_t start_page_number = (address - heap_base_) / page_size_;
  uint32_t end_page_number = start_page_number + page_count - 1;
  start_page_number =
      std::min(uint32_t(page_table_.size()) - 1, start_page_number);
  end_page_number = std::min(uint32_t(page_table_.size()) - 1, end_page_number);

  auto global_lock = global_critical_region_.Acquire();

  // Release from host.
  // TODO(benvanik): find a way to actually decommit memory;
  // mapped memory cannot be decommitted.
  /*BOOL result =
      VirtualFree(membase_ + heap_base_ + start_page_number * page_size_,
                  page_count * page_size_, MEM_DECOMMIT);
  if (!result) {
    PLOGW("BaseHeap::Decommit failed due to host VirtualFree failure");
    return false;
  }*/

  // Perform table change.
  for (uint32_t page_number = start_page_number; page_number <= end_page_number;
       ++page_number) {
    auto& page_entry = page_table_[page_number];
    page_entry.state &= ~kMemoryAllocationCommit;
  }

  return true;
}

bool BaseHeap::Release(uint32_t base_address, uint32_t* out_region_size) {
  auto global_lock = global_critical_region_.Acquire();

  // Given address must be a region base address.
  uint32_t base_page_number = (base_address - heap_base_) / page_size_;
  auto base_page_entry = page_table_[base_page_number];
  if (base_page_entry.base_address != base_page_number) {
    XELOGE("BaseHeap::Release failed because address is not a region start");
    return false;
  }

  if (out_region_size) {
    *out_region_size = (base_page_entry.region_page_count * page_size_);
  }

  // Release from host not needed as mapping reserves the range for us.
  // TODO(benvanik): protect with NOACCESS?
  /*BOOL result = VirtualFree(
      membase_ + heap_base_ + base_page_number * page_size_, 0, MEM_RELEASE);
  if (!result) {
    PLOGE("BaseHeap::Release failed due to host VirtualFree failure");
    return false;
  }*/
  // Instead, we just protect it, if we can.
  if (page_size_ == xe::memory::page_size() ||
      ((base_page_entry.region_page_count * page_size_) %
               xe::memory::page_size() ==
           0 &&
       ((base_page_number * page_size_) % xe::memory::page_size() == 0))) {
    // TODO(benvanik): figure out why games are using memory after releasing it.
    // It's possible this is some virtual/physical stuff where the GPU still can
    // access it.
    if (FLAGS_protect_on_release) {
      if (!xe::memory::Protect(
              membase_ + heap_base_ + base_page_number * page_size_,
              base_page_entry.region_page_count * page_size_,
              xe::memory::PageAccess::kNoAccess, nullptr)) {
        XELOGW("BaseHeap::Release failed due to host VirtualProtect failure");
      }
    }
  }

  // Perform table change.
  uint32_t end_page_number =
      base_page_number + base_page_entry.region_page_count - 1;
  for (uint32_t page_number = base_page_number; page_number <= end_page_number;
       ++page_number) {
    auto& page_entry = page_table_[page_number];
    page_entry.qword = 0;
  }

  return true;
}

bool BaseHeap::Protect(uint32_t address, uint32_t size, uint32_t protect) {
  uint32_t page_count = xe::round_up(size, page_size_) / page_size_;
  uint32_t start_page_number = (address - heap_base_) / page_size_;
  uint32_t end_page_number = start_page_number + page_count - 1;
  start_page_number =
      std::min(uint32_t(page_table_.size()) - 1, start_page_number);
  end_page_number = std::min(uint32_t(page_table_.size()) - 1, end_page_number);

  auto global_lock = global_critical_region_.Acquire();

  // Ensure all pages are in the same reserved region and all are committed.
  uint32_t first_base_address = UINT_MAX;
  for (uint32_t page_number = start_page_number; page_number <= end_page_number;
       ++page_number) {
    auto page_entry = page_table_[page_number];
    if (first_base_address == UINT_MAX) {
      first_base_address = page_entry.base_address;
    } else if (first_base_address != page_entry.base_address) {
      XELOGE("BaseHeap::Protect failed due to request spanning regions");
      return false;
    }
    if (!(page_entry.state & kMemoryAllocationCommit)) {
      XELOGE("BaseHeap::Protect failed due to uncommitted page");
      return false;
    }
  }

  // Attempt host change (hopefully won't fail).
  // We can only do this if our size matches system page granularity.
  if (page_size_ == xe::memory::page_size() ||
      (((page_count * page_size_) % xe::memory::page_size() == 0) &&
       ((start_page_number * page_size_) % xe::memory::page_size() == 0))) {
    if (!xe::memory::Protect(
            membase_ + heap_base_ + start_page_number * page_size_,
            page_count * page_size_, ToPageAccess(protect), nullptr)) {
      XELOGE("BaseHeap::Protect failed due to host VirtualProtect failure");
      return false;
    }
  } else {
    XELOGW("BaseHeap::Protect: ignoring request as not 64k page aligned");
  }

  // Perform table change.
  for (uint32_t page_number = start_page_number; page_number <= end_page_number;
       ++page_number) {
    auto& page_entry = page_table_[page_number];
    page_entry.current_protect = protect;
  }

  return true;
}

bool BaseHeap::QueryRegionInfo(uint32_t base_address,
                               HeapAllocationInfo* out_info) {
  uint32_t start_page_number = (base_address - heap_base_) / page_size_;
  if (start_page_number > page_table_.size()) {
    XELOGE("BaseHeap::QueryRegionInfo base page out of range");
    return false;
  }

  auto global_lock = global_critical_region_.Acquire();

  auto start_page_entry = page_table_[start_page_number];
  out_info->base_address = base_address;
  out_info->allocation_base = 0;
  out_info->allocation_protect = 0;
  out_info->region_size = 0;
  out_info->state = 0;
  out_info->protect = 0;
  out_info->type = 0;
  if (start_page_entry.state) {
    // Committed/reserved region.
    out_info->allocation_base = start_page_entry.base_address * page_size_;
    out_info->allocation_protect = start_page_entry.allocation_protect;
    out_info->state = start_page_entry.state;
    out_info->protect = start_page_entry.current_protect;
    out_info->type = 0x20000;
    for (uint32_t page_number = start_page_number;
         page_number < start_page_number + start_page_entry.region_page_count;
         ++page_number) {
      auto page_entry = page_table_[page_number];
      if (page_entry.base_address != start_page_entry.base_address ||
          page_entry.state != start_page_entry.state ||
          page_entry.current_protect != start_page_entry.current_protect) {
        // Different region or different properties within the region; done.
        break;
      }
      out_info->region_size += page_size_;
    }
  } else {
    // Free region.
    for (uint32_t page_number = start_page_number;
         page_number < page_table_.size(); ++page_number) {
      auto page_entry = page_table_[page_number];
      if (page_entry.state) {
        // First non-free page; done with region.
        break;
      }
      out_info->region_size += page_size_;
    }
  }
  return true;
}

bool BaseHeap::QuerySize(uint32_t address, uint32_t* out_size) {
  uint32_t page_number = (address - heap_base_) / page_size_;
  if (page_number > page_table_.size()) {
    XELOGE("BaseHeap::QuerySize base page out of range");
    *out_size = 0;
    return false;
  }
  auto global_lock = global_critical_region_.Acquire();
  auto page_entry = page_table_[page_number];
  *out_size = (page_entry.region_page_count * page_size_);
  return true;
}

bool BaseHeap::QueryProtect(uint32_t address, uint32_t* out_protect) {
  uint32_t page_number = (address - heap_base_) / page_size_;
  if (page_number > page_table_.size()) {
    XELOGE("BaseHeap::QueryProtect base page out of range");
    *out_protect = 0;
    return false;
  }
  auto global_lock = global_critical_region_.Acquire();
  auto page_entry = page_table_[page_number];
  *out_protect = page_entry.current_protect;
  return true;
}

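// Note: addresses in the 0xE0000000 range carry an extra 4k offset; the
// PhysicalHeap alloc paths subtract 0x1000 again when mapping back into
// heap-relative space.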
uint32_t BaseHeap::GetPhysicalAddress(uint32_t address) {
  // Only valid for memory in this range - will be bogus if the origin was
  // outside of it.
  uint32_t physical_address = address & 0x1FFFFFFF;
  if (address >= 0xE0000000) {
    physical_address += 0x1000;
  }
  return physical_address;
}

VirtualHeap::VirtualHeap() = default;

VirtualHeap::~VirtualHeap() = default;

void VirtualHeap::Initialize(uint8_t* membase, uint32_t heap_base,
                             uint32_t heap_size, uint32_t page_size) {
  BaseHeap::Initialize(membase, heap_base, heap_size, page_size);
}

PhysicalHeap::PhysicalHeap() : parent_heap_(nullptr) {}

PhysicalHeap::~PhysicalHeap() = default;

void PhysicalHeap::Initialize(uint8_t* membase, uint32_t heap_base,
                              uint32_t heap_size, uint32_t page_size,
                              VirtualHeap* parent_heap) {
  BaseHeap::Initialize(membase, heap_base, heap_size, page_size);
  parent_heap_ = parent_heap;
}

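// Physical allocations happen in two steps: claim the backing pages in the
// parent (physical) heap first, then pin the matching range in this heap at
// the mirrored virtual address.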
bool PhysicalHeap::Alloc(uint32_t size, uint32_t alignment,
                         uint32_t allocation_type, uint32_t protect,
                         bool top_down, uint32_t* out_address) {
  *out_address = 0;

  // Default top-down. Since parent heap is bottom-up this prevents collisions.
  top_down = true;

  // Adjust alignment and size as our page size differs from the parent's.
  size = xe::round_up(size, page_size_);
  alignment = xe::round_up(alignment, page_size_);

  auto global_lock = global_critical_region_.Acquire();

  // Allocate from parent heap (gets our physical address in 0-512mb).
  uint32_t parent_low_address = GetPhysicalAddress(heap_base_);
  uint32_t parent_high_address = GetPhysicalAddress(heap_base_ + heap_size_);
  uint32_t parent_address;
  if (!parent_heap_->AllocRange(parent_low_address, parent_high_address, size,
                                alignment, allocation_type, protect, top_down,
                                &parent_address)) {
    XELOGE(
        "PhysicalHeap::Alloc unable to alloc physical memory in parent heap");
    return false;
  }
  if (heap_base_ >= 0xE0000000) {
    parent_address -= 0x1000;
  }

  // Given the address we've reserved in the parent heap, pin that here.
  // Shouldn't be possible for it to be allocated already.
  uint32_t address = heap_base_ + parent_address;
  if (!BaseHeap::AllocFixed(address, size, alignment, allocation_type,
                            protect)) {
    XELOGE(
        "PhysicalHeap::Alloc unable to pin physical memory in physical heap");
    // TODO(benvanik): don't leak parent memory.
    return false;
  }
  *out_address = address;
  return true;
}

bool PhysicalHeap::AllocFixed(uint32_t base_address, uint32_t size,
                              uint32_t alignment, uint32_t allocation_type,
                              uint32_t protect) {
  // Adjust alignment and size as our page size differs from the parent's.
  size = xe::round_up(size, page_size_);
  alignment = xe::round_up(alignment, page_size_);

  auto global_lock = global_critical_region_.Acquire();

  // Allocate from parent heap (gets our physical address in 0-512mb).
  // NOTE: this can potentially overwrite heap contents if there are already
  // committed pages in the requested physical range.
  // TODO(benvanik): flag for ensure-not-committed?
  uint32_t parent_base_address = GetPhysicalAddress(base_address);
  if (!parent_heap_->AllocFixed(parent_base_address, size, alignment,
                                allocation_type, protect)) {
    XELOGE(
        "PhysicalHeap::Alloc unable to alloc physical memory in parent heap");
    return false;
  }
  if (heap_base_ >= 0xE0000000) {
    parent_base_address -= 0x1000;
  }

  // Given the address we've reserved in the parent heap, pin that here.
  // Shouldn't be possible for it to be allocated already.
  uint32_t address = heap_base_ + parent_base_address;
  if (!BaseHeap::AllocFixed(address, size, alignment, allocation_type,
                            protect)) {
    XELOGE(
        "PhysicalHeap::Alloc unable to pin physical memory in physical heap");
    // TODO(benvanik): don't leak parent memory.
    return false;
  }

  return true;
}

bool PhysicalHeap::AllocRange(uint32_t low_address, uint32_t high_address,
                              uint32_t size, uint32_t alignment,
                              uint32_t allocation_type, uint32_t protect,
                              bool top_down, uint32_t* out_address) {
  *out_address = 0;

  // Adjust alignment and size as our page size differs from the parent's.
  size = xe::round_up(size, page_size_);
  alignment = xe::round_up(alignment, page_size_);

  auto global_lock = global_critical_region_.Acquire();

  // Allocate from parent heap (gets our physical address in 0-512mb).
  low_address = std::max(heap_base_, low_address);
  high_address = std::min(heap_base_ + heap_size_, high_address);
  uint32_t parent_low_address = GetPhysicalAddress(low_address);
  uint32_t parent_high_address = GetPhysicalAddress(high_address);
  uint32_t parent_address;
  if (!parent_heap_->AllocRange(parent_low_address, parent_high_address, size,
                                alignment, allocation_type, protect, top_down,
                                &parent_address)) {
    XELOGE(
        "PhysicalHeap::Alloc unable to alloc physical memory in parent heap");
    return false;
  }
  if (heap_base_ >= 0xE0000000) {
    parent_address -= 0x1000;
  }

  // Given the address we've reserved in the parent heap, pin that here.
  // Shouldn't be possible for it to be allocated already.
  uint32_t address = heap_base_ + parent_address;
  if (!BaseHeap::AllocFixed(address, size, alignment, allocation_type,
                            protect)) {
    XELOGE(
        "PhysicalHeap::Alloc unable to pin physical memory in physical heap");
    // TODO(benvanik): don't leak parent memory.
    return false;
  }
  *out_address = address;
  return true;
}

bool PhysicalHeap::Decommit(uint32_t address, uint32_t size) {
  auto global_lock = global_critical_region_.Acquire();
  uint32_t parent_address = GetPhysicalAddress(address);
  if (!parent_heap_->Decommit(parent_address, size)) {
    XELOGE("PhysicalHeap::Decommit failed due to parent heap failure");
    return false;
  }
  return BaseHeap::Decommit(address, size);
}

bool PhysicalHeap::Release(uint32_t base_address, uint32_t* out_region_size) {
  auto global_lock = global_critical_region_.Acquire();
  uint32_t parent_base_address = GetPhysicalAddress(base_address);
  uint32_t region_size = 0;
  if (QuerySize(base_address, &region_size)) {
    cpu::MMIOHandler::global_handler()->InvalidateRange(parent_base_address,
                                                        region_size);
  }

  if (!parent_heap_->Release(parent_base_address, out_region_size)) {
    XELOGE("PhysicalHeap::Release failed due to parent heap failure");
    return false;
  }

  return BaseHeap::Release(base_address, out_region_size);
}

bool PhysicalHeap::Protect(uint32_t address, uint32_t size, uint32_t protect) {
  auto global_lock = global_critical_region_.Acquire();
  uint32_t parent_address = GetPhysicalAddress(address);
  cpu::MMIOHandler::global_handler()->InvalidateRange(parent_address, size);

  if (!parent_heap_->Protect(parent_address, size, protect)) {
    XELOGE("PhysicalHeap::Protect failed due to parent heap failure");
    return false;
  }

  return BaseHeap::Protect(address, size, protect);
}

} // namespace xe