2013-01-13 07:25:41 +00:00
|
|
|
/**
|
|
|
|
******************************************************************************
|
|
|
|
* Xenia : Xbox 360 Emulator Research Project *
|
|
|
|
******************************************************************************
|
|
|
|
* Copyright 2013 Ben Vanik. All rights reserved. *
|
|
|
|
* Released under the BSD license - see LICENSE in the root for more details. *
|
|
|
|
******************************************************************************
|
|
|
|
*/
|
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
#include <xenia/memory.h>
|
2013-12-07 06:57:16 +00:00
|
|
|
|
2014-08-16 23:57:00 +00:00
|
|
|
#include <algorithm>
|
2014-07-10 05:28:51 +00:00
|
|
|
#include <mutex>
|
|
|
|
|
2013-10-20 19:39:59 +00:00
|
|
|
#include <gflags/gflags.h>
|
2014-08-17 00:18:20 +00:00
|
|
|
#include <poly/math.h>
|
2014-08-20 04:02:15 +00:00
|
|
|
#include <xenia/cpu/mmio_handler.h>
|
2013-12-07 06:57:16 +00:00
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
using namespace xe;
|
2013-01-31 06:44:32 +00:00
|
|
|
|
2013-10-23 06:34:24 +00:00
|
|
|
// TODO(benvanik): move xbox.h out
|
2013-10-24 03:42:24 +00:00
|
|
|
#include <xenia/xbox.h>
|
2013-10-23 06:34:24 +00:00
|
|
|
|
2014-01-12 22:06:00 +00:00
|
|
|
#if !XE_PLATFORM_WIN32
|
2013-01-13 07:25:41 +00:00
|
|
|
#include <sys/mman.h>
|
2013-01-30 09:45:55 +00:00
|
|
|
#endif // WIN32
|
2013-01-13 07:25:41 +00:00
|
|
|
|
2014-08-19 05:12:21 +00:00
|
|
|
#define MSPACES 1
|
|
|
|
#define USE_LOCKS 0
|
|
|
|
#define USE_DL_PREFIX 1
|
|
|
|
#define HAVE_MORECORE 0
|
|
|
|
#define HAVE_MREMAP 0
|
|
|
|
#define malloc_getpagesize 4096
|
|
|
|
#define DEFAULT_GRANULARITY 64 * 1024
|
|
|
|
#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
|
|
|
|
#define MALLOC_ALIGNMENT 32
|
|
|
|
#define MALLOC_INSPECT_ALL 1
|
2013-10-20 19:39:59 +00:00
|
|
|
#if XE_DEBUG
|
2014-08-19 05:12:21 +00:00
|
|
|
#define FOOTERS 0
|
2013-10-20 19:39:59 +00:00
|
|
|
#endif // XE_DEBUG
|
2013-01-29 05:36:03 +00:00
|
|
|
#include <third_party/dlmalloc/malloc.c.h>
|
|
|
|
|
2014-08-19 05:12:21 +00:00
|
|
|
// Debugging aids for the guest heaps (see MemoryHeap below).
// --log_heap dumps the dlmalloc arena on every alloc/free.
DEFINE_bool(log_heap, false, "Log heap structure on alloc/free.");
// --heap_guard_pages pads each allocation with PAGE_NOACCESS guard pages on
// both sides so stray reads/writes fault immediately.
DEFINE_uint64(
    heap_guard_pages, 0,
    "Allocate the given number of guard pages around all heap chunks.");
// --scribble_heap fills fresh (0xCD) and freed (0xDC) memory with sentinel
// bytes to surface read-before-write/use-after-free bugs.
DEFINE_bool(scribble_heap, false,
            "Scribble 0xCD into all allocated heap memory.");
|
2013-01-13 07:25:41 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Memory map:
|
2013-06-01 10:08:31 +00:00
|
|
|
* 0x00000000 - 0x3FFFFFFF (1024mb) - virtual 4k pages
|
|
|
|
* 0x40000000 - 0x7FFFFFFF (1024mb) - virtual 64k pages
|
|
|
|
* 0x80000000 - 0x8BFFFFFF ( 192mb) - xex 64k pages
|
|
|
|
* 0x8C000000 - 0x8FFFFFFF ( 64mb) - xex 64k pages (encrypted)
|
|
|
|
* 0x90000000 - 0x9FFFFFFF ( 256mb) - xex 4k pages
|
|
|
|
* 0xA0000000 - 0xBFFFFFFF ( 512mb) - physical 64k pages
|
|
|
|
* 0xC0000000 - 0xDFFFFFFF - physical 16mb pages
|
|
|
|
* 0xE0000000 - 0xFFFFFFFF - physical 4k pages
|
2013-01-13 07:25:41 +00:00
|
|
|
*
|
|
|
|
* We use the host OS to create an entire addressable range for this. That way
|
|
|
|
* we don't have to emulate a TLB. It'd be really cool to pass through page
|
|
|
|
 * sizes or use madvise to let the OS know what to expect.
|
2013-05-30 04:00:55 +00:00
|
|
|
*
|
2013-12-07 06:57:16 +00:00
|
|
|
* We create our own heap of committed memory that lives at
|
|
|
|
* XENON_MEMORY_HEAP_LOW to XENON_MEMORY_HEAP_HIGH - all normal user allocations
|
|
|
|
* come from there. Since the Xbox has no paging, we know that the size of this
|
|
|
|
* heap will never need to be larger than ~512MB (realistically, smaller than
|
|
|
|
* that). We place it far away from the XEX data and keep the memory around it
|
|
|
|
* uncommitted so that we have some warning if things go astray.
|
2013-05-30 04:00:55 +00:00
|
|
|
*
|
|
|
|
* For XEX/GPU/etc data we allow placement allocations (base_address != 0) and
|
|
|
|
* commit the requested memory as needed. This bypasses the standard heap, but
|
|
|
|
* XEXs should never be overwriting anything so that's fine. We can also query
|
|
|
|
 * for previous commits and assert that we really aren't committing twice.
|
2013-10-21 07:57:48 +00:00
|
|
|
*
|
|
|
|
* GPU memory is mapped onto the lower 512mb of the virtual 4k range (0).
|
|
|
|
* So 0xA0000000 = 0x00000000. A more sophisticated allocator could handle
|
|
|
|
* this.
|
2013-01-13 07:25:41 +00:00
|
|
|
*/
|
|
|
|
|
2014-08-19 05:12:21 +00:00
|
|
|
// Guest address ranges backing the two managed heaps (see memory map above).
// The physical heap mirrors into the 0xA0000000/0xC0000000/0xE0000000 views;
// the virtual heap is plain 4k-page guest-virtual memory.
#define XENON_MEMORY_PHYSICAL_HEAP_LOW 0x00010000
#define XENON_MEMORY_PHYSICAL_HEAP_HIGH 0x20000000
#define XENON_MEMORY_VIRTUAL_HEAP_LOW 0x20000000
#define XENON_MEMORY_VIRTUAL_HEAP_HIGH 0x40000000
|
2013-12-07 06:57:16 +00:00
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
// A managed guest heap carved out of the shared 4gb mapping. Backed by a
// dlmalloc mspace placed directly on the committed guest pages, so guest
// addresses can be derived from host pointers by subtracting mapping_base_.
class xe::MemoryHeap {
 public:
  // memory: owning Memory instance (not owned here).
  // is_physical: when true, Alloc/Free also commit/decommit the aliased
  // physical views (0xA0000000/0xC0000000/0xE0000000).
  MemoryHeap(Memory* memory, bool is_physical);
  ~MemoryHeap();

  // Commits [low, high) in the guest space and creates the mspace there.
  // Returns 0 on success, 1 on failure.
  int Initialize(uint64_t low, uint64_t high);

  // Allocates from the mspace. Returns the guest address, or 0 on failure.
  uint64_t Alloc(uint64_t base_address, size_t size, uint32_t flags,
                 uint32_t alignment);
  // Frees a previous Alloc. Returns the usable size that was released, or 0.
  uint64_t Free(uint64_t address, size_t size);
  // Returns the usable size of the allocation containing base_address.
  size_t QuerySize(uint64_t base_address);

  // Logs mallinfo stats and walks all chunks via DumpHandler.
  void Dump();

 private:
  // Monotonically increasing id handed to each heap instance.
  static uint32_t next_heap_id_;
  // mspace_inspect_all callback; context is the MemoryHeap*.
  static void DumpHandler(void* start, void* end, size_t used_bytes,
                          void* context);

 private:
  Memory* memory_;      // Owning Memory (provides views_/mapping_base_).
  uint32_t heap_id_;    // Unique id from next_heap_id_.
  bool is_physical_;    // Mirror commits into physical views when true.
  std::mutex lock_;     // Guards space_ mutations.
  size_t size_;         // Byte length of the committed heap region.
  uint8_t* ptr_;        // Host pointer to the heap region base.
  mspace space_;        // dlmalloc mspace living inside [ptr_, ptr_+size_).
};

uint32_t MemoryHeap::next_heap_id_ = 1;
|
2013-05-28 02:58:20 +00:00
|
|
|
|
2014-12-20 01:29:27 +00:00
|
|
|
// Constructs the two managed heaps. Order matters only for the heap ids
// (virtual gets id 1, physical id 2). The actual address-space mapping is
// deferred to Initialize().
Memory::Memory() : mapping_(0), mapping_base_(nullptr) {
  virtual_heap_ = new MemoryHeap(this, false);
  physical_heap_ = new MemoryHeap(this, true);
}
|
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
// Tears down in reverse order of Initialize(): MMIO handler first (so no new
// faults are serviced), then committed regions, heaps, and finally the views
// and the file mapping itself.
Memory::~Memory() {
  // Uninstall the MMIO handler, as we won't be able to service more
  // requests.
  mmio_handler_.reset();

  if (mapping_base_) {
    // GPU writeback.
    VirtualFree(Translate(0xC0000000), 0x00100000, MEM_DECOMMIT);
  }

  // Heaps must go before the views are unmapped: their destructors touch the
  // committed heap pages inside the mapping.
  delete physical_heap_;
  delete virtual_heap_;

  // Unmap all views and close mapping.
  if (mapping_) {
    UnmapViews();
    CloseHandle(mapping_);
    mapping_base_ = 0;
    mapping_ = 0;
  }
}
|
2013-01-13 07:25:41 +00:00
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
// Reserves the entire 4gb guest address space as a page-file-backed mapping,
// maps all aliased views into it, commits the managed heaps and the GPU
// writeback region, and installs the MMIO fault handler.
// Returns 0 on success, nonzero on failure.
int Memory::Initialize() {
  int result = alloy::Memory::Initialize();
  if (result) {
    return result;
  }

  // Create main page file-backed mapping. This is all reserved but
  // uncommitted (so it shouldn't expand page file).
#if XE_PLATFORM_WIN32
  mapping_ =
      CreateFileMapping(INVALID_HANDLE_VALUE, NULL,
                        PAGE_READWRITE | SEC_RESERVE, 1, 0,  // entire 4gb space
                        NULL);
#else
  // NOTE(review): shm_open returns -1 on failure, which the `!mapping_` check
  // below will NOT catch; also O_CREAT without an access mode (O_RDWR) and a
  // 000 permission mask looks suspect — verify the POSIX path before relying
  // on it.
  char mapping_path[] = "/xenia/mapping/XXXXXX";
  mktemp(mapping_path);
  mapping_ = shm_open(mapping_path, O_CREAT, 0);
  ftruncate(mapping_, 0x100000000);
#endif  // XE_PLATFORM_WIN32
  if (!mapping_) {
    XELOGE("Unable to reserve the 4gb guest address space.");
    assert_not_null(mapping_);
    return 1;
  }

  // Attempt to create our views. This may fail at the first address
  // we pick, so try a few times.
  mapping_base_ = 0;
  for (size_t n = 32; n < 64; n++) {
    // Try power-of-two bases 2^32..2^63 until the OS gives us all views.
    uint8_t* mapping_base = (uint8_t*)(1ull << n);
    if (!MapViews(mapping_base)) {
      mapping_base_ = mapping_base;
      break;
    }
  }
  if (!mapping_base_) {
    XELOGE("Unable to find a continuous block in the 64bit address space.");
    assert_always();
    return 1;
  }
  membase_ = mapping_base_;

  // Prepare heaps.
  virtual_heap_->Initialize(XENON_MEMORY_VIRTUAL_HEAP_LOW,
                            XENON_MEMORY_VIRTUAL_HEAP_HIGH);
  physical_heap_->Initialize(XENON_MEMORY_PHYSICAL_HEAP_LOW,
                             XENON_MEMORY_PHYSICAL_HEAP_HIGH - 0x1000);

  // GPU writeback.
  // 0xC... is physical, 0x7F... is virtual. We may need to overlay these.
  VirtualAlloc(Translate(0x00000000), 0x00100000, MEM_COMMIT, PAGE_READWRITE);

  // Add handlers for MMIO.
  mmio_handler_ = cpu::MMIOHandler::Install(mapping_base_);
  if (!mmio_handler_) {
    XELOGE("Unable to install MMIO handlers");
    assert_always();
    return 1;
  }

  return 0;
}
|
|
|
|
|
2014-08-02 04:43:52 +00:00
|
|
|
// View table: each entry maps a guest-virtual range onto an offset within the
// shared 4gb file mapping. Entries with the same target_address alias the
// same physical storage (e.g. 0xA0000000/0xC0000000/0xE0000000 all alias
// offset 0). Must stay in sync with views_.all_views (asserted in MapViews).
const static struct {
  uint64_t virtual_address_start;  // Inclusive guest start.
  uint64_t virtual_address_end;    // Inclusive guest end.
  uint64_t target_address;         // Offset into the file mapping.
} map_info[] = {
    0x00000000, 0x3FFFFFFF, 0x00000000,  // (1024mb) - virtual 4k pages
    0x40000000, 0x7EFFFFFF, 0x40000000,  // (1024mb) - virtual 64k pages (cont)
    0x7F000000, 0x7F0FFFFF, 0x00000000,  // (1mb) - GPU writeback
    0x7F100000, 0x7FFFFFFF, 0x00100000,  // (15mb) - XPS?
    0x80000000, 0x8FFFFFFF, 0x80000000,  // (256mb) - xex 64k pages
    0x90000000, 0x9FFFFFFF, 0x80000000,  // (256mb) - xex 4k pages
    0xA0000000, 0xBFFFFFFF, 0x00000000,  // (512mb) - physical 64k pages
    0xC0000000, 0xDFFFFFFF, 0x00000000,  //          - physical 16mb pages
    0xE0000000, 0xFFFFFFFF, 0x00000000,  //          - physical 4k pages
};
|
2014-08-20 04:02:15 +00:00
|
|
|
// Maps every entry of map_info as a view of the shared mapping at
// mapping_base + virtual_address_start. Returns 0 when all views mapped,
// 1 (after unmapping any partial progress) when any view failed so the
// caller can retry at a different base.
int Memory::MapViews(uint8_t* mapping_base) {
  assert_true(poly::countof(map_info) == poly::countof(views_.all_views));
  for (size_t n = 0; n < poly::countof(map_info); n++) {
#if XE_PLATFORM_WIN32
    views_.all_views[n] = reinterpret_cast<uint8_t*>(MapViewOfFileEx(
        mapping_, FILE_MAP_ALL_ACCESS, 0x00000000,
        (DWORD)map_info[n].target_address,
        map_info[n].virtual_address_end - map_info[n].virtual_address_start + 1,
        mapping_base + map_info[n].virtual_address_start));
#else
    // NOTE(review): mmap reports failure as MAP_FAILED ((void*)-1), not null,
    // so the `!views_.all_views[n]` check below won't catch a failed mmap —
    // verify on the POSIX path.
    views_.all_views[n] = reinterpret_cast<uint8_t*>(mmap(
        map_info[n].virtual_address_start + mapping_base,
        map_info[n].virtual_address_end - map_info[n].virtual_address_start + 1,
        PROT_NONE, MAP_SHARED | MAP_FIXED, mapping_,
        map_info[n].target_address));
#endif  // XE_PLATFORM_WIN32
    if (!views_.all_views[n]) {
      // Failed, so bail and try again.
      UnmapViews();
      return 1;
    }
  }
  return 0;
}
|
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
// Unmaps any views that were successfully mapped by MapViews. Safe to call
// on a partially-mapped set (null entries are skipped).
void Memory::UnmapViews() {
  for (size_t n = 0; n < poly::countof(views_.all_views); n++) {
    if (views_.all_views[n]) {
#if XE_PLATFORM_WIN32
      UnmapViewOfFile(views_.all_views[n]);
#else
      // munmap needs the original length; recompute it from map_info.
      size_t length = map_info[n].virtual_address_end -
                      map_info[n].virtual_address_start + 1;
      munmap(views_.all_views[n], length);
#endif  // XE_PLATFORM_WIN32
    }
  }
}
|
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
// Registers an MMIO range: the guest pages are committed PAGE_NOACCESS so
// every access faults into the MMIO handler, which then dispatches to the
// given read/write callbacks. Returns false if the commit or registration
// fails.
bool Memory::AddMappedRange(uint64_t address, uint64_t mask, uint64_t size,
                            void* context, cpu::MMIOReadCallback read_callback,
                            cpu::MMIOWriteCallback write_callback) {
  // No access: every load/store traps and is emulated by the handler.
  DWORD protect = PAGE_NOACCESS;
  if (!VirtualAlloc(Translate(address), size, MEM_COMMIT, protect)) {
    XELOGE("Unable to map range; commit/protect failed");
    return false;
  }
  return mmio_handler_->RegisterRange(address, mask, size, context,
                                      read_callback, write_callback);
}
|
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
uint8_t Memory::LoadI8(uint64_t address) {
|
2014-06-02 06:36:18 +00:00
|
|
|
uint64_t value;
|
2014-07-30 05:12:39 +00:00
|
|
|
if (!mmio_handler_->CheckLoad(address, &value)) {
|
2014-06-02 06:36:18 +00:00
|
|
|
value = *reinterpret_cast<uint8_t*>(Translate(address));
|
|
|
|
}
|
|
|
|
return static_cast<uint8_t>(value);
|
|
|
|
}
|
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
uint16_t Memory::LoadI16(uint64_t address) {
|
2014-06-02 06:36:18 +00:00
|
|
|
uint64_t value;
|
2014-07-30 05:12:39 +00:00
|
|
|
if (!mmio_handler_->CheckLoad(address, &value)) {
|
2014-06-02 06:36:18 +00:00
|
|
|
value = *reinterpret_cast<uint16_t*>(Translate(address));
|
|
|
|
}
|
|
|
|
return static_cast<uint16_t>(value);
|
|
|
|
}
|
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
uint32_t Memory::LoadI32(uint64_t address) {
|
2014-06-02 06:36:18 +00:00
|
|
|
uint64_t value;
|
2014-07-30 05:12:39 +00:00
|
|
|
if (!mmio_handler_->CheckLoad(address, &value)) {
|
2014-06-02 06:36:18 +00:00
|
|
|
value = *reinterpret_cast<uint32_t*>(Translate(address));
|
|
|
|
}
|
|
|
|
return static_cast<uint32_t>(value);
|
|
|
|
}
|
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
uint64_t Memory::LoadI64(uint64_t address) {
|
2014-06-02 06:36:18 +00:00
|
|
|
uint64_t value;
|
2014-07-30 05:12:39 +00:00
|
|
|
if (!mmio_handler_->CheckLoad(address, &value)) {
|
2014-06-02 06:36:18 +00:00
|
|
|
value = *reinterpret_cast<uint64_t*>(Translate(address));
|
|
|
|
}
|
|
|
|
return static_cast<uint64_t>(value);
|
|
|
|
}
|
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
void Memory::StoreI8(uint64_t address, uint8_t value) {
|
2014-07-30 05:12:39 +00:00
|
|
|
if (!mmio_handler_->CheckStore(address, value)) {
|
2014-06-02 06:36:18 +00:00
|
|
|
*reinterpret_cast<uint8_t*>(Translate(address)) = value;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
void Memory::StoreI16(uint64_t address, uint16_t value) {
|
2014-07-30 05:12:39 +00:00
|
|
|
if (!mmio_handler_->CheckStore(address, value)) {
|
2014-06-02 06:36:18 +00:00
|
|
|
*reinterpret_cast<uint16_t*>(Translate(address)) = value;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
void Memory::StoreI32(uint64_t address, uint32_t value) {
|
2014-07-30 05:12:39 +00:00
|
|
|
if (!mmio_handler_->CheckStore(address, value)) {
|
2014-06-02 06:36:18 +00:00
|
|
|
*reinterpret_cast<uint32_t*>(Translate(address)) = value;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
void Memory::StoreI64(uint64_t address, uint64_t value) {
|
2014-07-30 05:12:39 +00:00
|
|
|
if (!mmio_handler_->CheckStore(address, value)) {
|
2014-06-02 06:36:18 +00:00
|
|
|
*reinterpret_cast<uint64_t*>(Translate(address)) = value;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
// Allocates guest memory. With base_address == 0, allocates from one of the
// managed heaps (physical or virtual, chosen by flags); otherwise performs a
// placement allocation by committing pages at the exact requested address.
// Returns the guest address, or 0 on failure.
uint64_t Memory::HeapAlloc(uint64_t base_address, size_t size, uint32_t flags,
                           uint32_t alignment) {
  // If we were given a base address we are outside of the normal heap and
  // will place wherever asked (so long as it doesn't overlap the heap).
  if (!base_address) {
    // Normal allocation from the managed heap.
    uint64_t result;
    if (flags & MEMORY_FLAG_PHYSICAL) {
      result = physical_heap_->Alloc(base_address, size, flags, alignment);
    } else {
      result = virtual_heap_->Alloc(base_address, size, flags, alignment);
    }
    if (result) {
      if (flags & MEMORY_FLAG_ZERO) {
        memset(Translate(result), 0, size);
      }
    }
    return result;
  } else {
    // Placement allocations must not land inside either managed heap.
    if (base_address >= XENON_MEMORY_VIRTUAL_HEAP_LOW &&
        base_address < XENON_MEMORY_VIRTUAL_HEAP_HIGH) {
      // Overlapping managed heap.
      assert_always();
      return 0;
    }
    if (base_address >= XENON_MEMORY_PHYSICAL_HEAP_LOW &&
        base_address < XENON_MEMORY_PHYSICAL_HEAP_HIGH) {
      // Overlapping managed heap.
      assert_always();
      return 0;
    }

    uint8_t* p = Translate(base_address);
    // TODO(benvanik): check if address range is in use with a query.

    void* pv = VirtualAlloc(p, size, MEM_COMMIT, PAGE_READWRITE);
    if (!pv) {
      // Failed.
      assert_always();
      return 0;
    }

    if (flags & MEMORY_FLAG_ZERO) {
      memset(pv, 0, size);
    }

    return base_address;
  }
}
|
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
int Memory::HeapFree(uint64_t address, size_t size) {
|
2013-12-07 06:57:16 +00:00
|
|
|
if (address >= XENON_MEMORY_VIRTUAL_HEAP_LOW &&
|
|
|
|
address < XENON_MEMORY_VIRTUAL_HEAP_HIGH) {
|
|
|
|
return virtual_heap_->Free(address, size) ? 0 : 1;
|
|
|
|
} else if (address >= XENON_MEMORY_PHYSICAL_HEAP_LOW &&
|
|
|
|
address < XENON_MEMORY_PHYSICAL_HEAP_HIGH) {
|
|
|
|
return physical_heap_->Free(address, size) ? 0 : 1;
|
2013-05-30 04:00:55 +00:00
|
|
|
} else {
|
|
|
|
// A placed address. Decommit.
|
2013-12-07 06:57:16 +00:00
|
|
|
uint8_t* p = Translate(address);
|
2013-05-30 04:00:55 +00:00
|
|
|
return VirtualFree(p, size, MEM_DECOMMIT) ? 0 : 1;
|
|
|
|
}
|
|
|
|
}
|
2013-01-29 05:36:03 +00:00
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
// Fills mem_info with the OS view of the region containing base_address,
// converting host pointers back into guest addresses. Returns false if the
// underlying VirtualQuery fails.
bool Memory::QueryInformation(uint64_t base_address, AllocationInfo* mem_info) {
  uint8_t* p = Translate(base_address);
  MEMORY_BASIC_INFORMATION mbi;
  if (!VirtualQuery(p, &mbi, sizeof(mbi))) {
    return false;
  }
  mem_info->base_address = base_address;
  // AllocationBase is a host pointer; rebase it to a guest address.
  mem_info->allocation_base = static_cast<uint64_t>(
      reinterpret_cast<uint8_t*>(mbi.AllocationBase) - membase_);
  mem_info->allocation_protect = mbi.AllocationProtect;
  mem_info->region_size = mbi.RegionSize;
  mem_info->state = mbi.State;
  mem_info->protect = mbi.Protect;
  mem_info->type = mbi.Type;
  return true;
}
|
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
// Returns the size of the allocation containing base_address: delegated to
// the owning managed heap when in a heap range, otherwise taken from the OS
// region size. Returns 0 on failure.
size_t Memory::QuerySize(uint64_t base_address) {
  if (base_address >= XENON_MEMORY_VIRTUAL_HEAP_LOW &&
      base_address < XENON_MEMORY_VIRTUAL_HEAP_HIGH) {
    return virtual_heap_->QuerySize(base_address);
  }
  if (base_address >= XENON_MEMORY_PHYSICAL_HEAP_LOW &&
      base_address < XENON_MEMORY_PHYSICAL_HEAP_HIGH) {
    return physical_heap_->QuerySize(base_address);
  }
  // A placed address: ask the OS for the containing region.
  MEMORY_BASIC_INFORMATION mem_info;
  uint8_t* host_ptr = Translate(base_address);
  if (!VirtualQuery(host_ptr, &mem_info, sizeof(mem_info))) {
    return 0;  // Query failed.
  }
  return mem_info.RegionSize;
}
|
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
int Memory::Protect(uint64_t address, size_t size, uint32_t access) {
|
2013-12-07 06:57:16 +00:00
|
|
|
uint8_t* p = Translate(address);
|
2013-01-29 05:36:03 +00:00
|
|
|
|
2013-10-20 20:42:34 +00:00
|
|
|
size_t heap_guard_size = FLAGS_heap_guard_pages * 4096;
|
|
|
|
p += heap_guard_size;
|
|
|
|
|
2013-10-23 06:34:24 +00:00
|
|
|
DWORD new_protect = access;
|
2014-08-19 05:12:21 +00:00
|
|
|
new_protect =
|
|
|
|
new_protect &
|
|
|
|
(X_PAGE_NOACCESS | X_PAGE_READONLY | X_PAGE_READWRITE | X_PAGE_WRITECOPY |
|
|
|
|
X_PAGE_GUARD | X_PAGE_NOCACHE | X_PAGE_WRITECOMBINE);
|
2013-10-23 06:34:24 +00:00
|
|
|
|
2013-05-30 04:00:55 +00:00
|
|
|
DWORD old_protect;
|
|
|
|
return VirtualProtect(p, size, new_protect, &old_protect) == TRUE ? 0 : 1;
|
2013-01-29 05:36:03 +00:00
|
|
|
}
|
2013-10-21 07:57:48 +00:00
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
uint32_t Memory::QueryProtect(uint64_t address) {
|
2013-12-07 06:57:16 +00:00
|
|
|
uint8_t* p = Translate(address);
|
|
|
|
MEMORY_BASIC_INFORMATION info;
|
|
|
|
size_t info_size = VirtualQuery((void*)p, &info, sizeof(info));
|
|
|
|
if (!info_size) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
return info.Protect;
|
|
|
|
}
|
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
// Constructs a heap shell; the backing region and mspace are created later by
// Initialize(). All members are initialized here because ~MemoryHeap tests
// space_ and ptr_ — previously they were left indeterminate, so destroying a
// heap whose Initialize() was never called (or failed early) read garbage.
MemoryHeap::MemoryHeap(Memory* memory, bool is_physical)
    : memory_(memory),
      heap_id_(next_heap_id_++),  // Unique, monotonically increasing id.
      is_physical_(is_physical),
      size_(0),
      ptr_(nullptr),
      space_(nullptr) {}
|
2013-10-21 07:57:48 +00:00
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
// Destroys the mspace (under the lock) and releases the committed heap
// region. NOTE(review): space_ is read before the lock is taken — harmless
// only if no other thread can race the destructor; confirm ownership rules.
MemoryHeap::~MemoryHeap() {
  if (space_) {
    std::lock_guard<std::mutex> guard(lock_);
    destroy_mspace(space_);
    space_ = NULL;
  }

  if (ptr_) {
    // Release (not just decommit) the pages committed by Initialize().
    VirtualFree(ptr_, 0, MEM_RELEASE);
  }
}
|
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
// Commits the guest range [low, high) inside the 0x00000000 view and builds
// a dlmalloc mspace on top of it. Returns 0 on success, 1 if the commit
// fails.
int MemoryHeap::Initialize(uint64_t low, uint64_t high) {
  // Commit the memory where our heap will live and allocate it.
  // TODO(benvanik): replace dlmalloc with an implementation that can commit
  // as it goes.
  size_ = high - low;
  ptr_ = memory_->views_.v00000000 + low;
  void* heap_result = VirtualAlloc(ptr_, size_, MEM_COMMIT, PAGE_READWRITE);
  if (!heap_result) {
    return 1;
  }
  // The mspace lives directly in the committed guest pages, so chunk
  // pointers translate to guest addresses by subtracting mapping_base_.
  space_ = create_mspace_with_base(ptr_, size_, 0);

  return 0;
}
|
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
uint64_t MemoryHeap::Alloc(uint64_t base_address, size_t size, uint32_t flags,
|
|
|
|
uint32_t alignment) {
|
2014-07-10 05:28:51 +00:00
|
|
|
lock_.lock();
|
2013-10-22 02:28:25 +00:00
|
|
|
size_t alloc_size = size;
|
2013-10-21 07:57:48 +00:00
|
|
|
size_t heap_guard_size = FLAGS_heap_guard_pages * 4096;
|
|
|
|
if (heap_guard_size) {
|
2014-08-16 23:57:00 +00:00
|
|
|
alignment = std::max(alignment, static_cast<uint32_t>(heap_guard_size));
|
2014-08-17 00:18:20 +00:00
|
|
|
alloc_size = static_cast<uint32_t>(poly::round_up(size, heap_guard_size));
|
2013-10-21 07:57:48 +00:00
|
|
|
}
|
2014-08-16 23:57:00 +00:00
|
|
|
uint8_t* p = (uint8_t*)mspace_memalign(space_, alignment,
|
|
|
|
alloc_size + heap_guard_size * 2);
|
2014-08-05 19:05:27 +00:00
|
|
|
assert_true(reinterpret_cast<uint64_t>(p) <= 0xFFFFFFFFFull);
|
2013-10-21 07:57:48 +00:00
|
|
|
if (FLAGS_heap_guard_pages) {
|
|
|
|
size_t real_size = mspace_usable_size(p);
|
|
|
|
DWORD old_protect;
|
2014-08-19 05:12:21 +00:00
|
|
|
VirtualProtect(p, heap_guard_size, PAGE_NOACCESS, &old_protect);
|
2013-10-21 07:57:48 +00:00
|
|
|
p += heap_guard_size;
|
2014-08-19 05:12:21 +00:00
|
|
|
VirtualProtect(p + alloc_size, heap_guard_size, PAGE_NOACCESS,
|
|
|
|
&old_protect);
|
2013-10-21 07:57:48 +00:00
|
|
|
}
|
|
|
|
if (FLAGS_log_heap) {
|
|
|
|
Dump();
|
|
|
|
}
|
2014-07-10 05:28:51 +00:00
|
|
|
lock_.unlock();
|
2013-10-21 07:57:48 +00:00
|
|
|
if (!p) {
|
|
|
|
return 0;
|
|
|
|
}
|
2013-10-22 02:28:25 +00:00
|
|
|
|
2013-12-07 06:57:16 +00:00
|
|
|
if (is_physical_) {
|
2013-10-22 02:28:25 +00:00
|
|
|
// If physical, we need to commit the memory in the physical address ranges
|
|
|
|
// so that it can be accessed.
|
2014-08-19 05:12:21 +00:00
|
|
|
VirtualAlloc(memory_->views_.vA0000000 + (p - memory_->views_.v00000000),
|
|
|
|
size, MEM_COMMIT, PAGE_READWRITE);
|
|
|
|
VirtualAlloc(memory_->views_.vC0000000 + (p - memory_->views_.v00000000),
|
|
|
|
size, MEM_COMMIT, PAGE_READWRITE);
|
|
|
|
VirtualAlloc(memory_->views_.vE0000000 + (p - memory_->views_.v00000000),
|
|
|
|
size, MEM_COMMIT, PAGE_READWRITE);
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((flags & X_MEM_NOZERO) && FLAGS_scribble_heap) {
|
2013-10-27 19:06:02 +00:00
|
|
|
// Trash the memory so that we can see bad read-before-write bugs easier.
|
|
|
|
memset(p, 0xCD, alloc_size);
|
2013-10-27 23:01:35 +00:00
|
|
|
} else {
|
|
|
|
// Implicit clear.
|
|
|
|
memset(p, 0, alloc_size);
|
2013-10-27 19:06:02 +00:00
|
|
|
}
|
|
|
|
|
2013-12-07 06:57:16 +00:00
|
|
|
uint64_t address =
|
|
|
|
(uint64_t)((uintptr_t)p - (uintptr_t)memory_->mapping_base_);
|
|
|
|
|
|
|
|
return address;
|
2013-10-21 07:57:48 +00:00
|
|
|
}
|
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
// Frees an allocation made by Alloc. Re-opens the guard bands (so dlmalloc
// can write its free-chunk metadata), releases the chunk, and for physical
// heaps decommits the aliased physical views. Returns the usable size that
// was released, or 0 if the chunk looks invalid.
uint64_t MemoryHeap::Free(uint64_t address, size_t size) {
  uint8_t* p = memory_->Translate(address);

  // Heap allocated address.
  // Walk back over the leading guard band to recover the raw chunk pointer.
  size_t heap_guard_size = FLAGS_heap_guard_pages * 4096;
  p -= heap_guard_size;
  size_t real_size = mspace_usable_size(p);
  real_size -= heap_guard_size * 2;
  if (!real_size) {
    return 0;
  }

  if (FLAGS_scribble_heap) {
    // Trash the memory so that we can see bad read-before-write bugs easier.
    // NOTE(review): scribbles the caller-supplied `size`, not real_size — a
    // size of 0 scribbles nothing; confirm callers always pass the true size.
    memset(p + heap_guard_size, 0xDC, size);
  }

  lock_.lock();
  if (FLAGS_heap_guard_pages) {
    // Restore access to both guard bands before handing the chunk back.
    DWORD old_protect;
    VirtualProtect(p, heap_guard_size, PAGE_READWRITE, &old_protect);
    VirtualProtect(p + heap_guard_size + real_size, heap_guard_size,
                   PAGE_READWRITE, &old_protect);
  }
  mspace_free(space_, p);
  if (FLAGS_log_heap) {
    Dump();
  }
  lock_.unlock();

  if (is_physical_) {
    // If physical, decommit from physical ranges too.
    VirtualFree(memory_->views_.vA0000000 + (p - memory_->views_.v00000000),
                size, MEM_DECOMMIT);
    VirtualFree(memory_->views_.vC0000000 + (p - memory_->views_.v00000000),
                size, MEM_DECOMMIT);
    VirtualFree(memory_->views_.vE0000000 + (p - memory_->views_.v00000000),
                size, MEM_DECOMMIT);
  }

  return (uint64_t)real_size;
}
|
2013-10-23 06:34:24 +00:00
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
// Returns the usable size of the allocation containing base_address:
// dlmalloc's reported chunk size minus the guard bands (when
// --heap_guard_pages is set) padding each side of the user region.
size_t MemoryHeap::QuerySize(uint64_t base_address) {
  // Walk back over the leading guard band to the raw chunk pointer.
  size_t guard_bytes = FLAGS_heap_guard_pages * 4096;
  uint8_t* chunk_ptr = memory_->Translate(base_address) - guard_bytes;
  size_t user_size = mspace_usable_size(chunk_ptr) - guard_bytes * 2;
  if (!user_size) {
    return 0;  // Not a live allocation.
  }
  return user_size;
}
|
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
// Logs dlmalloc's mallinfo counters for this heap, then walks every chunk
// via DumpHandler. Caller is expected to hold lock_ (Alloc/Free call this
// from inside their locked sections).
void MemoryHeap::Dump() {
  XELOGI("MemoryHeap::Dump - %s", is_physical_ ? "physical" : "virtual");
  if (FLAGS_heap_guard_pages) {
    XELOGI("  (heap guard pages enabled, stats will be wrong)");
  }
  struct mallinfo info = mspace_mallinfo(space_);
  XELOGI("    arena: %lld", info.arena);
  XELOGI("  ordblks: %lld", info.ordblks);
  XELOGI("    hblks: %lld", info.hblks);
  XELOGI("   hblkhd: %lld", info.hblkhd);
  XELOGI("  usmblks: %lld", info.usmblks);
  XELOGI(" uordblks: %lld", info.uordblks);
  XELOGI(" fordblks: %lld", info.fordblks);
  XELOGI(" keepcost: %lld", info.keepcost);
  mspace_inspect_all(space_, DumpHandler, this);
}
|
|
|
|
|
2014-08-20 04:02:15 +00:00
|
|
|
// mspace_inspect_all callback: logs one chunk, converting host pointers to
// guest addresses and trimming the guard bands off each end. context is the
// MemoryHeap* passed by Dump().
void MemoryHeap::DumpHandler(void* start, void* end, size_t used_bytes,
                             void* context) {
  MemoryHeap* heap = (MemoryHeap*)context;
  Memory* memory = heap->memory_;
  size_t heap_guard_size = FLAGS_heap_guard_pages * 4096;
  uint64_t start_addr = (uint64_t)start + heap_guard_size;
  uint64_t end_addr = (uint64_t)end - heap_guard_size;
  uint32_t guest_start =
      (uint32_t)(start_addr - (uintptr_t)memory->mapping_base_);
  uint32_t guest_end = (uint32_t)(end_addr - (uintptr_t)memory->mapping_base_);
  // Tiny chunks can go non-positive once the guard bands are subtracted;
  // fall back to logging the raw host range in that case.
  if (int32_t(end_addr - start_addr) > 0) {
    XELOGI(" - %.8X-%.8X (%10db) %.16llX-%.16llX - %9db used", guest_start,
           guest_end, (guest_end - guest_start), start_addr, end_addr,
           used_bytes);
  } else {
    XELOGI(" - %.16llX-%.16llX - %9db used",
           start, end, used_bytes);
  }
}
|