Allocating things in a physical heap. Super wasteful right now.
parent ea022c8dd3 · commit aa3e8d0332
@@ -65,10 +65,37 @@ DEFINE_uint64(
  * commit the requested memory as needed. This bypasses the standard heap, but
  * XEXs should never be overwriting anything so that's fine. We can also query
  * for previous commits and assert that we really aren't committing twice.
+ *
+ * GPU memory is mapped onto the lower 512mb of the virtual 4k range (0).
+ * So 0xA0000000 = 0x00000000. A more sophisticated allocator could handle
+ * this.
  */
 
-#define XE_MEMORY_HEAP_LOW            0x00000000
-#define XE_MEMORY_HEAP_HIGH           0x40000000
+#define XE_MEMORY_PHYSICAL_HEAP_LOW   0x00000000
+#define XE_MEMORY_PHYSICAL_HEAP_HIGH  0x20000000
+#define XE_MEMORY_VIRTUAL_HEAP_LOW    0x20000000
+#define XE_MEMORY_VIRTUAL_HEAP_HIGH   0x40000000
 
 
+typedef struct {
+  xe_memory_ref memory;
+  xe_mutex_t*   mutex;
+  size_t        size;
+  uint8_t*      ptr;
+  mspace        space;
+
+  int Initialize(xe_memory_ref memory, uint32_t low, uint32_t high);
+  void Cleanup();
+  void Dump();
+  uint32_t Alloc(uint32_t base_address,
+                 uint32_t size, uint32_t flags,
+                 uint32_t alignment);
+  uint32_t Free(uint32_t address, uint32_t size);
+
+private:
+  static void DumpHandler(
+      void* start, void* end, size_t used_bytes, void* context);
+} xe_memory_heap_t;
+
+
 struct xe_memory {
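For orientation, a minimal standalone sketch (not part of the commit; which_heap and the main driver are hypothetical) of how the new constants split the low 1gb of guest address space between the two managed heaps:

    #include <cstdint>
    #include <cstdio>

    // Mirrors XE_MEMORY_PHYSICAL_HEAP_* and XE_MEMORY_VIRTUAL_HEAP_* above.
    const char* which_heap(uint32_t address) {
      if (address < 0x20000000) return "physical";  // physical heap range
      if (address < 0x40000000) return "virtual";   // virtual heap range
      return "unmanaged";                           // xex/view space, etc.
    }

    int main() {
      printf("%s\n", which_heap(0x00001000));  // physical
      printf("%s\n", which_heap(0x20001000));  // virtual
      printf("%s\n", which_heap(0x80000000));  // unmanaged
      return 0;
    }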
@@ -90,10 +117,8 @@ struct xe_memory {
     uint8_t*    all_views[6];
   } views;
 
-  xe_mutex_t* heap_mutex;
-  size_t      heap_size;
-  uint8_t*    heap_ptr;
-  mspace      heap;
+  xe_memory_heap_t virtual_heap;
+  xe_memory_heap_t physical_heap;
 };
 
 
@@ -102,7 +127,6 @@ void xe_memory_unmap_views(xe_memory_ref memory);
 
 
 xe_memory_ref xe_memory_create(xe_memory_options_t options) {
-
   xe_memory_ref memory = (xe_memory_ref)xe_calloc(sizeof(xe_memory));
   xe_ref_init((xe_ref)memory);
 
@@ -140,25 +164,11 @@ xe_memory_ref xe_memory_create(xe_memory_options_t options) {
     XEFAIL();
   }
 
-  // Lock used around heap allocs/frees.
-  memory->heap_mutex = xe_mutex_alloc(10000);
-  XEEXPECTNOTNULL(memory->heap_mutex);
-
-  // Commit the memory where our heap will live.
-  // TODO(benvanik): replace dlmalloc with an implementation that can commit
-  // as it goes.
-  uint32_t heap_offset = XE_MEMORY_HEAP_LOW;
-  uint32_t heap_size = XE_MEMORY_HEAP_HIGH - XE_MEMORY_HEAP_LOW;
-  memory->heap_size = heap_size;
-  memory->heap_ptr = memory->views.v00000000 + heap_offset;
-  void* heap_result = VirtualAlloc(
-      memory->heap_ptr, heap_size,
-      MEM_COMMIT,
-      PAGE_READWRITE);
-  XEEXPECTNOTNULL(heap_result);
-
-  // Allocate the mspace for our heap.
-  memory->heap = create_mspace_with_base(memory->heap_ptr, heap_size, 0);
+  // Prepare heaps.
+  memory->virtual_heap.Initialize(
+      memory, XE_MEMORY_VIRTUAL_HEAP_LOW, XE_MEMORY_VIRTUAL_HEAP_HIGH);
+  memory->physical_heap.Initialize(
+      memory, XE_MEMORY_PHYSICAL_HEAP_LOW, XE_MEMORY_PHYSICAL_HEAP_HIGH);
 
   return memory;
 
@@ -168,19 +178,9 @@ XECLEANUP:
 }
 
 void xe_memory_dealloc(xe_memory_ref memory) {
-  if (memory->heap_mutex && memory->heap) {
-    xe_mutex_lock(memory->heap_mutex);
-    destroy_mspace(memory->heap);
-    memory->heap = NULL;
-    xe_mutex_unlock(memory->heap_mutex);
-  }
-  if (memory->heap_mutex) {
-    xe_mutex_free(memory->heap_mutex);
-    memory->heap_mutex = NULL;
-  }
-
-  // This decommits all pages and releases everything.
-  XEIGNORE(VirtualFree(memory->heap_ptr, 0, MEM_RELEASE));
+  // Cleanup heaps.
+  memory->virtual_heap.Cleanup();
+  memory->physical_heap.Cleanup();
 
   // Unmap all views and close mapping.
   if (memory->mapping) {
@@ -198,9 +198,9 @@ int xe_memory_map_views(xe_memory_ref memory, uint8_t* mapping_base) {
     0x00000000, 0x3FFFFFFF, 0x00000000, // (1024mb) - virtual 4k pages
     0x40000000, 0x7FFFFFFF, 0x40000000, // (1024mb) - virtual 64k pages
     0x80000000, 0x9FFFFFFF, 0x80000000, // (512mb)  - xex pages
-    0xA0000000, 0xBFFFFFFF, 0xA0000000, // (512mb)  - physical 64k pages
-    0xC0000000, 0xDFFFFFFF, 0xA0000000, //          - physical 16mb pages
-    0xE0000000, 0xFFFFFFFF, 0xA0000000, //          - physical 4k pages
+    0xA0000000, 0xBFFFFFFF, 0x60000000, // (512mb)  - physical 64k pages
+    0xC0000000, 0xDFFFFFFF, 0x60000000, //          - physical 16mb pages
+    0xE0000000, 0xFFFFFFFF, 0x60000000, //          - physical 4k pages
   };
   XEASSERT(XECOUNT(map_info) == XECOUNT(memory->views.all_views));
   for (size_t n = 0; n < XECOUNT(map_info); n++) {
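Worth noting: after this change the three physical views (64k, 16mb, and 4k pages) all target offset 0x60000000 in the backing mapping, so they alias one 512mb region. A standalone sketch of the table lookup (ViewInfo and mapping_offset are illustrative names for this example, not the commit's API):

    #include <cstdint>
    #include <cstdio>

    struct ViewInfo { uint32_t low, high, target; };

    // Same rows as the map_info table above: guest range -> mapping offset.
    static const ViewInfo kViews[] = {
        {0x00000000, 0x3FFFFFFF, 0x00000000},  // virtual 4k pages
        {0x40000000, 0x7FFFFFFF, 0x40000000},  // virtual 64k pages
        {0x80000000, 0x9FFFFFFF, 0x80000000},  // xex pages
        {0xA0000000, 0xBFFFFFFF, 0x60000000},  // physical 64k pages
        {0xC0000000, 0xDFFFFFFF, 0x60000000},  // physical 16mb pages
        {0xE0000000, 0xFFFFFFFF, 0x60000000},  // physical 4k pages
    };

    // Offset of a guest address within the shared file mapping.
    uint32_t mapping_offset(uint32_t guest) {
      for (const ViewInfo& v : kViews) {
        if (guest >= v.low && guest <= v.high) {
          return v.target + (guest - v.low);
        }
      }
      return guest;  // uncovered (shouldn't happen with a full table)
    }

    int main() {
      // All three physical views reach the same backing bytes:
      printf("%08X %08X %08X\n",
             (unsigned)mapping_offset(0xA0001000),
             (unsigned)mapping_offset(0xC0001000),
             (unsigned)mapping_offset(0xE0001000));  // 60001000 x3
      return 0;
    }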
@@ -272,80 +272,27 @@ uint32_t xe_memory_search_aligned(xe_memory_ref memory, size_t start,
   return 0;
 }
 
-void xe_memory_heap_dump_handler(
-    void* start, void* end, size_t used_bytes, void* context) {
-  xe_memory_ref memory = (xe_memory_ref)context;
-  size_t heap_guard_size = FLAGS_heap_guard_pages * 4096;
-  uint64_t start_addr = (uint64_t)start + heap_guard_size;
-  uint64_t end_addr = (uint64_t)end - heap_guard_size;
-  uint32_t guest_start =
-      (uint32_t)(start_addr - (uintptr_t)memory->mapping_base);
-  uint32_t guest_end =
-      (uint32_t)(end_addr - (uintptr_t)memory->mapping_base);
-  if (used_bytes > 0) {
-    XELOGI("  - %.8X-%.8X (%10db) %.16llX-%.16llX - %9db used",
-           guest_start, guest_end, (guest_end - guest_start),
-           start_addr, end_addr,
-           used_bytes);
-  } else {
-    XELOGI("  - %.16llX-%.16llX - %9db used",
-           start_addr, end_addr, used_bytes);
-  }
-}
-void xe_memory_heap_dump(xe_memory_ref memory) {
-  XELOGI("xe_memory_heap_dump:");
-  if (FLAGS_heap_guard_pages) {
-    XELOGI("  (heap guard pages enabled, stats will be wrong)");
-  }
-  struct mallinfo info = mspace_mallinfo(memory->heap);
-  XELOGI("    arena: %lld", info.arena);
-  XELOGI("  ordblks: %lld", info.ordblks);
-  XELOGI("    hblks: %lld", info.hblks);
-  XELOGI("   hblkhd: %lld", info.hblkhd);
-  XELOGI("  usmblks: %lld", info.usmblks);
-  XELOGI(" uordblks: %lld", info.uordblks);
-  XELOGI(" fordblks: %lld", info.fordblks);
-  XELOGI(" keepcost: %lld", info.keepcost);
-  mspace_inspect_all(memory->heap, xe_memory_heap_dump_handler, memory);
-}
-
 uint32_t xe_memory_heap_alloc(
     xe_memory_ref memory, uint32_t base_address, uint32_t size,
     uint32_t flags, uint32_t alignment) {
-  XEASSERT(flags == 0);
 
   // If we were given a base address we are outside of the normal heap and
   // will place wherever asked (so long as it doesn't overlap the heap).
   if (!base_address) {
     // Normal allocation from the managed heap.
-    XEIGNORE(xe_mutex_lock(memory->heap_mutex));
-    size_t heap_guard_size = FLAGS_heap_guard_pages * 4096;
-    if (heap_guard_size) {
-      alignment = (uint32_t)MAX(alignment, heap_guard_size);
-      size = (uint32_t)XEROUNDUP(size, heap_guard_size);
-    }
-    uint8_t* p = (uint8_t*)mspace_memalign(
-        memory->heap,
-        alignment,
-        size + heap_guard_size * 2);
-    if (FLAGS_heap_guard_pages) {
-      size_t real_size = mspace_usable_size(p);
-      DWORD old_protect;
-      VirtualProtect(p, heap_guard_size, PAGE_NOACCESS, &old_protect);
-      p += heap_guard_size;
-      VirtualProtect(p + size, heap_guard_size, PAGE_NOACCESS, &old_protect);
-    }
-    if (FLAGS_log_heap) {
-      xe_memory_heap_dump(memory);
-    }
-    XEIGNORE(xe_mutex_unlock(memory->heap_mutex));
-    if (!p) {
-      return 0;
-    }
-    return (uint32_t)((uintptr_t)p - (uintptr_t)memory->mapping_base);
-  } else {
-    if (base_address >= XE_MEMORY_HEAP_LOW &&
-        base_address < XE_MEMORY_HEAP_HIGH) {
+    if (flags & XE_MEMORY_FLAG_PHYSICAL) {
+      return memory->physical_heap.Alloc(base_address, size, flags, alignment);
+    } else {
+      return memory->virtual_heap.Alloc(base_address, size, flags, alignment);
+    }
+  } else {
+    if (base_address >= XE_MEMORY_VIRTUAL_HEAP_LOW &&
+        base_address < XE_MEMORY_VIRTUAL_HEAP_HIGH) {
+      // Overlapping managed heap.
+      XEASSERTALWAYS();
+      return 0;
+    }
+    if (base_address >= XE_MEMORY_PHYSICAL_HEAP_LOW &&
+        base_address < XE_MEMORY_PHYSICAL_HEAP_HIGH) {
       // Overlapping managed heap.
       XEASSERTALWAYS();
       return 0;
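With base_address == 0, callers now pick a heap purely through the flag. A runnable mirror of that routing (heap_alloc and the two stubs are stand-ins for this sketch, not the commit's functions):

    #include <cstdint>
    #include <cstdio>

    enum {
      XE_MEMORY_FLAG_64KB_PAGES = (1 << 1),
      XE_MEMORY_FLAG_PHYSICAL   = (1 << 2),
    };

    // Trivial stand-ins for the per-heap allocators so the sketch runs.
    uint32_t physical_alloc(uint32_t /*size*/, uint32_t /*alignment*/) {
      return 0x00010000;  // pretend address in the physical heap range
    }
    uint32_t virtual_alloc_guest(uint32_t /*size*/, uint32_t /*alignment*/) {
      return 0x20010000;  // pretend address in the virtual heap range
    }

    // Mirrors the new dispatch: a zero base address means a managed
    // allocation, and the physical flag selects which heap serves it.
    uint32_t heap_alloc(uint32_t base_address, uint32_t size, uint32_t flags,
                        uint32_t alignment) {
      if (!base_address) {
        return (flags & XE_MEMORY_FLAG_PHYSICAL)
                   ? physical_alloc(size, alignment)
                   : virtual_alloc_guest(size, alignment);
      }
      return 0;  // placed allocations are handled separately (elided)
    }

    int main() {
      printf("%08X\n",
             (unsigned)heap_alloc(0, 0x1000, XE_MEMORY_FLAG_PHYSICAL, 0x1000));
      printf("%08X\n", (unsigned)heap_alloc(0, 0x1000, 0, 0x1000));
      return 0;
    }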
@@ -367,38 +314,25 @@ uint32_t xe_memory_heap_alloc(
 
 int xe_memory_heap_free(
     xe_memory_ref memory, uint32_t address, uint32_t size) {
-  uint8_t* p = memory->mapping_base + address;
-  if (address >= XE_MEMORY_HEAP_LOW && address < XE_MEMORY_HEAP_HIGH) {
-    // Heap allocated address.
-    size_t heap_guard_size = FLAGS_heap_guard_pages * 4096;
-    p -= heap_guard_size;
-    size_t real_size = mspace_usable_size(p);
-    real_size -= heap_guard_size * 2;
-    if (!real_size) {
-      return 0;
-    }
-
-    XEIGNORE(xe_mutex_lock(memory->heap_mutex));
-    if (FLAGS_heap_guard_pages) {
-      DWORD old_protect;
-      VirtualProtect(p, heap_guard_size, PAGE_READWRITE, &old_protect);
-      VirtualProtect(p + heap_guard_size + real_size, heap_guard_size,
-                     PAGE_READWRITE, &old_protect);
-    }
-    mspace_free(memory->heap, p);
-    if (FLAGS_log_heap) {
-      xe_memory_heap_dump(memory);
-    }
-    XEIGNORE(xe_mutex_unlock(memory->heap_mutex));
-    return (uint32_t)real_size;
+  if (address >= XE_MEMORY_VIRTUAL_HEAP_LOW &&
+      address < XE_MEMORY_VIRTUAL_HEAP_HIGH) {
+    return memory->virtual_heap.Free(address, size);
+  } else if (address >= XE_MEMORY_PHYSICAL_HEAP_LOW &&
+             address < XE_MEMORY_PHYSICAL_HEAP_HIGH) {
+    return memory->physical_heap.Free(address, size);
   } else {
     // A placed address. Decommit.
+    uint8_t* p = memory->mapping_base + address;
     return VirtualFree(p, size, MEM_DECOMMIT) ? 0 : 1;
   }
 }
 
 bool xe_memory_is_valid(xe_memory_ref memory, uint32_t address) {
   uint8_t* p = memory->mapping_base + address;
-  if (address >= XE_MEMORY_HEAP_LOW && address < XE_MEMORY_HEAP_HIGH) {
+  if ((address >= XE_MEMORY_VIRTUAL_HEAP_LOW &&
+       address < XE_MEMORY_VIRTUAL_HEAP_HIGH) ||
+      (address >= XE_MEMORY_PHYSICAL_HEAP_LOW &&
+       address < XE_MEMORY_PHYSICAL_HEAP_HIGH)) {
     // Within heap range, ask dlmalloc.
     size_t heap_guard_size = FLAGS_heap_guard_pages * 4096;
     p -= heap_guard_size;
@@ -427,3 +361,141 @@ int xe_memory_protect(
   DWORD old_protect;
   return VirtualProtect(p, size, new_protect, &old_protect) == TRUE ? 0 : 1;
 }
+
+
+int xe_memory_heap_t::Initialize(xe_memory_ref memory, uint32_t low, uint32_t high) {
+  this->memory = memory;
+
+  // Lock used around heap allocs/frees.
+  mutex = xe_mutex_alloc(10000);
+  if (!mutex) {
+    return 1;
+  }
+
+  // Commit the memory where our heap will live and allocate it.
+  // TODO(benvanik): replace dlmalloc with an implementation that can commit
+  // as it goes.
+  size = high - low;
+  ptr = memory->views.v00000000 + low;
+  void* heap_result = VirtualAlloc(
+      ptr, size, MEM_COMMIT, PAGE_READWRITE);
+  if (!heap_result) {
+    return 1;
+  }
+  space = create_mspace_with_base(ptr, size, 0);
+
+  return 0;
+}
+
+void xe_memory_heap_t::Cleanup() {
+  if (mutex && space) {
+    xe_mutex_lock(mutex);
+    destroy_mspace(space);
+    space = NULL;
+    xe_mutex_unlock(mutex);
+  }
+  if (mutex) {
+    xe_mutex_free(mutex);
+    mutex = NULL;
+  }
+
+  XEIGNORE(VirtualFree(ptr, 0, MEM_RELEASE));
+}
+
+void xe_memory_heap_t::Dump() {
+  XELOGI("xe_memory_heap_dump:");
+  if (FLAGS_heap_guard_pages) {
+    XELOGI("  (heap guard pages enabled, stats will be wrong)");
+  }
+  struct mallinfo info = mspace_mallinfo(space);
+  XELOGI("    arena: %lld", info.arena);
+  XELOGI("  ordblks: %lld", info.ordblks);
+  XELOGI("    hblks: %lld", info.hblks);
+  XELOGI("   hblkhd: %lld", info.hblkhd);
+  XELOGI("  usmblks: %lld", info.usmblks);
+  XELOGI(" uordblks: %lld", info.uordblks);
+  XELOGI(" fordblks: %lld", info.fordblks);
+  XELOGI(" keepcost: %lld", info.keepcost);
+  mspace_inspect_all(space, DumpHandler, this);
+}
+
+void xe_memory_heap_t::DumpHandler(
+    void* start, void* end, size_t used_bytes, void* context) {
+  xe_memory_heap_t* heap = (xe_memory_heap_t*)context;
+  xe_memory_ref memory = heap->memory;
+  size_t heap_guard_size = FLAGS_heap_guard_pages * 4096;
+  uint64_t start_addr = (uint64_t)start + heap_guard_size;
+  uint64_t end_addr = (uint64_t)end - heap_guard_size;
+  uint32_t guest_start =
+      (uint32_t)(start_addr - (uintptr_t)memory->mapping_base);
+  uint32_t guest_end =
+      (uint32_t)(end_addr - (uintptr_t)memory->mapping_base);
+  if (used_bytes > 0) {
+    XELOGI("  - %.8X-%.8X (%10db) %.16llX-%.16llX - %9db used",
+           guest_start, guest_end, (guest_end - guest_start),
+           start_addr, end_addr,
+           used_bytes);
+  } else {
+    XELOGI("  - %.16llX-%.16llX - %9db used",
+           start_addr, end_addr, used_bytes);
+  }
+}
+
+uint32_t xe_memory_heap_t::Alloc(
+    uint32_t base_address, uint32_t size, uint32_t flags,
+    uint32_t alignment) {
+  XEIGNORE(xe_mutex_lock(mutex));
+  size_t heap_guard_size = FLAGS_heap_guard_pages * 4096;
+  if (heap_guard_size) {
+    alignment = (uint32_t)MAX(alignment, heap_guard_size);
+    size = (uint32_t)XEROUNDUP(size, heap_guard_size);
+  }
+  uint8_t* p = (uint8_t*)mspace_memalign(
+      space,
+      alignment,
+      size + heap_guard_size * 2);
+  if (FLAGS_heap_guard_pages) {
+    size_t real_size = mspace_usable_size(p);
+    DWORD old_protect;
+    VirtualProtect(p, heap_guard_size, PAGE_NOACCESS, &old_protect);
+    p += heap_guard_size;
+    VirtualProtect(p + size, heap_guard_size, PAGE_NOACCESS, &old_protect);
+  }
+  if (FLAGS_log_heap) {
+    Dump();
+  }
+  XEIGNORE(xe_mutex_unlock(mutex));
+  if (!p) {
+    return 0;
+  }
+  return (uint32_t)((uintptr_t)p - (uintptr_t)memory->mapping_base);
+}
+
+uint32_t xe_memory_heap_t::Free(uint32_t address, uint32_t size) {
+  uint8_t* p = memory->mapping_base + address;
+
+  // Heap allocated address.
+  size_t heap_guard_size = FLAGS_heap_guard_pages * 4096;
+  p -= heap_guard_size;
+  size_t real_size = mspace_usable_size(p);
+  real_size -= heap_guard_size * 2;
+  if (!real_size) {
+    return 0;
+  }
+
+  XEIGNORE(xe_mutex_lock(mutex));
+  if (FLAGS_heap_guard_pages) {
+    DWORD old_protect;
+    VirtualProtect(
+        p, heap_guard_size,
+        PAGE_READWRITE, &old_protect);
+    VirtualProtect(
+        p + heap_guard_size + real_size, heap_guard_size,
+        PAGE_READWRITE, &old_protect);
+  }
+  mspace_free(space, p);
+  if (FLAGS_log_heap) {
+    Dump();
+  }
+  XEIGNORE(xe_mutex_unlock(mutex));
+  return (uint32_t)real_size;
+}
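The guard-page scheme in Alloc/Free above pads each block with one protected region on either side and hands the caller a pointer just past the leading guard; Free walks the pointer back before mspace_free. A standalone model of that size and offset arithmetic (all names here are illustrative):

    #include <cassert>
    #include <cstdint>

    const uint32_t kGuardSize = 4096;  // FLAGS_heap_guard_pages * 4096

    struct Layout {
      uint32_t padded_size;  // what mspace_memalign is asked for
      uint32_t user_offset;  // caller pointer offset past the lead guard
    };

    Layout guarded_layout(uint32_t size) {
      // Round the request so the trailing guard starts page-aligned, then
      // add a guard region before and after the usable bytes.
      uint32_t rounded = (size + kGuardSize - 1) / kGuardSize * kGuardSize;
      return Layout{rounded + kGuardSize * 2, kGuardSize};
    }

    int main() {
      Layout l = guarded_layout(100);
      assert(l.padded_size == 3 * 4096);  // guard + one page + guard
      assert(l.user_offset == 4096);      // returned pointer skips the guard
      return 0;
    }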
@@ -42,6 +42,7 @@ uint32_t xe_memory_search_aligned(xe_memory_ref memory, size_t start,
 
 enum {
   XE_MEMORY_FLAG_64KB_PAGES = (1 << 1),
+  XE_MEMORY_FLAG_PHYSICAL   = (1 << 2),
 };
 
 enum {
@@ -231,7 +231,7 @@ uint32_t xeMmAllocatePhysicalMemoryEx(
   XEASSERT(max_addr_range == 0xFFFFFFFF);
 
   // Allocate.
-  uint32_t flags = 0;
+  uint32_t flags = XE_MEMORY_FLAG_PHYSICAL;
   uint32_t base_address = xe_memory_heap_alloc(
       state->memory(), 0, adjusted_size, flags, alignment);
   if (!base_address) {
@@ -239,8 +239,6 @@ uint32_t xeMmAllocatePhysicalMemoryEx(
     return 0;
   }
 
-  // TODO(benvanik): address should be in 0xA0000000+ range.
-
   return base_address;
 }
 
@@ -273,6 +271,9 @@ void xeMmFreePhysicalMemory(uint32_t type, uint32_t base_address) {
 
   // base_address = result of MmAllocatePhysicalMemory.
 
+  // Strip off physical bits before passing down.
+  base_address &= ~0xE0000000;
+
   // TODO(benvanik): free memory.
   XELOGE("xeMmFreePhysicalMemory NOT IMPLEMENTED");
   //uint32_t size = ?;
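The new mask assumes MmAllocatePhysicalMemory results may carry a physical-view tag in the top bits (the 0xA/0xC/0xE nibbles from the views above); clearing 0xE0000000 recovers the heap-relative address. A quick standalone check of that arithmetic (example value only):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t base_address = 0xA0012000;  // tagged physical-view address
      base_address &= ~0xE0000000;         // strip the view bits
      assert(base_address == 0x00012000);
      return 0;
    }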
@@ -305,6 +306,14 @@ uint32_t xeMmGetPhysicalAddress(uint32_t base_address) {
   // physical ones. We could munge up the address here to another mapped view
   // of memory.
 
+  /*if (protect_bits & X_MEM_LARGE_PAGES) {
+    base_address |= 0xA0000000;
+  } else if (protect_bits & X_MEM_16MB_PAGES) {
+    base_address |= 0xC0000000;
+  } else {
+    base_address |= 0xE0000000;
+  }*/
+
   return base_address;
 }