Merge remote-tracking branch 'GliniakRepo/memory_stats' into canary_pr
commit b237b71031
@@ -496,33 +496,24 @@ dword_result_t MmQueryStatistics_entry(
   stats_ptr->total_physical_pages = 0x00020000;  // 512mb / 4kb pages
   stats_ptr->kernel_pages = 0x00000300;
 
-  // TODO(gibbed): maybe use LookupHeapByType instead?
-  auto heap_a = kernel_memory()->LookupHeap(0xA0000000);
-  auto heap_c = kernel_memory()->LookupHeap(0xC0000000);
-  auto heap_e = kernel_memory()->LookupHeap(0xE0000000);
-
-  assert_not_null(heap_a);
-  assert_not_null(heap_c);
-  assert_not_null(heap_e);
-
-#define GET_USED_PAGE_COUNT(x) \
-  (x->GetTotalPageCount() - x->GetUnreservedPageCount())
-#define GET_USED_PAGE_SIZE(x) ((GET_USED_PAGE_COUNT(x) * x->page_size()) / 4096)
+  uint32_t reserved_pages = 0;
+  uint32_t unreserved_pages = 0;
   uint32_t used_pages = 0;
-  used_pages += GET_USED_PAGE_SIZE(heap_a);
-  used_pages += GET_USED_PAGE_SIZE(heap_c);
-  used_pages += GET_USED_PAGE_SIZE(heap_e);
-#undef GET_USED_PAGE_SIZE
-#undef GET_USED_PAGE_COUNT
+  uint32_t reserved_pages_bytes = 0;
+  const BaseHeap* physical_heaps[3] = {
+      kernel_memory()->LookupHeapByType(true, 0x1000),
+      kernel_memory()->LookupHeapByType(true, 0x10000),
+      kernel_memory()->LookupHeapByType(true, 0x1000000)};
+
+  kernel_memory()->GetHeapsPageStatsSummary(
+      physical_heaps, std::size(physical_heaps), unreserved_pages,
+      reserved_pages, used_pages, reserved_pages_bytes);
 
   assert_true(used_pages < stats_ptr->total_physical_pages);
 
   stats_ptr->title.available_pages =
-      stats_ptr->total_physical_pages - used_pages;
-  stats_ptr->title.total_virtual_memory_bytes =
-      0x2FFF0000;  // TODO(gibbed): FIXME
-  stats_ptr->title.reserved_virtual_memory_bytes =
-      0x00160000;  // TODO(gibbed): FIXME
+      stats_ptr->total_physical_pages - stats_ptr->kernel_pages - used_pages;
+  stats_ptr->title.total_virtual_memory_bytes = 0x2FFE0000;
+  stats_ptr->title.reserved_virtual_memory_bytes = reserved_pages_bytes;
   stats_ptr->title.physical_pages = 0x00001000;  // TODO(gibbed): FIXME
   stats_ptr->title.pool_pages = 0x00000010;
   stats_ptr->title.stack_pages = 0x00000100;
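Note on the arithmetic above: used pages are reported in 4 KiB units regardless of a heap's native page size, exactly as the old GET_USED_PAGE_SIZE macro computed them. A minimal, self-contained sketch of that normalization follows; HeapModel and UsedPagesIn4KiB are illustrative stand-ins, not xenia types.

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for a heap's counters; not xenia's BaseHeap.
struct HeapModel {
  uint32_t total_page_count;
  uint32_t unreserved_page_count;
  uint32_t page_size;  // bytes per native page
};

// Same arithmetic as the old GET_USED_PAGE_SIZE macro: convert used
// native pages into 4 KiB pages, the unit MmQueryStatistics reports.
uint32_t UsedPagesIn4KiB(const HeapModel& heap) {
  uint32_t used_native = heap.total_page_count - heap.unreserved_page_count;
  return (used_native * heap.page_size) / 4096;
}

int main() {
  // A 64 KiB-page heap with 3 pages in use counts as 48 4 KiB pages.
  HeapModel heap{/*total*/ 512, /*unreserved*/ 509, /*page_size*/ 0x10000};
  std::printf("%u\n", UsedPagesIn4KiB(heap));  // prints 48
  return 0;
}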
@@ -353,6 +353,27 @@ BaseHeap* Memory::LookupHeapByType(bool physical, uint32_t page_size) {
 
 VirtualHeap* Memory::GetPhysicalHeap() { return &heaps_.physical; }
 
+void Memory::GetHeapsPageStatsSummary(const BaseHeap* const* provided_heaps,
+                                      size_t heaps_count,
+                                      uint32_t& unreserved_pages,
+                                      uint32_t& reserved_pages,
+                                      uint32_t& used_pages,
+                                      uint32_t& reserved_bytes) {
+  auto lock = global_critical_region_.Acquire();
+  for (size_t i = 0; i < heaps_count; i++) {
+    const BaseHeap* heap = provided_heaps[i];
+    uint32_t heap_unreserved_pages = heap->unreserved_page_count();
+    uint32_t heap_reserved_pages = heap->reserved_page_count();
+
+    unreserved_pages += heap_unreserved_pages;
+    reserved_pages += heap_reserved_pages;
+    used_pages += ((heap->total_page_count() - heap_unreserved_pages) *
+                   heap->page_size()) /
+                  4096;
+    reserved_bytes += heap_reserved_pages * heap->page_size();
+  }
+}
+
 uint32_t Memory::HostToGuestVirtual(const void* host_address) const {
   size_t virtual_address = reinterpret_cast<size_t>(host_address) -
                            reinterpret_cast<size_t>(virtual_membase_);
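Worth noting: GetHeapsPageStatsSummary only ever adds to its out-parameters, so callers must zero-initialize them, as the MmQueryStatistics_entry hunk above does. A standalone model of that contract follows; AccumulateHeapStats and HeapCounters are hypothetical names mirroring the accumulation logic, not xenia's API.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Illustrative model of a heap's counters, not xenia's BaseHeap.
struct HeapCounters {
  uint32_t total_pages;
  uint32_t unreserved_pages;
  uint32_t page_size;  // bytes per native page
};

// Mirrors the += accumulation contract of Memory::GetHeapsPageStatsSummary.
void AccumulateHeapStats(const HeapCounters* heaps, size_t count,
                         uint32_t& unreserved, uint32_t& reserved,
                         uint32_t& used, uint32_t& reserved_bytes) {
  for (size_t i = 0; i < count; i++) {
    uint32_t heap_reserved = heaps[i].total_pages - heaps[i].unreserved_pages;
    unreserved += heaps[i].unreserved_pages;
    reserved += heap_reserved;
    used += (heap_reserved * heaps[i].page_size) / 4096;  // 4 KiB units
    reserved_bytes += heap_reserved * heaps[i].page_size;
  }
}

int main() {
  HeapCounters heaps[2] = {{100, 90, 0x1000}, {50, 40, 0x10000}};
  // Out-parameters must start at zero; the function only adds to them.
  uint32_t unreserved = 0, reserved = 0, used = 0, reserved_bytes = 0;
  AccumulateHeapStats(heaps, 2, unreserved, reserved, used, reserved_bytes);
  std::printf("%u %u %u %u\n", unreserved, reserved, used, reserved_bytes);
  return 0;
}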
@@ -650,6 +671,7 @@ void BaseHeap::Initialize(Memory* memory, uint8_t* membase, HeapType heap_type,
   page_size_ = page_size;
   host_address_offset_ = host_address_offset;
   page_table_.resize(heap_size / page_size);
+  unreserved_page_count_ = uint32_t(page_table_.size());
 }
 
 void BaseHeap::Dispose() {
@@ -716,35 +738,6 @@ void BaseHeap::DumpMap() {
   }
 }
 
-uint32_t BaseHeap::GetTotalPageCount() { return uint32_t(page_table_.size()); }
-
-uint32_t BaseHeap::GetUnreservedPageCount() {
-  auto global_lock = global_critical_region_.Acquire();
-  uint32_t count = 0;
-  bool is_empty_span = false;
-  uint32_t empty_span_start = 0;
-  uint32_t size = uint32_t(page_table_.size());
-  for (uint32_t i = 0; i < size; ++i) {
-    auto& page = page_table_[i];
-    if (!page.state) {
-      if (!is_empty_span) {
-        is_empty_span = true;
-        empty_span_start = i;
-      }
-      continue;
-    }
-    if (is_empty_span) {
-      is_empty_span = false;
-      count += i - empty_span_start;
-    }
-    i += page.region_page_count - 1;
-  }
-  if (is_empty_span) {
-    count += size - empty_span_start;
-  }
-  return count;
-}
-
 bool BaseHeap::Save(ByteStream* stream) {
   XELOGD("Heap {:08X}-{:08X}", heap_base_, heap_base_ + (heap_size_ - 1));
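The removed GetUnreservedPageCount had to rescan the entire page table on every query, coalescing runs of free entries and skipping allocated regions via region_page_count; the commit trades that O(page count) scan for the O(1) unreserved_page_count_ counter maintained at allocation and release time. A standalone model of the deleted scan, with PageEntry as a simplified stand-in for xenia's entry type:

#include <cstdint>
#include <vector>

// Simplified stand-in for xenia's page table entry.
struct PageEntry {
  uint32_t state;              // 0 = free (unreserved)
  uint32_t region_page_count;  // pages spanned by this allocation
};

// Same shape as the removed BaseHeap::GetUnreservedPageCount: scan the
// table, summing free spans and skipping over allocated regions.
uint32_t CountUnreservedPages(const std::vector<PageEntry>& page_table) {
  uint32_t count = 0;
  bool in_empty_span = false;
  uint32_t empty_span_start = 0;
  uint32_t size = uint32_t(page_table.size());
  for (uint32_t i = 0; i < size; ++i) {
    const auto& page = page_table[i];
    if (!page.state) {
      if (!in_empty_span) {
        in_empty_span = true;
        empty_span_start = i;
      }
      continue;
    }
    if (in_empty_span) {
      in_empty_span = false;
      count += i - empty_span_start;
    }
    i += page.region_page_count - 1;  // skip the rest of this region
  }
  if (in_empty_span) {
    count += size - empty_span_start;
  }
  return count;
}

int main() {
  // One 4-page allocation at pages 1..4 in an 8-page table; pages 0 and
  // 5..7 remain free, so the scan should report 4 unreserved pages.
  std::vector<PageEntry> table(8, PageEntry{0, 0});
  for (uint32_t p = 1; p <= 4; ++p) {
    table[p] = PageEntry{1, 4};
  }
  return CountUnreservedPages(table) == 4 ? 0 : 1;
}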
@@ -908,6 +901,9 @@ bool BaseHeap::AllocFixed(uint32_t base_address, uint32_t size,
     }
     page_entry.allocation_protect = protect;
     page_entry.current_protect = protect;
+    if (!(page_entry.state & kMemoryAllocationReserve)) {
+      unreserved_page_count_--;
+    }
     page_entry.state = kMemoryAllocationReserve | allocation_type;
   }
 
@@ -1054,6 +1050,7 @@ bool BaseHeap::AllocRange(uint32_t low_address, uint32_t high_address,
     page_entry.allocation_protect = protect;
     page_entry.current_protect = protect;
     page_entry.state = kMemoryAllocationReserve | allocation_type;
+    unreserved_page_count_--;
   }
 
   *out_address = heap_base_ + (start_page_number * page_size_);
@@ -1144,6 +1141,7 @@ bool BaseHeap::Release(uint32_t base_address, uint32_t* out_region_size) {
        ++page_number) {
     auto& page_entry = page_table_[page_number];
     page_entry.qword = 0;
+    unreserved_page_count_++;
   }
 
   return true;
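The three one-line changes above are the whole maintenance burden of the new counter: decrement when AllocFixed or AllocRange moves a page out of the free state, increment when Release zeroes it, so reserved + unreserved == total holds at all times. A toy sketch of that bookkeeping; ToyHeap is illustrative, not BaseHeap, and the guard against double-decrement mirrors AllocFixed's state check above:

#include <cassert>
#include <cstdint>
#include <vector>

// Illustrative toy, not xenia's BaseHeap: shows the counter bookkeeping.
class ToyHeap {
 public:
  explicit ToyHeap(uint32_t page_count)
      : pages_(page_count, 0), unreserved_page_count_(page_count) {}

  void Reserve(uint32_t page) {
    if (!pages_[page]) {
      unreserved_page_count_--;  // page leaves the free state
    }
    pages_[page] = 1;
  }

  void Release(uint32_t page) {
    if (pages_[page]) {
      pages_[page] = 0;
      unreserved_page_count_++;  // page returns to the free state
    }
  }

  uint32_t total_page_count() const { return uint32_t(pages_.size()); }
  uint32_t unreserved_page_count() const { return unreserved_page_count_; }
  uint32_t reserved_page_count() const {
    return total_page_count() - unreserved_page_count_;
  }

 private:
  std::vector<uint8_t> pages_;
  uint32_t unreserved_page_count_;
};

int main() {
  ToyHeap heap(16);
  heap.Reserve(3);
  heap.Reserve(3);  // double-reserve must not decrement twice
  heap.Release(3);
  assert(heap.reserved_page_count() + heap.unreserved_page_count() ==
         heap.total_page_count());
  return 0;
}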
@@ -113,6 +113,17 @@ class BaseHeap {
   // Size of each page within the heap range in bytes.
   uint32_t page_size() const { return page_size_; }
 
+  // Number of pages assigned to the heap.
+  uint32_t total_page_count() const { return uint32_t(page_table_.size()); }
+
+  // Number of unreserved pages in the heap.
+  uint32_t unreserved_page_count() const { return unreserved_page_count_; }
+
+  // Number of reserved pages in the heap.
+  uint32_t reserved_page_count() const {
+    return total_page_count() - unreserved_page_count();
+  }
+
   // Type of specified heap
   HeapType heap_type() const { return heap_type_; }
@@ -132,9 +143,6 @@ class BaseHeap {
   // Dumps information about all allocations within the heap to the log.
   void DumpMap();
 
-  uint32_t GetTotalPageCount();
-  uint32_t GetUnreservedPageCount();
-
   // Allocates pages with the given properties and allocation strategy.
   // This can reserve and commit the pages as well as set protection modes.
   // This will fail if not enough contiguous pages can be found.
@@ -206,6 +214,7 @@ class BaseHeap {
   uint32_t heap_size_;
   uint32_t page_size_;
   uint32_t host_address_offset_;
+  uint32_t unreserved_page_count_;
   xe::global_critical_region global_critical_region_;
   std::vector<PageEntry> page_table_;
 };
@@ -472,6 +481,11 @@ class Memory {
   // Gets the physical base heap.
   VirtualHeap* GetPhysicalHeap();
 
+  void GetHeapsPageStatsSummary(const BaseHeap* const* provided_heaps,
+                                size_t heaps_count, uint32_t& unreserved_pages,
+                                uint32_t& reserved_pages, uint32_t& used_pages,
+                                uint32_t& reserved_bytes);
+
   // Dumps a map of all allocated memory to the log.
   void DumpMap();