Rewriting memory manager.

parent 3a7d1f21e8
commit 147a70b9c1
@@ -74,7 +74,7 @@ X_STATUS AudioSystem::Setup() {
   processor_ = emulator_->processor();
 
   // Let the processor know we want register access callbacks.
-  emulator_->memory()->AddMappedRange(
+  emulator_->memory()->AddVirtualMappedRange(
       0x7FEA0000, 0xFFFF0000, 0x0000FFFF, this,
       reinterpret_cast<MMIOReadCallback>(MMIOReadRegisterThunk),
       reinterpret_cast<MMIOWriteCallback>(MMIOWriteRegisterThunk));
@@ -94,6 +94,9 @@ X_STATUS AudioSystem::Setup() {
   thread_state_->set_name("Audio Worker");
   thread_block_ = memory()->SystemHeapAlloc(2048);
   thread_state_->context()->r[13] = thread_block_;
+  XELOGI("Audio Worker Thread %X Stack: %.8X-%.8X", thread_state_->thread_id(),
+         thread_state_->stack_address(),
+         thread_state_->stack_address() + thread_state_->stack_size());
 
   // Create worker thread.
   // This will initialize the audio system.
@@ -252,7 +255,7 @@ void AudioSystem::UnregisterClient(size_t index) {
 // piece of hardware:
 // https://github.com/Free60Project/libxenon/blob/master/libxenon/drivers/xenon_sound/sound.c
 
-uint64_t AudioSystem::ReadRegister(uint64_t addr) {
+uint64_t AudioSystem::ReadRegister(uint32_t addr) {
   uint32_t r = addr & 0xFFFF;
   XELOGAPU("ReadRegister(%.4X)", r);
   // 1800h is read on startup and stored -- context? buffers?
@@ -277,7 +280,7 @@ uint64_t AudioSystem::ReadRegister(uint64_t addr) {
   return value;
 }
 
-void AudioSystem::WriteRegister(uint64_t addr, uint64_t value) {
+void AudioSystem::WriteRegister(uint32_t addr, uint64_t value) {
   uint32_t r = addr & 0xFFFF;
   value = xe::byte_swap(uint32_t(value));
   XELOGAPU("WriteRegister(%.4X, %.8X)", r, value);

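Note: the (address, mask, size) triple handed to AddVirtualMappedRange is matched exactly the way MMIOHandler::CheckLoad does it further down in this commit — the guest's 32-bit virtual address is ANDed with the mask and compared against the range base. A minimal standalone sketch (types simplified; the matching logic is from the diff, the surrounding scaffolding is illustrative):

    #include <cstdint>
    #include <vector>

    typedef uint64_t (*MMIOReadCallback)(void* context, uint32_t addr);

    struct MMIORange {
      uint32_t address;  // e.g. 0x7FEA0000 for the audio registers
      uint32_t mask;     // e.g. 0xFFFF0000: only the top 16 bits must match
      uint32_t size;
      void* context;
      MMIOReadCallback read;
    };

    // Returns true when virtual_address falls inside a registered range,
    // mirroring MMIOHandler::CheckLoad in this commit.
    bool CheckLoad(const std::vector<MMIORange>& ranges,
                   uint32_t virtual_address, uint64_t* out_value) {
      for (const auto& range : ranges) {
        if ((virtual_address & range.mask) == range.address) {
          *out_value = range.read(range.context, virtual_address);
          return true;
        }
      }
      return false;
    }
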
@@ -49,8 +49,8 @@ class AudioSystem {
                             AudioDriver** out_driver) = 0;
   virtual void DestroyDriver(AudioDriver* driver) = 0;
 
-  virtual uint64_t ReadRegister(uint64_t addr);
-  virtual void WriteRegister(uint64_t addr, uint64_t value);
+  virtual uint64_t ReadRegister(uint32_t addr);
+  virtual void WriteRegister(uint32_t addr, uint64_t value);
 
  protected:
   virtual void Initialize();
@@ -58,10 +58,10 @@ class AudioSystem {
  private:
   void ThreadStart();
 
-  static uint64_t MMIOReadRegisterThunk(AudioSystem* as, uint64_t addr) {
+  static uint64_t MMIOReadRegisterThunk(AudioSystem* as, uint32_t addr) {
     return as->ReadRegister(addr);
   }
-  static void MMIOWriteRegisterThunk(AudioSystem* as, uint64_t addr,
+  static void MMIOWriteRegisterThunk(AudioSystem* as, uint32_t addr,
                                      uint64_t value) {
     as->WriteRegister(addr, value);
   }

@@ -23,9 +23,11 @@ namespace cpu {
 MMIOHandler* MMIOHandler::global_handler_ = nullptr;
 
 // Implemented in the platform cc file.
-std::unique_ptr<MMIOHandler> CreateMMIOHandler(uint8_t* mapping_base);
+std::unique_ptr<MMIOHandler> CreateMMIOHandler(uint8_t* virtual_membase,
+                                               uint8_t* physical_membase);
 
-std::unique_ptr<MMIOHandler> MMIOHandler::Install(uint8_t* mapping_base) {
+std::unique_ptr<MMIOHandler> MMIOHandler::Install(uint8_t* virtual_membase,
+                                                  uint8_t* physical_membase) {
   // There can be only one handler at a time.
   assert_null(global_handler_);
   if (global_handler_) {
@@ -33,7 +35,7 @@ std::unique_ptr<MMIOHandler> MMIOHandler::Install(uint8_t* mapping_base) {
   }
 
   // Create the platform-specific handler.
-  auto handler = CreateMMIOHandler(mapping_base);
+  auto handler = CreateMMIOHandler(virtual_membase, physical_membase);
 
   // Platform-specific initialization for the handler.
   if (!handler->Initialize()) {
@@ -49,45 +51,44 @@ MMIOHandler::~MMIOHandler() {
   global_handler_ = nullptr;
 }
 
-bool MMIOHandler::RegisterRange(uint64_t address, uint64_t mask, uint64_t size,
-                                void* context, MMIOReadCallback read_callback,
+bool MMIOHandler::RegisterRange(uint32_t virtual_address, uint32_t mask,
+                                uint32_t size, void* context,
+                                MMIOReadCallback read_callback,
                                 MMIOWriteCallback write_callback) {
   mapped_ranges_.push_back({
-      reinterpret_cast<uint64_t>(mapping_base_) | address,
-      0xFFFFFFFF00000000ull | mask, size, context, read_callback,
-      write_callback,
+      virtual_address, mask, size, context, read_callback, write_callback,
   });
   return true;
 }
 
-bool MMIOHandler::CheckLoad(uint64_t address, uint64_t* out_value) {
+bool MMIOHandler::CheckLoad(uint32_t virtual_address, uint64_t* out_value) {
   for (const auto& range : mapped_ranges_) {
-    if (((address | (uint64_t)mapping_base_) & range.mask) == range.address) {
-      *out_value = static_cast<uint32_t>(range.read(range.context, address));
+    if ((virtual_address & range.mask) == range.address) {
+      *out_value =
+          static_cast<uint32_t>(range.read(range.context, virtual_address));
       return true;
     }
   }
   return false;
 }
 
-bool MMIOHandler::CheckStore(uint64_t address, uint64_t value) {
+bool MMIOHandler::CheckStore(uint32_t virtual_address, uint64_t value) {
   for (const auto& range : mapped_ranges_) {
-    if (((address | (uint64_t)mapping_base_) & range.mask) == range.address) {
-      range.write(range.context, address, value);
+    if ((virtual_address & range.mask) == range.address) {
+      range.write(range.context, virtual_address, value);
      return true;
     }
   }
   return false;
 }
 
-uintptr_t MMIOHandler::AddWriteWatch(uint32_t guest_address, size_t length,
-                                     WriteWatchCallback callback,
-                                     void* callback_context,
-                                     void* callback_data) {
+uintptr_t MMIOHandler::AddPhysicalWriteWatch(uint32_t guest_address,
+                                             size_t length,
+                                             WriteWatchCallback callback,
+                                             void* callback_context,
+                                             void* callback_data) {
   uint32_t base_address = guest_address;
   if (base_address > 0xA0000000) {
     base_address -= 0xA0000000;
   }
   assert_true(base_address < 0x1FFFFFFF);
 
   // Add to table. The slot reservation may evict a previous watch, which
   // could include our target, so we do it first.
@@ -102,29 +103,33 @@ uintptr_t MMIOHandler::AddPhysicalWriteWatch(uint32_t guest_address,
   write_watch_mutex_.unlock();
 
   // Make the desired range read only under all address spaces.
-  auto host_address = mapping_base_ + base_address;
   DWORD old_protect;
-  VirtualProtect(host_address, length, PAGE_READONLY, &old_protect);
-  VirtualProtect(host_address + 0xA0000000, length, PAGE_READONLY,
-                 &old_protect);
-  VirtualProtect(host_address + 0xC0000000, length, PAGE_READONLY,
-                 &old_protect);
-  VirtualProtect(host_address + 0xE0000000, length, PAGE_READONLY,
-                 &old_protect);
+  VirtualProtect(physical_membase_ + entry->address, entry->length,
+                 PAGE_READONLY, &old_protect);
+  VirtualProtect(virtual_membase_ + entry->address, entry->length,
+                 PAGE_READONLY, &old_protect);
+  VirtualProtect(virtual_membase_ + 0xA0000000 + entry->address, entry->length,
+                 PAGE_READONLY, &old_protect);
+  VirtualProtect(virtual_membase_ + 0xC0000000 + entry->address, entry->length,
+                 PAGE_READONLY, &old_protect);
+  VirtualProtect(virtual_membase_ + 0xE0000000 + entry->address, entry->length,
+                 PAGE_READONLY, &old_protect);
 
   return reinterpret_cast<uintptr_t>(entry);
 }
 
 void MMIOHandler::ClearWriteWatch(WriteWatchEntry* entry) {
-  auto host_address = mapping_base_ + entry->address;
   DWORD old_protect;
-  VirtualProtect(host_address, entry->length, PAGE_READWRITE, &old_protect);
-  VirtualProtect(host_address + 0xA0000000, entry->length, PAGE_READWRITE,
-                 &old_protect);
-  VirtualProtect(host_address + 0xC0000000, entry->length, PAGE_READWRITE,
-                 &old_protect);
-  VirtualProtect(host_address + 0xE0000000, entry->length, PAGE_READWRITE,
-                 &old_protect);
+  VirtualProtect(physical_membase_ + entry->address, entry->length,
+                 PAGE_READWRITE, &old_protect);
+  VirtualProtect(virtual_membase_ + entry->address, entry->length,
+                 PAGE_READWRITE, &old_protect);
+  VirtualProtect(virtual_membase_ + 0xA0000000 + entry->address, entry->length,
+                 PAGE_READWRITE, &old_protect);
+  VirtualProtect(virtual_membase_ + 0xC0000000 + entry->address, entry->length,
+                 PAGE_READWRITE, &old_protect);
+  VirtualProtect(virtual_membase_ + 0xE0000000 + entry->address, entry->length,
+                 PAGE_READWRITE, &old_protect);
 }
 
 void MMIOHandler::CancelWriteWatch(uintptr_t watch_handle) {
@@ -145,17 +150,16 @@ void MMIOHandler::CancelWriteWatch(uintptr_t watch_handle) {
 }
 
 bool MMIOHandler::CheckWriteWatch(void* thread_state, uint64_t fault_address) {
-  uint32_t guest_address = uint32_t(fault_address - uintptr_t(mapping_base_));
-  uint32_t base_address = guest_address;
-  if (base_address > 0xA0000000) {
-    base_address -= 0xA0000000;
+  uint32_t physical_address = uint32_t(fault_address);
+  if (physical_address > 0x1FFFFFFF) {
+    physical_address &= 0x1FFFFFFF;
   }
   std::list<WriteWatchEntry*> pending_invalidates;
   write_watch_mutex_.lock();
   for (auto it = write_watches_.begin(); it != write_watches_.end();) {
     auto entry = *it;
-    if (entry->address <= base_address &&
-        entry->address + entry->length > base_address) {
+    if (entry->address <= physical_address &&
+        entry->address + entry->length > physical_address) {
       // Hit!
       pending_invalidates.push_back(entry);
       // TODO(benvanik): outside of lock?
@@ -176,7 +180,7 @@ bool MMIOHandler::CheckWriteWatch(void* thread_state, uint64_t fault_address) {
     auto entry = pending_invalidates.back();
     pending_invalidates.pop_back();
     entry->callback(entry->callback_context, entry->callback_data,
-                    guest_address);
+                    physical_address);
     delete entry;
   }
   // Range was watched, so lets eat this access violation.
@@ -185,18 +189,21 @@ bool MMIOHandler::CheckWriteWatch(void* thread_state, uint64_t fault_address) {
 
 bool MMIOHandler::HandleAccessFault(void* thread_state,
                                     uint64_t fault_address) {
-  if (fault_address < uint64_t(mapping_base_)) {
+  if (fault_address < uint64_t(virtual_membase_)) {
     // Quick kill anything below our mapping base.
     return false;
   }
 
   // Access violations are pretty rare, so we can do a linear search here.
+  // Only check if in the virtual range, as we only support virtual ranges.
   const MMIORange* range = nullptr;
-  for (const auto& test_range : mapped_ranges_) {
-    if ((fault_address & test_range.mask) == test_range.address) {
-      // Address is within the range of this mapping.
-      range = &test_range;
-      break;
+  if (fault_address < uint64_t(physical_membase_)) {
+    for (const auto& test_range : mapped_ranges_) {
+      if ((uint32_t(fault_address) & test_range.mask) == test_range.address) {
+        // Address is within the range of this mapping.
+        range = &test_range;
+        break;
+      }
     }
   }
   if (!range) {

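Note: the write-watch path now normalizes fault addresses into the physical range by masking with 0x1FFFFFFF, so a write through any of the mirrored views (0xA0000000 / 0xC0000000 / 0xE0000000) resolves to the same watch entry. A sketch of just that normalization and range test (entry layout abbreviated):

    #include <cstdint>

    struct WriteWatchEntry {
      uint32_t address;  // physical base of the watched range
      uint32_t length;
    };

    // Mirrors CheckWriteWatch: 0xA0001000, 0xC0001000 and 0xE0001000 all
    // collapse to physical 0x00001000 before the range comparison.
    bool HitsWatch(const WriteWatchEntry& entry, uint32_t fault_address) {
      uint32_t physical_address = fault_address;
      if (physical_address > 0x1FFFFFFF) {
        physical_address &= 0x1FFFFFFF;
      }
      return entry.address <= physical_address &&
             entry.address + entry.length > physical_address;
    }
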
@@ -18,8 +18,8 @@
 namespace xe {
 namespace cpu {
 
-typedef uint64_t (*MMIOReadCallback)(void* context, uint64_t addr);
-typedef void (*MMIOWriteCallback)(void* context, uint64_t addr, uint64_t value);
+typedef uint64_t (*MMIOReadCallback)(void* context, uint32_t addr);
+typedef void (*MMIOWriteCallback)(void* context, uint32_t addr, uint64_t value);
 
 typedef void (*WriteWatchCallback)(void* context_ptr, void* data_ptr,
                                    uint32_t address);
@@ -29,19 +29,20 @@ class MMIOHandler {
  public:
   virtual ~MMIOHandler();
 
-  static std::unique_ptr<MMIOHandler> Install(uint8_t* mapping_base);
+  static std::unique_ptr<MMIOHandler> Install(uint8_t* virtual_membase,
+                                              uint8_t* physical_membase);
   static MMIOHandler* global_handler() { return global_handler_; }
 
-  bool RegisterRange(uint64_t address, uint64_t mask, uint64_t size,
+  bool RegisterRange(uint32_t virtual_address, uint32_t mask, uint32_t size,
                      void* context, MMIOReadCallback read_callback,
                      MMIOWriteCallback write_callback);
 
-  bool CheckLoad(uint64_t address, uint64_t* out_value);
-  bool CheckStore(uint64_t address, uint64_t value);
+  bool CheckLoad(uint32_t virtual_address, uint64_t* out_value);
+  bool CheckStore(uint32_t virtual_address, uint64_t value);
 
-  uintptr_t AddWriteWatch(uint32_t guest_address, size_t length,
-                          WriteWatchCallback callback, void* callback_context,
-                          void* callback_data);
+  uintptr_t AddPhysicalWriteWatch(uint32_t guest_address, size_t length,
+                                  WriteWatchCallback callback,
+                                  void* callback_context, void* callback_data);
   void CancelWriteWatch(uintptr_t watch_handle);
 
  public:
@@ -56,7 +57,9 @@ class MMIOHandler {
     void* callback_data;
   };
 
-  MMIOHandler(uint8_t* mapping_base) : mapping_base_(mapping_base) {}
+  MMIOHandler(uint8_t* virtual_membase, uint8_t* physical_membase)
+      : virtual_membase_(virtual_membase),
+        physical_membase_(physical_membase) {}
 
   virtual bool Initialize() = 0;
 
@@ -68,12 +71,13 @@ class MMIOHandler {
   virtual uint64_t* GetThreadStateRegPtr(void* thread_state_ptr,
                                          int32_t be_reg_index) = 0;
 
-  uint8_t* mapping_base_;
+  uint8_t* virtual_membase_;
+  uint8_t* physical_membase_;
 
   struct MMIORange {
-    uint64_t address;
-    uint64_t mask;
-    uint64_t size;
+    uint32_t address;
+    uint32_t mask;
+    uint32_t size;
     void* context;
     MMIOReadCallback read;
     MMIOWriteCallback write;

@@ -11,6 +11,10 @@
 
 #include <Windows.h>
 
+namespace xe {
+void CrashDump();
+}  // namespace xe
+
 namespace xe {
 namespace cpu {
 
@@ -18,7 +22,8 @@ LONG CALLBACK MMIOExceptionHandler(PEXCEPTION_POINTERS ex_info);
 
 class WinMMIOHandler : public MMIOHandler {
  public:
-  WinMMIOHandler(uint8_t* mapping_base) : MMIOHandler(mapping_base) {}
+  WinMMIOHandler(uint8_t* virtual_membase, uint8_t* physical_membase)
+      : MMIOHandler(virtual_membase, physical_membase) {}
   ~WinMMIOHandler() override;
 
  protected:
@@ -30,8 +35,9 @@ class WinMMIOHandler : public MMIOHandler {
                                  int32_t be_reg_index) override;
 };
 
-std::unique_ptr<MMIOHandler> CreateMMIOHandler(uint8_t* mapping_base) {
-  return std::make_unique<WinMMIOHandler>(mapping_base);
+std::unique_ptr<MMIOHandler> CreateMMIOHandler(uint8_t* virtual_membase,
+                                               uint8_t* physical_membase) {
+  return std::make_unique<WinMMIOHandler>(virtual_membase, physical_membase);
 }
 
 bool WinMMIOHandler::Initialize() {
@@ -67,6 +73,7 @@ LONG CALLBACK MMIOExceptionHandler(PEXCEPTION_POINTERS ex_info) {
   } else {
     // Failed to handle; continue search for a handler (and die if no other
     // handler is found).
+    xe::CrashDump();
     return EXCEPTION_CONTINUE_SEARCH;
   }
 }

@@ -152,6 +152,11 @@ bool Processor::Setup() {
   interrupt_thread_state_->set_name("Interrupt");
   interrupt_thread_block_ = memory_->SystemHeapAlloc(2048);
   interrupt_thread_state_->context()->r[13] = interrupt_thread_block_;
+  XELOGI("Interrupt Thread %X Stack: %.8X-%.8X",
+         interrupt_thread_state_->thread_id(),
+         interrupt_thread_state_->stack_address(),
+         interrupt_thread_state_->stack_address() +
+             interrupt_thread_state_->stack_size());
 
   return true;
 }

@@ -30,8 +30,11 @@ bool RawModule::LoadFile(uint32_t base_address, const std::wstring& path) {
   // Allocate memory.
   // Since we have no real heap just load it wherever.
   base_address_ = base_address;
+  memory_->LookupHeap(base_address_)
+      ->AllocFixed(base_address_, file_length, 0,
+                   kMemoryAllocationReserve | kMemoryAllocationCommit,
+                   kMemoryProtectRead | kMemoryProtectWrite);
   uint8_t* p = memory_->TranslateVirtual(base_address_);
   std::memset(p, 0, file_length);
 
   // Read into memory.
   fread(p, file_length, 1, file);

@@ -64,8 +64,6 @@ class TestFunction {
   void Run(std::function<void(PPCContext*)> pre_call,
            std::function<void(PPCContext*)> post_call) {
     for (auto& processor : processors) {
-      memory->Zero(0, memory_size);
-
       xe::cpu::Function* fn;
       processor->ResolveFunction(0x1000, &fn);
 

@@ -10,6 +10,7 @@
 #include "xenia/cpu/thread_state.h"
 
+#include "xenia/base/assert.h"
 #include "xenia/base/logging.h"
 #include "xenia/base/threading.h"
 #include "xenia/cpu/processor.h"
 #include "xenia/debug/debugger.h"
@@ -49,12 +50,19 @@ ThreadState::ThreadState(Processor* processor, uint32_t thread_id,
     uint32_t stack_alignment = (stack_size & 0xF000) ? 0x1000 : 0x10000;
     uint32_t stack_padding = stack_alignment * 1;
     uint32_t actual_stack_size = stack_padding + stack_size;
-    stack_address_ = memory()->SystemHeapAlloc(actual_stack_size, stack_alignment);
-    assert_true(!(stack_address & 0xFFF));  // just to be safe
+    memory()
+        ->LookupHeapByType(false, 0x10000)
+        ->Alloc(actual_stack_size, stack_alignment,
+                kMemoryAllocationReserve | kMemoryAllocationCommit,
+                kMemoryProtectRead | kMemoryProtectWrite, true,
+                &stack_address_);
+    assert_true(!(stack_address_ & 0xFFF));  // just to be safe
     stack_position = stack_address_ + actual_stack_size;
     stack_allocated_ = true;
     memset(memory()->TranslateVirtual(stack_address_), 0xBE, actual_stack_size);
-    memory()->Protect(stack_address_, stack_padding, X_PAGE_NOACCESS);
+    memory()
+        ->LookupHeap(stack_address_)
+        ->Protect(stack_address_, stack_padding, kMemoryProtectNoAccess);
   } else {
     stack_address_ = stack_address;
     stack_position = stack_address_ + stack_size;
@@ -100,7 +108,7 @@ ThreadState::~ThreadState() {
 
   _aligned_free(context_);
   if (stack_allocated_) {
-    memory()->SystemHeapFree(stack_address_);
+    memory()->LookupHeap(stack_address_)->Decommit(stack_address_, stack_size_);
   }
 }
 

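Note: the stack setup above picks the alignment from the requested size and reserves one extra alignment's worth of padding, which is then protected as a no-access guard region. Worked through: a 16 KB request (0x4000) has bits set in 0xF000, so it gets 4 KB alignment and 4 KB of padding; a 64 KB request (0x10000) does not, so it gets 64 KB of both. A sketch of just the arithmetic:

    #include <cstdint>

    struct StackLayout {
      uint32_t alignment;
      uint32_t padding;      // guard region, protected no-access
      uint32_t actual_size;  // padding + requested size
    };

    // Same computation as ThreadState::ThreadState in this commit.
    StackLayout ComputeStackLayout(uint32_t stack_size) {
      uint32_t stack_alignment = (stack_size & 0xF000) ? 0x1000 : 0x10000;
      uint32_t stack_padding = stack_alignment * 1;
      return {stack_alignment, stack_padding, stack_padding + stack_size};
    }

    // Example: ComputeStackLayout(0x4000) == {0x1000, 0x1000, 0x5000}.
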
@@ -456,7 +456,7 @@ void CommandProcessor::EnableReadPointerWriteBack(uint32_t ptr,
                                                   uint32_t block_size) {
   // CP_RB_RPTR_ADDR Ring Buffer Read Pointer Address 0x70C
   // ptr = RB_RPTR_ADDR, pointer to write back the address to.
-  read_ptr_writeback_ptr_ = (primary_buffer_ptr_ & ~0x1FFFFFFF) + ptr;
+  read_ptr_writeback_ptr_ = ptr;
   // CP_RB_CNTL Ring Buffer Control 0x704
   // block_size = RB_BLKSZ, number of quadwords read between updates of the
   // read pointer.
@@ -966,7 +966,7 @@ bool CommandProcessor::ExecutePacketType3_XE_SWAP(RingbufferReader* reader,
 bool CommandProcessor::ExecutePacketType3_INDIRECT_BUFFER(
     RingbufferReader* reader, uint32_t packet, uint32_t count) {
   // indirect buffer dispatch
-  uint32_t list_ptr = reader->Read();
+  uint32_t list_ptr = CpuToGpu(reader->Read());
   uint32_t list_length = reader->Read();
   ExecuteIndirectBuffer(GpuToCpu(list_ptr), list_length);
   return true;
@@ -993,7 +993,7 @@ bool CommandProcessor::ExecutePacketType3_WAIT_REG_MEM(RingbufferReader* reader,
     poll_reg_addr &= ~0x3;
     value = xe::load<uint32_t>(memory_->TranslatePhysical(poll_reg_addr));
     value = GpuSwap(value, endianness);
-    trace_writer_.WriteMemoryRead(poll_reg_addr, 4);
+    trace_writer_.WriteMemoryRead(CpuToGpu(poll_reg_addr), 4);
   } else {
     // Register.
     assert_true(poll_reg_addr < RegisterFile::kRegisterCount);
@@ -1093,7 +1093,7 @@ bool CommandProcessor::ExecutePacketType3_COND_WRITE(RingbufferReader* reader,
     // Memory.
     auto endianness = static_cast<Endian>(poll_reg_addr & 0x3);
     poll_reg_addr &= ~0x3;
-    trace_writer_.WriteMemoryRead(poll_reg_addr, 4);
+    trace_writer_.WriteMemoryRead(CpuToGpu(poll_reg_addr), 4);
     value = xe::load<uint32_t>(memory_->TranslatePhysical(poll_reg_addr));
     value = GpuSwap(value, endianness);
   } else {
@@ -1136,7 +1136,7 @@ bool CommandProcessor::ExecutePacketType3_COND_WRITE(RingbufferReader* reader,
       write_reg_addr &= ~0x3;
       write_data = GpuSwap(write_data, endianness);
       xe::store(memory_->TranslatePhysical(write_reg_addr), write_data);
-      trace_writer_.WriteMemoryWrite(write_reg_addr, 4);
+      trace_writer_.WriteMemoryWrite(CpuToGpu(write_reg_addr), 4);
     } else {
       // Register.
       WriteRegister(write_reg_addr, write_data);
@@ -1182,7 +1182,7 @@ bool CommandProcessor::ExecutePacketType3_EVENT_WRITE_SHD(
   address &= ~0x3;
   data_value = GpuSwap(data_value, endianness);
   xe::store(memory_->TranslatePhysical(address), data_value);
-  trace_writer_.WriteMemoryWrite(address, 4);
+  trace_writer_.WriteMemoryWrite(CpuToGpu(address), 4);
   return true;
 }
 
@@ -1208,7 +1208,7 @@ bool CommandProcessor::ExecutePacketType3_EVENT_WRITE_EXT(
   xe::copy_and_swap_16_aligned(
       reinterpret_cast<uint16_t*>(memory_->TranslatePhysical(address)), extents,
       xe::countof(extents));
-  trace_writer_.WriteMemoryWrite(address, sizeof(extents));
+  trace_writer_.WriteMemoryWrite(CpuToGpu(address), sizeof(extents));
   return true;
 }
 
@@ -1364,7 +1364,7 @@ bool CommandProcessor::ExecutePacketType3_LOAD_ALU_CONSTANT(
       assert_always();
       return true;
   }
-  trace_writer_.WriteMemoryRead(address, size_dwords * 4);
+  trace_writer_.WriteMemoryRead(CpuToGpu(address), size_dwords * 4);
   for (uint32_t n = 0; n < size_dwords; n++, index++) {
     uint32_t data = xe::load_and_swap<uint32_t>(
         memory_->TranslatePhysical(address + n * 4));
@@ -1395,7 +1395,7 @@ bool CommandProcessor::ExecutePacketType3_IM_LOAD(RingbufferReader* reader,
   uint32_t start = start_size >> 16;
   uint32_t size_dwords = start_size & 0xFFFF;  // dwords
   assert_true(start == 0);
-  trace_writer_.WriteMemoryRead(addr, size_dwords * 4);
+  trace_writer_.WriteMemoryRead(CpuToGpu(addr), size_dwords * 4);
   LoadShader(shader_type, addr, memory_->TranslatePhysical<uint32_t*>(addr),
              size_dwords);
   return true;
@@ -2106,29 +2106,31 @@ CommandProcessor::UpdateStatus CommandProcessor::UpdateBlendState() {
 
   draw_batcher_.Flush(DrawBatcher::FlushMode::kStateChange);
 
-  static const GLenum blend_map[] = {/* 0 */ GL_ZERO,
-                                     /* 1 */ GL_ONE,
-                                     /* 2 */ GL_ZERO,  // ?
-                                     /* 3 */ GL_ZERO,  // ?
-                                     /* 4 */ GL_SRC_COLOR,
-                                     /* 5 */ GL_ONE_MINUS_SRC_COLOR,
-                                     /* 6 */ GL_SRC_ALPHA,
-                                     /* 7 */ GL_ONE_MINUS_SRC_ALPHA,
-                                     /* 8 */ GL_DST_COLOR,
-                                     /* 9 */ GL_ONE_MINUS_DST_COLOR,
-                                     /* 10 */ GL_DST_ALPHA,
-                                     /* 11 */ GL_ONE_MINUS_DST_ALPHA,
-                                     /* 12 */ GL_CONSTANT_COLOR,
-                                     /* 13 */ GL_ONE_MINUS_CONSTANT_COLOR,
-                                     /* 14 */ GL_CONSTANT_ALPHA,
-                                     /* 15 */ GL_ONE_MINUS_CONSTANT_ALPHA,
-                                     /* 16 */ GL_SRC_ALPHA_SATURATE,
+  static const GLenum blend_map[] = {
+      /* 0 */ GL_ZERO,
+      /* 1 */ GL_ONE,
+      /* 2 */ GL_ZERO,  // ?
+      /* 3 */ GL_ZERO,  // ?
+      /* 4 */ GL_SRC_COLOR,
+      /* 5 */ GL_ONE_MINUS_SRC_COLOR,
+      /* 6 */ GL_SRC_ALPHA,
+      /* 7 */ GL_ONE_MINUS_SRC_ALPHA,
+      /* 8 */ GL_DST_COLOR,
+      /* 9 */ GL_ONE_MINUS_DST_COLOR,
+      /* 10 */ GL_DST_ALPHA,
+      /* 11 */ GL_ONE_MINUS_DST_ALPHA,
+      /* 12 */ GL_CONSTANT_COLOR,
+      /* 13 */ GL_ONE_MINUS_CONSTANT_COLOR,
+      /* 14 */ GL_CONSTANT_ALPHA,
+      /* 15 */ GL_ONE_MINUS_CONSTANT_ALPHA,
+      /* 16 */ GL_SRC_ALPHA_SATURATE,
   };
-  static const GLenum blend_op_map[] = {/* 0 */ GL_FUNC_ADD,
-                                        /* 1 */ GL_FUNC_SUBTRACT,
-                                        /* 2 */ GL_MIN,
-                                        /* 3 */ GL_MAX,
-                                        /* 4 */ GL_FUNC_REVERSE_SUBTRACT,
+  static const GLenum blend_op_map[] = {
+      /* 0 */ GL_FUNC_ADD,
+      /* 1 */ GL_FUNC_SUBTRACT,
+      /* 2 */ GL_MIN,
+      /* 3 */ GL_MAX,
+      /* 4 */ GL_FUNC_REVERSE_SUBTRACT,
   };
   for (int i = 0; i < xe::countof(regs.rb_blendcontrol); ++i) {
     uint32_t blend_control = regs.rb_blendcontrol[i];
@@ -2181,23 +2183,25 @@ CommandProcessor::UpdateStatus CommandProcessor::UpdateDepthStencilState() {
 
   draw_batcher_.Flush(DrawBatcher::FlushMode::kStateChange);
 
-  static const GLenum compare_func_map[] = {/* 0 */ GL_NEVER,
-                                            /* 1 */ GL_LESS,
-                                            /* 2 */ GL_EQUAL,
-                                            /* 3 */ GL_LEQUAL,
-                                            /* 4 */ GL_GREATER,
-                                            /* 5 */ GL_NOTEQUAL,
-                                            /* 6 */ GL_GEQUAL,
-                                            /* 7 */ GL_ALWAYS,
+  static const GLenum compare_func_map[] = {
+      /* 0 */ GL_NEVER,
+      /* 1 */ GL_LESS,
+      /* 2 */ GL_EQUAL,
+      /* 3 */ GL_LEQUAL,
+      /* 4 */ GL_GREATER,
+      /* 5 */ GL_NOTEQUAL,
+      /* 6 */ GL_GEQUAL,
+      /* 7 */ GL_ALWAYS,
   };
-  static const GLenum stencil_op_map[] = {/* 0 */ GL_KEEP,
-                                          /* 1 */ GL_ZERO,
-                                          /* 2 */ GL_REPLACE,
-                                          /* 3 */ GL_INCR_WRAP,
-                                          /* 4 */ GL_DECR_WRAP,
-                                          /* 5 */ GL_INVERT,
-                                          /* 6 */ GL_INCR,
-                                          /* 7 */ GL_DECR,
+  static const GLenum stencil_op_map[] = {
+      /* 0 */ GL_KEEP,
+      /* 1 */ GL_ZERO,
+      /* 2 */ GL_REPLACE,
+      /* 3 */ GL_INCR_WRAP,
+      /* 4 */ GL_DECR_WRAP,
+      /* 5 */ GL_INVERT,
+      /* 6 */ GL_INCR,
+      /* 7 */ GL_DECR,
   };
   // A2XX_RB_DEPTHCONTROL_Z_ENABLE
   if (regs.rb_depthcontrol & 0x00000002) {

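Note: the trace-writer calls above start wrapping addresses in CpuToGpu()/GpuToCpu(); their definitions are not part of this diff. Given the 512 MB physical space used elsewhere in the commit, a plausible reading is that CpuToGpu strips the CPU view bits down to a physical GPU address. This is a hypothetical sketch, not the commit's actual implementation:

    #include <cstdint>

    // Hypothetical: reduce a CPU physical-view address (0xA0000000+ mirrors)
    // to the 512 MB GPU-visible physical range.
    inline uint32_t CpuToGpu(uint32_t cpu_address) {
      return cpu_address & 0x1FFFFFFF;
    }
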
@@ -74,7 +74,7 @@ X_STATUS GL4GraphicsSystem::Setup(cpu::Processor* processor,
       [this](const SwapParameters& swap_params) { SwapHandler(swap_params); });
 
   // Let the processor know we want register access callbacks.
-  memory_->AddMappedRange(
+  memory_->AddVirtualMappedRange(
       0x7FC80000, 0xFFFF0000, 0x0000FFFF, this,
       reinterpret_cast<cpu::MMIOReadCallback>(MMIOReadRegisterThunk),
       reinterpret_cast<cpu::MMIOWriteCallback>(MMIOWriteRegisterThunk));
@@ -275,7 +275,7 @@ void GL4GraphicsSystem::SwapHandler(const SwapParameters& swap_params) {
   });
 }
 
-uint64_t GL4GraphicsSystem::ReadRegister(uint64_t addr) {
+uint64_t GL4GraphicsSystem::ReadRegister(uint32_t addr) {
   uint32_t r = addr & 0xFFFF;
 
   switch (r) {
@@ -295,7 +295,7 @@ uint64_t GL4GraphicsSystem::ReadRegister(uint64_t addr) {
   return register_file_.values[r].u32;
 }
 
-void GL4GraphicsSystem::WriteRegister(uint64_t addr, uint64_t value) {
+void GL4GraphicsSystem::WriteRegister(uint32_t addr, uint64_t value) {
   uint32_t r = addr & 0xFFFF;
 
   switch (r) {

@@ -50,13 +50,13 @@ class GL4GraphicsSystem : public GraphicsSystem {
  private:
   void MarkVblank();
   void SwapHandler(const SwapParameters& swap_params);
-  uint64_t ReadRegister(uint64_t addr);
-  void WriteRegister(uint64_t addr, uint64_t value);
+  uint64_t ReadRegister(uint32_t addr);
+  void WriteRegister(uint32_t addr, uint64_t value);
 
-  static uint64_t MMIOReadRegisterThunk(GL4GraphicsSystem* gs, uint64_t addr) {
+  static uint64_t MMIOReadRegisterThunk(GL4GraphicsSystem* gs, uint32_t addr) {
     return gs->ReadRegister(addr);
   }
-  static void MMIOWriteRegisterThunk(GL4GraphicsSystem* gs, uint64_t addr,
+  static void MMIOWriteRegisterThunk(GL4GraphicsSystem* gs, uint32_t addr,
                                      uint64_t value) {
     gs->WriteRegister(addr, value);
   }

@@ -490,7 +490,7 @@ TextureCache::TextureEntry* TextureCache::LookupOrInsertTexture(
   // Add a write watch. If any data in the given range is touched we'll get a
   // callback and evict the texture. We could reuse the storage, though the
   // driver is likely in a better position to pool that kind of stuff.
-  entry->write_watch_handle = memory_->AddWriteWatch(
+  entry->write_watch_handle = memory_->AddPhysicalWriteWatch(
       texture_info.guest_address, texture_info.input_length,
       [](void* context_ptr, void* data_ptr, uint32_t address) {
         auto self = reinterpret_cast<TextureCache*>(context_ptr);
@@ -735,7 +735,8 @@ bool TextureCache::UploadTexture2D(GLuint texture,
     auto bpp = (bytes_per_block >> 2) +
                ((bytes_per_block >> 1) >> (bytes_per_block >> 2));
     for (uint32_t y = 0, output_base_offset = 0;
-         y < texture_info.size_2d.block_height;
+         y < std::min(texture_info.size_2d.block_height,
+                      texture_info.size_2d.logical_height);
          y++, output_base_offset += texture_info.size_2d.output_pitch) {
       auto input_base_offset = TextureInfo::TiledOffset2DOuter(
           offset_y + y, (texture_info.size_2d.input_width /

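Note: AddPhysicalWriteWatch is the texture cache's invalidation hook — the callback fires on the first write anywhere in the watched physical range. Per CheckWriteWatch above, the watch is one-shot (the handler deletes the entry after invoking the callback), so the cache must re-arm it after the next upload. A self-contained sketch of the call shape (declaration from this commit; the wrapper is illustrative):

    #include <cstdint>

    typedef void (*WriteWatchCallback)(void* context_ptr, void* data_ptr,
                                       uint32_t address);

    // Stand-in for Memory::AddPhysicalWriteWatch as declared in this commit.
    uintptr_t AddPhysicalWriteWatch(uint32_t physical_address, uint32_t length,
                                    WriteWatchCallback callback,
                                    void* callback_context, void* callback_data);

    uintptr_t WatchTexture(uint32_t guest_address, uint32_t length, void* cache,
                           void* entry) {
      return AddPhysicalWriteWatch(
          guest_address, length,
          [](void* context_ptr, void* data_ptr, uint32_t address) {
            // Evict/invalidate whatever data_ptr describes; address is the
            // physical address of the first write into the range.
          },
          cache, entry);
    }
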
@@ -771,7 +771,15 @@ class TracePlayer : public TraceReader {
       : loop_(loop),
         graphics_system_(graphics_system),
         current_frame_index_(0),
-        current_command_index_(-1) {}
+        current_command_index_(-1) {
+    // Need to allocate all of physical memory so that we can write to it
+    // during playback.
+    graphics_system_->memory()
+        ->LookupHeapByType(true, 4096)
+        ->AllocFixed(0, 0x1FFFFFFF, 4096,
+                     kMemoryAllocationReserve | kMemoryAllocationCommit,
+                     kMemoryProtectRead | kMemoryProtectWrite);
+  }
   ~TracePlayer() = default;
 
   GraphicsSystem* graphics_system() const { return graphics_system_; }

@@ -213,6 +213,9 @@ X_STATUS XThread::Create() {
   thread_state_ =
       new ThreadState(kernel_state()->processor(), thread_id_, 0,
                       creation_params_.stack_size, thread_state_address_);
+  XELOGI("XThread%04X (%X) Stack: %.8X-%.8X", handle(),
+         thread_state_->thread_id(), thread_state_->stack_address(),
+         thread_state_->stack_address() + thread_state_->stack_size());
 
   xe::store_and_swap<uint32_t>(
       p + 0x05C, thread_state_->stack_address() + thread_state_->stack_size());

@@ -536,8 +536,12 @@ int xe_xex2_read_image_uncompressed(const xe_xex2_header_t *header,
   // Allocate in-place the XEX memory.
   const uint32_t exe_length = xex_length - header->exe_offset;
   uint32_t uncompressed_size = exe_length;
-  uint32_t alloc_result = memory->HeapAlloc(
-      header->exe_address, uncompressed_size, xe::MEMORY_FLAG_ZERO);
+  bool alloc_result =
+      memory->LookupHeap(header->exe_address)
+          ->AllocFixed(
+              header->exe_address, uncompressed_size, 4096,
+              xe::kMemoryAllocationReserve | xe::kMemoryAllocationCommit,
+              xe::kMemoryProtectRead | xe::kMemoryProtectWrite);
   if (!alloc_result) {
     XELOGE("Unable to allocate XEX memory at %.8X-%.8X.", header->exe_address,
            uncompressed_size);
@@ -588,14 +592,18 @@ int xe_xex2_read_image_basic_compressed(const xe_xex2_header_t *header,
   // Calculate the total size of the XEX image from its headers.
   uint32_t total_size = 0;
   for (uint32_t i = 0; i < header->section_count; i++) {
-    xe_xex2_section_t& section = header->sections[i];
+    xe_xex2_section_t &section = header->sections[i];
 
     total_size += section.info.page_count * section.page_size;
   }
 
   // Allocate in-place the XEX memory.
-  uint32_t alloc_result = memory->HeapAlloc(
-      header->exe_address, total_size, xe::MEMORY_FLAG_ZERO);
+  bool alloc_result =
+      memory->LookupHeap(header->exe_address)
+          ->AllocFixed(
+              header->exe_address, total_size, 4096,
+              xe::kMemoryAllocationReserve | xe::kMemoryAllocationCommit,
+              xe::kMemoryProtectRead | xe::kMemoryProtectWrite);
   if (!alloc_result) {
     XELOGE("Unable to allocate XEX memory at %.8X-%.8X.", header->exe_address,
            uncompressed_size);
@@ -731,8 +739,12 @@ int xe_xex2_read_image_compressed(const xe_xex2_header_t *header,
   }
 
   // Allocate in-place the XEX memory.
-  uint32_t alloc_result = memory->HeapAlloc(
-      header->exe_address, uncompressed_size, xe::MEMORY_FLAG_ZERO);
+  bool alloc_result =
+      memory->LookupHeap(header->exe_address)
+          ->AllocFixed(
+              header->exe_address, uncompressed_size, 4096,
+              xe::kMemoryAllocationReserve | xe::kMemoryAllocationCommit,
+              xe::kMemoryProtectRead | xe::kMemoryProtectWrite);
   if (!alloc_result) {
     XELOGE("Unable to allocate XEX memory at %.8X-%.8X.", header->exe_address,
            uncompressed_size);
@@ -1084,4 +1096,4 @@ uint32_t xe_xex2_lookup_export(xe_xex2_ref xex, uint16_t ordinal) {
 
   // No match
   return 0;
 }

@@ -41,7 +41,7 @@ SHIM_CALL XGetAVPack_shim(PPCContext* ppc_state, KernelState* state) {
 SHIM_CALL XGetGameRegion_shim(PPCContext* ppc_state, KernelState* state) {
   XELOGD("XGetGameRegion()");
 
-  SHIM_SET_RETURN_64(XEX_REGION_ALL);
+  SHIM_SET_RETURN_64(0xFFFF);
 }
 
 SHIM_CALL XGetLanguage_shim(PPCContext* ppc_state, KernelState* state) {

@@ -17,19 +17,55 @@
 namespace xe {
 namespace kernel {
 
+uint32_t ToXdkProtectFlags(uint32_t protect) {
+  uint32_t result = 0;
+  if (!(protect & kMemoryProtectRead) && !(protect & kMemoryProtectWrite)) {
+    result = X_PAGE_NOACCESS;
+  } else if ((protect & kMemoryProtectRead) &&
+             !(protect & kMemoryProtectWrite)) {
+    result = X_PAGE_READONLY;
+  } else {
+    result = X_PAGE_READWRITE;
+  }
+  if (protect & kMemoryProtectNoCache) {
+    result = X_PAGE_NOCACHE;
+  }
+  if (protect & kMemoryProtectWriteCombine) {
+    result = X_PAGE_WRITECOMBINE;
+  }
+  return result;
+}
+
+uint32_t FromXdkProtectFlags(uint32_t protect) {
+  uint32_t result = 0;
+  if ((protect & X_PAGE_READONLY) | (protect & X_PAGE_EXECUTE_READ)) {
+    result |= kMemoryProtectRead;
+  } else if ((protect & X_PAGE_READWRITE) |
+             (protect & X_PAGE_EXECUTE_READWRITE)) {
+    result |= kMemoryProtectRead | kMemoryProtectWrite;
+  }
+  if (protect & X_PAGE_NOCACHE) {
+    result |= kMemoryProtectNoCache;
+  }
+  if (protect & X_PAGE_WRITECOMBINE) {
+    result |= kMemoryProtectWriteCombine;
+  }
+  return result;
+}
+
 SHIM_CALL NtAllocateVirtualMemory_shim(PPCContext* ppc_state,
                                        KernelState* state) {
   uint32_t base_addr_ptr = SHIM_GET_ARG_32(0);
   uint32_t base_addr_value = SHIM_MEM_32(base_addr_ptr);
   uint32_t region_size_ptr = SHIM_GET_ARG_32(1);
   uint32_t region_size_value = SHIM_MEM_32(region_size_ptr);
-  uint32_t allocation_type = SHIM_GET_ARG_32(2);  // X_MEM_* bitmask
-  uint32_t protect_bits = SHIM_GET_ARG_32(3);     // X_PAGE_* bitmask
+  uint32_t alloc_type = SHIM_GET_ARG_32(2);    // X_MEM_* bitmask
+  uint32_t protect_bits = SHIM_GET_ARG_32(3);  // X_PAGE_* bitmask
   uint32_t unknown = SHIM_GET_ARG_32(4);
 
   XELOGD("NtAllocateVirtualMemory(%.8X(%.8X), %.8X(%.8X), %.8X, %.8X, %.8X)",
          base_addr_ptr, base_addr_value, region_size_ptr, region_size_value,
-         allocation_type, protect_bits, unknown);
+         alloc_type, protect_bits, unknown);
 
   // NTSTATUS
   // _Inout_ PVOID *BaseAddress,
@@ -52,12 +88,12 @@ SHIM_CALL NtAllocateVirtualMemory_shim(PPCContext* ppc_state,
     return;
   }
   // Check allocation type.
-  if (!(allocation_type & (X_MEM_COMMIT | X_MEM_RESET | X_MEM_RESERVE))) {
+  if (!(alloc_type & (X_MEM_COMMIT | X_MEM_RESET | X_MEM_RESERVE))) {
     SHIM_SET_RETURN_32(X_STATUS_INVALID_PARAMETER);
     return;
   }
   // If MEM_RESET is set only MEM_RESET can be set.
-  if (allocation_type & X_MEM_RESET && (allocation_type & ~X_MEM_RESET)) {
+  if (alloc_type & X_MEM_RESET && (alloc_type & ~X_MEM_RESET)) {
     SHIM_SET_RETURN_32(X_STATUS_INVALID_PARAMETER);
     return;
   }
@@ -68,37 +104,60 @@ SHIM_CALL NtAllocateVirtualMemory_shim(PPCContext* ppc_state,
   }
 
   // Adjust size.
-  uint32_t adjusted_size = region_size_value;
-  // TODO(benvanik): adjust based on page size flags/etc?
-
-  // TODO(benvanik): support different allocation types.
-  // Right now we treat everything as a commit and ignore allocations that have
-  // already happened.
-  if (base_addr_value) {
-    // Having a pointer already means that this is likely a follow-on COMMIT.
-    assert_true(!(allocation_type & X_MEM_RESERVE) &&
-                (allocation_type & X_MEM_COMMIT));
-    SHIM_SET_MEM_32(base_addr_ptr, base_addr_value);
-    SHIM_SET_MEM_32(region_size_ptr, adjusted_size);
-    SHIM_SET_RETURN_32(X_STATUS_SUCCESS);
-    return;
+  uint32_t page_size = 4096;
+  if (alloc_type & X_MEM_LARGE_PAGES) {
+    page_size = 64 * 1024;
   }
+  if (int32_t(region_size_value) < 0) {
+    // Some games pass in negative sizes.
+    region_size_value = -int32_t(region_size_value);
+  }
+  uint32_t adjusted_size = xe::round_up(region_size_value, page_size);
 
   // Allocate.
-  uint32_t flags = (allocation_type & X_MEM_NOZERO) ? 0 : MEMORY_FLAG_ZERO;
-  uint32_t addr = (uint32_t)state->memory()->HeapAlloc(base_addr_value,
-                                                       adjusted_size, flags);
-  if (!addr) {
+  uint32_t allocation_type = 0;
+  if (alloc_type & X_MEM_RESERVE) {
+    allocation_type |= kMemoryAllocationReserve;
+  }
+  if (alloc_type & X_MEM_COMMIT) {
+    allocation_type |= kMemoryAllocationCommit;
+  }
+  if (alloc_type & X_MEM_RESET) {
+    XELOGE("X_MEM_RESET not implemented");
+    assert_always();
+  }
+  uint32_t protect = FromXdkProtectFlags(protect_bits);
+  uint32_t address = 0;
+  if (base_addr_value) {
+    auto heap = state->memory()->LookupHeap(base_addr_value);
+    if (heap->AllocFixed(base_addr_value, adjusted_size, page_size,
+                         allocation_type, protect)) {
+      address = base_addr_value;
+    }
+  } else {
+    bool top_down = !!(alloc_type & X_MEM_TOP_DOWN);
+    auto heap = state->memory()->LookupHeapByType(false, page_size);
+    heap->Alloc(adjusted_size, page_size, allocation_type, protect, top_down,
+                &address);
+  }
+  if (!address) {
     // Failed - assume no memory available.
     SHIM_SET_RETURN_32(X_STATUS_NO_MEMORY);
     return;
   }
 
-  XELOGD("NtAllocateVirtualMemory = %.8X", addr);
+  // Zero memory, if needed.
+  if (address && !(alloc_type & X_MEM_NOZERO)) {
+    if (alloc_type & X_MEM_COMMIT) {
+      std::memset(SHIM_MEM_ADDR(address), 0, adjusted_size);
+    }
+  }
+
+  XELOGD("NtAllocateVirtualMemory = %.8X", address);
 
   // Stash back.
   // Maybe set X_STATUS_ALREADY_COMMITTED if MEM_COMMIT?
-  SHIM_SET_MEM_32(base_addr_ptr, addr);
+  SHIM_SET_MEM_32(base_addr_ptr, address);
   SHIM_SET_MEM_32(region_size_ptr, adjusted_size);
   SHIM_SET_RETURN_32(X_STATUS_SUCCESS);
 }
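Note: ToXdkProtectFlags and FromXdkProtectFlags, defined at the top of this file, translate between the emulator's kMemoryProtect* bits and the guest-visible X_PAGE_* bits. They are lossy on purpose — execute bits fold into plain read/write, and in ToXdkProtectFlags the NOCACHE/WRITECOMBINE cases assign rather than OR. Worked examples, assuming Win32-style X_PAGE_* bitmask values:

    // With the functions as defined above:
    //   FromXdkProtectFlags(X_PAGE_READWRITE)
    //       == kMemoryProtectRead | kMemoryProtectWrite
    //   ToXdkProtectFlags(kMemoryProtectRead | kMemoryProtectWrite)
    //       == X_PAGE_READWRITE
    //   ToXdkProtectFlags(kMemoryProtectRead | kMemoryProtectNoCache)
    //       == X_PAGE_NOCACHE  // note: replaces, not ORs, the base flag
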
@@ -130,22 +189,24 @@ SHIM_CALL NtFreeVirtualMemory_shim(PPCContext* ppc_state, KernelState* state) {
     return;
   }
 
-  // TODO(benvanik): ignore decommits for now.
+  auto heap = state->memory()->LookupHeap(base_addr_value);
+  bool result = false;
   if (free_type == X_MEM_DECOMMIT) {
-    SHIM_SET_RETURN_32(X_STATUS_SUCCESS);
-    return;
-  }
+    // If zero, we may need to query size (free whole region).
+    assert_not_zero(region_size_value);
 
-  // Free.
-  uint32_t flags = 0;
-  uint32_t freed_size = state->memory()->HeapFree(base_addr_value, flags);
-  if (!freed_size) {
+    region_size_value = xe::round_up(region_size_value, heap->page_size());
+    result = heap->Decommit(base_addr_value, region_size_value);
+  } else {
+    result = heap->Release(base_addr_value, &region_size_value);
+  }
+  if (!result) {
     SHIM_SET_RETURN_32(X_STATUS_UNSUCCESSFUL);
     return;
   }
 
   SHIM_SET_MEM_32(base_addr_ptr, base_addr_value);
-  SHIM_SET_MEM_32(region_size_ptr, freed_size);
+  SHIM_SET_MEM_32(region_size_ptr, region_size_value);
   SHIM_SET_RETURN_32(X_STATUS_SUCCESS);
 }
 
@@ -168,9 +229,9 @@ SHIM_CALL NtQueryVirtualMemory_shim(PPCContext* ppc_state, KernelState* state) {
   XELOGD("NtQueryVirtualMemory(%.8X, %.8X)", base_address,
          memory_basic_information_ptr);
 
-  AllocationInfo alloc_info;
-  size_t result = state->memory()->QueryInformation(base_address, &alloc_info);
-  if (!result) {
+  auto heap = state->memory()->LookupHeap(base_address);
+  HeapAllocationInfo alloc_info;
+  if (!heap->QueryRegionInfo(base_address, &alloc_info)) {
     SHIM_SET_RETURN_32(X_STATUS_INVALID_PARAMETER);
     return;
   }
@@ -179,15 +240,21 @@ SHIM_CALL NtQueryVirtualMemory_shim(PPCContext* ppc_state, KernelState* state) {
       static_cast<uint32_t>(alloc_info.base_address);
   memory_basic_information->allocation_base =
       static_cast<uint32_t>(alloc_info.allocation_base);
-  memory_basic_information->allocation_protect = alloc_info.allocation_protect;
+  memory_basic_information->allocation_protect =
+      ToXdkProtectFlags(alloc_info.allocation_protect);
   memory_basic_information->region_size =
       static_cast<uint32_t>(alloc_info.region_size);
-  memory_basic_information->state = alloc_info.state;
-  memory_basic_information->protect = alloc_info.protect;
+  uint32_t x_state = 0;
+  if (alloc_info.state & kMemoryAllocationReserve) {
+    x_state |= X_MEM_RESERVE;
+  }
+  if (alloc_info.state & kMemoryAllocationCommit) {
+    x_state |= X_MEM_COMMIT;
+  }
+  memory_basic_information->state = x_state;
+  memory_basic_information->protect = ToXdkProtectFlags(alloc_info.protect);
   memory_basic_information->type = alloc_info.type;
 
-  XELOGE("NtQueryVirtualMemory NOT IMPLEMENTED");
-
   SHIM_SET_RETURN_32(X_STATUS_SUCCESS);
 }
 
@@ -242,26 +309,20 @@ SHIM_CALL MmAllocatePhysicalMemoryEx_shim(PPCContext* ppc_state,
   assert_true(min_addr_range == 0);
   assert_true(max_addr_range == 0xFFFFFFFF);
 
-  // Allocate.
-  uint32_t flags = MEMORY_FLAG_PHYSICAL;
-  uint32_t base_address = (uint32_t)state->memory()->HeapAlloc(
-      0, adjusted_size, flags, adjusted_alignment);
-  if (!base_address) {
+  uint32_t allocation_type = kMemoryAllocationReserve | kMemoryAllocationCommit;
+  uint32_t protect = FromXdkProtectFlags(protect_bits);
+  bool top_down = true;
+  auto heap = state->memory()->LookupHeapByType(true, page_size);
+  uint32_t base_address;
+  if (!heap->AllocRange(min_addr_range, max_addr_range, adjusted_size,
+                        adjusted_alignment, allocation_type, protect, top_down,
+                        &base_address)) {
     // Failed - assume no memory available.
     SHIM_SET_RETURN_32(0);
     return;
   }
   XELOGD("MmAllocatePhysicalMemoryEx = %.8X", base_address);
 
   // Move the address into the right range.
   // if (protect_bits & X_MEM_LARGE_PAGES) {
   //  base_address += 0xA0000000;
   //} else if (protect_bits & X_MEM_16MB_PAGES) {
   //  base_address += 0xC0000000;
   //} else {
   //  base_address += 0xE0000000;
   //}
+  base_address += 0xA0000000;
 
   SHIM_SET_RETURN_64(base_address);
 }
@@ -274,14 +335,10 @@ SHIM_CALL MmFreePhysicalMemory_shim(PPCContext* ppc_state, KernelState* state) {
 
   // base_address = result of MmAllocatePhysicalMemory.
 
-  assert_true((base_address & 0x1F) == 0);
+  // Strip off physical bits before passing down.
+  base_address &= ~0xE0000000;
 
-  // TODO(benvanik): free memory.
-  XELOGE("xeMmFreePhysicalMemory NOT IMPLEMENTED");
-  // uint32_t size = ?;
-  // xe_memory_heap_free(
-  //     state->memory(), base_address, size);
+  auto heap = state->memory()->LookupHeap(base_address);
+  heap->Release(base_address);
 }
 
 SHIM_CALL MmQueryAddressProtect_shim(PPCContext* ppc_state,
@@ -290,7 +347,12 @@ SHIM_CALL MmQueryAddressProtect_shim(PPCContext* ppc_state,
 
   XELOGD("MmQueryAddressProtect(%.8X)", base_address);
 
-  uint32_t access = state->memory()->QueryProtect(base_address);
+  auto heap = state->memory()->LookupHeap(base_address);
+  uint32_t access;
+  if (!heap->QueryProtect(base_address, &access)) {
+    access = 0;
+  }
+  access = ToXdkProtectFlags(access);
 
   SHIM_SET_RETURN_32(access);
 }
@@ -301,9 +363,13 @@ SHIM_CALL MmQueryAllocationSize_shim(PPCContext* ppc_state,
 
   XELOGD("MmQueryAllocationSize(%.8X)", base_address);
 
-  size_t size = state->memory()->QuerySize(base_address);
+  auto heap = state->memory()->LookupHeap(base_address);
+  uint32_t size;
+  if (!heap->QuerySize(base_address, &size)) {
+    size = 0;
+  }
 
-  SHIM_SET_RETURN_32(static_cast<uint32_t>(size));
+  SHIM_SET_RETURN_32(size);
 }
 
 SHIM_CALL MmQueryStatistics_shim(PPCContext* ppc_state, KernelState* state) {
@@ -372,19 +438,12 @@ SHIM_CALL MmGetPhysicalAddress_shim(PPCContext* ppc_state, KernelState* state) {
   // );
   // base_address = result of MmAllocatePhysicalMemory.
 
-  // We are always using virtual addresses, right now, since we don't need
-  // physical ones. We could munge up the address here to another mapped view
-  // of memory.
+  uint32_t physical_address = base_address & 0x1FFFFFFF;
+  if (base_address >= 0xE0000000) {
+    physical_address += 0x1000;
+  }
 
-  /*if (protect_bits & X_MEM_LARGE_PAGES) {
-    base_address |= 0xA0000000;
-  } else if (protect_bits & X_MEM_16MB_PAGES) {
-    base_address |= 0xC0000000;
-  } else {
-    base_address |= 0xE0000000;
-  }*/
-
-  SHIM_SET_RETURN_64(base_address);
+  SHIM_SET_RETURN_64(physical_address);
 }
 
 SHIM_CALL MmMapIoSpace_shim(PPCContext* ppc_state, KernelState* state) {
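Note: the new MmGetPhysicalAddress math masks the view bits off and applies a 4 KB bias for 0xE0000000-based addresses. Worked examples under the code as written:

    // physical_address = base_address & 0x1FFFFFFF, then +0x1000 if the
    // input was in the 0xE0000000 view:
    //   0xA0010000 -> 0x00010000
    //   0xC0010000 -> 0x00010000
    //   0xE0010000 -> 0x00011000  (0x00010000 + 0x1000 bias)
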
@@ -83,6 +83,11 @@ SHIM_CALL ObReferenceObjectByHandle_shim(PPCContext* ppc_state,
         } break;
       }
     } break;
+    case 0xD017BEEF: {  // ExSemaphoreObjectType
+      // TODO(benvanik): implement.
+      assert_unhandled_case(object_type_ptr);
+      native_ptr = 0xDEADF00D;
+    } break;
     case 0xD01BBEEF: {  // ExThreadObjectType
       XThread* thread = (XThread*)object;
       native_ptr = thread->thread_state_ptr();

@@ -380,6 +380,11 @@ SHIM_CALL VdPersistDisplay_shim(PPCContext* ppc_state, KernelState* state) {
 
   // unk1_ptr needs to be populated with a pointer passed to
   // MmFreePhysicalMemory(1, *unk1_ptr).
+  auto heap = state->memory()->LookupHeapByType(true, 16 * 1024);
+  uint32_t unk1_value;
+  heap->Alloc(64, 32, kMemoryAllocationReserve | kMemoryAllocationCommit,
+              kMemoryProtectNoAccess, false, &unk1_value);
+  SHIM_SET_MEM_32(unk1_ptr, unk1_value);
 
   // ?
   SHIM_SET_RETURN_64(1);

@@ -113,6 +113,7 @@ X_STATUS XObject::Wait(uint32_t wait_reason, uint32_t processor_mode,
       // Or X_STATUS_ALERTED?
       return X_STATUS_USER_APC;
     case WAIT_TIMEOUT:
+      YieldProcessor();
       return X_STATUS_TIMEOUT;
     default:
    case WAIT_FAILED:

src/xenia/memory.cc: 1251 lines changed (diff suppressed because it is too large).

@@ -12,6 +12,7 @@
 
 #include <cstdint>
 #include <memory>
+#include <mutex>
 #include <vector>
 
 #include "xenia/base/platform.h"
@@ -25,25 +26,129 @@ enum SystemHeapFlag : uint32_t {
 
   kSystemHeapDefault = kSystemHeapVirtual,
 };
-class MemoryHeap;
 
-// TODO(benvanik): move to heap.
-enum {
-  MEMORY_FLAG_64KB_PAGES = (1 << 1),
-  MEMORY_FLAG_ZERO = (1 << 2),
-  MEMORY_FLAG_PHYSICAL = (1 << 3),
+enum MemoryAllocationFlag : uint32_t {
+  kMemoryAllocationReserve = 1 << 0,
+  kMemoryAllocationCommit = 1 << 1,
 };
 
+enum MemoryProtectFlag : uint32_t {
+  kMemoryProtectRead = 1 << 0,
+  kMemoryProtectWrite = 1 << 1,
+  kMemoryProtectNoCache = 1 << 2,
+  kMemoryProtectWriteCombine = 1 << 3,
+
+  kMemoryProtectNoAccess = 0,
+};
+
-// TODO(benvanik): move to heap.
 // Equivalent to the Win32 MEMORY_BASIC_INFORMATION struct.
-struct AllocationInfo {
+struct HeapAllocationInfo {
+  // A pointer to the base address of the region of pages.
   uint32_t base_address;
+  // A pointer to the base address of a range of pages allocated by the
+  // VirtualAlloc function. The page pointed to by the BaseAddress member is
+  // contained within this allocation range.
   uint32_t allocation_base;
-  uint32_t allocation_protect;  // TBD
-  size_t region_size;
-  uint32_t state;  // TBD
-  uint32_t protect;  // TBD
-  uint32_t type;  // TBD
+  // The memory protection option when the region was initially allocated.
+  uint32_t allocation_protect;
+  // The size of the region beginning at the base address in which all pages
+  // have identical attributes, in bytes.
+  uint32_t region_size;
+  // The state of the pages in the region (commit/free/reserve).
+  uint32_t state;
+  // The access protection of the pages in the region.
+  uint32_t protect;
+  // The type of pages in the region (private).
+  uint32_t type;
 };
 
+union PageEntry {
+  struct {
+    uint32_t base_address : 20;       // in 4k pages
+    uint32_t region_page_count : 20;  // in 4k pages
+    uint32_t allocation_protect : 4;
+    uint32_t current_protect : 4;
+    uint32_t state : 2;
+    uint32_t reserved : 14;
+  };
+  uint64_t qword;
+};
+
+class BaseHeap {
+ public:
+  virtual ~BaseHeap();
+
+  uint32_t page_size() const { return page_size_; }
+
+  virtual void Dispose();
+
+  void DumpMap();
+
+  virtual bool Alloc(uint32_t size, uint32_t alignment,
+                     uint32_t allocation_type, uint32_t protect, bool top_down,
+                     uint32_t* out_address);
+  virtual bool AllocFixed(uint32_t base_address, uint32_t size,
+                          uint32_t alignment, uint32_t allocation_type,
+                          uint32_t protect);
+  virtual bool AllocRange(uint32_t low_address, uint32_t high_address,
+                          uint32_t size, uint32_t alignment,
+                          uint32_t allocation_type, uint32_t protect,
+                          bool top_down, uint32_t* out_address);
+  virtual bool Decommit(uint32_t address, uint32_t size);
+  virtual bool Release(uint32_t address, uint32_t* out_region_size = nullptr);
+  virtual bool Protect(uint32_t address, uint32_t size, uint32_t protect);
+
+  bool QueryRegionInfo(uint32_t base_address, HeapAllocationInfo* out_info);
+  bool QuerySize(uint32_t address, uint32_t* out_size);
+  bool QueryProtect(uint32_t address, uint32_t* out_protect);
+  uint32_t GetPhysicalAddress(uint32_t address);
+
+ protected:
+  BaseHeap();
+
+  void Initialize(uint8_t* membase, uint32_t heap_base, uint32_t heap_size,
+                  uint32_t page_size);
+
+  uint8_t* membase_;
+  uint32_t heap_base_;
+  uint32_t heap_size_;
+  uint32_t page_size_;
+  std::vector<PageEntry> page_table_;
+  std::recursive_mutex heap_mutex_;
+};
+
+class VirtualHeap : public BaseHeap {
+ public:
+  VirtualHeap();
+  ~VirtualHeap() override;
+
+  void Initialize(uint8_t* membase, uint32_t heap_base, uint32_t heap_size,
+                  uint32_t page_size);
+};
+
+class PhysicalHeap : public BaseHeap {
+ public:
+  PhysicalHeap();
+  ~PhysicalHeap() override;
+
+  void Initialize(uint8_t* membase, uint32_t heap_base, uint32_t heap_size,
+                  uint32_t page_size, VirtualHeap* parent_heap);
+
+  bool Alloc(uint32_t size, uint32_t alignment, uint32_t allocation_type,
+             uint32_t protect, bool top_down, uint32_t* out_address) override;
+  bool AllocFixed(uint32_t base_address, uint32_t size, uint32_t alignment,
+                  uint32_t allocation_type, uint32_t protect) override;
+  bool AllocRange(uint32_t low_address, uint32_t high_address, uint32_t size,
+                  uint32_t alignment, uint32_t allocation_type,
+                  uint32_t protect, bool top_down,
+                  uint32_t* out_address) override;
+  bool Decommit(uint32_t address, uint32_t size) override;
+  bool Release(uint32_t base_address,
+               uint32_t* out_region_size = nullptr) override;
+  bool Protect(uint32_t address, uint32_t size, uint32_t protect) override;
+
+ protected:
+  VirtualHeap* parent_heap_;
+};
+
 class Memory {
@@ -82,27 +187,24 @@ class Memory {
   uint32_t SearchAligned(uint32_t start, uint32_t end, const uint32_t* values,
                          size_t value_count);
 
-  bool AddMappedRange(uint32_t address, uint32_t mask, uint32_t size,
-                      void* context, cpu::MMIOReadCallback read_callback,
-                      cpu::MMIOWriteCallback write_callback);
+  bool AddVirtualMappedRange(uint32_t virtual_address, uint32_t mask,
+                             uint32_t size, void* context,
+                             cpu::MMIOReadCallback read_callback,
+                             cpu::MMIOWriteCallback write_callback);
 
-  uintptr_t AddWriteWatch(uint32_t guest_address, uint32_t length,
-                          cpu::WriteWatchCallback callback,
-                          void* callback_context, void* callback_data);
+  uintptr_t AddPhysicalWriteWatch(uint32_t physical_address, uint32_t length,
+                                  cpu::WriteWatchCallback callback,
+                                  void* callback_context, void* callback_data);
   void CancelWriteWatch(uintptr_t watch_handle);
 
   uint32_t SystemHeapAlloc(uint32_t size, uint32_t alignment = 0x20,
                            uint32_t system_heap_flags = kSystemHeapDefault);
   void SystemHeapFree(uint32_t address);
-  uint32_t HeapAlloc(uint32_t base_address, uint32_t size, uint32_t flags,
-                     uint32_t alignment = 0x20);
-  int HeapFree(uint32_t address, uint32_t size);
-
-  bool QueryInformation(uint32_t base_address, AllocationInfo* mem_info);
-  uint32_t QuerySize(uint32_t base_address);
+  BaseHeap* LookupHeap(uint32_t address);
+  BaseHeap* LookupHeapByType(bool physical, uint32_t page_size);
 
-  int Protect(uint32_t address, uint32_t size, uint32_t access);
-  uint32_t QueryProtect(uint32_t address);
+  void DumpMap();
 
  private:
  int MapViews(uint8_t* mapping_base);
@@ -122,22 +224,31 @@ class Memory {
     uint8_t* v00000000;
     uint8_t* v40000000;
     uint8_t* v7F000000;
-    uint8_t* v7F100000;
     uint8_t* v80000000;
     uint8_t* v90000000;
     uint8_t* vA0000000;
     uint8_t* vC0000000;
     uint8_t* vE0000000;
     uint8_t* physical;
    };
+    uint8_t* all_views[9];
  } views_;
 
  std::unique_ptr<cpu::MMIOHandler> mmio_handler_;
 
-  MemoryHeap* virtual_heap_;
-  MemoryHeap* physical_heap_;
+  struct {
+    VirtualHeap v00000000;
+    VirtualHeap v40000000;
+    VirtualHeap v80000000;
+    VirtualHeap v90000000;
+
+    VirtualHeap physical;
+    PhysicalHeap vA0000000;
+    PhysicalHeap vC0000000;
+    PhysicalHeap vE0000000;
+  } heaps_;
 
-  friend class MemoryHeap;
+  friend class BaseHeap;
 };
 
 }  // namespace xe
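Note: a consistency check on the new PageEntry union — the bitfields total 20 + 20 + 4 + 4 + 2 + 14 = 64 bits, so the struct exactly overlays the qword member and each page table entry stays one 64-bit word. With 4 KB pages, a 20-bit base_address spans 2^20 * 4 KB = 4 GB, the full guest address space. A static check one could add (illustrative, not in the commit):

    static_assert(sizeof(PageEntry) == sizeof(uint64_t),
                  "PageEntry bitfields must pack into one qword");
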
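Note: memory.cc itself is suppressed above, so LookupHeap's body is not visible here. Given the heaps_ members declared in this header, a plausible dispatch is by address range — a hypothetical sketch, not the suppressed implementation:

    // Hypothetical address-range dispatch over the heaps_ members above.
    BaseHeap* Memory::LookupHeap(uint32_t address) {
      if (address < 0x40000000) {
        return &heaps_.v00000000;
      } else if (address < 0x80000000) {
        return &heaps_.v40000000;
      } else if (address < 0x90000000) {
        return &heaps_.v80000000;
      } else if (address < 0xA0000000) {
        return &heaps_.v90000000;
      } else if (address < 0xC0000000) {
        return &heaps_.vA0000000;
      } else if (address < 0xE0000000) {
        return &heaps_.vC0000000;
      } else {
        return &heaps_.vE0000000;
      }
    }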