Cross-platformizing MMIO stuff.

MSVC build likely needs some fixes.
Ben Vanik 2014-07-29 22:12:39 -07:00
parent d56ae60460
commit 0129a96225
11 changed files with 651 additions and 176 deletions
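The new cross-platform surface is deliberately small: a platform-specific MMIOHandler is installed once, MMIO ranges are registered against it, and explicit loads/stores consult it before falling back to raw memory. A minimal usage sketch based on the header added in this diff; the gpu_read/gpu_write callbacks, the context pointer, and the address/mask/size values are invented for illustration only:

#include <xenia/cpu/mmio_handler.h>

// Hypothetical device callbacks, not part of this commit.
uint64_t gpu_read(void* context, uint64_t addr) { return 0; }
void gpu_write(void* context, uint64_t addr, uint64_t value) {}

void ExampleSetup() {
  // Only one handler may exist at a time; Install() wires up the
  // platform-specific machinery (Mach exception port or vectored handler).
  auto mmio_handler = xe::cpu::MMIOHandler::Install();

  // Accesses that match the registered address under the mask are routed to
  // the callbacks, either from the fault path or from the explicit checks.
  mmio_handler->RegisterRange(0x7FC80000, 0xFFFF0000, 0x10000,
                              nullptr, gpu_read, gpu_write);

  // Explicit path, as used by XenonMemory::LoadI32/StoreI32 further down.
  uint64_t value = 0;
  if (!mmio_handler->CheckLoad(0x7FC80000, &value)) {
    // Not an MMIO address: fall through to a plain memory read.
  }
}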

View File

@@ -23,6 +23,10 @@ namespace debugging {
// though, so avoid calling it frequently.
bool IsDebuggerAttached();
+// Breaks into the debugger if it is attached.
+// If no debugger is present, a signal will be raised.
+void Break();
} // namespace debugging
} // namespace poly

View File

@@ -25,5 +25,11 @@ bool IsDebuggerAttached() {
return (info.kp_proc.p_flag & P_TRACED) != 0;
}
+// TODO(benvanik): find a more reliable way.
+void Break() {
+// __asm__("int $3");
+__builtin_debugtrap();
+}
} // namespace debugging
} // namespace poly

View File

@@ -16,5 +16,9 @@ bool IsDebuggerAttached() {
return IsDebuggerPresent() ? true : false;
}
+void Break() {
+__debugbreak();
+}
} // namespace debugging
} // namespace poly

View File

@@ -15,6 +15,7 @@
#include <poly/byte_order.h>
#include <poly/config.h>
#include <poly/cxx_compat.h>
+#include <poly/debugging.h>
#include <poly/math.h>
#include <poly/memory.h>
#include <poly/platform.h>

View File

@@ -0,0 +1,192 @@
/**
******************************************************************************
* Xenia : Xbox 360 Emulator Research Project *
******************************************************************************
* Copyright 2014 Ben Vanik. All rights reserved. *
* Released under the BSD license - see LICENSE in the root for more details. *
******************************************************************************
*/
#include <xenia/cpu/mmio_handler.h>
#include <poly/poly.h>
namespace BE {
#include <beaengine/BeaEngine.h>
} // namespace BE
namespace xe {
namespace cpu {
MMIOHandler* MMIOHandler::global_handler_ = nullptr;
// Implemented in the platform cc file.
std::unique_ptr<MMIOHandler> CreateMMIOHandler();
std::unique_ptr<MMIOHandler> MMIOHandler::Install() {
// There can be only one handler at a time.
assert_null(global_handler_);
if (global_handler_) {
return nullptr;
}
// Create the platform-specific handler.
auto handler = CreateMMIOHandler();
// Platform-specific initialization for the handler.
if (!handler->Initialize()) {
return nullptr;
}
global_handler_ = handler.get();
return handler;
}
MMIOHandler::~MMIOHandler() {
assert_true(global_handler_ == this);
global_handler_ = nullptr;
// Platform-specific handler uninstall.
Uninstall();
}
bool MMIOHandler::RegisterRange(uint64_t address, uint64_t mask, uint64_t size,
void* context, MMIOReadCallback read_callback,
MMIOWriteCallback write_callback) {
mapped_ranges_.push_back({
reinterpret_cast<uint64_t>(mapping_base_) | address,
0xFFFFFFFF00000000ull | mask, size, context, read_callback,
write_callback,
});
return true;
}
bool MMIOHandler::CheckLoad(uint64_t address, uint64_t* out_value) {
for (const auto& range : mapped_ranges_) {
if (((address | (uint64_t)mapping_base_) & range.mask) == range.address) {
*out_value = static_cast<uint32_t>(range.read(range.context, address));
return true;
}
}
return false;
}
bool MMIOHandler::CheckStore(uint64_t address, uint64_t value) {
for (const auto& range : mapped_ranges_) {
if (((address | (uint64_t)mapping_base_) & range.mask) == range.address) {
range.write(range.context, address, value);
return true;
}
}
return false;
}
bool MMIOHandler::HandleAccessFault(void* thread_state,
uint64_t fault_address) {
// Access violations are pretty rare, so we can do a linear search here.
const MMIORange* range = nullptr;
for (const auto& test_range : mapped_ranges_) {
if ((fault_address & test_range.mask) == test_range.address) {
// Address is within the range of this mapping.
range = &test_range;
break;
}
}
if (!range) {
// The access does not fall within any registered range, so fail and let the
// caller handle it (likely by aborting).
return false;
}
// TODO(benvanik): replace with simple check of mov (that's all
// we care about).
auto rip = GetThreadStateRip(thread_state);
BE::DISASM disasm = {0};
disasm.Archi = 64;
disasm.Options = BE::MasmSyntax + BE::PrefixedNumeral;
disasm.EIP = static_cast<BE::UIntPtr>(rip);
size_t instr_length = BE::Disasm(&disasm);
if (instr_length == BE::UNKNOWN_OPCODE) {
// Failed to decode instruction. Either it's an unhandled mov case or
// not a mov.
assert_always();
return false;
}
int32_t arg1_type = disasm.Argument1.ArgType;
int32_t arg2_type = disasm.Argument2.ArgType;
bool is_load = (arg1_type & BE::REGISTER_TYPE) == BE::REGISTER_TYPE &&
(arg1_type & BE::GENERAL_REG) == BE::GENERAL_REG &&
(disasm.Argument1.AccessMode & BE::WRITE) == BE::WRITE;
bool is_store = (arg1_type & BE::MEMORY_TYPE) == BE::MEMORY_TYPE &&
(((arg2_type & BE::REGISTER_TYPE) == BE::REGISTER_TYPE &&
(arg2_type & BE::GENERAL_REG) == BE::GENERAL_REG) ||
(arg2_type & BE::CONSTANT_TYPE) == BE::CONSTANT_TYPE) &&
(disasm.Argument1.AccessMode & BE::WRITE) == BE::WRITE;
if (is_load) {
// Load of a memory value - read from range, swap, and store in the
// register.
uint64_t value = range->read(range->context, fault_address & 0xFFFFFFFF);
uint32_t be_reg_index;
if (!poly::bit_scan_forward(arg1_type & 0xFFFF, &be_reg_index)) {
be_reg_index = 0;
}
uint64_t* reg_ptr = GetThreadStateRegPtr(thread_state, be_reg_index);
switch (disasm.Argument1.ArgSize) {
case 8:
*reg_ptr = static_cast<uint8_t>(value);
break;
case 16:
*reg_ptr = poly::byte_swap(static_cast<uint16_t>(value));
break;
case 32:
*reg_ptr = poly::byte_swap(static_cast<uint32_t>(value));
break;
case 64:
*reg_ptr = poly::byte_swap(static_cast<uint64_t>(value));
break;
}
} else if (is_store) {
// Store of a register value - read register, swap, write to range.
uint64_t value;
if ((arg2_type & BE::REGISTER_TYPE) == BE::REGISTER_TYPE) {
uint32_t be_reg_index;
if (!poly::bit_scan_forward(arg2_type & 0xFFFF, &be_reg_index)) {
be_reg_index = 0;
}
uint64_t* reg_ptr = GetThreadStateRegPtr(thread_state, be_reg_index);
value = *reg_ptr;
} else if ((arg2_type & BE::CONSTANT_TYPE) == BE::CONSTANT_TYPE) {
value = disasm.Instruction.Immediat;
} else {
// Unknown source type in mov.
assert_always();
value = 0;
}
switch (disasm.Argument2.ArgSize) {
case 8:
value = static_cast<uint8_t>(value);
break;
case 16:
value = poly::byte_swap(static_cast<uint16_t>(value));
break;
case 32:
value = poly::byte_swap(static_cast<uint32_t>(value));
break;
case 64:
value = poly::byte_swap(static_cast<uint64_t>(value));
break;
}
range->write(range->context, fault_address & 0xFFFFFFFF, value);
} else {
// Unknown MMIO instruction type.
assert_always();
return false;
}
// Advance RIP to the next instruction so that we resume properly.
SetThreadStateRip(thread_state, rip + instr_length);
return true;
}
} // namespace cpu
} // namespace xe
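One detail worth calling out from HandleAccessFault above: BeaEngine reports the concrete general-purpose register as a one-hot flag in the low 16 bits of ArgType (REG0/RAX is bit 0, REG1/RCX bit 1, and so on, matching the comments on the Mac register map below), so poly::bit_scan_forward is used to turn that flag back into an index into the saved thread context. A rough stand-in for that helper, assuming the same contract as poly's version:

#include <cstdint>

// Illustrative equivalent of poly::bit_scan_forward: writes the index of the
// lowest set bit and returns true, or returns false if no bit is set.
bool bit_scan_forward_sketch(uint32_t value, uint32_t* out_index) {
  if (!value) {
    return false;
  }
  uint32_t index = 0;
  while (!(value & 1)) {
    value >>= 1;
    ++index;
  }
  *out_index = index;
  return true;
}

// Example: a mov whose destination is RDX carries the REG2 flag (bit 2) in
// ArgType's low word; bit_scan_forward recovers index 2, which
// GetThreadStateRegPtr then maps onto the platform's thread-state layout.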

View File

@@ -0,0 +1,67 @@
/**
******************************************************************************
* Xenia : Xbox 360 Emulator Research Project *
******************************************************************************
* Copyright 2014 Ben Vanik. All rights reserved. *
* Released under the BSD license - see LICENSE in the root for more details. *
******************************************************************************
*/
#ifndef XENIA_CPU_MMIO_HANDLER_H_
#define XENIA_CPU_MMIO_HANDLER_H_
#include <cstdint>
#include <memory>
#include <vector>
namespace xe {
namespace cpu {
typedef uint64_t (*MMIOReadCallback)(void* context, uint64_t addr);
typedef void (*MMIOWriteCallback)(void* context, uint64_t addr, uint64_t value);
// NOTE: only one can exist at a time!
class MMIOHandler {
public:
virtual ~MMIOHandler();
static std::unique_ptr<MMIOHandler> Install();
static MMIOHandler* global_handler() { return global_handler_; }
bool RegisterRange(uint64_t address, uint64_t mask, uint64_t size,
void* context, MMIOReadCallback read_callback,
MMIOWriteCallback write_callback);
bool CheckLoad(uint64_t address, uint64_t* out_value);
bool CheckStore(uint64_t address, uint64_t value);
public:
bool HandleAccessFault(void* thread_state, uint64_t fault_address);
protected:
MMIOHandler() = default;
virtual bool Initialize() = 0;
virtual void Uninstall() = 0;
virtual uint64_t GetThreadStateRip(void* thread_state_ptr) = 0;
virtual void SetThreadStateRip(void* thread_state_ptr, uint64_t rip) = 0;
virtual uint64_t* GetThreadStateRegPtr(void* thread_state_ptr,
int32_t be_reg_index) = 0;
struct MMIORange {
uint64_t address;
uint64_t mask;
uint64_t size;
void* context;
MMIOReadCallback read;
MMIOWriteCallback write;
};
std::vector<MMIORange> mapped_ranges_;
static MMIOHandler* global_handler_;
};
} // namespace cpu
} // namespace xe
#endif // XENIA_CPU_MMIO_HANDLER_H_
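The typedefs above define the contract the handler code relies on: a read callback takes the device context plus the accessed address and returns the value, a write callback takes the value to store, and on the fault path the handler masks the address to its low 32 bits and does the byte swapping itself. A hypothetical callback pair backed by a small register array, purely for illustration:

#include <cstdint>

// Hypothetical device state; names are illustrative, not from this commit.
struct DummyRegisterFile {
  uint64_t regs[0x100];
};

uint64_t DummyReadCallback(void* context, uint64_t addr) {
  auto regs = reinterpret_cast<DummyRegisterFile*>(context);
  return regs->regs[(addr >> 2) & 0xFF];  // One register per 4-byte slot.
}

void DummyWriteCallback(void* context, uint64_t addr, uint64_t value) {
  auto regs = reinterpret_cast<DummyRegisterFile*>(context);
  regs->regs[(addr >> 2) & 0xFF] = value;
}

A pair like this would be handed to MMIOHandler::RegisterRange along with the context pointer; whether the fault path or the explicit CheckLoad/CheckStore path fires, the same callbacks see the access.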

View File

@@ -0,0 +1,237 @@
/**
******************************************************************************
* Xenia : Xbox 360 Emulator Research Project *
******************************************************************************
* Copyright 2014 Ben Vanik. All rights reserved. *
* Released under the BSD license - see LICENSE in the root for more details. *
******************************************************************************
*/
#include <xenia/cpu/mmio_handler.h>
#include <mach/mach.h>
#include <signal.h>
#include <thread>
#include <poly/poly.h>
#include <xenia/logging.h>
// Mach internal function, not defined in any header.
// http://web.mit.edu/darwin/src/modules/xnu/osfmk/man/exc_server.html
extern "C" boolean_t exc_server(mach_msg_header_t* request_msg,
mach_msg_header_t* reply_msg);
// Exported for the kernel to call back into.
// http://web.mit.edu/darwin/src/modules/xnu/osfmk/man/catch_exception_raise.html
extern "C" kern_return_t catch_exception_raise(
mach_port_t exception_port, mach_port_t thread, mach_port_t task,
exception_type_t exception, exception_data_t code,
mach_msg_type_number_t code_count);
namespace xe {
namespace cpu {
class MachMMIOHandler : public MMIOHandler {
public:
MachMMIOHandler();
protected:
bool Initialize() override;
void Uninstall() override;
uint64_t GetThreadStateRip(void* thread_state_ptr) override;
void SetThreadStateRip(void* thread_state_ptr, uint64_t rip) override;
uint64_t* GetThreadStateRegPtr(void* thread_state_ptr,
int32_t be_reg_index) override;
private:
void ThreadEntry();
// Worker thread processing exceptions.
std::unique_ptr<std::thread> thread_;
// Port listening for exceptions on the worker thread.
mach_port_t listen_port_;
};
std::unique_ptr<MMIOHandler> CreateMMIOHandler() {
return std::make_unique<MachMMIOHandler>();
}
MachMMIOHandler::MachMMIOHandler() : listen_port_(0) {}
bool MachMMIOHandler::Initialize() {
// Allocates the port that listens for exceptions.
// This will be freed in the dtor.
if (mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
&listen_port_) != KERN_SUCCESS) {
XELOGE("Unable to allocate listen port");
return false;
}
// http://web.mit.edu/darwin/src/modules/xnu/osfmk/man/mach_port_insert_right.html
if (mach_port_insert_right(mach_task_self(), listen_port_, listen_port_,
MACH_MSG_TYPE_MAKE_SEND) != KERN_SUCCESS) {
XELOGE("Unable to insert listen port right");
return false;
}
// Sets our exception filter so that any BAD_ACCESS exceptions go to it.
// http://web.mit.edu/darwin/src/modules/xnu/osfmk/man/task_set_exception_ports.html
if (task_set_exception_ports(mach_task_self(), EXC_MASK_BAD_ACCESS,
listen_port_, EXCEPTION_DEFAULT,
MACHINE_THREAD_STATE) != KERN_SUCCESS) {
XELOGE("Unable to set exception port");
return false;
}
// Spin up the worker thread.
std::unique_ptr<std::thread> thread(
new std::thread([this]() { ThreadEntry(); }));
thread->detach();
thread_ = std::move(thread);
return true;
}
void MachMMIOHandler::Uninstall() {
task_set_exception_ports(mach_task_self(), EXC_MASK_BAD_ACCESS, 0,
EXCEPTION_DEFAULT, 0);
mach_port_deallocate(mach_task_self(), listen_port_);
}
void MachMMIOHandler::ThreadEntry() {
while (true) {
struct {
mach_msg_header_t head;
mach_msg_body_t msgh_body;
char data[1024];
} msg;
struct {
mach_msg_header_t head;
char data[1024];
} reply;
// Wait for a message on the exception port.
mach_msg_return_t ret =
mach_msg(&msg.head, MACH_RCV_MSG | MACH_RCV_LARGE, 0, sizeof(msg),
listen_port_, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
if (ret != MACH_MSG_SUCCESS) {
XELOGE("mach_msg receive failed with %d %s", ret, mach_error_string(ret));
poly::debugging::Break();
break;
}
// Call exc_server, which will dispatch the catch_exception_raise.
exc_server(&msg.head, &reply.head);
// Send the reply.
if (mach_msg(&reply.head, MACH_SEND_MSG, reply.head.msgh_size, 0,
MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE,
MACH_PORT_NULL) != MACH_MSG_SUCCESS) {
XELOGE("mach_msg reply send failed");
poly::debugging::Break();
break;
}
}
}
// Kills the app when a bad access exception is unhandled.
void FailBadAccess() {
raise(SIGSEGV);
abort();
}
kern_return_t CatchExceptionRaise(mach_port_t thread) {
auto state_count = x86_EXCEPTION_STATE64_COUNT;
x86_exception_state64_t exc_state;
if (thread_get_state(thread, x86_EXCEPTION_STATE64,
reinterpret_cast<thread_state_t>(&exc_state),
&state_count) != KERN_SUCCESS) {
XELOGE("thread_get_state failed to get exception state");
return KERN_FAILURE;
}
state_count = x86_THREAD_STATE64_COUNT;
x86_thread_state64_t thread_state;
if (thread_get_state(thread, x86_THREAD_STATE64,
reinterpret_cast<thread_state_t>(&thread_state),
&state_count) != KERN_SUCCESS) {
XELOGE("thread_get_state failed to get thread state");
return KERN_FAILURE;
}
auto fault_address = exc_state.__faultvaddr;
auto mmio_handler =
static_cast<MachMMIOHandler*>(MMIOHandler::global_handler());
bool handled = mmio_handler->HandleAccessFault(&thread_state, fault_address);
if (!handled) {
// Unhandled - raise to the system.
XELOGE("MMIO unhandled bad access for %llx, bubbling", fault_address);
// TODO(benvanik): manipulate stack so that we can rip = break_handler or
// something and have the stack trace be valid.
poly::debugging::Break();
// When the thread resumes, kill it.
thread_state.__rip = reinterpret_cast<uint64_t>(FailBadAccess);
// Mach docs say we can return this to continue searching for handlers, but
// OSX doesn't seem to have it.
// return MIG_DESTROY_REQUEST;
}
// Set the thread state - as we've likely changed it.
if (thread_set_state(thread, x86_THREAD_STATE64,
reinterpret_cast<thread_state_t>(&thread_state),
state_count) != KERN_SUCCESS) {
XELOGE("thread_set_state failed to set thread state for continue");
return KERN_FAILURE;
}
return KERN_SUCCESS;
}
uint64_t MachMMIOHandler::GetThreadStateRip(void* thread_state_ptr) {
auto thread_state = reinterpret_cast<x86_thread_state64_t*>(thread_state_ptr);
return thread_state->__rip;
}
void MachMMIOHandler::SetThreadStateRip(void* thread_state_ptr, uint64_t rip) {
auto thread_state = reinterpret_cast<x86_thread_state64_t*>(thread_state_ptr);
thread_state->__rip = rip;
}
uint64_t* MachMMIOHandler::GetThreadStateRegPtr(void* thread_state_ptr,
int32_t be_reg_index) {
// Map from BeaEngine register order to x86_thread_state64 order.
static const uint32_t mapping[] = {
0, // REG0 / RAX -> 0
2, // REG1 / RCX -> 2
3, // REG2 / RDX -> 3
1, // REG3 / RBX -> 1
7, // REG4 / RSP -> 7
6, // REG5 / RBP -> 6
5, // REG6 / RSI -> 5
4, // REG7 / RDI -> 4
8, // REG8 / R8 -> 8
9, // REG9 / R9 -> 9
10, // REG10 / R10 -> 10
11, // REG11 / R11 -> 11
12, // REG12 / R12 -> 12
13, // REG13 / R13 -> 13
14, // REG14 / R14 -> 14
15, // REG15 / R15 -> 15
};
auto thread_state = reinterpret_cast<x86_thread_state64_t*>(thread_state_ptr);
return &thread_state->__rax + mapping[be_reg_index];
}
} // namespace cpu
} // namespace xe
// Exported and called by exc_server.
extern "C" kern_return_t catch_exception_raise(
mach_port_t exception_port, mach_port_t thread, mach_port_t task,
exception_type_t exception, exception_data_t code,
mach_msg_type_number_t code_count) {
// We get/set the states manually instead of using catch_exception_raise_state
// variant because that truncates everything to 32bit.
return xe::cpu::CatchExceptionRaise(thread);
}

View File

@@ -0,0 +1,91 @@
/**
******************************************************************************
* Xenia : Xbox 360 Emulator Research Project *
******************************************************************************
* Copyright 2014 Ben Vanik. All rights reserved. *
* Released under the BSD license - see LICENSE in the root for more details. *
******************************************************************************
*/
#include <xenia/cpu/mmio_handler.h>
#include <Windows.h>
namespace xe {
namespace cpu {
LONG CALLBACK MMIOExceptionHandler(PEXCEPTION_POINTERS ex_info);
class WinMMIOHandler : public MMIOHandler {
public:
WinMMIOHandler() = default;
protected:
bool Initialize() override;
void Uninstall() override;
uint64_t GetThreadStateRip(void* thread_state_ptr) override;
void SetThreadStateRip(void* thread_state_ptr, uint64_t rip) override;
uint64_t* GetThreadStateRegPtr(void* thread_state_ptr,
int32_t be_reg_index) override;
};
std::unique_ptr<MMIOHandler> CreateMMIOHandler() {
return std::make_unique<WinMMIOHandler>();
}
bool WinMMIOHandler::Initialize() {
// If there is a debugger attached the normal exception handler will not
// fire and we must instead add the continue handler.
AddVectoredExceptionHandler(1, MMIOExceptionHandler);
if (IsDebuggerPresent()) {
// TODO(benvanik): is this really required?
// AddVectoredContinueHandler(1, MMIOExceptionHandler);
}
return true;
}
void WinMMIOHandler::Uninstall() {
// Remove exception handlers.
RemoveVectoredExceptionHandler(MMIOExceptionHandler);
RemoveVectoredContinueHandler(MMIOExceptionHandler);
}
// Handles potential accesses to mmio. We look for access violations to
// addresses in our range and call into the registered handlers, if any.
// If there are none, we continue.
LONG CALLBACK MMIOExceptionHandler(PEXCEPTION_POINTERS ex_info) {
// http://msdn.microsoft.com/en-us/library/ms679331(v=vs.85).aspx
// http://msdn.microsoft.com/en-us/library/aa363082(v=vs.85).aspx
auto code = ex_info->ExceptionRecord->ExceptionCode;
if (code == STATUS_ACCESS_VIOLATION) {
auto fault_address = ex_info->ExceptionRecord->ExceptionInformation[1];
if (MMIOHandler::global_handler()->HandleAccessFault(ex_info->ContextRecord, fault_address)) {
// Handled successfully - RIP has been updated and we can continue.
return EXCEPTION_CONTINUE_EXECUTION;
} else {
// Failed to handle; continue search for a handler (and die if no other
// handler is found).
return EXCEPTION_CONTINUE_SEARCH;
}
}
return EXCEPTION_CONTINUE_SEARCH;
}
uint64_t WinMMIOHandler::GetThreadStateRip(void* thread_state_ptr) {
auto context = reinterpret_cast<LPCONTEXT>(thread_state_ptr);
return context->Rip;
}
void WinMMIOHandler::SetThreadStateRip(void* thread_state_ptr, uint64_t rip) {
auto context = reinterpret_cast<LPCONTEXT>(thread_state_ptr);
context->Rip = rip;
}
uint64_t* WinMMIOHandler::GetThreadStateRegPtr(void* thread_state_ptr,
int32_t be_reg_index) {
auto context = reinterpret_cast<LPCONTEXT>(thread_state_ptr);
// BeaEngine register indices line up with the CONTEXT structure format.
return &context->Rax + be_reg_index;
}
} // namespace cpu
} // namespace xe
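The Windows path leans on a property of vectored exception handling that is easy to miss: the handler may edit the captured CONTEXT (registers and Rip) and return EXCEPTION_CONTINUE_EXECUTION, and the faulting thread then resumes with that modified state, which is how the emulated mov appears to have completed. A tiny standalone x64 sketch of that contract (not xenia code; it redirects Rip to a recovery function instead of emulating the instruction, similar to the FailBadAccess trick in the Mac handler):

#include <Windows.h>
#include <cstdio>
#include <cstdlib>

// Execution resumes here after the handler rewrites Rip.
void Recover() {
  std::printf("recovered from access violation\n");
  std::exit(0);
}

LONG CALLBACK RedirectingHandler(PEXCEPTION_POINTERS ex_info) {
  if (ex_info->ExceptionRecord->ExceptionCode == STATUS_ACCESS_VIOLATION) {
    // Re-align the stack as if Recover() had just been called, then point the
    // interrupted thread at it; returning EXCEPTION_CONTINUE_EXECUTION
    // resumes the thread with the modified context.
    ex_info->ContextRecord->Rsp &= ~0xFull;
    ex_info->ContextRecord->Rsp -= 8;
    ex_info->ContextRecord->Rip = reinterpret_cast<DWORD64>(&Recover);
    return EXCEPTION_CONTINUE_EXECUTION;
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

int main() {
  AddVectoredExceptionHandler(1, RedirectingHandler);
  *reinterpret_cast<volatile int*>(4) = 0;  // Guaranteed access violation.
  return 1;  // Never reached.
}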

View File

@@ -4,6 +4,8 @@
'cpu-private.h',
'cpu.cc',
'cpu.h',
+'mmio_handler.cc',
+'mmio_handler.h',
'processor.cc',
'processor.h',
'xenon_memory.cc',
@@ -15,4 +17,25 @@
'xex_module.cc',
'xex_module.h',
],
+'conditions': [
+['OS == "mac" or OS == "linux"', {
+'sources': [
+],
+}],
+['OS == "linux"', {
+'sources': [
+],
+}],
+['OS == "mac"', {
+'sources': [
+'mmio_handler_mac.cc',
+],
+}],
+['OS == "win"', {
+'sources': [
+'mmio_handler_win.cc',
+],
+}],
+],
}

View File

@@ -118,117 +118,6 @@ private:
};
uint32_t XenonMemoryHeap::next_heap_id_ = 1;
-namespace {
-namespace BE {
-#include <beaengine/BeaEngine.h>
-}
-struct MMIORange {
-uint64_t address;
-uint64_t mask;
-uint64_t size;
-void* context;
-MMIOReadCallback read;
-MMIOWriteCallback write;
-};
-MMIORange g_mapped_ranges_[16] = { 0 };
-int g_mapped_range_count_ = 0;
-uint64_t* GetContextRegPtr(BE::Int32 arg_type, PCONTEXT context) {
-DWORD index = 0;
-_BitScanForward(&index, arg_type);
-return &context->Rax + index;
-}
-// Handles potential accesses to mmio. We look for access violations to
-// addresses in our range and call into the registered handlers, if any.
-// If there are none, we continue.
-LONG CALLBACK CheckMMIOHandler(PEXCEPTION_POINTERS ex_info) {
-// http://msdn.microsoft.com/en-us/library/ms679331(v=vs.85).aspx
-// http://msdn.microsoft.com/en-us/library/aa363082(v=vs.85).aspx
-auto code = ex_info->ExceptionRecord->ExceptionCode;
-if (code == STATUS_ACCESS_VIOLATION) {
-// Access violations are pretty rare, so we can do a linear search here.
-auto address = ex_info->ExceptionRecord->ExceptionInformation[1];
-for (int i = 0; i < g_mapped_range_count_; ++i) {
-const auto& range = g_mapped_ranges_[i];
-if ((address & range.mask) == range.address) {
-// Within our range.
-// TODO(benvanik): replace with simple check of mov (that's all
-// we care about).
-BE::DISASM disasm = { 0 };
-disasm.Archi = 64;
-disasm.Options = BE::MasmSyntax + BE::PrefixedNumeral;
-disasm.EIP = (BE::UIntPtr)ex_info->ExceptionRecord->ExceptionAddress;
-BE::UIntPtr eip_end = disasm.EIP + 20;
-size_t len = BE::Disasm(&disasm);
-if (len == BE::UNKNOWN_OPCODE) {
-break;
-}
-auto action = ex_info->ExceptionRecord->ExceptionInformation[0];
-if (action == 0) {
-uint64_t value = range.read(range.context, address & 0xFFFFFFFF);
-assert_true((disasm.Argument1.ArgType & BE::REGISTER_TYPE) ==
-BE::REGISTER_TYPE);
-uint64_t* reg_ptr = GetContextRegPtr(disasm.Argument1.ArgType,
-ex_info->ContextRecord);
-switch (disasm.Argument1.ArgSize) {
-case 8:
-*reg_ptr = static_cast<uint8_t>(value);
-break;
-case 16:
-*reg_ptr = poly::byte_swap(static_cast<uint16_t>(value));
-break;
-case 32:
-*reg_ptr = poly::byte_swap(static_cast<uint32_t>(value));
-break;
-case 64:
-*reg_ptr = poly::byte_swap(static_cast<uint64_t>(value));
-break;
-}
-ex_info->ContextRecord->Rip += len;
-return EXCEPTION_CONTINUE_EXECUTION;
-} else if (action == 1) {
-uint64_t value;
-if ((disasm.Argument2.ArgType & BE::REGISTER_TYPE) == BE::REGISTER_TYPE) {
-uint64_t* reg_ptr = GetContextRegPtr(disasm.Argument2.ArgType,
-ex_info->ContextRecord);
-value = *reg_ptr;
-} else if ((disasm.Argument2.ArgType & BE::CONSTANT_TYPE) == BE::CONSTANT_TYPE) {
-value = disasm.Instruction.Immediat;
-} else {
-assert_always();
-}
-switch (disasm.Argument2.ArgSize) {
-case 8:
-value = static_cast<uint8_t>(value);
-break;
-case 16:
-value = poly::byte_swap(static_cast<uint16_t>(value));
-break;
-case 32:
-value = poly::byte_swap(static_cast<uint32_t>(value));
-break;
-case 64:
-value = poly::byte_swap(static_cast<uint64_t>(value));
-break;
-}
-range.write(range.context, address & 0xFFFFFFFF, value);
-ex_info->ContextRecord->Rip += len;
-return EXCEPTION_CONTINUE_EXECUTION;
-}
-}
-}
-}
-return EXCEPTION_CONTINUE_SEARCH;
-}
-} // namespace
XenonMemory::XenonMemory()
: Memory(),
mapping_(0), mapping_base_(0), page_table_(0) {
@@ -237,9 +126,9 @@ XenonMemory::XenonMemory()
}
XenonMemory::~XenonMemory() {
-// Remove exception handlers.
-RemoveVectoredExceptionHandler(CheckMMIOHandler);
-RemoveVectoredContinueHandler(CheckMMIOHandler);
+// Uninstall the MMIO handler, as we won't be able to service more
+// requests.
+mmio_handler_.reset();
// Unallocate mapped ranges.
for (int i = 0; i < g_mapped_range_count_; ++i) {
@@ -319,12 +208,11 @@ int XenonMemory::Initialize() {
MEM_COMMIT, PAGE_READWRITE);
// Add handlers for MMIO.
-// If there is a debugger attached the normal exception handler will not
-// fire and we must instead add the continue handler.
-AddVectoredExceptionHandler(1, CheckMMIOHandler);
-if (IsDebuggerPresent()) {
-// TODO(benvanik): is this really required?
-//AddVectoredContinueHandler(1, CheckMMIOHandler);
+mmio_handler_ = MMIOHandler::Install();
+if (!mmio_handler_) {
+XELOGE("Unable to install MMIO handlers");
+assert_always();
+XEFAIL();
}
// Allocate dirty page table.
@@ -382,44 +270,19 @@ bool XenonMemory::AddMappedRange(uint64_t address, uint64_t mask,
uint64_t size, void* context,
MMIOReadCallback read_callback,
MMIOWriteCallback write_callback) {
-DWORD protect = 0;
-if (read_callback && write_callback) {
-protect = PAGE_NOACCESS;
-} else if (write_callback) {
-protect = PAGE_READONLY;
-} else {
-// Write-only memory is not supported.
-assert_always();
-}
+DWORD protect = PAGE_NOACCESS;
if (!VirtualAlloc(Translate(address),
size,
MEM_COMMIT, protect)) {
+XELOGE("Unable to map range; commit/protect failed");
return false;
}
-assert_true(g_mapped_range_count_ + 1 < XECOUNT(g_mapped_ranges_));
-g_mapped_ranges_[g_mapped_range_count_++] = {
-reinterpret_cast<uint64_t>(mapping_base_) | address,
-0xFFFFFFFF00000000 | mask,
-size, context,
-read_callback, write_callback,
-};
-return true;
-}
-bool XenonMemory::CheckMMIOLoad(uint64_t address, uint64_t* out_value) {
-for (int i = 0; i < g_mapped_range_count_; ++i) {
-const auto& range = g_mapped_ranges_[i];
-if (((address | (uint64_t)mapping_base_) & range.mask) == range.address) {
-*out_value = static_cast<uint32_t>(range.read(range.context, address));
-return true;
-}
-}
-return false;
+return mmio_handler_->RegisterRange(address, mask, size, context, read_callback, write_callback);
}
uint8_t XenonMemory::LoadI8(uint64_t address) {
uint64_t value;
-if (!CheckMMIOLoad(address, &value)) {
+if (!mmio_handler_->CheckLoad(address, &value)) {
value = *reinterpret_cast<uint8_t*>(Translate(address));
}
return static_cast<uint8_t>(value);
@@ -427,7 +290,7 @@ uint8_t XenonMemory::LoadI8(uint64_t address) {
uint16_t XenonMemory::LoadI16(uint64_t address) {
uint64_t value;
-if (!CheckMMIOLoad(address, &value)) {
+if (!mmio_handler_->CheckLoad(address, &value)) {
value = *reinterpret_cast<uint16_t*>(Translate(address));
}
return static_cast<uint16_t>(value);
@@ -435,7 +298,7 @@ uint16_t XenonMemory::LoadI16(uint64_t address) {
uint32_t XenonMemory::LoadI32(uint64_t address) {
uint64_t value;
-if (!CheckMMIOLoad(address, &value)) {
+if (!mmio_handler_->CheckLoad(address, &value)) {
value = *reinterpret_cast<uint32_t*>(Translate(address));
}
return static_cast<uint32_t>(value);
@@ -443,43 +306,32 @@ uint32_t XenonMemory::LoadI32(uint64_t address) {
uint64_t XenonMemory::LoadI64(uint64_t address) {
uint64_t value;
-if (!CheckMMIOLoad(address, &value)) {
+if (!mmio_handler_->CheckLoad(address, &value)) {
value = *reinterpret_cast<uint64_t*>(Translate(address));
}
return static_cast<uint64_t>(value);
}
-bool XenonMemory::CheckMMIOStore(uint64_t address, uint64_t value) {
-for (int i = 0; i < g_mapped_range_count_; ++i) {
-const auto& range = g_mapped_ranges_[i];
-if (((address | (uint64_t)mapping_base_) & range.mask) == range.address) {
-range.write(range.context, address, value);
-return true;
-}
-}
-return false;
-}
void XenonMemory::StoreI8(uint64_t address, uint8_t value) {
-if (!CheckMMIOStore(address, value)) {
+if (!mmio_handler_->CheckStore(address, value)) {
*reinterpret_cast<uint8_t*>(Translate(address)) = value;
}
}
void XenonMemory::StoreI16(uint64_t address, uint16_t value) {
-if (!CheckMMIOStore(address, value)) {
+if (!mmio_handler_->CheckStore(address, value)) {
*reinterpret_cast<uint16_t*>(Translate(address)) = value;
}
}
void XenonMemory::StoreI32(uint64_t address, uint32_t value) {
-if (!CheckMMIOStore(address, value)) {
+if (!mmio_handler_->CheckStore(address, value)) {
*reinterpret_cast<uint32_t*>(Translate(address)) = value;
}
}
void XenonMemory::StoreI64(uint64_t address, uint64_t value) {
-if (!CheckMMIOStore(address, value)) {
+if (!mmio_handler_->CheckStore(address, value)) {
*reinterpret_cast<uint64_t*>(Translate(address)) = value;
}
}

View File

@@ -10,9 +10,12 @@
#ifndef XENIA_CPU_XENON_MEMORY_H_
#define XENIA_CPU_XENON_MEMORY_H_
+#include <memory>
#include <alloy/memory.h>
#include <xenia/core.h>
+#include <xenia/cpu/mmio_handler.h>
typedef struct xe_ppc_state xe_ppc_state_t;
@@ -22,10 +25,6 @@ namespace cpu {
class XenonMemoryHeap;
-typedef uint64_t (*MMIOReadCallback)(void* context, uint64_t addr);
-typedef void (*MMIOWriteCallback)(void* context, uint64_t addr,
-uint64_t value);
class XenonMemory : public alloy::Memory {
public:
XenonMemory();
@@ -38,8 +37,8 @@ public:
bool AddMappedRange(uint64_t address, uint64_t mask,
uint64_t size,
void* context,
-MMIOReadCallback read_callback = nullptr,
-MMIOWriteCallback write_callback = nullptr);
+MMIOReadCallback read_callback,
+MMIOWriteCallback write_callback);
uint8_t LoadI8(uint64_t address) override;
uint16_t LoadI16(uint64_t address) override;
@@ -64,9 +63,6 @@ private:
int MapViews(uint8_t* mapping_base);
void UnmapViews();
-bool CheckMMIOLoad(uint64_t address, uint64_t* out_value);
-bool CheckMMIOStore(uint64_t address, uint64_t value);
private:
HANDLE mapping_;
uint8_t* mapping_base_;
@@ -82,6 +78,8 @@ private:
uint8_t* all_views[6];
} views_;
+std::unique_ptr<MMIOHandler> mmio_handler_;
XenonMemoryHeap* virtual_heap_;
XenonMemoryHeap* physical_heap_;