Pure dynamic MMIO access. Prep for more complex GPU memory management.
parent 3a8065b7b1
commit 0e3854555d
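Orientation note: the hunks below drop the linked-list RegisterAccessCallbacks mechanism from the Alloy runtime and route guest loads/stores through mapped MMIO ranges that are resolved by an address/mask match. The following sketch is only a simplified, hypothetical model of that dispatch; the struct, helper functions, and main() are stand-ins rather than the project's actual API, but they show the shape the diff moves toward.

#include <cstdint>
#include <cstdio>
#include <vector>

// Simplified stand-in for the mapped-range MMIO dispatch added in this commit.
typedef uint64_t (*MMIOReadCallback)(void* context, uint64_t addr);
typedef void (*MMIOWriteCallback)(void* context, uint64_t addr, uint64_t value);

struct MMIORange {
  uint64_t address;  // base address the range responds to
  uint64_t mask;     // bits that must match `address`
  void* context;
  MMIOReadCallback read;
  MMIOWriteCallback write;
};

std::vector<MMIORange> g_ranges;

// Returns true and dispatches to the registered handler when the address is
// inside a mapped MMIO range; otherwise the caller falls back to plain memory.
bool MMIOLoad(uint64_t addr, uint64_t* out_value) {
  for (const auto& range : g_ranges) {
    if ((addr & range.mask) == range.address) {
      *out_value = range.read(range.context, addr);
      return true;
    }
  }
  return false;
}

uint64_t ReadAudioRegister(void* /*context*/, uint64_t addr) {
  std::printf("audio register read at %08llX\n",
              static_cast<unsigned long long>(addr));
  return 0;
}

void WriteAudioRegister(void* /*context*/, uint64_t addr, uint64_t value) {
  std::printf("audio register write %08llX = %llX\n",
              static_cast<unsigned long long>(addr),
              static_cast<unsigned long long>(value));
}

int main() {
  // Mirrors the AudioSystem::Setup() registration further below: a 0x7FEAxxxx block.
  g_ranges.push_back({0x7FEA0000ull, 0xFFFF0000ull, nullptr,
                      ReadAudioRegister, WriteAudioRegister});
  uint64_t value = 0;
  MMIOLoad(0x7FEA0010ull, &value);      // hits the audio range, calls ReadAudioRegister
  if (!MMIOLoad(0x80000000ull, &value)) {
    // Not MMIO: an emulator would do a direct host-memory load here instead.
  }
  return 0;
}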
@@ -61,7 +61,6 @@ int IVMAssembler::Assemble(
   fn->set_debug_info(debug_info);
 
   TranslationContext ctx;
-  ctx.access_callbacks = backend_->runtime()->access_callbacks();
   ctx.register_count = 0;
   ctx.intcode_count = 0;
   ctx.intcode_arena = &intcode_arena_;
@@ -120,7 +120,6 @@ int IVMFunction::CallImpl(ThreadState* thread_state, uint64_t return_address) {
   ics.membase = memory->membase();
   ics.did_carry = 0;
   ics.did_saturate = 0;
-  ics.access_callbacks = thread_state->runtime()->access_callbacks();
   ics.thread_state = thread_state;
   ics.return_address = return_address;
   ics.call_return_address = 0;
@@ -196,213 +196,6 @@ int DispatchToC(TranslationContext& ctx, Instr* i, IntCodeFn fn) {
   return 0;
 }
 
-uint32_t IntCode_LOAD_REGISTER_I8(IntCodeState& ics, const IntCode* i) {
-  uint64_t address = ics.rf[i->src1_reg].u32;
-  RegisterAccessCallbacks* cbs = (RegisterAccessCallbacks*)
-      (i->src2_reg | ((uint64_t)i->src3_reg << 32));
-  ics.rf[i->dest_reg].i8 = (int8_t)cbs->read(cbs->context, address);
-  return IA_NEXT;
-}
-uint32_t IntCode_LOAD_REGISTER_I16(IntCodeState& ics, const IntCode* i) {
-  uint64_t address = ics.rf[i->src1_reg].u32;
-  RegisterAccessCallbacks* cbs = (RegisterAccessCallbacks*)
-      (i->src2_reg | ((uint64_t)i->src3_reg << 32));
-  ics.rf[i->dest_reg].i16 = XESWAP16((int16_t)cbs->read(cbs->context, address));
-  return IA_NEXT;
-}
-uint32_t IntCode_LOAD_REGISTER_I32(IntCodeState& ics, const IntCode* i) {
-  uint64_t address = ics.rf[i->src1_reg].u32;
-  RegisterAccessCallbacks* cbs = (RegisterAccessCallbacks*)
-      (i->src2_reg | ((uint64_t)i->src3_reg << 32));
-  ics.rf[i->dest_reg].i32 = XESWAP32((int32_t)cbs->read(cbs->context, address));
-  return IA_NEXT;
-}
-uint32_t IntCode_LOAD_REGISTER_I64(IntCodeState& ics, const IntCode* i) {
-  uint64_t address = ics.rf[i->src1_reg].u32;
-  RegisterAccessCallbacks* cbs = (RegisterAccessCallbacks*)
-      (i->src2_reg | ((uint64_t)i->src3_reg << 32));
-  ics.rf[i->dest_reg].i64 = XESWAP64((int64_t)cbs->read(cbs->context, address));
-  return IA_NEXT;
-}
-int DispatchRegisterRead(
-    TranslationContext& ctx, Instr* i, RegisterAccessCallbacks* cbs) {
-  static IntCodeFn fns[] = {
-    IntCode_LOAD_REGISTER_I8,
-    IntCode_LOAD_REGISTER_I16,
-    IntCode_LOAD_REGISTER_I32,
-    IntCode_LOAD_REGISTER_I64,
-    IntCode_INVALID_TYPE,
-    IntCode_INVALID_TYPE,
-    IntCode_INVALID_TYPE,
-  };
-  IntCodeFn fn = fns[i->dest->type];
-  XEASSERT(fn != IntCode_INVALID_TYPE);
-  uint32_t dest_reg = AllocDynamicRegister(ctx, i->dest);
-  uint32_t src1_reg = AllocOpRegister(ctx, OPCODE_SIG_TYPE_V, &i->src1);
-  ctx.intcode_count++;
-  IntCode* ic = ctx.intcode_arena->Alloc<IntCode>();
-  ic->intcode_fn = fn;
-  ic->flags = i->flags;
-  ic->debug_flags = 0;
-  ic->dest_reg = dest_reg;
-  ic->src1_reg = src1_reg;
-  ic->src2_reg = (uint32_t)((uint64_t)cbs);
-  ic->src3_reg = (uint32_t)(((uint64_t)cbs) >> 32);
-  return 0;
-}
-uint32_t IntCode_LOAD_REGISTER_I8_DYNAMIC(IntCodeState& ics, const IntCode* i) {
-  uint64_t address = ics.rf[i->src1_reg].u32;
-  RegisterAccessCallbacks* cbs = ics.access_callbacks;
-  while (cbs) {
-    if (cbs->handles(cbs->context, address)) {
-      ics.rf[i->dest_reg].i8 = (int8_t)cbs->read(cbs->context, address);
-      return IA_NEXT;
-    }
-    cbs = cbs->next;
-  }
-  return IA_NEXT;
-}
-uint32_t IntCode_LOAD_REGISTER_I16_DYNAMIC(IntCodeState& ics, const IntCode* i) {
-  uint64_t address = ics.rf[i->src1_reg].u32;
-  RegisterAccessCallbacks* cbs = ics.access_callbacks;
-  while (cbs) {
-    if (cbs->handles(cbs->context, address)) {
-      ics.rf[i->dest_reg].i16 = XESWAP16((int16_t)cbs->read(cbs->context, address));
-      return IA_NEXT;
-    }
-    cbs = cbs->next;
-  }
-  return IA_NEXT;
-}
-uint32_t IntCode_LOAD_REGISTER_I32_DYNAMIC(IntCodeState& ics, const IntCode* i) {
-  uint64_t address = ics.rf[i->src1_reg].u32;
-  RegisterAccessCallbacks* cbs = ics.access_callbacks;
-  while (cbs) {
-    if (cbs->handles(cbs->context, address)) {
-      ics.rf[i->dest_reg].i32 = XESWAP32((int32_t)cbs->read(cbs->context, address));
-      return IA_NEXT;
-    }
-    cbs = cbs->next;
-  }
-  return IA_NEXT;
-}
-uint32_t IntCode_LOAD_REGISTER_I64_DYNAMIC(IntCodeState& ics, const IntCode* i) {
-  uint64_t address = ics.rf[i->src1_reg].u32;
-  RegisterAccessCallbacks* cbs = ics.access_callbacks;
-  while (cbs) {
-    if (cbs->handles(cbs->context, address)) {
-      ics.rf[i->dest_reg].i64 = XESWAP64((int64_t)cbs->read(cbs->context, address));
-      return IA_NEXT;
-    }
-    cbs = cbs->next;
-  }
-  return IA_NEXT;
-}
-
-uint32_t IntCode_STORE_REGISTER_I8(IntCodeState& ics, const IntCode* i) {
-  uint64_t address = ics.rf[i->src1_reg].u32;
-  RegisterAccessCallbacks* cbs = (RegisterAccessCallbacks*)
-      (i->src3_reg | ((uint64_t)i->dest_reg << 32));
-  cbs->write(cbs->context, address, ics.rf[i->src2_reg].i8);
-  return IA_NEXT;
-}
-uint32_t IntCode_STORE_REGISTER_I16(IntCodeState& ics, const IntCode* i) {
-  uint64_t address = ics.rf[i->src1_reg].u32;
-  RegisterAccessCallbacks* cbs = (RegisterAccessCallbacks*)
-      (i->src3_reg | ((uint64_t)i->dest_reg << 32));
-  cbs->write(cbs->context, address, XESWAP16(ics.rf[i->src2_reg].i16));
-  return IA_NEXT;
-}
-uint32_t IntCode_STORE_REGISTER_I32(IntCodeState& ics, const IntCode* i) {
-  uint64_t address = ics.rf[i->src1_reg].u32;
-  RegisterAccessCallbacks* cbs = (RegisterAccessCallbacks*)
-      (i->src3_reg | ((uint64_t)i->dest_reg << 32));
-  cbs->write(cbs->context, address, XESWAP32(ics.rf[i->src2_reg].i32));
-  return IA_NEXT;
-}
-uint32_t IntCode_STORE_REGISTER_I64(IntCodeState& ics, const IntCode* i) {
-  uint64_t address = ics.rf[i->src1_reg].u32;
-  RegisterAccessCallbacks* cbs = (RegisterAccessCallbacks*)
-      (i->src3_reg | ((uint64_t)i->dest_reg << 32));
-  cbs->write(cbs->context, address, XESWAP64(ics.rf[i->src2_reg].i64));
-  return IA_NEXT;
-}
-int DispatchRegisterWrite(
-    TranslationContext& ctx, Instr* i, RegisterAccessCallbacks* cbs) {
-  static IntCodeFn fns[] = {
-    IntCode_STORE_REGISTER_I8,
-    IntCode_STORE_REGISTER_I16,
-    IntCode_STORE_REGISTER_I32,
-    IntCode_STORE_REGISTER_I64,
-    IntCode_INVALID_TYPE,
-    IntCode_INVALID_TYPE,
-    IntCode_INVALID_TYPE,
-  };
-  IntCodeFn fn = fns[i->src2.value->type];
-  XEASSERT(fn != IntCode_INVALID_TYPE);
-  uint32_t src1_reg = AllocOpRegister(ctx, OPCODE_SIG_TYPE_V, &i->src1);
-  uint32_t src2_reg = AllocOpRegister(ctx, OPCODE_SIG_TYPE_V, &i->src2);
-  ctx.intcode_count++;
-  IntCode* ic = ctx.intcode_arena->Alloc<IntCode>();
-  ic->intcode_fn = fn;
-  ic->flags = i->flags;
-  ic->debug_flags = 0;
-  ic->dest_reg = (uint32_t)(((uint64_t)cbs) >> 32);
-  ic->src1_reg = src1_reg;
-  ic->src2_reg = src2_reg;
-  ic->src3_reg = (uint32_t)((uint64_t)cbs);
-  return 0;
-}
-uint32_t IntCode_STORE_REGISTER_I8_DYNAMIC(IntCodeState& ics, const IntCode* i) {
-  uint64_t address = ics.rf[i->src1_reg].u32;
-  RegisterAccessCallbacks* cbs = ics.access_callbacks;
-  while (cbs) {
-    if (cbs->handles(cbs->context, address)) {
-      cbs->write(cbs->context, address, ics.rf[i->src2_reg].i8);
-      return IA_NEXT;
-    }
-    cbs = cbs->next;
-  }
-  return IA_NEXT;
-}
-uint32_t IntCode_STORE_REGISTER_I16_DYNAMIC(IntCodeState& ics, const IntCode* i) {
-  uint64_t address = ics.rf[i->src1_reg].u32;
-  RegisterAccessCallbacks* cbs = ics.access_callbacks;
-  while (cbs) {
-    if (cbs->handles(cbs->context, address)) {
-      cbs->write(cbs->context, address, XESWAP16(ics.rf[i->src2_reg].i16));
-      return IA_NEXT;
-    }
-    cbs = cbs->next;
-  }
-  return IA_NEXT;
-}
-uint32_t IntCode_STORE_REGISTER_I32_DYNAMIC(IntCodeState& ics, const IntCode* i) {
-  uint64_t address = ics.rf[i->src1_reg].u32;
-  RegisterAccessCallbacks* cbs = ics.access_callbacks;
-  while (cbs) {
-    if (cbs->handles(cbs->context, address)) {
-      cbs->write(cbs->context, address, XESWAP32(ics.rf[i->src2_reg].i32));
-      return IA_NEXT;
-    }
-    cbs = cbs->next;
-  }
-  return IA_NEXT;
-}
-uint32_t IntCode_STORE_REGISTER_I64_DYNAMIC(IntCodeState& ics, const IntCode* i) {
-  uint64_t address = ics.rf[i->src1_reg].u32;
-  RegisterAccessCallbacks* cbs = ics.access_callbacks;
-  while (cbs) {
-    if (cbs->handles(cbs->context, address)) {
-      cbs->write(cbs->context, address, XESWAP64(ics.rf[i->src2_reg].i64));
-      return IA_NEXT;
-    }
-    cbs = cbs->next;
-  }
-  return IA_NEXT;
-}
-
-
 uint32_t IntCode_INVALID(IntCodeState& ics, const IntCode* i) {
   XEASSERTALWAYS();
   return IA_NEXT;
@@ -1549,7 +1342,8 @@ int Translate_STORE_CONTEXT(TranslationContext& ctx, Instr* i) {
 uint32_t IntCode_LOAD_I8(IntCodeState& ics, const IntCode* i) {
   uint32_t address = ics.rf[i->src1_reg].u32;
   if (DYNAMIC_REGISTER_ACCESS_CHECK(address)) {
-    return IntCode_LOAD_REGISTER_I8_DYNAMIC(ics, i);
+    ics.rf[i->dest_reg].i8 = ics.thread_state->memory()->LoadI8(address);
+    return IA_NEXT;
   }
   DPRINT("%d (%X) = load.i8 %.8X\n",
          *((int8_t*)(ics.membase + address)),
@@ -1562,7 +1356,9 @@ uint32_t IntCode_LOAD_I8(IntCodeState& ics, const IntCode* i) {
 uint32_t IntCode_LOAD_I16(IntCodeState& ics, const IntCode* i) {
   uint32_t address = ics.rf[i->src1_reg].u32;
   if (DYNAMIC_REGISTER_ACCESS_CHECK(address)) {
-    return IntCode_LOAD_REGISTER_I16_DYNAMIC(ics, i);
+    ics.rf[i->dest_reg].i16 =
+        XESWAP16(ics.thread_state->memory()->LoadI16(address));
+    return IA_NEXT;
   }
   DPRINT("%d (%X) = load.i16 %.8X\n",
          *((int16_t*)(ics.membase + address)),
@@ -1575,7 +1371,9 @@ uint32_t IntCode_LOAD_I16(IntCodeState& ics, const IntCode* i) {
 uint32_t IntCode_LOAD_I32(IntCodeState& ics, const IntCode* i) {
   uint32_t address = ics.rf[i->src1_reg].u32;
   if (DYNAMIC_REGISTER_ACCESS_CHECK(address)) {
-    return IntCode_LOAD_REGISTER_I32_DYNAMIC(ics, i);
+    ics.rf[i->dest_reg].i32 =
+        XESWAP32(ics.thread_state->memory()->LoadI32(address));
+    return IA_NEXT;
   }
   DFLUSH();
   DPRINT("%d (%X) = load.i32 %.8X\n",
@@ -1589,7 +1387,9 @@ uint32_t IntCode_LOAD_I32(IntCodeState& ics, const IntCode* i) {
 uint32_t IntCode_LOAD_I64(IntCodeState& ics, const IntCode* i) {
   uint32_t address = ics.rf[i->src1_reg].u32;
   if (DYNAMIC_REGISTER_ACCESS_CHECK(address)) {
-    return IntCode_LOAD_REGISTER_I64(ics, i);
+    ics.rf[i->dest_reg].i64 =
+        XESWAP64(ics.thread_state->memory()->LoadI64(address));
+    return IA_NEXT;
   }
   DPRINT("%lld (%llX) = load.i64 %.8X\n",
          *((int64_t*)(ics.membase + address)),
@@ -1642,26 +1442,14 @@ int Translate_LOAD(TranslationContext& ctx, Instr* i) {
     IntCode_LOAD_F64,
     IntCode_LOAD_V128,
   };
-  if (i->src1.value->IsConstant()) {
-    // Constant address - check register access callbacks.
-    // NOTE: we still will likely want to check on access in debug mode, as
-    //     constant propagation may not have happened.
-    uint64_t address = i->src1.value->AsUint64();
-    RegisterAccessCallbacks* cbs = ctx.access_callbacks;
-    while (cbs) {
-      if (cbs->handles(cbs->context, address)) {
-        return DispatchRegisterRead(ctx, i, cbs);
-      }
-      cbs = cbs->next;
-    }
-  }
   return DispatchToC(ctx, i, fns[i->dest->type]);
 }
 
 uint32_t IntCode_STORE_I8(IntCodeState& ics, const IntCode* i) {
   uint32_t address = ics.rf[i->src1_reg].u32;
   if (DYNAMIC_REGISTER_ACCESS_CHECK(address)) {
-    return IntCode_STORE_REGISTER_I8_DYNAMIC(ics, i);
+    ics.thread_state->memory()->StoreI8(address, ics.rf[i->src2_reg].i8);
+    return IA_NEXT;
   }
   DPRINT("store.i8 %.8X = %d (%X)\n",
          address, ics.rf[i->src2_reg].i8, ics.rf[i->src2_reg].u8);
@@ -1672,7 +1460,9 @@ uint32_t IntCode_STORE_I8(IntCodeState& ics, const IntCode* i) {
 uint32_t IntCode_STORE_I16(IntCodeState& ics, const IntCode* i) {
   uint32_t address = ics.rf[i->src1_reg].u32;
   if (DYNAMIC_REGISTER_ACCESS_CHECK(address)) {
-    return IntCode_STORE_REGISTER_I16_DYNAMIC(ics, i);
+    ics.thread_state->memory()->StoreI16(address,
+                                         XESWAP16(ics.rf[i->src2_reg].i16));
+    return IA_NEXT;
   }
   DPRINT("store.i16 %.8X = %d (%X)\n",
          address, ics.rf[i->src2_reg].i16, ics.rf[i->src2_reg].u16);
@@ -1683,7 +1473,9 @@ uint32_t IntCode_STORE_I16(IntCodeState& ics, const IntCode* i) {
 uint32_t IntCode_STORE_I32(IntCodeState& ics, const IntCode* i) {
   uint32_t address = ics.rf[i->src1_reg].u32;
   if (DYNAMIC_REGISTER_ACCESS_CHECK(address)) {
-    return IntCode_STORE_REGISTER_I32_DYNAMIC(ics, i);
+    ics.thread_state->memory()->StoreI32(address,
+                                         XESWAP32(ics.rf[i->src2_reg].i32));
+    return IA_NEXT;
   }
   DPRINT("store.i32 %.8X = %d (%X)\n",
          address, ics.rf[i->src2_reg].i32, ics.rf[i->src2_reg].u32);
@@ -1694,7 +1486,9 @@ uint32_t IntCode_STORE_I32(IntCodeState& ics, const IntCode* i) {
 uint32_t IntCode_STORE_I64(IntCodeState& ics, const IntCode* i) {
   uint32_t address = ics.rf[i->src1_reg].u32;
   if (DYNAMIC_REGISTER_ACCESS_CHECK(address)) {
-    return IntCode_STORE_REGISTER_I64_DYNAMIC(ics, i);
+    ics.thread_state->memory()->StoreI64(address,
+                                         XESWAP64(ics.rf[i->src2_reg].i64));
+    return IA_NEXT;
  }
   DPRINT("store.i64 %.8X = %lld (%llX)\n",
          address, ics.rf[i->src2_reg].i64, ics.rf[i->src2_reg].u64);
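In the interpreter hunks above, the fast path for normal guest RAM is unchanged; only addresses that trip DYNAMIC_REGISTER_ACCESS_CHECK are now routed through the Memory interface (LoadIxx/StoreIxx), which consults the MMIO range table. The macro itself is not part of this diff; a plausible reading, inferred from the removed x64 EmitLoadCheck helper that compared the top byte of the guest address against 0x7F, is sketched below as an illustration only, not the actual definition.

// Hypothetical form of the check, inferred from the 0x7F upper-byte test in
// the removed x64 EmitLoadCheck code; treat as an assumption.
#define DYNAMIC_REGISTER_ACCESS_CHECK(address) (((address) >> 24) == 0x7F)

#include <cstdint>
#include <cassert>

int main() {
  assert(DYNAMIC_REGISTER_ACCESS_CHECK(0x7FEA0010u));   // MMIO-looking address
  assert(!DYNAMIC_REGISTER_ACCESS_CHECK(0x82000000u));  // ordinary guest RAM
  return 0;
}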
@@ -1738,19 +1532,6 @@ int Translate_STORE(TranslationContext& ctx, Instr* i) {
     IntCode_STORE_F64,
     IntCode_STORE_V128,
   };
-  if (i->src1.value->IsConstant()) {
-    // Constant address - check register access callbacks.
-    // NOTE: we still will likely want to check on access in debug mode, as
-    //     constant propagation may not have happened.
-    uint64_t address = i->src1.value->AsUint64();
-    RegisterAccessCallbacks* cbs = ctx.access_callbacks;
-    while (cbs) {
-      if (cbs->handles(cbs->context, address)) {
-        return DispatchRegisterWrite(ctx, i, cbs);
-      }
-      cbs = cbs->next;
-    }
-  }
   return DispatchToC(ctx, i, fns[i->src2.value->type]);
 }
 
@@ -14,7 +14,6 @@
 
 #include <alloy/hir/instr.h>
 #include <alloy/hir/opcodes.h>
-#include <alloy/runtime/register_access.h>
 
 namespace alloy { namespace runtime { class ThreadState; } }
 
@@ -46,7 +45,6 @@ typedef struct {
   uint8_t* membase;
   int8_t did_carry;
   int8_t did_saturate;
-  runtime::RegisterAccessCallbacks* access_callbacks;
   runtime::ThreadState* thread_state;
   uint64_t return_address;
   uint64_t call_return_address;
@@ -97,8 +95,6 @@ typedef struct SourceMapEntry_s {
 
 
 typedef struct {
-  runtime::RegisterAccessCallbacks* access_callbacks;
-
   uint32_t register_count;
   size_t intcode_count;
   Arena* intcode_arena;
@@ -1456,42 +1456,6 @@ EMITTER_OPCODE_TABLE(
 // ============================================================================
 // Note: most *should* be aligned, but needs to be checked!
 template <typename T>
-bool CheckLoadAccessCallback(X64Emitter& e, const T& i) {
-  // If this is a constant address load, check to see if it's in a
-  // register range. We'll also probably want a dynamic check for
-  // unverified stores. So far, most games use constants.
-  if (!i.src1.is_constant) {
-    return false;
-  }
-  uint64_t address = i.src1.constant() & 0xFFFFFFFF;
-  auto cbs = e.runtime()->access_callbacks();
-  while (cbs) {
-    if (cbs->handles(cbs->context, address)) {
-      e.mov(e.rcx, reinterpret_cast<uint64_t>(cbs->context));
-      e.mov(e.rdx, address);
-      e.CallNative(cbs->read);
-      if (T::dest_type == KEY_TYPE_V_I8) {
-        // No swap required.
-        e.mov(i.dest, e.al);
-      } else if (T::dest_type == KEY_TYPE_V_I16) {
-        e.ror(e.ax, 8);
-        e.mov(i.dest, e.ax);
-      } else if (T::dest_type == KEY_TYPE_V_I32) {
-        e.bswap(e.eax);
-        e.mov(i.dest, e.eax);
-      } else if (T::dest_type == KEY_TYPE_V_I64) {
-        e.bswap(e.rax);
-        e.mov(i.dest, e.rax);
-      } else {
-        XEASSERTALWAYS();
-      }
-      return true;
-    }
-    cbs = cbs->next;
-  }
-  return false;
-}
-template <typename T>
 RegExp ComputeMemoryAddress(X64Emitter& e, const T& guest) {
   if (guest.is_constant) {
     // TODO(benvanik): figure out how to do this without a temp.
@@ -1506,128 +1470,12 @@ RegExp ComputeMemoryAddress(X64Emitter& e, const T& guest) {
     return e.rdx + e.rax;
   }
 }
-uint64_t DynamicRegisterLoad(void* raw_context, uint32_t address) {
-  auto thread_state = *((ThreadState**)raw_context);
-  auto cbs = thread_state->runtime()->access_callbacks();
-  while (cbs) {
-    if (cbs->handles(cbs->context, address)) {
-      return cbs->read(cbs->context, address);
-    }
-    cbs = cbs->next;
-  }
-  return 0;
-}
-void DynamicRegisterStore(void* raw_context, uint32_t address, uint64_t value) {
-  auto thread_state = *((ThreadState**)raw_context);
-  auto cbs = thread_state->runtime()->access_callbacks();
-  while (cbs) {
-    if (cbs->handles(cbs->context, address)) {
-      cbs->write(cbs->context, address, value);
-      return;
-    }
-    cbs = cbs->next;
-  }
-}
-template <typename DEST_REG>
-void EmitLoadCheck(X64Emitter& e, const I64<>& addr_value, DEST_REG& dest) {
-  // rax = reserved
-  // if (address >> 24 == 0x7F) call register load handler;
-  auto addr = ComputeMemoryAddress(e, addr_value);
-  e.lea(e.r8d, e.ptr[addr]);
-  e.shr(e.r8d, 24);
-  e.cmp(e.r8b, 0x7F);
-  e.inLocalLabel();
-  Xbyak::Label normal_addr;
-  Xbyak::Label skip_load;
-  e.jne(normal_addr);
-  e.lea(e.rdx, e.ptr[addr]);
-  e.CallNative(DynamicRegisterLoad);
-  if (DEST_REG::key_type == KEY_TYPE_V_I32) {
-    e.bswap(e.eax);
-    e.mov(dest, e.eax);
-  }
-  e.jmp(skip_load);
-  e.L(normal_addr);
-  if (DEST_REG::key_type == KEY_TYPE_V_I32) {
-    e.mov(dest, e.dword[addr]);
-  }
-  if (IsTracingData()) {
-    e.mov(e.r8, dest);
-    e.lea(e.rdx, e.ptr[addr]);
-    if (DEST_REG::key_type == KEY_TYPE_V_I32) {
-      e.CallNative(TraceMemoryLoadI32);
-    } else if (DEST_REG::key_type == KEY_TYPE_V_I64) {
-      e.CallNative(TraceMemoryLoadI64);
-    }
-  }
-  e.L(skip_load);
-  e.outLocalLabel();
-}
-template <typename SRC_REG>
-void EmitStoreCheck(X64Emitter& e, const I64<>& addr_value, SRC_REG& src) {
-  // rax = reserved
-  // if (address >> 24 == 0x7F) call register store handler;
-  auto addr = ComputeMemoryAddress(e, addr_value);
-  e.lea(e.r8d, e.ptr[addr]);
-  e.shr(e.r8d, 24);
-  e.cmp(e.r8b, 0x7F);
-  e.inLocalLabel();
-  Xbyak::Label normal_addr;
-  Xbyak::Label skip_load;
-  e.jne(normal_addr);
-  e.lea(e.rdx, e.ptr[addr]);
-  if (SRC_REG::key_type == KEY_TYPE_V_I32) {
-    if (src.is_constant) {
-      e.mov(e.r8d, XESWAP32(static_cast<uint32_t>(src.constant())));
-    } else {
-      e.mov(e.r8d, src);
-      e.bswap(e.r8d);
-    }
-  } else if (SRC_REG::key_type == KEY_TYPE_V_I64) {
-    if (src.is_constant) {
-      e.mov(e.r8, XESWAP64(static_cast<uint64_t>(src.constant())));
-    } else {
-      e.mov(e.r8, src);
-      e.bswap(e.r8);
-    }
-  }
-  e.CallNative(DynamicRegisterStore);
-  e.jmp(skip_load);
-  e.L(normal_addr);
-  if (SRC_REG::key_type == KEY_TYPE_V_I32) {
-    if (src.is_constant) {
-      e.mov(e.dword[addr], src.constant());
-    } else {
-      e.mov(e.dword[addr], src);
-    }
-  } else if (SRC_REG::key_type == KEY_TYPE_V_I64) {
-    if (src.is_constant) {
-      e.MovMem64(addr, src.constant());
-    } else {
-      e.mov(e.qword[addr], src);
-    }
-  }
-  if (IsTracingData()) {
-    e.mov(e.r8, e.qword[addr]);
-    e.lea(e.rdx, e.ptr[addr]);
-    if (SRC_REG::key_type == KEY_TYPE_V_I32) {
-      e.CallNative(TraceMemoryStoreI32);
-    } else if (SRC_REG::key_type == KEY_TYPE_V_I64) {
-      e.CallNative(TraceMemoryStoreI64);
-    }
-  }
-  e.L(skip_load);
-  e.outLocalLabel();
-}
 EMITTER(LOAD_I8, MATCH(I<OPCODE_LOAD, I8<>, I64<>>)) {
   static void Emit(X64Emitter& e, const EmitArgType& i) {
-    if (CheckLoadAccessCallback(e, i)) {
-      return;
-    }
     auto addr = ComputeMemoryAddress(e, i.src1);
     e.mov(i.dest, e.byte[addr]);
     if (IsTracingData()) {
-      e.mov(e.r8, i.dest);
+      e.mov(e.r8b, i.dest);
       e.lea(e.rdx, e.ptr[addr]);
       e.CallNative(TraceMemoryLoadI8);
     }
@@ -1635,13 +1483,10 @@ EMITTER(LOAD_I8, MATCH(I<OPCODE_LOAD, I8<>, I64<>>)) {
 };
 EMITTER(LOAD_I16, MATCH(I<OPCODE_LOAD, I16<>, I64<>>)) {
   static void Emit(X64Emitter& e, const EmitArgType& i) {
-    if (CheckLoadAccessCallback(e, i)) {
-      return;
-    }
     auto addr = ComputeMemoryAddress(e, i.src1);
     e.mov(i.dest, e.word[addr]);
     if (IsTracingData()) {
-      e.mov(e.r8, i.dest);
+      e.mov(e.r8w, i.dest);
       e.lea(e.rdx, e.ptr[addr]);
       e.CallNative(TraceMemoryLoadI16);
     }
@@ -1649,17 +1494,17 @@ EMITTER(LOAD_I16, MATCH(I<OPCODE_LOAD, I16<>, I64<>>)) {
 };
 EMITTER(LOAD_I32, MATCH(I<OPCODE_LOAD, I32<>, I64<>>)) {
   static void Emit(X64Emitter& e, const EmitArgType& i) {
-    if (CheckLoadAccessCallback(e, i)) {
-      return;
+    auto addr = ComputeMemoryAddress(e, i.src1);
+    e.mov(i.dest, e.dword[addr]);
+    if (IsTracingData()) {
+      e.mov(e.r8d, i.dest);
+      e.lea(e.rdx, e.ptr[addr]);
+      e.CallNative(TraceMemoryLoadI32);
     }
-    EmitLoadCheck(e, i.src1, i.dest);
   }
 };
 EMITTER(LOAD_I64, MATCH(I<OPCODE_LOAD, I64<>, I64<>>)) {
   static void Emit(X64Emitter& e, const EmitArgType& i) {
-    if (CheckLoadAccessCallback(e, i)) {
-      return;
-    }
     auto addr = ComputeMemoryAddress(e, i.src1);
     e.mov(i.dest, e.qword[addr]);
     if (IsTracingData()) {
@@ -1718,51 +1563,8 @@ EMITTER_OPCODE_TABLE(
 // OPCODE_STORE
 // ============================================================================
 // Note: most *should* be aligned, but needs to be checked!
-template <typename T>
-bool CheckStoreAccessCallback(X64Emitter& e, const T& i) {
-  // If this is a constant address store, check to see if it's in a
-  // register range. We'll also probably want a dynamic check for
-  // unverified stores. So far, most games use constants.
-  if (!i.src1.is_constant) {
-    return false;
-  }
-  uint64_t address = i.src1.constant() & 0xFFFFFFFF;
-  auto cbs = e.runtime()->access_callbacks();
-  while (cbs) {
-    if (cbs->handles(cbs->context, address)) {
-      e.mov(e.rcx, reinterpret_cast<uint64_t>(cbs->context));
-      e.mov(e.rdx, address);
-      if (i.src2.is_constant) {
-        e.mov(e.r8, i.src2.constant());
-      } else {
-        if (T::src2_type == KEY_TYPE_V_I8) {
-          // No swap required.
-          e.movzx(e.r8, i.src2.reg().cvt8());
-        } else if (T::src2_type == KEY_TYPE_V_I16) {
-          e.movzx(e.r8, i.src2.reg().cvt16());
-          e.ror(e.r8w, 8);
-        } else if (T::src2_type == KEY_TYPE_V_I32) {
-          e.mov(e.r8d, i.src2.reg().cvt32());
-          e.bswap(e.r8d);
-        } else if (T::src2_type == KEY_TYPE_V_I64) {
-          e.mov(e.r8, i.src2);
-          e.bswap(e.r8);
-        } else {
-          XEASSERTALWAYS();
-        }
-      }
-      e.CallNative(cbs->write);
-      return true;
-    }
-    cbs = cbs->next;
-  }
-  return false;
-}
 EMITTER(STORE_I8, MATCH(I<OPCODE_STORE, VoidOp, I64<>, I8<>>)) {
   static void Emit(X64Emitter& e, const EmitArgType& i) {
-    if (CheckStoreAccessCallback(e, i)) {
-      return;
-    }
     auto addr = ComputeMemoryAddress(e, i.src1);
     if (i.src2.is_constant) {
       e.mov(e.byte[addr], i.src2.constant());
@@ -1770,7 +1572,7 @@ EMITTER(STORE_I8, MATCH(I<OPCODE_STORE, VoidOp, I64<>, I8<>>)) {
       e.mov(e.byte[addr], i.src2);
     }
     if (IsTracingData()) {
-      e.mov(e.r8, e.byte[addr]);
+      e.mov(e.r8b, e.byte[addr]);
       e.lea(e.rdx, e.ptr[addr]);
       e.CallNative(TraceMemoryStoreI8);
     }
@@ -1778,9 +1580,6 @@ EMITTER(STORE_I8, MATCH(I<OPCODE_STORE, VoidOp, I64<>, I8<>>)) {
 };
 EMITTER(STORE_I16, MATCH(I<OPCODE_STORE, VoidOp, I64<>, I16<>>)) {
   static void Emit(X64Emitter& e, const EmitArgType& i) {
-    if (CheckStoreAccessCallback(e, i)) {
-      return;
-    }
     auto addr = ComputeMemoryAddress(e, i.src1);
     if (i.src2.is_constant) {
       e.mov(e.word[addr], i.src2.constant());
@@ -1788,7 +1587,7 @@ EMITTER(STORE_I16, MATCH(I<OPCODE_STORE, VoidOp, I64<>, I16<>>)) {
       e.mov(e.word[addr], i.src2);
     }
     if (IsTracingData()) {
-      e.mov(e.r8, e.word[addr]);
+      e.mov(e.r8w, e.word[addr]);
      e.lea(e.rdx, e.ptr[addr]);
       e.CallNative(TraceMemoryStoreI16);
     }
@@ -1796,18 +1595,32 @@ EMITTER(STORE_I16, MATCH(I<OPCODE_STORE, VoidOp, I64<>, I16<>>)) {
 };
 EMITTER(STORE_I32, MATCH(I<OPCODE_STORE, VoidOp, I64<>, I32<>>)) {
   static void Emit(X64Emitter& e, const EmitArgType& i) {
-    if (CheckStoreAccessCallback(e, i)) {
-      return;
+    auto addr = ComputeMemoryAddress(e, i.src1);
+    if (i.src2.is_constant) {
+      e.mov(e.dword[addr], i.src2.constant());
+    } else {
+      e.mov(e.dword[addr], i.src2);
+    }
+    if (IsTracingData()) {
+      e.mov(e.r8d, e.dword[addr]);
+      e.lea(e.rdx, e.ptr[addr]);
+      e.CallNative(TraceMemoryStoreI32);
     }
-    EmitStoreCheck(e, i.src1, i.src2);
   }
 };
 EMITTER(STORE_I64, MATCH(I<OPCODE_STORE, VoidOp, I64<>, I64<>>)) {
   static void Emit(X64Emitter& e, const EmitArgType& i) {
-    if (CheckStoreAccessCallback(e, i)) {
-      return;
+    auto addr = ComputeMemoryAddress(e, i.src1);
+    if (i.src2.is_constant) {
+      e.MovMem64(addr, i.src2.constant());
+    } else {
+      e.mov(e.qword[addr], i.src2);
+    }
+    if (IsTracingData()) {
+      e.mov(e.r8, e.qword[addr]);
+      e.lea(e.rdx, e.ptr[addr]);
+      e.CallNative(TraceMemoryStoreI64);
     }
-    EmitStoreCheck(e, i.src1, i.src2);
   }
 };
 EMITTER(STORE_F32, MATCH(I<OPCODE_STORE, VoidOp, I64<>, F32<>>)) {
@@ -43,6 +43,15 @@ public:
   uint64_t SearchAligned(uint64_t start, uint64_t end,
                          const uint32_t* values, size_t value_count);
 
+  virtual uint8_t LoadI8(uint64_t address) = 0;
+  virtual uint16_t LoadI16(uint64_t address) = 0;
+  virtual uint32_t LoadI32(uint64_t address) = 0;
+  virtual uint64_t LoadI64(uint64_t address) = 0;
+  virtual void StoreI8(uint64_t address, uint8_t value) = 0;
+  virtual void StoreI16(uint64_t address, uint16_t value) = 0;
+  virtual void StoreI32(uint64_t address, uint32_t value) = 0;
+  virtual void StoreI64(uint64_t address, uint64_t value) = 0;
+
   virtual uint64_t HeapAlloc(
       uint64_t base_address, size_t size, uint32_t flags,
       uint32_t alignment = 0x20) = 0;
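The hunk above gives the abstract Memory interface width-specific accessors so that frontends and backends can hand off loads and stores without knowing anything about MMIO. The snippet below is an illustration only, a hypothetical standalone class (not derived from alloy::Memory) showing the accessor shape; the real XenonMemory implementation later in this diff additionally consults its MMIO range table before touching host memory, and callers such as the IVM opcodes apply the XESWAP byte-swaps around these calls.

#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical flat-RAM backing store exposing the same accessor shape.
class FlatMemory {
 public:
  explicit FlatMemory(size_t size) : buffer_(size) {}
  uint32_t LoadI32(uint64_t address) {
    uint32_t value;
    std::memcpy(&value, &buffer_[address], sizeof(value));  // raw host bytes
    return value;
  }
  void StoreI32(uint64_t address, uint32_t value) {
    std::memcpy(&buffer_[address], &value, sizeof(value));
  }
 private:
  std::vector<uint8_t> buffer_;
};

int main() {
  FlatMemory memory(64);
  memory.StoreI32(16, 0xDEADBEEF);
  return memory.LoadI32(16) == 0xDEADBEEF ? 0 : 1;
}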
@@ -1,38 +0,0 @@
-/**
- ******************************************************************************
- * Xenia : Xbox 360 Emulator Research Project *
- ******************************************************************************
- * Copyright 2013 Ben Vanik. All rights reserved. *
- * Released under the BSD license - see LICENSE in the root for more details. *
- ******************************************************************************
- */
-
-#ifndef ALLOY_RUNTIME_REGISTER_ACCESS_H_
-#define ALLOY_RUNTIME_REGISTER_ACCESS_H_
-
-#include <alloy/core.h>
-
-
-namespace alloy {
-namespace runtime {
-
-typedef bool (*RegisterHandlesCallback)(void* context, uint64_t addr);
-typedef uint64_t (*RegisterReadCallback)(void* context, uint64_t addr);
-typedef void (*RegisterWriteCallback)(void* context, uint64_t addr,
-                                      uint64_t value);
-
-typedef struct RegisterAccessCallbacks_s {
-  void* context;
-  RegisterHandlesCallback handles;
-  RegisterReadCallback read;
-  RegisterWriteCallback write;
-
-  RegisterAccessCallbacks_s* next;
-} RegisterAccessCallbacks;
-
-
-}  // namespace runtime
-}  // namespace alloy
-
-
-#endif  // ALLOY_RUNTIME_REGISTER_ACCESS_H_
@@ -25,8 +25,7 @@ DEFINE_string(runtime_backend, "any",
 
 
 Runtime::Runtime(Memory* memory) :
-    memory_(memory), debugger_(0), backend_(0), frontend_(0),
-    access_callbacks_(0) {
+    memory_(memory), debugger_(0), backend_(0), frontend_(0) {
   tracing::Initialize();
   modules_lock_ = AllocMutex(10000);
 }
@@ -41,14 +40,6 @@ Runtime::~Runtime() {
   UnlockMutex(modules_lock_);
   FreeMutex(modules_lock_);
 
-  RegisterAccessCallbacks* cbs = access_callbacks_;
-  while (cbs) {
-    RegisterAccessCallbacks* next = cbs->next;
-    delete cbs;
-    cbs = next;
-  }
-  access_callbacks_ = NULL;
-
   delete frontend_;
   delete backend_;
   delete debugger_;
@@ -281,11 +272,3 @@ int Runtime::DemandFunction(
 
   return 0;
 }
-
-void Runtime::AddRegisterAccessCallbacks(
-    const RegisterAccessCallbacks& callbacks) {
-  RegisterAccessCallbacks* cbs = new RegisterAccessCallbacks();
-  xe_copy_struct(cbs, &callbacks, sizeof(callbacks));
-  cbs->next = access_callbacks_;
-  access_callbacks_ = cbs;
-}
@@ -17,7 +17,6 @@
 #include <alloy/runtime/debugger.h>
 #include <alloy/runtime/entry_table.h>
 #include <alloy/runtime/module.h>
-#include <alloy/runtime/register_access.h>
 #include <alloy/runtime/symbol_info.h>
 #include <alloy/runtime/thread_state.h>
 
@@ -38,9 +37,6 @@ public:
   Debugger* debugger() const { return debugger_; }
   frontend::Frontend* frontend() const { return frontend_; }
   backend::Backend* backend() const { return backend_; }
-  RegisterAccessCallbacks* access_callbacks() const {
-    return access_callbacks_;
-  }
 
   int Initialize(frontend::Frontend* frontend, backend::Backend* backend = 0);
 
@@ -55,9 +51,6 @@ public:
                        FunctionInfo** out_symbol_info);
   int ResolveFunction(uint64_t address, Function** out_function);
 
-  void AddRegisterAccessCallbacks(
-      const RegisterAccessCallbacks& callbacks);
-
   //uint32_t CreateCallback(void (*callback)(void* data), void* data);
 
 private:
@@ -74,8 +67,6 @@ protected:
   EntryTable entry_table_;
   Mutex* modules_lock_;
   ModuleList modules_;
-
-  RegisterAccessCallbacks* access_callbacks_;
 };
 
 
@@ -15,7 +15,6 @@
         'module.h',
         'raw_module.cc',
         'raw_module.h',
-        'register_access.h',
         'runtime.cc',
         'runtime.h',
         'symbol_info.cc',
@@ -42,12 +42,13 @@ X_STATUS AudioSystem::Setup() {
   processor_ = emulator_->processor();
 
   // Let the processor know we want register access callbacks.
-  RegisterAccessCallbacks callbacks;
-  callbacks.context = this;
-  callbacks.handles = (RegisterHandlesCallback)HandlesRegisterThunk;
-  callbacks.read = (RegisterReadCallback)ReadRegisterThunk;
-  callbacks.write = (RegisterWriteCallback)WriteRegisterThunk;
-  emulator_->processor()->AddRegisterAccessCallbacks(callbacks);
+  emulator_->memory()->AddMappedRange(
+      0x7FEA0000,
+      0xFFFF0000,
+      0x0000FFFF,
+      this,
+      reinterpret_cast<MMIOReadCallback>(MMIOReadRegisterThunk),
+      reinterpret_cast<MMIOWriteCallback>(MMIOWriteRegisterThunk));
 
   // Setup worker thread state. This lets us make calls into guest code.
   thread_state_ = new XenonThreadState(
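The AddMappedRange() arguments above (base 0x7FEA0000, mask 0xFFFF0000, size 0x0000FFFF) encode the same predicate that the removed AudioSystem::HandlesRegister() used: (addr & 0xFFFF0000) == 0x7FEA0000. A quick standalone check of that mask arithmetic:

#include <cstdint>
#include <cassert>

int main() {
  const uint64_t base = 0x7FEA0000, mask = 0xFFFF0000;
  assert((0x7FEA0024ull & mask) == base);   // audio register: handled
  assert((0x7FEB0024ull & mask) != base);   // outside the range: not handled
  return 0;
}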
@@ -181,10 +182,6 @@ void AudioSystem::UnregisterClient(size_t index) {
   xe_mutex_unlock(lock_);
 }
 
-bool AudioSystem::HandlesRegister(uint64_t addr) {
-  return (addr & 0xFFFF0000) == 0x7FEA0000;
-}
-
 // free60 may be useful here, however it looks like it's using a different
 // piece of hardware:
 // https://github.com/Free60Project/libxenon/blob/master/libxenon/drivers/xenon_sound/sound.c
@@ -42,7 +42,6 @@ public:
   virtual X_STATUS CreateDriver(size_t index, HANDLE wait_handle, AudioDriver** out_driver) = 0;
   virtual void DestroyDriver(AudioDriver* driver) = 0;
 
-  bool HandlesRegister(uint64_t addr);
   virtual uint64_t ReadRegister(uint64_t addr);
   virtual void WriteRegister(uint64_t addr, uint64_t value);
 
@@ -55,13 +54,10 @@ private:
   }
   void ThreadStart();
 
-  static bool HandlesRegisterThunk(AudioSystem* as, uint64_t addr) {
-    return as->HandlesRegister(addr);
-  }
-  static uint64_t ReadRegisterThunk(AudioSystem* as, uint64_t addr) {
+  static uint64_t MMIOReadRegisterThunk(AudioSystem* as, uint64_t addr) {
     return as->ReadRegister(addr);
   }
-  static void WriteRegisterThunk(AudioSystem* as, uint64_t addr,
+  static void MMIOWriteRegisterThunk(AudioSystem* as, uint64_t addr,
                                  uint64_t value) {
     as->WriteRegister(addr, value);
   }
@@ -141,11 +141,6 @@ int Processor::Setup() {
   return 0;
 }
 
-void Processor::AddRegisterAccessCallbacks(
-    xe::cpu::RegisterAccessCallbacks callbacks) {
-  runtime_->AddRegisterAccessCallbacks(callbacks);
-}
-
 int Processor::Execute(XenonThreadState* thread_state, uint64_t address) {
   SCOPE_profile_cpu_f("cpu");
 
@@ -10,7 +10,6 @@
 #ifndef XENIA_CPU_PROCESSOR_H_
 #define XENIA_CPU_PROCESSOR_H_
 
-#include <alloy/runtime/register_access.h>
 #include <xenia/core.h>
 #include <xenia/debug/debug_target.h>
 
@@ -28,11 +27,6 @@ XEDECLARECLASS2(xe, cpu, XexModule);
 namespace xe {
 namespace cpu {
 
-using RegisterAccessCallbacks = alloy::runtime::RegisterAccessCallbacks;
-using RegisterHandlesCallback = alloy::runtime::RegisterHandlesCallback;
-using RegisterReadCallback = alloy::runtime::RegisterReadCallback;
-using RegisterWriteCallback = alloy::runtime::RegisterWriteCallback;
-
 
 class Processor : public debug::DebugTarget {
 public:
@@ -45,8 +39,6 @@ public:
 
   int Setup();
 
-  void AddRegisterAccessCallbacks(RegisterAccessCallbacks callbacks);
-
   int Execute(
       XenonThreadState* thread_state, uint64_t address);
   uint64_t Execute(
@@ -119,6 +119,111 @@ private:
 };
 uint32_t XenonMemoryHeap::next_heap_id_ = 1;
 
+namespace {
+
+namespace BE {
+#include <beaengine/BeaEngine.h>
+}
+
+struct MMIORange {
+  uint64_t address;
+  uint64_t mask;
+  uint64_t size;
+  void* context;
+  MMIOReadCallback read;
+  MMIOWriteCallback write;
+};
+MMIORange g_mapped_ranges_[16] = { 0 };
+int g_mapped_range_count_ = 0;
+
+uint64_t* GetContextRegPtr(BE::Int32 arg_type, PCONTEXT context) {
+  DWORD index = 0;
+  _BitScanForward(&index, arg_type);
+  return &context->Rax + index;
+}
+
+// Handles potential accesses to mmio. We look for access violations to
+// addresses in our range and call into the registered handlers, if any.
+// If there are none, we continue.
+LONG CALLBACK CheckMMIOHandler(PEXCEPTION_POINTERS ex_info) {
+  // http://msdn.microsoft.com/en-us/library/ms679331(v=vs.85).aspx
+  // http://msdn.microsoft.com/en-us/library/aa363082(v=vs.85).aspx
+  auto code = ex_info->ExceptionRecord->ExceptionCode;
+  if (code == STATUS_ACCESS_VIOLATION) {
+    // Access violations are pretty rare, so we can do a linear search here.
+    auto address = ex_info->ExceptionRecord->ExceptionInformation[1];
+    for (int i = 0; i < g_mapped_range_count_; ++i) {
+      const auto& range = g_mapped_ranges_[i];
+      if ((address & range.mask) == range.address) {
+        // Within our range.
+
+        // TODO(benvanik): replace with simple check of mov (that's all
+        //     we care about).
+        BE::DISASM disasm = { 0 };
+        disasm.Archi = 64;
+        disasm.Options = BE::MasmSyntax + BE::PrefixedNumeral;
+        disasm.EIP = (BE::UIntPtr)ex_info->ExceptionRecord->ExceptionAddress;
+        BE::UIntPtr eip_end = disasm.EIP + 20;
+        size_t len = BE::Disasm(&disasm);
+        if (len == BE::UNKNOWN_OPCODE) {
+          break;
+        }
+
+        auto action = ex_info->ExceptionRecord->ExceptionInformation[0];
+        if (action == 0) {
+          uint64_t value = range.read(range.context, address & 0xFFFFFFFF);
+          XEASSERT((disasm.Argument1.ArgType & BE::REGISTER_TYPE) ==
+                   BE::REGISTER_TYPE);
+          uint64_t* reg_ptr = GetContextRegPtr(disasm.Argument1.ArgType,
+                                               ex_info->ContextRecord);
+          switch (disasm.Argument1.ArgSize) {
+            case 8:
+              *reg_ptr = static_cast<uint8_t>(value);
+              break;
+            case 16:
+              *reg_ptr = XESWAP16(static_cast<uint16_t>(value));
+              break;
+            case 32:
+              *reg_ptr = XESWAP32(static_cast<uint32_t>(value));
+              break;
+            case 64:
+              *reg_ptr = XESWAP64(static_cast<uint64_t>(value));
+              break;
+          }
+          ex_info->ContextRecord->Rip += len;
+          return EXCEPTION_CONTINUE_EXECUTION;
+        } else if (action == 1) {
+          XEASSERT((disasm.Argument2.ArgType & BE::REGISTER_TYPE) ==
+                   BE::REGISTER_TYPE);
+          uint64_t* reg_ptr = GetContextRegPtr(disasm.Argument2.ArgType,
+                                               ex_info->ContextRecord);
+          uint64_t value = *reg_ptr;
+          switch (disasm.Argument2.ArgSize) {
+            case 8:
+              value = static_cast<uint8_t>(value);
+              break;
+            case 16:
+              value = XESWAP16(static_cast<uint16_t>(value));
+              break;
+            case 32:
+              value = XESWAP32(static_cast<uint32_t>(value));
+              break;
+            case 64:
+              value = XESWAP64(static_cast<uint64_t>(value));
+              break;
+          }
+          range.write(range.context, address & 0xFFFFFFFF, value);
+          ex_info->ContextRecord->Rip += len;
+          return EXCEPTION_CONTINUE_EXECUTION;
+        }
+      }
+    }
+  }
+  return EXCEPTION_CONTINUE_SEARCH;
+}
+
+}  // namespace
+
 
 XenonMemory::XenonMemory() :
     mapping_(0), mapping_base_(0),
@@ -204,6 +309,15 @@ int XenonMemory::Initialize() {
                0x00100000,
                MEM_COMMIT, PAGE_READWRITE);
 
+  // Add handlers for MMIO.
+  // If there is a debugger attached the normal exception handler will not
+  // fire and we must instead add the continue handler.
+  AddVectoredExceptionHandler(1, CheckMMIOHandler);
+  if (IsDebuggerPresent()) {
+    // TODO(benvanik): is this really required?
+    //AddVectoredContinueHandler(1, CheckMMIOHandler);
+  }
+
   return 0;
 
 XECLEANUP:
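The mechanism the hunk above registers is a Windows vectored exception handler: MMIO pages are committed with no (or read-only) access, so a guest mov into the range faults, CheckMMIOHandler services the access via the registered callbacks, advances Rip, and resumes. The sketch below is a minimal, self-contained illustration of that registration-and-resume pattern only (Windows-specific); it raises a user-defined exception instead of a real access violation and does none of the disassembly or register patching the real handler performs.

#include <windows.h>
#include <cstdio>

static LONG CALLBACK OnException(PEXCEPTION_POINTERS info) {
  if (info->ExceptionRecord->ExceptionCode == 0xE0000001) {
    std::printf("handled code %08X, resuming\n",
                static_cast<unsigned>(info->ExceptionRecord->ExceptionCode));
    return EXCEPTION_CONTINUE_EXECUTION;  // resume at the faulting point
  }
  return EXCEPTION_CONTINUE_SEARCH;  // not ours: let other handlers look at it
}

int main() {
  void* handle = AddVectoredExceptionHandler(1 /* call first */, OnException);
  RaiseException(0xE0000001, 0 /* continuable */, 0, nullptr);
  std::printf("execution continued past RaiseException\n");
  RemoveVectoredExceptionHandler(handle);
  return 0;
}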
@ -248,6 +362,112 @@ void XenonMemory::UnmapViews() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool XenonMemory::AddMappedRange(uint64_t address, uint64_t mask,
|
||||||
|
uint64_t size, void* context,
|
||||||
|
MMIOReadCallback read_callback,
|
||||||
|
MMIOWriteCallback write_callback) {
|
||||||
|
DWORD protect = 0;
|
||||||
|
if (read_callback && write_callback) {
|
||||||
|
protect = PAGE_NOACCESS;
|
||||||
|
} else if (write_callback) {
|
||||||
|
protect = PAGE_READONLY;
|
||||||
|
} else {
|
||||||
|
// Write-only memory is not supported.
|
||||||
|
XEASSERTALWAYS();
|
||||||
|
}
|
||||||
|
if (!VirtualAlloc(Translate(address),
|
||||||
|
size,
+                    MEM_COMMIT, protect)) {
+    return false;
+  }
+  XEASSERT(g_mapped_range_count_ + 1 < XECOUNT(g_mapped_ranges_));
+  g_mapped_ranges_[g_mapped_range_count_++] = {
+    reinterpret_cast<uint64_t>(mapping_base_) | address,
+    0xFFFFFFFF00000000 | mask,
+    size, context,
+    read_callback, write_callback,
+  };
+  return true;
+}
+
+bool XenonMemory::CheckMMIOLoad(uint64_t address, uint64_t* out_value) {
+  for (int i = 0; i < g_mapped_range_count_; ++i) {
+    const auto& range = g_mapped_ranges_[i];
+    if (((address | (uint64_t)mapping_base_) & range.mask) == range.address) {
+      *out_value = static_cast<uint32_t>(range.read(range.context, address));
+      return true;
+    }
+  }
+  return false;
+}
+
+uint8_t XenonMemory::LoadI8(uint64_t address) {
+  uint64_t value;
+  if (!CheckMMIOLoad(address, &value)) {
+    value = *reinterpret_cast<uint8_t*>(Translate(address));
+  }
+  return static_cast<uint8_t>(value);
+}
+
+uint16_t XenonMemory::LoadI16(uint64_t address) {
+  uint64_t value;
+  if (!CheckMMIOLoad(address, &value)) {
+    value = *reinterpret_cast<uint16_t*>(Translate(address));
+  }
+  return static_cast<uint16_t>(value);
+}
+
+uint32_t XenonMemory::LoadI32(uint64_t address) {
+  uint64_t value;
+  if (!CheckMMIOLoad(address, &value)) {
+    value = *reinterpret_cast<uint32_t*>(Translate(address));
+  }
+  return static_cast<uint32_t>(value);
+}
+
+uint64_t XenonMemory::LoadI64(uint64_t address) {
+  uint64_t value;
+  if (!CheckMMIOLoad(address, &value)) {
+    value = *reinterpret_cast<uint64_t*>(Translate(address));
+  }
+  return static_cast<uint64_t>(value);
+}
+
+bool XenonMemory::CheckMMIOStore(uint64_t address, uint64_t value) {
+  for (int i = 0; i < g_mapped_range_count_; ++i) {
+    const auto& range = g_mapped_ranges_[i];
+    if (((address | (uint64_t)mapping_base_) & range.mask) == range.address) {
+      range.write(range.context, address, value);
+      return true;
+    }
+  }
+  return false;
+}
+
+void XenonMemory::StoreI8(uint64_t address, uint8_t value) {
+  if (!CheckMMIOStore(address, value)) {
+    *reinterpret_cast<uint8_t*>(Translate(address)) = value;
+  }
+}
+
+void XenonMemory::StoreI16(uint64_t address, uint16_t value) {
+  if (!CheckMMIOStore(address, value)) {
+    *reinterpret_cast<uint16_t*>(Translate(address)) = value;
+  }
+}
+
+void XenonMemory::StoreI32(uint64_t address, uint32_t value) {
+  if (!CheckMMIOStore(address, value)) {
+    *reinterpret_cast<uint32_t*>(Translate(address)) = value;
+  }
+}
+
+void XenonMemory::StoreI64(uint64_t address, uint64_t value) {
+  if (!CheckMMIOStore(address, value)) {
+    *reinterpret_cast<uint64_t*>(Translate(address)) = value;
+  }
+}
+
 uint64_t XenonMemory::HeapAlloc(
     uint64_t base_address, size_t size, uint32_t flags,
     uint32_t alignment) {
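
A note on the range check added above: AddMappedRange stores mapping_base_ | address as the range key and widens the supplied mask with 0xFFFFFFFF00000000, so CheckMMIOLoad/CheckMMIOStore compare a guest address after OR-ing the same host base into it. The standalone sketch below walks one lookup through those bit operations. The 4 GiB-aligned mapping_base_ value is an assumption about the host reservation, and the 0x7FC8xxxx constants are the ones the GraphicsSystem hunk further down registers; the probed offset is arbitrary.

// Illustrative only: replays the masking used by CheckMMIOLoad/CheckMMIOStore.
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t mapping_base  = 0x100000000ull;  // assumed 4 GiB-aligned host base
  const uint64_t range_address = mapping_base | 0x7FC80000ull;
  const uint64_t range_mask    = 0xFFFFFFFF00000000ull | 0xFFFF0000ull;

  // An arbitrary guest access inside the 64 KiB register window.
  const uint64_t guest_address = 0x7FC80714ull;

  const bool hits =
      ((guest_address | mapping_base) & range_mask) == range_address;
  std::printf("0x%llX %s the mapped range\n",
              (unsigned long long)guest_address, hits ? "hits" : "misses");
  return 0;
}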
@@ -15,33 +15,56 @@
 #include <xenia/core.h>


+typedef struct xe_ppc_state xe_ppc_state_t;
+
 namespace xe {
 namespace cpu {

 class XenonMemoryHeap;

+typedef uint64_t (*MMIOReadCallback)(void* context, uint64_t addr);
+typedef void (*MMIOWriteCallback)(void* context, uint64_t addr,
+                                  uint64_t value);
+
 class XenonMemory : public alloy::Memory {
 public:
   XenonMemory();
   virtual ~XenonMemory();

-  virtual int Initialize();
+  int Initialize() override;

-  virtual uint64_t HeapAlloc(
+  bool AddMappedRange(uint64_t address, uint64_t mask,
+                      uint64_t size,
+                      void* context,
+                      MMIOReadCallback read_callback = nullptr,
+                      MMIOWriteCallback write_callback = nullptr);
+
+  uint8_t LoadI8(uint64_t address) override;
+  uint16_t LoadI16(uint64_t address) override;
+  uint32_t LoadI32(uint64_t address) override;
+  uint64_t LoadI64(uint64_t address) override;
+  void StoreI8(uint64_t address, uint8_t value) override;
+  void StoreI16(uint64_t address, uint16_t value) override;
+  void StoreI32(uint64_t address, uint32_t value) override;
+  void StoreI64(uint64_t address, uint64_t value) override;
+
+  uint64_t HeapAlloc(
       uint64_t base_address, size_t size, uint32_t flags,
-      uint32_t alignment = 0x20);
-  virtual int HeapFree(uint64_t address, size_t size);
+      uint32_t alignment = 0x20) override;
+  int HeapFree(uint64_t address, size_t size) override;

-  virtual size_t QuerySize(uint64_t base_address);
+  size_t QuerySize(uint64_t base_address) override;

-  virtual int Protect(uint64_t address, size_t size, uint32_t access);
-  virtual uint32_t QueryProtect(uint64_t address);
+  int Protect(uint64_t address, size_t size, uint32_t access) override;
+  uint32_t QueryProtect(uint64_t address) override;

 private:
   int MapViews(uint8_t* mapping_base);
   void UnmapViews();

+  bool CheckMMIOLoad(uint64_t address, uint64_t* out_value);
+  bool CheckMMIOStore(uint64_t address, uint64_t value);
+
 private:
   HANDLE mapping_;
   uint8_t* mapping_base_;
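
For reference, a minimal caller of the AddMappedRange() declared above. The device struct, the 0x7EC80000 window, and the capture-less lambda callbacks are hypothetical illustrations, not part of this commit; only the XenonMemory API and the <xenia/cpu/xenon_memory.h> include path come from the diff itself.

// Hypothetical hookup of a fake device's 64 KiB register window.
#include <cstdint>
#include <xenia/cpu/xenon_memory.h>

struct FakeDevice {
  uint32_t regs[0x10000 / 4] = {};
};

bool HookFakeDevice(xe::cpu::XenonMemory* memory, FakeDevice* device) {
  return memory->AddMappedRange(
      0x7EC80000,    // hypothetical guest base of the window
      0xFFFF0000,    // mask bits that select the window
      0x0000FFFF,    // window size
      device,        // opaque context handed back to the callbacks
      [](void* context, uint64_t addr) -> uint64_t {
        auto* d = static_cast<FakeDevice*>(context);
        return d->regs[(addr & 0xFFFF) / 4];
      },
      [](void* context, uint64_t addr, uint64_t value) {
        auto* d = static_cast<FakeDevice*>(context);
        d->regs[(addr & 0xFFFF) / 4] = static_cast<uint32_t>(value);
      });
}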
@@ -13,6 +13,7 @@
 #include <xenia/common.h>
 #include <xenia/core.h>
 #include <xenia/xbox.h>
+#include <xenia/cpu/xenon_memory.h>


 XEDECLARECLASS1(xe, ExportResolver);

@@ -41,7 +42,7 @@ public:
   ui::Window* main_window() const { return main_window_; }
   void set_main_window(ui::Window* window);

-  Memory* memory() const { return memory_; }
+  cpu::XenonMemory* memory() const { return memory_; }

   debug::DebugServer* debug_server() const { return debug_server_; }

@@ -68,7 +69,7 @@ private:

   ui::Window* main_window_;

-  Memory* memory_;
+  cpu::XenonMemory* memory_;

   debug::DebugServer* debug_server_;

@@ -45,12 +45,13 @@ X_STATUS GraphicsSystem::Setup() {
   worker_ = new RingBufferWorker(this, memory_);

   // Let the processor know we want register access callbacks.
-  RegisterAccessCallbacks callbacks;
-  callbacks.context = this;
-  callbacks.handles = (RegisterHandlesCallback)HandlesRegisterThunk;
-  callbacks.read = (RegisterReadCallback)ReadRegisterThunk;
-  callbacks.write = (RegisterWriteCallback)WriteRegisterThunk;
-  emulator_->processor()->AddRegisterAccessCallbacks(callbacks);
+  emulator_->memory()->AddMappedRange(
+      0x7FC80000,
+      0xFFFF0000,
+      0x0000FFFF,
+      this,
+      reinterpret_cast<MMIOReadCallback>(MMIOReadRegisterThunk),
+      reinterpret_cast<MMIOWriteCallback>(MMIOWriteRegisterThunk));

   // Create worker thread.
   // This will initialize the graphics system.

@@ -132,10 +133,6 @@ void GraphicsSystem::EnableReadPointerWriteBack(uint32_t ptr,
   worker_->EnableReadPointerWriteBack(ptr, block_size);
 }

-bool GraphicsSystem::HandlesRegister(uint64_t addr) {
-  return (addr & 0xFFFF0000) == 0x7FC80000;
-}
-
 uint64_t GraphicsSystem::ReadRegister(uint64_t addr) {
   uint32_t r = addr & 0xFFFF;
   XELOGGPU("ReadRegister(%.4X)", r);
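
The AddMappedRange call above takes over the address claim that the removed HandlesRegister() predicate used to make. A quick standalone check that the registered base and mask cover the same 0x7FC80000-0x7FC8FFFF window is sketched below; it assumes a mapping base whose low 32 bits are zero, so the base term drops out of XenonMemory's comparison.

// Illustrative equivalence check between the old predicate and the new mask.
#include <cassert>
#include <cstdint>

static bool OldHandlesRegister(uint64_t addr) {
  return (addr & 0xFFFF0000) == 0x7FC80000;
}

static bool NewRangeHit(uint64_t addr, uint64_t range_base, uint64_t mask) {
  // Mirrors XenonMemory's check with a zero low-bits mapping base.
  return (addr & (0xFFFFFFFF00000000ull | mask)) == range_base;
}

int main() {
  for (uint64_t addr = 0x7FC7FFF0; addr <= 0x7FC90010; ++addr) {
    assert(OldHandlesRegister(addr) ==
           NewRangeHit(addr, 0x7FC80000, 0xFFFF0000));
  }
  return 0;
}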
@@ -40,7 +40,6 @@ public:
   void InitializeRingBuffer(uint32_t ptr, uint32_t page_count);
   void EnableReadPointerWriteBack(uint32_t ptr, uint32_t block_size);

-  bool HandlesRegister(uint64_t addr);
   virtual uint64_t ReadRegister(uint64_t addr);
   virtual void WriteRegister(uint64_t addr, uint64_t value);


@@ -59,13 +58,10 @@ private:
   }
   void ThreadStart();

-  static bool HandlesRegisterThunk(GraphicsSystem* gs, uint64_t addr) {
-    return gs->HandlesRegister(addr);
-  }
-  static uint64_t ReadRegisterThunk(GraphicsSystem* gs, uint64_t addr) {
+  static uint64_t MMIOReadRegisterThunk(GraphicsSystem* gs, uint64_t addr) {
     return gs->ReadRegister(addr);
   }
-  static void WriteRegisterThunk(GraphicsSystem* gs, uint64_t addr,
+  static void MMIOWriteRegisterThunk(GraphicsSystem* gs, uint64_t addr,
                                  uint64_t value) {
     gs->WriteRegister(addr, value);
   }
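
The renamed thunks exist because the MMIO callbacks are plain function pointers that only carry a void* context. A standalone sketch of that pattern follows; it declares the thunk with a void* parameter and casts inside, whereas the header above keeps the typed GraphicsSystem* parameter and reinterpret_casts the function pointer at registration time. Types and values here are stand-ins, not the emulator's.

// Illustrative thunk: a static member function matching the callback shape.
#include <cstdint>
#include <cstdio>

typedef uint64_t (*MMIOReadCallback)(void* context, uint64_t addr);

class Device {
 public:
  uint64_t ReadRegister(uint64_t addr) { return 0x1000 + (addr & 0xFFFF); }

  static uint64_t ReadThunk(void* context, uint64_t addr) {
    return static_cast<Device*>(context)->ReadRegister(addr);
  }
};

int main() {
  Device device;
  MMIOReadCallback cb = &Device::ReadThunk;  // no cast needed in this variant
  std::printf("0x%llX\n",
              static_cast<unsigned long long>(cb(&device, 0x0710)));
  return 0;
}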