[CPU, Memory] 0xE0000000 adjustment by @elad335 and mapping

Triang3l 2019-08-14 21:37:52 +03:00
parent c021c7741d
commit 003c02c640
2 changed files with 69 additions and 18 deletions
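
The 0xE0000000-0xFFFFFFFF range views physical memory starting 4 KB in, so guest 0xE0000000 lands one page past the physical base. Hosts whose allocation granularity is 4 KB can express that offset directly in the file mapping; on hosts with a coarser granularity (64 KB on Windows), the mapping must stay granularity-aligned, so the JIT instead adds 0x1000 to any address in that range at access time. A minimal C++ model of the branchless sequence the emitter generates below (GuestToHost and membase are illustrative names, not from the codebase):

#include <cstdint>

// Mirrors the emitted cmp/setae/movzx/shl/add: branchlessly add 0x1000 to
// addresses at or above 0xE0000000, then index the host base pointer.
inline uint8_t* GuestToHost(uint8_t* membase, uint32_t guest_address) {
  uint32_t in_e0_range = guest_address >= 0xE0000000u ? 1u : 0u;  // setae
  uint32_t adjusted = guest_address + (in_e0_range << 12);        // shl, add
  return membase + adjusted;
}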

View File

@@ -12,6 +12,7 @@
 #include <algorithm>
 #include <cstring>
 
+#include "xenia/base/memory.h"
 #include "xenia/cpu/backend/x64/x64_op.h"
 #include "xenia/cpu/backend/x64/x64_tracers.h"
@@ -39,13 +40,28 @@ RegExp ComputeMemoryAddressOffset(X64Emitter& e, const T& guest,
     if (address < 0x80000000) {
       return e.GetMembaseReg() + address;
     } else {
-      e.mov(e.eax, address);
+      if (address >= 0xE0000000 &&
+          xe::memory::allocation_granularity() > 0x1000) {
+        e.mov(e.eax, address + 0x1000);
+      } else {
+        e.mov(e.eax, address);
+      }
       return e.GetMembaseReg() + e.rax;
     }
   } else {
-    // Clear the top 32 bits, as they are likely garbage.
-    // TODO(benvanik): find a way to avoid doing this.
-    e.mov(e.eax, guest.reg().cvt32());
+    if (xe::memory::allocation_granularity() > 0x1000) {
+      // Emulate the 4 KB physical address offset in 0xE0000000+ when can't do
+      // it via memory mapping.
+      e.cmp(guest.reg().cvt32(), 0xE0000000 - offset_const);
+      e.setae(e.al);
+      e.movzx(e.eax, e.al);
+      e.shl(e.eax, 12);
+      e.add(e.eax, guest.reg().cvt32());
+    } else {
+      // Clear the top 32 bits, as they are likely garbage.
+      // TODO(benvanik): find a way to avoid doing this.
+      e.mov(e.eax, guest.reg().cvt32());
+    }
     return e.GetMembaseReg() + e.rax + offset_const;
   }
 }
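
Note the threshold in the hunk above: the constant offset is folded into the final membase + rax + offset_const expression rather than added to the register, so the range check compares the unadjusted register against 0xE0000000 - offset_const, which (barring 32-bit wraparound at the extremes) is equivalent to testing reg + offset_const >= 0xE0000000. A quick sanity check with made-up values:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t reg = 0xDFFFFF00u;      // hypothetical guest register value
  uint32_t offset_const = 0x200u;  // hypothetical folded constant offset
  // Effective address 0xE0000100 is >= 0xE0000000, and the shifted-threshold
  // form agrees without computing the sum at runtime.
  assert((reg + offset_const >= 0xE0000000u) ==
         (reg >= 0xE0000000u - offset_const));
  return 0;
}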
@@ -61,13 +77,28 @@ RegExp ComputeMemoryAddress(X64Emitter& e, const T& guest) {
     if (address < 0x80000000) {
       return e.GetMembaseReg() + address;
     } else {
-      e.mov(e.eax, address);
+      if (address >= 0xE0000000 &&
+          xe::memory::allocation_granularity() > 0x1000) {
+        e.mov(e.eax, address + 0x1000);
+      } else {
+        e.mov(e.eax, address);
+      }
       return e.GetMembaseReg() + e.rax;
     }
   } else {
-    // Clear the top 32 bits, as they are likely garbage.
-    // TODO(benvanik): find a way to avoid doing this.
-    e.mov(e.eax, guest.reg().cvt32());
+    if (xe::memory::allocation_granularity() > 0x1000) {
+      // Emulate the 4 KB physical address offset in 0xE0000000+ when can't do
+      // it via memory mapping.
+      e.cmp(guest.reg().cvt32(), 0xE0000000);
+      e.setae(e.al);
+      e.movzx(e.eax, e.al);
+      e.shl(e.eax, 12);
+      e.add(e.eax, guest.reg().cvt32());
+    } else {
+      // Clear the top 32 bits, as they are likely garbage.
+      // TODO(benvanik): find a way to avoid doing this.
+      e.mov(e.eax, guest.reg().cvt32());
+    }
     return e.GetMembaseReg() + e.rax;
   }
 }
@@ -142,7 +173,17 @@ struct ATOMIC_COMPARE_EXCHANGE_I32
                I<OPCODE_ATOMIC_COMPARE_EXCHANGE, I8Op, I64Op, I32Op, I32Op>> {
   static void Emit(X64Emitter& e, const EmitArgType& i) {
     e.mov(e.eax, i.src2);
-    e.mov(e.ecx, i.src1.reg().cvt32());
+    if (xe::memory::allocation_granularity() > 0x1000) {
+      // Emulate the 4 KB physical address offset in 0xE0000000+ when can't do
+      // it via memory mapping.
+      e.cmp(i.src1.reg().cvt32(), 0xE0000000);
+      e.setae(e.cl);
+      e.movzx(e.ecx, e.cl);
+      e.shl(e.ecx, 12);
+      e.add(e.ecx, i.src1.reg().cvt32());
+    } else {
+      e.mov(e.ecx, i.src1.reg().cvt32());
+    }
     e.lock();
     e.cmpxchg(e.dword[e.GetMembaseReg() + e.rcx], i.src3);
     e.sete(i.dest);
@@ -153,7 +194,17 @@ struct ATOMIC_COMPARE_EXCHANGE_I64
                I<OPCODE_ATOMIC_COMPARE_EXCHANGE, I8Op, I64Op, I64Op, I64Op>> {
   static void Emit(X64Emitter& e, const EmitArgType& i) {
     e.mov(e.rax, i.src2);
-    e.mov(e.ecx, i.src1.reg().cvt32());
+    if (xe::memory::allocation_granularity() > 0x1000) {
+      // Emulate the 4 KB physical address offset in 0xE0000000+ when can't do
+      // it via memory mapping.
+      e.cmp(i.src1.reg().cvt32(), 0xE0000000);
+      e.setae(e.cl);
+      e.movzx(e.ecx, e.cl);
+      e.shl(e.ecx, 12);
+      e.add(e.ecx, i.src1.reg().cvt32());
+    } else {
+      e.mov(e.ecx, i.src1.reg().cvt32());
+    }
     e.lock();
     e.cmpxchg(e.qword[e.GetMembaseReg() + e.rcx], i.src3);
     e.sete(i.dest);
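
In both atomic compare-exchange sequences, rax is already occupied by the comparand that cmpxchg requires, so the same branchless 4 KB adjustment is computed in ecx instead and the locked cmpxchg addresses memory through membase + rcx.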

View File

@@ -168,11 +168,8 @@ bool Memory::Initialize() {
   // Prepare physical heaps.
   heaps_.physical.Initialize(this, physical_membase_, 0x00000000, 0x20000000,
                              4096);
-  // HACK: should be 64k, but with us overlaying A and E it needs to be 4.
-  /*heaps_.vA0000000.Initialize(this, virtual_membase_, 0xA0000000, 0x20000000,
-     64 * 1024, &heaps_.physical);*/
   heaps_.vA0000000.Initialize(this, virtual_membase_, 0xA0000000, 0x20000000,
-                              4 * 1024, &heaps_.physical);
+                              64 * 1024, &heaps_.physical);
   heaps_.vC0000000.Initialize(this, virtual_membase_, 0xC0000000, 0x20000000,
                               16 * 1024 * 1024, &heaps_.physical);
   heaps_.vE0000000.Initialize(this, virtual_membase_, 0xE0000000, 0x1FD00000,
@@ -263,7 +260,7 @@ static const struct {
     {
         0xE0000000,
         0xFFFFFFFF,
-        0x0000000100000000ull,
+        0x0000000100001000ull,
     },
     // - physical raw
    {
@@ -274,11 +271,15 @@ static const struct {
 };
 int Memory::MapViews(uint8_t* mapping_base) {
   assert_true(xe::countof(map_info) == xe::countof(views_.all_views));
+  // 0xE0000000 4 KB offset is emulated via host_address_offset and on the CPU
+  // side if system allocation granularity is bigger than 4 KB.
+  uint64_t granularity_mask = ~uint64_t(system_allocation_granularity_ - 1);
   for (size_t n = 0; n < xe::countof(map_info); n++) {
     views_.all_views[n] = reinterpret_cast<uint8_t*>(xe::memory::MapFileView(
         mapping_, mapping_base + map_info[n].virtual_address_start,
         map_info[n].virtual_address_end - map_info[n].virtual_address_start + 1,
-        xe::memory::PageAccess::kReadWrite, map_info[n].target_address));
+        xe::memory::PageAccess::kReadWrite,
+        map_info[n].target_address & granularity_mask));
     if (!views_.all_views[n]) {
       // Failed, so bail and try again.
       UnmapViews();
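
MapFileView can only place a view at an offset aligned to the system allocation granularity, so on hosts where that granularity exceeds 4 KB the new 0x0000000100001000 target for the 0xE0000000 view is rounded down with granularity_mask, and the dropped 4 KB are recovered through host_address_offset plus the JIT-side adjustment above. A worked example, assuming the 64 KB granularity typical of Windows:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t target_address = 0x0000000100001000ull;  // physical base + 4 KB
  uint64_t granularity = 0x10000ull;                // assumed 64 KB host value
  uint64_t granularity_mask = ~uint64_t(granularity - 1);
  // The view is mapped 4 KB "early"; the CPU side re-adds the difference.
  assert((target_address & granularity_mask) == 0x0000000100000000ull);
  assert(target_address - (target_address & granularity_mask) == 0x1000ull);
  return 0;
}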
@@ -331,8 +332,7 @@ const BaseHeap* Memory::LookupHeap(uint32_t address) const {
 BaseHeap* Memory::LookupHeapByType(bool physical, uint32_t page_size) {
   if (physical) {
     if (page_size <= 4096) {
-      // HACK: should be vE0000000
-      return &heaps_.vA0000000;
+      return &heaps_.vE0000000;
    } else if (page_size <= 64 * 1024) {
      return &heaps_.vA0000000;
    } else {