[CPU, Memory] 0xE0000000 adjustment by @elad335 and mapping
Parent: c021c7741d
Commit: 003c02c640
@@ -12,6 +12,7 @@
 #include <algorithm>
 #include <cstring>
 
+#include "xenia/base/memory.h"
 #include "xenia/cpu/backend/x64/x64_op.h"
 #include "xenia/cpu/backend/x64/x64_tracers.h"
 
@@ -38,14 +39,29 @@ RegExp ComputeMemoryAddressOffset(X64Emitter& e, const T& guest,
     address += offset_const;
     if (address < 0x80000000) {
       return e.GetMembaseReg() + address;
+    } else {
+      if (address >= 0xE0000000 &&
+          xe::memory::allocation_granularity() > 0x1000) {
+        e.mov(e.eax, address + 0x1000);
       } else {
         e.mov(e.eax, address);
+      }
       return e.GetMembaseReg() + e.rax;
     }
+  } else {
+    if (xe::memory::allocation_granularity() > 0x1000) {
+      // Emulate the 4 KB physical address offset in 0xE0000000+ when can't do
+      // it via memory mapping.
+      e.cmp(guest.reg().cvt32(), 0xE0000000 - offset_const);
+      e.setae(e.al);
+      e.movzx(e.eax, e.al);
+      e.shl(e.eax, 12);
+      e.add(e.eax, guest.reg().cvt32());
     } else {
       // Clear the top 32 bits, as they are likely garbage.
       // TODO(benvanik): find a way to avoid doing this.
       e.mov(e.eax, guest.reg().cvt32());
+    }
     return e.GetMembaseReg() + e.rax + offset_const;
   }
 }
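Note: the cmp/setae/movzx/shl/add sequence added here is a branchless form of "add 0x1000 if the effective address falls in the 0xE0000000+ range". A minimal C++ model of what the emitted code computes at runtime, not part of the commit (names are illustrative):

#include <cstdint>

// Branchless model of the emitted sequence: setae yields a 0/1 flag,
// shl 12 scales it to 0x1000, and the flag is folded into the address.
uint32_t AdjustedGuestAddress(uint32_t guest_reg, int32_t offset_const) {
  // Comparing against 0xE0000000 - offset_const tests the *effective*
  // address (guest + offset) without materializing it first.
  uint32_t flag = (guest_reg >= 0xE0000000u - offset_const) ? 1u : 0u;
  return guest_reg + (flag << 12);  // +0x1000 inside the E range
}
// The host address is then membase + AdjustedGuestAddress(...) + offset_const.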
@@ -60,14 +76,29 @@ RegExp ComputeMemoryAddress(X64Emitter& e, const T& guest) {
     uint32_t address = static_cast<uint32_t>(guest.constant());
     if (address < 0x80000000) {
       return e.GetMembaseReg() + address;
+    } else {
+      if (address >= 0xE0000000 &&
+          xe::memory::allocation_granularity() > 0x1000) {
+        e.mov(e.eax, address + 0x1000);
       } else {
         e.mov(e.eax, address);
+      }
       return e.GetMembaseReg() + e.rax;
     }
+  } else {
+    if (xe::memory::allocation_granularity() > 0x1000) {
+      // Emulate the 4 KB physical address offset in 0xE0000000+ when can't do
+      // it via memory mapping.
+      e.cmp(guest.reg().cvt32(), 0xE0000000);
+      e.setae(e.al);
+      e.movzx(e.eax, e.al);
+      e.shl(e.eax, 12);
+      e.add(e.eax, guest.reg().cvt32());
     } else {
       // Clear the top 32 bits, as they are likely garbage.
       // TODO(benvanik): find a way to avoid doing this.
       e.mov(e.eax, guest.reg().cvt32());
+    }
     return e.GetMembaseReg() + e.rax;
   }
 }
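Note: ComputeMemoryAddress is the same adjustment without an extra offset; for a constant guest address the displacement folds at JIT time into the immediate, so no runtime compare is emitted. A sketch, with `coarse` standing in for xe::memory::allocation_granularity() > 0x1000:

#include <cstdint>

// Model of the constant-address path: the 0x1000 displacement is baked
// into the immediate at JIT time when the host granularity is coarse.
uint32_t FoldConstantAddress(uint32_t address, bool coarse) {
  if (address >= 0xE0000000u && coarse) {
    return address + 0x1000;  // matches e.mov(e.eax, address + 0x1000)
  }
  return address;
}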
@@ -142,7 +173,17 @@ struct ATOMIC_COMPARE_EXCHANGE_I32
     I<OPCODE_ATOMIC_COMPARE_EXCHANGE, I8Op, I64Op, I32Op, I32Op>> {
   static void Emit(X64Emitter& e, const EmitArgType& i) {
     e.mov(e.eax, i.src2);
+    if (xe::memory::allocation_granularity() > 0x1000) {
+      // Emulate the 4 KB physical address offset in 0xE0000000+ when can't do
+      // it via memory mapping.
+      e.cmp(i.src1.reg().cvt32(), 0xE0000000);
+      e.setae(e.cl);
+      e.movzx(e.ecx, e.cl);
+      e.shl(e.ecx, 12);
+      e.add(e.ecx, i.src1.reg().cvt32());
+    } else {
       e.mov(e.ecx, i.src1.reg().cvt32());
+    }
     e.lock();
     e.cmpxchg(e.dword[e.GetMembaseReg() + e.rcx], i.src3);
     e.sete(i.dest);
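Note: for atomic compare-exchange the same adjustment lands in ecx before the lock cmpxchg; sete then reports whether the exchange happened. A hedged sketch of the runtime semantics, with std::atomic standing in for the emitted lock cmpxchg (the 64-bit variant in the next hunk differs only in operand width: rax and a qword cmpxchg):

#include <atomic>
#include <cstdint>

// Sketch of the mov/cmp/setae/.../lock cmpxchg/sete sequence. `membase`
// is the host base of the guest address space; `coarse` stands in for
// xe::memory::allocation_granularity() > 0x1000.
bool GuestCompareExchange32(uint8_t* membase, uint32_t guest_addr,
                            uint32_t expected, uint32_t desired,
                            bool coarse) {
  uint32_t host_off = guest_addr;
  if (coarse && guest_addr >= 0xE0000000u) {
    host_off += 0x1000;  // the setae/shl 12/add path
  }
  auto* target = reinterpret_cast<std::atomic<uint32_t>*>(membase + host_off);
  // lock cmpxchg sets ZF when [target] matched eax; sete copies ZF to dest.
  return target->compare_exchange_strong(expected, desired);
}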
@@ -153,7 +194,17 @@ struct ATOMIC_COMPARE_EXCHANGE_I64
     I<OPCODE_ATOMIC_COMPARE_EXCHANGE, I8Op, I64Op, I64Op, I64Op>> {
   static void Emit(X64Emitter& e, const EmitArgType& i) {
     e.mov(e.rax, i.src2);
+    if (xe::memory::allocation_granularity() > 0x1000) {
+      // Emulate the 4 KB physical address offset in 0xE0000000+ when can't do
+      // it via memory mapping.
+      e.cmp(i.src1.reg().cvt32(), 0xE0000000);
+      e.setae(e.cl);
+      e.movzx(e.ecx, e.cl);
+      e.shl(e.ecx, 12);
+      e.add(e.ecx, i.src1.reg().cvt32());
+    } else {
       e.mov(e.ecx, i.src1.reg().cvt32());
+    }
     e.lock();
     e.cmpxchg(e.qword[e.GetMembaseReg() + e.rcx], i.src3);
     e.sete(i.dest);
@@ -168,11 +168,8 @@ bool Memory::Initialize() {
   // Prepare physical heaps.
   heaps_.physical.Initialize(this, physical_membase_, 0x00000000, 0x20000000,
                              4096);
-  // HACK: should be 64k, but with us overlaying A and E it needs to be 4.
-  /*heaps_.vA0000000.Initialize(this, virtual_membase_, 0xA0000000, 0x20000000,
-     64 * 1024, &heaps_.physical);*/
   heaps_.vA0000000.Initialize(this, virtual_membase_, 0xA0000000, 0x20000000,
-                              4 * 1024, &heaps_.physical);
+                              64 * 1024, &heaps_.physical);
   heaps_.vC0000000.Initialize(this, virtual_membase_, 0xC0000000, 0x20000000,
                               16 * 1024 * 1024, &heaps_.physical);
   heaps_.vE0000000.Initialize(this, virtual_membase_, 0xE0000000, 0x1FD00000,
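Note: with the 4 KB offset now handled by the shifted 0xE0000000 view (or by the JIT when the host can't provide it), vA0000000 returns to its natural 64 KB page size instead of the old 4 KB HACK. Restating the resulting physical-heap layout (the struct and field names are illustrative, and the vE0000000 page size is assumed from context since its line is truncated in the hunk):

#include <cstdint>

// Restatement of the heap parameters from the hunk above.
struct PhysicalHeapConfig {
  uint32_t virtual_base;
  uint32_t size;
  uint32_t page_size;
};
constexpr PhysicalHeapConfig kHeaps[] = {
    {0xA0000000u, 0x20000000u, 64 * 1024},         // back to 64 KB pages
    {0xC0000000u, 0x20000000u, 16 * 1024 * 1024},  // 16 MB pages
    {0xE0000000u, 0x1FD00000u, 4096},              // 4 KB pages (assumed)
};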
@@ -263,7 +260,7 @@ static const struct {
   {
     0xE0000000,
     0xFFFFFFFF,
-    0x0000000100000000ull,
+    0x0000000100001000ull,
   },
   // - physical raw
   {
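Note: the 0xE0000000 view's target moves from file offset 0x100000000 to 0x100001000. Assuming 0x100000000 is where physical memory begins within the shared mapping (as the neighboring "physical raw" entries suggest), guest 0xE0000000 + x now reads physical x + 0x1000 directly through the mapping. A small worked example under that assumption:

#include <cstdint>

// Assumed layout: physical memory begins at file offset 0x100000000.
constexpr uint64_t kPhysicalBase = 0x0000000100000000ull;
constexpr uint64_t kE0Target = 0x0000000100001000ull;  // new value

// Physical address backing a guest 0xE0000000-range access.
uint64_t PhysicalForE0(uint32_t guest_address) {
  uint64_t file_offset = kE0Target + (guest_address - 0xE0000000u);
  return file_offset - kPhysicalBase;  // e.g. 0xE0000020 -> 0x1020
}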
@@ -274,11 +271,15 @@ static const struct {
 };
 int Memory::MapViews(uint8_t* mapping_base) {
   assert_true(xe::countof(map_info) == xe::countof(views_.all_views));
+  // 0xE0000000 4 KB offset is emulated via host_address_offset and on the CPU
+  // side if system allocation granularity is bigger than 4 KB.
+  uint64_t granularity_mask = ~uint64_t(system_allocation_granularity_ - 1);
   for (size_t n = 0; n < xe::countof(map_info); n++) {
     views_.all_views[n] = reinterpret_cast<uint8_t*>(xe::memory::MapFileView(
         mapping_, mapping_base + map_info[n].virtual_address_start,
         map_info[n].virtual_address_end - map_info[n].virtual_address_start + 1,
-        xe::memory::PageAccess::kReadWrite, map_info[n].target_address));
+        xe::memory::PageAccess::kReadWrite,
+        map_info[n].target_address & granularity_mask));
     if (!views_.all_views[n]) {
       // Failed, so bail and try again.
       UnmapViews();
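Note: file-view offsets must be aligned to the host allocation granularity (typically 64 KB on Windows), so the shifted target is masked down for the view and the 4 KB remainder is exactly what the JIT-side setae/shl sequence re-adds. A sketch of the split:

#include <cstdint>

// Split a (possibly unaligned) target into the granularity-aligned
// view offset and the remainder the CPU side must emulate.
void SplitViewTarget(uint64_t target, uint64_t granularity,
                     uint64_t* view_offset, uint64_t* host_extra) {
  uint64_t mask = ~(granularity - 1);         // mirrors granularity_mask above
  *view_offset = target & mask;               // handed to the view mapping
  *host_extra = target & (granularity - 1);   // re-added by the JIT
}
// 0x100001000 with 64 KB granularity -> view at 0x100000000 plus a 0x1000
// CPU-side offset; with 4 KB granularity the mask is a no-op and the view
// itself is shifted, so no CPU-side adjustment is needed.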
@@ -331,8 +332,7 @@ const BaseHeap* Memory::LookupHeap(uint32_t address) const {
 BaseHeap* Memory::LookupHeapByType(bool physical, uint32_t page_size) {
   if (physical) {
     if (page_size <= 4096) {
-      // HACK: should be vE0000000
-      return &heaps_.vA0000000;
+      return &heaps_.vE0000000;
     } else if (page_size <= 64 * 1024) {
       return &heaps_.vA0000000;
     } else {
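Note: 4 KB physical allocations are now served by the vE0000000 heap itself rather than the vA0000000 stand-in. A condensed restatement of the selection (the final else branch is cut off in the hunk; vC0000000 there is an assumption):

#include <cstdint>

// Heap base chosen per requested page size after this change.
uint32_t PhysicalHeapBase(uint32_t page_size) {
  if (page_size <= 4096) {
    return 0xE0000000u;  // previously vA0000000 under the old HACK
  } else if (page_size <= 64 * 1024) {
    return 0xA0000000u;
  }
  return 0xC0000000u;  // assumed: branch truncated in the hunk
}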