mirror of https://github.com/xemu-project/xemu.git
Merge tag 'pull-tcg-20241013' of https://gitlab.com/rth7680/qemu into staging

linux-user/i386: Emulate orig_ax
linux-user/vm86: Fix compilation with Clang
tcg: remove singlestep_enabled from DisasContextBase
accel/tcg: Add TCGCPUOps.tlb_fill_align
target/hppa: Handle alignment faults in hppa_get_physical_address
target/arm: Fix alignment fault priority in get_phys_addr_lpae

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmcMRU4dHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV9dSQf+MUJq//oig+bDeUlQ
# v3uBMFVi1DBYI1Y/xVODADpn8Ltv5s9v7N+/phi+St2W65OzGNYviHvq/abeyhdo
# M40LGtOvjO6Mns+Z9NKTobtT8n4ap4JJyoFjuXFTHkMMDiQ/v7FkEJJoS3W2bemi
# zmKYF/vWe3bwI+d3+dyaUjA92gSs+Hlj8uEVBlzn3ubA19ZdvtyfKURPQynrkwlo
# dFtAOFRFBU6vrlJSBElxUfYO4jC4Cng19EOrWvIsuKAkACuhiHgah10i3WKw8Asz
# 1iRUYXe0EOlX2RYNTD+Oj5j0cViRylirgPtIhEIPBuDP7m1Jy1JO4dVARUJBBU71
# Zd4Uuw==
# =EX+a
# -----END PGP SIGNATURE-----
# gpg: Signature made Sun 13 Oct 2024 23:10:22 BST
# gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg: issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* tag 'pull-tcg-20241013' of https://gitlab.com/rth7680/qemu: (27 commits)
  target/arm: Fix alignment fault priority in get_phys_addr_lpae
  target/arm: Implement TCGCPUOps.tlb_fill_align
  target/arm: Move device detection earlier in get_phys_addr_lpae
  target/arm: Pass MemOp to get_phys_addr_lpae
  target/arm: Pass MemOp through get_phys_addr_twostage
  target/arm: Pass MemOp to get_phys_addr_nogpc
  target/arm: Pass MemOp to get_phys_addr_gpc
  target/arm: Pass MemOp to get_phys_addr_with_space_nogpc
  target/arm: Pass MemOp to get_phys_addr
  target/hppa: Implement TCGCPUOps.tlb_fill_align
  target/hppa: Handle alignment faults in hppa_get_physical_address
  target/hppa: Fix priority of T, D, and B page faults
  target/hppa: Perform access rights before protection id check
  target/hppa: Add MemOp argument to hppa_get_physical_address
  accel/tcg: Use the alignment test in tlb_fill_align
  accel/tcg: Add TCGCPUOps.tlb_fill_align
  include/exec/memop: Introduce memop_atomicity_bits
  include/exec/memop: Rename get_alignment_bits
  include/exec/memop: Move get_alignment_bits from tcg.h
  accel/tcg: Assert noreturn from write-only page for atomics
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 3860a2a8de
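The centrepiece of this pull is the new TCGCPUOps.tlb_fill_align hook: instead of the core softmmu code checking alignment before calling tlb_fill, the target performs the check itself and can therefore order the alignment fault against its other MMU faults. Below is a minimal sketch of how a target might wire up the hook, following the contract documented in the diff; the mycpu_* helpers are hypothetical placeholders and are not part of this series.

/* Illustrative sketch only; the mycpu_* helpers are hypothetical. */
#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"    /* TCGCPUOps, tlb_fill_align signature */
#include "exec/memop.h"             /* MemOp, memop_alignment_bits() */

static bool mycpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out,
                                 vaddr addr, MMUAccessType access_type,
                                 int mmu_idx, MemOp memop, int size,
                                 bool probe, uintptr_t ra)
{
    /* The target chooses where the alignment fault ranks among its faults. */
    if (memop && (addr & ((1 << memop_alignment_bits(memop)) - 1))) {
        if (probe) {
            return false;
        }
        mycpu_raise_alignment_fault(cs, addr, access_type, mmu_idx, ra);
        /* does not return */
    }
    if (!mycpu_translate(cs, out, addr, access_type, mmu_idx)) {
        if (probe) {
            return false;
        }
        mycpu_raise_mmu_fault(cs, addr, access_type, mmu_idx, ra);
        /* does not return */
    }
    /* On success the core installs *out via tlb_set_page_full(). */
    return true;
}

static const TCGCPUOps mycpu_tcg_ops = {
    .tlb_fill_align = mycpu_tlb_fill_align,
    /* ... other hooks ... */
};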
@@ -1221,22 +1221,35 @@ void tlb_set_page(CPUState *cpu, vaddr addr,
 }
 
 /*
- * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
- * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
- * be discarded and looked up again (e.g. via tlb_entry()).
+ * Note: tlb_fill_align() can trigger a resize of the TLB.
+ * This means that all of the caller's prior references to the TLB table
+ * (e.g. CPUTLBEntry pointers) must be discarded and looked up again
+ * (e.g. via tlb_entry()).
  */
-static void tlb_fill(CPUState *cpu, vaddr addr, int size,
-                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
+static bool tlb_fill_align(CPUState *cpu, vaddr addr, MMUAccessType type,
+                           int mmu_idx, MemOp memop, int size,
+                           bool probe, uintptr_t ra)
 {
-    bool ok;
+    const TCGCPUOps *ops = cpu->cc->tcg_ops;
+    CPUTLBEntryFull full;
 
-    /*
-     * This is not a probe, so only valid return is success; failure
-     * should result in exception + longjmp to the cpu loop.
-     */
-    ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
-                                    access_type, mmu_idx, false, retaddr);
-    assert(ok);
+    if (ops->tlb_fill_align) {
+        if (ops->tlb_fill_align(cpu, &full, addr, type, mmu_idx,
+                                memop, size, probe, ra)) {
+            tlb_set_page_full(cpu, mmu_idx, addr, &full);
+            return true;
+        }
+    } else {
+        /* Legacy behaviour is alignment before paging. */
+        if (addr & ((1u << memop_alignment_bits(memop)) - 1)) {
+            ops->do_unaligned_access(cpu, addr, type, mmu_idx, ra);
+        }
+        if (ops->tlb_fill(cpu, addr, size, type, mmu_idx, probe, ra)) {
+            return true;
+        }
+    }
+    assert(probe);
+    return false;
 }
 
 static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
@@ -1351,22 +1364,22 @@ static int probe_access_internal(CPUState *cpu, vaddr addr,
 
     if (!tlb_hit_page(tlb_addr, page_addr)) {
         if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, page_addr)) {
-            if (!cpu->cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type,
-                                            mmu_idx, nonfault, retaddr)) {
+            if (!tlb_fill_align(cpu, addr, access_type, mmu_idx,
+                                0, fault_size, nonfault, retaddr)) {
                 /* Non-faulting page table read failed. */
                 *phost = NULL;
                 *pfull = NULL;
                 return TLB_INVALID_MASK;
             }
 
-            /* TLB resize via tlb_fill may have moved the entry. */
+            /* TLB resize via tlb_fill_align may have moved the entry. */
             index = tlb_index(cpu, mmu_idx, addr);
             entry = tlb_entry(cpu, mmu_idx, addr);
 
             /*
              * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
-             * to force the next access through tlb_fill. We've just
-             * called tlb_fill, so we know that this entry *is* valid.
+             * to force the next access through tlb_fill_align. We've just
+             * called tlb_fill_align, so we know that this entry *is* valid.
              */
             flags &= ~TLB_INVALID_MASK;
         }
@@ -1607,16 +1620,17 @@ typedef struct MMULookupLocals {
  * mmu_lookup1: translate one page
  * @cpu: generic cpu state
  * @data: lookup parameters
+ * @memop: memory operation for the access, or 0
  * @mmu_idx: virtual address context
  * @access_type: load/store/code
  * @ra: return address into tcg generated code, or 0
  *
  * Resolve the translation for the one page at @data.addr, filling in
  * the rest of @data with the results.  If the translation fails,
- * tlb_fill will longjmp out.  Return true if the softmmu tlb for
+ * tlb_fill_align will longjmp out.  Return true if the softmmu tlb for
  * @mmu_idx may have resized.
  */
-static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
+static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, MemOp memop,
                         int mmu_idx, MMUAccessType access_type, uintptr_t ra)
 {
     vaddr addr = data->addr;
@@ -1631,7 +1645,8 @@ static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
     if (!tlb_hit(tlb_addr, addr)) {
         if (!victim_tlb_hit(cpu, mmu_idx, index, access_type,
                             addr & TARGET_PAGE_MASK)) {
-            tlb_fill(cpu, addr, data->size, access_type, mmu_idx, ra);
+            tlb_fill_align(cpu, addr, access_type, mmu_idx,
+                           memop, data->size, false, ra);
             maybe_resized = true;
             index = tlb_index(cpu, mmu_idx, addr);
             entry = tlb_entry(cpu, mmu_idx, addr);
@@ -1643,6 +1658,25 @@ static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
     flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
     flags |= full->slow_flags[access_type];
 
+    if (likely(!maybe_resized)) {
+        /* Alignment has not been checked by tlb_fill_align. */
+        int a_bits = memop_alignment_bits(memop);
+
+        /*
+         * This alignment check differs from the one above, in that this is
+         * based on the atomicity of the operation.  The intended use case is
+         * the ARM memory type field of each PTE, where access to pages with
+         * Device memory type require alignment.
+         */
+        if (unlikely(flags & TLB_CHECK_ALIGNED)) {
+            int at_bits = memop_atomicity_bits(memop);
+            a_bits = MAX(a_bits, at_bits);
+        }
+        if (unlikely(addr & ((1 << a_bits) - 1))) {
+            cpu_unaligned_access(cpu, addr, access_type, mmu_idx, ra);
+        }
+    }
+
     data->full = full;
     data->flags = flags;
     /* Compute haddr speculatively; depending on flags it might be invalid. */
@@ -1699,7 +1733,6 @@ static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data,
 static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
                        uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
 {
-    unsigned a_bits;
     bool crosspage;
     int flags;
 
@@ -1708,12 +1741,6 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
 
     tcg_debug_assert(l->mmu_idx < NB_MMU_MODES);
 
-    /* Handle CPU specific unaligned behaviour */
-    a_bits = get_alignment_bits(l->memop);
-    if (addr & ((1 << a_bits) - 1)) {
-        cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
-    }
-
     l->page[0].addr = addr;
     l->page[0].size = memop_size(l->memop);
     l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK;
@@ -1721,7 +1748,7 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
     crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;
 
     if (likely(!crosspage)) {
-        mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
+        mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra);
 
         flags = l->page[0].flags;
         if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
@@ -1740,8 +1767,8 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
          * Lookup both pages, recognizing exceptions from either.  If the
          * second lookup potentially resized, refresh first CPUTLBEntryFull.
          */
-        mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
-        if (mmu_lookup1(cpu, &l->page[1], l->mmu_idx, type, ra)) {
+        mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra);
+        if (mmu_lookup1(cpu, &l->page[1], 0, l->mmu_idx, type, ra)) {
             uintptr_t index = tlb_index(cpu, l->mmu_idx, addr);
             l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index];
         }
@@ -1760,31 +1787,6 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
         tcg_debug_assert((flags & TLB_BSWAP) == 0);
     }
 
-    /*
-     * This alignment check differs from the one above, in that this is
-     * based on the atomicity of the operation.  The intended use case is
-     * the ARM memory type field of each PTE, where access to pages with
-     * Device memory type require alignment.
-     */
-    if (unlikely(flags & TLB_CHECK_ALIGNED)) {
-        MemOp size = l->memop & MO_SIZE;
-
-        switch (l->memop & MO_ATOM_MASK) {
-        case MO_ATOM_NONE:
-            size = MO_8;
-            break;
-        case MO_ATOM_IFALIGN_PAIR:
-        case MO_ATOM_WITHIN16_PAIR:
-            size = size ? size - 1 : 0;
-            break;
-        default:
-            break;
-        }
-        if (addr & ((1 << size) - 1)) {
-            cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
-        }
-    }
-
     return crosspage;
 }
 
@@ -1797,34 +1799,18 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
 {
     uintptr_t mmu_idx = get_mmuidx(oi);
     MemOp mop = get_memop(oi);
-    int a_bits = get_alignment_bits(mop);
     uintptr_t index;
     CPUTLBEntry *tlbe;
     vaddr tlb_addr;
     void *hostaddr;
     CPUTLBEntryFull *full;
+    bool did_tlb_fill = false;
 
     tcg_debug_assert(mmu_idx < NB_MMU_MODES);
 
     /* Adjust the given return address. */
     retaddr -= GETPC_ADJ;
 
-    /* Enforce guest required alignment. */
-    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
-        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
-        cpu_unaligned_access(cpu, addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
-    }
-
-    /* Enforce qemu required alignment. */
-    if (unlikely(addr & (size - 1))) {
-        /* We get here if guest alignment was not requested,
-           or was not enforced by cpu_unaligned_access above.
-           We might widen the access and emulate, but for now
-           mark an exception and exit the cpu loop. */
-        goto stop_the_world;
-    }
-
     index = tlb_index(cpu, mmu_idx, addr);
     tlbe = tlb_entry(cpu, mmu_idx, addr);
 
@@ -1833,8 +1819,9 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
     if (!tlb_hit(tlb_addr, addr)) {
         if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE,
                             addr & TARGET_PAGE_MASK)) {
-            tlb_fill(cpu, addr, size,
-                     MMU_DATA_STORE, mmu_idx, retaddr);
+            tlb_fill_align(cpu, addr, MMU_DATA_STORE, mmu_idx,
+                           mop, size, false, retaddr);
+            did_tlb_fill = true;
             index = tlb_index(cpu, mmu_idx, addr);
             tlbe = tlb_entry(cpu, mmu_idx, addr);
         }
@@ -1848,15 +1835,32 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
      * but addr_read will only be -1 if PAGE_READ was unset.
      */
     if (unlikely(tlbe->addr_read == -1)) {
-        tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
+        tlb_fill_align(cpu, addr, MMU_DATA_LOAD, mmu_idx,
+                       0, size, false, retaddr);
         /*
          * Since we don't support reads and writes to different
          * addresses, and we do have the proper page loaded for
-         * write, this shouldn't ever return.  But just in case,
-         * handle via stop-the-world.
+         * write, this shouldn't ever return.
+         */
+        g_assert_not_reached();
+    }
+
+    /* Enforce guest required alignment, if not handled by tlb_fill_align. */
+    if (!did_tlb_fill && (addr & ((1 << memop_alignment_bits(mop)) - 1))) {
+        cpu_unaligned_access(cpu, addr, MMU_DATA_STORE, mmu_idx, retaddr);
+    }
+
+    /* Enforce qemu required alignment. */
+    if (unlikely(addr & (size - 1))) {
+        /*
+         * We get here if guest alignment was not requested, or was not
+         * enforced by cpu_unaligned_access or tlb_fill_align above.
+         * We might widen the access and emulate, but for now
+         * mark an exception and exit the cpu loop.
          */
         goto stop_the_world;
     }
 
     /* Collect tlb flags for read. */
     tlb_addr |= tlbe->addr_read;
@@ -129,7 +129,6 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
     db->is_jmp = DISAS_NEXT;
     db->num_insns = 0;
     db->max_insns = *max_insns;
-    db->singlestep_enabled = cflags & CF_SINGLE_STEP;
     db->insn_start = NULL;
     db->fake_insn = false;
     db->host_addr[0] = host_pc;
@@ -954,7 +954,7 @@ void page_reset_target_data(target_ulong start, target_ulong last) { }
 static void *cpu_mmu_lookup(CPUState *cpu, vaddr addr,
                             MemOp mop, uintptr_t ra, MMUAccessType type)
 {
-    int a_bits = get_alignment_bits(mop);
+    int a_bits = memop_alignment_bits(mop);
     void *ret;
 
     /* Enforce guest required alignment. */
@@ -1236,7 +1236,7 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
                                int size, uintptr_t retaddr)
 {
     MemOp mop = get_memop(oi);
-    int a_bits = get_alignment_bits(mop);
+    int a_bits = memop_alignment_bits(mop);
     void *ret;
 
     /* Enforce guest required alignment. */
@@ -1,4 +1,4 @@
 TARGET_ARCH=i386
 TARGET_SYSTBL_ABI=i386
 TARGET_SYSTBL=syscall_32.tbl
-TARGET_XML_FILES= gdb-xml/i386-32bit.xml
+TARGET_XML_FILES= gdb-xml/i386-32bit.xml gdb-xml/i386-32bit-linux.xml
@@ -2,4 +2,4 @@ TARGET_ARCH=x86_64
 TARGET_BASE_ARCH=i386
 TARGET_SYSTBL_ABI=common,64
 TARGET_SYSTBL=syscall_64.tbl
-TARGET_XML_FILES= gdb-xml/i386-64bit.xml
+TARGET_XML_FILES= gdb-xml/i386-64bit.xml gdb-xml/i386-64bit-linux.xml
@@ -0,0 +1,11 @@
+<?xml version="1.0"?>
+<!-- Copyright (C) 2010-2024 Free Software Foundation, Inc.
+
+     Copying and distribution of this file, with or without modification,
+     are permitted in any medium without royalty provided the copyright
+     notice and this notice are preserved.  -->
+
+<!DOCTYPE feature SYSTEM "gdb-target.dtd">
+<feature name="org.gnu.gdb.i386.linux">
+  <reg name="orig_eax" bitsize="32" type="int"/>
+</feature>
@@ -0,0 +1,11 @@
+<?xml version="1.0"?>
+<!-- Copyright (C) 2010-2024 Free Software Foundation, Inc.
+
+     Copying and distribution of this file, with or without modification,
+     are permitted in any medium without royalty provided the copyright
+     notice and this notice are preserved.  -->
+
+<!DOCTYPE feature SYSTEM "gdb-target.dtd">
+<feature name="org.gnu.gdb.i386.linux">
+  <reg name="orig_rax" bitsize="64" type="int"/>
+</feature>
@@ -238,6 +238,17 @@ static inline ArchCPU *env_archcpu(CPUArchState *env)
     return (void *)env - sizeof(CPUState);
 }
 
+/**
+ * env_cpu_const(env)
+ * @env: The architecture environment
+ *
+ * Return the CPUState associated with the environment.
+ */
+static inline const CPUState *env_cpu_const(const CPUArchState *env)
+{
+    return (void *)env - sizeof(CPUState);
+}
+
 /**
  * env_cpu(env)
  * @env: The architecture environment
@@ -246,7 +257,7 @@ static inline ArchCPU *env_archcpu(CPUArchState *env)
  */
 static inline CPUState *env_cpu(CPUArchState *env)
 {
-    return (void *)env - sizeof(CPUState);
+    return (CPUState *)env_cpu_const(env);
 }
 
 #ifndef CONFIG_USER_ONLY
@@ -170,4 +170,51 @@ static inline bool memop_big_endian(MemOp op)
     return (op & MO_BSWAP) == MO_BE;
 }
 
+/**
+ * memop_alignment_bits:
+ * @memop: MemOp value
+ *
+ * Extract the alignment size from the memop.
+ */
+static inline unsigned memop_alignment_bits(MemOp memop)
+{
+    unsigned a = memop & MO_AMASK;
+
+    if (a == MO_UNALN) {
+        /* No alignment required. */
+        a = 0;
+    } else if (a == MO_ALIGN) {
+        /* A natural alignment requirement. */
+        a = memop & MO_SIZE;
+    } else {
+        /* A specific alignment requirement. */
+        a = a >> MO_ASHIFT;
+    }
+    return a;
+}
+
+/*
+ * memop_atomicity_bits:
+ * @memop: MemOp value
+ *
+ * Extract the atomicity size from the memop.
+ */
+static inline unsigned memop_atomicity_bits(MemOp memop)
+{
+    unsigned size = memop & MO_SIZE;
+
+    switch (memop & MO_ATOM_MASK) {
+    case MO_ATOM_NONE:
+        size = MO_8;
+        break;
+    case MO_ATOM_IFALIGN_PAIR:
+    case MO_ATOM_WITHIN16_PAIR:
+        size = size ? size - 1 : 0;
+        break;
+    default:
+        break;
+    }
+    return size;
+}
+
 #endif
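For reference, a hedged sketch of how these two helpers are consumed by the softmmu changes earlier in this diff (compare tlb_fill_align() and mmu_lookup1()); the helper name below is made up purely for illustration.

/* Illustrative only: combine guest alignment with Device-memory atomicity. */
static bool sketch_access_is_misaligned(vaddr addr, MemOp memop,
                                        bool device_memory)
{
    unsigned a_bits = memop_alignment_bits(memop);

    if (device_memory) {
        /* Device memory additionally requires single-copy-atomic alignment. */
        a_bits = MAX(a_bits, memop_atomicity_bits(memop));
    }
    return (addr & ((1u << a_bits) - 1)) != 0;
}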
@@ -71,7 +71,6 @@ typedef enum DisasJumpType {
  * @is_jmp: What instruction to disassemble next.
  * @num_insns: Number of translated instructions (including current).
  * @max_insns: Maximum number of instructions to be translated in this TB.
- * @singlestep_enabled: "Hardware" single stepping enabled.
  * @plugin_enabled: TCG plugin enabled in this TB.
  * @fake_insn: True if translator_fake_ldb used.
  * @insn_start: The last op emitted by the insn_start hook,
@@ -86,7 +85,6 @@ struct DisasContextBase {
     DisasJumpType is_jmp;
     int num_insns;
     int max_insns;
-    bool singlestep_enabled;
     bool plugin_enabled;
     bool fake_insn;
     struct TCGOp *insn_start;
@@ -205,7 +205,7 @@ struct CPUClass {
  * so the layout is not as critical as that of CPUTLBEntry.  This is
  * also why we don't want to combine the two structs.
  */
-typedef struct CPUTLBEntryFull {
+struct CPUTLBEntryFull {
     /*
      * @xlat_section contains:
      *  - in the lower TARGET_PAGE_BITS, a physical section number
@@ -261,7 +261,7 @@ typedef struct CPUTLBEntryFull {
             bool guarded;
         } arm;
     } extra;
-} CPUTLBEntryFull;
+};
 
 /*
  * Data elements that are per MMU mode, minus the bits accessed by
@@ -13,6 +13,7 @@
 #include "exec/breakpoint.h"
 #include "exec/hwaddr.h"
 #include "exec/memattrs.h"
+#include "exec/memop.h"
 #include "exec/mmu-access-type.h"
 #include "exec/vaddr.h"
 
@@ -131,6 +132,31 @@ struct TCGCPUOps {
      * same function signature.
      */
     bool (*cpu_exec_halt)(CPUState *cpu);
+    /**
+     * @tlb_fill_align: Handle a softmmu tlb miss
+     * @cpu: cpu context
+     * @out: output page properties
+     * @addr: virtual address
+     * @access_type: read, write or execute
+     * @mmu_idx: mmu context
+     * @memop: memory operation for the access
+     * @size: memory access size, or 0 for whole page
+     * @probe: test only, no fault
+     * @ra: host return address for exception unwind
+     *
+     * If the access is valid, fill in @out and return true.
+     * Otherwise if probe is true, return false.
+     * Otherwise raise an exception and do not return.
+     *
+     * The alignment check for the access is deferred to this hook,
+     * so that the target can determine the priority of any alignment
+     * fault with respect to other potential faults from paging.
+     * Zero may be passed for @memop to skip any alignment check
+     * for non-memory-access operations such as probing.
+     */
+    bool (*tlb_fill_align)(CPUState *cpu, CPUTLBEntryFull *out, vaddr addr,
+                           MMUAccessType access_type, int mmu_idx,
+                           MemOp memop, int size, bool probe, uintptr_t ra);
     /**
      * @tlb_fill: Handle a softmmu tlb miss
      *
@@ -40,6 +40,7 @@ typedef struct ConfidentialGuestSupport ConfidentialGuestSupport;
 typedef struct CPUArchState CPUArchState;
 typedef struct CPUPluginState CPUPluginState;
 typedef struct CPUState CPUState;
+typedef struct CPUTLBEntryFull CPUTLBEntryFull;
 typedef struct DeviceState DeviceState;
 typedef struct DirtyBitmapSnapshot DirtyBitmapSnapshot;
 typedef struct DisasContextBase DisasContextBase;
@@ -281,29 +281,6 @@ static inline int tcg_type_size(TCGType t)
     return 4 << i;
 }
 
-/**
- * get_alignment_bits
- * @memop: MemOp value
- *
- * Extract the alignment size from the memop.
- */
-static inline unsigned get_alignment_bits(MemOp memop)
-{
-    unsigned a = memop & MO_AMASK;
-
-    if (a == MO_UNALN) {
-        /* No alignment required. */
-        a = 0;
-    } else if (a == MO_ALIGN) {
-        /* A natural alignment requirement. */
-        a = memop & MO_SIZE;
-    } else {
-        /* A specific alignment requirement. */
-        a = a >> MO_ASHIFT;
-    }
-    return a;
-}
-
 typedef tcg_target_ulong TCGArg;
 
 /* Define type and accessor macros for TCG variables.
@@ -203,7 +203,7 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *en
     (*regs)[12] = tswapreg(env->regs[R_EDX]);
     (*regs)[13] = tswapreg(env->regs[R_ESI]);
     (*regs)[14] = tswapreg(env->regs[R_EDI]);
-    (*regs)[15] = tswapreg(env->regs[R_EAX]); /* XXX */
+    (*regs)[15] = tswapreg(get_task_state(env_cpu_const(env))->orig_ax);
     (*regs)[16] = tswapreg(env->eip);
     (*regs)[17] = tswapreg(env->segs[R_CS].selector & 0xffff);
     (*regs)[18] = tswapreg(env->eflags);
@@ -306,7 +306,7 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *en
     (*regs)[8] = tswapreg(env->segs[R_ES].selector & 0xffff);
     (*regs)[9] = tswapreg(env->segs[R_FS].selector & 0xffff);
     (*regs)[10] = tswapreg(env->segs[R_GS].selector & 0xffff);
-    (*regs)[11] = tswapreg(env->regs[R_EAX]); /* XXX */
+    (*regs)[11] = tswapreg(get_task_state(env_cpu_const(env))->orig_ax);
     (*regs)[12] = tswapreg(env->eip);
     (*regs)[13] = tswapreg(env->segs[R_CS].selector & 0xffff);
     (*regs)[14] = tswapreg(env->eflags);
@@ -4314,7 +4314,7 @@ static int wmr_write_region(void *opaque, target_ulong start,
  */
 static int elf_core_dump(int signr, const CPUArchState *env)
 {
-    const CPUState *cpu = env_cpu((CPUArchState *)env);
+    const CPUState *cpu = env_cpu_const(env);
     const TaskState *ts = (const TaskState *)get_task_state((CPUState *)cpu);
     struct rlimit dumpsize;
     CountAndSizeRegions css;
@@ -172,6 +172,7 @@ static void emulate_vsyscall(CPUX86State *env)
     /*
      * Perform the syscall.  None of the vsyscalls should need restarting.
      */
+    get_task_state(env_cpu(env))->orig_ax = syscall;
     ret = do_syscall(env, syscall, env->regs[R_EDI], env->regs[R_ESI],
                      env->regs[R_EDX], env->regs[10], env->regs[8],
                      env->regs[9], 0, 0);
@@ -221,6 +222,7 @@ void cpu_loop(CPUX86State *env)
         case EXCP_SYSCALL:
 #endif
             /* linux syscall from int $0x80 */
+            get_task_state(cs)->orig_ax = env->regs[R_EAX];
             ret = do_syscall(env,
                              env->regs[R_EAX],
                              env->regs[R_EBX],
@@ -239,6 +241,7 @@ void cpu_loop(CPUX86State *env)
 #ifdef TARGET_X86_64
         case EXCP_SYSCALL:
             /* linux syscall from syscall instruction. */
+            get_task_state(cs)->orig_ax = env->regs[R_EAX];
             ret = do_syscall(env,
                              env->regs[R_EAX],
                              env->regs[R_EDI],
@@ -113,6 +113,10 @@ struct TaskState {
     struct target_vm86plus_struct vm86plus;
     uint32_t v86flags;
     uint32_t v86mask;
+#endif
+#if defined(TARGET_I386)
+    /* Last syscall number. */
+    target_ulong orig_ax;
 #endif
     abi_ulong child_tidptr;
 #ifdef TARGET_M68K
@@ -47,30 +47,6 @@ static inline void vm_putw(CPUX86State *env, uint32_t segptr,
     cpu_stw_data(env, segptr + (reg16 & 0xffff), val);
 }
 
-static inline void vm_putl(CPUX86State *env, uint32_t segptr,
-                           unsigned int reg16, unsigned int val)
-{
-    cpu_stl_data(env, segptr + (reg16 & 0xffff), val);
-}
-
-static inline unsigned int vm_getb(CPUX86State *env,
-                                   uint32_t segptr, unsigned int reg16)
-{
-    return cpu_ldub_data(env, segptr + (reg16 & 0xffff));
-}
-
-static inline unsigned int vm_getw(CPUX86State *env,
-                                   uint32_t segptr, unsigned int reg16)
-{
-    return cpu_lduw_data(env, segptr + (reg16 & 0xffff));
-}
-
-static inline unsigned int vm_getl(CPUX86State *env,
-                                   uint32_t segptr, unsigned int reg16)
-{
-    return cpu_ldl_data(env, segptr + (reg16 & 0xffff));
-}
-
 void save_v86_state(CPUX86State *env)
 {
     CPUState *cs = env_cpu(env);
@@ -131,19 +107,6 @@ static inline void return_to_32bit(CPUX86State *env, int retval)
     env->regs[R_EAX] = retval;
 }
 
-static inline int set_IF(CPUX86State *env)
-{
-    CPUState *cs = env_cpu(env);
-    TaskState *ts = get_task_state(cs);
-
-    ts->v86flags |= VIF_MASK;
-    if (ts->v86flags & VIP_MASK) {
-        return_to_32bit(env, TARGET_VM86_STI);
-        return 1;
-    }
-    return 0;
-}
-
 static inline void clear_IF(CPUX86State *env)
 {
     CPUState *cs = env_cpu(env);
@@ -162,34 +125,6 @@ static inline void clear_AC(CPUX86State *env)
     env->eflags &= ~AC_MASK;
 }
 
-static inline int set_vflags_long(unsigned long eflags, CPUX86State *env)
-{
-    CPUState *cs = env_cpu(env);
-    TaskState *ts = get_task_state(cs);
-
-    set_flags(ts->v86flags, eflags, ts->v86mask);
-    set_flags(env->eflags, eflags, SAFE_MASK);
-    if (eflags & IF_MASK)
-        return set_IF(env);
-    else
-        clear_IF(env);
-    return 0;
-}
-
-static inline int set_vflags_short(unsigned short flags, CPUX86State *env)
-{
-    CPUState *cs = env_cpu(env);
-    TaskState *ts = get_task_state(cs);
-
-    set_flags(ts->v86flags, flags, ts->v86mask & 0xffff);
-    set_flags(env->eflags, flags, SAFE_MASK);
-    if (flags & IF_MASK)
-        return set_IF(env);
-    else
-        clear_IF(env);
-    return 0;
-}
-
 static inline unsigned int get_vflags(CPUX86State *env)
 {
     CPUState *cs = env_cpu(env);
@@ -2663,7 +2663,7 @@ static const TCGCPUOps arm_tcg_ops = {
     .record_sigsegv = arm_cpu_record_sigsegv,
     .record_sigbus = arm_cpu_record_sigbus,
 #else
-    .tlb_fill = arm_cpu_tlb_fill,
+    .tlb_fill_align = arm_cpu_tlb_fill_align,
     .cpu_exec_interrupt = arm_cpu_exec_interrupt,
     .cpu_exec_halt = arm_cpu_exec_halt,
     .do_interrupt = arm_cpu_do_interrupt,
@@ -3599,11 +3599,12 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
     GetPhysAddrResult res = {};
 
     /*
-     * I_MXTJT: Granule protection checks are not performed on the final address
-     * of a successful translation.
+     * I_MXTJT: Granule protection checks are not performed on the final
+     * address of a successful translation.  This is a translation not a
+     * memory reference, so "memop = none = 0".
      */
-    ret = get_phys_addr_with_space_nogpc(env, value, access_type, mmu_idx, ss,
-                                         &res, &fi);
+    ret = get_phys_addr_with_space_nogpc(env, value, access_type, 0,
+                                         mmu_idx, ss, &res, &fi);
 
     /*
      * ATS operations only do S1 or S1+S2 translations, so we never
@@ -816,9 +816,9 @@ void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
 void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type, uintptr_t ra);
 #else
-bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
-                      MMUAccessType access_type, int mmu_idx,
-                      bool probe, uintptr_t retaddr);
+bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
+                            MMUAccessType access_type, int mmu_idx,
+                            MemOp memop, int size, bool probe, uintptr_t ra);
 #endif
 
 static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
@@ -1432,6 +1432,7 @@ typedef struct GetPhysAddrResult {
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
+ * @memop: memory operation feeding this access, or 0 for none
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
@@ -1450,7 +1451,7 @@ typedef struct GetPhysAddrResult {
 * value.
 */
 bool get_phys_addr(CPUARMState *env, vaddr address,
-                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                   MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
                    GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
     __attribute__((nonnull));
 
@@ -1460,6 +1461,7 @@ bool get_phys_addr(CPUARMState *env, vaddr address,
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
+ * @memop: memory operation feeding this access, or 0 for none
 * @mmu_idx: MMU index indicating required translation regime
 * @space: security space for the access
 * @result: set on translation success.
@@ -1469,7 +1471,7 @@ bool get_phys_addr(CPUARMState *env, vaddr address,
 * a Granule Protection Check on the resulting address.
 */
 bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
-                                    MMUAccessType access_type,
+                                    MMUAccessType access_type, MemOp memop,
                                     ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                                     GetPhysAddrResult *result,
                                     ARMMMUFaultInfo *fi)
target/arm/ptw.c
@@ -75,13 +75,13 @@ typedef struct S1Translate {
 
 static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
                                 vaddr address,
-                                MMUAccessType access_type,
+                                MMUAccessType access_type, MemOp memop,
                                 GetPhysAddrResult *result,
                                 ARMMMUFaultInfo *fi);
 
 static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
                               vaddr address,
-                              MMUAccessType access_type,
+                              MMUAccessType access_type, MemOp memop,
                               GetPhysAddrResult *result,
                               ARMMMUFaultInfo *fi);
 
@@ -579,7 +579,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
         };
         GetPhysAddrResult s2 = { };
 
-        if (get_phys_addr_gpc(env, &s2ptw, addr, MMU_DATA_LOAD, &s2, fi)) {
+        if (get_phys_addr_gpc(env, &s2ptw, addr, MMU_DATA_LOAD, 0, &s2, fi)) {
             goto fail;
         }
 
@@ -1684,12 +1684,13 @@ static bool nv_nv1_enabled(CPUARMState *env, S1Translate *ptw)
 * @ptw: Current and next stage parameters for the walk.
 * @address: virtual address to get physical address for
 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
+ * @memop: memory operation feeding this access, or 0 for none
 * @result: set on translation success,
 * @fi: set to fault info if the translation fails
 */
 static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
                                uint64_t address,
-                               MMUAccessType access_type,
+                               MMUAccessType access_type, MemOp memop,
                                GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
 {
     ARMCPU *cpu = env_archcpu(env);
@@ -2028,8 +2029,20 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
             xn = extract64(attrs, 53, 2);
             result->f.prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0);
         }
+
+        result->cacheattrs.is_s2_format = true;
+        result->cacheattrs.attrs = extract32(attrs, 2, 4);
+        /*
+         * Security state does not really affect HCR_EL2.FWB;
+         * we only need to filter FWB for aa32 or other FEAT.
+         */
+        device = S2_attrs_are_device(arm_hcr_el2_eff(env),
+                                     result->cacheattrs.attrs);
     } else {
         int nse, ns = extract32(attrs, 5, 1);
+        uint8_t attrindx;
+        uint64_t mair;
+
         switch (out_space) {
         case ARMSS_Root:
             /*
@@ -2101,6 +2114,49 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
          */
         result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, xn, pxn,
                                     result->f.attrs.space, out_space);
+
+        /* Index into MAIR registers for cache attributes */
+        attrindx = extract32(attrs, 2, 3);
+        mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
+        assert(attrindx <= 7);
+        result->cacheattrs.is_s2_format = false;
+        result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
+
+        /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
+        if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
+            result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */
+        }
+        device = S1_attrs_are_device(result->cacheattrs.attrs);
+    }
+
+    /*
+     * Enable alignment checks on Device memory.
+     *
+     * Per R_XCHFJ, the correct ordering for alignment, permission,
+     * and stage 2 faults is:
+     *  - Alignment fault caused by the memory type
+     *  - Permission fault
+     *  - A stage 2 fault on the memory access
+     * Perform the alignment check now, so that we recognize it in
+     * the correct order.  Set TLB_CHECK_ALIGNED so that any subsequent
+     * softmmu tlb hit will also check the alignment; clear along the
+     * non-device path so that tlb_fill_flags is consistent in the
+     * event of restart_atomic_update.
+     *
+     * In v7, for a CPU without the Virtualization Extensions this
+     * access is UNPREDICTABLE; we choose to make it take the alignment
+     * fault as is required for a v7VE CPU.  (QEMU doesn't emulate any
+     * CPUs with ARM_FEATURE_LPAE but not ARM_FEATURE_V7VE anyway.)
+     */
+    if (device) {
+        unsigned a_bits = memop_atomicity_bits(memop);
+        if (address & ((1 << a_bits) - 1)) {
+            fi->type = ARMFault_Alignment;
+            goto do_fault;
+        }
+        result->f.tlb_fill_flags = TLB_CHECK_ALIGNED;
+    } else {
+        result->f.tlb_fill_flags = 0;
     }
 
     if (!(result->f.prot & (1 << access_type))) {
@@ -2130,51 +2186,6 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
     result->f.attrs.space = out_space;
     result->f.attrs.secure = arm_space_is_secure(out_space);
 
-    if (regime_is_stage2(mmu_idx)) {
-        result->cacheattrs.is_s2_format = true;
-        result->cacheattrs.attrs = extract32(attrs, 2, 4);
-        /*
-         * Security state does not really affect HCR_EL2.FWB;
-         * we only need to filter FWB for aa32 or other FEAT.
-         */
-        device = S2_attrs_are_device(arm_hcr_el2_eff(env),
-                                     result->cacheattrs.attrs);
-    } else {
-        /* Index into MAIR registers for cache attributes */
-        uint8_t attrindx = extract32(attrs, 2, 3);
-        uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
-        assert(attrindx <= 7);
-        result->cacheattrs.is_s2_format = false;
-        result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
-
-        /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
-        if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
-            result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */
-        }
-        device = S1_attrs_are_device(result->cacheattrs.attrs);
-    }
-
-    /*
-     * Enable alignment checks on Device memory.
-     *
-     * Per R_XCHFJ, this check is mis-ordered.  The correct ordering
-     * for alignment, permission, and stage 2 faults should be:
-     *  - Alignment fault caused by the memory type
-     *  - Permission fault
-     *  - A stage 2 fault on the memory access
-     * but due to the way the TCG softmmu TLB operates, we will have
-     * implicitly done the permission check and the stage2 lookup in
-     * finding the TLB entry, so the alignment check cannot be done sooner.
-     *
-     * In v7, for a CPU without the Virtualization Extensions this
-     * access is UNPREDICTABLE; we choose to make it take the alignment
-     * fault as is required for a v7VE CPU.  (QEMU doesn't emulate any
-     * CPUs with ARM_FEATURE_LPAE but not ARM_FEATURE_V7VE anyway.)
-     */
-    if (device) {
-        result->f.tlb_fill_flags |= TLB_CHECK_ALIGNED;
-    }
-
     /*
      * For FEAT_LPA2 and effective DS, the SH field in the attributes
      * was re-purposed for output address bits.  The SH attribute in
@@ -3301,7 +3312,7 @@ static bool get_phys_addr_disabled(CPUARMState *env,
 
 static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
                                    vaddr address,
-                                   MMUAccessType access_type,
+                                   MMUAccessType access_type, MemOp memop,
                                    GetPhysAddrResult *result,
                                    ARMMMUFaultInfo *fi)
 {
@@ -3313,7 +3324,8 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
     ARMSecuritySpace ipa_space;
     uint64_t hcr;
 
-    ret = get_phys_addr_nogpc(env, ptw, address, access_type, result, fi);
+    ret = get_phys_addr_nogpc(env, ptw, address, access_type,
+                              memop, result, fi);
 
     /* If S1 fails, return early. */
     if (ret) {
@@ -3339,7 +3351,8 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
     cacheattrs1 = result->cacheattrs;
     memset(result, 0, sizeof(*result));
 
-    ret = get_phys_addr_nogpc(env, ptw, ipa, access_type, result, fi);
+    ret = get_phys_addr_nogpc(env, ptw, ipa, access_type,
+                              memop, result, fi);
     fi->s2addr = ipa;
 
     /* Combine the S1 and S2 perms. */
@@ -3406,7 +3419,7 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
 
 static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
                                 vaddr address,
-                                MMUAccessType access_type,
+                                MMUAccessType access_type, MemOp memop,
                                 GetPhysAddrResult *result,
                                 ARMMMUFaultInfo *fi)
 {
@@ -3469,7 +3482,7 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
         if (arm_feature(env, ARM_FEATURE_EL2) &&
             !regime_translation_disabled(env, ARMMMUIdx_Stage2, ptw->in_space)) {
             return get_phys_addr_twostage(env, ptw, address, access_type,
-                                          result, fi);
+                                          memop, result, fi);
         }
         /* fall through */
 
@@ -3532,7 +3545,8 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
     }
 
     if (regime_using_lpae_format(env, mmu_idx)) {
-        return get_phys_addr_lpae(env, ptw, address, access_type, result, fi);
+        return get_phys_addr_lpae(env, ptw, address, access_type,
+                                  memop, result, fi);
     } else if (arm_feature(env, ARM_FEATURE_V7) ||
                regime_sctlr(env, mmu_idx) & SCTLR_XP) {
         return get_phys_addr_v6(env, ptw, address, access_type, result, fi);
@@ -3543,11 +3557,12 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
 
 static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
                               vaddr address,
-                              MMUAccessType access_type,
+                              MMUAccessType access_type, MemOp memop,
                               GetPhysAddrResult *result,
                               ARMMMUFaultInfo *fi)
 {
-    if (get_phys_addr_nogpc(env, ptw, address, access_type, result, fi)) {
+    if (get_phys_addr_nogpc(env, ptw, address, access_type,
+                            memop, result, fi)) {
         return true;
     }
     if (!granule_protection_check(env, result->f.phys_addr,
@@ -3559,7 +3574,7 @@ static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
 }
 
 bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
-                                    MMUAccessType access_type,
+                                    MMUAccessType access_type, MemOp memop,
                                     ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                                     GetPhysAddrResult *result,
                                     ARMMMUFaultInfo *fi)
@@ -3568,11 +3583,12 @@ bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
         .in_mmu_idx = mmu_idx,
         .in_space = space,
     };
-    return get_phys_addr_nogpc(env, &ptw, address, access_type, result, fi);
+    return get_phys_addr_nogpc(env, &ptw, address, access_type,
+                               memop, result, fi);
 }
 
 bool get_phys_addr(CPUARMState *env, vaddr address,
-                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                   MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
                    GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
 {
     S1Translate ptw = {
@@ -3641,7 +3657,8 @@ bool get_phys_addr(CPUARMState *env, vaddr address,
     }
 
     ptw.in_space = ss;
-    return get_phys_addr_gpc(env, &ptw, address, access_type, result, fi);
+    return get_phys_addr_gpc(env, &ptw, address, access_type,
+                             memop, result, fi);
 }
 
 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
@@ -3660,7 +3677,7 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
     ARMMMUFaultInfo fi = {};
     bool ret;
 
-    ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi);
+    ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, 0, &res, &fi);
     *attrs = res.f.attrs;
 
     if (ret) {
@@ -242,7 +242,7 @@ static const TCGCPUOps arm_v7m_tcg_ops = {
     .record_sigsegv = arm_cpu_record_sigsegv,
     .record_sigbus = arm_cpu_record_sigbus,
 #else
-    .tlb_fill = arm_cpu_tlb_fill,
+    .tlb_fill_align = arm_cpu_tlb_fill_align,
     .cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt,
     .cpu_exec_halt = arm_cpu_exec_halt,
     .do_interrupt = arm_v7m_cpu_do_interrupt,
@@ -222,7 +222,7 @@ static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
     int exc;
     bool exc_secure;

-    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &res, &fi)) {
+    if (get_phys_addr(env, addr, MMU_DATA_STORE, 0, mmu_idx, &res, &fi)) {
         /* MPU/SAU lookup failed */
         if (fi.type == ARMFault_QEMU_SFault) {
             if (mode == STACK_LAZYFP) {
@@ -311,7 +311,7 @@ static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
     bool exc_secure;
     uint32_t value;

-    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
+    if (get_phys_addr(env, addr, MMU_DATA_LOAD, 0, mmu_idx, &res, &fi)) {
         /* MPU/SAU lookup failed */
         if (fi.type == ARMFault_QEMU_SFault) {
             qemu_log_mask(CPU_LOG_INT,
@@ -2009,7 +2009,7 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, bool secure,
                       "...really SecureFault with SFSR.INVEP\n");
         return false;
     }
-    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &res, &fi)) {
+    if (get_phys_addr(env, addr, MMU_INST_FETCH, 0, mmu_idx, &res, &fi)) {
         /* the MPU lookup failed */
         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
@@ -2045,7 +2045,7 @@ static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
     ARMMMUFaultInfo fi = {};
     uint32_t value;

-    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
+    if (get_phys_addr(env, addr, MMU_DATA_LOAD, 0, mmu_idx, &res, &fi)) {
         /* MPU/SAU lookup failed */
         if (fi.type == ARMFault_QEMU_SFault) {
             qemu_log_mask(CPU_LOG_INT,
@@ -318,14 +318,13 @@ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
     arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
 }

-bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
-                      MMUAccessType access_type, int mmu_idx,
-                      bool probe, uintptr_t retaddr)
+bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr address,
+                            MMUAccessType access_type, int mmu_idx,
+                            MemOp memop, int size, bool probe, uintptr_t ra)
 {
     ARMCPU *cpu = ARM_CPU(cs);
     GetPhysAddrResult res = {};
     ARMMMUFaultInfo local_fi, *fi;
-    int ret;

     /*
      * Allow S1_ptw_translate to see any fault generated here.
@@ -339,37 +338,27 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
     }

     /*
-     * Walk the page table and (if the mapping exists) add the page
-     * to the TLB. On success, return true. Otherwise, if probing,
-     * return false. Otherwise populate fsr with ARM DFSR/IFSR fault
-     * register format, and signal the fault.
+     * Per R_XCHFJ, alignment fault not due to memory type has
+     * highest precedence. Otherwise, walk the page table and
+     * and collect the page description.
      */
-    ret = get_phys_addr(&cpu->env, address, access_type,
-                        core_to_arm_mmu_idx(&cpu->env, mmu_idx),
-                        &res, fi);
-    if (likely(!ret)) {
-        /*
-         * Map a single [sub]page. Regions smaller than our declared
-         * target page size are handled specially, so for those we
-         * pass in the exact addresses.
-         */
-        if (res.f.lg_page_size >= TARGET_PAGE_BITS) {
-            res.f.phys_addr &= TARGET_PAGE_MASK;
-            address &= TARGET_PAGE_MASK;
-        }
-
+    if (address & ((1 << memop_alignment_bits(memop)) - 1)) {
+        fi->type = ARMFault_Alignment;
+    } else if (!get_phys_addr(&cpu->env, address, access_type, memop,
+                              core_to_arm_mmu_idx(&cpu->env, mmu_idx),
+                              &res, fi)) {
         res.f.extra.arm.pte_attrs = res.cacheattrs.attrs;
         res.f.extra.arm.shareability = res.cacheattrs.shareability;
-
-        tlb_set_page_full(cs, mmu_idx, address, &res.f);
+        *out = res.f;
         return true;
-    } else if (probe) {
-        return false;
-    } else {
-        /* now we have a real cpu fault */
-        cpu_restore_state(cs, retaddr);
-        arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
     }
+    if (probe) {
+        return false;
+    }
+
+    /* Now we have a real cpu fault. */
+    cpu_restore_state(cs, ra);
+    arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
 }
 #else
 void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr,
@@ -294,7 +294,7 @@ static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
     desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
     desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
     desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
-    desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(memop));
+    desc = FIELD_DP32(desc, MTEDESC, ALIGN, memop_alignment_bits(memop));
     desc = FIELD_DP32(desc, MTEDESC, SIZEM1, memop_size(memop) - 1);

     ret = tcg_temp_new_i64();
@@ -326,7 +326,7 @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
     desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
     desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
     desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
-    desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(single_mop));
+    desc = FIELD_DP32(desc, MTEDESC, ALIGN, memop_alignment_bits(single_mop));
     desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1);

     ret = tcg_temp_new_i64();
@@ -226,7 +226,7 @@ static const TCGCPUOps hppa_tcg_ops = {
     .restore_state_to_opc = hppa_restore_state_to_opc,

 #ifndef CONFIG_USER_ONLY
-    .tlb_fill = hppa_cpu_tlb_fill,
+    .tlb_fill_align = hppa_cpu_tlb_fill_align,
     .cpu_exec_interrupt = hppa_cpu_exec_interrupt,
     .cpu_exec_halt = hppa_cpu_has_work,
     .do_interrupt = hppa_cpu_do_interrupt,
@@ -363,13 +363,13 @@ void hppa_cpu_dump_state(CPUState *cs, FILE *f, int);
 void hppa_ptlbe(CPUHPPAState *env);
 hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr);
 void hppa_set_ior_and_isr(CPUHPPAState *env, vaddr addr, bool mmu_disabled);
-bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
-                       MMUAccessType access_type, int mmu_idx,
-                       bool probe, uintptr_t retaddr);
+bool hppa_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
+                             MMUAccessType access_type, int mmu_idx,
+                             MemOp memop, int size, bool probe, uintptr_t ra);
 void hppa_cpu_do_interrupt(CPUState *cpu);
 bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req);
 int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
-                              int type, hwaddr *pphys, int *pprot);
+                              int type, MemOp mop, hwaddr *pphys, int *pprot);
 void hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
@@ -167,7 +167,7 @@ void hppa_cpu_do_interrupt(CPUState *cs)

             vaddr = hppa_form_gva_psw(old_psw, env->iasq_f, vaddr);
             t = hppa_get_physical_address(env, vaddr, MMU_KERNEL_IDX,
-                                          0, &paddr, &prot);
+                                          0, 0, &paddr, &prot);
             if (t >= 0) {
                 /* We can't re-load the instruction. */
                 env->cr[CR_IIR] = 0;
@@ -197,7 +197,7 @@ static int match_prot_id64(CPUHPPAState *env, uint32_t access_id)
 }

 int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
-                              int type, hwaddr *pphys, int *pprot)
+                              int type, MemOp mop, hwaddr *pphys, int *pprot)
 {
     hwaddr phys;
     int prot, r_prot, w_prot, x_prot, priv;
@@ -221,7 +221,7 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
             g_assert_not_reached();
         }
         prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
-        goto egress;
+        goto egress_align;
     }

     /* Find a valid tlb entry that matches the virtual address. */
@@ -267,6 +267,12 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
         goto egress;
     }

+    if (unlikely(!(prot & type))) {
+        /* Not allowed -- Inst/Data Memory Access Rights Fault. */
+        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
+        goto egress;
+    }
+
     /* access_id == 0 means public page and no check is performed */
     if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
         int access_prot = (hppa_is_pa20(env)
@@ -281,14 +287,8 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
         prot &= access_prot;
     }

-    if (unlikely(!(prot & type))) {
-        /* Not allowed -- Inst/Data Memory Access Rights Fault. */
-        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
-        goto egress;
-    }
-
     /*
-     * In priority order, check for conditions which raise faults.
+     * In reverse priority order, check for conditions which raise faults.
      * Remove PROT bits that cover the condition we want to check,
      * so that the resulting PROT will force a re-check of the
      * architectural TLB entry for the next access.
@@ -299,13 +299,15 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
             /* The T bit is set -- Page Reference Fault. */
             ret = EXCP_PAGE_REF;
         }
-    } else if (!ent->d) {
+    }
+    if (unlikely(!ent->d)) {
         prot &= PAGE_READ | PAGE_EXEC;
         if (type & PAGE_WRITE) {
             /* The D bit is not set -- TLB Dirty Bit Fault. */
             ret = EXCP_TLB_DIRTY;
         }
-    } else if (unlikely(ent->b)) {
+    }
+    if (unlikely(ent->b)) {
         prot &= PAGE_READ | PAGE_EXEC;
         if (type & PAGE_WRITE) {
             /*
@@ -321,6 +323,11 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
         }
     }

+ egress_align:
+    if (addr & ((1u << memop_alignment_bits(mop)) - 1)) {
+        ret = EXCP_UNALIGN;
+    }
+
  egress:
     *pphys = phys;
     *pprot = prot;
@@ -340,7 +347,7 @@ hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
     mmu_idx = (cpu->env.psw & PSW_D ? MMU_KERNEL_IDX :
                cpu->env.psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);

-    excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0,
+    excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0, 0,
                                      &phys, &prot);

     /* Since we're translating for debugging, the only error that is a
@@ -417,12 +424,11 @@ void hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
         }
     }

-bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
-                       MMUAccessType type, int mmu_idx,
-                       bool probe, uintptr_t retaddr)
+bool hppa_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
+                             MMUAccessType type, int mmu_idx,
+                             MemOp memop, int size, bool probe, uintptr_t ra)
 {
-    HPPACPU *cpu = HPPA_CPU(cs);
-    CPUHPPAState *env = &cpu->env;
+    CPUHPPAState *env = cpu_env(cs);
     int prot, excp, a_prot;
     hwaddr phys;

@@ -438,7 +444,8 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
         break;
     }

-    excp = hppa_get_physical_address(env, addr, mmu_idx, a_prot, &phys, &prot);
+    excp = hppa_get_physical_address(env, addr, mmu_idx, a_prot, memop,
+                                     &phys, &prot);
     if (unlikely(excp >= 0)) {
         if (probe) {
             return false;
@@ -446,7 +453,7 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
         trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);

         /* Failure.  Raise the indicated exception. */
-        raise_exception_with_ior(env, excp, retaddr, addr,
+        raise_exception_with_ior(env, excp, ra, addr,
                                  MMU_IDX_MMU_DISABLED(mmu_idx));
     }

@@ -460,8 +467,12 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
      * the large page protection mask.  We do not require this,
      * because we record the large page here in the hppa tlb.
      */
-    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
-                 prot, mmu_idx, TARGET_PAGE_SIZE);
+    memset(out, 0, sizeof(*out));
+    out->phys_addr = phys;
+    out->prot = prot;
+    out->attrs = MEMTXATTRS_UNSPECIFIED;
+    out->lg_page_size = TARGET_PAGE_BITS;
+
     return true;
 }

@@ -678,7 +689,7 @@ target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
     hwaddr phys;
     int prot, excp;

-    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
+    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0, 0,
                                      &phys, &prot);
     if (excp >= 0) {
         if (excp == EXCP_DTLB_MISS) {
@@ -334,7 +334,7 @@ target_ulong HELPER(probe)(CPUHPPAState *env, target_ulong addr,
     }

     mmu_idx = PRIV_P_TO_MMU_IDX(level, env->psw & PSW_P);
-    excp = hppa_get_physical_address(env, addr, mmu_idx, 0, &phys, &prot);
+    excp = hppa_get_physical_address(env, addr, mmu_idx, 0, 0, &phys, &prot);
     if (excp >= 0) {
         cpu_restore_state(env_cpu(env), GETPC());
         hppa_set_ior_and_isr(env, addr, MMU_IDX_MMU_DISABLED(mmu_idx));
@@ -7831,6 +7831,7 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)

     mce_init(cpu);

+    x86_cpu_gdb_init(cs);
     qemu_init_vcpu(cs);

     /*
@@ -2226,6 +2226,7 @@ void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags);

 int x86_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
 int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void x86_cpu_gdb_init(CPUState *cs);

 void x86_cpu_list(void);
 int cpu_x86_support_mca_broadcast(CPUX86State *env);
@@ -18,8 +18,13 @@
  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  */
 #include "qemu/osdep.h"
+#include "accel/tcg/vcpu-state.h"
 #include "cpu.h"
+#include "exec/gdbstub.h"
 #include "gdbstub/helpers.h"
+#ifdef CONFIG_LINUX_USER
+#include "linux-user/qemu.h"
+#endif

 #ifdef TARGET_X86_64
 static const int gpr_map[16] = {
@@ -96,6 +101,19 @@ static int gdb_write_reg_cs64(uint32_t hflags, uint8_t *buf, target_ulong *val)
     return 4;
 }

+static int gdb_get_reg(CPUX86State *env, GByteArray *mem_buf, target_ulong val)
+{
+    if (TARGET_LONG_BITS == 64) {
+        if (env->hflags & HF_CS64_MASK) {
+            return gdb_get_reg64(mem_buf, val);
+        } else {
+            return gdb_get_reg64(mem_buf, val & 0xffffffffUL);
+        }
+    } else {
+        return gdb_get_reg32(mem_buf, val);
+    }
+}
+
 int x86_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
 {
     X86CPU *cpu = X86_CPU(cs);
@@ -137,15 +155,7 @@ int x86_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
     } else {
         switch (n) {
         case IDX_IP_REG:
-            if (TARGET_LONG_BITS == 64) {
-                if (env->hflags & HF_CS64_MASK) {
-                    return gdb_get_reg64(mem_buf, env->eip);
-                } else {
-                    return gdb_get_reg64(mem_buf, env->eip & 0xffffffffUL);
-                }
-            } else {
-                return gdb_get_reg32(mem_buf, env->eip);
-            }
+            return gdb_get_reg(env, mem_buf, env->eip);
         case IDX_FLAGS_REG:
             return gdb_get_reg32(mem_buf, env->eflags);

@@ -248,6 +258,21 @@ static int x86_cpu_gdb_load_seg(X86CPU *cpu, X86Seg sreg, uint8_t *mem_buf)
     return 4;
 }

+static int gdb_write_reg(CPUX86State *env, uint8_t *mem_buf, target_ulong *val)
+{
+    if (TARGET_LONG_BITS == 64) {
+        if (env->hflags & HF_CS64_MASK) {
+            *val = ldq_p(mem_buf);
+        } else {
+            *val = ldq_p(mem_buf) & 0xffffffffUL;
+        }
+        return 8;
+    } else {
+        *val = (uint32_t)ldl_p(mem_buf);
+        return 4;
+    }
+}
+
 int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
 {
     X86CPU *cpu = X86_CPU(cs);
@@ -288,18 +313,7 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
     } else {
         switch (n) {
         case IDX_IP_REG:
-            if (TARGET_LONG_BITS == 64) {
-                if (env->hflags & HF_CS64_MASK) {
-                    env->eip = ldq_p(mem_buf);
-                } else {
-                    env->eip = ldq_p(mem_buf) & 0xffffffffUL;
-                }
-                return 8;
-            } else {
-                env->eip &= ~0xffffffffUL;
-                env->eip |= (uint32_t)ldl_p(mem_buf);
-                return 4;
-            }
+            return gdb_write_reg(env, mem_buf, &env->eip);
         case IDX_FLAGS_REG:
             env->eflags = ldl_p(mem_buf);
             return 4;
@@ -397,3 +411,49 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
     /* Unrecognised register. */
     return 0;
 }
+
+#ifdef CONFIG_LINUX_USER
+
+#define IDX_ORIG_AX 0
+
+static int x86_cpu_gdb_read_linux_register(CPUState *cs, GByteArray *mem_buf,
+                                           int n)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    switch (n) {
+    case IDX_ORIG_AX:
+        return gdb_get_reg(env, mem_buf, get_task_state(cs)->orig_ax);
+    }
+    return 0;
+}
+
+static int x86_cpu_gdb_write_linux_register(CPUState *cs, uint8_t *mem_buf,
+                                            int n)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    switch (n) {
+    case IDX_ORIG_AX:
+        return gdb_write_reg(env, mem_buf, &get_task_state(cs)->orig_ax);
+    }
+    return 0;
+}
+
+#endif
+
+void x86_cpu_gdb_init(CPUState *cs)
+{
+#ifdef CONFIG_LINUX_USER
+    gdb_register_coprocessor(cs, x86_cpu_gdb_read_linux_register,
+                             x86_cpu_gdb_write_linux_register,
+#ifdef TARGET_X86_64
+                             gdb_find_static_feature("i386-64bit-linux.xml"),
+#else
+                             gdb_find_static_feature("i386-32bit-linux.xml"),
+#endif
+                             0);
+#endif
+}
@@ -15362,7 +15362,8 @@ static void mips_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
      * hardware does (e.g. if a delay slot instruction faults, the
      * reported PC is the PC of the branch).
      */
-    if (ctx->base.singlestep_enabled && (ctx->hflags & MIPS_HFLAG_BMASK)) {
+    if ((tb_cflags(ctx->base.tb) & CF_SINGLE_STEP) &&
+        (ctx->hflags & MIPS_HFLAG_BMASK)) {
         ctx->base.max_insns = 2;
     }

@@ -15445,7 +15446,7 @@ static void mips_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
      * together with its delay slot.
      */
     if (ctx->base.pc_next - ctx->page_start >= TARGET_PAGE_SIZE
-        && !ctx->base.singlestep_enabled) {
+        && !(tb_cflags(ctx->base.tb) & CF_SINGLE_STEP)) {
         ctx->base.is_jmp = DISAS_TOO_MANY;
     }
 }
@@ -521,7 +521,7 @@ static MemOp gen_load_store_alignment(DisasContext *dc, MemOp mop,
         mop |= MO_ALIGN;
     }
     if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) {
-        tcg_gen_andi_i32(addr, addr, ~0 << get_alignment_bits(mop));
+        tcg_gen_andi_i32(addr, addr, ~0 << memop_alignment_bits(mop));
     }
     return mop;
 }
@@ -1587,7 +1587,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
         tcg_debug_assert((datalo & 1) == 0);
         tcg_debug_assert(datahi == datalo + 1);
         /* LDRD requires alignment; double-check that. */
-        if (get_alignment_bits(opc) >= MO_64) {
+        if (memop_alignment_bits(opc) >= MO_64) {
             if (h.index < 0) {
                 tcg_out_ldrd_8(s, h.cond, datalo, h.base, 0);
                 break;
@@ -1691,7 +1691,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
         tcg_debug_assert((datalo & 1) == 0);
         tcg_debug_assert(datahi == datalo + 1);
         /* STRD requires alignment; double-check that. */
-        if (get_alignment_bits(opc) >= MO_64) {
+        if (memop_alignment_bits(opc) >= MO_64) {
             if (h.index < 0) {
                 tcg_out_strd_8(s, h.cond, datalo, h.base, 0);
             } else {
@@ -1133,7 +1133,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
         * Otherwise, test for at least natural alignment and defer
         * everything else to the helper functions.
         */
-        if (s_bits != get_alignment_bits(opc)) {
+        if (s_bits != memop_alignment_bits(opc)) {
            tcg_debug_assert(check_fit_tl(a_mask, 13));
            tcg_out_arithi(s, TCG_REG_G0, addr_reg, a_mask, ARITH_ANDCC);

@@ -45,7 +45,7 @@ static void check_max_alignment(unsigned a_bits)

 static MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
 {
-    unsigned a_bits = get_alignment_bits(op);
+    unsigned a_bits = memop_alignment_bits(op);

     check_max_alignment(a_bits);

@@ -559,7 +559,7 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
     TCGv_i64 ext_addr = NULL;
     TCGOpcode opc;

-    check_max_alignment(get_alignment_bits(memop));
+    check_max_alignment(memop_alignment_bits(memop));
     tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);

     /* In serial mode, reduce atomicity. */
@@ -676,7 +676,7 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
     TCGv_i64 ext_addr = NULL;
     TCGOpcode opc;

-    check_max_alignment(get_alignment_bits(memop));
+    check_max_alignment(memop_alignment_bits(memop));
     tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST);

     /* In serial mode, reduce atomicity. */
@@ -5506,7 +5506,7 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
 static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
                                            MemOp host_atom, bool allow_two_ops)
 {
-    MemOp align = get_alignment_bits(opc);
+    MemOp align = memop_alignment_bits(opc);
     MemOp size = opc & MO_SIZE;
     MemOp half = size ? size - 1 : 0;
     MemOp atom = opc & MO_ATOM_MASK;
@@ -8,17 +8,12 @@ from test_gdbstub import main, report

 def run_test():
     """Run through the tests one by one"""
-    try:
-        mappings = gdb.execute("info proc mappings", False, True)
-    except gdb.error as exc:
-        exc_str = str(exc)
-        if "Not supported on this target." in exc_str:
-            # Detect failures due to an outstanding issue with how GDB handles
-            # the x86_64 QEMU's target.xml, which does not contain the
-            # definition of orig_rax. Skip the test in this case.
-            print("SKIP: {}".format(exc_str))
-            return
-        raise
+    if gdb.selected_inferior().architecture().name() == "m68k":
+        # m68k GDB supports only GDB_OSABI_SVR4, but GDB_OSABI_LINUX is
+        # required for the info proc support (see set_gdbarch_info_proc()).
+        print("SKIP: m68k GDB does not support GDB_OSABI_LINUX")
+        exit(0)
+    mappings = gdb.execute("info proc mappings", False, True)
     report(isinstance(mappings, str), "Fetched the mappings from the inferior")
     # Broken with host page size > guest page size
     # report("/sha1" in mappings, "Found the test binary name in the mappings")