target/hppa: Use IntervalTreeNode in HPPATLBEntry
Replace the va_b and va_e fields with the interval tree node. The actual interval tree is not yet used.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
parent 729cd3506d
commit 66866cc74f
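Embedding the IntervalTreeNode gives each TLB entry the start/last pair that "qemu/interval-tree.h" works with, in place of va_b/va_e. As a hedged sketch of where this leads (not part of this commit, which only changes the field layout and keeps the linear scans seen in the diff below), a lookup through the interval-tree API would recover the entry from its embedded node. The helper name and the tree-root parameter here are hypothetical:

    /* Hypothetical sketch only: this commit does not yet create a tree root.
     * interval_tree_iter_first() returns the first node whose [start, last]
     * interval overlaps the query range, here the single address addr.
     */
    static HPPATLBEntry *hppa_find_tlb_sketch(IntervalTreeRoot *root, vaddr addr)
    {
        IntervalTreeNode *n = interval_tree_iter_first(root, addr, addr);

        /* Recover the enclosing HPPATLBEntry from the embedded node. */
        return n ? container_of(n, HPPATLBEntry, itree) : NULL;
    }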
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -23,6 +23,7 @@
 #include "cpu-qom.h"
 #include "exec/cpu-defs.h"
 #include "qemu/cpu-float.h"
+#include "qemu/interval-tree.h"
 
 /* PA-RISC 1.x processors have a strong memory model. */
 /* ??? While we do not yet implement PA-RISC 2.0, those processors have
@@ -175,8 +176,8 @@ typedef int64_t target_sreg;
 #endif
 
 typedef struct HPPATLBEntry {
-    uint64_t va_b;
-    uint64_t va_e;
+    IntervalTreeNode itree;
+
     target_ureg pa;
     unsigned u : 1;
     unsigned t : 1;
--- a/target/hppa/machine.c
+++ b/target/hppa/machine.c
@@ -74,7 +74,7 @@ static int get_tlb(QEMUFile *f, void *opaque, size_t size,
 
     memset(ent, 0, sizeof(*ent));
 
-    ent->va_b = qemu_get_be64(f);
+    ent->itree.start = qemu_get_be64(f);
     ent->pa = qemu_get_betr(f);
     val = qemu_get_be32(f);
 
@@ -88,7 +88,7 @@ static int get_tlb(QEMUFile *f, void *opaque, size_t size,
     ent->d = extract32(val, 28, 1);
     ent->t = extract32(val, 29, 1);
 
-    ent->va_e = ent->va_b + TARGET_PAGE_SIZE - 1;
+    ent->itree.last = ent->itree.start + TARGET_PAGE_SIZE - 1;
     return 0;
 }
 
@@ -110,7 +110,7 @@ static int put_tlb(QEMUFile *f, void *opaque, size_t size,
         val = deposit32(val, 29, 1, ent->t);
     }
 
-    qemu_put_be64(f, ent->va_b);
+    qemu_put_be64(f, ent->itree.start);
     qemu_put_betr(f, ent->pa);
     qemu_put_be32(f, val);
     return 0;
--- a/target/hppa/mem_helper.c
+++ b/target/hppa/mem_helper.c
@@ -31,9 +31,10 @@ static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
 
     for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
         HPPATLBEntry *ent = &env->tlb[i];
-        if (ent->va_b <= addr && addr <= ent->va_e) {
+        if (ent->itree.start <= addr && addr <= ent->itree.last) {
             trace_hppa_tlb_find_entry(env, ent + i, ent->entry_valid,
-                                      ent->va_b, ent->va_e, ent->pa);
+                                      ent->itree.start, ent->itree.last,
+                                      ent->pa);
             return ent;
         }
     }
@@ -50,11 +51,12 @@ static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
         return;
     }
 
-    trace_hppa_tlb_flush_ent(env, ent, ent->va_b, ent->va_e, ent->pa);
+    trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
+                             ent->itree.last, ent->pa);
 
-    tlb_flush_range_by_mmuidx(cs, ent->va_b,
-                              ent->va_e - ent->va_b + 1,
-                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);
+    tlb_flush_range_by_mmuidx(cs, ent->itree.start,
+                              ent->itree.last - ent->itree.start + 1,
+                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);
 
     /* never clear BTLBs, unless forced to do so. */
     if (ent < &env->tlb[HPPA_BTLB_ENTRIES] && !force_flush_btlb) {
@@ -62,7 +64,7 @@ static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
     }
 
     memset(ent, 0, sizeof(*ent));
-    ent->va_b = -1;
+    ent->itree.start = -1;
 }
 
 static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
@@ -118,7 +120,7 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
     }
 
     /* We now know the physical address. */
-    phys = ent->pa + (addr - ent->va_b);
+    phys = ent->pa + (addr - ent->itree.start);
 
     /* Map TLB access_rights field to QEMU protection. */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
@@ -281,7 +283,7 @@ void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
     /* Zap any old entries covering ADDR; notice empty entries on the way. */
     for (i = HPPA_BTLB_ENTRIES; i < ARRAY_SIZE(env->tlb); ++i) {
         HPPATLBEntry *ent = &env->tlb[i];
-        if (ent->va_b <= addr && addr <= ent->va_e) {
+        if (ent->itree.start <= addr && addr <= ent->itree.last) {
             if (ent->entry_valid) {
                 hppa_flush_tlb_ent(env, ent, false);
             }
@@ -297,10 +299,11 @@ void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
     }
 
     /* Note that empty->entry_valid == 0 already. */
-    empty->va_b = addr & TARGET_PAGE_MASK;
-    empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
+    empty->itree.start = addr & TARGET_PAGE_MASK;
+    empty->itree.last = empty->itree.start + TARGET_PAGE_SIZE - 1;
     empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
-    trace_hppa_tlb_itlba(env, empty, empty->va_b, empty->va_e, empty->pa);
+    trace_hppa_tlb_itlba(env, empty, empty->itree.start,
+                         empty->itree.last, empty->pa);
 }
 
 static void set_access_bits(CPUHPPAState *env, HPPATLBEntry *ent, target_ureg reg)
@@ -467,8 +470,8 @@ void HELPER(diag_btlb)(CPUHPPAState *env)
         /* force flush of possibly existing BTLB entry */
         hppa_flush_tlb_ent(env, btlb, true);
         /* create new BTLB entry */
-        btlb->va_b = virt_page << TARGET_PAGE_BITS;
-        btlb->va_e = btlb->va_b + len * TARGET_PAGE_SIZE - 1;
+        btlb->itree.start = virt_page << TARGET_PAGE_BITS;
+        btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
         btlb->pa = phys_page << TARGET_PAGE_BITS;
         set_access_bits(env, btlb, env->gr[20]);
         btlb->t = 0;
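As a final hedged sketch (again not in this commit): once a tree root exists, the itlba and flush paths above would also have to maintain the tree, roughly as follows. The env->itlb root field is hypothetical.

    /* In HELPER(itlba), after itree.start/itree.last are filled in: */
    interval_tree_insert(&empty->itree, &env->itlb);

    /* In hppa_flush_tlb_ent, before the entry is memset to zero: */
    interval_tree_remove(&ent->itree, &env->itlb);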