target/hppa: Add BTLB support to hppa TLB functions

Change the TLB code to store the Block-TLBs at the beginning
of the TLB table. New 4k TLB entries that are added later
must not overwrite any of the BTLB entries.

Make sure that when the TLB is cleared by the OS via the ptlbe
instruction, the Block-TLBs will not be dropped.

Signed-off-by: Helge Deller <deller@gmx.de>
Author: Helge Deller
Date:   2023-09-13 10:55:59 +02:00
Commit: fa824d99f9
Parent: 6d1ef68cca

4 changed files with 65 additions and 30 deletions
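
To make the layout described above concrete, here is a minimal stand-alone sketch of the policy (this is not the QEMU code itself; the HPPA_BTLB_ENTRIES and HPPA_TLB_ENTRIES values and the tlb_ent structure are simplified placeholders): dynamic 4k entries rotate only through slots at or above HPPA_BTLB_ENTRIES, and a ptlbe-style purge clears only that dynamic region, leaving the Block-TLB slots untouched.

/*
 * Illustrative sketch only (not QEMU code): slots [0, HPPA_BTLB_ENTRIES)
 * are reserved for Block-TLBs, dynamic 4k entries rotate through the
 * remaining slots, and a "purge everything" request wipes only the
 * dynamic part.  The sizes below are placeholder values.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HPPA_BTLB_ENTRIES  16    /* placeholder, not the real value */
#define HPPA_TLB_ENTRIES   256   /* placeholder, not the real value */

typedef struct {
    uint64_t va_b, va_e, pa;
    unsigned entry_valid : 1;
} tlb_ent;

static tlb_ent tlb[HPPA_TLB_ENTRIES];
static uint32_t tlb_last;

/* Pick the next dynamic slot; never hand out one of the BTLB slots. */
static tlb_ent *alloc_tlb_ent(void)
{
    uint32_t i;

    if (tlb_last < HPPA_BTLB_ENTRIES || tlb_last >= HPPA_TLB_ENTRIES) {
        i = HPPA_BTLB_ENTRIES;            /* wrap around, skipping the BTLB area */
        tlb_last = HPPA_BTLB_ENTRIES + 1;
    } else {
        i = tlb_last++;
    }
    return &tlb[i];
}

/* ptlbe-style flush: clear only the dynamic entries, keep the BTLBs. */
static void purge_all_dynamic(void)
{
    memset(&tlb[HPPA_BTLB_ENTRIES], 0,
           sizeof(tlb) - HPPA_BTLB_ENTRIES * sizeof(tlb[0]));
    tlb_last = HPPA_BTLB_ENTRIES;
}

int main(void)
{
    tlb[0].entry_valid = 1;              /* pretend a BTLB was installed */
    alloc_tlb_ent()->entry_valid = 1;    /* one dynamic 4k entry */

    purge_all_dynamic();
    printf("BTLB still valid: %d, dynamic entry valid: %d\n",
           tlb[0].entry_valid, tlb[HPPA_BTLB_ENTRIES].entry_valid);
    return 0;
}

Compiled as-is, the sketch reports that the BTLB slot is still valid after the purge while the dynamic slot is not, which is the behaviour the hunks for hppa_alloc_tlb_ent() and HELPER(ptlbe) below implement.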

@@ -350,7 +350,8 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
 void hppa_cpu_do_interrupt(CPUState *cpu);
 bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req);
 int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
-                              int type, hwaddr *pphys, int *pprot);
+                              int type, hwaddr *pphys, int *pprot,
+                              hppa_tlb_entry **tlb_entry);
 extern const MemoryRegionOps hppa_io_eir_ops;
 extern const VMStateDescription vmstate_hppa_cpu;
 void hppa_cpu_alarm_timer(void *);

@@ -154,7 +154,7 @@ void hppa_cpu_do_interrupt(CPUState *cs)
 
             vaddr = hppa_form_gva_psw(old_psw, iasq_f, vaddr);
             t = hppa_get_physical_address(env, vaddr, MMU_KERNEL_IDX,
-                                          0, &paddr, &prot);
+                                          0, &paddr, &prot, NULL);
             if (t >= 0) {
                 /* We can't re-load the instruction. */
                 env->cr[CR_IIR] = 0;

@@ -41,16 +41,24 @@ static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
     return NULL;
 }
 
-static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent)
+static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent,
+                               bool force_flush_btlb)
 {
     CPUState *cs = env_cpu(env);
-    unsigned i, n = 1 << (2 * ent->page_size);
-    uint64_t addr = ent->va_b;
+
+    if (!ent->entry_valid) {
+        return;
+    }
 
     trace_hppa_tlb_flush_ent(env, ent, ent->va_b, ent->va_e, ent->pa);
 
-    for (i = 0; i < n; ++i, addr += TARGET_PAGE_SIZE) {
-        tlb_flush_page_by_mmuidx(cs, addr, HPPA_MMU_FLUSH_MASK);
+    tlb_flush_range_by_mmuidx(cs, ent->va_b,
+                              ent->va_e - ent->va_b + 1,
+                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);
+
+    /* never clear BTLBs, unless forced to do so. */
+    if (ent < &env->tlb[HPPA_BTLB_ENTRIES] && !force_flush_btlb) {
+        return;
     }
 
     memset(ent, 0, sizeof(*ent));
@@ -60,23 +68,35 @@ static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent)
 static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env)
 {
     hppa_tlb_entry *ent;
-    uint32_t i = env->tlb_last;
+    uint32_t i;
+
+    if (env->tlb_last < HPPA_BTLB_ENTRIES || env->tlb_last >= ARRAY_SIZE(env->tlb)) {
+        i = HPPA_BTLB_ENTRIES;
+        env->tlb_last = HPPA_BTLB_ENTRIES + 1;
+    } else {
+        i = env->tlb_last;
+        env->tlb_last++;
+    }
 
-    env->tlb_last = (i == ARRAY_SIZE(env->tlb) - 1 ? 0 : i + 1);
     ent = &env->tlb[i];
 
-    hppa_flush_tlb_ent(env, ent);
+    hppa_flush_tlb_ent(env, ent, false);
     return ent;
 }
 
 int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
-                              int type, hwaddr *pphys, int *pprot)
+                              int type, hwaddr *pphys, int *pprot,
+                              hppa_tlb_entry **tlb_entry)
 {
     hwaddr phys;
     int prot, r_prot, w_prot, x_prot, priv;
     hppa_tlb_entry *ent;
     int ret = -1;
 
+    if (tlb_entry) {
+        *tlb_entry = NULL;
+    }
+
     /* Virtual translation disabled. Direct map virtual to physical. */
     if (mmu_idx == MMU_PHYS_IDX) {
         phys = addr;
@@ -93,8 +113,12 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
         goto egress;
     }
 
+    if (tlb_entry) {
+        *tlb_entry = ent;
+    }
+
     /* We now know the physical address. */
-    phys = ent->pa + (addr & ~TARGET_PAGE_MASK);
+    phys = ent->pa + (addr - ent->va_b);
 
     /* Map TLB access_rights field to QEMU protection. */
     priv = MMU_IDX_TO_PRIV(mmu_idx);
@@ -193,7 +217,7 @@ hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
     }
 
     excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
-                                     &phys, &prot);
+                                     &phys, &prot, NULL);
 
     /* Since we're translating for debugging, the only error that is a
        hard error is no translation at all. Otherwise, while a real cpu
@@ -207,6 +231,7 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
 {
     HPPACPU *cpu = HPPA_CPU(cs);
     CPUHPPAState *env = &cpu->env;
+    hppa_tlb_entry *ent;
     int prot, excp, a_prot;
     hwaddr phys;
 
@@ -223,7 +248,7 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
     }
 
     excp = hppa_get_physical_address(env, addr, mmu_idx,
-                                     a_prot, &phys, &prot);
+                                     a_prot, &phys, &prot, &ent);
     if (unlikely(excp >= 0)) {
         if (probe) {
             return false;
@@ -243,7 +268,7 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                                 phys & TARGET_PAGE_MASK, size, type, mmu_idx);
 
     /* Success! Store the translation into the QEMU TLB. */
     tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
-                 prot, mmu_idx, TARGET_PAGE_SIZE);
+                 prot, mmu_idx, TARGET_PAGE_SIZE << (ent ? 2 * ent->page_size : 0));
     return true;
 }
@@ -254,11 +279,11 @@ void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
     int i;
 
     /* Zap any old entries covering ADDR; notice empty entries on the way. */
-    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
+    for (i = HPPA_BTLB_ENTRIES; i < ARRAY_SIZE(env->tlb); ++i) {
         hppa_tlb_entry *ent = &env->tlb[i];
         if (ent->va_b <= addr && addr <= ent->va_e) {
             if (ent->entry_valid) {
-                hppa_flush_tlb_ent(env, ent);
+                hppa_flush_tlb_ent(env, ent, false);
             }
             if (!empty) {
                 empty = ent;
@@ -278,16 +303,8 @@ void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
     trace_hppa_tlb_itlba(env, empty, empty->va_b, empty->va_e, empty->pa);
 }
 
-/* Insert (Insn/Data) TLB Protection. Note this is PA 1.1 only. */
-void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
+static void set_access_bits(CPUHPPAState *env, hppa_tlb_entry *ent, target_ureg reg)
 {
-    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);
-
-    if (unlikely(ent == NULL)) {
-        qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
-        return;
-    }
-
     ent->access_id = extract32(reg, 1, 18);
     ent->u = extract32(reg, 19, 1);
     ent->ar_pl2 = extract32(reg, 20, 2);
@@ -301,6 +318,19 @@ void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
                           ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
 }
 
+/* Insert (Insn/Data) TLB Protection. Note this is PA 1.1 only. */
+void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
+{
+    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);
+
+    if (unlikely(ent == NULL)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
+        return;
+    }
+
+    set_access_bits(env, ent, reg);
+}
+
 /* Purge (Insn/Data) TLB. This is explicitly page-based, and is
    synchronous across all processors. */
 static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
@@ -310,7 +340,7 @@ static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
     hppa_tlb_entry *ent = hppa_find_tlb(env, addr);
 
     if (ent && ent->entry_valid) {
-        hppa_flush_tlb_ent(env, ent);
+        hppa_flush_tlb_ent(env, ent, false);
     }
 }
 
@@ -334,7 +364,10 @@ void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
 void HELPER(ptlbe)(CPUHPPAState *env)
 {
     trace_hppa_tlb_ptlbe(env);
-    memset(env->tlb, 0, sizeof(env->tlb));
+    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
+    memset(&env->tlb[HPPA_BTLB_ENTRIES], 0,
+           sizeof(env->tlb) - HPPA_BTLB_ENTRIES * sizeof(env->tlb[0]));
+    env->tlb_last = HPPA_BTLB_ENTRIES;
     tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
 }
 
@@ -356,7 +389,7 @@ target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
     int prot, excp;
 
     excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
-                                     &phys, &prot);
+                                     &phys, &prot, NULL);
     if (excp >= 0) {
         if (env->psw & PSW_Q) {
             /* ??? Needs tweaking for hppa64. */

@@ -179,7 +179,8 @@ target_ureg HELPER(probe)(CPUHPPAState *env, target_ulong addr,
         return 0;
     }
 
-    excp = hppa_get_physical_address(env, addr, level, 0, &phys, &prot);
+    excp = hppa_get_physical_address(env, addr, level, 0, &phys,
+                                     &prot, NULL);
     if (excp >= 0) {
         if (env->psw & PSW_Q) {
             /* ??? Needs tweaking for hppa64. */