target/hppa: Implement TCGCPUOps.tlb_fill_align

Convert hppa_cpu_tlb_fill to hppa_cpu_tlb_fill_align so that we
can recognize alignment exceptions in the correct priority order.

Resolves: https://bugzilla.kernel.org/show_bug.cgi?id=219339
Tested-by: Helge Deller <deller@gmx.de>
Reviewed-by: Helge Deller <deller@gmx.de>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Richard Henderson 2024-10-07 18:02:44 -07:00
parent 5d29587b45
commit 99746de612
3 changed files with 16 additions and 13 deletions
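
Background note (not part of the patch text): the legacy .tlb_fill hook never sees the MemOp of the access, so the generic softmmu code has to perform the alignment check itself at a fixed point, and the target cannot order an alignment trap relative to its own TLB-miss and protection faults. The .tlb_fill_align hook receives the MemOp, so the target's translation code decides which exception wins. A minimal sketch of such a check, assuming the memop_alignment_bits() helper from include/exec/memop.h and using EXCP_UNALIGN as a stand-in for the target's alignment-fault number:

/*
 * Illustrative only: with the MemOp available, a target can test
 * alignment itself and choose where that test sits relative to its
 * TLB-miss and protection checks.
 */
static int check_alignment(vaddr addr, MemOp memop)
{
    /* memop_alignment_bits() (assumed helper) gives log2 of the
       required alignment encoded in the MemOp. */
    unsigned a_bits = memop_alignment_bits(memop);

    if (addr & ((1u << a_bits) - 1)) {
        return EXCP_UNALIGN;   /* unaligned: raise the target's trap */
    }
    return -1;                 /* aligned: no exception */
}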

--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c

@@ -226,7 +226,7 @@ static const TCGCPUOps hppa_tcg_ops = {
     .restore_state_to_opc = hppa_restore_state_to_opc,
 
 #ifndef CONFIG_USER_ONLY
-    .tlb_fill = hppa_cpu_tlb_fill,
+    .tlb_fill_align = hppa_cpu_tlb_fill_align,
     .cpu_exec_interrupt = hppa_cpu_exec_interrupt,
     .cpu_exec_halt = hppa_cpu_has_work,
     .do_interrupt = hppa_cpu_do_interrupt,

--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h

@@ -363,9 +363,9 @@ void hppa_cpu_dump_state(CPUState *cs, FILE *f, int);
 void hppa_ptlbe(CPUHPPAState *env);
 hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr);
 void hppa_set_ior_and_isr(CPUHPPAState *env, vaddr addr, bool mmu_disabled);
-bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
-                       MMUAccessType access_type, int mmu_idx,
-                       bool probe, uintptr_t retaddr);
+bool hppa_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
+                             MMUAccessType access_type, int mmu_idx,
+                             MemOp memop, int size, bool probe, uintptr_t ra);
 void hppa_cpu_do_interrupt(CPUState *cpu);
 bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req);
 int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,

--- a/target/hppa/mem_helper.c
+++ b/target/hppa/mem_helper.c

@@ -424,12 +424,11 @@ void hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
     }
 }
 
-bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
-                       MMUAccessType type, int mmu_idx,
-                       bool probe, uintptr_t retaddr)
+bool hppa_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
+                             MMUAccessType type, int mmu_idx,
+                             MemOp memop, int size, bool probe, uintptr_t ra)
 {
-    HPPACPU *cpu = HPPA_CPU(cs);
-    CPUHPPAState *env = &cpu->env;
+    CPUHPPAState *env = cpu_env(cs);
     int prot, excp, a_prot;
     hwaddr phys;
 
@@ -445,7 +444,7 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
         break;
     }
 
-    excp = hppa_get_physical_address(env, addr, mmu_idx, a_prot, 0,
+    excp = hppa_get_physical_address(env, addr, mmu_idx, a_prot, memop,
                                      &phys, &prot);
     if (unlikely(excp >= 0)) {
         if (probe) {
@@ -454,7 +453,7 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
         trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
 
         /* Failure.  Raise the indicated exception.  */
-        raise_exception_with_ior(env, excp, retaddr, addr,
+        raise_exception_with_ior(env, excp, ra, addr,
                                  MMU_IDX_MMU_DISABLED(mmu_idx));
     }
 
@@ -468,8 +467,12 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
      * the large page protection mask.  We do not require this,
      * because we record the large page here in the hppa tlb.
      */
-    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
-                 prot, mmu_idx, TARGET_PAGE_SIZE);
+    memset(out, 0, sizeof(*out));
+    out->phys_addr = phys;
+    out->prot = prot;
+    out->attrs = MEMTXATTRS_UNSPECIFIED;
+    out->lg_page_size = TARGET_PAGE_BITS;
+
     return true;
 }
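
Usage note (not part of this diff): once the hook has filled the CPUTLBEntryFull and returned true, the generic code in accel/tcg/cputlb.c installs the entry itself, which is why the explicit tlb_set_page() call above could be dropped. Roughly, on the caller's side (a sketch; details may differ):

/*
 * Rough sketch of how the core consumes the hook registered as
 * .tlb_fill_align in TCGCPUOps (see the first hunk above).
 */
CPUTLBEntryFull full;

if (cpu->cc->tcg_ops->tlb_fill_align(cpu, &full, addr, access_type,
                                     mmu_idx, memop, size, probe, ra)) {
    /* The core installs the translation; the target no longer
       calls tlb_set_page() itself. */
    tlb_set_page_full(cpu, mmu_idx, addr, &full);
}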