mirror of https://github.com/xemu-project/xemu.git
target/sh4: Make sh4_cpu_tlb_fill sysemu only
The fallback code in cpu_loop_exit_sigsegv is sufficient for sh4 linux-user. Remove the code from cpu_loop that raised SIGSEGV.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
parent c8e7fef102
commit cac720ec54
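For context, and not part of the commit itself: the generic linux-user fault path now raises the guest SIGSEGV, which is why the hand-rolled case in cpu_loop() below can go. The sketch that follows shows how a target-side handler would forward an unmapped access to that helper; the handle_fault() wrapper name and the exact cpu_loop_exit_sigsegv prototype are assumptions based on the QEMU tree of that era, not taken from this commit.

/*
 * Illustrative sketch only (not from this commit).  Assumed prototype,
 * as of the QEMU 6.2 development cycle:
 *
 *   void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
 *                              MMUAccessType access_type,
 *                              bool maperr, uintptr_t ra);
 *
 * handle_fault() is a hypothetical wrapper showing how a target hands an
 * unmapped user-only access to the common code, which queues the guest
 * TARGET_SIGSEGV itself; this replaces the queue_signal() block removed
 * from cpu_loop() below.
 */
static void handle_fault(CPUState *cs, target_ulong addr,
                         MMUAccessType access_type, uintptr_t retaddr)
{
    /* maperr = true selects SEGV_MAPERR, matching TARGET_SEGV_MAPERR. */
    cpu_loop_exit_sigsegv(cs, addr, access_type, true, retaddr);
}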
--- a/linux-user/sh4/cpu_loop.c
+++ b/linux-user/sh4/cpu_loop.c
@@ -65,14 +65,6 @@ void cpu_loop(CPUSH4State *env)
             info.si_code = TARGET_TRAP_BRKPT;
             queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
             break;
-        case 0xa0:
-        case 0xc0:
-            info.si_signo = TARGET_SIGSEGV;
-            info.si_errno = 0;
-            info.si_code = TARGET_SEGV_MAPERR;
-            info._sifields._sigfault._addr = env->tea;
-            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
-            break;
         case EXCP_ATOMIC:
             cpu_exec_step_atomic(cs);
             arch_interrupt = false;
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -236,9 +236,9 @@ static const struct SysemuCPUOps sh4_sysemu_ops = {
 static const struct TCGCPUOps superh_tcg_ops = {
     .initialize = sh4_translate_init,
     .synchronize_from_tb = superh_cpu_synchronize_from_tb,
-    .tlb_fill = superh_cpu_tlb_fill,
 
 #ifndef CONFIG_USER_ONLY
+    .tlb_fill = superh_cpu_tlb_fill,
     .cpu_exec_interrupt = superh_cpu_exec_interrupt,
     .do_interrupt = superh_cpu_do_interrupt,
     .do_unaligned_access = superh_cpu_do_unaligned_access,
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -213,12 +213,12 @@ void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
                                     uintptr_t retaddr) QEMU_NORETURN;
 
 void sh4_translate_init(void);
+void sh4_cpu_list(void);
+
+#if !defined(CONFIG_USER_ONLY)
 bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                          MMUAccessType access_type, int mmu_idx,
                          bool probe, uintptr_t retaddr);
-
-void sh4_cpu_list(void);
-#if !defined(CONFIG_USER_ONLY)
 void superh_cpu_do_interrupt(CPUState *cpu);
 bool superh_cpu_exec_interrupt(CPUState *cpu, int int_req);
 void cpu_sh4_invalidate_tlb(CPUSH4State *s);
--- a/target/sh4/helper.c
+++ b/target/sh4/helper.c
@@ -796,8 +796,6 @@ bool superh_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
     return false;
 }
 
-#endif /* !CONFIG_USER_ONLY */
-
 bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                          MMUAccessType access_type, int mmu_idx,
                          bool probe, uintptr_t retaddr)
@@ -806,11 +804,6 @@ bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
     CPUSH4State *env = &cpu->env;
     int ret;
 
-#ifdef CONFIG_USER_ONLY
-    ret = (access_type == MMU_DATA_STORE ? MMU_DTLB_VIOLATION_WRITE :
-           access_type == MMU_INST_FETCH ? MMU_ITLB_VIOLATION :
-           MMU_DTLB_VIOLATION_READ);
-#else
     target_ulong physical;
     int prot;
 
@@ -829,7 +822,6 @@ bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
     if (ret != MMU_DTLB_MULTIPLE && ret != MMU_ITLB_MULTIPLE) {
         env->pteh = (env->pteh & PTEH_ASID_MASK) | (address & PTEH_VPN_MASK);
     }
-#endif
 
     env->tea = address;
     switch (ret) {
@@ -868,3 +860,4 @@ bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
     }
     cpu_loop_exit_restore(cs, retaddr);
 }
+#endif /* !CONFIG_USER_ONLY */