accel/tcg: Remove will_exit argument from cpu_restore_state
The value passed is always true, and if the target's
synchronize_from_tb hook is non-trivial, not exiting may be erroneous.

Reviewed-by: Claudio Fontana <cfontana@suse.de>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 3d419a4dd2 (parent cc30dc441b)
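Before/after sketch of the API change, condensed from the hunks below. The declarations are taken from the diff; the wrapper function report_fault is a hypothetical caller added here only to illustrate the typical pattern, it is not code from the tree.

    /* Old declaration (removed by this patch): */
    bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit);

    /* New declaration: the flag is gone; internally, the call to
     * cpu_restore_state_from_tb() now passes true unconditionally. */
    bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc);

    /* Hypothetical caller showing the pattern after this patch:
     * restore guest state for the faulting host pc, then leave the TB. */
    static void report_fault(CPUState *cs, uintptr_t retaddr)
    {
        cpu_restore_state(cs, retaddr);  /* was: cpu_restore_state(cs, retaddr, true) */
        cpu_loop_exit(cs);               /* callers exit the TB afterwards, which is
                                            the premise for dropping will_exit */
    }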
@@ -71,7 +71,7 @@ void cpu_loop_exit(CPUState *cpu)
 void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
 {
     if (pc) {
-        cpu_restore_state(cpu, pc, true);
+        cpu_restore_state(cpu, pc);
     }
     cpu_loop_exit(cpu);
 }
@@ -318,16 +318,8 @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
 #endif
 }
 
-bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
+bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc)
 {
-    /*
-     * The pc update associated with restore without exit will
-     * break the relative pc adjustments performed by TARGET_TB_PCREL.
-     */
-    if (TARGET_TB_PCREL) {
-        assert(will_exit);
-    }
-
     /*
      * The host_pc has to be in the rx region of the code buffer.
      * If it is not we will not be able to resolve it here.
@@ -341,7 +333,7 @@ bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
     if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
         TranslationBlock *tb = tcg_tb_lookup(host_pc);
         if (tb) {
-            cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
+            cpu_restore_state_from_tb(cpu, tb, host_pc, true);
             return true;
         }
     }
@@ -56,16 +56,13 @@ bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data);
  * cpu_restore_state:
  * @cpu: the cpu context
  * @host_pc: the host pc within the translation
- * @will_exit: true if the TB executed will be interrupted after some
-               cpu adjustments. Required for maintaining the correct
-               icount valus
  * @return: true if state was restored, false otherwise
  *
  * Attempt to restore the state for a fault occurring in translated
  * code. If @host_pc is not in translated code no state is
  * restored and the function returns false.
  */
-bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit);
+bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc);
 
 G_NORETURN void cpu_loop_exit_noexc(CPUState *cpu);
 G_NORETURN void cpu_loop_exit(CPUState *cpu);
@@ -532,7 +532,7 @@ G_NORETURN void dynamic_excp(CPUAlphaState *env, uintptr_t retaddr,
     cs->exception_index = excp;
     env->error_code = error;
     if (retaddr) {
-        cpu_restore_state(cs, retaddr, true);
+        cpu_restore_state(cs, retaddr);
         /* Floating-point exceptions (our only users) point to the next PC. */
         env->pc += 4;
     }
@@ -28,7 +28,7 @@ static void do_unaligned_access(CPUAlphaState *env, vaddr addr, uintptr_t retadd
     uint64_t pc;
     uint32_t insn;
 
-    cpu_restore_state(env_cpu(env), retaddr, true);
+    cpu_restore_state(env_cpu(env), retaddr);
 
     pc = env->pc;
     insn = cpu_ldl_code(env, pc);
@@ -78,7 +78,7 @@ void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
      * we must restore CPU state here before setting the syndrome
      * the caller passed us, and cannot use cpu_loop_exit_restore().
      */
-    cpu_restore_state(cs, ra, true);
+    cpu_restore_state(cs, ra);
     raise_exception(env, excp, syndrome, target_el);
 }
 
@@ -156,7 +156,7 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
     ARMMMUFaultInfo fi = {};
 
     /* now we have a real cpu fault */
-    cpu_restore_state(cs, retaddr, true);
+    cpu_restore_state(cs, retaddr);
 
     fi.type = ARMFault_Alignment;
     arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
@@ -196,7 +196,7 @@ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
     ARMMMUFaultInfo fi = {};
 
     /* now we have a real cpu fault */
-    cpu_restore_state(cs, retaddr, true);
+    cpu_restore_state(cs, retaddr);
 
     fi.ea = arm_extabort_type(response);
     fi.type = ARMFault_SyncExternal;
@@ -252,7 +252,7 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
         return false;
     } else {
         /* now we have a real cpu fault */
-        cpu_restore_state(cs, retaddr, true);
+        cpu_restore_state(cs, retaddr);
         arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
     }
 }
@@ -271,7 +271,7 @@ void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr,
      * We report both ESR and FAR to signal handlers.
      * For now, it's easiest to deliver the fault normally.
      */
-    cpu_restore_state(cs, ra, true);
+    cpu_restore_state(cs, ra);
     arm_deliver_fault(cpu, addr, access_type, MMU_USER_IDX, &fi);
 }
 
@@ -87,7 +87,7 @@ bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
         cs->exception_index = EXCP_BUSFAULT;
         env->fault_vector = res.bf_vec;
         if (retaddr) {
-            if (cpu_restore_state(cs, retaddr, true)) {
+            if (cpu_restore_state(cs, retaddr)) {
                 /* Evaluate flags after retranslation. */
                 helper_top_evaluate_flags(env);
             }
@@ -704,7 +704,7 @@ void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
 {
     CPUState *cs = env_cpu(env);
 
-    cpu_restore_state(cs, retaddr, true);
+    cpu_restore_state(cs, retaddr);
 
     qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                   PRIx64 ", " TARGET_FMT_lx ")!\n",
@@ -460,7 +460,7 @@ void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
     M68kCPU *cpu = M68K_CPU(cs);
     CPUM68KState *env = &cpu->env;
 
-    cpu_restore_state(cs, retaddr, true);
+    cpu_restore_state(cs, retaddr);
 
     if (m68k_feature(env, M68K_FEATURE_M68040)) {
         env->mmu.mmusr = 0;
@@ -558,7 +558,7 @@ raise_exception_format2(CPUM68KState *env, int tt, int ilen, uintptr_t raddr)
     cs->exception_index = tt;
 
     /* Recover PC and CC_OP for the beginning of the insn. */
-    cpu_restore_state(cs, raddr, true);
+    cpu_restore_state(cs, raddr);
 
     /* Flags are current in env->cc_*, or are undefined. */
     env->cc_op = CC_OP_FLAGS;
@@ -277,7 +277,7 @@ void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
     uint32_t esr, iflags;
 
     /* Recover the pc and iflags from the corresponding insn_start. */
-    cpu_restore_state(cs, retaddr, true);
+    cpu_restore_state(cs, retaddr);
     iflags = cpu->env.iflags;
 
     qemu_log_mask(CPU_LOG_INT,
@@ -40,7 +40,7 @@ void nios2_cpu_loop_exit_advance(CPUNios2State *env, uintptr_t retaddr)
      * Do this here, rather than in restore_state_to_opc(),
      * lest we affect QEMU internal exceptions, like EXCP_DEBUG.
      */
-    cpu_restore_state(cs, retaddr, true);
+    cpu_restore_state(cs, retaddr);
     env->pc += 4;
     cpu_loop_exit(cs);
 }
@@ -45,7 +45,7 @@ void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb)
         break;
 
     case TO_SPR(0, 16): /* NPC */
-        cpu_restore_state(cs, GETPC(), true);
+        cpu_restore_state(cs, GETPC());
         /* ??? Mirror or1ksim in not trashing delayed branch state
            when "jumping" to the current instruction. */
         if (env->pc != rb) {
@@ -131,7 +131,7 @@ void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb)
     case TO_SPR(8, 0): /* PMR */
         env->pmr = rb;
         if (env->pmr & PMR_DME || env->pmr & PMR_SME) {
-            cpu_restore_state(cs, GETPC(), true);
+            cpu_restore_state(cs, GETPC());
             env->pc += 4;
             cs->halted = 1;
             raise_exception(cpu, EXCP_HALTED);
@@ -3075,7 +3075,7 @@ void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
     uint32_t insn;
 
     /* Restore state and reload the insn we executed, for filling in DSISR. */
-    cpu_restore_state(cs, retaddr, true);
+    cpu_restore_state(cs, retaddr);
     insn = cpu_ldl_code(env, env->nip);
 
     switch (env->mmu_model) {
@@ -39,7 +39,7 @@ G_NORETURN void tcg_s390_program_interrupt(CPUS390XState *env,
 {
     CPUState *cs = env_cpu(env);
 
-    cpu_restore_state(cs, ra, true);
+    cpu_restore_state(cs, ra);
     qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                   env->psw.addr);
     trigger_pgm_exception(env, code);
@@ -31,7 +31,7 @@ void raise_exception_sync_internal(CPUTriCoreState *env, uint32_t class, int tin
 {
     CPUState *cs = env_cpu(env);
     /* in case we come from a helper-call we need to restore the PC */
-    cpu_restore_state(cs, pc, true);
+    cpu_restore_state(cs, pc);
 
     /* Tin is loaded into d[15] */
     env->gpr_d[15] = tin;
@@ -253,7 +253,7 @@ void xtensa_cpu_do_unaligned_access(CPUState *cs,
 
     assert(xtensa_option_enabled(env->config,
                                  XTENSA_OPTION_UNALIGNED_EXCEPTION));
-    cpu_restore_state(CPU(cpu), retaddr, true);
+    cpu_restore_state(CPU(cpu), retaddr);
     HELPER(exception_cause_vaddr)(env,
                                   env->pc, LOAD_STORE_ALIGNMENT_CAUSE,
                                   addr);
@@ -284,7 +284,7 @@ bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
     } else if (probe) {
         return false;
     } else {
-        cpu_restore_state(cs, retaddr, true);
+        cpu_restore_state(cs, retaddr);
         HELPER(exception_cause_vaddr)(env, env->pc, ret, address);
     }
 }
@@ -297,7 +297,7 @@ void xtensa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
     XtensaCPU *cpu = XTENSA_CPU(cs);
     CPUXtensaState *env = &cpu->env;
 
-    cpu_restore_state(cs, retaddr, true);
+    cpu_restore_state(cs, retaddr);
     HELPER(exception_cause_vaddr)(env, env->pc,
                                   access_type == MMU_INST_FETCH ?
                                   INSTR_PIF_ADDR_ERROR_CAUSE :