mirror of https://github.com/xemu-project/xemu.git
target/i386: Move x86_cpu_exec_interrupt() under sysemu/ folder
Following the logic of commit 30493a030f ("i386: split seg_helper into
user-only and sysemu parts"), move x86_cpu_exec_interrupt() under
sysemu/seg_helper.c.
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-By: Warner Losh <imp@bsdimp.com>
Message-Id: <20210911165434.531552-12-f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
parent
604664726f
commit
0792e6c88d
target/i386/tcg
|
@@ -1110,70 +1110,6 @@ void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
|
|||
do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
|
||||
}
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
/*
 * Service at most ONE pending interrupt request for @cs.
 *
 * x86_cpu_pending_interrupt() filters @interrupt_request down to the
 * highest-priority deliverable request; if nothing is deliverable we
 * return false.  Otherwise exactly one request is handled (its bit is
 * cleared from cs->interrupt_request) and we return true so the caller
 * knows control flow changed and no TB chaining may be reused.
 *
 * Handling one request per call keeps icount-driven execution
 * deterministic (see the comment above the switch).
 */
bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int intno;

    /* Reduce to the single highest-priority deliverable request, if any. */
    interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
    if (!interrupt_request) {
        return false;
    }

    /* Don't process multiple interrupt requests in a single call.
     * This is required to make icount-driven execution deterministic.
     */
    switch (interrupt_request) {
    case CPU_INTERRUPT_POLL:
        /* Clear the request first, then let the APIC (re)assert if needed. */
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
        break;
    case CPU_INTERRUPT_SIPI:
        /* Startup IPI: SIPI handling clears its own request state.
         * NOTE(review): no explicit bit clear here, unlike other cases —
         * presumably done inside do_cpu_sipi(); confirm in its definition. */
        do_cpu_sipi(cpu);
        break;
    case CPU_INTERRUPT_SMI:
        /* SVM may intercept SMIs before we enter SMM. */
        cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
        cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
        do_smm_enter(cpu);
        break;
    case CPU_INTERRUPT_NMI:
        cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
        cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
        /* Mask further NMIs until the guest executes IRET. */
        env->hflags2 |= HF2_NMI_MASK;
        do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
        break;
    case CPU_INTERRUPT_MCE:
        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
        do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
        break;
    case CPU_INTERRUPT_HARD:
        cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
        /* A hard interrupt also supersedes any pending virtual IRQ. */
        cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                   CPU_INTERRUPT_VIRQ);
        intno = cpu_get_pic_interrupt(env);
        qemu_log_mask(CPU_LOG_TB_IN_ASM,
                      "Servicing hardware INT=0x%02x\n", intno);
        do_interrupt_x86_hardirq(env, intno, 1);
        break;
    case CPU_INTERRUPT_VIRQ:
        /* SVM virtual interrupt: vector comes from the guest's VMCB. */
        cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
        intno = x86_ldl_phys(cs, env->vm_vmcb
                             + offsetof(struct vmcb, control.int_vector));
        qemu_log_mask(CPU_LOG_TB_IN_ASM,
                      "Servicing virtual hardware INT=0x%02x\n", intno);
        do_interrupt_x86_hardirq(env, intno, 1);
        /* Clear the request and the VMCB V_IRQ state only after delivery. */
        cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
        env->int_ctl &= ~V_IRQ_MASK;
        break;
    }

    /* Ensure that no TB jump will be modified as the program flow was changed. */
    return true;
}
#endif /* CONFIG_USER_ONLY */
|
||||
|
||||
void helper_lldt(CPUX86State *env, int selector)
|
||||
{
|
||||
SegmentCache *dt;
|
||||
|
|
|
@@ -125,6 +125,68 @@ void x86_cpu_do_interrupt(CPUState *cs)
|
|||
}
|
||||
}
|
||||
|
||||
/*
 * Service at most ONE pending interrupt request for @cs.
 *
 * x86_cpu_pending_interrupt() filters @interrupt_request down to the
 * highest-priority deliverable request; if nothing is deliverable we
 * return false.  Otherwise exactly one request is handled (its bit is
 * cleared from cs->interrupt_request) and we return true so the caller
 * knows control flow changed and no TB chaining may be reused.
 *
 * Handling one request per call keeps icount-driven execution
 * deterministic (see the comment above the switch).
 */
bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int intno;

    /* Reduce to the single highest-priority deliverable request, if any. */
    interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
    if (!interrupt_request) {
        return false;
    }

    /* Don't process multiple interrupt requests in a single call.
     * This is required to make icount-driven execution deterministic.
     */
    switch (interrupt_request) {
    case CPU_INTERRUPT_POLL:
        /* Clear the request first, then let the APIC (re)assert if needed. */
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
        break;
    case CPU_INTERRUPT_SIPI:
        /* Startup IPI: SIPI handling clears its own request state.
         * NOTE(review): no explicit bit clear here, unlike other cases —
         * presumably done inside do_cpu_sipi(); confirm in its definition. */
        do_cpu_sipi(cpu);
        break;
    case CPU_INTERRUPT_SMI:
        /* SVM may intercept SMIs before we enter SMM. */
        cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
        cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
        do_smm_enter(cpu);
        break;
    case CPU_INTERRUPT_NMI:
        cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
        cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
        /* Mask further NMIs until the guest executes IRET. */
        env->hflags2 |= HF2_NMI_MASK;
        do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
        break;
    case CPU_INTERRUPT_MCE:
        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
        do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
        break;
    case CPU_INTERRUPT_HARD:
        cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
        /* A hard interrupt also supersedes any pending virtual IRQ. */
        cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                   CPU_INTERRUPT_VIRQ);
        intno = cpu_get_pic_interrupt(env);
        qemu_log_mask(CPU_LOG_TB_IN_ASM,
                      "Servicing hardware INT=0x%02x\n", intno);
        do_interrupt_x86_hardirq(env, intno, 1);
        break;
    case CPU_INTERRUPT_VIRQ:
        /* SVM virtual interrupt: vector comes from the guest's VMCB. */
        cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
        intno = x86_ldl_phys(cs, env->vm_vmcb
                             + offsetof(struct vmcb, control.int_vector));
        qemu_log_mask(CPU_LOG_TB_IN_ASM,
                      "Servicing virtual hardware INT=0x%02x\n", intno);
        do_interrupt_x86_hardirq(env, intno, 1);
        /* Clear the request and the VMCB V_IRQ state only after delivery. */
        cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
        env->int_ctl &= ~V_IRQ_MASK;
        break;
    }

    /* Ensure that no TB jump will be modified as the program flow was changed. */
    return true;
}
|
||||
|
||||
/* check if Port I/O is allowed in TSS */
|
||||
void helper_check_io(CPUX86State *env, uint32_t addr, uint32_t size)
|
||||
{
|
||||
|
|
Loading…
Reference in New Issue