reworked SVM interrupt handling logic - fixed vmrun EIP saved value - reworked cr8 handling - added CPUState.hflags2

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4662 c046a42c-6fe2-441c-8c8c-71466251a162
bellard 2008-06-04 17:02:19 +00:00
parent 3cd9acb410
commit db620f46a8
7 changed files with 110 additions and 109 deletions
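The heart of the rework is the interrupt gating in cpu-exec.c below: the global interrupt flag (GIF) and the V_INTR_MASKING state move out of the translation-time hflags into the new runtime-only hflags2. As an illustrative sketch only (can_take_hard_irq is a hypothetical helper, not part of this commit), the new condition for delivering a hard interrupt amounts to:

    /* Sketch of the gating logic encoded by the new cpu-exec.c condition.
     * Not part of the commit; assumes the HF2_* constants added below. */
    static inline int can_take_hard_irq(CPUX86State *env)
    {
        if (!(env->hflags2 & HF2_GIF_MASK))
            return 0;                                   /* GIF clear: nothing is delivered */
        if (env->hflags2 & HF2_VINTR_MASK)
            return (env->hflags2 & HF2_HIF_MASK) != 0;  /* guest IF is virtualized */
        return (env->eflags & IF_MASK) &&
               !(env->hflags & HF_INHIBIT_IRQ_MASK);    /* normal IF plus interrupt shadow */
    }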

--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -368,11 +368,8 @@ int cpu_exec(CPUState *env1)
             next_tb = 0; /* force lookup of first TB */
             for(;;) {
                 interrupt_request = env->interrupt_request;
-                if (__builtin_expect(interrupt_request, 0)
-#if defined(TARGET_I386)
-                    && env->hflags & HF_GIF_MASK
-#endif
-                    && likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
+                if (__builtin_expect(interrupt_request, 0) &&
+                    likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                     if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                         env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                         env->exception_index = EXCP_DEBUG;
@@ -388,47 +385,51 @@ int cpu_exec(CPUState *env1)
                     }
 #endif
 #if defined(TARGET_I386)
-                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
-                        !(env->hflags & HF_SMM_MASK)) {
-                        svm_check_intercept(SVM_EXIT_SMI);
-                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
-                        do_smm_enter();
-                        next_tb = 0;
-                    } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
-                        !(env->hflags & HF_NMI_MASK)) {
-                        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
-                        env->hflags |= HF_NMI_MASK;
-                        do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
-                        next_tb = 0;
-                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
-                        (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
-                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
-                        int intno;
-                        svm_check_intercept(SVM_EXIT_INTR);
-                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
-                        intno = cpu_get_pic_interrupt(env);
-                        if (loglevel & CPU_LOG_TB_IN_ASM) {
-                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
-                        }
-                        do_interrupt(intno, 0, 0, 0, 1);
-                        /* ensure that no TB jump will be modified as
-                           the program flow was changed */
-                        next_tb = 0;
+                    if (env->hflags2 & HF2_GIF_MASK) {
+                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
+                            !(env->hflags & HF_SMM_MASK)) {
+                            svm_check_intercept(SVM_EXIT_SMI);
+                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
+                            do_smm_enter();
+                            next_tb = 0;
+                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
+                                   !(env->hflags2 & HF2_NMI_MASK)) {
+                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
+                            env->hflags2 |= HF2_NMI_MASK;
+                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
+                            next_tb = 0;
+                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
+                                   (((env->hflags2 & HF2_VINTR_MASK) &&
+                                     (env->hflags2 & HF2_HIF_MASK)) ||
+                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
+                                     (env->eflags & IF_MASK &&
+                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
+                            int intno;
+                            svm_check_intercept(SVM_EXIT_INTR);
+                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
+                            intno = cpu_get_pic_interrupt(env);
+                            if (loglevel & CPU_LOG_TB_IN_ASM) {
+                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
+                            }
+                            do_interrupt(intno, 0, 0, 0, 1);
+                            /* ensure that no TB jump will be modified as
+                               the program flow was changed */
+                            next_tb = 0;
 #if !defined(CONFIG_USER_ONLY)
-                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
-                        (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
-                        int intno;
-                        /* FIXME: this should respect TPR */
-                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
-                        svm_check_intercept(SVM_EXIT_VINTR);
-                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
-                        if (loglevel & CPU_LOG_TB_IN_ASM)
-                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
-                        do_interrupt(intno, 0, 0, -1, 1);
-                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
-                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
-                        next_tb = 0;
+                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
+                                   (env->eflags & IF_MASK) &&
+                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
+                            int intno;
+                            /* FIXME: this should respect TPR */
+                            svm_check_intercept(SVM_EXIT_VINTR);
+                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
+                            if (loglevel & CPU_LOG_TB_IN_ASM)
+                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
+                            do_interrupt(intno, 0, 0, 0, 1);
+                            next_tb = 0;
 #endif
+                        }
-                    }
+                    }
 #elif defined(TARGET_PPC)
 #if 0

--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -145,11 +145,8 @@
 #define HF_OSFXSR_SHIFT     16 /* CR4.OSFXSR */
 #define HF_VM_SHIFT         17 /* must be same as eflags */
 #define HF_SMM_SHIFT        19 /* CPU in SMM mode */
-#define HF_GIF_SHIFT        20 /* if set CPU takes interrupts */
-#define HF_HIF_SHIFT        21 /* shadow copy of IF_MASK when in SVM */
-#define HF_NMI_SHIFT        22 /* CPU serving NMI */
-#define HF_SVME_SHIFT       23 /* SVME enabled (copy of EFER.SVME) */
-#define HF_SVMI_SHIFT       24 /* SVM intercepts are active */
+#define HF_SVME_SHIFT       20 /* SVME enabled (copy of EFER.SVME) */
+#define HF_SVMI_SHIFT       21 /* SVM intercepts are active */
 
 #define HF_CPL_MASK          (3 << HF_CPL_SHIFT)
 #define HF_SOFTMMU_MASK      (1 << HF_SOFTMMU_SHIFT)
@@ -166,12 +163,21 @@
 #define HF_CS64_MASK         (1 << HF_CS64_SHIFT)
 #define HF_OSFXSR_MASK       (1 << HF_OSFXSR_SHIFT)
 #define HF_SMM_MASK          (1 << HF_SMM_SHIFT)
-#define HF_GIF_MASK          (1 << HF_GIF_SHIFT)
-#define HF_HIF_MASK          (1 << HF_HIF_SHIFT)
-#define HF_NMI_MASK          (1 << HF_NMI_SHIFT)
 #define HF_SVME_MASK         (1 << HF_SVME_SHIFT)
 #define HF_SVMI_MASK         (1 << HF_SVMI_SHIFT)
 
+/* hflags2 */
+#define HF2_GIF_SHIFT        0 /* if set CPU takes interrupts */
+#define HF2_HIF_SHIFT        1 /* value of IF_MASK when entering SVM */
+#define HF2_NMI_SHIFT        2 /* CPU serving NMI */
+#define HF2_VINTR_SHIFT      3 /* value of V_INTR_MASKING bit */
+
+#define HF2_GIF_MASK         (1 << HF2_GIF_SHIFT)
+#define HF2_HIF_MASK         (1 << HF2_HIF_SHIFT)
+#define HF2_NMI_MASK         (1 << HF2_NMI_SHIFT)
+#define HF2_VINTR_MASK       (1 << HF2_VINTR_SHIFT)
+
 #define CR0_PE_MASK  (1 << 0)
 #define CR0_MP_MASK  (1 << 1)
 #define CR0_EM_MASK  (1 << 2)
@@ -488,7 +494,9 @@ typedef struct CPUX86State {
     target_ulong cc_dst;
     uint32_t cc_op;
     int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
-    uint32_t hflags; /* hidden flags, see HF_xxx constants */
+    uint32_t hflags; /* TB flags, see HF_xxx constants. These flags
+                        are known at translation time. */
+    uint32_t hflags2; /* various other flags, see HF2_xxx constants. */
 
     /* segments */
     SegmentCache segs[6]; /* selector values */
@@ -497,7 +505,7 @@ typedef struct CPUX86State {
     SegmentCache gdt; /* only base and limit are used */
     SegmentCache idt; /* only base and limit are used */
 
-    target_ulong cr[9]; /* NOTE: cr1, cr5-7 are unused */
+    target_ulong cr[5]; /* NOTE: cr1 is unused */
     uint64_t a20_mask;
 
     /* FPU state */
@@ -541,6 +549,7 @@ typedef struct CPUX86State {
     uint16_t intercept_dr_read;
    uint16_t intercept_dr_write;
     uint32_t intercept_exceptions;
+    uint8_t v_tpr;
 
 #ifdef TARGET_X86_64
     target_ulong lstar;
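The comment change on hflags is the design point behind the new field: hflags is part of the TB flags and must be known at translation time, while state that can flip at runtime without invalidating translated code (GIF, NMI-in-progress, V_INTR_MASKING, the IF shadow) now lives in hflags2. A minimal sketch of how such a bit is toggled (svm_set_gif is a hypothetical helper; the commit does this directly in helper_stgi()/helper_clgi() further below):

    /* Illustrative only: flip GIF without touching the TB flags. */
    static inline void svm_set_gif(CPUX86State *env, int value)
    {
        if (value)
            env->hflags2 |= HF2_GIF_MASK;
        else
            env->hflags2 &= ~HF2_GIF_MASK;
    }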

--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -374,7 +374,7 @@ void cpu_reset(CPUX86State *env)
 #ifdef CONFIG_SOFTMMU
     env->hflags |= HF_SOFTMMU_MASK;
 #endif
-    env->hflags |= HF_GIF_MASK;
+    env->hflags2 |= HF2_GIF_MASK;
 
     cpu_x86_update_cr0(env, 0x60000010);
     env->a20_mask = ~0x0;

--- a/target-i386/helper.h
+++ b/target-i386/helper.h
@@ -47,9 +47,6 @@ DEF_HELPER(target_ulong, helper_read_crN, (int reg))
 DEF_HELPER(void, helper_write_crN, (int reg, target_ulong t0))
 DEF_HELPER(void, helper_lmsw, (target_ulong t0))
 DEF_HELPER(void, helper_clts, (void))
-#if !defined(CONFIG_USER_ONLY)
-DEF_HELPER(target_ulong, helper_movtl_T0_cr8, (void))
-#endif
 DEF_HELPER(void, helper_movl_drN_T0, (int reg, target_ulong t0))
 DEF_HELPER(void, helper_invlpg, (target_ulong addr))
@@ -102,7 +99,7 @@ DEF_HELPER(void, helper_svm_check_intercept_param, (uint32_t type, uint64_t para
 DEF_HELPER(void, helper_vmexit, (uint32_t exit_code, uint64_t exit_info_1))
 DEF_HELPER(void, helper_svm_check_io, (uint32_t port, uint32_t param,
                                        uint32_t next_eip_addend))
-DEF_HELPER(void, helper_vmrun, (int aflag))
+DEF_HELPER(void, helper_vmrun, (int aflag, int next_eip_addend))
 DEF_HELPER(void, helper_vmmcall, (void))
 DEF_HELPER(void, helper_vmload, (int aflag))
 DEF_HELPER(void, helper_vmsave, (int aflag))

--- a/target-i386/op_helper.c
+++ b/target-i386/op_helper.c
@@ -2591,7 +2591,7 @@ void helper_iret_real(int shift)
     if (shift == 0)
         eflags_mask &= 0xffff;
     load_eflags(new_eflags, eflags_mask);
-    env->hflags &= ~HF_NMI_MASK;
+    env->hflags2 &= ~HF2_NMI_MASK;
 }
 
 static inline void validate_seg(int seg_reg, int cpl)
@@ -2843,7 +2843,7 @@ void helper_iret_protected(int shift, int next_eip)
     } else {
         helper_ret_protected(shift, 1, 0);
     }
-    env->hflags &= ~HF_NMI_MASK;
+    env->hflags2 &= ~HF2_NMI_MASK;
 #ifdef USE_KQEMU
     if (kqemu_is_ok(env)) {
         CC_OP = CC_OP_EFLAGS;
@@ -2934,7 +2934,11 @@ target_ulong helper_read_crN(int reg)
         val = env->cr[reg];
         break;
     case 8:
-        val = cpu_get_apic_tpr(env);
+        if (!(env->hflags2 & HF2_VINTR_MASK)) {
+            val = cpu_get_apic_tpr(env);
+        } else {
+            val = env->v_tpr;
+        }
         break;
     }
     return val;
@@ -2954,8 +2958,10 @@ void helper_write_crN(int reg, target_ulong t0)
         cpu_x86_update_cr4(env, t0);
         break;
     case 8:
-        cpu_set_apic_tpr(env, t0);
-        env->cr[8] = t0;
+        if (!(env->hflags2 & HF2_VINTR_MASK)) {
+            cpu_set_apic_tpr(env, t0);
+        }
+        env->v_tpr = t0 & 0x0f;
         break;
     default:
         env->cr[reg] = t0;
@@ -2978,13 +2984,6 @@ void helper_clts(void)
     env->hflags &= ~HF_TS_MASK;
 }
 
-#if !defined(CONFIG_USER_ONLY)
-target_ulong helper_movtl_T0_cr8(void)
-{
-    return cpu_get_apic_tpr(env);
-}
-#endif
-
 /* XXX: do more */
 void helper_movl_drN_T0(int reg, target_ulong t0)
 {
@@ -4721,7 +4720,7 @@ void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
 
 #if defined(CONFIG_USER_ONLY)
 
-void helper_vmrun(int aflag)
+void helper_vmrun(int aflag, int next_eip_addend)
 {
 }
 void helper_vmmcall(void)
@@ -4791,7 +4790,7 @@ static inline void svm_load_seg_cache(target_phys_addr_t addr,
                  sc->base, sc->limit, sc->flags);
 }
 
-void helper_vmrun(int aflag)
+void helper_vmrun(int aflag, int next_eip_addend)
 {
     target_ulong addr;
     uint32_t event_inj;
@@ -4820,7 +4819,6 @@ void helper_vmrun(int aflag)
     stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
     stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
     stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
-    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
     stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
     stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
@@ -4836,7 +4834,8 @@ void helper_vmrun(int aflag)
     svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                  &env->segs[R_DS]);
 
-    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
+    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
+             EIP + next_eip_addend);
     stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
     stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
@@ -4866,17 +4865,16 @@ void helper_vmrun(int aflag)
     cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
     env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
     int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
+    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
     if (int_ctl & V_INTR_MASKING_MASK) {
-        env->cr[8] = int_ctl & V_TPR_MASK;
-        cpu_set_apic_tpr(env, env->cr[8]);
+        env->v_tpr = int_ctl & V_TPR_MASK;
+        env->hflags2 |= HF2_VINTR_MASK;
         if (env->eflags & IF_MASK)
-            env->hflags |= HF_HIF_MASK;
+            env->hflags2 |= HF2_HIF_MASK;
     }
 
-#ifdef TARGET_X86_64
     cpu_load_efer(env,
                   ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
-#endif
     env->eflags = 0;
     load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
@@ -4912,6 +4910,10 @@ void helper_vmrun(int aflag)
     helper_stgi();
 
+    if (int_ctl & V_IRQ_MASK) {
+        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
+    }
+
     /* maybe we need to inject an event */
     event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
     if (event_inj & SVM_EVTINJ_VALID) {
@@ -4931,14 +4933,17 @@ void helper_vmrun(int aflag)
             env->exception_next_eip = -1;
             if (loglevel & CPU_LOG_TB_IN_ASM)
                 fprintf(logfile, "INTR");
+            /* XXX: is it always correct ? */
+            do_interrupt(vector, 0, 0, 0, 1);
             break;
         case SVM_EVTINJ_TYPE_NMI:
-            env->exception_index = vector;
+            env->exception_index = EXCP02_NMI;
             env->error_code = event_inj_err;
             env->exception_is_int = 0;
             env->exception_next_eip = EIP;
             if (loglevel & CPU_LOG_TB_IN_ASM)
                 fprintf(logfile, "NMI");
+            cpu_loop_exit();
             break;
         case SVM_EVTINJ_TYPE_EXEPT:
             env->exception_index = vector;
@@ -4947,6 +4952,7 @@ void helper_vmrun(int aflag)
             env->exception_next_eip = -1;
             if (loglevel & CPU_LOG_TB_IN_ASM)
                 fprintf(logfile, "EXEPT");
+            cpu_loop_exit();
             break;
         case SVM_EVTINJ_TYPE_SOFT:
             env->exception_index = vector;
@@ -4955,17 +4961,12 @@ void helper_vmrun(int aflag)
             env->exception_next_eip = EIP;
             if (loglevel & CPU_LOG_TB_IN_ASM)
                 fprintf(logfile, "SOFT");
+            cpu_loop_exit();
             break;
         }
         if (loglevel & CPU_LOG_TB_IN_ASM)
             fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
     }
-    if ((int_ctl & V_IRQ_MASK) ||
-        (env->intercept & (1ULL << (SVM_EXIT_INTR - SVM_EXIT_INTR)))) {
-        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
-    }
-
-    cpu_loop_exit();
 }
 
 void helper_vmmcall(void)
@@ -5049,13 +5050,13 @@ void helper_vmsave(int aflag)
 void helper_stgi(void)
 {
     helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
-    env->hflags |= HF_GIF_MASK;
+    env->hflags2 |= HF2_GIF_MASK;
 }
 
 void helper_clgi(void)
 {
     helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
-    env->hflags &= ~HF_GIF_MASK;
+    env->hflags2 &= ~HF2_GIF_MASK;
 }
 
 void helper_skinit(void)
@@ -5204,11 +5205,12 @@ void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
     stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
     stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
 
-    if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
-        int_ctl &= ~V_TPR_MASK;
-        int_ctl |= env->cr[8] & V_TPR_MASK;
-        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
-    }
+    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
+    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
+    int_ctl |= env->v_tpr & V_TPR_MASK;
+    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
+        int_ctl |= V_IRQ_MASK;
+    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
 
     stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
     stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
@@ -5219,7 +5221,7 @@ void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
     stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
 
     /* Reload the host state from vm_hsave */
-    env->hflags &= ~HF_HIF_MASK;
+    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
     env->hflags &= ~HF_SVMI_MASK;
     env->intercept = 0;
     env->intercept_exceptions = 0;
@@ -5234,17 +5236,10 @@ void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
     cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
     cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
     cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
-    if (int_ctl & V_INTR_MASKING_MASK) {
-        env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
-        cpu_set_apic_tpr(env, env->cr[8]);
-    }
     /* we need to set the efer after the crs so the hidden flags get
        set properly */
-#ifdef TARGET_X86_64
     cpu_load_efer(env,
                   ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
-#endif
     env->eflags = 0;
     load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
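The CR8 rework above means that while a guest runs with V_INTR_MASKING set, CR8 reads and writes are serviced from the shadowed v_tpr field instead of the real APIC TPR, and the value is folded back into int_ctl on #VMEXIT. A hedged sketch of the resulting access paths (guest_read_cr8 and guest_write_cr8 are hypothetical names; the commit implements this inside helper_read_crN()/helper_write_crN() case 8):

    /* Illustrative only: CR8 behaviour after this commit. */
    static target_ulong guest_read_cr8(CPUX86State *env)
    {
        if (env->hflags2 & HF2_VINTR_MASK)
            return env->v_tpr;            /* shadowed TPR while V_INTR_MASKING is active */
        return cpu_get_apic_tpr(env);     /* otherwise the real APIC TPR */
    }

    static void guest_write_cr8(CPUX86State *env, target_ulong t0)
    {
        if (!(env->hflags2 & HF2_VINTR_MASK))
            cpu_set_apic_tpr(env, t0);
        env->v_tpr = t0 & 0x0f;           /* TPR is a 4-bit field */
    }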

--- a/target-i386/svm.h
+++ b/target-i386/svm.h
@@ -205,9 +205,7 @@ struct __attribute__ ((__packed__)) vmcb_save_area {
     uint64_t sysenter_esp;
     uint64_t sysenter_eip;
     uint64_t cr2;
-    /* qemu: cr8 added to reuse this as hsave */
-    uint64_t cr8;
-    uint8_t reserved_6[32 - 8]; /* originally 32 */
+    uint8_t reserved_6[32];
     uint64_t g_pat;
     uint64_t dbgctl;
     uint64_t br_from;

--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -6569,10 +6569,11 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
                 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                 break;
             } else {
-                tcg_gen_helper_0_1(helper_vmrun,
-                                   tcg_const_i32(s->aflag));
-                s->cc_op = CC_OP_EFLAGS;
-                gen_eob(s);
+                tcg_gen_helper_0_2(helper_vmrun,
+                                   tcg_const_i32(s->aflag),
+                                   tcg_const_i32(s->pc - pc_start));
+                tcg_gen_exit_tb(0);
+                s->is_jmp = 3;
             }
             break;
         case 1: /* VMMCALL */