mirror of https://github.com/xqemu/xqemu.git
KVM: x86: Restrict writeback of VCPU state
Do not write nmi_pending, sipi_vector, and mp_state unless we at least go through a reset. The TSC and the KVM wall clock should only be written on a full sync; otherwise we risk dropping some time on a state read-modify-write.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
This commit is contained in:
parent ea375f9ab8
commit ea64305139
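Note: the three-valued writeback level this patch checks against comes from QEMU's kvm.h as introduced by the parent commit (KVM_PUT_RUNTIME_STATE < KVM_PUT_RESET_STATE < KVM_PUT_FULL_STATE). Below is a minimal, compilable sketch of how that level is meant to gate the writeback; the put_state() helper and its printf placeholders are purely illustrative and are not the actual QEMU code.

#include <stdio.h>

/* Writeback levels, mirroring QEMU's kvm.h (added by the parent commit). */
#define KVM_PUT_RUNTIME_STATE 1   /* regular writeback while the guest runs */
#define KVM_PUT_RESET_STATE   2   /* the VCPU just went through a reset */
#define KVM_PUT_FULL_STATE    3   /* full sync, e.g. init or migration */

/* Hypothetical helper, for illustration only. */
static void put_state(int level)
{
    /* State QEMU fully owns can be written back at any level. */
    printf("write general registers and ordinary MSRs\n");

    if (level >= KVM_PUT_RESET_STATE) {
        /* Only meaningful after a reset; at runtime the kernel's view of
         * these fields is the authoritative one. */
        printf("write mp_state, nmi_pending, sipi_vector\n");
    }
    if (level == KVM_PUT_FULL_STATE) {
        /* Restricted to a full sync so that a routine writeback does not
         * read-modify-write guest time and silently drop some of it. */
        printf("write MSR_IA32_TSC, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK\n");
    }
}

int main(void)
{
    put_state(KVM_PUT_RUNTIME_STATE);
    put_state(KVM_PUT_FULL_STATE);
    return 0;
}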
@@ -546,7 +546,7 @@ static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
     entry->data = value;
 }

-static int kvm_put_msrs(CPUState *env)
+static int kvm_put_msrs(CPUState *env, int level)
 {
     struct {
         struct kvm_msrs info;
@@ -560,7 +560,6 @@ static int kvm_put_msrs(CPUState *env)
     kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
     if (kvm_has_msr_star(env))
         kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
-    kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
 #ifdef TARGET_X86_64
     /* FIXME if lm capable */
     kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
@@ -568,8 +567,12 @@ static int kvm_put_msrs(CPUState *env)
     kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
     kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
 #endif
-    kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME, env->system_time_msr);
-    kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
+    if (level == KVM_PUT_FULL_STATE) {
+        kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
+        kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,
+                          env->system_time_msr);
+        kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
+    }

     msr_data.info.nmsrs = n;

@@ -782,7 +785,7 @@ static int kvm_get_mp_state(CPUState *env)
     return 0;
 }

-static int kvm_put_vcpu_events(CPUState *env)
+static int kvm_put_vcpu_events(CPUState *env, int level)
 {
 #ifdef KVM_CAP_VCPU_EVENTS
     struct kvm_vcpu_events events;
@@ -806,8 +809,11 @@ static int kvm_put_vcpu_events(CPUState *env)

     events.sipi_vector = env->sipi_vector;

-    events.flags =
-        KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
+    events.flags = 0;
+    if (level >= KVM_PUT_RESET_STATE) {
+        events.flags |=
+            KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
+    }

     return kvm_vcpu_ioctl(env, KVM_SET_VCPU_EVENTS, &events);
 #else
@@ -899,15 +905,17 @@ int kvm_arch_put_registers(CPUState *env, int level)
     if (ret < 0)
         return ret;

-    ret = kvm_put_msrs(env);
+    ret = kvm_put_msrs(env, level);
     if (ret < 0)
         return ret;

-    ret = kvm_put_mp_state(env);
-    if (ret < 0)
-        return ret;
+    if (level >= KVM_PUT_RESET_STATE) {
+        ret = kvm_put_mp_state(env);
+        if (ret < 0)
+            return ret;
+    }

-    ret = kvm_put_vcpu_events(env);
+    ret = kvm_put_vcpu_events(env, level);
     if (ret < 0)
         return ret;

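Usage note: the level is chosen by the generic KVM layer's synchronization hooks rather than by target code. The sketch below shows the caller side as set up by the parent commit, assuming the kvm_cpu_synchronize_post_reset()/kvm_cpu_synchronize_post_init() names from kvm-all.c of that era; CPUState and kvm_arch_put_registers() are mocked here so the example stands alone, and only the level values mirror the real code.

#include <stdio.h>

#define KVM_PUT_RUNTIME_STATE 1
#define KVM_PUT_RESET_STATE   2
#define KVM_PUT_FULL_STATE    3

typedef struct CPUState CPUState;   /* opaque stand-in for the real type */

/* Mocked: the real function lives in QEMU's per-target KVM code and is the
 * one this patch teaches to honor the level argument. */
static int kvm_arch_put_registers(CPUState *env, int level)
{
    (void)env;
    printf("kvm_arch_put_registers(level=%d)\n", level);
    return 0;
}

/* After a VCPU reset: also push mp_state, nmi_pending and sipi_vector. */
static void kvm_cpu_synchronize_post_reset(CPUState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_RESET_STATE);
}

/* After init or after loading state (e.g. incoming migration): full sync,
 * including the TSC and the KVM system-time/wall-clock MSRs. */
static void kvm_cpu_synchronize_post_init(CPUState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_FULL_STATE);
}

int main(void)
{
    kvm_cpu_synchronize_post_reset(NULL);
    kvm_cpu_synchronize_post_init(NULL);
    return 0;
}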