mirror of https://github.com/xemu-project/xemu.git
kvmclock: add a new function to update env->tsc.
Commit 317b0a6d8 fixed an issue caused by an outdated
env->tsc value, but the fix led to 'cpu_synchronize_all_states()'
being called twice during live migration. 'cpu_synchronize_all_states()'
takes about 130us for a VM with 4 vCPUs, which is a bit expensive.
Synchronizing the whole CPU context just to update env->tsc is
wasteful; this patch uses a new function that updates only env->tsc.
Compared to 'cpu_synchronize_all_states()', it takes only about 20us.
Signed-off-by: Liang Li <liang.z.li@intel.com>
Message-Id: <1446695464-27116-2-git-send-email-liang.z.li@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
b553a04280
commit
0fd7e098db
|
@ -17,7 +17,7 @@
|
|||
#include "qemu/host-utils.h"
|
||||
#include "sysemu/sysemu.h"
|
||||
#include "sysemu/kvm.h"
|
||||
#include "sysemu/cpus.h"
|
||||
#include "kvm_i386.h"
|
||||
#include "hw/sysbus.h"
|
||||
#include "hw/kvm/clock.h"
|
||||
|
||||
|
@ -125,21 +125,7 @@ static void kvmclock_vm_state_change(void *opaque, int running,
|
|||
return;
|
||||
}
|
||||
|
||||
cpu_synchronize_all_states();
|
||||
/* In theory, the cpu_synchronize_all_states() call above wouldn't
|
||||
* affect the rest of the code, as the VCPU state inside CPUState
|
||||
* is supposed to always match the VCPU state on the kernel side.
|
||||
*
|
||||
* In practice, calling cpu_synchronize_state() too soon will load the
|
||||
* kernel-side APIC state into X86CPU.apic_state too early, APIC state
|
||||
* won't be reloaded later because CPUState.vcpu_dirty==true, and
|
||||
* outdated APIC state may be migrated to another host.
|
||||
*
|
||||
* The real fix would be to make sure outdated APIC state is read
|
||||
* from the kernel again when necessary. While this is not fixed, we
|
||||
* need the cpu_clean_all_dirty() call below.
|
||||
*/
|
||||
cpu_clean_all_dirty();
|
||||
kvm_synchronize_all_tsc();
|
||||
|
||||
ret = kvm_vm_ioctl(kvm_state, KVM_GET_CLOCK, &data);
|
||||
if (ret < 0) {
|
||||
|
|
|
@ -111,6 +111,51 @@ bool kvm_allows_irq0_override(void)
|
|||
return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
|
||||
}
|
||||
|
||||
/*
 * Read the guest TSC of @cs from the kernel via KVM_GET_MSRS and
 * cache it in env->tsc.
 *
 * Returns 0 on success (or when the cached value is already valid),
 * or the negative errno returned by the KVM_GET_MSRS ioctl.
 */
static int kvm_get_tsc(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } buf;
    int err;

    if (env->tsc_valid) {
        /* Cached value is still current; nothing to do. */
        return 0;
    }

    buf.info.nmsrs = 1;
    buf.entries[0].index = MSR_IA32_TSC;
    /*
     * While the VM is stopped the TSC cannot advance, so the value we
     * read now stays valid until the VM is running again.
     */
    env->tsc_valid = !runstate_is_running();

    err = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &buf);
    if (err < 0) {
        return err;
    }

    env->tsc = buf.entries[0].data;
    return 0;
}
|
||||
|
||||
static inline void do_kvm_synchronize_tsc(void *arg)
|
||||
{
|
||||
CPUState *cpu = arg;
|
||||
|
||||
kvm_get_tsc(cpu);
|
||||
}
|
||||
|
||||
void kvm_synchronize_all_tsc(void)
|
||||
{
|
||||
CPUState *cpu;
|
||||
|
||||
if (kvm_enabled()) {
|
||||
CPU_FOREACH(cpu) {
|
||||
run_on_cpu(cpu, do_kvm_synchronize_tsc, cpu);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
|
||||
{
|
||||
struct kvm_cpuid2 *cpuid;
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
|
||||
bool kvm_allows_irq0_override(void);
|
||||
bool kvm_has_smm(void);
|
||||
void kvm_synchronize_all_tsc(void);
|
||||
void kvm_arch_reset_vcpu(X86CPU *cs);
|
||||
void kvm_arch_do_init_vcpu(X86CPU *cs);
|
||||
|
||||
|
|
Loading…
Reference in New Issue