diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index cdbf8b0cd7..17f1bb7ecb 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1097,10 +1097,13 @@ typedef struct CPUX86State {
     uint64_t async_pf_en_msr;
     uint64_t pv_eoi_en_msr;
 
+    /* Partition-wide HV MSRs, will be updated only on the first vcpu */
     uint64_t msr_hv_hypercall;
     uint64_t msr_hv_guest_os_id;
-    uint64_t msr_hv_vapic;
     uint64_t msr_hv_tsc;
+
+    /* Per-VCPU HV MSRs */
+    uint64_t msr_hv_vapic;
     uint64_t msr_hv_crash_params[HV_CRASH_PARAMS];
     uint64_t msr_hv_runtime;
     uint64_t msr_hv_synic_control;
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index d4b2ce2e94..89fa65e243 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -1678,19 +1678,26 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
             kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
                               env->msr_global_ctrl);
         }
-        if (has_msr_hv_hypercall) {
-            kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
-                              env->msr_hv_guest_os_id);
-            kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
-                              env->msr_hv_hypercall);
+        /*
+         * Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add,
+         * only sync them to KVM on the first cpu
+         */
+        if (current_cpu == first_cpu) {
+            if (has_msr_hv_hypercall) {
+                kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
+                                  env->msr_hv_guest_os_id);
+                kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
+                                  env->msr_hv_hypercall);
+            }
+            if (cpu->hyperv_time) {
+                kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC,
+                                  env->msr_hv_tsc);
+            }
         }
         if (cpu->hyperv_vapic) {
             kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
                               env->msr_hv_vapic);
         }
-        if (cpu->hyperv_time) {
-            kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, env->msr_hv_tsc);
-        }
         if (has_msr_hv_crash) {
             int j;