diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index dba92936a2..0d528ac58f 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -391,6 +391,11 @@ typedef enum X86Seg {
 #define MSR_IA32_TSX_CTRL               0x122
 #define MSR_IA32_TSCDEADLINE            0x6e0
 #define MSR_IA32_PKRS                   0x6e1
+#define MSR_ARCH_LBR_CTL                0x000014ce
+#define MSR_ARCH_LBR_DEPTH              0x000014cf
+#define MSR_ARCH_LBR_FROM_0             0x00001500
+#define MSR_ARCH_LBR_TO_0               0x00001600
+#define MSR_ARCH_LBR_INFO_0             0x00001200
 
 #define FEATURE_CONTROL_LOCKED                    (1<<0)
 #define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX  (1ULL << 1)
@@ -1650,6 +1655,11 @@ typedef struct CPUArchState {
     uint64_t msr_xfd;
     uint64_t msr_xfd_err;
 
+    /* Per-VCPU Arch LBR MSRs */
+    uint64_t msr_lbr_ctl;
+    uint64_t msr_lbr_depth;
+    LBREntry lbr_records[ARCH_LBR_NR_ENTRIES];
+
     /* exception/interrupt handling */
     int error_code;
     int exception_is_int;
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index 536cbe5fad..a9ee8eebd7 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -3367,6 +3367,38 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
                               env->msr_xfd_err);
         }
 
+        if (kvm_enabled() && cpu->enable_pmu &&
+            (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
+            uint64_t depth;
+            int i, ret;
+
+            /*
+             * Only migrate Arch LBR state when: 1) Arch LBR is enabled
+             * on the migrated vcpu, and 2) the host Arch LBR depth
+             * matches the source guest's, to avoid a guest/host
+             * configuration mismatch on the MSR and the unexpected
+             * misbehavior that would follow.
+             */
+            ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
+
+            if (ret == 1 && (env->msr_lbr_ctl & 0x1) && !!depth &&
+                depth == env->msr_lbr_depth) {
+                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, env->msr_lbr_ctl);
+                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, env->msr_lbr_depth);
+
+                for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) {
+                    if (!env->lbr_records[i].from) {
+                        continue;
+                    }
+                    kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i,
+                                      env->lbr_records[i].from);
+                    kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i,
+                                      env->lbr_records[i].to);
+                    kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i,
+                                      env->lbr_records[i].info);
+                }
+            }
+        }
+
         /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
          *       kvm_put_msr_feature_control. */
     }
@@ -3767,6 +3799,26 @@ static int kvm_get_msrs(X86CPU *cpu)
         kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR, 0);
     }
 
+    if (kvm_enabled() && cpu->enable_pmu &&
+        (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
+        uint64_t ctl, depth;
+        int i, ret2;
+
+        ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_CTL, &ctl);
+        ret2 = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
+        if (ret == 1 && ret2 == 1 && (ctl & 0x1) &&
+            depth == ARCH_LBR_NR_ENTRIES) {
+            kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, 0);
+            kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, 0);
+
+            for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) {
+                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i, 0);
+                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i, 0);
+                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i, 0);
+            }
+        }
+    }
+
     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
     if (ret < 0) {
         return ret;
@@ -4072,6 +4124,21 @@ static int kvm_get_msrs(X86CPU *cpu)
         case MSR_IA32_XFD_ERR:
             env->msr_xfd_err = msrs[i].data;
             break;
+        case MSR_ARCH_LBR_CTL:
+            env->msr_lbr_ctl = msrs[i].data;
+            break;
+        case MSR_ARCH_LBR_DEPTH:
+            env->msr_lbr_depth = msrs[i].data;
+            break;
+        case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
+            env->lbr_records[index - MSR_ARCH_LBR_FROM_0].from = msrs[i].data;
+            break;
+        case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
+            env->lbr_records[index - MSR_ARCH_LBR_TO_0].to = msrs[i].data;
+            break;
+        case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
+            env->lbr_records[index - MSR_ARCH_LBR_INFO_0].info = msrs[i].data;
+            break;
         }
     }
 
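
LBREntry and ARCH_LBR_NR_ENTRIES are used above but not defined in this
diff, so they presumably arrive in a companion patch of the series. A
minimal sketch consistent with the usage here -- the "... + 31" case
ranges imply a depth of 32, and the field names mirror the accesses --
though the actual definition elsewhere in the series may differ:

    /* Sketch only: definitions this diff assumes, normally supplied by
     * a companion patch. 32 entries matches the "+ 31" case ranges. */
    #define ARCH_LBR_NR_ENTRIES    32

    typedef struct LBREntry {
        uint64_t from;    /* MSR_ARCH_LBR_FROM_n: branch source address */
        uint64_t to;      /* MSR_ARCH_LBR_TO_n:   branch target address */
        uint64_t info;    /* MSR_ARCH_LBR_INFO_n: branch metadata       */
    } LBREntry;

With that layout, a returned MSR index of 0x1605 (MSR_ARCH_LBR_TO_0 + 5)
falls into the MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31 range case
and is stored as lbr_records[5].to.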
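
Separately, the put/get paths above only cover the KVM ioctl side; for
msr_lbr_ctl, msr_lbr_depth and lbr_records to actually be carried in the
migration stream, a vmstate subsection is also needed. A hedged sketch of
what that could look like in target/i386/machine.c (names, versioning and
the .needed test are assumptions, not necessarily the series' actual
change):

    /* Emit the subsection only when the guest exposes Arch LBR. */
    static bool arch_lbr_needed(void *opaque)
    {
        X86CPU *cpu = opaque;
        CPUX86State *env = &cpu->env;

        return !!(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR);
    }

    /* Per-record layout: one from/to/info triple per LBR entry. */
    static const VMStateDescription vmstate_lbr_records_var = {
        .name = "lbr_records",
        .version_id = 1,
        .minimum_version_id = 1,
        .fields = (VMStateField[]) {
            VMSTATE_UINT64(from, LBREntry),
            VMSTATE_UINT64(to, LBREntry),
            VMSTATE_UINT64(info, LBREntry),
            VMSTATE_END_OF_LIST()
        }
    };

    static const VMStateDescription vmstate_arch_lbr = {
        .name = "cpu/arch_lbr",
        .version_id = 1,
        .minimum_version_id = 1,
        .needed = arch_lbr_needed,
        .fields = (VMStateField[]) {
            VMSTATE_UINT64(env.msr_lbr_ctl, X86CPU),
            VMSTATE_UINT64(env.msr_lbr_depth, X86CPU),
            VMSTATE_STRUCT_ARRAY(env.lbr_records, X86CPU,
                                 ARCH_LBR_NR_ENTRIES, 1,
                                 vmstate_lbr_records_var, LBREntry),
            VMSTATE_END_OF_LIST()
        }
    };

The subsection would then be hooked into vmstate_x86_cpu's .subsections
list so it is transferred only when arch_lbr_needed() returns true.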