mirror of https://github.com/xemu-project/xemu.git
Merge remote-tracking branch 'remotes/kvm/uq/master' into staging
* remotes/kvm/uq/master:
  target-i386: Move KVM default-vendor hack to instance_init
  target-i386: Don't change x86_def_t struct on cpu_x86_register()
  target-i386: Eliminate CONFIG_KVM #ifdefs
  kvm: add support for hyper-v timers
  kvm: make hyperv vapic assist page migratable
  kvm: make hyperv hypercall and guest os id MSRs migratable.
  kvm: make availability of Hyper-V enlightenments dependent on KVM_CAP_HYPERV
  KVM: fix coexistence of KVM and Hyper-V leaves
  kvm: print suberror on all internal errors
  target-i386: kvm_check_features_against_host(): Kill feature word array
  target-i386: kvm_cpu_fill_host(): Fill feature words in a loop
  target-i386: kvm_cpu_fill_host(): Set all feature words at end of function
  target-i386: kvm_cpu_fill_host(): No need to check xlevel2
  target-i386: kvm_cpu_fill_host(): No need to check CPU vendor
  target-i386: kvm_cpu_fill_host(): No need to check level
  target-i386: kvm_cpu_fill_host(): Kill unused code

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit bc1c72171c
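For background: the Hyper-V enlightenments this series touches are exposed as X86CPU properties (hv-relaxed, hv-vapic, hv-spinlocks, and the hv-time property added below), and the series gates them on the host kernel advertising KVM_CAP_HYPERV (plus KVM_CAP_HYPERV_TIME for hv-time). A purely illustrative invocation enabling them (the CPU model and spinlock retry count are placeholders, not taken from this commit) would look like:

    qemu-system-x86_64 -enable-kvm -cpu SandyBridge,hv_relaxed,hv_vapic,hv_spinlocks=0x1000,hv_time ...

The underscore spellings are the usual -cpu feature-string forms of the dash-named properties; when the kernel capability is missing, the corresponding CPUID leaves and MSRs in the diff below are simply not exposed to the guest.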
@@ -1546,17 +1546,16 @@ static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
 
 static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
 {
-    fprintf(stderr, "KVM internal error.");
+    fprintf(stderr, "KVM internal error. Suberror: %d\n",
+            run->internal.suberror);
     if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
         int i;
 
-        fprintf(stderr, " Suberror: %d\n", run->internal.suberror);
         for (i = 0; i < run->internal.ndata; ++i) {
             fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
                     i, (uint64_t)run->internal.data[i]);
         }
-    } else {
-        fprintf(stderr, "\n");
     }
     if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
         fprintf(stderr, "emulation failure\n");
@@ -149,6 +149,9 @@
 /* MSR used to read the per-partition time reference counter */
 #define HV_X64_MSR_TIME_REF_COUNT 0x40000020
 
+/* A partition's reference time stamp counter (TSC) page */
+#define HV_X64_MSR_REFERENCE_TSC 0x40000021
+
 /* MSR used to retrieve the TSC frequency */
 #define HV_X64_MSR_TSC_FREQUENCY 0x40000022
 
@@ -674,6 +674,7 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_ARM_EL1_32BIT 93
 #define KVM_CAP_SPAPR_MULTITCE 94
 #define KVM_CAP_EXT_EMUL_CPUID 95
+#define KVM_CAP_HYPERV_TIME 96
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -69,6 +69,7 @@ typedef struct X86CPU {
     bool hyperv_vapic;
     bool hyperv_relaxed_timing;
    int hyperv_spinlock_attempts;
+    bool hyperv_time;
     bool check_cpuid;
     bool enforce_cpuid;
 
@@ -374,7 +374,6 @@ void disable_kvm_pv_eoi(void)
 void host_cpuid(uint32_t function, uint32_t count,
                 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
 {
-#if defined(CONFIG_KVM)
     uint32_t vec[4];
 
 #ifdef __x86_64__
@@ -382,7 +381,7 @@ void host_cpuid(uint32_t function, uint32_t count,
                  : "=a"(vec[0]), "=b"(vec[1]),
                    "=c"(vec[2]), "=d"(vec[3])
                  : "0"(function), "c"(count) : "cc");
-#else
+#elif defined(__i386__)
     asm volatile("pusha \n\t"
                  "cpuid \n\t"
                  "mov %%eax, 0(%2) \n\t"
@@ -392,6 +391,8 @@ void host_cpuid(uint32_t function, uint32_t count,
                  "popa"
                  : : "a"(function), "c"(count), "S"(vec)
                  : "memory", "cc");
+#else
+    abort();
 #endif
 
     if (eax)
@@ -402,7 +403,6 @@ void host_cpuid(uint32_t function, uint32_t count,
         *ecx = vec[2];
     if (edx)
         *edx = vec[3];
-#endif
 }
 
 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
@@ -1119,7 +1119,6 @@ void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
     }
 }
 
-#ifdef CONFIG_KVM
 static int cpu_x86_fill_model_id(char *str)
 {
     uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
@@ -1134,7 +1133,6 @@ static int cpu_x86_fill_model_id(char *str)
     }
     return 0;
 }
-#endif
 
 /* Fill a x86_def_t struct with information about the host CPU, and
  * the CPU features supported by the host hardware + host kernel
@@ -1143,7 +1141,6 @@ static int cpu_x86_fill_model_id(char *str)
  */
 static void kvm_cpu_fill_host(x86_def_t *x86_cpu_def)
 {
-#ifdef CONFIG_KVM
     KVMState *s = kvm_state;
     uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
 
@@ -1160,46 +1157,19 @@ static void kvm_cpu_fill_host(x86_def_t *x86_cpu_def)
     x86_cpu_def->stepping = eax & 0x0F;
 
     x86_cpu_def->level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
-    x86_cpu_def->features[FEAT_1_EDX] =
-            kvm_arch_get_supported_cpuid(s, 0x1, 0, R_EDX);
-    x86_cpu_def->features[FEAT_1_ECX] =
-            kvm_arch_get_supported_cpuid(s, 0x1, 0, R_ECX);
-
-    if (x86_cpu_def->level >= 7) {
-        x86_cpu_def->features[FEAT_7_0_EBX] =
-                    kvm_arch_get_supported_cpuid(s, 0x7, 0, R_EBX);
-    } else {
-        x86_cpu_def->features[FEAT_7_0_EBX] = 0;
-    }
-
     x86_cpu_def->xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
-    x86_cpu_def->features[FEAT_8000_0001_EDX] =
-            kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX);
-    x86_cpu_def->features[FEAT_8000_0001_ECX] =
-            kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_ECX);
+    x86_cpu_def->xlevel2 =
+        kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
 
     cpu_x86_fill_model_id(x86_cpu_def->model_id);
 
-    /* Call Centaur's CPUID instruction. */
-    if (!strcmp(x86_cpu_def->vendor, CPUID_VENDOR_VIA)) {
-        host_cpuid(0xC0000000, 0, &eax, &ebx, &ecx, &edx);
-        eax = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
-        if (eax >= 0xC0000001) {
-            /* Support VIA max extended level */
-            x86_cpu_def->xlevel2 = eax;
-            host_cpuid(0xC0000001, 0, &eax, &ebx, &ecx, &edx);
-            x86_cpu_def->features[FEAT_C000_0001_EDX] =
-                    kvm_arch_get_supported_cpuid(s, 0xC0000001, 0, R_EDX);
-        }
+    FeatureWord w;
+    for (w = 0; w < FEATURE_WORDS; w++) {
+        FeatureWordInfo *wi = &feature_word_info[w];
+        x86_cpu_def->features[w] =
+            kvm_arch_get_supported_cpuid(s, wi->cpuid_eax, wi->cpuid_ecx,
+                                         wi->cpuid_reg);
     }
-
-    /* Other KVM-specific feature fields: */
-    x86_cpu_def->features[FEAT_SVM] =
-        kvm_arch_get_supported_cpuid(s, 0x8000000A, 0, R_EDX);
-    x86_cpu_def->features[FEAT_KVM] =
-        kvm_arch_get_supported_cpuid(s, KVM_CPUID_FEATURES, 0, R_EAX);
-
-#endif /* CONFIG_KVM */
 }
 
 static int unavailable_host_feature(FeatureWordInfo *f, uint32_t mask)
@@ -1226,48 +1196,23 @@ static int unavailable_host_feature(FeatureWordInfo *f, uint32_t mask)
  *
  * This function may be called only if KVM is enabled.
  */
-static int kvm_check_features_against_host(X86CPU *cpu)
+static int kvm_check_features_against_host(KVMState *s, X86CPU *cpu)
 {
     CPUX86State *env = &cpu->env;
-    x86_def_t host_def;
-    uint32_t mask;
-    int rv, i;
-    struct model_features_t ft[] = {
-        {&env->features[FEAT_1_EDX],
-            &host_def.features[FEAT_1_EDX],
-            FEAT_1_EDX },
-        {&env->features[FEAT_1_ECX],
-            &host_def.features[FEAT_1_ECX],
-            FEAT_1_ECX },
-        {&env->features[FEAT_8000_0001_EDX],
-            &host_def.features[FEAT_8000_0001_EDX],
-            FEAT_8000_0001_EDX },
-        {&env->features[FEAT_8000_0001_ECX],
-            &host_def.features[FEAT_8000_0001_ECX],
-            FEAT_8000_0001_ECX },
-        {&env->features[FEAT_C000_0001_EDX],
-            &host_def.features[FEAT_C000_0001_EDX],
-            FEAT_C000_0001_EDX },
-        {&env->features[FEAT_7_0_EBX],
-            &host_def.features[FEAT_7_0_EBX],
-            FEAT_7_0_EBX },
-        {&env->features[FEAT_SVM],
-            &host_def.features[FEAT_SVM],
-            FEAT_SVM },
-        {&env->features[FEAT_KVM],
-            &host_def.features[FEAT_KVM],
-            FEAT_KVM },
-    };
+    int rv = 0;
+    FeatureWord w;
 
     assert(kvm_enabled());
 
-    kvm_cpu_fill_host(&host_def);
-    for (rv = 0, i = 0; i < ARRAY_SIZE(ft); ++i) {
-        FeatureWord w = ft[i].feat_word;
+    for (w = 0; w < FEATURE_WORDS; w++) {
         FeatureWordInfo *wi = &feature_word_info[w];
+        uint32_t guest_feat = env->features[w];
+        uint32_t host_feat = kvm_arch_get_supported_cpuid(s, wi->cpuid_eax,
+                                                          wi->cpuid_ecx,
+                                                          wi->cpuid_reg);
+        uint32_t mask;
        for (mask = 1; mask; mask <<= 1) {
-            if (*ft[i].guest_feat & mask &&
-                !(*ft[i].host_feat & mask)) {
+            if (guest_feat & mask && !(host_feat & mask)) {
                 unavailable_host_feature(wi, mask);
                 rv = 1;
             }
@@ -1656,18 +1601,6 @@ static int cpu_x86_find_by_name(X86CPU *cpu, x86_def_t *x86_cpu_def,
         def = &builtin_x86_defs[i];
         if (strcmp(name, def->name) == 0) {
             memcpy(x86_cpu_def, def, sizeof(*def));
-            /* sysenter isn't supported in compatibility mode on AMD,
-             * syscall isn't supported in compatibility mode on Intel.
-             * Normally we advertise the actual CPU vendor, but you can
-             * override this using the 'vendor' property if you want to use
-             * KVM's sysenter/syscall emulation in compatibility mode and
-             * when doing cross vendor migration
-             */
-            if (kvm_enabled()) {
-                uint32_t ebx = 0, ecx = 0, edx = 0;
-                host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
-                x86_cpu_vendor_words2str(x86_cpu_def->vendor, ebx, edx, ecx);
-            }
             return 0;
         }
     }
@@ -1867,7 +1800,6 @@ CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
     return cpu_list;
 }
 
-#ifdef CONFIG_KVM
 static void filter_features_for_kvm(X86CPU *cpu)
 {
     CPUX86State *env = &cpu->env;
@@ -1884,7 +1816,6 @@ static void filter_features_for_kvm(X86CPU *cpu)
         cpu->filtered_features[w] = requested_features & ~env->features[w];
     }
 }
-#endif
 
 static void cpu_x86_register(X86CPU *cpu, const char *name, Error **errp)
 {
@@ -1898,12 +1829,6 @@ static void cpu_x86_register(X86CPU *cpu, const char *name, Error **errp)
         return;
     }
 
-    if (kvm_enabled()) {
-        def->features[FEAT_KVM] |= kvm_default_features;
-    }
-    def->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
-
-    object_property_set_str(OBJECT(cpu), def->vendor, "vendor", errp);
     object_property_set_int(OBJECT(cpu), def->level, "level", errp);
     object_property_set_int(OBJECT(cpu), def->family, "family", errp);
     object_property_set_int(OBJECT(cpu), def->model, "model", errp);
@@ -1921,6 +1846,31 @@ static void cpu_x86_register(X86CPU *cpu, const char *name, Error **errp)
     cpu->cache_info_passthrough = def->cache_info_passthrough;
 
     object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
+
+    /* Special cases not set in the x86_def_t structs: */
+    if (kvm_enabled()) {
+        env->features[FEAT_KVM] |= kvm_default_features;
+    }
+    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
+
+    /* sysenter isn't supported in compatibility mode on AMD,
+     * syscall isn't supported in compatibility mode on Intel.
+     * Normally we advertise the actual CPU vendor, but you can
+     * override this using the 'vendor' property if you want to use
+     * KVM's sysenter/syscall emulation in compatibility mode and
+     * when doing cross vendor migration
+     */
+    const char *vendor = def->vendor;
+    char host_vendor[CPUID_VENDOR_SZ + 1];
+    if (kvm_enabled()) {
+        uint32_t ebx = 0, ecx = 0, edx = 0;
+        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
+        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
+        vendor = host_vendor;
+    }
+
+    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
+
 }
 
 X86CPU *cpu_x86_create(const char *cpu_model, DeviceState *icc_bridge,
@@ -2588,15 +2538,14 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
         env->features[FEAT_8000_0001_ECX] &= TCG_EXT3_FEATURES;
         env->features[FEAT_SVM] &= TCG_SVM_FEATURES;
     } else {
+        KVMState *s = kvm_state;
         if ((cpu->check_cpuid || cpu->enforce_cpuid)
-            && kvm_check_features_against_host(cpu) && cpu->enforce_cpuid) {
+            && kvm_check_features_against_host(s, cpu) && cpu->enforce_cpuid) {
             error_setg(&local_err,
                        "Host's CPU doesn't support requested features");
             goto out;
         }
-#ifdef CONFIG_KVM
         filter_features_for_kvm(cpu);
-#endif
     }
 
 #ifndef CONFIG_USER_ONLY
@@ -2751,6 +2700,7 @@ static Property x86_cpu_properties[] = {
     { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
     DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
     DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
+    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
     DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
     DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
     DEFINE_PROP_END_OF_LIST()
@@ -862,6 +862,10 @@ typedef struct CPUX86State {
     uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS];
     uint64_t msr_gp_counters[MAX_GP_COUNTERS];
     uint64_t msr_gp_evtsel[MAX_GP_COUNTERS];
+    uint64_t msr_hv_hypercall;
+    uint64_t msr_hv_guest_os_id;
+    uint64_t msr_hv_vapic;
+    uint64_t msr_hv_tsc;
 
     /* exception/interrupt handling */
     int error_code;
@@ -72,6 +72,9 @@ static bool has_msr_misc_enable;
 static bool has_msr_bndcfgs;
 static bool has_msr_kvm_steal_time;
 static int lm_capable_kernel;
+static bool has_msr_hv_hypercall;
+static bool has_msr_hv_vapic;
+static bool has_msr_hv_tsc;
 
 static bool has_msr_architectural_pmu;
 static uint32_t num_architectural_pmu_counters;
@@ -437,8 +440,11 @@ static bool hyperv_hypercall_available(X86CPU *cpu)
 
 static bool hyperv_enabled(X86CPU *cpu)
 {
-    return hyperv_hypercall_available(cpu) ||
-           cpu->hyperv_relaxed_timing;
+    CPUState *cs = CPU(cpu);
+    return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
+           (hyperv_hypercall_available(cpu) ||
+            cpu->hyperv_time ||
+            cpu->hyperv_relaxed_timing);
 }
 
 #define KVM_MAX_CPUID_ENTRIES 100
@@ -455,6 +461,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
     uint32_t unused;
     struct kvm_cpuid_entry2 *c;
     uint32_t signature[3];
+    int kvm_base = KVM_CPUID_SIGNATURE;
     int r;
 
     memset(&cpuid_data, 0, sizeof(cpuid_data));
@@ -462,26 +469,22 @@ int kvm_arch_init_vcpu(CPUState *cs)
     cpuid_i = 0;
 
     /* Paravirtualization CPUIDs */
-    c = &cpuid_data.entries[cpuid_i++];
-    c->function = KVM_CPUID_SIGNATURE;
-    if (!hyperv_enabled(cpu)) {
-        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
-        c->eax = 0;
-    } else {
+    if (hyperv_enabled(cpu)) {
+        c = &cpuid_data.entries[cpuid_i++];
+        c->function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
         memcpy(signature, "Microsoft Hv", 12);
         c->eax = HYPERV_CPUID_MIN;
-    }
-    c->ebx = signature[0];
-    c->ecx = signature[1];
-    c->edx = signature[2];
+        c->ebx = signature[0];
+        c->ecx = signature[1];
+        c->edx = signature[2];
 
-    c = &cpuid_data.entries[cpuid_i++];
-    c->function = KVM_CPUID_FEATURES;
-    c->eax = env->features[FEAT_KVM];
-
-    if (hyperv_enabled(cpu)) {
+        c = &cpuid_data.entries[cpuid_i++];
+        c->function = HYPERV_CPUID_INTERFACE;
         memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
         c->eax = signature[0];
+        c->ebx = 0;
+        c->ecx = 0;
+        c->edx = 0;
 
         c = &cpuid_data.entries[cpuid_i++];
         c->function = HYPERV_CPUID_VERSION;
@@ -496,14 +499,21 @@ int kvm_arch_init_vcpu(CPUState *cs)
         if (cpu->hyperv_vapic) {
             c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
             c->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
+            has_msr_hv_vapic = true;
+        }
+        if (cpu->hyperv_time &&
+            kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
+            c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
+            c->eax |= HV_X64_MSR_TIME_REF_COUNT_AVAILABLE;
+            c->eax |= 0x200;
+            has_msr_hv_tsc = true;
         }
-
         c = &cpuid_data.entries[cpuid_i++];
         c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
         if (cpu->hyperv_relaxed_timing) {
             c->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
         }
-        if (cpu->hyperv_vapic) {
+        if (has_msr_hv_vapic) {
             c->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
         }
         c->ebx = cpu->hyperv_spinlock_attempts;
@@ -513,15 +523,22 @@ int kvm_arch_init_vcpu(CPUState *cs)
         c->eax = 0x40;
         c->ebx = 0x40;
 
-        c = &cpuid_data.entries[cpuid_i++];
-        c->function = KVM_CPUID_SIGNATURE_NEXT;
-        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
-        c->eax = 0;
-        c->ebx = signature[0];
-        c->ecx = signature[1];
-        c->edx = signature[2];
+        kvm_base = KVM_CPUID_SIGNATURE_NEXT;
+        has_msr_hv_hypercall = true;
     }
 
+    memcpy(signature, "KVMKVMKVM\0\0\0", 12);
+    c = &cpuid_data.entries[cpuid_i++];
+    c->function = KVM_CPUID_SIGNATURE | kvm_base;
+    c->eax = 0;
+    c->ebx = signature[0];
+    c->ecx = signature[1];
+    c->edx = signature[2];
+
+    c = &cpuid_data.entries[cpuid_i++];
+    c->function = KVM_CPUID_FEATURES | kvm_base;
+    c->eax = env->features[FEAT_KVM];
+
     has_msr_async_pf_en = c->eax & (1 << KVM_FEATURE_ASYNC_PF);
 
     has_msr_pv_eoi_en = c->eax & (1 << KVM_FEATURE_PV_EOI);
@@ -1220,12 +1237,19 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
         kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_CTRL,
                           env->msr_global_ctrl);
     }
-    if (hyperv_hypercall_available(cpu)) {
-        kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_GUEST_OS_ID, 0);
-        kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_HYPERCALL, 0);
+    if (has_msr_hv_hypercall) {
+        kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_GUEST_OS_ID,
+                          env->msr_hv_guest_os_id);
+        kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_HYPERCALL,
+                          env->msr_hv_hypercall);
     }
-    if (cpu->hyperv_vapic) {
-        kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_APIC_ASSIST_PAGE, 0);
+    if (has_msr_hv_vapic) {
+        kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_APIC_ASSIST_PAGE,
+                          env->msr_hv_vapic);
+    }
+    if (has_msr_hv_tsc) {
+        kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_REFERENCE_TSC,
+                          env->msr_hv_tsc);
     }
 
     /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
@@ -1511,6 +1535,17 @@ static int kvm_get_msrs(X86CPU *cpu)
         }
     }
 
+    if (has_msr_hv_hypercall) {
+        msrs[n++].index = HV_X64_MSR_HYPERCALL;
+        msrs[n++].index = HV_X64_MSR_GUEST_OS_ID;
+    }
+    if (has_msr_hv_vapic) {
+        msrs[n++].index = HV_X64_MSR_APIC_ASSIST_PAGE;
+    }
+    if (has_msr_hv_tsc) {
+        msrs[n++].index = HV_X64_MSR_REFERENCE_TSC;
+    }
+
     msr_data.info.nmsrs = n;
     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
     if (ret < 0) {
@@ -1618,6 +1653,18 @@ static int kvm_get_msrs(X86CPU *cpu)
         case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
             env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
             break;
+        case HV_X64_MSR_HYPERCALL:
+            env->msr_hv_hypercall = msrs[i].data;
+            break;
+        case HV_X64_MSR_GUEST_OS_ID:
+            env->msr_hv_guest_os_id = msrs[i].data;
+            break;
+        case HV_X64_MSR_APIC_ASSIST_PAGE:
+            env->msr_hv_vapic = msrs[i].data;
+            break;
+        case HV_X64_MSR_REFERENCE_TSC:
+            env->msr_hv_tsc = msrs[i].data;
+            break;
         }
     }
 
@@ -554,6 +554,64 @@ static const VMStateDescription vmstate_mpx = {
     }
 };
 
+static bool hyperv_hypercall_enable_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+
+    return env->msr_hv_hypercall != 0 || env->msr_hv_guest_os_id != 0;
+}
+
+static const VMStateDescription vmstate_msr_hypercall_hypercall = {
+    .name = "cpu/msr_hyperv_hypercall",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField []) {
+        VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
+        VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static bool hyperv_vapic_enable_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+
+    return env->msr_hv_vapic != 0;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_vapic = {
+    .name = "cpu/msr_hyperv_vapic",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField []) {
+        VMSTATE_UINT64(env.msr_hv_vapic, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static bool hyperv_time_enable_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+
+    return env->msr_hv_tsc != 0;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_time = {
+    .name = "cpu/msr_hyperv_time",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField []) {
+        VMSTATE_UINT64(env.msr_hv_tsc, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 const VMStateDescription vmstate_x86_cpu = {
     .name = "cpu",
     .version_id = 12,
@@ -688,6 +746,15 @@ const VMStateDescription vmstate_x86_cpu = {
         } , {
             .vmsd = &vmstate_mpx,
             .needed = mpx_needed,
+        }, {
+            .vmsd = &vmstate_msr_hypercall_hypercall,
+            .needed = hyperv_hypercall_enable_needed,
+        }, {
+            .vmsd = &vmstate_msr_hyperv_vapic,
+            .needed = hyperv_vapic_enable_needed,
+        }, {
+            .vmsd = &vmstate_msr_hyperv_time,
+            .needed = hyperv_time_enable_needed,
         } , {
             /* empty */
         }
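For completeness, the new KVM_CAP_HYPERV_TIME capability defined in the header hunk above can be probed from userspace with the standard KVM_CHECK_EXTENSION ioctl; the sketch below is illustrative and not part of this commit (the fallback #define mirrors the value 96 from that hunk):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

#ifndef KVM_CAP_HYPERV_TIME
#define KVM_CAP_HYPERV_TIME 96 /* value from the kvm.h hunk above */
#endif

int main(void)
{
    /* Open the KVM system device and ask whether the Hyper-V reference
     * time counter (what the hv-time CPU property relies on) is supported. */
    int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
    if (kvm < 0) {
        perror("open /dev/kvm");
        return 1;
    }
    int ret = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_HYPERV_TIME);
    printf("KVM_CAP_HYPERV_TIME: %s\n", ret > 0 ? "available" : "not available");
    close(kvm);
    return 0;
}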