mirror of https://github.com/xemu-project/xemu.git
hvf: Align with upstream and fix 32b
parent 8d50346ce6
commit 76707bcc99

 cpus.c | 12

@@ -1051,6 +1051,10 @@ void cpu_synchronize_all_states(void)
 
     CPU_FOREACH(cpu) {
         cpu_synchronize_state(cpu);
+        /* TODO: move to cpu_synchronize_state() */
+        if (hvf_enabled()) {
+            hvf_cpu_synchronize_state(cpu);
+        }
     }
 }
 
@@ -1060,6 +1064,10 @@ void cpu_synchronize_all_post_reset(void)
 
     CPU_FOREACH(cpu) {
         cpu_synchronize_post_reset(cpu);
+        /* TODO: move to cpu_synchronize_post_reset() */
+        if (hvf_enabled()) {
+            hvf_cpu_synchronize_post_reset(cpu);
+        }
     }
 }
 
@@ -1069,6 +1077,10 @@ void cpu_synchronize_all_post_init(void)
 
     CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
+        /* TODO: move to cpu_synchronize_post_init() */
+        if (hvf_enabled()) {
+            hvf_cpu_synchronize_post_init(cpu);
+        }
     }
 }
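Background (not part of the patch): the calls added above only mark where synchronization happens; the HVF helpers themselves defer to the vCPU thread via run_on_cpu(). A rough sketch of that pattern as it looks in upstream QEMU of this period is below; it is illustrative only, and hvf_get_registers() plus the vcpu_dirty flag are assumed from the HVF backend rather than quoted from this tree.

/* Illustrative sketch of the run_on_cpu() deferral behind
 * hvf_cpu_synchronize_state(); the real bodies live in the HVF backend. */
static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
    if (!cpu->vcpu_dirty) {
        hvf_get_registers(cpu);   /* pull vCPU/VMCS state into env */
        cpu->vcpu_dirty = true;   /* env is now newer than the vCPU */
    }
}

void hvf_cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->vcpu_dirty) {
        run_on_cpu(cpu, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);
    }
}
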
@@ -32,6 +32,41 @@ uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx,
 #define hvf_get_supported_cpuid(func, idx, reg) 0
 #endif
 
+/* hvf_slot flags */
+#define HVF_SLOT_LOG (1 << 0)
+
+typedef struct hvf_slot {
+    uint64_t start;
+    uint64_t size;
+    uint8_t *mem;
+    int slot_id;
+    uint32_t flags;
+    MemoryRegion *region;
+} hvf_slot;
+
+typedef struct hvf_vcpu_caps {
+    uint64_t vmx_cap_pinbased;
+    uint64_t vmx_cap_procbased;
+    uint64_t vmx_cap_procbased2;
+    uint64_t vmx_cap_entry;
+    uint64_t vmx_cap_exit;
+    uint64_t vmx_cap_preemption_timer;
+} hvf_vcpu_caps;
+
+typedef struct HVFState {
+    AccelState parent;
+    hvf_slot slots[32];
+    int num_slots;
+
+    hvf_vcpu_caps *hvf_caps;
+} HVFState;
+extern HVFState *hvf_state;
+
+void hvf_set_phys_mem(MemoryRegionSection *, bool);
+void hvf_handle_io(CPUArchState *, uint16_t, void *,
+                   int, int, int);
+hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);
+
 /* Disable HVF if |disable| is 1, otherwise, enable it iff it is supported by
  * the host CPU. Use hvf_enabled() after this to get the result. */
 void hvf_disable(int disable);
@@ -51,9 +86,20 @@ int hvf_smp_cpu_exec(CPUState *);
 void hvf_cpu_synchronize_state(CPUState *);
 void hvf_cpu_synchronize_post_reset(CPUState *);
 void hvf_cpu_synchronize_post_init(CPUState *);
+void _hvf_cpu_synchronize_post_init(CPUState *, run_on_cpu_data);
 
 void hvf_vcpu_destroy(CPUState *);
 void hvf_raise_event(CPUState *);
 /* void hvf_reset_vcpu_state(void *opaque); */
 void hvf_reset_vcpu(CPUState *);
+void vmx_update_tpr(CPUState *);
+void update_apic_tpr(CPUState *);
+int hvf_put_registers(CPUState *);
+void vmx_clear_int_window_exiting(CPUState *cpu);
+
+#define TYPE_HVF_ACCEL ACCEL_CLASS_NAME("hvf")
+
+#define HVF_STATE(obj) \
+    OBJECT_CHECK(HVFState, (obj), TYPE_HVF_ACCEL)
+
 #endif
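Background (not part of the patch): the structures and declarations added to this header previously lived in the per-target HVF header that this commit deletes (see the removed file further down). A minimal sketch of how a lookup over HVFState.slots typically works is shown below; it uses only the names declared above, and the body is illustrative rather than the backend's exact code.

/* Sketch only: find the slot whose guest-physical range overlaps
 * [start, start + size). */
hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size)
{
    for (int x = 0; x < hvf_state->num_slots; x++) {
        hvf_slot *slot = &hvf_state->slots[x];
        /* A slot matches if it is populated and the requested range
         * intersects the slot's range. */
        if (slot->size && start < slot->start + slot->size &&
            start + size > slot->start) {
            return slot;
        }
    }
    return NULL;
}
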
@@ -15,7 +15,6 @@
 #include "sysemu/hax.h"
 #include "sysemu/kvm.h"
 #include "sysemu/whpx.h"
-#include "sysemu/hvf.h"
 
 static inline void cpu_synchronize_state(CPUState *cpu)
 {
@@ -28,9 +27,6 @@ static inline void cpu_synchronize_state(CPUState *cpu)
     if (whpx_enabled()) {
         whpx_cpu_synchronize_state(cpu);
     }
-    if (hvf_enabled()) {
-        hvf_cpu_synchronize_state(cpu);
-    }
 }
 
 static inline void cpu_synchronize_post_reset(CPUState *cpu)
@@ -44,9 +40,6 @@ static inline void cpu_synchronize_post_reset(CPUState *cpu)
     if (whpx_enabled()) {
         whpx_cpu_synchronize_post_reset(cpu);
     }
-    if (hvf_enabled()) {
-        hvf_cpu_synchronize_post_reset(cpu);
-    }
 }
 
 static inline void cpu_synchronize_post_init(CPUState *cpu)
@@ -60,9 +53,6 @@ static inline void cpu_synchronize_post_init(CPUState *cpu)
     if (whpx_enabled()) {
         whpx_cpu_synchronize_post_init(cpu);
     }
-    if (hvf_enabled()) {
-        hvf_cpu_synchronize_post_init(cpu);
-    }
 }
 
 static inline void cpu_synchronize_pre_loadvm(CPUState *cpu)

@@ -304,6 +304,7 @@ static void do_hvf_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg
 {
     CPUState *cpu_state = cpu;
     hvf_put_registers(cpu_state);
+    wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_CTLS, 0);
     cpu_state->vcpu_dirty = false;
 }
 
@@ -344,7 +345,7 @@ static bool ept_emulation_fault(hvf_slot *slot, uint64_t gpa, uint64_t ept_qual)
     if (slot->flags & HVF_SLOT_LOG) {
         memory_region_set_dirty(slot->region, gpa - slot->start, 1);
         hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
-                      HV_MEMORY_READ | HV_MEMORY_WRITE);
+                      HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC);
     }
 }
 
@@ -380,12 +381,12 @@ static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
     if (on) {
         slot->flags |= HVF_SLOT_LOG;
         hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
-                      HV_MEMORY_READ);
+                      HV_MEMORY_READ | HV_MEMORY_EXEC);
     /* stop tracking region*/
     } else {
         slot->flags &= ~HVF_SLOT_LOG;
         hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
-                      HV_MEMORY_READ | HV_MEMORY_WRITE);
+                      HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC);
     }
 }
 
@@ -416,7 +417,9 @@ static void hvf_log_sync(MemoryListener *listener,
      * sync of dirty pages is handled elsewhere; just make sure we keep
      * tracking the region.
      */
+#ifndef XBOX
     hvf_set_dirty_tracking(section, 1);
+#endif
 }
 
 static void hvf_region_add(MemoryListener *listener,
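Background (not part of the patch): hv_vm_protect() replaces the protection on a guest-physical range outright rather than masking individual bits, so every call must restate HV_MEMORY_EXEC or guest code resident in that RAM faults on instruction fetch; that is what the added "| HV_MEMORY_EXEC" terms above address. A condensed sketch of the dirty-logging toggle under that rule follows; the helper name is hypothetical.

/* Hypothetical helper: write-protect a slot while dirty logging is on,
 * but always keep it readable and executable. */
static void slot_set_dirty_logging(hvf_slot *slot, bool on)
{
    hv_memory_flags_t flags = HV_MEMORY_READ | HV_MEMORY_EXEC;
    if (!on) {
        flags |= HV_MEMORY_WRITE;   /* logging off: allow writes again */
    }
    hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size, flags);
}
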
@@ -1,70 +0,0 @@
-/*
- * QEMU Hypervisor.framework (HVF) support
- *
- * Copyright Google Inc., 2017
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-/* header to be included in HVF-specific code */
-#ifndef _HVF_INT_H
-#define _HVF_INT_H
-
-#include "sysemu/hvf.h"
-
-#ifdef CONFIG_HVF
-#include "target/i386/cpu.h"
-#include <Hypervisor/hv.h>
-#include <Hypervisor/hv_vmx.h>
-#include <Hypervisor/hv_error.h>
-#endif
-
-/* hvf_slot flags */
-#define HVF_SLOT_LOG (1 << 0)
-
-typedef struct hvf_slot {
-    uint64_t start;
-    uint64_t size;
-    uint8_t *mem;
-    int slot_id;
-    uint32_t flags;
-    MemoryRegion *region;
-} hvf_slot;
-
-typedef struct hvf_vcpu_caps {
-    uint64_t vmx_cap_pinbased;
-    uint64_t vmx_cap_procbased;
-    uint64_t vmx_cap_procbased2;
-    uint64_t vmx_cap_entry;
-    uint64_t vmx_cap_exit;
-    uint64_t vmx_cap_preemption_timer;
-} hvf_vcpu_caps;
-
-typedef struct HVFState {
-    AccelState parent;
-    hvf_slot slots[32];
-    int num_slots;
-
-    hvf_vcpu_caps *hvf_caps;
-} HVFState;
-extern HVFState *hvf_state;
-
-void hvf_set_phys_mem(MemoryRegionSection *, bool);
-void hvf_handle_io(CPUArchState *, uint16_t, void *,
-                   int, int, int);
-hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);
-
-void _hvf_cpu_synchronize_post_init(CPUState *, run_on_cpu_data);
-
-void vmx_update_tpr(CPUState *);
-void update_apic_tpr(CPUState *);
-void vmx_clear_int_window_exiting(CPUState *cpu);
-
-#define TYPE_HVF_ACCEL ACCEL_CLASS_NAME("hvf")
-
-#define HVF_STATE(obj) \
-    OBJECT_CHECK(HVFState, (obj), TYPE_HVF_ACCEL)
-
-#endif

@@ -1688,10 +1688,10 @@ calc_addr:
     }
 }
 
-target_ulong get_reg_ref(CPUX86State *env, int reg, int rex_present,
+uintptr_t get_reg_ref(CPUX86State *env, int reg, int rex_present,
                          int is_extended, int size)
 {
-    target_ulong ptr = 0;
+    uintptr_t ptr = 0;
 
     if (is_extended) {
         reg |= R_R8;
@@ -1700,13 +1700,13 @@ target_ulong get_reg_ref(CPUX86State *env, int reg, int rex_present,
     switch (size) {
     case 1:
         if (is_extended || reg < 4 || rex_present) {
-            ptr = (target_ulong)&RL(env, reg);
+            ptr = (uintptr_t)&RL(env, reg);
         } else {
-            ptr = (target_ulong)&RH(env, reg - 4);
+            ptr = (uintptr_t)&RH(env, reg - 4);
         }
         break;
     default:
-        ptr = (target_ulong)&RRX(env, reg);
+        ptr = (uintptr_t)&RRX(env, reg);
        break;
     }
     return ptr;

@@ -266,7 +266,7 @@ typedef struct x86_decode_op {
     int reg;
     target_ulong val;
 
-    target_ulong ptr;
+    uintptr_t ptr;
 } x86_decode_op;
 
 typedef struct x86_decode {
@@ -303,7 +303,7 @@ uint64_t sign(uint64_t val, int size);
 
 uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode);
 
-target_ulong get_reg_ref(CPUX86State *env, int reg, int rex_present,
+uintptr_t get_reg_ref(CPUX86State *env, int reg, int rex_present,
                          int is_extended, int size);
 target_ulong get_reg_val(CPUX86State *env, int reg, int rex_present,
                          int is_extended, int size);
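Background (not part of the patch): the "fix 32b" part replaces target_ulong with uintptr_t wherever a host pointer is stored. For a 32-bit guest such as the i386 Xbox target, target_ulong is only 32 bits wide, so casting a 64-bit host pointer (for example &RL(env, reg)) through it truncates the address; uintptr_t is defined to round-trip any object pointer. A standalone illustration of the difference, written for this note rather than taken from the tree:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int value = 42;

    /* What the old code effectively did on a 32-bit target: squeeze a
     * host pointer into a 32-bit integer (target_ulong on i386). */
    uint32_t narrow = (uint32_t)(uintptr_t)&value;

    /* What the patched code does: keep the pointer in a type that is
     * guaranteed to be wide enough for it. */
    uintptr_t wide = (uintptr_t)&value;

    assert((int *)wide == &value);              /* always recoverable */
    if ((int *)(uintptr_t)narrow != &value) {   /* only if it fit in 32 bits */
        printf("pointer was truncated\n");
    }
    return 0;
}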