mirror of https://github.com/xemu-project/xemu.git
Initial support for the HVF accelerator
-----BEGIN PGP SIGNATURE----- iQFIBAABCAAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAlo+CvYUHHBib256aW5p QHJlZGhhdC5jb20ACgkQv/vSX3jHroPZpAf9E93cRgy+l7EyjLYp1jNrRVbE/57N g3Z0l9VN3QIErtVpvysp3nrfwSlPCrNdc2tFNq58HHUIrwqTsi2dQeBMXv1zLLUp BscjBF6XgG35lVZG/mUngYeUlZMI+ERhkMa8clYcW7zPEbmGRQB5fhvsR4eak3Ub j3b1ydZiQF6sooJIOJq/7Qpn2YkPaNOYOKV+VkEZQJhQkJMV9BzEEbjzbrzmBN/u xsUBwIJRHfdiyHKv6VwxdMvH9bB1LE7X6hpfXmsep71PYA4YJM7GmotbwjODGszq PKNOnrVqHfdvxBKkgfGypuEI4D9tczDgducfq0hcpjpNF+x3lMgPXS/qtg== =+nhI -----END PGP SIGNATURE----- Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream-hvf' into staging Initial support for the HVF accelerator # gpg: Signature made Sat 23 Dec 2017 07:51:18 GMT # gpg: using RSA key 0xBFFBD25F78C7AE83 # gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>" # gpg: aka "Paolo Bonzini <pbonzini@redhat.com>" # Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1 # Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83 * remotes/bonzini/tags/for-upstream-hvf: i386: hvf: cleanup x86_gen.h i386: hvf: remove VM_PANIC from "in" i386: hvf: remove addr_t i386: hvf: simplify flag handling i386: hvf: abort on decoding error i386: hvf: remove ZERO_INIT macro i386: hvf: remove more dead emulator code i386: hvf: unify register enums between HVF and the rest i386: hvf: header cleanup i386: hvf: move all hvf files in the same directory i386: hvf: inject General Protection Fault when vmexit through vmcall i386: hvf: refactor event injection code for hvf i386: hvf: implement vga dirty page tracking i386: refactor KVM cpuid code so that it applies to hvf as well i386: hvf: implement hvf_get_supported_cpuid i386: hvf: use new helper functions for put/get xsave i386: hvf: fix licensing issues; isolate task handling code (GPL v2-only) i386: hvf: add code base from Google's QEMU repository apic: add function to apic that will be used by hvf Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
commit
1e10eb532c
|
@ -1,3 +1,4 @@
|
|||
obj-$(call lnot,$(CONFIG_HAX)) += hax-stub.o
|
||||
obj-$(call lnot,$(CONFIG_HVF)) += hvf-stub.o
|
||||
obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
|
||||
obj-$(call lnot,$(CONFIG_TCG)) += tcg-stub.o
|
||||
|
|
|
@ -0,0 +1,31 @@
|
|||
/*
|
||||
* QEMU HVF support
|
||||
*
|
||||
* Copyright 2017 Red Hat, Inc.
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2 or later, as published by the Free Software Foundation,
|
||||
* and may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* See the COPYING file in the top-level directory.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu-common.h"
|
||||
#include "cpu.h"
|
||||
#include "sysemu/hvf.h"
|
||||
|
||||
int hvf_init_vcpu(CPUState *cpu)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
int hvf_vcpu_exec(CPUState *cpu)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
void hvf_vcpu_destroy(CPUState *cpu)
|
||||
{
|
||||
}
|
|
@ -211,6 +211,17 @@ supported_xen_target() {
|
|||
return 1
|
||||
}
|
||||
|
||||
supported_hvf_target() {
|
||||
test "$hvf" = "yes" || return 1
|
||||
glob "$1" "*-softmmu" || return 1
|
||||
case "${1%-softmmu}" in
|
||||
x86_64)
|
||||
return 0
|
||||
;;
|
||||
esac
|
||||
return 1
|
||||
}
|
||||
|
||||
supported_target() {
|
||||
case "$1" in
|
||||
*-softmmu)
|
||||
|
@ -236,6 +247,7 @@ supported_target() {
|
|||
supported_kvm_target "$1" && return 0
|
||||
supported_xen_target "$1" && return 0
|
||||
supported_hax_target "$1" && return 0
|
||||
supported_hvf_target "$1" && return 0
|
||||
print_error "TCG disabled, but hardware accelerator not available for '$target'"
|
||||
return 1
|
||||
}
|
||||
|
@ -325,6 +337,7 @@ vhost_vsock="no"
|
|||
vhost_user=""
|
||||
kvm="no"
|
||||
hax="no"
|
||||
hvf="no"
|
||||
rdma=""
|
||||
gprof="no"
|
||||
debug_tcg="no"
|
||||
|
@ -741,6 +754,7 @@ Darwin)
|
|||
bsd="yes"
|
||||
darwin="yes"
|
||||
hax="yes"
|
||||
hvf="yes"
|
||||
LDFLAGS_SHARED="-bundle -undefined dynamic_lookup"
|
||||
if [ "$cpu" = "x86_64" ] ; then
|
||||
QEMU_CFLAGS="-arch x86_64 $QEMU_CFLAGS"
|
||||
|
@ -1036,6 +1050,10 @@ for opt do
|
|||
;;
|
||||
--enable-hax) hax="yes"
|
||||
;;
|
||||
--disable-hvf) hvf="no"
|
||||
;;
|
||||
--enable-hvf) hvf="yes"
|
||||
;;
|
||||
--disable-tcg-interpreter) tcg_interpreter="no"
|
||||
;;
|
||||
--enable-tcg-interpreter) tcg_interpreter="yes"
|
||||
|
@ -1529,6 +1547,7 @@ disabled with --disable-FEATURE, default is enabled if available:
|
|||
bluez bluez stack connectivity
|
||||
kvm KVM acceleration support
|
||||
hax HAX acceleration support
|
||||
hvf Hypervisor.framework acceleration support
|
||||
rdma RDMA-based migration support
|
||||
vde support for vde network
|
||||
netmap support for netmap network
|
||||
|
@ -5055,6 +5074,21 @@ then
|
|||
fi
|
||||
|
||||
|
||||
#################################################
|
||||
# Check to see if we have the Hypervisor framework
|
||||
if [ "$darwin" == "yes" ] ; then
|
||||
cat > $TMPC << EOF
|
||||
#include <Hypervisor/hv.h>
|
||||
int main() { return 0;}
|
||||
EOF
|
||||
if ! compile_object ""; then
|
||||
hvf='no'
|
||||
else
|
||||
hvf='yes'
|
||||
LDFLAGS="-framework Hypervisor $LDFLAGS"
|
||||
fi
|
||||
fi
|
||||
|
||||
#################################################
|
||||
# Sparc implicitly links with --relax, which is
|
||||
# incompatible with -r, so --no-relax should be
|
||||
|
@ -5530,6 +5564,7 @@ echo "ATTR/XATTR support $attr"
|
|||
echo "Install blobs $blobs"
|
||||
echo "KVM support $kvm"
|
||||
echo "HAX support $hax"
|
||||
echo "HVF support $hvf"
|
||||
echo "TCG support $tcg"
|
||||
if test "$tcg" = "yes" ; then
|
||||
echo "TCG debug enabled $debug_tcg"
|
||||
|
@ -6602,6 +6637,9 @@ fi
|
|||
if supported_hax_target $target; then
|
||||
echo "CONFIG_HAX=y" >> $config_target_mak
|
||||
fi
|
||||
if supported_hvf_target $target; then
|
||||
echo "CONFIG_HVF=y" >> $config_target_mak
|
||||
fi
|
||||
if test "$target_bigendian" = "yes" ; then
|
||||
echo "TARGET_WORDS_BIGENDIAN=y" >> $config_target_mak
|
||||
fi
|
||||
|
|
86
cpus.c
86
cpus.c
|
@ -37,6 +37,7 @@
|
|||
#include "sysemu/hw_accel.h"
|
||||
#include "sysemu/kvm.h"
|
||||
#include "sysemu/hax.h"
|
||||
#include "sysemu/hvf.h"
|
||||
#include "qmp-commands.h"
|
||||
#include "exec/exec-all.h"
|
||||
|
||||
|
@ -900,6 +901,10 @@ void cpu_synchronize_all_states(void)
|
|||
|
||||
CPU_FOREACH(cpu) {
|
||||
cpu_synchronize_state(cpu);
|
||||
/* TODO: move to cpu_synchronize_state() */
|
||||
if (hvf_enabled()) {
|
||||
hvf_cpu_synchronize_state(cpu);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -909,6 +914,10 @@ void cpu_synchronize_all_post_reset(void)
|
|||
|
||||
CPU_FOREACH(cpu) {
|
||||
cpu_synchronize_post_reset(cpu);
|
||||
/* TODO: move to cpu_synchronize_post_reset() */
|
||||
if (hvf_enabled()) {
|
||||
hvf_cpu_synchronize_post_reset(cpu);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -918,6 +927,10 @@ void cpu_synchronize_all_post_init(void)
|
|||
|
||||
CPU_FOREACH(cpu) {
|
||||
cpu_synchronize_post_init(cpu);
|
||||
/* TODO: move to cpu_synchronize_post_init() */
|
||||
if (hvf_enabled()) {
|
||||
hvf_cpu_synchronize_post_init(cpu);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1107,6 +1120,14 @@ static void qemu_kvm_wait_io_event(CPUState *cpu)
|
|||
qemu_wait_io_event_common(cpu);
|
||||
}
|
||||
|
||||
static void qemu_hvf_wait_io_event(CPUState *cpu)
|
||||
{
|
||||
while (cpu_thread_is_idle(cpu)) {
|
||||
qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
|
||||
}
|
||||
qemu_wait_io_event_common(cpu);
|
||||
}
|
||||
|
||||
static void *qemu_kvm_cpu_thread_fn(void *arg)
|
||||
{
|
||||
CPUState *cpu = arg;
|
||||
|
@ -1444,6 +1465,48 @@ static void *qemu_hax_cpu_thread_fn(void *arg)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
/* The HVF-specific vCPU thread function. This one should only run when the host
|
||||
* CPU supports the VMX "unrestricted guest" feature. */
|
||||
static void *qemu_hvf_cpu_thread_fn(void *arg)
|
||||
{
|
||||
CPUState *cpu = arg;
|
||||
|
||||
int r;
|
||||
|
||||
assert(hvf_enabled());
|
||||
|
||||
rcu_register_thread();
|
||||
|
||||
qemu_mutex_lock_iothread();
|
||||
qemu_thread_get_self(cpu->thread);
|
||||
|
||||
cpu->thread_id = qemu_get_thread_id();
|
||||
cpu->can_do_io = 1;
|
||||
current_cpu = cpu;
|
||||
|
||||
hvf_init_vcpu(cpu);
|
||||
|
||||
/* signal CPU creation */
|
||||
cpu->created = true;
|
||||
qemu_cond_signal(&qemu_cpu_cond);
|
||||
|
||||
do {
|
||||
if (cpu_can_run(cpu)) {
|
||||
r = hvf_vcpu_exec(cpu);
|
||||
if (r == EXCP_DEBUG) {
|
||||
cpu_handle_guest_debug(cpu);
|
||||
}
|
||||
}
|
||||
qemu_hvf_wait_io_event(cpu);
|
||||
} while (!cpu->unplug || cpu_can_run(cpu));
|
||||
|
||||
hvf_vcpu_destroy(cpu);
|
||||
cpu->created = false;
|
||||
qemu_cond_signal(&qemu_cpu_cond);
|
||||
qemu_mutex_unlock_iothread();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#ifdef _WIN32
|
||||
static void CALLBACK dummy_apc_func(ULONG_PTR unused)
|
||||
{
|
||||
|
@ -1761,6 +1824,27 @@ static void qemu_kvm_start_vcpu(CPUState *cpu)
|
|||
}
|
||||
}
|
||||
|
||||
static void qemu_hvf_start_vcpu(CPUState *cpu)
|
||||
{
|
||||
char thread_name[VCPU_THREAD_NAME_SIZE];
|
||||
|
||||
/* HVF currently does not support TCG, and only runs in
|
||||
* unrestricted-guest mode. */
|
||||
assert(hvf_enabled());
|
||||
|
||||
cpu->thread = g_malloc0(sizeof(QemuThread));
|
||||
cpu->halt_cond = g_malloc0(sizeof(QemuCond));
|
||||
qemu_cond_init(cpu->halt_cond);
|
||||
|
||||
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
|
||||
cpu->cpu_index);
|
||||
qemu_thread_create(cpu->thread, thread_name, qemu_hvf_cpu_thread_fn,
|
||||
cpu, QEMU_THREAD_JOINABLE);
|
||||
while (!cpu->created) {
|
||||
qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
|
||||
}
|
||||
}
|
||||
|
||||
static void qemu_dummy_start_vcpu(CPUState *cpu)
|
||||
{
|
||||
char thread_name[VCPU_THREAD_NAME_SIZE];
|
||||
|
@ -1795,6 +1879,8 @@ void qemu_init_vcpu(CPUState *cpu)
|
|||
qemu_kvm_start_vcpu(cpu);
|
||||
} else if (hax_enabled()) {
|
||||
qemu_hax_start_vcpu(cpu);
|
||||
} else if (hvf_enabled()) {
|
||||
qemu_hvf_start_vcpu(cpu);
|
||||
} else if (tcg_enabled()) {
|
||||
qemu_tcg_init_vcpu(cpu);
|
||||
} else {
|
||||
|
|
|
@ -305,6 +305,18 @@ static void apic_set_tpr(APICCommonState *s, uint8_t val)
|
|||
}
|
||||
}
|
||||
|
||||
int apic_get_highest_priority_irr(DeviceState *dev)
|
||||
{
|
||||
APICCommonState *s;
|
||||
|
||||
if (!dev) {
|
||||
/* no interrupts */
|
||||
return -1;
|
||||
}
|
||||
s = APIC_COMMON(dev);
|
||||
return get_highest_priority_int(s->irr);
|
||||
}
|
||||
|
||||
static uint8_t apic_get_tpr(APICCommonState *s)
|
||||
{
|
||||
apic_sync_vapic(s, SYNC_FROM_VAPIC);
|
||||
|
|
|
@ -20,6 +20,7 @@ void apic_init_reset(DeviceState *s);
|
|||
void apic_sipi(DeviceState *s);
|
||||
void apic_poll_irq(DeviceState *d);
|
||||
void apic_designate_bsp(DeviceState *d, bool bsp);
|
||||
int apic_get_highest_priority_irr(DeviceState *dev);
|
||||
|
||||
/* pc.c */
|
||||
DeviceState *cpu_get_current_apic(void);
|
||||
|
|
|
@ -36,6 +36,7 @@ typedef struct FWCfgIoState FWCfgIoState;
|
|||
typedef struct FWCfgMemState FWCfgMemState;
|
||||
typedef struct FWCfgState FWCfgState;
|
||||
typedef struct HCIInfo HCIInfo;
|
||||
typedef struct HVFX86EmulatorState HVFX86EmulatorState;
|
||||
typedef struct I2CBus I2CBus;
|
||||
typedef struct I2SCodec I2SCodec;
|
||||
typedef struct ISABus ISABus;
|
||||
|
|
|
@ -423,6 +423,8 @@ struct CPUState {
|
|||
* unnecessary flushes.
|
||||
*/
|
||||
uint16_t pending_tlb_flush;
|
||||
|
||||
int hvf_fd;
|
||||
};
|
||||
|
||||
QTAILQ_HEAD(CPUTailQ, CPUState);
|
||||
|
|
|
@ -0,0 +1,107 @@
|
|||
/*
|
||||
* QEMU Hypervisor.framework (HVF) support
|
||||
*
|
||||
* Copyright Google Inc., 2017
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*
|
||||
*/
|
||||
|
||||
/* header to be included in non-HVF-specific code */
|
||||
#ifndef _HVF_H
|
||||
#define _HVF_H
|
||||
|
||||
#include "config-host.h"
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu-common.h"
|
||||
#include "qemu/bitops.h"
|
||||
#include "exec/memory.h"
|
||||
#include "sysemu/accel.h"
|
||||
|
||||
extern int hvf_disabled;
|
||||
#ifdef CONFIG_HVF
|
||||
#include <Hypervisor/hv.h>
|
||||
#include <Hypervisor/hv_vmx.h>
|
||||
#include <Hypervisor/hv_error.h>
|
||||
#include "target/i386/cpu.h"
|
||||
#include "hw/hw.h"
|
||||
uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx,
|
||||
int reg);
|
||||
#define hvf_enabled() !hvf_disabled
|
||||
#else
|
||||
#define hvf_enabled() 0
|
||||
#define hvf_get_supported_cpuid(func, idx, reg) 0
|
||||
#endif
|
||||
|
||||
/* hvf_slot flags */
|
||||
#define HVF_SLOT_LOG (1 << 0)
|
||||
|
||||
typedef struct hvf_slot {
|
||||
uint64_t start;
|
||||
uint64_t size;
|
||||
uint8_t *mem;
|
||||
int slot_id;
|
||||
uint32_t flags;
|
||||
MemoryRegion *region;
|
||||
} hvf_slot;
|
||||
|
||||
typedef struct hvf_vcpu_caps {
|
||||
uint64_t vmx_cap_pinbased;
|
||||
uint64_t vmx_cap_procbased;
|
||||
uint64_t vmx_cap_procbased2;
|
||||
uint64_t vmx_cap_entry;
|
||||
uint64_t vmx_cap_exit;
|
||||
uint64_t vmx_cap_preemption_timer;
|
||||
} hvf_vcpu_caps;
|
||||
|
||||
typedef struct HVFState {
|
||||
AccelState parent;
|
||||
hvf_slot slots[32];
|
||||
int num_slots;
|
||||
|
||||
hvf_vcpu_caps *hvf_caps;
|
||||
} HVFState;
|
||||
extern HVFState *hvf_state;
|
||||
|
||||
void hvf_set_phys_mem(MemoryRegionSection *, bool);
|
||||
void hvf_handle_io(CPUArchState *, uint16_t, void *,
|
||||
int, int, int);
|
||||
hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);
|
||||
|
||||
/* Disable HVF if |disable| is 1, otherwise, enable it iff it is supported by
|
||||
* the host CPU. Use hvf_enabled() after this to get the result. */
|
||||
void hvf_disable(int disable);
|
||||
|
||||
/* Returns non-0 if the host CPU supports the VMX "unrestricted guest" feature
|
||||
* which allows the virtual CPU to directly run in "real mode". If true, this
|
||||
* allows QEMU to run several vCPU threads in parallel (see cpus.c). Otherwise,
|
||||
* only a a single TCG thread can run, and it will call HVF to run the current
|
||||
* instructions, except in case of "real mode" (paging disabled, typically at
|
||||
* boot time), or MMIO operations. */
|
||||
|
||||
int hvf_sync_vcpus(void);
|
||||
|
||||
int hvf_init_vcpu(CPUState *);
|
||||
int hvf_vcpu_exec(CPUState *);
|
||||
int hvf_smp_cpu_exec(CPUState *);
|
||||
void hvf_cpu_synchronize_state(CPUState *);
|
||||
void hvf_cpu_synchronize_post_reset(CPUState *);
|
||||
void hvf_cpu_synchronize_post_init(CPUState *);
|
||||
void _hvf_cpu_synchronize_post_init(CPUState *, run_on_cpu_data);
|
||||
|
||||
void hvf_vcpu_destroy(CPUState *);
|
||||
void hvf_raise_event(CPUState *);
|
||||
/* void hvf_reset_vcpu_state(void *opaque); */
|
||||
void hvf_reset_vcpu(CPUState *);
|
||||
void vmx_update_tpr(CPUState *);
|
||||
void update_apic_tpr(CPUState *);
|
||||
int hvf_put_registers(CPUState *);
|
||||
void vmx_clear_int_window_exiting(CPUState *cpu);
|
||||
|
||||
#define TYPE_HVF_ACCEL ACCEL_CLASS_NAME("hvf")
|
||||
|
||||
#define HVF_STATE(obj) \
|
||||
OBJECT_CHECK(HVFState, (obj), TYPE_HVF_ACCEL)
|
||||
|
||||
#endif
|
|
@ -31,7 +31,7 @@ DEF("machine", HAS_ARG, QEMU_OPTION_machine, \
|
|||
"-machine [type=]name[,prop[=value][,...]]\n"
|
||||
" selects emulated machine ('-machine help' for list)\n"
|
||||
" property accel=accel1[:accel2[:...]] selects accelerator\n"
|
||||
" supported accelerators are kvm, xen, hax or tcg (default: tcg)\n"
|
||||
" supported accelerators are kvm, xen, hax, hvf or tcg (default: tcg)\n"
|
||||
" kernel_irqchip=on|off|split controls accelerated irqchip support (default=off)\n"
|
||||
" vmport=on|off|auto controls emulation of vmport (default: auto)\n"
|
||||
" kvm_shadow_mem=size of KVM shadow MMU in bytes\n"
|
||||
|
@ -66,7 +66,7 @@ Supported machine properties are:
|
|||
@table @option
|
||||
@item accel=@var{accels1}[:@var{accels2}[:...]]
|
||||
This is used to enable an accelerator. Depending on the target architecture,
|
||||
kvm, xen, hax or tcg can be available. By default, tcg is used. If there is
|
||||
kvm, xen, hax, hvf or tcg can be available. By default, tcg is used. If there is
|
||||
more than one accelerator specified, the next one is used if the previous one
|
||||
fails to initialize.
|
||||
@item kernel_irqchip=on|off
|
||||
|
@ -126,13 +126,13 @@ ETEXI
|
|||
|
||||
DEF("accel", HAS_ARG, QEMU_OPTION_accel,
|
||||
"-accel [accel=]accelerator[,thread=single|multi]\n"
|
||||
" select accelerator (kvm, xen, hax or tcg; use 'help' for a list)\n"
|
||||
" thread=single|multi (enable multi-threaded TCG)\n", QEMU_ARCH_ALL)
|
||||
" select accelerator (kvm, xen, hax, hvf or tcg; use 'help' for a list)\n"
|
||||
" thread=single|multi (enable multi-threaded TCG)", QEMU_ARCH_ALL)
|
||||
STEXI
|
||||
@item -accel @var{name}[,prop=@var{value}[,...]]
|
||||
@findex -accel
|
||||
This is used to enable an accelerator. Depending on the target architecture,
|
||||
kvm, xen, hax or tcg can be available. By default, tcg is used. If there is
|
||||
kvm, xen, hax, hvf or tcg can be available. By default, tcg is used. If there is
|
||||
more than one accelerator specified, the next one is used if the previous one
|
||||
fails to initialize.
|
||||
@table @option
|
||||
|
|
|
@ -12,4 +12,5 @@ obj-$(CONFIG_HAX) += hax-all.o hax-mem.o hax-windows.o
|
|||
endif
|
||||
ifdef CONFIG_DARWIN
|
||||
obj-$(CONFIG_HAX) += hax-all.o hax-mem.o hax-darwin.o
|
||||
obj-$(CONFIG_HVF) += hvf/
|
||||
endif
|
||||
|
|
|
@ -47,7 +47,7 @@ typedef struct X86CPUDefinition X86CPUDefinition;
|
|||
/**
|
||||
* X86CPUClass:
|
||||
* @cpu_def: CPU model definition
|
||||
* @kvm_required: Whether CPU model requires KVM to be enabled.
|
||||
* @host_cpuid_required: Whether CPU model requires cpuid from host.
|
||||
* @ordering: Ordering on the "-cpu help" CPU model list.
|
||||
* @migration_safe: See CpuDefinitionInfo::migration_safe
|
||||
* @static_model: See CpuDefinitionInfo::static
|
||||
|
@ -66,7 +66,7 @@ typedef struct X86CPUClass {
|
|||
*/
|
||||
X86CPUDefinition *cpu_def;
|
||||
|
||||
bool kvm_required;
|
||||
bool host_cpuid_required;
|
||||
int ordering;
|
||||
bool migration_safe;
|
||||
bool static_model;
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
#include "cpu.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "sysemu/kvm.h"
|
||||
#include "sysemu/hvf.h"
|
||||
#include "sysemu/cpus.h"
|
||||
#include "kvm_i386.h"
|
||||
|
||||
|
@ -615,6 +616,11 @@ static uint32_t xsave_area_size(uint64_t mask)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static inline bool accel_uses_host_cpuid(void)
|
||||
{
|
||||
return kvm_enabled() || hvf_enabled();
|
||||
}
|
||||
|
||||
static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
|
||||
{
|
||||
return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
|
||||
|
@ -1686,10 +1692,15 @@ static void max_x86_cpu_initfn(Object *obj)
|
|||
*/
|
||||
cpu->max_features = true;
|
||||
|
||||
if (kvm_enabled()) {
|
||||
if (accel_uses_host_cpuid()) {
|
||||
char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
|
||||
char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
|
||||
int family, model, stepping;
|
||||
X86CPUDefinition host_cpudef = { };
|
||||
uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
|
||||
|
||||
host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
|
||||
x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
|
||||
|
||||
host_vendor_fms(vendor, &family, &model, &stepping);
|
||||
|
||||
|
@ -1703,12 +1714,21 @@ static void max_x86_cpu_initfn(Object *obj)
|
|||
object_property_set_str(OBJECT(cpu), model_id, "model-id",
|
||||
&error_abort);
|
||||
|
||||
env->cpuid_min_level =
|
||||
kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
|
||||
env->cpuid_min_xlevel =
|
||||
kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
|
||||
env->cpuid_min_xlevel2 =
|
||||
kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
|
||||
if (kvm_enabled()) {
|
||||
env->cpuid_min_level =
|
||||
kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
|
||||
env->cpuid_min_xlevel =
|
||||
kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
|
||||
env->cpuid_min_xlevel2 =
|
||||
kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
|
||||
} else {
|
||||
env->cpuid_min_level =
|
||||
hvf_get_supported_cpuid(0x0, 0, R_EAX);
|
||||
env->cpuid_min_xlevel =
|
||||
hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
|
||||
env->cpuid_min_xlevel2 =
|
||||
hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
|
||||
}
|
||||
|
||||
if (lmce_supported()) {
|
||||
object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
|
||||
|
@ -1734,18 +1754,21 @@ static const TypeInfo max_x86_cpu_type_info = {
|
|||
.class_init = max_x86_cpu_class_init,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_KVM
|
||||
|
||||
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
|
||||
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
|
||||
{
|
||||
X86CPUClass *xcc = X86_CPU_CLASS(oc);
|
||||
|
||||
xcc->kvm_required = true;
|
||||
xcc->host_cpuid_required = true;
|
||||
xcc->ordering = 8;
|
||||
|
||||
xcc->model_description =
|
||||
"KVM processor with all supported host features "
|
||||
"(only available in KVM mode)";
|
||||
if (kvm_enabled()) {
|
||||
xcc->model_description =
|
||||
"KVM processor with all supported host features ";
|
||||
} else if (hvf_enabled()) {
|
||||
xcc->model_description =
|
||||
"HVF processor with all supported host features ";
|
||||
}
|
||||
}
|
||||
|
||||
static const TypeInfo host_x86_cpu_type_info = {
|
||||
|
@ -1767,7 +1790,7 @@ static void report_unavailable_features(FeatureWord w, uint32_t mask)
|
|||
assert(reg);
|
||||
warn_report("%s doesn't support requested feature: "
|
||||
"CPUID.%02XH:%s%s%s [bit %d]",
|
||||
kvm_enabled() ? "host" : "TCG",
|
||||
accel_uses_host_cpuid() ? "host" : "TCG",
|
||||
f->cpuid_eax, reg,
|
||||
f->feat_names[i] ? "." : "",
|
||||
f->feat_names[i] ? f->feat_names[i] : "", i);
|
||||
|
@ -2218,7 +2241,7 @@ static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
|
|||
Error *err = NULL;
|
||||
strList **next = missing_feats;
|
||||
|
||||
if (xcc->kvm_required && !kvm_enabled()) {
|
||||
if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
|
||||
strList *new = g_new0(strList, 1);
|
||||
new->value = g_strdup("kvm");
|
||||
*missing_feats = new;
|
||||
|
@ -2380,6 +2403,10 @@ static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
|
|||
r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
|
||||
wi->cpuid_ecx,
|
||||
wi->cpuid_reg);
|
||||
} else if (hvf_enabled()) {
|
||||
r = hvf_get_supported_cpuid(wi->cpuid_eax,
|
||||
wi->cpuid_ecx,
|
||||
wi->cpuid_reg);
|
||||
} else if (tcg_enabled()) {
|
||||
r = wi->tcg_features;
|
||||
} else {
|
||||
|
@ -2439,6 +2466,7 @@ static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
|
|||
}
|
||||
|
||||
/* Special cases not set in the X86CPUDefinition structs: */
|
||||
/* TODO: in-kernel irqchip for hvf */
|
||||
if (kvm_enabled()) {
|
||||
if (!kvm_irqchip_in_kernel()) {
|
||||
x86_cpu_change_kvm_default("x2apic", "off");
|
||||
|
@ -2459,7 +2487,7 @@ static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
|
|||
* when doing cross vendor migration
|
||||
*/
|
||||
vendor = def->vendor;
|
||||
if (kvm_enabled()) {
|
||||
if (accel_uses_host_cpuid()) {
|
||||
uint32_t ebx = 0, ecx = 0, edx = 0;
|
||||
host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
|
||||
x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
|
||||
|
@ -2910,6 +2938,11 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
|
|||
*ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
|
||||
*ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
|
||||
*edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
|
||||
} else if (hvf_enabled() && cpu->enable_pmu) {
|
||||
*eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
|
||||
*ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
|
||||
*ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
|
||||
*edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
|
||||
} else {
|
||||
*eax = 0;
|
||||
*ebx = 0;
|
||||
|
@ -3252,6 +3285,9 @@ static void x86_cpu_reset(CPUState *s)
|
|||
memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
|
||||
memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
|
||||
|
||||
env->interrupt_injected = -1;
|
||||
env->exception_injected = -1;
|
||||
env->nmi_injected = false;
|
||||
#if !defined(CONFIG_USER_ONLY)
|
||||
/* We hard-wire the BSP to the first CPU. */
|
||||
apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
|
||||
|
@ -3261,6 +3297,9 @@ static void x86_cpu_reset(CPUState *s)
|
|||
if (kvm_enabled()) {
|
||||
kvm_arch_reset_vcpu(cpu);
|
||||
}
|
||||
else if (hvf_enabled()) {
|
||||
hvf_reset_vcpu(s);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -3300,6 +3339,7 @@ APICCommonClass *apic_get_class(void)
|
|||
{
|
||||
const char *apic_type = "apic";
|
||||
|
||||
/* TODO: in-kernel irqchip for hvf */
|
||||
if (kvm_apic_in_kernel()) {
|
||||
apic_type = "kvm-apic";
|
||||
} else if (xen_enabled()) {
|
||||
|
@ -3613,7 +3653,7 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
|
|||
Error *local_err = NULL;
|
||||
static bool ht_warned;
|
||||
|
||||
if (xcc->kvm_required && !kvm_enabled()) {
|
||||
if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
|
||||
char *name = x86_cpu_class_get_model_name(xcc);
|
||||
error_setg(&local_err, "CPU model '%s' requires KVM", name);
|
||||
g_free(name);
|
||||
|
@ -3635,7 +3675,7 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
|
|||
x86_cpu_report_filtered_features(cpu);
|
||||
if (cpu->enforce_cpuid) {
|
||||
error_setg(&local_err,
|
||||
kvm_enabled() ?
|
||||
accel_uses_host_cpuid() ?
|
||||
"Host doesn't support requested features" :
|
||||
"TCG doesn't support requested features");
|
||||
goto out;
|
||||
|
@ -3658,7 +3698,7 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
|
|||
* consumer AMD devices but nothing else.
|
||||
*/
|
||||
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
|
||||
if (kvm_enabled()) {
|
||||
if (accel_uses_host_cpuid()) {
|
||||
uint32_t host_phys_bits = x86_host_phys_bits();
|
||||
static bool warned;
|
||||
|
||||
|
@ -4272,7 +4312,7 @@ static void x86_cpu_register_types(void)
|
|||
}
|
||||
type_register_static(&max_x86_cpu_type_info);
|
||||
type_register_static(&x86_base_cpu_type_info);
|
||||
#ifdef CONFIG_KVM
|
||||
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
|
||||
type_register_static(&host_x86_cpu_type_info);
|
||||
#endif
|
||||
}
|
||||
|
|
|
@ -30,6 +30,8 @@
|
|||
#define TARGET_LONG_BITS 32
|
||||
#endif
|
||||
|
||||
#include "exec/cpu-defs.h"
|
||||
|
||||
/* The x86 has a strong memory model with some store-after-load re-ordering */
|
||||
#define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
|
||||
|
||||
|
@ -50,48 +52,64 @@
|
|||
|
||||
#define CPUArchState struct CPUX86State
|
||||
|
||||
#include "exec/cpu-defs.h"
|
||||
|
||||
#ifdef CONFIG_TCG
|
||||
#include "fpu/softfloat.h"
|
||||
#endif
|
||||
|
||||
#define R_EAX 0
|
||||
#define R_ECX 1
|
||||
#define R_EDX 2
|
||||
#define R_EBX 3
|
||||
#define R_ESP 4
|
||||
#define R_EBP 5
|
||||
#define R_ESI 6
|
||||
#define R_EDI 7
|
||||
enum {
|
||||
R_EAX = 0,
|
||||
R_ECX = 1,
|
||||
R_EDX = 2,
|
||||
R_EBX = 3,
|
||||
R_ESP = 4,
|
||||
R_EBP = 5,
|
||||
R_ESI = 6,
|
||||
R_EDI = 7,
|
||||
R_R8 = 8,
|
||||
R_R9 = 9,
|
||||
R_R10 = 10,
|
||||
R_R11 = 11,
|
||||
R_R12 = 12,
|
||||
R_R13 = 13,
|
||||
R_R14 = 14,
|
||||
R_R15 = 15,
|
||||
|
||||
#define R_AL 0
|
||||
#define R_CL 1
|
||||
#define R_DL 2
|
||||
#define R_BL 3
|
||||
#define R_AH 4
|
||||
#define R_CH 5
|
||||
#define R_DH 6
|
||||
#define R_BH 7
|
||||
R_AL = 0,
|
||||
R_CL = 1,
|
||||
R_DL = 2,
|
||||
R_BL = 3,
|
||||
R_AH = 4,
|
||||
R_CH = 5,
|
||||
R_DH = 6,
|
||||
R_BH = 7,
|
||||
};
|
||||
|
||||
#define R_ES 0
|
||||
#define R_CS 1
|
||||
#define R_SS 2
|
||||
#define R_DS 3
|
||||
#define R_FS 4
|
||||
#define R_GS 5
|
||||
typedef enum X86Seg {
|
||||
R_ES = 0,
|
||||
R_CS = 1,
|
||||
R_SS = 2,
|
||||
R_DS = 3,
|
||||
R_FS = 4,
|
||||
R_GS = 5,
|
||||
R_LDTR = 6,
|
||||
R_TR = 7,
|
||||
} X86Seg;
|
||||
|
||||
/* segment descriptor fields */
|
||||
#define DESC_G_MASK (1 << 23)
|
||||
#define DESC_G_SHIFT 23
|
||||
#define DESC_G_MASK (1 << DESC_G_SHIFT)
|
||||
#define DESC_B_SHIFT 22
|
||||
#define DESC_B_MASK (1 << DESC_B_SHIFT)
|
||||
#define DESC_L_SHIFT 21 /* x86_64 only : 64 bit code segment */
|
||||
#define DESC_L_MASK (1 << DESC_L_SHIFT)
|
||||
#define DESC_AVL_MASK (1 << 20)
|
||||
#define DESC_P_MASK (1 << 15)
|
||||
#define DESC_AVL_SHIFT 20
|
||||
#define DESC_AVL_MASK (1 << DESC_AVL_SHIFT)
|
||||
#define DESC_P_SHIFT 15
|
||||
#define DESC_P_MASK (1 << DESC_P_SHIFT)
|
||||
#define DESC_DPL_SHIFT 13
|
||||
#define DESC_DPL_MASK (3 << DESC_DPL_SHIFT)
|
||||
#define DESC_S_MASK (1 << 12)
|
||||
#define DESC_S_SHIFT 12
|
||||
#define DESC_S_MASK (1 << DESC_S_SHIFT)
|
||||
#define DESC_TYPE_SHIFT 8
|
||||
#define DESC_TYPE_MASK (15 << DESC_TYPE_SHIFT)
|
||||
#define DESC_A_MASK (1 << 8)
|
||||
|
@ -631,6 +649,7 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
|
|||
#define CPUID_7_0_EBX_AVX512BW (1U << 30) /* AVX-512 Byte and Word Instructions */
|
||||
#define CPUID_7_0_EBX_AVX512VL (1U << 31) /* AVX-512 Vector Length Extensions */
|
||||
|
||||
#define CPUID_7_0_ECX_AVX512BMI (1U << 1)
|
||||
#define CPUID_7_0_ECX_VBMI (1U << 1) /* AVX-512 Vector Byte Manipulation Instrs */
|
||||
#define CPUID_7_0_ECX_UMIP (1U << 2)
|
||||
#define CPUID_7_0_ECX_PKU (1U << 3)
|
||||
|
@ -812,6 +831,20 @@ typedef struct SegmentCache {
|
|||
float64 _d_##n[(bits)/64]; \
|
||||
}
|
||||
|
||||
typedef union {
|
||||
uint8_t _b[16];
|
||||
uint16_t _w[8];
|
||||
uint32_t _l[4];
|
||||
uint64_t _q[2];
|
||||
} XMMReg;
|
||||
|
||||
typedef union {
|
||||
uint8_t _b[32];
|
||||
uint16_t _w[16];
|
||||
uint32_t _l[8];
|
||||
uint64_t _q[4];
|
||||
} YMMReg;
|
||||
|
||||
typedef MMREG_UNION(ZMMReg, 512) ZMMReg;
|
||||
typedef MMREG_UNION(MMXReg, 64) MMXReg;
|
||||
|
||||
|
@ -1047,7 +1080,11 @@ typedef struct CPUX86State {
|
|||
ZMMReg xmm_t0;
|
||||
MMXReg mmx_t0;
|
||||
|
||||
XMMReg ymmh_regs[CPU_NB_REGS];
|
||||
|
||||
uint64_t opmask_regs[NB_OPMASK_REGS];
|
||||
YMMReg zmmh_regs[CPU_NB_REGS];
|
||||
ZMMReg hi16_zmm_regs[CPU_NB_REGS];
|
||||
|
||||
/* sysenter registers */
|
||||
uint32_t sysenter_cs;
|
||||
|
@ -1172,11 +1209,15 @@ typedef struct CPUX86State {
|
|||
int32_t interrupt_injected;
|
||||
uint8_t soft_interrupt;
|
||||
uint8_t has_error_code;
|
||||
uint32_t ins_len;
|
||||
uint32_t sipi_vector;
|
||||
bool tsc_valid;
|
||||
int64_t tsc_khz;
|
||||
int64_t user_tsc_khz; /* for sanity check only */
|
||||
void *kvm_xsave_buf;
|
||||
#if defined(CONFIG_HVF)
|
||||
HVFX86EmulatorState *hvf_emul;
|
||||
#endif
|
||||
|
||||
uint64_t mcg_cap;
|
||||
uint64_t mcg_ctl;
|
||||
|
|
|
@ -0,0 +1,2 @@
|
|||
obj-y += hvf.o
|
||||
obj-y += x86.o x86_cpuid.o x86_decode.o x86_descr.o x86_emu.o x86_flags.o x86_mmu.o x86hvf.o x86_task.o
|
|
@ -0,0 +1,7 @@
|
|||
# OS X Hypervisor.framework support in QEMU
|
||||
|
||||
These sources (and ../hvf-all.c) are adapted from Veertu Inc's vdhh (Veertu Desktop Hosted Hypervisor) (last known location: https://github.com/veertuinc/vdhh) with some minor changes, the most significant of which were:
|
||||
|
||||
1. Adapt to our current QEMU's `CPUState` structure and `address_space_rw` API; many struct members have been moved around (emulated x86 state, kvm_xsave_buf) due to historical differences + QEMU needing to handle more emulation targets.
|
||||
2. Removal of `apic_page` and hyperv-related functionality.
|
||||
3. More relaxed use of `qemu_mutex_lock_iothread`.
|
|
@ -0,0 +1,48 @@
|
|||
/*
|
||||
* QEMU Hypervisor.framework (HVF) support
|
||||
*
|
||||
* Copyright 2017 Google Inc
|
||||
*
|
||||
* Adapted from target-i386/hax-i386.h:
|
||||
* Copyright (c) 2011 Intel Corporation
|
||||
* Written by:
|
||||
* Jiang Yunhong<yunhong.jiang@intel.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*
|
||||
*/
|
||||
|
||||
/*
 * BUGFIX: the original guard was _HVF_I386_H; identifiers beginning with
 * an underscore followed by an uppercase letter are reserved for the
 * implementation (C11 7.1.3).
 */
#ifndef HVF_I386_H
#define HVF_I386_H

#include "sysemu/hvf.h"
#include "cpu.h"
#include "x86.h"

#define HVF_MAX_VCPU 0x10
#define MAX_VM_ID 0x40
#define MAX_VCPU_ID 0x40

extern struct hvf_state hvf_global;

/* One guest VM: its identifier plus per-vCPU state slots. */
struct hvf_vm {
    int id;
    struct hvf_vcpu_state *vcpus[HVF_MAX_VCPU];
};

/* Global accelerator state: API version, the VM, and its memory quota. */
struct hvf_state {
    uint32_t version;
    struct hvf_vm *vm;
    uint64_t mem_quota;
};

#ifdef NEED_CPU_H
/* Functions exported to host specific mode */

/* Host specific functions */
int hvf_inject_interrupt(CPUArchState *env, int vector);
int hvf_vcpu_run(struct hvf_vcpu_state *vcpu);
#endif

#endif /* HVF_I386_H */
|
|
@ -0,0 +1,959 @@
|
|||
/* Copyright 2008 IBM Corporation
|
||||
* 2008 Red Hat, Inc.
|
||||
* Copyright 2011 Intel Corporation
|
||||
* Copyright 2016 Veertu, Inc.
|
||||
* Copyright 2017 The Android Open Source Project
|
||||
*
|
||||
* QEMU Hypervisor.framework support
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of version 2 of the GNU General Public
|
||||
* License as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu-common.h"
|
||||
#include "qemu/error-report.h"
|
||||
|
||||
#include "sysemu/hvf.h"
|
||||
#include "hvf-i386.h"
|
||||
#include "vmcs.h"
|
||||
#include "vmx.h"
|
||||
#include "x86.h"
|
||||
#include "x86_descr.h"
|
||||
#include "x86_mmu.h"
|
||||
#include "x86_decode.h"
|
||||
#include "x86_emu.h"
|
||||
#include "x86_task.h"
|
||||
#include "x86hvf.h"
|
||||
|
||||
#include <Hypervisor/hv.h>
|
||||
#include <Hypervisor/hv_vmx.h>
|
||||
|
||||
#include "exec/address-spaces.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "exec/ioport.h"
|
||||
#include "hw/i386/apic_internal.h"
|
||||
#include "hw/boards.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "strings.h"
|
||||
#include "sysemu/accel.h"
|
||||
#include "sysemu/sysemu.h"
|
||||
#include "target/i386/cpu.h"
|
||||
|
||||
pthread_rwlock_t mem_lock = PTHREAD_RWLOCK_INITIALIZER;
|
||||
HVFState *hvf_state;
|
||||
int hvf_disabled = 1;
|
||||
|
||||
/*
 * Abort QEMU unless a Hypervisor.framework call succeeded.
 *
 * BUGFIX: error_report() terminates the message itself, so the strings
 * must not carry a trailing "\n" (QEMU coding style; checkpatch flags it).
 */
static void assert_hvf_ok(hv_return_t ret)
{
    const char *name;

    if (ret == HV_SUCCESS) {
        return;
    }

    switch (ret) {
    case HV_ERROR:
        name = "HV_ERROR";
        break;
    case HV_BUSY:
        name = "HV_BUSY";
        break;
    case HV_BAD_ARGUMENT:
        name = "HV_BAD_ARGUMENT";
        break;
    case HV_NO_RESOURCES:
        name = "HV_NO_RESOURCES";
        break;
    case HV_NO_DEVICE:
        name = "HV_NO_DEVICE";
        break;
    case HV_UNSUPPORTED:
        name = "HV_UNSUPPORTED";
        break;
    default:
        name = NULL;
    }

    if (name) {
        error_report("Error: %s", name);
    } else {
        error_report("Unknown Error");
    }

    abort();
}
|
||||
|
||||
/* Memory slots */
|
||||
hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t end)
|
||||
{
|
||||
hvf_slot *slot;
|
||||
int x;
|
||||
for (x = 0; x < hvf_state->num_slots; ++x) {
|
||||
slot = &hvf_state->slots[x];
|
||||
if (slot->size && start < (slot->start + slot->size) &&
|
||||
end > slot->start) {
|
||||
return slot;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
 * Shadow bookkeeping of what has actually been hv_vm_map()ed, so a
 * resized slot can be unmapped before it is mapped again.
 */
struct mac_slot {
    int present;            /* non-zero when the mapping is live in HVF */
    uint64_t size;
    uint64_t gpa_start;
    uint64_t gva;           /* NOTE(review): never written in this file */
};

struct mac_slot mac_slots[32];
#define ALIGN(x, y) (((x) + (y) - 1) & ~((y) - 1))
|
||||
|
||||
/*
 * Push one hvf_slot into Hypervisor.framework.
 *
 * A previously-mapped slot whose size changed is unmapped first; a
 * zero-sized slot stays unmapped. Always returns 0 — any HVF failure
 * aborts through assert_hvf_ok().
 */
static int do_hvf_set_memory(hvf_slot *slot)
{
    struct mac_slot *ms = &mac_slots[slot->slot_id];

    if (ms->present && ms->size != slot->size) {
        /* Stale mapping of a different size: drop it before remapping. */
        ms->present = 0;
        assert_hvf_ok(hv_vm_unmap(ms->gpa_start, ms->size));
    }

    if (!slot->size) {
        return 0;
    }

    ms->present = 1;
    ms->gpa_start = slot->start;
    ms->size = slot->size;
    assert_hvf_ok(hv_vm_map((hv_uvaddr_t)slot->mem, slot->start, slot->size,
                            HV_MEMORY_READ | HV_MEMORY_WRITE |
                            HV_MEMORY_EXEC));
    return 0;
}
|
||||
|
||||
/*
 * React to a memory region section being added to (add=true) or removed
 * from (add=false) the guest physical address space. Only RAM-backed
 * regions are mapped into Hypervisor.framework.
 *
 * BUGFIX: error_report() messages must not end in "\n" — the function
 * appends the newline itself (QEMU coding style).
 */
void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
{
    hvf_slot *mem;
    MemoryRegion *area = section->mr;
    uint64_t start = section->offset_within_address_space;
    uint64_t size = int128_get64(section->size);
    int x;

    if (!memory_region_is_ram(area)) {
        return;
    }

    mem = hvf_find_overlap_slot(start, start + size);

    if (mem && add) {
        if (mem->size == size &&
            mem->start == start &&
            mem->mem == (memory_region_get_ram_ptr(area) +
                         section->offset_within_region)) {
            return; /* Same region was attempted to register, go away. */
        }
    }

    /* Region needs to be reset. set the size to 0 and remap it. */
    if (mem) {
        mem->size = 0;
        if (do_hvf_set_memory(mem)) {
            error_report("Failed to reset overlapping slot");
            abort();
        }
    }

    if (!add) {
        return;
    }

    /* Now make a new slot. */
    for (x = 0; x < hvf_state->num_slots; ++x) {
        mem = &hvf_state->slots[x];
        if (!mem->size) {
            break;
        }
    }

    if (x == hvf_state->num_slots) {
        error_report("No free slots");
        abort();
    }

    mem->size = size;
    mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region;
    mem->start = start;
    mem->region = area;

    if (do_hvf_set_memory(mem)) {
        error_report("Error registering new memory slot");
        abort();
    }
}
|
||||
|
||||
/*
 * Propagate the APIC task-priority register into the VMCS and program
 * the TPR threshold from the highest-priority pending interrupt.
 */
void vmx_update_tpr(CPUState *cpu)
{
    /* TODO: need integrate APIC handling */
    X86CPU *x86_cpu = X86_CPU(cpu);
    int tpr = cpu_get_apic_tpr(x86_cpu->apic_state) << 4;
    int irr = apic_get_highest_priority_irr(x86_cpu->apic_state);

    wreg(cpu->hvf_fd, HV_X86_TPR, tpr);
    if (irr == -1) {
        /* No pending interrupt: never trap on TPR writes. */
        wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);
    } else {
        int threshold = (irr > tpr) ? tpr >> 4 : irr >> 4;
        wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, threshold);
    }
}
|
||||
|
||||
/* Read TPR back from the vCPU and reflect it into the emulated APIC. */
void update_apic_tpr(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);

    cpu_set_apic_tpr(x86_cpu->apic_state,
                     rreg(cpu->hvf_fd, HV_X86_TPR) >> 4);
}
|
||||
|
||||
#define VECTORING_INFO_VECTOR_MASK 0xff
|
||||
|
||||
/*
 * cpu_interrupt_handler hook: raise the interrupt request bits and kick
 * the vCPU thread if the request came from another thread.
 */
static void hvf_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}
|
||||
|
||||
/*
 * Perform `count` port-I/O transfers of `size` bytes each through the
 * I/O address space. direction: 0 = in (read into buffer), 1 = out.
 */
void hvf_handle_io(CPUArchState *env, uint16_t port, void *buffer,
                   int direction, int size, int count)
{
    uint8_t *p = buffer;
    int i;

    for (i = 0; i < count; i++, p += size) {
        address_space_rw(&address_space_io, port, MEMTXATTRS_UNSPECIFIED,
                         p, size, direction);
    }
}
|
||||
|
||||
/* TODO: synchronize vcpu state */
|
||||
/* TODO: synchronize vcpu state */
/* run_on_cpu callback: pull registers from HVF and mark them dirty. */
static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
    if (cpu->vcpu_dirty == 0) {
        hvf_get_registers(cpu);
    }

    cpu->vcpu_dirty = 1;
}
|
||||
|
||||
/* Fetch the vCPU's register state into QEMU, unless already cached. */
void hvf_cpu_synchronize_state(CPUState *cpu_state)
{
    if (cpu_state->vcpu_dirty) {
        return;
    }

    run_on_cpu(cpu_state, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);
}
|
||||
|
||||
/* run_on_cpu callback: flush QEMU's register state back into HVF. */
static void do_hvf_cpu_synchronize_post_reset(CPUState *cpu,
                                              run_on_cpu_data arg)
{
    hvf_put_registers(cpu);
    cpu->vcpu_dirty = false;
}
|
||||
|
||||
/* Push QEMU's register state to the vCPU after a system reset. */
void hvf_cpu_synchronize_post_reset(CPUState *cpu_state)
{
    run_on_cpu(cpu_state, do_hvf_cpu_synchronize_post_reset,
               RUN_ON_CPU_NULL);
}
|
||||
|
||||
/*
 * run_on_cpu callback: flush QEMU's register state back into HVF.
 * NOTE(review): the leading-underscore file-scope name is reserved by the
 * C standard; renaming would change external linkage, so it is kept.
 */
void _hvf_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
{
    hvf_put_registers(cpu);
    cpu->vcpu_dirty = false;
}
|
||||
|
||||
/* Push QEMU's register state to the vCPU after machine init. */
void hvf_cpu_synchronize_post_init(CPUState *cpu_state)
{
    run_on_cpu(cpu_state, _hvf_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}
|
||||
|
||||
/*
 * Decide whether an EPT violation must be handled by the instruction
 * emulator (MMIO) rather than being a plain dirty-tracking write fault.
 *
 * As a side effect, a write into a dirty-logged slot marks the page
 * dirty and re-opens the slot for writing.
 */
static bool ept_emulation_fault(hvf_slot *slot, uint64_t gpa, uint64_t ept_qual)
{
    bool is_read = (ept_qual & EPT_VIOLATION_DATA_READ) != 0;
    bool is_write = (ept_qual & EPT_VIOLATION_DATA_WRITE) != 0;

    /* EPT fault on an instruction fetch doesn't make sense here */
    if (ept_qual & EPT_VIOLATION_INST_FETCH) {
        return false;
    }

    /* EPT fault must be a read fault or a write fault */
    if (!is_read && !is_write) {
        return false;
    }

    if (is_write && slot && (slot->flags & HVF_SLOT_LOG)) {
        /* Write fault caused by dirty logging: log it and unprotect. */
        memory_region_set_dirty(slot->region, gpa - slot->start, 1);
        hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
                      HV_MEMORY_READ | HV_MEMORY_WRITE);
    }

    /*
     * The EPT violation must have been caused by accessing a
     * guest-physical address that is a translation of a guest-linear
     * address.
     */
    if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
        (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
        return false;
    }

    /* No backing slot means the access targets MMIO: emulate it. */
    return !slot;
}
|
||||
|
||||
/*
 * Enable (on != 0) or disable dirty-page tracking for the slot backing
 * this section by write-protecting (or re-opening) its HVF mapping.
 *
 * NOTE(review): assumes the section always resolves to a slot; a NULL
 * return from hvf_find_overlap_slot would dereference NULL — confirm
 * callers only pass RAM sections that were previously registered.
 */
static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
{
    hvf_slot *slot = hvf_find_overlap_slot(
        section->offset_within_address_space,
        section->offset_within_address_space + int128_get64(section->size));

    if (on) {
        /* protect region against writes; begin tracking it */
        slot->flags |= HVF_SLOT_LOG;
        hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
                      HV_MEMORY_READ);
    } else {
        /* stop tracking region */
        slot->flags &= ~HVF_SLOT_LOG;
        hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
                      HV_MEMORY_READ | HV_MEMORY_WRITE);
    }
}
|
||||
|
||||
static void hvf_log_start(MemoryListener *listener,
|
||||
MemoryRegionSection *section, int old, int new)
|
||||
{
|
||||
if (old != 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
hvf_set_dirty_tracking(section, 1);
|
||||
}
|
||||
|
||||
static void hvf_log_stop(MemoryListener *listener,
|
||||
MemoryRegionSection *section, int old, int new)
|
||||
{
|
||||
if (new != 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
hvf_set_dirty_tracking(section, 0);
|
||||
}
|
||||
|
||||
static void hvf_log_sync(MemoryListener *listener,
|
||||
MemoryRegionSection *section)
|
||||
{
|
||||
/*
|
||||
* sync of dirty pages is handled elsewhere; just make sure we keep
|
||||
* tracking the region.
|
||||
*/
|
||||
hvf_set_dirty_tracking(section, 1);
|
||||
}
|
||||
|
||||
static void hvf_region_add(MemoryListener *listener,
|
||||
MemoryRegionSection *section)
|
||||
{
|
||||
hvf_set_phys_mem(section, true);
|
||||
}
|
||||
|
||||
static void hvf_region_del(MemoryListener *listener,
|
||||
MemoryRegionSection *section)
|
||||
{
|
||||
hvf_set_phys_mem(section, false);
|
||||
}
|
||||
|
||||
static MemoryListener hvf_memory_listener = {
|
||||
.priority = 10,
|
||||
.region_add = hvf_region_add,
|
||||
.region_del = hvf_region_del,
|
||||
.log_start = hvf_log_start,
|
||||
.log_stop = hvf_log_stop,
|
||||
.log_sync = hvf_log_sync,
|
||||
};
|
||||
|
||||
/*
 * Bring the vCPU to the architectural power-on state: real mode, CS at
 * the reset vector, all GPRs cleared (RDX carries the CPU signature).
 */
void hvf_reset_vcpu(CPUState *cpu)
{
    /* TODO: this shouldn't be needed; there is already a call to
     * cpu_synchronize_all_post_reset in vl.c
     */
    wvmcs(cpu->hvf_fd, VMCS_ENTRY_CTLS, 0);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, 0);
    macvm_set_cr0(cpu->hvf_fd, 0x60000010);

    /* CR4: only VMXE is owned by the host; the guest sees it clear. */
    wvmcs(cpu->hvf_fd, VMCS_CR4_MASK, CR4_VMXE_MASK);
    wvmcs(cpu->hvf_fd, VMCS_CR4_SHADOW, 0x0);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_CR4, CR4_VMXE_MASK);

    /* set VMCS guest state fields */
    /* CS: base 0xffff0000, selector 0xf000 — the 16-bit reset vector. */
    wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_SELECTOR, 0xf000);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_LIMIT, 0xffff);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_ACCESS_RIGHTS, 0x9b);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_BASE, 0xffff0000);

    /* Flat 16-bit data segments (access rights 0x93). */
    wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_SELECTOR, 0);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_LIMIT, 0xffff);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_ACCESS_RIGHTS, 0x93);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_BASE, 0);

    wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_SELECTOR, 0);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_LIMIT, 0xffff);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_ACCESS_RIGHTS, 0x93);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_BASE, 0);

    wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_SELECTOR, 0);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_LIMIT, 0xffff);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_ACCESS_RIGHTS, 0x93);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE, 0);

    wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_SELECTOR, 0);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_LIMIT, 0xffff);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_ACCESS_RIGHTS, 0x93);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE, 0);

    wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_SELECTOR, 0);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_LIMIT, 0xffff);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_ACCESS_RIGHTS, 0x93);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_BASE, 0);

    /* LDTR unusable (0x10000), TR a busy 16-bit TSS (0x83). */
    wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_SELECTOR, 0);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT, 0);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_ACCESS_RIGHTS, 0x10000);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE, 0);

    wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_SELECTOR, 0);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_LIMIT, 0);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_ACCESS_RIGHTS, 0x83);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_BASE, 0);

    wvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT, 0);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE, 0);

    wvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_LIMIT, 0);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_BASE, 0);

    /*wvmcs(cpu->hvf_fd, VMCS_GUEST_CR2, 0x0);*/
    wvmcs(cpu->hvf_fd, VMCS_GUEST_CR3, 0x0);

    /* RIP 0xfff0 + CS base = reset vector; RDX holds the CPU signature. */
    wreg(cpu->hvf_fd, HV_X86_RIP, 0xfff0);
    wreg(cpu->hvf_fd, HV_X86_RDX, 0x623);
    wreg(cpu->hvf_fd, HV_X86_RFLAGS, 0x2);
    wreg(cpu->hvf_fd, HV_X86_RSP, 0x0);
    wreg(cpu->hvf_fd, HV_X86_RAX, 0x0);
    wreg(cpu->hvf_fd, HV_X86_RBX, 0x0);
    wreg(cpu->hvf_fd, HV_X86_RCX, 0x0);
    wreg(cpu->hvf_fd, HV_X86_RSI, 0x0);
    wreg(cpu->hvf_fd, HV_X86_RDI, 0x0);
    wreg(cpu->hvf_fd, HV_X86_RBP, 0x0);

    for (int i = 0; i < 8; i++) {
        wreg(cpu->hvf_fd, HV_X86_R8 + i, 0x0);
    }

    hv_vm_sync_tsc(0);
    cpu->halted = 0;
    hv_vcpu_invalidate_tlb(cpu->hvf_fd);
    hv_vcpu_flush(cpu->hvf_fd);
}
|
||||
|
||||
/* Tear down the Hypervisor.framework vCPU backing this CPUState. */
void hvf_vcpu_destroy(CPUState *cpu)
{
    assert_hvf_ok(hv_vcpu_destroy((hv_vcpuid_t)cpu->hvf_fd));
}
|
||||
|
||||
/* No-op handler: SIG_IPI only needs to interrupt hv_vcpu_run(). */
static void dummy_signal(int sig)
{
}
|
||||
|
||||
/*
 * Create and configure one HVF vCPU: signal setup, emulator state,
 * VMX capability probing, VMCS control fields, reset, and native MSRs.
 * Returns 0; any HVF failure aborts.
 */
int hvf_init_vcpu(CPUState *cpu)
{
    X86CPU *x86cpu = X86_CPU(cpu);
    CPUX86State *env = &x86cpu->env;
    int r;

    /* init cpu signals */
    struct sigaction sigact;
    sigset_t set;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    /*
     * NOTE(review): `set` is computed but never applied with
     * pthread_sigmask(SIG_SETMASK, ...) — confirm whether unblocking
     * SIG_IPI here was intended.
     */
    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);

    init_emu();
    init_decoder();

    hvf_state->hvf_caps = g_new0(struct hvf_vcpu_caps, 1);
    env->hvf_emul = g_new0(HVFX86EmulatorState, 1);

    r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf_fd, HV_VCPU_DEFAULT);
    cpu->vcpu_dirty = 1;
    assert_hvf_ok(r);

    /* Cache the host's VMX control capabilities. */
    if (hv_vmx_read_capability(HV_VMX_CAP_PINBASED,
        &hvf_state->hvf_caps->vmx_cap_pinbased)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED,
        &hvf_state->hvf_caps->vmx_cap_procbased)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2,
        &hvf_state->hvf_caps->vmx_cap_procbased2)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_ENTRY,
        &hvf_state->hvf_caps->vmx_cap_entry)) {
        abort();
    }

    /* set VMCS control fields */
    wvmcs(cpu->hvf_fd, VMCS_PIN_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_pinbased,
                   VMCS_PIN_BASED_CTLS_EXTINT |
                   VMCS_PIN_BASED_CTLS_NMI |
                   VMCS_PIN_BASED_CTLS_VNMI));
    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased,
                   VMCS_PRI_PROC_BASED_CTLS_HLT |
                   VMCS_PRI_PROC_BASED_CTLS_MWAIT |
                   VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET |
                   VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW) |
          VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL);
    wvmcs(cpu->hvf_fd, VMCS_SEC_PROC_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased2,
                   VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES));

    wvmcs(cpu->hvf_fd, VMCS_ENTRY_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_entry, 0));
    wvmcs(cpu->hvf_fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */

    wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);

    hvf_reset_vcpu(cpu);

    x86cpu = X86_CPU(cpu);
    x86cpu->env.kvm_xsave_buf = qemu_memalign(4096, 4096);

    /* MSRs the guest may access directly, without a VM exit. */
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_STAR, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_LSTAR, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_CSTAR, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FMASK, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_GSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_KERNELGSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_TSC_AUX, 1);
    /*hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_TSC, 1);*/
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_CS, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_EIP, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_ESP, 1);

    return 0;
}
|
||||
|
||||
void hvf_disable(int shouldDisable)
|
||||
{
|
||||
hvf_disabled = shouldDisable;
|
||||
}
|
||||
|
||||
/*
 * Capture any event that was being delivered when the VM exit occurred
 * (IDT-vectoring info) plus the guest interruptibility state, so the
 * event can be re-injected on the next entry.
 */
static void hvf_store_events(CPUState *cpu, uint32_t ins_len,
                             uint64_t idtvec_info)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    uint64_t intr_state;

    env->exception_injected = -1;
    env->interrupt_injected = -1;
    env->nmi_injected = false;

    if (idtvec_info & VMCS_IDT_VEC_VALID) {
        switch (idtvec_info & VMCS_IDT_VEC_TYPE) {
        case VMCS_IDT_VEC_HWINTR:
        case VMCS_IDT_VEC_SWINTR:
            env->interrupt_injected = idtvec_info & VMCS_IDT_VEC_VECNUM;
            break;
        case VMCS_IDT_VEC_NMI:
            env->nmi_injected = true;
            break;
        case VMCS_IDT_VEC_HWEXCEPTION:
        case VMCS_IDT_VEC_SWEXCEPTION:
            env->exception_injected = idtvec_info & VMCS_IDT_VEC_VECNUM;
            break;
        case VMCS_IDT_VEC_PRIV_SWEXCEPTION:
        default:
            abort();
        }

        /* Software events need the instruction length for re-injection. */
        if ((idtvec_info & VMCS_IDT_VEC_TYPE) == VMCS_IDT_VEC_SWEXCEPTION ||
            (idtvec_info & VMCS_IDT_VEC_TYPE) == VMCS_IDT_VEC_SWINTR) {
            env->ins_len = ins_len;
        }

        if (idtvec_info & VMCS_INTR_DEL_ERRCODE) {
            env->has_error_code = true;
            env->error_code = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_ERROR);
        }
    }

    /* Mirror NMI and STI/MOV-SS blocking into the CPU flags. */
    intr_state = rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);

    if (intr_state & VMCS_INTERRUPTIBILITY_NMI_BLOCKING) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    if (intr_state & (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
                      VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
        env->hflags |= HF_INHIBIT_IRQ_MASK;
    } else {
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    }
}
|
||||
|
||||
/*
 * Run the vCPU, handling VM exits in a loop, until an exit requires
 * returning to QEMU's main loop.
 *
 * Returns EXCP_HLT when the guest halted with no wakeup event pending,
 * or EXCP_INTERRUPT (or another non-zero code) when the caller must
 * service interrupts / I/O.
 *
 * BUGFIXES vs the original:
 *  - EXIT_REASON_HLT: `ret = EXCP_HLT` was unconditionally overwritten
 *    by `ret = EXCP_INTERRUPT` because the `break` was missing, so a
 *    genuine halt was never reported as EXCP_HLT.
 *  - error_report() strings must not carry a trailing "\n".
 */
int hvf_vcpu_exec(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    int ret = 0;
    uint64_t rip = 0;

    cpu->halted = 0;

    if (hvf_process_events(cpu)) {
        return EXCP_HLT;
    }

    do {
        if (cpu->vcpu_dirty) {
            hvf_put_registers(cpu);
            cpu->vcpu_dirty = false;
        }

        if (hvf_inject_interrupts(cpu)) {
            return EXCP_INTERRUPT;
        }
        vmx_update_tpr(cpu);

        qemu_mutex_unlock_iothread();
        if (!cpu_is_bsp(X86_CPU(cpu)) && cpu->halted) {
            qemu_mutex_lock_iothread();
            return EXCP_HLT;
        }

        hv_return_t r = hv_vcpu_run(cpu->hvf_fd);
        assert_hvf_ok(r);

        /* handle VMEXIT */
        uint64_t exit_reason = rvmcs(cpu->hvf_fd, VMCS_EXIT_REASON);
        uint64_t exit_qual = rvmcs(cpu->hvf_fd, VMCS_EXIT_QUALIFICATION);
        uint32_t ins_len = (uint32_t)rvmcs(cpu->hvf_fd,
                                           VMCS_EXIT_INSTRUCTION_LENGTH);
        uint64_t idtvec_info = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);

        hvf_store_events(cpu, ins_len, idtvec_info);
        rip = rreg(cpu->hvf_fd, HV_X86_RIP);
        RFLAGS(env) = rreg(cpu->hvf_fd, HV_X86_RFLAGS);
        env->eflags = RFLAGS(env);

        qemu_mutex_lock_iothread();

        update_apic_tpr(cpu);
        current_cpu = cpu;

        ret = 0;
        switch (exit_reason) {
        case EXIT_REASON_HLT: {
            macvm_set_rip(cpu, rip + ins_len);
            if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
                (EFLAGS(env) & IF_MASK))
                && !(cpu->interrupt_request & CPU_INTERRUPT_NMI) &&
                !(idtvec_info & VMCS_IDT_VEC_VALID)) {
                cpu->halted = 1;
                ret = EXCP_HLT;
                break; /* BUGFIX: don't fall into the EXCP_INTERRUPT below */
            }
            ret = EXCP_INTERRUPT;
            break;
        }
        case EXIT_REASON_MWAIT: {
            ret = EXCP_INTERRUPT;
            break;
        }
        /* Need to check if MMIO or unmapped fault */
        case EXIT_REASON_EPT_FAULT:
        {
            hvf_slot *slot;
            uint64_t gpa = rvmcs(cpu->hvf_fd, VMCS_GUEST_PHYSICAL_ADDRESS);

            if (((idtvec_info & VMCS_IDT_VEC_VALID) == 0) &&
                ((exit_qual & EXIT_QUAL_NMIUDTI) != 0)) {
                vmx_set_nmi_blocking(cpu);
            }

            slot = hvf_find_overlap_slot(gpa, gpa);
            /* mmio */
            if (ept_emulation_fault(slot, gpa, exit_qual)) {
                struct x86_decode decode;

                load_regs(cpu);
                env->hvf_emul->fetch_rip = rip;

                decode_instruction(env, &decode);
                exec_instruction(env, &decode);
                store_regs(cpu);
                break;
            }
            break;
        }
        case EXIT_REASON_INOUT:
        {
            uint32_t in = (exit_qual & 8) != 0;
            uint32_t size = (exit_qual & 7) + 1;
            uint32_t string = (exit_qual & 16) != 0;
            uint32_t port = exit_qual >> 16;
            /*uint32_t rep = (exit_qual & 0x20) != 0;*/

            if (!string && in) {
                uint64_t val = 0;
                load_regs(cpu);
                hvf_handle_io(env, port, &val, 0, size, 1);
                if (size == 1) {
                    AL(env) = val;
                } else if (size == 2) {
                    AX(env) = val;
                } else if (size == 4) {
                    RAX(env) = (uint32_t)val;
                } else {
                    RAX(env) = (uint64_t)val;
                }
                RIP(env) += ins_len;
                store_regs(cpu);
                break;
            } else if (!string && !in) {
                RAX(env) = rreg(cpu->hvf_fd, HV_X86_RAX);
                hvf_handle_io(env, port, &RAX(env), 1, size, 1);
                macvm_set_rip(cpu, rip + ins_len);
                break;
            }
            /* String I/O: fall back to the instruction emulator. */
            struct x86_decode decode;

            load_regs(cpu);
            env->hvf_emul->fetch_rip = rip;

            decode_instruction(env, &decode);
            assert(ins_len == decode.len);
            exec_instruction(env, &decode);
            store_regs(cpu);

            break;
        }
        case EXIT_REASON_CPUID: {
            uint32_t rax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX);
            uint32_t rbx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RBX);
            uint32_t rcx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX);
            uint32_t rdx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX);

            cpu_x86_cpuid(env, rax, rcx, &rax, &rbx, &rcx, &rdx);

            wreg(cpu->hvf_fd, HV_X86_RAX, rax);
            wreg(cpu->hvf_fd, HV_X86_RBX, rbx);
            wreg(cpu->hvf_fd, HV_X86_RCX, rcx);
            wreg(cpu->hvf_fd, HV_X86_RDX, rdx);

            macvm_set_rip(cpu, rip + ins_len);
            break;
        }
        case EXIT_REASON_XSETBV: {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUX86State *env = &x86_cpu->env;
            uint32_t eax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX);
            uint32_t ecx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX);
            uint32_t edx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX);

            if (ecx) {
                /* Only XCR0 (ecx == 0) is supported; ignore others. */
                macvm_set_rip(cpu, rip + ins_len);
                break;
            }
            env->xcr0 = ((uint64_t)edx << 32) | eax;
            wreg(cpu->hvf_fd, HV_X86_XCR0, env->xcr0 | 1);
            macvm_set_rip(cpu, rip + ins_len);
            break;
        }
        case EXIT_REASON_INTR_WINDOW:
            vmx_clear_int_window_exiting(cpu);
            ret = EXCP_INTERRUPT;
            break;
        case EXIT_REASON_NMI_WINDOW:
            vmx_clear_nmi_window_exiting(cpu);
            ret = EXCP_INTERRUPT;
            break;
        case EXIT_REASON_EXT_INTR:
            /* force exit and allow io handling */
            ret = EXCP_INTERRUPT;
            break;
        case EXIT_REASON_RDMSR:
        case EXIT_REASON_WRMSR:
        {
            load_regs(cpu);
            if (exit_reason == EXIT_REASON_RDMSR) {
                simulate_rdmsr(cpu);
            } else {
                simulate_wrmsr(cpu);
            }
            RIP(env) += rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);
            store_regs(cpu);
            break;
        }
        case EXIT_REASON_CR_ACCESS: {
            int cr;
            int reg;

            load_regs(cpu);
            cr = exit_qual & 15;
            reg = (exit_qual >> 8) & 15;

            switch (cr) {
            case 0x0: {
                macvm_set_cr0(cpu->hvf_fd, RRX(env, reg));
                break;
            }
            case 4: {
                macvm_set_cr4(cpu->hvf_fd, RRX(env, reg));
                break;
            }
            case 8: {
                X86CPU *x86_cpu = X86_CPU(cpu);
                if (exit_qual & 0x10) {
                    RRX(env, reg) = cpu_get_apic_tpr(x86_cpu->apic_state);
                } else {
                    int tpr = RRX(env, reg);
                    cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
                    ret = EXCP_INTERRUPT;
                }
                break;
            }
            default:
                error_report("Unrecognized CR %d", cr);
                abort();
            }
            RIP(env) += ins_len;
            store_regs(cpu);
            break;
        }
        case EXIT_REASON_APIC_ACCESS: { /* TODO */
            struct x86_decode decode;

            load_regs(cpu);
            env->hvf_emul->fetch_rip = rip;

            decode_instruction(env, &decode);
            exec_instruction(env, &decode);
            store_regs(cpu);
            break;
        }
        case EXIT_REASON_TPR: {
            ret = 1;
            break;
        }
        case EXIT_REASON_TASK_SWITCH: {
            uint64_t vinfo = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);
            x68_segment_selector sel = {.sel = exit_qual & 0xffff};
            vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3,
                vinfo & VMCS_INTR_VALID, vinfo & VECTORING_INFO_VECTOR_MASK,
                vinfo & VMCS_INTR_T_MASK);
            break;
        }
        case EXIT_REASON_TRIPLE_FAULT: {
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            ret = EXCP_INTERRUPT;
            break;
        }
        case EXIT_REASON_RDPMC:
            wreg(cpu->hvf_fd, HV_X86_RAX, 0);
            wreg(cpu->hvf_fd, HV_X86_RDX, 0);
            macvm_set_rip(cpu, rip + ins_len);
            break;
        case VMX_REASON_VMCALL:
            /* VMCALL is not supported: inject #GP into the guest. */
            env->exception_injected = EXCP0D_GPF;
            env->has_error_code = true;
            env->error_code = 0;
            break;
        default:
            error_report("%llx: unhandled exit %llx", rip, exit_reason);
        }
    } while (ret == 0);

    return ret;
}
|
||||
|
||||
static bool hvf_allowed;
|
||||
|
||||
/*
 * AccelClass init hook: create the HVF VM, set up the slot table,
 * and register the memory listener. Returns 0 on success (HVF failures
 * abort via assert_hvf_ok).
 */
static int hvf_accel_init(MachineState *ms)
{
    HVFState *s;
    int i;

    hvf_disable(0);
    assert_hvf_ok(hv_vm_create(HV_VM_DEFAULT));

    s = g_new0(HVFState, 1);
    s->num_slots = 32;
    for (i = 0; i < s->num_slots; i++) {
        s->slots[i].size = 0;      /* size 0 marks the slot as free */
        s->slots[i].slot_id = i;
    }

    hvf_state = s;
    cpu_interrupt_handler = hvf_handle_interrupt;
    memory_listener_register(&hvf_memory_listener, &address_space_memory);
    return 0;
}
|
||||
|
||||
/* QOM class init: wire up the HVF accelerator hooks. */
static void hvf_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);

    ac->name = "HVF";
    ac->init_machine = hvf_accel_init;
    ac->allowed = &hvf_allowed;
}
|
||||
|
||||
static const TypeInfo hvf_accel_type = {
|
||||
.name = TYPE_HVF_ACCEL,
|
||||
.parent = TYPE_ACCEL,
|
||||
.class_init = hvf_accel_class_init,
|
||||
};
|
||||
|
||||
static void hvf_type_init(void)
|
||||
{
|
||||
type_register_static(&hvf_accel_type);
|
||||
}
|
||||
|
||||
type_init(hvf_type_init);
|
|
@ -0,0 +1,45 @@
|
|||
/*
|
||||
* Copyright (C) 2016 Veertu Inc,
|
||||
* Copyright (C) 2017 Google Inc,
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#ifndef HVF_PANIC_H
#define HVF_PANIC_H

/*
 * Print a diagnostic and abort.
 *
 * BUGFIX: the macros were bare { ... } blocks, which break when used as
 * the body of an if/else (`if (c) VM_PANIC("x"); else ...` becomes a
 * syntax error because of the extra ';'). Multi-statement macros must be
 * wrapped in do { ... } while (0) (CERT PRE10-C).
 */
#define VM_PANIC(x) do {                          \
    printf("%s\n", x);                            \
    abort();                                      \
} while (0)

/* Abort if the condition holds, printing the condition's source text. */
#define VM_PANIC_ON(x) do {                       \
    if (x) {                                      \
        printf("%s\n", #x);                       \
        abort();                                  \
    }                                             \
} while (0)

/* printf-style variant of VM_PANIC. */
#define VM_PANIC_EX(...) do {                     \
    printf(__VA_ARGS__);                          \
    abort();                                      \
} while (0)

/* printf-style variant of VM_PANIC_ON. */
#define VM_PANIC_ON_EX(x, ...) do {               \
    if (x) {                                      \
        printf(__VA_ARGS__);                      \
        abort();                                  \
    }                                             \
} while (0)

#endif
|
|
@ -0,0 +1,374 @@
|
|||
/*-
|
||||
* Copyright (c) 2011 NetApp, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#ifndef _VMCS_H_
|
||||
#define _VMCS_H_
|
||||
|
||||
#include <Hypervisor/hv.h>
|
||||
#include <Hypervisor/hv_vmx.h>
|
||||
|
||||
#define VMCS_INITIAL 0xffffffffffffffff
|
||||
|
||||
#define VMCS_IDENT(encoding) ((encoding) | 0x80000000)
|
||||
/*
|
||||
* VMCS field encodings from Appendix H, Intel Architecture Manual Vol3B.
|
||||
*/
|
||||
#define VMCS_INVALID_ENCODING 0xffffffff
|
||||
|
||||
/* 16-bit control fields */
|
||||
#define VMCS_VPID 0x00000000
|
||||
#define VMCS_PIR_VECTOR 0x00000002
|
||||
|
||||
/* 16-bit guest-state fields */
|
||||
#define VMCS_GUEST_ES_SELECTOR 0x00000800
|
||||
#define VMCS_GUEST_CS_SELECTOR 0x00000802
|
||||
#define VMCS_GUEST_SS_SELECTOR 0x00000804
|
||||
#define VMCS_GUEST_DS_SELECTOR 0x00000806
|
||||
#define VMCS_GUEST_FS_SELECTOR 0x00000808
|
||||
#define VMCS_GUEST_GS_SELECTOR 0x0000080A
|
||||
#define VMCS_GUEST_LDTR_SELECTOR 0x0000080C
|
||||
#define VMCS_GUEST_TR_SELECTOR 0x0000080E
|
||||
#define VMCS_GUEST_INTR_STATUS 0x00000810
|
||||
|
||||
/* 16-bit host-state fields */
|
||||
#define VMCS_HOST_ES_SELECTOR 0x00000C00
|
||||
#define VMCS_HOST_CS_SELECTOR 0x00000C02
|
||||
#define VMCS_HOST_SS_SELECTOR 0x00000C04
|
||||
#define VMCS_HOST_DS_SELECTOR 0x00000C06
|
||||
#define VMCS_HOST_FS_SELECTOR 0x00000C08
|
||||
#define VMCS_HOST_GS_SELECTOR 0x00000C0A
|
||||
#define VMCS_HOST_TR_SELECTOR 0x00000C0C
|
||||
|
||||
/* 64-bit control fields */
|
||||
#define VMCS_IO_BITMAP_A 0x00002000
|
||||
#define VMCS_IO_BITMAP_B 0x00002002
|
||||
#define VMCS_MSR_BITMAP 0x00002004
|
||||
#define VMCS_EXIT_MSR_STORE 0x00002006
|
||||
#define VMCS_EXIT_MSR_LOAD 0x00002008
|
||||
#define VMCS_ENTRY_MSR_LOAD 0x0000200A
|
||||
#define VMCS_EXECUTIVE_VMCS 0x0000200C
|
||||
#define VMCS_TSC_OFFSET 0x00002010
|
||||
#define VMCS_VIRTUAL_APIC 0x00002012
|
||||
#define VMCS_APIC_ACCESS 0x00002014
|
||||
#define VMCS_PIR_DESC 0x00002016
|
||||
#define VMCS_EPTP 0x0000201A
|
||||
#define VMCS_EOI_EXIT0 0x0000201C
|
||||
#define VMCS_EOI_EXIT1 0x0000201E
|
||||
#define VMCS_EOI_EXIT2 0x00002020
|
||||
#define VMCS_EOI_EXIT3 0x00002022
|
||||
#define VMCS_EOI_EXIT(vector) (VMCS_EOI_EXIT0 + ((vector) / 64) * 2)
|
||||
|
||||
/* 64-bit read-only fields */
|
||||
#define VMCS_GUEST_PHYSICAL_ADDRESS 0x00002400
|
||||
|
||||
/* 64-bit guest-state fields */
|
||||
#define VMCS_LINK_POINTER 0x00002800
|
||||
#define VMCS_GUEST_IA32_DEBUGCTL 0x00002802
|
||||
#define VMCS_GUEST_IA32_PAT 0x00002804
|
||||
#define VMCS_GUEST_IA32_EFER 0x00002806
|
||||
#define VMCS_GUEST_IA32_PERF_GLOBAL_CTRL 0x00002808
|
||||
#define VMCS_GUEST_PDPTE0 0x0000280A
|
||||
#define VMCS_GUEST_PDPTE1 0x0000280C
|
||||
#define VMCS_GUEST_PDPTE2 0x0000280E
|
||||
#define VMCS_GUEST_PDPTE3 0x00002810
|
||||
|
||||
/* 64-bit host-state fields */
|
||||
#define VMCS_HOST_IA32_PAT 0x00002C00
|
||||
#define VMCS_HOST_IA32_EFER 0x00002C02
|
||||
#define VMCS_HOST_IA32_PERF_GLOBAL_CTRL 0x00002C04
|
||||
|
||||
/* 32-bit control fields */
|
||||
#define VMCS_PIN_BASED_CTLS 0x00004000
|
||||
#define VMCS_PRI_PROC_BASED_CTLS 0x00004002
|
||||
#define VMCS_EXCEPTION_BITMAP 0x00004004
|
||||
#define VMCS_PF_ERROR_MASK 0x00004006
|
||||
#define VMCS_PF_ERROR_MATCH 0x00004008
|
||||
#define VMCS_CR3_TARGET_COUNT 0x0000400A
|
||||
#define VMCS_EXIT_CTLS 0x0000400C
|
||||
#define VMCS_EXIT_MSR_STORE_COUNT 0x0000400E
|
||||
#define VMCS_EXIT_MSR_LOAD_COUNT 0x00004010
|
||||
#define VMCS_ENTRY_CTLS 0x00004012
|
||||
#define VMCS_ENTRY_MSR_LOAD_COUNT 0x00004014
|
||||
#define VMCS_ENTRY_INTR_INFO 0x00004016
|
||||
#define VMCS_ENTRY_EXCEPTION_ERROR 0x00004018
|
||||
#define VMCS_ENTRY_INST_LENGTH 0x0000401A
|
||||
#define VMCS_TPR_THRESHOLD 0x0000401C
|
||||
#define VMCS_SEC_PROC_BASED_CTLS 0x0000401E
|
||||
#define VMCS_PLE_GAP 0x00004020
|
||||
#define VMCS_PLE_WINDOW 0x00004022
|
||||
|
||||
/* 32-bit read-only data fields */
|
||||
#define VMCS_INSTRUCTION_ERROR 0x00004400
|
||||
#define VMCS_EXIT_REASON 0x00004402
|
||||
#define VMCS_EXIT_INTR_INFO 0x00004404
|
||||
#define VMCS_EXIT_INTR_ERRCODE 0x00004406
|
||||
#define VMCS_IDT_VECTORING_INFO 0x00004408
|
||||
#define VMCS_IDT_VECTORING_ERROR 0x0000440A
|
||||
#define VMCS_EXIT_INSTRUCTION_LENGTH 0x0000440C
|
||||
#define VMCS_EXIT_INSTRUCTION_INFO 0x0000440E
|
||||
|
||||
/* 32-bit guest-state fields */
|
||||
#define VMCS_GUEST_ES_LIMIT 0x00004800
|
||||
#define VMCS_GUEST_CS_LIMIT 0x00004802
|
||||
#define VMCS_GUEST_SS_LIMIT 0x00004804
|
||||
#define VMCS_GUEST_DS_LIMIT 0x00004806
|
||||
#define VMCS_GUEST_FS_LIMIT 0x00004808
|
||||
#define VMCS_GUEST_GS_LIMIT 0x0000480A
|
||||
#define VMCS_GUEST_LDTR_LIMIT 0x0000480C
|
||||
#define VMCS_GUEST_TR_LIMIT 0x0000480E
|
||||
#define VMCS_GUEST_GDTR_LIMIT 0x00004810
|
||||
#define VMCS_GUEST_IDTR_LIMIT 0x00004812
|
||||
#define VMCS_GUEST_ES_ACCESS_RIGHTS 0x00004814
|
||||
#define VMCS_GUEST_CS_ACCESS_RIGHTS 0x00004816
|
||||
#define VMCS_GUEST_SS_ACCESS_RIGHTS 0x00004818
|
||||
#define VMCS_GUEST_DS_ACCESS_RIGHTS 0x0000481A
|
||||
#define VMCS_GUEST_FS_ACCESS_RIGHTS 0x0000481C
|
||||
#define VMCS_GUEST_GS_ACCESS_RIGHTS 0x0000481E
|
||||
#define VMCS_GUEST_LDTR_ACCESS_RIGHTS 0x00004820
|
||||
#define VMCS_GUEST_TR_ACCESS_RIGHTS 0x00004822
|
||||
#define VMCS_GUEST_INTERRUPTIBILITY 0x00004824
|
||||
#define VMCS_GUEST_ACTIVITY 0x00004826
|
||||
#define VMCS_GUEST_SMBASE 0x00004828
|
||||
#define VMCS_GUEST_IA32_SYSENTER_CS 0x0000482A
|
||||
#define VMCS_PREEMPTION_TIMER_VALUE 0x0000482E
|
||||
|
||||
/* 32-bit host state fields */
|
||||
#define VMCS_HOST_IA32_SYSENTER_CS 0x00004C00
|
||||
|
||||
/* Natural Width control fields */
|
||||
#define VMCS_CR0_MASK 0x00006000
|
||||
#define VMCS_CR4_MASK 0x00006002
|
||||
#define VMCS_CR0_SHADOW 0x00006004
|
||||
#define VMCS_CR4_SHADOW 0x00006006
|
||||
#define VMCS_CR3_TARGET0 0x00006008
|
||||
#define VMCS_CR3_TARGET1 0x0000600A
|
||||
#define VMCS_CR3_TARGET2 0x0000600C
|
||||
#define VMCS_CR3_TARGET3 0x0000600E
|
||||
|
||||
/* Natural Width read-only fields */
|
||||
#define VMCS_EXIT_QUALIFICATION 0x00006400
|
||||
#define VMCS_IO_RCX 0x00006402
|
||||
#define VMCS_IO_RSI 0x00006404
|
||||
#define VMCS_IO_RDI 0x00006406
|
||||
#define VMCS_IO_RIP 0x00006408
|
||||
#define VMCS_GUEST_LINEAR_ADDRESS 0x0000640A
|
||||
|
||||
/* Natural Width guest-state fields */
|
||||
#define VMCS_GUEST_CR0 0x00006800
|
||||
#define VMCS_GUEST_CR3 0x00006802
|
||||
#define VMCS_GUEST_CR4 0x00006804
|
||||
#define VMCS_GUEST_ES_BASE 0x00006806
|
||||
#define VMCS_GUEST_CS_BASE 0x00006808
|
||||
#define VMCS_GUEST_SS_BASE 0x0000680A
|
||||
#define VMCS_GUEST_DS_BASE 0x0000680C
|
||||
#define VMCS_GUEST_FS_BASE 0x0000680E
|
||||
#define VMCS_GUEST_GS_BASE 0x00006810
|
||||
#define VMCS_GUEST_LDTR_BASE 0x00006812
|
||||
#define VMCS_GUEST_TR_BASE 0x00006814
|
||||
#define VMCS_GUEST_GDTR_BASE 0x00006816
|
||||
#define VMCS_GUEST_IDTR_BASE 0x00006818
|
||||
#define VMCS_GUEST_DR7 0x0000681A
|
||||
#define VMCS_GUEST_RSP 0x0000681C
|
||||
#define VMCS_GUEST_RIP 0x0000681E
|
||||
#define VMCS_GUEST_RFLAGS 0x00006820
|
||||
#define VMCS_GUEST_PENDING_DBG_EXCEPTIONS 0x00006822
|
||||
#define VMCS_GUEST_IA32_SYSENTER_ESP 0x00006824
|
||||
#define VMCS_GUEST_IA32_SYSENTER_EIP 0x00006826
|
||||
|
||||
/* Natural Width host-state fields */
|
||||
#define VMCS_HOST_CR0 0x00006C00
|
||||
#define VMCS_HOST_CR3 0x00006C02
|
||||
#define VMCS_HOST_CR4 0x00006C04
|
||||
#define VMCS_HOST_FS_BASE 0x00006C06
|
||||
#define VMCS_HOST_GS_BASE 0x00006C08
|
||||
#define VMCS_HOST_TR_BASE 0x00006C0A
|
||||
#define VMCS_HOST_GDTR_BASE 0x00006C0C
|
||||
#define VMCS_HOST_IDTR_BASE 0x00006C0E
|
||||
#define VMCS_HOST_IA32_SYSENTER_ESP 0x00006C10
|
||||
#define VMCS_HOST_IA32_SYSENTER_EIP 0x00006C12
|
||||
#define VMCS_HOST_RSP 0x00006C14
|
||||
#define VMCS_HOST_RIP 0x00006c16
|
||||
|
||||
/*
|
||||
* VM instruction error numbers
|
||||
*/
|
||||
#define VMRESUME_WITH_NON_LAUNCHED_VMCS 5
|
||||
|
||||
/*
|
||||
* VMCS exit reasons
|
||||
*/
|
||||
#define EXIT_REASON_EXCEPTION 0
|
||||
#define EXIT_REASON_EXT_INTR 1
|
||||
#define EXIT_REASON_TRIPLE_FAULT 2
|
||||
#define EXIT_REASON_INIT 3
|
||||
#define EXIT_REASON_SIPI 4
|
||||
#define EXIT_REASON_IO_SMI 5
|
||||
#define EXIT_REASON_SMI 6
|
||||
#define EXIT_REASON_INTR_WINDOW 7
|
||||
#define EXIT_REASON_NMI_WINDOW 8
|
||||
#define EXIT_REASON_TASK_SWITCH 9
|
||||
#define EXIT_REASON_CPUID 10
|
||||
#define EXIT_REASON_GETSEC 11
|
||||
#define EXIT_REASON_HLT 12
|
||||
#define EXIT_REASON_INVD 13
|
||||
#define EXIT_REASON_INVLPG 14
|
||||
#define EXIT_REASON_RDPMC 15
|
||||
#define EXIT_REASON_RDTSC 16
|
||||
#define EXIT_REASON_RSM 17
|
||||
#define EXIT_REASON_VMCALL 18
|
||||
#define EXIT_REASON_VMCLEAR 19
|
||||
#define EXIT_REASON_VMLAUNCH 20
|
||||
#define EXIT_REASON_VMPTRLD 21
|
||||
#define EXIT_REASON_VMPTRST 22
|
||||
#define EXIT_REASON_VMREAD 23
|
||||
#define EXIT_REASON_VMRESUME 24
|
||||
#define EXIT_REASON_VMWRITE 25
|
||||
#define EXIT_REASON_VMXOFF 26
|
||||
#define EXIT_REASON_VMXON 27
|
||||
#define EXIT_REASON_CR_ACCESS 28
|
||||
#define EXIT_REASON_DR_ACCESS 29
|
||||
#define EXIT_REASON_INOUT 30
|
||||
#define EXIT_REASON_RDMSR 31
|
||||
#define EXIT_REASON_WRMSR 32
|
||||
#define EXIT_REASON_INVAL_VMCS 33
|
||||
#define EXIT_REASON_INVAL_MSR 34
|
||||
#define EXIT_REASON_MWAIT 36
|
||||
#define EXIT_REASON_MTF 37
|
||||
#define EXIT_REASON_MONITOR 39
|
||||
#define EXIT_REASON_PAUSE 40
|
||||
#define EXIT_REASON_MCE_DURING_ENTR 41
|
||||
#define EXIT_REASON_TPR 43
|
||||
#define EXIT_REASON_APIC_ACCESS 44
|
||||
#define EXIT_REASON_VIRTUALIZED_EOI 45
|
||||
#define EXIT_REASON_GDTR_IDTR 46
|
||||
#define EXIT_REASON_LDTR_TR 47
|
||||
#define EXIT_REASON_EPT_FAULT 48
|
||||
#define EXIT_REASON_EPT_MISCONFIG 49
|
||||
#define EXIT_REASON_INVEPT 50
|
||||
#define EXIT_REASON_RDTSCP 51
|
||||
#define EXIT_REASON_VMX_PREEMPT 52
|
||||
#define EXIT_REASON_INVVPID 53
|
||||
#define EXIT_REASON_WBINVD 54
|
||||
#define EXIT_REASON_XSETBV 55
|
||||
#define EXIT_REASON_APIC_WRITE 56
|
||||
|
||||
/*
|
||||
* NMI unblocking due to IRET.
|
||||
*
|
||||
* Applies to VM-exits due to hardware exception or EPT fault.
|
||||
*/
|
||||
#define EXIT_QUAL_NMIUDTI (1 << 12)
|
||||
/*
|
||||
* VMCS interrupt information fields
|
||||
*/
|
||||
#define VMCS_INTR_VALID (1U << 31)
|
||||
#define VMCS_INTR_T_MASK 0x700 /* Interruption-info type */
|
||||
#define VMCS_INTR_T_HWINTR (0 << 8)
|
||||
#define VMCS_INTR_T_NMI (2 << 8)
|
||||
#define VMCS_INTR_T_HWEXCEPTION (3 << 8)
|
||||
#define VMCS_INTR_T_SWINTR (4 << 8)
|
||||
#define VMCS_INTR_T_PRIV_SWEXCEPTION (5 << 8)
|
||||
#define VMCS_INTR_T_SWEXCEPTION (6 << 8)
|
||||
#define VMCS_INTR_DEL_ERRCODE (1 << 11)
|
||||
|
||||
/*
|
||||
* VMCS IDT-Vectoring information fields
|
||||
*/
|
||||
#define VMCS_IDT_VEC_VECNUM 0xFF
|
||||
#define VMCS_IDT_VEC_VALID (1U << 31)
|
||||
#define VMCS_IDT_VEC_TYPE 0x700
|
||||
#define VMCS_IDT_VEC_ERRCODE_VALID (1U << 11)
|
||||
#define VMCS_IDT_VEC_HWINTR (0 << 8)
|
||||
#define VMCS_IDT_VEC_NMI (2 << 8)
|
||||
#define VMCS_IDT_VEC_HWEXCEPTION (3 << 8)
|
||||
#define VMCS_IDT_VEC_SWINTR (4 << 8)
|
||||
#define VMCS_IDT_VEC_PRIV_SWEXCEPTION (5 << 8)
|
||||
#define VMCS_IDT_VEC_SWEXCEPTION (6 << 8)
|
||||
|
||||
/*
|
||||
* VMCS Guest interruptibility field
|
||||
*/
|
||||
#define VMCS_INTERRUPTIBILITY_STI_BLOCKING (1 << 0)
|
||||
#define VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING (1 << 1)
|
||||
#define VMCS_INTERRUPTIBILITY_SMI_BLOCKING (1 << 2)
|
||||
#define VMCS_INTERRUPTIBILITY_NMI_BLOCKING (1 << 3)
|
||||
|
||||
/*
|
||||
* Exit qualification for EXIT_REASON_INVAL_VMCS
|
||||
*/
|
||||
#define EXIT_QUAL_NMI_WHILE_STI_BLOCKING 3
|
||||
|
||||
/*
|
||||
* Exit qualification for EPT violation
|
||||
*/
|
||||
#define EPT_VIOLATION_DATA_READ (1UL << 0)
|
||||
#define EPT_VIOLATION_DATA_WRITE (1UL << 1)
|
||||
#define EPT_VIOLATION_INST_FETCH (1UL << 2)
|
||||
#define EPT_VIOLATION_GPA_READABLE (1UL << 3)
|
||||
#define EPT_VIOLATION_GPA_WRITEABLE (1UL << 4)
|
||||
#define EPT_VIOLATION_GPA_EXECUTABLE (1UL << 5)
|
||||
#define EPT_VIOLATION_GLA_VALID (1UL << 7)
|
||||
#define EPT_VIOLATION_XLAT_VALID (1UL << 8)
|
||||
|
||||
/*
|
||||
* Exit qualification for APIC-access VM exit
|
||||
*/
|
||||
#define APIC_ACCESS_OFFSET(qual) ((qual) & 0xFFF)
|
||||
#define APIC_ACCESS_TYPE(qual) (((qual) >> 12) & 0xF)
|
||||
|
||||
/*
|
||||
* Exit qualification for APIC-write VM exit
|
||||
*/
|
||||
#define APIC_WRITE_OFFSET(qual) ((qual) & 0xFFF)
|
||||
|
||||
#define VMCS_PIN_BASED_CTLS_EXTINT (1 << 0)
|
||||
#define VMCS_PIN_BASED_CTLS_NMI (1 << 3)
|
||||
#define VMCS_PIN_BASED_CTLS_VNMI (1 << 5)
|
||||
|
||||
#define VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING (1 << 2)
|
||||
#define VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET (1 << 3)
|
||||
#define VMCS_PRI_PROC_BASED_CTLS_HLT (1 << 7)
|
||||
#define VMCS_PRI_PROC_BASED_CTLS_MWAIT (1 << 10)
|
||||
#define VMCS_PRI_PROC_BASED_CTLS_TSC (1 << 12)
|
||||
#define VMCS_PRI_PROC_BASED_CTLS_CR8_LOAD (1 << 19)
|
||||
#define VMCS_PRI_PROC_BASED_CTLS_CR8_STORE (1 << 20)
|
||||
#define VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW (1 << 21)
|
||||
#define VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING (1 << 22)
|
||||
#define VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL (1 << 31)
|
||||
|
||||
#define VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES (1 << 0)
|
||||
#define VMCS_PRI_PROC_BASED2_CTLS_X2APIC (1 << 4)
|
||||
|
||||
enum task_switch_reason {
|
||||
TSR_CALL,
|
||||
TSR_IRET,
|
||||
TSR_JMP,
|
||||
TSR_IDT_GATE, /* task gate in IDT */
|
||||
};
|
||||
|
||||
#endif
|
|
@ -0,0 +1,222 @@
|
|||
/*
|
||||
* Copyright (C) 2016 Veertu Inc,
|
||||
* Copyright (C) 2017 Google Inc,
|
||||
* Based on Veertu vddh/vmm/vmx.h
|
||||
*
|
||||
* Interfaces to Hypervisor.framework to read/write X86 registers and VMCS.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef VMX_H
|
||||
#define VMX_H
|
||||
|
||||
#include <stdint.h>
|
||||
#include <Hypervisor/hv.h>
|
||||
#include <Hypervisor/hv_vmx.h>
|
||||
#include "vmcs.h"
|
||||
#include "cpu.h"
|
||||
#include "x86.h"
|
||||
|
||||
#include "exec/address-spaces.h"
|
||||
|
||||
/* Read a guest general-purpose register; abort on hypervisor error. */
static inline uint64_t rreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg)
{
    uint64_t value = 0;

    if (hv_vcpu_read_register(vcpu, reg, &value) != 0) {
        abort();
    }
    return value;
}
|
||||
|
||||
/* write GPR */
|
||||
static inline void wreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg, uint64_t v)
|
||||
{
|
||||
if (hv_vcpu_write_register(vcpu, reg, v)) {
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
/* read VMCS field */
|
||||
static inline uint64_t rvmcs(hv_vcpuid_t vcpu, uint32_t field)
|
||||
{
|
||||
uint64_t v;
|
||||
|
||||
hv_vmx_vcpu_read_vmcs(vcpu, field, &v);
|
||||
|
||||
return v;
|
||||
}
|
||||
|
||||
/* write VMCS field */
|
||||
static inline void wvmcs(hv_vcpuid_t vcpu, uint32_t field, uint64_t v)
|
||||
{
|
||||
hv_vmx_vcpu_write_vmcs(vcpu, field, v);
|
||||
}
|
||||
|
||||
/* desired control word constrained by hardware/hypervisor capabilities */
|
||||
static inline uint64_t cap2ctrl(uint64_t cap, uint64_t ctrl)
|
||||
{
|
||||
return (ctrl | (cap & 0xffffffff)) & (cap >> 32);
|
||||
}
|
||||
|
||||
#define VM_ENTRY_GUEST_LMA (1LL << 9)
|
||||
|
||||
#define AR_TYPE_ACCESSES_MASK 1
|
||||
#define AR_TYPE_READABLE_MASK (1 << 1)
|
||||
#define AR_TYPE_WRITEABLE_MASK (1 << 2)
|
||||
#define AR_TYPE_CODE_MASK (1 << 3)
|
||||
#define AR_TYPE_MASK 0x0f
|
||||
#define AR_TYPE_BUSY_64_TSS 11
|
||||
#define AR_TYPE_BUSY_32_TSS 11
|
||||
#define AR_TYPE_BUSY_16_TSS 3
|
||||
#define AR_TYPE_LDT 2
|
||||
|
||||
/*
 * Switch the guest into IA-32e (long) mode: set EFER.LMA in the guest
 * EFER field and turn on the "IA-32e mode guest" VM-entry control.  If
 * TR's type is not already a busy 64-bit TSS, patch its access rights
 * -- presumably to satisfy the VM-entry guest-state checks; confirm
 * against the Intel SDM.
 *
 * @cr0 is currently unused; kept for interface symmetry with
 * exit_long_mode().
 */
static void enter_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)
{
    uint64_t entry_ctls;

    efer |= MSR_EFER_LMA;
    wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);

    /* Read the entry controls once and reuse the cached value rather
     * than issuing a second, redundant rvmcs() call (the original read
     * into entry_ctls was dead). */
    entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
    wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls | VM_ENTRY_GUEST_LMA);

    uint64_t guest_tr_ar = rvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS);
    if ((efer & MSR_EFER_LME) &&
        (guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
        wvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS,
              (guest_tr_ar & ~AR_TYPE_MASK) | AR_TYPE_BUSY_64_TSS);
    }
}
|
||||
|
||||
/*
 * Leave IA-32e (long) mode: clear the "IA-32e mode guest" VM-entry
 * control, then clear EFER.LMA in the guest EFER field.
 *
 * @cr0 is unused; kept for interface symmetry with enter_long_mode().
 */
static void exit_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)
{
    wvmcs(vcpu, VMCS_ENTRY_CTLS,
          rvmcs(vcpu, VMCS_ENTRY_CTLS) & ~VM_ENTRY_GUEST_LMA);
    wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer & ~MSR_EFER_LMA);
}
|
||||
|
||||
/*
 * Emulate a guest write to CR0.
 *
 * Sets the guest/shadow CR0 fields, reloads the PDPTEs when the guest
 * runs PAE paging without long mode, and enters/exits long mode when
 * CR0.PG toggles while EFER.LME is set.  Finishes by flushing the
 * vCPU's TLB and cached state.
 */
static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
{
    int i;
    uint64_t pdpte[4] = {0, 0, 0, 0};
    uint64_t efer = rvmcs(vcpu, VMCS_GUEST_IA32_EFER);
    uint64_t old_cr0 = rvmcs(vcpu, VMCS_GUEST_CR0);

    /* PAE paging without long mode: fetch the four PDPTEs from the
     * 32-byte-aligned table CR3 points at in guest memory. */
    if ((cr0 & CR0_PG) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE) &&
        !(efer & MSR_EFER_LME)) {
        address_space_rw(&address_space_memory,
                         rvmcs(vcpu, VMCS_GUEST_CR3) & ~0x1f,
                         MEMTXATTRS_UNSPECIFIED,
                         (uint8_t *)pdpte, 32, 0);
    }

    /* VMCS PDPTE fields are 64-bit, hence the stride of 2 encodings. */
    for (i = 0; i < 4; i++) {
        wvmcs(vcpu, VMCS_GUEST_PDPTE0 + i * 2, pdpte[i]);
    }

    /* Trap guest changes to CD/NE/PG; the shadow holds the value the
     * guest believes it wrote. */
    wvmcs(vcpu, VMCS_CR0_MASK, CR0_CD | CR0_NE | CR0_PG);
    wvmcs(vcpu, VMCS_CR0_SHADOW, cr0);

    /* Run with caching enabled (CD clear) and NE/ET forced on. */
    cr0 &= ~CR0_CD;
    wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE | CR0_ET);

    if (efer & MSR_EFER_LME) {
        if (!(old_cr0 & CR0_PG) && (cr0 & CR0_PG)) {
            /* PG turned on with LME set: activate long mode. */
            enter_long_mode(vcpu, cr0, efer);
        }
        if (/*(old_cr0 & CR0_PG) &&*/ !(cr0 & CR0_PG)) {
            /* PG off: deactivate long mode (old PG state ignored;
             * exit_long_mode() is harmless if already inactive). */
            exit_long_mode(vcpu, cr0, efer);
        }
    }

    hv_vcpu_invalidate_tlb(vcpu);
    hv_vcpu_flush(vcpu);
}
|
||||
|
||||
/*
 * Emulate a guest write to CR4: the guest always runs with CR4.VMXE
 * set, while the shadow field carries the value the guest wrote.
 * Flushes the vCPU's TLB and cached state afterwards.
 */
static inline void macvm_set_cr4(hv_vcpuid_t vcpu, uint64_t cr4)
{
    wvmcs(vcpu, VMCS_GUEST_CR4, cr4 | CR4_VMXE);
    wvmcs(vcpu, VMCS_CR4_SHADOW, cr4);

    hv_vcpu_invalidate_tlb(vcpu);
    hv_vcpu_flush(vcpu);
}
|
||||
|
||||
/*
 * Set the guest RIP.  Since RIP has moved past the current
 * instruction, any STI/MOV-SS interrupt shadow recorded in the
 * interruptibility field no longer applies and is cleared.
 */
static inline void macvm_set_rip(CPUState *cpu, uint64_t rip)
{
    uint64_t val;

    /* BUG, should take considering overlap.. */
    wreg(cpu->hvf_fd, HV_X86_RIP, rip);

    /* after moving forward in rip, we need to clean INTERRUPTABILITY */
    val = rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
    if (val & (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
               VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
        /* Write back with only the two blocking bits cleared. */
        wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY,
              val & ~(VMCS_INTERRUPTIBILITY_STI_BLOCKING |
                      VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING));
    }
}
|
||||
|
||||
/*
 * Mark NMIs as deliverable again: clear HF2_NMI_MASK in the CPU state
 * and the NMI-blocking bit in the guest interruptibility field.
 */
static inline void vmx_clear_nmi_blocking(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    uint32_t gi;

    env->hflags2 &= ~HF2_NMI_MASK;
    gi = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY,
          gi & ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING);
}
|
||||
|
||||
/*
 * Block NMI delivery: set HF2_NMI_MASK in the CPU state and the
 * NMI-blocking bit in the guest interruptibility field.
 */
static inline void vmx_set_nmi_blocking(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    uint32_t gi;

    env->hflags2 |= HF2_NMI_MASK;
    gi = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
    wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY,
          gi | VMCS_INTERRUPTIBILITY_NMI_BLOCKING);
}
|
||||
|
||||
/* Request a VM exit as soon as an NMI can be injected (NMI window). */
static inline void vmx_set_nmi_window_exiting(CPUState *cpu)
{
    uint64_t ctls = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);

    ctls |= VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING;
    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, ctls);
}
|
||||
|
||||
/* Stop requesting NMI-window VM exits. */
static inline void vmx_clear_nmi_window_exiting(CPUState *cpu)
{
    uint64_t ctls = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);

    ctls &= ~VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING;
    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, ctls);
}
|
||||
|
||||
#endif
|
|
@ -0,0 +1,186 @@
|
|||
/*
|
||||
* Copyright (C) 2016 Veertu Inc,
|
||||
* Copyright (C) 2017 Google Inc,
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
|
||||
#include "cpu.h"
|
||||
#include "qemu-common.h"
|
||||
#include "x86_decode.h"
|
||||
#include "x86_emu.h"
|
||||
#include "vmcs.h"
|
||||
#include "vmx.h"
|
||||
#include "x86_mmu.h"
|
||||
#include "x86_descr.h"
|
||||
|
||||
/* static uint32_t x86_segment_access_rights(struct x86_segment_descriptor *var)
|
||||
{
|
||||
uint32_t ar;
|
||||
|
||||
if (!var->p) {
|
||||
ar = 1 << 16;
|
||||
return ar;
|
||||
}
|
||||
|
||||
ar = var->type & 15;
|
||||
ar |= (var->s & 1) << 4;
|
||||
ar |= (var->dpl & 3) << 5;
|
||||
ar |= (var->p & 1) << 7;
|
||||
ar |= (var->avl & 1) << 12;
|
||||
ar |= (var->l & 1) << 13;
|
||||
ar |= (var->db & 1) << 14;
|
||||
ar |= (var->g & 1) << 15;
|
||||
return ar;
|
||||
}*/
|
||||
|
||||
/*
 * Load the 8-byte segment descriptor selected by @sel from the guest
 * GDT or LDT (chosen by sel.ti) into @desc.
 *
 * Returns false for the GDT null selector (index 0) or when the
 * descriptor lies beyond the table limit; @desc is left zeroed then.
 */
bool x86_read_segment_descriptor(struct CPUState *cpu,
                                 struct x86_segment_descriptor *desc,
                                 x68_segment_selector sel)
{
    target_ulong base;
    uint32_t limit;

    memset(desc, 0, sizeof(*desc));

    /* valid gdt descriptors start from index 1 */
    if (!sel.index && GDT_SEL == sel.ti) {
        return false;
    }

    if (GDT_SEL == sel.ti) {
        base = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE);
        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
    } else {
        base = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE);
        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT);
    }

    /* NOTE(review): the table limit is conventionally the last valid
     * byte offset, so a descriptor starting exactly at the limit is
     * rejected here -- confirm the >= (vs >) comparison is intended. */
    if (sel.index * 8 >= limit) {
        return false;
    }

    vmx_read_mem(cpu, desc, base + sel.index * 8, sizeof(*desc));
    return true;
}
|
||||
|
||||
/*
 * Store @desc into the guest GDT or LDT slot selected by @sel.
 *
 * Returns false (after logging) when the slot lies beyond the table
 * limit.  NOTE(review): unlike x86_read_segment_descriptor() there is
 * no null-selector (index 0, GDT) rejection here -- confirm callers
 * never pass it.
 */
bool x86_write_segment_descriptor(struct CPUState *cpu,
                                  struct x86_segment_descriptor *desc,
                                  x68_segment_selector sel)
{
    target_ulong base;
    uint32_t limit;

    if (GDT_SEL == sel.ti) {
        base = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE);
        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
    } else {
        base = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE);
        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT);
    }

    if (sel.index * 8 >= limit) {
        printf("%s: gdt limit\n", __func__);
        return false;
    }
    vmx_write_mem(cpu, base + sel.index * 8, desc, sizeof(*desc));
    return true;
}
|
||||
|
||||
/*
 * Read the 8-byte IDT descriptor for vector @gate into @idt_desc.
 *
 * Returns false (after logging) when the gate lies beyond the IDT
 * limit; @idt_desc is left zeroed in that case.
 */
bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
                        int gate)
{
    target_ulong base = rvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_BASE);
    uint32_t limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_LIMIT);

    memset(idt_desc, 0, sizeof(*idt_desc));
    if (gate * 8 >= limit) {
        printf("%s: idt limit\n", __func__);
        return false;
    }

    vmx_read_mem(cpu, idt_desc, base + gate * 8, sizeof(*idt_desc));
    return true;
}
|
||||
|
||||
bool x86_is_protected(struct CPUState *cpu)
|
||||
{
|
||||
uint64_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
|
||||
return cr0 & CR0_PE;
|
||||
}
|
||||
|
||||
/* True when the guest runs in real mode, i.e. not protected mode. */
bool x86_is_real(struct CPUState *cpu)
{
    if (x86_is_protected(cpu)) {
        return false;
    }
    return true;
}
|
||||
|
||||
bool x86_is_v8086(struct CPUState *cpu)
|
||||
{
|
||||
X86CPU *x86_cpu = X86_CPU(cpu);
|
||||
CPUX86State *env = &x86_cpu->env;
|
||||
return x86_is_protected(cpu) && (RFLAGS(env) & RFLAGS_VM);
|
||||
}
|
||||
|
||||
bool x86_is_long_mode(struct CPUState *cpu)
|
||||
{
|
||||
return rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER) & MSR_EFER_LMA;
|
||||
}
|
||||
|
||||
bool x86_is_long64_mode(struct CPUState *cpu)
|
||||
{
|
||||
struct vmx_segment desc;
|
||||
vmx_read_segment_descriptor(cpu, &desc, R_CS);
|
||||
|
||||
return x86_is_long_mode(cpu) && ((desc.ar >> 13) & 1);
|
||||
}
|
||||
|
||||
bool x86_is_paging_mode(struct CPUState *cpu)
|
||||
{
|
||||
uint64_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
|
||||
return cr0 & CR0_PG;
|
||||
}
|
||||
|
||||
bool x86_is_pae_enabled(struct CPUState *cpu)
|
||||
{
|
||||
uint64_t cr4 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR4);
|
||||
return cr4 & CR4_PAE;
|
||||
}
|
||||
|
||||
/* Linear address = segment base of @seg + effective address @addr. */
target_ulong linear_addr(struct CPUState *cpu, target_ulong addr, X86Seg seg)
{
    target_ulong seg_base = vmx_read_segment_base(cpu, seg);

    return seg_base + addr;
}
|
||||
|
||||
target_ulong linear_addr_size(struct CPUState *cpu, target_ulong addr, int size,
|
||||
X86Seg seg)
|
||||
{
|
||||
switch (size) {
|
||||
case 2:
|
||||
addr = (uint16_t)addr;
|
||||
break;
|
||||
case 4:
|
||||
addr = (uint32_t)addr;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return linear_addr(cpu, addr, seg);
|
||||
}
|
||||
|
||||
/* Translate a guest RIP (offset into CS) to a linear address. */
target_ulong linear_rip(struct CPUState *cpu, target_ulong rip)
{
    return linear_addr(cpu, rip, R_CS);
}
|
|
@ -0,0 +1,400 @@
|
|||
/*
|
||||
* Copyright (C) 2016 Veertu Inc,
|
||||
* Copyright (C) 2017 Veertu Inc,
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef HVF_X86_H
|
||||
#define HVF_X86_H 1
|
||||
|
||||
typedef struct x86_register {
|
||||
union {
|
||||
struct {
|
||||
uint64_t rrx; /* full 64 bit */
|
||||
};
|
||||
struct {
|
||||
uint32_t erx; /* low 32 bit part */
|
||||
uint32_t hi32_unused1;
|
||||
};
|
||||
struct {
|
||||
uint16_t rx; /* low 16 bit part */
|
||||
uint16_t hi16_unused1;
|
||||
uint32_t hi32_unused2;
|
||||
};
|
||||
struct {
|
||||
uint8_t lx; /* low 8 bit part */
|
||||
uint8_t hx; /* high 8 bit */
|
||||
uint16_t hi16_unused2;
|
||||
uint32_t hi32_unused3;
|
||||
};
|
||||
};
|
||||
} __attribute__ ((__packed__)) x86_register;
|
||||
|
||||
typedef enum x86_rflags {
|
||||
RFLAGS_CF = (1L << 0),
|
||||
RFLAGS_PF = (1L << 2),
|
||||
RFLAGS_AF = (1L << 4),
|
||||
RFLAGS_ZF = (1L << 6),
|
||||
RFLAGS_SF = (1L << 7),
|
||||
RFLAGS_TF = (1L << 8),
|
||||
RFLAGS_IF = (1L << 9),
|
||||
RFLAGS_DF = (1L << 10),
|
||||
RFLAGS_OF = (1L << 11),
|
||||
RFLAGS_IOPL = (3L << 12),
|
||||
RFLAGS_NT = (1L << 14),
|
||||
RFLAGS_RF = (1L << 16),
|
||||
RFLAGS_VM = (1L << 17),
|
||||
RFLAGS_AC = (1L << 18),
|
||||
RFLAGS_VIF = (1L << 19),
|
||||
RFLAGS_VIP = (1L << 20),
|
||||
RFLAGS_ID = (1L << 21),
|
||||
} x86_rflags;
|
||||
|
||||
/*
 * rflags register, accessible as the full 64-bit value, the low
 * 32 bits, or individual flag bitfields (positions match x86_rflags).
 */
typedef struct x86_reg_flags {
    union {
        struct {
            uint64_t rflags;
        };
        struct {
            uint32_t eflags;
            uint32_t hi32_unused1;
        };
        struct {
            uint32_t cf:1;
            uint32_t unused1:1;
            uint32_t pf:1;
            uint32_t unused2:1;
            uint32_t af:1;
            uint32_t unused3:1;
            uint32_t zf:1;
            uint32_t sf:1;
            uint32_t tf:1;
            uint32_t ief:1;         /* bit 9: the architectural IF position */
            uint32_t df:1;
            uint32_t of:1;
            uint32_t iopl:2;
            uint32_t nt:1;
            uint32_t unused4:1;
            uint32_t rf:1;
            uint32_t vm:1;
            uint32_t ac:1;
            uint32_t vif:1;
            uint32_t vip:1;
            uint32_t id:1;
            uint32_t unused5:10;
            uint32_t hi32_unused2;
        };
    };
} __attribute__ ((__packed__)) x86_reg_flags;
|
||||
|
||||
/* CR0 control-register bit masks */
typedef enum x86_reg_cr0 {
    CR0_PE = (1L << 0),     /* protected mode enable */
    CR0_MP = (1L << 1),     /* monitor coprocessor */
    CR0_EM = (1L << 2),     /* FPU emulation */
    CR0_TS = (1L << 3),     /* task switched */
    CR0_ET = (1L << 4),     /* extension type */
    CR0_NE = (1L << 5),     /* numeric error */
    CR0_WP = (1L << 16),    /* write protect */
    CR0_AM = (1L << 18),    /* alignment mask */
    CR0_NW = (1L << 29),    /* not write-through */
    CR0_CD = (1L << 30),    /* cache disable */
    CR0_PG = (1L << 31),    /* paging enable */
} x86_reg_cr0;
|
||||
|
||||
/* CR4 control-register bit masks */
typedef enum x86_reg_cr4 {
    CR4_VME        = (1L << 0),     /* virtual-8086 mode extensions */
    CR4_PVI        = (1L << 1),     /* protected-mode virtual interrupts */
    CR4_TSD        = (1L << 2),     /* time stamp disable */
    CR4_DE         = (1L << 3),     /* debugging extensions */
    CR4_PSE        = (1L << 4),     /* page size extension */
    CR4_PAE        = (1L << 5),     /* physical address extension */
    /* NOTE(review): bit 6 of CR4 is MCE (machine-check enable) in the
     * SDM; the name 'MSE' looks like a typo but is kept since callers
     * may reference it. */
    CR4_MSE        = (1L << 6),
    CR4_PGE        = (1L << 7),     /* page global enable */
    CR4_PCE        = (1L << 8),     /* performance counter enable */
    CR4_OSFXSR     = (1L << 9),     /* OS FXSAVE/FXRSTOR support */
    CR4_OSXMMEXCPT = (1L << 10),    /* OS unmasked SIMD exceptions */
    CR4_VMXE       = (1L << 13),    /* VMX enable */
    CR4_SMXE       = (1L << 14),    /* SMX enable */
    CR4_FSGSBASE   = (1L << 16),    /* {RD,WR}{FS,GS}BASE enable */
    CR4_PCIDE      = (1L << 17),    /* PCID enable */
    CR4_OSXSAVE    = (1L << 18),    /* XSAVE enable */
    CR4_SMEP       = (1L << 20),    /* supervisor-mode execution prevention */
} x86_reg_cr4;
|
||||
|
||||
/*
 * 16 bit Task State Segment (Intel SDM Vol. 3, 16-bit TSS format).
 * Every field is one word; the structure is 44 bytes and is copied
 * to/from guest memory verbatim during task switches, so the layout
 * must match the hardware format exactly.  (The previous declaration
 * made sp1/sp2 uint32_t, which grew the struct to 48 bytes and
 * shifted every subsequent field.)
 */
typedef struct x86_tss_segment16 {
    uint16_t link;      /* selector of the previous task's TSS */
    uint16_t sp0;       /* ring 0-2 stack pointers/segments */
    uint16_t ss0;
    uint16_t sp1;
    uint16_t ss1;
    uint16_t sp2;
    uint16_t ss2;
    uint16_t ip;
    uint16_t flags;
    uint16_t ax;
    uint16_t cx;
    uint16_t dx;
    uint16_t bx;
    uint16_t sp;
    uint16_t bp;
    uint16_t si;
    uint16_t di;
    uint16_t es;
    uint16_t cs;
    uint16_t ss;
    uint16_t ds;
    uint16_t ldtr;      /* LDT selector */
} __attribute__((packed)) x86_tss_segment16;
|
||||
|
||||
/*
 * 32 bit Task State Segment (Intel SDM Vol. 3 layout, 104 bytes).
 * Copied to/from guest memory verbatim during task switches.
 */
typedef struct x86_tss_segment32 {
    uint32_t prev_tss;      /* selector of the previous task's TSS */
    uint32_t esp0;          /* ring 0-2 stack pointers/segments */
    uint32_t ss0;
    uint32_t esp1;
    uint32_t ss1;
    uint32_t esp2;
    uint32_t ss2;
    uint32_t cr3;
    uint32_t eip;
    uint32_t eflags;
    uint32_t eax;
    uint32_t ecx;
    uint32_t edx;
    uint32_t ebx;
    uint32_t esp;
    uint32_t ebp;
    uint32_t esi;
    uint32_t edi;
    uint32_t es;
    uint32_t cs;
    uint32_t ss;
    uint32_t ds;
    uint32_t fs;
    uint32_t gs;
    uint32_t ldt;
    uint16_t trap;          /* debug-trap-on-task-switch flag */
    uint16_t iomap_base;    /* offset of the I/O permission bitmap */
} __attribute__ ((__packed__)) x86_tss_segment32;
|
||||
|
||||
/*
 * 64 bit Task State Segment (Intel SDM Vol. 3 layout, 104 bytes):
 * per-privilege stack pointers, interrupt stack table entries and
 * the I/O map base; no register state is saved in long mode.
 */
typedef struct x86_tss_segment64 {
    uint32_t unused;
    uint64_t rsp0;          /* ring 0-2 stack pointers */
    uint64_t rsp1;
    uint64_t rsp2;
    uint64_t unused1;
    uint64_t ist1;          /* interrupt stack table pointers */
    uint64_t ist2;
    uint64_t ist3;
    uint64_t ist4;
    uint64_t ist5;
    uint64_t ist6;
    uint64_t ist7;
    uint64_t unused2;
    uint16_t unused3;
    uint16_t iomap_base;    /* offset of the I/O permission bitmap */
} __attribute__ ((__packed__)) x86_tss_segment64;
|
||||
|
||||
/*
 * Segment descriptor in the in-memory GDT/LDT format (8 bytes).
 * The base and limit are split across several fields; use
 * x86_segment_base()/x86_segment_limit() to (de)compose them.
 */
typedef struct x86_segment_descriptor {
    uint64_t    limit0:16;  /* limit bits 15..0 */
    uint64_t    base0:16;   /* base bits 15..0 */
    uint64_t    base1:8;    /* base bits 23..16 */
    uint64_t    type:4;     /* segment type */
    uint64_t    s:1;        /* descriptor type: 0 system, 1 code/data */
    uint64_t    dpl:2;      /* descriptor privilege level */
    uint64_t    p:1;        /* present */
    uint64_t    limit1:4;   /* limit bits 19..16 */
    uint64_t    avl:1;      /* available for system software */
    uint64_t    l:1;        /* 64-bit code segment */
    uint64_t    db:1;       /* default operation size */
    uint64_t    g:1;        /* granularity: limit in 4 KiB units */
    uint64_t    base2:8;    /* base bits 31..24 */
} __attribute__ ((__packed__)) x86_segment_descriptor;
|
||||
|
||||
/* Reassemble the 32-bit segment base scattered across base0/base1/base2. */
static inline uint32_t x86_segment_base(x86_segment_descriptor *desc)
{
    uint32_t base = desc->base0;

    base |= desc->base1 << 16;
    base |= desc->base2 << 24;
    return base;
}
|
||||
|
||||
/* Scatter a 32-bit base address into the descriptor's three base fields. */
static inline void x86_set_segment_base(x86_segment_descriptor *desc,
                                        uint32_t base)
{
    desc->base0 = base & 0xffff;
    desc->base1 = (base >> 16) & 0xff;
    desc->base2 = base >> 24;
}
|
||||
|
||||
/*
 * Decode the descriptor's 20-bit limit.  When the granularity bit is
 * set the limit counts 4 KiB pages, so scale it to a byte limit (the
 * low 12 bits become all-ones).
 */
static inline uint32_t x86_segment_limit(x86_segment_descriptor *desc)
{
    uint32_t raw = desc->limit0 | (desc->limit1 << 16);

    return desc->g ? (raw << 12) | 0xfff : raw;
}
|
||||
|
||||
/* Store a raw 20-bit limit into the split limit0/limit1 fields. */
static inline void x86_set_segment_limit(x86_segment_descriptor *desc,
                                         uint32_t limit)
{
    desc->limit1 = limit >> 16;
    desc->limit0 = limit & 0xffff;
}
|
||||
|
||||
/*
 * Call-gate descriptor (Intel SDM Vol. 3, "Call Gates").  The bit
 * layout must total exactly 64 bits.  The previous declaration used
 * param_count:4 and dpl:1 and omitted the S bit, leaving only 61 bits
 * and misplacing type, dpl, p and offset1 relative to the hardware
 * format read from guest memory.
 */
typedef struct x86_call_gate {
    uint64_t offset0:16;    /* entry offset bits 15..0 */
    uint64_t selector:16;   /* target code-segment selector */
    uint64_t param_count:5; /* dwords copied on a stack switch */
    uint64_t reserved:3;
    uint64_t type:4;        /* gate type */
    uint64_t s:1;           /* descriptor type; 0 for system/gate */
    uint64_t dpl:2;         /* descriptor privilege level */
    uint64_t p:1;           /* present */
    uint64_t offset1:16;    /* entry offset bits 31..16 */
} __attribute__ ((__packed__)) x86_call_gate;
|
||||
|
||||
/* Reassemble the 32-bit entry point from the gate's split offset fields. */
static inline uint32_t x86_call_gate_offset(x86_call_gate *gate)
{
    uint32_t offset = gate->offset0;

    offset |= gate->offset1 << 16;
    return offset;
}
|
||||
|
||||
/* descriptor-table identifiers used by the GDT/LDT access helpers */
#define LDT_SEL 0
#define GDT_SEL 1
|
||||
|
||||
/*
 * Segment selector (Intel SDM Vol. 3, "Segment Selectors"): RPL in
 * bits 1:0, table indicator in bit 2, descriptor index in bits 15:3.
 * The previous rpl:3/index:12 split did not match the hardware
 * encoding (RPL is only two bits wide), so ti/index decoded the wrong
 * bits.  The "x68" in the name is a historical typo kept because the
 * type is referenced throughout the HVF code.
 */
typedef struct x68_segment_selector {
    union {
        uint16_t sel;
        struct {
            uint16_t rpl:2;     /* requested privilege level */
            uint16_t ti:1;      /* 0 = GDT, 1 = LDT */
            uint16_t index:13;  /* descriptor-table index */
        };
    };
} __attribute__ ((__packed__)) x68_segment_selector;
|
||||
|
||||
/*
 * State for lazy flags computation: 'result' caches the last ALU
 * result and 'auxbits' the auxiliary carry information from which the
 * individual rflags bits can be derived on demand.
 */
typedef struct lazy_flags {
    target_ulong result;
    target_ulong auxbits;
} lazy_flags;
|
||||
|
||||
/* Definition of hvf_x86_state is here */
struct HVFX86EmulatorState {
    int interruptable;          /* whether interrupts may be injected now */
    uint64_t fetch_rip;         /* rip of the instruction being fetched */
    uint64_t rip;               /* current emulated instruction pointer */
    struct x86_register regs[16];   /* GPRs, indexed by R_EAX..R_R15 */
    struct x86_reg_flags rflags;
    struct lazy_flags   lflags; /* deferred flag computation state */
    uint8_t mmio_buf[4096];     /* scratch buffer for MMIO data */
};
|
||||
|
||||
/*
 * Useful register access macros.  Each takes a CPUState* and expands
 * to an lvalue inside cpu->hvf_emul, at the width indicated by the
 * x86_register union member it selects (rrx/erx/rx/lx/hx).
 */
#define RIP(cpu)    (cpu->hvf_emul->rip)
#define EIP(cpu)    ((uint32_t)cpu->hvf_emul->rip)
#define RFLAGS(cpu) (cpu->hvf_emul->rflags.rflags)
#define EFLAGS(cpu) (cpu->hvf_emul->rflags.eflags)

/* 64-bit register views */
#define RRX(cpu, reg) (cpu->hvf_emul->regs[reg].rrx)
#define RAX(cpu)        RRX(cpu, R_EAX)
#define RCX(cpu)        RRX(cpu, R_ECX)
#define RDX(cpu)        RRX(cpu, R_EDX)
#define RBX(cpu)        RRX(cpu, R_EBX)
#define RSP(cpu)        RRX(cpu, R_ESP)
#define RBP(cpu)        RRX(cpu, R_EBP)
#define RSI(cpu)        RRX(cpu, R_ESI)
#define RDI(cpu)        RRX(cpu, R_EDI)
#define R8(cpu)         RRX(cpu, R_R8)
#define R9(cpu)         RRX(cpu, R_R9)
#define R10(cpu)        RRX(cpu, R_R10)
#define R11(cpu)        RRX(cpu, R_R11)
#define R12(cpu)        RRX(cpu, R_R12)
#define R13(cpu)        RRX(cpu, R_R13)
#define R14(cpu)        RRX(cpu, R_R14)
#define R15(cpu)        RRX(cpu, R_R15)

/* 32-bit register views */
#define ERX(cpu, reg)   (cpu->hvf_emul->regs[reg].erx)
#define EAX(cpu)        ERX(cpu, R_EAX)
#define ECX(cpu)        ERX(cpu, R_ECX)
#define EDX(cpu)        ERX(cpu, R_EDX)
#define EBX(cpu)        ERX(cpu, R_EBX)
#define ESP(cpu)        ERX(cpu, R_ESP)
#define EBP(cpu)        ERX(cpu, R_EBP)
#define ESI(cpu)        ERX(cpu, R_ESI)
#define EDI(cpu)        ERX(cpu, R_EDI)

/* 16-bit register views */
#define RX(cpu, reg)   (cpu->hvf_emul->regs[reg].rx)
#define AX(cpu)        RX(cpu, R_EAX)
#define CX(cpu)        RX(cpu, R_ECX)
#define DX(cpu)        RX(cpu, R_EDX)
#define BP(cpu)        RX(cpu, R_EBP)
#define SP(cpu)        RX(cpu, R_ESP)
#define BX(cpu)        RX(cpu, R_EBX)
#define SI(cpu)        RX(cpu, R_ESI)
#define DI(cpu)        RX(cpu, R_EDI)

/* low-byte register views */
#define RL(cpu, reg)   (cpu->hvf_emul->regs[reg].lx)
#define AL(cpu)        RL(cpu, R_EAX)
#define CL(cpu)        RL(cpu, R_ECX)
#define DL(cpu)        RL(cpu, R_EDX)
#define BL(cpu)        RL(cpu, R_EBX)

/* high-byte register views (AH/CH/DH/BH) */
#define RH(cpu, reg)   (cpu->hvf_emul->regs[reg].hx)
#define AH(cpu)        RH(cpu, R_EAX)
#define CH(cpu)        RH(cpu, R_ECX)
#define DH(cpu)        RH(cpu, R_EDX)
#define BH(cpu)        RH(cpu, R_EBX)
|
||||
|
||||
/* deal with GDT/LDT descriptors in memory; return false on failure */
bool x86_read_segment_descriptor(struct CPUState *cpu,
                                 struct x86_segment_descriptor *desc,
                                 x68_segment_selector sel);
bool x86_write_segment_descriptor(struct CPUState *cpu,
                                  struct x86_segment_descriptor *desc,
                                  x68_segment_selector sel);

/* fetch gate number @gate from the IDT */
bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
                        int gate);

/* helpers: query the guest's current execution mode */
bool x86_is_protected(struct CPUState *cpu);
bool x86_is_real(struct CPUState *cpu);
bool x86_is_v8086(struct CPUState *cpu);
bool x86_is_long_mode(struct CPUState *cpu);
bool x86_is_long64_mode(struct CPUState *cpu);
bool x86_is_paging_mode(struct CPUState *cpu);
bool x86_is_pae_enabled(struct CPUState *cpu);

/* segment-relative to linear address translation */
enum X86Seg;
target_ulong linear_addr(struct CPUState *cpu, target_ulong addr, enum X86Seg seg);
target_ulong linear_addr_size(struct CPUState *cpu, target_ulong addr, int size,
                              enum X86Seg seg);
target_ulong linear_rip(struct CPUState *cpu, target_ulong rip);
|
||||
|
||||
/*
 * Read the host time-stamp counter with RDTSCP (the serializing TSC
 * read) and fold EDX:EAX into one 64-bit value.  RCX (which RDTSCP
 * loads with IA32_TSC_AUX) and RDX are declared clobbered.
 */
static inline uint64_t rdtscp(void)
{
    uint64_t tsc;
    __asm__ __volatile__("rdtscp; "         /* serializing read of tsc */
                         "shl $32,%%rdx; "  /* shift higher 32 bits stored in rdx up */
                         "or %%rdx,%%rax"   /* and or onto rax */
                         : "=a"(tsc)        /* output to tsc variable */
                         :
                         : "%rcx", "%rdx"); /* rcx and rdx are clobbered */

    return tsc;
}
|
||||
|
||||
#endif
|
|
@ -0,0 +1,166 @@
|
|||
/*
|
||||
* i386 CPUID helper functions
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
* Copyright (c) 2017 Google Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
*
|
||||
* cpuid
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu-common.h"
|
||||
#include "cpu.h"
|
||||
#include "x86.h"
|
||||
#include "vmx.h"
|
||||
#include "sysemu/hvf.h"
|
||||
|
||||
/*
 * Read host extended control register @xcr with XGETBV and fold
 * EDX:EAX into a 64-bit value.  Executes XGETBV unconditionally, so
 * the host must support XSAVE (true on HVF-capable hosts).
 */
static uint64_t xgetbv(uint32_t xcr)
{
    uint32_t eax, edx;

    __asm__ volatile ("xgetbv"
                      : "=a" (eax), "=d" (edx)
                      : "c" (xcr));

    return (((uint64_t)edx) << 32) | eax;
}
|
||||
|
||||
static bool vmx_mpx_supported()
|
||||
{
|
||||
uint64_t cap_exit, cap_entry;
|
||||
|
||||
hv_vmx_read_capability(HV_VMX_CAP_ENTRY, &cap_entry);
|
||||
hv_vmx_read_capability(HV_VMX_CAP_EXIT, &cap_exit);
|
||||
|
||||
return ((cap_exit & (1 << 23)) && (cap_entry & (1 << 16)));
|
||||
}
|
||||
|
||||
/*
 * Compute the CPUID value HVF can expose to a guest for (func, idx).
 *
 * Starts from the host's CPUID and masks each handled leaf down to the
 * feature bits this emulator supports, additionally consulting the
 * Hypervisor.framework VMX capability fields where a feature needs
 * VMX-side support (MPX, INVPCID, XSAVES, RDTSCP).
 *
 * @func: CPUID leaf (input EAX)
 * @idx:  CPUID subleaf (input ECX)
 * @reg:  which result register to return (R_EAX..R_EDX)
 *
 * Returns the selected register's masked value, or 0 for an unhandled
 * leaf or unrecognized @reg.
 */
uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx,
                                 int reg)
{
    uint64_t cap;
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(func, idx, &eax, &ebx, &ecx, &edx);

    switch (func) {
    case 0:
        /* clamp the highest basic leaf to 0xd (XSAVE enumeration) */
        eax = eax < (uint32_t)0xd ? eax : (uint32_t)0xd;
        break;
    case 1:
        /* basic feature flags: keep only what the emulator handles */
        edx &= CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC |
             CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC |
             CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
             CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX |
             CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS;
        ecx &= CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSSE3 |
             CPUID_EXT_FMA | CPUID_EXT_CX16 | CPUID_EXT_PCID |
             CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_MOVBE |
             CPUID_EXT_POPCNT | CPUID_EXT_AES | CPUID_EXT_XSAVE |
             CPUID_EXT_AVX | CPUID_EXT_F16C | CPUID_EXT_RDRAND;
        /* advertise that the guest runs under a hypervisor */
        ecx |= CPUID_EXT_HYPERVISOR;
        break;
    case 6:
        /* thermal/power: expose only ARAT (always-running APIC timer) */
        eax = CPUID_6_EAX_ARAT;
        ebx = 0;
        ecx = 0;
        edx = 0;
        break;
    case 7:
        if (idx == 0) {
            ebx &= CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
                    CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 |
                    CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 |
                    CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_RTM |
                    CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
                    CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_AVX512IFMA |
                    CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512PF |
                    CPUID_7_0_EBX_AVX512ER | CPUID_7_0_EBX_AVX512CD |
                    CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_CLWB |
                    CPUID_7_0_EBX_AVX512DQ | CPUID_7_0_EBX_SHA_NI |
                    CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512VL |
                    CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_MPX;

            /* MPX and INVPCID additionally require VMX-side support */
            if (!vmx_mpx_supported()) {
                ebx &= ~CPUID_7_0_EBX_MPX;
            }
            hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2, &cap);
            if (!(cap & CPU_BASED2_INVPCID)) {
                ebx &= ~CPUID_7_0_EBX_INVPCID;
            }

            ecx &= CPUID_7_0_ECX_AVX512BMI | CPUID_7_0_ECX_AVX512_VPOPCNTDQ;
            edx &= CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS;
        } else {
            ebx = 0;
            ecx = 0;
            edx = 0;
        }
        eax = 0;
        break;
    case 0xD:
        if (idx == 0) {
            /* XSAVE feature set limited by what the host XCR0 enables */
            uint64_t host_xcr0 = xgetbv(0);
            uint64_t supp_xcr0 = host_xcr0 & (XSTATE_FP_MASK | XSTATE_SSE_MASK |
                                  XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK |
                                  XSTATE_BNDCSR_MASK | XSTATE_OPMASK_MASK |
                                  XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK);
            eax &= supp_xcr0;
            if (!vmx_mpx_supported()) {
                eax &= ~(XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK);
            }
        } else if (idx == 1) {
            hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2, &cap);
            /* NOTE(review): CPUID_XSAVE_XSAVES is not part of the mask
             * below, so it is already clear and the conditional clear
             * that follows is dead code; presumably the mask was meant
             * to include XSAVES — confirm before changing guest-visible
             * behaviour. */
            eax &= CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1;
            if (!(cap & CPU_BASED2_XSAVES_XRSTORS)) {
                eax &= ~CPUID_XSAVE_XSAVES;
            }
        }
        break;
    case 0x80000001:
        /* LM only if HVF in 64-bit mode */
        edx &= CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC |
            CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC |
            CPUID_EXT2_SYSCALL | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
            CPUID_PAT | CPUID_PSE36 | CPUID_EXT2_MMXEXT | CPUID_MMX |
            CPUID_FXSR | CPUID_EXT2_FXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_3DNOWEXT |
            CPUID_EXT2_3DNOW | CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX;
        /* RDTSCP emulation relies on the TSC-offsetting VMX control */
        hv_vmx_read_capability(HV_VMX_CAP_PROCBASED, &cap);
        if (!(cap & CPU_BASED_TSC_OFFSET)) {
            edx &= ~CPUID_EXT2_RDTSCP;
        }
        ecx &= CPUID_EXT3_LAHF_LM | CPUID_EXT3_CMP_LEG | CPUID_EXT3_CR8LEG |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A | CPUID_EXT3_MISALIGNSSE |
            CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_OSVW | CPUID_EXT3_XOP |
            CPUID_EXT3_FMA4 | CPUID_EXT3_TBM;
        break;
    default:
        return 0;
    }

    switch (reg) {
    case R_EAX:
        return eax;
    case R_EBX:
        return ebx;
    case R_ECX:
        return ecx;
    case R_EDX:
        return edx;
    default:
        return 0;
    }
}
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,323 @@
|
|||
/*
|
||||
* Copyright (C) 2016 Veertu Inc,
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef HVF_X86_DECODE_H
|
||||
#define HVF_X86_DECODE_H 1
|
||||
|
||||
#include "cpu.h"
|
||||
#include "x86.h"
|
||||
|
||||
/*
 * Instruction prefix bytes, grouped as in the SDM.  (The "OVEERIDE"
 * spelling is a typo for OVERRIDE, kept because the constants are
 * presumably referenced elsewhere.)
 */
typedef enum x86_prefix {
    /* group 1 */
    PREFIX_LOCK =                  0xf0,
    PREFIX_REPN =                  0xf2,
    PREFIX_REP =                   0xf3,
    /* group 2: segment overrides */
    PREFIX_CS_SEG_OVEERIDE =       0x2e,
    PREFIX_SS_SEG_OVEERIDE =       0x36,
    PREFIX_DS_SEG_OVEERIDE =       0x3e,
    PREFIX_ES_SEG_OVEERIDE =       0x26,
    PREFIX_FS_SEG_OVEERIDE =       0x64,
    PREFIX_GS_SEG_OVEERIDE =       0x65,
    /* group 3 */
    PREFIX_OP_SIZE_OVERRIDE =      0x66,
    /* group 4 */
    PREFIX_ADDR_SIZE_OVERRIDE =    0x67,

    /* base value of the 0x40..0x4f REX prefix range */
    PREFIX_REX =                   0x40,
} x86_prefix;
|
||||
|
||||
/*
 * Operations the instruction decoder can produce; one value per
 * emulated instruction family.  Some names deviate from the
 * X86_DECODE_CMD_ convention (X86_DECODE_RET_NEAR/RET_FAR,
 * X86_DECODE_XMD_LGS) — kept as-is since they are referenced by the
 * decoder tables.
 */
enum x86_decode_cmd {
    X86_DECODE_CMD_INVL = 0,    /* invalid/undecoded instruction */

    X86_DECODE_CMD_PUSH,
    X86_DECODE_CMD_PUSH_SEG,
    X86_DECODE_CMD_POP,
    X86_DECODE_CMD_POP_SEG,
    X86_DECODE_CMD_MOV,
    X86_DECODE_CMD_MOVSX,
    X86_DECODE_CMD_MOVZX,
    X86_DECODE_CMD_CALL_NEAR,
    X86_DECODE_CMD_CALL_NEAR_ABS_INDIRECT,
    X86_DECODE_CMD_CALL_FAR_ABS_INDIRECT,
    X86_DECODE_CMD_CALL_FAR,
    X86_DECODE_RET_NEAR,
    X86_DECODE_RET_FAR,
    X86_DECODE_CMD_ADD,
    X86_DECODE_CMD_OR,
    X86_DECODE_CMD_ADC,
    X86_DECODE_CMD_SBB,
    X86_DECODE_CMD_AND,
    X86_DECODE_CMD_SUB,
    X86_DECODE_CMD_XOR,
    X86_DECODE_CMD_CMP,
    X86_DECODE_CMD_INC,
    X86_DECODE_CMD_DEC,
    X86_DECODE_CMD_TST,
    X86_DECODE_CMD_NOT,
    X86_DECODE_CMD_NEG,
    X86_DECODE_CMD_JMP_NEAR,
    X86_DECODE_CMD_JMP_NEAR_ABS_INDIRECT,
    X86_DECODE_CMD_JMP_FAR,
    X86_DECODE_CMD_JMP_FAR_ABS_INDIRECT,
    X86_DECODE_CMD_LEA,
    X86_DECODE_CMD_JXX,         /* conditional jumps */
    X86_DECODE_CMD_JCXZ,
    X86_DECODE_CMD_SETXX,       /* conditional set-byte */
    X86_DECODE_CMD_MOV_TO_SEG,
    X86_DECODE_CMD_MOV_FROM_SEG,
    X86_DECODE_CMD_CLI,
    X86_DECODE_CMD_STI,
    X86_DECODE_CMD_CLD,
    X86_DECODE_CMD_STD,
    X86_DECODE_CMD_STC,
    X86_DECODE_CMD_CLC,
    X86_DECODE_CMD_OUT,
    X86_DECODE_CMD_IN,
    X86_DECODE_CMD_INS,
    X86_DECODE_CMD_OUTS,
    X86_DECODE_CMD_LIDT,
    X86_DECODE_CMD_SIDT,
    X86_DECODE_CMD_LGDT,
    X86_DECODE_CMD_SGDT,
    X86_DECODE_CMD_SMSW,
    X86_DECODE_CMD_LMSW,
    X86_DECODE_CMD_RDTSCP,
    X86_DECODE_CMD_INVLPG,
    X86_DECODE_CMD_MOV_TO_CR,
    X86_DECODE_CMD_MOV_FROM_CR,
    X86_DECODE_CMD_MOV_TO_DR,
    X86_DECODE_CMD_MOV_FROM_DR,
    X86_DECODE_CMD_PUSHF,
    X86_DECODE_CMD_POPF,
    X86_DECODE_CMD_CPUID,
    X86_DECODE_CMD_ROL,
    X86_DECODE_CMD_ROR,
    X86_DECODE_CMD_RCL,
    X86_DECODE_CMD_RCR,
    X86_DECODE_CMD_SHL,
    X86_DECODE_CMD_SAL,
    X86_DECODE_CMD_SHR,
    X86_DECODE_CMD_SHRD,
    X86_DECODE_CMD_SHLD,
    X86_DECODE_CMD_SAR,
    X86_DECODE_CMD_DIV,
    X86_DECODE_CMD_IDIV,
    X86_DECODE_CMD_MUL,
    X86_DECODE_CMD_IMUL_3,      /* IMUL: 3/2/1-operand forms */
    X86_DECODE_CMD_IMUL_2,
    X86_DECODE_CMD_IMUL_1,
    X86_DECODE_CMD_MOVS,
    X86_DECODE_CMD_CMPS,
    X86_DECODE_CMD_SCAS,
    X86_DECODE_CMD_LODS,
    X86_DECODE_CMD_STOS,
    X86_DECODE_CMD_BSWAP,
    X86_DECODE_CMD_XCHG,
    X86_DECODE_CMD_RDTSC,
    X86_DECODE_CMD_RDMSR,
    X86_DECODE_CMD_WRMSR,
    X86_DECODE_CMD_ENTER,
    X86_DECODE_CMD_LEAVE,
    X86_DECODE_CMD_BT,
    X86_DECODE_CMD_BTS,
    X86_DECODE_CMD_BTC,
    X86_DECODE_CMD_BTR,
    X86_DECODE_CMD_BSF,
    X86_DECODE_CMD_BSR,
    X86_DECODE_CMD_IRET,
    X86_DECODE_CMD_INT,
    X86_DECODE_CMD_POPA,
    X86_DECODE_CMD_PUSHA,
    X86_DECODE_CMD_CWD,
    X86_DECODE_CMD_CBW,
    X86_DECODE_CMD_DAS,
    X86_DECODE_CMD_AAD,
    X86_DECODE_CMD_AAM,
    X86_DECODE_CMD_AAS,
    X86_DECODE_CMD_LOOP,
    X86_DECODE_CMD_SLDT,
    X86_DECODE_CMD_STR,
    X86_DECODE_CMD_LLDT,
    X86_DECODE_CMD_LTR,
    X86_DECODE_CMD_VERR,
    X86_DECODE_CMD_VERW,
    X86_DECODE_CMD_SAHF,
    X86_DECODE_CMD_LAHF,
    X86_DECODE_CMD_WBINVD,
    X86_DECODE_CMD_LDS,
    X86_DECODE_CMD_LSS,
    X86_DECODE_CMD_LES,
    X86_DECODE_XMD_LGS,         /* "XMD" is a typo for CMD, kept as-is */
    X86_DECODE_CMD_LFS,
    X86_DECODE_CMD_CMC,
    X86_DECODE_CMD_XLAT,
    X86_DECODE_CMD_NOP,
    X86_DECODE_CMD_CMOV,
    X86_DECODE_CMD_CLTS,
    X86_DECODE_CMD_XADD,
    X86_DECODE_CMD_HLT,
    X86_DECODE_CMD_CMPXCHG8B,
    X86_DECODE_CMD_CMPXCHG,
    X86_DECODE_CMD_POPCNT,

    /* x87/SSE system instructions */
    X86_DECODE_CMD_FNINIT,
    X86_DECODE_CMD_FLD,
    X86_DECODE_CMD_FLDxx,
    X86_DECODE_CMD_FNSTCW,
    X86_DECODE_CMD_FNSTSW,
    X86_DECODE_CMD_FNSETPM,
    X86_DECODE_CMD_FSAVE,
    X86_DECODE_CMD_FRSTOR,
    X86_DECODE_CMD_FXSAVE,
    X86_DECODE_CMD_FXRSTOR,
    X86_DECODE_CMD_FDIV,
    X86_DECODE_CMD_FMUL,
    X86_DECODE_CMD_FSUB,
    X86_DECODE_CMD_FADD,
    X86_DECODE_CMD_EMMS,
    X86_DECODE_CMD_MFENCE,
    X86_DECODE_CMD_SFENCE,
    X86_DECODE_CMD_LFENCE,
    X86_DECODE_CMD_PREFETCH,
    X86_DECODE_CMD_CLFLUSH,
    X86_DECODE_CMD_FST,
    X86_DECODE_CMD_FABS,
    X86_DECODE_CMD_FUCOM,
    X86_DECODE_CMD_FUCOMI,
    X86_DECODE_CMD_FLDCW,
    X86_DECODE_CMD_FXCH,
    X86_DECODE_CMD_FCHS,
    X86_DECODE_CMD_FCMOV,
    X86_DECODE_CMD_FRNDINT,
    X86_DECODE_CMD_FXAM,

    X86_DECODE_CMD_LAST,        /* number of commands; keep last */
};
|
||||
|
||||
const char *decode_cmd_to_string(enum x86_decode_cmd cmd);
|
||||
|
||||
/* ModRM byte: mod (bits 7:6), reg/opcode (5:3), rm (2:0) */
typedef struct x86_modrm {
    union {
        uint8_t modrm;
        struct {
            uint8_t rm:3;
            uint8_t reg:3;
            uint8_t mod:2;
        };
    };
} __attribute__ ((__packed__)) x86_modrm;
|
||||
|
||||
/* SIB byte: scale (bits 7:6), index (5:3), base (2:0) */
typedef struct x86_sib {
    union {
        uint8_t sib;
        struct {
            uint8_t base:3;
            uint8_t index:3;
            uint8_t scale:2;
        };
    };
} __attribute__ ((__packed__)) x86_sib;
|
||||
|
||||
/* REX prefix: W (64-bit operand), R/X/B extend reg/index/base fields */
typedef struct x86_rex {
    union {
        uint8_t rex;
        struct {
            uint8_t b:1;
            uint8_t x:1;
            uint8_t r:1;
            uint8_t w:1;
            uint8_t unused:4;   /* the fixed 0100 pattern */
        };
    };
} __attribute__ ((__packed__)) x86_rex;
|
||||
|
||||
/* Kind of operand an x86_decode_op describes */
typedef enum x86_var_type {
    X86_VAR_IMMEDIATE,  /* immediate encoded in the instruction */
    X86_VAR_OFFSET,     /* memory offset */
    X86_VAR_REG,        /* register operand */
    X86_VAR_RM,         /* register-or-memory (ModRM rm) operand */

    /* for floating point computations */
    X87_VAR_REG,
    X87_VAR_FLOATP,
    X87_VAR_INTP,
    X87_VAR_BYTEP,
} x86_var_type;
|
||||
|
||||
/* One decoded operand: its kind, width, and value/location */
typedef struct x86_decode_op {
    enum x86_var_type type;
    int size;               /* operand size in bytes */

    int reg;                /* register number when type selects a register */
    target_ulong val;       /* immediate value / computed operand value */

    target_ulong ptr;       /* address (or register reference) of the operand */
} x86_decode_op;
|
||||
|
||||
/* Full decoding of one instruction, filled in by decode_instruction() */
typedef struct x86_decode {
    int len;                    /* total instruction length in bytes */
    uint8_t opcode[4];          /* opcode bytes */
    uint8_t opcode_len;
    enum x86_decode_cmd cmd;    /* operation to emulate */
    int addressing_size;        /* effective address size */
    int operand_size;           /* effective operand size */
    int lock;                   /* LOCK prefix present */
    int rep;                    /* REP/REPNE prefix, if any */
    int op_size_override;       /* 0x66 prefix present */
    int addr_size_override;     /* 0x67 prefix present */
    int segment_override;       /* segment-override prefix, if any */
    int control_change_inst;
    bool fwait;
    bool fpop_stack;            /* x87: pop the register stack afterwards */
    bool frev;                  /* x87: reversed-operand form */

    uint32_t displacement;
    uint8_t displacement_size;  /* size of the displacement in bytes */
    struct x86_rex rex;
    bool is_modrm;              /* instruction carries a ModRM byte */
    bool sib_present;
    struct x86_sib sib;
    struct x86_modrm modrm;
    struct x86_decode_op op[4]; /* decoded operands */
    bool is_fpu;
    uint32_t flags_mask;        /* rflags bits affected by this instruction */

} x86_decode;
|
||||
|
||||
/* sign-extend a @size-byte value to 64 bits */
uint64_t sign(uint64_t val, int size);

/* decode the instruction at the current rip; returns its length */
uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode);

/* register access by decoder register number */
target_ulong get_reg_ref(CPUX86State *env, int reg, int is_extended, int size);
target_ulong get_reg_val(CPUX86State *env, int reg, int is_extended, int size);
void calc_modrm_operand(CPUX86State *env, struct x86_decode *decode,
                        struct x86_decode_op *op);
target_ulong decode_linear_addr(CPUX86State *env, struct x86_decode *decode,
                                target_ulong addr, enum X86Seg seg);

/* one-time construction of the decoder tables */
void init_decoder(void);
/* ModRM operand resolution for 16/32/64-bit addressing */
void calc_modrm_operand16(CPUX86State *env, struct x86_decode *decode,
                          struct x86_decode_op *op);
void calc_modrm_operand32(CPUX86State *env, struct x86_decode *decode,
                          struct x86_decode_op *op);
void calc_modrm_operand64(CPUX86State *env, struct x86_decode *decode,
                          struct x86_decode_op *op);
void set_addressing_size(CPUX86State *env, struct x86_decode *decode);
void set_operand_size(CPUX86State *env, struct x86_decode *decode);
|
||||
|
||||
#endif
|
|
@ -0,0 +1,125 @@
|
|||
/*
|
||||
* Copyright (C) 2016 Veertu Inc,
|
||||
* Copyright (C) 2017 Google Inc,
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
|
||||
#include "qemu-common.h"
|
||||
#include "vmx.h"
|
||||
#include "x86_descr.h"
|
||||
|
||||
/*
 * Map each segment register (indexed by the X86Seg R_* values) to the
 * four VMCS field encodings describing that guest segment:
 * selector, base, limit and access rights.
 */
#define VMX_SEGMENT_FIELD(seg)                        \
    [R_##seg] = {                                     \
        .selector = VMCS_GUEST_##seg##_SELECTOR,             \
        .base = VMCS_GUEST_##seg##_BASE,                     \
        .limit = VMCS_GUEST_##seg##_LIMIT,                   \
        .ar_bytes = VMCS_GUEST_##seg##_ACCESS_RIGHTS,        \
}

static const struct vmx_segment_field {
    int selector;
    int base;
    int limit;
    int ar_bytes;
} vmx_segment_fields[] = {
    VMX_SEGMENT_FIELD(ES),
    VMX_SEGMENT_FIELD(CS),
    VMX_SEGMENT_FIELD(SS),
    VMX_SEGMENT_FIELD(DS),
    VMX_SEGMENT_FIELD(FS),
    VMX_SEGMENT_FIELD(GS),
    VMX_SEGMENT_FIELD(LDTR),
    VMX_SEGMENT_FIELD(TR),
};
|
||||
|
||||
/* Read the guest segment limit for @seg from the VMCS. */
uint32_t vmx_read_segment_limit(CPUState *cpu, X86Seg seg)
{
    return (uint32_t)rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].limit);
}
|
||||
|
||||
/* Read the guest segment access rights for @seg from the VMCS. */
uint32_t vmx_read_segment_ar(CPUState *cpu, X86Seg seg)
{
    return (uint32_t)rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].ar_bytes);
}
|
||||
|
||||
/* Read the guest segment base address for @seg from the VMCS. */
uint64_t vmx_read_segment_base(CPUState *cpu, X86Seg seg)
{
    return rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].base);
}
|
||||
|
||||
/* Read the guest segment selector for @seg from the VMCS. */
x68_segment_selector vmx_read_segment_selector(CPUState *cpu, X86Seg seg)
{
    x68_segment_selector sel;
    sel.sel = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector);
    return sel;
}
|
||||
|
||||
/* Write the guest segment selector for @seg into the VMCS. */
void vmx_write_segment_selector(struct CPUState *cpu, x68_segment_selector selector, X86Seg seg)
{
    wvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector, selector.sel);
}
|
||||
|
||||
/* Fill @desc with the complete VMCS state of guest segment @seg. */
void vmx_read_segment_descriptor(struct CPUState *cpu, struct vmx_segment *desc, X86Seg seg)
{
    desc->sel = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector);
    desc->base = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].base);
    desc->limit = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].limit);
    desc->ar = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].ar_bytes);
}
|
||||
|
||||
/* Write the complete VMCS state of guest segment @seg from @desc. */
void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc, X86Seg seg)
{
    const struct vmx_segment_field *sf = &vmx_segment_fields[seg];

    wvmcs(cpu->hvf_fd, sf->base, desc->base);
    wvmcs(cpu->hvf_fd, sf->limit, desc->limit);
    wvmcs(cpu->hvf_fd, sf->selector, desc->sel);
    wvmcs(cpu->hvf_fd, sf->ar_bytes, desc->ar);
}
|
||||
|
||||
/*
 * Convert an in-memory segment descriptor (plus its selector) into the
 * VMCS guest-segment representation.  Bit 16 of the access-rights word
 * is the VMX "segment unusable" flag, set for a null selector.
 */
void x86_segment_descriptor_to_vmx(struct CPUState *cpu, x68_segment_selector selector, struct x86_segment_descriptor *desc, struct vmx_segment *vmx_desc)
{
    uint32_t ar = desc->type;

    ar |= desc->s << 4;
    ar |= desc->dpl << 5;
    ar |= desc->p << 7;
    ar |= desc->avl << 12;
    ar |= desc->l << 13;
    ar |= desc->db << 14;
    ar |= desc->g << 15;
    if (!selector.sel) {
        ar |= 1 << 16;      /* unusable segment */
    }

    vmx_desc->sel = selector.sel;
    vmx_desc->base = x86_segment_base(desc);
    vmx_desc->limit = x86_segment_limit(desc);
    vmx_desc->ar = ar;
}
|
||||
|
||||
void vmx_segment_to_x86_descriptor(struct CPUState *cpu, struct vmx_segment *vmx_desc, struct x86_segment_descriptor *desc)
|
||||
{
|
||||
x86_set_segment_limit(desc, vmx_desc->limit);
|
||||
x86_set_segment_base(desc, vmx_desc->base);
|
||||
|
||||
desc->type = vmx_desc->ar & 15;
|
||||
desc->s = (vmx_desc->ar >> 4) & 1;
|
||||
desc->dpl = (vmx_desc->ar >> 5) & 3;
|
||||
desc->p = (vmx_desc->ar >> 7) & 1;
|
||||
desc->avl = (vmx_desc->ar >> 12) & 1;
|
||||
desc->l = (vmx_desc->ar >> 13) & 1;
|
||||
desc->db = (vmx_desc->ar >> 14) & 1;
|
||||
desc->g = (vmx_desc->ar >> 15) & 1;
|
||||
}
|
||||
|
|
@ -0,0 +1,58 @@
|
|||
/*
|
||||
* Copyright (C) 2016 Veertu Inc,
|
||||
* Copyright (C) 2017 Google Inc,
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef HVF_X86_DESCR_H
|
||||
#define HVF_X86_DESCR_H 1
|
||||
|
||||
#include "x86.h"
|
||||
|
||||
/* VMCS view of one guest segment: selector, base, limit, access rights */
typedef struct vmx_segment {
    uint16_t sel;
    uint64_t base;
    uint64_t limit;
    uint64_t ar;        /* VMX access-rights encoding */
} vmx_segment;
|
||||
|
||||
/* deal with vmstate descriptors: read/write guest segment state in the VMCS */
void vmx_read_segment_descriptor(struct CPUState *cpu,
                                 struct vmx_segment *desc, enum X86Seg seg);
void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc,
                                  enum X86Seg seg);

x68_segment_selector vmx_read_segment_selector(struct CPUState *cpu,
                                               enum X86Seg seg);
void vmx_write_segment_selector(struct CPUState *cpu,
                                x68_segment_selector selector,
                                enum X86Seg seg);

uint64_t vmx_read_segment_base(struct CPUState *cpu, enum X86Seg seg);
void vmx_write_segment_base(struct CPUState *cpu, enum X86Seg seg,
                            uint64_t base);

/* convert between the in-memory descriptor and VMCS segment formats */
void x86_segment_descriptor_to_vmx(struct CPUState *cpu,
                                   x68_segment_selector selector,
                                   struct x86_segment_descriptor *desc,
                                   struct vmx_segment *vmx_desc);

uint32_t vmx_read_segment_limit(CPUState *cpu, enum X86Seg seg);
uint32_t vmx_read_segment_ar(CPUState *cpu, enum X86Seg seg);
void vmx_segment_to_x86_descriptor(struct CPUState *cpu,
                                   struct vmx_segment *vmx_desc,
                                   struct x86_segment_descriptor *desc);
|
||||
|
||||
#endif
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,49 @@
|
|||
/*
|
||||
* Copyright (C) 2016 Veertu Inc,
|
||||
* Copyright (C) 2017 Google Inc,
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
/* Identifiers starting with a double underscore are reserved for the
 * implementation (C11 7.1.3); use a plain guard name instead. */
#ifndef X86_EMU_H
#define X86_EMU_H

#include "x86.h"
#include "x86_decode.h"
#include "cpu.h"

void init_emu(void);
bool exec_instruction(struct CPUX86State *env, struct x86_decode *ins);

void load_regs(struct CPUState *cpu);
void store_regs(struct CPUState *cpu);

void simulate_rdmsr(struct CPUState *cpu);
void simulate_wrmsr(struct CPUState *cpu);

/* Register / memory access helpers used by the instruction emulator. */
target_ulong read_reg(CPUX86State *env, int reg, int size);
void write_reg(CPUX86State *env, int reg, target_ulong val, int size);
target_ulong read_val_from_reg(target_ulong reg_ptr, int size);
void write_val_to_reg(target_ulong reg_ptr, target_ulong val, int size);
void write_val_ext(struct CPUX86State *env, target_ulong ptr, target_ulong val,
                   int size);
uint8_t *read_mmio(struct CPUX86State *env, target_ulong ptr, int bytes);
target_ulong read_val_ext(struct CPUX86State *env, target_ulong ptr, int size);

/* Per-instruction emulation entry points. */
void exec_movzx(struct CPUX86State *env, struct x86_decode *decode);
void exec_shl(struct CPUX86State *env, struct x86_decode *decode);
void exec_movsx(struct CPUX86State *env, struct x86_decode *decode);
void exec_ror(struct CPUX86State *env, struct x86_decode *decode);
void exec_rol(struct CPUX86State *env, struct x86_decode *decode);
void exec_rcl(struct CPUX86State *env, struct x86_decode *decode);
void exec_rcr(struct CPUX86State *env, struct x86_decode *decode);
#endif /* X86_EMU_H */
|
|
@ -0,0 +1,315 @@
|
|||
/////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// Copyright (C) 2001-2012 The Bochs Project
|
||||
// Copyright (C) 2017 Google Inc.
|
||||
//
|
||||
// This library is free software; you can redistribute it and/or
|
||||
// modify it under the terms of the GNU Lesser General Public
|
||||
// License as published by the Free Software Foundation; either
|
||||
// version 2 of the License, or (at your option) any later version.
|
||||
//
|
||||
// This library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
// Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public
|
||||
// License along with this library; if not, write to the Free Software
|
||||
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
/////////////////////////////////////////////////////////////////////////
|
||||
/*
|
||||
* flags functions
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
|
||||
#include "qemu-common.h"
|
||||
#include "panic.h"
|
||||
#include "cpu.h"
|
||||
#include "x86_flags.h"
|
||||
#include "x86.h"
|
||||
|
||||
|
||||
/* this is basically Bochs code */
|
||||
|
||||
/* Lazy-flags layout: lflags.result caches the (sign-extended) last
 * arithmetic result; lflags.auxbits packs the deltas below so each
 * flag can be recomputed on demand. */
#define LF_SIGN_BIT 31

#define LF_BIT_SD (0) /* lazy Sign Flag Delta */
#define LF_BIT_AF (3) /* lazy Adjust flag */
#define LF_BIT_PDB (8) /* lazy Parity Delta Byte (8 bits) */
#define LF_BIT_CF (31) /* lazy Carry Flag */
#define LF_BIT_PO (30) /* lazy Partial Overflow = CF ^ OF */

#define LF_MASK_SD (0x01 << LF_BIT_SD)
#define LF_MASK_AF (0x01 << LF_BIT_AF)
#define LF_MASK_PDB (0xFF << LF_BIT_PDB)
#define LF_MASK_CF (0x01 << LF_BIT_CF)
#define LF_MASK_PO (0x01 << LF_BIT_PO)
|
||||
|
||||
/* Bitwise carry-out vectors: bit i is the carry out of bit i when
 * computing result = op1 + op2 (ADD) or op1 - op2 (SUB, i.e. borrow).
 * The two topmost bits of the vector thus give CF and CF ^ OF. */
#define ADD_COUT_VEC(op1, op2, result) \
    (((op1) & (op2)) | (((op1) | (op2)) & (~(result))))

#define SUB_COUT_VEC(op1, op2, result) \
    (((~(op1)) & (op2)) | (((~(op1)) ^ (op2)) & (result)))

/* Signed-overflow test for an addition: overflow iff the result's sign
 * differs from both operands' signs; 'mask' selects the sign bit. */
#define GET_ADD_OVERFLOW(op1, op2, result, mask) \
    ((((op1) ^ (result)) & ((op2) ^ (result))) & (mask))
||||
/* ******************* */
|
||||
/* OSZAPC */
|
||||
/* ******************* */
|
||||
|
||||
/* size, carries, result */
|
||||
/*
 * Record the lazy-flags state (OF/SF/ZF/AF/PF/CF) after an operation of
 * the given bit width.  lf_carries is the carry-out vector of the
 * operation, lf_result its result.  For 16- and 8-bit widths the carry
 * vector is shifted up so its topmost bits land in the CF/PO positions
 * of auxbits.
 *
 * NOTE(review): the initializer of 'temp' is overwritten on every
 * supported size path; it only matters as a fallback before VM_PANIC.
 */
/* size, carries, result */
#define SET_FLAGS_OSZAPC_SIZE(size, lf_carries, lf_result) { \
    target_ulong temp = ((lf_carries) & (LF_MASK_AF)) | \
        (((lf_carries) >> (size - 2)) << LF_BIT_PO); \
    env->hvf_emul->lflags.result = (target_ulong)(int##size##_t)(lf_result); \
    if ((size) == 32) { \
        temp = ((lf_carries) & ~(LF_MASK_PDB | LF_MASK_SD)); \
    } else if ((size) == 16) { \
        temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 16); \
    } else if ((size) == 8) { \
        temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 24); \
    } else { \
        VM_PANIC("unimplemented"); \
    } \
    env->hvf_emul->lflags.auxbits = (target_ulong)(uint32_t)temp; \
}

/* carries, result */
#define SET_FLAGS_OSZAPC_8(carries, result) \
    SET_FLAGS_OSZAPC_SIZE(8, carries, result)
#define SET_FLAGS_OSZAPC_16(carries, result) \
    SET_FLAGS_OSZAPC_SIZE(16, carries, result)
#define SET_FLAGS_OSZAPC_32(carries, result) \
    SET_FLAGS_OSZAPC_SIZE(32, carries, result)
|
||||
|
||||
/* ******************* */
|
||||
/* OSZAP */
|
||||
/* ******************* */
|
||||
/* size, carries, result */
|
||||
/*
 * Like SET_FLAGS_OSZAPC_SIZE, but preserves the cached CF: delta_c
 * isolates any change to the CF bit and folds it back into auxbits,
 * also toggling PO so that CF ^ OF stays consistent.
 */
/* size, carries, result */
#define SET_FLAGS_OSZAP_SIZE(size, lf_carries, lf_result) { \
    target_ulong temp = ((lf_carries) & (LF_MASK_AF)) | \
        (((lf_carries) >> (size - 2)) << LF_BIT_PO); \
    if ((size) == 32) { \
        temp = ((lf_carries) & ~(LF_MASK_PDB | LF_MASK_SD)); \
    } else if ((size) == 16) { \
        temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 16); \
    } else if ((size) == 8) { \
        temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 24); \
    } else { \
        VM_PANIC("unimplemented"); \
    } \
    env->hvf_emul->lflags.result = (target_ulong)(int##size##_t)(lf_result); \
    target_ulong delta_c = (env->hvf_emul->lflags.auxbits ^ temp) & LF_MASK_CF; \
    delta_c ^= (delta_c >> 1); \
    env->hvf_emul->lflags.auxbits = (target_ulong)(uint32_t)(temp ^ delta_c); \
}

/* carries, result */
#define SET_FLAGS_OSZAP_8(carries, result) \
    SET_FLAGS_OSZAP_SIZE(8, carries, result)
#define SET_FLAGS_OSZAP_16(carries, result) \
    SET_FLAGS_OSZAP_SIZE(16, carries, result)
#define SET_FLAGS_OSZAP_32(carries, result) \
    SET_FLAGS_OSZAP_SIZE(32, carries, result)
|
||||
|
||||
/* Overwrite the cached OF and CF with the given values, leaving every
 * other lazy flag untouched (only the PO/CF bits of auxbits change;
 * PO is kept equal to OF ^ CF). */
void SET_FLAGS_OxxxxC(CPUX86State *env, uint32_t new_of, uint32_t new_cf)
{
    uint32_t po_bit = new_of ^ new_cf;
    target_ulong aux = env->hvf_emul->lflags.auxbits;

    aux &= ~(LF_MASK_PO | LF_MASK_CF);
    aux |= (po_bit << LF_BIT_PO) | (new_cf << LF_BIT_CF);
    env->hvf_emul->lflags.auxbits = aux;
}
|
||||
|
||||
/* OSZAPC after a subtraction; callers are expected to pass
 * diff == v1 - v2 (not checked here). */
void SET_FLAGS_OSZAPC_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
                            uint32_t diff)
{
    SET_FLAGS_OSZAPC_32(SUB_COUT_VEC(v1, v2, diff), diff);
}

void SET_FLAGS_OSZAPC_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,
                            uint16_t diff)
{
    SET_FLAGS_OSZAPC_16(SUB_COUT_VEC(v1, v2, diff), diff);
}

void SET_FLAGS_OSZAPC_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,
                           uint8_t diff)
{
    SET_FLAGS_OSZAPC_8(SUB_COUT_VEC(v1, v2, diff), diff);
}

/* OSZAPC after an addition; callers are expected to pass
 * diff == v1 + v2. */
void SET_FLAGS_OSZAPC_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,
                            uint32_t diff)
{
    SET_FLAGS_OSZAPC_32(ADD_COUT_VEC(v1, v2, diff), diff);
}

void SET_FLAGS_OSZAPC_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,
                            uint16_t diff)
{
    SET_FLAGS_OSZAPC_16(ADD_COUT_VEC(v1, v2, diff), diff);
}

void SET_FLAGS_OSZAPC_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,
                           uint8_t diff)
{
    SET_FLAGS_OSZAPC_8(ADD_COUT_VEC(v1, v2, diff), diff);
}
|
||||
|
||||
/* Same as the OSZAPC variants, but CF is preserved (used by
 * instructions such as INC/DEC that do not modify CF). */
void SET_FLAGS_OSZAP_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
                           uint32_t diff)
{
    SET_FLAGS_OSZAP_32(SUB_COUT_VEC(v1, v2, diff), diff);
}

void SET_FLAGS_OSZAP_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,
                           uint16_t diff)
{
    SET_FLAGS_OSZAP_16(SUB_COUT_VEC(v1, v2, diff), diff);
}

void SET_FLAGS_OSZAP_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,
                          uint8_t diff)
{
    SET_FLAGS_OSZAP_8(SUB_COUT_VEC(v1, v2, diff), diff);
}

void SET_FLAGS_OSZAP_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,
                           uint32_t diff)
{
    SET_FLAGS_OSZAP_32(ADD_COUT_VEC(v1, v2, diff), diff);
}

void SET_FLAGS_OSZAP_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,
                           uint16_t diff)
{
    SET_FLAGS_OSZAP_16(ADD_COUT_VEC(v1, v2, diff), diff);
}

void SET_FLAGS_OSZAP_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,
                          uint8_t diff)
{
    SET_FLAGS_OSZAP_8(ADD_COUT_VEC(v1, v2, diff), diff);
}
|
||||
|
||||
|
||||
/* Flags after a logical operation: a zero carry vector clears
 * CF/OF/AF; SF/ZF/PF follow from diff.  v1/v2 are unused but kept so
 * all SET_FLAGS_* helpers share one signature. */
void SET_FLAGS_OSZAPC_LOGIC32(CPUX86State *env, uint32_t v1, uint32_t v2,
                              uint32_t diff)
{
    SET_FLAGS_OSZAPC_32(0, diff);
}

void SET_FLAGS_OSZAPC_LOGIC16(CPUX86State *env, uint16_t v1, uint16_t v2,
                              uint16_t diff)
{
    SET_FLAGS_OSZAPC_16(0, diff);
}

void SET_FLAGS_OSZAPC_LOGIC8(CPUX86State *env, uint8_t v1, uint8_t v2,
                             uint8_t diff)
{
    SET_FLAGS_OSZAPC_8(0, diff);
}
|
||||
|
||||
/* PF is tracked lazily: XOR the low byte of the last result with the
 * Parity Delta Byte stashed in auxbits, then test the parity of the
 * combined byte. */
bool get_PF(CPUX86State *env)
{
    uint32_t temp = (255 & env->hvf_emul->lflags.result);
    temp = temp ^ (255 & (env->hvf_emul->lflags.auxbits >> LF_BIT_PDB));
    /* Fold 8 bits down to 4; 0x9669 is a 16-entry parity lookup table,
     * so this returns 1 when the byte has an even number of set bits. */
    temp = (temp ^ (temp >> 4)) & 0x0F;
    return (0x9669U >> temp) & 1;
}
|
||||
|
||||
/* Force PF to 'val' by rewriting the Parity Delta Byte: the stored
 * byte XORs with the result's low byte to even parity when val is
 * true, or flips one bit (the !val term) to make parity odd. */
void set_PF(CPUX86State *env, bool val)
{
    uint32_t temp = (255 & env->hvf_emul->lflags.result) ^ (!val);
    env->hvf_emul->lflags.auxbits &= ~(LF_MASK_PDB);
    env->hvf_emul->lflags.auxbits |= (temp << LF_BIT_PDB);
}
|
||||
|
||||
/* OF = CF ^ PO.  Adding 1 at the PO position (bit 30) carries into the
 * CF position (bit 31) exactly when PO is set, so the CF bit of the sum
 * reads as CF ^ PO. */
bool get_OF(CPUX86State *env)
{
    return ((env->hvf_emul->lflags.auxbits + (1U << LF_BIT_PO)) >> LF_BIT_CF) & 1;
}

/* CF is stored directly in auxbits bit 31. */
bool get_CF(CPUX86State *env)
{
    return (env->hvf_emul->lflags.auxbits >> LF_BIT_CF) & 1;
}
|
||||
|
||||
/* Rewrite OF while keeping the current CF, via the combined helper. */
void set_OF(CPUX86State *env, bool val)
{
    SET_FLAGS_OxxxxC(env, val, get_CF(env));
}

/* Rewrite CF while keeping the current OF, via the combined helper. */
void set_CF(CPUX86State *env, bool val)
{
    SET_FLAGS_OxxxxC(env, get_OF(env), val);
}
|
||||
|
||||
/* AF is stored directly in auxbits bit LF_BIT_AF. */
bool get_AF(CPUX86State *env)
{
    return (env->hvf_emul->lflags.auxbits >> LF_BIT_AF) & 1;
}

void set_AF(CPUX86State *env, bool val)
{
    env->hvf_emul->lflags.auxbits &= ~(LF_MASK_AF);
    env->hvf_emul->lflags.auxbits |= val << LF_BIT_AF;
}

/* ZF is set iff the cached result is zero. */
bool get_ZF(CPUX86State *env)
{
    return !env->hvf_emul->lflags.result;
}
|
||||
|
||||
/* Force ZF to 'val'.  Setting ZF requires zeroing lflags.result, so the
 * information SF and PF derive from it is first folded into auxbits
 * (sign bit into SD, low byte into the Parity Delta Byte).  Clearing ZF
 * sets bit 8 of the result — outside both the low byte and the sign
 * bit — so PF and SF are unaffected. */
void set_ZF(CPUX86State *env, bool val)
{
    if (val) {
        env->hvf_emul->lflags.auxbits ^=
            (((env->hvf_emul->lflags.result >> LF_SIGN_BIT) & 1) << LF_BIT_SD);
        /* merge the parity bits into the Parity Delta Byte */
        uint32_t temp_pdb = (255 & env->hvf_emul->lflags.result);
        env->hvf_emul->lflags.auxbits ^= (temp_pdb << LF_BIT_PDB);
        /* now zero the .result value */
        env->hvf_emul->lflags.result = 0;
    } else {
        env->hvf_emul->lflags.result |= (1 << 8);
    }
}
|
||||
|
||||
/* SF = sign bit of the cached result XOR the Sign Delta bit. */
bool get_SF(CPUX86State *env)
{
    return ((env->hvf_emul->lflags.result >> LF_SIGN_BIT) ^
            (env->hvf_emul->lflags.auxbits >> LF_BIT_SD)) & 1;
}

/* Force SF to 'val' by toggling the Sign Delta bit when the current SF
 * differs from the requested value. */
void set_SF(CPUX86State *env, bool val)
{
    bool temp_sf = get_SF(env);
    env->hvf_emul->lflags.auxbits ^= (temp_sf ^ val) << LF_BIT_SD;
}
|
||||
|
||||
/* Materialise all six lazily-tracked flags into the rflags cache. */
void lflags_to_rflags(CPUX86State *env)
{
    env->hvf_emul->rflags.cf = get_CF(env);
    env->hvf_emul->rflags.pf = get_PF(env);
    env->hvf_emul->rflags.af = get_AF(env);
    env->hvf_emul->rflags.zf = get_ZF(env);
    env->hvf_emul->rflags.sf = get_SF(env);
    env->hvf_emul->rflags.of = get_OF(env);
}

/* Rebuild the lazy-flags state from the rflags cache.  The ordering is
 * significant: several setters read state written by earlier ones
 * (e.g. set_ZF rewrites lflags.result which set_PF and set_SF consume,
 * and set_OF/set_CF each preserve the other flag's current value). */
void rflags_to_lflags(CPUX86State *env)
{
    env->hvf_emul->lflags.auxbits = env->hvf_emul->lflags.result = 0;
    set_OF(env, env->hvf_emul->rflags.of);
    set_SF(env, env->hvf_emul->rflags.sf);
    set_ZF(env, env->hvf_emul->rflags.zf);
    set_AF(env, env->hvf_emul->rflags.af);
    set_PF(env, env->hvf_emul->rflags.pf);
    set_CF(env, env->hvf_emul->rflags.cf);
}
|
|
@ -0,0 +1,80 @@
|
|||
/////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// Copyright (C) 2001-2012 The Bochs Project
|
||||
// Copyright (C) 2017 Google Inc.
|
||||
//
|
||||
// This library is free software; you can redistribute it and/or
|
||||
// modify it under the terms of the GNU Lesser General Public
|
||||
// License as published by the Free Software Foundation; either
|
||||
// version 2 of the License, or (at your option) any later version.
|
||||
//
|
||||
// This library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
// Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public
|
||||
// License along with this library; if not, write to the Free Software
|
||||
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
/////////////////////////////////////////////////////////////////////////
|
||||
/*
|
||||
* x86 eflags functions
|
||||
*/
|
||||
/* Identifiers starting with a double underscore are reserved for the
 * implementation (C11 7.1.3); use a plain guard name instead. */
#ifndef X86_FLAGS_H
#define X86_FLAGS_H

#include "cpu.h"
/* Sync between the lazy-flags cache and the rflags image. */
void lflags_to_rflags(CPUX86State *env);
void rflags_to_lflags(CPUX86State *env);

/* Accessors for the individual lazily-computed flags. */
bool get_PF(CPUX86State *env);
void set_PF(CPUX86State *env, bool val);
bool get_CF(CPUX86State *env);
void set_CF(CPUX86State *env, bool val);
bool get_AF(CPUX86State *env);
void set_AF(CPUX86State *env, bool val);
bool get_ZF(CPUX86State *env);
void set_ZF(CPUX86State *env, bool val);
bool get_SF(CPUX86State *env);
void set_SF(CPUX86State *env, bool val);
bool get_OF(CPUX86State *env);
void set_OF(CPUX86State *env, bool val);

/* Set OF and CF to the given values, leaving the other flags alone. */
void SET_FLAGS_OxxxxC(CPUX86State *env, uint32_t new_of, uint32_t new_cf);

/* Update OF/SF/ZF/AF/PF/CF (OSZAPC) — or everything but CF (OSZAP) —
 * after an arithmetic/logical operation on v1/v2 producing diff. */
void SET_FLAGS_OSZAPC_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
                            uint32_t diff);
void SET_FLAGS_OSZAPC_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,
                            uint16_t diff);
void SET_FLAGS_OSZAPC_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,
                           uint8_t diff);

void SET_FLAGS_OSZAPC_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,
                            uint32_t diff);
void SET_FLAGS_OSZAPC_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,
                            uint16_t diff);
void SET_FLAGS_OSZAPC_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,
                           uint8_t diff);

void SET_FLAGS_OSZAP_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
                           uint32_t diff);
void SET_FLAGS_OSZAP_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,
                           uint16_t diff);
void SET_FLAGS_OSZAP_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,
                          uint8_t diff);

void SET_FLAGS_OSZAP_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,
                           uint32_t diff);
void SET_FLAGS_OSZAP_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,
                           uint16_t diff);
void SET_FLAGS_OSZAP_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,
                          uint8_t diff);

void SET_FLAGS_OSZAPC_LOGIC32(CPUX86State *env, uint32_t v1, uint32_t v2,
                              uint32_t diff);
void SET_FLAGS_OSZAPC_LOGIC16(CPUX86State *env, uint16_t v1, uint16_t v2,
                              uint16_t diff);
void SET_FLAGS_OSZAPC_LOGIC8(CPUX86State *env, uint8_t v1, uint8_t v2,
                             uint8_t diff);

#endif /* X86_FLAGS_H */
|
|
@ -0,0 +1,272 @@
|
|||
/*
|
||||
* Copyright (C) 2016 Veertu Inc,
|
||||
* Copyright (C) 2017 Google Inc,
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#include "qemu/osdep.h"
|
||||
#include "panic.h"
|
||||
|
||||
#include "qemu-common.h"
|
||||
#include "cpu.h"
|
||||
#include "x86.h"
|
||||
#include "x86_mmu.h"
|
||||
#include "string.h"
|
||||
#include "vmcs.h"
|
||||
#include "vmx.h"
|
||||
|
||||
#include "memory.h"
|
||||
#include "exec/address-spaces.h"
|
||||
|
||||
/* Predicates over page-table-entry bits (PT_* from x86_mmu.h). */
#define pte_present(pte) (pte & PT_PRESENT)
#define pte_write_access(pte) (pte & PT_WRITE)
#define pte_user_access(pte) (pte & PT_USER)
#define pte_exec_access(pte) (!(pte & PT_NX))

#define pte_large_page(pte) (pte & PT_PS)
#define pte_global_access(pte) (pte & PT_GLOBAL)

/* CR3 masks: PAE CR3 drops the low 5 bits; legacy CR3 is 32-bit. */
#define PAE_CR3_MASK (~0x1fllu)
#define LEGACY_CR3_MASK (0xffffffff)

/* Masks extracting the next-level table address from an entry
 * (PAE physical addresses are capped at 52 bits). */
#define LEGACY_PTE_PAGE_MASK (0xffffffffllu << 12)
#define PAE_PTE_PAGE_MASK ((-1llu << 12) & ((1llu << 52) - 1))
#define PAE_PTE_LARGE_PAGE_MASK ((-1llu << (21)) & ((1llu << 52) - 1))

/* State carried through one guest-page-table walk. */
struct gpt_translation {
    target_ulong gva;  /* guest virtual address being translated */
    uint64_t gpa;      /* resulting guest physical address */
    int err_code;      /* accumulated MMU_PAGE_* bits */
    uint64_t pte[5];   /* entry read at each level; [top] seeded from CR3 */
    bool write_access; /* access type flags, taken from the caller's */
    bool user_access;  /* err_code in walk_gpt() */
    bool exec_access;
};
|
||||
|
||||
/* Number of paging levels for the current mode: 2 without PAE,
 * 4 in long mode, otherwise 3 (32-bit PAE). */
static int gpt_top_level(struct CPUState *cpu, bool pae)
{
    if (!pae) {
        return 2;
    }
    return x86_is_long_mode(cpu) ? 4 : 3;
}
|
||||
|
||||
/* Index of the entry for 'addr' within the table at 'level':
 * 9-bit indices with PAE's 512-entry tables, 10-bit otherwise,
 * above a 12-bit page offset. */
static inline int gpt_entry(target_ulong addr, int level, bool pae)
{
    int level_shift = pae ? 9 : 10;
    return (addr >> (level_shift * (level - 1) + 12)) & ((1 << level_shift) - 1);
}
|
||||
|
||||
/* Size in bytes of one page-table entry: 8 with PAE, 4 legacy. */
static inline int pte_size(bool pae)
{
    if (pae) {
        return 8;
    }
    return 4;
}
|
||||
|
||||
|
||||
/* Read the next-lower-level entry for the current walk position into
 * pt->pte[level - 1].
 * NOTE(review): the result of address_space_rw is ignored and this
 * function unconditionally returns true. */
static bool get_pt_entry(struct CPUState *cpu, struct gpt_translation *pt,
                         int level, bool pae)
{
    int index;
    uint64_t pte = 0;
    uint64_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK;
    uint64_t gpa = pt->pte[level] & page_mask;

    /* In 32-bit PAE the level-3 value comes straight from CR3 (the PDPT
     * pointer), so it is used unmasked — presumably because of CR3's
     * 32-byte alignment; TODO confirm. */
    if (level == 3 && !x86_is_long_mode(cpu)) {
        gpa = pt->pte[level];
    }

    index = gpt_entry(pt->gva, level, pae);
    /* Read 4 or 8 bytes of the entry from guest physical memory. */
    address_space_rw(&address_space_memory, gpa + index * pte_size(pae),
                     MEMTXATTRS_UNSPECIFIED, (uint8_t *)&pte, pte_size(pae), 0);

    pt->pte[level - 1] = pte;

    return true;
}
|
||||
|
||||
/* test page table entry */
|
||||
/* test page table entry */
/* Check presence and permissions of pt->pte[level], accumulating the
 * access-type bits into pt->err_code and flagging large pages via
 * *is_large.  Returns false when the entry is not present or the
 * access violates its protection bits. */
static bool test_pt_entry(struct CPUState *cpu, struct gpt_translation *pt,
                          int level, bool *is_large, bool pae)
{
    uint64_t pte = pt->pte[level];

    /* Record the access type first: these bits are set even when the
     * check below succeeds. */
    if (pt->write_access) {
        pt->err_code |= MMU_PAGE_WT;
    }
    if (pt->user_access) {
        pt->err_code |= MMU_PAGE_US;
    }
    if (pt->exec_access) {
        pt->err_code |= MMU_PAGE_NX;
    }

    if (!pte_present(pte)) {
        return false;
    }

    /* In 32-bit PAE mode, level 2 is the PDPTE; skip the protection
     * checks for it — presumably because it carries no R/W and U/S
     * bits; TODO confirm against the SDM. */
    if (pae && !x86_is_long_mode(cpu) && 2 == level) {
        goto exit;
    }

    /* A PS bit at level 1 terminates the walk with a large page. */
    if (1 == level && pte_large_page(pte)) {
        pt->err_code |= MMU_PAGE_PT;
        *is_large = true;
    }
    if (!level) {
        pt->err_code |= MMU_PAGE_PT;
    }

    uint32_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
    /* check protection */
    if (cr0 & CR0_WP) {
        if (pt->write_access && !pte_write_access(pte)) {
            return false;
        }
    }

    if (pt->user_access && !pte_user_access(pte)) {
        return false;
    }

    /* The NX bit only exists in PAE-format entries. */
    if (pae && pt->exec_access && !pte_exec_access(pte)) {
        return false;
    }

exit:
    /* TODO: check reserved bits */
    return true;
}
|
||||
|
||||
/* Physical base of a 4MB PSE page: PTE bits 13..20 supply physical
 * address bits 32..39, bits 22..31 pass through unchanged. */
static inline uint64_t pse_pte_to_page(uint64_t pte)
{
    uint64_t high_bits = (pte >> 13) & 0xffULL;
    uint64_t low_bits = pte & 0xffc00000ULL;
    return (high_bits << 32) | low_bits;
}
|
||||
|
||||
/* Compose the guest-physical address for a large-page translation:
 * the level-1 entry supplies the page base, the low gva bits the
 * offset (21 bits for PAE 2MB pages, 22 for legacy 4MB pages). */
static inline uint64_t large_page_gpa(struct gpt_translation *pt, bool pae)
{
    VM_PANIC_ON(!pte_large_page(pt->pte[1]))
    /* 2Mb large page */
    if (pae) {
        return (pt->pte[1] & PAE_PTE_LARGE_PAGE_MASK) | (pt->gva & 0x1fffff);
    }

    /* 4Mb large page */
    return pse_pte_to_page(pt->pte[1]) | (pt->gva & 0x3fffff);
}
|
||||
|
||||
|
||||
|
||||
/* Walk the guest page tables for 'addr'.  err_code's MMU_PAGE_* bits
 * describe the access being performed; on success pt->gpa holds the
 * translated guest-physical address.  Returns false when a level fails
 * the presence/permission checks in test_pt_entry(). */
static bool walk_gpt(struct CPUState *cpu, target_ulong addr, int err_code,
                     struct gpt_translation *pt, bool pae)
{
    int top_level, level;
    bool is_large = false;
    target_ulong cr3 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR3);
    uint64_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK;

    memset(pt, 0, sizeof(*pt));
    top_level = gpt_top_level(cpu, pae);

    /* Seed the walk with CR3 as the pseudo-entry above the top level. */
    pt->pte[top_level] = pae ? (cr3 & PAE_CR3_MASK) : (cr3 & LEGACY_CR3_MASK);
    pt->gva = addr;
    pt->user_access = (err_code & MMU_PAGE_US);
    pt->write_access = (err_code & MMU_PAGE_WT);
    pt->exec_access = (err_code & MMU_PAGE_NX);

    for (level = top_level; level > 0; level--) {
        get_pt_entry(cpu, pt, level, pae);

        if (!test_pt_entry(cpu, pt, level - 1, &is_large, pae)) {
            return false;
        }

        /* A large-page entry terminates the walk early. */
        if (is_large) {
            break;
        }
    }

    if (!is_large) {
        pt->gpa = (pt->pte[0] & page_mask) | (pt->gva & 0xfff);
    } else {
        pt->gpa = large_page_gpa(pt, pae);
    }

    return true;
}
|
||||
|
||||
|
||||
/* Translate a guest-virtual address to guest-physical.  Returns false
 * when the page walk fails; *gpa is only written on success. */
bool mmu_gva_to_gpa(struct CPUState *cpu, target_ulong gva, uint64_t *gpa)
{
    struct gpt_translation pt;

    /* With paging disabled, guest-virtual equals guest-physical. */
    if (!x86_is_paging_mode(cpu)) {
        *gpa = gva;
        return true;
    }

    if (!walk_gpt(cpu, gva, 0, &pt, x86_is_pae_enabled(cpu))) {
        return false;
    }

    *gpa = pt.gpa;
    return true;
}
|
||||
|
||||
/* Copy 'bytes' from 'data' into guest virtual memory at 'gva',
 * splitting the copy at 4KiB page boundaries and translating each page
 * through the guest page tables.  Panics if a translation fails.
 * (Pointer arithmetic on void * relies on the GCC extension.) */
void vmx_write_mem(struct CPUState *cpu, target_ulong gva, void *data, int bytes)
{
    uint64_t gpa;

    while (bytes > 0) {
        /* copy page */
        int copy = MIN(bytes, 0x1000 - (gva & 0xfff));

        if (!mmu_gva_to_gpa(cpu, gva, &gpa)) {
            VM_PANIC_EX("%s: mmu_gva_to_gpa %llx failed\n", __func__, gva);
        } else {
            address_space_rw(&address_space_memory, gpa, MEMTXATTRS_UNSPECIFIED,
                             data, copy, 1);
        }

        bytes -= copy;
        gva += copy;
        data += copy;
    }
}
|
||||
|
||||
/* Copy 'bytes' of guest virtual memory at 'gva' into 'data', page by
 * page, translating each page through the guest page tables.  Panics if
 * a translation fails.
 *
 * Fix: guard the address_space_rw call with an else, mirroring
 * vmx_write_mem().  Previously, if VM_PANIC_EX ever returned, the read
 * would proceed with an uninitialized 'gpa' (undefined behavior). */
void vmx_read_mem(struct CPUState *cpu, void *data, target_ulong gva, int bytes)
{
    uint64_t gpa;

    while (bytes > 0) {
        /* copy page */
        int copy = MIN(bytes, 0x1000 - (gva & 0xfff));

        if (!mmu_gva_to_gpa(cpu, gva, &gpa)) {
            VM_PANIC_EX("%s: mmu_gva_to_gpa %llx failed\n", __func__, gva);
        } else {
            address_space_rw(&address_space_memory, gpa, MEMTXATTRS_UNSPECIFIED,
                             data, copy, 0);
        }

        bytes -= copy;
        gva += copy;
        data += copy;
    }
}
|
|
@ -0,0 +1,43 @@
|
|||
/*
|
||||
* Copyright (C) 2016 Veertu Inc,
|
||||
* Copyright (C) 2017 Google Inc,
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#ifndef __X86_MMU_H__
|
||||
#define __X86_MMU_H__
|
||||
|
||||
#define PT_PRESENT (1 << 0)
|
||||
#define PT_WRITE (1 << 1)
|
||||
#define PT_USER (1 << 2)
|
||||
#define PT_WT (1 << 3)
|
||||
#define PT_CD (1 << 4)
|
||||
#define PT_ACCESSED (1 << 5)
|
||||
#define PT_DIRTY (1 << 6)
|
||||
#define PT_PS (1 << 7)
|
||||
#define PT_GLOBAL (1 << 8)
|
||||
#define PT_NX (1llu << 63)
|
||||
|
||||
/* error codes */
|
||||
#define MMU_PAGE_PT (1 << 0)
|
||||
#define MMU_PAGE_WT (1 << 1)
|
||||
#define MMU_PAGE_US (1 << 2)
|
||||
#define MMU_PAGE_NX (1 << 3)
|
||||
|
||||
bool mmu_gva_to_gpa(struct CPUState *cpu, target_ulong gva, uint64_t *gpa);
|
||||
|
||||
void vmx_write_mem(struct CPUState *cpu, target_ulong gva, void *data, int bytes);
|
||||
void vmx_read_mem(struct CPUState *cpu, void *data, target_ulong gva, int bytes);
|
||||
|
||||
#endif /* __X86_MMU_H__ */
|
|
@ -0,0 +1,191 @@
|
|||
// This software is licensed under the terms of the GNU General Public
|
||||
// License version 2, as published by the Free Software Foundation, and
|
||||
// may be copied, distributed, and modified under those terms.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
#include "qemu/osdep.h"
|
||||
#include "panic.h"
|
||||
#include "qemu-common.h"
|
||||
#include "qemu/error-report.h"
|
||||
|
||||
#include "sysemu/hvf.h"
|
||||
#include "hvf-i386.h"
|
||||
#include "vmcs.h"
|
||||
#include "vmx.h"
|
||||
#include "x86.h"
|
||||
#include "x86_descr.h"
|
||||
#include "x86_mmu.h"
|
||||
#include "x86_decode.h"
|
||||
#include "x86_emu.h"
|
||||
#include "x86_task.h"
|
||||
#include "x86hvf.h"
|
||||
|
||||
#include <Hypervisor/hv.h>
|
||||
#include <Hypervisor/hv_vmx.h>
|
||||
|
||||
#include "exec/address-spaces.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "exec/ioport.h"
|
||||
#include "hw/i386/apic_internal.h"
|
||||
#include "hw/boards.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "strings.h"
|
||||
#include "sysemu/accel.h"
|
||||
#include "sysemu/sysemu.h"
|
||||
#include "target/i386/cpu.h"
|
||||
|
||||
// TODO: taskswitch handling
|
||||
/* Write the dynamic CPU state — eip, eflags, general-purpose registers
 * and segment selectors — into an in-memory 32-bit TSS image. */
static void save_state_to_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    /* CR3 and ldt selector are not saved intentionally */
    tss->eip = EIP(env);
    tss->eflags = EFLAGS(env);
    tss->eax = EAX(env);
    tss->ecx = ECX(env);
    tss->edx = EDX(env);
    tss->ebx = EBX(env);
    tss->esp = ESP(env);
    tss->ebp = EBP(env);
    tss->esi = ESI(env);
    tss->edi = EDI(env);

    /* Segment selectors come from the VMCS, not from env. */
    tss->es = vmx_read_segment_selector(cpu, R_ES).sel;
    tss->cs = vmx_read_segment_selector(cpu, R_CS).sel;
    tss->ss = vmx_read_segment_selector(cpu, R_SS).sel;
    tss->ds = vmx_read_segment_selector(cpu, R_DS).sel;
    tss->fs = vmx_read_segment_selector(cpu, R_FS).sel;
    tss->gs = vmx_read_segment_selector(cpu, R_GS).sel;
}
|
||||
|
||||
/* Load CPU state from an in-memory 32-bit TSS image: CR3, eip/eflags,
 * general-purpose registers and all segment selectors (incl. LDTR). */
static void load_state_from_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    wvmcs(cpu->hvf_fd, VMCS_GUEST_CR3, tss->cr3);

    RIP(env) = tss->eip;
    /* Bit 1 of EFLAGS always reads as 1, hence the "| 2". */
    EFLAGS(env) = tss->eflags | 2;

    /* General purpose registers */
    RAX(env) = tss->eax;
    RCX(env) = tss->ecx;
    RDX(env) = tss->edx;
    RBX(env) = tss->ebx;
    RSP(env) = tss->esp;
    RBP(env) = tss->ebp;
    RSI(env) = tss->esi;
    RDI(env) = tss->edi;

    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ldt}}, R_LDTR);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->es}}, R_ES);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->cs}}, R_CS);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ss}}, R_SS);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ds}}, R_DS);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->fs}}, R_FS);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->gs}}, R_GS);
}
|
||||
|
||||
/* Perform a 32-bit hardware task switch: save the dynamic state into
 * the outgoing TSS, load the incoming one, and link back to the old
 * task when applicable.
 * NOTE(review): tss_sel is unused, the return value is always 0, and
 * the vmx_read_mem/vmx_write_mem results cannot fail softly (they
 * panic internally on bad translations). */
static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel, x68_segment_selector old_tss_sel,
                          uint64_t old_tss_base, struct x86_segment_descriptor *new_desc)
{
    struct x86_tss_segment32 tss_seg;
    uint32_t new_tss_base = x86_segment_base(new_desc);
    uint32_t eip_offset = offsetof(struct x86_tss_segment32, eip);
    uint32_t ldt_sel_offset = offsetof(struct x86_tss_segment32, ldt);

    /* Only the dynamic fields — the [eip, ldt) range — are written back
     * to the outgoing TSS. */
    vmx_read_mem(cpu, &tss_seg, old_tss_base, sizeof(tss_seg));
    save_state_to_tss32(cpu, &tss_seg);

    vmx_write_mem(cpu, old_tss_base + eip_offset, &tss_seg.eip, ldt_sel_offset - eip_offset);
    vmx_read_mem(cpu, &tss_seg, new_tss_base, sizeof(tss_seg));

    /* 0xffff marks "no back-link"; otherwise store the outgoing task's
     * selector in the incoming TSS's prev_tss field. */
    if (old_tss_sel.sel != 0xffff) {
        tss_seg.prev_tss = old_tss_sel.sel;

        vmx_write_mem(cpu, new_tss_base, &tss_seg.prev_tss, sizeof(tss_seg.prev_tss));
    }
    load_state_from_tss32(cpu, &tss_seg);
    return 0;
}
|
||||
|
||||
/*
 * Handle a VM exit caused by a guest task switch.
 *
 * @cpu:        the vcpu that exited
 * @tss_sel:    selector of the TSS being switched to
 * @reason:     TSR_* cause (IRET, JMP, CALL, IDT gate)
 * @gate_valid: whether @gate/@gate_type describe a valid IDT gate
 * @gate:       IDT vector when the switch came through a gate
 * @gate_type:  VMCS interruption type of that gate
 *
 * Only task switches originating from hardware exceptions, hardware
 * interrupts or NMIs are emulated; anything else is skipped over by
 * advancing RIP past the exiting instruction.
 */
void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type)
{
    uint64_t rip = rreg(cpu->hvf_fd, HV_X86_RIP);
    if (!gate_valid || (gate_type != VMCS_INTR_T_HWEXCEPTION &&
                        gate_type != VMCS_INTR_T_HWINTR &&
                        gate_type != VMCS_INTR_T_NMI)) {
        /* Not a switch we emulate: just step over the instruction. */
        int ins_len = rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);
        macvm_set_rip(cpu, rip + ins_len);
        return;
    }

    /* Pull the full register state out of HVF before mutating it. */
    load_regs(cpu);

    struct x86_segment_descriptor curr_tss_desc, next_tss_desc;
    /* NOTE(review): 'ret' is assigned below but never checked. */
    int ret;
    x68_segment_selector old_tss_sel = vmx_read_segment_selector(cpu, R_TR);
    uint64_t old_tss_base = vmx_read_segment_base(cpu, R_TR);
    uint32_t desc_limit;
    struct x86_call_gate task_gate_desc;
    struct vmx_segment vmx_seg;

    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    x86_read_segment_descriptor(cpu, &next_tss_desc, tss_sel);
    x86_read_segment_descriptor(cpu, &curr_tss_desc, old_tss_sel);

    if (reason == TSR_IDT_GATE && gate_valid) {
        int dpl;

        ret = x86_read_call_gate(cpu, &task_gate_desc, gate);

        /* Privilege check for the task gate; a violation should raise
         * #GP but is currently only (dead-)logged. */
        dpl = task_gate_desc.dpl;
        x68_segment_selector cs = vmx_read_segment_selector(cpu, R_CS);
        if (tss_sel.rpl > dpl || cs.rpl > dpl)
            ;//DPRINTF("emulate_gp");
    }

    /* A 32-bit TSS (type bit 3 set) needs at least 0x67 bytes, a 16-bit
     * one at least 0x2b; an absent or undersized TSS is a #TS condition,
     * which this code does not yet emulate. */
    desc_limit = x86_segment_limit(&next_tss_desc);
    if (!next_tss_desc.p || ((desc_limit < 0x67 && (next_tss_desc.type & 8)) || desc_limit < 0x2b)) {
        VM_PANIC("emulate_ts");
    }

    if (reason == TSR_IRET || reason == TSR_JMP) {
        curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
        x86_write_segment_descriptor(cpu, &curr_tss_desc, old_tss_sel);
    }

    if (reason == TSR_IRET)
        EFLAGS(env) &= ~RFLAGS_NT;

    /* Only CALL and gate-initiated switches record a back link in the
     * new TSS; 0xffff tells task_switch_32() to skip it. */
    if (reason != TSR_CALL && reason != TSR_IDT_GATE)
        old_tss_sel.sel = 0xffff;

    if (reason != TSR_IRET) {
        next_tss_desc.type |= (1 << 1); /* set busy flag */
        x86_write_segment_descriptor(cpu, &next_tss_desc, tss_sel);
    }

    /* Descriptor type bit 3 distinguishes 32-bit from 16-bit TSS;
     * 16-bit task switches are not implemented. */
    if (next_tss_desc.type & 8)
        ret = task_switch_32(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
    else
        //ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
        VM_PANIC("task_switch_16");

    /* A task switch sets CR0.TS; then load TR with the new TSS. */
    macvm_set_cr0(cpu->hvf_fd, rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0) | CR0_TS);
    x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg);
    vmx_write_segment_descriptor(cpu, &vmx_seg, R_TR);

    /* Push the modified register state back into HVF. */
    store_regs(cpu);

    hv_vcpu_invalidate_tlb(cpu->hvf_fd);
    hv_vcpu_flush(cpu->hvf_fd);
}
|
|
@ -0,0 +1,18 @@
|
|||
/* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation; either version 2 or
|
||||
* (at your option) version 3 of the License.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along
|
||||
* with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#ifndef HVF_TASK
#define HVF_TASK
/* Emulate a guest task switch on a task-switch VM exit (GPL v2-only
 * task-handling code, kept separate from the LGPL HVF sources).
 * NOTE(review): this header uses CPUState and x68_segment_selector
 * without including their definitions — confirm all includers pull in
 * the CPU/x86 headers first. */
void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel,
                            int reason, bool gate_valid, uint8_t gate, uint64_t gate_type);
#endif
|
|
@ -0,0 +1,465 @@
|
|||
/*
|
||||
* Copyright (c) 2003-2008 Fabrice Bellard
|
||||
* Copyright (C) 2016 Veertu Inc,
|
||||
* Copyright (C) 2017 Google Inc,
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
|
||||
#include "qemu-common.h"
|
||||
#include "x86hvf.h"
|
||||
#include "vmx.h"
|
||||
#include "vmcs.h"
|
||||
#include "cpu.h"
|
||||
#include "x86_descr.h"
|
||||
#include "x86_decode.h"
|
||||
|
||||
#include "hw/i386/apic_internal.h"
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <Hypervisor/hv.h>
|
||||
#include <Hypervisor/hv_vmx.h>
|
||||
#include <stdint.h>
|
||||
|
||||
void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg,
|
||||
SegmentCache *qseg, bool is_tr)
|
||||
{
|
||||
vmx_seg->sel = qseg->selector;
|
||||
vmx_seg->base = qseg->base;
|
||||
vmx_seg->limit = qseg->limit;
|
||||
|
||||
if (!qseg->selector && !x86_is_real(cpu) && !is_tr) {
|
||||
/* the TR register is usable after processor reset despite
|
||||
* having a null selector */
|
||||
vmx_seg->ar = 1 << 16;
|
||||
return;
|
||||
}
|
||||
vmx_seg->ar = (qseg->flags >> DESC_TYPE_SHIFT) & 0xf;
|
||||
vmx_seg->ar |= ((qseg->flags >> DESC_G_SHIFT) & 1) << 15;
|
||||
vmx_seg->ar |= ((qseg->flags >> DESC_B_SHIFT) & 1) << 14;
|
||||
vmx_seg->ar |= ((qseg->flags >> DESC_L_SHIFT) & 1) << 13;
|
||||
vmx_seg->ar |= ((qseg->flags >> DESC_AVL_SHIFT) & 1) << 12;
|
||||
vmx_seg->ar |= ((qseg->flags >> DESC_P_SHIFT) & 1) << 7;
|
||||
vmx_seg->ar |= ((qseg->flags >> DESC_DPL_SHIFT) & 3) << 5;
|
||||
vmx_seg->ar |= ((qseg->flags >> DESC_S_SHIFT) & 1) << 4;
|
||||
}
|
||||
|
||||
/*
 * Convert a VMX segment back into QEMU's SegmentCache representation —
 * the exact inverse of hvf_set_segment()'s flag repacking.
 */
void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg)
{
    uint32_t ar = vmx_seg->ar;

    qseg->limit = vmx_seg->limit;
    qseg->base = vmx_seg->base;
    qseg->selector = vmx_seg->sel;

    /* Rebuild the QEMU descriptor flags one field at a time. */
    qseg->flags = (ar & 0xf) << DESC_TYPE_SHIFT;
    qseg->flags |= ((ar >> 4) & 1) << DESC_S_SHIFT;
    qseg->flags |= ((ar >> 5) & 3) << DESC_DPL_SHIFT;
    qseg->flags |= ((ar >> 7) & 1) << DESC_P_SHIFT;
    qseg->flags |= ((ar >> 12) & 1) << DESC_AVL_SHIFT;
    qseg->flags |= ((ar >> 13) & 1) << DESC_L_SHIFT;
    qseg->flags |= ((ar >> 14) & 1) << DESC_B_SHIFT;
    qseg->flags |= ((ar >> 15) & 1) << DESC_G_SHIFT;
}
|
||||
|
||||
/*
 * Push QEMU's FPU/SSE/extended state to the HVF vcpu.
 *
 * Serializes all xsave areas from CPUX86State into the shared xsave
 * buffer and hands it to the hypervisor; failure is fatal.
 * NOTE(review): the literal 4096 assumes the fpstate image is exactly
 * one page — confirm it matches the kvm_xsave_buf allocation size.
 */
void hvf_put_xsave(CPUState *cpu_state)
{

    struct X86XSaveArea *xsave;

    xsave = X86_CPU(cpu_state)->env.kvm_xsave_buf;

    x86_cpu_xsave_all_areas(X86_CPU(cpu_state), xsave);

    if (hv_vcpu_write_fpstate(cpu_state->hvf_fd, (void*)xsave, 4096)) {
        abort();
    }
}
|
||||
|
||||
/*
 * Push QEMU's segmentation and control-register state into the VMCS:
 * IDTR/GDTR, CR3/CR4/CR0, EFER, TPR, and every segment register
 * (CS/DS/ES/SS/FS/GS/TR/LDTR), then flush the vcpu state to HVF.
 */
void hvf_put_segments(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;
    struct vmx_segment seg;

    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit);
    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE, env->idt.base);

    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit);
    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE, env->gdt.base);

    /* CR2 is deliberately not written back (see commented-out line). */
    /* wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR2, env->cr[2]); */
    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3, env->cr[3]);
    vmx_update_tpr(cpu_state);
    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER, env->efer);

    /* CR4 before CR0: the macvm_set_cr* helpers apply VMX fixed bits. */
    macvm_set_cr4(cpu_state->hvf_fd, env->cr[4]);
    macvm_set_cr0(cpu_state->hvf_fd, env->cr[0]);

    /* Each segment is converted to VMX layout, then written; only TR
     * passes is_tr=true (null-selector TR stays usable). */
    hvf_set_segment(cpu_state, &seg, &env->segs[R_CS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_CS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_DS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_DS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_ES], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_ES);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_SS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_SS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_FS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_FS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_GS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_GS);

    hvf_set_segment(cpu_state, &seg, &env->tr, true);
    vmx_write_segment_descriptor(cpu_state, &seg, R_TR);

    hvf_set_segment(cpu_state, &seg, &env->ldt, false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_LDTR);

    hv_vcpu_flush(cpu_state->hvf_fd);
}
|
||||
|
||||
/*
 * Push QEMU's MSR state to the HVF vcpu: SYSENTER MSRs, syscall MSRs
 * (STAR family, 64-bit only), FS/GS base, and the TSC.
 * NOTE(review): hv_vcpu_write_msr() return values are not checked.
 */
void hvf_put_msrs(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;

    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS,
                      env->sysenter_cs);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP,
                      env->sysenter_esp);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP,
                      env->sysenter_eip);

    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_STAR, env->star);

#ifdef TARGET_X86_64
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_CSTAR, env->cstar);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, env->kernelgsbase);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FMASK, env->fmask);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_LSTAR, env->lstar);
#endif

    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_GSBASE, env->segs[R_GS].base);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FSBASE, env->segs[R_FS].base);

    /* TSC is synchronized VM-wide rather than via a per-vcpu offset;
     * the per-vcpu VMCS_TSC_OFFSET path is intentionally disabled. */
    /* if (!osx_is_sierra())
         wvmcs(cpu_state->hvf_fd, VMCS_TSC_OFFSET, env->tsc - rdtscp());*/
    hv_vm_sync_tsc(env->tsc);
}
|
||||
|
||||
|
||||
/*
 * Fetch the vcpu's FPU/SSE/extended state from HVF into QEMU.
 * Mirror of hvf_put_xsave(); failure to read the fpstate is fatal.
 * NOTE(review): the literal 4096 assumes a one-page fpstate image —
 * confirm it matches the kvm_xsave_buf allocation size.
 */
void hvf_get_xsave(CPUState *cpu_state)
{
    struct X86XSaveArea *xsave;

    xsave = X86_CPU(cpu_state)->env.kvm_xsave_buf;

    if (hv_vcpu_read_fpstate(cpu_state->hvf_fd, (void*)xsave, 4096)) {
        abort();
    }

    /* Deserialize the buffer back into CPUX86State. */
    x86_cpu_xrstor_all_areas(X86_CPU(cpu_state), xsave);
}
|
||||
|
||||
/*
 * Fetch segmentation and control-register state from the VMCS into
 * QEMU: all segment registers, IDTR/GDTR, CR0/CR3/CR4 and EFER.
 * Also clears any stale pending interrupt-injection marker.
 */
void hvf_get_segments(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;

    struct vmx_segment seg;

    /* No interrupt is pending re-injection after a state sync. */
    env->interrupt_injected = -1;

    vmx_read_segment_descriptor(cpu_state, &seg, R_CS);
    hvf_get_segment(&env->segs[R_CS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_DS);
    hvf_get_segment(&env->segs[R_DS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_ES);
    hvf_get_segment(&env->segs[R_ES], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_FS);
    hvf_get_segment(&env->segs[R_FS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_GS);
    hvf_get_segment(&env->segs[R_GS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_SS);
    hvf_get_segment(&env->segs[R_SS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_TR);
    hvf_get_segment(&env->tr, &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_LDTR);
    hvf_get_segment(&env->ldt, &seg);

    env->idt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT);
    env->idt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE);
    env->gdt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
    env->gdt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE);

    env->cr[0] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR0);
    /* CR2 is not tracked through the VMCS here; it is simply zeroed
     * (matches the disabled CR2 write in hvf_put_segments()). */
    env->cr[2] = 0;
    env->cr[3] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3);
    env->cr[4] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR4);

    env->efer = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER);
}
|
||||
|
||||
/*
 * Fetch MSR state from the HVF vcpu into QEMU — inverse of
 * hvf_put_msrs().  The guest TSC is reconstructed from the host TSC
 * plus the VMCS TSC offset.
 */
void hvf_get_msrs(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;
    uint64_t tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS, &tmp);
    env->sysenter_cs = tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP, &tmp);
    env->sysenter_esp = tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP, &tmp);
    env->sysenter_eip = tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_STAR, &env->star);

#ifdef TARGET_X86_64
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_CSTAR, &env->cstar);
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, &env->kernelgsbase);
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_FMASK, &env->fmask);
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_LSTAR, &env->lstar);
#endif

    /* NOTE(review): the APIC base is read into tmp and then discarded —
     * either dead code or a missing env update; confirm intent. */
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_APICBASE, &tmp);

    env->tsc = rdtscp() + rvmcs(cpu_state->hvf_fd, VMCS_TSC_OFFSET);
}
|
||||
|
||||
/*
 * Push the complete QEMU vcpu state to HVF: general-purpose registers,
 * RFLAGS/RIP, XCR0, xsave state, segments, MSRs and debug registers.
 * Always returns 0.
 */
int hvf_put_registers(CPUState *cpu_state)
{
    X86CPU *x86cpu = X86_CPU(cpu_state);
    CPUX86State *env = &x86cpu->env;
    int i;

    /* HVF register id -> index into env->regs[], in the same order the
     * registers were written before this was made table-driven. */
    static const struct {
        int hv_reg;
        int qemu_idx;
    } gpr_map[] = {
        { HV_X86_RAX, R_EAX }, { HV_X86_RBX, R_EBX },
        { HV_X86_RCX, R_ECX }, { HV_X86_RDX, R_EDX },
        { HV_X86_RBP, R_EBP }, { HV_X86_RSP, R_ESP },
        { HV_X86_RSI, R_ESI }, { HV_X86_RDI, R_EDI },
        { HV_X86_R8,  8 },     { HV_X86_R9,  9 },
        { HV_X86_R10, 10 },    { HV_X86_R11, 11 },
        { HV_X86_R12, 12 },    { HV_X86_R13, 13 },
        { HV_X86_R14, 14 },    { HV_X86_R15, 15 },
    };
    static const int dr_map[8] = {
        HV_X86_DR0, HV_X86_DR1, HV_X86_DR2, HV_X86_DR3,
        HV_X86_DR4, HV_X86_DR5, HV_X86_DR6, HV_X86_DR7,
    };

    for (i = 0; i < (int)(sizeof(gpr_map) / sizeof(gpr_map[0])); i++) {
        wreg(cpu_state->hvf_fd, gpr_map[i].hv_reg,
             env->regs[gpr_map[i].qemu_idx]);
    }

    wreg(cpu_state->hvf_fd, HV_X86_RFLAGS, env->eflags);
    wreg(cpu_state->hvf_fd, HV_X86_RIP, env->eip);

    wreg(cpu_state->hvf_fd, HV_X86_XCR0, env->xcr0);

    /* Extended FP state, segmentation and MSRs. */
    hvf_put_xsave(cpu_state);
    hvf_put_segments(cpu_state);
    hvf_put_msrs(cpu_state);

    /* Debug registers DR0..DR7. */
    for (i = 0; i < 8; i++) {
        wreg(cpu_state->hvf_fd, dr_map[i], env->dr[i]);
    }

    return 0;
}
|
||||
|
||||
/*
 * Fetch the complete vcpu state from HVF into QEMU — mirror of
 * hvf_put_registers().  Always returns 0.
 */
int hvf_get_registers(CPUState *cpu_state)
{
    X86CPU *x86cpu = X86_CPU(cpu_state);
    CPUX86State *env = &x86cpu->env;
    int i;

    /* HVF register id -> index into env->regs[], same order as the
     * original hand-unrolled reads. */
    static const struct {
        int hv_reg;
        int qemu_idx;
    } gpr_map[] = {
        { HV_X86_RAX, R_EAX }, { HV_X86_RBX, R_EBX },
        { HV_X86_RCX, R_ECX }, { HV_X86_RDX, R_EDX },
        { HV_X86_RBP, R_EBP }, { HV_X86_RSP, R_ESP },
        { HV_X86_RSI, R_ESI }, { HV_X86_RDI, R_EDI },
        { HV_X86_R8,  8 },     { HV_X86_R9,  9 },
        { HV_X86_R10, 10 },    { HV_X86_R11, 11 },
        { HV_X86_R12, 12 },    { HV_X86_R13, 13 },
        { HV_X86_R14, 14 },    { HV_X86_R15, 15 },
    };
    static const int dr_map[8] = {
        HV_X86_DR0, HV_X86_DR1, HV_X86_DR2, HV_X86_DR3,
        HV_X86_DR4, HV_X86_DR5, HV_X86_DR6, HV_X86_DR7,
    };

    for (i = 0; i < (int)(sizeof(gpr_map) / sizeof(gpr_map[0])); i++) {
        env->regs[gpr_map[i].qemu_idx] =
            rreg(cpu_state->hvf_fd, gpr_map[i].hv_reg);
    }

    env->eflags = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);
    env->eip = rreg(cpu_state->hvf_fd, HV_X86_RIP);

    hvf_get_xsave(cpu_state);
    env->xcr0 = rreg(cpu_state->hvf_fd, HV_X86_XCR0);

    hvf_get_segments(cpu_state);
    hvf_get_msrs(cpu_state);

    /* Debug registers DR0..DR7. */
    for (i = 0; i < 8; i++) {
        env->dr[i] = rreg(cpu_state->hvf_fd, dr_map[i]);
    }

    return 0;
}
|
||||
|
||||
/* Enable interrupt-window exiting: request a VM exit as soon as the
 * guest becomes able to accept an external interrupt. */
static void vmx_set_int_window_exiting(CPUState *cpu)
{
    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS,
          rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS) |
          VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);
}
|
||||
|
||||
/* Disable interrupt-window exiting in the primary processor-based
 * VM-execution controls. */
void vmx_clear_int_window_exiting(CPUState *cpu)
{
    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS,
          rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS) &
          ~VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);
}
|
||||
|
||||
#define NMI_VEC 2
|
||||
|
||||
/*
 * Transfer pending QEMU events (software interrupts, exceptions, NMIs,
 * and PIC hardware interrupts) into the VMCS entry interruption-
 * information field so they are delivered on the next VM entry.  When
 * an event cannot be injected yet, the matching NMI-/interrupt-window
 * exiting control is armed instead.
 *
 * Returns nonzero when an INIT or TPR request is still pending, i.e.
 * the caller must run hvf_process_events() before resuming the guest.
 *
 * Fix vs. original: removed a stray empty statement ("};") that
 * terminated the inner injection block — no behavioral change.
 */
bool hvf_inject_interrupts(CPUState *cpu_state)
{
    X86CPU *x86cpu = X86_CPU(cpu_state);
    CPUX86State *env = &x86cpu->env;

    uint8_t vector;
    uint64_t intr_type;
    bool have_event = true;
    if (env->interrupt_injected != -1) {
        vector = env->interrupt_injected;
        intr_type = VMCS_INTR_T_SWINTR;
    } else if (env->exception_injected != -1) {
        vector = env->exception_injected;
        /* INT3 and INTO are software exceptions; everything else is
         * re-injected as a hardware exception. */
        if (vector == EXCP03_INT3 || vector == EXCP04_INTO) {
            intr_type = VMCS_INTR_T_SWEXCEPTION;
        } else {
            intr_type = VMCS_INTR_T_HWEXCEPTION;
        }
    } else if (env->nmi_injected) {
        vector = NMI_VEC;
        intr_type = VMCS_INTR_T_NMI;
    } else {
        have_event = false;
    }

    uint64_t info = 0;
    if (have_event) {
        info = vector | intr_type | VMCS_INTR_VALID;
        uint64_t reason = rvmcs(cpu_state->hvf_fd, VMCS_EXIT_REASON);
        if (env->nmi_injected && reason != EXIT_REASON_TASK_SWITCH) {
            vmx_clear_nmi_blocking(cpu_state);
        }

        if (!(env->hflags2 & HF2_NMI_MASK) || intr_type != VMCS_INTR_T_NMI) {
            info &= ~(1 << 12); /* clear undefined bit */
            if (intr_type == VMCS_INTR_T_SWINTR ||
                intr_type == VMCS_INTR_T_SWEXCEPTION) {
                wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INST_LENGTH, env->ins_len);
            }

            if (env->has_error_code) {
                wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_EXCEPTION_ERROR,
                      env->error_code);
            }
            /*printf("reinject %lx err %d\n", info, err);*/
            wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);
        }
    }

    /* Pending NMI: inject now if NMIs are unmasked and no other event
     * occupies the entry slot, otherwise arm NMI-window exiting. */
    if (cpu_state->interrupt_request & CPU_INTERRUPT_NMI) {
        if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) {
            cpu_state->interrupt_request &= ~CPU_INTERRUPT_NMI;
            info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | NMI_VEC;
            wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);
        } else {
            vmx_set_nmi_window_exiting(cpu_state);
        }
    }

    /* Pending hardware interrupt: deliverable only when interrupts are
     * not inhibited (MOV SS/STI shadow), IF is set, and the entry slot
     * is free. */
    if (!(env->hflags & HF_INHIBIT_IRQ_MASK) &&
        (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
        (EFLAGS(env) & IF_MASK) && !(info & VMCS_INTR_VALID)) {
        int line = cpu_get_pic_interrupt(&x86cpu->env);
        cpu_state->interrupt_request &= ~CPU_INTERRUPT_HARD;
        if (line >= 0) {
            wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, line |
                  VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);
        }
    }
    /* Still-undeliverable interrupts: exit when a window opens. */
    if (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) {
        vmx_set_int_window_exiting(cpu_state);
    }
    return (cpu_state->interrupt_request
            & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR));
}
|
||||
|
||||
/*
 * Service pending CPU-level events (INIT, POLL, SIPI, TPR access) and
 * decide whether the vcpu should stay halted.
 *
 * Returns the vcpu's halted flag: nonzero means do not enter the guest.
 */
int hvf_process_events(CPUState *cpu_state)
{
    X86CPU *cpu = X86_CPU(cpu_state);
    CPUX86State *env = &cpu->env;

    /* Refresh EFLAGS from HVF so the IF check below is current. */
    EFLAGS(env) = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);

    if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) {
        hvf_cpu_synchronize_state(cpu_state);
        do_cpu_init(cpu);
    }

    if (cpu_state->interrupt_request & CPU_INTERRUPT_POLL) {
        cpu_state->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    /* A deliverable hard interrupt (IF set) or any NMI wakes a halted
     * vcpu. */
    if (((cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
         (EFLAGS(env) & IF_MASK)) ||
        (cpu_state->interrupt_request & CPU_INTERRUPT_NMI)) {
        cpu_state->halted = 0;
    }
    if (cpu_state->interrupt_request & CPU_INTERRUPT_SIPI) {
        hvf_cpu_synchronize_state(cpu_state);
        do_cpu_sipi(cpu);
    }
    if (cpu_state->interrupt_request & CPU_INTERRUPT_TPR) {
        cpu_state->interrupt_request &= ~CPU_INTERRUPT_TPR;
        hvf_cpu_synchronize_state(cpu_state);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }
    return cpu_state->halted;
}
|
|
@ -0,0 +1,39 @@
|
|||
/*
|
||||
* Copyright (C) 2016 Veertu Inc,
|
||||
* Copyright (C) 2017 Google Inc,
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#ifndef X86HVF_H
#define X86HVF_H
#include "cpu.h"
#include "x86_descr.h"

/*
 * State transfer between QEMU's CPUX86State and the HVF vcpu, plus the
 * event-injection helpers used by the HVF run loop.
 *
 * Fix vs. original: parameter names added to the prototypes that
 * omitted them, for consistency with the rest of this header.
 */
int hvf_process_events(CPUState *cpu_state);
int hvf_put_registers(CPUState *cpu_state);
int hvf_get_registers(CPUState *cpu_state);
bool hvf_inject_interrupts(CPUState *cpu_state);
void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg,
                     SegmentCache *qseg, bool is_tr);
void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg);
void hvf_put_xsave(CPUState *cpu_state);
void hvf_put_segments(CPUState *cpu_state);
void hvf_put_msrs(CPUState *cpu_state);
void hvf_get_xsave(CPUState *cpu_state);
void hvf_get_msrs(CPUState *cpu_state);
void vmx_clear_int_window_exiting(CPUState *cpu);
void hvf_get_segments(CPUState *cpu_state);
void vmx_update_tpr(CPUState *cpu);
void hvf_cpu_synchronize_state(CPUState *cpu_state);
#endif
|
|
@ -1038,8 +1038,6 @@ void kvm_arch_reset_vcpu(X86CPU *cpu)
|
|||
{
|
||||
CPUX86State *env = &cpu->env;
|
||||
|
||||
env->exception_injected = -1;
|
||||
env->interrupt_injected = -1;
|
||||
env->xcr0 = 1;
|
||||
if (kvm_irqchip_in_kernel()) {
|
||||
env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
|
||||
|
|
Loading…
Reference in New Issue