commit 67b9c5d4f3
Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging

* qemu-thread portability improvement (Fam)
* virtio-scsi IOMMU fix (Jason)
* poisoning and common-obj-y cleanups (Thomas)
* initial Hypervisor.framework refactoring (Sergio)
* x86 TCG interrupt injection fixes (Wu Xiang, me)
* --disable-tcg support for x86 (Yang Zhong, me)
* various other bugfixes and cleanups (Daniel, Peter, Thomas)

# gpg: Signature made Wed 05 Jul 2017 08:12:56 BST
# gpg:                using RSA key 0xBFFBD25F78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4  E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C  7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream: (42 commits)
  target/i386: add the CONFIG_TCG into Makefiles
  target/i386: add the tcg_enabled() in target/i386/
  target/i386: move TLB refill function out of helper.c
  target/i386: split cpu_set_mxcsr() and make cpu_set_fpuc() inline
  target/i386: make cpu_get_fp80()/cpu_set_fp80() static
  target/i386: move cpu_sync_bndcs_hflags() function
  tcg: add the CONFIG_TCG into Makefiles
  tcg: add CONFIG_TCG guards in headers
  exec: elide calls to tb_lock and tb_unlock
  tcg: move tb_lock out of translate-all.h
  tcg: add the tcg-stub.c file into accel/stubs/
  vapic: use tcg_enabled
  monitor: disable "info jit" and "info opcount" if !TCG
  tcg: make tcg_allowed global
  cpu: move interrupt handling out of translate-common.c
  tcg: move page_size_init() function
  vl: add tcg_enabled() for tcg related code
  vl: convert -tb-size to qemu_strtoul
  configure: add --disable-tcg configure option
  configure: early test for supported targets
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Makefile.objs
@@ -40,7 +40,7 @@ io-obj-y = io/
 ifeq ($(CONFIG_SOFTMMU),y)
 common-obj-y = blockdev.o blockdev-nbd.o block/
-common-obj-y += iothread.o
+common-obj-y += bootdevice.o iothread.o
 common-obj-y += net/
 common-obj-y += qdev-monitor.o device-hotplug.o
 common-obj-$(CONFIG_WIN32) += os-win32.o
Makefile.target
@@ -90,8 +90,8 @@ all: $(PROGS) stap
 # cpu emulator library
 obj-y += exec.o
 obj-y += accel/
-obj-y += tcg/tcg.o tcg/tcg-op.o tcg/optimize.o
-obj-y += tcg/tcg-common.o tcg/tcg-runtime.o
+obj-$(CONFIG_TCG) += tcg/tcg.o tcg/tcg-op.o tcg/optimize.o
+obj-$(CONFIG_TCG) += tcg/tcg-common.o tcg/tcg-runtime.o
 obj-$(CONFIG_TCG_INTERPRETER) += tcg/tci.o
 obj-$(CONFIG_TCG_INTERPRETER) += disas/tci.o
 obj-y += fpu/softfloat.o
@@ -137,7 +137,7 @@ endif #CONFIG_BSD_USER
 # System emulator target
 ifdef CONFIG_SOFTMMU
 obj-y += arch_init.o cpus.o monitor.o gdbstub.o balloon.o ioport.o numa.o
-obj-y += qtest.o bootdevice.o
+obj-y += qtest.o
 obj-y += hw/
 obj-y += memory.o
 obj-y += memory_mapping.o
accel/Makefile.objs
@@ -1,4 +1,4 @@
 obj-$(CONFIG_SOFTMMU) += accel.o
 obj-y += kvm/
-obj-y += tcg/
+obj-$(CONFIG_TCG) += tcg/
 obj-y += stubs/
accel/kvm/kvm-all.c
@@ -318,7 +318,7 @@ int kvm_init_vcpu(CPUState *cpu)

     cpu->kvm_fd = ret;
     cpu->kvm_state = s;
-    cpu->kvm_vcpu_dirty = true;
+    cpu->vcpu_dirty = true;

     mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
     if (mmap_size < 0) {
@@ -981,15 +981,6 @@ static MemoryListener kvm_io_listener = {
     .priority = 10,
 };

-static void kvm_handle_interrupt(CPUState *cpu, int mask)
-{
-    cpu->interrupt_request |= mask;
-
-    if (!qemu_cpu_is_self(cpu)) {
-        qemu_cpu_kick(cpu);
-    }
-}
-
 int kvm_set_irq(KVMState *s, int irq, int level)
 {
     struct kvm_irq_level event;
@@ -1774,8 +1765,6 @@ static int kvm_init(MachineState *ms)

     s->many_ioeventfds = kvm_check_many_ioeventfds();

-    cpu_interrupt_handler = kvm_handle_interrupt;
-
     return 0;

 err:
@@ -1864,15 +1853,15 @@ void kvm_flush_coalesced_mmio_buffer(void)

 static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
 {
-    if (!cpu->kvm_vcpu_dirty) {
+    if (!cpu->vcpu_dirty) {
         kvm_arch_get_registers(cpu);
-        cpu->kvm_vcpu_dirty = true;
+        cpu->vcpu_dirty = true;
     }
 }

 void kvm_cpu_synchronize_state(CPUState *cpu)
 {
-    if (!cpu->kvm_vcpu_dirty) {
+    if (!cpu->vcpu_dirty) {
         run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
     }
 }
@@ -1880,7 +1869,7 @@ void kvm_cpu_synchronize_state(CPUState *cpu)
 static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
 {
     kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
-    cpu->kvm_vcpu_dirty = false;
+    cpu->vcpu_dirty = false;
 }

 void kvm_cpu_synchronize_post_reset(CPUState *cpu)
@@ -1891,7 +1880,7 @@ void kvm_cpu_synchronize_post_reset(CPUState *cpu)
 static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
 {
     kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
-    cpu->kvm_vcpu_dirty = false;
+    cpu->vcpu_dirty = false;
 }

 void kvm_cpu_synchronize_post_init(CPUState *cpu)
@@ -1901,7 +1890,7 @@ void kvm_cpu_synchronize_post_init(CPUState *cpu)

 static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
 {
-    cpu->kvm_vcpu_dirty = true;
+    cpu->vcpu_dirty = true;
 }

 void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
@@ -1982,9 +1971,9 @@ int kvm_cpu_exec(CPUState *cpu)
     do {
         MemTxAttrs attrs;

-        if (cpu->kvm_vcpu_dirty) {
+        if (cpu->vcpu_dirty) {
             kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
-            cpu->kvm_vcpu_dirty = false;
+            cpu->vcpu_dirty = false;
         }

         kvm_arch_pre_run(cpu, run);
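The kvm_vcpu_dirty field becomes the accelerator-neutral vcpu_dirty (see the include/qom/cpu.h hunk below), since HAX and the upcoming Hypervisor.framework code track exactly the same state. A minimal sketch of the protocol the flag implements, using only names from the hunks above; the two wrapper functions are hypothetical, for illustration:

    /* Sketch: vcpu_dirty == true means QEMU's register copy is newer than
     * the kernel's and must be pushed before the next KVM_RUN; false means
     * the kernel's copy is authoritative and must be pulled before use. */
    static void sketch_before_run(CPUState *cpu)
    {
        if (cpu->vcpu_dirty) {
            kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);  /* push */
            cpu->vcpu_dirty = false;
        }
    }

    static void sketch_before_register_access(CPUState *cpu)
    {
        if (!cpu->vcpu_dirty) {
            kvm_arch_get_registers(cpu);  /* pull */
            cpu->vcpu_dirty = true;       /* QEMU copy now authoritative */
        }
    }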
accel/stubs/Makefile.objs
@@ -1 +1,2 @@
 obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
+obj-$(call lnot,$(CONFIG_TCG)) += tcg-stub.o
accel/stubs/tcg-stub.c (new file)
@@ -0,0 +1,22 @@
+/*
+ * QEMU TCG accelerator stub
+ *
+ * Copyright Red Hat, Inc. 2013
+ *
+ * Author: Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "tcg/tcg.h"
+#include "exec/cpu-common.h"
+#include "exec/exec-all.h"
+
+void tb_flush(CPUState *cpu)
+{
+}
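tcg-stub.c exists so that common code built with --disable-tcg still links: tb_flush() is called from shared paths, so it gets an empty definition instead of an #ifdef CONFIG_TCG at every caller, and the Makefile hunk above builds the stub only when TCG is compiled out. Any other TCG entry point reachable from common code would be stubbed the same way; a hypothetical example, not part of this commit:

    /* Hypothetical stub: a TCG-only function that should never actually be
     * reached when TCG is compiled out can abort rather than do nothing. */
    void tcg_only_entry_point(CPUState *cpu)
    {
        g_assert_not_reached();
    }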
accel/tcg/Makefile.objs
@@ -1,3 +1,3 @@
 obj-$(CONFIG_SOFTMMU) += tcg-all.o
 obj-$(CONFIG_SOFTMMU) += cputlb.o
-obj-y += cpu-exec.o cpu-exec-common.o translate-all.o translate-common.o
+obj-y += cpu-exec.o cpu-exec-common.o translate-all.o
accel/tcg/cpu-exec-common.c
@@ -23,6 +23,8 @@
 #include "exec/exec-all.h"
 #include "exec/memory-internal.h"

+bool tcg_allowed;
+
 /* exit the current TB, but without causing any exception to be raised */
 void cpu_loop_exit_noexc(CPUState *cpu)
 {
accel/tcg/tcg-all.c
@@ -27,13 +27,44 @@
 #include "sysemu/accel.h"
 #include "sysemu/sysemu.h"
 #include "qom/object.h"
+#include "qemu-common.h"
+#include "qom/cpu.h"
+#include "sysemu/cpus.h"
+#include "qemu/main-loop.h"

-int tcg_tb_size;
-static bool tcg_allowed = true;
+unsigned long tcg_tb_size;
+
+#ifndef CONFIG_USER_ONLY
+/* mask must never be zero, except for A20 change call */
+static void tcg_handle_interrupt(CPUState *cpu, int mask)
+{
+    int old_mask;
+    g_assert(qemu_mutex_iothread_locked());
+
+    old_mask = cpu->interrupt_request;
+    cpu->interrupt_request |= mask;
+
+    /*
+     * If called from iothread context, wake the target cpu in
+     * case its halted.
+     */
+    if (!qemu_cpu_is_self(cpu)) {
+        qemu_cpu_kick(cpu);
+    } else {
+        cpu->icount_decr.u16.high = -1;
+        if (use_icount &&
+            !cpu->can_do_io
+            && (mask & ~old_mask) != 0) {
+            cpu_abort(cpu, "Raised interrupt while not in I/O function");
+        }
+    }
+}
+#endif

 static int tcg_init(MachineState *ms)
 {
     tcg_exec_init(tcg_tb_size * 1024 * 1024);
+    cpu_interrupt_handler = tcg_handle_interrupt;
     return 0;
 }
accel/tcg/translate-all.c
@@ -112,9 +112,6 @@ typedef struct PageDesc {
 #define V_L2_BITS 10
 #define V_L2_SIZE (1 << V_L2_BITS)

-uintptr_t qemu_host_page_size;
-intptr_t qemu_host_page_mask;
-
 /*
  * L1 Mapping properties
  */
@@ -363,21 +360,6 @@ bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
     return r;
 }

-void page_size_init(void)
-{
-    /* NOTE: we can always suppose that qemu_host_page_size >=
-       TARGET_PAGE_SIZE */
-    qemu_real_host_page_size = getpagesize();
-    qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
-    if (qemu_host_page_size == 0) {
-        qemu_host_page_size = qemu_real_host_page_size;
-    }
-    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
-        qemu_host_page_size = TARGET_PAGE_SIZE;
-    }
-    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
-}
-
 static void page_init(void)
 {
     page_size_init();
@@ -802,6 +784,7 @@ static void tb_htable_init(void)
    size. */
 void tcg_exec_init(unsigned long tb_size)
 {
+    tcg_allowed = true;
     cpu_gen_init();
     page_init();
     tb_htable_init();
@@ -813,11 +796,6 @@ void tcg_exec_init(unsigned long tb_size)
 #endif
 }

-bool tcg_enabled(void)
-{
-    return tcg_ctx.code_gen_buffer != NULL;
-}
-
 /*
  * Allocate a new translation block. Flush the translation buffer if
  * too many translation blocks or too much generated code.
@@ -1873,6 +1851,11 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)

     tb_lock();

+    if (!tcg_enabled()) {
+        cpu_fprintf(f, "TCG not enabled\n");
+        return;
+    }
+
     target_code_size = 0;
     max_target_code_size = 0;
     cross_page = 0;
@@ -2223,3 +2206,11 @@ int page_unprotect(target_ulong address, uintptr_t pc)
     return 0;
 }
 #endif /* CONFIG_USER_ONLY */
+
+/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
+void tcg_flush_softmmu_tlb(CPUState *cs)
+{
+#ifdef CONFIG_SOFTMMU
+    tlb_flush(cs);
+#endif
+}
accel/tcg/translate-common.c (deleted file)
@@ -1,56 +0,0 @@
-/*
- * Host code generation common components
- *
- * Copyright (c) 2015 Peter Crosthwaite <crosthwaite.peter@gmail.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "qemu-common.h"
-#include "qom/cpu.h"
-#include "sysemu/cpus.h"
-#include "qemu/main-loop.h"
-
-uintptr_t qemu_real_host_page_size;
-intptr_t qemu_real_host_page_mask;
-
-#ifndef CONFIG_USER_ONLY
-/* mask must never be zero, except for A20 change call */
-static void tcg_handle_interrupt(CPUState *cpu, int mask)
-{
-    int old_mask;
-    g_assert(qemu_mutex_iothread_locked());
-
-    old_mask = cpu->interrupt_request;
-    cpu->interrupt_request |= mask;
-
-    /*
-     * If called from iothread context, wake the target cpu in
-     * case its halted.
-     */
-    if (!qemu_cpu_is_self(cpu)) {
-        qemu_cpu_kick(cpu);
-    } else {
-        cpu->icount_decr.u16.high = -1;
-        if (use_icount &&
-            !cpu->can_do_io
-            && (mask & ~old_mask) != 0) {
-            cpu_abort(cpu, "Raised interrupt while not in I/O function");
-        }
-    }
-}
-
-CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
-#endif
block/nbd-client.c
@@ -345,14 +345,14 @@ int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
 void nbd_client_detach_aio_context(BlockDriverState *bs)
 {
     NBDClientSession *client = nbd_get_client_session(bs);
-    qio_channel_detach_aio_context(QIO_CHANNEL(client->sioc));
+    qio_channel_detach_aio_context(QIO_CHANNEL(client->ioc));
 }

 void nbd_client_attach_aio_context(BlockDriverState *bs,
                                    AioContext *new_context)
 {
     NBDClientSession *client = nbd_get_client_session(bs);
-    qio_channel_attach_aio_context(QIO_CHANNEL(client->sioc), new_context);
+    qio_channel_attach_aio_context(QIO_CHANNEL(client->ioc), new_context);
     aio_co_schedule(new_context, client->read_reply_co);
 }
bootdevice.c
@@ -27,7 +27,7 @@
 #include "sysemu/sysemu.h"
 #include "qapi/visitor.h"
 #include "qemu/error-report.h"
-#include "hw/hw.h"
+#include "sysemu/reset.h"
 #include "hw/qdev-core.h"

 typedef struct FWBootEntry FWBootEntry;
bsd-user/main.c
@@ -25,7 +25,6 @@
 #include "qemu/config-file.h"
 #include "qemu/path.h"
 #include "qemu/help_option.h"
-/* For tb_lock */
 #include "cpu.h"
 #include "exec/exec-all.h"
 #include "tcg.h"
configure
@@ -40,14 +40,18 @@ printf " '%s'" "$0" "$@" >> config.log
 echo >> config.log
 echo "#" >> config.log

-error_exit() {
-    echo
+print_error() {
+    (echo
     echo "ERROR: $1"
     while test -n "$2"; do
         echo "       $2"
         shift
     done
-    echo
+    echo) >&2
+}
+
+error_exit() {
+    print_error "$@"
     exit 1
 }
@@ -163,6 +167,79 @@ have_backend () {
     echo "$trace_backends" | grep "$1" >/dev/null
 }

+glob() {
+    eval test -z '"${1#'"$2"'}"'
+}
+
+supported_hax_target() {
+    test "$hax" = "yes" || return 1
+    glob "$1" "*-softmmu" || return 1
+    case "${1%-softmmu}" in
+        i386|x86_64)
+            return 0
+        ;;
+    esac
+    return 1
+}
+
+supported_kvm_target() {
+    test "$kvm" = "yes" || return 1
+    glob "$1" "*-softmmu" || return 1
+    case "${1%-softmmu}:$cpu" in
+        arm:arm | aarch64:aarch64 | \
+        i386:i386 | i386:x86_64 | i386:x32 | \
+        x86_64:i386 | x86_64:x86_64 | x86_64:x32 | \
+        mips:mips | mipsel:mips | \
+        ppc:ppc | ppcemb:ppc | ppc64:ppc | \
+        ppc:ppc64 | ppcemb:ppc64 | ppc64:ppc64 | \
+        s390x:s390x)
+            return 0
+        ;;
+    esac
+    return 1
+}
+
+supported_xen_target() {
+    test "$xen" = "yes" || return 1
+    glob "$1" "*-softmmu" || return 1
+    case "${1%-softmmu}:$cpu" in
+        arm:arm | aarch64:aarch64 | \
+        i386:i386 | i386:x86_64 | x86_64:i386 | x86_64:x86_64)
+            return 0
+        ;;
+    esac
+    return 1
+}
+
+supported_target() {
+    case "$1" in
+        *-softmmu)
+            ;;
+        *-linux-user)
+            if test "$linux" != "yes"; then
+                print_error "Target '$target' is only available on a Linux host"
+                return 1
+            fi
+            ;;
+        *-bsd-user)
+            if test "$bsd" != "yes"; then
+                print_error "Target '$target' is only available on a BSD host"
+                return 1
+            fi
+            ;;
+        *)
+            print_error "Invalid target name '$target'"
+            return 1
+            ;;
+    esac
+    test "$tcg" = "yes" && return 0
+    supported_kvm_target "$1" && return 0
+    supported_xen_target "$1" && return 0
+    supported_hax_target "$1" && return 0
+    print_error "TCG disabled, but hardware accelerator not available for '$target'"
+    return 1
+}
+
 # default parameters
 source_path=$(dirname "$0")
 cpu=""
@@ -224,6 +301,7 @@ cap_ng=""
 attr=""
 libattr=""
 xfs=""
+tcg="yes"

 vhost_net="no"
 vhost_scsi="no"
@@ -961,6 +1039,10 @@ for opt do
   ;;
   --enable-cap-ng) cap_ng="yes"
   ;;
+  --disable-tcg) tcg="no"
+  ;;
+  --enable-tcg) tcg="yes"
+  ;;
   --disable-spice) spice="no"
   ;;
   --enable-spice) spice="yes"
@@ -1690,23 +1772,27 @@ if test "$solaris" = "yes" ; then
 fi

 if test -z "${target_list+xxx}" ; then
-    target_list="$default_target_list"
+    for target in $default_target_list; do
+        supported_target $target 2>/dev/null && \
+            target_list="$target_list $target"
+    done
+    target_list="${target_list# }"
 else
     target_list=$(echo "$target_list" | sed -e 's/,/ /g')
+    for target in $target_list; do
+        # Check that we recognised the target name; this allows a more
+        # friendly error message than if we let it fall through.
+        case " $default_target_list " in
+        *" $target "*)
+            ;;
+        *)
+            error_exit "Unknown target name '$target'"
+            ;;
+        esac
+        supported_target $target || exit 1
+    done
 fi

-# Check that we recognised the target name; this allows a more
-# friendly error message than if we let it fall through.
-for target in $target_list; do
-    case " $default_target_list " in
-    *" $target "*)
-        ;;
-    *)
-        error_exit "Unknown target name '$target'"
-        ;;
-    esac
-done
-
 # see if system emulation was really requested
 case " $target_list " in
   *"-softmmu "*) softmmu=yes
@@ -5119,7 +5205,6 @@ echo "module support    $modules"
 echo "host CPU          $cpu"
 echo "host big endian   $bigendian"
 echo "target list       $target_list"
-echo "tcg debug enabled $debug_tcg"
 echo "gprof enabled     $gprof"
 echo "sparse enabled    $sparse"
 echo "strip binaries    $strip_opt"
@@ -5174,8 +5259,12 @@ echo "ATTR/XATTR support $attr"
 echo "Install blobs     $blobs"
 echo "KVM support       $kvm"
 echo "HAX support       $hax"
+echo "TCG support       $tcg"
+if test "$tcg" = "yes" ; then
+    echo "TCG debug enabled $debug_tcg"
+    echo "TCG interpreter   $tcg_interpreter"
+fi
 echo "RDMA support      $rdma"
-echo "TCG interpreter   $tcg_interpreter"
 echo "fdt support       $fdt"
 echo "preadv support    $preadv"
 echo "fdatasync         $fdatasync"
@@ -5618,8 +5707,11 @@ fi
 if test "$signalfd" = "yes" ; then
   echo "CONFIG_SIGNALFD=y" >> $config_host_mak
 fi
-if test "$tcg_interpreter" = "yes" ; then
-  echo "CONFIG_TCG_INTERPRETER=y" >> $config_host_mak
+if test "$tcg" = "yes"; then
+  echo "CONFIG_TCG=y" >> $config_host_mak
+  if test "$tcg_interpreter" = "yes" ; then
+    echo "CONFIG_TCG_INTERPRETER=y" >> $config_host_mak
+  fi
 fi
 if test "$fdatasync" = "yes" ; then
   echo "CONFIG_FDATASYNC=y" >> $config_host_mak
@@ -6006,16 +6098,10 @@ case "$target" in
     target_softmmu="yes"
     ;;
   ${target_name}-linux-user)
-    if test "$linux" != "yes" ; then
-      error_exit "Target '$target' is only available on a Linux host"
-    fi
     target_user_only="yes"
     target_linux_user="yes"
     ;;
   ${target_name}-bsd-user)
-    if test "$bsd" != "yes" ; then
-      error_exit "Target '$target' is only available on a BSD host"
-    fi
     target_user_only="yes"
     target_bsd_user="yes"
     ;;
@@ -6178,46 +6264,22 @@ echo "TARGET_ABI_DIR=$TARGET_ABI_DIR" >> $config_target_mak
 if [ "$HOST_VARIANT_DIR" != "" ]; then
     echo "HOST_VARIANT_DIR=$HOST_VARIANT_DIR" >> $config_target_mak
 fi
-case "$target_name" in
-  i386|x86_64)
-    if test "$xen" = "yes" -a "$target_softmmu" = "yes" ; then
-      echo "CONFIG_XEN=y" >> $config_target_mak
-      if test "$xen_pci_passthrough" = yes; then
+
+if supported_xen_target $target; then
+    echo "CONFIG_XEN=y" >> $config_target_mak
+    if test "$xen_pci_passthrough" = yes; then
         echo "CONFIG_XEN_PCI_PASSTHROUGH=y" >> "$config_target_mak"
-      fi
     fi
-    ;;
-  *)
-esac
-case "$target_name" in
-  aarch64|arm|i386|x86_64|ppcemb|ppc|ppc64|s390x|mipsel|mips)
-    # Make sure the target and host cpus are compatible
-    if test "$kvm" = "yes" -a "$target_softmmu" = "yes" -a \
-      \( "$target_name" = "$cpu" -o \
-      \( "$target_name" = "ppcemb" -a "$cpu" = "ppc" \) -o \
-      \( "$target_name" = "ppc64" -a "$cpu" = "ppc" \) -o \
-      \( "$target_name" = "ppc" -a "$cpu" = "ppc64" \) -o \
-      \( "$target_name" = "ppcemb" -a "$cpu" = "ppc64" \) -o \
-      \( "$target_name" = "mipsel" -a "$cpu" = "mips" \) -o \
-      \( "$target_name" = "x86_64" -a "$cpu" = "i386" \) -o \
-      \( "$target_name" = "i386" -a "$cpu" = "x86_64" \) -o \
-      \( "$target_name" = "x86_64" -a "$cpu" = "x32" \) -o \
-      \( "$target_name" = "i386" -a "$cpu" = "x32" \) \) ; then
-      echo "CONFIG_KVM=y" >> $config_target_mak
-      if test "$vhost_net" = "yes" ; then
+fi
+if supported_kvm_target $target; then
+    echo "CONFIG_KVM=y" >> $config_target_mak
+    if test "$vhost_net" = "yes" ; then
         echo "CONFIG_VHOST_NET=y" >> $config_target_mak
         echo "CONFIG_VHOST_NET_TEST_$target_name=y" >> $config_host_mak
-      fi
     fi
-esac
-if test "$hax" = "yes" ; then
-  if test "$target_softmmu" = "yes" ; then
-    case "$target_name" in
-    i386|x86_64)
-      echo "CONFIG_HAX=y" >> $config_target_mak
-    ;;
-    esac
-  fi
 fi
+if supported_hax_target $target; then
+    echo "CONFIG_HAX=y" >> $config_target_mak
+fi
 if test "$target_bigendian" = "yes" ; then
   echo "TARGET_WORDS_BIGENDIAN=y" >> $config_target_mak
exec.c
@@ -118,6 +118,11 @@ __thread CPUState *current_cpu;
    2 = Adaptive rate instruction counting.  */
 int use_icount;

+uintptr_t qemu_host_page_size;
+intptr_t qemu_host_page_mask;
+uintptr_t qemu_real_host_page_size;
+intptr_t qemu_real_host_page_mask;
+
 bool set_preferred_target_page_bits(int bits)
 {
     /* The target page size is the lowest common denominator for all
@@ -2312,6 +2317,7 @@ static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
 {
     bool locked = false;

+    assert(tcg_enabled());
     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
         locked = true;
         tb_lock();
@@ -2370,6 +2376,7 @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
     CPUWatchpoint *wp;
     uint32_t cpu_flags;

+    assert(tcg_enabled());
     if (cpu->watchpoint_hit) {
         /* We re-entered the check after replacing the TB. Now raise
          * the debug interrupt so that is will trigger after the
@@ -2815,6 +2822,7 @@ static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
         cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
     }
     if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
+        assert(tcg_enabled());
         tb_lock();
         tb_invalidate_phys_range(addr, addr + length);
         tb_unlock();
@@ -3590,3 +3598,18 @@ err:
 }

 #endif
+
+void page_size_init(void)
+{
+    /* NOTE: we can always suppose that qemu_host_page_size >=
+       TARGET_PAGE_SIZE */
+    qemu_real_host_page_size = getpagesize();
+    qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
+    if (qemu_host_page_size == 0) {
+        qemu_host_page_size = qemu_real_host_page_size;
+    }
+    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
+        qemu_host_page_size = TARGET_PAGE_SIZE;
+    }
+    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
+}
hmp-commands-info.hx
@@ -261,6 +261,7 @@ STEXI
 Show memory tree.
 ETEXI

+#if defined(CONFIG_TCG)
     {
         .name       = "jit",
         .args_type  = "",
@@ -268,6 +269,7 @@ ETEXI
         .help       = "show dynamic compiler info",
         .cmd        = hmp_info_jit,
     },
+#endif

 STEXI
 @item info jit
@@ -275,6 +277,7 @@ STEXI
 Show dynamic compiler info.
 ETEXI

+#if defined(CONFIG_TCG)
     {
         .name       = "opcount",
         .args_type  = "",
@@ -282,6 +285,7 @@ ETEXI
         .help       = "show dynamic compiler opcode counters",
         .cmd        = hmp_info_opcount,
     },
+#endif

 STEXI
 @item info opcount
hw/acpi/ich9.c
@@ -33,7 +33,6 @@
 #include "sysemu/sysemu.h"
 #include "hw/acpi/acpi.h"
 #include "hw/acpi/tco.h"
-#include "sysemu/kvm.h"
 #include "exec/address-spaces.h"

 #include "hw/i386/ich9.h"
hw/i386/kvmvapic.c
@@ -410,7 +410,8 @@ static void patch_instruction(VAPICROMState *s, X86CPU *cpu, target_ulong ip)
         handlers = &s->rom_state.mp;
     }

-    if (!kvm_enabled()) {
+    if (tcg_enabled()) {
         cpu_restore_state(cs, cs->mem_io_pc);
         cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                              &current_flags);
         /* Account this instruction, because we will exit the tb.
@@ -456,7 +457,7 @@ static void patch_instruction(VAPICROMState *s, X86CPU *cpu, target_ulong ip)

     resume_all_vcpus();

-    if (!kvm_enabled()) {
+    if (tcg_enabled()) {
         /* Both tb_lock and iothread_mutex will be reset when
          * longjmps back into the cpu_exec loop. */
         tb_lock();
hw/i386/pc_q35.c
@@ -36,6 +36,7 @@
 #include "hw/timer/mc146818rtc.h"
 #include "hw/xen/xen.h"
 #include "sysemu/kvm.h"
+#include "kvm_i386.h"
 #include "hw/kvm/clock.h"
 #include "hw/pci-host/q35.h"
 #include "exec/address-spaces.h"
hw/misc/Makefile.objs
@@ -6,6 +6,7 @@ common-obj-$(CONFIG_ISA_DEBUG) += debugexit.o
 common-obj-$(CONFIG_SGA) += sga.o
 common-obj-$(CONFIG_ISA_TESTDEV) += pc-testdev.o
 common-obj-$(CONFIG_PCI_TESTDEV) += pci-testdev.o
+common-obj-$(CONFIG_EDU) += edu.o

 common-obj-y += unimp.o

@@ -53,7 +54,6 @@ obj-$(CONFIG_MIPS_CPS) += mips_cpc.o
 obj-$(CONFIG_MIPS_ITU) += mips_itu.o

 obj-$(CONFIG_PVPANIC) += pvpanic.o
-obj-$(CONFIG_EDU) += edu.o
 obj-$(CONFIG_HYPERV_TESTDEV) += hyperv_testdev.o
 obj-$(CONFIG_AUX) += auxbus.o
 obj-$(CONFIG_ASPEED_SOC) += aspeed_scu.o aspeed_sdmc.o
hw/scsi/virtio-scsi.c
@@ -43,12 +43,13 @@ static inline SCSIDevice *virtio_scsi_device_find(VirtIOSCSI *s, uint8_t *lun)

 void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req)
 {
+    VirtIODevice *vdev = VIRTIO_DEVICE(s);
     const size_t zero_skip =
         offsetof(VirtIOSCSIReq, resp_iov) + sizeof(req->resp_iov);

     req->vq = vq;
     req->dev = s;
-    qemu_sglist_init(&req->qsgl, DEVICE(s), 8, &address_space_memory);
+    qemu_sglist_init(&req->qsgl, DEVICE(s), 8, vdev->dma_as);
     qemu_iovec_init(&req->resp_iov, 1);
     memset((uint8_t *)req + zero_skip, 0, sizeof(*req) - zero_skip);
 }
include/exec/cpu-common.h
@@ -28,6 +28,8 @@ void qemu_init_cpu_list(void);
 void cpu_list_lock(void);
 void cpu_list_unlock(void);

+void tcg_flush_softmmu_tlb(CPUState *cs);
+
 #if !defined(CONFIG_USER_ONLY)

 enum device_endian {
include/exec/cpu-defs.h
@@ -25,7 +25,9 @@

 #include "qemu/host-utils.h"
 #include "qemu/queue.h"
+#ifdef CONFIG_TCG
 #include "tcg-target.h"
+#endif
 #ifndef CONFIG_USER_ONLY
 #include "exec/hwaddr.h"
 #endif
@@ -54,7 +56,7 @@ typedef uint64_t target_ulong;
 #error TARGET_LONG_SIZE undefined
 #endif

-#if !defined(CONFIG_USER_ONLY)
+#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
 /* use a fully associative victim tlb of 8 entries */
 #define CPU_VTLB_SIZE 8
include/exec/exec-all.h
@@ -82,6 +82,9 @@ void cpu_reloading_memory_map(void);
 * Note that with KVM only one address space is supported.
 */
 void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx);
 #endif

+#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
 /* cputlb.c */
 /**
  * tlb_flush_page:
@@ -467,6 +470,10 @@ extern uintptr_t tci_tb_ptr;
    smaller than 4 bytes, so we don't worry about special-casing this.  */
 #define GETPC_ADJ   2

+void tb_lock(void);
+void tb_unlock(void);
+void tb_lock_reset(void);
+
 #if !defined(CONFIG_USER_ONLY)

 struct MemoryRegion *iotlb_to_region(CPUState *cpu,
include/exec/poison.h
@@ -7,13 +7,16 @@

 #pragma GCC poison TARGET_I386
 #pragma GCC poison TARGET_X86_64
+#pragma GCC poison TARGET_AARCH64
 #pragma GCC poison TARGET_ALPHA
 #pragma GCC poison TARGET_ARM
 #pragma GCC poison TARGET_CRIS
+#pragma GCC poison TARGET_HPPA
 #pragma GCC poison TARGET_LM32
 #pragma GCC poison TARGET_M68K
 #pragma GCC poison TARGET_MICROBLAZE
 #pragma GCC poison TARGET_MIPS
 #pragma GCC poison TARGET_ABI_MIPSN32
 #pragma GCC poison TARGET_ABI_MIPSO32
 #pragma GCC poison TARGET_MIPS64
 #pragma GCC poison TARGET_ABI_MIPSN64
@@ -28,10 +31,12 @@
 #pragma GCC poison TARGET_SH4
 #pragma GCC poison TARGET_SPARC
 #pragma GCC poison TARGET_SPARC64
+#pragma GCC poison TARGET_TILEGX
 #pragma GCC poison TARGET_TRICORE
 #pragma GCC poison TARGET_UNICORE32
 #pragma GCC poison TARGET_XTENSA

 #pragma GCC poison TARGET_HAS_BFLT
 #pragma GCC poison TARGET_NAME
+#pragma GCC poison TARGET_SUPPORTS_MTTCG
 #pragma GCC poison TARGET_WORDS_BIGENDIAN
@@ -65,6 +70,7 @@
 #pragma GCC poison CONFIG_ARM_A64_DIS
 #pragma GCC poison CONFIG_ARM_DIS
 #pragma GCC poison CONFIG_CRIS_DIS
+#pragma GCC poison CONFIG_HPPA_DIS
 #pragma GCC poison CONFIG_I386_DIS
 #pragma GCC poison CONFIG_LM32_DIS
 #pragma GCC poison CONFIG_M68K_DIS
@@ -80,6 +86,8 @@

 #pragma GCC poison CONFIG_LINUX_USER
 #pragma GCC poison CONFIG_VHOST_NET
+#pragma GCC poison CONFIG_KVM
+#pragma GCC poison CONFIG_SOFTMMU

 #endif
 #endif
include/hw/i386/pc.h
@@ -20,19 +20,6 @@

 #define HPET_INTCAP "hpet-intcap"

-#ifdef CONFIG_KVM
-#define kvm_pit_in_kernel() \
-    (kvm_irqchip_in_kernel() && !kvm_irqchip_is_split())
-#define kvm_pic_in_kernel() \
-    (kvm_irqchip_in_kernel() && !kvm_irqchip_is_split())
-#define kvm_ioapic_in_kernel() \
-    (kvm_irqchip_in_kernel() && !kvm_irqchip_is_split())
-#else
-#define kvm_pit_in_kernel()      0
-#define kvm_pic_in_kernel()      0
-#define kvm_ioapic_in_kernel()   0
-#endif
-
 /**
  * PCMachineState:
  * @acpi_dev: link to ACPI PM device that performs ACPI hotplug handling
include/qemu-common.h
@@ -76,8 +76,13 @@ int qemu_openpty_raw(int *aslave, char *pty_name);
     sendto(sockfd, buf, len, flags, destaddr, addrlen)
 #endif

+extern bool tcg_allowed;
 void tcg_exec_init(unsigned long tb_size);
-bool tcg_enabled(void);
+#ifdef CONFIG_TCG
+#define tcg_enabled() (tcg_allowed)
+#else
+#define tcg_enabled() 0
+#endif

 void cpu_exec_init_all(void);
 void cpu_exec_step_atomic(CPUState *cpu);
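Turning tcg_enabled() into a macro that expands to the tcg_allowed flag, or to constant 0 without CONFIG_TCG, lets common code keep TCG-only branches without #ifdefs: the compiler folds an `if (0)` away and, at the optimization levels QEMU builds with, the dead branch references no TCG symbols. A minimal sketch, where tcg_only_helper() is a hypothetical TCG-only function:

    /* Sketch only; tcg_only_helper() is assumed, not part of this commit. */
    static void common_path(CPUState *cpu)
    {
        if (tcg_enabled()) {       /* constant 0 when !CONFIG_TCG */
            tcg_only_helper(cpu);  /* branch is discarded by the compiler */
        }
    }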
include/qemu/main-loop.h
@@ -79,7 +79,7 @@ int qemu_init_main_loop(Error **errp);
 *
 * @nonblocking: Whether the caller should block until an event occurs.
 */
-int main_loop_wait(int nonblocking);
+void main_loop_wait(int nonblocking);

 /**
  * qemu_get_aio_context: Return the main loop's AioContext
include/qemu/thread-posix.h
@@ -12,10 +12,12 @@ typedef QemuMutex QemuRecMutex;

 struct QemuMutex {
     pthread_mutex_t lock;
+    bool initialized;
 };

 struct QemuCond {
     pthread_cond_t cond;
+    bool initialized;
 };

 struct QemuSemaphore {
@@ -26,6 +28,7 @@ struct QemuSemaphore {
 #else
     sem_t sem;
 #endif
+    bool initialized;
 };

 struct QemuEvent {
@@ -34,6 +37,7 @@ struct QemuEvent {
     pthread_cond_t cond;
 #endif
     unsigned value;
+    bool initialized;
 };

 struct QemuThread {
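The new initialized field lets the qemu_mutex_*/qemu_cond_* wrappers assert that an object was set up with its init function and not yet destroyed, turning use-after-destroy bugs into immediate assertion failures. A sketch of how the flag is presumably maintained (the real checks live in util/qemu-thread-posix.c and its Windows counterpart):

    void qemu_mutex_init(QemuMutex *mutex)
    {
        pthread_mutex_init(&mutex->lock, NULL);
        mutex->initialized = true;
    }

    void qemu_mutex_destroy(QemuMutex *mutex)
    {
        assert(mutex->initialized);   /* catches double destroy */
        mutex->initialized = false;
        pthread_mutex_destroy(&mutex->lock);
    }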
include/qemu/thread-win32.h
@@ -5,11 +5,13 @@

 struct QemuMutex {
     SRWLOCK lock;
+    bool initialized;
 };

 typedef struct QemuRecMutex QemuRecMutex;
 struct QemuRecMutex {
     CRITICAL_SECTION lock;
+    bool initialized;
 };

 void qemu_rec_mutex_destroy(QemuRecMutex *mutex);
@@ -19,15 +21,18 @@ void qemu_rec_mutex_unlock(QemuRecMutex *mutex);

 struct QemuCond {
     CONDITION_VARIABLE var;
+    bool initialized;
 };

 struct QemuSemaphore {
     HANDLE sema;
+    bool initialized;
 };

 struct QemuEvent {
     int value;
     HANDLE event;
+    bool initialized;
 };

 typedef struct QemuThreadData QemuThreadData;
include/qom/cpu.h
@@ -369,7 +369,6 @@ struct CPUState {
     vaddr mem_io_vaddr;

     int kvm_fd;
-    bool kvm_vcpu_dirty;
     struct KVMState *kvm_state;
     struct kvm_run *kvm_run;

@@ -386,6 +385,9 @@ struct CPUState {
     uint32_t can_do_io;
     int32_t exception_index; /* used by m68k TCG */

+    /* shared by kvm, hax and hvf */
+    bool vcpu_dirty;
+
     /* Used to keep track of an outstanding cpu throttle thread for migration
      * autoconverge
      */
@@ -400,7 +402,6 @@ struct CPUState {
         icount_decr_u16 u16;
     } icount_decr;

-    bool hax_vcpu_dirty;
     struct hax_vcpu_state *hax_vcpu;

     /* The pending_tlb_flush flag is set and cleared atomically to
@@ -816,6 +817,8 @@ void cpu_interrupt(CPUState *cpu, int mask);

 #endif /* USER_ONLY */

+#ifdef NEED_CPU_H
+
 #ifdef CONFIG_SOFTMMU
 static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
                                          bool is_write, bool is_exec,
@@ -838,6 +841,8 @@ static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
 }
 #endif

+#endif /* NEED_CPU_H */
+
 /**
  * cpu_set_pc:
  * @cpu: The CPU to set the program counter for.
@@ -1014,6 +1019,8 @@ void cpu_exec_initfn(CPUState *cpu);
 void cpu_exec_realizefn(CPUState *cpu, Error **errp);
 void cpu_exec_unrealizefn(CPUState *cpu);

+#ifdef NEED_CPU_H
+
 #ifdef CONFIG_SOFTMMU
 extern const struct VMStateDescription vmstate_cpu_common;
 #else
@@ -1028,6 +1035,8 @@ extern const struct VMStateDescription vmstate_cpu_common;
     .offset = 0, \
 }

+#endif /* NEED_CPU_H */
+
 #define UNASSIGNED_CPU_INDEX -1

 #endif
include/sysemu/accel.h
@@ -63,7 +63,7 @@ typedef struct AccelClass {
 #define ACCEL_GET_CLASS(obj) \
     OBJECT_GET_CLASS(AccelClass, (obj), TYPE_ACCEL)

-extern int tcg_tb_size;
+extern unsigned long tcg_tb_size;

 void configure_accelerator(MachineState *ms);
 /* Register accelerator specific global properties */
include/sysemu/kvm.h
@@ -19,26 +19,18 @@
 #include "exec/memattrs.h"
 #include "hw/irq.h"

-#ifdef CONFIG_KVM
-#include <linux/kvm.h>
-#include <linux/kvm_para.h>
+#ifdef NEED_CPU_H
+# ifdef CONFIG_KVM
+#  include <linux/kvm.h>
+#  include <linux/kvm_para.h>
+#  define CONFIG_KVM_IS_POSSIBLE
+# endif
 #else
-/* These constants must never be used at runtime if kvm_enabled() is false.
- * They exist so we don't need #ifdefs around KVM-specific code that already
- * checks kvm_enabled() properly.
- */
-#define KVM_CPUID_SIGNATURE      0
-#define KVM_CPUID_FEATURES       0
-#define KVM_FEATURE_CLOCKSOURCE  0
-#define KVM_FEATURE_NOP_IO_DELAY 0
-#define KVM_FEATURE_MMU_OP       0
-#define KVM_FEATURE_CLOCKSOURCE2 0
-#define KVM_FEATURE_ASYNC_PF     0
-#define KVM_FEATURE_STEAL_TIME   0
-#define KVM_FEATURE_PV_EOI       0
-#define KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 0
+# define CONFIG_KVM_IS_POSSIBLE
 #endif

+#ifdef CONFIG_KVM_IS_POSSIBLE
+
 extern bool kvm_allowed;
 extern bool kvm_kernel_irqchip;
 extern bool kvm_split_irqchip;
@@ -55,7 +47,6 @@ extern bool kvm_direct_msi_allowed;
 extern bool kvm_ioeventfd_any_length_allowed;
 extern bool kvm_msi_use_devid;

-#if defined CONFIG_KVM || !defined NEED_CPU_H
 #define kvm_enabled()           (kvm_allowed)
 /**
  * kvm_irqchip_in_kernel:
@@ -178,6 +169,7 @@ extern bool kvm_msi_use_devid;
 #define kvm_msi_devid_required() (kvm_msi_use_devid)

 #else
+
 #define kvm_enabled()           (0)
 #define kvm_irqchip_in_kernel() (false)
 #define kvm_irqchip_is_split() (false)
@@ -193,7 +185,8 @@ extern bool kvm_msi_use_devid;
 #define kvm_direct_msi_enabled() (false)
 #define kvm_ioeventfd_any_length_enabled() (false)
 #define kvm_msi_devid_required() (false)
-#endif
+
+#endif  /* CONFIG_KVM_IS_POSSIBLE */

 struct kvm_run;
 struct kvm_lapic_state;
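CONFIG_KVM_IS_POSSIBLE makes the distinction explicit: target-independent code may always test kvm_enabled() at runtime, while target-specific code (NEED_CPU_H) gets the real macros only if that target was built with KVM; everywhere else kvm_enabled() is the literal (0). A sketch of the intended effect; the surrounding function is illustrative only:

    static void sketch_sync_regs(CPUState *cpu)
    {
        struct kvm_regs regs;

        if (kvm_enabled()) {   /* literal (0) where KVM is impossible */
            kvm_vcpu_ioctl(cpu, KVM_GET_REGS, &regs);  /* dead code there */
        }
    }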
monitor.c
@@ -1098,6 +1098,7 @@ static void hmp_info_registers(Monitor *mon, const QDict *qdict)
     }
 }

+#ifdef CONFIG_TCG
 static void hmp_info_jit(Monitor *mon, const QDict *qdict)
 {
     if (!tcg_enabled()) {
@@ -1113,6 +1114,7 @@ static void hmp_info_opcount(Monitor *mon, const QDict *qdict)
 {
     dump_opcount_info((FILE *)mon, monitor_fprintf);
 }
+#endif

 static void hmp_info_history(Monitor *mon, const QDict *qdict)
 {
qemu-doc.texi
@@ -386,11 +386,9 @@ CPU registers by prefixing them with @emph{$}.
 @node disk_images
 @section Disk Images

-Since version 0.6.1, QEMU supports many disk image formats, including
-growable disk images (their size increase as non empty sectors are
-written), compressed and encrypted disk images. Version 0.8.3 added
-the new qcow2 disk image format which is essential to support VM
-snapshots.
+QEMU supports many disk image formats, including growable disk images
+(their size increase as non empty sectors are written), compressed and
+encrypted disk images.

 @menu
 * disk_images_quickstart::    Quick start for disk image creation
@@ -2566,6 +2564,8 @@ so should only be used with trusted guest OS.

 @end table

+@c man end
+
 @node ColdFire System emulator
 @section ColdFire System emulator
 @cindex system emulation (ColdFire)
@@ -2610,6 +2610,8 @@ so should only be used with trusted guest OS.

 @end table

+@c man end
+
 @node Cris System emulator
 @section Cris System emulator
 @cindex system emulation (Cris)
@@ -2682,6 +2684,8 @@ so should only be used with trusted guest OS.

 @end table

+@c man end
+
 @node QEMU Guest Agent
 @chapter QEMU Guest Agent invocation
qom/cpu.c
@@ -26,11 +26,14 @@
 #include "qemu/notify.h"
 #include "qemu/log.h"
 #include "exec/log.h"
+#include "exec/cpu-common.h"
 #include "qemu/error-report.h"
 #include "sysemu/sysemu.h"
 #include "hw/qdev-properties.h"
 #include "trace-root.h"

+CPUInterruptHandler cpu_interrupt_handler;
+
 bool cpu_exists(int64_t id)
 {
     CPUState *cpu;
@@ -293,9 +296,7 @@ static void cpu_common_reset(CPUState *cpu)
     if (tcg_enabled()) {
         cpu_tb_jmp_cache_clear(cpu);

-#ifdef CONFIG_SOFTMMU
-        tlb_flush(cpu, 0);
-#endif
+        tcg_flush_softmmu_tlb(cpu);
     }
 }

@@ -418,6 +419,17 @@ static vaddr cpu_adjust_watchpoint_address(CPUState *cpu, vaddr addr, int len)
     return addr;
 }

+static void generic_handle_interrupt(CPUState *cpu, int mask)
+{
+    cpu->interrupt_request |= mask;
+
+    if (!qemu_cpu_is_self(cpu)) {
+        qemu_cpu_kick(cpu);
+    }
+}
+
+CPUInterruptHandler cpu_interrupt_handler = generic_handle_interrupt;
+
 static void cpu_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
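cpu_interrupt_handler is the hook through which an accelerator customizes interrupt delivery; after this series the generic version above is the default (KVM's identical private copy was deleted in kvm-all.c), and only TCG installs a replacement, from tcg_init() in accel/tcg/tcg-all.c. A sketch of the consuming side, assuming cpu_interrupt() in include/qom/cpu.h is the sole caller of the hook:

    /* Sketch of the dispatch (softmmu configuration). */
    void cpu_interrupt(CPUState *cpu, int mask)
    {
        cpu_interrupt_handler(cpu, mask);  /* generic_handle_interrupt by
                                            * default, tcg_handle_interrupt
                                            * under TCG */
    }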
scripts/checkpatch.pl
@@ -2473,6 +2473,10 @@ sub process {
 		if ($line =~ /\b(strto[^kd].*?)\s*\(/) {
 			ERROR("consider using qemu_$1 in preference to $1\n" . $herecurr);
 		}
+# recommend sigaction over signal for portability, when establishing a handler
+		if ($line =~ /\bsignal\s*\(/ && !($line =~ /SIG_(?:IGN|DFL)/)) {
+			ERROR("use sigaction to establish signal handlers; signal is not portable\n" . $herecurr);
+		}
 # check for module_init(), use category-specific init macros explicitly please
 		if ($line =~ /^module_init\s*\(/) {
 			ERROR("please use block_init(), type_init() etc. instead of module_init()\n" . $herecurr);
target/i386/Makefile.objs
@@ -1,7 +1,8 @@
-obj-y += translate.o helper.o cpu.o bpt_helper.o
-obj-y += excp_helper.o fpu_helper.o cc_helper.o int_helper.o svm_helper.o
-obj-y += smm_helper.o misc_helper.o mem_helper.o seg_helper.o mpx_helper.o
-obj-y += gdbstub.o
+obj-y += helper.o cpu.o gdbstub.o xsave_helper.o
+obj-$(CONFIG_TCG) += translate.o
+obj-$(CONFIG_TCG) += bpt_helper.o cc_helper.o excp_helper.o fpu_helper.o
+obj-$(CONFIG_TCG) += int_helper.o mem_helper.o misc_helper.o mpx_helper.o
+obj-$(CONFIG_TCG) += seg_helper.o smm_helper.o svm_helper.o
 obj-$(CONFIG_SOFTMMU) += machine.o arch_memory_mapping.o arch_dump.o monitor.o
 obj-$(CONFIG_KVM) += kvm.o hyperv.o
 obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
target/i386/cpu.c
@@ -4040,8 +4040,10 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
     cc->class_by_name = x86_cpu_class_by_name;
     cc->parse_features = x86_cpu_parse_featurestr;
     cc->has_work = x86_cpu_has_work;
+#ifdef CONFIG_TCG
     cc->do_interrupt = x86_cpu_do_interrupt;
     cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
+#endif
     cc->dump_state = x86_cpu_dump_state;
     cc->get_crash_info = x86_cpu_get_crash_info;
     cc->set_pc = x86_cpu_set_pc;
@@ -4070,7 +4072,7 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
     cc->gdb_core_xml_file = "i386-32bit.xml";
     cc->gdb_num_core_regs = 41;
 #endif
-#ifndef CONFIG_USER_ONLY
+#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
     cc->debug_excp_handler = breakpoint_handler;
 #endif
     cc->cpu_exec_enter = x86_cpu_exec_enter;
target/i386/cpu.h
@@ -52,7 +52,9 @@

 #include "exec/cpu-defs.h"

+#ifdef CONFIG_TCG
 #include "fpu/softfloat.h"
+#endif

 #define R_EAX 0
 #define R_ECX 1
@@ -1418,8 +1420,6 @@ int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,

 /* op_helper.c */
 /* used for debug or cpu save/restore */
-void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f);
-floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper);

 /* cpu-exec.c */
 /* the following helpers are only usable in user mode simulation as
@@ -1596,11 +1596,14 @@ void QEMU_NORETURN raise_interrupt(CPUX86State *nenv, int intno, int is_int,
 /* cc_helper.c */
 extern const uint8_t parity_table[256];
 uint32_t cpu_cc_compute_all(CPUX86State *env1, int op);
+void update_fp_status(CPUX86State *env);

 static inline uint32_t cpu_compute_eflags(CPUX86State *env)
 {
-    return env->eflags | cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK);
+    uint32_t eflags = env->eflags;
+    if (tcg_enabled()) {
+        eflags |= cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK);
+    }
+    return eflags;
 }

 /* NOTE: the translator must set DisasContext.cc_op to CC_OP_EFLAGS
@@ -1645,8 +1648,24 @@ static inline int32_t x86_get_a20_mask(CPUX86State *env)
 }

 /* fpu_helper.c */
-void cpu_set_mxcsr(CPUX86State *env, uint32_t val);
-void cpu_set_fpuc(CPUX86State *env, uint16_t val);
-void update_fp_status(CPUX86State *env);
+void update_mxcsr_status(CPUX86State *env);
+
+static inline void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr)
+{
+    env->mxcsr = mxcsr;
+    if (tcg_enabled()) {
+        update_mxcsr_status(env);
+    }
+}
+
+static inline void cpu_set_fpuc(CPUX86State *env, uint16_t fpuc)
+{
+    env->fpuc = fpuc;
+    if (tcg_enabled()) {
+        update_fp_status(env);
+    }
+}

 /* mem_helper.c */
 void helper_lock_init(void);
@@ -1697,4 +1716,6 @@ void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
 /* cpu.c */
 bool cpu_is_bsp(X86CPU *cpu);

+void x86_cpu_xrstor_all_areas(X86CPU *cpu, const X86XSaveArea *buf);
+void x86_cpu_xsave_all_areas(X86CPU *cpu, X86XSaveArea *buf);
 #endif /* I386_CPU_H */
@ -136,3 +136,346 @@ void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr
|
|||
{
|
||||
raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_USER_ONLY)
|
||||
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
|
||||
int is_write, int mmu_idx)
|
||||
{
|
||||
X86CPU *cpu = X86_CPU(cs);
|
||||
CPUX86State *env = &cpu->env;
|
||||
|
||||
/* user mode only emulation */
|
||||
is_write &= 1;
|
||||
env->cr[2] = addr;
|
||||
env->error_code = (is_write << PG_ERROR_W_BIT);
|
||||
env->error_code |= PG_ERROR_U_MASK;
|
||||
cs->exception_index = EXCP0E_PAGE;
|
||||
env->exception_is_int = 0;
|
||||
env->exception_next_eip = -1;
|
||||
return 1;
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
/* return value:
|
||||
* -1 = cannot handle fault
|
||||
* 0 = nothing more to do
|
||||
* 1 = generate PF fault
|
||||
*/
|
||||
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
|
||||
int is_write1, int mmu_idx)
|
||||
{
|
||||
X86CPU *cpu = X86_CPU(cs);
|
||||
CPUX86State *env = &cpu->env;
|
||||
uint64_t ptep, pte;
|
||||
int32_t a20_mask;
|
||||
target_ulong pde_addr, pte_addr;
|
||||
int error_code = 0;
|
||||
int is_dirty, prot, page_size, is_write, is_user;
|
||||
hwaddr paddr;
|
||||
uint64_t rsvd_mask = PG_HI_RSVD_MASK;
|
||||
uint32_t page_offset;
|
||||
target_ulong vaddr;
|
||||
|
||||
is_user = mmu_idx == MMU_USER_IDX;
|
||||
#if defined(DEBUG_MMU)
|
||||
printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
|
||||
addr, is_write1, is_user, env->eip);
|
||||
#endif
|
||||
is_write = is_write1 & 1;
|
||||
|
||||
a20_mask = x86_get_a20_mask(env);
|
||||
if (!(env->cr[0] & CR0_PG_MASK)) {
|
||||
pte = addr;
|
||||
#ifdef TARGET_X86_64
|
||||
if (!(env->hflags & HF_LMA_MASK)) {
|
||||
/* Without long mode we can only address 32bits in real mode */
|
||||
pte = (uint32_t)pte;
|
||||
}
|
||||
#endif
|
||||
prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
|
||||
page_size = 4096;
|
||||
goto do_mapping;
|
||||
}
|
||||
|
||||
if (!(env->efer & MSR_EFER_NXE)) {
|
||||
rsvd_mask |= PG_NX_MASK;
|
||||
}
|
||||
|
||||
if (env->cr[4] & CR4_PAE_MASK) {
|
||||
uint64_t pde, pdpe;
|
||||
target_ulong pdpe_addr;
|
||||
|
||||
#ifdef TARGET_X86_64
|
||||
if (env->hflags & HF_LMA_MASK) {
|
||||
bool la57 = env->cr[4] & CR4_LA57_MASK;
|
||||
uint64_t pml5e_addr, pml5e;
|
||||
uint64_t pml4e_addr, pml4e;
|
||||
int32_t sext;
|
||||
|
||||
/* test virtual address sign extension */
|
||||
sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
|
||||
if (sext != 0 && sext != -1) {
|
||||
env->error_code = 0;
|
||||
cs->exception_index = EXCP0D_GPF;
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (la57) {
|
||||
pml5e_addr = ((env->cr[3] & ~0xfff) +
|
||||
(((addr >> 48) & 0x1ff) << 3)) & a20_mask;
|
||||
pml5e = x86_ldq_phys(cs, pml5e_addr);
|
||||
if (!(pml5e & PG_PRESENT_MASK)) {
|
||||
goto do_fault;
|
||||
}
|
||||
if (pml5e & (rsvd_mask | PG_PSE_MASK)) {
|
||||
goto do_fault_rsvd;
|
||||
}
|
||||
if (!(pml5e & PG_ACCESSED_MASK)) {
|
||||
pml5e |= PG_ACCESSED_MASK;
|
||||
x86_stl_phys_notdirty(cs, pml5e_addr, pml5e);
|
||||
}
|
||||
ptep = pml5e ^ PG_NX_MASK;
|
||||
} else {
|
||||
pml5e = env->cr[3];
|
||||
ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
|
||||
}
|
||||
|
||||
pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
|
||||
(((addr >> 39) & 0x1ff) << 3)) & a20_mask;
|
||||
pml4e = x86_ldq_phys(cs, pml4e_addr);
|
||||
if (!(pml4e & PG_PRESENT_MASK)) {
|
||||
goto do_fault;
|
||||
}
|
||||
if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
|
||||
goto do_fault_rsvd;
|
||||
}
|
||||
if (!(pml4e & PG_ACCESSED_MASK)) {
|
||||
pml4e |= PG_ACCESSED_MASK;
|
||||
x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
|
||||
}
|
||||
ptep &= pml4e ^ PG_NX_MASK;
|
||||
pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
|
||||
a20_mask;
|
||||
pdpe = x86_ldq_phys(cs, pdpe_addr);
|
||||
if (!(pdpe & PG_PRESENT_MASK)) {
|
||||
goto do_fault;
|
||||
}
|
||||
if (pdpe & rsvd_mask) {
|
||||
goto do_fault_rsvd;
|
||||
}
|
||||
ptep &= pdpe ^ PG_NX_MASK;
|
||||
if (!(pdpe & PG_ACCESSED_MASK)) {
|
||||
pdpe |= PG_ACCESSED_MASK;
|
||||
x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
|
||||
}
|
||||
if (pdpe & PG_PSE_MASK) {
|
||||
/* 1 GB page */
|
||||
page_size = 1024 * 1024 * 1024;
|
||||
pte_addr = pdpe_addr;
|
||||
pte = pdpe;
|
||||
goto do_check_protect;
|
||||
}
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
/* XXX: load them when cr3 is loaded ? */
|
||||
pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
|
||||
a20_mask;
|
||||
pdpe = x86_ldq_phys(cs, pdpe_addr);
|
||||
if (!(pdpe & PG_PRESENT_MASK)) {
|
||||
goto do_fault;
|
||||
}
|
||||
rsvd_mask |= PG_HI_USER_MASK;
|
||||
if (pdpe & (rsvd_mask | PG_NX_MASK)) {
|
||||
goto do_fault_rsvd;
|
||||
}
|
||||
ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
|
||||
}
|
||||
|
||||
pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
|
||||
a20_mask;
|
||||
pde = x86_ldq_phys(cs, pde_addr);
|
||||
if (!(pde & PG_PRESENT_MASK)) {
|
||||
goto do_fault;
|
||||
}
|
||||
if (pde & rsvd_mask) {
|
||||
goto do_fault_rsvd;
|
||||
}
|
||||
ptep &= pde ^ PG_NX_MASK;
|
||||
if (pde & PG_PSE_MASK) {
|
||||
/* 2 MB page */
|
||||
page_size = 2048 * 1024;
|
||||
pte_addr = pde_addr;
|
||||
pte = pde;
|
||||
goto do_check_protect;
|
||||
}
|
||||
/* 4 KB page */
|
||||
if (!(pde & PG_ACCESSED_MASK)) {
|
||||
pde |= PG_ACCESSED_MASK;
|
||||
x86_stl_phys_notdirty(cs, pde_addr, pde);
|
||||
}
|
||||
pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
|
||||
a20_mask;
|
||||
pte = x86_ldq_phys(cs, pte_addr);
|
||||
if (!(pte & PG_PRESENT_MASK)) {
|
||||
goto do_fault;
|
||||
}
|
||||
if (pte & rsvd_mask) {
|
||||
goto do_fault_rsvd;
|
||||
}
|
||||
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page directory entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            a20_mask;
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    /* can the page can be put in the TLB?  prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

    prot = 0;
    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
            prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }
    if ((env->cr[4] & CR4_PKE_MASK) && (env->hflags & HF_LMA_MASK) &&
        (ptep & PG_USER_MASK) && env->pkru) {
        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
        uint32_t pkru_ad = (env->pkru >> pk * 2) & 1;
        uint32_t pkru_wd = (env->pkru >> pk * 2) & 2;
        uint32_t pkru_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

        if (pkru_ad) {
            pkru_prot &= ~(PAGE_READ | PAGE_WRITE);
        } else if (pkru_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
            pkru_prot &= ~PAGE_WRITE;
        }

        prot &= pkru_prot;
        if ((pkru_prot & (1 << is_write1)) == 0) {
            assert(is_write1 != 2);
            error_code |= PG_ERROR_PK_MASK;
            goto do_fault_protect;
        }
    }

    if ((prot & (1 << is_write1)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    if (!(pte & PG_DIRTY_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        assert(!is_write);
        prot &= ~PAGE_WRITE;
    }

do_mapping:
    pte = pte & a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    vaddr = addr & TARGET_PAGE_MASK;
    page_offset = vaddr & (page_size - 1);
    paddr = pte + page_offset;

    assert(prot & (1 << is_write1));
    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;
 do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
 do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
#endif
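The 4 MB branch above implements PSE-36: PDE bits 20:13 supply physical address bits 39:32, folded in with a single shift while the low PDE bits stay in place for the accessed/dirty update. A minimal standalone sketch of just that bit manipulation (hypothetical helper name, plain C, not QEMU API):

#include <assert.h>
#include <stdint.h>

/* Hypothetical helper: recover the 40-bit physical base of a 4 MB
 * PSE-36 page from a 32-bit PDE, mirroring
 * "pte = pde | ((pde & 0x1fe000LL) << (32 - 13))" above. */
static uint64_t pse36_base(uint32_t pde)
{
    uint64_t pte = pde | (((uint64_t)pde & 0x1fe000ULL) << (32 - 13));
    return pte & ~0x3fffffULL;          /* strip flags and the 4 MB offset */
}

int main(void)
{
    /* bits 31:22 give physical 31:22, bits 20:13 give physical 39:32 */
    uint32_t pde = 0xabc0a083u;         /* base 0xabc00000, high bits 0x05 */
    assert(pse36_base(pde) == 0x5abc00000ULL);
    return 0;
}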
@@ -1539,24 +1539,6 @@ void helper_xsetbv(CPUX86State *env, uint32_t ecx, uint64_t mask)
    raise_exception_ra(env, EXCP0D_GPF, GETPC());
}

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
{
    CPU_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */

@@ -1568,12 +1550,11 @@ floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
#define SSE_RC_CHOP 0x6000
#define SSE_FZ 0x8000

void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr)
void update_mxcsr_status(CPUX86State *env)
{
    uint32_t mxcsr = env->mxcsr;
    int rnd_type;

    env->mxcsr = mxcsr;

    /* set rounding mode */
    switch (mxcsr & SSE_RC_MASK) {
    default:

@@ -1599,12 +1580,6 @@ void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr)
    set_flush_to_zero((mxcsr & SSE_FZ) ? 1 : 0, &env->fp_status);
}

void cpu_set_fpuc(CPUX86State *env, uint16_t val)
{
    env->fpuc = val;
    update_fp_status(env);
}

void helper_ldmxcsr(CPUX86State *env, uint32_t val)
{
    cpu_set_mxcsr(env, val);
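cpu_get_fp80()/cpu_set_fp80() just split an 80-bit x87 value into its 64-bit mantissa and 16-bit sign/exponent halves through a union. A rough standalone equivalent, assuming an x86 host where long double is the 80-bit format (the helper name here is made up for illustration):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for CPU_LDoubleU: on x86, long double is the
 * 80-bit x87 format, stored as a 64-bit mantissa followed by a 16-bit
 * sign/exponent word (little endian). */
static void get_fp80(uint64_t *pmant, uint16_t *pexp, long double f)
{
    unsigned char raw[sizeof(long double)];
    memcpy(raw, &f, sizeof(raw));
    memcpy(pmant, raw, 8);
    memcpy(pexp, raw + 8, 2);
}

int main(void)
{
    uint64_t mant;
    uint16_t se;
    get_fp80(&mant, &se, 1.0L);
    /* 1.0 = biased exponent 0x3fff with the explicit integer bit set */
    assert(se == 0x3fff && mant == 0x8000000000000000ULL);
    return 0;
}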
@@ -232,7 +232,7 @@ int hax_init_vcpu(CPUState *cpu)
    }

    cpu->hax_vcpu = hax_global.vm->vcpus[cpu->cpu_index];
    cpu->hax_vcpu_dirty = true;
    cpu->vcpu_dirty = true;
    qemu_register_reset(hax_reset_vcpu_state, (CPUArchState *) (cpu->env_ptr));

    return ret;

@@ -599,12 +599,12 @@ static void do_hax_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
    CPUArchState *env = cpu->env_ptr;

    hax_arch_get_registers(env);
    cpu->hax_vcpu_dirty = true;
    cpu->vcpu_dirty = true;
}

void hax_cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->hax_vcpu_dirty) {
    if (!cpu->vcpu_dirty) {
        run_on_cpu(cpu, do_hax_cpu_synchronize_state, RUN_ON_CPU_NULL);
    }
}

@@ -615,7 +615,7 @@ static void do_hax_cpu_synchronize_post_reset(CPUState *cpu,
    CPUArchState *env = cpu->env_ptr;

    hax_vcpu_sync_state(env, 1);
    cpu->hax_vcpu_dirty = false;
    cpu->vcpu_dirty = false;
}

void hax_cpu_synchronize_post_reset(CPUState *cpu)

@@ -628,7 +628,7 @@ static void do_hax_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
    CPUArchState *env = cpu->env_ptr;

    hax_vcpu_sync_state(env, 1);
    cpu->hax_vcpu_dirty = false;
    cpu->vcpu_dirty = false;
}

void hax_cpu_synchronize_post_init(CPUState *cpu)

@@ -638,7 +638,7 @@ void hax_cpu_synchronize_post_init(CPUState *cpu)

static void do_hax_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
{
    cpu->hax_vcpu_dirty = true;
    cpu->vcpu_dirty = true;
}

void hax_cpu_synchronize_pre_loadvm(CPUState *cpu)
@@ -29,6 +29,36 @@
#include "hw/i386/apic_internal.h"
#endif

void cpu_sync_bndcs_hflags(CPUX86State *env)
{
    uint32_t hflags = env->hflags;
    uint32_t hflags2 = env->hflags2;
    uint32_t bndcsr;

    if ((hflags & HF_CPL_MASK) == 3) {
        bndcsr = env->bndcs_regs.cfgu;
    } else {
        bndcsr = env->msr_bndcfgs;
    }

    if ((env->cr[4] & CR4_OSXSAVE_MASK)
        && (env->xcr0 & XSTATE_BNDCSR_MASK)
        && (bndcsr & BNDCFG_ENABLE)) {
        hflags |= HF_MPX_EN_MASK;
    } else {
        hflags &= ~HF_MPX_EN_MASK;
    }

    if (bndcsr & BNDCFG_BNDPRESERVE) {
        hflags2 |= HF2_MPX_PR_MASK;
    } else {
        hflags2 &= ~HF2_MPX_PR_MASK;
    }

    env->hflags = hflags;
    env->hflags2 = hflags2;
}

static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;
@@ -692,349 +722,7 @@ void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
    cpu_sync_bndcs_hflags(env);
}

#if defined(CONFIG_USER_ONLY)

int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    cs->exception_index = EXCP0E_PAGE;
    env->exception_is_int = 0;
    env->exception_next_eip = -1;
    return 1;
}

#else

/* return value:
 * -1 = cannot handle fault
 * 0  = nothing more to do
 * 1  = generate PF fault
 */
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    int32_t a20_mask;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint32_t page_offset;
    target_ulong vaddr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    a20_mask = x86_get_a20_mask(env);
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (!(env->efer & MSR_EFER_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    goto do_fault;
                }
                if (pml5e & (rsvd_mask | PG_PSE_MASK)) {
                    goto do_fault_rsvd;
                }
                if (!(pml5e & PG_ACCESSED_MASK)) {
                    pml5e |= PG_ACCESSED_MASK;
                    x86_stl_phys_notdirty(cs, pml5e_addr, pml5e);
                }
                ptep = pml5e ^ PG_NX_MASK;
            } else {
                pml5e = env->cr[3];
                ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            a20_mask;
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page directory entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            a20_mask;
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    /* can the page can be put in the TLB?  prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

    prot = 0;
    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
            prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }
    if ((env->cr[4] & CR4_PKE_MASK) && (env->hflags & HF_LMA_MASK) &&
        (ptep & PG_USER_MASK) && env->pkru) {
        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
        uint32_t pkru_ad = (env->pkru >> pk * 2) & 1;
        uint32_t pkru_wd = (env->pkru >> pk * 2) & 2;
        uint32_t pkru_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

        if (pkru_ad) {
            pkru_prot &= ~(PAGE_READ | PAGE_WRITE);
        } else if (pkru_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
            pkru_prot &= ~PAGE_WRITE;
        }

        prot &= pkru_prot;
        if ((pkru_prot & (1 << is_write1)) == 0) {
            assert(is_write1 != 2);
            error_code |= PG_ERROR_PK_MASK;
            goto do_fault_protect;
        }
    }

    if ((prot & (1 << is_write1)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    if (!(pte & PG_DIRTY_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        assert(!is_write);
        prot &= ~PAGE_WRITE;
    }

do_mapping:
    pte = pte & a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    vaddr = addr & TARGET_PAGE_MASK;
    page_offset = vaddr & (page_size - 1);
    paddr = pte + page_offset;

    assert(prot & (1 << is_write1));
    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;
 do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
 do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}

#if !defined(CONFIG_USER_ONLY)
hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
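The protection-key check in the function above reads two PKRU bits per key: access-disable at bit 2*pk and write-disable at 2*pk+1, with write-disable honored for user accesses or when CR0.WP is set, and instruction fetches never filtered. A small sketch of just that bit logic (stand-in constants, not QEMU's):

#include <assert.h>
#include <stdint.h>

enum { PROT_R = 1, PROT_W = 2, PROT_X = 4 };   /* stand-ins, not QEMU's */

static unsigned pkru_perms(uint32_t pkru, unsigned pk, int user, int wp)
{
    unsigned prot = PROT_R | PROT_W | PROT_X;
    uint32_t ad = (pkru >> (pk * 2)) & 1;      /* access-disable bit */
    uint32_t wd = (pkru >> (pk * 2)) & 2;      /* write-disable bit  */

    if (ad) {
        prot &= ~(PROT_R | PROT_W);            /* all data access blocked */
    } else if (wd && (user || wp)) {
        prot &= ~PROT_W;                       /* only writes blocked */
    }
    return prot;                               /* fetches never filtered */
}

int main(void)
{
    /* key 3 write-disabled, user access: reads and fetches survive */
    assert(pkru_perms(0x2u << (3 * 2), 3, 1, 0) == (PROT_R | PROT_X));
    /* key 0 access-disabled: only execution remains */
    assert(pkru_perms(0x1u, 0, 1, 1) == PROT_X);
    /* supervisor write with CR0.WP clear ignores write-disable */
    assert(pkru_perms(0x2u << (3 * 2), 3, 0, 0) == (PROT_R | PROT_W | PROT_X));
    return 0;
}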
@@ -1302,7 +990,7 @@ void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
        env->tpr_access_type = access;

        cpu_interrupt(cs, CPU_INTERRUPT_TPR);
    } else {
    } else if (tcg_enabled()) {
        cpu_restore_state(cs, cs->mem_io_pc);

        apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
@@ -1433,56 +1433,12 @@ static int kvm_put_xsave(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    X86XSaveArea *xsave = env->kvm_xsave_buf;
    uint16_t cwd, swd, twd;
    int i;

    if (!has_xsave) {
        return kvm_put_fpu(cpu);
    }
    x86_cpu_xsave_all_areas(cpu, xsave);

    memset(xsave, 0, sizeof(struct kvm_xsave));
    twd = 0;
    swd = env->fpus & ~(7 << 11);
    swd |= (env->fpstt & 7) << 11;
    cwd = env->fpuc;
    for (i = 0; i < 8; ++i) {
        twd |= (!env->fptags[i]) << i;
    }
    xsave->legacy.fcw = cwd;
    xsave->legacy.fsw = swd;
    xsave->legacy.ftw = twd;
    xsave->legacy.fpop = env->fpop;
    xsave->legacy.fpip = env->fpip;
    xsave->legacy.fpdp = env->fpdp;
    memcpy(&xsave->legacy.fpregs, env->fpregs,
            sizeof env->fpregs);
    xsave->legacy.mxcsr = env->mxcsr;
    xsave->header.xstate_bv = env->xstate_bv;
    memcpy(&xsave->bndreg_state.bnd_regs, env->bnd_regs,
            sizeof env->bnd_regs);
    xsave->bndcsr_state.bndcsr = env->bndcs_regs;
    memcpy(&xsave->opmask_state.opmask_regs, env->opmask_regs,
            sizeof env->opmask_regs);

    for (i = 0; i < CPU_NB_REGS; i++) {
        uint8_t *xmm = xsave->legacy.xmm_regs[i];
        uint8_t *ymmh = xsave->avx_state.ymmh[i];
        uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
        stq_p(xmm,     env->xmm_regs[i].ZMM_Q(0));
        stq_p(xmm+8,   env->xmm_regs[i].ZMM_Q(1));
        stq_p(ymmh,    env->xmm_regs[i].ZMM_Q(2));
        stq_p(ymmh+8,  env->xmm_regs[i].ZMM_Q(3));
        stq_p(zmmh,    env->xmm_regs[i].ZMM_Q(4));
        stq_p(zmmh+8,  env->xmm_regs[i].ZMM_Q(5));
        stq_p(zmmh+16, env->xmm_regs[i].ZMM_Q(6));
        stq_p(zmmh+24, env->xmm_regs[i].ZMM_Q(7));
    }

#ifdef TARGET_X86_64
    memcpy(&xsave->hi16_zmm_state.hi16_zmm, &env->xmm_regs[16],
            16 * sizeof env->xmm_regs[16]);
    memcpy(&xsave->pkru_state, &env->pkru, sizeof env->pkru);
#endif
    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
}

@@ -1868,8 +1824,7 @@ static int kvm_get_xsave(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    X86XSaveArea *xsave = env->kvm_xsave_buf;
    int ret, i;
    uint16_t cwd, swd, twd;
    int ret;

    if (!has_xsave) {
        return kvm_get_fpu(cpu);

@@ -1879,48 +1834,8 @@ static int kvm_get_xsave(X86CPU *cpu)
    if (ret < 0) {
        return ret;
    }
    x86_cpu_xrstor_all_areas(cpu, xsave);

    cwd = xsave->legacy.fcw;
    swd = xsave->legacy.fsw;
    twd = xsave->legacy.ftw;
    env->fpop = xsave->legacy.fpop;
    env->fpstt = (swd >> 11) & 7;
    env->fpus = swd;
    env->fpuc = cwd;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((twd >> i) & 1);
    }
    env->fpip = xsave->legacy.fpip;
    env->fpdp = xsave->legacy.fpdp;
    env->mxcsr = xsave->legacy.mxcsr;
    memcpy(env->fpregs, &xsave->legacy.fpregs,
            sizeof env->fpregs);
    env->xstate_bv = xsave->header.xstate_bv;
    memcpy(env->bnd_regs, &xsave->bndreg_state.bnd_regs,
            sizeof env->bnd_regs);
    env->bndcs_regs = xsave->bndcsr_state.bndcsr;
    memcpy(env->opmask_regs, &xsave->opmask_state.opmask_regs,
            sizeof env->opmask_regs);

    for (i = 0; i < CPU_NB_REGS; i++) {
        uint8_t *xmm = xsave->legacy.xmm_regs[i];
        uint8_t *ymmh = xsave->avx_state.ymmh[i];
        uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
        env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm);
        env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm+8);
        env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh);
        env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh+8);
        env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh);
        env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh+8);
        env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh+16);
        env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh+24);
    }

#ifdef TARGET_X86_64
    memcpy(&env->xmm_regs[16], &xsave->hi16_zmm_state.hi16_zmm,
            16 * sizeof env->xmm_regs[16]);
    memcpy(&env->pkru, &xsave->pkru_state, sizeof env->pkru);
#endif
    return 0;
}
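Both xsave paths convert between the per-register fptags[] array and the abridged FXSAVE tag byte, where a set bit means "valid" while fptags stores "empty", hence the negation inside the loop. The round trip in isolation (plain C, hypothetical names):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Mirror of the tag-word packing above: bit i of the abridged tag
 * byte is set when st(i) holds a value, i.e. when fptags[i] is 0. */
static uint8_t pack_ftw(const bool fptags[8])
{
    uint8_t twd = 0;
    for (int i = 0; i < 8; ++i) {
        twd |= (!fptags[i]) << i;
    }
    return twd;
}

static void unpack_ftw(uint8_t twd, bool fptags[8])
{
    for (int i = 0; i < 8; ++i) {
        fptags[i] = !((twd >> i) & 1);
    }
}

int main(void)
{
    bool tags[8] = { false, true, true, true, true, true, true, false };
    bool back[8];
    uint8_t twd = pack_ftw(tags);      /* st0 and st7 valid -> 0x81 */
    assert(twd == 0x81);
    unpack_ftw(twd, back);
    for (int i = 0; i < 8; ++i) {
        assert(back[i] == tags[i]);
    }
    return 0;
}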
@@ -15,6 +15,29 @@

#define kvm_apic_in_kernel() (kvm_irqchip_in_kernel())

#ifdef CONFIG_KVM

#define kvm_pit_in_kernel() \
    (kvm_irqchip_in_kernel() && !kvm_irqchip_is_split())
#define kvm_pic_in_kernel() \
    (kvm_irqchip_in_kernel() && !kvm_irqchip_is_split())
#define kvm_ioapic_in_kernel() \
    (kvm_irqchip_in_kernel() && !kvm_irqchip_is_split())

#else

#define kvm_pit_in_kernel()      0
#define kvm_pic_in_kernel()      0
#define kvm_ioapic_in_kernel()   0

/* These constants must never be used at runtime if kvm_enabled() is false.
 * They exist so we don't need #ifdefs around KVM-specific code that already
 * checks kvm_enabled() properly.
 */
#define KVM_CPUID_FEATURES 0

#endif  /* CONFIG_KVM */

bool kvm_allows_irq0_override(void);
bool kvm_has_smm(void);
bool kvm_has_adjust_clock_stable(void);
@@ -142,6 +142,24 @@ typedef struct x86_FPReg_tmp {
    uint16_t tmp_exp;
} x86_FPReg_tmp;

static void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
{
    CPU_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

static floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}

static void fpreg_pre_save(void *opaque)
{
    x86_FPReg_tmp *tmp = opaque;

@@ -262,14 +280,17 @@ static int cpu_post_load(void *opaque, int version_id)
    for(i = 0; i < 8; i++) {
        env->fptags[i] = (env->fptag_vmstate >> i) & 1;
    }
    update_fp_status(env);
    if (tcg_enabled()) {
        target_ulong dr7;
        update_fp_status(env);
        update_mxcsr_status(env);

        cpu_breakpoint_remove_all(cs, BP_CPU);
        cpu_watchpoint_remove_all(cs, BP_CPU);

    cpu_breakpoint_remove_all(cs, BP_CPU);
    cpu_watchpoint_remove_all(cs, BP_CPU);
    {
        /* Indicate all breakpoints disabled, as they are, then
           let the helper re-enable them.  */
        target_ulong dr7 = env->dr[7];
        dr7 = env->dr[7];
        env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK);
        cpu_x86_update_dr7(env, dr7);
    }
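The dr7 dance in cpu_post_load() relies on the update helper only re-arming breakpoints whose enable bits transition from 0 to 1, so the state is first forced to "all disabled" and the saved value is then replayed. A toy model of that pattern (all names and masks here are hypothetical, not the QEMU ones):

#include <assert.h>
#include <stdint.h>

#define GLOBAL_BP_MASK 0xaaULL   /* hypothetical stand-ins for the DR7   */
#define LOCAL_BP_MASK  0x55ULL   /* global/local enable bit masks        */

static uint64_t cur_dr7;         /* models env->dr[7]                    */
static int enabled_bps;          /* models the set of live breakpoints   */

/* Hypothetical stand-in for cpu_x86_update_dr7(): registers a
 * breakpoint for every enable bit that flips from 0 to 1. */
static void update_dr7(uint64_t new_dr7)
{
    uint64_t turned_on = new_dr7 & ~cur_dr7 & (GLOBAL_BP_MASK | LOCAL_BP_MASK);
    while (turned_on) {
        enabled_bps++;
        turned_on &= turned_on - 1;    /* clear lowest set bit */
    }
    cur_dr7 = new_dr7;
}

int main(void)
{
    cur_dr7 = 0x0f;                    /* value loaded from the stream */
    uint64_t dr7 = cur_dr7;
    /* declare everything disabled, then replay the saved value so the
     * helper sees four 0->1 transitions and re-registers them all */
    cur_dr7 &= ~(GLOBAL_BP_MASK | LOCAL_BP_MASK);
    update_dr7(dr7);
    assert(enabled_bps == 4 && cur_dr7 == 0x0f);
    return 0;
}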
@@ -24,36 +24,6 @@
#include "exec/exec-all.h"


void cpu_sync_bndcs_hflags(CPUX86State *env)
{
    uint32_t hflags = env->hflags;
    uint32_t hflags2 = env->hflags2;
    uint32_t bndcsr;

    if ((hflags & HF_CPL_MASK) == 3) {
        bndcsr = env->bndcs_regs.cfgu;
    } else {
        bndcsr = env->msr_bndcfgs;
    }

    if ((env->cr[4] & CR4_OSXSAVE_MASK)
        && (env->xcr0 & XSTATE_BNDCSR_MASK)
        && (bndcsr & BNDCFG_ENABLE)) {
        hflags |= HF_MPX_EN_MASK;
    } else {
        hflags &= ~HF_MPX_EN_MASK;
    }

    if (bndcsr & BNDCFG_BNDPRESERVE) {
        hflags2 |= HF2_MPX_PR_MASK;
    } else {
        hflags2 &= ~HF2_MPX_PR_MASK;
    }

    env->hflags = hflags;
    env->hflags2 = hflags2;
}

void helper_bndck(CPUX86State *env, uint32_t fail)
{
    if (unlikely(fail)) {
@@ -692,7 +692,10 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {

@@ -719,7 +722,7 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
    } else {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);

@@ -728,13 +731,6 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

@@ -919,23 +915,21 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl || ist != 0) {
        /* to inner privilege */
        new_stack = 1;
        esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
        ss = 0;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
    } else {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        esp = env->regs[R_ESP];
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }
    esp &= ~0xfLL; /* align stack */

@@ -956,7 +950,7 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
    }
    env->regs[R_ESP] = esp;
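Both interrupt paths now normalize conforming code segments by forcing dpl = cpl up front, which collapses the old three-way if/else-if/else into a single dpl < cpl test and drops the unreachable #GP arm. The rule in isolation:

#include <assert.h>

/* Sketch of the effective-privilege rule made explicit above: a
 * conforming code segment runs at the caller's CPL, so treating its
 * DPL as CPL first lets one "dpl < cpl" test pick the inner-privilege
 * (stack-switching) path. */
static int to_inner_privilege(int conforming, int dpl, int cpl, int ist)
{
    if (conforming) {
        dpl = cpl;
    }
    return dpl < cpl || ist != 0;
}

int main(void)
{
    assert(to_inner_privilege(0, 0, 3, 0));   /* non-conforming ring-0 gate */
    assert(!to_inner_privilege(1, 0, 3, 0));  /* conforming: stay at CPL 3 */
    assert(to_inner_privilege(1, 0, 0, 2));   /* IST always switches stack */
    return 0;
}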
@@ -0,0 +1,114 @@
/*
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"

#include "qemu-common.h"
#include "cpu.h"

void x86_cpu_xsave_all_areas(X86CPU *cpu, X86XSaveArea *buf)
{
    CPUX86State *env = &cpu->env;
    X86XSaveArea *xsave = buf;

    uint16_t cwd, swd, twd;
    int i;
    memset(xsave, 0, sizeof(X86XSaveArea));
    twd = 0;
    swd = env->fpus & ~(7 << 11);
    swd |= (env->fpstt & 7) << 11;
    cwd = env->fpuc;
    for (i = 0; i < 8; ++i) {
        twd |= (!env->fptags[i]) << i;
    }
    xsave->legacy.fcw = cwd;
    xsave->legacy.fsw = swd;
    xsave->legacy.ftw = twd;
    xsave->legacy.fpop = env->fpop;
    xsave->legacy.fpip = env->fpip;
    xsave->legacy.fpdp = env->fpdp;
    memcpy(&xsave->legacy.fpregs, env->fpregs,
            sizeof env->fpregs);
    xsave->legacy.mxcsr = env->mxcsr;
    xsave->header.xstate_bv = env->xstate_bv;
    memcpy(&xsave->bndreg_state.bnd_regs, env->bnd_regs,
            sizeof env->bnd_regs);
    xsave->bndcsr_state.bndcsr = env->bndcs_regs;
    memcpy(&xsave->opmask_state.opmask_regs, env->opmask_regs,
            sizeof env->opmask_regs);

    for (i = 0; i < CPU_NB_REGS; i++) {
        uint8_t *xmm = xsave->legacy.xmm_regs[i];
        uint8_t *ymmh = xsave->avx_state.ymmh[i];
        uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
        stq_p(xmm,     env->xmm_regs[i].ZMM_Q(0));
        stq_p(xmm+8,   env->xmm_regs[i].ZMM_Q(1));
        stq_p(ymmh,    env->xmm_regs[i].ZMM_Q(2));
        stq_p(ymmh+8,  env->xmm_regs[i].ZMM_Q(3));
        stq_p(zmmh,    env->xmm_regs[i].ZMM_Q(4));
        stq_p(zmmh+8,  env->xmm_regs[i].ZMM_Q(5));
        stq_p(zmmh+16, env->xmm_regs[i].ZMM_Q(6));
        stq_p(zmmh+24, env->xmm_regs[i].ZMM_Q(7));
    }

#ifdef TARGET_X86_64
    memcpy(&xsave->hi16_zmm_state.hi16_zmm, &env->xmm_regs[16],
            16 * sizeof env->xmm_regs[16]);
    memcpy(&xsave->pkru_state, &env->pkru, sizeof env->pkru);
#endif

}

void x86_cpu_xrstor_all_areas(X86CPU *cpu, const X86XSaveArea *buf)
{

    CPUX86State *env = &cpu->env;
    const X86XSaveArea *xsave = buf;

    int i;
    uint16_t cwd, swd, twd;
    cwd = xsave->legacy.fcw;
    swd = xsave->legacy.fsw;
    twd = xsave->legacy.ftw;
    env->fpop = xsave->legacy.fpop;
    env->fpstt = (swd >> 11) & 7;
    env->fpus = swd;
    env->fpuc = cwd;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((twd >> i) & 1);
    }
    env->fpip = xsave->legacy.fpip;
    env->fpdp = xsave->legacy.fpdp;
    env->mxcsr = xsave->legacy.mxcsr;
    memcpy(env->fpregs, &xsave->legacy.fpregs,
            sizeof env->fpregs);
    env->xstate_bv = xsave->header.xstate_bv;
    memcpy(env->bnd_regs, &xsave->bndreg_state.bnd_regs,
            sizeof env->bnd_regs);
    env->bndcs_regs = xsave->bndcsr_state.bndcsr;
    memcpy(env->opmask_regs, &xsave->opmask_state.opmask_regs,
            sizeof env->opmask_regs);

    for (i = 0; i < CPU_NB_REGS; i++) {
        const uint8_t *xmm = xsave->legacy.xmm_regs[i];
        const uint8_t *ymmh = xsave->avx_state.ymmh[i];
        const uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
        env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm);
        env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm+8);
        env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh);
        env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh+8);
        env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh);
        env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh+8);
        env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh+16);
        env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh+24);
    }

#ifdef TARGET_X86_64
    memcpy(&env->xmm_regs[16], &xsave->hi16_zmm_state.hi16_zmm,
            16 * sizeof env->xmm_regs[16]);
    memcpy(&env->pkru, &xsave->pkru_state, sizeof env->pkru);
#endif

}
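The copy loops in the new file scatter each 512-bit register across three XSAVE components: quads 0-1 land in the legacy XMM slot, 2-3 in the AVX YMMH area, 4-7 in ZMM_Hi256. A standalone sketch of that layout, with a little-endian store standing in for stq_p() (the final check assumes a little-endian host):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Little-endian 64-bit store, standing in for stq_p() above. */
static void store_le64(uint8_t *p, uint64_t v)
{
    for (int i = 0; i < 8; ++i) {
        p[i] = (uint8_t)(v >> (i * 8));
    }
}

int main(void)
{
    /* one 512-bit register as eight quads, scattered exactly as in
     * the loop above */
    uint64_t zmm_q[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
    uint8_t xmm[16], ymmh[16], zmmh[32];

    store_le64(xmm,      zmm_q[0]);    /* legacy XMM slot: quads 0-1  */
    store_le64(xmm + 8,  zmm_q[1]);
    store_le64(ymmh,     zmm_q[2]);    /* AVX YMMH area:   quads 2-3  */
    store_le64(ymmh + 8, zmm_q[3]);
    for (int i = 0; i < 4; ++i) {      /* ZMM_Hi256 area:  quads 4-7  */
        store_le64(zmmh + 8 * i, zmm_q[4 + i]);
    }

    uint64_t q;
    memcpy(&q, zmmh + 24, 8);          /* valid on a little-endian host */
    assert(q == 7);
    return 0;
}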
@@ -523,7 +523,7 @@ static void kvm_mips_update_state(void *opaque, int running, RunState state)
     * already saved and can be restored when it is synced back to KVM.
     */
    if (!running) {
        if (!cs->kvm_vcpu_dirty) {
        if (!cs->vcpu_dirty) {
            ret = kvm_mips_save_count(cs);
            if (ret < 0) {
                fprintf(stderr, "Failed saving count\n");

@@ -539,7 +539,7 @@ static void kvm_mips_update_state(void *opaque, int running, RunState state)
            return;
        }

        if (!cs->kvm_vcpu_dirty) {
        if (!cs->vcpu_dirty) {
            ret = kvm_mips_restore_count(cs);
            if (ret < 0) {
                fprintf(stderr, "Failed restoring count\n");
@@ -757,10 +757,6 @@ void *tcg_malloc_internal(TCGContext *s, int size);
void tcg_pool_reset(TCGContext *s);
TranslationBlock *tcg_tb_alloc(TCGContext *s);

void tb_lock(void);
void tb_unlock(void);
void tb_lock_reset(void);

/* Called with tb_lock held.  */
static inline void *tcg_malloc(int size)
{
@@ -20,13 +20,9 @@ typedef struct FeHandler {

static void main_loop(void)
{
    bool nonblocking;
    int last_io = 0;

    quit = false;
    do {
        nonblocking = last_io > 0;
        last_io = main_loop_wait(nonblocking);
        main_loop_wait(false);
    } while (!quit);
}
@@ -487,7 +487,7 @@ static int os_host_main_loop_wait(int64_t timeout)
}
#endif

int main_loop_wait(int nonblocking)
void main_loop_wait(int nonblocking)
{
    int ret;
    uint32_t timeout = UINT32_MAX;

@@ -500,9 +500,7 @@ int main_loop_wait(int nonblocking)
    /* poll any events */
    g_array_set_size(gpollfds, 0); /* reset for new iteration */
    /* XXX: separate device handlers from system ones */
#ifdef CONFIG_SLIRP
    slirp_pollfds_fill(gpollfds, &timeout);
#endif

    if (timeout == UINT32_MAX) {
        timeout_ns = -1;

@@ -515,16 +513,12 @@ int main_loop_wait(int nonblocking)
                                          &main_loop_tlg));

    ret = os_host_main_loop_wait(timeout_ns);
#ifdef CONFIG_SLIRP
    slirp_pollfds_poll(gpollfds, (ret < 0));
#endif

    /* CPU thread can infinitely wait for event after
       missing the warp */
    qemu_start_warp_timer();
    qemu_clock_run_all_timers();

    return ret;
}

/* Functions to operate on the main QEMU AioContext.  */
@@ -438,10 +438,8 @@ static int poll_rest(gboolean poll_msgs, HANDLE *handles, gint nhandles,
        if (timeout == 0 && nhandles > 1) {
            /* Remove the handle that fired */
            int i;
            if (ready < nhandles - 1) {
                for (i = ready - WAIT_OBJECT_0 + 1; i < nhandles; i++) {
                    handles[i-1] = handles[i];
                }
            for (i = ready - WAIT_OBJECT_0 + 1; i < nhandles; i++) {
                handles[i-1] = handles[i];
            }
            nhandles--;
            recursed_result = poll_rest(FALSE, handles, nhandles, fds, nfds, 0);
@@ -897,7 +897,7 @@ static int unix_listen_saddr(UnixSocketAddress *saddr,
    strncpy(un.sun_path, path, sizeof(un.sun_path));

    if (bind(sock, (struct sockaddr*) &un, sizeof(un)) < 0) {
        error_setg_errno(errp, errno, "Failed to bind socket to %s", un.sun_path);
        error_setg_errno(errp, errno, "Failed to bind socket to %s", path);
        goto err;
    }
    if (listen(sock, 1) < 0) {
@@ -43,12 +43,15 @@ void qemu_mutex_init(QemuMutex *mutex)
    err = pthread_mutex_init(&mutex->lock, NULL);
    if (err)
        error_exit(err, __func__);
    mutex->initialized = true;
}

void qemu_mutex_destroy(QemuMutex *mutex)
{
    int err;

    assert(mutex->initialized);
    mutex->initialized = false;
    err = pthread_mutex_destroy(&mutex->lock);
    if (err)
        error_exit(err, __func__);

@@ -58,6 +61,7 @@ void qemu_mutex_lock(QemuMutex *mutex)
{
    int err;

    assert(mutex->initialized);
    err = pthread_mutex_lock(&mutex->lock);
    if (err)
        error_exit(err, __func__);

@@ -69,6 +73,7 @@ int qemu_mutex_trylock(QemuMutex *mutex)
{
    int err;

    assert(mutex->initialized);
    err = pthread_mutex_trylock(&mutex->lock);
    if (err == 0) {
        trace_qemu_mutex_locked(mutex);

@@ -84,6 +89,7 @@ void qemu_mutex_unlock(QemuMutex *mutex)
{
    int err;

    assert(mutex->initialized);
    trace_qemu_mutex_unlocked(mutex);
    err = pthread_mutex_unlock(&mutex->lock);
    if (err)

@@ -102,6 +108,7 @@ void qemu_rec_mutex_init(QemuRecMutex *mutex)
    if (err) {
        error_exit(err, __func__);
    }
    mutex->initialized = true;
}

void qemu_cond_init(QemuCond *cond)

@@ -111,12 +118,15 @@ void qemu_cond_init(QemuCond *cond)
    err = pthread_cond_init(&cond->cond, NULL);
    if (err)
        error_exit(err, __func__);
    cond->initialized = true;
}

void qemu_cond_destroy(QemuCond *cond)
{
    int err;

    assert(cond->initialized);
    cond->initialized = false;
    err = pthread_cond_destroy(&cond->cond);
    if (err)
        error_exit(err, __func__);

@@ -126,6 +136,7 @@ void qemu_cond_signal(QemuCond *cond)
{
    int err;

    assert(cond->initialized);
    err = pthread_cond_signal(&cond->cond);
    if (err)
        error_exit(err, __func__);

@@ -135,6 +146,7 @@ void qemu_cond_broadcast(QemuCond *cond)
{
    int err;

    assert(cond->initialized);
    err = pthread_cond_broadcast(&cond->cond);
    if (err)
        error_exit(err, __func__);

@@ -144,6 +156,7 @@ void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{
    int err;

    assert(cond->initialized);
    trace_qemu_mutex_unlocked(mutex);
    err = pthread_cond_wait(&cond->cond, &mutex->lock);
    trace_qemu_mutex_locked(mutex);

@@ -174,12 +187,15 @@ void qemu_sem_init(QemuSemaphore *sem, int init)
        error_exit(errno, __func__);
    }
#endif
    sem->initialized = true;
}

void qemu_sem_destroy(QemuSemaphore *sem)
{
    int rc;

    assert(sem->initialized);
    sem->initialized = false;
#if defined(__APPLE__) || defined(__NetBSD__)
    rc = pthread_cond_destroy(&sem->cond);
    if (rc < 0) {

@@ -201,6 +217,7 @@ void qemu_sem_post(QemuSemaphore *sem)
{
    int rc;

    assert(sem->initialized);
#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    if (sem->count == UINT_MAX) {

@@ -238,6 +255,7 @@ int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
    int rc;
    struct timespec ts;

    assert(sem->initialized);
#if defined(__APPLE__) || defined(__NetBSD__)
    rc = 0;
    compute_abs_deadline(&ts, ms);

@@ -285,6 +303,7 @@ void qemu_sem_wait(QemuSemaphore *sem)
{
    int rc;

    assert(sem->initialized);
#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {

@@ -310,6 +329,7 @@ void qemu_sem_wait(QemuSemaphore *sem)
#else
static inline void qemu_futex_wake(QemuEvent *ev, int n)
{
    assert(ev->initialized);
    pthread_mutex_lock(&ev->lock);
    if (n == 1) {
        pthread_cond_signal(&ev->cond);

@@ -321,6 +341,7 @@ static inline void qemu_futex_wake(QemuEvent *ev, int n)

static inline void qemu_futex_wait(QemuEvent *ev, unsigned val)
{
    assert(ev->initialized);
    pthread_mutex_lock(&ev->lock);
    if (ev->value == val) {
        pthread_cond_wait(&ev->cond, &ev->lock);

@@ -355,10 +376,13 @@ void qemu_event_init(QemuEvent *ev, bool init)
#endif

    ev->value = (init ? EV_SET : EV_FREE);
    ev->initialized = true;
}

void qemu_event_destroy(QemuEvent *ev)
{
    assert(ev->initialized);
    ev->initialized = false;
#ifndef __linux__
    pthread_mutex_destroy(&ev->lock);
    pthread_cond_destroy(&ev->cond);

@@ -370,6 +394,7 @@ void qemu_event_set(QemuEvent *ev)
    /* qemu_event_set has release semantics, but because it *loads*
     * ev->value we need a full memory barrier here.
     */
    assert(ev->initialized);
    smp_mb();
    if (atomic_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {

@@ -383,6 +408,7 @@ void qemu_event_reset(QemuEvent *ev)
{
    unsigned value;

    assert(ev->initialized);
    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value == EV_SET) {

@@ -398,6 +424,7 @@ void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    assert(ev->initialized);
    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value != EV_SET) {
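Every hunk in this file follows one pattern: init sets an initialized flag, destroy asserts it and clears it before tearing the primitive down, and every other operation asserts it, so use-before-init and use-after-destroy abort in debug builds instead of scribbling on a dead lock. The guard reduced to its skeleton (plain C, hypothetical names):

#include <assert.h>
#include <stdbool.h>

typedef struct {
    int lock;            /* stand-in for the real pthread/SRW object */
    bool initialized;
} guarded_mutex;

static void gm_init(guarded_mutex *m)
{
    m->lock = 0;
    m->initialized = true;
}

static void gm_destroy(guarded_mutex *m)
{
    /* clear the flag first, as the hunks above do, so a racing or
     * later use trips the assertion rather than the dead lock */
    assert(m->initialized);
    m->initialized = false;
}

static void gm_lock(guarded_mutex *m)   { assert(m->initialized); m->lock = 1; }
static void gm_unlock(guarded_mutex *m) { assert(m->initialized); m->lock = 0; }

int main(void)
{
    guarded_mutex m;
    gm_init(&m);
    gm_lock(&m);
    gm_unlock(&m);
    gm_destroy(&m);
    /* gm_lock(&m) here would now abort via the assertion */
    return 0;
}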
@@ -46,15 +46,19 @@ static void error_exit(int err, const char *msg)
void qemu_mutex_init(QemuMutex *mutex)
{
    InitializeSRWLock(&mutex->lock);
    mutex->initialized = true;
}

void qemu_mutex_destroy(QemuMutex *mutex)
{
    assert(mutex->initialized);
    mutex->initialized = false;
    InitializeSRWLock(&mutex->lock);
}

void qemu_mutex_lock(QemuMutex *mutex)
{
    assert(mutex->initialized);
    AcquireSRWLockExclusive(&mutex->lock);
    trace_qemu_mutex_locked(mutex);
}

@@ -63,6 +67,7 @@ int qemu_mutex_trylock(QemuMutex *mutex)
{
    int owned;

    assert(mutex->initialized);
    owned = TryAcquireSRWLockExclusive(&mutex->lock);
    if (owned) {
        trace_qemu_mutex_locked(mutex);

@@ -73,6 +78,7 @@ int qemu_mutex_trylock(QemuMutex *mutex)

void qemu_mutex_unlock(QemuMutex *mutex)
{
    assert(mutex->initialized);
    trace_qemu_mutex_unlocked(mutex);
    ReleaseSRWLockExclusive(&mutex->lock);
}

@@ -80,25 +86,31 @@ void qemu_mutex_unlock(QemuMutex *mutex)
void qemu_rec_mutex_init(QemuRecMutex *mutex)
{
    InitializeCriticalSection(&mutex->lock);
    mutex->initialized = true;
}

void qemu_rec_mutex_destroy(QemuRecMutex *mutex)
{
    assert(mutex->initialized);
    mutex->initialized = false;
    DeleteCriticalSection(&mutex->lock);
}

void qemu_rec_mutex_lock(QemuRecMutex *mutex)
{
    assert(mutex->initialized);
    EnterCriticalSection(&mutex->lock);
}

int qemu_rec_mutex_trylock(QemuRecMutex *mutex)
{
    assert(mutex->initialized);
    return !TryEnterCriticalSection(&mutex->lock);
}

void qemu_rec_mutex_unlock(QemuRecMutex *mutex)
{
    assert(mutex->initialized);
    LeaveCriticalSection(&mutex->lock);
}

@@ -106,25 +118,31 @@ void qemu_cond_init(QemuCond *cond)
{
    memset(cond, 0, sizeof(*cond));
    InitializeConditionVariable(&cond->var);
    cond->initialized = true;
}

void qemu_cond_destroy(QemuCond *cond)
{
    assert(cond->initialized);
    cond->initialized = false;
    InitializeConditionVariable(&cond->var);
}

void qemu_cond_signal(QemuCond *cond)
{
    assert(cond->initialized);
    WakeConditionVariable(&cond->var);
}

void qemu_cond_broadcast(QemuCond *cond)
{
    assert(cond->initialized);
    WakeAllConditionVariable(&cond->var);
}

void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{
    assert(cond->initialized);
    trace_qemu_mutex_unlocked(mutex);
    SleepConditionVariableSRW(&cond->var, &mutex->lock, INFINITE, 0);
    trace_qemu_mutex_locked(mutex);

@@ -134,21 +152,28 @@ void qemu_sem_init(QemuSemaphore *sem, int init)
{
    /* Manual reset.  */
    sem->sema = CreateSemaphore(NULL, init, LONG_MAX, NULL);
    sem->initialized = true;
}

void qemu_sem_destroy(QemuSemaphore *sem)
{
    assert(sem->initialized);
    sem->initialized = false;
    CloseHandle(sem->sema);
}

void qemu_sem_post(QemuSemaphore *sem)
{
    assert(sem->initialized);
    ReleaseSemaphore(sem->sema, 1, NULL);
}

int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc = WaitForSingleObject(sem->sema, ms);
    int rc;

    assert(sem->initialized);
    rc = WaitForSingleObject(sem->sema, ms);
    if (rc == WAIT_OBJECT_0) {
        return 0;
    }

@@ -160,6 +185,7 @@ int qemu_sem_timedwait(QemuSemaphore *sem, int ms)

void qemu_sem_wait(QemuSemaphore *sem)
{
    assert(sem->initialized);
    if (WaitForSingleObject(sem->sema, INFINITE) != WAIT_OBJECT_0) {
        error_exit(GetLastError(), __func__);
    }

@@ -193,15 +219,19 @@ void qemu_event_init(QemuEvent *ev, bool init)
    /* Manual reset.  */
    ev->event = CreateEvent(NULL, TRUE, TRUE, NULL);
    ev->value = (init ? EV_SET : EV_FREE);
    ev->initialized = true;
}

void qemu_event_destroy(QemuEvent *ev)
{
    assert(ev->initialized);
    ev->initialized = false;
    CloseHandle(ev->event);
}

void qemu_event_set(QemuEvent *ev)
{
    assert(ev->initialized);
    /* qemu_event_set has release semantics, but because it *loads*
     * ev->value we need a full memory barrier here.
     */

@@ -218,6 +248,7 @@ void qemu_event_reset(QemuEvent *ev)
{
    unsigned value;

    assert(ev->initialized);
    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value == EV_SET) {

@@ -232,6 +263,7 @@ void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    assert(ev->initialized);
    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value != EV_SET) {
vl.c

@@ -3933,9 +3933,13 @@ int main(int argc, char **argv, char **envp)
                configure_rtc(opts);
                break;
            case QEMU_OPTION_tb_size:
                tcg_tb_size = strtol(optarg, NULL, 0);
                if (tcg_tb_size < 0) {
                    tcg_tb_size = 0;
                if (!tcg_enabled()) {
                    error_report("TCG is disabled");
                    exit(1);
                }
                if (qemu_strtoul(optarg, NULL, 0, &tcg_tb_size) < 0) {
                    error_report("Invalid argument to -tb-size");
                    exit(1);
                }
                break;
            case QEMU_OPTION_icount:

@@ -4481,7 +4485,9 @@ int main(int argc, char **argv, char **envp)
        qemu_opts_del(icount_opts);
    }

    qemu_tcg_configure(accel_opts, &error_fatal);
    if (tcg_enabled()) {
        qemu_tcg_configure(accel_opts, &error_fatal);
    }

    if (default_net) {
        QemuOptsList *net = qemu_find_opts("net");
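The -tb-size change replaces bare strtol(), which silently accepts trailing garbage and cannot report overflow without errno gymnastics, with an error-checked parse. A sketch in the same spirit as qemu_strtoul(), built only on standard strtoul() (the wrapper name here is made up):

#include <assert.h>
#include <errno.h>
#include <stdlib.h>

static int parse_ul(const char *s, unsigned long *out)
{
    char *end;
    errno = 0;
    unsigned long v = strtoul(s, &end, 0);
    if (errno != 0 || end == s || *end != '\0') {
        return -1;          /* overflow, no digits, or trailing garbage */
    }
    *out = v;
    return 0;
}

int main(void)
{
    unsigned long v;
    assert(parse_ul("4096", &v) == 0 && v == 4096);
    assert(parse_ul("12junk", &v) < 0);     /* trailing garbage rejected */
    assert(parse_ul("", &v) < 0);           /* no digits at all */
    return 0;
}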