From 3b4a0253e6feed3951e7506d63acd4094be7b7b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Wed, 21 Jun 2023 13:32:23 +0200 Subject: [PATCH 01/30] MAINTAINERS: Update Roman Bolshakov email address MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit r.bolshakov@yadro.com is bouncing: Update Roman's email address using one found somewhere on the Internet; this way he can Ack-by. (Reorder Taylor's line to keep the section sorted alphabetically). Signed-off-by: Philippe Mathieu-Daudé Reviewed-by: Roman Bolshakov Message-Id: <20230624174121.11508-2-philmd@linaro.org> --- .mailmap | 3 ++- MAINTAINERS | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.mailmap b/.mailmap index b57da4827e..64ef9f4de6 100644 --- a/.mailmap +++ b/.mailmap @@ -76,9 +76,10 @@ Paul Burton Philippe Mathieu-Daudé Philippe Mathieu-Daudé Philippe Mathieu-Daudé +Roman Bolshakov Stefan Brankovic -Yongbok Kim Taylor Simpson +Yongbok Kim # Also list preferred name forms where people have changed their # git author config, or had utf8/latin1 encoding issues. diff --git a/MAINTAINERS b/MAINTAINERS index 21a587ce4b..aba07722f6 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -498,14 +498,14 @@ F: target/arm/hvf/ X86 HVF CPUs M: Cameron Esfahani -M: Roman Bolshakov +M: Roman Bolshakov W: https://wiki.qemu.org/Features/HVF S: Maintained F: target/i386/hvf/ HVF M: Cameron Esfahani -M: Roman Bolshakov +M: Roman Bolshakov W: https://wiki.qemu.org/Features/HVF S: Maintained F: accel/hvf/ From 098bfc01df5ba4c1cb62803b173b2101ae39b0f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Sat, 24 Jun 2023 14:58:24 +0200 Subject: [PATCH 02/30] docs/devel/testing: Update the 'Docker Debugging' section MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since commit 93cc0506f6 ("tests/docker: Use Fedora containers for MinGW cross-builds in the gitlab-CI") the MinGW toolchain is packaged inside the fedora-win[32/64]-cross images. Signed-off-by: Philippe Mathieu-Daudé Reviewed-by: Daniel P. Berrangé Message-Id: <20230624142211.8888-2-philmd@linaro.org> --- docs/devel/testing.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/devel/testing.rst b/docs/devel/testing.rst index 203facb417..e85e26c4ca 100644 --- a/docs/devel/testing.rst +++ b/docs/devel/testing.rst @@ -558,7 +558,7 @@ When CI tasks, maintainers or yourself report a Docker test failure, follow the below steps to debug it: 1. Locally reproduce the failure with the reported command line. E.g. run - ``make docker-test-mingw@fedora J=8``. + ``make docker-test-mingw@fedora-win64-cross J=8``. 2. Add "V=1" to the command line, try again, to see the verbose output. 3. Further add "DEBUG=1" to the command line. This will pause in a shell prompt in the container right before testing starts. You could either manually From 641b841722836cd08d39701f6e4932543afcf41a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Sat, 24 Jun 2023 15:31:44 +0200 Subject: [PATCH 03/30] accel: Re-enable WHPX cross-build on case sensitive filesystems MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since MinGW commit 395dcfdea ("rename hyper-v headers and def files to lower case") [*], WinHvPlatform.h and WinHvEmulation.h got respectively renamed as winhvplatform.h / winhvemulation.h. 
The mingw64-headers package included in the Fedora version we use for CI does include this commit; and meson fails to detect these present-but-renamed headers while cross-building (on case-sensitive filesystems). Use the renamed header in order to detect and successfully cross-build with the WHPX accelerator. Note, on Windows hosts, the libraries are still named as WinHvPlatform.dll and WinHvEmulation.dll, so we don't bother renaming the definitions used by load_whp_dispatch_fns() in target/i386/whpx/whpx-all.c. [*] https://sourceforge.net/p/mingw-w64/mingw-w64/ci/395dcfdea Signed-off-by: Philippe Mathieu-Daudé Reviewed-by: Daniel P. Berrangé Message-Id: <20230624142211.8888-3-philmd@linaro.org> --- meson.build | 4 ++-- target/i386/whpx/whpx-all.c | 4 ++-- target/i386/whpx/whpx-internal.h | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/meson.build b/meson.build index 1631e60a3b..a9ba0bfab3 100644 --- a/meson.build +++ b/meson.build @@ -661,8 +661,8 @@ endif if get_option('whpx').allowed() and targetos == 'windows' if get_option('whpx').enabled() and host_machine.cpu() != 'x86_64' error('WHPX requires 64-bit host') - elif cc.has_header('WinHvPlatform.h', required: get_option('whpx')) and \ - cc.has_header('WinHvEmulation.h', required: get_option('whpx')) + elif cc.has_header('winhvplatform.h', required: get_option('whpx')) and \ + cc.has_header('winhvemulation.h', required: get_option('whpx')) accelerators += 'CONFIG_WHPX' endif endif diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c index 52af81683c..7fa68d49e5 100644 --- a/target/i386/whpx/whpx-all.c +++ b/target/i386/whpx/whpx-all.c @@ -31,8 +31,8 @@ #include "whpx-internal.h" #include "whpx-accel-ops.h" -#include -#include +#include +#include #define HYPERV_APIC_BUS_FREQUENCY (200000000ULL) diff --git a/target/i386/whpx/whpx-internal.h b/target/i386/whpx/whpx-internal.h index 06429d8ccd..6633e9c4ca 100644 --- a/target/i386/whpx/whpx-internal.h +++ b/target/i386/whpx/whpx-internal.h @@ -2,8 +2,8 @@ #define TARGET_I386_WHPX_INTERNAL_H #include -#include -#include +#include +#include typedef enum WhpxBreakpointState { WHPX_BP_CLEARED = 0, From 6970030177ae846327af435ad0e3f6e1045a0c86 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Mon, 3 Apr 2023 14:53:17 +0200 Subject: [PATCH 04/30] accel: Document generic accelerator headers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These headers are meant to be include by any file to check the availability of accelerators, thus are not accelerator specific. 
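For instance (an illustrative sketch, not part of this patch), a file that is
not KVM-specific can include the generic header unconditionally and guard the
accelerator-specific path at run time:

    #include "sysemu/kvm.h"   /* safe in non-KVM-specific code */

    if (kvm_enabled()) {
        /* KVM-only fast path */
    }

The other headers touched here provide the matching *_enabled() checks for
their respective accelerators.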
Signed-off-by: Philippe Mathieu-Daudé Acked-by: Richard Henderson Message-Id: <20230624174121.11508-3-philmd@linaro.org> --- include/sysemu/hax.h | 2 ++ include/sysemu/kvm.h | 2 ++ include/sysemu/nvmm.h | 2 ++ include/sysemu/tcg.h | 2 ++ include/sysemu/whpx.h | 2 ++ include/sysemu/xen.h | 2 ++ 6 files changed, 12 insertions(+) diff --git a/include/sysemu/hax.h b/include/sysemu/hax.h index bf8f99a824..80fc716f80 100644 --- a/include/sysemu/hax.h +++ b/include/sysemu/hax.h @@ -19,6 +19,8 @@ * */ +/* header to be included in non-HAX-specific code */ + #ifndef QEMU_HAX_H #define QEMU_HAX_H diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h index 88f5ccfbce..7902acdfd9 100644 --- a/include/sysemu/kvm.h +++ b/include/sysemu/kvm.h @@ -11,6 +11,8 @@ * */ +/* header to be included in non-KVM-specific code */ + #ifndef QEMU_KVM_H #define QEMU_KVM_H diff --git a/include/sysemu/nvmm.h b/include/sysemu/nvmm.h index 833670fccb..be7bc9a62d 100644 --- a/include/sysemu/nvmm.h +++ b/include/sysemu/nvmm.h @@ -7,6 +7,8 @@ * See the COPYING file in the top-level directory. */ +/* header to be included in non-NVMM-specific code */ + #ifndef QEMU_NVMM_H #define QEMU_NVMM_H diff --git a/include/sysemu/tcg.h b/include/sysemu/tcg.h index 53352450ff..5e2ca9aab3 100644 --- a/include/sysemu/tcg.h +++ b/include/sysemu/tcg.h @@ -5,6 +5,8 @@ * See the COPYING file in the top-level directory. */ +/* header to be included in non-TCG-specific code */ + #ifndef SYSEMU_TCG_H #define SYSEMU_TCG_H diff --git a/include/sysemu/whpx.h b/include/sysemu/whpx.h index 2889fa2278..781ca5b2b6 100644 --- a/include/sysemu/whpx.h +++ b/include/sysemu/whpx.h @@ -10,6 +10,8 @@ * */ +/* header to be included in non-WHPX-specific code */ + #ifndef QEMU_WHPX_H #define QEMU_WHPX_H diff --git a/include/sysemu/xen.h b/include/sysemu/xen.h index 0ca25697e4..bc13ad5692 100644 --- a/include/sysemu/xen.h +++ b/include/sysemu/xen.h @@ -5,6 +5,8 @@ * See the COPYING file in the top-level directory. */ +/* header to be included in non-Xen-specific code */ + #ifndef SYSEMU_XEN_H #define SYSEMU_XEN_H From af03d22a0a2aec52f8e6959e8bc5e318ecc72e6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Wed, 29 Mar 2023 18:46:12 +0200 Subject: [PATCH 05/30] accel: Remove unused hThread variable on TCG/WHPX MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On Windows hosts, cpu->hThread is assigned but never accessed: remove it. 
Signed-off-by: Philippe Mathieu-Daudé Reviewed-by: Richard Henderson Message-Id: <20230624174121.11508-4-philmd@linaro.org> --- accel/tcg/tcg-accel-ops-mttcg.c | 4 ---- accel/tcg/tcg-accel-ops-rr.c | 3 --- target/i386/whpx/whpx-accel-ops.c | 3 --- 3 files changed, 10 deletions(-) diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c index b320ff0037..b276262007 100644 --- a/accel/tcg/tcg-accel-ops-mttcg.c +++ b/accel/tcg/tcg-accel-ops-mttcg.c @@ -152,8 +152,4 @@ void mttcg_start_vcpu_thread(CPUState *cpu) qemu_thread_create(cpu->thread, thread_name, mttcg_cpu_thread_fn, cpu, QEMU_THREAD_JOINABLE); - -#ifdef _WIN32 - cpu->hThread = qemu_thread_get_handle(cpu->thread); -#endif } diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c index 23e4d0f452..2d523289a8 100644 --- a/accel/tcg/tcg-accel-ops-rr.c +++ b/accel/tcg/tcg-accel-ops-rr.c @@ -329,9 +329,6 @@ void rr_start_vcpu_thread(CPUState *cpu) single_tcg_halt_cond = cpu->halt_cond; single_tcg_cpu_thread = cpu->thread; -#ifdef _WIN32 - cpu->hThread = qemu_thread_get_handle(cpu->thread); -#endif } else { /* we share the thread */ cpu->thread = single_tcg_cpu_thread; diff --git a/target/i386/whpx/whpx-accel-ops.c b/target/i386/whpx/whpx-accel-ops.c index e8dc4b3a47..67cad86720 100644 --- a/target/i386/whpx/whpx-accel-ops.c +++ b/target/i386/whpx/whpx-accel-ops.c @@ -71,9 +71,6 @@ static void whpx_start_vcpu_thread(CPUState *cpu) cpu->cpu_index); qemu_thread_create(cpu->thread, thread_name, whpx_cpu_thread_fn, cpu, QEMU_THREAD_JOINABLE); -#ifdef _WIN32 - cpu->hThread = qemu_thread_get_handle(cpu->thread); -#endif } static void whpx_kick_vcpu_thread(CPUState *cpu) From 43477340c38c7980d4f4d5bfedf1bd0146ccb577 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Wed, 5 Apr 2023 09:29:59 +0200 Subject: [PATCH 06/30] accel: Fix a leak on Windows HAX MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit hThread is only used on the error path in hax_kick_vcpu_thread(). Fixes: b0cb0a66d6 ("Plumb the HAXM-based hardware acceleration support") Signed-off-by: Philippe Mathieu-Daudé Reviewed-by: Richard Henderson Message-Id: <20230624174121.11508-5-philmd@linaro.org> --- target/i386/hax/hax-all.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/target/i386/hax/hax-all.c b/target/i386/hax/hax-all.c index 3e5992a63b..a2321a1eff 100644 --- a/target/i386/hax/hax-all.c +++ b/target/i386/hax/hax-all.c @@ -205,6 +205,9 @@ int hax_vcpu_destroy(CPUState *cpu) */ hax_close_fd(vcpu->fd); hax_global.vm->vcpus[vcpu->vcpu_id] = NULL; +#ifdef _WIN32 + CloseHandle(cpu->hThread); +#endif g_free(vcpu); return 0; } From 83d0f7f95cc88bbf31ba000d1d2f05e8563f9d97 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Wed, 5 Apr 2023 09:15:26 +0200 Subject: [PATCH 07/30] accel: Destroy HAX vCPU threads once done MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When the vCPU thread finished its processing, destroy it and signal its destruction to generic vCPU management layer. Add a sanity check for the vCPU accelerator context. 
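For reference, the resulting tail of hax_cpu_thread_fn() (reconstructed from
the diff below, not new code) becomes:

    do {
        ...
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    hax_vcpu_destroy(cpu);
    cpu_thread_signal_destroyed(cpu);
    rcu_unregister_thread();
    return NULL;

so a vCPU thread that leaves its loop now releases its accelerator context and
lets the generic vCPU management layer observe the destruction.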
Signed-off-by: Philippe Mathieu-Daudé Reviewed-by: Richard Henderson Message-Id: <20230624174121.11508-6-philmd@linaro.org> --- target/i386/hax/hax-accel-ops.c | 3 +++ target/i386/hax/hax-all.c | 1 + 2 files changed, 4 insertions(+) diff --git a/target/i386/hax/hax-accel-ops.c b/target/i386/hax/hax-accel-ops.c index 18114fe34d..0157a628a3 100644 --- a/target/i386/hax/hax-accel-ops.c +++ b/target/i386/hax/hax-accel-ops.c @@ -53,6 +53,8 @@ static void *hax_cpu_thread_fn(void *arg) qemu_wait_io_event(cpu); } while (!cpu->unplug || cpu_can_run(cpu)); + hax_vcpu_destroy(cpu); + cpu_thread_signal_destroyed(cpu); rcu_unregister_thread(); return NULL; } @@ -69,6 +71,7 @@ static void hax_start_vcpu_thread(CPUState *cpu) cpu->cpu_index); qemu_thread_create(cpu->thread, thread_name, hax_cpu_thread_fn, cpu, QEMU_THREAD_JOINABLE); + assert(cpu->hax_vcpu); #ifdef _WIN32 cpu->hThread = qemu_thread_get_handle(cpu->thread); #endif diff --git a/target/i386/hax/hax-all.c b/target/i386/hax/hax-all.c index a2321a1eff..38a4323a3c 100644 --- a/target/i386/hax/hax-all.c +++ b/target/i386/hax/hax-all.c @@ -209,6 +209,7 @@ int hax_vcpu_destroy(CPUState *cpu) CloseHandle(cpu->hThread); #endif g_free(vcpu); + cpu->hax_vcpu = NULL; return 0; } From 6ecd2cd0dcfa733bdad7e97ee913ff44fc7681aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Wed, 29 Mar 2023 18:56:35 +0200 Subject: [PATCH 08/30] accel: Rename 'hax_vcpu' as 'accel' in CPUState MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All accelerators will share a single opaque context in CPUState. Start by renaming 'hax_vcpu' as 'accel'. Reviewed-by: Richard Henderson Signed-off-by: Philippe Mathieu-Daudé Message-Id: <20230624174121.11508-7-philmd@linaro.org> --- include/hw/core/cpu.h | 2 +- target/i386/hax/hax-accel-ops.c | 2 +- target/i386/hax/hax-all.c | 18 +++++++++--------- target/i386/nvmm/nvmm-all.c | 6 +++--- target/i386/whpx/whpx-all.c | 6 +++--- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index eda0230a02..63c25ce106 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -443,7 +443,7 @@ struct CPUState { /* Used for user-only emulation of prctl(PR_SET_UNALIGN). 
*/ bool prctl_unalign_sigbus; - struct hax_vcpu_state *hax_vcpu; + struct hax_vcpu_state *accel; struct hvf_vcpu_state *hvf; diff --git a/target/i386/hax/hax-accel-ops.c b/target/i386/hax/hax-accel-ops.c index 0157a628a3..a8512efcd5 100644 --- a/target/i386/hax/hax-accel-ops.c +++ b/target/i386/hax/hax-accel-ops.c @@ -71,7 +71,7 @@ static void hax_start_vcpu_thread(CPUState *cpu) cpu->cpu_index); qemu_thread_create(cpu->thread, thread_name, hax_cpu_thread_fn, cpu, QEMU_THREAD_JOINABLE); - assert(cpu->hax_vcpu); + assert(cpu->accel); #ifdef _WIN32 cpu->hThread = qemu_thread_get_handle(cpu->thread); #endif diff --git a/target/i386/hax/hax-all.c b/target/i386/hax/hax-all.c index 38a4323a3c..3865ff9419 100644 --- a/target/i386/hax/hax-all.c +++ b/target/i386/hax/hax-all.c @@ -62,7 +62,7 @@ int valid_hax_tunnel_size(uint16_t size) hax_fd hax_vcpu_get_fd(CPUArchState *env) { - struct hax_vcpu_state *vcpu = env_cpu(env)->hax_vcpu; + struct hax_vcpu_state *vcpu = env_cpu(env)->accel; if (!vcpu) { return HAX_INVALID_FD; } @@ -188,7 +188,7 @@ int hax_vcpu_create(int id) int hax_vcpu_destroy(CPUState *cpu) { - struct hax_vcpu_state *vcpu = cpu->hax_vcpu; + struct hax_vcpu_state *vcpu = cpu->accel; if (!hax_global.vm) { fprintf(stderr, "vcpu %x destroy failed, vm is null\n", vcpu->vcpu_id); @@ -209,7 +209,7 @@ int hax_vcpu_destroy(CPUState *cpu) CloseHandle(cpu->hThread); #endif g_free(vcpu); - cpu->hax_vcpu = NULL; + cpu->accel = NULL; return 0; } @@ -223,7 +223,7 @@ int hax_init_vcpu(CPUState *cpu) exit(-1); } - cpu->hax_vcpu = hax_global.vm->vcpus[cpu->cpu_index]; + cpu->accel = hax_global.vm->vcpus[cpu->cpu_index]; cpu->vcpu_dirty = true; qemu_register_reset(hax_reset_vcpu_state, cpu->env_ptr); @@ -415,7 +415,7 @@ static int hax_handle_io(CPUArchState *env, uint32_t df, uint16_t port, static int hax_vcpu_interrupt(CPUArchState *env) { CPUState *cpu = env_cpu(env); - struct hax_vcpu_state *vcpu = cpu->hax_vcpu; + struct hax_vcpu_state *vcpu = cpu->accel; struct hax_tunnel *ht = vcpu->tunnel; /* @@ -447,7 +447,7 @@ static int hax_vcpu_interrupt(CPUArchState *env) void hax_raise_event(CPUState *cpu) { - struct hax_vcpu_state *vcpu = cpu->hax_vcpu; + struct hax_vcpu_state *vcpu = cpu->accel; if (!vcpu) { return; @@ -468,7 +468,7 @@ static int hax_vcpu_hax_exec(CPUArchState *env) int ret = 0; CPUState *cpu = env_cpu(env); X86CPU *x86_cpu = X86_CPU(cpu); - struct hax_vcpu_state *vcpu = cpu->hax_vcpu; + struct hax_vcpu_state *vcpu = cpu->accel; struct hax_tunnel *ht = vcpu->tunnel; if (!hax_enabled()) { @@ -1114,8 +1114,8 @@ void hax_reset_vcpu_state(void *opaque) { CPUState *cpu; for (cpu = first_cpu; cpu != NULL; cpu = CPU_NEXT(cpu)) { - cpu->hax_vcpu->tunnel->user_event_pending = 0; - cpu->hax_vcpu->tunnel->ready_for_interrupt_injection = 0; + cpu->accel->tunnel->user_event_pending = 0; + cpu->accel->tunnel->ready_for_interrupt_injection = 0; } } diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c index b75738ee9c..cf4f0af24b 100644 --- a/target/i386/nvmm/nvmm-all.c +++ b/target/i386/nvmm/nvmm-all.c @@ -52,7 +52,7 @@ static struct qemu_machine qemu_mach; static struct qemu_vcpu * get_qemu_vcpu(CPUState *cpu) { - return (struct qemu_vcpu *)cpu->hax_vcpu; + return (struct qemu_vcpu *)cpu->accel; } static struct nvmm_machine * @@ -995,7 +995,7 @@ nvmm_init_vcpu(CPUState *cpu) } cpu->vcpu_dirty = true; - cpu->hax_vcpu = (struct hax_vcpu_state *)qcpu; + cpu->accel = (struct hax_vcpu_state *)qcpu; return 0; } @@ -1030,7 +1030,7 @@ nvmm_destroy_vcpu(CPUState *cpu) struct qemu_vcpu *qcpu = 
get_qemu_vcpu(cpu); nvmm_vcpu_destroy(mach, &qcpu->vcpu); - g_free(cpu->hax_vcpu); + g_free(cpu->accel); } /* -------------------------------------------------------------------------- */ diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c index 7fa68d49e5..98dfa5cfc2 100644 --- a/target/i386/whpx/whpx-all.c +++ b/target/i386/whpx/whpx-all.c @@ -262,7 +262,7 @@ static bool whpx_has_xsave(void) static struct whpx_vcpu *get_whpx_vcpu(CPUState *cpu) { - return (struct whpx_vcpu *)cpu->hax_vcpu; + return (struct whpx_vcpu *)cpu->accel; } static WHV_X64_SEGMENT_REGISTER whpx_seg_q2h(const SegmentCache *qs, int v86, @@ -2258,7 +2258,7 @@ int whpx_init_vcpu(CPUState *cpu) vcpu->interruptable = true; cpu->vcpu_dirty = true; - cpu->hax_vcpu = (struct hax_vcpu_state *)vcpu; + cpu->accel = (struct hax_vcpu_state *)vcpu; max_vcpu_index = max(max_vcpu_index, cpu->cpu_index); qemu_add_vm_change_state_handler(whpx_cpu_update_state, cpu->env_ptr); @@ -2300,7 +2300,7 @@ void whpx_destroy_vcpu(CPUState *cpu) whp_dispatch.WHvDeleteVirtualProcessor(whpx->partition, cpu->cpu_index); whp_dispatch.WHvEmulatorDestroyEmulator(vcpu->emulator); - g_free(cpu->hax_vcpu); + g_free(cpu->accel); return; } From f861b3f39063619b2d169cbacf455a00c22a1584 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Wed, 29 Mar 2023 19:01:49 +0200 Subject: [PATCH 09/30] accel: Rename HAX 'struct hax_vcpu_state' -> AccelCPUState MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We want all accelerators to share the same opaque pointer in CPUState. Start with the HAX context, renaming its forward declarated structure 'hax_vcpu_state' as 'AccelCPUState'. Document the CPUState field. Directly use the typedef. Remove the amusing but now unnecessary casts in NVMM / WHPX. Signed-off-by: Philippe Mathieu-Daudé Reviewed-by: Richard Henderson Message-Id: <20230624174121.11508-8-philmd@linaro.org> --- include/hw/core/cpu.h | 5 ++--- include/qemu/typedefs.h | 1 + target/i386/hax/hax-all.c | 16 ++++++++-------- target/i386/hax/hax-i386.h | 9 +++++---- target/i386/hax/hax-posix.c | 4 ++-- target/i386/hax/hax-windows.c | 4 ++-- target/i386/nvmm/nvmm-all.c | 2 +- target/i386/whpx/whpx-all.c | 2 +- 8 files changed, 22 insertions(+), 21 deletions(-) diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index 63c25ce106..45e30911e7 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -241,7 +241,6 @@ typedef struct SavedIOTLB { struct KVMState; struct kvm_run; -struct hax_vcpu_state; struct hvf_vcpu_state; /* work queue */ @@ -309,6 +308,7 @@ struct qemu_work_item; * @next_cpu: Next CPU sharing TB cache. * @opaque: User data. * @mem_io_pc: Host Program Counter at which the memory was accessed. + * @accel: Pointer to accelerator specific state. * @kvm_fd: vCPU file descriptor for KVM. * @work_mutex: Lock to prevent multiple access to @work_list. * @work_list: List of pending asynchronous work. @@ -424,6 +424,7 @@ struct CPUState { uint32_t can_do_io; int32_t exception_index; + AccelCPUState *accel; /* shared by kvm, hax and hvf */ bool vcpu_dirty; @@ -443,8 +444,6 @@ struct CPUState { /* Used for user-only emulation of prctl(PR_SET_UNALIGN). 
*/ bool prctl_unalign_sigbus; - struct hax_vcpu_state *accel; - struct hvf_vcpu_state *hvf; /* track IOMMUs whose translations we've cached in the TCG TLB */ diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h index 8c1840bfc1..834b0e47a0 100644 --- a/include/qemu/typedefs.h +++ b/include/qemu/typedefs.h @@ -21,6 +21,7 @@ * Incomplete struct types * Please keep this list in case-insensitive alphabetical order. */ +typedef struct AccelCPUState AccelCPUState; typedef struct AccelState AccelState; typedef struct AdapterInfo AdapterInfo; typedef struct AddressSpace AddressSpace; diff --git a/target/i386/hax/hax-all.c b/target/i386/hax/hax-all.c index 3865ff9419..9d9011cc38 100644 --- a/target/i386/hax/hax-all.c +++ b/target/i386/hax/hax-all.c @@ -62,7 +62,7 @@ int valid_hax_tunnel_size(uint16_t size) hax_fd hax_vcpu_get_fd(CPUArchState *env) { - struct hax_vcpu_state *vcpu = env_cpu(env)->accel; + AccelCPUState *vcpu = env_cpu(env)->accel; if (!vcpu) { return HAX_INVALID_FD; } @@ -136,7 +136,7 @@ static int hax_version_support(struct hax_state *hax) int hax_vcpu_create(int id) { - struct hax_vcpu_state *vcpu = NULL; + AccelCPUState *vcpu = NULL; int ret; if (!hax_global.vm) { @@ -149,7 +149,7 @@ int hax_vcpu_create(int id) return 0; } - vcpu = g_new0(struct hax_vcpu_state, 1); + vcpu = g_new0(AccelCPUState, 1); ret = hax_host_create_vcpu(hax_global.vm->fd, id); if (ret) { @@ -188,7 +188,7 @@ int hax_vcpu_create(int id) int hax_vcpu_destroy(CPUState *cpu) { - struct hax_vcpu_state *vcpu = cpu->accel; + AccelCPUState *vcpu = cpu->accel; if (!hax_global.vm) { fprintf(stderr, "vcpu %x destroy failed, vm is null\n", vcpu->vcpu_id); @@ -263,7 +263,7 @@ struct hax_vm *hax_vm_create(struct hax_state *hax, int max_cpus) } vm->numvcpus = max_cpus; - vm->vcpus = g_new0(struct hax_vcpu_state *, vm->numvcpus); + vm->vcpus = g_new0(AccelCPUState *, vm->numvcpus); for (i = 0; i < vm->numvcpus; i++) { vm->vcpus[i] = NULL; } @@ -415,7 +415,7 @@ static int hax_handle_io(CPUArchState *env, uint32_t df, uint16_t port, static int hax_vcpu_interrupt(CPUArchState *env) { CPUState *cpu = env_cpu(env); - struct hax_vcpu_state *vcpu = cpu->accel; + AccelCPUState *vcpu = cpu->accel; struct hax_tunnel *ht = vcpu->tunnel; /* @@ -447,7 +447,7 @@ static int hax_vcpu_interrupt(CPUArchState *env) void hax_raise_event(CPUState *cpu) { - struct hax_vcpu_state *vcpu = cpu->accel; + AccelCPUState *vcpu = cpu->accel; if (!vcpu) { return; @@ -468,7 +468,7 @@ static int hax_vcpu_hax_exec(CPUArchState *env) int ret = 0; CPUState *cpu = env_cpu(env); X86CPU *x86_cpu = X86_CPU(cpu); - struct hax_vcpu_state *vcpu = cpu->accel; + AccelCPUState *vcpu = cpu->accel; struct hax_tunnel *ht = vcpu->tunnel; if (!hax_enabled()) { diff --git a/target/i386/hax/hax-i386.h b/target/i386/hax/hax-i386.h index 409ebdb4af..4372ee596d 100644 --- a/target/i386/hax/hax-i386.h +++ b/target/i386/hax/hax-i386.h @@ -25,7 +25,8 @@ typedef HANDLE hax_fd; #endif extern struct hax_state hax_global; -struct hax_vcpu_state { + +struct AccelCPUState { hax_fd fd; int vcpu_id; struct hax_tunnel *tunnel; @@ -46,7 +47,7 @@ struct hax_vm { hax_fd fd; int id; int numvcpus; - struct hax_vcpu_state **vcpus; + AccelCPUState **vcpus; }; /* Functions exported to host specific mode */ @@ -57,7 +58,7 @@ int valid_hax_tunnel_size(uint16_t size); int hax_mod_version(struct hax_state *hax, struct hax_module_version *version); int hax_inject_interrupt(CPUArchState *env, int vector); struct hax_vm *hax_vm_create(struct hax_state *hax, int max_cpus); -int hax_vcpu_run(struct 
hax_vcpu_state *vcpu); +int hax_vcpu_run(AccelCPUState *vcpu); int hax_vcpu_create(int id); void hax_kick_vcpu_thread(CPUState *cpu); @@ -76,7 +77,7 @@ int hax_host_create_vm(struct hax_state *hax, int *vm_id); hax_fd hax_host_open_vm(struct hax_state *hax, int vm_id); int hax_host_create_vcpu(hax_fd vm_fd, int vcpuid); hax_fd hax_host_open_vcpu(int vmid, int vcpuid); -int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu); +int hax_host_setup_vcpu_channel(AccelCPUState *vcpu); hax_fd hax_mod_open(void); void hax_memory_init(void); diff --git a/target/i386/hax/hax-posix.c b/target/i386/hax/hax-posix.c index ac1a51096e..a057a5bd94 100644 --- a/target/i386/hax/hax-posix.c +++ b/target/i386/hax/hax-posix.c @@ -205,7 +205,7 @@ hax_fd hax_host_open_vcpu(int vmid, int vcpuid) return fd; } -int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu) +int hax_host_setup_vcpu_channel(AccelCPUState *vcpu) { int ret; struct hax_tunnel_info info; @@ -227,7 +227,7 @@ int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu) return 0; } -int hax_vcpu_run(struct hax_vcpu_state *vcpu) +int hax_vcpu_run(AccelCPUState *vcpu) { return ioctl(vcpu->fd, HAX_VCPU_IOCTL_RUN, NULL); } diff --git a/target/i386/hax/hax-windows.c b/target/i386/hax/hax-windows.c index 59afa213a6..bf4b0ad941 100644 --- a/target/i386/hax/hax-windows.c +++ b/target/i386/hax/hax-windows.c @@ -301,7 +301,7 @@ hax_fd hax_host_open_vcpu(int vmid, int vcpuid) return hDeviceVCPU; } -int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu) +int hax_host_setup_vcpu_channel(AccelCPUState *vcpu) { hax_fd hDeviceVCPU = vcpu->fd; int ret; @@ -327,7 +327,7 @@ int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu) return 0; } -int hax_vcpu_run(struct hax_vcpu_state *vcpu) +int hax_vcpu_run(AccelCPUState *vcpu) { int ret; HANDLE hDeviceVCPU = vcpu->fd; diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c index cf4f0af24b..b3c3adc59a 100644 --- a/target/i386/nvmm/nvmm-all.c +++ b/target/i386/nvmm/nvmm-all.c @@ -995,7 +995,7 @@ nvmm_init_vcpu(CPUState *cpu) } cpu->vcpu_dirty = true; - cpu->accel = (struct hax_vcpu_state *)qcpu; + cpu->accel = qcpu; return 0; } diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c index 98dfa5cfc2..7fc8d4ffe5 100644 --- a/target/i386/whpx/whpx-all.c +++ b/target/i386/whpx/whpx-all.c @@ -2258,7 +2258,7 @@ int whpx_init_vcpu(CPUState *cpu) vcpu->interruptable = true; cpu->vcpu_dirty = true; - cpu->accel = (struct hax_vcpu_state *)vcpu; + cpu->accel = vcpu; max_vcpu_index = max(max_vcpu_index, cpu->cpu_index); qemu_add_vm_change_state_handler(whpx_cpu_update_state, cpu->env_ptr); From 642ce52d8ea9e1f80aa6013ad55bb66e51f1c76e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Wed, 29 Mar 2023 19:13:09 +0200 Subject: [PATCH 10/30] accel: Move HAX hThread to accelerator context MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit hThread variable is only used by the HAX accelerator, so move it to the accelerator specific context. 
Signed-off-by: Philippe Mathieu-Daudé Reviewed-by: Richard Henderson Message-Id: <20230624174121.11508-9-philmd@linaro.org> --- include/hw/core/cpu.h | 1 - target/i386/hax/hax-accel-ops.c | 2 +- target/i386/hax/hax-all.c | 2 +- target/i386/hax/hax-i386.h | 3 +++ target/i386/hax/hax-windows.c | 2 +- 5 files changed, 6 insertions(+), 4 deletions(-) diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index 45e30911e7..476b39e42e 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -338,7 +338,6 @@ struct CPUState { struct QemuThread *thread; #ifdef _WIN32 - HANDLE hThread; QemuSemaphore sem; #endif int thread_id; diff --git a/target/i386/hax/hax-accel-ops.c b/target/i386/hax/hax-accel-ops.c index a8512efcd5..5031096760 100644 --- a/target/i386/hax/hax-accel-ops.c +++ b/target/i386/hax/hax-accel-ops.c @@ -73,7 +73,7 @@ static void hax_start_vcpu_thread(CPUState *cpu) cpu, QEMU_THREAD_JOINABLE); assert(cpu->accel); #ifdef _WIN32 - cpu->hThread = qemu_thread_get_handle(cpu->thread); + cpu->accel->hThread = qemu_thread_get_handle(cpu->thread); #endif } diff --git a/target/i386/hax/hax-all.c b/target/i386/hax/hax-all.c index 9d9011cc38..18d78e5b6b 100644 --- a/target/i386/hax/hax-all.c +++ b/target/i386/hax/hax-all.c @@ -206,7 +206,7 @@ int hax_vcpu_destroy(CPUState *cpu) hax_close_fd(vcpu->fd); hax_global.vm->vcpus[vcpu->vcpu_id] = NULL; #ifdef _WIN32 - CloseHandle(cpu->hThread); + CloseHandle(vcpu->hThread); #endif g_free(vcpu); cpu->accel = NULL; diff --git a/target/i386/hax/hax-i386.h b/target/i386/hax/hax-i386.h index 4372ee596d..87153f40ab 100644 --- a/target/i386/hax/hax-i386.h +++ b/target/i386/hax/hax-i386.h @@ -27,6 +27,9 @@ typedef HANDLE hax_fd; extern struct hax_state hax_global; struct AccelCPUState { +#ifdef _WIN32 + HANDLE hThread; +#endif hax_fd fd; int vcpu_id; struct hax_tunnel *tunnel; diff --git a/target/i386/hax/hax-windows.c b/target/i386/hax/hax-windows.c index bf4b0ad941..4bf6cc08d2 100644 --- a/target/i386/hax/hax-windows.c +++ b/target/i386/hax/hax-windows.c @@ -476,7 +476,7 @@ void hax_kick_vcpu_thread(CPUState *cpu) */ cpu->exit_request = 1; if (!qemu_cpu_is_self(cpu)) { - if (!QueueUserAPC(dummy_apc_func, cpu->hThread, 0)) { + if (!QueueUserAPC(dummy_apc_func, cpu->accel->hThread, 0)) { fprintf(stderr, "%s: QueueUserAPC failed with error %lu\n", __func__, GetLastError()); exit(1); From 8c12c76df2373b61764c2cacb4a6c505f874fa5f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Wed, 5 Apr 2023 10:04:07 +0200 Subject: [PATCH 11/30] accel: Remove NVMM unreachable error path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit g_malloc0() can not fail. Remove the unreachable error path. 
https://developer-old.gnome.org/glib/stable/glib-Memory-Allocation.html#glib-Memory-Allocation.description Signed-off-by: Philippe Mathieu-Daudé Reviewed-by: Richard Henderson Message-Id: <20230624174121.11508-10-philmd@linaro.org> --- target/i386/nvmm/nvmm-all.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c index b3c3adc59a..90e9e0a5b2 100644 --- a/target/i386/nvmm/nvmm-all.c +++ b/target/i386/nvmm/nvmm-all.c @@ -943,10 +943,6 @@ nvmm_init_vcpu(CPUState *cpu) } qcpu = g_malloc0(sizeof(*qcpu)); - if (qcpu == NULL) { - error_report("NVMM: Failed to allocate VCPU context."); - return -ENOMEM; - } ret = nvmm_vcpu_create(mach, cpu->cpu_index, &qcpu->vcpu); if (ret == -1) { From c5beb26a2f5a641a3c4ef9556f48ae05ea86e30d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Thu, 23 Mar 2023 13:38:34 +0100 Subject: [PATCH 12/30] accel: Rename NVMM 'struct qemu_vcpu' -> AccelCPUState MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We want all accelerators to share the same opaque pointer in CPUState. Rename NVMM 'qemu_vcpu' as 'AccelCPUState'; directly use the typedef, remove unnecessary casts. Reviewed-by: Richard Henderson Signed-off-by: Philippe Mathieu-Daudé Message-Id: <20230624174121.11508-11-philmd@linaro.org> --- target/i386/nvmm/nvmm-all.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c index 90e9e0a5b2..e5ee4af084 100644 --- a/target/i386/nvmm/nvmm-all.c +++ b/target/i386/nvmm/nvmm-all.c @@ -26,7 +26,7 @@ #include -struct qemu_vcpu { +struct AccelCPUState { struct nvmm_vcpu vcpu; uint8_t tpr; bool stop; @@ -49,10 +49,10 @@ struct qemu_machine { static bool nvmm_allowed; static struct qemu_machine qemu_mach; -static struct qemu_vcpu * +static AccelCPUState * get_qemu_vcpu(CPUState *cpu) { - return (struct qemu_vcpu *)cpu->accel; + return cpu->accel; } static struct nvmm_machine * @@ -86,7 +86,7 @@ nvmm_set_registers(CPUState *cpu) { CPUX86State *env = cpu->env_ptr; struct nvmm_machine *mach = get_nvmm_mach(); - struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = get_qemu_vcpu(cpu); struct nvmm_vcpu *vcpu = &qcpu->vcpu; struct nvmm_x64_state *state = vcpu->state; uint64_t bitmap; @@ -223,7 +223,7 @@ nvmm_get_registers(CPUState *cpu) { CPUX86State *env = cpu->env_ptr; struct nvmm_machine *mach = get_nvmm_mach(); - struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = get_qemu_vcpu(cpu); struct nvmm_vcpu *vcpu = &qcpu->vcpu; X86CPU *x86_cpu = X86_CPU(cpu); struct nvmm_x64_state *state = vcpu->state; @@ -347,7 +347,7 @@ static bool nvmm_can_take_int(CPUState *cpu) { CPUX86State *env = cpu->env_ptr; - struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = get_qemu_vcpu(cpu); struct nvmm_vcpu *vcpu = &qcpu->vcpu; struct nvmm_machine *mach = get_nvmm_mach(); @@ -372,7 +372,7 @@ nvmm_can_take_int(CPUState *cpu) static bool nvmm_can_take_nmi(CPUState *cpu) { - struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = get_qemu_vcpu(cpu); /* * Contrary to INTs, NMIs always schedule an exit when they are @@ -395,7 +395,7 @@ nvmm_vcpu_pre_run(CPUState *cpu) { CPUX86State *env = cpu->env_ptr; struct nvmm_machine *mach = get_nvmm_mach(); - struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = get_qemu_vcpu(cpu); struct nvmm_vcpu *vcpu = &qcpu->vcpu; X86CPU *x86_cpu = X86_CPU(cpu); struct 
nvmm_x64_state *state = vcpu->state; @@ -478,7 +478,7 @@ nvmm_vcpu_pre_run(CPUState *cpu) static void nvmm_vcpu_post_run(CPUState *cpu, struct nvmm_vcpu_exit *exit) { - struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = get_qemu_vcpu(cpu); CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); uint64_t tpr; @@ -565,7 +565,7 @@ static int nvmm_handle_rdmsr(struct nvmm_machine *mach, CPUState *cpu, struct nvmm_vcpu_exit *exit) { - struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = get_qemu_vcpu(cpu); struct nvmm_vcpu *vcpu = &qcpu->vcpu; X86CPU *x86_cpu = X86_CPU(cpu); struct nvmm_x64_state *state = vcpu->state; @@ -610,7 +610,7 @@ static int nvmm_handle_wrmsr(struct nvmm_machine *mach, CPUState *cpu, struct nvmm_vcpu_exit *exit) { - struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = get_qemu_vcpu(cpu); struct nvmm_vcpu *vcpu = &qcpu->vcpu; X86CPU *x86_cpu = X86_CPU(cpu); struct nvmm_x64_state *state = vcpu->state; @@ -686,7 +686,7 @@ nvmm_vcpu_loop(CPUState *cpu) { CPUX86State *env = cpu->env_ptr; struct nvmm_machine *mach = get_nvmm_mach(); - struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = get_qemu_vcpu(cpu); struct nvmm_vcpu *vcpu = &qcpu->vcpu; X86CPU *x86_cpu = X86_CPU(cpu); struct nvmm_vcpu_exit *exit = vcpu->exit; @@ -892,7 +892,7 @@ static void nvmm_ipi_signal(int sigcpu) { if (current_cpu) { - struct qemu_vcpu *qcpu = get_qemu_vcpu(current_cpu); + AccelCPUState *qcpu = get_qemu_vcpu(current_cpu); #if NVMM_USER_VERSION >= 2 struct nvmm_vcpu *vcpu = &qcpu->vcpu; nvmm_vcpu_stop(vcpu); @@ -926,7 +926,7 @@ nvmm_init_vcpu(CPUState *cpu) struct nvmm_vcpu_conf_cpuid cpuid; struct nvmm_vcpu_conf_tpr tpr; Error *local_error = NULL; - struct qemu_vcpu *qcpu; + AccelCPUState *qcpu; int ret, err; nvmm_init_cpu_signals(); @@ -942,7 +942,7 @@ nvmm_init_vcpu(CPUState *cpu) } } - qcpu = g_malloc0(sizeof(*qcpu)); + qcpu = g_new0(AccelCPUState, 1); ret = nvmm_vcpu_create(mach, cpu->cpu_index, &qcpu->vcpu); if (ret == -1) { @@ -1023,7 +1023,7 @@ void nvmm_destroy_vcpu(CPUState *cpu) { struct nvmm_machine *mach = get_nvmm_mach(); - struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = get_qemu_vcpu(cpu); nvmm_vcpu_destroy(mach, &qcpu->vcpu); g_free(cpu->accel); From 2f642b1c2cfa32a1d5af6cbe66c16b5d1c5fd69c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Thu, 23 Mar 2023 13:39:26 +0100 Subject: [PATCH 13/30] accel: Inline NVMM get_qemu_vcpu() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No need for this helper to access the CPUState::accel field. 
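Since the previous patch, cpu->accel already has the proper AccelCPUState type,
so callers can simply do (as the diff below shows):

    AccelCPUState *qcpu = cpu->accel;

instead of going through a one-line wrapper.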
Reviewed-by: Richard Henderson Signed-off-by: Philippe Mathieu-Daudé Message-Id: <20230624174121.11508-12-philmd@linaro.org> --- target/i386/nvmm/nvmm-all.c | 28 +++++++++++----------------- 1 file changed, 11 insertions(+), 17 deletions(-) diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c index e5ee4af084..72a3a9e3ae 100644 --- a/target/i386/nvmm/nvmm-all.c +++ b/target/i386/nvmm/nvmm-all.c @@ -49,12 +49,6 @@ struct qemu_machine { static bool nvmm_allowed; static struct qemu_machine qemu_mach; -static AccelCPUState * -get_qemu_vcpu(CPUState *cpu) -{ - return cpu->accel; -} - static struct nvmm_machine * get_nvmm_mach(void) { @@ -86,7 +80,7 @@ nvmm_set_registers(CPUState *cpu) { CPUX86State *env = cpu->env_ptr; struct nvmm_machine *mach = get_nvmm_mach(); - AccelCPUState *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = cpu->accel; struct nvmm_vcpu *vcpu = &qcpu->vcpu; struct nvmm_x64_state *state = vcpu->state; uint64_t bitmap; @@ -223,7 +217,7 @@ nvmm_get_registers(CPUState *cpu) { CPUX86State *env = cpu->env_ptr; struct nvmm_machine *mach = get_nvmm_mach(); - AccelCPUState *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = cpu->accel; struct nvmm_vcpu *vcpu = &qcpu->vcpu; X86CPU *x86_cpu = X86_CPU(cpu); struct nvmm_x64_state *state = vcpu->state; @@ -347,7 +341,7 @@ static bool nvmm_can_take_int(CPUState *cpu) { CPUX86State *env = cpu->env_ptr; - AccelCPUState *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = cpu->accel; struct nvmm_vcpu *vcpu = &qcpu->vcpu; struct nvmm_machine *mach = get_nvmm_mach(); @@ -372,7 +366,7 @@ nvmm_can_take_int(CPUState *cpu) static bool nvmm_can_take_nmi(CPUState *cpu) { - AccelCPUState *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = cpu->accel; /* * Contrary to INTs, NMIs always schedule an exit when they are @@ -395,7 +389,7 @@ nvmm_vcpu_pre_run(CPUState *cpu) { CPUX86State *env = cpu->env_ptr; struct nvmm_machine *mach = get_nvmm_mach(); - AccelCPUState *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = cpu->accel; struct nvmm_vcpu *vcpu = &qcpu->vcpu; X86CPU *x86_cpu = X86_CPU(cpu); struct nvmm_x64_state *state = vcpu->state; @@ -478,7 +472,7 @@ nvmm_vcpu_pre_run(CPUState *cpu) static void nvmm_vcpu_post_run(CPUState *cpu, struct nvmm_vcpu_exit *exit) { - AccelCPUState *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = cpu->accel; CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); uint64_t tpr; @@ -565,7 +559,7 @@ static int nvmm_handle_rdmsr(struct nvmm_machine *mach, CPUState *cpu, struct nvmm_vcpu_exit *exit) { - AccelCPUState *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = cpu->accel; struct nvmm_vcpu *vcpu = &qcpu->vcpu; X86CPU *x86_cpu = X86_CPU(cpu); struct nvmm_x64_state *state = vcpu->state; @@ -610,7 +604,7 @@ static int nvmm_handle_wrmsr(struct nvmm_machine *mach, CPUState *cpu, struct nvmm_vcpu_exit *exit) { - AccelCPUState *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = cpu->accel; struct nvmm_vcpu *vcpu = &qcpu->vcpu; X86CPU *x86_cpu = X86_CPU(cpu); struct nvmm_x64_state *state = vcpu->state; @@ -686,7 +680,7 @@ nvmm_vcpu_loop(CPUState *cpu) { CPUX86State *env = cpu->env_ptr; struct nvmm_machine *mach = get_nvmm_mach(); - AccelCPUState *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = cpu->accel; struct nvmm_vcpu *vcpu = &qcpu->vcpu; X86CPU *x86_cpu = X86_CPU(cpu); struct nvmm_vcpu_exit *exit = vcpu->exit; @@ -892,7 +886,7 @@ static void nvmm_ipi_signal(int sigcpu) { if (current_cpu) { - AccelCPUState *qcpu = get_qemu_vcpu(current_cpu); + AccelCPUState *qcpu = current_cpu->accel; #if 
NVMM_USER_VERSION >= 2 struct nvmm_vcpu *vcpu = &qcpu->vcpu; nvmm_vcpu_stop(vcpu); @@ -1023,7 +1017,7 @@ void nvmm_destroy_vcpu(CPUState *cpu) { struct nvmm_machine *mach = get_nvmm_mach(); - AccelCPUState *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = cpu->accel; nvmm_vcpu_destroy(mach, &qcpu->vcpu); g_free(cpu->accel); From 50830fea89ceefefd455d2489c4f8914df2d68ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Wed, 21 Jun 2023 15:52:51 +0200 Subject: [PATCH 14/30] accel: Remove WHPX unreachable error path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit g_new0() can not fail. Remove the unreachable error path. https://developer-old.gnome.org/glib/stable/glib-Memory-Allocation.html#glib-Memory-Allocation.description Reported-by: Richard Henderson Signed-off-by: Philippe Mathieu-Daudé Reviewed-by: Richard Henderson Message-Id: <20230624174121.11508-13-philmd@linaro.org> --- target/i386/whpx/whpx-all.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c index 7fc8d4ffe5..63be5fbdad 100644 --- a/target/i386/whpx/whpx-all.c +++ b/target/i386/whpx/whpx-all.c @@ -2179,12 +2179,6 @@ int whpx_init_vcpu(CPUState *cpu) vcpu = g_new0(struct whpx_vcpu, 1); - if (!vcpu) { - error_report("WHPX: Failed to allocte VCPU context."); - ret = -ENOMEM; - goto error; - } - hr = whp_dispatch.WHvEmulatorCreateEmulator( &whpx_emu_callbacks, &vcpu->emulator); From b4f879a4ed039f73f61ee9ae7142abd84cdc0f27 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Thu, 23 Mar 2023 13:43:54 +0100 Subject: [PATCH 15/30] accel: Rename WHPX 'struct whpx_vcpu' -> AccelCPUState MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We want all accelerators to share the same opaque pointer in CPUState. Rename WHPX 'whpx_vcpu' as 'AccelCPUState'; use the typedef. 
Signed-off-by: Philippe Mathieu-Daudé Reviewed-by: Richard Henderson Message-Id: <20230624174121.11508-14-philmd@linaro.org> --- target/i386/whpx/whpx-all.c | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c index 63be5fbdad..185f2a2820 100644 --- a/target/i386/whpx/whpx-all.c +++ b/target/i386/whpx/whpx-all.c @@ -229,7 +229,7 @@ typedef enum WhpxStepMode { WHPX_STEP_EXCLUSIVE, } WhpxStepMode; -struct whpx_vcpu { +struct AccelCPUState { WHV_EMULATOR_HANDLE emulator; bool window_registered; bool interruptable; @@ -260,9 +260,9 @@ static bool whpx_has_xsave(void) * VP support */ -static struct whpx_vcpu *get_whpx_vcpu(CPUState *cpu) +static AccelCPUState *get_whpx_vcpu(CPUState *cpu) { - return (struct whpx_vcpu *)cpu->accel; + return (AccelCPUState *)cpu->accel; } static WHV_X64_SEGMENT_REGISTER whpx_seg_q2h(const SegmentCache *qs, int v86, @@ -390,7 +390,7 @@ static uint64_t whpx_cr8_to_apic_tpr(uint64_t cr8) static void whpx_set_registers(CPUState *cpu, int level) { struct whpx_state *whpx = &whpx_global; - struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = get_whpx_vcpu(cpu); CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); struct whpx_register_set vcxt; @@ -609,7 +609,7 @@ static void whpx_get_xcrs(CPUState *cpu) static void whpx_get_registers(CPUState *cpu) { struct whpx_state *whpx = &whpx_global; - struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = get_whpx_vcpu(cpu); CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); struct whpx_register_set vcxt; @@ -892,7 +892,7 @@ static const WHV_EMULATOR_CALLBACKS whpx_emu_callbacks = { static int whpx_handle_mmio(CPUState *cpu, WHV_MEMORY_ACCESS_CONTEXT *ctx) { HRESULT hr; - struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = get_whpx_vcpu(cpu); WHV_EMULATOR_STATUS emu_status; hr = whp_dispatch.WHvEmulatorTryMmioEmulation( @@ -917,7 +917,7 @@ static int whpx_handle_portio(CPUState *cpu, WHV_X64_IO_PORT_ACCESS_CONTEXT *ctx) { HRESULT hr; - struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = get_whpx_vcpu(cpu); WHV_EMULATOR_STATUS emu_status; hr = whp_dispatch.WHvEmulatorTryIoEmulation( @@ -1417,7 +1417,7 @@ static vaddr whpx_vcpu_get_pc(CPUState *cpu, bool exit_context_valid) * of QEMU, nor this port by calling WHvSetVirtualProcessorRegisters(). * This is the most common case. 
*/ - struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = get_whpx_vcpu(cpu); return vcpu->exit_ctx.VpContext.Rip; } else { /* @@ -1468,7 +1468,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu) { HRESULT hr; struct whpx_state *whpx = &whpx_global; - struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = get_whpx_vcpu(cpu); CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); int irq; @@ -1590,7 +1590,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu) static void whpx_vcpu_post_run(CPUState *cpu) { - struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = get_whpx_vcpu(cpu); CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); @@ -1617,7 +1617,7 @@ static void whpx_vcpu_process_async_events(CPUState *cpu) { CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); - struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = get_whpx_vcpu(cpu); if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) && !(env->hflags & HF_SMM_MASK)) { @@ -1656,7 +1656,7 @@ static int whpx_vcpu_run(CPUState *cpu) { HRESULT hr; struct whpx_state *whpx = &whpx_global; - struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = get_whpx_vcpu(cpu); struct whpx_breakpoint *stepped_over_bp = NULL; WhpxStepMode exclusive_step_mode = WHPX_STEP_NONE; int ret; @@ -2154,7 +2154,7 @@ int whpx_init_vcpu(CPUState *cpu) { HRESULT hr; struct whpx_state *whpx = &whpx_global; - struct whpx_vcpu *vcpu = NULL; + AccelCPUState *vcpu = NULL; Error *local_error = NULL; CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); @@ -2177,7 +2177,7 @@ int whpx_init_vcpu(CPUState *cpu) } } - vcpu = g_new0(struct whpx_vcpu, 1); + vcpu = g_new0(AccelCPUState, 1); hr = whp_dispatch.WHvEmulatorCreateEmulator( &whpx_emu_callbacks, @@ -2290,7 +2290,7 @@ int whpx_vcpu_exec(CPUState *cpu) void whpx_destroy_vcpu(CPUState *cpu) { struct whpx_state *whpx = &whpx_global; - struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = get_whpx_vcpu(cpu); whp_dispatch.WHvDeleteVirtualProcessor(whpx->partition, cpu->cpu_index); whp_dispatch.WHvEmulatorDestroyEmulator(vcpu->emulator); From 441f2449111b276df7fad81dcaa2f3f93ac7cce8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Wed, 21 Jun 2023 16:03:17 +0200 Subject: [PATCH 16/30] accel: Inline WHPX get_whpx_vcpu() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No need for this helper to access the CPUState::accel field. 
Reviewed-by: Richard Henderson Signed-off-by: Philippe Mathieu-Daudé Message-Id: <20230624174121.11508-15-philmd@linaro.org> --- target/i386/whpx/whpx-all.c | 29 ++++++++++------------------- 1 file changed, 10 insertions(+), 19 deletions(-) diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c index 185f2a2820..9ee04ee650 100644 --- a/target/i386/whpx/whpx-all.c +++ b/target/i386/whpx/whpx-all.c @@ -256,15 +256,6 @@ static bool whpx_has_xsave(void) return whpx_xsave_cap.XsaveSupport; } -/* - * VP support - */ - -static AccelCPUState *get_whpx_vcpu(CPUState *cpu) -{ - return (AccelCPUState *)cpu->accel; -} - static WHV_X64_SEGMENT_REGISTER whpx_seg_q2h(const SegmentCache *qs, int v86, int r86) { @@ -390,7 +381,7 @@ static uint64_t whpx_cr8_to_apic_tpr(uint64_t cr8) static void whpx_set_registers(CPUState *cpu, int level) { struct whpx_state *whpx = &whpx_global; - AccelCPUState *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = cpu->accel; CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); struct whpx_register_set vcxt; @@ -609,7 +600,7 @@ static void whpx_get_xcrs(CPUState *cpu) static void whpx_get_registers(CPUState *cpu) { struct whpx_state *whpx = &whpx_global; - AccelCPUState *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = cpu->accel; CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); struct whpx_register_set vcxt; @@ -892,7 +883,7 @@ static const WHV_EMULATOR_CALLBACKS whpx_emu_callbacks = { static int whpx_handle_mmio(CPUState *cpu, WHV_MEMORY_ACCESS_CONTEXT *ctx) { HRESULT hr; - AccelCPUState *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = cpu->accel; WHV_EMULATOR_STATUS emu_status; hr = whp_dispatch.WHvEmulatorTryMmioEmulation( @@ -917,7 +908,7 @@ static int whpx_handle_portio(CPUState *cpu, WHV_X64_IO_PORT_ACCESS_CONTEXT *ctx) { HRESULT hr; - AccelCPUState *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = cpu->accel; WHV_EMULATOR_STATUS emu_status; hr = whp_dispatch.WHvEmulatorTryIoEmulation( @@ -1417,7 +1408,7 @@ static vaddr whpx_vcpu_get_pc(CPUState *cpu, bool exit_context_valid) * of QEMU, nor this port by calling WHvSetVirtualProcessorRegisters(). * This is the most common case. 
*/ - AccelCPUState *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = cpu->accel; return vcpu->exit_ctx.VpContext.Rip; } else { /* @@ -1468,7 +1459,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu) { HRESULT hr; struct whpx_state *whpx = &whpx_global; - AccelCPUState *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = cpu->accel; CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); int irq; @@ -1590,7 +1581,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu) static void whpx_vcpu_post_run(CPUState *cpu) { - AccelCPUState *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = cpu->accel; CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); @@ -1617,7 +1608,7 @@ static void whpx_vcpu_process_async_events(CPUState *cpu) { CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); - AccelCPUState *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = cpu->accel; if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) && !(env->hflags & HF_SMM_MASK)) { @@ -1656,7 +1647,7 @@ static int whpx_vcpu_run(CPUState *cpu) { HRESULT hr; struct whpx_state *whpx = &whpx_global; - AccelCPUState *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = cpu->accel; struct whpx_breakpoint *stepped_over_bp = NULL; WhpxStepMode exclusive_step_mode = WHPX_STEP_NONE; int ret; @@ -2290,7 +2281,7 @@ int whpx_vcpu_exec(CPUState *cpu) void whpx_destroy_vcpu(CPUState *cpu) { struct whpx_state *whpx = &whpx_global; - AccelCPUState *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = cpu->accel; whp_dispatch.WHvDeleteVirtualProcessor(whpx->partition, cpu->cpu_index); whp_dispatch.WHvEmulatorDestroyEmulator(vcpu->emulator); From a7159244285058c049ad53a42b3dc7b24809faaa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Wed, 21 Jun 2023 13:14:06 +0200 Subject: [PATCH 17/30] accel: Rename 'cpu_state' -> 'cs' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Most of the codebase uses 'CPUState *cpu' or 'CPUState *cs'. While 'cpu_state' is kind of explicit, it makes the code harder to review. Simply rename as 'cs'. 
Acked-by: Richard Henderson Signed-off-by: Philippe Mathieu-Daudé Tested-by: Peter Maydell Message-Id: <20230624174121.11508-16-philmd@linaro.org> --- target/i386/hvf/x86hvf.c | 372 +++++++++++++++++++-------------------- target/i386/hvf/x86hvf.h | 18 +- 2 files changed, 195 insertions(+), 195 deletions(-) diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c index 69d4fb8cf5..92dfd26a01 100644 --- a/target/i386/hvf/x86hvf.c +++ b/target/i386/hvf/x86hvf.c @@ -32,14 +32,14 @@ #include #include -void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg, +void hvf_set_segment(CPUState *cs, struct vmx_segment *vmx_seg, SegmentCache *qseg, bool is_tr) { vmx_seg->sel = qseg->selector; vmx_seg->base = qseg->base; vmx_seg->limit = qseg->limit; - if (!qseg->selector && !x86_is_real(cpu) && !is_tr) { + if (!qseg->selector && !x86_is_real(cs) && !is_tr) { /* the TR register is usable after processor reset despite * having a null selector */ vmx_seg->ar = 1 << 16; @@ -70,279 +70,279 @@ void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg) (((vmx_seg->ar >> 15) & 1) << DESC_G_SHIFT); } -void hvf_put_xsave(CPUState *cpu_state) +void hvf_put_xsave(CPUState *cs) { - void *xsave = X86_CPU(cpu_state)->env.xsave_buf; - uint32_t xsave_len = X86_CPU(cpu_state)->env.xsave_buf_len; + void *xsave = X86_CPU(cs)->env.xsave_buf; + uint32_t xsave_len = X86_CPU(cs)->env.xsave_buf_len; - x86_cpu_xsave_all_areas(X86_CPU(cpu_state), xsave, xsave_len); + x86_cpu_xsave_all_areas(X86_CPU(cs), xsave, xsave_len); - if (hv_vcpu_write_fpstate(cpu_state->hvf->fd, xsave, xsave_len)) { + if (hv_vcpu_write_fpstate(cs->hvf->fd, xsave, xsave_len)) { abort(); } } -static void hvf_put_segments(CPUState *cpu_state) +static void hvf_put_segments(CPUState *cs) { - CPUX86State *env = &X86_CPU(cpu_state)->env; + CPUX86State *env = &X86_CPU(cs)->env; struct vmx_segment seg; - wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit); - wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_BASE, env->idt.base); + wvmcs(cs->hvf->fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit); + wvmcs(cs->hvf->fd, VMCS_GUEST_IDTR_BASE, env->idt.base); - wvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit); - wvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_BASE, env->gdt.base); + wvmcs(cs->hvf->fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit); + wvmcs(cs->hvf->fd, VMCS_GUEST_GDTR_BASE, env->gdt.base); - /* wvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR2, env->cr[2]); */ - wvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR3, env->cr[3]); - vmx_update_tpr(cpu_state); - wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IA32_EFER, env->efer); + /* wvmcs(cs->hvf->fd, VMCS_GUEST_CR2, env->cr[2]); */ + wvmcs(cs->hvf->fd, VMCS_GUEST_CR3, env->cr[3]); + vmx_update_tpr(cs); + wvmcs(cs->hvf->fd, VMCS_GUEST_IA32_EFER, env->efer); - macvm_set_cr4(cpu_state->hvf->fd, env->cr[4]); - macvm_set_cr0(cpu_state->hvf->fd, env->cr[0]); + macvm_set_cr4(cs->hvf->fd, env->cr[4]); + macvm_set_cr0(cs->hvf->fd, env->cr[0]); - hvf_set_segment(cpu_state, &seg, &env->segs[R_CS], false); - vmx_write_segment_descriptor(cpu_state, &seg, R_CS); + hvf_set_segment(cs, &seg, &env->segs[R_CS], false); + vmx_write_segment_descriptor(cs, &seg, R_CS); - hvf_set_segment(cpu_state, &seg, &env->segs[R_DS], false); - vmx_write_segment_descriptor(cpu_state, &seg, R_DS); + hvf_set_segment(cs, &seg, &env->segs[R_DS], false); + vmx_write_segment_descriptor(cs, &seg, R_DS); - hvf_set_segment(cpu_state, &seg, &env->segs[R_ES], false); - vmx_write_segment_descriptor(cpu_state, &seg, R_ES); + hvf_set_segment(cs, 
&seg, &env->segs[R_ES], false); + vmx_write_segment_descriptor(cs, &seg, R_ES); - hvf_set_segment(cpu_state, &seg, &env->segs[R_SS], false); - vmx_write_segment_descriptor(cpu_state, &seg, R_SS); + hvf_set_segment(cs, &seg, &env->segs[R_SS], false); + vmx_write_segment_descriptor(cs, &seg, R_SS); - hvf_set_segment(cpu_state, &seg, &env->segs[R_FS], false); - vmx_write_segment_descriptor(cpu_state, &seg, R_FS); + hvf_set_segment(cs, &seg, &env->segs[R_FS], false); + vmx_write_segment_descriptor(cs, &seg, R_FS); - hvf_set_segment(cpu_state, &seg, &env->segs[R_GS], false); - vmx_write_segment_descriptor(cpu_state, &seg, R_GS); + hvf_set_segment(cs, &seg, &env->segs[R_GS], false); + vmx_write_segment_descriptor(cs, &seg, R_GS); - hvf_set_segment(cpu_state, &seg, &env->tr, true); - vmx_write_segment_descriptor(cpu_state, &seg, R_TR); + hvf_set_segment(cs, &seg, &env->tr, true); + vmx_write_segment_descriptor(cs, &seg, R_TR); - hvf_set_segment(cpu_state, &seg, &env->ldt, false); - vmx_write_segment_descriptor(cpu_state, &seg, R_LDTR); + hvf_set_segment(cs, &seg, &env->ldt, false); + vmx_write_segment_descriptor(cs, &seg, R_LDTR); } -void hvf_put_msrs(CPUState *cpu_state) +void hvf_put_msrs(CPUState *cs) { - CPUX86State *env = &X86_CPU(cpu_state)->env; + CPUX86State *env = &X86_CPU(cs)->env; - hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_CS, + hv_vcpu_write_msr(cs->hvf->fd, MSR_IA32_SYSENTER_CS, env->sysenter_cs); - hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_ESP, + hv_vcpu_write_msr(cs->hvf->fd, MSR_IA32_SYSENTER_ESP, env->sysenter_esp); - hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_EIP, + hv_vcpu_write_msr(cs->hvf->fd, MSR_IA32_SYSENTER_EIP, env->sysenter_eip); - hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_STAR, env->star); + hv_vcpu_write_msr(cs->hvf->fd, MSR_STAR, env->star); #ifdef TARGET_X86_64 - hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_CSTAR, env->cstar); - hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_KERNELGSBASE, env->kernelgsbase); - hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_FMASK, env->fmask); - hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_LSTAR, env->lstar); + hv_vcpu_write_msr(cs->hvf->fd, MSR_CSTAR, env->cstar); + hv_vcpu_write_msr(cs->hvf->fd, MSR_KERNELGSBASE, env->kernelgsbase); + hv_vcpu_write_msr(cs->hvf->fd, MSR_FMASK, env->fmask); + hv_vcpu_write_msr(cs->hvf->fd, MSR_LSTAR, env->lstar); #endif - hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_GSBASE, env->segs[R_GS].base); - hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_FSBASE, env->segs[R_FS].base); + hv_vcpu_write_msr(cs->hvf->fd, MSR_GSBASE, env->segs[R_GS].base); + hv_vcpu_write_msr(cs->hvf->fd, MSR_FSBASE, env->segs[R_FS].base); } -void hvf_get_xsave(CPUState *cpu_state) +void hvf_get_xsave(CPUState *cs) { - void *xsave = X86_CPU(cpu_state)->env.xsave_buf; - uint32_t xsave_len = X86_CPU(cpu_state)->env.xsave_buf_len; + void *xsave = X86_CPU(cs)->env.xsave_buf; + uint32_t xsave_len = X86_CPU(cs)->env.xsave_buf_len; - if (hv_vcpu_read_fpstate(cpu_state->hvf->fd, xsave, xsave_len)) { + if (hv_vcpu_read_fpstate(cs->hvf->fd, xsave, xsave_len)) { abort(); } - x86_cpu_xrstor_all_areas(X86_CPU(cpu_state), xsave, xsave_len); + x86_cpu_xrstor_all_areas(X86_CPU(cs), xsave, xsave_len); } -static void hvf_get_segments(CPUState *cpu_state) +static void hvf_get_segments(CPUState *cs) { - CPUX86State *env = &X86_CPU(cpu_state)->env; + CPUX86State *env = &X86_CPU(cs)->env; struct vmx_segment seg; env->interrupt_injected = -1; - vmx_read_segment_descriptor(cpu_state, &seg, R_CS); + vmx_read_segment_descriptor(cs, &seg, 
R_CS); hvf_get_segment(&env->segs[R_CS], &seg); - vmx_read_segment_descriptor(cpu_state, &seg, R_DS); + vmx_read_segment_descriptor(cs, &seg, R_DS); hvf_get_segment(&env->segs[R_DS], &seg); - vmx_read_segment_descriptor(cpu_state, &seg, R_ES); + vmx_read_segment_descriptor(cs, &seg, R_ES); hvf_get_segment(&env->segs[R_ES], &seg); - vmx_read_segment_descriptor(cpu_state, &seg, R_FS); + vmx_read_segment_descriptor(cs, &seg, R_FS); hvf_get_segment(&env->segs[R_FS], &seg); - vmx_read_segment_descriptor(cpu_state, &seg, R_GS); + vmx_read_segment_descriptor(cs, &seg, R_GS); hvf_get_segment(&env->segs[R_GS], &seg); - vmx_read_segment_descriptor(cpu_state, &seg, R_SS); + vmx_read_segment_descriptor(cs, &seg, R_SS); hvf_get_segment(&env->segs[R_SS], &seg); - vmx_read_segment_descriptor(cpu_state, &seg, R_TR); + vmx_read_segment_descriptor(cs, &seg, R_TR); hvf_get_segment(&env->tr, &seg); - vmx_read_segment_descriptor(cpu_state, &seg, R_LDTR); + vmx_read_segment_descriptor(cs, &seg, R_LDTR); hvf_get_segment(&env->ldt, &seg); - env->idt.limit = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_LIMIT); - env->idt.base = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_BASE); - env->gdt.limit = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_LIMIT); - env->gdt.base = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_BASE); + env->idt.limit = rvmcs(cs->hvf->fd, VMCS_GUEST_IDTR_LIMIT); + env->idt.base = rvmcs(cs->hvf->fd, VMCS_GUEST_IDTR_BASE); + env->gdt.limit = rvmcs(cs->hvf->fd, VMCS_GUEST_GDTR_LIMIT); + env->gdt.base = rvmcs(cs->hvf->fd, VMCS_GUEST_GDTR_BASE); - env->cr[0] = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR0); + env->cr[0] = rvmcs(cs->hvf->fd, VMCS_GUEST_CR0); env->cr[2] = 0; - env->cr[3] = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR3); - env->cr[4] = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR4); + env->cr[3] = rvmcs(cs->hvf->fd, VMCS_GUEST_CR3); + env->cr[4] = rvmcs(cs->hvf->fd, VMCS_GUEST_CR4); - env->efer = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IA32_EFER); + env->efer = rvmcs(cs->hvf->fd, VMCS_GUEST_IA32_EFER); } -void hvf_get_msrs(CPUState *cpu_state) +void hvf_get_msrs(CPUState *cs) { - CPUX86State *env = &X86_CPU(cpu_state)->env; + CPUX86State *env = &X86_CPU(cs)->env; uint64_t tmp; - hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_CS, &tmp); + hv_vcpu_read_msr(cs->hvf->fd, MSR_IA32_SYSENTER_CS, &tmp); env->sysenter_cs = tmp; - hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_ESP, &tmp); + hv_vcpu_read_msr(cs->hvf->fd, MSR_IA32_SYSENTER_ESP, &tmp); env->sysenter_esp = tmp; - hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_EIP, &tmp); + hv_vcpu_read_msr(cs->hvf->fd, MSR_IA32_SYSENTER_EIP, &tmp); env->sysenter_eip = tmp; - hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_STAR, &env->star); + hv_vcpu_read_msr(cs->hvf->fd, MSR_STAR, &env->star); #ifdef TARGET_X86_64 - hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_CSTAR, &env->cstar); - hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_KERNELGSBASE, &env->kernelgsbase); - hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_FMASK, &env->fmask); - hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_LSTAR, &env->lstar); + hv_vcpu_read_msr(cs->hvf->fd, MSR_CSTAR, &env->cstar); + hv_vcpu_read_msr(cs->hvf->fd, MSR_KERNELGSBASE, &env->kernelgsbase); + hv_vcpu_read_msr(cs->hvf->fd, MSR_FMASK, &env->fmask); + hv_vcpu_read_msr(cs->hvf->fd, MSR_LSTAR, &env->lstar); #endif - hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_APICBASE, &tmp); + hv_vcpu_read_msr(cs->hvf->fd, MSR_IA32_APICBASE, &tmp); - env->tsc = rdtscp() + rvmcs(cpu_state->hvf->fd, VMCS_TSC_OFFSET); + env->tsc = rdtscp() + rvmcs(cs->hvf->fd, 
VMCS_TSC_OFFSET); } -int hvf_put_registers(CPUState *cpu_state) +int hvf_put_registers(CPUState *cs) { - X86CPU *x86cpu = X86_CPU(cpu_state); + X86CPU *x86cpu = X86_CPU(cs); CPUX86State *env = &x86cpu->env; - wreg(cpu_state->hvf->fd, HV_X86_RAX, env->regs[R_EAX]); - wreg(cpu_state->hvf->fd, HV_X86_RBX, env->regs[R_EBX]); - wreg(cpu_state->hvf->fd, HV_X86_RCX, env->regs[R_ECX]); - wreg(cpu_state->hvf->fd, HV_X86_RDX, env->regs[R_EDX]); - wreg(cpu_state->hvf->fd, HV_X86_RBP, env->regs[R_EBP]); - wreg(cpu_state->hvf->fd, HV_X86_RSP, env->regs[R_ESP]); - wreg(cpu_state->hvf->fd, HV_X86_RSI, env->regs[R_ESI]); - wreg(cpu_state->hvf->fd, HV_X86_RDI, env->regs[R_EDI]); - wreg(cpu_state->hvf->fd, HV_X86_R8, env->regs[8]); - wreg(cpu_state->hvf->fd, HV_X86_R9, env->regs[9]); - wreg(cpu_state->hvf->fd, HV_X86_R10, env->regs[10]); - wreg(cpu_state->hvf->fd, HV_X86_R11, env->regs[11]); - wreg(cpu_state->hvf->fd, HV_X86_R12, env->regs[12]); - wreg(cpu_state->hvf->fd, HV_X86_R13, env->regs[13]); - wreg(cpu_state->hvf->fd, HV_X86_R14, env->regs[14]); - wreg(cpu_state->hvf->fd, HV_X86_R15, env->regs[15]); - wreg(cpu_state->hvf->fd, HV_X86_RFLAGS, env->eflags); - wreg(cpu_state->hvf->fd, HV_X86_RIP, env->eip); + wreg(cs->hvf->fd, HV_X86_RAX, env->regs[R_EAX]); + wreg(cs->hvf->fd, HV_X86_RBX, env->regs[R_EBX]); + wreg(cs->hvf->fd, HV_X86_RCX, env->regs[R_ECX]); + wreg(cs->hvf->fd, HV_X86_RDX, env->regs[R_EDX]); + wreg(cs->hvf->fd, HV_X86_RBP, env->regs[R_EBP]); + wreg(cs->hvf->fd, HV_X86_RSP, env->regs[R_ESP]); + wreg(cs->hvf->fd, HV_X86_RSI, env->regs[R_ESI]); + wreg(cs->hvf->fd, HV_X86_RDI, env->regs[R_EDI]); + wreg(cs->hvf->fd, HV_X86_R8, env->regs[8]); + wreg(cs->hvf->fd, HV_X86_R9, env->regs[9]); + wreg(cs->hvf->fd, HV_X86_R10, env->regs[10]); + wreg(cs->hvf->fd, HV_X86_R11, env->regs[11]); + wreg(cs->hvf->fd, HV_X86_R12, env->regs[12]); + wreg(cs->hvf->fd, HV_X86_R13, env->regs[13]); + wreg(cs->hvf->fd, HV_X86_R14, env->regs[14]); + wreg(cs->hvf->fd, HV_X86_R15, env->regs[15]); + wreg(cs->hvf->fd, HV_X86_RFLAGS, env->eflags); + wreg(cs->hvf->fd, HV_X86_RIP, env->eip); - wreg(cpu_state->hvf->fd, HV_X86_XCR0, env->xcr0); + wreg(cs->hvf->fd, HV_X86_XCR0, env->xcr0); - hvf_put_xsave(cpu_state); + hvf_put_xsave(cs); - hvf_put_segments(cpu_state); + hvf_put_segments(cs); - hvf_put_msrs(cpu_state); + hvf_put_msrs(cs); - wreg(cpu_state->hvf->fd, HV_X86_DR0, env->dr[0]); - wreg(cpu_state->hvf->fd, HV_X86_DR1, env->dr[1]); - wreg(cpu_state->hvf->fd, HV_X86_DR2, env->dr[2]); - wreg(cpu_state->hvf->fd, HV_X86_DR3, env->dr[3]); - wreg(cpu_state->hvf->fd, HV_X86_DR4, env->dr[4]); - wreg(cpu_state->hvf->fd, HV_X86_DR5, env->dr[5]); - wreg(cpu_state->hvf->fd, HV_X86_DR6, env->dr[6]); - wreg(cpu_state->hvf->fd, HV_X86_DR7, env->dr[7]); + wreg(cs->hvf->fd, HV_X86_DR0, env->dr[0]); + wreg(cs->hvf->fd, HV_X86_DR1, env->dr[1]); + wreg(cs->hvf->fd, HV_X86_DR2, env->dr[2]); + wreg(cs->hvf->fd, HV_X86_DR3, env->dr[3]); + wreg(cs->hvf->fd, HV_X86_DR4, env->dr[4]); + wreg(cs->hvf->fd, HV_X86_DR5, env->dr[5]); + wreg(cs->hvf->fd, HV_X86_DR6, env->dr[6]); + wreg(cs->hvf->fd, HV_X86_DR7, env->dr[7]); return 0; } -int hvf_get_registers(CPUState *cpu_state) +int hvf_get_registers(CPUState *cs) { - X86CPU *x86cpu = X86_CPU(cpu_state); + X86CPU *x86cpu = X86_CPU(cs); CPUX86State *env = &x86cpu->env; - env->regs[R_EAX] = rreg(cpu_state->hvf->fd, HV_X86_RAX); - env->regs[R_EBX] = rreg(cpu_state->hvf->fd, HV_X86_RBX); - env->regs[R_ECX] = rreg(cpu_state->hvf->fd, HV_X86_RCX); - env->regs[R_EDX] = rreg(cpu_state->hvf->fd, HV_X86_RDX); - 
env->regs[R_EBP] = rreg(cpu_state->hvf->fd, HV_X86_RBP); - env->regs[R_ESP] = rreg(cpu_state->hvf->fd, HV_X86_RSP); - env->regs[R_ESI] = rreg(cpu_state->hvf->fd, HV_X86_RSI); - env->regs[R_EDI] = rreg(cpu_state->hvf->fd, HV_X86_RDI); - env->regs[8] = rreg(cpu_state->hvf->fd, HV_X86_R8); - env->regs[9] = rreg(cpu_state->hvf->fd, HV_X86_R9); - env->regs[10] = rreg(cpu_state->hvf->fd, HV_X86_R10); - env->regs[11] = rreg(cpu_state->hvf->fd, HV_X86_R11); - env->regs[12] = rreg(cpu_state->hvf->fd, HV_X86_R12); - env->regs[13] = rreg(cpu_state->hvf->fd, HV_X86_R13); - env->regs[14] = rreg(cpu_state->hvf->fd, HV_X86_R14); - env->regs[15] = rreg(cpu_state->hvf->fd, HV_X86_R15); + env->regs[R_EAX] = rreg(cs->hvf->fd, HV_X86_RAX); + env->regs[R_EBX] = rreg(cs->hvf->fd, HV_X86_RBX); + env->regs[R_ECX] = rreg(cs->hvf->fd, HV_X86_RCX); + env->regs[R_EDX] = rreg(cs->hvf->fd, HV_X86_RDX); + env->regs[R_EBP] = rreg(cs->hvf->fd, HV_X86_RBP); + env->regs[R_ESP] = rreg(cs->hvf->fd, HV_X86_RSP); + env->regs[R_ESI] = rreg(cs->hvf->fd, HV_X86_RSI); + env->regs[R_EDI] = rreg(cs->hvf->fd, HV_X86_RDI); + env->regs[8] = rreg(cs->hvf->fd, HV_X86_R8); + env->regs[9] = rreg(cs->hvf->fd, HV_X86_R9); + env->regs[10] = rreg(cs->hvf->fd, HV_X86_R10); + env->regs[11] = rreg(cs->hvf->fd, HV_X86_R11); + env->regs[12] = rreg(cs->hvf->fd, HV_X86_R12); + env->regs[13] = rreg(cs->hvf->fd, HV_X86_R13); + env->regs[14] = rreg(cs->hvf->fd, HV_X86_R14); + env->regs[15] = rreg(cs->hvf->fd, HV_X86_R15); - env->eflags = rreg(cpu_state->hvf->fd, HV_X86_RFLAGS); - env->eip = rreg(cpu_state->hvf->fd, HV_X86_RIP); + env->eflags = rreg(cs->hvf->fd, HV_X86_RFLAGS); + env->eip = rreg(cs->hvf->fd, HV_X86_RIP); - hvf_get_xsave(cpu_state); - env->xcr0 = rreg(cpu_state->hvf->fd, HV_X86_XCR0); + hvf_get_xsave(cs); + env->xcr0 = rreg(cs->hvf->fd, HV_X86_XCR0); - hvf_get_segments(cpu_state); - hvf_get_msrs(cpu_state); + hvf_get_segments(cs); + hvf_get_msrs(cs); - env->dr[0] = rreg(cpu_state->hvf->fd, HV_X86_DR0); - env->dr[1] = rreg(cpu_state->hvf->fd, HV_X86_DR1); - env->dr[2] = rreg(cpu_state->hvf->fd, HV_X86_DR2); - env->dr[3] = rreg(cpu_state->hvf->fd, HV_X86_DR3); - env->dr[4] = rreg(cpu_state->hvf->fd, HV_X86_DR4); - env->dr[5] = rreg(cpu_state->hvf->fd, HV_X86_DR5); - env->dr[6] = rreg(cpu_state->hvf->fd, HV_X86_DR6); - env->dr[7] = rreg(cpu_state->hvf->fd, HV_X86_DR7); + env->dr[0] = rreg(cs->hvf->fd, HV_X86_DR0); + env->dr[1] = rreg(cs->hvf->fd, HV_X86_DR1); + env->dr[2] = rreg(cs->hvf->fd, HV_X86_DR2); + env->dr[3] = rreg(cs->hvf->fd, HV_X86_DR3); + env->dr[4] = rreg(cs->hvf->fd, HV_X86_DR4); + env->dr[5] = rreg(cs->hvf->fd, HV_X86_DR5); + env->dr[6] = rreg(cs->hvf->fd, HV_X86_DR6); + env->dr[7] = rreg(cs->hvf->fd, HV_X86_DR7); x86_update_hflags(env); return 0; } -static void vmx_set_int_window_exiting(CPUState *cpu) +static void vmx_set_int_window_exiting(CPUState *cs) { uint64_t val; - val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS); - wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val | + val = rvmcs(cs->hvf->fd, VMCS_PRI_PROC_BASED_CTLS); + wvmcs(cs->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val | VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING); } -void vmx_clear_int_window_exiting(CPUState *cpu) +void vmx_clear_int_window_exiting(CPUState *cs) { uint64_t val; - val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS); - wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val & + val = rvmcs(cs->hvf->fd, VMCS_PRI_PROC_BASED_CTLS); + wvmcs(cs->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val & ~VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING); } -bool 
hvf_inject_interrupts(CPUState *cpu_state) +bool hvf_inject_interrupts(CPUState *cs) { - X86CPU *x86cpu = X86_CPU(cpu_state); + X86CPU *x86cpu = X86_CPU(cs); CPUX86State *env = &x86cpu->env; uint8_t vector; @@ -372,89 +372,89 @@ bool hvf_inject_interrupts(CPUState *cpu_state) uint64_t info = 0; if (have_event) { info = vector | intr_type | VMCS_INTR_VALID; - uint64_t reason = rvmcs(cpu_state->hvf->fd, VMCS_EXIT_REASON); + uint64_t reason = rvmcs(cs->hvf->fd, VMCS_EXIT_REASON); if (env->nmi_injected && reason != EXIT_REASON_TASK_SWITCH) { - vmx_clear_nmi_blocking(cpu_state); + vmx_clear_nmi_blocking(cs); } if (!(env->hflags2 & HF2_NMI_MASK) || intr_type != VMCS_INTR_T_NMI) { info &= ~(1 << 12); /* clear undefined bit */ if (intr_type == VMCS_INTR_T_SWINTR || intr_type == VMCS_INTR_T_SWEXCEPTION) { - wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INST_LENGTH, env->ins_len); + wvmcs(cs->hvf->fd, VMCS_ENTRY_INST_LENGTH, env->ins_len); } if (env->has_error_code) { - wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_EXCEPTION_ERROR, + wvmcs(cs->hvf->fd, VMCS_ENTRY_EXCEPTION_ERROR, env->error_code); /* Indicate that VMCS_ENTRY_EXCEPTION_ERROR is valid */ info |= VMCS_INTR_DEL_ERRCODE; } /*printf("reinject %lx err %d\n", info, err);*/ - wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, info); + wvmcs(cs->hvf->fd, VMCS_ENTRY_INTR_INFO, info); }; } - if (cpu_state->interrupt_request & CPU_INTERRUPT_NMI) { + if (cs->interrupt_request & CPU_INTERRUPT_NMI) { if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) { - cpu_state->interrupt_request &= ~CPU_INTERRUPT_NMI; + cs->interrupt_request &= ~CPU_INTERRUPT_NMI; info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | EXCP02_NMI; - wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, info); + wvmcs(cs->hvf->fd, VMCS_ENTRY_INTR_INFO, info); } else { - vmx_set_nmi_window_exiting(cpu_state); + vmx_set_nmi_window_exiting(cs); } } if (!(env->hflags & HF_INHIBIT_IRQ_MASK) && - (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) && + (cs->interrupt_request & CPU_INTERRUPT_HARD) && (env->eflags & IF_MASK) && !(info & VMCS_INTR_VALID)) { int line = cpu_get_pic_interrupt(&x86cpu->env); - cpu_state->interrupt_request &= ~CPU_INTERRUPT_HARD; + cs->interrupt_request &= ~CPU_INTERRUPT_HARD; if (line >= 0) { - wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, line | + wvmcs(cs->hvf->fd, VMCS_ENTRY_INTR_INFO, line | VMCS_INTR_VALID | VMCS_INTR_T_HWINTR); } } - if (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) { - vmx_set_int_window_exiting(cpu_state); + if (cs->interrupt_request & CPU_INTERRUPT_HARD) { + vmx_set_int_window_exiting(cs); } - return (cpu_state->interrupt_request + return (cs->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)); } -int hvf_process_events(CPUState *cpu_state) +int hvf_process_events(CPUState *cs) { - X86CPU *cpu = X86_CPU(cpu_state); + X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; - if (!cpu_state->vcpu_dirty) { + if (!cs->vcpu_dirty) { /* light weight sync for CPU_INTERRUPT_HARD and IF_MASK */ - env->eflags = rreg(cpu_state->hvf->fd, HV_X86_RFLAGS); + env->eflags = rreg(cs->hvf->fd, HV_X86_RFLAGS); } - if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) { - cpu_synchronize_state(cpu_state); + if (cs->interrupt_request & CPU_INTERRUPT_INIT) { + cpu_synchronize_state(cs); do_cpu_init(cpu); } - if (cpu_state->interrupt_request & CPU_INTERRUPT_POLL) { - cpu_state->interrupt_request &= ~CPU_INTERRUPT_POLL; + if (cs->interrupt_request & CPU_INTERRUPT_POLL) { + cs->interrupt_request &= ~CPU_INTERRUPT_POLL; apic_poll_irq(cpu->apic_state); } - if 
(((cpu_state->interrupt_request & CPU_INTERRUPT_HARD) && + if (((cs->interrupt_request & CPU_INTERRUPT_HARD) && (env->eflags & IF_MASK)) || - (cpu_state->interrupt_request & CPU_INTERRUPT_NMI)) { - cpu_state->halted = 0; + (cs->interrupt_request & CPU_INTERRUPT_NMI)) { + cs->halted = 0; } - if (cpu_state->interrupt_request & CPU_INTERRUPT_SIPI) { - cpu_synchronize_state(cpu_state); + if (cs->interrupt_request & CPU_INTERRUPT_SIPI) { + cpu_synchronize_state(cs); do_cpu_sipi(cpu); } - if (cpu_state->interrupt_request & CPU_INTERRUPT_TPR) { - cpu_state->interrupt_request &= ~CPU_INTERRUPT_TPR; - cpu_synchronize_state(cpu_state); + if (cs->interrupt_request & CPU_INTERRUPT_TPR) { + cs->interrupt_request &= ~CPU_INTERRUPT_TPR; + cpu_synchronize_state(cs); apic_handle_tpr_access_report(cpu->apic_state, env->eip, env->tpr_access_type); } - return cpu_state->halted; + return cs->halted; } diff --git a/target/i386/hvf/x86hvf.h b/target/i386/hvf/x86hvf.h index db6003d6bd..423a89b6ad 100644 --- a/target/i386/hvf/x86hvf.h +++ b/target/i386/hvf/x86hvf.h @@ -20,15 +20,15 @@ #include "cpu.h" #include "x86_descr.h" -int hvf_process_events(CPUState *); -bool hvf_inject_interrupts(CPUState *); -void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg, +int hvf_process_events(CPUState *cs); +bool hvf_inject_interrupts(CPUState *cs); +void hvf_set_segment(CPUState *cs, struct vmx_segment *vmx_seg, SegmentCache *qseg, bool is_tr); void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg); -void hvf_put_xsave(CPUState *cpu_state); -void hvf_put_msrs(CPUState *cpu_state); -void hvf_get_xsave(CPUState *cpu_state); -void hvf_get_msrs(CPUState *cpu_state); -void vmx_clear_int_window_exiting(CPUState *cpu); -void vmx_update_tpr(CPUState *cpu); +void hvf_put_xsave(CPUState *cs); +void hvf_put_msrs(CPUState *cs); +void hvf_get_xsave(CPUState *cs); +void hvf_get_msrs(CPUState *cs); +void vmx_clear_int_window_exiting(CPUState *cs); +void vmx_update_tpr(CPUState *cs); #endif From 3b295bcb3289afec09508786032f4ba5d657a934 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Wed, 21 Jun 2023 13:15:27 +0200 Subject: [PATCH 18/30] accel: Rename HVF 'struct hvf_vcpu_state' -> AccelCPUState MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We want all accelerators to share the same opaque pointer in CPUState. Rename the 'hvf_vcpu_state' structure as 'AccelCPUState'. Use the generic 'accel' field of CPUState instead of 'hvf'. Replace g_malloc0() by g_new0() for readability. 
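For illustration only, a rough sketch of the resulting access pattern (it merely restates what the hunks below already do and is not an additional change): per-vCPU accelerator state is allocated on, and reached through, the generic 'accel' field instead of the HVF-specific 'hvf' member.

    /* hvf_init_vcpu(): allocate the per-vCPU accelerator state */
    cpu->accel = g_new0(AccelCPUState, 1);

    /* later HVF calls dereference the generic pointer, e.g. on teardown */
    hv_return_t ret = hv_vcpu_destroy(cpu->accel->fd);
    assert_hvf_ok(ret);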
Signed-off-by: Philippe Mathieu-Daudé Reviewed-by: Richard Henderson Tested-by: Peter Maydell Message-Id: <20230624174121.11508-17-philmd@linaro.org> --- accel/hvf/hvf-accel-ops.c | 19 ++-- include/hw/core/cpu.h | 4 - include/sysemu/hvf_int.h | 2 +- target/arm/hvf/hvf.c | 108 +++++++++---------- target/i386/hvf/hvf.c | 106 +++++++++--------- target/i386/hvf/vmx.h | 22 ++-- target/i386/hvf/x86.c | 28 ++--- target/i386/hvf/x86_descr.c | 26 ++--- target/i386/hvf/x86_emu.c | 62 +++++------ target/i386/hvf/x86_mmu.c | 4 +- target/i386/hvf/x86_task.c | 10 +- target/i386/hvf/x86hvf.c | 208 ++++++++++++++++++------------------ 12 files changed, 298 insertions(+), 301 deletions(-) diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c index 9c3da03c94..7db9d3099e 100644 --- a/accel/hvf/hvf-accel-ops.c +++ b/accel/hvf/hvf-accel-ops.c @@ -372,19 +372,19 @@ type_init(hvf_type_init); static void hvf_vcpu_destroy(CPUState *cpu) { - hv_return_t ret = hv_vcpu_destroy(cpu->hvf->fd); + hv_return_t ret = hv_vcpu_destroy(cpu->accel->fd); assert_hvf_ok(ret); hvf_arch_vcpu_destroy(cpu); - g_free(cpu->hvf); - cpu->hvf = NULL; + g_free(cpu->accel); + cpu->accel = NULL; } static int hvf_init_vcpu(CPUState *cpu) { int r; - cpu->hvf = g_malloc0(sizeof(*cpu->hvf)); + cpu->accel = g_new0(AccelCPUState, 1); /* init cpu signals */ struct sigaction sigact; @@ -393,18 +393,19 @@ static int hvf_init_vcpu(CPUState *cpu) sigact.sa_handler = dummy_signal; sigaction(SIG_IPI, &sigact, NULL); - pthread_sigmask(SIG_BLOCK, NULL, &cpu->hvf->unblock_ipi_mask); - sigdelset(&cpu->hvf->unblock_ipi_mask, SIG_IPI); + pthread_sigmask(SIG_BLOCK, NULL, &cpu->accel->unblock_ipi_mask); + sigdelset(&cpu->accel->unblock_ipi_mask, SIG_IPI); #ifdef __aarch64__ - r = hv_vcpu_create(&cpu->hvf->fd, (hv_vcpu_exit_t **)&cpu->hvf->exit, NULL); + r = hv_vcpu_create(&cpu->accel->fd, + (hv_vcpu_exit_t **)&cpu->accel->exit, NULL); #else - r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf->fd, HV_VCPU_DEFAULT); + r = hv_vcpu_create((hv_vcpuid_t *)&cpu->accel->fd, HV_VCPU_DEFAULT); #endif cpu->vcpu_dirty = 1; assert_hvf_ok(r); - cpu->hvf->guest_debug_enabled = false; + cpu->accel->guest_debug_enabled = false; return hvf_arch_init_vcpu(cpu); } diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index 476b39e42e..b08f8b7079 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -241,8 +241,6 @@ typedef struct SavedIOTLB { struct KVMState; struct kvm_run; -struct hvf_vcpu_state; - /* work queue */ /* The union type allows passing of 64 bit target pointers on 32 bit @@ -443,8 +441,6 @@ struct CPUState { /* Used for user-only emulation of prctl(PR_SET_UNALIGN). 
*/ bool prctl_unalign_sigbus; - struct hvf_vcpu_state *hvf; - /* track IOMMUs whose translations we've cached in the TCG TLB */ GArray *iommu_notifiers; }; diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h index 6ab119e49f..718beddcdd 100644 --- a/include/sysemu/hvf_int.h +++ b/include/sysemu/hvf_int.h @@ -49,7 +49,7 @@ struct HVFState { }; extern HVFState *hvf_state; -struct hvf_vcpu_state { +struct AccelCPUState { uint64_t fd; void *exit; bool vtimer_masked; diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c index 8f72624586..8fce64bbf6 100644 --- a/target/arm/hvf/hvf.c +++ b/target/arm/hvf/hvf.c @@ -544,29 +544,29 @@ int hvf_get_registers(CPUState *cpu) int i; for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) { - ret = hv_vcpu_get_reg(cpu->hvf->fd, hvf_reg_match[i].reg, &val); + ret = hv_vcpu_get_reg(cpu->accel->fd, hvf_reg_match[i].reg, &val); *(uint64_t *)((void *)env + hvf_reg_match[i].offset) = val; assert_hvf_ok(ret); } for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) { - ret = hv_vcpu_get_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg, + ret = hv_vcpu_get_simd_fp_reg(cpu->accel->fd, hvf_fpreg_match[i].reg, &fpval); memcpy((void *)env + hvf_fpreg_match[i].offset, &fpval, sizeof(fpval)); assert_hvf_ok(ret); } val = 0; - ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPCR, &val); + ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_FPCR, &val); assert_hvf_ok(ret); vfp_set_fpcr(env, val); val = 0; - ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPSR, &val); + ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_FPSR, &val); assert_hvf_ok(ret); vfp_set_fpsr(env, val); - ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_CPSR, &val); + ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_CPSR, &val); assert_hvf_ok(ret); pstate_write(env, val); @@ -575,7 +575,7 @@ int hvf_get_registers(CPUState *cpu) continue; } - if (cpu->hvf->guest_debug_enabled) { + if (cpu->accel->guest_debug_enabled) { /* Handle debug registers */ switch (hvf_sreg_match[i].reg) { case HV_SYS_REG_DBGBVR0_EL1: @@ -661,7 +661,7 @@ int hvf_get_registers(CPUState *cpu) } } - ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, &val); + ret = hv_vcpu_get_sys_reg(cpu->accel->fd, hvf_sreg_match[i].reg, &val); assert_hvf_ok(ret); arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val; @@ -684,24 +684,24 @@ int hvf_put_registers(CPUState *cpu) for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) { val = *(uint64_t *)((void *)env + hvf_reg_match[i].offset); - ret = hv_vcpu_set_reg(cpu->hvf->fd, hvf_reg_match[i].reg, val); + ret = hv_vcpu_set_reg(cpu->accel->fd, hvf_reg_match[i].reg, val); assert_hvf_ok(ret); } for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) { memcpy(&fpval, (void *)env + hvf_fpreg_match[i].offset, sizeof(fpval)); - ret = hv_vcpu_set_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg, + ret = hv_vcpu_set_simd_fp_reg(cpu->accel->fd, hvf_fpreg_match[i].reg, fpval); assert_hvf_ok(ret); } - ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPCR, vfp_get_fpcr(env)); + ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_FPCR, vfp_get_fpcr(env)); assert_hvf_ok(ret); - ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPSR, vfp_get_fpsr(env)); + ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_FPSR, vfp_get_fpsr(env)); assert_hvf_ok(ret); - ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_CPSR, pstate_read(env)); + ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_CPSR, pstate_read(env)); assert_hvf_ok(ret); aarch64_save_sp(env, arm_current_el(env)); @@ -712,7 +712,7 @@ int hvf_put_registers(CPUState *cpu) continue; } - if (cpu->hvf->guest_debug_enabled) { + if 
(cpu->accel->guest_debug_enabled) { /* Handle debug registers */ switch (hvf_sreg_match[i].reg) { case HV_SYS_REG_DBGBVR0_EL1: @@ -789,11 +789,11 @@ int hvf_put_registers(CPUState *cpu) } val = arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx]; - ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, val); + ret = hv_vcpu_set_sys_reg(cpu->accel->fd, hvf_sreg_match[i].reg, val); assert_hvf_ok(ret); } - ret = hv_vcpu_set_vtimer_offset(cpu->hvf->fd, hvf_state->vtimer_offset); + ret = hv_vcpu_set_vtimer_offset(cpu->accel->fd, hvf_state->vtimer_offset); assert_hvf_ok(ret); return 0; @@ -814,7 +814,7 @@ static void hvf_set_reg(CPUState *cpu, int rt, uint64_t val) flush_cpu_state(cpu); if (rt < 31) { - r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_X0 + rt, val); + r = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_X0 + rt, val); assert_hvf_ok(r); } } @@ -827,7 +827,7 @@ static uint64_t hvf_get_reg(CPUState *cpu, int rt) flush_cpu_state(cpu); if (rt < 31) { - r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_X0 + rt, &val); + r = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_X0 + rt, &val); assert_hvf_ok(r); } @@ -969,22 +969,22 @@ int hvf_arch_init_vcpu(CPUState *cpu) assert(write_cpustate_to_list(arm_cpu, false)); /* Set CP_NO_RAW system registers on init */ - ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MIDR_EL1, + ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_MIDR_EL1, arm_cpu->midr); assert_hvf_ok(ret); - ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MPIDR_EL1, + ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_MPIDR_EL1, arm_cpu->mp_affinity); assert_hvf_ok(ret); - ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr); + ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr); assert_hvf_ok(ret); pfr |= env->gicv3state ? 
(1 << 24) : 0; - ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr); + ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr); assert_hvf_ok(ret); /* We're limited to underlying hardware caps, override internal versions */ - ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64MMFR0_EL1, + ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1, &arm_cpu->isar.id_aa64mmfr0); assert_hvf_ok(ret); @@ -994,7 +994,7 @@ int hvf_arch_init_vcpu(CPUState *cpu) void hvf_kick_vcpu_thread(CPUState *cpu) { cpus_kick_thread(cpu); - hv_vcpus_exit(&cpu->hvf->fd, 1); + hv_vcpus_exit(&cpu->accel->fd, 1); } static void hvf_raise_exception(CPUState *cpu, uint32_t excp, @@ -1678,13 +1678,13 @@ static int hvf_inject_interrupts(CPUState *cpu) { if (cpu->interrupt_request & CPU_INTERRUPT_FIQ) { trace_hvf_inject_fiq(); - hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_FIQ, + hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_FIQ, true); } if (cpu->interrupt_request & CPU_INTERRUPT_HARD) { trace_hvf_inject_irq(); - hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_IRQ, + hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_IRQ, true); } @@ -1718,7 +1718,7 @@ static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts) */ qatomic_set_mb(&cpu->thread_kicked, false); qemu_mutex_unlock_iothread(); - pselect(0, 0, 0, 0, ts, &cpu->hvf->unblock_ipi_mask); + pselect(0, 0, 0, 0, ts, &cpu->accel->unblock_ipi_mask); qemu_mutex_lock_iothread(); } @@ -1739,7 +1739,7 @@ static void hvf_wfi(CPUState *cpu) return; } - r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl); + r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl); assert_hvf_ok(r); if (!(ctl & 1) || (ctl & 2)) { @@ -1748,7 +1748,7 @@ static void hvf_wfi(CPUState *cpu) return; } - r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval); + r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval); assert_hvf_ok(r); ticks_to_sleep = cval - hvf_vtimer_val(); @@ -1781,12 +1781,12 @@ static void hvf_sync_vtimer(CPUState *cpu) uint64_t ctl; bool irq_state; - if (!cpu->hvf->vtimer_masked) { + if (!cpu->accel->vtimer_masked) { /* We will get notified on vtimer changes by hvf, nothing to do */ return; } - r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl); + r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl); assert_hvf_ok(r); irq_state = (ctl & (TMR_CTL_ENABLE | TMR_CTL_IMASK | TMR_CTL_ISTATUS)) == @@ -1795,8 +1795,8 @@ static void hvf_sync_vtimer(CPUState *cpu) if (!irq_state) { /* Timer no longer asserting, we can unmask it */ - hv_vcpu_set_vtimer_mask(cpu->hvf->fd, false); - cpu->hvf->vtimer_masked = false; + hv_vcpu_set_vtimer_mask(cpu->accel->fd, false); + cpu->accel->vtimer_masked = false; } } @@ -1805,7 +1805,7 @@ int hvf_vcpu_exec(CPUState *cpu) ARMCPU *arm_cpu = ARM_CPU(cpu); CPUARMState *env = &arm_cpu->env; int ret; - hv_vcpu_exit_t *hvf_exit = cpu->hvf->exit; + hv_vcpu_exit_t *hvf_exit = cpu->accel->exit; hv_return_t r; bool advance_pc = false; @@ -1821,7 +1821,7 @@ int hvf_vcpu_exec(CPUState *cpu) flush_cpu_state(cpu); qemu_mutex_unlock_iothread(); - assert_hvf_ok(hv_vcpu_run(cpu->hvf->fd)); + assert_hvf_ok(hv_vcpu_run(cpu->accel->fd)); /* handle VMEXIT */ uint64_t exit_reason = hvf_exit->reason; @@ -1836,7 +1836,7 @@ int hvf_vcpu_exec(CPUState *cpu) break; case HV_EXIT_REASON_VTIMER_ACTIVATED: qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 1); - 
cpu->hvf->vtimer_masked = true; + cpu->accel->vtimer_masked = true; return 0; case HV_EXIT_REASON_CANCELED: /* we got kicked, no exit to process */ @@ -1990,10 +1990,10 @@ int hvf_vcpu_exec(CPUState *cpu) flush_cpu_state(cpu); - r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_PC, &pc); + r = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_PC, &pc); assert_hvf_ok(r); pc += 4; - r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_PC, pc); + r = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_PC, pc); assert_hvf_ok(r); /* Handle single-stepping over instructions which trigger a VM exit */ @@ -2113,29 +2113,29 @@ static void hvf_put_gdbstub_debug_registers(CPUState *cpu) for (i = 0; i < cur_hw_bps; i++) { HWBreakpoint *bp = get_hw_bp(i); - r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbcr_regs[i], bp->bcr); + r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i], bp->bcr); assert_hvf_ok(r); - r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbvr_regs[i], bp->bvr); + r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i], bp->bvr); assert_hvf_ok(r); } for (i = cur_hw_bps; i < max_hw_bps; i++) { - r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbcr_regs[i], 0); + r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i], 0); assert_hvf_ok(r); - r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbvr_regs[i], 0); + r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i], 0); assert_hvf_ok(r); } for (i = 0; i < cur_hw_wps; i++) { HWWatchpoint *wp = get_hw_wp(i); - r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwcr_regs[i], wp->wcr); + r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i], wp->wcr); assert_hvf_ok(r); - r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwvr_regs[i], wp->wvr); + r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i], wp->wvr); assert_hvf_ok(r); } for (i = cur_hw_wps; i < max_hw_wps; i++) { - r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwcr_regs[i], 0); + r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i], 0); assert_hvf_ok(r); - r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwvr_regs[i], 0); + r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i], 0); assert_hvf_ok(r); } } @@ -2152,19 +2152,19 @@ static void hvf_put_guest_debug_registers(CPUState *cpu) int i; for (i = 0; i < max_hw_bps; i++) { - r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbcr_regs[i], + r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i], env->cp15.dbgbcr[i]); assert_hvf_ok(r); - r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbvr_regs[i], + r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i], env->cp15.dbgbvr[i]); assert_hvf_ok(r); } for (i = 0; i < max_hw_wps; i++) { - r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwcr_regs[i], + r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i], env->cp15.dbgwcr[i]); assert_hvf_ok(r); - r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwvr_regs[i], + r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i], env->cp15.dbgwvr[i]); assert_hvf_ok(r); } @@ -2184,16 +2184,16 @@ static void hvf_arch_set_traps(void) /* Check whether guest debugging is enabled for at least one vCPU; if it * is, enable exiting the guest on all vCPUs */ CPU_FOREACH(cpu) { - should_enable_traps |= cpu->hvf->guest_debug_enabled; + should_enable_traps |= cpu->accel->guest_debug_enabled; } CPU_FOREACH(cpu) { /* Set whether debug exceptions exit the guest */ - r = hv_vcpu_set_trap_debug_exceptions(cpu->hvf->fd, + r = hv_vcpu_set_trap_debug_exceptions(cpu->accel->fd, should_enable_traps); assert_hvf_ok(r); /* Set whether accesses to debug registers exit the guest */ - r = hv_vcpu_set_trap_debug_reg_accesses(cpu->hvf->fd, + r = hv_vcpu_set_trap_debug_reg_accesses(cpu->accel->fd, should_enable_traps); 
assert_hvf_ok(r); } @@ -2205,12 +2205,12 @@ void hvf_arch_update_guest_debug(CPUState *cpu) CPUARMState *env = &arm_cpu->env; /* Check whether guest debugging is enabled */ - cpu->hvf->guest_debug_enabled = cpu->singlestep_enabled || + cpu->accel->guest_debug_enabled = cpu->singlestep_enabled || hvf_sw_breakpoints_active(cpu) || hvf_arm_hw_debug_active(cpu); /* Update debug registers */ - if (cpu->hvf->guest_debug_enabled) { + if (cpu->accel->guest_debug_enabled) { hvf_put_gdbstub_debug_registers(cpu); } else { hvf_put_guest_debug_registers(cpu); diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c index f6775c942a..b9cbcc02a8 100644 --- a/target/i386/hvf/hvf.c +++ b/target/i386/hvf/hvf.c @@ -81,11 +81,11 @@ void vmx_update_tpr(CPUState *cpu) int tpr = cpu_get_apic_tpr(x86_cpu->apic_state) << 4; int irr = apic_get_highest_priority_irr(x86_cpu->apic_state); - wreg(cpu->hvf->fd, HV_X86_TPR, tpr); + wreg(cpu->accel->fd, HV_X86_TPR, tpr); if (irr == -1) { - wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, 0); + wvmcs(cpu->accel->fd, VMCS_TPR_THRESHOLD, 0); } else { - wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? tpr >> 4 : + wvmcs(cpu->accel->fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? tpr >> 4 : irr >> 4); } } @@ -93,7 +93,7 @@ void vmx_update_tpr(CPUState *cpu) static void update_apic_tpr(CPUState *cpu) { X86CPU *x86_cpu = X86_CPU(cpu); - int tpr = rreg(cpu->hvf->fd, HV_X86_TPR) >> 4; + int tpr = rreg(cpu->accel->fd, HV_X86_TPR) >> 4; cpu_set_apic_tpr(x86_cpu->apic_state, tpr); } @@ -256,12 +256,12 @@ int hvf_arch_init_vcpu(CPUState *cpu) } /* set VMCS control fields */ - wvmcs(cpu->hvf->fd, VMCS_PIN_BASED_CTLS, + wvmcs(cpu->accel->fd, VMCS_PIN_BASED_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx_cap_pinbased, VMCS_PIN_BASED_CTLS_EXTINT | VMCS_PIN_BASED_CTLS_NMI | VMCS_PIN_BASED_CTLS_VNMI)); - wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, + wvmcs(cpu->accel->fd, VMCS_PRI_PROC_BASED_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased, VMCS_PRI_PROC_BASED_CTLS_HLT | VMCS_PRI_PROC_BASED_CTLS_MWAIT | @@ -276,14 +276,14 @@ int hvf_arch_init_vcpu(CPUState *cpu) reqCap |= VMCS_PRI_PROC_BASED2_CTLS_RDTSCP; } - wvmcs(cpu->hvf->fd, VMCS_SEC_PROC_BASED_CTLS, + wvmcs(cpu->accel->fd, VMCS_SEC_PROC_BASED_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased2, reqCap)); - wvmcs(cpu->hvf->fd, VMCS_ENTRY_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx_cap_entry, - 0)); - wvmcs(cpu->hvf->fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */ + wvmcs(cpu->accel->fd, VMCS_ENTRY_CTLS, + cap2ctrl(hvf_state->hvf_caps->vmx_cap_entry, 0)); + wvmcs(cpu->accel->fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */ - wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, 0); + wvmcs(cpu->accel->fd, VMCS_TPR_THRESHOLD, 0); x86cpu = X86_CPU(cpu); x86cpu->env.xsave_buf_len = 4096; @@ -295,18 +295,18 @@ int hvf_arch_init_vcpu(CPUState *cpu) */ assert(hvf_get_supported_cpuid(0xd, 0, R_ECX) <= x86cpu->env.xsave_buf_len); - hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_STAR, 1); - hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_LSTAR, 1); - hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_CSTAR, 1); - hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_FMASK, 1); - hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_FSBASE, 1); - hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_GSBASE, 1); - hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_KERNELGSBASE, 1); - hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_TSC_AUX, 1); - hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_TSC, 1); - hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_CS, 1); - hv_vcpu_enable_native_msr(cpu->hvf->fd, 
MSR_IA32_SYSENTER_EIP, 1); - hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_ESP, 1); + hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_STAR, 1); + hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_LSTAR, 1); + hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_CSTAR, 1); + hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_FMASK, 1); + hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_FSBASE, 1); + hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_GSBASE, 1); + hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_KERNELGSBASE, 1); + hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_TSC_AUX, 1); + hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_IA32_TSC, 1); + hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_IA32_SYSENTER_CS, 1); + hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_IA32_SYSENTER_EIP, 1); + hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_IA32_SYSENTER_ESP, 1); return 0; } @@ -347,16 +347,16 @@ static void hvf_store_events(CPUState *cpu, uint32_t ins_len, uint64_t idtvec_in } if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { env->has_error_code = true; - env->error_code = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_ERROR); + env->error_code = rvmcs(cpu->accel->fd, VMCS_IDT_VECTORING_ERROR); } } - if ((rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY) & + if ((rvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY) & VMCS_INTERRUPTIBILITY_NMI_BLOCKING)) { env->hflags2 |= HF2_NMI_MASK; } else { env->hflags2 &= ~HF2_NMI_MASK; } - if (rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY) & + if (rvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY) & (VMCS_INTERRUPTIBILITY_STI_BLOCKING | VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) { env->hflags |= HF_INHIBIT_IRQ_MASK; @@ -435,20 +435,20 @@ int hvf_vcpu_exec(CPUState *cpu) return EXCP_HLT; } - hv_return_t r = hv_vcpu_run(cpu->hvf->fd); + hv_return_t r = hv_vcpu_run(cpu->accel->fd); assert_hvf_ok(r); /* handle VMEXIT */ - uint64_t exit_reason = rvmcs(cpu->hvf->fd, VMCS_EXIT_REASON); - uint64_t exit_qual = rvmcs(cpu->hvf->fd, VMCS_EXIT_QUALIFICATION); - uint32_t ins_len = (uint32_t)rvmcs(cpu->hvf->fd, + uint64_t exit_reason = rvmcs(cpu->accel->fd, VMCS_EXIT_REASON); + uint64_t exit_qual = rvmcs(cpu->accel->fd, VMCS_EXIT_QUALIFICATION); + uint32_t ins_len = (uint32_t)rvmcs(cpu->accel->fd, VMCS_EXIT_INSTRUCTION_LENGTH); - uint64_t idtvec_info = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_INFO); + uint64_t idtvec_info = rvmcs(cpu->accel->fd, VMCS_IDT_VECTORING_INFO); hvf_store_events(cpu, ins_len, idtvec_info); - rip = rreg(cpu->hvf->fd, HV_X86_RIP); - env->eflags = rreg(cpu->hvf->fd, HV_X86_RFLAGS); + rip = rreg(cpu->accel->fd, HV_X86_RIP); + env->eflags = rreg(cpu->accel->fd, HV_X86_RFLAGS); qemu_mutex_lock_iothread(); @@ -478,7 +478,7 @@ int hvf_vcpu_exec(CPUState *cpu) case EXIT_REASON_EPT_FAULT: { hvf_slot *slot; - uint64_t gpa = rvmcs(cpu->hvf->fd, VMCS_GUEST_PHYSICAL_ADDRESS); + uint64_t gpa = rvmcs(cpu->accel->fd, VMCS_GUEST_PHYSICAL_ADDRESS); if (((idtvec_info & VMCS_IDT_VEC_VALID) == 0) && ((exit_qual & EXIT_QUAL_NMIUDTI) != 0)) { @@ -523,7 +523,7 @@ int hvf_vcpu_exec(CPUState *cpu) store_regs(cpu); break; } else if (!string && !in) { - RAX(env) = rreg(cpu->hvf->fd, HV_X86_RAX); + RAX(env) = rreg(cpu->accel->fd, HV_X86_RAX); hvf_handle_io(env, port, &RAX(env), 1, size, 1); macvm_set_rip(cpu, rip + ins_len); break; @@ -539,21 +539,21 @@ int hvf_vcpu_exec(CPUState *cpu) break; } case EXIT_REASON_CPUID: { - uint32_t rax = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RAX); - uint32_t rbx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RBX); - uint32_t rcx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RCX); - uint32_t rdx 
= (uint32_t)rreg(cpu->hvf->fd, HV_X86_RDX); + uint32_t rax = (uint32_t)rreg(cpu->accel->fd, HV_X86_RAX); + uint32_t rbx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RBX); + uint32_t rcx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RCX); + uint32_t rdx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RDX); if (rax == 1) { /* CPUID1.ecx.OSXSAVE needs to know CR4 */ - env->cr[4] = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR4); + env->cr[4] = rvmcs(cpu->accel->fd, VMCS_GUEST_CR4); } hvf_cpu_x86_cpuid(env, rax, rcx, &rax, &rbx, &rcx, &rdx); - wreg(cpu->hvf->fd, HV_X86_RAX, rax); - wreg(cpu->hvf->fd, HV_X86_RBX, rbx); - wreg(cpu->hvf->fd, HV_X86_RCX, rcx); - wreg(cpu->hvf->fd, HV_X86_RDX, rdx); + wreg(cpu->accel->fd, HV_X86_RAX, rax); + wreg(cpu->accel->fd, HV_X86_RBX, rbx); + wreg(cpu->accel->fd, HV_X86_RCX, rcx); + wreg(cpu->accel->fd, HV_X86_RDX, rdx); macvm_set_rip(cpu, rip + ins_len); break; @@ -561,16 +561,16 @@ int hvf_vcpu_exec(CPUState *cpu) case EXIT_REASON_XSETBV: { X86CPU *x86_cpu = X86_CPU(cpu); CPUX86State *env = &x86_cpu->env; - uint32_t eax = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RAX); - uint32_t ecx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RCX); - uint32_t edx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RDX); + uint32_t eax = (uint32_t)rreg(cpu->accel->fd, HV_X86_RAX); + uint32_t ecx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RCX); + uint32_t edx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RDX); if (ecx) { macvm_set_rip(cpu, rip + ins_len); break; } env->xcr0 = ((uint64_t)edx << 32) | eax; - wreg(cpu->hvf->fd, HV_X86_XCR0, env->xcr0 | 1); + wreg(cpu->accel->fd, HV_X86_XCR0, env->xcr0 | 1); macvm_set_rip(cpu, rip + ins_len); break; } @@ -609,11 +609,11 @@ int hvf_vcpu_exec(CPUState *cpu) switch (cr) { case 0x0: { - macvm_set_cr0(cpu->hvf->fd, RRX(env, reg)); + macvm_set_cr0(cpu->accel->fd, RRX(env, reg)); break; } case 4: { - macvm_set_cr4(cpu->hvf->fd, RRX(env, reg)); + macvm_set_cr4(cpu->accel->fd, RRX(env, reg)); break; } case 8: { @@ -649,7 +649,7 @@ int hvf_vcpu_exec(CPUState *cpu) break; } case EXIT_REASON_TASK_SWITCH: { - uint64_t vinfo = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_INFO); + uint64_t vinfo = rvmcs(cpu->accel->fd, VMCS_IDT_VECTORING_INFO); x68_segment_selector sel = {.sel = exit_qual & 0xffff}; vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3, vinfo & VMCS_INTR_VALID, vinfo & VECTORING_INFO_VECTOR_MASK, vinfo @@ -662,8 +662,8 @@ int hvf_vcpu_exec(CPUState *cpu) break; } case EXIT_REASON_RDPMC: - wreg(cpu->hvf->fd, HV_X86_RAX, 0); - wreg(cpu->hvf->fd, HV_X86_RDX, 0); + wreg(cpu->accel->fd, HV_X86_RAX, 0); + wreg(cpu->accel->fd, HV_X86_RDX, 0); macvm_set_rip(cpu, rip + ins_len); break; case VMX_REASON_VMCALL: diff --git a/target/i386/hvf/vmx.h b/target/i386/hvf/vmx.h index fcd9a95e5b..0fffcfa46c 100644 --- a/target/i386/hvf/vmx.h +++ b/target/i386/hvf/vmx.h @@ -180,15 +180,15 @@ static inline void macvm_set_rip(CPUState *cpu, uint64_t rip) uint64_t val; /* BUG, should take considering overlap.. 
*/ - wreg(cpu->hvf->fd, HV_X86_RIP, rip); + wreg(cpu->accel->fd, HV_X86_RIP, rip); env->eip = rip; /* after moving forward in rip, we need to clean INTERRUPTABILITY */ - val = rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY); + val = rvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY); if (val & (VMCS_INTERRUPTIBILITY_STI_BLOCKING | VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) { env->hflags &= ~HF_INHIBIT_IRQ_MASK; - wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY, + wvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY, val & ~(VMCS_INTERRUPTIBILITY_STI_BLOCKING | VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)); } @@ -200,9 +200,9 @@ static inline void vmx_clear_nmi_blocking(CPUState *cpu) CPUX86State *env = &x86_cpu->env; env->hflags2 &= ~HF2_NMI_MASK; - uint32_t gi = (uint32_t) rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY); + uint32_t gi = (uint32_t) rvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY); gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING; - wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY, gi); + wvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY, gi); } static inline void vmx_set_nmi_blocking(CPUState *cpu) @@ -211,16 +211,16 @@ static inline void vmx_set_nmi_blocking(CPUState *cpu) CPUX86State *env = &x86_cpu->env; env->hflags2 |= HF2_NMI_MASK; - uint32_t gi = (uint32_t)rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY); + uint32_t gi = (uint32_t)rvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY); gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING; - wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY, gi); + wvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY, gi); } static inline void vmx_set_nmi_window_exiting(CPUState *cpu) { uint64_t val; - val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS); - wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val | + val = rvmcs(cpu->accel->fd, VMCS_PRI_PROC_BASED_CTLS); + wvmcs(cpu->accel->fd, VMCS_PRI_PROC_BASED_CTLS, val | VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING); } @@ -229,8 +229,8 @@ static inline void vmx_clear_nmi_window_exiting(CPUState *cpu) { uint64_t val; - val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS); - wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val & + val = rvmcs(cpu->accel->fd, VMCS_PRI_PROC_BASED_CTLS); + wvmcs(cpu->accel->fd, VMCS_PRI_PROC_BASED_CTLS, val & ~VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING); } diff --git a/target/i386/hvf/x86.c b/target/i386/hvf/x86.c index d086584f26..8ceea6398e 100644 --- a/target/i386/hvf/x86.c +++ b/target/i386/hvf/x86.c @@ -61,11 +61,11 @@ bool x86_read_segment_descriptor(struct CPUState *cpu, } if (GDT_SEL == sel.ti) { - base = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_BASE); - limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_LIMIT); + base = rvmcs(cpu->accel->fd, VMCS_GUEST_GDTR_BASE); + limit = rvmcs(cpu->accel->fd, VMCS_GUEST_GDTR_LIMIT); } else { - base = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_BASE); - limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_LIMIT); + base = rvmcs(cpu->accel->fd, VMCS_GUEST_LDTR_BASE); + limit = rvmcs(cpu->accel->fd, VMCS_GUEST_LDTR_LIMIT); } if (sel.index * 8 >= limit) { @@ -84,11 +84,11 @@ bool x86_write_segment_descriptor(struct CPUState *cpu, uint32_t limit; if (GDT_SEL == sel.ti) { - base = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_BASE); - limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_LIMIT); + base = rvmcs(cpu->accel->fd, VMCS_GUEST_GDTR_BASE); + limit = rvmcs(cpu->accel->fd, VMCS_GUEST_GDTR_LIMIT); } else { - base = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_BASE); - limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_LIMIT); + base = rvmcs(cpu->accel->fd, VMCS_GUEST_LDTR_BASE); + limit = rvmcs(cpu->accel->fd, 
VMCS_GUEST_LDTR_LIMIT); } if (sel.index * 8 >= limit) { @@ -102,8 +102,8 @@ bool x86_write_segment_descriptor(struct CPUState *cpu, bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc, int gate) { - target_ulong base = rvmcs(cpu->hvf->fd, VMCS_GUEST_IDTR_BASE); - uint32_t limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_IDTR_LIMIT); + target_ulong base = rvmcs(cpu->accel->fd, VMCS_GUEST_IDTR_BASE); + uint32_t limit = rvmcs(cpu->accel->fd, VMCS_GUEST_IDTR_LIMIT); memset(idt_desc, 0, sizeof(*idt_desc)); if (gate * 8 >= limit) { @@ -117,7 +117,7 @@ bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc, bool x86_is_protected(struct CPUState *cpu) { - uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0); + uint64_t cr0 = rvmcs(cpu->accel->fd, VMCS_GUEST_CR0); return cr0 & CR0_PE_MASK; } @@ -135,7 +135,7 @@ bool x86_is_v8086(struct CPUState *cpu) bool x86_is_long_mode(struct CPUState *cpu) { - return rvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER) & MSR_EFER_LMA; + return rvmcs(cpu->accel->fd, VMCS_GUEST_IA32_EFER) & MSR_EFER_LMA; } bool x86_is_long64_mode(struct CPUState *cpu) @@ -148,13 +148,13 @@ bool x86_is_long64_mode(struct CPUState *cpu) bool x86_is_paging_mode(struct CPUState *cpu) { - uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0); + uint64_t cr0 = rvmcs(cpu->accel->fd, VMCS_GUEST_CR0); return cr0 & CR0_PG_MASK; } bool x86_is_pae_enabled(struct CPUState *cpu) { - uint64_t cr4 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR4); + uint64_t cr4 = rvmcs(cpu->accel->fd, VMCS_GUEST_CR4); return cr4 & CR4_PAE_MASK; } diff --git a/target/i386/hvf/x86_descr.c b/target/i386/hvf/x86_descr.c index a484942cfc..c2d2e9ee84 100644 --- a/target/i386/hvf/x86_descr.c +++ b/target/i386/hvf/x86_descr.c @@ -47,47 +47,47 @@ static const struct vmx_segment_field { uint32_t vmx_read_segment_limit(CPUState *cpu, X86Seg seg) { - return (uint32_t)rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].limit); + return (uint32_t)rvmcs(cpu->accel->fd, vmx_segment_fields[seg].limit); } uint32_t vmx_read_segment_ar(CPUState *cpu, X86Seg seg) { - return (uint32_t)rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].ar_bytes); + return (uint32_t)rvmcs(cpu->accel->fd, vmx_segment_fields[seg].ar_bytes); } uint64_t vmx_read_segment_base(CPUState *cpu, X86Seg seg) { - return rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].base); + return rvmcs(cpu->accel->fd, vmx_segment_fields[seg].base); } x68_segment_selector vmx_read_segment_selector(CPUState *cpu, X86Seg seg) { x68_segment_selector sel; - sel.sel = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].selector); + sel.sel = rvmcs(cpu->accel->fd, vmx_segment_fields[seg].selector); return sel; } void vmx_write_segment_selector(struct CPUState *cpu, x68_segment_selector selector, X86Seg seg) { - wvmcs(cpu->hvf->fd, vmx_segment_fields[seg].selector, selector.sel); + wvmcs(cpu->accel->fd, vmx_segment_fields[seg].selector, selector.sel); } void vmx_read_segment_descriptor(struct CPUState *cpu, struct vmx_segment *desc, X86Seg seg) { - desc->sel = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].selector); - desc->base = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].base); - desc->limit = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].limit); - desc->ar = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].ar_bytes); + desc->sel = rvmcs(cpu->accel->fd, vmx_segment_fields[seg].selector); + desc->base = rvmcs(cpu->accel->fd, vmx_segment_fields[seg].base); + desc->limit = rvmcs(cpu->accel->fd, vmx_segment_fields[seg].limit); + desc->ar = rvmcs(cpu->accel->fd, vmx_segment_fields[seg].ar_bytes); } void 
vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc, X86Seg seg) { const struct vmx_segment_field *sf = &vmx_segment_fields[seg]; - wvmcs(cpu->hvf->fd, sf->base, desc->base); - wvmcs(cpu->hvf->fd, sf->limit, desc->limit); - wvmcs(cpu->hvf->fd, sf->selector, desc->sel); - wvmcs(cpu->hvf->fd, sf->ar_bytes, desc->ar); + wvmcs(cpu->accel->fd, sf->base, desc->base); + wvmcs(cpu->accel->fd, sf->limit, desc->limit); + wvmcs(cpu->accel->fd, sf->selector, desc->sel); + wvmcs(cpu->accel->fd, sf->ar_bytes, desc->ar); } void x86_segment_descriptor_to_vmx(struct CPUState *cpu, x68_segment_selector selector, struct x86_segment_descriptor *desc, struct vmx_segment *vmx_desc) diff --git a/target/i386/hvf/x86_emu.c b/target/i386/hvf/x86_emu.c index f5704f63e8..ccda568478 100644 --- a/target/i386/hvf/x86_emu.c +++ b/target/i386/hvf/x86_emu.c @@ -673,7 +673,7 @@ void simulate_rdmsr(struct CPUState *cpu) switch (msr) { case MSR_IA32_TSC: - val = rdtscp() + rvmcs(cpu->hvf->fd, VMCS_TSC_OFFSET); + val = rdtscp() + rvmcs(cpu->accel->fd, VMCS_TSC_OFFSET); break; case MSR_IA32_APICBASE: val = cpu_get_apic_base(X86_CPU(cpu)->apic_state); @@ -682,16 +682,16 @@ void simulate_rdmsr(struct CPUState *cpu) val = x86_cpu->ucode_rev; break; case MSR_EFER: - val = rvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER); + val = rvmcs(cpu->accel->fd, VMCS_GUEST_IA32_EFER); break; case MSR_FSBASE: - val = rvmcs(cpu->hvf->fd, VMCS_GUEST_FS_BASE); + val = rvmcs(cpu->accel->fd, VMCS_GUEST_FS_BASE); break; case MSR_GSBASE: - val = rvmcs(cpu->hvf->fd, VMCS_GUEST_GS_BASE); + val = rvmcs(cpu->accel->fd, VMCS_GUEST_GS_BASE); break; case MSR_KERNELGSBASE: - val = rvmcs(cpu->hvf->fd, VMCS_HOST_FS_BASE); + val = rvmcs(cpu->accel->fd, VMCS_HOST_FS_BASE); break; case MSR_STAR: abort(); @@ -779,13 +779,13 @@ void simulate_wrmsr(struct CPUState *cpu) cpu_set_apic_base(X86_CPU(cpu)->apic_state, data); break; case MSR_FSBASE: - wvmcs(cpu->hvf->fd, VMCS_GUEST_FS_BASE, data); + wvmcs(cpu->accel->fd, VMCS_GUEST_FS_BASE, data); break; case MSR_GSBASE: - wvmcs(cpu->hvf->fd, VMCS_GUEST_GS_BASE, data); + wvmcs(cpu->accel->fd, VMCS_GUEST_GS_BASE, data); break; case MSR_KERNELGSBASE: - wvmcs(cpu->hvf->fd, VMCS_HOST_FS_BASE, data); + wvmcs(cpu->accel->fd, VMCS_HOST_FS_BASE, data); break; case MSR_STAR: abort(); @@ -798,9 +798,9 @@ void simulate_wrmsr(struct CPUState *cpu) break; case MSR_EFER: /*printf("new efer %llx\n", EFER(cpu));*/ - wvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER, data); + wvmcs(cpu->accel->fd, VMCS_GUEST_IA32_EFER, data); if (data & MSR_EFER_NXE) { - hv_vcpu_invalidate_tlb(cpu->hvf->fd); + hv_vcpu_invalidate_tlb(cpu->accel->fd); } break; case MSR_MTRRphysBase(0): @@ -1424,21 +1424,21 @@ void load_regs(struct CPUState *cpu) CPUX86State *env = &x86_cpu->env; int i = 0; - RRX(env, R_EAX) = rreg(cpu->hvf->fd, HV_X86_RAX); - RRX(env, R_EBX) = rreg(cpu->hvf->fd, HV_X86_RBX); - RRX(env, R_ECX) = rreg(cpu->hvf->fd, HV_X86_RCX); - RRX(env, R_EDX) = rreg(cpu->hvf->fd, HV_X86_RDX); - RRX(env, R_ESI) = rreg(cpu->hvf->fd, HV_X86_RSI); - RRX(env, R_EDI) = rreg(cpu->hvf->fd, HV_X86_RDI); - RRX(env, R_ESP) = rreg(cpu->hvf->fd, HV_X86_RSP); - RRX(env, R_EBP) = rreg(cpu->hvf->fd, HV_X86_RBP); + RRX(env, R_EAX) = rreg(cpu->accel->fd, HV_X86_RAX); + RRX(env, R_EBX) = rreg(cpu->accel->fd, HV_X86_RBX); + RRX(env, R_ECX) = rreg(cpu->accel->fd, HV_X86_RCX); + RRX(env, R_EDX) = rreg(cpu->accel->fd, HV_X86_RDX); + RRX(env, R_ESI) = rreg(cpu->accel->fd, HV_X86_RSI); + RRX(env, R_EDI) = rreg(cpu->accel->fd, HV_X86_RDI); + RRX(env, R_ESP) = rreg(cpu->accel->fd, 
HV_X86_RSP); + RRX(env, R_EBP) = rreg(cpu->accel->fd, HV_X86_RBP); for (i = 8; i < 16; i++) { - RRX(env, i) = rreg(cpu->hvf->fd, HV_X86_RAX + i); + RRX(env, i) = rreg(cpu->accel->fd, HV_X86_RAX + i); } - env->eflags = rreg(cpu->hvf->fd, HV_X86_RFLAGS); + env->eflags = rreg(cpu->accel->fd, HV_X86_RFLAGS); rflags_to_lflags(env); - env->eip = rreg(cpu->hvf->fd, HV_X86_RIP); + env->eip = rreg(cpu->accel->fd, HV_X86_RIP); } void store_regs(struct CPUState *cpu) @@ -1447,20 +1447,20 @@ void store_regs(struct CPUState *cpu) CPUX86State *env = &x86_cpu->env; int i = 0; - wreg(cpu->hvf->fd, HV_X86_RAX, RAX(env)); - wreg(cpu->hvf->fd, HV_X86_RBX, RBX(env)); - wreg(cpu->hvf->fd, HV_X86_RCX, RCX(env)); - wreg(cpu->hvf->fd, HV_X86_RDX, RDX(env)); - wreg(cpu->hvf->fd, HV_X86_RSI, RSI(env)); - wreg(cpu->hvf->fd, HV_X86_RDI, RDI(env)); - wreg(cpu->hvf->fd, HV_X86_RBP, RBP(env)); - wreg(cpu->hvf->fd, HV_X86_RSP, RSP(env)); + wreg(cpu->accel->fd, HV_X86_RAX, RAX(env)); + wreg(cpu->accel->fd, HV_X86_RBX, RBX(env)); + wreg(cpu->accel->fd, HV_X86_RCX, RCX(env)); + wreg(cpu->accel->fd, HV_X86_RDX, RDX(env)); + wreg(cpu->accel->fd, HV_X86_RSI, RSI(env)); + wreg(cpu->accel->fd, HV_X86_RDI, RDI(env)); + wreg(cpu->accel->fd, HV_X86_RBP, RBP(env)); + wreg(cpu->accel->fd, HV_X86_RSP, RSP(env)); for (i = 8; i < 16; i++) { - wreg(cpu->hvf->fd, HV_X86_RAX + i, RRX(env, i)); + wreg(cpu->accel->fd, HV_X86_RAX + i, RRX(env, i)); } lflags_to_rflags(env); - wreg(cpu->hvf->fd, HV_X86_RFLAGS, env->eflags); + wreg(cpu->accel->fd, HV_X86_RFLAGS, env->eflags); macvm_set_rip(cpu, env->eip); } diff --git a/target/i386/hvf/x86_mmu.c b/target/i386/hvf/x86_mmu.c index 96d117567e..8cd08622a1 100644 --- a/target/i386/hvf/x86_mmu.c +++ b/target/i386/hvf/x86_mmu.c @@ -126,7 +126,7 @@ static bool test_pt_entry(struct CPUState *cpu, struct gpt_translation *pt, pt->err_code |= MMU_PAGE_PT; } - uint32_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0); + uint32_t cr0 = rvmcs(cpu->accel->fd, VMCS_GUEST_CR0); /* check protection */ if (cr0 & CR0_WP_MASK) { if (pt->write_access && !pte_write_access(pte)) { @@ -171,7 +171,7 @@ static bool walk_gpt(struct CPUState *cpu, target_ulong addr, int err_code, { int top_level, level; bool is_large = false; - target_ulong cr3 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR3); + target_ulong cr3 = rvmcs(cpu->accel->fd, VMCS_GUEST_CR3); uint64_t page_mask = pae ? 
PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK; memset(pt, 0, sizeof(*pt)); diff --git a/target/i386/hvf/x86_task.c b/target/i386/hvf/x86_task.c index beaeec0687..f09bfbdda5 100644 --- a/target/i386/hvf/x86_task.c +++ b/target/i386/hvf/x86_task.c @@ -61,7 +61,7 @@ static void load_state_from_tss32(CPUState *cpu, struct x86_tss_segment32 *tss) X86CPU *x86_cpu = X86_CPU(cpu); CPUX86State *env = &x86_cpu->env; - wvmcs(cpu->hvf->fd, VMCS_GUEST_CR3, tss->cr3); + wvmcs(cpu->accel->fd, VMCS_GUEST_CR3, tss->cr3); env->eip = tss->eip; env->eflags = tss->eflags | 2; @@ -110,11 +110,11 @@ static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel, x68_segme void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type) { - uint64_t rip = rreg(cpu->hvf->fd, HV_X86_RIP); + uint64_t rip = rreg(cpu->accel->fd, HV_X86_RIP); if (!gate_valid || (gate_type != VMCS_INTR_T_HWEXCEPTION && gate_type != VMCS_INTR_T_HWINTR && gate_type != VMCS_INTR_T_NMI)) { - int ins_len = rvmcs(cpu->hvf->fd, VMCS_EXIT_INSTRUCTION_LENGTH); + int ins_len = rvmcs(cpu->accel->fd, VMCS_EXIT_INSTRUCTION_LENGTH); macvm_set_rip(cpu, rip + ins_len); return; } @@ -173,12 +173,12 @@ void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int rea //ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc); VM_PANIC("task_switch_16"); - macvm_set_cr0(cpu->hvf->fd, rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0) | + macvm_set_cr0(cpu->accel->fd, rvmcs(cpu->accel->fd, VMCS_GUEST_CR0) | CR0_TS_MASK); x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg); vmx_write_segment_descriptor(cpu, &vmx_seg, R_TR); store_regs(cpu); - hv_vcpu_invalidate_tlb(cpu->hvf->fd); + hv_vcpu_invalidate_tlb(cpu->accel->fd); } diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c index 92dfd26a01..3b1ef5f49a 100644 --- a/target/i386/hvf/x86hvf.c +++ b/target/i386/hvf/x86hvf.c @@ -77,7 +77,7 @@ void hvf_put_xsave(CPUState *cs) x86_cpu_xsave_all_areas(X86_CPU(cs), xsave, xsave_len); - if (hv_vcpu_write_fpstate(cs->hvf->fd, xsave, xsave_len)) { + if (hv_vcpu_write_fpstate(cs->accel->fd, xsave, xsave_len)) { abort(); } } @@ -87,19 +87,19 @@ static void hvf_put_segments(CPUState *cs) CPUX86State *env = &X86_CPU(cs)->env; struct vmx_segment seg; - wvmcs(cs->hvf->fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit); - wvmcs(cs->hvf->fd, VMCS_GUEST_IDTR_BASE, env->idt.base); + wvmcs(cs->accel->fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit); + wvmcs(cs->accel->fd, VMCS_GUEST_IDTR_BASE, env->idt.base); - wvmcs(cs->hvf->fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit); - wvmcs(cs->hvf->fd, VMCS_GUEST_GDTR_BASE, env->gdt.base); + wvmcs(cs->accel->fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit); + wvmcs(cs->accel->fd, VMCS_GUEST_GDTR_BASE, env->gdt.base); - /* wvmcs(cs->hvf->fd, VMCS_GUEST_CR2, env->cr[2]); */ - wvmcs(cs->hvf->fd, VMCS_GUEST_CR3, env->cr[3]); + /* wvmcs(cs->accel->fd, VMCS_GUEST_CR2, env->cr[2]); */ + wvmcs(cs->accel->fd, VMCS_GUEST_CR3, env->cr[3]); vmx_update_tpr(cs); - wvmcs(cs->hvf->fd, VMCS_GUEST_IA32_EFER, env->efer); + wvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER, env->efer); - macvm_set_cr4(cs->hvf->fd, env->cr[4]); - macvm_set_cr0(cs->hvf->fd, env->cr[0]); + macvm_set_cr4(cs->accel->fd, env->cr[4]); + macvm_set_cr0(cs->accel->fd, env->cr[0]); hvf_set_segment(cs, &seg, &env->segs[R_CS], false); vmx_write_segment_descriptor(cs, &seg, R_CS); @@ -130,24 +130,24 @@ void hvf_put_msrs(CPUState *cs) { CPUX86State *env = &X86_CPU(cs)->env; - 
hv_vcpu_write_msr(cs->hvf->fd, MSR_IA32_SYSENTER_CS, + hv_vcpu_write_msr(cs->accel->fd, MSR_IA32_SYSENTER_CS, env->sysenter_cs); - hv_vcpu_write_msr(cs->hvf->fd, MSR_IA32_SYSENTER_ESP, + hv_vcpu_write_msr(cs->accel->fd, MSR_IA32_SYSENTER_ESP, env->sysenter_esp); - hv_vcpu_write_msr(cs->hvf->fd, MSR_IA32_SYSENTER_EIP, + hv_vcpu_write_msr(cs->accel->fd, MSR_IA32_SYSENTER_EIP, env->sysenter_eip); - hv_vcpu_write_msr(cs->hvf->fd, MSR_STAR, env->star); + hv_vcpu_write_msr(cs->accel->fd, MSR_STAR, env->star); #ifdef TARGET_X86_64 - hv_vcpu_write_msr(cs->hvf->fd, MSR_CSTAR, env->cstar); - hv_vcpu_write_msr(cs->hvf->fd, MSR_KERNELGSBASE, env->kernelgsbase); - hv_vcpu_write_msr(cs->hvf->fd, MSR_FMASK, env->fmask); - hv_vcpu_write_msr(cs->hvf->fd, MSR_LSTAR, env->lstar); + hv_vcpu_write_msr(cs->accel->fd, MSR_CSTAR, env->cstar); + hv_vcpu_write_msr(cs->accel->fd, MSR_KERNELGSBASE, env->kernelgsbase); + hv_vcpu_write_msr(cs->accel->fd, MSR_FMASK, env->fmask); + hv_vcpu_write_msr(cs->accel->fd, MSR_LSTAR, env->lstar); #endif - hv_vcpu_write_msr(cs->hvf->fd, MSR_GSBASE, env->segs[R_GS].base); - hv_vcpu_write_msr(cs->hvf->fd, MSR_FSBASE, env->segs[R_FS].base); + hv_vcpu_write_msr(cs->accel->fd, MSR_GSBASE, env->segs[R_GS].base); + hv_vcpu_write_msr(cs->accel->fd, MSR_FSBASE, env->segs[R_FS].base); } @@ -156,7 +156,7 @@ void hvf_get_xsave(CPUState *cs) void *xsave = X86_CPU(cs)->env.xsave_buf; uint32_t xsave_len = X86_CPU(cs)->env.xsave_buf_len; - if (hv_vcpu_read_fpstate(cs->hvf->fd, xsave, xsave_len)) { + if (hv_vcpu_read_fpstate(cs->accel->fd, xsave, xsave_len)) { abort(); } @@ -195,17 +195,17 @@ static void hvf_get_segments(CPUState *cs) vmx_read_segment_descriptor(cs, &seg, R_LDTR); hvf_get_segment(&env->ldt, &seg); - env->idt.limit = rvmcs(cs->hvf->fd, VMCS_GUEST_IDTR_LIMIT); - env->idt.base = rvmcs(cs->hvf->fd, VMCS_GUEST_IDTR_BASE); - env->gdt.limit = rvmcs(cs->hvf->fd, VMCS_GUEST_GDTR_LIMIT); - env->gdt.base = rvmcs(cs->hvf->fd, VMCS_GUEST_GDTR_BASE); + env->idt.limit = rvmcs(cs->accel->fd, VMCS_GUEST_IDTR_LIMIT); + env->idt.base = rvmcs(cs->accel->fd, VMCS_GUEST_IDTR_BASE); + env->gdt.limit = rvmcs(cs->accel->fd, VMCS_GUEST_GDTR_LIMIT); + env->gdt.base = rvmcs(cs->accel->fd, VMCS_GUEST_GDTR_BASE); - env->cr[0] = rvmcs(cs->hvf->fd, VMCS_GUEST_CR0); + env->cr[0] = rvmcs(cs->accel->fd, VMCS_GUEST_CR0); env->cr[2] = 0; - env->cr[3] = rvmcs(cs->hvf->fd, VMCS_GUEST_CR3); - env->cr[4] = rvmcs(cs->hvf->fd, VMCS_GUEST_CR4); + env->cr[3] = rvmcs(cs->accel->fd, VMCS_GUEST_CR3); + env->cr[4] = rvmcs(cs->accel->fd, VMCS_GUEST_CR4); - env->efer = rvmcs(cs->hvf->fd, VMCS_GUEST_IA32_EFER); + env->efer = rvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER); } void hvf_get_msrs(CPUState *cs) @@ -213,27 +213,27 @@ void hvf_get_msrs(CPUState *cs) CPUX86State *env = &X86_CPU(cs)->env; uint64_t tmp; - hv_vcpu_read_msr(cs->hvf->fd, MSR_IA32_SYSENTER_CS, &tmp); + hv_vcpu_read_msr(cs->accel->fd, MSR_IA32_SYSENTER_CS, &tmp); env->sysenter_cs = tmp; - hv_vcpu_read_msr(cs->hvf->fd, MSR_IA32_SYSENTER_ESP, &tmp); + hv_vcpu_read_msr(cs->accel->fd, MSR_IA32_SYSENTER_ESP, &tmp); env->sysenter_esp = tmp; - hv_vcpu_read_msr(cs->hvf->fd, MSR_IA32_SYSENTER_EIP, &tmp); + hv_vcpu_read_msr(cs->accel->fd, MSR_IA32_SYSENTER_EIP, &tmp); env->sysenter_eip = tmp; - hv_vcpu_read_msr(cs->hvf->fd, MSR_STAR, &env->star); + hv_vcpu_read_msr(cs->accel->fd, MSR_STAR, &env->star); #ifdef TARGET_X86_64 - hv_vcpu_read_msr(cs->hvf->fd, MSR_CSTAR, &env->cstar); - hv_vcpu_read_msr(cs->hvf->fd, MSR_KERNELGSBASE, &env->kernelgsbase); - 
hv_vcpu_read_msr(cs->hvf->fd, MSR_FMASK, &env->fmask); - hv_vcpu_read_msr(cs->hvf->fd, MSR_LSTAR, &env->lstar); + hv_vcpu_read_msr(cs->accel->fd, MSR_CSTAR, &env->cstar); + hv_vcpu_read_msr(cs->accel->fd, MSR_KERNELGSBASE, &env->kernelgsbase); + hv_vcpu_read_msr(cs->accel->fd, MSR_FMASK, &env->fmask); + hv_vcpu_read_msr(cs->accel->fd, MSR_LSTAR, &env->lstar); #endif - hv_vcpu_read_msr(cs->hvf->fd, MSR_IA32_APICBASE, &tmp); + hv_vcpu_read_msr(cs->accel->fd, MSR_IA32_APICBASE, &tmp); - env->tsc = rdtscp() + rvmcs(cs->hvf->fd, VMCS_TSC_OFFSET); + env->tsc = rdtscp() + rvmcs(cs->accel->fd, VMCS_TSC_OFFSET); } int hvf_put_registers(CPUState *cs) @@ -241,26 +241,26 @@ int hvf_put_registers(CPUState *cs) X86CPU *x86cpu = X86_CPU(cs); CPUX86State *env = &x86cpu->env; - wreg(cs->hvf->fd, HV_X86_RAX, env->regs[R_EAX]); - wreg(cs->hvf->fd, HV_X86_RBX, env->regs[R_EBX]); - wreg(cs->hvf->fd, HV_X86_RCX, env->regs[R_ECX]); - wreg(cs->hvf->fd, HV_X86_RDX, env->regs[R_EDX]); - wreg(cs->hvf->fd, HV_X86_RBP, env->regs[R_EBP]); - wreg(cs->hvf->fd, HV_X86_RSP, env->regs[R_ESP]); - wreg(cs->hvf->fd, HV_X86_RSI, env->regs[R_ESI]); - wreg(cs->hvf->fd, HV_X86_RDI, env->regs[R_EDI]); - wreg(cs->hvf->fd, HV_X86_R8, env->regs[8]); - wreg(cs->hvf->fd, HV_X86_R9, env->regs[9]); - wreg(cs->hvf->fd, HV_X86_R10, env->regs[10]); - wreg(cs->hvf->fd, HV_X86_R11, env->regs[11]); - wreg(cs->hvf->fd, HV_X86_R12, env->regs[12]); - wreg(cs->hvf->fd, HV_X86_R13, env->regs[13]); - wreg(cs->hvf->fd, HV_X86_R14, env->regs[14]); - wreg(cs->hvf->fd, HV_X86_R15, env->regs[15]); - wreg(cs->hvf->fd, HV_X86_RFLAGS, env->eflags); - wreg(cs->hvf->fd, HV_X86_RIP, env->eip); + wreg(cs->accel->fd, HV_X86_RAX, env->regs[R_EAX]); + wreg(cs->accel->fd, HV_X86_RBX, env->regs[R_EBX]); + wreg(cs->accel->fd, HV_X86_RCX, env->regs[R_ECX]); + wreg(cs->accel->fd, HV_X86_RDX, env->regs[R_EDX]); + wreg(cs->accel->fd, HV_X86_RBP, env->regs[R_EBP]); + wreg(cs->accel->fd, HV_X86_RSP, env->regs[R_ESP]); + wreg(cs->accel->fd, HV_X86_RSI, env->regs[R_ESI]); + wreg(cs->accel->fd, HV_X86_RDI, env->regs[R_EDI]); + wreg(cs->accel->fd, HV_X86_R8, env->regs[8]); + wreg(cs->accel->fd, HV_X86_R9, env->regs[9]); + wreg(cs->accel->fd, HV_X86_R10, env->regs[10]); + wreg(cs->accel->fd, HV_X86_R11, env->regs[11]); + wreg(cs->accel->fd, HV_X86_R12, env->regs[12]); + wreg(cs->accel->fd, HV_X86_R13, env->regs[13]); + wreg(cs->accel->fd, HV_X86_R14, env->regs[14]); + wreg(cs->accel->fd, HV_X86_R15, env->regs[15]); + wreg(cs->accel->fd, HV_X86_RFLAGS, env->eflags); + wreg(cs->accel->fd, HV_X86_RIP, env->eip); - wreg(cs->hvf->fd, HV_X86_XCR0, env->xcr0); + wreg(cs->accel->fd, HV_X86_XCR0, env->xcr0); hvf_put_xsave(cs); @@ -268,14 +268,14 @@ int hvf_put_registers(CPUState *cs) hvf_put_msrs(cs); - wreg(cs->hvf->fd, HV_X86_DR0, env->dr[0]); - wreg(cs->hvf->fd, HV_X86_DR1, env->dr[1]); - wreg(cs->hvf->fd, HV_X86_DR2, env->dr[2]); - wreg(cs->hvf->fd, HV_X86_DR3, env->dr[3]); - wreg(cs->hvf->fd, HV_X86_DR4, env->dr[4]); - wreg(cs->hvf->fd, HV_X86_DR5, env->dr[5]); - wreg(cs->hvf->fd, HV_X86_DR6, env->dr[6]); - wreg(cs->hvf->fd, HV_X86_DR7, env->dr[7]); + wreg(cs->accel->fd, HV_X86_DR0, env->dr[0]); + wreg(cs->accel->fd, HV_X86_DR1, env->dr[1]); + wreg(cs->accel->fd, HV_X86_DR2, env->dr[2]); + wreg(cs->accel->fd, HV_X86_DR3, env->dr[3]); + wreg(cs->accel->fd, HV_X86_DR4, env->dr[4]); + wreg(cs->accel->fd, HV_X86_DR5, env->dr[5]); + wreg(cs->accel->fd, HV_X86_DR6, env->dr[6]); + wreg(cs->accel->fd, HV_X86_DR7, env->dr[7]); return 0; } @@ -285,40 +285,40 @@ int hvf_get_registers(CPUState 
*cs) X86CPU *x86cpu = X86_CPU(cs); CPUX86State *env = &x86cpu->env; - env->regs[R_EAX] = rreg(cs->hvf->fd, HV_X86_RAX); - env->regs[R_EBX] = rreg(cs->hvf->fd, HV_X86_RBX); - env->regs[R_ECX] = rreg(cs->hvf->fd, HV_X86_RCX); - env->regs[R_EDX] = rreg(cs->hvf->fd, HV_X86_RDX); - env->regs[R_EBP] = rreg(cs->hvf->fd, HV_X86_RBP); - env->regs[R_ESP] = rreg(cs->hvf->fd, HV_X86_RSP); - env->regs[R_ESI] = rreg(cs->hvf->fd, HV_X86_RSI); - env->regs[R_EDI] = rreg(cs->hvf->fd, HV_X86_RDI); - env->regs[8] = rreg(cs->hvf->fd, HV_X86_R8); - env->regs[9] = rreg(cs->hvf->fd, HV_X86_R9); - env->regs[10] = rreg(cs->hvf->fd, HV_X86_R10); - env->regs[11] = rreg(cs->hvf->fd, HV_X86_R11); - env->regs[12] = rreg(cs->hvf->fd, HV_X86_R12); - env->regs[13] = rreg(cs->hvf->fd, HV_X86_R13); - env->regs[14] = rreg(cs->hvf->fd, HV_X86_R14); - env->regs[15] = rreg(cs->hvf->fd, HV_X86_R15); + env->regs[R_EAX] = rreg(cs->accel->fd, HV_X86_RAX); + env->regs[R_EBX] = rreg(cs->accel->fd, HV_X86_RBX); + env->regs[R_ECX] = rreg(cs->accel->fd, HV_X86_RCX); + env->regs[R_EDX] = rreg(cs->accel->fd, HV_X86_RDX); + env->regs[R_EBP] = rreg(cs->accel->fd, HV_X86_RBP); + env->regs[R_ESP] = rreg(cs->accel->fd, HV_X86_RSP); + env->regs[R_ESI] = rreg(cs->accel->fd, HV_X86_RSI); + env->regs[R_EDI] = rreg(cs->accel->fd, HV_X86_RDI); + env->regs[8] = rreg(cs->accel->fd, HV_X86_R8); + env->regs[9] = rreg(cs->accel->fd, HV_X86_R9); + env->regs[10] = rreg(cs->accel->fd, HV_X86_R10); + env->regs[11] = rreg(cs->accel->fd, HV_X86_R11); + env->regs[12] = rreg(cs->accel->fd, HV_X86_R12); + env->regs[13] = rreg(cs->accel->fd, HV_X86_R13); + env->regs[14] = rreg(cs->accel->fd, HV_X86_R14); + env->regs[15] = rreg(cs->accel->fd, HV_X86_R15); - env->eflags = rreg(cs->hvf->fd, HV_X86_RFLAGS); - env->eip = rreg(cs->hvf->fd, HV_X86_RIP); + env->eflags = rreg(cs->accel->fd, HV_X86_RFLAGS); + env->eip = rreg(cs->accel->fd, HV_X86_RIP); hvf_get_xsave(cs); - env->xcr0 = rreg(cs->hvf->fd, HV_X86_XCR0); + env->xcr0 = rreg(cs->accel->fd, HV_X86_XCR0); hvf_get_segments(cs); hvf_get_msrs(cs); - env->dr[0] = rreg(cs->hvf->fd, HV_X86_DR0); - env->dr[1] = rreg(cs->hvf->fd, HV_X86_DR1); - env->dr[2] = rreg(cs->hvf->fd, HV_X86_DR2); - env->dr[3] = rreg(cs->hvf->fd, HV_X86_DR3); - env->dr[4] = rreg(cs->hvf->fd, HV_X86_DR4); - env->dr[5] = rreg(cs->hvf->fd, HV_X86_DR5); - env->dr[6] = rreg(cs->hvf->fd, HV_X86_DR6); - env->dr[7] = rreg(cs->hvf->fd, HV_X86_DR7); + env->dr[0] = rreg(cs->accel->fd, HV_X86_DR0); + env->dr[1] = rreg(cs->accel->fd, HV_X86_DR1); + env->dr[2] = rreg(cs->accel->fd, HV_X86_DR2); + env->dr[3] = rreg(cs->accel->fd, HV_X86_DR3); + env->dr[4] = rreg(cs->accel->fd, HV_X86_DR4); + env->dr[5] = rreg(cs->accel->fd, HV_X86_DR5); + env->dr[6] = rreg(cs->accel->fd, HV_X86_DR6); + env->dr[7] = rreg(cs->accel->fd, HV_X86_DR7); x86_update_hflags(env); return 0; @@ -327,16 +327,16 @@ int hvf_get_registers(CPUState *cs) static void vmx_set_int_window_exiting(CPUState *cs) { uint64_t val; - val = rvmcs(cs->hvf->fd, VMCS_PRI_PROC_BASED_CTLS); - wvmcs(cs->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val | + val = rvmcs(cs->accel->fd, VMCS_PRI_PROC_BASED_CTLS); + wvmcs(cs->accel->fd, VMCS_PRI_PROC_BASED_CTLS, val | VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING); } void vmx_clear_int_window_exiting(CPUState *cs) { uint64_t val; - val = rvmcs(cs->hvf->fd, VMCS_PRI_PROC_BASED_CTLS); - wvmcs(cs->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val & + val = rvmcs(cs->accel->fd, VMCS_PRI_PROC_BASED_CTLS); + wvmcs(cs->accel->fd, VMCS_PRI_PROC_BASED_CTLS, val & 
~VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING); } @@ -372,7 +372,7 @@ bool hvf_inject_interrupts(CPUState *cs) uint64_t info = 0; if (have_event) { info = vector | intr_type | VMCS_INTR_VALID; - uint64_t reason = rvmcs(cs->hvf->fd, VMCS_EXIT_REASON); + uint64_t reason = rvmcs(cs->accel->fd, VMCS_EXIT_REASON); if (env->nmi_injected && reason != EXIT_REASON_TASK_SWITCH) { vmx_clear_nmi_blocking(cs); } @@ -381,17 +381,17 @@ bool hvf_inject_interrupts(CPUState *cs) info &= ~(1 << 12); /* clear undefined bit */ if (intr_type == VMCS_INTR_T_SWINTR || intr_type == VMCS_INTR_T_SWEXCEPTION) { - wvmcs(cs->hvf->fd, VMCS_ENTRY_INST_LENGTH, env->ins_len); + wvmcs(cs->accel->fd, VMCS_ENTRY_INST_LENGTH, env->ins_len); } if (env->has_error_code) { - wvmcs(cs->hvf->fd, VMCS_ENTRY_EXCEPTION_ERROR, + wvmcs(cs->accel->fd, VMCS_ENTRY_EXCEPTION_ERROR, env->error_code); /* Indicate that VMCS_ENTRY_EXCEPTION_ERROR is valid */ info |= VMCS_INTR_DEL_ERRCODE; } /*printf("reinject %lx err %d\n", info, err);*/ - wvmcs(cs->hvf->fd, VMCS_ENTRY_INTR_INFO, info); + wvmcs(cs->accel->fd, VMCS_ENTRY_INTR_INFO, info); }; } @@ -399,7 +399,7 @@ bool hvf_inject_interrupts(CPUState *cs) if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) { cs->interrupt_request &= ~CPU_INTERRUPT_NMI; info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | EXCP02_NMI; - wvmcs(cs->hvf->fd, VMCS_ENTRY_INTR_INFO, info); + wvmcs(cs->accel->fd, VMCS_ENTRY_INTR_INFO, info); } else { vmx_set_nmi_window_exiting(cs); } @@ -411,7 +411,7 @@ bool hvf_inject_interrupts(CPUState *cs) int line = cpu_get_pic_interrupt(&x86cpu->env); cs->interrupt_request &= ~CPU_INTERRUPT_HARD; if (line >= 0) { - wvmcs(cs->hvf->fd, VMCS_ENTRY_INTR_INFO, line | + wvmcs(cs->accel->fd, VMCS_ENTRY_INTR_INFO, line | VMCS_INTR_VALID | VMCS_INTR_T_HWINTR); } } @@ -429,7 +429,7 @@ int hvf_process_events(CPUState *cs) if (!cs->vcpu_dirty) { /* light weight sync for CPU_INTERRUPT_HARD and IF_MASK */ - env->eflags = rreg(cs->hvf->fd, HV_X86_RFLAGS); + env->eflags = rreg(cs->accel->fd, HV_X86_RFLAGS); } if (cs->interrupt_request & CPU_INTERRUPT_INIT) { From 217a190eece1a13297b7b075bce50226e5e78048 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Tue, 6 Jun 2023 07:32:17 +0200 Subject: [PATCH 19/30] accel/kvm: Re-include "exec/memattrs.h" header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 1e05888ab5 ("sysemu/kvm: Remove unused headers") was a bit overzealous while cleaning "sysemu/kvm.h" headers: kvm_arch_post_run() returns a MemTxAttrs type, so depends on "exec/memattrs.h" for its definition. 
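For illustration only (the prototype shape is recalled, not quoted from this
patch), the dependency in "include/sysemu/kvm.h" is simply that the
MemTxAttrs typedef is named in a prototype, so it must already be declared
when the header is parsed:

    #include "exec/memattrs.h"    /* declares the MemTxAttrs typedef */

    /* The typedef name has to be visible before this prototype: */
    MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run);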
Fixes: 1e05888ab5 ("sysemu/kvm: Remove unused headers")
Signed-off-by: Philippe Mathieu-Daudé
Reviewed-by: Richard Henderson
Message-Id: <20230619074153.44268-5-philmd@linaro.org>
---
 include/sysemu/kvm.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
index 7902acdfd9..115f0cca79 100644
--- a/include/sysemu/kvm.h
+++ b/include/sysemu/kvm.h
@@ -16,6 +16,7 @@
 #ifndef QEMU_KVM_H
 #define QEMU_KVM_H
 
+#include "exec/memattrs.h"
 #include "qemu/accel.h"
 #include "qom/object.h"
 

From dec68f7042aba11ae706842a9616587ce3685685 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?=
Date: Wed, 5 Apr 2023 14:08:40 +0200
Subject: [PATCH 20/30] accel/kvm: Declare kvm_direct_msi_allowed in stubs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Declare kvm_direct_msi_allowed in the stubs to avoid the following
link failure in the next commit, which calls kvm_direct_msi_enabled()
from arm_gicv3_its_common.c:

  Undefined symbols for architecture arm64:
    "_kvm_direct_msi_allowed", referenced from:
        _its_class_name in hw_intc_arm_gicv3_its_common.c.o
  ld: symbol(s) not found for architecture arm64

Signed-off-by: Philippe Mathieu-Daudé
Reviewed-by: Alex Bennée
Reviewed-by: Richard Henderson
Message-Id: <20230405160454.97436-3-philmd@linaro.org>
---
 accel/stubs/kvm-stub.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/accel/stubs/kvm-stub.c b/accel/stubs/kvm-stub.c
index 5d2dd8f351..235dc661bc 100644
--- a/accel/stubs/kvm-stub.c
+++ b/accel/stubs/kvm-stub.c
@@ -27,6 +27,7 @@ bool kvm_allowed;
 bool kvm_readonly_mem_allowed;
 bool kvm_ioeventfd_any_length_allowed;
 bool kvm_msi_use_devid;
+bool kvm_direct_msi_allowed;
 
 void kvm_flush_coalesced_mmio_buffer(void)
 {

From 0c40daf03810ed56535c40f6debdcc62ce4c5e6d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?=
Date: Wed, 5 Apr 2023 13:48:26 +0200
Subject: [PATCH 21/30] hw/intc/arm_gic: Un-inline GIC*/ITS class_name() helpers
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

"kvm_arm.h" contains external and internal prototype declarations.
Files under the hw/ directory should only access the KVM external API.

To avoid machine / device models having to include "kvm_arm.h" simply
to get the QOM GIC/ITS class name, un-inline each class name getter
into the proper device model file.
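As an illustration (hypothetical board code, not part of this patch), a
machine model now only needs the device header to pick the right QOM type:

    #include "hw/intc/arm_gicv3_common.h"   /* declares gicv3_class_name() */

    /* Resolves to "kvm-arm-gicv3" with an in-kernel irqchip, otherwise to
     * "arm-gicv3", without pulling in "kvm_arm.h". */
    DeviceState *gicdev = qdev_new(gicv3_class_name());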
Signed-off-by: Philippe Mathieu-Daudé Reviewed-by: Richard Henderson Message-Id: <20230405160454.97436-4-philmd@linaro.org> --- hw/arm/sbsa-ref.c | 1 + hw/arm/virt-acpi-build.c | 2 +- hw/arm/virt.c | 1 + hw/intc/arm_gic_common.c | 7 ++++ hw/intc/arm_gicv3_common.c | 14 ++++++++ hw/intc/arm_gicv3_its_common.c | 12 +++++++ include/hw/intc/arm_gic.h | 2 ++ include/hw/intc/arm_gicv3_common.h | 10 ++++++ include/hw/intc/arm_gicv3_its_common.h | 9 ++++++ target/arm/kvm_arm.h | 45 -------------------------- 10 files changed, 57 insertions(+), 46 deletions(-) diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c index b774d80291..66500a5b08 100644 --- a/hw/arm/sbsa-ref.c +++ b/hw/arm/sbsa-ref.c @@ -36,6 +36,7 @@ #include "hw/ide/internal.h" #include "hw/ide/ahci_internal.h" #include "hw/intc/arm_gicv3_common.h" +#include "hw/intc/arm_gicv3_its_common.h" #include "hw/loader.h" #include "hw/pci-host/gpex.h" #include "hw/qdev-properties.h" diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c index 4af0de8b24..55f2706bc9 100644 --- a/hw/arm/virt-acpi-build.c +++ b/hw/arm/virt-acpi-build.c @@ -48,12 +48,12 @@ #include "hw/pci/pci_bus.h" #include "hw/pci-host/gpex.h" #include "hw/arm/virt.h" +#include "hw/intc/arm_gicv3_its_common.h" #include "hw/mem/nvdimm.h" #include "hw/platform-bus.h" #include "sysemu/numa.h" #include "sysemu/reset.h" #include "sysemu/tpm.h" -#include "kvm_arm.h" #include "migration/vmstate.h" #include "hw/acpi/ghes.h" #include "hw/acpi/viot.h" diff --git a/hw/arm/virt.c b/hw/arm/virt.c index 3937e30477..3196db556e 100644 --- a/hw/arm/virt.c +++ b/hw/arm/virt.c @@ -63,6 +63,7 @@ #include "hw/arm/fdt.h" #include "hw/intc/arm_gic.h" #include "hw/intc/arm_gicv3_common.h" +#include "hw/intc/arm_gicv3_its_common.h" #include "hw/irq.h" #include "kvm_arm.h" #include "hw/firmware/smbios.h" diff --git a/hw/intc/arm_gic_common.c b/hw/intc/arm_gic_common.c index a379cea395..9702197856 100644 --- a/hw/intc/arm_gic_common.c +++ b/hw/intc/arm_gic_common.c @@ -21,10 +21,12 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu/module.h" +#include "qemu/error-report.h" #include "gic_internal.h" #include "hw/arm/linux-boot-if.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" +#include "sysemu/kvm.h" static int gic_pre_save(void *opaque) { @@ -393,3 +395,8 @@ static void register_types(void) } type_init(register_types) + +const char *gic_class_name(void) +{ + return kvm_irqchip_in_kernel() ? 
"kvm-arm-gic" : "arm_gic"; +} diff --git a/hw/intc/arm_gicv3_common.c b/hw/intc/arm_gicv3_common.c index 642a8243ed..2ebf880ead 100644 --- a/hw/intc/arm_gicv3_common.c +++ b/hw/intc/arm_gicv3_common.c @@ -24,6 +24,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu/module.h" +#include "qemu/error-report.h" #include "hw/core/cpu.h" #include "hw/intc/arm_gicv3_common.h" #include "hw/qdev-properties.h" @@ -608,3 +609,16 @@ static void register_types(void) } type_init(register_types) + +const char *gicv3_class_name(void) +{ + if (kvm_irqchip_in_kernel()) { + return "kvm-arm-gicv3"; + } else { + if (kvm_enabled()) { + error_report("Userspace GICv3 is not supported with KVM"); + exit(1); + } + return "arm-gicv3"; + } +} diff --git a/hw/intc/arm_gicv3_its_common.c b/hw/intc/arm_gicv3_its_common.c index d7532a7a89..abaf77057e 100644 --- a/hw/intc/arm_gicv3_its_common.c +++ b/hw/intc/arm_gicv3_its_common.c @@ -24,6 +24,7 @@ #include "hw/intc/arm_gicv3_its_common.h" #include "qemu/log.h" #include "qemu/module.h" +#include "sysemu/kvm.h" static int gicv3_its_pre_save(void *opaque) { @@ -158,3 +159,14 @@ static void gicv3_its_common_register_types(void) } type_init(gicv3_its_common_register_types) + +const char *its_class_name(void) +{ + if (kvm_irqchip_in_kernel()) { + /* KVM implementation requires this capability */ + return kvm_direct_msi_enabled() ? "arm-its-kvm" : NULL; + } else { + /* Software emulation based model */ + return "arm-gicv3-its"; + } +} diff --git a/include/hw/intc/arm_gic.h b/include/hw/intc/arm_gic.h index 116ccbb5a9..48f6a51a70 100644 --- a/include/hw/intc/arm_gic.h +++ b/include/hw/intc/arm_gic.h @@ -86,4 +86,6 @@ struct ARMGICClass { DeviceRealize parent_realize; }; +const char *gic_class_name(void); + #endif diff --git a/include/hw/intc/arm_gicv3_common.h b/include/hw/intc/arm_gicv3_common.h index ab5182a28a..4e2fb518e7 100644 --- a/include/hw/intc/arm_gicv3_common.h +++ b/include/hw/intc/arm_gicv3_common.h @@ -329,4 +329,14 @@ struct ARMGICv3CommonClass { void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler, const MemoryRegionOps *ops); +/** + * gicv3_class_name + * + * Return name of GICv3 class to use depending on whether KVM acceleration is + * in use. May throw an error if the chosen implementation is not available. + * + * Returns: class name to use + */ +const char *gicv3_class_name(void); + #endif diff --git a/include/hw/intc/arm_gicv3_its_common.h b/include/hw/intc/arm_gicv3_its_common.h index a11a0f6654..7dc712b38d 100644 --- a/include/hw/intc/arm_gicv3_its_common.h +++ b/include/hw/intc/arm_gicv3_its_common.h @@ -122,5 +122,14 @@ struct GICv3ITSCommonClass { void (*post_load)(GICv3ITSState *s); }; +/** + * its_class_name: + * + * Return the ITS class name to use depending on whether KVM acceleration + * and KVM CAP_SIGNAL_MSI are supported + * + * Returns: class name to use or NULL + */ +const char *its_class_name(void); #endif diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h index 330fbe5c72..051a0da41c 100644 --- a/target/arm/kvm_arm.h +++ b/target/arm/kvm_arm.h @@ -453,32 +453,6 @@ static inline uint32_t kvm_arm_sve_get_vls(CPUState *cs) #endif -static inline const char *gic_class_name(void) -{ - return kvm_irqchip_in_kernel() ? "kvm-arm-gic" : "arm_gic"; -} - -/** - * gicv3_class_name - * - * Return name of GICv3 class to use depending on whether KVM acceleration is - * in use. May throw an error if the chosen implementation is not available. 
- * - * Returns: class name to use - */ -static inline const char *gicv3_class_name(void) -{ - if (kvm_irqchip_in_kernel()) { - return "kvm-arm-gicv3"; - } else { - if (kvm_enabled()) { - error_report("Userspace GICv3 is not supported with KVM"); - exit(1); - } - return "arm-gicv3"; - } -} - /** * kvm_arm_handle_debug: * @cs: CPUState @@ -516,23 +490,4 @@ void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr); */ bool kvm_arm_verify_ext_dabt_pending(CPUState *cs); -/** - * its_class_name: - * - * Return the ITS class name to use depending on whether KVM acceleration - * and KVM CAP_SIGNAL_MSI are supported - * - * Returns: class name to use or NULL - */ -static inline const char *its_class_name(void) -{ - if (kvm_irqchip_in_kernel()) { - /* KVM implementation requires this capability */ - return kvm_direct_msi_enabled() ? "arm-its-kvm" : NULL; - } else { - /* Software emulation based model */ - return "arm-gicv3-its"; - } -} - #endif From 674e44c99a204c2796c99cf8be65dea487718975 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Wed, 5 Apr 2023 13:37:41 +0200 Subject: [PATCH 22/30] hw/intc/arm_gic: Rename 'first_cpu' argument MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit "hw/core/cpu.h" defines 'first_cpu' as QTAILQ_FIRST_RCU(&cpus). arm_gic_common_reset_irq_state() calls its second argument 'first_cpu', producing a build failure when "hw/core/cpu.h" is included: hw/intc/arm_gic_common.c:238:68: warning: omitting the parameter name in a function definition is a C2x extension [-Wc2x-extensions] static inline void arm_gic_common_reset_irq_state(GICState *s, int first_cpu, ^ include/hw/core/cpu.h:451:26: note: expanded from macro 'first_cpu' #define first_cpu QTAILQ_FIRST_RCU(&cpus) ^ KISS, rename the function argument. Reviewed-by: Richard Henderson Reviewed-by: Alex Bennée Signed-off-by: Philippe Mathieu-Daudé Message-Id: <20230405160454.97436-5-philmd@linaro.org> --- hw/intc/arm_gic_common.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hw/intc/arm_gic_common.c b/hw/intc/arm_gic_common.c index 9702197856..7c28504ace 100644 --- a/hw/intc/arm_gic_common.c +++ b/hw/intc/arm_gic_common.c @@ -235,12 +235,12 @@ static void arm_gic_common_realize(DeviceState *dev, Error **errp) } } -static inline void arm_gic_common_reset_irq_state(GICState *s, int first_cpu, +static inline void arm_gic_common_reset_irq_state(GICState *s, int cidx, int resetprio) { int i, j; - for (i = first_cpu; i < first_cpu + s->num_cpu; i++) { + for (i = cidx; i < cidx + s->num_cpu; i++) { if (s->revision == REV_11MPCORE) { s->priority_mask[i] = 0xf0; } else { From 94522562f68043c0420795c9b4ec8f1d6b40e2fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Wed, 5 Apr 2023 13:39:29 +0200 Subject: [PATCH 23/30] hw/arm/sbsa-ref: Include missing 'sysemu/kvm.h' header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit "sysemu/kvm.h" is indirectly pulled in. 
Make its inclusion explicit to avoid the following error when
refactoring include/:

  hw/arm/sbsa-ref.c:693:9: error: implicit declaration of function 'kvm_enabled' is invalid in C99 [-Werror,-Wimplicit-function-declaration]
      if (kvm_enabled()) {
          ^

Reviewed-by: Leif Lindholm
Reviewed-by: Richard Henderson
Signed-off-by: Philippe Mathieu-Daudé
Message-Id: <20230405160454.97436-6-philmd@linaro.org>
---
 hw/arm/sbsa-ref.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
index 66500a5b08..82a28b2e0b 100644
--- a/hw/arm/sbsa-ref.c
+++ b/hw/arm/sbsa-ref.c
@@ -23,6 +23,7 @@
 #include "qemu/error-report.h"
 #include "qemu/units.h"
 #include "sysemu/device_tree.h"
+#include "sysemu/kvm.h"
 #include "sysemu/numa.h"
 #include "sysemu/runstate.h"
 #include "sysemu/sysemu.h"

From cf43b5b69c0f4acee52e3648b88f4a0bf3de770b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?=
Date: Tue, 4 Apr 2023 11:12:38 +0200
Subject: [PATCH 24/30] target/arm: Restrict KVM-specific fields from ArchCPU
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

These fields shouldn't be accessed when KVM is not available.

Signed-off-by: Philippe Mathieu-Daudé
Reviewed-by: Richard Henderson
Message-Id: <20230405160454.97436-8-philmd@linaro.org>
---
 target/arm/cpu.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 00e675f58f..4d6c0f95d5 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -961,6 +961,7 @@ struct ArchCPU {
      */
     uint32_t kvm_target;
 
+#ifdef CONFIG_KVM
     /* KVM init features for this CPU */
     uint32_t kvm_init_features[7];
 
@@ -973,6 +974,7 @@
 
     /* KVM steal time */
     OnOffAuto kvm_steal_time;
+#endif /* CONFIG_KVM */
 
     /* Uniprocessor system with MP extensions */
     bool mp_is_up;

From 0573997713eb0a37ab98b545794e18b868b471ea Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?=
Date: Tue, 4 Apr 2023 11:14:58 +0200
Subject: [PATCH 25/30] target/ppc: Restrict KVM-specific fields from ArchCPU
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The 'kvm_sw_tlb' and 'tlb_dirty' fields introduced in commit 93dd5e852c
("kvm: ppc: booke206: use MMU API") are specific to KVM and shouldn't be
accessed when it is not available.
Signed-off-by: Philippe Mathieu-Daudé Reviewed-by: Nicholas Piggin Message-Id: <20230624192645.13680-1-philmd@linaro.org> --- hw/ppc/e500.c | 2 ++ hw/ppc/ppce500_spin.c | 2 ++ target/ppc/cpu.h | 2 ++ target/ppc/mmu_common.c | 4 ++++ 4 files changed, 10 insertions(+) diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c index b6eb599751..67793a86f1 100644 --- a/hw/ppc/e500.c +++ b/hw/ppc/e500.c @@ -765,7 +765,9 @@ static void mmubooke_create_initial_mapping(CPUPPCState *env) tlb->mas7_3 = 0; tlb->mas7_3 |= MAS3_UR | MAS3_UW | MAS3_UX | MAS3_SR | MAS3_SW | MAS3_SX; +#ifdef CONFIG_KVM env->tlb_dirty = true; +#endif } static void ppce500_cpu_reset_sec(void *opaque) diff --git a/hw/ppc/ppce500_spin.c b/hw/ppc/ppce500_spin.c index d57b199797..bbce63e8a4 100644 --- a/hw/ppc/ppce500_spin.c +++ b/hw/ppc/ppce500_spin.c @@ -83,7 +83,9 @@ static void mmubooke_create_initial_mapping(CPUPPCState *env, tlb->mas2 = (va & TARGET_PAGE_MASK) | MAS2_M; tlb->mas7_3 = pa & TARGET_PAGE_MASK; tlb->mas7_3 |= MAS3_UR | MAS3_UW | MAS3_UX | MAS3_SR | MAS3_SW | MAS3_SX; +#ifdef CONFIG_KVM env->tlb_dirty = true; +#endif } static void spin_kick(CPUState *cs, run_on_cpu_data data) diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h index 94497aa115..af12c93ebc 100644 --- a/target/ppc/cpu.h +++ b/target/ppc/cpu.h @@ -1149,8 +1149,10 @@ struct CPUArchState { int nb_pids; /* Number of available PID registers */ int tlb_type; /* Type of TLB we're dealing with */ ppc_tlb_t tlb; /* TLB is optional. Allocate them only if needed */ +#ifdef CONFIG_KVM bool tlb_dirty; /* Set to non-zero when modifying TLB */ bool kvm_sw_tlb; /* non-zero if KVM SW TLB API is active */ +#endif /* CONFIG_KVM */ uint32_t tlb_need_flush; /* Delayed flush needed */ #define TLB_NEED_LOCAL_FLUSH 0x1 #define TLB_NEED_GLOBAL_FLUSH 0x2 diff --git a/target/ppc/mmu_common.c b/target/ppc/mmu_common.c index ae1db6e348..8c000e250d 100644 --- a/target/ppc/mmu_common.c +++ b/target/ppc/mmu_common.c @@ -930,10 +930,12 @@ static void mmubooke_dump_mmu(CPUPPCState *env) ppcemb_tlb_t *entry; int i; +#ifdef CONFIG_KVM if (kvm_enabled() && !env->kvm_sw_tlb) { qemu_printf("Cannot access KVM TLB\n"); return; } +#endif qemu_printf("\nTLB:\n"); qemu_printf("Effective Physical Size PID Prot " @@ -1021,10 +1023,12 @@ static void mmubooke206_dump_mmu(CPUPPCState *env) int offset = 0; int i; +#ifdef CONFIG_KVM if (kvm_enabled() && !env->kvm_sw_tlb) { qemu_printf("Cannot access KVM TLB\n"); return; } +#endif for (i = 0; i < BOOKE206_MAX_TLBN; i++) { int size = booke206_tlb_size(env, i); From 9638cbde6c4f433b157672c0fdee783c10be45fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Tue, 4 Apr 2023 11:15:05 +0200 Subject: [PATCH 26/30] target/riscv: Restrict KVM-specific fields from ArchCPU MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These fields shouldn't be accessed when KVM is not available. Restrict the KVM timer migration state. Rename the KVM timer post_load() handler accordingly, because cpu_post_load() is too generic. 
Signed-off-by: Philippe Mathieu-Daudé Reviewed-by: Daniel Henrique Barboza Message-Id: <20230626232007.8933-3-philmd@linaro.org> --- target/riscv/cpu.c | 2 +- target/riscv/cpu.h | 2 ++ target/riscv/machine.c | 8 ++++++-- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index 881bddf393..4035fe0e62 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -584,7 +584,7 @@ static void riscv_host_cpu_init(Object *obj) #endif riscv_cpu_add_user_properties(obj); } -#endif +#endif /* CONFIG_KVM */ static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model) { diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h index 7bff1d47f6..7adb8706ac 100644 --- a/target/riscv/cpu.h +++ b/target/riscv/cpu.h @@ -363,12 +363,14 @@ struct CPUArchState { hwaddr kernel_addr; hwaddr fdt_addr; +#ifdef CONFIG_KVM /* kvm timer */ bool kvm_timer_dirty; uint64_t kvm_timer_time; uint64_t kvm_timer_compare; uint64_t kvm_timer_state; uint64_t kvm_timer_frequency; +#endif /* CONFIG_KVM */ }; /* diff --git a/target/riscv/machine.c b/target/riscv/machine.c index 3ce2970785..c7c862cdd3 100644 --- a/target/riscv/machine.c +++ b/target/riscv/machine.c @@ -194,12 +194,13 @@ static const VMStateDescription vmstate_rv128 = { } }; +#ifdef CONFIG_KVM static bool kvmtimer_needed(void *opaque) { return kvm_enabled(); } -static int cpu_post_load(void *opaque, int version_id) +static int cpu_kvmtimer_post_load(void *opaque, int version_id) { RISCVCPU *cpu = opaque; CPURISCVState *env = &cpu->env; @@ -213,7 +214,7 @@ static const VMStateDescription vmstate_kvmtimer = { .version_id = 1, .minimum_version_id = 1, .needed = kvmtimer_needed, - .post_load = cpu_post_load, + .post_load = cpu_kvmtimer_post_load, .fields = (VMStateField[]) { VMSTATE_UINT64(env.kvm_timer_time, RISCVCPU), VMSTATE_UINT64(env.kvm_timer_compare, RISCVCPU), @@ -221,6 +222,7 @@ static const VMStateDescription vmstate_kvmtimer = { VMSTATE_END_OF_LIST() } }; +#endif static bool debug_needed(void *opaque) { @@ -409,7 +411,9 @@ const VMStateDescription vmstate_riscv_cpu = { &vmstate_vector, &vmstate_pointermasking, &vmstate_rv128, +#ifdef CONFIG_KVM &vmstate_kvmtimer, +#endif &vmstate_envcfg, &vmstate_debug, &vmstate_smstateen, From db5a06b3a21239ffa9424f89f98ee9afa14e955b Mon Sep 17 00:00:00 2001 From: Zhao Liu Date: Mon, 29 May 2023 20:43:31 +0800 Subject: [PATCH 27/30] target/i386/WHPX: Fix error message when fail to set ProcessorCount MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 003f230e37d7 ("machine: Tweak the order of topology members in struct CpuTopology") changes the meaning of MachineState.smp.cores from "the number of cores in one package" to "the number of cores in one die" and doesn't fix other uses of MachineState.smp.cores. And because of the introduction of cluster, now smp.cores just means "the number of cores in one cluster". This clearly does not fit the semantics here. And before this error message, WHvSetPartitionProperty() is called to set prop.ProcessorCount. So the error message should show the prop.ProcessorCount other than "cores per cluster" or "cores per package". 
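To make the mismatch concrete with an illustrative configuration (the
numbers are an example, not taken from this patch), on a machine type that
supports dies and clusters:

    -smp 16,sockets=1,dies=2,clusters=2,cores=4,threads=1

Here ms->smp.cores is 4 (cores per cluster), while the partition was
configured with 1 * 2 * 2 * 4 * 1 = 16 processors, so printing
ms->smp.cores would report "4" for a property that was actually set to 16.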
Cc: Sunil Muthuswamy Signed-off-by: Zhao Liu Reviewed-by: Philippe Mathieu-Daudé Message-Id: <20230529124331.412822-1-zhao1.liu@linux.intel.com> [PMD: Use '%u' format for ProcessorCount] Signed-off-by: Philippe Mathieu-Daudé --- target/i386/whpx/whpx-all.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c index 9ee04ee650..57580ca383 100644 --- a/target/i386/whpx/whpx-all.c +++ b/target/i386/whpx/whpx-all.c @@ -2598,8 +2598,8 @@ static int whpx_accel_init(MachineState *ms) sizeof(WHV_PARTITION_PROPERTY)); if (FAILED(hr)) { - error_report("WHPX: Failed to set partition core count to %d," - " hr=%08lx", ms->smp.cores, hr); + error_report("WHPX: Failed to set partition processor count to %u," + " hr=%08lx", prop.ProcessorCount, hr); ret = -EINVAL; goto error; } From 5369a36c4fe5a70e8831059bcd2a2eef834aae21 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Tue, 20 Jun 2023 09:50:47 -0700 Subject: [PATCH 28/30] exec/memory: Add symbolic value for memory listener priority for accel MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add MEMORY_LISTNER_PRIORITY_ACCEL for the symbolic value for the memory listener to replace the hard-coded value 10 for accel. No functional change intended. Signed-off-by: Isaku Yamahata Reviewed-by: Philippe Mathieu-Daudé Message-Id: Signed-off-by: Philippe Mathieu-Daudé --- accel/hvf/hvf-accel-ops.c | 2 +- accel/kvm/kvm-all.c | 2 +- hw/arm/xen_arm.c | 2 +- hw/i386/xen/xen-hvm.c | 2 +- hw/xen/xen-hvm-common.c | 2 +- hw/xen/xen_pt.c | 4 ++-- include/exec/memory.h | 2 ++ target/i386/hax/hax-mem.c | 2 +- target/i386/nvmm/nvmm-all.c | 2 +- target/i386/whpx/whpx-all.c | 2 +- 10 files changed, 12 insertions(+), 10 deletions(-) diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c index 7db9d3099e..a44cf1c144 100644 --- a/accel/hvf/hvf-accel-ops.c +++ b/accel/hvf/hvf-accel-ops.c @@ -304,7 +304,7 @@ static void hvf_region_del(MemoryListener *listener, static MemoryListener hvf_memory_listener = { .name = "hvf", - .priority = 10, + .priority = MEMORY_LISTENER_PRIORITY_ACCEL, .region_add = hvf_region_add, .region_del = hvf_region_del, .log_start = hvf_log_start, diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c index 9aa898db14..964f61feee 100644 --- a/accel/kvm/kvm-all.c +++ b/accel/kvm/kvm-all.c @@ -1777,7 +1777,7 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml, kml->listener.commit = kvm_region_commit; kml->listener.log_start = kvm_log_start; kml->listener.log_stop = kvm_log_stop; - kml->listener.priority = 10; + kml->listener.priority = MEMORY_LISTENER_PRIORITY_ACCEL; kml->listener.name = name; if (s->kvm_dirty_ring_size) { diff --git a/hw/arm/xen_arm.c b/hw/arm/xen_arm.c index 19b1cb81ad..044093fec7 100644 --- a/hw/arm/xen_arm.c +++ b/hw/arm/xen_arm.c @@ -45,7 +45,7 @@ static MemoryListener xen_memory_listener = { .log_sync = NULL, .log_global_start = NULL, .log_global_stop = NULL, - .priority = 10, + .priority = MEMORY_LISTENER_PRIORITY_ACCEL, }; struct XenArmState { diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c index 5dc5e80535..3da5a2b23f 100644 --- a/hw/i386/xen/xen-hvm.c +++ b/hw/i386/xen/xen-hvm.c @@ -467,7 +467,7 @@ static MemoryListener xen_memory_listener = { .log_sync = xen_log_sync, .log_global_start = xen_log_global_start, .log_global_stop = xen_log_global_stop, - .priority = 10, + .priority = MEMORY_LISTENER_PRIORITY_ACCEL, }; static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t 
*req) diff --git a/hw/xen/xen-hvm-common.c b/hw/xen/xen-hvm-common.c index 42339c96bd..886c3ee944 100644 --- a/hw/xen/xen-hvm-common.c +++ b/hw/xen/xen-hvm-common.c @@ -155,7 +155,7 @@ MemoryListener xen_io_listener = { .name = "xen-io", .region_add = xen_io_add, .region_del = xen_io_del, - .priority = 10, + .priority = MEMORY_LISTENER_PRIORITY_ACCEL, }; DeviceListener xen_device_listener = { diff --git a/hw/xen/xen_pt.c b/hw/xen/xen_pt.c index a540149639..36e6f93c37 100644 --- a/hw/xen/xen_pt.c +++ b/hw/xen/xen_pt.c @@ -691,14 +691,14 @@ static const MemoryListener xen_pt_memory_listener = { .name = "xen-pt-mem", .region_add = xen_pt_region_add, .region_del = xen_pt_region_del, - .priority = 10, + .priority = MEMORY_LISTENER_PRIORITY_ACCEL, }; static const MemoryListener xen_pt_io_listener = { .name = "xen-pt-io", .region_add = xen_pt_io_region_add, .region_del = xen_pt_io_region_del, - .priority = 10, + .priority = MEMORY_LISTENER_PRIORITY_ACCEL, }; /* destroy. */ diff --git a/include/exec/memory.h b/include/exec/memory.h index 47c2e0221c..6d95d59175 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -811,6 +811,8 @@ struct IOMMUMemoryRegion { #define IOMMU_NOTIFIER_FOREACH(n, mr) \ QLIST_FOREACH((n), &(mr)->iommu_notify, node) +#define MEMORY_LISTENER_PRIORITY_ACCEL 10 + /** * struct MemoryListener: callbacks structure for updates to the physical memory map * diff --git a/target/i386/hax/hax-mem.c b/target/i386/hax/hax-mem.c index 05dbe8cce3..bb5ffbc9ac 100644 --- a/target/i386/hax/hax-mem.c +++ b/target/i386/hax/hax-mem.c @@ -291,7 +291,7 @@ static MemoryListener hax_memory_listener = { .region_add = hax_region_add, .region_del = hax_region_del, .log_sync = hax_log_sync, - .priority = 10, + .priority = MEMORY_LISTENER_PRIORITY_ACCEL, }; static void hax_ram_block_added(RAMBlockNotifier *n, void *host, size_t size, diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c index 72a3a9e3ae..066a173d26 100644 --- a/target/i386/nvmm/nvmm-all.c +++ b/target/i386/nvmm/nvmm-all.c @@ -1128,7 +1128,7 @@ static MemoryListener nvmm_memory_listener = { .region_add = nvmm_region_add, .region_del = nvmm_region_del, .log_sync = nvmm_log_sync, - .priority = 10, + .priority = MEMORY_LISTENER_PRIORITY_ACCEL, }; static void diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c index 57580ca383..3de0dc1d46 100644 --- a/target/i386/whpx/whpx-all.c +++ b/target/i386/whpx/whpx-all.c @@ -2412,7 +2412,7 @@ static MemoryListener whpx_memory_listener = { .region_add = whpx_region_add, .region_del = whpx_region_del, .log_sync = whpx_log_sync, - .priority = 10, + .priority = MEMORY_LISTENER_PRIORITY_ACCEL, }; static void whpx_memory_init(void) From 8be0461d37705ae0a0c4e76b3ce5e4b558d51aba Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Tue, 20 Jun 2023 09:50:48 -0700 Subject: [PATCH 29/30] exec/memory: Add symbol for memory listener priority for device backend MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add MEMORY_LISTENER_PRIORITY_DEV_BACKEND for the symbolic value for memory listener to replace the hard-coded value 10 for the device backend. No functional change intended. 
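Note that both symbols resolve to the same value, so the relative ordering
of accelerator and device-backend listeners is unchanged, in line with "No
functional change intended". Condensed excerpt of the resulting definitions
in "include/exec/memory.h" (see the hunk below):

    #define MEMORY_LISTENER_PRIORITY_ACCEL 10
    #define MEMORY_LISTENER_PRIORITY_DEV_BACKEND 10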
Signed-off-by: Isaku Yamahata Reviewed-by: Philippe Mathieu-Daudé Message-Id: <8314d91688030d7004e96958f12e2c83fb889245.1687279702.git.isaku.yamahata@intel.com> Signed-off-by: Philippe Mathieu-Daudé --- accel/kvm/kvm-all.c | 2 +- hw/remote/proxy-memory-listener.c | 2 +- hw/virtio/vhost.c | 2 +- include/exec/memory.h | 1 + 4 files changed, 4 insertions(+), 3 deletions(-) diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c index 964f61feee..9d0837a39c 100644 --- a/accel/kvm/kvm-all.c +++ b/accel/kvm/kvm-all.c @@ -1802,7 +1802,7 @@ static MemoryListener kvm_io_listener = { .name = "kvm-io", .eventfd_add = kvm_io_ioeventfd_add, .eventfd_del = kvm_io_ioeventfd_del, - .priority = 10, + .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND, }; int kvm_set_irq(KVMState *s, int irq, int level) diff --git a/hw/remote/proxy-memory-listener.c b/hw/remote/proxy-memory-listener.c index 18d96a1d04..a926f61ebe 100644 --- a/hw/remote/proxy-memory-listener.c +++ b/hw/remote/proxy-memory-listener.c @@ -217,7 +217,7 @@ void proxy_memory_listener_configure(ProxyMemoryListener *proxy_listener, proxy_listener->listener.commit = proxy_memory_listener_commit; proxy_listener->listener.region_add = proxy_memory_listener_region_addnop; proxy_listener->listener.region_nop = proxy_memory_listener_region_addnop; - proxy_listener->listener.priority = 10; + proxy_listener->listener.priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND; proxy_listener->listener.name = "proxy"; memory_listener_register(&proxy_listener->listener, diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c index d116c2d6a1..82394331bf 100644 --- a/hw/virtio/vhost.c +++ b/hw/virtio/vhost.c @@ -1444,7 +1444,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque, .log_sync = vhost_log_sync, .log_global_start = vhost_log_global_start, .log_global_stop = vhost_log_global_stop, - .priority = 10 + .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND }; hdev->iommu_listener = (MemoryListener) { diff --git a/include/exec/memory.h b/include/exec/memory.h index 6d95d59175..4d789461a2 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -812,6 +812,7 @@ struct IOMMUMemoryRegion { QLIST_FOREACH((n), &(mr)->iommu_notify, node) #define MEMORY_LISTENER_PRIORITY_ACCEL 10 +#define MEMORY_LISTENER_PRIORITY_DEV_BACKEND 10 /** * struct MemoryListener: callbacks structure for updates to the physical memory map From 14a868c626e99eea063ecbf6ef86002f6a314f0a Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Tue, 20 Jun 2023 09:50:49 -0700 Subject: [PATCH 30/30] exec/memory: Add symbol for the min value of memory listener priority MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add MEMORY_LISTNER_PRIORITY_MIN for the symbolic value for the min value of the memory listener instead of the hard-coded magic value 0. Add explicit initialization. No functional change intended. 
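A minimal sketch of a listener spelled out with the new symbol (hypothetical
code, not from this series; it assumes the usual QEMU rule that listeners
with a lower priority see 'region_add' callbacks before higher-priority
ones, and 'region_del' callbacks after them):

    static MemoryListener example_listener = {
        .name       = "example-early",
        .region_add = example_region_add,
        .region_del = example_region_del,
        /* Previously an implicit 0; making it explicit documents that this
         * listener runs ahead of the accelerator listener (priority 10). */
        .priority   = MEMORY_LISTENER_PRIORITY_MIN,
    };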
Signed-off-by: Isaku Yamahata Reviewed-by: Philippe Mathieu-Daudé Message-Id: <29f88477fe82eb774bcfcae7f65ea21995f865f2.1687279702.git.isaku.yamahata@intel.com> Signed-off-by: Philippe Mathieu-Daudé --- accel/kvm/kvm-all.c | 1 + include/exec/memory.h | 1 + target/arm/kvm.c | 1 + 3 files changed, 3 insertions(+) diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c index 9d0837a39c..373d876c05 100644 --- a/accel/kvm/kvm-all.c +++ b/accel/kvm/kvm-all.c @@ -1105,6 +1105,7 @@ static MemoryListener kvm_coalesced_pio_listener = { .name = "kvm-coalesced-pio", .coalesced_io_add = kvm_coalesce_pio_add, .coalesced_io_del = kvm_coalesce_pio_del, + .priority = MEMORY_LISTENER_PRIORITY_MIN, }; int kvm_check_extension(KVMState *s, unsigned int extension) diff --git a/include/exec/memory.h b/include/exec/memory.h index 4d789461a2..7f5c11a0cc 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -811,6 +811,7 @@ struct IOMMUMemoryRegion { #define IOMMU_NOTIFIER_FOREACH(n, mr) \ QLIST_FOREACH((n), &(mr)->iommu_notify, node) +#define MEMORY_LISTENER_PRIORITY_MIN 0 #define MEMORY_LISTENER_PRIORITY_ACCEL 10 #define MEMORY_LISTENER_PRIORITY_DEV_BACKEND 10 diff --git a/target/arm/kvm.c b/target/arm/kvm.c index 84da49332c..b4c7654f49 100644 --- a/target/arm/kvm.c +++ b/target/arm/kvm.c @@ -341,6 +341,7 @@ static MemoryListener devlistener = { .name = "kvm-arm", .region_add = kvm_arm_devlistener_add, .region_del = kvm_arm_devlistener_del, + .priority = MEMORY_LISTENER_PRIORITY_MIN, }; static void kvm_arm_set_device_addr(KVMDevice *kd)