hw/xen: Add evtchn operations to allow redirection to internal emulation

The existing implementation, which calls into the real libxenevtchn, moves to
a new file, hw/xen/xen-operations.c, and is now invoked through a function
table. In a subsequent commit, the same table will also be able to dispatch
to the emulated event channel support.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Paul Durrant <paul@xen.org>
Author: David Woodhouse
Date:   2023-01-01 17:54:41 +00:00
Commit: b6cacfea0b (parent 831b0db8ab)

13 changed files with 242 additions and 57 deletions
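The shape of the change is a straightforward ops-table indirection: callers stop invoking xenevtchn_*() directly and instead go through thin qemu_xen_evtchn_*() wrappers that dispatch via a global function table, so whichever backend registered its table at startup (the real libxenevtchn here, the built-in emulation in a later commit) receives the call. Below is a minimal, self-contained sketch of that pattern, assuming nothing beyond the standard C library; the handle definition, the dummy backend and main() are invented for illustration and are not the QEMU code.

/*
 * Minimal sketch of the ops-table dispatch pattern used by this commit.
 * The struct shape loosely mirrors evtchn_backend_ops below, but the dummy
 * backend and main() are invented for illustration.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t evtchn_port_t;

/* Opaque in the real header; given a trivial definition so the sketch builds. */
typedef struct xenevtchn_handle xenevtchn_handle;
struct xenevtchn_handle { int unused; };

struct evtchn_backend_ops {
    xenevtchn_handle *(*open)(void);
    int (*notify)(xenevtchn_handle *xc, evtchn_port_t port);
    /* bind_interdomain, unbind, close, get_fd, unmask, pending elided here */
};

/* Set once at startup to whichever backend is in use. */
static struct evtchn_backend_ops *xen_evtchn_ops;

/* Device code only ever calls wrappers like these. */
static inline xenevtchn_handle *qemu_xen_evtchn_open(void)
{
    return xen_evtchn_ops ? xen_evtchn_ops->open() : NULL;
}

static inline int qemu_xen_evtchn_notify(xenevtchn_handle *xc, evtchn_port_t port)
{
    if (!xen_evtchn_ops) {
        return -ENOSYS;             /* no backend registered */
    }
    return xen_evtchn_ops->notify(xc, port);
}

/* Dummy backend standing in for libxenevtchn or the internal emulation. */
static struct xenevtchn_handle dummy_handle;

static xenevtchn_handle *dummy_open(void)
{
    return &dummy_handle;
}

static int dummy_notify(xenevtchn_handle *xc, evtchn_port_t port)
{
    (void)xc;
    printf("notify port %u\n", (unsigned)port);
    return 0;
}

static struct evtchn_backend_ops dummy_backend_ops = {
    .open = dummy_open,
    .notify = dummy_notify,
};

int main(void)
{
    xen_evtchn_ops = &dummy_backend_ops;    /* analogue of setup_xen_backend_ops() */
    return qemu_xen_evtchn_notify(qemu_xen_evtchn_open(), 5) ? 1 : 0;
}

The real interface and wrappers live in the new xen_backend_ops.h header below; hw/xen/xen-operations.c provides the table that forwards to libxenevtchn.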


@@ -241,7 +241,7 @@ static void xen_9pfs_push_and_notify(V9fsPDU *pdu)
     xen_wmb();
     ring->inprogress = false;
-    xenevtchn_notify(ring->evtchndev, ring->local_port);
+    qemu_xen_evtchn_notify(ring->evtchndev, ring->local_port);
     qemu_bh_schedule(ring->bh);
 }
@@ -324,8 +324,8 @@ static void xen_9pfs_evtchn_event(void *opaque)
     Xen9pfsRing *ring = opaque;
     evtchn_port_t port;
-    port = xenevtchn_pending(ring->evtchndev);
-    xenevtchn_unmask(ring->evtchndev, port);
+    port = qemu_xen_evtchn_pending(ring->evtchndev);
+    qemu_xen_evtchn_unmask(ring->evtchndev, port);
     qemu_bh_schedule(ring->bh);
 }
@@ -337,10 +337,10 @@ static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev)
     for (i = 0; i < xen_9pdev->num_rings; i++) {
         if (xen_9pdev->rings[i].evtchndev != NULL) {
-            qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
+            qemu_set_fd_handler(qemu_xen_evtchn_fd(xen_9pdev->rings[i].evtchndev),
                                 NULL, NULL, NULL);
-            xenevtchn_unbind(xen_9pdev->rings[i].evtchndev,
+            qemu_xen_evtchn_unbind(xen_9pdev->rings[i].evtchndev,
                              xen_9pdev->rings[i].local_port);
             xen_9pdev->rings[i].evtchndev = NULL;
         }
     }
@@ -447,12 +447,12 @@ static int xen_9pfs_connect(struct XenLegacyDevice *xendev)
         xen_9pdev->rings[i].inprogress = false;
-        xen_9pdev->rings[i].evtchndev = xenevtchn_open(NULL, 0);
+        xen_9pdev->rings[i].evtchndev = qemu_xen_evtchn_open();
         if (xen_9pdev->rings[i].evtchndev == NULL) {
             goto out;
         }
-        qemu_set_cloexec(xenevtchn_fd(xen_9pdev->rings[i].evtchndev));
-        xen_9pdev->rings[i].local_port = xenevtchn_bind_interdomain
+        qemu_set_cloexec(qemu_xen_evtchn_fd(xen_9pdev->rings[i].evtchndev));
+        xen_9pdev->rings[i].local_port = qemu_xen_evtchn_bind_interdomain
                                          (xen_9pdev->rings[i].evtchndev,
                                           xendev->dom,
                                           xen_9pdev->rings[i].evtchn);
@@ -463,8 +463,8 @@ static int xen_9pfs_connect(struct XenLegacyDevice *xendev)
             goto out;
         }
         xen_pv_printf(xendev, 2, "bind evtchn port %d\n", xendev->local_port);
-        qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev),
+        qemu_set_fd_handler(qemu_xen_evtchn_fd(xen_9pdev->rings[i].evtchndev),
                             xen_9pfs_evtchn_event, NULL, &xen_9pdev->rings[i]);
     }
     xen_9pdev->security_model = xenstore_read_be_str(xendev, "security_model");


@@ -761,7 +761,7 @@ static ioreq_t *cpu_get_ioreq(XenIOState *state)
     int i;
     evtchn_port_t port;
-    port = xenevtchn_pending(state->xce_handle);
+    port = qemu_xen_evtchn_pending(state->xce_handle);
     if (port == state->bufioreq_local_port) {
         timer_mod(state->buffered_io_timer,
                   BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
@@ -780,7 +780,7 @@ static ioreq_t *cpu_get_ioreq(XenIOState *state)
     }
     /* unmask the wanted port again */
-    xenevtchn_unmask(state->xce_handle, port);
+    qemu_xen_evtchn_unmask(state->xce_handle, port);
     /* get the io packet from shared memory */
     state->send_vcpu = i;
@@ -1147,7 +1147,7 @@ static void handle_buffered_io(void *opaque)
                   BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
     } else {
         timer_del(state->buffered_io_timer);
-        xenevtchn_unmask(state->xce_handle, state->bufioreq_local_port);
+        qemu_xen_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
     }
 }
@@ -1196,8 +1196,8 @@ static void cpu_handle_ioreq(void *opaque)
         }
         req->state = STATE_IORESP_READY;
-        xenevtchn_notify(state->xce_handle,
+        qemu_xen_evtchn_notify(state->xce_handle,
                          state->ioreq_local_port[state->send_vcpu]);
     }
 }
@@ -1206,7 +1206,7 @@ static void xen_main_loop_prepare(XenIOState *state)
     int evtchn_fd = -1;
     if (state->xce_handle != NULL) {
-        evtchn_fd = xenevtchn_fd(state->xce_handle);
+        evtchn_fd = qemu_xen_evtchn_fd(state->xce_handle);
     }
     state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io,
@@ -1249,7 +1249,7 @@ static void xen_exit_notifier(Notifier *n, void *data)
         xenforeignmemory_unmap_resource(xen_fmem, state->fres);
     }
-    xenevtchn_close(state->xce_handle);
+    qemu_xen_evtchn_close(state->xce_handle);
     xs_daemon_close(state->xenstore);
 }
@@ -1397,9 +1397,11 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
     xen_pfn_t ioreq_pfn;
     XenIOState *state;
+    setup_xen_backend_ops();
+
     state = g_new0(XenIOState, 1);
-    state->xce_handle = xenevtchn_open(NULL, 0);
+    state->xce_handle = qemu_xen_evtchn_open();
     if (state->xce_handle == NULL) {
         perror("xen: event channel open");
         goto err;
@@ -1463,8 +1465,9 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
     /* FIXME: how about if we overflow the page here? */
     for (i = 0; i < max_cpus; i++) {
-        rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
-                                        xen_vcpu_eport(state->shared_page, i));
+        rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid,
+                                              xen_vcpu_eport(state->shared_page,
+                                                             i));
         if (rc == -1) {
             error_report("shared evtchn %d bind error %d", i, errno);
             goto err;
@@ -1472,8 +1475,8 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
         state->ioreq_local_port[i] = rc;
     }
-    rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
+    rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                           state->bufioreq_remote_port);
     if (rc == -1) {
         error_report("buffered evtchn bind error %d", errno);
         goto err;


@@ -5,6 +5,7 @@ softmmu_ss.add(when: ['CONFIG_XEN', xen], if_true: files(
   'xen-legacy-backend.c',
   'xen_devconfig.c',
   'xen_pvdev.c',
+  'xen-operations.c',
 ))
 xen_specific_ss = ss.source_set()


@@ -1095,12 +1095,12 @@ static bool xen_device_poll(void *opaque)
 static void xen_device_event(void *opaque)
 {
     XenEventChannel *channel = opaque;
-    unsigned long port = xenevtchn_pending(channel->xeh);
+    unsigned long port = qemu_xen_evtchn_pending(channel->xeh);
     if (port == channel->local_port) {
         xen_device_poll(channel);
-        xenevtchn_unmask(channel->xeh, port);
+        qemu_xen_evtchn_unmask(channel->xeh, port);
     }
 }
@@ -1115,11 +1115,11 @@ void xen_device_set_event_channel_context(XenDevice *xendev,
     }
     if (channel->ctx)
-        aio_set_fd_handler(channel->ctx, xenevtchn_fd(channel->xeh), true,
+        aio_set_fd_handler(channel->ctx, qemu_xen_evtchn_fd(channel->xeh), true,
                            NULL, NULL, NULL, NULL, NULL);
     channel->ctx = ctx;
-    aio_set_fd_handler(channel->ctx, xenevtchn_fd(channel->xeh), true,
+    aio_set_fd_handler(channel->ctx, qemu_xen_evtchn_fd(channel->xeh), true,
                        xen_device_event, NULL, xen_device_poll, NULL, channel);
 }
@@ -1131,13 +1131,13 @@ XenEventChannel *xen_device_bind_event_channel(XenDevice *xendev,
     XenEventChannel *channel = g_new0(XenEventChannel, 1);
     xenevtchn_port_or_error_t local_port;
-    channel->xeh = xenevtchn_open(NULL, 0);
+    channel->xeh = qemu_xen_evtchn_open();
     if (!channel->xeh) {
         error_setg_errno(errp, errno, "failed xenevtchn_open");
         goto fail;
     }
-    local_port = xenevtchn_bind_interdomain(channel->xeh,
+    local_port = qemu_xen_evtchn_bind_interdomain(channel->xeh,
                                                   xendev->frontend_id,
                                                   port);
     if (local_port < 0) {
@@ -1160,7 +1160,7 @@ XenEventChannel *xen_device_bind_event_channel(XenDevice *xendev,
 fail:
     if (channel->xeh) {
-        xenevtchn_close(channel->xeh);
+        qemu_xen_evtchn_close(channel->xeh);
     }
     g_free(channel);
@@ -1177,7 +1177,7 @@ void xen_device_notify_event_channel(XenDevice *xendev,
         return;
     }
-    if (xenevtchn_notify(channel->xeh, channel->local_port) < 0) {
+    if (qemu_xen_evtchn_notify(channel->xeh, channel->local_port) < 0) {
         error_setg_errno(errp, errno, "xenevtchn_notify failed");
     }
 }
@@ -1193,14 +1193,14 @@ void xen_device_unbind_event_channel(XenDevice *xendev,
     QLIST_REMOVE(channel, list);
-    aio_set_fd_handler(channel->ctx, xenevtchn_fd(channel->xeh), true,
+    aio_set_fd_handler(channel->ctx, qemu_xen_evtchn_fd(channel->xeh), true,
                        NULL, NULL, NULL, NULL, NULL);
-    if (xenevtchn_unbind(channel->xeh, channel->local_port) < 0) {
+    if (qemu_xen_evtchn_unbind(channel->xeh, channel->local_port) < 0) {
         error_setg_errno(errp, errno, "xenevtchn_unbind failed");
     }
-    xenevtchn_close(channel->xeh);
+    qemu_xen_evtchn_close(channel->xeh);
     g_free(channel);
 }


@@ -294,13 +294,13 @@ static struct XenLegacyDevice *xen_be_get_xendev(const char *type, int dom,
     xendev->debug = debug;
     xendev->local_port = -1;
-    xendev->evtchndev = xenevtchn_open(NULL, 0);
+    xendev->evtchndev = qemu_xen_evtchn_open();
     if (xendev->evtchndev == NULL) {
         xen_pv_printf(NULL, 0, "can't open evtchn device\n");
         qdev_unplug(DEVICE(xendev), NULL);
         return NULL;
     }
-    qemu_set_cloexec(xenevtchn_fd(xendev->evtchndev));
+    qemu_set_cloexec(qemu_xen_evtchn_fd(xendev->evtchndev));
     xen_pv_insert_xendev(xendev);
@@ -751,14 +751,14 @@ int xen_be_bind_evtchn(struct XenLegacyDevice *xendev)
     if (xendev->local_port != -1) {
         return 0;
     }
-    xendev->local_port = xenevtchn_bind_interdomain
+    xendev->local_port = qemu_xen_evtchn_bind_interdomain
         (xendev->evtchndev, xendev->dom, xendev->remote_port);
     if (xendev->local_port == -1) {
         xen_pv_printf(xendev, 0, "xenevtchn_bind_interdomain failed\n");
         return -1;
     }
     xen_pv_printf(xendev, 2, "bind evtchn port %d\n", xendev->local_port);
-    qemu_set_fd_handler(xenevtchn_fd(xendev->evtchndev),
+    qemu_set_fd_handler(qemu_xen_evtchn_fd(xendev->evtchndev),
                         xen_pv_evtchn_event, NULL, xendev);
     return 0;
 }

hw/xen/xen-operations.c (new file, 71 lines)

@@ -0,0 +1,71 @@
+/*
+ * QEMU Xen backend support: Operations for true Xen
+ *
+ * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Authors: David Woodhouse <dwmw2@infradead.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+
+#include "hw/xen/xen_backend_ops.h"
+#include "hw/xen/xen_common.h"
+
+/*
+ * If we have new enough libxenctrl then we do not want/need these compat
+ * interfaces, despite what the user supplied cflags might say. They
+ * must be undefined before including xenctrl.h
+ */
+#undef XC_WANT_COMPAT_EVTCHN_API
+
+#include <xenctrl.h>
+
+/*
+ * We don't support Xen prior to 4.2.0.
+ */
+
+/* Xen 4.2 through 4.6 */
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701
+
+typedef xc_evtchn xenevtchn_handle;
+typedef evtchn_port_or_error_t xenevtchn_port_or_error_t;
+
+#define xenevtchn_open(l, f) xc_evtchn_open(l, f);
+#define xenevtchn_close(h) xc_evtchn_close(h)
+#define xenevtchn_fd(h) xc_evtchn_fd(h)
+#define xenevtchn_pending(h) xc_evtchn_pending(h)
+#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
+#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
+#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
+#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
+
+#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */
+
+#include <xenevtchn.h>
+
+#endif
+
+static xenevtchn_handle *libxenevtchn_backend_open(void)
+{
+    return xenevtchn_open(NULL, 0);
+}
+
+struct evtchn_backend_ops libxenevtchn_backend_ops = {
+    .open = libxenevtchn_backend_open,
+    .close = xenevtchn_close,
+    .bind_interdomain = xenevtchn_bind_interdomain,
+    .unbind = xenevtchn_unbind,
+    .get_fd = xenevtchn_fd,
+    .notify = xenevtchn_notify,
+    .unmask = xenevtchn_unmask,
+    .pending = xenevtchn_pending,
+};
+
+void setup_xen_backend_ops(void)
+{
+    xen_evtchn_ops = &libxenevtchn_backend_ops;
+}
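This file wires the ops table to the real libxenevtchn. The commit message notes that a subsequent commit will let the same table dispatch to emulated event channel support instead; the fragment below is an invented, self-contained toy (it is not that later QEMU code) showing how any alternative backend can satisfy the same evtchn_backend_ops contract, here using a pipe so that notify() makes the fd returned by get_fd() readable.

/*
 * Invented toy backend (NOT QEMU's actual emulation): shows that anything
 * implementing the same ops contract can be plugged into xen_evtchn_ops.
 * Types are redeclared here so the fragment is self-contained.
 */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

typedef uint32_t evtchn_port_t;

typedef struct xenevtchn_handle {
    int fds[2];             /* fds[0] is polled by the device, fds[1] signals it */
    evtchn_port_t port;     /* one bound port is enough for the device backends */
} xenevtchn_handle;

struct evtchn_backend_ops {
    xenevtchn_handle *(*open)(void);
    int (*bind_interdomain)(xenevtchn_handle *xc, uint32_t domid,
                            evtchn_port_t guest_port);
    int (*unbind)(xenevtchn_handle *xc, evtchn_port_t port);
    int (*close)(xenevtchn_handle *xc);
    int (*get_fd)(xenevtchn_handle *xc);
    int (*notify)(xenevtchn_handle *xc, evtchn_port_t port);
    int (*unmask)(xenevtchn_handle *xc, evtchn_port_t port);
    int (*pending)(xenevtchn_handle *xc);
};

static xenevtchn_handle *emu_open(void)
{
    xenevtchn_handle *xc = calloc(1, sizeof(*xc));

    if (!xc || pipe(xc->fds) < 0) {
        free(xc);
        return NULL;
    }
    return xc;
}

static int emu_bind_interdomain(xenevtchn_handle *xc, uint32_t domid,
                                evtchn_port_t guest_port)
{
    (void)domid;
    xc->port = guest_port;  /* pretend the local port equals the guest port */
    return (int)guest_port;
}

static int emu_notify(xenevtchn_handle *xc, evtchn_port_t port)
{
    char c = 0;

    return (port == xc->port && write(xc->fds[1], &c, 1) == 1) ? 0 : -EINVAL;
}

static int emu_pending(xenevtchn_handle *xc)
{
    char c;

    return read(xc->fds[0], &c, 1) == 1 ? (int)xc->port : -1;
}

/* unmask is a no-op in this toy; unbind just forgets the port */
static int emu_unmask(xenevtchn_handle *xc, evtchn_port_t port) { (void)xc; (void)port; return 0; }
static int emu_unbind(xenevtchn_handle *xc, evtchn_port_t port) { (void)port; xc->port = 0; return 0; }
static int emu_get_fd(xenevtchn_handle *xc) { return xc->fds[0]; }

static int emu_close(xenevtchn_handle *xc)
{
    close(xc->fds[0]);
    close(xc->fds[1]);
    free(xc);
    return 0;
}

struct evtchn_backend_ops emu_evtchn_backend_ops = {
    .open = emu_open,
    .close = emu_close,
    .bind_interdomain = emu_bind_interdomain,
    .unbind = emu_unbind,
    .get_fd = emu_get_fd,
    .notify = emu_notify,
    .unmask = emu_unmask,
    .pending = emu_pending,
};

Pointing xen_evtchn_ops at a table like this in setup_xen_backend_ops() would redirect every qemu_xen_evtchn_*() call without touching any of the callers changed above.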


@@ -238,14 +238,14 @@ void xen_pv_evtchn_event(void *opaque)
     struct XenLegacyDevice *xendev = opaque;
     evtchn_port_t port;
-    port = xenevtchn_pending(xendev->evtchndev);
+    port = qemu_xen_evtchn_pending(xendev->evtchndev);
     if (port != xendev->local_port) {
         xen_pv_printf(xendev, 0,
                       "xenevtchn_pending returned %d (expected %d)\n",
                       port, xendev->local_port);
         return;
     }
-    xenevtchn_unmask(xendev->evtchndev, port);
+    qemu_xen_evtchn_unmask(xendev->evtchndev, port);
     if (xendev->ops->event) {
         xendev->ops->event(xendev);
@@ -257,15 +257,15 @@ void xen_pv_unbind_evtchn(struct XenLegacyDevice *xendev)
     if (xendev->local_port == -1) {
         return;
     }
-    qemu_set_fd_handler(xenevtchn_fd(xendev->evtchndev), NULL, NULL, NULL);
-    xenevtchn_unbind(xendev->evtchndev, xendev->local_port);
+    qemu_set_fd_handler(qemu_xen_evtchn_fd(xendev->evtchndev), NULL, NULL, NULL);
+    qemu_xen_evtchn_unbind(xendev->evtchndev, xendev->local_port);
     xen_pv_printf(xendev, 2, "unbind evtchn port %d\n", xendev->local_port);
     xendev->local_port = -1;
 }
 int xen_pv_send_notify(struct XenLegacyDevice *xendev)
 {
-    return xenevtchn_notify(xendev->evtchndev, xendev->local_port);
+    return qemu_xen_evtchn_notify(xendev->evtchndev, xendev->local_port);
 }
 /* ------------------------------------------------------------- */
@@ -306,7 +306,7 @@ void xen_pv_del_xendev(struct XenLegacyDevice *xendev)
     }
     if (xendev->evtchndev != NULL) {
-        xenevtchn_close(xendev->evtchndev);
+        qemu_xen_evtchn_close(xendev->evtchndev);
     }
     if (xendev->gnttabdev != NULL) {
         xengnttab_close(xendev->gnttabdev);


@@ -8,6 +8,7 @@
 #ifndef HW_XEN_BUS_H
 #define HW_XEN_BUS_H
+#include "hw/xen/xen_backend_ops.h"
 #include "hw/xen/xen_common.h"
 #include "hw/sysbus.h"
 #include "qemu/notify.h"


@@ -2,6 +2,7 @@
 #define HW_XEN_LEGACY_BACKEND_H
 #include "hw/xen/xen_common.h"
+#include "hw/xen/xen_backend_ops.h"
 #include "hw/xen/xen_pvdev.h"
 #include "net/net.h"
 #include "qom/object.h"


@@ -0,0 +1,118 @@
+/*
+ * QEMU Xen backend support
+ *
+ * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Authors: David Woodhouse <dwmw2@infradead.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_XEN_BACKEND_OPS_H
+#define QEMU_XEN_BACKEND_OPS_H
+
+/*
+ * For the time being, these operations map fairly closely to the API of
+ * the actual Xen libraries, e.g. libxenevtchn. As we complete the migration
+ * from XenLegacyDevice back ends to the new XenDevice model, they may
+ * evolve to slightly higher-level APIs.
+ *
+ * The internal emulations do not emulate the Xen APIs entirely faithfully;
+ * only enough to be used by the Xen backend devices. For example, only one
+ * event channel can be bound to each handle, since that's sufficient for
+ * the device support (only the true Xen HVM backend uses more). And the
+ * behaviour of unmask() and pending() is different too because the device
+ * backends don't care.
+ */
+
+typedef struct xenevtchn_handle xenevtchn_handle;
+typedef int xenevtchn_port_or_error_t;
+typedef uint32_t evtchn_port_t;
+
+struct evtchn_backend_ops {
+    xenevtchn_handle *(*open)(void);
+    int (*bind_interdomain)(xenevtchn_handle *xc, uint32_t domid,
+                            evtchn_port_t guest_port);
+    int (*unbind)(xenevtchn_handle *xc, evtchn_port_t port);
+    int (*close)(struct xenevtchn_handle *xc);
+    int (*get_fd)(struct xenevtchn_handle *xc);
+    int (*notify)(struct xenevtchn_handle *xc, evtchn_port_t port);
+    int (*unmask)(struct xenevtchn_handle *xc, evtchn_port_t port);
+    int (*pending)(struct xenevtchn_handle *xc);
+};
+
+extern struct evtchn_backend_ops *xen_evtchn_ops;
+
+static inline xenevtchn_handle *qemu_xen_evtchn_open(void)
+{
+    if (!xen_evtchn_ops) {
+        return NULL;
+    }
+    return xen_evtchn_ops->open();
+}
+
+static inline int qemu_xen_evtchn_bind_interdomain(xenevtchn_handle *xc,
+                                                   uint32_t domid,
+                                                   evtchn_port_t guest_port)
+{
+    if (!xen_evtchn_ops) {
+        return -ENOSYS;
+    }
+    return xen_evtchn_ops->bind_interdomain(xc, domid, guest_port);
+}
+
+static inline int qemu_xen_evtchn_unbind(xenevtchn_handle *xc,
+                                         evtchn_port_t port)
+{
+    if (!xen_evtchn_ops) {
+        return -ENOSYS;
+    }
+    return xen_evtchn_ops->unbind(xc, port);
+}
+
+static inline int qemu_xen_evtchn_close(xenevtchn_handle *xc)
+{
+    if (!xen_evtchn_ops) {
+        return -ENOSYS;
+    }
+    return xen_evtchn_ops->close(xc);
+}
+
+static inline int qemu_xen_evtchn_fd(xenevtchn_handle *xc)
+{
+    if (!xen_evtchn_ops) {
+        return -ENOSYS;
+    }
+    return xen_evtchn_ops->get_fd(xc);
+}
+
+static inline int qemu_xen_evtchn_notify(xenevtchn_handle *xc,
+                                         evtchn_port_t port)
+{
+    if (!xen_evtchn_ops) {
+        return -ENOSYS;
+    }
+    return xen_evtchn_ops->notify(xc, port);
+}
+
+static inline int qemu_xen_evtchn_unmask(xenevtchn_handle *xc,
+                                         evtchn_port_t port)
+{
+    if (!xen_evtchn_ops) {
+        return -ENOSYS;
+    }
+    return xen_evtchn_ops->unmask(xc, port);
+}
+
+static inline int qemu_xen_evtchn_pending(xenevtchn_handle *xc)
+{
+    if (!xen_evtchn_ops) {
+        return -ENOSYS;
+    }
+    return xen_evtchn_ops->pending(xc);
+}
+
+void setup_xen_backend_ops(void);
+
+#endif /* QEMU_XEN_BACKEND_OPS_H */


@@ -28,18 +28,7 @@ extern xc_interface *xen_xc;
 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701
 typedef xc_interface xenforeignmemory_handle;
-typedef xc_evtchn xenevtchn_handle;
 typedef xc_gnttab xengnttab_handle;
-typedef evtchn_port_or_error_t xenevtchn_port_or_error_t;
-#define xenevtchn_open(l, f) xc_evtchn_open(l, f);
-#define xenevtchn_close(h) xc_evtchn_close(h)
-#define xenevtchn_fd(h) xc_evtchn_fd(h)
-#define xenevtchn_pending(h) xc_evtchn_pending(h)
-#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
-#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
-#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
-#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
 #define xengnttab_open(l, f) xc_gnttab_open(l, f)
 #define xengnttab_close(h) xc_gnttab_close(h)
@@ -69,7 +58,6 @@ static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */
-#include <xenevtchn.h>
 #include <xengnttab.h>
 #include <xenforeignmemory.h>


@@ -1,6 +1,7 @@
 #ifndef QEMU_HW_XEN_PVDEV_H
 #define QEMU_HW_XEN_PVDEV_H
+#include "hw/xen/xen_backend_ops.h"
 #include "hw/xen/xen_common.h"
 /* ------------------------------------------------------------- */


@@ -65,3 +65,4 @@ bool qemu_uuid_set;
 uint32_t xen_domid;
 enum xen_mode xen_mode = XEN_DISABLED;
 bool xen_domid_restrict;
+struct evtchn_backend_ops *xen_evtchn_ops;