mirror of https://github.com/xemu-project/xemu.git
vhost, virtio, pci, pc

Fixes all over the place. virtio dataplane migration support. Old q35
machine types removed.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJWzuKeAAoJECgfDbjSjVRpGzIH/1Tz6CoEq1rowiyVJ9B80oQU
gDI2YWnJDSwJllmAF0rmoPRBQR8op3ZETZiCAcADHoZ7kdBNWGbyQeaDrrEPH7Q/
rCDVt8Q3g80vs89aWKG0nQ16J2MW5TbkuiQw7pjQSdc9AbUdWpUqSiWnpZ+sPAql
6DuVpjQ4/rN2alucXoa1Sir8KDDV7kBuY8U6/KoY890qzh842dv2523qvuCza9yR
KX8Imj3oQAFjFSv5t1aOD3yYvWFd73EsReHPLGb1JtsVr/6wjs0sFUyA3JicBgnT
+kWoSObWikfDY69HnqTkJpkun6woMM3zW5h2SkUBf9QP3yqLfGIp9uSriNN84Ak=
=KXyh
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging

# gpg: Signature made Thu 25 Feb 2016 11:16:46 GMT using RSA key ID D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>"
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>"

* remotes/mst/tags/for_upstream: (21 commits)
  q35: No need to check gigabyte_align
  q35: Remove unused q35-acpi-dsdt.aml file
  ich9: Remove enable_tco arguments from init functions
  machine: Remove no_tco field
  q35: Remove old machine versions
  tests/vhost-user-bridge: fix build on 32 bit systems
  vring: remove
  virtio-scsi: do not use vring in dataplane
  virtio-blk: do not use vring in dataplane
  virtio-blk: fix "disabled data plane" mode
  virtio: export vring_notify as virtio_should_notify
  virtio: add AioContext-specific function for host notifiers
  vring: make vring_enable_notification return void
  block-migration: acquire AioContext as necessary
  pci core: function pci_bus_init() cleanup
  pci core: function pci_host_bus_register() cleanup
  balloon: Use only 'pc-dimm' type dimm for ballooning
  virtio-balloon: rewrite get_current_ram_size()
  move get_current_ram_size to virtio-balloon.c
  vhost-user: don't merge regions with different fds
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

This commit is contained in: df215b59d9

Makefile (2 lines changed)
@@ -391,7 +391,7 @@ bepo cz
 ifdef INSTALL_BLOBS
 BLOBS=bios.bin bios-256k.bin sgabios.bin vgabios.bin vgabios-cirrus.bin \
 vgabios-stdvga.bin vgabios-vmware.bin vgabios-qxl.bin vgabios-virtio.bin \
-acpi-dsdt.aml q35-acpi-dsdt.aml \
+acpi-dsdt.aml \
 ppc_rom.bin openbios-sparc32 openbios-sparc64 openbios-ppc QEMU,tcx.bin QEMU,cgthree.bin \
 pxe-e1000.rom pxe-eepro100.rom pxe-ne2k_pci.rom \
 pxe-pcnet.rom pxe-rtl8139.rom pxe-virtio.rom \

@@ -1451,7 +1451,7 @@ build_header(GArray *linker, GArray *table_data,
     h->checksum = 0;
     /* Checksum to be filled in by Guest linker */
     bios_linker_loader_add_checksum(linker, ACPI_BUILD_TABLE_FILE,
-                                    table_data->data, h, len, &h->checksum);
+                                    table_data, h, len, &h->checksum);
 }
 
 void *acpi_data_push(GArray *table_data, unsigned size)

@@ -25,6 +25,13 @@
 
 #include "qemu/bswap.h"
 
+/*
+ * Linker/loader is a paravirtualized interface that passes commands to guest.
+ * The commands can be used to request guest to
+ * - allocate memory chunks and initialize them from QEMU FW CFG files
+ * - link allocated chunks by storing pointer to one chunk into another
+ * - calculate ACPI checksum of part of the chunk and store into same chunk
+ */
 #define BIOS_LINKER_LOADER_FILESZ FW_CFG_MAX_FILE_PATH
 
 struct BiosLinkerLoaderEntry {

@@ -88,6 +95,12 @@ enum {
     BIOS_LINKER_LOADER_ALLOC_ZONE_FSEG = 0x2,
 };
 
+/*
+ * bios_linker_loader_init: allocate a new linker file blob array.
+ *
+ * After initialization, linker commands can be added, and will
+ * be stored in the array.
+ */
 GArray *bios_linker_loader_init(void)
 {
     return g_array_new(false, true /* clear */, 1);

@@ -99,6 +112,16 @@ void *bios_linker_loader_cleanup(GArray *linker)
     return g_array_free(linker, false);
 }
 
+/*
+ * bios_linker_loader_alloc: ask guest to load file into guest memory.
+ *
+ * @linker: linker file blob array
+ * @file: file to be loaded
+ * @alloc_align: required minimal alignment in bytes. Must be a power of 2.
+ * @alloc_fseg: request allocation in FSEG zone (useful for the RSDP ACPI table)
+ *
+ * Note: this command must precede any other linker command using this file.
+ */
 void bios_linker_loader_alloc(GArray *linker,
                               const char *file,
                               uint32_t alloc_align,

@@ -106,6 +129,8 @@ void bios_linker_loader_alloc(GArray *linker,
 {
     BiosLinkerLoaderEntry entry;
 
+    assert(!(alloc_align & (alloc_align - 1)));
+
     memset(&entry, 0, sizeof entry);
     strncpy(entry.alloc.file, file, sizeof entry.alloc.file - 1);
     entry.command = cpu_to_le32(BIOS_LINKER_LOADER_COMMAND_ALLOCATE);

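The new assert above uses the classic bit trick for power-of-two checks: a power of two has exactly one bit set, so clearing the lowest set bit with x & (x - 1) yields zero. A minimal standalone illustration (my own sketch, not part of the patch; note the patch's assert, unlike this helper, also lets 0 through):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Strict variant: rejects zero as well as non-powers-of-two. */
    static bool is_pow2(uint32_t x)
    {
        return x != 0 && (x & (x - 1)) == 0;
    }

    int main(void)
    {
        assert(is_pow2(4096));  /* typical table alignment */
        assert(!is_pow2(24));   /* two bits set -> rejected */
        assert(!is_pow2(0));
        return 0;
    }
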
@@ -118,23 +143,77 @@
     g_array_prepend_vals(linker, &entry, sizeof entry);
 }
 
+/*
+ * bios_linker_loader_add_checksum: ask guest to add checksum of file data
+ * into (same) file at the specified pointer.
+ *
+ * Checksum calculation simply sums -X for each byte X in the range
+ * using 8-bit math (i.e. ACPI checksum).
+ *
+ * @linker: linker file blob array
+ * @file: file that includes the checksum to be calculated
+ *        and the data to be checksummed
+ * @table: @file blob contents
+ * @start, @size: range of data to checksum
+ * @checksum: location of the checksum to be patched within file blob
+ *
+ * Notes:
+ * - checksum byte initial value must have been pushed into @table
+ *   and reside at address @checksum.
+ * - @size bytes must have been pushed into @table and reside at address
+ *   @start.
+ * - Guest calculates checksum of specified range of data, result is added to
+ *   initial value at @checksum into copy of @file in Guest memory.
+ * - Range might include the checksum itself.
+ * - To avoid confusion, caller must always put 0x0 at @checksum.
+ * - @file must be loaded into Guest memory using bios_linker_loader_alloc
+ */
 void bios_linker_loader_add_checksum(GArray *linker, const char *file,
-                                     void *table,
+                                     GArray *table,
                                      void *start, unsigned size,
                                      uint8_t *checksum)
 {
     BiosLinkerLoaderEntry entry;
+    ptrdiff_t checksum_offset = (gchar *)checksum - table->data;
+    ptrdiff_t start_offset = (gchar *)start - table->data;
+
+    assert(checksum_offset >= 0);
+    assert(start_offset >= 0);
+    assert(checksum_offset + 1 <= table->len);
+    assert(start_offset + size <= table->len);
+    assert(*checksum == 0x0);
 
     memset(&entry, 0, sizeof entry);
     strncpy(entry.cksum.file, file, sizeof entry.cksum.file - 1);
     entry.command = cpu_to_le32(BIOS_LINKER_LOADER_COMMAND_ADD_CHECKSUM);
-    entry.cksum.offset = cpu_to_le32(checksum - (uint8_t *)table);
-    entry.cksum.start = cpu_to_le32((uint8_t *)start - (uint8_t *)table);
+    entry.cksum.offset = cpu_to_le32(checksum_offset);
+    entry.cksum.start = cpu_to_le32(start_offset);
     entry.cksum.length = cpu_to_le32(size);
 
     g_array_append_vals(linker, &entry, sizeof entry);
 }
 
+/*
+ * bios_linker_loader_add_pointer: ask guest to add address of source file
+ * into destination file at the specified pointer.
+ *
+ * @linker: linker file blob array
+ * @dest_file: destination file that must be changed
+ * @src_file: source file who's address must be taken
+ * @table: @dest_file blob contents array
+ * @pointer: location of the pointer to be patched within destination file blob
+ * @pointer_size: size of pointer to be patched, in bytes
+ *
+ * Notes:
+ * - @pointer_size bytes must have been pushed into @table
+ *   and reside at address @pointer.
+ * - Guest address is added to initial value at @pointer
+ *   into copy of @dest_file in Guest memory.
+ *   e.g. to get start of src_file in guest memory, put 0x0 there
+ *   to get address of a field at offset 0x10 in src_file, put 0x10 there
+ * - Both @dest_file and @src_file must be
+ *   loaded into Guest memory using bios_linker_loader_alloc
+ */
 void bios_linker_loader_add_pointer(GArray *linker,
                                     const char *dest_file,
                                     const char *src_file,

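For reference, the "sums -X for each byte X" rule in the new comment means the stored checksum byte makes the whole range sum to zero modulo 256, which is exactly the ACPI table checksum. A small sketch of the guest-side computation (my own illustration, not code from this patch):

    #include <stddef.h>
    #include <stdint.h>

    /* Value to store at the checksum byte (initially 0, included in the
     * range) so that the 8-bit sum of the whole range becomes 0. */
    static uint8_t acpi_checksum(const uint8_t *data, size_t len)
    {
        uint8_t sum = 0;
        size_t i;

        for (i = 0; i < len; i++) {
            sum += data[i];
        }
        return -sum; /* sum + (-sum) == 0 (mod 256) */
    }
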
@@ -142,7 +221,10 @@ void bios_linker_loader_add_pointer(GArray *linker,
                                     uint8_t pointer_size)
 {
     BiosLinkerLoaderEntry entry;
-    size_t offset = (gchar *)pointer - table->data;
+    ptrdiff_t offset = (gchar *)pointer - table->data;
+
+    assert(offset >= 0);
+    assert(offset + pointer_size <= table->len);
 
     memset(&entry, 0, sizeof entry);
     strncpy(entry.pointer.dest_file, dest_file,

@@ -150,7 +232,6 @@ void bios_linker_loader_add_pointer(GArray *linker,
     strncpy(entry.pointer.src_file, src_file,
             sizeof entry.pointer.src_file - 1);
     entry.command = cpu_to_le32(BIOS_LINKER_LOADER_COMMAND_ADD_POINTER);
-    assert(table->len >= offset + pointer_size);
     entry.pointer.offset = cpu_to_le32(offset);
     entry.pointer.size = pointer_size;
     assert(pointer_size == 1 || pointer_size == 2 ||

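As the ADD_POINTER doc comment above explains, the guest adds the allocated base address of @src_file to whatever initial value the caller left at @pointer. A worked sketch of the guest-side patching for a 4-byte pointer field (hypothetical helper names; assumes a little-endian host for brevity, whereas the real loader handles byte order explicitly):

    #include <stdint.h>
    #include <string.h>

    /* new_value = value stored by QEMU (e.g. 0x0 or a field offset like
     * 0x10) + guest base address where src_file was loaded. */
    static void patch_pointer32(uint8_t *dest_blob, uint32_t offset,
                                uint32_t src_file_guest_addr)
    {
        uint32_t val;

        memcpy(&val, dest_blob + offset, sizeof val);
        val += src_file_guest_addr;
        memcpy(dest_blob + offset, &val, sizeof val);
    }
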
@@ -240,7 +240,7 @@ static void pm_powerdown_req(Notifier *n, void *opaque)
 }
 
 void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm,
-                  bool smm_enabled, bool enable_tco,
+                  bool smm_enabled,
                   qemu_irq sci_irq)
 {
     memory_region_init(&pm->io, OBJECT(lpc_pci), "ich9-pm", ICH9_PMIO_SIZE);

@@ -264,10 +264,8 @@ void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm,
 
     pm->smm_enabled = smm_enabled;
 
-    pm->enable_tco = enable_tco;
-    if (pm->enable_tco) {
-        acpi_pm_tco_init(&pm->tco_regs, &pm->io);
-    }
+    pm->enable_tco = true;
+    acpi_pm_tco_init(&pm->tco_regs, &pm->io);
 
     pm->irq = sci_irq;
     qemu_register_reset(pm_reset, pm);

@@ -359,7 +359,8 @@ build_rsdp(GArray *rsdp_table, GArray *linker, unsigned rsdt)
     rsdp->checksum = 0;
     /* Checksum to be filled by Guest linker */
     bios_linker_loader_add_checksum(linker, ACPI_BUILD_RSDP_FILE,
-                                    rsdp, rsdp, sizeof *rsdp, &rsdp->checksum);
+                                    rsdp_table, rsdp, sizeof *rsdp,
+                                    &rsdp->checksum);
 
     return rsdp_table;
 }

@@ -18,8 +18,6 @@
 #include "qemu/thread.h"
 #include "qemu/error-report.h"
 #include "hw/virtio/virtio-access.h"
-#include "hw/virtio/dataplane/vring.h"
-#include "hw/virtio/dataplane/vring-accessors.h"
 #include "sysemu/block-backend.h"
 #include "hw/virtio/virtio-blk.h"
 #include "virtio-blk.h"

@@ -28,7 +26,6 @@
 #include "qom/object_interfaces.h"
 
 struct VirtIOBlockDataPlane {
-    bool started;
     bool starting;
     bool stopping;
     bool disabled;

@@ -36,7 +33,7 @@ struct VirtIOBlockDataPlane {
     VirtIOBlkConf *conf;
 
     VirtIODevice *vdev;
-    Vring vring;                    /* virtqueue vring */
+    VirtQueue *vq;                  /* virtqueue vring */
     EventNotifier *guest_notifier;  /* irq */
     QEMUBH *bh;                     /* bh for guest notification */
 

@@ -49,93 +46,26 @@ struct VirtIOBlockDataPlane {
      */
     IOThread *iothread;
     AioContext *ctx;
-    EventNotifier host_notifier;            /* doorbell */
 
     /* Operation blocker on BDS */
     Error *blocker;
-    void (*saved_complete_request)(struct VirtIOBlockReq *req,
-                                   unsigned char status);
 };
 
 /* Raise an interrupt to signal guest, if necessary */
-static void notify_guest(VirtIOBlockDataPlane *s)
+void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s)
 {
-    if (!vring_should_notify(s->vdev, &s->vring)) {
-        return;
-    }
-
-    event_notifier_set(s->guest_notifier);
+    qemu_bh_schedule(s->bh);
 }
 
 static void notify_guest_bh(void *opaque)
 {
     VirtIOBlockDataPlane *s = opaque;
 
-    notify_guest(s);
-}
-
-static void complete_request_vring(VirtIOBlockReq *req, unsigned char status)
-{
-    VirtIOBlockDataPlane *s = req->dev->dataplane;
-    stb_p(&req->in->status, status);
-
-    vring_push(s->vdev, &req->dev->dataplane->vring, &req->elem, req->in_len);
-
-    /* Suppress notification to guest by BH and its scheduled
-     * flag because requests are completed as a batch after io
-     * plug & unplug is introduced, and the BH can still be
-     * executed in dataplane aio context even after it is
-     * stopped, so needn't worry about notification loss with BH.
-     */
-    qemu_bh_schedule(s->bh);
-}
-
-static void handle_notify(EventNotifier *e)
-{
-    VirtIOBlockDataPlane *s = container_of(e, VirtIOBlockDataPlane,
-                                           host_notifier);
-    VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);
-
-    event_notifier_test_and_clear(&s->host_notifier);
-    blk_io_plug(s->conf->conf.blk);
-    for (;;) {
-        MultiReqBuffer mrb = {};
-
-        /* Disable guest->host notifies to avoid unnecessary vmexits */
-        vring_disable_notification(s->vdev, &s->vring);
-
-        for (;;) {
-            VirtIOBlockReq *req = vring_pop(s->vdev, &s->vring,
-                                            sizeof(VirtIOBlockReq));
-
-            if (req == NULL) {
-                break; /* no more requests */
-            }
-
-            virtio_blk_init_request(vblk, req);
-            trace_virtio_blk_data_plane_process_request(s, req->elem.out_num,
-                                                        req->elem.in_num,
-                                                        req->elem.index);
-
-            virtio_blk_handle_request(req, &mrb);
-        }
-
-        if (mrb.num_reqs) {
-            virtio_blk_submit_multireq(s->conf->conf.blk, &mrb);
-        }
-
-        if (likely(!vring_more_avail(s->vdev, &s->vring))) { /* vring emptied */
-            /* Re-enable guest->host notifies and stop processing the vring.
-             * But if the guest has snuck in more descriptors, keep processing.
-             */
-            if (vring_enable_notification(s->vdev, &s->vring)) {
-                break;
-            }
-        } else { /* fatal error */
-            break;
-        }
+    if (!virtio_should_notify(s->vdev, s->vq)) {
+        return;
     }
-    blk_io_unplug(s->conf->conf.blk);
+
+    event_notifier_set(s->guest_notifier);
 }
 
 static void data_plane_set_up_op_blockers(VirtIOBlockDataPlane *s)

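After this hunk the dataplane completion path no longer touches the vring directly: virtio_blk_data_plane_notify() just schedules the BH, and the BH decides via virtio_should_notify() whether the guest really needs an interrupt. A toy model of why BH-based batching collapses many completions into one interrupt (my own sketch, hypothetical names, not QEMU code):

    #include <stdbool.h>
    #include <stdio.h>

    static bool bh_scheduled;
    static int completions;

    static void complete_one(void)      /* per-request completion */
    {
        completions++;
        bh_scheduled = true;            /* like qemu_bh_schedule(s->bh) */
    }

    static void notify_bh(void)         /* runs once per loop iteration */
    {
        if (!bh_scheduled) {
            return;
        }
        bh_scheduled = false;
        printf("1 interrupt for %d completions\n", completions);
        completions = 0;
    }

    int main(void)
    {
        complete_one();
        complete_one();
        complete_one();
        notify_bh();    /* prints: 1 interrupt for 3 completions */
        return 0;
    }
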
@@ -260,23 +190,14 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
     VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);
-    VirtQueue *vq;
     int r;
 
-    if (s->started || s->disabled) {
-        return;
-    }
-
-    if (s->starting) {
+    if (vblk->dataplane_started || s->starting) {
         return;
     }
 
     s->starting = true;
-
-    vq = virtio_get_queue(s->vdev, 0);
-    if (!vring_setup(&s->vring, s->vdev, 0)) {
-        goto fail_vring;
-    }
+    s->vq = virtio_get_queue(s->vdev, 0);
 
     /* Set up guest notifier (irq) */
     r = k->set_guest_notifiers(qbus->parent, 1, true);

@@ -285,7 +206,7 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
                 "ensure -enable-kvm is set\n", r);
         goto fail_guest_notifiers;
     }
-    s->guest_notifier = virtio_queue_get_guest_notifier(vq);
+    s->guest_notifier = virtio_queue_get_guest_notifier(s->vq);
 
     /* Set up virtqueue notify */
     r = k->set_host_notifier(qbus->parent, 0, true);

@@ -293,34 +214,28 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
         fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r);
         goto fail_host_notifier;
     }
-    s->host_notifier = *virtio_queue_get_host_notifier(vq);
-
-    s->saved_complete_request = vblk->complete_request;
-    vblk->complete_request = complete_request_vring;
 
     s->starting = false;
-    s->started = true;
+    vblk->dataplane_started = true;
     trace_virtio_blk_data_plane_start(s);
 
     blk_set_aio_context(s->conf->conf.blk, s->ctx);
 
     /* Kick right away to begin processing requests already in vring */
-    event_notifier_set(virtio_queue_get_host_notifier(vq));
+    event_notifier_set(virtio_queue_get_host_notifier(s->vq));
 
     /* Get this show started by hooking up our callbacks */
     aio_context_acquire(s->ctx);
-    aio_set_event_notifier(s->ctx, &s->host_notifier, true,
-                           handle_notify);
+    virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx, true, true);
     aio_context_release(s->ctx);
     return;
 
   fail_host_notifier:
     k->set_guest_notifiers(qbus->parent, 1, false);
   fail_guest_notifiers:
-    vring_teardown(&s->vring, s->vdev, 0);
     s->disabled = true;
-  fail_vring:
     s->starting = false;
+    vblk->dataplane_started = true;
 }
 
 /* Context: QEMU global mutex held */

@@ -330,39 +245,34 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
     VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);
 
+    if (!vblk->dataplane_started || s->stopping) {
+        return;
+    }
+
     /* Better luck next time. */
     if (s->disabled) {
         s->disabled = false;
-        return;
-    }
-    if (!s->started || s->stopping) {
+        vblk->dataplane_started = false;
         return;
     }
     s->stopping = true;
-    vblk->complete_request = s->saved_complete_request;
     trace_virtio_blk_data_plane_stop(s);
 
     aio_context_acquire(s->ctx);
 
     /* Stop notifications for new requests from guest */
-    aio_set_event_notifier(s->ctx, &s->host_notifier, true, NULL);
+    virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx, false, false);
 
     /* Drain and switch bs back to the QEMU main loop */
     blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context());
 
     aio_context_release(s->ctx);
 
-    /* Sync vring state back to virtqueue so that non-dataplane request
-     * processing can continue when we disable the host notifier below.
-     */
-    vring_teardown(&s->vring, s->vdev, 0);
-
     k->set_host_notifier(qbus->parent, 0, false);
 
     /* Clean up guest notifier (irq) */
     k->set_guest_notifiers(qbus->parent, 1, false);
 
-    s->started = false;
+    vblk->dataplane_started = false;
     s->stopping = false;
 }

@@ -26,5 +26,6 @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s);
 void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s);
 void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s);
 void virtio_blk_data_plane_drain(VirtIOBlockDataPlane *s);
+void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s);
 
 #endif /* HW_DATAPLANE_VIRTIO_BLK_H */

@@ -21,7 +21,6 @@
 #include "sysemu/blockdev.h"
 #include "hw/virtio/virtio-blk.h"
 #include "dataplane/virtio-blk.h"
-#include "migration/migration.h"
 #include "block/scsi.h"
 #ifdef __linux__
 # include <scsi/sg.h>

@@ -45,8 +44,7 @@ void virtio_blk_free_request(VirtIOBlockReq *req)
     }
 }
 
-static void virtio_blk_complete_request(VirtIOBlockReq *req,
-                                        unsigned char status)
+static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
 {
     VirtIOBlock *s = req->dev;
     VirtIODevice *vdev = VIRTIO_DEVICE(s);

@@ -55,12 +53,11 @@ static void virtio_blk_complete_request(VirtIOBlockReq *req,
 
     stb_p(&req->in->status, status);
     virtqueue_push(s->vq, &req->elem, req->in_len);
-    virtio_notify(vdev, s->vq);
-}
-
-static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
-{
-    req->dev->complete_request(req, status);
+    if (s->dataplane) {
+        virtio_blk_data_plane_notify(s->dataplane);
+    } else {
+        virtio_notify(vdev, s->vq);
+    }
 }
 
 static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,

@@ -589,7 +586,7 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
     /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
      * dataplane here instead of waiting for .set_status().
      */
-    if (s->dataplane) {
+    if (s->dataplane && !s->dataplane_started) {
         virtio_blk_data_plane_start(s->dataplane);
         return;
     }

@@ -852,36 +849,6 @@ static const BlockDevOps virtio_block_ops = {
     .resize_cb = virtio_blk_resize,
 };
 
-/* Disable dataplane thread during live migration since it does not
- * update the dirty memory bitmap yet.
- */
-static void virtio_blk_migration_state_changed(Notifier *notifier, void *data)
-{
-    VirtIOBlock *s = container_of(notifier, VirtIOBlock,
-                                  migration_state_notifier);
-    MigrationState *mig = data;
-    Error *err = NULL;
-
-    if (migration_in_setup(mig)) {
-        if (!s->dataplane) {
-            return;
-        }
-        virtio_blk_data_plane_destroy(s->dataplane);
-        s->dataplane = NULL;
-    } else if (migration_has_finished(mig) ||
-               migration_has_failed(mig)) {
-        if (s->dataplane) {
-            return;
-        }
-        blk_drain_all(); /* complete in-flight non-dataplane requests */
-        virtio_blk_data_plane_create(VIRTIO_DEVICE(s), &s->conf,
-                                     &s->dataplane, &err);
-        if (err != NULL) {
-            error_report_err(err);
-        }
-    }
-}
-
 static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(dev);

@@ -916,15 +883,12 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
     s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1;
 
     s->vq = virtio_add_queue(vdev, 128, virtio_blk_handle_output);
-    s->complete_request = virtio_blk_complete_request;
     virtio_blk_data_plane_create(vdev, conf, &s->dataplane, &err);
     if (err != NULL) {
         error_propagate(errp, err);
         virtio_cleanup(vdev);
         return;
     }
-    s->migration_state_notifier.notify = virtio_blk_migration_state_changed;
-    add_migration_state_change_notifier(&s->migration_state_notifier);
 
     s->change = qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
     register_savevm(dev, "virtio-blk", virtio_blk_id++, 2,

@@ -940,7 +904,6 @@ static void virtio_blk_device_unrealize(DeviceState *dev, Error **errp)
     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
     VirtIOBlock *s = VIRTIO_BLK(dev);
 
-    remove_migration_state_change_notifier(&s->migration_state_notifier);
     virtio_blk_data_plane_destroy(s->dataplane);
     s->dataplane = NULL;
     qemu_del_vm_change_state_handler(s->change);

@@ -2532,7 +2532,8 @@ build_rsdp(GArray *rsdp_table, GArray *linker, unsigned rsdt)
     rsdp->checksum = 0;
     /* Checksum to be filled by Guest linker */
     bios_linker_loader_add_checksum(linker, ACPI_BUILD_RSDP_FILE,
-                                    rsdp, rsdp, sizeof *rsdp, &rsdp->checksum);
+                                    rsdp_table, rsdp, sizeof *rsdp,
+                                    &rsdp->checksum);
 
     return rsdp_table;
 }

hw/i386/pc_q35.c (176 lines changed)
@@ -81,11 +81,9 @@ static void pc_q35_init(MachineState *machine)
      * If it doesn't, we need to split it in chunks below and above 4G.
      * In any case, try to make sure that guest addresses aligned at
      * 1G boundaries get mapped to host addresses aligned at 1G boundaries.
-     * For old machine types, use whatever split we used historically to avoid
-     * breaking migration.
      */
     if (machine->ram_size >= 0xb0000000) {
-        lowmem = pcmc->gigabyte_align ? 0x80000000 : 0xb0000000;
+        lowmem = 0x80000000;
     } else {
         lowmem = 0xb0000000;
     }

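The constants above encode the PCI-hole split: 0xb0000000 is 2.75 GiB and 0x80000000 is 2 GiB. With gigabyte_align now unconditional for the remaining machine types, any guest with at least 2.75 GiB of RAM gets a 2 GiB low region, so the remainder placed above 4 GiB stays 1 GiB-aligned. A minimal sketch of the resulting arithmetic (my own illustration):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the simplified branch in the hunk above. */
    static uint64_t q35_lowmem(uint64_t ram_size)
    {
        return ram_size >= 0xb0000000 ? 0x80000000 : 0xb0000000;
    }

    int main(void)
    {
        /* 4 GiB guest: 2 GiB below the hole, 2 GiB above 4 GiB. */
        printf("0x%llx\n", (unsigned long long)q35_lowmem(4ULL << 30));
        /* 2 GiB guest: fits entirely below the hole. */
        printf("0x%llx\n", (unsigned long long)q35_lowmem(2ULL << 30));
        return 0;
    }
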
@@ -116,10 +114,6 @@ static void pc_q35_init(MachineState *machine)
     }
 
     pc_cpus_init(pcms);
-    if (!pcmc->has_acpi_build) {
-        /* only machine types 1.7 & older need this */
-        pc_acpi_init("q35-acpi-dsdt.aml");
-    }
 
     kvmclock_create();
 

@@ -225,7 +219,7 @@ static void pc_q35_init(MachineState *machine)
                          (pcms->vmport != ON_OFF_AUTO_ON), 0xff0104);
 
     /* connect pm stuff to lpc */
-    ich9_lpc_pm_init(lpc, pc_machine_is_smm_enabled(pcms), !mc->no_tco);
+    ich9_lpc_pm_init(lpc, pc_machine_is_smm_enabled(pcms));
 
     /* ahci and SATA device, for q35 1 ahci controller is built-in */
     ahci = pci_create_simple_multifunction(host_bus,

@@ -259,62 +253,6 @@
     }
 }
 
-/* Looking for a pc_compat_2_4() function? It doesn't exist.
- * pc_compat_*() functions that run on machine-init time and
- * change global QEMU state are deprecated. Please don't create
- * one, and implement any pc-*-2.4 (and newer) compat code in
- * HW_COMPAT_*, PC_COMPAT_*, or * pc_*_machine_options().
- */
-
-static void pc_compat_2_3(MachineState *machine)
-{
-    PCMachineState *pcms = PC_MACHINE(machine);
-    savevm_skip_section_footers();
-    if (kvm_enabled()) {
-        pcms->smm = ON_OFF_AUTO_OFF;
-    }
-    global_state_set_optional();
-    savevm_skip_configuration();
-}
-
-static void pc_compat_2_2(MachineState *machine)
-{
-    pc_compat_2_3(machine);
-    machine->suppress_vmdesc = true;
-}
-
-static void pc_compat_2_1(MachineState *machine)
-{
-    pc_compat_2_2(machine);
-    x86_cpu_change_kvm_default("svm", NULL);
-}
-
-static void pc_compat_2_0(MachineState *machine)
-{
-    pc_compat_2_1(machine);
-}
-
-static void pc_compat_1_7(MachineState *machine)
-{
-    pc_compat_2_0(machine);
-    x86_cpu_change_kvm_default("x2apic", NULL);
-}
-
-static void pc_compat_1_6(MachineState *machine)
-{
-    pc_compat_1_7(machine);
-}
-
-static void pc_compat_1_5(MachineState *machine)
-{
-    pc_compat_1_6(machine);
-}
-
-static void pc_compat_1_4(MachineState *machine)
-{
-    pc_compat_1_5(machine);
-}
-
 #define DEFINE_Q35_MACHINE(suffix, name, compatfn, optionfn) \
     static void pc_init_##suffix(MachineState *machine) \
     { \

@@ -336,7 +274,6 @@ static void pc_q35_machine_options(MachineClass *m)
     m->default_machine_opts = "firmware=bios-256k.bin";
     m->default_display = "std";
     m->no_floppy = 1;
-    m->no_tco = 0;
 }
 
 static void pc_q35_2_6_machine_options(MachineClass *m)

@@ -371,112 +308,3 @@ static void pc_q35_2_4_machine_options(MachineClass *m)
 
 DEFINE_Q35_MACHINE(v2_4, "pc-q35-2.4", NULL,
                    pc_q35_2_4_machine_options);
-
-
-static void pc_q35_2_3_machine_options(MachineClass *m)
-{
-    pc_q35_2_4_machine_options(m);
-    m->hw_version = "2.3.0";
-    m->no_floppy = 0;
-    m->no_tco = 1;
-    SET_MACHINE_COMPAT(m, PC_COMPAT_2_3);
-}
-
-DEFINE_Q35_MACHINE(v2_3, "pc-q35-2.3", pc_compat_2_3,
-                   pc_q35_2_3_machine_options);
-
-
-static void pc_q35_2_2_machine_options(MachineClass *m)
-{
-    PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
-    pc_q35_2_3_machine_options(m);
-    m->hw_version = "2.2.0";
-    SET_MACHINE_COMPAT(m, PC_COMPAT_2_2);
-    pcmc->rsdp_in_ram = false;
-}
-
-DEFINE_Q35_MACHINE(v2_2, "pc-q35-2.2", pc_compat_2_2,
-                   pc_q35_2_2_machine_options);
-
-
-static void pc_q35_2_1_machine_options(MachineClass *m)
-{
-    PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
-    pc_q35_2_2_machine_options(m);
-    m->hw_version = "2.1.0";
-    m->default_display = NULL;
-    SET_MACHINE_COMPAT(m, PC_COMPAT_2_1);
-    pcmc->smbios_uuid_encoded = false;
-    pcmc->enforce_aligned_dimm = false;
-}
-
-DEFINE_Q35_MACHINE(v2_1, "pc-q35-2.1", pc_compat_2_1,
-                   pc_q35_2_1_machine_options);
-
-
-static void pc_q35_2_0_machine_options(MachineClass *m)
-{
-    PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
-    pc_q35_2_1_machine_options(m);
-    m->hw_version = "2.0.0";
-    SET_MACHINE_COMPAT(m, PC_COMPAT_2_0);
-    pcmc->has_reserved_memory = false;
-    pcmc->smbios_legacy_mode = true;
-    pcmc->acpi_data_size = 0x10000;
-}
-
-DEFINE_Q35_MACHINE(v2_0, "pc-q35-2.0", pc_compat_2_0,
-                   pc_q35_2_0_machine_options);
-
-
-static void pc_q35_1_7_machine_options(MachineClass *m)
-{
-    PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
-    pc_q35_2_0_machine_options(m);
-    m->hw_version = "1.7.0";
-    m->default_machine_opts = NULL;
-    m->option_rom_has_mr = true;
-    SET_MACHINE_COMPAT(m, PC_COMPAT_1_7);
-    pcmc->smbios_defaults = false;
-    pcmc->gigabyte_align = false;
-}
-
-DEFINE_Q35_MACHINE(v1_7, "pc-q35-1.7", pc_compat_1_7,
-                   pc_q35_1_7_machine_options);
-
-
-static void pc_q35_1_6_machine_options(MachineClass *m)
-{
-    PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
-    pc_q35_machine_options(m);
-    m->hw_version = "1.6.0";
-    m->rom_file_has_mr = false;
-    SET_MACHINE_COMPAT(m, PC_COMPAT_1_6);
-    pcmc->has_acpi_build = false;
-}
-
-DEFINE_Q35_MACHINE(v1_6, "pc-q35-1.6", pc_compat_1_6,
-                   pc_q35_1_6_machine_options);
-
-
-static void pc_q35_1_5_machine_options(MachineClass *m)
-{
-    pc_q35_1_6_machine_options(m);
-    m->hw_version = "1.5.0";
-    SET_MACHINE_COMPAT(m, PC_COMPAT_1_5);
-}
-
-DEFINE_Q35_MACHINE(v1_5, "pc-q35-1.5", pc_compat_1_5,
-                   pc_q35_1_5_machine_options);
-
-
-static void pc_q35_1_4_machine_options(MachineClass *m)
-{
-    pc_q35_1_5_machine_options(m);
-    m->hw_version = "1.4.0";
-    m->hot_add_cpu = NULL;
-    SET_MACHINE_COMPAT(m, PC_COMPAT_1_4);
-}
-
-DEFINE_Q35_MACHINE(v1_4, "pc-q35-1.4", pc_compat_1_4,
-                   pc_q35_1_4_machine_options);

@@ -369,13 +369,13 @@ static void ich9_set_sci(void *opaque, int irq_num, int level)
     }
 }
 
-void ich9_lpc_pm_init(PCIDevice *lpc_pci, bool smm_enabled, bool enable_tco)
+void ich9_lpc_pm_init(PCIDevice *lpc_pci, bool smm_enabled)
 {
     ICH9LPCState *lpc = ICH9_LPC_DEVICE(lpc_pci);
     qemu_irq sci_irq;
 
     sci_irq = qemu_allocate_irq(ich9_set_sci, lpc, 0);
-    ich9_pm_init(lpc_pci, &lpc->pm, smm_enabled, enable_tco, sci_irq);
+    ich9_pm_init(lpc_pci, &lpc->pm, smm_enabled, sci_irq);
     ich9_lpc_reset(&lpc->d.qdev);
 }
 

@@ -192,32 +192,6 @@ int qmp_pc_dimm_device_list(Object *obj, void *opaque)
     return 0;
 }
 
-ram_addr_t get_current_ram_size(void)
-{
-    MemoryDeviceInfoList *info_list = NULL;
-    MemoryDeviceInfoList **prev = &info_list;
-    MemoryDeviceInfoList *info;
-    ram_addr_t size = ram_size;
-
-    qmp_pc_dimm_device_list(qdev_get_machine(), &prev);
-    for (info = info_list; info; info = info->next) {
-        MemoryDeviceInfo *value = info->value;
-
-        if (value) {
-            switch (value->type) {
-            case MEMORY_DEVICE_INFO_KIND_DIMM:
-                size += value->u.dimm->size;
-                break;
-            default:
-                break;
-            }
-        }
-    }
-    qapi_free_MemoryDeviceInfoList(info_list);
-
-    return size;
-}
-
 static int pc_dimm_slot2bitmap(Object *obj, void *opaque)
 {
     unsigned long *bitmap = opaque;

hw/pci/pci.c (13 lines changed)
@@ -278,9 +278,9 @@ static void pcibus_reset(BusState *qbus)
     }
 }
 
-static void pci_host_bus_register(PCIBus *bus, DeviceState *parent)
+static void pci_host_bus_register(DeviceState *host)
 {
-    PCIHostState *host_bridge = PCI_HOST_BRIDGE(parent);
+    PCIHostState *host_bridge = PCI_HOST_BRIDGE(host);
 
     QLIST_INSERT_HEAD(&pci_host_bridges, host_bridge, next);
 }

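pci_host_bus_register() now takes only the host-bridge device, since the bus argument was unused, and links the bridge into the global pci_host_bridges list. A minimal toy of the same intrusive-list idiom (a sketch assuming QEMU's qemu/queue.h macros; the types here are hypothetical, not the QEMU ones):

    #include <stdio.h>
    #include "qemu/queue.h"

    typedef struct Bridge {
        const char *name;
        QLIST_ENTRY(Bridge) next;   /* embedded link, no allocation */
    } Bridge;

    static QLIST_HEAD(, Bridge) bridges = QLIST_HEAD_INITIALIZER(bridges);

    static void bridge_register(Bridge *b)
    {
        QLIST_INSERT_HEAD(&bridges, b, next);  /* same call as in the hunk */
    }

    static void bridge_dump(void)
    {
        Bridge *b;

        QLIST_FOREACH(b, &bridges, next) {
            printf("%s\n", b->name);
        }
    }
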
@@ -331,7 +331,6 @@ const char *pci_root_bus_path(PCIDevice *dev)
 }
 
 static void pci_bus_init(PCIBus *bus, DeviceState *parent,
-                         const char *name,
                          MemoryRegion *address_space_mem,
                          MemoryRegion *address_space_io,
                          uint8_t devfn_min)

@@ -344,7 +343,7 @@ static void pci_bus_init(PCIBus *bus, DeviceState *parent,
     /* host bridge */
     QLIST_INIT(&bus->child);
 
-    pci_host_bus_register(bus, parent);
+    pci_host_bus_register(parent);
 }
 
 bool pci_bus_is_express(PCIBus *bus)

@@ -364,8 +363,7 @@ void pci_bus_new_inplace(PCIBus *bus, size_t bus_size, DeviceState *parent,
                          uint8_t devfn_min, const char *typename)
 {
     qbus_create_inplace(bus, bus_size, typename, parent, name);
-    pci_bus_init(bus, parent, name, address_space_mem,
-                 address_space_io, devfn_min);
+    pci_bus_init(bus, parent, address_space_mem, address_space_io, devfn_min);
 }
 
 PCIBus *pci_bus_new(DeviceState *parent, const char *name,

@@ -376,8 +374,7 @@ PCIBus *pci_bus_new(DeviceState *parent, const char *name,
     PCIBus *bus;
 
     bus = PCI_BUS(qbus_create(typename, parent, name));
-    pci_bus_init(bus, parent, name, address_space_mem,
-                 address_space_io, devfn_min);
+    pci_bus_init(bus, parent, address_space_mem, address_space_io, devfn_min);
     return bus;
 }
 

@@ -39,14 +39,10 @@ void virtio_scsi_set_iothread(VirtIOSCSI *s, IOThread *iothread)
     }
 }
 
-static VirtIOSCSIVring *virtio_scsi_vring_init(VirtIOSCSI *s,
-                                               VirtQueue *vq,
-                                               EventNotifierHandler *handler,
-                                               int n)
+static int virtio_scsi_vring_init(VirtIOSCSI *s, VirtQueue *vq, int n)
 {
     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
-    VirtIOSCSIVring *r;
     int rc;
 
     /* Set up virtqueue notify */

@@ -55,105 +51,17 @@
         fprintf(stderr, "virtio-scsi: Failed to set host notifier (%d)\n",
                 rc);
         s->dataplane_fenced = true;
-        return NULL;
+        return rc;
     }
 
-    r = g_new(VirtIOSCSIVring, 1);
-    r->host_notifier = *virtio_queue_get_host_notifier(vq);
-    r->guest_notifier = *virtio_queue_get_guest_notifier(vq);
-    aio_set_event_notifier(s->ctx, &r->host_notifier, true, handler);
-
-    r->parent = s;
-
-    if (!vring_setup(&r->vring, VIRTIO_DEVICE(s), n)) {
-        fprintf(stderr, "virtio-scsi: VRing setup failed\n");
-        goto fail_vring;
-    }
-    return r;
-
-fail_vring:
-    aio_set_event_notifier(s->ctx, &r->host_notifier, true, NULL);
-    k->set_host_notifier(qbus->parent, n, false);
-    g_free(r);
-    return NULL;
+    virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, true, true);
+    return 0;
 }
 
-VirtIOSCSIReq *virtio_scsi_pop_req_vring(VirtIOSCSI *s,
-                                         VirtIOSCSIVring *vring)
+void virtio_scsi_dataplane_notify(VirtIODevice *vdev, VirtIOSCSIReq *req)
 {
-    VirtIOSCSICommon *vs = (VirtIOSCSICommon *)s;
-    VirtIOSCSIReq *req;
-
-    req = vring_pop((VirtIODevice *)s, &vring->vring,
-                    sizeof(VirtIOSCSIReq) + vs->cdb_size);
-    if (!req) {
-        return NULL;
-    }
-    virtio_scsi_init_req(s, NULL, req);
-    req->vring = vring;
-    return req;
-}
-
-void virtio_scsi_vring_push_notify(VirtIOSCSIReq *req)
-{
-    VirtIODevice *vdev = VIRTIO_DEVICE(req->vring->parent);
-
-    vring_push(vdev, &req->vring->vring, &req->elem,
-               req->qsgl.size + req->resp_iov.size);
-
-    if (vring_should_notify(vdev, &req->vring->vring)) {
-        event_notifier_set(&req->vring->guest_notifier);
-    }
-}
-
-static void virtio_scsi_iothread_handle_ctrl(EventNotifier *notifier)
-{
-    VirtIOSCSIVring *vring = container_of(notifier,
-                                          VirtIOSCSIVring, host_notifier);
-    VirtIOSCSI *s = VIRTIO_SCSI(vring->parent);
-    VirtIOSCSIReq *req;
-
-    event_notifier_test_and_clear(notifier);
-    while ((req = virtio_scsi_pop_req_vring(s, vring))) {
-        virtio_scsi_handle_ctrl_req(s, req);
-    }
-}
-
-static void virtio_scsi_iothread_handle_event(EventNotifier *notifier)
-{
-    VirtIOSCSIVring *vring = container_of(notifier,
-                                          VirtIOSCSIVring, host_notifier);
-    VirtIOSCSI *s = vring->parent;
-    VirtIODevice *vdev = VIRTIO_DEVICE(s);
-
-    event_notifier_test_and_clear(notifier);
-
-    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
-        return;
-    }
-
-    if (s->events_dropped) {
-        virtio_scsi_push_event(s, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
-    }
-}
-
-static void virtio_scsi_iothread_handle_cmd(EventNotifier *notifier)
-{
-    VirtIOSCSIVring *vring = container_of(notifier,
-                                          VirtIOSCSIVring, host_notifier);
-    VirtIOSCSI *s = (VirtIOSCSI *)vring->parent;
-    VirtIOSCSIReq *req, *next;
-    QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);
-
-    event_notifier_test_and_clear(notifier);
-    while ((req = virtio_scsi_pop_req_vring(s, vring))) {
-        if (virtio_scsi_handle_cmd_req_prepare(s, req)) {
-            QTAILQ_INSERT_TAIL(&reqs, req, next);
-        }
-    }
-
-    QTAILQ_FOREACH_SAFE(req, &reqs, next, next) {
-        virtio_scsi_handle_cmd_req_submit(s, req);
+    if (virtio_should_notify(vdev, req->vq)) {
+        event_notifier_set(virtio_queue_get_guest_notifier(req->vq));
     }
 }
 

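virtio_scsi_dataplane_notify() is the scsi twin of the virtio-blk change above: the used element is pushed through the regular virtqueue code, and only the interrupt decision stays dataplane-specific. The guest notifier being "set" is, on Linux, an eventfd. A standalone toy showing the primitive EventNotifier wraps there (my own illustration, Linux-only):

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/eventfd.h>

    int main(void)
    {
        int efd = eventfd(0, 0);
        uint64_t val = 1, out;

        write(efd, &val, sizeof val);   /* like event_notifier_set() */
        read(efd, &out, sizeof out);    /* the poller wakes and drains */
        printf("woken, counter=%llu\n", (unsigned long long)out);
        close(efd);
        return 0;
    }
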
@@ -163,46 +71,10 @@ static void virtio_scsi_clear_aio(VirtIOSCSI *s)
     VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
     int i;
 
-    if (s->ctrl_vring) {
-        aio_set_event_notifier(s->ctx, &s->ctrl_vring->host_notifier,
-                               true, NULL);
-    }
-    if (s->event_vring) {
-        aio_set_event_notifier(s->ctx, &s->event_vring->host_notifier,
-                               true, NULL);
-    }
-    if (s->cmd_vrings) {
-        for (i = 0; i < vs->conf.num_queues && s->cmd_vrings[i]; i++) {
-            aio_set_event_notifier(s->ctx, &s->cmd_vrings[i]->host_notifier,
-                                   true, NULL);
-        }
-    }
-}
-
-static void virtio_scsi_vring_teardown(VirtIOSCSI *s)
-{
-    VirtIODevice *vdev = VIRTIO_DEVICE(s);
-    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
-    int i;
-
-    if (s->ctrl_vring) {
-        vring_teardown(&s->ctrl_vring->vring, vdev, 0);
-        g_free(s->ctrl_vring);
-        s->ctrl_vring = NULL;
-    }
-    if (s->event_vring) {
-        vring_teardown(&s->event_vring->vring, vdev, 1);
-        g_free(s->event_vring);
-        s->event_vring = NULL;
-    }
-    if (s->cmd_vrings) {
-        for (i = 0; i < vs->conf.num_queues && s->cmd_vrings[i]; i++) {
-            vring_teardown(&s->cmd_vrings[i]->vring, vdev, 2 + i);
-            g_free(s->cmd_vrings[i]);
-            s->cmd_vrings[i] = NULL;
-        }
-        free(s->cmd_vrings);
-        s->cmd_vrings = NULL;
+    virtio_queue_aio_set_host_notifier_handler(vs->ctrl_vq, s->ctx, false, false);
+    virtio_queue_aio_set_host_notifier_handler(vs->event_vq, s->ctx, false, false);
+    for (i = 0; i < vs->conf.num_queues; i++) {
+        virtio_queue_aio_set_host_notifier_handler(vs->cmd_vqs[i], s->ctx, false, false);
     }
 }

@@ -229,30 +101,21 @@ void virtio_scsi_dataplane_start(VirtIOSCSI *s)
     if (rc != 0) {
         fprintf(stderr, "virtio-scsi: Failed to set guest notifiers (%d), "
                         "ensure -enable-kvm is set\n", rc);
-        s->dataplane_fenced = true;
         goto fail_guest_notifiers;
     }
 
     aio_context_acquire(s->ctx);
-    s->ctrl_vring = virtio_scsi_vring_init(s, vs->ctrl_vq,
-                                           virtio_scsi_iothread_handle_ctrl,
-                                           0);
-    if (!s->ctrl_vring) {
+    rc = virtio_scsi_vring_init(s, vs->ctrl_vq, 0);
+    if (rc) {
         goto fail_vrings;
     }
-    s->event_vring = virtio_scsi_vring_init(s, vs->event_vq,
-                                            virtio_scsi_iothread_handle_event,
-                                            1);
-    if (!s->event_vring) {
+    rc = virtio_scsi_vring_init(s, vs->event_vq, 1);
+    if (rc) {
         goto fail_vrings;
     }
-    s->cmd_vrings = g_new(VirtIOSCSIVring *, vs->conf.num_queues);
     for (i = 0; i < vs->conf.num_queues; i++) {
-        s->cmd_vrings[i] =
-            virtio_scsi_vring_init(s, vs->cmd_vqs[i],
-                                   virtio_scsi_iothread_handle_cmd,
-                                   i + 2);
-        if (!s->cmd_vrings[i]) {
+        rc = virtio_scsi_vring_init(s, vs->cmd_vqs[i], i + 2);
+        if (rc) {
             goto fail_vrings;
         }
     }

@@ -265,13 +128,14 @@ void virtio_scsi_dataplane_start(VirtIOSCSI *s)
 fail_vrings:
     virtio_scsi_clear_aio(s);
     aio_context_release(s->ctx);
-    virtio_scsi_vring_teardown(s);
     for (i = 0; i < vs->conf.num_queues + 2; i++) {
         k->set_host_notifier(qbus->parent, i, false);
     }
     k->set_guest_notifiers(qbus->parent, vs->conf.num_queues + 2, false);
 fail_guest_notifiers:
+    s->dataplane_fenced = true;
     s->dataplane_starting = false;
+    s->dataplane_started = true;
 }
 
 /* Context: QEMU global mutex held */

@@ -282,12 +146,14 @@ void virtio_scsi_dataplane_stop(VirtIOSCSI *s)
     VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
     int i;
 
+    if (!s->dataplane_started || s->dataplane_stopping) {
+        return;
+    }
+
     /* Better luck next time. */
     if (s->dataplane_fenced) {
         s->dataplane_fenced = false;
-        return;
-    }
-    if (!s->dataplane_started || s->dataplane_stopping) {
+        s->dataplane_started = false;
         return;
     }
     s->dataplane_stopping = true;

@@ -295,24 +161,12 @@ void virtio_scsi_dataplane_stop(VirtIOSCSI *s)
 
     aio_context_acquire(s->ctx);
 
-    aio_set_event_notifier(s->ctx, &s->ctrl_vring->host_notifier,
-                           true, NULL);
-    aio_set_event_notifier(s->ctx, &s->event_vring->host_notifier,
-                           true, NULL);
-    for (i = 0; i < vs->conf.num_queues; i++) {
-        aio_set_event_notifier(s->ctx, &s->cmd_vrings[i]->host_notifier,
-                               true, NULL);
-    }
+    virtio_scsi_clear_aio(s);
 
     blk_drain_all(); /* ensure there are no in-flight requests */
 
     aio_context_release(s->ctx);
 
-    /* Sync vring state back to virtqueue so that non-dataplane request
-     * processing can continue when we disable the host notifier below.
-     */
-    virtio_scsi_vring_teardown(s);
-
     for (i = 0; i < vs->conf.num_queues + 2; i++) {
         k->set_host_notifier(qbus->parent, i, false);
     }

@@ -23,7 +23,6 @@
 #include <block/scsi.h>
 #include <hw/virtio/virtio-bus.h>
 #include "hw/virtio/virtio-access.h"
-#include "migration/migration.h"
 
 static inline int virtio_scsi_get_lun(uint8_t *lun)
 {

@@ -43,7 +42,8 @@ static inline SCSIDevice *virtio_scsi_device_find(VirtIOSCSI *s, uint8_t *lun)
 
 void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req)
 {
-    const size_t zero_skip = offsetof(VirtIOSCSIReq, vring);
+    const size_t zero_skip =
+        offsetof(VirtIOSCSIReq, resp_iov) + sizeof(req->resp_iov);
 
     req->vq = vq;
     req->dev = s;

@@ -66,11 +66,10 @@ static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
     VirtIODevice *vdev = VIRTIO_DEVICE(s);
 
     qemu_iovec_from_buf(&req->resp_iov, 0, &req->resp, req->resp_size);
-    if (req->vring) {
-        assert(req->vq == NULL);
-        virtio_scsi_vring_push_notify(req);
+    virtqueue_push(vq, &req->elem, req->qsgl.size + req->resp_iov.size);
+    if (s->dataplane_started) {
+        virtio_scsi_dataplane_notify(vdev, req);
     } else {
-        virtqueue_push(vq, &req->elem, req->qsgl.size + req->resp_iov.size);
         virtio_notify(vdev, vq);
     }
 

@@ -417,7 +416,7 @@ static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
     VirtIOSCSI *s = (VirtIOSCSI *)vdev;
     VirtIOSCSIReq *req;
 
-    if (s->ctx && !s->dataplane_disabled) {
+    if (s->ctx && !s->dataplane_started) {
         virtio_scsi_dataplane_start(s);
         return;
     }

@@ -567,7 +566,7 @@ static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
     VirtIOSCSIReq *req, *next;
     QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);
 
-    if (s->ctx && !s->dataplane_disabled) {
+    if (s->ctx && !s->dataplane_started) {
         virtio_scsi_dataplane_start(s);
         return;
     }

@@ -687,11 +686,7 @@ void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
         aio_context_acquire(s->ctx);
     }
 
-    if (s->dataplane_started) {
-        req = virtio_scsi_pop_req_vring(s, s->event_vring);
-    } else {
-        req = virtio_scsi_pop_req(s, vs->event_vq);
-    }
+    req = virtio_scsi_pop_req(s, vs->event_vq);
     if (!req) {
         s->events_dropped = true;
         goto out;

@@ -733,7 +728,7 @@ static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
 {
     VirtIOSCSI *s = VIRTIO_SCSI(vdev);
 
-    if (s->ctx && !s->dataplane_disabled) {
+    if (s->ctx && !s->dataplane_started) {
         virtio_scsi_dataplane_start(s);
         return;
     }

@@ -901,31 +896,6 @@ void virtio_scsi_common_realize(DeviceState *dev, Error **errp,
     }
 }
 
-/* Disable dataplane thread during live migration since it does not
- * update the dirty memory bitmap yet.
- */
-static void virtio_scsi_migration_state_changed(Notifier *notifier, void *data)
-{
-    VirtIOSCSI *s = container_of(notifier, VirtIOSCSI,
-                                 migration_state_notifier);
-    MigrationState *mig = data;
-
-    if (migration_in_setup(mig)) {
-        if (!s->dataplane_started) {
-            return;
-        }
-        virtio_scsi_dataplane_stop(s);
-        s->dataplane_disabled = true;
-    } else if (migration_has_finished(mig) ||
-               migration_has_failed(mig)) {
-        if (s->dataplane_started) {
-            return;
-        }
-        blk_drain_all(); /* complete in-flight non-dataplane requests */
-        s->dataplane_disabled = false;
-    }
-}
-
 static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(dev);

@@ -956,8 +926,6 @@ static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
 
     register_savevm(dev, "virtio-scsi", virtio_scsi_id++, 1,
                     virtio_scsi_save, virtio_scsi_load, s);
-    s->migration_state_notifier.notify = virtio_scsi_migration_state_changed;
-    add_migration_state_change_notifier(&s->migration_state_notifier);
 
     error_setg(&s->blocker, "block device is in use by data plane");
 

@@ -991,8 +959,6 @@ static void virtio_scsi_device_unrealize(DeviceState *dev, Error **errp)
     error_free(s->blocker);
 
     unregister_savevm(dev, "virtio-scsi", s);
-    remove_migration_state_change_notifier(&s->migration_state_notifier);
 
     virtio_scsi_common_unrealize(dev, errp);
 }

@@ -2,7 +2,6 @@ common-obj-y += virtio-rng.o
 common-obj-$(CONFIG_VIRTIO_PCI) += virtio-pci.o
 common-obj-y += virtio-bus.o
 common-obj-y += virtio-mmio.o
-obj-$(CONFIG_VIRTIO) += dataplane/
 
 obj-y += virtio.o virtio-balloon.o
 obj-$(CONFIG_LINUX) += vhost.o vhost-backend.o vhost-user.o

@@ -1 +0,0 @@
-obj-y += vring.o

@@ -1,549 +0,0 @@
-/* Copyright 2012 Red Hat, Inc.
- * Copyright IBM, Corp. 2012
- *
- * Based on Linux 2.6.39 vhost code:
- * Copyright (C) 2009 Red Hat, Inc.
- * Copyright (C) 2006 Rusty Russell IBM Corporation
- *
- * Author: Michael S. Tsirkin <mst@redhat.com>
- *         Stefan Hajnoczi <stefanha@redhat.com>
- *
- * Inspiration, some code, and most witty comments come from
- * Documentation/virtual/lguest/lguest.c, by Rusty Russell
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
- */
-
-#include "qemu/osdep.h"
-#include "trace.h"
-#include "hw/hw.h"
-#include "exec/memory.h"
-#include "exec/address-spaces.h"
-#include "hw/virtio/virtio-access.h"
-#include "hw/virtio/dataplane/vring.h"
-#include "hw/virtio/dataplane/vring-accessors.h"
-#include "qemu/error-report.h"
-
-/* vring_map can be coupled with vring_unmap or (if you still have the
- * value returned in *mr) memory_region_unref.
- * Returns NULL on failure.
- * Callers that can handle a partial mapping must supply mapped_len pointer to
- * get the actual length mapped.
- * Passing mapped_len == NULL requires either a full mapping or a failure.
- */
-static void *vring_map(MemoryRegion **mr, hwaddr phys,
-                       hwaddr len, hwaddr *mapped_len,
-                       bool is_write)
-{
-    MemoryRegionSection section = memory_region_find(get_system_memory(), phys, len);
-    uint64_t size;
-
-    if (!section.mr) {
-        goto out;
-    }
-
-    size = int128_get64(section.size);
-    assert(size);
-
-    /* Passing mapped_len == NULL requires either a full mapping or a failure. */
-    if (!mapped_len && size < len) {
-        goto out;
-    }
-
-    if (is_write && section.readonly) {
-        goto out;
-    }
-    if (!memory_region_is_ram(section.mr)) {
-        goto out;
-    }
-
-    /* Ignore regions with dirty logging, we cannot mark them dirty */
-    if (memory_region_get_dirty_log_mask(section.mr)) {
-        goto out;
-    }
-
-    if (mapped_len) {
-        *mapped_len = MIN(size, len);
-    }
-
-    *mr = section.mr;
-    return memory_region_get_ram_ptr(section.mr) + section.offset_within_region;
-
-out:
-    memory_region_unref(section.mr);
-    *mr = NULL;
-    return NULL;
-}
|
||||
{
|
||||
ram_addr_t addr;
|
||||
MemoryRegion *mr;
|
||||
|
||||
mr = qemu_ram_addr_from_host(buffer, &addr);
|
||||
memory_region_unref(mr);
|
||||
}
|
||||
|
||||
/* Map the guest's vring to host memory */
|
||||
bool vring_setup(Vring *vring, VirtIODevice *vdev, int n)
|
||||
{
|
||||
struct vring *vr = &vring->vr;
|
||||
hwaddr addr;
|
||||
hwaddr size;
|
||||
void *ptr;
|
||||
|
||||
vring->broken = false;
|
||||
vr->num = virtio_queue_get_num(vdev, n);
|
||||
|
||||
addr = virtio_queue_get_desc_addr(vdev, n);
|
||||
size = virtio_queue_get_desc_size(vdev, n);
|
||||
/* Map the descriptor area as read only */
|
||||
ptr = vring_map(&vring->mr_desc, addr, size, NULL, false);
|
||||
if (!ptr) {
|
||||
error_report("Failed to map 0x%" HWADDR_PRIx " byte for vring desc "
|
||||
"at 0x%" HWADDR_PRIx,
|
||||
size, addr);
|
||||
goto out_err_desc;
|
||||
}
|
||||
vr->desc = ptr;
|
||||
|
||||
addr = virtio_queue_get_avail_addr(vdev, n);
|
||||
size = virtio_queue_get_avail_size(vdev, n);
|
||||
/* Add the size of the used_event_idx */
|
||||
size += sizeof(uint16_t);
|
||||
/* Map the driver area as read only */
|
||||
ptr = vring_map(&vring->mr_avail, addr, size, NULL, false);
|
||||
if (!ptr) {
|
||||
error_report("Failed to map 0x%" HWADDR_PRIx " byte for vring avail "
|
||||
"at 0x%" HWADDR_PRIx,
|
||||
size, addr);
|
||||
goto out_err_avail;
|
||||
}
|
||||
vr->avail = ptr;
|
||||
|
||||
addr = virtio_queue_get_used_addr(vdev, n);
|
||||
size = virtio_queue_get_used_size(vdev, n);
|
||||
/* Add the size of the avail_event_idx */
|
||||
size += sizeof(uint16_t);
|
||||
/* Map the device area as read-write */
|
||||
ptr = vring_map(&vring->mr_used, addr, size, NULL, true);
|
||||
if (!ptr) {
|
||||
error_report("Failed to map 0x%" HWADDR_PRIx " byte for vring used "
|
||||
"at 0x%" HWADDR_PRIx,
|
||||
size, addr);
|
||||
goto out_err_used;
|
||||
}
|
||||
vr->used = ptr;
|
||||
|
||||
vring->last_avail_idx = virtio_queue_get_last_avail_idx(vdev, n);
|
||||
vring->last_used_idx = vring_get_used_idx(vdev, vring);
|
||||
vring->signalled_used = 0;
|
||||
vring->signalled_used_valid = false;
|
||||
|
||||
trace_vring_setup(virtio_queue_get_ring_addr(vdev, n),
|
||||
vring->vr.desc, vring->vr.avail, vring->vr.used);
|
||||
return true;
|
||||
|
||||
out_err_used:
|
||||
memory_region_unref(vring->mr_avail);
|
||||
out_err_avail:
|
||||
memory_region_unref(vring->mr_desc);
|
||||
out_err_desc:
|
||||
vring->broken = true;
|
||||
return false;
|
||||
}
|
||||
|
||||
void vring_teardown(Vring *vring, VirtIODevice *vdev, int n)
|
||||
{
|
||||
virtio_queue_set_last_avail_idx(vdev, n, vring->last_avail_idx);
|
||||
virtio_queue_invalidate_signalled_used(vdev, n);
|
||||
|
||||
memory_region_unref(vring->mr_desc);
|
||||
memory_region_unref(vring->mr_avail);
|
||||
memory_region_unref(vring->mr_used);
|
||||
}
|
||||
|
||||
/* Disable guest->host notifies */
|
||||
void vring_disable_notification(VirtIODevice *vdev, Vring *vring)
|
||||
{
|
||||
if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
|
||||
vring_set_used_flags(vdev, vring, VRING_USED_F_NO_NOTIFY);
|
||||
}
|
||||
}
|
||||
|
||||
/* Enable guest->host notifies
|
||||
*
|
||||
* Return true if the vring is empty, false if there are more requests.
|
||||
*/
|
||||
bool vring_enable_notification(VirtIODevice *vdev, Vring *vring)
|
||||
{
|
||||
if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
|
||||
vring_avail_event(&vring->vr) = vring->vr.avail->idx;
|
||||
} else {
|
||||
vring_clear_used_flags(vdev, vring, VRING_USED_F_NO_NOTIFY);
|
||||
}
|
||||
smp_mb(); /* ensure update is seen before reading avail_idx */
|
||||
return !vring_more_avail(vdev, vring);
|
||||
}
|
||||
|
||||
/* This is stolen from linux/drivers/vhost/vhost.c:vhost_notify() */
|
||||
bool vring_should_notify(VirtIODevice *vdev, Vring *vring)
|
||||
{
|
||||
uint16_t old, new;
|
||||
bool v;
|
||||
/* Flush out used index updates. This is paired
|
||||
* with the barrier that the Guest executes when enabling
|
||||
* interrupts. */
|
||||
smp_mb();
|
||||
|
||||
if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
|
||||
unlikely(!vring_more_avail(vdev, vring))) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
|
||||
return !(vring_get_avail_flags(vdev, vring) &
|
||||
VRING_AVAIL_F_NO_INTERRUPT);
|
||||
}
|
||||
old = vring->signalled_used;
|
||||
v = vring->signalled_used_valid;
|
||||
new = vring->signalled_used = vring->last_used_idx;
|
||||
vring->signalled_used_valid = true;
|
||||
|
||||
if (unlikely(!v)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return vring_need_event(virtio_tswap16(vdev, vring_used_event(&vring->vr)),
|
||||
new, old);
|
||||
}
|
||||
|
||||
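vring_should_notify() above ends in vring_need_event(), the EVENT_IDX rule from the virtio spec (and linux/virtio_ring.h): notify only if the guest's event index was crossed in the window (old, new], computed with 16-bit wrap-around. A worked standalone version (my own illustration):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool vring_need_event(uint16_t event_idx, uint16_t new,
                                 uint16_t old)
    {
        return (uint16_t)(new - event_idx - 1) < (uint16_t)(new - old);
    }

    int main(void)
    {
        assert(vring_need_event(5, 6, 5));     /* guest asked at 5, hit 6 */
        assert(!vring_need_event(9, 6, 5));    /* not interested yet */
        assert(vring_need_event(0xffff, 1, 0xfffe)); /* wrap-around case */
        return 0;
    }
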
typedef struct VirtQueueCurrentElement {
    unsigned in_num;
    unsigned out_num;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
} VirtQueueCurrentElement;

static int get_desc(Vring *vring, VirtQueueCurrentElement *elem,
                    struct vring_desc *desc)
{
    unsigned *num;
    struct iovec *iov;
    hwaddr *addr;
    MemoryRegion *mr;
    hwaddr len;

    if (desc->flags & VRING_DESC_F_WRITE) {
        num = &elem->in_num;
        iov = &elem->iov[elem->out_num + *num];
        addr = &elem->addr[elem->out_num + *num];
    } else {
        num = &elem->out_num;
        iov = &elem->iov[*num];
        addr = &elem->addr[*num];

        /* If it's an output descriptor, they're all supposed
         * to come before any input descriptors. */
        if (unlikely(elem->in_num)) {
            error_report("Descriptor has out after in");
            return -EFAULT;
        }
    }

    while (desc->len) {
        /* Stop for now if there are not enough iovecs available. */
        if (*num >= VIRTQUEUE_MAX_SIZE) {
            error_report("Invalid SG num: %u", *num);
            return -EFAULT;
        }

        iov->iov_base = vring_map(&mr, desc->addr, desc->len, &len,
                                  desc->flags & VRING_DESC_F_WRITE);
        if (!iov->iov_base) {
            error_report("Failed to map descriptor addr %#" PRIx64 " len %u",
                         (uint64_t)desc->addr, desc->len);
            return -EFAULT;
        }

        /* The MemoryRegion is looked up again and unref'ed later, leave the
         * ref in place. */
        (iov++)->iov_len = len;
        *addr++ = desc->addr;
        desc->len -= len;
        desc->addr += len;
        *num += 1;
    }

    return 0;
}

static void copy_in_vring_desc(VirtIODevice *vdev,
                               const struct vring_desc *guest,
                               struct vring_desc *host)
{
    host->addr = virtio_ldq_p(vdev, &guest->addr);
    host->len = virtio_ldl_p(vdev, &guest->len);
    host->flags = virtio_lduw_p(vdev, &guest->flags);
    host->next = virtio_lduw_p(vdev, &guest->next);
}

static bool read_vring_desc(VirtIODevice *vdev,
                            hwaddr guest,
                            struct vring_desc *host)
{
    if (address_space_read(&address_space_memory, guest, MEMTXATTRS_UNSPECIFIED,
                           (uint8_t *)host, sizeof *host)) {
        return false;
    }
    host->addr = virtio_tswap64(vdev, host->addr);
    host->len = virtio_tswap32(vdev, host->len);
    host->flags = virtio_tswap16(vdev, host->flags);
    host->next = virtio_tswap16(vdev, host->next);
    return true;
}

/* This is stolen from linux/drivers/vhost/vhost.c. */
static int get_indirect(VirtIODevice *vdev, Vring *vring,
                        VirtQueueCurrentElement *cur_elem,
                        struct vring_desc *indirect)
{
    struct vring_desc desc;
    unsigned int i = 0, count, found = 0;
    int ret;

    /* Sanity check */
    if (unlikely(indirect->len % sizeof(desc))) {
        error_report("Invalid length in indirect descriptor: "
                     "len %#x not multiple of %#zx",
                     indirect->len, sizeof(desc));
        vring->broken = true;
        return -EFAULT;
    }

    count = indirect->len / sizeof(desc);
    /* Buffers are chained via a 16 bit next field, so
     * we can have at most 2^16 of these. */
    if (unlikely(count > USHRT_MAX + 1)) {
        error_report("Indirect buffer length too big: %d", indirect->len);
        vring->broken = true;
        return -EFAULT;
    }

    do {
        /* Translate indirect descriptor */
        if (!read_vring_desc(vdev, indirect->addr + found * sizeof(desc),
                             &desc)) {
            error_report("Failed to read indirect descriptor "
                         "addr %#" PRIx64 " len %zu",
                         (uint64_t)indirect->addr + found * sizeof(desc),
                         sizeof(desc));
            vring->broken = true;
            return -EFAULT;
        }

        /* Ensure descriptor has been loaded before accessing fields */
        barrier(); /* read_barrier_depends(); */

        if (unlikely(++found > count)) {
            error_report("Loop detected: last one at %u "
                         "indirect size %u", i, count);
            vring->broken = true;
            return -EFAULT;
        }

        if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
            error_report("Nested indirect descriptor");
            vring->broken = true;
            return -EFAULT;
        }

        ret = get_desc(vring, cur_elem, &desc);
        if (ret < 0) {
            vring->broken |= (ret == -EFAULT);
            return ret;
        }
        i = desc.next;
    } while (desc.flags & VRING_DESC_F_NEXT);
    return 0;
}

static void vring_unmap_element(VirtQueueElement *elem)
{
    int i;

    /* This assumes that the iovecs, if changed, are never moved past
     * the end of the valid area. This is true if iovec manipulations
     * are done with iov_discard_front and iov_discard_back.
     */
    for (i = 0; i < elem->out_num; i++) {
        vring_unmap(elem->out_sg[i].iov_base, false);
    }

    for (i = 0; i < elem->in_num; i++) {
        vring_unmap(elem->in_sg[i].iov_base, true);
    }
}

/* This looks in the virtqueue and for the first available buffer, and converts
 * it to an iovec for convenient access. Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function returns the descriptor number found, or vq->num (which is
 * never a valid descriptor number) if none was found. A negative code is
 * returned on error.
 *
 * Stolen from linux/drivers/vhost/vhost.c.
 */
void *vring_pop(VirtIODevice *vdev, Vring *vring, size_t sz)
{
    struct vring_desc desc;
    unsigned int i, head, found = 0, num = vring->vr.num;
    uint16_t avail_idx, last_avail_idx;
    VirtQueueCurrentElement cur_elem;
    VirtQueueElement *elem = NULL;
    int ret;

    /* If there was a fatal error then refuse operation */
    if (vring->broken) {
        ret = -EFAULT;
        goto out;
    }

    cur_elem.in_num = cur_elem.out_num = 0;

    /* Check it isn't doing very strange things with descriptor numbers. */
    last_avail_idx = vring->last_avail_idx;
    avail_idx = vring_get_avail_idx(vdev, vring);
    barrier(); /* load indices now and not again later */

    if (unlikely((uint16_t)(avail_idx - last_avail_idx) > num)) {
        error_report("Guest moved used index from %u to %u",
                     last_avail_idx, avail_idx);
        ret = -EFAULT;
        goto out;
    }

    /* If there's nothing new since last we looked. */
    if (avail_idx == last_avail_idx) {
        ret = -EAGAIN;
        goto out;
    }

    /* Only get avail ring entries after they have been exposed by guest. */
    smp_rmb();

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_get_avail_ring(vdev, vring, last_avail_idx % num);

    /* If their number is silly, that's an error. */
    if (unlikely(head >= num)) {
        error_report("Guest says index %u > %u is available", head, num);
        ret = -EFAULT;
        goto out;
    }

    i = head;
    do {
        if (unlikely(i >= num)) {
            error_report("Desc index is %u > %u, head = %u", i, num, head);
            ret = -EFAULT;
            goto out;
        }
        if (unlikely(++found > num)) {
            error_report("Loop detected: last one at %u vq size %u head %u",
                         i, num, head);
            ret = -EFAULT;
            goto out;
        }
        copy_in_vring_desc(vdev, &vring->vr.desc[i], &desc);

        /* Ensure descriptor is loaded before accessing fields */
        barrier();

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            ret = get_indirect(vdev, vring, &cur_elem, &desc);
            if (ret < 0) {
                goto out;
            }
            continue;
        }

        ret = get_desc(vring, &cur_elem, &desc);
        if (ret < 0) {
            goto out;
        }

        i = desc.next;
    } while (desc.flags & VRING_DESC_F_NEXT);

    /* On success, increment avail index. */
    vring->last_avail_idx++;
    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_avail_event(&vring->vr) =
            virtio_tswap16(vdev, vring->last_avail_idx);
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, cur_elem.out_num, cur_elem.in_num);
    elem->index = head;
    for (i = 0; i < cur_elem.out_num; i++) {
        elem->out_addr[i] = cur_elem.addr[i];
        elem->out_sg[i] = cur_elem.iov[i];
    }
    for (i = 0; i < cur_elem.in_num; i++) {
        elem->in_addr[i] = cur_elem.addr[cur_elem.out_num + i];
        elem->in_sg[i] = cur_elem.iov[cur_elem.out_num + i];
    }

    return elem;

out:
    assert(ret < 0);
    if (ret == -EFAULT) {
        vring->broken = true;
    }

    for (i = 0; i < cur_elem.out_num + cur_elem.in_num; i++) {
        vring_unmap(cur_elem.iov[i].iov_base, false);
    }

    g_free(elem);
    return NULL;
}

/* After we've used one of their buffers, we tell them about it.
 *
 * Stolen from linux/drivers/vhost/vhost.c.
 */
void vring_push(VirtIODevice *vdev, Vring *vring, VirtQueueElement *elem,
                int len)
{
    unsigned int head = elem->index;
    uint16_t new;

    vring_unmap_element(elem);

    /* Don't touch vring if a fatal error occurred */
    if (vring->broken) {
        return;
    }

    /* The virtqueue contains a ring of used buffers. Get a pointer to the
     * next entry in that used ring. */
    vring_set_used_ring_id(vdev, vring, vring->last_used_idx % vring->vr.num,
                           head);
    vring_set_used_ring_len(vdev, vring, vring->last_used_idx % vring->vr.num,
                            len);

    /* Make sure buffer is written before we update index. */
    smp_wmb();

    new = ++vring->last_used_idx;
    vring_set_used_idx(vdev, vring, new);
    if (unlikely((int16_t)(new - vring->signalled_used) < (uint16_t)1)) {
        vring->signalled_used_valid = false;
    }
}

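Taken together, vring_pop() and vring_push() form the request loop the dataplane code being deleted here used to run. A minimal sketch of that cycle, with device-specific work behind hypothetical stand-ins (process_request() and notify_guest() are not QEMU APIs):

/* Sketch only: process_request() and notify_guest() are hypothetical
 * stand-ins for device-specific code, not QEMU APIs. */
static int process_request(struct iovec *out_sg, unsigned out_num,
                           struct iovec *in_sg, unsigned in_num);
static void notify_guest(VirtIODevice *vdev);

static void drain_vring(VirtIODevice *vdev, Vring *vring)
{
    VirtQueueElement *elem;

    while ((elem = vring_pop(vdev, vring, sizeof(*elem))) != NULL) {
        /* out_sg carries driver->device data, in_sg device->driver space */
        int len = process_request(elem->out_sg, elem->out_num,
                                  elem->in_sg, elem->in_num);
        vring_push(vdev, vring, elem, len); /* unmaps and fills used ring */
        g_free(elem);
        if (vring_should_notify(vdev, vring)) {
            notify_guest(vdev);
        }
    }
}
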
@ -611,6 +611,25 @@ static int vhost_user_migration_done(struct vhost_dev *dev, char* mac_addr)
    return -1;
}

static bool vhost_user_can_merge(struct vhost_dev *dev,
                                 uint64_t start1, uint64_t size1,
                                 uint64_t start2, uint64_t size2)
{
    ram_addr_t ram_addr;
    int mfd, rfd;
    MemoryRegion *mr;

    mr = qemu_ram_addr_from_host((void *)(uintptr_t)start1, &ram_addr);
    assert(mr);
    mfd = qemu_get_ram_fd(ram_addr);

    mr = qemu_ram_addr_from_host((void *)(uintptr_t)start2, &ram_addr);
    assert(mr);
    rfd = qemu_get_ram_fd(ram_addr);

    return mfd == rfd;
}

const VhostOps user_ops = {
    .backend_type = VHOST_BACKEND_TYPE_USER,
    .vhost_backend_init = vhost_user_init,
@ -633,4 +652,5 @@ const VhostOps user_ops = {
    .vhost_set_vring_enable = vhost_user_set_vring_enable,
    .vhost_requires_shm_log = vhost_user_requires_shm_log,
    .vhost_migration_done = vhost_user_migration_done,
    .vhost_backend_can_merge = vhost_user_can_merge,
};

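The rationale for vhost_user_can_merge(): vhost-user hands the memory table to the backend with one file descriptor per region, so two adjacent ranges may only collapse into a single region when they are backed by the same fd. For context, a sketch of the per-region record that SET_MEM_TABLE carries (field layout as used by hw/virtio/vhost-user.c; the fd itself travels out-of-band as SCM_RIGHTS ancillary data, which is exactly why a merged region cannot span two fds):

/* One entry of the vhost-user memory table; the backing fd for each
 * entry is passed separately over the UNIX socket. */
typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr; /* guest-physical start of the region */
    uint64_t memory_size;     /* region length in bytes */
    uint64_t userspace_addr;  /* QEMU virtual address of the mapping */
    uint64_t mmap_offset;     /* offset into the fd's mapping */
} VhostUserMemoryRegion;
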
@ -260,6 +260,13 @@ static void vhost_dev_assign_memory(struct vhost_dev *dev,
            continue;
        }

        if (dev->vhost_ops->vhost_backend_can_merge &&
            !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
                                                     reg->userspace_addr,
                                                     reg->memory_size)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);

@ -305,6 +305,39 @@ static void virtio_balloon_get_config(VirtIODevice *vdev, uint8_t *config_data)
    memcpy(config_data, &config, sizeof(struct virtio_balloon_config));
}

static int build_dimm_list(Object *obj, void *opaque)
{
    GSList **list = opaque;

    if (object_dynamic_cast(obj, TYPE_PC_DIMM)) {
        DeviceState *dev = DEVICE(obj);
        if (dev->realized) { /* only realized DIMMs matter */
            *list = g_slist_prepend(*list, dev);
        }
    }

    object_child_foreach(obj, build_dimm_list, opaque);
    return 0;
}

static ram_addr_t get_current_ram_size(void)
{
    GSList *list = NULL, *item;
    ram_addr_t size = ram_size;

    build_dimm_list(qdev_get_machine(), &list);
    for (item = list; item; item = g_slist_next(item)) {
        Object *obj = OBJECT(item->data);
        if (!strcmp(object_get_typename(obj), TYPE_PC_DIMM)) {
            size += object_property_get_int(obj, PC_DIMM_SIZE_PROP,
                                            &error_abort);
        }
    }
    g_slist_free(list);

    return size;
}

static void virtio_balloon_set_config(VirtIODevice *vdev,
                                      const uint8_t *config_data)
{

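The helper now lives in the only place that needs it: the balloon. Current guest RAM is the base ram_size plus every realized pc-dimm, and the balloon uses it to bound how far the guest can be deflated. Roughly, the caller side looks like this (a sketch of the clamp, not a verbatim quote of this file):

/* Sketch: the balloon target may never exceed what the guest has. */
ram_addr_t vm_ram_size = get_current_ram_size(); /* base RAM + DIMMs */
if (target > vm_ram_size) {
    target = vm_ram_size;
}
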
@ -1162,7 +1162,7 @@ void virtio_irq(VirtQueue *vq)
    virtio_notify_vector(vq->vdev, vq->vector);
}

static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq)
bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
@ -1187,7 +1187,7 @@ static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq)

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (!vring_notify(vdev, vq)) {
    if (!virtio_should_notify(vdev, vq)) {
        return;
    }

@ -1786,6 +1786,22 @@ static void virtio_queue_host_notifier_read(EventNotifier *n)
    }
}

void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
                                                bool assign, bool set_handler)
{
    if (assign && set_handler) {
        aio_set_event_notifier(ctx, &vq->host_notifier, true,
                               virtio_queue_host_notifier_read);
    } else {
        aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL);
    }
    if (!assign) {
        /* Test and clear notifier before after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_read(&vq->host_notifier);
    }
}

void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
                                               bool set_handler)
{

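The new AioContext-aware helper exists so dataplane code can bind a queue's ioeventfd handler to an IOThread's AioContext instead of the main loop. A sketch of the intended call pattern, modelled on the dataplane start/stop paths elsewhere in this series (the names s->vq and s->ctx are assumed):

/* Start: route host notifications for this queue into the IOThread. */
virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx, true, true);

/* Stop: detach the handler; the final read inside the helper also
 * drains any event that raced with the teardown. */
virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx, false, false);
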
@ -54,7 +54,6 @@ typedef uintptr_t ram_addr_t;
#endif

extern ram_addr_t ram_size;
ram_addr_t get_current_ram_size(void);

/* memory API */

@ -11,7 +11,7 @@ void bios_linker_loader_alloc(GArray *linker,
                              bool alloc_fseg);

void bios_linker_loader_add_checksum(GArray *linker, const char *file,
                                     void *table,
                                     GArray *table,
                                     void *start, unsigned size,
                                     uint8_t *checksum);

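Passing the GArray itself rather than its current data pointer lets the checksum helper convert `start` and `checksum` into offsets relative to table->data, so a later g_array reallocation cannot leave the linker command pointing at freed memory. A sketch of the offset math this enables (an assumed illustration, not the literal implementation):

/* Inside the checksum helper: record offsets, not raw pointers. */
size_t off = (gchar *)start - (gchar *)table->data;        /* range start  */
size_t csum_off = (gchar *)checksum - (gchar *)table->data; /* checksum byte */
/* off and csum_off stay meaningful even if table->data moves later. */
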
@ -62,7 +62,6 @@ typedef struct ICH9LPCPMRegs {

void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm,
                  bool smm_enabled,
                  bool enable_tco,
                  qemu_irq sci_irq);

void ich9_pm_iospace_update(ICH9LPCPMRegs *pm, uint32_t pm_io_base);

@ -84,7 +84,6 @@ struct MachineClass {
        no_cdrom:1,
        no_sdcard:1,
        has_dynamic_sysbus:1,
        no_tco:1,
        pci_allow_0_address:1;
    int is_default;
    const char *default_machine_opts;

@ -17,7 +17,7 @@
void ich9_lpc_set_irq(void *opaque, int irq_num, int level);
int ich9_lpc_map_irq(PCIDevice *pci_dev, int intx);
PCIINTxRoute ich9_route_intx_pin_to_irq(void *opaque, int pirq_pin);
void ich9_lpc_pm_init(PCIDevice *pci_lpc, bool smm_enabled, bool enable_tco);
void ich9_lpc_pm_init(PCIDevice *pci_lpc, bool smm_enabled);
I2CBus *ich9_smb_init(PCIBus *bus, int devfn, uint32_t smb_io_base);

void ich9_generate_smi(void);

@ -1,75 +0,0 @@
#ifndef VRING_ACCESSORS_H
#define VRING_ACCESSORS_H

#include "standard-headers/linux/virtio_ring.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-access.h"

static inline uint16_t vring_get_used_idx(VirtIODevice *vdev, Vring *vring)
{
    return virtio_tswap16(vdev, vring->vr.used->idx);
}

static inline void vring_set_used_idx(VirtIODevice *vdev, Vring *vring,
                                      uint16_t idx)
{
    vring->vr.used->idx = virtio_tswap16(vdev, idx);
}

static inline uint16_t vring_get_avail_idx(VirtIODevice *vdev, Vring *vring)
{
    return virtio_tswap16(vdev, vring->vr.avail->idx);
}

static inline uint16_t vring_get_avail_ring(VirtIODevice *vdev, Vring *vring,
                                            int i)
{
    return virtio_tswap16(vdev, vring->vr.avail->ring[i]);
}

static inline void vring_set_used_ring_id(VirtIODevice *vdev, Vring *vring,
                                          int i, uint32_t id)
{
    vring->vr.used->ring[i].id = virtio_tswap32(vdev, id);
}

static inline void vring_set_used_ring_len(VirtIODevice *vdev, Vring *vring,
                                           int i, uint32_t len)
{
    vring->vr.used->ring[i].len = virtio_tswap32(vdev, len);
}

static inline uint16_t vring_get_used_flags(VirtIODevice *vdev, Vring *vring)
{
    return virtio_tswap16(vdev, vring->vr.used->flags);
}

static inline uint16_t vring_get_avail_flags(VirtIODevice *vdev, Vring *vring)
{
    return virtio_tswap16(vdev, vring->vr.avail->flags);
}

static inline void vring_set_used_flags(VirtIODevice *vdev, Vring *vring,
                                        uint16_t flags)
{
    vring->vr.used->flags |= virtio_tswap16(vdev, flags);
}

static inline void vring_clear_used_flags(VirtIODevice *vdev, Vring *vring,
                                          uint16_t flags)
{
    vring->vr.used->flags &= virtio_tswap16(vdev, ~flags);
}

static inline unsigned int vring_get_num(Vring *vring)
{
    return vring->vr.num;
}

/* Are there more descriptors available? */
static inline bool vring_more_avail(VirtIODevice *vdev, Vring *vring)
{
    return vring_get_avail_idx(vdev, vring) != vring->last_avail_idx;
}

#endif

@ -1,51 +0,0 @@
/* Copyright 2012 Red Hat, Inc. and/or its affiliates
 * Copyright IBM, Corp. 2012
 *
 * Based on Linux 2.6.39 vhost code:
 * Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#ifndef VRING_H
#define VRING_H

#include "qemu-common.h"
#include "standard-headers/linux/virtio_ring.h"
#include "hw/virtio/virtio.h"

typedef struct {
    MemoryRegion *mr_desc;   /* memory region for the vring desc */
    MemoryRegion *mr_avail;  /* memory region for the vring avail */
    MemoryRegion *mr_used;   /* memory region for the vring used */
    struct vring vr;         /* virtqueue vring mapped to host memory */
    uint16_t last_avail_idx; /* last processed avail ring index */
    uint16_t last_used_idx;  /* last processed used ring index */
    uint16_t signalled_used; /* EVENT_IDX state */
    bool signalled_used_valid;
    bool broken;             /* was there a fatal error? */
} Vring;

/* Fail future vring_pop() and vring_push() calls until reset */
static inline void vring_set_broken(Vring *vring)
{
    vring->broken = true;
}

bool vring_setup(Vring *vring, VirtIODevice *vdev, int n);
void vring_teardown(Vring *vring, VirtIODevice *vdev, int n);
void vring_disable_notification(VirtIODevice *vdev, Vring *vring);
bool vring_enable_notification(VirtIODevice *vdev, Vring *vring);
bool vring_should_notify(VirtIODevice *vdev, Vring *vring);
void *vring_pop(VirtIODevice *vdev, Vring *vring, size_t sz);
void vring_push(VirtIODevice *vdev, Vring *vring, VirtQueueElement *elem,
                int len);

#endif /* VRING_H */

@ -69,6 +69,9 @@ typedef int (*vhost_set_vring_enable_op)(struct vhost_dev *dev,
typedef bool (*vhost_requires_shm_log_op)(struct vhost_dev *dev);
typedef int (*vhost_migration_done_op)(struct vhost_dev *dev,
                                       char *mac_addr);
typedef bool (*vhost_backend_can_merge_op)(struct vhost_dev *dev,
                                           uint64_t start1, uint64_t size1,
                                           uint64_t start2, uint64_t size2);

typedef struct VhostOps {
    VhostBackendType backend_type;
@ -96,6 +99,7 @@ typedef struct VhostOps {
    vhost_set_vring_enable_op vhost_set_vring_enable;
    vhost_requires_shm_log_op vhost_requires_shm_log;
    vhost_migration_done_op vhost_migration_done;
    vhost_backend_can_merge_op vhost_backend_can_merge;
} VhostOps;

extern const VhostOps user_ops;

@ -53,9 +53,7 @@ typedef struct VirtIOBlock {
    unsigned short sector_mask;
    bool original_wce;
    VMChangeStateEntry *change;
    /* Function to push to vq and notify guest */
    void (*complete_request)(struct VirtIOBlockReq *req, unsigned char status);
    Notifier migration_state_notifier;
    bool dataplane_started;
    struct VirtIOBlockDataPlane *dataplane;
} VirtIOBlock;

@ -22,7 +22,6 @@
#include "hw/pci/pci.h"
#include "hw/scsi/scsi.h"
#include "sysemu/iothread.h"
#include "hw/virtio/dataplane/vring.h"

#define TYPE_VIRTIO_SCSI_COMMON "virtio-scsi-common"
#define VIRTIO_SCSI_COMMON(obj) \
@ -58,13 +57,6 @@ struct VirtIOSCSIConf {

struct VirtIOSCSI;

typedef struct {
    struct VirtIOSCSI *parent;
    Vring vring;
    EventNotifier host_notifier;
    EventNotifier guest_notifier;
} VirtIOSCSIVring;

typedef struct VirtIOSCSICommon {
    VirtIODevice parent_obj;
    VirtIOSCSIConf conf;
@ -96,18 +88,12 @@ typedef struct VirtIOSCSI {
    QTAILQ_HEAD(, VirtIOSCSIBlkChangeNotifier) insert_notifiers;
    QTAILQ_HEAD(, VirtIOSCSIBlkChangeNotifier) remove_notifiers;

    /* Vring is used instead of vq in dataplane code, because of the underlying
     * memory layer thread safety */
    VirtIOSCSIVring *ctrl_vring;
    VirtIOSCSIVring *event_vring;
    VirtIOSCSIVring **cmd_vrings;
    bool dataplane_started;
    bool dataplane_starting;
    bool dataplane_stopping;
    bool dataplane_disabled;
    bool dataplane_fenced;
    Error *blocker;
    Notifier migration_state_notifier;
    uint32_t host_features;
} VirtIOSCSI;

@ -123,9 +109,6 @@ typedef struct VirtIOSCSIReq {
    QEMUSGList qsgl;
    QEMUIOVector resp_iov;

    /* Set by dataplane code. */
    VirtIOSCSIVring *vring;

    union {
        /* Used for two-stage request submission */
        QTAILQ_ENTRY(VirtIOSCSIReq) next;
@ -168,8 +151,6 @@ void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
void virtio_scsi_set_iothread(VirtIOSCSI *s, IOThread *iothread);
void virtio_scsi_dataplane_start(VirtIOSCSI *s);
void virtio_scsi_dataplane_stop(VirtIOSCSI *s);
void virtio_scsi_vring_push_notify(VirtIOSCSIReq *req);
VirtIOSCSIReq *virtio_scsi_pop_req_vring(VirtIOSCSI *s,
                                         VirtIOSCSIVring *vring);
void virtio_scsi_dataplane_notify(VirtIODevice *vdev, VirtIOSCSIReq *req);

#endif /* _QEMU_VIRTIO_SCSI_H */

@ -163,6 +163,7 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes);

bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq);
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq);

void virtio_save(VirtIODevice *vdev, QEMUFile *f);
@ -249,6 +250,8 @@ void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
                                               bool set_handler);
void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
                                                bool assign, bool set_handler);
void virtio_queue_notify_vq(VirtQueue *vq);
void virtio_irq(VirtQueue *vq);
VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector);

@ -54,17 +54,25 @@ typedef struct BlkMigDevState {
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
    Error *blocker;

    /* Only used by migration thread. Does not need a lock. */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Protected by block migration lock. */
    /* Data in the aio_bitmap is protected by block migration lock.
     * Allocation and free happen during setup and cleanup respectively.
     */
    unsigned long *aio_bitmap;

    /* Protected by block migration lock. */
    int64_t completed_sectors;

    /* During migration this is protected by iothread lock / AioContext.
     * Allocation and free happen during setup and cleanup respectively.
     */
    BdrvDirtyBitmap *dirty_bitmap;
    Error *blocker;
} BlkMigDevState;

typedef struct BlkMigBlock {
@ -100,7 +108,7 @@ typedef struct BlkMigState {
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken _inside_ the iothread lock. */
    /* Lock must be taken _inside_ the iothread lock and any AioContexts. */
    QemuMutex lock;
} BlkMigState;

@ -264,11 +272,13 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)

    if (bmds->shared_base) {
        qemu_mutex_lock_iothread();
        aio_context_acquire(bdrv_get_aio_context(bs));
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(bs, cur_sector, MAX_IS_ALLOCATED_SEARCH,
                                  &nr_sectors)) {
            cur_sector += nr_sectors;
        }
        aio_context_release(bdrv_get_aio_context(bs));
        qemu_mutex_unlock_iothread();
    }

@ -302,11 +312,21 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
    block_mig_state.submitted++;
    blk_mig_unlock();

    /* We do not know if bs is under the main thread (and thus does
     * not acquire the AioContext when doing AIO) or rather under
     * dataplane. Thus acquire both the iothread mutex and the
     * AioContext.
     *
     * This is ugly and will disappear when we make bdrv_* thread-safe,
     * without the need to acquire the AioContext.
     */
    qemu_mutex_lock_iothread();
    aio_context_acquire(bdrv_get_aio_context(bmds->bs));
    blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                nr_sectors, blk_mig_read_cb, blk);

    bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector, nr_sectors);
    aio_context_release(bdrv_get_aio_context(bmds->bs));
    qemu_mutex_unlock_iothread();

    bmds->cur_sector = cur_sector + nr_sectors;
@ -321,8 +341,10 @@ static int set_dirty_tracking(void)
    int ret;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(bdrv_get_aio_context(bmds->bs));
        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(bmds->bs, BLOCK_SIZE,
                                                      NULL, NULL);
        aio_context_release(bdrv_get_aio_context(bmds->bs));
        if (!bmds->dirty_bitmap) {
            ret = -errno;
            goto fail;
@ -333,18 +355,24 @@ static int set_dirty_tracking(void)
fail:
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->dirty_bitmap) {
            aio_context_acquire(bdrv_get_aio_context(bmds->bs));
            bdrv_release_dirty_bitmap(bmds->bs, bmds->dirty_bitmap);
            aio_context_release(bdrv_get_aio_context(bmds->bs));
        }
    }
    return ret;
}

/* Called with iothread lock taken. */

static void unset_dirty_tracking(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(bdrv_get_aio_context(bmds->bs));
        bdrv_release_dirty_bitmap(bmds->bs, bmds->dirty_bitmap);
        aio_context_release(bdrv_get_aio_context(bmds->bs));
    }
}

@ -444,7 +472,7 @@ static void blk_mig_reset_dirty_cursor(void)
    }
}

/* Called with iothread lock taken. */
/* Called with iothread lock and AioContext taken. */

static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
@ -527,7 +555,9 @@ static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(bdrv_get_aio_context(bmds->bs));
        ret = mig_save_device_dirty(f, bmds, is_async);
        aio_context_release(bdrv_get_aio_context(bmds->bs));
        if (ret <= 0) {
            break;
        }
@ -585,7 +615,9 @@ static int64_t get_remaining_dirty(void)
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(bdrv_get_aio_context(bmds->bs));
        dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
        aio_context_release(bdrv_get_aio_context(bmds->bs));
    }

    return dirty << BDRV_SECTOR_BITS;
@ -597,21 +629,28 @@ static void block_migration_cleanup(void *opaque)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;
    AioContext *ctx;

    bdrv_drain_all();

    unset_dirty_tracking();

    blk_mig_lock();
    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_op_unblock_all(bmds->bs, bmds->blocker);
        error_free(bmds->blocker);

        /* Save ctx, because bmds->bs can disappear during bdrv_unref. */
        ctx = bdrv_get_aio_context(bmds->bs);
        aio_context_acquire(ctx);
        bdrv_unref(bmds->bs);
        aio_context_release(ctx);

        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
@ -633,13 +672,12 @@ static int block_save_setup(QEMUFile *f, void *opaque)
    /* start track dirty blocks */
    ret = set_dirty_tracking();

    qemu_mutex_unlock_iothread();

    if (ret) {
        qemu_mutex_unlock_iothread();
        return ret;
    }

    qemu_mutex_unlock_iothread();

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
@ -761,17 +799,18 @@ static void block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
    uint64_t pending;

    qemu_mutex_lock_iothread();
    pending = get_remaining_dirty();
    qemu_mutex_unlock_iothread();

    blk_mig_lock();
    pending = get_remaining_dirty() +
              block_mig_state.submitted * BLOCK_SIZE +
              block_mig_state.read_done * BLOCK_SIZE;
    pending += block_mig_state.submitted * BLOCK_SIZE +
               block_mig_state.read_done * BLOCK_SIZE;
    blk_mig_unlock();

    /* Report at least one block pending during bulk phase */
    if (pending <= max_size && !block_mig_state.bulk_completed) {
        pending = max_size + BLOCK_SIZE;
    }
    blk_mig_unlock();
    qemu_mutex_unlock_iothread();

    DPRINTF("Enter save live pending %" PRIu64 "\n", pending);
    /* We don't do postcopy */

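The pattern repeated through these hunks is what gives the series its "block-migration: acquire AioContext as necessary" entry: any bdrv_* call on a device that may be driven by a dataplane IOThread must be bracketed by acquiring and releasing that device's AioContext. As a sketch of the rule (bs assumed to be a BlockDriverState *):

/* Canonical bracketing for cross-thread block-layer access; every
 * bdrv_*() call in between runs under the device's AioContext. */
AioContext *ctx = bdrv_get_aio_context(bs);
aio_context_acquire(ctx);
/* ... bdrv_is_allocated(), bdrv_aio_readv(), bdrv_unref(), ... */
aio_context_release(ctx);
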
Binary file not shown.
@ -6,8 +6,3 @@ int qmp_pc_dimm_device_list(Object *obj, void *opaque)
{
    return 0;
}

ram_addr_t get_current_ram_size(void)
{
    return ram_size;
}

@ -414,7 +414,7 @@ vubr_message_read(int conn_fd, VhostUserMsg *vmsg)
    if (vmsg->size > sizeof(vmsg->payload)) {
        fprintf(stderr,
                "Error: too big message request: %d, size: vmsg->size: %u, "
                "while sizeof(vmsg->payload) = %lu\n",
                "while sizeof(vmsg->payload) = %zu\n",
                vmsg->request, vmsg->size, sizeof(vmsg->payload));
        exit(1);
    }
@ -578,7 +578,7 @@ vubr_post_buffer(VubrDev *dev, VubrVirtq *vq, uint8_t *buf, int32_t len)
            exit(1);
        }

        void *chunk_start = (void *)gpa_to_va(dev, desc[i].addr);
        void *chunk_start = (void *)(uintptr_t)gpa_to_va(dev, desc[i].addr);
        uint32_t chunk_len = desc[i].len;
        uint32_t chunk_write_len = MIN(remaining_len, chunk_len);

@ -641,7 +641,7 @@ vubr_process_desc(VubrDev *dev, VubrVirtq *vq)
    DPRINT("Chunks: ");
    i = d_index;
    do {
        void *chunk_start = (void *)gpa_to_va(dev, desc[i].addr);
        void *chunk_start = (void *)(uintptr_t)gpa_to_va(dev, desc[i].addr);
        uint32_t chunk_len = desc[i].len;

        assert(!(desc[i].flags & VRING_DESC_F_WRITE));
@ -861,7 +861,7 @@ vubr_set_mem_table_exec(VubrDev *dev, VhostUserMsg *vmsg)
    if (mmap_addr == MAP_FAILED) {
        vubr_die("mmap");
    }
    dev_region->mmap_addr = (uint64_t) mmap_addr;
    dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
    DPRINT(" mmap_addr: 0x%016"PRIx64"\n", dev_region->mmap_addr);

    close(vmsg->fds[i]);
@ -935,9 +935,9 @@ vubr_set_vring_addr_exec(VubrDev *dev, VhostUserMsg *vmsg)
    DPRINT(" avail_user_addr: 0x%016llx\n", vra->avail_user_addr);
    DPRINT(" log_guest_addr: 0x%016llx\n", vra->log_guest_addr);

    vq->desc = (struct vring_desc *)qva_to_va(dev, vra->desc_user_addr);
    vq->used = (struct vring_used *)qva_to_va(dev, vra->used_user_addr);
    vq->avail = (struct vring_avail *)qva_to_va(dev, vra->avail_user_addr);
    vq->desc = (struct vring_desc *)(uintptr_t)qva_to_va(dev, vra->desc_user_addr);
    vq->used = (struct vring_used *)(uintptr_t)qva_to_va(dev, vra->used_user_addr);
    vq->avail = (struct vring_avail *)(uintptr_t)qva_to_va(dev, vra->avail_user_addr);
    vq->log_guest_addr = vra->log_guest_addr;

    DPRINT("Setting virtq addresses:\n");

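All of these hunks are one and the same 32-bit portability fix ("tests/vhost-user-bridge: fix build on 32 bit systems"): the address-translation helpers return a uint64_t, and casting a 64-bit integer straight to a pointer does not compile cleanly where pointers are 32 bits wide. Going through uintptr_t makes the narrowing explicit; likewise %zu is the portable format for size_t where %lu is wrong on 32-bit hosts. A self-contained sketch of the idiom:

#include <stdint.h>

/* Hypothetical translation result; on a 32-bit host the value is known
 * to fit in the address space even though its type is 64-bit. */
static void *u64_to_ptr(uint64_t addr)
{
    /* (void *)addr would warn or fail where sizeof(void *) == 4; the
     * intermediate uintptr_t makes the truncation deliberate. */
    return (void *)(uintptr_t)addr;
}
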
@ -127,9 +127,6 @@ virtio_blk_data_plane_start(void *s) "dataplane %p"
virtio_blk_data_plane_stop(void *s) "dataplane %p"
virtio_blk_data_plane_process_request(void *s, unsigned int out_num, unsigned int in_num, unsigned int head) "dataplane %p out_num %u in_num %u head %u"

# hw/virtio/dataplane/vring.c
vring_setup(uint64_t physical, void *desc, void *avail, void *used) "vring physical %#"PRIx64" desc %p avail %p used %p"

# thread-pool.c
thread_pool_submit(void *pool, void *req, void *opaque) "pool %p req %p opaque %p"
thread_pool_complete(void *pool, void *req, void *opaque, int ret) "pool %p req %p opaque %p ret %d"