mirror of https://github.com/xemu-project/xemu.git
migration/next for 20170421
-----BEGIN PGP SIGNATURE-----

iQIcBAABCAAGBQJY+d69AAoJEPSH7xhYctcj/4oQAIFFEyWaqrL9ve5ySiJgdtcY
zYtiIhZQ+nPuy2i1oDSX+vbMcmkJDDyfO5qLovxyHGkZHniR8HtxNHP+MkZQa07p
DiSIvd51HvcixIouhbGcoUCU63AYxqNL3o5/TyNpUI72nvsgwl3yfOot7PtutE/F
r384j8DrOJ9VwC5GGPg27mJvRPvyfDQWfxDCyMYVw153HTuwVYtgiu/layWojJDV
D2L1KV45ezBuGckZTHt9y6K4J5qz8qHb/dJc+whBBjj4j9T9XOILU9NPDAEuvjFZ
gHbrUyxj7EiApjHcDZoQm9Raez422ALU30yc9Kn7ik7vSqTxk2Ejq6Gz7y9MJrDn
KdMj75OETJNjBL+0T9MmbtWts28+aalpTUXtBpmi3eWQV5Hcox2NF1RP42jtD9Pa
lkrM6jv0nsdNfBPlQ+ZmBTJxysWECcMqy487nrzmPNC8vZfokjXL5be12puho9fh
ziU4gx9C6/k82S+/H6WD/AUtRiXJM7j4oTU2mnjrsSXQC1JNWqODBOFUo9zsDufl
vtcrxfPhSD1DwOInFSIBHf/RylcgTkPCL0rPoJ8npNDly6rHFYJ+oIbsn84Z4uYY
RWvH8xB9wgRlK9L1WdRgOd2q7PaeHQoSSdPOiS9YVEVMVvSW8Es5CRlhcAsw/M/T
1Tl65cNrjETAuZKL3dLH
=EsZ5
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/juanquintela/tags/migration/20170421' into staging

migration/next for 20170421

# gpg: Signature made Fri 21 Apr 2017 11:28:13 BST
# gpg:                using RSA key 0xF487EF185872D723
# gpg: Good signature from "Juan Quintela <quintela@redhat.com>"
# gpg:                 aka "Juan Quintela <quintela@trasno.org>"
# Primary key fingerprint: 1899 FF8E DEBF 58CC EE03 4B82 F487 EF18 5872 D723

* remotes/juanquintela/tags/migration/20170421: (65 commits)
  hmp: info migrate_parameters format tunes
  hmp: info migrate_capability format tunes
  migration: rename max_size to threshold_size
  migration: set current_active_state once
  virtio-rng: stop virtqueue while the CPU is stopped
  migration: don't close a file descriptor while it can be in use
  ram: Remove migration_bitmap_extend()
  migration: Disable hotplug/unplug during migration
  qdev: Move qdev_unplug() to qdev-monitor.c
  qdev: Export qdev_hot_removed
  qdev: qdev_hotplug is really a bool
  migration: Remove MigrationState parameter from migration_is_idle()
  ram: Use RAMBitmap type for coherence
  ram: rename last_ram_offset() last_ram_pages()
  ram: Use ramblock and page offset instead of absolute offset
  ram: Change offset field in PageSearchStatus to page
  ram: Remember last_page instead of last_offset
  ram: Use page number instead of an address for the bitmap operations
  ram: reorganize last_sent_block
  ram: ram_discard_range() don't use the mis parameter
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 32c7e0ab75
exec.c (11 changed lines)

@@ -1528,7 +1528,7 @@ static ram_addr_t find_ram_offset(ram_addr_t size)
     return offset;
 }
 
-ram_addr_t last_ram_offset(void)
+unsigned long last_ram_page(void)
 {
     RAMBlock *block;
     ram_addr_t last = 0;
@@ -1538,7 +1538,7 @@ ram_addr_t last_ram_offset(void)
         last = MAX(last, block->offset + block->max_length);
     }
     rcu_read_unlock();
-    return last;
+    return last >> TARGET_PAGE_BITS;
 }
 
 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
@@ -1727,7 +1727,7 @@ static void ram_block_add(RAMBlock *new_block, Error **errp)
     ram_addr_t old_ram_size, new_ram_size;
     Error *err = NULL;
 
-    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
+    old_ram_size = last_ram_page();
 
     qemu_mutex_lock_ramlist();
     new_block->offset = find_ram_offset(new_block->max_length);
@@ -1758,7 +1758,6 @@ static void ram_block_add(RAMBlock *new_block, Error **errp)
     new_ram_size = MAX(old_ram_size,
                        (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
     if (new_ram_size > old_ram_size) {
-        migration_bitmap_extend(old_ram_size, new_ram_size);
         dirty_memory_extend(old_ram_size, new_ram_size);
     }
     /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
@@ -3307,9 +3306,9 @@ int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
  * Allows code that needs to deal with migration bitmaps etc to still be built
  * target independent.
  */
-size_t qemu_target_page_bits(void)
+size_t qemu_target_page_size(void)
 {
-    return TARGET_PAGE_BITS;
+    return TARGET_PAGE_SIZE;
 }
 
 #endif
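Note on the two renames above: last_ram_page() is simply the old byte offset shifted right by TARGET_PAGE_BITS, and qemu_target_page_size() returns TARGET_PAGE_SIZE, i.e. 1 << qemu_target_page_bits(). A minimal standalone sketch of the byte/page conversion follows; the TARGET_PAGE_BITS value of 12 and the sample RAM size are assumptions for illustration only, not taken from QEMU.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the QEMU definitions; 12 (4 KiB pages) is only an assumed value. */
#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)

typedef uint64_t ram_addr_t;

/* Old-style helper: byte offset just past the last RAM block (sample value). */
static ram_addr_t last_ram_offset(void)
{
    return 512 * 1024 * 1024;
}

/* New-style helper, as in the diff: the same information expressed in pages. */
static unsigned long last_ram_page(void)
{
    return last_ram_offset() >> TARGET_PAGE_BITS;
}

int main(void)
{
    unsigned long pages = last_ram_page();
    /* Callers that need bytes again multiply by the target page size. */
    ram_addr_t bytes = (ram_addr_t)pages * TARGET_PAGE_SIZE;

    printf("%lu pages = %" PRIu64 " bytes\n", pages, bytes);
    return 0;
}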
hmp.c (29 changed lines)

@@ -215,6 +215,9 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
                        info->ram->normal_bytes >> 10);
         monitor_printf(mon, "dirty sync count: %" PRIu64 "\n",
                        info->ram->dirty_sync_count);
+        monitor_printf(mon, "page size: %" PRIu64 " kbytes\n",
+                       info->ram->page_size >> 10);
+
         if (info->ram->dirty_pages_rate) {
             monitor_printf(mon, "dirty pages rate: %" PRIu64 " pages\n",
                            info->ram->dirty_pages_rate);
@@ -265,13 +268,11 @@ void hmp_info_migrate_capabilities(Monitor *mon, const QDict *qdict)
     caps = qmp_query_migrate_capabilities(NULL);
 
     if (caps) {
-        monitor_printf(mon, "capabilities: ");
         for (cap = caps; cap; cap = cap->next) {
-            monitor_printf(mon, "%s: %s ",
+            monitor_printf(mon, "%s: %s\n",
                            MigrationCapability_lookup[cap->value->capability],
                            cap->value->state ? "on" : "off");
         }
-        monitor_printf(mon, "\n");
     }
 
     qapi_free_MigrationCapabilityStatusList(caps);
@@ -284,46 +285,44 @@ void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict)
     params = qmp_query_migrate_parameters(NULL);
 
     if (params) {
-        monitor_printf(mon, "parameters:");
         assert(params->has_compress_level);
-        monitor_printf(mon, " %s: %" PRId64,
+        monitor_printf(mon, "%s: %" PRId64 "\n",
             MigrationParameter_lookup[MIGRATION_PARAMETER_COMPRESS_LEVEL],
             params->compress_level);
         assert(params->has_compress_threads);
-        monitor_printf(mon, " %s: %" PRId64,
+        monitor_printf(mon, "%s: %" PRId64 "\n",
             MigrationParameter_lookup[MIGRATION_PARAMETER_COMPRESS_THREADS],
            params->compress_threads);
         assert(params->has_decompress_threads);
-        monitor_printf(mon, " %s: %" PRId64,
+        monitor_printf(mon, "%s: %" PRId64 "\n",
             MigrationParameter_lookup[MIGRATION_PARAMETER_DECOMPRESS_THREADS],
             params->decompress_threads);
         assert(params->has_cpu_throttle_initial);
-        monitor_printf(mon, " %s: %" PRId64,
+        monitor_printf(mon, "%s: %" PRId64 "\n",
             MigrationParameter_lookup[MIGRATION_PARAMETER_CPU_THROTTLE_INITIAL],
             params->cpu_throttle_initial);
         assert(params->has_cpu_throttle_increment);
-        monitor_printf(mon, " %s: %" PRId64,
+        monitor_printf(mon, "%s: %" PRId64 "\n",
             MigrationParameter_lookup[MIGRATION_PARAMETER_CPU_THROTTLE_INCREMENT],
             params->cpu_throttle_increment);
-        monitor_printf(mon, " %s: '%s'",
+        monitor_printf(mon, "%s: '%s'\n",
             MigrationParameter_lookup[MIGRATION_PARAMETER_TLS_CREDS],
             params->has_tls_creds ? params->tls_creds : "");
-        monitor_printf(mon, " %s: '%s'",
+        monitor_printf(mon, "%s: '%s'\n",
             MigrationParameter_lookup[MIGRATION_PARAMETER_TLS_HOSTNAME],
             params->has_tls_hostname ? params->tls_hostname : "");
         assert(params->has_max_bandwidth);
-        monitor_printf(mon, " %s: %" PRId64 " bytes/second",
+        monitor_printf(mon, "%s: %" PRId64 " bytes/second\n",
             MigrationParameter_lookup[MIGRATION_PARAMETER_MAX_BANDWIDTH],
             params->max_bandwidth);
         assert(params->has_downtime_limit);
-        monitor_printf(mon, " %s: %" PRId64 " milliseconds",
+        monitor_printf(mon, "%s: %" PRId64 " milliseconds\n",
             MigrationParameter_lookup[MIGRATION_PARAMETER_DOWNTIME_LIMIT],
             params->downtime_limit);
         assert(params->has_x_checkpoint_delay);
-        monitor_printf(mon, " %s: %" PRId64,
+        monitor_printf(mon, "%s: %" PRId64 "\n",
             MigrationParameter_lookup[MIGRATION_PARAMETER_X_CHECKPOINT_DELAY],
             params->x_checkpoint_delay);
-        monitor_printf(mon, "\n");
     }
 
     qapi_free_MigrationParameters(params);
hw/core/qdev.c

@@ -39,9 +39,9 @@
 #include "qapi-event.h"
 #include "migration/migration.h"
 
-int qdev_hotplug = 0;
+bool qdev_hotplug = false;
 static bool qdev_hot_added = false;
-static bool qdev_hot_removed = false;
+bool qdev_hot_removed = false;
 
 const VMStateDescription *qdev_get_vmsd(DeviceState *dev)
 {
@@ -271,40 +271,6 @@ HotplugHandler *qdev_get_hotplug_handler(DeviceState *dev)
     return hotplug_ctrl;
 }
 
-void qdev_unplug(DeviceState *dev, Error **errp)
-{
-    DeviceClass *dc = DEVICE_GET_CLASS(dev);
-    HotplugHandler *hotplug_ctrl;
-    HotplugHandlerClass *hdc;
-
-    if (dev->parent_bus && !qbus_is_hotpluggable(dev->parent_bus)) {
-        error_setg(errp, QERR_BUS_NO_HOTPLUG, dev->parent_bus->name);
-        return;
-    }
-
-    if (!dc->hotpluggable) {
-        error_setg(errp, QERR_DEVICE_NO_HOTPLUG,
-                   object_get_typename(OBJECT(dev)));
-        return;
-    }
-
-    qdev_hot_removed = true;
-
-    hotplug_ctrl = qdev_get_hotplug_handler(dev);
-    /* hotpluggable device MUST have HotplugHandler, if it doesn't
-     * then something is very wrong with it */
-    g_assert(hotplug_ctrl);
-
-    /* If device supports async unplug just request it to be done,
-     * otherwise just remove it synchronously */
-    hdc = HOTPLUG_HANDLER_GET_CLASS(hotplug_ctrl);
-    if (hdc->unplug_request) {
-        hotplug_handler_unplug_request(hotplug_ctrl, dev, errp);
-    } else {
-        hotplug_handler_unplug(hotplug_ctrl, dev, errp);
-    }
-}
-
 static int qdev_reset_one(DeviceState *dev, void *opaque)
 {
     device_reset(dev);
@@ -385,7 +351,7 @@ void qdev_machine_creation_done(void)
      * ok, initial machine setup is done, starting from now we can
      * only create hotpluggable devices
      */
-    qdev_hotplug = 1;
+    qdev_hotplug = true;
 }
 
 bool qdev_machine_modified(void)
hw/virtio/trace-events

@@ -11,8 +11,11 @@ virtio_set_status(void *vdev, uint8_t val) "vdev %p val %u"
 
 # hw/virtio/virtio-rng.c
 virtio_rng_guest_not_ready(void *rng) "rng %p: guest not ready"
+virtio_rng_cpu_is_stopped(void *rng, int size) "rng %p: cpu is stopped, dropping %d bytes"
+virtio_rng_popped(void *rng) "rng %p: elem popped"
 virtio_rng_pushed(void *rng, size_t len) "rng %p: %zd bytes pushed"
 virtio_rng_request(void *rng, size_t size, unsigned quota) "rng %p: %zd bytes requested, %u bytes quota left"
+virtio_rng_vm_state_change(void *rng, int running, int state) "rng %p: state change to running %d state %d"
 
 # hw/virtio/virtio-balloon.c
 #
hw/virtio/virtio-rng.c

@@ -53,6 +53,15 @@ static void chr_read(void *opaque, const void *buf, size_t size)
         return;
     }
 
+    /* we can't modify the virtqueue until
+     * our state is fully synced
+     */
+
+    if (!runstate_check(RUN_STATE_RUNNING)) {
+        trace_virtio_rng_cpu_is_stopped(vrng, size);
+        return;
+    }
+
     vrng->quota_remaining -= size;
 
     offset = 0;
@@ -61,6 +70,7 @@ static void chr_read(void *opaque, const void *buf, size_t size)
         if (!elem) {
             break;
         }
+        trace_virtio_rng_popped(vrng);
         len = iov_from_buf(elem->in_sg, elem->in_num,
                            0, buf + offset, size - offset);
         offset += len;
@@ -120,17 +130,21 @@ static uint64_t get_features(VirtIODevice *vdev, uint64_t f, Error **errp)
     return f;
 }
 
-static int virtio_rng_post_load(void *opaque, int version_id)
+static void virtio_rng_vm_state_change(void *opaque, int running,
+                                       RunState state)
 {
     VirtIORNG *vrng = opaque;
 
-    /* We may have an element ready but couldn't process it due to a quota
-     * limit. Make sure to try again after live migration when the quota may
-     * have been reset.
-     */
-    virtio_rng_process(vrng);
+    trace_virtio_rng_vm_state_change(vrng, running, state);
 
-    return 0;
+    /* We may have an element ready but couldn't process it due to a quota
+     * limit or because CPU was stopped. Make sure to try again when the
+     * CPU restart.
+     */
+
+    if (running && is_guest_ready(vrng)) {
+        virtio_rng_process(vrng);
+    }
 }
 
 static void check_rate_limit(void *opaque)
@@ -198,6 +212,9 @@ static void virtio_rng_device_realize(DeviceState *dev, Error **errp)
     vrng->rate_limit_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                           check_rate_limit, vrng);
     vrng->activate_timer = true;
+
+    vrng->vmstate = qemu_add_vm_change_state_handler(virtio_rng_vm_state_change,
+                                                     vrng);
 }
 
 static void virtio_rng_device_unrealize(DeviceState *dev, Error **errp)
@@ -205,6 +222,7 @@ static void virtio_rng_device_unrealize(DeviceState *dev, Error **errp)
     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
     VirtIORNG *vrng = VIRTIO_RNG(dev);
 
+    qemu_del_vm_change_state_handler(vrng->vmstate);
     timer_del(vrng->rate_limit_timer);
     timer_free(vrng->rate_limit_timer);
     virtio_cleanup(vdev);
@@ -218,7 +236,6 @@ static const VMStateDescription vmstate_virtio_rng = {
         VMSTATE_VIRTIO_DEVICE,
         VMSTATE_END_OF_LIST()
     },
-    .post_load = virtio_rng_post_load,
 };
 
 static Property virtio_rng_properties[] = {
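The realize/unrealize hunks above follow QEMU's VM change state handler pattern: register a callback that fires when the VM switches between running and stopped, keep the returned handle, and remove it on unrealize. Below is a minimal sketch of that pattern; it assumes the QEMU headers are available (it is not standalone), and the MyDev structure and callback body are placeholders, not the actual virtio-rng code.

#include "qemu/osdep.h"
#include "sysemu/sysemu.h"   /* qemu_add/del_vm_change_state_handler(), RunState */

/* Placeholder device state; a real device embeds this in its own struct. */
typedef struct MyDev {
    VMChangeStateEntry *vmstate;   /* handle kept so the callback can be removed */
} MyDev;

/* Signature matches VMChangeStateHandler: invoked on run-state transitions. */
static void my_dev_vm_state_change(void *opaque, int running, RunState state)
{
    MyDev *dev = opaque;

    if (running) {
        /* resume whatever work was deferred while the CPU was stopped */
    }
    (void)dev;
    (void)state;
}

void my_dev_realize(MyDev *dev)
{
    dev->vmstate = qemu_add_vm_change_state_handler(my_dev_vm_state_change, dev);
}

void my_dev_unrealize(MyDev *dev)
{
    qemu_del_vm_change_state_handler(dev->vmstate);
}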
include/exec/ram_addr.h

@@ -53,7 +53,7 @@ static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
 }
 
 long qemu_getrampagesize(void);
-ram_addr_t last_ram_offset(void);
+unsigned long last_ram_page(void);
 RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp);
@@ -354,11 +354,13 @@ static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
 
 static inline
 uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
+                                               RAMBlock *rb,
                                                ram_addr_t start,
                                                ram_addr_t length,
-                                               int64_t *real_dirty_pages)
+                                               uint64_t *real_dirty_pages)
 {
     ram_addr_t addr;
+    start = rb->offset + start;
     unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
     uint64_t num_dirty = 0;
 
@@ -411,7 +413,5 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
 
     return num_dirty;
 }
-
-void migration_bitmap_extend(ram_addr_t old, ram_addr_t new);
 #endif
 #endif
include/hw/qdev-core.h

@@ -373,7 +373,8 @@ Object *qdev_get_machine(void);
 /* FIXME: make this a link<> */
 void qdev_set_parent_bus(DeviceState *dev, BusState *bus);
 
-extern int qdev_hotplug;
+extern bool qdev_hotplug;
+extern bool qdev_hot_removed;
 
 char *qdev_get_dev_path(DeviceState *dev);
 
include/hw/virtio/virtio-rng.h

@@ -45,6 +45,8 @@ typedef struct VirtIORNG {
     QEMUTimer *rate_limit_timer;
     int64_t quota_remaining;
     bool activate_timer;
+
+    VMChangeStateEntry *vmstate;
 } VirtIORNG;
 
 #endif
include/migration/migration.h

@@ -128,18 +128,6 @@ struct MigrationIncomingState {
 MigrationIncomingState *migration_incoming_get_current(void);
 void migration_incoming_state_destroy(void);
 
-/*
- * An outstanding page request, on the source, having been received
- * and queued
- */
-struct MigrationSrcPageRequest {
-    RAMBlock *rb;
-    hwaddr    offset;
-    hwaddr    len;
-
-    QSIMPLEQ_ENTRY(MigrationSrcPageRequest) next_req;
-};
-
 struct MigrationState
 {
     size_t bytes_xfer;
@@ -166,14 +154,9 @@ struct MigrationState
     int64_t total_time;
     int64_t downtime;
     int64_t expected_downtime;
-    int64_t dirty_pages_rate;
-    int64_t dirty_bytes_rate;
     bool enabled_capabilities[MIGRATION_CAPABILITY__MAX];
     int64_t xbzrle_cache_size;
     int64_t setup_time;
-    int64_t dirty_sync_count;
-    /* Count of requests incoming from destination */
-    int64_t postcopy_requests;
 
     /* Flag set once the migration has been asked to enter postcopy */
     bool start_postcopy;
@@ -186,11 +169,6 @@ struct MigrationState
     /* Flag set once the migration thread called bdrv_inactivate_all */
     bool block_inactive;
 
-    /* Queue of outstanding page requests from the destination */
-    QemuMutex src_page_req_mutex;
-    QSIMPLEQ_HEAD(src_page_requests, MigrationSrcPageRequest) src_page_requests;
-    /* The RAMBlock used in the last src_page_request */
-    RAMBlock *last_req_rb;
     /* The semaphore is used to notify COLO thread that failover is finished */
     QemuSemaphore colo_exit_sem;
 
@@ -256,11 +234,11 @@ void remove_migration_state_change_notifier(Notifier *notify);
 MigrationState *migrate_init(const MigrationParams *params);
 bool migration_is_blocked(Error **errp);
 bool migration_in_setup(MigrationState *);
-bool migration_is_idle(MigrationState *s);
+bool migration_is_idle(void);
 bool migration_has_finished(MigrationState *);
 bool migration_has_failed(MigrationState *);
 /* True if outgoing migration has entered postcopy phase */
-bool migration_in_postcopy(MigrationState *);
+bool migration_in_postcopy(void);
 /* ...and after the device transmission */
 bool migration_in_postcopy_after_devices(MigrationState *);
 MigrationState *migrate_get_current(void);
@@ -272,15 +250,14 @@ void migrate_decompress_threads_join(void);
 uint64_t ram_bytes_remaining(void);
 uint64_t ram_bytes_transferred(void);
 uint64_t ram_bytes_total(void);
+uint64_t ram_dirty_sync_count(void);
+uint64_t ram_dirty_pages_rate(void);
+uint64_t ram_postcopy_requests(void);
 void free_xbzrle_decoded_buf(void);
 
 void acct_update_position(QEMUFile *f, size_t size, bool zero);
 
-uint64_t dup_mig_bytes_transferred(void);
 uint64_t dup_mig_pages_transferred(void);
-uint64_t skipped_mig_bytes_transferred(void);
-uint64_t skipped_mig_pages_transferred(void);
-uint64_t norm_mig_bytes_transferred(void);
 uint64_t norm_mig_pages_transferred(void);
 uint64_t xbzrle_mig_bytes_transferred(void);
 uint64_t xbzrle_mig_pages_transferred(void);
@@ -293,8 +270,7 @@ void ram_debug_dump_bitmap(unsigned long *todump, bool expected);
 /* For outgoing discard bitmap */
 int ram_postcopy_send_discard_bitmap(MigrationState *ms);
 /* For incoming postcopy discard */
-int ram_discard_range(MigrationIncomingState *mis, const char *block_name,
-                      uint64_t start, size_t length);
+int ram_discard_range(const char *block_name, uint64_t start, size_t length);
 int ram_postcopy_incoming_init(MigrationIncomingState *mis);
 void ram_postcopy_migrated_memory_release(MigrationState *ms);
 
@@ -377,9 +353,8 @@ void savevm_skip_configuration(void);
 int global_state_store(void);
 void global_state_store_running(void);
 
-void flush_page_queue(MigrationState *ms);
-int ram_save_queue_pages(MigrationState *ms, const char *rbname,
-                         ram_addr_t start, ram_addr_t len);
+void migration_page_queue_free(void);
+int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len);
 uint64_t ram_pagesize_summary(void);
 
 PostcopyState postcopy_state_get(void);
include/migration/vmstate.h

@@ -56,7 +56,8 @@ typedef struct SaveVMHandlers {
 
     /* This runs outside the iothread lock! */
     int (*save_live_setup)(QEMUFile *f, void *opaque);
-    void (*save_live_pending)(QEMUFile *f, void *opaque, uint64_t max_size,
+    void (*save_live_pending)(QEMUFile *f, void *opaque,
+                              uint64_t threshold_size,
                               uint64_t *non_postcopiable_pending,
                               uint64_t *postcopiable_pending);
     LoadStateHandler *load_state;
include/sysemu/sysemu.h

@@ -67,7 +67,7 @@ int qemu_reset_requested_get(void);
 void qemu_system_killed(int signal, pid_t pid);
 void qemu_system_reset(bool report);
 void qemu_system_guest_panicked(GuestPanicInformation *info);
-size_t qemu_target_page_bits(void);
+size_t qemu_target_page_size(void);
 
 void qemu_add_exit_notifier(Notifier *notify);
 void qemu_remove_exit_notifier(Notifier *notify);
migration/migration.c

@@ -109,7 +109,6 @@ MigrationState *migrate_get_current(void)
     };
 
     if (!once) {
-        qemu_mutex_init(&current_migration.src_page_req_mutex);
         current_migration.parameters.tls_creds = g_strdup("");
         current_migration.parameters.tls_hostname = g_strdup("");
         once = true;
@@ -436,9 +435,6 @@ static void process_incoming_migration_co(void *opaque)
         qemu_thread_join(&mis->colo_incoming_thread);
     }
 
-    qemu_fclose(f);
-    free_xbzrle_decoded_buf();
-
     if (ret < 0) {
         migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                           MIGRATION_STATUS_FAILED);
@@ -447,6 +443,9 @@ static void process_incoming_migration_co(void *opaque)
         exit(EXIT_FAILURE);
     }
 
+    qemu_fclose(f);
+    free_xbzrle_decoded_buf();
+
     mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
     qemu_bh_schedule(mis->bh);
 }
@@ -651,16 +650,19 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s)
     info->ram->transferred = ram_bytes_transferred();
     info->ram->total = ram_bytes_total();
     info->ram->duplicate = dup_mig_pages_transferred();
-    info->ram->skipped = skipped_mig_pages_transferred();
+    /* legacy value. It is not used anymore */
+    info->ram->skipped = 0;
     info->ram->normal = norm_mig_pages_transferred();
-    info->ram->normal_bytes = norm_mig_bytes_transferred();
+    info->ram->normal_bytes = norm_mig_pages_transferred() *
+        qemu_target_page_size();
     info->ram->mbps = s->mbps;
-    info->ram->dirty_sync_count = s->dirty_sync_count;
-    info->ram->postcopy_requests = s->postcopy_requests;
+    info->ram->dirty_sync_count = ram_dirty_sync_count();
+    info->ram->postcopy_requests = ram_postcopy_requests();
+    info->ram->page_size = qemu_target_page_size();
 
     if (s->state != MIGRATION_STATUS_COMPLETED) {
         info->ram->remaining = ram_bytes_remaining();
-        info->ram->dirty_pages_rate = s->dirty_pages_rate;
+        info->ram->dirty_pages_rate = ram_dirty_pages_rate();
     }
 }
@@ -955,7 +957,7 @@ static void migrate_fd_cleanup(void *opaque)
     qemu_bh_delete(s->cleanup_bh);
     s->cleanup_bh = NULL;
 
-    flush_page_queue(s);
+    migration_page_queue_free();
 
     if (s->to_dst_file) {
         trace_migrate_fd_cleanup();
@@ -1061,21 +1063,21 @@ bool migration_has_failed(MigrationState *s)
           s->state == MIGRATION_STATUS_FAILED);
 }
 
-bool migration_in_postcopy(MigrationState *s)
+bool migration_in_postcopy(void)
 {
+    MigrationState *s = migrate_get_current();
+
     return (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
 }
 
 bool migration_in_postcopy_after_devices(MigrationState *s)
 {
-    return migration_in_postcopy(s) && s->postcopy_after_devices;
+    return migration_in_postcopy() && s->postcopy_after_devices;
 }
 
-bool migration_is_idle(MigrationState *s)
+bool migration_is_idle(void)
 {
-    if (!s) {
-        s = migrate_get_current();
-    }
+    MigrationState *s = migrate_get_current();
 
     switch (s->state) {
     case MIGRATION_STATUS_NONE:
@@ -1116,22 +1118,15 @@ MigrationState *migrate_init(const MigrationParams *params)
     s->mbps = 0.0;
     s->downtime = 0;
     s->expected_downtime = 0;
-    s->dirty_pages_rate = 0;
-    s->dirty_bytes_rate = 0;
     s->setup_time = 0;
-    s->dirty_sync_count = 0;
     s->start_postcopy = false;
     s->postcopy_after_devices = false;
-    s->postcopy_requests = 0;
     s->migration_thread_running = false;
-    s->last_req_rb = NULL;
     error_free(s->error);
     s->error = NULL;
 
     migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);
 
-    QSIMPLEQ_INIT(&s->src_page_requests);
-
     s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
     return s;
 }
@@ -1147,7 +1142,7 @@ int migrate_add_blocker(Error *reason, Error **errp)
         return -EACCES;
     }
 
-    if (migration_is_idle(NULL)) {
+    if (migration_is_idle()) {
         migration_blockers = g_slist_prepend(migration_blockers, reason);
         return 0;
     }
@@ -1485,7 +1480,7 @@ static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
         return;
     }
 
-    if (ram_save_queue_pages(ms, rbname, start, len)) {
+    if (ram_save_queue_pages(rbname, start, len)) {
         mark_source_rp_bad(ms);
     }
 }
@@ -1915,7 +1910,12 @@ static void *migration_thread(void *opaque)
     int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
     int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
     int64_t initial_bytes = 0;
-    int64_t max_size = 0;
+    /*
+     * The final stage happens when the remaining data is smaller than
+     * this threshold; it's calculated from the requested downtime and
+     * measured bandwidth
+     */
+    int64_t threshold_size = 0;
     int64_t start_time = initial_time;
     int64_t end_time;
     bool old_vm_running = false;
@@ -1946,7 +1946,6 @@ static void *migration_thread(void *opaque)
     qemu_savevm_state_begin(s->to_dst_file, &s->params);
 
     s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
-    current_active_state = MIGRATION_STATUS_ACTIVE;
     migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                       MIGRATION_STATUS_ACTIVE);
 
@@ -1960,17 +1959,17 @@ static void *migration_thread(void *opaque)
         if (!qemu_file_rate_limit(s->to_dst_file)) {
             uint64_t pend_post, pend_nonpost;
 
-            qemu_savevm_state_pending(s->to_dst_file, max_size, &pend_nonpost,
-                                      &pend_post);
+            qemu_savevm_state_pending(s->to_dst_file, threshold_size,
+                                      &pend_nonpost, &pend_post);
             pending_size = pend_nonpost + pend_post;
-            trace_migrate_pending(pending_size, max_size,
+            trace_migrate_pending(pending_size, threshold_size,
                                   pend_post, pend_nonpost);
-            if (pending_size && pending_size >= max_size) {
+            if (pending_size && pending_size >= threshold_size) {
                 /* Still a significant amount to transfer */
 
                 if (migrate_postcopy_ram() &&
                     s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE &&
-                    pend_nonpost <= max_size &&
+                    pend_nonpost <= threshold_size &&
                     atomic_read(&s->start_postcopy)) {
 
                     if (!postcopy_start(s, &old_vm_running)) {
@@ -2002,17 +2001,18 @@ static void *migration_thread(void *opaque)
                                          initial_bytes;
             uint64_t time_spent = current_time - initial_time;
             double bandwidth = (double)transferred_bytes / time_spent;
-            max_size = bandwidth * s->parameters.downtime_limit;
+            threshold_size = bandwidth * s->parameters.downtime_limit;
 
             s->mbps = (((double) transferred_bytes * 8.0) /
                     ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;
 
             trace_migrate_transferred(transferred_bytes, time_spent,
-                                      bandwidth, max_size);
+                                      bandwidth, threshold_size);
             /* if we haven't sent anything, we don't want to recalculate
                10000 is a small enough number for our purposes */
-            if (s->dirty_bytes_rate && transferred_bytes > 10000) {
-                s->expected_downtime = s->dirty_bytes_rate / bandwidth;
+            if (ram_dirty_pages_rate() && transferred_bytes > 10000) {
+                s->expected_downtime = ram_dirty_pages_rate() *
+                    qemu_target_page_size() / bandwidth;
             }
 
             qemu_file_reset_rate_limit(s->to_dst_file);
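The max_size to threshold_size rename above leaves the arithmetic untouched: the threshold is still measured bandwidth (bytes per millisecond of transfer) multiplied by the configured downtime limit, and expected_downtime is now derived from the dirty page rate times the target page size instead of a separate dirty-bytes counter. A standalone sketch with assumed sample numbers follows; the inputs are made up for illustration and are not QEMU defaults.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Assumed sample inputs, not taken from QEMU. */
    double bandwidth = 1000000.0;        /* bytes transferred per millisecond   */
    int64_t downtime_limit = 300;        /* s->parameters.downtime_limit, in ms */
    uint64_t dirty_pages_rate = 50000;   /* pages dirtied per unit time, as
                                          * reported by the RAM code            */
    uint64_t page_size = 4096;           /* qemu_target_page_size()             */

    /* Same formula as the new code: switch to the final stage once the
     * remaining data fits under bandwidth * downtime_limit. */
    int64_t threshold_size = bandwidth * downtime_limit;

    /* Same formula as the new code: dirty bytes divided by bandwidth. */
    double expected_downtime = dirty_pages_rate * page_size / bandwidth;

    printf("threshold_size    = %" PRId64 " bytes\n", threshold_size);
    printf("expected_downtime = %.1f\n", expected_downtime);
    return 0;
}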
migration/postcopy-ram.c

@@ -123,7 +123,7 @@ bool postcopy_ram_supported_by_host(void)
     struct uffdio_range range_struct;
     uint64_t feature_mask;
 
-    if ((1ul << qemu_target_page_bits()) > pagesize) {
+    if (qemu_target_page_size() > pagesize) {
         error_report("Target page size bigger than host page size");
         goto out;
     }
@@ -213,8 +213,6 @@ out:
 static int init_range(const char *block_name, void *host_addr,
                       ram_addr_t offset, ram_addr_t length, void *opaque)
 {
-    MigrationIncomingState *mis = opaque;
-
     trace_postcopy_init_range(block_name, host_addr, offset, length);
 
     /*
@@ -223,7 +221,7 @@ static int init_range(const char *block_name, void *host_addr,
      * - we're going to get the copy from the source anyway.
      * (Precopy will just overwrite this data, so doesn't need the discard)
      */
-    if (ram_discard_range(mis, block_name, 0, length)) {
+    if (ram_discard_range(block_name, 0, length)) {
         return -1;
     }
 
@@ -271,7 +269,7 @@ static int cleanup_range(const char *block_name, void *host_addr,
  */
 int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
 {
-    if (qemu_ram_foreach_block(init_range, mis)) {
+    if (qemu_ram_foreach_block(init_range, NULL)) {
         return -1;
     }
 
@@ -745,10 +743,10 @@ PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
 void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
                                  unsigned long start, unsigned long length)
 {
-    size_t tp_bits = qemu_target_page_bits();
+    size_t tp_size = qemu_target_page_size();
     /* Convert to byte offsets within the RAM block */
-    pds->start_list[pds->cur_entry] = (start - pds->offset) << tp_bits;
-    pds->length_list[pds->cur_entry] = length << tp_bits;
+    pds->start_list[pds->cur_entry] = (start - pds->offset) * tp_size;
+    pds->length_list[pds->cur_entry] = length * tp_size;
     trace_postcopy_discard_send_range(pds->ramblock_name, start, length);
     pds->cur_entry++;
     pds->nsentwords++;
migration/ram.c (1284 changed lines; diff not shown because it is too large)
migration/savevm.c

@@ -871,7 +871,7 @@ void qemu_savevm_send_postcopy_advise(QEMUFile *f)
 {
     uint64_t tmp[2];
     tmp[0] = cpu_to_be64(ram_pagesize_summary());
-    tmp[1] = cpu_to_be64(1ul << qemu_target_page_bits());
+    tmp[1] = cpu_to_be64(qemu_target_page_size());
 
     trace_qemu_savevm_send_postcopy_advise();
     qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_ADVISE, 16, (uint8_t *)tmp);
@@ -1062,7 +1062,7 @@ int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy)
 static bool should_send_vmdesc(void)
 {
     MachineState *machine = MACHINE(qdev_get_machine());
-    bool in_postcopy = migration_in_postcopy(migrate_get_current());
+    bool in_postcopy = migration_in_postcopy();
     return !machine->suppress_vmdesc && !in_postcopy;
 }
 
@@ -1111,7 +1111,7 @@ void qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only)
     int vmdesc_len;
     SaveStateEntry *se;
     int ret;
-    bool in_postcopy = migration_in_postcopy(migrate_get_current());
+    bool in_postcopy = migration_in_postcopy();
 
     trace_savevm_state_complete_precopy();
 
@@ -1197,7 +1197,7 @@ void qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only)
  * the result is split into the amount for units that can and
  * for units that can't do postcopy.
  */
-void qemu_savevm_state_pending(QEMUFile *f, uint64_t max_size,
+void qemu_savevm_state_pending(QEMUFile *f, uint64_t threshold_size,
                                uint64_t *res_non_postcopiable,
                                uint64_t *res_postcopiable)
 {
@@ -1216,7 +1216,7 @@ void qemu_savevm_state_pending(QEMUFile *f, uint64_t max_size,
                 continue;
             }
         }
-        se->ops->save_live_pending(f, se->opaque, max_size,
+        se->ops->save_live_pending(f, se->opaque, threshold_size,
                                    res_non_postcopiable, res_postcopiable);
     }
 }
@@ -1390,13 +1390,13 @@ static int loadvm_postcopy_handle_advise(MigrationIncomingState *mis)
     }
 
     remote_tps = qemu_get_be64(mis->from_src_file);
-    if (remote_tps != (1ul << qemu_target_page_bits())) {
+    if (remote_tps != qemu_target_page_size()) {
         /*
          * Again, some differences could be dealt with, but for now keep it
         * simple.
         */
-        error_report("Postcopy needs matching target page sizes (s=%d d=%d)",
-                     (int)remote_tps, 1 << qemu_target_page_bits());
+        error_report("Postcopy needs matching target page sizes (s=%d d=%zd)",
+                     (int)remote_tps, qemu_target_page_size());
         return -1;
     }
 
@@ -1479,8 +1479,7 @@ static int loadvm_postcopy_ram_handle_discard(MigrationIncomingState *mis,
         block_length = qemu_get_be64(mis->from_src_file);
 
         len -= 16;
-        int ret = ram_discard_range(mis, ramid, start_addr,
-                                    block_length);
+        int ret = ram_discard_range(ramid, start_addr, block_length);
         if (ret) {
             return ret;
         }
migration/trace-events

@@ -63,8 +63,8 @@ put_qtailq_end(const char *name, const char *reason) "%s %s"
 qemu_file_fclose(void) ""
 
 # migration/ram.c
-get_queued_page(const char *block_name, uint64_t tmp_offset, uint64_t ram_addr) "%s/%" PRIx64 " ram_addr=%" PRIx64
-get_queued_page_not_dirty(const char *block_name, uint64_t tmp_offset, uint64_t ram_addr, int sent) "%s/%" PRIx64 " ram_addr=%" PRIx64 " (sent=%d)"
+get_queued_page(const char *block_name, uint64_t tmp_offset, unsigned long page_abs) "%s/%" PRIx64 " page_abs=%lx"
+get_queued_page_not_dirty(const char *block_name, uint64_t tmp_offset, unsigned long page_abs, int sent) "%s/%" PRIx64 " page_abs=%lx (sent=%d)"
 migration_bitmap_sync_start(void) ""
 migration_bitmap_sync_end(uint64_t dirty_pages) "dirty_pages %" PRIu64
 migration_throttle(void) ""
qapi-schema.json

@@ -598,6 +598,9 @@
 # @postcopy-requests: The number of page requests received from the destination
 #        (since 2.7)
 #
+# @page-size: The number of bytes per page for the various page-based
+#        statistics (since 2.10)
+#
 # Since: 0.14.0
 ##
 { 'struct': 'MigrationStats',
@@ -605,7 +608,7 @@
            'duplicate': 'int', 'skipped': 'int', 'normal': 'int',
            'normal-bytes': 'int', 'dirty-pages-rate' : 'int',
            'mbps' : 'number', 'dirty-sync-count' : 'int',
-           'postcopy-requests' : 'int' } }
+           'postcopy-requests' : 'int', 'page-size' : 'int' } }
 
 ##
 # @XBZRLECacheStats:
qdev-monitor.c

@@ -29,6 +29,7 @@
 #include "qemu/error-report.h"
 #include "qemu/help_option.h"
 #include "sysemu/block-backend.h"
+#include "migration/migration.h"
 
 /*
  * Aliases were a bad idea from the start. Let's keep them
@@ -603,6 +604,11 @@ DeviceState *qdev_device_add(QemuOpts *opts, Error **errp)
         return NULL;
     }
 
+    if (!migration_is_idle()) {
+        error_setg(errp, "device_add not allowed while migrating");
+        return NULL;
+    }
+
     /* create device */
     dev = DEVICE(object_new(driver));
 
@@ -836,6 +842,45 @@ static DeviceState *find_device_state(const char *id, Error **errp)
     return DEVICE(obj);
 }
 
+void qdev_unplug(DeviceState *dev, Error **errp)
+{
+    DeviceClass *dc = DEVICE_GET_CLASS(dev);
+    HotplugHandler *hotplug_ctrl;
+    HotplugHandlerClass *hdc;
+
+    if (dev->parent_bus && !qbus_is_hotpluggable(dev->parent_bus)) {
+        error_setg(errp, QERR_BUS_NO_HOTPLUG, dev->parent_bus->name);
+        return;
+    }
+
+    if (!dc->hotpluggable) {
+        error_setg(errp, QERR_DEVICE_NO_HOTPLUG,
+                   object_get_typename(OBJECT(dev)));
+        return;
+    }
+
+    if (!migration_is_idle()) {
+        error_setg(errp, "device_del not allowed while migrating");
+        return;
+    }
+
+    qdev_hot_removed = true;
+
+    hotplug_ctrl = qdev_get_hotplug_handler(dev);
+    /* hotpluggable device MUST have HotplugHandler, if it doesn't
+     * then something is very wrong with it */
+    g_assert(hotplug_ctrl);
+
+    /* If device supports async unplug just request it to be done,
+     * otherwise just remove it synchronously */
+    hdc = HOTPLUG_HANDLER_GET_CLASS(hotplug_ctrl);
+    if (hdc->unplug_request) {
+        hotplug_handler_unplug_request(hotplug_ctrl, dev, errp);
+    } else {
+        hotplug_handler_unplug(hotplug_ctrl, dev, errp);
+    }
+}
+
 void qmp_device_del(const char *id, Error **errp)
 {
     DeviceState *dev = find_device_state(id, errp);