From 86d063fa83901bc8150343ff8b03979fbea392c9 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Sun, 26 Mar 2023 13:25:38 -0400 Subject: [PATCH 1/5] io: tls: Inherit QIO_CHANNEL_FEATURE_SHUTDOWN on server side MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit TLS iochannel will inherit io_shutdown() from the master ioc, however we missed to do that on the server side. This will e.g. allow qemu_file_shutdown() to work on dest QEMU too for migration. Acked-by: Daniel P. Berrangé Signed-off-by: Peter Xu Reviewed-by: Juan Quintela Signed-off-by: Juan Quintela --- io/channel-tls.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/io/channel-tls.c b/io/channel-tls.c index 5a7a3d48d6..9805dd0a3f 100644 --- a/io/channel-tls.c +++ b/io/channel-tls.c @@ -74,6 +74,9 @@ qio_channel_tls_new_server(QIOChannel *master, ioc = QIO_CHANNEL_TLS(object_new(TYPE_QIO_CHANNEL_TLS)); ioc->master = master; + if (qio_channel_has_feature(master, QIO_CHANNEL_FEATURE_SHUTDOWN)) { + qio_channel_set_feature(QIO_CHANNEL(ioc), QIO_CHANNEL_FEATURE_SHUTDOWN); + } object_ref(OBJECT(master)); ioc->session = qcrypto_tls_session_new( From 6621883f9398bc3f255968f0b4919e883bafb06c Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Sun, 26 Mar 2023 13:25:39 -0400 Subject: [PATCH 2/5] migration: Fix potential race on postcopy_qemufile_src postcopy_qemufile_src object should be owned by one thread, either the main thread (e.g. when at the beginning, or at the end of migration), or by the return path thread (when during a preempt enabled postcopy migration). If that's not the case the access to the object might be racy. postcopy_preempt_shutdown_file() can be potentially racy, because it's called at the end phase of migration on the main thread, however during which the return path thread hasn't yet been recycled; the recycle happens in await_return_path_close_on_source() which is after this point. 
It means, logically it's possible the main thread and the return path thread are both operating on the same qemufile. While I don't think qemufile is thread safe at all. postcopy_preempt_shutdown_file() used to be needed because that's where we send EOS to dest so that dest can safely shutdown the preempt thread. To avoid the possible race, remove this only place that a race can happen. Instead we figure out another way to safely close the preempt thread on dest. The core idea during postcopy on deciding "when to stop" is that dest will send a postcopy SHUT message to src, telling src that all data is there. Hence to shut the dest preempt thread maybe better to do it directly on dest node. This patch proposed such a way that we change postcopy_prio_thread_created into PreemptThreadStatus, so that we kick the preempt thread on dest qemu by a sequence of: mis->preempt_thread_status = PREEMPT_THREAD_QUIT; qemu_file_shutdown(mis->postcopy_qemufile_dst); While here shutdown() is probably so far the easiest way to kick preempt thread from a blocked qemu_get_be64(). Then it reads preempt_thread_status to make sure it's not a network failure but a willingness to quit the thread. We could have avoided that extra status but just rely on migration status. The problem is postcopy_ram_incoming_cleanup() is just called early enough so we're still during POSTCOPY_ACTIVE no matter what.. So just make it simple to have the status introduced. One flag x-preempt-pre-7-2 is added to keep old pre-7.2 behaviors of postcopy preempt. 
Fixes: 9358982744 ("migration: Send requested page directly in rp-return thread") Signed-off-by: Peter Xu Reviewed-by: Juan Quintela Signed-off-by: Juan Quintela --- hw/core/machine.c | 1 + migration/migration.c | 10 ++++++++-- migration/migration.h | 34 +++++++++++++++++++++++++++++++++- migration/postcopy-ram.c | 20 +++++++++++++++----- 4 files changed, 57 insertions(+), 8 deletions(-) diff --git a/hw/core/machine.c b/hw/core/machine.c index 45e3d24fdc..cd13b8b0a3 100644 --- a/hw/core/machine.c +++ b/hw/core/machine.c @@ -42,6 +42,7 @@ GlobalProperty hw_compat_7_2[] = { { "e1000e", "migrate-timadj", "off" }, { "virtio-mem", "x-early-migration", "false" }, + { "migration", "x-preempt-pre-7-2", "true" }, }; const size_t hw_compat_7_2_len = G_N_ELEMENTS(hw_compat_7_2); diff --git a/migration/migration.c b/migration/migration.c index ae2025d9d8..37fc4fb3e2 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -3464,8 +3464,12 @@ static void migration_completion(MigrationState *s) qemu_savevm_state_complete_postcopy(s->to_dst_file); qemu_mutex_unlock_iothread(); - /* Shutdown the postcopy fast path thread */ - if (migrate_postcopy_preempt()) { + /* + * Shutdown the postcopy fast path thread. This is only needed + * when dest QEMU binary is old (7.1/7.2). QEMU 8.0+ doesn't need + * this. 
+ */ + if (migrate_postcopy_preempt() && s->preempt_pre_7_2) { postcopy_preempt_shutdown_file(s); } @@ -4443,6 +4447,8 @@ static Property migration_properties[] = { decompress_error_check, true), DEFINE_PROP_UINT8("x-clear-bitmap-shift", MigrationState, clear_bitmap_shift, CLEAR_BITMAP_SHIFT_DEFAULT), + DEFINE_PROP_BOOL("x-preempt-pre-7-2", MigrationState, + preempt_pre_7_2, false), /* Migration parameters */ DEFINE_PROP_UINT8("x-compress-level", MigrationState, diff --git a/migration/migration.h b/migration/migration.h index 2da2f8a164..67baba2184 100644 --- a/migration/migration.h +++ b/migration/migration.h @@ -65,6 +65,12 @@ typedef struct { bool all_zero; } PostcopyTmpPage; +typedef enum { + PREEMPT_THREAD_NONE = 0, + PREEMPT_THREAD_CREATED, + PREEMPT_THREAD_QUIT, +} PreemptThreadStatus; + /* State for the incoming migration */ struct MigrationIncomingState { QEMUFile *from_src_file; @@ -124,7 +130,12 @@ struct MigrationIncomingState { QemuSemaphore postcopy_qemufile_dst_done; /* Postcopy priority thread is used to receive postcopy requested pages */ QemuThread postcopy_prio_thread; - bool postcopy_prio_thread_created; + /* + * Always set by the main vm load thread only, but can be read by the + * postcopy preempt thread. "volatile" makes sure all reads will be + * uptodate across cores. + */ + volatile PreemptThreadStatus preempt_thread_status; /* * Used to sync between the ram load main thread and the fast ram load * thread. It protects postcopy_qemufile_dst, which is the postcopy @@ -364,6 +375,27 @@ struct MigrationState { * do not trigger spurious decompression errors. */ bool decompress_error_check; + /* + * This variable only affects behavior when postcopy preempt mode is + * enabled. + * + * When set: + * + * - postcopy preempt src QEMU instance will generate an EOS message at + * the end of migration to shut the preempt channel on dest side. 
+ * + * When clear: + * + * - postcopy preempt src QEMU instance will _not_ generate an EOS + * message at the end of migration, the dest qemu will shutdown the + * channel itself. + * + * NOTE: See message-id on qemu-devel + * mailing list for more information on the possible race. Everyone + * should probably just keep this value untouched after set by the + * machine type (or the default). + */ + bool preempt_pre_7_2; /* * This decides the size of guest memory chunk that will be used diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c index 41c0713650..263bab75ec 100644 --- a/migration/postcopy-ram.c +++ b/migration/postcopy-ram.c @@ -568,9 +568,14 @@ int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis) { trace_postcopy_ram_incoming_cleanup_entry(); - if (mis->postcopy_prio_thread_created) { + if (mis->preempt_thread_status == PREEMPT_THREAD_CREATED) { + /* Notify the fast load thread to quit */ + mis->preempt_thread_status = PREEMPT_THREAD_QUIT; + if (mis->postcopy_qemufile_dst) { + qemu_file_shutdown(mis->postcopy_qemufile_dst); + } qemu_thread_join(&mis->postcopy_prio_thread); - mis->postcopy_prio_thread_created = false; + mis->preempt_thread_status = PREEMPT_THREAD_NONE; } if (mis->have_fault_thread) { @@ -1203,7 +1208,7 @@ int postcopy_ram_incoming_setup(MigrationIncomingState *mis) */ postcopy_thread_create(mis, &mis->postcopy_prio_thread, "fault-fast", postcopy_preempt_thread, QEMU_THREAD_JOINABLE); - mis->postcopy_prio_thread_created = true; + mis->preempt_thread_status = PREEMPT_THREAD_CREATED; } trace_postcopy_ram_enable_notify(); @@ -1652,6 +1657,11 @@ static void postcopy_pause_ram_fast_load(MigrationIncomingState *mis) trace_postcopy_pause_fast_load_continued(); } +static bool preempt_thread_should_run(MigrationIncomingState *mis) +{ + return mis->preempt_thread_status != PREEMPT_THREAD_QUIT; +} + void *postcopy_preempt_thread(void *opaque) { MigrationIncomingState *mis = opaque; @@ -1671,11 +1681,11 @@ void 
*postcopy_preempt_thread(void *opaque) /* Sending RAM_SAVE_FLAG_EOS to terminate this thread */ qemu_mutex_lock(&mis->postcopy_prio_thread_mutex); - while (1) { + while (preempt_thread_should_run(mis)) { ret = ram_load_postcopy(mis->postcopy_qemufile_dst, RAM_CHANNEL_POSTCOPY); /* If error happened, go into recovery routine */ - if (ret) { + if (ret && preempt_thread_should_run(mis)) { postcopy_pause_ram_fast_load(mis); } else { /* We're done */ From 06064a671573580326b1f23a2afa2702c48d8e05 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Sun, 26 Mar 2023 13:25:40 -0400 Subject: [PATCH 3/5] migration: Recover behavior of preempt channel creation for pre-7.2 In 8.0 devel window we reworked preempt channel creation, so that there'll be no race condition when the migration channel and preempt channel got established in the wrong order in commit 5655aab079. However no one noticed that the change will also be not compatible with older qemus, majorly 7.1/7.2 versions where preempt mode started to be supported. Leverage the same pre-7.2 flag introduced in the previous patch to recover the behavior hopefully before 8.0 releases, so we don't break migration when we migrate from 8.0 to older qemu binaries. Fixes: 5655aab079 ("migration: Postpone postcopy preempt channel to be after main") Signed-off-by: Peter Xu Reviewed-by: Juan Quintela Signed-off-by: Juan Quintela --- migration/migration.c | 9 +++++++++ migration/migration.h | 7 +++++++ migration/postcopy-ram.c | 10 ++++++++-- 3 files changed, 24 insertions(+), 2 deletions(-) diff --git a/migration/migration.c b/migration/migration.c index 37fc4fb3e2..bda4789193 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -4388,6 +4388,15 @@ void migrate_fd_connect(MigrationState *s, Error *error_in) } } + /* + * This needs to be done before resuming a postcopy. Note: for newer + * QEMUs we will delay the channel creation until postcopy_start(), to + * avoid disorder of channel creations. 
+ */ + if (migrate_postcopy_preempt() && s->preempt_pre_7_2) { + postcopy_preempt_setup(s); + } + if (resume) { /* Wakeup the main migration thread to do the recovery */ migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_PAUSED, diff --git a/migration/migration.h b/migration/migration.h index 67baba2184..310ae8901b 100644 --- a/migration/migration.h +++ b/migration/migration.h @@ -384,12 +384,19 @@ struct MigrationState { * - postcopy preempt src QEMU instance will generate an EOS message at * the end of migration to shut the preempt channel on dest side. * + * - postcopy preempt channel will be created at the setup phase on src + QEMU. + * * When clear: * * - postcopy preempt src QEMU instance will _not_ generate an EOS * message at the end of migration, the dest qemu will shutdown the * channel itself. * + * - postcopy preempt channel will be created at the switching phase + * from precopy -> postcopy (to avoid race condition of misordered + * creation of channels). + * * NOTE: See message-id on qemu-devel * mailing list for more information on the possible race. Everyone * should probably just keep this value untouched after set by the diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c index 263bab75ec..93f39f8e06 100644 --- a/migration/postcopy-ram.c +++ b/migration/postcopy-ram.c @@ -1630,8 +1630,14 @@ int postcopy_preempt_establish_channel(MigrationState *s) return 0; } - /* Kick off async task to establish preempt channel */ - postcopy_preempt_setup(s); + /* + * Kick off async task to establish preempt channel. Only do so with + * 8.0+ machines, because 7.1/7.2 require the channel to be created in + * setup phase of migration (even if racy in an unreliable network). 
+ */ + if (!s->preempt_pre_7_2) { + postcopy_preempt_setup(s); + } /* * We need the postcopy preempt channel to be established before From 37502df32c4b02403fe92452c4ed1d96da3df01c Mon Sep 17 00:00:00 2001 From: Lukas Straub Date: Sun, 2 Apr 2023 17:06:32 +0000 Subject: [PATCH 4/5] migration/ram.c: Fix migration with compress enabled Since ec6f3ab9, migration with compress enabled was broken, because the compress threads use a dummy QEMUFile which just acts as a buffer and that commit accidentally changed it to use the outgoing migration channel instead. Fix this by using the dummy file again in the compress threads. Signed-off-by: Lukas Straub Reviewed-by: Juan Quintela Signed-off-by: Juan Quintela --- migration/ram.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index 96e8a19a58..9d1817ab7b 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -688,12 +688,11 @@ exit: * @offset: offset inside the block for the page * in the lower bits, it contains flags */ -static size_t save_page_header(PageSearchStatus *pss, RAMBlock *block, - ram_addr_t offset) +static size_t save_page_header(PageSearchStatus *pss, QEMUFile *f, + RAMBlock *block, ram_addr_t offset) { size_t size, len; bool same_block = (block == pss->last_sent_block); - QEMUFile *f = pss->pss_channel; if (same_block) { offset |= RAM_SAVE_FLAG_CONTINUE; @@ -867,7 +866,7 @@ static int save_xbzrle_page(RAMState *rs, PageSearchStatus *pss, } /* Send XBZRLE based compressed page */ - bytes_xbzrle = save_page_header(pss, block, + bytes_xbzrle = save_page_header(pss, pss->pss_channel, block, offset | RAM_SAVE_FLAG_XBZRLE); qemu_put_byte(file, ENCODING_FLAG_XBZRLE); qemu_put_be16(file, encoded_len); @@ -1302,15 +1301,14 @@ void ram_release_page(const char *rbname, uint64_t offset) * @block: block that contains the page we want to send * @offset: offset inside the block for the page */ -static int save_zero_page_to_file(PageSearchStatus 
*pss, +static int save_zero_page_to_file(PageSearchStatus *pss, QEMUFile *file, RAMBlock *block, ram_addr_t offset) { uint8_t *p = block->host + offset; - QEMUFile *file = pss->pss_channel; int len = 0; if (buffer_is_zero(p, TARGET_PAGE_SIZE)) { - len += save_page_header(pss, block, offset | RAM_SAVE_FLAG_ZERO); + len += save_page_header(pss, file, block, offset | RAM_SAVE_FLAG_ZERO); qemu_put_byte(file, 0); len += 1; ram_release_page(block->idstr, offset); @@ -1327,10 +1325,10 @@ static int save_zero_page_to_file(PageSearchStatus *pss, * @block: block that contains the page we want to send * @offset: offset inside the block for the page */ -static int save_zero_page(PageSearchStatus *pss, RAMBlock *block, +static int save_zero_page(PageSearchStatus *pss, QEMUFile *f, RAMBlock *block, ram_addr_t offset) { - int len = save_zero_page_to_file(pss, block, offset); + int len = save_zero_page_to_file(pss, f, block, offset); if (len) { stat64_add(&ram_atomic_counters.duplicate, 1); @@ -1394,7 +1392,7 @@ static int save_normal_page(PageSearchStatus *pss, RAMBlock *block, { QEMUFile *file = pss->pss_channel; - ram_transferred_add(save_page_header(pss, block, + ram_transferred_add(save_page_header(pss, pss->pss_channel, block, offset | RAM_SAVE_FLAG_PAGE)); if (async) { qemu_put_buffer_async(file, buf, TARGET_PAGE_SIZE, @@ -1473,11 +1471,11 @@ static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block, uint8_t *p = block->host + offset; int ret; - if (save_zero_page_to_file(pss, block, offset)) { + if (save_zero_page_to_file(pss, f, block, offset)) { return true; } - save_page_header(pss, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE); + save_page_header(pss, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE); /* * copy it to a internal buffer to avoid it being modified by VM @@ -2355,7 +2353,7 @@ static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss) return 1; } - res = save_zero_page(pss, block, offset); + res = save_zero_page(pss, 
pss->pss_channel, block, offset); if (res > 0) { /* Must let xbzrle know, otherwise a previous (now 0'd) cached * page would be stale From 28ef5339c37f1f78c2fa4df2295bc0cd73a0abfd Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Wed, 12 Apr 2023 22:30:20 +0200 Subject: [PATCH 5/5] migration: fix ram_state_pending_exact() I removed that bit on commit: commit c8df4a7aeffcb46020f610526eea621fa5b0cd47 Author: Juan Quintela Date: Mon Oct 3 02:00:03 2022 +0200 migration: Split save_live_pending() into state_pending_* Fixes: c8df4a7aeffcb46020f610526eea621fa5b0cd47 Suggested-by: Nina Schoetterl-Glausch Signed-off-by: Juan Quintela --- migration/ram.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/migration/ram.c b/migration/ram.c index 9d1817ab7b..79d881f735 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -3506,12 +3506,13 @@ static void ram_state_pending_estimate(void *opaque, uint64_t *must_precopy, static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy, uint64_t *can_postcopy) { + MigrationState *s = migrate_get_current(); RAMState **temp = opaque; RAMState *rs = *temp; uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; - if (!migration_in_postcopy()) { + if (!migration_in_postcopy() && remaining_size < s->threshold_size) { qemu_mutex_lock_iothread(); WITH_RCU_READ_LOCK_GUARD() { migration_bitmap_sync_precopy(rs);