vl: introduce vm_shutdown()

Commit 00d09fdbba ("vl: pause vcpus before stopping iothreads") and commit
dce8921b2b ("iothread: Stop threads before main() quits") tried to work around
the fact that emulation was still active during termination by stopping
iothreads.  They suffer from race conditions:

1. virtio_scsi_handle_cmd_vq() racing with iothread_stop_all() hits the
   virtio_scsi_ctx_check() assertion failure because the BDS AioContext has
   been modified by iothread_stop_all().

2. Guest vq kick racing with main loop termination leaves a readable
   ioeventfd that is handled by the next aio_poll() when external clients are
   enabled again, resulting in unwanted emulation activity.

This patch obsoletes those commits by fully disabling emulation activity when
vcpus are stopped.

Use the new vm_shutdown() function instead of pause_all_vcpus() so that vm
change state handlers are invoked too.  Virtio devices will now stop their
ioeventfds, preventing further emulation activity after vm_stop().  Note that
vm_stop(RUN_STATE_SHUTDOWN) cannot be used because it emits a QMP STOP event
that may affect existing clients.

It is no longer necessary to call replay_disable_events() directly since
vm_shutdown() does so already.

Drop iothread_stop_all() since it is no longer used.

Cc: Fam Zheng <famz@redhat.com>
Cc: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20180307144205.20619-5-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>

parent 184b962346
commit 4486e89c21

cpus.c (16 lines changed)

@@ -993,7 +993,7 @@ void cpu_synchronize_all_pre_loadvm(void)
     }
 }
 
-static int do_vm_stop(RunState state)
+static int do_vm_stop(RunState state, bool send_stop)
 {
     int ret = 0;
 
@@ -1002,7 +1002,9 @@ static int do_vm_stop(RunState state)
         pause_all_vcpus();
         runstate_set(state);
         vm_state_notify(0, state);
-        qapi_event_send_stop(&error_abort);
+        if (send_stop) {
+            qapi_event_send_stop(&error_abort);
+        }
     }
 
     bdrv_drain_all();
@@ -1012,6 +1014,14 @@ static int do_vm_stop(RunState state)
     return ret;
 }
 
+/* Special vm_stop() variant for terminating the process.  Historically clients
+ * did not expect a QMP STOP event and so we need to retain compatibility.
+ */
+int vm_shutdown(void)
+{
+    return do_vm_stop(RUN_STATE_SHUTDOWN, false);
+}
+
 static bool cpu_can_run(CPUState *cpu)
 {
     if (cpu->stop) {
@@ -1994,7 +2004,7 @@ int vm_stop(RunState state)
         return 0;
     }
 
-    return do_vm_stop(state);
+    return do_vm_stop(state, true);
 }
 
 /**
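
To see the new split in isolation, here is a minimal standalone sketch in plain C (printf stubs only, not QEMU code) of the control flow the cpus.c hunks introduce: do_vm_stop() gains a send_stop flag, vm_stop() passes true, and the new vm_shutdown() passes false so that no QMP STOP event reaches existing clients during termination.

/* Standalone model of the send_stop flag; all helpers are printf stubs. */
#include <stdbool.h>
#include <stdio.h>

typedef enum { RUN_STATE_PAUSED, RUN_STATE_SHUTDOWN } RunState;

static void pause_all_vcpus_stub(void)      { puts("vcpus paused"); }
static void vm_state_notify_stub(void)      { puts("vm change state handlers run"); }
static void qapi_event_send_stop_stub(void) { puts("QMP STOP event emitted"); }
static void bdrv_drain_all_stub(void)       { puts("block layer drained"); }

static int do_vm_stop(RunState state, bool send_stop)
{
    (void)state;                      /* the real code also calls runstate_set(state) */
    pause_all_vcpus_stub();
    vm_state_notify_stub();
    if (send_stop) {                  /* vm_shutdown() skips the event */
        qapi_event_send_stop_stub();
    }
    bdrv_drain_all_stub();
    return 0;
}

int vm_stop(RunState state) { return do_vm_stop(state, true); }
int vm_shutdown(void)       { return do_vm_stop(RUN_STATE_SHUTDOWN, false); }

int main(void)
{
    puts("-- vm_stop(RUN_STATE_PAUSED) --");
    vm_stop(RUN_STATE_PAUSED);
    puts("-- vm_shutdown() --");
    vm_shutdown();
    return 0;
}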

include/sysemu/iothread.h

@@ -45,7 +45,6 @@ typedef struct {
 char *iothread_get_id(IOThread *iothread);
 IOThread *iothread_by_id(const char *id);
 AioContext *iothread_get_aio_context(IOThread *iothread);
-void iothread_stop_all(void);
 GMainContext *iothread_get_g_main_context(IOThread *iothread);
 
 /*

include/sysemu/sysemu.h

@@ -56,6 +56,7 @@ void vm_start(void);
 int vm_prepare_start(void);
 int vm_stop(RunState state);
 int vm_stop_force_state(RunState state);
+int vm_shutdown(void);
 
 typedef enum WakeupReason {
     /* Always keep QEMU_WAKEUP_REASON_NONE = 0 */

iothread.c (31 lines changed)

@@ -101,18 +101,6 @@ void iothread_stop(IOThread *iothread)
     qemu_thread_join(&iothread->thread);
 }
 
-static int iothread_stop_iter(Object *object, void *opaque)
-{
-    IOThread *iothread;
-
-    iothread = (IOThread *)object_dynamic_cast(object, TYPE_IOTHREAD);
-    if (!iothread) {
-        return 0;
-    }
-    iothread_stop(iothread);
-    return 0;
-}
-
 static void iothread_instance_init(Object *obj)
 {
     IOThread *iothread = IOTHREAD(obj);
@@ -333,25 +321,6 @@ IOThreadInfoList *qmp_query_iothreads(Error **errp)
     return head;
 }
 
-void iothread_stop_all(void)
-{
-    Object *container = object_get_objects_root();
-    BlockDriverState *bs;
-    BdrvNextIterator it;
-
-    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
-        AioContext *ctx = bdrv_get_aio_context(bs);
-        if (ctx == qemu_get_aio_context()) {
-            continue;
-        }
-        aio_context_acquire(ctx);
-        bdrv_set_aio_context(bs, qemu_get_aio_context());
-        aio_context_release(ctx);
-    }
-
-    object_child_foreach(container, iothread_stop_iter, NULL);
-}
-
 static gpointer iothread_g_main_context_init(gpointer opaque)
 {
     AioContext *ctx;

vl.c (13 lines changed)

@@ -4722,17 +4722,10 @@ int main(int argc, char **argv, char **envp)
     os_setup_post();
 
     main_loop();
-    replay_disable_events();
 
-    /* The ordering of the following is delicate.  Stop vcpus to prevent new
-     * I/O requests being queued by the guest.  Then stop IOThreads (this
-     * includes a drain operation and completes all request processing).  At
-     * this point emulated devices are still associated with their IOThreads
-     * (if any) but no longer have any work to do.  Only then can we close
-     * block devices safely because we know there is no more I/O coming.
-     */
-    pause_all_vcpus();
-    iothread_stop_all();
+    /* No more vcpu or device emulation activity beyond this point */
+    vm_shutdown();
+
     bdrv_close_all();
 
     res_free();
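
As a follow-up to the vl.c hunk, here is a rough standalone model (plain C printf stubs, not QEMU code) of the termination ordering it relies on: the single vm_shutdown() call replaces the old pause_all_vcpus()/iothread_stop_all() pair and guarantees that block devices are only closed once no further I/O can be queued.

/* Standalone model of main() termination after this patch: vm_shutdown()
 * halts vcpu and device emulation (including ioeventfds) and drains the
 * block layer, so bdrv_close_all() cannot race against new guest I/O. */
#include <stdbool.h>
#include <stdio.h>

static bool emulation_active = true;

static void vm_shutdown_stub(void)
{
    emulation_active = false;         /* no more vcpu or ioeventfd activity */
    puts("vm_shutdown(): vcpus paused, ioeventfds stopped, block layer drained");
}

static void bdrv_close_all_stub(void)
{
    /* Safe only because nothing can queue new I/O at this point. */
    printf("bdrv_close_all(): emulation_active=%s\n",
           emulation_active ? "true (unsafe!)" : "false (safe)");
}

int main(void)
{
    puts("main loop exited");

    /* No more vcpu or device emulation activity beyond this point */
    vm_shutdown_stub();

    bdrv_close_all_stub();
    return 0;
}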