mirror of https://github.com/xemu-project/xemu.git
coroutine: stop using AioContext in CoQueue
qemu_co_queue_next(&queue) arranges that the next queued coroutine is run at a later point in time. This deferred restart is useful because the caller may not want to transfer control yet. This behavior was implemented using QEMUBH in the past, which meant that CoQueue (and hence CoMutex and CoRwlock) had a dependency on the AioContext event loop. This hidden dependency causes trouble when we move to a world with multiple event loops - now qemu_co_queue_next() needs to know which event loop to schedule the QEMUBH in. After pondering how to stash AioContext I realized the best solution is to not use AioContext at all. This patch implements the deferred restart behavior purely in terms of coroutines and no longer uses QEMUBH. Here is how it works: Each Coroutine has a wakeup queue that starts out empty. When qemu_co_queue_next() is called, the next coroutine is added to our wakeup queue. The wakeup queue is processed when we yield or terminate. Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
This commit is contained in:
parent
b84c458623
commit
02ffb50448
|
@ -38,6 +38,9 @@ struct Coroutine {
|
|||
void *entry_arg;
|
||||
Coroutine *caller;
|
||||
QSLIST_ENTRY(Coroutine) pool_next;
|
||||
|
||||
/* Coroutines that should be woken up when we yield or terminate */
|
||||
QTAILQ_HEAD(, Coroutine) co_queue_wakeup;
|
||||
QTAILQ_ENTRY(Coroutine) co_queue_next;
|
||||
};
|
||||
|
||||
|
@ -45,5 +48,6 @@ Coroutine *qemu_coroutine_new(void);
|
|||
void qemu_coroutine_delete(Coroutine *co);
|
||||
CoroutineAction qemu_coroutine_switch(Coroutine *from, Coroutine *to,
|
||||
CoroutineAction action);
|
||||
void coroutine_fn qemu_co_queue_run_restart(Coroutine *co);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -26,39 +26,11 @@
|
|||
#include "block/coroutine.h"
|
||||
#include "block/coroutine_int.h"
|
||||
#include "qemu/queue.h"
|
||||
#include "block/aio.h"
|
||||
#include "trace.h"
|
||||
|
||||
/* Coroutines are awoken from a BH to allow the current coroutine to complete
|
||||
* its flow of execution. The BH may run after the CoQueue has been destroyed,
|
||||
* so keep BH data in a separate heap-allocated struct.
|
||||
*/
|
||||
typedef struct {
|
||||
QEMUBH *bh;
|
||||
QTAILQ_HEAD(, Coroutine) entries;
|
||||
} CoQueueNextData;
|
||||
|
||||
static void qemu_co_queue_next_bh(void *opaque)
|
||||
{
|
||||
CoQueueNextData *data = opaque;
|
||||
Coroutine *next;
|
||||
|
||||
trace_qemu_co_queue_next_bh();
|
||||
while ((next = QTAILQ_FIRST(&data->entries))) {
|
||||
QTAILQ_REMOVE(&data->entries, next, co_queue_next);
|
||||
qemu_coroutine_enter(next, NULL);
|
||||
}
|
||||
|
||||
qemu_bh_delete(data->bh);
|
||||
g_slice_free(CoQueueNextData, data);
|
||||
}
|
||||
|
||||
/* Prepare a CoQueue for use: no waiters queued, bound to the global
 * AioContext.
 */
void qemu_co_queue_init(CoQueue *queue)
{
    /* This will be exposed to callers once there are multiple AioContexts */
    queue->ctx = qemu_get_aio_context();

    QTAILQ_INIT(&queue->entries);
}
|
||||
|
||||
void coroutine_fn qemu_co_queue_wait(CoQueue *queue)
|
||||
|
@ -77,23 +49,37 @@ void coroutine_fn qemu_co_queue_wait_insert_head(CoQueue *queue)
|
|||
assert(qemu_in_coroutine());
|
||||
}
|
||||
|
||||
static bool qemu_co_queue_do_restart(CoQueue *queue, bool single)
|
||||
/**
|
||||
* qemu_co_queue_run_restart:
|
||||
*
|
||||
* Enter each coroutine that was previously marked for restart by
|
||||
* qemu_co_queue_next() or qemu_co_queue_restart_all(). This function is
|
||||
* invoked by the core coroutine code when the current coroutine yields or
|
||||
* terminates.
|
||||
*/
|
||||
void qemu_co_queue_run_restart(Coroutine *co)
|
||||
{
|
||||
Coroutine *next;
|
||||
CoQueueNextData *data;
|
||||
|
||||
trace_qemu_co_queue_run_restart(co);
|
||||
while ((next = QTAILQ_FIRST(&co->co_queue_wakeup))) {
|
||||
QTAILQ_REMOVE(&co->co_queue_wakeup, next, co_queue_next);
|
||||
qemu_coroutine_enter(next, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
static bool qemu_co_queue_do_restart(CoQueue *queue, bool single)
|
||||
{
|
||||
Coroutine *self = qemu_coroutine_self();
|
||||
Coroutine *next;
|
||||
|
||||
if (QTAILQ_EMPTY(&queue->entries)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
data = g_slice_new(CoQueueNextData);
|
||||
data->bh = aio_bh_new(queue->ctx, qemu_co_queue_next_bh, data);
|
||||
QTAILQ_INIT(&data->entries);
|
||||
qemu_bh_schedule(data->bh);
|
||||
|
||||
while ((next = QTAILQ_FIRST(&queue->entries)) != NULL) {
|
||||
QTAILQ_REMOVE(&queue->entries, next, co_queue_next);
|
||||
QTAILQ_INSERT_TAIL(&data->entries, next, co_queue_next);
|
||||
QTAILQ_INSERT_TAIL(&self->co_queue_wakeup, next, co_queue_next);
|
||||
trace_qemu_co_queue_next(next);
|
||||
if (single) {
|
||||
break;
|
||||
|
|
|
@ -45,6 +45,7 @@ Coroutine *qemu_coroutine_create(CoroutineEntry *entry)
|
|||
}
|
||||
|
||||
co->entry = entry;
|
||||
QTAILQ_INIT(&co->co_queue_wakeup);
|
||||
return co;
|
||||
}
|
||||
|
||||
|
@ -87,6 +88,8 @@ static void coroutine_swap(Coroutine *from, Coroutine *to)
|
|||
|
||||
ret = qemu_coroutine_switch(from, to, COROUTINE_YIELD);
|
||||
|
||||
qemu_co_queue_run_restart(to);
|
||||
|
||||
switch (ret) {
|
||||
case COROUTINE_YIELD:
|
||||
return;
|
||||
|
|
|
@ -825,7 +825,7 @@ qemu_coroutine_yield(void *from, void *to) "from %p to %p"
|
|||
qemu_coroutine_terminate(void *co) "self %p"
|
||||
|
||||
# qemu-coroutine-lock.c
|
||||
qemu_co_queue_next_bh(void) ""
|
||||
qemu_co_queue_run_restart(void *co) "co %p"
|
||||
qemu_co_queue_next(void *nxt) "next %p"
|
||||
qemu_co_mutex_lock_entry(void *mutex, void *self) "mutex %p self %p"
|
||||
qemu_co_mutex_lock_return(void *mutex, void *self) "mutex %p self %p"
|
||||
|
|
Loading…
Reference in New Issue