mirror of https://github.com/xemu-project/xemu.git
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJVr4HnAAoJEJykq7OBq3PIuwMIALf9rj8sVhr0b2isbS27t+TP
Byp5EsLVXIrfkIwsTw1OkdYrBJ1daLYljR7/3CwsPyTmV05ULdKzI32NzHgG5usL
6SiHBB1TiPcs2t4IFD/rKx+ZB7UYWKOVRuUNtAy3khzcjZk05IR/R58d0Uibscbe
+iA1umxlx4M0egeKiyDhJXCpmwUphNKGVNCv+WC8Ay6KJTBxn86Sh+/rCoWRl15f
snZsL4Va4DLHvQUzsxZwt9GsSBSZ7bmH05QMWpaTYJAqSy+s+FHRe8eeI9O+rJi8
2Qd7T01T+pl0DAN+A8yAxkcj9wwxOHzjlA1LGq9meVOGdoFFpbxAkORTQhZvaDk=
=dlU8
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging

# gpg: Signature made Wed Jul 22 12:43:35 2015 BST using RSA key ID 81AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>"
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>"

* remotes/stefanha/tags/block-pull-request:
  AioContext: optimize clearing the EventNotifier
  AioContext: fix broken placement of event_notifier_test_and_clear
  AioContext: fix broken ctx->dispatching optimization
  aio-win32: reorganize polling loop
  tests: remove irrelevant assertions from test-aio
  qemu-timer: initialize "timers_done_ev" to set
  mirror: Speed up bitmap initial scanning

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit dc94bd9166

aio-posix.c (20 lines changed)
@@ -233,26 +233,23 @@ static void add_pollfd(AioHandler *node)
 bool aio_poll(AioContext *ctx, bool blocking)
 {
     AioHandler *node;
-    bool was_dispatching;
     int i, ret;
     bool progress;
     int64_t timeout;
 
     aio_context_acquire(ctx);
-    was_dispatching = ctx->dispatching;
     progress = false;
 
     /* aio_notify can avoid the expensive event_notifier_set if
      * everything (file descriptors, bottom halves, timers) will
      * be re-evaluated before the next blocking poll().  This is
      * already true when aio_poll is called with blocking == false;
-     * if blocking == true, it is only true after poll() returns.
-     *
-     * If we're in a nested event loop, ctx->dispatching might be true.
-     * In that case we can restore it just before returning, but we
-     * have to clear it now.
+     * if blocking == true, it is only true after poll() returns,
+     * so disable the optimization now.
      */
-    aio_set_dispatching(ctx, !blocking);
+    if (blocking) {
+        atomic_add(&ctx->notify_me, 2);
+    }
 
     ctx->walking_handlers++;
@@ -272,10 +269,15 @@ bool aio_poll(AioContext *ctx, bool blocking)
         aio_context_release(ctx);
     }
     ret = qemu_poll_ns((GPollFD *)pollfds, npfd, timeout);
+    if (blocking) {
+        atomic_sub(&ctx->notify_me, 2);
+    }
     if (timeout) {
         aio_context_acquire(ctx);
     }
 
+    aio_notify_accept(ctx);
+
     /* if we have any readable fds, dispatch event */
     if (ret > 0) {
         for (i = 0; i < npfd; i++) {
@@ -287,12 +289,10 @@ bool aio_poll(AioContext *ctx, bool blocking)
     ctx->walking_handlers--;
 
     /* Run dispatch even if there were no readable fds to run timers */
-    aio_set_dispatching(ctx, true);
     if (aio_dispatch(ctx)) {
         progress = true;
     }
 
-    aio_set_dispatching(ctx, was_dispatching);
     aio_context_release(ctx);
 
     return progress;
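
Read together, the three hunks move the advertisement of a blocking waiter into an atomic counter: increment notify_me before computing the timeout, decrement after qemu_poll_ns() returns, and acknowledge the wakeup (aio_notify_accept) before dispatch. The point of that ordering can be replayed deterministically; below is a minimal standalone sketch in plain C, with invented names standing in for QEMU's atomics and EventNotifier:

#include <assert.h>
#include <stdbool.h>

static int  notify_me;   /* waiters currently inside their poll phase */
static bool req;         /* work posted by another thread (bh->scheduled) */
static bool event;       /* stand-in for the EventNotifier */

/* Notifier side after this series: publish the work, then test the
 * counter (a memory barrier separates the two in the real code). */
static void toy_aio_notify(void)
{
    req = true;
    if (notify_me) {
        event = true;               /* event_notifier_set() */
    }
}

int main(void)
{
    notify_me += 2;                 /* aio_poll, blocking == true */
    toy_aio_notify();               /* notification lands in the window */
    bool would_sleep_forever = !req && !event;
    notify_me -= 2;
    assert(!would_sleep_forever);   /* either req or event is visible */
    return 0;
}

Because the increment happens before the waiter inspects its work sources, a notifier running in between either is seen (req) or sees the waiter (notify_me) and sets the event. With a counter instead of the old boolean dispatching flag, nested event loops simply nest the increments, which is where the boolean scheme broke down.
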

aio-win32.c (48 lines changed)
@@ -279,30 +279,25 @@ bool aio_poll(AioContext *ctx, bool blocking)
 {
     AioHandler *node;
     HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
-    bool was_dispatching, progress, have_select_revents, first;
+    bool progress, have_select_revents, first;
     int count;
     int timeout;
 
     aio_context_acquire(ctx);
-    have_select_revents = aio_prepare(ctx);
-    if (have_select_revents) {
-        blocking = false;
-    }
-
-    was_dispatching = ctx->dispatching;
     progress = false;
 
     /* aio_notify can avoid the expensive event_notifier_set if
      * everything (file descriptors, bottom halves, timers) will
      * be re-evaluated before the next blocking poll().  This is
      * already true when aio_poll is called with blocking == false;
-     * if blocking == true, it is only true after poll() returns.
-     *
-     * If we're in a nested event loop, ctx->dispatching might be true.
-     * In that case we can restore it just before returning, but we
-     * have to clear it now.
+     * if blocking == true, it is only true after poll() returns,
+     * so disable the optimization now.
      */
-    aio_set_dispatching(ctx, !blocking);
+    if (blocking) {
+        atomic_add(&ctx->notify_me, 2);
+    }
+
+    have_select_revents = aio_prepare(ctx);
 
     ctx->walking_handlers++;
@@ -317,26 +312,36 @@ bool aio_poll(AioContext *ctx, bool blocking)
     ctx->walking_handlers--;
     first = true;
 
-    /* wait until next event */
-    while (count > 0) {
+    /* ctx->notifier is always registered.  */
+    assert(count > 0);
+
+    /* Multiple iterations, all of them non-blocking except the first,
+     * may be necessary to process all pending events.  After the first
+     * WaitForMultipleObjects call ctx->notify_me will be decremented.
+     */
+    do {
         HANDLE event;
         int ret;
 
-        timeout = blocking
+        timeout = blocking && !have_select_revents
             ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
         if (timeout) {
            aio_context_release(ctx);
         }
         ret = WaitForMultipleObjects(count, events, FALSE, timeout);
+        if (blocking) {
+            assert(first);
+            atomic_sub(&ctx->notify_me, 2);
+        }
         if (timeout) {
            aio_context_acquire(ctx);
         }
-        aio_set_dispatching(ctx, true);
 
-        if (first && aio_bh_poll(ctx)) {
-            progress = true;
-        }
+        if (first) {
+            aio_notify_accept(ctx);
+            progress |= aio_bh_poll(ctx);
+            first = false;
+        }
 
         /* if we have any signaled events, dispatch event */
         event = NULL;
@@ -351,11 +356,10 @@ bool aio_poll(AioContext *ctx, bool blocking)
         blocking = false;
 
         progress |= aio_dispatch_handlers(ctx, event);
-    }
+    } while (count > 0);
 
     progress |= timerlistgroup_run_timers(&ctx->tlg);
 
-    aio_set_dispatching(ctx, was_dispatching);
     aio_context_release(ctx);
     return progress;
 }
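
The Windows loop is restructured from "block repeatedly while handles remain" to "block at most once, then drain": the first WaitForMultipleObjects call may sleep, every later iteration polls with a zero timeout and dispatches one signaled handle. A portable toy with the same control flow; wait_any() is an invented stand-in for WaitForMultipleObjects:

#include <stdbool.h>
#include <stdio.h>

static int ready[] = {2, 0, 1};          /* handles that signal, in order */
static int nready = 3;

static int wait_any(int timeout_ms)      /* -1 blocks, 0 just polls */
{
    (void)timeout_ms;                    /* a real wait would sleep here */
    return nready > 0 ? ready[--nready] : -1;
}

int main(void)
{
    int count = 3;                       /* registered handles */
    bool first = true;

    do {
        int idx = wait_any(first ? -1 : 0);  /* block only on first pass */
        first = false;
        if (idx < 0) {
            break;                       /* nothing signaled; done */
        }
        printf("dispatch handle %d\n", idx);
        count--;                         /* dispatched handles drop out */
    } while (count > 0);
    return 0;
}

The do/while shape also explains the assert(first) next to atomic_sub above: notify_me is raised only across the one call that can actually block.
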

async.c (31 lines changed)
@@ -184,6 +184,8 @@ aio_ctx_prepare(GSource *source, gint *timeout)
 {
     AioContext *ctx = (AioContext *) source;
 
+    atomic_or(&ctx->notify_me, 1);
+
     /* We assume there is no timeout already supplied */
     *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));
 
@@ -200,6 +202,9 @@ aio_ctx_check(GSource *source)
     AioContext *ctx = (AioContext *) source;
     QEMUBH *bh;
 
+    atomic_and(&ctx->notify_me, ~1);
+    aio_notify_accept(ctx);
+
     for (bh = ctx->first_bh; bh; bh = bh->next) {
         if (!bh->deleted && bh->scheduled) {
             return true;
@@ -254,24 +259,22 @@ ThreadPool *aio_get_thread_pool(AioContext *ctx)
     return ctx->thread_pool;
 }
 
-void aio_set_dispatching(AioContext *ctx, bool dispatching)
+void aio_notify(AioContext *ctx)
 {
-    ctx->dispatching = dispatching;
-    if (!dispatching) {
-        /* Write ctx->dispatching before reading e.g. bh->scheduled.
-         * Optimization: this is only needed when we're entering the "unsafe"
-         * phase where other threads must call event_notifier_set.
+    /* Write e.g. bh->scheduled before reading ctx->notify_me.  Pairs
+     * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
      */
     smp_mb();
+    if (ctx->notify_me) {
         event_notifier_set(&ctx->notifier);
+        atomic_mb_set(&ctx->notified, true);
     }
 }
 
-void aio_notify(AioContext *ctx)
+void aio_notify_accept(AioContext *ctx)
 {
-    /* Write e.g. bh->scheduled before reading ctx->dispatching.  */
-    smp_mb();
-    if (!ctx->dispatching) {
-        event_notifier_set(&ctx->notifier);
+    if (atomic_xchg(&ctx->notified, false)) {
+        event_notifier_test_and_clear(&ctx->notifier);
     }
 }
@@ -286,6 +289,10 @@ static void aio_rfifolock_cb(void *opaque)
     aio_notify(opaque);
 }
 
+static void event_notifier_dummy_cb(EventNotifier *e)
+{
+}
+
 AioContext *aio_context_new(Error **errp)
 {
     int ret;
@@ -300,7 +307,7 @@ AioContext *aio_context_new(Error **errp)
     g_source_set_can_recurse(&ctx->source, true);
     aio_set_event_notifier(ctx, &ctx->notifier,
                            (EventNotifierHandler *)
-                           event_notifier_test_and_clear);
+                           event_notifier_dummy_cb);
     ctx->thread_pool = NULL;
     qemu_mutex_init(&ctx->bh_lock);
     rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
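
aio_notify() and aio_ctx_prepare() now form a Dekker-style handshake: each side stores its own flag, then loads the other side's. With a full barrier between store and load on both sides, at least one of the two is guaranteed to observe the other, which rules out the lost wakeup. A self-contained C11 rendering of the pattern (names invented here; QEMU spells this atomic_or/atomic_add plus smp_mb()):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int  notify_me;      /* ctx->notify_me */
static atomic_bool bh_scheduled;   /* stands in for bh->scheduled */
static atomic_bool event;          /* stands in for the EventNotifier */

/* Waiter (aio_ctx_prepare): store notify_me, then load the work flag. */
static bool prepare(void)
{
    atomic_fetch_or(&notify_me, 1);
    return atomic_load(&bh_scheduled);   /* if true, poll with timeout 0 */
}

/* Notifier (qemu_bh_schedule + aio_notify): store the work flag, then
 * load notify_me.  Sequentially consistent atomics supply the
 * store-load ordering that smp_mb() provides in async.c. */
static void schedule_bh(void)
{
    atomic_store(&bh_scheduled, true);
    if (atomic_load(&notify_me)) {
        atomic_store(&event, true);      /* event_notifier_set() */
    }
}

int main(void)
{
    schedule_bh();
    return prepare() ? 0 : 1;   /* the waiter must now see the work */
}

The losing interleaving would need prepare() to miss bh_scheduled and schedule_bh() to miss notify_me at the same time; sequential consistency forbids both loads from reading stale values at once.
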

block/mirror.c
@@ -388,7 +388,7 @@ static void coroutine_fn mirror_run(void *opaque)
     MirrorBlockJob *s = opaque;
     MirrorExitData *data;
     BlockDriverState *bs = s->common.bs;
-    int64_t sector_num, end, sectors_per_chunk, length;
+    int64_t sector_num, end, length;
     uint64_t last_pause_ns;
     BlockDriverInfo bdi;
     char backing_filename[2]; /* we only need 2 characters because we are only
@@ -442,7 +442,6 @@ static void coroutine_fn mirror_run(void *opaque)
         goto immediate_exit;
     }
 
-    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
     mirror_free_init(s);
 
     last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
@@ -450,7 +449,9 @@ static void coroutine_fn mirror_run(void *opaque)
         /* First part, loop on the sectors and initialize the dirty bitmap.  */
         BlockDriverState *base = s->base;
         for (sector_num = 0; sector_num < end; ) {
-            int64_t next = (sector_num | (sectors_per_chunk - 1)) + 1;
+            /* Just to make sure we are not exceeding int limit. */
+            int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
+                                 end - sector_num);
             int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
 
             if (now - last_pause_ns > SLICE_TIME) {
@@ -462,8 +463,7 @@ static void coroutine_fn mirror_run(void *opaque)
                 goto immediate_exit;
             }
 
-            ret = bdrv_is_allocated_above(bs, base,
-                                          sector_num, next - sector_num, &n);
+            ret = bdrv_is_allocated_above(bs, base, sector_num, nb_sectors, &n);
 
             if (ret < 0) {
                 goto immediate_exit;
@@ -472,10 +472,8 @@ static void coroutine_fn mirror_run(void *opaque)
             assert(n > 0);
             if (ret == 1) {
                 bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
-                sector_num = next;
-            } else {
-                sector_num += n;
             }
+            sector_num += n;
         }
     }
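
The speedup comes from querying allocation per extent instead of per chunk: the old loop called bdrv_is_allocated_above() once per sectors_per_chunk-sized chunk, the new one asks about the largest span that still fits the int-based API and advances by however many sectors the driver answers for. With 512-byte sectors and a 64 KB granularity that is 128 sectors per chunk, so a fully allocated 1 TB image went from roughly 16 million queries to roughly one per extent the driver reports. The clamp from the hunk above, checked standalone (plain arithmetic, not the QEMU API):

#include <assert.h>
#include <limits.h>
#include <stdint.h>

#define BDRV_SECTOR_BITS 9                     /* 512-byte sectors */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    int64_t end = (int64_t)1 << 31;            /* 1 TB image, in sectors */
    int64_t sector_num = 0;

    /* nb_sectors, converted to bytes, must stay below INT_MAX */
    int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS, end - sector_num);
    assert(nb_sectors == 4194303);
    assert((int64_t)nb_sectors << BDRV_SECTOR_BITS <= INT_MAX);

    int64_t n = 1 << 20;                       /* sectors the query answered for */
    sector_num += n;                           /* advance by n in either branch */
    assert(sector_num == n);
    return 0;
}
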

docs/aio_notify.promela
@@ -1,5 +1,5 @@
 /*
- * This model describes the interaction between aio_set_dispatching()
+ * This model describes the interaction between ctx->notify_me
  * and aio_notify().
  *
  * Author: Paolo Bonzini <pbonzini@redhat.com>
@@ -14,57 +14,53 @@
  *     spin -a docs/aio_notify.promela
  *     gcc -O2 pan.c
  *     ./a.out -a
  *
  * To verify it (with a bug planted in the model):
  *     spin -a -DBUG docs/aio_notify.promela
  *     gcc -O2 pan.c
  *     ./a.out -a
  */
 
 #define MAX   4
 #define LAST  (1 << (MAX - 1))
 #define FINAL ((LAST << 1) - 1)
 
-bool dispatching;
+bool notify_me;
 bool event;
 
-int req, done;
+int req;
+int done;
 
 active proctype waiter()
 {
-    int fetch, blocking;
+    int fetch;
 
     do
-       :: done != FINAL -> {
-          // Computing "blocking" is separate from execution of the
-          // "bottom half"
-          blocking = (req == 0);
-
-          // This is our "bottom half"
-          atomic { fetch = req; req = 0; }
-          done = done | fetch;
+       :: true -> {
+          notify_me++;
 
+          if
+#ifndef BUG
+          :: (req > 0) -> skip;
+#endif
+          :: else ->
             // Wait for a nudge from the other side
             do
                :: event == 1 -> { event = 0; break; }
-               :: !blocking  -> break;
            od;
+          fi;
 
-          dispatching = 1;
+          notify_me--;
 
          // If you are simulating this model, you may want to add
          // something like this here:
          //
          //     int foo; foo++; foo++; foo++;
          //
          // This only wastes some time and makes it more likely
          // that the notifier process hits the "fast path".
 
-          dispatching = 0;
+          atomic { fetch = req; req = 0; }
+          done = done | fetch;
       }
       :: else -> break;
    od
 }
 
 active proctype notifier()
 {
     int next = 1;
-    int sets = 0;
 
     do
     :: next <= LAST -> {
@@ -74,8 +70,8 @@ active proctype notifier()
 
        // aio_notify
        if
-       :: dispatching == 0 -> sets++; event = 1;
-       :: else             -> skip;
+       :: notify_me == 1 -> event = 1;
+       :: else           -> printf("Skipped event_notifier_set\n"); skip;
        fi;
 
        // Test both synchronous and asynchronous delivery
@@ -86,19 +82,12 @@ active proctype notifier()
        :: 1 -> skip;
        fi;
    }
    :: else -> break;
    od;
-    printf("Skipped %d event_notifier_set\n", MAX - sets);
 }
 
-#define p (done == FINAL)
-
-never  {
+never { /* [] done < FINAL */
+accept_init:
     do
-    :: 1                    // after an arbitrarily long prefix
-    :: p -> break           // p becomes true
+    :: done < FINAL -> skip;
     od;
-    do
-    :: !p -> accept: break  // it then must remains true forever after
-    od
 }
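
The rewritten never clause states the liveness property by negation: it accepts exactly the runs in which done stays below FINAL forever, so an empty acceptance-cycle search shows every posted request is eventually fetched. As an LTL reading (orientation only, not part of the model):

    $\neg\,\Diamond(\mathit{done} = \mathit{FINAL}) \;\equiv\; \Box\,(\mathit{done} < \mathit{FINAL})$

spin composes the processes with this claim and hunts for an accepting cycle; finding none verifies $\Diamond(\mathit{done} = \mathit{FINAL})$, and the -f flag used with the newer models additionally imposes weak fairness so a process cannot simply be starved out of the schedule.
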

docs/aio_notify_accept.promela (new file, 152 lines)
@@ -0,0 +1,152 @@
/*
 * This model describes the interaction between ctx->notified
 * and ctx->notifier.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This file is in the public domain.  If you really want a license,
 * the WTFPL will do.
 *
 * To verify the buggy version:
 *     spin -a -DBUG1 docs/aio_notify_bug.promela
 *     gcc -O2 pan.c
 *     ./a.out -a -f
 * (or -DBUG2)
 *
 * To verify the fixed version:
 *     spin -a docs/aio_notify_bug.promela
 *     gcc -O2 pan.c
 *     ./a.out -a -f
 *
 * Add -DCHECK_REQ to test an alternative invariant and the
 * "notify_me" optimization.
 */

int notify_me;
bool notified;
bool event;
bool req;
bool notifier_done;

#ifdef CHECK_REQ
#define USE_NOTIFY_ME 1
#else
#define USE_NOTIFY_ME 0
#endif

#ifdef BUG
#error Please define BUG1 or BUG2 instead.
#endif

active proctype notifier()
{
    do
        :: true -> {
            req = 1;
            if
            :: !USE_NOTIFY_ME || notify_me ->
#if defined BUG1
                /* CHECK_REQ does not detect this bug! */
                notified = 1;
                event = 1;
#elif defined BUG2
                if
                :: !notified -> event = 1;
                :: else      -> skip;
                fi;
                notified = 1;
#else
                event = 1;
                notified = 1;
#endif
            :: else -> skip;
            fi
        }
        :: true -> break;
    od;
    notifier_done = 1;
}

#define AIO_POLL                                    \
    notify_me++;                                    \
    if                                              \
    :: !req -> {                                    \
        if                                          \
        :: event -> skip;                           \
        fi;                                         \
    }                                               \
    :: else -> skip;                                \
    fi;                                             \
    notify_me--;                                    \
                                                    \
    atomic { old = notified; notified = 0; }        \
    if                                              \
    :: old -> event = 0;                            \
    :: else -> skip;                                \
    fi;                                             \
                                                    \
    req = 0;

active proctype waiter()
{
    bool old;

    do
        :: true -> AIO_POLL;
    od;
}

/* Same as waiter(), but disappears after a while.  */
active proctype temporary_waiter()
{
    bool old;

    do
        :: true -> AIO_POLL;
        :: true -> break;
    od;
}

#ifdef CHECK_REQ
never {
    do
        :: req -> goto accept_if_req_not_eventually_false;
        :: true -> skip;
    od;

accept_if_req_not_eventually_false:
    if
    :: req -> goto accept_if_req_not_eventually_false;
    fi;
    assert(0);
}

#else
/* There must be infinitely many transitions of event as long
 * as the notifier does not exit.
 *
 * If event stayed always true, the waiters would be busy looping.
 * If event stayed always false, the waiters would be sleeping
 * forever.
 */
never {
    do
        :: !event -> goto accept_if_event_not_eventually_true;
        :: event  -> goto accept_if_event_not_eventually_false;
        :: true   -> skip;
    od;

accept_if_event_not_eventually_true:
    if
    :: !event && notifier_done  -> do :: true -> skip; od;
    :: !event && !notifier_done -> goto accept_if_event_not_eventually_true;
    fi;
    assert(0);

accept_if_event_not_eventually_false:
    if
    :: event -> goto accept_if_event_not_eventually_false;
    fi;
    assert(0);
}
#endif
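
In C terms, this model pins down two orderings: aio_notify must make the event visible no later than "notified", and aio_notify_accept must consume "notified" before paying for the clear. BUG1 and BUG2 are the two ways of getting the first ordering wrong. A hedged C11 sketch of the fixed discipline (standalone, not QEMU's code):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool notified;      /* ctx->notified */
static atomic_bool event;         /* stands in for ctx->notifier */

static void toy_aio_notify(void)
{
    atomic_store(&event, true);      /* event_notifier_set first...      */
    atomic_store(&notified, true);   /* ...then publish "notified"; BUG1
                                      * and BUG2 reorder or gate these    */
}

static void toy_aio_notify_accept(void)
{
    /* Read and clear "notified" in one shot (atomic_xchg in async.c);
     * only a true result justifies the expensive clear of the event.
     * False positives just cost an extra clear, never a lost wakeup. */
    if (atomic_exchange(&notified, false)) {
        atomic_store(&event, false); /* event_notifier_test_and_clear */
    }
}

int main(void)
{
    toy_aio_notify();
    toy_aio_notify_accept();
    return (atomic_load(&event) || atomic_load(&notified)) ? 1 : 0;
}
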

docs/aio_notify_bug.promela (new file, 140 lines)
@@ -0,0 +1,140 @@
/*
 * This model describes a bug in aio_notify.  If ctx->notifier is
 * cleared too late, a wakeup could be lost.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This file is in the public domain.  If you really want a license,
 * the WTFPL will do.
 *
 * To verify the buggy version:
 *     spin -a -DBUG docs/aio_notify_bug.promela
 *     gcc -O2 pan.c
 *     ./a.out -a -f
 *
 * To verify the fixed version:
 *     spin -a docs/aio_notify_bug.promela
 *     gcc -O2 pan.c
 *     ./a.out -a -f
 *
 * Add -DCHECK_REQ to test an alternative invariant and the
 * "notify_me" optimization.
 */

int notify_me;
bool event;
bool req;
bool notifier_done;

#ifdef CHECK_REQ
#define USE_NOTIFY_ME 1
#else
#define USE_NOTIFY_ME 0
#endif

active proctype notifier()
{
    do
        :: true -> {
            req = 1;
            if
            :: !USE_NOTIFY_ME || notify_me -> event = 1;
            :: else -> skip;
            fi
        }
        :: true -> break;
    od;
    notifier_done = 1;
}

#ifdef BUG
#define AIO_POLL                                    \
    notify_me++;                                    \
    if                                              \
    :: !req -> {                                    \
        if                                          \
        :: event -> skip;                           \
        fi;                                         \
    }                                               \
    :: else -> skip;                                \
    fi;                                             \
    notify_me--;                                    \
                                                    \
    req = 0;                                        \
    event = 0;
#else
#define AIO_POLL                                    \
    notify_me++;                                    \
    if                                              \
    :: !req -> {                                    \
        if                                          \
        :: event -> skip;                           \
        fi;                                         \
    }                                               \
    :: else -> skip;                                \
    fi;                                             \
    notify_me--;                                    \
                                                    \
    event = 0;                                      \
    req = 0;
#endif

active proctype waiter()
{
    do
        :: true -> AIO_POLL;
    od;
}

/* Same as waiter(), but disappears after a while.  */
active proctype temporary_waiter()
{
    do
        :: true -> AIO_POLL;
        :: true -> break;
    od;
}

#ifdef CHECK_REQ
never {
    do
        :: req -> goto accept_if_req_not_eventually_false;
        :: true -> skip;
    od;

accept_if_req_not_eventually_false:
    if
    :: req -> goto accept_if_req_not_eventually_false;
    fi;
    assert(0);
}

#else
/* There must be infinitely many transitions of event as long
 * as the notifier does not exit.
 *
 * If event stayed always true, the waiters would be busy looping.
 * If event stayed always false, the waiters would be sleeping
 * forever.
 */
never {
    do
        :: !event -> goto accept_if_event_not_eventually_true;
        :: event  -> goto accept_if_event_not_eventually_false;
        :: true   -> skip;
    od;

accept_if_event_not_eventually_true:
    if
    :: !event && notifier_done  -> do :: true -> skip; od;
    :: !event && !notifier_done -> goto accept_if_event_not_eventually_true;
    fi;
    assert(0);

accept_if_event_not_eventually_false:
    if
    :: event -> goto accept_if_event_not_eventually_false;
    fi;
    assert(0);
}
#endif
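
The planted BUG is only the order of the two clears in the waiter's epilogue. Clearing req before event leaves a window in which the notifier posts fresh work and sets the event, and the waiter then discards that event as stale; the next poll sleeps on pending work. The fixed macro clears event first. A deterministic replay of the two schedules in plain C (illustrative only; the single variable req stands for the whole ready state):

#include <assert.h>
#include <stdbool.h>

static bool req, event;

static void toy_notify(void)   /* notifier: post work, then wake */
{
    req = true;
    event = true;
}

int main(void)
{
    /* BUG order: req = 0; then event = 0; */
    req = true; event = true;
    req = false;               /* waiter consumes the old request...  */
    toy_notify();              /* ...notifier slips in right here...  */
    event = false;             /* ...and its wakeup gets thrown away  */
    assert(req && !event);     /* pending work, no event: lost wakeup */

    /* Fixed order: event = 0; then req = 0; */
    req = true; event = true;
    event = false;
    toy_notify();              /* same interleaving point...          */
    req = false;
    assert(event);             /* ...but the wakeup survives, so the
                                * next poll returns and re-reads req  */
    return 0;
}
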

include/block/aio.h
@@ -63,10 +63,30 @@ struct AioContext {
      */
     int walking_handlers;
 
-    /* Used to avoid unnecessary event_notifier_set calls in aio_notify.
-     * Writes protected by lock or BQL, reads are lockless.
+    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
+     * accessed with atomic primitives.  If this field is 0, everything
+     * (file descriptors, bottom halves, timers) will be re-evaluated
+     * before the next blocking poll(), thus the event_notifier_set call
+     * can be skipped.  If it is non-zero, you may need to wake up a
+     * concurrent aio_poll or the glib main event loop, making
+     * event_notifier_set necessary.
+     *
+     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
+     * between a call to aio_ctx_check and the next call to aio_ctx_dispatch.
+     * Bits 1-31 simply count the number of active calls to aio_poll
+     * that are in the prepare or poll phase.
+     *
+     * The GSource and aio_poll must use a different mechanism because
+     * there is no certainty that a call to GSource's prepare callback
+     * (via g_main_context_prepare) is indeed followed by check and
+     * dispatch.  It's not clear whether this would be a bug, but let's
+     * play safe and allow it---it will just cause extra calls to
+     * event_notifier_set until the next call to dispatch.
+     *
+     * Instead, the aio_poll calls include both the prepare and the
+     * dispatch phase, hence a simple counter is enough for them.
      */
-    bool dispatching;
+    uint32_t notify_me;
 
     /* lock to protect between bh's adders and deleter */
     QemuMutex bh_lock;
@@ -79,7 +99,19 @@ struct AioContext {
      */
     int walking_bh;
 
-    /* Used for aio_notify.  */
+    /* Used by aio_notify.
+     *
+     * "notified" is used to avoid expensive event_notifier_test_and_clear
+     * calls.  When it is clear, the EventNotifier is clear, or one thread
+     * is going to clear "notified" before processing more events.  False
+     * positives are possible, i.e. "notified" could be set even though the
+     * EventNotifier is clear.
+     *
+     * Note that event_notifier_set *cannot* be optimized the same way.  For
+     * more information on the problem that would result, see "#ifdef BUG2"
+     * in the docs/aio_notify_accept.promela formal model.
+     */
+    bool notified;
     EventNotifier notifier;
 
     /* Thread pool for performing work and receiving completion callbacks */
@@ -89,9 +121,6 @@ struct AioContext {
     QEMUTimerListGroup tlg;
 };
 
-/* Used internally to synchronize aio_poll against qemu_bh_schedule.  */
-void aio_set_dispatching(AioContext *ctx, bool dispatching);
-
 /**
  * aio_context_new: Allocate a new AioContext.
  *
@@ -156,6 +185,24 @@ QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
  */
 void aio_notify(AioContext *ctx);
 
+/**
+ * aio_notify_accept: Acknowledge receiving an aio_notify.
+ *
+ * aio_notify() uses an EventNotifier in order to wake up a sleeping
+ * aio_poll() or g_main_context_iteration().  Calls to aio_notify() are
+ * usually rare, but the AioContext has to clear the EventNotifier on
+ * every aio_poll() or g_main_context_iteration() in order to avoid
+ * busy waiting.  This event_notifier_test_and_clear() cannot be done
+ * using the usual aio_context_set_event_notifier(), because it must
+ * be done before processing all events (file descriptors, bottom halves,
+ * timers).
+ *
+ * aio_notify_accept() is an optimized event_notifier_test_and_clear()
+ * that is specific to an AioContext's notifier; it is used internally
+ * to clear the EventNotifier only if aio_notify() had been called.
+ */
+void aio_notify_accept(AioContext *ctx);
+
 /**
  * aio_bh_poll: Poll bottom halves for an AioContext.
  *
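
The comment's bit layout is also why the two classes of users can share one word without locks: the GSource path touches only bit 0 (atomic_or/atomic_and) while aio_poll adds and subtracts 2, so neither can corrupt the other's state. A quick standalone check:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t notify_me = 0;

    notify_me |= 1;           /* aio_ctx_prepare: GSource enters its poll phase */
    notify_me += 2;           /* one blocking aio_poll                          */
    notify_me += 2;           /* another poller on a second thread              */
    assert(notify_me == 5);   /* bit 0 set; bits 1-31 count two pollers         */

    notify_me -= 2;           /* one poll returned                              */
    notify_me &= ~1u;         /* aio_ctx_check: GSource leaves its poll phase   */
    printf("pollers still in poll phase: %u\n", notify_me >> 1);  /* prints 1 */
    return 0;
}

aio_notify() itself never decodes the word; any non-zero value means somebody may be sleeping and event_notifier_set is worth its cost.
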

qemu-timer.c
@@ -99,7 +99,7 @@ QEMUTimerList *timerlist_new(QEMUClockType type,
     QEMUClock *clock = qemu_clock_ptr(type);
 
     timer_list = g_malloc0(sizeof(QEMUTimerList));
-    qemu_event_init(&timer_list->timers_done_ev, false);
+    qemu_event_init(&timer_list->timers_done_ev, true);
     timer_list->clock = clock;
     timer_list->notify_cb = cb;
     timer_list->notify_opaque = opaque;
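
timers_done_ev is reset while a timer list runs its callbacks and set again when the run finishes, so waiting on it means waiting for "no timer callbacks in flight". A freshly created list has nothing in flight, hence the event must start out set; initialized to false, the first waiter would block until the first timer ever ran. The same init-to-set pattern as a toy (a plain flag replaces QemuEvent; illustrative only):

#include <assert.h>
#include <stdbool.h>

static bool timers_done_ev;    /* set == "no timer callbacks running" */

static void run_timers(void)
{
    timers_done_ev = false;    /* qemu_event_reset */
    /* ... invoke expired timer callbacks ... */
    timers_done_ev = true;     /* qemu_event_set */
}

static void wait_timers_done(void)
{
    while (!timers_done_ev) {
        /* qemu_event_wait; spins here forever if the event starts
         * cleared and no timer ever fires */
    }
}

int main(void)
{
    timers_done_ev = true;     /* the fix: initialize to set */
    wait_timers_done();        /* returns immediately */
    run_timers();
    wait_timers_done();
    assert(timers_done_ev);
    return 0;
}
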

tests/test-aio.c
@@ -97,14 +97,6 @@ static void event_ready_cb(EventNotifier *e)
 
 /* Tests using aio_*.  */
 
-static void test_notify(void)
-{
-    g_assert(!aio_poll(ctx, false));
-    aio_notify(ctx);
-    g_assert(!aio_poll(ctx, true));
-    g_assert(!aio_poll(ctx, false));
-}
-
 typedef struct {
     QemuMutex start_lock;
     bool thread_acquired;
@@ -331,7 +323,7 @@ static void test_wait_event_notifier(void)
     EventNotifierTestData data = { .n = 0, .active = 1 };
     event_notifier_init(&data.e, false);
     aio_set_event_notifier(ctx, &data.e, event_ready_cb);
-    g_assert(!aio_poll(ctx, false));
+    while (aio_poll(ctx, false));
     g_assert_cmpint(data.n, ==, 0);
     g_assert_cmpint(data.active, ==, 1);
@@ -356,7 +348,7 @@ static void test_flush_event_notifier(void)
     EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
     event_notifier_init(&data.e, false);
     aio_set_event_notifier(ctx, &data.e, event_ready_cb);
-    g_assert(!aio_poll(ctx, false));
+    while (aio_poll(ctx, false));
     g_assert_cmpint(data.n, ==, 0);
     g_assert_cmpint(data.active, ==, 10);
@@ -494,14 +486,6 @@ static void test_timer_schedule(void)
  * works well, and that's what I am using.
  */
 
-static void test_source_notify(void)
-{
-    while (g_main_context_iteration(NULL, false));
-    aio_notify(ctx);
-    g_assert(g_main_context_iteration(NULL, true));
-    g_assert(!g_main_context_iteration(NULL, false));
-}
-
 static void test_source_flush(void)
 {
     g_assert(!g_main_context_iteration(NULL, false));
@@ -669,7 +653,7 @@ static void test_source_wait_event_notifier(void)
     EventNotifierTestData data = { .n = 0, .active = 1 };
     event_notifier_init(&data.e, false);
     aio_set_event_notifier(ctx, &data.e, event_ready_cb);
-    g_assert(g_main_context_iteration(NULL, false));
+    while (g_main_context_iteration(NULL, false));
     g_assert_cmpint(data.n, ==, 0);
     g_assert_cmpint(data.active, ==, 1);
@@ -694,7 +678,7 @@ static void test_source_flush_event_notifier(void)
     EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
     event_notifier_init(&data.e, false);
     aio_set_event_notifier(ctx, &data.e, event_ready_cb);
-    g_assert(g_main_context_iteration(NULL, false));
+    while (g_main_context_iteration(NULL, false));
     g_assert_cmpint(data.n, ==, 0);
     g_assert_cmpint(data.active, ==, 10);
@@ -830,7 +814,6 @@ int main(int argc, char **argv)
     while (g_main_context_iteration(NULL, false));
 
     g_test_init(&argc, &argv, NULL);
-    g_test_add_func("/aio/notify", test_notify);
     g_test_add_func("/aio/acquire", test_acquire);
     g_test_add_func("/aio/bh/schedule", test_bh_schedule);
     g_test_add_func("/aio/bh/schedule10", test_bh_schedule10);
@@ -845,7 +828,6 @@ int main(int argc, char **argv)
     g_test_add_func("/aio/event/flush", test_flush_event_notifier);
     g_test_add_func("/aio/timer/schedule", test_timer_schedule);
 
-    g_test_add_func("/aio-gsource/notify", test_source_notify);
     g_test_add_func("/aio-gsource/flush", test_source_flush);
     g_test_add_func("/aio-gsource/bh/schedule", test_source_bh_schedule);
     g_test_add_func("/aio-gsource/bh/schedule10", test_source_bh_schedule10);
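
The dropped assertions had baked in exactly how often aio_poll() and g_main_context_iteration() report progress, which the dummy notifier callback and the notify_me accounting legitimately change. What the tests actually need before counting callbacks is a quiescent context, hence the drain idiom the hunks switch to (excerpted shape; ctx is the test's global AioContext):

/* keep doing non-blocking iterations until one makes no progress;
 * how many that takes is deliberately no longer asserted */
while (aio_poll(ctx, false));

/* the GSource flavour of the same idiom */
while (g_main_context_iteration(NULL, false));
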