mirror of https://github.com/xemu-project/xemu.git
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJVr4HnAAoJEJykq7OBq3PIuwMIALf9rj8sVhr0b2isbS27t+TP
Byp5EsLVXIrfkIwsTw1OkdYrBJ1daLYljR7/3CwsPyTmV05ULdKzI32NzHgG5usL
6SiHBB1TiPcs2t4IFD/rKx+ZB7UYWKOVRuUNtAy3khzcjZk05IR/R58d0Uibscbe
+iA1umxlx4M0egeKiyDhJXCpmwUphNKGVNCv+WC8Ay6KJTBxn86Sh+/rCoWRl15f
snZsL4Va4DLHvQUzsxZwt9GsSBSZ7bmH05QMWpaTYJAqSy+s+FHRe8eeI9O+rJi8
2Qd7T01T+pl0DAN+A8yAxkcj9wwxOHzjlA1LGq9meVOGdoFFpbxAkORTQhZvaDk=
=dlU8
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging

# gpg: Signature made Wed Jul 22 12:43:35 2015 BST using RSA key ID 81AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>"
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>"

* remotes/stefanha/tags/block-pull-request:
  AioContext: optimize clearing the EventNotifier
  AioContext: fix broken placement of event_notifier_test_and_clear
  AioContext: fix broken ctx->dispatching optimization
  aio-win32: reorganize polling loop
  tests: remove irrelevant assertions from test-aio
  qemu-timer: initialize "timers_done_ev" to set
  mirror: Speed up bitmap initial scanning

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
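Taken together, the pull request replaces the old ctx->dispatching flag with a counted ctx->notify_me handshake and pairs aio_notify() with a new aio_notify_accept(). Before the per-file diffs, here is a minimal single-threaded C11 sketch of that handshake; it is not QEMU code, and the names and the simulated call order are illustrative only:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint notify_me;   /* nonzero: a waiter may block soon */
static atomic_bool event;       /* stands in for the EventNotifier  */

/* like aio_notify(): only pay for the wakeup when someone may sleep */
static void notify(void)
{
    if (atomic_load(&notify_me)) {
        atomic_store(&event, true);         /* event_notifier_set */
    }                                       /* else: wakeup safely skipped */
}

/* one aio_poll()-style iteration */
static void poll_iteration(bool blocking)
{
    if (blocking) {
        atomic_fetch_add(&notify_me, 2);    /* bit 0 is reserved for GSource */
    }
    /* ... the blocking poll() would happen here ... */
    if (blocking) {
        atomic_fetch_sub(&notify_me, 2);
    }
    if (atomic_exchange(&event, false)) {   /* like aio_notify_accept() */
        puts("wakeup consumed");
    }
}

int main(void)
{
    notify();                   /* no waiter advertised: skipped  */
    poll_iteration(true);       /* nothing to consume             */
    atomic_fetch_add(&notify_me, 2);
    notify();                   /* waiter advertised: event set   */
    atomic_fetch_sub(&notify_me, 2);
    poll_iteration(false);      /* prints "wakeup consumed"       */
    return 0;
}

The real code performs these steps concurrently across threads; the increments here play the role of atomic_add(&ctx->notify_me, 2) around the blocking poll() in the diffs below.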
commit dc94bd9166

aio-posix.c | 20 changed lines

--- a/aio-posix.c
+++ b/aio-posix.c
@@ -233,26 +233,23 @@ static void add_pollfd(AioHandler *node)
 bool aio_poll(AioContext *ctx, bool blocking)
 {
     AioHandler *node;
-    bool was_dispatching;
     int i, ret;
     bool progress;
     int64_t timeout;
 
     aio_context_acquire(ctx);
-    was_dispatching = ctx->dispatching;
     progress = false;
 
     /* aio_notify can avoid the expensive event_notifier_set if
      * everything (file descriptors, bottom halves, timers) will
      * be re-evaluated before the next blocking poll().  This is
      * already true when aio_poll is called with blocking == false;
-     * if blocking == true, it is only true after poll() returns.
-     *
-     * If we're in a nested event loop, ctx->dispatching might be true.
-     * In that case we can restore it just before returning, but we
-     * have to clear it now.
+     * if blocking == true, it is only true after poll() returns,
+     * so disable the optimization now.
      */
-    aio_set_dispatching(ctx, !blocking);
+    if (blocking) {
+        atomic_add(&ctx->notify_me, 2);
+    }
 
     ctx->walking_handlers++;
 
@@ -272,10 +269,15 @@ bool aio_poll(AioContext *ctx, bool blocking)
         aio_context_release(ctx);
     }
     ret = qemu_poll_ns((GPollFD *)pollfds, npfd, timeout);
+    if (blocking) {
+        atomic_sub(&ctx->notify_me, 2);
+    }
     if (timeout) {
         aio_context_acquire(ctx);
     }
 
+    aio_notify_accept(ctx);
+
     /* if we have any readable fds, dispatch event */
     if (ret > 0) {
         for (i = 0; i < npfd; i++) {
@@ -287,12 +289,10 @@ bool aio_poll(AioContext *ctx, bool blocking)
     ctx->walking_handlers--;
 
     /* Run dispatch even if there were no readable fds to run timers */
-    aio_set_dispatching(ctx, true);
     if (aio_dispatch(ctx)) {
         progress = true;
     }
 
-    aio_set_dispatching(ctx, was_dispatching);
     aio_context_release(ctx);
 
     return progress;
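For context on why aio-posix.c drops the flag: the old scheme could skip the wakeup exactly when it was needed. A hedged, sequential C rendering of one such lost-wakeup interleaving (simplified; the real race also involved memory ordering between bh->scheduled and ctx->dispatching, and nested event loops):

#include <stdbool.h>
#include <stdio.h>

static bool dispatching;   /* old scheme: notifier skipped the wakeup while true */
static bool bh_scheduled;  /* work the waiter must not miss */
static bool event;         /* stand-in for the EventNotifier */

int main(void)
{
    /* iothread: a nested event loop inside a handler leaves dispatching set */
    dispatching = true;

    /* other thread: qemu_bh_schedule() + aio_notify() under the old scheme */
    bh_scheduled = true;
    if (!dispatching) {
        event = true;   /* skipped: the waiter still looks busy */
    }

    /* iothread: nested loop returns, outer aio_poll() is about to block */
    dispatching = false;
    if (bh_scheduled && !event) {
        puts("about to block in poll() with a scheduled BH and no wakeup");
    }
    return 0;
}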
aio-win32.c | 48 changed lines

--- a/aio-win32.c
+++ b/aio-win32.c
@@ -279,30 +279,25 @@ bool aio_poll(AioContext *ctx, bool blocking)
 {
     AioHandler *node;
     HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
-    bool was_dispatching, progress, have_select_revents, first;
+    bool progress, have_select_revents, first;
     int count;
     int timeout;
 
     aio_context_acquire(ctx);
-    have_select_revents = aio_prepare(ctx);
-    if (have_select_revents) {
-        blocking = false;
-    }
-
-    was_dispatching = ctx->dispatching;
     progress = false;
 
     /* aio_notify can avoid the expensive event_notifier_set if
      * everything (file descriptors, bottom halves, timers) will
      * be re-evaluated before the next blocking poll().  This is
      * already true when aio_poll is called with blocking == false;
-     * if blocking == true, it is only true after poll() returns.
-     *
-     * If we're in a nested event loop, ctx->dispatching might be true.
-     * In that case we can restore it just before returning, but we
-     * have to clear it now.
+     * if blocking == true, it is only true after poll() returns,
+     * so disable the optimization now.
      */
-    aio_set_dispatching(ctx, !blocking);
+    if (blocking) {
+        atomic_add(&ctx->notify_me, 2);
+    }
+
+    have_select_revents = aio_prepare(ctx);
 
     ctx->walking_handlers++;
 
@@ -317,26 +312,36 @@ bool aio_poll(AioContext *ctx, bool blocking)
     ctx->walking_handlers--;
     first = true;
 
-    /* wait until next event */
-    while (count > 0) {
+    /* ctx->notifier is always registered.  */
+    assert(count > 0);
+
+    /* Multiple iterations, all of them non-blocking except the first,
+     * may be necessary to process all pending events.  After the first
+     * WaitForMultipleObjects call ctx->notify_me will be decremented.
+     */
+    do {
         HANDLE event;
         int ret;
 
-        timeout = blocking
+        timeout = blocking && !have_select_revents
             ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
         if (timeout) {
            aio_context_release(ctx);
        }
         ret = WaitForMultipleObjects(count, events, FALSE, timeout);
+        if (blocking) {
+            assert(first);
+            atomic_sub(&ctx->notify_me, 2);
+        }
         if (timeout) {
             aio_context_acquire(ctx);
         }
-        aio_set_dispatching(ctx, true);
 
-        if (first && aio_bh_poll(ctx)) {
-            progress = true;
-        }
-        first = false;
+        if (first) {
+            aio_notify_accept(ctx);
+            progress |= aio_bh_poll(ctx);
+            first = false;
+        }
 
         /* if we have any signaled events, dispatch event */
         event = NULL;
@@ -351,11 +356,10 @@ bool aio_poll(AioContext *ctx, bool blocking)
         blocking = false;
 
         progress |= aio_dispatch_handlers(ctx, event);
-    }
+    } while (count > 0);
 
     progress |= timerlistgroup_run_timers(&ctx->tlg);
 
-    aio_set_dispatching(ctx, was_dispatching);
     aio_context_release(ctx);
     return progress;
 }
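WaitForMultipleObjects reports at most one ready handle per call, which is why the reorganized loop above is a do/while that blocks only on the first iteration. A portable sketch of that shape, with wait_one() as a hypothetical stand-in for the Win32 call:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-in for WaitForMultipleObjects: returns the index of
 * one ready handle, or -1 for WAIT_TIMEOUT */
static int wait_one(const bool ready[], int count, int timeout_ms)
{
    (void)timeout_ms;           /* a real wait would block for this long */
    for (int i = 0; i < count; i++) {
        if (ready[i]) {
            return i;
        }
    }
    return -1;
}

int main(void)
{
    bool ready[3] = { true, true, false };
    int count = 3;
    bool first = true;

    do {
        /* only the first iteration may block; later ones use timeout 0 */
        int idx = wait_one(ready, count, first ? 1000 : 0);
        first = false;
        if (idx < 0) {
            break;              /* WAIT_TIMEOUT: nothing left to dispatch */
        }
        printf("dispatching handle %d\n", idx);
        ready[idx] = false;     /* the real loop also compacts events[] */
    } while (count > 0);
    return 0;
}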
async.c | 31 changed lines

--- a/async.c
+++ b/async.c
@@ -184,6 +184,8 @@ aio_ctx_prepare(GSource *source, gint *timeout)
 {
     AioContext *ctx = (AioContext *) source;
 
+    atomic_or(&ctx->notify_me, 1);
+
     /* We assume there is no timeout already supplied */
     *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));
 
@@ -200,6 +202,9 @@ aio_ctx_check(GSource *source)
     AioContext *ctx = (AioContext *) source;
     QEMUBH *bh;
 
+    atomic_and(&ctx->notify_me, ~1);
+    aio_notify_accept(ctx);
+
     for (bh = ctx->first_bh; bh; bh = bh->next) {
         if (!bh->deleted && bh->scheduled) {
             return true;
@@ -254,24 +259,22 @@ ThreadPool *aio_get_thread_pool(AioContext *ctx)
     return ctx->thread_pool;
 }
 
-void aio_set_dispatching(AioContext *ctx, bool dispatching)
+void aio_notify(AioContext *ctx)
 {
-    ctx->dispatching = dispatching;
-    if (!dispatching) {
-        /* Write ctx->dispatching before reading e.g. bh->scheduled.
-         * Optimization: this is only needed when we're entering the "unsafe"
-         * phase where other threads must call event_notifier_set.
-         */
-        smp_mb();
-    }
+    /* Write e.g. bh->scheduled before reading ctx->notify_me.  Pairs
+     * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
+     */
+    smp_mb();
+    if (ctx->notify_me) {
+        event_notifier_set(&ctx->notifier);
+        atomic_mb_set(&ctx->notified, true);
+    }
 }
 
-void aio_notify(AioContext *ctx)
+void aio_notify_accept(AioContext *ctx)
 {
-    /* Write e.g. bh->scheduled before reading ctx->dispatching. */
-    smp_mb();
-    if (!ctx->dispatching) {
-        event_notifier_set(&ctx->notifier);
-    }
+    if (atomic_xchg(&ctx->notified, false)) {
+        event_notifier_test_and_clear(&ctx->notifier);
+    }
 }
 
@@ -286,6 +289,10 @@ static void aio_rfifolock_cb(void *opaque)
     aio_notify(opaque);
 }
 
+static void event_notifier_dummy_cb(EventNotifier *e)
+{
+}
+
 AioContext *aio_context_new(Error **errp)
 {
     int ret;
@@ -300,7 +307,7 @@ AioContext *aio_context_new(Error **errp)
     g_source_set_can_recurse(&ctx->source, true);
     aio_set_event_notifier(ctx, &ctx->notifier,
                            (EventNotifierHandler *)
-                           event_notifier_test_and_clear);
+                           event_notifier_dummy_cb);
     ctx->thread_pool = NULL;
     qemu_mutex_init(&ctx->bh_lock);
     rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
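The async.c change splits notification into a cheap flag and the real EventNotifier: aio_notify() sets ctx->notified after event_notifier_set(), and aio_notify_accept() only pays for the clear when the flag was set. A standalone C11 sketch of that pairing (stand-in names; the real event_notifier_* calls are syscall-backed):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool notified;
static bool eventfd_state;      /* stand-in for the kernel-side notifier */

static void notifier_set(void)   { eventfd_state = true;  }  /* "syscall" */
static void notifier_clear(void) { eventfd_state = false; }  /* "syscall" */

static void notify(void)        /* like aio_notify() */
{
    notifier_set();
    atomic_store(&notified, true);      /* set after the notifier, like
                                         * atomic_mb_set() in the patch */
}

static void notify_accept(void) /* like aio_notify_accept() */
{
    /* the exchange ensures a single accepter clears the notifier, and the
     * expensive test-and-clear happens only if notify() actually ran */
    if (atomic_exchange(&notified, false)) {
        notifier_clear();
        puts("cleared the notifier");
    } else {
        puts("skipped event_notifier_test_and_clear");
    }
}

int main(void)
{
    notify_accept();    /* nothing pending: skipped */
    notify();
    notify_accept();    /* pending: cleared */
    return 0;
}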
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -388,7 +388,7 @@ static void coroutine_fn mirror_run(void *opaque)
     MirrorBlockJob *s = opaque;
     MirrorExitData *data;
     BlockDriverState *bs = s->common.bs;
-    int64_t sector_num, end, sectors_per_chunk, length;
+    int64_t sector_num, end, length;
     uint64_t last_pause_ns;
     BlockDriverInfo bdi;
     char backing_filename[2]; /* we only need 2 characters because we are only
@@ -442,7 +442,6 @@ static void coroutine_fn mirror_run(void *opaque)
         goto immediate_exit;
     }
 
-    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
     mirror_free_init(s);
 
     last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
@@ -450,7 +449,9 @@ static void coroutine_fn mirror_run(void *opaque)
         /* First part, loop on the sectors and initialize the dirty bitmap.  */
         BlockDriverState *base = s->base;
         for (sector_num = 0; sector_num < end; ) {
-            int64_t next = (sector_num | (sectors_per_chunk - 1)) + 1;
+            /* Just to make sure we are not exceeding int limit. */
+            int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
+                                 end - sector_num);
             int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
 
             if (now - last_pause_ns > SLICE_TIME) {
@@ -462,8 +463,7 @@ static void coroutine_fn mirror_run(void *opaque)
                 goto immediate_exit;
             }
 
-            ret = bdrv_is_allocated_above(bs, base,
-                                          sector_num, next - sector_num, &n);
+            ret = bdrv_is_allocated_above(bs, base, sector_num, nb_sectors, &n);
 
             if (ret < 0) {
                 goto immediate_exit;
@@ -472,10 +472,8 @@ static void coroutine_fn mirror_run(void *opaque)
             assert(n > 0);
             if (ret == 1) {
                 bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
-                sector_num = next;
-            } else {
-                sector_num += n;
             }
+            sector_num += n;
         }
     }
 
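The mirror change replaces per-chunk queries during the initial bitmap scan with queries over the largest range the int-based API allows, advancing by however many sectors each answer covers. A small self-contained sketch of the new loop shape, with is_allocated_sketch() standing in for bdrv_is_allocated_above():

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define BDRV_SECTOR_BITS 9
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* hypothetical stand-in: answers for up to nb_sectors consecutive sectors
 * and reports the answered length in *pnum */
static int is_allocated_sketch(int64_t sector_num, int nb_sectors, int *pnum)
{
    const int64_t boundary = 1024;      /* first 1024 sectors "allocated" */
    if (sector_num < boundary) {
        *pnum = (int)MIN(nb_sectors, boundary - sector_num);
        return 1;
    }
    *pnum = nb_sectors;
    return 0;
}

int main(void)
{
    int64_t end = 4096, sector_num;
    int queries = 0;

    for (sector_num = 0; sector_num < end; ) {
        /* just to make sure we are not exceeding the int limit */
        int nb_sectors = (int)MIN(INT_MAX >> BDRV_SECTOR_BITS,
                                  end - sector_num);
        int n;
        int ret = is_allocated_sketch(sector_num, nb_sectors, &n);
        queries++;
        if (ret == 1) {
            /* bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n) */
        }
        sector_num += n;        /* advance by the answered length */
    }
    printf("scanned %lld sectors in %d queries\n", (long long)end, queries);
    return 0;
}

With a uniform allocation pattern this takes two queries instead of one per granularity-sized chunk, which is where the speedup comes from.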
--- a/docs/aio_notify.promela
+++ b/docs/aio_notify.promela
@@ -1,5 +1,5 @@
 /*
- * This model describes the interaction between aio_set_dispatching()
+ * This model describes the interaction between ctx->notify_me
  * and aio_notify().
  *
  * Author: Paolo Bonzini <pbonzini@redhat.com>
@@ -14,57 +14,53 @@
  * spin -a docs/aio_notify.promela
  * gcc -O2 pan.c
  * ./a.out -a
+ *
+ * To verify it (with a bug planted in the model):
+ * spin -a -DBUG docs/aio_notify.promela
+ * gcc -O2 pan.c
+ * ./a.out -a
  */
 
 #define MAX   4
 #define LAST  (1 << (MAX - 1))
 #define FINAL ((LAST << 1) - 1)
 
-bool dispatching;
+bool notify_me;
 bool event;
 
-int req, done;
+int req;
+int done;
 
 active proctype waiter()
 {
-    int fetch, blocking;
+    int fetch;
 
     do
-       :: done != FINAL -> {
-           // Computing "blocking" is separate from execution of the
-           // "bottom half"
-           blocking = (req == 0);
-
-           // This is our "bottom half"
-           atomic { fetch = req; req = 0; }
-           done = done | fetch;
-
-           // Wait for a nudge from the other side
-           do
-              :: event == 1 -> { event = 0; break; }
-              :: !blocking  -> break;
-           od;
+       :: true -> {
+           notify_me++;
 
-           dispatching = 1;
+           if
+#ifndef BUG
+              :: (req > 0) -> skip;
+#endif
+              :: else ->
+                 // Wait for a nudge from the other side
+                 do
+                    :: event == 1 -> { event = 0; break; }
+                 od;
+           fi;
 
-           // If you are simulating this model, you may want to add
-           // something like this here:
-           //
-           //      int foo; foo++; foo++; foo++;
-           //
-           // This only wastes some time and makes it more likely
-           // that the notifier process hits the "fast path".
+           notify_me--;
 
-           dispatching = 0;
+           atomic { fetch = req; req = 0; }
+           done = done | fetch;
        }
-       :: else -> break;
     od
 }
 
 active proctype notifier()
 {
     int next = 1;
-    int sets = 0;
 
@@ -74,8 +70,8 @@ active proctype notifier()
 
        // aio_notify
        if
-          :: dispatching == 0 -> sets++; event = 1;
-          :: else             -> skip;
+          :: notify_me == 1 -> event = 1;
+          :: else           -> printf("Skipped event_notifier_set\n"); skip;
        fi;
 
        // Test both synchronous and asynchronous delivery
@@ -86,19 +82,12 @@ active proctype notifier()
           :: 1 -> skip;
        fi;
     }
-    :: else -> break;
     od;
-    printf("Skipped %d event_notifier_set\n", MAX - sets);
 }
 
-#define p (done == FINAL)
-
-never {
+never { /* [] done < FINAL */
+accept_init:
     do
-       :: 1                   // after an arbitrarily long prefix
-       :: p -> break          // p becomes true
+       :: done < FINAL -> skip;
     od;
-    do
-       :: !p -> accept: break // it then must remains true forever after
-    od
 }
--- /dev/null
+++ b/docs/aio_notify_accept.promela
@@ -0,0 +1,152 @@
+/*
+ * This model describes the interaction between ctx->notified
+ * and ctx->notifier.
+ *
+ * Author: Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * This file is in the public domain.  If you really want a license,
+ * the WTFPL will do.
+ *
+ * To verify the buggy version:
+ *     spin -a -DBUG1 docs/aio_notify_bug.promela
+ *     gcc -O2 pan.c
+ *     ./a.out -a -f
+ * (or -DBUG2)
+ *
+ * To verify the fixed version:
+ *     spin -a docs/aio_notify_bug.promela
+ *     gcc -O2 pan.c
+ *     ./a.out -a -f
+ *
+ * Add -DCHECK_REQ to test an alternative invariant and the
+ * "notify_me" optimization.
+ */
+
+int notify_me;
+bool notified;
+bool event;
+bool req;
+bool notifier_done;
+
+#ifdef CHECK_REQ
+#define USE_NOTIFY_ME 1
+#else
+#define USE_NOTIFY_ME 0
+#endif
+
+#ifdef BUG
+#error Please define BUG1 or BUG2 instead.
+#endif
+
+active proctype notifier()
+{
+    do
+        :: true -> {
+            req = 1;
+            if
+               :: !USE_NOTIFY_ME || notify_me ->
+#if defined BUG1
+                   /* CHECK_REQ does not detect this bug! */
+                   notified = 1;
+                   event = 1;
+#elif defined BUG2
+                   if
+                      :: !notified -> event = 1;
+                      :: else      -> skip;
+                   fi;
+                   notified = 1;
+#else
+                   event = 1;
+                   notified = 1;
+#endif
+               :: else -> skip;
+            fi
+        }
+        :: true -> break;
+    od;
+    notifier_done = 1;
+}
+
+#define AIO_POLL                                 \
+    notify_me++;                                 \
+    if                                           \
+       :: !req -> {                              \
+           if                                    \
+              :: event -> skip;                  \
+           fi;                                   \
+       }                                         \
+       :: else -> skip;                          \
+    fi;                                          \
+    notify_me--;                                 \
+                                                 \
+    atomic { old = notified; notified = 0; }     \
+    if                                           \
+       :: old -> event = 0;                      \
+       :: else -> skip;                          \
+    fi;                                          \
+                                                 \
+    req = 0;
+
+active proctype waiter()
+{
+    bool old;
+
+    do
+       :: true -> AIO_POLL;
+    od;
+}
+
+/* Same as waiter(), but disappears after a while.  */
+active proctype temporary_waiter()
+{
+    bool old;
+
+    do
+       :: true -> AIO_POLL;
+       :: true -> break;
+    od;
+}
+
+#ifdef CHECK_REQ
+never {
+    do
+       :: req -> goto accept_if_req_not_eventually_false;
+       :: true -> skip;
+    od;
+
+accept_if_req_not_eventually_false:
+    if
+       :: req -> goto accept_if_req_not_eventually_false;
+    fi;
+    assert(0);
+}
+
+#else
+/* There must be infinitely many transitions of event as long
+ * as the notifier does not exit.
+ *
+ * If event stayed always true, the waiters would be busy looping.
+ * If event stayed always false, the waiters would be sleeping
+ * forever.
+ */
+never {
+    do
+       :: !event -> goto accept_if_event_not_eventually_true;
+       :: event  -> goto accept_if_event_not_eventually_false;
+       :: true   -> skip;
+    od;
+
+accept_if_event_not_eventually_true:
+    if
+       :: !event && notifier_done  -> do :: true -> skip; od;
+       :: !event && !notifier_done -> goto accept_if_event_not_eventually_true;
+    fi;
+    assert(0);
+
+accept_if_event_not_eventually_false:
+    if
+       :: event -> goto accept_if_event_not_eventually_false;
+    fi;
+    assert(0);
+}
+#endif
--- /dev/null
+++ b/docs/aio_notify_bug.promela
@@ -0,0 +1,140 @@
+/*
+ * This model describes a bug in aio_notify.  If ctx->notifier is
+ * cleared too late, a wakeup could be lost.
+ *
+ * Author: Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * This file is in the public domain.  If you really want a license,
+ * the WTFPL will do.
+ *
+ * To verify the buggy version:
+ *     spin -a -DBUG docs/aio_notify_bug.promela
+ *     gcc -O2 pan.c
+ *     ./a.out -a -f
+ *
+ * To verify the fixed version:
+ *     spin -a docs/aio_notify_bug.promela
+ *     gcc -O2 pan.c
+ *     ./a.out -a -f
+ *
+ * Add -DCHECK_REQ to test an alternative invariant and the
+ * "notify_me" optimization.
+ */
+
+int notify_me;
+bool event;
+bool req;
+bool notifier_done;
+
+#ifdef CHECK_REQ
+#define USE_NOTIFY_ME 1
+#else
+#define USE_NOTIFY_ME 0
+#endif
+
+active proctype notifier()
+{
+    do
+        :: true -> {
+            req = 1;
+            if
+               :: !USE_NOTIFY_ME || notify_me -> event = 1;
+               :: else -> skip;
+            fi
+        }
+        :: true -> break;
+    od;
+    notifier_done = 1;
+}
+
+#ifdef BUG
+#define AIO_POLL                                 \
+    notify_me++;                                 \
+    if                                           \
+       :: !req -> {                              \
+           if                                    \
+              :: event -> skip;                  \
+           fi;                                   \
+       }                                         \
+       :: else -> skip;                          \
+    fi;                                          \
+    notify_me--;                                 \
+                                                 \
+    req = 0;                                     \
+    event = 0;
+#else
+#define AIO_POLL                                 \
+    notify_me++;                                 \
+    if                                           \
+       :: !req -> {                              \
+           if                                    \
+              :: event -> skip;                  \
+           fi;                                   \
+       }                                         \
+       :: else -> skip;                          \
+    fi;                                          \
+    notify_me--;                                 \
+                                                 \
+    event = 0;                                   \
+    req = 0;
+#endif
+
+active proctype waiter()
+{
+    do
+       :: true -> AIO_POLL;
+    od;
+}
+
+/* Same as waiter(), but disappears after a while.  */
+active proctype temporary_waiter()
+{
+    do
+       :: true -> AIO_POLL;
+       :: true -> break;
+    od;
+}
+
+#ifdef CHECK_REQ
+never {
+    do
+       :: req -> goto accept_if_req_not_eventually_false;
+       :: true -> skip;
+    od;
+
+accept_if_req_not_eventually_false:
+    if
+       :: req -> goto accept_if_req_not_eventually_false;
+    fi;
+    assert(0);
+}
+
+#else
+/* There must be infinitely many transitions of event as long
+ * as the notifier does not exit.
+ *
+ * If event stayed always true, the waiters would be busy looping.
+ * If event stayed always false, the waiters would be sleeping
+ * forever.
+ */
+never {
+    do
+       :: !event -> goto accept_if_event_not_eventually_true;
+       :: event  -> goto accept_if_event_not_eventually_false;
+       :: true   -> skip;
+    od;
+
+accept_if_event_not_eventually_true:
+    if
+       :: !event && notifier_done  -> do :: true -> skip; od;
+       :: !event && !notifier_done -> goto accept_if_event_not_eventually_true;
+    fi;
+    assert(0);
+
+accept_if_event_not_eventually_false:
+    if
+       :: event -> goto accept_if_event_not_eventually_false;
+    fi;
+    assert(0);
+}
+#endif
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -63,10 +63,30 @@ struct AioContext {
      */
     int walking_handlers;
 
-    /* Used to avoid unnecessary event_notifier_set calls in aio_notify.
-     * Writes protected by lock or BQL, reads are lockless.
+    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
+     * accessed with atomic primitives.  If this field is 0, everything
+     * (file descriptors, bottom halves, timers) will be re-evaluated
+     * before the next blocking poll(), thus the event_notifier_set call
+     * can be skipped.  If it is non-zero, you may need to wake up a
+     * concurrent aio_poll or the glib main event loop, making
+     * event_notifier_set necessary.
+     *
+     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
+     * between a call to aio_ctx_check and the next call to aio_ctx_dispatch.
+     * Bits 1-31 simply count the number of active calls to aio_poll
+     * that are in the prepare or poll phase.
+     *
+     * The GSource and aio_poll must use a different mechanism because
+     * there is no certainty that a call to GSource's prepare callback
+     * (via g_main_context_prepare) is indeed followed by check and
+     * dispatch.  It's not clear whether this would be a bug, but let's
+     * play safe and allow it---it will just cause extra calls to
+     * event_notifier_set until the next call to dispatch.
+     *
+     * Instead, the aio_poll calls include both the prepare and the
+     * dispatch phase, hence a simple counter is enough for them.
      */
-    bool dispatching;
+    uint32_t notify_me;
 
     /* lock to protect between bh's adders and deleter */
     QemuMutex bh_lock;
@@ -79,7 +99,19 @@ struct AioContext {
      */
     int walking_bh;
 
-    /* Used for aio_notify.  */
+    /* Used by aio_notify.
+     *
+     * "notified" is used to avoid expensive event_notifier_test_and_clear
+     * calls.  When it is clear, the EventNotifier is clear, or one thread
+     * is going to clear "notified" before processing more events.  False
+     * positives are possible, i.e. "notified" could be set even though the
+     * EventNotifier is clear.
+     *
+     * Note that event_notifier_set *cannot* be optimized the same way.  For
+     * more information on the problem that would result, see "#ifdef BUG2"
+     * in the docs/aio_notify_accept.promela formal model.
+     */
+    bool notified;
     EventNotifier notifier;
 
     /* Thread pool for performing work and receiving completion callbacks */
@@ -89,9 +121,6 @@ struct AioContext {
     QEMUTimerListGroup tlg;
 };
 
-/* Used internally to synchronize aio_poll against qemu_bh_schedule.  */
-void aio_set_dispatching(AioContext *ctx, bool dispatching);
-
 /**
  * aio_context_new: Allocate a new AioContext.
  *
@@ -156,6 +185,24 @@ QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
  */
 void aio_notify(AioContext *ctx);
 
+/**
+ * aio_notify_accept: Acknowledge receiving an aio_notify.
+ *
+ * aio_notify() uses an EventNotifier in order to wake up a sleeping
+ * aio_poll() or g_main_context_iteration().  Calls to aio_notify() are
+ * usually rare, but the AioContext has to clear the EventNotifier on
+ * every aio_poll() or g_main_context_iteration() in order to avoid
+ * busy waiting.  This event_notifier_test_and_clear() cannot be done
+ * using the usual aio_context_set_event_notifier(), because it must
+ * be done before processing all events (file descriptors, bottom halves,
+ * timers).
+ *
+ * aio_notify_accept() is an optimized event_notifier_test_and_clear()
+ * that is specific to an AioContext's notifier; it is used internally
+ * to clear the EventNotifier only if aio_notify() had been called.
+ */
+void aio_notify_accept(AioContext *ctx);
+
 /**
  * aio_bh_poll: Poll bottom halves for an AioContext.
  *
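The new comment above encodes two kinds of waiters in one word: bit 0 for the GSource between prepare and check, bits 1-31 as a counter of aio_poll callers. A runnable C11 illustration of that encoding (illustrative only, not QEMU's atomics API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint notify_me;

int main(void)
{
    atomic_fetch_or(&notify_me, 1);     /* aio_ctx_prepare: GSource waits */
    atomic_fetch_add(&notify_me, 2);    /* one aio_poll enters its poll phase */
    atomic_fetch_add(&notify_me, 2);    /* a second, concurrent aio_poll */
    printf("notify_me = %u (GSource bit + two pollers)\n",
           atomic_load(&notify_me));    /* prints 5 */

    atomic_fetch_sub(&notify_me, 2);
    atomic_fetch_sub(&notify_me, 2);
    atomic_fetch_and(&notify_me, ~1u);  /* aio_ctx_check: clear bit 0 */
    printf("notify_me = %u (zero: aio_notify may skip the wakeup)\n",
           atomic_load(&notify_me));
    return 0;
}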
--- a/qemu-timer.c
+++ b/qemu-timer.c
@@ -99,7 +99,7 @@ QEMUTimerList *timerlist_new(QEMUClockType type,
     QEMUClock *clock = qemu_clock_ptr(type);
 
     timer_list = g_malloc0(sizeof(QEMUTimerList));
-    qemu_event_init(&timer_list->timers_done_ev, false);
+    qemu_event_init(&timer_list->timers_done_ev, true);
     timer_list->clock = clock;
     timer_list->notify_cb = cb;
     timer_list->notify_opaque = opaque;
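timers_done_ev means "no timer callbacks are currently running", so it must start out set: a waiter (for instance, code that disables a clock and waits for in-flight callbacks) could otherwise block before any timer has ever run. A minimal sketch of the failure mode, with a plain struct standing in for QemuEvent:

#include <stdbool.h>
#include <stdio.h>

typedef struct { bool set; } Event;     /* stand-in for QemuEvent */

static void wait_for(const Event *ev)   /* stand-in for qemu_event_wait */
{
    if (ev->set) {
        puts("proceeds immediately");
    } else {
        puts("would block forever: no timer callback will ever set it");
    }
}

int main(void)
{
    Event old_init = { .set = false };  /* qemu_event_init(..., false) */
    Event new_init = { .set = true };   /* qemu_event_init(..., true)  */

    /* a waiter that must be sure no timer callback is in flight,
     * arriving before any timer has ever run on this timer list */
    wait_for(&old_init);
    wait_for(&new_init);
    return 0;
}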
--- a/tests/test-aio.c
+++ b/tests/test-aio.c
@@ -97,14 +97,6 @@ static void event_ready_cb(EventNotifier *e)
 
 /* Tests using aio_*.  */
 
-static void test_notify(void)
-{
-    g_assert(!aio_poll(ctx, false));
-    aio_notify(ctx);
-    g_assert(!aio_poll(ctx, true));
-    g_assert(!aio_poll(ctx, false));
-}
-
 typedef struct {
     QemuMutex start_lock;
     bool thread_acquired;
@@ -331,7 +323,7 @@ static void test_wait_event_notifier(void)
     EventNotifierTestData data = { .n = 0, .active = 1 };
     event_notifier_init(&data.e, false);
     aio_set_event_notifier(ctx, &data.e, event_ready_cb);
-    g_assert(!aio_poll(ctx, false));
+    while (aio_poll(ctx, false));
     g_assert_cmpint(data.n, ==, 0);
     g_assert_cmpint(data.active, ==, 1);
 
@@ -356,7 +348,7 @@ static void test_flush_event_notifier(void)
     EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
     event_notifier_init(&data.e, false);
     aio_set_event_notifier(ctx, &data.e, event_ready_cb);
-    g_assert(!aio_poll(ctx, false));
+    while (aio_poll(ctx, false));
     g_assert_cmpint(data.n, ==, 0);
     g_assert_cmpint(data.active, ==, 10);
 
@@ -494,14 +486,6 @@ static void test_timer_schedule(void)
  * works well, and that's what I am using.
  */
 
-static void test_source_notify(void)
-{
-    while (g_main_context_iteration(NULL, false));
-    aio_notify(ctx);
-    g_assert(g_main_context_iteration(NULL, true));
-    g_assert(!g_main_context_iteration(NULL, false));
-}
-
 static void test_source_flush(void)
 {
     g_assert(!g_main_context_iteration(NULL, false));
@@ -669,7 +653,7 @@ static void test_source_wait_event_notifier(void)
     EventNotifierTestData data = { .n = 0, .active = 1 };
     event_notifier_init(&data.e, false);
     aio_set_event_notifier(ctx, &data.e, event_ready_cb);
-    g_assert(g_main_context_iteration(NULL, false));
+    while (g_main_context_iteration(NULL, false));
     g_assert_cmpint(data.n, ==, 0);
     g_assert_cmpint(data.active, ==, 1);
 
@@ -694,7 +678,7 @@ static void test_source_flush_event_notifier(void)
     EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
     event_notifier_init(&data.e, false);
     aio_set_event_notifier(ctx, &data.e, event_ready_cb);
-    g_assert(g_main_context_iteration(NULL, false));
+    while (g_main_context_iteration(NULL, false));
     g_assert_cmpint(data.n, ==, 0);
     g_assert_cmpint(data.active, ==, 10);
 
@@ -830,7 +814,6 @@ int main(int argc, char **argv)
     while (g_main_context_iteration(NULL, false));
 
     g_test_init(&argc, &argv, NULL);
-    g_test_add_func("/aio/notify", test_notify);
    g_test_add_func("/aio/acquire", test_acquire);
     g_test_add_func("/aio/bh/schedule", test_bh_schedule);
     g_test_add_func("/aio/bh/schedule10", test_bh_schedule10);
@@ -845,7 +828,6 @@ int main(int argc, char **argv)
     g_test_add_func("/aio/event/flush", test_flush_event_notifier);
     g_test_add_func("/aio/timer/schedule", test_timer_schedule);
 
-    g_test_add_func("/aio-gsource/notify", test_source_notify);
     g_test_add_func("/aio-gsource/flush", test_source_flush);
     g_test_add_func("/aio-gsource/bh/schedule", test_source_bh_schedule);
     g_test_add_func("/aio-gsource/bh/schedule10", test_source_bh_schedule10);
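With event_notifier_dummy_cb installed, a pending aio_notify() makes the next aio_poll() report progress, so the tests above replace one-shot idle assertions with a drain loop. A trivial sketch of that idiom (poll_once() is a hypothetical stand-in for aio_poll(ctx, false)):

#include <stdbool.h>
#include <stdio.h>

static int pending = 3;     /* pretend three wakeups/BHs are queued */

/* hypothetical stand-in for aio_poll(ctx, false): true while progress */
static bool poll_once(void)
{
    if (pending > 0) {
        pending--;
        return true;
    }
    return false;
}

int main(void)
{
    /* the new idiom: drain until quiescent instead of asserting that
     * the very first non-blocking poll makes no progress */
    while (poll_once()) {
        /* keep polling */
    }
    printf("quiescent, pending = %d\n", pending);
    return 0;
}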