mirror of https://github.com/xemu-project/xemu.git
test-aio-multithread: simplify test_multi_co_schedule
Instead of using qatomic_mb_{read,set} mindlessly, just use a per-coroutine
flag that requires no synchronization.

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 355635c018
parent 4f7335e21d
@@ -107,8 +107,7 @@ static void test_lifecycle(void)
 /* aio_co_schedule test. */
 
 static Coroutine *to_schedule[NUM_CONTEXTS];
-
-static bool now_stopping;
+static bool stop[NUM_CONTEXTS];
 
 static int count_retry;
 static int count_here;
@@ -136,6 +135,7 @@ static bool schedule_next(int n)
 
 static void finish_cb(void *opaque)
 {
+    stop[id] = true;
     schedule_next(id);
 }
 
@@ -143,13 +143,19 @@ static coroutine_fn void test_multi_co_schedule_entry(void *opaque)
 {
     g_assert(to_schedule[id] == NULL);
 
-    while (!qatomic_mb_read(&now_stopping)) {
+    /*
+     * The next iteration will set to_schedule[id] again, but once finish_cb
+     * is scheduled there is no guarantee that it will actually be woken up,
+     * so at that point it must not go to sleep.
+     */
+    while (!stop[id]) {
         int n;
 
         n = g_test_rand_int_range(0, NUM_CONTEXTS);
         schedule_next(n);
 
         qatomic_mb_set(&to_schedule[id], qemu_coroutine_self());
+        /* finish_cb can run here. */
         qemu_coroutine_yield();
         g_assert(to_schedule[id] == NULL);
     }
@@ -161,7 +167,6 @@ static void test_multi_co_schedule(int seconds)
     int i;
 
     count_here = count_other = count_retry = 0;
-    now_stopping = false;
 
     create_aio_contexts();
     for (i = 0; i < NUM_CONTEXTS; i++) {
@@ -171,10 +176,10 @@ static void test_multi_co_schedule(int seconds)
 
     g_usleep(seconds * 1000000);
 
-    qatomic_mb_set(&now_stopping, true);
+    /* Guarantee that each AioContext is woken up from its last wait. */
     for (i = 0; i < NUM_CONTEXTS; i++) {
         ctx_run(i, finish_cb, NULL);
-        to_schedule[i] = NULL;
+        g_assert(to_schedule[i] == NULL);
     }
 
     join_aio_contexts();
@@ -199,6 +204,7 @@ static uint32_t atomic_counter;
 static uint32_t running;
 static uint32_t counter;
 static CoMutex comutex;
+static bool now_stopping;
 
 static void coroutine_fn test_multi_co_mutex_entry(void *opaque)
 {
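For readers outside the QEMU tree, below is a minimal standalone sketch of the pattern the commit message describes: each worker owns a plain stop flag that is only ever written from the worker's own context, and the request to stop is delivered as a message rather than polled through one shared, barrier-protected global. The sketch uses plain pthreads with a mutex-protected request bit as a stand-in for the ctx_run()/finish_cb() wakeup; worker_t, post_stop() and the other names are invented for illustration and do not appear in the patch.

/*
 * Sketch only, not part of the patch.  Mirrors the idea of stop[id]:
 * w->stop is touched only by the worker itself, so it needs no atomics;
 * the cross-thread handoff goes through an already synchronized channel
 * (here a mutex-protected request bit, in the test the ctx_run() wakeup).
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

typedef struct {
    pthread_t thread;
    pthread_mutex_t lock;
    bool stop_requested;   /* written by main, read by worker, under lock */
    bool stop;             /* plain flag, accessed only by the worker itself */
    long iterations;
} worker_t;

static void *worker_main(void *opaque)
{
    worker_t *w = opaque;

    while (!w->stop) {             /* no barriers: only this thread writes w->stop */
        w->iterations++;
        usleep(1000);              /* simulate work between wakeups */

        /* The "wakeup" that may carry a stop request, like finish_cb(). */
        pthread_mutex_lock(&w->lock);
        if (w->stop_requested) {
            w->stop = true;        /* analogue of finish_cb setting stop[id] */
        }
        pthread_mutex_unlock(&w->lock);
    }
    return NULL;
}

static void post_stop(worker_t *w)
{
    pthread_mutex_lock(&w->lock);
    w->stop_requested = true;      /* delivered to one worker, not polled globally */
    pthread_mutex_unlock(&w->lock);
}

int main(void)
{
    enum { NUM_WORKERS = 4 };
    worker_t workers[NUM_WORKERS];

    for (int i = 0; i < NUM_WORKERS; i++) {
        pthread_mutex_init(&workers[i].lock, NULL);
        workers[i].stop_requested = false;
        workers[i].stop = false;
        workers[i].iterations = 0;
        pthread_create(&workers[i].thread, NULL, worker_main, &workers[i]);
    }

    sleep(1);

    for (int i = 0; i < NUM_WORKERS; i++) {
        post_stop(&workers[i]);
        pthread_join(workers[i].thread, NULL);
        printf("worker %d: %ld iterations\n", i, workers[i].iterations);
    }
    return 0;
}

The design point is the same as in the patch: the loop-control flag itself can be a plain per-worker variable because the only writer runs in the worker's own context, so the explicit qatomic_mb_{read,set} pair on a shared global becomes unnecessary.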