mirror of https://github.com/xemu-project/xemu.git
aio-posix: partially inline aio_dispatch into aio_poll
This patch prepares for the removal of unnecessary lockcnt inc/dec pairs.
Extract the dispatching loop for file descriptor handlers into a new
function aio_dispatch_handlers, and then inline aio_dispatch into
aio_poll.  aio_dispatch can now become void.

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
Message-id: 20170213135235.12274-17-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit a153bf52b3
parent b9e413dd37
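The key structural move described above is that the reference on the handler list (ctx->list_lock) is now taken by the callers of aio_dispatch_handlers() — aio_dispatch() and aio_poll() — instead of inside the loop itself, which is what later lets adjacent dec/inc pairs be merged. Below is a minimal, self-contained C sketch of that caller-side pattern; list_ref, list_unref, dispatch_handlers and poll_once are hypothetical stand-ins for illustration, not QEMU APIs.

/* Hypothetical sketch, not QEMU code: the caller takes the list reference
 * around the handler-dispatch loop, mirroring the new aio_poll() tail. */
#include <stdbool.h>
#include <stdio.h>

static int list_refcnt;                     /* stands in for ctx->list_lock */

static void list_ref(void)   { list_refcnt++; }
static void list_unref(void) { list_refcnt--; }

/* Walks the handler list; assumes the caller already holds a reference. */
static bool dispatch_handlers(void)
{
    printf("dispatching handlers (refcnt=%d)\n", list_refcnt);
    return true;                            /* pretend progress was made */
}

/* Shape of the new aio_poll() tail: bottom halves, then fd handlers only
 * when poll reported ready fds, then timers. */
static bool poll_once(int ready_fds)
{
    bool progress = false;

    /* bottom halves would be polled here (aio_bh_poll in the real code) */
    if (ready_fds > 0) {
        list_ref();                         /* caller holds the reference... */
        progress |= dispatch_handlers();    /* ...across the whole walk      */
        list_unref();
    }
    /* timers would run here (timerlistgroup_run_timers in the real code) */
    return progress;
}

int main(void)
{
    poll_once(1);                           /* fds ready: handlers run */
    poll_once(0);                           /* nothing ready: loop skipped */
    return 0;
}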
diff --git a/include/block/aio.h b/include/block/aio.h
@@ -310,12 +310,8 @@ bool aio_pending(AioContext *ctx);
 /* Dispatch any pending callbacks from the GSource attached to the AioContext.
  *
  * This is used internally in the implementation of the GSource.
- *
- * @dispatch_fds: true to process fds, false to skip them
- *                (can be used as an optimization by callers that know there
- *                are no fds ready)
  */
-bool aio_dispatch(AioContext *ctx, bool dispatch_fds);
+void aio_dispatch(AioContext *ctx);
 
 /* Progress in completing AIO work to occur. This can issue new pending
  * aio as a result of executing I/O completion or bh callbacks.
diff --git a/util/aio-posix.c b/util/aio-posix.c
@@ -386,12 +386,6 @@ static bool aio_dispatch_handlers(AioContext *ctx)
     AioHandler *node, *tmp;
     bool progress = false;
 
-    /*
-     * We have to walk very carefully in case aio_set_fd_handler is
-     * called while we're walking.
-     */
-    qemu_lockcnt_inc(&ctx->list_lock);
-
     QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
         int revents;
 
@@ -426,33 +420,18 @@ static bool aio_dispatch_handlers(AioContext *ctx)
         }
     }
 
-    qemu_lockcnt_dec(&ctx->list_lock);
     return progress;
 }
 
-/*
- * Note that dispatch_fds == false has the side-effect of post-poning the
- * freeing of deleted handlers.
- */
-bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
+void aio_dispatch(AioContext *ctx)
 {
-    bool progress;
+    aio_bh_poll(ctx);
 
-    /*
-     * If there are callbacks left that have been queued, we need to call them.
-     * Do not call select in this case, because it is possible that the caller
-     * does not need a complete flush (as is the case for aio_poll loops).
-     */
-    progress = aio_bh_poll(ctx);
+    qemu_lockcnt_inc(&ctx->list_lock);
+    aio_dispatch_handlers(ctx);
+    qemu_lockcnt_dec(&ctx->list_lock);
 
-    if (dispatch_fds) {
-        progress |= aio_dispatch_handlers(ctx);
-    }
-
-    /* Run our timers */
-    progress |= timerlistgroup_run_timers(&ctx->tlg);
-
-    return progress;
+    timerlistgroup_run_timers(&ctx->tlg);
 }
 
 /* These thread-local variables are used only in a small part of aio_poll
@@ -702,11 +681,16 @@ bool aio_poll(AioContext *ctx, bool blocking)
     npfd = 0;
     qemu_lockcnt_dec(&ctx->list_lock);
 
-    /* Run dispatch even if there were no readable fds to run timers */
-    if (aio_dispatch(ctx, ret > 0)) {
-        progress = true;
+    progress |= aio_bh_poll(ctx);
+
+    if (ret > 0) {
+        qemu_lockcnt_inc(&ctx->list_lock);
+        progress |= aio_dispatch_handlers(ctx);
+        qemu_lockcnt_dec(&ctx->list_lock);
     }
 
+    progress |= timerlistgroup_run_timers(&ctx->tlg);
+
     return progress;
 }
 
diff --git a/util/aio-win32.c b/util/aio-win32.c
@@ -309,16 +309,11 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
     return progress;
 }
 
-bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
+void aio_dispatch(AioContext *ctx)
 {
-    bool progress;
-
-    progress = aio_bh_poll(ctx);
-    if (dispatch_fds) {
-        progress |= aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
-    }
-    progress |= timerlistgroup_run_timers(&ctx->tlg);
-    return progress;
+    aio_bh_poll(ctx);
+    aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
+    timerlistgroup_run_timers(&ctx->tlg);
 }
 
 bool aio_poll(AioContext *ctx, bool blocking)
diff --git a/util/async.c b/util/async.c
@@ -258,7 +258,7 @@ aio_ctx_dispatch(GSource *source,
     AioContext *ctx = (AioContext *) source;
 
     assert(callback == NULL);
-    aio_dispatch(ctx, true);
+    aio_dispatch(ctx);
     return true;
 }
 