aio-posix: compute timeout before polling
This is a preparation for the next patch, and also a very small
optimization. Compute the timeout only once, before invoking
try_poll_mode, and adjust it in run_poll_handlers. The adjustment
is the polling time when polling fails, or zero (non-blocking) if
polling succeeds.
Fixes: 70232b5253
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20180912171040.1732-3-pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
parent d7be5dd19c
commit e30cffa04d
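In outline, the patch hoists the timeout computation out of try_poll_mode() into aio_poll() and threads a pointer to it through the polling helpers. A minimal sketch of the resulting control flow in aio_poll(), with tracing and pollfd bookkeeping omitted (not the complete function):

    timeout = blocking ? aio_compute_timeout(ctx) : 0;
    progress = try_poll_mode(ctx, &timeout);

    /* Successful polling zeroes the timeout, so a non-zero timeout
     * implies polling made no progress. */
    assert(!(timeout && progress));

    if (timeout || atomic_read(&ctx->poll_disable_cnt)) {
        /* Slow path: fill pollfds and block in ppoll(2)/epoll_wait(2)
         * with the already-adjusted timeout. */
    }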
diff --git a/util/aio-posix.c b/util/aio-posix.c
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -490,7 +490,7 @@ static void add_pollfd(AioHandler *node)
     npfd++;
 }
 
-static bool run_poll_handlers_once(AioContext *ctx)
+static bool run_poll_handlers_once(AioContext *ctx, int64_t *timeout)
 {
     bool progress = false;
     AioHandler *node;
@@ -500,6 +500,7 @@ static bool run_poll_handlers_once(AioContext *ctx)
             aio_node_check(ctx, node->is_external) &&
             node->io_poll(node->opaque) &&
             node->opaque != &ctx->notifier) {
+            *timeout = 0;
             progress = true;
         }
 
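A node's io_poll callback must check for pending work without blocking; as soon as any callback reports readiness, *timeout is forced to zero so that a later ppoll(2)/epoll_wait(2), if it runs at all, returns immediately. A hypothetical callback of that shape (the device type and its fields are invented for illustration, not QEMU API):

    #include <stdbool.h>

    /* Hypothetical device state with a completion ring. */
    struct my_dev {
        unsigned ring_head;
        unsigned ring_tail;
    };

    /* Matches the io_poll(opaque) usage in the hunk above: report
     * readiness without blocking or consuming events. */
    static bool my_dev_io_poll(void *opaque)
    {
        struct my_dev *dev = opaque;

        return dev->ring_head != dev->ring_tail;
    }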
@@ -522,31 +523,38 @@ static bool run_poll_handlers_once(AioContext *ctx)
  *
  * Returns: true if progress was made, false otherwise
  */
-static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
+static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout)
 {
     bool progress;
-    int64_t end_time;
+    int64_t start_time, elapsed_time;
 
     assert(ctx->notify_me);
     assert(qemu_lockcnt_count(&ctx->list_lock) > 0);
 
-    trace_run_poll_handlers_begin(ctx, max_ns);
-
-    end_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + max_ns;
+    trace_run_poll_handlers_begin(ctx, max_ns, *timeout);
 
+    start_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
     do {
-        progress = run_poll_handlers_once(ctx);
-    } while (!progress && qemu_clock_get_ns(QEMU_CLOCK_REALTIME) < end_time
+        progress = run_poll_handlers_once(ctx, timeout);
+        elapsed_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start_time;
+    } while (!progress && elapsed_time < max_ns
              && !atomic_read(&ctx->poll_disable_cnt));
 
-    trace_run_poll_handlers_end(ctx, progress);
+    /* If time has passed with no successful polling, adjust *timeout to
+     * keep the same ending time.
+     */
+    if (*timeout != -1) {
+        *timeout -= MIN(*timeout, elapsed_time);
+    }
 
+    trace_run_poll_handlers_end(ctx, progress, *timeout);
     return progress;
 }
 
 /* try_poll_mode:
  * @ctx: the AioContext
- * @blocking: busy polling is only attempted when blocking is true
+ * @timeout: timeout for blocking wait, computed by the caller and updated if
+ *           polling succeeds.
  *
  * ctx->notify_me must be non-zero so this function can detect aio_notify().
  *
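The new tail of run_poll_handlers() keeps the caller's deadline stable: whatever time was spent polling is subtracted from the remaining blocking timeout, clamped at zero, while the -1 sentinel ("block forever") is left untouched. A standalone illustration of that arithmetic (adjust_timeout() is invented for the example; in QEMU the MIN macro comes from the common headers):

    #include <assert.h>
    #include <stdint.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Mirrors the *timeout adjustment above: -1 means "block forever"
     * and must not be decremented; MIN() clamps the result at zero. */
    static void adjust_timeout(int64_t *timeout, int64_t elapsed_time)
    {
        if (*timeout != -1) {
            *timeout -= MIN(*timeout, elapsed_time);
        }
    }

    int main(void)
    {
        int64_t t = 500000;          /* 500us of timeout remaining */
        adjust_timeout(&t, 200000);  /* polled for 200us */
        assert(t == 300000);

        adjust_timeout(&t, 400000);  /* polled longer than remained */
        assert(t == 0);              /* clamped, never negative */

        t = -1;                      /* infinite wait */
        adjust_timeout(&t, 123456);
        assert(t == -1);             /* sentinel preserved */
        return 0;
    }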
@@ -554,19 +562,16 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
  *
  * Returns: true if progress was made, false otherwise
  */
-static bool try_poll_mode(AioContext *ctx, bool blocking)
+static bool try_poll_mode(AioContext *ctx, int64_t *timeout)
 {
-    if (blocking && ctx->poll_max_ns && !atomic_read(&ctx->poll_disable_cnt)) {
-        /* See qemu_soonest_timeout() uint64_t hack */
-        int64_t max_ns = MIN((uint64_t)aio_compute_timeout(ctx),
-                             (uint64_t)ctx->poll_ns);
+    /* See qemu_soonest_timeout() uint64_t hack */
+    int64_t max_ns = MIN((uint64_t)*timeout, (uint64_t)ctx->poll_ns);
 
-        if (max_ns) {
-            poll_set_started(ctx, true);
+    if (max_ns && !atomic_read(&ctx->poll_disable_cnt)) {
+        poll_set_started(ctx, true);
 
-            if (run_poll_handlers(ctx, max_ns)) {
-                return true;
-            }
+        if (run_poll_handlers(ctx, max_ns, timeout)) {
+            return true;
         }
     }
 
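The "qemu_soonest_timeout() uint64_t hack" works because converting -1 ("block forever") to uint64_t yields UINT64_MAX, so MIN() always prefers the finite ctx->poll_ns bound, while ordinary non-negative timeouts compare as usual. A standalone demonstration (the values are arbitrary):

    #include <assert.h>
    #include <stdint.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        int64_t timeout = -1;     /* block forever */
        int64_t poll_ns = 32768;  /* adaptive polling window */

        /* (uint64_t)-1 == UINT64_MAX, so the finite bound wins. */
        int64_t max_ns = MIN((uint64_t)timeout, (uint64_t)poll_ns);
        assert(max_ns == poll_ns);

        /* A deadline sooner than poll_ns caps the polling time. */
        timeout = 1000;
        max_ns = MIN((uint64_t)timeout, (uint64_t)poll_ns);
        assert(max_ns == 1000);
        return 0;
    }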
@@ -575,7 +580,7 @@ static bool try_poll_mode(AioContext *ctx, bool blocking)
     /* Even if we don't run busy polling, try polling once in case it can make
      * progress and the caller will be able to avoid ppoll(2)/epoll_wait(2).
      */
-    return run_poll_handlers_once(ctx);
+    return run_poll_handlers_once(ctx, timeout);
 }
 
 bool aio_poll(AioContext *ctx, bool blocking)
@@ -605,8 +610,14 @@ bool aio_poll(AioContext *ctx, bool blocking)
         start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
     }
 
-    progress = try_poll_mode(ctx, blocking);
-    if (!progress) {
+    timeout = blocking ? aio_compute_timeout(ctx) : 0;
+    progress = try_poll_mode(ctx, &timeout);
+    assert(!(timeout && progress));
+
+    /* If polling is allowed, non-blocking aio_poll does not need the
+     * system call---a single round of run_poll_handlers_once suffices.
+     */
+    if (timeout || atomic_read(&ctx->poll_disable_cnt)) {
         assert(npfd == 0);
 
         /* fill pollfds */
@@ -620,8 +631,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
             }
         }
 
-        timeout = blocking ? aio_compute_timeout(ctx) : 0;
-
         /* wait until next event */
         if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
             AioHandler epoll_handler;
diff --git a/util/trace-events b/util/trace-events
--- a/util/trace-events
+++ b/util/trace-events
@@ -1,8 +1,8 @@
 # See docs/devel/tracing.txt for syntax documentation.
 
 # util/aio-posix.c
-run_poll_handlers_begin(void *ctx, int64_t max_ns) "ctx %p max_ns %"PRId64
-run_poll_handlers_end(void *ctx, bool progress) "ctx %p progress %d"
+run_poll_handlers_begin(void *ctx, int64_t max_ns, int64_t timeout) "ctx %p max_ns %"PRId64 " timeout %"PRId64
+run_poll_handlers_end(void *ctx, bool progress, int64_t timeout) "ctx %p progress %d new timeout %"PRId64
 poll_shrink(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
 poll_grow(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64