mirror of https://github.com/xemu-project/xemu.git
aio-posix: ensure poll mode is left when aio_notify is called
With aio=thread, adaptive polling makes latency worse rather than
better, because it delays the execution of the ThreadPool's
completion bottom half.

event_notifier_poll() does run while polling, detecting that a
bottom half was scheduled by a worker thread, but because
ctx->notifier is explicitly ignored in run_poll_handlers_once(),
scheduling the BH does not count as making progress and
run_poll_handlers() keeps running.  Fix this by recomputing the
deadline after *timeout could have changed.

With this change, ThreadPool still cannot participate in polling
but at least it does not suffer from extra latency.

Reported-by: Sergio Lopez <slp@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20190409122823.12416-1-pbonzini@redhat.com
Cc: Stefan Hajnoczi <stefanha@gmail.com>
Cc: Kevin Wolf <kwolf@redhat.com>
Cc: qemu-block@nongnu.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <1553692145-86728-1-git-send-email-pbonzini@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20190409122823.12416-1-pbonzini@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
parent 118f99442d
commit 993ed89f35
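To make the commit message concrete, here is a minimal, self-contained model of the fixed loop in run_poll_handlers(). It is only a sketch: poll_once(), now_ns() and soonest() are illustrative stand-ins for run_poll_handlers_once(), qemu_clock_get_ns() and qemu_soonest_timeout(), the numbers are made up, and the poll_disable_cnt check is omitted. The point it demonstrates is the one the patch relies on: because the deadline is re-clamped against *timeout on every iteration, a handler (or a scheduled bottom half) that sets *timeout to 0 makes the loop exit immediately instead of spinning until max_ns elapses. The upstream diff itself follows the sketch.

#include <assert.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Smaller of two timeouts, where -1 means "infinite"; mirrors the
 * unsigned-cast trick used by qemu_soonest_timeout(). */
static int64_t soonest(int64_t a, int64_t b)
{
    return ((uint64_t)a < (uint64_t)b) ? a : b;
}

static int64_t now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

/* Stand-in for run_poll_handlers_once(): pretend a bottom half is
 * scheduled on the fourth iteration and forces *timeout to 0. */
static bool poll_once(int64_t *timeout, int iteration)
{
    if (iteration == 3) {
        *timeout = 0;     /* event arrived: leave poll mode promptly */
    }
    return false;         /* ctx->notifier alone does not count as progress */
}

int main(void)
{
    int64_t timeout = -1;          /* "block forever" unless told otherwise */
    int64_t max_ns = 32 * 1000;    /* adaptive polling budget, 32 us */
    int64_t start = now_ns(), elapsed;
    int iteration = 0;
    bool progress;

    do {
        progress = poll_once(&timeout, iteration++);
        elapsed = now_ns() - start;
        /* Key line of the fix: re-clamp the deadline, because
         * poll_once() may have changed timeout (e.g. set it to 0). */
        max_ns = soonest(timeout, max_ns);
        /* Successful polling must have zeroed the deadline. */
        assert(!(max_ns && progress));
    } while (elapsed < max_ns);

    printf("left poll mode after %d iterations, %" PRId64 " ns\n",
           iteration, elapsed);
    return 0;
}

On a typical machine this prints that poll mode was left after four iterations, well before the 32 us budget expires; without the re-clamping, the loop would keep spinning for the full budget because poll_once() never reports progress.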
util/aio-posix.c

@@ -519,6 +519,10 @@ static bool run_poll_handlers_once(AioContext *ctx, int64_t *timeout)
         if (!node->deleted && node->io_poll &&
             aio_node_check(ctx, node->is_external) &&
             node->io_poll(node->opaque)) {
+            /*
+             * Polling was successful, exit try_poll_mode immediately
+             * to adjust the next polling time.
+             */
             *timeout = 0;
             if (node->opaque != &ctx->notifier) {
                 progress = true;
@@ -558,8 +562,9 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout)
     do {
         progress = run_poll_handlers_once(ctx, timeout);
         elapsed_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start_time;
-    } while (!progress && elapsed_time < max_ns
-             && !atomic_read(&ctx->poll_disable_cnt));
+        max_ns = qemu_soonest_timeout(*timeout, max_ns);
+        assert(!(max_ns && progress));
+    } while (elapsed_time < max_ns && !atomic_read(&ctx->poll_disable_cnt));
 
     /* If time has passed with no successful polling, adjust *timeout to
      * keep the same ending time.
@@ -585,8 +590,7 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout)
  */
 static bool try_poll_mode(AioContext *ctx, int64_t *timeout)
 {
-    /* See qemu_soonest_timeout() uint64_t hack */
-    int64_t max_ns = MIN((uint64_t)*timeout, (uint64_t)ctx->poll_ns);
+    int64_t max_ns = qemu_soonest_timeout(*timeout, ctx->poll_ns);
 
     if (max_ns && !atomic_read(&ctx->poll_disable_cnt)) {
         poll_set_started(ctx, true);
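A note on the try_poll_mode() hunk: the open-coded MIN((uint64_t)*timeout, (uint64_t)ctx->poll_ns) and its "uint64_t hack" comment are replaced by qemu_soonest_timeout(), which encapsulates the same trick. A timeout of -1 means "block forever", and casting to uint64_t turns -1 into the maximal value, so the smaller cast operand is always the deadline that expires first; a plain signed MIN would wrongly pick -1 over any finite timeout. The stand-alone sketch below paraphrases that behaviour under a stand-in name (soonest_timeout) rather than quoting QEMU's actual inline definition, which lives in include/qemu/timer.h.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for qemu_soonest_timeout(): -1 means "infinite", and the
 * unsigned cast makes -1 compare as the largest possible value, so
 * the result is always the timeout that expires first. */
static int64_t soonest_timeout(int64_t timeout1, int64_t timeout2)
{
    return ((uint64_t)timeout1 < (uint64_t)timeout2) ? timeout1 : timeout2;
}

int main(void)
{
    printf("%lld\n", (long long)soonest_timeout(-1, 32000));  /* 32000 */
    printf("%lld\n", (long long)soonest_timeout(0, 32000));   /* 0     */
    printf("%lld\n", (long long)soonest_timeout(-1, -1));     /* -1    */
    return 0;
}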