block-copy: add coroutine_fn annotations

These functions end up calling bdrv_common_block_status_above(), a
generated_co_wrapper function.
In addition, they also happen to be always called in coroutine context,
meaning all callers are coroutine_fn.
This means that the generated_co_wrapper (g_c_w) function will enter the
qemu_in_coroutine() case and eventually suspend (in other words, call
qemu_coroutine_yield()). Therefore we can mark such functions coroutine_fn too.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Message-Id: <20221128142337.657646-3-eesposit@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
This commit is contained in:
Author: Emanuele Giuseppe Esposito, 2022-11-28 09:23:25 -05:00
Committed by: Kevin Wolf
parent: 7b52a921c1
commit: 43a0d4f08b
2 changed files with 15 additions and 11 deletions

View File

@@ -577,7 +577,8 @@ static coroutine_fn int block_copy_task_entry(AioTask *task)
     return ret;
 }
 
-static int block_copy_block_status(BlockCopyState *s, int64_t offset,
+static coroutine_fn int block_copy_block_status(BlockCopyState *s,
+                                                int64_t offset,
                                    int64_t bytes, int64_t *pnum)
 {
     int64_t num;
@@ -590,7 +591,7 @@ static int block_copy_block_status(BlockCopyState *s, int64_t offset,
         base = NULL;
     }
 
-    ret = bdrv_block_status_above(s->source->bs, base, offset, bytes, &num,
-                                  NULL, NULL);
+    ret = bdrv_co_block_status_above(s->source->bs, base, offset, bytes, &num,
+                                     NULL, NULL);
     if (ret < 0 || num < s->cluster_size) {
         /*
@@ -613,7 +614,8 @@ static int block_copy_block_status(BlockCopyState *s, int64_t offset,
  * Check if the cluster starting at offset is allocated or not.
  * return via pnum the number of contiguous clusters sharing this allocation.
  */
-static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
+static int coroutine_fn block_copy_is_cluster_allocated(BlockCopyState *s,
+                                                        int64_t offset,
                                            int64_t *pnum)
 {
     BlockDriverState *bs = s->source->bs;
@@ -624,7 +626,7 @@ static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
     assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
 
     while (true) {
-        ret = bdrv_is_allocated(bs, offset, bytes, &count);
+        ret = bdrv_co_is_allocated(bs, offset, bytes, &count);
         if (ret < 0) {
             return ret;
         }
@@ -669,8 +671,9 @@ void block_copy_reset(BlockCopyState *s, int64_t offset, int64_t bytes)
  * @return 0 when the cluster at @offset was unallocated,
  *         1 otherwise, and -ret on error.
  */
-int64_t block_copy_reset_unallocated(BlockCopyState *s,
-                                     int64_t offset, int64_t *count)
+int64_t coroutine_fn block_copy_reset_unallocated(BlockCopyState *s,
+                                                  int64_t offset,
+                                                  int64_t *count)
 {
     int ret;
     int64_t clusters, bytes;

View File

@@ -36,8 +36,9 @@ void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm);
 void block_copy_state_free(BlockCopyState *s);
 
 void block_copy_reset(BlockCopyState *s, int64_t offset, int64_t bytes);
-int64_t block_copy_reset_unallocated(BlockCopyState *s,
-                                     int64_t offset, int64_t *count);
+int64_t coroutine_fn block_copy_reset_unallocated(BlockCopyState *s,
+                                                  int64_t offset,
+                                                  int64_t *count);
 
 int coroutine_fn block_copy(BlockCopyState *s, int64_t offset, int64_t bytes,
                             bool ignore_ratelimit, uint64_t timeout_ns,