block: bdrv_mark_request_serialising: split non-waiting function
We'll need a separate function that will only "mark" a request serialising with the specified alignment, but will not wait for conflicting requests. In other words, it will behave like the old bdrv_mark_request_serialising(), before bdrv_wait_serialising_requests_locked() was merged into it.

To reduce possible confusion, let's do the following: the public function that does both marking and waiting will be called bdrv_make_request_serialising(), and the private function that only "marks" will be called tracked_request_set_serialising().

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-Id: <20201021145859.11201-6-vsementsov@virtuozzo.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
parent ec1c886831
commit 8ac5aab255
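As a reading aid before the full diff, here is a minimal sketch of the resulting split, condensed from the block/io.c changes below. The example_mark_only() caller at the end is hypothetical (it is not part of this patch) and only illustrates the "mark now, wait later" use case the commit message anticipates.

/* Condensed sketch of the new shape in block/io.c (see diff below). */

/* Called with req->bs->reqs_lock held: only marks, never waits. */
static void tracked_request_set_serialising(BdrvTrackedRequest *req,
                                            uint64_t align);

/* Public helper: marks the request serialising and waits for conflicts. */
bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
                                                uint64_t align)
{
    bool waited;

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    tracked_request_set_serialising(req, align);           /* mark */
    waited = bdrv_wait_serialising_requests_locked(req);   /* wait */
    qemu_co_mutex_unlock(&req->bs->reqs_lock);

    return waited;
}

/* Hypothetical future caller (not in this patch): mark without waiting,
 * deferring the wait for conflicting requests to a later point. */
static void example_mark_only(BdrvTrackedRequest *req, uint64_t align)
{
    qemu_co_mutex_lock(&req->bs->reqs_lock);
    tracked_request_set_serialising(req, align);   /* no wait here */
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}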
@@ -2953,7 +2953,7 @@ raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes,
 
     assert(bdrv_check_request(req->offset, req->bytes) == 0);
 
-    bdrv_mark_request_serialising(req, bs->bl.request_alignment);
+    bdrv_make_request_serialising(req, bs->bl.request_alignment);
 }
 #endif
 
block/io.c
@@ -805,15 +805,14 @@ bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
     return waited;
 }
 
-bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
+/* Called with req->bs->reqs_lock held */
+static void tracked_request_set_serialising(BdrvTrackedRequest *req,
+                                            uint64_t align)
 {
-    BlockDriverState *bs = req->bs;
     int64_t overlap_offset = req->offset & ~(align - 1);
     uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                              - overlap_offset;
-    bool waited;
 
-    qemu_co_mutex_lock(&bs->reqs_lock);
     if (!req->serialising) {
         qatomic_inc(&req->bs->serialising_in_flight);
         req->serialising = true;
@@ -821,9 +820,6 @@ bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
 
     req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
     req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
-    waited = bdrv_wait_serialising_requests_locked(req);
-    qemu_co_mutex_unlock(&bs->reqs_lock);
-    return waited;
 }
 
 /**
@@ -909,6 +905,21 @@ static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self
     return waited;
 }
 
+bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
+                                                uint64_t align)
+{
+    bool waited;
+
+    qemu_co_mutex_lock(&req->bs->reqs_lock);
+
+    tracked_request_set_serialising(req, align);
+    waited = bdrv_wait_serialising_requests_locked(req);
+
+    qemu_co_mutex_unlock(&req->bs->reqs_lock);
+
+    return waited;
+}
+
 int bdrv_check_request(int64_t offset, int64_t bytes)
 {
     if (offset < 0 || bytes < 0) {
@@ -1434,7 +1445,7 @@ static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
          * with each other for the same cluster. For example, in copy-on-read
          * it ensures that the CoR read and write operations are atomic and
          * guest writes cannot interleave between them. */
-        bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs));
+        bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
     } else {
         bdrv_wait_serialising_requests(req);
     }
@@ -1849,7 +1860,7 @@ bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes,
     assert(!(flags & ~BDRV_REQ_MASK));
 
     if (flags & BDRV_REQ_SERIALISING) {
-        bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs));
+        bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
     } else {
         bdrv_wait_serialising_requests(req);
     }
@@ -2015,7 +2026,7 @@ static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
 
     padding = bdrv_init_padding(bs, offset, bytes, &pad);
     if (padding) {
-        bdrv_mark_request_serialising(req, align);
+        bdrv_make_request_serialising(req, align);
 
         bdrv_padding_rmw_read(child, req, &pad, true);
 
@@ -2129,7 +2140,7 @@ int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
     }
 
     if (bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad)) {
-        bdrv_mark_request_serialising(&req, align);
+        bdrv_make_request_serialising(&req, align);
         bdrv_padding_rmw_read(child, &req, &pad, false);
     }
 
@@ -3250,7 +3261,7 @@ int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
      * new area, we need to make sure that no write requests are made to it
      * concurrently or they might be overwritten by preallocation. */
     if (new_bytes) {
-        bdrv_mark_request_serialising(&req, 1);
+        bdrv_make_request_serialising(&req, 1);
     }
     if (bs->read_only) {
         error_setg(errp, "Image is read-only");

@@ -1060,7 +1060,8 @@ extern unsigned int bdrv_drain_all_count;
 void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent);
 void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent);
 
-bool coroutine_fn bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align);
+bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
+                                                uint64_t align);
 BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs);
 
 int get_tmp_filename(char *filename, int size);