mirror of https://github.com/xemu-project/xemu.git
qcow: Switch qcow_co_readv to byte-based calls
We are gradually moving away from sector-based interfaces, towards byte-based. Make the change for the internals of the qcow driver read function, by iterating over offset/bytes instead of sector_num/nb_sectors, and with a rename of index_in_cluster and repurposing of n to track bytes instead of sectors. A later patch will then switch the qcow driver as a whole over to byte-based operation.

Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
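As a minimal standalone sketch (not part of this patch; the helper name and locals are illustrative only), the sector-to-byte mapping applied in the diff below boils down to:

    #include <stdint.h>

    #define BDRV_SECTOR_SIZE 512    /* QEMU's fixed 512-byte sector */

    /* Illustrative helper: translate a sector-based request into the
     * byte-based offset/bytes pair that the rewritten loop iterates over. */
    static void sectors_to_bytes(int64_t sector_num, int nb_sectors,
                                 int64_t *offset, int64_t *bytes)
    {
        *offset = sector_num * BDRV_SECTOR_SIZE;
        *bytes  = (int64_t)nb_sectors * BDRV_SECTOR_SIZE;
    }

The driver-visible interface still takes sector_num/nb_sectors until the follow-up patch; only the internal loop switches to bytes here.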
parent 787993a543
commit a15312b017

block/qcow.c | 42 lines changed
--- a/block/qcow.c
+++ b/block/qcow.c
@@ -617,13 +617,15 @@ static coroutine_fn int qcow_co_readv(BlockDriverState *bs, int64_t sector_num,
                          int nb_sectors, QEMUIOVector *qiov)
 {
     BDRVQcowState *s = bs->opaque;
-    int index_in_cluster;
+    int offset_in_cluster;
     int ret = 0, n;
     uint64_t cluster_offset;
     struct iovec hd_iov;
     QEMUIOVector hd_qiov;
     uint8_t *buf;
     void *orig_buf;
+    int64_t offset = sector_num * BDRV_SECTOR_SIZE;
+    int64_t bytes = nb_sectors * BDRV_SECTOR_SIZE;
 
     if (qiov->niov > 1) {
         buf = orig_buf = qemu_try_blockalign(bs, qiov->size);
@@ -637,36 +639,35 @@ static coroutine_fn int qcow_co_readv(BlockDriverState *bs, int64_t sector_num,
 
     qemu_co_mutex_lock(&s->lock);
 
-    while (nb_sectors != 0) {
+    while (bytes != 0) {
         /* prepare next request */
-        ret = get_cluster_offset(bs, sector_num << 9,
-                                 0, 0, 0, 0, &cluster_offset);
+        ret = get_cluster_offset(bs, offset, 0, 0, 0, 0, &cluster_offset);
         if (ret < 0) {
             break;
         }
-        index_in_cluster = sector_num & (s->cluster_sectors - 1);
-        n = s->cluster_sectors - index_in_cluster;
-        if (n > nb_sectors) {
-            n = nb_sectors;
+        offset_in_cluster = offset & (s->cluster_size - 1);
+        n = s->cluster_size - offset_in_cluster;
+        if (n > bytes) {
+            n = bytes;
         }
 
         if (!cluster_offset) {
             if (bs->backing) {
                 /* read from the base image */
                 hd_iov.iov_base = (void *)buf;
-                hd_iov.iov_len = n * 512;
+                hd_iov.iov_len = n;
                 qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
                 qemu_co_mutex_unlock(&s->lock);
                 /* qcow2 emits this on bs->file instead of bs->backing */
                 BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
-                ret = bdrv_co_readv(bs->backing, sector_num, n, &hd_qiov);
+                ret = bdrv_co_preadv(bs->backing, offset, n, &hd_qiov, 0);
                 qemu_co_mutex_lock(&s->lock);
                 if (ret < 0) {
                     break;
                 }
             } else {
                 /* Note: in this case, no need to wait */
-                memset(buf, 0, 512 * n);
+                memset(buf, 0, n);
             }
         } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
             /* add AIO support for compressed blocks ? */
@@ -674,21 +675,19 @@ static coroutine_fn int qcow_co_readv(BlockDriverState *bs, int64_t sector_num,
                 ret = -EIO;
                 break;
             }
-            memcpy(buf,
-                   s->cluster_cache + index_in_cluster * 512, 512 * n);
+            memcpy(buf, s->cluster_cache + offset_in_cluster, n);
         } else {
             if ((cluster_offset & 511) != 0) {
                 ret = -EIO;
                 break;
             }
             hd_iov.iov_base = (void *)buf;
-            hd_iov.iov_len = n * 512;
+            hd_iov.iov_len = n;
             qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
             qemu_co_mutex_unlock(&s->lock);
             BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
-            ret = bdrv_co_readv(bs->file,
-                                (cluster_offset >> 9) + index_in_cluster,
-                                n, &hd_qiov);
+            ret = bdrv_co_preadv(bs->file, cluster_offset + offset_in_cluster,
+                                 n, &hd_qiov, 0);
             qemu_co_mutex_lock(&s->lock);
             if (ret < 0) {
                 break;
@@ -696,8 +695,7 @@ static coroutine_fn int qcow_co_readv(BlockDriverState *bs, int64_t sector_num,
             if (bs->encrypted) {
                 assert(s->crypto);
                 if (qcrypto_block_decrypt(s->crypto,
-                                          sector_num * BDRV_SECTOR_SIZE, buf,
-                                          n * BDRV_SECTOR_SIZE, NULL) < 0) {
+                                          offset, buf, n, NULL) < 0) {
                     ret = -EIO;
                     break;
                 }
@@ -705,9 +703,9 @@ static coroutine_fn int qcow_co_readv(BlockDriverState *bs, int64_t sector_num,
         }
         ret = 0;
 
-        nb_sectors -= n;
-        sector_num += n;
-        buf += n * 512;
+        bytes -= n;
+        offset += n;
+        buf += n;
     }
 
     qemu_co_mutex_unlock(&s->lock);
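For reference, a small self-contained sketch of the per-cluster chunking the new loop performs (illustrative only; the real code works on the BDRVQcowState fields shown above and relies on cluster_size being a power of two):

    #include <stdint.h>

    /* Illustrative helper: given the current byte offset, the bytes still
     * outstanding, and a power-of-two cluster size, return how many bytes
     * the next request may cover without crossing a cluster boundary. */
    static int64_t bytes_in_this_cluster(int64_t offset, int64_t bytes,
                                         uint64_t cluster_size)
    {
        uint64_t offset_in_cluster = offset & (cluster_size - 1);
        int64_t n = (int64_t)(cluster_size - offset_in_cluster);
        return n > bytes ? bytes : n;
    }

For example, with a 4096-byte cluster a 10000-byte read starting at offset 6000 is issued as chunks of 2192, 4096 and 3712 bytes, each fully inside one cluster.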