mirror of https://github.com/xemu-project/xemu.git
qcow2: rename two QCowAIOCB members
The n member is not very descriptive and is very hard to grep, so rename it to cur_nr_sectors to better indicate what it is used for. Also rename nb_sectors to remaining_sectors, as that is what it is used for. Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
This commit is contained in:
parent
9a2d77ad0d
commit
7b88e48ba5
|
@ -332,8 +332,8 @@ typedef struct QCowAIOCB {
|
||||||
QEMUIOVector *qiov;
|
QEMUIOVector *qiov;
|
||||||
uint8_t *buf;
|
uint8_t *buf;
|
||||||
void *orig_buf;
|
void *orig_buf;
|
||||||
int nb_sectors;
|
int remaining_sectors;
|
||||||
int n;
|
int cur_nr_sectors; /* number of sectors in current iteration */
|
||||||
uint64_t cluster_offset;
|
uint64_t cluster_offset;
|
||||||
uint8_t *cluster_data;
|
uint8_t *cluster_data;
|
||||||
BlockDriverAIOCB *hd_aiocb;
|
BlockDriverAIOCB *hd_aiocb;
|
||||||
|
@ -399,38 +399,38 @@ static void qcow_aio_read_cb(void *opaque, int ret)
|
||||||
} else {
|
} else {
|
||||||
if (s->crypt_method) {
|
if (s->crypt_method) {
|
||||||
qcow2_encrypt_sectors(s, acb->sector_num, acb->buf, acb->buf,
|
qcow2_encrypt_sectors(s, acb->sector_num, acb->buf, acb->buf,
|
||||||
acb->n, 0,
|
acb->cur_nr_sectors, 0,
|
||||||
&s->aes_decrypt_key);
|
&s->aes_decrypt_key);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
acb->nb_sectors -= acb->n;
|
acb->remaining_sectors -= acb->cur_nr_sectors;
|
||||||
acb->sector_num += acb->n;
|
acb->sector_num += acb->cur_nr_sectors;
|
||||||
acb->buf += acb->n * 512;
|
acb->buf += acb->cur_nr_sectors * 512;
|
||||||
|
|
||||||
if (acb->nb_sectors == 0) {
|
if (acb->remaining_sectors == 0) {
|
||||||
/* request completed */
|
/* request completed */
|
||||||
ret = 0;
|
ret = 0;
|
||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* prepare next AIO request */
|
/* prepare next AIO request */
|
||||||
acb->n = acb->nb_sectors;
|
acb->cur_nr_sectors = acb->remaining_sectors;
|
||||||
acb->cluster_offset =
|
acb->cluster_offset = qcow2_get_cluster_offset(bs, acb->sector_num << 9,
|
||||||
qcow2_get_cluster_offset(bs, acb->sector_num << 9, &acb->n);
|
&acb->cur_nr_sectors);
|
||||||
index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
|
index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
|
||||||
|
|
||||||
if (!acb->cluster_offset) {
|
if (!acb->cluster_offset) {
|
||||||
if (bs->backing_hd) {
|
if (bs->backing_hd) {
|
||||||
/* read from the base image */
|
/* read from the base image */
|
||||||
n1 = qcow2_backing_read1(bs->backing_hd, acb->sector_num,
|
n1 = qcow2_backing_read1(bs->backing_hd, acb->sector_num,
|
||||||
acb->buf, acb->n);
|
acb->buf, acb->cur_nr_sectors);
|
||||||
if (n1 > 0) {
|
if (n1 > 0) {
|
||||||
acb->hd_iov.iov_base = (void *)acb->buf;
|
acb->hd_iov.iov_base = (void *)acb->buf;
|
||||||
acb->hd_iov.iov_len = acb->n * 512;
|
acb->hd_iov.iov_len = acb->cur_nr_sectors * 512;
|
||||||
qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
|
qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
|
||||||
acb->hd_aiocb = bdrv_aio_readv(bs->backing_hd, acb->sector_num,
|
acb->hd_aiocb = bdrv_aio_readv(bs->backing_hd, acb->sector_num,
|
||||||
&acb->hd_qiov, acb->n,
|
&acb->hd_qiov, acb->cur_nr_sectors,
|
||||||
qcow_aio_read_cb, acb);
|
qcow_aio_read_cb, acb);
|
||||||
if (acb->hd_aiocb == NULL)
|
if (acb->hd_aiocb == NULL)
|
||||||
goto done;
|
goto done;
|
||||||
|
@ -441,7 +441,7 @@ static void qcow_aio_read_cb(void *opaque, int ret)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
/* Note: in this case, no need to wait */
|
/* Note: in this case, no need to wait */
|
||||||
memset(acb->buf, 0, 512 * acb->n);
|
memset(acb->buf, 0, 512 * acb->cur_nr_sectors);
|
||||||
ret = qcow_schedule_bh(qcow_aio_read_bh, acb);
|
ret = qcow_schedule_bh(qcow_aio_read_bh, acb);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto done;
|
goto done;
|
||||||
|
@ -450,8 +450,8 @@ static void qcow_aio_read_cb(void *opaque, int ret)
|
||||||
/* add AIO support for compressed blocks ? */
|
/* add AIO support for compressed blocks ? */
|
||||||
if (qcow2_decompress_cluster(s, acb->cluster_offset) < 0)
|
if (qcow2_decompress_cluster(s, acb->cluster_offset) < 0)
|
||||||
goto done;
|
goto done;
|
||||||
memcpy(acb->buf,
|
memcpy(acb->buf, s->cluster_cache + index_in_cluster * 512,
|
||||||
s->cluster_cache + index_in_cluster * 512, 512 * acb->n);
|
512 * acb->cur_nr_sectors);
|
||||||
ret = qcow_schedule_bh(qcow_aio_read_bh, acb);
|
ret = qcow_schedule_bh(qcow_aio_read_bh, acb);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto done;
|
goto done;
|
||||||
|
@ -462,11 +462,12 @@ static void qcow_aio_read_cb(void *opaque, int ret)
|
||||||
}
|
}
|
||||||
|
|
||||||
acb->hd_iov.iov_base = (void *)acb->buf;
|
acb->hd_iov.iov_base = (void *)acb->buf;
|
||||||
acb->hd_iov.iov_len = acb->n * 512;
|
acb->hd_iov.iov_len = acb->cur_nr_sectors * 512;
|
||||||
qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
|
qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
|
||||||
acb->hd_aiocb = bdrv_aio_readv(s->hd,
|
acb->hd_aiocb = bdrv_aio_readv(s->hd,
|
||||||
(acb->cluster_offset >> 9) + index_in_cluster,
|
(acb->cluster_offset >> 9) + index_in_cluster,
|
||||||
&acb->hd_qiov, acb->n, qcow_aio_read_cb, acb);
|
&acb->hd_qiov, acb->cur_nr_sectors,
|
||||||
|
qcow_aio_read_cb, acb);
|
||||||
if (acb->hd_aiocb == NULL)
|
if (acb->hd_aiocb == NULL)
|
||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
|
@ -500,8 +501,8 @@ static QCowAIOCB *qcow_aio_setup(BlockDriverState *bs,
|
||||||
} else {
|
} else {
|
||||||
acb->buf = (uint8_t *)qiov->iov->iov_base;
|
acb->buf = (uint8_t *)qiov->iov->iov_base;
|
||||||
}
|
}
|
||||||
acb->nb_sectors = nb_sectors;
|
acb->remaining_sectors = nb_sectors;
|
||||||
acb->n = 0;
|
acb->cur_nr_sectors = 0;
|
||||||
acb->cluster_offset = 0;
|
acb->cluster_offset = 0;
|
||||||
acb->l2meta.nb_clusters = 0;
|
acb->l2meta.nb_clusters = 0;
|
||||||
QLIST_INIT(&acb->l2meta.dependent_requests);
|
QLIST_INIT(&acb->l2meta.dependent_requests);
|
||||||
|
@ -569,24 +570,24 @@ static void qcow_aio_write_cb(void *opaque, int ret)
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto done;
|
goto done;
|
||||||
|
|
||||||
acb->nb_sectors -= acb->n;
|
acb->remaining_sectors -= acb->cur_nr_sectors;
|
||||||
acb->sector_num += acb->n;
|
acb->sector_num += acb->cur_nr_sectors;
|
||||||
acb->buf += acb->n * 512;
|
acb->buf += acb->cur_nr_sectors * 512;
|
||||||
|
|
||||||
if (acb->nb_sectors == 0) {
|
if (acb->remaining_sectors == 0) {
|
||||||
/* request completed */
|
/* request completed */
|
||||||
ret = 0;
|
ret = 0;
|
||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
|
|
||||||
index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
|
index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
|
||||||
n_end = index_in_cluster + acb->nb_sectors;
|
n_end = index_in_cluster + acb->remaining_sectors;
|
||||||
if (s->crypt_method &&
|
if (s->crypt_method &&
|
||||||
n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors)
|
n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors)
|
||||||
n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors;
|
n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors;
|
||||||
|
|
||||||
ret = qcow2_alloc_cluster_offset(bs, acb->sector_num << 9,
|
ret = qcow2_alloc_cluster_offset(bs, acb->sector_num << 9,
|
||||||
index_in_cluster, n_end, &acb->n, &acb->l2meta);
|
index_in_cluster, n_end, &acb->cur_nr_sectors, &acb->l2meta);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
|
@ -608,17 +609,17 @@ static void qcow_aio_write_cb(void *opaque, int ret)
|
||||||
s->cluster_size);
|
s->cluster_size);
|
||||||
}
|
}
|
||||||
qcow2_encrypt_sectors(s, acb->sector_num, acb->cluster_data, acb->buf,
|
qcow2_encrypt_sectors(s, acb->sector_num, acb->cluster_data, acb->buf,
|
||||||
acb->n, 1, &s->aes_encrypt_key);
|
acb->cur_nr_sectors, 1, &s->aes_encrypt_key);
|
||||||
src_buf = acb->cluster_data;
|
src_buf = acb->cluster_data;
|
||||||
} else {
|
} else {
|
||||||
src_buf = acb->buf;
|
src_buf = acb->buf;
|
||||||
}
|
}
|
||||||
acb->hd_iov.iov_base = (void *)src_buf;
|
acb->hd_iov.iov_base = (void *)src_buf;
|
||||||
acb->hd_iov.iov_len = acb->n * 512;
|
acb->hd_iov.iov_len = acb->cur_nr_sectors * 512;
|
||||||
qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
|
qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
|
||||||
acb->hd_aiocb = bdrv_aio_writev(s->hd,
|
acb->hd_aiocb = bdrv_aio_writev(s->hd,
|
||||||
(acb->cluster_offset >> 9) + index_in_cluster,
|
(acb->cluster_offset >> 9) + index_in_cluster,
|
||||||
&acb->hd_qiov, acb->n,
|
&acb->hd_qiov, acb->cur_nr_sectors,
|
||||||
qcow_aio_write_cb, acb);
|
qcow_aio_write_cb, acb);
|
||||||
if (acb->hd_aiocb == NULL)
|
if (acb->hd_aiocb == NULL)
|
||||||
goto done;
|
goto done;
|
||||||
|
|
Loading…
Reference in New Issue