mirror of https://github.com/xqemu/xqemu.git
Pull request

v2:
 * Resolved merge conflict with block/iscsi.c [Peter]

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJXj6TkAAoJEJykq7OBq3PI15oIAL24OFnMTFOAWyY2h3yfzGwe
Gn1qzFOTHCXgbgVsolVuZnFU/SDn9WzMS9y6o7jcJykv++FdMSYUO7paPQAg9etX
o9KOgyfUUDhskbaXEbTKxhqy7UvcyJsZYmjN+TD5tupfnrqGlmu6rkUPpSwGo++G
u7CkAZewJc5SvsCdQRe3N10LbG4xU/j06cjCIHWDPRr+AV9qvsb9KkIOg3wkrhWN
R83yj+yX6vhfQouHLoOlh3RYZ5HNo020JPum0jT1tGcoshKoFyu0prSRqNVVMqTY
BepvWMhXbiq219HnFaQBtnysve9gWix4WhuCHtaAlaZDTBh945ZztsW7w7dubtA=
=7ics
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging

Pull request

v2:
 * Resolved merge conflict with block/iscsi.c [Peter]

# gpg: Signature made Wed 20 Jul 2016 17:20:52 BST
# gpg:                using RSA key 0x9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>"
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>"
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35 775A 9CA4 ABB3 81AB 73C8

* remotes/stefanha/tags/block-pull-request: (25 commits)
  raw_bsd: Convert to byte-based interface
  nbd: Convert to byte-based interface
  block: Kill .bdrv_co_discard()
  sheepdog: Switch .bdrv_co_discard() to byte-based
  raw_bsd: Switch .bdrv_co_discard() to byte-based
  qcow2: Switch .bdrv_co_discard() to byte-based
  nbd: Switch .bdrv_co_discard() to byte-based
  iscsi: Switch .bdrv_co_discard() to byte-based
  gluster: Switch .bdrv_co_discard() to byte-based
  blkreplay: Switch .bdrv_co_discard() to byte-based
  block: Add .bdrv_co_pdiscard() driver callback
  block: Convert .bdrv_aio_discard() to byte-based
  rbd: Switch rbd_start_aio() to byte-based
  raw-posix: Switch paio_submit() to byte-based
  block: Convert BB interface to byte-based discards
  block: Convert bdrv_aio_discard() to byte-based
  block: Switch BlockRequest to byte-based
  block: Convert bdrv_discard() to byte-based
  block: Convert bdrv_co_discard() to byte-based
  iscsi: Rely on block layer to break up large requests
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

Conflicts:
        block/gluster.c
commit 61ead113ae
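The hunks below all apply the same pattern: each discard entry point is renamed from a sector-based form (.bdrv_co_discard, bdrv_aio_discard, blk_discard, ...) to a byte-based one (.bdrv_co_pdiscard, bdrv_aio_pdiscard, blk_pdiscard, ...), and its (sector_num, nb_sectors) pair becomes a byte-based (offset, count) pair. The standalone C sketch below only illustrates that unit conversion as it appears in callers such as block/mirror.c and block/raw-win32.c; it is not code from the patch series, and the sectors_to_bytes() helper name is hypothetical.

#include <assert.h>
#include <stdint.h>

#define BDRV_SECTOR_BITS 9
#define BDRV_SECTOR_SIZE (1ULL << BDRV_SECTOR_BITS)   /* 512 bytes */

/* Hypothetical helper: map an old-style (sector_num, nb_sectors) request
 * onto the byte-based (offset, count) arguments the new callbacks take. */
static inline void sectors_to_bytes(int64_t sector_num, int nb_sectors,
                                    int64_t *offset, int *count)
{
    *offset = sector_num << BDRV_SECTOR_BITS;
    *count  = nb_sectors << BDRV_SECTOR_BITS;
}

int main(void)
{
    int64_t offset;
    int count;

    /* A discard of 8 sectors starting at sector 16 ... */
    sectors_to_bytes(16, 8, &offset, &count);
    /* ... becomes a byte-based discard of 4096 bytes at offset 8192. */
    assert(offset == 16 * BDRV_SECTOR_SIZE);
    assert(count == 8 * BDRV_SECTOR_SIZE);
    return 0;
}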
@@ -114,11 +114,11 @@ static int coroutine_fn blkreplay_co_pwrite_zeroes(BlockDriverState *bs,
     return ret;
 }
 
-static int coroutine_fn blkreplay_co_discard(BlockDriverState *bs,
-                                             int64_t sector_num, int nb_sectors)
+static int coroutine_fn blkreplay_co_pdiscard(BlockDriverState *bs,
+                                              int64_t offset, int count)
 {
     uint64_t reqid = request_id++;
-    int ret = bdrv_co_discard(bs->file->bs, sector_num, nb_sectors);
+    int ret = bdrv_co_pdiscard(bs->file->bs, offset, count);
     block_request_create(reqid, bs, qemu_coroutine_self());
     qemu_coroutine_yield();
 
@@ -148,7 +148,7 @@ static BlockDriver bdrv_blkreplay = {
     .bdrv_co_pwritev        = blkreplay_co_pwritev,
 
     .bdrv_co_pwrite_zeroes  = blkreplay_co_pwrite_zeroes,
-    .bdrv_co_discard        = blkreplay_co_discard,
+    .bdrv_co_pdiscard       = blkreplay_co_pdiscard,
     .bdrv_co_flush          = blkreplay_co_flush,
 };
 
@@ -1065,16 +1065,16 @@ BlockAIOCB *blk_aio_flush(BlockBackend *blk,
     return bdrv_aio_flush(blk_bs(blk), cb, opaque);
 }
 
-BlockAIOCB *blk_aio_discard(BlockBackend *blk,
-                            int64_t sector_num, int nb_sectors,
+BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
+                             int64_t offset, int count,
                              BlockCompletionFunc *cb, void *opaque)
 {
-    int ret = blk_check_request(blk, sector_num, nb_sectors);
+    int ret = blk_check_byte_request(blk, offset, count);
     if (ret < 0) {
         return blk_abort_aio_request(blk, cb, opaque, ret);
     }
 
-    return bdrv_aio_discard(blk_bs(blk), sector_num, nb_sectors, cb, opaque);
+    return bdrv_aio_pdiscard(blk_bs(blk), offset, count, cb, opaque);
 }
 
 void blk_aio_cancel(BlockAIOCB *acb)
@@ -1106,14 +1106,14 @@ BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
     return bdrv_aio_ioctl(blk_bs(blk), req, buf, cb, opaque);
 }
 
-int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
+int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int count)
 {
-    int ret = blk_check_request(blk, sector_num, nb_sectors);
+    int ret = blk_check_byte_request(blk, offset, count);
     if (ret < 0) {
         return ret;
     }
 
-    return bdrv_co_discard(blk_bs(blk), sector_num, nb_sectors);
+    return bdrv_co_pdiscard(blk_bs(blk), offset, count);
 }
 
 int blk_co_flush(BlockBackend *blk)
@@ -1504,14 +1504,14 @@ int blk_truncate(BlockBackend *blk, int64_t offset)
     return bdrv_truncate(blk_bs(blk), offset);
 }
 
-int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
+int blk_pdiscard(BlockBackend *blk, int64_t offset, int count)
 {
-    int ret = blk_check_request(blk, sector_num, nb_sectors);
+    int ret = blk_check_byte_request(blk, offset, count);
     if (ret < 0) {
         return ret;
     }
 
-    return bdrv_discard(blk_bs(blk), sector_num, nb_sectors);
+    return bdrv_pdiscard(blk_bs(blk), offset, count);
 }
 
 int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
@@ -1077,15 +1077,12 @@ error:
 }
 
 #ifdef CONFIG_GLUSTERFS_DISCARD
-static coroutine_fn int qemu_gluster_co_discard(BlockDriverState *bs,
-                                                int64_t sector_num,
-                                                int nb_sectors)
+static coroutine_fn int qemu_gluster_co_pdiscard(BlockDriverState *bs,
+                                                 int64_t offset, int size)
 {
     int ret;
     GlusterAIOCB acb;
     BDRVGlusterState *s = bs->opaque;
-    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
-    off_t offset = sector_num * BDRV_SECTOR_SIZE;
 
     acb.size = 0;
     acb.ret = 0;
@@ -1307,7 +1304,7 @@ static BlockDriver bdrv_gluster = {
     .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
     .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
 #ifdef CONFIG_GLUSTERFS_DISCARD
-    .bdrv_co_discard              = qemu_gluster_co_discard,
+    .bdrv_co_pdiscard             = qemu_gluster_co_pdiscard,
 #endif
 #ifdef CONFIG_GLUSTERFS_ZEROFILL
     .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
@@ -1335,7 +1332,7 @@ static BlockDriver bdrv_gluster_tcp = {
     .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
     .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
 #ifdef CONFIG_GLUSTERFS_DISCARD
-    .bdrv_co_discard              = qemu_gluster_co_discard,
+    .bdrv_co_pdiscard             = qemu_gluster_co_pdiscard,
 #endif
 #ifdef CONFIG_GLUSTERFS_ZEROFILL
     .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
@@ -1363,7 +1360,7 @@ static BlockDriver bdrv_gluster_unix = {
     .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
     .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
 #ifdef CONFIG_GLUSTERFS_DISCARD
-    .bdrv_co_discard              = qemu_gluster_co_discard,
+    .bdrv_co_pdiscard             = qemu_gluster_co_pdiscard,
 #endif
 #ifdef CONFIG_GLUSTERFS_ZEROFILL
     .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
@@ -1397,7 +1394,7 @@ static BlockDriver bdrv_gluster_rdma = {
     .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
     .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
 #ifdef CONFIG_GLUSTERFS_DISCARD
-    .bdrv_co_discard              = qemu_gluster_co_discard,
+    .bdrv_co_pdiscard             = qemu_gluster_co_pdiscard,
 #endif
 #ifdef CONFIG_GLUSTERFS_ZEROFILL
     .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
block/io.c (246 changed lines)
@@ -33,14 +33,13 @@
 
 #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
 
-static BlockAIOCB *bdrv_co_aio_rw_vector(BdrvChild *child,
-                                         int64_t sector_num,
+static BlockAIOCB *bdrv_co_aio_prw_vector(BdrvChild *child,
+                                          int64_t offset,
                                           QEMUIOVector *qiov,
-                                         int nb_sectors,
-                                         BdrvRequestFlags flags,
-                                         BlockCompletionFunc *cb,
-                                         void *opaque,
-                                         bool is_write);
+                                          BdrvRequestFlags flags,
+                                          BlockCompletionFunc *cb,
+                                          void *opaque,
+                                          bool is_write);
 static void coroutine_fn bdrv_co_do_rw(void *opaque);
 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
     int64_t offset, int count, BdrvRequestFlags flags);
 
@@ -971,21 +970,25 @@ err:
 
 /*
  * Forwards an already correctly aligned request to the BlockDriver. This
- * handles copy on read and zeroing after EOF; any other features must be
- * implemented by the caller.
+ * handles copy on read, zeroing after EOF, and fragmentation of large
+ * reads; any other features must be implemented by the caller.
  */
 static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
     BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
     int64_t align, QEMUIOVector *qiov, int flags)
 {
     int64_t total_bytes, max_bytes;
-    int ret;
+    int ret = 0;
+    uint64_t bytes_remaining = bytes;
+    int max_transfer;
 
     assert(is_power_of_2(align));
     assert((offset & (align - 1)) == 0);
     assert((bytes & (align - 1)) == 0);
     assert(!qiov || bytes == qiov->size);
     assert((bs->open_flags & BDRV_O_NO_IO) == 0);
+    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
+                                   align);
 
     /* TODO: We would need a per-BDS .supported_read_flags and
      * potential fallback support, if we ever implement any read flags
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Forward the request to the BlockDriver */
|
/* Forward the request to the BlockDriver, possibly fragmenting it */
|
||||||
total_bytes = bdrv_getlength(bs);
|
total_bytes = bdrv_getlength(bs);
|
||||||
if (total_bytes < 0) {
|
if (total_bytes < 0) {
|
||||||
ret = total_bytes;
|
ret = total_bytes;
|
||||||
|
@@ -1032,30 +1035,39 @@ static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
     }
 
     max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
-    if (bytes <= max_bytes) {
+    if (bytes <= max_bytes && bytes <= max_transfer) {
         ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
-    } else if (max_bytes > 0) {
-        QEMUIOVector local_qiov;
-
-        qemu_iovec_init(&local_qiov, qiov->niov);
-        qemu_iovec_concat(&local_qiov, qiov, 0, max_bytes);
-
-        ret = bdrv_driver_preadv(bs, offset, max_bytes, &local_qiov, 0);
-
-        qemu_iovec_destroy(&local_qiov);
-    } else {
-        ret = 0;
+        goto out;
     }
 
-    /* Reading beyond end of file is supposed to produce zeroes */
-    if (ret == 0 && total_bytes < offset + bytes) {
-        uint64_t zero_offset = MAX(0, total_bytes - offset);
-        uint64_t zero_bytes = offset + bytes - zero_offset;
-        qemu_iovec_memset(qiov, zero_offset, 0, zero_bytes);
+    while (bytes_remaining) {
+        int num;
+
+        if (max_bytes) {
+            QEMUIOVector local_qiov;
+
+            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
+            assert(num);
+            qemu_iovec_init(&local_qiov, qiov->niov);
+            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);
+
+            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
+                                     num, &local_qiov, 0);
+            max_bytes -= num;
+            qemu_iovec_destroy(&local_qiov);
+        } else {
+            num = bytes_remaining;
+            ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0,
+                                    bytes_remaining);
+        }
+        if (ret < 0) {
+            goto out;
+        }
+        bytes_remaining -= num;
     }
 
 out:
-    return ret;
+    return ret < 0 ? ret : 0;
 }
 
 /*
@@ -1256,7 +1268,8 @@ fail:
 }
 
 /*
- * Forwards an already correctly aligned write request to the BlockDriver.
+ * Forwards an already correctly aligned write request to the BlockDriver,
+ * after possibly fragmenting it.
  */
 static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
     BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
@@ -1268,6 +1281,8 @@ static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
 
     int64_t start_sector = offset >> BDRV_SECTOR_BITS;
     int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
+    uint64_t bytes_remaining = bytes;
+    int max_transfer;
 
     assert(is_power_of_2(align));
     assert((offset & (align - 1)) == 0);
@@ -1275,6 +1290,8 @@ static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
     assert(!qiov || bytes == qiov->size);
     assert((bs->open_flags & BDRV_O_NO_IO) == 0);
     assert(!(flags & ~BDRV_REQ_MASK));
+    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
+                                   align);
 
     waited = wait_serialising_requests(req);
     assert(!waited || !req->serialising);
@@ -1297,9 +1314,34 @@ static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
     } else if (flags & BDRV_REQ_ZERO_WRITE) {
         bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
         ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
-    } else {
+    } else if (bytes <= max_transfer) {
         bdrv_debug_event(bs, BLKDBG_PWRITEV);
         ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
+    } else {
+        bdrv_debug_event(bs, BLKDBG_PWRITEV);
+        while (bytes_remaining) {
+            int num = MIN(bytes_remaining, max_transfer);
+            QEMUIOVector local_qiov;
+            int local_flags = flags;
+
+            assert(num);
+            if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
+                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
+                /* If FUA is going to be emulated by flush, we only
+                 * need to flush on the last iteration */
+                local_flags &= ~BDRV_REQ_FUA;
+            }
+            qemu_iovec_init(&local_qiov, qiov->niov);
+            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);
+
+            ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
+                                      num, &local_qiov, local_flags);
+            qemu_iovec_destroy(&local_qiov);
+            if (ret < 0) {
+                break;
+            }
+            bytes_remaining -= num;
+        }
     }
     bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);
 
@@ -1312,6 +1354,7 @@ static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
 
     if (ret >= 0) {
         bs->total_sectors = MAX(bs->total_sectors, end_sector);
+        ret = 0;
     }
 
     return ret;
@@ -1971,8 +2014,9 @@ BlockAIOCB *bdrv_aio_readv(BdrvChild *child, int64_t sector_num,
 {
     trace_bdrv_aio_readv(child->bs, sector_num, nb_sectors, opaque);
 
-    return bdrv_co_aio_rw_vector(child, sector_num, qiov, nb_sectors, 0,
-                                 cb, opaque, false);
+    assert(nb_sectors << BDRV_SECTOR_BITS == qiov->size);
+    return bdrv_co_aio_prw_vector(child, sector_num << BDRV_SECTOR_BITS, qiov,
+                                  0, cb, opaque, false);
 }
 
 BlockAIOCB *bdrv_aio_writev(BdrvChild *child, int64_t sector_num,
@@ -1981,8 +2025,9 @@ BlockAIOCB *bdrv_aio_writev(BdrvChild *child, int64_t sector_num,
 {
     trace_bdrv_aio_writev(child->bs, sector_num, nb_sectors, opaque);
 
-    return bdrv_co_aio_rw_vector(child, sector_num, qiov, nb_sectors, 0,
-                                 cb, opaque, true);
+    assert(nb_sectors << BDRV_SECTOR_BITS == qiov->size);
+    return bdrv_co_aio_prw_vector(child, sector_num << BDRV_SECTOR_BITS, qiov,
+                                  0, cb, opaque, true);
 }
 
 void bdrv_aio_cancel(BlockAIOCB *acb)
@@ -2018,8 +2063,8 @@ typedef struct BlockRequest {
     union {
         /* Used during read, write, trim */
         struct {
-            int64_t sector;
-            int nb_sectors;
+            int64_t offset;
+            int bytes;
             int flags;
             QEMUIOVector *qiov;
         };
@@ -2083,24 +2128,23 @@ static void coroutine_fn bdrv_co_do_rw(void *opaque)
     BlockAIOCBCoroutine *acb = opaque;
 
     if (!acb->is_write) {
-        acb->req.error = bdrv_co_do_readv(acb->child, acb->req.sector,
-            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
+        acb->req.error = bdrv_co_preadv(acb->child, acb->req.offset,
+            acb->req.qiov->size, acb->req.qiov, acb->req.flags);
     } else {
-        acb->req.error = bdrv_co_do_writev(acb->child, acb->req.sector,
-            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
+        acb->req.error = bdrv_co_pwritev(acb->child, acb->req.offset,
+            acb->req.qiov->size, acb->req.qiov, acb->req.flags);
     }
 
     bdrv_co_complete(acb);
 }
 
-static BlockAIOCB *bdrv_co_aio_rw_vector(BdrvChild *child,
-                                         int64_t sector_num,
+static BlockAIOCB *bdrv_co_aio_prw_vector(BdrvChild *child,
+                                          int64_t offset,
                                           QEMUIOVector *qiov,
-                                         int nb_sectors,
-                                         BdrvRequestFlags flags,
-                                         BlockCompletionFunc *cb,
-                                         void *opaque,
-                                         bool is_write)
+                                          BdrvRequestFlags flags,
+                                          BlockCompletionFunc *cb,
+                                          void *opaque,
+                                          bool is_write)
 {
     Coroutine *co;
     BlockAIOCBCoroutine *acb;
@@ -2109,8 +2153,7 @@ static BlockAIOCB *bdrv_co_aio_rw_vector(BdrvChild *child,
     acb->child = child;
     acb->need_bh = true;
     acb->req.error = -EINPROGRESS;
-    acb->req.sector = sector_num;
-    acb->req.nb_sectors = nb_sectors;
+    acb->req.offset = offset;
     acb->req.qiov = qiov;
     acb->req.flags = flags;
     acb->is_write = is_write;
@@ -2150,30 +2193,29 @@ BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
     return &acb->common;
 }
 
-static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
+static void coroutine_fn bdrv_aio_pdiscard_co_entry(void *opaque)
 {
     BlockAIOCBCoroutine *acb = opaque;
     BlockDriverState *bs = acb->common.bs;
 
-    acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
+    acb->req.error = bdrv_co_pdiscard(bs, acb->req.offset, acb->req.bytes);
     bdrv_co_complete(acb);
 }
 
-BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
-        int64_t sector_num, int nb_sectors,
-        BlockCompletionFunc *cb, void *opaque)
+BlockAIOCB *bdrv_aio_pdiscard(BlockDriverState *bs, int64_t offset, int count,
+                              BlockCompletionFunc *cb, void *opaque)
 {
     Coroutine *co;
     BlockAIOCBCoroutine *acb;
 
-    trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
+    trace_bdrv_aio_pdiscard(bs, offset, count, opaque);
 
     acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
     acb->need_bh = true;
     acb->req.error = -EINPROGRESS;
-    acb->req.sector = sector_num;
-    acb->req.nb_sectors = nb_sectors;
-    co = qemu_coroutine_create(bdrv_aio_discard_co_entry, acb);
+    acb->req.offset = offset;
+    acb->req.bytes = count;
+    co = qemu_coroutine_create(bdrv_aio_pdiscard_co_entry, acb);
     qemu_coroutine_enter(co);
 
     bdrv_co_maybe_schedule_bh(acb);
@@ -2346,28 +2388,29 @@ int bdrv_flush(BlockDriverState *bs)
 
 typedef struct DiscardCo {
     BlockDriverState *bs;
-    int64_t sector_num;
-    int nb_sectors;
+    int64_t offset;
+    int count;
     int ret;
 } DiscardCo;
-static void coroutine_fn bdrv_discard_co_entry(void *opaque)
+static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
 {
     DiscardCo *rwco = opaque;
 
-    rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
+    rwco->ret = bdrv_co_pdiscard(rwco->bs, rwco->offset, rwco->count);
 }
 
-int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
-                                 int nb_sectors)
+int coroutine_fn bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset,
+                                  int count)
 {
     BdrvTrackedRequest req;
-    int max_discard, ret;
+    int max_pdiscard, ret;
+    int head, align;
 
     if (!bs->drv) {
         return -ENOMEDIUM;
     }
 
-    ret = bdrv_check_request(bs, sector_num, nb_sectors);
+    ret = bdrv_check_byte_request(bs, offset, count);
     if (ret < 0) {
         return ret;
     } else if (bs->read_only) {
@@ -2380,50 +2423,47 @@ int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
         return 0;
     }
 
-    if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
+    if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
         return 0;
     }
 
-    tracked_request_begin(&req, bs, sector_num << BDRV_SECTOR_BITS,
-                          nb_sectors << BDRV_SECTOR_BITS, BDRV_TRACKED_DISCARD);
+    /* Discard is advisory, so ignore any unaligned head or tail */
+    align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
+    assert(is_power_of_2(align));
+    head = MIN(count, -offset & (align - 1));
+    if (head) {
+        count -= head;
+        offset += head;
+    }
+    count = QEMU_ALIGN_DOWN(count, align);
+    if (!count) {
+        return 0;
+    }
+
+    tracked_request_begin(&req, bs, offset, count, BDRV_TRACKED_DISCARD);
 
     ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req);
     if (ret < 0) {
         goto out;
     }
 
-    max_discard = MIN_NON_ZERO(bs->bl.max_pdiscard >> BDRV_SECTOR_BITS,
-                               BDRV_REQUEST_MAX_SECTORS);
-    while (nb_sectors > 0) {
+    max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
+                                   align);
+
+    while (count > 0) {
         int ret;
-        int num = nb_sectors;
-        int discard_alignment = bs->bl.pdiscard_alignment >> BDRV_SECTOR_BITS;
+        int num = MIN(count, max_pdiscard);
 
-        /* align request */
-        if (discard_alignment &&
-            num >= discard_alignment &&
-            sector_num % discard_alignment) {
-            if (num > discard_alignment) {
-                num = discard_alignment;
-            }
-            num -= sector_num % discard_alignment;
-        }
-
-        /* limit request size */
-        if (num > max_discard) {
-            num = max_discard;
-        }
-
-        if (bs->drv->bdrv_co_discard) {
-            ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
+        if (bs->drv->bdrv_co_pdiscard) {
+            ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
         } else {
             BlockAIOCB *acb;
             CoroutineIOCompletion co = {
                 .coroutine = qemu_coroutine_self(),
             };
 
-            acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
-                                            bdrv_co_io_em_complete, &co);
+            acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
+                                             bdrv_co_io_em_complete, &co);
             if (acb == NULL) {
                 ret = -EIO;
                 goto out;
@@ -2436,8 +2476,8 @@ int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
             goto out;
         }
 
-        sector_num += num;
-        nb_sectors -= num;
+        offset += num;
+        count -= num;
     }
     ret = 0;
 out:
@@ -2448,23 +2488,23 @@ out:
     return ret;
 }
 
-int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
+int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int count)
 {
     Coroutine *co;
     DiscardCo rwco = {
         .bs = bs,
-        .sector_num = sector_num,
-        .nb_sectors = nb_sectors,
+        .offset = offset,
+        .count = count,
         .ret = NOT_DONE,
     };
 
     if (qemu_in_coroutine()) {
         /* Fast-path if already in coroutine context */
-        bdrv_discard_co_entry(&rwco);
+        bdrv_pdiscard_co_entry(&rwco);
     } else {
         AioContext *aio_context = bdrv_get_aio_context(bs);
 
-        co = qemu_coroutine_create(bdrv_discard_co_entry, &rwco);
+        co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
         qemu_coroutine_enter(co);
         while (rwco.ret == NOT_DONE) {
             aio_poll(aio_context, true);
@@ -586,11 +586,8 @@ iscsi_co_writev_flags(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
         return -EINVAL;
     }
 
-    if (bs->bl.max_transfer &&
-        nb_sectors << BDRV_SECTOR_BITS > bs->bl.max_transfer) {
-        error_report("iSCSI Error: Write of %d sectors exceeds max_xfer_len "
-                     "of %" PRIu32 " bytes", nb_sectors, bs->bl.max_transfer);
-        return -EINVAL;
+    if (bs->bl.max_transfer) {
+        assert(nb_sectors << BDRV_SECTOR_BITS <= bs->bl.max_transfer);
     }
 
     lba = sector_qemu2lun(sector_num, iscsilun);
@@ -754,11 +751,8 @@ static int coroutine_fn iscsi_co_readv(BlockDriverState *bs,
         return -EINVAL;
     }
 
-    if (bs->bl.max_transfer &&
-        nb_sectors << BDRV_SECTOR_BITS > bs->bl.max_transfer) {
-        error_report("iSCSI Error: Read of %d sectors exceeds max_xfer_len "
-                     "of %" PRIu32 " bytes", nb_sectors, bs->bl.max_transfer);
-        return -EINVAL;
+    if (bs->bl.max_transfer) {
+        assert(nb_sectors << BDRV_SECTOR_BITS <= bs->bl.max_transfer);
     }
 
     /* if cache.direct is off and we have a valid entry in our allocation map
@@ -1048,29 +1042,26 @@ iscsi_getlength(BlockDriverState *bs)
 }
 
 static int
-coroutine_fn iscsi_co_discard(BlockDriverState *bs, int64_t sector_num,
-                              int nb_sectors)
+coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, int count)
 {
     IscsiLun *iscsilun = bs->opaque;
     struct IscsiTask iTask;
     struct unmap_list list;
 
-    if (!is_sector_request_lun_aligned(sector_num, nb_sectors, iscsilun)) {
-        return -EINVAL;
-    }
+    assert(is_byte_request_lun_aligned(offset, count, iscsilun));
 
     if (!iscsilun->lbp.lbpu) {
         /* UNMAP is not supported by the target */
         return 0;
     }
 
-    list.lba = sector_qemu2lun(sector_num, iscsilun);
-    list.num = sector_qemu2lun(nb_sectors, iscsilun);
+    list.lba = offset / iscsilun->block_size;
+    list.num = count / iscsilun->block_size;
 
     iscsi_co_init_iscsitask(iscsilun, &iTask);
 retry:
     if (iscsi_unmap_task(iscsilun->iscsi, iscsilun->lun, 0, 0, &list, 1,
                          iscsi_co_generic_cb, &iTask) == NULL) {
         return -ENOMEM;
     }
 
@@ -1100,7 +1091,8 @@ retry:
         return iTask.err_code;
     }
 
-    iscsi_allocmap_set_invalid(iscsilun, sector_num, nb_sectors);
+    iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS,
+                               count >> BDRV_SECTOR_BITS);
 
     return 0;
 }
@@ -2004,7 +1996,7 @@ static BlockDriver bdrv_iscsi = {
     .bdrv_refresh_limits = iscsi_refresh_limits,
 
     .bdrv_co_get_block_status = iscsi_co_get_block_status,
-    .bdrv_co_discard       = iscsi_co_discard,
+    .bdrv_co_pdiscard      = iscsi_co_pdiscard,
     .bdrv_co_pwrite_zeroes = iscsi_co_pwrite_zeroes,
     .bdrv_co_readv         = iscsi_co_readv,
     .bdrv_co_writev_flags  = iscsi_co_writev_flags,
@@ -304,8 +304,9 @@ static void mirror_do_zero_or_discard(MirrorBlockJob *s,
     s->in_flight++;
     s->sectors_in_flight += nb_sectors;
     if (is_discard) {
-        blk_aio_discard(s->target, sector_num, op->nb_sectors,
-                        mirror_write_complete, op);
+        blk_aio_pdiscard(s->target, sector_num << BDRV_SECTOR_BITS,
+                         op->nb_sectors << BDRV_SECTOR_BITS,
+                         mirror_write_complete, op);
     } else {
         blk_aio_pwrite_zeroes(s->target, sector_num * BDRV_SECTOR_SIZE,
                               op->nb_sectors * BDRV_SECTOR_SIZE,
@@ -116,7 +116,7 @@ static void nbd_restart_write(void *opaque)
 
 static int nbd_co_send_request(BlockDriverState *bs,
                                struct nbd_request *request,
-                               QEMUIOVector *qiov, int offset)
+                               QEMUIOVector *qiov)
 {
     NbdClientSession *s = nbd_get_client_session(bs);
     AioContext *aio_context;
@@ -149,8 +149,8 @@ static int nbd_co_send_request(BlockDriverState *bs,
         qio_channel_set_cork(s->ioc, true);
         rc = nbd_send_request(s->ioc, request);
         if (rc >= 0) {
-            ret = nbd_wr_syncv(s->ioc, qiov->iov, qiov->niov,
-                               offset, request->len, 0);
+            ret = nbd_wr_syncv(s->ioc, qiov->iov, qiov->niov, request->len,
+                               false);
             if (ret != request->len) {
                 rc = -EIO;
             }
@@ -167,8 +167,9 @@ static int nbd_co_send_request(BlockDriverState *bs,
 }
 
 static void nbd_co_receive_reply(NbdClientSession *s,
-                                 struct nbd_request *request, struct nbd_reply *reply,
-                                 QEMUIOVector *qiov, int offset)
+                                 struct nbd_request *request,
+                                 struct nbd_reply *reply,
+                                 QEMUIOVector *qiov)
 {
     int ret;
 
@@ -181,8 +182,8 @@ static void nbd_co_receive_reply(NbdClientSession *s,
         reply->error = EIO;
     } else {
         if (qiov && reply->error == 0) {
-            ret = nbd_wr_syncv(s->ioc, qiov->iov, qiov->niov,
-                               offset, request->len, 1);
+            ret = nbd_wr_syncv(s->ioc, qiov->iov, qiov->niov, request->len,
+                               true);
             if (ret != request->len) {
                 reply->error = EIO;
             }
@@ -217,36 +218,41 @@ static void nbd_coroutine_end(NbdClientSession *s,
     }
 }
 
-static int nbd_co_readv_1(BlockDriverState *bs, int64_t sector_num,
-                          int nb_sectors, QEMUIOVector *qiov,
-                          int offset)
+int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset,
+                         uint64_t bytes, QEMUIOVector *qiov, int flags)
 {
     NbdClientSession *client = nbd_get_client_session(bs);
-    struct nbd_request request = { .type = NBD_CMD_READ };
+    struct nbd_request request = {
+        .type = NBD_CMD_READ,
+        .from = offset,
+        .len = bytes,
+    };
     struct nbd_reply reply;
     ssize_t ret;
 
-    request.from = sector_num * 512;
-    request.len = nb_sectors * 512;
+    assert(bytes <= NBD_MAX_BUFFER_SIZE);
+    assert(!flags);
 
     nbd_coroutine_start(client, &request);
-    ret = nbd_co_send_request(bs, &request, NULL, 0);
+    ret = nbd_co_send_request(bs, &request, NULL);
     if (ret < 0) {
         reply.error = -ret;
     } else {
-        nbd_co_receive_reply(client, &request, &reply, qiov, offset);
+        nbd_co_receive_reply(client, &request, &reply, qiov);
     }
     nbd_coroutine_end(client, &request);
     return -reply.error;
-
 }
 
-static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num,
-                           int nb_sectors, QEMUIOVector *qiov,
-                           int offset, int flags)
+int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset,
+                          uint64_t bytes, QEMUIOVector *qiov, int flags)
 {
     NbdClientSession *client = nbd_get_client_session(bs);
-    struct nbd_request request = { .type = NBD_CMD_WRITE };
+    struct nbd_request request = {
+        .type = NBD_CMD_WRITE,
+        .from = offset,
+        .len = bytes,
+    };
     struct nbd_reply reply;
     ssize_t ret;
 
@@ -255,55 +261,19 @@ static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num,
         request.type |= NBD_CMD_FLAG_FUA;
     }
 
-    request.from = sector_num * 512;
-    request.len = nb_sectors * 512;
+    assert(bytes <= NBD_MAX_BUFFER_SIZE);
 
     nbd_coroutine_start(client, &request);
-    ret = nbd_co_send_request(bs, &request, qiov, offset);
+    ret = nbd_co_send_request(bs, &request, qiov);
     if (ret < 0) {
         reply.error = -ret;
     } else {
-        nbd_co_receive_reply(client, &request, &reply, NULL, 0);
+        nbd_co_receive_reply(client, &request, &reply, NULL);
     }
     nbd_coroutine_end(client, &request);
     return -reply.error;
 }
 
-int nbd_client_co_readv(BlockDriverState *bs, int64_t sector_num,
-                        int nb_sectors, QEMUIOVector *qiov)
-{
-    int offset = 0;
-    int ret;
-    while (nb_sectors > NBD_MAX_SECTORS) {
-        ret = nbd_co_readv_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset);
-        if (ret < 0) {
-            return ret;
-        }
-        offset += NBD_MAX_SECTORS * 512;
-        sector_num += NBD_MAX_SECTORS;
-        nb_sectors -= NBD_MAX_SECTORS;
-    }
-    return nbd_co_readv_1(bs, sector_num, nb_sectors, qiov, offset);
-}
-
-int nbd_client_co_writev(BlockDriverState *bs, int64_t sector_num,
-                         int nb_sectors, QEMUIOVector *qiov, int flags)
-{
-    int offset = 0;
-    int ret;
-    while (nb_sectors > NBD_MAX_SECTORS) {
-        ret = nbd_co_writev_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset,
-                              flags);
-        if (ret < 0) {
-            return ret;
-        }
-        offset += NBD_MAX_SECTORS * 512;
-        sector_num += NBD_MAX_SECTORS;
-        nb_sectors -= NBD_MAX_SECTORS;
-    }
-    return nbd_co_writev_1(bs, sector_num, nb_sectors, qiov, offset, flags);
-}
-
 int nbd_client_co_flush(BlockDriverState *bs)
 {
     NbdClientSession *client = nbd_get_client_session(bs);
@@ -319,36 +289,37 @@ int nbd_client_co_flush(BlockDriverState *bs)
     request.len = 0;
 
     nbd_coroutine_start(client, &request);
-    ret = nbd_co_send_request(bs, &request, NULL, 0);
+    ret = nbd_co_send_request(bs, &request, NULL);
     if (ret < 0) {
         reply.error = -ret;
     } else {
-        nbd_co_receive_reply(client, &request, &reply, NULL, 0);
+        nbd_co_receive_reply(client, &request, &reply, NULL);
     }
     nbd_coroutine_end(client, &request);
     return -reply.error;
 }
 
-int nbd_client_co_discard(BlockDriverState *bs, int64_t sector_num,
-                          int nb_sectors)
+int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int count)
 {
     NbdClientSession *client = nbd_get_client_session(bs);
-    struct nbd_request request = { .type = NBD_CMD_TRIM };
+    struct nbd_request request = {
+        .type = NBD_CMD_TRIM,
+        .from = offset,
+        .len = count,
+    };
     struct nbd_reply reply;
     ssize_t ret;
 
     if (!(client->nbdflags & NBD_FLAG_SEND_TRIM)) {
         return 0;
     }
-    request.from = sector_num * 512;
-    request.len = nb_sectors * 512;
 
     nbd_coroutine_start(client, &request);
-    ret = nbd_co_send_request(bs, &request, NULL, 0);
+    ret = nbd_co_send_request(bs, &request, NULL);
     if (ret < 0) {
         reply.error = -ret;
     } else {
-        nbd_co_receive_reply(client, &request, &reply, NULL, 0);
+        nbd_co_receive_reply(client, &request, &reply, NULL);
     }
     nbd_coroutine_end(client, &request);
     return -reply.error;
@@ -44,13 +44,12 @@ int nbd_client_init(BlockDriverState *bs,
                     Error **errp);
 void nbd_client_close(BlockDriverState *bs);
 
-int nbd_client_co_discard(BlockDriverState *bs, int64_t sector_num,
-                          int nb_sectors);
+int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int count);
 int nbd_client_co_flush(BlockDriverState *bs);
-int nbd_client_co_writev(BlockDriverState *bs, int64_t sector_num,
-                         int nb_sectors, QEMUIOVector *qiov, int flags);
-int nbd_client_co_readv(BlockDriverState *bs, int64_t sector_num,
-                        int nb_sectors, QEMUIOVector *qiov);
+int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset,
+                          uint64_t bytes, QEMUIOVector *qiov, int flags);
+int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset,
+                         uint64_t bytes, QEMUIOVector *qiov, int flags);
 
 void nbd_client_detach_aio_context(BlockDriverState *bs);
 void nbd_client_attach_aio_context(BlockDriverState *bs,
block/nbd.c (30 changed lines)
@@ -349,12 +349,6 @@ static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
     return ret;
 }
 
-static int nbd_co_readv(BlockDriverState *bs, int64_t sector_num,
-                        int nb_sectors, QEMUIOVector *qiov)
-{
-    return nbd_client_co_readv(bs, sector_num, nb_sectors, qiov);
-}
-
 static int nbd_co_flush(BlockDriverState *bs)
 {
     return nbd_client_co_flush(bs);
@@ -366,12 +360,6 @@ static void nbd_refresh_limits(BlockDriverState *bs, Error **errp)
     bs->bl.max_transfer = NBD_MAX_BUFFER_SIZE;
 }
 
-static int nbd_co_discard(BlockDriverState *bs, int64_t sector_num,
-                          int nb_sectors)
-{
-    return nbd_client_co_discard(bs, sector_num, nb_sectors);
-}
-
 static void nbd_close(BlockDriverState *bs)
 {
     nbd_client_close(bs);
@@ -450,11 +438,11 @@ static BlockDriver bdrv_nbd = {
     .instance_size              = sizeof(BDRVNBDState),
     .bdrv_parse_filename        = nbd_parse_filename,
     .bdrv_file_open             = nbd_open,
-    .bdrv_co_readv              = nbd_co_readv,
-    .bdrv_co_writev_flags       = nbd_client_co_writev,
+    .bdrv_co_preadv             = nbd_client_co_preadv,
+    .bdrv_co_pwritev            = nbd_client_co_pwritev,
     .bdrv_close                 = nbd_close,
     .bdrv_co_flush_to_os        = nbd_co_flush,
-    .bdrv_co_discard            = nbd_co_discard,
+    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
     .bdrv_refresh_limits        = nbd_refresh_limits,
     .bdrv_getlength             = nbd_getlength,
     .bdrv_detach_aio_context    = nbd_detach_aio_context,
@@ -468,11 +456,11 @@ static BlockDriver bdrv_nbd_tcp = {
     .instance_size              = sizeof(BDRVNBDState),
     .bdrv_parse_filename        = nbd_parse_filename,
     .bdrv_file_open             = nbd_open,
-    .bdrv_co_readv              = nbd_co_readv,
-    .bdrv_co_writev_flags       = nbd_client_co_writev,
+    .bdrv_co_preadv             = nbd_client_co_preadv,
+    .bdrv_co_pwritev            = nbd_client_co_pwritev,
     .bdrv_close                 = nbd_close,
     .bdrv_co_flush_to_os        = nbd_co_flush,
-    .bdrv_co_discard            = nbd_co_discard,
+    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
     .bdrv_refresh_limits        = nbd_refresh_limits,
     .bdrv_getlength             = nbd_getlength,
     .bdrv_detach_aio_context    = nbd_detach_aio_context,
@@ -486,11 +474,11 @@ static BlockDriver bdrv_nbd_unix = {
     .instance_size              = sizeof(BDRVNBDState),
     .bdrv_parse_filename        = nbd_parse_filename,
     .bdrv_file_open             = nbd_open,
-    .bdrv_co_readv              = nbd_co_readv,
-    .bdrv_co_writev_flags       = nbd_client_co_writev,
+    .bdrv_co_preadv             = nbd_client_co_preadv,
+    .bdrv_co_pwritev            = nbd_client_co_pwritev,
     .bdrv_close                 = nbd_close,
     .bdrv_co_flush_to_os        = nbd_co_flush,
-    .bdrv_co_discard            = nbd_co_discard,
+    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
     .bdrv_refresh_limits        = nbd_refresh_limits,
     .bdrv_getlength             = nbd_getlength,
     .bdrv_detach_aio_context    = nbd_detach_aio_context,
@@ -615,9 +615,7 @@ void qcow2_process_discards(BlockDriverState *bs, int ret)
 
         /* Discard is optional, ignore the return value */
         if (ret >= 0) {
-            bdrv_discard(bs->file->bs,
-                         d->offset >> BDRV_SECTOR_BITS,
-                         d->bytes >> BDRV_SECTOR_BITS);
+            bdrv_pdiscard(bs->file->bs, d->offset, d->bytes);
         }
 
         g_free(d);
@@ -2479,15 +2479,15 @@ static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs,
     return ret;
 }
 
-static coroutine_fn int qcow2_co_discard(BlockDriverState *bs,
-    int64_t sector_num, int nb_sectors)
+static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs,
+    int64_t offset, int count)
 {
     int ret;
     BDRVQcow2State *s = bs->opaque;
 
     qemu_co_mutex_lock(&s->lock);
-    ret = qcow2_discard_clusters(bs, sector_num << BDRV_SECTOR_BITS,
-                                 nb_sectors, QCOW2_DISCARD_REQUEST, false);
+    ret = qcow2_discard_clusters(bs, offset, count >> BDRV_SECTOR_BITS,
+                                 QCOW2_DISCARD_REQUEST, false);
     qemu_co_mutex_unlock(&s->lock);
     return ret;
 }
@@ -3410,7 +3410,7 @@ BlockDriver bdrv_qcow2 = {
     .bdrv_co_flush_to_os    = qcow2_co_flush_to_os,
 
     .bdrv_co_pwrite_zeroes  = qcow2_co_pwrite_zeroes,
-    .bdrv_co_discard        = qcow2_co_discard,
+    .bdrv_co_pdiscard       = qcow2_co_pdiscard,
     .bdrv_truncate          = qcow2_truncate,
     .bdrv_write_compressed  = qcow2_write_compressed,
     .bdrv_make_empty        = qcow2_make_empty,
@@ -1214,7 +1214,7 @@ static int paio_submit_co(BlockDriverState *bs, int fd,
 }
 
 static BlockAIOCB *paio_submit(BlockDriverState *bs, int fd,
-        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
+        int64_t offset, QEMUIOVector *qiov, int count,
         BlockCompletionFunc *cb, void *opaque, int type)
 {
     RawPosixAIOData *acb = g_new(RawPosixAIOData, 1);
@@ -1224,8 +1224,8 @@ static BlockAIOCB *paio_submit(BlockDriverState *bs, int fd,
     acb->aio_type = type;
     acb->aio_fildes = fd;
 
-    acb->aio_nbytes = nb_sectors * BDRV_SECTOR_SIZE;
-    acb->aio_offset = sector_num * BDRV_SECTOR_SIZE;
+    acb->aio_nbytes = count;
+    acb->aio_offset = offset;
 
     if (qiov) {
         acb->aio_iov = qiov->iov;
@@ -1233,7 +1233,7 @@ static BlockAIOCB *paio_submit(BlockDriverState *bs, int fd,
         assert(qiov->size == acb->aio_nbytes);
     }
 
-    trace_paio_submit(acb, opaque, sector_num, nb_sectors, type);
+    trace_paio_submit(acb, opaque, offset, count, type);
     pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
     return thread_pool_submit_aio(pool, aio_worker, acb, cb, opaque);
 }
@@ -1786,13 +1786,13 @@ static int64_t coroutine_fn raw_co_get_block_status(BlockDriverState *bs,
     return ret | BDRV_BLOCK_OFFSET_VALID | start;
 }
 
-static coroutine_fn BlockAIOCB *raw_aio_discard(BlockDriverState *bs,
-    int64_t sector_num, int nb_sectors,
+static coroutine_fn BlockAIOCB *raw_aio_pdiscard(BlockDriverState *bs,
+    int64_t offset, int count,
     BlockCompletionFunc *cb, void *opaque)
 {
     BDRVRawState *s = bs->opaque;
 
-    return paio_submit(bs, s->fd, sector_num, NULL, nb_sectors,
+    return paio_submit(bs, s->fd, offset, NULL, count,
                        cb, opaque, QEMU_AIO_DISCARD);
 }
 
@@ -1864,7 +1864,7 @@ BlockDriver bdrv_file = {
     .bdrv_co_preadv         = raw_co_preadv,
     .bdrv_co_pwritev        = raw_co_pwritev,
     .bdrv_aio_flush         = raw_aio_flush,
-    .bdrv_aio_discard       = raw_aio_discard,
+    .bdrv_aio_pdiscard      = raw_aio_pdiscard,
     .bdrv_refresh_limits    = raw_refresh_limits,
     .bdrv_io_plug           = raw_aio_plug,
     .bdrv_io_unplug         = raw_aio_unplug,
@@ -2203,8 +2203,8 @@ static int fd_open(BlockDriverState *bs)
     return -EIO;
 }
 
-static coroutine_fn BlockAIOCB *hdev_aio_discard(BlockDriverState *bs,
-    int64_t sector_num, int nb_sectors,
+static coroutine_fn BlockAIOCB *hdev_aio_pdiscard(BlockDriverState *bs,
+    int64_t offset, int count,
     BlockCompletionFunc *cb, void *opaque)
 {
     BDRVRawState *s = bs->opaque;
@@ -2212,7 +2212,7 @@ static coroutine_fn BlockAIOCB *hdev_aio_discard(BlockDriverState *bs,
     if (fd_open(bs) < 0) {
         return NULL;
     }
-    return paio_submit(bs, s->fd, sector_num, NULL, nb_sectors,
+    return paio_submit(bs, s->fd, offset, NULL, count,
                        cb, opaque, QEMU_AIO_DISCARD|QEMU_AIO_BLKDEV);
 }
 
@ -2307,7 +2307,7 @@ static BlockDriver bdrv_host_device = {
|
||||||
.bdrv_co_preadv = raw_co_preadv,
|
.bdrv_co_preadv = raw_co_preadv,
|
||||||
.bdrv_co_pwritev = raw_co_pwritev,
|
.bdrv_co_pwritev = raw_co_pwritev,
|
||||||
.bdrv_aio_flush = raw_aio_flush,
|
.bdrv_aio_flush = raw_aio_flush,
|
||||||
.bdrv_aio_discard = hdev_aio_discard,
|
.bdrv_aio_pdiscard = hdev_aio_pdiscard,
|
||||||
.bdrv_refresh_limits = raw_refresh_limits,
|
.bdrv_refresh_limits = raw_refresh_limits,
|
||||||
.bdrv_io_plug = raw_aio_plug,
|
.bdrv_io_plug = raw_aio_plug,
|
||||||
.bdrv_io_unplug = raw_aio_unplug,
|
.bdrv_io_unplug = raw_aio_unplug,
|
||||||
|
|
|
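Throughout these raw-posix hunks the conversion is purely mechanical: callers that still think in 512-byte sectors shift by BDRV_SECTOR_BITS to produce the byte-based offset/count pair that paio_submit() now expects. A minimal standalone sketch of that arithmetic, with the constants defined locally as stand-ins for QEMU's headers:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins mirroring QEMU's block-layer constants. */
#define BDRV_SECTOR_BITS 9
#define BDRV_SECTOR_SIZE (1ULL << BDRV_SECTOR_BITS)

/* Convert a sector-based request into the byte-based (offset, count)
 * pair used by the reworked paio_submit() above. */
static void sectors_to_bytes(int64_t sector_num, int nb_sectors,
                             int64_t *offset, int *count)
{
    *offset = sector_num << BDRV_SECTOR_BITS;
    *count = nb_sectors << BDRV_SECTOR_BITS;
}

int main(void)
{
    int64_t offset;
    int count;

    sectors_to_bytes(2048, 16, &offset, &count);
    assert(offset == 2048 * BDRV_SECTOR_SIZE && count == 16 * 512);
    printf("sectors (2048, 16) -> bytes (%" PRId64 ", %d)\n", offset, count);
    return 0;
}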
@@ -142,7 +142,7 @@ static int aio_worker(void *arg)
 }
 
 static BlockAIOCB *paio_submit(BlockDriverState *bs, HANDLE hfile,
-        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
+        int64_t offset, QEMUIOVector *qiov, int count,
         BlockCompletionFunc *cb, void *opaque, int type)
 {
     RawWin32AIOData *acb = g_new(RawWin32AIOData, 1);
@@ -155,11 +155,12 @@ static BlockAIOCB *paio_submit(BlockDriverState *bs, HANDLE hfile,
     if (qiov) {
         acb->aio_iov = qiov->iov;
         acb->aio_niov = qiov->niov;
+        assert(qiov->size == count);
     }
-    acb->aio_nbytes = nb_sectors * 512;
-    acb->aio_offset = sector_num * 512;
+    acb->aio_nbytes = count;
+    acb->aio_offset = offset;
 
-    trace_paio_submit(acb, opaque, sector_num, nb_sectors, type);
+    trace_paio_submit(acb, opaque, offset, count, type);
     pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
     return thread_pool_submit_aio(pool, aio_worker, acb, cb, opaque);
 }
@@ -378,9 +379,10 @@ static BlockAIOCB *raw_aio_readv(BlockDriverState *bs,
     BDRVRawState *s = bs->opaque;
     if (s->aio) {
         return win32_aio_submit(bs, s->aio, s->hfile, sector_num, qiov,
                                 nb_sectors, cb, opaque, QEMU_AIO_READ);
     } else {
-        return paio_submit(bs, s->hfile, sector_num, qiov, nb_sectors,
+        return paio_submit(bs, s->hfile, sector_num << BDRV_SECTOR_BITS, qiov,
+                           nb_sectors << BDRV_SECTOR_BITS,
                            cb, opaque, QEMU_AIO_READ);
     }
 }
@@ -392,9 +394,10 @@ static BlockAIOCB *raw_aio_writev(BlockDriverState *bs,
     BDRVRawState *s = bs->opaque;
     if (s->aio) {
         return win32_aio_submit(bs, s->aio, s->hfile, sector_num, qiov,
                                 nb_sectors, cb, opaque, QEMU_AIO_WRITE);
     } else {
-        return paio_submit(bs, s->hfile, sector_num, qiov, nb_sectors,
+        return paio_submit(bs, s->hfile, sector_num << BDRV_SECTOR_BITS, qiov,
+                           nb_sectors << BDRV_SECTOR_BITS,
                            cb, opaque, QEMU_AIO_WRITE);
     }
 }
@@ -50,33 +50,30 @@ static int raw_reopen_prepare(BDRVReopenState *reopen_state,
     return 0;
 }
 
-static int coroutine_fn raw_co_readv(BlockDriverState *bs, int64_t sector_num,
-                                     int nb_sectors, QEMUIOVector *qiov)
+static int coroutine_fn raw_co_preadv(BlockDriverState *bs, uint64_t offset,
+                                      uint64_t bytes, QEMUIOVector *qiov,
+                                      int flags)
 {
     BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
-    return bdrv_co_readv(bs->file, sector_num, nb_sectors, qiov);
+    return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);
 }
 
-static int coroutine_fn
-raw_co_writev_flags(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
-                    QEMUIOVector *qiov, int flags)
+static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, uint64_t offset,
+                                       uint64_t bytes, QEMUIOVector *qiov,
+                                       int flags)
 {
     void *buf = NULL;
     BlockDriver *drv;
     QEMUIOVector local_qiov;
     int ret;
 
-    if (bs->probed && sector_num == 0) {
-        /* As long as these conditions are true, we can't get partial writes to
-         * the probe buffer and can just directly check the request. */
+    if (bs->probed && offset < BLOCK_PROBE_BUF_SIZE && bytes) {
+        /* Handling partial writes would be a pain - so we just
+         * require that guests have 512-byte request alignment if
+         * probing occurred */
         QEMU_BUILD_BUG_ON(BLOCK_PROBE_BUF_SIZE != 512);
         QEMU_BUILD_BUG_ON(BDRV_SECTOR_SIZE != 512);
-
-        if (nb_sectors == 0) {
-            /* qemu_iovec_to_buf() would fail, but we want to return success
-             * instead of -EINVAL in this case. */
-            return 0;
-        }
+        assert(offset == 0 && bytes >= BLOCK_PROBE_BUF_SIZE);
 
         buf = qemu_try_blockalign(bs->file->bs, 512);
         if (!buf) {
@@ -105,8 +102,7 @@ raw_co_writev_flags(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
     }
 
     BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
-    ret = bdrv_co_pwritev(bs->file, sector_num * BDRV_SECTOR_SIZE,
-                          nb_sectors * BDRV_SECTOR_SIZE, qiov, flags);
+    ret = bdrv_co_pwritev(bs->file, offset, bytes, qiov, flags);
 
 fail:
     if (qiov == &local_qiov) {
@@ -134,10 +130,10 @@ static int coroutine_fn raw_co_pwrite_zeroes(BlockDriverState *bs,
     return bdrv_co_pwrite_zeroes(bs->file, offset, count, flags);
 }
 
-static int coroutine_fn raw_co_discard(BlockDriverState *bs,
-                                       int64_t sector_num, int nb_sectors)
+static int coroutine_fn raw_co_pdiscard(BlockDriverState *bs,
+                                        int64_t offset, int count)
 {
-    return bdrv_co_discard(bs->file->bs, sector_num, nb_sectors);
+    return bdrv_co_pdiscard(bs->file->bs, offset, count);
 }
 
 static int64_t raw_getlength(BlockDriverState *bs)
@@ -150,6 +146,16 @@ static int raw_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
     return bdrv_get_info(bs->file->bs, bdi);
 }
 
+static void raw_refresh_limits(BlockDriverState *bs, Error **errp)
+{
+    if (bs->probed) {
+        /* To make it easier to protect the first sector, any probed
+         * image is restricted to read-modify-write on sub-sector
+         * operations. */
+        bs->bl.request_alignment = BDRV_SECTOR_SIZE;
+    }
+}
+
 static int raw_truncate(BlockDriverState *bs, int64_t offset)
 {
     return bdrv_truncate(bs->file->bs, offset);
@@ -192,8 +198,10 @@ static int raw_open(BlockDriverState *bs, QDict *options, int flags,
                     Error **errp)
 {
     bs->sg = bs->file->bs->sg;
-    bs->supported_write_flags = BDRV_REQ_FUA;
-    bs->supported_zero_flags = BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP;
+    bs->supported_write_flags = BDRV_REQ_FUA &
+        bs->file->bs->supported_write_flags;
+    bs->supported_zero_flags = (BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP) &
+        bs->file->bs->supported_zero_flags;
 
     if (bs->probed && !bdrv_is_read_only(bs)) {
         fprintf(stderr,
@@ -238,15 +246,16 @@ BlockDriver bdrv_raw = {
     .bdrv_open = &raw_open,
     .bdrv_close = &raw_close,
     .bdrv_create = &raw_create,
-    .bdrv_co_readv = &raw_co_readv,
-    .bdrv_co_writev_flags = &raw_co_writev_flags,
+    .bdrv_co_preadv = &raw_co_preadv,
+    .bdrv_co_pwritev = &raw_co_pwritev,
     .bdrv_co_pwrite_zeroes = &raw_co_pwrite_zeroes,
-    .bdrv_co_discard = &raw_co_discard,
+    .bdrv_co_pdiscard = &raw_co_pdiscard,
     .bdrv_co_get_block_status = &raw_co_get_block_status,
     .bdrv_truncate = &raw_truncate,
     .bdrv_getlength = &raw_getlength,
     .has_variable_length = true,
     .bdrv_get_info = &raw_get_info,
+    .bdrv_refresh_limits = &raw_refresh_limits,
     .bdrv_probe_blocksizes = &raw_probe_blocksizes,
     .bdrv_probe_geometry = &raw_probe_geometry,
     .bdrv_media_changed = &raw_media_changed,
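The new raw_refresh_limits() above asks the block layer for 512-byte request alignment on probed images, which is what lets raw_co_pwritev() simply assert offset == 0 && bytes >= BLOCK_PROBE_BUF_SIZE instead of handling partial writes to the probe buffer. A small, self-contained illustration of the alignment property being relied on; the constant is a local stand-in, not QEMU's header:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for QEMU's 512-byte sector size. */
#define BDRV_SECTOR_SIZE 512u

/* A byte-based request that starts and ends on a sector boundary can
 * never partially overlap the 512-byte probe buffer in sector 0. */
static bool request_is_aligned(uint64_t offset, uint64_t bytes)
{
    return (offset % BDRV_SECTOR_SIZE) == 0 &&
           (bytes % BDRV_SECTOR_SIZE) == 0;
}

int main(void)
{
    assert(request_is_aligned(0, 4096));    /* covers the whole probe buffer */
    assert(!request_is_aligned(100, 512));  /* unaligned head */
    assert(!request_is_aligned(512, 100));  /* unaligned tail */
    printf("alignment checks passed\n");
    return 0;
}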
block/rbd.c
@@ -649,9 +649,9 @@ static int rbd_aio_flush_wrapper(rbd_image_t image,
 }
 
 static BlockAIOCB *rbd_start_aio(BlockDriverState *bs,
-                                 int64_t sector_num,
+                                 int64_t off,
                                  QEMUIOVector *qiov,
-                                 int nb_sectors,
+                                 int64_t size,
                                  BlockCompletionFunc *cb,
                                  void *opaque,
                                  RBDAIOCmd cmd)
@@ -659,7 +659,6 @@ static BlockAIOCB *rbd_start_aio(BlockDriverState *bs,
     RBDAIOCB *acb;
     RADOSCB *rcb = NULL;
     rbd_completion_t c;
-    int64_t off, size;
     char *buf;
     int r;
 
@@ -668,6 +667,7 @@ static BlockAIOCB *rbd_start_aio(BlockDriverState *bs,
     acb = qemu_aio_get(&rbd_aiocb_info, bs, cb, opaque);
     acb->cmd = cmd;
     acb->qiov = qiov;
+    assert(!qiov || qiov->size == size);
     if (cmd == RBD_AIO_DISCARD || cmd == RBD_AIO_FLUSH) {
         acb->bounce = NULL;
     } else {
@@ -687,9 +687,6 @@ static BlockAIOCB *rbd_start_aio(BlockDriverState *bs,
 
     buf = acb->bounce;
 
-    off = sector_num * BDRV_SECTOR_SIZE;
-    size = nb_sectors * BDRV_SECTOR_SIZE;
-
     rcb = g_new(RADOSCB, 1);
     rcb->acb = acb;
     rcb->buf = buf;
@@ -739,7 +736,8 @@ static BlockAIOCB *qemu_rbd_aio_readv(BlockDriverState *bs,
                                       BlockCompletionFunc *cb,
                                       void *opaque)
 {
-    return rbd_start_aio(bs, sector_num, qiov, nb_sectors, cb, opaque,
+    return rbd_start_aio(bs, sector_num << BDRV_SECTOR_BITS, qiov,
+                         nb_sectors << BDRV_SECTOR_BITS, cb, opaque,
                          RBD_AIO_READ);
 }
 
@@ -750,7 +748,8 @@ static BlockAIOCB *qemu_rbd_aio_writev(BlockDriverState *bs,
                                        BlockCompletionFunc *cb,
                                        void *opaque)
 {
-    return rbd_start_aio(bs, sector_num, qiov, nb_sectors, cb, opaque,
+    return rbd_start_aio(bs, sector_num << BDRV_SECTOR_BITS, qiov,
+                         nb_sectors << BDRV_SECTOR_BITS, cb, opaque,
                          RBD_AIO_WRITE);
 }
 
@@ -931,13 +930,13 @@ static int qemu_rbd_snap_list(BlockDriverState *bs,
 }
 
 #ifdef LIBRBD_SUPPORTS_DISCARD
-static BlockAIOCB* qemu_rbd_aio_discard(BlockDriverState *bs,
-                                        int64_t sector_num,
-                                        int nb_sectors,
+static BlockAIOCB *qemu_rbd_aio_pdiscard(BlockDriverState *bs,
+                                         int64_t offset,
+                                         int count,
                                         BlockCompletionFunc *cb,
                                         void *opaque)
 {
-    return rbd_start_aio(bs, sector_num, NULL, nb_sectors, cb, opaque,
+    return rbd_start_aio(bs, offset, NULL, count, cb, opaque,
                          RBD_AIO_DISCARD);
 }
 #endif
@@ -1001,7 +1000,7 @@ static BlockDriver bdrv_rbd = {
 #endif
 
 #ifdef LIBRBD_SUPPORTS_DISCARD
-    .bdrv_aio_discard = qemu_rbd_aio_discard,
+    .bdrv_aio_pdiscard = qemu_rbd_aio_pdiscard,
 #endif
 
     .bdrv_snapshot_create = qemu_rbd_snap_create,
@@ -2800,8 +2800,8 @@ static int sd_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov,
 }
 
 
-static coroutine_fn int sd_co_discard(BlockDriverState *bs, int64_t sector_num,
-                                      int nb_sectors)
+static coroutine_fn int sd_co_pdiscard(BlockDriverState *bs, int64_t offset,
+                                       int count)
 {
     SheepdogAIOCB *acb;
     BDRVSheepdogState *s = bs->opaque;
@@ -2811,7 +2811,7 @@ static coroutine_fn int sd_co_discard(BlockDriverState *bs, int64_t sector_num,
     uint32_t zero = 0;
 
     if (!s->discard_supported) {
         return 0;
     }
 
     memset(&discard_iov, 0, sizeof(discard_iov));
@@ -2820,7 +2820,10 @@ static coroutine_fn int sd_co_discard(BlockDriverState *bs, int64_t sector_num,
     iov.iov_len = sizeof(zero);
     discard_iov.iov = &iov;
     discard_iov.niov = 1;
-    acb = sd_aio_setup(bs, &discard_iov, sector_num, nb_sectors);
+    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
+    assert((count & (BDRV_SECTOR_SIZE - 1)) == 0);
+    acb = sd_aio_setup(bs, &discard_iov, offset >> BDRV_SECTOR_BITS,
+                       count >> BDRV_SECTOR_BITS);
     acb->aiocb_type = AIOCB_DISCARD_OBJ;
     acb->aio_done_func = sd_finish_aiocb;
 
@@ -2954,7 +2957,7 @@ static BlockDriver bdrv_sheepdog = {
     .bdrv_co_readv = sd_co_readv,
     .bdrv_co_writev = sd_co_writev,
     .bdrv_co_flush_to_disk = sd_co_flush_to_disk,
-    .bdrv_co_discard = sd_co_discard,
+    .bdrv_co_pdiscard = sd_co_pdiscard,
     .bdrv_co_get_block_status = sd_co_get_block_status,
 
     .bdrv_snapshot_create = sd_snapshot_create,
@@ -2990,7 +2993,7 @@ static BlockDriver bdrv_sheepdog_tcp = {
     .bdrv_co_readv = sd_co_readv,
     .bdrv_co_writev = sd_co_writev,
     .bdrv_co_flush_to_disk = sd_co_flush_to_disk,
-    .bdrv_co_discard = sd_co_discard,
+    .bdrv_co_pdiscard = sd_co_pdiscard,
     .bdrv_co_get_block_status = sd_co_get_block_status,
 
     .bdrv_snapshot_create = sd_snapshot_create,
@@ -3026,7 +3029,7 @@ static BlockDriver bdrv_sheepdog_unix = {
     .bdrv_co_readv = sd_co_readv,
     .bdrv_co_writev = sd_co_writev,
     .bdrv_co_flush_to_disk = sd_co_flush_to_disk,
-    .bdrv_co_discard = sd_co_discard,
+    .bdrv_co_pdiscard = sd_co_pdiscard,
     .bdrv_co_get_block_status = sd_co_get_block_status,
 
     .bdrv_snapshot_create = sd_snapshot_create,
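sd_co_pdiscard() above goes the other way: the driver's internal bookkeeping is still sector-based, so it asserts sector alignment and shifts the byte values back down before calling sd_aio_setup(). A standalone sketch of that back-conversion, with the constants defined locally as stand-ins for QEMU's definitions:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins mirroring QEMU's sector constants. */
#define BDRV_SECTOR_BITS 9
#define BDRV_SECTOR_SIZE (1 << BDRV_SECTOR_BITS)

/* Convert a byte-based discard back to sectors, insisting that the
 * block layer only handed us sector-aligned values. */
static void bytes_to_sectors(int64_t offset, int count,
                             int64_t *sector_num, int *nb_sectors)
{
    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((count & (BDRV_SECTOR_SIZE - 1)) == 0);
    *sector_num = offset >> BDRV_SECTOR_BITS;
    *nb_sectors = count >> BDRV_SECTOR_BITS;
}

int main(void)
{
    int64_t sector_num;
    int nb_sectors;

    bytes_to_sectors(1 << 20, 64 * 1024, &sector_num, &nb_sectors);
    printf("offset 1MiB, count 64KiB -> sector %lld, %d sectors\n",
           (long long)sector_num, nb_sectors);
    return 0;
}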
@@ -9,7 +9,7 @@ blk_co_preadv(void *blk, void *bs, int64_t offset, unsigned int bytes, int flags
 blk_co_pwritev(void *blk, void *bs, int64_t offset, unsigned int bytes, int flags) "blk %p bs %p offset %"PRId64" bytes %u flags %x"
 
 # block/io.c
-bdrv_aio_discard(void *bs, int64_t sector_num, int nb_sectors, void *opaque) "bs %p sector_num %"PRId64" nb_sectors %d opaque %p"
+bdrv_aio_pdiscard(void *bs, int64_t offset, int count, void *opaque) "bs %p offset %"PRId64" count %d opaque %p"
 bdrv_aio_flush(void *bs, void *opaque) "bs %p opaque %p"
 bdrv_aio_readv(void *bs, int64_t sector_num, int nb_sectors, void *opaque) "bs %p sector_num %"PRId64" nb_sectors %d opaque %p"
 bdrv_aio_writev(void *bs, int64_t sector_num, int nb_sectors, void *opaque) "bs %p sector_num %"PRId64" nb_sectors %d opaque %p"
@@ -58,7 +58,7 @@ qmp_block_stream(void *bs, void *job) "bs %p job %p"
 # block/raw-win32.c
 # block/raw-posix.c
 paio_submit_co(int64_t offset, int count, int type) "offset %"PRId64" count %d type %d"
-paio_submit(void *acb, void *opaque, int64_t sector_num, int nb_sectors, int type) "acb %p opaque %p sector_num %"PRId64" nb_sectors %d type %d"
+paio_submit(void *acb, void *opaque, int64_t offset, int count, int type) "acb %p opaque %p offset %"PRId64" count %d type %d"
 
 # block/qcow2.c
 qcow2_writev_start_req(void *co, int64_t offset, int bytes) "co %p offset %" PRIx64 " bytes %d"
@@ -574,9 +574,10 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
     {
         struct blkif_request_discard *discard_req = (void *)&ioreq->req;
         ioreq->aio_inflight++;
-        blk_aio_discard(blkdev->blk,
-                        discard_req->sector_number, discard_req->nr_sectors,
-                        qemu_aio_complete, ioreq);
+        blk_aio_pdiscard(blkdev->blk,
+                         discard_req->sector_number << BDRV_SECTOR_BITS,
+                         discard_req->nr_sectors << BDRV_SECTOR_BITS,
+                         qemu_aio_complete, ioreq);
         break;
     }
     default:
@@ -423,8 +423,10 @@ static void ide_issue_trim_cb(void *opaque, int ret)
             }
 
             /* Got an entry! Submit and exit. */
-            iocb->aiocb = blk_aio_discard(iocb->blk, sector, count,
-                                          ide_issue_trim_cb, opaque);
+            iocb->aiocb = blk_aio_pdiscard(iocb->blk,
+                                           sector << BDRV_SECTOR_BITS,
+                                           count << BDRV_SECTOR_BITS,
+                                           ide_issue_trim_cb, opaque);
             return;
         }
 
@@ -1609,10 +1609,10 @@ static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
         goto done;
     }
 
-    r->req.aiocb = blk_aio_discard(s->qdev.conf.blk,
-                                   sector_num * (s->qdev.blocksize / 512),
-                                   nb_sectors * (s->qdev.blocksize / 512),
+    r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
+                                    sector_num * s->qdev.blocksize,
+                                    nb_sectors * s->qdev.blocksize,
                                    scsi_unmap_complete, data);
     data->count--;
     data->inbuf += 16;
     return;
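The scsi-disk conversion above is a units change only: the old call passed 512-byte sectors (sector_num * blocksize / 512), the new one passes bytes (sector_num * blocksize), and both describe the same region. A tiny standalone check of that equivalence, pure arithmetic with no QEMU headers, using illustrative values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Logical block sizes a SCSI disk might report. */
    const uint32_t blocksizes[] = { 512, 2048, 4096 };
    const int64_t sector_num = 1000;
    const int64_t nb_sectors = 32;

    for (size_t i = 0; i < sizeof(blocksizes) / sizeof(blocksizes[0]); i++) {
        uint32_t bs = blocksizes[i];

        /* Old interface: units of 512-byte sectors. */
        int64_t old_start = sector_num * (bs / 512);
        int64_t old_len = nb_sectors * (bs / 512);

        /* New interface: plain bytes. */
        int64_t new_start = sector_num * bs;
        int64_t new_len = nb_sectors * bs;

        /* Same byte range either way. */
        assert(old_start * 512 == new_start);
        assert(old_len * 512 == new_len);
    }
    printf("old sector-based and new byte-based ranges match\n");
    return 0;
}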
@@ -316,9 +316,9 @@ BlockAIOCB *bdrv_aio_writev(BdrvChild *child, int64_t sector_num,
                             BlockCompletionFunc *cb, void *opaque);
 BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
                            BlockCompletionFunc *cb, void *opaque);
-BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
-                             int64_t sector_num, int nb_sectors,
+BlockAIOCB *bdrv_aio_pdiscard(BlockDriverState *bs,
+                              int64_t offset, int count,
                              BlockCompletionFunc *cb, void *opaque);
 void bdrv_aio_cancel(BlockAIOCB *acb);
 void bdrv_aio_cancel_async(BlockAIOCB *acb);
 
@@ -341,8 +341,8 @@ void bdrv_drain(BlockDriverState *bs);
 void coroutine_fn bdrv_co_drain(BlockDriverState *bs);
 void bdrv_drain_all(void);
 
-int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors);
-int bdrv_co_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors);
+int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int count);
+int bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset, int count);
 int bdrv_has_zero_init_1(BlockDriverState *bs);
 int bdrv_has_zero_init(BlockDriverState *bs);
 bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs);
@@ -142,8 +142,8 @@ struct BlockDriver {
                                  BlockCompletionFunc *cb, void *opaque);
     BlockAIOCB *(*bdrv_aio_flush)(BlockDriverState *bs,
                                   BlockCompletionFunc *cb, void *opaque);
-    BlockAIOCB *(*bdrv_aio_discard)(BlockDriverState *bs,
-                                    int64_t sector_num, int nb_sectors,
+    BlockAIOCB *(*bdrv_aio_pdiscard)(BlockDriverState *bs,
+                                     int64_t offset, int count,
                                     BlockCompletionFunc *cb, void *opaque);
 
     int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs,
@@ -165,8 +165,8 @@ struct BlockDriver {
      */
     int coroutine_fn (*bdrv_co_pwrite_zeroes)(BlockDriverState *bs,
         int64_t offset, int count, BdrvRequestFlags flags);
-    int coroutine_fn (*bdrv_co_discard)(BlockDriverState *bs,
-        int64_t sector_num, int nb_sectors);
+    int coroutine_fn (*bdrv_co_pdiscard)(BlockDriverState *bs,
+        int64_t offset, int count);
     int64_t coroutine_fn (*bdrv_co_get_block_status)(BlockDriverState *bs,
         int64_t sector_num, int nb_sectors, int *pnum,
         BlockDriverState **file);
@@ -77,7 +77,6 @@ enum {
 
 /* Maximum size of a single READ/WRITE data buffer */
 #define NBD_MAX_BUFFER_SIZE (32 * 1024 * 1024)
-#define NBD_MAX_SECTORS (NBD_MAX_BUFFER_SIZE / BDRV_SECTOR_SIZE)
 
 /* Maximum size of an export name. The NBD spec requires 256 and
  * suggests that servers support up to 4096, but we stick to only the
@@ -89,7 +88,6 @@ enum {
 ssize_t nbd_wr_syncv(QIOChannel *ioc,
                      struct iovec *iov,
                      size_t niov,
-                     size_t offset,
                      size_t length,
                      bool do_read);
 int nbd_receive_negotiate(QIOChannel *ioc, const char *name, uint32_t *flags,
@@ -139,15 +139,14 @@ BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
                             BlockCompletionFunc *cb, void *opaque);
 BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                           BlockCompletionFunc *cb, void *opaque);
-BlockAIOCB *blk_aio_discard(BlockBackend *blk,
-                            int64_t sector_num, int nb_sectors,
-                            BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, int64_t offset, int count,
+                             BlockCompletionFunc *cb, void *opaque);
 void blk_aio_cancel(BlockAIOCB *acb);
 void blk_aio_cancel_async(BlockAIOCB *acb);
 int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf);
 BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                           BlockCompletionFunc *cb, void *opaque);
-int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors);
+int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int count);
 int blk_co_flush(BlockBackend *blk);
 int blk_flush(BlockBackend *blk);
 int blk_flush_all(void);
@@ -207,7 +206,7 @@ int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
 int blk_write_compressed(BlockBackend *blk, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors);
 int blk_truncate(BlockBackend *blk, int64_t offset);
-int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors);
+int blk_pdiscard(BlockBackend *blk, int64_t offset, int count);
 int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                      int64_t pos, int size);
 int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size);
@@ -23,7 +23,6 @@
 ssize_t nbd_wr_syncv(QIOChannel *ioc,
                      struct iovec *iov,
                      size_t niov,
-                     size_t offset,
                      size_t length,
                      bool do_read)
 {
@@ -33,9 +32,7 @@ ssize_t nbd_wr_syncv(QIOChannel *ioc,
     struct iovec *local_iov_head = local_iov;
     unsigned int nlocal_iov = niov;
 
-    nlocal_iov = iov_copy(local_iov, nlocal_iov,
-                          iov, niov,
-                          offset, length);
+    nlocal_iov = iov_copy(local_iov, nlocal_iov, iov, niov, 0, length);
 
     while (nlocal_iov > 0) {
         ssize_t len;
@@ -101,14 +101,14 @@ static inline ssize_t read_sync(QIOChannel *ioc, void *buffer, size_t size)
      * our request/reply. Synchronization is done with recv_coroutine, so
      * that this is coroutine-safe.
      */
-    return nbd_wr_syncv(ioc, &iov, 1, 0, size, true);
+    return nbd_wr_syncv(ioc, &iov, 1, size, true);
 }
 
 static inline ssize_t write_sync(QIOChannel *ioc, void *buffer, size_t size)
 {
     struct iovec iov = { .iov_base = buffer, .iov_len = size };
 
-    return nbd_wr_syncv(ioc, &iov, 1, 0, size, false);
+    return nbd_wr_syncv(ioc, &iov, 1, size, false);
 }
 
 struct NBDTLSHandshakeData {
nbd/server.c
19
nbd/server.c
|
@ -1182,20 +1182,11 @@ static void nbd_trip(void *opaque)
|
||||||
break;
|
break;
|
||||||
case NBD_CMD_TRIM:
|
case NBD_CMD_TRIM:
|
||||||
TRACE("Request type is TRIM");
|
TRACE("Request type is TRIM");
|
||||||
/* Ignore unaligned head or tail, until block layer adds byte
|
ret = blk_co_pdiscard(exp->blk, request.from + exp->dev_offset,
|
||||||
* interface */
|
request.len);
|
||||||
if (request.len >= BDRV_SECTOR_SIZE) {
|
if (ret < 0) {
|
||||||
request.len -= (request.from + request.len) % BDRV_SECTOR_SIZE;
|
LOG("discard failed");
|
||||||
ret = blk_co_discard(exp->blk,
|
reply.error = -ret;
|
||||||
DIV_ROUND_UP(request.from + exp->dev_offset,
|
|
||||||
BDRV_SECTOR_SIZE),
|
|
||||||
request.len / BDRV_SECTOR_SIZE);
|
|
||||||
if (ret < 0) {
|
|
||||||
LOG("discard failed");
|
|
||||||
reply.error = -ret;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
TRACE("trim request too small, ignoring");
|
|
||||||
}
|
}
|
||||||
if (nbd_co_send_reply(req, &reply, 0) < 0) {
|
if (nbd_co_send_reply(req, &reply, 0) < 0) {
|
||||||
goto out;
|
goto out;
|
||||||
|
|
|
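The NBD server no longer rounds TRIM requests to sector boundaries itself; blk_co_pdiscard() takes the byte range as-is, leaving any alignment handling to the block layer. For comparison, a standalone sketch of the rounding the removed code used to perform; the values and names are illustrative only and the macro is a local definition, not QEMU's header:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE 512
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    /* An intentionally unaligned TRIM request. */
    uint64_t from = 1000;   /* byte offset into the export */
    uint32_t len = 5000;    /* byte length */

    /* What the removed server code did: shave the tail so that
     * from + len lands on a sector boundary, then round the start
     * up to whole sectors before calling the sector-based API. */
    uint32_t old_len = len - (from + len) % SECTOR_SIZE;
    uint64_t old_start_sector = DIV_ROUND_UP(from, SECTOR_SIZE);
    uint32_t old_nb_sectors = old_len / SECTOR_SIZE;

    /* What the new code does: pass the byte range through unchanged. */
    printf("old: sector %llu, %u sectors (head/tail fragments dropped)\n",
           (unsigned long long)old_start_sector, old_nb_sectors);
    printf("new: offset %llu, %u bytes passed straight to blk_co_pdiscard\n",
           (unsigned long long)from, len);
    return 0;
}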
@@ -1696,8 +1696,7 @@ static int discard_f(BlockBackend *blk, int argc, char **argv)
     }
 
     gettimeofday(&t1, NULL);
-    ret = blk_discard(blk, offset >> BDRV_SECTOR_BITS,
-                      count >> BDRV_SECTOR_BITS);
+    ret = blk_pdiscard(blk, offset, count);
     gettimeofday(&t2, NULL);
 
     if (ret < 0) {