block: Switch BlockRequest to byte-based

BlockRequest is the internal struct used by bdrv_aio_*.  At the
moment, all such calls were sector-based, but we will eventually
convert to byte-based; start by changing the internal variables
to be byte-based.  No change to behavior, although the read and
write code can now go byte-based through more of the stack.

Signed-off-by: Eric Blake <eblake@redhat.com>
Message-id: 1468624988-423-4-git-send-email-eblake@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
This commit is contained in:
Eric Blake 2016-07-15 17:22:52 -06:00 committed by Stefan Hajnoczi
parent 0c51a893b6
commit b15404e027
1 changed file with 30 additions and 32 deletions

View File

@@ -33,10 +33,9 @@
#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */ #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static BlockAIOCB *bdrv_co_aio_rw_vector(BdrvChild *child, static BlockAIOCB *bdrv_co_aio_prw_vector(BdrvChild *child,
int64_t sector_num, int64_t offset,
QEMUIOVector *qiov, QEMUIOVector *qiov,
int nb_sectors,
BdrvRequestFlags flags, BdrvRequestFlags flags,
BlockCompletionFunc *cb, BlockCompletionFunc *cb,
void *opaque, void *opaque,
@@ -2015,8 +2014,9 @@ BlockAIOCB *bdrv_aio_readv(BdrvChild *child, int64_t sector_num,
{ {
trace_bdrv_aio_readv(child->bs, sector_num, nb_sectors, opaque); trace_bdrv_aio_readv(child->bs, sector_num, nb_sectors, opaque);
return bdrv_co_aio_rw_vector(child, sector_num, qiov, nb_sectors, 0, assert(nb_sectors << BDRV_SECTOR_BITS == qiov->size);
cb, opaque, false); return bdrv_co_aio_prw_vector(child, sector_num << BDRV_SECTOR_BITS, qiov,
0, cb, opaque, false);
} }
BlockAIOCB *bdrv_aio_writev(BdrvChild *child, int64_t sector_num, BlockAIOCB *bdrv_aio_writev(BdrvChild *child, int64_t sector_num,
@@ -2025,8 +2025,9 @@ BlockAIOCB *bdrv_aio_writev(BdrvChild *child, int64_t sector_num,
{ {
trace_bdrv_aio_writev(child->bs, sector_num, nb_sectors, opaque); trace_bdrv_aio_writev(child->bs, sector_num, nb_sectors, opaque);
return bdrv_co_aio_rw_vector(child, sector_num, qiov, nb_sectors, 0, assert(nb_sectors << BDRV_SECTOR_BITS == qiov->size);
cb, opaque, true); return bdrv_co_aio_prw_vector(child, sector_num << BDRV_SECTOR_BITS, qiov,
0, cb, opaque, true);
} }
void bdrv_aio_cancel(BlockAIOCB *acb) void bdrv_aio_cancel(BlockAIOCB *acb)
@@ -2062,8 +2063,8 @@ typedef struct BlockRequest {
union { union {
/* Used during read, write, trim */ /* Used during read, write, trim */
struct { struct {
int64_t sector; int64_t offset;
int nb_sectors; int bytes;
int flags; int flags;
QEMUIOVector *qiov; QEMUIOVector *qiov;
}; };
@@ -2127,20 +2128,19 @@ static void coroutine_fn bdrv_co_do_rw(void *opaque)
BlockAIOCBCoroutine *acb = opaque; BlockAIOCBCoroutine *acb = opaque;
if (!acb->is_write) { if (!acb->is_write) {
acb->req.error = bdrv_co_do_readv(acb->child, acb->req.sector, acb->req.error = bdrv_co_preadv(acb->child, acb->req.offset,
acb->req.nb_sectors, acb->req.qiov, acb->req.flags); acb->req.qiov->size, acb->req.qiov, acb->req.flags);
} else { } else {
acb->req.error = bdrv_co_do_writev(acb->child, acb->req.sector, acb->req.error = bdrv_co_pwritev(acb->child, acb->req.offset,
acb->req.nb_sectors, acb->req.qiov, acb->req.flags); acb->req.qiov->size, acb->req.qiov, acb->req.flags);
} }
bdrv_co_complete(acb); bdrv_co_complete(acb);
} }
static BlockAIOCB *bdrv_co_aio_rw_vector(BdrvChild *child, static BlockAIOCB *bdrv_co_aio_prw_vector(BdrvChild *child,
int64_t sector_num, int64_t offset,
QEMUIOVector *qiov, QEMUIOVector *qiov,
int nb_sectors,
BdrvRequestFlags flags, BdrvRequestFlags flags,
BlockCompletionFunc *cb, BlockCompletionFunc *cb,
void *opaque, void *opaque,
@@ -2153,8 +2153,7 @@ static BlockAIOCB *bdrv_co_aio_rw_vector(BdrvChild *child,
acb->child = child; acb->child = child;
acb->need_bh = true; acb->need_bh = true;
acb->req.error = -EINPROGRESS; acb->req.error = -EINPROGRESS;
acb->req.sector = sector_num; acb->req.offset = offset;
acb->req.nb_sectors = nb_sectors;
acb->req.qiov = qiov; acb->req.qiov = qiov;
acb->req.flags = flags; acb->req.flags = flags;
acb->is_write = is_write; acb->is_write = is_write;
@@ -2199,8 +2198,7 @@ static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
BlockAIOCBCoroutine *acb = opaque; BlockAIOCBCoroutine *acb = opaque;
BlockDriverState *bs = acb->common.bs; BlockDriverState *bs = acb->common.bs;
acb->req.error = bdrv_co_pdiscard(bs, acb->req.sector << BDRV_SECTOR_BITS, acb->req.error = bdrv_co_pdiscard(bs, acb->req.offset, acb->req.bytes);
acb->req.nb_sectors << BDRV_SECTOR_BITS);
bdrv_co_complete(acb); bdrv_co_complete(acb);
} }
@@ -2216,8 +2214,8 @@ BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque); acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
acb->need_bh = true; acb->need_bh = true;
acb->req.error = -EINPROGRESS; acb->req.error = -EINPROGRESS;
acb->req.sector = sector_num; acb->req.offset = sector_num << BDRV_SECTOR_BITS;
acb->req.nb_sectors = nb_sectors; acb->req.bytes = nb_sectors << BDRV_SECTOR_BITS;
co = qemu_coroutine_create(bdrv_aio_discard_co_entry, acb); co = qemu_coroutine_create(bdrv_aio_discard_co_entry, acb);
qemu_coroutine_enter(co); qemu_coroutine_enter(co);