mirror of https://github.com/xemu-project/xemu.git
block: explicit I/O accounting
Decouple the I/O accounting from bdrv_aio_readv/writev/flush and make the hardware models call directly into the accounting helpers. This means: - we do not count internal requests from image formats in addition to guest originating I/O - we do not double count I/O ops if the device model handles it chunk wise - we only account I/O once it actually is done - can extend I/O accounting to synchronous or coroutine I/O easily - implement I/O latency tracking easily (see the next patch) I've converted the existing device model callers to the new model; device models that are using synchronous I/O and weren't accounted before haven't been updated yet. Also scsi hasn't been converted to the end-to-end accounting as I want to defer that until after the pending scsi layer overhaul. Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
This commit is contained in:
parent
2f4b759367
commit
a597e79ce1
46
block.c
46
block.c
|
@ -1942,13 +1942,13 @@ static QObject* bdrv_info_stats_bs(BlockDriverState *bs)
|
||||||
"'wr_highest_offset': %" PRId64 ","
|
"'wr_highest_offset': %" PRId64 ","
|
||||||
"'flush_operations': %" PRId64
|
"'flush_operations': %" PRId64
|
||||||
"} }",
|
"} }",
|
||||||
bs->rd_bytes,
|
bs->nr_bytes[BDRV_ACCT_READ],
|
||||||
bs->wr_bytes,
|
bs->nr_bytes[BDRV_ACCT_WRITE],
|
||||||
bs->rd_ops,
|
bs->nr_ops[BDRV_ACCT_READ],
|
||||||
bs->wr_ops,
|
bs->nr_ops[BDRV_ACCT_WRITE],
|
||||||
bs->wr_highest_sector *
|
bs->wr_highest_sector *
|
||||||
(uint64_t)BDRV_SECTOR_SIZE,
|
(uint64_t)BDRV_SECTOR_SIZE,
|
||||||
bs->flush_ops);
|
bs->nr_ops[BDRV_ACCT_FLUSH]);
|
||||||
dict = qobject_to_qdict(res);
|
dict = qobject_to_qdict(res);
|
||||||
|
|
||||||
if (*bs->device_name) {
|
if (*bs->device_name) {
|
||||||
|
@ -2262,7 +2262,6 @@ char *bdrv_snapshot_dump(char *buf, int buf_size, QEMUSnapshotInfo *sn)
|
||||||
return buf;
|
return buf;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
/**************************************************************/
|
/**************************************************************/
|
||||||
/* async I/Os */
|
/* async I/Os */
|
||||||
|
|
||||||
|
@ -2271,7 +2270,6 @@ BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
|
||||||
BlockDriverCompletionFunc *cb, void *opaque)
|
BlockDriverCompletionFunc *cb, void *opaque)
|
||||||
{
|
{
|
||||||
BlockDriver *drv = bs->drv;
|
BlockDriver *drv = bs->drv;
|
||||||
BlockDriverAIOCB *ret;
|
|
||||||
|
|
||||||
trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
|
trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
|
||||||
|
|
||||||
|
@ -2280,16 +2278,8 @@ BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
|
||||||
if (bdrv_check_request(bs, sector_num, nb_sectors))
|
if (bdrv_check_request(bs, sector_num, nb_sectors))
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
ret = drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
|
return drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
|
||||||
cb, opaque);
|
cb, opaque);
|
||||||
|
|
||||||
if (ret) {
|
|
||||||
/* Update stats even though technically transfer has not happened. */
|
|
||||||
bs->rd_bytes += (unsigned) nb_sectors * BDRV_SECTOR_SIZE;
|
|
||||||
bs->rd_ops++;
|
|
||||||
}
|
|
||||||
|
|
||||||
return ret;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
typedef struct BlockCompleteData {
|
typedef struct BlockCompleteData {
|
||||||
|
@ -2356,9 +2346,6 @@ BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
|
||||||
cb, opaque);
|
cb, opaque);
|
||||||
|
|
||||||
if (ret) {
|
if (ret) {
|
||||||
/* Update stats even though technically transfer has not happened. */
|
|
||||||
bs->wr_bytes += (unsigned) nb_sectors * BDRV_SECTOR_SIZE;
|
|
||||||
bs->wr_ops ++;
|
|
||||||
if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
|
if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
|
||||||
bs->wr_highest_sector = sector_num + nb_sectors - 1;
|
bs->wr_highest_sector = sector_num + nb_sectors - 1;
|
||||||
}
|
}
|
||||||
|
@ -2612,8 +2599,6 @@ BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
|
||||||
|
|
||||||
trace_bdrv_aio_flush(bs, opaque);
|
trace_bdrv_aio_flush(bs, opaque);
|
||||||
|
|
||||||
bs->flush_ops++;
|
|
||||||
|
|
||||||
if (bs->open_flags & BDRV_O_NO_FLUSH) {
|
if (bs->open_flags & BDRV_O_NO_FLUSH) {
|
||||||
return bdrv_aio_noop_em(bs, cb, opaque);
|
return bdrv_aio_noop_em(bs, cb, opaque);
|
||||||
}
|
}
|
||||||
|
@ -3168,6 +3153,25 @@ int bdrv_in_use(BlockDriverState *bs)
|
||||||
return bs->in_use;
|
return bs->in_use;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
|
||||||
|
enum BlockAcctType type)
|
||||||
|
{
|
||||||
|
assert(type < BDRV_MAX_IOTYPE);
|
||||||
|
|
||||||
|
cookie->bytes = bytes;
|
||||||
|
cookie->type = type;
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
|
||||||
|
{
|
||||||
|
assert(cookie->type < BDRV_MAX_IOTYPE);
|
||||||
|
|
||||||
|
bs->nr_bytes[cookie->type] += cookie->bytes;
|
||||||
|
bs->nr_ops[cookie->type]++;
|
||||||
|
}
|
||||||
|
|
||||||
int bdrv_img_create(const char *filename, const char *fmt,
|
int bdrv_img_create(const char *filename, const char *fmt,
|
||||||
const char *base_filename, const char *base_fmt,
|
const char *base_filename, const char *base_fmt,
|
||||||
char *options, uint64_t img_size, int flags)
|
char *options, uint64_t img_size, int flags)
|
||||||
|
|
17
block.h
17
block.h
|
@ -255,6 +255,22 @@ int64_t bdrv_get_dirty_count(BlockDriverState *bs);
|
||||||
void bdrv_set_in_use(BlockDriverState *bs, int in_use);
|
void bdrv_set_in_use(BlockDriverState *bs, int in_use);
|
||||||
int bdrv_in_use(BlockDriverState *bs);
|
int bdrv_in_use(BlockDriverState *bs);
|
||||||
|
|
||||||
|
enum BlockAcctType {
|
||||||
|
BDRV_ACCT_READ,
|
||||||
|
BDRV_ACCT_WRITE,
|
||||||
|
BDRV_ACCT_FLUSH,
|
||||||
|
BDRV_MAX_IOTYPE,
|
||||||
|
};
|
||||||
|
|
||||||
|
typedef struct BlockAcctCookie {
|
||||||
|
int64_t bytes;
|
||||||
|
enum BlockAcctType type;
|
||||||
|
} BlockAcctCookie;
|
||||||
|
|
||||||
|
void bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
|
||||||
|
int64_t bytes, enum BlockAcctType type);
|
||||||
|
void bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie);
|
||||||
|
|
||||||
typedef enum {
|
typedef enum {
|
||||||
BLKDBG_L1_UPDATE,
|
BLKDBG_L1_UPDATE,
|
||||||
|
|
||||||
|
@ -307,3 +323,4 @@ typedef enum {
|
||||||
void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event);
|
void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event);
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
|
|
@ -184,11 +184,8 @@ struct BlockDriverState {
|
||||||
void *sync_aiocb;
|
void *sync_aiocb;
|
||||||
|
|
||||||
/* I/O stats (display with "info blockstats"). */
|
/* I/O stats (display with "info blockstats"). */
|
||||||
uint64_t rd_bytes;
|
uint64_t nr_bytes[BDRV_MAX_IOTYPE];
|
||||||
uint64_t wr_bytes;
|
uint64_t nr_ops[BDRV_MAX_IOTYPE];
|
||||||
uint64_t rd_ops;
|
|
||||||
uint64_t wr_ops;
|
|
||||||
uint64_t flush_ops;
|
|
||||||
uint64_t wr_highest_sector;
|
uint64_t wr_highest_sector;
|
||||||
|
|
||||||
/* Whether the disk can expand beyond total_sectors */
|
/* Whether the disk can expand beyond total_sectors */
|
||||||
|
|
|
@ -710,6 +710,7 @@ static void ncq_cb(void *opaque, int ret)
|
||||||
DPRINTF(ncq_tfs->drive->port_no, "NCQ transfer tag %d finished\n",
|
DPRINTF(ncq_tfs->drive->port_no, "NCQ transfer tag %d finished\n",
|
||||||
ncq_tfs->tag);
|
ncq_tfs->tag);
|
||||||
|
|
||||||
|
bdrv_acct_done(ncq_tfs->drive->port.ifs[0].bs, &ncq_tfs->acct);
|
||||||
qemu_sglist_destroy(&ncq_tfs->sglist);
|
qemu_sglist_destroy(&ncq_tfs->sglist);
|
||||||
ncq_tfs->used = 0;
|
ncq_tfs->used = 0;
|
||||||
}
|
}
|
||||||
|
@ -756,6 +757,10 @@ static void process_ncq_command(AHCIState *s, int port, uint8_t *cmd_fis,
|
||||||
ncq_tfs->is_read = 1;
|
ncq_tfs->is_read = 1;
|
||||||
|
|
||||||
DPRINTF(port, "tag %d aio read %ld\n", ncq_tfs->tag, ncq_tfs->lba);
|
DPRINTF(port, "tag %d aio read %ld\n", ncq_tfs->tag, ncq_tfs->lba);
|
||||||
|
|
||||||
|
bdrv_acct_start(ncq_tfs->drive->port.ifs[0].bs, &ncq_tfs->acct,
|
||||||
|
(ncq_tfs->sector_count-1) * BDRV_SECTOR_SIZE,
|
||||||
|
BDRV_ACCT_READ);
|
||||||
ncq_tfs->aiocb = dma_bdrv_read(ncq_tfs->drive->port.ifs[0].bs,
|
ncq_tfs->aiocb = dma_bdrv_read(ncq_tfs->drive->port.ifs[0].bs,
|
||||||
&ncq_tfs->sglist, ncq_tfs->lba,
|
&ncq_tfs->sglist, ncq_tfs->lba,
|
||||||
ncq_cb, ncq_tfs);
|
ncq_cb, ncq_tfs);
|
||||||
|
@ -766,6 +771,10 @@ static void process_ncq_command(AHCIState *s, int port, uint8_t *cmd_fis,
|
||||||
ncq_tfs->is_read = 0;
|
ncq_tfs->is_read = 0;
|
||||||
|
|
||||||
DPRINTF(port, "tag %d aio write %ld\n", ncq_tfs->tag, ncq_tfs->lba);
|
DPRINTF(port, "tag %d aio write %ld\n", ncq_tfs->tag, ncq_tfs->lba);
|
||||||
|
|
||||||
|
bdrv_acct_start(ncq_tfs->drive->port.ifs[0].bs, &ncq_tfs->acct,
|
||||||
|
(ncq_tfs->sector_count-1) * BDRV_SECTOR_SIZE,
|
||||||
|
BDRV_ACCT_WRITE);
|
||||||
ncq_tfs->aiocb = dma_bdrv_write(ncq_tfs->drive->port.ifs[0].bs,
|
ncq_tfs->aiocb = dma_bdrv_write(ncq_tfs->drive->port.ifs[0].bs,
|
||||||
&ncq_tfs->sglist, ncq_tfs->lba,
|
&ncq_tfs->sglist, ncq_tfs->lba,
|
||||||
ncq_cb, ncq_tfs);
|
ncq_cb, ncq_tfs);
|
||||||
|
|
|
@ -258,6 +258,7 @@ typedef struct NCQTransferState {
|
||||||
AHCIDevice *drive;
|
AHCIDevice *drive;
|
||||||
BlockDriverAIOCB *aiocb;
|
BlockDriverAIOCB *aiocb;
|
||||||
QEMUSGList sglist;
|
QEMUSGList sglist;
|
||||||
|
BlockAcctCookie acct;
|
||||||
int is_read;
|
int is_read;
|
||||||
uint16_t sector_count;
|
uint16_t sector_count;
|
||||||
uint64_t lba;
|
uint64_t lba;
|
||||||
|
|
|
@ -104,17 +104,20 @@ static void cd_data_to_raw(uint8_t *buf, int lba)
|
||||||
memset(buf, 0, 288);
|
memset(buf, 0, 288);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int cd_read_sector(BlockDriverState *bs, int lba, uint8_t *buf,
|
static int cd_read_sector(IDEState *s, int lba, uint8_t *buf, int sector_size)
|
||||||
int sector_size)
|
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
switch(sector_size) {
|
switch(sector_size) {
|
||||||
case 2048:
|
case 2048:
|
||||||
ret = bdrv_read(bs, (int64_t)lba << 2, buf, 4);
|
bdrv_acct_start(s->bs, &s->acct, 4 * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
|
||||||
|
ret = bdrv_read(s->bs, (int64_t)lba << 2, buf, 4);
|
||||||
|
bdrv_acct_done(s->bs, &s->acct);
|
||||||
break;
|
break;
|
||||||
case 2352:
|
case 2352:
|
||||||
ret = bdrv_read(bs, (int64_t)lba << 2, buf + 16, 4);
|
bdrv_acct_start(s->bs, &s->acct, 4 * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
|
||||||
|
ret = bdrv_read(s->bs, (int64_t)lba << 2, buf + 16, 4);
|
||||||
|
bdrv_acct_done(s->bs, &s->acct);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return ret;
|
return ret;
|
||||||
cd_data_to_raw(buf, lba);
|
cd_data_to_raw(buf, lba);
|
||||||
|
@ -181,7 +184,7 @@ void ide_atapi_cmd_reply_end(IDEState *s)
|
||||||
} else {
|
} else {
|
||||||
/* see if a new sector must be read */
|
/* see if a new sector must be read */
|
||||||
if (s->lba != -1 && s->io_buffer_index >= s->cd_sector_size) {
|
if (s->lba != -1 && s->io_buffer_index >= s->cd_sector_size) {
|
||||||
ret = cd_read_sector(s->bs, s->lba, s->io_buffer, s->cd_sector_size);
|
ret = cd_read_sector(s, s->lba, s->io_buffer, s->cd_sector_size);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
ide_transfer_stop(s);
|
ide_transfer_stop(s);
|
||||||
ide_atapi_io_error(s, ret);
|
ide_atapi_io_error(s, ret);
|
||||||
|
@ -250,6 +253,7 @@ static void ide_atapi_cmd_reply(IDEState *s, int size, int max_size)
|
||||||
s->io_buffer_index = 0;
|
s->io_buffer_index = 0;
|
||||||
|
|
||||||
if (s->atapi_dma) {
|
if (s->atapi_dma) {
|
||||||
|
bdrv_acct_start(s->bs, &s->acct, size, BDRV_ACCT_READ);
|
||||||
s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
|
s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
|
||||||
s->bus->dma->ops->start_dma(s->bus->dma, s,
|
s->bus->dma->ops->start_dma(s->bus->dma, s,
|
||||||
ide_atapi_cmd_read_dma_cb);
|
ide_atapi_cmd_read_dma_cb);
|
||||||
|
@ -322,10 +326,7 @@ static void ide_atapi_cmd_read_dma_cb(void *opaque, int ret)
|
||||||
s->status = READY_STAT | SEEK_STAT;
|
s->status = READY_STAT | SEEK_STAT;
|
||||||
s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO | ATAPI_INT_REASON_CD;
|
s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO | ATAPI_INT_REASON_CD;
|
||||||
ide_set_irq(s->bus);
|
ide_set_irq(s->bus);
|
||||||
eot:
|
goto eot;
|
||||||
s->bus->dma->ops->add_status(s->bus->dma, BM_STATUS_INT);
|
|
||||||
ide_set_inactive(s);
|
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
s->io_buffer_index = 0;
|
s->io_buffer_index = 0;
|
||||||
|
@ -343,9 +344,11 @@ static void ide_atapi_cmd_read_dma_cb(void *opaque, int ret)
|
||||||
#ifdef DEBUG_AIO
|
#ifdef DEBUG_AIO
|
||||||
printf("aio_read_cd: lba=%u n=%d\n", s->lba, n);
|
printf("aio_read_cd: lba=%u n=%d\n", s->lba, n);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
s->bus->dma->iov.iov_base = (void *)(s->io_buffer + data_offset);
|
s->bus->dma->iov.iov_base = (void *)(s->io_buffer + data_offset);
|
||||||
s->bus->dma->iov.iov_len = n * 4 * 512;
|
s->bus->dma->iov.iov_len = n * 4 * 512;
|
||||||
qemu_iovec_init_external(&s->bus->dma->qiov, &s->bus->dma->iov, 1);
|
qemu_iovec_init_external(&s->bus->dma->qiov, &s->bus->dma->iov, 1);
|
||||||
|
|
||||||
s->bus->dma->aiocb = bdrv_aio_readv(s->bs, (int64_t)s->lba << 2,
|
s->bus->dma->aiocb = bdrv_aio_readv(s->bs, (int64_t)s->lba << 2,
|
||||||
&s->bus->dma->qiov, n * 4,
|
&s->bus->dma->qiov, n * 4,
|
||||||
ide_atapi_cmd_read_dma_cb, s);
|
ide_atapi_cmd_read_dma_cb, s);
|
||||||
|
@ -355,6 +358,12 @@ static void ide_atapi_cmd_read_dma_cb(void *opaque, int ret)
|
||||||
ASC_MEDIUM_NOT_PRESENT);
|
ASC_MEDIUM_NOT_PRESENT);
|
||||||
goto eot;
|
goto eot;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return;
|
||||||
|
eot:
|
||||||
|
bdrv_acct_done(s->bs, &s->acct);
|
||||||
|
s->bus->dma->ops->add_status(s->bus->dma, BM_STATUS_INT);
|
||||||
|
ide_set_inactive(s);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* start a CD-CDROM read command with DMA */
|
/* start a CD-CDROM read command with DMA */
|
||||||
|
@ -368,6 +377,8 @@ static void ide_atapi_cmd_read_dma(IDEState *s, int lba, int nb_sectors,
|
||||||
s->io_buffer_size = 0;
|
s->io_buffer_size = 0;
|
||||||
s->cd_sector_size = sector_size;
|
s->cd_sector_size = sector_size;
|
||||||
|
|
||||||
|
bdrv_acct_start(s->bs, &s->acct, s->packet_transfer_size, BDRV_ACCT_READ);
|
||||||
|
|
||||||
/* XXX: check if BUSY_STAT should be set */
|
/* XXX: check if BUSY_STAT should be set */
|
||||||
s->status = READY_STAT | SEEK_STAT | DRQ_STAT | BUSY_STAT;
|
s->status = READY_STAT | SEEK_STAT | DRQ_STAT | BUSY_STAT;
|
||||||
s->bus->dma->ops->start_dma(s->bus->dma, s,
|
s->bus->dma->ops->start_dma(s->bus->dma, s,
|
||||||
|
|
|
@ -473,7 +473,10 @@ void ide_sector_read(IDEState *s)
|
||||||
#endif
|
#endif
|
||||||
if (n > s->req_nb_sectors)
|
if (n > s->req_nb_sectors)
|
||||||
n = s->req_nb_sectors;
|
n = s->req_nb_sectors;
|
||||||
|
|
||||||
|
bdrv_acct_start(s->bs, &s->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
|
||||||
ret = bdrv_read(s->bs, sector_num, s->io_buffer, n);
|
ret = bdrv_read(s->bs, sector_num, s->io_buffer, n);
|
||||||
|
bdrv_acct_done(s->bs, &s->acct);
|
||||||
if (ret != 0) {
|
if (ret != 0) {
|
||||||
if (ide_handle_rw_error(s, -ret,
|
if (ide_handle_rw_error(s, -ret,
|
||||||
BM_STATUS_PIO_RETRY | BM_STATUS_RETRY_READ))
|
BM_STATUS_PIO_RETRY | BM_STATUS_RETRY_READ))
|
||||||
|
@ -610,6 +613,9 @@ handle_rw_error:
|
||||||
return;
|
return;
|
||||||
|
|
||||||
eot:
|
eot:
|
||||||
|
if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
|
||||||
|
bdrv_acct_done(s->bs, &s->acct);
|
||||||
|
}
|
||||||
ide_set_inactive(s);
|
ide_set_inactive(s);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -619,6 +625,20 @@ static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
|
||||||
s->io_buffer_index = 0;
|
s->io_buffer_index = 0;
|
||||||
s->io_buffer_size = 0;
|
s->io_buffer_size = 0;
|
||||||
s->dma_cmd = dma_cmd;
|
s->dma_cmd = dma_cmd;
|
||||||
|
|
||||||
|
switch (dma_cmd) {
|
||||||
|
case IDE_DMA_READ:
|
||||||
|
bdrv_acct_start(s->bs, &s->acct, s->nsector * BDRV_SECTOR_SIZE,
|
||||||
|
BDRV_ACCT_READ);
|
||||||
|
break;
|
||||||
|
case IDE_DMA_WRITE:
|
||||||
|
bdrv_acct_start(s->bs, &s->acct, s->nsector * BDRV_SECTOR_SIZE,
|
||||||
|
BDRV_ACCT_WRITE);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
s->bus->dma->ops->start_dma(s->bus->dma, s, ide_dma_cb);
|
s->bus->dma->ops->start_dma(s->bus->dma, s, ide_dma_cb);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -641,7 +661,10 @@ void ide_sector_write(IDEState *s)
|
||||||
n = s->nsector;
|
n = s->nsector;
|
||||||
if (n > s->req_nb_sectors)
|
if (n > s->req_nb_sectors)
|
||||||
n = s->req_nb_sectors;
|
n = s->req_nb_sectors;
|
||||||
|
|
||||||
|
bdrv_acct_start(s->bs, &s->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
|
||||||
ret = bdrv_write(s->bs, sector_num, s->io_buffer, n);
|
ret = bdrv_write(s->bs, sector_num, s->io_buffer, n);
|
||||||
|
bdrv_acct_done(s->bs, &s->acct);
|
||||||
|
|
||||||
if (ret != 0) {
|
if (ret != 0) {
|
||||||
if (ide_handle_rw_error(s, -ret, BM_STATUS_PIO_RETRY))
|
if (ide_handle_rw_error(s, -ret, BM_STATUS_PIO_RETRY))
|
||||||
|
@ -685,6 +708,7 @@ static void ide_flush_cb(void *opaque, int ret)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bdrv_acct_done(s->bs, &s->acct);
|
||||||
s->status = READY_STAT | SEEK_STAT;
|
s->status = READY_STAT | SEEK_STAT;
|
||||||
ide_set_irq(s->bus);
|
ide_set_irq(s->bus);
|
||||||
}
|
}
|
||||||
|
@ -698,6 +722,7 @@ void ide_flush_cache(IDEState *s)
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bdrv_acct_start(s->bs, &s->acct, 0, BDRV_ACCT_FLUSH);
|
||||||
acb = bdrv_aio_flush(s->bs, ide_flush_cb, s);
|
acb = bdrv_aio_flush(s->bs, ide_flush_cb, s);
|
||||||
if (acb == NULL) {
|
if (acb == NULL) {
|
||||||
ide_flush_cb(s, -EIO);
|
ide_flush_cb(s, -EIO);
|
||||||
|
|
|
@ -440,6 +440,7 @@ struct IDEState {
|
||||||
int lba;
|
int lba;
|
||||||
int cd_sector_size;
|
int cd_sector_size;
|
||||||
int atapi_dma; /* true if dma is requested for the packet cmd */
|
int atapi_dma; /* true if dma is requested for the packet cmd */
|
||||||
|
BlockAcctCookie acct;
|
||||||
/* ATA DMA state */
|
/* ATA DMA state */
|
||||||
int io_buffer_size;
|
int io_buffer_size;
|
||||||
QEMUSGList sg;
|
QEMUSGList sg;
|
||||||
|
|
|
@ -52,8 +52,7 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
|
||||||
m->aiocb = NULL;
|
m->aiocb = NULL;
|
||||||
qemu_sglist_destroy(&s->sg);
|
qemu_sglist_destroy(&s->sg);
|
||||||
ide_atapi_io_error(s, ret);
|
ide_atapi_io_error(s, ret);
|
||||||
io->dma_end(opaque);
|
goto done;
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (s->io_buffer_size > 0) {
|
if (s->io_buffer_size > 0) {
|
||||||
|
@ -71,8 +70,7 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
|
||||||
ide_atapi_cmd_ok(s);
|
ide_atapi_cmd_ok(s);
|
||||||
|
|
||||||
if (io->len == 0) {
|
if (io->len == 0) {
|
||||||
io->dma_end(opaque);
|
goto done;
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* launch next transfer */
|
/* launch next transfer */
|
||||||
|
@ -92,9 +90,14 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
|
||||||
/* Note: media not present is the most likely case */
|
/* Note: media not present is the most likely case */
|
||||||
ide_atapi_cmd_error(s, SENSE_NOT_READY,
|
ide_atapi_cmd_error(s, SENSE_NOT_READY,
|
||||||
ASC_MEDIUM_NOT_PRESENT);
|
ASC_MEDIUM_NOT_PRESENT);
|
||||||
|
goto done;
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
|
||||||
|
done:
|
||||||
|
bdrv_acct_done(s->bs, &s->acct);
|
||||||
io->dma_end(opaque);
|
io->dma_end(opaque);
|
||||||
return;
|
return;
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void pmac_ide_transfer_cb(void *opaque, int ret)
|
static void pmac_ide_transfer_cb(void *opaque, int ret)
|
||||||
|
@ -109,8 +112,7 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
|
||||||
m->aiocb = NULL;
|
m->aiocb = NULL;
|
||||||
qemu_sglist_destroy(&s->sg);
|
qemu_sglist_destroy(&s->sg);
|
||||||
ide_dma_error(s);
|
ide_dma_error(s);
|
||||||
io->dma_end(io);
|
goto done;
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
sector_num = ide_get_sector(s);
|
sector_num = ide_get_sector(s);
|
||||||
|
@ -130,10 +132,8 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* end of DMA ? */
|
/* end of DMA ? */
|
||||||
|
|
||||||
if (io->len == 0) {
|
if (io->len == 0) {
|
||||||
io->dma_end(io);
|
goto done;
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* launch next transfer */
|
/* launch next transfer */
|
||||||
|
@ -163,6 +163,12 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
|
||||||
|
|
||||||
if (!m->aiocb)
|
if (!m->aiocb)
|
||||||
pmac_ide_transfer_cb(io, -1);
|
pmac_ide_transfer_cb(io, -1);
|
||||||
|
return;
|
||||||
|
done:
|
||||||
|
if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
|
||||||
|
bdrv_acct_done(s->bs, &s->acct);
|
||||||
|
}
|
||||||
|
io->dma_end(io);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void pmac_ide_transfer(DBDMA_io *io)
|
static void pmac_ide_transfer(DBDMA_io *io)
|
||||||
|
@ -172,10 +178,22 @@ static void pmac_ide_transfer(DBDMA_io *io)
|
||||||
|
|
||||||
s->io_buffer_size = 0;
|
s->io_buffer_size = 0;
|
||||||
if (s->drive_kind == IDE_CD) {
|
if (s->drive_kind == IDE_CD) {
|
||||||
|
bdrv_acct_start(s->bs, &s->acct, io->len, BDRV_ACCT_READ);
|
||||||
pmac_ide_atapi_transfer_cb(io, 0);
|
pmac_ide_atapi_transfer_cb(io, 0);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
switch (s->dma_cmd) {
|
||||||
|
case IDE_DMA_READ:
|
||||||
|
bdrv_acct_start(s->bs, &s->acct, io->len, BDRV_ACCT_READ);
|
||||||
|
break;
|
||||||
|
case IDE_DMA_WRITE:
|
||||||
|
bdrv_acct_start(s->bs, &s->acct, io->len, BDRV_ACCT_WRITE);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
pmac_ide_transfer_cb(io, 0);
|
pmac_ide_transfer_cb(io, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -57,6 +57,7 @@ typedef struct SCSIDiskReq {
|
||||||
struct iovec iov;
|
struct iovec iov;
|
||||||
QEMUIOVector qiov;
|
QEMUIOVector qiov;
|
||||||
uint32_t status;
|
uint32_t status;
|
||||||
|
BlockAcctCookie acct;
|
||||||
} SCSIDiskReq;
|
} SCSIDiskReq;
|
||||||
|
|
||||||
struct SCSIDiskState
|
struct SCSIDiskState
|
||||||
|
@ -107,10 +108,13 @@ static void scsi_cancel_io(SCSIRequest *req)
|
||||||
static void scsi_read_complete(void * opaque, int ret)
|
static void scsi_read_complete(void * opaque, int ret)
|
||||||
{
|
{
|
||||||
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
|
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
|
||||||
|
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
|
||||||
int n;
|
int n;
|
||||||
|
|
||||||
r->req.aiocb = NULL;
|
r->req.aiocb = NULL;
|
||||||
|
|
||||||
|
bdrv_acct_done(s->bs, &r->acct);
|
||||||
|
|
||||||
if (ret) {
|
if (ret) {
|
||||||
if (scsi_handle_rw_error(r, -ret, SCSI_REQ_STATUS_RETRY_READ)) {
|
if (scsi_handle_rw_error(r, -ret, SCSI_REQ_STATUS_RETRY_READ)) {
|
||||||
return;
|
return;
|
||||||
|
@ -161,6 +165,8 @@ static void scsi_read_data(SCSIRequest *req)
|
||||||
|
|
||||||
r->iov.iov_len = n * 512;
|
r->iov.iov_len = n * 512;
|
||||||
qemu_iovec_init_external(&r->qiov, &r->iov, 1);
|
qemu_iovec_init_external(&r->qiov, &r->iov, 1);
|
||||||
|
|
||||||
|
bdrv_acct_start(s->bs, &r->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
|
||||||
r->req.aiocb = bdrv_aio_readv(s->bs, r->sector, &r->qiov, n,
|
r->req.aiocb = bdrv_aio_readv(s->bs, r->sector, &r->qiov, n,
|
||||||
scsi_read_complete, r);
|
scsi_read_complete, r);
|
||||||
if (r->req.aiocb == NULL) {
|
if (r->req.aiocb == NULL) {
|
||||||
|
@ -207,11 +213,14 @@ static int scsi_handle_rw_error(SCSIDiskReq *r, int error, int type)
|
||||||
static void scsi_write_complete(void * opaque, int ret)
|
static void scsi_write_complete(void * opaque, int ret)
|
||||||
{
|
{
|
||||||
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
|
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
|
||||||
|
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
|
||||||
uint32_t len;
|
uint32_t len;
|
||||||
uint32_t n;
|
uint32_t n;
|
||||||
|
|
||||||
r->req.aiocb = NULL;
|
r->req.aiocb = NULL;
|
||||||
|
|
||||||
|
bdrv_acct_done(s->bs, &r->acct);
|
||||||
|
|
||||||
if (ret) {
|
if (ret) {
|
||||||
if (scsi_handle_rw_error(r, -ret, SCSI_REQ_STATUS_RETRY_WRITE)) {
|
if (scsi_handle_rw_error(r, -ret, SCSI_REQ_STATUS_RETRY_WRITE)) {
|
||||||
return;
|
return;
|
||||||
|
@ -252,6 +261,8 @@ static void scsi_write_data(SCSIRequest *req)
|
||||||
n = r->iov.iov_len / 512;
|
n = r->iov.iov_len / 512;
|
||||||
if (n) {
|
if (n) {
|
||||||
qemu_iovec_init_external(&r->qiov, &r->iov, 1);
|
qemu_iovec_init_external(&r->qiov, &r->iov, 1);
|
||||||
|
|
||||||
|
bdrv_acct_start(s->bs, &r->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_WRITE);
|
||||||
r->req.aiocb = bdrv_aio_writev(s->bs, r->sector, &r->qiov, n,
|
r->req.aiocb = bdrv_aio_writev(s->bs, r->sector, &r->qiov, n,
|
||||||
scsi_write_complete, r);
|
scsi_write_complete, r);
|
||||||
if (r->req.aiocb == NULL) {
|
if (r->req.aiocb == NULL) {
|
||||||
|
@ -854,13 +865,19 @@ static int scsi_disk_emulate_command(SCSIDiskReq *r, uint8_t *outbuf)
|
||||||
buflen = 8;
|
buflen = 8;
|
||||||
break;
|
break;
|
||||||
case SYNCHRONIZE_CACHE:
|
case SYNCHRONIZE_CACHE:
|
||||||
|
{
|
||||||
|
BlockAcctCookie acct;
|
||||||
|
|
||||||
|
bdrv_acct_start(s->bs, &acct, 0, BDRV_ACCT_FLUSH);
|
||||||
ret = bdrv_flush(s->bs);
|
ret = bdrv_flush(s->bs);
|
||||||
|
bdrv_acct_done(s->bs, &acct);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
if (scsi_handle_rw_error(r, -ret, SCSI_REQ_STATUS_RETRY_FLUSH)) {
|
if (scsi_handle_rw_error(r, -ret, SCSI_REQ_STATUS_RETRY_FLUSH)) {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
|
}
|
||||||
case GET_CONFIGURATION:
|
case GET_CONFIGURATION:
|
||||||
memset(outbuf, 0, 8);
|
memset(outbuf, 0, 8);
|
||||||
/* ??? This should probably return much more information. For now
|
/* ??? This should probably return much more information. For now
|
||||||
|
|
|
@ -47,6 +47,7 @@ typedef struct VirtIOBlockReq
|
||||||
struct virtio_scsi_inhdr *scsi;
|
struct virtio_scsi_inhdr *scsi;
|
||||||
QEMUIOVector qiov;
|
QEMUIOVector qiov;
|
||||||
struct VirtIOBlockReq *next;
|
struct VirtIOBlockReq *next;
|
||||||
|
BlockAcctCookie acct;
|
||||||
} VirtIOBlockReq;
|
} VirtIOBlockReq;
|
||||||
|
|
||||||
static void virtio_blk_req_complete(VirtIOBlockReq *req, int status)
|
static void virtio_blk_req_complete(VirtIOBlockReq *req, int status)
|
||||||
|
@ -58,8 +59,6 @@ static void virtio_blk_req_complete(VirtIOBlockReq *req, int status)
|
||||||
stb_p(&req->in->status, status);
|
stb_p(&req->in->status, status);
|
||||||
virtqueue_push(s->vq, &req->elem, req->qiov.size + sizeof(*req->in));
|
virtqueue_push(s->vq, &req->elem, req->qiov.size + sizeof(*req->in));
|
||||||
virtio_notify(&s->vdev, s->vq);
|
virtio_notify(&s->vdev, s->vq);
|
||||||
|
|
||||||
g_free(req);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
|
static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
|
||||||
|
@ -81,6 +80,8 @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
|
||||||
vm_stop(VMSTOP_DISKFULL);
|
vm_stop(VMSTOP_DISKFULL);
|
||||||
} else {
|
} else {
|
||||||
virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
|
virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
|
||||||
|
bdrv_acct_done(s->bs, &req->acct);
|
||||||
|
g_free(req);
|
||||||
bdrv_mon_event(s->bs, BDRV_ACTION_REPORT, is_read);
|
bdrv_mon_event(s->bs, BDRV_ACTION_REPORT, is_read);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -100,6 +101,8 @@ static void virtio_blk_rw_complete(void *opaque, int ret)
|
||||||
}
|
}
|
||||||
|
|
||||||
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
|
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
|
||||||
|
bdrv_acct_done(req->dev->bs, &req->acct);
|
||||||
|
g_free(req);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void virtio_blk_flush_complete(void *opaque, int ret)
|
static void virtio_blk_flush_complete(void *opaque, int ret)
|
||||||
|
@ -113,6 +116,8 @@ static void virtio_blk_flush_complete(void *opaque, int ret)
|
||||||
}
|
}
|
||||||
|
|
||||||
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
|
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
|
||||||
|
bdrv_acct_done(req->dev->bs, &req->acct);
|
||||||
|
g_free(req);
|
||||||
}
|
}
|
||||||
|
|
||||||
static VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
|
static VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
|
||||||
|
@ -155,6 +160,7 @@ static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
|
||||||
*/
|
*/
|
||||||
if (req->elem.out_num < 2 || req->elem.in_num < 3) {
|
if (req->elem.out_num < 2 || req->elem.in_num < 3) {
|
||||||
virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
|
virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
|
||||||
|
g_free(req);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -163,6 +169,7 @@ static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
|
||||||
*/
|
*/
|
||||||
if (req->elem.out_num > 2 && req->elem.in_num > 3) {
|
if (req->elem.out_num > 2 && req->elem.in_num > 3) {
|
||||||
virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
|
virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
|
||||||
|
g_free(req);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -229,11 +236,13 @@ static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
|
||||||
stl_p(&req->scsi->data_len, hdr.dxfer_len);
|
stl_p(&req->scsi->data_len, hdr.dxfer_len);
|
||||||
|
|
||||||
virtio_blk_req_complete(req, status);
|
virtio_blk_req_complete(req, status);
|
||||||
|
g_free(req);
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
|
static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
|
||||||
{
|
{
|
||||||
virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
|
virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
|
||||||
|
g_free(req);
|
||||||
}
|
}
|
||||||
#endif /* __linux__ */
|
#endif /* __linux__ */
|
||||||
|
|
||||||
|
@ -266,6 +275,8 @@ static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
|
||||||
{
|
{
|
||||||
BlockDriverAIOCB *acb;
|
BlockDriverAIOCB *acb;
|
||||||
|
|
||||||
|
bdrv_acct_start(req->dev->bs, &req->acct, 0, BDRV_ACCT_FLUSH);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Make sure all outstanding writes are posted to the backing device.
|
* Make sure all outstanding writes are posted to the backing device.
|
||||||
*/
|
*/
|
||||||
|
@ -284,6 +295,8 @@ static void virtio_blk_handle_write(VirtIOBlockReq *req, MultiReqBuffer *mrb)
|
||||||
|
|
||||||
sector = ldq_p(&req->out->sector);
|
sector = ldq_p(&req->out->sector);
|
||||||
|
|
||||||
|
bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size, BDRV_ACCT_WRITE);
|
||||||
|
|
||||||
trace_virtio_blk_handle_write(req, sector, req->qiov.size / 512);
|
trace_virtio_blk_handle_write(req, sector, req->qiov.size / 512);
|
||||||
|
|
||||||
if (sector & req->dev->sector_mask) {
|
if (sector & req->dev->sector_mask) {
|
||||||
|
@ -317,6 +330,8 @@ static void virtio_blk_handle_read(VirtIOBlockReq *req)
|
||||||
|
|
||||||
sector = ldq_p(&req->out->sector);
|
sector = ldq_p(&req->out->sector);
|
||||||
|
|
||||||
|
bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size, BDRV_ACCT_READ);
|
||||||
|
|
||||||
if (sector & req->dev->sector_mask) {
|
if (sector & req->dev->sector_mask) {
|
||||||
virtio_blk_rw_complete(req, -EIO);
|
virtio_blk_rw_complete(req, -EIO);
|
||||||
return;
|
return;
|
||||||
|
@ -370,6 +385,7 @@ static void virtio_blk_handle_request(VirtIOBlockReq *req,
|
||||||
s->serial ? s->serial : "",
|
s->serial ? s->serial : "",
|
||||||
MIN(req->elem.in_sg[0].iov_len, VIRTIO_BLK_ID_BYTES));
|
MIN(req->elem.in_sg[0].iov_len, VIRTIO_BLK_ID_BYTES));
|
||||||
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
|
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
|
||||||
|
g_free(req);
|
||||||
} else if (type & VIRTIO_BLK_T_OUT) {
|
} else if (type & VIRTIO_BLK_T_OUT) {
|
||||||
qemu_iovec_init_external(&req->qiov, &req->elem.out_sg[1],
|
qemu_iovec_init_external(&req->qiov, &req->elem.out_sg[1],
|
||||||
req->elem.out_num - 1);
|
req->elem.out_num - 1);
|
||||||
|
|
|
@ -79,6 +79,7 @@ struct ioreq {
|
||||||
|
|
||||||
struct XenBlkDev *blkdev;
|
struct XenBlkDev *blkdev;
|
||||||
QLIST_ENTRY(ioreq) list;
|
QLIST_ENTRY(ioreq) list;
|
||||||
|
BlockAcctCookie acct;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct XenBlkDev {
|
struct XenBlkDev {
|
||||||
|
@ -401,6 +402,7 @@ static void qemu_aio_complete(void *opaque, int ret)
|
||||||
ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
|
ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
|
||||||
ioreq_unmap(ioreq);
|
ioreq_unmap(ioreq);
|
||||||
ioreq_finish(ioreq);
|
ioreq_finish(ioreq);
|
||||||
|
bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
|
||||||
qemu_bh_schedule(ioreq->blkdev->bh);
|
qemu_bh_schedule(ioreq->blkdev->bh);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -419,6 +421,7 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
|
||||||
|
|
||||||
switch (ioreq->req.operation) {
|
switch (ioreq->req.operation) {
|
||||||
case BLKIF_OP_READ:
|
case BLKIF_OP_READ:
|
||||||
|
bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_READ);
|
||||||
ioreq->aio_inflight++;
|
ioreq->aio_inflight++;
|
||||||
bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
|
bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
|
||||||
&ioreq->v, ioreq->v.size / BLOCK_SIZE,
|
&ioreq->v, ioreq->v.size / BLOCK_SIZE,
|
||||||
|
@ -429,6 +432,8 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
|
||||||
if (!ioreq->req.nr_segments) {
|
if (!ioreq->req.nr_segments) {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_WRITE);
|
||||||
ioreq->aio_inflight++;
|
ioreq->aio_inflight++;
|
||||||
bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
|
bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
|
||||||
&ioreq->v, ioreq->v.size / BLOCK_SIZE,
|
&ioreq->v, ioreq->v.size / BLOCK_SIZE,
|
||||||
|
|
Loading…
Reference in New Issue