diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index 3a7dc194e2..456a2d5694 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -660,6 +660,38 @@ static void qemu_aio_complete(void *opaque, int ret)
     qemu_bh_schedule(ioreq->blkdev->bh);
 }
 
+static bool blk_split_discard(struct ioreq *ioreq, blkif_sector_t sector_number,
+                              uint64_t nr_sectors)
+{
+    struct XenBlkDev *blkdev = ioreq->blkdev;
+    int64_t byte_offset;
+    int byte_chunk;
+    uint64_t byte_remaining, limit;
+    uint64_t sec_start = sector_number;
+    uint64_t sec_count = nr_sectors;
+
+    /* Wrap around, or overflowing byte limit? */
+    if (sec_start + sec_count < sec_count ||
+        sec_start + sec_count > INT64_MAX >> BDRV_SECTOR_BITS) {
+        return false;
+    }
+
+    limit = BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS;
+    byte_offset = sec_start << BDRV_SECTOR_BITS;
+    byte_remaining = sec_count << BDRV_SECTOR_BITS;
+
+    do {
+        byte_chunk = byte_remaining > limit ? limit : byte_remaining;
+        ioreq->aio_inflight++;
+        blk_aio_pdiscard(blkdev->blk, byte_offset, byte_chunk,
+                         qemu_aio_complete, ioreq);
+        byte_remaining -= byte_chunk;
+        byte_offset += byte_chunk;
+    } while (byte_remaining > 0);
+
+    return true;
+}
+
 static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
 {
     struct XenBlkDev *blkdev = ioreq->blkdev;
@@ -708,12 +740,10 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
         break;
     case BLKIF_OP_DISCARD:
     {
-        struct blkif_request_discard *discard_req = (void *)&ioreq->req;
-        ioreq->aio_inflight++;
-        blk_aio_pdiscard(blkdev->blk,
-                         discard_req->sector_number << BDRV_SECTOR_BITS,
-                         discard_req->nr_sectors << BDRV_SECTOR_BITS,
-                         qemu_aio_complete, ioreq);
+        struct blkif_request_discard *req = (void *)&ioreq->req;
+        if (!blk_split_discard(ioreq, req->sector_number, req->nr_sectors)) {
+            goto err;
+        }
         break;
     }
     default:
diff --git a/xen-hvm.c b/xen-hvm.c
index 99b8ee8a4f..0892361cc2 100644
--- a/xen-hvm.c
+++ b/xen-hvm.c
@@ -995,6 +995,9 @@ static int handle_buffered_iopage(XenIOState *state)
     }
 
     memset(&req, 0x00, sizeof(req));
+    req.state = STATE_IOREQ_READY;
+    req.count = 1;
+    req.dir = IOREQ_WRITE;
 
     for (;;) {
         uint32_t rdptr = buf_page->read_pointer, wrptr;
@@ -1009,18 +1012,16 @@ static int handle_buffered_iopage(XenIOState *state)
             break;
         }
         buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM];
-        req.size = 1UL << buf_req->size;
-        req.count = 1;
+        req.size = 1U << buf_req->size;
         req.addr = buf_req->addr;
         req.data = buf_req->data;
-        req.state = STATE_IOREQ_READY;
-        req.dir = buf_req->dir;
-        req.df = 1;
         req.type = buf_req->type;
-        req.data_is_ptr = 0;
         xen_rmb();
         qw = (req.size == 8);
         if (qw) {
+            if (rdptr + 1 == wrptr) {
+                hw_error("Incomplete quad word buffered ioreq");
+            }
             buf_req = &buf_page->buf_ioreq[(rdptr + 1) %
                                            IOREQ_BUFFER_SLOT_NUM];
             req.data |= ((uint64_t)buf_req->data) << 32;
@@ -1029,6 +1030,15 @@ static int handle_buffered_iopage(XenIOState *state)
 
         handle_ioreq(state, &req);
 
+        /* Only req.data may get updated by handle_ioreq(), albeit even that
+         * should not happen as such data would never make it to the guest (we
+         * can only usefully see writes here after all).
+         */
+        assert(req.state == STATE_IOREQ_READY);
+        assert(req.count == 1);
+        assert(req.dir == IOREQ_WRITE);
+        assert(!req.data_is_ptr);
+
         atomic_add(&buf_page->read_pointer, qw + 1);
     }
 
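
For reviewers, a minimal standalone sketch of the chunking arithmetic introduced by blk_split_discard() above. SECTOR_BITS mirrors QEMU's BDRV_SECTOR_BITS, LIMIT_BYTES is an assumed per-request byte cap in the spirit of BDRV_REQUEST_MAX_SECTORS, and the printf trace stands in for the blk_aio_pdiscard() calls. Illustration only, not part of the patch:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SECTOR_BITS 9                        /* 512-byte sectors */
#define LIMIT_BYTES (INT32_MAX & ~511)       /* assumed per-request byte cap */

static bool split_discard(uint64_t sec_start, uint64_t sec_count)
{
    /* Reject sector-count wrap-around and byte offsets beyond INT64_MAX,
     * the same two conditions guarded against in the patch. */
    if (sec_start + sec_count < sec_count ||
        sec_start + sec_count > (uint64_t)INT64_MAX >> SECTOR_BITS) {
        return false;
    }

    int64_t byte_offset = sec_start << SECTOR_BITS;
    uint64_t byte_remaining = sec_count << SECTOR_BITS;

    do {
        /* Issue at most LIMIT_BYTES per request, then advance */
        int byte_chunk = byte_remaining > LIMIT_BYTES ? LIMIT_BYTES
                                                      : (int)byte_remaining;
        printf("discard offset=%" PRId64 " bytes=%d\n", byte_offset, byte_chunk);
        byte_remaining -= byte_chunk;
        byte_offset += byte_chunk;
    } while (byte_remaining > 0);

    return true;
}

int main(void)
{
    /* A 5 GiB discard is split into three requests, each below 2 GiB */
    split_discard(0, (5ULL << 30) >> SECTOR_BITS);
    return 0;
}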
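
Likewise, a sketch of the quad-word hazard the xen-hvm.c hunk closes: an 8-byte request occupies two ring slots, so the consumer must confirm the producer has published the second slot before merging and skipping it. The ring layout, SLOT_NUM value, and consume() helper are simplified stand-ins for Xen's buffered ioreq page, not the real interface:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SLOT_NUM 8                      /* stand-in for IOREQ_BUFFER_SLOT_NUM */

struct slot { uint32_t data; uint8_t size_shift; };

struct ring {
    uint32_t read_pointer;
    uint32_t write_pointer;
    struct slot buf[SLOT_NUM];
};

/* Consume one request; returns the number of slots it occupied (1 or 2). */
static int consume(struct ring *r, uint64_t *out)
{
    uint32_t rdptr = r->read_pointer, wrptr = r->write_pointer;
    struct slot *s = &r->buf[rdptr % SLOT_NUM];
    int qw = (1U << s->size_shift) == 8;   /* 8-byte request spans two slots */

    *out = s->data;
    if (qw) {
        /* Without this check, a malicious producer could advertise a quad
         * word whose second half was never written, making the consumer
         * read (and then skip past) a slot beyond the published end. */
        if (rdptr + 1 == wrptr) {
            fprintf(stderr, "Incomplete quad word buffered ioreq\n");
            exit(EXIT_FAILURE);
        }
        s = &r->buf[(rdptr + 1) % SLOT_NUM];
        *out |= (uint64_t)s->data << 32;
    }
    r->read_pointer += qw + 1;
    return qw + 1;
}

int main(void)
{
    struct ring r = { .write_pointer = 2 };
    uint64_t val;

    /* A well-formed quad word: low half in slot 0, high half in slot 1 */
    r.buf[0] = (struct slot){ .data = 0x89abcdef, .size_shift = 3 };
    r.buf[1] = (struct slot){ .data = 0x01234567, .size_shift = 3 };
    consume(&r, &val);
    printf("qword = 0x%016llx\n", (unsigned long long)val);
    return 0;
}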