mirror of https://github.com/xemu-project/xemu.git
hw/nvme fixes

* fixes for aio cancellation in commands that may issue several aios

Merge tag 'nvme-next-pull-request' of git://git.infradead.org/qemu-nvme into staging

# -----BEGIN PGP SIGNATURE-----
#
# iQEzBAABCgAdFiEEUigzqnXi3OaiR2bATeGvMW1PDekFAmOI2uQACgkQTeGvMW1P
# Dem6nQgAi8Dm0vhRLoEHqT6FG+VBy0Evpw2QThGE8PxsfzJ1nlwXt6s/NwEc10Uc
# d5exp6AR9p37dGJfH82y8EYdEgMeJfsKQRDVMUR4n7eEOW+/Sp4WicO7iamEIWhr
# CgRBw1aqU7Im0CHn+3nXu0LKXEtT+tOQrfnr255ELzCxKPZuP3Iw/+nzLQij1G4N
# 9D9FPPyec+blz+0HuRg12m1ri6TAb2k9CuODuZrqLDCW8Hnl1MVmmYGZrYBy9sPr
# Q2zohAjad6R5/+4BCAlusbQ0deoXYKOJdb8J2A9EN73maSqjsHQAagfs+kKxAQK4
# ttiy/M/l5EGJG496rZfUJZCnVlOllQ==
# =Blzi
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 01 Dec 2022 11:48:36 EST
# gpg:                using RSA key 522833AA75E2DCE6A24766C04DE1AF316D4F0DE9
# gpg: Good signature from "Klaus Jensen <its@irrelevant.dk>" [unknown]
# gpg:                 aka "Klaus Jensen <k.jensen@samsung.com>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: DDCA 4D9C 9EF9 31CC 3468 4272 63D5 6FC5 E55D A838
#      Subkey fingerprint: 5228 33AA 75E2 DCE6 A247 66C0 4DE1 AF31 6D4F 0DE9

* tag 'nvme-next-pull-request' of git://git.infradead.org/qemu-nvme:
  hw/nvme: remove copy bh scheduling
  hw/nvme: fix aio cancel in dsm
  hw/nvme: fix aio cancel in zone reset
  hw/nvme: fix aio cancel in flush
  hw/nvme: fix aio cancel in format

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
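The shared shape of these fixes, before reading the diff below: each of these commands drives a chain of block-layer aios from a single AIOCB, so a cancellation has to be latched in the AIOCB itself and re-checked before every re-issue. A minimal sketch of that cancel-and-guard pattern, using hypothetical nvme_foo_* names and a next_child_aio() helper (this is an illustration, not the actual ctrl.c code):

    /* Sketch only: illustrative names, not the hw/nvme/ctrl.c implementation. */
    typedef struct NvmeFooAIOCB {
        BlockAIOCB common;   /* AIOCB handed back to the submitter */
        BlockAIOCB *aiocb;   /* child aio currently in flight, if any */
        int ret;             /* first error, or -ECANCELED, wins */
        int idx, nr;         /* progress through the per-range aios */
    } NvmeFooAIOCB;

    static void nvme_foo_cancel(BlockAIOCB *acb)
    {
        NvmeFooAIOCB *iocb = container_of(acb, NvmeFooAIOCB, common);

        /* latch the cancellation and stop the aio that is in flight right now */
        iocb->ret = -ECANCELED;

        if (iocb->aiocb) {
            blk_aio_cancel_async(iocb->aiocb);
            iocb->aiocb = NULL;
        }
    }

    static void nvme_foo_cb(void *opaque, int ret)
    {
        NvmeFooAIOCB *iocb = opaque;

        /* do not issue the next aio if this one failed, or if the command was
         * already failed or cancelled from another path */
        if (ret < 0 || iocb->ret < 0 || iocb->idx == iocb->nr) {
            goto done;
        }

        iocb->aiocb = next_child_aio(iocb, nvme_foo_cb);  /* hypothetical helper */
        iocb->idx++;
        return;

    done:
        if (ret < 0) {
            iocb->ret = ret;
        }
        iocb->common.cb(iocb->common.opaque, iocb->ret);
        qemu_aio_unref(iocb);
    }

The "ret < 0 || iocb->ret < 0 || ..." guard is the line that shows up repeatedly in the hunks below.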
commit bb94fa8646

 hw/nvme/ctrl.c | 182
 1 file changed
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -2329,7 +2329,6 @@ typedef struct NvmeDSMAIOCB {
     BlockAIOCB common;
     BlockAIOCB *aiocb;
     NvmeRequest *req;
-    QEMUBH *bh;
     int ret;
 
     NvmeDsmRange *range;
@@ -2351,7 +2350,7 @@ static void nvme_dsm_cancel(BlockAIOCB *aiocb)
     } else {
         /*
          * We only reach this if nvme_dsm_cancel() has already been called or
-         * the command ran to completion and nvme_dsm_bh is scheduled to run.
+         * the command ran to completion.
          */
         assert(iocb->idx == iocb->nr);
     }
@@ -2362,17 +2361,6 @@ static const AIOCBInfo nvme_dsm_aiocb_info = {
     .cancel_async = nvme_dsm_cancel,
 };
 
-static void nvme_dsm_bh(void *opaque)
-{
-    NvmeDSMAIOCB *iocb = opaque;
-
-    iocb->common.cb(iocb->common.opaque, iocb->ret);
-
-    qemu_bh_delete(iocb->bh);
-    iocb->bh = NULL;
-    qemu_aio_unref(iocb);
-}
-
 static void nvme_dsm_cb(void *opaque, int ret);
 
 static void nvme_dsm_md_cb(void *opaque, int ret)
@@ -2384,16 +2372,10 @@ static void nvme_dsm_md_cb(void *opaque, int ret)
     uint64_t slba;
     uint32_t nlb;
 
-    if (ret < 0) {
-        iocb->ret = ret;
+    if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) {
         goto done;
     }
 
-    if (!ns->lbaf.ms) {
-        nvme_dsm_cb(iocb, 0);
-        return;
-    }
-
     range = &iocb->range[iocb->idx - 1];
     slba = le64_to_cpu(range->slba);
     nlb = le32_to_cpu(range->nlb);
@@ -2406,7 +2388,6 @@ static void nvme_dsm_md_cb(void *opaque, int ret)
     ret = nvme_block_status_all(ns, slba, nlb, BDRV_BLOCK_ZERO);
     if (ret) {
         if (ret < 0) {
-            iocb->ret = ret;
             goto done;
         }
 
@@ -2420,8 +2401,7 @@ static void nvme_dsm_md_cb(void *opaque, int ret)
     return;
 
 done:
-    iocb->aiocb = NULL;
-    qemu_bh_schedule(iocb->bh);
+    nvme_dsm_cb(iocb, ret);
 }
 
 static void nvme_dsm_cb(void *opaque, int ret)
@@ -2434,7 +2414,9 @@ static void nvme_dsm_cb(void *opaque, int ret)
     uint64_t slba;
     uint32_t nlb;
 
-    if (ret < 0) {
+    if (iocb->ret < 0) {
+        goto done;
+    } else if (ret < 0) {
         iocb->ret = ret;
         goto done;
     }
@@ -2468,7 +2450,8 @@ next:
 
 done:
     iocb->aiocb = NULL;
-    qemu_bh_schedule(iocb->bh);
+    iocb->common.cb(iocb->common.opaque, iocb->ret);
+    qemu_aio_unref(iocb);
 }
 
 static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
@@ -2486,7 +2469,6 @@ static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
                                          nvme_misc_cb, req);
 
         iocb->req = req;
-        iocb->bh = qemu_bh_new(nvme_dsm_bh, iocb);
         iocb->ret = 0;
         iocb->range = g_new(NvmeDsmRange, nr);
         iocb->nr = nr;
@@ -2570,7 +2552,6 @@ typedef struct NvmeCopyAIOCB {
     BlockAIOCB common;
     BlockAIOCB *aiocb;
     NvmeRequest *req;
-    QEMUBH *bh;
     int ret;
 
     void *ranges;
@@ -2608,9 +2589,8 @@ static const AIOCBInfo nvme_copy_aiocb_info = {
     .cancel_async = nvme_copy_cancel,
 };
 
-static void nvme_copy_bh(void *opaque)
+static void nvme_copy_done(NvmeCopyAIOCB *iocb)
 {
-    NvmeCopyAIOCB *iocb = opaque;
     NvmeRequest *req = iocb->req;
     NvmeNamespace *ns = req->ns;
     BlockAcctStats *stats = blk_get_stats(ns->blkconf.blk);
@@ -2622,9 +2602,6 @@ static void nvme_copy_bh(void *opaque)
     qemu_iovec_destroy(&iocb->iov);
     g_free(iocb->bounce);
 
-    qemu_bh_delete(iocb->bh);
-    iocb->bh = NULL;
-
     if (iocb->ret < 0) {
         block_acct_failed(stats, &iocb->acct.read);
         block_acct_failed(stats, &iocb->acct.write);
@@ -2637,7 +2614,7 @@
     qemu_aio_unref(iocb);
 }
 
-static void nvme_copy_cb(void *opaque, int ret);
+static void nvme_do_copy(NvmeCopyAIOCB *iocb);
 
 static void nvme_copy_source_range_parse_format0(void *ranges, int idx,
                                                  uint64_t *slba, uint32_t *nlb,
@@ -2749,7 +2726,7 @@ static void nvme_copy_out_completed_cb(void *opaque, int ret)
     iocb->idx++;
     iocb->slba += nlb;
 out:
-    nvme_copy_cb(iocb, iocb->ret);
+    nvme_do_copy(iocb);
 }
 
 static void nvme_copy_out_cb(void *opaque, int ret)
@@ -2761,16 +2738,8 @@ static void nvme_copy_out_cb(void *opaque, int ret)
     size_t mlen;
     uint8_t *mbounce;
 
-    if (ret < 0) {
-        iocb->ret = ret;
+    if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) {
         goto out;
-    } else if (iocb->ret < 0) {
-        goto out;
     }
 
-    if (!ns->lbaf.ms) {
-        nvme_copy_out_completed_cb(iocb, 0);
-        return;
-    }
-
     nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, NULL,
@@ -2789,7 +2758,7 @@ static void nvme_copy_out_cb(void *opaque, int ret)
     return;
 
 out:
-    nvme_copy_cb(iocb, ret);
+    nvme_copy_out_completed_cb(iocb, ret);
 }
 
 static void nvme_copy_in_completed_cb(void *opaque, int ret)
@@ -2883,15 +2852,9 @@ static void nvme_copy_in_completed_cb(void *opaque, int ret)
 
 invalid:
     req->status = status;
-    iocb->aiocb = NULL;
-    if (iocb->bh) {
-        qemu_bh_schedule(iocb->bh);
-    }
-
-    return;
-
+    iocb->ret = -1;
 out:
-    nvme_copy_cb(iocb, ret);
+    nvme_do_copy(iocb);
 }
 
 static void nvme_copy_in_cb(void *opaque, int ret)
@@ -2902,16 +2865,8 @@ static void nvme_copy_in_cb(void *opaque, int ret)
     uint64_t slba;
     uint32_t nlb;
 
-    if (ret < 0) {
-        iocb->ret = ret;
+    if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) {
         goto out;
-    } else if (iocb->ret < 0) {
-        goto out;
     }
 
-    if (!ns->lbaf.ms) {
-        nvme_copy_in_completed_cb(iocb, 0);
-        return;
-    }
-
     nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, &slba,
@@ -2927,12 +2882,11 @@ static void nvme_copy_in_cb(void *opaque, int ret)
     return;
 
 out:
-    nvme_copy_cb(iocb, iocb->ret);
+    nvme_copy_in_completed_cb(iocb, ret);
 }
 
-static void nvme_copy_cb(void *opaque, int ret)
+static void nvme_do_copy(NvmeCopyAIOCB *iocb)
 {
-    NvmeCopyAIOCB *iocb = opaque;
     NvmeRequest *req = iocb->req;
     NvmeNamespace *ns = req->ns;
     uint64_t slba;
@@ -2940,10 +2894,7 @@ static void nvme_copy_cb(void *opaque, int ret)
     size_t len;
     uint16_t status;
 
-    if (ret < 0) {
-        iocb->ret = ret;
-        goto done;
-    } else if (iocb->ret < 0) {
+    if (iocb->ret < 0) {
         goto done;
     }
 
@@ -2990,14 +2941,11 @@
 
 invalid:
     req->status = status;
     iocb->ret = -1;
 done:
-    iocb->aiocb = NULL;
-    if (iocb->bh) {
-        qemu_bh_schedule(iocb->bh);
-    }
+    nvme_copy_done(iocb);
 }
 
 static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
 {
     NvmeNamespace *ns = req->ns;
@@ -3067,7 +3015,6 @@ static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
     }
 
     iocb->req = req;
-    iocb->bh = qemu_bh_new(nvme_copy_bh, iocb);
     iocb->ret = 0;
     iocb->nr = nr;
     iocb->idx = 0;
@@ -3084,7 +3031,7 @@ static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
                      BLOCK_ACCT_WRITE);
 
     req->aiocb = &iocb->common;
-    nvme_copy_cb(iocb, 0);
+    nvme_do_copy(iocb);
 
     return NVME_NO_COMPLETE;
 
@@ -3160,7 +3107,6 @@ typedef struct NvmeFlushAIOCB {
     BlockAIOCB common;
     BlockAIOCB *aiocb;
     NvmeRequest *req;
-    QEMUBH *bh;
     int ret;
 
     NvmeNamespace *ns;
@@ -3176,6 +3122,7 @@ static void nvme_flush_cancel(BlockAIOCB *acb)
 
     if (iocb->aiocb) {
         blk_aio_cancel_async(iocb->aiocb);
+        iocb->aiocb = NULL;
     }
 }
 
@@ -3185,6 +3132,8 @@ static const AIOCBInfo nvme_flush_aiocb_info = {
     .get_aio_context = nvme_get_aio_context,
 };
 
+static void nvme_do_flush(NvmeFlushAIOCB *iocb);
+
 static void nvme_flush_ns_cb(void *opaque, int ret)
 {
     NvmeFlushAIOCB *iocb = opaque;
@@ -3206,13 +3155,11 @@ static void nvme_flush_ns_cb(void *opaque, int ret)
     }
 
 out:
-    iocb->aiocb = NULL;
-    qemu_bh_schedule(iocb->bh);
+    nvme_do_flush(iocb);
 }
 
-static void nvme_flush_bh(void *opaque)
+static void nvme_do_flush(NvmeFlushAIOCB *iocb)
 {
-    NvmeFlushAIOCB *iocb = opaque;
     NvmeRequest *req = iocb->req;
     NvmeCtrl *n = nvme_ctrl(req);
     int i;
@@ -3239,14 +3186,8 @@ static void nvme_flush_bh(void *opaque)
     return;
 
 done:
-    qemu_bh_delete(iocb->bh);
-    iocb->bh = NULL;
-
     iocb->common.cb(iocb->common.opaque, iocb->ret);
-
     qemu_aio_unref(iocb);
-
-    return;
 }
 
 static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
@@ -3258,7 +3199,6 @@ static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
     iocb = qemu_aio_get(&nvme_flush_aiocb_info, NULL, nvme_misc_cb, req);
 
     iocb->req = req;
-    iocb->bh = qemu_bh_new(nvme_flush_bh, iocb);
     iocb->ret = 0;
     iocb->ns = NULL;
     iocb->nsid = 0;
@@ -3280,13 +3220,11 @@ static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
     }
 
     req->aiocb = &iocb->common;
-    qemu_bh_schedule(iocb->bh);
+    nvme_do_flush(iocb);
 
     return NVME_NO_COMPLETE;
 
 out:
-    qemu_bh_delete(iocb->bh);
-    iocb->bh = NULL;
     qemu_aio_unref(iocb);
 
     return status;
@@ -3721,7 +3659,6 @@ typedef struct NvmeZoneResetAIOCB {
     BlockAIOCB common;
     BlockAIOCB *aiocb;
     NvmeRequest *req;
-    QEMUBH *bh;
     int ret;
 
     bool all;
@@ -3750,17 +3687,6 @@ static const AIOCBInfo nvme_zone_reset_aiocb_info = {
     .cancel_async = nvme_zone_reset_cancel,
 };
 
-static void nvme_zone_reset_bh(void *opaque)
-{
-    NvmeZoneResetAIOCB *iocb = opaque;
-
-    iocb->common.cb(iocb->common.opaque, iocb->ret);
-
-    qemu_bh_delete(iocb->bh);
-    iocb->bh = NULL;
-    qemu_aio_unref(iocb);
-}
-
 static void nvme_zone_reset_cb(void *opaque, int ret);
 
 static void nvme_zone_reset_epilogue_cb(void *opaque, int ret)
@@ -3771,14 +3697,8 @@ static void nvme_zone_reset_epilogue_cb(void *opaque, int ret)
     int64_t moff;
     int count;
 
-    if (ret < 0) {
-        nvme_zone_reset_cb(iocb, ret);
-        return;
-    }
-
-    if (!ns->lbaf.ms) {
-        nvme_zone_reset_cb(iocb, 0);
-        return;
+    if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) {
+        goto out;
     }
 
     moff = nvme_moff(ns, iocb->zone->d.zslba);
@@ -3788,6 +3708,9 @@ static void nvme_zone_reset_epilogue_cb(void *opaque, int ret)
                                         BDRV_REQ_MAY_UNMAP,
                                         nvme_zone_reset_cb, iocb);
     return;
+
+out:
+    nvme_zone_reset_cb(iocb, ret);
 }
 
 static void nvme_zone_reset_cb(void *opaque, int ret)
@@ -3796,7 +3719,9 @@ static void nvme_zone_reset_cb(void *opaque, int ret)
     NvmeRequest *req = iocb->req;
     NvmeNamespace *ns = req->ns;
 
-    if (ret < 0) {
+    if (iocb->ret < 0) {
+        goto done;
+    } else if (ret < 0) {
         iocb->ret = ret;
         goto done;
     }
@@ -3844,9 +3769,9 @@ static void nvme_zone_reset_cb(void *opaque, int ret)
 
 done:
     iocb->aiocb = NULL;
-    if (iocb->bh) {
-        qemu_bh_schedule(iocb->bh);
-    }
+
+    iocb->common.cb(iocb->common.opaque, iocb->ret);
+    qemu_aio_unref(iocb);
 }
 
 static uint16_t nvme_zone_mgmt_send_zrwa_flush(NvmeCtrl *n, NvmeZone *zone,
@@ -3951,7 +3876,6 @@ static uint16_t nvme_zone_mgmt_send(NvmeCtrl *n, NvmeRequest *req)
                            nvme_misc_cb, req);
 
         iocb->req = req;
-        iocb->bh = qemu_bh_new(nvme_zone_reset_bh, iocb);
         iocb->ret = 0;
         iocb->all = all;
         iocb->idx = zone_idx;
@@ -5741,7 +5665,6 @@ static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req)
 typedef struct NvmeFormatAIOCB {
     BlockAIOCB common;
     BlockAIOCB *aiocb;
-    QEMUBH *bh;
     NvmeRequest *req;
     int ret;
 
@@ -5756,14 +5679,15 @@ typedef struct NvmeFormatAIOCB {
     uint8_t pil;
 } NvmeFormatAIOCB;
 
-static void nvme_format_bh(void *opaque);
-
 static void nvme_format_cancel(BlockAIOCB *aiocb)
 {
     NvmeFormatAIOCB *iocb = container_of(aiocb, NvmeFormatAIOCB, common);
 
+    iocb->ret = -ECANCELED;
+
     if (iocb->aiocb) {
         blk_aio_cancel_async(iocb->aiocb);
+        iocb->aiocb = NULL;
     }
 }
 
@@ -5787,13 +5711,17 @@ static void nvme_format_set(NvmeNamespace *ns, uint8_t lbaf, uint8_t mset,
     nvme_ns_init_format(ns);
 }
 
+static void nvme_do_format(NvmeFormatAIOCB *iocb);
+
 static void nvme_format_ns_cb(void *opaque, int ret)
 {
     NvmeFormatAIOCB *iocb = opaque;
     NvmeNamespace *ns = iocb->ns;
     int bytes;
 
-    if (ret < 0) {
+    if (iocb->ret < 0) {
+        goto done;
+    } else if (ret < 0) {
         iocb->ret = ret;
         goto done;
     }
@@ -5817,8 +5745,7 @@ static void nvme_format_ns_cb(void *opaque, int ret)
     iocb->offset = 0;
 
 done:
-    iocb->aiocb = NULL;
-    qemu_bh_schedule(iocb->bh);
+    nvme_do_format(iocb);
 }
 
 static uint16_t nvme_format_check(NvmeNamespace *ns, uint8_t lbaf, uint8_t pi)
@@ -5842,9 +5769,8 @@ static uint16_t nvme_format_check(NvmeNamespace *ns, uint8_t lbaf, uint8_t pi)
     return NVME_SUCCESS;
 }
 
-static void nvme_format_bh(void *opaque)
+static void nvme_do_format(NvmeFormatAIOCB *iocb)
 {
-    NvmeFormatAIOCB *iocb = opaque;
     NvmeRequest *req = iocb->req;
     NvmeCtrl *n = nvme_ctrl(req);
     uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
@@ -5882,11 +5808,7 @@ static void nvme_format_bh(void *opaque)
     return;
 
 done:
-    qemu_bh_delete(iocb->bh);
-    iocb->bh = NULL;
-
     iocb->common.cb(iocb->common.opaque, iocb->ret);
-
     qemu_aio_unref(iocb);
 }
 
@@ -5905,7 +5827,6 @@ static uint16_t nvme_format(NvmeCtrl *n, NvmeRequest *req)
     iocb = qemu_aio_get(&nvme_format_aiocb_info, NULL, nvme_misc_cb, req);
 
     iocb->req = req;
-    iocb->bh = qemu_bh_new(nvme_format_bh, iocb);
     iocb->ret = 0;
     iocb->ns = NULL;
     iocb->nsid = 0;
@@ -5934,14 +5855,13 @@ static uint16_t nvme_format(NvmeCtrl *n, NvmeRequest *req)
     }
 
     req->aiocb = &iocb->common;
-    qemu_bh_schedule(iocb->bh);
+    nvme_do_format(iocb);
 
     return NVME_NO_COMPLETE;
 
 out:
-    qemu_bh_delete(iocb->bh);
-    iocb->bh = NULL;
     qemu_aio_unref(iocb);
+
     return status;
 }
 
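The other half of the series is visible in the removed nvme_dsm_bh()/nvme_zone_reset_bh()/nvme_flush_bh()/nvme_format_bh() bodies and the nvme_copy_bh() to nvme_copy_done() rename above: completion no longer detours through a QEMUBH carried in the AIOCB. A rough before/after sketch, reusing the hypothetical NvmeFooAIOCB from the sketch near the top plus the QEMUBH *bh member that the diff deletes (again, not the actual ctrl.c code):

    /* Before (sketch): the final callback only scheduled a bottom half, and
     * the bh completed the request and dropped the AIOCB reference later. */
    static void nvme_foo_bh(void *opaque)
    {
        NvmeFooAIOCB *iocb = opaque;

        iocb->common.cb(iocb->common.opaque, iocb->ret);

        qemu_bh_delete(iocb->bh);
        iocb->bh = NULL;
        qemu_aio_unref(iocb);
    }

    static void nvme_foo_complete_before(NvmeFooAIOCB *iocb)
    {
        iocb->aiocb = NULL;
        qemu_bh_schedule(iocb->bh);   /* request only finishes when the bh runs */
    }

    /* After (sketch): no QEMUBH member at all; the last callback in the chain
     * completes the request in place, as in the dsm/flush/format hunks above. */
    static void nvme_foo_complete_after(NvmeFooAIOCB *iocb)
    {
        iocb->aiocb = NULL;
        iocb->common.cb(iocb->common.opaque, iocb->ret);
        qemu_aio_unref(iocb);
    }

Read alongside the cancel sketch near the top, this is presumably what "fixes for aio cancellation in commands that may issue several aios" refers to: completion becomes a plain function call in the same callback chain that observes iocb->ret, rather than a separately scheduled bottom half.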