hw/nvme: use QOM accessors

Replace various ->parent_obj use with the equivalent QOM accessors.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
This commit is contained in:
Klaus Jensen 2022-12-08 12:43:18 +01:00
parent 528d9f33ca
commit 48b32c28d5
1 changed file with 48 additions and 41 deletions

View File

@@ -449,7 +449,7 @@ static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
return 0; return 0;
} }
return pci_dma_read(&n->parent_obj, addr, buf, size); return pci_dma_read(PCI_DEVICE(n), addr, buf, size);
} }
static int nvme_addr_write(NvmeCtrl *n, hwaddr addr, const void *buf, int size) static int nvme_addr_write(NvmeCtrl *n, hwaddr addr, const void *buf, int size)
@@ -469,7 +469,7 @@ static int nvme_addr_write(NvmeCtrl *n, hwaddr addr, const void *buf, int size)
return 0; return 0;
} }
return pci_dma_write(&n->parent_obj, addr, buf, size); return pci_dma_write(PCI_DEVICE(n), addr, buf, size);
} }
static bool nvme_nsid_valid(NvmeCtrl *n, uint32_t nsid) static bool nvme_nsid_valid(NvmeCtrl *n, uint32_t nsid)
@@ -514,24 +514,27 @@ static uint8_t nvme_sq_empty(NvmeSQueue *sq)
static void nvme_irq_check(NvmeCtrl *n) static void nvme_irq_check(NvmeCtrl *n)
{ {
PCIDevice *pci = PCI_DEVICE(n);
uint32_t intms = ldl_le_p(&n->bar.intms); uint32_t intms = ldl_le_p(&n->bar.intms);
if (msix_enabled(&(n->parent_obj))) { if (msix_enabled(pci)) {
return; return;
} }
if (~intms & n->irq_status) { if (~intms & n->irq_status) {
pci_irq_assert(&n->parent_obj); pci_irq_assert(pci);
} else { } else {
pci_irq_deassert(&n->parent_obj); pci_irq_deassert(pci);
} }
} }
static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq) static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
{ {
PCIDevice *pci = PCI_DEVICE(n);
if (cq->irq_enabled) { if (cq->irq_enabled) {
if (msix_enabled(&(n->parent_obj))) { if (msix_enabled(pci)) {
trace_pci_nvme_irq_msix(cq->vector); trace_pci_nvme_irq_msix(cq->vector);
msix_notify(&(n->parent_obj), cq->vector); msix_notify(pci, cq->vector);
} else { } else {
trace_pci_nvme_irq_pin(); trace_pci_nvme_irq_pin();
assert(cq->vector < 32); assert(cq->vector < 32);
@@ -546,7 +549,7 @@ static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq) static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
{ {
if (cq->irq_enabled) { if (cq->irq_enabled) {
if (msix_enabled(&(n->parent_obj))) { if (msix_enabled(PCI_DEVICE(n))) {
return; return;
} else { } else {
assert(cq->vector < 32); assert(cq->vector < 32);
@@ -570,7 +573,7 @@ static void nvme_req_clear(NvmeRequest *req)
static inline void nvme_sg_init(NvmeCtrl *n, NvmeSg *sg, bool dma) static inline void nvme_sg_init(NvmeCtrl *n, NvmeSg *sg, bool dma)
{ {
if (dma) { if (dma) {
pci_dma_sglist_init(&sg->qsg, &n->parent_obj, 0); pci_dma_sglist_init(&sg->qsg, PCI_DEVICE(n), 0);
sg->flags = NVME_SG_DMA; sg->flags = NVME_SG_DMA;
} else { } else {
qemu_iovec_init(&sg->iov, 0); qemu_iovec_init(&sg->iov, 0);
@@ -1333,7 +1336,7 @@ static inline void nvme_blk_write(BlockBackend *blk, int64_t offset,
static void nvme_update_cq_head(NvmeCQueue *cq) static void nvme_update_cq_head(NvmeCQueue *cq)
{ {
pci_dma_read(&cq->ctrl->parent_obj, cq->db_addr, &cq->head, pci_dma_read(PCI_DEVICE(cq->ctrl), cq->db_addr, &cq->head,
sizeof(cq->head)); sizeof(cq->head));
trace_pci_nvme_shadow_doorbell_cq(cq->cqid, cq->head); trace_pci_nvme_shadow_doorbell_cq(cq->cqid, cq->head);
} }
@@ -1363,7 +1366,7 @@ static void nvme_post_cqes(void *opaque)
req->cqe.sq_id = cpu_to_le16(sq->sqid); req->cqe.sq_id = cpu_to_le16(sq->sqid);
req->cqe.sq_head = cpu_to_le16(sq->head); req->cqe.sq_head = cpu_to_le16(sq->head);
addr = cq->dma_addr + cq->tail * n->cqe_size; addr = cq->dma_addr + cq->tail * n->cqe_size;
ret = pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe, ret = pci_dma_write(PCI_DEVICE(n), addr, (void *)&req->cqe,
sizeof(req->cqe)); sizeof(req->cqe));
if (ret) { if (ret) {
trace_pci_nvme_err_addr_write(addr); trace_pci_nvme_err_addr_write(addr);
@@ -4615,6 +4618,7 @@ static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n) static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
{ {
PCIDevice *pci = PCI_DEVICE(n);
uint16_t offset = (cq->cqid << 3) + (1 << 2); uint16_t offset = (cq->cqid << 3) + (1 << 2);
n->cq[cq->cqid] = NULL; n->cq[cq->cqid] = NULL;
@@ -4625,8 +4629,8 @@ static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
event_notifier_set_handler(&cq->notifier, NULL); event_notifier_set_handler(&cq->notifier, NULL);
event_notifier_cleanup(&cq->notifier); event_notifier_cleanup(&cq->notifier);
} }
if (msix_enabled(&n->parent_obj)) { if (msix_enabled(pci)) {
msix_vector_unuse(&n->parent_obj, cq->vector); msix_vector_unuse(pci, cq->vector);
} }
if (cq->cqid) { if (cq->cqid) {
g_free(cq); g_free(cq);
@@ -4664,8 +4668,10 @@ static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
uint16_t cqid, uint16_t vector, uint16_t size, uint16_t cqid, uint16_t vector, uint16_t size,
uint16_t irq_enabled) uint16_t irq_enabled)
{ {
if (msix_enabled(&n->parent_obj)) { PCIDevice *pci = PCI_DEVICE(n);
msix_vector_use(&n->parent_obj, vector);
if (msix_enabled(pci)) {
msix_vector_use(pci, vector);
} }
cq->ctrl = n; cq->ctrl = n;
cq->cqid = cqid; cq->cqid = cqid;
@@ -4716,7 +4722,7 @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req)
trace_pci_nvme_err_invalid_create_cq_addr(prp1); trace_pci_nvme_err_invalid_create_cq_addr(prp1);
return NVME_INVALID_PRP_OFFSET | NVME_DNR; return NVME_INVALID_PRP_OFFSET | NVME_DNR;
} }
if (unlikely(!msix_enabled(&n->parent_obj) && vector)) { if (unlikely(!msix_enabled(PCI_DEVICE(n)) && vector)) {
trace_pci_nvme_err_invalid_create_cq_vector(vector); trace_pci_nvme_err_invalid_create_cq_vector(vector);
return NVME_INVALID_IRQ_VECTOR | NVME_DNR; return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
} }
@@ -5959,6 +5965,7 @@ static uint16_t nvme_assign_virt_res_to_sec(NvmeCtrl *n, NvmeRequest *req,
static uint16_t nvme_virt_set_state(NvmeCtrl *n, uint16_t cntlid, bool online) static uint16_t nvme_virt_set_state(NvmeCtrl *n, uint16_t cntlid, bool online)
{ {
PCIDevice *pci = PCI_DEVICE(n);
NvmeCtrl *sn = NULL; NvmeCtrl *sn = NULL;
NvmeSecCtrlEntry *sctrl; NvmeSecCtrlEntry *sctrl;
int vf_index; int vf_index;
@@ -5968,9 +5975,9 @@ static uint16_t nvme_virt_set_state(NvmeCtrl *n, uint16_t cntlid, bool online)
return NVME_INVALID_CTRL_ID | NVME_DNR; return NVME_INVALID_CTRL_ID | NVME_DNR;
} }
if (!pci_is_vf(&n->parent_obj)) { if (!pci_is_vf(pci)) {
vf_index = le16_to_cpu(sctrl->vfn) - 1; vf_index = le16_to_cpu(sctrl->vfn) - 1;
sn = NVME(pcie_sriov_get_vf_at_index(&n->parent_obj, vf_index)); sn = NVME(pcie_sriov_get_vf_at_index(pci, vf_index));
} }
if (online) { if (online) {
@@ -6028,6 +6035,7 @@ static uint16_t nvme_virt_mngmt(NvmeCtrl *n, NvmeRequest *req)
static uint16_t nvme_dbbuf_config(NvmeCtrl *n, const NvmeRequest *req) static uint16_t nvme_dbbuf_config(NvmeCtrl *n, const NvmeRequest *req)
{ {
PCIDevice *pci = PCI_DEVICE(n);
uint64_t dbs_addr = le64_to_cpu(req->cmd.dptr.prp1); uint64_t dbs_addr = le64_to_cpu(req->cmd.dptr.prp1);
uint64_t eis_addr = le64_to_cpu(req->cmd.dptr.prp2); uint64_t eis_addr = le64_to_cpu(req->cmd.dptr.prp2);
int i; int i;
@@ -6054,8 +6062,7 @@ static uint16_t nvme_dbbuf_config(NvmeCtrl *n, const NvmeRequest *req)
*/ */
sq->db_addr = dbs_addr + (i << 3); sq->db_addr = dbs_addr + (i << 3);
sq->ei_addr = eis_addr + (i << 3); sq->ei_addr = eis_addr + (i << 3);
pci_dma_write(&n->parent_obj, sq->db_addr, &sq->tail, pci_dma_write(pci, sq->db_addr, &sq->tail, sizeof(sq->tail));
sizeof(sq->tail));
if (n->params.ioeventfd && sq->sqid != 0) { if (n->params.ioeventfd && sq->sqid != 0) {
if (!nvme_init_sq_ioeventfd(sq)) { if (!nvme_init_sq_ioeventfd(sq)) {
@@ -6068,8 +6075,7 @@ static uint16_t nvme_dbbuf_config(NvmeCtrl *n, const NvmeRequest *req)
/* CAP.DSTRD is 0, so offset of ith cq db_addr is (i<<3)+(1<<2) */ /* CAP.DSTRD is 0, so offset of ith cq db_addr is (i<<3)+(1<<2) */
cq->db_addr = dbs_addr + (i << 3) + (1 << 2); cq->db_addr = dbs_addr + (i << 3) + (1 << 2);
cq->ei_addr = eis_addr + (i << 3) + (1 << 2); cq->ei_addr = eis_addr + (i << 3) + (1 << 2);
pci_dma_write(&n->parent_obj, cq->db_addr, &cq->head, pci_dma_write(pci, cq->db_addr, &cq->head, sizeof(cq->head));
sizeof(cq->head));
if (n->params.ioeventfd && cq->cqid != 0) { if (n->params.ioeventfd && cq->cqid != 0) {
if (!nvme_init_cq_ioeventfd(cq)) { if (!nvme_init_cq_ioeventfd(cq)) {
@@ -6141,14 +6147,14 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
static void nvme_update_sq_eventidx(const NvmeSQueue *sq) static void nvme_update_sq_eventidx(const NvmeSQueue *sq)
{ {
pci_dma_write(&sq->ctrl->parent_obj, sq->ei_addr, &sq->tail, pci_dma_write(PCI_DEVICE(sq->ctrl), sq->ei_addr, &sq->tail,
sizeof(sq->tail)); sizeof(sq->tail));
trace_pci_nvme_eventidx_sq(sq->sqid, sq->tail); trace_pci_nvme_eventidx_sq(sq->sqid, sq->tail);
} }
static void nvme_update_sq_tail(NvmeSQueue *sq) static void nvme_update_sq_tail(NvmeSQueue *sq)
{ {
pci_dma_read(&sq->ctrl->parent_obj, sq->db_addr, &sq->tail, pci_dma_read(PCI_DEVICE(sq->ctrl), sq->db_addr, &sq->tail,
sizeof(sq->tail)); sizeof(sq->tail));
trace_pci_nvme_shadow_doorbell_sq(sq->sqid, sq->tail); trace_pci_nvme_shadow_doorbell_sq(sq->sqid, sq->tail);
} }
@@ -6216,7 +6222,7 @@ static void nvme_update_msixcap_ts(PCIDevice *pci_dev, uint32_t table_size)
static void nvme_activate_virt_res(NvmeCtrl *n) static void nvme_activate_virt_res(NvmeCtrl *n)
{ {
PCIDevice *pci_dev = &n->parent_obj; PCIDevice *pci_dev = PCI_DEVICE(n);
NvmePriCtrlCap *cap = &n->pri_ctrl_cap; NvmePriCtrlCap *cap = &n->pri_ctrl_cap;
NvmeSecCtrlEntry *sctrl; NvmeSecCtrlEntry *sctrl;
@@ -6239,7 +6245,7 @@ static void nvme_activate_virt_res(NvmeCtrl *n)
static void nvme_ctrl_reset(NvmeCtrl *n, NvmeResetType rst) static void nvme_ctrl_reset(NvmeCtrl *n, NvmeResetType rst)
{ {
PCIDevice *pci_dev = &n->parent_obj; PCIDevice *pci_dev = PCI_DEVICE(n);
NvmeSecCtrlEntry *sctrl; NvmeSecCtrlEntry *sctrl;
NvmeNamespace *ns; NvmeNamespace *ns;
int i; int i;
@@ -6356,7 +6362,7 @@ static int nvme_start_ctrl(NvmeCtrl *n)
uint32_t page_size = 1 << page_bits; uint32_t page_size = 1 << page_bits;
NvmeSecCtrlEntry *sctrl = nvme_sctrl(n); NvmeSecCtrlEntry *sctrl = nvme_sctrl(n);
if (pci_is_vf(&n->parent_obj) && !sctrl->scs) { if (pci_is_vf(PCI_DEVICE(n)) && !sctrl->scs) {
trace_pci_nvme_err_startfail_virt_state(le16_to_cpu(sctrl->nvi), trace_pci_nvme_err_startfail_virt_state(le16_to_cpu(sctrl->nvi),
le16_to_cpu(sctrl->nvq), le16_to_cpu(sctrl->nvq),
sctrl->scs ? "ONLINE" : sctrl->scs ? "ONLINE" :
@@ -6471,6 +6477,7 @@ static void nvme_cmb_enable_regs(NvmeCtrl *n)
static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data, static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
unsigned size) unsigned size)
{ {
PCIDevice *pci = PCI_DEVICE(n);
uint64_t cap = ldq_le_p(&n->bar.cap); uint64_t cap = ldq_le_p(&n->bar.cap);
uint32_t cc = ldl_le_p(&n->bar.cc); uint32_t cc = ldl_le_p(&n->bar.cc);
uint32_t intms = ldl_le_p(&n->bar.intms); uint32_t intms = ldl_le_p(&n->bar.intms);
@@ -6494,7 +6501,7 @@ static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
switch (offset) { switch (offset) {
case NVME_REG_INTMS: case NVME_REG_INTMS:
if (unlikely(msix_enabled(&(n->parent_obj)))) { if (unlikely(msix_enabled(pci))) {
NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix, NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
"undefined access to interrupt mask set" "undefined access to interrupt mask set"
" when MSI-X is enabled"); " when MSI-X is enabled");
@@ -6507,7 +6514,7 @@ static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
nvme_irq_check(n); nvme_irq_check(n);
break; break;
case NVME_REG_INTMC: case NVME_REG_INTMC:
if (unlikely(msix_enabled(&(n->parent_obj)))) { if (unlikely(msix_enabled(pci))) {
NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix, NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
"undefined access to interrupt mask clr" "undefined access to interrupt mask clr"
" when MSI-X is enabled"); " when MSI-X is enabled");
@@ -6732,7 +6739,7 @@ static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
return 0; return 0;
} }
if (pci_is_vf(&n->parent_obj) && !nvme_sctrl(n)->scs && if (pci_is_vf(PCI_DEVICE(n)) && !nvme_sctrl(n)->scs &&
addr != NVME_REG_CSTS) { addr != NVME_REG_CSTS) {
trace_pci_nvme_err_ignored_mmio_vf_offline(addr, size); trace_pci_nvme_err_ignored_mmio_vf_offline(addr, size);
return 0; return 0;
@@ -6753,6 +6760,7 @@ static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val) static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
{ {
PCIDevice *pci = PCI_DEVICE(n);
uint32_t qid; uint32_t qid;
if (unlikely(addr & ((1 << 2) - 1))) { if (unlikely(addr & ((1 << 2) - 1))) {
@@ -6820,8 +6828,7 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
start_sqs = nvme_cq_full(cq) ? 1 : 0; start_sqs = nvme_cq_full(cq) ? 1 : 0;
cq->head = new_head; cq->head = new_head;
if (!qid && n->dbbuf_enabled) { if (!qid && n->dbbuf_enabled) {
pci_dma_write(&n->parent_obj, cq->db_addr, &cq->head, pci_dma_write(pci, cq->db_addr, &cq->head, sizeof(cq->head));
sizeof(cq->head));
} }
if (start_sqs) { if (start_sqs) {
NvmeSQueue *sq; NvmeSQueue *sq;
@@ -6894,8 +6901,7 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
* including ones that run on Linux, are not updating Admin Queues, * including ones that run on Linux, are not updating Admin Queues,
* so we can't trust reading it for an appropriate sq tail. * so we can't trust reading it for an appropriate sq tail.
*/ */
pci_dma_write(&n->parent_obj, sq->db_addr, &sq->tail, pci_dma_write(pci, sq->db_addr, &sq->tail, sizeof(sq->tail));
sizeof(sq->tail));
} }
qemu_bh_schedule(sq->bh); qemu_bh_schedule(sq->bh);
@@ -6909,7 +6915,7 @@ static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data,
trace_pci_nvme_mmio_write(addr, data, size); trace_pci_nvme_mmio_write(addr, data, size);
if (pci_is_vf(&n->parent_obj) && !nvme_sctrl(n)->scs && if (pci_is_vf(PCI_DEVICE(n)) && !nvme_sctrl(n)->scs &&
addr != NVME_REG_CSTS) { addr != NVME_REG_CSTS) {
trace_pci_nvme_err_ignored_mmio_vf_offline(addr, size); trace_pci_nvme_err_ignored_mmio_vf_offline(addr, size);
return; return;
@@ -7093,10 +7099,11 @@ static void nvme_init_state(NvmeCtrl *n)
NvmePriCtrlCap *cap = &n->pri_ctrl_cap; NvmePriCtrlCap *cap = &n->pri_ctrl_cap;
NvmeSecCtrlList *list = &n->sec_ctrl_list; NvmeSecCtrlList *list = &n->sec_ctrl_list;
NvmeSecCtrlEntry *sctrl; NvmeSecCtrlEntry *sctrl;
PCIDevice *pci = PCI_DEVICE(n);
uint8_t max_vfs; uint8_t max_vfs;
int i; int i;
if (pci_is_vf(&n->parent_obj)) { if (pci_is_vf(pci)) {
sctrl = nvme_sctrl(n); sctrl = nvme_sctrl(n);
max_vfs = 0; max_vfs = 0;
n->conf_ioqpairs = sctrl->nvq ? le16_to_cpu(sctrl->nvq) - 1 : 0; n->conf_ioqpairs = sctrl->nvq ? le16_to_cpu(sctrl->nvq) - 1 : 0;
@@ -7125,7 +7132,7 @@ static void nvme_init_state(NvmeCtrl *n)
cap->cntlid = cpu_to_le16(n->cntlid); cap->cntlid = cpu_to_le16(n->cntlid);
cap->crt = NVME_CRT_VQ | NVME_CRT_VI; cap->crt = NVME_CRT_VQ | NVME_CRT_VI;
if (pci_is_vf(&n->parent_obj)) { if (pci_is_vf(pci)) {
cap->vqprt = cpu_to_le16(1 + n->conf_ioqpairs); cap->vqprt = cpu_to_le16(1 + n->conf_ioqpairs);
} else { } else {
cap->vqprt = cpu_to_le16(1 + n->params.max_ioqpairs - cap->vqprt = cpu_to_le16(1 + n->params.max_ioqpairs -
@@ -7138,7 +7145,7 @@ static void nvme_init_state(NvmeCtrl *n)
cap->vqfrt / MAX(max_vfs, 1); cap->vqfrt / MAX(max_vfs, 1);
} }
if (pci_is_vf(&n->parent_obj)) { if (pci_is_vf(pci)) {
cap->viprt = cpu_to_le16(n->conf_msix_qsize); cap->viprt = cpu_to_le16(n->conf_msix_qsize);
} else { } else {
cap->viprt = cpu_to_le16(n->params.msix_qsize - cap->viprt = cpu_to_le16(n->params.msix_qsize -
@@ -7445,7 +7452,7 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
stl_le_p(&n->bar.vs, NVME_SPEC_VER); stl_le_p(&n->bar.vs, NVME_SPEC_VER);
n->bar.intmc = n->bar.intms = 0; n->bar.intmc = n->bar.intms = 0;
if (pci_is_vf(&n->parent_obj) && !sctrl->scs) { if (pci_is_vf(pci_dev) && !sctrl->scs) {
stl_le_p(&n->bar.csts, NVME_CSTS_FAILED); stl_le_p(&n->bar.csts, NVME_CSTS_FAILED);
} }
} }
@@ -7483,6 +7490,7 @@ void nvme_attach_ns(NvmeCtrl *n, NvmeNamespace *ns)
static void nvme_realize(PCIDevice *pci_dev, Error **errp) static void nvme_realize(PCIDevice *pci_dev, Error **errp)
{ {
NvmeCtrl *n = NVME(pci_dev); NvmeCtrl *n = NVME(pci_dev);
DeviceState *dev = DEVICE(pci_dev);
NvmeNamespace *ns; NvmeNamespace *ns;
Error *local_err = NULL; Error *local_err = NULL;
NvmeCtrl *pn = NVME(pcie_sriov_get_pf(pci_dev)); NvmeCtrl *pn = NVME(pcie_sriov_get_pf(pci_dev));
@@ -7502,8 +7510,7 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
return; return;
} }
qbus_init(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS, qbus_init(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS, dev, dev->id);
&pci_dev->qdev, n->parent_obj.qdev.id);
if (nvme_init_subsys(n, errp)) { if (nvme_init_subsys(n, errp)) {
error_propagate(errp, local_err); error_propagate(errp, local_err);