hw/block/nvme: add mapping helpers

Add nvme_map_addr, nvme_map_addr_cmb and nvme_addr_to_cmb helpers and
use them in nvme_map_prp.

This fixes a bug where, in the case of a CMB transfer, the device would
map the buffer with the wrong length.
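
For reference, the wrong length comes from the old PRP2 handling removed
below: when PRP2 points into the CMB, the iovec entry is added with
trans_len, which still holds the size of the first (PRP1) segment, instead
of the remaining length. A simplified sketch of the pre-patch code:

    if (qsg->nsg) {
        qemu_sglist_add(qsg, prp2, len);     /* host memory: correct length */
    } else {
        /* CMB: should add the remaining `len`, not `trans_len` */
        qemu_iovec_add(iov, (void *)&n->cmbuf[prp2 - n->ctrl_mem.addr],
                       trans_len);
    }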

Fixes: b2b2b67a00 ("nvme: Add support for Read Data and Write Data in CMBs.")
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Minwoo Im <minwoo.im.dev@gmail.com>
Reviewed-by: Andrzej Jakowski <andrzej.jakowski@linux.intel.com>
Author: Klaus Jensen <k.jensen@samsung.com>
Date:   2020-02-23 14:21:52 +01:00
Commit: a80b2ce682 (parent: d1322b4668)

2 changed files with 96 additions and 17 deletions

hw/block/nvme.c

@@ -132,10 +132,17 @@ static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
     return addr >= low && addr < hi;
 }
 
+static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
+{
+    assert(nvme_addr_is_cmb(n, addr));
+
+    return &n->cmbuf[addr - n->ctrl_mem.addr];
+}
+
 static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
 {
     if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr)) {
-        memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
+        memcpy(buf, nvme_addr_to_cmb(n, addr), size);
         return;
     }
 
@@ -218,29 +225,91 @@ static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
     }
 }
 
+static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
+                                  size_t len)
+{
+    if (!len) {
+        return NVME_SUCCESS;
+    }
+
+    trace_pci_nvme_map_addr_cmb(addr, len);
+
+    if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len - 1)) {
+        return NVME_DATA_TRAS_ERROR;
+    }
+
+    qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len);
+
+    return NVME_SUCCESS;
+}
+
+static uint16_t nvme_map_addr(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
+                              hwaddr addr, size_t len)
+{
+    if (!len) {
+        return NVME_SUCCESS;
+    }
+
+    trace_pci_nvme_map_addr(addr, len);
+
+    if (nvme_addr_is_cmb(n, addr)) {
+        if (qsg && qsg->sg) {
+            return NVME_INVALID_USE_OF_CMB | NVME_DNR;
+        }
+
+        assert(iov);
+
+        if (!iov->iov) {
+            qemu_iovec_init(iov, 1);
+        }
+
+        return nvme_map_addr_cmb(n, iov, addr, len);
+    }
+
+    if (iov && iov->iov) {
+        return NVME_INVALID_USE_OF_CMB | NVME_DNR;
+    }
+
+    assert(qsg);
+
+    if (!qsg->sg) {
+        pci_dma_sglist_init(qsg, &n->parent_obj, 1);
+    }
+
+    qemu_sglist_add(qsg, addr, len);
+
+    return NVME_SUCCESS;
+}
+
 static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
                              uint64_t prp2, uint32_t len, NvmeCtrl *n)
 {
     hwaddr trans_len = n->page_size - (prp1 % n->page_size);
     trans_len = MIN(len, trans_len);
     int num_prps = (len >> n->page_bits) + 1;
+    uint16_t status;
 
     if (unlikely(!prp1)) {
         trace_pci_nvme_err_invalid_prp();
         return NVME_INVALID_FIELD | NVME_DNR;
-    } else if (n->bar.cmbsz && prp1 >= n->ctrl_mem.addr &&
-               prp1 < n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size)) {
-        qsg->nsg = 0;
+    }
+
+    if (nvme_addr_is_cmb(n, prp1)) {
         qemu_iovec_init(iov, num_prps);
-        qemu_iovec_add(iov, (void *)&n->cmbuf[prp1 - n->ctrl_mem.addr], trans_len);
     } else {
         pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
-        qemu_sglist_add(qsg, prp1, trans_len);
     }
+
+    status = nvme_map_addr(n, qsg, iov, prp1, trans_len);
+    if (status) {
+        goto unmap;
+    }
+
     len -= trans_len;
     if (len) {
         if (unlikely(!prp2)) {
             trace_pci_nvme_err_invalid_prp2_missing();
+            status = NVME_INVALID_FIELD | NVME_DNR;
             goto unmap;
         }
         if (len > n->page_size) {
@@ -257,6 +326,7 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
                 if (i == n->max_prp_ents - 1 && len > n->page_size) {
                     if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
                         trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
+                        status = NVME_INVALID_FIELD | NVME_DNR;
                         goto unmap;
                     }
 
@@ -270,14 +340,14 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
 
                 if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
                     trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
+                    status = NVME_INVALID_FIELD | NVME_DNR;
                     goto unmap;
                 }
 
                 trans_len = MIN(len, n->page_size);
-                if (qsg->nsg){
-                    qemu_sglist_add(qsg, prp_ent, trans_len);
-                } else {
-                    qemu_iovec_add(iov, (void *)&n->cmbuf[prp_ent - n->ctrl_mem.addr], trans_len);
+                status = nvme_map_addr(n, qsg, iov, prp_ent, trans_len);
+                if (status) {
+                    goto unmap;
                 }
                 len -= trans_len;
                 i++;
@@ -285,20 +355,27 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
         } else {
             if (unlikely(prp2 & (n->page_size - 1))) {
                 trace_pci_nvme_err_invalid_prp2_align(prp2);
+                status = NVME_INVALID_FIELD | NVME_DNR;
                 goto unmap;
             }
-            if (qsg->nsg) {
-                qemu_sglist_add(qsg, prp2, len);
-            } else {
-                qemu_iovec_add(iov, (void *)&n->cmbuf[prp2 - n->ctrl_mem.addr], trans_len);
+
+            status = nvme_map_addr(n, qsg, iov, prp2, len);
+            if (status) {
+                goto unmap;
             }
         }
     }
     return NVME_SUCCESS;
 
 unmap:
-    qemu_sglist_destroy(qsg);
-    return NVME_INVALID_FIELD | NVME_DNR;
+    if (iov && iov->iov) {
+        qemu_iovec_destroy(iov);
+    }
+
+    if (qsg && qsg->sg) {
+        qemu_sglist_destroy(qsg);
+    }
+    return status;
 }
 
 static uint16_t nvme_dma_write_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
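
A hypothetical usage sketch (not part of this patch; the caller name is
invented for illustration): a caller can zero-initialize both containers,
let nvme_map_addr() lazily initialize whichever one the address family
requires, and tear down whichever ended up populated, mirroring the unmap
path above.

    /* Hypothetical sketch: map two data pointers into one request.
     * nvme_map_addr() initializes either the QEMUSGList (host memory) or
     * the QEMUIOVector (CMB) on first use and rejects mixing the two. */
    static uint16_t example_map_two(NvmeCtrl *n, hwaddr a, size_t alen,
                                    hwaddr b, size_t blen)
    {
        QEMUSGList qsg = {};
        QEMUIOVector iov = {};
        uint16_t status;

        status = nvme_map_addr(n, &qsg, &iov, a, alen);
        if (status) {
            goto out;
        }

        status = nvme_map_addr(n, &qsg, &iov, b, blen);

        /* a real caller would perform the transfer here before teardown */
    out:
        if (iov.iov) {
            qemu_iovec_destroy(&iov);
        }
        if (qsg.sg) {
            qemu_sglist_destroy(&qsg);
        }

        return status;
    }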

hw/block/trace-events

@@ -33,6 +33,8 @@ pci_nvme_irq_msix(uint32_t vector) "raising MSI-X IRQ vector %u"
 pci_nvme_irq_pin(void) "pulsing IRQ pin"
 pci_nvme_irq_masked(void) "IRQ is masked"
 pci_nvme_dma_read(uint64_t prp1, uint64_t prp2) "DMA read, prp1=0x%"PRIx64" prp2=0x%"PRIx64""
+pci_nvme_map_addr(uint64_t addr, uint64_t len) "addr 0x%"PRIx64" len %"PRIu64""
+pci_nvme_map_addr_cmb(uint64_t addr, uint64_t len) "addr 0x%"PRIx64" len %"PRIu64""
 pci_nvme_io_cmd(uint16_t cid, uint32_t nsid, uint16_t sqid, uint8_t opcode) "cid %"PRIu16" nsid %"PRIu32" sqid %"PRIu16" opc 0x%"PRIx8""
 pci_nvme_admin_cmd(uint16_t cid, uint16_t sqid, uint8_t opcode) "cid %"PRIu16" sqid %"PRIu16" opc 0x%"PRIx8""
 pci_nvme_rw(const char *verb, uint32_t blk_count, uint64_t byte_count, uint64_t lba) "%s %"PRIu32" blocks (%"PRIu64" bytes) from LBA %"PRIu64""
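
As a usage note (not part of the patch): the new trace points can be
enabled at run time with QEMU's -trace option and a matching pattern,
for example (the rest of the command line is elided):

    qemu-system-x86_64 [...] -trace "pci_nvme_map_addr*"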