hw/rdma: Modify create/destroy QP to support SRQ

Modify create/destroy QP to support shared receive queue and rearrange
the destroy_qp() code to avoid touching the QP after calling
rdma_rm_dealloc_qp().

Signed-off-by: Kamal Heib <kamalheib1@gmail.com>
Message-Id: <20190403113343.26384-4-kamalheib1@gmail.com>
Reviewed-by: Yuval Shaia <yuval.shaia@oracle.com>
Signed-off-by: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
This commit is contained in:
Kamal Heib 2019-04-03 14:33:42 +03:00 committed by Marcel Apfelbaum
parent cdc84058bc
commit 8b42cfab82
6 changed files with 67 additions and 33 deletions

View File

@@ -794,9 +794,9 @@ void rdma_backend_destroy_cq(RdmaBackendCQ *cq)
int rdma_backend_create_qp(RdmaBackendQP *qp, uint8_t qp_type, int rdma_backend_create_qp(RdmaBackendQP *qp, uint8_t qp_type,
RdmaBackendPD *pd, RdmaBackendCQ *scq, RdmaBackendPD *pd, RdmaBackendCQ *scq,
RdmaBackendCQ *rcq, uint32_t max_send_wr, RdmaBackendCQ *rcq, RdmaBackendSRQ *srq,
uint32_t max_recv_wr, uint32_t max_send_sge, uint32_t max_send_wr, uint32_t max_recv_wr,
uint32_t max_recv_sge) uint32_t max_send_sge, uint32_t max_recv_sge)
{ {
struct ibv_qp_init_attr attr = {}; struct ibv_qp_init_attr attr = {};
@@ -824,6 +824,9 @@ int rdma_backend_create_qp(RdmaBackendQP *qp, uint8_t qp_type,
attr.cap.max_recv_wr = max_recv_wr; attr.cap.max_recv_wr = max_recv_wr;
attr.cap.max_send_sge = max_send_sge; attr.cap.max_send_sge = max_send_sge;
attr.cap.max_recv_sge = max_recv_sge; attr.cap.max_recv_sge = max_recv_sge;
if (srq) {
attr.srq = srq->ibsrq;
}
qp->ibqp = ibv_create_qp(pd->ibpd, &attr); qp->ibqp = ibv_create_qp(pd->ibpd, &attr);
if (!qp->ibqp) { if (!qp->ibqp) {

View File

@@ -89,9 +89,9 @@ void rdma_backend_poll_cq(RdmaDeviceResources *rdma_dev_res, RdmaBackendCQ *cq);
int rdma_backend_create_qp(RdmaBackendQP *qp, uint8_t qp_type, int rdma_backend_create_qp(RdmaBackendQP *qp, uint8_t qp_type,
RdmaBackendPD *pd, RdmaBackendCQ *scq, RdmaBackendPD *pd, RdmaBackendCQ *scq,
RdmaBackendCQ *rcq, uint32_t max_send_wr, RdmaBackendCQ *rcq, RdmaBackendSRQ *srq,
uint32_t max_recv_wr, uint32_t max_send_sge, uint32_t max_send_wr, uint32_t max_recv_wr,
uint32_t max_recv_sge); uint32_t max_send_sge, uint32_t max_recv_sge);
int rdma_backend_qp_state_init(RdmaBackendDev *backend_dev, RdmaBackendQP *qp, int rdma_backend_qp_state_init(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
uint8_t qp_type, uint32_t qkey); uint8_t qp_type, uint32_t qkey);
int rdma_backend_qp_state_rtr(RdmaBackendDev *backend_dev, RdmaBackendQP *qp, int rdma_backend_qp_state_rtr(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,

View File

@@ -386,12 +386,14 @@ int rdma_rm_alloc_qp(RdmaDeviceResources *dev_res, uint32_t pd_handle,
uint8_t qp_type, uint32_t max_send_wr, uint8_t qp_type, uint32_t max_send_wr,
uint32_t max_send_sge, uint32_t send_cq_handle, uint32_t max_send_sge, uint32_t send_cq_handle,
uint32_t max_recv_wr, uint32_t max_recv_sge, uint32_t max_recv_wr, uint32_t max_recv_sge,
uint32_t recv_cq_handle, void *opaque, uint32_t *qpn) uint32_t recv_cq_handle, void *opaque, uint32_t *qpn,
uint8_t is_srq, uint32_t srq_handle)
{ {
int rc; int rc;
RdmaRmQP *qp; RdmaRmQP *qp;
RdmaRmCQ *scq, *rcq; RdmaRmCQ *scq, *rcq;
RdmaRmPD *pd; RdmaRmPD *pd;
RdmaRmSRQ *srq = NULL;
uint32_t rm_qpn; uint32_t rm_qpn;
pd = rdma_rm_get_pd(dev_res, pd_handle); pd = rdma_rm_get_pd(dev_res, pd_handle);
@@ -408,6 +410,16 @@ int rdma_rm_alloc_qp(RdmaDeviceResources *dev_res, uint32_t pd_handle,
return -EINVAL; return -EINVAL;
} }
if (is_srq) {
srq = rdma_rm_get_srq(dev_res, srq_handle);
if (!srq) {
rdma_error_report("Invalid srqn %d", srq_handle);
return -EINVAL;
}
srq->recv_cq_handle = recv_cq_handle;
}
if (qp_type == IBV_QPT_GSI) { if (qp_type == IBV_QPT_GSI) {
scq->notify = CNT_SET; scq->notify = CNT_SET;
rcq->notify = CNT_SET; rcq->notify = CNT_SET;
@@ -424,10 +436,14 @@ int rdma_rm_alloc_qp(RdmaDeviceResources *dev_res, uint32_t pd_handle,
qp->send_cq_handle = send_cq_handle; qp->send_cq_handle = send_cq_handle;
qp->recv_cq_handle = recv_cq_handle; qp->recv_cq_handle = recv_cq_handle;
qp->opaque = opaque; qp->opaque = opaque;
qp->is_srq = is_srq;
rc = rdma_backend_create_qp(&qp->backend_qp, qp_type, &pd->backend_pd, rc = rdma_backend_create_qp(&qp->backend_qp, qp_type, &pd->backend_pd,
&scq->backend_cq, &rcq->backend_cq, max_send_wr, &scq->backend_cq, &rcq->backend_cq,
max_recv_wr, max_send_sge, max_recv_sge); is_srq ? &srq->backend_srq : NULL,
max_send_wr, max_recv_wr, max_send_sge,
max_recv_sge);
if (rc) { if (rc) {
rc = -EIO; rc = -EIO;
goto out_dealloc_qp; goto out_dealloc_qp;

View File

@@ -53,7 +53,8 @@ int rdma_rm_alloc_qp(RdmaDeviceResources *dev_res, uint32_t pd_handle,
uint8_t qp_type, uint32_t max_send_wr, uint8_t qp_type, uint32_t max_send_wr,
uint32_t max_send_sge, uint32_t send_cq_handle, uint32_t max_send_sge, uint32_t send_cq_handle,
uint32_t max_recv_wr, uint32_t max_recv_sge, uint32_t max_recv_wr, uint32_t max_recv_sge,
uint32_t recv_cq_handle, void *opaque, uint32_t *qpn); uint32_t recv_cq_handle, void *opaque, uint32_t *qpn,
uint8_t is_srq, uint32_t srq_handle);
RdmaRmQP *rdma_rm_get_qp(RdmaDeviceResources *dev_res, uint32_t qpn); RdmaRmQP *rdma_rm_get_qp(RdmaDeviceResources *dev_res, uint32_t qpn);
int rdma_rm_modify_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, int rdma_rm_modify_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
uint32_t qp_handle, uint32_t attr_mask, uint8_t sgid_idx, uint32_t qp_handle, uint32_t attr_mask, uint8_t sgid_idx,

View File

@@ -88,6 +88,7 @@ typedef struct RdmaRmQP {
uint32_t send_cq_handle; uint32_t send_cq_handle;
uint32_t recv_cq_handle; uint32_t recv_cq_handle;
enum ibv_qp_state qp_state; enum ibv_qp_state qp_state;
uint8_t is_srq;
} RdmaRmQP; } RdmaRmQP;
typedef struct RdmaRmSRQ { typedef struct RdmaRmSRQ {

View File

@@ -357,7 +357,7 @@ static int destroy_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma, static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma,
PvrdmaRing **rings, uint32_t scqe, uint32_t smax_sge, PvrdmaRing **rings, uint32_t scqe, uint32_t smax_sge,
uint32_t spages, uint32_t rcqe, uint32_t rmax_sge, uint32_t spages, uint32_t rcqe, uint32_t rmax_sge,
uint32_t rpages) uint32_t rpages, uint8_t is_srq)
{ {
uint64_t *dir = NULL, *tbl = NULL; uint64_t *dir = NULL, *tbl = NULL;
PvrdmaRing *sr, *rr; PvrdmaRing *sr, *rr;
@@ -365,9 +365,14 @@ static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma,
char ring_name[MAX_RING_NAME_SZ]; char ring_name[MAX_RING_NAME_SZ];
uint32_t wqe_sz; uint32_t wqe_sz;
if (!spages || spages > PVRDMA_MAX_FAST_REG_PAGES if (!spages || spages > PVRDMA_MAX_FAST_REG_PAGES) {
|| !rpages || rpages > PVRDMA_MAX_FAST_REG_PAGES) { rdma_error_report("Got invalid send page count for QP ring: %d",
rdma_error_report("Got invalid page count for QP ring: %d, %d", spages, spages);
return rc;
}
if (!is_srq && (!rpages || rpages > PVRDMA_MAX_FAST_REG_PAGES)) {
rdma_error_report("Got invalid recv page count for QP ring: %d",
rpages); rpages);
return rc; return rc;
} }
@@ -384,8 +389,12 @@ static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma,
goto out; goto out;
} }
sr = g_malloc(2 * sizeof(*rr)); if (!is_srq) {
rr = &sr[1]; sr = g_malloc(2 * sizeof(*rr));
rr = &sr[1];
} else {
sr = g_malloc(sizeof(*sr));
}
*rings = sr; *rings = sr;
@@ -407,15 +416,18 @@ static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma,
goto out_unmap_ring_state; goto out_unmap_ring_state;
} }
/* Create recv ring */ if (!is_srq) {
rr->ring_state = &sr->ring_state[1]; /* Create recv ring */
wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) + rr->ring_state = &sr->ring_state[1];
sizeof(struct pvrdma_sge) * rmax_sge - 1); wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) +
sprintf(ring_name, "qp_rring_%" PRIx64, pdir_dma); sizeof(struct pvrdma_sge) * rmax_sge - 1);
rc = pvrdma_ring_init(rr, ring_name, pci_dev, rr->ring_state, sprintf(ring_name, "qp_rring_%" PRIx64, pdir_dma);
rcqe, wqe_sz, (dma_addr_t *)&tbl[1 + spages], rpages); rc = pvrdma_ring_init(rr, ring_name, pci_dev, rr->ring_state,
if (rc) { rcqe, wqe_sz, (dma_addr_t *)&tbl[1 + spages],
goto out_free_sr; rpages);
if (rc) {
goto out_free_sr;
}
} }
goto out; goto out;
@@ -436,10 +448,12 @@ out:
return rc; return rc;
} }
static void destroy_qp_rings(PvrdmaRing *ring) static void destroy_qp_rings(PvrdmaRing *ring, uint8_t is_srq)
{ {
pvrdma_ring_free(&ring[0]); pvrdma_ring_free(&ring[0]);
pvrdma_ring_free(&ring[1]); if (!is_srq) {
pvrdma_ring_free(&ring[1]);
}
rdma_pci_dma_unmap(ring->dev, ring->ring_state, TARGET_PAGE_SIZE); rdma_pci_dma_unmap(ring->dev, ring->ring_state, TARGET_PAGE_SIZE);
g_free(ring); g_free(ring);
@@ -458,7 +472,7 @@ static int create_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
rc = create_qp_rings(PCI_DEVICE(dev), cmd->pdir_dma, &rings, rc = create_qp_rings(PCI_DEVICE(dev), cmd->pdir_dma, &rings,
cmd->max_send_wr, cmd->max_send_sge, cmd->send_chunks, cmd->max_send_wr, cmd->max_send_sge, cmd->send_chunks,
cmd->max_recv_wr, cmd->max_recv_sge, cmd->max_recv_wr, cmd->max_recv_sge,
cmd->total_chunks - cmd->send_chunks - 1); cmd->total_chunks - cmd->send_chunks - 1, cmd->is_srq);
if (rc) { if (rc) {
return rc; return rc;
} }
@@ -467,9 +481,9 @@ static int create_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
cmd->max_send_wr, cmd->max_send_sge, cmd->max_send_wr, cmd->max_send_sge,
cmd->send_cq_handle, cmd->max_recv_wr, cmd->send_cq_handle, cmd->max_recv_wr,
cmd->max_recv_sge, cmd->recv_cq_handle, rings, cmd->max_recv_sge, cmd->recv_cq_handle, rings,
&resp->qpn); &resp->qpn, cmd->is_srq, cmd->srq_handle);
if (rc) { if (rc) {
destroy_qp_rings(rings); destroy_qp_rings(rings, cmd->is_srq);
return rc; return rc;
} }
@@ -531,10 +545,9 @@ static int destroy_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
return -EINVAL; return -EINVAL;
} }
rdma_rm_dealloc_qp(&dev->rdma_dev_res, cmd->qp_handle);
ring = (PvrdmaRing *)qp->opaque; ring = (PvrdmaRing *)qp->opaque;
destroy_qp_rings(ring); destroy_qp_rings(ring, qp->is_srq);
rdma_rm_dealloc_qp(&dev->rdma_dev_res, cmd->qp_handle);
return 0; return 0;
} }