Re: [Patch v2 3/3] RDMA/mana_ib: Add CQ interrupt support for RAW QP

From: Simon Horman
Date: Sat Dec 09 2023 - 12:34:15 EST


On Mon, Dec 04, 2023 at 03:02:59PM -0800, longli@xxxxxxxxxxxxxxxxx wrote:
> From: Long Li <longli@xxxxxxxxxxxxx>
>
> At probing time, the MANA core code allocates EQs for supporting interrupts
> on Ethernet queues. The same interrupt mechanism is used by RAW QP.
>
> Use the same EQs for delivering interrupts on the CQ for the RAW QP.
>
> Signed-off-by: Long Li <longli@xxxxxxxxxxxxx>

Hi Long Li,

some minor feedback from my side.

...

> diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
> index 4667b18ec1dd..186d9829bb93 100644
> --- a/drivers/infiniband/hw/mana/qp.c
> +++ b/drivers/infiniband/hw/mana/qp.c
> @@ -99,25 +99,34 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
> struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
> struct mana_ib_dev *mdev =
> container_of(pd->device, struct mana_ib_dev, ib_dev);
> + struct ib_ucontext *ib_ucontext = pd->uobject->context;
> struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
> struct mana_ib_create_qp_rss_resp resp = {};
> struct mana_ib_create_qp_rss ucmd = {};
> + struct mana_ib_ucontext *mana_ucontext;
> + struct gdma_queue **gdma_cq_allocated;
> mana_handle_t *mana_ind_table;
> struct mana_port_context *mpc;
> + struct gdma_queue *gdma_cq;
> unsigned int ind_tbl_size;
> struct mana_context *mc;
> struct net_device *ndev;
> + struct gdma_context *gc;
> struct mana_ib_cq *cq;
> struct mana_ib_wq *wq;
> struct gdma_dev *gd;
> + struct mana_eq *eq;
> struct ib_cq *ibcq;
> struct ib_wq *ibwq;
> int i = 0;
> u32 port;
> int ret;
>
> - gd = &mdev->gdma_dev->gdma_context->mana;
> + gc = mdev->gdma_dev->gdma_context;
> + gd = &gc->mana;
> mc = gd->driver_data;
> + mana_ucontext =
> + container_of(ib_ucontext, struct mana_ib_ucontext, ibucontext);
>
> if (!udata || udata->inlen < sizeof(ucmd))
> return -EINVAL;

nit: mana_ucontext appears to be set but unused.

Flagged by W=1 builds.

> @@ -179,6 +188,13 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
> goto fail;
> }
>
> + gdma_cq_allocated = kcalloc(ind_tbl_size, sizeof(*gdma_cq_allocated),
> + GFP_KERNEL);
> + if (!gdma_cq_allocated) {
> + ret = -ENOMEM;
> + goto fail;
> + }
> +
> qp->port = port;
>
> for (i = 0; i < ind_tbl_size; i++) {

...

> @@ -219,6 +236,21 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
> resp.entries[i].wqid = wq->id;
>
> mana_ind_table[i] = wq->rx_object;
> +
> + /* Create CQ table entry */
> + WARN_ON(gc->cq_table[cq->id]);
> + gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
> + if (!gdma_cq) {
> + ret = -ENOMEM;
> + goto fail;
> + }
> + gdma_cq_allocated[i] = gdma_cq;
> +
> + gdma_cq->cq.context = cq;
> + gdma_cq->type = GDMA_CQ;
> + gdma_cq->cq.callback = mana_ib_cq_handler;
> + gdma_cq->id = cq->id;
> + gc->cq_table[cq->id] = gdma_cq;
> }
> resp.num_entries = i;
>
> @@ -238,6 +270,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
> goto fail;
> }
>
> + kfree(gdma_cq_allocated);
> kfree(mana_ind_table);
>
> return 0;
> @@ -247,8 +280,15 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
> ibwq = ind_tbl->ind_tbl[i];
> wq = container_of(ibwq, struct mana_ib_wq, ibwq);
> mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
> +
> + if (gdma_cq_allocated[i]) {

nit: It is not clear to me that condition can ever be false.
If we get here then gdma_cq_allocated[i] is a valid pointer.

> + gc->cq_table[gdma_cq_allocated[i]->id] =
> + NULL;
> + kfree(gdma_cq_allocated[i]);
> + }
> }
>
> + kfree(gdma_cq_allocated);
> kfree(mana_ind_table);
>
> return ret;

...