[PATCH rdma-next v3 3/4] RDMA/mana_ib: Use struct mana_ib_queue for WQs
From: Konstantin Taranov
Date: Tue Mar 26 2024 - 16:08:46 EST
From: Konstantin Taranov <kotaranov@xxxxxxxxxxxxx>
Use the shared struct mana_ib_queue and its helpers for WQs, replacing the per-WQ umem, gdma_region, and id fields and the open-coded DMA-region setup and teardown.
Signed-off-by: Konstantin Taranov <kotaranov@xxxxxxxxxxxxx>
Reviewed-by: Long Li <longli@xxxxxxxxxxxxx>
---
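Reviewer note (below the "---", so not part of the commit message): the WQ now relies on the common queue abstraction introduced earlier in this series. As an illustrative sketch only (the authoritative definitions are the ones added to mana_ib.h by the earlier patch), the abstraction looks roughly like this:

/* Illustrative sketch of the shared queue abstraction; see the earlier
 * patch in this series for the real definitions in mana_ib.h.
 */
struct mana_ib_queue {
	struct ib_umem *umem;	/* pinned user memory backing the queue */
	u64 gdma_region;	/* zero-offset DMA region built over the umem */
	u64 id;			/* queue id, known once the WQ object is created */
};

/* Pins the user buffer at addr/size and creates its DMA region. */
int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
			 struct mana_ib_queue *queue);

/* Destroys the DMA region and releases the umem. */
void mana_ib_destroy_queue(struct mana_ib_dev *mdev,
			   struct mana_ib_queue *queue);

With that, mana_ib_create_wq() calls mana_ib_create_queue() instead of open-coding ib_umem_get() plus mana_ib_create_zero_offset_dma_region(), and mana_ib_destroy_wq() is reduced to mana_ib_destroy_queue() plus kfree().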
 drivers/infiniband/hw/mana/mana_ib.h |  4 +---
 drivers/infiniband/hw/mana/qp.c      | 10 +++++-----
 drivers/infiniband/hw/mana/wq.c      | 31 ++++---------------------------
 3 files changed, 10 insertions(+), 35 deletions(-)
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 6acb5c281..a8953ee80 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -59,11 +59,9 @@ struct mana_ib_dev {
struct mana_ib_wq {
struct ib_wq ibwq;
- struct ib_umem *umem;
+ struct mana_ib_queue queue;
int wqe;
u32 wq_buf_size;
- u64 gdma_region;
- u64 id;
mana_handle_t rx_object;
};
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index d7485ee6a..f606caa75 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -194,7 +194,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
ibcq = ibwq->cq;
cq = container_of(ibcq, struct mana_ib_cq, ibcq);
- wq_spec.gdma_region = wq->gdma_region;
+ wq_spec.gdma_region = wq->queue.gdma_region;
wq_spec.queue_size = wq->wq_buf_size;
cq_spec.gdma_region = cq->queue.gdma_region;
@@ -212,18 +212,18 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
}
/* The GDMA regions are now owned by the WQ object */
- wq->gdma_region = GDMA_INVALID_DMA_REGION;
+ wq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
- wq->id = wq_spec.queue_index;
+ wq->queue.id = wq_spec.queue_index;
cq->queue.id = cq_spec.queue_index;
ibdev_dbg(&mdev->ib_dev,
"ret %d rx_object 0x%llx wq id %llu cq id %llu\n",
- ret, wq->rx_object, wq->id, cq->queue.id);
+ ret, wq->rx_object, wq->queue.id, cq->queue.id);
resp.entries[i].cqid = cq->queue.id;
- resp.entries[i].wqid = wq->id;
+ resp.entries[i].wqid = wq->queue.id;
mana_ind_table[i] = wq->rx_object;
diff --git a/drivers/infiniband/hw/mana/wq.c b/drivers/infiniband/hw/mana/wq.c
index 7c9c69962..f959f4b92 100644
--- a/drivers/infiniband/hw/mana/wq.c
+++ b/drivers/infiniband/hw/mana/wq.c
@@ -13,7 +13,6 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
container_of(pd->device, struct mana_ib_dev, ib_dev);
struct mana_ib_create_wq ucmd = {};
struct mana_ib_wq *wq;
- struct ib_umem *umem;
int err;
if (udata->inlen < sizeof(ucmd))
@@ -32,39 +31,18 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
ibdev_dbg(&mdev->ib_dev, "ucmd wq_buf_addr 0x%llx\n", ucmd.wq_buf_addr);
- umem = ib_umem_get(pd->device, ucmd.wq_buf_addr, ucmd.wq_buf_size,
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(umem)) {
- err = PTR_ERR(umem);
+ err = mana_ib_create_queue(mdev, ucmd.wq_buf_addr, ucmd.wq_buf_size, &wq->queue);
+ if (err) {
ibdev_dbg(&mdev->ib_dev,
- "Failed to get umem for create wq, err %d\n", err);
+ "Failed to create queue for create wq, %d\n", err);
goto err_free_wq;
}
- wq->umem = umem;
wq->wqe = init_attr->max_wr;
wq->wq_buf_size = ucmd.wq_buf_size;
wq->rx_object = INVALID_MANA_HANDLE;
-
- err = mana_ib_create_zero_offset_dma_region(mdev, wq->umem, &wq->gdma_region);
- if (err) {
- ibdev_dbg(&mdev->ib_dev,
- "Failed to create dma region for create wq, %d\n",
- err);
- goto err_release_umem;
- }
-
- ibdev_dbg(&mdev->ib_dev,
- "create_dma_region ret %d gdma_region 0x%llx\n",
- err, wq->gdma_region);
-
- /* WQ ID is returned at wq_create time, doesn't know the value yet */
-
return &wq->ibwq;
-err_release_umem:
- ib_umem_release(umem);
-
err_free_wq:
kfree(wq);
@@ -86,8 +64,7 @@ int mana_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
mdev = container_of(ib_dev, struct mana_ib_dev, ib_dev);
- mana_ib_gd_destroy_dma_region(mdev, wq->gdma_region);
- ib_umem_release(wq->umem);
+ mana_ib_destroy_queue(mdev, &wq->queue);
kfree(wq);
--
2.43.0