[PATCH AUTOSEL 6.4 20/58] RDMA/mana_ib: Use v2 version of cfg_rx_steer_req to enable RX coalescing

From: Sasha Levin
Date: Sun Jul 23 2023 - 21:16:50 EST


From: Long Li <longli@xxxxxxxxxxxxx>

[ Upstream commit 2145328515c8fa9b8a9f7889250bc6c032f2a0e6 ]

With RX coalescing, one CQE entry can be used to indicate multiple packets
on the receive queue. This saves processing time and PCI bandwidth spent on
the CQ. The v2 version of cfg_rx_steer_req adds a cqe_coalescing_enable
field, which the RDMA driver sets to enable RX coalescing on its receive
queues.

The MANA Ethernet driver is also moved to the v2 version of the request. It
does not enable RX coalescing, so its behavior is unchanged.
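
For reference, below is a minimal standalone sketch (user-space C, not
kernel code) of how the two callers in this patch use the new request.
The struct is a trimmed stand-in that keeps only the fields the patch
touches, and the gdma_req_hdr layout and GDMA_MESSAGE_V2 value are
simplified assumptions rather than the real definitions:

  /* Standalone user-space sketch, not kernel code: a trimmed stand-in
   * for the v2 request that keeps only the fields this patch touches.
   * The gdma_req_hdr layout is simplified and the GDMA_MESSAGE_V2 value
   * is assumed; see the hunks below for the real structures.
   */
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  #define GDMA_MESSAGE_V2 2              /* assumed value, for illustration */

  struct gdma_req_hdr {                  /* simplified stand-in */
          struct {
                  uint32_t msg_type;
                  uint32_t msg_version;  /* v1 requests use 1 here */
          } req;
  };

  struct mana_cfg_rx_steer_req_v2 {      /* trimmed stand-in */
          struct gdma_req_hdr hdr;
          /* ... steering fields elided, see the mana.h hunk below ... */
          uint8_t cqe_coalescing_enable; /* new trailing field in v2 */
          uint8_t reserved2[7];          /* new: pads the tail to 8 bytes */
  };

  int main(void)
  {
          struct mana_cfg_rx_steer_req_v2 req;

          memset(&req, 0, sizeof(req));

          /* Both callers mark the request as v2 so the other end knows
           * the new trailing fields are present.
           */
          req.hdr.req.msg_version = GDMA_MESSAGE_V2;

          /* RDMA path: turn RX coalescing on.  The Ethernet path leaves
           * the field at 0, so its behavior does not change.
           */
          req.cqe_coalescing_enable = 1;

          printf("version=%u coalescing=%u\n",
                 (unsigned)req.hdr.req.msg_version,
                 (unsigned)req.cqe_coalescing_enable);
          return 0;
  }

The reserved2[7] padding presumably keeps the request size a multiple of
8 bytes, so that the mana_handle_t indirection table both callers append
at indir_tab_offset = sizeof(*req) stays naturally aligned.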

Link: https://lore.kernel.org/r/1684045095-31228-1-git-send-email-longli@xxxxxxxxxxxxxxxxx
Signed-off-by: Long Li <longli@xxxxxxxxxxxxx>
Signed-off-by: Jason Gunthorpe <jgg@xxxxxxxxxx>
Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
---
drivers/infiniband/hw/mana/qp.c | 5 ++++-
drivers/net/ethernet/microsoft/mana/mana_en.c | 5 ++++-
include/net/mana/mana.h | 4 +++-
3 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index 54b61930a7fdb..4b3b5b274e849 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -13,7 +13,7 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
                                       u8 *rx_hash_key)
 {
         struct mana_port_context *mpc = netdev_priv(ndev);
-        struct mana_cfg_rx_steer_req *req = NULL;
+        struct mana_cfg_rx_steer_req_v2 *req;
         struct mana_cfg_rx_steer_resp resp = {};
         mana_handle_t *req_indir_tab;
         struct gdma_context *gc;
@@ -33,6 +33,8 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
         mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
                              sizeof(resp));
 
+        req->hdr.req.msg_version = GDMA_MESSAGE_V2;
+
         req->vport = mpc->port_handle;
         req->rx_enable = 1;
         req->update_default_rxobj = 1;
@@ -46,6 +48,7 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
         req->num_indir_entries = MANA_INDIRECT_TABLE_SIZE;
         req->indir_tab_offset = sizeof(*req);
         req->update_indir_tab = true;
+        req->cqe_coalescing_enable = 1;
 
         req_indir_tab = (mana_handle_t *)(req + 1);
         /* The ind table passed to the hardware must have
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index d907727c7b7a5..7f4e861e398e6 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -972,7 +972,7 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
                                    bool update_tab)
 {
         u16 num_entries = MANA_INDIRECT_TABLE_SIZE;
-        struct mana_cfg_rx_steer_req *req = NULL;
+        struct mana_cfg_rx_steer_req_v2 *req;
         struct mana_cfg_rx_steer_resp resp = {};
         struct net_device *ndev = apc->ndev;
         mana_handle_t *req_indir_tab;
@@ -987,6 +987,8 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
         mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
                              sizeof(resp));
 
+        req->hdr.req.msg_version = GDMA_MESSAGE_V2;
+
         req->vport = apc->port_handle;
         req->num_indir_entries = num_entries;
         req->indir_tab_offset = sizeof(*req);
@@ -996,6 +998,7 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
         req->update_hashkey = update_key;
         req->update_indir_tab = update_tab;
         req->default_rxobj = apc->default_rxobj;
+        req->cqe_coalescing_enable = 0;
 
         if (update_key)
                 memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 9eef199728454..024ad8ddb27e5 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -579,7 +579,7 @@ struct mana_fence_rq_resp {
 }; /* HW DATA */
 
 /* Configure vPort Rx Steering */
-struct mana_cfg_rx_steer_req {
+struct mana_cfg_rx_steer_req_v2 {
         struct gdma_req_hdr hdr;
         mana_handle_t vport;
         u16 num_indir_entries;
@@ -592,6 +592,8 @@ struct mana_cfg_rx_steer_req {
         u8 reserved;
         mana_handle_t default_rxobj;
         u8 hashkey[MANA_HASH_KEY_SIZE];
+        u8 cqe_coalescing_enable;
+        u8 reserved2[7];
 }; /* HW DATA */
 
 struct mana_cfg_rx_steer_resp {
--
2.39.2