Add page pool for RX buffers, for faster buffer cycling and reduced CPU
usage.
The standard page pool API is used.
Signed-off-by: Haiyang Zhang <haiyangz@xxxxxxxxxxxxx>
---
V3:
Update xdp mem model, pool param, alloc as suggested by Jakub Kicinski
V2:
Use the standard page pool API as suggested by Jesper Dangaard Brouer
---
drivers/net/ethernet/microsoft/mana/mana_en.c | 91 +++++++++++++++----
include/net/mana/mana.h | 3 +
2 files changed, 78 insertions(+), 16 deletions(-)
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index a499e460594b..4307f25f8c7a 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1659,6 +1679,8 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
if (rxq->xdp_flush)
xdp_do_flush();
+
+ page_pool_nid_changed(rxq->page_pool, numa_mem_id());
}[...]
static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
@@ -2008,6 +2041,25 @@ static int mana_push_wqe(struct mana_rxq *rxq)
return 0;
}
+/* mana_create_page_pool() - allocate a page pool for @rxq's RX buffers.
+ *
+ * Sizes the pool to RX_BUFFERS_PER_QUEUE and associates it with the
+ * queue's RX NAPI instance (pprm.napi); all other page_pool_params are
+ * left zeroed and take the page pool core's defaults. On failure,
+ * rxq->page_pool is reset to NULL so callers never observe a stored
+ * ERR_PTR value.
+ *
+ * Return: 0 on success, or the negative errno from page_pool_create().
+ */
+static int mana_create_page_pool(struct mana_rxq *rxq)
+{
+ struct page_pool_params pprm = {};
+ int ret;
+
+ pprm.pool_size = RX_BUFFERS_PER_QUEUE;
+ pprm.napi = &rxq->rx_cq.napi;
+
+ rxq->page_pool = page_pool_create(&pprm);
+
+ if (IS_ERR(rxq->page_pool)) {
+ ret = PTR_ERR(rxq->page_pool);
+ rxq->page_pool = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+