[PATCH 1/1] net: fec: enable XDP_TX support

From: Shenwei Wang
Date: Tue May 02 2023 - 15:32:49 EST


Enable the XDP_TX path and correct the return value of the XDP xmit function, fec_enet_xdp_xmit().
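
For context, here is a minimal, illustrative BPF program (not part of this
patch) that exercises the XDP_TX path by bouncing received frames back out
of the same interface. The program name xdp_bounce is hypothetical, and the
snippet assumes libbpf's bpf_helpers.h:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_bounce(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;
	unsigned char tmp[ETH_ALEN];

	/* Make sure the Ethernet header is within the packet bounds. */
	if ((void *)(eth + 1) > data_end)
		return XDP_PASS;

	/* Swap source and destination MACs, then send the frame back out
	 * the receiving interface via the driver's XDP_TX path.
	 */
	__builtin_memcpy(tmp, eth->h_dest, ETH_ALEN);
	__builtin_memcpy(eth->h_dest, eth->h_source, ETH_ALEN);
	__builtin_memcpy(eth->h_source, tmp, ETH_ALEN);

	return XDP_TX;
}

char _license[] SEC("license") = "GPL";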

If an individual frame cannot be transmitted because no free BD (buffer
descriptor) entries are available, the function still returns success for
all frames, so frames that were actually dropped are reported as sent.

This patch resolves the issue by ensuring the return value reflects the
number of frames that were actually transmitted, rather than reporting
success for every frame when some could not be sent.
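
For reference, a minimal sketch of how a generic ndo_xdp_xmit() caller
consumes this return value. The wrapper name xdp_bulk_flush() is
hypothetical, but the convention that the caller releases whatever the
driver did not accept is the standard ndo_xdp_xmit contract, which is why
an inflated count leaks the dropped frames:

#include <linux/netdevice.h>
#include <net/xdp.h>

/* Hypothetical caller sketch: the driver's return value is taken as the
 * number of frames it accepted; everything past that index is freed here.
 */
static void xdp_bulk_flush(struct net_device *dev,
			   struct xdp_frame **frames, int n)
{
	int i, sent;

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, n, frames, XDP_XMIT_FLUSH);
	if (sent < 0)
		sent = 0;

	/* Frames the driver did not transmit are returned to the XDP
	 * memory pool; this only works if 'sent' is accurate.
	 */
	for (i = sent; i < n; i++)
		xdp_return_frame(frames[i]);
}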

Fixes: 6d6b39f180b8 ("net: fec: add initial XDP support")
Signed-off-by: Gagandeep Singh <g.singh@xxxxxxx>
Signed-off-by: Shenwei Wang <shenwei.wang@xxxxxxx>
---
drivers/net/ethernet/freescale/fec_main.c | 27 ++++++++++++++++-------
1 file changed, 19 insertions(+), 8 deletions(-)

diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 160c1b3525f5..dfc1bcc9a8db 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -75,6 +75,10 @@
 
 static void set_multicast_list(struct net_device *ndev);
 static void fec_enet_itr_coal_set(struct net_device *ndev);
+static int fec_enet_xdp_xmit(struct net_device *dev,
+			     int num_frames,
+			     struct xdp_frame **frames,
+			     u32 flags);
 
 #define DRIVER_NAME "fec"
 
@@ -1517,6 +1521,7 @@ fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
 {
 	unsigned int sync, len = xdp->data_end - xdp->data;
 	u32 ret = FEC_ENET_XDP_PASS;
+	struct xdp_frame *xdp_frame;
 	struct page *page;
 	int err;
 	u32 act;
@@ -1545,11 +1550,12 @@ fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
 		}
 		break;
 
-	default:
-		bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
-		fallthrough;
-
 	case XDP_TX:
+		xdp_frame = xdp_convert_buff_to_frame(xdp);
+		ret = fec_enet_xdp_xmit(fep->netdev, 1, &xdp_frame, 0);
+		break;
+
+	default:
 		bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
 		fallthrough;
 
@@ -3798,7 +3804,8 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 	entries_free = fec_enet_get_free_txdesc_num(txq);
 	if (entries_free < MAX_SKB_FRAGS + 1) {
 		netdev_err(fep->netdev, "NOT enough BD for SG!\n");
-		return NETDEV_TX_OK;
+		xdp_return_frame(frame);
+		return NETDEV_TX_BUSY;
 	}
 
 	/* Fill in a Tx ring entry */
@@ -3856,6 +3863,7 @@ static int fec_enet_xdp_xmit(struct net_device *dev,
 	struct fec_enet_private *fep = netdev_priv(dev);
 	struct fec_enet_priv_tx_q *txq;
 	int cpu = smp_processor_id();
+	unsigned int sent_frames = 0;
 	struct netdev_queue *nq;
 	unsigned int queue;
 	int i;
@@ -3866,8 +3874,11 @@ static int fec_enet_xdp_xmit(struct net_device *dev,
 
 	__netif_tx_lock(nq, cpu);
 
-	for (i = 0; i < num_frames; i++)
-		fec_enet_txq_xmit_frame(fep, txq, frames[i]);
+	for (i = 0; i < num_frames; i++) {
+		if (fec_enet_txq_xmit_frame(fep, txq, frames[i]) != 0)
+			break;
+		sent_frames++;
+	}
 
 	/* Make sure the update to bdp and tx_skbuff are performed. */
 	wmb();
@@ -3877,7 +3888,7 @@ static int fec_enet_xdp_xmit(struct net_device *dev,
 
 	__netif_tx_unlock(nq);
 
-	return num_frames;
+	return sent_frames;
 }
 
 static const struct net_device_ops fec_netdev_ops = {
--
2.34.1