[PATCH v2 2/2] net: arc_emac: prevent reuse of unreclaimed tx descriptors

From: Beniamino Galvani
Date: Wed Sep 10 2014 - 16:51:51 EST


This patch changes the logic in the tx path to ensure that tx descriptors
are reused for transmission only after they have been reclaimed by
arc_emac_tx_clean().

Signed-off-by: Beniamino Galvani <b.galvani@xxxxxxxxx>
---
drivers/net/ethernet/arc/emac_main.c | 43 +++++++++++++++++++++++++++---------
1 file changed, 32 insertions(+), 11 deletions(-)
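
A note for reviewers (not part of the commit message or the patch): below
is a minimal, standalone sketch of the availability arithmetic introduced
by arc_emac_tx_avail(). The ring size TX_BD_NUM is assumed to be 128 here
purely for illustration; txbd_curr is the producer index advanced by tx(),
txbd_dirty the consumer index advanced by tx_clean(), and one slot is
deliberately left unused so that a full ring can be told apart from an
empty one.

#include <stdio.h>

#define TX_BD_NUM 128	/* assumed ring size, for illustration only */

/* Same formula as the new arc_emac_tx_avail() helper. */
static int tx_avail(int txbd_dirty, int txbd_curr)
{
	return (txbd_dirty + TX_BD_NUM - txbd_curr - 1) % TX_BD_NUM;
}

int main(void)
{
	/* Empty ring: producer and consumer point at the same slot.
	 * Only TX_BD_NUM - 1 slots are usable; one is kept free so
	 * that "full" and "empty" remain distinguishable. */
	printf("empty: %d\n", tx_avail(0, 0));		/* 127 */

	/* Full ring: the producer has wrapped around to the slot just
	 * behind the consumer, so tx() must stop the queue. */
	printf("full:  %d\n", tx_avail(5, 4));		/* 0 */

	/* Typical case: some descriptors are still waiting to be
	 * reclaimed by tx_clean(). */
	printf("inuse: %d\n", tx_avail(10, 20));	/* 117 */

	return 0;
}

The new smp_mb() barriers pair with this arithmetic, as the comments in
the patch describe: tx() publishes the advanced txbd_curr before
(re-)checking availability, and tx_clean() publishes the advanced
txbd_dirty before testing netif_queue_stopped(), so a queue that tx() has
just stopped cannot miss the corresponding wake-up.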

diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 1d08f63..abe1eab 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -28,6 +28,17 @@


/**
+ * arc_emac_tx_avail - Return the number of available slots in the tx ring.
+ * @priv: Pointer to ARC EMAC private data structure.
+ *
+ * returns: the number of slots available for transmission in the tx ring.
+ */
+static inline int arc_emac_tx_avail(struct arc_emac_priv *priv)
+{
+ return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM;
+}
+
+/**
* arc_emac_adjust_link - Adjust the PHY link duplex.
* @ndev: Pointer to the net_device structure.
*
@@ -182,10 +193,15 @@ static void arc_emac_tx_clean(struct net_device *ndev)
txbd->info = 0;

*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
-
- if (netif_queue_stopped(ndev))
- netif_wake_queue(ndev);
}
+
+ /* Ensure that txbd_dirty is visible to tx() before checking
+ * for queue stopped.
+ */
+ smp_mb();
+
+ if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv))
+ netif_wake_queue(ndev);
}

/**
@@ -576,11 +592,9 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)

len = max_t(unsigned int, ETH_ZLEN, skb->len);

- /* EMAC still holds this buffer in its possession.
- * CPU must not modify this buffer descriptor
- */
- if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) {
+ if (unlikely(!arc_emac_tx_avail(priv))) {
netif_stop_queue(ndev);
+ netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}

@@ -609,12 +623,19 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
/* Increment index to point to the next BD */
*txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;

- /* Get "info" of the next BD */
- info = &priv->txbd[*txbd_curr].info;
+ /* Ensure that tx_clean() sees the new txbd_curr before
+ * checking the queue status. This prevents an unneeded wake
+ * of the queue in tx_clean().
+ */
+ smp_mb();

- /* Check if if Tx BD ring is full - next BD is still owned by EMAC */
- if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC))
+ if (!arc_emac_tx_avail(priv)) {
netif_stop_queue(ndev);
+ /* Refresh txbd_dirty */
+ smp_mb();
+ if (arc_emac_tx_avail(priv))
+ netif_start_queue(ndev);
+ }

arc_reg_set(priv, R_STATUS, TXPL_MASK);

--
1.9.1
