Re: [RFC PATCH net-next v1 1/2] net: mirror skb frag ref/unref helpers

From: Dragos Tatulea
Date: Thu Mar 07 2024 - 05:44:26 EST


On Wed, 2024-03-06 at 15:59 -0800, Mina Almasry wrote:
> Refactor some of the skb frag ref/unref helpers for improved clarity.
>
> Implement napi_pp_get_page() to be the mirror counterpart of
> napi_pp_put_page().
>
> Implement napi_frag_ref() to be the mirror counterpart of
> napi_frag_unref().
>
> Improve __skb_frag_ref() to become a mirror counterpart of
> __skb_frag_unref(). Previously unref could handle pp & non-pp pages,
> while the ref could only handle non-pp pages. Now both the ref & unref
> helpers can correctly handle both pp & non-pp pages.
>
> Now that __skb_frag_ref() can handle both pp & non-pp pages, remove
> skb_pp_frag_ref(), and use __skb_frag_ref() instead. This lets us
> remove pp specific handling from skb_try_coalesce.
>
> Signed-off-by: Mina Almasry <almasrymina@xxxxxxxxxx>
>
> ---
> include/linux/skbuff.h | 24 +++++++++++++++---
> net/core/skbuff.c      | 56 ++++++++++++++----------------------------
> 2 files changed, 39 insertions(+), 41 deletions(-)
>
> diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
> index d577e0bee18d..51316b0e20bc 100644
> --- a/include/linux/skbuff.h
> +++ b/include/linux/skbuff.h
> @@ -3477,15 +3477,31 @@ static inline struct page *skb_frag_page(const skb_frag_t *frag)
>  	return netmem_to_page(frag->netmem);
>  }
>
> +bool napi_pp_get_page(struct page *page);
> +
> +static inline void napi_frag_ref(skb_frag_t *frag, bool recycle)
> +{
> +#ifdef CONFIG_PAGE_POOL
> +	struct page *page = skb_frag_page(frag);
> +
Please move the page assignment out of the #ifdef: with CONFIG_PAGE_POOL disabled, page is never declared, so the get_page(page) call below won't compile.
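Something like this (untested, only moving the declaration/assignment up so it exists in both configurations):

static inline void napi_frag_ref(skb_frag_t *frag, bool recycle)
{
	/* page is needed by get_page() even when CONFIG_PAGE_POOL is off */
	struct page *page = skb_frag_page(frag);

#ifdef CONFIG_PAGE_POOL
	if (recycle && napi_pp_get_page(page))
		return;
#endif
	get_page(page);
}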

> +	if (recycle && napi_pp_get_page(page))
> +		return;
> +#endif
> +	get_page(page);
> +}
> +
>  /**
>   * __skb_frag_ref - take an addition reference on a paged fragment.
>   * @frag: the paged fragment
> + * @recycle: skb->pp_recycle param of the parent skb.
>   *
> - * Takes an additional reference on the paged fragment @frag.
> + * Takes an additional reference on the paged fragment @frag. Obtains the
> + * correct reference count depending on whether skb->pp_recycle is set and
> + * whether the frag is a page pool frag.
>   */
> -static inline void __skb_frag_ref(skb_frag_t *frag)
> +static inline void __skb_frag_ref(skb_frag_t *frag, bool recycle)
>  {
> -	get_page(skb_frag_page(frag));
> +	napi_frag_ref(frag, recycle);
>  }
>
>  /**
> @@ -3497,7 +3513,7 @@ static inline void __skb_frag_ref(skb_frag_t *frag)
>   */
>  static inline void skb_frag_ref(struct sk_buff *skb, int f)
>  {
> -	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
> +	__skb_frag_ref(&skb_shinfo(skb)->frags[f], skb->pp_recycle);
>  }
>
>  int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
> diff --git a/net/core/skbuff.c b/net/core/skbuff.c
> index 1f918e602bc4..6d234faa9d9e 100644
> --- a/net/core/skbuff.c
> +++ b/net/core/skbuff.c
> @@ -1006,6 +1006,21 @@ int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
>  EXPORT_SYMBOL(skb_cow_data_for_xdp);
>
>  #if IS_ENABLED(CONFIG_PAGE_POOL)
> +bool napi_pp_get_page(struct page *page)
> +{
> +
> +	struct page *head_page;
> +
> +	head_page = compound_head(page);
> +
> +	if (!is_pp_page(page))
> +		return false;
> +
> +	page_pool_ref_page(head_page);
> +	return true;
> +}
> +EXPORT_SYMBOL(napi_pp_get_page);
> +
>  bool napi_pp_put_page(struct page *page, bool napi_safe)
>  {
>  	bool allow_direct = false;
> @@ -1058,37 +1073,6 @@ static bool skb_pp_recycle(struct sk_buff *skb, void *data, bool napi_safe)
>  	return napi_pp_put_page(virt_to_page(data), napi_safe);
>  }
>
> -/**
> - * skb_pp_frag_ref() - Increase fragment references of a page pool aware skb
> - * @skb: page pool aware skb
> - *
> - * Increase the fragment reference count (pp_ref_count) of a skb. This is
> - * intended to gain fragment references only for page pool aware skbs,
> - * i.e. when skb->pp_recycle is true, and not for fragments in a
> - * non-pp-recycling skb. It has a fallback to increase references on normal
> - * pages, as page pool aware skbs may also have normal page fragments.
> - */
> -static int skb_pp_frag_ref(struct sk_buff *skb)
> -{
> -	struct skb_shared_info *shinfo;
> -	struct page *head_page;
> -	int i;
> -
> -	if (!skb->pp_recycle)
> -		return -EINVAL;
> -
> -	shinfo = skb_shinfo(skb);
> -
> -	for (i = 0; i < shinfo->nr_frags; i++) {
> -		head_page = compound_head(skb_frag_page(&shinfo->frags[i]));
> -		if (likely(is_pp_page(head_page)))
> -			page_pool_ref_page(head_page);
> -		else
> -			page_ref_inc(head_page);
> -	}
> -	return 0;
> -}
> -
>  static void skb_kfree_head(void *head, unsigned int end_offset)
>  {
>  	if (end_offset == SKB_SMALL_HEAD_HEADROOM)
> @@ -4199,7 +4183,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
>  			to++;
>
>  		} else {
> -			__skb_frag_ref(fragfrom);
> +			__skb_frag_ref(fragfrom, skb->pp_recycle);
>  			skb_frag_page_copy(fragto, fragfrom);
>  			skb_frag_off_copy(fragto, fragfrom);
>  			skb_frag_size_set(fragto, todo);
> @@ -4849,7 +4833,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
>  			}
>
>  			*nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
> -			__skb_frag_ref(nskb_frag);
> +			__skb_frag_ref(nskb_frag, nskb->pp_recycle);
>  			size = skb_frag_size(nskb_frag);
>
>  			if (pos < offset) {
> @@ -5980,10 +5964,8 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
>  	/* if the skb is not cloned this does nothing
>  	 * since we set nr_frags to 0.
>  	 */
> -	if (skb_pp_frag_ref(from)) {
> -		for (i = 0; i < from_shinfo->nr_frags; i++)
> -			__skb_frag_ref(&from_shinfo->frags[i]);
> -	}
> +	for (i = 0; i < from_shinfo->nr_frags; i++)
> +		__skb_frag_ref(&from_shinfo->frags[i], from->pp_recycle);
>
>  	to->truesize += delta;
>  	to->len += len;