Re: [PATCH net-next v11 1/6] page_pool: fragment API support for 32-bit arch with 64-bit DMA

From: Ilias Apalodimas
Date: Tue Oct 17 2023 - 08:17:54 EST


Hi Yunsheng

re-sending; HTML was somehow enabled and the mailing list dropped the public copy.

Apologies for the late reply (and the noise)


On Fri, 13 Oct 2023 at 09:47, Yunsheng Lin <linyunsheng@xxxxxxxxxx> wrote:
>
> Currently page_pool_alloc_frag() is not supported on 32-bit
> arches with 64-bit DMA because pp_frag_count and dma_addr_upper
> overlap in 'struct page' on those arches. Such arches seem to
> be quite common, see [1], which means drivers may need to work
> around the limitation when using the fragment API.
>
> It is assumed that the combination of the above arches with an
> address space >16TB does not exist: all those arches have a
> 64-bit equivalent, and it seems logical to use the 64-bit
> version on a system with a large address space. It is also
> assumed that the DMA address is page aligned when we are DMA
> mapping a page-aligned buffer, see [2].
>
> That means the lower 12 bits of a DMA address are always zero,
> so we can reuse those bits on the above arches to support a
> 32b+12b address, i.e. 16TB of memory.
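>
> As an illustrative sketch (hypothetical helpers, not the
> patch's code), the scheme boils down to the following, assuming
> PAGE_SHIFT == 12, a 32-bit unsigned long and a 64-bit
> dma_addr_t, i.e. the 32-bit-arch-with-64-bit-DMA case:
>
>	/* Store a page-aligned DMA address in a 32-bit slot.
>	 * Returns true if the address cannot be represented,
>	 * i.e. it was unaligned or >= 2^44 (16TB).
>	 */
>	static bool store_dma_addr(unsigned long *slot, dma_addr_t addr)
>	{
>		*slot = addr >> PAGE_SHIFT;	/* drop the 12 zero bits */
>		return addr != (dma_addr_t)*slot << PAGE_SHIFT;
>	}
>
>	static dma_addr_t load_dma_addr(unsigned long slot)
>	{
>		return (dma_addr_t)slot << PAGE_SHIFT;
>	}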
>
> If we made a wrong assumption, a warning is emitted so that
> users can report it to us.
>
> 1. https://lore.kernel.org/all/20211117075652.58299-1-linyunsheng@xxxxxxxxxx/
> 2. https://lore.kernel.org/all/20230818145145.4b357c89@xxxxxxxxxx/
>
> Tested-by: Alexander Lobakin <aleksander.lobakin@xxxxxxxxx>
> Signed-off-by: Jakub Kicinski <kuba@xxxxxxxxxx>
> Signed-off-by: Yunsheng Lin <linyunsheng@xxxxxxxxxx>
> CC: Lorenzo Bianconi <lorenzo@xxxxxxxxxx>
> CC: Alexander Duyck <alexander.duyck@xxxxxxxxx>
> CC: Liang Chen <liangchen.linux@xxxxxxxxx>
> CC: Alexander Lobakin <aleksander.lobakin@xxxxxxxxx>
> CC: Guillaume Tucker <guillaume.tucker@xxxxxxxxxxxxx>
> CC: Matthew Wilcox <willy@xxxxxxxxxxxxx>
> CC: Linux-MM <linux-mm@xxxxxxxxx>
> ---
> include/linux/mm_types.h | 13 +------------
> include/net/page_pool/helpers.h | 20 ++++++++++++++------
> net/core/page_pool.c | 14 +++++++++-----
> 3 files changed, 24 insertions(+), 23 deletions(-)
>
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index 36c5b43999e6..74b49c4c7a52 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -125,18 +125,7 @@ struct page {
> struct page_pool *pp;
> unsigned long _pp_mapping_pad;
> unsigned long dma_addr;
> - union {
> - /**
> - * dma_addr_upper: might require a 64-bit
> - * value on 32-bit architectures.
> - */
> - unsigned long dma_addr_upper;
> - /**
> - * For frag page support, not supported in
> - * 32-bit architectures with 64-bit DMA.
> - */
> - atomic_long_t pp_frag_count;
> - };
> + atomic_long_t pp_frag_count;
> };
> struct { /* Tail pages of compound page */
> unsigned long compound_head; /* Bit zero is set */
> diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
> index 8e7751464ff5..8f64adf86f5b 100644
> --- a/include/net/page_pool/helpers.h
> +++ b/include/net/page_pool/helpers.h
> @@ -197,7 +197,7 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
> page_pool_put_full_page(pool, page, true);
> }
>
> -#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT \
> +#define PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA \
> (sizeof(dma_addr_t) > sizeof(unsigned long))
>
> /**
> @@ -211,17 +211,25 @@ static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
> {
> dma_addr_t ret = page->dma_addr;
>
> - if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
> - ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;
> + if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
> + ret <<= PAGE_SHIFT;
>
> return ret;
> }
>
> -static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
> +static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
> {
> + if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) {
> + page->dma_addr = addr >> PAGE_SHIFT;
> +
> + /* We assume page alignment to shave off bottom bits,
> + * if this "compression" doesn't work we need to drop.
> + */
> + return addr != (dma_addr_t)page->dma_addr << PAGE_SHIFT;
> + }
> +
> page->dma_addr = addr;
> - if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
> - page->dma_addr_upper = upper_32_bits(addr);
> + return false;
> }
>
> static inline bool page_pool_put(struct page_pool *pool)
> diff --git a/net/core/page_pool.c b/net/core/page_pool.c
> index 77cb75e63aca..8a9868ea5067 100644
> --- a/net/core/page_pool.c
> +++ b/net/core/page_pool.c
> @@ -211,10 +211,6 @@ static int page_pool_init(struct page_pool *pool,
> */
> }
>
> - if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT &&
> - pool->p.flags & PP_FLAG_PAGE_FRAG)
> - return -EINVAL;
> -
> #ifdef CONFIG_PAGE_POOL_STATS
> pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
> if (!pool->recycle_stats)
> @@ -359,12 +355,20 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
> if (dma_mapping_error(pool->p.dev, dma))
> return false;
>
> - page_pool_set_dma_addr(page, dma);
> + if (page_pool_set_dma_addr(page, dma))
> + goto unmap_failed;
>
> if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
> page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
>
> return true;
> +
> +unmap_failed:
> + WARN_ON_ONCE("unexpected DMA address, please report to netdev@");
> + dma_unmap_page_attrs(pool->p.dev, dma,
> + PAGE_SIZE << pool->p.order, pool->p.dma_dir,
> + DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
> + return false;
> }
>
> static void page_pool_set_pp_info(struct page_pool *pool,
> --
> 2.33.0
>

That looks fine wrt what we discussed with Jakub,

Acked-by: Ilias Apalodimas <ilias.apalodimas@xxxxxxxxxx>