[PATCH 1/2] mm/page_alloc: remove tracking of active PCP list range in bulk free

From: Kemeng Shi
Date: Tue Aug 08 2023 - 22:07:52 EST


After commit fd56eef258a17 ("mm/page_alloc: simplify how many pages are
selected per pcp list during bulk free"), we drain all pages in the
selected pcp list, and the passed count is guaranteed to never exceed
pcp->count. The search therefore finishes before wrapping around, so the
tracking of the active PCP list range, which only existed for the
wrap-around case, is no longer needed.

Signed-off-by: Kemeng Shi <shikemeng@xxxxxxxxxxxxxxx>
---
mm/page_alloc.c | 15 +++------------
1 file changed, 3 insertions(+), 12 deletions(-)
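
Not part of the patch, just an illustration: a minimal userspace sketch
of the simplified round-robin selection, assuming the invariant from the
changelog (the requested count never exceeds the total number of queued
pages). NR_LISTS, list_count[] and take are made-up names for this
sketch, not kernel symbols:

	#include <stdio.h>

	#define NR_LISTS 4

	int main(void)
	{
		int list_count[NR_LISTS] = { 0, 3, 0, 2 };	/* pages queued per list */
		int count = 5;		/* caller guarantees count <= total queued */
		int pindex = 1;		/* arbitrary starting list */

		while (count > 0) {
			int take;

			/* Simplified selection: plain wrap to 0, no min/max range. */
			do {
				if (++pindex > NR_LISTS - 1)
					pindex = 0;
			} while (list_count[pindex] == 0);

			/* Drain the selected list, or what is left of count. */
			take = list_count[pindex] < count ? list_count[pindex] : count;
			list_count[pindex] -= take;
			count -= take;
			printf("drained %d pages from list %d\n", take, pindex);
		}
		return 0;
	}

Because each selected list is drained before the scan advances, the
inner do/while walks past each list at most once before count reaches
zero, which is why the min_pindex/max_pindex bookkeeping removed below
is no longer needed.
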

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 96b7c1a7d1f2..1ddcb2707d05 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1207,8 +1207,6 @@ static void free_pcppages_bulk(struct zone *zone, int count,
int pindex)
{
unsigned long flags;
- int min_pindex = 0;
- int max_pindex = NR_PCP_LISTS - 1;
unsigned int order;
bool isolated_pageblocks;
struct page *page;
@@ -1231,17 +1229,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,

/* Remove pages from lists in a round-robin fashion. */
do {
- if (++pindex > max_pindex)
- pindex = min_pindex;
+ if (++pindex > NR_PCP_LISTS - 1)
+ pindex = 0;
list = &pcp->lists[pindex];
- if (!list_empty(list))
- break;
-
- if (pindex == max_pindex)
- max_pindex--;
- if (pindex == min_pindex)
- min_pindex++;
- } while (1);
+ } while (list_empty(list));

order = pindex_to_order(pindex);
nr_pages = 1 << order;
--
2.30.0