[PATCH] mm/min_free_kbytes: include ZONE_MOVABLE pages in the calculation

From: liuq
Date: Thu Jun 08 2023 - 23:34:58 EST


The current calculation of min_free_kbytes only takes ZONE_DMA and
ZONE_NORMAL pages into account, but the ZONE_MOVABLE
zone->_watermark[WMARK_MIN] also receives a share of min_free_kbytes.
This causes the min watermark of ZONE_NORMAL to be too small in the
presence of ZONE_MOVABLE.

Fix this by adding a nr_free_pagecache_pages() helper, which counts the
pages beyond the high watermark in all zones up to and including
ZONE_MOVABLE (gfp_zone(GFP_HIGHUSER_MOVABLE)), and by using it instead
of nr_free_buffer_pages() in calculate_min_free_kbytes() and
set_recommended_min_free_kbytes().
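
Purely for illustration (hypothetical zone layout; assuming, as described
above, that min_free_kbytes is distributed across all zones, including
ZONE_MOVABLE, roughly in proportion to their size), take 4 GiB of
ZONE_NORMAL plus 4 GiB of ZONE_MOVABLE and the existing
new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16) formula:

  before: lowmem_kbytes   = 4 GiB                   = 4194304 kB
          min_free_kbytes = int_sqrt(4194304 * 16)  =    8192 kB
          ZONE_NORMAL share ~= 8192 / 2             =    4096 kB

  after:  lowmem_kbytes   = 8 GiB                   = 8388608 kB
          min_free_kbytes = int_sqrt(8388608 * 16)  =   11585 kB
          ZONE_NORMAL share ~= 11585 / 2            ~=   5792 kB

so the ZONE_NORMAL min watermark is no longer diluted by the movable
zone. The resulting values can be checked via /proc/sys/vm/min_free_kbytes
and the per-zone "min" lines in /proc/zoneinfo.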

Signed-off-by: liuq <liuq131@xxxxxxxxxxxxxxx>
---
 include/linux/mm.h |  1 +
 mm/khugepaged.c    |  2 +-
 mm/page_alloc.c    | 15 ++++++++++++++-
 3 files changed, 16 insertions(+), 2 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index cf3d0d673f6b..1f91d035bcaf 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -863,6 +863,7 @@ void split_page(struct page *page, unsigned int order);
void folio_copy(struct folio *dst, struct folio *src);

unsigned long nr_free_buffer_pages(void);
+unsigned long nr_free_pagecache_pages(void);

/*
* Compound pages have a destructor function. Provide a
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 16be62d493cd..6632264b951c 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -2342,7 +2342,7 @@ static void set_recommended_min_free_kbytes(void)

/* don't ever allow to reserve more than 5% of the lowmem */
recommended_min = min(recommended_min,
- (unsigned long) nr_free_buffer_pages() / 20);
+ (unsigned long) nr_free_pagecache_pages() / 20);
recommended_min <<= (PAGE_SHIFT-10);

if (recommended_min > min_free_kbytes) {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e008a3df0485..489b564526dd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5775,6 +5775,19 @@ unsigned long nr_free_buffer_pages(void)
}
EXPORT_SYMBOL_GPL(nr_free_buffer_pages);

+/**
+ * nr_free_pagecache_pages - count number of pages beyond high watermark
+ *
+ * nr_free_pagecache_pages() counts the number of pages which are beyond the
+ * high watermark within all zones.
+ *
+ * Return: number of pages beyond high watermark within all zones.
+ */
+unsigned long nr_free_pagecache_pages(void)
+{
+ return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
+}
+
static inline void show_node(struct zone *zone)
{
if (IS_ENABLED(CONFIG_NUMA))
@@ -8651,7 +8664,7 @@ void calculate_min_free_kbytes(void)
unsigned long lowmem_kbytes;
int new_min_free_kbytes;

- lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
+ lowmem_kbytes = nr_free_pagecache_pages() * (PAGE_SIZE >> 10);
new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);

if (new_min_free_kbytes > user_min_free_kbytes)
--
2.27.0