[PATCH 2/6] mm: page_alloc: Break out zone page aging distribution into its own helper

From: Mel Gorman
Date: Tue Dec 17 2013 - 11:48:37 EST


This patch moves the decision on whether to round-robin allocations between
zones and nodes into its own helper function. It will make some later patches
easier to understand, and as the helper is a static function with a single
caller it will be automatically inlined.

Signed-off-by: Mel Gorman <mgorman@xxxxxxx>
Reviewed-by: Rik van Riel <riel@xxxxxxxxxx>
Acked-by: Johannes Weiner <hannes@xxxxxxxxxxx>
---
mm/page_alloc.c | 63 ++++++++++++++++++++++++++++++++++++++-------------------
1 file changed, 42 insertions(+), 21 deletions(-)
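
As a rough illustration of the decision the helper makes, here is a minimal
userspace sketch. "struct toy_zone", its "batch" and "local" fields, and the
plain boolean flags are made-up stand-ins for NR_ALLOC_BATCH, zone_local()
and the gfp/alloc flag tests, not kernel interfaces:

#include <stdbool.h>

/* Toy model of a zone, for illustration only; not a kernel structure. */
struct toy_zone {
	long batch;	/* stand-in for zone_page_state(zone, NR_ALLOC_BATCH) */
	bool local;	/* stand-in for zone_local(preferred_zone, zone) */
};

/* Mirrors the skip decision of the new helper in simplified form. */
static bool toy_skip_for_fairness(const struct toy_zone *zone,
				  bool wmark_low, bool movable,
				  bool reclaim_mode)
{
	if (!wmark_low)		/* only round-robin in the fast path */
		return false;
	if (!movable)		/* only LRU or reclaimable-slab pages */
		return false;
	if (zone->batch <= 0)	/* batch exhausted: spread to the next zone */
		return true;
	if (reclaim_mode && !zone->local)
		return true;	/* stay in local zones under zone_reclaim_mode */
	return false;
}

int main(void)
{
	struct toy_zone z = { .batch = 0, .local = true };

	/* Batch exhausted, so the fast path would skip this zone. */
	return toy_skip_for_fairness(&z, true, true, false) ? 0 : 1;
}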

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f861d02..64020eb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1872,6 +1872,42 @@ static inline void init_zone_allows_reclaim(int nid)
#endif /* CONFIG_NUMA */

/*
+ * Distribute pages in proportion to the individual zone size to ensure fair
+ * page aging. The zone a page was allocated in should have no effect on the
+ * time the page has in memory before being reclaimed.
+ *
+ * Returns true if this zone should be skipped so that allocations are
+ * spread across other zones for fair page aging.
+ */
+static bool zone_distribute_age(gfp_t gfp_mask, struct zone *preferred_zone,
+				struct zone *zone, int alloc_flags)
+{
+	/* Only round robin in the allocator fast path */
+	if (!(alloc_flags & ALLOC_WMARK_LOW))
+		return false;
+
+	/* Only round robin pages likely to be LRU or reclaimable slab */
+	if (!(gfp_mask & GFP_MOVABLE_MASK))
+		return false;
+
+	/* Distribute to the next zone if this zone has exhausted its batch */
+	if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
+		return true;
+
+	/*
+	 * When zone_reclaim_mode is enabled, try to stay in local zones in the
+	 * fastpath. If that fails, the slowpath is entered, which will do
+	 * another pass starting with the local zones, but ultimately fall
+	 * back to remote zones that do not partake in the fairness round-robin
+	 * cycle of this zonelist.
+	 */
+	if (zone_reclaim_mode && !zone_local(preferred_zone, zone))
+		return true;
+
+	return false;
+}
+
+/*
* get_page_from_freelist goes through the zonelist trying to allocate
* a page.
*/
@@ -1907,27 +1943,12 @@ zonelist_scan:
 		BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
 		if (unlikely(alloc_flags & ALLOC_NO_WATERMARKS))
 			goto try_this_zone;
-		/*
-		 * Distribute pages in proportion to the individual
-		 * zone size to ensure fair page aging. The zone a
-		 * page was allocated in should have no effect on the
-		 * time the page has in memory before being reclaimed.
-		 *
-		 * When zone_reclaim_mode is enabled, try to stay in
-		 * local zones in the fastpath. If that fails, the
-		 * slowpath is entered, which will do another pass
-		 * starting with the local zones, but ultimately fall
-		 * back to remote zones that do not partake in the
-		 * fairness round-robin cycle of this zonelist.
-		 */
-		if ((alloc_flags & ALLOC_WMARK_LOW) &&
-		    (gfp_mask & GFP_MOVABLE_MASK)) {
-			if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
-				continue;
-			if (zone_reclaim_mode &&
-			    !zone_local(preferred_zone, zone))
-				continue;
-		}
+
+		/* Distribute pages to ensure fair page aging */
+		if (zone_distribute_age(gfp_mask, preferred_zone, zone,
+							alloc_flags))
+			continue;
+
 		/*
 		 * When allocating a page cache page for writing, we
 		 * want to get it from a zone that is within its dirty
--
1.8.4
