[PATCH 24/34] mm, vmscan: avoid passing in classzone_idx unnecessarily to shrink_node

From: Mel Gorman
Date: Fri Jul 08 2016 - 05:39:42 EST


shrink_node() receives all the information it needs about classzone_idx
from sc->reclaim_idx, so remove the aliases.
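
To make the redundancy concrete, here is a minimal standalone C sketch
of the pattern being removed. The struct, enum values and printf calls
are simplified stand-ins for the kernel's scan_control and reclaim
paths, not the real definitions:

	#include <stdio.h>

	enum zone_type { ZONE_DMA, ZONE_NORMAL, ZONE_MOVABLE };

	/* stand-in for the kernel's scan_control */
	struct scan_control {
		enum zone_type reclaim_idx;	/* highest zone eligible for reclaim */
	};

	/* before: classzone_idx merely aliased sc->reclaim_idx */
	static void shrink_node_old(struct scan_control *sc,
				    enum zone_type classzone_idx)
	{
		printf("reclaim up to zone %d\n", classzone_idx);
	}

	/* after: read the index from the scan control directly */
	static void shrink_node_new(struct scan_control *sc)
	{
		printf("reclaim up to zone %d\n", sc->reclaim_idx);
	}

	int main(void)
	{
		struct scan_control sc = { .reclaim_idx = ZONE_NORMAL };

		/*
		 * Every caller already set sc.reclaim_idx before the call,
		 * so the extra argument carried no extra information.
		 */
		shrink_node_old(&sc, sc.reclaim_idx);
		shrink_node_new(&sc);
		return 0;
	}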

Signed-off-by: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Acked-by: Hillf Danton <hillf.zj@xxxxxxxxxxxxxxx>
---
mm/vmscan.c | 20 +++++++++-----------
1 file changed, 9 insertions(+), 11 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index e12b0fd2044c..bba71b6c9a4c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2426,8 +2426,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
 	return true;
 }
 
-static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc,
-			enum zone_type classzone_idx)
+static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 {
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	unsigned long nr_reclaimed, nr_scanned;
@@ -2656,7 +2655,7 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 		if (zone->zone_pgdat == last_pgdat)
 			continue;
 		last_pgdat = zone->zone_pgdat;
-		shrink_node(zone->zone_pgdat, sc, classzone_idx);
+		shrink_node(zone->zone_pgdat, sc);
 	}
 
 	/*
@@ -3080,7 +3079,6 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
  * This is used to determine if the scanning priority needs to be raised.
  */
 static bool kswapd_shrink_node(pg_data_t *pgdat,
-			       int classzone_idx,
 			       struct scan_control *sc)
 {
 	struct zone *zone;
@@ -3088,7 +3086,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
 
 	/* Reclaim a number of pages proportional to the number of zones */
 	sc->nr_to_reclaim = 0;
-	for (z = 0; z <= classzone_idx; z++) {
+	for (z = 0; z <= sc->reclaim_idx; z++) {
 		zone = pgdat->node_zones + z;
 		if (!populated_zone(zone))
 			continue;
@@ -3100,7 +3098,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
 	 * Historically care was taken to put equal pressure on all zones but
 	 * now pressure is applied based on node LRU order.
 	 */
-	shrink_node(pgdat, sc, classzone_idx);
+	shrink_node(pgdat, sc);
 
 	/*
 	 * Fragmentation may mean that the system cannot be rebalanced for
@@ -3162,7 +3160,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 			if (!populated_zone(zone))
 				continue;
 
-			classzone_idx = i;
+			sc.reclaim_idx = i;
 			break;
 		}
 	}
@@ -3175,12 +3173,12 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 		 * zone was balanced even under extreme pressure when the
 		 * overall node may be congested.
 		 */
-		for (i = classzone_idx; i >= 0; i--) {
+		for (i = sc.reclaim_idx; i >= 0; i--) {
 			zone = pgdat->node_zones + i;
 			if (!populated_zone(zone))
 				continue;
 
-			if (zone_balanced(zone, sc.order, classzone_idx))
+			if (zone_balanced(zone, sc.order, sc.reclaim_idx))
 				goto out;
 		}
 
@@ -3211,7 +3209,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 		 * enough pages are already being scanned that that high
 		 * watermark would be met at 100% efficiency.
 		 */
-		if (kswapd_shrink_node(pgdat, classzone_idx, &sc))
+		if (kswapd_shrink_node(pgdat, &sc))
 			raise_priority = false;
 
 		/*
@@ -3674,7 +3672,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
 	 * priorities until we have enough memory freed.
 	 */
 	do {
-		shrink_node(pgdat, &sc, classzone_idx);
+		shrink_node(pgdat, &sc);
 	} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
 	}
 

--
2.6.4