| author | Andrey Ryabinin <aryabinin@virtuozzo.com> | 2019-11-30 17:55:24 -0800 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-12-01 12:59:06 -0800 |
| commit | f87bccde6a7dd1bdb219a4045e8ac111590c9314 (patch) | |
| tree | b059a2895df1440c1ec411582a1e82cf0b28da58 /mm | |
| parent | e47b346aba0873529bf5130d599e4d91197cdd52 (diff) | |
mm/vmscan: remove unused lru_pages argument
Since 9092c71bb724 ("mm: use sc->priority for slab shrink targets"), the
argument 'unsigned long *lru_pages' has been passed around with no purpose.
Remove it.
Link: http://lkml.kernel.org/r/20190228083329.31892-4-aryabinin@virtuozzo.com
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: William Kucharski <william.kucharski@oracle.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/vmscan.c | 17 |
1 file changed, 5 insertions, 12 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2beff0e0dc7b..f7b598bd430f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2302,8 +2302,7 @@ enum scan_balance {
  * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
  */
 static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
-			   struct scan_control *sc, unsigned long *nr,
-			   unsigned long *lru_pages)
+			   struct scan_control *sc, unsigned long *nr)
 {
 	int swappiness = mem_cgroup_swappiness(memcg);
 	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
@@ -2454,7 +2453,6 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 	fraction[1] = fp;
 	denominator = ap + fp + 1;
 out:
-	*lru_pages = 0;
 	for_each_evictable_lru(lru) {
 		int file = is_file_lru(lru);
 		unsigned long lruvec_size;
@@ -2549,7 +2547,6 @@ out:
 			BUG();
 		}
 
-		*lru_pages += lruvec_size;
 		nr[lru] = scan;
 	}
 }
@@ -2558,7 +2555,7 @@ out:
  * This is a basic per-node page freer. Used by both kswapd and direct reclaim.
  */
 static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memcg,
-			      struct scan_control *sc, unsigned long *lru_pages)
+			      struct scan_control *sc)
 {
 	struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
 	unsigned long nr[NR_LRU_LISTS];
@@ -2570,7 +2567,7 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
 	struct blk_plug plug;
 	bool scan_adjusted;
 
-	get_scan_count(lruvec, memcg, sc, nr, lru_pages);
+	get_scan_count(lruvec, memcg, sc, nr);
 
 	/* Record the original scan target for proportional adjustments later */
 	memcpy(targets, nr, sizeof(nr));
@@ -2758,7 +2755,6 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 
 	do {
 		struct mem_cgroup *root = sc->target_mem_cgroup;
-		unsigned long node_lru_pages = 0;
 		struct mem_cgroup *memcg;
 
 		memset(&sc->nr, 0, sizeof(sc->nr));
@@ -2768,7 +2764,6 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 
 		memcg = mem_cgroup_iter(root, NULL, NULL);
 		do {
-			unsigned long lru_pages;
 			unsigned long reclaimed;
 			unsigned long scanned;
 
@@ -2805,8 +2800,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 			reclaimed = sc->nr_reclaimed;
 			scanned = sc->nr_scanned;
 
-			shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
-			node_lru_pages += lru_pages;
+			shrink_node_memcg(pgdat, memcg, sc);
 
 			shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
 				    sc->priority);
@@ -3317,7 +3311,6 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
 		.reclaim_idx = MAX_NR_ZONES - 1,
 		.may_swap = !noswap,
 	};
-	unsigned long lru_pages;
 
 	WARN_ON_ONCE(!current->reclaim_state);
 
@@ -3334,7 +3327,7 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
 	 * will pick up pages from other mem cgroup's as well. We hack
 	 * the priority and make it zero.
 	 */
-	shrink_node_memcg(pgdat, memcg, &sc, &lru_pages);
+	shrink_node_memcg(pgdat, memcg, &sc);
 
 	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);