author		Yu Zhao <yuzhao@google.com>	2023-02-13 00:53:22 -0700
committer	Andrew Morton <akpm@linux-foundation.org>	2023-02-16 20:43:54 -0800
commit		9f550d78b40da21b4da515db4c37d8d7b12aa1a6 (patch)
tree		5bcf8622f7d5220ef30141a840975482d5efe2bf /mm
parent		6f7d760e86fa84862d749e36ebd29abf31f4f883 (diff)
mm: multi-gen LRU: avoid futile retries
Recall that the per-node memcg LRU has two generations and that they alternate when the last memcg (of a given node) is moved from one to the other. Each generation is also sharded into multiple bins to improve scalability. A reclaimer starts with a random bin (in the old generation) and, if it fails, retries with the rest of the bins.

If a reclaimer fails with the last memcg, it should move this memcg to the young generation first, which causes the generations to alternate, and then retry. Otherwise, the retries will be futile because all other bins are empty.

Link: https://lkml.kernel.org/r/20230213075322.1416966-1-yuzhao@google.com
Fixes: e4dde56cd208 ("mm: multi-gen LRU: per-node lru_gen_folio lists")
Signed-off-by: Yu Zhao <yuzhao@google.com>
Reported-by: T.J. Mercier <tjmercier@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
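For readers who want to see the generation/bin arrangement in isolation, below is a minimal standalone model (plain userspace C; the names node_memcg_lru, rotate_memcg and NR_BINS are hypothetical stand-ins, not the kernel's identifiers). It is only a sketch of the layout the message describes, showing why retrying the remaining bins is futile unless the last memcg is moved to the young generation first, which makes the generations alternate.

/*
 * Minimal model of a per-node memcg LRU: two generations that alternate
 * when the last memcg leaves the old one, each generation sharded into
 * bins.  Hypothetical names; not the kernel's data structures.
 */
#include <stdio.h>

#define NR_BINS 4

struct node_memcg_lru {
	unsigned long seq;              /* low bit selects the old generation */
	int nr_memcgs[2][NR_BINS];      /* memcgs per generation, per bin */
};

static int old_gen(const struct node_memcg_lru *lru)
{
	return lru->seq & 1;
}

/*
 * Move one memcg from the old generation to the young one; if it was the
 * last memcg of the old generation, the generations alternate.
 */
static void rotate_memcg(struct node_memcg_lru *lru, int bin)
{
	int gen = old_gen(lru);
	int remaining = 0;

	lru->nr_memcgs[gen][bin]--;
	lru->nr_memcgs[!gen][bin]++;

	for (int i = 0; i < NR_BINS; i++)
		remaining += lru->nr_memcgs[gen][i];
	if (!remaining)
		lru->seq++;             /* last memcg moved: generations alternate */
}

static void dump_old_gen(const struct node_memcg_lru *lru)
{
	int gen = old_gen(lru);

	printf("old gen %d, memcgs per bin:", gen);
	for (int i = 0; i < NR_BINS; i++)
		printf(" %d", lru->nr_memcgs[gen][i]);
	printf("\n");
}

int main(void)
{
	/*
	 * The old generation holds a single memcg, in bin 2.  If reclaiming
	 * it fails and it is not rotated, every other bin of the old
	 * generation is empty, so retrying the remaining bins finds nothing.
	 */
	struct node_memcg_lru lru = {
		.nr_memcgs = { { 0, 0, 1, 0 }, { 2, 1, 0, 3 } },
	};

	dump_old_gen(&lru);
	rotate_memcg(&lru, 2);          /* move the last memcg first ... */
	dump_old_gen(&lru);             /* ... so a retry sees the other generation */
	return 0;
}

In the kernel itself the move is done by lru_gen_rotate_memcg(); the hunks below change shrink_many() so that this rotation happens before the decision to retry, rather than only on the success path.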
Diffstat (limited to 'mm')
-rw-r--r--	mm/vmscan.c	25
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d4b9fd1ae0ed..34535bbd4fe9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -5356,18 +5356,20 @@ static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
 
 static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
 {
+	int op;
 	int gen;
 	int bin;
 	int first_bin;
 	struct lruvec *lruvec;
 	struct lru_gen_folio *lrugen;
+	struct mem_cgroup *memcg;
 	const struct hlist_nulls_node *pos;
-	int op = 0;
-	struct mem_cgroup *memcg = NULL;
 	unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);
 
 	bin = first_bin = get_random_u32_below(MEMCG_NR_BINS);
 restart:
+	op = 0;
+	memcg = NULL;
 	gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq));
 
 	rcu_read_lock();
@@ -5391,14 +5393,22 @@ restart:
 
 		op = shrink_one(lruvec, sc);
 
-		if (sc->nr_reclaimed >= nr_to_reclaim)
-			goto success;
-
 		rcu_read_lock();
+
+		if (sc->nr_reclaimed >= nr_to_reclaim)
+			break;
 	}
 
 	rcu_read_unlock();
 
+	if (op)
+		lru_gen_rotate_memcg(lruvec, op);
+
+	mem_cgroup_put(memcg);
+
+	if (sc->nr_reclaimed >= nr_to_reclaim)
+		return;
+
 	/* restart if raced with lru_gen_rotate_memcg() */
 	if (gen != get_nulls_value(pos))
 		goto restart;
@@ -5407,11 +5417,6 @@ restart:
 	bin = get_memcg_bin(bin + 1);
 	if (bin != first_bin)
 		goto restart;
-success:
-	if (op)
-		lru_gen_rotate_memcg(lruvec, op);
-
-	mem_cgroup_put(memcg);
 }
 
 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)