author     Matthew Wilcox (Oracle) <willy@infradead.org>    2022-06-17 18:50:08 +0100
committer  akpm <akpm@linux-foundation.org>                 2022-07-03 18:08:46 -0700
commit     4864545a4669781f75aa711ebf7b25e6f0f37d13 (patch)
tree       fa9574a7d3e212edb3f9b6f0378280fbc6f0b329 /mm/swap.c
parent     82ac64d86fb079431e3af618a074e77be398299b (diff)
mm/swap: pull the CPU conditional out of __lru_add_drain_all()
The function is too long, so pull this complicated conditional out into
cpu_needs_drain().  This ends up shrinking the text by 14 bytes, by allowing
GCC to cache the result of calling per_cpu() instead of relocating each
lookup individually.

Link: https://lkml.kernel.org/r/20220617175020.717127-11-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
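For readers outside the kernel tree, here is a minimal user-space sketch of the
shape of this refactoring: the repeated per-CPU lookups move into one helper
that fetches the per-CPU structure a single time and reaches every batch
through the cached pointer.  Everything below (fake_per_cpu, NR_CPUS, the
stripped-down struct cpu_fbatches with bare nr counters, and
cpu_needs_drain_sketch) is a simplified stand-in for illustration only; it does
not reproduce the kernel's per_cpu() machinery, folio_batch, data_race(), or
the mlock/buffer-head checks.

        #include <stdbool.h>
        #include <stdio.h>

        /* Simplified stand-ins; the real folio_batch and cpu_fbatches are richer. */
        struct folio_batch { unsigned char nr; };

        struct cpu_fbatches {
                struct folio_batch lru_add;
                struct folio_batch lru_deactivate_file;
                struct folio_batch lru_deactivate;
                struct folio_batch lru_lazyfree;
                struct folio_batch activate;
        };

        #define NR_CPUS 4
        static struct cpu_fbatches fake_per_cpu[NR_CPUS];  /* stands in for per_cpu() */

        /* Helper in the spirit of cpu_needs_drain(): one lookup, many field reads. */
        static bool cpu_needs_drain_sketch(unsigned int cpu)
        {
                struct cpu_fbatches *fbatches = &fake_per_cpu[cpu];

                return fbatches->lru_add.nr ||
                        fbatches->lru_deactivate_file.nr ||
                        fbatches->lru_deactivate.nr ||
                        fbatches->lru_lazyfree.nr ||
                        fbatches->activate.nr;
        }

        int main(void)
        {
                fake_per_cpu[2].lru_add.nr = 1;

                for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
                        printf("cpu %u needs drain: %d\n", cpu,
                               cpu_needs_drain_sketch(cpu));
                return 0;
        }

In the kernel version the payoff described above is that GCC can compute the
address of per_cpu(cpu_fbatches, cpu) once and address each folio_batch
relative to it, instead of emitting a separate per-CPU lookup for every field.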
Diffstat (limited to 'mm/swap.c')
-rw-r--r--    mm/swap.c    24
1 file changed, 16 insertions(+), 8 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index 01e4e9c7d7a3..df78c4c4dbeb 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -765,6 +765,21 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
         lru_add_and_bh_lrus_drain();
 }
 
+static bool cpu_needs_drain(unsigned int cpu)
+{
+        struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
+
+        /* Check these in order of likelihood that they're not zero */
+        return folio_batch_count(&fbatches->lru_add) ||
+                data_race(folio_batch_count(&per_cpu(lru_rotate.fbatch, cpu))) ||
+                folio_batch_count(&fbatches->lru_deactivate_file) ||
+                folio_batch_count(&fbatches->lru_deactivate) ||
+                folio_batch_count(&fbatches->lru_lazyfree) ||
+                folio_batch_count(&fbatches->activate) ||
+                need_mlock_page_drain(cpu) ||
+                has_bh_in_lru(cpu, NULL);
+}
+
 /*
  * Doesn't need any cpu hotplug locking because we do rely on per-cpu
  * kworkers being shut down before our page_alloc_cpu_dead callback is
@@ -849,14 +864,7 @@ static inline void __lru_add_drain_all(bool force_all_cpus)
         for_each_online_cpu(cpu) {
                 struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
 
-                if (folio_batch_count(&per_cpu(cpu_fbatches.lru_add, cpu)) ||
-                    data_race(folio_batch_count(&per_cpu(lru_rotate.fbatch, cpu))) ||
-                    folio_batch_count(&per_cpu(cpu_fbatches.lru_deactivate_file, cpu)) ||
-                    folio_batch_count(&per_cpu(cpu_fbatches.lru_deactivate, cpu)) ||
-                    folio_batch_count(&per_cpu(cpu_fbatches.lru_lazyfree, cpu)) ||
-                    folio_batch_count(&per_cpu(cpu_fbatches.activate, cpu)) ||
-                    need_mlock_page_drain(cpu) ||
-                    has_bh_in_lru(cpu, NULL)) {
+                if (cpu_needs_drain(cpu)) {
                         INIT_WORK(work, lru_add_drain_per_cpu);
                         queue_work_on(cpu, mm_percpu_wq, work);
                         __cpumask_set_cpu(cpu, &has_work);