summaryrefslogtreecommitdiffstats
path: root/fs/bcachefs/btree_key_cache_types.h
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@linux.dev>2023-11-06 09:53:14 -0500
committerKent Overstreet <kent.overstreet@linux.dev>2023-11-13 21:45:01 -0500
commitc65c13f0eac61218c9ee4635c05661c0b9760e58 (patch)
treeed2b34dcc4e115a692c45a7f2b1be27d1ad99907 /fs/bcachefs/btree_key_cache_types.h
parent1bd5bcc9f5eef968ed021d72b14a157be7abdb49 (diff)
downloadlinux-c65c13f0eac61218c9ee4635c05661c0b9760e58.tar.gz
linux-c65c13f0eac61218c9ee4635c05661c0b9760e58.tar.bz2
linux-c65c13f0eac61218c9ee4635c05661c0b9760e58.zip
bcachefs: Run btree key cache shrinker less aggressively
The btree key cache maintains lists of items that have been freed, but can't yet be reclaimed because a bch2_trans_relock() call might find them - we're waiting for SRCU readers to release. Previously, we wouldn't count these items against the number we're attempting to scan for, which would mean we'd evict more live key cache entries - doing quite a bit of potentially unnecessary work. With recent work to make sure we don't hold SRCU locks for too long, it should be safe to count all the items on the freelists against number to scan - even if we can't reclaim them yet, we will be able to soon. Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/btree_key_cache_types.h')
-rw-r--r--fs/bcachefs/btree_key_cache_types.h4
1 files changed, 4 insertions, 0 deletions
diff --git a/fs/bcachefs/btree_key_cache_types.h b/fs/bcachefs/btree_key_cache_types.h
index 0f967808d766..290e4e57df5b 100644
--- a/fs/bcachefs/btree_key_cache_types.h
+++ b/fs/bcachefs/btree_key_cache_types.h
@@ -11,8 +11,12 @@ struct btree_key_cache {
struct mutex lock;
struct rhashtable table;
bool table_init_done;
+
struct list_head freed_pcpu;
+ size_t nr_freed_pcpu;
struct list_head freed_nonpcpu;
+ size_t nr_freed_nonpcpu;
+
struct shrinker *shrink;
unsigned shrink_iter;
struct btree_key_cache_freelist __percpu *pcpu_freed;