author    Kent Overstreet <kent.overstreet@linux.dev>    2022-12-05 10:24:19 -0500
committer Kent Overstreet <kent.overstreet@linux.dev>    2023-10-22 17:09:53 -0400
commit    80c33085783656617d0d07e1bc9fba70a592ce5c (patch)
tree      2f2a6b43a3b6caab092c4f74df9f5a582e1a60b0 /fs/bcachefs/alloc_background.h
parent    1b30ed5fd87828b5e29647510eefb18a363e4d19 (diff)
bcachefs: Fragmentation LRU
Now that we have much more efficient updates to the LRU btree, this patch adds a new LRU that indexes buckets by fragmentation.

This means copygc no longer has to scan every bucket to find buckets that need to be evacuated.

Changes:

 - A new field in bch_alloc_v4, fragmentation_lru - this corresponds to the bucket's position in the fragmentation LRU. We add a new field for this instead of calculating it as needed because we may make the fragmentation LRU optional; this field indicates whether a bucket is on the fragmentation LRU.

   Also, zoned devices will introduce variable bucket sizes; explicitly recording the LRU position will be safer for them.

 - A new copygc path for using the fragmentation LRU instead of scanning every bucket and building up an in-memory heap.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
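For a concrete feel of the key computed by alloc_lru_idx_fragmentation() in the diff below, here is a standalone user-space sketch (not kernel code; the bucket numbers are made up and the data-type check from the patch is omitted, only the scaling formula comes from it):

/* Sketch only: reimplements the patch's scaling of dirty sectors into a
 * fixed 0..2^31 range, so buckets of different sizes sort comparably in
 * the fragmentation LRU. */
#include <stdint.h>
#include <stdio.h>

static uint64_t frag_lru_idx(uint64_t dirty_sectors, uint64_t bucket_size)
{
	if (dirty_sectors >= bucket_size)	/* full bucket: kept off the LRU */
		return 0;
	return dirty_sectors * (1ULL << 31) / bucket_size;
}

int main(void)
{
	/* illustrative 512-sector bucket; lower key = less live data */
	printf("%llu\n", (unsigned long long) frag_lru_idx(64,  512));	/* 1 << 28 */
	printf("%llu\n", (unsigned long long) frag_lru_idx(256, 512));	/* 1 << 30 */
	printf("%llu\n", (unsigned long long) frag_lru_idx(512, 512));	/* 0 */
	return 0;
}

Emptier data buckets get smaller keys, so walking the LRU from the low end should hand copygc the cheapest buckets to evacuate first; full buckets (and buckets that are not btree or user data) get key 0 and stay off the fragmentation LRU entirely.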
Diffstat (limited to 'fs/bcachefs/alloc_background.h')
-rw-r--r--    fs/bcachefs/alloc_background.h    15
1 file changed, 14 insertions, 1 deletion
diff --git a/fs/bcachefs/alloc_background.h b/fs/bcachefs/alloc_background.h
index b3c2f1e0deb6..96ac8f396d46 100644
--- a/fs/bcachefs/alloc_background.h
+++ b/fs/bcachefs/alloc_background.h
@@ -64,11 +64,24 @@ static inline enum bch_data_type alloc_data_type(struct bch_alloc_v4 a,
 				 a.stripe, a, data_type);
 }
 
-static inline u64 alloc_lru_idx(struct bch_alloc_v4 a)
+static inline u64 alloc_lru_idx_read(struct bch_alloc_v4 a)
 {
 	return a.data_type == BCH_DATA_cached ? a.io_time[READ] : 0;
 }
 
+static inline u64 alloc_lru_idx_fragmentation(struct bch_alloc_v4 a,
+					       struct bch_dev *ca)
+{
+	if (a.data_type != BCH_DATA_btree &&
+	    a.data_type != BCH_DATA_user)
+		return 0;
+
+	if (a.dirty_sectors >= ca->mi.bucket_size)
+		return 0;
+
+	return div_u64((u64) a.dirty_sectors * (1ULL << 31), ca->mi.bucket_size);
+}
+
 static inline u64 alloc_freespace_genbits(struct bch_alloc_v4 a)
 {
 	return ((u64) alloc_gc_gen(a) >> 4) << 56;
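The copygc half of the change is not in this header; as a rough illustration of the idea described in the commit message -- consume buckets from a fragmentation-ordered structure instead of scanning every bucket into an in-memory heap -- here is a user-space toy in which every type and helper name is made up, not the actual bcachefs API:

/* User-space toy, not bcachefs code: buckets are keyed by the same
 * fragmentation index as above and processed lowest-key first. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_bucket {
	uint64_t nr;		/* bucket number */
	uint64_t dirty;		/* live sectors in the bucket */
	uint64_t frag_idx;	/* fragmentation LRU key */
};

static int cmp_frag(const void *l, const void *r)
{
	const struct toy_bucket *a = l, *b = r;

	return a->frag_idx < b->frag_idx ? -1 : a->frag_idx > b->frag_idx;
}

int main(void)
{
	const uint64_t bucket_size = 512;
	const uint64_t dirty[] = { 500, 64, 256, 8, 512 };
	struct toy_bucket lru[8];
	size_t n = 0;

	/* Only partially full buckets get an LRU entry (key 0 in the patch
	 * means "not on the fragmentation LRU"). */
	for (size_t i = 0; i < sizeof(dirty) / sizeof(dirty[0]); i++) {
		if (dirty[i] >= bucket_size)
			continue;
		lru[n++] = (struct toy_bucket) {
			.nr		= i,
			.dirty		= dirty[i],
			.frag_idx	= dirty[i] * (1ULL << 31) / bucket_size,
		};
	}
	qsort(lru, n, sizeof(lru[0]), cmp_frag);

	/* copygc-style pass: evacuate from the front, least live data first */
	for (size_t i = 0; i < n; i++)
		printf("evacuate bucket %llu (%llu dirty sectors)\n",
		       (unsigned long long) lru[i].nr,
		       (unsigned long long) lru[i].dirty);
	return 0;
}

The point of the ordering is that the cheapest evacuations (least live data to move per bucket reclaimed) come first, and buckets with nothing to reclaim never appear in the structure at all.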