author     Kent Overstreet <kent.overstreet@gmail.com>   2019-08-19 13:43:01 -0400
committer  Kent Overstreet <kent.overstreet@linux.dev>   2023-10-22 17:08:25 -0400
commit     e67ab0450cca7dc1673e4cd00eecf9d896b15889 (patch)
tree       8d50317ed64879a1b9b692c25d66685eaad1c930 /fs/bcachefs/bset.c
parent     9df279407a2daaf8e6586be483632fe9aaca6ef3 (diff)
bcachefs: Fix bch2_btree_node_iter_prev_filter()
bch2_btree_node_iter_prev_filter() tried to be smart about iterating backwards when skipping over whiteouts/discards - but unfortunately, doing so can leave the node iterator in an inconsistent state; the sane solution is to just always iterate backwards one key at a time.

But we compact btree nodes when more than a quarter of the keys are whiteouts/discards, so the optimization wasn't buying us that much anyway.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
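Assembled from the hunks below for readability: after this patch, bch2_btree_node_iter_prev_filter() is reduced to a retry loop around the one-key-at-a-time primitive bch2_btree_node_iter_prev_all(). Each pass through the loop leaves the iterator in a valid state, which is what the old skip-ahead path failed to guarantee:

    struct bkey_packed *bch2_btree_node_iter_prev_filter(struct btree_node_iter *iter,
                                                         struct btree *b,
                                                         unsigned min_key_type)
    {
            struct bkey_packed *prev;

            /* Rewind exactly one key per iteration, retrying until we find
             * a key that passes the filter (or run out of keys entirely). */
            do {
                    prev = bch2_btree_node_iter_prev_all(iter, b);
            } while (prev && prev->type < min_key_type);

            return prev;
    }

And since btree nodes are compacted once more than a quarter of their keys are whiteouts/discards, this loop should rarely need many retries, which is why the old optimization was cheap to give up.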
Diffstat (limited to 'fs/bcachefs/bset.c')
-rw-r--r--  fs/bcachefs/bset.c | 35 +++++++++++++++++------------------
 1 file changed, 17 insertions(+), 18 deletions(-)
diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c
index 78e6fd3f1306..1dd2bcc69c35 100644
--- a/fs/bcachefs/bset.c
+++ b/fs/bcachefs/bset.c
@@ -1682,12 +1682,10 @@ void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
 /*
  * Expensive:
  */
-struct bkey_packed *bch2_btree_node_iter_prev_filter(struct btree_node_iter *iter,
-                                                     struct btree *b,
-                                                     unsigned min_key_type)
+struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *iter,
+                                                  struct btree *b)
 {
         struct bkey_packed *k, *prev = NULL;
-        struct bkey_packed *orig_pos = bch2_btree_node_iter_peek_all(iter, b);
         struct btree_node_iter_set *set;
         struct bset_tree *t;
         unsigned end = 0;
@@ -1695,9 +1693,8 @@ struct bkey_packed *bch2_btree_node_iter_prev_filter(struct btree_node_iter *iter,
         bch2_btree_node_iter_verify(iter, b);
 
         for_each_bset(b, t) {
-                k = bch2_bkey_prev_filter(b, t,
-                        bch2_btree_node_iter_bset_pos(iter, b, t),
-                        min_key_type);
+                k = bch2_bkey_prev_all(b, t,
+                        bch2_btree_node_iter_bset_pos(iter, b, t));
                 if (k &&
                     (!prev || bkey_iter_cmp(b, k, prev) > 0)) {
                         prev = k;
@@ -1706,7 +1703,7 @@ struct bkey_packed *bch2_btree_node_iter_prev_filter(struct btree_node_iter *iter,
         }
 
         if (!prev)
-                goto out;
+                return NULL;
 
         /*
          * We're manually memmoving instead of just calling sort() to ensure the
@@ -1727,18 +1724,20 @@ found:
         iter->data[0].k = __btree_node_key_to_offset(b, prev);
         iter->data[0].end = end;
 
-out:
-        if (btree_keys_expensive_checks(b)) {
-                struct btree_node_iter iter2 = *iter;
-                if (prev)
-                        __bch2_btree_node_iter_advance(&iter2, b);
+        bch2_btree_node_iter_verify(iter, b);
+        return prev;
+}
 
-                while ((k = bch2_btree_node_iter_peek_all(&iter2, b)) != orig_pos) {
-                        BUG_ON(k->type >= min_key_type);
-                        __bch2_btree_node_iter_advance(&iter2, b);
-                }
-        }
+struct bkey_packed *bch2_btree_node_iter_prev_filter(struct btree_node_iter *iter,
+                                                     struct btree *b,
+                                                     unsigned min_key_type)
+{
+        struct bkey_packed *prev;
+
+        do {
+                prev = bch2_btree_node_iter_prev_all(iter, b);
+        } while (prev && prev->type < min_key_type);
 
         return prev;
 }
 
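For context, here is how a caller might walk a node backwards over live keys using the fixed helper. This is an illustrative sketch only: the KEY_TYPE_discard + 1 threshold is an assumed filter value (key types at or below KEY_TYPE_discard are deleted keys and discards), and process_key() is a hypothetical callback, not something from this patch:

    /* Illustrative sketch (not from this patch): iterate backwards over
     * non-whiteout keys.  KEY_TYPE_discard + 1 as min_key_type is an
     * assumption; process_key() is hypothetical. */
    static void walk_node_backwards(struct btree_node_iter *iter, struct btree *b)
    {
            struct bkey_packed *k;

            while ((k = bch2_btree_node_iter_prev_filter(iter, b,
                                                         KEY_TYPE_discard + 1)))
                    process_key(b, k);
    }

Because each bch2_btree_node_iter_prev_all() step now re-establishes the iterator's invariants before returning, such a loop no longer risks observing the iterator in the inconsistent state the old skip-ahead code could produce.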