summaryrefslogtreecommitdiffstats
path: root/fs/bcachefs/extent_update.c
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@gmail.com>2021-02-10 16:13:57 -0500
committerKent Overstreet <kent.overstreet@linux.dev>2023-10-22 17:08:53 -0400
commit8042b5b715e6722fb26e40724b87f93b4b777acf (patch)
tree5758d125a3fd04085b0dc7c5f24b2def52f8dce0 /fs/bcachefs/extent_update.c
parent7e1a3aa9dfcb9cd8f46085df86f158a1f23085dc (diff)
downloadlinux-8042b5b715e6722fb26e40724b87f93b4b777acf.tar.gz
linux-8042b5b715e6722fb26e40724b87f93b4b777acf.tar.bz2
linux-8042b5b715e6722fb26e40724b87f93b4b777acf.zip
bcachefs: Extents may now cross btree node boundaries
When snapshots arrive, we won't necessarily be able to arbitrarily split existing extents - when we need to split an existing extent, we'll have to check if the extent was overwritten in child snapshots and if so emit a whiteout for the split in the child snapshot. Because extents couldn't span btree nodes previously, journal replay would sometimes have to split existing extents. That's no good anymore, but fortunately since extent handling has already been lifted above most of the btree code there's no real need for that rule anymore. Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com> Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/extent_update.c')
-rw-r--r--fs/bcachefs/extent_update.c29
1 file changed, 8 insertions, 21 deletions
diff --git a/fs/bcachefs/extent_update.c b/fs/bcachefs/extent_update.c
index 5c43678e94a3..16d2bca8a662 100644
--- a/fs/bcachefs/extent_update.c
+++ b/fs/bcachefs/extent_update.c
@@ -99,24 +99,12 @@ int bch2_extent_atomic_end(struct btree_iter *iter,
struct bpos *end)
{
struct btree_trans *trans = iter->trans;
- struct btree *b;
- struct btree_node_iter node_iter;
- struct bkey_packed *_k;
- unsigned nr_iters = 0;
+ struct btree_iter *copy;
+ struct bkey_s_c k;
+ unsigned nr_iters = 0;
int ret;
- ret = bch2_btree_iter_traverse(iter);
- if (ret)
- return ret;
-
- b = iter->l[0].b;
- node_iter = iter->l[0].iter;
-
- BUG_ON(bkey_cmp(b->data->min_key, POS_MIN) &&
- bkey_cmp(bkey_start_pos(&insert->k),
- bkey_predecessor(b->data->min_key)) < 0);
-
- *end = bpos_min(insert->k.p, b->key.k.p);
+ *end = insert->k.p;
/* extent_update_to_keys(): */
nr_iters += 1;
@@ -126,9 +114,9 @@ int bch2_extent_atomic_end(struct btree_iter *iter,
if (ret < 0)
return ret;
- while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
- struct bkey unpacked;
- struct bkey_s_c k = bkey_disassemble(b, _k, &unpacked);
+ copy = bch2_trans_copy_iter(trans, iter);
+
+ for_each_btree_key_continue(copy, 0, k, ret) {
unsigned offset = 0;
if (bkey_cmp(bkey_start_pos(k.k), *end) >= 0)
@@ -155,10 +143,9 @@ int bch2_extent_atomic_end(struct btree_iter *iter,
&nr_iters, EXTENT_ITERS_MAX);
if (ret)
break;
-
- bch2_btree_node_iter_advance(&node_iter, b);
}
+ bch2_trans_iter_put(trans, copy);
return ret < 0 ? ret : 0;
}