author     Kent Overstreet <kent.overstreet@gmail.com>    2022-07-17 00:44:19 -0400
committer  Kent Overstreet <kent.overstreet@linux.dev>    2023-10-22 17:09:36 -0400
commit     326568f18cb57ed95a420361da9a64330e122cda (patch)
tree       9dd4f9a1fa2aceedf206edd29ae9deb6461cc880
parent     eace11a730b36e7e8ee184675927ce7e658e7616 (diff)
bcachefs: Convert bch2_gc_done() to for_each_btree_key2()
This converts bch2_gc_stripes_done() and bch2_gc_reflink_done() to the new for_each_btree_key_commit() macro.

The new for_each_btree_key2() and for_each_btree_key_commit() macros handle transaction retries, allowing us to avoid nested transactions - which we want to avoid since they're tricky to do completely correctly, and upcoming assertions are going to be checking for that.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
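For reference, the shape of the conversion applied throughout this patch is sketched below. This is a minimal, illustrative sketch distilled from the hunks that follow, not a drop-in snippet: gc_write_key() is a hypothetical stand-in for the real per-key helpers (bch2_alloc_write_key(), bch2_gc_write_reflink_key(), bch2_gc_write_stripes_key()), the start position is simplified to POS_MIN, and trans/iter/k/ret are assumed to be declared as in those functions.

/*
 * Before: open-coded iteration with a nested transaction commit
 * (commit_do()) per key, plus manual iterator teardown.
 */
for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
		   BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k, ret) {
	ret = commit_do(&trans, NULL, NULL, BTREE_INSERT_LAZY_RW,
			gc_write_key(&trans, &iter, k));  /* hypothetical helper */
	if (ret)
		break;
}
bch2_trans_iter_exit(&trans, &iter);

/*
 * After: for_each_btree_key_commit() owns the commit loop and handles
 * transaction retries itself; the final argument is the per-key
 * expression, so no nested transaction is needed.
 */
ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc, POS_MIN,
				BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k,
				NULL, NULL, BTREE_INSERT_LAZY_RW,
				gc_write_key(&trans, &iter, k));

The per-key helpers accordingly take the current bkey_s_c as an argument and queue their changes with bch2_trans_update(), allocating via bch2_trans_kmalloc() instead of kmalloc()/kfree() inside the loop body.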
-rw-r--r--   fs/bcachefs/btree_gc.c   232
1 file changed, 117 insertions(+), 115 deletions(-)
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index f72a5ceb130b..7a7639e9ee3f 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -1321,21 +1321,19 @@ static inline bool bch2_alloc_v4_cmp(struct bch_alloc_v4 l,
static int bch2_alloc_write_key(struct btree_trans *trans,
struct btree_iter *iter,
+ struct bkey_s_c k,
bool metadata_only)
{
struct bch_fs *c = trans->c;
struct bch_dev *ca = bch_dev_bkey_exists(c, iter->pos.inode);
struct bucket gc, *b;
- struct bkey_s_c k;
struct bkey_i_alloc_v4 *a;
struct bch_alloc_v4 old, new;
enum bch_data_type type;
int ret;
- k = bch2_btree_iter_peek_slot(iter);
- ret = bkey_err(k);
- if (ret)
- return ret;
+ if (bkey_cmp(iter->pos, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
+ return 1;
bch2_alloc_to_v4(k, &old);
new = old;
@@ -1428,23 +1426,13 @@ static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
bch2_trans_init(&trans, c, 0, 0);
for_each_member_device(ca, c, i) {
- for_each_btree_key(&trans, iter, BTREE_ID_alloc,
- POS(ca->dev_idx, ca->mi.first_bucket),
- BTREE_ITER_SLOTS|
- BTREE_ITER_PREFETCH, k, ret) {
- if (bkey_cmp(iter.pos, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
- break;
+ ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc,
+ POS(ca->dev_idx, ca->mi.first_bucket),
+ BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k,
+ NULL, NULL, BTREE_INSERT_LAZY_RW,
+ bch2_alloc_write_key(&trans, &iter, k, metadata_only));
- ret = commit_do(&trans, NULL, NULL,
- BTREE_INSERT_LAZY_RW,
- bch2_alloc_write_key(&trans, &iter,
- metadata_only));
- if (ret)
- break;
- }
- bch2_trans_iter_exit(&trans, &iter);
-
- if (ret) {
+ if (ret < 0) {
bch_err(c, "error writing alloc info: %i", ret);
percpu_ref_put(&ca->ref);
break;
@@ -1452,7 +1440,7 @@ static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
}
bch2_trans_exit(&trans);
- return ret;
+ return ret < 0 ? ret : 0;
}
static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
@@ -1536,72 +1524,79 @@ static void bch2_gc_alloc_reset(struct bch_fs *c, bool metadata_only)
};
}
-static int bch2_gc_reflink_done(struct bch_fs *c, bool metadata_only)
+static int bch2_gc_write_reflink_key(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k,
+ size_t *idx)
{
- struct btree_trans trans;
- struct btree_iter iter;
- struct bkey_s_c k;
- struct reflink_gc *r;
- size_t idx = 0;
+ struct bch_fs *c = trans->c;
+ const __le64 *refcount = bkey_refcount_c(k);
struct printbuf buf = PRINTBUF;
+ struct reflink_gc *r;
int ret = 0;
- if (metadata_only)
+ if (!refcount)
return 0;
- bch2_trans_init(&trans, c, 0, 0);
+ while ((r = genradix_ptr(&c->reflink_gc_table, *idx)) &&
+ r->offset < k.k->p.offset)
+ ++*idx;
- for_each_btree_key(&trans, iter, BTREE_ID_reflink, POS_MIN,
- BTREE_ITER_PREFETCH, k, ret) {
- const __le64 *refcount = bkey_refcount_c(k);
+ if (!r ||
+ r->offset != k.k->p.offset ||
+ r->size != k.k->size) {
+ bch_err(c, "unexpected inconsistency walking reflink table at gc finish");
+ return -EINVAL;
+ }
- if (!refcount)
- continue;
+ if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), c,
+ "reflink key has wrong refcount:\n"
+ " %s\n"
+ " should be %u",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf),
+ r->refcount)) {
+ struct bkey_i *new;
- r = genradix_ptr(&c->reflink_gc_table, idx++);
- if (!r ||
- r->offset != k.k->p.offset ||
- r->size != k.k->size) {
- bch_err(c, "unexpected inconsistency walking reflink table at gc finish");
- ret = -EINVAL;
- break;
- }
+ new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+ ret = PTR_ERR_OR_ZERO(new);
+ if (ret)
+ return ret;
- if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), c,
- "reflink key has wrong refcount:\n"
- " %s\n"
- " should be %u",
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf),
- r->refcount)) {
- struct bkey_i *new;
+ bkey_reassemble(new, k);
- new = kmalloc(bkey_bytes(k.k), GFP_KERNEL);
- if (!new) {
- ret = -ENOMEM;
- break;
- }
+ if (!r->refcount)
+ new->k.type = KEY_TYPE_deleted;
+ else
+ *bkey_refcount(new) = cpu_to_le64(r->refcount);
- bkey_reassemble(new, k);
+ ret = bch2_trans_update(trans, iter, new, 0);
+ }
+fsck_err:
+ printbuf_exit(&buf);
+ return ret;
+}
- if (!r->refcount)
- new->k.type = KEY_TYPE_deleted;
- else
- *bkey_refcount(new) = cpu_to_le64(r->refcount);
+static int bch2_gc_reflink_done(struct bch_fs *c, bool metadata_only)
+{
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ size_t idx = 0;
+ int ret = 0;
- ret = commit_do(&trans, NULL, NULL, 0,
- __bch2_btree_insert(&trans, BTREE_ID_reflink, new));
- kfree(new);
+ if (metadata_only)
+ return 0;
+
+ bch2_trans_init(&trans, c, 0, 0);
+
+ ret = for_each_btree_key_commit(&trans, iter,
+ BTREE_ID_reflink, POS_MIN,
+ BTREE_ITER_PREFETCH, k,
+ NULL, NULL, BTREE_INSERT_NOFAIL,
+ bch2_gc_write_reflink_key(&trans, &iter, k, &idx));
- if (ret)
- break;
- }
- }
-fsck_err:
- bch2_trans_iter_exit(&trans, &iter);
c->reflink_gc_nr = 0;
bch2_trans_exit(&trans);
- printbuf_exit(&buf);
return ret;
}
@@ -1653,66 +1648,73 @@ static void bch2_gc_reflink_reset(struct bch_fs *c, bool metadata_only)
r->refcount = 0;
}
-static int bch2_gc_stripes_done(struct bch_fs *c, bool metadata_only)
+static int bch2_gc_write_stripes_key(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k)
{
- struct btree_trans trans;
- struct btree_iter iter;
- struct bkey_s_c k;
- struct gc_stripe *m;
- const struct bch_stripe *s;
+ struct bch_fs *c = trans->c;
struct printbuf buf = PRINTBUF;
+ const struct bch_stripe *s;
+ struct gc_stripe *m;
unsigned i;
int ret = 0;
- if (metadata_only)
+ if (k.k->type != KEY_TYPE_stripe)
return 0;
- bch2_trans_init(&trans, c, 0, 0);
+ s = bkey_s_c_to_stripe(k).v;
+ m = genradix_ptr(&c->gc_stripes, k.k->p.offset);
- for_each_btree_key(&trans, iter, BTREE_ID_stripes, POS_MIN,
- BTREE_ITER_PREFETCH, k, ret) {
- if (k.k->type != KEY_TYPE_stripe)
- continue;
-
- s = bkey_s_c_to_stripe(k).v;
- m = genradix_ptr(&c->gc_stripes, k.k->p.offset);
-
- for (i = 0; i < s->nr_blocks; i++)
- if (stripe_blockcount_get(s, i) != (m ? m->block_sectors[i] : 0))
- goto inconsistent;
- continue;
+ for (i = 0; i < s->nr_blocks; i++)
+ if (stripe_blockcount_get(s, i) != (m ? m->block_sectors[i] : 0))
+ goto inconsistent;
+ return 0;
inconsistent:
- if (fsck_err_on(true, c,
- "stripe has wrong block sector count %u:\n"
- " %s\n"
- " should be %u", i,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf),
- m ? m->block_sectors[i] : 0)) {
- struct bkey_i_stripe *new;
-
- new = kmalloc(bkey_bytes(k.k), GFP_KERNEL);
- if (!new) {
- ret = -ENOMEM;
- break;
- }
+ if (fsck_err_on(true, c,
+ "stripe has wrong block sector count %u:\n"
+ " %s\n"
+ " should be %u", i,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf),
+ m ? m->block_sectors[i] : 0)) {
+ struct bkey_i_stripe *new;
+
+ new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+ ret = PTR_ERR_OR_ZERO(new);
+ if (ret)
+ return ret;
- bkey_reassemble(&new->k_i, k);
+ bkey_reassemble(&new->k_i, k);
- for (i = 0; i < new->v.nr_blocks; i++)
- stripe_blockcount_set(&new->v, i, m ? m->block_sectors[i] : 0);
+ for (i = 0; i < new->v.nr_blocks; i++)
+ stripe_blockcount_set(&new->v, i, m ? m->block_sectors[i] : 0);
- ret = commit_do(&trans, NULL, NULL, 0,
- __bch2_btree_insert(&trans, BTREE_ID_reflink, &new->k_i));
- kfree(new);
- }
+ ret = bch2_trans_update(trans, iter, &new->k_i, 0);
}
fsck_err:
- bch2_trans_iter_exit(&trans, &iter);
+ printbuf_exit(&buf);
+ return ret;
+}
- bch2_trans_exit(&trans);
+static int bch2_gc_stripes_done(struct bch_fs *c, bool metadata_only)
+{
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret = 0;
- printbuf_exit(&buf);
+ if (metadata_only)
+ return 0;
+
+ bch2_trans_init(&trans, c, 0, 0);
+
+ ret = for_each_btree_key_commit(&trans, iter,
+ BTREE_ID_stripes, POS_MIN,
+ BTREE_ITER_PREFETCH, k,
+ NULL, NULL, BTREE_INSERT_NOFAIL,
+ bch2_gc_write_stripes_key(&trans, &iter, k));
+
+ bch2_trans_exit(&trans);
return ret;
}