author     Kent Overstreet <kent.overstreet@linux.dev>  2023-02-11 12:57:04 -0500
committer  Kent Overstreet <kent.overstreet@linux.dev>  2023-10-22 17:09:52 -0400
commit     a1f26d700aa51fc942ca07ee501b9117075c84e0 (patch)
tree       49ee1f1dbc4d5877953f9d902565063fc1eeb8ec
parent     09d70d0be1d5670a9df24656c5e429ab4f239c16 (diff)
bcachefs: Handle btree node rewrites before going RW
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
-rw-r--r--  fs/bcachefs/bcachefs.h               |  3
-rw-r--r--  fs/bcachefs/btree_update_interior.c  | 65
-rw-r--r--  fs/bcachefs/btree_update_interior.h  |  3
-rw-r--r--  fs/bcachefs/super.c                  |  2
4 files changed, 66 insertions, 7 deletions
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 6089d9ed6c27..84b30adf56c9 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -780,6 +780,9 @@ struct bch_fs {
         struct workqueue_struct *btree_interior_update_worker;
         struct work_struct      btree_interior_update_work;
 
+        struct list_head        pending_node_rewrites;
+        struct mutex            pending_node_rewrites_lock;
+
         /* btree_io.c: */
         spinlock_t              btree_write_error_lock;
         struct btree_write_stats {
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 612d0007fb23..45004f17d51d 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -1998,6 +1998,7 @@ err:
 struct async_btree_rewrite {
         struct bch_fs           *c;
         struct work_struct      work;
+        struct list_head        list;
         enum btree_id           btree_id;
         unsigned                level;
         struct bpos             pos;
@@ -2057,15 +2058,10 @@ void async_btree_node_rewrite_work(struct work_struct *work)
 void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b)
 {
         struct async_btree_rewrite *a;
-
-        if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_node_rewrite)) {
-                bch_err(c, "%s: error getting c->writes ref", __func__);
-                return;
-        }
+        int ret;
 
         a = kmalloc(sizeof(*a), GFP_NOFS);
         if (!a) {
-                bch2_write_ref_put(c, BCH_WRITE_REF_node_rewrite);
                 bch_err(c, "%s: error allocating memory", __func__);
                 return;
         }
@@ -2075,11 +2071,63 @@ void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b)
         a->level        = b->c.level;
         a->pos          = b->key.k.p;
         a->seq          = b->data->keys.seq;
-
         INIT_WORK(&a->work, async_btree_node_rewrite_work);
+
+        if (unlikely(!test_bit(BCH_FS_MAY_GO_RW, &c->flags))) {
+                mutex_lock(&c->pending_node_rewrites_lock);
+                list_add(&a->list, &c->pending_node_rewrites);
+                mutex_unlock(&c->pending_node_rewrites_lock);
+                return;
+        }
+
+        if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_node_rewrite)) {
+                if (test_bit(BCH_FS_STARTED, &c->flags)) {
+                        bch_err(c, "%s: error getting c->writes ref", __func__);
+                        kfree(a);
+                        return;
+                }
+
+                ret = bch2_fs_read_write_early(c);
+                if (ret) {
+                        bch_err(c, "%s: error going read-write: %s",
+                                __func__, bch2_err_str(ret));
+                        kfree(a);
+                        return;
+                }
+
+                bch2_write_ref_get(c, BCH_WRITE_REF_node_rewrite);
+        }
+
         queue_work(c->btree_interior_update_worker, &a->work);
 }
 
+void bch2_do_pending_node_rewrites(struct bch_fs *c)
+{
+        struct async_btree_rewrite *a, *n;
+
+        mutex_lock(&c->pending_node_rewrites_lock);
+        list_for_each_entry_safe(a, n, &c->pending_node_rewrites, list) {
+                list_del(&a->list);
+
+                bch2_write_ref_get(c, BCH_WRITE_REF_node_rewrite);
+                queue_work(c->btree_interior_update_worker, &a->work);
+        }
+        mutex_unlock(&c->pending_node_rewrites_lock);
+}
+
+void bch2_free_pending_node_rewrites(struct bch_fs *c)
+{
+        struct async_btree_rewrite *a, *n;
+
+        mutex_lock(&c->pending_node_rewrites_lock);
+        list_for_each_entry_safe(a, n, &c->pending_node_rewrites, list) {
+                list_del(&a->list);
+
+                kfree(a);
+        }
+        mutex_unlock(&c->pending_node_rewrites_lock);
+}
+
 static int __bch2_btree_node_update_key(struct btree_trans *trans,
                                         struct btree_iter *iter,
                                         struct btree *b, struct btree *new_hash,
@@ -2417,6 +2465,9 @@ int bch2_fs_btree_interior_update_init(struct bch_fs *c)
         mutex_init(&c->btree_interior_update_lock);
         INIT_WORK(&c->btree_interior_update_work, btree_interior_update_work);
 
+        INIT_LIST_HEAD(&c->pending_node_rewrites);
+        mutex_init(&c->pending_node_rewrites_lock);
+
         c->btree_interior_update_worker =
                 alloc_workqueue("btree_update", WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
         if (!c->btree_interior_update_worker)
diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h
index 2e6d220c3bcd..30e9c137b0e2 100644
--- a/fs/bcachefs/btree_update_interior.h
+++ b/fs/bcachefs/btree_update_interior.h
@@ -318,6 +318,9 @@ void bch2_journal_entries_to_btree_roots(struct bch_fs *, struct jset *);
 struct jset_entry *bch2_btree_roots_to_journal_entries(struct bch_fs *,
                                         struct jset_entry *, struct jset_entry *);
 
+void bch2_do_pending_node_rewrites(struct bch_fs *);
+void bch2_free_pending_node_rewrites(struct bch_fs *);
+
 void bch2_fs_btree_interior_update_exit(struct bch_fs *);
 int bch2_fs_btree_interior_update_init(struct bch_fs *);
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index e142de2a5527..58517f6d128f 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -418,6 +418,7 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
         bch2_do_discards(c);
         bch2_do_invalidates(c);
         bch2_do_stripe_deletes(c);
+        bch2_do_pending_node_rewrites(c);
         return 0;
 err:
         __bch2_fs_read_only(c);
@@ -446,6 +447,7 @@ static void __bch2_fs_free(struct bch_fs *c)
         for (i = 0; i < BCH_TIME_STAT_NR; i++)
                 bch2_time_stats_exit(&c->times[i]);
 
+        bch2_free_pending_node_rewrites(c);
         bch2_fs_counters_exit(c);
         bch2_fs_snapshots_exit(c);
         bch2_fs_quota_exit(c);
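
The mechanism the patch adds is a small deferred-work pattern: a btree node rewrite requested before the filesystem may go read-write is parked on a mutex-protected list instead of being dropped, and the list is drained once read-write is reached. Below is a minimal standalone C sketch of that pattern; the names (fs_state, pending_rewrite, process_rewrite) are illustrative only and are not bcachefs identifiers.

/*
 * Sketch of the "park until read-write" pattern: requests that arrive
 * too early go on a mutex-protected list; they are drained when the
 * filesystem is allowed to go read-write.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct pending_rewrite {
        struct pending_rewrite  *next;
        unsigned                btree_id;
        unsigned                level;
};

struct fs_state {
        bool                    may_go_rw;
        pthread_mutex_t         pending_lock;
        struct pending_rewrite  *pending;       /* head of singly linked list */
};

/* stand-in for queue_work(): process the rewrite immediately */
static void process_rewrite(struct pending_rewrite *r)
{
        printf("rewriting btree %u level %u\n", r->btree_id, r->level);
        free(r);
}

static void rewrite_async(struct fs_state *c, unsigned btree_id, unsigned level)
{
        struct pending_rewrite *r = malloc(sizeof(*r));

        if (!r)
                return;
        r->btree_id = btree_id;
        r->level    = level;

        if (!c->may_go_rw) {
                /* too early to write: park the request instead of dropping it */
                pthread_mutex_lock(&c->pending_lock);
                r->next    = c->pending;
                c->pending = r;
                pthread_mutex_unlock(&c->pending_lock);
                return;
        }

        process_rewrite(r);
}

/* called once the filesystem is allowed to go read-write */
static void do_pending_rewrites(struct fs_state *c)
{
        struct pending_rewrite *r;

        pthread_mutex_lock(&c->pending_lock);
        while ((r = c->pending)) {
                c->pending = r->next;
                process_rewrite(r);
        }
        pthread_mutex_unlock(&c->pending_lock);
}

int main(void)
{
        struct fs_state c = { .pending_lock = PTHREAD_MUTEX_INITIALIZER };

        rewrite_async(&c, 0, 1);        /* parked: not read-write yet */
        c.may_go_rw = true;
        do_pending_rewrites(&c);        /* drained when going read-write */
        return 0;
}

A real user of this pattern also needs a teardown path that frees any still-parked entries if read-write is never reached, which is the role bch2_free_pending_node_rewrites() plays in the patch.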