author    Kent Overstreet <kent.overstreet@gmail.com>    2021-03-05 18:00:55 -0500
committer Kent Overstreet <kent.overstreet@linux.dev>    2023-10-22 17:08:53 -0400
commit    59a7405161425df39d33faabf9f97c101fcb75d9
tree      ec21bf092375e2c5d62f40b010da510c95ca64d8
parent    18a7b97239b6f0bae3fa1475cb276a273e07597a
bcachefs: Create allocator threads when allocating filesystem
We're seeing failures to mount because starting the allocator threads fails: that currently happens fairly late in the mount process, after walking all metadata, and kthread_create() fails if something has tried to kill the mount process, which is probably not what we want.

This patch avoids the issue by creating, but not starting, the allocator threads when we preallocate all of our other in-memory data structures.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
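The approach described above is the usual kernel "create now, do work later" kthread pattern: the thread is brought up early, but parks on a flag until the filesystem goes read-write. Below is a minimal sketch of that pattern using only stock kthread/waitqueue primitives; the demo_* names and the single flag bit are illustrative assumptions, not the actual bcachefs helpers.

/*
 * Sketch only (not the bcachefs code): create the worker thread early,
 * let it block until a "may run" bit is set, then wake it later.
 */
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>

struct demo_dev {
	struct task_struct	*alloc_thread;
	unsigned long		flags;		/* bit 0: allocator may run */
	wait_queue_head_t	wait;
};

static int demo_allocator_thread(void *arg)
{
	struct demo_dev *d = arg;

	while (!kthread_should_stop()) {
		/* Park until the filesystem flips the "running" bit. */
		wait_event_interruptible(d->wait,
				test_bit(0, &d->flags) ||
				kthread_should_stop());
		if (kthread_should_stop())
			break;

		/* ... do the actual allocator work here ... */
		cond_resched();
	}
	return 0;
}

/* Called early, while preallocating in-memory structures. */
static int demo_allocator_create(struct demo_dev *d)
{
	struct task_struct *t;

	init_waitqueue_head(&d->wait);
	t = kthread_create(demo_allocator_thread, d, "demo-allocator");
	if (IS_ERR(t))
		return PTR_ERR(t);

	d->alloc_thread = t;
	wake_up_process(t);	/* thread runs, then blocks in wait_event */
	return 0;
}

/* Called later, when the filesystem goes read-write. */
static void demo_allocator_go_rw(struct demo_dev *d)
{
	set_bit(0, &d->flags);
	wake_up(&d->wait);
}

Teardown would symmetrically call kthread_stop() on d->alloc_thread, which also wakes the thread out of wait_event_interruptible().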
Diffstat (limited to 'fs/bcachefs')
 fs/bcachefs/alloc_background.c | 15 ++++++++++++++-
 fs/bcachefs/super.c            | 11 +++++++++++
 2 files changed, 25 insertions(+), 1 deletion(-)
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index eac82c9880ba..b9b97cbda177 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -1068,6 +1068,12 @@ static int discard_invalidated_buckets(struct bch_fs *c, struct bch_dev *ca)
return 0;
}
+static inline bool allocator_thread_running(struct bch_dev *ca)
+{
+ return ca->mi.state == BCH_MEMBER_STATE_RW &&
+ test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags);
+}
+
/**
* bch_allocator_thread - move buckets from free_inc to reserves
*
@@ -1084,9 +1090,16 @@ static int bch2_allocator_thread(void *arg)
int ret;
set_freezable();
- ca->allocator_state = ALLOCATOR_RUNNING;
while (1) {
+ if (!allocator_thread_running(ca)) {
+ ca->allocator_state = ALLOCATOR_STOPPED;
+ if (kthread_wait_freezable(allocator_thread_running(ca)))
+ break;
+ }
+
+ ca->allocator_state = ALLOCATOR_RUNNING;
+
cond_resched();
if (kthread_should_stop())
break;
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index e10e7e0c0454..224c21c3f9f7 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -431,6 +431,9 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
set_bit(BCH_FS_ALLOCATOR_RUNNING, &c->flags);
+ for_each_rw_member(ca, c, i)
+ bch2_wake_allocator(ca);
+
ret = bch2_journal_reclaim_start(&c->journal);
if (ret) {
bch_err(c, "error starting journal reclaim: %i", ret);
@@ -1008,6 +1011,8 @@ static void bch2_dev_release(struct kobject *kobj)
static void bch2_dev_free(struct bch_dev *ca)
{
+ bch2_dev_allocator_stop(ca);
+
cancel_work_sync(&ca->io_error_work);
if (ca->kobj.state_in_sysfs &&
@@ -1172,6 +1177,12 @@ static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx)
if (!ca)
goto err;
+ if (ca->mi.state == BCH_MEMBER_STATE_RW &&
+ bch2_dev_allocator_start(ca)) {
+ bch2_dev_free(ca);
+ goto err;
+ }
+
bch2_dev_attach(c, ca, dev_idx);
out:
pr_verbose_init(c->opts, "ret %i", ret);