author		Kent Overstreet <kent.overstreet@linux.dev>	2023-11-20 17:24:32 -0500
committer	Kent Overstreet <kent.overstreet@linux.dev>	2023-11-24 02:08:25 -0500
commit		50e029c6390a6795869b742a5fce1e57d6a76c82 (patch)
tree		072b0f894d0989609a47db26900eda354ca50bba /fs
parent		6201d91ee32cf92e9bcca69a3cf73461827b5ce5 (diff)
bcachefs: bch2_moving_ctxt_flush_all()
Introduce a new helper that flushes all move IOs, and use it in the few
places that should already have been doing this. The new helper also drops
btree locks before waiting on outstanding move writes, avoiding potential
deadlocks.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs')
-rw-r--r--	fs/bcachefs/move.c	16
1 file changed, 11 insertions, 5 deletions
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index ab749bf2fcbc..7819ed8d9df9 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -163,12 +163,18 @@ void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt)
 		atomic_read(&ctxt->write_sectors) != sectors_pending);
 }
 
+static void bch2_moving_ctxt_flush_all(struct moving_context *ctxt)
+{
+	move_ctxt_wait_event(ctxt, list_empty(&ctxt->reads));
+	bch2_trans_unlock_long(ctxt->trans);
+	closure_sync(&ctxt->cl);
+}
+
 void bch2_moving_ctxt_exit(struct moving_context *ctxt)
 {
 	struct bch_fs *c = ctxt->trans->c;
 
-	move_ctxt_wait_event(ctxt, list_empty(&ctxt->reads));
-	closure_sync(&ctxt->cl);
+	bch2_moving_ctxt_flush_all(ctxt);
 
 	EBUG_ON(atomic_read(&ctxt->write_sectors));
 	EBUG_ON(atomic_read(&ctxt->write_ios));
@@ -484,8 +490,8 @@ int bch2_move_ratelimit(struct moving_context *ctxt)
 	struct bch_fs *c = ctxt->trans->c;
 	u64 delay;
 
-	if (ctxt->wait_on_copygc && !c->copygc_running) {
-		bch2_trans_unlock_long(ctxt->trans);
+	if (ctxt->wait_on_copygc && c->copygc_running) {
+		bch2_moving_ctxt_flush_all(ctxt);
 		wait_event_killable(c->copygc_running_wq,
 				    !c->copygc_running ||
 				    kthread_should_stop());
@@ -512,7 +518,7 @@ int bch2_move_ratelimit(struct moving_context *ctxt)
 		schedule_timeout(delay);
 
 		if (unlikely(freezing(current))) {
-			move_ctxt_wait_event(ctxt, list_empty(&ctxt->reads));
+			bch2_moving_ctxt_flush_all(ctxt);
 			try_to_freeze();
 		}
 	} while (delay);
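
The ordering inside bch2_moving_ctxt_flush_all() is the point of the fix: wait for reads to drain, drop btree locks via bch2_trans_unlock_long(), and only then block in closure_sync() on outstanding writes, since a move write may itself need btree locks before it can complete. The standalone sketch below is illustration only, a userspace pthread analogue of that drop-locks-before-waiting pattern; none of its names are bcachefs APIs, tree_lock merely stands in for btree locks and write_pending for an outstanding move write.

/*
 * Illustration only: a userspace analogue of "unlock before waiting for IO".
 * None of these names are bcachefs APIs.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;	/* stands in for btree locks */
static pthread_mutex_t io_lock   = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  io_done   = PTHREAD_COND_INITIALIZER;
static bool write_pending = true;				/* an outstanding "move write" */

/* "Write completion": needs tree_lock before it can signal completion,
 * just as a move write may need btree locks to finish. */
static void *completion_thread(void *arg)
{
	(void)arg;

	pthread_mutex_lock(&tree_lock);		/* blocks while the flusher holds it */
	pthread_mutex_unlock(&tree_lock);

	pthread_mutex_lock(&io_lock);
	write_pending = false;
	pthread_cond_signal(&io_done);
	pthread_mutex_unlock(&io_lock);
	return NULL;
}

/* Flush path: drop the "btree locks" first, then wait for outstanding writes.
 * Waiting while still holding tree_lock would deadlock with the thread above. */
static void flush_all(void)
{
	pthread_mutex_unlock(&tree_lock);	/* the bch2_trans_unlock_long() step */

	pthread_mutex_lock(&io_lock);
	while (write_pending)			/* the closure_sync() step */
		pthread_cond_wait(&io_done, &io_lock);
	pthread_mutex_unlock(&io_lock);
}

int main(void)
{
	pthread_t t;

	pthread_mutex_lock(&tree_lock);		/* caller enters holding "btree locks" */
	pthread_create(&t, NULL, completion_thread, NULL);
	flush_all();				/* completes instead of deadlocking */
	pthread_join(&t, NULL);
	printf("flushed without deadlock\n");
	return 0;
}

Built with gcc -pthread, the program only exits cleanly because flush_all() releases tree_lock before sleeping, which is the step the new helper adds around closure_sync() relative to the old exit and freeze paths.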