summaryrefslogtreecommitdiffstats
path: root/fs/bcachefs/io_misc.c
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@linux.dev>2023-09-04 05:38:30 -0400
committerKent Overstreet <kent.overstreet@linux.dev>2023-10-22 17:10:12 -0400
commit5902cc283c060f0a006ee9b2f2a64855a09399b4 (patch)
treec7d2e909fa088255d845fe205394397581ed4bb3 /fs/bcachefs/io_misc.c
parent1809b8cba756d32bd6e976ed4ee64efdf66c6d94 (diff)
downloadlinux-5902cc283c060f0a006ee9b2f2a64855a09399b4.tar.gz
linux-5902cc283c060f0a006ee9b2f2a64855a09399b4.tar.bz2
linux-5902cc283c060f0a006ee9b2f2a64855a09399b4.zip
bcachefs: New io_misc.c helpers
This pulls the non vfs specific parts of truncate and finsert/fcollapse out of fs-io.c, and moves them to io_misc.c. This is prep work for logging these operations, to make them atomic in the event of a crash. Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/io_misc.c')
-rw-r--r--fs/bcachefs/io_misc.c226
1 file changed, 226 insertions, 0 deletions
diff --git a/fs/bcachefs/io_misc.c b/fs/bcachefs/io_misc.c
index c04e5dacfc8d..1afea613df4a 100644
--- a/fs/bcachefs/io_misc.c
+++ b/fs/bcachefs/io_misc.c
@@ -9,7 +9,10 @@
#include "btree_update.h"
#include "buckets.h"
#include "clock.h"
+#include "error.h"
#include "extents.h"
+#include "extent_update.h"
+#include "inode.h"
#include "io_misc.h"
#include "io_write.h"
#include "subvolume.h"
@@ -213,3 +216,226 @@ int bch2_fpunch(struct bch_fs *c, subvol_inum inum, u64 start, u64 end,
return ret;
}
+
+/*
+ * truncate_set_isize() - set the on-disk i_size of @inum to @new_i_size
+ *
+ * Runs inside @trans: peeks the inode with an intent iterator, overwrites
+ * bi_size, then writes the inode back.  The middle arm of the ?: chain is
+ * a comma expression — it assigns bi_size and evaluates to 0, so the chain
+ * only reaches bch2_inode_write() when the peek succeeded.
+ *
+ * Returns 0 on success or a bch2 error code from peek/write.
+ */
+static int truncate_set_isize(struct btree_trans *trans,
+ subvol_inum inum,
+ u64 new_i_size)
+{
+ struct btree_iter iter = { NULL };
+ struct bch_inode_unpacked inode_u;
+ int ret;
+
+ ret = bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_INTENT) ?:
+ (inode_u.bi_size = new_i_size, 0) ?:
+ bch2_inode_write(trans, &iter, &inode_u);
+
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+/*
+ * bch2_truncate() - truncate @inum down to @new_i_size
+ *
+ * Two phases, each its own transaction commit:
+ *  1. Commit the new i_size via truncate_set_isize(), so a crash after
+ *     this point already sees the shrunk size.
+ *  2. Punch extents from the first block boundary at/after @new_i_size
+ *     (round_up(..., block_bytes(c)) >> 9 converts bytes to sectors)
+ *     up to U64_MAX.
+ *
+ * A transaction-restart error from bch2_fpunch_at() is treated as success;
+ * any other error is escalated through bch2_fs_fatal_err_on().
+ *
+ * @i_sectors_delta: out-param accumulating the change in allocated sectors.
+ */
+int bch2_truncate(struct bch_fs *c, subvol_inum inum, u64 new_i_size, u64 *i_sectors_delta)
+{
+ struct btree_trans trans;
+ struct btree_iter fpunch_iter;
+ int ret;
+
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
+ bch2_trans_iter_init(&trans, &fpunch_iter, BTREE_ID_extents,
+ POS(inum.inum, round_up(new_i_size, block_bytes(c)) >> 9),
+ BTREE_ITER_INTENT);
+
+ /* Phase 1: persist the new size first */
+ ret = commit_do(&trans, NULL, NULL, BTREE_INSERT_NOFAIL,
+ truncate_set_isize(&trans, inum, new_i_size));
+ if (ret)
+ goto err;
+
+ /* Phase 2: drop all extents past the (block-aligned) new size */
+ ret = bch2_fpunch_at(&trans, &fpunch_iter, inum, U64_MAX, i_sectors_delta);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ ret = 0;
+ /* NOTE(review): redundant — control falls through to err: either way */
+ if (ret)
+ goto err;
+err:
+ bch2_trans_iter_exit(&trans, &fpunch_iter);
+ bch2_trans_exit(&trans);
+
+ bch2_fs_fatal_err_on(ret, c, "%s: error truncating %u:%llu: %s",
+ __func__, inum.subvol, inum.inum, bch2_err_str(ret));
+ return ret;
+}
+
+/*
+ * adjust_i_size() - grow or shrink @inum's i_size by @len
+ *
+ * @offset and @len arrive in sectors and are converted to bytes (<< 9).
+ *
+ * For a grow (@len > 0):
+ *   -EFBIG  if the new size would exceed MAX_LFS_FILESIZE
+ *   -EINVAL if @offset is at/past the current i_size
+ *           (presumably the insert point must lie inside the file —
+ *            confirm against the finsert caller)
+ * A shrink (@len < 0) and the @len == 0 no-op case skip both checks.
+ *
+ * Also bumps mtime/ctime before writing the inode back, so callers get a
+ * journalled inode update even when @len is 0.
+ */
+static int adjust_i_size(struct btree_trans *trans, subvol_inum inum, u64 offset, s64 len)
+{
+ struct btree_iter iter;
+ struct bch_inode_unpacked inode_u;
+ int ret;
+
+ /* sectors -> bytes */
+ offset <<= 9;
+ len <<= 9;
+
+ ret = bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_INTENT);
+ if (ret)
+ return ret;
+
+ if (len > 0) {
+ if (MAX_LFS_FILESIZE - inode_u.bi_size < len) {
+ ret = -EFBIG;
+ goto err;
+ }
+
+ if (offset >= inode_u.bi_size) {
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ inode_u.bi_size += len;
+ inode_u.bi_mtime = inode_u.bi_ctime = bch2_current_time(trans->c);
+
+ ret = bch2_inode_write(trans, &iter, &inode_u);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+/*
+ * bch2_fcollapse_finsert() - implement fcollapse/finsert by shifting extents
+ *
+ * @insert = true  (finsert):  grow i_size, then shift every extent at/after
+ *                             @offset up by @len sectors, walking backwards
+ *                             from the end of the file so moves never
+ *                             overwrite extents not yet moved.
+ * @insert = false (fcollapse): punch [@offset, @offset + @len), then shift
+ *                             every extent after the hole down by @len
+ *                             sectors, walking forwards; i_size is shrunk
+ *                             at the end.
+ *
+ * Each extent move is its own transaction commit: a whiteout at the old
+ * position (@del) plus the shifted copy at the new position (@dst).
+ *
+ * @i_sectors_delta: out-param accumulating the change in allocated sectors.
+ */
+int bch2_fcollapse_finsert(struct bch_fs *c, subvol_inum inum,
+ u64 offset, u64 len, bool insert,
+ s64 *i_sectors_delta)
+{
+ struct bkey_buf copy;
+ struct btree_trans trans;
+ struct btree_iter src = { NULL }, dst = { NULL }, del = { NULL };
+ s64 shift = insert ? len : -len;
+ int ret = 0;
+
+ bch2_bkey_buf_init(&copy);
+ bch2_trans_init(&trans, c, 0, 1024);
+
+ bch2_trans_iter_init(&trans, &src, BTREE_ID_extents,
+ POS(inum.inum, U64_MAX),
+ BTREE_ITER_INTENT);
+ bch2_trans_copy_iter(&dst, &src);
+ bch2_trans_copy_iter(&del, &src);
+
+ if (insert) {
+ /* Grow i_size up front so moved extents land inside the file */
+ ret = commit_do(&trans, NULL, NULL, BTREE_INSERT_NOFAIL,
+ adjust_i_size(&trans, inum, offset, len));
+ if (ret)
+ goto err;
+ } else {
+ /* Punch the range being collapsed before shifting extents down */
+ bch2_btree_iter_set_pos(&src, POS(inum.inum, offset));
+
+ ret = bch2_fpunch_at(&trans, &src, inum, offset + len, i_sectors_delta);
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto err;
+
+ bch2_btree_iter_set_pos(&src, POS(inum.inum, offset + len));
+ }
+
+ /* One extent moved per iteration; restarts retry the same extent */
+ while (ret == 0 || bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
+ struct disk_reservation disk_res =
+ bch2_disk_reservation_init(c, 0);
+ struct bkey_i delete;
+ struct bkey_s_c k;
+ struct bpos next_pos;
+ struct bpos move_pos = POS(inum.inum, offset);
+ struct bpos atomic_end;
+ unsigned trigger_flags = 0;
+ u32 snapshot;
+
+ bch2_trans_begin(&trans);
+
+ ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
+ if (ret)
+ continue;
+
+ bch2_btree_iter_set_snapshot(&src, snapshot);
+ bch2_btree_iter_set_snapshot(&dst, snapshot);
+ bch2_btree_iter_set_snapshot(&del, snapshot);
+
+ /* NOTE(review): second bch2_trans_begin() after the one above —
+ * looks redundant; confirm whether it is intentional */
+ bch2_trans_begin(&trans);
+
+ /* insert walks backwards from EOF; collapse walks forwards */
+ k = insert
+ ? bch2_btree_iter_peek_prev(&src)
+ : bch2_btree_iter_peek_upto(&src, POS(inum.inum, U64_MAX));
+ if ((ret = bkey_err(k)))
+ continue;
+
+ if (!k.k || k.k->p.inode != inum.inum)
+ break;
+
+ /* insert is done once we reach extents at/before @offset */
+ if (insert &&
+ bkey_le(k.k->p, POS(inum.inum, offset)))
+ break;
+reassemble:
+ bch2_bkey_buf_reassemble(&copy, c, k);
+
+ /* only the part at/after move_pos gets shifted */
+ if (insert &&
+ bkey_lt(bkey_start_pos(k.k), move_pos))
+ bch2_cut_front(move_pos, copy.k);
+
+ copy.k->k.p.offset += shift;
+ bch2_btree_iter_set_pos(&dst, bkey_start_pos(&copy.k->k));
+
+ ret = bch2_extent_atomic_end(&trans, &dst, copy.k, &atomic_end);
+ if (ret)
+ continue;
+
+ /*
+ * Whole extent can't be moved atomically: for insert, retry with
+ * just the tail past atomic_end (un-shifted back to source space);
+ * for collapse, trim the copy and move the rest next iteration.
+ */
+ if (!bkey_eq(atomic_end, copy.k->k.p)) {
+ if (insert) {
+ move_pos = atomic_end;
+ move_pos.offset -= shift;
+ goto reassemble;
+ } else {
+ bch2_cut_back(atomic_end, copy.k);
+ }
+ }
+
+ /* Whiteout covering the moved range at its original position */
+ bkey_init(&delete.k);
+ delete.k.p = copy.k->k.p;
+ delete.k.size = copy.k->k.size;
+ delete.k.p.offset -= shift;
+ bch2_btree_iter_set_pos(&del, bkey_start_pos(&delete.k));
+
+ next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;
+
+ if (copy.k->k.size != k.k->size) {
+ /* We might end up splitting compressed extents: */
+ unsigned nr_ptrs =
+ bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k));
+
+ ret = bch2_disk_reservation_get(c, &disk_res,
+ copy.k->k.size, nr_ptrs,
+ BCH_DISK_RESERVATION_NOFAIL);
+ BUG_ON(ret);
+ }
+
+ /* Delete at old position + insert at new, in one commit */
+ ret = bch2_btree_iter_traverse(&del) ?:
+ bch2_trans_update(&trans, &del, &delete, trigger_flags) ?:
+ bch2_trans_update(&trans, &dst, copy.k, trigger_flags) ?:
+ bch2_trans_commit(&trans, &disk_res, NULL,
+ BTREE_INSERT_NOFAIL);
+ bch2_disk_reservation_put(c, &disk_res);
+
+ /* Advance src past the moved extent only on successful commit */
+ if (!ret)
+ bch2_btree_iter_set_pos(&src, next_pos);
+ }
+
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto err;
+
+ if (!insert) {
+ /* Collapse shrinks i_size by @len once all extents are moved */
+ ret = commit_do(&trans, NULL, NULL, BTREE_INSERT_NOFAIL,
+ adjust_i_size(&trans, inum, offset, -len));
+ } else {
+ /* We need an inode update to update bi_journal_seq for fsync: */
+ ret = commit_do(&trans, NULL, NULL, BTREE_INSERT_NOFAIL,
+ adjust_i_size(&trans, inum, 0, 0));
+ }
+err:
+ bch2_trans_iter_exit(&trans, &del);
+ bch2_trans_iter_exit(&trans, &dst);
+ bch2_trans_iter_exit(&trans, &src);
+ bch2_trans_exit(&trans);
+ bch2_bkey_buf_exit(&copy, c);
+ return ret;
+}