Diffstat (limited to 'fs/btrfs')
-rw-r--r-- | fs/btrfs/accessors.c        |  2
-rw-r--r-- | fs/btrfs/accessors.h        |  2
-rw-r--r-- | fs/btrfs/backref.c          | 12
-rw-r--r-- | fs/btrfs/delayed-ref.c      | 42
-rw-r--r-- | fs/btrfs/disk-io.c          | 13
-rw-r--r-- | fs/btrfs/extent-tree.c      | 26
-rw-r--r-- | fs/btrfs/extent_io.c        | 14
-rw-r--r-- | fs/btrfs/free-space-cache.c |  4
-rw-r--r-- | fs/btrfs/free-space-cache.h |  6
-rw-r--r-- | fs/btrfs/inode.c            |  7
-rw-r--r-- | fs/btrfs/messages.c         |  3
-rw-r--r-- | fs/btrfs/relocation.c       | 77
-rw-r--r-- | fs/btrfs/send.c             | 31
-rw-r--r-- | fs/btrfs/uuid-tree.c        |  2
-rw-r--r-- | fs/btrfs/volumes.h          |  6
-rw-r--r-- | fs/btrfs/zoned.c            |  2
16 files changed, 138 insertions, 111 deletions
diff --git a/fs/btrfs/accessors.c b/fs/btrfs/accessors.c
index 79026917db19..e3716516ca38 100644
--- a/fs/btrfs/accessors.c
+++ b/fs/btrfs/accessors.c
@@ -3,7 +3,7 @@
  * Copyright (C) 2007 Oracle. All rights reserved.
  */
 
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include "messages.h"
 #include "extent_io.h"
 #include "fs.h"
diff --git a/fs/btrfs/accessors.h b/fs/btrfs/accessors.h
index b2eb9cde2c5d..7a7e0ef69973 100644
--- a/fs/btrfs/accessors.h
+++ b/fs/btrfs/accessors.h
@@ -3,7 +3,7 @@
 #ifndef BTRFS_ACCESSORS_H
 #define BTRFS_ACCESSORS_H
 
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <linux/stddef.h>
 #include <linux/types.h>
 #include <linux/align.h>
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index e2f478ecd7fd..f8e1d5b2c512 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -3179,10 +3179,14 @@ void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
 		btrfs_backref_cleanup_node(cache, node);
 	}
 
-	cache->last_trans = 0;
-
-	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
-		ASSERT(list_empty(&cache->pending[i]));
+	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
+		while (!list_empty(&cache->pending[i])) {
+			node = list_first_entry(&cache->pending[i],
+						struct btrfs_backref_node,
+						list);
+			btrfs_backref_cleanup_node(cache, node);
+		}
+	}
 	ASSERT(list_empty(&cache->pending_edge));
 	ASSERT(list_empty(&cache->useless_node));
 	ASSERT(list_empty(&cache->changed));
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index ad9ef8312e41..32f719b9e661 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -840,6 +840,8 @@ static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
  * helper function to actually insert a head node into the rbtree.
  * this does all the dirty work in terms of maintaining the correct
  * overall modification count.
+ *
+ * Returns an error pointer in case of an error.
  */
 static noinline struct btrfs_delayed_ref_head *
 add_delayed_ref_head(struct btrfs_trans_handle *trans,
@@ -862,6 +864,9 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
 	if (ret) {
 		/* Clean up if insertion fails or item exists. */
 		xa_release(&delayed_refs->dirty_extents, qrecord->bytenr);
+		/* Caller responsible for freeing qrecord on error. */
+		if (ret < 0)
+			return ERR_PTR(ret);
 		kfree(qrecord);
 	} else {
 		qrecord_inserted = true;
 	}
@@ -1000,27 +1005,35 @@ static int add_delayed_ref(struct btrfs_trans_handle *trans,
 	struct btrfs_fs_info *fs_info = trans->fs_info;
 	struct btrfs_delayed_ref_node *node;
 	struct btrfs_delayed_ref_head *head_ref;
+	struct btrfs_delayed_ref_head *new_head_ref;
 	struct btrfs_delayed_ref_root *delayed_refs;
 	struct btrfs_qgroup_extent_record *record = NULL;
 	bool qrecord_inserted;
 	int action = generic_ref->action;
 	bool merged;
+	int ret;
 
 	node = kmem_cache_alloc(btrfs_delayed_ref_node_cachep, GFP_NOFS);
 	if (!node)
 		return -ENOMEM;
 
 	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
-	if (!head_ref)
+	if (!head_ref) {
+		ret = -ENOMEM;
 		goto free_node;
+	}
 
 	if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) {
 		record = kzalloc(sizeof(*record), GFP_NOFS);
-		if (!record)
+		if (!record) {
+			ret = -ENOMEM;
 			goto free_head_ref;
+		}
 		if (xa_reserve(&trans->transaction->delayed_refs.dirty_extents,
-			       generic_ref->bytenr, GFP_NOFS))
+			       generic_ref->bytenr, GFP_NOFS)) {
+			ret = -ENOMEM;
 			goto free_record;
+		}
 	}
 
 	init_delayed_ref_common(fs_info, node, generic_ref);
@@ -1034,8 +1047,14 @@ static int add_delayed_ref(struct btrfs_trans_handle *trans,
 	 * insert both the head node and the new ref without dropping
 	 * the spin lock
 	 */
-	head_ref = add_delayed_ref_head(trans, head_ref, record,
-					action, &qrecord_inserted);
+	new_head_ref = add_delayed_ref_head(trans, head_ref, record,
+					    action, &qrecord_inserted);
+	if (IS_ERR(new_head_ref)) {
+		spin_unlock(&delayed_refs->lock);
+		ret = PTR_ERR(new_head_ref);
+		goto free_record;
+	}
+	head_ref = new_head_ref;
 
 	merged = insert_delayed_ref(trans, head_ref, node);
 	spin_unlock(&delayed_refs->lock);
@@ -1063,7 +1082,7 @@ free_head_ref:
 	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
 free_node:
 	kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
-	return -ENOMEM;
+	return ret;
 }
 
 /*
@@ -1094,6 +1113,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
 				struct btrfs_delayed_extent_op *extent_op)
 {
 	struct btrfs_delayed_ref_head *head_ref;
+	struct btrfs_delayed_ref_head *head_ref_ret;
 	struct btrfs_delayed_ref_root *delayed_refs;
 	struct btrfs_ref generic_ref = {
 		.type = BTRFS_REF_METADATA,
@@ -1113,11 +1133,15 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
 	delayed_refs = &trans->transaction->delayed_refs;
 	spin_lock(&delayed_refs->lock);
 
-	add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
-			     NULL);
-
+	head_ref_ret = add_delayed_ref_head(trans, head_ref, NULL,
+					    BTRFS_UPDATE_DELAYED_HEAD, NULL);
 	spin_unlock(&delayed_refs->lock);
 
+	if (IS_ERR(head_ref_ret)) {
+		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
+		return PTR_ERR(head_ref_ret);
+	}
+
 	/*
 	 * Need to update the delayed_refs_rsv with any changes we may have
 	 * made.
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 25d768e67e37..4ad5db619b00 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -17,7 +17,7 @@
 #include <linux/error-injection.h>
 #include <linux/crc32c.h>
 #include <linux/sched/mm.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <crypto/hash.h>
 #include "ctree.h"
 #include "disk-io.h"
@@ -4256,6 +4256,17 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
 	btrfs_cleanup_defrag_inodes(fs_info);
 
 	/*
+	 * Wait for any fixup workers to complete.
+	 * If we don't wait for them here and they are still running by the time
+	 * we call kthread_stop() against the cleaner kthread further below, we
+	 * get an use-after-free on the cleaner because the fixup worker adds an
+	 * inode to the list of delayed iputs and then attempts to wakeup the
+	 * cleaner kthread, which was already stopped and destroyed. We parked
+	 * already the cleaner, but below we run all pending delayed iputs.
+	 */
+	btrfs_flush_workqueue(fs_info->fixup_workers);
+
+	/*
 	 * After we parked the cleaner kthread, ordered extents may have
 	 * completed and created new delayed iputs. If one of the async reclaim
 	 * tasks is running and in the RUN_DELAYED_IPUTS flush state, then we
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index a5966324607d..d9f511babd89 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1300,13 +1300,29 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
 			bytes_left = end - start;
 	}
 
-	if (bytes_left) {
+	while (bytes_left) {
+		u64 bytes_to_discard = min(BTRFS_MAX_DISCARD_CHUNK_SIZE, bytes_left);
+
 		ret = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
-					   bytes_left >> SECTOR_SHIFT,
+					   bytes_to_discard >> SECTOR_SHIFT,
 					   GFP_NOFS);
-		if (!ret)
-			*discarded_bytes += bytes_left;
+
+		if (ret) {
+			if (ret != -EOPNOTSUPP)
+				break;
+			continue;
+		}
+
+		start += bytes_to_discard;
+		bytes_left -= bytes_to_discard;
+		*discarded_bytes += bytes_to_discard;
+
+		if (btrfs_trim_interrupted()) {
+			ret = -ERESTARTSYS;
+			break;
+		}
 	}
+
 	return ret;
 }
 
@@ -6459,7 +6475,7 @@ static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
 		start += len;
 		*trimmed += bytes;
 
-		if (fatal_signal_pending(current)) {
+		if (btrfs_trim_interrupted()) {
 			ret = -ERESTARTSYS;
 			break;
 		}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 39c9677c47d5..309a8ae48434 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1306,6 +1306,13 @@ static int submit_one_sector(struct btrfs_inode *inode,
 	free_extent_map(em);
 	em = NULL;
 
+	/*
+	 * Although the PageDirty bit is cleared before entering this
+	 * function, subpage dirty bit is not cleared.
+	 * So clear subpage dirty bit here so next time we won't submit
+	 * a folio for a range already written to disk.
+	 */
+	btrfs_folio_clear_dirty(fs_info, folio, filepos, sectorsize);
 	btrfs_set_range_writeback(inode, filepos, filepos + sectorsize - 1);
 	/*
 	 * Above call should set the whole folio with writeback flag, even
@@ -1315,13 +1322,6 @@ static int submit_one_sector(struct btrfs_inode *inode,
 	 */
 	ASSERT(folio_test_writeback(folio));
 
-	/*
-	 * Although the PageDirty bit is cleared before entering this
-	 * function, subpage dirty bit is not cleared.
-	 * So clear subpage dirty bit here so next time we won't submit
-	 * folio for range already written to disk.
- */ - btrfs_folio_clear_dirty(fs_info, folio, filepos, sectorsize); submit_extent_folio(bio_ctrl, disk_bytenr, folio, sectorsize, filepos - folio_pos(folio)); return 0; diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index eaa1dbd31352..f4bcb2530660 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -3809,7 +3809,7 @@ next: if (async && *total_trimmed) break; - if (fatal_signal_pending(current)) { + if (btrfs_trim_interrupted()) { ret = -ERESTARTSYS; break; } @@ -4000,7 +4000,7 @@ next: } block_group->discard_cursor = start; - if (fatal_signal_pending(current)) { + if (btrfs_trim_interrupted()) { if (start != offset) reset_trimming_bitmap(ctl, offset); ret = -ERESTARTSYS; diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 83774bfd7b3b..9f1dbfdee8ca 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -10,6 +10,7 @@ #include <linux/list.h> #include <linux/spinlock.h> #include <linux/mutex.h> +#include <linux/freezer.h> #include "fs.h" struct inode; @@ -56,6 +57,11 @@ static inline bool btrfs_free_space_trimming_bitmap( return (info->trim_state == BTRFS_TRIM_STATE_TRIMMING); } +static inline bool btrfs_trim_interrupted(void) +{ + return fatal_signal_pending(current) || freezing(current); +} + /* * Deltas are an effective way to populate global statistics. Give macro names * to make it clear what we're doing. An example is discard_extents in diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index edac499fd83d..5618ca02934a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -32,7 +32,7 @@ #include <linux/migrate.h> #include <linux/sched/mm.h> #include <linux/iomap.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/fsverity.h> #include "misc.h" #include "ctree.h" @@ -3111,6 +3111,11 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent) ret = btrfs_update_inode_fallback(trans, inode); if (ret) /* -ENOMEM or corruption */ btrfs_abort_transaction(trans, ret); + + ret = btrfs_insert_raid_extent(trans, ordered_extent); + if (ret) + btrfs_abort_transaction(trans, ret); + goto out; } diff --git a/fs/btrfs/messages.c b/fs/btrfs/messages.c index 77752eec125d..363fd28c0268 100644 --- a/fs/btrfs/messages.c +++ b/fs/btrfs/messages.c @@ -239,7 +239,8 @@ void __cold _btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, vaf.fmt = fmt; vaf.va = &args; - if (__ratelimit(ratelimit)) { + /* Do not ratelimit if CONFIG_BTRFS_DEBUG is enabled. 
+	if (IS_ENABLED(CONFIG_BTRFS_DEBUG) || __ratelimit(ratelimit)) {
 		if (fs_info) {
 			char statestr[STATE_STRING_BUF_LEN];
 
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index ea4ed85919ec..f3834f8d26b4 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -232,70 +232,6 @@ static struct btrfs_backref_node *walk_down_backref(
 	return NULL;
 }
 
-static void update_backref_node(struct btrfs_backref_cache *cache,
-				struct btrfs_backref_node *node, u64 bytenr)
-{
-	struct rb_node *rb_node;
-	rb_erase(&node->rb_node, &cache->rb_root);
-	node->bytenr = bytenr;
-	rb_node = rb_simple_insert(&cache->rb_root, node->bytenr, &node->rb_node);
-	if (rb_node)
-		btrfs_backref_panic(cache->fs_info, bytenr, -EEXIST);
-}
-
-/*
- * update backref cache after a transaction commit
- */
-static int update_backref_cache(struct btrfs_trans_handle *trans,
-				struct btrfs_backref_cache *cache)
-{
-	struct btrfs_backref_node *node;
-	int level = 0;
-
-	if (cache->last_trans == 0) {
-		cache->last_trans = trans->transid;
-		return 0;
-	}
-
-	if (cache->last_trans == trans->transid)
-		return 0;
-
-	/*
-	 * detached nodes are used to avoid unnecessary backref
-	 * lookup. transaction commit changes the extent tree.
-	 * so the detached nodes are no longer useful.
-	 */
-	while (!list_empty(&cache->detached)) {
-		node = list_entry(cache->detached.next,
-				  struct btrfs_backref_node, list);
-		btrfs_backref_cleanup_node(cache, node);
-	}
-
-	while (!list_empty(&cache->changed)) {
-		node = list_entry(cache->changed.next,
-				  struct btrfs_backref_node, list);
-		list_del_init(&node->list);
-		BUG_ON(node->pending);
-		update_backref_node(cache, node, node->new_bytenr);
-	}
-
-	/*
-	 * some nodes can be left in the pending list if there were
-	 * errors during processing the pending nodes.
-	 */
-	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
-		list_for_each_entry(node, &cache->pending[level], list) {
-			BUG_ON(!node->pending);
-			if (node->bytenr == node->new_bytenr)
-				continue;
-			update_backref_node(cache, node, node->new_bytenr);
-		}
-	}
-
-	cache->last_trans = 0;
-	return 1;
-}
-
 static bool reloc_root_is_dead(const struct btrfs_root *root)
 {
 	/*
@@ -551,9 +487,6 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,
 	struct btrfs_backref_edge *new_edge;
 	struct rb_node *rb_node;
 
-	if (cache->last_trans > 0)
-		update_backref_cache(trans, cache);
-
 	rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start);
 	if (rb_node) {
 		node = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
@@ -923,7 +856,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
 	btrfs_grab_root(reloc_root);
 
 	/* root->reloc_root will stay until current relocation finished */
-	if (fs_info->reloc_ctl->merge_reloc_tree &&
+	if (fs_info->reloc_ctl && fs_info->reloc_ctl->merge_reloc_tree &&
 	    btrfs_root_refs(root_item) == 0) {
 		set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
 		/*
@@ -3698,11 +3631,9 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
 			break;
 		}
 restart:
-		if (update_backref_cache(trans, &rc->backref_cache)) {
-			btrfs_end_transaction(trans);
-			trans = NULL;
-			continue;
-		}
+		if (rc->backref_cache.last_trans != trans->transid)
+			btrfs_backref_release_cache(&rc->backref_cache);
+		rc->backref_cache.last_trans = trans->transid;
 
 		ret = find_next_extent(rc, path, &key);
 		if (ret < 0)
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 7f48ba6c1c77..27306d98ec43 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -346,8 +346,10 @@ struct name_cache_entry {
 	u64 parent_gen;
 	int ret;
 	int need_later_update;
+	/* Name length without NUL terminator. */
 	int name_len;
-	char name[] __counted_by(name_len);
+	/* Not NUL terminated. */
+	char name[] __counted_by(name_len) __nonstring;
 };
 
 /* See the comment at lru_cache.h about struct btrfs_lru_cache_entry. */
@@ -2388,7 +2390,7 @@ out_cache:
 	/*
 	 * Store the result of the lookup in the name cache.
 	 */
-	nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL);
+	nce = kmalloc(sizeof(*nce) + fs_path_len(dest), GFP_KERNEL);
 	if (!nce) {
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -2400,7 +2402,7 @@ out_cache:
 	nce->parent_gen = *parent_gen;
 	nce->name_len = fs_path_len(dest);
 	nce->ret = ret;
-	strcpy(nce->name, dest->start);
+	memcpy(nce->name, dest->start, nce->name_len);
 
 	if (ino < sctx->send_progress)
 		nce->need_later_update = 0;
@@ -6187,8 +6189,29 @@ static int send_write_or_clone(struct send_ctx *sctx,
 	if (ret < 0)
 		return ret;
 
-	if (clone_root->offset + num_bytes == info.size)
+	if (clone_root->offset + num_bytes == info.size) {
+		/*
+		 * The final size of our file matches the end offset, but it may
+		 * be that its current size is larger, so we have to truncate it
+		 * to any value between the start offset of the range and the
+		 * final i_size, otherwise the clone operation is invalid
+		 * because it's unaligned and it ends before the current EOF.
+		 * We do this truncate to the final i_size when we finish
+		 * processing the inode, but it's too late by then. And here we
+		 * truncate to the start offset of the range because it's always
+		 * sector size aligned while if it were the final i_size it
+		 * would result in dirtying part of a page, filling part of a
+		 * page with zeroes and then having the clone operation at the
+		 * receiver trigger IO and wait for it due to the dirty page.
+ */ + if (sctx->parent_root != NULL) { + ret = send_truncate(sctx, sctx->cur_ino, + sctx->cur_inode_gen, offset); + if (ret < 0) + return ret; + } goto clone_data; + } write_data: ret = send_extent_data(sctx, path, offset, num_bytes); diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c index c6399513c66f..aca2861f2187 100644 --- a/fs/btrfs/uuid-tree.c +++ b/fs/btrfs/uuid-tree.c @@ -5,7 +5,7 @@ #include <linux/kthread.h> #include <linux/uuid.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "messages.h" #include "ctree.h" #include "transaction.h" diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 03d2d60afe0c..4481575dd70f 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -30,6 +30,12 @@ struct btrfs_zoned_device_info; #define BTRFS_MAX_DATA_CHUNK_SIZE (10ULL * SZ_1G) +/* + * Arbitratry maximum size of one discard request to limit potentially long time + * spent in blkdev_issue_discard(). + */ +#define BTRFS_MAX_DISCARD_CHUNK_SIZE (SZ_1G) + extern struct mutex uuid_mutex; #define BTRFS_STRIPE_LEN SZ_64K diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c index 7fa2920632ba..69d03feea4e0 100644 --- a/fs/btrfs/zoned.c +++ b/fs/btrfs/zoned.c @@ -1340,7 +1340,7 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx, switch (zone.cond) { case BLK_ZONE_COND_OFFLINE: case BLK_ZONE_COND_READONLY: - btrfs_err(fs_info, + btrfs_err_in_rcu(fs_info, "zoned: offline/readonly zone %llu on device %s (devid %llu)", (info->physical >> device->zone_info->zone_size_shift), rcu_str_deref(device->name), device->devid); |