path: root/fs/btrfs/ioctl.c
author     Josef Bacik <josef@toxicpanda.com>	2022-09-09 17:53:43 -0400
committer  David Sterba <dsterba@suse.com>	2022-09-26 12:28:05 +0200
commit     570eb97bace8743f45c6830b64c7a0889d0915dd (patch)
tree       dd6e39e3006b7a4c91abe461074e9fed4e2aa559 /fs/btrfs/ioctl.c
parent     291bbb1e7ea84ef2aba3a5756b9c573d2c572bd7 (diff)
btrfs: unify the lock/unlock extent variants
We have two variants of lock/unlock extent, one set that takes a cached
state and another that does not. This is slightly annoying, and generally
speaking there are only a few places where we don't have a cached state.
Simplify this by making lock_extent/unlock_extent the only variants,
having them take a cached state, and converting all the callers
appropriately.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
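To illustrate the resulting calling convention, here is a minimal sketch
(not code from this commit; the inode, start and end values are assumed
for illustration): callers that track a cached extent_state pass it in,
and callers that previously used the variants without a cached state now
simply pass NULL.

	/* Caller with a cached state: the lock fills it in and the unlock
	 * consumes it, saving an extent tree lookup on the second call. */
	struct extent_state *cached_state = NULL;

	lock_extent(&inode->io_tree, start, end, &cached_state);
	/* ... operate on the locked range ... */
	unlock_extent(&inode->io_tree, start, end, &cached_state);

	/* Caller without a cached state (formerly the plain lock_extent()/
	 * unlock_extent() wrappers) now passes NULL explicitly. */
	lock_extent(&inode->io_tree, start, end, NULL);
	unlock_extent(&inode->io_tree, start, end, NULL);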
Diffstat (limited to 'fs/btrfs/ioctl.c')
-rw-r--r--	fs/btrfs/ioctl.c | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 274ad3e23190..d59e1f1b762e 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1218,10 +1218,10 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
 
 		/* get the big lock and read metadata off disk */
 		if (!locked)
-			lock_extent_bits(io_tree, start, end, &cached);
+			lock_extent(io_tree, start, end, &cached);
 		em = defrag_get_extent(BTRFS_I(inode), start, newer_than);
 		if (!locked)
-			unlock_extent_cached(io_tree, start, end, &cached);
+			unlock_extent(io_tree, start, end, &cached);
 
 		if (IS_ERR(em))
 			return NULL;
@@ -1333,10 +1333,10 @@ again:
 	while (1) {
 		struct btrfs_ordered_extent *ordered;
 
-		lock_extent_bits(&inode->io_tree, page_start, page_end, &cached_state);
+		lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
 		ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
-		unlock_extent_cached(&inode->io_tree, page_start, page_end,
-				     &cached_state);
+		unlock_extent(&inode->io_tree, page_start, page_end,
+			      &cached_state);
 		if (!ordered)
 			break;
 
@@ -1666,9 +1666,9 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
 		wait_on_page_writeback(pages[i]);
 
 	/* Lock the pages range */
-	lock_extent_bits(&inode->io_tree, start_index << PAGE_SHIFT,
-			 (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
-			 &cached_state);
+	lock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
+		    (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
+		    &cached_state);
 	/*
 	 * Now we have a consistent view about the extent map, re-check
 	 * which range really needs to be defragged.
@@ -1694,9 +1694,9 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
 		kfree(entry);
 	}
 unlock_extent:
-	unlock_extent_cached(&inode->io_tree, start_index << PAGE_SHIFT,
-			     (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
-			     &cached_state);
+	unlock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
+		      (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
+		      &cached_state);
 free_pages:
 	for (i = 0; i < nr_pages; i++) {
 		if (pages[i]) {