author    Filipe Manana <fdmanana@suse.com>    2022-10-11 13:17:02 +0100
committer David Sterba <dsterba@suse.com>      2022-12-05 18:00:39 +0100
commit    61dbb952f0a5f587c983d88853212e969d2d4ede (patch)
tree      0e28d1d2e8a16d2c72afaedb04c595f475d0e854 /fs/btrfs
parent    ceb707da9ad92ad3a5251dc13844034ded06cb3d (diff)
btrfs: turn the backref sharedness check cache into a context object
Right now we are using a struct btrfs_backref_shared_cache to pass state across
multiple btrfs_is_data_extent_shared() calls. The structure's name closely
follows its current purpose, which is to cache previous checks for the
sharedness of metadata extents. However we will start using the structure for
more things other than caching sharedness checks, so rename it to
struct btrfs_backref_share_check_ctx.

Signed-off-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
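For context, the sketch below illustrates the caller pattern this rename is
about: a single context object is allocated up front and reused across every
btrfs_is_data_extent_shared() call, as extent_fiemap() does in the diff further
down. It is a standalone, simplified model only; BTRFS_MAX_LEVEL, the entry
fields and the use_path_cache flag follow the diff, while check_extent_shared()
and the loop in main() are hypothetical stand-ins, not kernel API.

/*
 * Standalone sketch (not kernel code): models the renamed context object.
 * The struct layout mirrors the diff; the allocation and loop below are
 * illustrative assumptions only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BTRFS_MAX_LEVEL 8

struct btrfs_backref_shared_cache_entry {
	uint64_t bytenr;
	uint64_t gen;
	bool is_shared;
};

/* Renamed from struct btrfs_backref_shared_cache. */
struct btrfs_backref_share_check_ctx {
	/* One cached result per b+tree level of the resolved path. */
	struct btrfs_backref_shared_cache_entry path_cache_entries[BTRFS_MAX_LEVEL];
	bool use_path_cache;
};

/* Hypothetical stand-in for btrfs_is_data_extent_shared(). */
static int check_extent_shared(struct btrfs_backref_share_check_ctx *ctx,
			       uint64_t bytenr)
{
	ctx->use_path_cache = true;	/* re-enabled per data extent, as in the diff */
	return (int)((bytenr / 4096) % 2);	/* fake result: 1 = shared, 0 = not */
}

int main(void)
{
	/* One context allocated up front, reused for every extent checked. */
	struct btrfs_backref_share_check_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx)
		return 1;

	for (uint64_t bytenr = 4096; bytenr <= 3 * 4096; bytenr += 4096)
		printf("extent %llu shared: %d\n",
		       (unsigned long long)bytenr,
		       check_extent_shared(ctx, bytenr));

	free(ctx);
	return 0;
}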
Diffstat (limited to 'fs/btrfs')
-rw-r--r--	fs/btrfs/backref.c	32
-rw-r--r--	fs/btrfs/backref.h	8
-rw-r--r--	fs/btrfs/extent_io.c	24
3 files changed, 32 insertions, 32 deletions
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index efdeb69e8ed6..693c99ad4afb 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1542,13 +1542,13 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
* fs_info->commit_root_sem semaphore, so no need to worry about the root's last
* snapshot field changing while updating or checking the cache.
*/
-static bool lookup_backref_shared_cache(struct btrfs_backref_shared_cache *cache,
+static bool lookup_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
struct btrfs_root *root,
u64 bytenr, int level, bool *is_shared)
{
struct btrfs_backref_shared_cache_entry *entry;
- if (!cache->use_cache)
+ if (!ctx->use_path_cache)
return false;
if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
@@ -1562,7 +1562,7 @@ static bool lookup_backref_shared_cache(struct btrfs_backref_shared_cache *cache
*/
ASSERT(level >= 0);
- entry = &cache->entries[level];
+ entry = &ctx->path_cache_entries[level];
/* Unused cache entry or being used for some other extent buffer. */
if (entry->bytenr != bytenr)
@@ -1595,8 +1595,8 @@ static bool lookup_backref_shared_cache(struct btrfs_backref_shared_cache *cache
*/
if (*is_shared) {
for (int i = 0; i < level; i++) {
- cache->entries[i].is_shared = true;
- cache->entries[i].gen = entry->gen;
+ ctx->path_cache_entries[i].is_shared = true;
+ ctx->path_cache_entries[i].gen = entry->gen;
}
}
@@ -1608,14 +1608,14 @@ static bool lookup_backref_shared_cache(struct btrfs_backref_shared_cache *cache
* fs_info->commit_root_sem semaphore, so no need to worry about the root's last
* snapshot field changing while updating or checking the cache.
*/
-static void store_backref_shared_cache(struct btrfs_backref_shared_cache *cache,
+static void store_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
struct btrfs_root *root,
u64 bytenr, int level, bool is_shared)
{
struct btrfs_backref_shared_cache_entry *entry;
u64 gen;
- if (!cache->use_cache)
+ if (!ctx->use_path_cache)
return;
if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
@@ -1634,7 +1634,7 @@ static void store_backref_shared_cache(struct btrfs_backref_shared_cache *cache,
else
gen = btrfs_root_last_snapshot(&root->root_item);
- entry = &cache->entries[level];
+ entry = &ctx->path_cache_entries[level];
entry->bytenr = bytenr;
entry->is_shared = is_shared;
entry->gen = gen;
@@ -1648,7 +1648,7 @@ static void store_backref_shared_cache(struct btrfs_backref_shared_cache *cache,
*/
if (is_shared) {
for (int i = 0; i < level; i++) {
- entry = &cache->entries[i];
+ entry = &ctx->path_cache_entries[i];
entry->is_shared = is_shared;
entry->gen = gen;
}
@@ -1664,7 +1664,7 @@ static void store_backref_shared_cache(struct btrfs_backref_shared_cache *cache,
* not known.
* @roots: List of roots this extent is shared among.
* @tmp: Temporary list used for iteration.
- * @cache: A backref lookup result cache.
+ * @ctx: A backref sharedness check context.
*
* btrfs_is_data_extent_shared uses the backref walking code but will short
* circuit as soon as it finds a root or inode that doesn't match the
@@ -1680,7 +1680,7 @@ static void store_backref_shared_cache(struct btrfs_backref_shared_cache *cache,
int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
u64 extent_gen,
struct ulist *roots, struct ulist *tmp,
- struct btrfs_backref_shared_cache *cache)
+ struct btrfs_backref_share_check_ctx *ctx)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -1715,7 +1715,7 @@ int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
/* -1 means we are in the bytenr of the data extent. */
level = -1;
ULIST_ITER_INIT(&uiter);
- cache->use_cache = true;
+ ctx->use_path_cache = true;
while (1) {
bool is_shared;
bool cached;
@@ -1726,7 +1726,7 @@ int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
/* this is the only condition under which we return 1 */
ret = 1;
if (level >= 0)
- store_backref_shared_cache(cache, root, bytenr,
+ store_backref_shared_cache(ctx, root, bytenr,
level, true);
break;
}
@@ -1761,17 +1761,17 @@ int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
* (which implies multiple paths).
*/
if (level == -1 && tmp->nnodes > 1)
- cache->use_cache = false;
+ ctx->use_path_cache = false;
if (level >= 0)
- store_backref_shared_cache(cache, root, bytenr,
+ store_backref_shared_cache(ctx, root, bytenr,
level, false);
node = ulist_next(tmp, &uiter);
if (!node)
break;
bytenr = node->val;
level++;
- cached = lookup_backref_shared_cache(cache, root, bytenr, level,
+ cached = lookup_backref_shared_cache(ctx, root, bytenr, level,
&is_shared);
if (cached) {
ret = (is_shared ? 1 : 0);
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index c846fa2bdf93..e3a2b45a76e3 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -23,13 +23,13 @@ struct btrfs_backref_shared_cache_entry {
bool is_shared;
};
-struct btrfs_backref_shared_cache {
+struct btrfs_backref_share_check_ctx {
/*
* A path from a root to a leaf that has a file extent item pointing to
* a given data extent should never exceed the maximum b+tree height.
*/
- struct btrfs_backref_shared_cache_entry entries[BTRFS_MAX_LEVEL];
- bool use_cache;
+ struct btrfs_backref_shared_cache_entry path_cache_entries[BTRFS_MAX_LEVEL];
+ bool use_path_cache;
};
typedef int (iterate_extent_inodes_t)(u64 inum, u64 offset, u64 root,
@@ -80,7 +80,7 @@ int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
u64 extent_gen,
struct ulist *roots, struct ulist *tmp,
- struct btrfs_backref_shared_cache *cache);
+ struct btrfs_backref_share_check_ctx *ctx);
int __init btrfs_prelim_ref_init(void);
void __cold btrfs_prelim_ref_exit(void);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 83be7e85ba91..365ad00a5942 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3705,7 +3705,7 @@ static int fiemap_search_slot(struct btrfs_inode *inode, struct btrfs_path *path
static int fiemap_process_hole(struct btrfs_inode *inode,
struct fiemap_extent_info *fieinfo,
struct fiemap_cache *cache,
- struct btrfs_backref_shared_cache *backref_cache,
+ struct btrfs_backref_share_check_ctx *backref_ctx,
u64 disk_bytenr, u64 extent_offset,
u64 extent_gen,
struct ulist *roots, struct ulist *tmp_ulist,
@@ -3755,7 +3755,7 @@ static int fiemap_process_hole(struct btrfs_inode *inode,
disk_bytenr,
extent_gen, roots,
tmp_ulist,
- backref_cache);
+ backref_ctx);
if (ret < 0)
return ret;
else if (ret > 0)
@@ -3805,7 +3805,7 @@ static int fiemap_process_hole(struct btrfs_inode *inode,
disk_bytenr,
extent_gen, roots,
tmp_ulist,
- backref_cache);
+ backref_ctx);
if (ret < 0)
return ret;
else if (ret > 0)
@@ -3904,7 +3904,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
struct extent_state *cached_state = NULL;
struct btrfs_path *path;
struct fiemap_cache cache = { 0 };
- struct btrfs_backref_shared_cache *backref_cache;
+ struct btrfs_backref_share_check_ctx *backref_ctx;
struct ulist *roots;
struct ulist *tmp_ulist;
u64 last_extent_end;
@@ -3914,11 +3914,11 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
bool stopped = false;
int ret;
- backref_cache = kzalloc(sizeof(*backref_cache), GFP_KERNEL);
+ backref_ctx = kzalloc(sizeof(*backref_ctx), GFP_KERNEL);
path = btrfs_alloc_path();
roots = ulist_alloc(GFP_KERNEL);
tmp_ulist = ulist_alloc(GFP_KERNEL);
- if (!backref_cache || !path || !roots || !tmp_ulist) {
+ if (!backref_ctx || !path || !roots || !tmp_ulist) {
ret = -ENOMEM;
goto out;
}
@@ -3978,7 +3978,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
const u64 range_end = min(key.offset, lockend) - 1;
ret = fiemap_process_hole(inode, fieinfo, &cache,
- backref_cache, 0, 0, 0,
+ backref_ctx, 0, 0, 0,
roots, tmp_ulist,
prev_extent_end, range_end);
if (ret < 0) {
@@ -4019,14 +4019,14 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
extent_len, flags);
} else if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
ret = fiemap_process_hole(inode, fieinfo, &cache,
- backref_cache,
+ backref_ctx,
disk_bytenr, extent_offset,
extent_gen, roots, tmp_ulist,
key.offset, extent_end - 1);
} else if (disk_bytenr == 0) {
/* We have an explicit hole. */
ret = fiemap_process_hole(inode, fieinfo, &cache,
- backref_cache, 0, 0, 0,
+ backref_ctx, 0, 0, 0,
roots, tmp_ulist,
key.offset, extent_end - 1);
} else {
@@ -4037,7 +4037,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
extent_gen,
roots,
tmp_ulist,
- backref_cache);
+ backref_ctx);
if (ret < 0)
goto out_unlock;
else if (ret > 0)
@@ -4086,7 +4086,7 @@ check_eof_delalloc:
path = NULL;
if (!stopped && prev_extent_end < lockend) {
- ret = fiemap_process_hole(inode, fieinfo, &cache, backref_cache,
+ ret = fiemap_process_hole(inode, fieinfo, &cache, backref_ctx,
0, 0, 0, roots, tmp_ulist,
prev_extent_end, lockend - 1);
if (ret < 0)
@@ -4119,7 +4119,7 @@ check_eof_delalloc:
out_unlock:
unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
out:
- kfree(backref_cache);
+ kfree(backref_ctx);
btrfs_free_path(path);
ulist_free(roots);
ulist_free(tmp_ulist);