summary refs log tree commit diff stats
path: root/fs/bcachefs/bkey_sort.c
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@gmail.com>2020-12-17 15:08:58 -0500
committerKent Overstreet <kent.overstreet@linux.dev>2023-10-22 17:08:50 -0400
commit07a1006ae81580c6a1b52b80e32fa9dadea1954b (patch)
tree7ac1a004d0209465c211f71f00818d9d3f176075 /fs/bcachefs/bkey_sort.c
parent8deed5f4e547e675cf8c1de88720c23c3c3093ca (diff)
downloadlinux-07a1006ae81580c6a1b52b80e32fa9dadea1954b.tar.gz
linux-07a1006ae81580c6a1b52b80e32fa9dadea1954b.tar.bz2
linux-07a1006ae81580c6a1b52b80e32fa9dadea1954b.zip
bcachefs: Reduce/kill BKEY_PADDED use
With various newer key types - stripe keys, inline data extents - the old approach of calculating the maximum size of the value is becoming more and more error prone. Better to switch to bkey_buf, which can dynamically allocate if necessary to handle any size bkey.

In particular we also want to get rid of BKEY_EXTENT_VAL_U64s_MAX.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/bkey_sort.c')
-rw-r--r--fs/bcachefs/bkey_sort.c18
1 file changed, 9 insertions, 9 deletions
diff --git a/fs/bcachefs/bkey_sort.c b/fs/bcachefs/bkey_sort.c
index 99e0a4011fae..2e1d9cd65f43 100644
--- a/fs/bcachefs/bkey_sort.c
+++ b/fs/bcachefs/bkey_sort.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
-#include "bkey_on_stack.h"
+#include "bkey_buf.h"
#include "bkey_sort.h"
#include "bset.h"
#include "extents.h"
@@ -187,11 +187,11 @@ bch2_sort_repack_merge(struct bch_fs *c,
bool filter_whiteouts)
{
struct bkey_packed *out = vstruct_last(dst), *k_packed;
- struct bkey_on_stack k;
+ struct bkey_buf k;
struct btree_nr_keys nr;
memset(&nr, 0, sizeof(nr));
- bkey_on_stack_init(&k);
+ bch2_bkey_buf_init(&k);
while ((k_packed = bch2_btree_node_iter_next_all(iter, src))) {
if (filter_whiteouts && bkey_whiteout(k_packed))
@@ -204,7 +204,7 @@ bch2_sort_repack_merge(struct bch_fs *c,
* node; we have to make a copy of the entire key before calling
* normalize
*/
- bkey_on_stack_realloc(&k, c, k_packed->u64s + BKEY_U64s);
+ bch2_bkey_buf_realloc(&k, c, k_packed->u64s + BKEY_U64s);
bch2_bkey_unpack(src, k.k, k_packed);
if (filter_whiteouts &&
@@ -215,7 +215,7 @@ bch2_sort_repack_merge(struct bch_fs *c,
}
dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
- bkey_on_stack_exit(&k, c);
+ bch2_bkey_buf_exit(&k, c);
return nr;
}
@@ -315,11 +315,11 @@ bch2_extent_sort_fix_overlapping(struct bch_fs *c, struct bset *dst,
struct bkey l_unpacked, r_unpacked;
struct bkey_s l, r;
struct btree_nr_keys nr;
- struct bkey_on_stack split;
+ struct bkey_buf split;
unsigned i;
memset(&nr, 0, sizeof(nr));
- bkey_on_stack_init(&split);
+ bch2_bkey_buf_init(&split);
sort_iter_sort(iter, extent_sort_fix_overlapping_cmp);
for (i = 0; i < iter->used;) {
@@ -379,7 +379,7 @@ bch2_extent_sort_fix_overlapping(struct bch_fs *c, struct bset *dst,
/*
* r wins, but it overlaps in the middle of l - split l:
*/
- bkey_on_stack_reassemble(&split, c, l.s_c);
+ bch2_bkey_buf_reassemble(&split, c, l.s_c);
bch2_cut_back(bkey_start_pos(r.k), split.k);
bch2_cut_front_s(r.k->p, l);
@@ -398,7 +398,7 @@ bch2_extent_sort_fix_overlapping(struct bch_fs *c, struct bset *dst,
dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
- bkey_on_stack_exit(&split, c);
+ bch2_bkey_buf_exit(&split, c);
return nr;
}