/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_TYPES_H
#define _BCACHEFS_BTREE_TYPES_H

#include <linux/list.h>
#include <linux/rhashtable.h>

#include "bbpos_types.h"
#include "btree_key_cache_types.h"
#include "buckets_types.h"
#include "darray.h"
#include "errcode.h"
#include "journal_types.h"
#include "replicas_types.h"
#include "six.h"

struct open_bucket;
struct btree_update;
struct btree_trans;

#define MAX_BSETS		3U

struct btree_nr_keys {
	/*
	 * Amount of live metadata (i.e. size of node after a compaction) in
	 * units of u64s
	 */
	u16			live_u64s;
	u16			bset_u64s[MAX_BSETS];

	/* live keys only: */
	u16			packed_keys;
	u16			unpacked_keys;
};

struct bset_tree {
	/*
	 * We construct a binary tree in an array as if the array
	 * started at 1, so that things line up on the same cachelines
	 * better: see comments in bset.c at cacheline_to_bkey() for
	 * details
	 */

	/* size of the binary tree and prev array */
	u16			size;

	/* function of size - precalculated for to_inorder() */
	u16			extra;

	u16			data_offset;
	u16			aux_data_offset;
	u16			end_offset;
};
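
/*
 * Illustrative note: with the 1-based array layout described above, the
 * children of tree node j live at 2*j and 2*j + 1 (node 1's children are
 * nodes 2 and 3), so a node and its children tend to share cachelines as
 * the binary search descends.
 */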

struct btree_write {
	struct journal_entry_pin	journal;
};

struct btree_alloc {
	struct open_buckets	ob;
	__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX);
};

struct btree_bkey_cached_common {
	struct six_lock		lock;
	u8			level;
	u8			btree_id;
	bool			cached;
};
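
/*
 * struct btree_bkey_cached_common is the header shared by struct btree and
 * struct bkey_cached (both embed it as .c), so locking code can operate on
 * either; see btree_node_pos() below for an example of dispatching on
 * ->cached.
 */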

struct btree {
	struct btree_bkey_cached_common c;

	struct rhash_head	hash;
	u64			hash_val;

	unsigned long		flags;
	u16			written;
	u8			nsets;
	u8			nr_key_bits;
	u16			version_ondisk;

	struct bkey_format	format;

	struct btree_node	*data;
	void			*aux_data;

	/*
	 * Sets of sorted keys - the real btree node - plus a binary search tree
	 *
	 * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
	 * to the memory we have allocated for this btree node. Additionally,
	 * set[0]->data points to the entire btree node as it exists on disk.
	 */
	struct bset_tree	set[MAX_BSETS];

	struct btree_nr_keys	nr;
	u16			sib_u64s[2];
	u16			whiteout_u64s;
	u8			byte_order;
	u8			unpack_fn_len;

	struct btree_write	writes[2];

	/* Key/pointer for this btree node */
	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);

	/*
	 * XXX: add a delete sequence number, so when bch2_btree_node_relock()
	 * fails because the lock sequence number has changed - i.e. the
	 * contents were modified - we can still relock the node if it's still
	 * the one we want, without redoing the traversal
	 */

	/*
	 * For asynchronous splits/interior node updates:
	 * When we do a split, we allocate new child nodes and update the parent
	 * node to point to them: we update the parent in memory immediately,
	 * but then we must wait until the children have been written out before
	 * the update to the parent can be written - this is a list of the
	 * btree_updates that are blocking this node from being
	 * written:
	 */
	struct list_head	write_blocked;

	/*
	 * Also for asynchronous splits/interior node updates:
	 * If a btree node isn't reachable yet, we don't want to kick off
	 * another write - because that write also won't yet be reachable and
	 * marking it as completed before it's reachable would be incorrect:
	 */
	unsigned long		will_make_reachable;

	struct open_buckets	ob;

	/* lru list */
	struct list_head	list;
};

struct btree_cache {
	struct rhashtable	table;
	bool			table_init_done;
	/*
	 * We never free a struct btree, except on shutdown - we just put it on
	 * the btree_cache_freed list and reuse it later. This simplifies the
	 * code, and it doesn't cost us much memory as the memory usage is
	 * dominated by buffers that hold the actual btree node data and those
	 * can be freed - and the number of struct btrees allocated is
	 * effectively bounded.
	 *
	 * btree_cache_freeable effectively is a small cache - we use it because
	 * high order page allocations can be rather expensive, and it's quite
	 * common to delete and allocate btree nodes in quick succession. It
	 * should never grow past ~2-3 nodes in practice.
	 */
	struct mutex		lock;
	struct list_head	live;
	struct list_head	freeable;
	struct list_head	freed_pcpu;
	struct list_head	freed_nonpcpu;

	/* Number of elements in live + freeable lists */
	unsigned		used;
	unsigned		reserve;
	atomic_t		dirty;
	struct shrinker		*shrink;

	/*
	 * If we need to allocate memory for a new btree node and that
	 * allocation fails, we can cannibalize another node in the btree cache
	 * to satisfy the allocation - lock to guarantee only one thread does
	 * this at a time:
	 */
	struct task_struct	*alloc_lock;
	struct closure_waitlist	alloc_wait;

	struct bbpos		pinned_nodes_start;
	struct bbpos		pinned_nodes_end;
	u64			pinned_nodes_leaf_mask;
	u64			pinned_nodes_interior_mask;
};
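
/*
 * Illustrative sketch of the cannibalize protocol above (see btree_cache.c
 * for the authoritative sequence; signatures may differ by version):
 *
 *	bch2_btree_cache_cannibalize_lock(c, &cl);   // become the one thread
 *	b = bch2_btree_node_mem_alloc(...);	     // may steal a cached node
 *	bch2_btree_cache_cannibalize_unlock(c);
 */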

struct btree_node_iter {
	struct btree_node_iter_set {
		u16	k, end;
	} data[MAX_BSETS];
};

/*
 * Iterate over all possible positions, synthesizing deleted keys for holes:
 */
static const __maybe_unused u16 BTREE_ITER_SLOTS		= 1 << 0;
/*
 * Indicates that intent locks should be taken on leaf nodes, because we expect
 * to be doing updates:
 */
static const __maybe_unused u16 BTREE_ITER_INTENT		= 1 << 1;
/*
 * Causes the btree iterator code to prefetch additional btree nodes from disk:
 */
static const __maybe_unused u16 BTREE_ITER_PREFETCH		= 1 << 2;
/*
 * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
 * @pos or the first key strictly greater than @pos
 */
static const __maybe_unused u16 BTREE_ITER_IS_EXTENTS		= 1 << 3;
static const __maybe_unused u16 BTREE_ITER_NOT_EXTENTS		= 1 << 4;
static const __maybe_unused u16 BTREE_ITER_CACHED		= 1 << 5;
static const __maybe_unused u16 BTREE_ITER_WITH_KEY_CACHE	= 1 << 6;
static const __maybe_unused u16 BTREE_ITER_WITH_UPDATES		= 1 << 7;
static const __maybe_unused u16 BTREE_ITER_WITH_JOURNAL		= 1 << 8;
static const __maybe_unused u16 __BTREE_ITER_ALL_SNAPSHOTS	= 1 << 9;
static const __maybe_unused u16 BTREE_ITER_ALL_SNAPSHOTS	= 1 << 10;
static const __maybe_unused u16 BTREE_ITER_FILTER_SNAPSHOTS	= 1 << 11;
static const __maybe_unused u16 BTREE_ITER_NOPRESERVE		= 1 << 12;
static const __maybe_unused u16 BTREE_ITER_CACHED_NOFILL	= 1 << 13;
static const __maybe_unused u16 BTREE_ITER_KEY_CACHE_FILL	= 1 << 14;
#define __BTREE_ITER_FLAGS_END					       15
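
/*
 * Illustrative combination of the flags above (not from this file): an
 * update path typically does
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes, pos,
 *			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 *
 * so lookups synthesize deleted keys for holes and leaf nodes are intent
 * locked in anticipation of the update.
 */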

enum btree_path_uptodate {
	BTREE_ITER_UPTODATE		= 0,
	BTREE_ITER_NEED_RELOCK		= 1,
	BTREE_ITER_NEED_TRAVERSE	= 2,
};

#if defined(CONFIG_BCACHEFS_LOCK_TIME_STATS) || defined(CONFIG_BCACHEFS_DEBUG)
#define TRACK_PATH_ALLOCATED
#endif

typedef u16 btree_path_idx_t;

struct btree_path {
	btree_path_idx_t	sorted_idx;
	u8			ref;
	u8			intent_ref;

	/* btree_iter_copy starts here: */
	struct bpos		pos;

	enum btree_id		btree_id:5;
	bool			cached:1;
	bool			preserve:1;
	enum btree_path_uptodate uptodate:2;
	/*
	 * When true, failing to relock this path will cause the transaction to
	 * restart:
	 */
	bool			should_be_locked:1;
	unsigned		level:3,
				locks_want:3;
	u8			nodes_locked;

	struct btree_path_level {
		struct btree	*b;
		struct btree_node_iter iter;
		u32		lock_seq;
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
		u64             lock_taken_time;
#endif
	}			l[BTREE_MAX_DEPTH];
#ifdef TRACK_PATH_ALLOCATED
	unsigned long		ip_allocated;
#endif
};

static inline struct btree_path_level *path_l(struct btree_path *path)
{
	return path->l + path->level;
}

static inline unsigned long btree_path_ip_allocated(struct btree_path *path)
{
#ifdef TRACK_PATH_ALLOCATED
	return path->ip_allocated;
#else
	return _THIS_IP_;
#endif
}

/*
 * @pos			- iterator's current position
 * @level		- current btree depth
 * @locks_want		- btree level below which we start taking intent locks
 * @nodes_locked	- bitfield indicating which btree levels are locked
 *
 * Note: all but @pos now live in the iterator's btree_path (struct
 * btree_path, above), not in struct btree_iter itself.
 */
struct btree_iter {
	struct btree_trans	*trans;
	btree_path_idx_t	path;
	btree_path_idx_t	update_path;
	btree_path_idx_t	key_cache_path;

	enum btree_id		btree_id:8;
	u8			min_depth;

	/* btree_iter_copy starts here: */
	u16			flags;

	/* When we're filtering by snapshot, the snapshot ID we're looking for: */
	unsigned		snapshot;

	struct bpos		pos;
	/*
	 * Current unpacked key - so that bch2_btree_iter_next()/
	 * bch2_btree_iter_next_slot() can correctly advance pos.
	 */
	struct bkey		k;

	/* BTREE_ITER_WITH_JOURNAL: */
	size_t			journal_idx;
#ifdef TRACK_PATH_ALLOCATED
	unsigned long		ip_allocated;
#endif
};

#define BKEY_CACHED_ACCESSED		0
#define BKEY_CACHED_DIRTY		1

struct bkey_cached {
	struct btree_bkey_cached_common c;

	unsigned long		flags;
	u16			u64s;
	bool			valid;
	u32			btree_trans_barrier_seq;
	struct bkey_cached_key	key;

	struct rhash_head	hash;
	struct list_head	list;

	struct journal_entry_pin journal;
	u64			seq;

	struct bkey_i		*k;
};

static inline struct bpos btree_node_pos(struct btree_bkey_cached_common *b)
{
	return !b->cached
		? container_of(b, struct btree, c)->key.k.p
		: container_of(b, struct bkey_cached, c)->key.pos;
}

struct btree_insert_entry {
	unsigned		flags;
	u8			bkey_type;
	enum btree_id		btree_id:8;
	u8			level:4;
	bool			cached:1;
	bool			insert_trigger_run:1;
	bool			overwrite_trigger_run:1;
	bool			key_cache_already_flushed:1;
	/*
	 * @old_k may be a key from the journal; @old_btree_u64s always refers
	 * to the size of the key being overwritten in the btree:
	 */
	u8			old_btree_u64s;
	btree_path_idx_t	path;
	struct bkey_i		*k;
	/* key being overwritten: */
	struct bkey		old_k;
	const struct bch_val	*old_v;
	unsigned long		ip_allocated;
};

#define BTREE_ITER_INITIAL		64
#define BTREE_ITER_MAX			(1U << 10)

struct btree_trans_commit_hook;
typedef int (btree_trans_commit_hook_fn)(struct btree_trans *, struct btree_trans_commit_hook *);

struct btree_trans_commit_hook {
	btree_trans_commit_hook_fn	*fn;
	struct btree_trans_commit_hook	*next;
};
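
/*
 * Illustrative only: a hook is chained onto trans->hooks (normally via
 * bch2_trans_commit_hook(), declared in btree_update.h) so that @fn runs
 * as part of the transaction commit:
 *
 *	struct btree_trans_commit_hook h = { .fn = my_commit_fn };
 *
 * where my_commit_fn is a hypothetical btree_trans_commit_hook_fn.
 */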

#define BTREE_TRANS_MEM_MAX	(1U << 16)

#define BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS	10000

struct btree_trans_paths {
	unsigned long		nr_paths;
	struct btree_path	paths[];
};

struct btree_trans {
	struct bch_fs		*c;

	unsigned long		*paths_allocated;
	struct btree_path	*paths;
	btree_path_idx_t	*sorted;
	struct btree_insert_entry *updates;

	void			*mem;
	unsigned		mem_top;
	unsigned		mem_bytes;

	btree_path_idx_t	nr_sorted;
	btree_path_idx_t	nr_paths;
	btree_path_idx_t	nr_paths_max;
	u8			fn_idx;
	u8			nr_updates;
	u8			lock_must_abort;
	bool			lock_may_not_fail:1;
	bool			srcu_held:1;
	bool			used_mempool:1;
	bool			in_traverse_all:1;
	bool			paths_sorted:1;
	bool			memory_allocation_failure:1;
	bool			journal_transaction_names:1;
	bool			journal_replay_not_finished:1;
	bool			notrace_relock_fail:1;
	bool			write_locked:1;
	enum bch_errcode	restarted:16;
	u32			restart_count;

	u64			last_begin_time;
	unsigned long		last_begin_ip;
	unsigned long		last_restarted_ip;
	unsigned long		srcu_lock_time;

	const char		*fn;
	struct btree_bkey_cached_common *locking;
	struct six_lock_waiter	locking_wait;
	int			srcu_idx;

	/* update path: */
	u16			journal_entries_u64s;
	u16			journal_entries_size;
	struct jset_entry	*journal_entries;

	struct btree_trans_commit_hook *hooks;
	struct journal_entry_pin *journal_pin;

	struct journal_res	journal_res;
	u64			*journal_seq;
	struct disk_reservation *disk_res;

	struct bch_fs_usage_base fs_usage_delta;

	unsigned		journal_u64s;
	unsigned		extra_disk_res; /* XXX kill */
	struct replicas_delta_list *fs_usage_deltas;

	/* Entries before this are zeroed out on every bch2_trans_get() call */

	struct list_head	list;
	struct closure		ref;

	unsigned long		_paths_allocated[BITS_TO_LONGS(BTREE_ITER_INITIAL)];
	struct btree_trans_paths trans_paths;
	struct btree_path	_paths[BTREE_ITER_INITIAL];
	btree_path_idx_t	_sorted[BTREE_ITER_INITIAL + 4];
	struct btree_insert_entry _updates[BTREE_ITER_INITIAL];
};

static inline struct btree_path *btree_iter_path(struct btree_trans *trans, struct btree_iter *iter)
{
	return trans->paths + iter->path;
}

static inline struct btree_path *btree_iter_key_cache_path(struct btree_trans *trans, struct btree_iter *iter)
{
	return iter->key_cache_path
		? trans->paths + iter->key_cache_path
		: NULL;
}

#define BCH_BTREE_WRITE_TYPES()						\
	x(initial,		0)					\
	x(init_next_bset,	1)					\
	x(cache_reclaim,	2)					\
	x(journal_reclaim,	3)					\
	x(interior,		4)

enum btree_write_type {
#define x(t, n) BTREE_WRITE_##t,
	BCH_BTREE_WRITE_TYPES()
#undef x
	BTREE_WRITE_TYPE_NR,
};

#define BTREE_WRITE_TYPE_MASK	(roundup_pow_of_two(BTREE_WRITE_TYPE_NR) - 1)
#define BTREE_WRITE_TYPE_BITS	ilog2(roundup_pow_of_two(BTREE_WRITE_TYPE_NR))
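
/*
 * Worked example: the table above defines five write types, so
 * BTREE_WRITE_TYPE_NR == 5 and roundup_pow_of_two(5) == 8, giving
 * BTREE_WRITE_TYPE_MASK == 7 and BTREE_WRITE_TYPE_BITS == 3: the write type
 * occupies the low three bits of btree->flags.
 */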

#define BTREE_FLAGS()							\
	x(read_in_flight)						\
	x(read_error)							\
	x(dirty)							\
	x(need_write)							\
	x(write_blocked)						\
	x(will_make_reachable)						\
	x(noevict)							\
	x(write_idx)							\
	x(accessed)							\
	x(write_in_flight)						\
	x(write_in_flight_inner)					\
	x(just_written)							\
	x(dying)							\
	x(fake)								\
	x(need_rewrite)							\
	x(never_write)

enum btree_flags {
	/* Low bits of b->flags hold the write type; flags proper start after: */
	BTREE_NODE_FLAGS_START = BTREE_WRITE_TYPE_BITS - 1,
#define x(flag)	BTREE_NODE_##flag,
	BTREE_FLAGS()
#undef x
};

#define x(flag)								\
static inline bool btree_node_ ## flag(struct btree *b)			\
{	return test_bit(BTREE_NODE_ ## flag, &b->flags); }		\
									\
static inline void set_btree_node_ ## flag(struct btree *b)		\
{	set_bit(BTREE_NODE_ ## flag, &b->flags); }			\
									\
static inline void clear_btree_node_ ## flag(struct btree *b)		\
{	clear_bit(BTREE_NODE_ ## flag, &b->flags); }

BTREE_FLAGS()
#undef x
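
/*
 * The x-macro above generates three accessors per flag, e.g. for "dirty":
 *
 *	btree_node_dirty(b)		- test BTREE_NODE_dirty in b->flags
 *	set_btree_node_dirty(b)		- set it
 *	clear_btree_node_dirty(b)	- clear it
 */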

static inline struct btree_write *btree_current_write(struct btree *b)
{
	return b->writes + btree_node_write_idx(b);
}

static inline struct btree_write *btree_prev_write(struct btree *b)
{
	return b->writes + (btree_node_write_idx(b) ^ 1);
}

static inline struct bset_tree *bset_tree_last(struct btree *b)
{
	EBUG_ON(!b->nsets);
	return b->set + b->nsets - 1;
}

static inline void *
__btree_node_offset_to_ptr(const struct btree *b, u16 offset)
{
	return (void *) ((u64 *) b->data + 1 + offset);
}

static inline u16
__btree_node_ptr_to_offset(const struct btree *b, const void *p)
{
	u16 ret = (u64 *) p - 1 - (u64 *) b->data;

	EBUG_ON(__btree_node_offset_to_ptr(b, ret) != p);
	return ret;
}

static inline struct bset *bset(const struct btree *b,
				const struct bset_tree *t)
{
	return __btree_node_offset_to_ptr(b, t->data_offset);
}

static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
{
	t->end_offset =
		__btree_node_ptr_to_offset(b, vstruct_last(bset(b, t)));
}

static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
				  const struct bset *i)
{
	t->data_offset = __btree_node_ptr_to_offset(b, i);
	set_btree_bset_end(b, t);
}

static inline struct bset *btree_bset_first(struct btree *b)
{
	return bset(b, b->set);
}

static inline struct bset *btree_bset_last(struct btree *b)
{
	return bset(b, bset_tree_last(b));
}

static inline u16
__btree_node_key_to_offset(const struct btree *b, const struct bkey_packed *k)
{
	return __btree_node_ptr_to_offset(b, k);
}

static inline struct bkey_packed *
__btree_node_offset_to_key(const struct btree *b, u16 k)
{
	return __btree_node_offset_to_ptr(b, k);
}

static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
{
	return t->data_offset + offsetof(struct bset, _data) / sizeof(u64);
}

#define btree_bkey_first(_b, _t)					\
({									\
	EBUG_ON(bset(_b, _t)->start !=					\
		__btree_node_offset_to_key(_b, btree_bkey_first_offset(_t)));\
									\
	bset(_b, _t)->start;						\
})

#define btree_bkey_last(_b, _t)						\
({									\
	EBUG_ON(__btree_node_offset_to_key(_b, (_t)->end_offset) !=	\
		vstruct_last(bset(_b, _t)));				\
									\
	__btree_node_offset_to_key(_b, (_t)->end_offset);		\
})
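
/*
 * Illustrative only: the usual loop over a bset with the helpers above,
 * assuming bkey_p_next() from bkey.h:
 *
 *	struct bkey_packed *k;
 *
 *	for (k = btree_bkey_first(b, t);
 *	     k != btree_bkey_last(b, t);
 *	     k = bkey_p_next(k))
 *		...;
 */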

static inline unsigned bset_u64s(struct bset_tree *t)
{
	return t->end_offset - t->data_offset -
		sizeof(struct bset) / sizeof(u64);
}

static inline unsigned bset_dead_u64s(struct btree *b, struct bset_tree *t)
{
	return bset_u64s(t) - b->nr.bset_u64s[t - b->set];
}

static inline unsigned bset_byte_offset(struct btree *b, void *i)
{
	return i - (void *) b->data;
}

enum btree_node_type {
	BKEY_TYPE_btree,
#define x(kwd, val, ...) BKEY_TYPE_##kwd = val + 1,
	BCH_BTREE_IDS()
#undef x
	BKEY_TYPE_NR
};

/* Type of a key in btree @id at level @level: */
static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
{
	return level ? BKEY_TYPE_btree : (unsigned) id + 1;
}
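
/*
 * Worked example: a leaf (level 0) of the extents btree holds keys of type
 * BKEY_TYPE_extents == BTREE_ID_extents + 1, while interior nodes of every
 * btree hold BKEY_TYPE_btree, i.e. btree node pointers.
 */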

/* Type of keys @b contains: */
static inline enum btree_node_type btree_node_type(struct btree *b)
{
	return __btree_node_type(b->c.level, b->c.btree_id);
}

const char *bch2_btree_node_type_str(enum btree_node_type);

#define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS		\
	(BIT_ULL(BKEY_TYPE_extents)|			\
	 BIT_ULL(BKEY_TYPE_alloc)|			\
	 BIT_ULL(BKEY_TYPE_inodes)|			\
	 BIT_ULL(BKEY_TYPE_stripes)|			\
	 BIT_ULL(BKEY_TYPE_reflink)|			\
	 BIT_ULL(BKEY_TYPE_subvolumes)|			\
	 BIT_ULL(BKEY_TYPE_btree))

#define BTREE_NODE_TYPE_HAS_ATOMIC_TRIGGERS		\
	(BIT_ULL(BKEY_TYPE_alloc)|			\
	 BIT_ULL(BKEY_TYPE_inodes)|			\
	 BIT_ULL(BKEY_TYPE_stripes)|			\
	 BIT_ULL(BKEY_TYPE_snapshots))

#define BTREE_NODE_TYPE_HAS_TRIGGERS			\
	(BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS|		\
	 BTREE_NODE_TYPE_HAS_ATOMIC_TRIGGERS)

static inline bool btree_node_type_needs_gc(enum btree_node_type type)
{
	return BTREE_NODE_TYPE_HAS_TRIGGERS & BIT_ULL(type);
}
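
/*
 * Note the "nr + 1" shift in btree_node_type_is_extents() below: it mirrors
 * __btree_node_type(), where leaf node types are btree id + 1 (type 0 being
 * BKEY_TYPE_btree). The btree_type_has_*() helpers further down take a
 * btree id rather than a node type, so they shift by plain "nr".
 */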

static inline bool btree_node_type_is_extents(enum btree_node_type type)
{
	const unsigned mask = 0
#define x(name, nr, flags, ...)	|((!!((flags) & BTREE_ID_EXTENTS)) << (nr + 1))
	BCH_BTREE_IDS()
#undef x
	;

	return (1U << type) & mask;
}

static inline bool btree_id_is_extents(enum btree_id btree)
{
	return btree_node_type_is_extents(__btree_node_type(0, btree));
}

static inline bool btree_type_has_snapshots(enum btree_id id)
{
	const unsigned mask = 0
#define x(name, nr, flags, ...)	|((!!((flags) & BTREE_ID_SNAPSHOTS)) << nr)
	BCH_BTREE_IDS()
#undef x
	;

	return (1U << id) & mask;
}

static inline bool btree_type_has_snapshot_field(enum btree_id id)
{
	const unsigned mask = 0
#define x(name, nr, flags, ...)	|((!!((flags) & (BTREE_ID_SNAPSHOT_FIELD|BTREE_ID_SNAPSHOTS))) << nr)
	BCH_BTREE_IDS()
#undef x
	;

	return (1U << id) & mask;
}

static inline bool btree_type_has_ptrs(enum btree_id id)
{
	const unsigned mask = 0
#define x(name, nr, flags, ...)	|((!!((flags) & BTREE_ID_DATA)) << nr)
	BCH_BTREE_IDS()
#undef x
	;

	return (1U << id) & mask;
}

struct btree_root {
	struct btree		*b;

	/* On disk root - see async splits: */
	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
	u8			level;
	u8			alive;
	s16			error;
};

enum btree_gc_coalesce_fail_reason {
	BTREE_GC_COALESCE_FAIL_RESERVE_GET,
	BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC,
	BTREE_GC_COALESCE_FAIL_FORMAT_FITS,
};

enum btree_node_sibling {
	btree_prev_sib,
	btree_next_sib,
};

struct get_locks_fail {
	unsigned	l;
	struct btree	*b;
};

#endif /* _BCACHEFS_BTREE_TYPES_H */