path: root/fs/bcachefs/journal_io.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_JOURNAL_IO_H
#define _BCACHEFS_JOURNAL_IO_H

#include "darray.h"

void bch2_journal_pos_from_member_info_set(struct bch_fs *);
void bch2_journal_pos_from_member_info_resume(struct bch_fs *);

struct journal_ptr {
	bool		csum_good;
	u8		dev;
	u32		bucket;
	u32		bucket_offset;
	u64		sector;
};

/*
 * Only used for holding the journal entries we read in bch2_journal_read()
 * during recovery
 */
struct journal_replay {
	DARRAY_PREALLOCATED(struct journal_ptr, 8) ptrs;

	bool			csum_good;
	bool			ignore_blacklisted;
	bool			ignore_not_dirty;
	/* must be last: */
	struct jset		j;
};

static inline bool journal_replay_ignore(struct journal_replay *i)
{
	return !i || i->ignore_blacklisted || i->ignore_not_dirty;
}
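
/*
 * Usage sketch (illustrative, not part of this header): code that walks the
 * journal entries collected at read time skips blacklisted / not-dirty
 * entries via journal_replay_ignore().  The loop context and the
 * replay_keys_from() consumer below are assumptions for illustration only.
 *
 *	struct journal_replay *i = ...;	// one entry, inside a loop over entries
 *
 *	if (journal_replay_ignore(i))
 *		continue;		// NULL, blacklisted, or not dirty
 *
 *	replay_keys_from(&i->j);	// hypothetical consumer of the jset
 */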

static inline struct jset_entry *__jset_entry_type_next(struct jset *jset,
					struct jset_entry *entry, unsigned type)
{
	while (entry < vstruct_last(jset)) {
		if (entry->type == type)
			return entry;

		entry = vstruct_next(entry);
	}

	return NULL;
}

#define for_each_jset_entry_type(entry, jset, type)			\
	for (struct jset_entry *entry = (jset)->start;			\
	     (entry = __jset_entry_type_next(jset, entry, type));	\
	     entry = vstruct_next(entry))

#define jset_entry_for_each_key(_e, _k)					\
	for (struct bkey_i *_k = (_e)->start;				\
	     _k < vstruct_last(_e);					\
	     _k = bkey_next(_k))

#define for_each_jset_key(k, entry, jset)				\
	for_each_jset_entry_type(entry, jset, BCH_JSET_ENTRY_btree_keys)\
		jset_entry_for_each_key(entry, k)
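
/*
 * Usage sketch (illustrative only): for_each_jset_key() expands to a nested
 * loop - first over jset entries of type BCH_JSET_ENTRY_btree_keys, then over
 * the bkeys packed inside each entry.  Both iterator variables are declared
 * by the macros themselves; count_jset_keys() below is an assumption for
 * illustration, not an existing helper.
 *
 *	static unsigned count_jset_keys(struct jset *jset)
 *	{
 *		unsigned nr = 0;
 *
 *		for_each_jset_key(k, entry, jset)	// k is a struct bkey_i *
 *			nr += k->k.u64s;
 *		return nr;
 *	}
 */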

int bch2_journal_entry_validate(struct bch_fs *, struct jset *,
				struct jset_entry *, unsigned, int,
				enum bch_validate_flags);
void bch2_journal_entry_to_text(struct printbuf *, struct bch_fs *,
				struct jset_entry *);

void bch2_journal_ptrs_to_text(struct printbuf *, struct bch_fs *,
			       struct journal_replay *);
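
/*
 * Usage sketch (illustrative only): the *_to_text() helpers append human
 * readable output to a struct printbuf (PRINTBUF and printbuf_exit() come
 * from printbuf.h).  The bch_fs and journal_replay pointers below are
 * assumptions for illustration.
 *
 *	struct printbuf buf = PRINTBUF;
 *
 *	bch2_journal_ptrs_to_text(&buf, c, i);
 *	pr_info("%s\n", buf.buf);
 *	printbuf_exit(&buf);
 */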

int bch2_journal_read(struct bch_fs *, u64 *, u64 *, u64 *);

CLOSURE_CALLBACK(bch2_journal_write);

static inline struct jset_entry *jset_entry_init(struct jset_entry **end, size_t size)
{
	struct jset_entry *entry = *end;
	unsigned u64s = DIV_ROUND_UP(size, sizeof(u64));

	memset(entry, 0, u64s * sizeof(u64));
	/*
	 * The u64s field counts from the start of data, ignoring the shared
	 * fields.
	 */
	entry->u64s = cpu_to_le16(u64s - 1);

	*end = vstruct_next(*end);
	return entry;
}
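
/*
 * Usage sketch (illustrative only): appending a fixed-size entry to a jset
 * being built.  jset_entry_init() zeroes the space, sets ->u64s to the
 * payload size in u64s (excluding the leading u64 of header fields) and
 * advances the write cursor.  struct jset_entry_usage and
 * BCH_JSET_ENTRY_usage come from bcachefs_format.h; the surrounding setup is
 * an assumption for illustration.
 *
 *	struct jset_entry *end = ...;	// current end of the jset buffer
 *	struct jset_entry_usage *u =
 *		container_of(jset_entry_init(&end, sizeof(*u)),
 *			     struct jset_entry_usage, entry);
 *
 *	u->entry.type	= BCH_JSET_ENTRY_usage;
 *	u->v		= cpu_to_le64(value);
 */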

#endif /* _BCACHEFS_JOURNAL_IO_H */