/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_SUBPAGE_H
#define BTRFS_SUBPAGE_H

#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/sizes.h>
#include "btrfs_inode.h"
#include "fs.h"

struct address_space;
struct folio;

/*
 * Extra info for the subpage bitmap.
 *
 * For subpage we pack all uptodate/dirty/writeback/ordered bitmaps into
 * one larger bitmap.
 *
 * This structure records how they are organized in the bitmap:
 *
 * /- uptodate          /- dirty        /- ordered
 * |			|		|
 * v			v		v
 * |u|u|u|u|........|u|u|d|d|.......|d|d|o|o|.......|o|o|
 * |< sectors_per_page >|
 *
 * Unlike regular macro-like enums, these names are not upper-case, as they
 * are used in various macros to generate function names.
 */
enum {
	btrfs_bitmap_nr_uptodate = 0,
	btrfs_bitmap_nr_dirty,
	btrfs_bitmap_nr_writeback,
	btrfs_bitmap_nr_ordered,
	btrfs_bitmap_nr_checked,
	btrfs_bitmap_nr_locked,
	btrfs_bitmap_nr_max
};
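
/*
 * Example (illustrative sketch, not part of this header): locating the first
 * bit of one flag for a byte range inside the packed bitmap. This mirrors
 * what fs/btrfs/subpage.c does; the blocks_per_folio value is assumed to
 * come from a helper such as btrfs_blocks_per_folio().
 *
 *	unsigned int start_bit;
 *
 *	start_bit = offset_in_folio(folio, start) >> fs_info->sectorsize_bits;
 *	start_bit += blocks_per_folio * btrfs_bitmap_nr_dirty;
 *	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
 */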

/*
 * Structure to track the status of each sector inside a page, attached to
 * page::private for both data and metadata inodes.
 */
struct btrfs_subpage {
	/* Common members for both data and metadata pages */
	spinlock_t lock;
	union {
		/*
		 * Structure only used by metadata.
		 *
		 * @eb_refs should only be manipulated under private_lock, as
		 * it manages whether the subpage can be detached.
		 */
		atomic_t eb_refs;

		/*
		 * Structure only used by data.
		 *
		 * How many sectors inside the page are locked.
		 */
		atomic_t nr_locked;
	};
	unsigned long bitmaps[];
};

enum btrfs_subpage_type {
	BTRFS_SUBPAGE_METADATA,
	BTRFS_SUBPAGE_DATA,
};

#if PAGE_SIZE > BTRFS_MIN_BLOCKSIZE
/*
 * Subpage support for metadata is more complex, as we can have dummy extent
 * buffers, where folios have no mapping to determine the owning inode.
 *
 * Thankfully we only need to check if node size is smaller than page size.
 * Even with larger folio support, we will only allocate a folio as large as
 * node size.
 * Thus if nodesize < PAGE_SIZE, we know metadata needs the subpage routines.
 */
static inline bool btrfs_meta_is_subpage(const struct btrfs_fs_info *fs_info)
{
	return fs_info->nodesize < PAGE_SIZE;
}
static inline bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info,
				    struct folio *folio)
{
	if (folio->mapping && folio->mapping->host)
		ASSERT(is_data_inode(BTRFS_I(folio->mapping->host)));
	return fs_info->sectorsize < folio_size(folio);
}
#else
static inline bool btrfs_meta_is_subpage(const struct btrfs_fs_info *fs_info)
{
	return false;
}
static inline bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info,
				    struct folio *folio)
{
	if (folio->mapping && folio->mapping->host)
		ASSERT(is_data_inode(BTRFS_I(folio->mapping->host)));
	return false;
}
#endif
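
/*
 * Example (hedged sketch): roughly the branching the btrfs_folio_*() wrappers
 * below perform internally, and the shape a direct caller would take. Exact
 * call sites differ.
 *
 *	if (!btrfs_is_subpage(fs_info, folio))
 *		folio_mark_dirty(folio);
 *	else
 *		btrfs_subpage_set_dirty(fs_info, folio, start, len);
 */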

int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct folio *folio, enum btrfs_subpage_type type);
void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio,
			  enum btrfs_subpage_type type);

/* Allocate additional data where page represents more than one sector */
struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
				size_t fsize, enum btrfs_subpage_type type);
void btrfs_free_subpage(struct btrfs_subpage *subpage);
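
/*
 * Example (sizing sketch, assuming it mirrors btrfs_alloc_subpage() in
 * fs/btrfs/subpage.c): the flexible bitmaps[] array packs btrfs_bitmap_nr_max
 * per-block bitmaps, so the allocation is sized as:
 *
 *	unsigned int blocks = fsize >> fs_info->sectorsize_bits;
 *	struct btrfs_subpage *subpage;
 *
 *	subpage = kzalloc(struct_size(subpage, bitmaps,
 *			BITS_TO_LONGS(btrfs_bitmap_nr_max * blocks)), GFP_NOFS);
 */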

void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);
void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);
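
/*
 * Example (hedged sketch of the locking expected around eb_refs; the lock
 * name follows recent kernels where address_space::private_lock became
 * i_private_lock):
 *
 *	spin_lock(&folio->mapping->i_private_lock);
 *	btrfs_folio_inc_eb_refs(fs_info, folio);
 *	spin_unlock(&folio->mapping->i_private_lock);
 */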

void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info,
			  struct folio *folio, u64 start, u32 len);
void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info,
			  struct folio *folio, u64 start, u32 len);
void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, unsigned long bitmap);
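
/*
 * Example (sketch of the intended pairing): mark a range locked before
 * submitting IO on it, and release it once the IO on that range completes.
 *
 *	btrfs_folio_set_lock(fs_info, folio, start, len);
 *	... submit and finish IO on [start, start + len) ...
 *	btrfs_folio_end_lock(fs_info, folio, start, len);
 */
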
/*
 * Template for subpage related operations.
 *
 * btrfs_subpage_*() are for call sites where the folio has subpage attached and
 * the range is ensured to be inside the folio's single page.
 *
 * btrfs_folio_*() are for call sites where the folio can be either subpage
 * or regular. The functions handle both cases, but the range still needs to
 * be inside one single page.
 *
 * btrfs_folio_clamp_*() are similar to btrfs_folio_*(), except the range doesn't
 * need to be inside the page. Those functions will truncate the range
 * automatically.
 *
 * Both btrfs_folio_*() and btrfs_folio_clamp_*() are for data folios.
 *
 * For metadata, one should use the btrfs_meta_folio_*() helpers instead. There
 * is no clamp version of the metadata helpers, as metadata either goes through
 * the subpage path (nodesize < PAGE_SIZE) or the regular folio helpers
 * (nodesize >= PAGE_SIZE, and our folio is never larger than nodesize).
 */
#define DECLARE_BTRFS_SUBPAGE_OPS(name)					\
void btrfs_subpage_set_##name(const struct btrfs_fs_info *fs_info,	\
		struct folio *folio, u64 start, u32 len);			\
void btrfs_subpage_clear_##name(const struct btrfs_fs_info *fs_info,	\
		struct folio *folio, u64 start, u32 len);			\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct folio *folio, u64 start, u32 len);			\
void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info,	\
		struct folio *folio, u64 start, u32 len);			\
void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info,	\
		struct folio *folio, u64 start, u32 len);			\
bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct folio *folio, u64 start, u32 len);			\
void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info,	\
		struct folio *folio, u64 start, u32 len);			\
void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info,	\
		struct folio *folio, u64 start, u32 len);			\
bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct folio *folio, u64 start, u32 len);		\
void btrfs_meta_folio_set_##name(struct folio *folio, const struct extent_buffer *eb); \
void btrfs_meta_folio_clear_##name(struct folio *folio, const struct extent_buffer *eb); \
bool btrfs_meta_folio_test_##name(struct folio *folio, const struct extent_buffer *eb);

DECLARE_BTRFS_SUBPAGE_OPS(uptodate);
DECLARE_BTRFS_SUBPAGE_OPS(dirty);
DECLARE_BTRFS_SUBPAGE_OPS(writeback);
DECLARE_BTRFS_SUBPAGE_OPS(ordered);
DECLARE_BTRFS_SUBPAGE_OPS(checked);
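
/*
 * Example (sketch): DECLARE_BTRFS_SUBPAGE_OPS(dirty) above declares, among
 * others, btrfs_folio_set_dirty() and btrfs_folio_clamp_set_dirty(). A caller
 * whose range may extend past the folio uses the clamp variant, which
 * truncates the range for it:
 *
 *	btrfs_folio_clamp_set_dirty(fs_info, folio, start, len);
 */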

/*
 * Helper for error cleanup, where a folio will have its dirty flag cleared,
 * with writeback started and finished.
 */
static inline void btrfs_folio_clamp_finish_io(struct btrfs_fs_info *fs_info,
					       struct folio *locked_folio,
					       u64 start, u32 len)
{
	btrfs_folio_clamp_clear_dirty(fs_info, locked_folio, start, len);
	btrfs_folio_clamp_set_writeback(fs_info, locked_folio, start, len);
	btrfs_folio_clamp_clear_writeback(fs_info, locked_folio, start, len);
}

bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct folio *folio, u64 start, u32 len);

void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len);
bool btrfs_meta_folio_clear_and_test_dirty(struct folio *folio, const struct extent_buffer *eb);
void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info,
				    struct folio *folio,
				    unsigned long *ret_bitmap);
void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
				      struct folio *folio, u64 start, u32 len);

#endif