// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
*/
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>
#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"
void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
unsigned int from, unsigned int len)
{
struct buffer_head *head = page_buffers(page);
unsigned int bsize = head->b_size;
struct buffer_head *bh;
unsigned int to = from + len;
unsigned int start, end;
for (bh = head, start = 0; bh != head || !start;
bh = bh->b_this_page, start = end) {
end = start + bsize;
if (end <= from)
continue;
if (start >= to)
break;
set_buffer_uptodate(bh);
gfs2_trans_add_data(ip->i_gl, bh);
}
}
/**
* gfs2_get_block_noalloc - Fills in a buffer head with details about a block
* @inode: The inode
* @lblock: The block number to look up
* @bh_result: The buffer head to return the result in
* @create: Non-zero if we may add block to the file
*
* Returns: errno
*/
static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
struct buffer_head *bh_result, int create)
{
int error;
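	/*
	 * Note: @create is effectively ignored here; gfs2_block_map() is
	 * called with create == 0, so this never allocates, and holes are
	 * reported as -ENODATA rather than being filled in.
	 */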
error = gfs2_block_map(inode, lblock, bh_result, 0);
if (error)
return error;
if (!buffer_mapped(bh_result))
return -ENODATA;
return 0;
}
/**
* gfs2_write_jdata_page - gfs2 jdata-specific version of block_write_full_page
* @page: The page to write
* @wbc: The writeback control
*
* This is the same as calling block_write_full_page, but it also
* writes pages outside of i_size
*/
static int gfs2_write_jdata_page(struct page *page,
struct writeback_control *wbc)
{
struct inode * const inode = page->mapping->host;
loff_t i_size = i_size_read(inode);
const pgoff_t end_index = i_size >> PAGE_SHIFT;
unsigned offset;
/*
* The page straddles i_size. It must be zeroed out on each and every
* writepage invocation because it may be mmapped. "A file is mapped
* in multiples of the page size. For a file that is not a multiple of
* the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
*/
offset = i_size & (PAGE_SIZE - 1);
if (page->index == end_index && offset)
zero_user_segment(page, offset, PAGE_SIZE);
return __block_write_full_page(inode, page, gfs2_get_block_noalloc, wbc,
end_buffer_async_write);
}
/**
* __gfs2_jdata_writepage - The core of jdata writepage
* @page: The page to write
* @wbc: The writeback control
*
* This is shared between writepage and writepages and implements the
* core of the writepage operation. If a transaction is required then
* PageChecked will have been set and the transaction will have
* already been started before this is called.
*/
static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
struct inode *inode = page->mapping->host;
struct gfs2_inode *ip = GFS2_I(inode);
if (PageChecked(page)) {
ClearPageChecked(page);
if (!page_has_buffers(page)) {
create_empty_buffers(page, inode->i_sb->s_blocksize,
BIT(BH_Dirty)|BIT(BH_Uptodate));
}
		gfs2_page_add_databufs(ip, page, 0, PAGE_SIZE);
}
return gfs2_write_jdata_page(page, wbc);
}
/**
* gfs2_jdata_writepage - Write complete page
* @page: Page to write
* @wbc: The writeback control
*
* Returns: errno
*
*/
static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
struct inode *inode = page->mapping->host;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
goto out;
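	/*
	 * A checked page requires a transaction, which writepage cannot start
	 * here; nor can we write while a transaction is already running.
	 * Redirty the page and let ->writepages deal with it.
	 */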
if (PageChecked(page) || current->journal_info)
goto out_ignore;
return __gfs2_jdata_writepage(page, wbc);
out_ignore:
redirty_page_for_writepage(wbc, page);
out:
unlock_page(page);
return 0;
}
/**
* gfs2_writepages - Write a bunch of dirty pages back to disk
* @mapping: The mapping to write
* @wbc: Write-back control
*
* Used for both ordered and writeback modes.
*/
static int gfs2_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
struct iomap_writepage_ctx wpc = { };
int ret;
/*
* Even if we didn't write any pages here, we might still be holding
* dirty pages in the ail. We forcibly flush the ail because we don't
* want balance_dirty_pages() to loop indefinitely trying to write out
* pages held in the ail that it can't find.
*/
ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
if (ret == 0)
set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
return ret;
}
/**
* gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
* @mapping: The mapping
* @wbc: The writeback control
* @pvec: The vector of pages
* @nr_pages: The number of pages to write
 * @done_index: Updated to the index of the last page handled
*
* Returns: non-zero if loop should terminate, zero otherwise
*/
static int gfs2_write_jdata_pagevec(struct address_space *mapping,
struct writeback_control *wbc,
struct pagevec *pvec,
int nr_pages,
pgoff_t *done_index)
{
struct inode *inode = mapping->host;
struct gfs2_sbd *sdp = GFS2_SB(inode);
unsigned nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
int i;
int ret;
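
	/*
	 * Reserve journal space for the whole batch up front: each page may
	 * contain PAGE_SIZE >> i_blkbits data blocks, and the same number of
	 * revokes is reserved alongside the blocks.
	 */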
ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
if (ret < 0)
return ret;
for(i = 0; i < nr_pages; i++) {
struct page *page = pvec->pages[i];
*done_index = page->index;
lock_page(page);
if (unlikely(page->mapping != mapping)) {
continue_unlock:
unlock_page(page);
continue;
}
if (!PageDirty(page)) {
/* someone wrote it for us */
goto continue_unlock;
}
if (PageWriteback(page)) {
if (wbc->sync_mode != WB_SYNC_NONE)
wait_on_page_writeback(page);
else
goto continue_unlock;
}
BUG_ON(PageWriteback(page));
if (!clear_page_dirty_for_io(page))
goto continue_unlock;
trace_wbc_writepage(wbc, inode_to_bdi(inode));
ret = __gfs2_jdata_writepage(page, wbc);
if (unlikely(ret)) {
if (ret == AOP_WRITEPAGE_ACTIVATE) {
unlock_page(page);
ret = 0;
} else {
/*
* done_index is set past this page,
* so media errors will not choke
* background writeout for the entire
* file. This has consequences for
* range_cyclic semantics (ie. it may
* not be suitable for data integrity
* writeout).
*/
*done_index = page->index + 1;
ret = 1;
break;
}
}
/*
* We stop writing back only if we are not doing
* integrity sync. In case of integrity sync we have to
* keep going until we have written all the pages
* we tagged for writeback prior to entering this loop.
*/
if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
ret = 1;
break;
}
}
gfs2_trans_end(sdp);
return ret;
}
/**
* gfs2_write_cache_jdata - Like write_cache_pages but different
* @mapping: The mapping to write
* @wbc: The writeback control
*
* The reason that we use our own function here is that we need to
* start transactions before we grab page locks. This allows us
* to get the ordering right.
*/
static int gfs2_write_cache_jdata(struct address_space *mapping,
struct writeback_control *wbc)
{
int ret = 0;
int done = 0;
struct pagevec pvec;
int nr_pages;
pgoff_t writeback_index;
pgoff_t index;
pgoff_t end;
pgoff_t done_index;
int cycled;
int range_whole = 0;
xa_mark_t tag;
pagevec_init(&pvec);
if (wbc->range_cyclic) {
writeback_index = mapping->writeback_index; /* prev offset */
index = writeback_index;
if (index == 0)
cycled = 1;
else
cycled = 0;
end = -1;
} else {
index = wbc->range_start >> PAGE_SHIFT;
end = wbc->range_end >> PAGE_SHIFT;
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
cycled = 1; /* ignore range_cyclic tests */
}
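	/*
	 * For data-integrity sync, work from a snapshot of the dirty set
	 * (the TOWRITE tag) so that pages dirtied while we run cannot keep
	 * the loop going indefinitely.
	 */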
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag = PAGECACHE_TAG_TOWRITE;
else
tag = PAGECACHE_TAG_DIRTY;
retry:
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag_pages_for_writeback(mapping, index, end);
done_index = index;
while (!done && (index <= end)) {
nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
tag);
if (nr_pages == 0)
break;
ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
if (ret)
done = 1;
if (ret > 0)
ret = 0;
pagevec_release(&pvec);
cond_resched();
}
if (!cycled && !done) {
/*
* range_cyclic:
* We hit the last page and there is more work to be done: wrap
* back to the start of the file
*/
cycled = 1;
index = 0;
end = writeback_index - 1;
goto retry;
}
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = done_index;
return ret;
}
/**
* gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
* @mapping: The mapping to write
* @wbc: The writeback control
*
*/
static int gfs2_jdata_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct gfs2_inode *ip = GFS2_I(mapping->host);
struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
int ret;
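
	/*
	 * For integrity sync, a clean first pass is followed by a log flush
	 * and a second pass: flushing the log can unpin journaled buffers and
	 * redirty their pages, which the second pass then writes out.
	 */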
ret = gfs2_write_cache_jdata(mapping, wbc);
if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
GFS2_LFC_JDATA_WPAGES);
ret = gfs2_write_cache_jdata(mapping, wbc);
}
return ret;
}
/**
* stuffed_readpage - Fill in a Linux page with stuffed file data
* @ip: the inode
* @page: the page
*
* Returns: errno
*/
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
struct buffer_head *dibh;
u64 dsize = i_size_read(&ip->i_inode);
void *kaddr;
int error;
/*
* Due to the order of unstuffing files and ->fault(), we can be
* asked for a zero page in the case of a stuffed file being extended,
* so we need to supply one here. It doesn't happen often.
*/
if (unlikely(page->index)) {
zero_user(page, 0, PAGE_SIZE);
SetPageUptodate(page);
return 0;
}
error = gfs2_meta_inode_buffer(ip, &dibh);
if (error)
return error;
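
	/*
	 * A stuffed file's data lives in the dinode block itself, directly
	 * after the on-disk inode header; copy it out and zero the remainder
	 * of the page.
	 */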
kaddr = kmap_atomic(page);
if (dsize > gfs2_max_stuffed_size(ip))
dsize = gfs2_max_stuffed_size(ip);
memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
kunmap_atomic(kaddr);
flush_dcache_page(page);
brelse(dibh);
SetPageUptodate(page);
return 0;
}
/**
* gfs2_read_folio - read a folio from a file
* @file: The file to read
* @folio: The folio in the file
*/
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
struct inode *inode = folio->mapping->host;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
int error;
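
	/*
	 * Three cases: non-jdata inodes (and jdata folios without buffers
	 * when the block size matches the page size) read via iomap; stuffed
	 * inodes are copied out of the dinode; remaining jdata folios go
	 * through the buffer_head based mpage path.
	 */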
if (!gfs2_is_jdata(ip) ||
(i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
error = iomap_read_folio(folio, &gfs2_iomap_ops);
} else if (gfs2_is_stuffed(ip)) {
error = stuffed_readpage(ip, &folio->page);
folio_unlock(folio);
} else {
error = mpage_read_folio(folio, gfs2_block_map);
}
if (unlikely(gfs2_withdrawn(sdp)))
return -EIO;
return error;
}
/**
* gfs2_internal_read - read an internal file
* @ip: The gfs2 inode
* @buf: The buffer to fill
* @pos: The file position
* @size: The amount to read
*
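 * Reads @size bytes from an internal (metadata) file such as the resource
 * index into @buf, advancing *@pos.  A hypothetical caller sketch (names
 * assumed, not taken from this file):
 *
 *	struct gfs2_rindex ri;
 *	loff_t pos = 0;
 *	error = gfs2_internal_read(ip, (char *)&ri, &pos, sizeof(ri));
 *
 * Returns: the number of bytes read (@size) or a negative errno
 *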
*/
int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
unsigned size)
{
struct address_space *mapping = ip->i_inode.i_mapping;
unsigned long index = *pos >> PAGE_SHIFT;
unsigned offset = *pos & (PAGE_SIZE - 1);
unsigned copied = 0;
unsigned amt;
struct page *page;
void *p;
do {
amt = size - copied;
		if (offset + amt > PAGE_SIZE)
amt = PAGE_SIZE - offset;
page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
if (IS_ERR(page))
return PTR_ERR(page);
p = kmap_atomic(page);
memcpy(buf + copied, p + offset, amt);
kunmap_atomic(p);
put_page(page);
copied += amt;
index++;
offset = 0;
} while(copied < size);
(*pos) += size;
return size;
}
/**
* gfs2_readahead - Read a bunch of pages at once
* @rac: Read-ahead control structure
*
* Some notes:
 * 1. This is only for readahead, so we can simply ignore anything
 *    which is slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O.  It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
* 3. mpage_readahead() does most of the heavy lifting in the common case.
* 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
*/
static void gfs2_readahead(struct readahead_control *rac)
{
struct inode *inode = rac->mapping->host;
struct gfs2_inode *ip = GFS2_I(inode);
if (gfs2_is_stuffed(ip))
;
else if (gfs2_is_jdata(ip))
mpage_readahead(rac, gfs2_block_map);
else
iomap_readahead(rac, &gfs2_iomap_ops);
}
/**
* adjust_fs_space - Adjusts the free space available due to gfs2_grow
* @inode: the rindex inode
*/
void adjust_fs_space(struct inode *inode)
{
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
struct buffer_head *m_bh;
u64 fs_total, new_free;
if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
return;
/* Total up the file system space, according to the latest rindex. */
fs_total = gfs2_ri_total(sdp);
if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
goto out;
spin_lock(&sdp->sd_statfs_spin);
gfs2_statfs_change_in(m_sc, m_bh->b_data +
sizeof(struct gfs2_dinode));
if (fs_total > (m_sc->sc_total + l_sc->sc_total))
new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
else
new_free = 0;
spin_unlock(&sdp->sd_statfs_spin);
fs_warn(sdp, "File system extended by %llu blocks.\n",
(unsigned long long)new_free);
gfs2_statfs_change(sdp, new_free, new_free, 0);
update_statfs(sdp, m_bh);
brelse(m_bh);
out:
sdp->sd_rindex_uptodate = 0;
gfs2_trans_end(sdp);
}
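
/*
 * jdata_dirty_folio - mark a jdata folio dirty
 *
 * When the folio is dirtied from inside a transaction (current->journal_info
 * is set), mark it checked: __gfs2_jdata_writepage() will then add its
 * buffers to a transaction before the folio is written back.
 */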
static bool jdata_dirty_folio(struct address_space *mapping,
struct folio *folio)
{
if (current->journal_info)
folio_set_checked(folio);
return block_dirty_folio(mapping, folio);
}
/**
* gfs2_bmap - Block map function
* @mapping: Address space info
* @lblock: The block to map
*
* Returns: The disk address for the block or 0 on hole or error
*/
static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
struct gfs2_inode *ip = GFS2_I(mapping->host);
struct gfs2_holder i_gh;
sector_t dblock = 0;
int error;
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
if (error)
return 0;
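	/*
	 * Stuffed files keep their data in the dinode, so there is no
	 * data block to report here.
	 */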
if (!gfs2_is_stuffed(ip))
dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);
gfs2_glock_dq_uninit(&i_gh);
return dblock;
}
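
/*
 * gfs2_discard - detach a buffer from the journal before invalidation
 *
 * Clears the buffer's dirty state and, if it has journal bookkeeping
 * attached, either unlinks it from its bufdata list or removes it from
 * the journal/AIL entirely, then clears the buffer's mapping state.
 */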
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
struct gfs2_bufdata *bd;
lock_buffer(bh);
gfs2_log_lock(sdp);
clear_buffer_dirty(bh);
bd = bh->b_private;
if (bd) {
if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
list_del_init(&bd->bd_list);
else {
spin_lock(&sdp->sd_ail_lock);
gfs2_remove_from_journal(bh, REMOVE_JDATA);
spin_unlock(&sdp->sd_ail_lock);
}
}
bh->b_bdev = NULL;
clear_buffer_mapped(bh);
clear_buffer_req(bh);
clear_buffer_new(bh);
gfs2_log_unlock(sdp);
unlock_buffer(bh);
}
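
/*
 * gfs2_invalidate_folio - invalidate part or all of a jdata folio
 *
 * Buffers that fall entirely within [@offset, @offset + @length) are
 * discarded from the journal; when the whole folio is being invalidated,
 * the checked flag is cleared and the buffers are released as well.
 */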
static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
size_t length)
{
struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
size_t stop = offset + length;
int partial_page = (offset || length < folio_size(folio));
struct buffer_head *bh, *head;
unsigned long pos = 0;
BUG_ON(!folio_test_locked(folio));
if (!partial_page)
folio_clear_checked(folio);
head = folio_buffers(folio);
if (!head)
goto out;
bh = head;
do {
if (pos + bh->b_size > stop)
return;
if (offset <= pos)
gfs2_discard(sdp, bh);
pos += bh->b_size;
bh = bh->b_this_page;
} while (bh != head);
out:
if (!partial_page)
filemap_release_folio(folio, 0);
}
/**
* gfs2_release_folio - free the metadata associated with a folio
* @folio: the folio that's being released
* @gfp_mask: passed from Linux VFS, ignored by us
*
* Calls try_to_free_buffers() to free the buffers and put the folio if the
* buffers can be released.
*
* Returns: true if the folio was put or else false
*/
bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
struct address_space *mapping = folio->mapping;
struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
struct buffer_head *bh, *head;
struct gfs2_bufdata *bd;
head = folio_buffers(folio);
if (!head)
return false;
/*
* mm accommodates an old ext3 case where clean folios might
* not have had the dirty bit cleared. Thus, it can send actual
* dirty folios to ->release_folio() via shrink_active_list().
*
* As a workaround, we skip folios that contain dirty buffers
* below. Once ->release_folio isn't called on dirty folios
* anymore, we can warn on dirty buffers like we used to here
* again.
*/
gfs2_log_lock(sdp);
bh = head;
do {
if (atomic_read(&bh->b_count))
goto cannot_release;
bd = bh->b_private;
if (bd && bd->bd_tr)
goto cannot_release;
if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
goto cannot_release;
bh = bh->b_this_page;
} while (bh != head);
bh = head;
do {
bd = bh->b_private;
if (bd) {
gfs2_assert_warn(sdp, bd->bd_bh == bh);
bd->bd_bh = NULL;
bh->b_private = NULL;
/*
* The bd may still be queued as a revoke, in which
* case we must not dequeue nor free it.
*/
if (!bd->bd_blkno && !list_empty(&bd->bd_list))
list_del_init(&bd->bd_list);
if (list_empty(&bd->bd_list))
kmem_cache_free(gfs2_bufdata_cachep, bd);
}
bh = bh->b_this_page;
} while (bh != head);
gfs2_log_unlock(sdp);
return try_to_free_buffers(folio);
cannot_release:
gfs2_log_unlock(sdp);
return false;
}
static const struct address_space_operations gfs2_aops = {
.writepages = gfs2_writepages,
.read_folio = gfs2_read_folio,
.readahead = gfs2_readahead,
.dirty_folio = filemap_dirty_folio,
.release_folio = iomap_release_folio,
.invalidate_folio = iomap_invalidate_folio,
.bmap = gfs2_bmap,
.direct_IO = noop_direct_IO,
.migrate_folio = filemap_migrate_folio,
.is_partially_uptodate = iomap_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
};
static const struct address_space_operations gfs2_jdata_aops = {
.writepage = gfs2_jdata_writepage,
.writepages = gfs2_jdata_writepages,
.read_folio = gfs2_read_folio,
.readahead = gfs2_readahead,
.dirty_folio = jdata_dirty_folio,
.bmap = gfs2_bmap,
.invalidate_folio = gfs2_invalidate_folio,
.release_folio = gfs2_release_folio,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
};
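
/*
 * gfs2_set_aops - select the address space operations for an inode
 *
 * Journaled-data inodes use the buffer_head based gfs2_jdata_aops;
 * inodes in ordered or writeback mode use the iomap based gfs2_aops.
 */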
void gfs2_set_aops(struct inode *inode)
{
if (gfs2_is_jdata(GFS2_I(inode)))
inode->i_mapping->a_ops = &gfs2_jdata_aops;
else
inode->i_mapping->a_ops = &gfs2_aops;
}