From 8306a5f56305521d8b307b4ee1f69949fbb49279 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Wed, 28 Apr 2021 07:51:36 -0400
Subject: iomap: Add iomap_invalidate_folio

Keep iomap_invalidatepage around as a wrapper for use in address_space
operations.

Signed-off-by: Matthew Wilcox (Oracle)
Reviewed-by: Christoph Hellwig
Reviewed-by: Darrick J. Wong
---
 fs/iomap/buffered-io.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index b0192b148c9f..de7ce1909527 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -479,23 +479,27 @@ iomap_releasepage(struct page *page, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL_GPL(iomap_releasepage);
 
-void
-iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
+void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
 {
-	struct folio *folio = page_folio(page);
-
-	trace_iomap_invalidatepage(page->mapping->host, offset, len);
+	trace_iomap_invalidatepage(folio->mapping->host, offset, len);
 
 	/*
 	 * If we're invalidating the entire page, clear the dirty state from it
	 * and release it to avoid unnecessary buildup of the LRU.
 	 */
-	if (offset == 0 && len == PAGE_SIZE) {
-		WARN_ON_ONCE(PageWriteback(page));
-		cancel_dirty_page(page);
+	if (offset == 0 && len == folio_size(folio)) {
+		WARN_ON_ONCE(folio_test_writeback(folio));
+		folio_cancel_dirty(folio);
 		iomap_page_release(folio);
 	}
 }
+EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
+
+void iomap_invalidatepage(struct page *page, unsigned int offset,
+		unsigned int len)
+{
+	iomap_invalidate_folio(page_folio(page), offset, len);
+}
 EXPORT_SYMBOL_GPL(iomap_invalidatepage);
 
 #ifdef CONFIG_MIGRATION
-- 
cgit v1.2.3
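
For context on why the wrapper survives: at this point ->invalidatepage in
struct address_space_operations still takes a struct page, so iomap-based
filesystems can keep pointing that hook at iomap_invalidatepage() while the
real work moves to the folio-based iomap_invalidate_folio(). A minimal
sketch of such a hookup, using a hypothetical "foofs" filesystem (the iomap
helpers and the aops fields are real; the foofs names are illustrative
only):

/* foofs_aops.c - "foofs" is a made-up filesystem used for illustration */
#include <linux/fs.h>
#include <linux/iomap.h>

static const struct address_space_operations foofs_aops = {
	/*
	 * The page-based wrapper kept by this patch: it converts the
	 * page to its folio and calls iomap_invalidate_folio().
	 */
	.invalidatepage	= iomap_invalidatepage,
	.releasepage	= iomap_releasepage,
};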