author    | Joanne Koong <joannelkoong@gmail.com> | 2024-10-24 10:18:07 -0700
committer | Miklos Szeredi <mszeredi@redhat.com>  | 2024-11-05 11:14:32 +0100
commit    | 7fce207af5ec074a9a50e90eb866b17ca4a90f06 (patch)
tree      | 2bb120614a15f4d22b11e2c5445749f66bc88a0d /mm
parent    | cbe9c115b7441dd790540436118eee4626ec9979 (diff)
mm/writeback: add folio_mark_dirty_lock()
Add a new convenience helper folio_mark_dirty_lock() that grabs the
folio lock before calling folio_mark_dirty().
Refactor set_page_dirty_lock() to directly use folio_mark_dirty_lock().
Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
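To make the conversion concrete, here is a minimal sketch of what the helper replaces on the caller side. The wrapper functions and the header placement of the new declaration are illustrative assumptions; only folio_lock(), folio_mark_dirty(), folio_unlock() and the new folio_mark_dirty_lock() come from the kernel API this patch touches.

```c
#include <linux/mm.h>       /* folio_mark_dirty(); folio_mark_dirty_lock() declaration assumed here */
#include <linux/pagemap.h>  /* folio_lock(), folio_unlock() */

/* Hypothetical caller that needs to dirty a folio it has not locked. */
static void example_mark_folio_dirty_old(struct folio *folio)
{
	/* Open-coded pattern callers used before the helper existed. */
	folio_lock(folio);
	folio_mark_dirty(folio);
	folio_unlock(folio);
}

/* The same operation expressed with the new convenience helper. */
static void example_mark_folio_dirty_new(struct folio *folio)
{
	folio_mark_dirty_lock(folio);
}
```

Legacy set_page_dirty_lock() callers need no changes: after the refactor it simply forwards to folio_mark_dirty_lock(page_folio(page)), as the mm/folio-compat.c hunk below shows.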
Diffstat (limited to 'mm')
-rw-r--r-- | mm/folio-compat.c   |  6 |
-rw-r--r-- | mm/page-writeback.c | 22 |
2 files changed, 17 insertions, 11 deletions
```diff
diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index 80746182e9e8..1d1832e2a599 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -52,6 +52,12 @@ bool set_page_dirty(struct page *page)
 }
 EXPORT_SYMBOL(set_page_dirty);
 
+int set_page_dirty_lock(struct page *page)
+{
+	return folio_mark_dirty_lock(page_folio(page));
+}
+EXPORT_SYMBOL(set_page_dirty_lock);
+
 bool clear_page_dirty_for_io(struct page *page)
 {
 	return folio_clear_dirty_for_io(page_folio(page));
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index fcd4c1439cb9..db00a66d8b84 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2914,25 +2914,25 @@ bool folio_mark_dirty(struct folio *folio)
 EXPORT_SYMBOL(folio_mark_dirty);
 
 /*
- * set_page_dirty() is racy if the caller has no reference against
- * page->mapping->host, and if the page is unlocked. This is because another
- * CPU could truncate the page off the mapping and then free the mapping.
+ * folio_mark_dirty() is racy if the caller has no reference against
+ * folio->mapping->host, and if the folio is unlocked. This is because another
+ * CPU could truncate the folio off the mapping and then free the mapping.
  *
- * Usually, the page _is_ locked, or the caller is a user-space process which
+ * Usually, the folio _is_ locked, or the caller is a user-space process which
  * holds a reference on the inode by having an open file.
  *
- * In other cases, the page should be locked before running set_page_dirty().
+ * In other cases, the folio should be locked before running folio_mark_dirty().
  */
-int set_page_dirty_lock(struct page *page)
+bool folio_mark_dirty_lock(struct folio *folio)
 {
-	int ret;
+	bool ret;
 
-	lock_page(page);
-	ret = set_page_dirty(page);
-	unlock_page(page);
+	folio_lock(folio);
+	ret = folio_mark_dirty(folio);
+	folio_unlock(folio);
 	return ret;
 }
-EXPORT_SYMBOL(set_page_dirty_lock);
+EXPORT_SYMBOL(folio_mark_dirty_lock);
 
 /*
  * This cancels just the dirty bit on the kernel page itself, it does NOT
```
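The comment carried over in the second hunk explains when the locking variant matters: a caller with no reference on folio->mapping->host, for example a driver that pinned user pages for DMA and dirties them after the transfer, cannot safely call folio_mark_dirty() on an unlocked folio, since a concurrent truncate could free the mapping. Below is a hedged sketch of that caller-side pattern; the function name and the assumption that the pages were pinned earlier by a pin_user_pages()-style call are illustrative, not part of this patch.

```c
#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Illustrative teardown path: 'pages' is assumed to have been filled by an
 * earlier pin_user_pages()-style call and written to by hardware.  The
 * caller holds page references but neither the folio locks nor an inode
 * reference, so it must use the locking variant to mark the data dirty.
 */
static void example_dirty_and_unpin(struct page **pages, unsigned long nr)
{
	unsigned long i;

	for (i = 0; i < nr; i++) {
		struct folio *folio = page_folio(pages[i]);

		/* Takes and drops the folio lock around folio_mark_dirty(). */
		folio_mark_dirty_lock(folio);
		unpin_user_page(pages[i]);
	}
}
```

In practice, pinned-page users would typically reach for unpin_user_pages_dirty_lock(), which already bundles the dirty-under-lock step with the unpin; the loop above only spells out where folio_mark_dirty_lock() fits.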