-rw-r--r--  fs/netfs/buffered_write.c  |  4
-rw-r--r--  fs/smb/client/file.c       |  4
-rw-r--r--  include/linux/page_ref.h   | 49
-rw-r--r--  mm/filemap.c               | 10
-rw-r--r--  mm/gup.c                   |  2
5 files changed, 12 insertions, 57 deletions
diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
index d2ce0849bb53..e6066ddbb3ac 100644
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -811,7 +811,7 @@ static void netfs_extend_writeback(struct address_space *mapping,
 			break;
 		}
 
-		if (!folio_try_get_rcu(folio)) {
+		if (!folio_try_get(folio)) {
 			xas_reset(xas);
 			continue;
 		}
@@ -1028,7 +1028,7 @@ search_again:
 		if (!folio)
 			break;
 
-		if (!folio_try_get_rcu(folio)) {
+		if (!folio_try_get(folio)) {
 			xas_reset(xas);
 			continue;
 		}
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index 9be37d0fe724..4784fece4d99 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -2753,7 +2753,7 @@ static void cifs_extend_writeback(struct address_space *mapping,
 			break;
 		}
 
-		if (!folio_try_get_rcu(folio)) {
+		if (!folio_try_get(folio)) {
 			xas_reset(xas);
 			continue;
 		}
@@ -2989,7 +2989,7 @@ search_again:
 		if (!folio)
 			break;
 
-		if (!folio_try_get_rcu(folio)) {
+		if (!folio_try_get(folio)) {
 			xas_reset(xas);
 			continue;
 		}
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index d7c2d33baa7f..fdd2a75adb03 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -263,54 +263,9 @@ static inline bool folio_try_get(struct folio *folio)
 	return folio_ref_add_unless(folio, 1, 0);
 }
 
-static inline bool folio_ref_try_add_rcu(struct folio *folio, int count)
-{
-#ifdef CONFIG_TINY_RCU
-	/*
-	 * The caller guarantees the folio will not be freed from interrupt
-	 * context, so (on !SMP) we only need preemption to be disabled
-	 * and TINY_RCU does that for us.
-	 */
-# ifdef CONFIG_PREEMPT_COUNT
-	VM_BUG_ON(!in_atomic() && !irqs_disabled());
-# endif
-	VM_BUG_ON_FOLIO(folio_ref_count(folio) == 0, folio);
-	folio_ref_add(folio, count);
-#else
-	if (unlikely(!folio_ref_add_unless(folio, count, 0))) {
-		/* Either the folio has been freed, or will be freed. */
-		return false;
-	}
-#endif
-	return true;
-}
-
-/**
- * folio_try_get_rcu - Attempt to increase the refcount on a folio.
- * @folio: The folio.
- *
- * This is a version of folio_try_get() optimised for non-SMP kernels.
- * If you are still holding the rcu_read_lock() after looking up the
- * page and know that the page cannot have its refcount decreased to
- * zero in interrupt context, you can use this instead of folio_try_get().
- *
- * Example users include get_user_pages_fast() (as pages are not unmapped
- * from interrupt context) and the page cache lookups (as pages are not
- * truncated from interrupt context).  We also know that pages are not
- * frozen in interrupt context for the purposes of splitting or migration.
- *
- * You can also use this function if you're holding a lock that prevents
- * pages being frozen & removed; eg the i_pages lock for the page cache
- * or the mmap_lock or page table lock for page tables.  In this case,
- * it will always succeed, and you could have used a plain folio_get(),
- * but it's sometimes more convenient to have a common function called
- * from both locked and RCU-protected contexts.
- *
- * Return: True if the reference count was successfully incremented.
- */
-static inline bool folio_try_get_rcu(struct folio *folio)
+static inline bool folio_ref_try_add(struct folio *folio, int count)
 {
-	return folio_ref_try_add_rcu(folio, 1);
+	return folio_ref_add_unless(folio, count, 0);
 }
 
 static inline int page_ref_freeze(struct page *page, int count)
diff --git a/mm/filemap.c b/mm/filemap.c
index 41bf94f7dbd1..196f70166537 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1823,7 +1823,7 @@ repeat:
 	if (!folio || xa_is_value(folio))
 		goto out;
 
-	if (!folio_try_get_rcu(folio))
+	if (!folio_try_get(folio))
 		goto repeat;
 
 	if (unlikely(folio != xas_reload(&xas))) {
@@ -1977,7 +1977,7 @@ retry:
 	if (!folio || xa_is_value(folio))
 		return folio;
 
-	if (!folio_try_get_rcu(folio))
+	if (!folio_try_get(folio))
 		goto reset;
 
 	if (unlikely(folio != xas_reload(xas))) {
@@ -2157,7 +2157,7 @@ unsigned filemap_get_folios_contig(struct address_space *mapping,
 		if (xa_is_value(folio))
 			goto update_start;
 
-		if (!folio_try_get_rcu(folio))
+		if (!folio_try_get(folio))
 			goto retry;
 
 		if (unlikely(folio != xas_reload(&xas)))
@@ -2289,7 +2289,7 @@ static void filemap_get_read_batch(struct address_space *mapping,
 			break;
 		if (xa_is_sibling(folio))
 			break;
-		if (!folio_try_get_rcu(folio))
+		if (!folio_try_get(folio))
 			goto retry;
 
 		if (unlikely(folio != xas_reload(&xas)))
@@ -3449,7 +3449,7 @@ static struct folio *next_uptodate_folio(struct xa_state *xas,
 			continue;
 		if (folio_test_locked(folio))
 			continue;
-		if (!folio_try_get_rcu(folio))
+		if (!folio_try_get(folio))
 			continue;
 		/* Has the page moved or been split? */
 		if (unlikely(folio != xas_reload(xas)))
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -76,7 +76,7 @@ retry:
 	folio = page_folio(page);
 	if (WARN_ON_ONCE(folio_ref_count(folio) < 0))
 		return NULL;
-	if (unlikely(!folio_ref_try_add_rcu(folio, refs)))
+	if (unlikely(!folio_ref_try_add(folio, refs)))
 		return NULL;
 
 	/*
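
Note for readers of this diff: after the change, folio_try_get() and the new
folio_ref_try_add() both reduce to folio_ref_add_unless(folio, count, 0),
which takes a reference only if the refcount is not already zero, so the
TINY_RCU special case is gone for every caller. Below is a minimal userspace
sketch of that add-unless-zero rule; the fake_* names are invented for
illustration and this is not the kernel implementation, which uses
atomic_add_unless() internally.

/*
 * Userspace model of the add-unless-zero pattern that folio_try_get()
 * relies on after this patch.  A refcount of zero means the folio is
 * frozen or being freed, so the lookup must fail rather than resurrect it.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_folio {
	atomic_int refcount;
};

/* Add @count to the refcount unless it is already zero. */
static bool fake_ref_try_add(struct fake_folio *f, int count)
{
	int old = atomic_load(&f->refcount);

	do {
		if (old == 0)
			return false;	/* dead folio: do not take a reference */
	} while (!atomic_compare_exchange_weak(&f->refcount, &old,
					       old + count));
	return true;
}

int main(void)
{
	struct fake_folio live = { .refcount = 2 };
	struct fake_folio dead = { .refcount = 0 };

	printf("live: %d\n", fake_ref_try_add(&live, 1));	/* 1: ref taken */
	printf("dead: %d\n", fake_ref_try_add(&dead, 1));	/* 0: caller retries */
	return 0;
}

A false return here is what drives the xas_reset()/goto retry paths in the
hunks above: under rcu_read_lock() a looked-up folio may have been frozen for
splitting or migration, or freed and reused, so the caller re-walks the xarray
instead of trusting the stale entry.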