author	Matthew Wilcox <willy@infradead.org>	2018-05-16 18:00:33 -0400
committer	Matthew Wilcox <willy@infradead.org>	2018-10-21 10:46:34 -0400
commit	3ece58a270cd1e5026282abe778bd50db7a11d08 (patch)
tree	ed4feeec0489b4fa9e513e8817363944da48dd3d /mm/filemap.c
parent	fd1b3cee2a867868d39bb8cbcc4b00c36d07cc01 (diff)
page cache: Convert find_get_pages_contig to XArray
There's no direct replacement for radix_tree_for_each_contig() in the
XArray API as it's an unusual thing to do. Instead, open-code a loop
using xas_next(). This removes the only user of
radix_tree_for_each_contig() so delete the iterator from the API and
the test suite code for it.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
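The pattern being open-coded is simple: xas_load() fetches the entry at the
starting index, and each subsequent xas_next() advances one index, returning
NULL at the first hole. A minimal sketch of that loop on a bare XArray
(illustrative only, not code from this commit; count_contig() is a
hypothetical helper):

#include <linux/xarray.h>

/* Count the entries stored contiguously from @start; stop at the first gap. */
static unsigned long count_contig(struct xarray *xa, unsigned long start)
{
	XA_STATE(xas, xa, start);
	unsigned long count = 0;
	void *entry;

	rcu_read_lock();
	for (entry = xas_load(&xas); entry; entry = xas_next(&xas)) {
		/* Raced with a concurrent modification; reload this index. */
		if (xas_retry(&xas, entry))
			continue;
		count++;
	}
	rcu_read_unlock();
	return count;
}

After xas_retry() resets the state, the next xas_next() behaves like
xas_load() and re-reads the same index; the retry label in the patch below
relies on the same behaviour.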
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--	mm/filemap.c	53
1 file changed, 22 insertions(+), 31 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index b72c39fe61c2..089b67598100 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1721,57 +1721,43 @@ out:
 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
 			       unsigned int nr_pages, struct page **pages)
 {
-	struct radix_tree_iter iter;
-	void **slot;
+	XA_STATE(xas, &mapping->i_pages, index);
+	struct page *page;
 	unsigned int ret = 0;
 
 	if (unlikely(!nr_pages))
 		return 0;
 
 	rcu_read_lock();
-	radix_tree_for_each_contig(slot, &mapping->i_pages, &iter, index) {
-		struct page *head, *page;
-repeat:
-		page = radix_tree_deref_slot(slot);
-		/* The hole, there no reason to continue */
-		if (unlikely(!page))
-			break;
-
-		if (radix_tree_exception(page)) {
-			if (radix_tree_deref_retry(page)) {
-				slot = radix_tree_iter_retry(&iter);
-				continue;
-			}
-			/*
-			 * A shadow entry of a recently evicted page,
-			 * or a swap entry from shmem/tmpfs. Stop
-			 * looking for contiguous pages.
-			 */
+	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
+		struct page *head;
+		if (xas_retry(&xas, page))
+			continue;
+		/*
+		 * If the entry has been swapped out, we can stop looking.
+		 * No current caller is looking for DAX entries.
+		 */
+		if (xa_is_value(page))
 			break;
-		}
 
 		head = compound_head(page);
 		if (!page_cache_get_speculative(head))
-			goto repeat;
+			goto retry;
 
 		/* The page was split under us? */
-		if (compound_head(page) != head) {
-			put_page(head);
-			goto repeat;
-		}
+		if (compound_head(page) != head)
+			goto put_page;
 
 		/* Has the page moved? */
-		if (unlikely(page != *slot)) {
-			put_page(head);
-			goto repeat;
-		}
+		if (unlikely(page != xas_reload(&xas)))
+			goto put_page;
 
 		/*
 		 * must check mapping and index after taking the ref.
 		 * otherwise we can get both false positives and false
 		 * negatives, which is just confusing to the caller.
 		 */
-		if (page->mapping == NULL || page_to_pgoff(page) != iter.index) {
+		if (!page->mapping || page_to_pgoff(page) != xas.xa_index) {
 			put_page(page);
 			break;
 		}
@@ -1779,6 +1765,11 @@ repeat:
 		pages[ret] = page;
 		if (++ret == nr_pages)
 			break;
+		continue;
+put_page:
+		put_page(head);
+retry:
+		xas_reset(&xas);
 	}
 	rcu_read_unlock();
 	return ret;
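
The function's contract is unchanged by the conversion: it returns the number
of contiguous pages found starting at @index, each with a reference taken. A
hypothetical caller (not part of this commit) looks like:

	struct page *pages[16];
	unsigned int i, nr;

	nr = find_get_pages_contig(mapping, index, 16, pages);
	for (i = 0; i < nr; i++) {
		/* ... use pages[i] while holding its reference ... */
		put_page(pages[i]);
	}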