| author | Matthew Wilcox <willy@infradead.org> | 2018-05-16 18:12:54 -0400 |
|---|---|---|
| committer | Matthew Wilcox <willy@infradead.org> | 2018-10-21 10:46:35 -0400 |
| commit | a6906972fe67fb6f73ba04088f7897227fd1cd8f (patch) | |
| tree | ae5e24910060321dcdbbae7c1096596f9baaa5c8 /mm/filemap.c | |
| parent | 3ece58a270cd1e5026282abe778bd50db7a11d08 (diff) | |
| download | linux-stable-a6906972fe67fb6f73ba04088f7897227fd1cd8f.tar.gz linux-stable-a6906972fe67fb6f73ba04088f7897227fd1cd8f.tar.bz2 linux-stable-a6906972fe67fb6f73ba04088f7897227fd1cd8f.zip | |
page cache: Convert find_get_pages_range_tag to XArray
The 'end' parameter of the xas_for_each iterator avoids a useless
iteration at the end of the range.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
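To make that point concrete, here is a minimal sketch (not part of this patch; `count_marked()` and its use of a bare `struct xarray` are illustrative assumptions) of how `xas_for_each_marked()` takes the upper bound directly, so the walk stops at `end` without the manual `if (iter.index > end) break;` check the old radix-tree loop needed:

```c
#include <linux/xarray.h>
#include <linux/rcupdate.h>

/* Hypothetical helper: count entries marked with @mark in [start, end]. */
static unsigned int count_marked(struct xarray *xa, unsigned long start,
                                 unsigned long end, xa_mark_t mark)
{
        XA_STATE(xas, xa, start);       /* iteration state, starting at @start */
        void *entry;
        unsigned int count = 0;

        rcu_read_lock();
        /* @end bounds the walk; no manual "index > end" check is needed. */
        xas_for_each_marked(&xas, entry, end, mark) {
                if (xas_retry(&xas, entry))     /* skip internal retry entries */
                        continue;
                count++;
        }
        rcu_read_unlock();

        return count;
}
```

Compare this with the removed `radix_tree_for_each_tagged()` loop in the diff below, which had to perform that bounds check and the `radix_tree_deref_slot()`/retry dance by hand.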
Diffstat (limited to 'mm/filemap.c')
| -rw-r--r-- | mm/filemap.c | 68 |
1 file changed, 26 insertions(+), 42 deletions(-)
```diff
diff --git a/mm/filemap.c b/mm/filemap.c
index 089b67598100..0df28aa6411c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1789,74 +1789,58 @@ EXPORT_SYMBOL(find_get_pages_contig);
  * @tag. We update @index to index the next page for the traversal.
  */
 unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
-			pgoff_t end, int tag, unsigned int nr_pages,
+			pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
 			struct page **pages)
 {
-	struct radix_tree_iter iter;
-	void **slot;
+	XA_STATE(xas, &mapping->i_pages, *index);
+	struct page *page;
 	unsigned ret = 0;
 
 	if (unlikely(!nr_pages))
 		return 0;
 
 	rcu_read_lock();
-	radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, *index, tag) {
-		struct page *head, *page;
-
-		if (iter.index > end)
-			break;
-repeat:
-		page = radix_tree_deref_slot(slot);
-		if (unlikely(!page))
+	xas_for_each_marked(&xas, page, end, tag) {
+		struct page *head;
+		if (xas_retry(&xas, page))
 			continue;
-
-		if (radix_tree_exception(page)) {
-			if (radix_tree_deref_retry(page)) {
-				slot = radix_tree_iter_retry(&iter);
-				continue;
-			}
-			/*
-			 * A shadow entry of a recently evicted page.
-			 *
-			 * Those entries should never be tagged, but
-			 * this tree walk is lockless and the tags are
-			 * looked up in bulk, one radix tree node at a
-			 * time, so there is a sizable window for page
-			 * reclaim to evict a page we saw tagged.
-			 *
-			 * Skip over it.
-			 */
+		/*
+		 * Shadow entries should never be tagged, but this iteration
+		 * is lockless so there is a window for page reclaim to evict
+		 * a page we saw tagged. Skip over it.
+		 */
+		if (xa_is_value(page))
 			continue;
-		}
 
 		head = compound_head(page);
 		if (!page_cache_get_speculative(head))
-			goto repeat;
+			goto retry;
 
 		/* The page was split under us? */
-		if (compound_head(page) != head) {
-			put_page(head);
-			goto repeat;
-		}
+		if (compound_head(page) != head)
+			goto put_page;
 
 		/* Has the page moved? */
-		if (unlikely(page != *slot)) {
-			put_page(head);
-			goto repeat;
-		}
+		if (unlikely(page != xas_reload(&xas)))
+			goto put_page;
 
 		pages[ret] = page;
 		if (++ret == nr_pages) {
-			*index = pages[ret - 1]->index + 1;
+			*index = page->index + 1;
 			goto out;
 		}
+		continue;
+put_page:
+		put_page(head);
+retry:
+		xas_reset(&xas);
 	}
 
 	/*
-	 * We come here when we got at @end. We take care to not overflow the
+	 * We come here when we got to @end. We take care to not overflow the
 	 * index @index as it confuses some of the callers. This breaks the
-	 * iteration when there is page at index -1 but that is already broken
-	 * anyway.
+	 * iteration when there is a page at index -1 but that is already
+	 * broken anyway.
 	 */
 	if (end == (pgoff_t)-1)
 		*index = (pgoff_t)-1;
```
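The trailing comment about not overflowing @index matters because callers resume from `*index` on the next call. As a rough illustration (not from this patch; `walk_dirty_pages()` is a hypothetical helper, and the pagevec API shown is assumed to match this kernel era), a typical writeback-style caller loops via `pagevec_lookup_range_tag()`, which wraps `find_get_pages_range_tag()`:

```c
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

/* Hypothetical caller: visit every dirty page in [start, end]. */
static void walk_dirty_pages(struct address_space *mapping,
                             pgoff_t start, pgoff_t end)
{
        struct pagevec pvec;
        pgoff_t index = start;
        unsigned int i, nr;

        pagevec_init(&pvec);
        while (index <= end) {
                /* Fills pvec and advances index past the last page returned. */
                nr = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
                                              PAGECACHE_TAG_DIRTY);
                if (!nr)
                        break;

                for (i = 0; i < nr; i++) {
                        struct page *page = pvec.pages[i];

                        /* ... lock, inspect or write back @page here ... */
                        (void)page;
                }
                pagevec_release(&pvec);
                cond_resched();
        }
}
```

The `while (index <= end)` check relies on `*index` advancing monotonically without wrapping, which is the behaviour the comment at the end of the hunk is preserving.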