author		Matthew Wilcox (Oracle) <willy@infradead.org>	2022-02-03 11:40:17 -0500
committer	Matthew Wilcox (Oracle) <willy@infradead.org>	2022-03-21 12:59:02 -0400
commit		2aff7a4755bed2870ee23b75bc88cdc8d76cdd03 (patch)
tree		a638ee31555747a2252b02e87440dfa303b9f64e /include/linux/rmap.h
parent		aef13dec0a5fa3c4adc8949307fc8d8aac7337df (diff)
mm: Convert page_vma_mapped_walk to work on PFNs
page_mapped_in_vma() really just wants to walk one page, but as the code stands, if passed the head page of a compound page, it will walk every page in the compound page. Extract pfn/nr_pages/pgoff from the struct page early, so they can be overridden by page_mapped_in_vma().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
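For context, a hedged sketch of the caller this conversion enables: once the walk state carries pfn/nr_pages/pgoff instead of a struct page, page_mapped_in_vma() can clamp the walk to a single page even when handed the head page of a compound page. The body below is an illustration based on the commit message, not code from this patch; vma_address() is the mm-internal helper assumed to be available here.

	/*
	 * Illustrative sketch only: with the walk keyed by pfn/nr_pages,
	 * page_mapped_in_vma() can override nr_pages to walk exactly one
	 * page instead of compound_nr() pages.
	 */
	int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
	{
		struct page_vma_mapped_walk pvmw = {
			.pfn = page_to_pfn(page),
			.nr_pages = 1,	/* override: one page, not the whole compound page */
			.vma = vma,
			.flags = PVMW_SYNC,
		};

		pvmw.address = vma_address(page, vma);	/* assumed mm/internal.h helper */
		if (pvmw.address == -EFAULT)
			return 0;
		if (!page_vma_mapped_walk(&pvmw))
			return 0;
		page_vma_mapped_walk_done(&pvmw);
		return 1;
	}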
Diffstat (limited to 'include/linux/rmap.h')
-rw-r--r--	include/linux/rmap.h	17
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 0d894a2bfaa1..0c838ba1a8ee 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -11,6 +11,7 @@
#include <linux/rwsem.h>
#include <linux/memcontrol.h>
#include <linux/highmem.h>
+#include <linux/pagemap.h>
/*
* The anon_vma heads a list of private "related" vmas, to scan if
@@ -201,11 +202,13 @@ int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
/* Avoid racy checks */
#define PVMW_SYNC (1 << 0)
-/* Look for migarion entries rather than present PTEs */
+/* Look for migration entries rather than present PTEs */
#define PVMW_MIGRATION (1 << 1)
struct page_vma_mapped_walk {
- struct page *page;
+ unsigned long pfn;
+ unsigned long nr_pages;
+ pgoff_t pgoff;
struct vm_area_struct *vma;
unsigned long address;
pmd_t *pmd;
@@ -216,7 +219,9 @@ struct page_vma_mapped_walk {
#define DEFINE_PAGE_VMA_WALK(name, _page, _vma, _address, _flags) \
struct page_vma_mapped_walk name = { \
- .page = _page, \
+ .pfn = page_to_pfn(_page), \
+ .nr_pages = compound_nr(_page), \
+ .pgoff = page_to_pgoff(_page), \
.vma = _vma, \
.address = _address, \
.flags = _flags, \
@@ -224,7 +229,9 @@ struct page_vma_mapped_walk {
#define DEFINE_FOLIO_VMA_WALK(name, _folio, _vma, _address, _flags) \
struct page_vma_mapped_walk name = { \
- .page = &_folio->page, \
+ .pfn = folio_pfn(_folio), \
+ .nr_pages = folio_nr_pages(_folio), \
+ .pgoff = folio_pgoff(_folio), \
.vma = _vma, \
.address = _address, \
.flags = _flags, \
@@ -233,7 +240,7 @@ struct page_vma_mapped_walk {
static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
{
/* HugeTLB ptes are set on the relevant page table entry directly, without pte_map(), so they must not be pte_unmap()ed. */
- if (pvmw->pte && !PageHuge(pvmw->page))
+ if (pvmw->pte && !is_vm_hugetlb_page(pvmw->vma))
pte_unmap(pvmw->pte);
if (pvmw->ptl)
spin_unlock(pvmw->ptl);
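And a usage sketch of the folio-side initialiser; folio_mapped_in_vma() is a hypothetical helper for illustration, not a function added by this patch:

	/* Hypothetical helper: report whether the folio is mapped in one VMA. */
	static bool folio_mapped_in_vma(struct folio *folio,
					struct vm_area_struct *vma,
					unsigned long address)
	{
		DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);

		while (page_vma_mapped_walk(&pvmw)) {
			/* Found a mapping; bail out early, dropping pte map and ptl. */
			page_vma_mapped_walk_done(&pvmw);
			return true;
		}
		return false;
	}

Note that page_vma_mapped_walk_done() is only needed when leaving the loop early, as above; a walk that runs to completion drops its own pte mapping and page table lock.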