path: root/mm/page_vma_mapped.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

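/*
 * map_pte - map, and possibly lock, the pte at @pvmw->address.
 *
 * With PVMW_SYNC, the pte lock is taken immediately.  Otherwise the pte
 * is mapped without locking and filtered against @pvmw->flags: a pte that
 * cannot possibly be of interest makes map_pte() return false with
 * @pvmw->pte still mapped (but unlocked), so the caller can continue
 * scanning; the lock is only taken for a potentially matching pte.
 *
 * Returns false with @pvmw->pte == NULL if no page table could be mapped.
 */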
static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
{
	pte_t ptent;

	if (pvmw->flags & PVMW_SYNC) {
		/* Use the stricter lookup */
		pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,
						pvmw->address, &pvmw->ptl);
		*ptlp = pvmw->ptl;
		return !!pvmw->pte;
	}

	/*
	 * It is important to return the ptl corresponding to pte,
	 * in case *pvmw->pmd changes underneath us; so we need to
	 * return it even when choosing not to lock, in case the
	 * caller proceeds to loop over the next ptes and finds a
	 * match later.  Though in most cases, the page lock already
	 * protects this.
	 */
	pvmw->pte = pte_offset_map_nolock(pvmw->vma->vm_mm, pvmw->pmd,
					  pvmw->address, ptlp);
	if (!pvmw->pte)
		return false;

	ptent = ptep_get(pvmw->pte);

	if (pvmw->flags & PVMW_MIGRATION) {
		if (!is_swap_pte(ptent))
			return false;
	} else if (is_swap_pte(ptent)) {
		swp_entry_t entry;
		/*
		 * Handle un-addressable ZONE_DEVICE memory.
		 *
		 * We get here when we are trying to unmap a private
		 * device page from the process address space. Such a
		 * page is not CPU-accessible and is therefore mapped
		 * as a special swap entry; nonetheless it still counts
		 * as a valid regular mapping of the page (and is
		 * accounted as such in the page's map count).
		 *
		 * So handle this special case as if it were a normal
		 * page mapping, i.e. lock the CPU page table and
		 * return true.
		 *
		 * For more details on device private memory see HMM
		 * (include/linux/hmm.h or mm/hmm.c).
		 */
		entry = pte_to_swp_entry(ptent);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;
	} else if (!pte_present(ptent)) {
		return false;
	}
	pvmw->ptl = *ptlp;
	spin_lock(pvmw->ptl);
	return true;
}

/**
 * check_pte - check if [pvmw->pfn, pvmw->pfn + pvmw->nr_pages) is
 * mapped at the pte pointed to by @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, holding the pte and the pfn range
 * to check
 *
 * page_vma_mapped_walk() found a place where the pfn range is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to an empty PTE, a swap PTE, or a PTE pointing to
 * an arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points into [pvmw->pfn, pvmw->pfn + pvmw->nr_pages).
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points
 * into [pvmw->pfn, pvmw->pfn + pvmw->nr_pages).
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;
	pte_t ptent = ptep_get(pvmw->pte);

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;
		if (!is_swap_pte(ptent))
			return false;
		entry = pte_to_swp_entry(ptent);

		if (!is_migration_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset_pfn(entry);
	} else if (is_swap_pte(ptent)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(ptent);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset_pfn(entry);
	} else {
		if (!pte_present(ptent))
			return false;

		pfn = pte_pfn(ptent);
	}

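	/*
	 * Unsigned arithmetic: if pfn is below pvmw->pfn, the subtraction
	 * wraps to a huge value and the comparison fails, so this single
	 * test covers both ends of the range.
	 */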
	return (pfn - pvmw->pfn) < pvmw->nr_pages;
}

/* Returns true if the two pfn ranges overlap.  Careful not to overflow. */
static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw)
{
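	/*
	 * The PMD maps [pfn, pfn + HPAGE_PMD_NR).  Compare against the
	 * last pfn on each side, rather than the one-past-the-end pfn,
	 * so neither sum can overflow.
	 */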
	if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn)
		return false;
	if (pfn > pvmw->pfn + pvmw->nr_pages - 1)
		return false;
	return true;
}

static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
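	/*
	 * Round the address up to the next 'size' boundary.  A wrap to
	 * zero means we ran off the top of the address space: park the
	 * walk at ULONG_MAX so the caller's "address < end" loop ends.
	 */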
	pvmw->address = (pvmw->address + size) & ~(size - 1);
	if (!pvmw->address)
		pvmw->address = ULONG_MAX;
}

/**
 * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. pfn, nr_pages, vma, address
 * and flags must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to relevant page table entries. @pvmw->ptl is locked. @pvmw->address
 * is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long end;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on the last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (unlikely(is_vm_hugetlb_page(vma))) {
		struct hstate *hstate = hstate_vma(vma);
		unsigned long size = huge_page_size(hstate);
		/* The only possible mapping was handled on the last iteration */
		if (pvmw->pte)
			return not_found(pvmw);
		/*
		 * All callers that get here will already hold the
		 * i_mmap_rwsem.  Therefore, no additional locks need to be
		 * taken before calling hugetlb_walk().
		 */
		pvmw->pte = hugetlb_walk(vma, pvmw->address, size);
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}

	end = vma_address_end(pvmw);
	if (pvmw->pte)
		goto next_pte;
restart:
	do {
		pgd = pgd_offset(mm, pvmw->address);
		if (!pgd_present(*pgd)) {
			step_forward(pvmw, PGDIR_SIZE);
			continue;
		}
		p4d = p4d_offset(pgd, pvmw->address);
		if (!p4d_present(*p4d)) {
			step_forward(pvmw, P4D_SIZE);
			continue;
		}
		pud = pud_offset(p4d, pvmw->address);
		if (!pud_present(*pud)) {
			step_forward(pvmw, PUD_SIZE);
			continue;
		}

		pvmw->pmd = pmd_offset(pud, pvmw->address);
		/*
		 * Make sure the pmd value isn't cached in a register by the
		 * compiler and used as a stale value after we've observed a
		 * subsequent update.
		 */
		pmde = pmdp_get_lockless(pvmw->pmd);

		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) ||
		    (pmd_present(pmde) && pmd_devmap(pmde))) {
			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
			pmde = *pvmw->pmd;
			if (!pmd_present(pmde)) {
				swp_entry_t entry;

				if (!thp_migration_supported() ||
				    !(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				entry = pmd_to_swp_entry(pmde);
				if (!is_migration_entry(entry) ||
				    !check_pmd(swp_offset_pfn(entry), pvmw))
					return not_found(pvmw);
				return true;
			}
			if (likely(pmd_trans_huge(pmde) || pmd_devmap(pmde))) {
				if (pvmw->flags & PVMW_MIGRATION)
					return not_found(pvmw);
				if (!check_pmd(pmd_pfn(pmde), pvmw))
					return not_found(pvmw);
				return true;
			}
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		} else if (!pmd_present(pmde)) {
			/*
			 * If PVMW_SYNC, take and drop the THP pmd lock so that
			 * we cannot return prematurely while zap_huge_pmd() has
			 * cleared *pmd but not yet decremented
			 * compound_mapcount().
			 */
			if ((pvmw->flags & PVMW_SYNC) &&
			    thp_vma_suitable_order(vma, pvmw->address,
						   PMD_ORDER) &&
			    (pvmw->nr_pages >= HPAGE_PMD_NR)) {
				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

				spin_unlock(ptl);
			}
			step_forward(pvmw, PMD_SIZE);
			continue;
		}
		if (!map_pte(pvmw, &ptl)) {
			if (!pvmw->pte)
				goto restart;
			goto next_pte;
		}
this_pte:
		if (check_pte(pvmw))
			return true;
next_pte:
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				pte_unmap(pvmw->pte);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->pte++;
		} while (pte_none(ptep_get(pvmw->pte)));

		if (!pvmw->ptl) {
			pvmw->ptl = ptl;
			spin_lock(pvmw->ptl);
		}
		goto this_pte;
	} while (pvmw->address < end);

	return false;
}
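
/*
 * Usage sketch (illustrative only, not part of this file): rmap walkers
 * drive page_vma_mapped_walk() in a loop, handling one mapping per
 * iteration until the walk is exhausted.  This is modeled on the callers
 * in mm/rmap.c; the "referenced" counter and the folio/vma/address
 * variables are assumed to come from the surrounding walker.
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.pfn = folio_pfn(folio),
 *		.nr_pages = folio_nr_pages(folio),
 *		.vma = vma,
 *		.address = address,
 *	};
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		if (pvmw.pte) {
 *			// One PTE mapping the folio; pvmw.ptl is held.
 *			referenced += ptep_test_and_clear_young(vma,
 *						pvmw.address, pvmw.pte);
 *		} else {
 *			// PMD-mapped THP: pvmw.pmd set, pvmw.pte NULL.
 *		}
 *	}
 */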

#ifdef CONFIG_MEMORY_FAILURE
/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Return: The address the page is mapped at if the page is in the range
 * covered by the VMA and present in the page table.  If the page is
 * outside the VMA or not present, returns -EFAULT.
 * Only valid for normal file or anonymous VMAs.
 */
unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct folio *folio = page_folio(page);
	pgoff_t pgoff = folio->index + folio_page_idx(folio, page);
	struct page_vma_mapped_walk pvmw = {
		.pfn = page_to_pfn(page),
		.nr_pages = 1,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

	pvmw.address = vma_address(vma, pgoff, 1);
	if (pvmw.address == -EFAULT)
		goto out;
	if (!page_vma_mapped_walk(&pvmw))
		return -EFAULT;
	page_vma_mapped_walk_done(&pvmw);
out:
	return pvmw.address;
}
#endif