author    Matthew Wilcox (Oracle) <willy@infradead.org>  2023-08-18 21:23:35 +0100
committer Andrew Morton <akpm@linux-foundation.org>  2023-08-24 16:20:30 -0700
commit    1d024e7a8dabcc3c84d77532a88c774c32cf8245 (patch)
tree      054d0ee873c69836f7dae835b19d11006e1e27f6 /mm/memory.c
parent    40d49a3c9e4a0e5cf7a6fcebc8d4d7d63d1f3f1b (diff)
mm: remove enum page_entry_size
Remove the unnecessary encoding of page order into an enum and pass the
page order directly.  That lets us get rid of pe_order().

The switch constructs have to be changed to if/else constructs to
prevent GCC from warning on builds with 3-level page tables where
PMD_ORDER and PUD_ORDER have the same value.

If you are looking at this commit because your driver stopped compiling,
look at the previous commit as well and audit your driver to be sure it
doesn't depend on mmap_lock being held in its ->huge_fault method.

[willy@infradead.org: use "order %u" to match the (non dev_t) style]
Link: https://lkml.kernel.org/r/ZOUYekbtTv+n8hYf@casper.infradead.org
Link: https://lkml.kernel.org/r/20230818202335.2739663-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
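For driver maintainers updating a ->huge_fault implementation, a minimal
before/after sketch of the interface change (the driver and helper names
below are hypothetical; the if/else dispatch mirrors the switch-to-if/else
change described above, since PMD_ORDER and PUD_ORDER can share a value on
3-level page tables):

    /* Before this commit: the fault size arrived as an enum. */
    static vm_fault_t my_huge_fault(struct vm_fault *vmf,
    				enum page_entry_size pe_size);

    /*
     * After this commit: the page order is passed directly.  Note the
     * if/else rather than a switch: PMD_ORDER and PUD_ORDER may be equal
     * on 3-level page tables, which would produce duplicate case labels.
     * my_pmd_fault()/my_pud_fault() are hypothetical driver helpers.
     */
    static vm_fault_t my_huge_fault(struct vm_fault *vmf, unsigned int order)
    {
    	if (order == PMD_ORDER)
    		return my_pmd_fault(vmf);
    	else if (order == PUD_ORDER)
    		return my_pud_fault(vmf);
    	return VM_FAULT_FALLBACK;
    }

    static const struct vm_operations_struct my_vm_ops = {
    	.huge_fault	= my_huge_fault,
    };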
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 7a7e58729510..00a5ce113090 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4855,7 +4855,7 @@ static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
if (vma_is_anonymous(vma))
return do_huge_pmd_anonymous_page(vmf);
if (vma->vm_ops->huge_fault)
- return vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
+ return vma->vm_ops->huge_fault(vmf, PMD_ORDER);
return VM_FAULT_FALLBACK;
}
@@ -4875,7 +4875,7 @@ static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
if (vma->vm_ops->huge_fault) {
- ret = vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
+ ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
}
@@ -4896,7 +4896,7 @@ static vm_fault_t create_huge_pud(struct vm_fault *vmf)
if (vma_is_anonymous(vma))
return VM_FAULT_FALLBACK;
if (vma->vm_ops->huge_fault)
- return vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
+ return vma->vm_ops->huge_fault(vmf, PUD_ORDER);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
return VM_FAULT_FALLBACK;
}
@@ -4913,7 +4913,7 @@ static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
goto split;
if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
if (vma->vm_ops->huge_fault) {
- ret = vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
+ ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
}