author    Christoph Hellwig <hch@lst.de>  2019-08-06 19:05:48 +0300
committer Jason Gunthorpe <jgg@mellanox.com>  2019-08-07 14:58:06 -0300
commit    f0b3c45c8931fd7448a638557752f2743f76f51a (patch)
tree      9ab822626de13f51b504527850cb1bf668697b8c /mm/hmm.c
parent    309f9a4f5e1a233d5df2101b9394ee689d9e463f (diff)
mm/hmm: only define hmm_vma_walk_pud if needed
We only need the special pud_entry walker if PUD-sized hugepages and pte
mappings are supported, else the common pagewalk code will take care of
the iteration. Not implementing this callback reduces the amount of code
compiled for non-x86 platforms, and also fixes compile failures with
other architectures when helpers like pud_pfn are not implemented.

Link: https://lore.kernel.org/r/20190806160554.14046-11-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
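The compiled-out callback works because the pagewalk core only invokes a
pud_entry handler when it is non-NULL; defining hmm_vma_walk_pud to NULL
therefore makes walk_page_range() fall through to the PMD/PTE handlers with
no extra branches in hmm.c. The following is a minimal, self-contained C
sketch of that conditional-callback idiom only; the walk_ops structure,
HAVE_PUD_WALKER switch, and function names are illustrative stand-ins, not
the kernel's actual mm_walk API:

#include <stdio.h>

/*
 * Illustrative stand-in for the kernel config check: define it to get a
 * dedicated PUD-level handler, leave it out to fall back to the PMD path.
 */
/* #define HAVE_PUD_WALKER 1 */

struct walk_ops {
	/* NULL means "no PUD handler, descend to the next level". */
	int (*pud_entry)(unsigned long start, unsigned long end);
	int (*pmd_entry)(unsigned long start, unsigned long end);
};

#ifdef HAVE_PUD_WALKER
static int walk_pud(unsigned long start, unsigned long end)
{
	printf("PUD handler covered %#lx-%#lx\n", start, end);
	return 0;
}
#else
/* Same trick as the patch: compile the handler away entirely. */
#define walk_pud NULL
#endif

static int walk_pmd(unsigned long start, unsigned long end)
{
	printf("PMD handler covered %#lx-%#lx\n", start, end);
	return 0;
}

/* Simplified walker core: only call pud_entry if the caller supplied one. */
static int walk_range(const struct walk_ops *ops, unsigned long start,
		      unsigned long end)
{
	if (ops->pud_entry)
		return ops->pud_entry(start, end);
	return ops->pmd_entry(start, end);
}

int main(void)
{
	struct walk_ops ops = {
		.pud_entry = walk_pud,	/* NULL without HAVE_PUD_WALKER */
		.pmd_entry = walk_pmd,
	};

	return walk_range(&ops, 0x200000, 0x400000);
}

With the config switch off, the .pud_entry initializer expands to NULL and
walk_range() falls through to the PMD handler, mirroring how the patch lets
the generic pagewalk iteration cover architectures without PUD-sized device
hugepage support.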
Diffstat (limited to 'mm/hmm.c')
-rw-r--r--  mm/hmm.c  29
1 file changed, 16 insertions(+), 13 deletions(-)
diff --git a/mm/hmm.c b/mm/hmm.c
index 8d56a4342624..fb1306409258 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -456,15 +456,6 @@ static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
range->flags[HMM_PFN_VALID];
}
-static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
-{
- if (!pud_present(pud))
- return 0;
- return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
- range->flags[HMM_PFN_WRITE] :
- range->flags[HMM_PFN_VALID];
-}
-
static int hmm_vma_handle_pmd(struct mm_walk *walk,
unsigned long addr,
unsigned long end,
@@ -705,10 +696,19 @@ again:
return 0;
}
-static int hmm_vma_walk_pud(pud_t *pudp,
- unsigned long start,
- unsigned long end,
- struct mm_walk *walk)
+#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
+ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
+static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
+{
+ if (!pud_present(pud))
+ return 0;
+ return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
+ range->flags[HMM_PFN_WRITE] :
+ range->flags[HMM_PFN_VALID];
+}
+
+static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
+ struct mm_walk *walk)
{
struct hmm_vma_walk *hmm_vma_walk = walk->private;
struct hmm_range *range = hmm_vma_walk->range;
@@ -772,6 +772,9 @@ again:
return 0;
}
+#else
+#define hmm_vma_walk_pud NULL
+#endif
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
unsigned long start, unsigned long end,