author	Linus Torvalds <torvalds@linux-foundation.org>	2018-10-26 14:36:21 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-10-26 14:36:21 -0700
commit	685f7e4f161425b137056abe35ba8ef7b669d83d (patch)
tree	550dd1f5dc9e852cfeec26bf5e3ce9dd060c8a33 /arch/powerpc/include/asm/book3s/64/pgtable.h
parent	c7a2c49ea6c9eebbe44ff2c08b663b2905ee2c13 (diff)
parent	58cfbac25b1fd2b76f94566aed28a3662b0ff8c6 (diff)
Merge tag 'powerpc-4.20-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:
 "Notable changes:

   - A large series to rewrite our SLB miss handling, replacing a lot
     of fairly complicated asm with much fewer lines of C.

   - Following on from that, we now maintain a cache of SLB entries for
     each process and preload them on context switch. Leading to a 27%
     speedup for our context switch benchmark on Power9.

   - Improvements to our handling of SLB multi-hit errors. We now print
     more debug information when they occur, and try to continue
     running by flushing the SLB and reloading, rather than treating
     them as fatal.

   - Enable THP migration on 64-bit Book3S machines (eg. Power7/8/9).

   - Add support for physical memory up to 2PB in the linear mapping on
     64-bit Book3S. We only support up to 512TB as regular system
     memory, otherwise the percpu allocator runs out of vmalloc space.

   - Add stack protector support for 32 and 64-bit, with a per-task
     canary.

   - Add support for PTRACE_SYSEMU and PTRACE_SYSEMU_SINGLESTEP.

   - Support recognising "big cores" on Power9, where two SMT4 cores
     are presented to us as a single SMT8 core.

   - A large series to cleanup some of our ioremap handling and PTE
     flags.

   - Add a driver for the PAPR SCM (storage class memory) interface,
     allowing guests to operate on SCM devices (acked by Dan).

   - Changes to our ftrace code to handle very large kernels, where we
     need to use a trampoline to get to ftrace_caller().

  And many other smaller enhancements and cleanups.

  Thanks to: Alan Modra, Alistair Popple, Aneesh Kumar K.V, Anton
  Blanchard, Aravinda Prasad, Bartlomiej Zolnierkiewicz, Benjamin
  Herrenschmidt, Breno Leitao, Cédric Le Goater, Christophe Leroy,
  Christophe Lombard, Dan Carpenter, Daniel Axtens, Finn Thain, Gautham
  R. Shenoy, Gustavo Romero, Haren Myneni, Hari Bathini, Jia Hongtao,
  Joel Stanley, John Allen, Laurent Dufour, Madhavan Srinivasan, Mahesh
  Salgaonkar, Mark Hairgrove, Masahiro Yamada, Michael Bringmann,
  Michael Neuling, Michal Suchanek, Murilo Opsfelder Araujo, Nathan
  Fontenot, Naveen N. Rao, Nicholas Piggin, Nick Desaulniers, Oliver
  O'Halloran, Paul Mackerras, Petr Vorel, Rashmica Gupta, Reza Arbab,
  Rob Herring, Sam Bobroff, Samuel Mendoza-Jonas, Scott Wood, Stan
  Johnson, Stephen Rothwell, Stewart Smith, Suraj Jitindar Singh, Tyrel
  Datwyler, Vaibhav Jain, Vasant Hegde, YueHaibing, zhong jiang"

* tag 'powerpc-4.20-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (221 commits)
  Revert "selftests/powerpc: Fix out-of-tree build errors"
  powerpc/msi: Fix compile error on mpc83xx
  powerpc: Fix stack protector crashes on CPU hotplug
  powerpc/traps: restore recoverability of machine_check interrupts
  powerpc/64/module: REL32 relocation range check
  powerpc/64s/radix: Fix radix__flush_tlb_collapsed_pmd double flushing pmd
  selftests/powerpc: Add a test of wild bctr
  powerpc/mm: Fix page table dump to work on Radix
  powerpc/mm/radix: Display if mappings are exec or not
  powerpc/mm/radix: Simplify split mapping logic
  powerpc/mm/radix: Remove the retry in the split mapping logic
  powerpc/mm/radix: Fix small page at boundary when splitting
  powerpc/mm/radix: Fix overuse of small pages in splitting logic
  powerpc/mm/radix: Fix off-by-one in split mapping logic
  powerpc/ftrace: Handle large kernel configs
  powerpc/mm: Fix WARN_ON with THP NUMA migration
  selftests/powerpc: Fix out-of-tree build errors
  powerpc/time: no steal_time when CONFIG_PPC_SPLPAR is not selected
  powerpc/time: Only set CONFIG_ARCH_HAS_SCALED_CPUTIME on PPC64
  powerpc/time: isolate scaled cputime accounting in dedicated functions.
  ...
Diffstat (limited to 'arch/powerpc/include/asm/book3s/64/pgtable.h')
-rw-r--r--	arch/powerpc/include/asm/book3s/64/pgtable.h	181
1 file changed, 103 insertions(+), 78 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 2a2486526d1f..cb5dd4078d42 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -14,10 +14,6 @@
*/
#define _PAGE_BIT_SWAP_TYPE 0
-#define _PAGE_NA 0
-#define _PAGE_RO 0
-#define _PAGE_USER 0
-
#define _PAGE_EXEC 0x00001 /* execute permission */
#define _PAGE_WRITE 0x00002 /* write access allowed */
#define _PAGE_READ 0x00004 /* read access allowed */
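
The three bits above are the building blocks for every permission mask in this header. A minimal userspace sketch of the layout; the composed _PAGE_RW/_PAGE_RWX masks shown are assumptions inferred from how they are used later in this diff:

/* Userspace sketch of the Book3S-64 PTE permission bits shown above.
 * The composed _PAGE_RW/_PAGE_RWX masks are assumed from their use
 * later in this diff. */
#include <stdio.h>

#define _PAGE_EXEC	0x00001UL	/* execute permission */
#define _PAGE_WRITE	0x00002UL	/* write access allowed */
#define _PAGE_READ	0x00004UL	/* read access allowed */
#define _PAGE_RW	(_PAGE_READ | _PAGE_WRITE)
#define _PAGE_RWX	(_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)

int main(void)
{
	unsigned long pte = _PAGE_RW;		/* read/write, no execute */

	printf("read: %d write: %d exec: %d\n",
	       !!(pte & _PAGE_READ), !!(pte & _PAGE_WRITE),
	       !!(pte & _PAGE_EXEC));		/* read: 1 write: 1 exec: 0 */
	return 0;
}
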
@@ -123,10 +119,6 @@
#define _PAGE_KERNEL_RWX (_PAGE_PRIVILEGED | _PAGE_DIRTY | \
_PAGE_RW | _PAGE_EXEC)
/*
- * No page size encoding in the linux PTE
- */
-#define _PAGE_PSIZE 0
-/*
* _PAGE_CHG_MASK masks of bits that are to be preserved across
* pgprot changes
*/
@@ -137,19 +129,12 @@
#define H_PTE_PKEY (H_PTE_PKEY_BIT0 | H_PTE_PKEY_BIT1 | H_PTE_PKEY_BIT2 | \
H_PTE_PKEY_BIT3 | H_PTE_PKEY_BIT4)
/*
- * Mask of bits returned by pte_pgprot()
- */
-#define PAGE_PROT_BITS (_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT | \
- H_PAGE_4K_PFN | _PAGE_PRIVILEGED | _PAGE_ACCESSED | \
- _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_EXEC | \
- _PAGE_SOFT_DIRTY | H_PTE_PKEY)
-/*
* We define 2 sets of base prot bits, one for basic pages (ie,
* cacheable kernel and user pages) and one for non cacheable
* pages. We always set _PAGE_COHERENT when SMP is enabled or
* the processor might need it for DMA coherency.
*/
-#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
+#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE (_PAGE_BASE_NC)
/* Permission masks used to generate the __P and __S table,
@@ -159,8 +144,6 @@
* Write permissions imply read permissions for now (we could make write-only
* pages on BookE but we don't bother for now). Execute permission control is
* possible on platforms that define _PAGE_EXEC
- *
- * Note due to the way vm flags are laid out, the bits are XWR
*/
#define PAGE_NONE __pgprot(_PAGE_BASE | _PAGE_PRIVILEGED)
#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW)
@@ -170,24 +153,6 @@
#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY_X
-#define __P101 PAGE_READONLY_X
-#define __P110 PAGE_COPY_X
-#define __P111 PAGE_COPY_X
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_X
-#define __S101 PAGE_READONLY_X
-#define __S110 PAGE_SHARED_X
-#define __S111 PAGE_SHARED_X
-
/* Permission masks used for kernel mappings */
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
@@ -519,7 +484,11 @@ static inline int pte_special(pte_t pte)
return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SPECIAL));
}
-static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+static inline bool pte_exec(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_EXEC));
+}
+
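
Most of this diff converts accessors from pte_val(), which byte-swaps the stored value on little-endian kernels, to pte_raw() plus a cpu_to_be64() mask: the PTE stays in its stored big-endian order, and the constant is swapped at compile time instead of the loaded value at run time. A userspace model of the pattern, assuming a little-endian host and using __builtin_bswap64 as a stand-in for cpu_to_be64:

/* Model of the pte_raw()/cpu_to_be64() pattern used throughout this
 * diff: test the big-endian stored value against a byte-swapped
 * constant instead of byte-swapping the loaded value. */
#include <stdbool.h>
#include <stdint.h>

#define _PAGE_EXEC 0x00001UL

typedef struct { uint64_t pte; } pte_t;	/* raw big-endian storage */

static inline uint64_t pte_raw(pte_t pte)
{
	return pte.pte;
}

static inline uint64_t cpu_to_be64(uint64_t x)
{
	return __builtin_bswap64(x);	/* folded at compile time for constants */
}

static inline bool pte_exec(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_EXEC));
}

int main(void)
{
	pte_t pte = { .pte = cpu_to_be64(_PAGE_EXEC) };

	return pte_exec(pte) ? 0 : 1;	/* exits 0: exec bit seen */
}
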
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pte_soft_dirty(pte_t pte)
@@ -529,12 +498,12 @@ static inline bool pte_soft_dirty(pte_t pte)
static inline pte_t pte_mksoft_dirty(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_SOFT_DIRTY);
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SOFT_DIRTY));
}
static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
- return __pte(pte_val(pte) & ~_PAGE_SOFT_DIRTY);
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_SOFT_DIRTY));
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
@@ -555,7 +524,7 @@ static inline pte_t pte_mk_savedwrite(pte_t pte)
*/
VM_BUG_ON((pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_RWX | _PAGE_PRIVILEGED)) !=
cpu_to_be64(_PAGE_PRESENT | _PAGE_PRIVILEGED));
- return __pte(pte_val(pte) & ~_PAGE_PRIVILEGED);
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_PRIVILEGED));
}
#define pte_clear_savedwrite pte_clear_savedwrite
@@ -565,14 +534,14 @@ static inline pte_t pte_clear_savedwrite(pte_t pte)
* Used by KSM subsystem to make a protnone pte readonly.
*/
VM_BUG_ON(!pte_protnone(pte));
- return __pte(pte_val(pte) | _PAGE_PRIVILEGED);
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PRIVILEGED));
}
#else
#define pte_clear_savedwrite pte_clear_savedwrite
static inline pte_t pte_clear_savedwrite(pte_t pte)
{
VM_WARN_ON(1);
- return __pte(pte_val(pte) & ~_PAGE_WRITE);
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_WRITE));
}
#endif /* CONFIG_NUMA_BALANCING */
@@ -587,6 +556,11 @@ static inline int pte_present(pte_t pte)
return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID));
}
+static inline bool pte_hw_valid(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT));
+}
+
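
pte_hw_valid() is deliberately narrower than pte_present(): during a THP split the present bit is cleared and _PAGE_INVALID is set, so an entry can be present to the mm while invalid to the hardware. A sketch of the distinction, with illustrative bit positions rather than the header's real values:

/* Sketch: present vs. hardware-valid. Bit positions are illustrative. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define _PAGE_PRESENT	(1UL << 63)
#define _PAGE_INVALID	(1UL << 62)

static inline bool pte_present(uint64_t pte)
{
	return pte & (_PAGE_PRESENT | _PAGE_INVALID);
}

static inline bool pte_hw_valid(uint64_t pte)
{
	return pte & _PAGE_PRESENT;
}

int main(void)
{
	uint64_t pte = _PAGE_PRESENT;

	pte = (pte & ~_PAGE_PRESENT) | _PAGE_INVALID;	/* mid-split state */
	assert(pte_present(pte));	/* still present to the mm */
	assert(!pte_hw_valid(pte));	/* but not valid to the MMU */
	return 0;
}
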
#ifdef CONFIG_PPC_MEM_KEYS
extern bool arch_pte_access_permitted(u64 pte, bool write, bool execute);
#else
@@ -596,25 +570,22 @@ static inline bool arch_pte_access_permitted(u64 pte, bool write, bool execute)
}
#endif /* CONFIG_PPC_MEM_KEYS */
+static inline bool pte_user(pte_t pte)
+{
+ return !(pte_raw(pte) & cpu_to_be64(_PAGE_PRIVILEGED));
+}
+
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
- unsigned long pteval = pte_val(pte);
- /* Also check for pte_user */
- unsigned long clear_pte_bits = _PAGE_PRIVILEGED;
/*
* _PAGE_READ is needed for any access and will be
* cleared for PROT_NONE
*/
- unsigned long need_pte_bits = _PAGE_PRESENT | _PAGE_READ;
-
- if (write)
- need_pte_bits |= _PAGE_WRITE;
-
- if ((pteval & need_pte_bits) != need_pte_bits)
+ if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
return false;
- if ((pteval & clear_pte_bits) == clear_pte_bits)
+ if (write && !pte_write(pte))
return false;
return arch_pte_access_permitted(pte_val(pte), write, 0);
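
The rewrite trades the open-coded mask arithmetic for named predicates, so the function reads as the policy it implements: present, user-accessible and readable, plus writable when a write is requested. A simplified model of the new shape, with the helpers reduced to plain bit tests on illustrative bit positions:

/* Simplified model of the rewritten pte_access_permitted(). */
#include <stdbool.h>
#include <stdint.h>

#define _PAGE_WRITE	 (1UL << 1)
#define _PAGE_READ	 (1UL << 2)
#define _PAGE_PRIVILEGED (1UL << 3)
#define _PAGE_PRESENT	 (1UL << 63)

static inline bool pte_present(uint64_t pte) { return pte & _PAGE_PRESENT; }
static inline bool pte_user(uint64_t pte)    { return !(pte & _PAGE_PRIVILEGED); }
static inline bool pte_read(uint64_t pte)    { return pte & _PAGE_READ; }
static inline bool pte_write(uint64_t pte)   { return pte & _PAGE_WRITE; }

static inline bool pte_access_permitted(uint64_t pte, bool write)
{
	/* _PAGE_READ is needed for any access; cleared for PROT_NONE. */
	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
		return false;
	if (write && !pte_write(pte))
		return false;
	return true;	/* the real code additionally consults the pkey checks */
}

int main(void)
{
	uint64_t user_ro = _PAGE_PRESENT | _PAGE_READ;

	return (pte_access_permitted(user_ro, false) &&
		!pte_access_permitted(user_ro, true)) ? 0 : 1;
}
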
@@ -643,17 +614,32 @@ static inline pte_t pte_wrprotect(pte_t pte)
{
if (unlikely(pte_savedwrite(pte)))
return pte_clear_savedwrite(pte);
- return __pte(pte_val(pte) & ~_PAGE_WRITE);
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_WRITE));
+}
+
+static inline pte_t pte_exprotect(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_EXEC));
}
static inline pte_t pte_mkclean(pte_t pte)
{
- return __pte(pte_val(pte) & ~_PAGE_DIRTY);
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_DIRTY));
}
static inline pte_t pte_mkold(pte_t pte)
{
- return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_ACCESSED));
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_EXEC));
+}
+
+static inline pte_t pte_mkpte(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PTE));
}
static inline pte_t pte_mkwrite(pte_t pte)
@@ -661,22 +647,22 @@ static inline pte_t pte_mkwrite(pte_t pte)
/*
* write implies read, hence set both
*/
- return __pte(pte_val(pte) | _PAGE_RW);
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_RW));
}
static inline pte_t pte_mkdirty(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_DIRTY | _PAGE_SOFT_DIRTY));
}
static inline pte_t pte_mkyoung(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_ACCESSED);
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_ACCESSED));
}
static inline pte_t pte_mkspecial(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_SPECIAL);
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SPECIAL));
}
static inline pte_t pte_mkhuge(pte_t pte)
@@ -686,7 +672,17 @@ static inline pte_t pte_mkhuge(pte_t pte)
static inline pte_t pte_mkdevmap(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_SPECIAL|_PAGE_DEVMAP);
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SPECIAL | _PAGE_DEVMAP));
+}
+
+static inline pte_t pte_mkprivileged(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PRIVILEGED));
+}
+
+static inline pte_t pte_mkuser(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_PRIVILEGED));
}
/*
@@ -705,12 +701,8 @@ static inline int pte_devmap(pte_t pte)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
/* FIXME!! check whether this need to be a conditional */
- return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
-}
-
-static inline bool pte_user(pte_t pte)
-{
- return !(pte_raw(pte) & cpu_to_be64(_PAGE_PRIVILEGED));
+ return __pte_raw((pte_raw(pte) & cpu_to_be64(_PAGE_CHG_MASK)) |
+ cpu_to_be64(pgprot_val(newprot)));
}
/* Encode and de-code a swap entry */
@@ -741,6 +733,8 @@ static inline bool pte_user(pte_t pte)
*/
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE })
#define __swp_entry_to_pte(x) __pte((x).val | _PAGE_PTE)
+#define __pmd_to_swp_entry(pmd) (__pte_to_swp_entry(pmd_pte(pmd)))
+#define __swp_entry_to_pmd(x) (pte_pmd(__swp_entry_to_pte(x)))
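
These two macros let THP migration reuse the existing pte swap-entry plumbing: a pmd is viewed as a pte, encoded or decoded as a swap entry, and converted back. A self-contained sketch of the round trip; the types are simplified and the _PAGE_PTE position is illustrative:

/* Sketch of the pmd <-> swap-entry round trip. _PAGE_PTE marks the
 * value as a leaf entry; its position here is illustrative. */
#include <assert.h>
#include <stdint.h>

#define _PAGE_PTE (1UL << 62)

typedef struct { uint64_t val; } swp_entry_t;
typedef struct { uint64_t pmd; } pmd_t;
typedef struct { uint64_t pte; } pte_t;

static pte_t pmd_pte(pmd_t pmd) { return (pte_t){ pmd.pmd }; }
static pmd_t pte_pmd(pte_t pte) { return (pmd_t){ pte.pte }; }

static swp_entry_t __pte_to_swp_entry(pte_t pte)
{
	return (swp_entry_t){ pte.pte & ~_PAGE_PTE };
}

static pte_t __swp_entry_to_pte(swp_entry_t x)
{
	return (pte_t){ x.val | _PAGE_PTE };
}

#define __pmd_to_swp_entry(pmd)	(__pte_to_swp_entry(pmd_pte(pmd)))
#define __swp_entry_to_pmd(x)	(pte_pmd(__swp_entry_to_pte(x)))

int main(void)
{
	swp_entry_t entry = { 0x1234 };		/* made-up swap entry value */
	pmd_t pmd = __swp_entry_to_pmd(entry);

	assert(__pmd_to_swp_entry(pmd).val == entry.val);
	return 0;
}
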
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY (1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE))
@@ -751,7 +745,7 @@ static inline bool pte_user(pte_t pte)
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SWP_SOFT_DIRTY));
}
static inline bool pte_swp_soft_dirty(pte_t pte)
@@ -761,7 +755,7 @@ static inline bool pte_swp_soft_dirty(pte_t pte)
static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
- return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY);
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_SWP_SOFT_DIRTY));
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
@@ -850,10 +844,10 @@ static inline pgprot_t pgprot_writecombine(pgprot_t prot)
*/
static inline bool pte_ci(pte_t pte)
{
- unsigned long pte_v = pte_val(pte);
+ __be64 pte_v = pte_raw(pte);
- if (((pte_v & _PAGE_CACHE_CTL) == _PAGE_TOLERANT) ||
- ((pte_v & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT))
+ if (((pte_v & cpu_to_be64(_PAGE_CACHE_CTL)) == cpu_to_be64(_PAGE_TOLERANT)) ||
+ ((pte_v & cpu_to_be64(_PAGE_CACHE_CTL)) == cpu_to_be64(_PAGE_NON_IDEMPOTENT)))
return true;
return false;
}
@@ -875,8 +869,16 @@ static inline int pmd_none(pmd_t pmd)
static inline int pmd_present(pmd_t pmd)
{
+ /*
+ * A pmd is considered present if _PAGE_PRESENT is set.
+ * We also need to consider a pmd that is marked invalid during
+ * a THP split as present. Hence we look for _PAGE_INVALID when
+ * we find _PAGE_PRESENT cleared.
+ */
+ if (pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID))
+ return true;
- return !pmd_none(pmd);
+ return false;
}
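
This behavioural change is what THP migration relies on: under the old !pmd_none() definition any non-zero pmd, including a migration swap entry, counted as present, while the new test requires _PAGE_PRESENT or the mid-split _PAGE_INVALID state. A sketch with illustrative bit positions:

/* Sketch: what the new pmd_present() changes. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define _PAGE_PRESENT	(1UL << 63)
#define _PAGE_INVALID	(1UL << 62)
#define _PAGE_PTE	(1UL << 61)

static bool pmd_present_old(uint64_t pmd) { return pmd != 0; }	/* !pmd_none() */

static bool pmd_present_new(uint64_t pmd)
{
	return pmd & (_PAGE_PRESENT | _PAGE_INVALID);
}

int main(void)
{
	uint64_t migration_entry = _PAGE_PTE | 0x1234;	/* swap-encoded pmd */

	assert(pmd_present_old(migration_entry));	/* old: also "present" */
	assert(!pmd_present_new(migration_entry));	/* new: not present */
	return 0;
}
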
static inline int pmd_bad(pmd_t pmd)
@@ -903,7 +905,7 @@ static inline int pud_none(pud_t pud)
static inline int pud_present(pud_t pud)
{
- return !pud_none(pud);
+ return (pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT));
}
extern struct page *pud_page(pud_t pud);
@@ -950,7 +952,7 @@ static inline int pgd_none(pgd_t pgd)
static inline int pgd_present(pgd_t pgd)
{
- return !pgd_none(pgd);
+ return (pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT));
}
static inline pte_t pgd_pte(pgd_t pgd)
@@ -1020,17 +1022,16 @@ extern struct page *pgd_page(pgd_t pgd);
#define pgd_ERROR(e) \
pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
-static inline int map_kernel_page(unsigned long ea, unsigned long pa,
- unsigned long flags)
+static inline int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
if (radix_enabled()) {
#if defined(CONFIG_PPC_RADIX_MMU) && defined(DEBUG_VM)
unsigned long page_size = 1 << mmu_psize_defs[mmu_io_psize].shift;
WARN((page_size != PAGE_SIZE), "I/O page size != PAGE_SIZE");
#endif
- return radix__map_kernel_page(ea, pa, __pgprot(flags), PAGE_SIZE);
+ return radix__map_kernel_page(ea, pa, prot, PAGE_SIZE);
}
- return hash__map_kernel_page(ea, pa, flags);
+ return hash__map_kernel_page(ea, pa, prot);
}
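
Switching the parameter from unsigned long flags to pgprot_t is part of the PTE-flags cleanup this merge mentions: protections now travel as a distinct type, so a caller cannot hand in an unrelated integer by accident. A userspace sketch of the type safety this buys, using stand-in definitions rather than the kernel's:

/* Sketch: pgprot_t as a single-member struct makes protections a
 * distinct type, following the kernel's idiom. Stand-in definitions. */
#include <stdint.h>

typedef struct { uint64_t pgprot; } pgprot_t;
#define __pgprot(x)	((pgprot_t){ .pgprot = (x) })
#define PAGE_KERNEL	__pgprot(0x3)	/* made-up flag value */

static int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
	(void)ea; (void)pa; (void)prot;	/* would install the mapping */
	return 0;
}

int main(void)
{
	/* map_kernel_page(0xc000UL, 0x1000UL, 0x3UL);  <- now a type error */
	return map_kernel_page(0xc000UL, 0x1000UL, PAGE_KERNEL);
}
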
static inline int __meminit vmemmap_create_mapping(unsigned long start,
@@ -1082,6 +1083,12 @@ static inline pte_t *pmdp_ptep(pmd_t *pmd)
#define pmd_soft_dirty(pmd) pte_soft_dirty(pmd_pte(pmd))
#define pmd_mksoft_dirty(pmd) pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)))
#define pmd_clear_soft_dirty(pmd) pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)))
+
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+#define pmd_swp_mksoft_dirty(pmd) pte_pmd(pte_swp_mksoft_dirty(pmd_pte(pmd)))
+#define pmd_swp_soft_dirty(pmd) pte_swp_soft_dirty(pmd_pte(pmd))
+#define pmd_swp_clear_soft_dirty(pmd) pte_pmd(pte_swp_clear_soft_dirty(pmd_pte(pmd)))
+#endif
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
#ifdef CONFIG_NUMA_BALANCING
@@ -1127,6 +1134,10 @@ pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp,
return hash__pmd_hugepage_update(mm, addr, pmdp, clr, set);
}
+/*
+ * Returns true for pmd migration entries, THP, devmap and hugetlb,
+ * but its behaviour depends at compile time on the THP config.
+ */
static inline int pmd_large(pmd_t pmd)
{
return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
@@ -1161,8 +1172,22 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED);
}
+/*
+ * Only returns true for a THP; false for a pmd migration entry.
+ * We also need to return true for a pmd that is in the middle of
+ * a THP split: while splitting a THP, we mark the pmd invalid
+ * (pmdp_invalidate()) before we set it to the pte page address, and
+ * a pmd_trans_huge() check against the pmd during that window should
+ * return true.
+ * We should not call this on a hugetlb entry; check for a HugeTLB
+ * entry using vma->vm_flags instead.
+ * The page table walk rules are explained in Documentation/vm/transhuge.rst.
+ */
static inline int pmd_trans_huge(pmd_t pmd)
{
+ if (!pmd_present(pmd))
+ return false;
+
if (radix_enabled())
return radix__pmd_trans_huge(pmd);
return hash__pmd_trans_huge(pmd);
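
The new pmd_present() guard is what makes the comment above hold: a pmd migration entry carries _PAGE_PTE, since it round-trips through __swp_entry_to_pmd() earlier in this diff, but it is not present, so the early bailout keeps pmd_trans_huge() false for it while a mid-split pmd still reports true. A sketch tying the pieces together, with illustrative bit positions:

/* Sketch: why pmd_trans_huge() must check pmd_present() first. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define _PAGE_PRESENT	(1UL << 63)
#define _PAGE_INVALID	(1UL << 62)
#define _PAGE_PTE	(1UL << 61)

static bool pmd_present(uint64_t pmd)
{
	return pmd & (_PAGE_PRESENT | _PAGE_INVALID);
}

static bool pmd_trans_huge(uint64_t pmd)
{
	if (!pmd_present(pmd))
		return false;
	return pmd & _PAGE_PTE;	/* stand-in for the radix/hash checks */
}

int main(void)
{
	uint64_t thp = _PAGE_PRESENT | _PAGE_PTE;
	uint64_t migration_entry = _PAGE_PTE | 0x1234;	/* swap-encoded */
	uint64_t mid_split = _PAGE_INVALID | _PAGE_PTE;	/* pmdp_invalidate()d */

	assert(pmd_trans_huge(thp));
	assert(!pmd_trans_huge(migration_entry));
	assert(pmd_trans_huge(mid_split));	/* still a THP while splitting */
	return 0;
}
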