author     Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>    2016-04-29 23:25:45 +1000
committer  Michael Ellerman <mpe@ellerman.id.au>                 2016-05-01 18:32:43 +1000
commit     945537df7a107e0dad7b0d3d14df46cfba46f4c5 (patch)
tree       6e1af4950b894c193a7e460be854633ceee16d35  /arch/powerpc/mm/hugetlbpage-hash64.c
parent     eee24b5aafe87f3591990528eae2ad22e2c1d50c (diff)
download   linux-stable-945537df7a107e0dad7b0d3d14df46cfba46f4c5.tar.gz
           linux-stable-945537df7a107e0dad7b0d3d14df46cfba46f4c5.tar.bz2
           linux-stable-945537df7a107e0dad7b0d3d14df46cfba46f4c5.zip
powerpc/mm/book3s: Rename hash specific PTE bits to carry H_ prefix
This makes it easier to follow the hash-only PTE bits.

We have kept _PAGE_CHG_MASK, _HPAGE_CHG_MASK and _PAGE_PROT_BITS as they are in this patch, even though they use hash-specific bits. Using them as-is with radix should be ok, because with radix we expect those bit positions to be zero.

Only renames in this patch, no change in functionality.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
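As a rough illustration of the naming convention (this sketch is not part of the patch, and the bit values are placeholders, not the real definitions in the book3s hash headers), a caller that used to test the generic-looking _PAGE_BUSY now tests the H_-prefixed, hash-specific flag:

	/* Sketch only: placeholder bit values, not the kernel's definitions. */
	#define H_PAGE_BUSY	0x00800UL	/* software: PTE busy, HPTE update in progress */
	#define H_PAGE_HASHPTE	0x01000UL	/* software: PTE has an associated HPTE */

	static inline int hash_pte_busy(unsigned long pte_val)
	{
		/* before this patch the test read: pte_val & _PAGE_BUSY */
		return (pte_val & H_PAGE_BUSY) != 0;
	}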
Diffstat (limited to 'arch/powerpc/mm/hugetlbpage-hash64.c')
-rw-r--r--  arch/powerpc/mm/hugetlbpage-hash64.c  20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index bf9078440256..3058560b6121 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -47,7 +47,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 	do {
 		old_pte = pte_val(*ptep);
 		/* If PTE busy, retry the access */
-		if (unlikely(old_pte & _PAGE_BUSY))
+		if (unlikely(old_pte & H_PAGE_BUSY))
 			return 0;
 		/* If PTE permissions don't match, take page fault */
 		if (unlikely(!check_pte_access(access, old_pte)))
@@ -55,7 +55,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 
 		/* Try to lock the PTE, add ACCESSED and DIRTY if it was
 		 * a write access */
-		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
+		new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED;
 		if (access & _PAGE_WRITE)
 			new_pte |= _PAGE_DIRTY;
 	} while(!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
@@ -69,28 +69,28 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
 
 	/* Check if pte already has an hpte (case 2) */
-	if (unlikely(old_pte & _PAGE_HASHPTE)) {
+	if (unlikely(old_pte & H_PAGE_HASHPTE)) {
 		/* There MIGHT be an HPTE for this pte */
 		unsigned long hash, slot;
 
 		hash = hpt_hash(vpn, shift, ssize);
-		if (old_pte & _PAGE_F_SECOND)
+		if (old_pte & H_PAGE_F_SECOND)
 			hash = ~hash;
 		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
-		slot += (old_pte & _PAGE_F_GIX) >> _PAGE_F_GIX_SHIFT;
+		slot += (old_pte & H_PAGE_F_GIX) >> H_PAGE_F_GIX_SHIFT;
 
 		if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize,
 					 mmu_psize, ssize, flags) == -1)
 			old_pte &= ~_PAGE_HPTEFLAGS;
 	}
 
-	if (likely(!(old_pte & _PAGE_HASHPTE))) {
+	if (likely(!(old_pte & H_PAGE_HASHPTE))) {
 		unsigned long hash = hpt_hash(vpn, shift, ssize);
 
 		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
 
 		/* clear HPTE slot informations in new PTE */
-		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
+		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
 
 		slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0,
 					     mmu_psize, ssize);
@@ -106,14 +106,14 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 			return -1;
 		}
 
-		new_pte |= (slot << _PAGE_F_GIX_SHIFT) &
-			(_PAGE_F_SECOND | _PAGE_F_GIX);
+		new_pte |= (slot << H_PAGE_F_GIX_SHIFT) &
+			(H_PAGE_F_SECOND | H_PAGE_F_GIX);
 	}
 
 	/*
 	 * No need to use ldarx/stdcx here
 	 */
-	*ptep = __pte(new_pte & ~_PAGE_BUSY);
+	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
 	return 0;
 }
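For reference, the hunks above pack the hash-page-table slot into software PTE bits so a later fault or update can find the HPTE again. Below is a minimal sketch of that bookkeeping; the helper names (pte_set_hash_slot, pte_get_hash_slot) and the bit layout are assumptions chosen for illustration, not code from this file or the kernel headers.

	/*
	 * Sketch only: a placeholder layout for the per-PTE hash-slot
	 * bookkeeping seen in the diff above. Bit positions are
	 * illustrative, not the kernel's actual values.
	 */
	#define H_PAGE_F_GIX_SHIFT	56
	#define H_PAGE_F_GIX		(0x7UL << H_PAGE_F_GIX_SHIFT)	/* index of the HPTE within its group */
	#define H_PAGE_F_SECOND		(0x1UL << 59)			/* HPTE lives in the secondary group */

	/* Record the slot returned by the HPTE insert (low 3 bits: index in
	 * the group, bit 3: secondary group), as done with new_pte after
	 * hpte_insert_repeating() above. */
	static inline unsigned long pte_set_hash_slot(unsigned long pte, unsigned long slot)
	{
		pte &= ~(H_PAGE_F_SECOND | H_PAGE_F_GIX);
		pte |= (slot << H_PAGE_F_GIX_SHIFT) & (H_PAGE_F_SECOND | H_PAGE_F_GIX);
		return pte;
	}

	/* Recover the stored 4-bit slot value (secondary-hash flag in the top
	 * bit), as the hpte_updatepp() path above does when re-locating the
	 * HPTE. */
	static inline unsigned long pte_get_hash_slot(unsigned long pte)
	{
		return (pte & (H_PAGE_F_SECOND | H_PAGE_F_GIX)) >> H_PAGE_F_GIX_SHIFT;
	}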