author     Linus Torvalds <torvalds@linux-foundation.org>    2014-02-17 12:36:49 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>    2014-02-17 12:36:49 -0800
commit     f2a77abdb8226b8dbad039d415d5d70a934ac605 (patch)
tree       5bafcd57b253dec45b6e7d8995821653acecfba1 /mm
parent     e4178d809fdaee32a56833fff1f5056c99e90a1a (diff)
parent     66f9af83e56bfa12964d251df9d60fb571579913 (diff)
Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc fixes from Ben Herrenschmidt:
"Here are some more powerpc fixes for 3.14
The main one is a nasty issue with the NUMA balancing support which
requires a small generic change and the addition of a new accessor to
set _PAGE_NUMA. Both have been reviewed and acked by Mel and Rik.
The changelog should have plenty of details but basically, without
this fix, we get random user segfaults and/or corruptions due to
missing TLB/hash flushes. Aneesh series of 3 patches fixes it.
We have some vDSO vs. perf fixes from Anton, some small EEH fixes
from Gavin, a ppc32 regression vs the stack overflow detector, and a
fix for the way we handle PCIe host bridge speed settings on pseries
(which is needed for proper operations of AMD graphics cards on
Power8)"
* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
powerpc/eeh: Disable EEH on reboot
powerpc/eeh: Cleanup on eeh_subsystem_enabled
powerpc/powernv: Rework EEH reset
powerpc: Use unstripped VDSO image for more accurate profiling data
powerpc: Link VDSOs at 0x0
mm: Use ptep/pmdp_set_numa() for updating _PAGE_NUMA bit
mm: Dirty accountable change only apply to non prot numa case
powerpc/mm: Add new "set" flag argument to pte/pmd update function
powerpc/pseries: Add Gen3 definitions for PCIE link speed
powerpc/pseries: Fix regression on PCI link speed
powerpc: Set the correct ksp_limit on ppc32 when switching to irq stack
Diffstat (limited to 'mm')
-rw-r--r--   mm/huge_memory.c |  9
-rw-r--r--   mm/mprotect.c    | 25
2 files changed, 10 insertions, 24 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 82166bf974e1..da23eb96779f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1545,6 +1545,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 				entry = pmd_mknonnuma(entry);
 			entry = pmd_modify(entry, newprot);
 			ret = HPAGE_PMD_NR;
+			set_pmd_at(mm, addr, pmd, entry);
 			BUG_ON(pmd_write(entry));
 		} else {
 			struct page *page = pmd_page(*pmd);
@@ -1557,16 +1558,10 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			 */
 			if (!is_huge_zero_page(page) &&
 			    !pmd_numa(*pmd)) {
-				entry = *pmd;
-				entry = pmd_mknuma(entry);
+				pmdp_set_numa(mm, addr, pmd);
 				ret = HPAGE_PMD_NR;
 			}
 		}
-
-		/* Set PMD if cleared earlier */
-		if (ret == HPAGE_PMD_NR)
-			set_pmd_at(mm, addr, pmd, entry);
-
 		spin_unlock(ptl);
 	}
 
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 7332c1785744..769a67a15803 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -58,36 +58,27 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 				if (pte_numa(ptent))
 					ptent = pte_mknonnuma(ptent);
 				ptent = pte_modify(ptent, newprot);
+				/*
+				 * Avoid taking write faults for pages we
+				 * know to be dirty.
+				 */
+				if (dirty_accountable && pte_dirty(ptent))
+					ptent = pte_mkwrite(ptent);
+				ptep_modify_prot_commit(mm, addr, pte, ptent);
 				updated = true;
 			} else {
 				struct page *page;
 
-				ptent = *pte;
 				page = vm_normal_page(vma, addr, oldpte);
 				if (page && !PageKsm(page)) {
 					if (!pte_numa(oldpte)) {
-						ptent = pte_mknuma(ptent);
-						set_pte_at(mm, addr, pte, ptent);
+						ptep_set_numa(mm, addr, pte);
 						updated = true;
 					}
 				}
 			}
-
-			/*
-			 * Avoid taking write faults for pages we know to be
-			 * dirty.
-			 */
-			if (dirty_accountable && pte_dirty(ptent)) {
-				ptent = pte_mkwrite(ptent);
-				updated = true;
-			}
-
 			if (updated)
 				pages++;
-
-			/* Only !prot_numa always clears the pte */
-			if (!prot_numa)
-				ptep_modify_prot_commit(mm, addr, pte, ptent);
 		} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
 			swp_entry_t entry = pte_to_swp_entry(oldpte);
 
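For context on the two mm hunks above: the fix routes the _PAGE_NUMA update through the new ptep_set_numa()/pmdp_set_numa() accessors instead of open-coding pte_mknuma()/pmd_mknuma() plus set_pte_at()/set_pmd_at() at each call site. The C sketch below shows what a generic fallback for these accessors could look like; it simply mirrors the removed open-coded lines and is an illustration, not the kernel's actual header code, whose architecture-specific (e.g. powerpc) versions additionally flush stale TLB/hash entries as described in the pull message.

/*
 * Illustrative sketch only, not the kernel's authoritative definitions.
 * The point of the series: fold the old two-step "mknuma, then
 * set_pte_at()/set_pmd_at()" sequence into a single accessor, so an
 * architecture such as powerpc can override it with a variant that
 * also flushes stale hash/TLB entries for the present->NUMA transition.
 */
static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep)
{
	pte_t ptent = *ptep;

	ptent = pte_mknuma(ptent);		/* set _PAGE_NUMA on the PTE */
	set_pte_at(mm, addr, ptep, ptent);	/* single write-back point */
}

static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
				 pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmd_mknuma(pmd);			/* same idea for huge PMDs */
	set_pmd_at(mm, addr, pmdp, pmd);
}

With accessors of this shape, the call sites collapse to one-liners such as ptep_set_numa(mm, addr, pte) in change_pte_range() and pmdp_set_numa(mm, addr, pmd) in change_huge_pmd(), exactly as the hunks above show.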