path: root/arch/sparc/mm
author    David S. Miller <davem@davemloft.net>  2014-08-04 16:34:01 -0700
committer David S. Miller <davem@davemloft.net>  2014-08-04 16:34:01 -0700
commit    18f38132528c3e603c66ea464727b29e9bbcb91b (patch)
tree      9600ee863f2b3fc4898ad218a1273cf77fe020d7 /arch/sparc/mm
parent    31dab719fa50cf56d56d3dc25980fecd336f6ca8 (diff)
sparc64: Do not insert non-valid PTEs into the TSB hash table.
The assumption was that update_mmu_cache() (and the equivalent for PMDs) would only be called when the PTE being installed will be accessible by the user. This is not true for code paths originating from remove_migration_pte().

There are dire consequences for placing a non-valid PTE into the TSB. The TLB miss framework assumes that when a TSB entry matches we can just load it into the TLB and return from the TLB miss trap. So if a non-valid PTE is in there, we will deadlock, taking the TLB miss over and over and never satisfying the miss.

Just exit early from update_mmu_cache() and friends in this situation.

Based upon a report and patch from Christopher Alexander Tobias Schulze.

Signed-off-by: David S. Miller <davem@davemloft.net>
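To make the failure mode concrete, here is a small, self-contained C model of the livelock described above. It is only a sketch: the names _VALID, tsb_entry and tlb_miss() are invented for the illustration and are not the real sparc64 trap-handler code. The point it demonstrates is that a refill path which trusts a matching TSB entry without checking its valid bit will re-take the miss forever once a non-valid PTE has been inserted.

/*
 * Minimal sketch of the livelock described in the commit message.
 * Hypothetical names (_VALID, tsb_entry, tlb_miss) -- not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define _VALID 0x1UL		/* stand-in for the PTE valid bit */

static unsigned long tsb_entry;	/* one-entry stand-in for the TSB */
static unsigned long tlb;	/* the translation the "CPU" will use */

/* Fast refill path: if the TSB has a matching entry, load it into the
 * TLB and retry the access -- without re-checking the page tables. */
static bool tlb_miss(void)
{
	if (tsb_entry) {
		tlb = tsb_entry;
		return true;
	}
	return false;		/* would fall through to the slow path */
}

int main(void)
{
	int i;

	tsb_entry = 0x2UL;	/* a PTE without _VALID was inserted */

	/* The retried access only succeeds if the refilled translation is
	 * valid, so a non-valid TSB entry re-takes the miss forever; the
	 * loop is capped here only so the demo terminates. */
	for (i = 0; i < 3; i++) {
		if (tlb_miss() && (tlb & _VALID)) {
			puts("access satisfied");
			return 0;
		}
		puts("TLB miss re-taken");
	}
	puts("livelock: the miss is never satisfied");
	return 1;
}

The early return added by this patch avoids the situation entirely by never letting such an entry reach the TSB in the first place.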
Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--	arch/sparc/mm/init_64.c | 8
1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 16b58ff11e65..db5ddde0b335 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -351,6 +351,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
 	mm = vma->vm_mm;
 
+	/* Don't insert a non-valid PTE into the TSB, we'll deadlock.  */
+	if (!pte_accessible(mm, pte))
+		return;
+
 	spin_lock_irqsave(&mm->context.lock, flags);
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
@@ -2619,6 +2623,10 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
 	pte = pmd_val(entry);
 
+	/* Don't insert a non-valid PMD into the TSB, we'll deadlock.  */
+	if (!(pte & _PAGE_VALID))
+		return;
+
 	/* We are fabricating 8MB pages using 4MB real hw pages.  */
 	pte |= (addr & (1UL << REAL_HPAGE_SHIFT));