author	Hugh Dickins <hugh@veritas.com>	2005-10-29 18:16:21 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-29 21:40:40 -0700
commit	872fec16d9a0ed3b75b8893aa217e49cca575ee5 (patch)
tree	1dfc8b9f2754bdfff645188e497865c00201d535 /mm
parent	46dea3d092d23a58b42499cc8a21de0fad079f4a (diff)
[PATCH] mm: init_mm without ptlock
First step in pushing down the page_table_lock.

init_mm.page_table_lock has been used throughout the architectures (usually for ioremap): not to serialize kernel address space allocation (that's usually vmlist_lock), but because pud_alloc, pmd_alloc, pte_alloc_kernel expect the caller to hold it.

Reverse that: don't lock or unlock init_mm.page_table_lock in any of the architectures; instead rely on pud_alloc, pmd_alloc, pte_alloc_kernel to take and drop it when allocating a new one, to check lest a racing task already did.  Similarly no page_table_lock in vmalloc's map_vm_area.

Some temporary ugliness in __pud_alloc and __pmd_alloc: since they also handle user mms, which are converted only by a later patch, for now they have to lock differently according to whether or not it's init_mm.

If sources get muddled, there's a danger that an arch source taking init_mm.page_table_lock will be mixed with common source also taking it (or neither take it).  So break the rules and make another change, which should break the build for such a mismatch: remove the redundant mm arg from pte_alloc_kernel (ppc64 scrapped its distinct ioremap_mm in 2.6.13).

Exceptions: arm26 used pte_alloc_kernel on user mm, now pte_alloc_map; ia64 used pte_alloc_map on init_mm, now pte_alloc_kernel; parisc had bad args to pmd_alloc and pte_alloc_kernel in unused USE_HPPA_IOREMAP code; ppc64 map_io_page forgot to unlock on failure; ppc mmu_mapin_ram and ppc64 im_free took page_table_lock for no good reason.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
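For illustration, a minimal sketch (not part of this patch; the helper name and its exact shape are hypothetical, modelled on the arch ioremap loops this patch converts) of what a kernel-address mapping loop looks like under the new convention: the caller never touches init_mm.page_table_lock, and pte_alloc_kernel takes no mm argument.

/*
 * Hypothetical sketch of an arch-style ioremap pte loop after this
 * patch.  The caller holds no page_table_lock: pte_alloc_kernel()
 * takes and drops init_mm.page_table_lock itself when it must
 * allocate, re-checking pmd_present() under the lock.
 */
static int remap_pte_range_sketch(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  pgprot_t prot)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);	/* was (&init_mm, pmd, addr) */
	if (!pte)
		return -ENOMEM;
	do {
		/*
		 * Install one kernel pte.  No extra locking is needed
		 * here: kernel address range allocation is serialized
		 * elsewhere (typically by vmlist_lock).
		 */
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}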
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory.c	60
-rw-r--r--	mm/vmalloc.c	4
2 files changed, 28 insertions(+), 36 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 692ad810263d..95a4553c75f7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -307,28 +307,22 @@ out:
return pte_offset_map(pmd, address);
}
-pte_t fastcall * pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+pte_t fastcall * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
if (!pmd_present(*pmd)) {
pte_t *new;
- spin_unlock(&mm->page_table_lock);
- new = pte_alloc_one_kernel(mm, address);
- spin_lock(&mm->page_table_lock);
+ new = pte_alloc_one_kernel(&init_mm, address);
if (!new)
return NULL;
- /*
- * Because we dropped the lock, we should re-check the
- * entry, as somebody else could have populated it..
- */
- if (pmd_present(*pmd)) {
+ spin_lock(&init_mm.page_table_lock);
+ if (pmd_present(*pmd))
pte_free_kernel(new);
- goto out;
- }
- pmd_populate_kernel(mm, pmd, new);
+ else
+ pmd_populate_kernel(&init_mm, pmd, new);
+ spin_unlock(&init_mm.page_table_lock);
}
-out:
return pte_offset_kernel(pmd, address);
}
@@ -2097,30 +2091,30 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
#ifndef __PAGETABLE_PUD_FOLDED
/*
* Allocate page upper directory.
- *
- * We've already handled the fast-path in-line, and we own the
- * page table lock.
+ * We've already handled the fast-path in-line.
*/
pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
pud_t *new;
- spin_unlock(&mm->page_table_lock);
+ if (mm != &init_mm) /* Temporary bridging hack */
+ spin_unlock(&mm->page_table_lock);
new = pud_alloc_one(mm, address);
- spin_lock(&mm->page_table_lock);
- if (!new)
+ if (!new) {
+ if (mm != &init_mm) /* Temporary bridging hack */
+ spin_lock(&mm->page_table_lock);
return NULL;
+ }
- /*
- * Because we dropped the lock, we should re-check the
- * entry, as somebody else could have populated it..
- */
+ spin_lock(&mm->page_table_lock);
if (pgd_present(*pgd)) {
pud_free(new);
goto out;
}
pgd_populate(mm, pgd, new);
out:
+ if (mm == &init_mm) /* Temporary bridging hack */
+ spin_unlock(&mm->page_table_lock);
return pud_offset(pgd, address);
}
#endif /* __PAGETABLE_PUD_FOLDED */
@@ -2128,24 +2122,22 @@ pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long addr
#ifndef __PAGETABLE_PMD_FOLDED
/*
* Allocate page middle directory.
- *
- * We've already handled the fast-path in-line, and we own the
- * page table lock.
+ * We've already handled the fast-path in-line.
*/
pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
pmd_t *new;
- spin_unlock(&mm->page_table_lock);
+ if (mm != &init_mm) /* Temporary bridging hack */
+ spin_unlock(&mm->page_table_lock);
new = pmd_alloc_one(mm, address);
- spin_lock(&mm->page_table_lock);
- if (!new)
+ if (!new) {
+ if (mm != &init_mm) /* Temporary bridging hack */
+ spin_lock(&mm->page_table_lock);
return NULL;
+ }
- /*
- * Because we dropped the lock, we should re-check the
- * entry, as somebody else could have populated it..
- */
+ spin_lock(&mm->page_table_lock);
#ifndef __ARCH_HAS_4LEVEL_HACK
if (pud_present(*pud)) {
pmd_free(new);
@@ -2161,6 +2153,8 @@ pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr
#endif /* __ARCH_HAS_4LEVEL_HACK */
out:
+ if (mm == &init_mm) /* Temporary bridging hack */
+ spin_unlock(&mm->page_table_lock);
return pmd_offset(pud, address);
}
#endif /* __PAGETABLE_PMD_FOLDED */
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 5e9120598799..54a90e83cb31 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -89,7 +89,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
{
pte_t *pte;
- pte = pte_alloc_kernel(&init_mm, pmd, addr);
+ pte = pte_alloc_kernel(pmd, addr);
if (!pte)
return -ENOMEM;
do {
@@ -147,14 +147,12 @@ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
BUG_ON(addr >= end);
pgd = pgd_offset_k(addr);
- spin_lock(&init_mm.page_table_lock);
do {
next = pgd_addr_end(addr, end);
err = vmap_pud_range(pgd, addr, next, prot, pages);
if (err)
break;
} while (pgd++, addr = next, addr != end);
- spin_unlock(&init_mm.page_table_lock);
flush_cache_vmap((unsigned long) area->addr, end);
return err;
}