author    | Hugh Dickins <hugh@veritas.com>        | 2005-10-29 18:16:24 -0700
committer | Linus Torvalds <torvalds@g5.osdl.org>  | 2005-10-29 21:40:40 -0700
commit    | b462705ac679f6195d1b23a752cda592d9107495 (patch)
tree      | c4d9be08f67b0ffdc66c3e170614bd03945f3c42
parent    | c74df32c724a1652ad8399b4891bb02c9d43743a (diff)
download  | linux-b462705ac679f6195d1b23a752cda592d9107495.tar.gz
          | linux-b462705ac679f6195d1b23a752cda592d9107495.tar.bz2
          | linux-b462705ac679f6195d1b23a752cda592d9107495.zip
[PATCH] mm: arches skip ptlock
Convert the few architectures that call pud_alloc, pmd_alloc or pte_alloc_map
on a user mm so that they no longer take the page_table_lock first, nor drop it
after. Each of them can continue to use plain pte_alloc_map: there is no need
to change over to pte_alloc_map_lock, since they are neither racy nor swappable.
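As a condensed sketch of the calling pattern involved (not taken verbatim from
any one architecture: the helper and its name are hypothetical, and the note
about the allocators locking internally reflects the parent patch in this
series):

```c
#include <linux/mm.h>

/* Hypothetical helper: set up page-table levels for addr in a user mm. */
static pte_t *map_user_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(mm, addr);
        pud_t *pud;
        pmd_t *pmd;

        /*
         * Before this patch, callers bracketed the calls below with
         * spin_lock(&mm->page_table_lock)/spin_unlock(...).  Since the
         * parent patch, the allocators take that lock internally when
         * they really need to allocate, so a caller working on an mm
         * that nobody else can race against may call them bare.
         */
        pud = pud_alloc(mm, pgd, addr);
        if (!pud)
                return NULL;
        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
                return NULL;

        /*
         * Neither racy nor swappable here: plain pte_alloc_map
         * suffices, and the caller pte_unmap()s the result.
         */
        return pte_alloc_map(mm, pmd, addr);
}
```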
In the sparc64 io_remap_pfn_range, flush_tlb_range then falls outside the
page_table_lock: that is okay, since on sparc64 it is like flush_tlb_mm, which
has always been called from outside the page_table_lock in dup_mmap.
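For contrast, a path that is racy or swappable would have to change over to
the locked variant; a minimal sketch of that usage, with a hypothetical helper
name, assuming the pte_alloc_map_lock/pte_unmap_unlock pair introduced earlier
in this series:

```c
#include <linux/mm.h>

/* Hypothetical helper: install one pte where other users may race. */
static int set_one_pte(struct mm_struct *mm, pmd_t *pmd,
                       unsigned long addr, pte_t entry)
{
        spinlock_t *ptl;
        pte_t *pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);

        if (!pte)
                return -ENOMEM;
        set_pte(pte, entry);            /* published while the lock is held */
        pte_unmap_unlock(pte, ptl);
        return 0;
}
```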
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r-- | arch/arm/mm/mm-armv.c     | 14
-rw-r--r-- | arch/arm26/mm/memc.c      | 15
-rw-r--r-- | arch/sparc/mm/generic.c   |  4
-rw-r--r-- | arch/sparc64/mm/generic.c |  6
-rw-r--r-- | arch/um/kernel/skas/mmu.c |  3
5 files changed, 3 insertions, 39 deletions
```diff
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 61bc2fa0511e..60f3e039bac2 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -180,11 +180,6 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 
         if (!vectors_high()) {
                 /*
-                 * This lock is here just to satisfy pmd_alloc and pte_lock
-                 */
-                spin_lock(&mm->page_table_lock);
-
-                /*
                  * On ARM, first page must always be allocated since it
                  * contains the machine vectors.
                  */
@@ -201,23 +196,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
                 set_pte(new_pte, *init_pte);
                 pte_unmap_nested(init_pte);
                 pte_unmap(new_pte);
-
-                spin_unlock(&mm->page_table_lock);
         }
 
         return new_pgd;
 
 no_pte:
-        spin_unlock(&mm->page_table_lock);
         pmd_free(new_pmd);
-        free_pages((unsigned long)new_pgd, 2);
-        return NULL;
-
 no_pmd:
-        spin_unlock(&mm->page_table_lock);
         free_pages((unsigned long)new_pgd, 2);
-        return NULL;
-
 no_pgd:
         return NULL;
 }
diff --git a/arch/arm26/mm/memc.c b/arch/arm26/mm/memc.c
index d6b008b8db76..34def6397c3c 100644
--- a/arch/arm26/mm/memc.c
+++ b/arch/arm26/mm/memc.c
@@ -79,12 +79,6 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
                 goto no_pgd;
 
         /*
-         * This lock is here just to satisfy pmd_alloc and pte_lock
-         * FIXME: I bet we could avoid taking it pretty much altogether
-         */
-        spin_lock(&mm->page_table_lock);
-
-        /*
          * On ARM, first page must always be allocated since it contains
          * the machine vectors.
          */
@@ -113,23 +107,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
         memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
                (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
 
-        spin_unlock(&mm->page_table_lock);
-
         /* update MEMC tables */
         cpu_memc_update_all(new_pgd);
         return new_pgd;
 
 no_pte:
-        spin_unlock(&mm->page_table_lock);
         pmd_free(new_pmd);
-        free_pgd_slow(new_pgd);
-        return NULL;
-
 no_pmd:
-        spin_unlock(&mm->page_table_lock);
         free_pgd_slow(new_pgd);
-        return NULL;
-
 no_pgd:
         return NULL;
 }
diff --git a/arch/sparc/mm/generic.c b/arch/sparc/mm/generic.c
index 659c9a71f867..9604893ffdbd 100644
--- a/arch/sparc/mm/generic.c
+++ b/arch/sparc/mm/generic.c
@@ -81,9 +81,8 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
         dir = pgd_offset(mm, from);
         flush_cache_range(vma, beg, end);
 
-        spin_lock(&mm->page_table_lock);
         while (from < end) {
-                pmd_t *pmd = pmd_alloc(current->mm, dir, from);
+                pmd_t *pmd = pmd_alloc(mm, dir, from);
                 error = -ENOMEM;
                 if (!pmd)
                         break;
@@ -93,7 +92,6 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
                 from = (from + PGDIR_SIZE) & PGDIR_MASK;
                 dir++;
         }
-        spin_unlock(&mm->page_table_lock);
 
         flush_tlb_range(vma, beg, end);
         return error;
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
index afc01cec701f..112c316e7cd2 100644
--- a/arch/sparc64/mm/generic.c
+++ b/arch/sparc64/mm/generic.c
@@ -135,9 +135,8 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
         dir = pgd_offset(mm, from);
         flush_cache_range(vma, beg, end);
 
-        spin_lock(&mm->page_table_lock);
         while (from < end) {
-                pud_t *pud = pud_alloc(current->mm, dir, from);
+                pud_t *pud = pud_alloc(mm, dir, from);
                 error = -ENOMEM;
                 if (!pud)
                         break;
@@ -147,8 +146,7 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
                 from = (from + PGDIR_SIZE) & PGDIR_MASK;
                 dir++;
         }
-        flush_tlb_range(vma, beg, end);
-        spin_unlock(&mm->page_table_lock);
+        flush_tlb_range(vma, beg, end);
 
         return error;
 }
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 240143b616a2..02cf36e0331a 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -28,7 +28,6 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
         pmd_t *pmd;
         pte_t *pte;
 
-        spin_lock(&mm->page_table_lock);
         pgd = pgd_offset(mm, proc);
         pud = pud_alloc(mm, pgd, proc);
         if (!pud)
@@ -63,7 +62,6 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
         *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
         *pte = pte_mkexec(*pte);
         *pte = pte_wrprotect(*pte);
-        spin_unlock(&mm->page_table_lock);
         return(0);
 
 out_pmd:
@@ -71,7 +69,6 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
 out_pte:
         pmd_free(pmd);
 out:
-        spin_unlock(&mm->page_table_lock);
         return(-ENOMEM);
 }
```