author     Kirill Tkhai <tkhai@yandex.ru>           2013-04-09 00:29:46 +0400
committer  David S. Miller <davem@davemloft.net>    2013-04-08 22:50:47 -0400
commit     07df841877195765d958df146f614fc7bdedd5e3 (patch)
tree       9c749251ab90cd88b42f98c42e652f9b75b425b9 /arch
parent     598ec971ddcf7dcb0c381230e69a39c75b7fac1a (diff)
sparc64: Do not save/restore interrupts in get_new_mmu_context()
get_new_mmu_context() is always called with interrupts disabled,
so the save/restore of the interrupt state around ctx_alloc_lock
is unnecessary and this micro-optimization is possible.

(Also fix the comment above switch_mm(), which is called both with
interrupts enabled and with interrupts disabled.)
Signed-off-by: Kirill Tkhai <tkhai@yandex.ru>
CC: David Miller <davem@davemloft.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
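
For background on the locking pattern involved: spin_lock_irqsave() saves the caller's interrupt state and disables interrupts before taking the lock, which is what makes the primitive safe regardless of whether the caller already has interrupts off. When every caller is known to run with interrupts disabled, that save/restore is pure overhead and plain spin_lock()/spin_unlock() suffices. A minimal kernel-style sketch of the two patterns (example_lock and both function names are illustrative, not from this patch):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);	/* illustrative, not ctx_alloc_lock */

	/* Callable from any context: save IRQ state, disable IRQs, lock. */
	static void take_from_any_context(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&example_lock, flags);
		/* ... critical section ... */
		spin_unlock_irqrestore(&example_lock, flags);
	}

	/*
	 * Valid only when every caller already has IRQs disabled --
	 * the invariant this patch asserts for get_new_mmu_context().
	 * The flags save/restore disappears from the fast path.
	 */
	static void take_with_irqs_already_off(void)
	{
		spin_lock(&example_lock);
		/* ... critical section ... */
		spin_unlock(&example_lock);
	}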
Diffstat (limited to 'arch')
-rw-r--r--  arch/sparc/include/asm/mmu_context_64.h  |  2 +-
-rw-r--r--  arch/sparc/mm/init_64.c                   |  5 ++---
2 files changed, 3 insertions(+), 4 deletions(-)
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 9191ca62ed9c..3d528f06e4b0 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -68,7 +68,7 @@ extern void smp_tsb_sync(struct mm_struct *mm);
 extern void __flush_tlb_mm(unsigned long, unsigned long);
 
-/* Switch the current MM context. Interrupts are disabled. */
+/* Switch the current MM context. */
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
 			     struct task_struct *tsk)
 {
 	unsigned long ctx_valid, flags;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 076068f4459e..4ccaa1b9961f 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -681,10 +681,9 @@ void get_new_mmu_context(struct mm_struct *mm)
 {
 	unsigned long ctx, new_ctx;
 	unsigned long orig_pgsz_bits;
-	unsigned long flags;
 	int new_version;
 
-	spin_lock_irqsave(&ctx_alloc_lock, flags);
+	spin_lock(&ctx_alloc_lock);
 	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
 	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
 	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
@@ -720,7 +719,7 @@ void get_new_mmu_context(struct mm_struct *mm)
 out:
 	tlb_context_cache = new_ctx;
 	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
-	spin_unlock_irqrestore(&ctx_alloc_lock, flags);
+	spin_unlock(&ctx_alloc_lock);
 
 	if (unlikely(new_version))
 		smp_new_mmu_context_version();
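
The invariant the patch relies on is visible in the callers: get_new_mmu_context() is only reached from paths that already hold an irqsave-style lock. A condensed sketch of the switch_mm() caller from the header touched above (abridged from mmu_context_64.h; the TSB/TLB switching between lock and unlock is elided):

	static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
				     struct task_struct *tsk)
	{
		unsigned long ctx_valid, flags;

		spin_lock_irqsave(&mm->context.lock, flags);	/* IRQs now off */
		ctx_valid = CTX_VALID(mm->context);
		if (!ctx_valid)
			get_new_mmu_context(mm);	/* always runs with IRQs disabled */

		/* ... load secondary context, TSB switch, TLB flush elided ... */

		spin_unlock_irqrestore(&mm->context.lock, flags);
	}

This is also why the comment removal in the first hunk pairs with the lock change in the second: switch_mm() itself may be entered with interrupts either enabled or disabled (hence the corrected comment), but by the time it calls get_new_mmu_context() it has disabled them via spin_lock_irqsave().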