| Field | Value |
|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org>, 2009-09-23 18:14:11 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org>, 2009-09-23 18:14:11 -0700 |
| commit | 94a8d5caba74211ec76dac80fc6e2d5c391530df (patch) |
| tree | 21d17d214a354ae00ae27217d82b67bfc5bff3a3 /arch/arm |
| parent | 2bcd57ab61e7cabed626226a3771617981c11ce1 (diff) |
| parent | 6ba2ef7baac23a5d9bb85e28b882d16b439a2293 (diff) |
| download | linux-94a8d5caba74211ec76dac80fc6e2d5c391530df.tar.gz, linux-94a8d5caba74211ec76dac80fc6e2d5c391530df.tar.bz2, linux-94a8d5caba74211ec76dac80fc6e2d5c391530df.zip |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus
* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus: (39 commits)
cpumask: Move deprecated functions to end of header.
cpumask: remove unused deprecated functions, avoid accusations of insanity
cpumask: use new-style cpumask ops in mm/quicklist.
cpumask: use mm_cpumask() wrapper: x86
cpumask: use mm_cpumask() wrapper: um
cpumask: use mm_cpumask() wrapper: mips
cpumask: use mm_cpumask() wrapper: mn10300
cpumask: use mm_cpumask() wrapper: m32r
cpumask: use mm_cpumask() wrapper: arm
cpumask: Use accessors for cpu_*_mask: um
cpumask: Use accessors for cpu_*_mask: powerpc
cpumask: Use accessors for cpu_*_mask: mips
cpumask: Use accessors for cpu_*_mask: m32r
cpumask: remove arch_send_call_function_ipi
cpumask: arch_send_call_function_ipi_mask: s390
cpumask: arch_send_call_function_ipi_mask: powerpc
cpumask: arch_send_call_function_ipi_mask: mips
cpumask: arch_send_call_function_ipi_mask: m32r
cpumask: arch_send_call_function_ipi_mask: alpha
cpumask: remove obsolete topology_core_siblings and topology_thread_siblings: ia64
...
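
Most of these commits mechanically switch callers from the old fixed-size `cpumask_t` operations to the pointer-based `struct cpumask` API. The following is a minimal sketch of the two styles side by side, not taken from the series itself: the kernel helpers (`cpu_isset`, `cpu_set`, `cpumask_test_cpu`, `cpumask_set_cpu`, `smp_processor_id`) are real, but the function name is made up and the snippet assumes a kernel build context.

```c
#include <linux/cpumask.h>
#include <linux/smp.h>

/*
 * Illustrative only (not from the series): contrasts the deprecated
 * cpumask_t operations with the struct cpumask * accessors that these
 * commits convert callers to. The function name is hypothetical.
 */
static void example_mark_current_cpu(cpumask_t *old_mask, struct cpumask *new_mask)
{
        int cpu = smp_processor_id();

        /* Old style: macros that take a cpumask_t lvalue. */
        if (!cpu_isset(cpu, *old_mask))
                cpu_set(cpu, *old_mask);

        /* New style: functions that take a struct cpumask pointer. */
        if (!cpumask_test_cpu(cpu, new_mask))
                cpumask_set_cpu(cpu, new_mask);
}
```

The pointer-based form is what lets the mask be allocated off-stack when NR_CPUS is large, which is the motivation behind the series.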
Diffstat (limited to 'arch/arm')
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | arch/arm/include/asm/cacheflush.h | 8 |
| -rw-r--r-- | arch/arm/include/asm/mmu_context.h | 7 |
| -rw-r--r-- | arch/arm/include/asm/smp.h | 1 |
| -rw-r--r-- | arch/arm/include/asm/tlbflush.h | 4 |
| -rw-r--r-- | arch/arm/kernel/smp.c | 10 |
| -rw-r--r-- | arch/arm/mm/context.c | 2 |
| -rw-r--r-- | arch/arm/mm/flush.c | 10 |
7 files changed, 21 insertions, 21 deletions
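
The ARM changes below all follow the same shape: direct reads and writes of `mm->cpu_vm_mask` become calls through the `mm_cpumask()` accessor, which yields the `struct cpumask *` expected by the `cpumask_*` operations. A minimal sketch of that pattern, with a hypothetical helper name that is not part of the patch:

```c
#include <linux/types.h>
#include <linux/mm_types.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

/*
 * Sketch of the conversion pattern applied throughout the diff below
 * (illustrative helper, not part of the patch): mm_cpumask(mm) returns
 * the struct cpumask * that replaces direct use of mm->cpu_vm_mask.
 */
static bool current_cpu_has_used_mm(struct mm_struct *mm)
{
        /* Previously written as: cpu_isset(smp_processor_id(), mm->cpu_vm_mask) */
        return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
}
```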
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 1a711ea8418b..fd03fb63a332 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -334,14 +334,14 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
 #ifndef CONFIG_CPU_CACHE_VIPT
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
-        if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
                 __cpuc_flush_user_all();
 }
 
 static inline void
 flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-        if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
                 __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
                                         vma->vm_flags);
 }
@@ -349,7 +349,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long
 static inline void
 flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
-        if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                 unsigned long addr = user_addr & PAGE_MASK;
                 __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
         }
@@ -360,7 +360,7 @@ flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                      unsigned long uaddr, void *kaddr,
                      unsigned long len, int write)
 {
-        if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                 unsigned long addr = (unsigned long)kaddr;
                 __cpuc_coherent_kern_range(addr, addr + len);
         }
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index bcdb9291ef0c..de6cefb329dd 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -103,14 +103,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
 #ifdef CONFIG_SMP
         /* check for possible thread migration */
-        if (!cpus_empty(next->cpu_vm_mask) && !cpu_isset(cpu, next->cpu_vm_mask))
+        if (!cpumask_empty(mm_cpumask(next)) &&
+            !cpumask_test_cpu(cpu, mm_cpumask(next)))
                 __flush_icache_all();
 #endif
-        if (!cpu_test_and_set(cpu, next->cpu_vm_mask) || prev != next) {
+        if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
                 check_context(next);
                 cpu_switch_mm(next->pgd, next);
                 if (cache_is_vivt())
-                        cpu_clear(cpu, prev->cpu_vm_mask);
+                        cpumask_clear_cpu(cpu, mm_cpumask(prev));
         }
 #endif
 }
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index a06e735b262a..e0d763be1846 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -93,7 +93,6 @@ extern void platform_cpu_enable(unsigned int cpu);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
 
 /*
  * show local interrupt info
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index c964f3fc3bc5..a45ab5dd8255 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -350,7 +350,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
         if (tlb_flag(TLB_WB))
                 dsb();
 
-        if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) {
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
                 if (tlb_flag(TLB_V3_FULL))
                         asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
                 if (tlb_flag(TLB_V4_U_FULL))
@@ -388,7 +388,7 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
         if (tlb_flag(TLB_WB))
                 dsb();
 
-        if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                 if (tlb_flag(TLB_V3_PAGE))
                         asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc");
                 if (tlb_flag(TLB_V4_U_PAGE))
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index de885fd256c5..e0d32770bb3d 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -189,7 +189,7 @@ int __cpuexit __cpu_disable(void)
         read_lock(&tasklist_lock);
         for_each_process(p) {
                 if (p->mm)
-                        cpu_clear(cpu, p->mm->cpu_vm_mask);
+                        cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
         }
         read_unlock(&tasklist_lock);
 
@@ -257,7 +257,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
         atomic_inc(&mm->mm_users);
         atomic_inc(&mm->mm_count);
         current->active_mm = mm;
-        cpu_set(cpu, mm->cpu_vm_mask);
+        cpumask_set_cpu(cpu, mm_cpumask(mm));
         cpu_switch_mm(mm->pgd, mm);
         enter_lazy_tlb(mm, current);
         local_flush_tlb_all();
@@ -643,7 +643,7 @@ void flush_tlb_all(void)
 void flush_tlb_mm(struct mm_struct *mm)
 {
         if (tlb_ops_need_broadcast())
-                on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
+                on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
         else
                 local_flush_tlb_mm(mm);
 }
@@ -654,7 +654,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
                 struct tlb_args ta;
                 ta.ta_vma = vma;
                 ta.ta_start = uaddr;
-                on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+                on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
         } else
                 local_flush_tlb_page(vma, uaddr);
 }
@@ -677,7 +677,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
                 ta.ta_vma = vma;
                 ta.ta_start = start;
                 ta.ta_end = end;
-                on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+                on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
         } else
                 local_flush_tlb_range(vma, start, end);
 }
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index fc84fcc74380..6bda76a43199 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -59,6 +59,6 @@ void __new_context(struct mm_struct *mm)
         }
         spin_unlock(&cpu_asid_lock);
 
-        mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
+        cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
         mm->context.id = asid;
 }
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 575f3ad722e7..b27942909b23 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -50,7 +50,7 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 void flush_cache_mm(struct mm_struct *mm)
 {
         if (cache_is_vivt()) {
-                if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+                if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
                         __cpuc_flush_user_all();
                 return;
         }
@@ -73,7 +73,7 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
         if (cache_is_vivt()) {
-                if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
+                if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
                         __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
                                                 vma->vm_flags);
                 return;
@@ -97,7 +97,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
         if (cache_is_vivt()) {
-                if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+                if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                         unsigned long addr = user_addr & PAGE_MASK;
                         __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
                 }
@@ -113,7 +113,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                          unsigned long len, int write)
 {
         if (cache_is_vivt()) {
-                if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+                if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                         unsigned long addr = (unsigned long)kaddr;
                         __cpuc_coherent_kern_range(addr, addr + len);
                 }
@@ -126,7 +126,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
         }
 
         /* VIPT non-aliasing cache */
-        if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) &&
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) &&
             vma->vm_flags & VM_EXEC) {
                 unsigned long addr = (unsigned long)kaddr;
                 /* only flushing the kernel mapping on non-aliasing VIPT */