Diffstat (limited to 'arch/csky')
-rw-r--r--  arch/csky/include/asm/mmu.h          |   1
-rw-r--r--  arch/csky/include/asm/mmu_context.h  | 112
-rw-r--r--  arch/csky/include/asm/pgtable.h      |   2
-rw-r--r--  arch/csky/kernel/smp.c               |   2
-rw-r--r--  arch/csky/mm/init.c                  |   2
-rw-r--r--  arch/csky/mm/tlb.c                   | 190

6 files changed, 14 insertions(+), 295 deletions(-)
diff --git a/arch/csky/include/asm/mmu.h b/arch/csky/include/asm/mmu.h
index cb344675ccc4..06f509ae09b0 100644
--- a/arch/csky/include/asm/mmu.h
+++ b/arch/csky/include/asm/mmu.h
@@ -5,7 +5,6 @@
 #define __ASM_CSKY_MMU_H
 
 typedef struct {
-        unsigned long asid[NR_CPUS];
         void *vdso;
 } mm_context_t;
 
diff --git a/arch/csky/include/asm/mmu_context.h b/arch/csky/include/asm/mmu_context.h
index 734db3a122e1..86dde481df76 100644
--- a/arch/csky/include/asm/mmu_context.h
+++ b/arch/csky/include/asm/mmu_context.h
@@ -16,122 +16,24 @@
 #define TLBMISS_HANDLER_SETUP_PGD(pgd) \
         setup_pgd(__pa(pgd), false)
+
 #define TLBMISS_HANDLER_SETUP_PGD_KERNEL(pgd) \
         setup_pgd(__pa(pgd), true)
 
-#define cpu_context(cpu, mm)    ((mm)->context.asid[cpu])
-#define cpu_asid(cpu, mm)       (cpu_context((cpu), (mm)) & ASID_MASK)
-#define asid_cache(cpu)         (cpu_data[cpu].asid_cache)
-
-#define ASID_FIRST_VERSION      (1 << CONFIG_CPU_ASID_BITS)
-#define ASID_INC                0x1
-#define ASID_MASK               (ASID_FIRST_VERSION - 1)
-#define ASID_VERSION_MASK       ~ASID_MASK
+#define init_new_context(tsk,mm)        0
+#define activate_mm(prev,next)          switch_mm(prev, next, current)
 
 #define destroy_context(mm)             do {} while (0)
 #define enter_lazy_tlb(mm, tsk)         do {} while (0)
 #define deactivate_mm(tsk, mm)          do {} while (0)
 
-/*
- * All unused by hardware upper bits will be considered
- * as a software asid extension.
- */
-static inline void
-get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
-{
-        unsigned long asid = asid_cache(cpu);
-
-        asid += ASID_INC;
-        if (!(asid & ASID_MASK)) {
-                flush_tlb_all();        /* start new asid cycle */
-                if (!asid)              /* fix version if needed */
-                        asid = ASID_FIRST_VERSION;
-        }
-        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
-}
-
-/*
- * Initialize the context related info for a new mm_struct
- * instance.
- */
-static inline int
-init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
-        int i;
-
-        for_each_online_cpu(i)
-                cpu_context(i, mm) = 0;
-        return 0;
-}
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-                        struct task_struct *tsk)
-{
-        unsigned int cpu = smp_processor_id();
-        unsigned long flags;
-
-        local_irq_save(flags);
-        /* Check if our ASID is of an older version and thus invalid */
-        if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
-                get_new_mmu_context(next, cpu);
-        write_mmu_entryhi(cpu_asid(cpu, next));
-        TLBMISS_HANDLER_SETUP_PGD(next->pgd);
-
-        /*
-         * Mark current->active_mm as not "active" anymore.
-         * We don't want to mislead possible IPI tlb flush routines.
-         */
-        cpumask_clear_cpu(cpu, mm_cpumask(prev));
-        cpumask_set_cpu(cpu, mm_cpumask(next));
-
-        local_irq_restore(flags);
-}
-
-/*
- * After we have set current->mm to a new value, this activates
- * the context for the new mm so we see the new mappings.
- */
 static inline void
-activate_mm(struct mm_struct *prev, struct mm_struct *next)
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+          struct task_struct *tsk)
 {
-        unsigned long flags;
-        int cpu = smp_processor_id();
-
-        local_irq_save(flags);
+        if (prev != next)
+                tlb_invalid_all();
 
-        /* Unconditionally get a new ASID. */
-        get_new_mmu_context(next, cpu);
-
-        write_mmu_entryhi(cpu_asid(cpu, next));
         TLBMISS_HANDLER_SETUP_PGD(next->pgd);
-
-        /* mark mmu ownership change */
-        cpumask_clear_cpu(cpu, mm_cpumask(prev));
-        cpumask_set_cpu(cpu, mm_cpumask(next));
-
-        local_irq_restore(flags);
 }
-
-/*
- * If mm is currently active_mm, we can't really drop it. Instead,
- * we will get a new one for it.
- */
-static inline void
-drop_mmu_context(struct mm_struct *mm, unsigned int cpu)
-{
-        unsigned long flags;
-
-        local_irq_save(flags);
-
-        if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
-                get_new_mmu_context(mm, cpu);
-                write_mmu_entryhi(cpu_asid(cpu, mm));
-        } else {
-                /* will get a new context next time */
-                cpu_context(cpu, mm) = 0;
-        }
-
-        local_irq_restore(flags);
-}
-
 #endif /* __ASM_CSKY_MMU_CONTEXT_H */
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index dcea277c09ae..c429a6f347de 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -290,8 +290,6 @@ static inline pte_t *pte_offset(pmd_t *dir, unsigned long address)
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern void paging_init(void);
 
-extern void show_jtlb_table(void);
-
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                       pte_t *pte);
 
diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c
index b07a534b3062..b753d382e4ce 100644
--- a/arch/csky/kernel/smp.c
+++ b/arch/csky/kernel/smp.c
@@ -212,8 +212,6 @@ void csky_start_secondary(void)
         TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);
         TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir);
 
-        asid_cache(smp_processor_id()) = ASID_FIRST_VERSION;
-
 #ifdef CONFIG_CPU_HAS_FPU
         init_fpu();
 #endif
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
index 66e597053488..eb0dc9e5065f 100644
--- a/arch/csky/mm/init.c
+++ b/arch/csky/mm/init.c
@@ -114,8 +114,6 @@ void __init pre_mmu_init(void)
         TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);
         TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir);
 
-        asid_cache(smp_processor_id()) = ASID_FIRST_VERSION;
-
         /* Setup page mask to 4k */
         write_mmu_pagemask(0);
 }
diff --git a/arch/csky/mm/tlb.c b/arch/csky/mm/tlb.c
index 08b8394e5b8f..efae81ce7fbc 100644
--- a/arch/csky/mm/tlb.c
+++ b/arch/csky/mm/tlb.c
@@ -10,8 +10,6 @@
 #include <asm/pgtable.h>
 #include <asm/setup.h>
 
-#define CSKY_TLB_SIZE CONFIG_CPU_TLB_SIZE
-
 void flush_tlb_all(void)
 {
         tlb_invalid_all();
@@ -19,201 +17,27 @@ void flush_tlb_all(void)
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-        int cpu = smp_processor_id();
-
-        if (cpu_context(cpu, mm) != 0)
-                drop_mmu_context(mm, cpu);
-
         tlb_invalid_all();
 }
 
-#define restore_asid_inv_utlb(oldpid, newpid) \
-do { \
-        if ((oldpid & ASID_MASK) == newpid) \
-                write_mmu_entryhi(oldpid + 1); \
-        write_mmu_entryhi(oldpid); \
-} while (0)
-
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-                        unsigned long end)
+                     unsigned long end)
 {
-        struct mm_struct *mm = vma->vm_mm;
-        int cpu = smp_processor_id();
-
-        if (cpu_context(cpu, mm) != 0) {
-                unsigned long size, flags;
-                int newpid = cpu_asid(cpu, mm);
-
-                local_irq_save(flags);
-                size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-                size = (size + 1) >> 1;
-                if (size <= CSKY_TLB_SIZE/2) {
-                        start &= (PAGE_MASK << 1);
-                        end += ((PAGE_SIZE << 1) - 1);
-                        end &= (PAGE_MASK << 1);
-#ifdef CONFIG_CPU_HAS_TLBI
-                        while (start < end) {
-                                asm volatile("tlbi.vaas %0"
-                                             ::"r"(start | newpid));
-                                start += (PAGE_SIZE << 1);
-                        }
-                        sync_is();
-#else
-                        {
-                        int oldpid = read_mmu_entryhi();
-
-                        while (start < end) {
-                                int idx;
-
-                                write_mmu_entryhi(start | newpid);
-                                start += (PAGE_SIZE << 1);
-                                tlb_probe();
-                                idx = read_mmu_index();
-                                if (idx >= 0)
-                                        tlb_invalid_indexed();
-                        }
-                        restore_asid_inv_utlb(oldpid, newpid);
-                        }
-#endif
-                } else {
-                        drop_mmu_context(mm, cpu);
-                }
-                local_irq_restore(flags);
-        }
+        tlb_invalid_all();
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-        unsigned long size, flags;
-
-        local_irq_save(flags);
-        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-        if (size <= CSKY_TLB_SIZE) {
-                start &= (PAGE_MASK << 1);
-                end += ((PAGE_SIZE << 1) - 1);
-                end &= (PAGE_MASK << 1);
-#ifdef CONFIG_CPU_HAS_TLBI
-                while (start < end) {
-                        asm volatile("tlbi.vaas %0"::"r"(start));
-                        start += (PAGE_SIZE << 1);
-                }
-                sync_is();
-#else
-                {
-                int oldpid = read_mmu_entryhi();
-
-                while (start < end) {
-                        int idx;
-
-                        write_mmu_entryhi(start);
-                        start += (PAGE_SIZE << 1);
-                        tlb_probe();
-                        idx = read_mmu_index();
-                        if (idx >= 0)
-                                tlb_invalid_indexed();
-                }
-                restore_asid_inv_utlb(oldpid, 0);
-                }
-#endif
-        } else {
-                flush_tlb_all();
-        }
-
-        local_irq_restore(flags);
+        tlb_invalid_all();
 }
 
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
-        int cpu = smp_processor_id();
-        int newpid = cpu_asid(cpu, vma->vm_mm);
-
-        if (!vma || cpu_context(cpu, vma->vm_mm) != 0) {
-                page &= (PAGE_MASK << 1);
-
-#ifdef CONFIG_CPU_HAS_TLBI
-                asm volatile("tlbi.vaas %0"::"r"(page | newpid));
-                sync_is();
-#else
-                {
-                int oldpid, idx;
-                unsigned long flags;
-
-                local_irq_save(flags);
-                oldpid = read_mmu_entryhi();
-                write_mmu_entryhi(page | newpid);
-                tlb_probe();
-                idx = read_mmu_index();
-                if (idx >= 0)
-                        tlb_invalid_indexed();
-
-                restore_asid_inv_utlb(oldpid, newpid);
-                local_irq_restore(flags);
-                }
-#endif
-        }
+        tlb_invalid_all();
 }
 
-/*
- * Remove one kernel space TLB entry. This entry is assumed to be marked
- * global so we don't do the ASID thing.
- */
-void flush_tlb_one(unsigned long page)
+void flush_tlb_one(unsigned long addr)
 {
-        int oldpid;
-
-        oldpid = read_mmu_entryhi();
-        page &= (PAGE_MASK << 1);
-
-#ifdef CONFIG_CPU_HAS_TLBI
-        page = page | (oldpid & 0xfff);
-        asm volatile("tlbi.vaas %0"::"r"(page));
-        sync_is();
-#else
-        {
-        int idx;
-        unsigned long flags;
-
-        page = page | (oldpid & 0xff);
-
-        local_irq_save(flags);
-        write_mmu_entryhi(page);
-        tlb_probe();
-        idx = read_mmu_index();
-        if (idx >= 0)
-                tlb_invalid_indexed();
-        restore_asid_inv_utlb(oldpid, oldpid);
-        local_irq_restore(flags);
-        }
-#endif
+        tlb_invalid_all();
 }
 EXPORT_SYMBOL(flush_tlb_one);
-
-/* show current 32 jtlbs */
-void show_jtlb_table(void)
-{
-        unsigned long flags;
-        int entryhi, entrylo0, entrylo1;
-        int entry;
-        int oldpid;
-
-        local_irq_save(flags);
-        entry = 0;
-        pr_info("\n\n\n");
-
-        oldpid = read_mmu_entryhi();
-        while (entry < CSKY_TLB_SIZE) {
-                write_mmu_index(entry);
-                tlb_read();
-                entryhi = read_mmu_entryhi();
-                entrylo0 = read_mmu_entrylo0();
-                entrylo0 = entrylo0;
-                entrylo1 = read_mmu_entrylo1();
-                entrylo1 = entrylo1;
-                pr_info("jtlb[%d]: entryhi - 0x%x; entrylo0 - 0x%x;"
-                        " entrylo1 - 0x%x\n",
-                        entry, entryhi, entrylo0, entrylo1);
-                entry++;
-        }
-        write_mmu_entryhi(oldpid);
-        local_irq_restore(flags);
-}
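Taken together, the patch removes the per-CPU software ASID machinery and makes every csky TLB maintenance path fall back to a full invalidate. A minimal sketch of the post-patch behaviour, condensed from the '+' side of the hunks above (tlb_invalid_all() and TLBMISS_HANDLER_SETUP_PGD() are the csky primitives already shown in the diff; this is an illustration, not the literal file contents):

static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
          struct task_struct *tsk)
{
        /* No ASID generation to compare any more: switching to a
         * different mm simply pays for a full TLB invalidate. */
        if (prev != next)
                tlb_invalid_all();

        /* Point the TLB refill handler at the next mm's page table. */
        TLBMISS_HANDLER_SETUP_PGD(next->pgd);
}

/* Every ranged/per-page flush entry point now collapses to the same call. */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
        tlb_invalid_all();
}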