Diffstat (limited to 'arch/mips/mm')

 arch/mips/mm/c-r3k.c   |  5
 arch/mips/mm/c-r4k.c   | 98
 arch/mips/mm/c-sb1.c   |  1
 arch/mips/mm/c-tx39.c  |  7
 arch/mips/mm/cache.c   |  1
 arch/mips/mm/fault.c   | 13
 arch/mips/mm/highmem.c |  2
 arch/mips/mm/init.c    | 50
 arch/mips/mm/pg-r4k.c  |  1
 arch/mips/mm/sc-rm7k.c |  2
 arch/mips/mm/tlb-r4k.c | 85
 arch/mips/mm/tlbex.c   | 89

 12 files changed, 267 insertions(+), 87 deletions(-)
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 9dd1352d5748..bb041a22f20a 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -260,6 +260,10 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma, unsigned long page,
 {
 }
 
+static void local_r3k_flush_data_cache_page(unsigned long addr)
+{
+}
+
 static void r3k_flush_data_cache_page(unsigned long addr)
 {
 }
@@ -335,6 +339,7 @@ void __init r3k_cache_init(void)
 	flush_icache_range = r3k_flush_icache_range;
 
 	flush_cache_sigtramp = r3k_flush_cache_sigtramp;
+	local_flush_data_cache_page = local_r3k_flush_data_cache_page;
 	flush_data_cache_page = r3k_flush_data_cache_page;
 
 	_dma_cache_wback_inv = r3k_dma_cache_wback_inv;
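The c-r3k.c hunks above are the simplest instance of a pattern repeated across this series: MIPS cache operations are dispatched through global function pointers that each variant's init routine fills in, and the series adds a local (current-CPU-only) data-cache-page hook beside the existing broadcast one. A minimal userspace sketch of that dispatch, assuming nothing beyond standard C — the names mirror the kernel's, everything else here is simplified and hypothetical:

	/* Userspace model of the arch/mips/mm dispatch: cache ops are
	 * global function pointers filled in by each variant's init. */
	#include <stdio.h>

	static void (*local_flush_data_cache_page)(void *addr);
	static void (*flush_data_cache_page)(unsigned long addr);

	/* R3000-style variants: the patch installs empty functions, so
	 * both hooks are no-ops here as well. */
	static void local_r3k_flush_data_cache_page(void *addr) { (void)addr; }
	static void r3k_flush_data_cache_page(unsigned long addr) { (void)addr; }

	static void r3k_cache_init(void)
	{
		local_flush_data_cache_page = local_r3k_flush_data_cache_page;
		flush_data_cache_page = r3k_flush_data_cache_page;
	}

	int main(void)
	{
		r3k_cache_init();
		/* Callers never need to know which CPU family runs this: */
		local_flush_data_cache_page((void *)0x1000);
		flush_data_cache_page(0x1000);
		puts("dispatched through per-variant hooks");
		return 0;
	}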
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 32b7f6aeb983..4a43924cd4fc 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -29,6 +29,27 @@
 #include <asm/war.h>
 #include <asm/cacheflush.h> /* for run_uncached() */
 
+
+/*
+ * Special variant of smp_call_function for use by cache functions:
+ *
+ *  o No return value
+ *  o collapses to normal function call on UP kernels
+ *  o collapses to normal function call on systems with a single shared
+ *    primary cache.
+ */
+static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
+                                   int retry, int wait)
+{
+	preempt_disable();
+
+#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
+	smp_call_function(func, info, retry, wait);
+#endif
+	func(info);
+	preempt_enable();
+}
+
 /*
  * Must die.
  */
@@ -154,7 +175,8 @@ static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
 
 static inline void tx49_blast_icache32_page_indexed(unsigned long page)
 {
-	unsigned long start = page;
+	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
+	unsigned long start = INDEX_BASE + (page & indexmask);
 	unsigned long end = start + PAGE_SIZE;
 	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
 	unsigned long ws_end = current_cpu_data.icache.ways <<
@@ -298,7 +320,7 @@ static void r4k_flush_cache_all(void)
 	if (!cpu_has_dc_aliases)
 		return;
 
-	on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
 }
 
 static inline void local_r4k___flush_cache_all(void * args)
@@ -313,13 +335,14 @@ static inline void local_r4k___flush_cache_all(void * args)
 	case CPU_R4400MC:
 	case CPU_R10000:
 	case CPU_R12000:
+	case CPU_R14000:
 		r4k_blast_scache();
 	}
 }
 
 static void r4k___flush_cache_all(void)
 {
-	on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
+	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
 }
 
 static inline void local_r4k_flush_cache_range(void * args)
@@ -340,7 +363,7 @@ static inline void local_r4k_flush_cache_range(void * args)
 static void r4k_flush_cache_range(struct vm_area_struct *vma,
 	unsigned long start, unsigned long end)
 {
-	on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
 }
 
 static inline void local_r4k_flush_cache_mm(void * args)
@@ -369,7 +392,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
 	if (!cpu_has_dc_aliases)
 		return;
 
-	on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
 }
 
 struct flush_cache_page_args {
@@ -460,7 +483,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
 	args.addr = addr;
 	args.pfn = pfn;
 
-	on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
 }
 
 static inline void local_r4k_flush_data_cache_page(void * addr)
@@ -470,7 +493,7 @@ static inline void local_r4k_flush_data_cache_page(void * addr)
 
 static void r4k_flush_data_cache_page(unsigned long addr)
 {
-	on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
 }
 
 struct flush_icache_range_args {
@@ -513,7 +536,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 	args.start = start;
 	args.end = end;
 
-	on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
 
 	instruction_hazard();
 }
@@ -589,7 +612,7 @@ static void r4k_flush_icache_page(struct vm_area_struct *vma,
 	args.vma = vma;
 	args.page = page;
 
-	on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1);
 }
 
 
@@ -688,7 +711,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
 
 static void r4k_flush_cache_sigtramp(unsigned long addr)
 {
-	on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
 }
 
 static void r4k_flush_icache_all(void)
@@ -749,12 +772,12 @@ static void __init probe_pcache(void)
 		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
 		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
 		c->icache.ways = 2;
-		c->icache.waybit = ffs(icache_size/2) - 1;
+		c->icache.waybit = __ffs(icache_size/2);
 
 		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
 		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
 		c->dcache.ways = 2;
-		c->dcache.waybit= ffs(dcache_size/2) - 1;
+		c->dcache.waybit= __ffs(dcache_size/2);
 
 		c->options |= MIPS_CPU_CACHE_CDEX_P;
 		break;
@@ -811,6 +834,7 @@ static void __init probe_pcache(void)
 
 	case CPU_R10000:
 	case CPU_R12000:
+	case CPU_R14000:
 		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
 		c->icache.linesz = 64;
 		c->icache.ways = 2;
@@ -837,12 +861,12 @@ static void __init probe_pcache(void)
 		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
 		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
 		c->icache.ways = 2;
-		c->icache.waybit = ffs(icache_size/2) - 1;
+		c->icache.waybit = __ffs(icache_size/2);
 
 		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
 		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
 		c->dcache.ways = 2;
-		c->dcache.waybit = ffs(dcache_size/2) - 1;
+		c->dcache.waybit = __ffs(dcache_size/2);
 
 		c->options |= MIPS_CPU_CACHE_CDEX_P;
 		break;
@@ -873,12 +897,12 @@ static void __init probe_pcache(void)
 		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
 		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
 		c->icache.ways = 4;
-		c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
+		c->icache.waybit = __ffs(icache_size / c->icache.ways);
 
 		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
 		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
 		c->dcache.ways = 4;
-		c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
+		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);
 
 #if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
 		c->options |= MIPS_CPU_CACHE_CDEX_P;
@@ -906,7 +930,7 @@ static void __init probe_pcache(void)
 		icache_size = c->icache.sets *
 		              c->icache.ways *
 		              c->icache.linesz;
-		c->icache.waybit = ffs(icache_size/c->icache.ways) - 1;
+		c->icache.waybit = __ffs(icache_size/c->icache.ways);
 
 		if (config & 0x8)		/* VI bit */
 			c->icache.flags |= MIPS_CACHE_VTAG;
@@ -926,7 +950,7 @@ static void __init probe_pcache(void)
 		dcache_size = c->dcache.sets *
 		              c->dcache.ways *
 		              c->dcache.linesz;
-		c->dcache.waybit = ffs(dcache_size/c->dcache.ways) - 1;
+		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);
 
 		c->options |= MIPS_CPU_PREFETCH;
 		break;
@@ -964,9 +988,11 @@ static void __init probe_pcache(void)
 		c->dcache.flags |= MIPS_CACHE_PINDEX;
 	case CPU_R10000:
 	case CPU_R12000:
+	case CPU_R14000:
 	case CPU_SB1:
 		break;
 	case CPU_24K:
+	case CPU_34K:
 		if (!(read_c0_config7() & (1 << 16)))
 	default:
 		if (c->dcache.waysize > PAGE_SIZE)
@@ -1090,6 +1116,7 @@ static void __init setup_scache(void)
 
 	case CPU_R10000:
 	case CPU_R12000:
+	case CPU_R14000:
 		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
 		c->scache.linesz = 64 << ((config >> 13) & 1);
 		c->scache.ways = 2;
@@ -1134,6 +1161,31 @@ static void __init setup_scache(void)
 	c->options |= MIPS_CPU_SUBSET_CACHES;
 }
 
+void au1x00_fixup_config_od(void)
+{
+	/*
+	 * c0_config.od (bit 19) was write only (and read as 0) on the
+	 * early revisions of Alchemy SOCs.  It disables the bus
+	 * transaction overlapping and needs to be set to fix various errata.
+	 */
+	switch (read_c0_prid()) {
+	case 0x00030100: /* Au1000 DA */
+	case 0x00030201: /* Au1000 HA */
+	case 0x00030202: /* Au1000 HB */
+	case 0x01030200: /* Au1500 AB */
+	/*
+	 * The Au1100 errata are silent about this bit, so we set it just
+	 * in case for those revisions that require it to be set according
+	 * to arch/mips/au1000/common/cputable.c
+	 */
+	case 0x02030200: /* Au1100 AB */
+	case 0x02030201: /* Au1100 BA */
+	case 0x02030202: /* Au1100 BC */
+		set_c0_config(1 << 19);
+		break;
+	}
+}
+
 static inline void coherency_setup(void)
 {
 	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);
@@ -1154,6 +1206,15 @@ static inline void coherency_setup(void)
 	case CPU_R4400MC:
 		clear_c0_config(CONF_CU);
 		break;
+	/*
+	 * We need to catch the early Alchemy SOCs with
+	 * the write-only c0_config.od bit and set it back to one...
+	 */
+	case CPU_AU1000: /* rev. DA, HA, HB */
+	case CPU_AU1100: /* rev. AB, BA, BC ?? */
+	case CPU_AU1500: /* rev. AB */
+		au1x00_fixup_config_od();
+		break;
 	}
 }
 
@@ -1198,6 +1259,7 @@ void __init r4k_cache_init(void)
 	flush_cache_sigtramp = r4k_flush_cache_sigtramp;
 	flush_icache_all = r4k_flush_icache_all;
 
+	local_flush_data_cache_page = local_r4k_flush_data_cache_page;
 	flush_data_cache_page = r4k_flush_data_cache_page;
 	flush_icache_range = r4k_flush_icache_range;
 
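r4k_on_each_cpu(), added at the top of c-r4k.c, is what every on_each_cpu() call site above was converted to. The sketch below is a compilable userspace model of its collapse behavior; smp_call_function() and the preempt hooks are stubs of my own, not the kernel API:

	/* Model of r4k_on_each_cpu(): when the primary cache is shared
	 * between virtual CPUs (MIPS MT), broadcasting the flush is pure
	 * overhead, so the helper compiles down to a direct call. */
	#include <stdio.h>

	static void smp_call_function(void (*f)(void *), void *info,
				      int retry, int wait)
	{
		/* In the kernel this IPIs all *other* CPUs; stubbed here. */
		(void)f; (void)info; (void)retry; (void)wait;
		printf("IPI: run flush on all other CPUs\n");
	}
	#define preempt_disable() do { } while (0)
	#define preempt_enable()  do { } while (0)

	static inline void r4k_on_each_cpu(void (*func)(void *), void *info,
					   int retry, int wait)
	{
		preempt_disable();
	#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
		/* Ordinary SMP: caches are per-CPU, everyone must flush. */
		smp_call_function(func, info, retry, wait);
	#endif
		/* One shared primary cache: this single call suffices. */
		func(info);
		preempt_enable();
	}

	static void flush(void *info) { printf("flush on this CPU (%p)\n", info); }

	int main(void)
	{
		r4k_on_each_cpu(flush, NULL, 1, 1);
		return 0;
	}

Built with -DCONFIG_MIPS_MT_SMP (or -DCONFIG_MIPS_MT_SMTC) the broadcast disappears and only the direct call remains, which is the whole point of the helper.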
"write access to" : "read access from", + field, address, + field, (unsigned long) regs->cp0_epc, + field, (unsigned long) regs->regs[31]); +#endif tsk->thread.cp0_badvaddr = address; info.si_signo = SIGBUS; info.si_errno = 0; @@ -201,7 +209,6 @@ do_sigbus: force_sig_info(SIGBUS, &info, tsk); return; - vmalloc_fault: { /* diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c index 1f7b37b38f5c..0c544375b856 100644 --- a/arch/mips/mm/highmem.c +++ b/arch/mips/mm/highmem.c @@ -83,6 +83,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type) preempt_check_resched(); } +#ifndef CONFIG_LIMITED_DMA /* * This is the same as kmap_atomic() but can map memory that doesn't * have a struct page associated with it. @@ -101,6 +102,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) return (void*) vaddr; } +#endif /* CONFIG_LIMITED_DMA */ struct page *__kmap_atomic_to_page(void *ptr) { diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index ad89c442f299..33f6e1cdfd5b 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -227,7 +227,7 @@ void __init mem_init(void) for (tmp = 0; tmp < max_low_pfn; tmp++) if (page_is_ram(tmp)) { ram++; - if (PageReserved(mem_map+tmp)) + if (PageReserved(pfn_to_page(tmp))) reservedpages++; } @@ -276,6 +276,20 @@ void __init mem_init(void) } #endif /* !CONFIG_NEED_MULTIPLE_NODES */ +void free_init_pages(char *what, unsigned long begin, unsigned long end) +{ + unsigned long addr; + + for (addr = begin; addr < end; addr += PAGE_SIZE) { + ClearPageReserved(virt_to_page(addr)); + init_page_count(virt_to_page(addr)); + memset((void *)addr, 0xcc, PAGE_SIZE); + free_page(addr); + totalram_pages++; + } + printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); +} + #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { @@ -284,16 +298,7 @@ void free_initrd_mem(unsigned long start, unsigned long end) start = (unsigned long)phys_to_virt(CPHYSADDR(start)); end = (unsigned long)phys_to_virt(CPHYSADDR(end)); #endif - if (start < end) - printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", - (end - start) >> 10); - - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page(start); - totalram_pages++; - } + free_init_pages("initrd memory", start, end); } #endif @@ -301,24 +306,17 @@ extern unsigned long prom_free_prom_memory(void); void free_initmem(void) { - unsigned long addr, page, freed; + unsigned long start, end, freed; freed = prom_free_prom_memory(); + if (freed) + printk(KERN_INFO "Freeing firmware memory: %ldk freed\n",freed); - addr = (unsigned long) &__init_begin; - while (addr < (unsigned long) &__init_end) { + start = (unsigned long)(&__init_begin); + end = (unsigned long)(&__init_end); #ifdef CONFIG_64BIT - page = PAGE_OFFSET | CPHYSADDR(addr); -#else - page = addr; + start = PAGE_OFFSET | CPHYSADDR(start); + end = PAGE_OFFSET | CPHYSADDR(end); #endif - ClearPageReserved(virt_to_page(page)); - init_page_count(virt_to_page(page)); - free_page(page); - totalram_pages++; - freed += PAGE_SIZE; - addr += PAGE_SIZE; - } - printk(KERN_INFO "Freeing unused kernel memory: %ldk freed\n", - freed >> 10); + free_init_pages("unused kernel memory", start, end); } diff --git a/arch/mips/mm/pg-r4k.c b/arch/mips/mm/pg-r4k.c index e4390dc3eb48..b7c749232ffe 100644 --- a/arch/mips/mm/pg-r4k.c +++ b/arch/mips/mm/pg-r4k.c @@ -357,6 +357,7 @@ void __init build_clear_page(void) case CPU_R10000: case CPU_R12000: + case CPU_R14000: 
diff --git a/arch/mips/mm/pg-r4k.c b/arch/mips/mm/pg-r4k.c
index e4390dc3eb48..b7c749232ffe 100644
--- a/arch/mips/mm/pg-r4k.c
+++ b/arch/mips/mm/pg-r4k.c
@@ -357,6 +357,7 @@ void __init build_clear_page(void)
 
 	case CPU_R10000:
 	case CPU_R12000:
+	case CPU_R14000:
 		pref_src_mode = Pref_LoadStreamed;
 		pref_dst_mode = Pref_StoreStreamed;
 		break;
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c
index 3b6cc9ba1b05..31ec73052423 100644
--- a/arch/mips/mm/sc-rm7k.c
+++ b/arch/mips/mm/sc-rm7k.c
@@ -138,7 +138,7 @@ void __init rm7k_sc_init(void)
 	c->scache.linesz = sc_lsize;
 	c->scache.ways = 4;
-	c->scache.waybit= ffs(scache_size / c->scache.ways) - 1;
+	c->scache.waybit= __ffs(scache_size / c->scache.ways);
 	c->scache.waysize = scache_size / c->scache.ways;
 	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
 	printk(KERN_INFO "Secondary cache size %dK, linesize %d bytes.\n",
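The recurring ffs() -> __ffs() conversion here and in c-r4k.c above is an equivalence, not a behavior change: ffs() is 1-based while the kernel's __ffs() is 0-based, so ffs(x) - 1 == __ffs(x) for any nonzero x, and waybit wants the 0-based bit index of the (power-of-two) way size. A quick check, modeling __ffs() with a GCC/Clang builtin — an assumption for the sketch; in the kernel it is an inline from <asm/bitops.h>:

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	static unsigned long my_ffs(unsigned long x)	/* stand-in for __ffs() */
	{
		/* count trailing zeros == index of the lowest set bit */
		return __builtin_ctzl(x);
	}

	int main(void)
	{
		unsigned long icache_size = 16384;	/* e.g. 16k, 4-way */
		unsigned long ways = 4;
		unsigned long waysize = icache_size / ways;	/* 4096 */

		printf("ffs(%lu) - 1 = %d\n", waysize, ffs((int)waysize) - 1); /* 12 */
		printf("__ffs(%lu)   = %lu\n", waysize, my_ffs(waysize));      /* 12 */
		return 0;
	}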
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index a865f2394cb0..9dca099ba16b 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -32,13 +32,35 @@ extern void build_tlb_refill_handler(void);
 		"nop; nop; nop; nop; nop; nop;\n\t"			\
 		".set reorder\n\t")
 
+/* Atomicity and interruptibility */
+#ifdef CONFIG_MIPS_MT_SMTC
+
+#include <asm/smtc.h>
+#include <asm/mipsmtregs.h>
+
+#define ENTER_CRITICAL(flags) \
+	{ \
+	unsigned int mvpflags; \
+	local_irq_save(flags); \
+	mvpflags = dvpe()
+#define EXIT_CRITICAL(flags) \
+	evpe(mvpflags); \
+	local_irq_restore(flags); \
+	}
+#else
+
+#define ENTER_CRITICAL(flags) local_irq_save(flags)
+#define EXIT_CRITICAL(flags) local_irq_restore(flags)
+
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 void local_flush_tlb_all(void)
 {
 	unsigned long flags;
 	unsigned long old_ctx;
 	int entry;
 
-	local_irq_save(flags);
+	ENTER_CRITICAL(flags);
 	/* Save old context and create impossible VPN2 value */
 	old_ctx = read_c0_entryhi();
 	write_c0_entrylo0(0);
@@ -57,7 +79,7 @@ void local_flush_tlb_all(void)
 	}
 	tlbw_use_hazard();
 	write_c0_entryhi(old_ctx);
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 }
 
 /* All entries common to a mm share an asid.  To effectively flush
@@ -87,6 +109,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		unsigned long flags;
 		int size;
 
+		ENTER_CRITICAL(flags);
 		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 		size = (size + 1) >> 1;
 		local_irq_save(flags);
@@ -120,7 +143,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		} else {
 			drop_mmu_context(mm, cpu);
 		}
-		local_irq_restore(flags);
+		EXIT_CRITICAL(flags);
 	}
 }
 
@@ -129,9 +152,9 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	unsigned long flags;
 	int size;
 
+	ENTER_CRITICAL(flags);
 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	size = (size + 1) >> 1;
-	local_irq_save(flags);
 
 	if (size <= current_cpu_data.tlbsize / 2) {
 		int pid = read_c0_entryhi();
@@ -162,7 +185,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	} else {
 		local_flush_tlb_all();
 	}
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 }
 
 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
@@ -175,7 +198,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 
 		newpid = cpu_asid(cpu, vma->vm_mm);
 		page &= (PAGE_MASK << 1);
-		local_irq_save(flags);
+		ENTER_CRITICAL(flags);
 		oldpid = read_c0_entryhi();
 		write_c0_entryhi(page | newpid);
 		mtc0_tlbw_hazard();
@@ -194,7 +217,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 
 	finish:
 		write_c0_entryhi(oldpid);
-		local_irq_restore(flags);
+		EXIT_CRITICAL(flags);
 	}
 }
 
@@ -207,7 +230,7 @@ void local_flush_tlb_one(unsigned long page)
 	unsigned long flags;
 	int oldpid, idx;
 
-	local_irq_save(flags);
+	ENTER_CRITICAL(flags);
 	oldpid = read_c0_entryhi();
 	page &= (PAGE_MASK << 1);
 	write_c0_entryhi(page);
@@ -226,7 +249,7 @@ void local_flush_tlb_one(unsigned long page)
 	}
 	write_c0_entryhi(oldpid);
 
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 }
 
 /*
@@ -249,7 +272,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	if (current->active_mm != vma->vm_mm)
 		return;
 
-	local_irq_save(flags);
+	ENTER_CRITICAL(flags);
 
 	pid = read_c0_entryhi() & ASID_MASK;
 	address &= (PAGE_MASK << 1);
@@ -277,7 +300,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	else
 		tlb_write_indexed();
 	tlbw_use_hazard();
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 }
 
 #if 0
@@ -291,7 +314,7 @@ static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
 	pte_t *ptep;
 	int idx;
 
-	local_irq_save(flags);
+	ENTER_CRITICAL(flags);
 	address &= (PAGE_MASK << 1);
 	asid = read_c0_entryhi() & ASID_MASK;
 	write_c0_entryhi(address | asid);
@@ -310,7 +333,7 @@ static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
 	else
 		tlb_write_indexed();
 	tlbw_use_hazard();
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 }
 #endif
 
@@ -322,7 +345,7 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 	unsigned long old_pagemask;
 	unsigned long old_ctx;
 
-	local_irq_save(flags);
+	ENTER_CRITICAL(flags);
 	/* Save old context and create impossible VPN2 value */
 	old_ctx = read_c0_entryhi();
 	old_pagemask = read_c0_pagemask();
@@ -342,7 +365,7 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 	BARRIER;
 	write_c0_pagemask(old_pagemask);
 	local_flush_tlb_all();
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 }
 
 /*
@@ -362,7 +385,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
 	unsigned long old_pagemask;
 	unsigned long old_ctx;
 
-	local_irq_save(flags);
+	ENTER_CRITICAL(flags);
 	/* Save old context and create impossible VPN2 value */
 	old_ctx = read_c0_entryhi();
 	old_pagemask = read_c0_pagemask();
@@ -386,10 +409,11 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
 	write_c0_entryhi(old_ctx);
 	write_c0_pagemask(old_pagemask);
 out:
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 	return ret;
 }
 
+extern void __init sanitize_tlb_entries(void);
 static void __init probe_tlb(unsigned long config)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
@@ -402,6 +426,14 @@ static void __init probe_tlb(unsigned long config)
 	 */
 	if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
 		return;
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * If the TLB is shared in an SMTC system, the total size has
+	 * already been calculated and written into cpu_data's tlbsize.
+	 */
+	if((smtc_status & SMTC_TLB_SHARED) == SMTC_TLB_SHARED)
+		return;
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 	reg = read_c0_config1();
 	if (!((config >> 7) & 3))
@@ -410,6 +442,15 @@ static void __init probe_tlb(unsigned long config)
 	c->tlbsize = ((reg >> 25) & 0x3f) + 1;
 }
 
+static int __initdata ntlb = 0;
+static int __init set_ntlb(char *str)
+{
+	get_option(&str, &ntlb);
+	return 1;
+}
+
+__setup("ntlb=", set_ntlb);
+
 void __init tlb_init(void)
 {
 	unsigned int config = read_c0_config();
@@ -432,5 +473,15 @@ void __init tlb_init(void)
 
 	/* Did I tell you that ARC SUCKS?  */
 
+	if (ntlb) {
+		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
+			int wired = current_cpu_data.tlbsize - ntlb;
+			write_c0_wired(wired);
+			write_c0_index(wired-1);
+			printk("Restricting TLB to %d entries\n", ntlb);
+		} else
+			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
+	}
+
 	build_tlb_refill_handler();
 }
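Every local_irq_save()/local_irq_restore() pair in tlb-r4k.c becomes ENTER_CRITICAL()/EXIT_CRITICAL() because on SMTC the TLB is shared between virtual CPUs: masking local interrupts is not enough, the other VPEs must be halted too (dvpe()/evpe()). The sketch below keeps the macros' deliberately unbalanced braces — that is what scopes mvpflags across the pair — and stubs out the hardware operations, so it compiles and runs in userspace:

	#include <stdio.h>

	/* Stubs standing in for the MIPS MT primitives. */
	static unsigned int dvpe(void)   { puts("dvpe: halt other VPEs"); return 0x55; }
	static void evpe(unsigned int f) { printf("evpe: resume VPEs (0x%x)\n", f); }
	#define local_irq_save(f)    do { (f) = 1; puts("irqs off"); } while (0)
	#define local_irq_restore(f) do { (void)(f); puts("irqs on"); } while (0)

	/* Same shape as the kernel macros: ENTER opens a block and keeps
	 * the dvpe() result in a block-local variable that EXIT closes. */
	#define ENTER_CRITICAL(flags) \
		{ \
		unsigned int mvpflags; \
		local_irq_save(flags); \
		mvpflags = dvpe()
	#define EXIT_CRITICAL(flags) \
		evpe(mvpflags); \
		local_irq_restore(flags); \
		}

	int main(void)
	{
		unsigned long flags;

		ENTER_CRITICAL(flags);
		puts("...poke the shared TLB safely...");
		EXIT_CRITICAL(flags);
		return 0;
	}

The non-SMTC build keeps the old behavior: both macros collapse to plain local_irq_save()/local_irq_restore().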
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 599b3c297186..54507be2ab5b 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -7,6 +7,16 @@
  *
  * Copyright (C) 2004,2005 by Thiemo Seufer
  * Copyright (C) 2005 Maciej W. Rozycki
+ * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ *
+ * ... and the days got worse and worse and now you see
+ * I've gone completely out of my mind.
+ *
+ * They're coming to take me away haha
+ * they're coming to take me away hoho hihi haha
+ * to the funny farm where code is beautiful all the time ...
+ *
+ * (Condolences to Napoleon XIV)
  */
 
 #include <stdarg.h>
@@ -68,6 +78,7 @@ enum fields
 	BIMM = 0x040,
 	JIMM = 0x080,
 	FUNC = 0x100,
+	SET = 0x200
 };
 
 #define OP_MASK		0x2f
@@ -86,6 +97,8 @@ enum fields
 #define JIMM_SH		0
 #define FUNC_MASK	0x2f
 #define FUNC_SH		0
+#define SET_MASK	0x7
+#define SET_SH		0
 
 enum opcode {
 	insn_invalid,
@@ -129,8 +142,8 @@ static __initdata struct insn insn_table[] = {
 	{ insn_bne, M(bne_op,0,0,0,0,0), RS | RT | BIMM },
 	{ insn_daddiu, M(daddiu_op,0,0,0,0,0), RS | RT | SIMM },
 	{ insn_daddu, M(spec_op,0,0,0,0,daddu_op), RS | RT | RD },
-	{ insn_dmfc0, M(cop0_op,dmfc_op,0,0,0,0), RT | RD },
-	{ insn_dmtc0, M(cop0_op,dmtc_op,0,0,0,0), RT | RD },
+	{ insn_dmfc0, M(cop0_op,dmfc_op,0,0,0,0), RT | RD | SET},
+	{ insn_dmtc0, M(cop0_op,dmtc_op,0,0,0,0), RT | RD | SET},
 	{ insn_dsll, M(spec_op,0,0,0,0,dsll_op), RT | RD | RE },
 	{ insn_dsll32, M(spec_op,0,0,0,0,dsll32_op), RT | RD | RE },
 	{ insn_dsra, M(spec_op,0,0,0,0,dsra_op), RT | RD | RE },
@@ -145,8 +158,8 @@ static __initdata struct insn insn_table[] = {
 	{ insn_lld, M(lld_op,0,0,0,0,0), RS | RT | SIMM },
 	{ insn_lui, M(lui_op,0,0,0,0,0), RT | SIMM },
 	{ insn_lw, M(lw_op,0,0,0,0,0), RS | RT | SIMM },
-	{ insn_mfc0, M(cop0_op,mfc_op,0,0,0,0), RT | RD },
-	{ insn_mtc0, M(cop0_op,mtc_op,0,0,0,0), RT | RD },
+	{ insn_mfc0, M(cop0_op,mfc_op,0,0,0,0), RT | RD | SET},
+	{ insn_mtc0, M(cop0_op,mtc_op,0,0,0,0), RT | RD | SET},
 	{ insn_ori, M(ori_op,0,0,0,0,0), RS | RT | UIMM },
 	{ insn_rfe, M(cop0_op,cop_op,0,0,0,rfe_op), 0 },
 	{ insn_sc, M(sc_op,0,0,0,0,0), RS | RT | SIMM },
@@ -242,6 +255,14 @@ static __init u32 build_func(u32 arg)
 	return arg & FUNC_MASK;
 }
 
+static __init u32 build_set(u32 arg)
+{
+	if (arg & ~SET_MASK)
+		printk(KERN_WARNING "TLB synthesizer field overflow\n");
+
+	return arg & SET_MASK;
+}
+
 /*
  * The order of opcode arguments is implicitly left to right,
  * starting with RS and ending with FUNC or IMM.
@@ -273,6 +294,7 @@ static void __init build_insn(u32 **buf, enum opcode opc, ...)
 	if (ip->fields & BIMM) op |= build_bimm(va_arg(ap, s32));
 	if (ip->fields & JIMM) op |= build_jimm(va_arg(ap, u32));
 	if (ip->fields & FUNC) op |= build_func(va_arg(ap, u32));
+	if (ip->fields & SET) op |= build_set(va_arg(ap, u32));
 	va_end(ap);
 
 	**buf = op;
@@ -358,8 +380,8 @@ I_u1s2(_bgezl);
I_u1s2(_bltz);
I_u1s2(_bltzl);
I_u1u2s3(_bne);
-I_u1u2(_dmfc0);
-I_u1u2(_dmtc0);
+I_u1u2u3(_dmfc0);
+I_u1u2u3(_dmtc0);
I_u2u1s3(_daddiu);
I_u3u1u2(_daddu);
I_u2u1u3(_dsll);
@@ -376,8 +398,8 @@ I_u2s3u1(_ll);
I_u2s3u1(_lld);
I_u1s2(_lui);
I_u2s3u1(_lw);
-I_u1u2(_mfc0);
-I_u1u2(_mtc0);
+I_u1u2u3(_mfc0);
+I_u1u2u3(_mtc0);
I_u2u1u3(_ori);
I_0(_rfe);
I_u2s3u1(_sc);
@@ -451,8 +473,8 @@ L_LA(_r3000_write_probe_fail)
 # define i_SLL(buf, rs, rt, sh) i_dsll(buf, rs, rt, sh)
 # define i_SRA(buf, rs, rt, sh) i_dsra(buf, rs, rt, sh)
 # define i_SRL(buf, rs, rt, sh) i_dsrl(buf, rs, rt, sh)
-# define i_MFC0(buf, rt, rd) i_dmfc0(buf, rt, rd)
-# define i_MTC0(buf, rt, rd) i_dmtc0(buf, rt, rd)
+# define i_MFC0(buf, rt, rd...) i_dmfc0(buf, rt, rd)
+# define i_MTC0(buf, rt, rd...) i_dmtc0(buf, rt, rd)
 # define i_ADDIU(buf, rs, rt, val) i_daddiu(buf, rs, rt, val)
 # define i_ADDU(buf, rs, rt, rd) i_daddu(buf, rs, rt, rd)
 # define i_SUBU(buf, rs, rt, rd) i_dsubu(buf, rs, rt, rd)
@@ -464,8 +486,8 @@ L_LA(_r3000_write_probe_fail)
 # define i_SLL(buf, rs, rt, sh) i_sll(buf, rs, rt, sh)
 # define i_SRA(buf, rs, rt, sh) i_sra(buf, rs, rt, sh)
 # define i_SRL(buf, rs, rt, sh) i_srl(buf, rs, rt, sh)
-# define i_MFC0(buf, rt, rd) i_mfc0(buf, rt, rd)
-# define i_MTC0(buf, rt, rd) i_mtc0(buf, rt, rd)
+# define i_MFC0(buf, rt, rd...) i_mfc0(buf, rt, rd)
+# define i_MTC0(buf, rt, rd...) i_mtc0(buf, rt, rd)
 # define i_ADDIU(buf, rs, rt, val) i_addiu(buf, rs, rt, val)
 # define i_ADDU(buf, rs, rt, rd) i_addu(buf, rs, rt, rd)
 # define i_SUBU(buf, rs, rt, rd) i_subu(buf, rs, rt, rd)
@@ -670,14 +692,15 @@ static void __init il_bgezl(u32 **p, struct reloc **r, unsigned int reg,
 #define K1		27
 
 /* Some CP0 registers */
-#define C0_INDEX	0
-#define C0_ENTRYLO0	2
-#define C0_ENTRYLO1	3
-#define C0_CONTEXT	4
-#define C0_BADVADDR	8
-#define C0_ENTRYHI	10
-#define C0_EPC		14
-#define C0_XCONTEXT	20
+#define C0_INDEX	0, 0
+#define C0_ENTRYLO0	2, 0
+#define C0_TCBIND	2, 2
+#define C0_ENTRYLO1	3, 0
+#define C0_CONTEXT	4, 0
+#define C0_BADVADDR	8, 0
+#define C0_ENTRYHI	10, 0
+#define C0_EPC		14, 0
+#define C0_XCONTEXT	20, 0
 
 #ifdef CONFIG_64BIT
 # define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_XCONTEXT)
@@ -742,7 +765,7 @@ static void __init build_r3000_tlb_refill_handler(void)
 	}
 #endif
 
-	memcpy((void *)CAC_BASE, tlb_handler, 0x80);
+	memcpy((void *)ebase, tlb_handler, 0x80);
 }
 
 /*
@@ -852,6 +875,7 @@ static __init void build_tlb_write_entry(u32 **p, struct label **l,
 
 	case CPU_R10000:
 	case CPU_R12000:
+	case CPU_R14000:
 	case CPU_4KC:
 	case CPU_SB1:
 	case CPU_SB1A:
@@ -883,6 +907,7 @@ static __init void build_tlb_write_entry(u32 **p, struct label **l,
 	case CPU_4KEC:
 	case CPU_24K:
 	case CPU_34K:
+	case CPU_74K:
 		i_ehb(p);
 		tlbw(p);
 		break;
@@ -951,12 +976,20 @@ build_get_pmde64(u32 **p, struct label **l, struct reloc **r,
 	/* No i_nop needed here, since the next insn doesn't touch TMP. */
 
 #ifdef CONFIG_SMP
+# ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC uses TCBind value as "CPU" index
+	 */
+	i_mfc0(p, ptr, C0_TCBIND);
+	i_dsrl(p, ptr, ptr, 19);
+# else
 	/*
 	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
 	 * stored in CONTEXT.
 	 */
 	i_dmfc0(p, ptr, C0_CONTEXT);
 	i_dsrl(p, ptr, ptr, 23);
+#endif
 	i_LA_mostly(p, tmp, pgdc);
 	i_daddu(p, ptr, ptr, tmp);
 	i_dmfc0(p, tmp, C0_BADVADDR);
@@ -1014,9 +1047,21 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 
 	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
 #ifdef CONFIG_SMP
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC uses TCBind value as "CPU" index
+	 */
+	i_mfc0(p, ptr, C0_TCBIND);
+	i_LA_mostly(p, tmp, pgdc);
+	i_srl(p, ptr, ptr, 19);
+#else
+	/*
+	 * smp_processor_id() << 3 is stored in CONTEXT.
+	 */
 	i_mfc0(p, ptr, C0_CONTEXT);
 	i_LA_mostly(p, tmp, pgdc);
 	i_srl(p, ptr, ptr, 23);
+#endif
 	i_addu(p, ptr, tmp, ptr);
 #else
 	i_LA_mostly(p, ptr, pgdc);
@@ -1247,7 +1292,7 @@ static void __init build_r4000_tlb_refill_handler(void)
 	}
 #endif
 
-	memcpy((void *)CAC_BASE, final_handler, 0x100);
+	memcpy((void *)ebase, final_handler, 0x100);
 }
 
 /*
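The SET field threaded through tlbex.c exists because SMTC needs TCBind, a CP0 register that lives at a nonzero select: mfc0/mtc0 must encode "register 2, select 2", and redefining the C0_* macros as "reg, sel" pairs lets the now-variadic i_MFC0()/i_MTC0() wrappers pass the extra operand through unchanged. A self-contained sketch of the encoding — field positions follow the MIPS32 manual; build_mfc0() is my stand-in for the kernel's table-driven build_insn():

	#include <stdio.h>
	#include <stdint.h>

	#define cop0_op  0x10u	/* major opcode COP0, bits 26..31 */
	#define mfc_op   0x00u	/* MF sub-op in the rs field, bits 21..25 */
	#define SET_MASK 0x7u	/* sel field, bits 0..2 */

	#define C0_ENTRYLO0	2, 0
	#define C0_TCBIND	2, 2	/* same register number, select 2 */

	static uint32_t build_mfc0(unsigned rt, unsigned rd, unsigned set)
	{
		if (set & ~SET_MASK)
			fprintf(stderr, "TLB synthesizer field overflow\n");
		return (cop0_op << 26) | (mfc_op << 21) | (rt << 16)
		       | (rd << 11) | (set & SET_MASK);
	}

	/* Two macro parameters, three function arguments: C0_TCBIND
	 * expands to "2, 2" after substitution, mirroring the kernel's
	 * "rd..." variadic trick. */
	#define i_MFC0(rt, rd_set) build_mfc0(rt, rd_set)

	int main(void)
	{
		/* mfc0 $1, $2, 2 == read TCBind into register 1 */
		printf("0x%08x\n", i_MFC0(1, C0_TCBIND));	/* 0x40011002 */
		return 0;
	}

The relocation of the handler copies from the hard-wired CAC_BASE to ebase in the same file is what lets the refill handler land wherever the platform's exception base actually is.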