author | Paul Mundt <lethal@linux-sh.org> | 2007-07-31 17:07:28 +0900 |
---|---|---|
committer | Paul Mundt <lethal@linux-sh.org> | 2007-09-21 11:57:46 +0900 |
commit | e7bd34a15b85655f24d1b45edbe3bdfebf9d027e | |
tree | 051647273266582fe95dcc5cf008534c264be5ae | |
parent | ac919986d7dfc5d1d9f5585521307f222a8ebeaf | |
sh: Support explicit L1 cache disabling.
This reworks the cache mode configuration in Kconfig, and allows for
explicit selection of write-back/write-through/off configurations.
All of the cache flushing routines are optimized away for the off
case.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
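
The "optimized away" claim comes from the no-op macro pattern this patch adds to include/asm-sh/cacheflush.h: with the cache off, every flush primitive expands to an empty statement, so call sites compile to nothing. Below is a minimal, self-contained user-space sketch of that pattern; the demo_publish_buffer() caller and the stand-in CONFIG_CACHE_OFF define are illustrative assumptions, and only the macro shapes mirror the patch.

```c
#include <stdio.h>
#include <stddef.h>

/* Stand-in for the Kconfig symbol; remove it to model the cache-enabled case. */
#define CONFIG_CACHE_OFF 1

#ifdef CONFIG_CACHE_OFF
/* Cache disabled: flush primitives expand to empty statements. */
#define flush_dcache_page(page)            do { } while (0)
#define __flush_wback_region(start, size)  do { (void)(start); } while (0)
#else
/* Cache enabled: the real (out-of-line) primitives would be declared here. */
extern void __flush_wback_region(void *start, int size);
extern void flush_dcache_page(void *page);
#endif

/* Hypothetical caller: under CONFIG_CACHE_OFF both calls compile to nothing. */
static void demo_publish_buffer(void *buf, size_t len)
{
	(void)len;	/* only meaningful when the cache is enabled */
	__flush_wback_region(buf, (int)len);
	flush_dcache_page(buf);
}

int main(void)
{
	char buf[64] = "hello";

	demo_publish_buffer(buf, sizeof(buf));
	printf("%s\n", buf);
	return 0;
}
```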
-rw-r--r-- | arch/sh/kernel/cpu/init.c | 11
-rw-r--r-- | arch/sh/kernel/sh_ksyms.c | 7
-rw-r--r-- | arch/sh/mm/Kconfig | 19
-rw-r--r-- | arch/sh/mm/Makefile | 11
-rw-r--r-- | arch/sh/mm/pmb.c | 2
-rw-r--r-- | arch/sh/mm/tlb-sh4.c | 25
-rw-r--r-- | include/asm-sh/cacheflush.h | 33
-rw-r--r-- | include/asm-sh/page.h | 6
-rw-r--r-- | include/asm-sh/pgtable.h | 3
9 files changed, 86 insertions, 31 deletions
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 9172e97dc26a..fdc245b7b043 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -143,12 +143,15 @@ static void __init cache_init(void)
 		flags &= ~CCR_CACHE_EMODE;
 #endif
 
-#ifdef CONFIG_SH_WRITETHROUGH
-	/* Turn on Write-through caching */
+#if defined(CONFIG_CACHE_WRITETHROUGH)
+	/* Write-through */
 	flags |= CCR_CACHE_WT;
-#else
-	/* .. or default to Write-back */
+#elif defined(CONFIG_CACHE_WRITEBACK)
+	/* Write-back */
 	flags |= CCR_CACHE_CB;
+#else
+	/* Off */
+	flags &= ~CCR_CACHE_ENABLE;
 #endif
 
 	ctrl_outl(flags, CCR);
diff --git a/arch/sh/kernel/sh_ksyms.c b/arch/sh/kernel/sh_ksyms.c
index 37aef0a85197..de250705c35e 100644
--- a/arch/sh/kernel/sh_ksyms.c
+++ b/arch/sh/kernel/sh_ksyms.c
@@ -128,7 +128,8 @@ DECLARE_EXPORT(__movstrSI12_i4);
 #endif /* __GNUC__ == 4 */
 #endif
 
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
+#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \
+	defined(CONFIG_SH7705_CACHE_32KB))
 /* needed by some modules */
 EXPORT_SYMBOL(flush_cache_all);
 EXPORT_SYMBOL(flush_cache_range);
@@ -136,8 +137,8 @@ EXPORT_SYMBOL(flush_dcache_page);
 EXPORT_SYMBOL(__flush_purge_region);
 #endif
 
-#if defined(CONFIG_MMU) && (defined(CONFIG_CPU_SH4) || \
-	defined(CONFIG_SH7705_CACHE_32KB))
+#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
+	(defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB))
 EXPORT_SYMBOL(clear_user_page);
 #endif
 
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 093d491424fd..c2777555003e 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -2,7 +2,6 @@
 # Processor families
 #
 config CPU_SH2
-	select SH_WRITETHROUGH if !CPU_SH2A
 	bool
 
 config CPU_SH2A
@@ -414,8 +413,17 @@ config SH_DIRECT_MAPPED
 	  Turn this option off for platforms that do not have a direct-mapped
 	  cache, and you have no need to run the caches in such a configuration.
 
-config SH_WRITETHROUGH
-	bool "Use write-through caching"
+choice
+	prompt "Cache mode"
+	default CACHE_WRITEBACK if CPU_SH2A || CPU_SH3 || CPU_SH4
+	default CACHE_WRITETHROUGH if (CPU_SH2 && !CPU_SH2A)
+
+config CACHE_WRITEBACK
+	bool "Write-back"
+	depends on CPU_SH2A || CPU_SH3 || CPU_SH4
+
+config CACHE_WRITETHROUGH
+	bool "Write-through"
 	help
 	  Selecting this option will configure the caches in write-through
 	  mode, as opposed to the default write-back configuration.
@@ -426,4 +434,9 @@ config SH_WRITETHROUGH
 
 	  If unsure, say N.
 
+config CACHE_OFF
+	bool "Off"
+
+endchoice
+
 endmenu
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 4061e89d84d0..e73d7f970ce0 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -4,9 +4,10 @@
 
 obj-y			:= init.o extable.o consistent.o
 
-obj-$(CONFIG_CPU_SH2)	+= cache-sh2.o
-obj-$(CONFIG_CPU_SH3)	+= cache-sh3.o
-obj-$(CONFIG_CPU_SH4)	+= cache-sh4.o
+cache-$(CONFIG_CPU_SH2)	:= cache-sh2.o
+cache-$(CONFIG_CPU_SH3)	:= cache-sh3.o
+cache-$(CONFIG_CPU_SH4)	:= cache-sh4.o pg-sh4.o
+cache-$(CONFIG_CACHE_OFF)	:=
 
 mmu-y			:= tlb-nommu.o pg-nommu.o
 mmu-$(CONFIG_CPU_SH3)	+= fault-nommu.o
@@ -14,7 +15,7 @@ mmu-$(CONFIG_CPU_SH4)	+= fault-nommu.o
 mmu-$(CONFIG_MMU)	:= fault.o clear_page.o copy_page.o tlb-flush.o \
 			   ioremap.o
 
-obj-y			+= $(mmu-y)
+obj-y			+= $(cache-y) $(mmu-y)
 
 ifdef CONFIG_DEBUG_FS
 obj-$(CONFIG_CPU_SH4)	+= cache-debugfs.o
@@ -22,7 +23,7 @@ endif
 
 ifdef CONFIG_MMU
 obj-$(CONFIG_CPU_SH3)	+= tlb-sh3.o
-obj-$(CONFIG_CPU_SH4)	+= tlb-sh4.o pg-sh4.o
+obj-$(CONFIG_CPU_SH4)	+= tlb-sh4.o
 obj-$(CONFIG_SH7705_CACHE_32KB)	+= pg-sh7705.o
 endif
 
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index a08a4a958add..7d43758dc244 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -145,7 +145,7 @@ repeat:
 
 	ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos));
 
-#ifdef CONFIG_SH_WRITETHROUGH
+#ifdef CONFIG_CACHE_WRITETHROUGH
 	/*
 	 * When we are in 32-bit address extended mode, CCR.CB becomes
 	 * invalid, so care must be taken to manually adjust cacheable
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index f74cf667c8fa..13fde8cc7179 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -34,22 +34,27 @@ void update_mmu_cache(struct vm_area_struct * vma,
 	unsigned long flags;
 	unsigned long pteval;
 	unsigned long vpn;
-	struct page *page;
-	unsigned long pfn;
 
 	/* Ptrace may call this routine. */
 	if (vma && current->active_mm != vma->vm_mm)
 		return;
 
-	pfn = pte_pfn(pte);
-	if (pfn_valid(pfn)) {
-		page = pfn_to_page(pfn);
-		if (!test_bit(PG_mapped, &page->flags)) {
-			unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
-			__flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
-			__set_bit(PG_mapped, &page->flags);
+#ifndef CONFIG_CACHE_OFF
+	{
+		unsigned long pfn = pte_pfn(pte);
+
+		if (pfn_valid(pfn)) {
+			struct page *page = pfn_to_page(pfn);
+
+			if (!test_bit(PG_mapped, &page->flags)) {
+				unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
+				__flush_wback_region((void *)P1SEGADDR(phys),
+						     PAGE_SIZE);
+				__set_bit(PG_mapped, &page->flags);
+			}
 		}
 	}
+#endif
 
 	local_irq_save(flags);
 
@@ -66,7 +71,7 @@ void update_mmu_cache(struct vm_area_struct * vma,
 
 	/* Set PTEL register */
 	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
-#ifdef CONFIG_SH_WRITETHROUGH
+#ifdef CONFIG_CACHE_WRITETHROUGH
 	pteval |= _PAGE_WT;
 #endif
 	/* conveniently, we want all the software flags to be 0 anyway */
diff --git a/include/asm-sh/cacheflush.h b/include/asm-sh/cacheflush.h
index 07f62ec9ff0c..aa558da08471 100644
--- a/include/asm-sh/cacheflush.h
+++ b/include/asm-sh/cacheflush.h
@@ -1,16 +1,47 @@
 #ifndef __ASM_SH_CACHEFLUSH_H
 #define __ASM_SH_CACHEFLUSH_H
+
 #ifdef __KERNEL__
 
-#include <linux/mm.h>
+#ifdef CONFIG_CACHE_OFF
+/*
+ * Nothing to do when the cache is disabled, initial flush and explicit
+ * disabling is handled at CPU init time.
+ *
+ * See arch/sh/kernel/cpu/init.c:cache_init().
+ */
+#define p3_cache_init()				do { } while (0)
+#define flush_cache_all()			do { } while (0)
+#define flush_cache_mm(mm)			do { } while (0)
+#define flush_cache_dup_mm(mm)			do { } while (0)
+#define flush_cache_range(vma, start, end)	do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
+#define flush_dcache_page(page)			do { } while (0)
+#define flush_icache_range(start, end)		do { } while (0)
+#define flush_icache_page(vma,pg)		do { } while (0)
+#define flush_dcache_mmap_lock(mapping)		do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
+#define flush_cache_sigtramp(vaddr)		do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
+#define __flush_wback_region(start, size)	do { (void)(start); } while (0)
+#define __flush_purge_region(start, size)	do { (void)(start); } while (0)
+#define __flush_invalidate_region(start, size)	do { (void)(start); } while (0)
+#else
 #include <asm/cpu/cacheflush.h>
 
+/*
+ * Consistent DMA requires that the __flush_xxx() primitives must be set
+ * for any of the enabled non-coherent caches (most of the UP CPUs),
+ * regardless of PIPT or VIPT cache configurations.
+ */
+
 /* Flush (write-back only) a region (smaller than a page) */
 extern void __flush_wback_region(void *start, int size);
 /* Flush (write-back & invalidate) a region (smaller than a page) */
 extern void __flush_purge_region(void *start, int size);
 /* Flush (invalidate only) a region (smaller than a page) */
 extern void __flush_invalidate_region(void *start, int size);
+#endif
 
 #define flush_cache_vmap(start, end)		flush_cache_all()
 #define flush_cache_vunmap(start, end)		flush_cache_all()
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index 6bc9bba10105..48b718e7455c 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -70,14 +70,14 @@ extern void clear_page_nommu(void *to);
 extern void copy_page_nommu(void *to, void *from);
 #endif
 
-#if defined(CONFIG_MMU) && (defined(CONFIG_CPU_SH4) || \
-	defined(CONFIG_SH7705_CACHE_32KB))
+#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
+	(defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB))
 struct page;
 extern void clear_user_page(void *to, unsigned long address, struct page *pg);
 extern void copy_user_page(void *to, void *from, unsigned long address, struct page *pg);
 extern void __clear_user_page(void *to, void *orig_to);
 extern void __copy_user_page(void *to, void *from, void *orig_to);
-#elif defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH3) || !defined(CONFIG_MMU)
+#else
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
 #endif
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index e3fae12c0e49..54ad5037fe40 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -563,7 +563,8 @@ struct mm_struct;
 extern unsigned int kobjsize(const void *objp);
 #endif /* !CONFIG_MMU */
 
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
+#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \
+	defined(CONFIG_SH7705_CACHE_32KB))
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
 #endif
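
For readers skimming the cache_init() hunk above, the patch boils down to a three-way selection: write-through sets the WT bit, write-back sets the CB bit, and the new off case clears the enable bit entirely. The sketch below restates that ladder as a plain C function; the CCR_* bit values are illustrative placeholders, not the real SuperH CCR layout, and only the selection logic mirrors the patch.

```c
#include <stdio.h>

/* Placeholder bit values for illustration only; the real CCR layout is per-CPU. */
#define CCR_CACHE_ENABLE  0x1
#define CCR_CACHE_WT      0x2
#define CCR_CACHE_CB      0x4

enum cache_mode { CACHE_WRITEBACK, CACHE_WRITETHROUGH, CACHE_OFF };

/* Mirrors the #if/#elif/#else ladder added to cache_init(), but at run time. */
static unsigned long ccr_flags_for(enum cache_mode mode)
{
	unsigned long flags = CCR_CACHE_ENABLE;

	switch (mode) {
	case CACHE_WRITETHROUGH:
		flags |= CCR_CACHE_WT;		/* write-through */
		break;
	case CACHE_WRITEBACK:
		flags |= CCR_CACHE_CB;		/* write-back */
		break;
	case CACHE_OFF:
		flags &= ~CCR_CACHE_ENABLE;	/* cache disabled outright */
		break;
	}
	return flags;
}

int main(void)
{
	printf("write-back:    0x%lx\n", ccr_flags_for(CACHE_WRITEBACK));
	printf("write-through: 0x%lx\n", ccr_flags_for(CACHE_WRITETHROUGH));
	printf("off:           0x%lx\n", ccr_flags_for(CACHE_OFF));
	return 0;
}
```

Whatever mode is selected, the resulting flag word is all that cache_init() ultimately writes to CCR via ctrl_outl() in the hunk above.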