From c470662854ff94d44bf8c192cefac3efa33db676 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 27 Sep 2006 15:29:18 +0900
Subject: sh: Fixup SHMLBA definition for SH7705.

We need this set to something sensible anywhere we have an aliasing dcache.

Signed-off-by: Paul Mundt
---
 include/asm-sh/cpu-sh3/cacheflush.h | 48 ++++++++++++++++---------------------
 1 file changed, 21 insertions(+), 27 deletions(-)

(limited to 'include/asm-sh/cpu-sh3/cacheflush.h')

diff --git a/include/asm-sh/cpu-sh3/cacheflush.h b/include/asm-sh/cpu-sh3/cacheflush.h
index db0cb071ea8e..97f5a64c2ab8 100644
--- a/include/asm-sh/cpu-sh3/cacheflush.h
+++ b/include/asm-sh/cpu-sh3/cacheflush.h
@@ -35,47 +35,41 @@
 
 /* 32KB cache, 4kb PAGE sizes need to check bit 12 */
 #define CACHE_ALIAS 0x00001000
 
-extern void flush_cache_all(void);
-extern void flush_cache_mm(struct mm_struct *mm);
-extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-			      unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
-extern void flush_dcache_page(struct page *pg);
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
-
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-
-/* SH3 has unified cache so no special action needed here */
-#define flush_cache_sigtramp(vaddr)		do { } while (0)
-#define flush_page_to_ram(page)			do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
-
-#define p3_cache_init()				do { } while (0)
-
 #define PG_mapped	PG_arch_1
 
-/* We provide our own get_unmapped_area to avoid cache alias issue */
-#define HAVE_ARCH_UNMAPPED_AREA
-
+void flush_cache_all(void);
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+		       unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
+void flush_dcache_page(struct page *pg);
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 #else
-
 #define flush_cache_all()			do { } while (0)
 #define flush_cache_mm(mm)			do { } while (0)
 #define flush_cache_range(vma, start, end)	do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
 #define flush_dcache_page(page)			do { } while (0)
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 #define flush_icache_range(start, end)		do { } while (0)
 #define flush_icache_page(vma,pg)		do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
+#endif
+
+#define flush_dcache_mmap_lock(mapping)		do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
+
+/* SH3 has unified cache so no special action needed here */
 #define flush_cache_sigtramp(vaddr)		do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
 
 #define p3_cache_init()				do { } while (0)
 
-#endif
+/*
+ * We provide our own get_unmapped_area to avoid cache aliasing issues
+ * on SH7705 with a 32KB cache, and to page align addresses in the
+ * non-aliasing case.
+ */
+#define HAVE_ARCH_UNMAPPED_AREA
 
 #endif /* __ASM_CPU_SH3_CACHEFLUSH_H */
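
As background for the SHMLBA change named in the subject (the SHMLBA definition itself lives outside the file shown above, so it does not appear in this filtered view), the standalone sketch below illustrates why an aliasing dcache needs colour-aligned shared mappings: with a 32KB cache and 4KB pages, bit 12 of the virtual address (CACHE_ALIAS in the hunk) selects the cache set, so two virtual mappings of the same physical page must agree in that bit. The SHMLBA value, PAGE_SHIFT, and the colour_align() helper are illustrative assumptions for this sketch, not the kernel's implementation.

#include <stdio.h>

#define PAGE_SHIFT	12
#define CACHE_ALIAS	0x00001000UL		/* aliasing index bit, as in the patch */
#define SHMLBA		(CACHE_ALIAS << 1)	/* assumed colour stride: 8KB, two colours */

/*
 * Round addr up to an SHMLBA boundary, then add the colour implied by the
 * page offset, so every mapping of a given page lands on the same cache
 * colour.  Hypothetical helper for illustration only.
 */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	unsigned long base   = (addr + SHMLBA - 1) & ~(SHMLBA - 1);
	unsigned long colour = (pgoff << PAGE_SHIFT) & (SHMLBA - 1);

	return base + colour;
}

int main(void)
{
	/* Two hint addresses for mappings of the same object at pgoff 0:  */
	/* both results agree in the bits below SHMLBA, so they never alias. */
	printf("%#lx\n", colour_align(0x40001234UL, 0));	/* -> 0x40002000 */
	printf("%#lx\n", colour_align(0x5000f000UL, 0));	/* -> 0x50010000 */
	return 0;
}

On configurations without an aliasing dcache, plain page alignment is sufficient, which is what the new header comment means by "page align addresses in the non-aliasing case".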