author    Paul Mundt <lethal@linux-sh.org>    2010-11-04 12:46:19 +0900
committer Paul Mundt <lethal@linux-sh.org>    2010-11-04 12:46:19 +0900
commit    edc9a958fd31ef1d89f9eaee82b2a3882c8e34c9 (patch)
tree      bdc96edeb6a41ab72ae87daf46d3402aba633541 /arch/sh
parent    e2fcf74f3d3dabe8591732cd37869a0cc88ed7a5 (diff)
sh: nommu: Support building without an uncached mapping.
Now that nommu selects 32BIT we run into the situation where SH-2A supports an uncached identity mapping by way of the BSC, while the SH-2 does not. This provides stubs for the PC manglers and tidies up some of the system*.h mess in the process.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
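For context only, a minimal sketch of how the PC manglers are typically used (the caller below is hypothetical and not part of this commit): cache or TLB maintenance is bracketed by jump_to_uncached()/back_to_cached(), and on parts without an uncached mapping, such as plain SH-2 nommu, both macros now compile away to no-ops.

#include <asm/uncached.h>

/* Hypothetical caller, shown only to illustrate the bracketing pattern. */
static void example_cache_maintenance(void)
{
	jump_to_uncached();	/* keep executing, but via the uncached alias */

	/* ... poke cache/TLB control registers here ... */

	back_to_cached();	/* jump back into the cached mapping */
}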
Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/include/asm/system.h     4
-rw-r--r--  arch/sh/include/asm/system_32.h 36
-rw-r--r--  arch/sh/include/asm/system_64.h  3
-rw-r--r--  arch/sh/include/asm/uncached.h  40
4 files changed, 41 insertions(+), 42 deletions(-)
diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h
index 1f1af5afff03..10c8b1823a18 100644
--- a/arch/sh/include/asm/system.h
+++ b/arch/sh/include/asm/system.h
@@ -10,6 +10,7 @@
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <asm/types.h>
+#include <asm/uncached.h>
#define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */
@@ -137,9 +138,6 @@ extern unsigned int instruction_size(unsigned int insn);
#define instruction_size(insn) (4)
#endif
-extern unsigned long cached_to_uncached;
-extern unsigned long uncached_size;
-
void per_cpu_trap_init(void);
void default_idle(void);
void cpu_idle_wait(void);
diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h
index c941b2739405..a4ad1cd9bc4d 100644
--- a/arch/sh/include/asm/system_32.h
+++ b/arch/sh/include/asm/system_32.h
@@ -145,42 +145,6 @@ do { \
__restore_dsp(prev); \
} while (0)
-/*
- * Jump to uncached area.
- * When handling TLB or caches, we need to do it from an uncached area.
- */
-#define jump_to_uncached() \
-do { \
- unsigned long __dummy; \
- \
- __asm__ __volatile__( \
- "mova 1f, %0\n\t" \
- "add %1, %0\n\t" \
- "jmp @%0\n\t" \
- " nop\n\t" \
- ".balign 4\n" \
- "1:" \
- : "=&z" (__dummy) \
- : "r" (cached_to_uncached)); \
-} while (0)
-
-/*
- * Back to cached area.
- */
-#define back_to_cached() \
-do { \
- unsigned long __dummy; \
- ctrl_barrier(); \
- __asm__ __volatile__( \
- "mov.l 1f, %0\n\t" \
- "jmp @%0\n\t" \
- " nop\n\t" \
- ".balign 4\n" \
- "1: .long 2f\n" \
- "2:" \
- : "=&r" (__dummy)); \
-} while (0)
-
#ifdef CONFIG_CPU_HAS_SR_RB
#define lookup_exception_vector() \
({ \
diff --git a/arch/sh/include/asm/system_64.h b/arch/sh/include/asm/system_64.h
index 36338646dfc8..8593bc8d1a4e 100644
--- a/arch/sh/include/asm/system_64.h
+++ b/arch/sh/include/asm/system_64.h
@@ -34,9 +34,6 @@ do { \
&next->thread); \
} while (0)
-#define jump_to_uncached() do { } while (0)
-#define back_to_cached() do { } while (0)
-
#define __icbi(addr) __asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
#define __ocbp(addr) __asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
#define __ocbi(addr) __asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
diff --git a/arch/sh/include/asm/uncached.h b/arch/sh/include/asm/uncached.h
index e3419f96626a..6f8816b79cf1 100644
--- a/arch/sh/include/asm/uncached.h
+++ b/arch/sh/include/asm/uncached.h
@@ -4,15 +4,55 @@
#include <linux/bug.h>
#ifdef CONFIG_UNCACHED_MAPPING
+extern unsigned long cached_to_uncached;
+extern unsigned long uncached_size;
extern unsigned long uncached_start, uncached_end;
extern int virt_addr_uncached(unsigned long kaddr);
extern void uncached_init(void);
extern void uncached_resize(unsigned long size);
+
+/*
+ * Jump to uncached area.
+ * When handling TLB or caches, we need to do it from an uncached area.
+ */
+#define jump_to_uncached() \
+do { \
+ unsigned long __dummy; \
+ \
+ __asm__ __volatile__( \
+ "mova 1f, %0\n\t" \
+ "add %1, %0\n\t" \
+ "jmp @%0\n\t" \
+ " nop\n\t" \
+ ".balign 4\n" \
+ "1:" \
+ : "=&z" (__dummy) \
+ : "r" (cached_to_uncached)); \
+} while (0)
+
+/*
+ * Back to cached area.
+ */
+#define back_to_cached() \
+do { \
+ unsigned long __dummy; \
+ ctrl_barrier(); \
+ __asm__ __volatile__( \
+ "mov.l 1f, %0\n\t" \
+ "jmp @%0\n\t" \
+ " nop\n\t" \
+ ".balign 4\n" \
+ "1: .long 2f\n" \
+ "2:" \
+ : "=&r" (__dummy)); \
+} while (0)
#else
#define virt_addr_uncached(kaddr) (0)
#define uncached_init() do { } while (0)
#define uncached_resize(size) BUG()
+#define jump_to_uncached() do { } while (0)
+#define back_to_cached() do { } while (0)
#endif
#endif /* __ASM_SH_UNCACHED_H */
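As a reading aid (an assumption about how the exported symbols relate, not code from this commit; the helper name is made up): cached_to_uncached is the offset from the cached mapping to its uncached alias, which the inline asm above adds to the program counter, and virt_addr_uncached() reports whether a kernel address already lies inside that alias window.

/* Hypothetical illustration only; real callers must ensure the result
 * falls inside the uncached window (uncached_start .. uncached_end). */
static inline unsigned long example_uncached_alias(unsigned long kaddr)
{
	if (virt_addr_uncached(kaddr))
		return kaddr;			/* already an uncached address */
	return kaddr + cached_to_uncached;	/* shift into the uncached alias */
}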