Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/Kconfig	| 12
-rw-r--r--	arch/arm/mm/init.c	|  9
-rw-r--r--	arch/arm/mm/mm-armv.c	| 56
-rw-r--r--	arch/arm/mm/proc-v6.S	| 26
4 files changed, 81 insertions(+), 22 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 5568403e984d..e3c14d6b4328 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -250,6 +250,18 @@ config CPU_V6
select CPU_COPY_V6
select CPU_TLB_V6
+# ARMv6k
+config CPU_32v6K
+ bool "Support ARM V6K processor extensions" if !SMP
+ depends on CPU_V6
+ default y if SMP
+ help
+ Say Y here if your ARMv6 processor supports the 'K' extension.
+ This enables the kernel to use some instructions not present
+	  on previous processors, and as such a kernel built with this
+	  enabled will not boot on processors which do not support these
+ instructions.
+
# Figure out what processor architecture version we should be using.
# This defines the compiler instruction set which depends on the machine type.
config CPU_32v3
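
For context, the 'K' extension adds, among other things, byte/halfword/doubleword
exclusive accessors (ldrexb/strexb and friends). Below is a minimal sketch of the
kind of instruction a CPU_32v6K kernel may emit - a hypothetical byte-exchange
helper using GCC inline assembly, not code from this patch:

	static inline unsigned char xchg_byte(volatile unsigned char *p,
					      unsigned char new)
	{
		unsigned char old;
		unsigned int tmp;

		__asm__ __volatile__(
		"1:	ldrexb	%0, [%2]\n"	/* exclusive byte load: V6K only */
		"	strexb	%1, %3, [%2]\n"	/* exclusive byte store: V6K only */
		"	teq	%1, #0\n"	/* store succeeded? */
		"	bne	1b"		/* no: another CPU intervened, retry */
		: "=&r" (old), "=&r" (tmp)
		: "r" (p), "r" (new)
		: "cc", "memory");

		return old;
	}

On a plain ARMv6 core these encodings are undefined, which is why a kernel built
with CPU_32v6K will not boot there.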
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index fd079ff1fc53..c168f322ef8c 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -486,10 +486,17 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
/*
* Ask the machine support to map in the statically mapped devices.
- * After this point, we can start to touch devices again.
*/
if (mdesc->map_io)
mdesc->map_io();
+
+ /*
+ * Finally flush the tlb again - this ensures that we're in a
+ * consistent state wrt the writebuffer if the writebuffer needs
+ * draining. After this point, we can start to touch devices
+ * again.
+ */
+ local_flush_tlb_all();
}
/*
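
mdesc->map_io is where each machine installs its static device mappings, so the
flush added above runs once all of them are in place. As a reminder of what
executes at that point, here is a hedged sketch of a typical map_io for a
hypothetical machine "foo" (names and addresses are illustrative, not from this
patch):

	static struct map_desc foo_io_desc[] __initdata = {
		{
			.virtual = 0xf8000000,			/* hypothetical VA */
			.pfn	 = __phys_to_pfn(0x10000000),	/* hypothetical PA */
			.length	 = SZ_1M,
			.type	 = MT_DEVICE,
		},
	};

	static void __init foo_map_io(void)
	{
		iotable_init(foo_io_desc, ARRAY_SIZE(foo_io_desc));
	}

iotable_init() calls create_mapping() for each entry, touching the page tables
and write buffer - hence the local_flush_tlb_all() afterwards.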
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 1221fdde1769..9e50127be635 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -354,7 +354,7 @@ void __init build_mem_type_table(void)
{
struct cachepolicy *cp;
unsigned int cr = get_cr();
- unsigned int user_pgprot;
+ unsigned int user_pgprot, kern_pgprot;
int cpu_arch = cpu_architecture();
int i;
@@ -381,7 +381,7 @@ void __init build_mem_type_table(void)
}
cp = &cache_policies[cachepolicy];
- user_pgprot = cp->pte;
+ kern_pgprot = user_pgprot = cp->pte;
/*
* ARMv6 and above have extended page tables.
@@ -393,6 +393,7 @@ void __init build_mem_type_table(void)
*/
mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;
mem_types[MT_ROM].prot_sect &= ~PMD_BIT4;
+
/*
* Mark cache clean areas and XIP ROM read only
* from SVC mode and no access from userspace.
@@ -412,32 +413,47 @@ void __init build_mem_type_table(void)
* (iow, non-global)
*/
user_pgprot |= L_PTE_ASID;
+
+#ifdef CONFIG_SMP
+ /*
+ * Mark memory with the "shared" attribute for SMP systems
+ */
+ user_pgprot |= L_PTE_SHARED;
+ kern_pgprot |= L_PTE_SHARED;
+ mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+#endif
}
+ for (i = 0; i < 16; i++) {
+ unsigned long v = pgprot_val(protection_map[i]);
+ v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot;
+ protection_map[i] = __pgprot(v);
+ }
+
+ mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot;
+ mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot;
+
if (cpu_arch >= CPU_ARCH_ARMv5) {
- mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
- mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
+#ifndef CONFIG_SMP
+ /*
+ * Only use write-through for non-SMP systems
+ */
+ mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
+ mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
+#endif
} else {
- mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte;
- mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte;
mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
}
+ pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
+ L_PTE_DIRTY | L_PTE_WRITE |
+ L_PTE_EXEC | kern_pgprot);
+
mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
mem_types[MT_ROM].prot_sect |= cp->pmd;
- for (i = 0; i < 16; i++) {
- unsigned long v = pgprot_val(protection_map[i]);
- v = (v & ~(PTE_BUFFERABLE|PTE_CACHEABLE)) | user_pgprot;
- protection_map[i] = __pgprot(v);
- }
-
- pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
- L_PTE_DIRTY | L_PTE_WRITE |
- L_PTE_EXEC | cp->pte);
-
switch (cp->pmd) {
case PMD_SECT_WT:
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
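
The protection_map loop above swaps the cache-policy bits of each userspace
protection value for the boot-selected policy (plus the ASID/shared bits on
ARMv6). A standalone illustration of that transform, with bit values mirroring
the ones this kernel uses (treat them as assumptions):

	#include <stdio.h>

	#define L_PTE_BUFFERABLE	(1 << 2)
	#define L_PTE_CACHEABLE		(1 << 3)
	#define L_PTE_SHARED		(1 << 10)	/* v6 SMP only */

	int main(void)
	{
		/* e.g. writeback policy on an SMP build */
		unsigned long user_pgprot = L_PTE_CACHEABLE | L_PTE_BUFFERABLE |
					    L_PTE_SHARED;
		unsigned long v = L_PTE_CACHEABLE;	/* a stale map entry */

		/* same transform as the loop over protection_map[] */
		v = (v & ~(L_PTE_BUFFERABLE | L_PTE_CACHEABLE)) | user_pgprot;

		printf("entry = %#lx\n", v);	/* cacheable+bufferable+shared */
		return 0;
	}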
@@ -469,14 +485,14 @@ void __init create_mapping(struct map_desc *md)
if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
printk(KERN_WARNING "BUG: not creating mapping for "
- "0x%016llx at 0x%08lx in user region\n",
+ "0x%08llx at 0x%08lx in user region\n",
__pfn_to_phys((u64)md->pfn), md->virtual);
return;
}
if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
- printk(KERN_WARNING "BUG: mapping for 0x%016llx at 0x%08lx "
+ printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
"overlaps vmalloc space\n",
__pfn_to_phys((u64)md->pfn), md->virtual);
}
@@ -492,14 +508,14 @@ void __init create_mapping(struct map_desc *md)
if(md->pfn >= 0x100000) {
if(domain) {
printk(KERN_ERR "MM: invalid domain in supersection "
- "mapping for 0x%016llx at 0x%08lx\n",
+ "mapping for 0x%08llx at 0x%08lx\n",
__pfn_to_phys((u64)md->pfn), md->virtual);
return;
}
if((md->virtual | md->length | __pfn_to_phys(md->pfn))
& ~SUPERSECTION_MASK) {
printk(KERN_ERR "MM: cannot create mapping for "
- "0x%016llx at 0x%08lx invalid alignment\n",
+ "0x%08llx at 0x%08lx invalid alignment\n",
__pfn_to_phys((u64)md->pfn), md->virtual);
return;
}
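
The printk format changes above are safe because a printf width is a minimum,
not a truncation: a physical address above 4GB (e.g. a 36-bit supersection
address) still prints in full with %08llx, it merely loses the run of leading
zeros that %016llx padded in. A quick demonstration:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long phys = 0x123456789ULL;	/* 36-bit address */

		printf("0x%016llx\n", phys);	/* 0x0000000123456789 */
		printf("0x%08llx\n", phys);	/* 0x123456789 - nothing lost */
		return 0;
	}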
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 9bb5fff406fb..92f3ca31b7b9 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
+#include <asm/hardware/arm_scu.h>
#include <asm/procinfo.h>
#include <asm/pgtable.h>
@@ -112,6 +113,9 @@ ENTRY(cpu_v6_dcache_clean_area)
ENTRY(cpu_v6_switch_mm)
mov r2, #0
ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
+#ifdef CONFIG_SMP
+ orr r0, r0, #2 @ set shared pgtable
+#endif
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
mcr p15, 0, r2, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
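
On ARMv6, bit 1 of the translation table base register is the shared ("S") walk
attribute; setting it makes the hardware page-table walks of the CPUs coherent
with each other. What the added orr computes, rendered as a hedged C sketch
(TTB_S is a name invented here, not from this patch):

	#define TTB_S	(1 << 1)	/* TTBR bit 1: shareable table walks */

	static inline unsigned long ttbr0_value(unsigned long pgd_phys)
	{
	#ifdef CONFIG_SMP
		return pgd_phys | TTB_S;
	#else
		return pgd_phys;
	#endif
	}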
@@ -140,7 +144,7 @@ ENTRY(cpu_v6_switch_mm)
ENTRY(cpu_v6_set_pte)
str r1, [r0], #-2048 @ linux version
- bic r2, r1, #0x000007f0
+ bic r2, r1, #0x000003f0
bic r2, r2, #0x00000003
orr r2, r2, #PTE_EXT_AP0 | 2
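
The mask change is not cosmetic: clearing 0x7f0 also cleared bit 10 (0x400),
which this series reuses as the shared attribute (L_PTE_SHARED above), so the
narrower 0x3f0 mask preserves it when the Linux PTE is converted to the
hardware PTE. In isolation (the bit meaning is this series' convention,
assumed here):

	#include <stdio.h>

	int main(void)
	{
		unsigned long linux_pte = 0x400;	/* shared bit (1 << 10) */

		printf("bic #0x7f0 -> %#lx\n", linux_pte & ~0x7f0UL); /* 0: lost */
		printf("bic #0x3f0 -> %#lx\n", linux_pte & ~0x3f0UL); /* 0x400: kept */
		return 0;
	}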
@@ -191,6 +195,23 @@ cpu_v6_name:
* - cache type register is implemented
*/
__v6_setup:
+#ifdef CONFIG_SMP
+ /* Set up the SCU on core 0 only */
+ mrc p15, 0, r0, c0, c0, 5 @ CPU core number
+ ands r0, r0, #15
+ moveq r0, #0x10000000 @ SCU_BASE
+ orreq r0, r0, #0x00100000
+ ldreq r5, [r0, #SCU_CTRL]
+ orreq r5, r5, #1
+ streq r5, [r0, #SCU_CTRL]
+
+#ifndef CONFIG_CPU_DCACHE_DISABLE
+ mrc p15, 0, r0, c1, c0, 1 @ Enable SMP/nAMP mode
+ orr r0, r0, #0x20
+ mcr p15, 0, r0, c1, c0, 1
+#endif
+#endif
+
mov r0, #0
mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
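
The SCU bring-up above, rendered in C for readability - a hedged sketch,
assuming the same SCU base (0x10100000) the moveq/orreq pair constructs and the
SCU_CTRL offset from asm/hardware/arm_scu.h; this runs before the MMU is
enabled, so a raw pointer access stands in for the ldr/str:

	#define SCU_BASE	0x10100000UL	/* as built by the assembly */
	#define SCU_CTRL	0x00		/* control register offset */

	static void scu_enable(void)
	{
		volatile unsigned long *ctrl =
			(volatile unsigned long *)(SCU_BASE + SCU_CTRL);

		*ctrl |= 1;			/* bit 0: SCU enable */
	}

The ands/moveq predication in the assembly restricts this to core 0; secondary
cores fall straight through to the cache and TLB invalidation below.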
@@ -198,6 +219,9 @@ __v6_setup:
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs
mcr p15, 0, r0, c2, c0, 2 @ TTB control register
+#ifdef CONFIG_SMP
+ orr r4, r4, #2 @ set shared pgtable
+#endif
mcr p15, 0, r4, c2, c0, 1 @ load TTB1
#ifdef CONFIG_VFP
mrc p15, 0, r0, c1, c0, 2