Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/alignment.c      24
-rw-r--r--  arch/arm/mm/cache-v6.S       17
-rw-r--r--  arch/arm/mm/cache-v7.S        4
-rw-r--r--  arch/arm/mm/copypage-v6.c     9
-rw-r--r--  arch/arm/mm/dma-mapping.c     5
-rw-r--r--  arch/arm/mm/flush.c          25
-rw-r--r--  arch/arm/mm/highmem.c        87
-rw-r--r--  arch/arm/mm/init.c           14
-rw-r--r--  arch/arm/mm/mmu.c            10
-rw-r--r--  arch/arm/mm/nommu.c          13
-rw-r--r--  arch/arm/mm/proc-sa1100.S     2
-rw-r--r--  arch/arm/mm/tlb-v7.S          8
12 files changed, 166 insertions(+), 52 deletions(-)
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index edddd66faac6..a2ab51fa73e2 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -166,15 +166,15 @@ union offset_union {
THUMB( "1: "ins" %1, [%2]\n" ) \
THUMB( " add %2, %2, #1\n" ) \
"2:\n" \
- " .section .fixup,\"ax\"\n" \
+ " .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, #1\n" \
" b 2b\n" \
- " .previous\n" \
- " .section __ex_table,\"a\"\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 3b\n" \
- " .previous\n" \
+ " .popsection\n" \
: "=r" (err), "=&r" (val), "=r" (addr) \
: "0" (err), "2" (addr))
@@ -226,16 +226,16 @@ union offset_union {
" mov %1, %1, "NEXT_BYTE"\n" \
"2: "ins" %1, [%2]\n" \
"3:\n" \
- " .section .fixup,\"ax\"\n" \
+ " .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
"4: mov %0, #1\n" \
" b 3b\n" \
- " .previous\n" \
- " .section __ex_table,\"a\"\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 4b\n" \
" .long 2b, 4b\n" \
- " .previous\n" \
+ " .popsection\n" \
: "=r" (err), "=&r" (v), "=&r" (a) \
: "0" (err), "1" (v), "2" (a)); \
if (err) \
@@ -266,18 +266,18 @@ union offset_union {
" mov %1, %1, "NEXT_BYTE"\n" \
"4: "ins" %1, [%2]\n" \
"5:\n" \
- " .section .fixup,\"ax\"\n" \
+ " .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
"6: mov %0, #1\n" \
" b 5b\n" \
- " .previous\n" \
- " .section __ex_table,\"a\"\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 6b\n" \
" .long 2b, 6b\n" \
" .long 3b, 6b\n" \
" .long 4b, 6b\n" \
- " .previous\n" \
+ " .popsection\n" \
: "=r" (err), "=&r" (v), "=&r" (a) \
: "0" (err), "1" (v), "2" (a)); \
if (err) \
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 9d89c67a1cc3..e46ecd847138 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -211,6 +211,9 @@ v6_dma_inv_range:
mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line
#endif
1:
+#ifdef CONFIG_SMP
+ str r0, [r0] @ write for ownership
+#endif
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c6, 1 @ invalidate D line
#else
@@ -231,6 +234,9 @@ v6_dma_inv_range:
v6_dma_clean_range:
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
+#ifdef CONFIG_SMP
+ ldr r2, [r0] @ read for ownership
+#endif
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c10, 1 @ clean D line
#else
@@ -251,6 +257,10 @@ v6_dma_clean_range:
ENTRY(v6_dma_flush_range)
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
+#ifdef CONFIG_SMP
+ ldr r2, [r0] @ read for ownership
+ str r2, [r0] @ write for ownership
+#endif
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
#else
@@ -273,7 +283,9 @@ ENTRY(v6_dma_map_area)
add r1, r1, r0
teq r2, #DMA_FROM_DEVICE
beq v6_dma_inv_range
- b v6_dma_clean_range
+ teq r2, #DMA_TO_DEVICE
+ beq v6_dma_clean_range
+ b v6_dma_flush_range
ENDPROC(v6_dma_map_area)
/*
@@ -283,9 +295,6 @@ ENDPROC(v6_dma_map_area)
* - dir - DMA direction
*/
ENTRY(v6_dma_unmap_area)
- add r1, r1, r0
- teq r2, #DMA_TO_DEVICE
- bne v6_dma_inv_range
mov pc, lr
ENDPROC(v6_dma_unmap_area)
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index bcd64f265870..06a90dcfc60a 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -167,7 +167,11 @@ ENTRY(v7_coherent_user_range)
cmp r0, r1
blo 1b
mov r0, #0
+#ifdef CONFIG_SMP
+ mcr p15, 0, r0, c7, c1, 6 @ invalidate BTB Inner Shareable
+#else
mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
+#endif
dsb
isb
mov pc, lr
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 8bca4dea6dfa..f55fa1044f72 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -41,14 +41,7 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
kfrom = kmap_atomic(from, KM_USER0);
kto = kmap_atomic(to, KM_USER1);
copy_page(kto, kfrom);
-#ifdef CONFIG_HIGHMEM
- /*
- * kmap_atomic() doesn't set the page virtual address, and
- * kunmap_atomic() takes care of cache flushing already.
- */
- if (page_address(to) != NULL)
-#endif
- __cpuc_flush_dcache_area(kto, PAGE_SIZE);
+ __cpuc_flush_dcache_area(kto, PAGE_SIZE);
kunmap_atomic(kto, KM_USER1);
kunmap_atomic(kfrom, KM_USER0);
}
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1351edc0b26f..13fa536d82e6 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -464,6 +464,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
vaddr += offset;
op(vaddr, len, dir);
kunmap_high(page);
+ } else if (cache_is_vipt()) {
+ pte_t saved_pte;
+ vaddr = kmap_high_l1_vipt(page, &saved_pte);
+ op(vaddr + offset, len, dir);
+ kunmap_high_l1_vipt(page, saved_pte);
}
} else {
vaddr = page_address(page) + offset;
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index e34f095e2090..c6844cb9b508 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -13,6 +13,7 @@
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
+#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
@@ -152,21 +153,25 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
- void *addr = page_address(page);
-
/*
* Writeback any data associated with the kernel mapping of this
* page. This ensures that data in the physical page is mutually
* coherent with the kernel's mapping.
*/
-#ifdef CONFIG_HIGHMEM
- /*
- * kmap_atomic() doesn't set the page virtual address, and
- * kunmap_atomic() takes care of cache flushing already.
- */
- if (addr)
-#endif
- __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+ if (!PageHighMem(page)) {
+ __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+ } else {
+ void *addr = kmap_high_get(page);
+ if (addr) {
+ __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+ kunmap_high(page);
+ } else if (cache_is_vipt()) {
+ pte_t saved_pte;
+ addr = kmap_high_l1_vipt(page, &saved_pte);
+ __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+ kunmap_high_l1_vipt(page, saved_pte);
+ }
+ }
/*
* If this is a page cache page, and we have an aliasing VIPT cache,
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 2be1ec7c1b41..77b030f5ec09 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
if (kvaddr >= (void *)FIXADDR_START) {
- __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+ if (cache_is_vivt())
+ __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
@@ -124,3 +125,87 @@ struct page *kmap_atomic_to_page(const void *ptr)
pte = TOP_PTE(vaddr);
return pte_page(*pte);
}
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+
+#include <linux/percpu.h>
+
+/*
+ * The VIVT cache of a highmem page is always flushed before the page
+ * is unmapped. Hence unmapped highmem pages need no cache maintenance
+ * in that case.
+ *
+ * However unmapped pages may still be cached with a VIPT cache, and
+ * it is not possible to perform cache maintenance on them using physical
+ * addresses unfortunately. So we have no choice but to set up a temporary
+ * virtual mapping for that purpose.
+ *
+ * Yet this VIPT cache maintenance may be triggered from DMA support
+ * functions which are possibly called from interrupt context. As we don't
+ * want to keep interrupts disabled all the time when such maintenance is
+ * taking place, we therefore allow for some reentrancy by preserving and
+ * restoring the previous fixmap entry before the interrupted context is
+ * resumed. If the reentrancy depth is 0 then there is no need to restore
+ * the previous fixmap, and leaving the current one in place allows it to
+ * be reused the next time without a TLB flush (common with DMA).
+ */
+
+static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
+
+void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
+{
+ unsigned int idx, cpu = smp_processor_id();
+ int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+ unsigned long vaddr, flags;
+ pte_t pte, *ptep;
+
+ idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ ptep = TOP_PTE(vaddr);
+ pte = mk_pte(page, kmap_prot);
+
+ if (!in_interrupt())
+ preempt_disable();
+
+ raw_local_irq_save(flags);
+ (*depth)++;
+ if (pte_val(*ptep) == pte_val(pte)) {
+ *saved_pte = pte;
+ } else {
+ *saved_pte = *ptep;
+ set_pte_ext(ptep, pte, 0);
+ local_flush_tlb_kernel_page(vaddr);
+ }
+ raw_local_irq_restore(flags);
+
+ return (void *)vaddr;
+}
+
+void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
+{
+ unsigned int idx, cpu = smp_processor_id();
+ int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+ unsigned long vaddr, flags;
+ pte_t pte, *ptep;
+
+ idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ ptep = TOP_PTE(vaddr);
+ pte = mk_pte(page, kmap_prot);
+
+ BUG_ON(pte_val(*ptep) != pte_val(pte));
+ BUG_ON(*depth <= 0);
+
+ raw_local_irq_save(flags);
+ (*depth)--;
+ if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
+ set_pte_ext(ptep, saved_pte, 0);
+ local_flush_tlb_kernel_page(vaddr);
+ }
+ raw_local_irq_restore(flags);
+
+ if (!in_interrupt())
+ preempt_enable();
+}
+
+#endif /* CONFIG_CPU_CACHE_VIPT */
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 83db12a68d56..0ed29bfeba1c 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -86,9 +86,6 @@ void show_mem(void)
printk("Mem-info:\n");
show_free_areas();
for_each_online_node(node) {
- pg_data_t *n = NODE_DATA(node);
- struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn;
-
for_each_nodebank (i,mi,node) {
struct membank *bank = &mi->bank[i];
unsigned int pfn1, pfn2;
@@ -97,8 +94,8 @@ void show_mem(void)
pfn1 = bank_pfn_start(bank);
pfn2 = bank_pfn_end(bank);
- page = map + pfn1;
- end = map + pfn2;
+ page = pfn_to_page(pfn1);
+ end = pfn_to_page(pfn2 - 1) + 1;
do {
total++;
@@ -603,9 +600,6 @@ void __init mem_init(void)
reserved_pages = free_pages = 0;
for_each_online_node(node) {
- pg_data_t *n = NODE_DATA(node);
- struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn;
-
for_each_nodebank(i, &meminfo, node) {
struct membank *bank = &meminfo.bank[i];
unsigned int pfn1, pfn2;
@@ -614,8 +608,8 @@ void __init mem_init(void)
pfn1 = bank_pfn_start(bank);
pfn2 = bank_pfn_end(bank);
- page = map + pfn1;
- end = map + pfn2;
+ page = pfn_to_page(pfn1);
+ end = pfn_to_page(pfn2 - 1) + 1;
do {
if (PageReserved(page))
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4223d086aa17..241c24a1c18f 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1054,10 +1054,12 @@ void setup_mm_for_reboot(char mode)
pgd_t *pgd;
int i;
- if (current->mm && current->mm->pgd)
- pgd = current->mm->pgd;
- else
- pgd = init_mm.pgd;
+ /*
+ * We need access to user-mode page tables here. For kernel threads
+ * we don't have any user-mode mappings, so we use the context that we
+ * "borrowed".
+ */
+ pgd = current->active_mm->pgd;
base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 9bfeb6b9509a..33b327379f07 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -65,6 +65,15 @@ void flush_dcache_page(struct page *page)
}
EXPORT_SYMBOL(flush_dcache_page);
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long uaddr, void *dst, const void *src,
+ unsigned long len)
+{
+ memcpy(dst, src, len);
+ if (vma->vm_flags & VM_EXEC)
+ __cpuc_coherent_user_range(uaddr, uaddr + len);
+}
+
void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
size_t size, unsigned int mtype)
{
@@ -87,8 +96,8 @@ void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
}
EXPORT_SYMBOL(__arm_ioremap);
-void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
- unsigned int mtype, void *caller)
+void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
+ unsigned int mtype, void *caller)
{
return __arm_ioremap(phys_addr, size, mtype);
}
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index ee7700242c19..5c47760c2064 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -45,7 +45,7 @@ ENTRY(cpu_sa1100_proc_init)
mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland
mov pc, lr
- .previous
+ .section .text
/*
* cpu_sa1100_proc_fin()
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index 0cb1848bd876..f3f288a9546d 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -50,7 +50,11 @@ ENTRY(v7wbi_flush_user_tlb_range)
cmp r0, r1
blo 1b
mov ip, #0
+#ifdef CONFIG_SMP
+ mcr p15, 0, ip, c7, c1, 6 @ flush BTAC/BTB Inner Shareable
+#else
mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB
+#endif
dsb
mov pc, lr
ENDPROC(v7wbi_flush_user_tlb_range)
@@ -79,7 +83,11 @@ ENTRY(v7wbi_flush_kern_tlb_range)
cmp r0, r1
blo 1b
mov r2, #0
+#ifdef CONFIG_SMP
+ mcr p15, 0, r2, c7, c1, 6 @ flush BTAC/BTB Inner Shareable
+#else
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
+#endif
dsb
isb
mov pc, lr