author     David Howells <dhowells@redhat.com>  2010-10-27 17:28:49 +0100
committer  David Howells <dhowells@redhat.com>  2010-10-27 17:28:49 +0100
commit     492e675116003b99dfcf0fa70084027e86bc0161 (patch)
tree       c799a3c3f3226700e44dfe323f7c3f1764755049
parent     8f19e3daf3fffee9e18a8812067a6a4b538ae6c8 (diff)
MN10300: Rename __flush_tlb*() to local_flush_tlb*()
Rename __flush_tlb*() to local_flush_tlb*() as that is more appropriate, and it leaves the names ready to differentiate local from global TLB flushes when SMP support is introduced.

Whilst we're at it, get rid of __flush_tlb_global() and make local_flush_tlb_page() take an mm_struct pointer rather than a VMA pointer.

Signed-off-by: David Howells <dhowells@redhat.com>
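For callers, the visible change is the local_ prefix and the new signature of the page flush. Below is a minimal sketch of what that looks like at a call site, assuming only the definitions added by this patch; flush_one_user_tlb_entry() is a hypothetical helper, not something in the tree:

#include <linux/mm_types.h>	/* struct mm_struct */
#include <asm/tlbflush.h>	/* local_flush_tlb_page(), flush_tlb_page() */

static void flush_one_user_tlb_entry(struct mm_struct *mm, unsigned long addr)
{
	/*
	 * Old API: the per-CPU flush was spelt __flush_tlb_one(), and the
	 * page flush was an extern function that required a whole VMA:
	 *
	 *	flush_tlb_page(vma, addr);
	 *
	 * New API: the local flush needs only the mm_struct, so a caller
	 * that never had a VMA to hand can flush the entry directly:
	 */
	local_flush_tlb_page(mm, addr);
}

Callers that do hold a VMA keep using flush_tlb_page(vma, addr), which is now a macro expanding to local_flush_tlb_page((vma)->vm_mm, addr), as the tlbflush.h hunk below shows.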
-rw-r--r--  arch/mn10300/include/asm/highmem.h       4
-rw-r--r--  arch/mn10300/include/asm/mmu_context.h   2
-rw-r--r--  arch/mn10300/include/asm/tlbflush.h      56
-rw-r--r--  arch/mn10300/mm/init.c                   2
-rw-r--r--  arch/mn10300/mm/mmu-context.c            4
-rw-r--r--  arch/mn10300/mm/pgtable.c                2
6 files changed, 42 insertions, 28 deletions
diff --git a/arch/mn10300/include/asm/highmem.h b/arch/mn10300/include/asm/highmem.h
index f577ba2268ca..3817d9f34e72 100644
--- a/arch/mn10300/include/asm/highmem.h
+++ b/arch/mn10300/include/asm/highmem.h
@@ -87,7 +87,7 @@ static inline unsigned long __kmap_atomic(struct page *page)
BUG();
#endif
set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
- __flush_tlb_one(vaddr);
+ local_flush_tlb_one(vaddr);
return vaddr;
}
@@ -116,7 +116,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
* this pte without first remap it
*/
pte_clear(kmap_pte - idx);
- __flush_tlb_one(vaddr);
+ local_flush_tlb_one(vaddr);
}
#endif
pagefault_enable();
diff --git a/arch/mn10300/include/asm/mmu_context.h b/arch/mn10300/include/asm/mmu_context.h
index cb294c244de3..24d63f0f7377 100644
--- a/arch/mn10300/include/asm/mmu_context.h
+++ b/arch/mn10300/include/asm/mmu_context.h
@@ -58,7 +58,7 @@ static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
if (!(mc & MMU_CONTEXT_TLBPID_MASK)) {
/* we exhausted the TLB PIDs of this version on this CPU, so we
* flush this CPU's TLB in its entirety and start new cycle */
- flush_tlb_all();
+ local_flush_tlb_all();
/* fix the TLB version if needed (we avoid version #0 so as to
* distingush MMU_NO_CONTEXT) */
diff --git a/arch/mn10300/include/asm/tlbflush.h b/arch/mn10300/include/asm/tlbflush.h
index 1a7e29281c5d..5d54bf57e6c3 100644
--- a/arch/mn10300/include/asm/tlbflush.h
+++ b/arch/mn10300/include/asm/tlbflush.h
@@ -13,21 +13,37 @@
#include <asm/processor.h>
-#define __flush_tlb() \
-do { \
- int w; \
- __asm__ __volatile__ \
- (" mov %1,%0 \n" \
- " or %2,%0 \n" \
- " mov %0,%1 \n" \
- : "=d"(w) \
- : "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV) \
- : "cc", "memory" \
- ); \
-} while (0)
+/**
+ * local_flush_tlb - Flush the current MM's entries from the local CPU's TLBs
+ */
+static inline void local_flush_tlb(void)
+{
+ int w;
+ asm volatile(
+ " mov %1,%0 \n"
+ " or %2,%0 \n"
+ " mov %0,%1 \n"
+ : "=d"(w)
+ : "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)
+ : "cc", "memory");
+}
-#define __flush_tlb_all() __flush_tlb()
-#define __flush_tlb_one(addr) __flush_tlb()
+/**
+ * local_flush_tlb_all - Flush all entries from the local CPU's TLBs
+ */
+#define local_flush_tlb_all() local_flush_tlb()
+
+/**
+ * local_flush_tlb_one - Flush one entry from the local CPU's TLBs
+ */
+#define local_flush_tlb_one(addr) local_flush_tlb()
+
+/**
+ * local_flush_tlb_page - Flush a page's entry from the local CPU's TLBs
+ * @mm: The MM to flush for
+ * @addr: The address of the target page in RAM (not its page struct)
+ */
+extern void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr);
/*
@@ -43,14 +59,14 @@ do { \
#define flush_tlb_all() \
do { \
preempt_disable(); \
- __flush_tlb_all(); \
+ local_flush_tlb_all(); \
preempt_enable(); \
} while (0)
#define flush_tlb_mm(mm) \
do { \
preempt_disable(); \
- __flush_tlb_all(); \
+ local_flush_tlb_all(); \
preempt_enable(); \
} while (0)
@@ -59,13 +75,13 @@ do { \
unsigned long __s __attribute__((unused)) = (start); \
unsigned long __e __attribute__((unused)) = (end); \
preempt_disable(); \
- __flush_tlb_all(); \
+ local_flush_tlb_all(); \
preempt_enable(); \
} while (0)
+#define flush_tlb_page(vma, addr) local_flush_tlb_page((vma)->vm_mm, addr)
+#define flush_tlb() flush_tlb_all()
-#define __flush_tlb_global() flush_tlb_all()
-#define flush_tlb() flush_tlb_all()
#define flush_tlb_kernel_range(start, end) \
do { \
unsigned long __s __attribute__((unused)) = (start); \
@@ -73,8 +89,6 @@ do { \
flush_tlb_all(); \
} while (0)
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
-
#define flush_tlb_pgtables(mm, start, end) do {} while (0)
#endif /* _ASM_TLBFLUSH_H */
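The local_ prefix is what makes room for SMP later: the generic flush_tlb_*() macros can then be rebuilt on top of the per-CPU primitives plus a cross-CPU call. A hypothetical sketch of that layering, using the kernel's on_each_cpu() helper; none of it is in this patch, which still only disables preemption, and smp_flush_tlb_all() is an invented name:

#include <linux/smp.h>		/* on_each_cpu() */
#include <asm/tlbflush.h>	/* local_flush_tlb_all() */

static void ipi_flush_tlb_all(void *unused)
{
	/* Runs on each CPU in turn: flush that CPU's own TLBs. */
	local_flush_tlb_all();
}

static void smp_flush_tlb_all(void)
{
	/* Run the local flush on every online CPU and wait for completion. */
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}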
diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c
index f86c28315a8e..1daf97fd7c99 100644
--- a/arch/mn10300/mm/init.c
+++ b/arch/mn10300/mm/init.c
@@ -73,7 +73,7 @@ void __init paging_init(void)
/* pass the memory from the bootmem allocator to the main allocator */
free_area_init(zones_size);
- __flush_tlb_all();
+ local_flush_tlb_all();
}
/*
diff --git a/arch/mn10300/mm/mmu-context.c b/arch/mn10300/mm/mmu-context.c
index 36ba02191d40..3d83966e30e1 100644
--- a/arch/mn10300/mm/mmu-context.c
+++ b/arch/mn10300/mm/mmu-context.c
@@ -23,7 +23,7 @@ unsigned long mmu_context_cache[NR_CPUS] = {
/*
* flush the specified TLB entry
*/
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr)
{
unsigned long pteu, cnx, flags;
@@ -33,7 +33,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
* interference from vmalloc'd regions */
local_irq_save(flags);
- cnx = mm_context(vma->vm_mm);
+ cnx = mm_context(mm);
if (cnx != MMU_NO_CONTEXT) {
pteu = addr | (cnx & 0x000000ffUL);
diff --git a/arch/mn10300/mm/pgtable.c b/arch/mn10300/mm/pgtable.c
index 9c1624c9e4e9..450f7ba3f8f2 100644
--- a/arch/mn10300/mm/pgtable.c
+++ b/arch/mn10300/mm/pgtable.c
@@ -59,7 +59,7 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
* It's enough to flush this one mapping.
* (PGE mappings get flushed as well)
*/
- __flush_tlb_one(vaddr);
+ local_flush_tlb_one(vaddr);
}
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)