-rw-r--r--  arch/arm64/include/asm/tlbflush.h  29
1 file changed, 26 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 3796ea6bb734..73f0ce570fb3 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -98,8 +98,8 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 	dsb(ish);
 }
 
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-					unsigned long start, unsigned long end)
+static inline void __flush_tlb_range(struct vm_area_struct *vma,
+				     unsigned long start, unsigned long end)
 {
 	unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
 	unsigned long addr;
@@ -112,7 +112,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 	dsb(ish);
 }
 
-static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	unsigned long addr;
 	start >>= 12;
@@ -126,6 +126,29 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 }
 
 /*
+ * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
+ * necessarily a performance improvement.
+ */
+#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
+{
+	if ((end - start) <= MAX_TLB_RANGE)
+		__flush_tlb_range(vma, start, end);
+	else
+		flush_tlb_mm(vma->vm_mm);
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	if ((end - start) <= MAX_TLB_RANGE)
+		__flush_tlb_kernel_range(start, end);
+	else
+		flush_tlb_all();
+}
+
+/*
  * On AArch64, the cache coherency is handled via the set_pte_at() function.
  */
 static inline void update_mmu_cache(struct vm_area_struct *vma,
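
The arithmetic behind MAX_TLB_RANGE: with the common 4K granule (PAGE_SHIFT = 12), 1024UL << PAGE_SHIFT works out to 1024 pages, i.e. 4 MiB. Any range wider than that takes the single full-flush path (flush_tlb_mm() or flush_tlb_all()) rather than issuing one TLBI per page for long enough to trip the soft-lockup watchdog. Below is a minimal user-space sketch of the dispatch, assuming a 4K granule; flush_strategy() is a hypothetical helper added here for illustration, not part of the patch:

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumption: 4K granule */
#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)	/* 1024 pages = 4 MiB */

/* Hypothetical helper mirroring the dispatch in flush_tlb_range() above. */
static const char *flush_strategy(unsigned long start, unsigned long end)
{
	return (end - start) <= MAX_TLB_RANGE ? "per-page TLBI loop"
					      : "full flush";
}

int main(void)
{
	printf("MAX_TLB_RANGE = %lu bytes (%lu pages)\n",
	       MAX_TLB_RANGE, MAX_TLB_RANGE >> PAGE_SHIFT);
	printf("2 MiB range: %s\n", flush_strategy(0, 2UL << 20));
	printf("8 MiB range: %s\n", flush_strategy(0, 8UL << 20));
	return 0;
}

With a 64K granule (PAGE_SHIFT = 16) the same expression yields a 64 MiB threshold, so the per-page loop stays capped at 1024 iterations either way.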