author		Peter Zijlstra <peterz@infradead.org>	2018-10-11 16:51:51 +0200
committer	Ingo Molnar <mingo@kernel.org>	2019-04-03 10:32:44 +0200
commit		a30e32bd79e924f460b8b83408d88af34a402f6d (patch)
tree		93fe3aab6afd5eac92bac803acaafb5d94c87092 /include/asm-generic
parent		5f307be18b32aeff7bbad540c0d3897ecedbeb56 (diff)
asm-generic/tlb: Provide generic tlb_flush() based on flush_tlb_mm()
When an architecture does not have (an efficient) flush_tlb_range(), but
instead always uses full TLB invalidates, the current generic tlb_flush() is
sub-optimal, for it will generate extra flushes in order to keep the range
small.

But if we cannot do range flushes, that is a moot concern.

Optionally provide this simplified default.

No change in behavior intended.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
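[Editor's illustration] To make the trade-off concrete, here is a toy
userspace model of the two defaults. This is purely illustrative, not kernel
code: the struct, the stubs and the printfs are invented for this sketch.
With range tracking, only the gathered [start, end) is flushed; under
MMU_GATHER_NO_RANGE, any non-zero range simply triggers a full-mm flush:

	/* Toy model (userspace, hypothetical) of the two flush strategies. */
	#include <stdio.h>

	struct mmu_gather { unsigned long start, end; };

	/* Stand-ins for the real per-architecture primitives. */
	static void flush_tlb_range(unsigned long s, unsigned long e)
	{
		printf("range flush [%#lx, %#lx)\n", s, e);
	}

	static void flush_tlb_mm(void)
	{
		printf("full mm flush\n");
	}

	/* Default tlb_flush(): flush only what was gathered. */
	static void tlb_flush_ranged(struct mmu_gather *tlb)
	{
		if (tlb->end)
			flush_tlb_range(tlb->start, tlb->end);
	}

	/* MMU_GATHER_NO_RANGE tlb_flush(): any !0 range means a full flush. */
	static void tlb_flush_no_range(struct mmu_gather *tlb)
	{
		if (tlb->end)
			flush_tlb_mm();
	}

	int main(void)
	{
		struct mmu_gather tlb = { .start = 0x1000, .end = 0x5000 };

		tlb_flush_ranged(&tlb);   /* arch has efficient range flushes */
		tlb_flush_no_range(&tlb); /* arch only has full invalidates   */
		return 0;
	}

Architectures opt in via the CONFIG_MMU_GATHER_NO_RANGE Kconfig symbol tested
by the header below; the Kconfig side of the change is outside the
include/asm-generic filter of this view.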
Diffstat (limited to 'include/asm-generic')
-rw-r--r--	include/asm-generic/tlb.h	41
1 file changed, 40 insertions(+), 1 deletion(-)
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index e6a4c407be6c..6850d8bab6d3 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -114,7 +114,8 @@
* returns the smallest TLB entry size unmapped in this range.
*
* If an architecture does not provide tlb_flush() a default implementation
- * based on flush_tlb_range() will be used.
+ * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
+ * specified, in which case we'll default to flush_tlb_mm().
*
* Additionally there are a few opt-in features:
*
@@ -140,6 +141,9 @@
* the page-table pages. Required if you use HAVE_RCU_TABLE_FREE and your
* architecture uses the Linux page-tables natively.
*
+ * MMU_GATHER_NO_RANGE
+ *
+ * Use this if your architecture lacks an efficient flush_tlb_range().
*/
#define HAVE_GENERIC_MMU_GATHER
@@ -302,12 +306,45 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
*/
}
+#ifdef CONFIG_MMU_GATHER_NO_RANGE
+
+#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
+#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
+#endif
+
+/*
+ * When an architecture does not have efficient means of range flushing TLBs
+ * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
+ * range small. We equally don't have to worry about page granularity or other
+ * things.
+ *
+ * All we need to do is issue a full flush for any !0 range.
+ */
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+ if (tlb->end)
+ flush_tlb_mm(tlb->mm);
+}
+
+static inline void
+tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
+
+#define tlb_end_vma tlb_end_vma
+static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
+
+#else /* CONFIG_MMU_GATHER_NO_RANGE */
+
#ifndef tlb_flush
#if defined(tlb_start_vma) || defined(tlb_end_vma)
#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
#endif
+/*
+ * When an architecture does not provide its own tlb_flush() implementation
+ * but does have a reasonably efficient flush_tlb_range() implementation
+ * use that.
+ */
static inline void tlb_flush(struct mmu_gather *tlb)
{
if (tlb->fullmm || tlb->need_flush_all) {
@@ -348,6 +385,8 @@ tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
#endif
+#endif /* CONFIG_MMU_GATHER_NO_RANGE */
+
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
if (!tlb->end)
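
[Editor's illustration] For context, a sketch of how the tlb_flush() hook
above ends up being invoked. It mirrors the unmap path in the mm/ code of
this era (e.g. unmap_region()); it is not compilable on its own, and the
variable names are placeholders:

	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);     /* begin gather; range reset  */
	unmap_vmas(&tlb, vma, start, end);        /* zap PTEs; tlb->end grows   */
	free_pgtables(&tlb, vma, floor, ceiling);
	tlb_finish_mmu(&tlb, start, end);         /* -> tlb_flush_mmu()
	                                           * -> tlb_flush_mmu_tlbonly()
	                                           * -> tlb_flush(), which is
	                                           *    flush_tlb_mm() under
	                                           *    MMU_GATHER_NO_RANGE    */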