author		Thomas Gleixner <tglx@linutronix.de>	2020-04-21 11:20:34 +0200
committer	Borislav Petkov <bp@suse.de>		2020-04-26 11:00:29 +0200
commit		127ac915c8e1c11b8209393e700ca16be0efabe8
tree		a10211e3901cb5d272e9af4e0a6c8b773442d0e3	/arch/x86/mm/tlb.c
parent		cd30d26cf307b45159cd629d60b989e582372afe
x86/tlb: Move __flush_tlb_one_user() out of line
cpu_tlbstate is exported because various TLB-related functions need access
to it, but cpu_tlbstate contains sensitive information which should only be
accessed by well-contained kernel functions and not be directly exposed to
modules.

As a third step, move __flush_tlb_one_user() out of line and hide the
native function. The latter can be static when CONFIG_PARAVIRT is
disabled.

Consolidate the name space while at it and remove the pointless extra
wrapper in the paravirt code.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200421092559.428213098@linutronix.de
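The mechanics of the hunks below can be condensed into a short sketch. This
is an illustration of the STATIC_NOPV pattern as this patch uses it, not
verbatim file contents: with CONFIG_PARAVIRT disabled, the native function
becomes static and __flush_tlb_one_user() resolves straight to it; with
paravirt enabled, __flush_tlb_one_user() dispatches through the paravirt
hooks and the native symbol stays global so the paravirt code can install
it. Callers now go through the out-of-line flush_tlb_one_user() wrapper,
which is what lets cpu_tlbstate stay private to well-contained kernel code.

/*
 * Condensed sketch of the out-of-line pattern; details elided.
 */
#ifdef CONFIG_PARAVIRT
# define STATIC_NOPV
/* __flush_tlb_one_user() comes from paravirt.h and dispatches via pv_ops */
#else
# define STATIC_NOPV			static
# define __flush_tlb_one_user(addr)	native_flush_tlb_one_user(addr)
#endif

/* static when !CONFIG_PARAVIRT, a regular global symbol otherwise */
STATIC_NOPV void native_flush_tlb_one_user(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
	/* PTI/INVPCID handling elided; see the full hunk below */
}

/* the single out-of-line entry point callers use from now on */
void flush_tlb_one_user(unsigned long addr)
{
	__flush_tlb_one_user(addr);
}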
Diffstat (limited to 'arch/x86/mm/tlb.c')
-rw-r--r--	arch/x86/mm/tlb.c	56
1 file changed, 55 insertions(+), 1 deletion(-)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index d548b98e5a49..2822602ce60a 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -24,6 +24,7 @@
# define STATIC_NOPV static
# define __flush_tlb_local native_flush_tlb_local
# define __flush_tlb_global native_flush_tlb_global
+# define __flush_tlb_one_user(addr) native_flush_tlb_one_user(addr)
#endif

/*
@@ -118,6 +119,32 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
	*need_flush = true;
}

+/*
+ * Given an ASID, flush the corresponding user ASID. We can delay this
+ * until the next time we switch to it.
+ *
+ * See SWITCH_TO_USER_CR3.
+ */
+static inline void invalidate_user_asid(u16 asid)
+{
+	/* There is no user ASID if address space separation is off */
+	if (!IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
+		return;
+
+	/*
+	 * We only have a single ASID if PCID is off and the CR3
+	 * write will have flushed it.
+	 */
+	if (!cpu_feature_enabled(X86_FEATURE_PCID))
+		return;
+
+	if (!static_cpu_has(X86_FEATURE_PTI))
+		return;
+
+	__set_bit(kern_pcid(asid),
+		  (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
+}
+
static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, bool need_flush)
{
	unsigned long new_mm_cr3;
@@ -645,7 +672,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
		unsigned long addr = f->start;

		while (addr < f->end) {
-			__flush_tlb_one_user(addr);
+			flush_tlb_one_user(addr);
			addr += 1UL << f->stride_shift;
		}
		if (local)
@@ -892,6 +919,33 @@ unsigned long __get_current_cr3_fast(void)
EXPORT_SYMBOL_GPL(__get_current_cr3_fast);

/*
+ * Flush one page in the user mapping
+ */
+STATIC_NOPV void native_flush_tlb_one_user(unsigned long addr)
+{
+	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+
+	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
+
+	if (!static_cpu_has(X86_FEATURE_PTI))
+		return;
+
+	/*
+	 * Some platforms #GP if we call invpcid(type=1/2) before CR4.PCIDE=1.
+	 * Just use invalidate_user_asid() in case we are called early.
+	 */
+	if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE))
+		invalidate_user_asid(loaded_mm_asid);
+	else
+		invpcid_flush_one(user_pcid(loaded_mm_asid), addr);
+}
+
+void flush_tlb_one_user(unsigned long addr)
+{
+	__flush_tlb_one_user(addr);
+}
+
+/*
* Flush everything
*/
STATIC_NOPV void native_flush_tlb_global(void)
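For context on the deferred flush that invalidate_user_asid() records above:
the bit set in cpu_tlbstate.user_pcid_flush_mask is consumed on the
exit-to-user path, where the SWITCH_TO_USER_CR3 assembly decides whether the
CR3 write may carry the PCID no-flush hint. The following is a rough C
rendering of that idea only; the real logic is assembly in
arch/x86/entry/calling.h, and build_user_cr3() is a hypothetical stand-in
helper used just for this sketch.

/*
 * Illustrative only: what SWITCH_TO_USER_CR3 conceptually does with the
 * bit set by invalidate_user_asid(). The real implementation is asm;
 * build_user_cr3() is a hypothetical helper for this sketch.
 */
static void switch_to_user_cr3_sketch(u16 asid)
{
	unsigned long *mask =
		(unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask);
	unsigned long cr3 = build_user_cr3(asid);	/* hypothetical */

	if (test_and_clear_bit(kern_pcid(asid), mask)) {
		/* A flush was deferred for this ASID: plain CR3 write flushes it */
		write_cr3(cr3);
	} else {
		/* No flush pending: keep the user TLB entries alive */
		write_cr3(cr3 | X86_CR3_PCID_NOFLUSH);
	}
}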