author		Alex Shi <alex.shi@intel.com>	2012-06-28 09:02:24 +0800
committer	H. Peter Anvin <hpa@zytor.com>	2012-06-27 19:29:14 -0700
commit		effee4b9b3b0aa5770bcd98de5f672b05b27703c (patch)
tree		b167657ec2ba05797b925a93e7e1b45222ac5ac3 /arch/x86/mm/tlb.c
parent		52aec3308db85f4e9f5c8b9f5dc4fbd0138c6fa4 (diff)
x86/tlb: do flush_tlb_kernel_range by 'invlpg'
This patch makes flush_tlb_kernel_range() flush the range one page at a time with 'invlpg'. The performance cost and benefit were analyzed in a previous patch (x86/flush_tlb: try flush_tlb_single one by one in flush_tlb_range), with test results at http://lkml.org/lkml/2012/6/21/10.

The cost is mostly hidden by the long kernel path, but the benefit is still clear: memory access throughput in the user application can improve by 30+% while the kernel executes this function.

Signed-off-by: Alex Shi <alex.shi@intel.com>
Link: http://lkml.kernel.org/r/1340845344-27557-10-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
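For readers skimming the diff below: the new flush_tlb_kernel_range() falls back to a global flush when the range spans more 4K pages than the last-level TLB can usefully cache (act_entries >> tlb_flushall_shift); otherwise it flushes page by page with 'invlpg'. The following stand-alone C sketch mirrors only that decision, assuming an illustrative 512-entry last-level TLB and a flushall shift of 6; both values are hypothetical and are not taken from this patch (the kernel derives them from CPUID-based tables).

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define TLB_FLUSH_ALL	(~0UL)

/* Illustrative values only; not read from this patch or from hardware. */
static unsigned tlb_lld_4k_entries = 512;
static int tlb_flushall_shift = 6;

/* Mirror of the range-vs-full-flush decision in flush_tlb_kernel_range(). */
static bool use_full_flush(unsigned long start, unsigned long end)
{
	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1)
		return true;
	return ((end - start) >> PAGE_SHIFT) >
	       (tlb_lld_4k_entries >> tlb_flushall_shift);
}

int main(void)
{
	/* 512 >> 6 == 8 pages is the cut-over point with these assumed values. */
	printf("4 pages  -> %s\n",
	       use_full_flush(0, 4UL << PAGE_SHIFT) ? "full flush" : "invlpg loop");
	printf("32 pages -> %s\n",
	       use_full_flush(0, 32UL << PAGE_SHIFT) ? "full flush" : "invlpg loop");
	return 0;
}

With those assumed values, ranges of up to 8 pages take the per-page 'invlpg' path and anything larger falls back to the global flush.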
Diffstat (limited to 'arch/x86/mm/tlb.c')
-rw-r--r--	arch/x86/mm/tlb.c	30
1 file changed, 30 insertions(+), 0 deletions(-)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 2b5f506a7655..613cd83e8c0c 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -264,6 +264,36 @@ void flush_tlb_all(void)
 	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
 
+static void do_kernel_range_flush(void *info)
+{
+	struct flush_tlb_info *f = info;
+	unsigned long addr;
+
+	/* flush range by one by one 'invlpg' */
+	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
+		__flush_tlb_single(addr);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	unsigned act_entries;
+	struct flush_tlb_info info;
+
+	/* In modern CPU, last level tlb used for both data/ins */
+	act_entries = tlb_lld_4k[ENTRIES];
+
+	/* Balance as user space task's flush, a bit conservative */
+	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1 ||
+		(end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
+
+		on_each_cpu(do_flush_tlb_all, NULL, 1);
+	else {
+		info.flush_start = start;
+		info.flush_end = end;
+		on_each_cpu(do_kernel_range_flush, &info, 1);
+	}
+}
+
 #ifdef CONFIG_DEBUG_TLBFLUSH
 static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
 			     size_t count, loff_t *ppos)