author     Thomas Gleixner <tglx@linutronix.de>      2020-04-21 11:20:30 +0200
committer  Borislav Petkov <bp@suse.de>               2020-04-24 19:01:17 +0200
commit     cb2a02355b042ec3ef11d0ba2a46742678e41632 (patch)
tree       618c7b32b616ef12f39858249b6f67581dd16029 /arch/x86/events/core.c
parent     d8f0b35331c4423e033f81f10eb5e0c7e4e1dcec (diff)
x86/cr4: Sanitize CR4.PCE update
load_mm_cr4_irqsoff() is really a strange name for a function which has only
one purpose: Update the CR4.PCE bit depending on the perf state.

Rename it to update_cr4_pce_mm(), move it into the tlb code and provide a
function which can be invoked by the perf smp function calls.

Another step to remove exposure of cpu_tlbstate.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200421092559.049499158@linutronix.de
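For reference, a minimal sketch of what the tlb-side counterpart might look like after this change. Only the caller, cr4_update_pce(), is visible in the diff below; the helper name update_cr4_pce_mm() is taken from the commit message, and its body (using cr4_set_bits_irqsoff()/cr4_clear_bits_irqsoff() and rdpmc_always_available_key) is an assumption, not the actual arch/x86/mm/tlb.c implementation:

/*
 * Sketch only -- the real body lives in arch/x86/mm/tlb.c and is not part
 * of this diff; the condition and helper calls are assumptions.
 */
static inline void update_cr4_pce_mm(struct mm_struct *mm)
{
	/* CR4.PCE allows user space RDPMC; set it when perf permits it */
	if (static_branch_unlikely(&rdpmc_always_available_key) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits_irqsoff(X86_CR4_PCE);
	else
		cr4_clear_bits_irqsoff(X86_CR4_PCE);
}

/* SMP function call target used by the perf code in the diff below */
void cr4_update_pce(void *ignored)
{
	update_cr4_pce_mm(this_cpu_read(cpu_tlbstate.loaded_mm));
}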
Diffstat (limited to 'arch/x86/events/core.c')
-rw-r--r--  arch/x86/events/core.c  11
1 file changed, 3 insertions(+), 8 deletions(-)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index a619763e96e1..30d2b1d3e94c 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2162,11 +2162,6 @@ static int x86_pmu_event_init(struct perf_event *event)
return err;
}
-static void refresh_pce(void *ignored)
-{
- load_mm_cr4_irqsoff(this_cpu_read(cpu_tlbstate.loaded_mm));
-}
-
static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
{
if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
@@ -2185,7 +2180,7 @@ static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
lockdep_assert_held_write(&mm->mmap_sem);
if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
- on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
+ on_each_cpu_mask(mm_cpumask(mm), cr4_update_pce, NULL, 1);
}
static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
@@ -2195,7 +2190,7 @@ static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *m
return;
if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
- on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
+ on_each_cpu_mask(mm_cpumask(mm), cr4_update_pce, NULL, 1);
}
static int x86_pmu_event_idx(struct perf_event *event)
@@ -2253,7 +2248,7 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
else if (x86_pmu.attr_rdpmc == 2)
static_branch_dec(&rdpmc_always_available_key);
- on_each_cpu(refresh_pce, NULL, 1);
+ on_each_cpu(cr4_update_pce, NULL, 1);
x86_pmu.attr_rdpmc = val;
}