author	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-29 13:55:30 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-29 13:55:30 -0700
commit	a6408f6cb63ac0958fee7dbce7861ffb540d8a49 (patch)
tree	c94a835d343974171951e3b805e6bbbb02852ebc /arch/x86/events
parent	1a81a8f2a5918956e214bb718099a89e500e7ec5 (diff)
parent	4fae16dffb812f0e0d98a0b2b0856ca48ca63e6c (diff)
Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull smp hotplug updates from Thomas Gleixner:
 "This is the next part of the hotplug rework.

   - Convert all notifiers with a priority assigned

   - Convert all CPU_STARTING/DYING notifiers

     The final removal of the STARTING/DYING infrastructure will happen
     when the merge window closes.

  Another 700 lines of impenetrable maze gone :)"

* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits)
  timers/core: Correct callback order during CPU hot plug
  leds/trigger/cpu: Move from CPU_STARTING to ONLINE level
  powerpc/numa: Convert to hotplug state machine
  arm/perf: Fix hotplug state machine conversion
  irqchip/armada: Avoid unused function warnings
  ARC/time: Convert to hotplug state machine
  clocksource/atlas7: Convert to hotplug state machine
  clocksource/armada-370-xp: Convert to hotplug state machine
  clocksource/exynos_mct: Convert to hotplug state machine
  clocksource/arm_global_timer: Convert to hotplug state machine
  rcu: Convert rcutree to hotplug state machine
  KVM/arm/arm64/vgic-new: Convert to hotplug state machine
  smp/cfd: Convert core to hotplug state machine
  x86/x2apic: Convert to CPU hotplug state machine
  profile: Convert to hotplug state machine
  timers/core: Convert to hotplug state machine
  hrtimer: Convert to hotplug state machine
  x86/tboot: Convert to hotplug state machine
  arm64/armv8 deprecated: Convert to hotplug state machine
  hwtracing/coresight-etm4x: Convert to hotplug state machine
  ...
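The conversions in the diff below all follow the same pattern: the old multiplexing CPU notifier (switching on CPU_UP_PREPARE, CPU_STARTING, CPU_ONLINE, CPU_DOWN_PREPARE, CPU_DYING, ...) is split into small per-state callbacks registered once with cpuhp_setup_state(), after which the hotplug core invokes them for every CPU that comes up or goes down, including CPUs already online at registration time. A minimal sketch of that pattern for a hypothetical driver follows; the mydrv_* names are illustrative only, and the dynamic CPUHP_AP_ONLINE_DYN slot is used here instead of the dedicated CPUHP_AP_PERF_X86_* states that this series adds for the perf drivers.

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

static enum cpuhp_state mydrv_hp_state;

static int mydrv_online_cpu(unsigned int cpu)
{
	/* per-CPU setup that used to run under CPU_ONLINE / CPU_DOWN_FAILED */
	return 0;		/* a negative errno aborts the CPU bringup */
}

static int mydrv_offline_cpu(unsigned int cpu)
{
	/* per-CPU teardown that used to run under CPU_DOWN_PREPARE */
	return 0;
}

static int __init mydrv_init(void)
{
	int ret;

	/*
	 * For CPUHP_AP_ONLINE_DYN the core allocates a state dynamically
	 * and returns its number, which is needed later for removal.
	 * The perf drivers converted below request fixed states instead
	 * and get 0 back on success.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_MYDRV_ONLINE",
				mydrv_online_cpu, mydrv_offline_cpu);
	if (ret < 0)
		return ret;
	mydrv_hp_state = ret;
	return 0;
}

static void __exit mydrv_exit(void)
{
	cpuhp_remove_state(mydrv_hp_state);
}

module_init(mydrv_init);
module_exit(mydrv_exit);
MODULE_LICENSE("GPL");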
Diffstat (limited to 'arch/x86/events')
-rw-r--r--   arch/x86/events/amd/core.c       |   6
-rw-r--r--   arch/x86/events/amd/ibs.c        |  64
-rw-r--r--   arch/x86/events/amd/power.c      |  60
-rw-r--r--   arch/x86/events/amd/uncore.c     | 122
-rw-r--r--   arch/x86/events/core.c           | 103
-rw-r--r--   arch/x86/events/intel/core.c     |   4
-rw-r--r--   arch/x86/events/intel/cqm.c      |  49
-rw-r--r--   arch/x86/events/intel/cstate.c   |  51
-rw-r--r--   arch/x86/events/intel/rapl.c     |  84
-rw-r--r--   arch/x86/events/intel/uncore.c   | 133

10 files changed, 246 insertions(+), 430 deletions(-)
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index bd3e8421b57c..e07a22bb9308 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -370,13 +370,13 @@ static int amd_pmu_cpu_prepare(int cpu)
WARN_ON_ONCE(cpuc->amd_nb);
if (!x86_pmu.amd_nb_constraints)
- return NOTIFY_OK;
+ return 0;
cpuc->amd_nb = amd_alloc_nb(cpu);
if (!cpuc->amd_nb)
- return NOTIFY_BAD;
+ return -ENOMEM;
- return NOTIFY_OK;
+ return 0;
}
static void amd_pmu_cpu_starting(int cpu)
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 72dea2f40fc4..155ea5324ae0 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -725,13 +725,10 @@ static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
return ret;
}
-static __init int perf_event_ibs_init(void)
+static __init void perf_event_ibs_init(void)
{
struct attribute **attr = ibs_op_format_attrs;
- if (!ibs_caps)
- return -ENODEV; /* ibs not supported by the cpu */
-
perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
if (ibs_caps & IBS_CAPS_OPCNT) {
@@ -742,13 +739,11 @@ static __init int perf_event_ibs_init(void)
register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps);
-
- return 0;
}
#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */
-static __init int perf_event_ibs_init(void) { return 0; }
+static __init void perf_event_ibs_init(void) { }
#endif
@@ -925,7 +920,7 @@ static inline int get_ibs_lvt_offset(void)
return val & IBSCTL_LVT_OFFSET_MASK;
}
-static void setup_APIC_ibs(void *dummy)
+static void setup_APIC_ibs(void)
{
int offset;
@@ -940,7 +935,7 @@ failed:
smp_processor_id());
}
-static void clear_APIC_ibs(void *dummy)
+static void clear_APIC_ibs(void)
{
int offset;
@@ -949,18 +944,24 @@ static void clear_APIC_ibs(void *dummy)
setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}
+static int x86_pmu_amd_ibs_starting_cpu(unsigned int cpu)
+{
+ setup_APIC_ibs();
+ return 0;
+}
+
#ifdef CONFIG_PM
static int perf_ibs_suspend(void)
{
- clear_APIC_ibs(NULL);
+ clear_APIC_ibs();
return 0;
}
static void perf_ibs_resume(void)
{
ibs_eilvt_setup();
- setup_APIC_ibs(NULL);
+ setup_APIC_ibs();
}
static struct syscore_ops perf_ibs_syscore_ops = {
@@ -979,27 +980,15 @@ static inline void perf_ibs_pm_init(void) { }
#endif
-static int
-perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+static int x86_pmu_amd_ibs_dying_cpu(unsigned int cpu)
{
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_STARTING:
- setup_APIC_ibs(NULL);
- break;
- case CPU_DYING:
- clear_APIC_ibs(NULL);
- break;
- default:
- break;
- }
-
- return NOTIFY_OK;
+ clear_APIC_ibs();
+ return 0;
}
static __init int amd_ibs_init(void)
{
u32 caps;
- int ret = -EINVAL;
caps = __get_ibs_caps();
if (!caps)
@@ -1008,22 +997,25 @@ static __init int amd_ibs_init(void)
ibs_eilvt_setup();
if (!ibs_eilvt_valid())
- goto out;
+ return -EINVAL;
perf_ibs_pm_init();
- cpu_notifier_register_begin();
+
ibs_caps = caps;
/* make ibs_caps visible to other cpus: */
smp_mb();
- smp_call_function(setup_APIC_ibs, NULL, 1);
- __perf_cpu_notifier(perf_ibs_cpu_notifier);
- cpu_notifier_register_done();
+ /*
+ * x86_pmu_amd_ibs_starting_cpu will be called from core on
+ * all online cpus.
+ */
+ cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
+ "AP_PERF_X86_AMD_IBS_STARTING",
+ x86_pmu_amd_ibs_starting_cpu,
+ x86_pmu_amd_ibs_dying_cpu);
- ret = perf_event_ibs_init();
-out:
- if (ret)
- pr_err("Failed to setup IBS, %d\n", ret);
- return ret;
+ perf_event_ibs_init();
+
+ return 0;
}
/* Since we need the pci subsystem to init ibs we can't do this earlier: */
diff --git a/arch/x86/events/amd/power.c b/arch/x86/events/amd/power.c
index 55a3529dbf12..9842270ed2f2 100644
--- a/arch/x86/events/amd/power.c
+++ b/arch/x86/events/amd/power.c
@@ -228,12 +228,12 @@ static struct pmu pmu_class = {
.read = pmu_event_read,
};
-static void power_cpu_exit(int cpu)
+static int power_cpu_exit(unsigned int cpu)
{
int target;
if (!cpumask_test_and_clear_cpu(cpu, &cpu_mask))
- return;
+ return 0;
/*
* Find a new CPU on the same compute unit, if was set in cpumask
@@ -245,9 +245,10 @@ static void power_cpu_exit(int cpu)
cpumask_set_cpu(target, &cpu_mask);
perf_pmu_migrate_context(&pmu_class, cpu, target);
}
+ return 0;
}
-static void power_cpu_init(int cpu)
+static int power_cpu_init(unsigned int cpu)
{
int target;
@@ -255,7 +256,7 @@ static void power_cpu_init(int cpu)
* 1) If any CPU is set at cpu_mask in the same compute unit, do
* nothing.
* 2) If no CPU is set at cpu_mask in the same compute unit,
- * set current STARTING CPU.
+ * set current ONLINE CPU.
*
* Note: if there is a CPU aside of the new one already in the
* sibling mask, then it is also in cpu_mask.
@@ -263,33 +264,9 @@ static void power_cpu_init(int cpu)
target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
if (target >= nr_cpumask_bits)
cpumask_set_cpu(cpu, &cpu_mask);
+ return 0;
}
-static int
-power_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
-{
- unsigned int cpu = (long)hcpu;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DOWN_FAILED:
- case CPU_STARTING:
- power_cpu_init(cpu);
- break;
- case CPU_DOWN_PREPARE:
- power_cpu_exit(cpu);
- break;
- default:
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block power_cpu_notifier_nb = {
- .notifier_call = power_cpu_notifier,
- .priority = CPU_PRI_PERF,
-};
-
static const struct x86_cpu_id cpu_match[] = {
{ .vendor = X86_VENDOR_AMD, .family = 0x15 },
{},
@@ -297,7 +274,7 @@ static const struct x86_cpu_id cpu_match[] = {
static int __init amd_power_pmu_init(void)
{
- int cpu, target, ret;
+ int ret;
if (!x86_match_cpu(cpu_match))
return 0;
@@ -312,38 +289,25 @@ static int __init amd_power_pmu_init(void)
return -ENODEV;
}
- cpu_notifier_register_begin();
- /* Choose one online core of each compute unit. */
- for_each_online_cpu(cpu) {
- target = cpumask_first(topology_sibling_cpumask(cpu));
- if (!cpumask_test_cpu(target, &cpu_mask))
- cpumask_set_cpu(target, &cpu_mask);
- }
+ cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
+ "AP_PERF_X86_AMD_POWER_ONLINE",
+ power_cpu_init, power_cpu_exit);
ret = perf_pmu_register(&pmu_class, "power", -1);
if (WARN_ON(ret)) {
pr_warn("AMD Power PMU registration failed\n");
- goto out;
+ return ret;
}
- __register_cpu_notifier(&power_cpu_notifier_nb);
-
pr_info("AMD Power PMU detected\n");
-
-out:
- cpu_notifier_register_done();
-
return ret;
}
module_init(amd_power_pmu_init);
static void __exit amd_power_pmu_exit(void)
{
- cpu_notifier_register_begin();
- __unregister_cpu_notifier(&power_cpu_notifier_nb);
- cpu_notifier_register_done();
-
+ cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_AMD_POWER_ONLINE);
perf_pmu_unregister(&pmu_class);
}
module_exit(amd_power_pmu_exit);
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 98ac57381bf9..e6131d4454e6 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -358,7 +358,7 @@ amd_uncore_find_online_sibling(struct amd_uncore *this,
return this;
}
-static void amd_uncore_cpu_starting(unsigned int cpu)
+static int amd_uncore_cpu_starting(unsigned int cpu)
{
unsigned int eax, ebx, ecx, edx;
struct amd_uncore *uncore;
@@ -384,6 +384,8 @@ static void amd_uncore_cpu_starting(unsigned int cpu)
uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_l2);
*per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
}
+
+ return 0;
}
static void uncore_online(unsigned int cpu,
@@ -398,13 +400,15 @@ static void uncore_online(unsigned int cpu,
cpumask_set_cpu(cpu, uncore->active_mask);
}
-static void amd_uncore_cpu_online(unsigned int cpu)
+static int amd_uncore_cpu_online(unsigned int cpu)
{
if (amd_uncore_nb)
uncore_online(cpu, amd_uncore_nb);
if (amd_uncore_l2)
uncore_online(cpu, amd_uncore_l2);
+
+ return 0;
}
static void uncore_down_prepare(unsigned int cpu,
@@ -433,13 +437,15 @@ static void uncore_down_prepare(unsigned int cpu,
}
}
-static void amd_uncore_cpu_down_prepare(unsigned int cpu)
+static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
if (amd_uncore_nb)
uncore_down_prepare(cpu, amd_uncore_nb);
if (amd_uncore_l2)
uncore_down_prepare(cpu, amd_uncore_l2);
+
+ return 0;
}
static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
@@ -454,74 +460,19 @@ static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
*per_cpu_ptr(uncores, cpu) = NULL;
}
-static void amd_uncore_cpu_dead(unsigned int cpu)
+static int amd_uncore_cpu_dead(unsigned int cpu)
{
if (amd_uncore_nb)
uncore_dead(cpu, amd_uncore_nb);
if (amd_uncore_l2)
uncore_dead(cpu, amd_uncore_l2);
-}
-
-static int
-amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
- void *hcpu)
-{
- unsigned int cpu = (long)hcpu;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_UP_PREPARE:
- if (amd_uncore_cpu_up_prepare(cpu))
- return notifier_from_errno(-ENOMEM);
- break;
-
- case CPU_STARTING:
- amd_uncore_cpu_starting(cpu);
- break;
-
- case CPU_ONLINE:
- amd_uncore_cpu_online(cpu);
- break;
-
- case CPU_DOWN_PREPARE:
- amd_uncore_cpu_down_prepare(cpu);
- break;
-
- case CPU_UP_CANCELED:
- case CPU_DEAD:
- amd_uncore_cpu_dead(cpu);
- break;
-
- default:
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block amd_uncore_cpu_notifier_block = {
- .notifier_call = amd_uncore_cpu_notifier,
- .priority = CPU_PRI_PERF + 1,
-};
-
-static void __init init_cpu_already_online(void *dummy)
-{
- unsigned int cpu = smp_processor_id();
-
- amd_uncore_cpu_starting(cpu);
- amd_uncore_cpu_online(cpu);
-}
-static void cleanup_cpu_online(void *dummy)
-{
- unsigned int cpu = smp_processor_id();
-
- amd_uncore_cpu_dead(cpu);
+ return 0;
}
static int __init amd_uncore_init(void)
{
- unsigned int cpu, cpu2;
int ret = -ENODEV;
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
@@ -558,38 +509,29 @@ static int __init amd_uncore_init(void)
ret = 0;
}
- if (ret)
- goto fail_nodev;
-
- cpu_notifier_register_begin();
-
- /* init cpus already online before registering for hotplug notifier */
- for_each_online_cpu(cpu) {
- ret = amd_uncore_cpu_up_prepare(cpu);
- if (ret)
- goto fail_online;
- smp_call_function_single(cpu, init_cpu_already_online, NULL, 1);
- }
-
- __register_cpu_notifier(&amd_uncore_cpu_notifier_block);
- cpu_notifier_register_done();
-
+ /*
+ * Install callbacks. Core will call them for each online cpu.
+ */
+ if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
+ "PERF_X86_AMD_UNCORE_PREP",
+ amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
+ goto fail_l2;
+
+ if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
+ "AP_PERF_X86_AMD_UNCORE_STARTING",
+ amd_uncore_cpu_starting, NULL))
+ goto fail_prep;
+ if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
+ "AP_PERF_X86_AMD_UNCORE_ONLINE",
+ amd_uncore_cpu_online,
+ amd_uncore_cpu_down_prepare))
+ goto fail_start;
return 0;
-
-fail_online:
- for_each_online_cpu(cpu2) {
- if (cpu2 == cpu)
- break;
- smp_call_function_single(cpu, cleanup_cpu_online, NULL, 1);
- }
- cpu_notifier_register_done();
-
- /* amd_uncore_nb/l2 should have been freed by cleanup_cpu_online */
- amd_uncore_nb = amd_uncore_l2 = NULL;
-
- if (boot_cpu_has(X86_FEATURE_PERFCTR_L2))
- perf_pmu_unregister(&amd_l2_pmu);
+fail_start:
+ cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
+fail_prep:
+ cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_l2:
if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
perf_pmu_unregister(&amd_nb_pmu);
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index dfebbde2a4cc..c17f0de5fd39 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1477,49 +1477,49 @@ NOKPROBE_SYMBOL(perf_event_nmi_handler);
struct event_constraint emptyconstraint;
struct event_constraint unconstrained;
-static int
-x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+static int x86_pmu_prepare_cpu(unsigned int cpu)
{
- unsigned int cpu = (long)hcpu;
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
- int i, ret = NOTIFY_OK;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_UP_PREPARE:
- for (i = 0 ; i < X86_PERF_KFREE_MAX; i++)
- cpuc->kfree_on_online[i] = NULL;
- if (x86_pmu.cpu_prepare)
- ret = x86_pmu.cpu_prepare(cpu);
- break;
-
- case CPU_STARTING:
- if (x86_pmu.cpu_starting)
- x86_pmu.cpu_starting(cpu);
- break;
+ int i;
- case CPU_ONLINE:
- for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) {
- kfree(cpuc->kfree_on_online[i]);
- cpuc->kfree_on_online[i] = NULL;
- }
- break;
+ for (i = 0 ; i < X86_PERF_KFREE_MAX; i++)
+ cpuc->kfree_on_online[i] = NULL;
+ if (x86_pmu.cpu_prepare)
+ return x86_pmu.cpu_prepare(cpu);
+ return 0;
+}
- case CPU_DYING:
- if (x86_pmu.cpu_dying)
- x86_pmu.cpu_dying(cpu);
- break;
+static int x86_pmu_dead_cpu(unsigned int cpu)
+{
+ if (x86_pmu.cpu_dead)
+ x86_pmu.cpu_dead(cpu);
+ return 0;
+}
- case CPU_UP_CANCELED:
- case CPU_DEAD:
- if (x86_pmu.cpu_dead)
- x86_pmu.cpu_dead(cpu);
- break;
+static int x86_pmu_online_cpu(unsigned int cpu)
+{
+ struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+ int i;
- default:
- break;
+ for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) {
+ kfree(cpuc->kfree_on_online[i]);
+ cpuc->kfree_on_online[i] = NULL;
}
+ return 0;
+}
- return ret;
+static int x86_pmu_starting_cpu(unsigned int cpu)
+{
+ if (x86_pmu.cpu_starting)
+ x86_pmu.cpu_starting(cpu);
+ return 0;
+}
+
+static int x86_pmu_dying_cpu(unsigned int cpu)
+{
+ if (x86_pmu.cpu_dying)
+ x86_pmu.cpu_dying(cpu);
+ return 0;
}
static void __init pmu_check_apic(void)
@@ -1787,10 +1787,39 @@ static int __init init_hw_perf_events(void)
pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
- perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
- perf_cpu_notifier(x86_pmu_notifier);
+ /*
+ * Install callbacks. Core will call them for each online
+ * cpu.
+ */
+ err = cpuhp_setup_state(CPUHP_PERF_X86_PREPARE, "PERF_X86_PREPARE",
+ x86_pmu_prepare_cpu, x86_pmu_dead_cpu);
+ if (err)
+ return err;
+
+ err = cpuhp_setup_state(CPUHP_AP_PERF_X86_STARTING,
+ "AP_PERF_X86_STARTING", x86_pmu_starting_cpu,
+ x86_pmu_dying_cpu);
+ if (err)
+ goto out;
+
+ err = cpuhp_setup_state(CPUHP_AP_PERF_X86_ONLINE, "AP_PERF_X86_ONLINE",
+ x86_pmu_online_cpu, NULL);
+ if (err)
+ goto out1;
+
+ err = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+ if (err)
+ goto out2;
return 0;
+
+out2:
+ cpuhp_remove_state(CPUHP_AP_PERF_X86_ONLINE);
+out1:
+ cpuhp_remove_state(CPUHP_AP_PERF_X86_STARTING);
+out:
+ cpuhp_remove_state(CPUHP_PERF_X86_PREPARE);
+ return err;
}
early_initcall(init_hw_perf_events);
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 0974ba11e954..2cbde2f449aa 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3109,7 +3109,7 @@ static int intel_pmu_cpu_prepare(int cpu)
cpuc->excl_thread_id = 0;
}
- return NOTIFY_OK;
+ return 0;
err_constraint_list:
kfree(cpuc->constraint_list);
@@ -3120,7 +3120,7 @@ err_shared_regs:
cpuc->shared_regs = NULL;
err:
- return NOTIFY_BAD;
+ return -ENOMEM;
}
static void intel_pmu_cpu_starting(int cpu)
diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 7b5fd811ef45..783c49ddef29 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -1577,7 +1577,7 @@ static inline void cqm_pick_event_reader(int cpu)
cpumask_set_cpu(cpu, &cqm_cpumask);
}
-static void intel_cqm_cpu_starting(unsigned int cpu)
+static int intel_cqm_cpu_starting(unsigned int cpu)
{
struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -1588,39 +1588,26 @@ static void intel_cqm_cpu_starting(unsigned int cpu)
WARN_ON(c->x86_cache_max_rmid != cqm_max_rmid);
WARN_ON(c->x86_cache_occ_scale != cqm_l3_scale);
+
+ cqm_pick_event_reader(cpu);
+ return 0;
}
-static void intel_cqm_cpu_exit(unsigned int cpu)
+static int intel_cqm_cpu_exit(unsigned int cpu)
{
int target;
/* Is @cpu the current cqm reader for this package ? */
if (!cpumask_test_and_clear_cpu(cpu, &cqm_cpumask))
- return;
+ return 0;
/* Find another online reader in this package */
target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
if (target < nr_cpu_ids)
cpumask_set_cpu(target, &cqm_cpumask);
-}
-
-static int intel_cqm_cpu_notifier(struct notifier_block *nb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DOWN_PREPARE:
- intel_cqm_cpu_exit(cpu);
- break;
- case CPU_STARTING:
- intel_cqm_cpu_starting(cpu);
- cqm_pick_event_reader(cpu);
- break;
- }
- return NOTIFY_OK;
+ return 0;
}
static const struct x86_cpu_id intel_cqm_match[] = {
@@ -1682,7 +1669,7 @@ out:
static int __init intel_cqm_init(void)
{
char *str = NULL, scale[20];
- int i, cpu, ret;
+ int cpu, ret;
if (x86_match_cpu(intel_cqm_match))
cqm_enabled = true;
@@ -1705,8 +1692,7 @@ static int __init intel_cqm_init(void)
*
* Also, check that the scales match on all cpus.
*/
- cpu_notifier_register_begin();
-
+ get_online_cpus();
for_each_online_cpu(cpu) {
struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -1743,11 +1729,6 @@ static int __init intel_cqm_init(void)
if (ret)
goto out;
- for_each_online_cpu(i) {
- intel_cqm_cpu_starting(i);
- cqm_pick_event_reader(i);
- }
-
if (mbm_enabled)
ret = intel_mbm_init();
if (ret && !cqm_enabled)
@@ -1772,12 +1753,18 @@ static int __init intel_cqm_init(void)
pr_info("Intel MBM enabled\n");
/*
- * Register the hot cpu notifier once we are sure cqm
+ * Setup the hot cpu notifier once we are sure cqm
* is enabled to avoid notifier leak.
*/
- __perf_cpu_notifier(intel_cqm_cpu_notifier);
+ cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_STARTING,
+ "AP_PERF_X86_CQM_STARTING",
+ intel_cqm_cpu_starting, NULL);
+ cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_ONLINE, "AP_PERF_X86_CQM_ONLINE",
+ NULL, intel_cqm_cpu_exit);
+
out:
- cpu_notifier_register_done();
+ put_online_cpus();
+
if (ret) {
kfree(str);
cqm_cleanup();
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 4c7638b91fa5..3ca87b5a8677 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -366,7 +366,7 @@ static int cstate_pmu_event_add(struct perf_event *event, int mode)
* Check if exiting cpu is the designated reader. If so migrate the
* events when there is a valid target available
*/
-static void cstate_cpu_exit(int cpu)
+static int cstate_cpu_exit(unsigned int cpu)
{
unsigned int target;
@@ -391,9 +391,10 @@ static void cstate_cpu_exit(int cpu)
perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
}
}
+ return 0;
}
-static void cstate_cpu_init(int cpu)
+static int cstate_cpu_init(unsigned int cpu)
{
unsigned int target;
@@ -415,31 +416,10 @@ static void cstate_cpu_init(int cpu)
topology_core_cpumask(cpu));
if (has_cstate_pkg && target >= nr_cpu_ids)
cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);
-}
-static int cstate_cpu_notifier(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (long)hcpu;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_STARTING:
- cstate_cpu_init(cpu);
- break;
- case CPU_DOWN_PREPARE:
- cstate_cpu_exit(cpu);
- break;
- default:
- break;
- }
- return NOTIFY_OK;
+ return 0;
}
-static struct notifier_block cstate_cpu_nb = {
- .notifier_call = cstate_cpu_notifier,
- .priority = CPU_PRI_PERF + 1,
-};
-
static struct pmu cstate_core_pmu = {
.attr_groups = core_attr_groups,
.name = "cstate_core",
@@ -600,18 +580,20 @@ static inline void cstate_cleanup(void)
static int __init cstate_init(void)
{
- int cpu, err;
+ int err;
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu)
- cstate_cpu_init(cpu);
+ cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
+ "AP_PERF_X86_CSTATE_STARTING", cstate_cpu_init,
+ NULL);
+ cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
+ "AP_PERF_X86_CSTATE_ONLINE", NULL, cstate_cpu_exit);
if (has_cstate_core) {
err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
if (err) {
has_cstate_core = false;
pr_info("Failed to register cstate core pmu\n");
- goto out;
+ return err;
}
}
@@ -621,12 +603,10 @@ static int __init cstate_init(void)
has_cstate_pkg = false;
pr_info("Failed to register cstate pkg pmu\n");
cstate_cleanup();
- goto out;
+ return err;
}
}
- __register_cpu_notifier(&cstate_cpu_nb);
-out:
- cpu_notifier_register_done();
+
return err;
}
@@ -652,9 +632,8 @@ module_init(cstate_pmu_init);
static void __exit cstate_pmu_exit(void)
{
- cpu_notifier_register_begin();
- __unregister_cpu_notifier(&cstate_cpu_nb);
+ cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
+ cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);
cstate_cleanup();
- cpu_notifier_register_done();
}
module_exit(cstate_pmu_exit);
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index d0c58b35155f..28865938aadf 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -556,14 +556,14 @@ const struct attribute_group *rapl_attr_groups[] = {
NULL,
};
-static void rapl_cpu_exit(int cpu)
+static int rapl_cpu_offline(unsigned int cpu)
{
struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
int target;
/* Check if exiting cpu is used for collecting rapl events */
if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask))
- return;
+ return 0;
pmu->cpu = -1;
/* Find a new cpu to collect rapl events */
@@ -575,9 +575,10 @@ static void rapl_cpu_exit(int cpu)
pmu->cpu = target;
perf_pmu_migrate_context(pmu->pmu, cpu, target);
}
+ return 0;
}
-static void rapl_cpu_init(int cpu)
+static int rapl_cpu_online(unsigned int cpu)
{
struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
int target;
@@ -588,13 +589,14 @@ static void rapl_cpu_init(int cpu)
*/
target = cpumask_any_and(&rapl_cpu_mask, topology_core_cpumask(cpu));
if (target < nr_cpu_ids)
- return;
+ return 0;
cpumask_set_cpu(cpu, &rapl_cpu_mask);
pmu->cpu = cpu;
+ return 0;
}
-static int rapl_cpu_prepare(int cpu)
+static int rapl_cpu_prepare(unsigned int cpu)
{
struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
@@ -615,33 +617,6 @@ static int rapl_cpu_prepare(int cpu)
return 0;
}
-static int rapl_cpu_notifier(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (long)hcpu;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_UP_PREPARE:
- rapl_cpu_prepare(cpu);
- break;
-
- case CPU_DOWN_FAILED:
- case CPU_ONLINE:
- rapl_cpu_init(cpu);
- break;
-
- case CPU_DOWN_PREPARE:
- rapl_cpu_exit(cpu);
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block rapl_cpu_nb = {
- .notifier_call = rapl_cpu_notifier,
- .priority = CPU_PRI_PERF + 1,
-};
-
static int rapl_check_hw_unit(bool apply_quirk)
{
u64 msr_rapl_power_unit_bits;
@@ -692,24 +667,6 @@ static void __init rapl_advertise(void)
}
}
-static int __init rapl_prepare_cpus(void)
-{
- unsigned int cpu, pkg;
- int ret;
-
- for_each_online_cpu(cpu) {
- pkg = topology_logical_package_id(cpu);
- if (rapl_pmus->pmus[pkg])
- continue;
-
- ret = rapl_cpu_prepare(cpu);
- if (ret)
- return ret;
- rapl_cpu_init(cpu);
- }
- return 0;
-}
-
static void cleanup_rapl_pmus(void)
{
int i;
@@ -837,35 +794,44 @@ static int __init rapl_pmu_init(void)
if (ret)
return ret;
- cpu_notifier_register_begin();
+ /*
+ * Install callbacks. Core will call them for each online cpu.
+ */
- ret = rapl_prepare_cpus();
+ ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "PERF_X86_RAPL_PREP",
+ rapl_cpu_prepare, NULL);
if (ret)
goto out;
+ ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
+ "AP_PERF_X86_RAPL_ONLINE",
+ rapl_cpu_online, rapl_cpu_offline);
+ if (ret)
+ goto out1;
+
ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
if (ret)
- goto out;
+ goto out2;
- __register_cpu_notifier(&rapl_cpu_nb);
- cpu_notifier_register_done();
rapl_advertise();
return 0;
+out2:
+ cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
+out1:
+ cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP);
out:
pr_warn("Initialization failed (%d), disabled\n", ret);
cleanup_rapl_pmus();
- cpu_notifier_register_done();
return ret;
}
module_init(rapl_pmu_init);
static void __exit intel_rapl_exit(void)
{
- cpu_notifier_register_begin();
- __unregister_cpu_notifier(&rapl_cpu_nb);
+ cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
+ cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
perf_pmu_unregister(&rapl_pmus->pmu);
cleanup_rapl_pmus();
- cpu_notifier_register_done();
}
module_exit(intel_rapl_exit);
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 59b4974c697f..3f3d0d67749b 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1052,7 +1052,7 @@ static void uncore_pci_exit(void)
}
}
-static void uncore_cpu_dying(int cpu)
+static int uncore_cpu_dying(unsigned int cpu)
{
struct intel_uncore_type *type, **types = uncore_msr_uncores;
struct intel_uncore_pmu *pmu;
@@ -1069,16 +1069,19 @@ static void uncore_cpu_dying(int cpu)
uncore_box_exit(box);
}
}
+ return 0;
}
-static void uncore_cpu_starting(int cpu, bool init)
+static int first_init;
+
+static int uncore_cpu_starting(unsigned int cpu)
{
struct intel_uncore_type *type, **types = uncore_msr_uncores;
struct intel_uncore_pmu *pmu;
struct intel_uncore_box *box;
int i, pkg, ncpus = 1;
- if (init) {
+ if (first_init) {
/*
* On init we get the number of online cpus in the package
* and set refcount for all of them.
@@ -1099,9 +1102,11 @@ static void uncore_cpu_starting(int cpu, bool init)
uncore_box_init(box);
}
}
+
+ return 0;
}
-static int uncore_cpu_prepare(int cpu)
+static int uncore_cpu_prepare(unsigned int cpu)
{
struct intel_uncore_type *type, **types = uncore_msr_uncores;
struct intel_uncore_pmu *pmu;
@@ -1164,13 +1169,13 @@ static void uncore_change_context(struct intel_uncore_type **uncores,
uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
}
-static void uncore_event_exit_cpu(int cpu)
+static int uncore_event_cpu_offline(unsigned int cpu)
{
int target;
/* Check if exiting cpu is used for collecting uncore events */
if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
- return;
+ return 0;
/* Find a new cpu to collect uncore events */
target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
@@ -1183,9 +1188,10 @@ static void uncore_event_exit_cpu(int cpu)
uncore_change_context(uncore_msr_uncores, cpu, target);
uncore_change_context(uncore_pci_uncores, cpu, target);
+ return 0;
}
-static void uncore_event_init_cpu(int cpu)
+static int uncore_event_cpu_online(unsigned int cpu)
{
int target;
@@ -1195,50 +1201,15 @@ static void uncore_event_init_cpu(int cpu)
*/
target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
if (target < nr_cpu_ids)
- return;
+ return 0;
cpumask_set_cpu(cpu, &uncore_cpu_mask);
uncore_change_context(uncore_msr_uncores, -1, cpu);
uncore_change_context(uncore_pci_uncores, -1, cpu);
+ return 0;
}
-static int uncore_cpu_notifier(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (long)hcpu;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_UP_PREPARE:
- return notifier_from_errno(uncore_cpu_prepare(cpu));
-
- case CPU_STARTING:
- uncore_cpu_starting(cpu, false);
- case CPU_DOWN_FAILED:
- uncore_event_init_cpu(cpu);
- break;
-
- case CPU_UP_CANCELED:
- case CPU_DYING:
- uncore_cpu_dying(cpu);
- break;
-
- case CPU_DOWN_PREPARE:
- uncore_event_exit_cpu(cpu);
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block uncore_cpu_nb = {
- .notifier_call = uncore_cpu_notifier,
- /*
- * to migrate uncore events, our notifier should be executed
- * before perf core's notifier.
- */
- .priority = CPU_PRI_PERF + 1,
-};
-
static int __init type_pmu_register(struct intel_uncore_type *type)
{
int i, ret;
@@ -1282,41 +1253,6 @@ err:
return ret;
}
-static void __init uncore_cpu_setup(void *dummy)
-{
- uncore_cpu_starting(smp_processor_id(), true);
-}
-
-/* Lazy to avoid allocation of a few bytes for the normal case */
-static __initdata DECLARE_BITMAP(packages, MAX_LOCAL_APIC);
-
-static int __init uncore_cpumask_init(bool msr)
-{
- unsigned int cpu;
-
- for_each_online_cpu(cpu) {
- unsigned int pkg = topology_logical_package_id(cpu);
- int ret;
-
- if (test_and_set_bit(pkg, packages))
- continue;
- /*
- * The first online cpu of each package allocates and takes
- * the refcounts for all other online cpus in that package.
- * If msrs are not enabled no allocation is required.
- */
- if (msr) {
- ret = uncore_cpu_prepare(cpu);
- if (ret)
- return ret;
- }
- uncore_event_init_cpu(cpu);
- smp_call_function_single(cpu, uncore_cpu_setup, NULL, 1);
- }
- __register_cpu_notifier(&uncore_cpu_nb);
- return 0;
-}
-
#define X86_UNCORE_MODEL_MATCH(model, init) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }
@@ -1440,11 +1376,33 @@ static int __init intel_uncore_init(void)
if (cret && pret)
return -ENODEV;
- cpu_notifier_register_begin();
- ret = uncore_cpumask_init(!cret);
- if (ret)
- goto err;
- cpu_notifier_register_done();
+ /*
+ * Install callbacks. Core will call them for each online cpu.
+ *
+ * The first online cpu of each package allocates and takes
+ * the refcounts for all other online cpus in that package.
+ * If msrs are not enabled no allocation is required and
+ * uncore_cpu_prepare() is not called for each online cpu.
+ */
+ if (!cret) {
+ ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
+ "PERF_X86_UNCORE_PREP",
+ uncore_cpu_prepare, NULL);
+ if (ret)
+ goto err;
+ } else {
+ cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
+ "PERF_X86_UNCORE_PREP",
+ uncore_cpu_prepare, NULL);
+ }
+ first_init = 1;
+ cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
+ "AP_PERF_X86_UNCORE_STARTING",
+ uncore_cpu_starting, uncore_cpu_dying);
+ first_init = 0;
+ cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+ "AP_PERF_X86_UNCORE_ONLINE",
+ uncore_event_cpu_online, uncore_event_cpu_offline);
return 0;
err:
@@ -1452,17 +1410,16 @@ err:
on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
uncore_types_exit(uncore_msr_uncores);
uncore_pci_exit();
- cpu_notifier_register_done();
return ret;
}
module_init(intel_uncore_init);
static void __exit intel_uncore_exit(void)
{
- cpu_notifier_register_begin();
- __unregister_cpu_notifier(&uncore_cpu_nb);
+ cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
+ cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
+ cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
uncore_types_exit(uncore_msr_uncores);
uncore_pci_exit();
- cpu_notifier_register_done();
}
module_exit(intel_uncore_exit);