| author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-10-03 19:43:08 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-10-03 19:43:08 -0700 |
| commit | 597f03f9d133e9837d00965016170271d4f87dcf | |
| tree | 33bdb5c1104d5b466387f4ae98748c5f4ddd29bb | /drivers/perf |
| parent | 999dcbe2414e15e19cdc1f91497d01f262c6e1cf | |
| parent | 0bf71e4d02ffec8ab9a6adecca61d3eed74fc99d | |
Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull CPU hotplug updates from Thomas Gleixner:
"Yet another batch of cpu hotplug core updates and conversions:
- Provide core infrastructure for multi instance drivers so the
drivers do not have to keep custom lists.
- Convert custom lists to the new infrastructure. The block-mq custom
list conversion comes through the block tree and makes the diffstat
tip over to more lines removed than added.
- Handle unbalanced hotplug enable/disable calls more gracefully.
- Remove the obsolete CPU_STARTING/DYING notifier support.
- Convert another batch of notifier users.
The relayfs changes which conflicted with the conversion have been
shipped to me by Andrew.
The remaining lot is targeted for 4.10 so that we finally can remove
the rest of the notifiers"
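The "multi instance" infrastructure mentioned in the first bullet lets a driver register a hotplug callback once and then attach any number of device instances to it, instead of keeping a private list under its own lock. The sketch below is purely illustrative and not taken from this merge: the foo_* names, the "drivers/foo:online" label, and the use of a dynamically allocated CPUHP_AP_ONLINE_DYN state are assumptions chosen for the example.

```c
/*
 * Illustrative sketch only (not part of this merge): a driver with several
 * device instances using the multi-instance hotplug state API instead of a
 * private list protected by its own lock. All foo_* names are made up.
 */
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/list.h>

struct foo_device {
	struct hlist_node node;	/* linked into the hotplug state's instance list */
	/* ... per-device state ... */
};

static enum cpuhp_state foo_hp_state;

/* Called for every registered instance each time a CPU comes online. */
static int foo_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct foo_device *foo = hlist_entry_safe(node, struct foo_device, node);

	if (!foo)
		return 0;
	/* re-program per-CPU hardware belonging to this instance here */
	return 0;
}

static int __init foo_hp_init(void)
{
	int ret;

	/* Register the callback once; instances are attached separately. */
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "drivers/foo:online",
				      foo_cpu_online, NULL);
	if (ret < 0)
		return ret;
	foo_hp_state = ret;	/* dynamically allocated state number */
	return 0;
}

/* Per-device probe: the cpuhp core now tracks the instance, no driver list. */
static int foo_probe_one(struct foo_device *foo)
{
	return cpuhp_state_add_instance(foo_hp_state, &foo->node);
}

static void foo_remove_one(struct foo_device *foo)
{
	cpuhp_state_remove_instance(foo_hp_state, &foo->node);
}
```

The arm_pmu change in the diffstat below follows the same pattern, except that it hooks the fixed CPUHP_AP_PERF_ARM_STARTING state and uses the _nocalls variants, which add or remove an instance without invoking the callback on CPUs that are already online.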
* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (46 commits)
cpufreq: Fix up conversion to hotplug state machine
blk/mq: Reserve hotplug states for block multiqueue
x86/apic/uv: Convert to hotplug state machine
s390/mm/pfault: Convert to hotplug state machine
mips/loongson/smp: Convert to hotplug state machine
mips/octeon/smp: Convert to hotplug state machine
fault-injection/cpu: Convert to hotplug state machine
padata: Convert to hotplug state machine
cpufreq: Convert to hotplug state machine
ACPI/processor: Convert to hotplug state machine
virtio scsi: Convert to hotplug state machine
oprofile/timer: Convert to hotplug state machine
block/softirq: Convert to hotplug state machine
lib/irq_poll: Convert to hotplug state machine
x86/microcode: Convert to hotplug state machine
sh/SH-X3 SMP: Convert to hotplug state machine
ia64/mca: Convert to hotplug state machine
ARM/OMAP/wakeupgen: Convert to hotplug state machine
ARM/shmobile: Convert to hotplug state machine
arm64/FP/SIMD: Convert to hotplug state machine
...
Diffstat (limited to 'drivers/perf')
-rw-r--r--  drivers/perf/arm_pmu.c  |  44
1 file changed, 18 insertions(+), 26 deletions(-)
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 30370817bf13..b37b57294566 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -709,28 +709,20 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 	return 0;
 }
 
-static DEFINE_SPINLOCK(arm_pmu_lock);
-static LIST_HEAD(arm_pmu_list);
-
 /*
  * PMU hardware loses all context when a CPU goes offline.
  * When a CPU is hotplugged back in, since some hardware registers are
  * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
  * junk values out of them.
  */
-static int arm_perf_starting_cpu(unsigned int cpu)
+static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
 {
-	struct arm_pmu *pmu;
-
-	spin_lock(&arm_pmu_lock);
-	list_for_each_entry(pmu, &arm_pmu_list, entry) {
+	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
 
-		if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
-			continue;
-		if (pmu->reset)
-			pmu->reset(pmu);
-	}
-	spin_unlock(&arm_pmu_lock);
+	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
+		return 0;
+	if (pmu->reset)
+		pmu->reset(pmu);
 	return 0;
 }
 
@@ -842,9 +834,10 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	if (!cpu_hw_events)
 		return -ENOMEM;
 
-	spin_lock(&arm_pmu_lock);
-	list_add_tail(&cpu_pmu->entry, &arm_pmu_list);
-	spin_unlock(&arm_pmu_lock);
+	err = cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+					       &cpu_pmu->node);
+	if (err)
+		goto out_free;
 
 	err = cpu_pm_pmu_register(cpu_pmu);
 	if (err)
@@ -880,9 +873,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	return 0;
 
 out_unregister:
-	spin_lock(&arm_pmu_lock);
-	list_del(&cpu_pmu->entry);
-	spin_unlock(&arm_pmu_lock);
+	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+					    &cpu_pmu->node);
+out_free:
 	free_percpu(cpu_hw_events);
 	return err;
 }
@@ -890,9 +883,8 @@ out_unregister:
 static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 {
 	cpu_pm_pmu_unregister(cpu_pmu);
-	spin_lock(&arm_pmu_lock);
-	list_del(&cpu_pmu->entry);
-	spin_unlock(&arm_pmu_lock);
+	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+					    &cpu_pmu->node);
 	free_percpu(cpu_pmu->hw_events);
 }
 
@@ -1091,9 +1083,9 @@ static int arm_pmu_hp_init(void)
 {
 	int ret;
 
-	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_STARTING,
-					"AP_PERF_ARM_STARTING",
-					arm_perf_starting_cpu, NULL);
+	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
+				      "AP_PERF_ARM_STARTING",
+				      arm_perf_starting_cpu, NULL);
 	if (ret)
 		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
 		       ret);