author		Thomas Gleixner <tglx@linutronix.de>	2016-02-26 18:43:37 +0000
committer	Thomas Gleixner <tglx@linutronix.de>	2016-03-01 20:36:56 +0100
commit		2e1a3483ce74d197876e58281925dd4e378a7f28 (patch)
tree		9a9ffcc23caad99b7e001336a0282911ca9c324d /kernel/cpu.c
parent		931ef163309ee955611f287dc65248b39a65fc9d (diff)
cpu/hotplug: Split out the state walk into functions
We need that for running callbacks on the AP and the BP.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: Rik van Riel <riel@redhat.com>
Cc: Rafael Wysocki <rafael.j.wysocki@intel.com>
Cc: "Srivatsa S. Bhat" <srivatsa@mit.edu>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul Turner <pjt@google.com>
Link: http://lkml.kernel.org/r/20160226182341.374946234@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/cpu.c')
-rw-r--r--	kernel/cpu.c	111
1 file changed, 68 insertions(+), 43 deletions(-)
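
The hunks below move the two walk loops out of _cpu_down()/_cpu_up() into cpuhp_down_callbacks()/cpuhp_up_callbacks(), which take the step table as an explicit parameter. For orientation, here is a minimal standalone sketch of the pattern those helpers implement (not kernel code; every name and type here is a simplified stand-in): walk the state range one step at a time and, on the first failing callback, roll back the steps that already ran.

/*
 * Simplified, self-contained model of the "walk forward, undo on failure"
 * state walk.  The real kernel code operates on struct cpuhp_cpu_state and
 * struct cpuhp_step; here both are reduced to an int state and a table of
 * startup/teardown function pointers.
 */
#include <stdio.h>

struct step {
	int (*startup)(int state);
	int (*teardown)(int state);
};

/* Bring states (cur, target] up one by one; roll back to prev on failure. */
static int walk_up(const struct step *steps, int *cur, int target)
{
	int prev = *cur, ret = 0;

	while (*cur < target) {
		(*cur)++;
		ret = steps[*cur].startup(*cur);
		if (ret) {
			/* Tear the already-started states back down to prev. */
			for ((*cur)--; *cur > prev; (*cur)--)
				steps[*cur].teardown(*cur);
			break;
		}
	}
	return ret;
}

static int ok(int state)   { printf("startup  %d\n", state); return 0; }
static int fail(int state) { printf("startup  %d -> fail\n", state); return -1; }
static int down(int state) { printf("teardown %d\n", state); return 0; }

int main(void)
{
	const struct step steps[] = {
		{ ok, down }, { ok, down }, { fail, down }, { ok, down },
	};
	int state = 0;

	/* Failure at state 2 rolls state back to 0 via teardown of state 1. */
	return walk_up(steps, &state, 3) ? 1 : 0;
}
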
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 3ec86bc414b7..9572ca043727 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -329,10 +329,74 @@ static int bringup_cpu(unsigned int cpu)
return 0;
}
+/*
+ * Hotplug state machine related functions
+ */
+static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st,
+ struct cpuhp_step *steps)
+{
+ for (st->state++; st->state < st->target; st->state++) {
+ struct cpuhp_step *step = steps + st->state;
+
+ if (!step->skip_onerr)
+ cpuhp_invoke_callback(cpu, st->state, step->startup);
+ }
+}
+
+static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
+ struct cpuhp_step *steps, enum cpuhp_state target)
+{
+ enum cpuhp_state prev_state = st->state;
+ int ret = 0;
+
+ for (; st->state > target; st->state--) {
+ struct cpuhp_step *step = steps + st->state;
+
+ ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
+ if (ret) {
+ st->target = prev_state;
+ undo_cpu_down(cpu, st, steps);
+ break;
+ }
+ }
+ return ret;
+}
+
+static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st,
+ struct cpuhp_step *steps)
+{
+ for (st->state--; st->state > st->target; st->state--) {
+ struct cpuhp_step *step = steps + st->state;
+
+ if (!step->skip_onerr)
+ cpuhp_invoke_callback(cpu, st->state, step->teardown);
+ }
+}
+
+static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
+ struct cpuhp_step *steps, enum cpuhp_state target)
+{
+ enum cpuhp_state prev_state = st->state;
+ int ret = 0;
+
+ while (st->state < target) {
+ struct cpuhp_step *step;
+
+ st->state++;
+ step = steps + st->state;
+ ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
+ if (ret) {
+ st->target = prev_state;
+ undo_cpu_up(cpu, st, steps);
+ break;
+ }
+ }
+ return ret;
+}
+
#ifdef CONFIG_HOTPLUG_CPU
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);
-
void unregister_cpu_notifier(struct notifier_block *nb)
{
cpu_maps_update_begin();
@@ -537,15 +601,6 @@ static int notify_dead(unsigned int cpu)
#endif
#ifdef CONFIG_HOTPLUG_CPU
-static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
-{
- for (st->state++; st->state < st->target; st->state++) {
- struct cpuhp_step *step = cpuhp_bp_states + st->state;
-
- if (!step->skip_onerr)
- cpuhp_invoke_callback(cpu, st->state, step->startup);
- }
-}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
@@ -567,16 +622,8 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
prev_state = st->state;
st->target = target;
- for (; st->state > st->target; st->state--) {
- struct cpuhp_step *step = cpuhp_bp_states + st->state;
+ ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);
- ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
- if (ret) {
- st->target = prev_state;
- undo_cpu_down(cpu, st);
- break;
- }
- }
hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
cpu_hotplug_done();
@@ -645,22 +692,12 @@ static int cpuhp_set_cpu_active(unsigned int cpu)
return 0;
}
-static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
-{
- for (st->state--; st->state > st->target; st->state--) {
- struct cpuhp_step *step = cpuhp_bp_states + st->state;
-
- if (!step->skip_onerr)
- cpuhp_invoke_callback(cpu, st->state, step->teardown);
- }
-}
-
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
struct task_struct *idle;
- int prev_state, ret = 0;
+ int ret = 0;
cpu_hotplug_begin();
@@ -687,20 +724,8 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
cpuhp_tasks_frozen = tasks_frozen;
- prev_state = st->state;
st->target = target;
- while (st->state < st->target) {
- struct cpuhp_step *step;
-
- st->state++;
- step = cpuhp_bp_states + st->state;
- ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
- if (ret) {
- st->target = prev_state;
- undo_cpu_up(cpu, st);
- break;
- }
- }
+ ret = cpuhp_up_callbacks(cpu, st, cpuhp_bp_states, target);
out:
cpu_hotplug_done();
return ret;
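
Seen from the call sites, the change leaves _cpu_down() and _cpu_up() with a single call each into the shared walkers (these lines are taken verbatim from the hunks above):

	ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);
	ret = cpuhp_up_callbacks(cpu, st, cpuhp_bp_states, target);

Passing the step table explicitly is what lets later callers reuse the same walkers with a different table for the AP, which is the motivation stated in the commit message.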