path: root/kernel/stop_machine.c
author     Oleg Nesterov <oleg@redhat.com>     2015-11-15 20:33:32 +0100
committer  Ingo Molnar <mingo@kernel.org>      2015-11-23 09:48:20 +0100
commit     accaf6ea3db6f5fb997f096b6eefd5431d03f7e5
tree       3e17684822d4d0e9685ef0d598c2fa55ab88495e  /kernel/stop_machine.c
parent     dd2e3121e3cb16d03a6e3f2db48f260f046f39c2
stop_machine: Clean up the usage of the preemption counter in cpu_stopper_thread()
1. Change this code to use preempt_count_inc()/preempt_count_dec(); this way it
   works even if CONFIG_PREEMPT_COUNT=n, and we avoid the unnecessary
   __preempt_schedule() check (stop_sched_class is not preemptible). This also
   makes it clear that we only want to make preempt_count() != 0 for
   __might_sleep() / schedule_debug().

2. Change WARN_ONCE() to use %pf to print the function name, and remove the
   kallsyms_lookup()/ksym_buf machinery.

3. Move "int ret" into the "if (work)" block; this looks more consistent.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Milos Vyletel <milos@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Prarit Bhargava <prarit@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20151115193332.GA8281@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
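To make point 1 concrete, below is a minimal standalone userspace mock, not the real include/linux/preempt.h: it only models the CONFIG_PREEMPT_COUNT=y vs =n difference (mock_preempt_count and the printf calls are invented for the demo, and the __preempt_schedule() re-check done by the preemptible-kernel flavour of preempt_enable() is deliberately left out). Built without -DCONFIG_PREEMPT_COUNT, the disable/enable pair never touches the counter that __might_sleep()-style checks look at, while the raw inc/dec always does.

/*
 * Userspace mock, *not* the kernel headers: it only illustrates why the patch
 * prefers preempt_count_inc()/preempt_count_dec() for the debug-check use case.
 */
#include <stdio.h>

static int mock_preempt_count;		/* stand-in for the per-task counter */

#define barrier()		__asm__ __volatile__("" ::: "memory")
#define preempt_count()		(mock_preempt_count)
#define preempt_count_inc()	(mock_preempt_count++)
#define preempt_count_dec()	(mock_preempt_count--)

#ifdef CONFIG_PREEMPT_COUNT
# define preempt_disable()	do { preempt_count_inc(); barrier(); } while (0)
# define preempt_enable()	do { barrier(); preempt_count_dec(); } while (0)
#else	/* CONFIG_PREEMPT_COUNT=n: the pair degenerates to plain barriers */
# define preempt_disable()	barrier()
# define preempt_enable()	barrier()
#endif

int main(void)
{
	preempt_disable();
	/* prints 0 when built without -DCONFIG_PREEMPT_COUNT */
	printf("disable/enable pair: preempt_count() == %d\n", preempt_count());
	preempt_enable();

	preempt_count_inc();
	/* always prints 1: the raw inc/dec touches the counter in both builds */
	printf("raw inc/dec:         preempt_count() == %d\n", preempt_count());
	preempt_count_dec();
	return 0;
}

Compiling this once with -DCONFIG_PREEMPT_COUNT and once without should show the first line report 1 vs 0, while the second line reports 1 in both builds.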
Diffstat (limited to 'kernel/stop_machine.c')
-rw-r--r--   kernel/stop_machine.c   15
1 file changed, 5 insertions(+), 10 deletions(-)
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 7ff7acee2c76..61101193967e 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -435,7 +435,6 @@ static void cpu_stopper_thread(unsigned int cpu)
 {
 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
 	struct cpu_stop_work *work;
-	int ret;
 
 repeat:
 	work = NULL;
@@ -451,23 +450,19 @@ repeat:
 		cpu_stop_fn_t fn = work->fn;
 		void *arg = work->arg;
 		struct cpu_stop_done *done = work->done;
-		char ksym_buf[KSYM_NAME_LEN] __maybe_unused;
+		int ret;
 
-		/* cpu stop callbacks are not allowed to sleep */
-		preempt_disable();
+		/* cpu stop callbacks must not sleep, make in_atomic() == T */
+		preempt_count_inc();
 		ret = fn(arg);
 		if (done) {
 			if (ret)
 				done->ret = ret;
 			cpu_stop_signal_done(done);
 		}
-		/* restore preemption and check it's still balanced */
-		preempt_enable();
+		preempt_count_dec();
 		WARN_ONCE(preempt_count(),
-			  "cpu_stop: %s(%p) leaked preempt count\n",
-			  kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
-					  ksym_buf), arg);
-
+			  "cpu_stop: %pf(%p) leaked preempt count\n", fn, arg);
 		goto repeat;
 	}
 }
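For readability, this is roughly how the work-handling path of cpu_stopper_thread() reads with both hunks applied. It is reconstructed from the diff above; the dequeue of the next work item under stopper->lock is untouched by this patch, not visible in the hunks, and therefore elided behind a comment.

static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;

repeat:
	work = NULL;
	/* ... pop the next cpu_stop_work off stopper->works under
	 * stopper->lock (unchanged by this patch, elided here) ... */
	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		int ret;

		/* cpu stop callbacks must not sleep, make in_atomic() == T */
		preempt_count_inc();
		ret = fn(arg);
		if (done) {
			if (ret)
				done->ret = ret;
			cpu_stop_signal_done(done);
		}
		preempt_count_dec();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %pf(%p) leaked preempt count\n", fn, arg);
		goto repeat;
	}
}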