author     Andi Kleen <ak@linux.intel.com>    2009-10-14 06:22:47 +0200
committer  Andi Kleen <ak@linux.intel.com>    2009-10-19 07:29:22 +0200
commit     65a64464349883891e21e74af16c05d6e1eeb4e9 (patch)
tree       7b4744f01840c337506dbb24debe5e50ee76186e /kernel
parent     5d5429af066b8896e903d829ac143711ed2c25f2 (diff)
HWPOISON: Allow schedule_on_each_cpu() from keventd
Right now, calling schedule_on_each_cpu() from keventd deadlocks, because it
tries to schedule a work item on the current CPU too. This happens via
lru_add_drain_all() in hwpoison.

Just call the function directly for the current CPU in that case. This is
actually faster too.

Debugged with Fengguang Wu & Max Asbock.

Signed-off-by: Andi Kleen <ak@linux.intel.com>
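To make the deadlock concrete, here is a minimal, hypothetical sketch
(editorial, not part of this commit) of the failing pattern, in kernel C of
this era. hwpoison_work_fn and hwpoison_work are invented names;
lru_add_drain_all() really does call schedule_on_each_cpu() internally.

#include <linux/workqueue.h>
#include <linux/swap.h>

/* Hypothetical work item mirroring the hwpoison path. */
static void hwpoison_work_fn(struct work_struct *unused)
{
        /*
         * This function runs on keventd.  lru_add_drain_all() calls
         * schedule_on_each_cpu(), which (before this patch) queued a
         * work item on every CPU's keventd -- including this one --
         * and then flush_work()ed each of them.  The item queued on
         * this CPU cannot start until this function returns, and this
         * function cannot return until the flush completes: deadlock.
         */
        lru_add_drain_all();
}

static DECLARE_WORK(hwpoison_work, hwpoison_work_fn);

/* Queued from process context elsewhere: schedule_work(&hwpoison_work); */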
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/workqueue.c | 21 +++++++++++++++++++--
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index addfe2df93b1..f61a2fecf281 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -667,21 +667,38 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
 int schedule_on_each_cpu(work_func_t func)
 {
         int cpu;
+        int orig = -1;
         struct work_struct *works;
 
         works = alloc_percpu(struct work_struct);
         if (!works)
                 return -ENOMEM;
 
+        /*
+         * when running in keventd don't schedule a work item on itself.
+         * Can just call directly because the work queue is already bound.
+         * This also is faster.
+         * Make this a generic parameter for other workqueues?
+         */
+        if (current_is_keventd()) {
+                orig = raw_smp_processor_id();
+                INIT_WORK(per_cpu_ptr(works, orig), func);
+                func(per_cpu_ptr(works, orig));
+        }
+
         get_online_cpus();
         for_each_online_cpu(cpu) {
                 struct work_struct *work = per_cpu_ptr(works, cpu);
 
+                if (cpu == orig)
+                        continue;
                 INIT_WORK(work, func);
                 schedule_work_on(cpu, work);
         }
-        for_each_online_cpu(cpu)
-                flush_work(per_cpu_ptr(works, cpu));
+        for_each_online_cpu(cpu) {
+                if (cpu != orig)
+                        flush_work(per_cpu_ptr(works, cpu));
+        }
         put_online_cpus();
         free_percpu(works);
         return 0;
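A note on the resulting behavior (editorial observation, not part of the
commit): when the caller is keventd, func() now runs synchronously on the
calling CPU before get_online_cpus() is taken, and orig bookmarks that CPU so
both loops skip it; every online CPU therefore still executes func() exactly
once. The comment in the patch leaves open whether this direct-call shortcut
should become a generic parameter for other workqueues.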