author     Oleg Nesterov <oleg@tv-sign.ru>                       2007-05-09 02:34:12 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-09 12:30:52 -0700
commit     b1f4ec172f75bc2f5cc4f4be69b5587660a955d2 (patch)
tree       41a96f3996246d1f8667b9b84705a800f03d7c49 /kernel/workqueue.c
parent     dfb4b82e1c631b1a6057e77212996a890aa515b7 (diff)
workqueue: introduce cpu_singlethread_map
Code like

	if (is_single_threaded(wq))
		do_something(singlethread_cpu);
	else {
		for_each_cpu_mask(cpu, cpu_populated_map)
			do_something(cpu);
	}

looks very annoying. We can add "static cpumask_t cpu_singlethread_map" and
simplify the code. Lessens .text a bit, and imho makes the code more readable.
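
In the patch below this becomes a small helper that picks the right mask, after
which every caller collapses to the same loop. Condensed from the diff that
follows:

	static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
	{
		return is_single_threaded(wq)
			? &cpu_singlethread_map : &cpu_populated_map;
	}

	/* a caller, e.g. flush_workqueue(): */
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	for_each_cpu_mask(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));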
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--   kernel/workqueue.c   55
1 file changed, 25 insertions(+), 30 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ce72d45c7fd8..6308a4bc6a82 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -69,6 +69,7 @@
 static DEFINE_MUTEX(workqueue_mutex);
 static LIST_HEAD(workqueues);
 
 static int singlethread_cpu __read_mostly;
+static cpumask_t cpu_singlethread_map __read_mostly;
 /* optimization, we could use cpu_possible_map */
 static cpumask_t cpu_populated_map __read_mostly;
@@ -78,6 +79,12 @@ static inline int is_single_threaded(struct workqueue_struct *wq)
 	return list_empty(&wq->list);
 }
 
+static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
+{
+	return is_single_threaded(wq)
+		? &cpu_singlethread_map : &cpu_populated_map;
+}
+
 /*
  * Set the workqueue on which a work item is to be run
  * - Must *only* be called if the pending flag is set
@@ -393,16 +400,12 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
  */
 void fastcall flush_workqueue(struct workqueue_struct *wq)
 {
-	might_sleep();
-
-	if (is_single_threaded(wq))
-		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
-	else {
-		int cpu;
+	const cpumask_t *cpu_map = wq_cpu_map(wq);
+	int cpu;
 
-		for_each_cpu_mask(cpu, cpu_populated_map)
-			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
-	}
+	might_sleep();
+	for_each_cpu_mask(cpu, *cpu_map)
+		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
@@ -439,7 +442,9 @@ static void wait_on_work(struct cpu_workqueue_struct *cwq,
  */
 void flush_work(struct workqueue_struct *wq, struct work_struct *work)
 {
+	const cpumask_t *cpu_map = wq_cpu_map(wq);
 	struct cpu_workqueue_struct *cwq;
+	int cpu;
 
 	might_sleep();
 
@@ -457,14 +462,8 @@ void flush_work(struct workqueue_struct *wq, struct work_struct *work)
 	work_release(work);
 	spin_unlock_irq(&cwq->lock);
 
-	if (is_single_threaded(wq))
-		wait_on_work(per_cpu_ptr(wq->cpu_wq, singlethread_cpu), work);
-	else {
-		int cpu;
-
-		for_each_cpu_mask(cpu, cpu_populated_map)
-			wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
-	}
+	for_each_cpu_mask(cpu, *cpu_map)
+		wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
@@ -757,22 +756,17 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
  */
 void destroy_workqueue(struct workqueue_struct *wq)
 {
+	const cpumask_t *cpu_map = wq_cpu_map(wq);
 	struct cpu_workqueue_struct *cwq;
+	int cpu;
 
-	if (is_single_threaded(wq)) {
-		cwq = per_cpu_ptr(wq->cpu_wq, singlethread_cpu);
-		cleanup_workqueue_thread(cwq, singlethread_cpu);
-	} else {
-		int cpu;
+	mutex_lock(&workqueue_mutex);
+	list_del(&wq->list);
+	mutex_unlock(&workqueue_mutex);
 
-		mutex_lock(&workqueue_mutex);
-		list_del(&wq->list);
-		mutex_unlock(&workqueue_mutex);
-
-		for_each_cpu_mask(cpu, cpu_populated_map) {
-			cwq = per_cpu_ptr(wq->cpu_wq, cpu);
-			cleanup_workqueue_thread(cwq, cpu);
-		}
+	for_each_cpu_mask(cpu, *cpu_map) {
+		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
+		cleanup_workqueue_thread(cwq, cpu);
 	}
 
 	free_percpu(wq->cpu_wq);
@@ -831,6 +825,7 @@ void init_workqueues(void)
 {
 	cpu_populated_map = cpu_online_map;
 	singlethread_cpu = first_cpu(cpu_possible_map);
+	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
 	hotcpu_notifier(workqueue_cpu_callback, 0);
 	keventd_wq = create_workqueue("events");
 	BUG_ON(!keventd_wq);
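
For readers outside the kernel tree, the same pattern can be modelled in a few
lines of ordinary C. The sketch below is hypothetical and not kernel code:
cpumask_t is reduced to a plain unsigned int bitmask, and NCPUS, struct
workqueue, and the for_each_cpu_mask() macro are toy stand-ins for the kernel's
versions.

	/* Toy model of the cpu_singlethread_map trick: precompute a one-bit
	 * mask once, then let every operation iterate over a mask pointer
	 * instead of branching on is_single_threaded() at each call site. */
	#include <stdio.h>

	#define NCPUS 4

	typedef unsigned int cpumask_t;		/* toy mask: one bit per CPU */

	static cpumask_t cpu_populated_map;	/* all "online" CPUs */
	static cpumask_t cpu_singlethread_map;	/* only the singlethread CPU */

	struct workqueue { int single_threaded; };

	static const cpumask_t *wq_cpu_map(const struct workqueue *wq)
	{
		return wq->single_threaded
			? &cpu_singlethread_map : &cpu_populated_map;
	}

	/* toy stand-in for the kernel's for_each_cpu_mask() */
	#define for_each_cpu_mask(cpu, mask) \
		for ((cpu) = 0; (cpu) < NCPUS; (cpu)++) \
			if ((mask) & (1u << (cpu)))

	int main(void)
	{
		struct workqueue multi = { 0 }, single = { 1 };
		int cpu;

		cpu_populated_map = (1u << NCPUS) - 1;	/* CPUs 0..3 online */
		cpu_singlethread_map = 1u << 0;		/* like first_cpu() */

		/* both cases share one loop; no if/else per call site */
		for_each_cpu_mask(cpu, *wq_cpu_map(&multi))
			printf("multi-threaded wq: cpu %d\n", cpu);
		for_each_cpu_mask(cpu, *wq_cpu_map(&single))
			printf("single-threaded wq: cpu %d\n", cpu);
		return 0;
	}

The point of precomputing cpu_singlethread_map at init time is that the
single-threaded case stops being special: the chosen CPU is encoded as a
one-bit mask once, and every flush/cleanup path just walks whatever mask
wq_cpu_map() hands back.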