author     Linus Torvalds <torvalds@woody.osdl.org>   2006-12-16 09:53:50 -0800
committer  Linus Torvalds <torvalds@woody.osdl.org>   2006-12-16 09:53:50 -0800
commit     a08727bae727fc2ca3a6ee9506d77786b71070b3
tree       b1b42acea520c7738fc2e62476221a049f195f87 /kernel
parent     2f77d107050abc14bc393b34bdb7b91cf670c250
Make workqueue bit operations work on "atomic_long_t"
On architectures where the atomicity of the bit operations is handled by
external means (i.e. a separate spinlock to protect concurrent accesses),
just doing a direct assignment on the workqueue data field (as done by
commit 4594bf159f1962cec3b727954b7c598b07e2e737) can cause the
assignment to be lost due to lack of serialization with the bitops on
the same word.
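To make the failure mode concrete, here is a minimal sketch (not the kernel's actual implementation) of a spinlock-based set_bit(), roughly what such architectures fall back to, together with the interleaving that loses the plain store:

#include <linux/spinlock.h>

/* Simplified sketch: one global lock standing in for the hashed locks that
 * spinlock-based architectures use to make their bitops "atomic". */
static DEFINE_SPINLOCK(bitop_lock);

static void sketch_set_bit(int nr, volatile unsigned long *word)
{
	unsigned long flags;

	spin_lock_irqsave(&bitop_lock, flags);
	*word |= 1UL << nr;	/* plain read-modify-write, safe only under the lock */
	spin_unlock_irqrestore(&bitop_lock, flags);
}

/*
 * Lost-update interleaving when a plain assignment never takes the lock:
 *
 *   CPU0: sketch_set_bit(PENDING, &work->data)    CPU1: work->data = new;
 *         reads the old value of work->data
 *                                                       plain store lands here
 *         writes back (old | 1UL << PENDING)      <-- CPU1's store is overwritten
 */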
So we need to serialize the assignment with the locks on those
architectures (notably older ARM chips, PA-RISC and sparc32).
So rather than using an "unsigned long", let's use "atomic_long_t",
which already has a safe assignment operation (atomic_long_set()) on
such architectures.
This requires that the atomic operations use the same atomicity locks as
the bit operations do, but that is largely the case anyway. Sparc32
will probably need fixing.
Architectures (including modern ARM with LL/SC) that implement sane
atomic operations for SMP won't see any of this matter.
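As a rough illustration of the resulting layout (a sketch, assuming the include/linux/workqueue.h half of this change, which the diffstat below filters out, provides the work_data_bits() cast helper the diff relies on; sketch_usage() is purely illustrative):

#include <asm/atomic.h>
#include <linux/bitops.h>

struct work_struct {
	atomic_long_t data;	/* workqueue pointer and flag bits packed into one word */
	/* ... list entry, work function, ... */
};

/* Cast helper assumed here: lets the generic bitops operate on the very same
 * word that atomic_long_set()/atomic_long_read() serialize. */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

/* Illustrative only: both kinds of access now go through the architecture's
 * atomicity locks, so neither update can be lost to the other. */
static void sketch_usage(struct work_struct *work, unsigned long new)
{
	atomic_long_set(&work->data, new);	/* was: work->management = new */

	if (!test_and_set_bit(0 /* WORK_STRUCT_PENDING */, work_data_bits(work)))
		;	/* first setter of PENDING would queue the work here */
}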
Cc: Russell King <rmk+lkml@arm.linux.org.uk>
Cc: David Howells <dhowells@redhat.com>
Cc: David Miller <davem@davemloft.com>
Cc: Matthew Wilcox <matthew@wil.cx>
Cc: Linux Arch Maintainers <linux-arch@vger.kernel.org>
Cc: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/workqueue.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index db49886bfae1..742cbbe49bdc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -96,13 +96,13 @@ static inline void set_wq_data(struct work_struct *work, void *wq)
 	BUG_ON(!work_pending(work));
 
 	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
-	new |= work->management & WORK_STRUCT_FLAG_MASK;
-	work->management = new;
+	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
+	atomic_long_set(&work->data, new);
 }
 
 static inline void *get_wq_data(struct work_struct *work)
 {
-	return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
+	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
 }
 
 static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
@@ -133,7 +133,7 @@ static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work
 		list_del_init(&work->entry);
 		spin_unlock_irqrestore(&cwq->lock, flags);
 
-		if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
 			work_release(work);
 		f(work);
 
@@ -206,7 +206,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
 	int ret = 0, cpu = get_cpu();
 
-	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
 		if (unlikely(is_single_threaded(wq)))
 			cpu = singlethread_cpu;
 		BUG_ON(!list_empty(&work->entry));
@@ -248,7 +248,7 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq,
 	if (delay == 0)
 		return queue_work(wq, work);
 
-	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));
 
@@ -280,7 +280,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	struct timer_list *timer = &dwork->timer;
 	struct work_struct *work = &dwork->work;
 
-	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));
 
@@ -321,7 +321,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 		spin_unlock_irqrestore(&cwq->lock, flags);
 
 		BUG_ON(get_wq_data(work) != cwq);
-		if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
 			work_release(work);
 		f(work);