author     Trond Myklebust <Trond.Myklebust@netapp.com>  2009-03-10 20:33:16 -0400
committer  Trond Myklebust <Trond.Myklebust@netapp.com>  2009-03-10 20:33:16 -0400
commit     eb9b55ab4d73280597fd183b367d50452f4d7846 (patch)
tree       80e0c4e5315ea5fd18bb5ba9d37ec5f77170631a /net
parent     16b71fdf97599f1b1b7f38418ee9922d9f117396 (diff)
SUNRPC: Tighten up the task locking rules in __rpc_execute()
We should probably not be testing any flags after we've cleared the RPC_TASK_RUNNING flag, since rpc_make_runnable() is then free to assign the rpc_task to another workqueue, which may then destroy it.

We can fix any races with rpc_make_runnable() by ensuring that we only clear the RPC_TASK_RUNNING flag while holding the rpc_wait_queue->lock that the task is supposed to be sleeping on (and then checking whether or not the task really is sleeping).

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
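To make the ordering concrete, here is a minimal user-space analogue of the rule this patch enforces. All names below (struct task, task_release_ownership, task_make_runnable, the queued/running fields) are hypothetical stand-ins for the RPC_TASK_QUEUED/RPC_TASK_RUNNING bits and rpc_make_runnable(); only the locking pattern mirrors the patch.

#include <pthread.h>
#include <stdbool.h>

struct wait_queue {
	pthread_mutex_t lock;	/* analogue of rpc_wait_queue->lock */
};

struct task {
	bool queued;		/* task is sleeping on a wait queue */
	bool running;		/* task is owned by an executor     */
	struct wait_queue *waitq;
};

/*
 * Executor side: returns true if the task must keep running, false if
 * ownership has been released and the task may no longer be touched.
 */
static bool task_release_ownership(struct task *t)
{
	struct wait_queue *q = t->waitq;

	pthread_mutex_lock(&q->lock);
	if (!t->queued) {
		/* A waker already dequeued us: keep running. */
		pthread_mutex_unlock(&q->lock);
		return true;
	}
	/*
	 * Clearing "running" under q->lock is the whole fix: the waker
	 * below also runs under q->lock, so it can never observe
	 * running == false while the executor is still testing flags.
	 */
	t->running = false;
	pthread_mutex_unlock(&q->lock);
	return false;
}

/* Waker side: assumed to be called with q->lock already held, as in
 * the kernel's wake-up path. */
static void task_make_runnable(struct task *t)
{
	t->queued = false;
	if (t->running)
		return;		/* executor never released the task */
	t->running = true;	/* take ownership and reschedule it */
}

Because the waker tests and sets the running flag under the same lock the sleeper uses to clear it, there is no longer a window in which both sides can believe they own the task.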
Diffstat (limited to 'net')
 net/sunrpc/sched.c | 33 ++++++++++++++++++++-------------
 1 file changed, 20 insertions(+), 13 deletions(-)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 385f427bedad..ff50a0546865 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -293,11 +293,6 @@ static void rpc_make_runnable(struct rpc_task *task)
 	rpc_clear_queued(task);
 	if (rpc_test_and_set_running(task))
 		return;
-	/* We might have raced */
-	if (RPC_IS_QUEUED(task)) {
-		rpc_clear_running(task);
-		return;
-	}
 	if (RPC_IS_ASYNC(task)) {
 		int status;
 
@@ -607,7 +602,9 @@ void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
  */
 static void __rpc_execute(struct rpc_task *task)
 {
-	int		status = 0;
+	struct rpc_wait_queue *queue;
+	int task_is_async = RPC_IS_ASYNC(task);
+	int status = 0;
 
 	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
 			task->tk_pid, task->tk_flags);
@@ -647,15 +644,25 @@ static void __rpc_execute(struct rpc_task *task)
 		 */
 		if (!RPC_IS_QUEUED(task))
 			continue;
-		rpc_clear_running(task);
-		if (RPC_IS_ASYNC(task)) {
-			/* Careful! we may have raced... */
-			if (RPC_IS_QUEUED(task))
-				return;
-			if (rpc_test_and_set_running(task))
-				return;
+		/*
+		 * The queue->lock protects against races with
+		 * rpc_make_runnable().
+		 *
+		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
+		 * rpc_task, rpc_make_runnable() can assign it to a
+		 * different workqueue. We therefore cannot assume that the
+		 * rpc_task pointer may still be dereferenced.
+		 */
+		queue = task->tk_waitqueue;
+		spin_lock_bh(&queue->lock);
+		if (!RPC_IS_QUEUED(task)) {
+			spin_unlock_bh(&queue->lock);
 			continue;
 		}
+		rpc_clear_running(task);
+		spin_unlock_bh(&queue->lock);
+		if (task_is_async)
+			return;
 
 		/* sync task: sleep here */
 		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);