| author | Trond Myklebust <Trond.Myklebust@netapp.com> | 2012-01-17 22:57:37 -0500 |
|---|---|---|
| committer | Trond Myklebust <Trond.Myklebust@netapp.com> | 2012-01-31 19:28:08 -0500 |
| commit | 961a828df64979d2a9faeeeee043391670a193b9 (patch) | |
| tree | 4b7ddaf1a19c589e3c8ec96b6c732faa507f2899 /net/sunrpc/xprt.c | |
| parent | 2aeb98f498ce37742b743080fdc6c8cf64053599 (diff) | |
| download | linux-961a828df64979d2a9faeeeee043391670a193b9.tar.gz linux-961a828df64979d2a9faeeeee043391670a193b9.tar.bz2 linux-961a828df64979d2a9faeeeee043391670a193b9.zip | |
SUNRPC: Fix potential races in xprt_lock_write_next()
We have to ensure that the wake-up from the waitqueue and the assignment
of xprt->snd_task are atomic. We can do this by assigning xprt->snd_task
while still holding the waitqueue spinlock.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
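The race the patch closes is the window between rpc_wake_up_next() dropping the queue lock and the caller setting xprt->snd_task: the woken task can start running and look at xprt->snd_task before the waker has assigned it. The fix moves that assignment into a callback which rpc_wake_up_first() invokes while the waitqueue spinlock is still held. Below is a minimal userspace sketch of that hand-off pattern, not the SUNRPC code itself: the names (wait_queue, wake_first, claim_slot), the FIFO layout, and the pthread mutex standing in for the spinlock are all illustrative assumptions.

```c
/*
 * Userspace sketch (not kernel code) of the pattern the patch moves to:
 * the "wake the first waiter" helper runs a caller-supplied callback while
 * the queue lock is still held, so the callback can claim ownership (the
 * analogue of xprt->snd_task) atomically with the wake-up.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct waiter {
	struct waiter *next;
	int id;
};

struct wait_queue {
	pthread_mutex_t lock;	/* stands in for the waitqueue spinlock */
	struct waiter *head;	/* FIFO of tasks waiting to transmit */
};

struct owner {
	struct waiter *snd_task;	/* stands in for xprt->snd_task */
};

typedef bool (*wake_fn)(struct waiter *w, void *data);

/*
 * Offer the first waiter to the callback under q->lock; dequeue it only if
 * the callback accepts.  Returns true when a waiter was handed off, which
 * is how the analogous call is used in the patch below.
 */
static bool wake_first(struct wait_queue *q, wake_fn fn, void *data)
{
	bool woken = false;

	pthread_mutex_lock(&q->lock);
	if (q->head != NULL && fn(q->head, data)) {
		q->head = q->head->next;	/* dequeue only on success */
		woken = true;
	}
	pthread_mutex_unlock(&q->lock);
	return woken;
}

/* Callback: claim the transmit slot while the queue lock is held. */
static bool claim_slot(struct waiter *w, void *data)
{
	struct owner *o = data;

	o->snd_task = w;	/* assignment happens inside the locked region */
	return true;
}

int main(void)
{
	struct waiter w = { .next = NULL, .id = 1 };
	struct wait_queue q = { .lock = PTHREAD_MUTEX_INITIALIZER, .head = &w };
	struct owner o = { .snd_task = NULL };

	if (wake_first(&q, claim_slot, &o))
		printf("task %d now owns the transmit slot\n", o.snd_task->id);
	return 0;
}
```

In the kernel patch the same shape appears as __xprt_lock_write_func() and __xprt_lock_write_cong_func() passed to rpc_wake_up_first(), as shown in the diff below.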
Diffstat (limited to 'net/sunrpc/xprt.c')
-rw-r--r-- | net/sunrpc/xprt.c | 49
1 file changed, 26 insertions, 23 deletions
```diff
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index c64c0ef519b5..839f6ef2326b 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -292,54 +292,57 @@ static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
 	return retval;
 }
 
-static void __xprt_lock_write_next(struct rpc_xprt *xprt)
+static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
 {
-	struct rpc_task *task;
+	struct rpc_xprt *xprt = data;
 	struct rpc_rqst *req;
 
-	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
-		return;
-
-	task = rpc_wake_up_next(&xprt->sending);
-	if (task == NULL)
-		goto out_unlock;
-
 	req = task->tk_rqstp;
 	xprt->snd_task = task;
 	if (req) {
 		req->rq_bytes_sent = 0;
 		req->rq_ntrans++;
 	}
-	return;
+	return true;
+}
 
-out_unlock:
+static void __xprt_lock_write_next(struct rpc_xprt *xprt)
+{
+	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
+		return;
+
+	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_func, xprt))
+		return;
 	xprt_clear_locked(xprt);
 }
 
-static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
+static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
 {
-	struct rpc_task *task;
+	struct rpc_xprt *xprt = data;
 	struct rpc_rqst *req;
 
-	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
-		return;
-	if (RPCXPRT_CONGESTED(xprt))
-		goto out_unlock;
-	task = rpc_wake_up_next(&xprt->sending);
-	if (task == NULL)
-		goto out_unlock;
-
 	req = task->tk_rqstp;
 	if (req == NULL) {
 		xprt->snd_task = task;
-		return;
+		return true;
 	}
 	if (__xprt_get_cong(xprt, task)) {
 		xprt->snd_task = task;
 		req->rq_bytes_sent = 0;
 		req->rq_ntrans++;
-		return;
+		return true;
 	}
+	return false;
+}
+
+static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
+{
+	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
+		return;
+	if (RPCXPRT_CONGESTED(xprt))
+		goto out_unlock;
+	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_cong_func, xprt))
+		return;
 out_unlock:
 	xprt_clear_locked(xprt);
 }
```