path: root/net/netlink
author	Patrick McHardy <kaber@trash.net>	2007-11-07 02:42:09 -0800
committer	David S. Miller <davem@sunset.davemloft.net>	2007-11-07 04:15:12 -0800
commit	c3d8d1e30cace31fed6186a4b8c6b1401836d89c (patch)
tree	7122fccf27aa337438123071f3cb07999429de9e /net/netlink
parent	230140cffa7feae90ad50bf259db1fa07674f3a7 (diff)
download	linux-c3d8d1e30cace31fed6186a4b8c6b1401836d89c.tar.gz
	linux-c3d8d1e30cace31fed6186a4b8c6b1401836d89c.tar.bz2
	linux-c3d8d1e30cace31fed6186a4b8c6b1401836d89c.zip
[NETLINK]: Fix unicast timeouts
Commit ed6dcf4a in the history.git tree broke netlink_unicast timeouts by
moving the schedule_timeout() call to a new function that doesn't propagate
the remaining timeout back to the caller. This means on each retry we start
with the full timeout again.

ipc/mqueue.c seems to actually want to wait indefinitely, so this behaviour
is retained.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
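For illustration only, the following is a minimal user-space sketch of the bug
class this patch addresses; it is not kernel code, and the helper names
(wait_by_value, wait_by_pointer) are hypothetical. It shows why a wait helper
that takes the timeout by value leaves the caller's retry loop with the full
budget on every iteration, while taking it by pointer lets the remaining time
be consumed across retries, which is what the change to netlink_attachskb()
achieves.

/*
 * Hypothetical demo of timeout propagation; not kernel code.
 */
#include <stdio.h>

/* Buggy pattern: the remaining timeout is lost when the helper returns. */
static long wait_by_value(long timeo)
{
	return timeo - 1;	/* stand-in for schedule_timeout() */
}

/* Fixed pattern: the remaining timeout is written back to the caller. */
static void wait_by_pointer(long *timeo)
{
	*timeo -= 1;		/* caller sees the decremented budget */
}

int main(void)
{
	long timeo = 3;
	int retries;

	/* By value: timeo never shrinks, so a retry loop keyed on it could
	 * spin indefinitely; cap the iterations here just for the demo. */
	for (retries = 0; retries < 5; retries++)
		(void)wait_by_value(timeo);
	printf("by value:   timeo still %ld after %d retries\n", timeo, retries);

	/* By pointer: the budget is consumed across retries and the loop
	 * terminates once it reaches zero, as intended. */
	for (retries = 0; timeo > 0; retries++)
		wait_by_pointer(&timeo);
	printf("by pointer: timeo %ld after %d retries\n", timeo, retries);
	return 0;
}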
Diffstat (limited to 'net/netlink')
-rw-r--r--	net/netlink/af_netlink.c	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 260171255576..415c97236f63 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -752,7 +752,7 @@ struct sock *netlink_getsockbyfilp(struct file *filp)
* 1: repeat lookup - reference dropped while waiting for socket memory.
*/
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
- long timeo, struct sock *ssk)
+ long *timeo, struct sock *ssk)
{
struct netlink_sock *nlk;
@@ -761,7 +761,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
test_bit(0, &nlk->state)) {
DECLARE_WAITQUEUE(wait, current);
- if (!timeo) {
+ if (!*timeo) {
if (!ssk || netlink_is_kernel(ssk))
netlink_overrun(sk);
sock_put(sk);
@@ -775,7 +775,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
test_bit(0, &nlk->state)) &&
!sock_flag(sk, SOCK_DEAD))
- timeo = schedule_timeout(timeo);
+ *timeo = schedule_timeout(*timeo);
__set_current_state(TASK_RUNNING);
remove_wait_queue(&nlk->wait, &wait);
@@ -783,7 +783,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
if (signal_pending(current)) {
kfree_skb(skb);
- return sock_intr_errno(timeo);
+ return sock_intr_errno(*timeo);
}
return 1;
}
@@ -877,7 +877,7 @@ retry:
if (netlink_is_kernel(sk))
return netlink_unicast_kernel(sk, skb);
- err = netlink_attachskb(sk, skb, nonblock, timeo, ssk);
+ err = netlink_attachskb(sk, skb, nonblock, &timeo, ssk);
if (err == 1)
goto retry;
if (err)