author     Arnaldo Carvalho de Melo <acme@mandriva.com>   2005-08-23 21:54:23 -0700
committer  David S. Miller <davem@sunset.davemloft.net>   2005-08-29 16:05:45 -0700
commit     331968bd0c1b2437f3ad773cbf55f2e0737bafc0 (patch)
tree       ec9c5aeaa5217c8ce009a7e5e07a60c3a390e021 /net
parent     8efa544f9c84919c047dc2f96e308c902e8fb1a4 (diff)
[DCCP]: Initial dccp_poll implementation
Tested with a patched netcat, no horror stories so far 8)

Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
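With this hook in place, poll()/select() on a DCCP socket report real readiness instead of the unconditional 0 from the sock_no_poll placeholder it replaces. Below is a minimal userspace sketch of the kind of check a patched netcat might do; the SOCK_DCCP/IPPROTO_DCCP fallback defines and the 127.0.0.1:5001 endpoint are illustrative assumptions, not part of this patch.

/*
 * Hedged userspace sketch, not part of the patch: exercises the new
 * ->poll handler on a connected DCCP socket.
 */
#include <stdio.h>
#include <unistd.h>
#include <poll.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#ifndef SOCK_DCCP
#define SOCK_DCCP      6        /* may be missing from older libc headers */
#endif
#ifndef IPPROTO_DCCP
#define IPPROTO_DCCP   33
#endif

int main(void)
{
        struct sockaddr_in sa = { .sin_family = AF_INET,
                                  .sin_port   = htons(5001) };
        struct pollfd pfd;
        int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        inet_pton(AF_INET, "127.0.0.1", &sa.sin_addr);
        if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
                perror("connect");
                return 1;
        }

        pfd.fd     = fd;
        pfd.events = POLLIN | POLLOUT;

        /* Blocks in dccp_poll() via the VFS poll machinery until the peer
         * sends data, the send buffer has room, or the connection closes. */
        if (poll(&pfd, 1, -1) > 0)
                printf("revents: 0x%x\n", (unsigned int)pfd.revents);

        close(fd);
        return 0;
}

For a listening socket the same poll() call is answered by the inet_csk_listen_poll() branch in dccp_poll() instead.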
Diffstat (limited to 'net')
-rw-r--r--  net/dccp/input.c |  1
-rw-r--r--  net/dccp/proto.c | 59
2 files changed, 59 insertions(+), 1 deletion(-)
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 02af05ec23a2..ef29cef1dafe 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -34,6 +34,7 @@ static void dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
dccp_v4_send_reset(sk, DCCP_RESET_CODE_CLOSED);
dccp_fin(sk, skb);
dccp_set_state(sk, DCCP_CLOSED);
+ sk_wake_async(sk, 1, POLL_HUP);
}
static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 2b6db18e607f..600dda51d995 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -140,6 +140,62 @@ int dccp_disconnect(struct sock *sk, int flags)
return err;
}
+/*
+ * Wait for a DCCP event.
+ *
+ * Note that we don't need to lock the socket, as the upper poll layers
+ * take care of normal races (between the test and the event) and we don't
+ * go look at any of the socket buffers directly.
+ */
+static unsigned int dccp_poll(struct file *file, struct socket *sock,
+                              poll_table *wait)
+{
+        unsigned int mask;
+        struct sock *sk = sock->sk;
+
+        poll_wait(file, sk->sk_sleep, wait);
+        if (sk->sk_state == DCCP_LISTEN)
+                return inet_csk_listen_poll(sk);
+
+        /* Socket is not locked. We are protected from async events
+           by poll logic and correct handling of state changes
+           made by other threads is impossible in any case.
+         */
+
+        mask = 0;
+        if (sk->sk_err)
+                mask = POLLERR;
+
+        if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
+                mask |= POLLHUP;
+        if (sk->sk_shutdown & RCV_SHUTDOWN)
+                mask |= POLLIN | POLLRDNORM;
+
+        /* Connected? */
+        if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
+                if (atomic_read(&sk->sk_rmem_alloc) > 0)
+                        mask |= POLLIN | POLLRDNORM;
+
+                if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
+                        if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
+                                mask |= POLLOUT | POLLWRNORM;
+                        } else {  /* send SIGIO later */
+                                set_bit(SOCK_ASYNC_NOSPACE,
+                                        &sk->sk_socket->flags);
+                                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+
+                                /* Race breaker. If space is freed after
+                                 * wspace test but before the flags are set,
+                                 * IO signal will be lost.
+                                 */
+                                if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
+                                        mask |= POLLOUT | POLLWRNORM;
+                        }
+                }
+        }
+        return mask;
+}
+
int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
dccp_pr_debug("entry\n");
@@ -478,7 +534,8 @@ static struct proto_ops inet_dccp_ops = {
.socketpair = sock_no_socketpair,
.accept = inet_accept,
.getname = inet_getname,
- .poll = sock_no_poll,
+ /* FIXME: work on tcp_poll to rename it to inet_csk_poll */
+ .poll = dccp_poll,
.ioctl = inet_ioctl,
/* FIXME: work on inet_listen to rename it to sock_common_listen */
.listen = inet_dccp_listen,
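The one-line change on the input.c side, sk_wake_async(sk, 1, POLL_HUP) in dccp_rcv_close(), is what lets signal-driven I/O users hear about a remote close. A hedged userspace sketch of that side follows; the fallback defines and the handler body are assumptions, not taken from the patch.

/*
 * Hedged sketch, also not from the patch: signal-driven I/O on a DCCP
 * socket.  A close from the peer reaches dccp_rcv_close(), whose new
 * sk_wake_async(sk, 1, POLL_HUP) call raises SIGIO against the owner
 * registered below.
 */
#define _GNU_SOURCE            /* for O_ASYNC under strict feature-test macros */
#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef SOCK_DCCP
#define SOCK_DCCP      6
#endif
#ifndef IPPROTO_DCCP
#define IPPROTO_DCCP   33
#endif

static volatile sig_atomic_t got_sigio;

static void on_sigio(int sig)
{
        (void)sig;
        got_sigio = 1;  /* inspect the socket with poll()/recv() outside the handler */
}

int main(void)
{
        int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        signal(SIGIO, on_sigio);
        fcntl(fd, F_SETOWN, getpid());                    /* route SIGIO here */
        fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC); /* enable async notification */

        /* ... connect and exchange data; a remote close ends up delivering
         * SIGIO via sk_wake_async() ... */
        pause();

        close(fd);
        return got_sigio ? 0 : 1;
}

As the FIXME next to the new .poll entry hints, dccp_poll() is essentially tcp_poll() with the DCCP states and flags substituted, which is also why the SOCK_ASYNC_NOSPACE/SOCK_NOSPACE race breaker is carried over unchanged.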