author	Herbert Xu <herbert@gondor.apana.org.au>	2005-09-01 17:48:59 -0700
committer	David S. Miller <davem@davemloft.net>	2005-09-01 17:48:59 -0700
commit	ef015786152adaff5a6a8bf0c8ea2f70cee8059d (patch)
tree	3042db7e451c61aefc60c1463bb6e307ca510638
parent	d80d99d643090c3cf2b1f9fb3fadd1256f7e384f (diff)
[TCP]: Fix sk_forward_alloc underflow in tcp_sendmsg
I've finally found a potential cause of the sk_forward_alloc underflows
that people have been reporting sporadically.

When tcp_sendmsg tacks on extra bits to an existing TCP_PAGE we don't
check sk_forward_alloc even though a large amount of time may have
elapsed since we allocated the page.  In the meantime someone could've
come along and liberated packets and reclaimed sk_forward_alloc memory.

This patch makes tcp_sendmsg check sk_forward_alloc every time as we
do in do_tcp_sendpages.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
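The failure mode is easier to see in a toy model. Below is a minimal,
self-contained userspace sketch, not kernel code: forward_alloc,
wmem_schedule() and charge_copy() are illustrative stand-ins for
sk->sk_forward_alloc, sk_stream_wmem_schedule() and the tcp_sendmsg copy
path. It shows how appending to a long-lived cached page without
re-checking the quota drives the counter negative:

	/*
	 * Minimal userspace model of the race (illustrative only).
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096L

	static long forward_alloc;	/* bytes pre-charged to the socket */

	/* Top up the pre-charged quota in page-sized units until "bytes" fit. */
	static bool wmem_schedule(long bytes)
	{
		while (forward_alloc < bytes)
			forward_alloc += PAGE_SIZE;
		return true;		/* the real helper can fail under pressure */
	}

	/* Consume quota for a copy; goes negative if the check was skipped. */
	static void charge_copy(long bytes)
	{
		forward_alloc -= bytes;
		if (forward_alloc < 0)
			printf("underflow: forward_alloc = %ld\n", forward_alloc);
	}

	int main(void)
	{
		wmem_schedule(PAGE_SIZE);	/* page allocated, quota charged once */
		charge_copy(1000);		/* first append is covered */

		forward_alloc = 0;		/* meanwhile: ACKs liberated packets
						 * and the quota was reclaimed */

		charge_copy(1000);		/* buggy path: second append without
						 * re-running wmem_schedule() */
		return 0;
	}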
-rw-r--r--	include/net/sock.h	5
-rw-r--r--	net/ipv4/tcp.c	14
2 files changed, 11 insertions(+), 8 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index e51e626e9af1..cf628261da52 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1232,9 +1232,8 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk)
 {
 	struct page *page = NULL;
 
-	if (sk_stream_wmem_schedule(sk, PAGE_SIZE))
-		page = alloc_pages(sk->sk_allocation, 0);
-	else {
+	page = alloc_pages(sk->sk_allocation, 0);
+	if (!page) {
 		sk->sk_prot->enter_memory_pressure();
 		sk_stream_moderate_sndbuf(sk);
 	}
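With the accounting check hoisted into the caller, the helper's job
shrinks to allocation plus failure handling. A rough userspace rendering
of the new control flow (stream_alloc_page, enter_memory_pressure and
moderate_sndbuf are stand-in names, not the kernel API):

	#include <stdlib.h>

	/* Stand-ins for the kernel hooks (illustrative only). */
	static void enter_memory_pressure(void) { /* flag global TCP pressure */ }
	static void moderate_sndbuf(void)       { /* shrink the send buffer goal */ }

	/* New shape of sk_stream_alloc_page(): allocate unconditionally and
	 * handle failure; the caller now charges sk_forward_alloc per copy. */
	static void *stream_alloc_page(void)
	{
		void *page = malloc(4096);	/* models alloc_pages(allocation, 0) */

		if (!page) {
			enter_memory_pressure();
			moderate_sndbuf();
		}
		return page;
	}

	int main(void)
	{
		void *page = stream_alloc_page();

		free(page);
		return 0;
	}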
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 854f6d0c4bb3..cbcc9fc47783 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -769,19 +769,23 @@ new_segment:
 			if (off == PAGE_SIZE) {
 				put_page(page);
 				TCP_PAGE(sk) = page = NULL;
+				TCP_OFF(sk) = off = 0;
 			}
-		}
+		} else
+			BUG_ON(off);
+
+		if (copy > PAGE_SIZE - off)
+			copy = PAGE_SIZE - off;
+
+		if (!sk_stream_wmem_schedule(sk, copy))
+			goto wait_for_memory;
 
 		if (!page) {
 			/* Allocate new cache page. */
 			if (!(page = sk_stream_alloc_page(sk)))
 				goto wait_for_memory;
-			off = 0;
 		}
 
-		if (copy > PAGE_SIZE - off)
-			copy = PAGE_SIZE - off;
-
 		/* Time to copy data. We are close to
 		 * the end! */
 		err = skb_copy_to_page(sk, from, skb, page,
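Putting the tcp.c hunk together: the fixed path clamps the copy, charges
the quota, and only then allocates, in that order. A hedged,
self-contained sketch of that ordering (prepare_page_copy() and its
helpers are invented names, not kernel functions):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdlib.h>

	#define PAGE_SIZE 4096

	/* Stand-ins (illustrative only; always succeed in this model). */
	static bool wmem_schedule(size_t bytes) { (void)bytes; return true; }
	static void *stream_alloc_page(void)    { return malloc(PAGE_SIZE); }

	/* Order of operations after the patch: clamp, charge, then allocate.
	 * Returns 0 on success, -1 where the kernel jumps to wait_for_memory. */
	static int prepare_page_copy(void **page, size_t off, size_t *copy)
	{
		/* 1. Clamp the copy to the space left in the current page. */
		if (*copy > PAGE_SIZE - off)
			*copy = PAGE_SIZE - off;

		/* 2. Charge sk_forward_alloc for every append, however long
		 *    the cached page has been sitting around. */
		if (!wmem_schedule(*copy))
			return -1;

		/* 3. Only then allocate a fresh page if none is cached. */
		if (!*page) {
			*page = stream_alloc_page();
			if (!*page)
				return -1;
		}
		return 0;
	}

	int main(void)
	{
		void *page = NULL;
		size_t copy = 6000;

		if (prepare_page_copy(&page, 0, &copy) == 0)
			free(page);	/* copy is now clamped to PAGE_SIZE */
		return 0;
	}

This also shows why the old ordering was unsafe: clamping and charging
happened after the cached-page check, so reusing an existing page
bypassed the charge entirely.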