author     Eric Dumazet <edumazet@google.com>     2019-04-19 16:02:03 -0700
committer  David S. Miller <davem@davemloft.net>  2019-04-19 16:42:33 -0700
commit     d7cc399e1227e74e44f78847d9732a228b46cc91 (patch)
tree       d83b970c017bc2c96286be18e81e20dd43eaaf86 /net/ipv4/tcp.c
parent     0a9798c123d0eee43e55cc9361b0c10314bb2250 (diff)
tcp: properly reset skb->truesize for tx recycling
tcp sendmsg() and sendpage() normally advance skb->data_len and skb->truesize by the payload added to an skb.

But sendmsg(fd, ..., MSG_ZEROCOPY) has to account for whole pages, even if a single byte of payload is used in the page.

This means that we can not assume skb->truesize can be adjusted by skb->data_len. We must instead overwrite its value. Otherwise skb->truesize is too big and can hit socket sndbuf limit, especially if the skb is recycled multiple times :/

Fixes: 472c2e07eef0 ("tcp: add one skb cache for tx")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Soheil Hassas Yeganeh <soheil@google.com>
Cc: Willem de Bruijn <willemb@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
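To make the accounting mismatch concrete, here is a minimal userspace sketch of the arithmetic described above. It is not kernel code: SKB_BASE (a stand-in for the fixed per-skb overhead) and the one-byte MSG_ZEROCOPY payload are illustrative assumptions.

	/* Illustrative model of the truesize accounting bug, not kernel code.
	 * SKB_BASE and the 1-byte zerocopy payload are assumed values. */
	#include <stdio.h>

	#define PAGE_SZ  4096
	#define SKB_BASE  704   /* assumed fixed per-skb overhead */

	int main(void)
	{
		/* A 1-byte MSG_ZEROCOPY send still pins a whole page,
		 * so truesize grows by PAGE_SZ while data_len grows by 1. */
		unsigned int data_len = 1;
		unsigned int truesize = SKB_BASE + PAGE_SZ;

		/* Buggy recycle: subtract only the payload bytes. */
		unsigned int buggy = truesize - data_len;   /* 704 + 4095 = 4799 */

		/* Fixed recycle: overwrite with the skb's real footprint. */
		unsigned int fixed = SKB_BASE;              /* back to baseline */

		printf("buggy recycled truesize: %u\n", buggy);
		printf("fixed recycled truesize: %u\n", fixed);
		return 0;
	}

Each recycle of the cached skb leaves the leftover PAGE_SZ - 1 bytes charged against the socket, so the error compounds until the sndbuf limit is hit.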
Diffstat (limited to 'net/ipv4/tcp.c')
 net/ipv4/tcp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 603e770d59b3..f7567a3698eb 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -868,7 +868,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
 	if (likely(!size)) {
 		skb = sk->sk_tx_skb_cache;
 		if (skb && !skb_cloned(skb)) {
-			skb->truesize -= skb->data_len;
+			skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
 			sk->sk_tx_skb_cache = NULL;
 			pskb_trim(skb, 0);
 			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
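For reference, SKB_TRUESIZE() derives an skb's memory footprint from its linear buffer size; in this era of the kernel it is defined in include/linux/skbuff.h as:

	#define SKB_TRUESIZE(X) ((X) +						\
				 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

After pskb_trim(skb, 0) the recycled skb carries no paged data, so its true cost is exactly its linear buffer (skb_end_offset(skb)) plus the fixed struct overhead, which is precisely what the new assignment charges regardless of how the skb was used before.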