author     Eric Dumazet <edumazet@google.com>      2021-09-22 19:26:43 +0200
committer  David S. Miller <davem@davemloft.net>   2021-09-23 12:50:26 +0100
commit     d8b81175e412c7abebdb5b37d8a84d5fd19b1aad (patch)
tree       1976a34b63ebd315e6741ceb3bf582fe0e63a4bc /net/ipv6
parent     ff6fb083a07f1b71fc6a9438f27113d73cf23381 (diff)
tcp: remove sk_{tr}x_skb_cache
This reverts the following patches:
- commit 2e05fcae83c4 ("tcp: fix compile error if !CONFIG_SYSCTL")
- commit 4f661542a402 ("tcp: fix zerocopy and notsent_lowat issues")
- commit 472c2e07eef0 ("tcp: add one skb cache for tx")
- commit 8b27dae5a2e8 ("tcp: add one skb cache for rx")
Having a cache of one skb (in each direction) per TCP socket is fragile:
it can cause a significant increase in memory needs, and it is not good
enough anyway for high-speed flows, where more than one skb is needed.
Instead, we want to add a generic infrastructure with more flexible
per-cpu caches that also cover alien NUMA nodes.
Acked-by: Paolo Abeni <pabeni@redhat.com>
Acked-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
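
For context, the rx half of the cache worked roughly as sketched below: when
the TCP receive path consumed an skb, sk_eat_skb() could park it in the single
sk->sk_rx_skb_cache slot instead of freeing it, and tcp_v6_rcv() later drained
that slot outside the socket lock (the drain side is visible in the diff
below). This is a condensed illustration, not the verbatim reverted code; the
real helper was also gated by a sysctl-controlled static branch.

/* Condensed sketch of the reverted rx-side cache (not verbatim). */
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
        __skb_unlink(skb, &sk->sk_receive_queue);
        if (!sk->sk_rx_skb_cache) {
                /* Single slot: at most one skb can be parked per socket. */
                sk->sk_rx_skb_cache = skb;
                skb_orphan(skb);
                return;
        }
        __kfree_skb(skb);       /* slot already occupied, free normally */
}

With a single slot per socket, a busy flow still has to allocate and free
almost every skb, while an idle socket can keep one skb pinned, which is the
memory and performance fragility the changelog describes.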
Diffstat (limited to 'net/ipv6')
-rw-r--r--   net/ipv6/tcp_ipv6.c   6
1 file changed, 0 insertions, 6 deletions
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 0ce52d46e4f8..8cf5ff2e9504 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1618,7 +1618,6 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
 
 INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
 {
-	struct sk_buff *skb_to_free;
 	int sdif = inet6_sdif(skb);
 	int dif = inet6_iif(skb);
 	const struct tcphdr *th;
@@ -1754,17 +1753,12 @@ process:
 	tcp_segs_in(tcp_sk(sk), skb);
 	ret = 0;
 	if (!sock_owned_by_user(sk)) {
-		skb_to_free = sk->sk_rx_skb_cache;
-		sk->sk_rx_skb_cache = NULL;
 		ret = tcp_v6_do_rcv(sk, skb);
 	} else {
 		if (tcp_add_backlog(sk, skb))
 			goto discard_and_relse;
-		skb_to_free = NULL;
 	}
 	bh_unlock_sock(sk);
-	if (skb_to_free)
-		__kfree_skb(skb_to_free);
 put_and_return:
 	if (refcounted)
 		sock_put(sk);
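
The tx half of the cache (sk->sk_tx_skb_cache) is removed by the same commit
in files outside net/ipv6, so it does not show up in this diffstat. Roughly,
and again as a condensed illustration rather than the verbatim reverted code,
that side parked a just-freed skb on the socket so the next transmit
allocation could reuse it instead of going back to the allocator:

/* Condensed sketch of the reverted tx-side cache (not verbatim). */
static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
        sk_wmem_queued_add(sk, -skb->truesize);
        sk_mem_uncharge(sk, skb->truesize);
        if (!sk->sk_tx_skb_cache && !skb_cloned(skb)) {
                sk->sk_tx_skb_cache = skb;      /* park for reuse by the next alloc */
                return;
        }
        __kfree_skb(skb);
}

As on the rx side, a single per-socket slot cannot keep up with a high-speed
flow that has many skbs in flight, while every otherwise idle socket can hold
one extra skb, hence the revert in favor of a future generic per-cpu cache.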