Diffstat (limited to 'target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch')
-rw-r--r--  target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch  14
1 file changed, 7 insertions, 7 deletions
diff --git a/target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch b/target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch
index 4ac95d3ee4..3ebf2755a2 100644
--- a/target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch
+++ b/target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch
@@ -58,7 +58,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
goto out;
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
-@@ -773,14 +773,15 @@ static void tcp_tasklet_func(unsigned lo
+@@ -775,14 +775,15 @@ static void tcp_tasklet_func(unsigned lo
list_for_each_safe(q, n, &list) {
tp = list_entry(q, struct tcp_sock, tsq_node);
list_del(&tp->tsq_node);
@@ -77,7 +77,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
tcp_tsq_handler(sk);
}
bh_unlock_sock(sk);
-@@ -803,16 +804,15 @@ static void tcp_tasklet_func(unsigned lo
+@@ -805,16 +806,15 @@ static void tcp_tasklet_func(unsigned lo
*/
void tcp_release_cb(struct sock *sk)
{
@@ -96,7 +96,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
if (flags & TCPF_TSQ_DEFERRED)
tcp_tsq_handler(sk);
-@@ -884,7 +884,7 @@ void tcp_wfree(struct sk_buff *skb)
+@@ -886,7 +886,7 @@ void tcp_wfree(struct sk_buff *skb)
if (wmem >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
goto out;
@@ -105,7 +105,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
struct tsq_tasklet *tsq;
bool empty;
-@@ -892,7 +892,7 @@ void tcp_wfree(struct sk_buff *skb)
+@@ -894,7 +894,7 @@ void tcp_wfree(struct sk_buff *skb)
goto out;
nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED | TCPF_TSQ_DEFERRED;
@@ -114,7 +114,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
if (nval != oval)
continue;
-@@ -2151,7 +2151,7 @@ static bool tcp_small_queue_check(struct
+@@ -2153,7 +2153,7 @@ static bool tcp_small_queue_check(struct
skb->prev == sk->sk_write_queue.next)
return false;
@@ -123,7 +123,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
/* It is possible TX completion already happened
* before we set TSQ_THROTTLED, so we must
* test again the condition.
-@@ -2249,8 +2249,8 @@ static bool tcp_write_xmit(struct sock *
+@@ -2251,8 +2251,8 @@ static bool tcp_write_xmit(struct sock *
unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
break;
@@ -134,7 +134,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
if (tcp_small_queue_check(sk, skb, 0))
break;
-@@ -3569,8 +3569,6 @@ void __tcp_send_ack(struct sock *sk, u32
+@@ -3574,8 +3574,6 @@ void __tcp_send_ack(struct sock *sk, u32
/* We do not want pure acks influencing TCP Small Queues or fq/pacing
* too much.
* SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784