path: root/net/sched/sch_generic.c
author	Wei Yongjun <weiyongjun1@huawei.com>	2017-12-27 17:05:52 +0800
committer	David S. Miller <davem@davemloft.net>	2018-01-02 13:48:29 -0500
commit	9540d977618c31586035870a56bd2d1cc2b4a9ba (patch)
tree	bf212df68278a7556241877a3358df4aa0635f4c /net/sched/sch_generic.c
parent	a9add1944eb1432c28087eba2b5637657a90daa3 (diff)
net: sched: fix skb leak in dev_requeue_skb()
When dev_requeue_skb() is called with a bulked skb list, only the first skb of the list is requeued to the qdisc layer; the others are leaked without being freed. TCP is broken by this skb leak, since the unfreed skbs are treated as still being in the host queue and are never retransmitted. This happens when dev_requeue_skb() is called from qdisc_restart():

qdisc_restart
  |-- dequeue_skb
  |-- sch_direct_xmit()
        |-- dev_requeue_skb() <-- skb may be bulked

Fix dev_requeue_skb() to requeue the full bulked list. Also change __dev_requeue_skb() to use __skb_queue_tail(), so that the skbs are not reordered.

Fixes: a53851e2c321 ("net: sched: explicit locking in gso_cpu fallback")
Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
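A minimal user-space sketch of the requeue pattern this fix introduces (not kernel code: struct pkt, struct pkt_queue, and queue_tail() are simplified stand-ins for sk_buff, the gso_skb queue, and __skb_queue_tail()). The point it illustrates is that a bulked list is chained through ->next, so every entry has to be put back on the queue, not only the head.

/*
 * Sketch only: a bulked packet list chained through ->next is walked
 * and every entry is tail-inserted back onto the queue, mirroring the
 * loop added to __dev_requeue_skb() in this patch.
 */
#include <stddef.h>

struct pkt {
	struct pkt *next;	/* bulked packets are chained through ->next */
	unsigned int len;
};

struct pkt_queue {
	struct pkt *head;
	struct pkt *tail;
	unsigned int qlen;
	unsigned int backlog;
};

static void queue_tail(struct pkt_queue *q, struct pkt *p)
{
	p->next = NULL;
	if (q->tail)
		q->tail->next = p;
	else
		q->head = p;
	q->tail = p;
}

/* Requeue the whole bulked list, not just the first entry. */
static void requeue_list(struct pkt_queue *q, struct pkt *p)
{
	while (p) {
		struct pkt *next = p->next;	/* save before the link is rewritten */

		queue_tail(q, p);		/* tail insertion keeps the original order */
		q->qlen++;
		q->backlog += p->len;

		p = next;
	}
}

Saving ->next before inserting matters because the insertion rewrites the link; using tail insertion rather than head insertion preserves packet order, which is the second change made by the patch.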
Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--	net/sched/sch_generic.c	29
1 file changed, 21 insertions(+), 8 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index cc069b2acf0e..a883c501d5ec 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -112,10 +112,16 @@ static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
static inline int __dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
- __skb_queue_head(&q->gso_skb, skb);
- q->qstats.requeues++;
- qdisc_qstats_backlog_inc(q, skb);
- q->q.qlen++; /* it's still part of the queue */
+ while (skb) {
+ struct sk_buff *next = skb->next;
+
+ __skb_queue_tail(&q->gso_skb, skb);
+ q->qstats.requeues++;
+ qdisc_qstats_backlog_inc(q, skb);
+ q->q.qlen++; /* it's still part of the queue */
+
+ skb = next;
+ }
__netif_schedule(q);
return 0;
@@ -126,12 +132,19 @@ static inline int dev_requeue_skb_locked(struct sk_buff *skb, struct Qdisc *q)
spinlock_t *lock = qdisc_lock(q);
spin_lock(lock);
- __skb_queue_tail(&q->gso_skb, skb);
+ while (skb) {
+ struct sk_buff *next = skb->next;
+
+ __skb_queue_tail(&q->gso_skb, skb);
+
+ qdisc_qstats_cpu_requeues_inc(q);
+ qdisc_qstats_cpu_backlog_inc(q, skb);
+ qdisc_qstats_cpu_qlen_inc(q);
+
+ skb = next;
+ }
spin_unlock(lock);
- qdisc_qstats_cpu_requeues_inc(q);
- qdisc_qstats_cpu_backlog_inc(q, skb);
- qdisc_qstats_cpu_qlen_inc(q);
__netif_schedule(q);
return 0;