author     Eric Dumazet <edumazet@google.com>      2016-06-04 12:55:13 -0700
committer  David S. Miller <davem@davemloft.net>   2016-06-07 14:49:56 -0700
commit     80e509db54c81247b32fcb75bb1730fc789b893d (patch)
tree       6252613ac72781d36051be76b3248cce6f9c3e9a /net/sched/sch_fq_codel.c
parent     5b6c1b4d46b0dae4edea636a776d09f2064f4cd7 (diff)
fq_codel: fix NET_XMIT_CN behavior
My prior attempt to fix the backlogs of parents failed. If we return NET_XMIT_CN, our parents won't increase their backlog, so our qdisc_tree_reduce_backlog() call should take this into account.

v2: Florian Westphal pointed out that we could drop the packet, so we need to save qdisc_pkt_len(skb) in a temp variable before calling fq_codel_drop().

Fixes: 9d18562a2278 ("fq_codel: add batch ability to fq_codel_drop()")
Fixes: 2ccccf5fb43f ("net_sched: update hierarchical backlog too")
Reported-by: Stas Nichiporovich <stasn77@gmail.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: WANG Cong <xiyou.wangcong@gmail.com>
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
Acked-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
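For context on why the CN case needs its own accounting: when a child qdisc's enqueue returns NET_XMIT_CN, the parent treats the skb as never having been enqueued and skips its own qlen/backlog bookkeeping, so the child must not ask the hierarchy to subtract that packet via qdisc_tree_reduce_backlog() either. Below is a minimal sketch of the pattern classful parents of this era follow (simplified from qdiscs such as HTB; parent_enqueue() and the "child" argument are placeholder names, not code from this commit):

/* Sketch only: simplified parent-qdisc enqueue pattern.
 * parent_enqueue() and "child" are placeholders.
 */
static int parent_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                          struct Qdisc *child)
{
        int ret = qdisc_enqueue(skb, child);

        if (ret != NET_XMIT_SUCCESS) {
                /* NET_XMIT_CN or NET_XMIT_DROP: the child already freed
                 * the skb; the parent never counted it, so there is
                 * nothing to roll back here.
                 */
                if (net_xmit_drop_count(ret))
                        qdisc_qstats_drop(sch);
                return ret;
        }
        /* Only on success does the parent account for the packet. */
        qdisc_qstats_backlog_inc(sch, skb);
        sch->q.qlen++;
        return NET_XMIT_SUCCESS;
}

Because of this asymmetry, the patch excludes one packet (of the saved pkt_len) from the qdisc_tree_reduce_backlog() call when it is about to return NET_XMIT_CN: the parents never added that packet, so they must not be told to remove it.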
Diffstat (limited to 'net/sched/sch_fq_codel.c')
-rw-r--r--  net/sched/sch_fq_codel.c  | 24 ++++++++++++++++++------
1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 6883a8971562..fff7867f4a4f 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -199,6 +199,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
unsigned int idx, prev_backlog, prev_qlen;
struct fq_codel_flow *flow;
int uninitialized_var(ret);
+ unsigned int pkt_len;
bool memory_limited;

idx = fq_codel_classify(skb, sch, &ret);
@@ -230,6 +231,8 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
prev_backlog = sch->qstats.backlog;
prev_qlen = sch->q.qlen;

+ /* save this packet length as it might be dropped by fq_codel_drop() */
+ pkt_len = qdisc_pkt_len(skb);
/* fq_codel_drop() is quite expensive, as it performs a linear search
* in q->backlogs[] to find a fat flow.
* So instead of dropping a single packet, drop half of its backlog
@@ -237,14 +240,23 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
*/
ret = fq_codel_drop(sch, q->drop_batch_size);
- q->drop_overlimit += prev_qlen - sch->q.qlen;
+ prev_qlen -= sch->q.qlen;
+ prev_backlog -= sch->qstats.backlog;
+ q->drop_overlimit += prev_qlen;
if (memory_limited)
- q->drop_overmemory += prev_qlen - sch->q.qlen;
- /* As we dropped packet(s), better let upper stack know this */
- qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen,
- prev_backlog - sch->qstats.backlog);
+ q->drop_overmemory += prev_qlen;
- return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS;
+ /* As we dropped packet(s), better let upper stack know this.
+ * If we dropped a packet for this flow, return NET_XMIT_CN,
+ * but in this case, our parents wont increase their backlogs.
+ */
+ if (ret == idx) {
+ qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
+ prev_backlog - pkt_len);
+ return NET_XMIT_CN;
+ }
+ qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
+ return NET_XMIT_SUCCESS;
}
/* This is the specific function called from codel_dequeue()
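For readability, here is the overlimit tail of fq_codel_enqueue() as it reads with this patch applied, reassembled from the hunks above (surrounding declarations and classification code are elided; this is a reading aid, not an additional change):

        ret = fq_codel_drop(sch, q->drop_batch_size);

        /* prev_qlen and prev_backlog now become deltas: how many
         * packets and bytes fq_codel_drop() removed.
         */
        prev_qlen -= sch->q.qlen;
        prev_backlog -= sch->qstats.backlog;
        q->drop_overlimit += prev_qlen;
        if (memory_limited)
                q->drop_overmemory += prev_qlen;

        /* As we dropped packet(s), better let upper stack know this.
         * If we dropped a packet for this flow, return NET_XMIT_CN,
         * but in this case, our parents wont increase their backlogs.
         */
        if (ret == idx) {
                qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
                                          prev_backlog - pkt_len);
                return NET_XMIT_CN;
        }
        qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
        return NET_XMIT_SUCCESS;

fq_codel_drop() returns the index of the flow it dropped from, so ret == idx means the congested flow is the same one the incoming skb was classified into. pkt_len has to be saved before the call because the dropped batch may include that very skb, after which qdisc_pkt_len(skb) would be a use after free.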