author	Vladimir Oltean <vladimir.oltean@nxp.com>	2023-02-07 15:54:40 +0200
committer	David S. Miller <davem@davemloft.net>	2023-02-08 09:48:53 +0000
commit	39b02d6d104a285836d98be2ad00c7f484d43a16
tree	f826db8f5546c5c07d4683750ec25e81b501097a /net
parent	2d5e8071c47a03218f3f658ed13b8a9ff703b396
net/sched: taprio: don't segment unnecessarily
Improve commit 497cc00224cf ("taprio: Handle short intervals and large
packets") to only perform segmentation when skb->len exceeds what
taprio_dequeue() expects.
In practice, this will make the biggest difference when a traffic class
gate is always open in the schedule. This is because max_frm_len will
then be U32_MAX, so even skb->len values as large as the ones Kurt
reported will be sent just fine unsegmented.
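For context, the check referenced above could look roughly like the
sketch below. This is illustrative only, not the exact helper from
sch_taprio.c: the taprio_sched field holding the per-tc limit
(max_frm_len here) is assumed from the description above.

static bool taprio_skb_exceeds_queue_max_sdu(struct Qdisc *sch,
					     struct sk_buff *skb)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	u8 tc = netdev_get_prio_tc_map(dev, skb->priority);

	/* An always-open gate leaves max_frm_len[tc] at U32_MAX, so no
	 * skb->len can exceed it and segmentation is never triggered.
	 */
	return skb->len > q->max_frm_len[tc];
}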
What I don't yet know how to handle is making sure that the segmented
skbs themselves are smaller than the maximum frame size given by the
current queueMaxSDU[tc]. Nonetheless, we still need to drop those
oversized segments, otherwise the Qdisc will hang.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--	net/sched/sch_taprio.c	31
1 file changed, 20 insertions(+), 11 deletions(-)
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 839beb599f55..9781b47962bb 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -566,9 +566,6 @@ static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
 		return qdisc_drop(skb, sch, to_free);
 	}
 
-	if (taprio_skb_exceeds_queue_max_sdu(sch, skb))
-		return qdisc_drop(skb, sch, to_free);
-
 	qdisc_qstats_backlog_inc(sch, skb);
 	sch->q.qlen++;
 
@@ -593,7 +590,14 @@ static int taprio_enqueue_segmented(struct sk_buff *skb, struct Qdisc *sch,
 		qdisc_skb_cb(segs)->pkt_len = segs->len;
 		slen += segs->len;
 
-		ret = taprio_enqueue_one(segs, sch, child, to_free);
+		/* FIXME: we should be segmenting to a smaller size
+		 * rather than dropping these
+		 */
+		if (taprio_skb_exceeds_queue_max_sdu(sch, segs))
+			ret = qdisc_drop(segs, sch, to_free);
+		else
+			ret = taprio_enqueue_one(segs, sch, child, to_free);
+
 		if (ret != NET_XMIT_SUCCESS) {
 			if (net_xmit_drop_count(ret))
 				qdisc_qstats_drop(sch);
@@ -625,13 +629,18 @@ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	if (unlikely(!child))
 		return qdisc_drop(skb, sch, to_free);
 
-	/* Large packets might not be transmitted when the transmission duration
-	 * exceeds any configured interval. Therefore, segment the skb into
-	 * smaller chunks. Drivers with full offload are expected to handle
-	 * this in hardware.
-	 */
-	if (skb_is_gso(skb))
-		return taprio_enqueue_segmented(skb, sch, child, to_free);
+	if (taprio_skb_exceeds_queue_max_sdu(sch, skb)) {
+		/* Large packets might not be transmitted when the transmission
+		 * duration exceeds any configured interval. Therefore, segment
+		 * the skb into smaller chunks. Drivers with full offload are
+		 * expected to handle this in hardware.
+		 */
+		if (skb_is_gso(skb))
+			return taprio_enqueue_segmented(skb, sch, child,
+							to_free);
+
+		return qdisc_drop(skb, sch, to_free);
+	}
 
 	return taprio_enqueue_one(skb, sch, child, to_free);
 }