author	Eric Dumazet <edumazet@google.com>	2013-05-21 08:16:46 +0000
committer	David S. Miller <davem@davemloft.net>	2013-05-23 00:06:40 -0700
commit	e43ac79a4bc6ca90de4ba10983b4ca39cd215b4b (patch)
tree	6104167d3ff96882bfec0bacc16ec32a032161a1 /net/sched/sch_tbf.c
parent	ffed61e6fd3a405e79f4d267f22acaee3267fb37 (diff)
sch_tbf: segment too big GSO packets
If a GSO packet has a length above the tbf burst limit, the packet is currently silently dropped.

The current way to handle this is to set the device in non-GSO/TSO mode, or to configure high bursts, and that is suboptimal. We can instead segment too-big GSO packets and send the individual segments as the tbf parameters allow, giving better interoperability.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Ben Hutchings <ben@decadent.org.uk>
Cc: Jiri Pirko <jiri@resnulli.us>
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Reviewed-by: Jiri Pirko <jiri@resnulli.us>
Signed-off-by: David S. Miller <davem@davemloft.net>
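[Editor's note: a back-of-the-envelope sketch of why segmentation resolves the drop; all numbers are hypothetical and not part of the patch. Each resulting segment carries at most gso_size bytes of payload plus headers, i.e. roughly one MTU-sized frame, so it passes tbf's max_size check whenever the configured burst covers a full-size frame:]

#include <stdio.h>

int main(void)
{
	/* Hypothetical TSO packet; the numbers are illustrative only */
	unsigned int payload  = 65160;	/* total TCP payload in the GSO skb */
	unsigned int gso_size = 1448;	/* MSS carried by each segment      */
	unsigned int hdrs     = 66;	/* Ethernet + IPv4 + TCP headers    */

	unsigned int nsegs = (payload + gso_size - 1) / gso_size;

	/* 65160 / 1448 = 45 segments of at most 1514 bytes on the wire */
	printf("%u segments, each <= %u bytes\n", nsegs, gso_size + hdrs);
	return 0;
}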
Diffstat (limited to 'net/sched/sch_tbf.c')
-rw-r--r--	net/sched/sch_tbf.c	47
1 file changed, 45 insertions, 2 deletions
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index c8388f3c3426..38008b0980d9 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -116,14 +116,57 @@ struct tbf_sched_data {
 	struct qdisc_watchdog watchdog;	/* Watchdog timer */
 };
 
+
+/* GSO packet is too big, segment it so that tbf can transmit
+ * each segment in time
+ */
+static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
+{
+	struct tbf_sched_data *q = qdisc_priv(sch);
+	struct sk_buff *segs, *nskb;
+	netdev_features_t features = netif_skb_features(skb);
+	int ret, nb;
+
+	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+
+	if (IS_ERR_OR_NULL(segs))
+		return qdisc_reshape_fail(skb, sch);
+
+	nb = 0;
+	while (segs) {
+		nskb = segs->next;
+		segs->next = NULL;
+		if (likely(segs->len <= q->max_size)) {
+			qdisc_skb_cb(segs)->pkt_len = segs->len;
+			ret = qdisc_enqueue(segs, q->qdisc);
+		} else {
+			ret = qdisc_reshape_fail(skb, sch);
+		}
+		if (ret != NET_XMIT_SUCCESS) {
+			if (net_xmit_drop_count(ret))
+				sch->qstats.drops++;
+		} else {
+			nb++;
+		}
+		segs = nskb;
+	}
+	sch->q.qlen += nb;
+	if (nb > 1)
+		qdisc_tree_decrease_qlen(sch, 1 - nb);
+	consume_skb(skb);
+	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
+}
+
 static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	int ret;
 
-	if (qdisc_pkt_len(skb) > q->max_size)
+	if (qdisc_pkt_len(skb) > q->max_size) {
+		if (skb_is_gso(skb))
+			return tbf_segment(skb, sch);
 		return qdisc_reshape_fail(skb, sch);
-
+	}
 	ret = qdisc_enqueue(skb, q->qdisc);
 	if (ret != NET_XMIT_SUCCESS) {
 		if (net_xmit_drop_count(ret))
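[Editor's note: one subtlety in the patch is the qlen bookkeeping at the end of tbf_segment(). The function returns a single NET_XMIT_SUCCESS for the whole batch, so each ancestor qdisc increments its qlen by just 1, while the child qdisc now holds nb packets. tbf_segment() therefore bumps its own sch->q.qlen by nb and calls qdisc_tree_decrease_qlen(sch, 1 - nb): a negative delta that grows the ancestors' counts by the missing nb - 1. The toy model below mirrors the patch's names but is illustration only, not kernel code:]

#include <stdio.h>

/* Toy model of the qlen bookkeeping in tbf_segment(); the variable
 * names mirror the patch, but none of this is kernel code. */
int main(void)
{
	int nb = 45;		/* segments successfully enqueued */
	int tbf_qlen = 0;	/* this qdisc's sch->q.qlen */
	int parent_qlen = 0;	/* an ancestor qdisc's packet count */

	tbf_qlen += nb;		/* sch->q.qlen += nb */
	parent_qlen -= 1 - nb;	/* qdisc_tree_decrease_qlen(sch, 1 - nb) */
	parent_qlen += 1;	/* ancestor's usual ++ on NET_XMIT_SUCCESS */

	/* Both counters agree: 45 packets are in flight */
	printf("tbf=%d parent=%d\n", tbf_qlen, parent_qlen);
	return 0;
}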