author		Eric Dumazet <eric.dumazet@gmail.com>	2011-01-09 08:30:54 +0000
committer	David S. Miller <davem@davemloft.net>	2011-01-10 16:07:54 -0800
commit		bfe0d0298f2a67d94d58c39ea904a999aeeb7c3c (patch)
tree		5a6f966ebabe8d88d6d6c78c61411e325150927f /include
parent		f1593d2298acca8b6680100d622911827edb8b0a (diff)
net_sched: factorize qdisc stats handling
HTB takes into account that an skb may be segmented (GSO) when it updates
its stats. Generalize this to all schedulers: they should use the
qdisc_bstats_update() helper instead of manipulating bstats.bytes and
bstats.packets directly. Also add a bstats_update() helper for classes
that use gnet_stats_basic_packed fields.

Note: right now, the TCQ_F_CAN_BYPASS shortcut can be taken only if no
stab is set up on the qdisc.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
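A minimal sketch (not part of this patch) of what the conversion looks like
in a scheduler's enqueue path. The qdisc and its enqueue function are
hypothetical; only qdisc_bstats_update() and the open-coded counters it
replaces come from the patch itself:

#include <net/sch_generic.h>

/* Hypothetical enqueue path of a simple FIFO-style scheduler. */
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	__skb_queue_tail(&sch->q, skb);
	sch->qstats.backlog += qdisc_pkt_len(skb);

	/* Before this patch, schedulers open-coded the counters and
	 * credited a GSO skb as a single packet:
	 *
	 *	sch->bstats.bytes += qdisc_pkt_len(skb);
	 *	sch->bstats.packets++;
	 *
	 * The helper instead credits gso_segs packets for a segmented skb.
	 */
	qdisc_bstats_update(sch, skb);

	return NET_XMIT_SUCCESS;
}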
Diffstat (limited to 'include')
-rw-r--r--  include/net/sch_generic.h | 20 ++++++++++++++------
1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 0af57ebae762..e9eee99d8b1f 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -207,7 +207,7 @@ static inline int qdisc_qlen(struct Qdisc *q)
return q->q.qlen;
}
-static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
+static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
return (struct qdisc_skb_cb *)skb->cb;
}
@@ -394,7 +394,7 @@ static inline bool qdisc_tx_is_noop(const struct net_device *dev)
return true;
}
-static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
+static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
return qdisc_skb_cb(skb)->pkt_len;
}
@@ -426,10 +426,18 @@ static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}
-static inline void __qdisc_update_bstats(struct Qdisc *sch, unsigned int len)
+
+static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
+ const struct sk_buff *skb)
+{
+ bstats->bytes += qdisc_pkt_len(skb);
+ bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+}
+
+static inline void qdisc_bstats_update(struct Qdisc *sch,
+ const struct sk_buff *skb)
{
- sch->bstats.bytes += len;
- sch->bstats.packets++;
+ bstats_update(&sch->bstats, skb);
}
static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
@@ -437,7 +445,7 @@ static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
{
__skb_queue_tail(list, skb);
sch->qstats.backlog += qdisc_pkt_len(skb);
- __qdisc_update_bstats(sch, qdisc_pkt_len(skb));
+ qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS;
}
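For classful schedulers, the per-class counterpart works the same way. A
sketch with a hypothetical class layout; only bstats_update() itself is
defined by this patch:

#include <net/sch_generic.h>

struct example_class {
	struct gnet_stats_basic_packed bstats;	/* per-class byte/packet stats */
};

/* Charge a queued skb to its class, with the same GSO-aware accounting. */
static void example_class_charge(struct example_class *cl,
				 const struct sk_buff *skb)
{
	bstats_update(&cl->bstats, skb);
}

This mirrors the accounting HTB already did per class; after this patch, any
class that keeps gnet_stats_basic_packed fields can share the same helper.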