author     David S. Miller <davem@davemloft.net>  2019-06-18 21:30:39 -0400
committer  David S. Miller <davem@davemloft.net>  2019-06-18 21:30:39 -0400
commit     e11e1007a177fe4abc12b620dcf9e36eb8027f40 (patch)
tree       a95a10626d4d9c66e1e230d3a45fefc99e9e5d5a
parent     9476274093a0e79b905f4cd6cf6d149f65e02c17 (diff)
parent     3e14c383de349a86895b8c6c410b9222646574f6 (diff)
Merge branch 'net-netem-fix-issues-with-corrupting-GSO-frames'
Jakub Kicinski says:

====================
net: netem: fix issues with corrupting GSO frames

Corrupting GSO frames currently leads to crashes, due to skb use after
free. These stem from the skb list handling - the segmented skbs come
back on a list, and this list is not properly unlinked before enqueuing
the segments. It turns out this condition is made very likely to occur
by another bug - in backlog accounting. Segments are counted twice,
which means the qdisc's limit gets reached, leading to drops and making
the use after free very likely to happen.

The bugs are fixed in the order in which they were added to the tree.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
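For readers unfamiliar with the failure mode, here is a minimal userspace C
sketch of the list-handling bug described above. This is not kernel code:
struct seg, unlink_head() and enqueue_owns() are invented for illustration.
If the head segment is handed to a queue while its ->next still points at
the rest of the chain, an overlimit drop path that walks the list - as
qdisc_drop_all() does - frees segments the caller still intends to enqueue,
giving a use after free / double free. The fixed pattern mirrors the patch:
record skb->next, then skb_mark_not_on_list() before handing over the head.

	/* Minimal sketch; names are invented, not kernel API. */
	#include <stdio.h>
	#include <stdlib.h>

	struct seg {
		struct seg *next;	/* models skb->next chaining */
	};

	/* Models skb_mark_not_on_list(): detach the head from the chain. */
	static void unlink_head(struct seg *s)
	{
		s->next = NULL;
	}

	/* Models a drop path like qdisc_drop_all(): the queue owns what it
	 * is given and frees everything still linked behind it.
	 */
	static void enqueue_owns(struct seg *s)
	{
		while (s) {
			struct seg *next = s->next;

			free(s);
			s = next;
		}
	}

	int main(void)
	{
		struct seg *head = calloc(1, sizeof(*head));
		struct seg *tail = calloc(1, sizeof(*tail));

		head->next = tail;

		/* Buggy pattern: enqueue_owns(head) with head->next still
		 * set would free tail too; enqueuing tail afterwards is
		 * then a double free. Fixed pattern, as in the patch:
		 * remember the rest, unlink, hand over only the head.
		 */
		struct seg *rest = head->next;	/* segs = skb->next;          */

		unlink_head(head);		/* skb_mark_not_on_list(skb); */
		enqueue_owns(head);		/* only the head is freed     */

		enqueue_owns(rest);		/* caller still owns the rest */
		puts("no use after free, no double free");
		return 0;
	}

The companion accounting fix is visible in the last hunk below:
qdisc_enqueue() already increments sch->q.qlen once per segment, so the
extra sch->q.qlen += nb counted every segment twice and pushed the queue to
its limit prematurely.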
 net/sched/sch_netem.c | 26 ++++++++++++++------------
 1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 956ff3da81f4..b17f2ed970e2 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -439,8 +439,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	struct netem_skb_cb *cb;
 	struct sk_buff *skb2;
 	struct sk_buff *segs = NULL;
-	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
-	int nb = 0;
+	unsigned int prev_len = qdisc_pkt_len(skb);
 	int count = 1;
 	int rc = NET_XMIT_SUCCESS;
 	int rc_drop = NET_XMIT_DROP;
@@ -494,16 +493,14 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	 */
 	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
 		if (skb_is_gso(skb)) {
-			segs = netem_segment(skb, sch, to_free);
-			if (!segs)
+			skb = netem_segment(skb, sch, to_free);
+			if (!skb)
 				return rc_drop;
-		} else {
-			segs = skb;
+			segs = skb->next;
+			skb_mark_not_on_list(skb);
+			qdisc_skb_cb(skb)->pkt_len = skb->len;
 		}
 
-		skb = segs;
-		segs = segs->next;
-
 		skb = skb_unshare(skb, GFP_ATOMIC);
 		if (unlikely(!skb)) {
 			qdisc_qstats_drop(sch);
@@ -520,6 +517,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	}
 
 	if (unlikely(sch->q.qlen >= sch->limit)) {
+		/* re-link segs, so that qdisc_drop_all() frees them all */
+		skb->next = segs;
 		qdisc_drop_all(skb, sch, to_free);
 		return rc_drop;
 	}
@@ -593,6 +592,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
 finish_segs:
 	if (segs) {
+		unsigned int len, last_len;
+		int nb = 0;
+
+		len = skb->len;
+
 		while (segs) {
 			skb2 = segs->next;
 			skb_mark_not_on_list(segs);
@@ -608,9 +612,7 @@ finish_segs:
 			}
 			segs = skb2;
 		}
-		sch->q.qlen += nb;
-		if (nb > 1)
-			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
+		qdisc_tree_reduce_backlog(sch, -nb, prev_len - len);
 	}
 	return NET_XMIT_SUCCESS;
 }