author	Linus Torvalds <torvalds@linux-foundation.org>	2023-08-07 17:25:19 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-08-07 17:25:19 -0700
commit	da703fe9416d7a03058432ccc27b898504b1383d (patch)
tree	2a64caeb1d1aa339e17c47925505722c604f9894 /drivers
parent	64094e7e3118aff4b0be8ff713c242303e139834 (diff)
parent	534fc31d09b706a16d83533e16b5dc855caf7576 (diff)
Merge tag 'xsa432-6.5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen netback buffer overflow fix from Juergen Gross:
 "The fix for XSA-423 added logic to Linux's netback driver to deal
  with a frontend splitting a packet in a way such that not all of the
  headers would come in one piece. Unfortunately the logic introduced
  there didn't account for the extreme case of the entire packet being
  split into as many pieces as permitted by the protocol, yet still
  being smaller than the area that's specially dealt with to keep all
  (possible) headers together. Such an unusual packet would therefore
  trigger a buffer overrun in the driver"

* tag 'xsa432-6.5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/netback: Fix buffer overrun triggered by unusual packet
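To make the failure mode described above concrete, here is a minimal, self-contained C sketch of the slot accounting it implies. It is an illustration only, not the driver code: the MODEL_* constants, the toy counters and main() are invented stand-ins (MODEL_MAX_FRAGS plays the role of MAX_SKB_FRAGS), and it assumes a header-copy loop that decrements an unsigned slot counter once per fully consumed slot.

	/*
	 * Toy model of the nr_slots accounting, not the netback code itself.
	 * The packet is assumed to occupy the maximum number of slots while
	 * being small enough that the header-copy loop consumes every slot.
	 */
	#include <stdio.h>

	#define MODEL_MAX_FRAGS    17u  /* frag budget of one skb (stand-in) */
	#define MODEL_OVERFLOW      1u  /* slots beyond the frag budget      */
	#define MODEL_TOTAL_SLOTS  (MODEL_MAX_FRAGS + MODEL_OVERFLOW + 1u) /* +head */

	int main(void)
	{
		/* Pre-fix accounting: the overflow slots are not counted. */
		unsigned int nr_slots = MODEL_MAX_FRAGS + 1u;    /* 18 */

		/* The copy loop consumes all slots of a tiny packet: 19 decrements. */
		for (unsigned int consumed = 0; consumed < MODEL_TOTAL_SLOTS; consumed++)
			nr_slots--;                              /* wraps past zero */

		printf("pre-fix nr_slots after copy loop: %u\n", nr_slots);
		/* A loop bounded by "nr_frags < nr_slots" now writes far past a
		 * 17-entry frag array: the buffer overrun. */

		/* Post-fix accounting: count every slot up front and cap the loop. */
		unsigned int fixed_slots = MODEL_TOTAL_SLOTS;    /* 19 */
		for (unsigned int consumed = 0; consumed < MODEL_TOTAL_SLOTS; consumed++)
			fixed_slots--;                           /* reaches exactly 0 */

		unsigned int nr_frags = 0;
		while (fixed_slots > 0 && nr_frags < MODEL_MAX_FRAGS) {
			nr_frags++;
			fixed_slots--;
		}
		printf("post-fix frags mapped: %u (bounded by %u)\n",
		       nr_frags, MODEL_MAX_FRAGS);
		return 0;
	}

With the toy numbers the pre-fix counter wraps to UINT_MAX, which is the underflow the patch below closes by folding frag_overflow into nr_slots, capping the mapping loop at MAX_SKB_FRAGS, and freeing the frag_list skb when it turns out not to be needed.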
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/xen-netback/netback.c	15
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index c8d20cddf658..88f760a7cbc3 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -396,7 +396,7 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 	struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
 	struct xen_netif_tx_request *txp = first;
 
-	nr_slots = shinfo->nr_frags + 1;
+	nr_slots = shinfo->nr_frags + frag_overflow + 1;
 
 	copy_count(skb) = 0;
 	XENVIF_TX_CB(skb)->split_mask = 0;
@@ -462,8 +462,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 		}
 	}
 
-	for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
-	     shinfo->nr_frags++, gop++) {
+	for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
+	     shinfo->nr_frags++, gop++, nr_slots--) {
 		index = pending_index(queue->pending_cons++);
 		pending_idx = queue->pending_ring[index];
 		xenvif_tx_create_map_op(queue, pending_idx, txp,
@@ -476,12 +476,12 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 		txp++;
 	}
 
-	if (frag_overflow) {
+	if (nr_slots > 0) {
 
 		shinfo = skb_shinfo(nskb);
 		frags = shinfo->frags;
 
-		for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
+		for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
 		     shinfo->nr_frags++, txp++, gop++) {
 			index = pending_index(queue->pending_cons++);
 			pending_idx = queue->pending_ring[index];
@@ -492,6 +492,11 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 		}
 
 		skb_shinfo(skb)->frag_list = nskb;
+	} else if (nskb) {
+		/* A frag_list skb was allocated but it is no longer needed
+		 * because enough slots were converted to copy ops above.
+		 */
+		kfree_skb(nskb);
 	}
 
 	(*copy_ops) = cop - queue->tx_copy_ops;