author    Ian Campbell <Ian.Campbell@citrix.com>   2012-10-10 03:48:42 +0000
committer David S. Miller <davem@davemloft.net>    2012-10-10 22:50:45 -0400
commit    6a8ed462f16b8455eec5ae00eb6014159a6721f0 (patch)
tree      fa48619820077f351d2a30e3566bae0b4dcd11ba /drivers/net
parent    6caab7b0544e83e6c160b5e80f5a4a7dd69545c7 (diff)
xen: netback: handle compound page fragments on transmit.
An SKB paged fragment can consist of a compound page with order > 0.
However the netchannel protocol deals only in PAGE_SIZE frames.

Handle this in netbk_gop_frag_copy and xen_netbk_count_skb_slots by
iterating over the frames which make up the page.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Konrad Rzeszutek Wilk <konrad@kernel.org>
Cc: Sander Eikelenboom <linux@eikelenboom.it>
Tested-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
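A minimal userspace sketch of the per-frame walk this change introduces: the fragment offset is first reduced to an in-page offset, then each iteration consumes at most the remainder of the current PAGE_SIZE frame before moving on. The fixed PAGE_SIZE value, the count_frames() helper and the sample numbers in main() are illustrative assumptions, not kernel code; the real xen_netbk_count_skb_slots additionally caps each chunk at MAX_BUFFER_OFFSET when accounting for ring slots, which is omitted here.

/*
 * Illustrative sketch only (userspace, not the kernel implementation):
 * walk a fragment spanning a compound page in PAGE_SIZE-sized frames.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long count_frames(unsigned long offset, unsigned long size)
{
	unsigned long frames = 0;

	/* Reduce to an offset within a single frame (offset &= ~PAGE_MASK). */
	offset &= PAGE_SIZE - 1;

	while (size > 0) {
		unsigned long bytes = PAGE_SIZE - offset;

		if (bytes > size)
			bytes = size;

		frames++;		/* one PAGE_SIZE frame touched */
		offset += bytes;
		size -= bytes;

		if (offset == PAGE_SIZE)
			offset = 0;	/* continue in the next frame */
	}
	return frames;
}

int main(void)
{
	/* A 10000-byte fragment starting 100 bytes into its first frame. */
	printf("frames needed: %lu\n", count_frames(100, 10000));
	return 0;
}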
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/xen-netback/netback.c  40
1 file changed, 35 insertions, 5 deletions
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 4ebfcf3d8a3b..f2d6b78d901d 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -335,21 +335,35 @@ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
unsigned long bytes;
+
+ offset &= ~PAGE_MASK;
+
while (size > 0) {
+ BUG_ON(offset >= PAGE_SIZE);
BUG_ON(copy_off > MAX_BUFFER_OFFSET);
- if (start_new_rx_buffer(copy_off, size, 0)) {
+ bytes = PAGE_SIZE - offset;
+
+ if (bytes > size)
+ bytes = size;
+
+ if (start_new_rx_buffer(copy_off, bytes, 0)) {
count++;
copy_off = 0;
}
- bytes = size;
if (copy_off + bytes > MAX_BUFFER_OFFSET)
bytes = MAX_BUFFER_OFFSET - copy_off;
copy_off += bytes;
+
+ offset += bytes;
size -= bytes;
+
+ if (offset == PAGE_SIZE)
+ offset = 0;
}
}
return count;
@@ -403,14 +417,24 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
unsigned long bytes;
/* Data must not cross a page boundary. */
- BUG_ON(size + offset > PAGE_SIZE);
+ BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
meta = npo->meta + npo->meta_prod - 1;
+ /* Skip unused frames from start of page */
+ page += offset >> PAGE_SHIFT;
+ offset &= ~PAGE_MASK;
+
while (size > 0) {
+ BUG_ON(offset >= PAGE_SIZE);
BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
- if (start_new_rx_buffer(npo->copy_off, size, *head)) {
+ bytes = PAGE_SIZE - offset;
+
+ if (bytes > size)
+ bytes = size;
+
+ if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
/*
* Netfront requires there to be some data in the head
* buffer.
@@ -420,7 +444,6 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
meta = get_next_rx_buffer(vif, npo);
}
- bytes = size;
if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
bytes = MAX_BUFFER_OFFSET - npo->copy_off;
@@ -453,6 +476,13 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
offset += bytes;
size -= bytes;
+ /* Next frame */
+ if (offset == PAGE_SIZE && size) {
+ BUG_ON(!PageCompound(page));
+ page++;
+ offset = 0;
+ }
+
/* Leave a gap for the GSO descriptor. */
if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
vif->rx.req_cons++;
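As a companion to the hunks above, a similarly hedged sketch of the copy-path walk in netbk_gop_frag_copy: whole leading frames of a compound page are skipped up front, and the frame pointer is advanced whenever a frame boundary is crossed with data still remaining. The walk_frag() helper, the modelling of struct page as a plain frame index, and the printf standing in for the grant copy are all illustrative assumptions.

/*
 * Illustrative sketch only (userspace): step across the PAGE_SIZE frames
 * of a compound page, as the copy path does after this patch.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static void walk_frag(unsigned long page, unsigned long offset,
		      unsigned long size)
{
	/* Skip unused frames from the start of the compound page. */
	page += offset >> PAGE_SHIFT;
	offset &= PAGE_SIZE - 1;

	while (size > 0) {
		unsigned long bytes = PAGE_SIZE - offset;

		if (bytes > size)
			bytes = size;

		/* Stand-in for the per-chunk grant copy operation. */
		printf("copy %lu bytes from frame %lu at offset %lu\n",
		       bytes, page, offset);

		offset += bytes;
		size -= bytes;

		/* Next frame of the compound page. */
		if (offset == PAGE_SIZE && size) {
			page++;
			offset = 0;
		}
	}
}

int main(void)
{
	/* A fragment starting one and a half frames into the page. */
	walk_frag(0, PAGE_SIZE + PAGE_SIZE / 2, 3 * PAGE_SIZE);
	return 0;
}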