author     Jason Wang <jasowang@redhat.com>       2018-01-04 11:14:28 +0800
committer  David S. Miller <davem@davemloft.net>  2018-01-09 10:57:08 -0500
commit     fc72d1d54dd9ffe2552c76b17e9129803ca7b255 (patch)
tree       4ca29f2049fa6f6d553c70a3dd50b22fdd614710 /drivers/vhost/net.c
parent     5990a30510ed1c37a769d3a035ad2d030b843528 (diff)
tuntap: XDP transmission
This patch implements XDP transmission for TAP. Since we can't create new queues for TAP during XDP setup, the existing ptr_ring is reused for queueing XDP buffers. To distinguish an xdp_buff from an sk_buff, TUN_XDP_FLAG (0x1UL) is encoded into the lowest bit of the xdp_buff pointer during ptr_ring_produce() and decoded when consuming. XDP metadata is stored in the headroom of the packet, which should work in most cases since drivers usually reserve enough headroom.

Very minor changes were done for vhost_net: it just needs to peek the length depending on the type of the pointer.

Tests were done on two Intel E5-2630 2.40GHz machines connected back to back through two 82599ES NICs. Traffic was generated/received through MoonGen/testpmd(rxonly). It reports ~20% improvement when xdp_redirect_map is redirecting from ixgbe to TAP (from 2.50Mpps to 3.05Mpps).

Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
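The pointer-tagging scheme described above amounts to setting, testing and clearing the low bit of the pointer that travels through the ptr_ring. A minimal sketch of helpers with that shape is shown here for reference: tun_is_xdp_buff() and tun_ptr_to_xdp() are the calls used in the diff below, while tun_xdp_to_ptr() and the xmit snippet are assumed producer-side counterparts (the real helpers live on the tun side of this series, outside the scope of this diff).

/* Sketch of the low-bit tagging scheme (illustrative, not the exact
 * tun-side code from this series).
 */
#define TUN_XDP_FLAG 0x1UL

/* Tag an xdp_buff pointer before it is queued on the ptr_ring. */
static inline void *tun_xdp_to_ptr(void *ptr)
{
	return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
}

/* Strip the tag to recover the xdp_buff pointer on the consumer side. */
static inline void *tun_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
}

/* A queued entry is an xdp_buff (rather than an sk_buff) iff bit 0 is set. */
static inline bool tun_is_xdp_buff(void *ptr)
{
	return (unsigned long)ptr & TUN_XDP_FLAG;
}

/* Assumed producer-side usage: queue a tagged xdp_buff for transmission. */
static inline int example_tun_xdp_xmit(struct ptr_ring *ring,
				       struct xdp_buff *xdp)
{
	return ptr_ring_produce(ring, tun_xdp_to_ptr(xdp));
}

Tagging bit 0 is safe because both sk_buff and xdp_buff pointers come from word-aligned allocations, so the low bit of an untagged pointer is always zero.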
Diffstat (limited to 'drivers/vhost/net.c')
-rw-r--r--  drivers/vhost/net.c  13
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index c31655548da2..a5a1db647635 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -175,6 +175,17 @@ static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
}
}
+static int vhost_net_buf_peek_len(void *ptr)
+{
+ if (tun_is_xdp_buff(ptr)) {
+ struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+
+ return xdp->data_end - xdp->data;
+ }
+
+ return __skb_array_len_with_tag(ptr);
+}
+
static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
{
struct vhost_net_buf *rxq = &nvq->rxq;
@@ -186,7 +197,7 @@ static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
return 0;
out:
- return __skb_array_len_with_tag(vhost_net_buf_get_ptr(rxq));
+ return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq));
}
static void vhost_net_buf_init(struct vhost_net_buf *rxq)