author	Toshiaki Makita <makita.toshiaki@lab.ntt.co.jp>	2018-08-03 16:58:14 +0900
committer	Daniel Borkmann <daniel@iogearbox.net>	2018-08-10 16:12:21 +0200
commit	af87a3aa1b5f397a2f5c99b97b000943c5177da7 (patch)
tree	24ef46bf0b5a4f47a2abe78758eef93cdf5b88e1 /drivers/net/veth.c
parent	9fc8d518d9d590998209f2686e026a488f65d41e (diff)
veth: Add ndo_xdp_xmit
This allows NIC's XDP to redirect packets to veth. The destination veth
device enqueues redirected packets to the napi ring of its peer, then they
are processed by XDP on the peer veth device. This can be thought of as one
XDP program calling another via REDIRECT, when the peer has driver XDP
enabled.

Note that when the peer veth device does not have driver XDP set, redirected
packets will be dropped because the peer is not ready for NAPI.

v4:
- Don't use xdp_ok_fwd_dev() because checking IFF_UP is not necessary.
  Add comments about it and check only MTU.

v2:
- Drop the part converting xdp_frame into skb when XDP is not enabled.
- Implement the bulk interface of ndo_xdp_xmit.
- Implement the XDP_XMIT_FLUSH bit and drop ndo_xdp_flush.

Signed-off-by: Toshiaki Makita <makita.toshiaki@lab.ntt.co.jp>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
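As an editorial illustration (not part of this patch), the following minimal sketch shows the NIC-side XDP program that this commit enables as a redirect target: it simply redirects every received frame to a veth device. The ifindex constant and program name are placeholders, and the includes assume libbpf's helper headers; the kernel's redirect core then delivers the frames to the veth driver through the ndo_xdp_xmit callback added below.

/* Hypothetical NIC-side XDP program (illustration only): redirect all
 * packets to a veth interface whose ifindex is known at load time.
 * Compile with clang -target bpf; assumes libbpf's bpf_helpers.h.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define VETH_IFINDEX 42	/* placeholder: ifindex of the target veth */

SEC("xdp")
int xdp_redirect_to_veth(struct xdp_md *ctx)
{
	/* bpf_redirect() marks the frame for transmission on VETH_IFINDEX;
	 * the redirect core later calls the veth driver's ndo_xdp_xmit(),
	 * i.e. veth_xdp_xmit() from this patch.
	 */
	return bpf_redirect(VETH_IFINDEX, 0);
}

char _license[] SEC("license") = "GPL";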
Diffstat (limited to 'drivers/net/veth.c')
-rw-r--r--  drivers/net/veth.c | 51
1 file changed, 51 insertions(+), 0 deletions(-)
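Before the diff itself, a hedged caller-side sketch (not from this patch; function and variable names are illustrative) of how the bulk ndo_xdp_xmit interface mentioned in the changelog is driven: the XDP redirect core hands a batch of xdp_frames to the target driver and sets XDP_XMIT_FLUSH so the driver kicks its receive path immediately.

/* Illustrative only: push a batch of xdp_frames to a target device via
 * its ndo_xdp_xmit (veth_xdp_xmit when the target is a veth). Assumes
 * the caller holds rcu_read_lock(), as the redirect core does.
 */
#include <linux/netdevice.h>
#include <net/xdp.h>

static int example_flush_batch(struct net_device *dev,
			       struct xdp_frame **frames, int count)
{
	int sent;

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, count, frames,
					     XDP_XMIT_FLUSH);
	if (sent < 0) {
		/* Hard error (e.g. -ENXIO: no peer, or no XDP program on
		 * the peer): nothing was consumed, so the caller must free
		 * the whole batch itself.
		 */
		int i;

		for (i = 0; i < count; i++)
			xdp_return_frame_rx_napi(frames[i]);
		return sent;
	}

	/* 'sent' frames were queued to the peer's ring; the remaining
	 * (count - sent) were dropped and freed by the driver.
	 */
	return sent;
}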
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 89f3059e603d..dbb693a7795e 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -17,6 +17,7 @@
 #include <net/rtnetlink.h>
 #include <net/dst.h>
 #include <net/xfrm.h>
+#include <net/xdp.h>
 #include <linux/veth.h>
 #include <linux/module.h>
 #include <linux/bpf.h>
@@ -125,6 +126,11 @@ static void *veth_ptr_to_xdp(void *ptr)
 	return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
 }
 
+static void *veth_xdp_to_ptr(void *ptr)
+{
+	return (void *)((unsigned long)ptr | VETH_XDP_FLAG);
+}
+
 static void veth_ptr_free(void *ptr)
 {
 	if (veth_is_xdp_frame(ptr))
@@ -267,6 +273,50 @@ static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
 	return skb;
 }
 
+static int veth_xdp_xmit(struct net_device *dev, int n,
+			 struct xdp_frame **frames, u32 flags)
+{
+	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+	struct net_device *rcv;
+	unsigned int max_len;
+	int i, drops = 0;
+
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+		return -EINVAL;
+
+	rcv = rcu_dereference(priv->peer);
+	if (unlikely(!rcv))
+		return -ENXIO;
+
+	rcv_priv = netdev_priv(rcv);
+	/* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive
+	 * side. This means an XDP program is loaded on the peer and the peer
+	 * device is up.
+	 */
+	if (!rcu_access_pointer(rcv_priv->xdp_prog))
+		return -ENXIO;
+
+	max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;
+
+	spin_lock(&rcv_priv->xdp_ring.producer_lock);
+	for (i = 0; i < n; i++) {
+		struct xdp_frame *frame = frames[i];
+		void *ptr = veth_xdp_to_ptr(frame);
+
+		if (unlikely(frame->len > max_len ||
+			     __ptr_ring_produce(&rcv_priv->xdp_ring, ptr))) {
+			xdp_return_frame_rx_napi(frame);
+			drops++;
+		}
+	}
+	spin_unlock(&rcv_priv->xdp_ring.producer_lock);
+
+	if (flags & XDP_XMIT_FLUSH)
+		__veth_xdp_flush(rcv_priv);
+
+	return n - drops;
+}
+
 static struct sk_buff *veth_xdp_rcv_one(struct veth_priv *priv,
 					struct xdp_frame *frame)
 {
@@ -769,6 +819,7 @@ static const struct net_device_ops veth_netdev_ops = {
 	.ndo_features_check	= passthru_features_check,
 	.ndo_set_rx_headroom	= veth_set_rx_headroom,
 	.ndo_bpf		= veth_xdp,
+	.ndo_xdp_xmit		= veth_xdp_xmit,
 };
 
 #define VETH_FEATURES	(NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \