author    | David S. Miller <davem@davemloft.net> | 2017-09-08 21:11:01 -0700
committer | David S. Miller <davem@davemloft.net> | 2017-09-08 21:11:01 -0700
commit    | a7bc57749f231dcd5fdbc7b653bc260064caf0b9
tree      | 0925d65fb3e7e0eec9f6d08f22086a861d294dac /net/core/dev.c
parent    | 109980b894e9dae66c37c3d804a415aa68b19c7e
parent    | 374fb014fc5b15e420faa00af036868a635eadd3
Merge branch 'xdp-bpf-fixes'
John Fastabend says:
====================
net: Fixes for XDP/BPF
The series contains the following fixes, a UAPI update, and a small improvement:
i. Generic XDP needs to be called inside an RCU read-side critical section with preemption disabled.
ii. Not strictly a bug fix, but the sockmap UAPI already has an attach command; to avoid
shipping a kernel release with only the attach and not the detach, I'm pushing the detach
into the net branch. It's early in the RC cycle, so I think this is OK (not ideal, but
better than supporting a UAPI with a missing detach forever). (A hedged userspace sketch
of the attach/detach pair follows the sign-off below.)
iii. The final patch replaces cpu_relax() with cond_resched() in devmap. (A paraphrased
sketch of the affected wait loop follows the diff at the bottom of this page.)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
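
Point (ii) concerns the userspace side of the sockmap UAPI, which is not visible in the
net/core/dev.c diff shown on this page. As a hedged illustration only, the attach/detach
pair via the raw bpf(2) syscall might look roughly like the sketch below; the map and
program file descriptors, the BPF_SK_SKB_STREAM_VERDICT attach type, and the helper names
are assumptions about the caller's setup, not code taken from this series.

/* Hypothetical userspace sketch: attach and then detach an sk_skb verdict
 * program on a sockmap via the raw bpf(2) syscall.  map_fd and prog_fd are
 * assumed to come from earlier BPF_MAP_CREATE / BPF_PROG_LOAD calls.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

static int sockmap_attach_verdict(int map_fd, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = map_fd;		/* the sockmap */
	attr.attach_bpf_fd = prog_fd;		/* BPF_PROG_TYPE_SK_SKB program */
	attr.attach_type = BPF_SK_SKB_STREAM_VERDICT;

	return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
}

static int sockmap_detach_verdict(int map_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = map_fd;
	attr.attach_type = BPF_SK_SKB_STREAM_VERDICT;

	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}

The detach path mirrors the attach path but needs only the map fd and the attach type,
which is why shipping attach without detach would have been awkward to support forever.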
Diffstat (limited to 'net/core/dev.c')
-rw-r--r-- | net/core/dev.c | 25
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 6f845e4fec17..fb766d906148 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3981,8 +3981,13 @@ static int netif_rx_internal(struct sk_buff *skb)
 	trace_netif_rx(skb);
 
 	if (static_key_false(&generic_xdp_needed)) {
-		int ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog),
-					 skb);
+		int ret;
+
+		preempt_disable();
+		rcu_read_lock();
+		ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
+		rcu_read_unlock();
+		preempt_enable();
 
 		/* Consider XDP consuming the packet a success from
 		 * the netdev point of view we do not want to count
@@ -4500,18 +4505,20 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
 	if (skb_defer_rx_timestamp(skb))
 		return NET_RX_SUCCESS;
 
-	rcu_read_lock();
-
 	if (static_key_false(&generic_xdp_needed)) {
-		int ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog),
-					 skb);
+		int ret;
 
-		if (ret != XDP_PASS) {
-			rcu_read_unlock();
+		preempt_disable();
+		rcu_read_lock();
+		ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
+		rcu_read_unlock();
+		preempt_enable();
+
+		if (ret != XDP_PASS)
 			return NET_RX_DROP;
-		}
 	}
 
+	rcu_read_lock();
 #ifdef CONFIG_RPS
 	if (static_key_false(&rps_needed)) {
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
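
Point (iii) touches kernel/bpf/devmap.c, which is outside the net/core/dev.c diffstat
above and therefore not shown in the diff. A minimal sketch of the pattern, assuming the
4.14-era devmap teardown path where the map-free routine waits for every CPU's flush
bitmap to drain; the helper name and field names below are paraphrased from memory, not
copied from the patch.

/* Hypothetical paraphrase of the devmap wait loop referenced in (iii).
 * dtab->flush_needed is assumed to be the per-CPU bitmap of pending flushes;
 * the point being illustrated is only the cpu_relax() -> cond_resched() swap.
 */
static void dev_map_wait_for_flush(struct bpf_dtab *dtab)
{
	int cpu;

	for_each_online_cpu(cpu) {
		unsigned long *bitmap = per_cpu_ptr(dtab->flush_needed, cpu);

		while (!bitmap_empty(bitmap, dtab->map.max_entries))
			cond_resched();	/* was cpu_relax(): yield the CPU instead
					 * of busy-spinning while waiting */
	}
}

cond_resched() lets the waiting task schedule away, which matters when another CPU still
has packets queued for flush and the drain can take a while; cpu_relax() only hints to the
hardware and keeps the CPU busy the whole time.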