author	Eric Dumazet <edumazet@google.com>	2015-11-18 06:30:53 -0800
committer	David S. Miller <davem@davemloft.net>	2015-11-18 16:17:38 -0500
commit	2a028ecb76497d05e5cd4e3e8b09d965cac2e3f1 (patch)
tree	053ca335742c9e7d6c68c4b72a257563d37808c8 /net/core/dev.c
parent	02d62e86fe892c59a1259d089d4d16ac76977a37 (diff)
net: allow BH servicing in sk_busy_loop()
Instead of blocking BH for the whole of sk_busy_loop(), block them only
around ->ndo_busy_poll() calls.

This has many benefits:

1) allow tunneled traffic to use busy poll as well as native traffic.
   Tunnel handlers usually call netif_rx() and depend on net_rx_action()
   being run (from the softirq handler).

2) allow RFS/RPS to be used (sending IPI to other cpus if needed).

3) use the 'let's burn cpu cycles' budget to do useful work
   (like TX completions, timers, RCU callbacks...).

4) reduce BH latencies, making busy poll a better citizen.

Tested with a SIT tunnel:

lpaa5:~# echo 0 >/proc/sys/net/core/busy_read
lpaa5:~# ./netperf -H 2002:af6:786::1 -t TCP_RR
MIGRATED TCP REQUEST/RESPONSE TEST from ::0 (::) port 0 AF_INET6 to 2002:af6:786::1 () port 0 AF_INET6 : first burst 0
Local /Remote
Socket Size   Request  Resp.   Elapsed  Trans.
Send   Recv   Size     Size    Time     Rate
bytes  Bytes  bytes    bytes   secs.    per sec

16384  87380  1        1       10.00    37373.93
16384  87380

Now enable busy poll on both hosts:

lpaa5:~# echo 70 >/proc/sys/net/core/busy_read
lpaa6:~# echo 70 >/proc/sys/net/core/busy_read

lpaa5:~# ./netperf -H 2002:af6:786::1 -t TCP_RR
MIGRATED TCP REQUEST/RESPONSE TEST from ::0 (::) port 0 AF_INET6 to 2002:af6:786::1 () port 0 AF_INET6 : first burst 0
Local /Remote
Socket Size   Request  Resp.   Elapsed  Trans.
Send   Recv   Size     Size    Time     Rate
bytes  Bytes  bytes    bytes   secs.    per sec

16384  87380  1        1       10.00    58314.77
16384  87380

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
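[Editorial note, not part of the patch: instead of the global
/proc/sys/net/core/busy_read knob used in the test above, busy polling can
also be enabled per socket with the SO_BUSY_POLL socket option. A minimal
userspace sketch follows; the 70us budget mirrors the sysctl value used
above, the fallback define uses the asm-generic value, and setting the
option may require CAP_NET_ADMIN on kernels of this era.]

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>

#ifndef SO_BUSY_POLL
#define SO_BUSY_POLL 46	/* asm-generic value; older libc headers may lack it */
#endif

int main(void)
{
	int usecs = 70;	/* busy-poll budget in microseconds, as in the test above */
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Per-socket equivalent of 'echo 70 >/proc/sys/net/core/busy_read':
	 * reads on this socket may busy poll the NIC for up to 70us before
	 * the task sleeps, without changing system-wide behavior.
	 */
	if (setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usecs, sizeof(usecs)) < 0)
		perror("setsockopt(SO_BUSY_POLL)");

	/* connect()/recv() as usual from here on */
	close(fd);
	return 0;
}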
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--	net/core/dev.c	18
1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 74a816b299df..2002eec2617d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4684,11 +4684,7 @@ bool sk_busy_loop(struct sock *sk, int nonblock)
 	struct napi_struct *napi;
 	int rc = false;
 
-	/*
-	 * rcu read lock for napi hash
-	 * bh so we don't race with net_rx_action
-	 */
-	rcu_read_lock_bh();
+	rcu_read_lock();
 
 	napi = napi_by_id(sk->sk_napi_id);
 	if (!napi)
@@ -4699,23 +4695,23 @@ bool sk_busy_loop(struct sock *sk, int nonblock)
 		goto out;
 
 	do {
+		local_bh_disable();
 		rc = ops->ndo_busy_poll(napi);
+		if (rc > 0)
+			NET_ADD_STATS_BH(sock_net(sk),
+					 LINUX_MIB_BUSYPOLLRXPACKETS, rc);
+		local_bh_enable();
 
 		if (rc == LL_FLUSH_FAILED)
 			break; /* permanent failure */
 
-		if (rc > 0)
-			/* local bh are disabled so it is ok to use _BH */
-			NET_ADD_STATS_BH(sock_net(sk),
-					 LINUX_MIB_BUSYPOLLRXPACKETS, rc);
 		cpu_relax();
-
 	} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
 		 !need_resched() && !busy_loop_timeout(end_time));
 
 	rc = !skb_queue_empty(&sk->sk_receive_queue);
 out:
-	rcu_read_unlock_bh();
+	rcu_read_unlock();
 	return rc;
 }
 EXPORT_SYMBOL(sk_busy_loop);
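[Editorial note: to see the net effect of the two hunks at a glance, here is
how the polling section of sk_busy_loop() reads once the patch is applied.
This is assembled from the diff above (kernel context, not
standalone-compilable; the unchanged ops lookup between the hunks is
elided), with comments added on where softirqs can now run.]

	rcu_read_lock();		/* napi hash is RCU-protected; BH stay enabled here */

	napi = napi_by_id(sk->sk_napi_id);
	if (!napi)
		goto out;

	/* ... ops lookup elided (unchanged by this patch) ... */

	do {
		local_bh_disable();	/* BH blocked only while driver code runs */
		rc = ops->ndo_busy_poll(napi);
		if (rc > 0)
			/* still inside the BH-off section, so _BH stats are safe */
			NET_ADD_STATS_BH(sock_net(sk),
					 LINUX_MIB_BUSYPOLLRXPACKETS, rc);
		local_bh_enable();	/* pending softirqs, e.g. net_rx_action(), run here */

		if (rc == LL_FLUSH_FAILED)
			break; /* permanent failure */

		cpu_relax();
	} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
		 !need_resched() && !busy_loop_timeout(end_time));

	rc = !skb_queue_empty(&sk->sk_receive_queue);
out:
	rcu_read_unlock();
	return rc;

The key point is that local_bh_enable() processes any softirqs raised during
the poll, so packets a tunnel handler queued via netif_rx() are delivered by
net_rx_action() between iterations instead of being starved until the busy
loop exits.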