author     Sridhar Samudrala <sridhar.samudrala@intel.com>  2017-03-24 10:08:24 -0700
committer  David S. Miller <davem@davemloft.net>  2017-03-24 20:49:31 -0700
commit     7db6b048da3b9f84fe1d22fb29ff7e7c2ec6c0e5 (patch)
tree       4280f554866625190cd24db1768927764ba2bc77 /net
parent     37056719bba500d0d2b8216fdf641e5507ec9a0e (diff)
net: Commonize busy polling code to focus on napi_id instead of socket
Move the core functionality in sk_busy_loop() to napi_busy_loop() and make it
independent of sk. This enables reusing this function in the epoll busy loop
implementation.

Signed-off-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
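With the sk-specific checks removed from the core loop, each caller now resolves the napi_id itself and supplies a termination callback. A minimal sketch of the socket-side wrapper, assuming it lives as a static inline in include/net/busy_poll.h (that header is outside the net/-only diff shown below), illustrates the intended usage:

/* Sketch only, not part of the diff below: the wrapper resolves the
 * napi_id that napi_busy_loop() needs and hands sk_busy_loop_end() over
 * as the loop_end predicate.  A nonblocking caller passes a NULL
 * loop_end, so the loop breaks after a single poll pass.
 */
static inline void sk_busy_loop(struct sock *sk, int nonblock)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int napi_id = READ_ONCE(sk->sk_napi_id);

	if (napi_id >= MIN_NAPI_ID)
		napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk);
#endif
}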
Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c   | 21 ++++++++-------------
-rw-r--r--  net/core/sock.c  | 11 +++++++++++
2 files changed, 19 insertions(+), 13 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 2d1b5613b7fd..ef9fe60ee294 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5060,19 +5060,16 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
 		do_softirq();
 }
 
-void sk_busy_loop(struct sock *sk, int nonblock)
+void napi_busy_loop(unsigned int napi_id,
+		    bool (*loop_end)(void *, unsigned long),
+		    void *loop_end_arg)
 {
-	unsigned long start_time = nonblock ? 0 : busy_loop_current_time();
+	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
 	int (*napi_poll)(struct napi_struct *napi, int budget);
 	void *have_poll_lock = NULL;
 	struct napi_struct *napi;
-	unsigned int napi_id;
 
 restart:
-	napi_id = READ_ONCE(sk->sk_napi_id);
-	if (napi_id < MIN_NAPI_ID)
-		return;
-
 	napi_poll = NULL;
 
 	rcu_read_lock();
@@ -5106,12 +5103,11 @@ restart:
 		trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
 count:
 		if (work > 0)
-			__NET_ADD_STATS(sock_net(sk),
+			__NET_ADD_STATS(dev_net(napi->dev),
 					LINUX_MIB_BUSYPOLLRXPACKETS, work);
 		local_bh_enable();
 
-		if (nonblock || !skb_queue_empty(&sk->sk_receive_queue) ||
-		    sk_busy_loop_timeout(sk, start_time))
+		if (!loop_end || loop_end(loop_end_arg, start_time))
 			break;
 
 		if (unlikely(need_resched())) {
@@ -5120,8 +5116,7 @@ count:
 			preempt_enable();
 			rcu_read_unlock();
 			cond_resched();
-			if (!skb_queue_empty(&sk->sk_receive_queue) ||
-			    sk_busy_loop_timeout(sk, start_time))
+			if (loop_end(loop_end_arg, start_time))
 				return;
 			goto restart;
 		}
@@ -5133,7 +5128,7 @@ count:
 out:
 	rcu_read_unlock();
 }
-EXPORT_SYMBOL(sk_busy_loop);
+EXPORT_SYMBOL(napi_busy_loop);
 
 #endif /* CONFIG_NET_RX_BUSY_POLL */
diff --git a/net/core/sock.c b/net/core/sock.c
index 1b9030ee6f4b..4b762f2a3552 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3237,3 +3237,14 @@ static int __init proto_init(void)
 subsys_initcall(proto_init);
 
 #endif /* PROC_FS */
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+bool sk_busy_loop_end(void *p, unsigned long start_time)
+{
+	struct sock *sk = p;
+
+	return !skb_queue_empty(&sk->sk_receive_queue) ||
+	       sk_busy_loop_timeout(sk, start_time);
+}
+EXPORT_SYMBOL(sk_busy_loop_end);
+#endif /* CONFIG_NET_RX_BUSY_POLL */
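
The loop_end contract is straightforward: napi_busy_loop() invokes the callback with the opaque loop_end_arg and the recorded start time, and a true return stops the polling loop, exactly as sk_busy_loop_end() above does for sockets. A purely hypothetical non-socket caller (none of the names below exist in the tree) could reuse the same entry point like this:

/* Hypothetical sketch of a non-socket user of napi_busy_loop().
 * Neither my_poll_state nor my_loop_end exists in the kernel; they only
 * illustrate the callback contract.  Returning true stops the loop.
 */
struct my_poll_state {
	unsigned long budget_usecs;	/* how long we are willing to spin */
	bool done;			/* set elsewhere once work is available */
};

static bool my_loop_end(void *arg, unsigned long start_time)
{
	struct my_poll_state *st = arg;

	return st->done ||
	       time_after(busy_loop_current_time(),
			  start_time + st->budget_usecs);
}

/* caller: napi_busy_loop(napi_id, my_loop_end, &st); */

This is the pattern the epoll busy-loop reuse mentioned in the commit message relies on: a caller-owned state structure plus a caller-owned loop_end predicate, with no reference to struct sock inside the core loop.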