author	Yunsheng Lin <linyunsheng@huawei.com>	2019-06-28 09:13:19 +0800
committer	David S. Miller <davem@davemloft.net>	2019-07-01 19:02:47 -0700
commit	27ba4059e06b3bbd38a7d944fd5a78cdf47534f4 (patch)
tree	717af81b11215303a6c0a7d5fbfeee53284e1825
parent	0d0bcacc54e65540b8a3d680c130b741010e23a3 (diff)
net: link_watch: prevent starvation when processing linkwatch wq
When a user has configured a large number of virtual netdevs,
such as 4K VLANs, a carrier on/off operation on the real netdev
also causes the link state of its virtual netdevs to be processed
in linkwatch. Currently the processing is done in a work queue,
which may cause an rtnl lock starvation problem and a worker
starvation problem for other work queues, such as the irqfd_inject wq.

This patch releases the CPU after the link watch worker has
processed a fixed number of netdev link watch events, and
schedules the work queue again when link watch events remain.
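As an illustration of the approach (a sketch, not the actual
net/core/link_watch.c code -- see the diff below for that), the
snippet shows the general budget-and-reschedule pattern the patch
applies: the worker drains at most a fixed number of items per pass,
splices any leftovers back onto the shared list, and reschedules
itself. The names event_list, event_lock, struct item, process_item()
and WORK_BUDGET_PER_PASS are hypothetical stand-ins for lweventlist,
lweventlist_lock, the per-netdev link_watch_list entries,
linkwatch_do_dev() and MAX_DO_DEV_PER_LOOP.

	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	/* Illustrative sketch only -- hypothetical names, not the patch itself. */
	static LIST_HEAD(event_list);		/* stand-in for lweventlist */
	static DEFINE_SPINLOCK(event_lock);	/* stand-in for lweventlist_lock */

	#define WORK_BUDGET_PER_PASS	100	/* stand-in for MAX_DO_DEV_PER_LOOP */

	struct item {
		struct list_head node;
	};

	static void process_item(struct item *it);	/* stand-in for linkwatch_do_dev() */

	static void budget_limited_worker(struct work_struct *work)
	{
		int budget = WORK_BUDGET_PER_PASS;
		LIST_HEAD(local);

		spin_lock_irq(&event_lock);
		list_splice_init(&event_list, &local);

		/* Process at most 'budget' items per pass so one burst of
		 * events cannot monopolize the worker (or the locks taken
		 * inside process_item()).
		 */
		while (!list_empty(&local) && budget > 0) {
			struct item *it = list_first_entry(&local, struct item, node);

			list_del_init(&it->node);
			spin_unlock_irq(&event_lock);
			process_item(it);
			budget--;
			spin_lock_irq(&event_lock);
		}

		/* Put any unprocessed items back on the shared list ... */
		list_splice_init(&local, &event_list);

		/* ... and let the workqueue run us again later instead of
		 * looping here until the list is empty.
		 */
		if (!list_empty(&event_list))
			schedule_work(work);
		spin_unlock_irq(&event_lock);
	}

The actual patch additionally doubles the budget when only urgent
events are being processed, as the "Give urgent case more budget"
hunk below shows.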
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	net/core/link_watch.c	13
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 04fdc9535772..f153e0601838 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -163,9 +163,16 @@ static void linkwatch_do_dev(struct net_device *dev)
 
 static void __linkwatch_run_queue(int urgent_only)
 {
+#define MAX_DO_DEV_PER_LOOP	100
+
+	int do_dev = MAX_DO_DEV_PER_LOOP;
 	struct net_device *dev;
 	LIST_HEAD(wrk);
 
+	/* Give urgent case more budget */
+	if (urgent_only)
+		do_dev += MAX_DO_DEV_PER_LOOP;
+
 	/*
 	 * Limit the number of linkwatch events to one
 	 * per second so that a runaway driver does not
@@ -184,7 +191,7 @@ static void __linkwatch_run_queue(int urgent_only)
 	spin_lock_irq(&lweventlist_lock);
 	list_splice_init(&lweventlist, &wrk);
 
-	while (!list_empty(&wrk)) {
+	while (!list_empty(&wrk) && do_dev > 0) {
 
 		dev = list_first_entry(&wrk, struct net_device, link_watch_list);
 		list_del_init(&dev->link_watch_list);
@@ -195,9 +202,13 @@ static void __linkwatch_run_queue(int urgent_only)
 		}
 		spin_unlock_irq(&lweventlist_lock);
 		linkwatch_do_dev(dev);
+		do_dev--;
 		spin_lock_irq(&lweventlist_lock);
 	}
 
+	/* Add the remaining work back to lweventlist */
+	list_splice_init(&wrk, &lweventlist);
+
 	if (!list_empty(&lweventlist))
 		linkwatch_schedule_work(0);
 	spin_unlock_irq(&lweventlist_lock);