author		Eric Dumazet <edumazet@google.com>	2016-08-26 12:50:39 -0700
committer	David S. Miller <davem@davemloft.net>	2016-08-30 22:17:20 -0700
commit		41852497a9205964b958a245a9526040b980926f (patch)
tree		da16e7cc6a3717afa785961be2fc72c4d70e13e2 /net/core
parent		ae3cb8cb20c87c0833a54360344ad4ee77bdb184 (diff)
net: batch calls to flush_all_backlogs()
After commit 145dd5f9c88f ("net: flush the softnet backlog in process
context"), we can easily batch calls to flush_all_backlogs() for all
devices processed in rollback_registered_many().
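
For reference, a minimal sketch of the batched flow this change introduces
(simplified; the authoritative code is the net/core/dev.c diff below). The
per-CPU work items are assumed to be initialized once at boot, so the flush
path only queues them on every online CPU and waits for each one a single
time, no matter how many devices are going away:

#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>

/* One pre-initialized work item per CPU; INIT_WORK() happens once at
 * boot (net_dev_init() in the diff below), not per unregister.
 */
DEFINE_PER_CPU(struct work_struct, flush_works);

static void flush_all_backlogs(void)
{
	unsigned int cpu;

	get_online_cpus();

	/* Kick the backlog flush on every online CPU ... */
	for_each_online_cpu(cpu)
		queue_work_on(cpu, system_highpri_wq,
			      per_cpu_ptr(&flush_works, cpu));

	/* ... and wait for all of them exactly once per batch of
	 * devices, instead of once per device.
	 */
	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(&flush_works, cpu));

	put_online_cpus();
}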
Tested:
Before patch, on an idle host.
modprobe dummy numdummies=10000
perf stat -e context-switches -a rmmod dummy
Performance counter stats for 'system wide':
1,211,798 context-switches
1.302137465 seconds time elapsed
After patch:
perf stat -e context-switches -a rmmod dummy
Performance counter stats for 'system wide':
225,523 context-switches
0.721623566 seconds time elapsed
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/dev.c	32
1 file changed, 12 insertions(+), 20 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 1d5c6dda1988..34b5322bc081 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4282,18 +4282,11 @@ int netif_receive_skb(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(netif_receive_skb);
 
-struct flush_work {
-	struct net_device *dev;
-	struct work_struct work;
-};
-
-DEFINE_PER_CPU(struct flush_work, flush_works);
+DEFINE_PER_CPU(struct work_struct, flush_works);
 
 /* Network device is going away, flush any packets still pending */
 static void flush_backlog(struct work_struct *work)
 {
-	struct flush_work *flush = container_of(work, typeof(*flush), work);
-	struct net_device *dev = flush->dev;
 	struct sk_buff *skb, *tmp;
 	struct softnet_data *sd;
 
@@ -4303,7 +4296,7 @@ static void flush_backlog(struct work_struct *work)
 	local_irq_disable();
 	rps_lock(sd);
 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
-		if (skb->dev == dev) {
+		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
 			__skb_unlink(skb, &sd->input_pkt_queue);
 			kfree_skb(skb);
 			input_queue_head_incr(sd);
@@ -4313,7 +4306,7 @@ static void flush_backlog(struct work_struct *work)
 	local_irq_enable();
 
 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
-		if (skb->dev == dev) {
+		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
 			__skb_unlink(skb, &sd->process_queue);
 			kfree_skb(skb);
 			input_queue_head_incr(sd);
@@ -4322,22 +4315,18 @@ static void flush_backlog(struct work_struct *work)
 	local_bh_enable();
 }
 
-static void flush_all_backlogs(struct net_device *dev)
+static void flush_all_backlogs(void)
 {
 	unsigned int cpu;
 
 	get_online_cpus();
 
-	for_each_online_cpu(cpu) {
-		struct flush_work *flush = per_cpu_ptr(&flush_works, cpu);
-
-		INIT_WORK(&flush->work, flush_backlog);
-		flush->dev = dev;
-		queue_work_on(cpu, system_highpri_wq, &flush->work);
-	}
+	for_each_online_cpu(cpu)
+		queue_work_on(cpu, system_highpri_wq,
+			      per_cpu_ptr(&flush_works, cpu));
 
 	for_each_online_cpu(cpu)
-		flush_work(&per_cpu_ptr(&flush_works, cpu)->work);
+		flush_work(per_cpu_ptr(&flush_works, cpu));
 
 	put_online_cpus();
 }
@@ -6725,8 +6714,8 @@ static void rollback_registered_many(struct list_head *head)
 		unlist_netdevice(dev);
 
 		dev->reg_state = NETREG_UNREGISTERING;
-		flush_all_backlogs(dev);
 	}
+	flush_all_backlogs();
 
 	synchronize_net();
 
@@ -8291,8 +8280,11 @@ static int __init net_dev_init(void)
 	 */
 
 	for_each_possible_cpu(i) {
+		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
 		struct softnet_data *sd = &per_cpu(softnet_data, i);
 
+		INIT_WORK(flush, flush_backlog);
+
 		skb_queue_head_init(&sd->input_pkt_queue);
 		skb_queue_head_init(&sd->process_queue);
 		INIT_LIST_HEAD(&sd->poll_list);
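
As a usage note, the caller side in rollback_registered_many() now marks every
device in the batch as NETREG_UNREGISTERING before triggering a single flush,
and the per-CPU worker identifies stale packets via skb->dev->reg_state rather
than a device pointer carried in the work item. A heavily abridged sketch of
that ordering (most of the teardown sequence is omitted):

	/* Abridged: mark all devices in the batch first ... */
	list_for_each_entry(dev, head, unreg_list) {
		unlist_netdevice(dev);
		dev->reg_state = NETREG_UNREGISTERING;
	}

	/* ... then flush every CPU's backlog once for the whole batch. */
	flush_all_backlogs();

	synchronize_net();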