author	Eric Dumazet <edumazet@google.com>	2017-01-20 06:34:22 -0800
committer	Tejun Heo <tj@kernel.org>	2017-01-20 10:06:56 -0500
commit	aaf0f2fa682861e47a4f6a8762d2b8a9a4a51077 (patch)
tree	bc57d4a655a39f065e6a606913be6a25de9ab55a /lib
parent	44b4b461a0fb407507b46ea76a71376d74de7058 (diff)
percpu_counter: percpu_counter_hotcpu_callback() cleanup
In commit ebd8fef304f9 ("percpu_counter: make percpu_counters_lock irq-safe") we disabled irqs in percpu_counter_hotcpu_callback(). We can therefore grab every counter spinlock without having to disable irqs again.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'lib')
-rw-r--r--	lib/percpu_counter.c	5
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index c8cebb137076..9c21000df0b5 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -176,13 +176,12 @@ static int percpu_counter_cpu_dead(unsigned int cpu)
 	spin_lock_irq(&percpu_counters_lock);
 	list_for_each_entry(fbc, &percpu_counters, list) {
 		s32 *pcount;
-		unsigned long flags;
 
-		raw_spin_lock_irqsave(&fbc->lock, flags);
+		raw_spin_lock(&fbc->lock);
 		pcount = per_cpu_ptr(fbc->counters, cpu);
 		fbc->count += *pcount;
 		*pcount = 0;
-		raw_spin_unlock_irqrestore(&fbc->lock, flags);
+		raw_spin_unlock(&fbc->lock);
 	}
 	spin_unlock_irq(&percpu_counters_lock);
 #endif
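
For reference, a sketch of how percpu_counter_cpu_dead() reads once this patch is applied. The lines inside the hunk come straight from the diff above; the lines outside the hunk (including the compute_batch_value() call and the return) are assumed from the surrounding lib/percpu_counter.c context and may differ slightly in the actual tree. The point made in the commit message is that spin_lock_irq() on percpu_counters_lock already keeps interrupts disabled for the whole list walk, so each fbc->lock can be taken with a plain raw_spin_lock().

static int percpu_counter_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct percpu_counter *fbc;

	compute_batch_value(cpu);	/* assumed from surrounding context, not part of the hunk */

	/* Disables interrupts for the entire walk of the global counter list. */
	spin_lock_irq(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;

		/*
		 * irqs are already off courtesy of spin_lock_irq() above,
		 * so no irqsave/irqrestore is needed on the per-counter lock.
		 */
		raw_spin_lock(&fbc->lock);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;	/* fold the dead CPU's delta into the global count */
		*pcount = 0;
		raw_spin_unlock(&fbc->lock);
	}
	spin_unlock_irq(&percpu_counters_lock);
#endif
	return 0;
}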