author | Manfred Spraul <manfred@colorfullife.com> | 2022-12-16 16:04:39 +0100
committer | Andrew Morton <akpm@linux-foundation.org> | 2023-02-02 22:50:01 -0800
commit | 805afd8300998948d16bdba0358dcfeb202a70d5 (patch)
tree | 66b0eab28da035317de3d2d178ab680a8a76e005
parent | a9dc087fd3c484fd1ed18c5efb290efaaf44ce03 (diff)
lib/percpu_counter: percpu_counter_add_batch() overflow/underflow
Patch series "various irq handling fixes/docu updates".
If an interrupt happens between __this_cpu_read(*fbc->counters) and
this_cpu_add(*fbc->counters, amount), and that interrupt modifies the per-cpu
counter, then the this_cpu_add() executed after the interrupt returns may
under- or overflow: the fast-path decision was made on a stale value, so the
per-cpu counter can end up outside the +/-batch window.
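The window is easiest to see outside the kernel. Below is a minimal userspace
C sketch (hypothetical names and illustrative values, not the kernel code) of
the pre-patch fast path: preemption is disabled but interrupts are not, so an
"interrupt" can change the per-cpu slot between the read and the add, and the
slot lands at the batch limit even though the fast path was chosen.

/* Userspace sketch of the race: "counter" stands in for this CPU's
 * *fbc->counters slot, irq_handler() for an interrupt that also
 * updates the counter. Illustrative values only. */
#include <stdio.h>
#include <stdlib.h>

static long counter;		/* this CPU's per-cpu slot */
static const long batch = 32;

static void irq_handler(void)
{
	counter += 31;		/* interrupt pushes the slot near the limit */
}

int main(void)
{
	long amount = 1;

	/* fast-path decision, as in the pre-patch function */
	long count = counter + amount;	/* __this_cpu_read() + amount */
	if (labs(count) < batch) {
		irq_handler();		/* fires in the read/add window */
		counter += amount;	/* this_cpu_add() */
	}
	/* slot now holds 32 >= batch, although the check said "fast path" */
	printf("per-cpu slot = %ld (batch = %ld)\n", counter, batch);
	return 0;
}

If this window is hit repeatedly, the slot can drift arbitrarily far past the
batch limit, which is the under/overflow the patch title refers to.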
Link: https://lkml.kernel.org/r/20221216150155.200389-1-manfred@colorfullife.com
Link: https://lkml.kernel.org/r/20221216150441.200533-1-manfred@colorfullife.com
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Cc: "Sun, Jiebin" <jiebin.sun@intel.com>
Cc: <1vier1@web.de>
Cc: Alexander Sverdlin <alexander.sverdlin@siemens.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r-- | lib/percpu_counter.c | 25
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 42f729c8e56c..dba56c5c1837 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -73,28 +73,33 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 EXPORT_SYMBOL(percpu_counter_set);
 
 /*
- * This function is both preempt and irq safe. The former is due to explicit
- * preemption disable. The latter is guaranteed by the fact that the slow path
- * is explicitly protected by an irq-safe spinlock whereas the fast patch uses
- * this_cpu_add which is irq-safe by definition. Hence there is no need muck
- * with irq state before calling this one
+ * local_irq_save() is needed to make the function irq safe:
+ * - The slow path would be ok as protected by an irq-safe spinlock.
+ * - this_cpu_add would be ok as it is irq-safe by definition.
+ * But:
+ * The decision slow path/fast path and the actual update must be atomic, too.
+ * Otherwise a call in process context could check the current values and
+ * decide that the fast path can be used. If now an interrupt occurs before
+ * the this_cpu_add(), and the interrupt updates this_cpu(*fbc->counters),
+ * then the this_cpu_add() that is executed after the interrupt has completed
+ * can produce values larger than "batch" or even overflows.
  */
 void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
 {
 	s64 count;
+	unsigned long flags;
 
-	preempt_disable();
+	local_irq_save(flags);
 	count = __this_cpu_read(*fbc->counters) + amount;
 	if (abs(count) >= batch) {
-		unsigned long flags;
-
-		raw_spin_lock_irqsave(&fbc->lock, flags);
+		raw_spin_lock(&fbc->lock);
 		fbc->count += count;
 		__this_cpu_sub(*fbc->counters, count - amount);
-		raw_spin_unlock_irqrestore(&fbc->lock, flags);
+		raw_spin_unlock(&fbc->lock);
 	} else {
 		this_cpu_add(*fbc->counters, amount);
 	}
-	preempt_enable();
+	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(percpu_counter_add_batch);
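For contrast, here is the same userspace sketch with the patched ordering,
modelling local_irq_save()/local_irq_restore() with a simple flag (again
hypothetical names, not the kernel implementation): the fast-path check and
the update now execute as one unit that the "interrupt" cannot split.

/* Userspace sketch of the patched logic. global_count models fbc->count,
 * percpu_slot models this CPU's *fbc->counters, and irqs_disabled models
 * the effect of local_irq_save()/local_irq_restore(). */
#include <stdio.h>
#include <stdlib.h>

static long global_count;	/* fbc->count */
static long percpu_slot;	/* this CPU's per-cpu slot */
static int irqs_disabled;

static void maybe_irq(void)
{
	if (!irqs_disabled)	/* interrupts only run while enabled */
		percpu_slot += 31;
}

static void add_batch(long amount, long batch)
{
	irqs_disabled = 1;			/* local_irq_save(flags) */
	long count = percpu_slot + amount;	/* __this_cpu_read() + amount */
	maybe_irq();				/* no effect inside the window */
	if (labs(count) >= batch) {		/* slow path: fold into global */
		global_count += count;
		percpu_slot -= count - amount;
	} else {				/* fast path: stay per-cpu */
		percpu_slot += amount;
	}
	irqs_disabled = 0;			/* local_irq_restore(flags) */
}

int main(void)
{
	add_batch(1, 32);
	printf("global = %ld, per-cpu slot = %ld\n", global_count, percpu_slot);
	return 0;
}

The trade-off visible in the diff is that the fast path now runs with
interrupts disabled rather than merely non-preemptible; the critical section
is only a few instructions, so the added latency is small.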