author		Paul E. McKenney <paulmck@kernel.org>	2020-12-08 14:43:43 -0800
committer	Paul E. McKenney <paulmck@kernel.org>	2021-01-22 15:24:23 -0800
commit		3375efeddf6972df47df26a5b5c643189bd3c02a (patch)
tree		1cbf279264fc88f0cab1a396ffdba91f54ca11d0 /lib
parent		b4b7914a6a73fc169fd1ce2fcd78a1d83d9528a9 (diff)
percpu_ref: Dump mem_dump_obj() info upon reference-count underflow
Reference-count underflow for percpu_ref is detected in the RCU callback percpu_ref_switch_to_atomic_rcu(), and the resulting warning does not print anything allowing easy identification of which percpu_ref use case is underflowing. This is of course not normally a problem when developing a new percpu_ref use case because it is most likely that the problem resides in this new use case. However, when deploying a new kernel to a large set of servers, the underflow might well be a new corner case in any of the old percpu_ref use cases.

This commit therefore calls mem_dump_obj() to dump out any additional available information on the underflowing percpu_ref instance.

Cc: Ming Lei <ming.lei@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Reported-by: Andrii Nakryiko <andrii@kernel.org>
Tested-by: Naresh Kamboju <naresh.kamboju@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
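For illustration only (not part of the patch): a minimal sketch of the sort of bug this diagnostic is meant to pin down. The names struct foo, foo_release() and foo_demo() are hypothetical. The unpaired percpu_ref_put() drives the reference count negative, so once percpu_ref_kill() switches the ref to atomic mode, percpu_ref_switch_to_atomic_rcu() detects the underflow; with this patch it also passes the ref's internal percpu_ref_data to mem_dump_obj(), which, depending on the kernel's debug configuration, can report the slab cache and the allocation call chain, pointing back at the offending use case.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>

struct foo {
	struct percpu_ref ref;
	/* ... payload ... */
};

static void foo_release(struct percpu_ref *ref)
{
	kfree(container_of(ref, struct foo, ref));
}

static int foo_demo(void)
{
	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return -ENOMEM;
	if (percpu_ref_init(&f->ref, foo_release, 0, GFP_KERNEL)) {
		kfree(f);
		return -ENOMEM;
	}

	percpu_ref_put(&f->ref);	/* BUG: put without a matching get. */
	percpu_ref_kill(&f->ref);	/* The switch to atomic now sees count <= 0. */
	return 0;
}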
Diffstat (limited to 'lib')
-rw-r--r--	lib/percpu-refcount.c	12
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index e59eda07305e..a1071cdefb5a 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -5,6 +5,7 @@
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/slab.h>
+#include <linux/mm.h>
 #include <linux/percpu-refcount.h>
 
 /*
@@ -168,6 +169,7 @@ static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
 			struct percpu_ref_data, rcu);
 	struct percpu_ref *ref = data->ref;
 	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
+	static atomic_t underflows;
 	unsigned long count = 0;
 	int cpu;
 
@@ -191,9 +193,13 @@ static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
 	 */
 	atomic_long_add((long)count - PERCPU_COUNT_BIAS, &data->count);
 
-	WARN_ONCE(atomic_long_read(&data->count) <= 0,
-		  "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
-		  data->release, atomic_long_read(&data->count));
+	if (WARN_ONCE(atomic_long_read(&data->count) <= 0,
+		      "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
+		      data->release, atomic_long_read(&data->count)) &&
+	    atomic_inc_return(&underflows) < 4) {
+		pr_err("%s(): percpu_ref underflow", __func__);
+		mem_dump_obj(data);
+	}
 
 	/* @ref is viewed as dead on all CPUs, send out switch confirmation */
 	percpu_ref_call_confirm_rcu(rcu);
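A note on the rate limiting above (an observation, not text from the commit): WARN_ONCE() emits its splat only once, but it still evaluates and returns its condition on every call, so without the extra check each later underflow would invoke mem_dump_obj() as well; the static underflows counter caps the dumps at the first three. A generic sketch of the same idiom, with hypothetical names check_thing(), hits, val and obj:

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/mm.h>

static void check_thing(long val, void *obj)
{
	static atomic_t hits;

	/* Splat once, but dump object details for at most the first three hits. */
	if (WARN_ONCE(val <= 0, "thing underflow (%ld)", val) &&
	    atomic_inc_return(&hits) < 4)
		mem_dump_obj(obj);
}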