author     Thomas Gleixner <tglx@linutronix.de>  2023-05-01 17:42:06 +0200
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2023-05-17 11:13:22 +0200
commit     f653ce4db543cdbb417be1d8732684b5becb90eb (patch)
tree       ba3d269a6e911cf1eb124695a99524fdc2357d1b /lib
parent     409b4fa76466231db305643d75ef477a5b779f99 (diff)
debugobject: Ensure pool refill (again)
commit 0af462f19e635ad522f28981238334620881badc upstream.

The recent fix to ensure atomicity of lookup and allocation inadvertently broke the pool refill mechanism.

Prior to that change debug_object_activate() and debug_object_assert_init() invoked debug_object_init() to set up the tracking object for statically initialized objects. That's no longer the case, and debug_object_init() is now the only place which does pool refills.

Depending on the number of statically initialized objects this can be enough to actually deplete the pool, which was observed by Ido via a debugobjects OOM warning.

Restore the old behaviour by adding explicit refill opportunities to debug_object_activate() and debug_object_assert_init().

Fixes: 63a759694eed ("debugobject: Prevent init race with static objects")
Reported-by: Ido Schimmel <idosch@nvidia.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Ido Schimmel <idosch@nvidia.com>
Link: https://lore.kernel.org/r/871qk05a9d.ffs@tglx
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
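For illustration, a minimal userspace sketch of the failure mode and the fix follows. Every name in it (POOL_MIN, obj_pool_free, track_static_object(), ...) is a simplified stand-in invented for this sketch, not the kernel implementation: it only models why refilling at each entry point keeps a burst of statically initialized objects from draining the pool.

/*
 * Stand-alone model of the refill problem fixed by this patch.
 * All names are simplified stand-ins, not the kernel code.
 */
#include <stdio.h>

#define POOL_MIN 64                     /* refill watermark */

static int obj_pool_free = POOL_MIN;    /* free tracking objects */

/* Top the pool back up to the watermark (stands in for the
 * allocations the kernel's fill_pool() performs). */
static void fill_pool(void)
{
	while (obj_pool_free < POOL_MIN)
		obj_pool_free++;
}

/* Consume one tracking object, as happens when a statically
 * initialized object is seen for the first time. */
static int track_static_object(void)
{
	if (!obj_pool_free)
		return -1;              /* depleted: the reported OOM case */
	obj_pool_free--;
	return 0;
}

/* Entry point modelled on the patched debug_object_activate():
 * refill first, so bursts of static objects cannot drain the pool. */
static int debug_object_activate(void)
{
	fill_pool();                    /* the refill this patch restores */
	return track_static_object();
}

int main(void)
{
	for (int i = 0; i < 10000; i++) {
		if (debug_object_activate()) {
			printf("pool depleted at object %d\n", i);
			return 1;
		}
	}
	printf("pool never depleted across 10000 activations\n");
	return 0;
}

Removing the fill_pool() call from the model's debug_object_activate() makes the run fail as soon as the initial POOL_MIN objects are consumed, which mirrors the depletion this patch fixes.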
Diffstat (limited to 'lib')
-rw-r--r--  lib/debugobjects.c  16
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 2315a75c45c3..5f23d896df55 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -460,6 +460,16 @@ static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket
return NULL;
}
+static void debug_objects_fill_pool(void)
+{
+ /*
+ * On RT enabled kernels the pool refill must happen in preemptible
+ * context:
+ */
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
+ fill_pool();
+}
+
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
@@ -468,7 +478,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
struct debug_obj *obj;
unsigned long flags;
- fill_pool();
+ debug_objects_fill_pool();
db = get_bucket((unsigned long) addr);
@@ -553,6 +563,8 @@ int debug_object_activate(void *addr, struct debug_obj_descr *descr)
if (!debug_objects_enabled)
return 0;
+ debug_objects_fill_pool();
+
db = get_bucket((unsigned long) addr);
raw_spin_lock_irqsave(&db->lock, flags);
@@ -762,6 +774,8 @@ void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
if (!debug_objects_enabled)
return;
+ debug_objects_fill_pool();
+
db = get_bucket((unsigned long) addr);
raw_spin_lock_irqsave(&db->lock, flags);
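A note on the new helper's RT gate: on PREEMPT_RT kernels the memory allocator takes sleeping locks, so fill_pool()'s allocations are only safe from preemptible context there, as the comment in debug_objects_fill_pool() states. The !IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible() check therefore skips the refill when called from atomic context on RT and relies on a later preemptible call to one of the three refill sites to top the pool back up. On non-RT kernels the condition is always true, so the refill remains unconditional, matching the previous behaviour.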