author    Marco Elver <elver@google.com>    2021-11-30 12:44:11 +0100
committer Paul E. McKenney <paulmck@kernel.org>    2021-12-09 16:42:26 -0800
commit    9756f64c8f2d19c0029a5827bda8ac275302ec22 (patch)
tree      8c3d8f3bb18b527d6da3b27ff899f9d6f1ae6b62 /kernel/kcsan
parent    71f8de7092cb2cf95e3f7055df139118d1445597 (diff)
kcsan: Avoid checking scoped accesses from nested contexts
Avoid checking scoped accesses from nested contexts (such as nested
interrupts or in scheduler code) which share the same kcsan_ctx.

This is to avoid detecting false positive races of accesses in the same
thread with currently scoped accesses: consider setting up a watchpoint
for a non-scoped (normal) access that also "conflicts" with a current
scoped access. In a nested interrupt (or in the scheduler), which shares
the same kcsan_ctx, we cannot check scoped accesses set up in the parent
context -- simply ignore them in this case.

With the introduction of kcsan_ctx::disable_scoped, we can also clean up
kcsan_check_scoped_accesses()'s recursion guard, and do not need to
modify the list's prev pointer.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
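[Editor's note] For readers unfamiliar with scoped accesses: a scoped access is
registered once and then re-checked by KCSAN on every subsequent memory access
until the scope ends. A minimal usage sketch follows; the struct and function
are hypothetical, but ASSERT_EXCLUSIVE_WRITER_SCOPED() is the real helper from
<linux/kcsan-checks.h>:

	#include <linux/kcsan-checks.h>

	struct counter { int val; };	/* hypothetical example type */

	static void bump(struct counter *c)
	{
		/*
		 * Registers a scoped access for c->val: until this function
		 * returns, KCSAN keeps checking that no other thread writes
		 * c->val, rather than checking only this one statement.
		 */
		ASSERT_EXCLUSIVE_WRITER_SCOPED(c->val);
		c->val++;
	}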
Diffstat (limited to 'kernel/kcsan')
-rw-r--r--  kernel/kcsan/core.c | 18 +++++++++++++++---
1 file changed, 15 insertions(+), 3 deletions(-)
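[Editor's note] The fix builds on kcsan_ctx::disable_scoped, introduced earlier
in this series. An abbreviated sketch of the per-context structure as assumed
by this patch (the authoritative definition lives in include/linux/kcsan.h;
other fields elided):

	struct kcsan_ctx {
		int disable_count;	/* disable all KCSAN checking if non-zero */
		int disable_scoped;	/* skip checking of scoped accesses if non-zero */
		/* ... */
		struct list_head scoped_accesses;	/* active scoped accesses of this context */
	};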
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index e34a1710b7bc..bd359f8ee63a 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -204,15 +204,17 @@ check_access(const volatile void *ptr, size_t size, int type, unsigned long ip);
 static noinline void kcsan_check_scoped_accesses(void)
 {
 	struct kcsan_ctx *ctx = get_ctx();
-	struct list_head *prev_save = ctx->scoped_accesses.prev;
 	struct kcsan_scoped_access *scoped_access;
 
-	ctx->scoped_accesses.prev = NULL;  /* Avoid recursion. */
+	if (ctx->disable_scoped)
+		return;
+
+	ctx->disable_scoped++;
 	list_for_each_entry(scoped_access, &ctx->scoped_accesses, list) {
 		check_access(scoped_access->ptr, scoped_access->size,
 			     scoped_access->type, scoped_access->ip);
 	}
-	ctx->scoped_accesses.prev = prev_save;
+	ctx->disable_scoped--;
 }
 
 /* Rules for generic atomic accesses. Called from fast-path. */
@@ -466,6 +468,15 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned
 	}
 
 	/*
+	 * Avoid races of scoped accesses from nested interrupts (or scheduler).
+	 * Assume setting up a watchpoint for a non-scoped (normal) access that
+	 * also conflicts with a current scoped access. In a nested interrupt,
+	 * which shares the context, it would check a conflicting scoped access.
+	 * To avoid, disable scoped access checking.
+	 */
+	ctx->disable_scoped++;
+
+	/*
 	 * Save and restore the IRQ state trace touched by KCSAN, since KCSAN's
 	 * runtime is entered for every memory access, and potentially useful
 	 * information is lost if dirtied by KCSAN.
@@ -578,6 +589,7 @@ out_unlock:
 	if (!kcsan_interrupt_watcher)
 		local_irq_restore(irq_flags);
 	kcsan_restore_irqtrace(current);
+	ctx->disable_scoped--;
 out:
 	user_access_restore(ua_flags);
 }
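[Editor's note] To make the comment added in kcsan_setup_watchpoint() concrete,
here is a hedged sketch of the false positive being prevented; all names except
the KCSAN helper are hypothetical:

	int shared_var;

	void task_context(void)
	{
		/* Scoped access: 'shared_var' is re-checked until scope exit. */
		ASSERT_EXCLUSIVE_WRITER_SCOPED(shared_var);

		/*
		 * This plain write may cause KCSAN to set up a watchpoint on
		 * &shared_var and stall. If an interrupt arrives during the
		 * stall, it shares this task's kcsan_ctx; without the new
		 * disable_scoped guard, any access in the interrupt would
		 * re-check the scoped access above, hit our own watchpoint,
		 * and report this thread as racing with itself.
		 */
		shared_var = 1;
	}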