summary | refs | log | tree | commit | diff | stats
path: root/include/linux/seqlock.h
diff options
context:
space:
mode:
authorMarco Elver <elver@google.com>2024-11-04 16:43:09 +0100
committerPeter Zijlstra <peterz@infradead.org>2024-11-05 12:55:35 +0100
commit183ec5f26b2fc97a4a9871865bfe9b33c41fddb2 (patch)
treeedd0f5c701d061b60f9df5c75df48b4a29ce9955 /include/linux/seqlock.h
parent93190bc35d6d4364a4d8c38ac8961dabecbff4ed (diff)
downloadlinux-183ec5f26b2fc97a4a9871865bfe9b33c41fddb2.tar.gz
linux-183ec5f26b2fc97a4a9871865bfe9b33c41fddb2.tar.bz2
linux-183ec5f26b2fc97a4a9871865bfe9b33c41fddb2.zip
kcsan, seqlock: Fix incorrect assumption in read_seqbegin()
During testing of the preceding changes, I noticed that in some cases,
current->kcsan_ctx.in_flat_atomic remained true until task exit. This is
obviously wrong, because _all_ accesses for the given task will be
treated as atomic, resulting in false negatives, i.e. missed data races.

Debugging led to fs/dcache.c, where we can see this usage of seqlock:

	struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
	{
		struct dentry *dentry;
		unsigned seq;

		do {
			seq = read_seqbegin(&rename_lock);
			dentry = __d_lookup(parent, name);
			if (dentry)
				break;
		} while (read_seqretry(&rename_lock, seq));
		[...]

As can be seen, read_seqretry() is never called if dentry != NULL;
consequently, current->kcsan_ctx.in_flat_atomic will never be reset to
false by read_seqretry().

Give up on the wrong assumption of "assume closing read_seqretry()",
and rely on the already-present annotations in
read_seqcount_begin/retry().

Fixes: 88ecd153be95 ("seqlock, kcsan: Add annotations for KCSAN")
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20241104161910.780003-6-elver@google.com
Diffstat (limited to 'include/linux/seqlock.h')
-rw-r--r--include/linux/seqlock.h12
1 file changed, 1 insertion(+), 11 deletions(-)
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 45eee0e5dca0..5298765d6ca4 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -810,11 +810,7 @@ static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s)
*/
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
- unsigned ret = read_seqcount_begin(&sl->seqcount);
-
- kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */
- kcsan_flat_atomic_begin();
- return ret;
+ return read_seqcount_begin(&sl->seqcount);
}
/**
@@ -830,12 +826,6 @@ static inline unsigned read_seqbegin(const seqlock_t *sl)
*/
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
- /*
- * Assume not nested: read_seqretry() may be called multiple times when
- * completing read critical section.
- */
- kcsan_flat_atomic_end();
-
return read_seqcount_retry(&sl->seqcount, start);
}