Diffstat (limited to 'kernel/kcsan')
-rw-r--r--  kernel/kcsan/kcsan_test.c  37
-rw-r--r--  kernel/kcsan/selftest.c    14
2 files changed, 30 insertions, 21 deletions
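
The change below moves the barrier tests' on-stack DEFINE_SPINLOCK()/DEFINE_MUTEX() to file-scope statics and cycles each lock once before any barrier expectation is checked. The reason is the "warm up" comment added in the patch: with lockdep enabled, the first acquisition of a lock class performs one-time setup in instrumented code, which can strengthen that first lock/unlock relative to later ones and make the expectations flaky. A minimal sketch of the warm-up idea, using a hypothetical demo_lock rather than the locks from the patch:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical, for illustration only */

static void demo_warm_up(void)
{
	/*
	 * Under lockdep, the first acquisition of a lock class does one-time
	 * class registration in instrumented code; later acquisitions take
	 * the fast path. Cycling the lock once up front makes all subsequent
	 * lock/unlock pairs behave uniformly for instrumentation tests.
	 */
	spin_lock(&demo_lock);
	spin_unlock(&demo_lock);
}

In the patch itself, the warm-up runs right after the expectation macros are defined, before the first KCSAN_EXPECT_*_BARRIER() check.
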
diff --git a/kernel/kcsan/kcsan_test.c b/kernel/kcsan/kcsan_test.c
index 5bf94550bcdf..2bad0820f73a 100644
--- a/kernel/kcsan/kcsan_test.c
+++ b/kernel/kcsan/kcsan_test.c
@@ -300,6 +300,8 @@ static struct {
long val[8];
} test_struct;
static DEFINE_SEQLOCK(test_seqlock);
+static DEFINE_SPINLOCK(test_spinlock);
+static DEFINE_MUTEX(test_mutex);
/*
* Helper to avoid compiler optimizing out reads, and to generate source values
@@ -523,8 +525,6 @@ static void test_barrier_nothreads(struct kunit *test)
struct kcsan_scoped_access *reorder_access = NULL;
#endif
arch_spinlock_t arch_spinlock = __ARCH_SPIN_LOCK_UNLOCKED;
- DEFINE_SPINLOCK(spinlock);
- DEFINE_MUTEX(mutex);
atomic_t dummy;
KCSAN_TEST_REQUIRES(test, reorder_access != NULL);
@@ -543,6 +543,15 @@ static void test_barrier_nothreads(struct kunit *test)
#define KCSAN_EXPECT_WRITE_BARRIER(b, o) __KCSAN_EXPECT_BARRIER(KCSAN_ACCESS_WRITE, b, o, #b)
#define KCSAN_EXPECT_RW_BARRIER(b, o) __KCSAN_EXPECT_BARRIER(KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE, b, o, #b)
+ /*
+ * Lockdep initialization can strengthen certain locking operations due
+ * to calling into instrumented files; "warm up" our locks.
+ */
+ spin_lock(&test_spinlock);
+ spin_unlock(&test_spinlock);
+ mutex_lock(&test_mutex);
+ mutex_unlock(&test_mutex);
+
/* Force creating a valid entry in reorder_access first. */
test_var = 0;
while (test_var++ < 1000000 && reorder_access->size != sizeof(test_var))
@@ -592,10 +601,10 @@ static void test_barrier_nothreads(struct kunit *test)
KCSAN_EXPECT_READ_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true);
KCSAN_EXPECT_READ_BARRIER(arch_spin_lock(&arch_spinlock), false);
KCSAN_EXPECT_READ_BARRIER(arch_spin_unlock(&arch_spinlock), true);
- KCSAN_EXPECT_READ_BARRIER(spin_lock(&spinlock), false);
- KCSAN_EXPECT_READ_BARRIER(spin_unlock(&spinlock), true);
- KCSAN_EXPECT_READ_BARRIER(mutex_lock(&mutex), false);
- KCSAN_EXPECT_READ_BARRIER(mutex_unlock(&mutex), true);
+ KCSAN_EXPECT_READ_BARRIER(spin_lock(&test_spinlock), false);
+ KCSAN_EXPECT_READ_BARRIER(spin_unlock(&test_spinlock), true);
+ KCSAN_EXPECT_READ_BARRIER(mutex_lock(&test_mutex), false);
+ KCSAN_EXPECT_READ_BARRIER(mutex_unlock(&test_mutex), true);
KCSAN_EXPECT_WRITE_BARRIER(mb(), true);
KCSAN_EXPECT_WRITE_BARRIER(wmb(), true);
@@ -638,10 +647,10 @@ static void test_barrier_nothreads(struct kunit *test)
KCSAN_EXPECT_WRITE_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true);
KCSAN_EXPECT_WRITE_BARRIER(arch_spin_lock(&arch_spinlock), false);
KCSAN_EXPECT_WRITE_BARRIER(arch_spin_unlock(&arch_spinlock), true);
- KCSAN_EXPECT_WRITE_BARRIER(spin_lock(&spinlock), false);
- KCSAN_EXPECT_WRITE_BARRIER(spin_unlock(&spinlock), true);
- KCSAN_EXPECT_WRITE_BARRIER(mutex_lock(&mutex), false);
- KCSAN_EXPECT_WRITE_BARRIER(mutex_unlock(&mutex), true);
+ KCSAN_EXPECT_WRITE_BARRIER(spin_lock(&test_spinlock), false);
+ KCSAN_EXPECT_WRITE_BARRIER(spin_unlock(&test_spinlock), true);
+ KCSAN_EXPECT_WRITE_BARRIER(mutex_lock(&test_mutex), false);
+ KCSAN_EXPECT_WRITE_BARRIER(mutex_unlock(&test_mutex), true);
KCSAN_EXPECT_RW_BARRIER(mb(), true);
KCSAN_EXPECT_RW_BARRIER(wmb(), true);
@@ -684,10 +693,10 @@ static void test_barrier_nothreads(struct kunit *test)
KCSAN_EXPECT_RW_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true);
KCSAN_EXPECT_RW_BARRIER(arch_spin_lock(&arch_spinlock), false);
KCSAN_EXPECT_RW_BARRIER(arch_spin_unlock(&arch_spinlock), true);
- KCSAN_EXPECT_RW_BARRIER(spin_lock(&spinlock), false);
- KCSAN_EXPECT_RW_BARRIER(spin_unlock(&spinlock), true);
- KCSAN_EXPECT_RW_BARRIER(mutex_lock(&mutex), false);
- KCSAN_EXPECT_RW_BARRIER(mutex_unlock(&mutex), true);
+ KCSAN_EXPECT_RW_BARRIER(spin_lock(&test_spinlock), false);
+ KCSAN_EXPECT_RW_BARRIER(spin_unlock(&test_spinlock), true);
+ KCSAN_EXPECT_RW_BARRIER(mutex_lock(&test_mutex), false);
+ KCSAN_EXPECT_RW_BARRIER(mutex_unlock(&test_mutex), true);
kcsan_nestable_atomic_end();
}
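
For context, the KCSAN_EXPECT_{READ,WRITE,RW}_BARRIER() checks above all expand through __KCSAN_EXPECT_BARRIER(), which verifies whether an operation's instrumentation flushes KCSAN's pending reorder_access. A paraphrased sketch of that mechanism (simplified; field names and failure messages may differ from the real macro in kcsan_test.c, and it relies on the same in-function reorder_access and test_var):

/*
 * Sketch: prime the task's reorder slot with a scoped access, run the
 * operation under test, then check whether its instrumentation flushed
 * the slot (size reset to 0). Barrier-like operations must flush it.
 */
#define EXPECT_BARRIER_SKETCH(test, access_type, op, order_before)		\
	do {									\
		reorder_access->type = KCSAN_ACCESS_SCOPED | (access_type);	\
		reorder_access->size = sizeof(test_var);			\
		op;								\
		KUNIT_EXPECT_EQ(test, reorder_access->size,			\
				(order_before) ? 0 : sizeof(test_var));	\
	} while (0)

This is why the lockdep warm-up matters: if the first spin_lock() of a class runs extra instrumented code, a check that expects "does not order prior accesses" (order_before == false), such as the spin_lock() and mutex_lock() cases above, can spuriously fail on that first acquisition.
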
diff --git a/kernel/kcsan/selftest.c b/kernel/kcsan/selftest.c
index 08c6b84b9ebe..b6d4da07d80a 100644
--- a/kernel/kcsan/selftest.c
+++ b/kernel/kcsan/selftest.c
@@ -113,6 +113,7 @@ static bool __init test_matching_access(void)
* positives: simple test to check at boot certain barriers are always properly
* instrumented. See kcsan_test for a more complete test.
*/
+static DEFINE_SPINLOCK(test_spinlock);
static bool __init test_barrier(void)
{
#ifdef CONFIG_KCSAN_WEAK_MEMORY
@@ -122,7 +123,6 @@ static bool __init test_barrier(void)
#endif
bool ret = true;
arch_spinlock_t arch_spinlock = __ARCH_SPIN_LOCK_UNLOCKED;
- DEFINE_SPINLOCK(spinlock);
atomic_t dummy;
long test_var;
@@ -172,8 +172,8 @@ static bool __init test_barrier(void)
KCSAN_CHECK_READ_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var));
arch_spin_lock(&arch_spinlock);
KCSAN_CHECK_READ_BARRIER(arch_spin_unlock(&arch_spinlock));
- spin_lock(&spinlock);
- KCSAN_CHECK_READ_BARRIER(spin_unlock(&spinlock));
+ spin_lock(&test_spinlock);
+ KCSAN_CHECK_READ_BARRIER(spin_unlock(&test_spinlock));
KCSAN_CHECK_WRITE_BARRIER(mb());
KCSAN_CHECK_WRITE_BARRIER(wmb());
@@ -202,8 +202,8 @@ static bool __init test_barrier(void)
KCSAN_CHECK_WRITE_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var));
arch_spin_lock(&arch_spinlock);
KCSAN_CHECK_WRITE_BARRIER(arch_spin_unlock(&arch_spinlock));
- spin_lock(&spinlock);
- KCSAN_CHECK_WRITE_BARRIER(spin_unlock(&spinlock));
+ spin_lock(&test_spinlock);
+ KCSAN_CHECK_WRITE_BARRIER(spin_unlock(&test_spinlock));
KCSAN_CHECK_RW_BARRIER(mb());
KCSAN_CHECK_RW_BARRIER(wmb());
@@ -235,8 +235,8 @@ static bool __init test_barrier(void)
KCSAN_CHECK_RW_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var));
arch_spin_lock(&arch_spinlock);
KCSAN_CHECK_RW_BARRIER(arch_spin_unlock(&arch_spinlock));
- spin_lock(&spinlock);
- KCSAN_CHECK_RW_BARRIER(spin_unlock(&spinlock));
+ spin_lock(&test_spinlock);
+ KCSAN_CHECK_RW_BARRIER(spin_unlock(&test_spinlock));
kcsan_nestable_atomic_end();
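
The boot-time selftest mirrors the KUnit test but cannot use KUNIT_EXPECT_*(); its KCSAN_CHECK_*_BARRIER() macros instead accumulate failures in the local 'ret' flag that test_barrier() returns. A paraphrased sketch of that pattern (simplified; not the exact __KCSAN_CHECK_BARRIER() from selftest.c):

/*
 * Sketch: same reorder-slot priming as the KUnit variant, but report
 * failures with pr_err() and a boolean, since KUnit is unavailable at
 * this stage of boot.
 */
#define CHECK_BARRIER_SKETCH(op)						\
	do {									\
		reorder_access->type = KCSAN_ACCESS_SCOPED;			\
		reorder_access->size = sizeof(test_var);			\
		op;								\
		if (reorder_access->size != 0) {				\
			pr_err("barrier not instrumented: %s\n", #op);		\
			ret = false;						\
		}								\
	} while (0)

Note the asymmetry with the KUnit test: here only the spinlock is moved to file scope and no explicit warm-up is added. The spin_lock() taken before each checked spin_unlock() is not itself under a CHECK macro, and spin_unlock() is expected to act as a barrier anyway, so lockdep strengthening cannot turn these checks into false failures.
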