// SPDX-License-Identifier: GPL-2.0
/*
 * KCSAN test with various race scenarios to test runtime behaviour. Since
 * KCSAN's reports are obtained via the console, that output is what we verify.
 * Each test case checks for the presence (or absence) of generated reports,
 * relying on the 'console' tracepoint to capture reports as they appear in the
 * kernel log.
 *
 * Makes use of KUnit for test organization, and the Torture framework for test
 * thread control.
 *
 * Copyright (C) 2020, Google LLC.
 * Author: Marco Elver <elver@google.com>
 */

#define pr_fmt(fmt) "kcsan_test: " fmt

#include <kunit/test.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/kcsan-checks.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/torture.h>
#include <linux/tracepoint.h>
#include <linux/types.h>
#include <trace/events/printk.h>

#define KCSAN_TEST_REQUIRES(test, cond) do {			\
	if (!(cond))						\
		kunit_skip((test), "Test requires: " #cond);	\
} while (0)

#ifdef CONFIG_CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE
#define __KCSAN_ACCESS_RW(alt) (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
#else
#define __KCSAN_ACCESS_RW(alt) (alt)
#endif
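/*
 * With compiler support for compound instrumentation, plain compound
 * operations (e.g. test_var++) are reported as compound "read-write" accesses;
 * otherwise __KCSAN_ACCESS_RW() falls back to the plain access type @alt.
 */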

/* Points to current test-case memory access "kernels". */
static void (*access_kernels[2])(void);

static struct task_struct **threads; /* Lists of threads. */
static unsigned long end_time;       /* End time of test. */

/* Report as observed from console. */
static struct {
	spinlock_t lock;
	int nlines;
	char lines[3][512];
} observed = {
	.lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};
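/*
 * lines[0] holds the report title ("BUG: KCSAN: ..."); lines[1] and lines[2]
 * hold the two access descriptions ("... bytes by ...").
 */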

/* Set up test checking loop. */
static __no_kcsan inline void
begin_test_checks(void (*func1)(void), void (*func2)(void))
{
	kcsan_disable_current();

	/*
	 * Require at least as long as KCSAN_REPORT_ONCE_IN_MS, to ensure at
	 * least one race is reported.
	 */
	end_time = jiffies + msecs_to_jiffies(CONFIG_KCSAN_REPORT_ONCE_IN_MS + 500);

	/* Signal start; release potential initialization of shared data. */
	smp_store_release(&access_kernels[0], func1);
	smp_store_release(&access_kernels[1], func2);
}

/* End test checking loop. */
static __no_kcsan inline bool
end_test_checks(bool stop)
{
	if (!stop && time_before(jiffies, end_time)) {
		/* Continue checking. */
		might_sleep();
		return false;
	}

	kcsan_enable_current();
	return true;
}
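/*
 * Typical test-case loop (sketch of the pattern used by the test cases below),
 * with a suitably initialized struct expect_report expect:
 *
 *	begin_test_checks(test_kernel_write, test_kernel_read);
 *	do {
 *		match_expect |= report_matches(&expect);
 *	} while (!end_test_checks(match_expect));
 *	KUNIT_EXPECT_TRUE(test, match_expect);
 */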

/*
 * Probe for console output: checks if a race was reported, and obtains observed
 * lines of interest.
 */
__no_kcsan
static void probe_console(void *ignore, const char *buf, size_t len)
{
	unsigned long flags;
	int nlines;

	/*
	 * Note that KCSAN reports under a global lock, so we do not risk the
	 * possibility of having multiple reports interleaved. If that were the
	 * case, we'd expect tests to fail.
	 */

	spin_lock_irqsave(&observed.lock, flags);
	nlines = observed.nlines;

	if (strnstr(buf, "BUG: KCSAN: ", len) && strnstr(buf, "test_", len)) {
		/*
		 * This is a KCSAN report, and it is related to the test.
		 *
		 * The provided @buf is not NUL-terminated; copy no more than
		 * @len bytes and let strscpy() add the missing NUL-terminator.
		 */
		strscpy(observed.lines[0], buf, min(len + 1, sizeof(observed.lines[0])));
		nlines = 1;
	} else if ((nlines == 1 || nlines == 2) && strnstr(buf, "bytes by", len)) {
		strscpy(observed.lines[nlines++], buf, min(len + 1, sizeof(observed.lines[0])));

		if (strnstr(buf, "race at unknown origin", len)) {
			if (WARN_ON(nlines != 2))
				goto out;

			/* No second line of interest. */
			strcpy(observed.lines[nlines++], "<none>");
		}
	}

out:
	WRITE_ONCE(observed.nlines, nlines); /* Publish new nlines. */
	spin_unlock_irqrestore(&observed.lock, flags);
}

/* Check if a report related to the test exists. */
__no_kcsan
static bool report_available(void)
{
	return READ_ONCE(observed.nlines) == ARRAY_SIZE(observed.lines);
}

/* Information we expect in a report. */
struct expect_report {
	/* Access information of both accesses. */
	struct {
		void *fn;    /* Function pointer to expected function of top frame. */
		void *addr;  /* Address of access; unchecked if NULL. */
		size_t size; /* Size of access; unchecked if @addr is NULL. */
		int type;    /* Access type, see KCSAN_ACCESS definitions. */
	} access[2];
};
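/*
 * Example (as used by test_read_plain_atomic_write() below): a plain read
 * racing with a marked write to test_var:
 *
 *	struct expect_report expect = {
 *		.access = {
 *			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
 *			{ test_kernel_write_atomic, &test_var, sizeof(test_var),
 *			  KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC },
 *		},
 *	};
 */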

/* Check that the observed report matches the information in @r. */
__no_kcsan
static bool __report_matches(const struct expect_report *r)
{
	const bool is_assert = (r->access[0].type | r->access[1].type) & KCSAN_ACCESS_ASSERT;
	bool ret = false;
	unsigned long flags;
	typeof(*observed.lines) *expect;
	const char *end;
	char *cur;
	int i;

	/* Double-checked locking. */
	if (!report_available())
		return false;

	expect = kmalloc(sizeof(observed.lines), GFP_KERNEL);
	if (WARN_ON(!expect))
		return false;

	/* Generate expected report contents. */

	/* Title */
	cur = expect[0];
	end = &expect[0][sizeof(expect[0]) - 1];
	cur += scnprintf(cur, end - cur, "BUG: KCSAN: %s in ",
			 is_assert ? "assert: race" : "data-race");
	if (r->access[1].fn) {
		char tmp[2][64];
		int cmp;

		/* Expect lexicographically sorted function names in title. */
		scnprintf(tmp[0], sizeof(tmp[0]), "%pS", r->access[0].fn);
		scnprintf(tmp[1], sizeof(tmp[1]), "%pS", r->access[1].fn);
		cmp = strcmp(tmp[0], tmp[1]);
		cur += scnprintf(cur, end - cur, "%ps / %ps",
				 cmp < 0 ? r->access[0].fn : r->access[1].fn,
				 cmp < 0 ? r->access[1].fn : r->access[0].fn);
	} else {
		scnprintf(cur, end - cur, "%pS", r->access[0].fn);
		/* The exact offset won't match; remove it. */
		cur = strchr(expect[0], '+');
		if (cur)
			*cur = '\0';
	}

	/* Access 1 */
	cur = expect[1];
	end = &expect[1][sizeof(expect[1]) - 1];
	if (!r->access[1].fn)
		cur += scnprintf(cur, end - cur, "race at unknown origin, with ");

	/* Access 1 & 2 */
	for (i = 0; i < 2; ++i) {
		const int ty = r->access[i].type;
		const char *const access_type =
			(ty & KCSAN_ACCESS_ASSERT) ?
				      ((ty & KCSAN_ACCESS_WRITE) ?
					       "assert no accesses" :
					       "assert no writes") :
				      ((ty & KCSAN_ACCESS_WRITE) ?
					       ((ty & KCSAN_ACCESS_COMPOUND) ?
							"read-write" :
							"write") :
					       "read");
		const bool is_atomic = (ty & KCSAN_ACCESS_ATOMIC);
		const bool is_scoped = (ty & KCSAN_ACCESS_SCOPED);
		const char *const access_type_aux =
				(is_atomic && is_scoped)	? " (marked, reordered)"
				: (is_atomic			? " (marked)"
				   : (is_scoped			? " (reordered)" : ""));

		if (i == 1) {
			/* Access 2 */
			cur = expect[2];
			end = &expect[2][sizeof(expect[2]) - 1];

			if (!r->access[1].fn) {
				/* Dummy string if no second access is available. */
				strcpy(cur, "<none>");
				break;
			}
		}

		cur += scnprintf(cur, end - cur, "%s%s to ", access_type,
				 access_type_aux);

		if (r->access[i].addr) /* Address is optional. */
			cur += scnprintf(cur, end - cur, "0x%px of %zu bytes",
					 r->access[i].addr, r->access[i].size);
	}

	spin_lock_irqsave(&observed.lock, flags);
	if (!report_available())
		goto out; /* A new report is being captured. */

	/* Finally match expected output to what we actually observed. */
	ret = strstr(observed.lines[0], expect[0]) &&
	      /* Access info may appear in any order. */
	      ((strstr(observed.lines[1], expect[1]) &&
		strstr(observed.lines[2], expect[2])) ||
	       (strstr(observed.lines[1], expect[2]) &&
		strstr(observed.lines[2], expect[1])));
out:
	spin_unlock_irqrestore(&observed.lock, flags);
	kfree(expect);
	return ret;
}

static __always_inline const struct expect_report *
__report_set_scoped(struct expect_report *r, int accesses)
{
	BUILD_BUG_ON(accesses > 3);

	if (accesses & 1)
		r->access[0].type |= KCSAN_ACCESS_SCOPED;
	else
		r->access[0].type &= ~KCSAN_ACCESS_SCOPED;

	if (accesses & 2)
		r->access[1].type |= KCSAN_ACCESS_SCOPED;
	else
		r->access[1].type &= ~KCSAN_ACCESS_SCOPED;

	return r;
}

__no_kcsan
static bool report_matches_any_reordered(struct expect_report *r)
{
	return __report_matches(__report_set_scoped(r, 0)) ||
	       __report_matches(__report_set_scoped(r, 1)) ||
	       __report_matches(__report_set_scoped(r, 2)) ||
	       __report_matches(__report_set_scoped(r, 3));
}

#ifdef CONFIG_KCSAN_WEAK_MEMORY
/* Due to reordering accesses, any access may appear as "(reordered)". */
#define report_matches report_matches_any_reordered
#else
#define report_matches __report_matches
#endif
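/*
 * With weak memory modeling, report_matches() thus accepts a report regardless
 * of which (if either) of the two accesses the runtime flagged as "(reordered)".
 */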

/* ===== Test kernels ===== */

static long test_sink;
static long test_var;
/* @test_array should be large enough to fall into multiple watchpoint slots. */
static long test_array[3 * PAGE_SIZE / sizeof(long)];
static struct {
	long val[8];
} test_struct;
static DEFINE_SEQLOCK(test_seqlock);
static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_MUTEX(test_mutex);

/*
 * Helper to avoid the compiler optimizing out reads, and to generate source
 * values for writes.
 */
__no_kcsan
static noinline void sink_value(long v) { WRITE_ONCE(test_sink, v); }

/*
 * Generates a delay and some accesses that enter the runtime but do not produce
 * data races.
 */
static noinline void test_delay(int iter)
{
	while (iter--)
		sink_value(READ_ONCE(test_sink));
}

static noinline void test_kernel_read(void) { sink_value(test_var); }

static noinline void test_kernel_write(void)
{
	test_var = READ_ONCE_NOCHECK(test_sink) + 1;
}

static noinline void test_kernel_write_nochange(void) { test_var = 42; }

/* Suffixed "_rcu" to match the value-change exception filter. */
static noinline void test_kernel_write_nochange_rcu(void) { test_var = 42; }

static noinline void test_kernel_read_atomic(void)
{
	sink_value(READ_ONCE(test_var));
}

static noinline void test_kernel_write_atomic(void)
{
	WRITE_ONCE(test_var, READ_ONCE_NOCHECK(test_sink) + 1);
}

static noinline void test_kernel_atomic_rmw(void)
{
	/* Use builtin, so we can set up the "bad" atomic/non-atomic scenario. */
	__atomic_fetch_add(&test_var, 1, __ATOMIC_RELAXED);
}

__no_kcsan
static noinline void test_kernel_write_uninstrumented(void) { test_var++; }

static noinline void test_kernel_data_race(void) { data_race(test_var++); }

static noinline void test_kernel_assert_writer(void)
{
	ASSERT_EXCLUSIVE_WRITER(test_var);
}

static noinline void test_kernel_assert_access(void)
{
	ASSERT_EXCLUSIVE_ACCESS(test_var);
}

#define TEST_CHANGE_BITS 0xff00ff00
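/* Bits toggled by test_kernel_change_bits() below; the complementary bits never change. */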

static noinline void test_kernel_change_bits(void)
{
	if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {
		/*
		 * Avoid a race of unknown origin for this test; just pretend
		 * the accesses are atomic.
		 */
		kcsan_nestable_atomic_begin();
		test_var ^= TEST_CHANGE_BITS;
		kcsan_nestable_atomic_end();
	} else
		WRITE_ONCE(test_var, READ_ONCE(test_var) ^ TEST_CHANGE_BITS);
}

static noinline void test_kernel_assert_bits_change(void)
{
	ASSERT_EXCLUSIVE_BITS(test_var, TEST_CHANGE_BITS);
}

static noinline void test_kernel_assert_bits_nochange(void)
{
	ASSERT_EXCLUSIVE_BITS(test_var, ~TEST_CHANGE_BITS);
}

/*
 * Scoped assertions do trigger anywhere in scope. However, the report should
 * still only point at the start of the scope.
 */
static noinline void test_enter_scope(void)
{
	int x = 0;

	/* Unrelated accesses to scoped assert. */
	READ_ONCE(test_sink);
	kcsan_check_read(&x, sizeof(x));
}

static noinline void test_kernel_assert_writer_scoped(void)
{
	ASSERT_EXCLUSIVE_WRITER_SCOPED(test_var);
	test_enter_scope();
}

static noinline void test_kernel_assert_access_scoped(void)
{
	ASSERT_EXCLUSIVE_ACCESS_SCOPED(test_var);
	test_enter_scope();
}

static noinline void test_kernel_rmw_array(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(test_array); ++i)
		test_array[i]++;
}

static noinline void test_kernel_write_struct(void)
{
	kcsan_check_write(&test_struct, sizeof(test_struct));
	kcsan_disable_current();
	test_struct.val[3]++; /* induce value change */
	kcsan_enable_current();
}

static noinline void test_kernel_write_struct_part(void)
{
	test_struct.val[3] = 42;
}

static noinline void test_kernel_read_struct_zero_size(void)
{
	kcsan_check_read(&test_struct.val[3], 0);
}

static noinline void test_kernel_jiffies_reader(void)
{
	sink_value((long)jiffies);
}

static noinline void test_kernel_seqlock_reader(void)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&test_seqlock);
		sink_value(test_var);
	} while (read_seqretry(&test_seqlock, seq));
}

static noinline void test_kernel_seqlock_writer(void)
{
	unsigned long flags;

	write_seqlock_irqsave(&test_seqlock, flags);
	test_var++;
	write_sequnlock_irqrestore(&test_seqlock, flags);
}

static noinline void test_kernel_atomic_builtins(void)
{
	/*
	 * Generate concurrent accesses, expecting no reports, ensuring KCSAN
	 * treats builtin atomics as actually atomic.
	 */
	__atomic_load_n(&test_var, __ATOMIC_RELAXED);
}

static noinline void test_kernel_xor_1bit(void)
{
	/* Do not report data races between the read-writes. */
	kcsan_nestable_atomic_begin();
	test_var ^= 0x10000;
	kcsan_nestable_atomic_end();
}

#define TEST_KERNEL_LOCKED(name, acquire, release)		\
	static noinline void test_kernel_##name(void)		\
	{							\
		long *flag = &test_struct.val[0];		\
		long v = 0;					\
		if (!(acquire))					\
			return;					\
		while (v++ < 100) {				\
			test_var++;				\
			barrier();				\
		}						\
		release;					\
		test_delay(10);					\
	}
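/*
 * The instantiations below implement a small test-and-set "critical section"
 * around a counter increment; whether KCSAN may consider the sections ordered
 * depends on the acquire/release semantics supplied via @acquire and @release.
 */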

TEST_KERNEL_LOCKED(with_memorder,
		   cmpxchg_acquire(flag, 0, 1) == 0,
		   smp_store_release(flag, 0));
TEST_KERNEL_LOCKED(wrong_memorder,
		   cmpxchg_relaxed(flag, 0, 1) == 0,
		   WRITE_ONCE(*flag, 0));
TEST_KERNEL_LOCKED(atomic_builtin_with_memorder,
		   __atomic_compare_exchange_n(flag, &v, 1, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED),
		   __atomic_store_n(flag, 0, __ATOMIC_RELEASE));
TEST_KERNEL_LOCKED(atomic_builtin_wrong_memorder,
		   __atomic_compare_exchange_n(flag, &v, 1, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED),
		   __atomic_store_n(flag, 0, __ATOMIC_RELAXED));

/* ===== Test cases ===== */

/*
 * Tests that various barriers have the expected effect on internal state. Not
 * exhaustive on atomic_t operations. Unlike the selftest, also checks for
 * too-strict barrier instrumentation; such cases can be tolerated, because they
 * do not cause false positives, but at least we should be aware of them.
 */
static void test_barrier_nothreads(struct kunit *test)
{
#ifdef CONFIG_KCSAN_WEAK_MEMORY
	struct kcsan_scoped_access *reorder_access = &current->kcsan_ctx.reorder_access;
#else
	struct kcsan_scoped_access *reorder_access = NULL;
#endif
	arch_spinlock_t arch_spinlock = __ARCH_SPIN_LOCK_UNLOCKED;
	atomic_t dummy;

	KCSAN_TEST_REQUIRES(test, reorder_access != NULL);
	KCSAN_TEST_REQUIRES(test, IS_ENABLED(CONFIG_SMP));

#define __KCSAN_EXPECT_BARRIER(access_type, barrier, order_before, name)			\
	do {											\
		reorder_access->type = (access_type) | KCSAN_ACCESS_SCOPED;			\
		reorder_access->size = sizeof(test_var);					\
		barrier;									\
		KUNIT_EXPECT_EQ_MSG(test, reorder_access->size,					\
				    order_before ? 0 : sizeof(test_var),			\
				    "improperly instrumented type=(" #access_type "): " name);	\
	} while (0)
#define KCSAN_EXPECT_READ_BARRIER(b, o)  __KCSAN_EXPECT_BARRIER(0, b, o, #b)
#define KCSAN_EXPECT_WRITE_BARRIER(b, o) __KCSAN_EXPECT_BARRIER(KCSAN_ACCESS_WRITE, b, o, #b)
#define KCSAN_EXPECT_RW_BARRIER(b, o)    __KCSAN_EXPECT_BARRIER(KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE, b, o, #b)
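	/*
	 * A barrier that is expected to order the prior (simulated) access of
	 * the given type must "flush" reorder_access, i.e. reset its size to 0;
	 * barriers that do not order it should leave reorder_access untouched.
	 */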

	/*
	 * Lockdep initialization can strengthen certain locking operations due
	 * to calling into instrumented files; "warm up" our locks.
	 */
	spin_lock(&test_spinlock);
	spin_unlock(&test_spinlock);
	mutex_lock(&test_mutex);
	mutex_unlock(&test_mutex);

	/* Force creating a valid entry in reorder_access first. */
	test_var = 0;
	while (test_var++ < 1000000 && reorder_access->size != sizeof(test_var))
		__kcsan_check_read(&test_var, sizeof(test_var));
	KUNIT_ASSERT_EQ(test, reorder_access->size, sizeof(test_var));

	kcsan_nestable_atomic_begin(); /* No watchpoints in called functions. */

	KCSAN_EXPECT_READ_BARRIER(mb(), true);
	KCSAN_EXPECT_READ_BARRIER(wmb(), false);
	KCSAN_EXPECT_READ_BARRIER(rmb(), true);
	KCSAN_EXPECT_READ_BARRIER(smp_mb(), true);
	KCSAN_EXPECT_READ_BARRIER(smp_wmb(), false);
	KCSAN_EXPECT_READ_BARRIER(smp_rmb(), true);
	KCSAN_EXPECT_READ_BARRIER(dma_wmb(), false);
	KCSAN_EXPECT_READ_BARRIER(dma_rmb(), true);
	KCSAN_EXPECT_READ_BARRIER(smp_mb__before_atomic(), true);
	KCSAN_EXPECT_READ_BARRIER(smp_mb__after_atomic(), true);
	KCSAN_EXPECT_READ_BARRIER(smp_mb__after_spinlock(), true);
	KCSAN_EXPECT_READ_BARRIER(smp_store_mb(test_var, 0), true);
	KCSAN_EXPECT_READ_BARRIER(smp_load_acquire(&test_var), false);
	KCSAN_EXPECT_READ_BARRIER(smp_store_release(&test_var, 0), true);
	KCSAN_EXPECT_READ_BARRIER(xchg(&test_var, 0), true);
	KCSAN_EXPECT_READ_BARRIER(xchg_release(&test_var, 0), true);
	KCSAN_EXPECT_READ_BARRIER(xchg_relaxed(&test_var, 0), false);
	KCSAN_EXPECT_READ_BARRIER(cmpxchg(&test_var, 0,  0), true);
	KCSAN_EXPECT_READ_BARRIER(cmpxchg_release(&test_var, 0,  0), true);
	KCSAN_EXPECT_READ_BARRIER(cmpxchg_relaxed(&test_var, 0,  0), false);
	KCSAN_EXPECT_READ_BARRIER(atomic_read(&dummy), false);
	KCSAN_EXPECT_READ_BARRIER(atomic_read_acquire(&dummy), false);
	KCSAN_EXPECT_READ_BARRIER(atomic_set(&dummy, 0), false);
	KCSAN_EXPECT_READ_BARRIER(atomic_set_release(&dummy, 0), true);
	KCSAN_EXPECT_READ_BARRIER(atomic_add(1, &dummy), false);
	KCSAN_EXPECT_READ_BARRIER(atomic_add_return(1, &dummy), true);
	KCSAN_EXPECT_READ_BARRIER(atomic_add_return_acquire(1, &dummy), false);
	KCSAN_EXPECT_READ_BARRIER(atomic_add_return_release(1, &dummy), true);
	KCSAN_EXPECT_READ_BARRIER(atomic_add_return_relaxed(1, &dummy), false);
	KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add(1, &dummy), true);
	KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add_acquire(1, &dummy), false);
	KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add_release(1, &dummy), true);
	KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add_relaxed(1, &dummy), false);
	KCSAN_EXPECT_READ_BARRIER(test_and_set_bit(0, &test_var), true);
	KCSAN_EXPECT_READ_BARRIER(test_and_clear_bit(0, &test_var), true);
	KCSAN_EXPECT_READ_BARRIER(test_and_change_bit(0, &test_var), true);
	KCSAN_EXPECT_READ_BARRIER(clear_bit_unlock(0, &test_var), true);
	KCSAN_EXPECT_READ_BARRIER(__clear_bit_unlock(0, &test_var), true);
	KCSAN_EXPECT_READ_BARRIER(arch_spin_lock(&arch_spinlock), false);
	KCSAN_EXPECT_READ_BARRIER(arch_spin_unlock(&arch_spinlock), true);
	KCSAN_EXPECT_READ_BARRIER(spin_lock(&test_spinlock), false);
	KCSAN_EXPECT_READ_BARRIER(spin_unlock(&test_spinlock), true);
	KCSAN_EXPECT_READ_BARRIER(mutex_lock(&test_mutex), false);
	KCSAN_EXPECT_READ_BARRIER(mutex_unlock(&test_mutex), true);

	KCSAN_EXPECT_WRITE_BARRIER(mb(), true);
	KCSAN_EXPECT_WRITE_BARRIER(wmb(), true);
	KCSAN_EXPECT_WRITE_BARRIER(rmb(), false);
	KCSAN_EXPECT_WRITE_BARRIER(smp_mb(), true);
	KCSAN_EXPECT_WRITE_BARRIER(smp_wmb(), true);
	KCSAN_EXPECT_WRITE_BARRIER(smp_rmb(), false);
	KCSAN_EXPECT_WRITE_BARRIER(dma_wmb(), true);
	KCSAN_EXPECT_WRITE_BARRIER(dma_rmb(), false);
	KCSAN_EXPECT_WRITE_BARRIER(smp_mb__before_atomic(), true);
	KCSAN_EXPECT_WRITE_BARRIER(smp_mb__after_atomic(), true);
	KCSAN_EXPECT_WRITE_BARRIER(smp_mb__after_spinlock(), true);
	KCSAN_EXPECT_WRITE_BARRIER(smp_store_mb(test_var, 0), true);
	KCSAN_EXPECT_WRITE_BARRIER(smp_load_acquire(&test_var), false);
	KCSAN_EXPECT_WRITE_BARRIER(smp_store_release(&test_var, 0), true);
	KCSAN_EXPECT_WRITE_BARRIER(xchg(&test_var, 0), true);
	KCSAN_EXPECT_WRITE_BARRIER(xchg_release(&test_var, 0), true);
	KCSAN_EXPECT_WRITE_BARRIER(xchg_relaxed(&test_var, 0), false);
	KCSAN_EXPECT_WRITE_BARRIER(cmpxchg(&test_var, 0,  0), true);
	KCSAN_EXPECT_WRITE_BARRIER(cmpxchg_release(&test_var, 0,  0), true);
	KCSAN_EXPECT_WRITE_BARRIER(cmpxchg_relaxed(&test_var, 0,  0), false);
	KCSAN_EXPECT_WRITE_BARRIER(atomic_read(&dummy), false);
	KCSAN_EXPECT_WRITE_BARRIER(atomic_read_acquire(&dummy), false);
	KCSAN_EXPECT_WRITE_BARRIER(atomic_set(&dummy, 0), false);
	KCSAN_EXPECT_WRITE_BARRIER(atomic_set_release(&dummy, 0), true);
	KCSAN_EXPECT_WRITE_BARRIER(atomic_add(1, &dummy), false);
	KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return(1, &dummy), true);
	KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return_acquire(1, &dummy), false);
	KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return_release(1, &dummy), true);
	KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return_relaxed(1, &dummy), false);
	KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add(1, &dummy), true);
	KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add_acquire(1, &dummy), false);
	KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add_release(1, &dummy), true);
	KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add_relaxed(1, &dummy), false);
	KCSAN_EXPECT_WRITE_BARRIER(test_and_set_bit(0, &test_var), true);
	KCSAN_EXPECT_WRITE_BARRIER(test_and_clear_bit(0, &test_var), true);
	KCSAN_EXPECT_WRITE_BARRIER(test_and_change_bit(0, &test_var), true);
	KCSAN_EXPECT_WRITE_BARRIER(clear_bit_unlock(0, &test_var), true);
	KCSAN_EXPECT_WRITE_BARRIER(__clear_bit_unlock(0, &test_var), true);
	KCSAN_EXPECT_WRITE_BARRIER(arch_spin_lock(&arch_spinlock), false);
	KCSAN_EXPECT_WRITE_BARRIER(arch_spin_unlock(&arch_spinlock), true);
	KCSAN_EXPECT_WRITE_BARRIER(spin_lock(&test_spinlock), false);
	KCSAN_EXPECT_WRITE_BARRIER(spin_unlock(&test_spinlock), true);
	KCSAN_EXPECT_WRITE_BARRIER(mutex_lock(&test_mutex), false);
	KCSAN_EXPECT_WRITE_BARRIER(mutex_unlock(&test_mutex), true);

	KCSAN_EXPECT_RW_BARRIER(mb(), true);
	KCSAN_EXPECT_RW_BARRIER(wmb(), true);
	KCSAN_EXPECT_RW_BARRIER(rmb(), true);
	KCSAN_EXPECT_RW_BARRIER(smp_mb(), true);
	KCSAN_EXPECT_RW_BARRIER(smp_wmb(), true);
	KCSAN_EXPECT_RW_BARRIER(smp_rmb(), true);
	KCSAN_EXPECT_RW_BARRIER(dma_wmb(), true);
	KCSAN_EXPECT_RW_BARRIER(dma_rmb(), true);
	KCSAN_EXPECT_RW_BARRIER(smp_mb__before_atomic(), true);
	KCSAN_EXPECT_RW_BARRIER(smp_mb__after_atomic(), true);
	KCSAN_EXPECT_RW_BARRIER(smp_mb__after_spinlock(), true);
	KCSAN_EXPECT_RW_BARRIER(smp_store_mb(test_var, 0), true);
	KCSAN_EXPECT_RW_BARRIER(smp_load_acquire(&test_var), false);
	KCSAN_EXPECT_RW_BARRIER(smp_store_release(&test_var, 0), true);
	KCSAN_EXPECT_RW_BARRIER(xchg(&test_var, 0), true);
	KCSAN_EXPECT_RW_BARRIER(xchg_release(&test_var, 0), true);
	KCSAN_EXPECT_RW_BARRIER(xchg_relaxed(&test_var, 0), false);
	KCSAN_EXPECT_RW_BARRIER(cmpxchg(&test_var, 0,  0), true);
	KCSAN_EXPECT_RW_BARRIER(cmpxchg_release(&test_var, 0,  0), true);
	KCSAN_EXPECT_RW_BARRIER(cmpxchg_relaxed(&test_var, 0,  0), false);
	KCSAN_EXPECT_RW_BARRIER(atomic_read(&dummy), false);
	KCSAN_EXPECT_RW_BARRIER(atomic_read_acquire(&dummy), false);
	KCSAN_EXPECT_RW_BARRIER(atomic_set(&dummy, 0), false);
	KCSAN_EXPECT_RW_BARRIER(atomic_set_release(&dummy, 0), true);
	KCSAN_EXPECT_RW_BARRIER(atomic_add(1, &dummy), false);
	KCSAN_EXPECT_RW_BARRIER(atomic_add_return(1, &dummy), true);
	KCSAN_EXPECT_RW_BARRIER(atomic_add_return_acquire(1, &dummy), false);
	KCSAN_EXPECT_RW_BARRIER(atomic_add_return_release(1, &dummy), true);
	KCSAN_EXPECT_RW_BARRIER(atomic_add_return_relaxed(1, &dummy), false);
	KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add(1, &dummy), true);
	KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add_acquire(1, &dummy), false);
	KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add_release(1, &dummy), true);
	KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add_relaxed(1, &dummy), false);
	KCSAN_EXPECT_RW_BARRIER(test_and_set_bit(0, &test_var), true);
	KCSAN_EXPECT_RW_BARRIER(test_and_clear_bit(0, &test_var), true);
	KCSAN_EXPECT_RW_BARRIER(test_and_change_bit(0, &test_var), true);
	KCSAN_EXPECT_RW_BARRIER(clear_bit_unlock(0, &test_var), true);
	KCSAN_EXPECT_RW_BARRIER(__clear_bit_unlock(0, &test_var), true);
	KCSAN_EXPECT_RW_BARRIER(arch_spin_lock(&arch_spinlock), false);
	KCSAN_EXPECT_RW_BARRIER(arch_spin_unlock(&arch_spinlock), true);
	KCSAN_EXPECT_RW_BARRIER(spin_lock(&test_spinlock), false);
	KCSAN_EXPECT_RW_BARRIER(spin_unlock(&test_spinlock), true);
	KCSAN_EXPECT_RW_BARRIER(mutex_lock(&test_mutex), false);
	KCSAN_EXPECT_RW_BARRIER(mutex_unlock(&test_mutex), true);
	KCSAN_EXPECT_READ_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true);
	KCSAN_EXPECT_WRITE_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true);
	KCSAN_EXPECT_RW_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true);
	kcsan_nestable_atomic_end();
}

/* Simple test with normal data race. */
__no_kcsan
static void test_basic(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	struct expect_report never = {
		.access = {
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	bool match_expect = false;
	bool match_never = false;

	begin_test_checks(test_kernel_write, test_kernel_read);
	do {
		match_expect |= report_matches(&expect);
		match_never = report_matches(&never);
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_TRUE(test, match_expect);
	KUNIT_EXPECT_FALSE(test, match_never);
}

/*
 * Stress KCSAN with lots of concurrent races on different addresses until
 * timeout.
 */
__no_kcsan
static void test_concurrent_races(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			/* NULL will match any address. */
			{ test_kernel_rmw_array, NULL, 0, __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
			{ test_kernel_rmw_array, NULL, 0, __KCSAN_ACCESS_RW(0) },
		},
	};
	struct expect_report never = {
		.access = {
			{ test_kernel_rmw_array, NULL, 0, 0 },
			{ test_kernel_rmw_array, NULL, 0, 0 },
		},
	};
	bool match_expect = false;
	bool match_never = false;

	begin_test_checks(test_kernel_rmw_array, test_kernel_rmw_array);
	do {
		match_expect |= report_matches(&expect);
		match_never |= report_matches(&never);
	} while (!end_test_checks(false));
	KUNIT_EXPECT_TRUE(test, match_expect); /* Sanity check matches exist. */
	KUNIT_EXPECT_FALSE(test, match_never);
}

/* Test the KCSAN_REPORT_VALUE_CHANGE_ONLY option. */
__no_kcsan
static void test_novalue_change(struct kunit *test)
{
	struct expect_report expect_rw = {
		.access = {
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	struct expect_report expect_ww = {
		.access = {
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	test_kernel_write_nochange(); /* Reset value. */
	begin_test_checks(test_kernel_write_nochange, test_kernel_read);
	do {
		match_expect = report_matches(&expect_rw) || report_matches(&expect_ww);
	} while (!end_test_checks(match_expect));
	if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY))
		KUNIT_EXPECT_FALSE(test, match_expect);
	else
		KUNIT_EXPECT_TRUE(test, match_expect);
}

/*
 * Test that the exceptions where the KCSAN_REPORT_VALUE_CHANGE_ONLY option
 * must never apply (e.g. for RCU-related functions) work as intended.
 */
__no_kcsan
static void test_novalue_change_exception(struct kunit *test)
{
	struct expect_report expect_rw = {
		.access = {
			{ test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	struct expect_report expect_ww = {
		.access = {
			{ test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	test_kernel_write_nochange_rcu(); /* Reset value. */
	begin_test_checks(test_kernel_write_nochange_rcu, test_kernel_read);
	do {
		match_expect = report_matches(&expect_rw) || report_matches(&expect_ww);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}

/* Test that data races of unknown origin are reported. */
__no_kcsan
static void test_unknown_origin(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
			{ NULL },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write_uninstrumented, test_kernel_read);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN))
		KUNIT_EXPECT_TRUE(test, match_expect);
	else
		KUNIT_EXPECT_FALSE(test, match_expect);
}

/* Test KCSAN_ASSUME_PLAIN_WRITES_ATOMIC if it is selected. */
__no_kcsan
static void test_write_write_assume_atomic(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write, test_kernel_write);
	do {
		sink_value(READ_ONCE(test_var)); /* induce value-change */
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC))
		KUNIT_EXPECT_FALSE(test, match_expect);
	else
		KUNIT_EXPECT_TRUE(test, match_expect);
}

/*
 * Test that data races with writes larger than word-size are always reported,
 * even if KCSAN_ASSUME_PLAIN_WRITES_ATOMIC is selected.
 */
__no_kcsan
static void test_write_write_struct(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write_struct, test_kernel_write_struct);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}

/*
 * Test that data races where only one write is larger than word-size are always
 * reported, even if KCSAN_ASSUME_PLAIN_WRITES_ATOMIC is selected.
 */
__no_kcsan
static void test_write_write_struct_part(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
			{ test_kernel_write_struct_part, &test_struct.val[3], sizeof(test_struct.val[3]), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write_struct, test_kernel_write_struct_part);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}

/* Test that races with atomic accesses never result in reports. */
__no_kcsan
static void test_read_atomic_write_atomic(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_read_atomic, test_kernel_write_atomic);
	do {
		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}
/* Test that a race between an atomic and a plain access results in a report. */
__no_kcsan
static void test_read_plain_atomic_write(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
			{ test_kernel_write_atomic, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC },
		},
	};
	bool match_expect = false;

	KCSAN_TEST_REQUIRES(test, !IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS));

	begin_test_checks(test_kernel_read, test_kernel_write_atomic);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}

/* Test that atomic RMWs generate a correct report. */
__no_kcsan
static void test_read_plain_atomic_rmw(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
			{ test_kernel_atomic_rmw, &test_var, sizeof(test_var),
				KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC },
		},
	};
	bool match_expect = false;

	KCSAN_TEST_REQUIRES(test, !IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS));

	begin_test_checks(test_kernel_read, test_kernel_atomic_rmw);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}

/* Zero-sized accesses should never cause data race reports. */
__no_kcsan
static void test_zero_size_access(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
		},
	};
	struct expect_report never = {
		.access = {
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
			{ test_kernel_read_struct_zero_size, &test_struct.val[3], 0, 0 },
		},
	};
	bool match_expect = false;
	bool match_never = false;

	begin_test_checks(test_kernel_write_struct, test_kernel_read_struct_zero_size);
	do {
		match_expect |= report_matches(&expect);
		match_never = report_matches(&never);
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_TRUE(test, match_expect); /* Sanity check. */
	KUNIT_EXPECT_FALSE(test, match_never);
}

/* Test the data_race() macro. */
__no_kcsan
static void test_data_race(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_data_race, test_kernel_data_race);
	do {
		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}

__no_kcsan
static void test_assert_exclusive_writer(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_assert_writer, test_kernel_write_nochange);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}

__no_kcsan
static void test_assert_exclusive_access(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_assert_access, test_kernel_read);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}

__no_kcsan
static void test_assert_exclusive_access_writer(struct kunit *test)
{
	struct expect_report expect_access_writer = {
		.access = {
			{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
			{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
		},
	};
	struct expect_report expect_access_access = {
		.access = {
			{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
			{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
		},
	};
	struct expect_report never = {
		.access = {
			{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
			{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
		},
	};
	bool match_expect_access_writer = false;
	bool match_expect_access_access = false;
	bool match_never = false;

	begin_test_checks(test_kernel_assert_access, test_kernel_assert_writer);
	do {
		match_expect_access_writer |= report_matches(&expect_access_writer);
		match_expect_access_access |= report_matches(&expect_access_access);
		match_never |= report_matches(&never);
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_TRUE(test, match_expect_access_writer);
	KUNIT_EXPECT_TRUE(test, match_expect_access_access);
	KUNIT_EXPECT_FALSE(test, match_never);
}

__no_kcsan
static void test_assert_exclusive_bits_change(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_assert_bits_change, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
			{ test_kernel_change_bits, &test_var, sizeof(test_var),
				KCSAN_ACCESS_WRITE | (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) ? 0 : KCSAN_ACCESS_ATOMIC) },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_assert_bits_change, test_kernel_change_bits);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}

__no_kcsan
static void test_assert_exclusive_bits_nochange(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_assert_bits_nochange, test_kernel_change_bits);
	do {
		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}

__no_kcsan
static void test_assert_exclusive_writer_scoped(struct kunit *test)
{
	struct expect_report expect_start = {
		.access = {
			{ test_kernel_assert_writer_scoped, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_SCOPED },
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
		},
	};
	struct expect_report expect_inscope = {
		.access = {
			{ test_enter_scope, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_SCOPED },
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect_start = false;
	bool match_expect_inscope = false;

	begin_test_checks(test_kernel_assert_writer_scoped, test_kernel_write_nochange);
	do {
		match_expect_start |= report_matches(&expect_start);
		match_expect_inscope |= report_matches(&expect_inscope);
	} while (!end_test_checks(match_expect_inscope));
	KUNIT_EXPECT_TRUE(test, match_expect_start);
	KUNIT_EXPECT_FALSE(test, match_expect_inscope);
}

__no_kcsan
static void test_assert_exclusive_access_scoped(struct kunit *test)
{
	struct expect_report expect_start1 = {
		.access = {
			{ test_kernel_assert_access_scoped, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_SCOPED },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	struct expect_report expect_start2 = {
		.access = { expect_start1.access[0], expect_start1.access[0] },
	};
	struct expect_report expect_inscope = {
		.access = {
			{ test_enter_scope, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_SCOPED },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	bool match_expect_start = false;
	bool match_expect_inscope = false;

	begin_test_checks(test_kernel_assert_access_scoped, test_kernel_read);
	end_time += msecs_to_jiffies(1000); /* This test requires a bit more time. */
	do {
		match_expect_start |= report_matches(&expect_start1) || report_matches(&expect_start2);
		match_expect_inscope |= report_matches(&expect_inscope);
	} while (!end_test_checks(match_expect_inscope));
	KUNIT_EXPECT_TRUE(test, match_expect_start);
	KUNIT_EXPECT_FALSE(test, match_expect_inscope);
}

/*
 * jiffies is special (declared to be volatile) and its accesses are typically
 * not marked; this test ensures that neither the compiler nor KCSAN gets
 * confused about jiffies' declaration on different architectures.
 */
__no_kcsan
static void test_jiffies_noreport(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_jiffies_reader, test_kernel_jiffies_reader);
	do {
		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}

/* Test that racing accesses in seqlock critical sections are not reported. */
__no_kcsan
static void test_seqlock_noreport(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_seqlock_reader, test_kernel_seqlock_writer);
	do {
		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}

/*
 * Test that atomic builtins work and that the required instrumentation
 * functions exist. We also test that KCSAN understands they're atomic by
 * racing with them via test_kernel_atomic_builtins(), and expect no reports.
 *
 * The atomic builtins _SHOULD NOT_ be used in normal kernel code!
 */
static void test_atomic_builtins(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_atomic_builtins, test_kernel_atomic_builtins);
	do {
		long tmp;

		kcsan_enable_current();

		__atomic_store_n(&test_var, 42L, __ATOMIC_RELAXED);
		KUNIT_EXPECT_EQ(test, 42L, __atomic_load_n(&test_var, __ATOMIC_RELAXED));

		KUNIT_EXPECT_EQ(test, 42L, __atomic_exchange_n(&test_var, 20, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 20L, test_var);

		tmp = 20L;
		KUNIT_EXPECT_TRUE(test, __atomic_compare_exchange_n(&test_var, &tmp, 30L,
								    0, __ATOMIC_RELAXED,
								    __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, tmp, 20L);
		KUNIT_EXPECT_EQ(test, test_var, 30L);
		KUNIT_EXPECT_FALSE(test, __atomic_compare_exchange_n(&test_var, &tmp, 40L,
								     1, __ATOMIC_RELAXED,
								     __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, tmp, 30L);
		KUNIT_EXPECT_EQ(test, test_var, 30L);

		KUNIT_EXPECT_EQ(test, 30L, __atomic_fetch_add(&test_var, 1, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 31L, __atomic_fetch_sub(&test_var, 1, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 30L, __atomic_fetch_and(&test_var, 0xf, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 14L, __atomic_fetch_xor(&test_var, 0xf, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 1L, __atomic_fetch_or(&test_var, 0xf0, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 241L, __atomic_fetch_nand(&test_var, 0xf, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, -2L, test_var);

		__atomic_thread_fence(__ATOMIC_SEQ_CST);
		__atomic_signal_fence(__ATOMIC_SEQ_CST);

		kcsan_disable_current();

		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}

__no_kcsan
static void test_1bit_value_change(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
			{ test_kernel_xor_1bit, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
		},
	};
	bool match = false;

	begin_test_checks(test_kernel_read, test_kernel_xor_1bit);
	do {
		match = IS_ENABLED(CONFIG_KCSAN_PERMISSIVE)
				? report_available()
				: report_matches(&expect);
	} while (!end_test_checks(match));
	if (IS_ENABLED(CONFIG_KCSAN_PERMISSIVE))
		KUNIT_EXPECT_FALSE(test, match);
	else
		KUNIT_EXPECT_TRUE(test, match);
}

__no_kcsan
static void test_correct_barrier(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
			{ test_kernel_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) },
		},
	};
	bool match_expect = false;

	test_struct.val[0] = 0; /* init unlocked */
	begin_test_checks(test_kernel_with_memorder, test_kernel_with_memorder);
	do {
		match_expect = report_matches_any_reordered(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_FALSE(test, match_expect);
}

__no_kcsan
static void test_missing_barrier(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
			{ test_kernel_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) },
		},
	};
	bool match_expect = false;

	test_struct.val[0] = 0; /* init unlocked */
	begin_test_checks(test_kernel_wrong_memorder, test_kernel_wrong_memorder);
	do {
		match_expect = report_matches_any_reordered(&expect);
	} while (!end_test_checks(match_expect));
	if (IS_ENABLED(CONFIG_KCSAN_WEAK_MEMORY))
		KUNIT_EXPECT_TRUE(test, match_expect);
	else
		KUNIT_EXPECT_FALSE(test, match_expect);
}

__no_kcsan
static void test_atomic_builtins_correct_barrier(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_atomic_builtin_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
			{ test_kernel_atomic_builtin_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) },
		},
	};
	bool match_expect = false;

	test_struct.val[0] = 0; /* init unlocked */
	begin_test_checks(test_kernel_atomic_builtin_with_memorder,
			  test_kernel_atomic_builtin_with_memorder);
	do {
		match_expect = report_matches_any_reordered(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_FALSE(test, match_expect);
}

__no_kcsan
static void test_atomic_builtins_missing_barrier(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_atomic_builtin_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
			{ test_kernel_atomic_builtin_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) },
		},
	};
	bool match_expect = false;

	test_struct.val[0] = 0; /* init unlocked */
	begin_test_checks(test_kernel_atomic_builtin_wrong_memorder,
			  test_kernel_atomic_builtin_wrong_memorder);
	do {
		match_expect = report_matches_any_reordered(&expect);
	} while (!end_test_checks(match_expect));
	if (IS_ENABLED(CONFIG_KCSAN_WEAK_MEMORY))
		KUNIT_EXPECT_TRUE(test, match_expect);
	else
		KUNIT_EXPECT_FALSE(test, match_expect);
}

/*
 * Generate thread counts for all test cases. Values generated are in interval
 * [2, 5] followed by exponentially increasing thread counts from 8 to 32.
 *
 * The thread counts are chosen to cover potentially interesting boundaries and
 * corner cases (2 to 5), and then stress the system with larger counts.
 */
static const void *nthreads_gen_params(const void *prev, char *desc)
{
	long nthreads = (long)prev;

	if (nthreads < 0 || nthreads >= 32)
		nthreads = 0; /* stop */
	else if (!nthreads)
		nthreads = 2; /* initial value */
	else if (nthreads < 5)
		nthreads++;
	else if (nthreads == 5)
		nthreads = 8;
	else
		nthreads *= 2;

	if (!preempt_model_preemptible() ||
	    !IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER)) {
		/*
		 * Without any preemption, keep 2 CPUs free for other tasks, one
		 * of which is the main test case function checking for
		 * completion or failure.
		 */
		const long min_unused_cpus = preempt_model_none() ? 2 : 0;
		const long min_required_cpus = 2 + min_unused_cpus;

		if (num_online_cpus() < min_required_cpus) {
			pr_err_once("Too few online CPUs (%u < %ld) for test\n",
				    num_online_cpus(), min_required_cpus);
			nthreads = 0;
		} else if (nthreads >= num_online_cpus() - min_unused_cpus) {
			/* Use negative value to indicate last param. */
			nthreads = -(num_online_cpus() - min_unused_cpus);
			pr_warn_once("Limiting number of threads to %ld (only %d online CPUs)\n",
				     -nthreads, num_online_cpus());
		}
	}

	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "threads=%ld", abs(nthreads));
	return (void *)nthreads;
}
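/*
 * With enough online CPUs (and a preemptible kernel or interrupt watchers),
 * the generated sequence of parameters is: threads=2, 3, 4, 5, 8, 16, 32.
 */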

#define KCSAN_KUNIT_CASE(test_name) KUNIT_CASE_PARAM(test_name, nthreads_gen_params)
static struct kunit_case kcsan_test_cases[] = {
	KUNIT_CASE(test_barrier_nothreads),
	KCSAN_KUNIT_CASE(test_basic),
	KCSAN_KUNIT_CASE(test_concurrent_races),
	KCSAN_KUNIT_CASE(test_novalue_change),
	KCSAN_KUNIT_CASE(test_novalue_change_exception),
	KCSAN_KUNIT_CASE(test_unknown_origin),
	KCSAN_KUNIT_CASE(test_write_write_assume_atomic),
	KCSAN_KUNIT_CASE(test_write_write_struct),
	KCSAN_KUNIT_CASE(test_write_write_struct_part),
	KCSAN_KUNIT_CASE(test_read_atomic_write_atomic),
	KCSAN_KUNIT_CASE(test_read_plain_atomic_write),
	KCSAN_KUNIT_CASE(test_read_plain_atomic_rmw),
	KCSAN_KUNIT_CASE(test_zero_size_access),
	KCSAN_KUNIT_CASE(test_data_race),
	KCSAN_KUNIT_CASE(test_assert_exclusive_writer),
	KCSAN_KUNIT_CASE(test_assert_exclusive_access),
	KCSAN_KUNIT_CASE(test_assert_exclusive_access_writer),
	KCSAN_KUNIT_CASE(test_assert_exclusive_bits_change),
	KCSAN_KUNIT_CASE(test_assert_exclusive_bits_nochange),
	KCSAN_KUNIT_CASE(test_assert_exclusive_writer_scoped),
	KCSAN_KUNIT_CASE(test_assert_exclusive_access_scoped),
	KCSAN_KUNIT_CASE(test_jiffies_noreport),
	KCSAN_KUNIT_CASE(test_seqlock_noreport),
	KCSAN_KUNIT_CASE(test_atomic_builtins),
	KCSAN_KUNIT_CASE(test_1bit_value_change),
	KCSAN_KUNIT_CASE(test_correct_barrier),
	KCSAN_KUNIT_CASE(test_missing_barrier),
	KCSAN_KUNIT_CASE(test_atomic_builtins_correct_barrier),
	KCSAN_KUNIT_CASE(test_atomic_builtins_missing_barrier),
	{},
};

/* ===== End test cases ===== */

/* Concurrent accesses from interrupts. */
__no_kcsan
static void access_thread_timer(struct timer_list *timer)
{
	static atomic_t cnt = ATOMIC_INIT(0);
	unsigned int idx;
	void (*func)(void);

	idx = (unsigned int)atomic_inc_return(&cnt) % ARRAY_SIZE(access_kernels);
	/* Acquire potential initialization. */
	func = smp_load_acquire(&access_kernels[idx]);
	if (func)
		func();
}

/* The main loop for each thread. */
__no_kcsan
static int access_thread(void *arg)
{
	struct timer_list timer;
	unsigned int cnt = 0;
	unsigned int idx;
	void (*func)(void);

	timer_setup_on_stack(&timer, access_thread_timer, 0);
	do {
		might_sleep();

		if (!timer_pending(&timer))
			mod_timer(&timer, jiffies + 1);
		else {
			/* Iterate through all kernels. */
			idx = cnt++ % ARRAY_SIZE(access_kernels);
			/* Acquire potential initialization. */
			func = smp_load_acquire(&access_kernels[idx]);
			if (func)
				func();
		}
	} while (!torture_must_stop());
	del_timer_sync(&timer);
	destroy_timer_on_stack(&timer);

	torture_kthread_stopping("access_thread");
	return 0;
}

__no_kcsan
static int test_init(struct kunit *test)
{
	unsigned long flags;
	int nthreads;
	int i;

	spin_lock_irqsave(&observed.lock, flags);
	for (i = 0; i < ARRAY_SIZE(observed.lines); ++i)
		observed.lines[i][0] = '\0';
	observed.nlines = 0;
	spin_unlock_irqrestore(&observed.lock, flags);

	if (strstr(test->name, "nothreads"))
		return 0;

	if (!torture_init_begin((char *)test->name, 1))
		return -EBUSY;

	if (WARN_ON(threads))
		goto err;

	for (i = 0; i < ARRAY_SIZE(access_kernels); ++i) {
		if (WARN_ON(access_kernels[i]))
			goto err;
	}

	nthreads = abs((long)test->param_value);
	if (WARN_ON(!nthreads))
		goto err;

	threads = kcalloc(nthreads + 1, sizeof(struct task_struct *), GFP_KERNEL);
	if (WARN_ON(!threads))
		goto err;

	threads[nthreads] = NULL;
	for (i = 0; i < nthreads; ++i) {
		if (torture_create_kthread(access_thread, NULL, threads[i]))
			goto err;
	}

	torture_init_end();

	return 0;

err:
	kfree(threads);
	threads = NULL;
	torture_init_end();
	return -EINVAL;
}

__no_kcsan
static void test_exit(struct kunit *test)
{
	struct task_struct **stop_thread;
	int i;

	if (strstr(test->name, "nothreads"))
		return;

	if (torture_cleanup_begin())
		return;

	for (i = 0; i < ARRAY_SIZE(access_kernels); ++i)
		WRITE_ONCE(access_kernels[i], NULL);

	if (threads) {
		for (stop_thread = threads; *stop_thread; stop_thread++)
			torture_stop_kthread(access_thread, *stop_thread);

		kfree(threads);
		threads = NULL;
	}

	torture_cleanup_end();
}

__no_kcsan
static void register_tracepoints(void)
{
	register_trace_console(probe_console, NULL);
}

__no_kcsan
static void unregister_tracepoints(void)
{
	unregister_trace_console(probe_console, NULL);
}

static int kcsan_suite_init(struct kunit_suite *suite)
{
	register_tracepoints();
	return 0;
}

static void kcsan_suite_exit(struct kunit_suite *suite)
{
	unregister_tracepoints();
	tracepoint_synchronize_unregister();
}

static struct kunit_suite kcsan_test_suite = {
	.name = "kcsan",
	.test_cases = kcsan_test_cases,
	.init = test_init,
	.exit = test_exit,
	.suite_init = kcsan_suite_init,
	.suite_exit = kcsan_suite_exit,
};

kunit_test_suites(&kcsan_test_suite);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Marco Elver <elver@google.com>");