// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Module-based API test facility for ww_mutexes
 */

#include <linux/kernel.h>

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/prandom.h>
#include <linux/slab.h>
#include <linux/ww_mutex.h>

static DEFINE_WD_CLASS(ww_class);
struct workqueue_struct *wq;

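/*
 * CONFIG_DEBUG_WW_MUTEX_SLOWPATH periodically injects -EDEADLK into
 * ww_mutex_lock() to exercise callers' backoff paths.  The deadlock tests
 * below want to observe only genuine wound/wait results, so this helper
 * initialises an acquire context with the injection countdown pushed
 * effectively out of reach.
 */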
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
#define ww_acquire_init_noinject(a, b) do { \
		ww_acquire_init((a), (b)); \
		(a)->deadlock_inject_countdown = ~0U; \
	} while (0)
#else
#define ww_acquire_init_noinject(a, b) ww_acquire_init((a), (b))
#endif

struct test_mutex {
	struct work_struct work;
	struct ww_mutex mutex;
	struct completion ready, go, done;
	unsigned int flags;
};

#define TEST_MTX_SPIN BIT(0)
#define TEST_MTX_TRY BIT(1)
#define TEST_MTX_CTX BIT(2)
#define __TEST_MTX_LAST BIT(3)

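/*
 * Worker side of the basic mutual-exclusion test: announce readiness, wait
 * for the go signal, then take the mutex without an acquire context (either
 * by spinning on trylock or by a blocking lock, depending on the flags) and
 * complete &done only once the lock is held.
 */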
static void test_mutex_work(struct work_struct *work)
{
	struct test_mutex *mtx = container_of(work, typeof(*mtx), work);

	complete(&mtx->ready);
	wait_for_completion(&mtx->go);

	if (mtx->flags & TEST_MTX_TRY) {
		while (!ww_mutex_trylock(&mtx->mutex, NULL))
			cond_resched();
	} else {
		ww_mutex_lock(&mtx->mutex, NULL);
	}
	complete(&mtx->done);
	ww_mutex_unlock(&mtx->mutex);
}

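/*
 * Basic mutual exclusion: take the mutex ourselves, release the worker and
 * check that it cannot signal &done while we still hold the lock.  Success
 * is the wait timing out (or the spin loop never seeing the completion);
 * anything else means the worker acquired the lock concurrently.
 */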
static int __test_mutex(unsigned int flags)
{
#define TIMEOUT (HZ / 16)
	struct test_mutex mtx;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mtx.mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
	init_completion(&mtx.ready);
	init_completion(&mtx.go);
	init_completion(&mtx.done);
	mtx.flags = flags;

	schedule_work(&mtx.work);

	wait_for_completion(&mtx.ready);
	ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
	complete(&mtx.go);
	if (flags & TEST_MTX_SPIN) {
		unsigned long timeout = jiffies + TIMEOUT;

		ret = 0;
		do {
			if (completion_done(&mtx.done)) {
				ret = -EINVAL;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, timeout));
	} else {
		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
	}
	ww_mutex_unlock(&mtx.mutex);
	ww_acquire_fini(&ctx);

	if (ret) {
		pr_err("%s(flags=%x): mutual exclusion failure\n",
		       __func__, flags);
		ret = -EINVAL;
	}

	flush_work(&mtx.work);
	destroy_work_on_stack(&mtx.work);
	return ret;
#undef TIMEOUT
}

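/* Run the mutual-exclusion test for every combination of SPIN/TRY/CTX flags. */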
static int test_mutex(void)
{
	int ret;
	int i;

	for (i = 0; i < __TEST_MTX_LAST; i++) {
		ret = __test_mutex(i);
		if (ret)
			return ret;
	}

	return 0;
}

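/*
 * AA deadlock: with the mutex already held in an acquire context, a trylock
 * (with or without that context) must fail, and a recursive ww_mutex_lock()
 * with the same context must return -EALREADY rather than hang or succeed.
 */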
static int test_aa(bool trylock)
{
	struct ww_mutex mutex;
	struct ww_acquire_ctx ctx;
	int ret;
	const char *from = trylock ? "trylock" : "lock";

	ww_mutex_init(&mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	if (!trylock) {
		ret = ww_mutex_lock(&mutex, &ctx);
		if (ret) {
			pr_err("%s: initial lock failed!\n", __func__);
			goto out;
		}
	} else {
		ret = !ww_mutex_trylock(&mutex, &ctx);
		if (ret) {
			pr_err("%s: initial trylock failed!\n", __func__);
			goto out;
		}
	}

	if (ww_mutex_trylock(&mutex, NULL))  {
		pr_err("%s: trylocked itself without context from %s!\n", __func__, from);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	if (ww_mutex_trylock(&mutex, &ctx))  {
		pr_err("%s: trylocked itself with context from %s!\n", __func__, from);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = ww_mutex_lock(&mutex, &ctx);
	if (ret != -EALREADY) {
		pr_err("%s: missed deadlock for recursing, ret=%d from %s\n",
		       __func__, ret, from);
		if (!ret)
			ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ww_mutex_unlock(&mutex);
	ret = 0;
out:
	ww_acquire_fini(&ctx);
	return ret;
}

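/*
 * ABBA deadlock: the worker takes b_mutex then a_mutex while we take a_mutex
 * then b_mutex.  Without resolution at least one side must report -EDEADLK;
 * with ->resolve set, the wounded side drops the lock it holds, takes the
 * contended one via ww_mutex_lock_slow(), retakes its original lock, and
 * both sides must then succeed.
 */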
struct test_abba {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex b_mutex;
	struct completion a_ready;
	struct completion b_ready;
	bool resolve, trylock;
	int result;
};

static void test_abba_work(struct work_struct *work)
{
	struct test_abba *abba = container_of(work, typeof(*abba), work);
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init_noinject(&ctx, &ww_class);
	if (!abba->trylock)
		ww_mutex_lock(&abba->b_mutex, &ctx);
	else
		WARN_ON(!ww_mutex_trylock(&abba->b_mutex, &ctx));

	WARN_ON(READ_ONCE(abba->b_mutex.ctx) != &ctx);

	complete(&abba->b_ready);
	wait_for_completion(&abba->a_ready);

	err = ww_mutex_lock(&abba->a_mutex, &ctx);
	if (abba->resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba->b_mutex);
		ww_mutex_lock_slow(&abba->a_mutex, &ctx);
		err = ww_mutex_lock(&abba->b_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba->a_mutex);
	ww_mutex_unlock(&abba->b_mutex);
	ww_acquire_fini(&ctx);

	abba->result = err;
}

static int test_abba(bool trylock, bool resolve)
{
	struct test_abba abba;
	struct ww_acquire_ctx ctx;
	int err, ret;

	ww_mutex_init(&abba.a_mutex, &ww_class);
	ww_mutex_init(&abba.b_mutex, &ww_class);
	INIT_WORK_ONSTACK(&abba.work, test_abba_work);
	init_completion(&abba.a_ready);
	init_completion(&abba.b_ready);
	abba.trylock = trylock;
	abba.resolve = resolve;

	schedule_work(&abba.work);

	ww_acquire_init_noinject(&ctx, &ww_class);
	if (!trylock)
		ww_mutex_lock(&abba.a_mutex, &ctx);
	else
		WARN_ON(!ww_mutex_trylock(&abba.a_mutex, &ctx));

	WARN_ON(READ_ONCE(abba.a_mutex.ctx) != &ctx);

	complete(&abba.a_ready);
	wait_for_completion(&abba.b_ready);

	err = ww_mutex_lock(&abba.b_mutex, &ctx);
	if (resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba.a_mutex);
		ww_mutex_lock_slow(&abba.b_mutex, &ctx);
		err = ww_mutex_lock(&abba.a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba.b_mutex);
	ww_mutex_unlock(&abba.a_mutex);
	ww_acquire_fini(&ctx);

	flush_work(&abba.work);
	destroy_work_on_stack(&abba.work);

	ret = 0;
	if (resolve) {
		if (err || abba.result) {
			pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	} else {
		if (err != -EDEADLK && abba.result != -EDEADLK) {
			pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	}
	return ret;
}

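/*
 * Cyclic deadlock over N workers: worker n holds its own a_mutex and then
 * blocks on its neighbour's, closing a ring of waiters.  Any worker that
 * sees -EDEADLK backs off, takes the contended lock on the slow path and
 * reacquires its own lock; every worker must end up with result == 0.
 */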
struct test_cycle {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex *b_mutex;
	struct completion *a_signal;
	struct completion b_signal;
	int result;
};

static void test_cycle_work(struct work_struct *work)
{
	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
	struct ww_acquire_ctx ctx;
	int err, erra = 0;

	ww_acquire_init_noinject(&ctx, &ww_class);
	ww_mutex_lock(&cycle->a_mutex, &ctx);

	complete(cycle->a_signal);
	wait_for_completion(&cycle->b_signal);

	err = ww_mutex_lock(cycle->b_mutex, &ctx);
	if (err == -EDEADLK) {
		err = 0;
		ww_mutex_unlock(&cycle->a_mutex);
		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
		erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(cycle->b_mutex);
	if (!erra)
		ww_mutex_unlock(&cycle->a_mutex);
	ww_acquire_fini(&ctx);

	cycle->result = err ?: erra;
}

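/*
 * Wire up the ring: worker n's b_mutex points at worker n+1's a_mutex (the
 * last wraps around to the first), and each worker only proceeds to lock its
 * neighbour's mutex after that neighbour has locked it and signalled, so the
 * full cycle of waiters is guaranteed to form.
 */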
static int __test_cycle(unsigned int nthreads)
{
	struct test_cycle *cycles;
	unsigned int n, last = nthreads - 1;
	int ret;

	cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
	if (!cycles)
		return -ENOMEM;

	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		ww_mutex_init(&cycle->a_mutex, &ww_class);
		if (n == last)
			cycle->b_mutex = &cycles[0].a_mutex;
		else
			cycle->b_mutex = &cycles[n + 1].a_mutex;

		if (n == 0)
			cycle->a_signal = &cycles[last].b_signal;
		else
			cycle->a_signal = &cycles[n - 1].b_signal;
		init_completion(&cycle->b_signal);

		INIT_WORK(&cycle->work, test_cycle_work);
		cycle->result = 0;
	}

	for (n = 0; n < nthreads; n++)
		queue_work(wq, &cycles[n].work);

	flush_workqueue(wq);

	ret = 0;
	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		if (!cycle->result)
			continue;

		pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
		       n, nthreads, cycle->result);
		ret = -EINVAL;
		break;
	}

	for (n = 0; n < nthreads; n++)
		ww_mutex_destroy(&cycles[n].a_mutex);
	kfree(cycles);
	return ret;
}

static int test_cycle(unsigned int ncpus)
{
	unsigned int n;
	int ret;

	for (n = 2; n <= ncpus + 1; n++) {
		ret = __test_cycle(n);
		if (ret)
			return ret;
	}

	return 0;
}

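/*
 * Stress tests: a pool of workers hammers a shared array of ww_mutexes until
 * a jiffies deadline expires.  Each struct stress carries a worker's view of
 * the shared lock array and that deadline.  The workers share a single PRNG
 * state, so prandom_u32_below() serialises prandom_u32_state() with a
 * spinlock.
 */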
struct stress {
	struct work_struct work;
	struct ww_mutex *locks;
	unsigned long timeout;
	int nlocks;
};

struct rnd_state rng;
DEFINE_SPINLOCK(rng_lock);

static inline u32 prandom_u32_below(u32 ceil)
{
	u32 ret;

	spin_lock(&rng_lock);
	ret = prandom_u32_state(&rng) % ceil;
	spin_unlock(&rng_lock);
	return ret;
}

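/*
 * Return a randomly shuffled array of the indices 0..count-1, giving each
 * stress worker its own lock acquisition order.
 */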
static int *get_random_order(int count)
{
	int *order;
	int n, r, tmp;

	order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
	if (!order)
		return order;

	for (n = 0; n < count; n++)
		order[n] = n;

	for (n = count - 1; n > 1; n--) {
		r = prandom_u32_below(n + 1);
		if (r != n) {
			tmp = order[n];
			order[n] = order[r];
			order[r] = tmp;
		}
	}

	return order;
}

static void dummy_load(struct stress *stress)
{
	usleep_range(1000, 2000);
}

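/*
 * Acquire all locks in this worker's fixed random order.  On -EDEADLK drop
 * everything, take the contended lock on the slow path and retry from the
 * start (skipping the already-held contended lock), until the deadline.
 */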
static void stress_inorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *locks = stress->locks;
	struct ww_acquire_ctx ctx;
	int *order;

	order = get_random_order(nlocks);
	if (!order)
		return;

	do {
		int contended = -1;
		int n, err;

		ww_acquire_init(&ctx, &ww_class);
retry:
		err = 0;
		for (n = 0; n < nlocks; n++) {
			if (n == contended)
				continue;

			err = ww_mutex_lock(&locks[order[n]], &ctx);
			if (err < 0)
				break;
		}
		if (!err)
			dummy_load(stress);

		if (contended > n)
			ww_mutex_unlock(&locks[order[contended]]);
		contended = n;
		while (n--)
			ww_mutex_unlock(&locks[order[n]]);

		if (err == -EDEADLK) {
			if (!time_after(jiffies, stress->timeout)) {
				ww_mutex_lock_slow(&locks[order[contended]], &ctx);
				goto retry;
			}
		}

		ww_acquire_fini(&ctx);
		if (err) {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (!time_after(jiffies, stress->timeout));

	kfree(order);
}

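/*
 * Like the inorder stress, but the locks live on a list: on -EDEADLK the
 * already-acquired locks are released, the contended lock is taken on the
 * slow path and moved to the head of the list, and the scan restarts from
 * there, so the acquisition order mutates as contention is hit.
 */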
struct reorder_lock {
	struct list_head link;
	struct ww_mutex *lock;
};

static void stress_reorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	LIST_HEAD(locks);
	struct ww_acquire_ctx ctx;
	struct reorder_lock *ll, *ln;
	int *order;
	int n, err;

	order = get_random_order(stress->nlocks);
	if (!order)
		return;

	for (n = 0; n < stress->nlocks; n++) {
		ll = kmalloc(sizeof(*ll), GFP_KERNEL);
		if (!ll)
			goto out;

		ll->lock = &stress->locks[order[n]];
		list_add(&ll->link, &locks);
	}
	kfree(order);
	order = NULL;

	do {
		ww_acquire_init(&ctx, &ww_class);

		list_for_each_entry(ll, &locks, link) {
			err = ww_mutex_lock(ll->lock, &ctx);
			if (!err)
				continue;

			ln = ll;
			list_for_each_entry_continue_reverse(ln, &locks, link)
				ww_mutex_unlock(ln->lock);

			if (err != -EDEADLK) {
				pr_err_once("stress (%s) failed with %d\n",
					    __func__, err);
				break;
			}

			ww_mutex_lock_slow(ll->lock, &ctx);
			list_move(&ll->link, &locks); /* restarts iteration */
		}

		dummy_load(stress);
		list_for_each_entry(ll, &locks, link)
			ww_mutex_unlock(ll->lock);

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

out:
	list_for_each_entry_safe(ll, ln, &locks, link)
		kfree(ll);
	kfree(order);
}

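/*
 * Simplest stress: repeatedly lock and unlock one randomly chosen mutex with
 * no acquire context, purely to generate contention on a single lock.
 */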
static void stress_one_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *lock = stress->locks + get_random_u32_below(nlocks);
	int err;

	do {
		err = ww_mutex_lock(lock, NULL);
		if (!err) {
			dummy_load(stress);
			ww_mutex_unlock(lock);
		} else {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (!time_after(jiffies, stress->timeout));
}

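/*
 * Queue nthreads stress workers over a shared array of nlocks mutexes,
 * picking each worker's type round-robin from the requested STRESS_* flags,
 * and wait for them all to run out their roughly two-second deadline.
 */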
#define STRESS_INORDER BIT(0)
#define STRESS_REORDER BIT(1)
#define STRESS_ONE BIT(2)
#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)

static int stress(int nlocks, int nthreads, unsigned int flags)
{
	struct ww_mutex *locks;
	struct stress *stress_array;
	int n, count;

	locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
	if (!locks)
		return -ENOMEM;

	stress_array = kmalloc_array(nthreads, sizeof(*stress_array),
				     GFP_KERNEL);
	if (!stress_array) {
		kfree(locks);
		return -ENOMEM;
	}

	for (n = 0; n < nlocks; n++)
		ww_mutex_init(&locks[n], &ww_class);

	count = 0;
	for (n = 0; nthreads; n++) {
		struct stress *stress;
		void (*fn)(struct work_struct *work);

		fn = NULL;
		switch (n & 3) {
		case 0:
			if (flags & STRESS_INORDER)
				fn = stress_inorder_work;
			break;
		case 1:
			if (flags & STRESS_REORDER)
				fn = stress_reorder_work;
			break;
		case 2:
			if (flags & STRESS_ONE)
				fn = stress_one_work;
			break;
		}

		if (!fn)
			continue;

		stress = &stress_array[count++];

		INIT_WORK(&stress->work, fn);
		stress->locks = locks;
		stress->nlocks = nlocks;
		stress->timeout = jiffies + 2*HZ;

		queue_work(wq, &stress->work);
		nthreads--;
	}

	flush_workqueue(wq);

	for (n = 0; n < nlocks; n++)
		ww_mutex_destroy(&locks[n]);
	kfree(stress_array);
	kfree(locks);

	return 0;
}

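/*
 * Module entry point: allocate the unbound workqueue used by the cycle and
 * stress tests, then run each selftest in turn, failing the module load on
 * the first error.
 */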
static int __init test_ww_mutex_init(void)
{
	int ncpus = num_online_cpus();
	int ret, i;

	printk(KERN_INFO "Beginning ww mutex selftests\n");

	prandom_seed_state(&rng, get_random_u64());

	wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;

	ret = test_mutex();
	if (ret)
		return ret;

	ret = test_aa(false);
	if (ret)
		return ret;

	ret = test_aa(true);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		ret = test_abba(i & 1, i & 2);
		if (ret)
			return ret;
	}

	ret = test_cycle(ncpus);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_INORDER);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_REORDER);
	if (ret)
		return ret;

	ret = stress(2047, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
	if (ret)
		return ret;

	printk(KERN_INFO "All ww mutex selftests passed\n");
	return 0;
}

static void __exit test_ww_mutex_exit(void)
{
	destroy_workqueue(wq);
}

module_init(test_ww_mutex_init);
module_exit(test_ww_mutex_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");