/*	$NetBSD: i915_sw_fence.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $	*/

/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_sw_fence.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $");

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"

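/*
 * Notification callback shared by every test fence: completion needs no
 * action, and freeing is deferred to the tests themselves (see free_fence).
 */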
static int __i915_sw_fence_call
fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	switch (state) {
	case FENCE_COMPLETE:
		break;

	case FENCE_FREE:
		/* Leave the fence for the caller to free it after testing */
		break;
	}

	return NOTIFY_DONE;
}

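/* Small helpers to create and destroy a test fence bound to fence_notify */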
static struct i915_sw_fence *alloc_fence(void)
{
	struct i915_sw_fence *fence;

	fence = kmalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return NULL;

	i915_sw_fence_init(fence, fence_notify);
	return fence;
}

static void free_fence(struct i915_sw_fence *fence)
{
	i915_sw_fence_fini(fence);
	kfree(fence);
}

static int __test_self(struct i915_sw_fence *fence)
{
	if (i915_sw_fence_done(fence))
		return -EINVAL;

	i915_sw_fence_commit(fence);
	if (!i915_sw_fence_done(fence))
		return -EINVAL;

	i915_sw_fence_wait(fence);
	if (!i915_sw_fence_done(fence))
		return -EINVAL;

	return 0;
}

static int test_self(void *arg)
{
	struct i915_sw_fence *fence;
	int ret;

	/* Test i915_sw_fence signaling and completion testing */
	fence = alloc_fence();
	if (!fence)
		return -ENOMEM;

	ret = __test_self(fence);

	free_fence(fence);
	return ret;
}

static int test_dag(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret = -EINVAL;

	/* Test detection of cycles within the i915_sw_fence graphs */
	if (!IS_ENABLED(CONFIG_DRM_I915_SW_FENCE_CHECK_DAG))
		return 0;

	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	if (i915_sw_fence_await_sw_fence_gfp(A, A, GFP_KERNEL) != -EINVAL) {
		pr_err("recursive cycle not detected (AA)\n");
		goto err_A;
	}

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
	if (i915_sw_fence_await_sw_fence_gfp(B, A, GFP_KERNEL) != -EINVAL) {
		pr_err("single depth cycle not detected (BAB)\n");
		goto err_B;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	if (i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL) == -EINVAL) {
		pr_err("invalid cycle detected\n");
		goto err_C;
	}
	if (i915_sw_fence_await_sw_fence_gfp(C, B, GFP_KERNEL) != -EINVAL) {
		pr_err("single depth cycle not detected (CBC)\n");
		goto err_C;
	}
	if (i915_sw_fence_await_sw_fence_gfp(C, A, GFP_KERNEL) != -EINVAL) {
		pr_err("cycle not detected (BA, CB, AC)\n");
		goto err_C;
	}
	if (i915_sw_fence_await_sw_fence_gfp(A, C, GFP_KERNEL) == -EINVAL) {
		pr_err("invalid cycle detected\n");
		goto err_C;
	}

	i915_sw_fence_commit(A);
	i915_sw_fence_commit(B);
	i915_sw_fence_commit(C);

	ret = 0;
	if (!i915_sw_fence_done(C)) {
		pr_err("fence C not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(B)) {
		pr_err("fence B not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(A)) {
		pr_err("fence A not done\n");
		ret = -EINVAL;
	}
err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}

static int test_AB(void *arg)
{
	struct i915_sw_fence *A, *B;
	int ret;

	/* Test i915_sw_fence (A) waiting on an event source (B) */
	A = alloc_fence();
	if (!A)
		return -ENOMEM;
	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
	if (ret < 0)
		goto err_B;
	if (ret == 0) {
		pr_err("Incorrectly reported fence A was complete before await\n");
		ret = -EINVAL;
		goto err_B;
	}

	ret = -EINVAL;
	i915_sw_fence_commit(A);
	if (i915_sw_fence_done(A))
		goto err_B;

	i915_sw_fence_commit(B);
	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B is not done\n");
		goto err_B;
	}

	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A is not done\n");
		goto err_B;
	}

	ret = 0;
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}

static int test_ABC(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret;

	/* Test a chain of fences, A waits on B who waits on C */
	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		pr_err("Incorrectly reported fence B was complete before await\n");
		goto err_C;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		pr_err("Incorrectly reported fence C was complete before await\n");
		goto err_C;
	}

	ret = -EINVAL;
	i915_sw_fence_commit(A);
	if (i915_sw_fence_done(A)) {
		pr_err("Fence A completed early\n");
		goto err_C;
	}

	i915_sw_fence_commit(B);
	if (i915_sw_fence_done(B)) {
		pr_err("Fence B completed early\n");
		goto err_C;
	}

	if (i915_sw_fence_done(A)) {
		pr_err("Fence A completed early (after signaling B)\n");
		goto err_C;
	}

	i915_sw_fence_commit(C);

	ret = 0;
	if (!i915_sw_fence_done(C)) {
		pr_err("Fence C not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A not done\n");
		ret = -EINVAL;
	}
err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}

static int test_AB_C(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret = -EINVAL;

	/* Test multiple fences (AB) waiting on a single event (C) */
	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(A, C, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		ret = -EINVAL;
		goto err_C;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		ret = -EINVAL;
		goto err_C;
	}

	i915_sw_fence_commit(A);
	i915_sw_fence_commit(B);

	ret = 0;
	if (i915_sw_fence_done(A)) {
		pr_err("Fence A completed early\n");
		ret = -EINVAL;
	}

	if (i915_sw_fence_done(B)) {
		pr_err("Fence B completed early\n");
		ret = -EINVAL;
	}

	i915_sw_fence_commit(C);
	if (!i915_sw_fence_done(C)) {
		pr_err("Fence C not done\n");
		ret = -EINVAL;
	}

	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B not done\n");
		ret = -EINVAL;
	}

	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A not done\n");
		ret = -EINVAL;
	}

err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}

static int test_C_AB(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret;

	/* Test multiple event sources (A,B) for a single fence (C) */
	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(C, A, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		ret = -EINVAL;
		goto err_C;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(C, B, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		ret = -EINVAL;
		goto err_C;
	}

	ret = 0;
	i915_sw_fence_commit(C);
	if (i915_sw_fence_done(C))
		ret = -EINVAL;

	i915_sw_fence_commit(A);
	i915_sw_fence_commit(B);

	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A not done\n");
		ret = -EINVAL;
	}

	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B not done\n");
		ret = -EINVAL;
	}

	if (!i915_sw_fence_done(C)) {
		pr_err("Fence C not done\n");
		ret = -EINVAL;
	}

err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}

static int test_chain(void *arg)
{
	int nfences = 4096;
	struct i915_sw_fence **fences;
	int ret, i;

	/* Test a long chain of fences */
	fences = kmalloc_array(nfences, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	for (i = 0; i < nfences; i++) {
		fences[i] = alloc_fence();
		if (!fences[i]) {
			nfences = i;
			ret = -ENOMEM;
			goto err;
		}

		if (i > 0) {
			ret = i915_sw_fence_await_sw_fence_gfp(fences[i],
							       fences[i - 1],
							       GFP_KERNEL);
			if (ret < 0) {
				nfences = i + 1;
				goto err;
			}

			i915_sw_fence_commit(fences[i]);
		}
	}

	ret = 0;
	for (i = nfences; --i; ) {
		if (i915_sw_fence_done(fences[i])) {
			if (ret == 0)
				pr_err("Fence[%d] completed early\n", i);
			ret = -EINVAL;
		}
	}
	i915_sw_fence_commit(fences[0]);
	for (i = 0; ret == 0 && i < nfences; i++) {
		if (!i915_sw_fence_done(fences[i])) {
			pr_err("Fence[%d] is not done\n", i);
			ret = -EINVAL;
		}
	}

err:
	for (i = 0; i < nfences; i++)
		free_fence(fences[i]);
	kfree(fences);
	return ret;
}

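/*
 * Shared state for the IPC test: the worker waits on 'in', posts 'value',
 * and then signals 'out' back to the parent.
 */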
struct task_ipc {
	struct work_struct work;
	struct completion started;
	struct i915_sw_fence *in, *out;
	int value;
};

static void task_ipc(struct work_struct *work)
{
	struct task_ipc *ipc = container_of(work, typeof(*ipc), work);

	complete(&ipc->started);

	i915_sw_fence_wait(ipc->in);
	smp_store_mb(ipc->value, 1);
	i915_sw_fence_commit(ipc->out);
}

static int test_ipc(void *arg)
{
	struct task_ipc ipc;
	int ret = 0;

	/* Test use of i915_sw_fence as an interprocess signaling mechanism */
	ipc.in = alloc_fence();
	if (!ipc.in)
		return -ENOMEM;
	ipc.out = alloc_fence();
	if (!ipc.out) {
		ret = -ENOMEM;
		goto err_in;
	}

	/* use a completion to avoid chicken-and-egg testing */
	init_completion(&ipc.started);

	ipc.value = 0;
	INIT_WORK_ONSTACK(&ipc.work, task_ipc);
	schedule_work(&ipc.work);

	wait_for_completion(&ipc.started);

	usleep_range(1000, 2000);
	if (READ_ONCE(ipc.value)) {
		pr_err("worker updated value before i915_sw_fence was signaled\n");
		ret = -EINVAL;
	}

	i915_sw_fence_commit(ipc.in);
	i915_sw_fence_wait(ipc.out);

	if (!READ_ONCE(ipc.value)) {
		pr_err("worker signaled i915_sw_fence before value was posted\n");
		ret = -EINVAL;
	}

	flush_work(&ipc.work);
	destroy_work_on_stack(&ipc.work);
	free_fence(ipc.out);
err_in:
	free_fence(ipc.in);
	return ret;
}

static int test_timer(void *arg)
{
	unsigned long target, delay;
	struct timed_fence tf;

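	/* Test a timed fence: it must signal only once its expiry has passed */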
	preempt_disable();
	timed_fence_init(&tf, target = jiffies);
	if (!i915_sw_fence_done(&tf.fence)) {
		pr_err("Fence with immediate expiration not signaled\n");
		goto err;
	}
	preempt_enable();
	timed_fence_fini(&tf);

	for_each_prime_number(delay, i915_selftest.timeout_jiffies/2) {
		preempt_disable();
		timed_fence_init(&tf, target = jiffies + delay);
		if (i915_sw_fence_done(&tf.fence)) {
			pr_err("Fence with future expiration (%lu jiffies) already signaled\n", delay);
			goto err;
		}
		preempt_enable();

		i915_sw_fence_wait(&tf.fence);

		preempt_disable();
		if (!i915_sw_fence_done(&tf.fence)) {
			pr_err("Fence not signaled after wait\n");
			goto err;
		}
		if (time_before(jiffies, target)) {
			pr_err("Fence signaled too early, target=%lu, now=%lu\n",
			       target, jiffies);
			goto err;
		}
		preempt_enable();
		timed_fence_fini(&tf);
	}

	return 0;

err:
	preempt_enable();
	timed_fence_fini(&tf);
	return -EINVAL;
}

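/* A minimal mock dma_fence, used purely as a signaling source below */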
static const char *mock_name(struct dma_fence *fence)
{
	return "mock";
}

static const struct dma_fence_ops mock_fence_ops = {
	.get_driver_name = mock_name,
	.get_timeline_name = mock_name,
};

static DEFINE_SPINLOCK(mock_fence_lock);

static struct dma_fence *alloc_dma_fence(void)
{
	struct dma_fence *dma;

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (dma)
		dma_fence_init(dma, &mock_fence_ops, &mock_fence_lock, 0, 0);

	return dma;
}

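/*
 * Wrap a dma_fence in a committed i915_sw_fence, with an optional timeout
 * (delay, in jiffies; 0 means no timeout).
 */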
static struct i915_sw_fence *
wrap_dma_fence(struct dma_fence *dma, unsigned long delay)
{
	struct i915_sw_fence *fence;
	int err;

	fence = alloc_fence();
	if (!fence)
		return ERR_PTR(-ENOMEM);

	err = i915_sw_fence_await_dma_fence(fence, dma, delay, GFP_NOWAIT);
	i915_sw_fence_commit(fence);
	if (err < 0) {
		free_fence(fence);
		return ERR_PTR(err);
	}

	return fence;
}

static int test_dma_fence(void *arg)
{
	struct i915_sw_fence *timeout = NULL, *not = NULL;
	unsigned long delay = i915_selftest.timeout_jiffies;
	unsigned long end, sleep;
	struct dma_fence *dma;
	int err;

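	/* Test i915_sw_fence waiting on a dma_fence, with and without a timeout */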
	dma = alloc_dma_fence();
	if (!dma)
		return -ENOMEM;

	timeout = wrap_dma_fence(dma, delay);
	if (IS_ERR(timeout)) {
		err = PTR_ERR(timeout);
		goto err;
	}

	not = wrap_dma_fence(dma, 0);
	if (IS_ERR(not)) {
		err = PTR_ERR(not);
		goto err;
	}

	err = -EINVAL;
	if (i915_sw_fence_done(timeout) || i915_sw_fence_done(not)) {
		pr_err("Fences immediately signaled\n");
		goto err;
	}

	/* We round the timeout for the fence up to the next second */
	end = round_jiffies_up(jiffies + delay);

	sleep = jiffies_to_usecs(delay) / 3;
	usleep_range(sleep, 2 * sleep);
	if (time_after(jiffies, end)) {
		pr_debug("Slept too long, delay=%lu, (target=%lu, now=%lu) skipping\n",
			 delay, end, jiffies);
		goto skip;
	}

	if (i915_sw_fence_done(timeout) || i915_sw_fence_done(not)) {
		pr_err("Fences signaled too early\n");
		goto err;
	}

	if (!wait_event_timeout(timeout->wait,
				i915_sw_fence_done(timeout),
				2 * (end - jiffies) + 1)) {
		pr_err("Timeout fence unsignaled!\n");
		goto err;
	}

	if (i915_sw_fence_done(not)) {
		pr_err("No timeout fence signaled!\n");
		goto err;
	}

skip:
	dma_fence_signal(dma);

	if (!i915_sw_fence_done(timeout) || !i915_sw_fence_done(not)) {
		pr_err("Fences unsignaled\n");
		goto err;
	}

	free_fence(not);
	free_fence(timeout);
	dma_fence_put(dma);

	return 0;

err:
	dma_fence_signal(dma);
	if (!IS_ERR_OR_NULL(timeout))
		free_fence(timeout);
	if (!IS_ERR_OR_NULL(not))
		free_fence(not);
	dma_fence_put(dma);
	return err;
}

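/* Entry point: run the full set of mock (hardware-free) i915_sw_fence tests */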
int i915_sw_fence_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(test_self),
		SUBTEST(test_dag),
		SUBTEST(test_AB),
		SUBTEST(test_ABC),
		SUBTEST(test_AB_C),
		SUBTEST(test_C_AB),
		SUBTEST(test_chain),
		SUBTEST(test_ipc),
		SUBTEST(test_timer),
		SUBTEST(test_dma_fence),
	};

	return i915_subtests(tests, NULL);
}