// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fence mechanism for dma-buf and to allow for asynchronous dma access
 *
 * Copyright (C) 2012 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/dma-fence.h>
#include <linux/sched/signal.h>
#include <linux/seq_file.h>

#define CREATE_TRACE_POINTS
#include <trace/events/dma_fence.h>

EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled);

static DEFINE_SPINLOCK(dma_fence_stub_lock);
static struct dma_fence dma_fence_stub;

/*
 * fence context counter: each execution context should have its own
 * fence context, this allows checking if fences belong to the same
 * context or not. One device can have multiple separate contexts,
 * and they're used if some engine can run independently of another.
 */
static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);

/**
 * DOC: DMA fences overview
 *
 * DMA fences, represented by &struct dma_fence, are the kernel internal
 * synchronization primitive for DMA operations like GPU rendering, video
 * encoding/decoding, or displaying buffers on a screen.
 *
 * A fence is initialized using dma_fence_init() and completed using
 * dma_fence_signal(). Fences are associated with a context, allocated through
 * dma_fence_context_alloc(), and all fences on the same context are
 * fully ordered.
 * Since the purpose of fences is to facilitate cross-device and
 * cross-application synchronization, there are multiple ways to use one:
 *
 * - Individual fences can be exposed as a &sync_file, accessed as a file
 *   descriptor from userspace, created by calling sync_file_create(). This is
 *   called explicit fencing, since userspace passes around explicit
 *   synchronization points.
 *
 * - Some subsystems also have their own explicit fencing primitives, like
 *   &drm_syncobj. Compared to &sync_file, a &drm_syncobj allows the underlying
 *   fence to be updated.
 *
 * - Then there's also implicit fencing, where the synchronization points are
 *   implicitly passed around as part of shared &dma_buf instances. Such
 *   implicit fences are stored in &struct dma_resv through the
 *   &dma_buf.resv pointer.
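 *
 * A minimal lifecycle sketch, assuming a driver-supplied &dma_fence_ops
 * called my_fence_ops, a driver-side irqsafe spinlock and a driver-side
 * sequence counter (all hypothetical names): allocate a context once per
 * timeline, initialize one fence per submission, and signal it from the
 * completion path::
 *
 *     ctx = dma_fence_context_alloc(1);
 *
 *     dma_fence_init(&job->fence, &my_fence_ops, &my_lock, ctx, ++my_seqno);
 *
 *     dma_fence_signal(&job->fence);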
 */

/**
 * DOC: fence cross-driver contract
 *
 * Since &dma_fence provides a cross-driver contract, all drivers must follow
 * the same rules:
 *
 * * Fences must complete in a reasonable time. Fences which represent kernels
 *   and shaders submitted by userspace, which could run forever, must be backed
 *   up by timeout and gpu hang recovery code. Minimally that code must prevent
 *   further command submission and force complete all in-flight fences, e.g.
 *   when the driver or hardware do not support gpu reset, or if the gpu reset
 *   failed for some reason. Ideally the driver supports gpu recovery which only
 *   affects the offending userspace context, and no other userspace
 *   submissions.
 *
 * * Drivers may have different ideas of what completion within a reasonable
 *   time means. Some hang recovery code uses a fixed timeout, others a mix
 *   between observing forward progress and increasingly strict timeouts.
 *   Drivers should not try to second guess timeout handling of fences from
 *   other drivers.
 *
 * * To ensure there are no deadlocks of dma_fence_wait() against other locks
 *   drivers should annotate all code required to reach dma_fence_signal(),
 *   which completes the fences, with dma_fence_begin_signalling() and
 *   dma_fence_end_signalling().
 *
 * * Drivers are allowed to call dma_fence_wait() while holding dma_resv_lock().
 *   This means any code required for fence completion cannot acquire a
 *   &dma_resv lock. Note that this also pulls in the entire established
 *   locking hierarchy around dma_resv_lock() and dma_resv_unlock().
 *
 * * Drivers are allowed to call dma_fence_wait() from their &shrinker
 *   callbacks. This means any code required for fence completion cannot
 *   allocate memory with GFP_KERNEL.
 *
 * * Drivers are allowed to call dma_fence_wait() from their &mmu_notifier
 *   respectively &mmu_interval_notifier callbacks. This means any code required
 *   for fence completion cannot allocate memory with GFP_NOFS or GFP_NOIO.
 *   Only GFP_ATOMIC is permissible, which might fail.
 *
 * Note that only GPU drivers have a reasonable excuse for both requiring
 * &mmu_interval_notifier and &shrinker callbacks at the same time as having to
 * track asynchronous compute work using &dma_fence. No driver outside of
 * drivers/gpu should ever call dma_fence_wait() in such contexts.
 */

static const char *dma_fence_stub_get_name(struct dma_fence *fence)
{
	return "stub";
}

static const struct dma_fence_ops dma_fence_stub_ops = {
	.get_driver_name = dma_fence_stub_get_name,
	.get_timeline_name = dma_fence_stub_get_name,
};

/**
 * dma_fence_get_stub - return a signaled fence
 *
 * Return a stub fence which is already signaled. The fence's
 * timestamp corresponds to the first time after boot this
 * function is called.
 */
struct dma_fence *dma_fence_get_stub(void)
{
	spin_lock(&dma_fence_stub_lock);
	if (!dma_fence_stub.ops) {
		dma_fence_init(&dma_fence_stub,
			       &dma_fence_stub_ops,
			       &dma_fence_stub_lock,
			       0, 0);

		set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			&dma_fence_stub.flags);

		dma_fence_signal_locked(&dma_fence_stub);
	}
	spin_unlock(&dma_fence_stub_lock);

	return dma_fence_get(&dma_fence_stub);
}
EXPORT_SYMBOL(dma_fence_get_stub);

/**
 * dma_fence_allocate_private_stub - return a private, signaled fence
 * @timestamp: timestamp when the fence was signaled
 *
 * Return a newly allocated and signaled stub fence.
 */
struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp)
{
	struct dma_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	dma_fence_init(fence,
		       &dma_fence_stub_ops,
		       &dma_fence_stub_lock,
		       0, 0);

	set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
		&fence->flags);

	dma_fence_signal_timestamp(fence, timestamp);

	return fence;
}
EXPORT_SYMBOL(dma_fence_allocate_private_stub);

/**
 * dma_fence_context_alloc - allocate an array of fence contexts
 * @num: amount of contexts to allocate
 *
 * This function returns the first index of the newly allocated range of fence
 * contexts. The fence context is used for setting &dma_fence.context to a
 * unique number by passing the context to dma_fence_init().
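 *
 * For example, a driver with several independent engines might allocate one
 * context per engine at init time (a sketch, with hypothetical drv/engine
 * names)::
 *
 *     drv->context_base = dma_fence_context_alloc(drv->num_engines);
 *
 *     for (i = 0; i < drv->num_engines; i++)
 *         drv->engine[i].fence_context = drv->context_base + i;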
 */
u64 dma_fence_context_alloc(unsigned num)
{
	WARN_ON(!num);
	return atomic64_fetch_add(num, &dma_fence_context_counter);
}
EXPORT_SYMBOL(dma_fence_context_alloc);

/**
 * DOC: fence signalling annotation
 *
 * Proving correctness of all the kernel code around &dma_fence through code
 * review and testing is tricky for a few reasons:
 *
 * * It is a cross-driver contract, and therefore all drivers must follow the
 *   same rules for lock nesting order, calling contexts for various functions
 *   and anything else significant for in-kernel interfaces. But it is also
 *   impossible to test all drivers in a single machine, hence brute-force N vs.
 *   N testing of all combinations is impossible. Even just limiting to the
 *   possible combinations is infeasible.
 *
 * * There is an enormous amount of driver code involved. For render drivers
 *   there's the tail of command submission, after fences are published,
 *   scheduler code, interrupt and workers to process job completion,
 *   and timeout, gpu reset and gpu hang recovery code. Plus for integration
 *   with core mm we have &mmu_notifier, respectively &mmu_interval_notifier,
 *   and &shrinker. For modesetting drivers there's the commit tail functions
 *   between when fences for an atomic modeset are published, and when the
 *   corresponding vblank completes, including any interrupt processing and
 *   related workers. Auditing all that code, across all drivers, is not
 *   feasible.
 *
 * * Due to how many other subsystems are involved and the locking hierarchies
 *   this pulls in there is extremely thin wiggle-room for driver-specific
 *   differences. &dma_fence interacts with almost all of the core memory
 *   handling through page fault handlers via &dma_resv, dma_resv_lock() and
 *   dma_resv_unlock(). On the other side it also interacts through all
 *   allocation sites through &mmu_notifier and &shrinker.
 *
 * Furthermore lockdep does not handle cross-release dependencies, which means
 * any deadlocks between dma_fence_wait() and dma_fence_signal() can't be caught
 * at runtime with some quick testing. The simplest example is one thread
 * waiting on a &dma_fence while holding a lock::
 *
 *     lock(A);
 *     dma_fence_wait(B);
 *     unlock(A);
 *
 * while the other thread is stuck trying to acquire the same lock, which
 * prevents it from signalling the fence the previous thread is stuck waiting
 * on::
 *
 *     lock(A);
 *     unlock(A);
 *     dma_fence_signal(B);
 *
 * By manually annotating all code relevant to signalling a &dma_fence we can
 * teach lockdep about these dependencies, which also helps with the validation
 * headache since now lockdep can check all the rules for us::
 *
 *    cookie = dma_fence_begin_signalling();
 *    lock(A);
 *    unlock(A);
 *    dma_fence_signal(B);
 *    dma_fence_end_signalling(cookie);
 *
 * For using dma_fence_begin_signalling() and dma_fence_end_signalling() to
 * annotate critical sections the following rules need to be observed:
 *
 * * All code necessary to complete a &dma_fence must be annotated, from the
 *   point where a fence is accessible to other threads, to the point where
 *   dma_fence_signal() is called. Un-annotated code can contain deadlock issues,
 *   and due to the very strict rules and many corner cases it is infeasible to
 *   catch these just with review or normal stress testing.
 *
 * * &struct dma_resv deserves a special note, since the readers are only
 *   protected by rcu. This means the signalling critical section starts as soon
 *   as the new fences are installed, even before dma_resv_unlock() is called.
 *
 * * The only exceptions are fast paths and opportunistic signalling code, which
 *   calls dma_fence_signal() purely as an optimization, but is not required to
 *   guarantee completion of a &dma_fence. The usual example is a wait IOCTL
 *   which calls dma_fence_signal(), while the mandatory completion path goes
 *   through a hardware interrupt and possible job completion worker.
 *
 * * To aid composability of code, the annotations can be freely nested, as long
 *   as the overall locking hierarchy is consistent. The annotations also work
 *   both in interrupt and process context. Due to implementation details this
 *   requires that callers pass an opaque cookie from
 *   dma_fence_begin_signalling() to dma_fence_end_signalling().
 *
 * * Validation against the cross driver contract is implemented by priming
 *   lockdep with the relevant hierarchy at boot-up. This means even just
 *   testing with a single device is enough to validate a driver, at least as
 *   far as deadlocks with dma_fence_wait() against dma_fence_signal() are
 *   concerned.
 */
#ifdef CONFIG_LOCKDEP
static struct lockdep_map dma_fence_lockdep_map = {
	.name = "dma_fence_map"
};

/**
 * dma_fence_begin_signalling - begin a critical DMA fence signalling section
 *
 * Drivers should use this to annotate the beginning of any code section
 * required to eventually complete &dma_fence by calling dma_fence_signal().
 *
 * The end of these critical sections is annotated with
 * dma_fence_end_signalling().
 *
 * Returns:
 *
 * Opaque cookie needed by the implementation, which needs to be passed to
 * dma_fence_end_signalling().
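 *
 * A typical use wraps a driver's fence completion path (a sketch, where the
 * elided section stands for whatever driver code is required to eventually
 * signal the fence)::
 *
 *     cookie = dma_fence_begin_signalling();
 *
 *     ...
 *     dma_fence_signal(fence);
 *
 *     dma_fence_end_signalling(cookie);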
 */
bool dma_fence_begin_signalling(void)
{
	/* explicitly nesting ... */
	if (lock_is_held_type(&dma_fence_lockdep_map, 1))
		return true;

	/* rely on might_sleep check for soft/hardirq locks */
	if (in_atomic())
		return true;

	/* ... and non-recursive readlock */
	lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _RET_IP_);

	return false;
}
EXPORT_SYMBOL(dma_fence_begin_signalling);

/**
 * dma_fence_end_signalling - end a critical DMA fence signalling section
 * @cookie: opaque cookie from dma_fence_begin_signalling()
 *
 * Closes a critical section annotation opened by dma_fence_begin_signalling().
 */
void dma_fence_end_signalling(bool cookie)
{
	if (cookie)
		return;

	lock_release(&dma_fence_lockdep_map, _RET_IP_);
}
EXPORT_SYMBOL(dma_fence_end_signalling);

void __dma_fence_might_wait(void)
{
	bool tmp;

	tmp = lock_is_held_type(&dma_fence_lockdep_map, 1);
	if (tmp)
		lock_release(&dma_fence_lockdep_map, _THIS_IP_);
	lock_map_acquire(&dma_fence_lockdep_map);
	lock_map_release(&dma_fence_lockdep_map);
	if (tmp)
		lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _THIS_IP_);
}
#endif

/**
 * dma_fence_signal_timestamp_locked - signal completion of a fence
 * @fence: the fence to signal
 * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
 *
 * Signal completion for software callbacks on a fence; this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time. Set the timestamp provided as the fence
 * signal timestamp.
 *
 * Unlike dma_fence_signal_timestamp(), this function must be called with
 * &dma_fence.lock held.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
				      ktime_t timestamp)
{
	struct dma_fence_cb *cur, *tmp;
	struct list_head cb_list;

	lockdep_assert_held(fence->lock);

	if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				      &fence->flags)))
		return -EINVAL;

	/* Stash the cb_list before replacing it with the timestamp */
	list_replace(&fence->cb_list, &cb_list);

	fence->timestamp = timestamp;
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
	trace_dma_fence_signaled(fence);

	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
		INIT_LIST_HEAD(&cur->node);
		cur->func(fence, cur);
	}

	return 0;
}
EXPORT_SYMBOL(dma_fence_signal_timestamp_locked);

/**
 * dma_fence_signal_timestamp - signal completion of a fence
 * @fence: the fence to signal
 * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
 *
 * Signal completion for software callbacks on a fence; this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time. Set the timestamp provided as the fence
 * signal timestamp.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
{
	unsigned long flags;
	int ret;

	if (!fence)
		return -EINVAL;

	spin_lock_irqsave(fence->lock, flags);
	ret = dma_fence_signal_timestamp_locked(fence, timestamp);
	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_signal_timestamp);

/**
 * dma_fence_signal_locked - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence; this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time.
 *
 * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
 * held.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal_locked(struct dma_fence *fence)
{
	return dma_fence_signal_timestamp_locked(fence, ktime_get());
}
EXPORT_SYMBOL(dma_fence_signal_locked);

/**
 * dma_fence_signal - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence; this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal(struct dma_fence *fence)
{
	unsigned long flags;
	int ret;
	bool tmp;

	if (!fence)
		return -EINVAL;

	tmp = dma_fence_begin_signalling();

	spin_lock_irqsave(fence->lock, flags);
	ret = dma_fence_signal_timestamp_locked(fence, ktime_get());
	spin_unlock_irqrestore(fence->lock, flags);

	dma_fence_end_signalling(tmp);

	return ret;
}
EXPORT_SYMBOL(dma_fence_signal);

/**
 * dma_fence_wait_timeout - sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. Other error values may be
 * returned on custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly (buf-mgr between reservation and committing)
 * holds a reference to the fence, otherwise the fence might be
 * freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait() and dma_fence_wait_any_timeout().
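 *
 * A typical bounded wait, e.g. from an ioctl (a sketch assuming a 100ms
 * budget), where a negative value means interrupted or error, zero means
 * timeout, and a positive value is the remaining jiffies::
 *
 *     ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
 *     if (ret < 0)
 *         return ret;
 *     if (ret == 0)
 *         return -ETIMEDOUT;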
 */
signed long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
{
	signed long ret;

	if (WARN_ON(timeout < 0))
		return -EINVAL;

	might_sleep();

	__dma_fence_might_wait();

	dma_fence_enable_sw_signaling(fence);

	trace_dma_fence_wait_start(fence);
	if (fence->ops->wait)
		ret = fence->ops->wait(fence, intr, timeout);
	else
		ret = dma_fence_default_wait(fence, intr, timeout);
	trace_dma_fence_wait_end(fence);
	return ret;
}
EXPORT_SYMBOL(dma_fence_wait_timeout);

/**
 * dma_fence_release - default release function for fences
 * @kref: &dma_fence.refcount
 *
 * This is the default release function for &dma_fence. Drivers shouldn't call
 * this directly, but instead call dma_fence_put().
 */
void dma_fence_release(struct kref *kref)
{
	struct dma_fence *fence =
		container_of(kref, struct dma_fence, refcount);

	trace_dma_fence_destroy(fence);

	if (WARN(!list_empty(&fence->cb_list) &&
		 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags),
		 "Fence %s:%s:%llx:%llx released with pending signals!\n",
		 fence->ops->get_driver_name(fence),
		 fence->ops->get_timeline_name(fence),
		 fence->context, fence->seqno)) {
		unsigned long flags;

		/*
		 * Failed to signal before release, likely a refcounting issue.
		 *
		 * This should never happen, but if it does make sure that we
		 * don't leave chains dangling. We set the error flag first
		 * so that the callbacks know this signal is due to an error.
		 */
		spin_lock_irqsave(fence->lock, flags);
		fence->error = -EDEADLK;
		dma_fence_signal_locked(fence);
		spin_unlock_irqrestore(fence->lock, flags);
	}

	if (fence->ops->release)
		fence->ops->release(fence);
	else
		dma_fence_free(fence);
}
EXPORT_SYMBOL(dma_fence_release);

/**
 * dma_fence_free - default release function for &dma_fence.
 * @fence: fence to release
 *
 * This is the default implementation for &dma_fence_ops.release. It calls
 * kfree_rcu() on @fence.
 */
void dma_fence_free(struct dma_fence *fence)
{
	kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(dma_fence_free);

static bool __dma_fence_enable_signaling(struct dma_fence *fence)
{
	bool was_set;

	lockdep_assert_held(fence->lock);

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
				   &fence->flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return false;

	if (!was_set && fence->ops->enable_signaling) {
		trace_dma_fence_enable_signal(fence);

		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			return false;
		}
	}

	return true;
}

/**
 * dma_fence_enable_sw_signaling - enable signaling on fence
 * @fence: the fence to enable
 *
 * This will request sw signaling to be enabled, to make the fence
 * complete as soon as possible. This calls &dma_fence_ops.enable_signaling
 * internally.
 */
void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
	unsigned long flags;

	spin_lock_irqsave(fence->lock, flags);
	__dma_fence_enable_signaling(fence);
	spin_unlock_irqrestore(fence->lock, flags);
}
EXPORT_SYMBOL(dma_fence_enable_sw_signaling);

/**
 * dma_fence_add_callback - add a callback to be called when the fence
 * is signaled
 * @fence: the fence to wait on
 * @cb: the callback to register
 * @func: the function to call
 *
 * Add a software callback to the fence. The caller should keep a reference to
 * the fence.
 *
 * @cb will be initialized by dma_fence_add_callback(), no initialization
 * by the caller is required. Any number of callbacks can be registered
 * to a fence, but a callback can only be registered to one fence at a time.
 *
 * If fence is already signaled, this function will return -ENOENT (and
 * *not* call the callback).
 *
 * Note that the callback can be called from an atomic context or irq context.
 *
 * Returns 0 in case of success, -ENOENT if the fence is already signaled
 * and -EINVAL in case of error.
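 *
 * A sketch of a driver-side callback (all my_* names are hypothetical). The
 * callback must be safe to run in irq context, so here it only completes a
 * completion that a process-context waiter sleeps on::
 *
 *     struct my_waiter {
 *         struct dma_fence_cb cb;
 *         struct completion done;
 *     };
 *
 *     static void my_fence_cb(struct dma_fence *f, struct dma_fence_cb *cb)
 *     {
 *         struct my_waiter *w = container_of(cb, struct my_waiter, cb);
 *
 *         complete(&w->done);
 *     }
 *
 *     init_completion(&waiter.done);
 *     if (!dma_fence_add_callback(fence, &waiter.cb, my_fence_cb))
 *         wait_for_completion(&waiter.done);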
 */
int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
			   dma_fence_func_t func)
{
	unsigned long flags;
	int ret = 0;

	if (WARN_ON(!fence || !func))
		return -EINVAL;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		INIT_LIST_HEAD(&cb->node);
		return -ENOENT;
	}

	spin_lock_irqsave(fence->lock, flags);

	if (__dma_fence_enable_signaling(fence)) {
		cb->func = func;
		list_add_tail(&cb->node, &fence->cb_list);
	} else {
		INIT_LIST_HEAD(&cb->node);
		ret = -ENOENT;
	}

	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_add_callback);

/**
 * dma_fence_get_status - returns the status upon completion
 * @fence: the dma_fence to query
 *
 * This wraps dma_fence_get_status_locked() to return the error status
 * condition on a signaled fence. See dma_fence_get_status_locked() for more
 * details.
 *
 * Returns 0 if the fence has not yet been signaled, 1 if the fence has
 * been signaled without an error condition, or a negative error code
 * if the fence has been completed in err.
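 *
 * For example, when reporting completion state back to userspace (a sketch,
 * with a hypothetical args struct), 1 and negative values both mean the
 * fence completed, the latter with an error::
 *
 *     status = dma_fence_get_status(fence);
 *     args->signaled = (status != 0);
 *     args->error = (status < 0) ? status : 0;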
 */
int dma_fence_get_status(struct dma_fence *fence)
{
	unsigned long flags;
	int status;

	spin_lock_irqsave(fence->lock, flags);
	status = dma_fence_get_status_locked(fence);
	spin_unlock_irqrestore(fence->lock, flags);

	return status;
}
EXPORT_SYMBOL(dma_fence_get_status);

/**
 * dma_fence_remove_callback - remove a callback from the signaling list
 * @fence: the fence to wait on
 * @cb: the callback to remove
 *
 * Remove a previously queued callback from the fence. This function returns
 * true if the callback is successfully removed, or false if the fence has
 * already been signaled.
 *
 * *WARNING*:
 * Cancelling a callback should only be done if you really know what you're
 * doing, since deadlocks and race conditions could occur all too easily. For
 * this reason, it should only ever be done on hardware lockup recovery,
 * with a reference held to the fence.
 *
 * Behaviour is undefined if @cb has not been added to @fence using
 * dma_fence_add_callback() beforehand.
 */
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(fence->lock, flags);

	ret = !list_empty(&cb->node);
	if (ret)
		list_del_init(&cb->node);

	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_remove_callback);

struct default_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct default_wait_cb *wait =
		container_of(cb, struct default_wait_cb, base);

	wake_up_state(wait->task, TASK_NORMAL);
}

/**
 * dma_fence_default_wait - default sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. If @timeout is zero, one is
 * returned if the fence is already signaled, for consistency with other
 * functions taking a jiffies timeout.
 */
signed long
dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
	struct default_wait_cb cb;
	unsigned long flags;
	signed long ret = timeout ? timeout : 1;

	spin_lock_irqsave(fence->lock, flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		goto out;

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	if (!timeout) {
		ret = 0;
		goto out;
	}

	cb.base.func = dma_fence_default_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &fence->cb_list);

	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(fence->lock, flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(fence->lock, flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(fence->lock, flags);
	return ret;
}
EXPORT_SYMBOL(dma_fence_default_wait);

static bool
dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
			    uint32_t *idx)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
			if (idx)
				*idx = i;
			return true;
		}
	}
	return false;
}

/**
 * dma_fence_wait_any_timeout - sleep until any fence gets signaled
 * or until timeout elapses
 * @fences: array of fences to wait on
 * @count: number of fences to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 * @idx: used to store the first signaled fence index, meaningful only on
 *	positive return
 *
 * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
 * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
 * on success.
 *
 * Synchronously waits for the first fence in the array to be signaled. The
 * caller needs to hold a reference to all fences in the array, otherwise a
 * fence might be freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait() and dma_fence_wait_timeout().
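 *
 * For example, to wait for whichever of two fences completes first (a
 * sketch, with a hypothetical 500ms budget)::
 *
 *     struct dma_fence *fences[] = { fence_a, fence_b };
 *     uint32_t first;
 *
 *     ret = dma_fence_wait_any_timeout(fences, 2, true,
 *                                      msecs_to_jiffies(500), &first);
 *     if (ret > 0)
 *         pr_debug("fence %u signaled first\n", first);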
 */
signed long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
			   bool intr, signed long timeout, uint32_t *idx)
{
	struct default_wait_cb *cb;
	signed long ret = timeout;
	unsigned i;

	if (WARN_ON(!fences || !count || timeout < 0))
		return -EINVAL;

	if (timeout == 0) {
		for (i = 0; i < count; ++i)
			if (dma_fence_is_signaled(fences[i])) {
				if (idx)
					*idx = i;
				return 1;
			}

		return 0;
	}

	cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
	if (cb == NULL) {
		ret = -ENOMEM;
		goto err_free_cb;
	}

	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];

		cb[i].task = current;
		if (dma_fence_add_callback(fence, &cb[i].base,
					   dma_fence_default_wait_cb)) {
			/* This fence is already signaled */
			if (idx)
				*idx = i;
			goto fence_rm_cb;
		}
	}

	while (ret > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		if (dma_fence_test_signaled_any(fences, count, idx))
			break;

		ret = schedule_timeout(ret);

		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

fence_rm_cb:
	while (i-- > 0)
		dma_fence_remove_callback(fences[i], &cb[i].base);

err_free_cb:
	kfree(cb);

	return ret;
}
EXPORT_SYMBOL(dma_fence_wait_any_timeout);

/**
 * DOC: deadline hints
 *
 * In an ideal world, it would be possible to pipeline a workload sufficiently
 * that a utilization based device frequency governor could arrive at a minimum
 * frequency that meets the requirements of the use-case, in order to minimize
 * power consumption.  But in the real world there are many workloads which
 * defy this ideal.  For example, but not limited to:
 *
 * * Workloads that ping-pong between device and CPU, with alternating periods
 *   of CPU waiting for device, and device waiting on CPU.  This can result in
 *   devfreq and cpufreq seeing idle time in their respective domains and, as
 *   a result, reducing frequency.
 *
 * * Workloads that interact with a periodic time based deadline, such as double
 *   buffered GPU rendering vs vblank sync'd page flipping.  In this scenario,
 *   missing a vblank deadline results in an *increase* in idle time on the GPU
 *   (since it has to wait an additional vblank period), sending a signal to
 *   the GPU's devfreq to reduce frequency, when in fact the opposite is what is
 *   needed.
 *
 * To this end, deadline hint(s) can be set on a &dma_fence via
 * dma_fence_set_deadline() (or indirectly via userspace facing ioctls like
 * &sync_set_deadline).
 * The deadline hint provides a way for the waiting driver, or userspace, to
 * convey an appropriate sense of urgency to the signaling driver.
 *
 * A deadline hint is given in absolute ktime (CLOCK_MONOTONIC for userspace
 * facing APIs).  The time could either be some point in the future (such as
 * the vblank based deadline for page-flipping, or the start of a compositor's
 * composition cycle), or the current time to indicate an immediate deadline
 * hint (i.e. forward progress cannot be made until this fence is signaled).
 *
 * Multiple deadlines may be set on a given fence, even in parallel.  See the
 * documentation for &dma_fence_ops.set_deadline.
 *
 * The deadline hint is just that, a hint.  The driver that created the fence
 * may react by increasing frequency, making different scheduling choices, etc.
 * Or doing nothing at all.
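 *
 * For example, a compositor might hint that a fence should complete before
 * the next vblank (a sketch, assuming the next vblank timestamp is already
 * known)::
 *
 *     dma_fence_set_deadline(fence, next_vblank_time);
 *
 * or request maximum urgency by passing the current time::
 *
 *     dma_fence_set_deadline(fence, ktime_get());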
 */

/**
 * dma_fence_set_deadline - set desired fence-wait deadline hint
 * @fence:    the fence that is to be waited on
 * @deadline: the time by which the waiter hopes for the fence to be
 *            signaled
 *
 * Give the fence signaler a hint about an upcoming deadline, such as
 * vblank, by which point the waiter would prefer the fence to be
 * signaled.  This is intended to give feedback to the fence signaler
 * to aid in power management decisions, such as boosting GPU frequency
 * if a periodic vblank deadline is approaching but the fence is not
 * yet signaled.
 */
void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
{
	if (fence->ops->set_deadline && !dma_fence_is_signaled(fence))
		fence->ops->set_deadline(fence, deadline);
}
EXPORT_SYMBOL(dma_fence_set_deadline);

/**
 * dma_fence_describe - Dump fence description into seq_file
 * @fence: the fence to describe
 * @seq: the seq_file to put the textual description into
 *
 * Dump a textual description of the fence and its state into the seq_file.
 */
void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq)
{
	seq_printf(seq, "%s %s seq %llu %ssignalled\n",
		   fence->ops->get_driver_name(fence),
		   fence->ops->get_timeline_name(fence), fence->seqno,
		   dma_fence_is_signaled(fence) ? "" : "un");
}
EXPORT_SYMBOL(dma_fence_describe);

/**
 * dma_fence_init - Initialize a custom fence.
 * @fence: the fence to initialize
 * @ops: the dma_fence_ops for operations on this fence
 * @lock: the irqsafe spinlock to use for locking this fence
 * @context: the execution context this fence is run on
 * @seqno: a linear increasing sequence number for this context
 *
 * Initializes an allocated fence. The caller doesn't have to keep its
 * refcount after committing with this fence, but it will need to hold a
 * refcount again if &dma_fence_ops.enable_signaling gets called.
 *
 * context and seqno are used for easy comparison between fences, allowing
 * one to check which fence is later by simply using dma_fence_later().
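 *
 * A common pattern is to embed the fence in a driver-private job structure
 * (a sketch, all my_*/drv names hypothetical)::
 *
 *     struct my_job {
 *         struct dma_fence base;
 *     };
 *
 *     dma_fence_init(&job->base, &my_fence_ops, &drv->fence_lock,
 *                    drv->fence_context, ++drv->fence_seqno);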
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
	       spinlock_t *lock, u64 context, u64 seqno)
{
	BUG_ON(!lock);
	BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);

	kref_init(&fence->refcount);
	fence->ops = ops;
	INIT_LIST_HEAD(&fence->cb_list);
	fence->lock = lock;
	fence->context = context;
	fence->seqno = seqno;
	fence->flags = 0UL;
	fence->error = 0;

	trace_dma_fence_init(fence);
}
EXPORT_SYMBOL(dma_fence_init);
