/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/mutex.h>

#include "i915_drv.h"
#include "i915_request.h"
#include "i915_scheduler.h"

static struct pool slab_dependencies;
static struct pool slab_priorities;

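/*
 * schedule_lock is a single global lock serialising the priority-bumping
 * walk in __i915_schedule() with the dependency-list updates performed by
 * __i915_sched_node_add_dependency() and i915_sched_node_fini(); all
 * entry points take it with interrupts disabled.
 */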
static DEFINE_SPINLOCK(schedule_lock);

static const struct i915_request *
node_to_request(const struct i915_sched_node *node)
{
	return container_of(node, const struct i915_request, sched);
}

static inline bool node_started(const struct i915_sched_node *node)
{
	return i915_request_started(node_to_request(node));
}

static inline bool node_signaled(const struct i915_sched_node *node)
{
	return i915_request_completed(node_to_request(node));
}

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
	return rb_entry(rb, struct i915_priolist, node);
}

static void assert_priolists(struct i915_sched_engine * const sched_engine)
{
	struct rb_node *rb;
	long last_prio;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return;

	GEM_BUG_ON(rb_first_cached(&sched_engine->queue) !=
		   rb_first(&sched_engine->queue.rb_root));

	last_prio = INT_MAX;
	for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) {
		const struct i915_priolist *p = to_priolist(rb);

		GEM_BUG_ON(p->priority > last_prio);
		last_prio = p->priority;
	}
}

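/*
 * i915_sched_lookup_priolist - find (or create) the request list for @prio.
 *
 * The ready queue is a cached rbtree of i915_priolist nodes, one per
 * priority level, sorted most-positive-priority first; requests of equal
 * priority stay in submission (fifo) order on the per-level list. Must be
 * called with sched_engine->lock held.
 *
 * A backend submit path is expected to use it roughly as below (an
 * illustrative sketch only; rq_prio() is a backend helper, not defined in
 * this file):
 *
 *	spin_lock(&sched_engine->lock);
 *	list_add_tail(&rq->sched.link,
 *		      i915_sched_lookup_priolist(sched_engine, rq_prio(rq)));
 *	spin_unlock(&sched_engine->lock);
 */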
struct list_head *
i915_sched_lookup_priolist(struct i915_sched_engine *sched_engine, int prio)
{
	struct i915_priolist *p;
	struct rb_node **parent, *rb;
	bool first = true;

	lockdep_assert_held(&sched_engine->lock);
	assert_priolists(sched_engine);

	if (unlikely(sched_engine->no_priolist))
		prio = I915_PRIORITY_NORMAL;

find_priolist:
	/* most positive priority is scheduled first, equal priorities fifo */
	rb = NULL;
	parent = &sched_engine->queue.rb_root.rb_node;
	while (*parent) {
		rb = *parent;
		p = to_priolist(rb);
		if (prio > p->priority) {
			parent = &rb->rb_left;
		} else if (prio < p->priority) {
			parent = &rb->rb_right;
			first = false;
		} else {
			return &p->requests;
		}
	}

	if (prio == I915_PRIORITY_NORMAL) {
		p = &sched_engine->default_priolist;
	} else {
#ifdef __linux__
		p = kmem_cache_alloc(slab_priorities, GFP_ATOMIC);
#else
		p = pool_get(&slab_priorities, PR_NOWAIT);
#endif
		/* Convert an allocation failure to a priority bump */
		if (unlikely(!p)) {
			prio = I915_PRIORITY_NORMAL; /* recurses just once */

			/* To maintain ordering with all rendering, after an
			 * allocation failure we have to disable all scheduling.
			 * Requests will then be executed in fifo, and schedule
			 * will ensure that dependencies are emitted in fifo.
			 * There will still be some reordering with existing
			 * requests, so if userspace lied about their
			 * dependencies that reordering may be visible.
			 */
			sched_engine->no_priolist = true;
			goto find_priolist;
		}
	}

	p->priority = prio;
	INIT_LIST_HEAD(&p->requests);

	rb_link_node(&p->node, rb, parent);
	rb_insert_color_cached(&p->node, &sched_engine->queue, first);

	return &p->requests;
}

void __i915_priolist_free(struct i915_priolist *p)
{
#ifdef __linux__
	kmem_cache_free(slab_priorities, p);
#else
	pool_put(&slab_priorities, p);
#endif
}

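/*
 * Cache the priolist returned by the first i915_sched_lookup_priolist()
 * while walking the dependency list in __i915_schedule(); every node we
 * bump moves to the same priority, so consecutive nodes on one engine can
 * reuse the list without repeating the rbtree lookup. The cache is cleared
 * whenever we swap one engine lock for another.
 */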
struct sched_cache {
	struct list_head *priolist;
};

static struct i915_sched_engine *
lock_sched_engine(struct i915_sched_node *node,
		  struct i915_sched_engine *locked,
		  struct sched_cache *cache)
{
	const struct i915_request *rq = node_to_request(node);
	struct i915_sched_engine *sched_engine;

	GEM_BUG_ON(!locked);

	/*
	 * Virtual engines complicate acquiring the engine timeline lock,
	 * as their rq->engine pointer is not stable until under that
	 * engine lock. The simple ploy we use is to take the lock then
	 * check that the rq still belongs to the newly locked engine.
	 */
	while (locked != (sched_engine = READ_ONCE(rq->engine)->sched_engine)) {
		spin_unlock(&locked->lock);
		memset(cache, 0, sizeof(*cache));
		spin_lock(&sched_engine->lock);
		locked = sched_engine;
	}

	GEM_BUG_ON(locked != sched_engine);
	return locked;
}

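/*
 * __i915_schedule - raise the effective priority of @node, and of every
 * incomplete node it depends on, to at least @attr->priority. Caller must
 * hold schedule_lock; the per-engine locks are taken as the dependency
 * chains are walked, since signalers may live on other (physical or
 * virtual) engines.
 */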
static void __i915_schedule(struct i915_sched_node *node,
			    const struct i915_sched_attr *attr)
{
	const int prio = max(attr->priority, node->attr.priority);
	struct i915_sched_engine *sched_engine;
	struct i915_dependency *dep, *p;
	struct i915_dependency stack;
	struct sched_cache cache;
	DRM_LIST_HEAD(dfs);

	/* Needed in order to use the temporary link inside i915_dependency */
	lockdep_assert_held(&schedule_lock);
	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);

	if (node_signaled(node))
		return;

	stack.signaler = node;
	list_add(&stack.dfs_link, &dfs);

	/*
	 * Recursively bump all dependent priorities to match the new request.
	 *
	 * A naive approach would be to use recursion:
	 * static void update_priorities(struct i915_sched_node *node, prio) {
	 *	list_for_each_entry(dep, &node->signalers_list, signal_link)
	 *		update_priorities(dep->signal, prio)
	 *	queue_request(node);
	 * }
	 * but that may have unlimited recursion depth and so runs a very
	 * real risk of overrunning the kernel stack. Instead, we build
	 * a flat list of all dependencies starting with the current request.
	 * As we walk the list of dependencies, we add each node's own
	 * dependencies to the end of the list (this may include an already
	 * visited request) and continue to walk onwards onto the new
	 * dependencies. The end result is a topological list of requests in
	 * reverse order, the last element in the list is the request we must
	 * execute first.
	 */
	list_for_each_entry(dep, &dfs, dfs_link) {
		struct i915_sched_node *node = dep->signaler;

		/* If we are already flying, we know we have no signalers */
		if (node_started(node))
			continue;

		/*
		 * Within an engine, there can be no cycle, but we may
		 * refer to the same dependency chain multiple times
		 * (redundant dependencies are not eliminated) and across
		 * engines.
		 */
		list_for_each_entry(p, &node->signalers_list, signal_link) {
			GEM_BUG_ON(p == dep); /* no cycles! */

			if (node_signaled(p->signaler))
				continue;

			if (prio > READ_ONCE(p->signaler->attr.priority))
				list_move_tail(&p->dfs_link, &dfs);
		}
	}

	/*
	 * If we didn't need to bump any existing priorities, and we haven't
	 * yet submitted this request (i.e. there is no potential race with
	 * execlists_submit_request()), we can set our own priority and skip
	 * acquiring the engine locks.
	 */
	if (node->attr.priority == I915_PRIORITY_INVALID) {
		GEM_BUG_ON(!list_empty(&node->link));
		node->attr = *attr;

		if (stack.dfs_link.next == stack.dfs_link.prev)
			return;

		__list_del_entry(&stack.dfs_link);
	}

	memset(&cache, 0, sizeof(cache));
	sched_engine = node_to_request(node)->engine->sched_engine;
	spin_lock(&sched_engine->lock);

	/* Fifo and depth-first replacement ensure our deps execute before us */
	sched_engine = lock_sched_engine(node, sched_engine, &cache);
	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
		struct i915_request *from = container_of(dep->signaler,
							 struct i915_request,
							 sched);
		INIT_LIST_HEAD(&dep->dfs_link);

		node = dep->signaler;
		sched_engine = lock_sched_engine(node, sched_engine, &cache);
		lockdep_assert_held(&sched_engine->lock);

		/* Recheck after acquiring sched_engine->lock */
		if (prio <= node->attr.priority || node_signaled(node))
			continue;

		GEM_BUG_ON(node_to_request(node)->engine->sched_engine !=
			   sched_engine);

		/* Must be called before changing the node's priority */
		if (sched_engine->bump_inflight_request_prio)
			sched_engine->bump_inflight_request_prio(from, prio);

		WRITE_ONCE(node->attr.priority, prio);

		/*
		 * Once the request is ready, it will be placed into the
		 * priority lists and then onto the HW runlist. Before the
		 * request is ready, it does not contribute to our preemption
		 * decisions and we can safely ignore it, as it, and any
		 * preemption required, will be dealt with upon submission.
		 * See engine->submit_request().
		 */
		if (list_empty(&node->link))
			continue;

		if (i915_request_in_priority_queue(node_to_request(node))) {
			if (!cache.priolist)
				cache.priolist =
					i915_sched_lookup_priolist(sched_engine,
								   prio);
			list_move_tail(&node->link, cache.priolist);
		}

		/* Defer (tasklet) submission until after all of our updates. */
		if (sched_engine->kick_backend)
			sched_engine->kick_backend(node_to_request(node), prio);
	}

	spin_unlock(&sched_engine->lock);
}

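/*
 * i915_schedule - entry point for bumping the priority of a single request.
 *
 * A caller wishing to boost an already submitted request would do something
 * along these lines (illustrative only; the priority value is just an
 * example):
 *
 *	struct i915_sched_attr attr = { .priority = I915_PRIORITY_NORMAL + 1 };
 *
 *	i915_schedule(rq, &attr);
 */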
void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
	spin_lock_irq(&schedule_lock);
	__i915_schedule(&rq->sched, attr);
	spin_unlock_irq(&schedule_lock);
}

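/*
 * i915_sched_node_init - prepare the scheduler node embedded in a request.
 * The list heads are initialised once here; i915_sched_node_reinit() only
 * resets the scalar state so a node can be reused when its request is
 * recycled.
 */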
void i915_sched_node_init(struct i915_sched_node *node)
{
	INIT_LIST_HEAD(&node->signalers_list);
	INIT_LIST_HEAD(&node->waiters_list);
	INIT_LIST_HEAD(&node->link);

	i915_sched_node_reinit(node);
}

void i915_sched_node_reinit(struct i915_sched_node *node)
{
	node->attr.priority = I915_PRIORITY_INVALID;
	node->semaphores = 0;
	node->flags = 0;

	GEM_BUG_ON(!list_empty(&node->signalers_list));
	GEM_BUG_ON(!list_empty(&node->waiters_list));
	GEM_BUG_ON(!list_empty(&node->link));
}

static struct i915_dependency *
i915_dependency_alloc(void)
{
#ifdef __linux__
	return kmem_cache_alloc(slab_dependencies, GFP_KERNEL);
#else
	return pool_get(&slab_dependencies, PR_WAITOK);
#endif
}

static void
i915_dependency_free(struct i915_dependency *dep)
{
#ifdef __linux__
	kmem_cache_free(slab_dependencies, dep);
#else
	pool_put(&slab_dependencies, dep);
#endif
}

bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
				      struct i915_sched_node *signal,
				      struct i915_dependency *dep,
				      unsigned long flags)
{
	bool ret = false;

	spin_lock_irq(&schedule_lock);

	if (!node_signaled(signal)) {
		INIT_LIST_HEAD(&dep->dfs_link);
		dep->signaler = signal;
		dep->waiter = node;
		dep->flags = flags;

		/* All set, now publish. Beware the lockless walkers. */
		list_add_rcu(&dep->signal_link, &node->signalers_list);
		list_add_rcu(&dep->wait_link, &signal->waiters_list);

		/* Propagate the chains */
		node->flags |= signal->flags;
		ret = true;
	}

	spin_unlock_irq(&schedule_lock);

	return ret;
}

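/*
 * i915_sched_node_add_dependency - allocate and record a dependency so that
 * @node waits upon @signal. Note that 0 is also returned when the signaler
 * has already completed and no dependency needs tracking; only allocation
 * failure is reported as an error.
 */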
int i915_sched_node_add_dependency(struct i915_sched_node *node,
				   struct i915_sched_node *signal,
				   unsigned long flags)
{
	struct i915_dependency *dep;

	dep = i915_dependency_alloc();
	if (!dep)
		return -ENOMEM;

	if (!__i915_sched_node_add_dependency(node, signal, dep,
					      flags | I915_DEPENDENCY_ALLOC))
		i915_dependency_free(dep);

	return 0;
}

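/*
 * i915_sched_node_fini - sever all remaining dependency links on retirement,
 * freeing any i915_dependency allocated on behalf of this node.
 */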
void i915_sched_node_fini(struct i915_sched_node *node)
{
	struct i915_dependency *dep, *tmp;

	spin_lock_irq(&schedule_lock);

	/*
	 * Everyone we depended upon (the fences we wait to be signaled)
	 * should retire before us and remove themselves from our list.
	 * However, retirement is run independently on each timeline and
	 * so we may be called out-of-order.
	 */
	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del_rcu(&dep->wait_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(dep);
	}
	INIT_LIST_HEAD(&node->signalers_list);

	/* Remove ourselves from everyone who depends upon us */
	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
		GEM_BUG_ON(dep->signaler != node);
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del_rcu(&dep->signal_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(dep);
	}
	INIT_LIST_HEAD(&node->waiters_list);

	spin_unlock_irq(&schedule_lock);
}

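/*
 * i915_request_show_with_schedule - print @rq and, indented beneath it, any
 * incomplete signalers it still waits upon from other timelines.
 */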
void i915_request_show_with_schedule(struct drm_printer *m,
				     const struct i915_request *rq,
				     const char *prefix,
				     int indent)
{
	struct i915_dependency *dep;

	i915_request_show(m, rq, prefix, indent);
	if (i915_request_completed(rq))
		return;

	rcu_read_lock();
	for_each_signaler(dep, rq) {
		const struct i915_request *signaler =
			node_to_request(dep->signaler);

		/* Dependencies along the same timeline are expected. */
		if (signaler->timeline == rq->timeline)
			continue;

		if (__i915_request_is_complete(signaler))
			continue;

		i915_request_show(m, signaler, prefix, indent + 2);
	}
	rcu_read_unlock();
}

static void default_destroy(struct kref *kref)
{
	struct i915_sched_engine *sched_engine =
		container_of(kref, typeof(*sched_engine), ref);

	tasklet_kill(&sched_engine->tasklet); /* flush the callback */
	kfree(sched_engine);
}

static bool default_disabled(struct i915_sched_engine *sched_engine)
{
	return false;
}

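/*
 * i915_sched_engine_create - allocate a refcounted submission queue.
 *
 * Typical backend usage is expected to look roughly like the sketch below
 * (assuming the ENGINE_PHYSICAL lockdep subclass used by physical engines
 * and release via i915_sched_engine_put()):
 *
 *	engine->sched_engine = i915_sched_engine_create(ENGINE_PHYSICAL);
 *	if (!engine->sched_engine)
 *		return -ENOMEM;
 *	...
 *	i915_sched_engine_put(engine->sched_engine);
 */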
struct i915_sched_engine *
i915_sched_engine_create(unsigned int subclass)
{
	struct i915_sched_engine *sched_engine;

	sched_engine = kzalloc(sizeof(*sched_engine), GFP_KERNEL);
	if (!sched_engine)
		return NULL;

	kref_init(&sched_engine->ref);

	sched_engine->queue = RB_ROOT_CACHED;
	sched_engine->queue_priority_hint = INT_MIN;
	sched_engine->destroy = default_destroy;
	sched_engine->disabled = default_disabled;

	INIT_LIST_HEAD(&sched_engine->requests);
	INIT_LIST_HEAD(&sched_engine->hold);

	mtx_init(&sched_engine->lock, IPL_TTY);
	lockdep_set_subclass(&sched_engine->lock, subclass);

	/*
	 * Due to an interesting quirk in lockdep's internal debug tracking,
	 * after setting a subclass we must ensure the lock is used. Otherwise,
	 * nr_unused_locks is incremented once too often.
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	local_irq_disable();
	lock_map_acquire(&sched_engine->lock.dep_map);
	lock_map_release(&sched_engine->lock.dep_map);
	local_irq_enable();
#endif

	return sched_engine;
}

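/*
 * Module init/exit only manage the backing allocators for dependency and
 * priolist nodes: kmem_cache on Linux, pool(9) on OpenBSD. The dependency
 * cache is SLAB_TYPESAFE_BY_RCU so the lockless walkers noted in
 * __i915_sched_node_add_dependency() can safely race with freeing.
 */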
void i915_scheduler_module_exit(void)
{
#ifdef __linux__
	kmem_cache_destroy(slab_dependencies);
	kmem_cache_destroy(slab_priorities);
#else
	pool_destroy(&slab_dependencies);
	pool_destroy(&slab_priorities);
#endif
}

int __init i915_scheduler_module_init(void)
{
#ifdef __linux__
	slab_dependencies = KMEM_CACHE(i915_dependency,
				       SLAB_HWCACHE_ALIGN |
				       SLAB_TYPESAFE_BY_RCU);
	if (!slab_dependencies)
		return -ENOMEM;

	slab_priorities = KMEM_CACHE(i915_priolist, 0);
	if (!slab_priorities)
		goto err_priorities;

	return 0;

err_priorities:
	/* slab_priorities failed to allocate; release slab_dependencies */
	kmem_cache_destroy(slab_dependencies);
	return -ENOMEM;
#else
	pool_init(&slab_dependencies, sizeof(struct i915_dependency),
	    CACHELINESIZE, IPL_TTY, 0, "gsdep", NULL);
	pool_init(&slab_priorities, sizeof(struct i915_priolist),
	    CACHELINESIZE, IPL_TTY, 0, "gspri", NULL);

	return 0;
#endif
}