/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting jobs to a HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *           entity can be submitted
 * @num_sched_list: number of drm scheds in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note: the sched_list should have at least one element to schedule
 *       the entity
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	entity->last_scheduled = NULL;

	if (num_sched_list)
		entity->rq = &sched_list[0]->sched_rq[entity->priority];

	init_completion(&entity->entity_idle);

	mtx_init(&entity->rq_lock, IPL_NONE);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);

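/*
 * Usage sketch (illustrative only; "ring" and "ctx" are hypothetical
 * driver-side objects, not part of this file):
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &ring->sched };
 *	int r;
 *
 *	r = drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *				  sched_list, ARRAY_SIZE(sched_list), NULL);
 *	if (r)
 *		return r;
 *
 * With one scheduler in the list the entity stays bound to sched_list[0];
 * passing several schedulers opts the entity into load balancing via
 * drm_sched_entity_select_rq().
 */
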
/**
 * drm_sched_entity_modify_sched - Modify sched of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *		 existing entity->sched_list
 * @num_sched_list: number of drm scheds in sched_list
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				    struct drm_gpu_scheduler **sched_list,
				    unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);

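/*
 * Illustrative sketch (hypothetical driver code; "ctx" is assumed to own a
 * persistent array, since the entity keeps the pointer passed in here):
 *
 *	ctx->sched_list[0] = &ring->sched;
 *	drm_sched_entity_modify_sched(&ctx->entity, ctx->sched_list, 1);
 *
 * The call should not race with job submission on the same entity.
 */
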
/**
 * drm_sched_entity_is_idle - Check if entity is idle
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity does not have any unscheduled jobs.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0)
		return true;

	return false;
}

/**
 * drm_sched_entity_is_ready - Check if entity is ready
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity can provide a job.
 */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait, in jiffies, for the job queue to become empty
 *
 * Splitting drm_sched_entity_fini() into two functions, the first one does the
 * waiting, removes the entity from the runqueue and returns an error when the
 * process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
#ifdef __linux__
	struct task_struct *last_user;
#else
	struct process *last_user, *curpr;
#endif
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini; consume
	 * existing queued IBs or discard them on SIGKILL
	 */
#ifdef __linux__
	if (current->flags & PF_EXITING) {
#else
	curpr = curproc->p_p;
	if (curpr->ps_flags & PS_EXITING) {
#endif
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process, disable any further IB enqueue right now */
#ifdef __linux__
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) {
#else
	last_user = cmpxchg(&entity->last_user, curpr, NULL);
	if ((!last_user || last_user == curproc->p_p) &&
	    (curpr->ps_flags & PS_EXITING) &&
	    (curpr->ps_xsig == SIGKILL)) {
#endif
		spin_lock(&entity->rq_lock);
		entity->stopped = true;
		drm_sched_rq_remove_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
	}

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);

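/*
 * Sketch of the split teardown this function enables (the close/free hooks
 * are hypothetical; the two calls are the ones defined in this file):
 *
 *	(on file close: wait for queued jobs, drop the entity from its rq)
 *	drm_sched_entity_flush(&ctx->entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
 *
 *	(later, when the context is finally freed)
 *	drm_sched_entity_fini(&ctx->entity);
 */
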
/**
 * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
 *
 * @f: signaled fence
 * @cb: our callback structure
 *
 * Signal the scheduler finished fence when the entity in question is killed.
 */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);

	drm_sched_fence_finished(job->s_fence);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/**
 * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
 *
 * @entity: entity which is cleaned up
 *
 * Makes sure that all remaining jobs in an entity are killed before it is
 * destroyed.
 */
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	int r;

	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		drm_sched_fence_scheduled(s_fence);
		dma_fence_set_error(&s_fence->finished, -ESRCH);

		/*
		 * When the pipe is hung by an older entity, a new entity
		 * might not even get the chance to submit its first job to
		 * the HW, so entity->last_scheduled will remain NULL.
		 */
		if (!entity->last_scheduled) {
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
			continue;
		}

		r = dma_fence_add_callback(entity->last_scheduled,
					   &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb);
		if (r == -ENOENT)
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		else if (r)
			DRM_ERROR("fence add callback failed (%d)\n", r);
	}
}

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * This should be called after drm_sched_entity_flush(). It goes over the
 * entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = NULL;

	if (entity->rq) {
		sched = entity->rq->sched;
		drm_sched_rq_remove_entity(entity->rq, entity);
	}

	/* Consumption of existing IBs wasn't completed. Forcefully
	 * remove them here.
	 */
	if (spsc_queue_count(&entity->job_queue)) {
		if (sched) {
			/*
			 * Wait for the thread to idle to make sure it isn't
			 * processing this entity.
			 */
			wait_for_completion(&entity->entity_idle);
		}
		if (entity->dependency) {
			dma_fence_remove_callback(entity->dependency,
						  &entity->cb);
			dma_fence_put(entity->dependency);
			entity->dependency = NULL;
		}

		drm_sched_entity_kill_jobs(entity);
	}

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = NULL;
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini()
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);

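/*
 * drm_sched_entity_destroy() is simply the one-shot variant of the
 * flush + fini sequence above, e.g. for a hypothetical context object:
 *
 *	drm_sched_entity_destroy(&ctx->entity);
 */
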
/**
 * drm_sched_entity_clear_dep - callback to clear the entity's dependency
 */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/**
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);

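/*
 * Illustrative only (the surrounding ioctl plumbing is hypothetical;
 * DRM_SCHED_PRIORITY_HIGH_HW is one of the enum drm_sched_priority levels):
 *
 *	drm_sched_entity_set_priority(&ctx->entity, DRM_SCHED_PRIORITY_HIGH_HW);
 *
 * Only entity->priority is updated here; the entity actually moves to the
 * matching runqueue the next time drm_sched_entity_select_rq() runs.
 */
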
/**
 * drm_sched_entity_add_dependency_cb - add callback for the entity's dependency
 *
 * @entity: entity with dependency
 *
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity, we can ignore
		 * fences from ourselves
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {
		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

/**
 * drm_sched_entity_pop_job - get a ready to be scheduled job from the entity
 *
 * @entity: entity to get the job from
 *
 * Process all dependencies and try to get one job from the entity's queue.
 */
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			sched->ops->dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* skip jobs from an entity marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}

/**
 * drm_sched_entity_select_rq - select a new rq for the entity
 *
 * @entity: scheduler entity
 *
 * Check all prerequisites and select a new rq for the entity for load
 * balancing.
 */
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq;

	if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
		return;

	fence = READ_ONCE(entity->last_scheduled);
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->rq_lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? &sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}

	spin_unlock(&entity->rq_lock);
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 *
 * @sched_job: job to submit
 * @entity: scheduler entity
 *
 * Note: To guarantee that the order of insertion into the queue matches the
 * job's fence sequence number, this function should be called with
 * drm_sched_job_init() under a common lock.
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity)
{
	bool first;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(&entity->rq->sched->num_jobs);
#ifdef __linux__
	WRITE_ONCE(entity->last_user, current->group_leader);
#else
	WRITE_ONCE(entity->last_user, curproc->p_p);
#endif
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}
		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		drm_sched_wakeup(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
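
/*
 * Typical submission flow (sketch; "job" is a hypothetical driver job that
 * embeds a struct drm_sched_job, and "owner" is a driver-defined cookie):
 *
 *	r = drm_sched_job_init(&job->base, &ctx->entity, owner);
 *	if (r)
 *		goto err_free_job;
 *	...
 *	drm_sched_entity_push_job(&job->base, &ctx->entity);
 *
 * Per the note above, drm_sched_job_init() and the push must happen under a
 * common lock so queue order matches the fence sequence numbers.
 */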
529