/*	$NetBSD: gpu_scheduler.h,v 1.4 2021/12/19 12:23:16 riastradh Exp $	*/

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <drm/drm_wait_netbsd.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>
#include <linux/workqueue.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

struct drm_gpu_scheduler;
struct drm_sched_rq;

enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH_SW,
	DRM_SCHED_PRIORITY_HIGH_HW,
	DRM_SCHED_PRIORITY_KERNEL,
	DRM_SCHED_PRIORITY_MAX,
	DRM_SCHED_PRIORITY_INVALID = -1,
	DRM_SCHED_PRIORITY_UNSET = -2
};

/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * @list: used to append this struct to the list of entities in the
 *        runqueue.
 * @rq: runqueue on which this entity is currently scheduled.
 * @sched_list: A list of schedulers (drm_gpu_schedulers).
 *              Jobs from this entity can be scheduled on any scheduler
 *              on this list.
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
 * @priority: the scheduling priority of this entity.
 * @rq_lock: lock to modify the runqueue to which this entity belongs.
 * @job_queue: the list of jobs of this entity.
 * @fence_seq: a linearly increasing seqno incremented with each
 *             new &drm_sched_fence which is part of the entity.
 * @fence_context: a unique context for all the fences which belong
 *                 to this entity.
 *                 The &drm_sched_fence.scheduled uses the
 *                 fence_context but &drm_sched_fence.finished uses
 *                 fence_context + 1.
 * @dependency: the dependency fence of the job which is on the top
 *              of the job queue.
 * @cb: callback for the dependency fence above.
 * @guilty: points to ctx's guilty.
 * @last_scheduled: points to the finished fence of the last scheduled job.
 * @last_user: last group leader pushing a job into the entity.
 * @stopped: Marks the entity as removed from rq and destined for termination.
 * @entity_idle: Signals when the entity is not in use.
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
	struct list_head		list;
	struct drm_sched_rq		*rq;
	struct drm_gpu_scheduler        **sched_list;
	unsigned int                    num_sched_list;
	enum drm_sched_priority         priority;
	spinlock_t			rq_lock;

	struct spsc_queue		job_queue;

	atomic_t			fence_seq;
	uint64_t			fence_context;

	struct dma_fence		*dependency;
	struct dma_fence_cb		cb;
	atomic_t			*guilty;
	struct dma_fence                *last_scheduled;
#ifdef __NetBSD__
	struct proc			*last_user;
#else
	struct task_struct		*last_user;
#endif
	bool				stopped;
	struct completion		entity_idle;
};
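
/*
 * A minimal usage sketch, assuming a driver-specific ring/context layout
 * (the ring, ctx and scheds names below are hypothetical): an entity is
 * typically embedded in a per-open-file context, initialized against the
 * scheduler(s) it may run on, and destroyed together with the context.
 *
 *	struct drm_gpu_scheduler *scheds[] = { &ring->sched };
 *	int r;
 *
 *	r = drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *	    scheds, ARRAY_SIZE(scheds), NULL);
 *	if (r)
 *		return r;
 *	...
 *	drm_sched_entity_destroy(&ctx->entity);
 */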

/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	spinlock_t			lock;
	struct drm_gpu_scheduler	*sched;
	struct list_head		entities;
	struct drm_sched_entity		*current_entity;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
        /**
         * @scheduled: this fence is what will be signaled by the scheduler
         * when the job is scheduled.
         */
	struct dma_fence		scheduled;

        /**
         * @finished: this fence is what will be signaled by the scheduler
         * when the job is completed.
         *
         * When setting up an out fence for the job, you should use
         * this, since it's available immediately upon
         * drm_sched_job_init(), and the fence returned by the driver
         * from run_job() won't be created until the dependencies have
         * resolved.
         */
	struct dma_fence		finished;

        /**
         * @parent: the fence returned by &drm_sched_backend_ops.run_job
         * when scheduling the job on hardware. We signal the
         * &drm_sched_fence.finished fence once parent is signalled.
         */
	struct dma_fence		*parent;
        /**
         * @sched: the scheduler instance to which the job having this struct
         * belongs.
         */
	struct drm_gpu_scheduler	*sched;
        /**
         * @lock: the lock used by the scheduled and the finished fences.
         */
	spinlock_t			lock;
        /**
         * @owner: job owner for debugging
         */
	void				*owner;
};

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
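
/*
 * As the @finished documentation above notes, &drm_sched_fence.finished
 * is the fence to hand out as the job's out fence.  A hedged sketch,
 * assuming a driver job that embeds a drm_sched_job as "base" and a
 * hypothetical out_syncobj supplied by userspace:
 *
 *	r = drm_sched_job_init(&job->base, &ctx->entity, ctx);
 *	if (r)
 *		return r;
 *	drm_syncobj_replace_fence(out_syncobj,
 *	    &job->base.s_fence->finished);
 *	drm_sched_entity_push_job(&job->base, &ctx->entity);
 */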

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of the job.
 * @finish_cb: the callback for the finished fence.
 * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: increment on every hang caused by this job. If this exceeds the hang
 *         limit of the scheduler then the job is marked guilty and will not
 *         be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(), and the
 * driver should call drm_sched_entity_push_job() once it wants the
 * scheduler to schedule the job.
 */
struct drm_sched_job {
	struct spsc_node		queue_node;
	struct drm_gpu_scheduler	*sched;
	struct drm_sched_fence		*s_fence;
	struct dma_fence_cb		finish_cb;
	struct list_head		node;
	uint64_t			id;
	atomic_t			karma;
	enum drm_sched_priority		s_priority;
	struct drm_sched_entity		*entity;
	struct dma_fence_cb		cb;
};

static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return (s_job && atomic_inc_return(&s_job->karma) > threshold);
}
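
/*
 * drm_sched_invalidate_job() bumps @s_job's karma and reports whether it
 * now exceeds @threshold.  A device-reset path might use it as a guilty
 * check before resubmitting the job, roughly (sketch only):
 *
 *	if (!drm_sched_invalidate_job(s_job, sched->hang_limit))
 *		... resubmit s_job ...
 */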

/**
 * struct drm_sched_backend_ops
 *
 * Define the backend operations called by the scheduler;
 * these functions should be implemented on the driver side.
 */
struct drm_sched_backend_ops {
	/**
	 * @dependency: Called when the scheduler is considering scheduling
	 * this job next, to get another struct dma_fence for this job to
	 * block on.  Once it returns NULL, run_job() may be called.
	 */
	struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
					struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved.  This may be called multiple times, if
	 * timedout_job() has happened and drm_sched_job_recovery()
	 * decides to try it again.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 */
	void (*timedout_job)(struct drm_sched_job *sched_job);

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);
};
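
/*
 * A skeletal driver-side implementation, with hypothetical my_* names
 * (only run_job is spelled out; the other callbacks follow the same
 * pattern, and my_hw_submit stands in for the driver's ring submission,
 * which returns the hardware fence):
 *
 *	static struct dma_fence *
 *	my_run_job(struct drm_sched_job *sched_job)
 *	{
 *		return my_hw_submit(sched_job);
 *	}
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.dependency = my_dependency,
 *		.run_job = my_run_job,
 *		.timedout_job = my_timedout_job,
 *		.free_job = my_free_job,
 *	};
 */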

/**
 * struct drm_gpu_scheduler
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: priority wise array of run queues.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once drm_sched_entity_flush() is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @ring_mirror_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the ring_mirror_list.
 * @hang_limit: once the hangs by a job cross this limit then it is marked
 *              guilty and it will not be considered for scheduling further.
 * @score: score to help the load balancer pick an idle sched.
 * @ready: marks if the underlying HW is ready to work.
 * @free_guilty: a hint to the timeout handler to free the guilty job.
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops	*ops;
	uint32_t			hw_submission_limit;
	long				timeout;
	const char			*name;
	struct drm_sched_rq		sched_rq[DRM_SCHED_PRIORITY_MAX];
	drm_waitqueue_t			wake_up_worker;
	drm_waitqueue_t			job_scheduled;
	atomic_t			hw_rq_count;
	atomic64_t			job_id_count;
	struct delayed_work		work_tdr;
	struct task_struct		*thread;
	struct list_head		ring_mirror_list;
	spinlock_t			job_list_lock;
	int				hang_limit;
	atomic_t                        score;
	bool				ready;
	bool				free_guilty;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit, long timeout,
		   const char *name);

void drm_sched_fini(struct drm_gpu_scheduler *sched);
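
/*
 * Typical bring-up pairs these two, one scheduler per hardware ring
 * (ring, my_sched_ops and the numeric limits below are hypothetical):
 *
 *	r = drm_sched_init(&ring->sched, &my_sched_ops,
 *	    num_hw_submission, hang_limit,
 *	    msecs_to_jiffies(timeout_ms), ring->name);
 *	if (r)
 *		return r;
 *	...
 *	drm_sched_fini(&ring->sched);
 */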
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner);
void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);
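
/*
 * The stop/karma/resubmit/start entry points above are meant to be
 * combined in a driver's timeout handler.  A hedged sketch of the usual
 * sequence; the hardware reset step in the middle is entirely
 * driver-specific:
 *
 *	drm_sched_stop(sched, bad_job);
 *	drm_sched_increase_karma(bad_job);
 *	... reset the hardware ...
 *	drm_sched_resubmit_jobs(sched);
 *	drm_sched_start(sched, true);
 */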

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity);

int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);

struct drm_sched_fence *drm_sched_fence_create(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining);

#endif