sched_entity.c revision 1.4
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
	container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting to a HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *              entity can be submitted
 * @num_sched_list: number of drm scheds in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note: the sched_list should have at least one element to schedule
 *       the entity
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	entity->last_scheduled = NULL;

	if (num_sched_list)
		entity->rq = &sched_list[0]->sched_rq[entity->priority];

	init_completion(&entity->entity_idle);

	mtx_init(&entity->rq_lock, IPL_NONE);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
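
#if 0	/* illustrative sketch, compiled out */
/*
 * Example: a minimal sketch of how a driver might wire one entity to a
 * single scheduler instance. "struct my_device" and the function below are
 * hypothetical; only the drm_sched_entity_init() call reflects the API in
 * this file. Note that the entity allocates two fence contexts above: one
 * for its scheduled fences and one for its finished fences.
 */
struct my_device {
	struct drm_gpu_scheduler sched;
	struct drm_sched_entity entity;
};

static int my_device_entity_setup(struct my_device *mdev)
{
	struct drm_gpu_scheduler *sched_list[] = { &mdev->sched };

	/* One scheduler, default priority, no guilty tracking. */
	return drm_sched_entity_init(&mdev->entity,
				     DRM_SCHED_PRIORITY_NORMAL,
				     sched_list, ARRAY_SIZE(sched_list),
				     NULL);
}
#endif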

/**
 * drm_sched_entity_modify_sched - Modify the sched of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *		the existing entity->sched_list
 * @num_sched_list: number of drm scheds in sched_list
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);

/**
 * drm_sched_entity_is_idle - Check if entity is idle
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity does not have any unscheduled jobs.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0 ||
	    entity->stopped)
		return true;

	return false;
}

/**
 * drm_sched_entity_is_ready - Check if entity is ready
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity can provide a job.
 */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait, in jiffies, for the queue to become empty
 *
 * drm_sched_entity_fini() is split into two functions. This first one does
 * the waiting, removes the entity from the runqueue and returns an error
 * when the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
#ifdef __linux__
	struct task_struct *last_user;
#else
	struct process *last_user, *curpr;
#endif
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini; consume the
	 * existing queued IBs or discard them on SIGKILL
	 */
#ifdef __linux__
	if (current->flags & PF_EXITING) {
#else
	curpr = curproc->p_p;
	if (curpr->ps_flags & PS_EXITING) {
#endif
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process, disable any further IB enqueue right now */
#ifdef __linux__
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) {
#else
	last_user = cmpxchg(&entity->last_user, curpr, NULL);
	if ((!last_user || last_user == curproc->p_p) &&
	    (curpr->ps_flags & PS_EXITING) &&
	    (curpr->ps_xsig == SIGKILL)) {
#endif
		spin_lock(&entity->rq_lock);
		entity->stopped = true;
		drm_sched_rq_remove_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
	}

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);
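
#if 0	/* illustrative sketch, compiled out */
/*
 * Example: the usual teardown ordering, mirroring what
 * drm_sched_entity_destroy() below does with MAX_WAIT_SCHED_ENTITY_Q_EMPTY.
 * Flushing first gives queued jobs a bounded chance to reach the hardware
 * before the entity is torn down; the one-second timeout is illustrative,
 * and "struct my_device" reuses the hypothetical type from the earlier
 * sketch.
 */
static void my_device_entity_teardown(struct my_device *mdev)
{
	/* Wait up to a second for the job queue to drain... */
	drm_sched_entity_flush(&mdev->entity, msecs_to_jiffies(1000));
	/* ...then signal and free whatever jobs are left. */
	drm_sched_entity_fini(&mdev->entity);
}
#endif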

/**
 * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
 *
 * @f: signaled fence
 * @cb: our callback structure
 *
 * Signal the scheduler finished fence when the entity in question is killed.
 */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);

	drm_sched_fence_finished(job->s_fence);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/**
 * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
 *
 * @entity: entity which is cleaned up
 *
 * Makes sure that all remaining jobs in an entity are killed before it is
 * destroyed.
 */
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	int r;

	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		drm_sched_fence_scheduled(s_fence);
		dma_fence_set_error(&s_fence->finished, -ESRCH);

		/*
		 * When the pipeline is hung by an older entity, a new entity
		 * may not even have had a chance to submit its first job to
		 * the HW, so entity->last_scheduled will remain NULL
		 */
		if (!entity->last_scheduled) {
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
			continue;
		}

		r = dma_fence_add_callback(entity->last_scheduled,
					   &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb);
		if (r == -ENOENT)
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		else if (r)
			DRM_ERROR("fence add callback failed (%d)\n", r);
	}
}

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * This should be called after drm_sched_entity_flush(). It goes over the
 * entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = NULL;

	if (entity->rq) {
		sched = entity->rq->sched;
		drm_sched_rq_remove_entity(entity->rq, entity);
	}

	/* Consumption of existing IBs wasn't completed. Forcefully
	 * remove them here.
	 */
	if (spsc_queue_count(&entity->job_queue)) {
		if (sched) {
			/*
			 * Wait for the thread to go idle to make sure it
			 * isn't processing this entity.
			 */
			wait_for_completion(&entity->entity_idle);

		}
		if (entity->dependency) {
			dma_fence_remove_callback(entity->dependency,
						  &entity->cb);
			dma_fence_put(entity->dependency);
			entity->dependency = NULL;
		}

		drm_sched_entity_kill_jobs(entity);
	}

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = NULL;
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini()
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);

/**
 * drm_sched_entity_clear_dep - callback to clear the entity's dependency
 */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/**
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of the runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
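
#if 0	/* illustrative sketch, compiled out */
/*
 * Example: a hypothetical context-priority control reduced to its core.
 * In this version the call only updates entity->priority under the lock;
 * with more than one scheduler in the sched_list,
 * drm_sched_entity_select_rq() below applies the new priority the next
 * time it picks a run queue. "my_device" stays hypothetical.
 */
static void my_device_set_ctx_priority(struct my_device *mdev, bool high)
{
	drm_sched_entity_set_priority(&mdev->entity,
				      high ? DRM_SCHED_PRIORITY_HIGH
					   : DRM_SCHED_PRIORITY_NORMAL);
}
#endif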

/**
 * drm_sched_entity_add_dependency_cb - add callback for the entity's dependency
 *
 * @entity: entity with dependency
 *
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity, we can ignore
		 * fences from ourselves
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

/**
 * drm_sched_entity_pop_job - get a ready-to-be-scheduled job from the entity
 *
 * @entity: entity to get the job from
 *
 * Process all dependencies and try to get one job from the entity's queue.
 */
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			sched->ops->dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* skip jobs from an entity marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}
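
#if 0	/* illustrative sketch, compiled out */
/*
 * Example: a minimal sketch of the ops->dependency hook polled by
 * drm_sched_entity_pop_job() above. The scheduler calls it repeatedly until
 * it returns NULL; every fence it returns blocks the job until that fence
 * signals. "struct my_job" and its fence array are hypothetical.
 */
struct my_job {
	struct drm_sched_job base;
	struct dma_fence **deps;
	unsigned int num_deps;
	unsigned int next_dep;
};

static struct dma_fence *my_dependency(struct drm_sched_job *sched_job,
				       struct drm_sched_entity *entity)
{
	struct my_job *job = container_of(sched_job, struct my_job, base);

	/* Hand out the prerequisite fences one at a time. */
	if (job->next_dep < job->num_deps)
		return job->deps[job->next_dep++];

	return NULL;	/* no dependencies left, the job may run */
}
#endif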

/**
 * drm_sched_entity_select_rq - select a new rq for the entity
 *
 * @entity: scheduler entity
 *
 * Check all prerequisites and select a new rq for the entity for load
 * balancing.
 */
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq;

	if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
		return;

	fence = READ_ONCE(entity->last_scheduled);
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->rq_lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? &sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}

	spin_unlock(&entity->rq_lock);
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 *
 * @sched_job: job to submit
 * @entity: scheduler entity
 *
 * Note: To guarantee that the order of insertion into the queue matches the
 * job's fence sequence number, this function should be called with
 * drm_sched_job_init() under a common lock.
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity)
{
	bool first;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(&entity->rq->sched->score);
#ifdef __linux__
	WRITE_ONCE(entity->last_user, current->group_leader);
#else
	WRITE_ONCE(entity->last_user, curproc->p_p);
#endif
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}
		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		drm_sched_wakeup(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
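
#if 0	/* illustrative sketch, compiled out */
/*
 * Example: the submission pattern the push_job comment above asks for.
 * drm_sched_job_init() assigns the job's fence sequence number, so the init
 * and the push must happen under one common lock, or the queue order could
 * disagree with the fence sequence numbers. "mdev->submit_lock" and the
 * structs reuse the hypothetical types from the earlier sketches.
 */
static int my_device_submit(struct my_device *mdev, struct my_job *job)
{
	int ret;

	mutex_lock(&mdev->submit_lock);
	ret = drm_sched_job_init(&job->base, &mdev->entity, NULL);
	if (ret == 0)
		drm_sched_entity_push_job(&job->base, &mdev->entity);
	mutex_unlock(&mdev->submit_lock);

	return ret;
}
#endif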