sched_entity.c revision 1.7
1/* 2 * Copyright 2015 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24#include <linux/kthread.h> 25#include <linux/slab.h> 26#include <linux/completion.h> 27 28#include <drm/drm_print.h> 29#include <drm/gpu_scheduler.h> 30 31#include "gpu_scheduler_trace.h" 32 33#define to_drm_sched_job(sched_job) \ 34 container_of((sched_job), struct drm_sched_job, queue_node) 35 36/** 37 * drm_sched_entity_init - Init a context entity used by scheduler when 38 * submit to HW ring. 39 * 40 * @entity: scheduler entity to init 41 * @priority: priority of the entity 42 * @sched_list: the list of drm scheds on which jobs from this 43 * entity can be submitted 44 * @num_sched_list: number of drm sched in sched_list 45 * @guilty: atomic_t set to 1 when a job on this queue 46 * is found to be guilty causing a timeout 47 * 48 * Note that the &sched_list must have at least one element to schedule the entity. 49 * 50 * For changing @priority later on at runtime see 51 * drm_sched_entity_set_priority(). For changing the set of schedulers 52 * @sched_list at runtime see drm_sched_entity_modify_sched(). 53 * 54 * An entity is cleaned up by callind drm_sched_entity_fini(). See also 55 * drm_sched_entity_destroy(). 56 * 57 * Returns 0 on success or a negative error code on failure. 58 */ 59int drm_sched_entity_init(struct drm_sched_entity *entity, 60 enum drm_sched_priority priority, 61 struct drm_gpu_scheduler **sched_list, 62 unsigned int num_sched_list, 63 atomic_t *guilty) 64{ 65 if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0]))) 66 return -EINVAL; 67 68 memset(entity, 0, sizeof(struct drm_sched_entity)); 69 INIT_LIST_HEAD(&entity->list); 70 entity->rq = NULL; 71 entity->guilty = guilty; 72 entity->num_sched_list = num_sched_list; 73 entity->priority = priority; 74 entity->sched_list = num_sched_list > 1 ? sched_list : NULL; 75 entity->last_scheduled = NULL; 76 77 if(num_sched_list) 78 entity->rq = &sched_list[0]->sched_rq[entity->priority]; 79 80 init_completion(&entity->entity_idle); 81 82 /* We start in an idle state. */ 83 complete(&entity->entity_idle); 84 85 mtx_init(&entity->rq_lock, IPL_NONE); 86 spsc_queue_init(&entity->job_queue); 87 88 atomic_set(&entity->fence_seq, 0); 89 entity->fence_context = dma_fence_context_alloc(2); 90 91 return 0; 92} 93EXPORT_SYMBOL(drm_sched_entity_init); 94 95/** 96 * drm_sched_entity_modify_sched - Modify sched of an entity 97 * @entity: scheduler entity to init 98 * @sched_list: the list of new drm scheds which will replace 99 * existing entity->sched_list 100 * @num_sched_list: number of drm sched in sched_list 101 * 102 * Note that this must be called under the same common lock for @entity as 103 * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to 104 * guarantee through some other means that this is never called while new jobs 105 * can be pushed to @entity. 106 */ 107void drm_sched_entity_modify_sched(struct drm_sched_entity *entity, 108 struct drm_gpu_scheduler **sched_list, 109 unsigned int num_sched_list) 110{ 111 WARN_ON(!num_sched_list || !sched_list); 112 113 entity->sched_list = sched_list; 114 entity->num_sched_list = num_sched_list; 115} 116EXPORT_SYMBOL(drm_sched_entity_modify_sched); 117 118static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity) 119{ 120 rmb(); /* for list_empty to work without lock */ 121 122 if (list_empty(&entity->list) || 123 spsc_queue_count(&entity->job_queue) == 0 || 124 entity->stopped) 125 return true; 126 127 return false; 128} 129 130/* Return true if entity could provide a job. */ 131bool drm_sched_entity_is_ready(struct drm_sched_entity *entity) 132{ 133 if (spsc_queue_peek(&entity->job_queue) == NULL) 134 return false; 135 136 if (READ_ONCE(entity->dependency)) 137 return false; 138 139 return true; 140} 141 142/** 143 * drm_sched_entity_flush - Flush a context entity 144 * 145 * @entity: scheduler entity 146 * @timeout: time to wait in for Q to become empty in jiffies. 147 * 148 * Splitting drm_sched_entity_fini() into two functions, The first one does the 149 * waiting, removes the entity from the runqueue and returns an error when the 150 * process was killed. 151 * 152 * Returns the remaining time in jiffies left from the input timeout 153 */ 154long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout) 155{ 156 struct drm_gpu_scheduler *sched; 157#ifdef __linux__ 158 struct task_struct *last_user; 159#else 160 struct process *last_user, *curpr; 161#endif 162 long ret = timeout; 163 164 if (!entity->rq) 165 return 0; 166 167 sched = entity->rq->sched; 168 /** 169 * The client will not queue more IBs during this fini, consume existing 170 * queued IBs or discard them on SIGKILL 171 */ 172#ifdef __linux__ 173 if (current->flags & PF_EXITING) { 174#else 175 curpr = curproc->p_p; 176 if (curpr->ps_flags & PS_EXITING) { 177#endif 178 if (timeout) 179 ret = wait_event_timeout( 180 sched->job_scheduled, 181 drm_sched_entity_is_idle(entity), 182 timeout); 183 } else { 184 wait_event_killable(sched->job_scheduled, 185 drm_sched_entity_is_idle(entity)); 186 } 187 188 /* For killed process disable any more IBs enqueue right now */ 189#ifdef __linux__ 190 last_user = cmpxchg(&entity->last_user, current->group_leader, NULL); 191 if ((!last_user || last_user == current->group_leader) && 192 (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) { 193#else 194 last_user = cmpxchg(&entity->last_user, curpr, NULL); 195 if ((!last_user || last_user == curproc->p_p) && 196 (curpr->ps_flags & PS_EXITING) && 197 (curpr->ps_xsig == SIGKILL)) { 198#endif 199 spin_lock(&entity->rq_lock); 200 entity->stopped = true; 201 drm_sched_rq_remove_entity(entity->rq, entity); 202 spin_unlock(&entity->rq_lock); 203 } 204 205 return ret; 206} 207EXPORT_SYMBOL(drm_sched_entity_flush); 208 209static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk) 210{ 211 struct drm_sched_job *job = container_of(wrk, typeof(*job), work); 212 213 drm_sched_fence_finished(job->s_fence); 214 WARN_ON(job->s_fence->parent); 215 job->sched->ops->free_job(job); 216} 217 218 219/* Signal the scheduler finished fence when the entity in question is killed. */ 220static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, 221 struct dma_fence_cb *cb) 222{ 223 struct drm_sched_job *job = container_of(cb, struct drm_sched_job, 224 finish_cb); 225 226 dma_fence_put(f); 227 INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work); 228 schedule_work(&job->work); 229} 230 231static struct dma_fence * 232drm_sched_job_dependency(struct drm_sched_job *job, 233 struct drm_sched_entity *entity) 234{ 235 if (!xa_empty(&job->dependencies)) 236 return xa_erase(&job->dependencies, job->last_dependency++); 237 238 if (job->sched->ops->dependency) 239 return job->sched->ops->dependency(job, entity); 240 241 return NULL; 242} 243 244static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity) 245{ 246 struct drm_sched_job *job; 247 struct dma_fence *f; 248 int r; 249 250 while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) { 251 struct drm_sched_fence *s_fence = job->s_fence; 252 253 /* Wait for all dependencies to avoid data corruptions */ 254 while ((f = drm_sched_job_dependency(job, entity))) { 255 dma_fence_wait(f, false); 256 dma_fence_put(f); 257 } 258 259 drm_sched_fence_scheduled(s_fence); 260 dma_fence_set_error(&s_fence->finished, -ESRCH); 261 262 /* 263 * When pipe is hanged by older entity, new entity might 264 * not even have chance to submit it's first job to HW 265 * and so entity->last_scheduled will remain NULL 266 */ 267 if (!entity->last_scheduled) { 268 drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); 269 continue; 270 } 271 272 dma_fence_get(entity->last_scheduled); 273 r = dma_fence_add_callback(entity->last_scheduled, 274 &job->finish_cb, 275 drm_sched_entity_kill_jobs_cb); 276 if (r == -ENOENT) 277 drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); 278 else if (r) 279 DRM_ERROR("fence add callback failed (%d)\n", r); 280 } 281} 282 283/** 284 * drm_sched_entity_fini - Destroy a context entity 285 * 286 * @entity: scheduler entity 287 * 288 * Cleanups up @entity which has been initialized by drm_sched_entity_init(). 289 * 290 * If there are potentially job still in flight or getting newly queued 291 * drm_sched_entity_flush() must be called first. This function then goes over 292 * the entity and signals all jobs with an error code if the process was killed. 293 */ 294void drm_sched_entity_fini(struct drm_sched_entity *entity) 295{ 296 struct drm_gpu_scheduler *sched = NULL; 297 298 if (entity->rq) { 299 sched = entity->rq->sched; 300 drm_sched_rq_remove_entity(entity->rq, entity); 301 } 302 303 /* Consumption of existing IBs wasn't completed. Forcefully 304 * remove them here. 305 */ 306 if (spsc_queue_count(&entity->job_queue)) { 307 if (sched) { 308 /* 309 * Wait for thread to idle to make sure it isn't processing 310 * this entity. 311 */ 312 wait_for_completion(&entity->entity_idle); 313 314 } 315 if (entity->dependency) { 316 dma_fence_remove_callback(entity->dependency, 317 &entity->cb); 318 dma_fence_put(entity->dependency); 319 entity->dependency = NULL; 320 } 321 322 drm_sched_entity_kill_jobs(entity); 323 } 324 325 dma_fence_put(entity->last_scheduled); 326 entity->last_scheduled = NULL; 327} 328EXPORT_SYMBOL(drm_sched_entity_fini); 329 330/** 331 * drm_sched_entity_destroy - Destroy a context entity 332 * @entity: scheduler entity 333 * 334 * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a 335 * convenience wrapper. 336 */ 337void drm_sched_entity_destroy(struct drm_sched_entity *entity) 338{ 339 drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY); 340 drm_sched_entity_fini(entity); 341} 342EXPORT_SYMBOL(drm_sched_entity_destroy); 343 344/* drm_sched_entity_clear_dep - callback to clear the entities dependency */ 345static void drm_sched_entity_clear_dep(struct dma_fence *f, 346 struct dma_fence_cb *cb) 347{ 348 struct drm_sched_entity *entity = 349 container_of(cb, struct drm_sched_entity, cb); 350 351 entity->dependency = NULL; 352 dma_fence_put(f); 353} 354 355/* 356 * drm_sched_entity_clear_dep - callback to clear the entities dependency and 357 * wake up scheduler 358 */ 359static void drm_sched_entity_wakeup(struct dma_fence *f, 360 struct dma_fence_cb *cb) 361{ 362 struct drm_sched_entity *entity = 363 container_of(cb, struct drm_sched_entity, cb); 364 365 drm_sched_entity_clear_dep(f, cb); 366 drm_sched_wakeup(entity->rq->sched); 367} 368 369/** 370 * drm_sched_entity_set_priority - Sets priority of the entity 371 * 372 * @entity: scheduler entity 373 * @priority: scheduler priority 374 * 375 * Update the priority of runqueus used for the entity. 376 */ 377void drm_sched_entity_set_priority(struct drm_sched_entity *entity, 378 enum drm_sched_priority priority) 379{ 380 spin_lock(&entity->rq_lock); 381 entity->priority = priority; 382 spin_unlock(&entity->rq_lock); 383} 384EXPORT_SYMBOL(drm_sched_entity_set_priority); 385 386/* 387 * Add a callback to the current dependency of the entity to wake up the 388 * scheduler when the entity becomes available. 389 */ 390static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) 391{ 392 struct drm_gpu_scheduler *sched = entity->rq->sched; 393 struct dma_fence *fence = entity->dependency; 394 struct drm_sched_fence *s_fence; 395 396 if (fence->context == entity->fence_context || 397 fence->context == entity->fence_context + 1) { 398 /* 399 * Fence is a scheduled/finished fence from a job 400 * which belongs to the same entity, we can ignore 401 * fences from ourself 402 */ 403 dma_fence_put(entity->dependency); 404 return false; 405 } 406 407 s_fence = to_drm_sched_fence(fence); 408 if (s_fence && s_fence->sched == sched && 409 !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) { 410 411 /* 412 * Fence is from the same scheduler, only need to wait for 413 * it to be scheduled 414 */ 415 fence = dma_fence_get(&s_fence->scheduled); 416 dma_fence_put(entity->dependency); 417 entity->dependency = fence; 418 if (!dma_fence_add_callback(fence, &entity->cb, 419 drm_sched_entity_clear_dep)) 420 return true; 421 422 /* Ignore it when it is already scheduled */ 423 dma_fence_put(fence); 424 return false; 425 } 426 427 if (!dma_fence_add_callback(entity->dependency, &entity->cb, 428 drm_sched_entity_wakeup)) 429 return true; 430 431 dma_fence_put(entity->dependency); 432 return false; 433} 434 435struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) 436{ 437 struct drm_sched_job *sched_job; 438 439 sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue)); 440 if (!sched_job) 441 return NULL; 442 443 while ((entity->dependency = 444 drm_sched_job_dependency(sched_job, entity))) { 445 trace_drm_sched_job_wait_dep(sched_job, entity->dependency); 446 447 if (drm_sched_entity_add_dependency_cb(entity)) 448 return NULL; 449 } 450 451 /* skip jobs from entity that marked guilty */ 452 if (entity->guilty && atomic_read(entity->guilty)) 453 dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED); 454 455 dma_fence_put(entity->last_scheduled); 456 457 entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished); 458 459 /* 460 * If the queue is empty we allow drm_sched_entity_select_rq() to 461 * locklessly access ->last_scheduled. This only works if we set the 462 * pointer before we dequeue and if we a write barrier here. 463 */ 464 smp_wmb(); 465 466 spsc_queue_pop(&entity->job_queue); 467 return sched_job; 468} 469 470void drm_sched_entity_select_rq(struct drm_sched_entity *entity) 471{ 472 struct dma_fence *fence; 473 struct drm_gpu_scheduler *sched; 474 struct drm_sched_rq *rq; 475 476 /* single possible engine and already selected */ 477 if (!entity->sched_list) 478 return; 479 480 /* queue non-empty, stay on the same engine */ 481 if (spsc_queue_count(&entity->job_queue)) 482 return; 483 484 /* 485 * Only when the queue is empty are we guaranteed that the scheduler 486 * thread cannot change ->last_scheduled. To enforce ordering we need 487 * a read barrier here. See drm_sched_entity_pop_job() for the other 488 * side. 489 */ 490 smp_rmb(); 491 492 fence = entity->last_scheduled; 493 494 /* stay on the same engine if the previous job hasn't finished */ 495 if (fence && !dma_fence_is_signaled(fence)) 496 return; 497 498 spin_lock(&entity->rq_lock); 499 sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list); 500 rq = sched ? &sched->sched_rq[entity->priority] : NULL; 501 if (rq != entity->rq) { 502 drm_sched_rq_remove_entity(entity->rq, entity); 503 entity->rq = rq; 504 } 505 spin_unlock(&entity->rq_lock); 506 507 if (entity->num_sched_list == 1) 508 entity->sched_list = NULL; 509} 510 511/** 512 * drm_sched_entity_push_job - Submit a job to the entity's job queue 513 * @sched_job: job to submit 514 * 515 * Note: To guarantee that the order of insertion to queue matches the job's 516 * fence sequence number this function should be called with drm_sched_job_arm() 517 * under common lock for the struct drm_sched_entity that was set up for 518 * @sched_job in drm_sched_job_init(). 519 * 520 * Returns 0 for success, negative error code otherwise. 521 */ 522void drm_sched_entity_push_job(struct drm_sched_job *sched_job) 523{ 524 struct drm_sched_entity *entity = sched_job->entity; 525 bool first; 526 527 trace_drm_sched_job(sched_job, entity); 528 atomic_inc(entity->rq->sched->score); 529#ifdef __linux__ 530 WRITE_ONCE(entity->last_user, current->group_leader); 531#else 532 WRITE_ONCE(entity->last_user, curproc->p_p); 533#endif 534 first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node); 535 536 /* first job wakes up scheduler */ 537 if (first) { 538 /* Add the entity to the run queue */ 539 spin_lock(&entity->rq_lock); 540 if (entity->stopped) { 541 spin_unlock(&entity->rq_lock); 542 543 DRM_ERROR("Trying to push to a killed entity\n"); 544 return; 545 } 546 drm_sched_rq_add_entity(entity->rq, entity); 547 spin_unlock(&entity->rq_lock); 548 drm_sched_wakeup(entity->rq->sched); 549 } 550} 551EXPORT_SYMBOL(drm_sched_entity_push_job); 552