sched_entity.c revision 1.5
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)				\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Init a context entity used by the scheduler
 * when submitting jobs to a HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *              entity can be submitted
 * @num_sched_list: number of drm scheds in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note: the sched_list should have at least one element to schedule
 *       the entity
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	entity->last_scheduled = NULL;

	if (num_sched_list)
		entity->rq = &sched_list[0]->sched_rq[entity->priority];

	init_completion(&entity->entity_idle);

	mtx_init(&entity->rq_lock, IPL_NONE);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
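
/*
 * Example: minimal entity setup in a driver. This is an illustrative
 * sketch only; "ring_sched" and "ctx" are hypothetical driver state,
 * not part of this file:
 *
 *	struct drm_gpu_scheduler *scheds[] = { &ring_sched };
 *	int ret;
 *
 *	ret = drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *				    scheds, ARRAY_SIZE(scheds), NULL);
 *	if (ret)
 *		return ret;
 */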

/**
 * drm_sched_entity_modify_sched - Modify sched of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *		the existing entity->sched_list
 * @num_sched_list: number of drm scheds in sched_list
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);

/**
 * drm_sched_entity_is_idle - Check if entity is idle
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity does not have any unscheduled jobs.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0 ||
	    entity->stopped)
		return true;

	return false;
}

/**
 * drm_sched_entity_is_ready - Check if entity is ready
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity could provide a job.
 */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait in jiffies for the queue to become empty
 *
 * drm_sched_entity_fini() is split into two functions: this first one does
 * the waiting, removes the entity from the runqueue and returns an error
 * when the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
#ifdef __linux__
	struct task_struct *last_user;
#else
	struct process *last_user, *curpr;
#endif
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini; consume
	 * existing queued IBs or discard them on SIGKILL
	 */
#ifdef __linux__
	if (current->flags & PF_EXITING) {
#else
	curpr = curproc->p_p;
	if (curpr->ps_flags & PS_EXITING) {
#endif
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process, disable any further IB enqueue right now */
#ifdef __linux__
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) {
#else
	last_user = cmpxchg(&entity->last_user, curpr, NULL);
	if ((!last_user || last_user == curproc->p_p) &&
	    (curpr->ps_flags & PS_EXITING) &&
	    (curpr->ps_xsig == SIGKILL)) {
#endif
		spin_lock(&entity->rq_lock);
		entity->stopped = true;
		drm_sched_rq_remove_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
	}

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);
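
/*
 * Example: flushing an entity when a file descriptor is closed. An
 * illustrative sketch; "ctx" is a hypothetical driver context:
 *
 *	long timeout = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
 *
 *	timeout = drm_sched_entity_flush(&ctx->entity, timeout);
 *
 * A zero return with a non-zero input timeout can mean the queue did not
 * drain in time (or that the entity had no runqueue to begin with).
 */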

/**
 * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
 *
 * @f: signaled fence
 * @cb: our callback structure
 *
 * Signal the scheduler finished fence when the entity in question is killed.
 */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);

	drm_sched_fence_finished(job->s_fence);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/**
 * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
 *
 * @entity: entity which is cleaned up
 *
 * Makes sure that all remaining jobs in an entity are killed before it is
 * destroyed.
 */
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	struct dma_fence *f;
	int r;

	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		/* Wait for all dependencies to avoid data corruptions */
		while ((f = job->sched->ops->dependency(job, entity)))
			dma_fence_wait(f, false);

		drm_sched_fence_scheduled(s_fence);
		dma_fence_set_error(&s_fence->finished, -ESRCH);

		/*
		 * When the pipe is hung by an older entity, a new entity
		 * might not even get the chance to submit its first job to
		 * the HW, so entity->last_scheduled will remain NULL
		 */
		if (!entity->last_scheduled) {
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
			continue;
		}

		r = dma_fence_add_callback(entity->last_scheduled,
					   &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb);
		if (r == -ENOENT)
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		else if (r)
			DRM_ERROR("fence add callback failed (%d)\n", r);
	}
}
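
/*
 * The ->dependency() hook consulted above (and in
 * drm_sched_entity_pop_job() below) is expected to hand back the next
 * unsatisfied fence for the job, or NULL once there are none left. A
 * minimal sketch of such a hook; "struct my_job" and its dependency
 * array are hypothetical, not part of this file:
 *
 *	static struct dma_fence *
 *	my_dependency(struct drm_sched_job *sched_job,
 *		      struct drm_sched_entity *entity)
 *	{
 *		struct my_job *job = container_of(sched_job,
 *						  struct my_job, base);
 *
 *		if (job->next_dep < job->num_deps)
 *			return job->deps[job->next_dep++];
 *		return NULL;
 *	}
 */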

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * This should be called after drm_sched_entity_flush(). It goes over the
 * entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = NULL;

	if (entity->rq) {
		sched = entity->rq->sched;
		drm_sched_rq_remove_entity(entity->rq, entity);
	}

	/* Consumption of existing IBs wasn't completed. Forcefully
	 * remove them here.
	 */
	if (spsc_queue_count(&entity->job_queue)) {
		if (sched) {
			/*
			 * Wait for the thread to idle to make sure it isn't
			 * processing this entity.
			 */
			wait_for_completion(&entity->entity_idle);

		}
		if (entity->dependency) {
			dma_fence_remove_callback(entity->dependency,
						  &entity->cb);
			dma_fence_put(entity->dependency);
			entity->dependency = NULL;
		}

		drm_sched_entity_kill_jobs(entity);
	}

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = NULL;
}
EXPORT_SYMBOL(drm_sched_entity_fini);
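
/*
 * Example: full teardown of an entity. This is exactly the sequence that
 * drm_sched_entity_destroy() below performs:
 *
 *	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
 *	drm_sched_entity_fini(entity);
 *
 * Drivers that need a custom flush timeout can call the two functions
 * separately instead of using drm_sched_entity_destroy().
 */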

/**
 * drm_sched_entity_destroy - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini()
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);

/**
 * drm_sched_entity_clear_dep - callback to clear the entity's dependency
 */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/**
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
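
/*
 * Example: lowering an entity's priority at runtime. An illustrative
 * sketch; "ctx" is a hypothetical driver context:
 *
 *	drm_sched_entity_set_priority(&ctx->entity, DRM_SCHED_PRIORITY_MIN);
 *
 * Note that in this revision only entity->priority is updated; the new
 * value takes effect when drm_sched_entity_select_rq() next picks a
 * runqueue for the entity.
 */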

/**
 * drm_sched_entity_add_dependency_cb - add callback for the entity's dependency
 *
 * @entity: entity with dependency
 *
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity, we can ignore
		 * fences from ourselves
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

/**
 * drm_sched_entity_pop_job - get a ready-to-be-scheduled job from the entity
 *
 * @entity: entity to get the job from
 *
 * Process all dependencies and try to get one job from the entity's queue.
 */
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			sched->ops->dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* skip jobs from an entity that is marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}
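
/*
 * drm_sched_entity_pop_job() is consumed by the scheduler main loop in
 * sched_main.c. Roughly (an illustrative paraphrase, not the verbatim
 * loop):
 *
 *	entity = drm_sched_select_entity(sched);
 *	if (entity) {
 *		sched_job = drm_sched_entity_pop_job(entity);
 *		complete(&entity->entity_idle);
 *	}
 *
 * A NULL return means either the queue was empty or the head job still
 * has an unmet dependency, for which a wakeup callback has been armed by
 * drm_sched_entity_add_dependency_cb().
 */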

/**
 * drm_sched_entity_select_rq - select a new rq for the entity
 *
 * @entity: scheduler entity
 *
 * Check all prerequisites and select a new rq for the entity for load
 * balancing.
 */
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq;

	if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
		return;

	fence = READ_ONCE(entity->last_scheduled);
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->rq_lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? &sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}

	spin_unlock(&entity->rq_lock);
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 *
 * @sched_job: job to submit
 * @entity: scheduler entity
 *
 * Note: To guarantee that the order of insertion to the queue matches the
 * job's fence sequence number, this function should be called with
 * drm_sched_job_init() under a common lock.
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity)
{
	bool first;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(&entity->rq->sched->score);
#ifdef __linux__
	WRITE_ONCE(entity->last_user, current->group_leader);
#else
	WRITE_ONCE(entity->last_user, curproc->p_p);
#endif
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}
		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		drm_sched_wakeup(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
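
/*
 * Example: typical submission path in a driver. An illustrative sketch;
 * "ctx", "job" and "owner" are hypothetical and error handling is
 * trimmed:
 *
 *	mutex_lock(&ctx->submit_lock);
 *	ret = drm_sched_job_init(&job->base, &ctx->entity, owner);
 *	if (ret) {
 *		mutex_unlock(&ctx->submit_lock);
 *		return ret;
 *	}
 *	drm_sched_entity_push_job(&job->base, &ctx->entity);
 *	mutex_unlock(&ctx->submit_lock);
 *
 * Holding one lock across both calls keeps the fence sequence numbers
 * assigned by drm_sched_job_init() in the same order as the queue
 * insertions, as required by the note above.
 */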