sched_entity.c revision 1.6
1/* 2 * Copyright 2015 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24#include <linux/kthread.h> 25#include <linux/slab.h> 26#include <linux/completion.h> 27 28#include <drm/drm_print.h> 29#include <drm/gpu_scheduler.h> 30 31#include "gpu_scheduler_trace.h" 32 33#define to_drm_sched_job(sched_job) \ 34 container_of((sched_job), struct drm_sched_job, queue_node) 35 36/** 37 * drm_sched_entity_init - Init a context entity used by scheduler when 38 * submit to HW ring. 39 * 40 * @entity: scheduler entity to init 41 * @priority: priority of the entity 42 * @sched_list: the list of drm scheds on which jobs from this 43 * entity can be submitted 44 * @num_sched_list: number of drm sched in sched_list 45 * @guilty: atomic_t set to 1 when a job on this queue 46 * is found to be guilty causing a timeout 47 * 48 * Note: the sched_list should have at least one element to schedule 49 * the entity 50 * 51 * Returns 0 on success or a negative error code on failure. 52 */ 53int drm_sched_entity_init(struct drm_sched_entity *entity, 54 enum drm_sched_priority priority, 55 struct drm_gpu_scheduler **sched_list, 56 unsigned int num_sched_list, 57 atomic_t *guilty) 58{ 59 if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0]))) 60 return -EINVAL; 61 62 memset(entity, 0, sizeof(struct drm_sched_entity)); 63 INIT_LIST_HEAD(&entity->list); 64 entity->rq = NULL; 65 entity->guilty = guilty; 66 entity->num_sched_list = num_sched_list; 67 entity->priority = priority; 68 entity->sched_list = num_sched_list > 1 ? sched_list : NULL; 69 entity->last_scheduled = NULL; 70 71 if(num_sched_list) 72 entity->rq = &sched_list[0]->sched_rq[entity->priority]; 73 74 init_completion(&entity->entity_idle); 75 76 /* We start in an idle state. */ 77 complete(&entity->entity_idle); 78 79 mtx_init(&entity->rq_lock, IPL_NONE); 80 spsc_queue_init(&entity->job_queue); 81 82 atomic_set(&entity->fence_seq, 0); 83 entity->fence_context = dma_fence_context_alloc(2); 84 85 return 0; 86} 87EXPORT_SYMBOL(drm_sched_entity_init); 88 89/** 90 * drm_sched_entity_modify_sched - Modify sched of an entity 91 * @entity: scheduler entity to init 92 * @sched_list: the list of new drm scheds which will replace 93 * existing entity->sched_list 94 * @num_sched_list: number of drm sched in sched_list 95 */ 96void drm_sched_entity_modify_sched(struct drm_sched_entity *entity, 97 struct drm_gpu_scheduler **sched_list, 98 unsigned int num_sched_list) 99{ 100 WARN_ON(!num_sched_list || !sched_list); 101 102 entity->sched_list = sched_list; 103 entity->num_sched_list = num_sched_list; 104} 105EXPORT_SYMBOL(drm_sched_entity_modify_sched); 106 107/** 108 * drm_sched_entity_is_idle - Check if entity is idle 109 * 110 * @entity: scheduler entity 111 * 112 * Returns true if the entity does not have any unscheduled jobs. 113 */ 114static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity) 115{ 116 rmb(); /* for list_empty to work without lock */ 117 118 if (list_empty(&entity->list) || 119 spsc_queue_count(&entity->job_queue) == 0 || 120 entity->stopped) 121 return true; 122 123 return false; 124} 125 126/** 127 * drm_sched_entity_is_ready - Check if entity is ready 128 * 129 * @entity: scheduler entity 130 * 131 * Return true if entity could provide a job. 132 */ 133bool drm_sched_entity_is_ready(struct drm_sched_entity *entity) 134{ 135 if (spsc_queue_peek(&entity->job_queue) == NULL) 136 return false; 137 138 if (READ_ONCE(entity->dependency)) 139 return false; 140 141 return true; 142} 143 144/** 145 * drm_sched_entity_flush - Flush a context entity 146 * 147 * @entity: scheduler entity 148 * @timeout: time to wait in for Q to become empty in jiffies. 149 * 150 * Splitting drm_sched_entity_fini() into two functions, The first one does the 151 * waiting, removes the entity from the runqueue and returns an error when the 152 * process was killed. 153 * 154 * Returns the remaining time in jiffies left from the input timeout 155 */ 156long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout) 157{ 158 struct drm_gpu_scheduler *sched; 159#ifdef __linux__ 160 struct task_struct *last_user; 161#else 162 struct process *last_user, *curpr; 163#endif 164 long ret = timeout; 165 166 if (!entity->rq) 167 return 0; 168 169 sched = entity->rq->sched; 170 /** 171 * The client will not queue more IBs during this fini, consume existing 172 * queued IBs or discard them on SIGKILL 173 */ 174#ifdef __linux__ 175 if (current->flags & PF_EXITING) { 176#else 177 curpr = curproc->p_p; 178 if (curpr->ps_flags & PS_EXITING) { 179#endif 180 if (timeout) 181 ret = wait_event_timeout( 182 sched->job_scheduled, 183 drm_sched_entity_is_idle(entity), 184 timeout); 185 } else { 186 wait_event_killable(sched->job_scheduled, 187 drm_sched_entity_is_idle(entity)); 188 } 189 190 /* For killed process disable any more IBs enqueue right now */ 191#ifdef __linux__ 192 last_user = cmpxchg(&entity->last_user, current->group_leader, NULL); 193 if ((!last_user || last_user == current->group_leader) && 194 (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) { 195#else 196 last_user = cmpxchg(&entity->last_user, curpr, NULL); 197 if ((!last_user || last_user == curproc->p_p) && 198 (curpr->ps_flags & PS_EXITING) && 199 (curpr->ps_xsig == SIGKILL)) { 200#endif 201 spin_lock(&entity->rq_lock); 202 entity->stopped = true; 203 drm_sched_rq_remove_entity(entity->rq, entity); 204 spin_unlock(&entity->rq_lock); 205 } 206 207 return ret; 208} 209EXPORT_SYMBOL(drm_sched_entity_flush); 210 211/** 212 * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs 213 * 214 * @f: signaled fence 215 * @cb: our callback structure 216 * 217 * Signal the scheduler finished fence when the entity in question is killed. 218 */ 219static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, 220 struct dma_fence_cb *cb) 221{ 222 struct drm_sched_job *job = container_of(cb, struct drm_sched_job, 223 finish_cb); 224 225 drm_sched_fence_finished(job->s_fence); 226 WARN_ON(job->s_fence->parent); 227 job->sched->ops->free_job(job); 228} 229 230/** 231 * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed 232 * 233 * @entity: entity which is cleaned up 234 * 235 * Makes sure that all remaining jobs in an entity are killed before it is 236 * destroyed. 237 */ 238static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity) 239{ 240 struct drm_sched_job *job; 241 struct dma_fence *f; 242 int r; 243 244 while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) { 245 struct drm_sched_fence *s_fence = job->s_fence; 246 247 /* Wait for all dependencies to avoid data corruptions */ 248 while ((f = job->sched->ops->dependency(job, entity))) 249 dma_fence_wait(f, false); 250 251 drm_sched_fence_scheduled(s_fence); 252 dma_fence_set_error(&s_fence->finished, -ESRCH); 253 254 /* 255 * When pipe is hanged by older entity, new entity might 256 * not even have chance to submit it's first job to HW 257 * and so entity->last_scheduled will remain NULL 258 */ 259 if (!entity->last_scheduled) { 260 drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); 261 continue; 262 } 263 264 r = dma_fence_add_callback(entity->last_scheduled, 265 &job->finish_cb, 266 drm_sched_entity_kill_jobs_cb); 267 if (r == -ENOENT) 268 drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); 269 else if (r) 270 DRM_ERROR("fence add callback failed (%d)\n", r); 271 } 272} 273 274/** 275 * drm_sched_entity_fini - Destroy a context entity 276 * 277 * @entity: scheduler entity 278 * 279 * This should be called after @drm_sched_entity_do_release. It goes over the 280 * entity and signals all jobs with an error code if the process was killed. 281 * 282 */ 283void drm_sched_entity_fini(struct drm_sched_entity *entity) 284{ 285 struct drm_gpu_scheduler *sched = NULL; 286 287 if (entity->rq) { 288 sched = entity->rq->sched; 289 drm_sched_rq_remove_entity(entity->rq, entity); 290 } 291 292 /* Consumption of existing IBs wasn't completed. Forcefully 293 * remove them here. 294 */ 295 if (spsc_queue_count(&entity->job_queue)) { 296 if (sched) { 297 /* 298 * Wait for thread to idle to make sure it isn't processing 299 * this entity. 300 */ 301 wait_for_completion(&entity->entity_idle); 302 303 } 304 if (entity->dependency) { 305 dma_fence_remove_callback(entity->dependency, 306 &entity->cb); 307 dma_fence_put(entity->dependency); 308 entity->dependency = NULL; 309 } 310 311 drm_sched_entity_kill_jobs(entity); 312 } 313 314 dma_fence_put(entity->last_scheduled); 315 entity->last_scheduled = NULL; 316} 317EXPORT_SYMBOL(drm_sched_entity_fini); 318 319/** 320 * drm_sched_entity_destroy - Destroy a context entity 321 * 322 * @entity: scheduler entity 323 * 324 * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup() 325 */ 326void drm_sched_entity_destroy(struct drm_sched_entity *entity) 327{ 328 drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY); 329 drm_sched_entity_fini(entity); 330} 331EXPORT_SYMBOL(drm_sched_entity_destroy); 332 333/* 334 * drm_sched_entity_clear_dep - callback to clear the entities dependency 335 */ 336static void drm_sched_entity_clear_dep(struct dma_fence *f, 337 struct dma_fence_cb *cb) 338{ 339 struct drm_sched_entity *entity = 340 container_of(cb, struct drm_sched_entity, cb); 341 342 entity->dependency = NULL; 343 dma_fence_put(f); 344} 345 346/* 347 * drm_sched_entity_clear_dep - callback to clear the entities dependency and 348 * wake up scheduler 349 */ 350static void drm_sched_entity_wakeup(struct dma_fence *f, 351 struct dma_fence_cb *cb) 352{ 353 struct drm_sched_entity *entity = 354 container_of(cb, struct drm_sched_entity, cb); 355 356 drm_sched_entity_clear_dep(f, cb); 357 drm_sched_wakeup(entity->rq->sched); 358} 359 360/** 361 * drm_sched_entity_set_priority - Sets priority of the entity 362 * 363 * @entity: scheduler entity 364 * @priority: scheduler priority 365 * 366 * Update the priority of runqueus used for the entity. 367 */ 368void drm_sched_entity_set_priority(struct drm_sched_entity *entity, 369 enum drm_sched_priority priority) 370{ 371 spin_lock(&entity->rq_lock); 372 entity->priority = priority; 373 spin_unlock(&entity->rq_lock); 374} 375EXPORT_SYMBOL(drm_sched_entity_set_priority); 376 377/** 378 * drm_sched_entity_add_dependency_cb - add callback for the entities dependency 379 * 380 * @entity: entity with dependency 381 * 382 * Add a callback to the current dependency of the entity to wake up the 383 * scheduler when the entity becomes available. 384 */ 385static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) 386{ 387 struct drm_gpu_scheduler *sched = entity->rq->sched; 388 struct dma_fence *fence = entity->dependency; 389 struct drm_sched_fence *s_fence; 390 391 if (fence->context == entity->fence_context || 392 fence->context == entity->fence_context + 1) { 393 /* 394 * Fence is a scheduled/finished fence from a job 395 * which belongs to the same entity, we can ignore 396 * fences from ourself 397 */ 398 dma_fence_put(entity->dependency); 399 return false; 400 } 401 402 s_fence = to_drm_sched_fence(fence); 403 if (s_fence && s_fence->sched == sched) { 404 405 /* 406 * Fence is from the same scheduler, only need to wait for 407 * it to be scheduled 408 */ 409 fence = dma_fence_get(&s_fence->scheduled); 410 dma_fence_put(entity->dependency); 411 entity->dependency = fence; 412 if (!dma_fence_add_callback(fence, &entity->cb, 413 drm_sched_entity_clear_dep)) 414 return true; 415 416 /* Ignore it when it is already scheduled */ 417 dma_fence_put(fence); 418 return false; 419 } 420 421 if (!dma_fence_add_callback(entity->dependency, &entity->cb, 422 drm_sched_entity_wakeup)) 423 return true; 424 425 dma_fence_put(entity->dependency); 426 return false; 427} 428 429/** 430 * drm_sched_entity_pop_job - get a ready to be scheduled job from the entity 431 * 432 * @entity: entity to get the job from 433 * 434 * Process all dependencies and try to get one job from the entities queue. 435 */ 436struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) 437{ 438 struct drm_gpu_scheduler *sched = entity->rq->sched; 439 struct drm_sched_job *sched_job; 440 441 sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue)); 442 if (!sched_job) 443 return NULL; 444 445 while ((entity->dependency = 446 sched->ops->dependency(sched_job, entity))) { 447 trace_drm_sched_job_wait_dep(sched_job, entity->dependency); 448 449 if (drm_sched_entity_add_dependency_cb(entity)) 450 return NULL; 451 } 452 453 /* skip jobs from entity that marked guilty */ 454 if (entity->guilty && atomic_read(entity->guilty)) 455 dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED); 456 457 dma_fence_put(entity->last_scheduled); 458 entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished); 459 460 spsc_queue_pop(&entity->job_queue); 461 return sched_job; 462} 463 464/** 465 * drm_sched_entity_select_rq - select a new rq for the entity 466 * 467 * @entity: scheduler entity 468 * 469 * Check all prerequisites and select a new rq for the entity for load 470 * balancing. 471 */ 472void drm_sched_entity_select_rq(struct drm_sched_entity *entity) 473{ 474 struct dma_fence *fence; 475 struct drm_gpu_scheduler *sched; 476 struct drm_sched_rq *rq; 477 478 if (spsc_queue_count(&entity->job_queue) || !entity->sched_list) 479 return; 480 481 fence = READ_ONCE(entity->last_scheduled); 482 if (fence && !dma_fence_is_signaled(fence)) 483 return; 484 485 spin_lock(&entity->rq_lock); 486 sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list); 487 rq = sched ? &sched->sched_rq[entity->priority] : NULL; 488 if (rq != entity->rq) { 489 drm_sched_rq_remove_entity(entity->rq, entity); 490 entity->rq = rq; 491 } 492 spin_unlock(&entity->rq_lock); 493 494 if (entity->num_sched_list == 1) 495 entity->sched_list = NULL; 496} 497 498/** 499 * drm_sched_entity_push_job - Submit a job to the entity's job queue 500 * 501 * @sched_job: job to submit 502 * @entity: scheduler entity 503 * 504 * Note: To guarantee that the order of insertion to queue matches 505 * the job's fence sequence number this function should be 506 * called with drm_sched_job_init under common lock. 507 * 508 * Returns 0 for success, negative error code otherwise. 509 */ 510void drm_sched_entity_push_job(struct drm_sched_job *sched_job, 511 struct drm_sched_entity *entity) 512{ 513 bool first; 514 515 trace_drm_sched_job(sched_job, entity); 516 atomic_inc(entity->rq->sched->score); 517#ifdef __linux__ 518 WRITE_ONCE(entity->last_user, current->group_leader); 519#else 520 WRITE_ONCE(entity->last_user, curproc->p_p); 521#endif 522 first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node); 523 524 /* first job wakes up scheduler */ 525 if (first) { 526 /* Add the entity to the run queue */ 527 spin_lock(&entity->rq_lock); 528 if (entity->stopped) { 529 spin_unlock(&entity->rq_lock); 530 531 DRM_ERROR("Trying to push to a killed entity\n"); 532 return; 533 } 534 drm_sched_rq_add_entity(entity->rq, entity); 535 spin_unlock(&entity->rq_lock); 536 drm_sched_wakeup(entity->rq->sched); 537 } 538} 539EXPORT_SYMBOL(drm_sched_entity_push_job); 540