Lines matching refs:sched

76 #include <linux/sched.h>
79 #include <uapi/linux/sched/types.h>
102 static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched)
106 drm_WARN_ON(sched, check_sub_overflow(sched->credit_limit,
107 atomic_read(&sched->credit_count),
115 * @sched: scheduler instance
121 static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
130 if (sched->ops->update_job_credits) {
131 s_job->credits = sched->ops->update_job_credits(s_job);
133 drm_WARN(sched, !s_job->credits,
140 if (drm_WARN(sched, s_job->credits > sched->credit_limit,
142 s_job->credits = sched->credit_limit;
144 return drm_sched_available_credits(sched) >= s_job->credits;
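The fragments at 130-144 show the scheduler consulting an optional ops->update_job_credits() hook, warning if it returns 0 and clamping anything above sched->credit_limit. As a rough illustration only, a driver-side hook could look like the sketch below; struct my_job and its ring_slots_needed field are hypothetical, while struct drm_sched_job and the hook itself come from the listing above.

#include <drm/gpu_scheduler.h>
#include <linux/container_of.h>
#include <linux/minmax.h>

/* Hypothetical driver job wrapping struct drm_sched_job. */
struct my_job {
        struct drm_sched_job base;
        u32 ring_slots_needed;  /* hypothetical per-job cost */
};

/*
 * Optional hook queried from drm_sched_can_queue() (130-144): return the
 * job's current credit cost; the scheduler warns on 0 and clamps values
 * above sched->credit_limit.
 */
static u32 my_update_job_credits(struct drm_sched_job *sched_job)
{
        struct my_job *job = container_of(sched_job, struct my_job, base);

        return max_t(u32, job->ring_slots_needed, 1);
}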
190 * @sched: scheduler instance to associate with this run queue
195 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
202 rq->sched = sched;
221 atomic_inc(rq->sched->score);
243 atomic_dec(rq->sched->score);
258 * @sched: the gpu scheduler
268 drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched,
282 if (!drm_sched_can_queue(sched, entity)) {
300 if (!drm_sched_can_queue(sched, entity)) {
323 * @sched: the gpu scheduler
333 drm_sched_rq_select_entity_fifo(struct drm_gpu_scheduler *sched,
347 if (!drm_sched_can_queue(sched, entity)) {
364 * @sched: scheduler instance
366 static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched)
368 if (!READ_ONCE(sched->pause_submit))
369 queue_work(sched->submit_wq, &sched->work_run_job);
374 * @sched: scheduler instance
376 static void __drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
378 if (!READ_ONCE(sched->pause_submit))
379 queue_work(sched->submit_wq, &sched->work_free_job);
384 * @sched: scheduler instance
386 static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
390 spin_lock(&sched->job_list_lock);
391 job = list_first_entry_or_null(&sched->pending_list,
394 __drm_sched_run_free_queue(sched);
395 spin_unlock(&sched->job_list_lock);
407 struct drm_gpu_scheduler *sched = s_fence->sched;
409 atomic_sub(s_job->credits, &sched->credit_count);
410 atomic_dec(sched->score);
417 __drm_sched_run_free_queue(sched);
435 * @sched: scheduler instance to start the worker for
439 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
441 lockdep_assert_held(&sched->job_list_lock);
443 if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
444 !list_empty(&sched->pending_list))
445 mod_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
448 static void drm_sched_start_timeout_unlocked(struct drm_gpu_scheduler *sched)
450 spin_lock(&sched->job_list_lock);
451 drm_sched_start_timeout(sched);
452 spin_unlock(&sched->job_list_lock);
458 * @sched: scheduler for which the timeout handling should be started.
462 void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched)
464 spin_lock(&sched->job_list_lock);
465 sched->timeout = 0;
466 drm_sched_start_timeout(sched);
467 spin_unlock(&sched->job_list_lock);
474 * @sched: scheduler where the timeout handling should be started.
478 void drm_sched_fault(struct drm_gpu_scheduler *sched)
480 if (sched->timeout_wq)
481 mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
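Lines 474-481 reference drm_sched_fault(), which arms the timeout work with a zero delay. A hedged sketch of a typical caller, an interrupt handler that notices a hardware fault, follows; struct my_engine and my_fault_irq() are hypothetical names, only drm_sched_fault() itself is taken from the listing.

#include <drm/gpu_scheduler.h>
#include <linux/interrupt.h>

/* Hypothetical per-engine state embedding the scheduler instance. */
struct my_engine {
        struct drm_gpu_scheduler sched;
};

static irqreturn_t my_fault_irq(int irq, void *data)
{
        struct my_engine *engine = data;

        /*
         * Re-queues work_tdr with a zero delay (478-481) so the
         * .timedout_job handler runs without waiting for the full timeout.
         */
        drm_sched_fault(&engine->sched);

        return IRQ_HANDLED;
}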
488 * @sched: scheduler instance for which to suspend the timeout
497 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
501 sched_timeout = sched->work_tdr.timer.expires;
507 if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
511 return sched->timeout;
518 * @sched: scheduler instance for which to resume the timeout
523 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
526 spin_lock(&sched->job_list_lock);
528 if (list_empty(&sched->pending_list))
529 cancel_delayed_work(&sched->work_tdr);
531 mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);
533 spin_unlock(&sched->job_list_lock);
539 struct drm_gpu_scheduler *sched = s_job->sched;
541 spin_lock(&sched->job_list_lock);
542 list_add_tail(&s_job->list, &sched->pending_list);
543 drm_sched_start_timeout(sched);
544 spin_unlock(&sched->job_list_lock);
549 struct drm_gpu_scheduler *sched;
553 sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
556 spin_lock(&sched->job_list_lock);
557 job = list_first_entry_or_null(&sched->pending_list,
563 * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
567 spin_unlock(&sched->job_list_lock);
569 status = job->sched->ops->timedout_job(job);
575 if (sched->free_guilty) {
576 job->sched->ops->free_job(job);
577 sched->free_guilty = false;
580 spin_unlock(&sched->job_list_lock);
584 drm_sched_start_timeout_unlocked(sched);
590 * @sched: scheduler instance
599 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
603 drm_sched_wqueue_stop(sched);
612 if (bad && bad->sched == sched)
617 list_add(&bad->list, &sched->pending_list);
623 * This iteration is thread safe as sched thread is stopped.
625 list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
632 atomic_sub(s_job->credits, &sched->credit_count);
638 spin_lock(&sched->job_list_lock);
640 spin_unlock(&sched->job_list_lock);
656 sched->ops->free_job(s_job);
658 sched->free_guilty = true;
668 cancel_delayed_work(&sched->work_tdr);
676 * @sched: scheduler instance
677 * @full_recovery: proceed with complete sched restart
680 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
686 * Locking the list is not required here as the sched thread is parked
690 list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
693 atomic_add(s_job->credits, &sched->credit_count);
704 DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
711 drm_sched_start_timeout_unlocked(sched);
713 drm_sched_wqueue_start(sched);
720 * @sched: scheduler instance
734 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
741 list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
744 if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
752 fence = sched->ops->run_job(s_job);
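The cluster from 599 to 752 references drm_sched_stop(), drm_sched_resubmit_jobs() and drm_sched_start(). How the three are combined is driver policy rather than something the scheduler dictates, but a common recovery sequence inside a .timedout_job callback looks roughly like the sketch below; my_reset_hw() is a hypothetical placeholder for the driver's engine reset.

#include <drm/gpu_scheduler.h>

/* Hypothetical placeholder for the driver's engine reset. */
static void my_reset_hw(struct drm_gpu_scheduler *sched)
{
}

static enum drm_gpu_sched_stat
my_timedout_job(struct drm_sched_job *sched_job)
{
        struct drm_gpu_scheduler *sched = sched_job->sched;

        /* Park the work items and pull the bad job off pending_list (599-668). */
        drm_sched_stop(sched, sched_job);

        my_reset_hw(sched);

        /* Re-run the surviving jobs (734-752) ... */
        drm_sched_resubmit_jobs(sched);

        /* ... then restart with full recovery (680-713). */
        drm_sched_start(sched, true);

        return DRM_GPU_SCHED_STAT_NOMINAL;
}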
800 drm_err(job->sched, "%s: entity has no rq!\n", __func__);
838 struct drm_gpu_scheduler *sched;
843 sched = entity->rq->sched;
845 job->sched = sched;
847 job->id = atomic64_inc_return(&sched->job_id_count);
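The entries at 838-847 come from drm_sched_job_init() binding a job to entity->rq->sched and assigning job->id. A hedged submission sketch follows: drm_sched_job_arm() and drm_sched_entity_push_job() are the usual companions of drm_sched_job_init() but do not appear in this listing, the four-argument init (with an explicit credit count) is assumed to match the credit-based scheduler shown here, and all my_* names are hypothetical.

#include <drm/gpu_scheduler.h>
#include <linux/slab.h>

/* Hypothetical driver job wrapping struct drm_sched_job. */
struct my_job {
        struct drm_sched_job base;
};

static int my_submit(struct drm_sched_entity *entity, void *owner)
{
        struct my_job *job;
        int ret;

        job = kzalloc(sizeof(*job), GFP_KERNEL);
        if (!job)
                return -ENOMEM;

        /* Sets job->sched from entity->rq->sched and job->id (838-847). */
        ret = drm_sched_job_init(&job->base, entity, 1 /* credits */, owner);
        if (ret) {
                kfree(job);
                return ret;
        }

        /* Arm and queue; from here on the scheduler owns the job's fences. */
        drm_sched_job_arm(&job->base);
        drm_sched_entity_push_job(&job->base);

        return 0;
}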
1024 * @sched: scheduler instance
1029 void drm_sched_wakeup(struct drm_gpu_scheduler *sched,
1032 if (drm_sched_can_queue(sched, entity))
1033 drm_sched_run_job_queue(sched);
1039 * @sched: scheduler instance
1048 drm_sched_select_entity(struct drm_gpu_scheduler *sched)
1055 for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
1057 drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) :
1058 drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]);
1069 * @sched: scheduler instance
1075 drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
1079 spin_lock(&sched->job_list_lock);
1081 job = list_first_entry_or_null(&sched->pending_list,
1089 cancel_delayed_work(&sched->work_tdr);
1091 next = list_first_entry_or_null(&sched->pending_list,
1100 drm_sched_start_timeout(sched);
1106 spin_unlock(&sched->job_list_lock);
1112 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
1116 * Returns pointer of the sched with the least load or NULL if none of the
1123 struct drm_gpu_scheduler *sched, *picked_sched = NULL;
1128 sched = sched_list[i];
1130 if (!sched->ready) {
1132 sched->name);
1136 num_score = atomic_read(sched->score);
1139 picked_sched = sched;
1154 struct drm_gpu_scheduler *sched =
1158 if (READ_ONCE(sched->pause_submit))
1161 job = drm_sched_get_finished_job(sched);
1163 sched->ops->free_job(job);
1165 drm_sched_run_free_queue(sched);
1166 drm_sched_run_job_queue(sched);
1176 struct drm_gpu_scheduler *sched =
1184 if (READ_ONCE(sched->pause_submit))
1188 entity = drm_sched_select_entity(sched);
1195 drm_sched_run_job_queue(sched);
1201 atomic_add(sched_job->credits, &sched->credit_count);
1205 fence = sched->ops->run_job(sched_job);
1218 DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r);
1224 wake_up(&sched->job_scheduled);
1225 drm_sched_run_job_queue(sched);
1231 * @sched: scheduler instance
1247 int drm_sched_init(struct drm_gpu_scheduler *sched,
1256 sched->ops = ops;
1257 sched->credit_limit = credit_limit;
1258 sched->name = name;
1259 sched->timeout = timeout;
1260 sched->timeout_wq = timeout_wq ? : system_wq;
1261 sched->hang_limit = hang_limit;
1262 sched->score = score ? score : &sched->_score;
1263 sched->dev = dev;
1268 drm_err(sched, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
1271 } else if (sched->sched_rq) {
1276 drm_warn(sched, "%s: scheduler already initialized!\n", __func__);
1281 sched->submit_wq = submit_wq;
1282 sched->own_submit_wq = false;
1284 sched->submit_wq = alloc_ordered_workqueue(name, 0);
1285 if (!sched->submit_wq)
1288 sched->own_submit_wq = true;
1291 sched->sched_rq = kmalloc_array(num_rqs, sizeof(*sched->sched_rq),
1293 if (!sched->sched_rq)
1295 sched->num_rqs = num_rqs;
1296 for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
1297 sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
1298 if (!sched->sched_rq[i])
1300 drm_sched_rq_init(sched, sched->sched_rq[i]);
1303 init_waitqueue_head(&sched->job_scheduled);
1304 INIT_LIST_HEAD(&sched->pending_list);
1305 spin_lock_init(&sched->job_list_lock);
1306 atomic_set(&sched->credit_count, 0);
1307 INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
1308 INIT_WORK(&sched->work_run_job, drm_sched_run_job_work);
1309 INIT_WORK(&sched->work_free_job, drm_sched_free_job_work);
1310 atomic_set(&sched->_score, 0);
1311 atomic64_set(&sched->job_id_count, 0);
1312 sched->pause_submit = false;
1314 sched->ready = true;
1318 kfree(sched->sched_rq[i]);
1320 kfree(sched->sched_rq);
1321 sched->sched_rq = NULL;
1323 if (sched->own_submit_wq)
1324 destroy_workqueue(sched->submit_wq);
1325 drm_err(sched, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
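Lines 1247-1325 cover drm_sched_init() and the fields it fills in (ops, submit_wq, num_rqs, credit_limit, hang_limit, timeout, timeout_wq, score, name, dev). A hedged call-site sketch follows; the argument order is assumed from those assignments and the prototype of this credit-based scheduler version rather than quoted from a header, and every my_* name plus the numeric values are hypothetical.

#include <drm/gpu_scheduler.h>
#include <linux/device.h>
#include <linux/jiffies.h>

/* Hypothetical per-engine state embedding the scheduler instance. */
struct my_engine {
        struct drm_gpu_scheduler sched;
        struct device *dev;
};

/* Hypothetical backend ops table, defined elsewhere in the driver. */
extern const struct drm_sched_backend_ops my_sched_ops;

static int my_engine_sched_init(struct my_engine *engine)
{
        /*
         * NULL submit_wq lets the scheduler allocate its own ordered
         * workqueue (1284-1288), NULL timeout_wq falls back to system_wq
         * (1260), and NULL score selects the embedded &sched->_score (1262).
         */
        return drm_sched_init(&engine->sched, &my_sched_ops,
                              NULL,                     /* submit_wq */
                              DRM_SCHED_PRIORITY_COUNT, /* num_rqs */
                              64,                       /* credit_limit */
                              2,                        /* hang_limit */
                              msecs_to_jiffies(500),    /* timeout */
                              NULL,                     /* timeout_wq */
                              NULL,                     /* score */
                              "my-engine",              /* name */
                              engine->dev);             /* dev */
}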
1333 * @sched: scheduler instance
1337 void drm_sched_fini(struct drm_gpu_scheduler *sched)
1342 drm_sched_wqueue_stop(sched);
1344 for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
1345 struct drm_sched_rq *rq = sched->sched_rq[i];
1356 kfree(sched->sched_rq[i]);
1360 wake_up_all(&sched->job_scheduled);
1363 cancel_delayed_work_sync(&sched->work_tdr);
1365 if (sched->own_submit_wq)
1366 destroy_workqueue(sched->submit_wq);
1367 sched->ready = false;
1368 kfree(sched->sched_rq);
1369 sched->sched_rq = NULL;
1379 * limit of the scheduler then the respective sched entity is marked guilty and
1387 struct drm_gpu_scheduler *sched = bad->sched;
1396 for (i = DRM_SCHED_PRIORITY_HIGH; i < sched->num_rqs; i++) {
1397 struct drm_sched_rq *rq = sched->sched_rq[i];
1419 * @sched: scheduler instance
1423 bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched)
1425 return sched->ready;
1432 * @sched: scheduler instance
1434 void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched)
1436 WRITE_ONCE(sched->pause_submit, true);
1437 cancel_work_sync(&sched->work_run_job);
1438 cancel_work_sync(&sched->work_free_job);
1445 * @sched: scheduler instance
1447 void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched)
1449 WRITE_ONCE(sched->pause_submit, false);
1450 queue_work(sched->submit_wq, &sched->work_run_job);
1451 queue_work(sched->submit_wq, &sched->work_free_job);
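The final entries (1423-1451) reference drm_sched_wqueue_ready(), drm_sched_wqueue_stop() and drm_sched_wqueue_start(). As a hedged sketch of how a driver might pause and resume job submission around a device reset, not a prescribed sequence, the snippet below pairs the stop/start helpers; struct my_device and my_do_reset() are hypothetical.

#include <drm/gpu_scheduler.h>

/* Hypothetical device with a single scheduler instance. */
struct my_device {
        struct drm_gpu_scheduler sched;
};

/* Hypothetical placeholder for the actual reset sequence. */
static void my_do_reset(struct my_device *mdev)
{
}

static void my_device_reset(struct my_device *mdev)
{
        /* Only touch the work items once drm_sched_init() completed (1423-1425). */
        if (!drm_sched_wqueue_ready(&mdev->sched))
                return;

        /* Sets pause_submit and cancels work_run_job/work_free_job (1434-1438). */
        drm_sched_wqueue_stop(&mdev->sched);

        my_do_reset(mdev);

        /* Clears pause_submit and requeues both work items (1447-1451). */
        drm_sched_wqueue_start(&mdev->sched);
}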