Lines Matching defs:sched_engine

102 * sched_engine->lock
104 * engine (sched_engine), thus only one of the contexts which share a
105 * sched_engine can be submitting at a time. Currently only one sched_engine is
121 * sched_engine->lock -> ce->guc_state.lock
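
The two ordering fragments above (file lines 102 and 121) document that sched_engine->lock is the outer submission lock shared by every context on a sched_engine, and that ce->guc_state.lock nests inside it. A minimal illustrative sketch of that nesting; example_locking_order() is a hypothetical helper, not a function from the file:

static void example_locking_order(struct i915_sched_engine *sched_engine,
                                  struct intel_context *ce)
{
        unsigned long flags;

        /* Outer: submission lock shared by all contexts on this sched_engine. */
        spin_lock_irqsave(&sched_engine->lock, flags);

        /* Inner: per-context GuC state, always taken after sched_engine->lock. */
        spin_lock(&ce->guc_state.lock);

        /* ... updates that need both the submit queue and the context state ... */

        spin_unlock(&ce->guc_state.lock);
        spin_unlock_irqrestore(&sched_engine->lock, flags);
}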
705 lockdep_assert_held(&rq->engine->sched_engine->lock);
929 struct i915_sched_engine * const sched_engine = guc->sched_engine;
935 lockdep_assert_held(&sched_engine->lock);
953 while ((rb = rb_first_cached(&sched_engine->queue))) {
984 rb_erase_cached(&p->node, &sched_engine->queue);
1038 sched_engine->tasklet.callback = NULL;
1039 tasklet_disable_nosync(&sched_engine->tasklet);
1043 tasklet_schedule(&sched_engine->tasklet);
1049 struct i915_sched_engine *sched_engine =
1050 from_tasklet(sched_engine, t, tasklet);
1054 spin_lock_irqsave(&sched_engine->lock, flags);
1057 loop = guc_dequeue_one_context(sched_engine->private_data);
1060 i915_sched_engine_reset_on_empty(sched_engine);
1062 spin_unlock_irqrestore(&sched_engine->lock, flags);
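
File lines 1049-1062 above belong to the GuC submission tasklet. A sketch of how those matched lines plausibly fit together; only the matched lines are quoted from the file, the local declarations and the do/while shape are reconstructed:

static void guc_submission_tasklet(struct tasklet_struct *t)
{
        struct i915_sched_engine *sched_engine =
                from_tasklet(sched_engine, t, tasklet);
        unsigned long flags;
        bool loop;

        spin_lock_irqsave(&sched_engine->lock, flags);

        /* Keep dequeuing contexts until nothing more can be submitted. */
        do {
                loop = guc_dequeue_one_context(sched_engine->private_data);
        } while (loop);

        /* Once the queue has drained, arm the reset-on-empty bookkeeping. */
        i915_sched_engine_reset_on_empty(sched_engine);

        spin_unlock_irqrestore(&sched_engine->lock, flags);
}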
1623 struct i915_sched_engine * const sched_engine = guc->sched_engine;
1625 return unlikely(!sched_engine ||
1626 !__tasklet_is_enabled(&sched_engine->tasklet) ||
1632 struct i915_sched_engine * const sched_engine = guc->sched_engine;
1634 if (__tasklet_is_enabled(&sched_engine->tasklet)) {
1636 __tasklet_disable_sync_once(&sched_engine->tasklet);
1637 sched_engine->tasklet.callback = NULL;
1643 struct i915_sched_engine * const sched_engine = guc->sched_engine;
1646 spin_lock_irqsave(&guc->sched_engine->lock, flags);
1647 sched_engine->tasklet.callback = guc_submission_tasklet;
1649 if (!__tasklet_is_enabled(&sched_engine->tasklet) &&
1650 __tasklet_enable(&sched_engine->tasklet)) {
1654 tasklet_hi_schedule(&sched_engine->tasklet);
1656 spin_unlock_irqrestore(&guc->sched_engine->lock, flags);
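
File lines 1632-1656 pair a disable path (quiesce the tasklet and clear its callback) with an enable path (restore guc_submission_tasklet as the callback and kick it). A reconstructed sketch of that pairing, assuming the surrounding functions look roughly like the driver's disable/enable submission helpers; memory-barrier and debug-assert details are omitted:

static void disable_submission(struct intel_guc *guc)
{
        struct i915_sched_engine * const sched_engine = guc->sched_engine;

        if (__tasklet_is_enabled(&sched_engine->tasklet)) {
                /* Park the tasklet and make any late schedule a no-op. */
                __tasklet_disable_sync_once(&sched_engine->tasklet);
                sched_engine->tasklet.callback = NULL;
        }
}

static void enable_submission(struct intel_guc *guc)
{
        struct i915_sched_engine * const sched_engine = guc->sched_engine;
        unsigned long flags;

        spin_lock_irqsave(&guc->sched_engine->lock, flags);
        sched_engine->tasklet.callback = guc_submission_tasklet;
        if (!__tasklet_is_enabled(&sched_engine->tasklet) &&
            __tasklet_enable(&sched_engine->tasklet)) {
                /* Kick once in case a submission raced with the disable. */
                tasklet_hi_schedule(&sched_engine->tasklet);
        }
        spin_unlock_irqrestore(&guc->sched_engine->lock, flags);
}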
1661 struct i915_sched_engine * const sched_engine = guc->sched_engine;
1664 spin_lock_irqsave(&sched_engine->lock, flags);
1665 spin_unlock_irqrestore(&sched_engine->lock, flags);
1773 struct i915_sched_engine * const sched_engine =
1774 ce->engine->sched_engine;
1777 spin_lock_irqsave(&sched_engine->lock, flags);
1792 pl = i915_sched_lookup_priolist(sched_engine, prio);
1794 GEM_BUG_ON(i915_sched_engine_is_empty(sched_engine));
1800 spin_unlock_irqrestore(&sched_engine->lock, flags);
1907 struct i915_sched_engine *sched_engine = ce_to_guc(ce)->sched_engine;
1912 spin_lock_irqsave(&sched_engine->lock, flags);
1917 spin_unlock_irqrestore(&sched_engine->lock, flags);
1921 guc_cancel_sched_engine_requests(struct i915_sched_engine *sched_engine)
1928 if (!sched_engine)
1945 spin_lock_irqsave(&sched_engine->lock, flags);
1948 while ((rb = rb_first_cached(&sched_engine->queue))) {
1959 rb_erase_cached(&p->node, &sched_engine->queue);
1965 sched_engine->queue_priority_hint = INT_MIN;
1966 sched_engine->queue = RB_ROOT_CACHED;
1968 spin_unlock_irqrestore(&sched_engine->lock, flags);
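
File lines 1921-1968 are the backlog-cancellation path: under sched_engine->lock it pops each priority bucket off the rb-tree, cancels its requests, and leaves the queue empty. A sketch of that loop; to_priolist() and i915_priolist_free() are the driver's existing helpers, and the per-request cancellation in the middle is elided here:

static void
guc_cancel_sched_engine_requests(struct i915_sched_engine *sched_engine)
{
        struct rb_node *rb;
        unsigned long flags;

        if (!sched_engine)
                return;

        spin_lock_irqsave(&sched_engine->lock, flags);

        /* Walk the not-yet-submitted backlog one priority bucket at a time. */
        while ((rb = rb_first_cached(&sched_engine->queue))) {
                struct i915_priolist *p = to_priolist(rb);

                /* ... mark every request on this priolist as cancelled ... */

                rb_erase_cached(&p->node, &sched_engine->queue);
                i915_priolist_free(p);
        }

        /* Nothing left to dequeue: put the queue back into its empty state. */
        sched_engine->queue_priority_hint = INT_MIN;
        sched_engine->queue = RB_ROOT_CACHED;

        spin_unlock_irqrestore(&sched_engine->lock, flags);
}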
1994 guc_cancel_sched_engine_requests(guc->sched_engine);
2136 i915_sched_engine_put(guc->sched_engine);
2142 static inline void queue_request(struct i915_sched_engine *sched_engine,
2148 i915_sched_lookup_priolist(sched_engine, prio));
2150 tasklet_hi_schedule(&sched_engine->tasklet);
2181 struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
2185 !i915_sched_engine_is_empty(sched_engine) ||
2191 struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
2196 spin_lock_irqsave(&sched_engine->lock, flags);
2199 queue_request(sched_engine, rq, rq_prio(rq));
2201 tasklet_hi_schedule(&sched_engine->tasklet);
2203 spin_unlock_irqrestore(&sched_engine->lock, flags);
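
File lines 2142-2203 cover the submission entry point: queue_request() links the request onto the right priority list and kicks the tasklet, and the submit path takes sched_engine->lock and queues whenever the backlog is non-empty or submission is otherwise blocked. A sketch under those assumptions; it folds the "need a tasklet?" test directly into the submit function for brevity, the list_add_tail() glue is assumed, and the direct-to-GuC fast path is only indicated by a comment:

static inline void queue_request(struct i915_sched_engine *sched_engine,
                                 struct i915_request *rq,
                                 int prio)
{
        /* Append to the priolist for this priority, creating it if needed. */
        list_add_tail(&rq->sched.link,
                      i915_sched_lookup_priolist(sched_engine, prio));
        tasklet_hi_schedule(&sched_engine->tasklet);
}

static void guc_submit_request(struct i915_request *rq)
{
        struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
        unsigned long flags;

        /* May be called from irq context when driven by foreign fences. */
        spin_lock_irqsave(&sched_engine->lock, flags);

        if (!i915_sched_engine_is_empty(sched_engine) /* or submission blocked */) {
                queue_request(sched_engine, rq, rq_prio(rq));
        } else {
                /*
                 * Fast path: try to hand the request to the GuC directly;
                 * if that is busy, the real code falls back to kicking the
                 * tasklet (the tasklet_hi_schedule() at file line 2201).
                 */
        }

        spin_unlock_irqrestore(&sched_engine->lock, flags);
}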
3738 might_lock(&rq->engine->sched_engine->lock);
3752 * Use an IRQ to ensure locking order of sched_engine->lock ->
4362 static bool guc_sched_engine_disabled(struct i915_sched_engine *sched_engine)
4364 return !sched_engine->tasklet.callback;
4469 engine->sched_engine->schedule = i915_schedule;
4535 struct i915_sched_engine *sched_engine =
4536 container_of(kref, typeof(*sched_engine), ref);
4537 struct intel_guc *guc = sched_engine->private_data;
4539 guc->sched_engine = NULL;
4540 tasklet_kill(&sched_engine->tasklet); /* flush the callback */
4541 kfree(sched_engine);
4555 if (!guc->sched_engine) {
4556 guc->sched_engine = i915_sched_engine_create(ENGINE_VIRTUAL);
4557 if (!guc->sched_engine)
4560 guc->sched_engine->schedule = i915_schedule;
4561 guc->sched_engine->disabled = guc_sched_engine_disabled;
4562 guc->sched_engine->private_data = guc;
4563 guc->sched_engine->destroy = guc_sched_engine_destroy;
4564 guc->sched_engine->bump_inflight_request_prio =
4566 guc->sched_engine->retire_inflight_request_prio =
4568 tasklet_setup(&guc->sched_engine->tasklet,
4571 i915_sched_engine_put(engine->sched_engine);
4572 engine->sched_engine = i915_sched_engine_get(guc->sched_engine);
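
File lines 4362-4572 show how a single sched_engine is shared across engines once GuC submission is enabled: the first engine to be set up creates guc->sched_engine and wires up its callbacks, and every engine then drops its private sched_engine and takes a reference on the shared one. A sketch of that setup; the error value on allocation failure is assumed, and the bump/retire priority callbacks are not visible in the listing and are left as a comment:

static bool guc_sched_engine_disabled(struct i915_sched_engine *sched_engine)
{
        /* The callback is cleared whenever submission is disabled. */
        return !sched_engine->tasklet.callback;
}

static void guc_sched_engine_destroy(struct kref *kref)
{
        struct i915_sched_engine *sched_engine =
                container_of(kref, typeof(*sched_engine), ref);
        struct intel_guc *guc = sched_engine->private_data;

        guc->sched_engine = NULL;
        tasklet_kill(&sched_engine->tasklet); /* flush the callback */
        kfree(sched_engine);
}

/* Inside the per-engine submission setup (sketch): */
        if (!guc->sched_engine) {
                guc->sched_engine = i915_sched_engine_create(ENGINE_VIRTUAL);
                if (!guc->sched_engine)
                        return -ENOMEM; /* assumed: error path not in the listing */

                guc->sched_engine->schedule = i915_schedule;
                guc->sched_engine->disabled = guc_sched_engine_disabled;
                guc->sched_engine->private_data = guc;
                guc->sched_engine->destroy = guc_sched_engine_destroy;
                /*
                 * bump_inflight_request_prio / retire_inflight_request_prio
                 * are also set here; their callees are elided in the listing.
                 */
                tasklet_setup(&guc->sched_engine->tasklet,
                              guc_submission_tasklet);
        }
        i915_sched_engine_put(engine->sched_engine);
        engine->sched_engine = i915_sched_engine_get(guc->sched_engine);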
5137 struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
5140 tasklet_hi_schedule(&sched_engine->tasklet);
5410 struct i915_sched_engine *sched_engine = guc->sched_engine;
5414 if (!sched_engine)
5423 atomic_read(&sched_engine->tasklet.count));
5425 spin_lock_irqsave(&sched_engine->lock, flags);
5427 for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) {
5436 spin_unlock_irqrestore(&sched_engine->lock, flags);
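
File lines 5410-5436 iterate the pending-submission queue for a debug dump: check that the shared sched_engine exists, report the tasklet disable count, then walk the priority rb-tree under the lock. A sketch of that walk; guc_dump_submit_queue() is a hypothetical name and the drm_printf() format strings are illustrative, not the file's actual output:

static void guc_dump_submit_queue(struct intel_guc *guc, struct drm_printer *p)
{
        struct i915_sched_engine *sched_engine = guc->sched_engine;
        struct rb_node *rb;
        unsigned long flags;

        if (!sched_engine)
                return;

        /* A non-zero count means the tasklet is currently disabled. */
        drm_printf(p, "GuC tasklet disable count: %u\n",
                   atomic_read(&sched_engine->tasklet.count));

        spin_lock_irqsave(&sched_engine->lock, flags);
        for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) {
                struct i915_priolist *pl = to_priolist(rb);
                struct i915_request *rq;

                /* Each node is one priority bucket holding pending requests. */
                list_for_each_entry(rq, &pl->requests, sched.link)
                        drm_printf(p, "\tqueued request, prio=%d\n",
                                   pl->priority);
        }
        spin_unlock_irqrestore(&sched_engine->lock, flags);
}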
5834 ve->base.sched_engine = i915_sched_engine_get(guc->sched_engine);