Lines matching defs:ve (the local struct virtual_engine pointer, ve, in the i915 execlists submission backend)

465 	/* We've switched away, so this should be a no-op, but intent matters */
547 resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve)
554 WRITE_ONCE(rq->engine, &ve->base);
555 ve->base.submit_request(rq);
562 struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
583 resubmit_virtual_request(rq, ve);
585 if (READ_ONCE(ve->request))
586 tasklet_hi_schedule(&ve->base.sched_engine->tasklet);
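
The matches at 547-586 are the preempt-to-busy recovery path: kick_siblings() recovers the owning virtual_engine from its embedded intel_context with container_of(), repoints rq->engine at the virtual base, and pushes the request back through the virtual submit_request(). A minimal userspace sketch of the container_of() recovery, with stand-in types (ctx, engine, vengine are illustrative, not the kernel's):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ctx { int flags; };			/* stand-in for intel_context */
struct engine { const char *name; };		/* stand-in for intel_engine_cs */

struct vengine {
	struct engine base;
	struct ctx context;			/* embedded, as in the kernel */
};

int main(void)
{
	struct vengine ve = { .base.name = "virtual" };
	struct ctx *ce = &ve.context;		/* what the callback receives */

	/* Recover the enclosing vengine, as kick_siblings() does. */
	struct vengine *owner = container_of(ce, struct vengine, context);

	printf("%s\n", owner->base.name);	/* prints "virtual" */
	return 0;
}
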
996 static bool virtual_matches(const struct virtual_engine *ve,
1017 inflight = intel_context_inflight(&ve->context);
1031 struct virtual_engine *ve =
1032 rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
1033 struct i915_request *rq = READ_ONCE(ve->request);
1036 if (!rq || !virtual_matches(ve, rq, engine)) {
1043 return ve;
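
virtual_matches() (996) is the gate that first_virtual_engine() (1031) applies while walking this engine's rb-tree of virtual nodes: the queued request must be allowed to run here, and if the virtual context is still in flight on some engine, only that same engine may take it again. A simplified predicate with stand-in mask fields (the kernel compares intel_context_inflight() pointers):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef uint32_t engine_mask_t;

struct engine { engine_mask_t mask; };
struct request { engine_mask_t execution_mask; };

static bool matches(const struct engine *inflight,	/* where ce runs now */
		    const struct request *rq,
		    const struct engine *engine)
{
	if (!rq)
		return false;

	if (!(rq->execution_mask & engine->mask))	/* wrong engine */
		return false;

	/* An idle context can go anywhere; a resident one must stay put. */
	return inflight == NULL || inflight == engine;
}
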
1049 static void virtual_xfer_context(struct virtual_engine *ve,
1054 if (likely(engine == ve->siblings[0]))
1057 GEM_BUG_ON(READ_ONCE(ve->context.inflight));
1059 lrc_update_offsets(&ve->context, engine);
1067 for (n = 1; n < ve->num_siblings; n++) {
1068 if (ve->siblings[n] == engine) {
1069 swap(ve->siblings[n], ve->siblings[0]);
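
virtual_xfer_context() (1049) keeps siblings[0] as the engine the context most recently ran on: when execution moves, the matching sibling is swapped to the front and the engine-specific offsets are rewritten (lrc_update_offsets()). The rotation itself as a self-contained sketch; move_to_front() is an illustrative name:

struct engine { int id; };

static void move_to_front(struct engine **siblings, unsigned int count,
			  struct engine *engine)
{
	unsigned int n;

	for (n = 1; n < count; n++) {
		if (siblings[n] == engine) {
			struct engine *tmp = siblings[0];

			siblings[0] = siblings[n];
			siblings[n] = tmp;
			break;
		}
	}
}
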
1280 struct virtual_engine *ve;
1413 while ((ve = first_virtual_engine(engine))) {
1416 spin_lock(&ve->base.sched_engine->lock);
1418 rq = ve->request;
1419 if (unlikely(!virtual_matches(ve, rq, engine)))
1422 GEM_BUG_ON(rq->engine != &ve->base);
1423 GEM_BUG_ON(rq->context != &ve->context);
1426 spin_unlock(&ve->base.sched_engine->lock);
1431 spin_unlock(&ve->base.sched_engine->lock);
1443 str_yes_no(engine != ve->siblings[0]));
1445 WRITE_ONCE(ve->request, NULL);
1446 WRITE_ONCE(ve->base.sched_engine->queue_priority_hint, INT_MIN);
1448 rb = &ve->nodes[engine->id].rb;
1465 * ve->siblings[] on an idle context, where
1466 * we may be using ve->siblings[] in
1469 virtual_xfer_context(ve, engine);
1470 GEM_BUG_ON(ve->siblings[0] != engine);
1478 spin_unlock(&ve->base.sched_engine->lock);
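
This is the dequeue side (1280-1478): with the physical engine's sched_engine lock already held, the virtual engine's own lock is taken nested, the request is re-validated under it, and only then claimed by clearing ve->request. A hedged userspace sketch of the double-checked claim, using a pthread mutex where the kernel nests spinlocks and pairs READ_ONCE/WRITE_ONCE:

#include <pthread.h>
#include <stddef.h>

struct request { int prio; };

struct vengine {
	pthread_mutex_t lock;		/* stands in for sched_engine->lock */
	struct request *request;	/* single pending-request slot */
};

static struct request *claim_request(struct vengine *ve)
{
	struct request *rq = NULL;

	pthread_mutex_lock(&ve->lock);
	if (ve->request) {		/* re-check under the lock */
		rq = ve->request;
		ve->request = NULL;	/* now owned by this engine */
	}
	pthread_mutex_unlock(&ve->lock);

	return rq;
}
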
3202 struct virtual_engine *ve =
3203 rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
3208 spin_lock(&ve->base.sched_engine->lock);
3209 rq = fetch_and_zero(&ve->request);
3218 ve->base.sched_engine->queue_priority_hint = INT_MIN;
3220 spin_unlock(&ve->base.sched_engine->lock);
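
The reset/cancel path (3202-3220) atomically detaches the pending request with fetch_and_zero() and drops the queue-priority hint back to INT_MIN. fetch_and_zero() is a read-then-clear helper from the i915 utility headers; a stand-in definition in the same spirit (GNU C statement expression, as the kernel uses):

#define fetch_and_zero(ptr) ({			\
	__typeof__(*(ptr)) __old = *(ptr);	\
	*(ptr) = 0;				\
	__old;					\
})

/* usage, as at 3209: struct i915_request *rq = fetch_and_zero(&ve->request); */
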
3600 static struct list_head *virtual_queue(struct virtual_engine *ve)
3602 return &ve->base.sched_engine->default_priolist.requests;
3607 struct virtual_engine *ve =
3608 container_of(wrk, typeof(*ve), rcu.work);
3611 GEM_BUG_ON(ve->context.inflight);
3614 if (unlikely(ve->request)) {
3617 spin_lock_irq(&ve->base.sched_engine->lock);
3619 old = fetch_and_zero(&ve->request);
3626 spin_unlock_irq(&ve->base.sched_engine->lock);
3636 tasklet_kill(&ve->base.sched_engine->tasklet);
3639 for (n = 0; n < ve->num_siblings; n++) {
3640 struct intel_engine_cs *sibling = ve->siblings[n];
3641 struct rb_node *node = &ve->nodes[sibling->id].rb;
3654 GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.sched_engine->tasklet));
3655 GEM_BUG_ON(!list_empty(virtual_queue(ve)));
3657 lrc_fini(&ve->context);
3658 intel_context_fini(&ve->context);
3660 if (ve->base.breadcrumbs)
3661 intel_breadcrumbs_put(ve->base.breadcrumbs);
3662 if (ve->base.sched_engine)
3663 i915_sched_engine_put(ve->base.sched_engine);
3664 intel_engine_free_request_pool(&ve->base);
3666 kfree(ve);
3671 struct virtual_engine *ve =
3672 container_of(kref, typeof(*ve), context.ref);
3674 GEM_BUG_ON(!list_empty(&ve->context.signals));
3686 INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy);
3687 queue_rcu_work(ve->context.engine->i915->unordered_wq, &ve->rcu);
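
Teardown is two-stage (3607-3687): dropping the last context reference only queues an rcu_work, and the worker, running after a grace period, flushes any leftover ve->request, kills the tasklet, unlinks the per-sibling rb nodes, and frees the engine, since lockless readers may still be walking those fields. The shape of that deferral reduced to a userspace stand-in (refcount and deferred_free are illustrative, not kernel API):

#include <stdlib.h>

struct vctx {
	int refcount;
	void (*deferred_free)(struct vctx *);
};

static void worker_free(struct vctx *ctx)
{
	/* kernel: flush ve->request, tasklet_kill(), rb_erase per sibling,
	 * lrc_fini() + intel_context_fini(), then kfree(ve) */
	free(ctx);
}

static void vctx_put(struct vctx *ctx)
{
	if (--ctx->refcount == 0)
		ctx->deferred_free(ctx);	/* kernel: queue_rcu_work() */
}
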
3690 static void virtual_engine_initial_hint(struct virtual_engine *ve)
3707 swp = get_random_u32_below(ve->num_siblings);
3709 swap(ve->siblings[swp], ve->siblings[0]);
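
virtual_engine_initial_hint() (3690) randomizes which sibling sits in slot 0 at creation, so a set of freshly created virtual engines doesn't all submit to the same physical engine first. A stand-in using rand() where the kernel uses get_random_u32_below() (the kernel likewise skips the swap when it draws 0):

#include <stdlib.h>

struct engine { int id; };

static void initial_hint(struct engine **siblings, unsigned int count)
{
	unsigned int swp = (unsigned int)rand() % count;
	struct engine *tmp;

	if (!swp)			/* already at the front */
		return;

	tmp = siblings[0];
	siblings[0] = siblings[swp];
	siblings[swp] = tmp;
}
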
3714 struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
3716 return lrc_alloc(ce, ve->siblings[0]);
3723 struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
3726 return __execlists_context_pre_pin(ce, ve->siblings[0], ww, vaddr);
3731 struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
3733 return lrc_pin(ce, ve->siblings[0], vaddr);
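
The context ops at 3714-3733 show the virtual context owns no hardware state of its own: lrc_alloc(), the pre-pin, and lrc_pin() all delegate to the current head sibling, siblings[0]; virtual_xfer_context() (1049 above) later patches the engine-specific offsets instead of reallocating. The delegation in sketch form; pin_on_engine() is a hypothetical stand-in for the lrc calls:

struct engine { int id; };
struct vctx { struct engine **siblings; };

extern int pin_on_engine(struct vctx *ce, struct engine *engine);

static int vctx_pin(struct vctx *ce)
{
	/* Everything is sized and mapped against the current head sibling. */
	return pin_on_engine(ce, ce->siblings[0]);
}
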
3738 struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
3741 for (n = 0; n < ve->num_siblings; n++)
3742 intel_engine_pm_get(ve->siblings[n]);
3749 struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
3754 for (n = 0; n < ve->num_siblings; n++)
3755 intel_engine_pm_put(ve->siblings[n]);
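
virtual_context_enter/exit (3738-3755) bracket activity with a power-management reference on every sibling, because until the request retires it may still be scheduled onto any of them. A stand-in with a plain counter where the kernel calls intel_engine_pm_get/put():

struct engine { int pm_ref; };

static void venter(struct engine **siblings, unsigned int count)
{
	for (unsigned int n = 0; n < count; n++)
		siblings[n]->pm_ref++;		/* intel_engine_pm_get() */
}

static void vexit(struct engine **siblings, unsigned int count)
{
	for (unsigned int n = 0; n < count; n++)
		siblings[n]->pm_ref--;		/* intel_engine_pm_put() */
}
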
3761 struct virtual_engine *ve = to_virtual_engine(engine);
3763 if (sibling >= ve->num_siblings)
3766 return ve->siblings[sibling];
3789 static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
3794 rq = READ_ONCE(ve->request);
3803 mask = ve->siblings[0]->mask;
3806 ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n",
3808 mask, ve->base.sched_engine->queue_priority_hint);
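
virtual_submission_mask() (3789) decides which physical engines the tasklet should offer the pending request to: normally the request's own execution_mask; if that mask is somehow empty, the request is failed (-ENODEV in the kernel) and routed to siblings[0] just so it flushes through. Simplified with stand-in fields (error replaces i915_request_set_error_once()):

#include <stdint.h>

typedef uint32_t engine_mask_t;

struct engine { engine_mask_t mask; };
struct request { engine_mask_t execution_mask; int error; };

static engine_mask_t submission_mask(struct request *rq,
				     struct engine *const *siblings)
{
	engine_mask_t mask;

	if (!rq)
		return 0;			/* nothing pending */

	mask = rq->execution_mask;
	if (!mask) {				/* invalid selection */
		rq->error = -19;		/* -ENODEV in the kernel */
		mask = siblings[0]->mask;	/* flush it through somewhere */
	}

	return mask;
}
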
3817 struct virtual_engine * const ve =
3824 mask = virtual_submission_mask(ve);
3829 for (n = 0; n < ve->num_siblings; n++) {
3830 struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]);
3831 struct ve_node * const node = &ve->nodes[sibling->id];
3835 if (!READ_ONCE(ve->request))
3893 if (intel_context_inflight(&ve->context))
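
virtual_submission_tasklet() (3817-3893) then walks every sibling: those inside the mask get ve's per-sibling rb node (re)inserted into their queue, keyed on the priority hint, and the sibling is kicked; the rest get the node erased; the walk also backs off if ve->request vanished or the context went in flight meanwhile. The per-sibling loop with the rb-tree bookkeeping reduced to a linked flag:

#include <stdint.h>

typedef uint32_t engine_mask_t;

struct sibling { engine_mask_t mask; int linked; };

static void push_to_siblings(engine_mask_t mask,
			     struct sibling *siblings, unsigned int count)
{
	for (unsigned int n = 0; n < count; n++) {
		struct sibling *s = &siblings[n];

		if (s->mask & mask)
			s->linked = 1;		/* kernel: rb insert + kick */
		else
			s->linked = 0;		/* kernel: rb_erase_cached() */
	}
}
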
3900 struct virtual_engine *ve = to_virtual_engine(rq->engine);
3903 ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n",
3907 GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
3909 spin_lock_irqsave(&ve->base.sched_engine->lock, flags);
3917 if (ve->request) { /* background completion from preempt-to-busy */
3918 GEM_BUG_ON(!__i915_request_is_complete(ve->request));
3919 __i915_request_submit(ve->request);
3920 i915_request_put(ve->request);
3923 ve->base.sched_engine->queue_priority_hint = rq_prio(rq);
3924 ve->request = i915_request_get(rq);
3926 GEM_BUG_ON(!list_empty(virtual_queue(ve)));
3927 list_move_tail(&rq->sched.link, virtual_queue(ve));
3929 tasklet_hi_schedule(&ve->base.sched_engine->tasklet);
3932 spin_unlock_irqrestore(&ve->base.sched_engine->lock, flags);
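
virtual_submit_request() (3900-3932) maintains a single pending-request slot: a stale occupant left behind by preempt-to-busy must already be complete and is retired first; then the new request takes the slot, the queue-priority hint is updated, and the tasklet is kicked. A hedged pthread sketch of the slot handoff; refs stands in for i915_request_get/put(), and the tasklet kick is elided:

#include <pthread.h>
#include <stddef.h>

struct request { int prio; int refs; };

struct vengine {
	pthread_mutex_t lock;
	struct request *request;	/* at most one pending request */
	int prio_hint;
};

static void vsubmit(struct vengine *ve, struct request *rq)
{
	pthread_mutex_lock(&ve->lock);

	if (ve->request)		/* leftover, already complete */
		ve->request->refs--;	/* kernel: __i915_request_submit + put */

	ve->prio_hint = rq->prio;
	rq->refs++;			/* kernel: i915_request_get() */
	ve->request = rq;

	pthread_mutex_unlock(&ve->lock);
}
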
3940 struct virtual_engine *ve;
3944 ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
3945 if (!ve)
3948 ve->base.i915 = i915;
3949 ve->base.gt = siblings[0]->gt;
3950 ve->base.uncore = siblings[0]->uncore;
3951 ve->base.id = -1;
3953 ve->base.class = OTHER_CLASS;
3954 ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
3955 ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
3956 ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
3971 ve->base.saturated = ALL_ENGINES;
3973 snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
3975 intel_engine_init_execlists(&ve->base);
3977 ve->base.sched_engine = i915_sched_engine_create(ENGINE_VIRTUAL);
3978 if (!ve->base.sched_engine) {
3982 ve->base.sched_engine->private_data = &ve->base;
3984 ve->base.cops = &virtual_context_ops;
3985 ve->base.request_alloc = execlists_request_alloc;
3987 ve->base.sched_engine->schedule = i915_schedule;
3988 ve->base.sched_engine->kick_backend = kick_execlists;
3989 ve->base.submit_request = virtual_submit_request;
3991 INIT_LIST_HEAD(virtual_queue(ve));
3992 tasklet_setup(&ve->base.sched_engine->tasklet, virtual_submission_tasklet);
3994 intel_context_init(&ve->context, &ve->base);
3996 ve->base.breadcrumbs = intel_breadcrumbs_create(NULL);
3997 if (!ve->base.breadcrumbs) {
4006 if (sibling->mask & ve->base.mask) {
4027 GEM_BUG_ON(RB_EMPTY_NODE(&ve->nodes[sibling->id].rb));
4028 RB_CLEAR_NODE(&ve->nodes[sibling->id].rb);
4030 ve->siblings[ve->num_siblings++] = sibling;
4031 ve->base.mask |= sibling->mask;
4032 ve->base.logical_mask |= sibling->logical_mask;
4041 if (ve->base.class != OTHER_CLASS) {
4042 if (ve->base.class != sibling->class) {
4045 sibling->class, ve->base.class);
4052 ve->base.class = sibling->class;
4053 ve->base.uabi_class = sibling->uabi_class;
4054 snprintf(ve->base.name, sizeof(ve->base.name),
4055 "v%dx%d", ve->base.class, count);
4056 ve->base.context_size = sibling->context_size;
4058 ve->base.add_active_request = sibling->add_active_request;
4059 ve->base.remove_active_request = sibling->remove_active_request;
4060 ve->base.emit_bb_start = sibling->emit_bb_start;
4061 ve->base.emit_flush = sibling->emit_flush;
4062 ve->base.emit_init_breadcrumb = sibling->emit_init_breadcrumb;
4063 ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb;
4064 ve->base.emit_fini_breadcrumb_dw =
4067 ve->base.flags = sibling->flags;
4070 ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
4072 virtual_engine_initial_hint(ve);
4073 return &ve->context;
4076 intel_context_put(&ve->context);
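
execlists_create_virtual() (3940-4076) makes a single allocation sized for the trailing sibling array via kzalloc(struct_size(...)), copies the engine vfuncs from its siblings (which must all agree on class), rejects duplicate siblings via the accumulated mask, and unions the sibling masks into ve->base.mask. A self-contained sketch of the flexible-array allocation and the duplicate check (vcreate/vengine are illustrative names; struct_size() additionally guards against overflow):

#include <stddef.h>
#include <stdlib.h>

struct engine { unsigned int mask; int class; };

struct vengine {
	struct engine base;
	unsigned int num_siblings;
	struct engine *siblings[];	/* flexible array, as in the kernel */
};

static struct vengine *vcreate(struct engine **siblings, unsigned int count)
{
	struct vengine *ve;
	unsigned int n;

	ve = calloc(1, offsetof(struct vengine, siblings) +
		       count * sizeof(*ve->siblings));
	if (!ve)
		return NULL;

	for (n = 0; n < count; n++) {
		if (ve->base.mask & siblings[n]->mask) {
			free(ve);	/* duplicate sibling: -EINVAL */
			return NULL;
		}
		ve->siblings[ve->num_siblings++] = siblings[n];
		ve->base.mask |= siblings[n]->mask;
	}

	return ve;
}
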
4142 struct virtual_engine *ve =
4143 rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
4144 struct i915_request *rq = READ_ONCE(ve->request);