Lines matching refs:execlists

345 	return max(virtual_prio(&engine->execlists),
409 execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
412 container_of(execlists, typeof(*engine), execlists);
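Several helpers in this listing (lines 412, 761, 783, 2531) receive only the intel_engine_execlists pointer and recover the owning engine with container_of(), which works because execlists is embedded by value inside struct intel_engine_cs. A minimal self-contained sketch of the idiom, with stand-in types rather than the driver's real structures:

	#include <stddef.h>

	/* Stand-in shapes: execlists is embedded by value in the engine,
	 * so the engine can be recovered by subtracting the member offset. */
	struct execlists_model { int dummy; };

	struct engine_cs_model {
		const char *name;
		struct execlists_model execlists;
	};

	#define container_of_model(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static struct engine_cs_model *to_engine(struct execlists_model *el)
	{
		return container_of_model(el, struct engine_cs_model, execlists);
	}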
517 ce->lrc.ccid |= engine->execlists.ccid;
726 static void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
728 if (execlists->ctrl_reg) {
729 writel(lower_32_bits(desc), execlists->submit_reg + port * 2);
730 writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1);
732 writel(upper_32_bits(desc), execlists->submit_reg);
733 writel(lower_32_bits(desc), execlists->submit_reg);
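The write_desc() hits above (lines 726-733) split on hardware generation: Gen11+ engines expose a submit queue plus a separate control register (execlists->ctrl_reg), while Gen8/9 use the ELSP register, which consumes a full descriptor only once its lower dword arrives, so the upper dword must be written first. A minimal model of that ordering; the struct, helper name, and register layout are illustrative stand-ins, not the driver's API (real accesses go through writel()):

	#include <stdint.h>

	/* Illustrative model only. */
	struct engine_model {
		volatile uint32_t *submit_reg;	/* ELSP (Gen8) or submit queue (Gen11+) */
		volatile uint32_t *ctrl_reg;	/* NULL on Gen8; submit control on Gen11+ */
	};

	static void write_desc_model(struct engine_model *e, uint64_t desc, uint32_t port)
	{
		if (e->ctrl_reg) {
			/* Gen11+: each port is a lo/hi dword pair in the queue */
			e->submit_reg[port * 2] = (uint32_t)desc;
			e->submit_reg[port * 2 + 1] = (uint32_t)(desc >> 32);
		} else {
			/* Gen8: ELSP is a single register; upper dword first,
			 * the lower-dword write completes the entry */
			*e->submit_reg = (uint32_t)(desc >> 32);
			*e->submit_reg = (uint32_t)desc;
		}
	}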
756 trace_ports(const struct intel_engine_execlists *execlists,
761 container_of(execlists, typeof(*engine), execlists);
779 assert_pending_valid(const struct intel_engine_execlists *execlists,
783 container_of(execlists, typeof(*engine), execlists);
788 trace_ports(execlists, msg, execlists->pending);
794 if (!execlists->pending[0]) {
800 if (execlists->pending[execlists_num_ports(execlists)]) {
802 engine->name, execlists_num_ports(execlists));
806 for (port = execlists->pending; (rq = *port); port++) {
817 port - execlists->pending);
826 port - execlists->pending);
845 port - execlists->pending);
856 port != execlists->pending) {
860 port - execlists->pending);
876 port - execlists->pending);
885 port - execlists->pending);
894 port - execlists->pending);
910 struct intel_engine_execlists *execlists = &engine->execlists;
913 GEM_BUG_ON(!assert_pending_valid(execlists, "submit"));
931 for (n = execlists_num_ports(execlists); n--; ) {
932 struct i915_request *rq = execlists->pending[n];
934 write_desc(execlists,
940 if (execlists->ctrl_reg)
941 writel(EL_CTRL_LOAD, execlists->ctrl_reg);
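The submit loop at lines 931-941 walks pending[] highest port first. On Gen8 that ordering is what arms the hardware: the final lower-dword write for port 0 is the one that triggers execution, whereas Gen11+ submit queues are passive and need the explicit EL_CTRL_LOAD kick seen at line 941. A sketch reusing struct engine_model and write_desc_model from above (the port count and load-bit value are made up for illustration):

	#define MODEL_NUM_PORTS 2
	#define MODEL_EL_CTRL_LOAD (1u << 0)	/* stand-in, not the hw bit */

	static void submit_ports_model(struct engine_model *e,
				       const uint64_t desc[MODEL_NUM_PORTS])
	{
		/* Highest port first so port 0 lands last; on Gen8 its
		 * lower-dword write is what starts execution. */
		for (unsigned int n = MODEL_NUM_PORTS; n--; )
			write_desc_model(e, desc[n], n);

		/* Gen11+ submit queues do not self-trigger. */
		if (e->ctrl_reg)
			*e->ctrl_reg = MODEL_EL_CTRL_LOAD;
	}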
1027 struct intel_engine_execlists *el = &engine->execlists;
1165 if (READ_ONCE(engine->execlists.pending[0]))
1181 if (!RB_EMPTY_ROOT(&engine->execlists.virtual.rb_root)) {
1192 const struct intel_engine_execlists *el = &engine->execlists;
1210 struct intel_engine_execlists *el = &engine->execlists;
1234 static void record_preemption(struct intel_engine_execlists *execlists)
1236 (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
1246 engine->execlists.preempt_target = rq;
1261 set_timer_ms(&engine->execlists.preempt,
1275 struct intel_engine_execlists * const execlists = &engine->execlists;
1277 struct i915_request **port = execlists->pending;
1278 struct i915_request ** const last_port = port + execlists->port_mask;
1317 active = execlists->active;
1329 record_preemption(execlists);
1351 str_yes_no(timer_expired(&execlists->timer)),
1355 str_yes_no(timeslice_yield(execlists, last)));
1373 cancel_timer(&execlists->timer);
1449 rb_erase_cached(rb, &execlists->virtual);
1603 execlists->pending,
1604 (port - execlists->pending) * sizeof(*port))) {
1606 while (port-- != execlists->pending)
1607 execlists_schedule_in(*port, port - execlists->pending);
1609 WRITE_ONCE(execlists->yield, -1);
1614 while (port-- != execlists->pending)
1616 *execlists->pending = NULL;
1641 cancel_port_requests(struct intel_engine_execlists * const execlists,
1646 for (port = execlists->pending; *port; port++)
1648 clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending));
1651 for (port = xchg(&execlists->active, execlists->pending); *port; port++)
1653 clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
1656 WRITE_ONCE(execlists->active, execlists->inflight);
1659 GEM_BUG_ON(execlists->pending[0]);
1660 cancel_timer(&execlists->timer);
1661 cancel_timer(&execlists->preempt);
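The cancel_port_requests() hits (lines 1641-1661) expose the port bookkeeping that threads through this whole file: pending[] is the set handed to hardware but not yet acknowledged, inflight[] is the set the hardware has promoted, and execlists->active points at whichever array readers should walk. On a reset both sets are cancelled and active is parked back on inflight. A reduced model of the promote step (all names are stand-ins for the real intel_engine_execlists fields):

	#include <string.h>

	#define PORTS 2

	struct request_model;	/* opaque stand-in for struct i915_request */

	struct ports_model {
		struct request_model *inflight[PORTS + 1];	/* NULL terminated */
		struct request_model *pending[PORTS + 1];
		struct request_model **active;			/* what hw is running */
	};

	static void promote_model(struct ports_model *p)
	{
		/* The driver first points active at pending (line 1927) so
		 * the set stays walkable while it is copied into inflight. */
		p->active = p->pending;
		memcpy(p->inflight, p->pending, sizeof(p->pending));
		p->active = p->inflight;	/* readers now walk the new set */
		p->pending[0] = NULL;		/* free the staging slot */
	}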
1767 int idx = csb - engine->execlists.csb_status;
1819 struct intel_engine_execlists * const execlists = &engine->execlists;
1820 u64 * const buf = execlists->csb_status;
1821 const u8 num_entries = execlists->csb_size;
1826 * As we modify our execlists state tracking we require exclusive
1843 head = execlists->csb_head;
1844 tail = READ_ONCE(*execlists->csb_write);
1864 execlists->csb_head = tail;
1917 struct i915_request * const *old = execlists->active;
1919 if (GEM_WARN_ON(!*execlists->pending)) {
1920 execlists->error_interrupt |= ERROR_CSB;
1927 WRITE_ONCE(execlists->active, execlists->pending);
1931 trace_ports(execlists, "preempted", old);
1936 GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
1937 copy_ports(execlists->inflight,
1938 execlists->pending,
1939 execlists_num_ports(execlists));
1941 WRITE_ONCE(execlists->active, execlists->inflight);
1946 WRITE_ONCE(execlists->pending[0], NULL);
1948 if (GEM_WARN_ON(!*execlists->active)) {
1949 execlists->error_interrupt |= ERROR_CSB;
1954 trace_ports(execlists, "completed", execlists->active);
1967 !__i915_request_is_complete(*execlists->active)) {
1968 struct i915_request *rq = *execlists->active;
1995 *inactive++ = *execlists->active++;
1997 GEM_BUG_ON(execlists->active - execlists->inflight >
1998 execlists_num_ports(execlists));
2020 if (*prev != *execlists->active) { /* elide lite-restores */
2035 if (*execlists->active)
2036 active_ce = (*execlists->active)->context;
2043 new_timeslice(execlists);
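The process_csb() cluster (lines 1819-2043) drains the context status buffer, a ring of csb_size events written by hardware. The driver keeps a shadow read pointer (csb_head), snapshots the hardware write pointer (*csb_write), and decodes every entry in between as either a promotion (pending becomes active) or a completion. A simplified model of the ring walk, with decoding reduced to a callback:

	#include <stdint.h>

	struct csb_model {
		const uint64_t *status;		/* ring of csb_size events */
		const uint32_t *csb_write;	/* hw write pointer */
		uint8_t csb_head;
		uint8_t csb_size;
	};

	static void process_csb_model(struct csb_model *c,
				      void (*decode)(uint64_t event))
	{
		uint8_t head = c->csb_head;
		/* the driver reads this once with READ_ONCE() */
		uint8_t tail = (uint8_t)*c->csb_write;

		/* consume entries head+1 .. tail inclusive */
		while (head != tail) {
			if (++head == c->csb_size)
				head = 0;	/* ring wrap */
			decode(c->status[head]);
		}

		c->csb_head = head;
	}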
2292 const struct intel_engine_execlists * const el = &engine->execlists;
2356 * Remove the request from the execlists queue, and take ownership
2361 * By removing them from the execlists queue, we also remove the
2415 const struct timer_list *t = &engine->execlists.preempt;
2423 return engine->execlists.pending[0];
2443 const struct i915_request *rq = *engine->execlists.active;
2455 cancel_timer(&engine->execlists.preempt);
2456 if (rq == engine->execlists.preempt_target)
2457 engine->execlists.error_interrupt |= ERROR_PREEMPT;
2459 set_timer_ms(&engine->execlists.preempt,
2463 if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
2467 if (engine->execlists.error_interrupt & GENMASK(15, 0))
2469 else if (engine->execlists.error_interrupt & ERROR_CSB)
2471 else if (engine->execlists.error_interrupt & ERROR_PREEMPT)
2476 engine->execlists.error_interrupt = 0;
2480 if (!engine->execlists.pending[0]) {
2504 WRITE_ONCE(engine->execlists.error_interrupt, eir);
2510 WRITE_ONCE(engine->execlists.yield,
2513 engine->execlists.yield);
2514 if (del_timer(&engine->execlists.timer))
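The interrupt-side hits at lines 2415-2514 include the lost-preemption check: preempt_target records the request the preemption timer was armed against, and if, once that timer has expired, the same request is still at the head of the active ports, the preemption is deemed to have failed and ERROR_PREEMPT is latched for the reset path. Condensed into a stand-alone sketch (types and the error bit are stand-ins, the condition mirrors lines 2443-2457):

	struct preempt_model {
		const void *active_rq;		/* *execlists.active */
		const void *preempt_target;	/* request the timer was armed for */
		int timer_expired;		/* preempt timer already fired? */
		unsigned int error_interrupt;
	};

	#define MODEL_ERROR_PREEMPT (1u << 30)	/* stand-in bit */

	static void preempt_check_model(struct preempt_model *p)
	{
		/* Still running the request we tried to preempt away from
		 * after the timeout: declare the engine hung. */
		if (p->timer_expired && p->active_rq == p->preempt_target)
			p->error_interrupt |= MODEL_ERROR_PREEMPT;
	}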
2528 static void __execlists_kick(struct intel_engine_execlists *execlists)
2531 container_of(execlists, typeof(*engine), execlists);
2599 __execlists_kick(&engine->execlists);
2809 struct intel_engine_execlists * const execlists = &engine->execlists;
2810 const unsigned int reset_value = execlists->csb_size - 1;
2831 execlists->csb_head = reset_value;
2832 WRITE_ONCE(*execlists->csb_write, reset_value);
2836 memset(execlists->csb_status, -1, (reset_value + 1) * sizeof(u64));
2837 drm_clflush_virt_range(execlists->csb_status,
2838 execlists->csb_size *
2839 sizeof(execlists->csb_status));
2846 GEM_BUG_ON(READ_ONCE(*execlists->csb_write) != reset_value);
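reset_csb_pointers() (lines 2809-2846) re-arms the ring after a reset: both the shadow head and the hardware write pointer are parked on the last slot (csb_size - 1), so the first real event lands at index 0, and every entry is poisoned with -1 so stale-looking slots can be told apart from events the GPU actually wrote. A model under the same convention:

	#include <stdint.h>
	#include <string.h>

	static void reset_csb_model(uint64_t *status, volatile uint32_t *csb_write,
				    uint8_t *csb_head, uint8_t csb_size)
	{
		const uint8_t reset_value = csb_size - 1;

		/* park both pointers on the final entry */
		*csb_head = reset_value;
		*csb_write = reset_value;

		/* a slot that still reads back as all-ones has not been
		 * written by the GPU since the reset */
		memset(status, -1, csb_size * sizeof(*status));
	}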
2859 GEM_BUG_ON(execlists_active(&engine->execlists));
2892 engine->execlists.error_interrupt = 0;
2977 * to a second via its execlists->tasklet *just* as we are
2979 * Turning off the execlists->tasklet until the reset is over
3007 engine->execlists.reset_ccid = active_ccid(engine);
3013 struct intel_engine_execlists * const execlists = &engine->execlists;
3015 drm_clflush_virt_range(execlists->csb_write,
3016 sizeof(execlists->csb_write[0]));
3038 rq = active_context(engine, engine->execlists.reset_ccid);
3106 struct intel_engine_execlists * const execlists = &engine->execlists;
3115 inactive = cancel_port_requests(execlists, inactive);
3149 struct intel_engine_execlists * const execlists = &engine->execlists;
3201 while ((rb = rb_first_cached(&execlists->virtual))) {
3205 rb_erase_cached(rb, &execlists->virtual);
3237 struct intel_engine_execlists * const execlists = &engine->execlists;
3253 __execlists_kick(execlists);
3273 cancel_timer(&engine->execlists.timer);
3274 cancel_timer(&engine->execlists.preempt);
3341 inflight = execlists_active(&engine->execlists);
3386 del_timer_sync(&engine->execlists.timer);
3387 del_timer_sync(&engine->execlists.preempt);
3404 struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
3421 struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
3542 struct intel_engine_execlists * const execlists = &engine->execlists;
3548 timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
3549 timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
3554 seqcount_init(&engine->stats.execlists.lock);
3562 execlists->submit_reg = intel_uncore_regs(uncore) +
3564 execlists->ctrl_reg = intel_uncore_regs(uncore) +
3571 execlists->submit_reg = intel_uncore_regs(uncore) +
3575 execlists->csb_status =
3578 execlists->csb_write =
3582 execlists->csb_size = GEN8_CSB_ENTRIES;
3584 execlists->csb_size = GEN11_CSB_ENTRIES;
3589 execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
3590 execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
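The setup block at lines 3542-3590 wires the generation-specific state: Gen11+ engines get the submit queue plus control register, older ones the plain ELSP mmio; csb_size becomes GEN8_CSB_ENTRIES or GEN11_CSB_ENTRIES; and the engine's class and instance are folded into ccid, which holds the upper 32 bits of the 64-bit context descriptor, hence the "- 32" rebasing of the shifts. A sketch of that packing; the shift values below are assumptions for illustration, not quoted from the i915 headers:

	#include <stdint.h>

	/* Assumed descriptor layout for illustration only. */
	#define MODEL_ENGINE_INSTANCE_SHIFT 48
	#define MODEL_ENGINE_CLASS_SHIFT 61

	static uint32_t ccid_model(uint32_t klass, uint32_t instance)
	{
		uint32_t ccid = 0;

		/* ccid is the upper dword, so rebase 64-bit shifts by -32 */
		ccid |= instance << (MODEL_ENGINE_INSTANCE_SHIFT - 32);
		ccid |= klass << (MODEL_ENGINE_CLASS_SHIFT - 32);
		return ccid;
	}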
3650 rb_erase_cached(node, &sibling->execlists.virtual);
3843 &sibling->execlists.virtual);
3855 first = rb_first_cached(&sibling->execlists.virtual) ==
3860 rb_erase_cached(&node->rb, &sibling->execlists.virtual);
3865 parent = &sibling->execlists.virtual.rb_root.rb_node;
3881 &sibling->execlists.virtual,
4016 * the execlists backend -- we push out request directly
4088 const struct intel_engine_execlists *execlists = &engine->execlists;
4141 for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) {