Lines Matching defs:engine

260  * intel_engine_context_size() - return the size of the context for an engine
262 * @class: engine class
264 * Each engine class may require a different amount of space for a context
267 * Return: size (in bytes) of an engine class specific context image
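Grounded in the call site at line 552 below, a minimal usage sketch; pairing it with i915_gem_object_create_internal() (line 1077) is purely illustrative, not from this file:

	struct drm_i915_gem_object *obj;
	u32 size;

	size = intel_engine_context_size(gt, engine->class);
	if (size)
		obj = i915_gem_object_create_internal(gt->i915, size);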
358 static void __sprint_engine_name(struct intel_engine_cs *engine)
361 * Before we know what the uABI name for this engine will be,
362 * we still would like to keep track of this engine in the debug logs.
365 GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u",
366 intel_engine_class_repr(engine->class),
367 engine->instance) >= sizeof(engine->name));
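For a render engine this produces a provisional name such as:

	/* Hedged example: class repr "rcs", instance 0 -> engine->name == "rcs'0";
	 * the ' appears to mark that the final uABI name is not yet assigned. */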
370 void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
374 * per-engine HWSTAM until gen6.
376 if (GRAPHICS_VER(engine->i915) < 6 && engine->class != RENDER_CLASS)
379 if (GRAPHICS_VER(engine->i915) >= 3)
380 ENGINE_WRITE(engine, RING_HWSTAM, mask);
382 ENGINE_WRITE16(engine, RING_HWSTAM, mask);
385 static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
388 intel_engine_set_hwsp_writemask(engine, ~0u);
391 static void nop_irq_handler(struct intel_engine_cs *engine, u16 iir)
454 struct intel_engine_cs *engine;
462 if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine)))
474 engine = kzalloc(sizeof(*engine), GFP_KERNEL);
475 if (!engine)
478 BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);
480 INIT_LIST_HEAD(&engine->pinned_contexts_list);
481 engine->id = id;
482 engine->legacy_idx = INVALID_ENGINE;
483 engine->mask = BIT(id);
484 engine->reset_domain = get_reset_domain(GRAPHICS_VER(gt->i915),
486 engine->i915 = i915;
487 engine->gt = gt;
488 engine->uncore = gt->uncore;
490 engine->guc_id = MAKE_GUC_ID(guc_class, info->instance);
491 engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
493 engine->irq_handler = nop_irq_handler;
495 engine->class = info->class;
496 engine->instance = info->instance;
497 engine->logical_mask = BIT(logical_instance);
498 __sprint_engine_name(engine);
500 if ((engine->class == COMPUTE_CLASS || engine->class == RENDER_CLASS) &&
501 __ffs(CCS_MASK(engine->gt) | RCS_MASK(engine->gt)) == engine->instance)
502 engine->flags |= I915_ENGINE_FIRST_RENDER_COMPUTE;
505 if (engine->class == RENDER_CLASS || engine->class == COMPUTE_CLASS) {
506 engine->flags |= I915_ENGINE_HAS_RCS_REG_STATE;
507 engine->flags |= I915_ENGINE_HAS_EU_PRIORITY;
510 engine->props.heartbeat_interval_ms =
512 engine->props.max_busywait_duration_ns =
514 engine->props.preempt_timeout_ms =
516 engine->props.stop_timeout_ms =
518 engine->props.timeslice_duration_ms =
527 if (GRAPHICS_VER(i915) == 12 && (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE))
528 engine->props.preempt_timeout_ms = CONFIG_DRM_I915_PREEMPT_TIMEOUT_COMPUTE;
533 u64 clamp = intel_clamp_##field(engine, engine->props.field); \
534 if (clamp != engine->props.field) { \
535 drm_notice(&engine->i915->drm, \
538 engine->props.field = clamp; \
550 engine->defaults = engine->props; /* never to change again */
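The backslash-continued fragment at lines 533-538 is the body of a per-property clamping macro; a hedged reconstruction of the pattern (the macro name and message wording are assumptions, the helpers are the intel_clamp_*() functions listed at lines 571-612):

#define SANITIZE_PROP(field__) do { \
	u64 clamp = intel_clamp_##field__(engine, engine->props.field__); \
	if (clamp != engine->props.field__) { \
		drm_notice(&engine->i915->drm, \
			   "clamping %s to %llu\n", #field__, clamp); \
		engine->props.field__ = clamp; \
	} \
} while (0)

	SANITIZE_PROP(heartbeat_interval_ms);
	SANITIZE_PROP(preempt_timeout_ms);
#undef SANITIZE_PROP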
552 engine->context_size = intel_engine_context_size(gt, engine->class);
553 if (WARN_ON(engine->context_size > BIT(20)))
554 engine->context_size = 0;
555 if (engine->context_size)
558 ewma__engine_latency_init(&engine->latency);
560 ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
563 intel_engine_sanitize_mmio(engine);
565 gt->engine_class[info->class][info->instance] = engine;
566 gt->engine[id] = engine;
571 u64 intel_clamp_heartbeat_interval_ms(struct intel_engine_cs *engine, u64 value)
578 u64 intel_clamp_max_busywait_duration_ns(struct intel_engine_cs *engine, u64 value)
585 u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value)
591 if (intel_guc_submission_is_wanted(gt_to_guc(engine->gt)))
599 u64 intel_clamp_stop_timeout_ms(struct intel_engine_cs *engine, u64 value)
606 u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value)
612 if (intel_guc_submission_is_wanted(gt_to_guc(engine->gt)))
620 static void __setup_engine_capabilities(struct intel_engine_cs *engine)
622 struct drm_i915_private *i915 = engine->i915;
624 if (engine->class == VIDEO_DECODE_CLASS) {
626 * HEVC support is present on the first engine instance
630 (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
631 engine->uabi_capabilities |=
635 * SFC block is present only on even logical engine
639 (engine->gt->info.vdbox_sfc_access &
640 BIT(engine->instance))) ||
641 (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
642 engine->uabi_capabilities |=
644 } else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
646 engine->gt->info.sfc_mask & BIT(engine->instance))
647 engine->uabi_capabilities |=
654 struct intel_engine_cs *engine;
657 for_each_engine(engine, gt, id)
658 __setup_engine_capabilities(engine);
667 struct intel_engine_cs *engine;
671 * Before we release the resources held by the engine, we must be certain
684 for_each_engine(engine, gt, id) {
685 if (!engine->release)
688 intel_wakeref_wait_for_idle(&engine->wakeref);
689 GEM_BUG_ON(intel_engine_pm_is_awake(engine));
691 engine->release(engine);
692 engine->release = NULL;
694 memset(&engine->reset, 0, sizeof(engine->reset));
698 void intel_engine_free_request_pool(struct intel_engine_cs *engine)
700 if (!engine->request_pool)
703 kmem_cache_free(i915_request_slab_cache(), engine->request_pool);
708 struct intel_engine_cs *engine;
714 for_each_engine(engine, gt, id) {
715 intel_engine_free_request_pool(engine);
716 kfree(engine);
717 gt->engine[id] = NULL;
833 * engine is not available for use.
844 * the blitter forcewake domain to read the engine fuses, but at the same time
847 * domains based on the full engine mask in the platform capabilities before
879 * All the workload submitted to the first engine will be shared among
888 /* Mask off all the CCS engines */
890 /* Put back in the first CCS engine */
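Lines 888/890 describe the compute-fusing policy: expose a single CCS engine and funnel all compute through it. A hedged sketch of the mask surgery (CCS0/CCS3, _CCS() and info->engine_mask follow i915 naming conventions but are not shown in this listing):

	unsigned long ccs_mask = CCS_MASK(gt);

	if (ccs_mask) {
		/* Mask off all the CCS engines... */
		info->engine_mask &= ~GENMASK(CCS3, CCS0);
		/* ...then put back only the first fused-in one. */
		info->engine_mask |= BIT(_CCS(__ffs(ccs_mask)));
	}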
1004 void intel_engine_init_execlists(struct intel_engine_cs *engine)
1006 struct intel_engine_execlists * const execlists = &engine->execlists;
1017 static void cleanup_status_page(struct intel_engine_cs *engine)
1022 intel_engine_set_hwsp_writemask(engine, ~0u);
1024 vma = fetch_and_zero(&engine->status_page.vma);
1028 if (!HWS_NEEDS_PHYSICAL(engine->i915))
1035 static int pin_ggtt_status_page(struct intel_engine_cs *engine,
1041 if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt))
1060 static int init_status_page(struct intel_engine_cs *engine)
1068 INIT_LIST_HEAD(&engine->status_page.timelines);
1077 obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
1079 gt_err(engine->gt, "Failed to allocate status page\n");
1085 vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
1094 if (!ret && !HWS_NEEDS_PHYSICAL(engine->i915))
1095 ret = pin_ggtt_status_page(engine, &ww, vma);
1105 engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
1106 engine->status_page.vma = vma;
1124 static int intel_engine_init_tlb_invalidation(struct intel_engine_cs *engine)
1151 struct drm_i915_private *i915 = engine->i915;
1152 const unsigned int instance = engine->instance;
1153 const unsigned int class = engine->class;
1166 * respective engine registers were moved to masked type. Then after the
1170 if (engine->gt->type == GT_MEDIA) {
1194 if (gt_WARN_ONCE(engine->gt, !num,
1198 if (gt_WARN_ON_ONCE(engine->gt,
1222 engine->tlb_inv.mcr = regs == xehp_regs;
1223 engine->tlb_inv.reg = reg;
1224 engine->tlb_inv.done = val;
1227 (engine->class == VIDEO_DECODE_CLASS ||
1228 engine->class == VIDEO_ENHANCEMENT_CLASS ||
1229 engine->class == COMPUTE_CLASS ||
1230 engine->class == OTHER_CLASS))
1231 engine->tlb_inv.request = _MASKED_BIT_ENABLE(val);
1233 engine->tlb_inv.request = val;
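The class check above selects between masked and plain writes. A hedged note on the encoding, assuming the usual i915 masked-register convention:

	/*
	 * Hedged: on masked registers the upper 16 bits of the write select
	 * which low bits take effect, so _MASKED_BIT_ENABLE(val) expands to
	 * roughly (val << 16) | val. Unmasked registers (the else branch)
	 * simply take 'val'.
	 */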
1238 static int engine_setup_common(struct intel_engine_cs *engine)
1242 init_llist_head(&engine->barrier_tasks);
1244 err = intel_engine_init_tlb_invalidation(engine);
1248 err = init_status_page(engine);
1252 engine->breadcrumbs = intel_breadcrumbs_create(engine);
1253 if (!engine->breadcrumbs) {
1258 engine->sched_engine = i915_sched_engine_create(ENGINE_PHYSICAL);
1259 if (!engine->sched_engine) {
1263 engine->sched_engine->private_data = engine;
1265 err = intel_engine_init_cmd_parser(engine);
1269 intel_engine_init_execlists(engine);
1270 intel_engine_init__pm(engine);
1271 intel_engine_init_retire(engine);
1274 engine->sseu =
1275 intel_sseu_from_device_info(&engine->gt->info.sseu);
1277 intel_engine_init_workarounds(engine);
1278 intel_engine_init_whitelist(engine);
1279 intel_engine_init_ctx_wa(engine);
1281 if (GRAPHICS_VER(engine->i915) >= 12)
1282 engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
1287 i915_sched_engine_put(engine->sched_engine);
1289 intel_breadcrumbs_put(engine->breadcrumbs);
1291 cleanup_status_page(engine);
1303 struct intel_engine_cs *engine = ce->engine;
1307 GEM_BUG_ON(!engine->gt->scratch);
1313 frame->rq.i915 = engine->i915;
1314 frame->rq.engine = engine;
1328 spin_lock_irq(&engine->sched_engine->lock);
1330 dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
1332 spin_unlock_irq(&engine->sched_engine->lock);
1342 intel_engine_create_pinned_context(struct intel_engine_cs *engine,
1352 ce = intel_context_create(engine);
1370 list_add_tail(&ce->pinned_contexts_link, &engine->pinned_contexts_list);
1385 struct intel_engine_cs *engine = ce->engine;
1386 struct i915_vma *hwsp = engine->status_page.vma;
1400 create_ggtt_bind_context(struct intel_engine_cs *engine)
1408 return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_512K,
1414 create_kernel_context(struct intel_engine_cs *engine)
1418 return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
1424 * engine_init_common - initialize engine state which might require hw access
1425 * @engine: Engine to initialize.
1427 * Initializes @engine structure members shared between legacy and execlists
1430 * Typically done at later stages of submission mode specific engine setup.
1434 static int engine_init_common(struct intel_engine_cs *engine)
1439 engine->set_default_submission(engine);
1449 ce = create_kernel_context(engine);
1453 * Create a separate pinned context for GGTT update with blitter engine
1455 * engines as well but BCS should be less busy engine so pick that for
1458 if (i915_ggtt_require_binder(engine->i915) && engine->id == BCS0) {
1459 bce = create_ggtt_bind_context(engine);
1470 engine->emit_fini_breadcrumb_dw = ret;
1471 engine->kernel_context = ce;
1472 engine->bind_context = bce;
1486 int (*setup)(struct intel_engine_cs *engine);
1487 struct intel_engine_cs *engine;
1502 for_each_engine(engine, gt, id) {
1503 err = engine_setup_common(engine);
1507 err = setup(engine);
1509 intel_engine_cleanup_common(engine);
1514 GEM_BUG_ON(engine->release == NULL);
1516 err = engine_init_common(engine);
1520 intel_engine_add_user(engine);
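Lines 1502-1520 fix the per-engine bring-up order; a hedged outline of that loop (the comments are mine):

	for_each_engine(engine, gt, id) {
		err = engine_setup_common(engine);	/* sw state: status page, breadcrumbs, scheduler */
		if (err)
			goto cleanup;

		err = setup(engine);			/* submission backend: ring/execlists/GuC */
		if (err) {
			intel_engine_cleanup_common(engine);
			goto cleanup;
		}

		err = engine_init_common(engine);	/* kernel context, fini-breadcrumb sizing */
		if (err)
			goto cleanup;

		intel_engine_add_user(engine);		/* publish to the uABI engine list */
	}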
1527 * intel_engine_cleanup_common - cleans up the engine state created by
1529 * @engine: Engine to cleanup.
1533 void intel_engine_cleanup_common(struct intel_engine_cs *engine)
1535 GEM_BUG_ON(!list_empty(&engine->sched_engine->requests));
1537 i915_sched_engine_put(engine->sched_engine);
1538 intel_breadcrumbs_put(engine->breadcrumbs);
1540 intel_engine_fini_retire(engine);
1541 intel_engine_cleanup_cmd_parser(engine);
1543 if (engine->default_state)
1544 fput(engine->default_state);
1546 if (engine->kernel_context)
1547 intel_engine_destroy_pinned_context(engine->kernel_context);
1549 if (engine->bind_context)
1550 intel_engine_destroy_pinned_context(engine->bind_context);
1553 GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
1554 cleanup_status_page(engine);
1556 intel_wa_list_free(&engine->ctx_wa_list);
1557 intel_wa_list_free(&engine->wa_list);
1558 intel_wa_list_free(&engine->whitelist);
1562 * intel_engine_resume - re-initializes the HW state of the engine
1563 * @engine: Engine to resume.
1567 int intel_engine_resume(struct intel_engine_cs *engine)
1569 intel_engine_apply_workarounds(engine);
1570 intel_engine_apply_whitelist(engine);
1572 return engine->resume(engine);
1575 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
1577 struct drm_i915_private *i915 = engine->i915;
1582 acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
1584 acthd = ENGINE_READ(engine, RING_ACTHD);
1586 acthd = ENGINE_READ(engine, ACTHD);
1591 u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
1595 if (GRAPHICS_VER(engine->i915) >= 8)
1596 bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
1598 bbaddr = ENGINE_READ(engine, RING_BBADDR);
1603 static unsigned long stop_timeout(const struct intel_engine_cs *engine)
1610 * the engine to quiesce. We've stopped submission to the engine, and
1612 * leave the engine idle. So they should not be caught unaware by
1615 return READ_ONCE(engine->props.stop_timeout_ms);
1618 static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
1622 struct intel_uncore *uncore = engine->uncore;
1623 const i915_reg_t mode = RING_MI_MODE(engine->mmio_base);
1632 if (intel_engine_reset_needs_wa_22011802037(engine->gt))
1633 intel_uncore_write_fw(uncore, RING_MODE_GEN7(engine->mmio_base),
1636 err = __intel_wait_for_register_fw(engine->uncore, mode,
1647 int intel_engine_stop_cs(struct intel_engine_cs *engine)
1651 if (GRAPHICS_VER(engine->i915) < 3)
1654 ENGINE_TRACE(engine, "\n");
1667 if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) {
1668 ENGINE_TRACE(engine,
1670 ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR,
1671 ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR);
1678 if ((ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) !=
1679 (ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR))
1686 void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
1688 ENGINE_TRACE(engine, "\n");
1690 ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
1693 static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine)
1717 if (!_reg[engine->id].reg)
1720 val = intel_uncore_read(engine->uncore, _reg[engine->id]);
1754 void intel_engine_wait_for_pending_mi_fw(struct intel_engine_cs *engine)
1756 u32 fw_pending = __cs_pending_mi_force_wakes(engine);
1759 __gpm_wait_for_fw_complete(engine->gt, fw_pending);
1763 void intel_engine_get_instdone(const struct intel_engine_cs *engine,
1766 struct drm_i915_private *i915 = engine->i915;
1767 struct intel_uncore *uncore = engine->uncore;
1768 u32 mmio_base = engine->mmio_base;
1779 if (engine->id != RCS0)
1791 for_each_ss_steering(iter, engine->gt, slice, subslice) {
1793 intel_gt_mcr_read(engine->gt,
1797 intel_gt_mcr_read(engine->gt,
1803 for_each_ss_steering(iter, engine->gt, slice, subslice)
1805 intel_gt_mcr_read(engine->gt,
1813 if (engine->id != RCS0)
1825 if (engine->id == RCS0)
1834 static bool ring_is_idle(struct intel_engine_cs *engine)
1838 if (I915_SELFTEST_ONLY(!engine->mmio_base))
1841 if (!intel_engine_pm_get_if_awake(engine))
1845 if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) !=
1846 (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR))
1850 if (GRAPHICS_VER(engine->i915) > 2 &&
1851 !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
1854 intel_engine_pm_put(engine);
1859 void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync)
1861 struct tasklet_struct *t = &engine->sched_engine->tasklet;
1881 * intel_engine_is_idle() - Report if the engine has finished processing all work
1882 * @engine: the intel_engine_cs
1885 * to hardware, and that the engine is idle.
1887 bool intel_engine_is_idle(struct intel_engine_cs *engine)
1890 if (intel_gt_is_wedged(engine->gt))
1893 if (!intel_engine_pm_is_awake(engine))
1897 intel_synchronize_hardirq(engine->i915);
1898 intel_engine_flush_submission(engine);
1901 if (!i915_sched_engine_is_empty(engine->sched_engine))
1905 return ring_is_idle(engine);
1910 struct intel_engine_cs *engine;
1924 for_each_engine(engine, gt, id) {
1925 if (!intel_engine_is_idle(engine))
1932 bool intel_engine_irq_enable(struct intel_engine_cs *engine)
1934 if (!engine->irq_enable)
1938 spin_lock(engine->gt->irq_lock);
1939 engine->irq_enable(engine);
1940 spin_unlock(engine->gt->irq_lock);
1945 void intel_engine_irq_disable(struct intel_engine_cs *engine)
1947 if (!engine->irq_disable)
1951 spin_lock(engine->gt->irq_lock);
1952 engine->irq_disable(engine);
1953 spin_unlock(engine->gt->irq_lock);
1958 struct intel_engine_cs *engine;
1961 for_each_engine(engine, gt, id) {
1962 if (engine->sanitize)
1963 engine->sanitize(engine);
1965 engine->set_default_submission(engine);
1969 bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
1971 switch (GRAPHICS_VER(engine->i915)) {
1976 return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
1978 return !IS_I965G(engine->i915); /* who knows! */
1980 return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
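A hedged usage sketch: callers (typically selftests) use this predicate to skip MI_STORE_DWORD-based paths on the affected platforms:

	if (!intel_engine_can_store_dword(engine))
		return 0;	/* hedged: quietly skip rather than fail */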
1991 * Even though we are holding the engine->sched_engine->lock here, there
2069 static void intel_engine_print_registers(struct intel_engine_cs *engine,
2072 struct drm_i915_private *i915 = engine->i915;
2073 struct intel_engine_execlists * const execlists = &engine->execlists;
2076 if (engine->id == RENDER_CLASS && IS_GRAPHICS_VER(i915, 4, 7))
2077 drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
2080 ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
2082 ENGINE_READ(engine, RING_EXECLIST_STATUS_LO));
2085 ENGINE_READ(engine, RING_START));
2087 ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR);
2089 ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR);
2091 ENGINE_READ(engine, RING_CTL),
2092 ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
2093 if (GRAPHICS_VER(engine->i915) > 2) {
2095 ENGINE_READ(engine, RING_MI_MODE),
2096 ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
2101 ENGINE_READ(engine, RING_IMR));
2103 ENGINE_READ(engine, RING_ESR));
2105 ENGINE_READ(engine, RING_EMR));
2107 ENGINE_READ(engine, RING_EIR));
2110 addr = intel_engine_get_active_head(engine);
2113 addr = intel_engine_get_last_batch_head(engine);
2117 addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
2119 addr = ENGINE_READ(engine, RING_DMA_FADD);
2121 addr = ENGINE_READ(engine, DMA_FADD_I8XX);
2126 ENGINE_READ(engine, RING_IPEIR));
2128 ENGINE_READ(engine, RING_IPEHR));
2130 drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR));
2131 drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
2134 if (HAS_EXECLISTS(i915) && !intel_engine_uses_guc(engine)) {
2137 &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
2143 str_yes_no(test_bit(TASKLET_STATE_SCHED, &engine->sched_engine->tasklet.state)),
2144 str_enabled_disabled(!atomic_read(&engine->sched_engine->tasklet.count)),
2145 repr_timer(&engine->execlists.preempt),
2146 repr_timer(&engine->execlists.timer));
2152 ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
2153 ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
2168 i915_sched_engine_active_lock_bh(engine->sched_engine);
2199 i915_sched_engine_active_unlock_bh(engine->sched_engine);
2202 ENGINE_READ(engine, RING_PP_DIR_BASE));
2204 ENGINE_READ(engine, RING_PP_DIR_BASE_READ));
2206 ENGINE_READ(engine, RING_PP_DIR_DCLV));
2249 static void print_properties(struct intel_engine_cs *engine,
2257 .offset = offsetof(typeof(engine->props), x), \
2275 read_ul(&engine->props, p->offset),
2276 read_ul(&engine->defaults, p->offset));
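print_properties() walks a static {name, offset} table built with offsetof() (line 2257) and prints each live value beside the default snapshotted at line 550. A hedged sketch of the accessor implied by lines 2275-2276:

static u64 read_ul(const void *p, size_t offset)
{
	/* Hedged: fetch the u64 property 'offset' bytes into 'p'. */
	return *(const u64 *)((const u8 *)p + offset);
}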
2327 msg = "\t\tactive on engine";
2335 static void engine_dump_active_requests(struct intel_engine_cs *engine,
2342 * No need for an engine->irq_seqno_barrier() before the seqno reads.
2348 intel_engine_get_hung_entity(engine, &hung_ce, &hung_rq);
2357 if (intel_uc_uses_guc_submission(&engine->gt->uc))
2358 intel_guc_dump_active_requests(engine, hung_rq, m);
2360 intel_execlists_dump_active_requests(engine, hung_rq, m);
2366 void intel_engine_dump(struct intel_engine_cs *engine,
2370 struct i915_gpu_error * const error = &engine->i915->gpu_error;
2383 if (intel_gt_is_wedged(engine->gt))
2386 drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
2388 str_yes_no(!llist_empty(&engine->barrier_tasks)));
2390 ewma__engine_latency_read(&engine->latency));
2391 if (intel_engine_supports_stats(engine))
2393 ktime_to_ms(intel_engine_get_busy_time(engine,
2396 engine->fw_domain, READ_ONCE(engine->fw_active));
2399 rq = READ_ONCE(engine->heartbeat.systole);
2405 i915_reset_engine_count(error, engine),
2407 print_properties(engine, m);
2409 engine_dump_active_requests(engine, m);
2411 drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
2412 wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
2414 intel_engine_print_registers(engine, m);
2415 intel_runtime_pm_put(engine->uncore->rpm, wakeref);
2420 intel_execlists_show_requests(engine, m, i915_request_show, 8);
2423 hexdump(m, engine->status_page.addr, PAGE_SIZE);
2425 drm_printf(m, "Idle? %s\n", str_yes_no(intel_engine_is_idle(engine)));
2427 intel_engine_print_breadcrumbs(engine, m);
2431 * intel_engine_get_busy_time() - Return current accumulated engine busyness
2432 * @engine: engine to report on
2435 * Returns accumulated time @engine was busy since engine stats were enabled.
2437 ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
2439 return engine->busyness(engine, now);
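Mirroring the dump path at lines 2391-2393, a caller gates on backend support before sampling the accumulated busy time; a minimal sketch (the unused 'now' out-parameter is an assumption of this example):

	ktime_t now_unused;

	if (intel_engine_supports_stats(engine))
		drm_printf(m, "\tRuntime: %lldms\n",
			   ktime_to_ms(intel_engine_get_busy_time(engine,
								  &now_unused)));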
2456 static struct i915_request *engine_execlist_find_hung_request(struct intel_engine_cs *engine)
2465 GEM_BUG_ON(intel_uc_uses_guc_submission(&engine->gt->uc));
2468 * We are called by the error capture, by reset, and to dump engine
2474 * not need an engine->irq_seqno_barrier() before the seqno reads.
2478 lockdep_assert_held(&engine->sched_engine->lock);
2481 request = execlists_active(&engine->execlists);
2496 list_for_each_entry(request, &engine->sched_engine->requests,
2508 void intel_engine_get_hung_entity(struct intel_engine_cs *engine,
2513 *ce = intel_engine_get_hung_context(engine);
2515 intel_engine_clear_hung_context(engine);
2525 if (intel_uc_uses_guc_submission(&engine->gt->uc))
2528 spin_lock_irqsave(&engine->sched_engine->lock, flags);
2529 *rq = engine_execlist_find_hung_request(engine);
2532 spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
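Putting lines 2348 and 2508-2532 together, capture code retrieves the hung entity as below; hedged: the GuC path hands back a context already flagged hung, while the execlists path scans under sched_engine->lock and appears to return a referenced request the caller must put:

	struct intel_context *hung_ce = NULL;
	struct i915_request *hung_rq = NULL;

	intel_engine_get_hung_entity(engine, &hung_ce, &hung_rq);
	/* dump/inspect hung_ce and hung_rq here */
	if (hung_rq)
		i915_request_put(hung_rq);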
2535 void xehp_enable_ccs_engines(struct intel_engine_cs *engine)
2540 * so for simplicity we'll take care of this in the RCS engine's
2545 if (!CCS_MASK(engine->gt))
2548 intel_uncore_write(engine->uncore, GEN12_RCU_MODE,