Lines matching defs:gt (each match is prefixed with its line number in the source file)

10 #include "gt/gen8_engine_cs.h"
11 #include "gt/intel_breadcrumbs.h"
12 #include "gt/intel_context.h"
13 #include "gt/intel_engine_heartbeat.h"
14 #include "gt/intel_engine_pm.h"
15 #include "gt/intel_engine_regs.h"
16 #include "gt/intel_gpu_commands.h"
17 #include "gt/intel_gt.h"
18 #include "gt/intel_gt_clock_utils.h"
19 #include "gt/intel_gt_irq.h"
20 #include "gt/intel_gt_pm.h"
21 #include "gt/intel_gt_regs.h"
22 #include "gt/intel_gt_requests.h"
23 #include "gt/intel_lrc.h"
24 #include "gt/intel_lrc_reg.h"
25 #include "gt/intel_mocs.h"
26 #include "gt/intel_ring.h"
401 return &ce->engine->gt->uc.guc;
1167 * All times are captured from GUCPMTIMESTAMP reg and are in gt clock domain.
1175 * 27 seconds for a gt clock frequency of 19.2 MHz).
1191 * When gt is unparked, we update the gt timestamp and start the ping
1192 * worker that updates the gt_stamp every POLL_TIME_CLKS. As long as gt
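
The comments at lines 1167-1192 describe busyness timestamps kept in the gt clock domain that wrap quickly, so a ping worker must refresh gt_stamp at least once per POLL_TIME_CLKS window. The quoted 27 seconds is consistent with a window of 2^29 gt clocks at 19.2 MHz (2^29 / 19.2e6 ≈ 27.9 s); note the 2^29 window size here is an inference from those two quoted numbers, not a value taken from the source. A standalone check of that arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Assumed window of gt clock ticks between ping-worker runs. */
    const uint64_t poll_time_clks = 1ull << 29;
    /* gt clock frequency quoted in the comment above (19.2 MHz). */
    const double gt_clock_hz = 19.2e6;

    /* ~27.9 s: the worker must run before this window elapses. */
    printf("window = %.1f s\n", poll_time_clks / gt_clock_hz);
    return 0;
}
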
1249 struct intel_guc *guc = &engine->gt->uc.guc;
1270 static u32 gpm_timestamp_shift(struct intel_gt *gt)
1275 with_intel_runtime_pm(gt->uncore->rpm, wakeref)
1276 reg = intel_uncore_read(gt->uncore, RPM_CONFIG0);
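
Lines 1275-1276 read RPM_CONFIG0 under with_intel_runtime_pm(), a scoped-wakeref helper: it takes a runtime-PM reference, runs the statement body exactly once with the device held awake, then releases the reference. A minimal userspace model of that scoped-acquire pattern, with stub names standing in for the i915 helpers (the macro shape mirrors how such scoped helpers are usually built, not necessarily i915's exact definition):

#include <stdio.h>
#include <stdint.h>

static int pm_refs; /* stands in for the device runtime-PM refcount */

static int pm_get(void) { return ++pm_refs; }
static void pm_put(int r) { (void)r; --pm_refs; }

/* Scoped acquire: the loop body runs exactly once with the ref held. */
#define with_pm(ref) \
    for ((ref) = pm_get(); (ref); pm_put(ref), (ref) = 0)

static uint32_t read_reg(void) { return 0xabcd; /* stub MMIO read */ }

int main(void)
{
    int wakeref;
    uint32_t reg = 0;

    with_pm(wakeref)
        reg = read_reg(); /* device guaranteed awake here */

    printf("reg=%#x refs=%d\n", (unsigned)reg, pm_refs);
    return 0;
}
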
1286 struct intel_gt *gt = guc_to_gt(guc);
1293 gpm_ts = intel_uncore_read64_2x32(gt->uncore, MISC_STATUS0,
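
Line 1293 composes a 64-bit GPM timestamp from two 32-bit registers with intel_uncore_read64_2x32(). The usual way to make such a paired read tear-free is to re-read the upper half until it is stable across the lower-half read. A hedged userspace model of that idiom (the retry loop is the standard technique; the exact i915 implementation may differ in details):

#include <stdint.h>
#include <stdio.h>

/* Stub 32-bit register reads; imagine these are MMIO accesses that
 * can observe the counter mid-increment. */
static uint64_t counter = 0x00000001ffffffffull;
static uint32_t read_lower(void) { return (uint32_t)counter; }
static uint32_t read_upper(void) { return (uint32_t)(counter >> 32); }

static uint64_t read64_2x32(void)
{
    uint32_t lower, upper, old_upper;
    int loops = 0;

    upper = read_upper();
    do {
        /* If the upper half changed while we sampled the lower
         * half, the pair was torn: sample again. */
        old_upper = upper;
        lower = read_lower();
        upper = read_upper();
    } while (upper != old_upper && loops++ < 2);

    return ((uint64_t)upper << 32) | lower;
}

int main(void)
{
    printf("%#llx\n", (unsigned long long)read64_2x32());
    return 0;
}
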
1306 * gt clocks. The *now parameter is retained to return the cpu time at which the
1313 struct intel_gt *gt = engine->gt;
1314 struct intel_guc *guc = &gt->uc.guc;
1326 * Synchronize with gt reset using reset_count and the
1332 in_reset = test_bit(I915_RESET_BACKOFF, &gt->reset.flags);
1338 * gt_stamp is updated by i915 only when gt is awake and the
1340 * view of activity, we query the GuC state only if gt is awake.
1342 wakeref = in_reset ? 0 : intel_gt_pm_get_if_awake(gt);
1347 * Update gt_clks, then gt timestamp to simplify the 'gt_stamp -
1352 intel_gt_pm_put_async(gt, wakeref);
1359 total = intel_gt_clock_interval_to_ns(gt, stats->total_gt_clks);
1363 total += intel_gt_clock_interval_to_ns(gt, clk);
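
Lines 1359-1363 convert accumulated gt clock ticks to nanoseconds with intel_gt_clock_interval_to_ns(). The conversion is ns = ticks * 1e9 / freq, which needs 64-bit (or wider) intermediate math. A small sketch under that assumption:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ull

/* ns = ticks * 1e9 / freq. This sketch assumes ticks stay small
 * enough that ticks * 1e9 fits in 64 bits; for very large counts a
 * 128-bit intermediate (the kernel has helpers such as
 * mul_u64_u32_div for this) avoids the overflow. */
static uint64_t clks_to_ns(uint64_t ticks, uint32_t freq_hz)
{
    return ticks * NSEC_PER_SEC / freq_hz;
}

int main(void)
{
    /* 19.2M ticks at 19.2 MHz is exactly one second. */
    printf("%llu ns\n",
           (unsigned long long)clks_to_ns(19200000, 19200000));
    return 0;
}
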
1406 * I915_RESET_BACKOFF flag and the gt->reset.mutex can be tested for is_locked.
1424 struct intel_gt *gt = guc_to_gt(guc);
1433 for_each_engine(engine, gt, id) {
1443 struct intel_gt *gt = guc_to_gt(guc);
1454 for_each_engine(engine, gt, id)
1484 struct intel_gt *gt = guc_to_gt(guc);
1491 * Ideally the busyness worker should take a gt pm wakeref because the
1492 * worker only needs to be active while gt is awake. However, the
1500 * the stats would already be updated when the gt was parked.
1507 * - If the gt was parked longer than time taken for GT timestamp to roll
1509 * the exact GT time. We only care about roll overs when the gt is
1516 wakeref = intel_runtime_pm_get_if_active(&gt->i915->runtime_pm);
1521 * Synchronize with gt reset to make sure the worker does not
1526 ret = intel_gt_reset_trylock(gt, &srcu);
1536 intel_gt_reset_unlock(gt, srcu);
1541 intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
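
The ordering in the busyness worker (lines 1484-1541) is the interesting part: it takes a runtime-PM reference only if the device is already active (if the gt was parked, the stats were already flushed at park time, so there is nothing to sample), and it only trylocks against gt reset so a reset in flight never blocks on the worker. A schematic model of that control flow, with stubs standing in for the i915 helpers:

#include <stdbool.h>
#include <stdio.h>

static bool device_active = true;    /* runtime-PM state (stub) */
static bool reset_in_flight = false;

static bool pm_get_if_active(void) { return device_active; }
static void pm_put(void) { }
static bool reset_trylock(void) { return !reset_in_flight; }
static void reset_unlock(void) { }
static void sample_busyness(void) { puts("sampled"); }

static void busyness_worker(void)
{
    /* Don't wake a parked device just to sample: the stats were
     * already saved when it parked. */
    if (!pm_get_if_active())
        return;

    /* Never block a concurrent reset: back off and rely on the
     * worker being rescheduled instead. */
    if (!reset_trylock())
        goto out_pm;

    sample_busyness();
    reset_unlock();
out_pm:
    pm_put();
}

int main(void) { busyness_worker(); return 0; }
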
1558 struct intel_gt *gt = guc_to_gt(guc);
1562 with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
1578 void intel_guc_busyness_park(struct intel_gt *gt)
1580 struct intel_guc *guc = &gt->uc.guc;
1605 void intel_guc_busyness_unpark(struct intel_gt *gt)
1607 struct intel_guc *guc = &gt->uc.guc;
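
Per the comments at lines 1191-1192 and 1500, park and unpark bracket the ping worker's lifetime: park stops the worker and takes a final sample so the stats stay valid while asleep, and unpark re-anchors the gt timestamp before restarting it. A schematic sketch of that pairing (the helper names below are illustrative stubs, not the i915 API):

#include <stdio.h>

static void cancel_delayed_worker(void)   { puts("worker cancelled"); }
static void schedule_delayed_worker(void) { puts("worker scheduled"); }
static void update_gt_timestamp(void)     { puts("gt_stamp refreshed"); }
static void sample_busyness(void)         { puts("final sample"); }

static void busyness_park(void)
{
    cancel_delayed_worker(); /* no pings while parked */
    sample_busyness();       /* stats stay valid across the park */
}

static void busyness_unpark(void)
{
    update_gt_timestamp();   /* re-anchor before the first ping */
    schedule_delayed_worker();
}

int main(void)
{
    busyness_park();
    busyness_unpark();
    return 0;
}
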
1705 for_each_engine_masked(engine, ve->gt, mask, tmp)
1753 if (intel_engine_reset_needs_wa_22011802037(engine->gt)) {
2092 struct intel_gt *gt = guc_to_gt(guc);
2115 guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
2116 guc->timestamp.shift = gpm_timestamp_shift(gt);
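
Line 2115 converts the poll window from gt clocks to jiffies: POLL_TIME_CLKS / clock_frequency yields whole seconds, the +1 rounds up after the truncating integer division, and * HZ scales to jiffies. Using the same assumed 2^29-clock window as above, the delay comes out at 28 * HZ. A worked check:

#include <stdio.h>
#include <stdint.h>

#define HZ 250 /* a common kernel tick rate; configuration-dependent */

int main(void)
{
    const uint64_t poll_time_clks = 1ull << 29; /* assumed window */
    const uint32_t clock_frequency = 19200000;  /* 19.2 MHz */

    /* Integer division truncates (27.9 -> 27), so +1 rounds the
     * delay up to the next whole second before scaling to jiffies. */
    uint64_t ping_delay = (poll_time_clks / clock_frequency + 1) * HZ;

    printf("ping_delay = %llu jiffies (%llu s)\n",
           (unsigned long long)ping_delay,
           (unsigned long long)(ping_delay / HZ));
    return 0;
}
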
2192 struct intel_guc *guc = &rq->engine->gt->uc.guc;
2658 struct intel_guc *guc = &engine->gt->uc.guc;
2734 struct intel_guc *guc = &engine->gt->uc.guc;
2803 struct intel_guc *guc = &engine->gt->uc.guc;
2866 struct intel_guc *guc = &engine->gt->uc.guc;
3196 &ce->engine->gt->i915->runtime_pm;
3249 struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm;
3348 struct intel_gt *gt = guc_to_gt(guc);
3353 GEM_BUG_ON(!intel_gt_pm_is_awake(gt));
3363 * Take a gt-pm ref and change context state to be destroyed.
3364 * NOTE: a G2H IRQ that comes after will put this gt-pm ref back
3366 __intel_gt_pm_get(gt);
3390 * As gt-pm is awake at function entry, intel_wakeref_put_async merely decrements
3393 intel_wakeref_put_async(&gt->wakeref);
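
Lines 3363-3393 describe a gt-pm refcount handoff: an extra reference is taken before the deregister is issued so the GT cannot park while the G2H response is outstanding, and the response handler later drops it. Because that extra reference keeps the count above zero, the put at line 3393 can be the async variant, which merely decrements without scheduling any park work. A refcount model of the handoff (names are illustrative stubs):

#include <stdio.h>

static int gt_pm_refs = 1; /* caller already holds one ref */

static void gt_pm_get(void)       { ++gt_pm_refs; }
static void gt_pm_put_async(void) { --gt_pm_refs; /* never the last ref here */ }

static void send_context_deregister(void)
{
    /* Hold the GT awake until the firmware confirms deregistration. */
    gt_pm_get();
    printf("deregister sent, refs=%d\n", gt_pm_refs);
}

static void g2h_done_handler(void)
{
    /* The G2H response releases the ref taken at send time. */
    gt_pm_put_async();
}

int main(void)
{
    send_context_deregister();
    g2h_done_handler(); /* later, from the response path */
    printf("after G2H, refs=%d\n", gt_pm_refs);
    return 0;
}
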
3487 struct intel_gt *gt = guc_to_gt(guc);
3501 with_intel_gt_pm(gt, wakeref)
3625 struct intel_guc *guc = &ce->engine->gt->uc.guc;
3930 for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3948 for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3957 for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3968 for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
4209 for_each_engine_masked(sibling, b->irq_engine->gt, mask, tmp)
4221 for_each_engine_masked(sibling, b->irq_engine->gt, mask, tmp)
4239 engine->gt->engine_class[engine->class][i];
4403 struct intel_gt *gt = guc_to_gt(guc);
4424 for_each_engine(engine, gt, id) {
4454 for_each_engine_masked(e, engine->gt, mask, tmp)
4492 if (IS_GFX_GT_IP_STEP(engine->gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
4547 struct intel_guc *guc = &engine->gt->uc.guc;
4656 struct intel_gt *gt = guc_to_gt(guc);
4665 with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) {
4683 struct intel_gt *gt = guc_to_gt(guc);
4686 if (GRAPHICS_VER(gt->i915) < 12)
4694 intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES, val);
4870 static bool intel_gt_is_enabled(const struct intel_gt *gt)
4873 if (intel_gt_is_wedged(gt) || !intel_irqs_enabled(gt->i915))
4882 struct intel_gt *gt = guc_to_gt(guc);
4900 if (!intel_gt_is_enabled(gt))
4942 intel_gt_is_enabled(gt)) {
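
intel_gt_is_enabled() (lines 4870-4873, used at 4900 and 4942) gates this work on the GT being in a state where the firmware can still respond: not wedged, and with device interrupts enabled so a G2H can actually arrive. A plausible completion of the guard, inferred from the two matching lines shown above (the return statements are an assumption):

static bool intel_gt_is_enabled(const struct intel_gt *gt)
{
    /* GuC submission cannot make progress on a wedged GT, and
     * without interrupts no G2H response will ever be seen. */
    if (intel_gt_is_wedged(gt) || !intel_irqs_enabled(gt->i915))
        return false;

    return true;
}
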
4995 &ce->engine->gt->i915->runtime_pm;
5099 struct intel_gt *gt = guc_to_gt(guc);
5100 struct drm_i915_private *i915 = gt->i915;
5109 for_each_engine_masked(e, ce->engine->gt, virtual_mask, tmp) {
5110 bool match = intel_guc_capture_is_matching_engine(gt, ce, e);
5132 i915_capture_error_state(gt, engine_mask, CORE_DUMP_FLAG_IS_GUC_CAPTURE);
5219 struct intel_gt *gt = guc_to_gt(guc);
5225 return gt->engine_class[engine_class][instance];
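
Lines 5219-5225 translate the class/instance pair carried in a G2H message into an engine pointer via the gt->engine_class table. Because both indices come from firmware, a bounds check before indexing is essential. A minimal userspace model of that lookup (table sizes and names are illustrative):

#include <stdio.h>
#include <stddef.h>

#define MAX_CLASS    6
#define MAX_INSTANCE 8

struct engine { const char *name; };

/* Stands in for gt->engine_class[class][instance]. */
static struct engine *engine_class[MAX_CLASS][MAX_INSTANCE];

static struct engine *lookup_engine(unsigned int class_,
                                    unsigned int instance)
{
    /* Firmware-supplied indices: validate before indexing. */
    if (class_ >= MAX_CLASS || instance >= MAX_INSTANCE)
        return NULL;

    return engine_class[class_][instance];
}

int main(void)
{
    static struct engine rcs0 = { "rcs0" };
    struct engine *e;

    engine_class[0][0] = &rcs0;
    e = lookup_engine(0, 0);
    printf("%s\n", e ? e->name : "(none)");
    return 0;
}
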
5232 struct intel_gt *gt = guc_to_gt(guc);
5251 for_each_engine_masked(engine, gt, reset_fail_mask, id)
5254 intel_gt_handle_error(gt, reset_fail_mask,
5306 struct intel_guc *guc = &engine->gt->uc.guc;
5368 struct intel_guc *guc = &engine->gt->uc.guc;
5820 guc = &siblings[0]->gt->uc.guc;
5823 ve->base.gt = siblings[0]->gt;
5911 for_each_engine_masked(engine, ve->gt, mask, tmp)