Lines matching refs: guc (i915 GuC support, drivers/gpu/drm/i915/gt/uc/intel_guc.c)

42 void intel_guc_notify(struct intel_guc *guc)
44 struct intel_gt *gt = guc_to_gt(guc);
52 intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER);
55 static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
57 GEM_BUG_ON(!guc->send_regs.base);
58 GEM_BUG_ON(!guc->send_regs.count);
59 GEM_BUG_ON(i >= guc->send_regs.count);
61 return _MMIO(guc->send_regs.base + 4 * i);
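
For context on the arithmetic above: the send registers are 32-bit scratch registers packed 4 bytes apart, so dword i of an MMIO message lands at send_regs.base + 4 * i. A minimal standalone sketch of the same computation; the base and count values below are illustrative, not taken from the kernel headers:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	struct send_regs { uint32_t base; uint32_t count; };

	/* Mirrors guc_send_reg(): bounds-check the index, then step in
	 * 4-byte (one dword) increments from the scratch-register base. */
	static uint32_t send_reg_offset(const struct send_regs *r, uint32_t i)
	{
		assert(i < r->count);	/* the GEM_BUG_ON() in the real code */
		return r->base + 4 * i;
	}

	int main(void)
	{
		struct send_regs r = { .base = 0x190240, .count = 14 }; /* illustrative */
		uint32_t i;

		for (i = 0; i < 4; i++)
			printf("msg dword %u -> MMIO 0x%x\n", i, send_reg_offset(&r, i));
		return 0;
	}
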
64 void intel_guc_init_send_regs(struct intel_guc *guc)
66 struct intel_gt *gt = guc_to_gt(guc);
70 GEM_BUG_ON(!guc->send_regs.base);
71 GEM_BUG_ON(!guc->send_regs.count);
73 for (i = 0; i < guc->send_regs.count; i++) {
75 guc_send_reg(guc, i),
78 guc->send_regs.fw_domains = fw_domains;
81 static void gen9_reset_guc_interrupts(struct intel_guc *guc)
83 struct intel_gt *gt = guc_to_gt(guc);
92 static void gen9_enable_guc_interrupts(struct intel_guc *guc)
94 struct intel_gt *gt = guc_to_gt(guc);
99 guc_WARN_ON_ONCE(guc, intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
104 guc->interrupts.enabled = true;
107 static void gen9_disable_guc_interrupts(struct intel_guc *guc)
109 struct intel_gt *gt = guc_to_gt(guc);
112 guc->interrupts.enabled = false;
121 gen9_reset_guc_interrupts(guc);
132 static void gen11_reset_guc_interrupts(struct intel_guc *guc)
134 struct intel_gt *gt = guc_to_gt(guc);
141 static void gen11_enable_guc_interrupts(struct intel_guc *guc)
143 struct intel_gt *gt = guc_to_gt(guc);
149 guc->interrupts.enabled = true;
152 static void gen11_disable_guc_interrupts(struct intel_guc *guc)
154 struct intel_gt *gt = guc_to_gt(guc);
156 guc->interrupts.enabled = false;
159 gen11_reset_guc_interrupts(guc);
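
Note the ordering in both the gen9 and gen11 disable paths above: the enabled flag is cleared first, then the reset helper drops anything already latched, so a stale GuC interrupt cannot be acted on after disable returns. A toy model of that pairing; the locking the real code holds around these steps is elided and all names here are illustrative:

	#include <stdbool.h>

	struct toy_irqs { bool enabled; unsigned int latched; };

	static void toy_irq_reset(struct toy_irqs *ir)  { ir->latched = 0; }
	static void toy_irq_enable(struct toy_irqs *ir) { ir->enabled = true; }

	static void toy_irq_disable(struct toy_irqs *ir)
	{
		ir->enabled = false;	/* stop acting on new events first... */
		toy_irq_reset(ir);	/* ...then drop anything already pending */
	}

	int main(void)
	{
		struct toy_irqs ir = { 0 };

		toy_irq_enable(&ir);
		ir.latched = 1;		/* pretend an event arrived */
		toy_irq_disable(&ir);	/* leaves enabled == false, latched == 0 */
		return 0;
	}
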
164 struct intel_guc *guc = container_of(w, struct intel_guc, dead_guc_worker);
165 struct intel_gt *gt = guc_to_gt(guc);
166 unsigned long last = guc->last_dead_guc_jiffies;
173 guc->last_dead_guc_jiffies = jiffies;
177 void intel_guc_init_early(struct intel_guc *guc)
179 struct intel_gt *gt = guc_to_gt(guc);
182 intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC, true);
183 intel_guc_ct_init_early(&guc->ct);
184 intel_guc_log_init_early(&guc->log);
185 intel_guc_submission_init_early(guc);
186 intel_guc_slpc_init_early(&guc->slpc);
187 intel_guc_rc_init_early(guc);
189 INIT_WORK(&guc->dead_guc_worker, guc_dead_worker_func);
191 mutex_init(&guc->send_mutex);
192 spin_lock_init(&guc->irq_lock);
194 guc->interrupts.reset = gen11_reset_guc_interrupts;
195 guc->interrupts.enable = gen11_enable_guc_interrupts;
196 guc->interrupts.disable = gen11_disable_guc_interrupts;
198 guc->notify_reg = MEDIA_GUC_HOST_INTERRUPT;
199 guc->send_regs.base = i915_mmio_reg_offset(MEDIA_SOFT_SCRATCH(0));
201 guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
202 guc->send_regs.base = i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
205 guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
208 guc->notify_reg = GUC_SEND_INTERRUPT;
209 guc->interrupts.reset = gen9_reset_guc_interrupts;
210 guc->interrupts.enable = gen9_enable_guc_interrupts;
211 guc->interrupts.disable = gen9_disable_guc_interrupts;
212 guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
213 guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
217 intel_guc_enable_msg(guc, INTEL_GUC_RECV_MSG_EXCEPTION |
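
The intel_guc_init_early() matches above show the one-time platform dispatch: interrupt callbacks and the scratch-register bank are selected at init, and everything later goes through those pointers. A hedged reconstruction of that shape; the version check, the register offsets, and the count are assumptions for illustration, and only the vfunc-plus-register-bank pattern is taken from the lines above:

	#include <stdint.h>
	#include <stdio.h>

	struct guc {
		void (*irq_reset)(struct guc *);
		void (*irq_enable)(struct guc *);
		void (*irq_disable)(struct guc *);
		uint32_t notify_reg;
		uint32_t send_base;
		uint32_t send_count;
	};

	static void gen9_reset(struct guc *g)    { printf("gen9 reset\n"); }
	static void gen9_enable(struct guc *g)   { printf("gen9 enable\n"); }
	static void gen9_disable(struct guc *g)  { printf("gen9 disable\n"); }
	static void gen11_reset(struct guc *g)   { printf("gen11 reset\n"); }
	static void gen11_enable(struct guc *g)  { printf("gen11 enable\n"); }
	static void gen11_disable(struct guc *g) { printf("gen11 disable\n"); }

	static void guc_init_early(struct guc *g, int graphics_ver)
	{
		if (graphics_ver >= 11) {
			g->irq_reset = gen11_reset;
			g->irq_enable = gen11_enable;
			g->irq_disable = gen11_disable;
			g->notify_reg = 0x1901f0; /* GEN11_GUC_HOST_INTERRUPT (illustrative) */
			g->send_base = 0x190240;  /* GEN11_SOFT_SCRATCH(0) (illustrative) */
			g->send_count = 14;       /* GEN11_SOFT_SCRATCH_COUNT (illustrative) */
		} else {
			g->irq_reset = gen9_reset;
			g->irq_enable = gen9_enable;
			g->irq_disable = gen9_disable;
			g->notify_reg = 0xc4c8;   /* GUC_SEND_INTERRUPT (illustrative) */
			g->send_base = 0xa180;    /* SOFT_SCRATCH(0) (illustrative) */
			g->send_count = 8;        /* GUC_MAX_MMIO_MSG_LEN (illustrative) */
		}
	}

	int main(void)
	{
		struct guc g;

		guc_init_early(&g, 12);
		g.irq_enable(&g);	/* dispatches to the gen11 variant */
		return 0;
	}
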
221 void intel_guc_init_late(struct intel_guc *guc)
223 intel_guc_ads_init_late(guc);
226 static u32 guc_ctl_debug_flags(struct intel_guc *guc)
228 u32 level = intel_guc_log_get_level(&guc->log);
240 static u32 guc_ctl_feature_flags(struct intel_guc *guc)
244 if (!intel_guc_submission_is_used(guc))
247 if (intel_guc_slpc_is_used(guc))
253 static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
255 struct intel_guc_log *log = &guc->log;
260 offset = intel_guc_ggtt_offset(guc, log->vma) >> PAGE_SHIFT;
274 static u32 guc_ctl_ads_flags(struct intel_guc *guc)
276 u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
282 static u32 guc_ctl_wa_flags(struct intel_guc *guc)
284 struct intel_gt *gt = guc_to_gt(guc);
323 if (GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 7, 0)) {
331 static u32 guc_ctl_devid(struct intel_guc *guc)
333 struct drm_i915_private *i915 = guc_to_i915(guc);
343 static void guc_init_params(struct intel_guc *guc)
345 u32 *params = guc->params;
348 BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
350 params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
351 params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
352 params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
353 params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
354 params[GUC_CTL_WA] = guc_ctl_wa_flags(guc);
355 params[GUC_CTL_DEVID] = guc_ctl_devid(guc);
358 guc_dbg(guc, "param[%2d] = %#x\n", i, params[i]);
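
guc_init_params() fills a fixed-size dword array that is later handed to the firmware through scratch registers, and the BUILD_BUG_ON() pins the array size to GUC_CTL_MAX_DWORDS at compile time. A standalone sketch of that assembly; the indices, dword count, and the placeholder values are illustrative, including the devid<<16|revid packing guessed for the DEVID slot:

	#include <stdint.h>
	#include <stdio.h>

	#define CTL_MAX_DWORDS 6	/* stand-in for GUC_CTL_MAX_DWORDS */
	enum { CTL_LOG_PARAMS, CTL_FEATURE, CTL_DEBUG, CTL_ADS, CTL_WA, CTL_DEVID };

	struct toy_guc { uint32_t params[CTL_MAX_DWORDS]; };

	static void init_params(struct toy_guc *g)
	{
		int i;

		/* compile-time size check, like the BUILD_BUG_ON() above */
		_Static_assert(sizeof(((struct toy_guc *)0)->params) ==
			       CTL_MAX_DWORDS * sizeof(uint32_t),
			       "params must stay exactly CTL_MAX_DWORDS dwords");

		g->params[CTL_LOG_PARAMS] = 0x1000;	/* placeholder flag words */
		g->params[CTL_FEATURE]    = 0x2;
		g->params[CTL_DEBUG]      = 0x0;
		g->params[CTL_ADS]        = 0x400;
		g->params[CTL_WA]         = 0x0;
		g->params[CTL_DEVID]      = 0x9a490003; /* devid<<16 | revid (assumption) */

		for (i = 0; i < CTL_MAX_DWORDS; i++)
			printf("param[%2d] = %#x\n", i, g->params[i]);
	}

	int main(void)
	{
		struct toy_guc g;

		init_params(&g);
		return 0;
	}
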
366 void intel_guc_write_params(struct intel_guc *guc)
368 struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
381 intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);
386 void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p)
388 struct intel_gt *gt = guc_to_gt(guc);
403 int intel_guc_init(struct intel_guc *guc)
407 ret = intel_uc_fw_init(&guc->fw);
411 ret = intel_guc_log_create(&guc->log);
415 ret = intel_guc_capture_init(guc);
419 ret = intel_guc_ads_create(guc);
423 GEM_BUG_ON(!guc->ads_vma);
425 ret = intel_guc_ct_init(&guc->ct);
429 if (intel_guc_submission_is_used(guc)) {
434 ret = intel_guc_submission_init(guc);
439 if (intel_guc_slpc_is_used(guc)) {
440 ret = intel_guc_slpc_init(&guc->slpc);
446 guc_init_params(guc);
448 intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOADABLE);
453 intel_guc_submission_fini(guc);
455 intel_guc_ct_fini(&guc->ct);
457 intel_guc_ads_destroy(guc);
459 intel_guc_capture_destroy(guc);
461 intel_guc_log_destroy(&guc->log);
463 intel_uc_fw_fini(&guc->fw);
465 intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_INIT_FAIL);
466 guc_probe_error(guc, "failed with %pe\n", ERR_PTR(ret));
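
The tail of intel_guc_init() above is the classic goto-based unwind ladder: each successfully initialised sub-component gets a matching fini call, executed in reverse order when a later step fails. A compact standalone illustration of the idiom; the component names are generic placeholders:

	#include <stdio.h>

	static int init_a(void)  { printf("init a\n"); return 0; }
	static int init_b(void)  { printf("init b\n"); return 0; }
	static int init_c(void)  { printf("init c\n"); return -1; } /* simulate failure */
	static void fini_a(void) { printf("fini a\n"); }
	static void fini_b(void) { printf("fini b\n"); }

	static int guc_like_init(void)
	{
		int ret;

		ret = init_a();
		if (ret)
			goto err_out;
		ret = init_b();
		if (ret)
			goto err_a;
		ret = init_c();
		if (ret)
			goto err_b;
		return 0;

		/* unwind strictly in reverse order of construction */
	err_b:
		fini_b();
	err_a:
		fini_a();
	err_out:
		fprintf(stderr, "failed with %d\n", ret);
		return ret;
	}

	int main(void)
	{
		return guc_like_init() ? 1 : 0;
	}
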
470 void intel_guc_fini(struct intel_guc *guc)
472 if (!intel_uc_fw_is_loadable(&guc->fw))
475 flush_work(&guc->dead_guc_worker);
477 if (intel_guc_slpc_is_used(guc))
478 intel_guc_slpc_fini(&guc->slpc);
480 if (intel_guc_submission_is_used(guc))
481 intel_guc_submission_fini(guc);
483 intel_guc_ct_fini(&guc->ct);
485 intel_guc_ads_destroy(guc);
486 intel_guc_capture_destroy(guc);
487 intel_guc_log_destroy(&guc->log);
488 intel_uc_fw_fini(&guc->fw);
494 int intel_guc_send_mmio(struct intel_guc *guc, const u32 *request, u32 len,
497 struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
503 GEM_BUG_ON(len > guc->send_regs.count);
508 mutex_lock(&guc->send_mutex);
509 intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);
513 intel_uncore_write(uncore, guc_send_reg(guc, i), request[i]);
515 intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));
517 intel_guc_notify(guc);
524 guc_send_reg(guc, 0),
531 guc_err(guc, "mmio request %#x: no reply %x\n",
537 #define done ({ header = intel_uncore_read(uncore, guc_send_reg(guc, 0)); \
553 guc_dbg(guc, "mmio request %#x: retrying, reason %u\n",
562 guc_err(guc, "mmio request %#x: failure %x/%u\n",
570 guc_err(guc, "mmio request %#x: unexpected reply %#x\n",
577 int count = min(response_buf_size, guc->send_regs.count);
585 guc_send_reg(guc, i));
595 intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
596 mutex_unlock(&guc->send_mutex);
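
Read together, the intel_guc_send_mmio() matches show the whole scratch-register handshake: serialise on send_mutex, hold forcewake, write the request dwords, issue a posting read to flush them, ring the doorbell with intel_guc_notify(), poll scratch 0 for the reply header (retrying on a RETRY status), then optionally copy a response back. A condensed simulation of that sequence; the helpers, the reply header value, and the status handling are all made up for illustration:

	#include <stdint.h>

	#define NUM_SCRATCH 8
	static uint32_t scratch[NUM_SCRATCH];	/* stands in for the MMIO scratch bank */
	#define FAKE_REPLY_SUCCESS 0xF0000000u	/* made-up reply header */

	/* Fake firmware: immediately acknowledge whatever was written. */
	static void fake_guc_notify(void)
	{
		scratch[0] = FAKE_REPLY_SUCCESS;
	}

	static int send_mmio(const uint32_t *req, int len, uint32_t *resp, int resp_len)
	{
		int i;

		/* the real code takes send_mutex and forcewake here */
		for (i = 0; i < len; i++)
			scratch[i] = req[i];	/* write the request dwords */
		/* a posting read of the last written register flushes them out */
		fake_guc_notify();		/* intel_guc_notify(): ring the doorbell */

		/* the real code polls with timeouts and retries on a RETRY status */
		if (scratch[0] != FAKE_REPLY_SUCCESS)
			return -1;

		for (i = 0; i < resp_len && i < NUM_SCRATCH; i++)
			resp[i] = scratch[i];	/* hand the reply back */
		/* forcewake put + mutex unlock happen here in the real code */
		return 0;
	}

	int main(void)
	{
		uint32_t req[2] = { 0x3000, 0x1 }, resp[2];

		return send_mmio(req, 2, resp, 2) ? 1 : 0;
	}
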
601 int intel_guc_crash_process_msg(struct intel_guc *guc, u32 action)
604 guc_err(guc, "Crash dump notification\n");
606 guc_err(guc, "Exception notification\n");
608 guc_err(guc, "Unknown crash notification: 0x%04X\n", action);
610 queue_work(system_unbound_wq, &guc->dead_guc_worker);
615 int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
624 msg = payload[0] & guc->msg_enabled_mask;
627 guc_err(guc, "Received early crash dump notification!\n");
629 guc_err(guc, "Received early exception notification!\n");
632 queue_work(system_unbound_wq, &guc->dead_guc_worker);
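
The key line in intel_guc_to_host_process_recv_msg() is the mask: payload[0] is ANDed with msg_enabled_mask, so only notifications the host opted into (via intel_guc_enable_msg(), as in init_early above) are acted on. A minimal sketch of that filtering; the bit positions are illustrative, not the real INTEL_GUC_RECV_MSG_* values:

	#include <stdint.h>
	#include <stdio.h>

	#define MSG_CRASH_DUMP_POSTED	(1u << 1)	/* illustrative bit */
	#define MSG_EXCEPTION		(1u << 30)	/* illustrative bit */

	static void process_recv_msg(uint32_t payload0, uint32_t enabled_mask)
	{
		/* drop anything the host never asked to receive */
		uint32_t msg = payload0 & enabled_mask;

		if (msg & MSG_CRASH_DUMP_POSTED)
			printf("crash dump notification\n");
		if (msg & MSG_EXCEPTION)
			printf("exception notification\n");
	}

	int main(void)
	{
		uint32_t enabled = MSG_CRASH_DUMP_POSTED | MSG_EXCEPTION;

		process_recv_msg(MSG_EXCEPTION | (1u << 5), enabled); /* bit 5 is masked off */
		return 0;
	}
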
639 * @guc: intel_guc structure
648 int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
655 return intel_guc_send(guc, action, ARRAY_SIZE(action));
660 * @guc: the guc
662 int intel_guc_suspend(struct intel_guc *guc)
669 if (!intel_guc_is_ready(guc))
672 if (intel_guc_submission_is_used(guc)) {
673 flush_work(&guc->dead_guc_worker);
686 ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
688 guc_err(guc, "suspend: RESET_CLIENT action failed with %pe\n",
693 intel_guc_sanitize(guc);
700 * @guc: the guc
702 int intel_guc_resume(struct intel_guc *guc)
749 * @guc: the guc
760 struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
762 struct intel_gt *gt = guc_to_gt(guc);
807 * @guc: the guc
817 int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
823 vma = intel_guc_allocate_vma(guc, size);
828 intel_gt_coherent_map_type(guc_to_gt(guc),
841 static int __guc_action_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value)
858 ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
870 static int __guc_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value)
872 int err = __guc_action_self_cfg(guc, key, len, value);
875 guc_probe_error(guc, "Unsuccessful self-config (%pe) key %#hx value %#llx\n",
880 int intel_guc_self_cfg32(struct intel_guc *guc, u16 key, u32 value)
882 return __guc_self_cfg(guc, key, 1, value);
885 int intel_guc_self_cfg64(struct intel_guc *guc, u16 key, u64 value)
887 return __guc_self_cfg(guc, key, 2, value);
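
The two wrappers differ only in the length argument: a 32-bit value occupies one dword (len = 1), a 64-bit value two (len = 2), and __guc_action_self_cfg() splits the value into low and high dwords for the request. A standalone sketch of that split; the request layout is an assumption, and only the lower/upper dword split is the point:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
	static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

	static void self_cfg(uint16_t key, uint16_t len, uint64_t value)
	{
		/* the real request also carries an action header plus the key/len */
		uint32_t dwords[2] = { lower_32_bits(value), upper_32_bits(value) };
		unsigned int i;

		printf("key %#x gets %u dword(s):", (unsigned)key, (unsigned)len);
		for (i = 0; i < len; i++)
			printf(" %#x", dwords[i]);
		printf("\n");
	}

	int main(void)
	{
		self_cfg(0x1234, 1, 0xdeadbeef);		/* self_cfg32 path */
		self_cfg(0x5678, 2, 0x0123456789abcdefULL);	/* self_cfg64 path */
		return 0;
	}
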
892 * @guc: the GuC
897 void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p)
899 struct intel_gt *gt = guc_to_gt(guc);
903 if (!intel_guc_is_supported(guc)) {
908 if (!intel_guc_is_wanted(guc)) {
913 intel_uc_fw_dump(&guc->fw, p);
934 void intel_guc_write_barrier(struct intel_guc *guc)
936 struct intel_gt *gt = guc_to_gt(guc);
938 if (i915_gem_object_is_lmem(guc->ct.vma->obj)) {
943 GEM_BUG_ON(guc->send_regs.fw_domains);