Lines matching defs:guc: function definitions operating on struct intel_guc, from the i915 driver's intel_guc.c. Elided code is marked with /* ... */.

/* Ring the GuC doorbell: writing guc->notify_reg raises the host-to-GuC interrupt. */
void intel_guc_notify(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER);
}

/* Map index i to the i-th 32-bit MMIO scratch register of the send bank. */
static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	return _MMIO(guc->send_regs.base + 4 * i);
}

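The send "registers" are just a contiguous bank of 32-bit scratch registers, so the index-to-register mapping is plain arithmetic: base + 4 * i. A minimal, self-contained sketch of that math (the base offset and count below are illustrative, not real hardware values):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct send_regs { uint32_t base; uint32_t count; };

static uint32_t send_reg_offset(const struct send_regs *regs, uint32_t i)
{
	assert(i < regs->count);	/* mirrors GEM_BUG_ON(i >= count) */
	return regs->base + 4 * i;	/* 32-bit registers, 4 bytes apart */
}

int main(void)
{
	struct send_regs regs = { .base = 0x190240, .count = 4 };	/* made-up base */
	for (uint32_t i = 0; i < regs.count; i++)
		printf("send reg %u -> MMIO offset %#x\n", i,
		       send_reg_offset(&regs, i));
	return 0;
}
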
/* Record the forcewake domains covering the send registers for later use. */
void intel_guc_init_send_regs(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	for (i = 0; i < guc->send_regs.count; i++)
		fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
							     guc_send_reg(guc, i),
							     FW_REG_READ | FW_REG_WRITE);
	guc->send_regs.fw_domains = fw_domains;
}

/* gen9: GuC-to-host events arrive via the GT PM interrupt path (GEN8_GT_IIR(2)). */
static void gen9_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	/* ... clear any pending GuC events in the PM IIR, under gt->irq_lock ... */
}

static void gen9_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	guc_WARN_ON_ONCE(guc, intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
			 gt->pm_guc_events);
	/* ... unmask the GuC PM events ... */
	guc->interrupts.enabled = true;
}

static void gen9_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	guc->interrupts.enabled = false;
	/* ... mask the GuC PM events, then drop anything still pending: */
	gen9_reset_guc_interrupts(guc);
}

static void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	/* ... reset the pending GuC IIR state under gt->irq_lock ... */
}

static void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	/* ... program the GuC interrupt enable/mask registers ... */
	guc->interrupts.enabled = true;
}

static void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	guc->interrupts.enabled = false;
	/* ... mask the interrupts, then ... */
	gen11_reset_guc_interrupts(guc);
}

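Both generations expose the same three operations, and intel_guc_init_early() below stores one set or the other behind guc->interrupts.{reset,enable,disable}. A small stand-alone sketch of that function-pointer dispatch pattern (all the names and behaviours here are stand-ins, not the real i915 code):

#include <stdio.h>

struct irq_ops {
	void (*reset)(void);
	void (*enable)(void);
	void (*disable)(void);
};

static void gen9_reset(void)   { puts("gen9: clear pending PM IIR bits"); }
static void gen9_enable(void)  { puts("gen9: unmask GuC PM events"); }
static void gen9_disable(void) { puts("gen9: mask GuC PM events"); }

static void gen11_reset(void)   { puts("gen11: reset GuC IIR"); }
static void gen11_enable(void)  { puts("gen11: program GuC interrupt enables"); }
static void gen11_disable(void) { puts("gen11: clear GuC interrupt enables"); }

int main(void)
{
	int graphics_ver = 12;	/* pretend platform */
	struct irq_ops ops = graphics_ver >= 11
		? (struct irq_ops){ gen11_reset, gen11_enable, gen11_disable }
		: (struct irq_ops){ gen9_reset, gen9_enable, gen9_disable };

	ops.reset();	/* callers never care which generation they run on */
	ops.enable();
	ops.disable();
	return 0;
}
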
/* Worker queued on a GuC crash/exception report: escalates to a GT reset. */
static void guc_dead_worker_func(struct work_struct *w)
{
	struct intel_guc *guc = container_of(w, struct intel_guc, dead_guc_worker);
	struct intel_gt *gt = guc_to_gt(guc);
	unsigned long last = guc->last_dead_guc_jiffies;
	/* ... reset the GT (or wedge it if the GuC died again immediately) ... */
	guc->last_dead_guc_jiffies = jiffies;
}

/* One-time setup before firmware load: initialise the sub-components, then
 * pick the interrupt vfuncs and the MMIO doorbell/scratch bank per platform. */
void intel_guc_init_early(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_private *i915 = gt->i915;

	intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC, true);
	intel_guc_ct_init_early(&guc->ct);
	intel_guc_log_init_early(&guc->log);
	intel_guc_submission_init_early(guc);
	intel_guc_slpc_init_early(&guc->slpc);
	intel_guc_rc_init_early(guc);

	INIT_WORK(&guc->dead_guc_worker, guc_dead_worker_func);

	mutex_init(&guc->send_mutex);
	spin_lock_init(&guc->irq_lock);
	if (GRAPHICS_VER(i915) >= 11) {
		guc->interrupts.reset = gen11_reset_guc_interrupts;
		guc->interrupts.enable = gen11_enable_guc_interrupts;
		guc->interrupts.disable = gen11_disable_guc_interrupts;
		if (gt->type == GT_MEDIA) {
			/* standalone media GT: its own doorbell and scratch bank */
			guc->notify_reg = MEDIA_GUC_HOST_INTERRUPT;
			guc->send_regs.base = i915_mmio_reg_offset(MEDIA_SOFT_SCRATCH(0));
		} else {
			guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
			guc->send_regs.base = i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
		}
		guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
	} else {
		guc->notify_reg = GUC_SEND_INTERRUPT;
		guc->interrupts.reset = gen9_reset_guc_interrupts;
		guc->interrupts.enable = gen9_enable_guc_interrupts;
		guc->interrupts.disable = gen9_disable_guc_interrupts;
		guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
		guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
	}

	intel_guc_enable_msg(guc, INTEL_GUC_RECV_MSG_EXCEPTION |
				  INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
}

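The only per-platform differences are which doorbell register to ring and which scratch bank carries MMIO messages. A table-driven sketch of that selection (the offsets and counts are made-up placeholders; the real values come from the i915 register headers):

#include <stdint.h>
#include <stdio.h>

enum platform_kind { PLAT_GEN9, PLAT_GEN11, PLAT_MEDIA_GT };

struct guc_mmio_cfg {
	uint32_t notify_reg;	/* doorbell written by the notify path */
	uint32_t scratch_base;	/* first send register */
	uint32_t scratch_count;	/* number of send registers */
};

static struct guc_mmio_cfg pick_mmio_cfg(enum platform_kind p)
{
	switch (p) {
	case PLAT_MEDIA_GT: return (struct guc_mmio_cfg){ 0x1000, 0x2000, 4 };
	case PLAT_GEN11:    return (struct guc_mmio_cfg){ 0x3000, 0x4000, 4 };
	default:            return (struct guc_mmio_cfg){ 0x5000, 0x6000, 8 };
	}
}

int main(void)
{
	struct guc_mmio_cfg cfg = pick_mmio_cfg(PLAT_GEN11);

	printf("notify %#x, scratch bank %#x x%u\n",
	       cfg.notify_reg, cfg.scratch_base, cfg.scratch_count);
	return 0;
}
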
void intel_guc_init_late(struct intel_guc *guc)
{
	intel_guc_ads_init_late(guc);
}

/* Helpers that assemble the boot-time GUC_CTL_* parameter dwords. */
static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
	u32 level = intel_guc_log_get_level(&guc->log);
	/* ... map the log level onto GUC_LOG_* verbosity flags ... */
}

static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (!intel_guc_submission_is_used(guc))
		flags |= GUC_CTL_DISABLE_SCHEDULER;
	if (intel_guc_slpc_is_used(guc))
		flags |= GUC_CTL_ENABLE_SLPC;
	return flags;
}

static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
	struct intel_guc_log *log = &guc->log;
	u32 offset = intel_guc_ggtt_offset(guc, log->vma) >> PAGE_SHIFT;
	/* ... pack the log buffer offset and per-section sizes ... */
}

static u32 guc_ctl_ads_flags(struct intel_guc *guc)
{
	u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;

	return ads << GUC_ADS_ADDR_SHIFT;
}

static u32 guc_ctl_wa_flags(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	u32 flags = 0;

	/* ... per-platform firmware workaround bits, some gated on the
	 * loaded firmware version, e.g.: */
	if (GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 7, 0))
		/* ... workaround only supported from fw 70.7.0 onwards ... */;
	return flags;
}

static u32 guc_ctl_devid(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_i915(guc);

	return (INTEL_DEVID(i915) << 16) | INTEL_REVID(i915);
}

static void guc_init_params(struct intel_guc *guc)
{
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));

	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
	params[GUC_CTL_WA] = guc_ctl_wa_flags(guc);
	params[GUC_CTL_DEVID] = guc_ctl_devid(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		guc_dbg(guc, "param[%2d] = %#x\n", i, params[i]);
}

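The boot parameters end up as a fixed-size array of dwords that the firmware reads back at init; the BUILD_BUG_ON pins the array size to the ABI. A self-contained model of filling and dumping such a block (slot names and flag values are invented; the real ones are the GUC_CTL_* definitions):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Slot indices are invented; the real ones are the GUC_CTL_* ABI constants. */
enum { CTL_LOG_PARAMS, CTL_FEATURE, CTL_DEBUG, CTL_ADS, CTL_WA, CTL_DEVID,
       CTL_MAX_DWORDS };

/* Compile-time ABI check, in the spirit of the BUILD_BUG_ON() above. */
static_assert(CTL_MAX_DWORDS == 6, "control block is six dwords");

int main(void)
{
	uint32_t params[CTL_MAX_DWORDS] = {0};

	params[CTL_FEATURE] = 1u << 2;			/* pretend "SLPC enabled" bit */
	params[CTL_DEVID]   = (0x9a49u << 16) | 0x1;	/* devid:revid packed as above */

	for (int i = 0; i < CTL_MAX_DWORDS; i++)
		printf("param[%2d] = %#x\n", i, params[i]);
	return 0;
}
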
/* Copy guc->params into the SOFT_SCRATCH bank for the firmware to read at boot. */
void intel_guc_write_params(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int i;

	/* ... under forcewake; scratch 0 stays reserved for MMIO messaging ... */
	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);
}

/* Print the GuC's view of time (timestamps, clock frequency) for debug output. */
void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p)
{
	struct intel_gt *gt = guc_to_gt(guc);
	/* ... read the GuC timestamp registers under forcewake and print them ... */
}

int intel_guc_init(struct intel_guc *guc)
{
	int ret;

	ret = intel_uc_fw_init(&guc->fw);
	if (ret)
		goto out;
	ret = intel_guc_log_create(&guc->log);
	if (ret)
		goto err_fw;
	ret = intel_guc_capture_init(guc);
	if (ret)
		goto err_log;
	ret = intel_guc_ads_create(guc);
	if (ret)
		goto err_capture;
	GEM_BUG_ON(!guc->ads_vma);
	ret = intel_guc_ct_init(&guc->ct);
	if (ret)
		goto err_ads;

	if (intel_guc_submission_is_used(guc)) {
		/* Needed at firmware load time if submission is enabled later. */
		ret = intel_guc_submission_init(guc);
		if (ret)
			goto err_ct;
	}
	if (intel_guc_slpc_is_used(guc)) {
		ret = intel_guc_slpc_init(&guc->slpc);
		if (ret)
			goto err_submission;
	}

	guc_init_params(guc);
	intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOADABLE);
	return 0;

err_submission:
	intel_guc_submission_fini(guc);
err_ct:
	intel_guc_ct_fini(&guc->ct);
err_ads:
	intel_guc_ads_destroy(guc);
err_capture:
	intel_guc_capture_destroy(guc);
err_log:
	intel_guc_log_destroy(&guc->log);
err_fw:
	intel_uc_fw_fini(&guc->fw);
out:
	intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_INIT_FAIL);
	guc_probe_error(guc, "failed with %pe\n", ERR_PTR(ret));
	return ret;
}

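intel_guc_init() is a classic kernel goto-unwind ladder: each failure label tears down exactly the steps that already succeeded, in reverse order, so there is a single success path and no duplicated cleanup. A toy, compilable version of the pattern (all names here are invented):

#include <stdio.h>

static int init_a(void)  { puts("init a"); return 0; }
static int init_b(void)  { puts("init b"); return -1; }	/* simulate failure */
static void fini_a(void) { puts("fini a"); }

static int subsystem_init(void)
{
	int ret;

	ret = init_a();
	if (ret)
		goto err_out;		/* nothing succeeded yet: no cleanup */
	ret = init_b();
	if (ret)
		goto err_a;		/* undo only what init_a() set up */
	return 0;

err_a:
	fini_a();
err_out:
	fprintf(stderr, "init failed: %d\n", ret);
	return ret;
}

int main(void) { return subsystem_init() ? 1 : 0; }
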
void intel_guc_fini(struct intel_guc *guc)
{
	if (!intel_uc_fw_is_loadable(&guc->fw))
		return;

	flush_work(&guc->dead_guc_worker);

	if (intel_guc_slpc_is_used(guc))
		intel_guc_slpc_fini(&guc->slpc);
	if (intel_guc_submission_is_used(guc))
		intel_guc_submission_fini(guc);

	intel_guc_ct_fini(&guc->ct);
	intel_guc_ads_destroy(guc);
	intel_guc_capture_destroy(guc);
	intel_guc_log_destroy(&guc->log);
	intel_uc_fw_fini(&guc->fw);
}

/* Send an HXG request via the MMIO scratch bank and busy-wait for the reply
 * in send register 0 (the CTB-less path, used before CT buffers are up). */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *request, u32 len,
			u32 *response_buf, u32 response_buf_size)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	u32 header;
	int i, ret;

	GEM_BUG_ON(len > guc->send_regs.count);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);

retry:
	for (i = 0; i < len; i++)
		intel_uncore_write(uncore, guc_send_reg(guc, i), request[i]);
	intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/* ... wait for a GuC-originated header to appear in send register 0: */
	if (ret) {
		guc_err(guc, "mmio request %#x: no reply %x\n", request[0], header);
		goto out;
	}

#define done ({ header = intel_uncore_read(uncore, guc_send_reg(guc, 0)); /* ... */ })
	/* ... then wait until the GuC is done; on NO_RESPONSE_RETRY: */
		guc_dbg(guc, "mmio request %#x: retrying, reason %u\n", ...);
		goto retry;
	/* ... on RESPONSE_FAILURE: */
		guc_err(guc, "mmio request %#x: failure %x/%u\n", ...);
	/* ... on anything other than RESPONSE_SUCCESS: */
		guc_err(guc, "mmio request %#x: unexpected reply %#x\n", ...);

	if (response_buf) {
		int count = min(response_buf_size, guc->send_regs.count);

		for (i = 0; i < count; i++)
			response_buf[i] = intel_uncore_read(uncore,
							    guc_send_reg(guc, i));
	}

out:
	intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);
	return ret;
}

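Each MMIO message starts with an HXG header dword whose origin and type fields drive the loop above: a GuC-originated header ends the wait, a retry type loops back, a failure type errors out. A small decoder for that header, assuming the usual HXG layout (origin in bit 31, type in bits 30:28); the type codes below are as I recall them from the GuC ABI headers and should be treated as illustrative:

#include <stdint.h>
#include <stdio.h>

#define HXG_ORIGIN(h)	(((h) >> 31) & 0x1)	/* 0 = host, 1 = GuC */
#define HXG_TYPE(h)	(((h) >> 28) & 0x7)

enum { TYPE_NO_RESPONSE_RETRY = 5, TYPE_RESPONSE_FAILURE = 6,
       TYPE_RESPONSE_SUCCESS = 7 };

static const char *classify_reply(uint32_t header)
{
	if (HXG_ORIGIN(header) != 1)		/* expect a GuC-originated reply */
		return "unexpected origin";
	switch (HXG_TYPE(header)) {
	case TYPE_RESPONSE_SUCCESS:  return "success";
	case TYPE_NO_RESPONSE_RETRY: return "retry requested";
	case TYPE_RESPONSE_FAILURE:  return "failure";
	default:                     return "unexpected reply";
	}
}

int main(void)
{
	uint32_t header = (1u << 31) | (7u << 28);	/* GuC origin, success */
	printf("reply: %s\n", classify_reply(header));
	return 0;
}
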
/* Handle a crash/exception notification received over the CT buffer. */
int intel_guc_crash_process_msg(struct intel_guc *guc, u32 action)
{
	if (action == INTEL_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED)
		guc_err(guc, "Crash dump notification\n");
	else if (action == INTEL_GUC_ACTION_NOTIFY_EXCEPTION)
		guc_err(guc, "Exception notification\n");
	else
		guc_err(guc, "Unknown crash notification: 0x%04X\n", action);

	queue_work(system_unbound_wq, &guc->dead_guc_worker);
	return 0;
}

/* Handle a notification delivered through the MMIO path, e.g. before CTBs exist. */
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len)
{
	u32 msg;

	if (unlikely(!len))
		return -EPROTO;

	/* Handle only the message bits the host has enabled. */
	msg = payload[0] & guc->msg_enabled_mask;

	if (msg & INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED)
		guc_err(guc, "Received early crash dump notification!\n");
	if (msg & INTEL_GUC_RECV_MSG_EXCEPTION)
		guc_err(guc, "Received early exception notification!\n");
	if (msg)
		queue_work(system_unbound_wq, &guc->dead_guc_worker);
	return 0;
}

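Both notification paths funnel through guc->msg_enabled_mask, so a notification the host never enabled (via intel_guc_enable_msg() in init_early) is simply dropped. A stand-alone model of that mask-and-dispatch step (the bit positions are invented):

#include <stdint.h>
#include <stdio.h>

#define RECV_MSG_CRASH_DUMP_POSTED (1u << 1)	/* illustrative bit positions */
#define RECV_MSG_EXCEPTION         (1u << 30)

static uint32_t msg_enabled_mask;

static void enable_msg(uint32_t mask) { msg_enabled_mask |= mask; }

static void process_recv_msg(uint32_t payload0)
{
	uint32_t msg = payload0 & msg_enabled_mask;	/* drop disabled events */

	if (msg & RECV_MSG_CRASH_DUMP_POSTED)
		puts("crash dump notification");
	if (msg & RECV_MSG_EXCEPTION)
		puts("exception notification");
}

int main(void)
{
	enable_msg(RECV_MSG_EXCEPTION | RECV_MSG_CRASH_DUMP_POSTED);
	process_recv_msg(RECV_MSG_EXCEPTION);
	return 0;
}
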
/* intel_guc_auth_huc() - ask the GuC to authenticate the HuC firmware image
 * @guc: intel_guc structure
 * @rsa_offset: GGTT offset of the HuC RSA signature blob */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = { INTEL_GUC_ACTION_AUTHENTICATE_HUC, rsa_offset };

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/* intel_guc_suspend() - notify the GuC that we are entering suspend
 * @guc: the guc */
int intel_guc_suspend(struct intel_guc *guc)
{
	int ret;
	u32 action[] = { INTEL_GUC_ACTION_CLIENT_SOFT_RESET };

	if (!intel_guc_is_ready(guc))
		return 0;

	if (intel_guc_submission_is_used(guc)) {
		flush_work(&guc->dead_guc_worker);
		/* Tear down submission over MMIO; the CTBs are going away. */
		ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
		if (ret)
			guc_err(guc, "suspend: RESET_CLIENT action failed with %pe\n",
				ERR_PTR(ret));
	}

	/* Signal that the GuC isn't running any more. */
	intel_guc_sanitize(guc);
	return 0;
}

/* intel_guc_resume() - notify the GuC that we are resuming from suspend
 * @guc: the guc */
int intel_guc_resume(struct intel_guc *guc)
{
	/* ... nothing to do: the firmware is reloaded from scratch on resume ... */
	return 0;
}

/* intel_guc_allocate_vma() - allocate a GEM object and pin it for GuC access
 * @guc: the guc */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct intel_gt *gt = guc_to_gt(guc);
	/* ... create the object (LMEM when available, else shmem) and pin it
	 * into the GGTT at an offset the GuC firmware can address ... */
}

/* intel_guc_allocate_and_map_vma() - allocate a buffer and map it for CPU use
 * @guc: the guc */
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
				   struct i915_vma **out_vma, void **out_vaddr)
{
	struct i915_vma *vma = intel_guc_allocate_vma(guc, size);
	void *vaddr;

	/* ... return PTR_ERR(vma) on failure, otherwise map the object: */
	vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
			intel_gt_coherent_map_type(guc_to_gt(guc),
						   vma->obj, true));
	/* ... unwind on mapping failure, else hand back vma and vaddr ... */
	return 0;
}

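A typical caller gets a GGTT-pinned buffer plus a CPU pointer out of one call and frees both together. A hedged sketch of such a caller (the function name and size are invented for illustration; the allocate and release calls are the real i915 ones):

static int example_create_scratch(struct intel_guc *guc)
{
	struct i915_vma *vma;
	void *vaddr;
	int err;

	err = intel_guc_allocate_and_map_vma(guc, SZ_4K, &vma, &vaddr);
	if (err)
		return err;

	memset(vaddr, 0, SZ_4K);	/* buffer is CPU-addressable via vaddr */

	/* Drop the CPU mapping and the GGTT pin together when done. */
	i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
	return 0;
}
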
/* Self-configuration KLVs must be sent over MMIO, before the CTBs are up. */
static int __guc_action_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value)
{
	/* ... build request[]: HXG request header + KLV key/len dword + the
	 * value as one (32-bit) or two (64-bit) dwords ... */
	ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
	/* ... treat any unexpected positive reply as a protocol error ... */
}

static int __guc_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value)
{
	int err = __guc_action_self_cfg(guc, key, len, value);

	if (unlikely(err))
		guc_probe_error(guc, "Unsuccessful self-config (%pe) key %#hx value %#llx\n",
				ERR_PTR(err), key, value);
	return err;
}

int intel_guc_self_cfg32(struct intel_guc *guc, u16 key, u32 value)
{
	return __guc_self_cfg(guc, key, 1, value);
}

int intel_guc_self_cfg64(struct intel_guc *guc, u16 key, u64 value)
{
	return __guc_self_cfg(guc, key, 2, value);
}

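The 32- and 64-bit variants differ only in the KLV length field and in whether the value's upper dword is sent. A self-contained model of that packing (the key values are arbitrary; the real request also carries an HXG header):

#include <stdint.h>
#include <stdio.h>

static void emit_klv(unsigned int key, unsigned int len, uint64_t value)
{
	uint32_t lo = (uint32_t)value;		/* lower_32_bits(value) */
	uint32_t hi = (uint32_t)(value >> 32);	/* upper_32_bits(value) */

	printf("key %#x len %u: value32 %#x", key, len, (unsigned int)lo);
	if (len == 2)				/* 64-bit KLV: send the high dword too */
		printf(" value64 %#x", (unsigned int)hi);
	printf("\n");
}

int main(void)
{
	emit_klv(0x1234, 1, 0xdeadbeef);		/* cfg32-style */
	emit_klv(0x5678, 2, 0x1122334455667788ull);	/* cfg64-style */
	return 0;
}
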
/**
 * intel_guc_load_status - dump information about GuC load status
 * @guc: the GuC
 * @p: the &drm_printer
 */
void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p)
{
	struct intel_gt *gt = guc_to_gt(guc);

	if (!intel_guc_is_supported(guc)) {
		drm_printf(p, "GuC not supported\n");
		return;
	}
	if (!intel_guc_is_wanted(guc)) {
		drm_printf(p, "GuC disabled\n");
		return;
	}

	intel_uc_fw_dump(&guc->fw, p);
	/* ... then dump the GuC status and scratch registers under forcewake ... */
}

/* Ensure writes to the CT buffer are visible to the GuC before the doorbell. */
void intel_guc_write_barrier(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	if (i915_gem_object_is_lmem(guc->ct.vma->obj)) {
		/* The send regs are unused once CTBs are up, so safe to poke. */
		GEM_BUG_ON(guc->send_regs.fw_domains);
		intel_uncore_write_fw(gt->uncore, GEN11_SOFT_SCRATCH(0), 0);
	} else {
		wmb();
	}
}
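
Whether a plain store fence is enough depends on where the CT buffer lives: writes to local memory go through a write-combined PCI BAR, which the GuC may not observe until some MMIO access flushes them, while system memory only needs wmb(). A userspace analogue of the two flavours (the volatile store stands in for the flushing register write):

#include <stdatomic.h>
#include <stdint.h>

static volatile uint32_t fake_mmio_reg;	/* stands in for a real MMIO register */

static void write_barrier(int buffer_in_lmem)
{
	if (buffer_in_lmem)
		fake_mmio_reg = 0;	/* post a register write to flush WC buffers */
	else
		atomic_thread_fence(memory_order_release);	/* plain wmb() analogue */
}

int main(void)
{
	write_barrier(1);	/* LMEM-backed buffer */
	write_barrier(0);	/* system-memory-backed buffer */
	return 0;
}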