1218822Sdim// SPDX-License-Identifier: MIT
2218822Sdim/*
 * Copyright © 2016-2019 Intel Corporation
4218822Sdim */
5218822Sdim
6218822Sdim#include <linux/types.h>
7218822Sdim
8218822Sdim#include "gt/intel_gt.h"
9218822Sdim#include "gt/intel_rps.h"
1038889Sjdp#include "intel_guc_reg.h"
1138889Sjdp#include "intel_huc.h"
1238889Sjdp#include "intel_huc_print.h"
1338889Sjdp#include "i915_drv.h"
1438889Sjdp#include "i915_reg.h"
1538889Sjdp#include "pxp/intel_pxp_cmd_interface_43.h"
1638889Sjdp
1738889Sjdp#include <linux/device/bus.h>
1838889Sjdp#include <linux/mei_aux.h>
1938889Sjdp
2038889Sjdp/**
2138889Sjdp * DOC: HuC
22218822Sdim *
2338889Sjdp * The HuC is a dedicated microcontroller for usage in media HEVC (High
2438889Sjdp * Efficiency Video Coding) operations. Userspace can directly use the firmware
2538889Sjdp * capabilities by adding HuC specific commands to batch buffers.
2638889Sjdp *
2738889Sjdp * The kernel driver is only responsible for loading the HuC firmware and
2838889Sjdp * triggering its security authentication. This is done differently depending
29218822Sdim * on the platform:
30218822Sdim *
31218822Sdim * - older platforms (from Gen9 to most Gen12s): the load is performed via DMA
32218822Sdim *   and the authentication via GuC
33218822Sdim * - DG2: load and authentication are both performed via GSC.
34218822Sdim * - MTL and newer platforms: the load is performed via DMA (same as with
35218822Sdim *   not-DG2 older platforms), while the authentication is done in 2-steps,
36218822Sdim *   a first auth for clear-media workloads via GuC and a second one for all
37218822Sdim *   workloads via GSC.
38218822Sdim *
39218822Sdim * On platforms where the GuC does the authentication, to correctly do so the
40218822Sdim * HuC binary must be loaded before the GuC one.
41218822Sdim * Loading the HuC is optional; however, not using the HuC might negatively
42218822Sdim * impact power usage and/or performance of media workloads, depending on the
43218822Sdim * use-cases.
44218822Sdim * HuC must be reloaded on events that cause the WOPCM to lose its contents
45218822Sdim * (S3/S4, FLR); on older platforms the HuC must also be reloaded on GuC/GT
46218822Sdim * reset, while on newer ones it will survive that.
47218822Sdim *
48218822Sdim * See https://github.com/intel/media-driver for the latest details on HuC
49218822Sdim * functionality.
50218822Sdim */
51218822Sdim
52218822Sdim/**
53218822Sdim * DOC: HuC Memory Management
54218822Sdim *
55218822Sdim * Similarly to the GuC, the HuC can't do any memory allocations on its own,
56218822Sdim * with the difference being that the allocations for HuC usage are handled by
57218822Sdim * the userspace driver instead of the kernel one. The HuC accesses the memory
58218822Sdim * via the PPGTT belonging to the context loaded on the VCS executing the
59218822Sdim * HuC-specific commands.
60218822Sdim */
61218822Sdim
62218822Sdim/*
63218822Sdim * MEI-GSC load is an async process. The probing of the exposed aux device
64218822Sdim * (see intel_gsc.c) usually happens a few seconds after i915 probe, depending
65218822Sdim * on when the kernel schedules it. Unless something goes terribly wrong, we're
66218822Sdim * guaranteed for this to happen during boot, so the big timeout is a safety net
67218822Sdim * that we never expect to need.
68218822Sdim * MEI-PXP + HuC load usually takes ~300ms, but if the GSC needs to be resumed
69218822Sdim * and/or reset, this can take longer. Note that the kernel might schedule
70218822Sdim * other work between the i915 init/resume and the MEI one, which can add to
71218822Sdim * the delay.
72218822Sdim */
73218822Sdim#define GSC_INIT_TIMEOUT_MS 10000
74218822Sdim#define PXP_INIT_TIMEOUT_MS 5000
75218822Sdim
/*
 * Dummy notify callback for the delayed-load fence: the fence is only used
 * as a wait point for HuC load completion, so no action is required on
 * state transitions.
 */
static int sw_fence_dummy_notify(struct i915_sw_fence *sf,
				 enum i915_sw_fence_notify state)
{
	return NOTIFY_DONE;
}
81218822Sdim
82218822Sdimstatic void __delayed_huc_load_complete(struct intel_huc *huc)
83218822Sdim{
84218822Sdim	if (!i915_sw_fence_done(&huc->delayed_load.fence))
85218822Sdim		i915_sw_fence_complete(&huc->delayed_load.fence);
86218822Sdim}
87218822Sdim
/*
 * Stop the delayed-load timeout timer and, if the fence is still pending,
 * complete it so that any waiters are released.
 */
static void delayed_huc_load_complete(struct intel_huc *huc)
{
	hrtimer_cancel(&huc->delayed_load.timer);
	__delayed_huc_load_complete(huc);
}
93218822Sdim
/*
 * Mark the delayed GSC-driven load as failed and release any waiters.
 * Does not cancel the timeout timer; callers that can race with the timer
 * should use gsc_init_error() instead.
 */
static void __gsc_init_error(struct intel_huc *huc)
{
	huc->delayed_load.status = INTEL_HUC_DELAYED_LOAD_ERROR;
	__delayed_huc_load_complete(huc);
}
99218822Sdim
/* Cancel the timeout timer, then record the error and complete the fence. */
static void gsc_init_error(struct intel_huc *huc)
{
	hrtimer_cancel(&huc->delayed_load.timer);
	__gsc_init_error(huc);
}
105218822Sdim
106218822Sdimstatic void gsc_init_done(struct intel_huc *huc)
107218822Sdim{
108218822Sdim	hrtimer_cancel(&huc->delayed_load.timer);
109218822Sdim
110218822Sdim	/* MEI-GSC init is done, now we wait for MEI-PXP to bind */
111218822Sdim	huc->delayed_load.status = INTEL_HUC_WAITING_ON_PXP;
112218822Sdim	if (!i915_sw_fence_done(&huc->delayed_load.fence))
113218822Sdim		hrtimer_start(&huc->delayed_load.timer,
114218822Sdim			      ms_to_ktime(PXP_INIT_TIMEOUT_MS),
115218822Sdim			      HRTIMER_MODE_REL);
116218822Sdim}
117218822Sdim
118218822Sdimstatic enum hrtimer_restart huc_delayed_load_timer_callback(struct hrtimer *hrtimer)
119218822Sdim{
120218822Sdim	struct intel_huc *huc = container_of(hrtimer, struct intel_huc, delayed_load.timer);
121218822Sdim
122218822Sdim	if (!intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC)) {
123218822Sdim		if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_GSC)
124218822Sdim			huc_notice(huc, "timed out waiting for MEI GSC\n");
125218822Sdim		else if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_PXP)
126218822Sdim			huc_notice(huc, "timed out waiting for MEI PXP\n");
127218822Sdim		else
128218822Sdim			MISSING_CASE(huc->delayed_load.status);
129218822Sdim
130218822Sdim		__gsc_init_error(huc);
131218822Sdim	}
132218822Sdim
133218822Sdim	return HRTIMER_NORESTART;
134218822Sdim}
135218822Sdim
/*
 * Arm the delayed-load tracking: pick the timeout for the stage we are
 * waiting on (MEI-GSC probe or MEI-PXP bind), re-arm the completion fence
 * and start the watchdog timer. Any unexpected status is treated as an
 * init error.
 */
static void huc_delayed_load_start(struct intel_huc *huc)
{
	ktime_t delay;

	GEM_BUG_ON(intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC));

	/*
	 * On resume we don't have to wait for MEI-GSC to be re-probed, but we
	 * do need to wait for MEI-PXP to reset & re-bind
	 */
	switch (huc->delayed_load.status) {
	case INTEL_HUC_WAITING_ON_GSC:
		delay = ms_to_ktime(GSC_INIT_TIMEOUT_MS);
		break;
	case INTEL_HUC_WAITING_ON_PXP:
		delay = ms_to_ktime(PXP_INIT_TIMEOUT_MS);
		break;
	default:
		gsc_init_error(huc);
		return;
	}

	/*
	 * This fence is always complete unless we're waiting for the
	 * GSC device to come up to load the HuC. We arm the fence here
	 * and complete it when we confirm that the HuC is loaded from
	 * the PXP bind callback.
	 */
	GEM_BUG_ON(!i915_sw_fence_done(&huc->delayed_load.fence));
	i915_sw_fence_fini(&huc->delayed_load.fence);
	i915_sw_fence_reinit(&huc->delayed_load.fence);
	i915_sw_fence_await(&huc->delayed_load.fence);
	i915_sw_fence_commit(&huc->delayed_load.fence);

	hrtimer_start(&huc->delayed_load.timer, delay, HRTIMER_MODE_REL);
}
172218822Sdim
173218822Sdimstatic int gsc_notifier(struct notifier_block *nb, unsigned long action, void *data)
174218822Sdim{
175218822Sdim	struct device *dev = data;
176218822Sdim	struct intel_huc *huc = container_of(nb, struct intel_huc, delayed_load.nb);
177218822Sdim	struct intel_gsc_intf *intf = &huc_to_gt(huc)->gsc.intf[0];
178218822Sdim
179218822Sdim	if (!intf->adev || &intf->adev->aux_dev.dev != dev)
180218822Sdim		return 0;
181218822Sdim
182218822Sdim	switch (action) {
183218822Sdim	case BUS_NOTIFY_BOUND_DRIVER: /* mei driver bound to aux device */
184218822Sdim		gsc_init_done(huc);
185218822Sdim		break;
186218822Sdim
187218822Sdim	case BUS_NOTIFY_DRIVER_NOT_BOUND: /* mei driver fails to be bound */
188218822Sdim	case BUS_NOTIFY_UNBIND_DRIVER: /* mei driver about to be unbound */
189218822Sdim		huc_info(huc, "MEI driver not bound, disabling load\n");
190218822Sdim		gsc_init_error(huc);
191218822Sdim		break;
192218822Sdim	}
193218822Sdim
194218822Sdim	return 0;
195218822Sdim}
196218822Sdim
197218822Sdimvoid intel_huc_register_gsc_notifier(struct intel_huc *huc, const struct bus_type *bus)
198218822Sdim{
199218822Sdim	int ret;
200218822Sdim
201218822Sdim	if (!intel_huc_is_loaded_by_gsc(huc))
202218822Sdim		return;
203218822Sdim
204218822Sdim	huc->delayed_load.nb.notifier_call = gsc_notifier;
205218822Sdim	ret = bus_register_notifier(bus, &huc->delayed_load.nb);
206218822Sdim	if (ret) {
207218822Sdim		huc_err(huc, "failed to register GSC notifier %pe\n", ERR_PTR(ret));
208218822Sdim		huc->delayed_load.nb.notifier_call = NULL;
209218822Sdim		gsc_init_error(huc);
210218822Sdim	}
211218822Sdim}
212218822Sdim
/**
 * intel_huc_unregister_gsc_notifier() - remove the mei-gsc bus notifier
 * @huc: intel_huc structure
 * @bus: the bus the notifier was registered on
 *
 * No-op if the notifier was never registered. Releases any waiters on the
 * delayed-load fence before dropping the notifier.
 */
void intel_huc_unregister_gsc_notifier(struct intel_huc *huc, const struct bus_type *bus)
{
	if (!huc->delayed_load.nb.notifier_call)
		return;

	/* make sure nothing is left waiting on the fence */
	delayed_huc_load_complete(huc);

	bus_unregister_notifier(bus, &huc->delayed_load.nb);
	huc->delayed_load.nb.notifier_call = NULL;
}
223218822Sdim
/* One-time setup of the delayed-load fence and its watchdog timer. */
static void delayed_huc_load_init(struct intel_huc *huc)
{
	/*
	 * Initialize fence to be complete as this is expected to be complete
	 * unless there is a delayed HuC load in progress.
	 */
	i915_sw_fence_init(&huc->delayed_load.fence,
			   sw_fence_dummy_notify);
	i915_sw_fence_commit(&huc->delayed_load.fence);

	hrtimer_init(&huc->delayed_load.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	huc->delayed_load.timer.function = huc_delayed_load_timer_callback;
}
237218822Sdim
/* Counterpart of delayed_huc_load_init(): complete and tear down the fence. */
static void delayed_huc_load_fini(struct intel_huc *huc)
{
	/*
	 * the fence is initialized in init_early, so we need to clean it up
	 * even if HuC loading is off.
	 */
	delayed_huc_load_complete(huc);
	i915_sw_fence_fini(&huc->delayed_load.fence);
}
247218822Sdim
/*
 * Sanitize HuC state ahead of a reset: abandon any in-flight delayed load
 * and sanitize the firmware object. Always returns 0.
 */
int intel_huc_sanitize(struct intel_huc *huc)
{
	delayed_huc_load_complete(huc);
	intel_uc_fw_sanitize(&huc->fw);
	return 0;
}
254218822Sdim
255218822Sdimstatic bool vcs_supported(struct intel_gt *gt)
256218822Sdim{
257218822Sdim	intel_engine_mask_t mask = gt->info.engine_mask;
258218822Sdim
259218822Sdim	/*
260218822Sdim	 * We reach here from i915_driver_early_probe for the primary GT before
261218822Sdim	 * its engine mask is set, so we use the device info engine mask for it;
262218822Sdim	 * this means we're not taking VCS fusing into account, but if the
263218822Sdim	 * primary GT supports VCS engines we expect at least one of them to
264218822Sdim	 * remain unfused so we're fine.
265218822Sdim	 * For other GTs we expect the GT-specific mask to be set before we
266218822Sdim	 * call this function.
267218822Sdim	 */
268218822Sdim	GEM_BUG_ON(!gt_is_root(gt) && !gt->info.engine_mask);
269218822Sdim
270218822Sdim	if (gt_is_root(gt))
271218822Sdim		mask = INTEL_INFO(gt->i915)->platform_engine_mask;
272218822Sdim	else
273218822Sdim		mask = gt->info.engine_mask;
274218822Sdim
275218822Sdim	return __ENGINE_INSTANCES_MASK(mask, VCS0, I915_MAX_VCS);
276218822Sdim}
277218822Sdim
/**
 * intel_huc_init_early() - early initialization of HuC state
 * @huc: intel_huc structure
 *
 * Sets up the firmware object, the delayed-load fence/timer and the
 * per-platform register/mask/value triplets used to check the HuC
 * authentication status.
 */
void intel_huc_init_early(struct intel_huc *huc)
{
	struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
	struct intel_gt *gt = huc_to_gt(huc);

	intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC, true);

	/*
	 * we always init the fence as already completed, even if HuC is not
	 * supported. This way we don't have to distinguish between HuC not
	 * supported/disabled or already loaded, and can focus on if the load
	 * is currently in progress (fence not complete) or not, which is what
	 * we care about for stalling userspace submissions.
	 */
	delayed_huc_load_init(huc);

	/* no VCS engines means no media workloads, hence no HuC */
	if (!vcs_supported(gt)) {
		intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED);
		return;
	}

	/* GuC-auth status register/bit differs between Gen11+ and older gens */
	if (GRAPHICS_VER(i915) >= 11) {
		huc->status[INTEL_HUC_AUTH_BY_GUC].reg = GEN11_HUC_KERNEL_LOAD_INFO;
		huc->status[INTEL_HUC_AUTH_BY_GUC].mask = HUC_LOAD_SUCCESSFUL;
		huc->status[INTEL_HUC_AUTH_BY_GUC].value = HUC_LOAD_SUCCESSFUL;
	} else {
		huc->status[INTEL_HUC_AUTH_BY_GUC].reg = HUC_STATUS2;
		huc->status[INTEL_HUC_AUTH_BY_GUC].mask = HUC_FW_VERIFIED;
		huc->status[INTEL_HUC_AUTH_BY_GUC].value = HUC_FW_VERIFIED;
	}

	/* GSC-auth status: load-info register on DG2, HECI1 FWSTS5 elsewhere */
	if (IS_DG2(i915)) {
		huc->status[INTEL_HUC_AUTH_BY_GSC].reg = GEN11_HUC_KERNEL_LOAD_INFO;
		huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HUC_LOAD_SUCCESSFUL;
		huc->status[INTEL_HUC_AUTH_BY_GSC].value = HUC_LOAD_SUCCESSFUL;
	} else {
		huc->status[INTEL_HUC_AUTH_BY_GSC].reg = HECI_FWSTS(MTL_GSC_HECI1_BASE, 5);
		huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HECI1_FWSTS5_HUC_AUTH_DONE;
		huc->status[INTEL_HUC_AUTH_BY_GSC].value = HECI1_FWSTS5_HUC_AUTH_DONE;
	}
}
319218822Sdim
/* Decode the fused load mode into a human-readable name */
#define HUC_LOAD_MODE_STRING(x) (x ? "GSC" : "legacy")

/*
 * Verify that the firmware blob we fetched is compatible with the load mode
 * the HW requires (DMA vs GSC), and that the needed GSC access path
 * (mei components on DG2, GSCCS on newer platforms) is available.
 * Returns 0 on success or a negative error code on mismatch.
 */
static int check_huc_loading_mode(struct intel_huc *huc)
{
	struct intel_gt *gt = huc_to_gt(huc);
	bool gsc_enabled = huc->fw.has_gsc_headers;

	/*
	 * The fuse for HuC load via GSC is only valid on platforms that have
	 * GuC deprivilege.
	 */
	if (HAS_GUC_DEPRIVILEGE(gt->i915))
		huc->loaded_via_gsc = intel_uncore_read(gt->uncore, GUC_SHIM_CONTROL2) &
				      GSC_LOADS_HUC;

	if (huc->loaded_via_gsc && !gsc_enabled) {
		huc_err(huc, "HW requires a GSC-enabled blob, but we found a legacy one\n");
		return -ENOEXEC;
	}

	/*
	 * On newer platforms we have GSC-enabled binaries but we load the HuC
	 * via DMA. To do so we need to find the location of the legacy-style
	 * binary inside the GSC-enabled one, which we do at fetch time. Make
	 * sure that we were able to do so if the fuse says we need to load via
	 * DMA and the binary is GSC-enabled.
	 */
	if (!huc->loaded_via_gsc && gsc_enabled && !huc->fw.dma_start_offset) {
		huc_err(huc, "HW in DMA mode, but we have an incompatible GSC-enabled blob\n");
		return -ENOEXEC;
	}

	/*
	 * If the HuC is loaded via GSC, we need to be able to access the GSC.
	 * On DG2 this is done via the mei components, while on newer platforms
	 * it is done via the GSCCS.
	 */
	if (huc->loaded_via_gsc) {
		if (IS_DG2(gt->i915)) {
			if (!IS_ENABLED(CONFIG_INTEL_MEI_PXP) ||
			    !IS_ENABLED(CONFIG_INTEL_MEI_GSC)) {
				huc_info(huc, "can't load due to missing mei modules\n");
				return -EIO;
			}
		} else {
			if (!HAS_ENGINE(gt, GSC0)) {
				huc_info(huc, "can't load due to missing GSCCS\n");
				return -EIO;
			}
		}
	}

	huc_dbg(huc, "loaded by GSC = %s\n", str_yes_no(huc->loaded_via_gsc));

	return 0;
}
375218822Sdim
/**
 * intel_huc_init() - allocate HuC resources and initialize the firmware
 * @huc: intel_huc structure
 *
 * Checks load-mode compatibility, allocates the buffer used for the PXP 4.3
 * HuC auth messages on GSCCS platforms and initializes the firmware object.
 * On failure the firmware status is set to INIT_FAIL.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_huc_init(struct intel_huc *huc)
{
	struct intel_gt *gt = huc_to_gt(huc);
	int err;

	err = check_huc_loading_mode(huc);
	if (err)
		goto out;

	if (HAS_ENGINE(gt, GSC0)) {
		struct i915_vma *vma;

		/* 2x the packet size — presumably input + output; see the auth code */
		vma = intel_guc_allocate_vma(&gt->uc.guc, PXP43_HUC_AUTH_INOUT_SIZE * 2);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			huc_info(huc, "Failed to allocate heci pkt\n");
			goto out;
		}

		huc->heci_pkt = vma;
	}

	err = intel_uc_fw_init(&huc->fw);
	if (err)
		goto out_pkt;

	intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOADABLE);

	return 0;

out_pkt:
	if (huc->heci_pkt)
		i915_vma_unpin_and_release(&huc->heci_pkt, 0);
out:
	intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_INIT_FAIL);
	huc_info(huc, "initialization failed %pe\n", ERR_PTR(err));
	return err;
}
414218822Sdim
/**
 * intel_huc_fini() - tear down HuC resources
 * @huc: intel_huc structure
 *
 * Releases the delayed-load fence, the heci packet buffer (if allocated)
 * and the firmware object.
 */
void intel_huc_fini(struct intel_huc *huc)
{
	/*
	 * the fence is initialized in init_early, so we need to clean it up
	 * even if HuC loading is off.
	 */
	delayed_huc_load_fini(huc);

	if (huc->heci_pkt)
		i915_vma_unpin_and_release(&huc->heci_pkt, 0);

	if (intel_uc_fw_is_loadable(&huc->fw))
		intel_uc_fw_fini(&huc->fw);
}
429218822Sdim
430218822Sdimvoid intel_huc_suspend(struct intel_huc *huc)
431218822Sdim{
432218822Sdim	if (!intel_uc_fw_is_loadable(&huc->fw))
433218822Sdim		return;
434218822Sdim
435218822Sdim	/*
436218822Sdim	 * in the unlikely case that we're suspending before the GSC has
437218822Sdim	 * completed its loading sequence, just stop waiting. We'll restart
438218822Sdim	 * on resume.
439218822Sdim	 */
440218822Sdim	delayed_huc_load_complete(huc);
441218822Sdim}
442218822Sdim
443218822Sdimstatic const char *auth_mode_string(struct intel_huc *huc,
444218822Sdim				    enum intel_huc_authentication_type type)
445218822Sdim{
446218822Sdim	bool partial = huc->fw.has_gsc_headers && type == INTEL_HUC_AUTH_BY_GUC;
447218822Sdim
448218822Sdim	return partial ? "clear media" : "all workloads";
449218822Sdim}
450218822Sdim
/*
 * Use a longer timeout for debug builds so that problems can be detected
 * and analysed. But a shorter timeout for releases so that users don't
 * wait forever to find out there is a problem. Note that the only reason
 * an end user should hit the timeout is in case of extreme thermal throttling.
 * And a system that is that hot during boot is probably dead anyway!
 */
#if defined(CONFIG_DRM_I915_DEBUG_GEM)
#define HUC_LOAD_RETRY_LIMIT   20
#else
#define HUC_LOAD_RETRY_LIMIT   3
#endif

/*
 * Poll the HW until it reports the HuC as authenticated for the given type,
 * or until we run out of retries. Logs timing information and updates the
 * firmware status (RUNNING or LOAD_FAIL) accordingly.
 * Returns 0 on success, negative error code on timeout.
 */
int intel_huc_wait_for_auth_complete(struct intel_huc *huc,
				     enum intel_huc_authentication_type type)
{
	struct intel_gt *gt = huc_to_gt(huc);
	struct intel_uncore *uncore = gt->uncore;
	ktime_t before, after, delta;
	int ret, count;
	u64 delta_ms;
	u32 before_freq;

	/*
	 * The KMD requests maximum frequency during driver load, however thermal
	 * throttling can force the frequency down to minimum (although the board
	 * really should never get that hot in real life!). IFWI issues have been
	 * seen to cause sporadic failures to grant the higher frequency. And at
	 * minimum frequency, the authentication time can be in the seconds range.
	 * Note that there is a limit on how long an individual wait_for() can wait.
	 * So wrap it in a loop.
	 */
	before_freq = intel_rps_read_actual_frequency(&gt->rps);
	before = ktime_get();
	for (count = 0; count < HUC_LOAD_RETRY_LIMIT; count++) {
		ret = __intel_wait_for_register(gt->uncore,
						huc->status[type].reg,
						huc->status[type].mask,
						huc->status[type].value,
						2, 1000, NULL);
		if (!ret)
			break;

		huc_dbg(huc, "auth still in progress, count = %d, freq = %dMHz, status = 0x%08X\n",
			count, intel_rps_read_actual_frequency(&gt->rps),
			huc->status[type].reg.reg);
	}
	after = ktime_get();
	delta = ktime_sub(after, before);
	delta_ms = ktime_to_ms(delta);

	/* 50ms is the documented upper bound for a healthy authentication */
	if (delta_ms > 50) {
		huc_warn(huc, "excessive auth time: %lldms! [status = 0x%08X, count = %d, ret = %d]\n",
			 delta_ms, huc->status[type].reg.reg, count, ret);
		huc_warn(huc, "excessive auth time: [freq = %dMHz, before = %dMHz, perf_limit_reasons = 0x%08X]\n",
			 intel_rps_read_actual_frequency(&gt->rps), before_freq,
			 intel_uncore_read(uncore, intel_gt_perf_limit_reasons_reg(gt)));
	} else {
		huc_dbg(huc, "auth took %lldms, freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d\n",
			delta_ms, intel_rps_read_actual_frequency(&gt->rps),
			before_freq, huc->status[type].reg.reg, count, ret);
	}

	/* mark the load process as complete even if the wait failed */
	delayed_huc_load_complete(huc);

	if (ret) {
		huc_err(huc, "firmware not verified for %s: %pe\n",
			auth_mode_string(huc, type), ERR_PTR(ret));
		intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
		return ret;
	}

	intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING);
	huc_info(huc, "authenticated for %s\n", auth_mode_string(huc, type));
	return 0;
}
528218822Sdim
/**
 * intel_huc_auth() - Authenticate HuC uCode
 * @huc: intel_huc structure
 * @type: authentication type (via GuC or via GSC)
 *
 * Called after HuC and GuC firmware loading during intel_uc_init_hw().
 *
 * This function invokes the GuC action to authenticate the HuC firmware,
 * passing the offset of the RSA signature to intel_guc_auth_huc(). It then
 * waits for up to 50ms for firmware verification ACK.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_huc_auth(struct intel_huc *huc, enum intel_huc_authentication_type type)
{
	struct intel_gt *gt = huc_to_gt(huc);
	struct intel_guc *guc = &gt->uc.guc;
	int ret;

	if (!intel_uc_fw_is_loaded(&huc->fw))
		return -ENOEXEC;

	/* GSC will do the auth with the load */
	if (intel_huc_is_loaded_by_gsc(huc))
		return -ENODEV;

	if (intel_huc_is_authenticated(huc, type))
		return -EEXIST;

	/* fault-injection point for driver probe testing */
	ret = i915_inject_probe_error(gt->i915, -ENXIO);
	if (ret)
		goto fail;

	switch (type) {
	case INTEL_HUC_AUTH_BY_GUC:
		ret = intel_guc_auth_huc(guc, intel_guc_ggtt_offset(guc, huc->fw.rsa_data));
		break;
	case INTEL_HUC_AUTH_BY_GSC:
		ret = intel_huc_fw_auth_via_gsccs(huc);
		break;
	default:
		MISSING_CASE(type);
		ret = -EINVAL;
	}
	if (ret)
		goto fail;

	/* Check authentication status, it should be done by now */
	ret = intel_huc_wait_for_auth_complete(huc, type);
	if (ret)
		goto fail;

	return 0;

fail:
	huc_probe_error(huc, "%s authentication failed %pe\n",
			auth_mode_string(huc, type), ERR_PTR(ret));
	return ret;
}
586218822Sdim
58738889Sjdpbool intel_huc_is_authenticated(struct intel_huc *huc,
588218822Sdim				enum intel_huc_authentication_type type)
589218822Sdim{
590218822Sdim	struct intel_gt *gt = huc_to_gt(huc);
591218822Sdim	intel_wakeref_t wakeref;
592218822Sdim	u32 status = 0;
593218822Sdim
594218822Sdim	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
595218822Sdim		status = intel_uncore_read(gt->uncore, huc->status[type].reg);
59638889Sjdp
597218822Sdim	return (status & huc->status[type].mask) == huc->status[type].value;
598218822Sdim}
599218822Sdim
60038889Sjdpstatic bool huc_is_fully_authenticated(struct intel_huc *huc)
601218822Sdim{
602218822Sdim	struct intel_uc_fw *huc_fw = &huc->fw;
603218822Sdim
604218822Sdim	if (!huc_fw->has_gsc_headers)
605218822Sdim		return intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GUC);
606218822Sdim	else if (intel_huc_is_loaded_by_gsc(huc) || HAS_ENGINE(huc_to_gt(huc), GSC0))
60777298Sobrien		return intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC);
608218822Sdim	else
609218822Sdim		return false;
610218822Sdim}
611218822Sdim
/**
 * intel_huc_check_status() - check HuC status
 * @huc: intel_huc structure
 *
 * This function reads status register to verify if HuC
 * firmware was successfully loaded.
 *
 * The return values match what is expected for the I915_PARAM_HUC_STATUS
 * getparam.
 *
 * Return: 1 if fully authenticated, 2 if authenticated for clear-media
 * workloads only, 0 if not authenticated, or a negative error code when
 * the firmware could not be fetched/initialized/loaded.
 */
int intel_huc_check_status(struct intel_huc *huc)
{
	struct intel_uc_fw *huc_fw = &huc->fw;

	/* map firmware states that preclude authentication to error codes */
	switch (__intel_uc_fw_status(huc_fw)) {
	case INTEL_UC_FIRMWARE_NOT_SUPPORTED:
		return -ENODEV;
	case INTEL_UC_FIRMWARE_DISABLED:
		return -EOPNOTSUPP;
	case INTEL_UC_FIRMWARE_MISSING:
		return -ENOPKG;
	case INTEL_UC_FIRMWARE_ERROR:
		return -ENOEXEC;
	case INTEL_UC_FIRMWARE_INIT_FAIL:
		return -ENOMEM;
	case INTEL_UC_FIRMWARE_LOAD_FAIL:
		return -EIO;
	default:
		break;
	}

	/*
	 * GSC-enabled binaries loaded via DMA are first partially
	 * authenticated by GuC and then fully authenticated by GSC
	 */
	if (huc_is_fully_authenticated(huc))
		return 1; /* full auth */
	else if (huc_fw->has_gsc_headers && !intel_huc_is_loaded_by_gsc(huc) &&
		 intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GUC))
		return 2; /* clear media only */
	else
		return 0;
}
655218822Sdim
656218822Sdimstatic bool huc_has_delayed_load(struct intel_huc *huc)
657218822Sdim{
658218822Sdim	return intel_huc_is_loaded_by_gsc(huc) &&
659218822Sdim	       (huc->delayed_load.status != INTEL_HUC_DELAYED_LOAD_ERROR);
660218822Sdim}
661218822Sdim
662218822Sdimvoid intel_huc_update_auth_status(struct intel_huc *huc)
663218822Sdim{
664218822Sdim	if (!intel_uc_fw_is_loadable(&huc->fw))
665218822Sdim		return;
666218822Sdim
667218822Sdim	if (!huc->fw.has_gsc_headers)
668218822Sdim		return;
669218822Sdim
670218822Sdim	if (huc_is_fully_authenticated(huc))
671218822Sdim		intel_uc_fw_change_status(&huc->fw,
672218822Sdim					  INTEL_UC_FIRMWARE_RUNNING);
673218822Sdim	else if (huc_has_delayed_load(huc))
674218822Sdim		huc_delayed_load_start(huc);
675218822Sdim}
676218822Sdim
/**
 * intel_huc_load_status - dump information about HuC load status
 * @huc: the HuC
 * @p: the &drm_printer
 *
 * Pretty printer for HuC load status.
 */
void intel_huc_load_status(struct intel_huc *huc, struct drm_printer *p)
{
	struct intel_gt *gt = huc_to_gt(huc);
	intel_wakeref_t wakeref;

	if (!intel_huc_is_supported(huc)) {
		drm_printf(p, "HuC not supported\n");
		return;
	}

	if (!intel_huc_is_wanted(huc)) {
		drm_printf(p, "HuC disabled\n");
		return;
	}

	intel_uc_fw_dump(&huc->fw, p);

	/* reading the status register requires the device to be awake */
	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		drm_printf(p, "HuC status: 0x%08x\n",
			   intel_uncore_read(gt->uncore, huc->status[INTEL_HUC_AUTH_BY_GUC].reg));
}
705218822Sdim