1// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */
5
6#include "xe_huc.h"
7
8#include <drm/drm_managed.h>
9
10#include "abi/gsc_pxp_commands_abi.h"
11#include "regs/xe_gsc_regs.h"
12#include "regs/xe_guc_regs.h"
13#include "xe_assert.h"
14#include "xe_bo.h"
15#include "xe_device.h"
16#include "xe_force_wake.h"
17#include "xe_gsc_submit.h"
18#include "xe_gt.h"
19#include "xe_guc.h"
20#include "xe_map.h"
21#include "xe_mmio.h"
22#include "xe_uc_fw.h"
23
24static struct xe_gt *
25huc_to_gt(struct xe_huc *huc)
26{
27	return container_of(huc, struct xe_gt, uc.huc);
28}
29
/* Resolve the xe device owning this HuC instance. */
static struct xe_device *
huc_to_xe(struct xe_huc *huc)
{
	struct xe_gt *gt = huc_to_gt(huc);

	return gt_to_xe(gt);
}
35
36static struct xe_guc *
37huc_to_guc(struct xe_huc *huc)
38{
39	return &container_of(huc, struct xe_uc, huc)->guc;
40}
41
42static void free_gsc_pkt(struct drm_device *drm, void *arg)
43{
44	struct xe_huc *huc = arg;
45
46	xe_bo_unpin_map_no_vm(huc->gsc_pkt);
47	huc->gsc_pkt = NULL;
48}
49
#define PXP43_HUC_AUTH_INOUT_SIZE SZ_4K
/*
 * Allocate the buffer used to exchange the HuC auth message with the GSC.
 * A single GGTT-mapped system-memory BO holds both directions: the request
 * goes in the first PXP43_HUC_AUTH_INOUT_SIZE bytes, the GSC reply lands in
 * the second half (see huc_auth_via_gsccs()). The BO is released
 * automatically via a drmm action when the drm device goes away.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int huc_alloc_gsc_pkt(struct xe_huc *huc)
{
	struct xe_gt *gt = huc_to_gt(huc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;

	/* we use a single object for both input and output */
	bo = xe_bo_create_pin_map(xe, gt_to_tile(gt), NULL,
				  PXP43_HUC_AUTH_INOUT_SIZE * 2,
				  ttm_bo_type_kernel,
				  XE_BO_CREATE_SYSTEM_BIT |
				  XE_BO_CREATE_GGTT_BIT);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	huc->gsc_pkt = bo;

	return drmm_add_action_or_reset(&xe->drm, free_gsc_pkt, huc);
}
70
71int xe_huc_init(struct xe_huc *huc)
72{
73	struct xe_gt *gt = huc_to_gt(huc);
74	struct xe_tile *tile = gt_to_tile(gt);
75	struct xe_device *xe = gt_to_xe(gt);
76	int ret;
77
78	huc->fw.type = XE_UC_FW_TYPE_HUC;
79
80	/* On platforms with a media GT the HuC is only available there */
81	if (tile->media_gt && (gt != tile->media_gt)) {
82		xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_NOT_SUPPORTED);
83		return 0;
84	}
85
86	ret = xe_uc_fw_init(&huc->fw);
87	if (ret)
88		goto out;
89
90	if (!xe_uc_fw_is_enabled(&huc->fw))
91		return 0;
92
93	if (huc->fw.has_gsc_headers) {
94		ret = huc_alloc_gsc_pkt(huc);
95		if (ret)
96			goto out;
97	}
98
99	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_LOADABLE);
100
101	return 0;
102
103out:
104	drm_err(&xe->drm, "HuC init failed with %d", ret);
105	return ret;
106}
107
108int xe_huc_init_post_hwconfig(struct xe_huc *huc)
109{
110	struct xe_tile *tile = gt_to_tile(huc_to_gt(huc));
111	struct xe_device *xe = huc_to_xe(huc);
112	int ret;
113
114	if (!IS_DGFX(huc_to_xe(huc)))
115		return 0;
116
117	if (!xe_uc_fw_is_loadable(&huc->fw))
118		return 0;
119
120	ret = xe_managed_bo_reinit_in_vram(xe, tile, &huc->fw.bo);
121	if (ret)
122		return ret;
123
124	return 0;
125}
126
127int xe_huc_upload(struct xe_huc *huc)
128{
129	if (!xe_uc_fw_is_loadable(&huc->fw))
130		return 0;
131	return xe_uc_fw_upload(&huc->fw, 0, HUC_UKERNEL);
132}
133
/* Field accessors for the PXP 4.3 HuC auth message in the iosys-mapped BO */
#define huc_auth_msg_wr(xe_, map_, offset_, field_, val_) \
	xe_map_wr_field(xe_, map_, offset_, struct pxp43_new_huc_auth_in, field_, val_)
#define huc_auth_msg_rd(xe_, map_, offset_, field_) \
	xe_map_rd_field(xe_, map_, offset_, struct pxp43_huc_auth_out, field_)
138
/*
 * Emit a PXP43_CMDID_NEW_HUC_AUTH message at @wr_offset in @map.
 *
 * @huc_offset and @huc_size describe the GGTT address and size of the HuC
 * firmware blob the GSC is being asked to authenticate.
 *
 * Returns the offset of the first byte after the emitted message.
 */
static u32 huc_emit_pxp_auth_msg(struct xe_device *xe, struct iosys_map *map,
				 u32 wr_offset, u32 huc_offset, u32 huc_size)
{
	/* Zero the whole message before filling the individual fields */
	xe_map_memset(xe, map, wr_offset, 0, sizeof(struct pxp43_new_huc_auth_in));

	huc_auth_msg_wr(xe, map, wr_offset, header.api_version, PXP_APIVER(4, 3));
	huc_auth_msg_wr(xe, map, wr_offset, header.command_id, PXP43_CMDID_NEW_HUC_AUTH);
	huc_auth_msg_wr(xe, map, wr_offset, header.status, 0);
	/* buffer_len covers the payload only, not the command header itself */
	huc_auth_msg_wr(xe, map, wr_offset, header.buffer_len,
			sizeof(struct pxp43_new_huc_auth_in) - sizeof(struct pxp_cmd_header));
	huc_auth_msg_wr(xe, map, wr_offset, huc_base_address, huc_offset);
	huc_auth_msg_wr(xe, map, wr_offset, huc_size, huc_size);

	return wr_offset + sizeof(struct pxp43_new_huc_auth_in);
}
154
/*
 * Authenticate the HuC by submitting a PXP auth request to the GSC FW
 * through the GSC command streamer.
 *
 * Request and reply share the huc->gsc_pkt BO: the message is emitted at
 * offset 0 and the GSC writes its reply starting at
 * PXP43_HUC_AUTH_INOUT_SIZE. If the GSC reports the request as still
 * pending, submission is retried a few times with a short sleep in between.
 *
 * Returns 0 on success, -ENODEV if the packet BO was never allocated,
 * another negative error code on failure.
 */
static int huc_auth_via_gsccs(struct xe_huc *huc)
{
	struct xe_gt *gt = huc_to_gt(huc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *pkt = huc->gsc_pkt;
	u32 wr_offset;
	u32 rd_offset;
	u64 ggtt_offset;
	u32 out_status;
	int retry = 5;
	int err = 0;

	if (!pkt)
		return -ENODEV;

	ggtt_offset = xe_bo_ggtt_addr(pkt);

	/* GSC HECI header first, then the PXP auth message right after it */
	wr_offset = xe_gsc_emit_header(xe, &pkt->vmap, 0, HECI_MEADDRESS_PXP, 0,
				       sizeof(struct pxp43_new_huc_auth_in));
	wr_offset = huc_emit_pxp_auth_msg(xe, &pkt->vmap, wr_offset,
					  xe_bo_ggtt_addr(huc->fw.bo),
					  huc->fw.bo->size);
	do {
		err = xe_gsc_pkt_submit_kernel(&gt->uc.gsc, ggtt_offset, wr_offset,
					       ggtt_offset + PXP43_HUC_AUTH_INOUT_SIZE,
					       PXP43_HUC_AUTH_INOUT_SIZE);
		if (err)
			break;

		/* GSC still busy with a previous request: back off and retry */
		if (xe_gsc_check_and_update_pending(xe, &pkt->vmap, 0, &pkt->vmap,
						    PXP43_HUC_AUTH_INOUT_SIZE)) {
			err = -EBUSY;
			msleep(50);
		}
	} while (--retry && err == -EBUSY);

	if (err) {
		drm_err(&xe->drm, "failed to submit GSC request to auth: %d\n", err);
		return err;
	}

	/* Validate the GSC reply header and locate the PXP message in it */
	err = xe_gsc_read_out_header(xe, &pkt->vmap, PXP43_HUC_AUTH_INOUT_SIZE,
				     sizeof(struct pxp43_huc_auth_out), &rd_offset);
	if (err) {
		drm_err(&xe->drm, "HuC: invalid GSC reply for auth (err=%d)\n", err);
		return err;
	}

	/*
	 * The GSC will return PXP_STATUS_OP_NOT_PERMITTED if the HuC is already
	 * authenticated. If the same error is ever returned with HuC not loaded
	 * we'll still catch it when we check the authentication bit later.
	 */
	out_status = huc_auth_msg_rd(xe, &pkt->vmap, rd_offset, header.status);
	if (out_status != PXP_STATUS_SUCCESS && out_status != PXP_STATUS_OP_NOT_PERMITTED) {
		drm_err(&xe->drm, "auth failed with GSC error = 0x%x\n", out_status);
		return -EIO;
	}

	return 0;
}
216
/*
 * Per-auth-type register and bit the HW sets once the HuC has been
 * successfully authenticated via that path.
 */
static const struct {
	const char *name;	/* human-readable name for log messages */
	struct xe_reg reg;	/* register reporting the auth status */
	u32 val;		/* bit set in @reg once auth completes */
} huc_auth_modes[XE_HUC_AUTH_TYPES_COUNT] = {
	[XE_HUC_AUTH_VIA_GUC] = { "GuC",
				  HUC_KERNEL_LOAD_INFO,
				  HUC_LOAD_SUCCESSFUL },
	[XE_HUC_AUTH_VIA_GSC] = { "GSC",
				  HECI_FWSTS5(MTL_GSC_HECI1_BASE),
				  HECI1_FWSTS5_HUC_AUTH_DONE },
};
229
230bool xe_huc_is_authenticated(struct xe_huc *huc, enum xe_huc_auth_types type)
231{
232	struct xe_gt *gt = huc_to_gt(huc);
233
234	return xe_mmio_read32(gt, huc_auth_modes[type].reg) & huc_auth_modes[type].val;
235}
236
237int xe_huc_auth(struct xe_huc *huc, enum xe_huc_auth_types type)
238{
239	struct xe_device *xe = huc_to_xe(huc);
240	struct xe_gt *gt = huc_to_gt(huc);
241	struct xe_guc *guc = huc_to_guc(huc);
242	int ret;
243
244	if (!xe_uc_fw_is_loadable(&huc->fw))
245		return 0;
246
247	/* On newer platforms the HuC survives reset, so no need to re-auth */
248	if (xe_huc_is_authenticated(huc, type)) {
249		xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_RUNNING);
250		return 0;
251	}
252
253	if (!xe_uc_fw_is_loaded(&huc->fw))
254		return -ENOEXEC;
255
256	switch (type) {
257	case XE_HUC_AUTH_VIA_GUC:
258		ret = xe_guc_auth_huc(guc, xe_bo_ggtt_addr(huc->fw.bo) +
259				      xe_uc_fw_rsa_offset(&huc->fw));
260		break;
261	case XE_HUC_AUTH_VIA_GSC:
262		ret = huc_auth_via_gsccs(huc);
263		break;
264	default:
265		XE_WARN_ON(type);
266		return -EINVAL;
267	}
268	if (ret) {
269		drm_err(&xe->drm, "Failed to trigger HuC auth via %s: %d\n",
270			huc_auth_modes[type].name, ret);
271		goto fail;
272	}
273
274	ret = xe_mmio_wait32(gt, huc_auth_modes[type].reg, huc_auth_modes[type].val,
275			     huc_auth_modes[type].val, 100000, NULL, false);
276	if (ret) {
277		drm_err(&xe->drm, "HuC: Firmware not verified %d\n", ret);
278		goto fail;
279	}
280
281	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_RUNNING);
282	drm_dbg(&xe->drm, "HuC authenticated via %s\n", huc_auth_modes[type].name);
283
284	return 0;
285
286fail:
287	drm_err(&xe->drm, "HuC: Auth via %s failed: %d\n",
288		huc_auth_modes[type].name, ret);
289	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
290
291	return ret;
292}
293
294void xe_huc_sanitize(struct xe_huc *huc)
295{
296	if (!xe_uc_fw_is_loadable(&huc->fw))
297		return;
298	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_LOADABLE);
299}
300
/**
 * xe_huc_print_info - print HuC FW info and HW status into a drm_printer
 * @huc: the &xe_huc
 * @p: the &drm_printer to print into
 *
 * Reading the status register requires GT forcewake; the register dump is
 * silently skipped if forcewake cannot be obtained.
 */
void xe_huc_print_info(struct xe_huc *huc, struct drm_printer *p)
{
	struct xe_gt *gt = huc_to_gt(huc);
	int err;

	xe_uc_fw_print(&huc->fw, p);

	if (!xe_uc_fw_is_enabled(&huc->fw))
		return;

	/* Register access needs the GT powered up */
	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return;

	drm_printf(p, "\nHuC status: 0x%08x\n",
		   xe_mmio_read32(gt, HUC_KERNEL_LOAD_INFO));

	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
}
320