// SPDX-License-Identifier: MIT
/*
 * Copyright(c) 2020 Intel Corporation.
 */

#include <linux/component.h>

#include <drm/i915_pxp_tee_interface.h>
#include <drm/i915_component.h>

#include "gem/i915_gem_lmem.h"
#include "gt/intel_gt_print.h"

#include "i915_drv.h"
#include "gt/intel_gt.h"

#include "intel_pxp.h"
#include "intel_pxp_cmd_interface_42.h"
#include "intel_pxp_huc.h"
#include "intel_pxp_session.h"
#include "intel_pxp_tee.h"
#include "intel_pxp_types.h"

#define PXP_TRANSPORT_TIMEOUT_MS 5000 /* 5 sec */

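/*
 * Check whether a PXP firmware status code points at a platform/BIOS
 * configuration problem (or an API version mismatch) rather than a
 * transient failure; such errors are also latched in
 * pxp->platform_cfg_is_bad so they can be reported later.
 */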
static bool
is_fw_err_platform_config(struct intel_pxp *pxp, u32 type)
{
	switch (type) {
	case PXP_STATUS_ERROR_API_VERSION:
	case PXP_STATUS_PLATFCONFIG_KF1_NOVERIF:
	case PXP_STATUS_PLATFCONFIG_KF1_BAD:
		pxp->platform_cfg_is_bad = true;
		return true;
	default:
		break;
	}
	return false;
}

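/* Map relevant firmware status codes to short strings for log messages. */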
static const char *
fw_err_to_string(u32 type)
{
	switch (type) {
	case PXP_STATUS_ERROR_API_VERSION:
		return "ERR_API_VERSION";
	case PXP_STATUS_NOT_READY:
		return "ERR_NOT_READY";
	case PXP_STATUS_PLATFCONFIG_KF1_NOVERIF:
	case PXP_STATUS_PLATFCONFIG_KF1_BAD:
		return "ERR_PLATFORM_CONFIG";
	default:
		break;
	}
	return NULL;
}

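/*
 * Send a message to the PXP firmware via the bound TEE component and wait
 * for the reply, holding tee_mutex to serialize transactions. Returns 0 on
 * success (optionally storing the received size in *msg_out_rcv_size) or a
 * negative error code.
 */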
static int intel_pxp_tee_io_message(struct intel_pxp *pxp,
				    void *msg_in, u32 msg_in_size,
				    void *msg_out, u32 msg_out_max_size,
				    u32 *msg_out_rcv_size)
{
	struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
	struct i915_pxp_component *pxp_component = pxp->pxp_component;
	int ret = 0;

	mutex_lock(&pxp->tee_mutex);

	/*
	 * The binding of the component is asynchronous from i915 probe, so we
	 * can't be sure it has happened.
	 */
	if (!pxp_component) {
		ret = -ENODEV;
		goto unlock;
	}

	ret = pxp_component->ops->send(pxp_component->tee_dev, msg_in, msg_in_size,
				       PXP_TRANSPORT_TIMEOUT_MS);
	if (ret) {
		drm_err(&i915->drm, "Failed to send PXP TEE message\n");
		goto unlock;
	}

	ret = pxp_component->ops->recv(pxp_component->tee_dev, msg_out, msg_out_max_size,
				       PXP_TRANSPORT_TIMEOUT_MS);
	if (ret < 0) {
		drm_err(&i915->drm, "Failed to receive PXP TEE message\n");
		goto unlock;
	}

	if (ret > msg_out_max_size) {
		drm_err(&i915->drm,
			"Failed to receive PXP TEE message due to unexpected output size\n");
		ret = -ENOSPC;
		goto unlock;
	}

	if (msg_out_rcv_size)
		*msg_out_rcv_size = ret;

	ret = 0;
unlock:
	mutex_unlock(&pxp->tee_mutex);
	return ret;
}

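/*
 * Send a streaming command to the GSC through the TEE component. The message
 * is staged in the pre-allocated stream_cmd lmem page, which is handed to the
 * component as a scatterlist for both input and output, so each direction is
 * limited to a single page.
 */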
int intel_pxp_tee_stream_message(struct intel_pxp *pxp,
				 u8 client_id, u32 fence_id,
				 void *msg_in, size_t msg_in_len,
				 void *msg_out, size_t msg_out_len)
{
	/* TODO: for bigger objects we need to use a sg of 4k pages */
	const size_t max_msg_size = PAGE_SIZE;
	struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
	struct i915_pxp_component *pxp_component = pxp->pxp_component;
	unsigned int offset = 0;
	struct scatterlist *sg;
	int ret;

	if (msg_in_len > max_msg_size || msg_out_len > max_msg_size)
		return -ENOSPC;

	mutex_lock(&pxp->tee_mutex);

	if (unlikely(!pxp_component || !pxp_component->ops->gsc_command)) {
		ret = -ENODEV;
		goto unlock;
	}

	GEM_BUG_ON(!pxp->stream_cmd.obj);

	sg = i915_gem_object_get_sg_dma(pxp->stream_cmd.obj, 0, &offset);

	memcpy(pxp->stream_cmd.vaddr, msg_in, msg_in_len);

	ret = pxp_component->ops->gsc_command(pxp_component->tee_dev, client_id,
					      fence_id, sg, msg_in_len, sg);
	if (ret < 0)
		drm_err(&i915->drm, "Failed to send PXP TEE gsc command\n");
	else
		memcpy(msg_out, pxp->stream_cmd.vaddr, msg_out_len);

unlock:
	mutex_unlock(&pxp->tee_mutex);
	return ret;
}

/**
 * i915_pxp_tee_component_bind - bind function to pass the function pointers to pxp_tee
 * @i915_kdev: pointer to i915 kernel device
 * @tee_kdev: pointer to tee kernel device
 * @data: pointer to pxp_tee_master containing the function pointers
 *
 * This bind function is called during the system boot or resume from system sleep.
 *
 * Return: return 0 if successful.
 */
static int i915_pxp_tee_component_bind(struct device *i915_kdev,
				       struct device *tee_kdev, void *data)
{
	struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
	struct intel_pxp *pxp = i915->pxp;
	struct intel_gt *gt = pxp->ctrl_gt;
	struct intel_uc *uc = &gt->uc;
	intel_wakeref_t wakeref;
	int ret = 0;

	if (!HAS_HECI_PXP(i915)) {
		pxp->dev_link = device_link_add(i915_kdev, tee_kdev, DL_FLAG_STATELESS);
		if (drm_WARN_ON(&i915->drm, !pxp->dev_link))
			return -ENODEV;
	}

	mutex_lock(&pxp->tee_mutex);
	pxp->pxp_component = data;
	pxp->pxp_component->tee_dev = tee_kdev;
	mutex_unlock(&pxp->tee_mutex);

	if (intel_uc_uses_huc(uc) && intel_huc_is_loaded_by_gsc(&uc->huc)) {
		with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
			/* load huc via pxp */
			ret = intel_huc_fw_load_and_auth_via_gsc(&uc->huc);
			if (ret < 0)
				gt_probe_error(gt, "failed to load huc via gsc %d\n", ret);
		}
	}

	/* if we are suspended, the HW will be re-initialized on resume */
	wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
	if (!wakeref)
		return 0;

	/* the component is required to fully start the PXP HW */
	if (intel_pxp_is_enabled(pxp))
		intel_pxp_init_hw(pxp);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return ret;
}

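/*
 * Counterpart of i915_pxp_tee_component_bind: tear down the PXP HW (if
 * enabled and the device is awake), drop the component pointer and remove
 * the device link added at bind time.
 */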
static void i915_pxp_tee_component_unbind(struct device *i915_kdev,
					  struct device *tee_kdev, void *data)
{
	struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
	struct intel_pxp *pxp = i915->pxp;
	intel_wakeref_t wakeref;

	if (intel_pxp_is_enabled(pxp))
		with_intel_runtime_pm_if_in_use(&i915->runtime_pm, wakeref)
			intel_pxp_fini_hw(pxp);

	mutex_lock(&pxp->tee_mutex);
	pxp->pxp_component = NULL;
	mutex_unlock(&pxp->tee_mutex);

	if (pxp->dev_link) {
		device_link_del(pxp->dev_link);
		pxp->dev_link = NULL;
	}
}

static const struct component_ops i915_pxp_tee_component_ops = {
	.bind   = i915_pxp_tee_component_bind,
	.unbind = i915_pxp_tee_component_unbind,
};

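/*
 * On discrete GPUs, pre-allocate, pin and map a single contiguous lmem page
 * that intel_pxp_tee_stream_message() uses to stage GSC commands; on
 * integrated parts no buffer is needed and this is a no-op.
 */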
static int alloc_streaming_command(struct intel_pxp *pxp)
{
	struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
	struct drm_i915_gem_object *obj = NULL;
	void *cmd;
	int err;

	pxp->stream_cmd.obj = NULL;
	pxp->stream_cmd.vaddr = NULL;

	if (!IS_DGFX(i915))
		return 0;

	/* allocate lmem object of one page for PXP command memory and store it */
	obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj)) {
		drm_err(&i915->drm, "Failed to allocate pxp streaming command!\n");
		return PTR_ERR(obj);
	}

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		drm_err(&i915->drm, "Failed to pin gsc message page!\n");
		goto out_put;
	}

	/* map the lmem into the virtual memory pointer */
	cmd = i915_gem_object_pin_map_unlocked(obj,
					       intel_gt_coherent_map_type(pxp->ctrl_gt,
									  obj, true));
	if (IS_ERR(cmd)) {
		drm_err(&i915->drm, "Failed to map gsc message page!\n");
		err = PTR_ERR(cmd);
		goto out_unpin;
	}

	memset(cmd, 0, obj->base.size);

	pxp->stream_cmd.obj = obj;
	pxp->stream_cmd.vaddr = cmd;

	return 0;

out_unpin:
	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);
	return err;
}

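/* Release the stream command page set up by alloc_streaming_command(). */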
static void free_streaming_command(struct intel_pxp *pxp)
{
	struct drm_i915_gem_object *obj = fetch_and_zero(&pxp->stream_cmd.obj);

	if (!obj)
		return;

	i915_gem_object_unpin_map(obj);
	i915_gem_object_unpin_pages(obj);
	i915_gem_object_put(obj);
}

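/*
 * Register the i915 side of the PXP component: allocate the stream command
 * buffer and add the component so that the TEE side can bind to it.
 */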
int intel_pxp_tee_component_init(struct intel_pxp *pxp)
{
	int ret;
	struct intel_gt *gt = pxp->ctrl_gt;
	struct drm_i915_private *i915 = gt->i915;

	ret = alloc_streaming_command(pxp);
	if (ret)
		return ret;

	ret = component_add_typed(i915->drm.dev, &i915_pxp_tee_component_ops,
				  I915_COMPONENT_PXP);
	if (ret < 0) {
		drm_err(&i915->drm, "Failed to add PXP component (%d)\n", ret);
		goto out_free;
	}

	pxp->pxp_component_added = true;

	return 0;

out_free:
	free_streaming_command(pxp);
	return ret;
}

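/* Unregister the PXP component and free the stream command buffer. */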
void intel_pxp_tee_component_fini(struct intel_pxp *pxp)
{
	struct drm_i915_private *i915 = pxp->ctrl_gt->i915;

	if (!pxp->pxp_component_added)
		return;

	component_del(i915->drm.dev, &i915_pxp_tee_component_ops);
	pxp->pxp_component_added = false;

	free_streaming_command(pxp);
}

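/*
 * Ask the PXP firmware to create the arbitration (ARB) session with the given
 * id in heavy protection mode, using the PXP 4.2 command interface. Transport
 * errors are returned to the caller; a non-zero firmware status is only
 * logged.
 */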
int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp,
					 int arb_session_id)
{
	struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
	struct pxp42_create_arb_in msg_in = {};
	struct pxp42_create_arb_out msg_out = {};
	int ret;

	msg_in.header.api_version = PXP_APIVER(4, 2);
	msg_in.header.command_id = PXP42_CMDID_INIT_SESSION;
	msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header);
	msg_in.protection_mode = PXP42_ARB_SESSION_MODE_HEAVY;
	msg_in.session_id = arb_session_id;

	ret = intel_pxp_tee_io_message(pxp,
				       &msg_in, sizeof(msg_in),
				       &msg_out, sizeof(msg_out),
				       NULL);

	if (ret) {
		drm_err(&i915->drm, "Failed to send tee msg init arb session, ret=[%d]\n", ret);
	} else if (msg_out.header.status != 0) {
		if (is_fw_err_platform_config(pxp, msg_out.header.status)) {
			drm_info_once(&i915->drm,
				      "PXP init-arb-session-%d failed due to BIOS/SOC:0x%08x:%s\n",
				      arb_session_id, msg_out.header.status,
				      fw_err_to_string(msg_out.header.status));
		} else {
			drm_dbg(&i915->drm, "PXP init-arb-session-%d failed 0x%08x:%s:\n",
				arb_session_id, msg_out.header.status,
				fw_err_to_string(msg_out.header.status));
			drm_dbg(&i915->drm, "     cmd-detail: ID=[0x%08x],API-Ver-[0x%08x]\n",
				msg_in.header.command_id, msg_in.header.api_version);
		}
	}

	return ret;
}

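/*
 * Ask the PXP firmware to invalidate the stream key of the given session,
 * retrying on failure (up to three attempts) since leaving stale session
 * state behind would put the GT and the firmware out of sync.
 */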
void intel_pxp_tee_end_arb_fw_session(struct intel_pxp *pxp, u32 session_id)
{
	struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
	struct pxp42_inv_stream_key_in msg_in = {};
	struct pxp42_inv_stream_key_out msg_out = {};
	int ret, trials = 0;

try_again:
	memset(&msg_in, 0, sizeof(msg_in));
	memset(&msg_out, 0, sizeof(msg_out));
	msg_in.header.api_version = PXP_APIVER(4, 2);
	msg_in.header.command_id = PXP42_CMDID_INVALIDATE_STREAM_KEY;
	msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header);

	msg_in.header.stream_id = FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_VALID, 1);
	msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_APP_TYPE, 0);
	msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_ID, session_id);

	ret = intel_pxp_tee_io_message(pxp,
				       &msg_in, sizeof(msg_in),
				       &msg_out, sizeof(msg_out),
				       NULL);

	/* Keeping the GT and firmware session state coherent during cleanup is critical, so try again if it fails */
	if ((ret || msg_out.header.status != 0x0) && ++trials < 3)
		goto try_again;

	if (ret) {
		drm_err(&i915->drm, "Failed to send tee msg for inv-stream-key-%u, ret=[%d]\n",
			session_id, ret);
	} else if (msg_out.header.status != 0) {
		if (is_fw_err_platform_config(pxp, msg_out.header.status)) {
			drm_info_once(&i915->drm,
				      "PXP inv-stream-key-%u failed due to BIOS/SOC:0x%08x:%s\n",
				      session_id, msg_out.header.status,
				      fw_err_to_string(msg_out.header.status));
		} else {
			drm_dbg(&i915->drm, "PXP inv-stream-key-%u failed 0x%08x:%s:\n",
				session_id, msg_out.header.status,
				fw_err_to_string(msg_out.header.status));
			drm_dbg(&i915->drm, "     cmd-detail: ID=[0x%08x],API-Ver-[0x%08x]\n",
				msg_in.header.command_id, msg_in.header.api_version);
		}
	}
}