// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gsc_proxy.h"

#include <linux/component.h>
#include <linux/delay.h>

#include <drm/drm_managed.h>
#include <drm/i915_component.h>
#include <drm/i915_gsc_proxy_mei_interface.h>

#include "abi/gsc_proxy_commands_abi.h"
#include "regs/xe_gsc_regs.h"
#include "xe_bo.h"
#include "xe_gsc.h"
#include "xe_gsc_submit.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pm.h"

/*
 * GSC proxy:
 * The GSC uC needs to communicate with the CSME to perform certain operations.
 * Since the GSC can't perform this communication directly on platforms where it
 * is integrated in GT, the graphics driver needs to transfer the messages from
 * GSC to CSME and back. The proxy flow must be manually started after the GSC
 * is loaded to signal to GSC that we're ready to handle its messages and allow
 * it to query its init data from CSME; GSC will then trigger an HECI2 interrupt
 * if it needs to send messages to CSME again.
 * The proxy flow is as follows:
 * 1 - Xe submits a request to GSC asking for the message to CSME
 * 2 - GSC replies with the proxy header + payload for CSME
 * 3 - Xe sends the reply from GSC as-is to CSME via the mei proxy component
 * 4 - CSME replies with the proxy header + payload for GSC
 * 5 - Xe submits a request to GSC with the reply from CSME
 * 6 - GSC replies either with a new header + payload (same as step 2, so we
 *     restart from there) or with an end message.
 */
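
/*
 * Messages sent to the GSC are laid out as a GSC HECI header (emitted via
 * xe_gsc_emit_header()), followed by a struct xe_gsc_proxy_header carrying
 * the source/destination addressing and the payload length, and then the
 * payload itself; messages exchanged with the CSME carry only the proxy
 * header and payload.
 */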

/*
 * The component should load quite quickly in most cases, but it could take
 * a bit. Using a very big timeout just to cover the worst case scenario
 */
#define GSC_PROXY_INIT_TIMEOUT_MS 20000

/* shorthand define for code compactness */
#define PROXY_HDR_SIZE (sizeof(struct xe_gsc_proxy_header))

/* the protocol supports up to 32K in each direction */
#define GSC_PROXY_BUFFER_SIZE SZ_32K
#define GSC_PROXY_CHANNEL_SIZE (GSC_PROXY_BUFFER_SIZE * 2)

static struct xe_gt *
gsc_to_gt(struct xe_gsc *gsc)
{
	return container_of(gsc, struct xe_gt, uc.gsc);
}

static inline struct xe_device *kdev_to_xe(struct device *kdev)
{
	return dev_get_drvdata(kdev);
}

static bool gsc_proxy_init_done(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	u32 fwsts1 = xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE));

	return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE, fwsts1) ==
	       HECI1_FWSTS1_PROXY_STATE_NORMAL;
}

static void __gsc_proxy_irq_rmw(struct xe_gsc *gsc, u32 clr, u32 set)
{
	struct xe_gt *gt = gsc_to_gt(gsc);

	/* make sure we never accidentally write the RST bit */
	clr |= HECI_H_CSR_RST;

	xe_mmio_rmw32(gt, HECI_H_CSR(MTL_GSC_HECI2_BASE), clr, set);
}

static void gsc_proxy_irq_clear(struct xe_gsc *gsc)
{
	/* The status bit is cleared by writing to it */
	__gsc_proxy_irq_rmw(gsc, 0, HECI_H_CSR_IS);
}

static void gsc_proxy_irq_toggle(struct xe_gsc *gsc, bool enabled)
{
	u32 set = enabled ? HECI_H_CSR_IE : 0;
	u32 clr = enabled ? 0 : HECI_H_CSR_IE;

	__gsc_proxy_irq_rmw(gsc, clr, set);
}

static int proxy_send_to_csme(struct xe_gsc *gsc, u32 size)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct i915_gsc_proxy_component *comp = gsc->proxy.component;
	int ret;

	ret = comp->ops->send(comp->mei_dev, gsc->proxy.to_csme, size);
	if (ret < 0) {
		xe_gt_err(gt, "Failed to send CSME proxy message\n");
		return ret;
	}

	ret = comp->ops->recv(comp->mei_dev, gsc->proxy.from_csme, GSC_PROXY_BUFFER_SIZE);
	if (ret < 0) {
		xe_gt_err(gt, "Failed to receive CSME proxy message\n");
		return ret;
	}

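	/* on success, ret is the number of bytes received back from the CSME */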
	return ret;
}

static int proxy_send_to_gsc(struct xe_gsc *gsc, u32 size)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	u64 addr_in = xe_bo_ggtt_addr(gsc->proxy.bo);
	u64 addr_out = addr_in + GSC_PROXY_BUFFER_SIZE;
	int err;

	/* the message, which includes the gsc and proxy headers, must fit in the buffer */
	if (size > GSC_PROXY_BUFFER_SIZE) {
		xe_gt_err(gt, "Invalid GSC proxy message size: %u\n", size);
		return -EINVAL;
	}

	err = xe_gsc_pkt_submit_kernel(gsc, addr_in, size,
				       addr_out, GSC_PROXY_BUFFER_SIZE);
	if (err) {
		xe_gt_err(gt, "Failed to submit gsc proxy rq (%pe)\n", ERR_PTR(err));
		return err;
	}

	return 0;
}

static int validate_proxy_header(struct xe_gsc_proxy_header *header,
				 u32 source, u32 dest, u32 max_size)
{
	u32 type = FIELD_GET(GSC_PROXY_TYPE, header->hdr);
	u32 length = FIELD_GET(GSC_PROXY_PAYLOAD_LENGTH, header->hdr);

	if (header->destination != dest || header->source != source)
		return -ENOEXEC;

	if (length + PROXY_HDR_SIZE > max_size)
		return -E2BIG;

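	/* a PAYLOAD message must carry data, while an INVALID type is always an error */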
	switch (type) {
	case GSC_PROXY_MSG_TYPE_PROXY_PAYLOAD:
		if (length > 0)
			break;
		fallthrough;
	case GSC_PROXY_MSG_TYPE_PROXY_INVALID:
		return -EIO;
	default:
		break;
	}

	return 0;
}

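/* read/write a single field of a proxy header laid out inside an iosys_map */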
#define proxy_header_wr(xe_, map_, offset_, field_, val_) \
	xe_map_wr_field(xe_, map_, offset_, struct xe_gsc_proxy_header, field_, val_)

#define proxy_header_rd(xe_, map_, offset_, field_) \
	xe_map_rd_field(xe_, map_, offset_, struct xe_gsc_proxy_header, field_)

static u32 emit_proxy_header(struct xe_device *xe, struct iosys_map *map, u32 offset)
{
	xe_map_memset(xe, map, offset, 0, PROXY_HDR_SIZE);

	proxy_header_wr(xe, map, offset, hdr,
			FIELD_PREP(GSC_PROXY_TYPE, GSC_PROXY_MSG_TYPE_PROXY_QUERY) |
			FIELD_PREP(GSC_PROXY_PAYLOAD_LENGTH, 0));

	proxy_header_wr(xe, map, offset, source, GSC_PROXY_ADDRESSING_KMD);
	proxy_header_wr(xe, map, offset, destination, GSC_PROXY_ADDRESSING_GSC);
	proxy_header_wr(xe, map, offset, status, 0);

	return offset + PROXY_HDR_SIZE;
}

static int proxy_query(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_gsc_proxy_header *to_csme_hdr = gsc->proxy.to_csme;
	void *to_csme_payload = gsc->proxy.to_csme + PROXY_HDR_SIZE;
	u32 wr_offset;
	u32 reply_offset;
	u32 size;
	int ret;

	wr_offset = xe_gsc_emit_header(xe, &gsc->proxy.to_gsc, 0,
				       HECI_MEADDRESS_PROXY, 0, PROXY_HDR_SIZE);
	wr_offset = emit_proxy_header(xe, &gsc->proxy.to_gsc, wr_offset);

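	/* the initial query is just the two headers, with no payload after them */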
	size = wr_offset;

	while (1) {
		/*
		 * Poison the GSC response header space to make sure we don't
		 * read a stale reply.
		 */
		xe_gsc_poison_header(xe, &gsc->proxy.from_gsc, 0);

		/* send proxy message to GSC */
		ret = proxy_send_to_gsc(gsc, size);
		if (ret)
			goto proxy_error;

		/* check the reply from GSC */
		ret = xe_gsc_read_out_header(xe, &gsc->proxy.from_gsc, 0,
					     PROXY_HDR_SIZE, &reply_offset);
		if (ret) {
			xe_gt_err(gt, "Invalid gsc header in proxy reply (%pe)\n",
				  ERR_PTR(ret));
			goto proxy_error;
		}

		/* copy the proxy header reply from GSC */
		xe_map_memcpy_from(xe, to_csme_hdr, &gsc->proxy.from_gsc,
				   reply_offset, PROXY_HDR_SIZE);

		/* stop if this was the last message */
		if (FIELD_GET(GSC_PROXY_TYPE, to_csme_hdr->hdr) == GSC_PROXY_MSG_TYPE_PROXY_END)
			break;

		/* make sure the GSC-to-CSME proxy header is sane */
		ret = validate_proxy_header(to_csme_hdr,
					    GSC_PROXY_ADDRESSING_GSC,
					    GSC_PROXY_ADDRESSING_CSME,
					    GSC_PROXY_BUFFER_SIZE - reply_offset);
		if (ret) {
			xe_gt_err(gt, "invalid GSC to CSME proxy header! (%pe)\n",
				  ERR_PTR(ret));
			goto proxy_error;
		}

		/* copy the rest of the message */
		size = FIELD_GET(GSC_PROXY_PAYLOAD_LENGTH, to_csme_hdr->hdr);
		xe_map_memcpy_from(xe, to_csme_payload, &gsc->proxy.from_gsc,
				   reply_offset + PROXY_HDR_SIZE, size);

		/* send the GSC message to the CSME */
		ret = proxy_send_to_csme(gsc, size + PROXY_HDR_SIZE);
		if (ret < 0)
			goto proxy_error;

		/* reply size from CSME, including the proxy header */
		size = ret;
		if (size < PROXY_HDR_SIZE) {
			xe_gt_err(gt, "CSME to GSC proxy msg too small: 0x%x\n", size);
			ret = -EPROTO;
			goto proxy_error;
		}

		/* make sure the CSME-to-GSC proxy header is sane */
		ret = validate_proxy_header(gsc->proxy.from_csme,
					    GSC_PROXY_ADDRESSING_CSME,
					    GSC_PROXY_ADDRESSING_GSC,
					    GSC_PROXY_BUFFER_SIZE - reply_offset);
		if (ret) {
			xe_gt_err(gt, "invalid CSME to GSC proxy header! %d\n", ret);
			goto proxy_error;
		}

		/* Emit a new header for sending the reply to the GSC */
		wr_offset = xe_gsc_emit_header(xe, &gsc->proxy.to_gsc, 0,
					       HECI_MEADDRESS_PROXY, 0, size);

		/* copy the CSME reply and update the total msg size to include the GSC header */
		xe_map_memcpy_to(xe, &gsc->proxy.to_gsc, wr_offset, gsc->proxy.from_csme, size);

		size += wr_offset;
	}

proxy_error:
	return ret < 0 ? ret : 0;
}

int xe_gsc_proxy_request_handler(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	int slept;
	int err;

	if (!gsc->proxy.component_added)
		return -ENODEV;

	/* when GSC is loaded, we can queue this before the component is bound */
	for (slept = 0; slept < GSC_PROXY_INIT_TIMEOUT_MS; slept += 100) {
		if (gsc->proxy.component)
			break;

		msleep(100);
	}

	mutex_lock(&gsc->proxy.mutex);
	if (!gsc->proxy.component) {
		xe_gt_err(gt, "GSC proxy component not bound!\n");
		err = -EIO;
	} else {
		/*
		 * clear the pending interrupt and allow new proxy requests to
		 * be generated while we handle the current one
		 */
		gsc_proxy_irq_clear(gsc);
		err = proxy_query(gsc);
	}
	mutex_unlock(&gsc->proxy.mutex);
	return err;
}

void xe_gsc_proxy_irq_handler(struct xe_gsc *gsc, u32 iir)
{
	struct xe_gt *gt = gsc_to_gt(gsc);

	if (unlikely(!iir))
		return;

	if (!gsc->proxy.component) {
		xe_gt_err(gt, "GSC proxy irq received without the component being bound!\n");
		return;
	}

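	/* flag the proxy request for the GSC worker and kick it off */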
	spin_lock(&gsc->lock);
	gsc->work_actions |= GSC_ACTION_SW_PROXY;
	spin_unlock(&gsc->lock);

	queue_work(gsc->wq, &gsc->work);
}

static int xe_gsc_proxy_component_bind(struct device *xe_kdev,
				       struct device *mei_kdev, void *data)
{
	struct xe_device *xe = kdev_to_xe(xe_kdev);
	struct xe_gt *gt = xe->tiles[0].media_gt;
	struct xe_gsc *gsc = &gt->uc.gsc;

	mutex_lock(&gsc->proxy.mutex);
	gsc->proxy.component = data;
	gsc->proxy.component->mei_dev = mei_kdev;
	mutex_unlock(&gsc->proxy.mutex);

	return 0;
}

static void xe_gsc_proxy_component_unbind(struct device *xe_kdev,
					  struct device *mei_kdev, void *data)
{
	struct xe_device *xe = kdev_to_xe(xe_kdev);
	struct xe_gt *gt = xe->tiles[0].media_gt;
	struct xe_gsc *gsc = &gt->uc.gsc;

	xe_gsc_wait_for_worker_completion(gsc);

	mutex_lock(&gsc->proxy.mutex);
	gsc->proxy.component = NULL;
	mutex_unlock(&gsc->proxy.mutex);
}

static const struct component_ops xe_gsc_proxy_component_ops = {
	.bind   = xe_gsc_proxy_component_bind,
	.unbind = xe_gsc_proxy_component_unbind,
};

static void proxy_channel_free(struct drm_device *drm, void *arg)
{
	struct xe_gsc *gsc = arg;

	if (!gsc->proxy.bo)
		return;

	if (gsc->proxy.to_csme) {
		kfree(gsc->proxy.to_csme);
		gsc->proxy.to_csme = NULL;
		gsc->proxy.from_csme = NULL;
	}

	if (gsc->proxy.bo) {
		iosys_map_clear(&gsc->proxy.to_gsc);
		iosys_map_clear(&gsc->proxy.from_gsc);
		xe_bo_unpin_map_no_vm(gsc->proxy.bo);
		gsc->proxy.bo = NULL;
	}
}

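/*
 * The proxy channel is made of two pairs of 32K buffers: a GGTT-mapped BO
 * shared with the GSC (to_gsc/from_gsc) and a plain kernel allocation shared
 * with the mei component (to_csme/from_csme).
 */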
static int proxy_channel_alloc(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;
	void *csme;
	int err;

	csme = kzalloc(GSC_PROXY_CHANNEL_SIZE, GFP_KERNEL);
	if (!csme)
		return -ENOMEM;

	bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_PROXY_CHANNEL_SIZE,
				  ttm_bo_type_kernel,
				  XE_BO_CREATE_SYSTEM_BIT |
				  XE_BO_CREATE_GGTT_BIT);
	if (IS_ERR(bo)) {
		kfree(csme);
		return PTR_ERR(bo);
	}

	gsc->proxy.bo = bo;
	gsc->proxy.to_gsc = IOSYS_MAP_INIT_OFFSET(&bo->vmap, 0);
	gsc->proxy.from_gsc = IOSYS_MAP_INIT_OFFSET(&bo->vmap, GSC_PROXY_BUFFER_SIZE);
	gsc->proxy.to_csme = csme;
	gsc->proxy.from_csme = csme + GSC_PROXY_BUFFER_SIZE;

	err = drmm_add_action_or_reset(&xe->drm, proxy_channel_free, gsc);
	if (err)
		return err;

	return 0;
}

/**
 * xe_gsc_proxy_init() - init objects and MEI component required by GSC proxy
 * @gsc: the GSC uC
 *
 * Return: 0 if the initialization was successful, a negative errno otherwise.
 */
int xe_gsc_proxy_init(struct xe_gsc *gsc)
{
	int err;
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);

	mutex_init(&gsc->proxy.mutex);

	if (!IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY)) {
		xe_gt_info(gt, "can't init GSC proxy due to missing mei component\n");
		return -ENODEV;
	}

	/* no multi-tile devices with this feature yet */
	if (tile->id > 0) {
		xe_gt_err(gt, "unexpected GSC proxy init on tile %u\n", tile->id);
		return -EINVAL;
	}

	err = proxy_channel_alloc(gsc);
	if (err)
		return err;

	err = component_add_typed(xe->drm.dev, &xe_gsc_proxy_component_ops,
				  I915_COMPONENT_GSC_PROXY);
	if (err < 0) {
		xe_gt_err(gt, "Failed to add GSC_PROXY component (%pe)\n", ERR_PTR(err));
		return err;
	}

	gsc->proxy.component_added = true;

	/* the component must be removed before unload, so can't use drmm for cleanup */

	return 0;
}

/**
 * xe_gsc_proxy_remove() - remove the GSC proxy MEI component
 * @gsc: the GSC uC
 */
void xe_gsc_proxy_remove(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_device *xe = gt_to_xe(gt);
	int err = 0;

	if (!gsc->proxy.component_added)
		return;

	/* disable HECI2 IRQs */
	xe_pm_runtime_get(xe);
	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
	if (err)
		xe_gt_err(gt, "failed to get forcewake to disable GSC interrupts\n");

	/* try to disable the irq even if getting forcewake failed */
	gsc_proxy_irq_toggle(gsc, false);

	if (!err)
		xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
	xe_pm_runtime_put(xe);

	xe_gsc_wait_for_worker_completion(gsc);

	component_del(xe->drm.dev, &xe_gsc_proxy_component_ops);
	gsc->proxy.component_added = false;
}

/**
 * xe_gsc_proxy_start() - start the proxy by submitting the first request
 * @gsc: the GSC uC
 *
 * Return: 0 if the proxy is now enabled, a negative errno otherwise.
 */
int xe_gsc_proxy_start(struct xe_gsc *gsc)
{
	int err;

	/* enable the proxy interrupt in the GSC shim layer */
	gsc_proxy_irq_toggle(gsc, true);

	/*
	 * The handling of the first proxy request must be manually triggered to
	 * notify the GSC that we're ready to support the proxy flow.
	 */
	err = xe_gsc_proxy_request_handler(gsc);
	if (err)
		return err;

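	/* the GSC FW reports proxy completion in the HECI1 FWSTS1 current-state field */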
	if (!gsc_proxy_init_done(gsc)) {
		xe_gt_err(gsc_to_gt(gsc), "GSC FW reports proxy init not completed\n");
		return -EIO;
	}

	return 0;
}