// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <drm/drm_managed.h>

#include "regs/xe_gt_regs.h"
#include "regs/xe_guc_regs.h"
#include "regs/xe_regs.h"

#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_hw_engine.h"
#include "xe_map.h"
#include "xe_memirq.h"
#include "xe_sriov.h"
#include "xe_sriov_printk.h"

#define memirq_assert(m, condition)	xe_tile_assert(memirq_to_tile(m), condition)
#define memirq_debug(m, msg...)		xe_sriov_dbg_verbose(memirq_to_xe(m), "MEMIRQ: " msg)

static struct xe_tile *memirq_to_tile(struct xe_memirq *memirq)
{
	return container_of(memirq, struct xe_tile, sriov.vf.memirq);
}

static struct xe_device *memirq_to_xe(struct xe_memirq *memirq)
{
	return tile_to_xe(memirq_to_tile(memirq));
}

static const char *guc_name(struct xe_guc *guc)
{
	return xe_gt_is_media_type(guc_to_gt(guc)) ? "media GuC" : "GuC";
}

/**
 * DOC: Memory Based Interrupts
 *
 * The MMIO register based interrupt infrastructure, used in non-virtualized
 * mode or in SRIOV-8 (which supports 8 Virtual Functions), does not scale
 * efficiently enough to deliver interrupts to a large number of virtual
 * machines or containers. Memory based interrupt status reporting provides
 * an efficient and scalable alternative.
 *
 * For memory based interrupt status reporting the hardware sequence is:
 *  * The engine writes the interrupt event to memory
 *    (the pointer to the memory location is provided by SW; this memory
 *    surface must be mapped to system memory and must be marked as
 *    un-cacheable (UC) in the Graphics IP caches)
 *  * The engine triggers an interrupt to the host.
 */
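
/*
 * A minimal sketch (illustrative only, not part of the driver flow): after
 * the hardware sequence above, the CPU side finds a raised event as a 0xff
 * byte in the SW-provided memory and acknowledges it by clearing the byte:
 *
 *	u8 raised = iosys_map_rd(&memirq->source, hwe->irq_offset, u8);
 *
 *	if (raised == 0xff)
 *		iosys_map_wr(&memirq->source, hwe->irq_offset, u8, 0x00);
 *
 * See memirq_received() below for the actual implementation.
 */
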
59
60/**
61 * DOC: Memory Based Interrupts Page Layout
62 *
63 * `Memory Based Interrupts`_ requires three different objects, which are
64 * called "page" in the specs, even if they aren't page-sized or aligned.
65 *
66 * To simplify the code we allocate a single page size object and then use
67 * offsets to embedded "pages". The address of those "pages" are then
68 * programmed in the HW via LRI and LRM in the context image.
69 *
70 * - _`Interrupt Status Report Page`: this page contains the interrupt
71 *   status vectors for each unit. Each bit in the interrupt vectors is
72 *   converted to a byte, with the byte being set to 0xFF when an
73 *   interrupt is triggered; interrupt vectors are 16b big so each unit
74 *   gets 16B. One space is reserved for each bit in one of the
75 *   GT_INTR_DWx registers, so this object needs a total of 1024B.
76 *   This object needs to be 4KiB aligned.
77 *
78 * - _`Interrupt Source Report Page`: this is the equivalent of the
79 *   GEN11_GT_INTR_DWx registers, with each bit in those registers being
80 *   mapped to a byte here. The offsets are the same, just bytes instead
81 *   of bits. This object needs to be cacheline aligned.
82 *
83 * - Interrupt Mask: the HW needs a location to fetch the interrupt
84 *   mask vector to be used by the LRM in the context, so we just use
85 *   the next available space in the interrupt page.
86 *
87 * ::
88 *
89 *   0x0000   +===========+  <== Interrupt Status Report Page
90 *            |           |
91 *            |           |     ____ +----+----------------+
92 *            |           |    /     |  0 | USER INTERRUPT |
93 *            +-----------+ __/      |  1 |                |
94 *            |  HWE(n)   | __       |    | CTX SWITCH     |
95 *            +-----------+   \      |    | WAIT SEMAPHORE |
96 *            |           |    \____ | 15 |                |
97 *            |           |          +----+----------------+
98 *            |           |
99 *   0x0400   +===========+  <== Interrupt Source Report Page
100 *            |  HWE(0)   |
101 *            |  HWE(1)   |
102 *            |           |
103 *            |  HWE(x)   |
104 *   0x0440   +===========+  <== Interrupt Enable Mask
105 *            |           |
106 *            |           |
107 *            +-----------+
108 */
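
/*
 * Worked example of the layout above: a unit reported via bit N of the
 * GT_INTR_DWx registers uses byte N of the source "page" and the 16B
 * status vector starting at N * 16 within the status "page":
 *
 *	source byte:   XE_MEMIRQ_SOURCE_OFFSET + N
 *	status vector: XE_MEMIRQ_STATUS_OFFSET + N * SZ_16
 *
 * xe_memirq_handler() below uses exactly this arithmetic, with N coming
 * from hwe->irq_offset or ilog2(INTR_GUC) / ilog2(INTR_MGUC).
 */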

static void __release_xe_bo(struct drm_device *drm, void *arg)
{
	struct xe_bo *bo = arg;

	xe_bo_unpin_map_no_vm(bo);
}

static int memirq_alloc_pages(struct xe_memirq *memirq)
{
	struct xe_device *xe = memirq_to_xe(memirq);
	struct xe_tile *tile = memirq_to_tile(memirq);
	struct xe_bo *bo;
	int err;

	BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_SOURCE_OFFSET, SZ_64));
	BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_STATUS_OFFSET, SZ_4K));

	/* XXX: convert to managed bo */
	bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K,
				  ttm_bo_type_kernel,
				  XE_BO_CREATE_SYSTEM_BIT |
				  XE_BO_CREATE_GGTT_BIT |
				  XE_BO_NEEDS_UC |
				  XE_BO_NEEDS_CPU_ACCESS);
	if (IS_ERR(bo)) {
		err = PTR_ERR(bo);
		goto out;
	}

	memirq_assert(memirq, !xe_bo_is_vram(bo));
	memirq_assert(memirq, !memirq->bo);

	iosys_map_memset(&bo->vmap, 0, 0, SZ_4K);

	memirq->bo = bo;
	memirq->source = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_SOURCE_OFFSET);
	memirq->status = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_STATUS_OFFSET);
	memirq->mask = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_ENABLE_OFFSET);

	memirq_assert(memirq, !memirq->source.is_iomem);
	memirq_assert(memirq, !memirq->status.is_iomem);
	memirq_assert(memirq, !memirq->mask.is_iomem);

	memirq_debug(memirq, "page offsets: source %#x status %#x\n",
		     xe_memirq_source_ptr(memirq), xe_memirq_status_ptr(memirq));

	return drmm_add_action_or_reset(&xe->drm, __release_xe_bo, memirq->bo);

out:
	xe_sriov_err(memirq_to_xe(memirq),
		     "Failed to allocate memirq page (%pe)\n", ERR_PTR(err));
	return err;
}

static void memirq_set_enable(struct xe_memirq *memirq, bool enable)
{
	iosys_map_wr(&memirq->mask, 0, u32, enable ? GENMASK(15, 0) : 0);

	memirq->enabled = enable;
}

/**
 * xe_memirq_init - Initialize data used by `Memory Based Interrupts`_.
 * @memirq: the &xe_memirq to initialize
 *
 * Allocate `Interrupt Source Report Page`_ and `Interrupt Status Report Page`_
 * used by `Memory Based Interrupts`_.
 *
 * These allocations are managed and will be implicitly released on unload.
 *
 * Note: This function shall be called only by the VF driver.
 *
 * If this function fails then the VF driver won't be able to operate correctly.
 * If `Memory Based Interrupts`_ are not used this function will return 0.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_memirq_init(struct xe_memirq *memirq)
{
	struct xe_device *xe = memirq_to_xe(memirq);
	int err;

	memirq_assert(memirq, IS_SRIOV_VF(xe));

	if (!xe_device_has_memirq(xe))
		return 0;

	err = memirq_alloc_pages(memirq);
	if (unlikely(err))
		return err;

	/* we need to start with all irqs enabled */
	memirq_set_enable(memirq, true);

	return 0;
}
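
/*
 * A sketch of the expected call flow, assuming the usual driver init/IRQ
 * paths (illustrative only; the actual call sites live elsewhere in the
 * driver):
 *
 *	err = xe_memirq_init(&tile->sriov.vf.memirq);           (at probe)
 *	err = xe_memirq_init_guc(&tile->sriov.vf.memirq, guc);  (per GuC)
 *	xe_memirq_reset(&tile->sriov.vf.memirq);                (irq reset)
 *	xe_memirq_postinstall(&tile->sriov.vf.memirq);          (irq postinstall)
 *	xe_memirq_handler(&tile->sriov.vf.memirq);              (interrupt path)
 */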

/**
 * xe_memirq_source_ptr - Get GGTT's offset of the `Interrupt Source Report Page`_.
 * @memirq: the &xe_memirq to query
 *
 * Shall be called only on the VF driver when `Memory Based Interrupts`_ are
 * used and xe_memirq_init() didn't fail.
 *
 * Return: GGTT's offset of the `Interrupt Source Report Page`_.
 */
u32 xe_memirq_source_ptr(struct xe_memirq *memirq)
{
	memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
	memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
	memirq_assert(memirq, memirq->bo);

	return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_SOURCE_OFFSET;
}

/**
 * xe_memirq_status_ptr - Get GGTT's offset of the `Interrupt Status Report Page`_.
 * @memirq: the &xe_memirq to query
 *
 * Shall be called only on the VF driver when `Memory Based Interrupts`_ are
 * used and xe_memirq_init() didn't fail.
 *
 * Return: GGTT's offset of the `Interrupt Status Report Page`_.
 */
u32 xe_memirq_status_ptr(struct xe_memirq *memirq)
{
	memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
	memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
	memirq_assert(memirq, memirq->bo);

	return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_STATUS_OFFSET;
}

/**
 * xe_memirq_enable_ptr - Get GGTT's offset of the Interrupt Enable Mask.
 * @memirq: the &xe_memirq to query
 *
 * Shall be called only on the VF driver when `Memory Based Interrupts`_ are
 * used and xe_memirq_init() didn't fail.
 *
 * Return: GGTT's offset of the Interrupt Enable Mask.
 */
u32 xe_memirq_enable_ptr(struct xe_memirq *memirq)
{
	memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
	memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
	memirq_assert(memirq, memirq->bo);

	return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_ENABLE_OFFSET;
}
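
/*
 * Note on the three getters above: they all return addresses inside the
 * single BO allocated in memirq_alloc_pages(), so they differ only by the
 * fixed XE_MEMIRQ_*_OFFSET values (0x0, 0x400 and 0x440 per the layout
 * diagram), e.g.:
 *
 *	xe_memirq_source_ptr(m) == xe_bo_ggtt_addr(m->bo) + 0x400
 *	xe_memirq_enable_ptr(m) == xe_memirq_source_ptr(m) + 0x40
 */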

/**
 * xe_memirq_init_guc - Prepare GuC for `Memory Based Interrupts`_.
 * @memirq: the &xe_memirq
 * @guc: the &xe_guc to setup
 *
 * Register `Interrupt Source Report Page`_ and `Interrupt Status Report Page`_
 * to be used by the GuC when `Memory Based Interrupts`_ are required.
 *
 * Shall be called only on the VF driver when `Memory Based Interrupts`_ are
 * used and xe_memirq_init() didn't fail.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc)
{
	bool is_media = xe_gt_is_media_type(guc_to_gt(guc));
	u32 offset = is_media ? ilog2(INTR_MGUC) : ilog2(INTR_GUC);
	u32 source, status;
	int err;

	memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
	memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
	memirq_assert(memirq, memirq->bo);

	source = xe_memirq_source_ptr(memirq) + offset;
	status = xe_memirq_status_ptr(memirq) + offset * SZ_16;

	err = xe_guc_self_cfg64(guc, GUC_KLV_SELF_CFG_MEMIRQ_SOURCE_ADDR_KEY,
				source);
	if (unlikely(err))
		goto failed;

	err = xe_guc_self_cfg64(guc, GUC_KLV_SELF_CFG_MEMIRQ_STATUS_ADDR_KEY,
				status);
	if (unlikely(err))
		goto failed;

	return 0;

failed:
	xe_sriov_err(memirq_to_xe(memirq),
		     "Failed to setup report pages in %s (%pe)\n",
		     guc_name(guc), ERR_PTR(err));
	return err;
}
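
/*
 * The address derivation above follows the layout rule used throughout this
 * file: the GuC's bit position in the GT_INTR_DWx registers (ilog2() of its
 * INTR_(M)GUC mask) selects one byte in the source "page" and one 16B vector
 * in the status "page". Sketch, with N = ilog2(INTR_GUC):
 *
 *	source = xe_memirq_source_ptr(memirq) + N;
 *	status = xe_memirq_status_ptr(memirq) + N * SZ_16;
 *
 * The same N * SZ_16 arithmetic is used by xe_memirq_handler() when reading
 * the status vector back.
 */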

/**
 * xe_memirq_reset - Disable processing of `Memory Based Interrupts`_.
 * @memirq: the &xe_memirq
 *
 * This is part of the driver IRQ setup flow.
 *
 * This function shall only be used by the VF driver on platforms that use
 * `Memory Based Interrupts`_.
 */
void xe_memirq_reset(struct xe_memirq *memirq)
{
	memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
	memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));

	if (memirq->bo)
		memirq_set_enable(memirq, false);
}

/**
 * xe_memirq_postinstall - Enable processing of `Memory Based Interrupts`_.
 * @memirq: the &xe_memirq
 *
 * This is part of the driver IRQ setup flow.
 *
 * This function shall only be used by the VF driver on platforms that use
 * `Memory Based Interrupts`_.
 */
void xe_memirq_postinstall(struct xe_memirq *memirq)
{
	memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
	memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));

	if (memirq->bo)
		memirq_set_enable(memirq, true);
}

static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector,
			    u16 offset, const char *name)
{
	u8 value;

	value = iosys_map_rd(vector, offset, u8);
	if (value) {
		if (value != 0xff)
			xe_sriov_err_ratelimited(memirq_to_xe(memirq),
						 "Unexpected memirq value %#x from %s at %u\n",
						 value, name, offset);
		iosys_map_wr(vector, offset, u8, 0x00);
	}

	return value;
}
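
/*
 * memirq_received() above is intentionally tolerant: the hardware is expected
 * to write only 0x00 (not raised) or 0xff (raised) into a vector byte, so any
 * other value is logged as unexpected but still consumed, e.g.:
 *
 *	value == 0x00:  returns false, byte left untouched
 *	value == 0xff:  returns true, byte cleared to 0x00
 *	value == 0x0f:  returns true, byte cleared, rate-limited error printed
 */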

static void memirq_dispatch_engine(struct xe_memirq *memirq, struct iosys_map *status,
				   struct xe_hw_engine *hwe)
{
	memirq_debug(memirq, "STATUS %s %*ph\n", hwe->name, 16, status->vaddr);

	if (memirq_received(memirq, status, ilog2(GT_RENDER_USER_INTERRUPT), hwe->name))
		xe_hw_engine_handle_irq(hwe, GT_RENDER_USER_INTERRUPT);
}

static void memirq_dispatch_guc(struct xe_memirq *memirq, struct iosys_map *status,
				struct xe_guc *guc)
{
	const char *name = guc_name(guc);

	memirq_debug(memirq, "STATUS %s %*ph\n", name, 16, status->vaddr);

	if (memirq_received(memirq, status, ilog2(GUC_INTR_GUC2HOST), name))
		xe_guc_irq_handler(guc, GUC_INTR_GUC2HOST);
}

/**
 * xe_memirq_handler - The `Memory Based Interrupts`_ Handler.
 * @memirq: the &xe_memirq
 *
 * This function reads and dispatches `Memory Based Interrupts`_.
 */
void xe_memirq_handler(struct xe_memirq *memirq)
{
	struct xe_device *xe = memirq_to_xe(memirq);
	struct xe_tile *tile = memirq_to_tile(memirq);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct iosys_map map;
	unsigned int gtid;
	struct xe_gt *gt;

	if (!memirq->bo)
		return;

	memirq_assert(memirq, !memirq->source.is_iomem);
	memirq_debug(memirq, "SOURCE %*ph\n", 32, memirq->source.vaddr);
	memirq_debug(memirq, "SOURCE %*ph\n", 32, memirq->source.vaddr + 32);

	for_each_gt(gt, xe, gtid) {
		if (gt->tile != tile)
			continue;

		for_each_hw_engine(hwe, gt, id) {
			if (memirq_received(memirq, &memirq->source, hwe->irq_offset, "SRC")) {
				map = IOSYS_MAP_INIT_OFFSET(&memirq->status,
							    hwe->irq_offset * SZ_16);
				memirq_dispatch_engine(memirq, &map, hwe);
			}
		}
	}

	/* GuC and media GuC (if present) must be checked separately */

	if (memirq_received(memirq, &memirq->source, ilog2(INTR_GUC), "SRC")) {
		map = IOSYS_MAP_INIT_OFFSET(&memirq->status, ilog2(INTR_GUC) * SZ_16);
		memirq_dispatch_guc(memirq, &map, &tile->primary_gt->uc.guc);
	}

	if (!tile->media_gt)
		return;

	if (memirq_received(memirq, &memirq->source, ilog2(INTR_MGUC), "SRC")) {
		map = IOSYS_MAP_INIT_OFFSET(&memirq->status, ilog2(INTR_MGUC) * SZ_16);
		memirq_dispatch_guc(memirq, &map, &tile->media_gt->uc.guc);
	}
}
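
/*
 * A minimal sketch of how this handler is expected to be driven, assuming a
 * top-level interrupt handler that iterates over tiles (illustrative only;
 * the real wiring lives in the xe_irq code):
 *
 *	static irqreturn_t vf_mem_irq_handler(int irq, void *arg)
 *	{
 *		struct xe_device *xe = arg;
 *		struct xe_tile *tile;
 *		unsigned int id;
 *
 *		for_each_tile(tile, xe, id)
 *			xe_memirq_handler(&tile->sriov.vf.memirq);
 *
 *		return IRQ_HANDLED;
 *	}
 */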