/*	$NetBSD: intel_gt_irq.c,v 1.3 2021/12/19 01:43:37 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intel_gt_irq.c,v 1.3 2021/12/19 01:43:37 riastradh Exp $");

#include <linux/sched/clock.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_uncore.h"
#include "intel_rps.h"

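/*
 * Forward GuC-to-host notifications to the GuC event handler; only the
 * GUC2HOST bit is acted upon here.
 */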
static void guc_irq_handler(struct intel_guc *guc, u16 iir)
{
	if (iir & GUC_INTR_GUC2HOST)
		intel_guc_to_host_event_handler(guc);
}

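/*
 * Per-engine command streamer interrupt: signal any completed breadcrumbs
 * on a user interrupt, and kick the execlists tasklet if a context switch
 * is pending or the breadcrumb work requires it.
 */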
static void
cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
{
	bool tasklet = false;

	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
		tasklet = true;

	if (iir & GT_RENDER_USER_INTERRUPT) {
		intel_engine_signal_breadcrumbs(engine);
		tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
	}

	if (tasklet)
		tasklet_hi_schedule(&engine->execlists.tasklet);
}

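/*
 * Select interrupt source (bank, bit) and spin until the shared identity
 * register flags its data valid, then acknowledge it.  Returns the raw
 * identity dword, or 0 if the hardware never reported valid data.
 */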
static u32
gen11_gt_engine_identity(struct intel_gt *gt,
			 const unsigned int bank, const unsigned int bit)
{
	u32 timeout_ts;
	u32 ident;

	lockdep_assert_held(&gt->irq_lock);

	raw_reg_write(gt->uncore, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));

	/*
	 * NB: Specs do not specify how long to spin wait, so we do
	 * ~100us as an educated guess.  local_clock() is in
	 * nanoseconds, so shifting right by 10 gives a close-enough
	 * approximation of microseconds.
	 */
	timeout_ts = (local_clock() >> 10) + 100;
	do {
		ident = raw_reg_read(gt->uncore, GEN11_INTR_IDENTITY_REG(bank));
	} while (!(ident & GEN11_INTR_DATA_VALID) &&
		 !time_after32(local_clock() >> 10, timeout_ts));

	if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
		DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
			  bank, bit, ident);
		return 0;
	}

	raw_reg_write(gt->uncore, GEN11_INTR_IDENTITY_REG(bank),
		      GEN11_INTR_DATA_VALID);

	return ident;
}

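/*
 * Dispatch "other" class interrupts: GuC events and GT PM (RPS) events
 * are handled; anything else is reported once and dropped.
 */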
static void
gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
			const u16 iir)
{
	if (instance == OTHER_GUC_INSTANCE)
		return guc_irq_handler(&gt->uc.guc, iir);

	if (instance == OTHER_GTPM_INSTANCE)
		return gen11_rps_irq_handler(&gt->rps, iir);

	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
		  instance, iir);
}

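/*
 * Route an engine-class interrupt to the engine identified by
 * (class, instance), warning once about instances we do not know.
 */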
static void
gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
			 const u8 instance, const u16 iir)
{
	struct intel_engine_cs *engine;

	if (instance <= MAX_ENGINE_INSTANCE)
		engine = gt->engine_class[class][instance];
	else
		engine = NULL;

	if (likely(engine))
		return cs_irq_handler(engine, iir);

	WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
		  class, instance);
}

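/*
 * Decode an identity dword into its class/instance/interrupt fields and
 * hand it to the engine or "other" handler as appropriate.
 */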
static void
gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
{
	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
	const u16 intr = GEN11_INTR_ENGINE_INTR(identity);

	if (unlikely(!intr))
		return;

	if (class <= COPY_ENGINE_CLASS)
		return gen11_engine_irq_handler(gt, class, instance, intr);

	if (class == OTHER_CLASS)
		return gen11_other_irq_handler(gt, instance, intr);

	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
		  class, instance, intr);
}

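/*
 * Service every pending source in one GT interrupt bank: resolve each set
 * bit through the selector/identity handshake, handle it, then clear the
 * bank's interrupt dword in one go.
 */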
static void
gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
{
	unsigned long intr_dw;
	unsigned int bit;

	lockdep_assert_held(&gt->irq_lock);

	intr_dw = raw_reg_read(gt->uncore, GEN11_GT_INTR_DW(bank));

	for_each_set_bit(bit, &intr_dw, 32) {
		const u32 ident = gen11_gt_engine_identity(gt, bank, bit);

		gen11_gt_identity_handler(gt, ident);
	}

	/* Clear only after the shared identity registers have been serviced */
	raw_reg_write(gt->uncore, GEN11_GT_INTR_DW(bank), intr_dw);
}

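/*
 * Top-level gen11 GT interrupt handler: under the GT irq lock, walk each
 * bank flagged in the master control register.
 */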
void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
{
	unsigned int bank;

	spin_lock(&gt->irq_lock);

	for (bank = 0; bank < 2; bank++) {
		if (master_ctl & GEN11_GT_DW_IRQ(bank))
			gen11_gt_bank_handler(gt, bank);
	}

	spin_unlock(&gt->irq_lock);
}

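/*
 * Clear one pending bit in a GT interrupt bank, observing the BSpec rule
 * (see below) that the selector and shared IIR registers must be serviced
 * first.  Returns true if the bit was pending and has now been cleared.
 */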
bool gen11_gt_reset_one_iir(struct intel_gt *gt,
			    const unsigned int bank, const unsigned int bit)
{
	u32 dw;

	lockdep_assert_held(&gt->irq_lock);

	dw = raw_reg_read(gt->uncore, GEN11_GT_INTR_DW(bank));
	if (dw & BIT(bit)) {
		/*
		 * According to the BSpec, DW_IIR bits cannot be cleared without
		 * first servicing the Selector & Shared IIR registers.
		 */
		gen11_gt_engine_identity(gt, bank, bit);

		/*
		 * We locked GT INT DW by reading it. If we want to (try
		 * to) recover from this successfully, we need to clear
		 * our bit, otherwise we are locking the register for
		 * everybody.
		 */
		raw_reg_write(gt->uncore, GEN11_GT_INTR_DW(bank), BIT(bit));

		return true;
	}

	return false;
}

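/*
 * Quiesce the gen11 GT: disable the per-class interrupt enables and mask
 * every engine, PM and GuC interrupt source.
 */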
void gen11_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	/* Disable RCS, BCS, VCS and VECS class engines. */
	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE,	  0);

	/* Mask all irqs on the RCS, BCS, VCS and VECS engines. */
	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK,	~0);
	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK,	~0);
	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK,	~0);
	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK,	~0);
	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK,	~0);

	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK,  ~0);
}

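/*
 * Enable the default gen11 GT interrupts: user and context-switch
 * interrupts for each engine class, while RPS and GuC sources stay masked
 * until their respective users enable them on demand.
 */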
void gen11_gt_irq_postinstall(struct intel_gt *gt)
{
	const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
	struct intel_uncore *uncore = gt->uncore;
	const u32 dmask = irqs << 16 | irqs;
	const u32 smask = irqs << 16;

	BUILD_BUG_ON(irqs & 0xffff0000);

	/* Enable RCS, BCS, VCS and VECS class interrupts. */
	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);

	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);

	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	gt->pm_ier = 0x0;
	gt->pm_imr = ~gt->pm_ier;
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);

	/* Same thing for GuC interrupts */
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK,  ~0);
}

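/*
 * Gen5 (Ironlake) GT interrupt handler: only user interrupts for the
 * render and BSD rings need servicing here.
 */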
void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]);
}

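/*
 * On hardware with L3 dynamic parity fixup, record which slice raised the
 * parity error, mask further parity interrupts, and defer the remapping
 * work to the l3_parity error worker.
 */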
static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
{
	if (!HAS_L3_DPF(gt->i915))
		return;

	spin_lock(&gt->irq_lock);
	gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
	spin_unlock(&gt->irq_lock);

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		gt->i915->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		gt->i915->l3_parity.which_slice |= 1 << 0;

	schedule_work(&gt->i915->l3_parity.error_work);
}

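/*
 * Gen6/7 GT interrupt handler: signal breadcrumbs for the render, BSD and
 * blitter rings, log command parser errors, and pass parity errors to the
 * handler above.
 */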
void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine_class[COPY_ENGINE_CLASS][0]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(gt->i915))
		gen7_parity_error_irq_handler(gt, gt_iir);
}

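/*
 * Acknowledge gen8 GT interrupts: for each bank flagged in master_ctl,
 * latch the IIR into gt_iir[] and write the value back to clear the
 * pending bits before they are handled.
 */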
void gen8_gt_irq_ack(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
{
	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = raw_reg_read(gt->uncore, GEN8_GT_IIR(0));
		if (likely(gt_iir[0]))
			raw_reg_write(gt->uncore, GEN8_GT_IIR(0), gt_iir[0]);
	}

	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
		gt_iir[1] = raw_reg_read(gt->uncore, GEN8_GT_IIR(1));
		if (likely(gt_iir[1]))
			raw_reg_write(gt->uncore, GEN8_GT_IIR(1), gt_iir[1]);
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gt_iir[2] = raw_reg_read(gt->uncore, GEN8_GT_IIR(2));
		if (likely(gt_iir[2]))
			raw_reg_write(gt->uncore, GEN8_GT_IIR(2), gt_iir[2]);
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = raw_reg_read(gt->uncore, GEN8_GT_IIR(3));
		if (likely(gt_iir[3]))
			raw_reg_write(gt->uncore, GEN8_GT_IIR(3), gt_iir[3]);
	}
}

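/*
 * Handle the gen8 GT interrupts latched by gen8_gt_irq_ack(): per-engine
 * bits are shifted down to each engine's field, while IIR bank 2 carries
 * RPS events in the low word and GuC events in the high word.
 */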
void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
{
	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		cs_irq_handler(gt->engine_class[RENDER_CLASS][0],
			       gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
		cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0],
			       gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
	}

	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
		cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0],
			       gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
		cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1],
			       gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
			       gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gen6_rps_irq_handler(&gt->rps, gt_iir[2]);
		guc_irq_handler(&gt->uc.guc, gt_iir[2] >> 16);
	}
}

void gen8_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	GEN8_IRQ_RESET_NDX(uncore, GT, 0);
	GEN8_IRQ_RESET_NDX(uncore, GT, 1);
	GEN8_IRQ_RESET_NDX(uncore, GT, 2);
	GEN8_IRQ_RESET_NDX(uncore, GT, 3);
}

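/*
 * Install the default gen8 GT masks: user and context-switch interrupts
 * per engine, with bank 2 (PM/GuC) left fully masked until RPS or the
 * GuC enables what it needs.
 */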
void gen8_gt_irq_postinstall(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	/* These are interrupts we'll toggle with the ring mask register */
	u32 gt_interrupts[] = {
		(GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
		 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),

		(GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
		 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),

		0,

		(GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
	};

	gt->pm_ier = 0x0;
	gt->pm_imr = ~gt->pm_ier;
	GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled. The same will be the case for GuC interrupts.
	 */
	GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
	GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

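/*
 * Read-modify-write helper for GTIMR: interrupt_mask selects the bits to
 * update and enabled_irq_mask chooses which of those to unmask.  Caller
 * must hold gt->irq_lock.
 */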
static void gen5_gt_update_irq(struct intel_gt *gt,
			       u32 interrupt_mask,
			       u32 enabled_irq_mask)
{
	lockdep_assert_held(&gt->irq_lock);

	GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);

	gt->gt_imr &= ~interrupt_mask;
	gt->gt_imr |= (~enabled_irq_mask & interrupt_mask);
	intel_uncore_write(gt->uncore, GTIMR, gt->gt_imr);
}

void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask)
{
	gen5_gt_update_irq(gt, mask, mask);
	intel_uncore_posting_read_fw(gt->uncore, GTIMR);
}

void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask)
{
	gen5_gt_update_irq(gt, mask, 0);
}

void gen5_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	GEN3_IRQ_RESET(uncore, GT);
	if (INTEL_GEN(gt->i915) >= 6)
		GEN3_IRQ_RESET(uncore, GEN6_PM);
}

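/*
 * Unmask and enable the baseline GT interrupts: user interrupts for each
 * ring present, the L3 parity interrupt where supported, and on gen6+ a
 * fully masked PM IMR with only the VEBOX user interrupt in the IER when
 * that engine exists.
 */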
void gen5_gt_irq_postinstall(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 pm_irqs = 0;
	u32 gt_irqs = 0;

	gt->gt_imr = ~0;
	if (HAS_L3_DPF(gt->i915)) {
		/* L3 parity interrupt is always unmasked. */
		gt->gt_imr = ~GT_PARITY_ERROR(gt->i915);
		gt_irqs |= GT_PARITY_ERROR(gt->i915);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN(gt->i915, 5))
		gt_irqs |= ILK_BSD_USER_INTERRUPT;
	else
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs);

	if (INTEL_GEN(gt->i915) >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_ENGINE(gt->i915, VECS0)) {
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
			gt->pm_ier |= PM_VEBOX_USER_INTERRUPT;
		}

		gt->pm_imr = 0xffffffff;
		GEN3_IRQ_INIT(uncore, GEN6_PM, gt->pm_imr, pm_irqs);
	}
}