1/* $OpenBSD: ampintc.c,v 1.31 2023/09/22 01:10:43 jsg Exp $ */
2/*
3 * Copyright (c) 2007,2009,2011 Dale Rahn <drahn@openbsd.org>
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18/*
19 * This driver implements the interrupt controller as specified in
20 * DDI0407E_cortex_a9_mpcore_r2p0_trm with the
21 * IHI0048A_gic_architecture_spec_v1_0 underlying specification
22 */
23#include <sys/param.h>
24#include <sys/systm.h>
25#include <sys/queue.h>
26#include <sys/malloc.h>
27#include <sys/device.h>
28#include <sys/evcount.h>
29
30#include <uvm/uvm_extern.h>
31
32#include <machine/bus.h>
33#include <machine/fdt.h>
34
35#include <dev/ofw/fdt.h>
36#include <dev/ofw/openfirm.h>
37
38#include <machine/simplebusvar.h>
39
/* registers */

/* ICD: GIC distributor registers (offsets into sc_d_ioh) */
#define	ICD_DCR			0x000	/* distributor control */
#define		ICD_DCR_ES		0x00000001
#define		ICD_DCR_ENS		0x00000002

#define ICD_ICTR			0x004	/* interrupt controller type */
#define		ICD_ICTR_LSPI_SH	11
#define		ICD_ICTR_LSPI_M		0x1f
#define		ICD_ICTR_CPU_SH		5
#define		ICD_ICTR_CPU_M		0x07
#define		ICD_ICTR_ITL_SH		0
#define		ICD_ICTR_ITL_M		0x1f
#define ICD_IDIR			0x008	/* distributor implementer id */
#define 	ICD_DIR_PROD_SH		24
#define 	ICD_DIR_PROD_M		0xff
#define 	ICD_DIR_REV_SH		12
#define 	ICD_DIR_REV_M		0xfff
#define 	ICD_DIR_IMP_SH		0
#define 	ICD_DIR_IMP_M		0xfff

/*
 * Map an IRQ number to the word index / bit position of the
 * per-IRQ register banks (32, 16 or 4 interrupts per 32-bit word).
 */
#define IRQ_TO_REG32(i)		(((i) >> 5) & 0x1f)
#define IRQ_TO_REG32BIT(i)	((i) & 0x1f)
#define IRQ_TO_REG4(i)		(((i) >> 2) & 0xff)
#define IRQ_TO_REG4BIT(i)	((i) & 0x3)
#define IRQ_TO_REG16(i)		(((i) >> 4) & 0x3f)
#define IRQ_TO_REG16BIT(i)	((i) & 0xf)
#define IRQ_TO_REGBIT_S(i)	8
#define IRQ_TO_REG4BIT_M(i)	8

#define ICD_ISRn(i)		(0x080 + (IRQ_TO_REG32(i) * 4))	/* security */
#define ICD_ISERn(i)		(0x100 + (IRQ_TO_REG32(i) * 4))	/* set-enable */
#define ICD_ICERn(i)		(0x180 + (IRQ_TO_REG32(i) * 4))	/* clear-enable */
#define ICD_ISPRn(i)		(0x200 + (IRQ_TO_REG32(i) * 4))	/* set-pending */
#define ICD_ICPRn(i)		(0x280 + (IRQ_TO_REG32(i) * 4))	/* clear-pending */
#define ICD_ABRn(i)		(0x300 + (IRQ_TO_REG32(i) * 4))	/* active */
#define ICD_IPRn(i)		(0x400 + (i))	/* priority, one byte per irq */
#define ICD_IPTRn(i)		(0x800 + (i))	/* cpu targets, one byte per irq */
#define ICD_ICRn(i)		(0xC00 + (IRQ_TO_REG16(i) * 4))	/* config, 2 bits per irq */
#define 	ICD_ICR_TRIG_LEVEL(i)	(0x0 << (IRQ_TO_REG16BIT(i) * 2))
#define 	ICD_ICR_TRIG_EDGE(i)	(0x2 << (IRQ_TO_REG16BIT(i) * 2))
#define 	ICD_ICR_TRIG_MASK(i)	(0x2 << (IRQ_TO_REG16BIT(i) * 2))

/*
 * what about (ppi|spi)_status
 */
#define ICD_PPI			0xD00
#define 	ICD_PPI_GTIMER	(1 << 11)
#define 	ICD_PPI_FIQ		(1 << 12)
#define 	ICD_PPI_PTIMER	(1 << 13)
#define 	ICD_PPI_PWDOG	(1 << 14)
#define 	ICD_PPI_IRQ		(1 << 15)
#define ICD_SPI_BASE		0xD04
#define ICD_SPIn(i)			(ICD_SPI_BASE + ((i) * 4))


#define ICD_SGIR			0xF00	/* software generated interrupt (IPI) */

#define ICD_PERIPH_ID_0			0xFD0
#define ICD_PERIPH_ID_1			0xFD4
#define ICD_PERIPH_ID_2			0xFD8
#define ICD_PERIPH_ID_3			0xFDC
#define ICD_PERIPH_ID_4			0xFE0
#define ICD_PERIPH_ID_5			0xFE4
#define ICD_PERIPH_ID_6			0xFE8
#define ICD_PERIPH_ID_7			0xFEC

/*
 * NOTE(review): ICD_COMP_ID_0..3 all expand to 0xFEC, which is also
 * ICD_PERIPH_ID_7; the component id registers would normally be at
 * 0xFF0-0xFFC.  These constants are unused in this file -- verify
 * against the GIC spec before relying on them.
 */
#define ICD_COMP_ID_0			0xFEC
#define ICD_COMP_ID_1			0xFEC
#define ICD_COMP_ID_2			0xFEC
#define ICD_COMP_ID_3			0xFEC


/* ICP: GIC per-CPU interface registers (offsets into sc_p_ioh) */
#define ICPICR				0x00	/* cpu interface control */
#define ICPIPMR				0x04	/* priority mask */
/* XXX - must left justify bits to  0 - 7  */
#define 	ICMIPMR_SH 		4
#define ICPBPR				0x08	/* binary point */
#define ICPIAR				0x0C	/* interrupt acknowledge */
#define 	ICPIAR_IRQ_SH		0
#define 	ICPIAR_IRQ_M		0x3ff
#define 	ICPIAR_CPUID_SH		10
#define 	ICPIAR_CPUID_M		0x7
#define 	ICPIAR_NO_PENDING_IRQ	ICPIAR_IRQ_M
#define ICPEOIR				0x10	/* end of interrupt */
#define ICPPRP				0x14	/* running priority */
#define ICPHPIR				0x18	/* highest pending interrupt */
#define ICPIIR				0xFC	/* cpu interface id */

/*
 * what about periph_id and component_id
 */

/* values for the "enable" argument of ampintc_route() */
#define IRQ_ENABLE	1
#define IRQ_DISABLE	0
134
struct ampintc_softc {
	struct simplebus_softc	 sc_sbus;
	struct intrq 		*sc_handler;	/* per-IRQ handler queues */
	int			 sc_nintr;	/* number of interrupt sources */
	bus_space_tag_t		 sc_iot;
	bus_space_handle_t	 sc_d_ioh, sc_p_ioh;	/* distributor / cpu interface */
	uint8_t			 sc_cpu_mask[ICD_ICTR_CPU_M + 1];	/* GIC target bit per cpu */
	struct evcount		 sc_spur;	/* spurious (irq 1023) counter */
	struct interrupt_controller sc_ic;
	int			 sc_ipi_reason[ICD_ICTR_CPU_M + 1];	/* pending ARM_IPI_* per cpu */
	int			 sc_ipi_num[3];	/* hw SGI used for each ARM_IPI_* */
};
struct ampintc_softc *ampintc;	/* the single GIC instance */
148
149
/* one established interrupt handler; several may share an IRQ */
struct intrhand {
	TAILQ_ENTRY(intrhand) ih_list;	/* link on intrq list */
	int (*ih_func)(void *);		/* handler */
	void *ih_arg;			/* arg for handler */
	int ih_ipl;			/* IPL_* */
	int ih_flags;			/* IPL_MPSAFE etc. */
	int ih_irq;			/* IRQ number */
	struct evcount	ih_count;	/* per-handler event counter */
	char *ih_name;
	struct cpu_info *ih_ci;		/* CPU the IRQ runs on */
};
161
/* per-IRQ state: handler list plus cached routing/priority info */
struct intrq {
	TAILQ_HEAD(, intrhand) iq_list;	/* handler list */
	struct cpu_info *iq_ci;		/* CPU the IRQ runs on */
	int iq_irq_max;			/* IRQ to mask while handling */
	int iq_irq_min;			/* lowest IRQ when shared */
	int iq_ist;			/* share type */
};
169
170
171int		 ampintc_match(struct device *, void *, void *);
172void		 ampintc_attach(struct device *, struct device *, void *);
173int		 ampintc_activate(struct device *, int);
174void		 ampintc_init(struct ampintc_softc *);
175void		 ampintc_cpuinit(void);
176int		 ampintc_spllower(int);
177void		 ampintc_splx(int);
178int		 ampintc_splraise(int);
179void		 ampintc_setipl(int);
180void		 ampintc_calc_mask(void);
181void		 ampintc_calc_irq(struct ampintc_softc *, int);
182void		*ampintc_intr_establish(int, int, int, struct cpu_info *,
183		    int (*)(void *), void *, char *);
184void		*ampintc_intr_establish_fdt(void *, int *, int,
185		    struct cpu_info *, int (*)(void *), void *, char *);
186void		 ampintc_intr_disestablish(void *);
187void		 ampintc_irq_handler(void *);
188const char	*ampintc_intr_string(void *);
189uint32_t	 ampintc_iack(void);
190void		 ampintc_eoi(uint32_t);
191void		 ampintc_set_priority(int, int);
192void		 ampintc_intr_enable(int);
193void		 ampintc_intr_disable(int);
194void		 ampintc_intr_config(int, int);
195void		 ampintc_route(int, int, struct cpu_info *);
196void		 ampintc_route_irq(void *, int, struct cpu_info *);
197void		 ampintc_intr_barrier(void *);
198
199int		 ampintc_ipi_combined(void *);
200int		 ampintc_ipi_nop(void *);
201int		 ampintc_ipi_ddb(void *);
202int		 ampintc_ipi_halt(void *);
203void		 ampintc_send_ipi(struct cpu_info *, int);
204
/* autoconf glue */
const struct cfattach	ampintc_ca = {
	sizeof (struct ampintc_softc), ampintc_match, ampintc_attach,
	NULL, ampintc_activate
};

struct cfdriver ampintc_cd = {
	NULL, "ampintc", DV_DULL
};

/* FDT "compatible" strings this driver matches, NULL-terminated */
static char *ampintc_compatibles[] = {
	"arm,cortex-a7-gic",
	"arm,cortex-a9-gic",
	"arm,cortex-a15-gic",
	"arm,gic-400",
	NULL
};
221
222int
223ampintc_match(struct device *parent, void *cfdata, void *aux)
224{
225	struct fdt_attach_args *faa = aux;
226	int i;
227
228	for (i = 0; ampintc_compatibles[i]; i++)
229		if (OF_is_compatible(faa->fa_node, ampintc_compatibles[i]))
230			return (1);
231
232	return (0);
233}
234
235void
236ampintc_attach(struct device *parent, struct device *self, void *aux)
237{
238	struct ampintc_softc *sc = (struct ampintc_softc *)self;
239	struct fdt_attach_args *faa = aux;
240	int i, nintr, ncpu;
241	uint32_t ictr;
242#ifdef MULTIPROCESSOR
243	int nipi, ipiirq[3];
244#endif
245
246	ampintc = sc;
247
248	arm_init_smask();
249
250	sc->sc_iot = faa->fa_iot;
251
252	/* First row: ICD */
253	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
254	    faa->fa_reg[0].size, 0, &sc->sc_d_ioh))
255		panic("%s: ICD bus_space_map failed!", __func__);
256
257	/* Second row: ICP */
258	if (bus_space_map(sc->sc_iot, faa->fa_reg[1].addr,
259	    faa->fa_reg[1].size, 0, &sc->sc_p_ioh))
260		panic("%s: ICP bus_space_map failed!", __func__);
261
262	evcount_attach(&sc->sc_spur, "irq1023/spur", NULL);
263
264	ictr = bus_space_read_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICTR);
265	nintr = 32 * ((ictr >> ICD_ICTR_ITL_SH) & ICD_ICTR_ITL_M);
266	nintr += 32; /* ICD_ICTR + 1, irq 0-31 is SGI, 32+ is PPI */
267	sc->sc_nintr = nintr;
268	ncpu = ((ictr >> ICD_ICTR_CPU_SH) & ICD_ICTR_CPU_M) + 1;
269	printf(" nirq %d, ncpu %d", nintr, ncpu);
270
271	KASSERT(curcpu()->ci_cpuid <= ICD_ICTR_CPU_M);
272	sc->sc_cpu_mask[curcpu()->ci_cpuid] =
273	    bus_space_read_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPTRn(0));
274
275	ampintc_init(sc);
276
277	/* software reset of the part? */
278	/* set protection bit (kernel only)? */
279
280	/* XXX - check power saving bit */
281
282	sc->sc_handler = mallocarray(nintr, sizeof(*sc->sc_handler), M_DEVBUF,
283	    M_ZERO | M_NOWAIT);
284	for (i = 0; i < nintr; i++) {
285		TAILQ_INIT(&sc->sc_handler[i].iq_list);
286	}
287
288	ampintc_setipl(IPL_HIGH);  /* XXX ??? */
289	ampintc_calc_mask();
290
291	/* insert self as interrupt handler */
292	arm_set_intr_handler(ampintc_splraise, ampintc_spllower, ampintc_splx,
293	    ampintc_setipl, ampintc_irq_handler, NULL, NULL, NULL);
294
295#ifdef MULTIPROCESSOR
296	/* setup IPI interrupts */
297
298	/*
299	 * Ideally we want three IPI interrupts, one for NOP, one for
300	 * DDB and one for HALT.  However we can survive if only one
301	 * is available; it is possible that most are not available to
302	 * the non-secure OS.
303	 */
304	nipi = 0;
305	for (i = 0; i < 16; i++) {
306		int reg, oldreg;
307
308		oldreg = bus_space_read_1(sc->sc_iot, sc->sc_d_ioh,
309		    ICD_IPRn(i));
310		bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i),
311		    oldreg ^ 0x20);
312
313		/* if this interrupt is not usable, route will be zero */
314		reg = bus_space_read_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i));
315		if (reg == oldreg)
316			continue;
317
318		/* return to original value, will be set when used */
319		bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i),
320		    oldreg);
321
322		if (nipi == 0)
323			printf(" ipi: %d", i);
324		else
325			printf(", %d", i);
326		ipiirq[nipi++] = i;
327		if (nipi == 3)
328			break;
329	}
330
331	if (nipi == 0)
332		panic ("no irq available for IPI");
333
334	switch (nipi) {
335	case 1:
336		ampintc_intr_establish(ipiirq[0], IST_EDGE_RISING,
337		    IPL_IPI|IPL_MPSAFE, NULL, ampintc_ipi_combined, sc, "ipi");
338		sc->sc_ipi_num[ARM_IPI_NOP] = ipiirq[0];
339		sc->sc_ipi_num[ARM_IPI_DDB] = ipiirq[0];
340		sc->sc_ipi_num[ARM_IPI_HALT] = ipiirq[0];
341		break;
342	case 2:
343		ampintc_intr_establish(ipiirq[0], IST_EDGE_RISING,
344		    IPL_IPI|IPL_MPSAFE, NULL, ampintc_ipi_nop, sc, "ipinop");
345		sc->sc_ipi_num[ARM_IPI_NOP] = ipiirq[0];
346		ampintc_intr_establish(ipiirq[1], IST_EDGE_RISING,
347		    IPL_IPI|IPL_MPSAFE, NULL, ampintc_ipi_combined, sc, "ipi");
348		sc->sc_ipi_num[ARM_IPI_DDB] = ipiirq[1];
349		sc->sc_ipi_num[ARM_IPI_HALT] = ipiirq[1];
350		break;
351	case 3:
352		ampintc_intr_establish(ipiirq[0], IST_EDGE_RISING,
353		    IPL_IPI|IPL_MPSAFE, NULL, ampintc_ipi_nop, sc, "ipinop");
354		sc->sc_ipi_num[ARM_IPI_NOP] = ipiirq[0];
355		ampintc_intr_establish(ipiirq[1], IST_EDGE_RISING,
356		    IPL_IPI|IPL_MPSAFE, NULL, ampintc_ipi_ddb, sc, "ipiddb");
357		sc->sc_ipi_num[ARM_IPI_DDB] = ipiirq[1];
358		ampintc_intr_establish(ipiirq[2], IST_EDGE_RISING,
359		    IPL_IPI|IPL_MPSAFE, NULL, ampintc_ipi_halt, sc, "ipihalt");
360		sc->sc_ipi_num[ARM_IPI_HALT] = ipiirq[2];
361		break;
362	default:
363		panic("nipi unexpected number %d", nipi);
364	}
365
366	intr_send_ipi_func = ampintc_send_ipi;
367#endif
368
369	/* enable interrupts */
370	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_DCR, 3);
371	bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPICR, 1);
372	intr_enable();
373
374	sc->sc_ic.ic_node = faa->fa_node;
375	sc->sc_ic.ic_cookie = self;
376	sc->sc_ic.ic_establish = ampintc_intr_establish_fdt;
377	sc->sc_ic.ic_disestablish = ampintc_intr_disestablish;
378	sc->sc_ic.ic_route = ampintc_route_irq;
379	sc->sc_ic.ic_cpu_enable = ampintc_cpuinit;
380	sc->sc_ic.ic_barrier = ampintc_intr_barrier;
381	arm_intr_register_fdt(&sc->sc_ic);
382
383	/* attach GICv2M frame controller */
384	simplebus_attach(parent, &sc->sc_sbus.sc_dev, faa);
385}
386
/*
 * Power-state hook.  On resume the GIC has lost its state, so
 * reprogram priority, enable and routing for every interrupt from
 * the cached software state, then turn the distributor and CPU
 * interface back on.
 */
int
ampintc_activate(struct device *self, int act)
{
	struct ampintc_softc *sc = (struct ampintc_softc *)self;
	struct cpu_info *ci;
	int irq, min;

	switch (act) {
	case DVACT_RESUME:
		for (irq = 0; irq < sc->sc_nintr; irq++) {
			ci = sc->sc_handler[irq].iq_ci;
			min = sc->sc_handler[irq].iq_irq_min;
			/* iq_irq_min != IPL_NONE means handlers exist */
			if (min != IPL_NONE) {
				ampintc_set_priority(irq, min);
				ampintc_intr_enable(irq);
				ampintc_route(irq, IRQ_ENABLE, ci);
			} else {
				ampintc_intr_disable(irq);
			}
		}

		/* enable interrupts */
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_DCR, 3);
		bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPICR, 1);
		break;
	}

	return 0;
}
416
/*
 * Put the distributor into a known state: all interrupts disabled,
 * not pending, lowest priority, targeting no CPU, level triggered.
 */
void
ampintc_init(struct ampintc_softc *sc)
{
	int i;

	/* Disable all interrupts, clear all pending */
	for (i = 0; i < sc->sc_nintr / 32; i++) {
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
		    ICD_ICERn(i * 32), ~0);
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
		    ICD_ICPRn(i * 32), ~0);
	}
	for (i = 0; i < sc->sc_nintr; i++) {
		/* lowest priority ?? */
		bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i), 0xff);
		/* target no cpus */
		bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPTRn(i), 0);
	}
	/* start at i = 2: config of irq 0-31 (SGI/PPI) is fixed */
	for (i = 2; i < sc->sc_nintr / 16; i++) {
		/* irq 32 - N */
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
		    ICD_ICRn(i * 16), 0);
	}
}
441
/*
 * Program the hardware priority byte for one IRQ from an IPL_*
 * value.
 */
void
ampintc_set_priority(int irq, int pri)
{
	struct ampintc_softc	*sc = ampintc;
	uint32_t		 prival;

	/*
	 * We only use 16 (13 really) interrupt priorities,
	 * and a CPU is only required to implement bit 4-7 of each field
	 * so shift into the top bits.
	 * also low values are higher priority thus IPL_HIGH - pri
	 */
	prival = (IPL_HIGH - pri) << ICMIPMR_SH;
	bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(irq), prival);
}
457
/*
 * Set the current CPU's interrupt priority level: update ci_cpl and
 * the GIC priority mask register to match.
 */
void
ampintc_setipl(int new)
{
	struct cpu_info		*ci = curcpu();
	struct ampintc_softc	*sc = ampintc;
	u_long			 psw;

	/* disable here is only to keep hardware in sync with ci->ci_cpl */
	psw = intr_disable();
	ci->ci_cpl = new;

	/* low values are higher priority thus IPL_HIGH - pri */
	bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPIPMR,
	    (IPL_HIGH - new) << ICMIPMR_SH);
	intr_restore(psw);
}
474
475void
476ampintc_intr_enable(int irq)
477{
478	struct ampintc_softc	*sc = ampintc;
479
480#ifdef DEBUG
481	printf("enable irq %d register %x bitmask %08x\n",
482	    irq, ICD_ISERn(irq), 1 << IRQ_TO_REG32BIT(irq));
483#endif
484
485	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ISERn(irq),
486	    1 << IRQ_TO_REG32BIT(irq));
487}
488
489void
490ampintc_intr_disable(int irq)
491{
492	struct ampintc_softc	*sc = ampintc;
493
494	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICERn(irq),
495	    1 << IRQ_TO_REG32BIT(irq));
496}
497
/*
 * Set the trigger type (edge or level) for one IRQ in the 2-bit
 * per-interrupt configuration register.
 */
void
ampintc_intr_config(int irqno, int type)
{
	struct ampintc_softc	*sc = ampintc;
	uint32_t		 ctrl;

	/* read-modify-write; each ICRn word covers 16 interrupts */
	ctrl = bus_space_read_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICRn(irqno));

	ctrl &= ~ICD_ICR_TRIG_MASK(irqno);
	if (type == IST_EDGE_RISING)
		ctrl |= ICD_ICR_TRIG_EDGE(irqno);
	else
		ctrl |= ICD_ICR_TRIG_LEVEL(irqno);

	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICRn(irqno), ctrl);
}
514
515void
516ampintc_calc_mask(void)
517{
518	struct ampintc_softc	*sc = ampintc;
519	int			 irq;
520
521	for (irq = 0; irq < sc->sc_nintr; irq++)
522		ampintc_calc_irq(sc, irq);
523}
524
/*
 * Recompute the cached highest/lowest handler IPL for one IRQ and
 * push the result to hardware: enable and route the IRQ when it has
 * at least one handler, disable it otherwise.
 */
void
ampintc_calc_irq(struct ampintc_softc *sc, int irq)
{
	struct cpu_info		*ci = sc->sc_handler[irq].iq_ci;
	struct intrhand		*ih;
	int			max = IPL_NONE;
	int			min = IPL_HIGH;

	TAILQ_FOREACH(ih, &sc->sc_handler[irq].iq_list, ih_list) {
		if (ih->ih_ipl > max)
			max = ih->ih_ipl;

		if (ih->ih_ipl < min)
			min = ih->ih_ipl;
	}

	/* empty handler list: mark the IRQ unused */
	if (max == IPL_NONE)
		min = IPL_NONE;

	/* nothing changed, avoid touching the hardware */
	if (sc->sc_handler[irq].iq_irq_max == max &&
	    sc->sc_handler[irq].iq_irq_min == min)
		return;

	sc->sc_handler[irq].iq_irq_max = max;
	sc->sc_handler[irq].iq_irq_min = min;

	/* Enable interrupts at lower levels, clear -> enable */
	/* Set interrupt priority/enable */
	if (min != IPL_NONE) {
		ampintc_set_priority(irq, min);
		ampintc_intr_enable(irq);
		ampintc_route(irq, IRQ_ENABLE, ci);
	} else {
		ampintc_intr_disable(irq);
		ampintc_route(irq, IRQ_DISABLE, ci);
	}
}
562
/*
 * Restore the IPL to 'new', first running any pending soft
 * interrupts that the lower level unmasks.
 */
void
ampintc_splx(int new)
{
	struct cpu_info *ci = curcpu();

	if (ci->ci_ipending & arm_smask[new])
		arm_do_pending_intr(new);

	ampintc_setipl(new);
}
573
574int
575ampintc_spllower(int new)
576{
577	struct cpu_info *ci = curcpu();
578	int old = ci->ci_cpl;
579	ampintc_splx(new);
580	return (old);
581}
582
/*
 * Raise the IPL to at least 'new' and return the previous level.
 * Never lowers the level.
 */
int
ampintc_splraise(int new)
{
	struct cpu_info *ci = curcpu();
	int old;
	old = ci->ci_cpl;

	/*
	 * setipl must always be called because there is a race window
	 * where the variable is updated before the mask is set
	 * an interrupt occurs in that window without the mask always
	 * being set, the hardware might not get updated on the next
	 * splraise completely messing up spl protection.
	 */
	if (old > new)
		new = old;

	ampintc_setipl(new);

	return (old);
}
604
605
606uint32_t
607ampintc_iack(void)
608{
609	uint32_t intid;
610	struct ampintc_softc	*sc = ampintc;
611
612	intid = bus_space_read_4(sc->sc_iot, sc->sc_p_ioh, ICPIAR);
613
614	return (intid);
615}
616
/*
 * Signal end-of-interrupt; 'eoi' must be the raw value previously
 * returned by ampintc_iack().
 */
void
ampintc_eoi(uint32_t eoi)
{
	struct ampintc_softc	*sc = ampintc;

	bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPEOIR, eoi);
}
624
/*
 * Add (IRQ_ENABLE) or remove (IRQ_DISABLE) ci's target bit in the
 * IRQ's cpu-targets byte, leaving other CPUs' bits untouched.
 */
void
ampintc_route(int irq, int enable, struct cpu_info *ci)
{
	struct ampintc_softc	*sc = ampintc;
	uint8_t			 mask, val;

	KASSERT(ci->ci_cpuid <= ICD_ICTR_CPU_M);
	mask = sc->sc_cpu_mask[ci->ci_cpuid];

	val = bus_space_read_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPTRn(irq));
	if (enable == IRQ_ENABLE)
		val |= mask;
	else
		val &= ~mask;
	bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPTRn(irq), val);
}
641
/*
 * Per-CPU GIC setup, run on secondary CPUs when they come up:
 * discover this CPU's GIC target mask and apply routing for the
 * IRQs bound to this CPU.
 */
void
ampintc_cpuinit(void)
{
	struct ampintc_softc	*sc = ampintc;
	int			 i, irq;

	/* XXX - this is the only cpu specific call to set this */
	if (sc->sc_cpu_mask[cpu_number()] == 0) {
		/* scan the banked SGI/PPI targets for our own bit */
		for (i = 0; i < 32; i++) {
			int cpumask =
			    bus_space_read_1(sc->sc_iot, sc->sc_d_ioh,
			        ICD_IPTRn(i));

			if (cpumask != 0) {
				sc->sc_cpu_mask[cpu_number()] = cpumask;
				break;
			}
		}
	}

	if (sc->sc_cpu_mask[cpu_number()] == 0)
		panic("could not determine cpu target mask");

	for (irq = 0; irq < sc->sc_nintr; irq++) {
		if (sc->sc_handler[irq].iq_ci != curcpu())
			continue;
		if (sc->sc_handler[irq].iq_irq_min != IPL_NONE)
			ampintc_route(irq, IRQ_ENABLE, curcpu());
		else
			ampintc_route(irq, IRQ_DISABLE, curcpu());
	}

	/*
	 * If a secondary CPU is turned off from an IPI handler and
	 * the GIC did not go through a full reset (for example when
	 * we fail to suspend) the IPI might still be active.  So
	 * signal EOI here to make sure new interrupts will be
	 * serviced.
	 */
	ampintc_eoi(sc->sc_ipi_num[ARM_IPI_HALT]);
}
683
/*
 * FDT ic_route hook: (re)configure and route an established
 * handler's IRQ to the given CPU; used when moving an interrupt.
 */
void
ampintc_route_irq(void *v, int enable, struct cpu_info *ci)
{
	struct ampintc_softc    *sc = ampintc;
	struct intrhand         *ih = v;

	/* make sure the cpu interface is on and the irq level triggered */
	bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPICR, 1);
	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICRn(ih->ih_irq), 0);
	if (enable) {
		ampintc_set_priority(ih->ih_irq,
		    sc->sc_handler[ih->ih_irq].iq_irq_min);
		ampintc_intr_enable(ih->ih_irq);
	}

	ampintc_route(ih->ih_irq, enable, ci);
}
700
/*
 * Interrupt barrier: schedule onto the handler's CPU so that any
 * invocation of the handler in progress has finished on return.
 */
void
ampintc_intr_barrier(void *cookie)
{
	struct intrhand		*ih = cookie;

	sched_barrier(ih->ih_ci);
}
708
/*
 * Invoke a single interrupt handler, taking the kernel lock around
 * it unless the handler was established IPL_MPSAFE or we are already
 * at/above IPL_SCHED.  's' is the spl level the dispatch started at.
 */
void
ampintc_run_handler(struct intrhand *ih, void *frame, int s)
{
	void *arg;
	int handled;

#ifdef MULTIPROCESSOR
	int need_lock;

	if (ih->ih_flags & IPL_MPSAFE)
		need_lock = 0;
	else
		need_lock = s < IPL_SCHED;

	if (need_lock)
		KERNEL_LOCK();
#endif

	/* a NULL ih_arg means the handler wants the trap frame */
	if (ih->ih_arg)
		arg = ih->ih_arg;
	else
		arg = frame;

	handled = ih->ih_func(arg);
	if (handled)
		ih->ih_count.ec_count++;

#ifdef MULTIPROCESSOR
	if (need_lock)
		KERNEL_UNLOCK();
#endif
}
741
/*
 * Main interrupt dispatch: acknowledge the interrupt, raise spl to
 * the IRQ's highest handler level, run all handlers with interrupts
 * enabled, then EOI and drop back to the previous level.
 */
void
ampintc_irq_handler(void *frame)
{
	struct ampintc_softc	*sc = ampintc;
	struct intrhand		*ih;
	uint32_t		 iack_val;
	int			 irq, pri, s;

	iack_val = ampintc_iack();
#ifdef DEBUG_INTC
	if (iack_val != 27)
		printf("irq  %d fired\n", iack_val);
	else {
		static int cnt = 0;
		if ((cnt++ % 100) == 0) {
			printf("irq  %d fired * _100\n", iack_val);
#ifdef DDB
			db_enter();
#endif
		}

	}
#endif

	irq = iack_val & ICPIAR_IRQ_M;

	/* 1023 is the GIC's spurious interrupt id; no EOI required */
	if (irq == 1023) {
		sc->sc_spur.ec_count++;
		return;
	}

	if (irq >= sc->sc_nintr)
		return;

	pri = sc->sc_handler[irq].iq_irq_max;
	s = ampintc_splraise(pri);
	intr_enable();
	TAILQ_FOREACH(ih, &sc->sc_handler[irq].iq_list, ih_list) {
		ampintc_run_handler(ih, frame, s);
	}
	intr_disable();
	/* EOI must use the full IAR value (includes source CPU bits) */
	ampintc_eoi(iack_val);

	ampintc_splx(s);
}
787
/*
 * FDT establish hook: translate a 3-cell GIC interrupt specifier
 * (type, number, flags) into a hardware irq number and trigger type,
 * then establish the handler.
 */
void *
ampintc_intr_establish_fdt(void *cookie, int *cell, int level,
    struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct ampintc_softc	*sc = (struct ampintc_softc *)cookie;
	int			 irq;
	int			 type;

	/* 2nd cell contains the interrupt number */
	irq = cell[1];

	/* 1st cell contains type: 0 SPI (32-X), 1 PPI (16-31) */
	if (cell[0] == 0)
		irq += 32;
	else if (cell[0] == 1)
		irq += 16;
	else
		panic("%s: bogus interrupt type", sc->sc_sbus.sc_dev.dv_xname);

	/* SPIs are only active-high level or low-to-high edge */
	if (cell[2] & 0x3)
		type = IST_EDGE_RISING;
	else
		type = IST_LEVEL_HIGH;

	return ampintc_intr_establish(irq, type, level, ci, func, arg, name);
}
815
/*
 * Establish a handler for hardware irq 'irqno' at the given IPL on
 * CPU 'ci' (the primary CPU when NULL).  All handlers sharing an
 * IRQ must run on the same CPU; establishing on a second CPU fails
 * and returns NULL.  Returns an opaque handle for disestablish.
 */
void *
ampintc_intr_establish(int irqno, int type, int level, struct cpu_info *ci,
    int (*func)(void *), void *arg, char *name)
{
	struct ampintc_softc	*sc = ampintc;
	struct intrhand		*ih;
	u_long			 psw;

	if (irqno < 0 || irqno >= sc->sc_nintr)
		panic("ampintc_intr_establish: bogus irqnumber %d: %s",
		     irqno, name);

	if (ci == NULL)
		ci = &cpu_info_primary;

	/* trigger type of SGIs and PPIs is fixed by the hardware */
	if (irqno < 16) {
		/* SGI are only EDGE */
		type = IST_EDGE_RISING;
	} else if (irqno < 32) {
		/* PPI are only LEVEL */
		type = IST_LEVEL_HIGH;
	}

	ih = malloc(sizeof(*ih), M_DEVBUF, M_WAITOK);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = level & IPL_IRQMASK;
	ih->ih_flags = level & IPL_FLAGMASK;
	ih->ih_irq = irqno;
	ih->ih_name = name;
	ih->ih_ci = ci;

	psw = intr_disable();

	/* refuse to share an IRQ across CPUs */
	if (!TAILQ_EMPTY(&sc->sc_handler[irqno].iq_list) &&
	    sc->sc_handler[irqno].iq_ci != ci) {
		free(ih, M_DEVBUF, sizeof(*ih));
		intr_restore(psw);
		return NULL;
	}

	TAILQ_INSERT_TAIL(&sc->sc_handler[irqno].iq_list, ih, ih_list);
	sc->sc_handler[irqno].iq_ci = ci;

	if (name != NULL)
		evcount_attach(&ih->ih_count, name, &ih->ih_irq);

#ifdef DEBUG_INTC
	printf("ampintc_intr_establish irq %d level %d [%s]\n", irqno, level,
	    name);
#endif

	ampintc_intr_config(irqno, type);
	ampintc_calc_mask();

	intr_restore(psw);
	return (ih);
}
874
/*
 * Remove a handler previously returned by ampintc_intr_establish()
 * and recompute the IRQ's priority/enable state.
 */
void
ampintc_intr_disestablish(void *cookie)
{
	struct ampintc_softc	*sc = ampintc;
	struct intrhand		*ih = cookie;
	u_long			 psw;

#ifdef DEBUG_INTC
	printf("ampintc_intr_disestablish irq %d level %d [%s]\n",
	    ih->ih_irq, ih->ih_ipl, ih->ih_name);
#endif

	psw = intr_disable();

	TAILQ_REMOVE(&sc->sc_handler[ih->ih_irq].iq_list, ih, ih_list);
	if (ih->ih_name != NULL)
		evcount_detach(&ih->ih_count);

	/* may disable the IRQ entirely if this was the last handler */
	ampintc_calc_mask();

	intr_restore(psw);

	free(ih, M_DEVBUF, sizeof(*ih));
}
899
900const char *
901ampintc_intr_string(void *cookie)
902{
903	struct intrhand *ih = (struct intrhand *)cookie;
904	static char irqstr[1 + sizeof("ampintc irq ") + 4];
905
906	snprintf(irqstr, sizeof irqstr, "ampintc irq %d", ih->ih_irq);
907	return irqstr;
908}
909
910/*
911 * GICv2m frame controller for MSI interrupts.
912 */
913#define GICV2M_TYPER		0x008
914#define  GICV2M_TYPER_SPI_BASE(x)	(((x) >> 16) & 0x3ff)
915#define  GICV2M_TYPER_SPI_COUNT(x)	(((x) >> 0) & 0x3ff)
916#define GICV2M_SETSPI_NS	0x040
917
int	 ampintc_msi_match(struct device *, void *, void *);
void	 ampintc_msi_attach(struct device *, struct device *, void *);
void	*ampintc_intr_establish_msi(void *, uint64_t *, uint64_t *,
	    int , struct cpu_info *, int (*)(void *), void *, char *);
void	 ampintc_intr_disestablish_msi(void *);
void	 ampintc_intr_barrier_msi(void *);

struct ampintc_msi_softc {
	struct device			 sc_dev;
	bus_space_tag_t			 sc_iot;
	bus_space_handle_t		 sc_ioh;
	int				 sc_node;
	paddr_t				 sc_addr;	/* physical doorbell base */
	int				 sc_bspi;	/* first SPI of this frame */
	int				 sc_nspi;	/* number of SPIs in frame */
	void				**sc_spi;	/* handler handle per SPI */
	struct interrupt_controller	 sc_ic;
};
936
/* autoconf glue for the GICv2m MSI frame */
const struct cfattach	ampintcmsi_ca = {
	sizeof (struct ampintc_msi_softc), ampintc_msi_match, ampintc_msi_attach
};

struct cfdriver ampintcmsi_cd = {
	NULL, "ampintcmsi", DV_DULL
};
944
945int
946ampintc_msi_match(struct device *parent, void *cfdata, void *aux)
947{
948	struct fdt_attach_args *faa = aux;
949
950	return OF_is_compatible(faa->fa_node, "arm,gic-v2m-frame");
951}
952
/*
 * Attach a GICv2m MSI frame: map its registers, determine the SPI
 * range it can raise (hardware TYPER, possibly overridden by device
 * tree properties) and register as an MSI-capable FDT controller.
 */
void
ampintc_msi_attach(struct device *parent, struct device *self, void *aux)
{
	struct ampintc_msi_softc *sc = (struct ampintc_msi_softc *)self;
	struct fdt_attach_args *faa = aux;
	uint32_t typer;

	sc->sc_node = faa->fa_node;
	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh))
		panic("%s: bus_space_map failed!", __func__);

	/* XXX: Hack to retrieve the physical address (from a CPU PoV). */
	if (!pmap_extract(pmap_kernel(), sc->sc_ioh, &sc->sc_addr)) {
		printf(": cannot retrieve msi addr\n");
		return;
	}

	typer = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GICV2M_TYPER);
	sc->sc_bspi = GICV2M_TYPER_SPI_BASE(typer);
	sc->sc_nspi = GICV2M_TYPER_SPI_COUNT(typer);

	/* device tree may override the hardware-reported SPI range */
	sc->sc_bspi = OF_getpropint(faa->fa_node,
	    "arm,msi-base-spi", sc->sc_bspi);
	sc->sc_nspi = OF_getpropint(faa->fa_node,
	    "arm,msi-num-spis", sc->sc_nspi);

	printf(": nspi %d\n", sc->sc_nspi);

	sc->sc_spi = mallocarray(sc->sc_nspi, sizeof(void *), M_DEVBUF,
	    M_WAITOK|M_ZERO);

	sc->sc_ic.ic_node = faa->fa_node;
	sc->sc_ic.ic_cookie = sc;
	sc->sc_ic.ic_establish_msi = ampintc_intr_establish_msi;
	sc->sc_ic.ic_disestablish = ampintc_intr_disestablish_msi;
	sc->sc_ic.ic_barrier = ampintc_intr_barrier_msi;
	arm_intr_register_fdt(&sc->sc_ic);
}
993
/*
 * Allocate a free SPI from this MSI frame, establish the handler on
 * the parent GIC, and return the doorbell address/data pair the
 * device must write to raise the interrupt.  Returns NULL when no
 * SPI is free or the parent controller cannot be found.
 */
void *
ampintc_intr_establish_msi(void *self, uint64_t *addr, uint64_t *data,
    int level, struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct ampintc_msi_softc *sc = (struct ampintc_msi_softc *)self;
	extern LIST_HEAD(, interrupt_controller) interrupt_controllers;
	struct interrupt_controller *ic;
	struct machine_intr_handle *ih;
	void *cookie;
	int cells[3];
	int i;

	/* find the parent interrupt controller (the GIC itself) */
	LIST_FOREACH(ic, &interrupt_controllers, ic_list) {
		if (ic->ic_node == OF_parent(sc->sc_node))
			break;
	}
	if (ic == NULL)
		return NULL;

	cells[0] = 0; /* SPI */
	cells[2] = 1; /* Edge-Rising */

	for (i = 0; i < sc->sc_nspi; i++) {
		if (sc->sc_spi[i] != NULL)
			continue;

		/* SPI cells are numbered from 0 at hw irq 32 */
		cells[1] = sc->sc_bspi + i - 32;
		cookie = ic->ic_establish(ic->ic_cookie, cells,
		    level, ci, func, arg, name);
		if (cookie == NULL)
			return NULL;

		ih = malloc(sizeof(*ih), M_DEVBUF, M_WAITOK);
		ih->ih_ic = ic;
		ih->ih_ih = cookie;

		/* device writes the SPI number to the SETSPI doorbell */
		*addr = sc->sc_addr + GICV2M_SETSPI_NS;
		*data = sc->sc_bspi + i;
		sc->sc_spi[i] = ih;
		/* return a pointer INTO sc_spi so disestablish can clear it */
		return &sc->sc_spi[i];
	}

	return NULL;
}
1038
/*
 * Tear down an MSI; cookie points into sc_spi[], so clearing it
 * frees the slot for reuse.  NOTE(review): the machine_intr_handle
 * allocated in establish_msi is presumably freed inside
 * fdt_intr_disestablish() -- verify against the fdt machdep code.
 */
void
ampintc_intr_disestablish_msi(void *cookie)
{
	fdt_intr_disestablish(*(void **)cookie);
	*(void **)cookie = NULL;
}
1045
/* Interrupt barrier for an MSI; cookie points into sc_spi[]. */
void
ampintc_intr_barrier_msi(void *cookie)
{
	intr_barrier(*(void **)cookie);
}
1051
1052#ifdef MULTIPROCESSOR
/* IPI handler: enter the kernel debugger on this CPU. */
int
ampintc_ipi_ddb(void *v)
{
	/* XXX */
#ifdef DDB
	db_enter();
#endif
	return 1;
}
1062
/* IPI handler: halt this CPU (does not return until restarted). */
int
ampintc_ipi_halt(void *v)
{
	cpu_halt();
	return 1;
}
1069
/* IPI handler: no work, the interrupt itself wakes the CPU. */
int
ampintc_ipi_nop(void *v)
{
	/* Nothing to do here, just enough to wake up from WFI */
	return 1;
}
1076
1077int
1078ampintc_ipi_combined(void *v)
1079{
1080	struct ampintc_softc *sc = (struct ampintc_softc *)v;
1081
1082	if (sc->sc_ipi_reason[cpu_number()] == ARM_IPI_DDB) {
1083		sc->sc_ipi_reason[cpu_number()] = ARM_IPI_NOP;
1084		return ampintc_ipi_ddb(v);
1085	} else if (sc->sc_ipi_reason[cpu_number()] == ARM_IPI_HALT) {
1086		sc->sc_ipi_reason[cpu_number()] = ARM_IPI_NOP;
1087		return ampintc_ipi_halt(v);
1088	} else {
1089		return ampintc_ipi_nop(v);
1090	}
1091}
1092
/*
 * Send an inter-processor interrupt of type 'id' (ARM_IPI_*) to
 * CPU 'ci' by writing the software generated interrupt register.
 */
void
ampintc_send_ipi(struct cpu_info *ci, int id)
{
	struct ampintc_softc	*sc = ampintc;
	int sendmask;

	/* a NOP to ourselves is pointless; the other types still matter */
	if (ci == curcpu() && id == ARM_IPI_NOP)
		return;

	/* never overwrite IPI_DDB or IPI_HALT with IPI_NOP */
	if (id == ARM_IPI_DDB || id == ARM_IPI_HALT)
		sc->sc_ipi_reason[ci->ci_cpuid] = id;

	/* currently will only send to one cpu */
	/* SGIR: target cpu list in bits 16-23, SGI number in bits 0-3 */
	sendmask = sc->sc_cpu_mask[ci->ci_cpuid] << 16;
	sendmask |= sc->sc_ipi_num[id];

	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_SGIR, sendmask);
}
1112#endif
1113