/* gic.c revision 1.42 */
1/*	$NetBSD: gic.c,v 1.42 2020/09/26 10:06:25 skrll Exp $	*/
2/*-
3 * Copyright (c) 2012 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Matt Thomas of 3am Software Foundry.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include "opt_ddb.h"
32#include "opt_multiprocessor.h"
33
34#define _INTR_PRIVATE
35
36#include <sys/cdefs.h>
37__KERNEL_RCSID(0, "$NetBSD: gic.c,v 1.42 2020/09/26 10:06:25 skrll Exp $");
38
39#include <sys/param.h>
40#include <sys/bus.h>
41#include <sys/cpu.h>
42#include <sys/device.h>
43#include <sys/evcnt.h>
44#include <sys/intr.h>
45#include <sys/proc.h>
46#include <sys/atomic.h>
47
48#include <arm/armreg.h>
49#include <arm/cpufunc.h>
50#include <arm/locore.h>
51
52#include <arm/cortex/gic_reg.h>
53#include <arm/cortex/mpcore_var.h>
54
/* IRQ entry point, wired into the ARM exception vectors. */
void armgic_irq_handler(void *);

/* SGI number of the first IPI; IPIs occupy ARMGIC_SGI_IPIBASE .. +NIPI-1. */
#define	ARMGIC_SGI_IPIBASE	0

/*
 * SGIs 8-16 are reserved for use by ARM Trusted Firmware.
 */
__CTASSERT(ARMGIC_SGI_IPIBASE + NIPI <= 8);

/* autoconf(9) match/attach glue. */
static int armgic_match(device_t, cfdata_t, void *);
static void armgic_attach(device_t, device_t, void *);

/* pic(9) backend operations; collected in armgic_picops below. */
static void armgic_set_priority(struct pic_softc *, int);
static void armgic_unblock_irqs(struct pic_softc *, size_t, uint32_t);
static void armgic_block_irqs(struct pic_softc *, size_t, uint32_t);
static void armgic_establish_irq(struct pic_softc *, struct intrsource *);
#if 0
static void armgic_source_name(struct pic_softc *, int, char *, size_t);
#endif

#ifdef MULTIPROCESSOR
static void armgic_cpu_init(struct pic_softc *, struct cpu_info *);
static void armgic_ipi_send(struct pic_softc *, const kcpuset_t *, u_long);
static void armgic_get_affinity(struct pic_softc *, size_t, kcpuset_t *);
static int armgic_set_affinity(struct pic_softc *, size_t, const kcpuset_t *);
#endif
81
/* pic(9) operations vector registered with pic_add() in armgic_attach(). */
static const struct pic_ops armgic_picops = {
	.pic_unblock_irqs = armgic_unblock_irqs,
	.pic_block_irqs = armgic_block_irqs,
	.pic_establish_irq = armgic_establish_irq,
#if 0
	.pic_source_name = armgic_source_name,
#endif
	.pic_set_priority = armgic_set_priority,
#ifdef MULTIPROCESSOR
	.pic_cpu_init = armgic_cpu_init,
	.pic_ipi_send = armgic_ipi_send,
	.pic_get_affinity = armgic_get_affinity,
	.pic_set_affinity = armgic_set_affinity,
#endif
};
97
/* Convert a generic pic_softc pointer to our softc (sc_pic is first). */
#define	PICTOSOFTC(pic)		((struct armgic_softc *)(pic))

static struct armgic_softc {
	struct pic_softc sc_pic;	/* generic PIC state; must be first */
	device_t sc_dev;
	bus_space_tag_t sc_memt;
	bus_space_handle_t sc_gicch;	/* CPU interface (GICC) registers */
	bus_space_handle_t sc_gicdh;	/* distributor (GICD) registers */
	size_t sc_gic_lines;		/* count of valid interrupt lines */
	uint32_t sc_gic_type;		/* cached GICD_TYPER value */
	uint32_t sc_gic_valid_lines[1024/32];	/* bitmap of usable lines */
	uint32_t sc_enabled_local;	/* group-0 enables, replayed on APs */
#ifdef MULTIPROCESSOR
	uint32_t sc_target[MAXCPUS];	/* per-CPU GIC interface target bit */
	uint32_t sc_mptargets;		/* union of all sc_target[] bits */
#endif
	uint32_t sc_bptargets;		/* boot processor's target bit */
} armgic_softc = {
	.sc_pic = {
		.pic_ops = &armgic_picops,
		.pic_name = "armgic",
	},
};

/* Placeholder installed on invalid lines so pic_intr_establish fails. */
static struct intrsource armgic_dummy_source;

/* The IPL <-> GIC priority mapping below assumes exactly 8 IPLs. */
__CTASSERT(NIPL == 8);
125
126/*
127 * GIC register are always in little-endian.  It is assumed the bus_space
128 * will do any endian conversion required.
129 */
130static inline uint32_t
131gicc_read(struct armgic_softc *sc, bus_size_t o)
132{
133	return bus_space_read_4(sc->sc_memt, sc->sc_gicch, o);
134}
135
136static inline void
137gicc_write(struct armgic_softc *sc, bus_size_t o, uint32_t v)
138{
139	bus_space_write_4(sc->sc_memt, sc->sc_gicch, o, v);
140}
141
142static inline uint32_t
143gicd_read(struct armgic_softc *sc, bus_size_t o)
144{
145	return bus_space_read_4(sc->sc_memt, sc->sc_gicdh, o);
146}
147
148static inline void
149gicd_write(struct armgic_softc *sc, bus_size_t o, uint32_t v)
150{
151	bus_space_write_4(sc->sc_memt, sc->sc_gicdh, o, v);
152}
153
154static uint32_t
155gicd_find_targets(struct armgic_softc *sc)
156{
157	uint32_t targets = 0;
158
159	/*
160	 * GICD_ITARGETSR0 through 7 are read-only, and each field returns
161	 * a value that corresponds only to the processor reading the
162	 * register. Use this to determine the current processor's
163	 * CPU interface number.
164	 */
165	for (int i = 0; i < 8; i++) {
166		targets = gicd_read(sc, GICD_ITARGETSRn(i));
167		if (targets != 0)
168			break;
169	}
170	targets |= (targets >> 16);
171	targets |= (targets >> 8);
172	targets &= 0xff;
173
174	return targets ? targets : 1;
175}
176
177/*
178 * In the GIC prioritization scheme, lower numbers have higher priority.
179 * Only write priorities that could be non-secure.
180 */
181static inline uint32_t
182armgic_ipl_to_priority(int ipl)
183{
184	return GICC_PMR_NONSECURE
185	    | ((IPL_HIGH - ipl) * GICC_PMR_NS_PRIORITIES / NIPL);
186}
187
#if 0
/* Inverse of armgic_ipl_to_priority(); currently unused, kept disabled. */
static inline int
armgic_priority_to_ipl(uint32_t priority)
{
	return IPL_HIGH
	    - (priority & ~GICC_PMR_NONSECURE) * NIPL / GICC_PMR_NS_PRIORITIES;
}
#endif
196
197static void
198armgic_unblock_irqs(struct pic_softc *pic, size_t irq_base, uint32_t irq_mask)
199{
200	struct armgic_softc * const sc = PICTOSOFTC(pic);
201	const size_t group = irq_base / 32;
202
203	if (group == 0)
204		sc->sc_enabled_local |= irq_mask;
205
206	gicd_write(sc, GICD_ISENABLERn(group), irq_mask);
207}
208
209static void
210armgic_block_irqs(struct pic_softc *pic, size_t irq_base, uint32_t irq_mask)
211{
212	struct armgic_softc * const sc = PICTOSOFTC(pic);
213	const size_t group = irq_base / 32;
214
215	if (group == 0)
216		sc->sc_enabled_local &= ~irq_mask;
217
218	gicd_write(sc, GICD_ICENABLERn(group), irq_mask);
219}
220
221static void
222armgic_set_priority(struct pic_softc *pic, int ipl)
223{
224	struct armgic_softc * const sc = PICTOSOFTC(pic);
225
226	const uint32_t priority = armgic_ipl_to_priority(ipl);
227	gicc_write(sc, GICC_PMR, priority);
228}
229
230#ifdef MULTIPROCESSOR
231static void
232armgic_get_affinity(struct pic_softc *pic, size_t irq, kcpuset_t *affinity)
233{
234	struct armgic_softc * const sc = PICTOSOFTC(pic);
235	const size_t group = irq / 32;
236	int n;
237
238	kcpuset_zero(affinity);
239	if (group == 0) {
240		/* All CPUs are targets for group 0 (SGI/PPI) */
241		for (n = 0; n < MAXCPUS; n++) {
242			if (sc->sc_target[n] != 0)
243				kcpuset_set(affinity, n);
244		}
245	} else {
246		/* Find distributor targets (SPI) */
247		const u_int byte_shift = 8 * (irq & 3);
248		const bus_size_t targets_reg = GICD_ITARGETSRn(irq / 4);
249		const uint32_t targets = gicd_read(sc, targets_reg);
250		const uint32_t targets_val = (targets >> byte_shift) & 0xff;
251
252		for (n = 0; n < MAXCPUS; n++) {
253			if (sc->sc_target[n] & targets_val)
254				kcpuset_set(affinity, n);
255		}
256	}
257}
258
259static int
260armgic_set_affinity(struct pic_softc *pic, size_t irq,
261    const kcpuset_t *affinity)
262{
263	struct armgic_softc * const sc = PICTOSOFTC(pic);
264	const size_t group = irq / 32;
265	if (group == 0)
266		return EINVAL;
267
268	const u_int byte_shift = 8 * (irq & 3);
269	const bus_size_t targets_reg = GICD_ITARGETSRn(irq / 4);
270	uint32_t targets_val = 0;
271	int n;
272
273	for (n = 0; n < MAXCPUS; n++) {
274		if (kcpuset_isset(affinity, n))
275			targets_val |= sc->sc_target[n];
276	}
277
278	uint32_t targets = gicd_read(sc, targets_reg);
279	targets &= ~(0xff << byte_shift);
280	targets |= (targets_val << byte_shift);
281	gicd_write(sc, targets_reg, targets);
282
283	return 0;
284}
285#endif
286
287#ifdef __HAVE_PIC_FAST_SOFTINTS
288void
289softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep_p)
290{
291	lwp_t **lp = &l->l_cpu->ci_softlwps[level];
292	KASSERT(*lp == NULL || *lp == l);
293	*lp = l;
294	/*
295	 * Really easy.  Just tell it to trigger the local CPU.
296	 */
297	*machdep_p = GICD_SGIR_TargetListFilter_Me
298	    | __SHIFTIN(level, GICD_SGIR_SGIINTID);
299}
300
/*
 * Fire a fast softint by writing the precomputed SGI request (built in
 * softint_init_md()) to the distributor's SGI register.
 */
void
softint_trigger(uintptr_t machdep)
{

	gicd_write(&armgic_softc, GICD_SGIR, machdep);
}
307#endif
308
/*
 * Main IRQ entry point, entered from the exception vector with IRQs
 * masked.  Repeatedly acknowledges interrupts from the GIC CPU
 * interface and dispatches them until a spurious ID is read back, then
 * runs any deferred interrupts and restores the entry IPL.
 */
void
armgic_irq_handler(void *tf)
{
	struct cpu_info * const ci = curcpu();
	struct armgic_softc * const sc = &armgic_softc;
	const int old_ipl = ci->ci_cpl;
#ifdef DIAGNOSTIC
	const int old_mtx_count = ci->ci_mtx_count;
	const int old_l_biglocks = ci->ci_curlwp->l_biglocks;
#endif
#ifdef DEBUG
	size_t n = 0;
#endif

	ci->ci_data.cpu_nintr++;

	for (;;) {
		uint32_t iar = gicc_read(sc, GICC_IAR);
		uint32_t irq = __SHIFTOUT(iar, GICC_IAR_IRQ);

		/*
		 * A spurious ID normally means we are done; re-read IAR
		 * once in case a real interrupt arrived in the meantime,
		 * and stop only if it is still spurious.
		 */
		if (irq == GICC_IAR_IRQ_SPURIOUS ||
		    irq == GICC_IAR_IRQ_SSPURIOUS) {
			iar = gicc_read(sc, GICC_IAR);
			irq = __SHIFTOUT(iar, GICC_IAR_IRQ);
			if (irq == GICC_IAR_IRQ_SPURIOUS)
				break;
			if (irq == GICC_IAR_IRQ_SSPURIOUS) {
				break;
			}
		}

		KASSERTMSG(old_ipl != IPL_HIGH, "old_ipl %d pmr %#x hppir %#x",
		    old_ipl, gicc_read(sc, GICC_PMR), gicc_read(sc, GICC_HPPIR));

		//const uint32_t cpuid = __SHIFTOUT(iar, GICC_IAR_CPUID_MASK);
		struct intrsource * const is = sc->sc_pic.pic_sources[irq];
		KASSERT(is != &armgic_dummy_source);

		/*
		 * GIC has asserted IPL for us so we can just update ci_cpl.
		 *
		 * But it's not that simple.  We may have already bumped ci_cpl
		 * due to a high priority interrupt and now we are about to
		 * dispatch one lower than the previous.  It's possible for
		 * that previous interrupt to have deferred some interrupts
		 * so we need deal with those when lowering to the current
		 * interrupt's ipl.
		 *
		 * However, if are just raising ipl, we can just update ci_cpl.
		 */
		const int ipl = is->is_ipl;
		if (__predict_false(ipl < ci->ci_cpl)) {
			pic_do_pending_ints(I32_bit, ipl, tf);
			KASSERT(ci->ci_cpl == ipl);
		} else {
			KASSERTMSG(ipl > ci->ci_cpl, "ipl %d cpl %d hw-ipl %#x",
			    ipl, ci->ci_cpl,
			    gicc_read(sc, GICC_PMR));
			gicc_write(sc, GICC_PMR, armgic_ipl_to_priority(ipl));
			ci->ci_cpl = ipl;
		}
		/* Run the handler with IRQs enabled, then re-mask. */
		cpsie(I32_bit);
		pic_dispatch(is, tf);
		cpsid(I32_bit);
		/* Signal end-of-interrupt using the full IAR value. */
		gicc_write(sc, GICC_EOIR, iar);
#ifdef DEBUG
		n++;
		KDASSERTMSG(n < 5, "%s: processed too many (%zu)",
		    ci->ci_data.cpu_name, n);
#endif
	}

	/*
	 * Now handle any pending ints.
	 */
	pic_do_pending_ints(I32_bit, old_ipl, tf);
	KASSERTMSG(ci->ci_cpl == old_ipl, "ci_cpl %d old_ipl %d", ci->ci_cpl, old_ipl);
	KASSERT(old_mtx_count == ci->ci_mtx_count);
	KASSERT(old_l_biglocks == ci->ci_curlwp->l_biglocks);
}
389
/*
 * pic(9) hook: program distributor routing, trigger type and priority
 * for a newly established interrupt source.  The caller fills in
 * is_irq, is_type (IST_LEVEL or IST_EDGE) and is_ipl.
 */
void
armgic_establish_irq(struct pic_softc *pic, struct intrsource *is)
{
	struct armgic_softc * const sc = PICTOSOFTC(pic);
	const size_t group = is->is_irq / 32;
	const u_int irq = is->is_irq & 31;
	const u_int byte_shift = 8 * (irq & 3);
	const u_int twopair_shift = 2 * (irq & 15);

	KASSERTMSG(sc->sc_gic_valid_lines[group] & __BIT(irq),
	    "irq %u: not valid (group[%zu]=0x%08x [0x%08x])",
	    is->is_irq, group, sc->sc_gic_valid_lines[group],
	    (uint32_t)__BIT(irq));

	KASSERTMSG(is->is_type == IST_LEVEL || is->is_type == IST_EDGE,
	    "irq %u: type %u unsupported", is->is_irq, is->is_type);

	const bus_size_t targets_reg = GICD_ITARGETSRn(is->is_irq / 4);
	const bus_size_t cfg_reg = GICD_ICFGRn(is->is_irq / 16);
	uint32_t targets = gicd_read(sc, targets_reg);
	uint32_t cfg = gicd_read(sc, cfg_reg);

	if (group > 0) {
		/*
		 * There are 4 irqs per TARGETS register.  For now bind
		 * to the primary cpu.
		 */
		targets &= ~(0xffU << byte_shift);
#if 0
#ifdef MULTIPROCESSOR
		if (is->is_mpsafe) {
			targets |= sc->sc_mptargets << byte_shift;
		} else
#endif
#endif
		targets |= sc->sc_bptargets << byte_shift;
		gicd_write(sc, targets_reg, targets);

		/*
		 * There are 16 irqs per CFG register.  10=EDGE 00=LEVEL
		 * Only rewrite the register when the 2-bit field actually
		 * needs to change.
		 */
		uint32_t new_cfg = cfg;
		uint32_t old_cfg = (cfg >> twopair_shift) & __BITS(1, 0);
		if (is->is_type == IST_LEVEL && (old_cfg & __BIT(1)) != 0) {
			new_cfg &= ~(__BITS(1, 0) << twopair_shift);
		} else if (is->is_type == IST_EDGE && (old_cfg & 2) == 0) {
			new_cfg |= __BIT(1) << twopair_shift;
		}
		if (new_cfg != cfg) {
			gicd_write(sc, cfg_reg, new_cfg);
		}
#ifdef MULTIPROCESSOR
	} else {
		/*
		 * All group 0 interrupts are per processor and MPSAFE by
		 * default.
		 */
		is->is_mpsafe = true;
#endif
	}

	/*
	 * There are 4 irqs per PRIORITY register.  Map the IPL
	 * to GIC priority.
	 */
	const bus_size_t priority_reg = GICD_IPRIORITYRn(is->is_irq / 4);
	uint32_t priority = gicd_read(sc, priority_reg);
	priority &= ~(0xffU << byte_shift);
	priority |= armgic_ipl_to_priority(is->is_ipl) << byte_shift;
	gicd_write(sc, priority_reg, priority);
}
461
462#ifdef MULTIPROCESSOR
463static void
464armgic_cpu_init_priorities(struct armgic_softc *sc)
465{
466	/* Set lowest priority, i.e. disable interrupts */
467	for (size_t i = 0; i < sc->sc_pic.pic_maxsources; i += 4) {
468		const bus_size_t priority_reg = GICD_IPRIORITYRn(i / 4);
469		gicd_write(sc, priority_reg, ~0);
470	}
471}
472
473static void
474armgic_cpu_update_priorities(struct armgic_softc *sc)
475{
476	uint32_t enabled = sc->sc_enabled_local;
477	for (size_t i = 0; i < sc->sc_pic.pic_maxsources; i += 4, enabled >>= 4) {
478		const bus_size_t priority_reg = GICD_IPRIORITYRn(i / 4);
479		uint32_t priority = gicd_read(sc, priority_reg);
480		uint32_t byte_mask = 0xff;
481		size_t byte_shift = 0;
482		for (size_t j = 0; j < 4; j++, byte_mask <<= 8, byte_shift += 8) {
483			struct intrsource * const is = sc->sc_pic.pic_sources[i+j];
484			priority |= byte_mask;
485			if (is == NULL || is == &armgic_dummy_source)
486				continue;
487			priority &= ~byte_mask;
488			priority |= armgic_ipl_to_priority(is->is_ipl) << byte_shift;
489		}
490		gicd_write(sc, priority_reg, priority);
491	}
492}
493
494static void
495armgic_cpu_init_targets(struct armgic_softc *sc)
496{
497	/*
498	 * Update the mpsafe targets
499	 */
500	for (size_t irq = 32; irq < sc->sc_pic.pic_maxsources; irq++) {
501		struct intrsource * const is = sc->sc_pic.pic_sources[irq];
502		const bus_size_t targets_reg = GICD_ITARGETSRn(irq / 4);
503		if (is != NULL && is->is_mpsafe) {
504			const u_int byte_shift = 8 * (irq & 3);
505			uint32_t targets = gicd_read(sc, targets_reg);
506			targets |= sc->sc_mptargets << byte_shift;
507			gicd_write(sc, targets_reg, targets);
508		}
509	}
510}
511
/*
 * Per-CPU GIC initialization, run on every processor (on the boot CPU
 * it is invoked from armgic_attach()).  Records this CPU's interface
 * target bit, replays the boot CPU's banked group-0 state on
 * secondaries, then enables the CPU interface and unmasks IRQs.
 */
void
armgic_cpu_init(struct pic_softc *pic, struct cpu_info *ci)
{
	struct armgic_softc * const sc = PICTOSOFTC(pic);
	sc->sc_target[cpu_index(ci)] = gicd_find_targets(sc);
	atomic_or_32(&sc->sc_mptargets, sc->sc_target[cpu_index(ci)]);
	KASSERTMSG(ci->ci_cpl == IPL_HIGH, "ipl %d not IPL_HIGH", ci->ci_cpl);
	armgic_cpu_init_priorities(sc);
	if (!CPU_IS_PRIMARY(ci)) {
		/* More than one interface known: retarget MP-safe SPIs. */
		if (popcount(sc->sc_mptargets) != 1) {
			armgic_cpu_init_targets(sc);
		}
		/* Re-enable the group-0 sources the boot CPU enabled. */
		if (sc->sc_enabled_local) {
			armgic_cpu_update_priorities(sc);
			gicd_write(sc, GICD_ISENABLERn(0),
			    sc->sc_enabled_local);
		}
	}
	gicc_write(sc, GICC_PMR, armgic_ipl_to_priority(ci->ci_cpl));	// set PMR
	gicc_write(sc, GICC_CTRL, GICC_CTRL_V1_Enable);	// enable interrupt
	cpsie(I32_bit);					// allow IRQ exceptions
}
534
535void
536armgic_ipi_send(struct pic_softc *pic, const kcpuset_t *kcp, u_long ipi)
537{
538	struct armgic_softc * const sc = PICTOSOFTC(pic);
539
540#if 0
541	if (ipi == IPI_NOP) {
542		__asm __volatile("sev");
543		return;
544	}
545#endif
546
547	uint32_t sgir = __SHIFTIN(ARMGIC_SGI_IPIBASE + ipi, GICD_SGIR_SGIINTID);
548	if (kcp != NULL) {
549		uint32_t targets_val = 0;
550		for (int n = 0; n < MAXCPUS; n++) {
551			if (kcpuset_isset(kcp, n))
552				targets_val |= sc->sc_target[n];
553		}
554		sgir |= __SHIFTIN(targets_val, GICD_SGIR_TargetList);
555		sgir |= GICD_SGIR_TargetListFilter_List;
556	} else {
557		if (ncpu == 1)
558			return;
559		sgir |= GICD_SGIR_TargetListFilter_NotMe;
560	}
561
562	gicd_write(sc, GICD_SGIR, sgir);
563}
564#endif
565
566int
567armgic_match(device_t parent, cfdata_t cf, void *aux)
568{
569	struct mpcore_attach_args * const mpcaa = aux;
570
571	if (strcmp(cf->cf_name, mpcaa->mpcaa_name) != 0)
572		return 0;
573
574	return 1;
575}
576
/*
 * autoconf(9) attach: map the distributor and CPU interface windows,
 * probe the implemented interrupt lines, register with pic(9), enable
 * interrupt delivery on the boot CPU and establish softint/IPI SGIs.
 */
void
armgic_attach(device_t parent, device_t self, void *aux)
{
	struct armgic_softc * const sc = &armgic_softc;
	struct mpcore_attach_args * const mpcaa = aux;

	sc->sc_dev = self;
	self->dv_private = sc;

	sc->sc_memt = mpcaa->mpcaa_memt;	/* provided for us */
	bus_space_subregion(sc->sc_memt, mpcaa->mpcaa_memh, mpcaa->mpcaa_off1,
	    4096, &sc->sc_gicdh);
	bus_space_subregion(sc->sc_memt, mpcaa->mpcaa_memh, mpcaa->mpcaa_off2,
	    4096, &sc->sc_gicch);

	sc->sc_gic_type = gicd_read(sc, GICD_TYPER);
	sc->sc_pic.pic_maxsources = GICD_TYPER_LINES(sc->sc_gic_type);

	gicc_write(sc, GICC_CTRL, 0);	/* disable all interrupts */
	gicd_write(sc, GICD_CTRL, 0);	/* disable all interrupts */

	/*
	 * Write all ones to PMR and read it back: the number of bits
	 * that stick tells us how many priority levels are implemented.
	 */
	gicc_write(sc, GICC_PMR, 0xff);
	uint32_t pmr = gicc_read(sc, GICC_PMR);
	u_int priorities = 1 << popcount32(pmr);

	const uint32_t iidr = gicc_read(sc, GICC_IIDR);
	const int iidr_prod = __SHIFTOUT(iidr, GICC_IIDR_ProductID);
	const int iidr_arch = __SHIFTOUT(iidr, GICC_IIDR_ArchVersion);
	const int iidr_rev = __SHIFTOUT(iidr, GICC_IIDR_Revision);
	const int iidr_imp = __SHIFTOUT(iidr, GICC_IIDR_Implementer);

	/*
	 * Find the boot processor's CPU interface number.
	 */
	sc->sc_bptargets = gicd_find_targets(sc);

	/*
	 * Let's find out how many real sources we have.
	 */
	for (size_t i = 0, group = 0;
	     i < sc->sc_pic.pic_maxsources;
	     i += 32, group++) {
		/*
		 * To figure what sources are real, one enables all interrupts
		 * and then reads back the enable mask so which ones really
		 * got enabled.
		 */
		gicd_write(sc, GICD_ISENABLERn(group), 0xffffffff);
		uint32_t valid = gicd_read(sc, GICD_ISENABLERn(group));

		/*
		 * Now disable (clear enable) them again.
		 */
		gicd_write(sc, GICD_ICENABLERn(group), valid);

		/*
		 * Count how many are valid.
		 */
		sc->sc_gic_lines += popcount32(valid);
		sc->sc_gic_valid_lines[group] = valid;
	}

	aprint_normal(": Generic Interrupt Controller, "
	    "%zu sources (%zu valid)\n",
	    sc->sc_pic.pic_maxsources, sc->sc_gic_lines);
	aprint_debug_dev(sc->sc_dev, "Architecture version %d"
	    " (0x%x:%d rev %d)\n", iidr_arch, iidr_imp, iidr_prod,
	    iidr_rev);

#ifdef MULTIPROCESSOR
	sc->sc_pic.pic_cpus = kcpuset_running;
#endif
	pic_add(&sc->sc_pic, 0);

	/*
	 * Force the GICD to IPL_HIGH and then enable interrupts.
	 */
	struct cpu_info * const ci = curcpu();
	KASSERTMSG(ci->ci_cpl == IPL_HIGH, "ipl %d not IPL_HIGH", ci->ci_cpl);
	armgic_set_priority(&sc->sc_pic, ci->ci_cpl);	// set PMR
	gicd_write(sc, GICD_CTRL, GICD_CTRL_Enable);	// enable Distributer
	gicc_write(sc, GICC_CTRL, GICC_CTRL_V1_Enable);	// enable CPU interrupts
	cpsie(I32_bit);					// allow interrupt exceptions

	/*
	 * For each line that isn't valid, we set the intrsource for it to
	 * point at a dummy source so that pic_intr_establish will fail for it.
	 */
	for (size_t i = 0, group = 0;
	     i < sc->sc_pic.pic_maxsources;
	     i += 32, group++) {
		uint32_t invalid = ~sc->sc_gic_valid_lines[group];
		for (size_t j = 0; invalid && j < 32; j++, invalid >>= 1) {
			if (invalid & 1) {
				sc->sc_pic.pic_sources[i + j] =
				     &armgic_dummy_source;
			}
		}
	}
#ifdef __HAVE_PIC_FAST_SOFTINTS
	/* One SGI per fast-softint level; see softint_init_md(). */
	intr_establish_xname(SOFTINT_BIO, IPL_SOFTBIO, IST_MPSAFE | IST_EDGE,
	    pic_handle_softint, (void *)SOFTINT_BIO, "softint bio");
	intr_establish_xname(SOFTINT_CLOCK, IPL_SOFTCLOCK, IST_MPSAFE | IST_EDGE,
	    pic_handle_softint, (void *)SOFTINT_CLOCK, "softint clock");
	intr_establish_xname(SOFTINT_NET, IPL_SOFTNET, IST_MPSAFE | IST_EDGE,
	    pic_handle_softint, (void *)SOFTINT_NET, "softint net");
	intr_establish_xname(SOFTINT_SERIAL, IPL_SOFTSERIAL, IST_MPSAFE | IST_EDGE,
	    pic_handle_softint, (void *)SOFTINT_SERIAL, "softint serial");
#endif
#ifdef MULTIPROCESSOR
	armgic_cpu_init(&sc->sc_pic, curcpu());

	/* IPI SGIs starting at ARMGIC_SGI_IPIBASE. */
	intr_establish_xname(ARMGIC_SGI_IPIBASE + IPI_AST, IPL_VM,
	    IST_MPSAFE | IST_EDGE, pic_ipi_ast, (void *)-1, "IPI ast");
	intr_establish_xname(ARMGIC_SGI_IPIBASE + IPI_XCALL, IPL_HIGH,
	    IST_MPSAFE | IST_EDGE, pic_ipi_xcall, (void *)-1, "IPI xcall");
	intr_establish_xname(ARMGIC_SGI_IPIBASE + IPI_GENERIC, IPL_HIGH,
	    IST_MPSAFE | IST_EDGE, pic_ipi_generic, (void *)-1, "IPI generic");
	intr_establish_xname(ARMGIC_SGI_IPIBASE + IPI_NOP, IPL_VM,
	    IST_MPSAFE | IST_EDGE, pic_ipi_nop, (void *)-1, "IPI nop");
	intr_establish_xname(ARMGIC_SGI_IPIBASE + IPI_SHOOTDOWN, IPL_SCHED,
	    IST_MPSAFE | IST_EDGE, pic_ipi_shootdown, (void *)-1, "IPI shootdown");
#ifdef DDB
	intr_establish_xname(ARMGIC_SGI_IPIBASE + IPI_DDB, IPL_HIGH,
	    IST_MPSAFE | IST_EDGE, pic_ipi_ddb, NULL, "IPI ddb");
#endif
#ifdef __HAVE_PREEMPTION
	intr_establish_xname(ARMGIC_SGI_IPIBASE + IPI_KPREEMPT, IPL_VM,
	    IST_MPSAFE | IST_EDGE, pic_ipi_kpreempt, (void *)-1, "IPI kpreempt");
#endif
#endif

	/* Lines 0-15 are SGIs, 16-31 PPIs; the rest are SPIs. */
	const u_int ppis = popcount32(sc->sc_gic_valid_lines[0] >> 16);
	const u_int sgis = popcount32(sc->sc_gic_valid_lines[0] & 0xffff);
	aprint_normal_dev(sc->sc_dev, "%u Priorities, %zu SPIs, %u PPIs, "
	    "%u SGIs\n",  priorities, sc->sc_gic_lines - ppis - sgis, ppis,
	    sgis);
}
715
/* Autoconf attachment; no per-device softc (the static armgic_softc is used). */
CFATTACH_DECL_NEW(armgic, 0,
    armgic_match, armgic_attach, NULL, NULL);
718