/*	$NetBSD: pic.c,v 1.51 2019/12/24 20:37:44 skrll Exp $	*/
/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _INTR_PRIVATE
#include "opt_ddb.h"
#include "opt_multiprocessor.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pic.c,v 1.51 2019/12/24 20:37:44 skrll Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/interrupt.h>
#include <sys/xcall.h>
#include <sys/ipi.h>

#include <arm/armreg.h>
#include <arm/cpufunc.h>
#include <arm/locore.h>	/* for compat aarch64 */

#ifdef DDB
#include <arm/db_machdep.h>
#endif

#include <arm/pic/picvar.h>

#if defined(__HAVE_PIC_PENDING_INTRS)
/*
 * This implementation of pending interrupts on a MULTIPROCESSOR system
 * assumes that all of a PIC's (pic_softc's) interrupts are taken on the
 * same CPU.  In other words, interrupts from a single PIC will not be
 * distributed among multiple CPUs.
 */
struct pic_pending {
	volatile uint32_t blocked_pics;
	volatile uint32_t pending_pics;
	volatile uint32_t pending_ipls;
};
static uint32_t
	pic_find_pending_irqs_by_ipl(struct pic_softc *, size_t, uint32_t, int);
static struct pic_softc *
	pic_list_find_pic_by_pending_ipl(struct pic_pending *, uint32_t);
static void
	pic_deliver_irqs(struct pic_pending *, struct pic_softc *, int, void *);
static void
	pic_list_deliver_irqs(struct pic_pending *, register_t, int, void *);

#ifdef MULTIPROCESSOR
percpu_t *pic_pending_percpu;
#else
struct pic_pending pic_pending;
#endif /* MULTIPROCESSOR */
#endif /* __HAVE_PIC_PENDING_INTRS */

struct pic_softc *pic_list[PIC_MAXPICS];
#if PIC_MAXPICS > 32
#error PIC_MAXPICS > 32 not supported
#endif
struct intrsource *pic_sources[PIC_MAXMAXSOURCES];
struct intrsource *pic__iplsources[PIC_MAXMAXSOURCES];
struct intrsource **pic_iplsource[NIPL] = {
	[0 ... NIPL-1] = pic__iplsources,
};
size_t pic_ipl_offset[NIPL+1];

static kmutex_t pic_lock;
static size_t pic_sourcebase;
static struct evcnt pic_deferral_ev =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "deferred", "intr");
EVCNT_ATTACH_STATIC(pic_deferral_ev);

static int pic_init(void);

#ifdef __HAVE_PIC_SET_PRIORITY
void
pic_set_priority(struct cpu_info *ci, int newipl)
{
	register_t psw = cpsid(I32_bit);
	if (pic_list[0] != NULL)
		(pic_list[0]->pic_ops->pic_set_priority)(pic_list[0], newipl);
	ci->ci_cpl = newipl;
	if ((psw & I32_bit) == 0)
		cpsie(I32_bit);
}
#endif

#ifdef MULTIPROCESSOR
int
pic_ipi_ast(void *arg)
{
	setsoftast(curcpu());
	return 1;
}

int
pic_ipi_nop(void *arg)
{
	/* do nothing */
	return 1;
}

int
pic_ipi_xcall(void *arg)
{
	xc_ipi_handler();
	return 1;
}

int
pic_ipi_generic(void *arg)
{
	ipi_cpu_handler();
	return 1;
}

#ifdef DDB
int
pic_ipi_ddb(void *arg)
{
//	printf("%s: %s: tf=%p\n", __func__, curcpu()->ci_cpuname, arg);
	kdb_trap(-1, arg);
	return 1;
}
#endif /* DDB */

#ifdef __HAVE_PREEMPTION
int
pic_ipi_kpreempt(void *arg)
{
	atomic_or_uint(&curcpu()->ci_astpending, __BIT(1));
	return 1;
}
#endif /* __HAVE_PREEMPTION */

void
intr_cpu_init(struct cpu_info *ci)
{
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic != NULL && pic->pic_ops->pic_cpu_init != NULL) {
			(*pic->pic_ops->pic_cpu_init)(pic, ci);
		}
	}
}

typedef void (*pic_ipi_send_func_t)(struct pic_softc *, u_long);

void
intr_ipi_send(const kcpuset_t *kcp, u_long ipi)
{
	struct cpu_info * const ci = curcpu();
	KASSERT(ipi < NIPI);
	bool __diagused sent_p = false;
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_cpus == NULL)
			continue;
		if (kcp == NULL || kcpuset_intersecting_p(kcp, pic->pic_cpus)) {
			// never send to ourselves
			if (pic->pic_cpus == ci->ci_kcpuset)
				continue;

			(*pic->pic_ops->pic_ipi_send)(pic, kcp, ipi);
			// If we were targeting a single CPU or this pic
			// handles all CPUs, we're done.
			if (kcp != NULL || pic->pic_cpus == kcpuset_running)
				return;
			sent_p = true;
		}
	}
	KASSERT(cold || sent_p || ncpu <= 1);
}
#endif /* MULTIPROCESSOR */
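
/*
 * Illustrative sketch (editorial note, not part of this file): a caller
 * that wants to broadcast an IPI to every other CPU passes a NULL
 * cpuset, while targeting a single CPU passes that CPU's ci_kcpuset.
 * The IPI numbers here (IPI_XCALL, IPI_NOP) are assumed to come from
 * the port's <arm/cpu.h>:
 *
 *	intr_ipi_send(NULL, IPI_XCALL);		// all CPUs but ourselves
 *	intr_ipi_send(ci->ci_kcpuset, IPI_NOP);	// just the CPU 'ci'
 */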

#ifdef __HAVE_PIC_FAST_SOFTINTS
int
pic_handle_softint(void *arg)
{
	void softint_switch(lwp_t *, int);
	struct cpu_info * const ci = curcpu();
	const size_t softint = (size_t) arg;
	int s = splhigh();
	ci->ci_intr_depth--;	// don't count these as interrupts
	softint_switch(ci->ci_softlwps[softint], s);
	ci->ci_intr_depth++;
	splx(s);
	return 1;
}
#endif

int
pic_handle_intr(void *arg)
{
	struct pic_softc * const pic = arg;
	int rv;

	rv = (*pic->pic_ops->pic_find_pending_irqs)(pic);

	return rv > 0;
}

#if defined(__HAVE_PIC_PENDING_INTRS)
void
pic_mark_pending_source(struct pic_softc *pic, struct intrsource *is)
{
	const uint32_t ipl_mask = __BIT(is->is_ipl);

	atomic_or_32(&pic->pic_pending_irqs[is->is_irq >> 5],
	    __BIT(is->is_irq & 0x1f));

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
#ifdef MULTIPROCESSOR
	struct pic_pending *pend = percpu_getref(pic_pending_percpu);
#else
	struct pic_pending *pend = &pic_pending;
#endif
	atomic_or_32(&pend->pending_ipls, ipl_mask);
	atomic_or_32(&pend->pending_pics, __BIT(pic->pic_id));
#ifdef MULTIPROCESSOR
	percpu_putref(pic_pending_percpu);
#endif
}
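
/*
 * Worked example of the word/bit split above (editorial note): pending
 * IRQs are kept as an array of 32-bit words, so an IRQ number splits
 * into a word index (irq >> 5) and a bit within that word (irq & 0x1f).
 * For irq 37: 37 >> 5 == 1 and 37 & 0x1f == 5, so bit 5 of
 * pic_pending_irqs[1] gets set.
 */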

void
pic_mark_pending(struct pic_softc *pic, int irq)
{
	struct intrsource * const is = pic->pic_sources[irq];

	KASSERT(irq < pic->pic_maxsources);
	KASSERT(is != NULL);

	pic_mark_pending_source(pic, is);
}

uint32_t
pic_mark_pending_sources(struct pic_softc *pic, size_t irq_base,
	uint32_t pending)
{
	struct intrsource ** const isbase = &pic->pic_sources[irq_base];
	struct intrsource *is;
	volatile uint32_t *ipending = &pic->pic_pending_irqs[irq_base >> 5];
	uint32_t ipl_mask = 0;

	if (pending == 0)
		return ipl_mask;

	KASSERT((irq_base & 31) == 0);

	(*pic->pic_ops->pic_block_irqs)(pic, irq_base, pending);

	atomic_or_32(ipending, pending);
	while (pending != 0) {
		int n = ffs(pending);
		if (n-- == 0)
			break;
		is = isbase[n];
		KASSERT(is != NULL);
		KASSERT(irq_base <= is->is_irq && is->is_irq < irq_base + 32);
		pending &= ~__BIT(n);
		ipl_mask |= __BIT(is->is_ipl);
	}

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
#ifdef MULTIPROCESSOR
	struct pic_pending *pend = percpu_getref(pic_pending_percpu);
#else
	struct pic_pending *pend = &pic_pending;
#endif
	atomic_or_32(&pend->pending_ipls, ipl_mask);
	atomic_or_32(&pend->pending_pics, __BIT(pic->pic_id));
#ifdef MULTIPROCESSOR
	percpu_putref(pic_pending_percpu);
#endif
	return ipl_mask;
}

uint32_t
pic_find_pending_irqs_by_ipl(struct pic_softc *pic, size_t irq_base,
	uint32_t pending, int ipl)
{
	uint32_t ipl_irq_mask = 0;
	uint32_t irq_mask;

	for (;;) {
		int irq = ffs(pending);
		if (irq-- == 0)
			return ipl_irq_mask;

		irq_mask = __BIT(irq);
#if 1
		KASSERTMSG(pic->pic_sources[irq_base + irq] != NULL,
		    "%s: irq_base %zu irq %d\n", __func__, irq_base, irq);
#else
		if (pic->pic_sources[irq_base + irq] == NULL) {
			aprint_error("stray interrupt? irq_base=%zu irq=%d\n",
			    irq_base, irq);
		} else
#endif
		if (pic->pic_sources[irq_base + irq]->is_ipl == ipl)
			ipl_irq_mask |= irq_mask;

		pending &= ~irq_mask;
	}
}
#endif /* __HAVE_PIC_PENDING_INTRS */

void
pic_dispatch(struct intrsource *is, void *frame)
{
	int (*func)(void *) = is->is_func;
	void *arg = is->is_arg;

	if (__predict_false(arg == NULL)) {
		if (__predict_false(frame == NULL)) {
			pic_deferral_ev.ev_count++;
			return;
		}
		arg = frame;
	}

#ifdef MULTIPROCESSOR
	if (!is->is_mpsafe) {
		KERNEL_LOCK(1, NULL);
		const u_int ci_blcnt __diagused = curcpu()->ci_biglock_count;
		const u_int l_blcnt __diagused = curlwp->l_blcnt;
		(void)(*func)(arg);
		KASSERT(ci_blcnt == curcpu()->ci_biglock_count);
		KASSERT(l_blcnt == curlwp->l_blcnt);
		KERNEL_UNLOCK_ONE(NULL);
	} else
#endif
		(void)(*func)(arg);

	struct pic_percpu * const pcpu = percpu_getref(is->is_pic->pic_percpu);
	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	pcpu->pcpu_evs[is->is_irq].ev_count++;
	percpu_putref(is->is_pic->pic_percpu);
}

#if defined(__HAVE_PIC_PENDING_INTRS)
void
pic_deliver_irqs(struct pic_pending *pend, struct pic_softc *pic, int ipl,
    void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct intrsource *is;
	volatile uint32_t *ipending = pic->pic_pending_irqs;
	volatile uint32_t *iblocked = pic->pic_blocked_irqs;
	size_t irq_base;
#if PIC_MAXSOURCES > 32
	size_t irq_count;
	int poi = 0;		/* possibility of interrupting */
#endif
	uint32_t pending_irqs;
	uint32_t blocked_irqs;
	int irq;
	bool progress __diagused = false;

	KASSERT(pic->pic_pending_ipls & ipl_mask);

	irq_base = 0;
#if PIC_MAXSOURCES > 32
	irq_count = 0;
#endif

	for (;;) {
		pending_irqs = pic_find_pending_irqs_by_ipl(pic, irq_base,
		    *ipending, ipl);
		KASSERT((pending_irqs & *ipending) == pending_irqs);
		KASSERT((pending_irqs & ~(*ipending)) == 0);
		if (pending_irqs == 0) {
#if PIC_MAXSOURCES > 32
			irq_count += 32;
			if (__predict_true(irq_count >= pic->pic_maxsources)) {
				if (!poi)
					/* Interrupts at this IPL were all handled. */
					break;
				irq_base = 0;
				irq_count = 0;
				poi = 0;
				ipending = pic->pic_pending_irqs;
				iblocked = pic->pic_blocked_irqs;
			} else {
				irq_base += 32;
				ipending++;
				iblocked++;
				KASSERT(irq_base <= pic->pic_maxsources);
			}
			continue;
#else
			break;
#endif
		}
		progress = true;
		blocked_irqs = 0;
		do {
			irq = ffs(pending_irqs) - 1;
			KASSERT(irq >= 0);

			atomic_and_32(ipending, ~__BIT(irq));
			is = pic->pic_sources[irq_base + irq];
			if (is != NULL) {
				cpsie(I32_bit);
				pic_dispatch(is, frame);
				cpsid(I32_bit);
#if PIC_MAXSOURCES > 32
				/*
				 * An interrupt may have arrived between
				 * the cpsie() and the cpsid(), so rescan
				 * from the first word before finishing.
				 */
				poi = 1;
#endif
				blocked_irqs |= __BIT(irq);
			} else {
				KASSERT(0);
			}
			pending_irqs = pic_find_pending_irqs_by_ipl(pic,
			    irq_base, *ipending, ipl);
		} while (pending_irqs);
		if (blocked_irqs) {
			atomic_or_32(iblocked, blocked_irqs);
			atomic_or_32(&pend->blocked_pics, __BIT(pic->pic_id));
		}
	}

	KASSERT(progress);
	/*
	 * Since interrupts are disabled, we don't have to be too careful
	 * about these.
	 */
	if (atomic_and_32_nv(&pic->pic_pending_ipls, ~ipl_mask) == 0)
		atomic_and_32(&pend->pending_pics, ~__BIT(pic->pic_id));
}

static void
pic_list_unblock_irqs(struct pic_pending *pend)
{
	uint32_t blocked_pics = pend->blocked_pics;

	pend->blocked_pics = 0;

	for (;;) {
		struct pic_softc *pic;
#if PIC_MAXSOURCES > 32
		volatile uint32_t *iblocked;
		uint32_t blocked;
		size_t irq_base;
#endif

		int pic_id = ffs(blocked_pics);
		if (pic_id-- == 0)
			return;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
#if PIC_MAXSOURCES > 32
		for (irq_base = 0, iblocked = pic->pic_blocked_irqs;
		     irq_base < pic->pic_maxsources;
		     irq_base += 32, iblocked++) {
			if ((blocked = *iblocked) != 0) {
				(*pic->pic_ops->pic_unblock_irqs)(pic,
				    irq_base, blocked);
				atomic_and_32(iblocked, ~blocked);
			}
		}
#else
		KASSERT(pic->pic_blocked_irqs[0] != 0);
		(*pic->pic_ops->pic_unblock_irqs)(pic,
		    0, pic->pic_blocked_irqs[0]);
		pic->pic_blocked_irqs[0] = 0;
#endif
		blocked_pics &= ~__BIT(pic_id);
	}
}

struct pic_softc *
pic_list_find_pic_by_pending_ipl(struct pic_pending *pend, uint32_t ipl_mask)
{
	uint32_t pending_pics = pend->pending_pics;
	struct pic_softc *pic;

	for (;;) {
		int pic_id = ffs(pending_pics);
		if (pic_id-- == 0)
			return NULL;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
		if (pic->pic_pending_ipls & ipl_mask)
			return pic;
		pending_pics &= ~__BIT(pic_id);
	}
}

void
pic_list_deliver_irqs(struct pic_pending *pend, register_t psw, int ipl,
    void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct pic_softc *pic;

	while ((pic = pic_list_find_pic_by_pending_ipl(pend, ipl_mask)) != NULL) {
		pic_deliver_irqs(pend, pic, ipl, frame);
		KASSERT((pic->pic_pending_ipls & ipl_mask) == 0);
	}
	atomic_and_32(&pend->pending_ipls, ~ipl_mask);
}
#endif /* __HAVE_PIC_PENDING_INTRS */

void
pic_do_pending_ints(register_t psw, int newipl, void *frame)
{
	struct cpu_info * const ci = curcpu();
	if (__predict_false(newipl == IPL_HIGH)) {
		KASSERTMSG(ci->ci_cpl == IPL_HIGH, "cpl %d", ci->ci_cpl);
		return;
	}
#if defined(__HAVE_PIC_PENDING_INTRS)
#ifdef MULTIPROCESSOR
	struct pic_pending *pend = percpu_getref(pic_pending_percpu);
#else
	struct pic_pending *pend = &pic_pending;
#endif
	while ((pend->pending_ipls & ~__BIT(newipl)) > __BIT(newipl)) {
		KASSERT(pend->pending_ipls < __BIT(NIPL));
		for (;;) {
			int ipl = 31 - __builtin_clz(pend->pending_ipls);
			KASSERT(ipl < NIPL);
			if (ipl <= newipl)
				break;

			pic_set_priority(ci, ipl);
			pic_list_deliver_irqs(pend, psw, ipl, frame);
			pic_list_unblock_irqs(pend);
		}
	}
#ifdef MULTIPROCESSOR
	percpu_putref(pic_pending_percpu);
#endif
#endif /* __HAVE_PIC_PENDING_INTRS */
#ifdef __HAVE_PREEMPTION
	if (newipl == IPL_NONE && (ci->ci_astpending & __BIT(1))) {
		pic_set_priority(ci, IPL_SCHED);
		kpreempt(0);
	}
#endif
	if (ci->ci_cpl != newipl)
		pic_set_priority(ci, newipl);
}
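
/*
 * Worked example of the loop above (editorial note): with
 * pending_ipls == 0x50 (IPL 6 and IPL 4 pending) and newipl == 3,
 * 31 - __builtin_clz(0x50) picks IPL 6 first; it is delivered and
 * unblocked, then IPL 4 is picked.  Once the highest remaining
 * pending IPL is <= newipl the inner loop stops, and the priority
 * finally drops to newipl.
 */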

static void
pic_percpu_allocate(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct pic_softc * const pic = v1;

	pcpu->pcpu_evs = kmem_zalloc(pic->pic_maxsources * sizeof(pcpu->pcpu_evs[0]),
	    KM_SLEEP);
	KASSERT(pcpu->pcpu_evs != NULL);

#define	PCPU_NAMELEN	32
#ifdef DIAGNOSTIC
	const size_t namelen = strlen(pic->pic_name) + 4 + strlen(ci->ci_data.cpu_name);
#endif

	KASSERT(namelen < PCPU_NAMELEN);
	pcpu->pcpu_name = kmem_alloc(PCPU_NAMELEN, KM_SLEEP);
#ifdef MULTIPROCESSOR
	snprintf(pcpu->pcpu_name, PCPU_NAMELEN,
	    "%s (%s)", pic->pic_name, ci->ci_data.cpu_name);
#else
	strlcpy(pcpu->pcpu_name, pic->pic_name, PCPU_NAMELEN);
#endif
	pcpu->pcpu_magic = PICPERCPU_MAGIC;
#if 0
	printf("%s: %s %s: <%s>\n",
	    __func__, ci->ci_data.cpu_name, pic->pic_name,
	    pcpu->pcpu_name);
#endif
}

#if defined(__HAVE_PIC_PENDING_INTRS) && defined(MULTIPROCESSOR)
static void
pic_pending_zero(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_pending * const p = v0;
	memset(p, 0, sizeof(*p));
}
#endif /* __HAVE_PIC_PENDING_INTRS && MULTIPROCESSOR */

static int
pic_init(void)
{

	mutex_init(&pic_lock, MUTEX_DEFAULT, IPL_HIGH);

	return 0;
}

void
pic_add(struct pic_softc *pic, int irqbase)
{
	int slot, maybe_slot = -1;
	size_t sourcebase;
	static ONCE_DECL(pic_once);

	RUN_ONCE(&pic_once, pic_init);

	KASSERT(strlen(pic->pic_name) > 0);

#if defined(__HAVE_PIC_PENDING_INTRS) && defined(MULTIPROCESSOR)
	if (__predict_false(pic_pending_percpu == NULL)) {
		pic_pending_percpu = percpu_alloc(sizeof(struct pic_pending));

		/*
		 * Now zero the per-cpu pending data.
		 */
		percpu_foreach(pic_pending_percpu, pic_pending_zero, NULL);
	}
#endif /* __HAVE_PIC_PENDING_INTRS && MULTIPROCESSOR */

	mutex_enter(&pic_lock);
	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const xpic = pic_list[slot];
		if (xpic == NULL) {
			if (maybe_slot < 0)
				maybe_slot = slot;
			if (irqbase < 0)
				break;
			continue;
		}
		if (irqbase < 0 || xpic->pic_irqbase < 0)
			continue;
		if (irqbase >= xpic->pic_irqbase + xpic->pic_maxsources)
			continue;
		if (irqbase + pic->pic_maxsources <= xpic->pic_irqbase)
			continue;
		panic("pic_add: pic %s (%zu sources @ irq %u) conflicts"
		    " with pic %s (%zu sources @ irq %u)",
		    pic->pic_name, pic->pic_maxsources, irqbase,
		    xpic->pic_name, xpic->pic_maxsources, xpic->pic_irqbase);
	}
	slot = maybe_slot;
#if 0
	printf("%s: pic_sourcebase=%zu pic_maxsources=%zu\n",
	    pic->pic_name, pic_sourcebase, pic->pic_maxsources);
#endif
	KASSERTMSG(pic->pic_maxsources <= PIC_MAXSOURCES, "%zu",
	    pic->pic_maxsources);
	KASSERT(pic_sourcebase + pic->pic_maxsources <= PIC_MAXMAXSOURCES);
	sourcebase = pic_sourcebase;
	pic_sourcebase += pic->pic_maxsources;

	mutex_exit(&pic_lock);

	/*
	 * Allocate a pointer to each cpu's evcnts and then, for each cpu,
	 * allocate its evcnts and then attach an evcnt for each pin.
	 * We can't allocate the evcnt structures directly since
	 * percpu will move the contents of percpu memory around and
	 * corrupt the pointers in the evcnts themselves.  Remember, any
	 * problem can be solved with sufficient indirection.
	 */
	pic->pic_percpu = percpu_alloc(sizeof(struct pic_percpu));

	/*
	 * Now allocate the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_allocate, pic);

	pic->pic_sources = &pic_sources[sourcebase];
	pic->pic_irqbase = irqbase;
	pic->pic_id = slot;
#ifdef __HAVE_PIC_SET_PRIORITY
	KASSERT((slot == 0) == (pic->pic_ops->pic_set_priority != NULL));
#endif
#ifdef MULTIPROCESSOR
	KASSERT((pic->pic_cpus != NULL) == (pic->pic_ops->pic_ipi_send != NULL));
#endif
	pic_list[slot] = pic;
}
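
/*
 * Illustrative sketch (editorial note, hypothetical driver names): a
 * PIC driver's attach routine fills in its pic_softc (pic_ops,
 * pic_maxsources, pic_name) and then registers it:
 *
 *	sc->sc_pic.pic_ops = &mypic_ops;	// hypothetical pic_ops
 *	sc->sc_pic.pic_maxsources = 32;
 *	snprintf(sc->sc_pic.pic_name, sizeof(sc->sc_pic.pic_name), "mypic");
 *	pic_add(&sc->sc_pic, 0);		// claim global irqs 0..31
 *
 * Passing an irqbase of -1 registers the PIC without claiming a global
 * IRQ number range (the conflict checks above are then skipped).
 */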

int
pic_alloc_irq(struct pic_softc *pic)
{
	int irq;

	for (irq = 0; irq < pic->pic_maxsources; irq++) {
		if (pic->pic_sources[irq] == NULL)
			return irq;
	}

	return -1;
}

static void
pic_percpu_evcnt_attach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	evcnt_attach_dynamic(&pcpu->pcpu_evs[is->is_irq], EVCNT_TYPE_INTR, NULL,
	    pcpu->pcpu_name, is->is_source);
}

void *
pic_establish_intr(struct pic_softc *pic, int irq, int ipl, int type,
	int (*func)(void *), void *arg, const char *xname)
{
	struct intrsource *is;
	int off, nipl;

	if (pic->pic_sources[irq]) {
		printf("pic_establish_intr: pic %s irq %d already present\n",
		    pic->pic_name, irq);
		return NULL;
	}

	is = kmem_zalloc(sizeof(*is), KM_SLEEP);
	is->is_pic = pic;
	is->is_irq = irq;
	is->is_ipl = ipl;
	is->is_type = type & 0xff;
	is->is_func = func;
	is->is_arg = arg;
#ifdef MULTIPROCESSOR
	is->is_mpsafe = (type & IST_MPSAFE) || ipl != IPL_VM;
#endif

	if (pic->pic_ops->pic_source_name)
		(*pic->pic_ops->pic_source_name)(pic, irq, is->is_source,
		    sizeof(is->is_source));
	else
		snprintf(is->is_source, sizeof(is->is_source), "irq %d", irq);

	/*
	 * Now attach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_attach, is);

	pic->pic_sources[irq] = is;

	/*
	 * First try to use an existing empty slot.
	 */
	for (off = pic_ipl_offset[ipl]; off < pic_ipl_offset[ipl+1]; off++) {
		if (pic__iplsources[off] == NULL) {
			is->is_iplidx = off - pic_ipl_offset[ipl];
			pic__iplsources[off] = is;
			goto unblock;
		}
	}

	/*
	 * Move up all the sources by one.
	 */
	if (ipl < NIPL) {
		off = pic_ipl_offset[ipl+1];
		memmove(&pic__iplsources[off+1], &pic__iplsources[off],
		    sizeof(pic__iplsources[0]) * (pic_ipl_offset[NIPL] - off));
	}

	/*
	 * Advance the offset of all IPLs higher than this.  Include an
	 * extra one as well.  Thus the number of sources per ipl is
	 * pic_ipl_offset[ipl+1] - pic_ipl_offset[ipl].
	 */
	for (nipl = ipl + 1; nipl <= NIPL; nipl++)
		pic_ipl_offset[nipl]++;

	/*
	 * Insert into the position created above, at the end of this IPL's
	 * sources.
	 */
	off = pic_ipl_offset[ipl + 1] - 1;
	is->is_iplidx = off - pic_ipl_offset[ipl];
	pic__iplsources[off] = is;

	(*pic->pic_ops->pic_establish_irq)(pic, is);

unblock:
	(*pic->pic_ops->pic_unblock_irqs)(pic, is->is_irq & ~0x1f,
	    __BIT(is->is_irq & 0x1f));

	if (xname) {
		if (is->is_xname == NULL)
			is->is_xname = kmem_zalloc(INTRDEVNAMEBUF, KM_SLEEP);
		if (is->is_xname[0] != '\0')
			strlcat(is->is_xname, ", ", INTRDEVNAMEBUF);
		strlcat(is->is_xname, xname, INTRDEVNAMEBUF);
	}

	/* We're done. */
	return is;
}
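
/*
 * Worked example of the pic_ipl_offset bookkeeping above (editorial
 * note): pic__iplsources[] holds all sources grouped by IPL, and
 * pic_ipl_offset[ipl] is where IPL 'ipl' begins.  Suppose IPL 4
 * currently spans offsets [10, 12).  Establishing another IPL 4 source
 * with no empty slot shifts everything from offset 12 up by one, bumps
 * pic_ipl_offset[5..NIPL], and places the new source at offset 12, so
 * IPL 4 then spans [10, 13).
 */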

static void
pic_percpu_evcnt_deattach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	evcnt_detach(&pcpu->pcpu_evs[is->is_irq]);
}

void
pic_disestablish_source(struct intrsource *is)
{
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	KASSERT(is == pic->pic_sources[irq]);

	(*pic->pic_ops->pic_block_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
	pic->pic_sources[irq] = NULL;
	pic__iplsources[pic_ipl_offset[is->is_ipl] + is->is_iplidx] = NULL;
	if (is->is_xname != NULL) {
		kmem_free(is->is_xname, INTRDEVNAMEBUF);
		is->is_xname = NULL;
	}
	/*
	 * Now detach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_deattach, is);

	kmem_free(is, sizeof(*is));
}

void *
intr_establish(int irq, int ipl, int type, int (*func)(void *), void *arg)
{
	return intr_establish_xname(irq, ipl, type, func, arg, NULL);
}

void *
intr_establish_xname(int irq, int ipl, int type, int (*func)(void *), void *arg,
    const char *xname)
{
	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		if (pic->pic_irqbase <= irq
		    && irq < pic->pic_irqbase + pic->pic_maxsources) {
			return pic_establish_intr(pic, irq - pic->pic_irqbase,
			    ipl, type, func, arg, xname);
		}
	}

	return NULL;
}
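
/*
 * Illustrative sketch (editorial note, hypothetical driver names): a
 * device driver typically establishes its handler through this
 * interface, checking the returned cookie and keeping it around for a
 * later intr_disestablish():
 *
 *	sc->sc_ih = intr_establish_xname(sc->sc_irq, IPL_VM,
 *	    IST_LEVEL | IST_MPSAFE, mydev_intr, sc, device_xname(self));
 *	if (sc->sc_ih == NULL)
 *		aprint_error_dev(self, "couldn't establish interrupt\n");
 */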

void
intr_disestablish(void *ih)
{
	struct intrsource * const is = ih;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	pic_disestablish_source(is);
}

void
intr_mask(void *ih)
{
	struct intrsource * const is = ih;
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	if (atomic_inc_32_nv(&is->is_mask_count) == 1)
		(*pic->pic_ops->pic_block_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
}

void
intr_unmask(void *ih)
{
	struct intrsource * const is = ih;
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	if (atomic_dec_32_nv(&is->is_mask_count) == 0)
		(*pic->pic_ops->pic_unblock_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
}
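
/*
 * Editorial note on the mask counter above: intr_mask()/intr_unmask()
 * nest, so only the first mask blocks the IRQ at the PIC and only the
 * matching final unmask unblocks it.  A sketch:
 *
 *	intr_mask(ih);		// count 0 -> 1, pic_block_irqs()
 *	intr_mask(ih);		// count 1 -> 2, no hardware change
 *	intr_unmask(ih);	// count 2 -> 1, no hardware change
 *	intr_unmask(ih);	// count 1 -> 0, pic_unblock_irqs()
 */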

const char *
intr_string(intr_handle_t irq, char *buf, size_t len)
{
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		if (pic->pic_irqbase <= irq
		    && irq < pic->pic_irqbase + pic->pic_maxsources) {
			struct intrsource * const is = pic->pic_sources[irq - pic->pic_irqbase];
			snprintf(buf, len, "%s %s", pic->pic_name, is->is_source);
			return buf;
		}
	}

	return NULL;
}

static struct intrsource *
intr_get_source(const char *intrid)
{
	struct intrsource *is;
	intrid_t buf;
	size_t slot;
	int irq;

	KASSERT(mutex_owned(&cpu_lock));

	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		for (irq = 0; irq < pic->pic_maxsources; irq++) {
			is = pic->pic_sources[irq];
			if (is == NULL || is->is_source[0] == '\0')
				continue;

			snprintf(buf, sizeof(buf), "%s %s", pic->pic_name, is->is_source);
			if (strcmp(buf, intrid) == 0)
				return is;
		}
	}

	return NULL;
}

struct intrids_handler *
interrupt_construct_intrids(const kcpuset_t *cpuset)
{
	struct intrids_handler *iih;
	struct intrsource *is;
	int count, irq, n;
	size_t slot;

	if (kcpuset_iszero(cpuset))
		return NULL;

	count = 0;
	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic != NULL && pic->pic_irqbase >= 0) {
			for (irq = 0; irq < pic->pic_maxsources; irq++) {
				is = pic->pic_sources[irq];
				if (is && is->is_source[0] != '\0')
					count++;
			}
		}
	}

	iih = kmem_zalloc(sizeof(int) + sizeof(intrid_t) * count, KM_SLEEP);
	iih->iih_nids = count;

	for (n = 0, slot = 0; n < count && slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		for (irq = 0; irq < pic->pic_maxsources; irq++) {
			is = pic->pic_sources[irq];
			if (is == NULL || is->is_source[0] == '\0')
				continue;

			snprintf(iih->iih_intrids[n++], sizeof(intrid_t), "%s %s",
			    pic->pic_name, is->is_source);
		}
	}

	return iih;
}

void
interrupt_destruct_intrids(struct intrids_handler *iih)
{
	if (iih == NULL)
		return;

	kmem_free(iih, sizeof(int) + sizeof(intrid_t) * iih->iih_nids);
}
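
/*
 * Illustrative sketch (editorial note): a caller enumerating interrupt
 * IDs pairs the construct/destruct functions, e.g.
 *
 *	struct intrids_handler *iih;
 *
 *	iih = interrupt_construct_intrids(kcpuset_running);
 *	if (iih != NULL) {
 *		for (int i = 0; i < iih->iih_nids; i++)
 *			printf("%s\n", iih->iih_intrids[i]);
 *		interrupt_destruct_intrids(iih);
 *	}
 */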

void
interrupt_get_available(kcpuset_t *cpuset)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	kcpuset_zero(cpuset);

	mutex_enter(&cpu_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0)
			kcpuset_set(cpuset, cpu_index(ci));
	}
	mutex_exit(&cpu_lock);
}

void
interrupt_get_devname(const char *intrid, char *buf, size_t len)
{
	struct intrsource *is;

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is == NULL || is->is_xname == NULL)
		buf[0] = '\0';
	else
		strlcpy(buf, is->is_xname, len);
	mutex_exit(&cpu_lock);
}

struct interrupt_get_count_arg {
	struct intrsource *is;
	uint64_t count;
	u_int cpu_idx;
};

static void
interrupt_get_count_cb(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct interrupt_get_count_arg * const arg = v1;

	if (arg->cpu_idx != cpu_index(ci))
		return;

	arg->count = pcpu->pcpu_evs[arg->is->is_irq].ev_count;
}

uint64_t
interrupt_get_count(const char *intrid, u_int cpu_idx)
{
	struct interrupt_get_count_arg arg;
	struct intrsource *is;
	uint64_t count;

	count = 0;

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is != NULL && is->is_pic != NULL) {
		arg.is = is;
		arg.count = 0;
		arg.cpu_idx = cpu_idx;
		percpu_foreach(is->is_pic->pic_percpu, interrupt_get_count_cb, &arg);
		count = arg.count;
	}
	mutex_exit(&cpu_lock);

	return count;
}

#ifdef MULTIPROCESSOR
void
interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset)
{
	struct intrsource *is;
	struct pic_softc *pic;

	kcpuset_zero(cpuset);

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is != NULL) {
		pic = is->is_pic;
		if (pic && pic->pic_ops->pic_get_affinity)
			pic->pic_ops->pic_get_affinity(pic, is->is_irq, cpuset);
	}
	mutex_exit(&cpu_lock);
}

int
interrupt_distribute_handler(const char *intrid, const kcpuset_t *newset,
    kcpuset_t *oldset)
{
	struct intrsource *is;
	int error;

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is == NULL) {
		error = ENOENT;
	} else {
		error = interrupt_distribute(is, newset, oldset);
	}
	mutex_exit(&cpu_lock);

	return error;
}

int
interrupt_distribute(void *ih, const kcpuset_t *newset, kcpuset_t *oldset)
{
	struct intrsource * const is = ih;
	struct pic_softc * const pic = is->is_pic;

	if (pic == NULL)
		return EOPNOTSUPP;
	if (pic->pic_ops->pic_set_affinity == NULL ||
	    pic->pic_ops->pic_get_affinity == NULL)
		return EOPNOTSUPP;

	if (!is->is_mpsafe)
		return EINVAL;

	if (oldset != NULL)
		pic->pic_ops->pic_get_affinity(pic, is->is_irq, oldset);

	return pic->pic_ops->pic_set_affinity(pic, is->is_irq, newset);
}
#endif