/*	$NetBSD: pic.c,v 1.77 2021/12/21 07:07:32 skrll Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _INTR_PRIVATE
#include "opt_ddb.h"
#include "opt_multiprocessor.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pic.c,v 1.77 2021/12/21 07:07:32 skrll Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/interrupt.h>
#include <sys/intr.h>
#include <sys/ipi.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/xcall.h>

#include <arm/armreg.h>
#include <arm/cpufunc.h>
#include <arm/locore.h>	/* for compat aarch64 */

#ifdef DDB
#include <arm/db_machdep.h>
#endif

#include <arm/pic/picvar.h>

#if defined(__HAVE_PIC_PENDING_INTRS)
/*
 * This implementation of pending interrupts on a MULTIPROCESSOR system
 * assumes that all of a PIC's (pic_softc) interrupts are taken on the
 * same CPU.  In other words, interrupts from a single PIC will not be
 * distributed among multiple CPUs.
 */
static uint32_t
	pic_find_pending_irqs_by_ipl(struct pic_softc *, size_t, uint32_t, int);
static struct pic_softc *
	pic_list_find_pic_by_pending_ipl(struct cpu_info *, uint32_t);
static void
	pic_deliver_irqs(struct cpu_info *, struct pic_softc *, int, void *);
static void
	pic_list_deliver_irqs(struct cpu_info *, register_t, int, void *);

#endif /* __HAVE_PIC_PENDING_INTRS */

struct pic_softc *pic_list[PIC_MAXPICS];
#if PIC_MAXPICS > 32
#error PIC_MAXPICS > 32 not supported
#endif
struct intrsource *pic_sources[PIC_MAXMAXSOURCES];
struct intrsource *pic__iplsources[PIC_MAXMAXSOURCES];
size_t pic_ipl_offset[NIPL + 1];

static kmutex_t pic_lock;
static size_t pic_sourcebase;
static int pic_lastbase;
static struct evcnt pic_deferral_ev =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "deferred", "intr");
EVCNT_ATTACH_STATIC(pic_deferral_ev);

static int pic_init(void);

#ifdef __HAVE_PIC_SET_PRIORITY
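/*
 * Change the hardware interrupt priority on the root PIC (slot 0) and
 * record the new IPL in ci_cpl, with interrupts masked around the update.
 */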
void
pic_set_priority(struct cpu_info *ci, int newipl)
{
	register_t psw = DISABLE_INTERRUPT_SAVE();
	if (pic_list[0] != NULL)
		(pic_list[0]->pic_ops->pic_set_priority)(pic_list[0], newipl);
	ci->ci_cpl = newipl;
	if ((psw & I32_bit) == 0) {
		ENABLE_INTERRUPT();
	}
}
#endif

#ifdef MULTIPROCESSOR
int
pic_ipi_ast(void *arg)
{
	setsoftast(curcpu());
	return 1;
}

int
pic_ipi_nop(void *arg)
{
	/* do nothing */
	return 1;
}

int
pic_ipi_xcall(void *arg)
{
	xc_ipi_handler();
	return 1;
}

int
pic_ipi_generic(void *arg)
{
	ipi_cpu_handler();
	return 1;
}

#ifdef DDB
int
pic_ipi_ddb(void *arg)
{
//	printf("%s: %s: tf=%p\n", __func__, curcpu()->ci_cpuname, arg);
	kdb_trap(-1, arg);
	return 1;
}
#endif /* DDB */

#ifdef __HAVE_PREEMPTION
int
pic_ipi_kpreempt(void *arg)
{
	atomic_or_uint(&curcpu()->ci_astpending, __BIT(1));
	return 1;
}
#endif /* __HAVE_PREEMPTION */

void
intr_cpu_init(struct cpu_info *ci)
{
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic != NULL && pic->pic_ops->pic_cpu_init != NULL) {
			(*pic->pic_ops->pic_cpu_init)(pic, ci);
		}
	}
}

typedef void (*pic_ipi_send_func_t)(struct pic_softc *, u_long);

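/*
 * Send an IPI to the CPUs in kcp, or to every other CPU if kcp is NULL.
 * Each candidate PIC advertises the CPUs it can signal via pic_cpus.
 */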
void
intr_ipi_send(const kcpuset_t *kcp, u_long ipi)
{
	struct cpu_info * const ci = curcpu();
	KASSERT(ipi < NIPI);
	KASSERT(kcp == NULL || kcpuset_countset(kcp) == 1);
	bool __diagused sent_p = false;
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_cpus == NULL)
			continue;
		if (kcp == NULL || kcpuset_intersecting_p(kcp, pic->pic_cpus)) {
			/*
			 * Never send to ourself.
			 *
			 * This test uses pointer comparison for systems
			 * that have a pic per cpu, e.g. RPI[23].  GIC sets
			 * pic_cpus to kcpuset_running and handles "not for
			 * self" internally.
			 */
			if (pic->pic_cpus == ci->ci_kcpuset)
				continue;

			(*pic->pic_ops->pic_ipi_send)(pic, kcp, ipi);

			/*
			 * If we were targeting a single CPU or this pic
			 * handles all cpus, we're done.
			 */
			if (kcp != NULL || pic->pic_cpus == kcpuset_running)
				return;
			sent_p = true;
		}
	}
	KASSERTMSG(cold || sent_p || ncpu <= 1, "cold %d sent_p %d ncpu %d",
	    cold, sent_p, ncpu);
}
#endif /* MULTIPROCESSOR */

#ifdef __HAVE_PIC_FAST_SOFTINTS
int
pic_handle_softint(void *arg)
{
	void softint_switch(lwp_t *, int);
	struct cpu_info * const ci = curcpu();
	const size_t softint = (size_t) arg;
	int s = splhigh();
	ci->ci_intr_depth--;	// don't count these as interrupts
	softint_switch(ci->ci_softlwps[softint], s);
	ci->ci_intr_depth++;
	splx(s);
	return 1;
}
#endif

int
pic_handle_intr(void *arg)
{
	struct pic_softc * const pic = arg;
	int rv;

	rv = (*pic->pic_ops->pic_find_pending_irqs)(pic);

	return rv > 0;
}

#if defined(__HAVE_PIC_PENDING_INTRS)
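/*
 * Pending interrupts are tracked as bitmaps: pic_pending_irqs has one
 * bit per source (word irq >> 5, bit irq & 0x1f), pic_pending_ipls has
 * one bit per IPL, and the per-cpu ci_pending_pics notes which PICs
 * have anything outstanding.
 */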
void
pic_mark_pending_source(struct pic_softc *pic, struct intrsource *is)
{
	const uint32_t ipl_mask = __BIT(is->is_ipl);
	struct cpu_info * const ci = curcpu();

	atomic_or_32(&pic->pic_pending_irqs[is->is_irq >> 5],
	    __BIT(is->is_irq & 0x1f));

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
	ci->ci_pending_ipls |= ipl_mask;
	ci->ci_pending_pics |= __BIT(pic->pic_id);
}

void
pic_mark_pending(struct pic_softc *pic, int irq)
{
	struct intrsource * const is = pic->pic_sources[irq];

	KASSERT(irq < pic->pic_maxsources);
	KASSERT(is != NULL);

	pic_mark_pending_source(pic, is);
}

uint32_t
pic_mark_pending_sources(struct pic_softc *pic, size_t irq_base,
    uint32_t pending)
{
	struct intrsource ** const isbase = &pic->pic_sources[irq_base];
	struct cpu_info * const ci = curcpu();
	struct intrsource *is;
	volatile uint32_t *ipending = &pic->pic_pending_irqs[irq_base >> 5];
	uint32_t ipl_mask = 0;

	if (pending == 0)
		return ipl_mask;

	KASSERT((irq_base & 31) == 0);

	(*pic->pic_ops->pic_block_irqs)(pic, irq_base, pending);

	atomic_or_32(ipending, pending);
	while (pending != 0) {
		int n = ffs(pending);
		if (n-- == 0)
			break;
		is = isbase[n];
		KASSERT(is != NULL);
		KASSERT(irq_base <= is->is_irq && is->is_irq < irq_base + 32);
		pending &= ~__BIT(n);
		ipl_mask |= __BIT(is->is_ipl);
	}

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
	ci->ci_pending_ipls |= ipl_mask;
	ci->ci_pending_pics |= __BIT(pic->pic_id);

	return ipl_mask;
}

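/*
 * Reduce a 32-bit pending word to just those sources whose handlers
 * run at exactly the given IPL.
 */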
uint32_t
pic_find_pending_irqs_by_ipl(struct pic_softc *pic, size_t irq_base,
	uint32_t pending, int ipl)
{
	uint32_t ipl_irq_mask = 0;
	uint32_t irq_mask;

	for (;;) {
		int irq = ffs(pending);
		if (irq-- == 0)
			return ipl_irq_mask;

		irq_mask = __BIT(irq);
#if 1
		KASSERTMSG(pic->pic_sources[irq_base + irq] != NULL,
		   "%s: irq_base %zu irq %d\n", __func__, irq_base, irq);
#else
		if (pic->pic_sources[irq_base + irq] == NULL) {
			aprint_error("stray interrupt? irq_base=%zu irq=%d\n",
			    irq_base, irq);
		} else
#endif
		if (pic->pic_sources[irq_base + irq]->is_ipl == ipl)
			ipl_irq_mask |= irq_mask;

		pending &= ~irq_mask;
	}
}
#endif /* __HAVE_PIC_PENDING_INTRS */

void
pic_dispatch(struct intrsource *is, void *frame)
{
	int (*func)(void *) = is->is_func;
	void *arg = is->is_arg;

	if (__predict_false(arg == NULL)) {
		if (__predict_false(frame == NULL)) {
			pic_deferral_ev.ev_count++;
			return;
		}
		arg = frame;
	}

#ifdef MULTIPROCESSOR
	if (!is->is_mpsafe) {
		KERNEL_LOCK(1, NULL);
		const u_int ci_blcnt __diagused = curcpu()->ci_biglock_count;
		const u_int l_blcnt __diagused = curlwp->l_blcnt;
		(void)(*func)(arg);
		KASSERT(ci_blcnt == curcpu()->ci_biglock_count);
		KASSERT(l_blcnt == curlwp->l_blcnt);
		KERNEL_UNLOCK_ONE(NULL);
	} else
#endif
		(void)(*func)(arg);

	struct pic_percpu * const pcpu = percpu_getref(is->is_pic->pic_percpu);
	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	pcpu->pcpu_evs[is->is_irq].ev_count++;
	percpu_putref(is->is_pic->pic_percpu);
}

#if defined(__HAVE_PIC_PENDING_INTRS)
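/*
 * Deliver all pending interrupts of the given IPL for this PIC.  Each
 * handler runs with interrupts enabled, and its source stays blocked
 * until pic_list_unblock_irqs() re-enables it.
 */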
void
pic_deliver_irqs(struct cpu_info *ci, struct pic_softc *pic, int ipl,
    void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct intrsource *is;
	volatile uint32_t *ipending = pic->pic_pending_irqs;
	volatile uint32_t *iblocked = pic->pic_blocked_irqs;
	size_t irq_base;
#if PIC_MAXSOURCES > 32
	size_t irq_count;
	int poi = 0;		/* Possibility of interrupting */
#endif
	uint32_t pending_irqs;
	uint32_t blocked_irqs;
	int irq;
	bool progress __diagused = false;

	KASSERT(pic->pic_pending_ipls & ipl_mask);

	irq_base = 0;
#if PIC_MAXSOURCES > 32
	irq_count = 0;
#endif

	for (;;) {
		pending_irqs = pic_find_pending_irqs_by_ipl(pic, irq_base,
		    *ipending, ipl);
		KASSERT((pending_irqs & *ipending) == pending_irqs);
		KASSERT((pending_irqs & ~(*ipending)) == 0);
		if (pending_irqs == 0) {
#if PIC_MAXSOURCES > 32
			irq_count += 32;
			if (__predict_true(irq_count >= pic->pic_maxsources)) {
				if (!poi)
					/* Interrupt at this level was handled. */
					break;
				irq_base = 0;
				irq_count = 0;
				poi = 0;
				ipending = pic->pic_pending_irqs;
				iblocked = pic->pic_blocked_irqs;
			} else {
				irq_base += 32;
				ipending++;
				iblocked++;
				KASSERT(irq_base <= pic->pic_maxsources);
			}
			continue;
#else
			break;
#endif
		}
		progress = true;
		blocked_irqs = 0;
		do {
			irq = ffs(pending_irqs) - 1;
			KASSERT(irq >= 0);

			atomic_and_32(ipending, ~__BIT(irq));
			is = pic->pic_sources[irq_base + irq];
			if (is != NULL) {
				ENABLE_INTERRUPT();
				pic_dispatch(is, frame);
				DISABLE_INTERRUPT();
#if PIC_MAXSOURCES > 32
				/*
				 * There is a possibility of interrupting
				 * from ENABLE_INTERRUPT() to
				 * DISABLE_INTERRUPT().
				 */
				poi = 1;
#endif
				blocked_irqs |= __BIT(irq);
			} else {
				KASSERT(0);
			}
			pending_irqs = pic_find_pending_irqs_by_ipl(pic,
			    irq_base, *ipending, ipl);
		} while (pending_irqs);
		if (blocked_irqs) {
			atomic_or_32(iblocked, blocked_irqs);
			ci->ci_blocked_pics |= __BIT(pic->pic_id);
		}
	}

	KASSERT(progress);
	/*
	 * Since interrupts are disabled, we don't have to be too careful
	 * about these.
	 */
	if (atomic_and_32_nv(&pic->pic_pending_ipls, ~ipl_mask) == 0)
		ci->ci_pending_pics &= ~__BIT(pic->pic_id);
}

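/*
 * Unblock, on every PIC noted in ci_blocked_pics, the sources that were
 * blocked while their handlers ran.
 */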
static void
pic_list_unblock_irqs(struct cpu_info *ci)
{
	uint32_t blocked_pics = ci->ci_blocked_pics;

	ci->ci_blocked_pics = 0;

	for (;;) {
		struct pic_softc *pic;
#if PIC_MAXSOURCES > 32
		volatile uint32_t *iblocked;
		uint32_t blocked;
		size_t irq_base;
#endif

		int pic_id = ffs(blocked_pics);
		if (pic_id-- == 0)
			return;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
#if PIC_MAXSOURCES > 32
		for (irq_base = 0, iblocked = pic->pic_blocked_irqs;
		     irq_base < pic->pic_maxsources;
		     irq_base += 32, iblocked++) {
			if ((blocked = *iblocked) != 0) {
				(*pic->pic_ops->pic_unblock_irqs)(pic,
				    irq_base, blocked);
				atomic_and_32(iblocked, ~blocked);
			}
		}
#else
		KASSERT(pic->pic_blocked_irqs[0] != 0);
		(*pic->pic_ops->pic_unblock_irqs)(pic,
		    0, pic->pic_blocked_irqs[0]);
		pic->pic_blocked_irqs[0] = 0;
#endif
		blocked_pics &= ~__BIT(pic_id);
	}
}

struct pic_softc *
pic_list_find_pic_by_pending_ipl(struct cpu_info *ci, uint32_t ipl_mask)
{
	uint32_t pending_pics = ci->ci_pending_pics;
	struct pic_softc *pic;

	for (;;) {
		int pic_id = ffs(pending_pics);
		if (pic_id-- == 0)
			return NULL;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
		if (pic->pic_pending_ipls & ipl_mask)
			return pic;
		pending_pics &= ~__BIT(pic_id);
	}
}

void
pic_list_deliver_irqs(struct cpu_info *ci, register_t psw, int ipl,
    void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct pic_softc *pic;

	while ((pic = pic_list_find_pic_by_pending_ipl(ci, ipl_mask)) != NULL) {
		pic_deliver_irqs(ci, pic, ipl, frame);
		KASSERT((pic->pic_pending_ipls & ipl_mask) == 0);
	}
	ci->ci_pending_ipls &= ~ipl_mask;
}
#endif /* __HAVE_PIC_PENDING_INTRS */

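/*
 * Deliver pending interrupts from the highest pending IPL down to (but
 * not including) newipl, raising the priority to each level as it is
 * processed, then drop the priority to newipl.
 */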
void
pic_do_pending_ints(register_t psw, int newipl, void *frame)
{
	struct cpu_info * const ci = curcpu();
	if (__predict_false(newipl == IPL_HIGH)) {
		KASSERTMSG(ci->ci_cpl == IPL_HIGH, "cpl %d", ci->ci_cpl);
		return;
	}
#if defined(__HAVE_PIC_PENDING_INTRS)
	while ((ci->ci_pending_ipls & ~__BIT(newipl)) > __BIT(newipl)) {
		KASSERT(ci->ci_pending_ipls < __BIT(NIPL));
		for (;;) {
			int ipl = 31 - __builtin_clz(ci->ci_pending_ipls);
			KASSERT(ipl < NIPL);
			if (ipl <= newipl)
				break;

			pic_set_priority(ci, ipl);
			pic_list_deliver_irqs(ci, psw, ipl, frame);
			pic_list_unblock_irqs(ci);
		}
	}
#endif /* __HAVE_PIC_PENDING_INTRS */
#ifdef __HAVE_PREEMPTION
	if (newipl == IPL_NONE && (ci->ci_astpending & __BIT(1))) {
		pic_set_priority(ci, IPL_SCHED);
		kpreempt(0);
	}
#endif
	if (ci->ci_cpl != newipl)
		pic_set_priority(ci, newipl);
}

static void
pic_percpu_allocate(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct pic_softc * const pic = v1;

	pcpu->pcpu_evs = kmem_zalloc(pic->pic_maxsources * sizeof(pcpu->pcpu_evs[0]),
	    KM_SLEEP);
	KASSERT(pcpu->pcpu_evs != NULL);

#define	PCPU_NAMELEN	32
#ifdef DIAGNOSTIC
	const size_t namelen = strlen(pic->pic_name) + 4 + strlen(ci->ci_data.cpu_name);
#endif

	KASSERT(namelen < PCPU_NAMELEN);
	pcpu->pcpu_name = kmem_alloc(PCPU_NAMELEN, KM_SLEEP);
#ifdef MULTIPROCESSOR
	snprintf(pcpu->pcpu_name, PCPU_NAMELEN,
	    "%s (%s)", pic->pic_name, ci->ci_data.cpu_name);
#else
	strlcpy(pcpu->pcpu_name, pic->pic_name, PCPU_NAMELEN);
#endif
	pcpu->pcpu_magic = PICPERCPU_MAGIC;
#if 0
	printf("%s: %s %s: <%s>\n",
	    __func__, ci->ci_data.cpu_name, pic->pic_name,
	    pcpu->pcpu_name);
#endif
}

static int
pic_init(void)
{

	mutex_init(&pic_lock, MUTEX_DEFAULT, IPL_HIGH);

	return 0;
}

int
pic_add(struct pic_softc *pic, int irqbase)
{
	int slot, maybe_slot = -1;
	size_t sourcebase;
	static ONCE_DECL(pic_once);

	ASSERT_SLEEPABLE();

	RUN_ONCE(&pic_once, pic_init);

	KASSERT(strlen(pic->pic_name) > 0);

	mutex_enter(&pic_lock);
	if (irqbase == PIC_IRQBASE_ALLOC) {
		irqbase = pic_lastbase;
	}
	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const xpic = pic_list[slot];
		if (xpic == NULL) {
			if (maybe_slot < 0)
				maybe_slot = slot;
			if (irqbase < 0)
				break;
			continue;
		}
		if (irqbase < 0 || xpic->pic_irqbase < 0)
			continue;
		if (irqbase >= xpic->pic_irqbase + xpic->pic_maxsources)
			continue;
		if (irqbase + pic->pic_maxsources <= xpic->pic_irqbase)
			continue;
		panic("pic_add: pic %s (%zu sources @ irq %u) conflicts"
		    " with pic %s (%zu sources @ irq %u)",
		    pic->pic_name, pic->pic_maxsources, irqbase,
		    xpic->pic_name, xpic->pic_maxsources, xpic->pic_irqbase);
	}
	slot = maybe_slot;
#if 0
	printf("%s: pic_sourcebase=%zu pic_maxsources=%zu\n",
	    pic->pic_name, pic_sourcebase, pic->pic_maxsources);
#endif
	KASSERTMSG(pic->pic_maxsources <= PIC_MAXSOURCES, "%zu",
	    pic->pic_maxsources);
	KASSERT(pic_sourcebase + pic->pic_maxsources <= PIC_MAXMAXSOURCES);
	sourcebase = pic_sourcebase;
	pic_sourcebase += pic->pic_maxsources;
	if (pic_lastbase < irqbase + pic->pic_maxsources)
		pic_lastbase = irqbase + pic->pic_maxsources;
	mutex_exit(&pic_lock);

	/*
	 * Allocate a pointer to each cpu's evcnts and then, for each cpu,
	 * allocate its evcnts and then attach an evcnt for each pin.
	 * We can't allocate the evcnt structures directly since
	 * percpu will move the contents of percpu memory around and
	 * corrupt the pointers in the evcnts themselves.  Remember, any
	 * problem can be solved with sufficient indirection.
	 */
	pic->pic_percpu = percpu_create(sizeof(struct pic_percpu),
	    pic_percpu_allocate, NULL, pic);

	pic->pic_sources = &pic_sources[sourcebase];
	pic->pic_irqbase = irqbase;
	pic->pic_id = slot;
#ifdef __HAVE_PIC_SET_PRIORITY
	KASSERT((slot == 0) == (pic->pic_ops->pic_set_priority != NULL));
#endif
#ifdef MULTIPROCESSOR
	KASSERT((pic->pic_cpus != NULL) == (pic->pic_ops->pic_ipi_send != NULL));
#endif
	pic_list[slot] = pic;

	return irqbase;
}
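
/*
 * Example (hypothetical PIC driver attach; foo_pic_ops and sc are
 * placeholders, not part of this file):
 *
 *	sc->sc_pic.pic_ops = &foo_pic_ops;
 *	sc->sc_pic.pic_maxsources = 32;
 *	strlcpy(sc->sc_pic.pic_name, "foo", sizeof(sc->sc_pic.pic_name));
 *	sc->sc_irqbase = pic_add(&sc->sc_pic, PIC_IRQBASE_ALLOC);
 */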

int
pic_alloc_irq(struct pic_softc *pic)
{
	int irq;

	for (irq = 0; irq < pic->pic_maxsources; irq++) {
		if (pic->pic_sources[irq] == NULL)
			return irq;
	}

	return -1;
}

static void
pic_percpu_evcnt_attach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	evcnt_attach_dynamic(&pcpu->pcpu_evs[is->is_irq], EVCNT_TYPE_INTR, NULL,
	    pcpu->pcpu_name, is->is_source);
}

static void
pic_unblock_percpu(void *arg1, void *arg2)
{
	struct pic_softc *pic = arg1;
	struct intrsource *is = arg2;

	(*pic->pic_ops->pic_unblock_irqs)(pic, is->is_irq & ~0x1f,
	    __BIT(is->is_irq & 0x1f));
}

void *
pic_establish_intr(struct pic_softc *pic, int irq, int ipl, int type,
	int (*func)(void *), void *arg, const char *xname)
{
	struct intrsource *is;
	int off, nipl;

	if (pic->pic_sources[irq]) {
		printf("pic_establish_intr: pic %s irq %d already present\n",
		    pic->pic_name, irq);
		return NULL;
	}

	is = kmem_zalloc(sizeof(*is), KM_SLEEP);
	is->is_pic = pic;
	is->is_irq = irq;
	is->is_ipl = ipl;
	is->is_type = type & 0xff;
	is->is_func = func;
	is->is_arg = arg;
#ifdef MULTIPROCESSOR
	is->is_mpsafe = (type & IST_MPSAFE) || ipl != IPL_VM;
#endif

	if (pic->pic_ops->pic_source_name)
		(*pic->pic_ops->pic_source_name)(pic, irq, is->is_source,
		    sizeof(is->is_source));
	else
		snprintf(is->is_source, sizeof(is->is_source), "irq %d", irq);

	/*
	 * Now attach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_attach, is);

	pic->pic_sources[irq] = is;

	/*
	 * First try to use an existing slot which is empty.
	 */
	for (off = pic_ipl_offset[ipl]; off < pic_ipl_offset[ipl + 1]; off++) {
		if (pic__iplsources[off] == NULL) {
			is->is_iplidx = off - pic_ipl_offset[ipl];
			pic__iplsources[off] = is;
			goto unblock;
		}
	}

	/*
	 * Move up all the sources by one.
	 */
	if (ipl < NIPL) {
		off = pic_ipl_offset[ipl + 1];
		memmove(&pic__iplsources[off + 1], &pic__iplsources[off],
		    sizeof(pic__iplsources[0]) * (pic_ipl_offset[NIPL] - off));
	}

	/*
	 * Advance the offset of all IPLs higher than this.  Include an
	 * extra one as well.  Thus the number of sources per ipl is
	 * pic_ipl_offset[ipl + 1] - pic_ipl_offset[ipl].
	 */
	for (nipl = ipl + 1; nipl <= NIPL; nipl++)
		pic_ipl_offset[nipl]++;

	/*
	 * Insert into the previously made position at the end of this IPL's
	 * sources.
	 */
	off = pic_ipl_offset[ipl + 1] - 1;
	is->is_iplidx = off - pic_ipl_offset[ipl];
	pic__iplsources[off] = is;

	(*pic->pic_ops->pic_establish_irq)(pic, is);

unblock:
	if (!mp_online || !is->is_mpsafe || !is->is_percpu) {
		(*pic->pic_ops->pic_unblock_irqs)(pic, is->is_irq & ~0x1f,
		    __BIT(is->is_irq & 0x1f));
	} else {
		uint64_t xc = xc_broadcast(0, pic_unblock_percpu, pic, is);
		xc_wait(xc);
	}

	if (xname) {
		if (is->is_xname == NULL)
			is->is_xname = kmem_zalloc(INTRDEVNAMEBUF, KM_SLEEP);
		if (is->is_xname[0] != '\0')
			strlcat(is->is_xname, ", ", INTRDEVNAMEBUF);
		strlcat(is->is_xname, xname, INTRDEVNAMEBUF);
	}

	/* We're done. */
	return is;
}

static void
pic_percpu_evcnt_deattach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	evcnt_detach(&pcpu->pcpu_evs[is->is_irq]);
}

void
pic_disestablish_source(struct intrsource *is)
{
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	KASSERT(is == pic->pic_sources[irq]);

	(*pic->pic_ops->pic_block_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
	pic->pic_sources[irq] = NULL;
	pic__iplsources[pic_ipl_offset[is->is_ipl] + is->is_iplidx] = NULL;
	if (is->is_xname != NULL) {
		kmem_free(is->is_xname, INTRDEVNAMEBUF);
		is->is_xname = NULL;
	}
	/*
	 * Now detach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_deattach, is);

	kmem_free(is, sizeof(*is));
}

void *
intr_establish(int irq, int ipl, int type, int (*func)(void *), void *arg)
{
	return intr_establish_xname(irq, ipl, type, func, arg, NULL);
}

void *
intr_establish_xname(int irq, int ipl, int type, int (*func)(void *), void *arg,
    const char *xname)
{
	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		if (pic->pic_irqbase <= irq
		    && irq < pic->pic_irqbase + pic->pic_maxsources) {
			return pic_establish_intr(pic, irq - pic->pic_irqbase,
			    ipl, type, func, arg, xname);
		}
	}

	return NULL;
}

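/*
 * Example (hypothetical driver attach; foo_intr and sc are placeholders):
 *
 *	sc->sc_ih = intr_establish_xname(sc->sc_irq, IPL_NET, IST_LEVEL,
 *	    foo_intr, sc, device_xname(self));
 *	if (sc->sc_ih == NULL)
 *		aprint_error_dev(self, "couldn't establish interrupt\n");
 */
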
void
intr_disestablish(void *ih)
{
	struct intrsource * const is = ih;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	pic_disestablish_source(is);
}

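/*
 * intr_mask()/intr_unmask() nest: the source is blocked by the first
 * mask and unblocked only when the mask count returns to zero.
 */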
void
intr_mask(void *ih)
{
	struct intrsource * const is = ih;
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	if (atomic_inc_32_nv(&is->is_mask_count) == 1)
		(*pic->pic_ops->pic_block_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
}

void
intr_unmask(void *ih)
{
	struct intrsource * const is = ih;
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	if (atomic_dec_32_nv(&is->is_mask_count) == 0)
		(*pic->pic_ops->pic_unblock_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
}

const char *
intr_string(intr_handle_t irq, char *buf, size_t len)
{
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		if (pic->pic_irqbase <= irq
		    && irq < pic->pic_irqbase + pic->pic_maxsources) {
			struct intrsource * const is = pic->pic_sources[irq - pic->pic_irqbase];
			snprintf(buf, len, "%s %s", pic->pic_name, is->is_source);
			return buf;
		}
	}

	return NULL;
}

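/*
 * Look up an interrupt source by its id, which has the same
 * "<pic name> <source name>" form that intr_string() produces.
 */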
static struct intrsource *
intr_get_source(const char *intrid)
{
	struct intrsource *is;
	intrid_t buf;
	size_t slot;
	int irq;

	KASSERT(mutex_owned(&cpu_lock));

	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		for (irq = 0; irq < pic->pic_maxsources; irq++) {
			is = pic->pic_sources[irq];
			if (is == NULL || is->is_source[0] == '\0')
				continue;

			snprintf(buf, sizeof(buf), "%s %s", pic->pic_name, is->is_source);
			if (strcmp(buf, intrid) == 0)
				return is;
		}
	}

	return NULL;
}

struct intrids_handler *
interrupt_construct_intrids(const kcpuset_t *cpuset)
{
	struct intrids_handler *iih;
	struct intrsource *is;
	int count, irq, n;
	size_t slot;

	if (kcpuset_iszero(cpuset))
		return NULL;

	count = 0;
	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic != NULL && pic->pic_irqbase >= 0) {
			for (irq = 0; irq < pic->pic_maxsources; irq++) {
				is = pic->pic_sources[irq];
				if (is && is->is_source[0] != '\0')
					count++;
			}
		}
	}

	iih = kmem_zalloc(sizeof(int) + sizeof(intrid_t) * count, KM_SLEEP);
	iih->iih_nids = count;

	for (n = 0, slot = 0; n < count && slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		for (irq = 0; irq < pic->pic_maxsources; irq++) {
			is = pic->pic_sources[irq];
			if (is == NULL || is->is_source[0] == '\0')
				continue;

			snprintf(iih->iih_intrids[n++], sizeof(intrid_t), "%s %s",
			    pic->pic_name, is->is_source);
		}
	}

	return iih;
}

void
interrupt_destruct_intrids(struct intrids_handler *iih)
{
	if (iih == NULL)
		return;

	kmem_free(iih, sizeof(int) + sizeof(intrid_t) * iih->iih_nids);
}

void
interrupt_get_available(kcpuset_t *cpuset)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	kcpuset_zero(cpuset);

	mutex_enter(&cpu_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0)
			kcpuset_set(cpuset, cpu_index(ci));
	}
	mutex_exit(&cpu_lock);
}

void
interrupt_get_devname(const char *intrid, char *buf, size_t len)
{
	struct intrsource *is;

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is == NULL || is->is_xname == NULL)
		buf[0] = '\0';
	else
		strlcpy(buf, is->is_xname, len);
	mutex_exit(&cpu_lock);
}

struct interrupt_get_count_arg {
	struct intrsource *is;
	uint64_t count;
	u_int cpu_idx;
};

static void
interrupt_get_count_cb(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct interrupt_get_count_arg * const arg = v1;

	if (arg->cpu_idx != cpu_index(ci))
		return;

	arg->count = pcpu->pcpu_evs[arg->is->is_irq].ev_count;
}

uint64_t
interrupt_get_count(const char *intrid, u_int cpu_idx)
{
	struct interrupt_get_count_arg arg;
	struct intrsource *is;
	uint64_t count;

	count = 0;

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is != NULL && is->is_pic != NULL) {
		arg.is = is;
		arg.count = 0;
		arg.cpu_idx = cpu_idx;
		percpu_foreach(is->is_pic->pic_percpu, interrupt_get_count_cb, &arg);
		count = arg.count;
	}
	mutex_exit(&cpu_lock);

	return count;
}

#ifdef MULTIPROCESSOR
void
interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset)
{
	struct intrsource *is;
	struct pic_softc *pic;

	kcpuset_zero(cpuset);

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is != NULL) {
		pic = is->is_pic;
		if (pic && pic->pic_ops->pic_get_affinity)
			pic->pic_ops->pic_get_affinity(pic, is->is_irq, cpuset);
	}
	mutex_exit(&cpu_lock);
}

int
interrupt_distribute_handler(const char *intrid, const kcpuset_t *newset,
    kcpuset_t *oldset)
{
	struct intrsource *is;
	int error;

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is == NULL) {
		error = ENOENT;
	} else {
		error = interrupt_distribute(is, newset, oldset);
	}
	mutex_exit(&cpu_lock);

	return error;
}

int
interrupt_distribute(void *ih, const kcpuset_t *newset, kcpuset_t *oldset)
{
	struct intrsource * const is = ih;
	struct pic_softc * const pic = is->is_pic;

	if (pic == NULL)
		return EOPNOTSUPP;
	if (pic->pic_ops->pic_set_affinity == NULL ||
	    pic->pic_ops->pic_get_affinity == NULL)
		return EOPNOTSUPP;

	if (!is->is_mpsafe)
		return EINVAL;

	if (oldset != NULL)
		pic->pic_ops->pic_get_affinity(pic, is->is_irq, oldset);

	return pic->pic_ops->pic_set_affinity(pic, is->is_irq, newset);
}
#endif