/*-
 * Copyright (c) 2015-2016 Svatopluk Kraus
 * Copyright (c) 2015-2016 Michal Meloun
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/kern/subr_intr.c 331017 2018-03-15 19:08:33Z kevans $");

/*
 *	New-style Interrupt Framework
 *
 *  TODO: - add support for disconnected PICs.
 *        - support enabling IPIs (PPIs) on other CPUs that are already
 *          started.
 *        - complete support for removable PICs.
 */

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rman.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <machine/atomic.h>
#include <machine/intr.h>
#include <machine/cpu.h>
#include <machine/smp.h>
#include <machine/stdarg.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include "pic_if.h"
#include "msi_if.h"

#define	INTRNAME_LEN	(2*MAXCOMLEN + 1)

#ifdef DEBUG
#define debugf(fmt, args...) do { printf("%s(): ", __func__);	\
    printf(fmt,##args); } while (0)
#else
#define debugf(fmt, args...)
#endif

MALLOC_DECLARE(M_INTRNG);
MALLOC_DEFINE(M_INTRNG, "intr", "intr interrupt handling");

/* Main interrupt handler called from assembler -> 'hidden' from C code. */
void intr_irq_handler(struct trapframe *tf);

/* Root interrupt controller stuff. */
device_t intr_irq_root_dev;
static intr_irq_filter_t *irq_root_filter;
static void *irq_root_arg;
static u_int irq_root_ipicount;

struct intr_pic_child {
	SLIST_ENTRY(intr_pic_child)	 pc_next;
	struct intr_pic			*pc_pic;
	intr_child_irq_filter_t		*pc_filter;
	void				*pc_filter_arg;
	uintptr_t			 pc_start;
	uintptr_t			 pc_length;
};

/* Interrupt controller definition. */
struct intr_pic {
	SLIST_ENTRY(intr_pic)	pic_next;
	intptr_t		pic_xref;	/* hardware identification */
	device_t		pic_dev;
#define	FLAG_PIC	(1 << 0)
#define	FLAG_MSI	(1 << 1)
	u_int			pic_flags;
	struct mtx		pic_child_lock;
	SLIST_HEAD(, intr_pic_child) pic_children;
};

static struct mtx pic_list_lock;
static SLIST_HEAD(, intr_pic) pic_list;

static struct intr_pic *pic_lookup(device_t dev, intptr_t xref);

/* Interrupt source definition. */
static struct mtx isrc_table_lock;
static struct intr_irqsrc *irq_sources[NIRQ];
u_int irq_next_free;

#ifdef SMP
static boolean_t irq_assign_cpu = FALSE;
#endif
/*
 * - 2 counters for each I/O interrupt.
 * - MAXCPU counters for each IPI for SMP.
 */
#ifdef SMP
#define INTRCNT_COUNT   (NIRQ * 2 + INTR_IPI_COUNT * MAXCPU)
#else
#define INTRCNT_COUNT   (NIRQ * 2)
#endif

/* Data for MI statistics reporting. */
u_long intrcnt[INTRCNT_COUNT];
char intrnames[INTRCNT_COUNT * INTRNAME_LEN];
size_t sintrcnt = sizeof(intrcnt);
size_t sintrnames = sizeof(intrnames);
static u_int intrcnt_index;

static struct intr_irqsrc *intr_map_get_isrc(u_int res_id);
static void intr_map_set_isrc(u_int res_id, struct intr_irqsrc *isrc);
static struct intr_map_data *intr_map_get_map_data(u_int res_id);
static void intr_map_copy_map_data(u_int res_id, device_t *dev, intptr_t *xref,
    struct intr_map_data **data);

/*
 *  Interrupt framework initialization routine.
 */
static void
intr_irq_init(void *dummy __unused)
{

	SLIST_INIT(&pic_list);
	mtx_init(&pic_list_lock, "intr pic list", NULL, MTX_DEF);

	mtx_init(&isrc_table_lock, "intr isrc table", NULL, MTX_DEF);
}
SYSINIT(intr_irq_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_irq_init, NULL);

static void
intrcnt_setname(const char *name, int index)
{

	snprintf(intrnames + INTRNAME_LEN * index, INTRNAME_LEN, "%-*s",
	    INTRNAME_LEN - 1, name);
}

/*
 *  Update the name of an interrupt source from its interrupt event.
 */
static void
intrcnt_updatename(struct intr_irqsrc *isrc)
{

	/* QQQ: What about stray counter name? */
	mtx_assert(&isrc_table_lock, MA_OWNED);
	intrcnt_setname(isrc->isrc_event->ie_fullname, isrc->isrc_index);
}

/*
 *  Increment the interrupt counter of an interrupt source.
 */
static inline void
isrc_increment_count(struct intr_irqsrc *isrc)
{

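	/*
	 * A PPI-class source can fire on several CPUs concurrently, all of
	 * which share this counter, so the increment must be atomic. Other
	 * sources are serviced by one CPU at a time.
	 */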
	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		atomic_add_long(&isrc->isrc_count[0], 1);
	else
		isrc->isrc_count[0]++;
}

/*
 *  Increment the stray interrupt counter of an interrupt source.
 */
static inline void
isrc_increment_straycount(struct intr_irqsrc *isrc)
{

	isrc->isrc_count[1]++;
}

/*
 *  Update the interrupt counter names of an interrupt source.
 */
static void
isrc_update_name(struct intr_irqsrc *isrc, const char *name)
{
	char str[INTRNAME_LEN];

	mtx_assert(&isrc_table_lock, MA_OWNED);

	if (name != NULL) {
		snprintf(str, INTRNAME_LEN, "%s: %s", isrc->isrc_name, name);
		intrcnt_setname(str, isrc->isrc_index);
		snprintf(str, INTRNAME_LEN, "stray %s: %s", isrc->isrc_name,
		    name);
		intrcnt_setname(str, isrc->isrc_index + 1);
	} else {
		snprintf(str, INTRNAME_LEN, "%s:", isrc->isrc_name);
		intrcnt_setname(str, isrc->isrc_index);
		snprintf(str, INTRNAME_LEN, "stray %s:", isrc->isrc_name);
		intrcnt_setname(str, isrc->isrc_index + 1);
	}
}

/*
 *  Set up the interrupt counters of an interrupt source.
 */
static void
isrc_setup_counters(struct intr_irqsrc *isrc)
{
	u_int index;

	/*
	 *  XXX - This does not work well with removable controllers and
	 *        interrupt sources!
	 */
	index = atomic_fetchadd_int(&intrcnt_index, 2);
	isrc->isrc_index = index;
	isrc->isrc_count = &intrcnt[index];
	isrc_update_name(isrc, NULL);
}

/*
 *  Release the interrupt counters of an interrupt source.
 */
static void
isrc_release_counters(struct intr_irqsrc *isrc)
{

	panic("%s: not implemented", __func__);
}

#ifdef SMP
/*
 *  Set up the counters for a named IPI, one counter per CPU.
 */
u_long *
intr_ipi_setup_counters(const char *name)
{
	u_int index, i;
	char str[INTRNAME_LEN];

	index = atomic_fetchadd_int(&intrcnt_index, MAXCPU);
	for (i = 0; i < MAXCPU; i++) {
		snprintf(str, INTRNAME_LEN, "cpu%d:%s", i, name);
		intrcnt_setname(str, index + i);
	}
	return (&intrcnt[index]);
}
#endif

/*
 *  Main interrupt dispatch handler. It is called straight
 *  from the assembler, where the CPU interrupt is taken.
 */
void
intr_irq_handler(struct trapframe *tf)
{
	struct trapframe *oldframe;
	struct thread *td;

	KASSERT(irq_root_filter != NULL, ("%s: no filter", __func__));

	PCPU_INC(cnt.v_intr);
	critical_enter();
	td = curthread;
	oldframe = td->td_intr_frame;
	td->td_intr_frame = tf;
	irq_root_filter(irq_root_arg);
	td->td_intr_frame = oldframe;
	critical_exit();
#ifdef HWPMC_HOOKS
	if (pmc_hook && TRAPF_USERMODE(tf) &&
	    (PCPU_GET(curthread)->td_pflags & TDP_CALLCHAIN))
		pmc_hook(PCPU_GET(curthread), PMC_FN_USER_CALLCHAIN, tf);
#endif
}

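/*
 *  Dispatch an interrupt taken on a parent PIC to the child PIC that
 *  registered a handler for the range containing the IRQ number.
 */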
int
intr_child_irq_handler(struct intr_pic *parent, uintptr_t irq)
{
	struct intr_pic_child *child;
	bool found;

	found = false;
	mtx_lock_spin(&parent->pic_child_lock);
	SLIST_FOREACH(child, &parent->pic_children, pc_next) {
		if (child->pc_start <= irq &&
		    irq < (child->pc_start + child->pc_length)) {
			found = true;
			break;
		}
	}
	mtx_unlock_spin(&parent->pic_child_lock);

	if (found)
		return (child->pc_filter(child->pc_filter_arg, irq));

	return (FILTER_STRAY);
}

/*
 *  Interrupt source dispatch function. It should be called straight
 *  from the interrupt controller, once the associated interrupt source
 *  is determined.
 */
int
intr_isrc_dispatch(struct intr_irqsrc *isrc, struct trapframe *tf)
{

	KASSERT(isrc != NULL, ("%s: no source", __func__));

	isrc_increment_count(isrc);

#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL) {
		int error;
		error = isrc->isrc_filter(isrc->isrc_arg, tf);
		PIC_POST_FILTER(isrc->isrc_dev, isrc);
		if (error == FILTER_HANDLED)
			return (0);
	} else
#endif
	if (isrc->isrc_event != NULL) {
		if (intr_event_handle(isrc->isrc_event, tf) == 0)
			return (0);
	}

	isrc_increment_straycount(isrc);
	return (EINVAL);
}

/*
 *  Allocate a unique interrupt number (resource handle) for an interrupt
 *  source.
 *
 *  There could be various strategies for allocating a free interrupt number
 *  (resource handle) for a new interrupt source. This implementation uses
 *  the following one:
 *
 *  1. Handles are always allocated forward, so handles are not recycled
 *     immediately. However, if only one free handle is left, it ends up
 *     being reused constantly...
 */
static inline int
isrc_alloc_irq(struct intr_irqsrc *isrc)
{
	u_int maxirqs, irq;

	mtx_assert(&isrc_table_lock, MA_OWNED);

	maxirqs = nitems(irq_sources);
	if (irq_next_free >= maxirqs)
		return (ENOSPC);

	for (irq = irq_next_free; irq < maxirqs; irq++) {
		if (irq_sources[irq] == NULL)
			goto found;
	}
	for (irq = 0; irq < irq_next_free; irq++) {
		if (irq_sources[irq] == NULL)
			goto found;
	}

	irq_next_free = maxirqs;
	return (ENOSPC);

found:
	isrc->isrc_irq = irq;
	irq_sources[irq] = isrc;

	irq_next_free = irq + 1;
	if (irq_next_free >= maxirqs)
		irq_next_free = 0;
	return (0);
}

/*
 *  Free the unique interrupt number (resource handle) of an interrupt
 *  source.
 */
static inline int
isrc_free_irq(struct intr_irqsrc *isrc)
{

	mtx_assert(&isrc_table_lock, MA_OWNED);

	if (isrc->isrc_irq >= nitems(irq_sources))
		return (EINVAL);
	if (irq_sources[isrc->isrc_irq] != isrc)
		return (EINVAL);

	irq_sources[isrc->isrc_irq] = NULL;
	isrc->isrc_irq = INTR_IRQ_INVALID;	/* just to be safe */
	return (0);
}

/*
 *  Initialize an interrupt source and register it into the global interrupt
 *  table.
 */
int
intr_isrc_register(struct intr_irqsrc *isrc, device_t dev, u_int flags,
    const char *fmt, ...)
{
	int error;
	va_list ap;

	bzero(isrc, sizeof(struct intr_irqsrc));
	isrc->isrc_dev = dev;
	isrc->isrc_irq = INTR_IRQ_INVALID;	/* just to be safe */
	isrc->isrc_flags = flags;

	va_start(ap, fmt);
	vsnprintf(isrc->isrc_name, INTR_ISRC_NAMELEN, fmt, ap);
	va_end(ap);

	mtx_lock(&isrc_table_lock);
	error = isrc_alloc_irq(isrc);
	if (error != 0) {
		mtx_unlock(&isrc_table_lock);
		return (error);
	}
	/*
	 * Set up interrupt counters, but not for IPI sources. Those are
	 * set up later and only for the ones actually used (up to
	 * INTR_IPI_COUNT), so as not to exhaust our counter pool.
	 */
	if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
		isrc_setup_counters(isrc);
	mtx_unlock(&isrc_table_lock);
	return (0);
}

/*
 *  Deregister an interrupt source from the global interrupt table.
 */
int
intr_isrc_deregister(struct intr_irqsrc *isrc)
{
	int error;

	mtx_lock(&isrc_table_lock);
	if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
		isrc_release_counters(isrc);
	error = isrc_free_irq(isrc);
	mtx_unlock(&isrc_table_lock);
	return (error);
}

#ifdef SMP
/*
 *  A support function for a PIC to decide whether a provided ISRC should be
 *  initialized on a given CPU. The logic of the INTR_ISRCF_BOUND flag and
 *  the isrc_cpu member of struct intr_irqsrc is the following:
 *
 *     If INTR_ISRCF_BOUND is set, the ISRC should be initialized only on
 *     CPUs set in isrc_cpu. If not, the ISRC should be initialized on every
 *     CPU and isrc_cpu is kept consistent with that. Thus isrc_cpu is
 *     always correct.
 */
bool
intr_isrc_init_on_cpu(struct intr_irqsrc *isrc, u_int cpu)
{

	if (isrc->isrc_handlers == 0)
		return (false);
	if ((isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI)) == 0)
		return (false);
	if (isrc->isrc_flags & INTR_ISRCF_BOUND)
		return (CPU_ISSET(cpu, &isrc->isrc_cpu));

	CPU_SET(cpu, &isrc->isrc_cpu);
	return (true);
}
#endif

#ifdef INTR_SOLO
/*
 *  Set up a filter on an interrupt source.
 */
static int
isrc_setup_filter(struct intr_irqsrc *isrc, const char *name,
    intr_irq_filter_t *filter, void *arg, void **cookiep)
{

	if (filter == NULL)
		return (EINVAL);

	mtx_lock(&isrc_table_lock);
	/*
	 * Make sure that we do not mix the two ways
	 * in which we handle interrupt sources.
	 */
	if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
		mtx_unlock(&isrc_table_lock);
		return (EBUSY);
	}
	isrc->isrc_filter = filter;
	isrc->isrc_arg = arg;
	isrc_update_name(isrc, name);
	mtx_unlock(&isrc_table_lock);

	*cookiep = isrc;
	return (0);
}
#endif

/*
 *  Interrupt source pre_ithread method for MI interrupt framework.
 */
static void
intr_isrc_pre_ithread(void *arg)
{
	struct intr_irqsrc *isrc = arg;

	PIC_PRE_ITHREAD(isrc->isrc_dev, isrc);
}

/*
 *  Interrupt source post_ithread method for MI interrupt framework.
 */
static void
intr_isrc_post_ithread(void *arg)
{
	struct intr_irqsrc *isrc = arg;

	PIC_POST_ITHREAD(isrc->isrc_dev, isrc);
}

/*
 *  Interrupt source post_filter method for MI interrupt framework.
 */
static void
intr_isrc_post_filter(void *arg)
{
	struct intr_irqsrc *isrc = arg;

	PIC_POST_FILTER(isrc->isrc_dev, isrc);
}

/*
 *  Interrupt source assign_cpu method for MI interrupt framework.
 */
static int
intr_isrc_assign_cpu(void *arg, int cpu)
{
#ifdef SMP
	struct intr_irqsrc *isrc = arg;
	int error;

	if (isrc->isrc_dev != intr_irq_root_dev)
		return (EINVAL);

	mtx_lock(&isrc_table_lock);
	if (cpu == NOCPU) {
		CPU_ZERO(&isrc->isrc_cpu);
		isrc->isrc_flags &= ~INTR_ISRCF_BOUND;
	} else {
		CPU_SETOF(cpu, &isrc->isrc_cpu);
		isrc->isrc_flags |= INTR_ISRCF_BOUND;
	}

	/*
	 * In the NOCPU case, it's up to the PIC to either leave the ISRC on
	 * the same CPU, re-balance it to another CPU, or enable it on more
	 * CPUs. Either way, the PIC is expected to change isrc_cpu
	 * appropriately to keep us well informed if the call is successful.
	 */
	if (irq_assign_cpu) {
		error = PIC_BIND_INTR(isrc->isrc_dev, isrc);
		if (error) {
			CPU_ZERO(&isrc->isrc_cpu);
			mtx_unlock(&isrc_table_lock);
			return (error);
		}
	}
	mtx_unlock(&isrc_table_lock);
	return (0);
#else
	return (EOPNOTSUPP);
#endif
}

/*
 *  Create an interrupt event for an interrupt source.
 */
static int
isrc_event_create(struct intr_irqsrc *isrc)
{
	struct intr_event *ie;
	int error;

	error = intr_event_create(&ie, isrc, 0, isrc->isrc_irq,
	    intr_isrc_pre_ithread, intr_isrc_post_ithread, intr_isrc_post_filter,
	    intr_isrc_assign_cpu, "%s:", isrc->isrc_name);
	if (error)
		return (error);

	mtx_lock(&isrc_table_lock);
	/*
	 * Make sure that we do not mix the two ways
	 * in which we handle interrupt sources. Let the contested event win.
	 */
#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
#else
	if (isrc->isrc_event != NULL) {
#endif
		mtx_unlock(&isrc_table_lock);
		intr_event_destroy(ie);
		return (isrc->isrc_event != NULL ? EBUSY : 0);
	}
	isrc->isrc_event = ie;
	mtx_unlock(&isrc_table_lock);

	return (0);
}

#ifdef notyet
/*
 *  Destroy the interrupt event of an interrupt source.
 */
static void
isrc_event_destroy(struct intr_irqsrc *isrc)
{
	struct intr_event *ie;

	mtx_lock(&isrc_table_lock);
	ie = isrc->isrc_event;
	isrc->isrc_event = NULL;
	mtx_unlock(&isrc_table_lock);

	if (ie != NULL)
		intr_event_destroy(ie);
}
#endif

/*
 *  Add a handler to an interrupt source.
 */
static int
isrc_add_handler(struct intr_irqsrc *isrc, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, void **cookiep)
{
	int error;

	if (isrc->isrc_event == NULL) {
		error = isrc_event_create(isrc);
		if (error)
			return (error);
	}

	error = intr_event_add_handler(isrc->isrc_event, name, filter, handler,
	    arg, intr_priority(flags), flags, cookiep);
	if (error == 0) {
		mtx_lock(&isrc_table_lock);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}

	return (error);
}

/*
 *  Look up an interrupt controller with the list lock held.
 */
static inline struct intr_pic *
pic_lookup_locked(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	mtx_assert(&pic_list_lock, MA_OWNED);

	if (dev == NULL && xref == 0)
		return (NULL);

	/* Note that pic->pic_dev is never NULL on a registered PIC. */
	SLIST_FOREACH(pic, &pic_list, pic_next) {
		if (dev == NULL) {
			if (xref == pic->pic_xref)
				return (pic);
		} else if (xref == 0 || pic->pic_xref == 0) {
			if (dev == pic->pic_dev)
				return (pic);
		} else if (xref == pic->pic_xref && dev == pic->pic_dev)
			return (pic);
	}
	return (NULL);
}

/*
 *  Look up an interrupt controller.
 */
static struct intr_pic *
pic_lookup(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	mtx_lock(&pic_list_lock);
	pic = pic_lookup_locked(dev, xref);
	mtx_unlock(&pic_list_lock);
	return (pic);
}

/*
 *  Create an interrupt controller.
 */
static struct intr_pic *
pic_create(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	mtx_lock(&pic_list_lock);
	pic = pic_lookup_locked(dev, xref);
	if (pic != NULL) {
		mtx_unlock(&pic_list_lock);
		return (pic);
	}
	pic = malloc(sizeof(*pic), M_INTRNG, M_NOWAIT | M_ZERO);
	if (pic == NULL) {
		mtx_unlock(&pic_list_lock);
		return (NULL);
	}
	pic->pic_xref = xref;
	pic->pic_dev = dev;
	mtx_init(&pic->pic_child_lock, "pic child lock", NULL, MTX_SPIN);
	SLIST_INSERT_HEAD(&pic_list, pic, pic_next);
	mtx_unlock(&pic_list_lock);

	return (pic);
}

#ifdef notyet
/*
 *  Destroy an interrupt controller.
 */
static void
pic_destroy(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	mtx_lock(&pic_list_lock);
	pic = pic_lookup_locked(dev, xref);
	if (pic == NULL) {
		mtx_unlock(&pic_list_lock);
		return;
	}
	SLIST_REMOVE(&pic_list, pic, intr_pic, pic_next);
	mtx_unlock(&pic_list_lock);

	free(pic, M_INTRNG);
}
#endif

/*
 *  Register an interrupt controller.
 */
struct intr_pic *
intr_pic_register(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	if (dev == NULL)
		return (NULL);
	pic = pic_create(dev, xref);
	if (pic == NULL)
		return (NULL);

	pic->pic_flags |= FLAG_PIC;

	debugf("PIC %p registered for %s <dev %p, xref %jx>\n", pic,
	    device_get_nameunit(dev), dev, (uintmax_t)xref);
	return (pic);
}

/*
 *  Unregister an interrupt controller.
 */
int
intr_pic_deregister(device_t dev, intptr_t xref)
{

	panic("%s: not implemented", __func__);
}

/*
 *  Mark an interrupt controller (itself) as the root one.
 *
 *  Note that only an interrupt controller can really know its position
 *  in the interrupt controller tree, so the root PIC must claim itself
 *  as the root.
 *
 *  In the FDT case, according to ePAPR approved version 1.1 from
 *  08 April 2011, page 30:
 *    "The root of the interrupt tree is determined when traversal
 *     of the interrupt tree reaches an interrupt controller node without
 *     an interrupts property and thus no explicit interrupt parent."
 */
int
intr_pic_claim_root(device_t dev, intptr_t xref, intr_irq_filter_t *filter,
    void *arg, u_int ipicount)
{
	struct intr_pic *pic;

	pic = pic_lookup(dev, xref);
	if (pic == NULL) {
		device_printf(dev, "not registered\n");
		return (EINVAL);
	}

	KASSERT((pic->pic_flags & FLAG_PIC) != 0,
	    ("%s: Found a non-PIC controller: %s", __func__,
	     device_get_name(pic->pic_dev)));

	if (filter == NULL) {
		device_printf(dev, "filter missing\n");
		return (EINVAL);
	}

	/*
	 * Only one interrupt controller can be the root for now.
	 * Note that we further assume that there is no threaded interrupt
	 * routine (handler) on the root. See intr_irq_handler().
	 */
	if (intr_irq_root_dev != NULL) {
		device_printf(dev, "another root already set\n");
		return (EBUSY);
	}

	intr_irq_root_dev = dev;
	irq_root_filter = filter;
	irq_root_arg = arg;
	irq_root_ipicount = ipicount;

	debugf("irq root set to %s\n", device_get_nameunit(dev));
	return (0);
}

/*
 * Add a handler to manage a subrange of a parent's interrupts.
 */
struct intr_pic *
intr_pic_add_handler(device_t parent, struct intr_pic *pic,
    intr_child_irq_filter_t *filter, void *arg, uintptr_t start,
    uintptr_t length)
{
	struct intr_pic *parent_pic;
	struct intr_pic_child *newchild;
#ifdef INVARIANTS
	struct intr_pic_child *child;
#endif

	parent_pic = pic_lookup(parent, 0);
	if (parent_pic == NULL)
		return (NULL);

	newchild = malloc(sizeof(*newchild), M_INTRNG, M_WAITOK | M_ZERO);
	newchild->pc_pic = pic;
	newchild->pc_filter = filter;
	newchild->pc_filter_arg = arg;
	newchild->pc_start = start;
	newchild->pc_length = length;

	mtx_lock_spin(&parent_pic->pic_child_lock);
#ifdef INVARIANTS
	SLIST_FOREACH(child, &parent_pic->pic_children, pc_next) {
		KASSERT(child->pc_pic != pic, ("%s: Adding a child PIC twice",
		    __func__));
	}
#endif
	SLIST_INSERT_HEAD(&parent_pic->pic_children, newchild, pc_next);
	mtx_unlock_spin(&parent_pic->pic_child_lock);

	return (pic);
}

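/*
 *  Resolve an interrupt source from interrupt mapping data: MSI mapping
 *  data carries the interrupt source directly, anything else is translated
 *  by the PIC that owns it.
 */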
static int
intr_resolve_irq(device_t dev, intptr_t xref, struct intr_map_data *data,
    struct intr_irqsrc **isrc)
{
	struct intr_pic *pic;
	struct intr_map_data_msi *msi;

	if (data == NULL)
		return (EINVAL);

	pic = pic_lookup(dev, xref);
	if (pic == NULL)
		return (ESRCH);

	switch (data->type) {
	case INTR_MAP_DATA_MSI:
		KASSERT((pic->pic_flags & FLAG_MSI) != 0,
		    ("%s: Found a non-MSI controller: %s", __func__,
		     device_get_name(pic->pic_dev)));
		msi = (struct intr_map_data_msi *)data;
		*isrc = msi->isrc;
		return (0);

	default:
		KASSERT((pic->pic_flags & FLAG_PIC) != 0,
		    ("%s: Found a non-PIC controller: %s", __func__,
		     device_get_name(pic->pic_dev)));
		return (PIC_MAP_INTR(pic->pic_dev, data, isrc));
	}
}

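/*
 *  Activate an interrupt resource: resolve its mapping data to an interrupt
 *  source, record the association in the mapping table, and let the PIC
 *  perform any controller-specific activation.
 */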
int
intr_activate_irq(device_t dev, struct resource *res)
{
	device_t map_dev;
	intptr_t map_xref;
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;
	u_int res_id;
	int error;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	if (intr_map_get_isrc(res_id) != NULL)
		panic("Attempt to double activation of resource id: %u\n",
		    res_id);
	intr_map_copy_map_data(res_id, &map_dev, &map_xref, &data);
	error = intr_resolve_irq(map_dev, map_xref, data, &isrc);
	if (error != 0) {
		free(data, M_INTRNG);
		/* XXX TODO DISCONNECTED PICs */
		/* if (error == EINVAL) return (0); */
		return (error);
	}
	intr_map_set_isrc(res_id, isrc);
	rman_set_virtual(res, data);
	return (PIC_ACTIVATE_INTR(isrc->isrc_dev, isrc, res, data));
}

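/*
 *  Deactivate an interrupt resource and drop the interrupt source
 *  association established by intr_activate_irq().
 */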
int
intr_deactivate_irq(device_t dev, struct resource *res)
{
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;
	u_int res_id;
	int error;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);
	if (isrc == NULL)
		panic("Attempt to deactivate non-active resource id: %u\n",
		    res_id);

	data = rman_get_virtual(res);
	error = PIC_DEACTIVATE_INTR(isrc->isrc_dev, isrc, res, data);
	intr_map_set_isrc(res_id, NULL);
	rman_set_virtual(res, NULL);
	free(data, M_INTRNG);
	return (error);
}

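/*
 *  Install an interrupt handler on an activated interrupt resource; the
 *  underlying source is enabled when its first handler is installed.
 */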
int
intr_setup_irq(device_t dev, struct resource *res, driver_filter_t filt,
    driver_intr_t hand, void *arg, int flags, void **cookiep)
{
	int error;
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;
	const char *name;
	u_int res_id;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);
	if (isrc == NULL) {
		/* XXX TODO DISCONNECTED PICs */
		return (EINVAL);
	}

	data = rman_get_virtual(res);
	name = device_get_nameunit(dev);

#ifdef INTR_SOLO
	/*
	 * Standard handling is done through the MI interrupt framework.
	 * However, some interrupts may request solely their own special
	 * handling. This non-standard handling can be used for interrupt
	 * controllers without a handler (filter only), so when interrupt
	 * controllers are chained, the MI interrupt framework is called
	 * only in the leaf controller.
	 *
	 * Note that the root interrupt controller routine is served this
	 * way as well, however in intr_irq_handler(), i.e. the main system
	 * dispatch routine.
	 */
	if (flags & INTR_SOLO && hand != NULL) {
		debugf("irq %u cannot solo on %s\n", res_id, name);
		return (EINVAL);
	}

	if (flags & INTR_SOLO) {
		error = isrc_setup_filter(isrc, name, (intr_irq_filter_t *)filt,
		    arg, cookiep);
		debugf("irq %u setup filter error %d on %s\n", isrc->isrc_irq,
		    error, name);
	} else
#endif
		{
		error = isrc_add_handler(isrc, name, filt, hand, arg, flags,
		    cookiep);
		debugf("irq %u add handler error %d on %s\n", isrc->isrc_irq,
		    error, name);
	}
	if (error != 0)
		return (error);

	mtx_lock(&isrc_table_lock);
	error = PIC_SETUP_INTR(isrc->isrc_dev, isrc, res, data);
	if (error == 0) {
		isrc->isrc_handlers++;
		if (isrc->isrc_handlers == 1)
			PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
	}
	mtx_unlock(&isrc_table_lock);
	if (error != 0)
		intr_event_remove_handler(*cookiep);
	return (error);
}

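/*
 *  Remove an interrupt handler from an interrupt resource; the underlying
 *  source is disabled when its last handler is removed.
 */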
int
intr_teardown_irq(device_t dev, struct resource *res, void *cookie)
{
	int error;
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;
	u_int res_id;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);

	data = rman_get_virtual(res);

#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL) {
		if (isrc != cookie)
			return (EINVAL);

		mtx_lock(&isrc_table_lock);
		isrc->isrc_filter = NULL;
		isrc->isrc_arg = NULL;
		isrc->isrc_handlers = 0;
		PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
		PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
		isrc_update_name(isrc, NULL);
		mtx_unlock(&isrc_table_lock);
		return (0);
	}
#endif
	if (isrc != intr_handler_source(cookie))
		return (EINVAL);

	error = intr_event_remove_handler(cookie);
	if (error == 0) {
		mtx_lock(&isrc_table_lock);
		isrc->isrc_handlers--;
		if (isrc->isrc_handlers == 0)
			PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
		PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}
	return (error);
}

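/*
 *  Attach a description to an installed interrupt handler and refresh its
 *  statistics counter name accordingly.
 */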
int
intr_describe_irq(device_t dev, struct resource *res, void *cookie,
    const char *descr)
{
	int error;
	struct intr_irqsrc *isrc;
	u_int res_id;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);
#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL) {
		if (isrc != cookie)
			return (EINVAL);

		mtx_lock(&isrc_table_lock);
		isrc_update_name(isrc, descr);
		mtx_unlock(&isrc_table_lock);
		return (0);
	}
#endif
	error = intr_event_describe_handler(isrc->isrc_event, cookie, descr);
	if (error == 0) {
		mtx_lock(&isrc_table_lock);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}
	return (error);
}

#ifdef SMP
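/*
 *  Bind the handlers of an interrupt resource to a given CPU.
 */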
int
intr_bind_irq(device_t dev, struct resource *res, int cpu)
{
	struct intr_irqsrc *isrc;
	u_int res_id;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);
#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL)
		return (intr_isrc_assign_cpu(isrc, cpu));
#endif
	return (intr_event_bind(isrc->isrc_event, cpu));
}

/*
 * Return the CPU that the next interrupt source should use.
 * For now just returns the next CPU according to round-robin.
 */
u_int
intr_irq_next_cpu(u_int last_cpu, cpuset_t *cpumask)
{

	if (!irq_assign_cpu || mp_ncpus == 1)
		return (PCPU_GET(cpuid));

	do {
		last_cpu++;
		if (last_cpu > mp_maxid)
			last_cpu = 0;
	} while (!CPU_ISSET(last_cpu, cpumask));
	return (last_cpu);
}

/*
 *  Distribute all the interrupt sources among the available
 *  CPUs once the APs have been launched.
 */
static void
intr_irq_shuffle(void *arg __unused)
{
	struct intr_irqsrc *isrc;
	u_int i;

	if (mp_ncpus == 1)
		return;

	mtx_lock(&isrc_table_lock);
	irq_assign_cpu = TRUE;
	for (i = 0; i < NIRQ; i++) {
		isrc = irq_sources[i];
		if (isrc == NULL || isrc->isrc_handlers == 0 ||
		    isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI))
			continue;

		if (isrc->isrc_event != NULL &&
		    isrc->isrc_flags & INTR_ISRCF_BOUND &&
		    isrc->isrc_event->ie_cpu != CPU_FFS(&isrc->isrc_cpu) - 1)
			panic("%s: CPU inconsistency", __func__);

		if ((isrc->isrc_flags & INTR_ISRCF_BOUND) == 0)
			CPU_ZERO(&isrc->isrc_cpu); /* start again */

		/*
		 * We are in a wicked position here if the following call
		 * fails for a bound ISRC. The best thing we can do is to
		 * clear isrc_cpu, so that the inconsistency with ie_cpu
		 * will be detectable.
		 */
		if (PIC_BIND_INTR(isrc->isrc_dev, isrc) != 0)
			CPU_ZERO(&isrc->isrc_cpu);
	}
	mtx_unlock(&isrc_table_lock);
}
SYSINIT(intr_irq_shuffle, SI_SUB_SMP, SI_ORDER_SECOND, intr_irq_shuffle, NULL);

#else
u_int
intr_irq_next_cpu(u_int current_cpu, cpuset_t *cpumask)
{

	return (PCPU_GET(cpuid));
}
#endif

/*
 * Allocate memory for a new intr_map_data structure.
 * Initialize common fields.
 */
struct intr_map_data *
intr_alloc_map_data(enum intr_map_data_type type, size_t len, int flags)
{
	struct intr_map_data *data;

	data = malloc(len, M_INTRNG, flags);
	data->type = type;
	data->len = len;
	return (data);
}

void
intr_free_intr_map_data(struct intr_map_data *data)
{

	free(data, M_INTRNG);
}

/*
 *  Register an MSI/MSI-X interrupt controller.
 */
int
intr_msi_register(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	if (dev == NULL)
		return (EINVAL);
	pic = pic_create(dev, xref);
	if (pic == NULL)
		return (ENOMEM);

	pic->pic_flags |= FLAG_MSI;

	debugf("PIC %p registered for %s <dev %p, xref %jx>\n", pic,
	    device_get_nameunit(dev), dev, (uintmax_t)xref);
	return (0);
}

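/*
 *  Allocate count MSI vectors from the controller identified by xref and
 *  map each returned interrupt source to a fresh resource id in irqs[].
 */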
int
intr_alloc_msi(device_t pci, device_t child, intptr_t xref, int count,
    int maxcount, int *irqs)
{
	struct intr_irqsrc **isrc;
	struct intr_pic *pic;
	device_t pdev;
	struct intr_map_data_msi *msi;
	int err, i;

	pic = pic_lookup(NULL, xref);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_MSI) != 0,
	    ("%s: Found a non-MSI controller: %s", __func__,
	     device_get_name(pic->pic_dev)));

	isrc = malloc(sizeof(*isrc) * count, M_INTRNG, M_WAITOK);
	err = MSI_ALLOC_MSI(pic->pic_dev, child, count, maxcount, &pdev, isrc);
	if (err != 0) {
		free(isrc, M_INTRNG);
		return (err);
	}

	for (i = 0; i < count; i++) {
		msi = (struct intr_map_data_msi *)intr_alloc_map_data(
		    INTR_MAP_DATA_MSI, sizeof(*msi), M_WAITOK | M_ZERO);
		msi->isrc = isrc[i];
		irqs[i] = intr_map_irq(pic->pic_dev, xref,
		    (struct intr_map_data *)msi);
	}
	free(isrc, M_INTRNG);

	return (err);
}

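/*
 *  Release previously allocated MSI vectors and unmap their resource ids.
 */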
int
intr_release_msi(device_t pci, device_t child, intptr_t xref, int count,
    int *irqs)
{
	struct intr_irqsrc **isrc;
	struct intr_pic *pic;
	struct intr_map_data_msi *msi;
	int i, err;

	pic = pic_lookup(NULL, xref);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_MSI) != 0,
	    ("%s: Found a non-MSI controller: %s", __func__,
	     device_get_name(pic->pic_dev)));

	isrc = malloc(sizeof(*isrc) * count, M_INTRNG, M_WAITOK);

	for (i = 0; i < count; i++) {
		msi = (struct intr_map_data_msi *)
		    intr_map_get_map_data(irqs[i]);
		KASSERT(msi->hdr.type == INTR_MAP_DATA_MSI,
		    ("%s: irq %d map data is not MSI", __func__,
		    irqs[i]));
		isrc[i] = msi->isrc;
	}

	err = MSI_RELEASE_MSI(pic->pic_dev, child, count, isrc);

	for (i = 0; i < count; i++) {
		if (isrc[i] != NULL)
			intr_unmap_irq(irqs[i]);
	}

	free(isrc, M_INTRNG);
	return (err);
}

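/*
 *  Allocate a single MSI-X vector and map it to a fresh resource id.
 */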
int
intr_alloc_msix(device_t pci, device_t child, intptr_t xref, int *irq)
{
	struct intr_irqsrc *isrc;
	struct intr_pic *pic;
	device_t pdev;
	struct intr_map_data_msi *msi;
	int err;

	pic = pic_lookup(NULL, xref);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_MSI) != 0,
	    ("%s: Found a non-MSI controller: %s", __func__,
	     device_get_name(pic->pic_dev)));

	err = MSI_ALLOC_MSIX(pic->pic_dev, child, &pdev, &isrc);
	if (err != 0)
		return (err);

	msi = (struct intr_map_data_msi *)intr_alloc_map_data(
	    INTR_MAP_DATA_MSI, sizeof(*msi), M_WAITOK | M_ZERO);
	msi->isrc = isrc;
	*irq = intr_map_irq(pic->pic_dev, xref, (struct intr_map_data *)msi);
	return (0);
}

int
intr_release_msix(device_t pci, device_t child, intptr_t xref, int irq)
{
	struct intr_irqsrc *isrc;
	struct intr_pic *pic;
	struct intr_map_data_msi *msi;
	int err;

	pic = pic_lookup(NULL, xref);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_MSI) != 0,
	    ("%s: Found a non-MSI controller: %s", __func__,
	     device_get_name(pic->pic_dev)));

	msi = (struct intr_map_data_msi *)
	    intr_map_get_map_data(irq);
	KASSERT(msi->hdr.type == INTR_MAP_DATA_MSI,
	    ("%s: irq %d map data is not MSI", __func__,
	    irq));
	isrc = msi->isrc;
	if (isrc == NULL) {
		intr_unmap_irq(irq);
		return (EINVAL);
	}

	err = MSI_RELEASE_MSIX(pic->pic_dev, child, isrc);
	intr_unmap_irq(irq);

	return (err);
}

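/*
 *  Retrieve the address/data pair that a device must be programmed with
 *  in order to raise a mapped MSI/MSI-X interrupt.
 */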
int
intr_map_msi(device_t pci, device_t child, intptr_t xref, int irq,
    uint64_t *addr, uint32_t *data)
{
	struct intr_irqsrc *isrc;
	struct intr_pic *pic;
	int err;

	pic = pic_lookup(NULL, xref);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_MSI) != 0,
	    ("%s: Found a non-MSI controller: %s", __func__,
	     device_get_name(pic->pic_dev)));

	isrc = intr_map_get_isrc(irq);
	if (isrc == NULL)
		return (EINVAL);

	err = MSI_MAP_MSI(pic->pic_dev, child, isrc, addr, data);
	return (err);
}

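/*
 *  Empty stub; presumably kept only to satisfy machine-dependent code
 *  that expects a dosoftints() to exist.
 */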
void dosoftints(void);
void
dosoftints(void)
{
}

#ifdef SMP
/*
 *  Initialize the interrupt controller on another CPU.
 */
void
intr_pic_init_secondary(void)
{

	/*
	 * QQQ: Only the root PIC is aware of other CPUs ???
	 */
	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));

	//mtx_lock(&isrc_table_lock);
	PIC_INIT_SECONDARY(intr_irq_root_dev);
	//mtx_unlock(&isrc_table_lock);
}
#endif

#ifdef DDB
DB_SHOW_COMMAND(irqs, db_show_irqs)
{
	u_int i, irqsum;
	u_long num;
	struct intr_irqsrc *isrc;

	for (irqsum = 0, i = 0; i < NIRQ; i++) {
		isrc = irq_sources[i];
		if (isrc == NULL)
			continue;

		num = isrc->isrc_count != NULL ? isrc->isrc_count[0] : 0;
		db_printf("irq%-3u <%s>: cpu %02lx%s cnt %lu\n", i,
		    isrc->isrc_name, isrc->isrc_cpu.__bits[0],
		    isrc->isrc_flags & INTR_ISRCF_BOUND ? " (bound)" : "", num);
		irqsum += num;
	}
	db_printf("irq total %u\n", irqsum);
}
#endif

/*
 * Interrupt mapping table functions.
 *
 * Please keep this part separate; it can be transformed into an
 * extension of the standard resources later.
 */
struct intr_map_entry
{
	device_t 		dev;
	intptr_t 		xref;
	struct intr_map_data 	*map_data;
	struct intr_irqsrc 	*isrc;
	/* XXX TODO DISCONNECTED PICs */
	/*int			flags */
};

/* XXX Convert irq_map[] to a dynamically expandable one. */
static struct intr_map_entry *irq_map[2 * NIRQ];
static int irq_map_count = nitems(irq_map);
static int irq_map_first_free_idx;
static struct mtx irq_map_lock;

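/*
 * Look up the interrupt source currently associated with a resource id;
 * returns NULL if the id is unmapped or not yet activated.
 */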
static struct intr_irqsrc *
intr_map_get_isrc(u_int res_id)
{
	struct intr_irqsrc *isrc;

	mtx_lock(&irq_map_lock);
	if ((res_id >= irq_map_count) || (irq_map[res_id] == NULL)) {
		mtx_unlock(&irq_map_lock);
		return (NULL);
	}
	isrc = irq_map[res_id]->isrc;
	mtx_unlock(&irq_map_lock);
	return (isrc);
}

static void
intr_map_set_isrc(u_int res_id, struct intr_irqsrc *isrc)
{

	mtx_lock(&irq_map_lock);
	if ((res_id >= irq_map_count) || (irq_map[res_id] == NULL)) {
		mtx_unlock(&irq_map_lock);
		return;
	}
	irq_map[res_id]->isrc = isrc;
	mtx_unlock(&irq_map_lock);
}

/*
 * Get the map data of an intr_map_entry; a pointer is returned, not a copy.
 */
static struct intr_map_data *
intr_map_get_map_data(u_int res_id)
{
	struct intr_map_data *data;

	data = NULL;
	mtx_lock(&irq_map_lock);
	if (res_id >= irq_map_count || irq_map[res_id] == NULL)
		panic("Attempt to copy invalid resource id: %u\n", res_id);
	data = irq_map[res_id]->map_data;
	mtx_unlock(&irq_map_lock);

	return (data);
}

/*
 * Get a copy of intr_map_entry data.
 */
static void
intr_map_copy_map_data(u_int res_id, device_t *map_dev, intptr_t *map_xref,
    struct intr_map_data **data)
{
	size_t len;

	len = 0;
	mtx_lock(&irq_map_lock);
	if (res_id >= irq_map_count || irq_map[res_id] == NULL)
		panic("Attempt to copy invalid resource id: %u\n", res_id);
	if (irq_map[res_id]->map_data != NULL)
		len = irq_map[res_id]->map_data->len;
	mtx_unlock(&irq_map_lock);

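	/*
	 * The map lock was dropped so that the M_WAITOK allocation below
	 * can sleep. The entry is therefore revalidated once the lock is
	 * retaken; the length check catches an entry that was replaced in
	 * the meantime.
	 */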
	if (len == 0)
		*data = NULL;
	else
		*data = malloc(len, M_INTRNG, M_WAITOK | M_ZERO);
	mtx_lock(&irq_map_lock);
	if (irq_map[res_id] == NULL)
		panic("Attempt to copy invalid resource id: %u\n", res_id);
	if (len != 0) {
		if (len != irq_map[res_id]->map_data->len)
			panic("Resource id: %u has changed.\n", res_id);
		memcpy(*data, irq_map[res_id]->map_data, len);
	}
	*map_dev = irq_map[res_id]->dev;
	*map_xref = irq_map[res_id]->xref;
	mtx_unlock(&irq_map_lock);
}

/*
 * Allocate and fill a new entry in the irq_map table.
 */
u_int
intr_map_irq(device_t dev, intptr_t xref, struct intr_map_data *data)
{
	u_int i;
	struct intr_map_entry *entry;

	/* Prepare the new entry first. */
	entry = malloc(sizeof(*entry), M_INTRNG, M_WAITOK | M_ZERO);

	entry->dev = dev;
	entry->xref = xref;
	entry->map_data = data;
	entry->isrc = NULL;

	mtx_lock(&irq_map_lock);
	for (i = irq_map_first_free_idx; i < irq_map_count; i++) {
		if (irq_map[i] == NULL) {
			irq_map[i] = entry;
			irq_map_first_free_idx = i + 1;
			mtx_unlock(&irq_map_lock);
			return (i);
		}
	}
	mtx_unlock(&irq_map_lock);

	/* XXX Expand the irq_map table. */
	panic("IRQ mapping table is full.");
}

/*
 * Remove and free a mapping entry.
 */
void
intr_unmap_irq(u_int res_id)
{
	struct intr_map_entry *entry;

	mtx_lock(&irq_map_lock);
	if ((res_id >= irq_map_count) || (irq_map[res_id] == NULL))
		panic("Attempt to unmap invalid resource id: %u\n", res_id);
	entry = irq_map[res_id];
	irq_map[res_id] = NULL;
	irq_map_first_free_idx = res_id;
	mtx_unlock(&irq_map_lock);
	intr_free_intr_map_data(entry->map_data);
	free(entry, M_INTRNG);
}

/*
 * Clone a mapping entry.
 */
u_int
intr_map_clone_irq(u_int old_res_id)
{
	device_t map_dev;
	intptr_t map_xref;
	struct intr_map_data *data;

	intr_map_copy_map_data(old_res_id, &map_dev, &map_xref, &data);
	return (intr_map_irq(map_dev, map_xref, data));
}

static void
intr_map_init(void *dummy __unused)
{

	mtx_init(&irq_map_lock, "intr map table", NULL, MTX_DEF);
}
SYSINIT(intr_map_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_map_init, NULL);