Deleted Added
full compact
subr_intr.c (297676) subr_intr.c (298738)
1/*-
2 * Copyright (c) 2015-2016 Svatopluk Kraus
3 * Copyright (c) 2015-2016 Michal Meloun
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 2015-2016 Svatopluk Kraus
3 * Copyright (c) 2015-2016 Michal Meloun
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28#include <sys/cdefs.h>
29__FBSDID("$FreeBSD: head/sys/kern/subr_intr.c 297676 2016-04-07 15:16:33Z skra $");
29__FBSDID("$FreeBSD: head/sys/kern/subr_intr.c 298738 2016-04-28 12:03:22Z mmel $");
30
31/*
32 * New-style Interrupt Framework
33 *
34 * TODO: - to support IPI (PPI) enabling on other CPUs if already started
35 * - to complete things for removable PICs
36 */
37
38#include "opt_acpi.h"
39#include "opt_ddb.h"
40#include "opt_platform.h"
41
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/kernel.h>
45#include <sys/syslog.h>
46#include <sys/malloc.h>
47#include <sys/proc.h>
48#include <sys/queue.h>
49#include <sys/bus.h>
50#include <sys/interrupt.h>
51#include <sys/conf.h>
52#include <sys/cpuset.h>
53#include <sys/rman.h>
54#include <sys/sched.h>
55#include <sys/smp.h>
56#include <machine/atomic.h>
57#include <machine/intr.h>
58#include <machine/cpu.h>
59#include <machine/smp.h>
60#include <machine/stdarg.h>
61
62#ifdef FDT
63#include <dev/ofw/openfirm.h>
64#include <dev/ofw/ofw_bus.h>
65#include <dev/ofw/ofw_bus_subr.h>
66#endif
67
68#ifdef DDB
69#include <ddb/ddb.h>
70#endif
71
72#include "pic_if.h"
73
74#define INTRNAME_LEN (2*MAXCOMLEN + 1)
75
76#ifdef DEBUG
77#define debugf(fmt, args...) do { printf("%s(): ", __func__); \
78 printf(fmt,##args); } while (0)
79#else
80#define debugf(fmt, args...)
81#endif
82
83MALLOC_DECLARE(M_INTRNG);
84MALLOC_DEFINE(M_INTRNG, "intr", "intr interrupt handling");
85
86/* Main interrupt handler called from assembler -> 'hidden' for C code. */
87void intr_irq_handler(struct trapframe *tf);
88
89/* Root interrupt controller stuff. */
90device_t intr_irq_root_dev;
91static intr_irq_filter_t *irq_root_filter;
92static void *irq_root_arg;
93static u_int irq_root_ipicount;
94
95/* Interrupt controller definition. */
96struct intr_pic {
97 SLIST_ENTRY(intr_pic) pic_next;
98 intptr_t pic_xref; /* hardware identification */
99 device_t pic_dev;
100};
101
102static struct mtx pic_list_lock;
103static SLIST_HEAD(, intr_pic) pic_list;
104
105static struct intr_pic *pic_lookup(device_t dev, intptr_t xref);
106
107/* Interrupt source definition. */
108static struct mtx isrc_table_lock;
109static struct intr_irqsrc *irq_sources[NIRQ];
110u_int irq_next_free;
111
112#define IRQ_INVALID nitems(irq_sources)
113
114/*
 * XXX - All stuff around struct intr_dev_data is considered as temporary
 * until a better place for storing struct intr_map_data is found.
117 *
118 * For now, there are two global interrupt numbers spaces:
119 * <0, NIRQ) ... interrupts without config data
120 * managed in irq_sources[]
121 * IRQ_DDATA_BASE + <0, 2 * NIRQ) ... interrupts with config data
122 * managed in intr_ddata_tab[]
123 *
124 * Read intr_ddata_lookup() to see how these spaces are worked with.
125 * Note that each interrupt number from second space duplicates some number
126 * from first space at this moment. An interrupt number from first space can
127 * be duplicated even multiple times in second space.
128 */
129struct intr_dev_data {
130 device_t idd_dev;
131 intptr_t idd_xref;
132 u_int idd_irq;
133 struct intr_map_data idd_data;
134 struct intr_irqsrc * idd_isrc;
135};
136
137static struct intr_dev_data *intr_ddata_tab[2 * NIRQ];
138static u_int intr_ddata_first_unused;
139
140#define IRQ_DDATA_BASE 10000
141CTASSERT(IRQ_DDATA_BASE > IRQ_INVALID);
142
143#ifdef SMP
144static boolean_t irq_assign_cpu = FALSE;
145#endif
146
147/*
148 * - 2 counters for each I/O interrupt.
149 * - MAXCPU counters for each IPI counters for SMP.
150 */
151#ifdef SMP
152#define INTRCNT_COUNT (NIRQ * 2 + INTR_IPI_COUNT * MAXCPU)
153#else
154#define INTRCNT_COUNT (NIRQ * 2)
155#endif
156
157/* Data for MI statistics reporting. */
158u_long intrcnt[INTRCNT_COUNT];
159char intrnames[INTRCNT_COUNT * INTRNAME_LEN];
160size_t sintrcnt = sizeof(intrcnt);
161size_t sintrnames = sizeof(intrnames);
162static u_int intrcnt_index;
163
164/*
165 * Interrupt framework initialization routine.
166 */
/*
 * Interrupt framework initialization routine.
 *
 * Initializes the registered-PIC list and the two framework mutexes.
 * Scheduled at SI_SUB_INTR/SI_ORDER_FIRST so the locks exist before any
 * PIC or interrupt source registration can happen.
 */
static void
intr_irq_init(void *dummy __unused)
{

	SLIST_INIT(&pic_list);
	mtx_init(&pic_list_lock, "intr pic list", NULL, MTX_DEF);
	mtx_init(&isrc_table_lock, "intr isrc table", NULL, MTX_DEF);
}
SYSINIT(intr_irq_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_irq_init, NULL);
176
177static void
178intrcnt_setname(const char *name, int index)
179{
180
181 snprintf(intrnames + INTRNAME_LEN * index, INTRNAME_LEN, "%-*s",
182 INTRNAME_LEN - 1, name);
183}
184
/*
 * Update name for interrupt source with interrupt event.
 *
 * Copies the event's full name into the source's main counter name slot.
 * Caller must hold isrc_table_lock and isrc must have an event attached.
 */
static void
intrcnt_updatename(struct intr_irqsrc *isrc)
{

	/* QQQ: What about stray counter name? */
	mtx_assert(&isrc_table_lock, MA_OWNED);
	intrcnt_setname(isrc->isrc_event->ie_fullname, isrc->isrc_index);
}
196
197/*
198 * Virtualization for interrupt source interrupt counter increment.
199 */
200static inline void
201isrc_increment_count(struct intr_irqsrc *isrc)
202{
203
204 if (isrc->isrc_flags & INTR_ISRCF_PPI)
205 atomic_add_long(&isrc->isrc_count[0], 1);
206 else
207 isrc->isrc_count[0]++;
208}
209
/*
 * Virtualization for interrupt source interrupt stray counter increment.
 * The stray counter lives in the slot right after the main counter.
 */
static inline void
isrc_increment_straycount(struct intr_irqsrc *isrc)
{

	isrc->isrc_count[1]++;
}
219
220/*
221 * Virtualization for interrupt source interrupt name update.
222 */
223static void
224isrc_update_name(struct intr_irqsrc *isrc, const char *name)
225{
226 char str[INTRNAME_LEN];
227
228 mtx_assert(&isrc_table_lock, MA_OWNED);
229
230 if (name != NULL) {
231 snprintf(str, INTRNAME_LEN, "%s: %s", isrc->isrc_name, name);
232 intrcnt_setname(str, isrc->isrc_index);
233 snprintf(str, INTRNAME_LEN, "stray %s: %s", isrc->isrc_name,
234 name);
235 intrcnt_setname(str, isrc->isrc_index + 1);
236 } else {
237 snprintf(str, INTRNAME_LEN, "%s:", isrc->isrc_name);
238 intrcnt_setname(str, isrc->isrc_index);
239 snprintf(str, INTRNAME_LEN, "stray %s:", isrc->isrc_name);
240 intrcnt_setname(str, isrc->isrc_index + 1);
241 }
242}
243
/*
 * Virtualization for interrupt source interrupt counters setup.
 *
 * Reserves two consecutive intrcnt[] slots for the source - one for the
 * regular count and one for the stray count - and publishes the initial
 * (event-less) names.
 */
static void
isrc_setup_counters(struct intr_irqsrc *isrc)
{
	u_int index;

	/*
	 * XXX - it does not work well with removable controllers and
	 *	 interrupt sources !!!
	 */
	index = atomic_fetchadd_int(&intrcnt_index, 2);
	isrc->isrc_index = index;
	isrc->isrc_count = &intrcnt[index];
	isrc_update_name(isrc, NULL);
}
261
/*
 * Virtualization for interrupt source interrupt counters release.
 *
 * Not implemented yet - counter slots are handed out append-only, so
 * they cannot be returned to the pool; calling this always panics.
 */
static void
isrc_release_counters(struct intr_irqsrc *isrc)
{

	panic("%s: not implemented", __func__);
}
271
#ifdef SMP
/*
 * Virtualization for interrupt source IPI counters setup.
 *
 * Reserves MAXCPU consecutive intrcnt[] slots for the named IPI, one
 * per CPU, names them "cpuN:name", and returns a pointer to the first.
 */
u_long *
intr_ipi_setup_counters(const char *name)
{
	u_int index, i;
	char str[INTRNAME_LEN];

	index = atomic_fetchadd_int(&intrcnt_index, MAXCPU);
	for (i = 0; i < MAXCPU; i++) {
		snprintf(str, INTRNAME_LEN, "cpu%d:%s", i, name);
		intrcnt_setname(str, index + i);
	}
	return (&intrcnt[index]);
}
#endif
290
/*
 * Main interrupt dispatch handler. It's called straight
 * from the assembler, where CPU interrupt is served.
 *
 * Runs the root PIC's filter inside a critical section while the
 * current trapframe is temporarily installed as td_intr_frame; the
 * previous frame is restored afterwards so nesting works correctly.
 */
void
intr_irq_handler(struct trapframe *tf)
{
	struct trapframe * oldframe;
	struct thread * td;

	/* A root interrupt controller must have been registered by now. */
	KASSERT(irq_root_filter != NULL, ("%s: no filter", __func__));

	PCPU_INC(cnt.v_intr);
	critical_enter();
	td = curthread;
	oldframe = td->td_intr_frame;
	td->td_intr_frame = tf;
	irq_root_filter(irq_root_arg);
	td->td_intr_frame = oldframe;
	critical_exit();
}
312
/*
 * interrupt controller dispatch function for interrupts. It should
 * be called straight from the interrupt controller, when associated
 * interrupt source is learned.
 *
 * Returns 0 when the interrupt was handled by the optional direct
 * filter or by the attached interrupt event; otherwise the stray
 * counter is bumped and EINVAL is returned.
 */
int
intr_isrc_dispatch(struct intr_irqsrc *isrc, struct trapframe *tf)
{

	KASSERT(isrc != NULL, ("%s: no source", __func__));

	isrc_increment_count(isrc);

#ifdef INTR_SOLO
	/* Fast path: a single filter bound directly to the source. */
	if (isrc->isrc_filter != NULL) {
		int error;
		error = isrc->isrc_filter(isrc->isrc_arg, tf);
		PIC_POST_FILTER(isrc->isrc_dev, isrc);
		if (error == FILTER_HANDLED)
			return (0);
	} else
#endif
	if (isrc->isrc_event != NULL) {
		if (intr_event_handle(isrc->isrc_event, tf) == 0)
			return (0);
	}

	isrc_increment_straycount(isrc);
	return (EINVAL);
}
343
/*
 * Alloc unique interrupt number (resource handle) for interrupt source.
 *
 * There could be various strategies how to allocate free interrupt number
 * (resource handle) for new interrupt source.
 *
 * 1. Handles are always allocated forward, so handles are not recycled
 *    immediately. However, if only one free handle left which is reused
 *    constantly...
 *
 * Returns 0 on success with isrc->isrc_irq set, or ENOSPC when the
 * irq_sources[] table is full.  Caller must hold isrc_table_lock.
 */
static inline int
isrc_alloc_irq(struct intr_irqsrc *isrc)
{
	u_int maxirqs, irq;

	mtx_assert(&isrc_table_lock, MA_OWNED);

	maxirqs = nitems(irq_sources);
	/* irq_next_free == maxirqs marks a table already known to be full. */
	if (irq_next_free >= maxirqs)
		return (ENOSPC);

	/* Scan forward from the hint, then wrap around from the start. */
	for (irq = irq_next_free; irq < maxirqs; irq++) {
		if (irq_sources[irq] == NULL)
			goto found;
	}
	for (irq = 0; irq < irq_next_free; irq++) {
		if (irq_sources[irq] == NULL)
			goto found;
	}

	irq_next_free = maxirqs;
	return (ENOSPC);

found:
	isrc->isrc_irq = irq;
	irq_sources[irq] = isrc;

	/* Advance the hint past the slot just taken, wrapping to zero. */
	irq_next_free = irq + 1;
	if (irq_next_free >= maxirqs)
		irq_next_free = 0;
	return (0);
}
386
387/*
388 * Free unique interrupt number (resource handle) from interrupt source.
389 */
390static inline int
391isrc_free_irq(struct intr_irqsrc *isrc)
392{
393
394 mtx_assert(&isrc_table_lock, MA_OWNED);
395
396 if (isrc->isrc_irq >= nitems(irq_sources))
397 return (EINVAL);
398 if (irq_sources[isrc->isrc_irq] != isrc)
399 return (EINVAL);
400
401 irq_sources[isrc->isrc_irq] = NULL;
402 isrc->isrc_irq = IRQ_INVALID; /* just to be safe */
403 return (0);
404}
405
406/*
407 * Lookup interrupt source by interrupt number (resource handle).
408 */
409static inline struct intr_irqsrc *
410isrc_lookup(u_int irq)
411{
412
413 if (irq < nitems(irq_sources))
414 return (irq_sources[irq]);
415 return (NULL);
416}
417
/*
 * Initialize interrupt source and register it into global interrupt table.
 *
 * Zeroes the caller-provided isrc, formats its name from 'fmt', and
 * allocates a unique interrupt number for it.  Counter slots are set up
 * here for everything except IPI sources (see comment below).  Returns
 * 0 on success or the isrc_alloc_irq() error (ENOSPC) on failure.
 */
int
intr_isrc_register(struct intr_irqsrc *isrc, device_t dev, u_int flags,
    const char *fmt, ...)
{
	int error;
	va_list ap;

	bzero(isrc, sizeof(struct intr_irqsrc));
	isrc->isrc_dev = dev;
	isrc->isrc_irq = IRQ_INVALID;	/* just to be safe */
	isrc->isrc_flags = flags;

	va_start(ap, fmt);
	vsnprintf(isrc->isrc_name, INTR_ISRC_NAMELEN, fmt, ap);
	va_end(ap);

	mtx_lock(&isrc_table_lock);
	error = isrc_alloc_irq(isrc);
	if (error != 0) {
		mtx_unlock(&isrc_table_lock);
		return (error);
	}
	/*
	 * Setup interrupt counters, but not for IPI sources. Those are setup
	 * later and only for used ones (up to INTR_IPI_COUNT) to not exhaust
	 * our counter pool.
	 */
	if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
		isrc_setup_counters(isrc);
	mtx_unlock(&isrc_table_lock);
	return (0);
}
453
/*
 * Deregister interrupt source from global interrupt table.
 *
 * Releases the counter slots (not for IPI sources, which never got any
 * in intr_isrc_register()) and frees the interrupt number.  Note that
 * isrc_release_counters() currently panics (not implemented).
 */
int
intr_isrc_deregister(struct intr_irqsrc *isrc)
{
	int error;

	mtx_lock(&isrc_table_lock);
	if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
		isrc_release_counters(isrc);
	error = isrc_free_irq(isrc);
	mtx_unlock(&isrc_table_lock);
	return (error);
}
469
#ifdef SMP
/*
 * A support function for a PIC to decide if provided ISRC should be inited
 * on given cpu. The logic of INTR_ISRCF_BOUND flag and isrc_cpu member of
 * struct intr_irqsrc is the following:
 *
 *     If INTR_ISRCF_BOUND is set, the ISRC should be inited only on cpus
 *     set in isrc_cpu. If not, the ISRC should be inited on every cpu and
 *     isrc_cpu is kept consistent with it. Thus isrc_cpu is always correct.
 */
bool
intr_isrc_init_on_cpu(struct intr_irqsrc *isrc, u_int cpu)
{
	u_int percpu_mask = INTR_ISRCF_PPI | INTR_ISRCF_IPI;

	/* Only active per-CPU (PPI/IPI) sources are eligible at all. */
	if (isrc->isrc_handlers == 0 || (isrc->isrc_flags & percpu_mask) == 0)
		return (false);
	if ((isrc->isrc_flags & INTR_ISRCF_BOUND) != 0)
		return (CPU_ISSET(cpu, &isrc->isrc_cpu));
	CPU_SET(cpu, &isrc->isrc_cpu);
	return (true);
}
#endif
495
/*
 * Allocate a struct intr_dev_data with 'extsize' extra bytes appended
 * for variable-length config data, store it into the next free slot of
 * intr_ddata_tab[], and assign it a handle in the IRQ_DDATA_BASE number
 * space.  Returns NULL when the description table is full.
 */
static struct intr_dev_data *
intr_ddata_alloc(u_int extsize)
{
	struct intr_dev_data *ddata;

	/* M_WAITOK: allocate before taking the lock, may sleep. */
	ddata = malloc(sizeof(*ddata) + extsize, M_INTRNG, M_WAITOK | M_ZERO);

	mtx_lock(&isrc_table_lock);
	if (intr_ddata_first_unused >= nitems(intr_ddata_tab)) {
		mtx_unlock(&isrc_table_lock);
		free(ddata, M_INTRNG);
		return (NULL);
	}
	intr_ddata_tab[intr_ddata_first_unused] = ddata;
	ddata->idd_irq = IRQ_DDATA_BASE + intr_ddata_first_unused++;
	mtx_unlock(&isrc_table_lock);
	return (ddata);
}
514
515static struct intr_irqsrc *
516intr_ddata_lookup(u_int irq, struct intr_map_data **datap)
517{
518 int error;
519 struct intr_irqsrc *isrc;
520 struct intr_dev_data *ddata;
521
522 isrc = isrc_lookup(irq);
523 if (isrc != NULL) {
524 if (datap != NULL)
525 *datap = NULL;
526 return (isrc);
527 }
528
529 if (irq < IRQ_DDATA_BASE)
530 return (NULL);
531
532 irq -= IRQ_DDATA_BASE;
533 if (irq >= nitems(intr_ddata_tab))
534 return (NULL);
535
536 ddata = intr_ddata_tab[irq];
537 if (ddata->idd_isrc == NULL) {
538 error = intr_map_irq(ddata->idd_dev, ddata->idd_xref,
539 &ddata->idd_data, &irq);
540 if (error != 0)
541 return (NULL);
542 ddata->idd_isrc = isrc_lookup(irq);
543 }
544 if (datap != NULL)
545 *datap = &ddata->idd_data;
546 return (ddata->idd_isrc);
547}
548
#ifdef DEV_ACPI
/*
 * Map interrupt source according to ACPI info into framework. If such mapping
 * does not exist, create it. Return unique interrupt number (resource handle)
 * associated with mapped interrupt source.
 *
 * Returns 0xFFFFFFFF when the description table is full.  The actual
 * binding to an interrupt source is deferred to intr_ddata_lookup().
 */
u_int
intr_acpi_map_irq(device_t dev, u_int irq, enum intr_polarity pol,
    enum intr_trigger trig)
{
	struct intr_dev_data *ddata;

	ddata = intr_ddata_alloc(0);
	if (ddata == NULL)
		return (0xFFFFFFFF);	/* no space left */

	ddata->idd_dev = dev;
	ddata->idd_data.type = INTR_MAP_DATA_ACPI;
	ddata->idd_data.acpi.irq = irq;
	ddata->idd_data.acpi.pol = pol;
	ddata->idd_data.acpi.trig = trig;
	return (ddata->idd_irq);
}
#endif
#ifdef FDT
/*
 * Map interrupt source according to FDT data into framework. If such mapping
 * does not exist, create it. Return unique interrupt number (resource handle)
 * associated with mapped interrupt source.
 *
 * The interrupt specifier cells are copied into storage allocated right
 * behind the descriptor.  Returns 0xFFFFFFFF when the table is full.
 */
u_int
intr_fdt_map_irq(phandle_t node, pcell_t *cells, u_int ncells)
{
	struct intr_dev_data *ddata;
	u_int cellsize;

	cellsize = ncells * sizeof(*cells);
	ddata = intr_ddata_alloc(cellsize);
	if (ddata == NULL)
		return (0xFFFFFFFF);	/* no space left */

	ddata->idd_xref = (intptr_t)node;
	ddata->idd_data.type = INTR_MAP_DATA_FDT;
	ddata->idd_data.fdt.ncells = ncells;
	/* Cells live in the extra space allocated past the descriptor. */
	ddata->idd_data.fdt.cells = (pcell_t *)(ddata + 1);
	memcpy(ddata->idd_data.fdt.cells, cells, cellsize);
	return (ddata->idd_irq);
}
#endif
598
30
31/*
32 * New-style Interrupt Framework
33 *
34 * TODO: - to support IPI (PPI) enabling on other CPUs if already started
35 * - to complete things for removable PICs
36 */
37
38#include "opt_acpi.h"
39#include "opt_ddb.h"
40#include "opt_platform.h"
41
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/kernel.h>
45#include <sys/syslog.h>
46#include <sys/malloc.h>
47#include <sys/proc.h>
48#include <sys/queue.h>
49#include <sys/bus.h>
50#include <sys/interrupt.h>
51#include <sys/conf.h>
52#include <sys/cpuset.h>
53#include <sys/rman.h>
54#include <sys/sched.h>
55#include <sys/smp.h>
56#include <machine/atomic.h>
57#include <machine/intr.h>
58#include <machine/cpu.h>
59#include <machine/smp.h>
60#include <machine/stdarg.h>
61
62#ifdef FDT
63#include <dev/ofw/openfirm.h>
64#include <dev/ofw/ofw_bus.h>
65#include <dev/ofw/ofw_bus_subr.h>
66#endif
67
68#ifdef DDB
69#include <ddb/ddb.h>
70#endif
71
72#include "pic_if.h"
73
74#define INTRNAME_LEN (2*MAXCOMLEN + 1)
75
76#ifdef DEBUG
77#define debugf(fmt, args...) do { printf("%s(): ", __func__); \
78 printf(fmt,##args); } while (0)
79#else
80#define debugf(fmt, args...)
81#endif
82
83MALLOC_DECLARE(M_INTRNG);
84MALLOC_DEFINE(M_INTRNG, "intr", "intr interrupt handling");
85
86/* Main interrupt handler called from assembler -> 'hidden' for C code. */
87void intr_irq_handler(struct trapframe *tf);
88
89/* Root interrupt controller stuff. */
90device_t intr_irq_root_dev;
91static intr_irq_filter_t *irq_root_filter;
92static void *irq_root_arg;
93static u_int irq_root_ipicount;
94
95/* Interrupt controller definition. */
96struct intr_pic {
97 SLIST_ENTRY(intr_pic) pic_next;
98 intptr_t pic_xref; /* hardware identification */
99 device_t pic_dev;
100};
101
102static struct mtx pic_list_lock;
103static SLIST_HEAD(, intr_pic) pic_list;
104
105static struct intr_pic *pic_lookup(device_t dev, intptr_t xref);
106
107/* Interrupt source definition. */
108static struct mtx isrc_table_lock;
109static struct intr_irqsrc *irq_sources[NIRQ];
110u_int irq_next_free;
111
112#define IRQ_INVALID nitems(irq_sources)
113
114/*
 * XXX - All stuff around struct intr_dev_data is considered as temporary
 * until a better place for storing struct intr_map_data is found.
117 *
118 * For now, there are two global interrupt numbers spaces:
119 * <0, NIRQ) ... interrupts without config data
120 * managed in irq_sources[]
121 * IRQ_DDATA_BASE + <0, 2 * NIRQ) ... interrupts with config data
122 * managed in intr_ddata_tab[]
123 *
124 * Read intr_ddata_lookup() to see how these spaces are worked with.
125 * Note that each interrupt number from second space duplicates some number
126 * from first space at this moment. An interrupt number from first space can
127 * be duplicated even multiple times in second space.
128 */
129struct intr_dev_data {
130 device_t idd_dev;
131 intptr_t idd_xref;
132 u_int idd_irq;
133 struct intr_map_data idd_data;
134 struct intr_irqsrc * idd_isrc;
135};
136
137static struct intr_dev_data *intr_ddata_tab[2 * NIRQ];
138static u_int intr_ddata_first_unused;
139
140#define IRQ_DDATA_BASE 10000
141CTASSERT(IRQ_DDATA_BASE > IRQ_INVALID);
142
143#ifdef SMP
144static boolean_t irq_assign_cpu = FALSE;
145#endif
146
147/*
148 * - 2 counters for each I/O interrupt.
149 * - MAXCPU counters for each IPI counters for SMP.
150 */
151#ifdef SMP
152#define INTRCNT_COUNT (NIRQ * 2 + INTR_IPI_COUNT * MAXCPU)
153#else
154#define INTRCNT_COUNT (NIRQ * 2)
155#endif
156
157/* Data for MI statistics reporting. */
158u_long intrcnt[INTRCNT_COUNT];
159char intrnames[INTRCNT_COUNT * INTRNAME_LEN];
160size_t sintrcnt = sizeof(intrcnt);
161size_t sintrnames = sizeof(intrnames);
162static u_int intrcnt_index;
163
164/*
165 * Interrupt framework initialization routine.
166 */
/*
 * Interrupt framework initialization routine.
 *
 * Initializes the registered-PIC list and the two framework mutexes.
 * Scheduled at SI_SUB_INTR/SI_ORDER_FIRST so the locks exist before any
 * PIC or interrupt source registration can happen.
 */
static void
intr_irq_init(void *dummy __unused)
{

	SLIST_INIT(&pic_list);
	mtx_init(&pic_list_lock, "intr pic list", NULL, MTX_DEF);
	mtx_init(&isrc_table_lock, "intr isrc table", NULL, MTX_DEF);
}
SYSINIT(intr_irq_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_irq_init, NULL);
176
177static void
178intrcnt_setname(const char *name, int index)
179{
180
181 snprintf(intrnames + INTRNAME_LEN * index, INTRNAME_LEN, "%-*s",
182 INTRNAME_LEN - 1, name);
183}
184
/*
 * Update name for interrupt source with interrupt event.
 *
 * Copies the event's full name into the source's main counter name slot.
 * Caller must hold isrc_table_lock and isrc must have an event attached.
 */
static void
intrcnt_updatename(struct intr_irqsrc *isrc)
{

	/* QQQ: What about stray counter name? */
	mtx_assert(&isrc_table_lock, MA_OWNED);
	intrcnt_setname(isrc->isrc_event->ie_fullname, isrc->isrc_index);
}
196
197/*
198 * Virtualization for interrupt source interrupt counter increment.
199 */
200static inline void
201isrc_increment_count(struct intr_irqsrc *isrc)
202{
203
204 if (isrc->isrc_flags & INTR_ISRCF_PPI)
205 atomic_add_long(&isrc->isrc_count[0], 1);
206 else
207 isrc->isrc_count[0]++;
208}
209
/*
 * Virtualization for interrupt source interrupt stray counter increment.
 * The stray counter lives in the slot right after the main counter.
 */
static inline void
isrc_increment_straycount(struct intr_irqsrc *isrc)
{

	isrc->isrc_count[1]++;
}
219
220/*
221 * Virtualization for interrupt source interrupt name update.
222 */
223static void
224isrc_update_name(struct intr_irqsrc *isrc, const char *name)
225{
226 char str[INTRNAME_LEN];
227
228 mtx_assert(&isrc_table_lock, MA_OWNED);
229
230 if (name != NULL) {
231 snprintf(str, INTRNAME_LEN, "%s: %s", isrc->isrc_name, name);
232 intrcnt_setname(str, isrc->isrc_index);
233 snprintf(str, INTRNAME_LEN, "stray %s: %s", isrc->isrc_name,
234 name);
235 intrcnt_setname(str, isrc->isrc_index + 1);
236 } else {
237 snprintf(str, INTRNAME_LEN, "%s:", isrc->isrc_name);
238 intrcnt_setname(str, isrc->isrc_index);
239 snprintf(str, INTRNAME_LEN, "stray %s:", isrc->isrc_name);
240 intrcnt_setname(str, isrc->isrc_index + 1);
241 }
242}
243
/*
 * Virtualization for interrupt source interrupt counters setup.
 *
 * Reserves two consecutive intrcnt[] slots for the source - one for the
 * regular count and one for the stray count - and publishes the initial
 * (event-less) names.
 */
static void
isrc_setup_counters(struct intr_irqsrc *isrc)
{
	u_int index;

	/*
	 * XXX - it does not work well with removable controllers and
	 *	 interrupt sources !!!
	 */
	index = atomic_fetchadd_int(&intrcnt_index, 2);
	isrc->isrc_index = index;
	isrc->isrc_count = &intrcnt[index];
	isrc_update_name(isrc, NULL);
}
261
/*
 * Virtualization for interrupt source interrupt counters release.
 *
 * Not implemented yet - counter slots are handed out append-only, so
 * they cannot be returned to the pool; calling this always panics.
 */
static void
isrc_release_counters(struct intr_irqsrc *isrc)
{

	panic("%s: not implemented", __func__);
}
271
#ifdef SMP
/*
 * Virtualization for interrupt source IPI counters setup.
 *
 * Reserves MAXCPU consecutive intrcnt[] slots for the named IPI, one
 * per CPU, names them "cpuN:name", and returns a pointer to the first.
 */
u_long *
intr_ipi_setup_counters(const char *name)
{
	u_int index, i;
	char str[INTRNAME_LEN];

	index = atomic_fetchadd_int(&intrcnt_index, MAXCPU);
	for (i = 0; i < MAXCPU; i++) {
		snprintf(str, INTRNAME_LEN, "cpu%d:%s", i, name);
		intrcnt_setname(str, index + i);
	}
	return (&intrcnt[index]);
}
#endif
290
/*
 * Main interrupt dispatch handler. It's called straight
 * from the assembler, where CPU interrupt is served.
 *
 * Runs the root PIC's filter inside a critical section while the
 * current trapframe is temporarily installed as td_intr_frame; the
 * previous frame is restored afterwards so nesting works correctly.
 */
void
intr_irq_handler(struct trapframe *tf)
{
	struct trapframe * oldframe;
	struct thread * td;

	/* A root interrupt controller must have been registered by now. */
	KASSERT(irq_root_filter != NULL, ("%s: no filter", __func__));

	PCPU_INC(cnt.v_intr);
	critical_enter();
	td = curthread;
	oldframe = td->td_intr_frame;
	td->td_intr_frame = tf;
	irq_root_filter(irq_root_arg);
	td->td_intr_frame = oldframe;
	critical_exit();
}
312
/*
 * interrupt controller dispatch function for interrupts. It should
 * be called straight from the interrupt controller, when associated
 * interrupt source is learned.
 *
 * Returns 0 when the interrupt was handled by the optional direct
 * filter or by the attached interrupt event; otherwise the stray
 * counter is bumped and EINVAL is returned.
 */
int
intr_isrc_dispatch(struct intr_irqsrc *isrc, struct trapframe *tf)
{

	KASSERT(isrc != NULL, ("%s: no source", __func__));

	isrc_increment_count(isrc);

#ifdef INTR_SOLO
	/* Fast path: a single filter bound directly to the source. */
	if (isrc->isrc_filter != NULL) {
		int error;
		error = isrc->isrc_filter(isrc->isrc_arg, tf);
		PIC_POST_FILTER(isrc->isrc_dev, isrc);
		if (error == FILTER_HANDLED)
			return (0);
	} else
#endif
	if (isrc->isrc_event != NULL) {
		if (intr_event_handle(isrc->isrc_event, tf) == 0)
			return (0);
	}

	isrc_increment_straycount(isrc);
	return (EINVAL);
}
343
/*
 * Alloc unique interrupt number (resource handle) for interrupt source.
 *
 * There could be various strategies how to allocate free interrupt number
 * (resource handle) for new interrupt source.
 *
 * 1. Handles are always allocated forward, so handles are not recycled
 *    immediately. However, if only one free handle left which is reused
 *    constantly...
 *
 * Returns 0 on success with isrc->isrc_irq set, or ENOSPC when the
 * irq_sources[] table is full.  Caller must hold isrc_table_lock.
 */
static inline int
isrc_alloc_irq(struct intr_irqsrc *isrc)
{
	u_int maxirqs, irq;

	mtx_assert(&isrc_table_lock, MA_OWNED);

	maxirqs = nitems(irq_sources);
	/* irq_next_free == maxirqs marks a table already known to be full. */
	if (irq_next_free >= maxirqs)
		return (ENOSPC);

	/* Scan forward from the hint, then wrap around from the start. */
	for (irq = irq_next_free; irq < maxirqs; irq++) {
		if (irq_sources[irq] == NULL)
			goto found;
	}
	for (irq = 0; irq < irq_next_free; irq++) {
		if (irq_sources[irq] == NULL)
			goto found;
	}

	irq_next_free = maxirqs;
	return (ENOSPC);

found:
	isrc->isrc_irq = irq;
	irq_sources[irq] = isrc;

	/* Advance the hint past the slot just taken, wrapping to zero. */
	irq_next_free = irq + 1;
	if (irq_next_free >= maxirqs)
		irq_next_free = 0;
	return (0);
}
386
387/*
388 * Free unique interrupt number (resource handle) from interrupt source.
389 */
390static inline int
391isrc_free_irq(struct intr_irqsrc *isrc)
392{
393
394 mtx_assert(&isrc_table_lock, MA_OWNED);
395
396 if (isrc->isrc_irq >= nitems(irq_sources))
397 return (EINVAL);
398 if (irq_sources[isrc->isrc_irq] != isrc)
399 return (EINVAL);
400
401 irq_sources[isrc->isrc_irq] = NULL;
402 isrc->isrc_irq = IRQ_INVALID; /* just to be safe */
403 return (0);
404}
405
406/*
407 * Lookup interrupt source by interrupt number (resource handle).
408 */
409static inline struct intr_irqsrc *
410isrc_lookup(u_int irq)
411{
412
413 if (irq < nitems(irq_sources))
414 return (irq_sources[irq]);
415 return (NULL);
416}
417
/*
 * Initialize interrupt source and register it into global interrupt table.
 *
 * Zeroes the caller-provided isrc, formats its name from 'fmt', and
 * allocates a unique interrupt number for it.  Counter slots are set up
 * here for everything except IPI sources (see comment below).  Returns
 * 0 on success or the isrc_alloc_irq() error (ENOSPC) on failure.
 */
int
intr_isrc_register(struct intr_irqsrc *isrc, device_t dev, u_int flags,
    const char *fmt, ...)
{
	int error;
	va_list ap;

	bzero(isrc, sizeof(struct intr_irqsrc));
	isrc->isrc_dev = dev;
	isrc->isrc_irq = IRQ_INVALID;	/* just to be safe */
	isrc->isrc_flags = flags;

	va_start(ap, fmt);
	vsnprintf(isrc->isrc_name, INTR_ISRC_NAMELEN, fmt, ap);
	va_end(ap);

	mtx_lock(&isrc_table_lock);
	error = isrc_alloc_irq(isrc);
	if (error != 0) {
		mtx_unlock(&isrc_table_lock);
		return (error);
	}
	/*
	 * Setup interrupt counters, but not for IPI sources. Those are setup
	 * later and only for used ones (up to INTR_IPI_COUNT) to not exhaust
	 * our counter pool.
	 */
	if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
		isrc_setup_counters(isrc);
	mtx_unlock(&isrc_table_lock);
	return (0);
}
453
/*
 * Deregister interrupt source from global interrupt table.
 *
 * Releases the counter slots (not for IPI sources, which never got any
 * in intr_isrc_register()) and frees the interrupt number.  Note that
 * isrc_release_counters() currently panics (not implemented).
 */
int
intr_isrc_deregister(struct intr_irqsrc *isrc)
{
	int error;

	mtx_lock(&isrc_table_lock);
	if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
		isrc_release_counters(isrc);
	error = isrc_free_irq(isrc);
	mtx_unlock(&isrc_table_lock);
	return (error);
}
469
#ifdef SMP
/*
 * A support function for a PIC to decide if provided ISRC should be inited
 * on given cpu. The logic of INTR_ISRCF_BOUND flag and isrc_cpu member of
 * struct intr_irqsrc is the following:
 *
 *     If INTR_ISRCF_BOUND is set, the ISRC should be inited only on cpus
 *     set in isrc_cpu. If not, the ISRC should be inited on every cpu and
 *     isrc_cpu is kept consistent with it. Thus isrc_cpu is always correct.
 */
bool
intr_isrc_init_on_cpu(struct intr_irqsrc *isrc, u_int cpu)
{
	u_int percpu_mask = INTR_ISRCF_PPI | INTR_ISRCF_IPI;

	/* Only active per-CPU (PPI/IPI) sources are eligible at all. */
	if (isrc->isrc_handlers == 0 || (isrc->isrc_flags & percpu_mask) == 0)
		return (false);
	if ((isrc->isrc_flags & INTR_ISRCF_BOUND) != 0)
		return (CPU_ISSET(cpu, &isrc->isrc_cpu));
	CPU_SET(cpu, &isrc->isrc_cpu);
	return (true);
}
#endif
495
/*
 * Allocate a struct intr_dev_data with 'extsize' extra bytes appended
 * for variable-length config data, store it into the next free slot of
 * intr_ddata_tab[], and assign it a handle in the IRQ_DDATA_BASE number
 * space.  Returns NULL when the description table is full.
 */
static struct intr_dev_data *
intr_ddata_alloc(u_int extsize)
{
	struct intr_dev_data *ddata;

	/* M_WAITOK: allocate before taking the lock, may sleep. */
	ddata = malloc(sizeof(*ddata) + extsize, M_INTRNG, M_WAITOK | M_ZERO);

	mtx_lock(&isrc_table_lock);
	if (intr_ddata_first_unused >= nitems(intr_ddata_tab)) {
		mtx_unlock(&isrc_table_lock);
		free(ddata, M_INTRNG);
		return (NULL);
	}
	intr_ddata_tab[intr_ddata_first_unused] = ddata;
	ddata->idd_irq = IRQ_DDATA_BASE + intr_ddata_first_unused++;
	mtx_unlock(&isrc_table_lock);
	return (ddata);
}
514
515static struct intr_irqsrc *
516intr_ddata_lookup(u_int irq, struct intr_map_data **datap)
517{
518 int error;
519 struct intr_irqsrc *isrc;
520 struct intr_dev_data *ddata;
521
522 isrc = isrc_lookup(irq);
523 if (isrc != NULL) {
524 if (datap != NULL)
525 *datap = NULL;
526 return (isrc);
527 }
528
529 if (irq < IRQ_DDATA_BASE)
530 return (NULL);
531
532 irq -= IRQ_DDATA_BASE;
533 if (irq >= nitems(intr_ddata_tab))
534 return (NULL);
535
536 ddata = intr_ddata_tab[irq];
537 if (ddata->idd_isrc == NULL) {
538 error = intr_map_irq(ddata->idd_dev, ddata->idd_xref,
539 &ddata->idd_data, &irq);
540 if (error != 0)
541 return (NULL);
542 ddata->idd_isrc = isrc_lookup(irq);
543 }
544 if (datap != NULL)
545 *datap = &ddata->idd_data;
546 return (ddata->idd_isrc);
547}
548
549#ifdef DEV_ACPI
550/*
551 * Map interrupt source according to ACPI info into framework. If such mapping
552 * does not exist, create it. Return unique interrupt number (resource handle)
553 * associated with mapped interrupt source.
554 */
u_int
intr_acpi_map_irq(device_t dev, u_int irq, enum intr_polarity pol,
    enum intr_trigger trig)
{
	struct intr_dev_data *ddata;

	ddata = intr_ddata_alloc(0);
	if (ddata == NULL)
		return (0xFFFFFFFF);	/* no space left */

	/* Record the ACPI description; the real mapping is done lazily. */
	ddata->idd_dev = dev;
	ddata->idd_data.type = INTR_MAP_DATA_ACPI;
	ddata->idd_data.acpi.irq = irq;
	ddata->idd_data.acpi.pol = pol;
	ddata->idd_data.acpi.trig = trig;
	return (ddata->idd_irq);
}
572#endif
573#ifdef FDT
574/*
575 * Map interrupt source according to FDT data into framework. If such mapping
576 * does not exist, create it. Return unique interrupt number (resource handle)
577 * associated with mapped interrupt source.
578 */
u_int
intr_fdt_map_irq(phandle_t node, pcell_t *cells, u_int ncells)
{
	struct intr_dev_data *ddata;
	u_int cellsize;

	/* The FDT cells are copied into the space appended to the record. */
	cellsize = ncells * sizeof(*cells);
	ddata = intr_ddata_alloc(cellsize);
	if (ddata == NULL)
		return (0xFFFFFFFF);	/* no space left */

	ddata->idd_xref = (intptr_t)node;
	ddata->idd_data.type = INTR_MAP_DATA_FDT;
	ddata->idd_data.fdt.ncells = ncells;
	/* Extra space lives directly after the struct; point cells there. */
	ddata->idd_data.fdt.cells = (pcell_t *)(ddata + 1);
	memcpy(ddata->idd_data.fdt.cells, cells, cellsize);
	return (ddata->idd_irq);
}
597#endif
598
599/*
 * Store GPIO interrupt description in framework and return unique interrupt
601 * number (resource handle) associated with it.
602 */
u_int
intr_gpio_map_irq(device_t dev, u_int pin_num, u_int pin_flags, u_int intr_mode)
{
	struct intr_dev_data *ddata;

	ddata = intr_ddata_alloc(0);
	if (ddata == NULL)
		return (0xFFFFFFFF);	/* no space left */

	/* Record the GPIO description; the real mapping is done lazily. */
	ddata->idd_dev = dev;
	ddata->idd_data.type = INTR_MAP_DATA_GPIO;
	ddata->idd_data.gpio.gpio_pin_num = pin_num;
	ddata->idd_data.gpio.gpio_pin_flags = pin_flags;
	ddata->idd_data.gpio.gpio_intr_mode = intr_mode;
	return (ddata->idd_irq);
}
619
599#ifdef INTR_SOLO
600/*
601 * Setup filter into interrupt source.
602 */
static int
iscr_setup_filter(struct intr_irqsrc *isrc, const char *name,
    intr_irq_filter_t *filter, void *arg, void **cookiep)
{

	if (filter == NULL)
		return (EINVAL);

	mtx_lock(&isrc_table_lock);
	/*
	 * Make sure that we do not mix the two ways
	 * how we handle interrupt sources.
	 */
	if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
		mtx_unlock(&isrc_table_lock);
		return (EBUSY);
	}
	isrc->isrc_filter = filter;
	isrc->isrc_arg = arg;
	isrc_update_name(isrc, name);
	mtx_unlock(&isrc_table_lock);

	/* For solo filters, the cookie is the source itself. */
	*cookiep = isrc;
	return (0);
}
628#endif
629
630/*
631 * Interrupt source pre_ithread method for MI interrupt framework.
632 */
static void
intr_isrc_pre_ithread(void *arg)
{
	struct intr_irqsrc *isrc = arg;

	/* Delegate to the owning PIC (typically masks and EOIs the IRQ). */
	PIC_PRE_ITHREAD(isrc->isrc_dev, isrc);
}
640
641/*
642 * Interrupt source post_ithread method for MI interrupt framework.
643 */
static void
intr_isrc_post_ithread(void *arg)
{
	struct intr_irqsrc *isrc = arg;

	/* Delegate to the owning PIC (typically unmasks the IRQ). */
	PIC_POST_ITHREAD(isrc->isrc_dev, isrc);
}
651
652/*
653 * Interrupt source post_filter method for MI interrupt framework.
654 */
static void
intr_isrc_post_filter(void *arg)
{
	struct intr_irqsrc *isrc = arg;

	/* Delegate to the owning PIC (typically EOIs the IRQ). */
	PIC_POST_FILTER(isrc->isrc_dev, isrc);
}
662
663/*
664 * Interrupt source assign_cpu method for MI interrupt framework.
665 */
static int
intr_isrc_assign_cpu(void *arg, int cpu)
{
#ifdef SMP
	struct intr_irqsrc *isrc = arg;
	int error;

	/* Only sources on the root PIC can be re-bound here. */
	if (isrc->isrc_dev != intr_irq_root_dev)
		return (EINVAL);

	mtx_lock(&isrc_table_lock);
	if (cpu == NOCPU) {
		CPU_ZERO(&isrc->isrc_cpu);
		isrc->isrc_flags &= ~INTR_ISRCF_BOUND;
	} else {
		CPU_SETOF(cpu, &isrc->isrc_cpu);
		isrc->isrc_flags |= INTR_ISRCF_BOUND;
	}

	/*
	 * In NOCPU case, it's up to PIC to either leave ISRC on same CPU or
	 * re-balance it to another CPU or enable it on more CPUs. However,
	 * PIC is expected to change isrc_cpu appropriately to keep us well
	 * informed if the call is successful.
	 */
	if (irq_assign_cpu) {
		error = PIC_BIND_INTR(isrc->isrc_dev, isrc);
		if (error) {
			/* Clear so the inconsistency remains detectable. */
			CPU_ZERO(&isrc->isrc_cpu);
			mtx_unlock(&isrc_table_lock);
			return (error);
		}
	}
	mtx_unlock(&isrc_table_lock);
	return (0);
#else
	return (EOPNOTSUPP);
#endif
}
705
706/*
707 * Create interrupt event for interrupt source.
708 */
static int
isrc_event_create(struct intr_irqsrc *isrc)
{
	struct intr_event *ie;
	int error;

	/* Create the event first, outside the table lock (may sleep). */
	error = intr_event_create(&ie, isrc, 0, isrc->isrc_irq,
	    intr_isrc_pre_ithread, intr_isrc_post_ithread, intr_isrc_post_filter,
	    intr_isrc_assign_cpu, "%s:", isrc->isrc_name);
	if (error)
		return (error);

	mtx_lock(&isrc_table_lock);
	/*
	 * Make sure that we do not mix the two ways
	 * how we handle interrupt sources. Let the contested event win.
	 */
#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
#else
	if (isrc->isrc_event != NULL) {
#endif
		/*
		 * We lost the race: destroy our event.  Return 0 if an
		 * event now exists (someone else created it), EBUSY only
		 * if a solo filter blocks event-style handling.
		 */
		mtx_unlock(&isrc_table_lock);
		intr_event_destroy(ie);
		return (isrc->isrc_event != NULL ? EBUSY : 0);
	}
	isrc->isrc_event = ie;
	mtx_unlock(&isrc_table_lock);

	return (0);
}
740#ifdef notyet
741/*
742 * Destroy interrupt event for interrupt source.
743 */
static void
isrc_event_destroy(struct intr_irqsrc *isrc)
{
	struct intr_event *ie;

	/* Detach the event under the lock, destroy it outside. */
	mtx_lock(&isrc_table_lock);
	ie = isrc->isrc_event;
	isrc->isrc_event = NULL;
	mtx_unlock(&isrc_table_lock);

	if (ie != NULL)
		intr_event_destroy(ie);
}
757#endif
758/*
759 * Add handler to interrupt source.
760 */
static int
isrc_add_handler(struct intr_irqsrc *isrc, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, void **cookiep)
{
	int error;

	/* Create the interrupt event lazily on first handler. */
	if (isrc->isrc_event == NULL) {
		error = isrc_event_create(isrc);
		if (error)
			return (error);
	}

	error = intr_event_add_handler(isrc->isrc_event, name, filter, handler,
	    arg, intr_priority(flags), flags, cookiep);
	if (error == 0) {
		/* Keep the interrupt counter name in sync with handlers. */
		mtx_lock(&isrc_table_lock);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}

	return (error);
}
784
785/*
786 * Lookup interrupt controller locked.
787 */
788static inline struct intr_pic *
789pic_lookup_locked(device_t dev, intptr_t xref)
790{
791 struct intr_pic *pic;
792
793 mtx_assert(&pic_list_lock, MA_OWNED);
794
795 if (dev == NULL && xref == 0)
796 return (NULL);
797
798 /* Note that pic->pic_dev is never NULL on registered PIC. */
799 SLIST_FOREACH(pic, &pic_list, pic_next) {
800 if (dev == NULL) {
801 if (xref == pic->pic_xref)
802 return (pic);
803 } else if (xref == 0 || pic->pic_xref == 0) {
804 if (dev == pic->pic_dev)
805 return (pic);
806 } else if (xref == pic->pic_xref && dev == pic->pic_dev)
807 return (pic);
808 }
809 return (NULL);
810}
811
812/*
813 * Lookup interrupt controller.
814 */
815static struct intr_pic *
816pic_lookup(device_t dev, intptr_t xref)
817{
818 struct intr_pic *pic;
819
820 mtx_lock(&pic_list_lock);
821 pic = pic_lookup_locked(dev, xref);
822 mtx_unlock(&pic_list_lock);
823 return (pic);
824}
825
826/*
827 * Create interrupt controller.
828 */
829static struct intr_pic *
830pic_create(device_t dev, intptr_t xref)
831{
832 struct intr_pic *pic;
833
834 mtx_lock(&pic_list_lock);
835 pic = pic_lookup_locked(dev, xref);
836 if (pic != NULL) {
837 mtx_unlock(&pic_list_lock);
838 return (pic);
839 }
840 pic = malloc(sizeof(*pic), M_INTRNG, M_NOWAIT | M_ZERO);
841 pic->pic_xref = xref;
842 pic->pic_dev = dev;
843 SLIST_INSERT_HEAD(&pic_list, pic, pic_next);
844 mtx_unlock(&pic_list_lock);
845
846 return (pic);
847}
848#ifdef notyet
849/*
850 * Destroy interrupt controller.
851 */
static void
pic_destroy(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	mtx_lock(&pic_list_lock);
	pic = pic_lookup_locked(dev, xref);
	if (pic == NULL) {
		/* Nothing registered for this (dev, xref); no-op. */
		mtx_unlock(&pic_list_lock);
		return;
	}
	SLIST_REMOVE(&pic_list, pic, intr_pic, pic_next);
	mtx_unlock(&pic_list_lock);

	free(pic, M_INTRNG);
}
868#endif
869/*
870 * Register interrupt controller.
871 */
int
intr_pic_register(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	if (dev == NULL)
		return (EINVAL);
	/* pic_create() returns an existing record for a re-registration. */
	pic = pic_create(dev, xref);
	if (pic == NULL)
		return (ENOMEM);

	debugf("PIC %p registered for %s <dev %p, xref %x>\n", pic,
	    device_get_nameunit(dev), dev, xref);
	return (0);
}
887
888/*
889 * Unregister interrupt controller.
890 */
int
intr_pic_deregister(device_t dev, intptr_t xref)
{

	/* Deregistration is not supported yet; any call is a hard error. */
	panic("%s: not implemented", __func__);
}
897
898/*
899 * Mark interrupt controller (itself) as a root one.
900 *
901 * Note that only an interrupt controller can really know its position
902 * in interrupt controller's tree. So root PIC must claim itself as a root.
903 *
904 * In FDT case, according to ePAPR approved version 1.1 from 08 April 2011,
905 * page 30:
906 * "The root of the interrupt tree is determined when traversal
907 * of the interrupt tree reaches an interrupt controller node without
908 * an interrupts property and thus no explicit interrupt parent."
909 */
int
intr_pic_claim_root(device_t dev, intptr_t xref, intr_irq_filter_t *filter,
    void *arg, u_int ipicount)
{

	/* The PIC must have registered itself via intr_pic_register(). */
	if (pic_lookup(dev, xref) == NULL) {
		device_printf(dev, "not registered\n");
		return (EINVAL);
	}
	/* The root dispatch requires a filter; see intr_irq_handler(). */
	if (filter == NULL) {
		device_printf(dev, "filter missing\n");
		return (EINVAL);
	}

	/*
	 * Only one interrupt controller can be on the root for now.
	 * Note that we further suppose that there is no threaded interrupt
	 * routine (handler) on the root. See intr_irq_handler().
	 */
	if (intr_irq_root_dev != NULL) {
		device_printf(dev, "another root already set\n");
		return (EBUSY);
	}

	intr_irq_root_dev = dev;
	irq_root_filter = filter;
	irq_root_arg = arg;
	irq_root_ipicount = ipicount;

	debugf("irq root set to %s\n", device_get_nameunit(dev));
	return (0);
}
942
/*
 * Map an interrupt described by 'data' on the PIC identified by
 * (dev, xref) and return its global interrupt number in '*irqp'.
 */
int
intr_map_irq(device_t dev, intptr_t xref, struct intr_map_data *data,
    u_int *irqp)
{
	int error;
	struct intr_irqsrc *isrc;
	struct intr_pic *pic;

	if (data == NULL)
		return (EINVAL);

	pic = pic_lookup(dev, xref);
	if (pic == NULL || pic->pic_dev == NULL)
		return (ESRCH);

	error = PIC_MAP_INTR(pic->pic_dev, data, &isrc);
	if (error == 0)
		*irqp = isrc->isrc_irq;
	return (error);
}
963
int
intr_alloc_irq(device_t dev, struct resource *res)
{
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	/* The resource start may be a real IRQ or a ddata handle. */
	isrc = intr_ddata_lookup(rman_get_start(res), &data);
	if (isrc == NULL)
		return (EINVAL);

	return (PIC_ALLOC_INTR(isrc->isrc_dev, isrc, res, data));
}
979
int
intr_release_irq(device_t dev, struct resource *res)
{
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	/* The resource start may be a real IRQ or a ddata handle. */
	isrc = intr_ddata_lookup(rman_get_start(res), &data);
	if (isrc == NULL)
		return (EINVAL);

	return (PIC_RELEASE_INTR(isrc->isrc_dev, isrc, res, data));
}
995
996int
997intr_setup_irq(device_t dev, struct resource *res, driver_filter_t filt,
998 driver_intr_t hand, void *arg, int flags, void **cookiep)
999{
1000 int error;
1001 struct intr_map_data *data;
1002 struct intr_irqsrc *isrc;
1003 const char *name;
1004
1005 KASSERT(rman_get_start(res) == rman_get_end(res),
1006 ("%s: more interrupts in resource", __func__));
1007
1008 isrc = intr_ddata_lookup(rman_get_start(res), &data);
1009 if (isrc == NULL)
1010 return (EINVAL);
1011
1012 name = device_get_nameunit(dev);
1013
1014#ifdef INTR_SOLO
1015 /*
1016 * Standard handling is done thru MI interrupt framework. However,
1017 * some interrupts could request solely own special handling. This
1018 * non standard handling can be used for interrupt controllers without
1019 * handler (filter only), so in case that interrupt controllers are
1020 * chained, MI interrupt framework is called only in leaf controller.
1021 *
1022 * Note that root interrupt controller routine is served as well,
1023 * however in intr_irq_handler(), i.e. main system dispatch routine.
1024 */
1025 if (flags & INTR_SOLO && hand != NULL) {
1026 debugf("irq %u cannot solo on %s\n", irq, name);
1027 return (EINVAL);
1028 }
1029
1030 if (flags & INTR_SOLO) {
1031 error = iscr_setup_filter(isrc, name, (intr_irq_filter_t *)filt,
1032 arg, cookiep);
1033 debugf("irq %u setup filter error %d on %s\n", irq, error,
1034 name);
1035 } else
1036#endif
1037 {
1038 error = isrc_add_handler(isrc, name, filt, hand, arg, flags,
1039 cookiep);
1040 debugf("irq %u add handler error %d on %s\n", irq, error, name);
1041 }
1042 if (error != 0)
1043 return (error);
1044
1045 mtx_lock(&isrc_table_lock);
1046 error = PIC_SETUP_INTR(isrc->isrc_dev, isrc, res, data);
1047 if (error == 0) {
1048 isrc->isrc_handlers++;
1049 if (isrc->isrc_handlers == 1)
1050 PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
1051 }
1052 mtx_unlock(&isrc_table_lock);
1053 if (error != 0)
1054 intr_event_remove_handler(*cookiep);
1055 return (error);
1056}
1057
int
intr_teardown_irq(device_t dev, struct resource *res, void *cookie)
{
	int error;
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	isrc = intr_ddata_lookup(rman_get_start(res), &data);
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);

#ifdef INTR_SOLO
	/* Solo filter: the cookie is the source itself, not an ie handler. */
	if (isrc->isrc_filter != NULL) {
		if (isrc != cookie)
			return (EINVAL);

		mtx_lock(&isrc_table_lock);
		isrc->isrc_filter = NULL;
		isrc->isrc_arg = NULL;
		isrc->isrc_handlers = 0;
		PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
		PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
		isrc_update_name(isrc, NULL);
		mtx_unlock(&isrc_table_lock);
		return (0);
	}
#endif
	/* Event path: the cookie must belong to this source. */
	if (isrc != intr_handler_source(cookie))
		return (EINVAL);

	error = intr_event_remove_handler(cookie);
	if (error == 0) {
		mtx_lock(&isrc_table_lock);
		isrc->isrc_handlers--;
		/* Disable the source in hardware when the last handler goes. */
		if (isrc->isrc_handlers == 0)
			PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
		PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}
	return (error);
}
1103
int
intr_describe_irq(device_t dev, struct resource *res, void *cookie,
    const char *descr)
{
	int error;
	struct intr_irqsrc *isrc;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	isrc = intr_ddata_lookup(rman_get_start(res), NULL);
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);
#ifdef INTR_SOLO
	/* Solo filter: rename the source directly; cookie is the source. */
	if (isrc->isrc_filter != NULL) {
		if (isrc != cookie)
			return (EINVAL);

		mtx_lock(&isrc_table_lock);
		isrc_update_name(isrc, descr);
		mtx_unlock(&isrc_table_lock);
		return (0);
	}
#endif
	error = intr_event_describe_handler(isrc->isrc_event, cookie, descr);
	if (error == 0) {
		/* Keep the interrupt counter name in sync. */
		mtx_lock(&isrc_table_lock);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}
	return (error);
}
1136
1137#ifdef SMP
int
intr_bind_irq(device_t dev, struct resource *res, int cpu)
{
	struct intr_irqsrc *isrc;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	isrc = intr_ddata_lookup(rman_get_start(res), NULL);
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);
#ifdef INTR_SOLO
	/* Solo filters bypass the event layer; bind the source directly. */
	if (isrc->isrc_filter != NULL)
		return (intr_isrc_assign_cpu(isrc, cpu));
#endif
	return (intr_event_bind(isrc->isrc_event, cpu));
}
1155
1156/*
1157 * Return the CPU that the next interrupt source should use.
1158 * For now just returns the next CPU according to round-robin.
1159 */
u_int
intr_irq_next_cpu(u_int last_cpu, cpuset_t *cpumask)
{

	/* Before shuffling (or on UP), everything stays on this CPU. */
	if (!irq_assign_cpu || mp_ncpus == 1)
		return (PCPU_GET(cpuid));

	/* Advance round-robin to the next CPU present in 'cpumask'. */
	do {
		last_cpu++;
		if (last_cpu > mp_maxid)
			last_cpu = 0;
	} while (!CPU_ISSET(last_cpu, cpumask));
	return (last_cpu);
}
1174
1175/*
1176 * Distribute all the interrupt sources among the available
1177 * CPUs once the AP's have been launched.
1178 */
static void
intr_irq_shuffle(void *arg __unused)
{
	struct intr_irqsrc *isrc;
	u_int i;

	if (mp_ncpus == 1)
		return;

	mtx_lock(&isrc_table_lock);
	irq_assign_cpu = TRUE;
	for (i = 0; i < NIRQ; i++) {
		isrc = irq_sources[i];
		/* Skip empty slots, idle sources, and per-CPU/IPI sources. */
		if (isrc == NULL || isrc->isrc_handlers == 0 ||
		    isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI))
			continue;

		/* A bound source must agree with its event's CPU. */
		if (isrc->isrc_event != NULL &&
		    isrc->isrc_flags & INTR_ISRCF_BOUND &&
		    isrc->isrc_event->ie_cpu != CPU_FFS(&isrc->isrc_cpu) - 1)
			panic("%s: CPU inconsistency", __func__);

		if ((isrc->isrc_flags & INTR_ISRCF_BOUND) == 0)
			CPU_ZERO(&isrc->isrc_cpu); /* start again */

		/*
		 * We are in wicked position here if the following call fails
		 * for bound ISRC. The best thing we can do is to clear
		 * isrc_cpu so inconsistency with ie_cpu will be detectable.
		 */
		if (PIC_BIND_INTR(isrc->isrc_dev, isrc) != 0)
			CPU_ZERO(&isrc->isrc_cpu);
	}
	mtx_unlock(&isrc_table_lock);
}
1214SYSINIT(intr_irq_shuffle, SI_SUB_SMP, SI_ORDER_SECOND, intr_irq_shuffle, NULL);
1215
1216#else
u_int
intr_irq_next_cpu(u_int current_cpu, cpuset_t *cpumask)
{

	/* UP build: all interrupts stay on the only CPU. */
	return (PCPU_GET(cpuid));
}
1223#endif
1224
/* Stub kept for compatibility; INTRNG has no legacy soft interrupts. */
void dosoftints(void);
void
dosoftints(void)
{
}
1230
1231#ifdef SMP
1232/*
1233 * Init interrupt controller on another CPU.
1234 */
void
intr_pic_init_secondary(void)
{

	/*
	 * QQQ: Only root PIC is aware of other CPUs ???
	 */
	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));

	//mtx_lock(&isrc_table_lock);
	PIC_INIT_SECONDARY(intr_irq_root_dev);
	//mtx_unlock(&isrc_table_lock);
}
1248#endif
1249
1250#ifdef DDB
/* DDB "show irqs": dump per-source counters, CPU affinity, and a total. */
DB_SHOW_COMMAND(irqs, db_show_irqs)
{
	u_int i, irqsum;
	u_long num;
	struct intr_irqsrc *isrc;

	for (irqsum = 0, i = 0; i < NIRQ; i++) {
		isrc = irq_sources[i];
		if (isrc == NULL)
			continue;

		/* isrc_count may be NULL before the counter is attached. */
		num = isrc->isrc_count != NULL ? isrc->isrc_count[0] : 0;
		db_printf("irq%-3u <%s>: cpu %02lx%s cnt %lu\n", i,
		    isrc->isrc_name, isrc->isrc_cpu.__bits[0],
		    isrc->isrc_flags & INTR_ISRCF_BOUND ? " (bound)" : "", num);
		irqsum += num;
	}
	db_printf("irq total %u\n", irqsum);
}
1270#endif
620#ifdef INTR_SOLO
621/*
622 * Setup filter into interrupt source.
623 */
624static int
625iscr_setup_filter(struct intr_irqsrc *isrc, const char *name,
626 intr_irq_filter_t *filter, void *arg, void **cookiep)
627{
628
629 if (filter == NULL)
630 return (EINVAL);
631
632 mtx_lock(&isrc_table_lock);
633 /*
634 * Make sure that we do not mix the two ways
635 * how we handle interrupt sources.
636 */
637 if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
638 mtx_unlock(&isrc_table_lock);
639 return (EBUSY);
640 }
641 isrc->isrc_filter = filter;
642 isrc->isrc_arg = arg;
643 isrc_update_name(isrc, name);
644 mtx_unlock(&isrc_table_lock);
645
646 *cookiep = isrc;
647 return (0);
648}
649#endif
650
651/*
652 * Interrupt source pre_ithread method for MI interrupt framework.
653 */
654static void
655intr_isrc_pre_ithread(void *arg)
656{
657 struct intr_irqsrc *isrc = arg;
658
659 PIC_PRE_ITHREAD(isrc->isrc_dev, isrc);
660}
661
662/*
663 * Interrupt source post_ithread method for MI interrupt framework.
664 */
665static void
666intr_isrc_post_ithread(void *arg)
667{
668 struct intr_irqsrc *isrc = arg;
669
670 PIC_POST_ITHREAD(isrc->isrc_dev, isrc);
671}
672
673/*
674 * Interrupt source post_filter method for MI interrupt framework.
675 */
676static void
677intr_isrc_post_filter(void *arg)
678{
679 struct intr_irqsrc *isrc = arg;
680
681 PIC_POST_FILTER(isrc->isrc_dev, isrc);
682}
683
684/*
685 * Interrupt source assign_cpu method for MI interrupt framework.
686 */
687static int
688intr_isrc_assign_cpu(void *arg, int cpu)
689{
690#ifdef SMP
691 struct intr_irqsrc *isrc = arg;
692 int error;
693
694 if (isrc->isrc_dev != intr_irq_root_dev)
695 return (EINVAL);
696
697 mtx_lock(&isrc_table_lock);
698 if (cpu == NOCPU) {
699 CPU_ZERO(&isrc->isrc_cpu);
700 isrc->isrc_flags &= ~INTR_ISRCF_BOUND;
701 } else {
702 CPU_SETOF(cpu, &isrc->isrc_cpu);
703 isrc->isrc_flags |= INTR_ISRCF_BOUND;
704 }
705
706 /*
707 * In NOCPU case, it's up to PIC to either leave ISRC on same CPU or
708 * re-balance it to another CPU or enable it on more CPUs. However,
709 * PIC is expected to change isrc_cpu appropriately to keep us well
710 * informed if the call is successfull.
711 */
712 if (irq_assign_cpu) {
713 error = PIC_BIND_INTR(isrc->isrc_dev, isrc);
714 if (error) {
715 CPU_ZERO(&isrc->isrc_cpu);
716 mtx_unlock(&isrc_table_lock);
717 return (error);
718 }
719 }
720 mtx_unlock(&isrc_table_lock);
721 return (0);
722#else
723 return (EOPNOTSUPP);
724#endif
725}
726
727/*
728 * Create interrupt event for interrupt source.
729 */
730static int
731isrc_event_create(struct intr_irqsrc *isrc)
732{
733 struct intr_event *ie;
734 int error;
735
736 error = intr_event_create(&ie, isrc, 0, isrc->isrc_irq,
737 intr_isrc_pre_ithread, intr_isrc_post_ithread, intr_isrc_post_filter,
738 intr_isrc_assign_cpu, "%s:", isrc->isrc_name);
739 if (error)
740 return (error);
741
742 mtx_lock(&isrc_table_lock);
743 /*
744 * Make sure that we do not mix the two ways
745 * how we handle interrupt sources. Let contested event wins.
746 */
747#ifdef INTR_SOLO
748 if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
749#else
750 if (isrc->isrc_event != NULL) {
751#endif
752 mtx_unlock(&isrc_table_lock);
753 intr_event_destroy(ie);
754 return (isrc->isrc_event != NULL ? EBUSY : 0);
755 }
756 isrc->isrc_event = ie;
757 mtx_unlock(&isrc_table_lock);
758
759 return (0);
760}
761#ifdef notyet
762/*
763 * Destroy interrupt event for interrupt source.
764 */
765static void
766isrc_event_destroy(struct intr_irqsrc *isrc)
767{
768 struct intr_event *ie;
769
770 mtx_lock(&isrc_table_lock);
771 ie = isrc->isrc_event;
772 isrc->isrc_event = NULL;
773 mtx_unlock(&isrc_table_lock);
774
775 if (ie != NULL)
776 intr_event_destroy(ie);
777}
778#endif
779/*
780 * Add handler to interrupt source.
781 */
782static int
783isrc_add_handler(struct intr_irqsrc *isrc, const char *name,
784 driver_filter_t filter, driver_intr_t handler, void *arg,
785 enum intr_type flags, void **cookiep)
786{
787 int error;
788
789 if (isrc->isrc_event == NULL) {
790 error = isrc_event_create(isrc);
791 if (error)
792 return (error);
793 }
794
795 error = intr_event_add_handler(isrc->isrc_event, name, filter, handler,
796 arg, intr_priority(flags), flags, cookiep);
797 if (error == 0) {
798 mtx_lock(&isrc_table_lock);
799 intrcnt_updatename(isrc);
800 mtx_unlock(&isrc_table_lock);
801 }
802
803 return (error);
804}
805
806/*
807 * Lookup interrupt controller locked.
808 */
809static inline struct intr_pic *
810pic_lookup_locked(device_t dev, intptr_t xref)
811{
812 struct intr_pic *pic;
813
814 mtx_assert(&pic_list_lock, MA_OWNED);
815
816 if (dev == NULL && xref == 0)
817 return (NULL);
818
819 /* Note that pic->pic_dev is never NULL on registered PIC. */
820 SLIST_FOREACH(pic, &pic_list, pic_next) {
821 if (dev == NULL) {
822 if (xref == pic->pic_xref)
823 return (pic);
824 } else if (xref == 0 || pic->pic_xref == 0) {
825 if (dev == pic->pic_dev)
826 return (pic);
827 } else if (xref == pic->pic_xref && dev == pic->pic_dev)
828 return (pic);
829 }
830 return (NULL);
831}
832
833/*
834 * Lookup interrupt controller.
835 */
836static struct intr_pic *
837pic_lookup(device_t dev, intptr_t xref)
838{
839 struct intr_pic *pic;
840
841 mtx_lock(&pic_list_lock);
842 pic = pic_lookup_locked(dev, xref);
843 mtx_unlock(&pic_list_lock);
844 return (pic);
845}
846
847/*
848 * Create interrupt controller.
849 */
850static struct intr_pic *
851pic_create(device_t dev, intptr_t xref)
852{
853 struct intr_pic *pic;
854
855 mtx_lock(&pic_list_lock);
856 pic = pic_lookup_locked(dev, xref);
857 if (pic != NULL) {
858 mtx_unlock(&pic_list_lock);
859 return (pic);
860 }
861 pic = malloc(sizeof(*pic), M_INTRNG, M_NOWAIT | M_ZERO);
862 pic->pic_xref = xref;
863 pic->pic_dev = dev;
864 SLIST_INSERT_HEAD(&pic_list, pic, pic_next);
865 mtx_unlock(&pic_list_lock);
866
867 return (pic);
868}
869#ifdef notyet
870/*
871 * Destroy interrupt controller.
872 */
873static void
874pic_destroy(device_t dev, intptr_t xref)
875{
876 struct intr_pic *pic;
877
878 mtx_lock(&pic_list_lock);
879 pic = pic_lookup_locked(dev, xref);
880 if (pic == NULL) {
881 mtx_unlock(&pic_list_lock);
882 return;
883 }
884 SLIST_REMOVE(&pic_list, pic, intr_pic, pic_next);
885 mtx_unlock(&pic_list_lock);
886
887 free(pic, M_INTRNG);
888}
889#endif
890/*
891 * Register interrupt controller.
892 */
893int
894intr_pic_register(device_t dev, intptr_t xref)
895{
896 struct intr_pic *pic;
897
898 if (dev == NULL)
899 return (EINVAL);
900 pic = pic_create(dev, xref);
901 if (pic == NULL)
902 return (ENOMEM);
903
904 debugf("PIC %p registered for %s <dev %p, xref %x>\n", pic,
905 device_get_nameunit(dev), dev, xref);
906 return (0);
907}
908
909/*
910 * Unregister interrupt controller.
911 */
912int
913intr_pic_deregister(device_t dev, intptr_t xref)
914{
915
916 panic("%s: not implemented", __func__);
917}
918
919/*
920 * Mark interrupt controller (itself) as a root one.
921 *
922 * Note that only an interrupt controller can really know its position
923 * in interrupt controller's tree. So root PIC must claim itself as a root.
924 *
925 * In FDT case, according to ePAPR approved version 1.1 from 08 April 2011,
926 * page 30:
927 * "The root of the interrupt tree is determined when traversal
928 * of the interrupt tree reaches an interrupt controller node without
929 * an interrupts property and thus no explicit interrupt parent."
930 */
931int
932intr_pic_claim_root(device_t dev, intptr_t xref, intr_irq_filter_t *filter,
933 void *arg, u_int ipicount)
934{
935
936 if (pic_lookup(dev, xref) == NULL) {
937 device_printf(dev, "not registered\n");
938 return (EINVAL);
939 }
940 if (filter == NULL) {
941 device_printf(dev, "filter missing\n");
942 return (EINVAL);
943 }
944
945 /*
946 * Only one interrupt controllers could be on the root for now.
947 * Note that we further suppose that there is not threaded interrupt
948 * routine (handler) on the root. See intr_irq_handler().
949 */
950 if (intr_irq_root_dev != NULL) {
951 device_printf(dev, "another root already set\n");
952 return (EBUSY);
953 }
954
955 intr_irq_root_dev = dev;
956 irq_root_filter = filter;
957 irq_root_arg = arg;
958 irq_root_ipicount = ipicount;
959
960 debugf("irq root set to %s\n", device_get_nameunit(dev));
961 return (0);
962}
963
964int
965intr_map_irq(device_t dev, intptr_t xref, struct intr_map_data *data,
966 u_int *irqp)
967{
968 int error;
969 struct intr_irqsrc *isrc;
970 struct intr_pic *pic;
971
972 if (data == NULL)
973 return (EINVAL);
974
975 pic = pic_lookup(dev, xref);
976 if (pic == NULL || pic->pic_dev == NULL)
977 return (ESRCH);
978
979 error = PIC_MAP_INTR(pic->pic_dev, data, &isrc);
980 if (error == 0)
981 *irqp = isrc->isrc_irq;
982 return (error);
983}
984
985int
986intr_alloc_irq(device_t dev, struct resource *res)
987{
988 struct intr_map_data *data;
989 struct intr_irqsrc *isrc;
990
991 KASSERT(rman_get_start(res) == rman_get_end(res),
992 ("%s: more interrupts in resource", __func__));
993
994 isrc = intr_ddata_lookup(rman_get_start(res), &data);
995 if (isrc == NULL)
996 return (EINVAL);
997
998 return (PIC_ALLOC_INTR(isrc->isrc_dev, isrc, res, data));
999}
1000
1001int
1002intr_release_irq(device_t dev, struct resource *res)
1003{
1004 struct intr_map_data *data;
1005 struct intr_irqsrc *isrc;
1006
1007 KASSERT(rman_get_start(res) == rman_get_end(res),
1008 ("%s: more interrupts in resource", __func__));
1009
1010 isrc = intr_ddata_lookup(rman_get_start(res), &data);
1011 if (isrc == NULL)
1012 return (EINVAL);
1013
1014 return (PIC_RELEASE_INTR(isrc->isrc_dev, isrc, res, data));
1015}
1016
1017int
1018intr_setup_irq(device_t dev, struct resource *res, driver_filter_t filt,
1019 driver_intr_t hand, void *arg, int flags, void **cookiep)
1020{
1021 int error;
1022 struct intr_map_data *data;
1023 struct intr_irqsrc *isrc;
1024 const char *name;
1025
1026 KASSERT(rman_get_start(res) == rman_get_end(res),
1027 ("%s: more interrupts in resource", __func__));
1028
1029 isrc = intr_ddata_lookup(rman_get_start(res), &data);
1030 if (isrc == NULL)
1031 return (EINVAL);
1032
1033 name = device_get_nameunit(dev);
1034
1035#ifdef INTR_SOLO
1036 /*
1037 * Standard handling is done thru MI interrupt framework. However,
1038 * some interrupts could request solely own special handling. This
1039 * non standard handling can be used for interrupt controllers without
1040 * handler (filter only), so in case that interrupt controllers are
1041 * chained, MI interrupt framework is called only in leaf controller.
1042 *
1043 * Note that root interrupt controller routine is served as well,
1044 * however in intr_irq_handler(), i.e. main system dispatch routine.
1045 */
1046 if (flags & INTR_SOLO && hand != NULL) {
1047 debugf("irq %u cannot solo on %s\n", irq, name);
1048 return (EINVAL);
1049 }
1050
1051 if (flags & INTR_SOLO) {
1052 error = iscr_setup_filter(isrc, name, (intr_irq_filter_t *)filt,
1053 arg, cookiep);
1054 debugf("irq %u setup filter error %d on %s\n", irq, error,
1055 name);
1056 } else
1057#endif
1058 {
1059 error = isrc_add_handler(isrc, name, filt, hand, arg, flags,
1060 cookiep);
1061 debugf("irq %u add handler error %d on %s\n", irq, error, name);
1062 }
1063 if (error != 0)
1064 return (error);
1065
1066 mtx_lock(&isrc_table_lock);
1067 error = PIC_SETUP_INTR(isrc->isrc_dev, isrc, res, data);
1068 if (error == 0) {
1069 isrc->isrc_handlers++;
1070 if (isrc->isrc_handlers == 1)
1071 PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
1072 }
1073 mtx_unlock(&isrc_table_lock);
1074 if (error != 0)
1075 intr_event_remove_handler(*cookiep);
1076 return (error);
1077}
1078
/*
 * Tear down an interrupt handler previously installed on the interrupt
 * source backing 'res'.  The resource must describe exactly one interrupt.
 *
 * Returns 0 on success, EINVAL if the source is unknown, has no handlers,
 * or 'cookie' does not belong to it; otherwise the error from
 * intr_event_remove_handler().
 */
int
intr_teardown_irq(device_t dev, struct resource *res, void *cookie)
{
	int error;
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;

	/* Only single-interrupt resources are supported. */
	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	isrc = intr_ddata_lookup(rman_get_start(res), &data);
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);

#ifdef INTR_SOLO
	/*
	 * Solo-filter path: the cookie is the interrupt source itself and
	 * the whole source is dismantled under the table lock — clear the
	 * filter, disable the interrupt and let the PIC tear it down.
	 */
	if (isrc->isrc_filter != NULL) {
		if (isrc != cookie)
			return (EINVAL);

		mtx_lock(&isrc_table_lock);
		isrc->isrc_filter = NULL;
		isrc->isrc_arg = NULL;
		isrc->isrc_handlers = 0;
		PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
		PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
		isrc_update_name(isrc, NULL);
		mtx_unlock(&isrc_table_lock);
		return (0);
	}
#endif
	/* Regular path: the cookie must belong to this source's event. */
	if (isrc != intr_handler_source(cookie))
		return (EINVAL);

	/*
	 * Remove the handler first; only on success is the bookkeeping
	 * updated.  The interrupt is disabled when the last handler goes.
	 */
	error = intr_event_remove_handler(cookie);
	if (error == 0) {
		mtx_lock(&isrc_table_lock);
		isrc->isrc_handlers--;
		if (isrc->isrc_handlers == 0)
			PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
		PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}
	return (error);
}
1124
1125int
1126intr_describe_irq(device_t dev, struct resource *res, void *cookie,
1127 const char *descr)
1128{
1129 int error;
1130 struct intr_irqsrc *isrc;
1131
1132 KASSERT(rman_get_start(res) == rman_get_end(res),
1133 ("%s: more interrupts in resource", __func__));
1134
1135 isrc = intr_ddata_lookup(rman_get_start(res), NULL);
1136 if (isrc == NULL || isrc->isrc_handlers == 0)
1137 return (EINVAL);
1138#ifdef INTR_SOLO
1139 if (isrc->isrc_filter != NULL) {
1140 if (isrc != cookie)
1141 return (EINVAL);
1142
1143 mtx_lock(&isrc_table_lock);
1144 isrc_update_name(isrc, descr);
1145 mtx_unlock(&isrc_table_lock);
1146 return (0);
1147 }
1148#endif
1149 error = intr_event_describe_handler(isrc->isrc_event, cookie, descr);
1150 if (error == 0) {
1151 mtx_lock(&isrc_table_lock);
1152 intrcnt_updatename(isrc);
1153 mtx_unlock(&isrc_table_lock);
1154 }
1155 return (error);
1156}
1157
1158#ifdef SMP
1159int
1160intr_bind_irq(device_t dev, struct resource *res, int cpu)
1161{
1162 struct intr_irqsrc *isrc;
1163
1164 KASSERT(rman_get_start(res) == rman_get_end(res),
1165 ("%s: more interrupts in resource", __func__));
1166
1167 isrc = intr_ddata_lookup(rman_get_start(res), NULL);
1168 if (isrc == NULL || isrc->isrc_handlers == 0)
1169 return (EINVAL);
1170#ifdef INTR_SOLO
1171 if (isrc->isrc_filter != NULL)
1172 return (intr_isrc_assign_cpu(isrc, cpu));
1173#endif
1174 return (intr_event_bind(isrc->isrc_event, cpu));
1175}
1176
1177/*
1178 * Return the CPU that the next interrupt source should use.
1179 * For now just returns the next CPU according to round-robin.
1180 */
1181u_int
1182intr_irq_next_cpu(u_int last_cpu, cpuset_t *cpumask)
1183{
1184
1185 if (!irq_assign_cpu || mp_ncpus == 1)
1186 return (PCPU_GET(cpuid));
1187
1188 do {
1189 last_cpu++;
1190 if (last_cpu > mp_maxid)
1191 last_cpu = 0;
1192 } while (!CPU_ISSET(last_cpu, cpumask));
1193 return (last_cpu);
1194}
1195
1196/*
1197 * Distribute all the interrupt sources among the available
1198 * CPUs once the AP's have been launched.
1199 */
1200static void
1201intr_irq_shuffle(void *arg __unused)
1202{
1203 struct intr_irqsrc *isrc;
1204 u_int i;
1205
1206 if (mp_ncpus == 1)
1207 return;
1208
1209 mtx_lock(&isrc_table_lock);
1210 irq_assign_cpu = TRUE;
1211 for (i = 0; i < NIRQ; i++) {
1212 isrc = irq_sources[i];
1213 if (isrc == NULL || isrc->isrc_handlers == 0 ||
1214 isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI))
1215 continue;
1216
1217 if (isrc->isrc_event != NULL &&
1218 isrc->isrc_flags & INTR_ISRCF_BOUND &&
1219 isrc->isrc_event->ie_cpu != CPU_FFS(&isrc->isrc_cpu) - 1)
1220 panic("%s: CPU inconsistency", __func__);
1221
1222 if ((isrc->isrc_flags & INTR_ISRCF_BOUND) == 0)
1223 CPU_ZERO(&isrc->isrc_cpu); /* start again */
1224
1225 /*
1226 * We are in wicked position here if the following call fails
1227 * for bound ISRC. The best thing we can do is to clear
1228 * isrc_cpu so inconsistency with ie_cpu will be detectable.
1229 */
1230 if (PIC_BIND_INTR(isrc->isrc_dev, isrc) != 0)
1231 CPU_ZERO(&isrc->isrc_cpu);
1232 }
1233 mtx_unlock(&isrc_table_lock);
1234}
1235SYSINIT(intr_irq_shuffle, SI_SUB_SMP, SI_ORDER_SECOND, intr_irq_shuffle, NULL);
1236
1237#else
/*
 * UP variant: all interrupts are serviced by the only CPU.
 */
u_int
intr_irq_next_cpu(u_int current_cpu, cpuset_t *cpumask)
{

	return (PCPU_GET(cpuid));
}
1244#endif
1245
/*
 * Empty stub kept for machine-dependent code that expects a dosoftints()
 * hook; this framework has no separate soft-interrupt dispatch here.
 * NOTE(review): purpose inferred from the empty body — confirm callers.
 */
void dosoftints(void);
void
dosoftints(void)
{
}
1251
1252#ifdef SMP
1253/*
1254 * Init interrupt controller on another CPU.
1255 */
1256void
1257intr_pic_init_secondary(void)
1258{
1259
1260 /*
1261 * QQQ: Only root PIC is aware of other CPUs ???
1262 */
1263 KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
1264
1265 //mtx_lock(&isrc_table_lock);
1266 PIC_INIT_SECONDARY(intr_irq_root_dev);
1267 //mtx_unlock(&isrc_table_lock);
1268}
1269#endif
1270
1271#ifdef DDB
1272DB_SHOW_COMMAND(irqs, db_show_irqs)
1273{
1274 u_int i, irqsum;
1275 u_long num;
1276 struct intr_irqsrc *isrc;
1277
1278 for (irqsum = 0, i = 0; i < NIRQ; i++) {
1279 isrc = irq_sources[i];
1280 if (isrc == NULL)
1281 continue;
1282
1283 num = isrc->isrc_count != NULL ? isrc->isrc_count[0] : 0;
1284 db_printf("irq%-3u <%s>: cpu %02lx%s cnt %lu\n", i,
1285 isrc->isrc_name, isrc->isrc_cpu.__bits[0],
1286 isrc->isrc_flags & INTR_ISRCF_BOUND ? " (bound)" : "", num);
1287 irqsum += num;
1288 }
1289 db_printf("irq total %u\n", irqsum);
1290}
1291#endif