kern_intr.c revision 177901
/*-
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_intr.c 177901 2008-04-04 01:03:23Z jeff $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
	struct intr_event *it_event;
	struct thread *it_thread;	/* Kernel thread. */
	int	it_flags;		/* (j) IT_* flags. */
	int	it_need;		/* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define	IT_DEAD		0x000001	/* Thread is waiting to exit. */

struct	intr_entropy {
	struct	thread *td;
	uintptr_t event;
};

struct	intr_event *clk_intr_event;
struct	intr_event *tty_intr_event;
void	*vm_ih;
struct proc *intrproc;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 1000;
TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
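
/*
 * For example (the value here is illustrative, not a recommendation),
 * the threshold can be set from loader.conf(5) at boot or adjusted with
 * sysctl(8) on a running system:
 *
 *	hw.intr_storm_threshold="500"		(in /boot/loader.conf)
 *	sysctl hw.intr_storm_threshold=500	(at runtime)
 *
 * Setting the threshold to 0 disables storm protection entirely; see
 * ithread_execute_handlers() below.
 */
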
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);

static void	intr_event_update(struct intr_event *ie);
#ifdef INTR_FILTER
static struct intr_thread *ithread_create(const char *name,
			      struct intr_handler *ih);
#else
static struct intr_thread *ithread_create(const char *name);
#endif
static void	ithread_destroy(struct intr_thread *ithread);
static void	ithread_execute_handlers(struct proc *p,
		    struct intr_event *ie);
#ifdef INTR_FILTER
static void	priv_ithread_execute_handler(struct proc *p,
		    struct intr_handler *ih);
#endif
static void	ithread_loop(void *);
static void	ithread_update(struct intr_thread *ithd);
static void	start_softintr(void *);

/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
	u_char pri;

	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
	switch (flags) {
	case INTR_TYPE_TTY:
		pri = PI_TTYLOW;
		break;
	case INTR_TYPE_BIO:
		/*
		 * XXX We need to refine this.  BSD/OS distinguishes
		 * between tape and disk priorities.
		 */
		pri = PI_DISK;
		break;
	case INTR_TYPE_NET:
		pri = PI_NET;
		break;
	case INTR_TYPE_CAM:
		pri = PI_DISK;		/* XXX or PI_CAM? */
		break;
	case INTR_TYPE_AV:		/* Audio/video */
		pri = PI_AV;
		break;
	case INTR_TYPE_CLK:
		pri = PI_REALTIME;
		break;
	case INTR_TYPE_MISC:
		pri = PI_DULL;		/* don't care */
		break;
	default:
		/* We didn't specify an interrupt level. */
		panic("intr_priority: no interrupt type in flags");
	}

	return (pri);
}
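
/*
 * As an illustration (hypothetical driver, not part of this file), a
 * network driver that registers its interrupt with
 *
 *	bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
 *	    NULL, mydev_intr, sc, &sc->intr_cookie);
 *
 * ends up routed through here with INTR_TYPE_NET, so its ithread runs
 * at PI_NET.
 */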

/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
	struct intr_event *ie;
	struct thread *td;
	u_char pri;

	ie = ithd->it_event;
	td = ithd->it_thread;

	/* Determine the overall priority of this event. */
	if (TAILQ_EMPTY(&ie->ie_handlers))
		pri = PRI_MAX_ITHD;
	else
		pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;

	/* Update name and priority. */
	strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
	thread_lock(td);
	sched_prio(td, pri);
	thread_unlock(td);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
	struct intr_handler *ih;
	char *last;
	int missed, space;

	/* Start off with no entropy and just the name of the event. */
	mtx_assert(&ie->ie_lock, MA_OWNED);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	ie->ie_flags &= ~IE_ENTROPY;
	missed = 0;
	space = 1;

	/* Run through all the handlers updating values. */
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
		    sizeof(ie->ie_fullname)) {
			strcat(ie->ie_fullname, " ");
			strcat(ie->ie_fullname, ih->ih_name);
			space = 0;
		} else
			missed++;
		if (ih->ih_flags & IH_ENTROPY)
			ie->ie_flags |= IE_ENTROPY;
	}

	/*
	 * If the handler names were too long, add +'s to indicate missing
	 * names.  If we run out of room and still have +'s to add, change
	 * the last character from a + to a *.
	 */
	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
	while (missed-- > 0) {
		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
			if (*last == '+') {
				*last = '*';
				break;
			} else
				*last = '+';
		} else if (space) {
			strcat(ie->ie_fullname, " +");
			space = 0;
		} else
			strcat(ie->ie_fullname, "+");
	}

	/*
	 * If this event has an ithread, update its priority and
	 * name.
	 */
	if (ie->ie_thread != NULL)
		ithread_update(ie->ie_thread);
	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}

int
intr_event_create(struct intr_event **event, void *source, int flags,
    void (*disable)(void *), void (*enable)(void *), void (*eoi)(void *),
    int (*assign_cpu)(void *, u_char), const char *fmt, ...)
{
	struct intr_event *ie;
	va_list ap;

	/* The only valid flag during creation is IE_SOFT. */
	if ((flags & ~IE_SOFT) != 0)
		return (EINVAL);
	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
	ie->ie_source = source;
	ie->ie_disable = disable;
	ie->ie_enable = enable;
	ie->ie_eoi = eoi;
	ie->ie_assign_cpu = assign_cpu;
	ie->ie_flags = flags;
	ie->ie_cpu = NOCPU;
	TAILQ_INIT(&ie->ie_handlers);
	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
	va_end(ap);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	mtx_pool_lock(mtxpool_sleep, &event_list);
	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
	mtx_pool_unlock(mtxpool_sleep, &event_list);
	if (event != NULL)
		*event = ie;
	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
	return (0);
}

/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will
 * be bound to the specified CPU.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
int
intr_event_bind(struct intr_event *ie, u_char cpu)
{
	struct thread *td;
	int error;

	/* Need a CPU to bind to. */
	if (cpu != NOCPU && CPU_ABSENT(cpu))
		return (EINVAL);

	if (ie->ie_assign_cpu == NULL)
		return (EOPNOTSUPP);

	/* Don't allow a bind request if the interrupt is already bound. */
	mtx_lock(&ie->ie_lock);
	if (ie->ie_cpu != NOCPU && cpu != NOCPU) {
		mtx_unlock(&ie->ie_lock);
		return (EBUSY);
	}
	mtx_unlock(&ie->ie_lock);

	error = ie->ie_assign_cpu(ie->ie_source, cpu);
	if (error)
		return (error);
	mtx_lock(&ie->ie_lock);
	if (ie->ie_thread != NULL)
		td = ie->ie_thread->it_thread;
	else
		td = NULL;
	if (td != NULL)
		thread_lock(td);
	ie->ie_cpu = cpu;
	if (td != NULL)
		thread_unlock(td);
	mtx_unlock(&ie->ie_lock);
	return (0);
}
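
/*
 * Hypothetical usage sketch: MD code that distributes interrupts among
 * processors might call
 *
 *	error = intr_event_bind(ie, 2);		(bind event to CPU 2)
 *	error = intr_event_bind(ie, NOCPU);	(remove the binding)
 *
 * and must be prepared for EOPNOTSUPP on platforms that do not supply
 * an ie_assign_cpu hook.
 */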

int
intr_event_destroy(struct intr_event *ie)
{

	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		mtx_unlock(&ie->ie_lock);
		return (EBUSY);
	}
	mtx_pool_lock(mtxpool_sleep, &event_list);
	TAILQ_REMOVE(&event_list, ie, ie_list);
	mtx_pool_unlock(mtxpool_sleep, &event_list);
#ifndef notyet
	if (ie->ie_thread != NULL) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	mtx_destroy(&ie->ie_lock);
	free(ie, M_ITHREAD);
	return (0);
}

#ifndef INTR_FILTER
static struct intr_thread *
ithread_create(const char *name)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
		    &td, RFSTOPPED | RFHIGHPID,
		    0, "intr", "%s", name);
	if (error)
		panic("kproc_kthread_add() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
#else
static struct intr_thread *
ithread_create(const char *name, struct intr_handler *ih)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ih, &intrproc,
		    &td, RFSTOPPED | RFHIGHPID,
		    0, "intr", "%s", name);
	if (error)
		panic("kproc_kthread_add() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
#endif

static void
ithread_destroy(struct intr_thread *ithread)
{
	struct thread *td;

	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
	td = ithread->it_thread;
	thread_lock(td);
	ithread->it_flags |= IT_DEAD;
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	}
	thread_unlock(td);
}

#ifndef INTR_FILTER
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	ih->ih_name = name;
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	/* Create a thread if we need one. */
	while (ie->ie_thread == NULL && handler != NULL) {
		if (ie->ie_flags & IE_ADDING_THREAD)
			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
		else {
			ie->ie_flags |= IE_ADDING_THREAD;
			mtx_unlock(&ie->ie_lock);
			it = ithread_create("intr: newborn");
			mtx_lock(&ie->ie_lock);
			ie->ie_flags &= ~IE_ADDING_THREAD;
			ie->ie_thread = it;
			it->it_event = ie;
			ithread_update(it);
			wakeup(ie);
		}
	}
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
#else
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	ih->ih_name = name;
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	/* For filtered handlers, create a private ithread to run on. */
	if (filter != NULL && handler != NULL) {
		mtx_unlock(&ie->ie_lock);
		it = ithread_create("intr: newborn", ih);
		mtx_lock(&ie->ie_lock);
		it->it_event = ie;
		ih->ih_thread = it;
		ithread_update(it);	/* XXX - do we really need this?!?!? */
	} else { /* Create the global per-event thread if we need one. */
		while (ie->ie_thread == NULL && handler != NULL) {
			if (ie->ie_flags & IE_ADDING_THREAD)
				msleep(ie, &ie->ie_lock, 0, "ithread", 0);
			else {
				ie->ie_flags |= IE_ADDING_THREAD;
				mtx_unlock(&ie->ie_lock);
				it = ithread_create("intr: newborn", ih);
				mtx_lock(&ie->ie_lock);
				ie->ie_flags &= ~IE_ADDING_THREAD;
				ie->ie_thread = it;
				it->it_event = ie;
				ithread_update(it);
				wakeup(ie);
			}
		}
	}
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
#endif
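
/*
 * Illustrative call (modeled on how MD interrupt code typically uses
 * this API; the variable names are hypothetical):
 *
 *	error = intr_event_add_handler(ie, device_get_nameunit(dev),
 *	    filt, intr, arg, intr_priority(flags), flags, &cookie);
 *
 * The cookie returned through the last argument is what is later handed
 * to intr_event_remove_handler() to tear the handler down.
 */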

/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
	struct intr_handler *ih;
	struct intr_event *ie;

	ih = (struct intr_handler *)cookie;
	if (ih == NULL)
		return (NULL);
	ie = ih->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    ih->ih_name));
	return (ie->ie_source);
}

#ifndef INTR_FILTER
int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    ih->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there is no ithread, then just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another
	 * CPU!
	 */
	if (ie->ie_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	thread_lock(ie->ie_thread->it_thread);
	if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		ie->ie_thread->it_need = 1;
	} else
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
	thread_unlock(ie->ie_thread->it_thread);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	intr_event_update(ie);
#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(4).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (!(ih->ih_flags & IH_FAST)) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

int
intr_event_schedule_thread(struct intr_event *ie)
{
	struct intr_entropy entropy;
	struct intr_thread *it;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
	    ie->ie_thread == NULL)
		return (EINVAL);

	ctd = curthread;
	it = ie->ie_thread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && (ie->ie_flags & IE_ENTROPY)) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, td->td_name);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 */
	it->it_need = 1;
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
#else
int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
	struct intr_thread *it;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    ih->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there are no ithreads (per event and per handler), then
	 * just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another CPU!
	 */
	if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/* Private or global ithread? */
	it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	thread_lock(it->it_thread);
	if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		it->it_need = 1;
	} else
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
	thread_unlock(it->it_thread);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	/*
	 * At this point, the handler has been disconnected from the event,
	 * so we can kill the private ithread if any.
	 */
	if (handler->ih_thread) {
		ithread_destroy(handler->ih_thread);
		handler->ih_thread = NULL;
	}
	intr_event_update(ie);
#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(4).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (handler != NULL) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

int
intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
{
	struct intr_entropy entropy;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
		return (EINVAL);

	ctd = curthread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && (ie->ie_flags & IE_ENTROPY)) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, td->td_name);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 */
	it->it_need = 1;
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
#endif

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
	    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct intr_event *ie;
	int error;

	if (flags & INTR_ENTROPY)
		return (EINVAL);

	ie = (eventp != NULL) ? *eventp : NULL;

	if (ie != NULL) {
		if (!(ie->ie_flags & IE_SOFT))
			return (EINVAL);
	} else {
		error = intr_event_create(&ie, NULL, IE_SOFT,
		    NULL, NULL, NULL, NULL, "swi%d:", pri);
		if (error)
			return (error);
		if (eventp != NULL)
			*eventp = ie;
	}
	error = intr_event_add_handler(ie, name, NULL, handler, arg,
	    (pri * RQ_PPQ) + PI_SOFT, flags, cookiep);
	if (error)
		return (error);
	if (pri == SWI_CLOCK) {
		struct proc *p;
		p = ie->ie_thread->it_thread->td_proc;
		PROC_LOCK(p);
		p->p_flag |= P_NOLOAD;
		PROC_UNLOCK(p);
	}
	return (0);
}

/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intr_handler *ih = (struct intr_handler *)cookie;
	struct intr_event *ie = ih->ih_event;
	int error;

	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
	    ih->ih_need);

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	atomic_store_rel_int(&ih->ih_need, 1);

	if (!(flags & SWI_DELAY)) {
		PCPU_INC(cnt.v_soft);
#ifdef INTR_FILTER
		error = intr_event_schedule_thread(ie, ie->ie_thread);
#else
		error = intr_event_schedule_thread(ie);
#endif
		KASSERT(error == 0, ("stray software interrupt"));
	}
}

/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

	return (intr_event_remove_handler(cookie));
}
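
/*
 * Example usage of the swi API (hypothetical consumer; the mydev names
 * are invented for illustration):
 *
 *	static void *mydev_si;
 *
 *	swi_add(NULL, "mydev", mydev_swi, sc, SWI_NET, INTR_MPSAFE,
 *	    &mydev_si);			(register the handler)
 *	swi_sched(mydev_si, 0);		(schedule it to run now)
 *	swi_sched(mydev_si, SWI_DELAY);	(mark pending; run on next pass)
 *	swi_remove(mydev_si);		(tear it down)
 */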

#ifdef INTR_FILTER
static void
priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
{
	struct intr_event *ie;

	ie = ih->ih_event;
	/*
	 * If this handler is marked for death, remove it from
	 * the list of handlers and wake up the sleeper.
	 */
	if (ih->ih_flags & IH_DEAD) {
		mtx_lock(&ie->ie_lock);
		TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
		ih->ih_flags &= ~IH_DEAD;
		wakeup(ih);
		mtx_unlock(&ie->ie_lock);
		return;
	}

	/* Execute this handler. */
	CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
	     __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
	     ih->ih_name, ih->ih_flags);

	if (!(ih->ih_flags & IH_MPSAFE))
		mtx_lock(&Giant);
	ih->ih_handler(ih->ih_argument);
	if (!(ih->ih_flags & IH_MPSAFE))
		mtx_unlock(&Giant);
}
#endif

static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{
	struct intr_handler *ih, *ihn;

	/* Interrupt handlers should not sleep. */
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_NO_SLEEPING();
	TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {

		/*
		 * If this handler is marked for death, remove it from
		 * the list of handlers and wake up the sleeper.
		 */
		if (ih->ih_flags & IH_DEAD) {
			mtx_lock(&ie->ie_lock);
			TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
			ih->ih_flags &= ~IH_DEAD;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
			continue;
		}

		/* Skip filter-only handlers. */
		if (ih->ih_handler == NULL)
			continue;

		/*
		 * For software interrupt threads, we only execute
		 * handlers that have their need flag set.  Hardware
		 * interrupt threads always invoke all of their handlers.
		 */
		if (ie->ie_flags & IE_SOFT) {
			if (!ih->ih_need)
				continue;
			else
				atomic_store_rel_int(&ih->ih_need, 0);
		}

		/* Execute this handler. */
		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
		    __func__, p->p_pid, (void *)ih->ih_handler,
		    ih->ih_argument, ih->ih_name, ih->ih_flags);

		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_lock(&Giant);
		ih->ih_handler(ih->ih_argument);
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_unlock(&Giant);
	}
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_SLEEPING_OK();

	/*
	 * Interrupt storm handling:
	 *
	 * If this interrupt source is currently storming, then throttle
	 * it to only fire the handler once per clock tick.
	 *
	 * If this interrupt source is not currently storming, but the
	 * number of back to back interrupts exceeds the storm threshold,
	 * then enter storming mode.
	 */
	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
	    !(ie->ie_flags & IE_SOFT)) {
		/* Report the message only once every second. */
		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
			printf(
	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
			    ie->ie_name);
		}
		pause("istorm", 1);
	} else
		ie->ie_count++;

	/*
	 * Now that all the handlers have had a chance to run, reenable
	 * the interrupt source.
	 */
	if (ie->ie_enable != NULL)
		ie->ie_enable(ie->ie_source);
}
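
/*
 * To make the throttle concrete (assuming the common default of hz=1000
 * and the default threshold of 1000): a wedged source that would
 * otherwise re-fire back to back is limited to roughly one handler pass
 * per tick, i.e. about 1000 passes per second, rather than consuming
 * the CPU entirely.
 */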

#ifndef INTR_FILTER
/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	u_char cpu;

	td = curthread;
	p = td->td_proc;
	ithd = (struct intr_thread *)arg;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;
	cpu = NOCPU;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 */
		while (ithd->it_need) {
			/*
			 * This might need a full read and write barrier
			 * to make sure that this write posts before any
			 * of the memory or device accesses in the
			 * handlers.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
			ithread_execute_handlers(p, ie);
		}
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL, NULL);
		}

#ifdef SMP
		/*
		 * Ensure we are bound to the correct CPU.  We can't
		 * move ithreads until SMP is running however, so just
		 * leave interrupts on the boot CPU during boot.
		 */
		if (ie->ie_cpu != cpu && smp_started) {
			cpu = ie->ie_cpu;
			if (cpu == NOCPU)
				sched_unbind(td);
			else
				sched_bind(td, cpu);
		}
#endif
		thread_unlock(td);
	}
}
#else
/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_handler *ih;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	int priv;
	u_char cpu;

	td = curthread;
	p = td->td_proc;
	ih = (struct intr_handler *)arg;
	priv = (ih->ih_thread != NULL) ? 1 : 0;
	ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;
	cpu = NOCPU;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 */
		while (ithd->it_need) {
			/*
			 * This might need a full read and write barrier
			 * to make sure that this write posts before any
			 * of the memory or device accesses in the
			 * handlers.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
			if (priv)
				priv_ithread_execute_handler(p, ih);
			else
				ithread_execute_handlers(p, ie);
		}
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL, NULL);
		}

#ifdef SMP
		/*
		 * Ensure we are bound to the correct CPU.  We can't
		 * move ithreads until SMP is running however, so just
		 * leave interrupts on the boot CPU during boot.
		 */
		if (!priv && ie->ie_cpu != cpu && smp_started) {
			cpu = ie->ie_cpu;
			if (cpu == NOCPU)
				sched_unbind(td);
			else
				sched_bind(td, cpu);
		}
#endif
		thread_unlock(td);
	}
}

/*
 * Main loop for interrupt filter.
 *
 * Some architectures (i386, amd64 and arm) require the optional frame
 * parameter, and use it as the main argument for fast handler execution
 * when ih_argument == NULL.
 *
 * Return value:
 * o FILTER_STRAY:              No filter recognized the event, and no
 *                              filter-less handler is registered on this
 *                              line.
 * o FILTER_HANDLED:            A filter claimed the event and served it.
 * o FILTER_SCHEDULE_THREAD:    No filter claimed the event, but there's at
 *                              least one filter-less handler on this line.
 * o FILTER_HANDLED |
 *   FILTER_SCHEDULE_THREAD:    A filter claimed the event, and asked for
 *                              scheduling the per-handler ithread.
 *
 * In case an ithread has to be scheduled, in *ithd there will be a
 * pointer to a struct intr_thread containing the thread to be
 * scheduled.
 */

int
intr_filter_loop(struct intr_event *ie, struct trapframe *frame,
		 struct intr_thread **ithd)
{
	struct intr_handler *ih;
	void *arg;
	int ret, thread_only;

	ret = 0;
	thread_only = 0;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		/*
		 * Execute fast interrupt handlers directly.
		 * To support clock handlers, if a handler registers
		 * with a NULL argument, then we pass it a pointer to
		 * a trapframe as its argument.
		 */
		arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);

		CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
		     ih->ih_filter, ih->ih_handler, arg, ih->ih_name);

		if (ih->ih_filter != NULL)
			ret = ih->ih_filter(arg);
		else {
			thread_only = 1;
			continue;
		}

		if (ret & FILTER_STRAY)
			continue;
		else {
			*ithd = ih->ih_thread;
			return (ret);
		}
	}

	/*
	 * No filters handled the interrupt and we have at least
	 * one handler without a filter.  In this case, we schedule
	 * all of the filter-less handlers to run in the ithread.
	 */
	if (thread_only) {
		*ithd = ie->ie_thread;
		return (FILTER_SCHEDULE_THREAD);
	}
	return (FILTER_STRAY);
}
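
/*
 * A minimal filter sketch matching the contract above (hypothetical
 * driver; the MYDEV_* macros are invented for illustration):
 *
 *	static int
 *	mydev_filter(void *arg)
 *	{
 *		struct mydev_softc *sc = arg;
 *
 *		if (!MYDEV_INTR_PENDING(sc))
 *			return (FILTER_STRAY);
 *		MYDEV_ACK_INTR(sc);
 *		if (MYDEV_WORK_IS_CHEAP(sc))
 *			return (FILTER_HANDLED);
 *		return (FILTER_HANDLED | FILTER_SCHEDULE_THREAD);
 *	}
 */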

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:                        the event connected to this interrupt.
 * o frame:                     some archs (e.g. i386) pass a frame to some
 *                              handlers as their main argument.
 * Return value:
 * o 0:                         everything ok.
 * o EINVAL:                    stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_thread *ithd;
	struct thread *td;
	int thread;

	ithd = NULL;
	td = curthread;

	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	td->td_intr_nesting_level++;
	thread = 0;
	critical_enter();
	thread = intr_filter_loop(ie, frame, &ithd);

	/*
	 * If the interrupt was fully served, send it an EOI but leave
	 * it unmasked.  Otherwise, mask the source as well as sending
	 * it an EOI.
	 */
	if (thread & FILTER_HANDLED) {
		if (ie->ie_eoi != NULL)
			ie->ie_eoi(ie->ie_source);
	} else {
		if (ie->ie_disable != NULL)
			ie->ie_disable(ie->ie_source);
	}
	critical_exit();

	/* Interrupt storm logic */
	if (thread & FILTER_STRAY) {
		ie->ie_count++;
		if (ie->ie_count < intr_storm_threshold)
			printf("Interrupt stray detection not present\n");
	}

	/* Schedule an ithread if needed. */
	if (thread & FILTER_SCHEDULE_THREAD) {
		if (intr_event_schedule_thread(ie, ithd) != 0)
			panic("%s: impossible stray interrupt", __func__);
	}
	td->td_intr_nesting_level--;
	return (0);
}
#endif

#ifdef DDB
/*
 * Dump details about an interrupt handler.
 */
static void
db_dump_intrhand(struct intr_handler *ih)
{
	int comma;

	db_printf("\t%-10s ", ih->ih_name);
	switch (ih->ih_pri) {
	case PI_REALTIME:
		db_printf("CLK ");
		break;
	case PI_AV:
		db_printf("AV  ");
		break;
	case PI_TTYHIGH:
	case PI_TTYLOW:
		db_printf("TTY ");
		break;
	case PI_TAPE:
		db_printf("TAPE");
		break;
	case PI_NET:
		db_printf("NET ");
		break;
	case PI_DISK:
	case PI_DISKLOW:
		db_printf("DISK");
		break;
	case PI_DULL:
		db_printf("DULL");
		break;
	default:
		if (ih->ih_pri >= PI_SOFT)
			db_printf("SWI ");
		else
			db_printf("%4u", ih->ih_pri);
		break;
	}
	db_printf(" ");
	db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
	db_printf("(%p)", ih->ih_argument);
	if (ih->ih_need ||
	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
	    IH_MPSAFE)) != 0) {
		db_printf(" {");
		comma = 0;
		if (ih->ih_flags & IH_EXCLUSIVE) {
			if (comma)
				db_printf(", ");
			db_printf("EXCL");
			comma = 1;
		}
		if (ih->ih_flags & IH_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ih->ih_flags & IH_DEAD) {
			if (comma)
				db_printf(", ");
			db_printf("DEAD");
			comma = 1;
		}
		if (ih->ih_flags & IH_MPSAFE) {
			if (comma)
				db_printf(", ");
			db_printf("MPSAFE");
			comma = 1;
		}
		if (ih->ih_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");
}

/*
 * Dump details about an event.
 */
void
db_dump_intr_event(struct intr_event *ie, int handlers)
{
	struct intr_handler *ih;
	struct intr_thread *it;
	int comma;

	db_printf("%s ", ie->ie_fullname);
	it = ie->ie_thread;
	if (it != NULL)
		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
	else
		db_printf("(no thread)");
	if (ie->ie_cpu != NOCPU)
		db_printf(" (CPU %d)", ie->ie_cpu);
	if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
	    (it != NULL && it->it_need)) {
		db_printf(" {");
		comma = 0;
		if (ie->ie_flags & IE_SOFT) {
			db_printf("SOFT");
			comma = 1;
		}
		if (ie->ie_flags & IE_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ie->ie_flags & IE_ADDING_THREAD) {
			if (comma)
				db_printf(", ");
			db_printf("ADDING_THREAD");
			comma = 1;
		}
		if (it != NULL && it->it_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");

	if (handlers)
		TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		    db_dump_intrhand(ih);
}

/*
 * Dump data about interrupt handlers.
 */
DB_SHOW_COMMAND(intr, db_show_intr)
{
	struct intr_event *ie;
	int all, verbose;

	verbose = index(modif, 'v') != NULL;
	all = index(modif, 'a') != NULL;
	TAILQ_FOREACH(ie, &event_list, ie_list) {
		if (!all && TAILQ_EMPTY(&ie->ie_handlers))
			continue;
		db_dump_intr_event(ie, verbose);
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */

/*
 * Start standard software interrupt threads.
 */
static void
start_softintr(void *dummy)
{

	if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
		panic("died while creating vm swi ithread");
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
    NULL);

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however is
 * machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
	   req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrcnt,
	    (char *)eintrcnt - (char *)intrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
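
/*
 * For reference, standard userland tools render these two sysctls as a
 * per-source table of interrupt counts: vmstat -i and systat -vmstat
 * both read hw.intrnames and hw.intrcnt.
 */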

#ifdef DDB
/*
 * DDB command to dump the interrupt statistics.
 */
DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
{
	u_long *i;
	char *cp;

	cp = intrnames;
	for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) {
		if (*cp == '\0')
			break;
		if (*i != 0)
			db_printf("%s\t%lu\n", cp, *i);
		cp += strlen(cp) + 1;
	}
}
#endif
