kern_intr.c revision 177859
1139804Simp/*-
226156Sse * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
326156Sse * All rights reserved.
426156Sse *
526156Sse * Redistribution and use in source and binary forms, with or without
626156Sse * modification, are permitted provided that the following conditions
726156Sse * are met:
826156Sse * 1. Redistributions of source code must retain the above copyright
926156Sse *    notice unmodified, this list of conditions, and the following
1026156Sse *    disclaimer.
1126156Sse * 2. Redistributions in binary form must reproduce the above copyright
1226156Sse *    notice, this list of conditions and the following disclaimer in the
1326156Sse *    documentation and/or other materials provided with the distribution.
1426156Sse *
1526156Sse * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
1626156Sse * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
1726156Sse * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
1826156Sse * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
1926156Sse * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
2026156Sse * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
2126156Sse * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
2226156Sse * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2326156Sse * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
2426156Sse * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2526156Sse */
2626156Sse
27116182Sobrien#include <sys/cdefs.h>
28116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/kern_intr.c 177859 2008-04-02 11:20:30Z jeff $");
2936887Sdfr
30121482Sjhb#include "opt_ddb.h"
31121482Sjhb
3241059Speter#include <sys/param.h>
3365822Sjhb#include <sys/bus.h>
34110860Salfred#include <sys/conf.h>
3565822Sjhb#include <sys/rtprio.h>
3641059Speter#include <sys/systm.h>
3766698Sjhb#include <sys/interrupt.h>
3866698Sjhb#include <sys/kernel.h>
3966698Sjhb#include <sys/kthread.h>
4066698Sjhb#include <sys/ktr.h>
41130128Sbde#include <sys/limits.h>
4274914Sjhb#include <sys/lock.h>
4326156Sse#include <sys/malloc.h>
4467365Sjhb#include <sys/mutex.h>
4566698Sjhb#include <sys/proc.h>
4672759Sjhb#include <sys/random.h>
4772237Sjhb#include <sys/resourcevar.h>
48139451Sjhb#include <sys/sched.h>
49177181Sjhb#include <sys/smp.h>
5077582Stmm#include <sys/sysctl.h>
5166698Sjhb#include <sys/unistd.h>
5266698Sjhb#include <sys/vmmeter.h>
5366698Sjhb#include <machine/atomic.h>
5466698Sjhb#include <machine/cpu.h>
5567551Sjhb#include <machine/md_var.h>
5672237Sjhb#include <machine/stdarg.h>
57121482Sjhb#ifdef DDB
58121482Sjhb#include <ddb/ddb.h>
59121482Sjhb#include <ddb/db_sym.h>
60121482Sjhb#endif
6126156Sse
62151658Sjhb/*
63151658Sjhb * Describe an interrupt thread.  There is one of these per interrupt event.
64151658Sjhb */
65151658Sjhbstruct intr_thread {
66151658Sjhb	struct intr_event *it_event;
67151658Sjhb	struct thread *it_thread;	/* Kernel thread. */
68151658Sjhb	int	it_flags;		/* (j) IT_* flags. */
69151658Sjhb	int	it_need;		/* Needs service. */
7072759Sjhb};
7172759Sjhb
72151658Sjhb/* Interrupt thread flags kept in it_flags */
73151658Sjhb#define	IT_DEAD		0x000001	/* Thread is waiting to exit. */
74151658Sjhb
75151658Sjhbstruct	intr_entropy {
76151658Sjhb	struct	thread *td;
77151658Sjhb	uintptr_t event;
78151658Sjhb};
79151658Sjhb
80151658Sjhbstruct	intr_event *clk_intr_event;
81151658Sjhbstruct	intr_event *tty_intr_event;
82128339Sbdevoid	*vm_ih;
83173004Sjulianstruct proc *intrproc;
8438244Sbde
8572237Sjhbstatic MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");
8672237Sjhb
87168850Snjlstatic int intr_storm_threshold = 1000;
88128331SjhbTUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
89128331SjhbSYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
90128331Sjhb    &intr_storm_threshold, 0,
91128339Sbde    "Number of consecutive interrupts before storm protection is enabled");
92151658Sjhbstatic TAILQ_HEAD(, intr_event) event_list =
93151658Sjhb    TAILQ_HEAD_INITIALIZER(event_list);
94128331Sjhb
95151658Sjhbstatic void	intr_event_update(struct intr_event *ie);
96169320Spiso#ifdef INTR_FILTER
97169320Spisostatic struct intr_thread *ithread_create(const char *name,
98169320Spiso			      struct intr_handler *ih);
99169320Spiso#else
100151658Sjhbstatic struct intr_thread *ithread_create(const char *name);
101169320Spiso#endif
102151658Sjhbstatic void	ithread_destroy(struct intr_thread *ithread);
103169320Spisostatic void	ithread_execute_handlers(struct proc *p,
104169320Spiso		    struct intr_event *ie);
105169320Spiso#ifdef INTR_FILTER
106169320Spisostatic void	priv_ithread_execute_handler(struct proc *p,
107169320Spiso		    struct intr_handler *ih);
108169320Spiso#endif
109128339Sbdestatic void	ithread_loop(void *);
110151658Sjhbstatic void	ithread_update(struct intr_thread *ithd);
111128339Sbdestatic void	start_softintr(void *);
112128339Sbde
113165124Sjhb/* Map an interrupt type to an ithread priority. */
11472237Sjhbu_char
115151658Sjhbintr_priority(enum intr_type flags)
11665822Sjhb{
11772237Sjhb	u_char pri;
11865822Sjhb
11972237Sjhb	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
12078365Speter	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
12165822Sjhb	switch (flags) {
12272237Sjhb	case INTR_TYPE_TTY:
12365822Sjhb		pri = PI_TTYLOW;
12465822Sjhb		break;
12565822Sjhb	case INTR_TYPE_BIO:
12665822Sjhb		/*
12765822Sjhb		 * XXX We need to refine this.  BSD/OS distinguishes
12865822Sjhb		 * between tape and disk priorities.
12965822Sjhb		 */
13065822Sjhb		pri = PI_DISK;
13165822Sjhb		break;
13265822Sjhb	case INTR_TYPE_NET:
13365822Sjhb		pri = PI_NET;
13465822Sjhb		break;
13565822Sjhb	case INTR_TYPE_CAM:
13665822Sjhb		pri = PI_DISK;          /* XXX or PI_CAM? */
13765822Sjhb		break;
13878365Speter	case INTR_TYPE_AV:		/* Audio/video */
13978365Speter		pri = PI_AV;
14078365Speter		break;
14172237Sjhb	case INTR_TYPE_CLK:
14272237Sjhb		pri = PI_REALTIME;
14372237Sjhb		break;
14465822Sjhb	case INTR_TYPE_MISC:
14565822Sjhb		pri = PI_DULL;          /* don't care */
14665822Sjhb		break;
14765822Sjhb	default:
14872237Sjhb		/* We didn't specify an interrupt level. */
149151658Sjhb		panic("intr_priority: no interrupt type in flags");
15065822Sjhb	}
15165822Sjhb
15265822Sjhb	return pri;
15365822Sjhb}
15465822Sjhb
15572237Sjhb/*
156151658Sjhb * Update an ithread based on the associated intr_event.
15772237Sjhb */
15872237Sjhbstatic void
159151658Sjhbithread_update(struct intr_thread *ithd)
16072237Sjhb{
161151658Sjhb	struct intr_event *ie;
16283366Sjulian	struct thread *td;
163151658Sjhb	u_char pri;
16467551Sjhb
165151658Sjhb	ie = ithd->it_event;
166151658Sjhb	td = ithd->it_thread;
16772237Sjhb
168151658Sjhb	/* Determine the overall priority of this event. */
169151658Sjhb	if (TAILQ_EMPTY(&ie->ie_handlers))
170151658Sjhb		pri = PRI_MAX_ITHD;
171151658Sjhb	else
172151658Sjhb		pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;
173105354Srobert
174151658Sjhb	/* Update name and priority. */
175173004Sjulian	strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
176170307Sjeff	thread_lock(td);
177151658Sjhb	sched_prio(td, pri);
178170307Sjeff	thread_unlock(td);
179151658Sjhb}
180151658Sjhb
181151658Sjhb/*
182151658Sjhb * Regenerate the full name of an interrupt event and update its priority.
183151658Sjhb */
184151658Sjhbstatic void
185151658Sjhbintr_event_update(struct intr_event *ie)
186151658Sjhb{
187151658Sjhb	struct intr_handler *ih;
188151658Sjhb	char *last;
189151658Sjhb	int missed, space;
190151658Sjhb
191151658Sjhb	/* Start off with no entropy and just the name of the event. */
192151658Sjhb	mtx_assert(&ie->ie_lock, MA_OWNED);
193151658Sjhb	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
194151658Sjhb	ie->ie_flags &= ~IE_ENTROPY;
195137267Sjhb	missed = 0;
196151658Sjhb	space = 1;
197151658Sjhb
198151658Sjhb	/* Run through all the handlers updating values. */
199151658Sjhb	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
200151658Sjhb		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
201151658Sjhb		    sizeof(ie->ie_fullname)) {
202151658Sjhb			strcat(ie->ie_fullname, " ");
203151658Sjhb			strcat(ie->ie_fullname, ih->ih_name);
204151658Sjhb			space = 0;
205137267Sjhb		} else
206137267Sjhb			missed++;
207137267Sjhb		if (ih->ih_flags & IH_ENTROPY)
208151658Sjhb			ie->ie_flags |= IE_ENTROPY;
209137267Sjhb	}
210151658Sjhb
211151658Sjhb	/*
212151658Sjhb	 * If the handler names were too long, add +'s to indicate missing
213151658Sjhb	 * names. If we run out of room and still have +'s to add, change
214151658Sjhb	 * the last character from a + to a *.
215151658Sjhb	 */
216151658Sjhb	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
217137267Sjhb	while (missed-- > 0) {
218151658Sjhb		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
219151658Sjhb			if (*last == '+') {
220151658Sjhb				*last = '*';
221151658Sjhb				break;
222151658Sjhb			} else
223151658Sjhb				*last = '+';
224151658Sjhb		} else if (space) {
225151658Sjhb			strcat(ie->ie_fullname, " +");
226151658Sjhb			space = 0;
22772237Sjhb		} else
228151658Sjhb			strcat(ie->ie_fullname, "+");
22972237Sjhb	}
230151658Sjhb
231151658Sjhb	/*
232151658Sjhb	 * If this event has an ithread, update it's priority and
233151658Sjhb	 * name.
234151658Sjhb	 */
235151658Sjhb	if (ie->ie_thread != NULL)
236151658Sjhb		ithread_update(ie->ie_thread);
237151658Sjhb	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
23872237Sjhb}
23972237Sjhb
24072237Sjhbint
241177325Sjhbintr_event_create(struct intr_event **event, void *source,int flags,
242177325Sjhb    void (*disable)(void *), void (*enable)(void *), void (*eoi)(void *),
243177181Sjhb    int (*assign_cpu)(void *, u_char), const char *fmt, ...)
244169320Spiso{
245169320Spiso	struct intr_event *ie;
246169320Spiso	va_list ap;
24772237Sjhb
248169320Spiso	/* The only valid flag during creation is IE_SOFT. */
249169320Spiso	if ((flags & ~IE_SOFT) != 0)
250169320Spiso		return (EINVAL);
251169320Spiso	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
252169320Spiso	ie->ie_source = source;
253177325Sjhb	ie->ie_disable = disable;
254169320Spiso	ie->ie_enable = enable;
255177325Sjhb	ie->ie_eoi = eoi;
256177181Sjhb	ie->ie_assign_cpu = assign_cpu;
257169320Spiso	ie->ie_flags = flags;
258177181Sjhb	ie->ie_cpu = NOCPU;
259169320Spiso	TAILQ_INIT(&ie->ie_handlers);
260169320Spiso	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);
261169320Spiso
262169320Spiso	va_start(ap, fmt);
263169320Spiso	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
264169320Spiso	va_end(ap);
265169320Spiso	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
266169320Spiso	mtx_pool_lock(mtxpool_sleep, &event_list);
267169320Spiso	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
268169320Spiso	mtx_pool_unlock(mtxpool_sleep, &event_list);
269169320Spiso	if (event != NULL)
270169320Spiso		*event = ie;
271169320Spiso	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
272169320Spiso	return (0);
273169320Spiso}
274169320Spiso
275177181Sjhb/*
276177181Sjhb * Bind an interrupt event to the specified CPU.  Note that not all
277177181Sjhb * platforms support binding an interrupt to a CPU.  For those
278177181Sjhb * platforms this request will fail.  For supported platforms, any
279177181Sjhb * associated ithreads as well as the primary interrupt context will
280177181Sjhb * be bound to the specificed CPU.  Using a cpu id of NOCPU unbinds
281177181Sjhb * the interrupt event.
282177181Sjhb */
283151658Sjhbint
284177181Sjhbintr_event_bind(struct intr_event *ie, u_char cpu)
285177181Sjhb{
286177181Sjhb	struct thread *td;
287177181Sjhb	int error;
288177181Sjhb
289177181Sjhb	/* Need a CPU to bind to. */
290177181Sjhb	if (cpu != NOCPU && CPU_ABSENT(cpu))
291177181Sjhb		return (EINVAL);
292177181Sjhb
293177181Sjhb	if (ie->ie_assign_cpu == NULL)
294177181Sjhb		return (EOPNOTSUPP);
295177181Sjhb
296177181Sjhb	/* Don't allow a bind request if the interrupt is already bound. */
297177181Sjhb	mtx_lock(&ie->ie_lock);
298177181Sjhb	if (ie->ie_cpu != NOCPU && cpu != NOCPU) {
299177181Sjhb		mtx_unlock(&ie->ie_lock);
300177181Sjhb		return (EBUSY);
301177181Sjhb	}
302177181Sjhb	mtx_unlock(&ie->ie_lock);
303177181Sjhb
304177181Sjhb	error = ie->ie_assign_cpu(ie->ie_source, cpu);
305177181Sjhb	if (error)
306177181Sjhb		return (error);
307177181Sjhb	mtx_lock(&ie->ie_lock);
308177181Sjhb	if (ie->ie_thread != NULL)
309177181Sjhb		td = ie->ie_thread->it_thread;
310177181Sjhb	else
311177181Sjhb		td = NULL;
312177181Sjhb	if (td != NULL)
313177181Sjhb		thread_lock(td);
314177181Sjhb	ie->ie_cpu = cpu;
315177181Sjhb	if (td != NULL)
316177181Sjhb		thread_unlock(td);
317177181Sjhb	mtx_unlock(&ie->ie_lock);
318177181Sjhb	return (0);
319177181Sjhb}
320177181Sjhb
321177181Sjhbint
322151658Sjhbintr_event_destroy(struct intr_event *ie)
323151658Sjhb{
324151658Sjhb
325151658Sjhb	mtx_lock(&ie->ie_lock);
326151658Sjhb	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
327151658Sjhb		mtx_unlock(&ie->ie_lock);
328151658Sjhb		return (EBUSY);
329151658Sjhb	}
330151658Sjhb	mtx_pool_lock(mtxpool_sleep, &event_list);
331151658Sjhb	TAILQ_REMOVE(&event_list, ie, ie_list);
332151658Sjhb	mtx_pool_unlock(mtxpool_sleep, &event_list);
333157728Sjhb#ifndef notyet
334157728Sjhb	if (ie->ie_thread != NULL) {
335157728Sjhb		ithread_destroy(ie->ie_thread);
336157728Sjhb		ie->ie_thread = NULL;
337157728Sjhb	}
338157728Sjhb#endif
339151658Sjhb	mtx_unlock(&ie->ie_lock);
340151658Sjhb	mtx_destroy(&ie->ie_lock);
341151658Sjhb	free(ie, M_ITHREAD);
342151658Sjhb	return (0);
343151658Sjhb}
344151658Sjhb
345169320Spiso#ifndef INTR_FILTER
346151658Sjhbstatic struct intr_thread *
347151658Sjhbithread_create(const char *name)
348151658Sjhb{
349151658Sjhb	struct intr_thread *ithd;
350151658Sjhb	struct thread *td;
351151658Sjhb	int error;
352151658Sjhb
353151658Sjhb	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);
354151658Sjhb
355173004Sjulian	error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
356173004Sjulian		    &td, RFSTOPPED | RFHIGHPID,
357173051Sjulian	    	    0, "intr", "%s", name);
358151658Sjhb	if (error)
359172836Sjulian		panic("kproc_create() failed with %d", error);
360170307Sjeff	thread_lock(td);
361164936Sjulian	sched_class(td, PRI_ITHD);
362103216Sjulian	TD_SET_IWAIT(td);
363170307Sjeff	thread_unlock(td);
364151658Sjhb	td->td_pflags |= TDP_ITHREAD;
365151658Sjhb	ithd->it_thread = td;
366151658Sjhb	CTR2(KTR_INTR, "%s: created %s", __func__, name);
367151658Sjhb	return (ithd);
36872237Sjhb}
369169320Spiso#else
370169320Spisostatic struct intr_thread *
371169320Spisoithread_create(const char *name, struct intr_handler *ih)
372169320Spiso{
373169320Spiso	struct intr_thread *ithd;
374169320Spiso	struct thread *td;
375169320Spiso	int error;
37672237Sjhb
377169320Spiso	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);
378169320Spiso
379173153Sjulian	error = kproc_kthread_add(ithread_loop, ih, &intrproc,
380173004Sjulian		    &td, RFSTOPPED | RFHIGHPID,
381173051Sjulian	    	    0, "intr", "%s", name);
382169320Spiso	if (error)
383172836Sjulian		panic("kproc_create() failed with %d", error);
384170307Sjeff	thread_lock(td);
385169320Spiso	sched_class(td, PRI_ITHD);
386169320Spiso	TD_SET_IWAIT(td);
387170307Sjeff	thread_unlock(td);
388169320Spiso	td->td_pflags |= TDP_ITHREAD;
389169320Spiso	ithd->it_thread = td;
390169320Spiso	CTR2(KTR_INTR, "%s: created %s", __func__, name);
391169320Spiso	return (ithd);
392169320Spiso}
393169320Spiso#endif
394169320Spiso
395151658Sjhbstatic void
396151658Sjhbithread_destroy(struct intr_thread *ithread)
39772237Sjhb{
39883366Sjulian	struct thread *td;
39972237Sjhb
400157784Sscottl	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
401151658Sjhb	td = ithread->it_thread;
402170307Sjeff	thread_lock(td);
40376771Sjhb	ithread->it_flags |= IT_DEAD;
404103216Sjulian	if (TD_AWAITING_INTR(td)) {
405103216Sjulian		TD_CLR_IWAIT(td);
406166188Sjeff		sched_add(td, SRQ_INTR);
40772237Sjhb	}
408170307Sjeff	thread_unlock(td);
40972237Sjhb}
41072237Sjhb
411169320Spiso#ifndef INTR_FILTER
41272237Sjhbint
413151658Sjhbintr_event_add_handler(struct intr_event *ie, const char *name,
414166901Spiso    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
415166901Spiso    enum intr_type flags, void **cookiep)
41672237Sjhb{
417151658Sjhb	struct intr_handler *ih, *temp_ih;
418151658Sjhb	struct intr_thread *it;
41972237Sjhb
420166901Spiso	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
42172237Sjhb		return (EINVAL);
42272237Sjhb
423151658Sjhb	/* Allocate and populate an interrupt handler structure. */
424151658Sjhb	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
425166901Spiso	ih->ih_filter = filter;
42672237Sjhb	ih->ih_handler = handler;
42772237Sjhb	ih->ih_argument = arg;
42872237Sjhb	ih->ih_name = name;
429151658Sjhb	ih->ih_event = ie;
43072237Sjhb	ih->ih_pri = pri;
431166901Spiso	if (flags & INTR_EXCL)
43272237Sjhb		ih->ih_flags = IH_EXCLUSIVE;
43372237Sjhb	if (flags & INTR_MPSAFE)
43472237Sjhb		ih->ih_flags |= IH_MPSAFE;
43572237Sjhb	if (flags & INTR_ENTROPY)
43672237Sjhb		ih->ih_flags |= IH_ENTROPY;
43772237Sjhb
438151658Sjhb	/* We can only have one exclusive handler in a event. */
439151658Sjhb	mtx_lock(&ie->ie_lock);
440151658Sjhb	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
441151658Sjhb		if ((flags & INTR_EXCL) ||
442151658Sjhb		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
443151658Sjhb			mtx_unlock(&ie->ie_lock);
444151658Sjhb			free(ih, M_ITHREAD);
445151658Sjhb			return (EINVAL);
446151658Sjhb		}
447122002Sjhb	}
44872237Sjhb
449151658Sjhb	/* Add the new handler to the event in priority order. */
450151658Sjhb	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
451151658Sjhb		if (temp_ih->ih_pri > ih->ih_pri)
452151658Sjhb			break;
453151658Sjhb	}
45472237Sjhb	if (temp_ih == NULL)
455151658Sjhb		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
45672237Sjhb	else
45772237Sjhb		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
458151658Sjhb	intr_event_update(ie);
45972237Sjhb
460151658Sjhb	/* Create a thread if we need one. */
461166901Spiso	while (ie->ie_thread == NULL && handler != NULL) {
462151658Sjhb		if (ie->ie_flags & IE_ADDING_THREAD)
463157815Sjhb			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
464151658Sjhb		else {
465151658Sjhb			ie->ie_flags |= IE_ADDING_THREAD;
466151658Sjhb			mtx_unlock(&ie->ie_lock);
467151658Sjhb			it = ithread_create("intr: newborn");
468151658Sjhb			mtx_lock(&ie->ie_lock);
469151658Sjhb			ie->ie_flags &= ~IE_ADDING_THREAD;
470151658Sjhb			ie->ie_thread = it;
471151658Sjhb			it->it_event = ie;
472151658Sjhb			ithread_update(it);
473151658Sjhb			wakeup(ie);
474151658Sjhb		}
475151658Sjhb	}
476151658Sjhb	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
477151658Sjhb	    ie->ie_name);
478151658Sjhb	mtx_unlock(&ie->ie_lock);
479151658Sjhb
48072237Sjhb	if (cookiep != NULL)
48172237Sjhb		*cookiep = ih;
48272237Sjhb	return (0);
48372237Sjhb}
484169320Spiso#else
485169320Spisoint
486169320Spisointr_event_add_handler(struct intr_event *ie, const char *name,
487169320Spiso    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
488169320Spiso    enum intr_type flags, void **cookiep)
489169320Spiso{
490169320Spiso	struct intr_handler *ih, *temp_ih;
491169320Spiso	struct intr_thread *it;
49272237Sjhb
493169320Spiso	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
494169320Spiso		return (EINVAL);
495169320Spiso
496169320Spiso	/* Allocate and populate an interrupt handler structure. */
497169320Spiso	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
498169320Spiso	ih->ih_filter = filter;
499169320Spiso	ih->ih_handler = handler;
500169320Spiso	ih->ih_argument = arg;
501169320Spiso	ih->ih_name = name;
502169320Spiso	ih->ih_event = ie;
503169320Spiso	ih->ih_pri = pri;
504169320Spiso	if (flags & INTR_EXCL)
505169320Spiso		ih->ih_flags = IH_EXCLUSIVE;
506169320Spiso	if (flags & INTR_MPSAFE)
507169320Spiso		ih->ih_flags |= IH_MPSAFE;
508169320Spiso	if (flags & INTR_ENTROPY)
509169320Spiso		ih->ih_flags |= IH_ENTROPY;
510169320Spiso
511169320Spiso	/* We can only have one exclusive handler in a event. */
512169320Spiso	mtx_lock(&ie->ie_lock);
513169320Spiso	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
514169320Spiso		if ((flags & INTR_EXCL) ||
515169320Spiso		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
516169320Spiso			mtx_unlock(&ie->ie_lock);
517169320Spiso			free(ih, M_ITHREAD);
518169320Spiso			return (EINVAL);
519169320Spiso		}
520169320Spiso	}
521169320Spiso
522169320Spiso	/* Add the new handler to the event in priority order. */
523169320Spiso	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
524169320Spiso		if (temp_ih->ih_pri > ih->ih_pri)
525169320Spiso			break;
526169320Spiso	}
527169320Spiso	if (temp_ih == NULL)
528169320Spiso		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
529169320Spiso	else
530169320Spiso		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
531169320Spiso	intr_event_update(ie);
532169320Spiso
533169320Spiso	/* For filtered handlers, create a private ithread to run on. */
534169320Spiso	if (filter != NULL && handler != NULL) {
535169320Spiso		mtx_unlock(&ie->ie_lock);
536169320Spiso		it = ithread_create("intr: newborn", ih);
537169320Spiso		mtx_lock(&ie->ie_lock);
538169320Spiso		it->it_event = ie;
539169320Spiso		ih->ih_thread = it;
540169320Spiso		ithread_update(it); // XXX - do we really need this?!?!?
541169320Spiso	} else { /* Create the global per-event thread if we need one. */
542169320Spiso		while (ie->ie_thread == NULL && handler != NULL) {
543169320Spiso			if (ie->ie_flags & IE_ADDING_THREAD)
544169320Spiso				msleep(ie, &ie->ie_lock, 0, "ithread", 0);
545169320Spiso			else {
546169320Spiso				ie->ie_flags |= IE_ADDING_THREAD;
547169320Spiso				mtx_unlock(&ie->ie_lock);
548169320Spiso				it = ithread_create("intr: newborn", ih);
549169320Spiso				mtx_lock(&ie->ie_lock);
550169320Spiso				ie->ie_flags &= ~IE_ADDING_THREAD;
551169320Spiso				ie->ie_thread = it;
552169320Spiso				it->it_event = ie;
553169320Spiso				ithread_update(it);
554169320Spiso				wakeup(ie);
555169320Spiso			}
556169320Spiso		}
557169320Spiso	}
558169320Spiso	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
559169320Spiso	    ie->ie_name);
560169320Spiso	mtx_unlock(&ie->ie_lock);
561169320Spiso
562169320Spiso	if (cookiep != NULL)
563169320Spiso		*cookiep = ih;
564169320Spiso	return (0);
565169320Spiso}
566169320Spiso#endif
567169320Spiso
568165125Sjhb/*
569165125Sjhb * Return the ie_source field from the intr_event an intr_handler is
570165125Sjhb * associated with.
571165125Sjhb */
572165125Sjhbvoid *
573165125Sjhbintr_handler_source(void *cookie)
574165125Sjhb{
575165125Sjhb	struct intr_handler *ih;
576165125Sjhb	struct intr_event *ie;
577165125Sjhb
578165125Sjhb	ih = (struct intr_handler *)cookie;
579165125Sjhb	if (ih == NULL)
580165125Sjhb		return (NULL);
581165125Sjhb	ie = ih->ih_event;
582165125Sjhb	KASSERT(ie != NULL,
583165125Sjhb	    ("interrupt handler \"%s\" has a NULL interrupt event",
584165125Sjhb	    ih->ih_name));
585165125Sjhb	return (ie->ie_source);
586165125Sjhb}
587165125Sjhb
588169320Spiso#ifndef INTR_FILTER
58972237Sjhbint
590151658Sjhbintr_event_remove_handler(void *cookie)
59172237Sjhb{
592151658Sjhb	struct intr_handler *handler = (struct intr_handler *)cookie;
593151658Sjhb	struct intr_event *ie;
59472237Sjhb#ifdef INVARIANTS
595151658Sjhb	struct intr_handler *ih;
59672237Sjhb#endif
597151658Sjhb#ifdef notyet
598151658Sjhb	int dead;
599151658Sjhb#endif
60072237Sjhb
60172759Sjhb	if (handler == NULL)
60272237Sjhb		return (EINVAL);
603151658Sjhb	ie = handler->ih_event;
604151658Sjhb	KASSERT(ie != NULL,
605151658Sjhb	    ("interrupt handler \"%s\" has a NULL interrupt event",
606165124Sjhb	    handler->ih_name));
607151658Sjhb	mtx_lock(&ie->ie_lock);
60887593Sobrien	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
609151658Sjhb	    ie->ie_name);
61072237Sjhb#ifdef INVARIANTS
611151658Sjhb	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
61272759Sjhb		if (ih == handler)
61372759Sjhb			goto ok;
614151658Sjhb	mtx_unlock(&ie->ie_lock);
615151658Sjhb	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
616151658Sjhb	    ih->ih_name, ie->ie_name);
61772759Sjhbok:
61872237Sjhb#endif
61972839Sjhb	/*
620151658Sjhb	 * If there is no ithread, then just remove the handler and return.
621151658Sjhb	 * XXX: Note that an INTR_FAST handler might be running on another
622151658Sjhb	 * CPU!
623151658Sjhb	 */
624151658Sjhb	if (ie->ie_thread == NULL) {
625151658Sjhb		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
626151658Sjhb		mtx_unlock(&ie->ie_lock);
627151658Sjhb		free(handler, M_ITHREAD);
628151658Sjhb		return (0);
629151658Sjhb	}
630151658Sjhb
631151658Sjhb	/*
63272839Sjhb	 * If the interrupt thread is already running, then just mark this
63372839Sjhb	 * handler as being dead and let the ithread do the actual removal.
634124505Struckman	 *
635124505Struckman	 * During a cold boot while cold is set, msleep() does not sleep,
636124505Struckman	 * so we have to remove the handler here rather than letting the
637124505Struckman	 * thread do it.
63872839Sjhb	 */
639170307Sjeff	thread_lock(ie->ie_thread->it_thread);
640151658Sjhb	if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
64172839Sjhb		handler->ih_flags |= IH_DEAD;
64272839Sjhb
64372839Sjhb		/*
64472839Sjhb		 * Ensure that the thread will process the handler list
64572839Sjhb		 * again and remove this handler if it has already passed
64672839Sjhb		 * it on the list.
64772839Sjhb		 */
648151658Sjhb		ie->ie_thread->it_need = 1;
649151658Sjhb	} else
650151658Sjhb		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
651170307Sjeff	thread_unlock(ie->ie_thread->it_thread);
652151658Sjhb	while (handler->ih_flags & IH_DEAD)
653157815Sjhb		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
654151658Sjhb	intr_event_update(ie);
655151658Sjhb#ifdef notyet
656151658Sjhb	/*
657151658Sjhb	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
658151658Sjhb	 * this could lead to races of stale data when servicing an
659151658Sjhb	 * interrupt.
660151658Sjhb	 */
661151658Sjhb	dead = 1;
662151658Sjhb	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
663151658Sjhb		if (!(ih->ih_flags & IH_FAST)) {
664151658Sjhb			dead = 0;
665151658Sjhb			break;
666151658Sjhb		}
667151658Sjhb	}
668151658Sjhb	if (dead) {
669151658Sjhb		ithread_destroy(ie->ie_thread);
670151658Sjhb		ie->ie_thread = NULL;
671151658Sjhb	}
672151658Sjhb#endif
673151658Sjhb	mtx_unlock(&ie->ie_lock);
67476771Sjhb	free(handler, M_ITHREAD);
67572237Sjhb	return (0);
67672237Sjhb}
67772237Sjhb
67872237Sjhbint
679151658Sjhbintr_event_schedule_thread(struct intr_event *ie)
68072759Sjhb{
681151658Sjhb	struct intr_entropy entropy;
682151658Sjhb	struct intr_thread *it;
68383366Sjulian	struct thread *td;
684101176Sjulian	struct thread *ctd;
68572759Sjhb	struct proc *p;
68672759Sjhb
68772759Sjhb	/*
68872759Sjhb	 * If no ithread or no handlers, then we have a stray interrupt.
68972759Sjhb	 */
690151658Sjhb	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
691151658Sjhb	    ie->ie_thread == NULL)
69272759Sjhb		return (EINVAL);
69372759Sjhb
694101176Sjulian	ctd = curthread;
695151658Sjhb	it = ie->ie_thread;
696151658Sjhb	td = it->it_thread;
697133191Srwatson	p = td->td_proc;
698151658Sjhb
69972759Sjhb	/*
70072759Sjhb	 * If any of the handlers for this ithread claim to be good
70172759Sjhb	 * sources of entropy, then gather some.
70272759Sjhb	 */
703151658Sjhb	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
704133191Srwatson		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
705173004Sjulian		    p->p_pid, td->td_name);
706151658Sjhb		entropy.event = (uintptr_t)ie;
707151658Sjhb		entropy.td = ctd;
70872759Sjhb		random_harvest(&entropy, sizeof(entropy), 2, 0,
70972759Sjhb		    RANDOM_INTERRUPT);
71072759Sjhb	}
71172759Sjhb
712151658Sjhb	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
71372759Sjhb
71472759Sjhb	/*
71572759Sjhb	 * Set it_need to tell the thread to keep running if it is already
716170307Sjeff	 * running.  Then, lock the thread and see if we actually need to
717170307Sjeff	 * put it on the runqueue.
71872759Sjhb	 */
719151658Sjhb	it->it_need = 1;
720170307Sjeff	thread_lock(td);
721103216Sjulian	if (TD_AWAITING_INTR(td)) {
722151658Sjhb		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
723173004Sjulian		    td->td_name);
724103216Sjulian		TD_CLR_IWAIT(td);
725166188Sjeff		sched_add(td, SRQ_INTR);
72672759Sjhb	} else {
727151658Sjhb		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
728173004Sjulian		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
72972759Sjhb	}
730170307Sjeff	thread_unlock(td);
73172759Sjhb
73272759Sjhb	return (0);
73372759Sjhb}
734169320Spiso#else
735169320Spisoint
736169320Spisointr_event_remove_handler(void *cookie)
737169320Spiso{
738169320Spiso	struct intr_handler *handler = (struct intr_handler *)cookie;
739169320Spiso	struct intr_event *ie;
740169320Spiso	struct intr_thread *it;
741169320Spiso#ifdef INVARIANTS
742169320Spiso	struct intr_handler *ih;
743169320Spiso#endif
744169320Spiso#ifdef notyet
745169320Spiso	int dead;
746169320Spiso#endif
74772759Sjhb
748169320Spiso	if (handler == NULL)
749169320Spiso		return (EINVAL);
750169320Spiso	ie = handler->ih_event;
751169320Spiso	KASSERT(ie != NULL,
752169320Spiso	    ("interrupt handler \"%s\" has a NULL interrupt event",
753169320Spiso	    handler->ih_name));
754169320Spiso	mtx_lock(&ie->ie_lock);
755169320Spiso	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
756169320Spiso	    ie->ie_name);
757169320Spiso#ifdef INVARIANTS
758169320Spiso	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
759169320Spiso		if (ih == handler)
760169320Spiso			goto ok;
761169320Spiso	mtx_unlock(&ie->ie_lock);
762169320Spiso	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
763169320Spiso	    ih->ih_name, ie->ie_name);
764169320Spisook:
765169320Spiso#endif
766169320Spiso	/*
767169320Spiso	 * If there are no ithreads (per event and per handler), then
768169320Spiso	 * just remove the handler and return.
769169320Spiso	 * XXX: Note that an INTR_FAST handler might be running on another CPU!
770169320Spiso	 */
771169320Spiso	if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
772169320Spiso		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
773169320Spiso		mtx_unlock(&ie->ie_lock);
774169320Spiso		free(handler, M_ITHREAD);
775169320Spiso		return (0);
776169320Spiso	}
777169320Spiso
778169320Spiso	/* Private or global ithread? */
779169320Spiso	it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
780169320Spiso	/*
781169320Spiso	 * If the interrupt thread is already running, then just mark this
782169320Spiso	 * handler as being dead and let the ithread do the actual removal.
783169320Spiso	 *
784169320Spiso	 * During a cold boot while cold is set, msleep() does not sleep,
785169320Spiso	 * so we have to remove the handler here rather than letting the
786169320Spiso	 * thread do it.
787169320Spiso	 */
788170307Sjeff	thread_lock(it->it_thread);
789169320Spiso	if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
790169320Spiso		handler->ih_flags |= IH_DEAD;
791169320Spiso
792169320Spiso		/*
793169320Spiso		 * Ensure that the thread will process the handler list
794169320Spiso		 * again and remove this handler if it has already passed
795169320Spiso		 * it on the list.
796169320Spiso		 */
797169320Spiso		it->it_need = 1;
798169320Spiso	} else
799169320Spiso		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
800170307Sjeff	thread_unlock(it->it_thread);
801169320Spiso	while (handler->ih_flags & IH_DEAD)
802169320Spiso		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
803169320Spiso	/*
804169320Spiso	 * At this point, the handler has been disconnected from the event,
805169320Spiso	 * so we can kill the private ithread if any.
806169320Spiso	 */
807169320Spiso	if (handler->ih_thread) {
808169320Spiso		ithread_destroy(handler->ih_thread);
809169320Spiso		handler->ih_thread = NULL;
810169320Spiso	}
811169320Spiso	intr_event_update(ie);
812169320Spiso#ifdef notyet
813169320Spiso	/*
814169320Spiso	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
815169320Spiso	 * this could lead to races of stale data when servicing an
816169320Spiso	 * interrupt.
817169320Spiso	 */
818169320Spiso	dead = 1;
819169320Spiso	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
820169320Spiso		if (handler != NULL) {
821169320Spiso			dead = 0;
822169320Spiso			break;
823169320Spiso		}
824169320Spiso	}
825169320Spiso	if (dead) {
826169320Spiso		ithread_destroy(ie->ie_thread);
827169320Spiso		ie->ie_thread = NULL;
828169320Spiso	}
829169320Spiso#endif
830169320Spiso	mtx_unlock(&ie->ie_lock);
831169320Spiso	free(handler, M_ITHREAD);
832169320Spiso	return (0);
833169320Spiso}
834169320Spiso
835169320Spisoint
836169320Spisointr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
837169320Spiso{
838169320Spiso	struct intr_entropy entropy;
839169320Spiso	struct thread *td;
840169320Spiso	struct thread *ctd;
841169320Spiso	struct proc *p;
842169320Spiso
843169320Spiso	/*
844169320Spiso	 * If no ithread or no handlers, then we have a stray interrupt.
845169320Spiso	 */
846169320Spiso	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
847169320Spiso		return (EINVAL);
848169320Spiso
849169320Spiso	ctd = curthread;
850169320Spiso	td = it->it_thread;
851169320Spiso	p = td->td_proc;
852169320Spiso
853169320Spiso	/*
854169320Spiso	 * If any of the handlers for this ithread claim to be good
855169320Spiso	 * sources of entropy, then gather some.
856169320Spiso	 */
857169320Spiso	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
858169320Spiso		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
859173004Sjulian		    p->p_pid, td->td_name);
860169320Spiso		entropy.event = (uintptr_t)ie;
861169320Spiso		entropy.td = ctd;
862169320Spiso		random_harvest(&entropy, sizeof(entropy), 2, 0,
863169320Spiso		    RANDOM_INTERRUPT);
864169320Spiso	}
865169320Spiso
866169320Spiso	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
867169320Spiso
868169320Spiso	/*
869169320Spiso	 * Set it_need to tell the thread to keep running if it is already
870170307Sjeff	 * running.  Then, lock the thread and see if we actually need to
871170307Sjeff	 * put it on the runqueue.
872169320Spiso	 */
873169320Spiso	it->it_need = 1;
874170307Sjeff	thread_lock(td);
875169320Spiso	if (TD_AWAITING_INTR(td)) {
876169320Spiso		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
877173122Sjulian		    td->td_name);
878169320Spiso		TD_CLR_IWAIT(td);
879169320Spiso		sched_add(td, SRQ_INTR);
880169320Spiso	} else {
881169320Spiso		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
882173004Sjulian		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
883169320Spiso	}
884170307Sjeff	thread_unlock(td);
885169320Spiso
886169320Spiso	return (0);
887169320Spiso}
888169320Spiso#endif
889169320Spiso
890151699Sjhb/*
891151699Sjhb * Add a software interrupt handler to a specified event.  If a given event
892151699Sjhb * is not specified, then a new event is created.
893151699Sjhb */
89472759Sjhbint
895151658Sjhbswi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
89672237Sjhb	    void *arg, int pri, enum intr_type flags, void **cookiep)
89772237Sjhb{
898151658Sjhb	struct intr_event *ie;
89972237Sjhb	int error;
90066698Sjhb
901169320Spiso	if (flags & INTR_ENTROPY)
90272759Sjhb		return (EINVAL);
90372759Sjhb
904151658Sjhb	ie = (eventp != NULL) ? *eventp : NULL;
90566698Sjhb
906151658Sjhb	if (ie != NULL) {
907151658Sjhb		if (!(ie->ie_flags & IE_SOFT))
908151658Sjhb			return (EINVAL);
90972759Sjhb	} else {
910169320Spiso		error = intr_event_create(&ie, NULL, IE_SOFT,
911177181Sjhb		    NULL, NULL, NULL, NULL, "swi%d:", pri);
91267551Sjhb		if (error)
91372237Sjhb			return (error);
914151658Sjhb		if (eventp != NULL)
915151658Sjhb			*eventp = ie;
91666698Sjhb	}
917166901Spiso	return (intr_event_add_handler(ie, name, NULL, handler, arg,
91872376Sjake		    (pri * RQ_PPQ) + PI_SOFT, flags, cookiep));
919177859Sjeff	error = intr_event_add_handler(ie, name, NULL, handler, arg,
920177859Sjeff	    (pri * RQ_PPQ) + PI_SOFT, flags, cookiep);
921177859Sjeff	if (error)
922177859Sjeff		return (error);
923177859Sjeff	if (pri == SWI_CLOCK) {
924177859Sjeff		struct proc *p;
925177859Sjeff		p = ie->ie_thread->it_thread->td_proc;
926177859Sjeff		PROC_LOCK(p);
927177859Sjeff		p->p_flag |= P_NOLOAD;
928177859Sjeff		PROC_UNLOCK(p);
929177859Sjeff	}
930177859Sjeff	return (0);
93166698Sjhb}
93266698Sjhb
93366698Sjhb/*
934151658Sjhb * Schedule a software interrupt thread.
93566698Sjhb */
93667551Sjhbvoid
93772237Sjhbswi_sched(void *cookie, int flags)
93866698Sjhb{
939151658Sjhb	struct intr_handler *ih = (struct intr_handler *)cookie;
940151658Sjhb	struct intr_event *ie = ih->ih_event;
94172759Sjhb	int error;
94266698Sjhb
943151658Sjhb	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
944151658Sjhb	    ih->ih_need);
945151658Sjhb
94667551Sjhb	/*
94772759Sjhb	 * Set ih_need for this handler so that if the ithread is already
94872759Sjhb	 * running it will execute this handler on the next pass.  Otherwise,
94972759Sjhb	 * it will execute it the next time it runs.
95067551Sjhb	 */
95172237Sjhb	atomic_store_rel_int(&ih->ih_need, 1);
952163474Sbde
95372237Sjhb	if (!(flags & SWI_DELAY)) {
954170291Sattilio		PCPU_INC(cnt.v_soft);
955169320Spiso#ifdef INTR_FILTER
956169320Spiso		error = intr_event_schedule_thread(ie, ie->ie_thread);
957169320Spiso#else
958151658Sjhb		error = intr_event_schedule_thread(ie);
959169320Spiso#endif
96072759Sjhb		KASSERT(error == 0, ("stray software interrupt"));
96166698Sjhb	}
96266698Sjhb}
96366698Sjhb
964151699Sjhb/*
965151699Sjhb * Remove a software interrupt handler.  Currently this code does not
966151699Sjhb * remove the associated interrupt event if it becomes empty.  Calling code
967151699Sjhb * may do so manually via intr_event_destroy(), but that's not really
968151699Sjhb * an optimal interface.
969151699Sjhb */
970151699Sjhbint
971151699Sjhbswi_remove(void *cookie)
972151699Sjhb{
973151699Sjhb
974151699Sjhb	return (intr_event_remove_handler(cookie));
975151699Sjhb}
976151699Sjhb
977169320Spiso#ifdef INTR_FILTER
978151658Sjhbstatic void
979169320Spisopriv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
980169320Spiso{
981169320Spiso	struct intr_event *ie;
982169320Spiso
983169320Spiso	ie = ih->ih_event;
984169320Spiso	/*
985169320Spiso	 * If this handler is marked for death, remove it from
986169320Spiso	 * the list of handlers and wake up the sleeper.
987169320Spiso	 */
988169320Spiso	if (ih->ih_flags & IH_DEAD) {
989169320Spiso		mtx_lock(&ie->ie_lock);
990169320Spiso		TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
991169320Spiso		ih->ih_flags &= ~IH_DEAD;
992169320Spiso		wakeup(ih);
993169320Spiso		mtx_unlock(&ie->ie_lock);
994169320Spiso		return;
995169320Spiso	}
996169320Spiso
997169320Spiso	/* Execute this handler. */
998169320Spiso	CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
999169320Spiso	     __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
1000169320Spiso	     ih->ih_name, ih->ih_flags);
1001169320Spiso
1002169320Spiso	if (!(ih->ih_flags & IH_MPSAFE))
1003169320Spiso		mtx_lock(&Giant);
1004169320Spiso	ih->ih_handler(ih->ih_argument);
1005169320Spiso	if (!(ih->ih_flags & IH_MPSAFE))
1006169320Spiso		mtx_unlock(&Giant);
1007169320Spiso}
1008169320Spiso#endif
1009169320Spiso
1010169320Spisostatic void
1011151658Sjhbithread_execute_handlers(struct proc *p, struct intr_event *ie)
1012151658Sjhb{
1013151658Sjhb	struct intr_handler *ih, *ihn;
1014151658Sjhb
1015151658Sjhb	/* Interrupt handlers should not sleep. */
1016151658Sjhb	if (!(ie->ie_flags & IE_SOFT))
1017151658Sjhb		THREAD_NO_SLEEPING();
1018151658Sjhb	TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
1019151658Sjhb
1020151658Sjhb		/*
1021151658Sjhb		 * If this handler is marked for death, remove it from
1022151658Sjhb		 * the list of handlers and wake up the sleeper.
1023151658Sjhb		 */
1024151658Sjhb		if (ih->ih_flags & IH_DEAD) {
1025151658Sjhb			mtx_lock(&ie->ie_lock);
1026151658Sjhb			TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
1027151658Sjhb			ih->ih_flags &= ~IH_DEAD;
1028151658Sjhb			wakeup(ih);
1029151658Sjhb			mtx_unlock(&ie->ie_lock);
1030151658Sjhb			continue;
1031151658Sjhb		}
1032151658Sjhb
1033167080Spiso		/* Skip filter only handlers */
1034167080Spiso		if (ih->ih_handler == NULL)
1035167080Spiso			continue;
1036167080Spiso
1037151658Sjhb		/*
1038151658Sjhb		 * For software interrupt threads, we only execute
1039151658Sjhb		 * handlers that have their need flag set.  Hardware
1040151658Sjhb		 * interrupt threads always invoke all of their handlers.
1041151658Sjhb		 */
1042151658Sjhb		if (ie->ie_flags & IE_SOFT) {
1043151658Sjhb			if (!ih->ih_need)
1044151658Sjhb				continue;
1045151658Sjhb			else
1046151658Sjhb				atomic_store_rel_int(&ih->ih_need, 0);
1047151658Sjhb		}
1048151658Sjhb
1049151658Sjhb		/* Execute this handler. */
1050151658Sjhb		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
1051169320Spiso		    __func__, p->p_pid, (void *)ih->ih_handler,
1052169320Spiso		    ih->ih_argument, ih->ih_name, ih->ih_flags);
1053151658Sjhb
1054151658Sjhb		if (!(ih->ih_flags & IH_MPSAFE))
1055151658Sjhb			mtx_lock(&Giant);
1056151658Sjhb		ih->ih_handler(ih->ih_argument);
1057151658Sjhb		if (!(ih->ih_flags & IH_MPSAFE))
1058151658Sjhb			mtx_unlock(&Giant);
1059151658Sjhb	}
1060151658Sjhb	if (!(ie->ie_flags & IE_SOFT))
1061151658Sjhb		THREAD_SLEEPING_OK();
1062151658Sjhb
1063151658Sjhb	/*
1064151658Sjhb	 * Interrupt storm handling:
1065151658Sjhb	 *
1066151658Sjhb	 * If this interrupt source is currently storming, then throttle
1067151658Sjhb	 * it to only fire the handler once  per clock tick.
1068151658Sjhb	 *
1069151658Sjhb	 * If this interrupt source is not currently storming, but the
1070151658Sjhb	 * number of back to back interrupts exceeds the storm threshold,
1071151658Sjhb	 * then enter storming mode.
1072151658Sjhb	 */
1073167173Sjhb	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
1074167173Sjhb	    !(ie->ie_flags & IE_SOFT)) {
1075168850Snjl		/* Report the message only once every second. */
1076168850Snjl		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
1077151658Sjhb			printf(
1078168850Snjl	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
1079151658Sjhb			    ie->ie_name);
1080151658Sjhb		}
1081167173Sjhb		pause("istorm", 1);
1082151658Sjhb	} else
1083151658Sjhb		ie->ie_count++;
1084151658Sjhb
1085151658Sjhb	/*
1086151658Sjhb	 * Now that all the handlers have had a chance to run, reenable
1087151658Sjhb	 * the interrupt source.
1088151658Sjhb	 */
1089151658Sjhb	if (ie->ie_enable != NULL)
1090151658Sjhb		ie->ie_enable(ie->ie_source);
1091151658Sjhb}
1092151658Sjhb
1093169320Spiso#ifndef INTR_FILTER
109466698Sjhb/*
109572237Sjhb * This is the main code for interrupt threads.
109666698Sjhb */
1097104094Sphkstatic void
109872237Sjhbithread_loop(void *arg)
109966698Sjhb{
1100151658Sjhb	struct intr_thread *ithd;
1101151658Sjhb	struct intr_event *ie;
110283366Sjulian	struct thread *td;
110372237Sjhb	struct proc *p;
1104177181Sjhb	u_char cpu;
1105151658Sjhb
110683366Sjulian	td = curthread;
110783366Sjulian	p = td->td_proc;
1108151658Sjhb	ithd = (struct intr_thread *)arg;
1109151658Sjhb	KASSERT(ithd->it_thread == td,
111087593Sobrien	    ("%s: ithread and proc linkage out of sync", __func__));
1111151658Sjhb	ie = ithd->it_event;
1112151658Sjhb	ie->ie_count = 0;
1113177181Sjhb	cpu = NOCPU;
111466698Sjhb
111567551Sjhb	/*
111667551Sjhb	 * As long as we have interrupts outstanding, go through the
111767551Sjhb	 * list of handlers, giving each one a go at it.
111867551Sjhb	 */
111966698Sjhb	for (;;) {
112072237Sjhb		/*
112172237Sjhb		 * If we are an orphaned thread, then just die.
112272237Sjhb		 */
112372237Sjhb		if (ithd->it_flags & IT_DEAD) {
1124151658Sjhb			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
1125173004Sjulian			    p->p_pid, td->td_name);
112672237Sjhb			free(ithd, M_ITHREAD);
1127173044Sjulian			kthread_exit();
112872237Sjhb		}
112972237Sjhb
1130151658Sjhb		/*
1131151658Sjhb		 * Service interrupts.  If another interrupt arrives while
1132151658Sjhb		 * we are running, it will set it_need to note that we
1133151658Sjhb		 * should make another pass.
1134151658Sjhb		 */
113572237Sjhb		while (ithd->it_need) {
113667551Sjhb			/*
1137151658Sjhb			 * This might need a full read and write barrier
1138151658Sjhb			 * to make sure that this write posts before any
1139151658Sjhb			 * of the memory or device accesses in the
1140151658Sjhb			 * handlers.
114167551Sjhb			 */
114272237Sjhb			atomic_store_rel_int(&ithd->it_need, 0);
1143151658Sjhb			ithread_execute_handlers(p, ie);
114466698Sjhb		}
1145128331Sjhb		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
1146128331Sjhb		mtx_assert(&Giant, MA_NOTOWNED);
114767551Sjhb
114866698Sjhb		/*
114966698Sjhb		 * Processed all our interrupts.  Now get the sched
115067551Sjhb		 * lock.  This may take a while and it_need may get
115166698Sjhb		 * set again, so we have to check it again.
115266698Sjhb		 */
1153170307Sjeff		thread_lock(td);
1154151658Sjhb		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
1155128331Sjhb			TD_SET_IWAIT(td);
1156151658Sjhb			ie->ie_count = 0;
1157131473Sjhb			mi_switch(SW_VOL, NULL);
115866698Sjhb		}
1159177181Sjhb
1160177181Sjhb#ifdef SMP
1161177181Sjhb		/*
1162177181Sjhb		 * Ensure we are bound to the correct CPU.  We can't
1163177181Sjhb		 * move ithreads until SMP is running however, so just
1164177181Sjhb		 * leave interrupts on the boor CPU during boot.
1165177181Sjhb		 */
1166177181Sjhb		if (ie->ie_cpu != cpu && smp_started) {
1167177181Sjhb			cpu = ie->ie_cpu;
1168177181Sjhb			if (cpu == NOCPU)
1169177181Sjhb				sched_unbind(td);
1170177181Sjhb			else
1171177181Sjhb				sched_bind(td, cpu);
1172177181Sjhb		}
1173177181Sjhb#endif
1174170307Sjeff		thread_unlock(td);
117566698Sjhb	}
117666698Sjhb}
1177169320Spiso#else
1178169320Spiso/*
1179169320Spiso * This is the main code for interrupt threads.
1180169320Spiso */
1181169320Spisostatic void
1182169320Spisoithread_loop(void *arg)
1183169320Spiso{
1184169320Spiso	struct intr_thread *ithd;
1185169320Spiso	struct intr_handler *ih;
1186169320Spiso	struct intr_event *ie;
1187169320Spiso	struct thread *td;
1188169320Spiso	struct proc *p;
1189169320Spiso	int priv;
1190177181Sjhb	u_char cpu;
119166698Sjhb
1192169320Spiso	td = curthread;
1193169320Spiso	p = td->td_proc;
1194169320Spiso	ih = (struct intr_handler *)arg;
1195169320Spiso	priv = (ih->ih_thread != NULL) ? 1 : 0;
1196169320Spiso	ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
1197169320Spiso	KASSERT(ithd->it_thread == td,
1198169320Spiso	    ("%s: ithread and proc linkage out of sync", __func__));
1199169320Spiso	ie = ithd->it_event;
1200169320Spiso	ie->ie_count = 0;
1201177181Sjhb	cpu = NOCPU;
1202169320Spiso
1203169320Spiso	/*
1204169320Spiso	 * As long as we have interrupts outstanding, go through the
1205169320Spiso	 * list of handlers, giving each one a go at it.
1206169320Spiso	 */
1207169320Spiso	for (;;) {
1208169320Spiso		/*
1209169320Spiso		 * If we are an orphaned thread, then just die.
1210169320Spiso		 */
1211169320Spiso		if (ithd->it_flags & IT_DEAD) {
1212169320Spiso			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
1213173004Sjulian			    p->p_pid, td->td_name);
1214169320Spiso			free(ithd, M_ITHREAD);
1215173044Sjulian			kthread_exit();
1216169320Spiso		}
1217169320Spiso
1218169320Spiso		/*
1219169320Spiso		 * Service interrupts.  If another interrupt arrives while
1220169320Spiso		 * we are running, it will set it_need to note that we
1221169320Spiso		 * should make another pass.
1222169320Spiso		 */
1223169320Spiso		while (ithd->it_need) {
1224169320Spiso			/*
1225169320Spiso			 * This might need a full read and write barrier
1226169320Spiso			 * to make sure that this write posts before any
1227169320Spiso			 * of the memory or device accesses in the
1228169320Spiso			 * handlers.
1229169320Spiso			 */
1230169320Spiso			atomic_store_rel_int(&ithd->it_need, 0);
1231169320Spiso			if (priv)
1232169320Spiso				priv_ithread_execute_handler(p, ih);
1233169320Spiso			else
1234169320Spiso				ithread_execute_handlers(p, ie);
1235169320Spiso		}
1236169320Spiso		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
1237169320Spiso		mtx_assert(&Giant, MA_NOTOWNED);
1238169320Spiso
1239169320Spiso		/*
1240169320Spiso		 * Processed all our interrupts.  Now get the sched
1241169320Spiso		 * lock.  This may take a while and it_need may get
1242169320Spiso		 * set again, so we have to check it again.
1243169320Spiso		 */
1244170307Sjeff		thread_lock(td);
1245169320Spiso		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
1246169320Spiso			TD_SET_IWAIT(td);
1247169320Spiso			ie->ie_count = 0;
1248169320Spiso			mi_switch(SW_VOL, NULL);
1249169320Spiso		}
1250177181Sjhb
1251177181Sjhb#ifdef SMP
1252177181Sjhb		/*
1253177181Sjhb		 * Ensure we are bound to the correct CPU.  We can't
1254177181Sjhb		 * move ithreads until SMP is running however, so just
1255177181Sjhb		 * leave interrupts on the boor CPU during boot.
1256177181Sjhb		 */
1257177181Sjhb		if (!priv && ie->ie_cpu != cpu && smp_started) {
1258177181Sjhb			cpu = ie->ie_cpu;
1259177181Sjhb			if (cpu == NOCPU)
1260177181Sjhb				sched_unbind(td);
1261177181Sjhb			else
1262177181Sjhb				sched_bind(td, cpu);
1263177181Sjhb		}
1264177181Sjhb#endif
1265170307Sjeff		thread_unlock(td);
1266169320Spiso	}
1267169320Spiso}
1268169320Spiso
1269169320Spiso/*
1270169320Spiso * Main loop for interrupt filter.
1271169320Spiso *
1272169320Spiso * Some architectures (i386, amd64 and arm) require the optional frame
1273169320Spiso * parameter, and use it as the main argument for fast handler execution
1274169320Spiso * when ih_argument == NULL.
1275169320Spiso *
1276169320Spiso * Return value:
1277169320Spiso * o FILTER_STRAY:              No filter recognized the event, and no
1278169320Spiso *                              filter-less handler is registered on this
1279169320Spiso *                              line.
1280169320Spiso * o FILTER_HANDLED:            A filter claimed the event and served it.
1281169320Spiso * o FILTER_SCHEDULE_THREAD:    No filter claimed the event, but there's at
1282169320Spiso *                              least one filter-less handler on this line.
1283169320Spiso * o FILTER_HANDLED |
1284169320Spiso *   FILTER_SCHEDULE_THREAD:    A filter claimed the event, and asked for
1285169320Spiso *                              scheduling the per-handler ithread.
1286169320Spiso *
1287169320Spiso * In case an ithread has to be scheduled, in *ithd there will be a
1288169320Spiso * pointer to a struct intr_thread containing the thread to be
1289169320Spiso * scheduled.
1290169320Spiso */
1291169320Spiso
1292169320Spisoint
1293169320Spisointr_filter_loop(struct intr_event *ie, struct trapframe *frame,
1294169320Spiso		 struct intr_thread **ithd)
1295169320Spiso{
1296169320Spiso	struct intr_handler *ih;
1297169320Spiso	void *arg;
1298169320Spiso	int ret, thread_only;
1299169320Spiso
1300169320Spiso	ret = 0;
1301169320Spiso	thread_only = 0;
1302169320Spiso	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
1303169320Spiso		/*
1304169320Spiso		 * Execute fast interrupt handlers directly.
1305169320Spiso		 * To support clock handlers, if a handler registers
1306169320Spiso		 * with a NULL argument, then we pass it a pointer to
1307169320Spiso		 * a trapframe as its argument.
1308169320Spiso		 */
1309169320Spiso		arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);
1310169320Spiso
1311169320Spiso		CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
1312169320Spiso		     ih->ih_filter, ih->ih_handler, arg, ih->ih_name);
1313169320Spiso
1314169320Spiso		if (ih->ih_filter != NULL)
1315169320Spiso			ret = ih->ih_filter(arg);
1316169320Spiso		else {
1317169320Spiso			thread_only = 1;
1318169320Spiso			continue;
1319169320Spiso		}
1320169320Spiso
1321169320Spiso		if (ret & FILTER_STRAY)
1322169320Spiso			continue;
1323169320Spiso		else {
1324169320Spiso			*ithd = ih->ih_thread;
1325169320Spiso			return (ret);
1326169320Spiso		}
1327169320Spiso	}
1328169320Spiso
1329169320Spiso	/*
1330169320Spiso	 * No filters handled the interrupt and we have at least
1331169320Spiso	 * one handler without a filter.  In this case, we schedule
1332169320Spiso	 * all of the filter-less handlers to run in the ithread.
1333169320Spiso	 */
1334169320Spiso	if (thread_only) {
1335169320Spiso		*ithd = ie->ie_thread;
1336169320Spiso		return (FILTER_SCHEDULE_THREAD);
1337169320Spiso	}
1338169320Spiso	return (FILTER_STRAY);
1339169320Spiso}
1340169320Spiso
1341169320Spiso/*
1342169320Spiso * Main interrupt handling body.
1343169320Spiso *
1344169320Spiso * Input:
1345169320Spiso * o ie:                        the event connected to this interrupt.
1346169320Spiso * o frame:                     some archs (i.e. i386) pass a frame to some.
1347169320Spiso *                              handlers as their main argument.
1348169320Spiso * Return value:
1349169320Spiso * o 0:                         everything ok.
1350169320Spiso * o EINVAL:                    stray interrupt.
1351169320Spiso */
1352169320Spisoint
1353169320Spisointr_event_handle(struct intr_event *ie, struct trapframe *frame)
1354169320Spiso{
1355169320Spiso	struct intr_thread *ithd;
1356169320Spiso	struct thread *td;
1357169320Spiso	int thread;
1358169320Spiso
1359169320Spiso	ithd = NULL;
1360169320Spiso	td = curthread;
1361169320Spiso
1362169320Spiso	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
1363169320Spiso		return (EINVAL);
1364169320Spiso
1365169320Spiso	td->td_intr_nesting_level++;
1366169320Spiso	thread = 0;
1367169320Spiso	critical_enter();
1368169320Spiso	thread = intr_filter_loop(ie, frame, &ithd);
1369169320Spiso
1370169320Spiso	/*
1371169320Spiso	 * If the interrupt was fully served, send it an EOI but leave
1372169320Spiso	 * it unmasked. Otherwise, mask the source as well as sending
1373169320Spiso	 * it an EOI.
1374169320Spiso	 */
1375169320Spiso	if (thread & FILTER_HANDLED) {
1376169320Spiso		if (ie->ie_eoi != NULL)
1377169320Spiso			ie->ie_eoi(ie->ie_source);
1378169320Spiso	} else {
1379177325Sjhb		if (ie->ie_disable != NULL)
1380177325Sjhb			ie->ie_disable(ie->ie_source);
1381169320Spiso	}
1382169320Spiso	critical_exit();
1383169320Spiso
1384169320Spiso	/* Interrupt storm logic */
1385169320Spiso	if (thread & FILTER_STRAY) {
1386169320Spiso		ie->ie_count++;
1387169320Spiso		if (ie->ie_count < intr_storm_threshold)
1388169320Spiso			printf("Interrupt stray detection not present\n");
1389169320Spiso	}
1390169320Spiso
1391169320Spiso	/* Schedule an ithread if needed. */
1392169320Spiso	if (thread & FILTER_SCHEDULE_THREAD) {
1393169320Spiso		if (intr_event_schedule_thread(ie, ithd) != 0)
1394169320Spiso			panic("%s: impossible stray interrupt", __func__);
1395169320Spiso	}
1396169320Spiso	td->td_intr_nesting_level--;
1397169320Spiso	return (0);
1398169320Spiso}
1399169320Spiso#endif
1400169320Spiso
1401121482Sjhb#ifdef DDB
140272237Sjhb/*
1403121482Sjhb * Dump details about an interrupt handler
1404121482Sjhb */
1405121482Sjhbstatic void
1406151658Sjhbdb_dump_intrhand(struct intr_handler *ih)
1407121482Sjhb{
1408121482Sjhb	int comma;
1409121482Sjhb
1410121482Sjhb	db_printf("\t%-10s ", ih->ih_name);
1411121482Sjhb	switch (ih->ih_pri) {
1412121482Sjhb	case PI_REALTIME:
1413121482Sjhb		db_printf("CLK ");
1414121482Sjhb		break;
1415121482Sjhb	case PI_AV:
1416121482Sjhb		db_printf("AV  ");
1417121482Sjhb		break;
1418121482Sjhb	case PI_TTYHIGH:
1419121482Sjhb	case PI_TTYLOW:
1420121482Sjhb		db_printf("TTY ");
1421121482Sjhb		break;
1422121482Sjhb	case PI_TAPE:
1423121482Sjhb		db_printf("TAPE");
1424121482Sjhb		break;
1425121482Sjhb	case PI_NET:
1426121482Sjhb		db_printf("NET ");
1427121482Sjhb		break;
1428121482Sjhb	case PI_DISK:
1429121482Sjhb	case PI_DISKLOW:
1430121482Sjhb		db_printf("DISK");
1431121482Sjhb		break;
1432121482Sjhb	case PI_DULL:
1433121482Sjhb		db_printf("DULL");
1434121482Sjhb		break;
1435121482Sjhb	default:
1436121482Sjhb		if (ih->ih_pri >= PI_SOFT)
1437121482Sjhb			db_printf("SWI ");
1438121482Sjhb		else
1439121482Sjhb			db_printf("%4u", ih->ih_pri);
1440121482Sjhb		break;
1441121482Sjhb	}
1442121482Sjhb	db_printf(" ");
1443121482Sjhb	db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
1444121482Sjhb	db_printf("(%p)", ih->ih_argument);
1445121482Sjhb	if (ih->ih_need ||
1446166901Spiso	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
1447121482Sjhb	    IH_MPSAFE)) != 0) {
1448121482Sjhb		db_printf(" {");
1449121482Sjhb		comma = 0;
1450121482Sjhb		if (ih->ih_flags & IH_EXCLUSIVE) {
1451121482Sjhb			if (comma)
1452121482Sjhb				db_printf(", ");
1453121482Sjhb			db_printf("EXCL");
1454121482Sjhb			comma = 1;
1455121482Sjhb		}
1456121482Sjhb		if (ih->ih_flags & IH_ENTROPY) {
1457121482Sjhb			if (comma)
1458121482Sjhb				db_printf(", ");
1459121482Sjhb			db_printf("ENTROPY");
1460121482Sjhb			comma = 1;
1461121482Sjhb		}
1462121482Sjhb		if (ih->ih_flags & IH_DEAD) {
1463121482Sjhb			if (comma)
1464121482Sjhb				db_printf(", ");
1465121482Sjhb			db_printf("DEAD");
1466121482Sjhb			comma = 1;
1467121482Sjhb		}
1468121482Sjhb		if (ih->ih_flags & IH_MPSAFE) {
1469121482Sjhb			if (comma)
1470121482Sjhb				db_printf(", ");
1471121482Sjhb			db_printf("MPSAFE");
1472121482Sjhb			comma = 1;
1473121482Sjhb		}
1474121482Sjhb		if (ih->ih_need) {
1475121482Sjhb			if (comma)
1476121482Sjhb				db_printf(", ");
1477121482Sjhb			db_printf("NEED");
1478121482Sjhb		}
1479121482Sjhb		db_printf("}");
1480121482Sjhb	}
1481121482Sjhb	db_printf("\n");
1482121482Sjhb}
1483121482Sjhb
1484121482Sjhb/*
1485151658Sjhb * Dump details about a event.
1486121482Sjhb */
1487121482Sjhbvoid
1488151658Sjhbdb_dump_intr_event(struct intr_event *ie, int handlers)
1489121482Sjhb{
1490151658Sjhb	struct intr_handler *ih;
1491151658Sjhb	struct intr_thread *it;
1492121482Sjhb	int comma;
1493121482Sjhb
1494151658Sjhb	db_printf("%s ", ie->ie_fullname);
1495151658Sjhb	it = ie->ie_thread;
1496151658Sjhb	if (it != NULL)
1497151658Sjhb		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
1498151658Sjhb	else
1499151658Sjhb		db_printf("(no thread)");
1500177181Sjhb	if (ie->ie_cpu != NOCPU)
1501177181Sjhb		db_printf(" (CPU %d)", ie->ie_cpu);
1502151658Sjhb	if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
1503151658Sjhb	    (it != NULL && it->it_need)) {
1504121482Sjhb		db_printf(" {");
1505121482Sjhb		comma = 0;
1506151658Sjhb		if (ie->ie_flags & IE_SOFT) {
1507121482Sjhb			db_printf("SOFT");
1508121482Sjhb			comma = 1;
1509121482Sjhb		}
1510151658Sjhb		if (ie->ie_flags & IE_ENTROPY) {
1511121482Sjhb			if (comma)
1512121482Sjhb				db_printf(", ");
1513121482Sjhb			db_printf("ENTROPY");
1514121482Sjhb			comma = 1;
1515121482Sjhb		}
1516151658Sjhb		if (ie->ie_flags & IE_ADDING_THREAD) {
1517121482Sjhb			if (comma)
1518121482Sjhb				db_printf(", ");
1519151658Sjhb			db_printf("ADDING_THREAD");
1520121482Sjhb			comma = 1;
1521121482Sjhb		}
1522151658Sjhb		if (it != NULL && it->it_need) {
1523121482Sjhb			if (comma)
1524121482Sjhb				db_printf(", ");
1525121482Sjhb			db_printf("NEED");
1526121482Sjhb		}
1527121482Sjhb		db_printf("}");
1528121482Sjhb	}
1529121482Sjhb	db_printf("\n");
1530121482Sjhb
1531121482Sjhb	if (handlers)
1532151658Sjhb		TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
1533121482Sjhb		    db_dump_intrhand(ih);
1534121482Sjhb}
1535151658Sjhb
1536151658Sjhb/*
1537151658Sjhb * Dump data about interrupt handlers
1538151658Sjhb */
1539151658SjhbDB_SHOW_COMMAND(intr, db_show_intr)
1540151658Sjhb{
1541151658Sjhb	struct intr_event *ie;
1542160312Sjhb	int all, verbose;
1543151658Sjhb
1544151658Sjhb	verbose = index(modif, 'v') != NULL;
1545151658Sjhb	all = index(modif, 'a') != NULL;
1546151658Sjhb	TAILQ_FOREACH(ie, &event_list, ie_list) {
1547151658Sjhb		if (!all && TAILQ_EMPTY(&ie->ie_handlers))
1548151658Sjhb			continue;
1549151658Sjhb		db_dump_intr_event(ie, verbose);
1550160312Sjhb		if (db_pager_quit)
1551160312Sjhb			break;
1552151658Sjhb	}
1553151658Sjhb}
1554121482Sjhb#endif /* DDB */
1555121482Sjhb
1556121482Sjhb/*
155767551Sjhb * Start standard software interrupt threads
155866698Sjhb */
155967551Sjhbstatic void
156072237Sjhbstart_softintr(void *dummy)
156167551Sjhb{
156272237Sjhb
1563177859Sjeff	if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
1564177859Sjeff		panic("died while creating vm swi ithread");
156566698Sjhb}
1566177253SrwatsonSYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
1567177253Srwatson    NULL);
156866698Sjhb
1569151658Sjhb/*
157077582Stmm * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
157177582Stmm * The data for this machine dependent, and the declarations are in machine
157277582Stmm * dependent code.  The layout of intrnames and intrcnt however is machine
157377582Stmm * independent.
157477582Stmm *
157577582Stmm * We do not know the length of intrcnt and intrnames at compile time, so
157677582Stmm * calculate things at run time.
157777582Stmm */
157877582Stmmstatic int
157977582Stmmsysctl_intrnames(SYSCTL_HANDLER_ARGS)
158077582Stmm{
1581151658Sjhb	return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
158277582Stmm	   req));
158377582Stmm}
158477582Stmm
158577582StmmSYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
158677582Stmm    NULL, 0, sysctl_intrnames, "", "Interrupt Names");
158777582Stmm
158877582Stmmstatic int
158977582Stmmsysctl_intrcnt(SYSCTL_HANDLER_ARGS)
159077582Stmm{
1591151658Sjhb	return (sysctl_handle_opaque(oidp, intrcnt,
159277582Stmm	    (char *)eintrcnt - (char *)intrcnt, req));
159377582Stmm}
159477582Stmm
159577582StmmSYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
159677582Stmm    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
1597121482Sjhb
1598121482Sjhb#ifdef DDB
1599121482Sjhb/*
1600121482Sjhb * DDB command to dump the interrupt statistics.
1601121482Sjhb */
1602121482SjhbDB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
1603121482Sjhb{
1604121482Sjhb	u_long *i;
1605121482Sjhb	char *cp;
1606121482Sjhb
1607121482Sjhb	cp = intrnames;
1608160312Sjhb	for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) {
1609121482Sjhb		if (*cp == '\0')
1610121482Sjhb			break;
1611121482Sjhb		if (*i != 0)
1612121482Sjhb			db_printf("%s\t%lu\n", cp, *i);
1613121482Sjhb		cp += strlen(cp) + 1;
1614121482Sjhb	}
1615121482Sjhb}
1616121482Sjhb#endif
1617