1139804Simp/*-
226156Sse * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
326156Sse * All rights reserved.
426156Sse *
526156Sse * Redistribution and use in source and binary forms, with or without
626156Sse * modification, are permitted provided that the following conditions
726156Sse * are met:
826156Sse * 1. Redistributions of source code must retain the above copyright
926156Sse *    notice unmodified, this list of conditions, and the following
1026156Sse *    disclaimer.
1126156Sse * 2. Redistributions in binary form must reproduce the above copyright
1226156Sse *    notice, this list of conditions and the following disclaimer in the
1326156Sse *    documentation and/or other materials provided with the distribution.
1426156Sse *
1526156Sse * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
1626156Sse * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
1726156Sse * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
1826156Sse * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
1926156Sse * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
2026156Sse * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
2126156Sse * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
2226156Sse * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2326156Sse * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
2426156Sse * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2526156Sse */
2626156Sse
27116182Sobrien#include <sys/cdefs.h>
28116182Sobrien__FBSDID("$FreeBSD: stable/11/sys/kern/kern_intr.c 346560 2019-04-22 15:20:46Z ian $");
2936887Sdfr
30121482Sjhb#include "opt_ddb.h"
31272536Skib#include "opt_kstack_usage_prof.h"
32121482Sjhb
3341059Speter#include <sys/param.h>
3465822Sjhb#include <sys/bus.h>
35110860Salfred#include <sys/conf.h>
36178092Sjeff#include <sys/cpuset.h>
3765822Sjhb#include <sys/rtprio.h>
3841059Speter#include <sys/systm.h>
3966698Sjhb#include <sys/interrupt.h>
4066698Sjhb#include <sys/kernel.h>
4166698Sjhb#include <sys/kthread.h>
4266698Sjhb#include <sys/ktr.h>
43130128Sbde#include <sys/limits.h>
4474914Sjhb#include <sys/lock.h>
4526156Sse#include <sys/malloc.h>
4667365Sjhb#include <sys/mutex.h>
47195249Sjhb#include <sys/priv.h>
4866698Sjhb#include <sys/proc.h>
4972759Sjhb#include <sys/random.h>
5072237Sjhb#include <sys/resourcevar.h>
51139451Sjhb#include <sys/sched.h>
52177181Sjhb#include <sys/smp.h>
5377582Stmm#include <sys/sysctl.h>
54182024Skmacy#include <sys/syslog.h>
5566698Sjhb#include <sys/unistd.h>
5666698Sjhb#include <sys/vmmeter.h>
5766698Sjhb#include <machine/atomic.h>
5866698Sjhb#include <machine/cpu.h>
5967551Sjhb#include <machine/md_var.h>
6072237Sjhb#include <machine/stdarg.h>
61121482Sjhb#ifdef DDB
62121482Sjhb#include <ddb/ddb.h>
63121482Sjhb#include <ddb/db_sym.h>
64121482Sjhb#endif
6526156Sse
/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
	struct intr_event *it_event;
	struct thread *it_thread;	/* Kernel thread. */
	int	it_flags;		/* (j) IT_* flags. */
	int	it_need;		/* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define	IT_DEAD		0x000001	/* Thread is waiting to exit. */
#define	IT_WAIT		0x000002	/* Thread is waiting for completion. */

/*
 * Record describing an interrupt for entropy harvesting purposes
 * (presumably consumed by the random subsystem; not used in this chunk).
 */
struct	intr_entropy {
	struct	thread *td;
	uintptr_t event;
};

/* Global event/handler references exported to other subsystems. */
struct	intr_event *clk_intr_event;
struct	intr_event *tty_intr_event;
void	*vm_ih;
struct proc *intrproc;		/* Kernel process that ithreads attach to. */

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

/*
 * Number of back-to-back interrupts after which a source is treated as
 * storming; run-time tunable via the hw.intr_storm_threshold sysctl.
 */
static int intr_storm_threshold = 1000;
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
/* List of all interrupt events in the system; protected by event_lock. */
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);
static struct mtx event_lock;
MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);

/* Forward declarations for the static helpers defined below. */
static void	intr_event_update(struct intr_event *ie);
#ifdef INTR_FILTER
static int	intr_event_schedule_thread(struct intr_event *ie,
		    struct intr_thread *ithd);
static int	intr_filter_loop(struct intr_event *ie,
		    struct trapframe *frame, struct intr_thread **ithd);
static struct intr_thread *ithread_create(const char *name,
			      struct intr_handler *ih);
#else
static int	intr_event_schedule_thread(struct intr_event *ie);
static struct intr_thread *ithread_create(const char *name);
#endif
static void	ithread_destroy(struct intr_thread *ithread);
static void	ithread_execute_handlers(struct proc *p,
		    struct intr_event *ie);
#ifdef INTR_FILTER
static void	priv_ithread_execute_handler(struct proc *p,
		    struct intr_handler *ih);
#endif
static void	ithread_loop(void *);
static void	ithread_update(struct intr_thread *ithd);
static void	start_softintr(void *);
123128339Sbde
124165124Sjhb/* Map an interrupt type to an ithread priority. */
12572237Sjhbu_char
126151658Sjhbintr_priority(enum intr_type flags)
12765822Sjhb{
12872237Sjhb	u_char pri;
12965822Sjhb
13072237Sjhb	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
13178365Speter	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
13265822Sjhb	switch (flags) {
13372237Sjhb	case INTR_TYPE_TTY:
134217292Sjhb		pri = PI_TTY;
13565822Sjhb		break;
13665822Sjhb	case INTR_TYPE_BIO:
13765822Sjhb		pri = PI_DISK;
13865822Sjhb		break;
13965822Sjhb	case INTR_TYPE_NET:
14065822Sjhb		pri = PI_NET;
14165822Sjhb		break;
14265822Sjhb	case INTR_TYPE_CAM:
143217292Sjhb		pri = PI_DISK;
14465822Sjhb		break;
145217292Sjhb	case INTR_TYPE_AV:
14678365Speter		pri = PI_AV;
14778365Speter		break;
14872237Sjhb	case INTR_TYPE_CLK:
14972237Sjhb		pri = PI_REALTIME;
15072237Sjhb		break;
15165822Sjhb	case INTR_TYPE_MISC:
15265822Sjhb		pri = PI_DULL;          /* don't care */
15365822Sjhb		break;
15465822Sjhb	default:
15572237Sjhb		/* We didn't specify an interrupt level. */
156151658Sjhb		panic("intr_priority: no interrupt type in flags");
15765822Sjhb	}
15865822Sjhb
15965822Sjhb	return pri;
16065822Sjhb}
16165822Sjhb
/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
	struct intr_event *ie;
	struct thread *td;
	u_char pri;

	ie = ithd->it_event;
	td = ithd->it_thread;

	/* Determine the overall priority of this event. */
	if (TAILQ_EMPTY(&ie->ie_handlers))
		pri = PRI_MAX_ITHD;
	else
		/*
		 * Handlers are kept sorted by ih_pri (see
		 * intr_event_add_handler()), so the head of the list
		 * determines the event's priority.
		 */
		pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;

	/* Update name and priority. */
	strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
#ifdef KTR
	/* Invalidate KTR's cached copy of the thread name. */
	sched_clear_tdname(td);
#endif
	thread_lock(td);
	sched_prio(td, pri);
	thread_unlock(td);
}
190151658Sjhb
/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
	struct intr_handler *ih;
	char *last;
	int missed, space;

	/* Start off with no entropy and just the name of the event. */
	mtx_assert(&ie->ie_lock, MA_OWNED);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	ie->ie_flags &= ~IE_ENTROPY;
	missed = 0;
	/* space != 0 means no handler name has been appended yet. */
	space = 1;

	/* Run through all the handlers updating values. */
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
		    sizeof(ie->ie_fullname)) {
			strcat(ie->ie_fullname, " ");
			strcat(ie->ie_fullname, ih->ih_name);
			space = 0;
		} else
			missed++;
		if (ih->ih_flags & IH_ENTROPY)
			ie->ie_flags |= IE_ENTROPY;
	}

	/*
	 * If there is only one handler and its name is too long, just copy in
	 * as much of the end of the name (includes the unit number) as will
	 * fit.  Otherwise, we have multiple handlers and not all of the names
	 * will fit.  Add +'s to indicate missing names.  If we run out of room
	 * and still have +'s to add, change the last character from a + to a *.
	 */
	if (missed == 1 && space == 1) {
		ih = TAILQ_FIRST(&ie->ie_handlers);
		missed = strlen(ie->ie_fullname) + strlen(ih->ih_name) + 2 -
		    sizeof(ie->ie_fullname);
		strcat(ie->ie_fullname, (missed == 0) ? " " : "-");
		strcat(ie->ie_fullname, &ih->ih_name[missed]);
		missed = 0;
	}
	/* 'last' points at the final character slot before the NUL. */
	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
	while (missed-- > 0) {
		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
			if (*last == '+') {
				*last = '*';
				break;
			} else
				*last = '+';
		} else if (space) {
			strcat(ie->ie_fullname, " +");
			space = 0;
		} else
			strcat(ie->ie_fullname, "+");
	}

	/*
	 * If this event has an ithread, update it's priority and
	 * name.
	 */
	if (ie->ie_thread != NULL)
		ithread_update(ie->ie_thread);
	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}
25972237Sjhb
26072237Sjhbint
261183298Sobrienintr_event_create(struct intr_event **event, void *source, int flags, int irq,
262177940Sjhb    void (*pre_ithread)(void *), void (*post_ithread)(void *),
263271712Sadrian    void (*post_filter)(void *), int (*assign_cpu)(void *, int),
264177940Sjhb    const char *fmt, ...)
265169320Spiso{
266169320Spiso	struct intr_event *ie;
267169320Spiso	va_list ap;
26872237Sjhb
269169320Spiso	/* The only valid flag during creation is IE_SOFT. */
270169320Spiso	if ((flags & ~IE_SOFT) != 0)
271169320Spiso		return (EINVAL);
272169320Spiso	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
273169320Spiso	ie->ie_source = source;
274177940Sjhb	ie->ie_pre_ithread = pre_ithread;
275177940Sjhb	ie->ie_post_ithread = post_ithread;
276177940Sjhb	ie->ie_post_filter = post_filter;
277177181Sjhb	ie->ie_assign_cpu = assign_cpu;
278169320Spiso	ie->ie_flags = flags;
279178092Sjeff	ie->ie_irq = irq;
280177181Sjhb	ie->ie_cpu = NOCPU;
281169320Spiso	TAILQ_INIT(&ie->ie_handlers);
282169320Spiso	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);
283169320Spiso
284169320Spiso	va_start(ap, fmt);
285169320Spiso	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
286169320Spiso	va_end(ap);
287169320Spiso	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
288178092Sjeff	mtx_lock(&event_lock);
289169320Spiso	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
290178092Sjeff	mtx_unlock(&event_lock);
291169320Spiso	if (event != NULL)
292169320Spiso		*event = ie;
293169320Spiso	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
294169320Spiso	return (0);
295169320Spiso}
296169320Spiso
/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 *
 * bindirq controls whether the hardware interrupt itself is rebound
 * via ie_assign_cpu(); bindithread controls whether the event's
 * ithread (if any) has its cpuset changed.
 */
static int
_intr_event_bind(struct intr_event *ie, int cpu, bool bindirq, bool bindithread)
{
	lwpid_t id;
	int error;

	/* Need a CPU to bind to. */
	if (cpu != NOCPU && CPU_ABSENT(cpu))
		return (EINVAL);

	if (ie->ie_assign_cpu == NULL)
		return (EOPNOTSUPP);

	error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
	if (error)
		return (error);

	/*
	 * If we have any ithreads try to set their mask first to verify
	 * permissions, etc.
	 */
	if (bindithread) {
		mtx_lock(&ie->ie_lock);
		if (ie->ie_thread != NULL) {
			/* Snapshot the tid, then drop the lock for cpuset. */
			id = ie->ie_thread->it_thread->td_tid;
			mtx_unlock(&ie->ie_lock);
			error = cpuset_setithread(id, cpu);
			if (error)
				return (error);
		} else
			mtx_unlock(&ie->ie_lock);
	}
	if (bindirq)
		error = ie->ie_assign_cpu(ie->ie_source, cpu);
	if (error) {
		/*
		 * The hardware bind failed: roll the ithread's cpuset
		 * back to the CPU previously recorded in ie_cpu so the
		 * two stay consistent.
		 */
		if (bindithread) {
			mtx_lock(&ie->ie_lock);
			if (ie->ie_thread != NULL) {
				cpu = ie->ie_cpu;
				id = ie->ie_thread->it_thread->td_tid;
				mtx_unlock(&ie->ie_lock);
				(void)cpuset_setithread(id, cpu);
			} else
				mtx_unlock(&ie->ie_lock);
		}
		return (error);
	}

	/* Record the new binding. */
	if (bindirq) {
		mtx_lock(&ie->ie_lock);
		ie->ie_cpu = cpu;
		mtx_unlock(&ie->ie_lock);
	}

	return (error);
}
359178092Sjeff
/*
 * Bind an interrupt event to the specified CPU.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will be bound
 * to the specified CPU.
 */
int
intr_event_bind(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, true, true));
}

/*
 * Bind an interrupt event to the specified CPU, but do not bind associated
 * ithreads.
 */
int
intr_event_bind_irqonly(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, true, false));
}

/*
 * Bind an interrupt event's ithread to the specified CPU.
 */
int
intr_event_bind_ithread(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, false, true));
}
392333338Sshurd
393178092Sjeffstatic struct intr_event *
394178092Sjeffintr_lookup(int irq)
395178092Sjeff{
396178092Sjeff	struct intr_event *ie;
397178092Sjeff
398178092Sjeff	mtx_lock(&event_lock);
399178092Sjeff	TAILQ_FOREACH(ie, &event_list, ie_list)
400178092Sjeff		if (ie->ie_irq == irq &&
401178092Sjeff		    (ie->ie_flags & IE_SOFT) == 0 &&
402178092Sjeff		    TAILQ_FIRST(&ie->ie_handlers) != NULL)
403178092Sjeff			break;
404178092Sjeff	mtx_unlock(&event_lock);
405178092Sjeff	return (ie);
406178092Sjeff}
407178092Sjeff
408178092Sjeffint
409333338Sshurdintr_setaffinity(int irq, int mode, void *m)
410178092Sjeff{
411178092Sjeff	struct intr_event *ie;
412178092Sjeff	cpuset_t *mask;
413273270Sadrian	int cpu, n;
414178092Sjeff
415178092Sjeff	mask = m;
416178092Sjeff	cpu = NOCPU;
417178092Sjeff	/*
418178092Sjeff	 * If we're setting all cpus we can unbind.  Otherwise make sure
419178092Sjeff	 * only one cpu is in the set.
420178092Sjeff	 */
421178092Sjeff	if (CPU_CMP(cpuset_root, mask)) {
422178092Sjeff		for (n = 0; n < CPU_SETSIZE; n++) {
423178092Sjeff			if (!CPU_ISSET(n, mask))
424178092Sjeff				continue;
425178092Sjeff			if (cpu != NOCPU)
426178092Sjeff				return (EINVAL);
427273270Sadrian			cpu = n;
428178092Sjeff		}
429178092Sjeff	}
430178092Sjeff	ie = intr_lookup(irq);
431178092Sjeff	if (ie == NULL)
432178092Sjeff		return (ESRCH);
433333338Sshurd	switch (mode) {
434333338Sshurd	case CPU_WHICH_IRQ:
435333338Sshurd		return (intr_event_bind(ie, cpu));
436333338Sshurd	case CPU_WHICH_INTRHANDLER:
437333338Sshurd		return (intr_event_bind_irqonly(ie, cpu));
438333338Sshurd	case CPU_WHICH_ITHREAD:
439333338Sshurd		return (intr_event_bind_ithread(ie, cpu));
440333338Sshurd	default:
441333338Sshurd		return (EINVAL);
442333338Sshurd	}
443178092Sjeff}
444178092Sjeff
445178092Sjeffint
446333338Sshurdintr_getaffinity(int irq, int mode, void *m)
447178092Sjeff{
448178092Sjeff	struct intr_event *ie;
449333338Sshurd	struct thread *td;
450333338Sshurd	struct proc *p;
451178092Sjeff	cpuset_t *mask;
452333338Sshurd	lwpid_t id;
453333338Sshurd	int error;
454178092Sjeff
455178092Sjeff	mask = m;
456178092Sjeff	ie = intr_lookup(irq);
457178092Sjeff	if (ie == NULL)
458178092Sjeff		return (ESRCH);
459333338Sshurd
460333338Sshurd	error = 0;
461178092Sjeff	CPU_ZERO(mask);
462333338Sshurd	switch (mode) {
463333338Sshurd	case CPU_WHICH_IRQ:
464333338Sshurd	case CPU_WHICH_INTRHANDLER:
465333338Sshurd		mtx_lock(&ie->ie_lock);
466333338Sshurd		if (ie->ie_cpu == NOCPU)
467333338Sshurd			CPU_COPY(cpuset_root, mask);
468333338Sshurd		else
469333338Sshurd			CPU_SET(ie->ie_cpu, mask);
470333338Sshurd		mtx_unlock(&ie->ie_lock);
471333338Sshurd		break;
472333338Sshurd	case CPU_WHICH_ITHREAD:
473333338Sshurd		mtx_lock(&ie->ie_lock);
474333338Sshurd		if (ie->ie_thread == NULL) {
475333338Sshurd			mtx_unlock(&ie->ie_lock);
476333338Sshurd			CPU_COPY(cpuset_root, mask);
477333338Sshurd		} else {
478333338Sshurd			id = ie->ie_thread->it_thread->td_tid;
479333338Sshurd			mtx_unlock(&ie->ie_lock);
480333338Sshurd			error = cpuset_which(CPU_WHICH_TID, id, &p, &td, NULL);
481333338Sshurd			if (error != 0)
482333338Sshurd				return (error);
483333338Sshurd			CPU_COPY(&td->td_cpuset->cs_mask, mask);
484333338Sshurd			PROC_UNLOCK(p);
485333338Sshurd		}
486333338Sshurd	default:
487333338Sshurd		return (EINVAL);
488333338Sshurd	}
489177181Sjhb	return (0);
490177181Sjhb}
491177181Sjhb
/*
 * Tear down an interrupt event.  Fails with EBUSY if any handlers are
 * still attached; otherwise the event is unlinked from the global list,
 * its ithread (if any) is destroyed, and the structure is freed.
 */
int
intr_event_destroy(struct intr_event *ie)
{

	mtx_lock(&event_lock);
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		mtx_unlock(&ie->ie_lock);
		mtx_unlock(&event_lock);
		return (EBUSY);
	}
	TAILQ_REMOVE(&event_list, ie, ie_list);
#ifndef notyet
	/* Reap the event's ithread along with the event itself. */
	if (ie->ie_thread != NULL) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	mtx_unlock(&event_lock);
	mtx_destroy(&ie->ie_lock);
	free(ie, M_ITHREAD);
	return (0);
}
516151658Sjhb
#ifndef INTR_FILTER
/*
 * Create a new interrupt thread.  The kernel thread is created stopped
 * (RFSTOPPED) in the IWAIT state at ithread scheduling class; it is
 * made runnable later when its interrupt needs servicing.
 */
static struct intr_thread *
ithread_create(const char *name)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
		    &td, RFSTOPPED | RFHIGHPID,
	    	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
#else
/*
 * INTR_FILTER variant: identical except that ithread_loop is passed
 * the individual handler rather than the intr_thread structure.
 */
static struct intr_thread *
ithread_create(const char *name, struct intr_handler *ih)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ih, &intrproc,
		    &td, RFSTOPPED | RFHIGHPID,
	    	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
#endif
566169320Spiso
/*
 * Mark an interrupt thread as dead and, if it is idle, make it runnable
 * so it can notice IT_DEAD and exit (the exit itself is handled by the
 * thread, presumably in ithread_loop() — not visible in this chunk).
 */
static void
ithread_destroy(struct intr_thread *ithread)
{
	struct thread *td;

	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
	td = ithread->it_thread;
	thread_lock(td);
	ithread->it_flags |= IT_DEAD;
	if (TD_AWAITING_INTR(td)) {
		/* Idle ithread: wake it up so it sees IT_DEAD. */
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	}
	thread_unlock(td);
}
58272237Sjhb
#ifndef INTR_FILTER
/*
 * Add an interrupt handler to an event.  At least one of filter or
 * handler must be given.  Handlers are kept sorted by ih_pri on the
 * event's list; an event-wide ithread is created on first need.  On
 * success the opaque handler cookie is returned via cookiep.
 */
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in a event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Create a thread if we need one. */
	while (ie->ie_thread == NULL && handler != NULL) {
		if (ie->ie_flags & IE_ADDING_THREAD)
			/* Someone else is creating it; wait to be woken. */
			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
		else {
			ie->ie_flags |= IE_ADDING_THREAD;
			mtx_unlock(&ie->ie_lock);
			it = ithread_create("intr: newborn");
			mtx_lock(&ie->ie_lock);
			ie->ie_flags &= ~IE_ADDING_THREAD;
			ie->ie_thread = it;
			it->it_event = ie;
			ithread_update(it);
			wakeup(ie);
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
#else
/*
 * INTR_FILTER variant of intr_event_add_handler().  The difference
 * from the !INTR_FILTER version: a handler supplying both a filter and
 * a threaded handler gets its own private ithread; otherwise the
 * event-wide ithread is created on demand as usual.
 */
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in a event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* For filtered handlers, create a private ithread to run on. */
	if (filter != NULL && handler != NULL) {
		mtx_unlock(&ie->ie_lock);
		it = ithread_create("intr: newborn", ih);
		mtx_lock(&ie->ie_lock);
		it->it_event = ie;
		ih->ih_thread = it;
		ithread_update(it); /* XXX - do we really need this?!?!? */
	} else { /* Create the global per-event thread if we need one. */
		while (ie->ie_thread == NULL && handler != NULL) {
			if (ie->ie_flags & IE_ADDING_THREAD)
				msleep(ie, &ie->ie_lock, 0, "ithread", 0);
			else {
				ie->ie_flags |= IE_ADDING_THREAD;
				mtx_unlock(&ie->ie_lock);
				it = ithread_create("intr: newborn", ih);
				mtx_lock(&ie->ie_lock);
				ie->ie_flags &= ~IE_ADDING_THREAD;
				ie->ie_thread = it;
				it->it_event = ie;
				ithread_update(it);
				wakeup(ie);
			}
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
#endif
741169320Spiso
/*
 * Append a description preceded by a ':' to the name of the specified
 * interrupt handler.
 */
int
intr_event_describe_handler(struct intr_event *ie, void *cookie,
    const char *descr)
{
	struct intr_handler *ih;
	size_t space;
	char *start;

	mtx_lock(&ie->ie_lock);
#ifdef INVARIANTS
	/* Verify the cookie is actually on this event's handler list. */
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih == cookie)
			break;
	}
	if (ih == NULL) {
		mtx_unlock(&ie->ie_lock);
		panic("handler %p not found in interrupt event %p", cookie, ie);
	}
#endif
	ih = cookie;

	/*
	 * Look for an existing description by checking for an
	 * existing ":".  This assumes device names do not include
	 * colons.  If one is found, prepare to insert the new
	 * description at that point.  If one is not found, find the
	 * end of the name to use as the insertion point.
	 */
	start = strchr(ih->ih_name, ':');
	if (start == NULL)
		start = strchr(ih->ih_name, 0);

	/*
	 * See if there is enough remaining room in the string for the
	 * description + ":".  The "- 1" leaves room for the trailing
	 * '\0'.  The "+ 1" accounts for the colon.
	 */
	space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
	if (strlen(descr) + 1 > space) {
		mtx_unlock(&ie->ie_lock);
		return (ENOSPC);
	}

	/* Append a colon followed by the description. */
	*start = ':';
	strcpy(start + 1, descr);
	intr_event_update(ie);
	mtx_unlock(&ie->ie_lock);
	return (0);
}
796198134Sjhb
797198134Sjhb/*
798165125Sjhb * Return the ie_source field from the intr_event an intr_handler is
799165125Sjhb * associated with.
800165125Sjhb */
801165125Sjhbvoid *
802165125Sjhbintr_handler_source(void *cookie)
803165125Sjhb{
804165125Sjhb	struct intr_handler *ih;
805165125Sjhb	struct intr_event *ie;
806165125Sjhb
807165125Sjhb	ih = (struct intr_handler *)cookie;
808165125Sjhb	if (ih == NULL)
809165125Sjhb		return (NULL);
810165125Sjhb	ie = ih->ih_event;
811165125Sjhb	KASSERT(ie != NULL,
812165125Sjhb	    ("interrupt handler \"%s\" has a NULL interrupt event",
813165125Sjhb	    ih->ih_name));
814165125Sjhb	return (ie->ie_source);
815165125Sjhb}
816165125Sjhb
/*
 * Sleep until an ithread finishes executing an interrupt handler.
 *
 * XXX Doesn't currently handle interrupt filters or fast interrupt
 * handlers.  This is intended for compatibility with linux drivers
 * only.  Do not use in BSD code.
 */
void
_intr_drain(int irq)
{
	struct intr_event *ie;
	struct intr_thread *ithd;
	struct thread *td;

	ie = intr_lookup(irq);
	if (ie == NULL)
		return;
	if (ie->ie_thread == NULL)
		return;
	ithd = ie->ie_thread;
	td = ithd->it_thread;
	/*
	 * We set the flag and wait for it to be cleared to avoid
	 * long delays with potentially busy interrupt handlers
	 * were we to only sample TD_AWAITING_INTR() every tick.
	 */
	thread_lock(td);
	if (!TD_AWAITING_INTR(td)) {
		ithd->it_flags |= IT_WAIT;
		/*
		 * Poll IT_WAIT, dropping the thread lock for one tick at
		 * a time so the ithread gets a chance to clear the flag.
		 */
		while (ithd->it_flags & IT_WAIT) {
			thread_unlock(td);
			pause("idrain", 1);
			thread_lock(td);
		}
	}
	thread_unlock(td);
	return;
}
855219819Sjeff
856219819Sjeff
#ifndef INTR_FILTER
/*
 * Detach an interrupt handler from its interrupt event and free it.
 *
 * "cookie" is the value returned by intr_event_add_handler().  May
 * sleep waiting for the ithread to retire the handler, so it must be
 * called from a sleepable context.  Returns 0 on success or EINVAL
 * for a NULL cookie.
 */
int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	/* Sanity check: the handler must be on this event's list. */
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    ih->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there is no ithread, then just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another
	 * CPU!
	 */
	if (ie->ie_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	thread_lock(ie->ie_thread->it_thread);
	if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 *
		 * The release part of the following store ensures
		 * that the update of ih_flags is ordered before the
		 * it_need setting.  See the comment before
		 * atomic_cmpset_acq(&ithd->it_need, ...) operation in
		 * the ithread_execute_handlers().
		 */
		atomic_store_rel_int(&ie->ie_thread->it_need, 1);
	} else
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
	thread_unlock(ie->ie_thread->it_thread);
	/*
	 * Sleep until the ithread clears IH_DEAD (removing the handler
	 * from the list) and wakeup()s us.
	 */
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	intr_event_update(ie);
#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (!(ih->ih_flags & IH_FAST)) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}
95272237Sjhb
/*
 * Schedule an interrupt event's ithread to run, recording the request
 * in it_need.  Returns 0 on success or EINVAL for a stray interrupt
 * (no event, no handlers, or no ithread).
 */
static int
intr_event_schedule_thread(struct intr_event *ie)
{
	struct intr_entropy entropy;
	struct intr_thread *it;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
	    ie->ie_thread == NULL)
		return (EINVAL);

	ctd = curthread;
	it = ie->ie_thread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (ie->ie_flags & IE_ENTROPY) {
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest_queue(&entropy, sizeof(entropy), 2, RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 *
	 * Use store_rel to arrange that the store to ih_need in
	 * swi_sched() is before the store to it_need and prepare for
	 * transfer of this order to loads in the ithread.
	 */
	atomic_store_rel_int(&it->it_need, 1);
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		/* The ithread is idle; wake it and put it on a runqueue. */
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		/* Already running/runnable; it_need makes it loop again. */
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
#else
/*
 * INTR_FILTER variant: detach an interrupt handler from its interrupt
 * event and free it, also destroying the handler's private ithread if
 * it has one.  "cookie" is the value returned by
 * intr_event_add_handler().  May sleep.  Returns 0 on success or
 * EINVAL for a NULL cookie.
 */
int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
	struct intr_thread *it;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	/* Sanity check: the handler must be on this event's list. */
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    ih->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there are no ithreads (per event and per handler), then
	 * just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another CPU!
	 */
	if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/* Private or global ithread? */
	it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	thread_lock(it->it_thread);
	if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 *
		 * The release part of the following store ensures
		 * that the update of ih_flags is ordered before the
		 * it_need setting.  See the comment before
		 * atomic_cmpset_acq(&ithd->it_need, ...) operation in
		 * the ithread_execute_handlers().
		 */
		atomic_store_rel_int(&it->it_need, 1);
	} else
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
	thread_unlock(it->it_thread);
	/* Sleep until the ithread clears IH_DEAD and wakeup()s us. */
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	/*
	 * At this point, the handler has been disconnected from the event,
	 * so we can kill the private ithread if any.
	 */
	if (handler->ih_thread) {
		ithread_destroy(handler->ih_thread);
		handler->ih_thread = NULL;
	}
	intr_event_update(ie);
#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		/*
		 * NOTE(review): this condition looks wrong -- "handler"
		 * is the function argument and can never be NULL here,
		 * so "dead" is always cleared on the first iteration.
		 * The !INTR_FILTER variant tests ih's flags instead.
		 * Harmless for now since this is under "notyet".
		 */
		if (handler != NULL) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}
1116169320Spiso
/*
 * INTR_FILTER variant: schedule an ithread -- either the event's
 * shared one or a handler's private one, chosen by the caller -- to
 * run, recording the request in it_need.  Returns 0 on success or
 * EINVAL for a stray interrupt.
 */
static int
intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
{
	struct intr_entropy entropy;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
		return (EINVAL);

	ctd = curthread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (ie->ie_flags & IE_ENTROPY) {
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest_queue(&entropy, sizeof(entropy), 2, RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 *
	 * Use store_rel to arrange that the store to ih_need in
	 * swi_sched() is before the store to it_need and prepare for
	 * transfer of this order to loads in the ithread.
	 */
	atomic_store_rel_int(&it->it_need, 1);
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		/* The ithread is idle; wake it and put it on a runqueue. */
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		/* Already running/runnable; it_need makes it loop again. */
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
#endif
1172169320Spiso
/*
 * CPU-binding callback for software interrupt events.  Software
 * interrupts are raised in software rather than routed through a PIC,
 * so there is no hardware affinity to program; accept any requested
 * CPU by doing nothing and reporting success.
 */
static int
swi_assign_cpu(void *arg, int cpu)
{

	return (0);
}
1184192305Srwatson
/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 *
 * On success the new event (if created) is returned through *eventp and
 * the handler cookie through *cookiep.  Returns 0 or an errno value.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
	    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct intr_event *ie;
	int error;

	/* INTR_ENTROPY is not supported for software interrupts. */
	if (flags & INTR_ENTROPY)
		return (EINVAL);

	ie = (eventp != NULL) ? *eventp : NULL;

	if (ie != NULL) {
		/* A caller-supplied event must itself be a software event. */
		if (!(ie->ie_flags & IE_SOFT))
			return (EINVAL);
	} else {
		error = intr_event_create(&ie, NULL, IE_SOFT, 0,
		    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
		if (error)
			return (error);
		/* Hand the newly created event back to the caller. */
		if (eventp != NULL)
			*eventp = ie;
	}
	error = intr_event_add_handler(ie, name, NULL, handler, arg,
	    PI_SWI(pri), flags, cookiep);
	return (error);
}
121666698Sjhb
/*
 * Schedule a software interrupt thread.
 *
 * "cookie" is the handler cookie returned by swi_add().  With SWI_DELAY
 * set, only ih_need is marked and the ithread is not scheduled; the
 * handler then runs the next time the ithread runs for another reason.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intr_handler *ih = (struct intr_handler *)cookie;
	struct intr_event *ie = ih->ih_event;
	struct intr_entropy entropy;
	int error;

	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
	    ih->ih_need);

	/* Feed the SWI event into the random pool. */
	entropy.event = (uintptr_t)ih;
	entropy.td = curthread;
	random_harvest_queue(&entropy, sizeof(entropy), 1, RANDOM_SWI);

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	ih->ih_need = 1;

	if (!(flags & SWI_DELAY)) {
		PCPU_INC(cnt.v_soft);
#ifdef INTR_FILTER
		error = intr_event_schedule_thread(ie, ie->ie_thread);
#else
		error = intr_event_schedule_thread(ie);
#endif
		/* A SWI event always has an ithread, so this cannot fail. */
		KASSERT(error == 0, ("stray software interrupt"));
	}
}
125266698Sjhb
/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 *
 * "cookie" is the handler cookie returned by swi_add().  Returns 0 or
 * an errno value from intr_event_remove_handler().
 */
int
swi_remove(void *cookie)
{

	return (intr_event_remove_handler(cookie));
}
1265151699Sjhb
#ifdef INTR_FILTER
/*
 * Run a single handler from its private (per-handler) ithread, or
 * retire the handler if intr_event_remove_handler() has marked it
 * IH_DEAD.
 */
static void
priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
{
	struct intr_event *ie;

	ie = ih->ih_event;
	/*
	 * If this handler is marked for death, remove it from
	 * the list of handlers and wake up the sleeper.
	 */
	if (ih->ih_flags & IH_DEAD) {
		mtx_lock(&ie->ie_lock);
		TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
		ih->ih_flags &= ~IH_DEAD;
		wakeup(ih);
		mtx_unlock(&ie->ie_lock);
		return;
	}

	/* Execute this handler. */
	CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
	     __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
	     ih->ih_name, ih->ih_flags);

	/* Non-MPSAFE handlers are serialized under Giant. */
	if (!(ih->ih_flags & IH_MPSAFE))
		mtx_lock(&Giant);
	ih->ih_handler(ih->ih_argument);
	if (!(ih->ih_flags & IH_MPSAFE))
		mtx_unlock(&Giant);
}
#endif
1298169320Spiso
/*
 * This is a public function for use by drivers that mux interrupt
 * handlers for child devices from their interrupt handler.
 *
 * Walks the event's handler list, retiring IH_DEAD handlers and
 * running the rest (for soft events, only those with ih_need set).
 */
void
intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
{
	struct intr_handler *ih, *ihn;

	/* SAFE traversal: the IH_DEAD branch unlinks the current entry. */
	TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
		/*
		 * If this handler is marked for death, remove it from
		 * the list of handlers and wake up the sleeper.
		 */
		if (ih->ih_flags & IH_DEAD) {
			mtx_lock(&ie->ie_lock);
			TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
			ih->ih_flags &= ~IH_DEAD;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
			continue;
		}

		/* Skip filter only handlers */
		if (ih->ih_handler == NULL)
			continue;

		/*
		 * For software interrupt threads, we only execute
		 * handlers that have their need flag set.  Hardware
		 * interrupt threads always invoke all of their handlers.
		 *
		 * ih_need can only be 0 or 1.  Failed cmpset below
		 * means that there is no request to execute handlers,
		 * so a retry of the cmpset is not needed.
		 */
		if ((ie->ie_flags & IE_SOFT) != 0 &&
		    atomic_cmpset_int(&ih->ih_need, 1, 0) == 0)
			continue;

		/* Execute this handler. */
		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
		    __func__, p->p_pid, (void *)ih->ih_handler,
		    ih->ih_argument, ih->ih_name, ih->ih_flags);

		/* Non-MPSAFE handlers are serialized under Giant. */
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_lock(&Giant);
		ih->ih_handler(ih->ih_argument);
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_unlock(&Giant);
	}
}
1351183052Sjhb
/*
 * Run all of an event's handlers from its ithread, apply interrupt
 * storm throttling, and then let the source generate interrupts again
 * via the post-ithread hook.
 */
static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{

	/* Interrupt handlers should not sleep. */
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_NO_SLEEPING();
	intr_event_execute_handlers(p, ie);
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_SLEEPING_OK();

	/*
	 * Interrupt storm handling:
	 *
	 * If this interrupt source is currently storming, then throttle
	 * it to only fire the handler once  per clock tick.
	 *
	 * If this interrupt source is not currently storming, but the
	 * number of back to back interrupts exceeds the storm threshold,
	 * then enter storming mode.
	 */
	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
	    !(ie->ie_flags & IE_SOFT)) {
		/* Report the message only once every second. */
		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
			printf(
	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
			    ie->ie_name);
		}
		pause("istorm", 1);
	} else
		ie->ie_count++;

	/*
	 * Now that all the handlers have had a chance to run, reenable
	 * the interrupt source.
	 */
	if (ie->ie_post_ithread != NULL)
		ie->ie_post_ithread(ie->ie_source);
}
1392151658Sjhb
#ifndef INTR_FILTER
/*
 * This is the main code for interrupt threads.
 *
 * "arg" is the struct intr_thread this kernel thread services.  The
 * loop alternates between draining it_need (running the event's
 * handlers) and sleeping in IWAIT until rescheduled by
 * intr_event_schedule_thread().
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	int wake;

	td = curthread;
	p = td->td_proc;
	ithd = (struct intr_thread *)arg;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;
	wake = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 *
		 * The load_acq part of the following cmpset ensures
		 * that the load of ih_need in ithread_execute_handlers()
		 * is ordered after the load of it_need here.
		 */
		while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0)
			ithread_execute_handlers(p, ie);
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (atomic_load_acq_int(&ithd->it_need) == 0 &&
		    (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
			/* Nothing pending; sleep until rescheduled. */
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL | SWT_IWAIT, NULL);
		}
		if (ithd->it_flags & IT_WAIT) {
			/* _intr_drain() is waiting for us to go idle. */
			wake = 1;
			ithd->it_flags &= ~IT_WAIT;
		}
		thread_unlock(td);
		if (wake) {
			wakeup(ithd);
			wake = 0;
		}
	}
}
1467177940Sjhb
/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:                        the event connected to this interrupt.
 * o frame:                     some archs (i.e. i386) pass a frame to some.
 *                              handlers as their main argument.
 * Return value:
 * o 0:                         everything ok.
 * o EINVAL:                    stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_handler *ih;
	struct trapframe *oldframe;
	struct thread *td;
	int error, ret, thread;

	td = curthread;

#ifdef KSTACK_USAGE_PROF
	intr_prof_stack_use(td, frame);
#endif

	/* An interrupt with no event or handlers is a stray interrupt. */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	/*
	 * Execute fast interrupt handlers directly.
	 * To support clock handlers, if a handler registers
	 * with a NULL argument, then we pass it a pointer to
	 * a trapframe as its argument.
	 */
	td->td_intr_nesting_level++;
	thread = 0;
	ret = 0;
	critical_enter();
	oldframe = td->td_intr_frame;
	td->td_intr_frame = frame;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		/* A handler without a filter always needs the ithread. */
		if (ih->ih_filter == NULL) {
			thread = 1;
			continue;
		}
		CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_argument == NULL ? frame :
		    ih->ih_argument, ih->ih_name);
		if (ih->ih_argument == NULL)
			ret = ih->ih_filter(frame);
		else
			ret = ih->ih_filter(ih->ih_argument);
		KASSERT(ret == FILTER_STRAY ||
		    ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
		    (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
		    ("%s: incorrect return value %#x from %s", __func__, ret,
		    ih->ih_name));

		/*
		 * Wrapper handler special handling:
		 *
		 * in some particular cases (like pccard and pccbb),
		 * the _real_ device handler is wrapped in a couple of
		 * functions - a filter wrapper and an ithread wrapper.
		 * In this case (and just in this case), the filter wrapper
		 * could ask the system to schedule the ithread and mask
		 * the interrupt source if the wrapped handler is composed
		 * of just an ithread handler.
		 *
		 * TODO: write a generic wrapper to avoid people rolling
		 * their own
		 */
		if (!thread) {
			if (ret == FILTER_SCHEDULE_THREAD)
				thread = 1;
		}
	}
	td->td_intr_frame = oldframe;

	if (thread) {
		/* An ithread will run; invoke the pre-ithread hook. */
		if (ie->ie_pre_ithread != NULL)
			ie->ie_pre_ithread(ie->ie_source);
	} else {
		/* Filters handled everything; invoke the post-filter hook. */
		if (ie->ie_post_filter != NULL)
			ie->ie_post_filter(ie->ie_source);
	}

	/* Schedule the ithread if needed. */
	if (thread) {
		error = intr_event_schedule_thread(ie);
		KASSERT(error == 0, ("bad stray interrupt"));
	}
	critical_exit();
	td->td_intr_nesting_level--;
	return (0);
}
#else
/*
 * This is the main code for interrupt threads.
 *
 * INTR_FILTER variant: "arg" is the struct intr_handler; the thread
 * may be the handler's private ithread or the event's shared one,
 * which determines whether a single handler or the whole handler list
 * is run per pass.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_handler *ih;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	int priv;
	int wake;

	td = curthread;
	p = td->td_proc;
	ih = (struct intr_handler *)arg;
	/* Non-zero priv selects the single-handler (private) service path. */
	priv = (ih->ih_thread != NULL) ? 1 : 0;
	ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;
	wake = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 *
		 * The load_acq part of the following cmpset ensures
		 * that the load of ih_need in ithread_execute_handlers()
		 * is ordered after the load of it_need here.
		 */
		while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0) {
			if (priv)
				priv_ithread_execute_handler(p, ih);
			else
				ithread_execute_handlers(p, ie);
		}
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (atomic_load_acq_int(&ithd->it_need) == 0 &&
		    (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
			/* Nothing pending; sleep until rescheduled. */
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL | SWT_IWAIT, NULL);
		}
		if (ithd->it_flags & IT_WAIT) {
			/* _intr_drain() is waiting for us to go idle. */
			wake = 1;
			ithd->it_flags &= ~IT_WAIT;
		}
		thread_unlock(td);
		if (wake) {
			wakeup(ithd);
			wake = 0;
		}
	}
}
1647169320Spiso
1648169320Spiso/*
1649169320Spiso * Main loop for interrupt filter.
1650169320Spiso *
1651169320Spiso * Some architectures (i386, amd64 and arm) require the optional frame
1652169320Spiso * parameter, and use it as the main argument for fast handler execution
1653169320Spiso * when ih_argument == NULL.
1654169320Spiso *
1655169320Spiso * Return value:
1656169320Spiso * o FILTER_STRAY:              No filter recognized the event, and no
1657169320Spiso *                              filter-less handler is registered on this
1658169320Spiso *                              line.
1659169320Spiso * o FILTER_HANDLED:            A filter claimed the event and served it.
1660169320Spiso * o FILTER_SCHEDULE_THREAD:    No filter claimed the event, but there's at
1661169320Spiso *                              least one filter-less handler on this line.
1662169320Spiso * o FILTER_HANDLED |
1663169320Spiso *   FILTER_SCHEDULE_THREAD:    A filter claimed the event, and asked for
1664169320Spiso *                              scheduling the per-handler ithread.
1665169320Spiso *
1666169320Spiso * In case an ithread has to be scheduled, in *ithd there will be a
1667169320Spiso * pointer to a struct intr_thread containing the thread to be
1668169320Spiso * scheduled.
1669169320Spiso */
1670169320Spiso
1671177940Sjhbstatic int
1672169320Spisointr_filter_loop(struct intr_event *ie, struct trapframe *frame,
1673169320Spiso		 struct intr_thread **ithd)
1674169320Spiso{
1675169320Spiso	struct intr_handler *ih;
1676169320Spiso	void *arg;
1677169320Spiso	int ret, thread_only;
1678169320Spiso
1679169320Spiso	ret = 0;
1680169320Spiso	thread_only = 0;
1681169320Spiso	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
1682169320Spiso		/*
1683169320Spiso		 * Execute fast interrupt handlers directly.
1684169320Spiso		 * To support clock handlers, if a handler registers
1685169320Spiso		 * with a NULL argument, then we pass it a pointer to
1686169320Spiso		 * a trapframe as its argument.
1687169320Spiso		 */
1688169320Spiso		arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);
1689169320Spiso
1690169320Spiso		CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
1691169320Spiso		     ih->ih_filter, ih->ih_handler, arg, ih->ih_name);
1692169320Spiso
1693169320Spiso		if (ih->ih_filter != NULL)
1694169320Spiso			ret = ih->ih_filter(arg);
1695169320Spiso		else {
1696169320Spiso			thread_only = 1;
1697169320Spiso			continue;
1698169320Spiso		}
1699203061Savg		KASSERT(ret == FILTER_STRAY ||
1700203061Savg		    ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
1701203061Savg		    (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
1702203061Savg		    ("%s: incorrect return value %#x from %s", __func__, ret,
1703203061Savg		    ih->ih_name));
1704169320Spiso		if (ret & FILTER_STRAY)
1705169320Spiso			continue;
1706169320Spiso		else {
1707169320Spiso			*ithd = ih->ih_thread;
1708169320Spiso			return (ret);
1709169320Spiso		}
1710169320Spiso	}
1711169320Spiso
1712169320Spiso	/*
1713169320Spiso	 * No filters handled the interrupt and we have at least
1714169320Spiso	 * one handler without a filter.  In this case, we schedule
1715169320Spiso	 * all of the filter-less handlers to run in the ithread.
1716169320Spiso	 */
1717169320Spiso	if (thread_only) {
1718169320Spiso		*ithd = ie->ie_thread;
1719169320Spiso		return (FILTER_SCHEDULE_THREAD);
1720169320Spiso	}
1721169320Spiso	return (FILTER_STRAY);
1722169320Spiso}
1723169320Spiso
1724169320Spiso/*
1725169320Spiso * Main interrupt handling body.
1726169320Spiso *
1727169320Spiso * Input:
1728169320Spiso * o ie:                        the event connected to this interrupt.
1729169320Spiso * o frame:                     some archs (i.e. i386) pass a frame to some.
1730169320Spiso *                              handlers as their main argument.
1731169320Spiso * Return value:
1732169320Spiso * o 0:                         everything ok.
1733169320Spiso * o EINVAL:                    stray interrupt.
1734169320Spiso */
1735169320Spisoint
1736169320Spisointr_event_handle(struct intr_event *ie, struct trapframe *frame)
1737169320Spiso{
1738169320Spiso	struct intr_thread *ithd;
1739208988Smav	struct trapframe *oldframe;
1740169320Spiso	struct thread *td;
1741169320Spiso	int thread;
1742169320Spiso
1743169320Spiso	ithd = NULL;
1744169320Spiso	td = curthread;
1745169320Spiso
1746169320Spiso	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
1747169320Spiso		return (EINVAL);
1748169320Spiso
1749169320Spiso	td->td_intr_nesting_level++;
1750169320Spiso	thread = 0;
1751169320Spiso	critical_enter();
1752208988Smav	oldframe = td->td_intr_frame;
1753208988Smav	td->td_intr_frame = frame;
1754177940Sjhb	thread = intr_filter_loop(ie, frame, &ithd);
1755169320Spiso	if (thread & FILTER_HANDLED) {
1756177940Sjhb		if (ie->ie_post_filter != NULL)
1757177940Sjhb			ie->ie_post_filter(ie->ie_source);
1758169320Spiso	} else {
1759177940Sjhb		if (ie->ie_pre_ithread != NULL)
1760177940Sjhb			ie->ie_pre_ithread(ie->ie_source);
1761169320Spiso	}
1762208988Smav	td->td_intr_frame = oldframe;
1763169320Spiso	critical_exit();
1764169320Spiso
1765169320Spiso	/* Interrupt storm logic */
1766169320Spiso	if (thread & FILTER_STRAY) {
1767169320Spiso		ie->ie_count++;
1768169320Spiso		if (ie->ie_count < intr_storm_threshold)
1769169320Spiso			printf("Interrupt stray detection not present\n");
1770169320Spiso	}
1771169320Spiso
1772169320Spiso	/* Schedule an ithread if needed. */
1773169320Spiso	if (thread & FILTER_SCHEDULE_THREAD) {
1774169320Spiso		if (intr_event_schedule_thread(ie, ithd) != 0)
1775169320Spiso			panic("%s: impossible stray interrupt", __func__);
1776169320Spiso	}
1777169320Spiso	td->td_intr_nesting_level--;
1778169320Spiso	return (0);
1779169320Spiso}
1780169320Spiso#endif
1781169320Spiso
1782121482Sjhb#ifdef DDB
178372237Sjhb/*
1784121482Sjhb * Dump details about an interrupt handler
1785121482Sjhb */
1786121482Sjhbstatic void
1787151658Sjhbdb_dump_intrhand(struct intr_handler *ih)
1788121482Sjhb{
1789121482Sjhb	int comma;
1790121482Sjhb
1791121482Sjhb	db_printf("\t%-10s ", ih->ih_name);
1792121482Sjhb	switch (ih->ih_pri) {
1793121482Sjhb	case PI_REALTIME:
1794121482Sjhb		db_printf("CLK ");
1795121482Sjhb		break;
1796121482Sjhb	case PI_AV:
1797121482Sjhb		db_printf("AV  ");
1798121482Sjhb		break;
1799217292Sjhb	case PI_TTY:
1800121482Sjhb		db_printf("TTY ");
1801121482Sjhb		break;
1802121482Sjhb	case PI_NET:
1803121482Sjhb		db_printf("NET ");
1804121482Sjhb		break;
1805121482Sjhb	case PI_DISK:
1806121482Sjhb		db_printf("DISK");
1807121482Sjhb		break;
1808121482Sjhb	case PI_DULL:
1809121482Sjhb		db_printf("DULL");
1810121482Sjhb		break;
1811121482Sjhb	default:
1812121482Sjhb		if (ih->ih_pri >= PI_SOFT)
1813121482Sjhb			db_printf("SWI ");
1814121482Sjhb		else
1815121482Sjhb			db_printf("%4u", ih->ih_pri);
1816121482Sjhb		break;
1817121482Sjhb	}
1818121482Sjhb	db_printf(" ");
1819249163Skib	if (ih->ih_filter != NULL) {
1820249163Skib		db_printf("[F]");
1821249163Skib		db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC);
1822249163Skib	}
1823249163Skib	if (ih->ih_handler != NULL) {
1824249163Skib		if (ih->ih_filter != NULL)
1825249163Skib			db_printf(",");
1826249163Skib		db_printf("[H]");
1827249163Skib		db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
1828249163Skib	}
1829121482Sjhb	db_printf("(%p)", ih->ih_argument);
1830121482Sjhb	if (ih->ih_need ||
1831166901Spiso	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
1832121482Sjhb	    IH_MPSAFE)) != 0) {
1833121482Sjhb		db_printf(" {");
1834121482Sjhb		comma = 0;
1835121482Sjhb		if (ih->ih_flags & IH_EXCLUSIVE) {
1836121482Sjhb			if (comma)
1837121482Sjhb				db_printf(", ");
1838121482Sjhb			db_printf("EXCL");
1839121482Sjhb			comma = 1;
1840121482Sjhb		}
1841121482Sjhb		if (ih->ih_flags & IH_ENTROPY) {
1842121482Sjhb			if (comma)
1843121482Sjhb				db_printf(", ");
1844121482Sjhb			db_printf("ENTROPY");
1845121482Sjhb			comma = 1;
1846121482Sjhb		}
1847121482Sjhb		if (ih->ih_flags & IH_DEAD) {
1848121482Sjhb			if (comma)
1849121482Sjhb				db_printf(", ");
1850121482Sjhb			db_printf("DEAD");
1851121482Sjhb			comma = 1;
1852121482Sjhb		}
1853121482Sjhb		if (ih->ih_flags & IH_MPSAFE) {
1854121482Sjhb			if (comma)
1855121482Sjhb				db_printf(", ");
1856121482Sjhb			db_printf("MPSAFE");
1857121482Sjhb			comma = 1;
1858121482Sjhb		}
1859121482Sjhb		if (ih->ih_need) {
1860121482Sjhb			if (comma)
1861121482Sjhb				db_printf(", ");
1862121482Sjhb			db_printf("NEED");
1863121482Sjhb		}
1864121482Sjhb		db_printf("}");
1865121482Sjhb	}
1866121482Sjhb	db_printf("\n");
1867121482Sjhb}
1868121482Sjhb
1869121482Sjhb/*
1870151658Sjhb * Dump details about a event.
1871121482Sjhb */
1872121482Sjhbvoid
1873151658Sjhbdb_dump_intr_event(struct intr_event *ie, int handlers)
1874121482Sjhb{
1875151658Sjhb	struct intr_handler *ih;
1876151658Sjhb	struct intr_thread *it;
1877121482Sjhb	int comma;
1878121482Sjhb
1879151658Sjhb	db_printf("%s ", ie->ie_fullname);
1880151658Sjhb	it = ie->ie_thread;
1881151658Sjhb	if (it != NULL)
1882151658Sjhb		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
1883151658Sjhb	else
1884151658Sjhb		db_printf("(no thread)");
1885151658Sjhb	if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
1886151658Sjhb	    (it != NULL && it->it_need)) {
1887121482Sjhb		db_printf(" {");
1888121482Sjhb		comma = 0;
1889151658Sjhb		if (ie->ie_flags & IE_SOFT) {
1890121482Sjhb			db_printf("SOFT");
1891121482Sjhb			comma = 1;
1892121482Sjhb		}
1893151658Sjhb		if (ie->ie_flags & IE_ENTROPY) {
1894121482Sjhb			if (comma)
1895121482Sjhb				db_printf(", ");
1896121482Sjhb			db_printf("ENTROPY");
1897121482Sjhb			comma = 1;
1898121482Sjhb		}
1899151658Sjhb		if (ie->ie_flags & IE_ADDING_THREAD) {
1900121482Sjhb			if (comma)
1901121482Sjhb				db_printf(", ");
1902151658Sjhb			db_printf("ADDING_THREAD");
1903121482Sjhb			comma = 1;
1904121482Sjhb		}
1905151658Sjhb		if (it != NULL && it->it_need) {
1906121482Sjhb			if (comma)
1907121482Sjhb				db_printf(", ");
1908121482Sjhb			db_printf("NEED");
1909121482Sjhb		}
1910121482Sjhb		db_printf("}");
1911121482Sjhb	}
1912121482Sjhb	db_printf("\n");
1913121482Sjhb
1914121482Sjhb	if (handlers)
1915151658Sjhb		TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
1916121482Sjhb		    db_dump_intrhand(ih);
1917121482Sjhb}
1918151658Sjhb
1919151658Sjhb/*
1920151658Sjhb * Dump data about interrupt handlers
1921151658Sjhb */
1922151658SjhbDB_SHOW_COMMAND(intr, db_show_intr)
1923151658Sjhb{
1924151658Sjhb	struct intr_event *ie;
1925160312Sjhb	int all, verbose;
1926151658Sjhb
1927229272Sed	verbose = strchr(modif, 'v') != NULL;
1928229272Sed	all = strchr(modif, 'a') != NULL;
1929151658Sjhb	TAILQ_FOREACH(ie, &event_list, ie_list) {
1930151658Sjhb		if (!all && TAILQ_EMPTY(&ie->ie_handlers))
1931151658Sjhb			continue;
1932151658Sjhb		db_dump_intr_event(ie, verbose);
1933160312Sjhb		if (db_pager_quit)
1934160312Sjhb			break;
1935151658Sjhb	}
1936151658Sjhb}
1937121482Sjhb#endif /* DDB */
1938121482Sjhb
1939121482Sjhb/*
194067551Sjhb * Start standard software interrupt threads
194166698Sjhb */
194267551Sjhbstatic void
194372237Sjhbstart_softintr(void *dummy)
194467551Sjhb{
194572237Sjhb
1946177859Sjeff	if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
1947177859Sjeff		panic("died while creating vm swi ithread");
194866698Sjhb}
1949177253SrwatsonSYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
1950177253Srwatson    NULL);
195166698Sjhb
1952151658Sjhb/*
195377582Stmm * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
195477582Stmm * The data for this machine dependent, and the declarations are in machine
195577582Stmm * dependent code.  The layout of intrnames and intrcnt however is machine
195677582Stmm * independent.
195777582Stmm *
195877582Stmm * We do not know the length of intrcnt and intrnames at compile time, so
195977582Stmm * calculate things at run time.
196077582Stmm */
196177582Stmmstatic int
196277582Stmmsysctl_intrnames(SYSCTL_HANDLER_ARGS)
196377582Stmm{
1964224187Sattilio	return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req));
196577582Stmm}
196677582Stmm
196777582StmmSYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
196877582Stmm    NULL, 0, sysctl_intrnames, "", "Interrupt Names");
196977582Stmm
197077582Stmmstatic int
197177582Stmmsysctl_intrcnt(SYSCTL_HANDLER_ARGS)
197277582Stmm{
1973232751Sjmallett#ifdef SCTL_MASK32
1974232751Sjmallett	uint32_t *intrcnt32;
1975232751Sjmallett	unsigned i;
1976232751Sjmallett	int error;
1977232751Sjmallett
1978232751Sjmallett	if (req->flags & SCTL_MASK32) {
1979232751Sjmallett		if (!req->oldptr)
1980232751Sjmallett			return (sysctl_handle_opaque(oidp, NULL, sintrcnt / 2, req));
1981232751Sjmallett		intrcnt32 = malloc(sintrcnt / 2, M_TEMP, M_NOWAIT);
1982232751Sjmallett		if (intrcnt32 == NULL)
1983232751Sjmallett			return (ENOMEM);
1984232751Sjmallett		for (i = 0; i < sintrcnt / sizeof (u_long); i++)
1985232751Sjmallett			intrcnt32[i] = intrcnt[i];
1986232751Sjmallett		error = sysctl_handle_opaque(oidp, intrcnt32, sintrcnt / 2, req);
1987232751Sjmallett		free(intrcnt32, M_TEMP);
1988232751Sjmallett		return (error);
1989232751Sjmallett	}
1990232751Sjmallett#endif
1991224187Sattilio	return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req));
199277582Stmm}
199377582Stmm
199477582StmmSYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
199577582Stmm    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
1996121482Sjhb
1997121482Sjhb#ifdef DDB
1998121482Sjhb/*
1999121482Sjhb * DDB command to dump the interrupt statistics.
2000121482Sjhb */
2001121482SjhbDB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
2002121482Sjhb{
2003121482Sjhb	u_long *i;
2004121482Sjhb	char *cp;
2005224187Sattilio	u_int j;
2006121482Sjhb
2007121482Sjhb	cp = intrnames;
2008224187Sattilio	j = 0;
2009224187Sattilio	for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit;
2010224187Sattilio	    i++, j++) {
2011121482Sjhb		if (*cp == '\0')
2012121482Sjhb			break;
2013121482Sjhb		if (*i != 0)
2014121482Sjhb			db_printf("%s\t%lu\n", cp, *i);
2015121482Sjhb		cp += strlen(cp) + 1;
2016121482Sjhb	}
2017121482Sjhb}
2018121482Sjhb#endif
2019