/* kern_intr.c revision 165125 */
1139804Simp/*-
226156Sse * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
326156Sse * All rights reserved.
426156Sse *
526156Sse * Redistribution and use in source and binary forms, with or without
626156Sse * modification, are permitted provided that the following conditions
726156Sse * are met:
826156Sse * 1. Redistributions of source code must retain the above copyright
926156Sse *    notice unmodified, this list of conditions, and the following
1026156Sse *    disclaimer.
1126156Sse * 2. Redistributions in binary form must reproduce the above copyright
1226156Sse *    notice, this list of conditions and the following disclaimer in the
1326156Sse *    documentation and/or other materials provided with the distribution.
1426156Sse *
1526156Sse * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
1626156Sse * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
1726156Sse * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
1826156Sse * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
1926156Sse * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
2026156Sse * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
2126156Sse * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
2226156Sse * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2326156Sse * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
2426156Sse * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2526156Sse */
2626156Sse
27116182Sobrien#include <sys/cdefs.h>
28116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/kern_intr.c 165125 2006-12-12 19:20:19Z jhb $");
2936887Sdfr
30121482Sjhb#include "opt_ddb.h"
31121482Sjhb
3241059Speter#include <sys/param.h>
3365822Sjhb#include <sys/bus.h>
34110860Salfred#include <sys/conf.h>
3565822Sjhb#include <sys/rtprio.h>
3641059Speter#include <sys/systm.h>
3766698Sjhb#include <sys/interrupt.h>
3866698Sjhb#include <sys/kernel.h>
3966698Sjhb#include <sys/kthread.h>
4066698Sjhb#include <sys/ktr.h>
41130128Sbde#include <sys/limits.h>
4274914Sjhb#include <sys/lock.h>
4326156Sse#include <sys/malloc.h>
4467365Sjhb#include <sys/mutex.h>
4566698Sjhb#include <sys/proc.h>
4672759Sjhb#include <sys/random.h>
4772237Sjhb#include <sys/resourcevar.h>
48139451Sjhb#include <sys/sched.h>
4977582Stmm#include <sys/sysctl.h>
5066698Sjhb#include <sys/unistd.h>
5166698Sjhb#include <sys/vmmeter.h>
5266698Sjhb#include <machine/atomic.h>
5366698Sjhb#include <machine/cpu.h>
5467551Sjhb#include <machine/md_var.h>
5572237Sjhb#include <machine/stdarg.h>
56121482Sjhb#ifdef DDB
57121482Sjhb#include <ddb/ddb.h>
58121482Sjhb#include <ddb/db_sym.h>
59121482Sjhb#endif
6026156Sse
/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
	struct intr_event *it_event;	/* Event this thread services. */
	struct thread *it_thread;	/* Kernel thread. */
	int	it_flags;		/* (j) IT_* flags. */
	int	it_need;		/* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define	IT_DEAD		0x000001	/* Thread is waiting to exit. */

/* Snapshot folded into the entropy pool when an interrupt is harvested. */
struct	intr_entropy {
	struct	thread *td;		/* curthread at harvest time. */
	uintptr_t event;		/* Address of the interrupt event. */
};

/* Well-known interrupt events and software interrupt handler cookies. */
struct	intr_event *clk_intr_event;
struct	intr_event *tty_intr_event;
void	*softclock_ih;
void	*vm_ih;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

/*
 * Consecutive interrupts above this count trigger storm throttling in
 * ithread_execute_handlers(); a value of 0 disables the check.
 */
static int intr_storm_threshold = 500;
TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
/* List of all interrupt events in the system. */
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);

/* Forward declarations of file-local helpers. */
static void	intr_event_update(struct intr_event *ie);
static struct intr_thread *ithread_create(const char *name);
static void	ithread_destroy(struct intr_thread *ithread);
static void	ithread_execute_handlers(struct proc *p, struct intr_event *ie);
static void	ithread_loop(void *);
static void	ithread_update(struct intr_thread *ithd);
static void	start_softintr(void *);
101128339Sbde
102165124Sjhb/* Map an interrupt type to an ithread priority. */
10372237Sjhbu_char
104151658Sjhbintr_priority(enum intr_type flags)
10565822Sjhb{
10672237Sjhb	u_char pri;
10765822Sjhb
10872237Sjhb	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
10978365Speter	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
11065822Sjhb	switch (flags) {
11172237Sjhb	case INTR_TYPE_TTY:
11265822Sjhb		pri = PI_TTYLOW;
11365822Sjhb		break;
11465822Sjhb	case INTR_TYPE_BIO:
11565822Sjhb		/*
11665822Sjhb		 * XXX We need to refine this.  BSD/OS distinguishes
11765822Sjhb		 * between tape and disk priorities.
11865822Sjhb		 */
11965822Sjhb		pri = PI_DISK;
12065822Sjhb		break;
12165822Sjhb	case INTR_TYPE_NET:
12265822Sjhb		pri = PI_NET;
12365822Sjhb		break;
12465822Sjhb	case INTR_TYPE_CAM:
12565822Sjhb		pri = PI_DISK;          /* XXX or PI_CAM? */
12665822Sjhb		break;
12778365Speter	case INTR_TYPE_AV:		/* Audio/video */
12878365Speter		pri = PI_AV;
12978365Speter		break;
13072237Sjhb	case INTR_TYPE_CLK:
13172237Sjhb		pri = PI_REALTIME;
13272237Sjhb		break;
13365822Sjhb	case INTR_TYPE_MISC:
13465822Sjhb		pri = PI_DULL;          /* don't care */
13565822Sjhb		break;
13665822Sjhb	default:
13772237Sjhb		/* We didn't specify an interrupt level. */
138151658Sjhb		panic("intr_priority: no interrupt type in flags");
13965822Sjhb	}
14065822Sjhb
14165822Sjhb	return pri;
14265822Sjhb}
14365822Sjhb
/*
 * Update an ithread based on the associated intr_event: recompute the
 * thread's scheduling priority from the event's handler list and copy the
 * event's full name into the kernel process name so it shows up in ps/top.
 */
static void
ithread_update(struct intr_thread *ithd)
{
	struct intr_event *ie;
	struct thread *td;
	u_char pri;

	ie = ithd->it_event;
	td = ithd->it_thread;

	/*
	 * Determine the overall priority of this event.  The handler list
	 * is kept sorted by priority (see intr_event_add_handler()), so the
	 * first handler carries the event's priority; an empty event gets
	 * the weakest ithread priority.
	 */
	if (TAILQ_EMPTY(&ie->ie_handlers))
		pri = PRI_MAX_ITHD;
	else
		pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;

	/* Update name and priority. */
	strlcpy(td->td_proc->p_comm, ie->ie_fullname,
	    sizeof(td->td_proc->p_comm));
	/* sched_prio() must be called with sched_lock held. */
	mtx_lock_spin(&sched_lock);
	sched_prio(td, pri);
	mtx_unlock_spin(&sched_lock);
}
170151658Sjhb
/*
 * Regenerate the full name of an interrupt event and update its priority.
 * The full name is the event name followed by the name of every attached
 * handler that fits in ie_fullname; handlers that do not fit are shown as
 * trailing '+' (or a final '*' when even the '+'s overflow).  Also
 * recomputes the IE_ENTROPY flag from the handlers.  Caller must hold
 * ie_lock.
 */
static void
intr_event_update(struct intr_event *ie)
{
	struct intr_handler *ih;
	char *last;
	int missed, space;

	/* Start off with no entropy and just the name of the event. */
	mtx_assert(&ie->ie_lock, MA_OWNED);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	ie->ie_flags &= ~IE_ENTROPY;
	missed = 0;
	/* 'space' is set while no handler name has been appended yet. */
	space = 1;

	/* Run through all the handlers updating values. */
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
		    sizeof(ie->ie_fullname)) {
			strcat(ie->ie_fullname, " ");
			strcat(ie->ie_fullname, ih->ih_name);
			space = 0;
		} else
			missed++;
		if (ih->ih_flags & IH_ENTROPY)
			ie->ie_flags |= IE_ENTROPY;
	}

	/*
	 * If the handler names were too long, add +'s to indicate missing
	 * names. If we run out of room and still have +'s to add, change
	 * the last character from a + to a *.
	 */
	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
	while (missed-- > 0) {
		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
			if (*last == '+') {
				*last = '*';
				break;
			} else
				*last = '+';
		} else if (space) {
			strcat(ie->ie_fullname, " +");
			space = 0;
		} else
			strcat(ie->ie_fullname, "+");
	}

	/*
	 * If this event has an ithread, update it's priority and
	 * name.
	 */
	if (ie->ie_thread != NULL)
		ithread_update(ie->ie_thread);
	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}
22972237Sjhb
/*
 * Create and initialize a new interrupt event, format its name from
 * fmt/..., and link it onto the global event_list.
 *
 * 'source' is an opaque cookie passed to the 'enable' callback once the
 * event's handlers have been run (see ithread_execute_handlers()).  The
 * only flag valid at creation time is IE_SOFT.  On success the new event
 * is returned via '*event' (if event is not NULL) and 0 is returned;
 * invalid flags return EINVAL.
 */
int
intr_event_create(struct intr_event **event, void *source, int flags,
    void (*enable)(void *), const char *fmt, ...)
{
	struct intr_event *ie;
	va_list ap;

	/* The only valid flag during creation is IE_SOFT. */
	if ((flags & ~IE_SOFT) != 0)
		return (EINVAL);
	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
	ie->ie_source = source;
	ie->ie_enable = enable;
	ie->ie_flags = flags;
	TAILQ_INIT(&ie->ie_handlers);
	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
	va_end(ap);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	/* event_list is protected by a pool mutex keyed on its address. */
	mtx_pool_lock(mtxpool_sleep, &event_list);
	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
	mtx_pool_unlock(mtxpool_sleep, &event_list);
	if (event != NULL)
		*event = ie;
	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
	return (0);
}
25972237Sjhb
/*
 * Tear down an interrupt event created by intr_event_create().  Fails
 * with EBUSY if any handlers are still attached.  Otherwise the event is
 * unlinked from the global list, its ithread (if any) is told to exit,
 * and the event's storage is freed.  Returns 0 on success.
 */
int
intr_event_destroy(struct intr_event *ie)
{

	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		mtx_unlock(&ie->ie_lock);
		return (EBUSY);
	}
	mtx_pool_lock(mtxpool_sleep, &event_list);
	TAILQ_REMOVE(&event_list, ie, ie_list);
	mtx_pool_unlock(mtxpool_sleep, &event_list);
/* "#ifndef notyet" is always true: the ithread teardown below is live code. */
#ifndef notyet
	if (ie->ie_thread != NULL) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	mtx_destroy(&ie->ie_lock);
	free(ie, M_ITHREAD);
	return (0);
}
283151658Sjhb
/*
 * Create the kernel thread that will service an interrupt event.  The
 * kthread is created stopped (RFSTOPPED), placed in the ithread
 * scheduling class, and parked in the IWAIT state; it first runs when
 * intr_event_schedule_thread() puts it on the run queue.  Panics if the
 * kthread cannot be created, so this never returns NULL.
 */
static struct intr_thread *
ithread_create(const char *name)
{
	struct intr_thread *ithd;
	struct thread *td;
	struct proc *p;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kthread_create(ithread_loop, ithd, &p, RFSTOPPED | RFHIGHPID,
	    0, "%s", name);
	if (error)
		panic("kthread_create() failed with %d", error);
	td = FIRST_THREAD_IN_PROC(p);	/* XXXKSE */
	/* Scheduler class and state changes require sched_lock. */
	mtx_lock_spin(&sched_lock);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	mtx_unlock_spin(&sched_lock);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
30872237Sjhb
/*
 * Ask an interrupt thread to terminate.  IT_DEAD is set and, if the
 * thread is idle in IWAIT, it is made runnable so it can notice the flag.
 * The thread itself observes IT_DEAD in ithread_loop(), frees its own
 * intr_thread structure, and exits; this function does not wait for that.
 */
static void
ithread_destroy(struct intr_thread *ithread)
{
	struct thread *td;

	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
	td = ithread->it_thread;
	mtx_lock_spin(&sched_lock);
	ithread->it_flags |= IT_DEAD;
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		setrunqueue(td, SRQ_INTR);
	}
	mtx_unlock_spin(&sched_lock);
}
32472237Sjhb
/*
 * Add an interrupt handler to an interrupt event.
 *
 * The handler is inserted into the event's handler list sorted by 'pri'
 * (lower numeric value first).  An INTR_EXCL handler may not share the
 * event with any other handler.  Unless the handler is INTR_FAST, an
 * ithread is created for the event on first use; the IE_ADDING_THREAD
 * flag serializes concurrent creators so only one thread is built.  On
 * success a cookie identifying the handler is returned via '*cookiep'
 * for later use with intr_event_remove_handler().
 *
 * Returns 0 on success, or EINVAL on bad arguments or an exclusivity
 * conflict.
 */
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_intr_t handler, void *arg, u_char pri, enum intr_type flags,
    void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || handler == NULL)
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	ih->ih_name = name;
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_FAST)
		ih->ih_flags = IH_FAST;
	else if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in a event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	/* temp_ih is NULL when the loop fell off the end of the list. */
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	/* Create a thread if we need one. */
	while (ie->ie_thread == NULL && !(flags & INTR_FAST)) {
		if (ie->ie_flags & IE_ADDING_THREAD)
			/* Someone else is creating it; wait for them. */
			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
		else {
			ie->ie_flags |= IE_ADDING_THREAD;
			/* Drop the lock: ithread_create() may sleep. */
			mtx_unlock(&ie->ie_lock);
			it = ithread_create("intr: newborn");
			mtx_lock(&ie->ie_lock);
			ie->ie_flags &= ~IE_ADDING_THREAD;
			ie->ie_thread = it;
			it->it_event = ie;
			ithread_update(it);
			wakeup(ie);
		}
	}
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
39872237Sjhb
399165125Sjhb/*
400165125Sjhb * Return the ie_source field from the intr_event an intr_handler is
401165125Sjhb * associated with.
402165125Sjhb */
403165125Sjhbvoid *
404165125Sjhbintr_handler_source(void *cookie)
405165125Sjhb{
406165125Sjhb	struct intr_handler *ih;
407165125Sjhb	struct intr_event *ie;
408165125Sjhb
409165125Sjhb	ih = (struct intr_handler *)cookie;
410165125Sjhb	if (ih == NULL)
411165125Sjhb		return (NULL);
412165125Sjhb	ie = ih->ih_event;
413165125Sjhb	KASSERT(ie != NULL,
414165125Sjhb	    ("interrupt handler \"%s\" has a NULL interrupt event",
415165125Sjhb	    ih->ih_name));
416165125Sjhb	return (ie->ie_source);
417165125Sjhb}
418165125Sjhb
41972237Sjhbint
420151658Sjhbintr_event_remove_handler(void *cookie)
42172237Sjhb{
422151658Sjhb	struct intr_handler *handler = (struct intr_handler *)cookie;
423151658Sjhb	struct intr_event *ie;
42472237Sjhb#ifdef INVARIANTS
425151658Sjhb	struct intr_handler *ih;
42672237Sjhb#endif
427151658Sjhb#ifdef notyet
428151658Sjhb	int dead;
429151658Sjhb#endif
43072237Sjhb
43172759Sjhb	if (handler == NULL)
43272237Sjhb		return (EINVAL);
433151658Sjhb	ie = handler->ih_event;
434151658Sjhb	KASSERT(ie != NULL,
435151658Sjhb	    ("interrupt handler \"%s\" has a NULL interrupt event",
436165124Sjhb	    handler->ih_name));
437151658Sjhb	mtx_lock(&ie->ie_lock);
43887593Sobrien	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
439151658Sjhb	    ie->ie_name);
44072237Sjhb#ifdef INVARIANTS
441151658Sjhb	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
44272759Sjhb		if (ih == handler)
44372759Sjhb			goto ok;
444151658Sjhb	mtx_unlock(&ie->ie_lock);
445151658Sjhb	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
446151658Sjhb	    ih->ih_name, ie->ie_name);
44772759Sjhbok:
44872237Sjhb#endif
44972839Sjhb	/*
450151658Sjhb	 * If there is no ithread, then just remove the handler and return.
451151658Sjhb	 * XXX: Note that an INTR_FAST handler might be running on another
452151658Sjhb	 * CPU!
453151658Sjhb	 */
454151658Sjhb	if (ie->ie_thread == NULL) {
455151658Sjhb		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
456151658Sjhb		mtx_unlock(&ie->ie_lock);
457151658Sjhb		free(handler, M_ITHREAD);
458151658Sjhb		return (0);
459151658Sjhb	}
460151658Sjhb
461151658Sjhb	/*
46272839Sjhb	 * If the interrupt thread is already running, then just mark this
46372839Sjhb	 * handler as being dead and let the ithread do the actual removal.
464124505Struckman	 *
465124505Struckman	 * During a cold boot while cold is set, msleep() does not sleep,
466124505Struckman	 * so we have to remove the handler here rather than letting the
467124505Struckman	 * thread do it.
46872839Sjhb	 */
46972839Sjhb	mtx_lock_spin(&sched_lock);
470151658Sjhb	if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
47172839Sjhb		handler->ih_flags |= IH_DEAD;
47272839Sjhb
47372839Sjhb		/*
47472839Sjhb		 * Ensure that the thread will process the handler list
47572839Sjhb		 * again and remove this handler if it has already passed
47672839Sjhb		 * it on the list.
47772839Sjhb		 */
478151658Sjhb		ie->ie_thread->it_need = 1;
479151658Sjhb	} else
480151658Sjhb		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
48172839Sjhb	mtx_unlock_spin(&sched_lock);
482151658Sjhb	while (handler->ih_flags & IH_DEAD)
483157815Sjhb		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
484151658Sjhb	intr_event_update(ie);
485151658Sjhb#ifdef notyet
486151658Sjhb	/*
487151658Sjhb	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
488151658Sjhb	 * this could lead to races of stale data when servicing an
489151658Sjhb	 * interrupt.
490151658Sjhb	 */
491151658Sjhb	dead = 1;
492151658Sjhb	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
493151658Sjhb		if (!(ih->ih_flags & IH_FAST)) {
494151658Sjhb			dead = 0;
495151658Sjhb			break;
496151658Sjhb		}
497151658Sjhb	}
498151658Sjhb	if (dead) {
499151658Sjhb		ithread_destroy(ie->ie_thread);
500151658Sjhb		ie->ie_thread = NULL;
501151658Sjhb	}
502151658Sjhb#endif
503151658Sjhb	mtx_unlock(&ie->ie_lock);
50476771Sjhb	free(handler, M_ITHREAD);
50572237Sjhb	return (0);
50672237Sjhb}
50772237Sjhb
/*
 * Schedule an interrupt event's ithread to run in response to an
 * interrupt.  Sets it_need so a running thread makes another pass, and
 * puts the thread on the run queue if it is idle in IWAIT.  Also
 * harvests entropy from the interrupt timing when any of the event's
 * handlers requested it (IE_ENTROPY).
 *
 * Returns EINVAL for a stray interrupt (no event, no handlers, or no
 * thread); 0 otherwise.
 */
int
intr_event_schedule_thread(struct intr_event *ie)
{
	struct intr_entropy entropy;
	struct intr_thread *it;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
	    ie->ie_thread == NULL)
		return (EINVAL);

	ctd = curthread;
	it = ie->ie_thread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, p->p_comm);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, grab sched_lock and see if we actually need to
	 * put this thread on the runqueue.
	 */
	it->it_need = 1;
	mtx_lock_spin(&sched_lock);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    p->p_comm);
		TD_CLR_IWAIT(td);
		setrunqueue(td, SRQ_INTR);
	} else {
		/* Already running or queued; it_need makes it loop again. */
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, p->p_comm, it->it_need, td->td_state);
	}
	mtx_unlock_spin(&sched_lock);

	return (0);
}
56472759Sjhb
565151699Sjhb/*
566151699Sjhb * Add a software interrupt handler to a specified event.  If a given event
567151699Sjhb * is not specified, then a new event is created.
568151699Sjhb */
56972759Sjhbint
570151658Sjhbswi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
57172237Sjhb	    void *arg, int pri, enum intr_type flags, void **cookiep)
57272237Sjhb{
573151658Sjhb	struct intr_event *ie;
57472237Sjhb	int error;
57566698Sjhb
57672759Sjhb	if (flags & (INTR_FAST | INTR_ENTROPY))
57772759Sjhb		return (EINVAL);
57872759Sjhb
579151658Sjhb	ie = (eventp != NULL) ? *eventp : NULL;
58066698Sjhb
581151658Sjhb	if (ie != NULL) {
582151658Sjhb		if (!(ie->ie_flags & IE_SOFT))
583151658Sjhb			return (EINVAL);
58472759Sjhb	} else {
585151658Sjhb		error = intr_event_create(&ie, NULL, IE_SOFT, NULL,
58672237Sjhb		    "swi%d:", pri);
58767551Sjhb		if (error)
58872237Sjhb			return (error);
589151658Sjhb		if (eventp != NULL)
590151658Sjhb			*eventp = ie;
59166698Sjhb	}
592151658Sjhb	return (intr_event_add_handler(ie, name, handler, arg,
59372376Sjake		    (pri * RQ_PPQ) + PI_SOFT, flags, cookiep));
594134791Sjulian		    /* XXKSE.. think of a better way to get separate queues */
59566698Sjhb}
59666698Sjhb
/*
 * Schedule a software interrupt thread.  'cookie' is the handler handle
 * returned by swi_add(); SWI_DELAY in 'flags' marks the handler as
 * needing service without running the ithread now (a later schedule will
 * pick it up).
 */
void
swi_sched(void *cookie, int flags)
{
	struct intr_handler *ih = (struct intr_handler *)cookie;
	struct intr_event *ie = ih->ih_event;
	int error;

	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
	    ih->ih_need);

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	atomic_store_rel_int(&ih->ih_need, 1);

	if (!(flags & SWI_DELAY)) {
		/* Account for a software interrupt and run the thread. */
		PCPU_LAZY_INC(cnt.v_soft);
		error = intr_event_schedule_thread(ie);
		KASSERT(error == 0, ("stray software interrupt"));
	}
}
62366698Sjhb
/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 *
 * 'cookie' is the handle returned by swi_add(); the return value comes
 * straight from intr_event_remove_handler().
 */
int
swi_remove(void *cookie)
{

	return (intr_event_remove_handler(cookie));
}
636151699Sjhb
/*
 * Execute all of an interrupt event's handlers from its ithread.  Unlinks
 * and wakes up handlers marked IH_DEAD (deferred removal on behalf of
 * intr_event_remove_handler()), skips IH_FAST handlers (those run in
 * primary interrupt context), and for soft events runs only handlers with
 * ih_need set.  Afterwards applies interrupt storm throttling and then
 * re-enables the interrupt source via ie_enable().
 */
static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{
	struct intr_handler *ih, *ihn;

	/* Interrupt handlers should not sleep. */
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_NO_SLEEPING();
	TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {

		/*
		 * If this handler is marked for death, remove it from
		 * the list of handlers and wake up the sleeper.
		 */
		if (ih->ih_flags & IH_DEAD) {
			mtx_lock(&ie->ie_lock);
			TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
			ih->ih_flags &= ~IH_DEAD;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
			continue;
		}

		/*
		 * For software interrupt threads, we only execute
		 * handlers that have their need flag set.  Hardware
		 * interrupt threads always invoke all of their handlers.
		 */
		if (ie->ie_flags & IE_SOFT) {
			if (!ih->ih_need)
				continue;
			else
				atomic_store_rel_int(&ih->ih_need, 0);
		}

		/* Fast handlers are handled in primary interrupt context. */
		if (ih->ih_flags & IH_FAST)
			continue;

		/* Execute this handler. */
		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
		    __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
		    ih->ih_name, ih->ih_flags);

		/* Non-MPSAFE handlers still run under Giant. */
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_lock(&Giant);
		ih->ih_handler(ih->ih_argument);
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_unlock(&Giant);
	}
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_SLEEPING_OK();

	/*
	 * Interrupt storm handling:
	 *
	 * If this interrupt source is currently storming, then throttle
	 * it to only fire the handler once  per clock tick.
	 *
	 * If this interrupt source is not currently storming, but the
	 * number of back to back interrupts exceeds the storm threshold,
	 * then enter storming mode.
	 */
	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold) {
		if (ie->ie_warned == 0) {
			printf(
	"Interrupt storm detected on \"%s\"; throttling interrupt source\n",
			    ie->ie_name);
			ie->ie_warned = 1;
		}
		/* Sleep one tick before re-enabling the source. */
		tsleep(&ie->ie_count, 0, "istorm", 1);
	} else
		ie->ie_count++;

	/*
	 * Now that all the handlers have had a chance to run, reenable
	 * the interrupt source.
	 */
	if (ie->ie_enable != NULL)
		ie->ie_enable(ie->ie_source);
}
718151658Sjhb
/*
 * This is the main code for interrupt threads.  Each ithread runs this
 * loop forever: whenever it_need is set it executes the event's handlers,
 * then parks itself in IWAIT until the next interrupt schedules it.  The
 * thread exits — freeing its own intr_thread structure — once IT_DEAD has
 * been set by ithread_destroy().
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	ithd = (struct intr_thread *)arg;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, p->p_comm);
			free(ithd, M_ITHREAD);
			kthread_exit(0);
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 */
		while (ithd->it_need) {
			/*
			 * This might need a full read and write barrier
			 * to make sure that this write posts before any
			 * of the memory or device accesses in the
			 * handlers.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
			ithread_execute_handlers(p, ie);
		}
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		mtx_lock_spin(&sched_lock);
		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
			TD_SET_IWAIT(td);
			/* A fresh wakeup resets the storm counter. */
			ie->ie_count = 0;
			mi_switch(SW_VOL, NULL);
		}
		mtx_unlock_spin(&sched_lock);
	}
}
78566698Sjhb
786121482Sjhb#ifdef DDB
78772237Sjhb/*
788121482Sjhb * Dump details about an interrupt handler
789121482Sjhb */
790121482Sjhbstatic void
791151658Sjhbdb_dump_intrhand(struct intr_handler *ih)
792121482Sjhb{
793121482Sjhb	int comma;
794121482Sjhb
795121482Sjhb	db_printf("\t%-10s ", ih->ih_name);
796121482Sjhb	switch (ih->ih_pri) {
797121482Sjhb	case PI_REALTIME:
798121482Sjhb		db_printf("CLK ");
799121482Sjhb		break;
800121482Sjhb	case PI_AV:
801121482Sjhb		db_printf("AV  ");
802121482Sjhb		break;
803121482Sjhb	case PI_TTYHIGH:
804121482Sjhb	case PI_TTYLOW:
805121482Sjhb		db_printf("TTY ");
806121482Sjhb		break;
807121482Sjhb	case PI_TAPE:
808121482Sjhb		db_printf("TAPE");
809121482Sjhb		break;
810121482Sjhb	case PI_NET:
811121482Sjhb		db_printf("NET ");
812121482Sjhb		break;
813121482Sjhb	case PI_DISK:
814121482Sjhb	case PI_DISKLOW:
815121482Sjhb		db_printf("DISK");
816121482Sjhb		break;
817121482Sjhb	case PI_DULL:
818121482Sjhb		db_printf("DULL");
819121482Sjhb		break;
820121482Sjhb	default:
821121482Sjhb		if (ih->ih_pri >= PI_SOFT)
822121482Sjhb			db_printf("SWI ");
823121482Sjhb		else
824121482Sjhb			db_printf("%4u", ih->ih_pri);
825121482Sjhb		break;
826121482Sjhb	}
827121482Sjhb	db_printf(" ");
828121482Sjhb	db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
829121482Sjhb	db_printf("(%p)", ih->ih_argument);
830121482Sjhb	if (ih->ih_need ||
831121482Sjhb	    (ih->ih_flags & (IH_FAST | IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
832121482Sjhb	    IH_MPSAFE)) != 0) {
833121482Sjhb		db_printf(" {");
834121482Sjhb		comma = 0;
835121482Sjhb		if (ih->ih_flags & IH_FAST) {
836121482Sjhb			db_printf("FAST");
837121482Sjhb			comma = 1;
838121482Sjhb		}
839121482Sjhb		if (ih->ih_flags & IH_EXCLUSIVE) {
840121482Sjhb			if (comma)
841121482Sjhb				db_printf(", ");
842121482Sjhb			db_printf("EXCL");
843121482Sjhb			comma = 1;
844121482Sjhb		}
845121482Sjhb		if (ih->ih_flags & IH_ENTROPY) {
846121482Sjhb			if (comma)
847121482Sjhb				db_printf(", ");
848121482Sjhb			db_printf("ENTROPY");
849121482Sjhb			comma = 1;
850121482Sjhb		}
851121482Sjhb		if (ih->ih_flags & IH_DEAD) {
852121482Sjhb			if (comma)
853121482Sjhb				db_printf(", ");
854121482Sjhb			db_printf("DEAD");
855121482Sjhb			comma = 1;
856121482Sjhb		}
857121482Sjhb		if (ih->ih_flags & IH_MPSAFE) {
858121482Sjhb			if (comma)
859121482Sjhb				db_printf(", ");
860121482Sjhb			db_printf("MPSAFE");
861121482Sjhb			comma = 1;
862121482Sjhb		}
863121482Sjhb		if (ih->ih_need) {
864121482Sjhb			if (comma)
865121482Sjhb				db_printf(", ");
866121482Sjhb			db_printf("NEED");
867121482Sjhb		}
868121482Sjhb		db_printf("}");
869121482Sjhb	}
870121482Sjhb	db_printf("\n");
871121482Sjhb}
872121482Sjhb
873121482Sjhb/*
874151658Sjhb * Dump details about a event.
875121482Sjhb */
876121482Sjhbvoid
877151658Sjhbdb_dump_intr_event(struct intr_event *ie, int handlers)
878121482Sjhb{
879151658Sjhb	struct intr_handler *ih;
880151658Sjhb	struct intr_thread *it;
881121482Sjhb	int comma;
882121482Sjhb
883151658Sjhb	db_printf("%s ", ie->ie_fullname);
884151658Sjhb	it = ie->ie_thread;
885151658Sjhb	if (it != NULL)
886151658Sjhb		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
887151658Sjhb	else
888151658Sjhb		db_printf("(no thread)");
889151658Sjhb	if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
890151658Sjhb	    (it != NULL && it->it_need)) {
891121482Sjhb		db_printf(" {");
892121482Sjhb		comma = 0;
893151658Sjhb		if (ie->ie_flags & IE_SOFT) {
894121482Sjhb			db_printf("SOFT");
895121482Sjhb			comma = 1;
896121482Sjhb		}
897151658Sjhb		if (ie->ie_flags & IE_ENTROPY) {
898121482Sjhb			if (comma)
899121482Sjhb				db_printf(", ");
900121482Sjhb			db_printf("ENTROPY");
901121482Sjhb			comma = 1;
902121482Sjhb		}
903151658Sjhb		if (ie->ie_flags & IE_ADDING_THREAD) {
904121482Sjhb			if (comma)
905121482Sjhb				db_printf(", ");
906151658Sjhb			db_printf("ADDING_THREAD");
907121482Sjhb			comma = 1;
908121482Sjhb		}
909151658Sjhb		if (it != NULL && it->it_need) {
910121482Sjhb			if (comma)
911121482Sjhb				db_printf(", ");
912121482Sjhb			db_printf("NEED");
913121482Sjhb		}
914121482Sjhb		db_printf("}");
915121482Sjhb	}
916121482Sjhb	db_printf("\n");
917121482Sjhb
918121482Sjhb	if (handlers)
919151658Sjhb		TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
920121482Sjhb		    db_dump_intrhand(ih);
921121482Sjhb}
922151658Sjhb
923151658Sjhb/*
924151658Sjhb * Dump data about interrupt handlers
925151658Sjhb */
926151658SjhbDB_SHOW_COMMAND(intr, db_show_intr)
927151658Sjhb{
928151658Sjhb	struct intr_event *ie;
929160312Sjhb	int all, verbose;
930151658Sjhb
931151658Sjhb	verbose = index(modif, 'v') != NULL;
932151658Sjhb	all = index(modif, 'a') != NULL;
933151658Sjhb	TAILQ_FOREACH(ie, &event_list, ie_list) {
934151658Sjhb		if (!all && TAILQ_EMPTY(&ie->ie_handlers))
935151658Sjhb			continue;
936151658Sjhb		db_dump_intr_event(ie, verbose);
937160312Sjhb		if (db_pager_quit)
938160312Sjhb			break;
939151658Sjhb	}
940151658Sjhb}
941121482Sjhb#endif /* DDB */
942121482Sjhb
943121482Sjhb/*
94467551Sjhb * Start standard software interrupt threads
94566698Sjhb */
94667551Sjhbstatic void
94772237Sjhbstart_softintr(void *dummy)
94867551Sjhb{
949113613Sjhb	struct proc *p;
95072237Sjhb
951151658Sjhb	if (swi_add(&clk_intr_event, "clock", softclock, NULL, SWI_CLOCK,
95272237Sjhb		INTR_MPSAFE, &softclock_ih) ||
953117128Sscottl	    swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
95472237Sjhb		panic("died while creating standard software ithreads");
95572759Sjhb
956151658Sjhb	p = clk_intr_event->ie_thread->it_thread->td_proc;
957113613Sjhb	PROC_LOCK(p);
958113613Sjhb	p->p_flag |= P_NOLOAD;
959113613Sjhb	PROC_UNLOCK(p);
96066698Sjhb}
96172237SjhbSYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL)
96266698Sjhb
963151658Sjhb/*
96477582Stmm * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
96577582Stmm * The data for this machine dependent, and the declarations are in machine
96677582Stmm * dependent code.  The layout of intrnames and intrcnt however is machine
96777582Stmm * independent.
96877582Stmm *
96977582Stmm * We do not know the length of intrcnt and intrnames at compile time, so
97077582Stmm * calculate things at run time.
97177582Stmm */
97277582Stmmstatic int
97377582Stmmsysctl_intrnames(SYSCTL_HANDLER_ARGS)
97477582Stmm{
975151658Sjhb	return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
97677582Stmm	   req));
97777582Stmm}
97877582Stmm
97977582StmmSYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
98077582Stmm    NULL, 0, sysctl_intrnames, "", "Interrupt Names");
98177582Stmm
98277582Stmmstatic int
98377582Stmmsysctl_intrcnt(SYSCTL_HANDLER_ARGS)
98477582Stmm{
985151658Sjhb	return (sysctl_handle_opaque(oidp, intrcnt,
98677582Stmm	    (char *)eintrcnt - (char *)intrcnt, req));
98777582Stmm}
98877582Stmm
98977582StmmSYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
99077582Stmm    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
991121482Sjhb
992121482Sjhb#ifdef DDB
993121482Sjhb/*
994121482Sjhb * DDB command to dump the interrupt statistics.
995121482Sjhb */
996121482SjhbDB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
997121482Sjhb{
998121482Sjhb	u_long *i;
999121482Sjhb	char *cp;
1000121482Sjhb
1001121482Sjhb	cp = intrnames;
1002160312Sjhb	for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) {
1003121482Sjhb		if (*cp == '\0')
1004121482Sjhb			break;
1005121482Sjhb		if (*i != 0)
1006121482Sjhb			db_printf("%s\t%lu\n", cp, *i);
1007121482Sjhb		cp += strlen(cp) + 1;
1008121482Sjhb	}
1009121482Sjhb}
1010121482Sjhb#endif
1011