kern_intr.c revision 104354
/*
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/kern/kern_intr.c 104354 2002-10-02 07:44:29Z scottl $
 *
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>

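/*
 * Entropy sample passed to random_harvest() when an interrupt thread is
 * scheduled: the interrupting vector plus the current process.
 */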
struct	int_entropy {
	struct	proc *proc;
	int	vector;
};

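/*
 * Handler cookies for the standard software interrupts and the global
 * clock and tty interrupt threads.
 */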
void	*vm_ih;
void	*softclock_ih;
struct	ithd *clk_ithd;
struct	ithd *tty_ithd;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static void	ithread_update(struct ithd *);
static void	ithread_loop(void *);
static void	start_softintr(void *);

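/*
 * Map an interrupt type (INTR_TYPE_*) to the base priority given to
 * its handlers' interrupt thread.
 */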
u_char
ithread_priority(enum intr_type flags)
{
	u_char pri;

	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
	switch (flags) {
	case INTR_TYPE_TTY:
		pri = PI_TTYLOW;
		break;
	case INTR_TYPE_BIO:
		/*
		 * XXX We need to refine this.  BSD/OS distinguishes
		 * between tape and disk priorities.
		 */
		pri = PI_DISK;
		break;
	case INTR_TYPE_NET:
		pri = PI_NET;
		break;
	case INTR_TYPE_CAM:
		pri = PI_DISK;          /* XXX or PI_CAM? */
		break;
	case INTR_TYPE_AV:		/* Audio/video */
		pri = PI_AV;
		break;
	case INTR_TYPE_CLK:
		pri = PI_REALTIME;
		break;
	case INTR_TYPE_MISC:
		pri = PI_DULL;          /* don't care */
		break;
	default:
		/* We didn't specify an interrupt level. */
		panic("ithread_priority: no interrupt type in flags");
	}

	return pri;
}

/*
 * Regenerate the name (p_comm) and priority of an interrupt thread.
 */
static void
ithread_update(struct ithd *ithd)
{
	struct intrhand *ih;
	struct thread *td;
	struct proc *p;
	int entropy;

	mtx_assert(&ithd->it_lock, MA_OWNED);
	td = ithd->it_td;
	if (td == NULL)
		return;
	p = td->td_proc;

	strncpy(p->p_comm, ithd->it_name, sizeof(ithd->it_name));
	ih = TAILQ_FIRST(&ithd->it_handlers);
	if (ih == NULL) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = PRI_MAX_ITHD;
		td->td_base_pri = PRI_MAX_ITHD;
		mtx_unlock_spin(&sched_lock);
		ithd->it_flags &= ~IT_ENTROPY;
		return;
	}
	entropy = 0;
	mtx_lock_spin(&sched_lock);
	td->td_priority = ih->ih_pri;
	td->td_base_pri = ih->ih_pri;
	mtx_unlock_spin(&sched_lock);
	TAILQ_FOREACH(ih, &ithd->it_handlers, ih_next) {
		if (strlen(p->p_comm) + strlen(ih->ih_name) + 1 <
		    sizeof(p->p_comm)) {
			strcat(p->p_comm, " ");
			strcat(p->p_comm, ih->ih_name);
		} else if (strlen(p->p_comm) + 1 == sizeof(p->p_comm)) {
			if (p->p_comm[sizeof(p->p_comm) - 2] == '+')
				p->p_comm[sizeof(p->p_comm) - 2] = '*';
			else
				p->p_comm[sizeof(p->p_comm) - 2] = '+';
		} else
			strcat(p->p_comm, "+");
		if (ih->ih_flags & IH_ENTROPY)
			entropy++;
	}
	if (entropy)
		ithd->it_flags |= IT_ENTROPY;
	else
		ithd->it_flags &= ~IT_ENTROPY;
	CTR2(KTR_INTR, "%s: updated %s\n", __func__, p->p_comm);
}

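/*
 * Create and initialize an interrupt thread for the given vector.  The
 * new kernel thread is created stopped and left in the iwait state until
 * a handler schedules it.
 */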
int
ithread_create(struct ithd **ithread, int vector, int flags,
    void (*disable)(int), void (*enable)(int), const char *fmt, ...)
{
	struct ithd *ithd;
	struct thread *td;
	struct proc *p;
	int error;
	va_list ap;

	/* The only valid flag during creation is IT_SOFT. */
	if ((flags & ~IT_SOFT) != 0)
		return (EINVAL);

	ithd = malloc(sizeof(struct ithd), M_ITHREAD, M_WAITOK | M_ZERO);
	ithd->it_vector = vector;
	ithd->it_disable = disable;
	ithd->it_enable = enable;
	ithd->it_flags = flags;
	TAILQ_INIT(&ithd->it_handlers);
	mtx_init(&ithd->it_lock, "ithread", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ithd->it_name, sizeof(ithd->it_name), fmt, ap);
	va_end(ap);

	error = kthread_create(ithread_loop, ithd, &p, RFSTOPPED | RFHIGHPID,
	    0, "%s", ithd->it_name);
	if (error) {
		mtx_destroy(&ithd->it_lock);
		free(ithd, M_ITHREAD);
		return (error);
	}
	td = FIRST_THREAD_IN_PROC(p);	/* XXXKSE */
	td->td_ksegrp->kg_pri_class = PRI_ITHD;
	td->td_priority = PRI_MAX_ITHD;
	TD_SET_IWAIT(td);
	ithd->it_td = td;
	td->td_ithd = ithd;
	if (ithread != NULL)
		*ithread = ithd;

	CTR2(KTR_INTR, "%s: created %s", __func__, ithd->it_name);
	return (0);
}

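/*
 * Destroy an interrupt thread.  This fails if any handlers are still
 * attached; otherwise the thread is marked dead and, if it is idle,
 * made runnable so that it can exit on its own.
 */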
int
ithread_destroy(struct ithd *ithread)
{
	struct thread *td;
	struct proc *p;

	if (ithread == NULL)
		return (EINVAL);

	td = ithread->it_td;
	p = td->td_proc;
	mtx_lock(&ithread->it_lock);
	if (!TAILQ_EMPTY(&ithread->it_handlers)) {
		mtx_unlock(&ithread->it_lock);
		return (EINVAL);
	}
	ithread->it_flags |= IT_DEAD;
	mtx_lock_spin(&sched_lock);
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		setrunqueue(td);
	}
	mtx_unlock_spin(&sched_lock);
	mtx_unlock(&ithread->it_lock);
	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_name);
	return (0);
}

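/*
 * Add a handler to an interrupt thread, keeping the handler list sorted
 * by priority.  Fast handlers are implicitly exclusive, and an exclusive
 * handler cannot share its thread with any other handler.
 */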
int
ithread_add_handler(struct ithd *ithread, const char *name,
    driver_intr_t handler, void *arg, u_char pri, enum intr_type flags,
    void **cookiep)
{
	struct intrhand *ih, *temp_ih;

	if (ithread == NULL || name == NULL || handler == NULL)
		return (EINVAL);
	if ((flags & INTR_FAST) != 0)
		flags |= INTR_EXCL;

	ih = malloc(sizeof(struct intrhand), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	ih->ih_name = name;
	ih->ih_ithread = ithread;
	ih->ih_pri = pri;
	if (flags & INTR_FAST)
		ih->ih_flags = IH_FAST | IH_EXCLUSIVE;
	else if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	mtx_lock(&ithread->it_lock);
	if ((flags & INTR_EXCL) != 0 && !TAILQ_EMPTY(&ithread->it_handlers))
		goto fail;
	if (!TAILQ_EMPTY(&ithread->it_handlers) &&
	    (TAILQ_FIRST(&ithread->it_handlers)->ih_flags & IH_EXCLUSIVE) != 0)
		goto fail;

	TAILQ_FOREACH(temp_ih, &ithread->it_handlers, ih_next)
	    if (temp_ih->ih_pri > ih->ih_pri)
		    break;
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ithread->it_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	ithread_update(ithread);
	mtx_unlock(&ithread->it_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ithread->it_name);
	return (0);

fail:
	mtx_unlock(&ithread->it_lock);
	free(ih, M_ITHREAD);
	return (EINVAL);
}

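/*
 * Remove a handler, identified by the cookie returned from
 * ithread_add_handler(), from its interrupt thread.  If the thread is
 * currently running, the removal is deferred to the thread itself.
 */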
int
ithread_remove_handler(void *cookie)
{
	struct intrhand *handler = (struct intrhand *)cookie;
	struct ithd *ithread;
#ifdef INVARIANTS
	struct intrhand *ih;
#endif

	if (handler == NULL)
		return (EINVAL);
	ithread = handler->ih_ithread;
	KASSERT(ithread != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt thread",
		handler->ih_name));
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ithread->it_name);
	mtx_lock(&ithread->it_lock);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ithread->it_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ithread->it_lock);
	panic("interrupt handler \"%s\" not found in interrupt thread \"%s\"",
	    handler->ih_name, ithread->it_name);
ok:
#endif
	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 */
	mtx_lock_spin(&sched_lock);
	if (!TD_AWAITING_INTR(ithread->it_td)) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		ithread->it_need = 1;
	} else
		TAILQ_REMOVE(&ithread->it_handlers, handler, ih_next);
	mtx_unlock_spin(&sched_lock);
	if ((handler->ih_flags & IH_DEAD) != 0)
		msleep(handler, &ithread->it_lock, PUSER, "itrmh", 0);
	ithread_update(ithread);
	mtx_unlock(&ithread->it_lock);
	free(handler, M_ITHREAD);
	return (0);
}

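/*
 * Schedule an interrupt thread to run.  Returns EINVAL for a stray
 * interrupt (no thread or no handlers); otherwise the thread is made
 * runnable and, when do_switch is set and it is safe, switched to
 * immediately.
 */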
int
ithread_schedule(struct ithd *ithread, int do_switch)
{
	struct int_entropy entropy;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if ((ithread == NULL) || TAILQ_EMPTY(&ithread->it_handlers))
		return (EINVAL);

	ctd = curthread;
	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && ithread->it_flags & IT_ENTROPY) {
		entropy.vector = ithread->it_vector;
		entropy.proc = ctd->td_proc;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	td = ithread->it_td;
	p = td->td_proc;
	KASSERT(p != NULL, ("ithread %s has no process", ithread->it_name));
	CTR4(KTR_INTR, "%s: pid %d: (%s) need = %d",
	    __func__, p->p_pid, p->p_comm, ithread->it_need);

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, grab sched_lock and see if we actually need to
	 * put this thread on the runqueue.  If so and the do_switch flag is
	 * true and it is safe to switch, then switch to the ithread
	 * immediately.  Otherwise, set the needresched flag to guarantee
	 * that this ithread will run before any userland processes.
	 */
	ithread->it_need = 1;
	mtx_lock_spin(&sched_lock);
	if (TD_AWAITING_INTR(td)) {
		CTR2(KTR_INTR, "%s: setrunqueue %d", __func__, p->p_pid);
		TD_CLR_IWAIT(td);
		setrunqueue(td);
		if (do_switch &&
		    (ctd->td_critnest == 1)) {
			KASSERT((TD_IS_RUNNING(ctd)),
			    ("ithread_schedule: Bad state for curthread."));
			ctd->td_proc->p_stats->p_ru.ru_nivcsw++;
			if (ctd->td_kse->ke_flags & KEF_IDLEKSE)
				ctd->td_state = TDS_CAN_RUN; /* XXXKSE */
			mi_switch();
		} else {
			curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
		}
	} else {
		CTR4(KTR_INTR, "%s: pid %d: it_need %d, state %d",
		    __func__, p->p_pid, ithread->it_need, p->p_state);
	}
	mtx_unlock_spin(&sched_lock);

	return (0);
}

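/*
 * Register a software interrupt handler.  If no existing software
 * interrupt thread is supplied via ithdp, a new one is created for the
 * given priority and handed back through ithdp.
 */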
int
swi_add(struct ithd **ithdp, const char *name, driver_intr_t handler,
	    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct ithd *ithd;
	int error;

	if (flags & (INTR_FAST | INTR_ENTROPY))
		return (EINVAL);

	ithd = (ithdp != NULL) ? *ithdp : NULL;

	if (ithd != NULL) {
		if ((ithd->it_flags & IT_SOFT) == 0)
			return (EINVAL);
	} else {
		error = ithread_create(&ithd, pri, IT_SOFT, NULL, NULL,
		    "swi%d:", pri);
		if (error)
			return (error);

		if (ithdp != NULL)
			*ithdp = ithd;
	}
	return (ithread_add_handler(ithd, name, handler, arg,
		    (pri * RQ_PPQ) + PI_SOFT, flags, cookiep));
}

/*
 * Schedule a heavyweight software interrupt process.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intrhand *ih = (struct intrhand *)cookie;
	struct ithd *it = ih->ih_ithread;
	int error;

	atomic_add_int(&cnt.v_intr, 1); /* one more global interrupt */

	CTR3(KTR_INTR, "swi_sched pid %d(%s) need=%d",
		it->it_td->td_proc->p_pid, it->it_td->td_proc->p_comm, it->it_need);

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	atomic_store_rel_int(&ih->ih_need, 1);
	if (!(flags & SWI_DELAY)) {
		error = ithread_schedule(it, !cold);
		KASSERT(error == 0, ("stray software interrupt"));
	}
}

/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct ithd *ithd;		/* our thread context */
	struct intrhand *ih;		/* and our interrupt handler chain */
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	ithd = (struct ithd *)arg;	/* point to myself */
	KASSERT(ithd->it_td == td && td->td_ithd == ithd,
	    ("%s: ithread and proc linkage out of sync", __func__));

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d: (%s) exiting", __func__,
			    p->p_pid, p->p_comm);
			td->td_ithd = NULL;
			mtx_destroy(&ithd->it_lock);
			mtx_lock(&Giant);
			free(ithd, M_ITHREAD);
			kthread_exit(0);
		}

		CTR4(KTR_INTR, "%s: pid %d: (%s) need=%d", __func__,
		     p->p_pid, p->p_comm, ithd->it_need);
		while (ithd->it_need) {
			/*
			 * Service interrupts.  If another interrupt
			 * arrives while we are running, it will set
			 * it_need to denote that we should make
			 * another pass.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
restart:
			TAILQ_FOREACH(ih, &ithd->it_handlers, ih_next) {
				if (ithd->it_flags & IT_SOFT && !ih->ih_need)
					continue;
				atomic_store_rel_int(&ih->ih_need, 0);
				CTR6(KTR_INTR,
				    "%s: pid %d ih=%p: %p(%p) flg=%x", __func__,
				    p->p_pid, (void *)ih,
				    (void *)ih->ih_handler, ih->ih_argument,
				    ih->ih_flags);

				if ((ih->ih_flags & IH_DEAD) != 0) {
					mtx_lock(&ithd->it_lock);
					TAILQ_REMOVE(&ithd->it_handlers, ih,
					    ih_next);
					wakeup(ih);
					mtx_unlock(&ithd->it_lock);
					goto restart;
				}
				if ((ih->ih_flags & IH_MPSAFE) == 0)
					mtx_lock(&Giant);
				ih->ih_handler(ih->ih_argument);
				if ((ih->ih_flags & IH_MPSAFE) == 0)
					mtx_unlock(&Giant);
			}
		}

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		mtx_assert(&Giant, MA_NOTOWNED);
		mtx_lock_spin(&sched_lock);
		if (!ithd->it_need) {
			/*
			 * Should we call this earlier in the loop above?
			 */
			if (ithd->it_enable != NULL)
				ithd->it_enable(ithd->it_vector);
			TD_SET_IWAIT(td); /* we're idle */
			p->p_stats->p_ru.ru_nvcsw++;
			CTR2(KTR_INTR, "%s: pid %d: done", __func__, p->p_pid);
			mi_switch();
			CTR2(KTR_INTR, "%s: pid %d: resumed", __func__, p->p_pid);
		}
		mtx_unlock_spin(&sched_lock);
	}
}

/*
 * Start standard software interrupt threads
 */
static void
start_softintr(void *dummy)
{

	if (swi_add(&clk_ithd, "clock", softclock, NULL, SWI_CLOCK,
		INTR_MPSAFE, &softclock_ih) ||
	    swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, 0, &vm_ih))
		panic("died while creating standard software ithreads");

	PROC_LOCK(clk_ithd->it_td->td_proc);
	clk_ithd->it_td->td_proc->p_flag |= P_NOLOAD;
	PROC_UNLOCK(clk_ithd->it_td->td_proc);
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL)

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt, however,
 * is machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
	   req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrcnt,
	    (char *)eintrcnt - (char *)intrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");