sys/kern/kern_intr.c: diff between r113629 (2003-04-17, jhb) and r114471 (2003-05-02, julian). Unchanged lines are shown once with their file line number; at each change, the deleted r113629 line is prefixed with "-" and the added r114471 line with "+".
1/*
2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
10 * disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
-26 * $FreeBSD: head/sys/kern/kern_intr.c 113629 2003-04-17 22:25:35Z jhb $
+26 * $FreeBSD: head/sys/kern/kern_intr.c 114471 2003-05-02 00:33:12Z julian $
27 *
28 */
29
30
31#include <sys/param.h>
32#include <sys/bus.h>
33#include <sys/conf.h>
34#include <sys/rtprio.h>
35#include <sys/systm.h>
36#include <sys/interrupt.h>
37#include <sys/kernel.h>
38#include <sys/kthread.h>
39#include <sys/ktr.h>
40#include <sys/lock.h>
41#include <sys/malloc.h>
42#include <sys/mutex.h>
43#include <sys/proc.h>
44#include <sys/random.h>
45#include <sys/resourcevar.h>
46#include <sys/sysctl.h>
47#include <sys/unistd.h>
48#include <sys/vmmeter.h>
49#include <machine/atomic.h>
50#include <machine/cpu.h>
51#include <machine/md_var.h>
52#include <machine/stdarg.h>
53
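/*
 * Snapshot of one interrupt event: the interrupted thread's process and
 * the vector that fired.  It is filled in and handed to random_harvest()
 * by ithread_schedule() when the ithread has IT_ENTROPY set.
 */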
54struct int_entropy {
55 struct proc *proc;
56 int vector;
57};
58
59void *vm_ih;
60void *softclock_ih;
61struct ithd *clk_ithd;
62struct ithd *tty_ithd;
63
64static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");
65
66static void ithread_update(struct ithd *);
67static void ithread_loop(void *);
68static void start_softintr(void *);
69
70u_char
71ithread_priority(enum intr_type flags)
72{
73 u_char pri;
74
75 flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
76 INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
77 switch (flags) {
78 case INTR_TYPE_TTY:
79 pri = PI_TTYLOW;
80 break;
81 case INTR_TYPE_BIO:
82 /*
83 * XXX We need to refine this. BSD/OS distinguishes
84 * between tape and disk priorities.
85 */
86 pri = PI_DISK;
87 break;
88 case INTR_TYPE_NET:
89 pri = PI_NET;
90 break;
91 case INTR_TYPE_CAM:
92 pri = PI_DISK; /* XXX or PI_CAM? */
93 break;
94 case INTR_TYPE_AV: /* Audio/video */
95 pri = PI_AV;
96 break;
97 case INTR_TYPE_CLK:
98 pri = PI_REALTIME;
99 break;
100 case INTR_TYPE_MISC:
101 pri = PI_DULL; /* don't care */
102 break;
103 default:
104 /* We didn't specify an interrupt level. */
105 panic("ithread_priority: no interrupt type in flags");
106 }
107
108 return pri;
109}
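/*
 * Illustrative example (driver names hypothetical): a network driver
 * registering its handler with
 *
 *	bus_setup_intr(dev, irq_res, INTR_TYPE_NET | INTR_MPSAFE,
 *	    my_intr, sc, &cookie);
 *
 * passes INTR_TYPE_NET down here, so its ithread runs at PI_NET.
 * Exactly one INTR_TYPE_* bit must survive the mask above, or the
 * switch falls through to the panic in the default case.
 */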
110
111/*
 112 * Regenerate the name (p_comm) and priority of an interrupt thread.
113 */
114static void
115ithread_update(struct ithd *ithd)
116{
117 struct intrhand *ih;
118 struct thread *td;
119 struct proc *p;
120 int entropy;
121
122 mtx_assert(&ithd->it_lock, MA_OWNED);
123 td = ithd->it_td;
124 if (td == NULL)
125 return;
126 p = td->td_proc;
127
128 strlcpy(p->p_comm, ithd->it_name, sizeof(p->p_comm));
129
130 ih = TAILQ_FIRST(&ithd->it_handlers);
131 if (ih == NULL) {
132 mtx_lock_spin(&sched_lock);
133 td->td_priority = PRI_MAX_ITHD;
134 td->td_base_pri = PRI_MAX_ITHD;
135 mtx_unlock_spin(&sched_lock);
136 ithd->it_flags &= ~IT_ENTROPY;
137 return;
138 }
139 entropy = 0;
140 mtx_lock_spin(&sched_lock);
141 td->td_priority = ih->ih_pri;
142 td->td_base_pri = ih->ih_pri;
143 mtx_unlock_spin(&sched_lock);
144 TAILQ_FOREACH(ih, &ithd->it_handlers, ih_next) {
145 if (strlen(p->p_comm) + strlen(ih->ih_name) + 1 <
146 sizeof(p->p_comm)) {
147 strcat(p->p_comm, " ");
148 strcat(p->p_comm, ih->ih_name);
149 } else if (strlen(p->p_comm) + 1 == sizeof(p->p_comm)) {
150 if (p->p_comm[sizeof(p->p_comm) - 2] == '+')
151 p->p_comm[sizeof(p->p_comm) - 2] = '*';
152 else
153 p->p_comm[sizeof(p->p_comm) - 2] = '+';
154 } else
155 strcat(p->p_comm, "+");
156 if (ih->ih_flags & IH_ENTROPY)
157 entropy++;
158 }
159 if (entropy)
160 ithd->it_flags |= IT_ENTROPY;
161 else
162 ithd->it_flags &= ~IT_ENTROPY;
163 CTR2(KTR_INTR, "%s: updated %s", __func__, p->p_comm);
164}
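/*
 * For example (handler names hypothetical), an ithread created as
 * "irq14:" with handlers "ata0" and "ata1" attached ends up with the
 * p_comm "irq14: ata0 ata1".  Once p_comm is full, omitted handlers are
 * summarized by a trailing '+', which is upgraded to '*' if the buffer
 * was already full with a '+' in its last slot.
 */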
165
166int
167ithread_create(struct ithd **ithread, int vector, int flags,
168 void (*disable)(int), void (*enable)(int), const char *fmt, ...)
169{
170 struct ithd *ithd;
171 struct thread *td;
172 struct proc *p;
173 int error;
174 va_list ap;
175
176 /* The only valid flag during creation is IT_SOFT. */
177 if ((flags & ~IT_SOFT) != 0)
178 return (EINVAL);
179
180 ithd = malloc(sizeof(struct ithd), M_ITHREAD, M_WAITOK | M_ZERO);
181 ithd->it_vector = vector;
182 ithd->it_disable = disable;
183 ithd->it_enable = enable;
184 ithd->it_flags = flags;
185 TAILQ_INIT(&ithd->it_handlers);
186 mtx_init(&ithd->it_lock, "ithread", NULL, MTX_DEF);
187
188 va_start(ap, fmt);
189 vsnprintf(ithd->it_name, sizeof(ithd->it_name), fmt, ap);
190 va_end(ap);
191
192 error = kthread_create(ithread_loop, ithd, &p, RFSTOPPED | RFHIGHPID,
193 0, "%s", ithd->it_name);
194 if (error) {
195 mtx_destroy(&ithd->it_lock);
196 free(ithd, M_ITHREAD);
197 return (error);
198 }
199 td = FIRST_THREAD_IN_PROC(p); /* XXXKSE */
200 mtx_lock_spin(&sched_lock);
201 td->td_ksegrp->kg_pri_class = PRI_ITHD;
202 td->td_priority = PRI_MAX_ITHD;
203 TD_SET_IWAIT(td);
204 mtx_unlock_spin(&sched_lock);
205 ithd->it_td = td;
206 td->td_ithd = ithd;
207 if (ithread != NULL)
208 *ithread = ithd;
209 CTR2(KTR_INTR, "%s: created %s", __func__, ithd->it_name);
210 return (0);
211}
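/*
 * A sketch of the expected call from machine-dependent interrupt setup
 * code (callback names are illustrative, not from this file):
 *
 *	error = ithread_create(&ithd, vector, 0, md_mask_source,
 *	    md_unmask_source, "irq%d:", vector);
 *
 * The enable callback, if given, is invoked from ithread_loop() to
 * unmask the source once all handlers have completed.
 */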
212
213int
214ithread_destroy(struct ithd *ithread)
215{
216
217 struct thread *td;
218 struct proc *p;
219 if (ithread == NULL)
220 return (EINVAL);
221
222 td = ithread->it_td;
223 p = td->td_proc;
224 mtx_lock(&ithread->it_lock);
225 if (!TAILQ_EMPTY(&ithread->it_handlers)) {
226 mtx_unlock(&ithread->it_lock);
227 return (EINVAL);
228 }
229 ithread->it_flags |= IT_DEAD;
230 mtx_lock_spin(&sched_lock);
231 if (TD_AWAITING_INTR(td)) {
232 TD_CLR_IWAIT(td);
233 setrunqueue(td);
234 }
235 mtx_unlock_spin(&sched_lock);
236 mtx_unlock(&ithread->it_lock);
237 CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_name);
238 return (0);
239}
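/*
 * Note that ithread_destroy() only marks the thread IT_DEAD and wakes
 * it; the thread frees its own ithd and calls kthread_exit() from
 * ithread_loop() the next time it runs.
 */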
240
241int
242ithread_add_handler(struct ithd* ithread, const char *name,
243 driver_intr_t handler, void *arg, u_char pri, enum intr_type flags,
244 void **cookiep)
245{
246 struct intrhand *ih, *temp_ih;
247
248 if (ithread == NULL || name == NULL || handler == NULL)
249 return (EINVAL);
 250	if ((flags & INTR_FAST) != 0)
251 flags |= INTR_EXCL;
252
253 ih = malloc(sizeof(struct intrhand), M_ITHREAD, M_WAITOK | M_ZERO);
254 ih->ih_handler = handler;
255 ih->ih_argument = arg;
256 ih->ih_name = name;
257 ih->ih_ithread = ithread;
258 ih->ih_pri = pri;
259 if (flags & INTR_FAST)
260 ih->ih_flags = IH_FAST | IH_EXCLUSIVE;
261 else if (flags & INTR_EXCL)
262 ih->ih_flags = IH_EXCLUSIVE;
263 if (flags & INTR_MPSAFE)
264 ih->ih_flags |= IH_MPSAFE;
265 if (flags & INTR_ENTROPY)
266 ih->ih_flags |= IH_ENTROPY;
267
268 mtx_lock(&ithread->it_lock);
 269	if ((flags & INTR_EXCL) != 0 && !TAILQ_EMPTY(&ithread->it_handlers))
270 goto fail;
271 if (!TAILQ_EMPTY(&ithread->it_handlers) &&
272 (TAILQ_FIRST(&ithread->it_handlers)->ih_flags & IH_EXCLUSIVE) != 0)
273 goto fail;
274
275 TAILQ_FOREACH(temp_ih, &ithread->it_handlers, ih_next)
276 if (temp_ih->ih_pri > ih->ih_pri)
277 break;
278 if (temp_ih == NULL)
279 TAILQ_INSERT_TAIL(&ithread->it_handlers, ih, ih_next);
280 else
281 TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
282 ithread_update(ithread);
283 mtx_unlock(&ithread->it_lock);
284
285 if (cookiep != NULL)
286 *cookiep = ih;
287 CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
288 ithread->it_name);
289 return (0);
290
291fail:
292 mtx_unlock(&ithread->it_lock);
293 free(ih, M_ITHREAD);
294 return (EINVAL);
295}
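/*
 * The handler list is kept sorted by ih_pri in ascending numeric order
 * (smaller values are better priorities), so on a shared interrupt the
 * most important handler runs first and, via ithread_update(), also
 * determines the scheduling priority of the ithread itself.
 */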
296
297int
298ithread_remove_handler(void *cookie)
299{
300 struct intrhand *handler = (struct intrhand *)cookie;
301 struct ithd *ithread;
302#ifdef INVARIANTS
303 struct intrhand *ih;
304#endif
305
306 if (handler == NULL)
307 return (EINVAL);
308 ithread = handler->ih_ithread;
309 KASSERT(ithread != NULL,
310 ("interrupt handler \"%s\" has a NULL interrupt thread",
311 handler->ih_name));
312 CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
313 ithread->it_name);
314 mtx_lock(&ithread->it_lock);
315#ifdef INVARIANTS
316 TAILQ_FOREACH(ih, &ithread->it_handlers, ih_next)
317 if (ih == handler)
318 goto ok;
319 mtx_unlock(&ithread->it_lock);
320 panic("interrupt handler \"%s\" not found in interrupt thread \"%s\"",
 321	    handler->ih_name, ithread->it_name);
322ok:
323#endif
324 /*
325 * If the interrupt thread is already running, then just mark this
326 * handler as being dead and let the ithread do the actual removal.
327 */
328 mtx_lock_spin(&sched_lock);
329 if (!TD_AWAITING_INTR(ithread->it_td)) {
330 handler->ih_flags |= IH_DEAD;
331
332 /*
333 * Ensure that the thread will process the handler list
334 * again and remove this handler if it has already passed
335 * it on the list.
336 */
337 ithread->it_need = 1;
338 } else
339 TAILQ_REMOVE(&ithread->it_handlers, handler, ih_next);
340 mtx_unlock_spin(&sched_lock);
341 if ((handler->ih_flags & IH_DEAD) != 0)
342 msleep(handler, &ithread->it_lock, PUSER, "itrmh", 0);
343 ithread_update(ithread);
344 mtx_unlock(&ithread->it_lock);
345 free(handler, M_ITHREAD);
346 return (0);
347}
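/*
 * The IH_DEAD case above is a handshake with ithread_loop(): when the
 * running ithread encounters a dead handler it unlinks it and does a
 * wakeup() on the handler address, releasing the msleep() here so the
 * intrhand can safely be freed.
 */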
348
349int
350ithread_schedule(struct ithd *ithread, int do_switch)
351{
352 struct int_entropy entropy;
353 struct thread *td;
354 struct thread *ctd;
355 struct proc *p;
356
357 /*
358 * If no ithread or no handlers, then we have a stray interrupt.
359 */
360 if ((ithread == NULL) || TAILQ_EMPTY(&ithread->it_handlers))
361 return (EINVAL);
362
363 ctd = curthread;
364 /*
365 * If any of the handlers for this ithread claim to be good
366 * sources of entropy, then gather some.
367 */
368 if (harvest.interrupt && ithread->it_flags & IT_ENTROPY) {
369 entropy.vector = ithread->it_vector;
370 entropy.proc = ctd->td_proc;
371 random_harvest(&entropy, sizeof(entropy), 2, 0,
372 RANDOM_INTERRUPT);
373 }
374
375 td = ithread->it_td;
376 p = td->td_proc;
377 KASSERT(p != NULL, ("ithread %s has no process", ithread->it_name));
378 CTR4(KTR_INTR, "%s: pid %d: (%s) need = %d",
379 __func__, p->p_pid, p->p_comm, ithread->it_need);
380
381 /*
382 * Set it_need to tell the thread to keep running if it is already
383 * running. Then, grab sched_lock and see if we actually need to
384 * put this thread on the runqueue. If so and the do_switch flag is
385 * true and it is safe to switch, then switch to the ithread
386 * immediately. Otherwise, set the needresched flag to guarantee
387 * that this ithread will run before any userland processes.
388 */
389 ithread->it_need = 1;
390 mtx_lock_spin(&sched_lock);
391 if (TD_AWAITING_INTR(td)) {
392 CTR2(KTR_INTR, "%s: setrunqueue %d", __func__, p->p_pid);
393 TD_CLR_IWAIT(td);
394 setrunqueue(td);
395 if (do_switch &&
396 (ctd->td_critnest == 1) ) {
397 KASSERT((TD_IS_RUNNING(ctd)),
398 ("ithread_schedule: Bad state for curthread."));
399 ctd->td_proc->p_stats->p_ru.ru_nivcsw++;
-400			if (ctd->td_kse->ke_flags & KEF_IDLEKSE)
+400			if (ctd->td_flags & TDF_IDLETD)
401 ctd->td_state = TDS_CAN_RUN; /* XXXKSE */
402 mi_switch();
403 } else {
404 curthread->td_flags |= TDF_NEEDRESCHED;
405 }
406 } else {
407 CTR4(KTR_INTR, "%s: pid %d: it_need %d, state %d",
408 __func__, p->p_pid, ithread->it_need, td->td_state);
409 }
410 mtx_unlock_spin(&sched_lock);
411
412 return (0);
413}
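/*
 * The immediate mi_switch() above is only attempted when curthread's
 * critical section nesting is exactly 1, typically just the section
 * entered for this interrupt; in any deeper nesting the switch is
 * deferred by setting TDF_NEEDRESCHED, which still guarantees the
 * ithread runs before any user code.
 */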
414
415int
416swi_add(struct ithd **ithdp, const char *name, driver_intr_t handler,
417 void *arg, int pri, enum intr_type flags, void **cookiep)
418{
419 struct ithd *ithd;
420 int error;
421
422 if (flags & (INTR_FAST | INTR_ENTROPY))
423 return (EINVAL);
424
425 ithd = (ithdp != NULL) ? *ithdp : NULL;
426
427 if (ithd != NULL) {
428 if ((ithd->it_flags & IT_SOFT) == 0)
 429			return (EINVAL);
430 } else {
431 error = ithread_create(&ithd, pri, IT_SOFT, NULL, NULL,
432 "swi%d:", pri);
433 if (error)
434 return (error);
435
436 if (ithdp != NULL)
437 *ithdp = ithd;
438 }
439 return (ithread_add_handler(ithd, name, handler, arg,
440 (pri * RQ_PPQ) + PI_SOFT, flags, cookiep));
441}
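/*
 * Typical registration, as done for the standard threads in
 * start_softintr() below:
 *
 *	swi_add(&clk_ithd, "clock", softclock, NULL, SWI_CLOCK,
 *	    INTR_MPSAFE, &softclock_ih);
 *
 * This yields a thread named "swi<pri>: clock" whose handler runs at
 * priority (SWI_CLOCK * RQ_PPQ) + PI_SOFT.
 */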
442
443
444/*
445 * Schedule a heavyweight software interrupt process.
446 */
447void
448swi_sched(void *cookie, int flags)
449{
450 struct intrhand *ih = (struct intrhand *)cookie;
451 struct ithd *it = ih->ih_ithread;
452 int error;
453
454 atomic_add_int(&cnt.v_intr, 1); /* one more global interrupt */
455
456 CTR3(KTR_INTR, "swi_sched pid %d(%s) need=%d",
457 it->it_td->td_proc->p_pid, it->it_td->td_proc->p_comm, it->it_need);
458
459 /*
460 * Set ih_need for this handler so that if the ithread is already
461 * running it will execute this handler on the next pass. Otherwise,
462 * it will execute it the next time it runs.
463 */
464 atomic_store_rel_int(&ih->ih_need, 1);
465 if (!(flags & SWI_DELAY)) {
466 error = ithread_schedule(it, !cold && !dumping);
467 KASSERT(error == 0, ("stray software interrupt"));
468 }
469}
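/*
 * Callers pass the cookie obtained from swi_add(); hardclock(), for
 * instance, kicks the softclock handler with swi_sched(softclock_ih, 0).
 * With SWI_DELAY only ih_need is set, and the handler runs the next
 * time something else schedules this ithread.
 */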
470
471/*
472 * This is the main code for interrupt threads.
473 */
474static void
475ithread_loop(void *arg)
476{
477 struct ithd *ithd; /* our thread context */
478 struct intrhand *ih; /* and our interrupt handler chain */
479 struct thread *td;
480 struct proc *p;
481
482 td = curthread;
483 p = td->td_proc;
484 ithd = (struct ithd *)arg; /* point to myself */
485 KASSERT(ithd->it_td == td && td->td_ithd == ithd,
486 ("%s: ithread and proc linkage out of sync", __func__));
487
488 /*
489 * As long as we have interrupts outstanding, go through the
490 * list of handlers, giving each one a go at it.
491 */
492 for (;;) {
493 /*
494 * If we are an orphaned thread, then just die.
495 */
496 if (ithd->it_flags & IT_DEAD) {
497 CTR3(KTR_INTR, "%s: pid %d: (%s) exiting", __func__,
498 p->p_pid, p->p_comm);
499 td->td_ithd = NULL;
500 mtx_destroy(&ithd->it_lock);
501 mtx_lock(&Giant);
502 free(ithd, M_ITHREAD);
503 kthread_exit(0);
504 }
505
506 CTR4(KTR_INTR, "%s: pid %d: (%s) need=%d", __func__,
507 p->p_pid, p->p_comm, ithd->it_need);
508 while (ithd->it_need) {
509 /*
510 * Service interrupts. If another interrupt
511 * arrives while we are running, they will set
512 * it_need to denote that we should make
513 * another pass.
514 */
515 atomic_store_rel_int(&ithd->it_need, 0);
516restart:
517 TAILQ_FOREACH(ih, &ithd->it_handlers, ih_next) {
518 if (ithd->it_flags & IT_SOFT && !ih->ih_need)
519 continue;
520 atomic_store_rel_int(&ih->ih_need, 0);
521 CTR6(KTR_INTR,
522 "%s: pid %d ih=%p: %p(%p) flg=%x", __func__,
523 p->p_pid, (void *)ih,
524 (void *)ih->ih_handler, ih->ih_argument,
525 ih->ih_flags);
526
527 if ((ih->ih_flags & IH_DEAD) != 0) {
528 mtx_lock(&ithd->it_lock);
529 TAILQ_REMOVE(&ithd->it_handlers, ih,
530 ih_next);
531 wakeup(ih);
532 mtx_unlock(&ithd->it_lock);
533 goto restart;
534 }
535 if ((ih->ih_flags & IH_MPSAFE) == 0)
536 mtx_lock(&Giant);
537 ih->ih_handler(ih->ih_argument);
538 if ((ih->ih_flags & IH_MPSAFE) == 0)
539 mtx_unlock(&Giant);
540 }
541 }
542
543 /*
544 * Processed all our interrupts. Now get the sched
545 * lock. This may take a while and it_need may get
546 * set again, so we have to check it again.
547 */
548 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
549 mtx_assert(&Giant, MA_NOTOWNED);
550 mtx_lock_spin(&sched_lock);
551 if (!ithd->it_need) {
552 /*
553 * Should we call this earlier in the loop above?
554 */
555 if (ithd->it_enable != NULL)
556 ithd->it_enable(ithd->it_vector);
557 TD_SET_IWAIT(td); /* we're idle */
558 p->p_stats->p_ru.ru_nvcsw++;
559 CTR2(KTR_INTR, "%s: pid %d: done", __func__, p->p_pid);
560 mi_switch();
561 CTR2(KTR_INTR, "%s: pid %d: resumed", __func__, p->p_pid);
562 }
563 mtx_unlock_spin(&sched_lock);
564 }
565}
566
567/*
568 * Start standard software interrupt threads
569 */
570static void
571start_softintr(void *dummy)
572{
573 struct proc *p;
574
575 if (swi_add(&clk_ithd, "clock", softclock, NULL, SWI_CLOCK,
576 INTR_MPSAFE, &softclock_ih) ||
577 swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, 0, &vm_ih))
578 panic("died while creating standard software ithreads");
579
580 p = clk_ithd->it_td->td_proc;
581 PROC_LOCK(p);
582 p->p_flag |= P_NOLOAD;
583 PROC_UNLOCK(p);
584}
585SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL)
586
587/*
588 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 589 * The data for this is machine dependent, and the declarations are in machine
590 * dependent code. The layout of intrnames and intrcnt however is machine
591 * independent.
592 *
593 * We do not know the length of intrcnt and intrnames at compile time, so
594 * calculate things at run time.
595 */
596static int
597sysctl_intrnames(SYSCTL_HANDLER_ARGS)
598{
599 return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
600 req));
601}
602
603SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
604 NULL, 0, sysctl_intrnames, "", "Interrupt Names");
605
606static int
607sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
608{
609 return (sysctl_handle_opaque(oidp, intrcnt,
610 (char *)eintrcnt - (char *)intrcnt, req));
611}
612
613SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
614 NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
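/*
 * Userland consumers such as vmstat(8) -i and systat(1) are expected to
 * read hw.intrnames and hw.intrcnt together, pairing each counter with
 * the name at the same index.
 */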