/*-
 * kern_intr.c, revision 87593
 *
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/kern/kern_intr.c 87593 2001-12-10 05:40:12Z obrien $
 */
25226586Sdim * 26226586Sdim * $FreeBSD: head/sys/kern/kern_intr.c 87593 2001-12-10 05:40:12Z obrien $ 27239462Sdim * 28226586Sdim */ 29226586Sdim 30226586Sdim 31226586Sdim#include <sys/param.h> 32226586Sdim#include <sys/bus.h> 33226586Sdim#include <sys/rtprio.h> 34226586Sdim#include <sys/systm.h> 35249423Sdim#include <sys/interrupt.h> 36234353Sdim#include <sys/kernel.h> 37226586Sdim#include <sys/kthread.h> 38226586Sdim#include <sys/ktr.h> 39226586Sdim#include <sys/lock.h> 40251662Sdim#include <sys/malloc.h> 41251662Sdim#include <sys/mutex.h> 42226586Sdim#include <sys/proc.h> 43226586Sdim#include <sys/random.h> 44226586Sdim#include <sys/resourcevar.h> 45226586Sdim#include <sys/sysctl.h> 46226586Sdim#include <sys/unistd.h> 47226586Sdim#include <sys/vmmeter.h> 48226586Sdim#include <machine/atomic.h> 49226586Sdim#include <machine/cpu.h> 50226586Sdim#include <machine/md_var.h> 51226586Sdim#include <machine/stdarg.h> 52226586Sdim 53226586Sdim#include <net/netisr.h> /* prototype for legacy_setsoftnet */ 54226586Sdim 55226586Sdimstruct int_entropy { 56243830Sdim struct proc *proc; 57243830Sdim int vector; 58243830Sdim}; 59243830Sdim 60243830Sdimvoid *net_ih; 61243830Sdimvoid *vm_ih; 62243830Sdimvoid *softclock_ih; 63243830Sdimstruct ithd *clk_ithd; 64243830Sdimstruct ithd *tty_ithd; 65243830Sdim 66243830Sdimstatic MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads"); 67243830Sdim 68243830Sdimstatic void ithread_update(struct ithd *); 69243830Sdimstatic void ithread_loop(void *); 70243830Sdimstatic void start_softintr(void *); 71226586Sdimstatic void swi_net(void *); 72226586Sdim 73226586Sdimu_char 74226586Sdimithread_priority(enum intr_type flags) 75226586Sdim{ 76226586Sdim u_char pri; 77226586Sdim 78226586Sdim flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET | 79226586Sdim INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV); 80226586Sdim switch (flags) { 81226586Sdim case INTR_TYPE_TTY: 82226586Sdim pri = PI_TTYLOW; 83226586Sdim break; 84226586Sdim 
case INTR_TYPE_BIO: 85226586Sdim /* 86226586Sdim * XXX We need to refine this. BSD/OS distinguishes 87226586Sdim * between tape and disk priorities. 88226586Sdim */ 89226586Sdim pri = PI_DISK; 90226586Sdim break; 91243830Sdim case INTR_TYPE_NET: 92243830Sdim pri = PI_NET; 93243830Sdim break; 94243830Sdim case INTR_TYPE_CAM: 95243830Sdim pri = PI_DISK; /* XXX or PI_CAM? */ 96243830Sdim break; 97243830Sdim case INTR_TYPE_AV: /* Audio/video */ 98226586Sdim pri = PI_AV; 99226586Sdim break; 100226586Sdim case INTR_TYPE_CLK: 101226586Sdim pri = PI_REALTIME; 102226586Sdim break; 103226586Sdim case INTR_TYPE_MISC: 104226586Sdim pri = PI_DULL; /* don't care */ 105226586Sdim break; 106226586Sdim default: 107226586Sdim /* We didn't specify an interrupt level. */ 108226586Sdim panic("ithread_priority: no interrupt type in flags"); 109226586Sdim } 110226586Sdim 111226586Sdim return pri; 112226586Sdim} 113226586Sdim 114226586Sdim/* 115226586Sdim * Regenerate the name (p_comm) and priority for a threaded interrupt thread. 
116226586Sdim */ 117226586Sdimstatic void 118226586Sdimithread_update(struct ithd *ithd) 119226586Sdim{ 120226586Sdim struct intrhand *ih; 121226586Sdim struct thread *td; 122226586Sdim struct proc *p; 123226586Sdim int entropy; 124226586Sdim 125226586Sdim mtx_assert(&ithd->it_lock, MA_OWNED); 126226586Sdim td = ithd->it_td; 127226586Sdim if (td == NULL) 128226586Sdim return; 129226586Sdim p = td->td_proc; 130226586Sdim 131226586Sdim strncpy(p->p_comm, ithd->it_name, sizeof(ithd->it_name)); 132226586Sdim ih = TAILQ_FIRST(&ithd->it_handlers); 133226586Sdim if (ih == NULL) { 134226586Sdim td->td_ksegrp->kg_pri.pri_level = PRI_MAX_ITHD; 135226586Sdim ithd->it_flags &= ~IT_ENTROPY; 136226586Sdim return; 137226586Sdim } 138226586Sdim 139226586Sdim entropy = 0; 140243830Sdim td->td_ksegrp->kg_pri.pri_level = ih->ih_pri; 141243830Sdim td->td_ksegrp->kg_pri.pri_native = ih->ih_pri; 142243830Sdim TAILQ_FOREACH(ih, &ithd->it_handlers, ih_next) { 143234353Sdim if (strlen(p->p_comm) + strlen(ih->ih_name) + 1 < 144234353Sdim sizeof(p->p_comm)) { 145234353Sdim strcat(p->p_comm, " "); 146234353Sdim strcat(p->p_comm, ih->ih_name); 147234353Sdim } else if (strlen(p->p_comm) + 1 == sizeof(p->p_comm)) { 148226586Sdim if (p->p_comm[sizeof(p->p_comm) - 2] == '+') 149226586Sdim p->p_comm[sizeof(p->p_comm) - 2] = '*'; 150226586Sdim else 151226586Sdim p->p_comm[sizeof(p->p_comm) - 2] = '+'; 152226586Sdim } else 153226586Sdim strcat(p->p_comm, "+"); 154226586Sdim if (ih->ih_flags & IH_ENTROPY) 155226586Sdim entropy++; 156226586Sdim } 157226586Sdim 158226586Sdim if (entropy) 159226586Sdim ithd->it_flags |= IT_ENTROPY; 160226586Sdim else 161226586Sdim ithd->it_flags &= ~IT_ENTROPY; 162226586Sdim 163226586Sdim CTR2(KTR_INTR, "%s: updated %s\n", __func__, p->p_comm); 164226586Sdim} 165226586Sdim 166226586Sdimint 167226586Sdimithread_create(struct ithd **ithread, int vector, int flags, 168226586Sdim void (*disable)(int), void (*enable)(int), const char *fmt, ...) 
169226586Sdim{ 170226586Sdim struct ithd *ithd; 171226586Sdim struct thread *td; 172226586Sdim struct proc *p; 173226586Sdim int error; 174226586Sdim va_list ap; 175226586Sdim 176226586Sdim /* The only valid flag during creation is IT_SOFT. */ 177226586Sdim if ((flags & ~IT_SOFT) != 0) 178226586Sdim return (EINVAL); 179226586Sdim 180226586Sdim ithd = malloc(sizeof(struct ithd), M_ITHREAD, M_WAITOK | M_ZERO); 181226586Sdim ithd->it_vector = vector; 182226586Sdim ithd->it_disable = disable; 183226586Sdim ithd->it_enable = enable; 184226586Sdim ithd->it_flags = flags; 185226586Sdim TAILQ_INIT(&ithd->it_handlers); 186226586Sdim mtx_init(&ithd->it_lock, "ithread", MTX_DEF); 187226586Sdim mtx_lock(&ithd->it_lock); 188226586Sdim 189226586Sdim va_start(ap, fmt); 190226586Sdim vsnprintf(ithd->it_name, sizeof(ithd->it_name), fmt, ap); 191226586Sdim va_end(ap); 192226586Sdim 193226586Sdim error = kthread_create(ithread_loop, ithd, &p, RFSTOPPED | RFHIGHPID, 194226586Sdim "%s", ithd->it_name); 195226586Sdim if (error) { 196226586Sdim mtx_destroy(&ithd->it_lock); 197226586Sdim free(ithd, M_ITHREAD); 198226586Sdim return (error); 199226586Sdim } 200226586Sdim td = &p->p_thread; /* XXXKSE */ 201226586Sdim td->td_ksegrp->kg_pri.pri_class = PRI_ITHD; 202226586Sdim td->td_ksegrp->kg_pri.pri_level = PRI_MAX_ITHD; 203226586Sdim p->p_stat = SWAIT; 204226586Sdim ithd->it_td = td; 205226586Sdim td->td_ithd = ithd; 206226586Sdim if (ithread != NULL) 207226586Sdim *ithread = ithd; 208226586Sdim mtx_unlock(&ithd->it_lock); 209226586Sdim 210226586Sdim CTR2(KTR_INTR, "%s: created %s", __func__, ithd->it_name); 211226586Sdim return (0); 212226586Sdim} 213226586Sdim 214226586Sdimint 215226586Sdimithread_destroy(struct ithd *ithread) 216226586Sdim{ 217226586Sdim 218226586Sdim struct thread *td; 219226586Sdim struct proc *p; 220226586Sdim if (ithread == NULL) 221226586Sdim return (EINVAL); 222226586Sdim 223226586Sdim td = ithread->it_td; 224226586Sdim p = td->td_proc; 225226586Sdim 
mtx_lock(&ithread->it_lock); 226226586Sdim if (!TAILQ_EMPTY(&ithread->it_handlers)) { 227226586Sdim mtx_unlock(&ithread->it_lock); 228226586Sdim return (EINVAL); 229226586Sdim } 230226586Sdim ithread->it_flags |= IT_DEAD; 231226586Sdim mtx_lock_spin(&sched_lock); 232226586Sdim if (p->p_stat == SWAIT) { 233226586Sdim p->p_stat = SRUN; /* XXXKSE */ 234226586Sdim setrunqueue(td); 235226586Sdim } 236226586Sdim mtx_unlock_spin(&sched_lock); 237226586Sdim mtx_unlock(&ithread->it_lock); 238226586Sdim CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_name); 239226586Sdim return (0); 240226586Sdim} 241226586Sdim 242226586Sdimint 243226586Sdimithread_add_handler(struct ithd* ithread, const char *name, 244226586Sdim driver_intr_t handler, void *arg, u_char pri, enum intr_type flags, 245226586Sdim void **cookiep) 246226586Sdim{ 247226586Sdim struct intrhand *ih, *temp_ih; 248226586Sdim 249226586Sdim if (ithread == NULL || name == NULL || handler == NULL) 250226586Sdim return (EINVAL); 251226586Sdim if ((flags & INTR_FAST) !=0) 252226586Sdim flags |= INTR_EXCL; 253226586Sdim 254226586Sdim ih = malloc(sizeof(struct intrhand), M_ITHREAD, M_WAITOK | M_ZERO); 255226586Sdim ih->ih_handler = handler; 256226586Sdim ih->ih_argument = arg; 257226586Sdim ih->ih_name = name; 258226586Sdim ih->ih_ithread = ithread; 259226586Sdim ih->ih_pri = pri; 260226586Sdim if (flags & INTR_FAST) 261226586Sdim ih->ih_flags = IH_FAST | IH_EXCLUSIVE; 262226586Sdim else if (flags & INTR_EXCL) 263226586Sdim ih->ih_flags = IH_EXCLUSIVE; 264226586Sdim if (flags & INTR_MPSAFE) 265226586Sdim ih->ih_flags |= IH_MPSAFE; 266226586Sdim if (flags & INTR_ENTROPY) 267226586Sdim ih->ih_flags |= IH_ENTROPY; 268226586Sdim 269226586Sdim mtx_lock(&ithread->it_lock); 270226586Sdim if ((flags & INTR_EXCL) !=0 && !TAILQ_EMPTY(&ithread->it_handlers)) 271226586Sdim goto fail; 272226586Sdim if (!TAILQ_EMPTY(&ithread->it_handlers) && 273226586Sdim (TAILQ_FIRST(&ithread->it_handlers)->ih_flags & IH_EXCLUSIVE) != 0) 
274226586Sdim goto fail; 275226586Sdim 276226586Sdim TAILQ_FOREACH(temp_ih, &ithread->it_handlers, ih_next) 277226586Sdim if (temp_ih->ih_pri > ih->ih_pri) 278226586Sdim break; 279226586Sdim if (temp_ih == NULL) 280226586Sdim TAILQ_INSERT_TAIL(&ithread->it_handlers, ih, ih_next); 281226586Sdim else 282226586Sdim TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next); 283226586Sdim ithread_update(ithread); 284226586Sdim mtx_unlock(&ithread->it_lock); 285226586Sdim 286226586Sdim if (cookiep != NULL) 287226586Sdim *cookiep = ih; 288226586Sdim CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name, 289226586Sdim ithread->it_name); 290226586Sdim return (0); 291226586Sdim 292226586Sdimfail: 293226586Sdim mtx_unlock(&ithread->it_lock); 294226586Sdim free(ih, M_ITHREAD); 295226586Sdim return (EINVAL); 296226586Sdim} 297226586Sdim 298226586Sdimint 299226586Sdimithread_remove_handler(void *cookie) 300226586Sdim{ 301226586Sdim struct intrhand *handler = (struct intrhand *)cookie; 302226586Sdim struct ithd *ithread; 303226586Sdim#ifdef INVARIANTS 304226586Sdim struct intrhand *ih; 305226586Sdim#endif 306226586Sdim 307226586Sdim if (handler == NULL) 308226586Sdim return (EINVAL); 309226586Sdim ithread = handler->ih_ithread; 310226586Sdim KASSERT(ithread != NULL, 311226586Sdim ("interrupt handler \"%s\" has a NULL interrupt thread", 312226586Sdim handler->ih_name)); 313226586Sdim CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name, 314226586Sdim ithread->it_name); 315226586Sdim mtx_lock(&ithread->it_lock); 316226586Sdim#ifdef INVARIANTS 317226586Sdim TAILQ_FOREACH(ih, &ithread->it_handlers, ih_next) 318226586Sdim if (ih == handler) 319226586Sdim goto ok; 320226586Sdim mtx_unlock(&ithread->it_lock); 321226586Sdim panic("interrupt handler \"%s\" not found in interrupt thread \"%s\"", 322226586Sdim ih->ih_name, ithread->it_name); 323226586Sdimok: 324226586Sdim#endif 325226586Sdim /* 326226586Sdim * If the interrupt thread is already running, then just mark this 
327226586Sdim * handler as being dead and let the ithread do the actual removal. 328226586Sdim */ 329251662Sdim mtx_lock_spin(&sched_lock); 330226586Sdim if (ithread->it_td->td_proc->p_stat != SWAIT) { 331226586Sdim handler->ih_flags |= IH_DEAD; 332226586Sdim 333226586Sdim /* 334226586Sdim * Ensure that the thread will process the handler list 335226586Sdim * again and remove this handler if it has already passed 336226586Sdim * it on the list. 337226586Sdim */ 338226586Sdim ithread->it_need = 1; 339226586Sdim } else 340226586Sdim TAILQ_REMOVE(&ithread->it_handlers, handler, ih_next); 341226586Sdim mtx_unlock_spin(&sched_lock); 342226586Sdim if ((handler->ih_flags & IH_DEAD) != 0) 343226586Sdim msleep(handler, &ithread->it_lock, PUSER, "itrmh", 0); 344226586Sdim ithread_update(ithread); 345226586Sdim mtx_unlock(&ithread->it_lock); 346226586Sdim free(handler, M_ITHREAD); 347243830Sdim return (0); 348226586Sdim} 349239462Sdim 350239462Sdimint 351239462Sdimithread_schedule(struct ithd *ithread, int do_switch) 352239462Sdim{ 353239462Sdim struct int_entropy entropy; 354239462Sdim struct thread *td; 355239462Sdim struct proc *p; 356239462Sdim 357239462Sdim /* 358239462Sdim * If no ithread or no handlers, then we have a stray interrupt. 359239462Sdim */ 360239462Sdim if ((ithread == NULL) || TAILQ_EMPTY(&ithread->it_handlers)) 361239462Sdim return (EINVAL); 362239462Sdim 363226586Sdim /* 364226586Sdim * If any of the handlers for this ithread claim to be good 365226586Sdim * sources of entropy, then gather some. 
366226586Sdim */ 367226586Sdim if (harvest.interrupt && ithread->it_flags & IT_ENTROPY) { 368226586Sdim entropy.vector = ithread->it_vector; 369234353Sdim entropy.proc = curthread->td_proc;; 370226586Sdim random_harvest(&entropy, sizeof(entropy), 2, 0, 371226586Sdim RANDOM_INTERRUPT); 372226586Sdim } 373226586Sdim 374226586Sdim td = ithread->it_td; 375226586Sdim p = td->td_proc; 376234353Sdim KASSERT(p != NULL, ("ithread %s has no process", ithread->it_name)); 377226586Sdim CTR4(KTR_INTR, "%s: pid %d: (%s) need = %d", __func__, p->p_pid, p->p_comm, 378226586Sdim ithread->it_need); 379226586Sdim 380234353Sdim /* 381226586Sdim * Set it_need to tell the thread to keep running if it is already 382226586Sdim * running. Then, grab sched_lock and see if we actually need to 383226586Sdim * put this thread on the runqueue. If so and the do_switch flag is 384234353Sdim * true, then switch to the ithread immediately. Otherwise, set the 385226586Sdim * needresched flag to guarantee that this ithread will run before any 386226586Sdim * userland processes. 
387226586Sdim */ 388226586Sdim ithread->it_need = 1; 389226586Sdim mtx_lock_spin(&sched_lock); 390226586Sdim if (p->p_stat == SWAIT) { 391226586Sdim CTR2(KTR_INTR, "%s: setrunqueue %d", __func__, p->p_pid); 392226586Sdim p->p_stat = SRUN; 393226586Sdim setrunqueue(td); /* XXXKSE */ 394226586Sdim if (do_switch && curthread->td_proc->p_stat == SRUN) { 395226586Sdim if (curthread != PCPU_GET(idlethread)) 396226586Sdim setrunqueue(curthread); 397226586Sdim curthread->td_proc->p_stats->p_ru.ru_nivcsw++; 398226586Sdim mi_switch(); 399226586Sdim } else 400226586Sdim curthread->td_kse->ke_flags |= KEF_NEEDRESCHED; 401226586Sdim } else { 402226586Sdim CTR4(KTR_INTR, "%s: pid %d: it_need %d, state %d", 403226586Sdim __func__, p->p_pid, ithread->it_need, p->p_stat); 404226586Sdim } 405226586Sdim mtx_unlock_spin(&sched_lock); 406226586Sdim 407226586Sdim return (0); 408226586Sdim} 409226586Sdim 410226586Sdimint 411226586Sdimswi_add(struct ithd **ithdp, const char *name, driver_intr_t handler, 412226586Sdim void *arg, int pri, enum intr_type flags, void **cookiep) 413226586Sdim{ 414226586Sdim struct ithd *ithd; 415226586Sdim int error; 416226586Sdim 417226586Sdim if (flags & (INTR_FAST | INTR_ENTROPY)) 418226586Sdim return (EINVAL); 419226586Sdim 420226586Sdim ithd = (ithdp != NULL) ? *ithdp : NULL; 421226586Sdim 422226586Sdim if (ithd != NULL) { 423226586Sdim if ((ithd->it_flags & IT_SOFT) == 0) 424226586Sdim return(EINVAL); 425226586Sdim } else { 426226586Sdim error = ithread_create(&ithd, pri, IT_SOFT, NULL, NULL, 427226586Sdim "swi%d:", pri); 428226586Sdim if (error) 429226586Sdim return (error); 430234353Sdim 431234353Sdim if (ithdp != NULL) 432234353Sdim *ithdp = ithd; 433234353Sdim } 434234353Sdim return (ithread_add_handler(ithd, name, handler, arg, 435234353Sdim (pri * RQ_PPQ) + PI_SOFT, flags, cookiep)); 436234353Sdim} 437234353Sdim 438234353Sdim 439234353Sdim/* 440234353Sdim * Schedule a heavyweight software interrupt process. 
441234353Sdim */ 442234353Sdimvoid 443239462Sdimswi_sched(void *cookie, int flags) 444239462Sdim{ 445239462Sdim struct intrhand *ih = (struct intrhand *)cookie; 446239462Sdim struct ithd *it = ih->ih_ithread; 447239462Sdim int error; 448239462Sdim 449226586Sdim atomic_add_int(&cnt.v_intr, 1); /* one more global interrupt */ 450226586Sdim 451226586Sdim CTR3(KTR_INTR, "swi_sched pid %d(%s) need=%d", 452226586Sdim it->it_td->td_proc->p_pid, it->it_td->td_proc->p_comm, it->it_need); 453226586Sdim 454226586Sdim /* 455226586Sdim * Set ih_need for this handler so that if the ithread is already 456226586Sdim * running it will execute this handler on the next pass. Otherwise, 457226586Sdim * it will execute it the next time it runs. 458226586Sdim */ 459226586Sdim atomic_store_rel_int(&ih->ih_need, 1); 460226586Sdim if (!(flags & SWI_DELAY)) { 461226586Sdim error = ithread_schedule(it, !cold && flags & SWI_SWITCH); 462226586Sdim KASSERT(error == 0, ("stray software interrupt")); 463226586Sdim } 464226586Sdim} 465226586Sdim 466226586Sdim/* 467226586Sdim * This is the main code for interrupt threads. 468226586Sdim */ 469226586Sdimvoid 470234353Sdimithread_loop(void *arg) 471226586Sdim{ 472226586Sdim struct ithd *ithd; /* our thread context */ 473226586Sdim struct intrhand *ih; /* and our interrupt handler chain */ 474226586Sdim struct thread *td; 475226586Sdim struct proc *p; 476226586Sdim 477226586Sdim td = curthread; 478226586Sdim p = td->td_proc; 479226586Sdim ithd = (struct ithd *)arg; /* point to myself */ 480226586Sdim KASSERT(ithd->it_td == td && td->td_ithd == ithd, 481226586Sdim ("%s: ithread and proc linkage out of sync", __func__)); 482226586Sdim 483226586Sdim /* 484226586Sdim * As long as we have interrupts outstanding, go through the 485226586Sdim * list of handlers, giving each one a go at it. 486226586Sdim */ 487226586Sdim for (;;) { 488239462Sdim /* 489239462Sdim * If we are an orphaned thread, then just die. 
490239462Sdim */ 491226586Sdim if (ithd->it_flags & IT_DEAD) { 492226586Sdim CTR3(KTR_INTR, "%s: pid %d: (%s) exiting", __func__, 493226586Sdim p->p_pid, p->p_comm); 494239462Sdim td->td_ithd = NULL; 495239462Sdim mtx_destroy(&ithd->it_lock); 496226586Sdim mtx_lock(&Giant); 497226586Sdim free(ithd, M_ITHREAD); 498226586Sdim kthread_exit(0); 499226586Sdim } 500226586Sdim 501226586Sdim CTR4(KTR_INTR, "%s: pid %d: (%s) need=%d", __func__, 502226586Sdim p->p_pid, p->p_comm, ithd->it_need); 503226586Sdim while (ithd->it_need) { 504226586Sdim /* 505226586Sdim * Service interrupts. If another interrupt 506226586Sdim * arrives while we are running, they will set 507226586Sdim * it_need to denote that we should make 508226586Sdim * another pass. 509226586Sdim */ 510226586Sdim atomic_store_rel_int(&ithd->it_need, 0); 511226586Sdimrestart: 512226586Sdim TAILQ_FOREACH(ih, &ithd->it_handlers, ih_next) { 513226586Sdim if (ithd->it_flags & IT_SOFT && !ih->ih_need) 514226586Sdim continue; 515226586Sdim atomic_store_rel_int(&ih->ih_need, 0); 516239462Sdim CTR6(KTR_INTR, 517226586Sdim "%s: pid %d ih=%p: %p(%p) flg=%x", __func__, 518239462Sdim p->p_pid, (void *)ih, 519239462Sdim (void *)ih->ih_handler, ih->ih_argument, 520226586Sdim ih->ih_flags); 521226586Sdim 522226586Sdim if ((ih->ih_flags & IH_DEAD) != 0) { 523226586Sdim mtx_lock(&ithd->it_lock); 524226586Sdim TAILQ_REMOVE(&ithd->it_handlers, ih, 525226586Sdim ih_next); 526226586Sdim wakeup(ih); 527226586Sdim mtx_unlock(&ithd->it_lock); 528226586Sdim goto restart; 529226586Sdim } 530226586Sdim if ((ih->ih_flags & IH_MPSAFE) == 0) 531226586Sdim mtx_lock(&Giant); 532226586Sdim ih->ih_handler(ih->ih_argument); 533226586Sdim if ((ih->ih_flags & IH_MPSAFE) == 0) 534226586Sdim mtx_unlock(&Giant); 535226586Sdim } 536226586Sdim } 537226586Sdim 538226586Sdim /* 539226586Sdim * Processed all our interrupts. Now get the sched 540226586Sdim * lock. 
This may take a while and it_need may get 541226586Sdim * set again, so we have to check it again. 542234353Sdim */ 543226586Sdim mtx_assert(&Giant, MA_NOTOWNED); 544226586Sdim mtx_lock_spin(&sched_lock); 545226586Sdim if (!ithd->it_need) { 546226586Sdim /* 547226586Sdim * Should we call this earlier in the loop above? 548226586Sdim */ 549226586Sdim if (ithd->it_enable != NULL) 550226586Sdim ithd->it_enable(ithd->it_vector); 551226586Sdim p->p_stat = SWAIT; /* we're idle */ 552226586Sdim p->p_stats->p_ru.ru_nvcsw++; 553226586Sdim CTR2(KTR_INTR, "%s: pid %d: done", __func__, p->p_pid); 554226586Sdim mi_switch(); 555226586Sdim CTR2(KTR_INTR, "%s: pid %d: resumed", __func__, p->p_pid); 556226586Sdim } 557226586Sdim mtx_unlock_spin(&sched_lock); 558226586Sdim } 559226586Sdim} 560226586Sdim 561226586Sdim/* 562226586Sdim * Start standard software interrupt threads 563226586Sdim */ 564226586Sdimstatic void 565226586Sdimstart_softintr(void *dummy) 566226586Sdim{ 567226586Sdim 568226586Sdim if (swi_add(NULL, "net", swi_net, NULL, SWI_NET, 0, &net_ih) || 569226586Sdim swi_add(&clk_ithd, "clock", softclock, NULL, SWI_CLOCK, 570226586Sdim INTR_MPSAFE, &softclock_ih) || 571226586Sdim swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, 0, &vm_ih)) 572226586Sdim panic("died while creating standard software ithreads"); 573226586Sdim 574226586Sdim PROC_LOCK(clk_ithd->it_td->td_proc); 575226586Sdim clk_ithd->it_td->td_proc->p_flag |= P_NOLOAD; 576226586Sdim PROC_UNLOCK(clk_ithd->it_td->td_proc); 577226586Sdim} 578226586SdimSYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL) 579234353Sdim 580234353Sdimvoid 581226586Sdimlegacy_setsoftnet(void) 582226586Sdim{ 583226586Sdim swi_sched(net_ih, SWI_NOSWITCH); 584226586Sdim} 585226586Sdim 586226586Sdim/* 587226586Sdim * XXX: This should really be in the network code somewhere and installed 588226586Sdim * via a SI_SUB_SOFINTR, SI_ORDER_MIDDLE sysinit. 
589226586Sdim */ 590226586Sdimvoid (*netisrs[32]) __P((void)); 591226586Sdimvolatile unsigned int netisr; /* scheduling bits for network */ 592226586Sdim 593226586Sdimint 594226586Sdimregister_netisr(num, handler) 595226586Sdim int num; 596226586Sdim netisr_t *handler; 597226586Sdim{ 598226586Sdim 599226586Sdim if (num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs)) ) { 600226586Sdim printf("register_netisr: bad isr number: %d\n", num); 601226586Sdim return (EINVAL); 602226586Sdim } 603226586Sdim netisrs[num] = handler; 604226586Sdim return (0); 605226586Sdim} 606226586Sdim 607226586Sdimint 608226586Sdimunregister_netisr(num) 609226586Sdim int num; 610226586Sdim{ 611226586Sdim 612234353Sdim if (num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs)) ) { 613226586Sdim printf("unregister_netisr: bad isr number: %d\n", num); 614226586Sdim return (EINVAL); 615226586Sdim } 616226586Sdim netisrs[num] = NULL; 617226586Sdim return (0); 618226586Sdim} 619226586Sdim 620226586Sdimstatic void 621226586Sdimswi_net(void *dummy) 622234353Sdim{ 623234353Sdim u_int bits; 624234353Sdim int i; 625226586Sdim 626226586Sdim bits = atomic_readandclear_int(&netisr); 627226586Sdim while ((i = ffs(bits)) != 0) { 628226586Sdim i--; 629226586Sdim if (netisrs[i] != NULL) 630226586Sdim netisrs[i](); 631226586Sdim else 632226586Sdim printf("swi_net: unregistered isr number: %d.\n", i); 633226586Sdim bits &= ~(1 << i); 634226586Sdim } 635226586Sdim} 636234353Sdim 637226586Sdim/* 638226586Sdim * Sysctls used by systat and others: hw.intrnames and hw.intrcnt. 639234353Sdim * The data for this machine dependent, and the declarations are in machine 640234353Sdim * dependent code. The layout of intrnames and intrcnt however is machine 641234353Sdim * independent. 642226586Sdim * 643234353Sdim * We do not know the length of intrcnt and intrnames at compile time, so 644226586Sdim * calculate things at run time. 
645234353Sdim */ 646226586Sdimstatic int 647226586Sdimsysctl_intrnames(SYSCTL_HANDLER_ARGS) 648234353Sdim{ 649234353Sdim return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames, 650226586Sdim req)); 651226586Sdim} 652239462Sdim 653239462SdimSYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD, 654239462Sdim NULL, 0, sysctl_intrnames, "", "Interrupt Names"); 655239462Sdim 656234353Sdimstatic int 657234353Sdimsysctl_intrcnt(SYSCTL_HANDLER_ARGS) 658234353Sdim{ 659234353Sdim return (sysctl_handle_opaque(oidp, intrcnt, 660234353Sdim (char *)eintrcnt - (char *)intrcnt, req)); 661226586Sdim} 662234353Sdim 663234353SdimSYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD, 664226586Sdim NULL, 0, sysctl_intrcnt, "", "Interrupt Counts"); 665226586Sdim