kern_intr.c revision 170307
/*-
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_intr.c 170307 2007-06-05 00:00:57Z jeff $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
        struct intr_event *it_event;    /* Event this thread services. */
        struct thread *it_thread;       /* Kernel thread. */
        int     it_flags;               /* (j) IT_* flags. */
        int     it_need;                /* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define IT_DEAD         0x000001        /* Thread is waiting to exit.
 */

/* Entropy sample harvested on interrupt delivery. */
struct intr_entropy {
        struct thread *td;
        uintptr_t event;
};

struct intr_event *clk_intr_event;
struct intr_event *tty_intr_event;
void    *softclock_ih;
void    *vm_ih;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

/* Consecutive-interrupt count that triggers storm protection. */
static int intr_storm_threshold = 1000;
TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);

static void     intr_event_update(struct intr_event *ie);
#ifdef INTR_FILTER
static struct intr_thread *ithread_create(const char *name,
                    struct intr_handler *ih);
#else
static struct intr_thread *ithread_create(const char *name);
#endif
static void     ithread_destroy(struct intr_thread *ithread);
static void     ithread_execute_handlers(struct proc *p,
                    struct intr_event *ie);
#ifdef INTR_FILTER
static void     priv_ithread_execute_handler(struct proc *p,
                    struct intr_handler *ih);
#endif
static void     ithread_loop(void *);
static void     ithread_update(struct intr_thread *ithd);
static void     start_softintr(void *);

/* Map an interrupt type to an ithread priority.
 */
u_char
intr_priority(enum intr_type flags)
{
        u_char pri;

        /* Only the type bits participate in the mapping. */
        flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
            INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
        switch (flags) {
        case INTR_TYPE_TTY:
                pri = PI_TTYLOW;
                break;
        case INTR_TYPE_BIO:
                /*
                 * XXX We need to refine this.  BSD/OS distinguishes
                 * between tape and disk priorities.
                 */
                pri = PI_DISK;
                break;
        case INTR_TYPE_NET:
                pri = PI_NET;
                break;
        case INTR_TYPE_CAM:
                pri = PI_DISK;          /* XXX or PI_CAM? */
                break;
        case INTR_TYPE_AV:              /* Audio/video */
                pri = PI_AV;
                break;
        case INTR_TYPE_CLK:
                pri = PI_REALTIME;
                break;
        case INTR_TYPE_MISC:
                pri = PI_DULL;          /* don't care */
                break;
        default:
                /* We didn't specify an interrupt level. */
                panic("intr_priority: no interrupt type in flags");
        }

        return pri;
}

/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
        struct intr_event *ie;
        struct thread *td;
        u_char pri;

        ie = ithd->it_event;
        td = ithd->it_thread;

        /*
         * Determine the overall priority of this event: that of the
         * highest-priority (first) handler, or PRI_MAX_ITHD if none remain.
         */
        if (TAILQ_EMPTY(&ie->ie_handlers))
                pri = PRI_MAX_ITHD;
        else
                pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;

        /* Update name and priority.
 */
        strlcpy(td->td_proc->p_comm, ie->ie_fullname,
            sizeof(td->td_proc->p_comm));
        /* sched_prio() requires the thread lock. */
        thread_lock(td);
        sched_prio(td, pri);
        thread_unlock(td);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
        struct intr_handler *ih;
        char *last;
        int missed, space;

        /* Start off with no entropy and just the name of the event. */
        mtx_assert(&ie->ie_lock, MA_OWNED);
        strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
        ie->ie_flags &= ~IE_ENTROPY;
        missed = 0;
        space = 1;

        /* Run through all the handlers updating values. */
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
                    sizeof(ie->ie_fullname)) {
                        strcat(ie->ie_fullname, " ");
                        strcat(ie->ie_fullname, ih->ih_name);
                        space = 0;
                } else
                        missed++;
                if (ih->ih_flags & IH_ENTROPY)
                        ie->ie_flags |= IE_ENTROPY;
        }

        /*
         * If the handler names were too long, add +'s to indicate missing
         * names.  If we run out of room and still have +'s to add, change
         * the last character from a + to a *.
 */
        last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
        while (missed-- > 0) {
                if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
                        if (*last == '+') {
                                *last = '*';
                                break;
                        } else
                                *last = '+';
                } else if (space) {
                        strcat(ie->ie_fullname, " +");
                        space = 0;
                } else
                        strcat(ie->ie_fullname, "+");
        }

        /*
         * If this event has an ithread, update its priority and
         * name.
         */
        if (ie->ie_thread != NULL)
                ithread_update(ie->ie_thread);
        CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}

#ifndef INTR_FILTER
/*
 * Create a new interrupt event and insert it onto the global event list.
 */
int
intr_event_create(struct intr_event **event, void *source, int flags,
    void (*enable)(void *), const char *fmt, ...)
{
        struct intr_event *ie;
        va_list ap;

        /* The only valid flag during creation is IE_SOFT.
 */
        if ((flags & ~IE_SOFT) != 0)
                return (EINVAL);
        ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
        ie->ie_source = source;
        ie->ie_enable = enable;
        ie->ie_flags = flags;
        TAILQ_INIT(&ie->ie_handlers);
        mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

        /* Format the base event name from the caller's varargs. */
        va_start(ap, fmt);
        vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
        va_end(ap);
        strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
        mtx_pool_lock(mtxpool_sleep, &event_list);
        TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
        mtx_pool_unlock(mtxpool_sleep, &event_list);
        if (event != NULL)
                *event = ie;
        CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
        return (0);
}
#else
/*
 * INTR_FILTER variant: also records the EOI and disable methods for the
 * interrupt source.
 */
int
intr_event_create(struct intr_event **event, void *source, int flags,
    void (*enable)(void *), void (*eoi)(void *), void (*disab)(void *),
    const char *fmt, ...)
{
        struct intr_event *ie;
        va_list ap;

        /* The only valid flag during creation is IE_SOFT.
 */
        if ((flags & ~IE_SOFT) != 0)
                return (EINVAL);
        ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
        ie->ie_source = source;
        ie->ie_enable = enable;
        ie->ie_eoi = eoi;
        ie->ie_disab = disab;
        ie->ie_flags = flags;
        TAILQ_INIT(&ie->ie_handlers);
        mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

        /* Format the base event name from the caller's varargs. */
        va_start(ap, fmt);
        vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
        va_end(ap);
        strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
        mtx_pool_lock(mtxpool_sleep, &event_list);
        TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
        mtx_pool_unlock(mtxpool_sleep, &event_list);
        if (event != NULL)
                *event = ie;
        CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
        return (0);
}
#endif

/*
 * Destroy an interrupt event.  Fails with EBUSY while any handlers are
 * still attached.
 */
int
intr_event_destroy(struct intr_event *ie)
{

        mtx_lock(&ie->ie_lock);
        if (!TAILQ_EMPTY(&ie->ie_handlers)) {
                mtx_unlock(&ie->ie_lock);
                return (EBUSY);
        }
        mtx_pool_lock(mtxpool_sleep, &event_list);
        TAILQ_REMOVE(&event_list, ie, ie_list);
        mtx_pool_unlock(mtxpool_sleep, &event_list);
#ifndef notyet
        if (ie->ie_thread != NULL) {
                ithread_destroy(ie->ie_thread);
                ie->ie_thread = NULL;
        }
#endif
        mtx_unlock(&ie->ie_lock);
        mtx_destroy(&ie->ie_lock);
        free(ie, M_ITHREAD);
        return (0);
}

#ifndef INTR_FILTER
/*
 * Create a kernel thread to service an interrupt event.  The thread is
 * left in an interruptible-wait state until first scheduled.
 */
static struct intr_thread *
ithread_create(const char
 *name)
{
        struct intr_thread *ithd;
        struct thread *td;
        struct proc *p;
        int error;

        ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

        error = kthread_create(ithread_loop, ithd, &p, RFSTOPPED | RFHIGHPID,
            0, "%s", name);
        if (error)
                panic("kthread_create() failed with %d", error);
        td = FIRST_THREAD_IN_PROC(p);   /* XXXKSE */
        thread_lock(td);
        sched_class(td, PRI_ITHD);
        TD_SET_IWAIT(td);
        thread_unlock(td);
        td->td_pflags |= TDP_ITHREAD;
        ithd->it_thread = td;
        CTR2(KTR_INTR, "%s: created %s", __func__, name);
        return (ithd);
}
#else
/*
 * INTR_FILTER variant: the ithread loop receives the intr_handler rather
 * than the intr_thread as its argument.
 */
static struct intr_thread *
ithread_create(const char *name, struct intr_handler *ih)
{
        struct intr_thread *ithd;
        struct thread *td;
        struct proc *p;
        int error;

        ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

        error = kthread_create(ithread_loop, ih, &p, RFSTOPPED | RFHIGHPID,
            0, "%s", name);
        if (error)
                panic("kthread_create() failed with %d", error);
        td = FIRST_THREAD_IN_PROC(p);   /* XXXKSE */
        thread_lock(td);
        sched_class(td, PRI_ITHD);
        TD_SET_IWAIT(td);
        thread_unlock(td);
        td->td_pflags |= TDP_ITHREAD;
        ithd->it_thread = td;
        CTR2(KTR_INTR, "%s: created %s", __func__, name);
        return (ithd);
}
#endif

/*
 * Mark an interrupt thread as dead and wake it so it can exit.
 */
static void
ithread_destroy(struct intr_thread *ithread)
{
        struct thread *td;

        CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
        td = ithread->it_thread;
        thread_lock(td);
        ithread->it_flags |= IT_DEAD;
        /* If the thread is idle in IWAIT, put it on the run queue to die. */
        if (TD_AWAITING_INTR(td)) {
                TD_CLR_IWAIT(td);
                sched_add(td, SRQ_INTR);
        }
        thread_unlock(td);
}

#ifndef INTR_FILTER
/*
 * Attach an interrupt handler to an event, creating the servicing ithread
 * on first need.  Returns the handler via *cookiep for later removal.
 */
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
        struct intr_handler *ih, *temp_ih;
        struct intr_thread *it;

        if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
                return (EINVAL);

        /* Allocate and populate an interrupt handler structure. */
        ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
        ih->ih_filter = filter;
        ih->ih_handler = handler;
        ih->ih_argument = arg;
        ih->ih_name = name;
        ih->ih_event = ie;
        ih->ih_pri = pri;
        if (flags & INTR_EXCL)
                ih->ih_flags = IH_EXCLUSIVE;
        if (flags & INTR_MPSAFE)
                ih->ih_flags |= IH_MPSAFE;
        if (flags & INTR_ENTROPY)
                ih->ih_flags |= IH_ENTROPY;

        /* We can only have one exclusive handler in a event.
 */
        mtx_lock(&ie->ie_lock);
        if (!TAILQ_EMPTY(&ie->ie_handlers)) {
                if ((flags & INTR_EXCL) ||
                    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
                        mtx_unlock(&ie->ie_lock);
                        free(ih, M_ITHREAD);
                        return (EINVAL);
                }
        }

        /* Add the new handler to the event in priority order. */
        TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
                if (temp_ih->ih_pri > ih->ih_pri)
                        break;
        }
        if (temp_ih == NULL)
                TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
        else
                TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
        intr_event_update(ie);

        /*
         * Create a thread if we need one.  IE_ADDING_THREAD serializes
         * concurrent creators; the lock is dropped around ithread_create()
         * since it sleeps.
         */
        while (ie->ie_thread == NULL && handler != NULL) {
                if (ie->ie_flags & IE_ADDING_THREAD)
                        msleep(ie, &ie->ie_lock, 0, "ithread", 0);
                else {
                        ie->ie_flags |= IE_ADDING_THREAD;
                        mtx_unlock(&ie->ie_lock);
                        it = ithread_create("intr: newborn");
                        mtx_lock(&ie->ie_lock);
                        ie->ie_flags &= ~IE_ADDING_THREAD;
                        ie->ie_thread = it;
                        it->it_event = ie;
                        ithread_update(it);
                        wakeup(ie);
                }
        }
        CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
            ie->ie_name);
        mtx_unlock(&ie->ie_lock);

        if (cookiep != NULL)
                *cookiep = ih;
        return (0);
}
#else
/*
 * INTR_FILTER variant: a handler that has both a filter and a handler
 * routine gets its own private ithread.
 */
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
        struct intr_handler
 *ih, *temp_ih;
        struct intr_thread *it;

        if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
                return (EINVAL);

        /* Allocate and populate an interrupt handler structure. */
        ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
        ih->ih_filter = filter;
        ih->ih_handler = handler;
        ih->ih_argument = arg;
        ih->ih_name = name;
        ih->ih_event = ie;
        ih->ih_pri = pri;
        if (flags & INTR_EXCL)
                ih->ih_flags = IH_EXCLUSIVE;
        if (flags & INTR_MPSAFE)
                ih->ih_flags |= IH_MPSAFE;
        if (flags & INTR_ENTROPY)
                ih->ih_flags |= IH_ENTROPY;

        /* We can only have one exclusive handler in a event. */
        mtx_lock(&ie->ie_lock);
        if (!TAILQ_EMPTY(&ie->ie_handlers)) {
                if ((flags & INTR_EXCL) ||
                    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
                        mtx_unlock(&ie->ie_lock);
                        free(ih, M_ITHREAD);
                        return (EINVAL);
                }
        }

        /* Add the new handler to the event in priority order. */
        TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
                if (temp_ih->ih_pri > ih->ih_pri)
                        break;
        }
        if (temp_ih == NULL)
                TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
        else
                TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
        intr_event_update(ie);

        /* For filtered handlers, create a private ithread to run on.
 */
        if (filter != NULL && handler != NULL) {
                mtx_unlock(&ie->ie_lock);
                it = ithread_create("intr: newborn", ih);
                mtx_lock(&ie->ie_lock);
                it->it_event = ie;
                ih->ih_thread = it;
                ithread_update(it); // XXX - do we really need this?!?!?
        } else { /* Create the global per-event thread if we need one. */
                while (ie->ie_thread == NULL && handler != NULL) {
                        if (ie->ie_flags & IE_ADDING_THREAD)
                                msleep(ie, &ie->ie_lock, 0, "ithread", 0);
                        else {
                                ie->ie_flags |= IE_ADDING_THREAD;
                                mtx_unlock(&ie->ie_lock);
                                it = ithread_create("intr: newborn", ih);
                                mtx_lock(&ie->ie_lock);
                                ie->ie_flags &= ~IE_ADDING_THREAD;
                                ie->ie_thread = it;
                                it->it_event = ie;
                                ithread_update(it);
                                wakeup(ie);
                        }
                }
        }
        CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
            ie->ie_name);
        mtx_unlock(&ie->ie_lock);

        if (cookiep != NULL)
                *cookiep = ih;
        return (0);
}
#endif

/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
        struct intr_handler *ih;
        struct intr_event *ie;

        ih = (struct intr_handler *)cookie;
        if (ih == NULL)
                return (NULL);
        ie = ih->ih_event;
        KASSERT(ie != NULL,
            ("interrupt handler \"%s\" has a NULL interrupt event",
            ih->ih_name));
        return (ie->ie_source);
}

#ifndef INTR_FILTER
/*
 * Detach and free the interrupt handler identified by the cookie returned
 * from intr_event_add_handler().
 */
int
intr_event_remove_handler(void *cookie)
{
        struct intr_handler *handler = (struct intr_handler *)cookie;
        struct intr_event *ie;
#ifdef INVARIANTS
        struct intr_handler *ih;
#endif
#ifdef notyet
        int dead;
#endif

        if (handler == NULL)
                return (EINVAL);
        ie = handler->ih_event;
        KASSERT(ie != NULL,
            ("interrupt handler \"%s\" has a NULL interrupt event",
            handler->ih_name));
        mtx_lock(&ie->ie_lock);
        CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
            ie->ie_name);
#ifdef INVARIANTS
        /* Sanity check: the cookie must belong to this event's list. */
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
                if (ih == handler)
                        goto ok;
        mtx_unlock(&ie->ie_lock);
        panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
            ih->ih_name, ie->ie_name);
ok:
#endif
        /*
         * If there is no ithread, then just remove the handler and return.
         * XXX: Note that an INTR_FAST handler might be running on another
         * CPU!
 */
        if (ie->ie_thread == NULL) {
                TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
                mtx_unlock(&ie->ie_lock);
                free(handler, M_ITHREAD);
                return (0);
        }

        /*
         * If the interrupt thread is already running, then just mark this
         * handler as being dead and let the ithread do the actual removal.
         *
         * During a cold boot while cold is set, msleep() does not sleep,
         * so we have to remove the handler here rather than letting the
         * thread do it.
         */
        thread_lock(ie->ie_thread->it_thread);
        if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
                handler->ih_flags |= IH_DEAD;

                /*
                 * Ensure that the thread will process the handler list
                 * again and remove this handler if it has already passed
                 * it on the list.
                 */
                ie->ie_thread->it_need = 1;
        } else
                TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
        thread_unlock(ie->ie_thread->it_thread);
        /* Wait for the ithread to acknowledge the removal. */
        while (handler->ih_flags & IH_DEAD)
                msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
        intr_event_update(ie);
#ifdef notyet
        /*
         * XXX: This could be bad in the case of ppbus(8).  Also, I think
         * this could lead to races of stale data when servicing an
         * interrupt.
 */
        dead = 1;
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if (!(ih->ih_flags & IH_FAST)) {
                        dead = 0;
                        break;
                }
        }
        if (dead) {
                ithread_destroy(ie->ie_thread);
                ie->ie_thread = NULL;
        }
#endif
        mtx_unlock(&ie->ie_lock);
        free(handler, M_ITHREAD);
        return (0);
}

/*
 * Schedule the ithread associated with an interrupt event to run.
 */
int
intr_event_schedule_thread(struct intr_event *ie)
{
        struct intr_entropy entropy;
        struct intr_thread *it;
        struct thread *td;
        struct thread *ctd;
        struct proc *p;

        /*
         * If no ithread or no handlers, then we have a stray interrupt.
         */
        if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
            ie->ie_thread == NULL)
                return (EINVAL);

        ctd = curthread;
        it = ie->ie_thread;
        td = it->it_thread;
        p = td->td_proc;

        /*
         * If any of the handlers for this ithread claim to be good
         * sources of entropy, then gather some.
         */
        if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
                CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
                    p->p_pid, p->p_comm);
                entropy.event = (uintptr_t)ie;
                entropy.td = ctd;
                random_harvest(&entropy, sizeof(entropy), 2, 0,
                    RANDOM_INTERRUPT);
        }

        KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

        /*
         * Set it_need to tell the thread to keep running if it is already
         * running.
Then, lock the thread and see if we actually need to
         * put it on the runqueue.
         */
        it->it_need = 1;
        thread_lock(td);
        if (TD_AWAITING_INTR(td)) {
                CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
                    p->p_comm);
                TD_CLR_IWAIT(td);
                sched_add(td, SRQ_INTR);
        } else {
                CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
                    __func__, p->p_pid, p->p_comm, it->it_need, td->td_state);
        }
        thread_unlock(td);

        return (0);
}
#else
/*
 * INTR_FILTER variant: the handler may own a private ithread in addition
 * to the per-event one.
 */
int
intr_event_remove_handler(void *cookie)
{
        struct intr_handler *handler = (struct intr_handler *)cookie;
        struct intr_event *ie;
        struct intr_thread *it;
#ifdef INVARIANTS
        struct intr_handler *ih;
#endif
#ifdef notyet
        int dead;
#endif

        if (handler == NULL)
                return (EINVAL);
        ie = handler->ih_event;
        KASSERT(ie != NULL,
            ("interrupt handler \"%s\" has a NULL interrupt event",
            handler->ih_name));
        mtx_lock(&ie->ie_lock);
        CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
            ie->ie_name);
#ifdef INVARIANTS
        /* Sanity check: the cookie must belong to this event's list. */
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
                if (ih == handler)
                        goto ok;
        mtx_unlock(&ie->ie_lock);
        panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
            ih->ih_name, ie->ie_name);
ok:
#endif
        /*
         * If there are no ithreads (per event and per handler), then
         * just remove the handler
 and return.
         * XXX: Note that an INTR_FAST handler might be running on another CPU!
         */
        if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
                TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
                mtx_unlock(&ie->ie_lock);
                free(handler, M_ITHREAD);
                return (0);
        }

        /* Private or global ithread? */
        it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;

        /*
         * If the interrupt thread is already running, then just mark this
         * handler as being dead and let the ithread do the actual removal.
         *
         * During a cold boot while cold is set, msleep() does not sleep,
         * so we have to remove the handler here rather than letting the
         * thread do it.
         */
        thread_lock(it->it_thread);
        if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
                handler->ih_flags |= IH_DEAD;

                /*
                 * Ensure that the thread will process the handler list
                 * again and remove this handler if it has already passed
                 * it on the list.
                 */
                it->it_need = 1;
        } else
                TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
        thread_unlock(it->it_thread);
        /* Wait for the ithread to acknowledge the removal. */
        while (handler->ih_flags & IH_DEAD)
                msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
        /*
         * At this point, the handler has been disconnected from the event,
         * so we can kill the private ithread if any.
792169320Spiso */ 793169320Spiso if (handler->ih_thread) { 794169320Spiso ithread_destroy(handler->ih_thread); 795169320Spiso handler->ih_thread = NULL; 796169320Spiso } 797169320Spiso intr_event_update(ie); 798169320Spiso#ifdef notyet 799169320Spiso /* 800169320Spiso * XXX: This could be bad in the case of ppbus(8). Also, I think 801169320Spiso * this could lead to races of stale data when servicing an 802169320Spiso * interrupt. 803169320Spiso */ 804169320Spiso dead = 1; 805169320Spiso TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 806169320Spiso if (handler != NULL) { 807169320Spiso dead = 0; 808169320Spiso break; 809169320Spiso } 810169320Spiso } 811169320Spiso if (dead) { 812169320Spiso ithread_destroy(ie->ie_thread); 813169320Spiso ie->ie_thread = NULL; 814169320Spiso } 815169320Spiso#endif 816169320Spiso mtx_unlock(&ie->ie_lock); 817169320Spiso free(handler, M_ITHREAD); 818169320Spiso return (0); 819169320Spiso} 820169320Spiso 821169320Spisoint 822169320Spisointr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it) 823169320Spiso{ 824169320Spiso struct intr_entropy entropy; 825169320Spiso struct thread *td; 826169320Spiso struct thread *ctd; 827169320Spiso struct proc *p; 828169320Spiso 829169320Spiso /* 830169320Spiso * If no ithread or no handlers, then we have a stray interrupt. 831169320Spiso */ 832169320Spiso if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL) 833169320Spiso return (EINVAL); 834169320Spiso 835169320Spiso ctd = curthread; 836169320Spiso td = it->it_thread; 837169320Spiso p = td->td_proc; 838169320Spiso 839169320Spiso /* 840169320Spiso * If any of the handlers for this ithread claim to be good 841169320Spiso * sources of entropy, then gather some. 
842169320Spiso */ 843169320Spiso if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) { 844169320Spiso CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__, 845169320Spiso p->p_pid, p->p_comm); 846169320Spiso entropy.event = (uintptr_t)ie; 847169320Spiso entropy.td = ctd; 848169320Spiso random_harvest(&entropy, sizeof(entropy), 2, 0, 849169320Spiso RANDOM_INTERRUPT); 850169320Spiso } 851169320Spiso 852169320Spiso KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name)); 853169320Spiso 854169320Spiso /* 855169320Spiso * Set it_need to tell the thread to keep running if it is already 856170307Sjeff * running. Then, lock the thread and see if we actually need to 857170307Sjeff * put it on the runqueue. 858169320Spiso */ 859169320Spiso it->it_need = 1; 860170307Sjeff thread_lock(td); 861169320Spiso if (TD_AWAITING_INTR(td)) { 862169320Spiso CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid, 863169320Spiso p->p_comm); 864169320Spiso TD_CLR_IWAIT(td); 865169320Spiso sched_add(td, SRQ_INTR); 866169320Spiso } else { 867169320Spiso CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d", 868169320Spiso __func__, p->p_pid, p->p_comm, it->it_need, td->td_state); 869169320Spiso } 870170307Sjeff thread_unlock(td); 871169320Spiso 872169320Spiso return (0); 873169320Spiso} 874169320Spiso#endif 875169320Spiso 876151699Sjhb/* 877151699Sjhb * Add a software interrupt handler to a specified event. If a given event 878151699Sjhb * is not specified, then a new event is created. 879151699Sjhb */ 88072759Sjhbint 881151658Sjhbswi_add(struct intr_event **eventp, const char *name, driver_intr_t handler, 88272237Sjhb void *arg, int pri, enum intr_type flags, void **cookiep) 88372237Sjhb{ 884151658Sjhb struct intr_event *ie; 88572237Sjhb int error; 88666698Sjhb 887169320Spiso if (flags & INTR_ENTROPY) 88872759Sjhb return (EINVAL); 88972759Sjhb 890151658Sjhb ie = (eventp != NULL) ? 
*eventp : NULL; 89166698Sjhb 892151658Sjhb if (ie != NULL) { 893151658Sjhb if (!(ie->ie_flags & IE_SOFT)) 894151658Sjhb return (EINVAL); 89572759Sjhb } else { 896169320Spiso#ifdef INTR_FILTER 897169320Spiso error = intr_event_create(&ie, NULL, IE_SOFT, 898169320Spiso NULL, NULL, NULL, "swi%d:", pri); 899169320Spiso#else 900169320Spiso error = intr_event_create(&ie, NULL, IE_SOFT, 901169320Spiso NULL, "swi%d:", pri); 902169320Spiso#endif 90367551Sjhb if (error) 90472237Sjhb return (error); 905151658Sjhb if (eventp != NULL) 906151658Sjhb *eventp = ie; 90766698Sjhb } 908166901Spiso return (intr_event_add_handler(ie, name, NULL, handler, arg, 90972376Sjake (pri * RQ_PPQ) + PI_SOFT, flags, cookiep)); 910134791Sjulian /* XXKSE.. think of a better way to get separate queues */ 91166698Sjhb} 91266698Sjhb 91366698Sjhb/* 914151658Sjhb * Schedule a software interrupt thread. 91566698Sjhb */ 91667551Sjhbvoid 91772237Sjhbswi_sched(void *cookie, int flags) 91866698Sjhb{ 919151658Sjhb struct intr_handler *ih = (struct intr_handler *)cookie; 920151658Sjhb struct intr_event *ie = ih->ih_event; 92172759Sjhb int error; 92266698Sjhb 923151658Sjhb CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name, 924151658Sjhb ih->ih_need); 925151658Sjhb 92667551Sjhb /* 92772759Sjhb * Set ih_need for this handler so that if the ithread is already 92872759Sjhb * running it will execute this handler on the next pass. Otherwise, 92972759Sjhb * it will execute it the next time it runs. 
	 */
	atomic_store_rel_int(&ih->ih_need, 1);

	if (!(flags & SWI_DELAY)) {
		PCPU_INC(cnt.v_soft);
#ifdef INTR_FILTER
		error = intr_event_schedule_thread(ie, ie->ie_thread);
#else
		error = intr_event_schedule_thread(ie);
#endif
		KASSERT(error == 0, ("stray software interrupt"));
	}
}

/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

	return (intr_event_remove_handler(cookie));
}

#ifdef INTR_FILTER
/*
 * Run a single handler from its private ithread (INTR_FILTER only).
 * If the handler has been marked IH_DEAD by intr_event_remove_handler(),
 * reap it (remove it from the event's list and wake the remover) instead
 * of running it.  Giant is taken around handlers not marked IH_MPSAFE.
 */
static void
priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
{
	struct intr_event *ie;

	ie = ih->ih_event;
	/*
	 * If this handler is marked for death, remove it from
	 * the list of handlers and wake up the sleeper.
	 */
	if (ih->ih_flags & IH_DEAD) {
		mtx_lock(&ie->ie_lock);
		TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
		ih->ih_flags &= ~IH_DEAD;
		wakeup(ih);
		mtx_unlock(&ie->ie_lock);
		return;
	}

	/* Execute this handler. */
	CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
	    __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
	    ih->ih_name, ih->ih_flags);

	if (!(ih->ih_flags & IH_MPSAFE))
		mtx_lock(&Giant);
	ih->ih_handler(ih->ih_argument);
	if (!(ih->ih_flags & IH_MPSAFE))
		mtx_unlock(&Giant);
}
#endif

/*
 * Run all of an event's handlers from its ithread: reap IH_DEAD handlers,
 * skip filter-only handlers, and for software events run only handlers
 * whose ih_need flag is set.  Afterwards apply interrupt storm throttling
 * and re-enable the interrupt source.
 */
static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{
	struct intr_handler *ih, *ihn;

	/* Interrupt handlers should not sleep. */
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_NO_SLEEPING();
	TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {

		/*
		 * If this handler is marked for death, remove it from
		 * the list of handlers and wake up the sleeper.
		 */
		if (ih->ih_flags & IH_DEAD) {
			mtx_lock(&ie->ie_lock);
			TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
			ih->ih_flags &= ~IH_DEAD;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
			continue;
		}

		/* Skip filter only handlers */
		if (ih->ih_handler == NULL)
			continue;

		/*
		 * For software interrupt threads, we only execute
		 * handlers that have their need flag set.  Hardware
		 * interrupt threads always invoke all of their handlers.
		 */
		if (ie->ie_flags & IE_SOFT) {
			if (!ih->ih_need)
				continue;
			else
				atomic_store_rel_int(&ih->ih_need, 0);
		}

		/* Execute this handler. */
		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
		    __func__, p->p_pid, (void *)ih->ih_handler,
		    ih->ih_argument, ih->ih_name, ih->ih_flags);

		/* Non-MPSAFE handlers still run under Giant. */
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_lock(&Giant);
		ih->ih_handler(ih->ih_argument);
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_unlock(&Giant);
	}
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_SLEEPING_OK();

	/*
	 * Interrupt storm handling:
	 *
	 * If this interrupt source is currently storming, then throttle
	 * it to only fire the handler once per clock tick.
	 *
	 * If this interrupt source is not currently storming, but the
	 * number of back to back interrupts exceeds the storm threshold,
	 * then enter storming mode.
	 */
	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
	    !(ie->ie_flags & IE_SOFT)) {
		/* Report the message only once every second. */
		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
			printf(
	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
			    ie->ie_name);
		}
		pause("istorm", 1);
	} else
		ie->ie_count++;

	/*
	 * Now that all the handlers have had a chance to run, reenable
	 * the interrupt source.
	 */
	if (ie->ie_enable != NULL)
		ie->ie_enable(ie->ie_source);
}

#ifndef INTR_FILTER
/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	ithd = (struct intr_thread *)arg;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, p->p_comm);
			free(ithd, M_ITHREAD);
			/* kthread_exit() does not return. */
			kthread_exit(0);
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 */
		while (ithd->it_need) {
			/*
			 * This might need a full read and write barrier
			 * to make sure that this write posts before any
			 * of the memory or device accesses in the
			 * handlers.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
			ithread_execute_handlers(p, ie);
		}
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL, NULL);
		}
		thread_unlock(td);
	}
}
#else
/*
 * This is the main code for interrupt threads (INTR_FILTER version).
 * The argument is the handler this thread serves: a handler with a
 * private ithread services only itself, otherwise the event's shared
 * ithread services every handler on the event.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_handler *ih;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	int priv;

	td = curthread;
	p = td->td_proc;
	ih = (struct intr_handler *)arg;
	priv = (ih->ih_thread != NULL) ? 1 : 0;
	ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, p->p_comm);
			free(ithd, M_ITHREAD);
			/* kthread_exit() does not return. */
			kthread_exit(0);
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 */
		while (ithd->it_need) {
			/*
			 * This might need a full read and write barrier
			 * to make sure that this write posts before any
			 * of the memory or device accesses in the
			 * handlers.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
			if (priv)
				priv_ithread_execute_handler(p, ih);
			else
				ithread_execute_handlers(p, ie);
		}
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL, NULL);
		}
		thread_unlock(td);
	}
}

/*
 * Main loop for interrupt filter.
 *
 * Some architectures (i386, amd64 and arm) require the optional frame
 * parameter, and use it as the main argument for fast handler execution
 * when ih_argument == NULL.
 *
 * Return value:
 * o FILTER_STRAY:              No filter recognized the event, and no
 *                              filter-less handler is registered on this
 *                              line.
 * o FILTER_HANDLED:            A filter claimed the event and served it.
1227169320Spiso * o FILTER_SCHEDULE_THREAD: No filter claimed the event, but there's at 1228169320Spiso * least one filter-less handler on this line. 1229169320Spiso * o FILTER_HANDLED | 1230169320Spiso * FILTER_SCHEDULE_THREAD: A filter claimed the event, and asked for 1231169320Spiso * scheduling the per-handler ithread. 1232169320Spiso * 1233169320Spiso * In case an ithread has to be scheduled, in *ithd there will be a 1234169320Spiso * pointer to a struct intr_thread containing the thread to be 1235169320Spiso * scheduled. 1236169320Spiso */ 1237169320Spiso 1238169320Spisoint 1239169320Spisointr_filter_loop(struct intr_event *ie, struct trapframe *frame, 1240169320Spiso struct intr_thread **ithd) 1241169320Spiso{ 1242169320Spiso struct intr_handler *ih; 1243169320Spiso void *arg; 1244169320Spiso int ret, thread_only; 1245169320Spiso 1246169320Spiso ret = 0; 1247169320Spiso thread_only = 0; 1248169320Spiso TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 1249169320Spiso /* 1250169320Spiso * Execute fast interrupt handlers directly. 1251169320Spiso * To support clock handlers, if a handler registers 1252169320Spiso * with a NULL argument, then we pass it a pointer to 1253169320Spiso * a trapframe as its argument. 1254169320Spiso */ 1255169320Spiso arg = ((ih->ih_argument == NULL) ? 
frame : ih->ih_argument); 1256169320Spiso 1257169320Spiso CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__, 1258169320Spiso ih->ih_filter, ih->ih_handler, arg, ih->ih_name); 1259169320Spiso 1260169320Spiso if (ih->ih_filter != NULL) 1261169320Spiso ret = ih->ih_filter(arg); 1262169320Spiso else { 1263169320Spiso thread_only = 1; 1264169320Spiso continue; 1265169320Spiso } 1266169320Spiso 1267169320Spiso if (ret & FILTER_STRAY) 1268169320Spiso continue; 1269169320Spiso else { 1270169320Spiso *ithd = ih->ih_thread; 1271169320Spiso return (ret); 1272169320Spiso } 1273169320Spiso } 1274169320Spiso 1275169320Spiso /* 1276169320Spiso * No filters handled the interrupt and we have at least 1277169320Spiso * one handler without a filter. In this case, we schedule 1278169320Spiso * all of the filter-less handlers to run in the ithread. 1279169320Spiso */ 1280169320Spiso if (thread_only) { 1281169320Spiso *ithd = ie->ie_thread; 1282169320Spiso return (FILTER_SCHEDULE_THREAD); 1283169320Spiso } 1284169320Spiso return (FILTER_STRAY); 1285169320Spiso} 1286169320Spiso 1287169320Spiso/* 1288169320Spiso * Main interrupt handling body. 1289169320Spiso * 1290169320Spiso * Input: 1291169320Spiso * o ie: the event connected to this interrupt. 1292169320Spiso * o frame: some archs (i.e. i386) pass a frame to some. 1293169320Spiso * handlers as their main argument. 1294169320Spiso * Return value: 1295169320Spiso * o 0: everything ok. 1296169320Spiso * o EINVAL: stray interrupt. 
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_thread *ithd;
	struct thread *td;
	int thread;

	ithd = NULL;
	td = curthread;

	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	td->td_intr_nesting_level++;
	thread = 0;
	/* Run the filters with preemption disabled. */
	critical_enter();
	thread = intr_filter_loop(ie, frame, &ithd);

	/*
	 * If the interrupt was fully served, send it an EOI but leave
	 * it unmasked. Otherwise, mask the source as well as sending
	 * it an EOI.
	 */
	if (thread & FILTER_HANDLED) {
		if (ie->ie_eoi != NULL)
			ie->ie_eoi(ie->ie_source);
	} else {
		/* ie_disab appears to both mask and EOI the source. */
		if (ie->ie_disab != NULL)
			ie->ie_disab(ie->ie_source);
	}
	critical_exit();

	/* Interrupt storm logic */
	if (thread & FILTER_STRAY) {
		ie->ie_count++;
		if (ie->ie_count < intr_storm_threshold)
			printf("Interrupt stray detection not present\n");
	}

	/* Schedule an ithread if needed. */
	if (thread & FILTER_SCHEDULE_THREAD) {
		if (intr_event_schedule_thread(ie, ithd) != 0)
			panic("%s: impossible stray interrupt", __func__);
	}
	td->td_intr_nesting_level--;
	return (0);
}
#endif

#ifdef DDB
/*
 * Dump details about an interrupt handler
 */
static void
db_dump_intrhand(struct intr_handler *ih)
{
	int comma;

	db_printf("\t%-10s ", ih->ih_name);
	/* Print a 4-character class tag derived from the priority. */
	switch (ih->ih_pri) {
	case PI_REALTIME:
		db_printf("CLK ");
		break;
	case PI_AV:
		db_printf("AV  ");
		break;
	case PI_TTYHIGH:
	case PI_TTYLOW:
		db_printf("TTY ");
		break;
	case PI_TAPE:
		db_printf("TAPE");
		break;
	case PI_NET:
		db_printf("NET ");
		break;
	case PI_DISK:
	case PI_DISKLOW:
		db_printf("DISK");
		break;
	case PI_DULL:
		db_printf("DULL");
		break;
	default:
		if (ih->ih_pri >= PI_SOFT)
			db_printf("SWI ");
		else
			db_printf("%4u", ih->ih_pri);
		break;
	}
	db_printf(" ");
	db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
	db_printf("(%p)", ih->ih_argument);
	/* Print any set flags as a comma-separated list in braces. */
	if (ih->ih_need ||
	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
	    IH_MPSAFE)) != 0) {
		db_printf(" {");
		comma = 0;
		if (ih->ih_flags & IH_EXCLUSIVE) {
			if (comma)
				db_printf(", ");
			db_printf("EXCL");
			comma = 1;
		}
		if (ih->ih_flags & IH_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ih->ih_flags & IH_DEAD) {
			if (comma)
				db_printf(", ");
			db_printf("DEAD");
			comma = 1;
		}
		if (ih->ih_flags & IH_MPSAFE) {
			if (comma)
				db_printf(", ");
			db_printf("MPSAFE");
			comma = 1;
		}
		if (ih->ih_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");
}

/*
 * Dump details about an event.
 */
void
db_dump_intr_event(struct intr_event *ie, int handlers)
{
	struct intr_handler *ih;
	struct intr_thread *it;
	int comma;

	db_printf("%s ", ie->ie_fullname);
	it = ie->ie_thread;
	if (it != NULL)
		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
	else
		db_printf("(no thread)");
	if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
	    (it != NULL && it->it_need)) {
		db_printf(" {");
		comma = 0;
		if (ie->ie_flags & IE_SOFT) {
			db_printf("SOFT");
			comma = 1;
		}
		if (ie->ie_flags & IE_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ie->ie_flags & IE_ADDING_THREAD) {
			if (comma)
				db_printf(", ");
			db_printf("ADDING_THREAD");
			comma = 1;
		}
		if (it != NULL && it->it_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");

	if (handlers)
		TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
			db_dump_intrhand(ih);
}

/*
 * Dump data about interrupt handlers
 */
DB_SHOW_COMMAND(intr, db_show_intr)
{
	struct intr_event *ie;
	int all, verbose;

	/* 'v' also dumps each handler; 'a' includes empty events. */
	verbose = index(modif, 'v') != NULL;
	all = index(modif, 'a') != NULL;
	TAILQ_FOREACH(ie, &event_list, ie_list) {
		if (!all && TAILQ_EMPTY(&ie->ie_handlers))
			continue;
		db_dump_intr_event(ie, verbose);
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */

/*
 * Start standard software interrupt threads
 */
static void
start_softintr(void *dummy)
{
	struct proc *p;

	if (swi_add(&clk_intr_event, "clock", softclock, NULL, SWI_CLOCK,
		INTR_MPSAFE, &softclock_ih) ||
	    swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
		panic("died while creating standard software ithreads");

	/* Keep the clock swi thread out of the load average. */
	p = clk_intr_event->ie_thread->it_thread->td_proc;
	PROC_LOCK(p);
	p->p_flag |= P_NOLOAD;
	PROC_UNLOCK(p);
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL)
1520151658Sjhb/* 152177582Stmm * Sysctls used by systat and others: hw.intrnames and hw.intrcnt. 152277582Stmm * The data for this machine dependent, and the declarations are in machine 152377582Stmm * dependent code. The layout of intrnames and intrcnt however is machine 152477582Stmm * independent. 152577582Stmm * 152677582Stmm * We do not know the length of intrcnt and intrnames at compile time, so 152777582Stmm * calculate things at run time. 152877582Stmm */ 152977582Stmmstatic int 153077582Stmmsysctl_intrnames(SYSCTL_HANDLER_ARGS) 153177582Stmm{ 1532151658Sjhb return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames, 153377582Stmm req)); 153477582Stmm} 153577582Stmm 153677582StmmSYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD, 153777582Stmm NULL, 0, sysctl_intrnames, "", "Interrupt Names"); 153877582Stmm 153977582Stmmstatic int 154077582Stmmsysctl_intrcnt(SYSCTL_HANDLER_ARGS) 154177582Stmm{ 1542151658Sjhb return (sysctl_handle_opaque(oidp, intrcnt, 154377582Stmm (char *)eintrcnt - (char *)intrcnt, req)); 154477582Stmm} 154577582Stmm 154677582StmmSYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD, 154777582Stmm NULL, 0, sysctl_intrcnt, "", "Interrupt Counts"); 1548121482Sjhb 1549121482Sjhb#ifdef DDB 1550121482Sjhb/* 1551121482Sjhb * DDB command to dump the interrupt statistics. 1552121482Sjhb */ 1553121482SjhbDB_SHOW_COMMAND(intrcnt, db_show_intrcnt) 1554121482Sjhb{ 1555121482Sjhb u_long *i; 1556121482Sjhb char *cp; 1557121482Sjhb 1558121482Sjhb cp = intrnames; 1559160312Sjhb for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) { 1560121482Sjhb if (*cp == '\0') 1561121482Sjhb break; 1562121482Sjhb if (*i != 0) 1563121482Sjhb db_printf("%s\t%lu\n", cp, *i); 1564121482Sjhb cp += strlen(cp) + 1; 1565121482Sjhb } 1566121482Sjhb} 1567121482Sjhb#endif 1568