kern_intr.c revision 177181
1139804Simp/*- 226156Sse * Copyright (c) 1997, Stefan Esser <se@freebsd.org> 326156Sse * All rights reserved. 426156Sse * 526156Sse * Redistribution and use in source and binary forms, with or without 626156Sse * modification, are permitted provided that the following conditions 726156Sse * are met: 826156Sse * 1. Redistributions of source code must retain the above copyright 926156Sse * notice unmodified, this list of conditions, and the following 1026156Sse * disclaimer. 1126156Sse * 2. Redistributions in binary form must reproduce the above copyright 1226156Sse * notice, this list of conditions and the following disclaimer in the 1326156Sse * documentation and/or other materials provided with the distribution. 1426156Sse * 1526156Sse * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 1626156Sse * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 1726156Sse * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 1826156Sse * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 1926156Sse * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 2026156Sse * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 2126156Sse * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 2226156Sse * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 2326156Sse * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 2426156Sse * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
2526156Sse */ 2626156Sse 27116182Sobrien#include <sys/cdefs.h> 28116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/kern_intr.c 177181 2008-03-14 19:41:48Z jhb $"); 2936887Sdfr 30121482Sjhb#include "opt_ddb.h" 31121482Sjhb 3241059Speter#include <sys/param.h> 3365822Sjhb#include <sys/bus.h> 34110860Salfred#include <sys/conf.h> 3565822Sjhb#include <sys/rtprio.h> 3641059Speter#include <sys/systm.h> 3766698Sjhb#include <sys/interrupt.h> 3866698Sjhb#include <sys/kernel.h> 3966698Sjhb#include <sys/kthread.h> 4066698Sjhb#include <sys/ktr.h> 41130128Sbde#include <sys/limits.h> 4274914Sjhb#include <sys/lock.h> 4326156Sse#include <sys/malloc.h> 4467365Sjhb#include <sys/mutex.h> 4566698Sjhb#include <sys/proc.h> 4672759Sjhb#include <sys/random.h> 4772237Sjhb#include <sys/resourcevar.h> 48139451Sjhb#include <sys/sched.h> 49177181Sjhb#include <sys/smp.h> 5077582Stmm#include <sys/sysctl.h> 5166698Sjhb#include <sys/unistd.h> 5266698Sjhb#include <sys/vmmeter.h> 5366698Sjhb#include <machine/atomic.h> 5466698Sjhb#include <machine/cpu.h> 5567551Sjhb#include <machine/md_var.h> 5672237Sjhb#include <machine/stdarg.h> 57121482Sjhb#ifdef DDB 58121482Sjhb#include <ddb/ddb.h> 59121482Sjhb#include <ddb/db_sym.h> 60121482Sjhb#endif 6126156Sse 62151658Sjhb/* 63151658Sjhb * Describe an interrupt thread. There is one of these per interrupt event. 64151658Sjhb */ 65151658Sjhbstruct intr_thread { 66151658Sjhb struct intr_event *it_event; 67151658Sjhb struct thread *it_thread; /* Kernel thread. */ 68151658Sjhb int it_flags; /* (j) IT_* flags. */ 69151658Sjhb int it_need; /* Needs service. */ 7072759Sjhb}; 7172759Sjhb 72151658Sjhb/* Interrupt thread flags kept in it_flags */ 73151658Sjhb#define IT_DEAD 0x000001 /* Thread is waiting to exit. 
*/ 74151658Sjhb 75151658Sjhbstruct intr_entropy { 76151658Sjhb struct thread *td; 77151658Sjhb uintptr_t event; 78151658Sjhb}; 79151658Sjhb 80151658Sjhbstruct intr_event *clk_intr_event; 81151658Sjhbstruct intr_event *tty_intr_event; 82128339Sbdevoid *softclock_ih; 83128339Sbdevoid *vm_ih; 84173004Sjulianstruct proc *intrproc; 8538244Sbde 8672237Sjhbstatic MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads"); 8772237Sjhb 88168850Snjlstatic int intr_storm_threshold = 1000; 89128331SjhbTUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold); 90128331SjhbSYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW, 91128331Sjhb &intr_storm_threshold, 0, 92128339Sbde "Number of consecutive interrupts before storm protection is enabled"); 93151658Sjhbstatic TAILQ_HEAD(, intr_event) event_list = 94151658Sjhb TAILQ_HEAD_INITIALIZER(event_list); 95128331Sjhb 96151658Sjhbstatic void intr_event_update(struct intr_event *ie); 97169320Spiso#ifdef INTR_FILTER 98169320Spisostatic struct intr_thread *ithread_create(const char *name, 99169320Spiso struct intr_handler *ih); 100169320Spiso#else 101151658Sjhbstatic struct intr_thread *ithread_create(const char *name); 102169320Spiso#endif 103151658Sjhbstatic void ithread_destroy(struct intr_thread *ithread); 104169320Spisostatic void ithread_execute_handlers(struct proc *p, 105169320Spiso struct intr_event *ie); 106169320Spiso#ifdef INTR_FILTER 107169320Spisostatic void priv_ithread_execute_handler(struct proc *p, 108169320Spiso struct intr_handler *ih); 109169320Spiso#endif 110128339Sbdestatic void ithread_loop(void *); 111151658Sjhbstatic void ithread_update(struct intr_thread *ithd); 112128339Sbdestatic void start_softintr(void *); 113128339Sbde 114165124Sjhb/* Map an interrupt type to an ithread priority. 
*/ 11572237Sjhbu_char 116151658Sjhbintr_priority(enum intr_type flags) 11765822Sjhb{ 11872237Sjhb u_char pri; 11965822Sjhb 12072237Sjhb flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET | 12178365Speter INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV); 12265822Sjhb switch (flags) { 12372237Sjhb case INTR_TYPE_TTY: 12465822Sjhb pri = PI_TTYLOW; 12565822Sjhb break; 12665822Sjhb case INTR_TYPE_BIO: 12765822Sjhb /* 12865822Sjhb * XXX We need to refine this. BSD/OS distinguishes 12965822Sjhb * between tape and disk priorities. 13065822Sjhb */ 13165822Sjhb pri = PI_DISK; 13265822Sjhb break; 13365822Sjhb case INTR_TYPE_NET: 13465822Sjhb pri = PI_NET; 13565822Sjhb break; 13665822Sjhb case INTR_TYPE_CAM: 13765822Sjhb pri = PI_DISK; /* XXX or PI_CAM? */ 13865822Sjhb break; 13978365Speter case INTR_TYPE_AV: /* Audio/video */ 14078365Speter pri = PI_AV; 14178365Speter break; 14272237Sjhb case INTR_TYPE_CLK: 14372237Sjhb pri = PI_REALTIME; 14472237Sjhb break; 14565822Sjhb case INTR_TYPE_MISC: 14665822Sjhb pri = PI_DULL; /* don't care */ 14765822Sjhb break; 14865822Sjhb default: 14972237Sjhb /* We didn't specify an interrupt level. */ 150151658Sjhb panic("intr_priority: no interrupt type in flags"); 15165822Sjhb } 15265822Sjhb 15365822Sjhb return pri; 15465822Sjhb} 15565822Sjhb 15672237Sjhb/* 157151658Sjhb * Update an ithread based on the associated intr_event. 15872237Sjhb */ 15972237Sjhbstatic void 160151658Sjhbithread_update(struct intr_thread *ithd) 16172237Sjhb{ 162151658Sjhb struct intr_event *ie; 16383366Sjulian struct thread *td; 164151658Sjhb u_char pri; 16567551Sjhb 166151658Sjhb ie = ithd->it_event; 167151658Sjhb td = ithd->it_thread; 16872237Sjhb 169151658Sjhb /* Determine the overall priority of this event. */ 170151658Sjhb if (TAILQ_EMPTY(&ie->ie_handlers)) 171151658Sjhb pri = PRI_MAX_ITHD; 172151658Sjhb else 173151658Sjhb pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri; 174105354Srobert 175151658Sjhb /* Update name and priority. 
*/ 176173004Sjulian strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name)); 177170307Sjeff thread_lock(td); 178151658Sjhb sched_prio(td, pri); 179170307Sjeff thread_unlock(td); 180151658Sjhb} 181151658Sjhb 182151658Sjhb/* 183151658Sjhb * Regenerate the full name of an interrupt event and update its priority. 184151658Sjhb */ 185151658Sjhbstatic void 186151658Sjhbintr_event_update(struct intr_event *ie) 187151658Sjhb{ 188151658Sjhb struct intr_handler *ih; 189151658Sjhb char *last; 190151658Sjhb int missed, space; 191151658Sjhb 192151658Sjhb /* Start off with no entropy and just the name of the event. */ 193151658Sjhb mtx_assert(&ie->ie_lock, MA_OWNED); 194151658Sjhb strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); 195151658Sjhb ie->ie_flags &= ~IE_ENTROPY; 196137267Sjhb missed = 0; 197151658Sjhb space = 1; 198151658Sjhb 199151658Sjhb /* Run through all the handlers updating values. */ 200151658Sjhb TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 201151658Sjhb if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 < 202151658Sjhb sizeof(ie->ie_fullname)) { 203151658Sjhb strcat(ie->ie_fullname, " "); 204151658Sjhb strcat(ie->ie_fullname, ih->ih_name); 205151658Sjhb space = 0; 206137267Sjhb } else 207137267Sjhb missed++; 208137267Sjhb if (ih->ih_flags & IH_ENTROPY) 209151658Sjhb ie->ie_flags |= IE_ENTROPY; 210137267Sjhb } 211151658Sjhb 212151658Sjhb /* 213151658Sjhb * If the handler names were too long, add +'s to indicate missing 214151658Sjhb * names. If we run out of room and still have +'s to add, change 215151658Sjhb * the last character from a + to a *. 
216151658Sjhb */ 217151658Sjhb last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2]; 218137267Sjhb while (missed-- > 0) { 219151658Sjhb if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) { 220151658Sjhb if (*last == '+') { 221151658Sjhb *last = '*'; 222151658Sjhb break; 223151658Sjhb } else 224151658Sjhb *last = '+'; 225151658Sjhb } else if (space) { 226151658Sjhb strcat(ie->ie_fullname, " +"); 227151658Sjhb space = 0; 22872237Sjhb } else 229151658Sjhb strcat(ie->ie_fullname, "+"); 23072237Sjhb } 231151658Sjhb 232151658Sjhb /* 233151658Sjhb * If this event has an ithread, update it's priority and 234151658Sjhb * name. 235151658Sjhb */ 236151658Sjhb if (ie->ie_thread != NULL) 237151658Sjhb ithread_update(ie->ie_thread); 238151658Sjhb CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname); 23972237Sjhb} 24072237Sjhb 241169320Spiso#ifndef INTR_FILTER 24272237Sjhbint 243151658Sjhbintr_event_create(struct intr_event **event, void *source, int flags, 244177181Sjhb void (*enable)(void *), int (*assign_cpu)(void *, u_char), const char *fmt, 245177181Sjhb ...) 24666698Sjhb{ 247151658Sjhb struct intr_event *ie; 24872237Sjhb va_list ap; 24972237Sjhb 250151658Sjhb /* The only valid flag during creation is IE_SOFT. 
*/ 251151658Sjhb if ((flags & ~IE_SOFT) != 0) 25272759Sjhb return (EINVAL); 253151658Sjhb ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO); 254151658Sjhb ie->ie_source = source; 255151658Sjhb ie->ie_enable = enable; 256177181Sjhb ie->ie_assign_cpu = assign_cpu; 257151658Sjhb ie->ie_flags = flags; 258177181Sjhb ie->ie_cpu = NOCPU; 259151658Sjhb TAILQ_INIT(&ie->ie_handlers); 260151658Sjhb mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF); 26172759Sjhb 26272237Sjhb va_start(ap, fmt); 263151658Sjhb vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap); 26472237Sjhb va_end(ap); 265151658Sjhb strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); 266151658Sjhb mtx_pool_lock(mtxpool_sleep, &event_list); 267151658Sjhb TAILQ_INSERT_TAIL(&event_list, ie, ie_list); 268151658Sjhb mtx_pool_unlock(mtxpool_sleep, &event_list); 269151658Sjhb if (event != NULL) 270151658Sjhb *event = ie; 271151658Sjhb CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name); 272151658Sjhb return (0); 273151658Sjhb} 274169320Spiso#else 275169320Spisoint 276169320Spisointr_event_create(struct intr_event **event, void *source, int flags, 277169320Spiso void (*enable)(void *), void (*eoi)(void *), void (*disab)(void *), 278177181Sjhb int (*assign_cpu)(void *, u_char), const char *fmt, ...) 279169320Spiso{ 280169320Spiso struct intr_event *ie; 281169320Spiso va_list ap; 28272237Sjhb 283169320Spiso /* The only valid flag during creation is IE_SOFT. 
*/ 284169320Spiso if ((flags & ~IE_SOFT) != 0) 285169320Spiso return (EINVAL); 286169320Spiso ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO); 287169320Spiso ie->ie_source = source; 288169320Spiso ie->ie_enable = enable; 289177181Sjhb ie->ie_assign_cpu = assign_cpu; 290169320Spiso ie->ie_eoi = eoi; 291169320Spiso ie->ie_disab = disab; 292169320Spiso ie->ie_flags = flags; 293177181Sjhb ie->ie_cpu = NOCPU; 294169320Spiso TAILQ_INIT(&ie->ie_handlers); 295169320Spiso mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF); 296169320Spiso 297169320Spiso va_start(ap, fmt); 298169320Spiso vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap); 299169320Spiso va_end(ap); 300169320Spiso strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); 301169320Spiso mtx_pool_lock(mtxpool_sleep, &event_list); 302169320Spiso TAILQ_INSERT_TAIL(&event_list, ie, ie_list); 303169320Spiso mtx_pool_unlock(mtxpool_sleep, &event_list); 304169320Spiso if (event != NULL) 305169320Spiso *event = ie; 306169320Spiso CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name); 307169320Spiso return (0); 308169320Spiso} 309169320Spiso#endif 310169320Spiso 311177181Sjhb/* 312177181Sjhb * Bind an interrupt event to the specified CPU. Note that not all 313177181Sjhb * platforms support binding an interrupt to a CPU. For those 314177181Sjhb * platforms this request will fail. For supported platforms, any 315177181Sjhb * associated ithreads as well as the primary interrupt context will 316177181Sjhb * be bound to the specificed CPU. Using a cpu id of NOCPU unbinds 317177181Sjhb * the interrupt event. 318177181Sjhb */ 319151658Sjhbint 320177181Sjhbintr_event_bind(struct intr_event *ie, u_char cpu) 321177181Sjhb{ 322177181Sjhb struct thread *td; 323177181Sjhb int error; 324177181Sjhb 325177181Sjhb /* Need a CPU to bind to. 
*/ 326177181Sjhb if (cpu != NOCPU && CPU_ABSENT(cpu)) 327177181Sjhb return (EINVAL); 328177181Sjhb 329177181Sjhb if (ie->ie_assign_cpu == NULL) 330177181Sjhb return (EOPNOTSUPP); 331177181Sjhb 332177181Sjhb /* Don't allow a bind request if the interrupt is already bound. */ 333177181Sjhb mtx_lock(&ie->ie_lock); 334177181Sjhb if (ie->ie_cpu != NOCPU && cpu != NOCPU) { 335177181Sjhb mtx_unlock(&ie->ie_lock); 336177181Sjhb return (EBUSY); 337177181Sjhb } 338177181Sjhb mtx_unlock(&ie->ie_lock); 339177181Sjhb 340177181Sjhb error = ie->ie_assign_cpu(ie->ie_source, cpu); 341177181Sjhb if (error) 342177181Sjhb return (error); 343177181Sjhb mtx_lock(&ie->ie_lock); 344177181Sjhb if (ie->ie_thread != NULL) 345177181Sjhb td = ie->ie_thread->it_thread; 346177181Sjhb else 347177181Sjhb td = NULL; 348177181Sjhb if (td != NULL) 349177181Sjhb thread_lock(td); 350177181Sjhb ie->ie_cpu = cpu; 351177181Sjhb if (td != NULL) 352177181Sjhb thread_unlock(td); 353177181Sjhb mtx_unlock(&ie->ie_lock); 354177181Sjhb return (0); 355177181Sjhb} 356177181Sjhb 357177181Sjhbint 358151658Sjhbintr_event_destroy(struct intr_event *ie) 359151658Sjhb{ 360151658Sjhb 361151658Sjhb mtx_lock(&ie->ie_lock); 362151658Sjhb if (!TAILQ_EMPTY(&ie->ie_handlers)) { 363151658Sjhb mtx_unlock(&ie->ie_lock); 364151658Sjhb return (EBUSY); 365151658Sjhb } 366151658Sjhb mtx_pool_lock(mtxpool_sleep, &event_list); 367151658Sjhb TAILQ_REMOVE(&event_list, ie, ie_list); 368151658Sjhb mtx_pool_unlock(mtxpool_sleep, &event_list); 369157728Sjhb#ifndef notyet 370157728Sjhb if (ie->ie_thread != NULL) { 371157728Sjhb ithread_destroy(ie->ie_thread); 372157728Sjhb ie->ie_thread = NULL; 373157728Sjhb } 374157728Sjhb#endif 375151658Sjhb mtx_unlock(&ie->ie_lock); 376151658Sjhb mtx_destroy(&ie->ie_lock); 377151658Sjhb free(ie, M_ITHREAD); 378151658Sjhb return (0); 379151658Sjhb} 380151658Sjhb 381169320Spiso#ifndef INTR_FILTER 382151658Sjhbstatic struct intr_thread * 383151658Sjhbithread_create(const char *name) 384151658Sjhb{ 
385151658Sjhb struct intr_thread *ithd; 386151658Sjhb struct thread *td; 387151658Sjhb int error; 388151658Sjhb 389151658Sjhb ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); 390151658Sjhb 391173004Sjulian error = kproc_kthread_add(ithread_loop, ithd, &intrproc, 392173004Sjulian &td, RFSTOPPED | RFHIGHPID, 393173051Sjulian 0, "intr", "%s", name); 394151658Sjhb if (error) 395172836Sjulian panic("kproc_create() failed with %d", error); 396170307Sjeff thread_lock(td); 397164936Sjulian sched_class(td, PRI_ITHD); 398103216Sjulian TD_SET_IWAIT(td); 399170307Sjeff thread_unlock(td); 400151658Sjhb td->td_pflags |= TDP_ITHREAD; 401151658Sjhb ithd->it_thread = td; 402151658Sjhb CTR2(KTR_INTR, "%s: created %s", __func__, name); 403151658Sjhb return (ithd); 40472237Sjhb} 405169320Spiso#else 406169320Spisostatic struct intr_thread * 407169320Spisoithread_create(const char *name, struct intr_handler *ih) 408169320Spiso{ 409169320Spiso struct intr_thread *ithd; 410169320Spiso struct thread *td; 411169320Spiso int error; 41272237Sjhb 413169320Spiso ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); 414169320Spiso 415173153Sjulian error = kproc_kthread_add(ithread_loop, ih, &intrproc, 416173004Sjulian &td, RFSTOPPED | RFHIGHPID, 417173051Sjulian 0, "intr", "%s", name); 418169320Spiso if (error) 419172836Sjulian panic("kproc_create() failed with %d", error); 420170307Sjeff thread_lock(td); 421169320Spiso sched_class(td, PRI_ITHD); 422169320Spiso TD_SET_IWAIT(td); 423170307Sjeff thread_unlock(td); 424169320Spiso td->td_pflags |= TDP_ITHREAD; 425169320Spiso ithd->it_thread = td; 426169320Spiso CTR2(KTR_INTR, "%s: created %s", __func__, name); 427169320Spiso return (ithd); 428169320Spiso} 429169320Spiso#endif 430169320Spiso 431151658Sjhbstatic void 432151658Sjhbithread_destroy(struct intr_thread *ithread) 43372237Sjhb{ 43483366Sjulian struct thread *td; 43572237Sjhb 436157784Sscottl CTR2(KTR_INTR, "%s: killing %s", __func__, 
ithread->it_event->ie_name); 437151658Sjhb td = ithread->it_thread; 438170307Sjeff thread_lock(td); 43976771Sjhb ithread->it_flags |= IT_DEAD; 440103216Sjulian if (TD_AWAITING_INTR(td)) { 441103216Sjulian TD_CLR_IWAIT(td); 442166188Sjeff sched_add(td, SRQ_INTR); 44372237Sjhb } 444170307Sjeff thread_unlock(td); 44572237Sjhb} 44672237Sjhb 447169320Spiso#ifndef INTR_FILTER 44872237Sjhbint 449151658Sjhbintr_event_add_handler(struct intr_event *ie, const char *name, 450166901Spiso driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, 451166901Spiso enum intr_type flags, void **cookiep) 45272237Sjhb{ 453151658Sjhb struct intr_handler *ih, *temp_ih; 454151658Sjhb struct intr_thread *it; 45572237Sjhb 456166901Spiso if (ie == NULL || name == NULL || (handler == NULL && filter == NULL)) 45772237Sjhb return (EINVAL); 45872237Sjhb 459151658Sjhb /* Allocate and populate an interrupt handler structure. */ 460151658Sjhb ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO); 461166901Spiso ih->ih_filter = filter; 46272237Sjhb ih->ih_handler = handler; 46372237Sjhb ih->ih_argument = arg; 46472237Sjhb ih->ih_name = name; 465151658Sjhb ih->ih_event = ie; 46672237Sjhb ih->ih_pri = pri; 467166901Spiso if (flags & INTR_EXCL) 46872237Sjhb ih->ih_flags = IH_EXCLUSIVE; 46972237Sjhb if (flags & INTR_MPSAFE) 47072237Sjhb ih->ih_flags |= IH_MPSAFE; 47172237Sjhb if (flags & INTR_ENTROPY) 47272237Sjhb ih->ih_flags |= IH_ENTROPY; 47372237Sjhb 474151658Sjhb /* We can only have one exclusive handler in a event. */ 475151658Sjhb mtx_lock(&ie->ie_lock); 476151658Sjhb if (!TAILQ_EMPTY(&ie->ie_handlers)) { 477151658Sjhb if ((flags & INTR_EXCL) || 478151658Sjhb (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) { 479151658Sjhb mtx_unlock(&ie->ie_lock); 480151658Sjhb free(ih, M_ITHREAD); 481151658Sjhb return (EINVAL); 482151658Sjhb } 483122002Sjhb } 48472237Sjhb 485151658Sjhb /* Add the new handler to the event in priority order. 
*/ 486151658Sjhb TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) { 487151658Sjhb if (temp_ih->ih_pri > ih->ih_pri) 488151658Sjhb break; 489151658Sjhb } 49072237Sjhb if (temp_ih == NULL) 491151658Sjhb TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next); 49272237Sjhb else 49372237Sjhb TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next); 494151658Sjhb intr_event_update(ie); 49572237Sjhb 496151658Sjhb /* Create a thread if we need one. */ 497166901Spiso while (ie->ie_thread == NULL && handler != NULL) { 498151658Sjhb if (ie->ie_flags & IE_ADDING_THREAD) 499157815Sjhb msleep(ie, &ie->ie_lock, 0, "ithread", 0); 500151658Sjhb else { 501151658Sjhb ie->ie_flags |= IE_ADDING_THREAD; 502151658Sjhb mtx_unlock(&ie->ie_lock); 503151658Sjhb it = ithread_create("intr: newborn"); 504151658Sjhb mtx_lock(&ie->ie_lock); 505151658Sjhb ie->ie_flags &= ~IE_ADDING_THREAD; 506151658Sjhb ie->ie_thread = it; 507151658Sjhb it->it_event = ie; 508151658Sjhb ithread_update(it); 509151658Sjhb wakeup(ie); 510151658Sjhb } 511151658Sjhb } 512151658Sjhb CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name, 513151658Sjhb ie->ie_name); 514151658Sjhb mtx_unlock(&ie->ie_lock); 515151658Sjhb 51672237Sjhb if (cookiep != NULL) 51772237Sjhb *cookiep = ih; 51872237Sjhb return (0); 51972237Sjhb} 520169320Spiso#else 521169320Spisoint 522169320Spisointr_event_add_handler(struct intr_event *ie, const char *name, 523169320Spiso driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, 524169320Spiso enum intr_type flags, void **cookiep) 525169320Spiso{ 526169320Spiso struct intr_handler *ih, *temp_ih; 527169320Spiso struct intr_thread *it; 52872237Sjhb 529169320Spiso if (ie == NULL || name == NULL || (handler == NULL && filter == NULL)) 530169320Spiso return (EINVAL); 531169320Spiso 532169320Spiso /* Allocate and populate an interrupt handler structure. 
*/ 533169320Spiso ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO); 534169320Spiso ih->ih_filter = filter; 535169320Spiso ih->ih_handler = handler; 536169320Spiso ih->ih_argument = arg; 537169320Spiso ih->ih_name = name; 538169320Spiso ih->ih_event = ie; 539169320Spiso ih->ih_pri = pri; 540169320Spiso if (flags & INTR_EXCL) 541169320Spiso ih->ih_flags = IH_EXCLUSIVE; 542169320Spiso if (flags & INTR_MPSAFE) 543169320Spiso ih->ih_flags |= IH_MPSAFE; 544169320Spiso if (flags & INTR_ENTROPY) 545169320Spiso ih->ih_flags |= IH_ENTROPY; 546169320Spiso 547169320Spiso /* We can only have one exclusive handler in a event. */ 548169320Spiso mtx_lock(&ie->ie_lock); 549169320Spiso if (!TAILQ_EMPTY(&ie->ie_handlers)) { 550169320Spiso if ((flags & INTR_EXCL) || 551169320Spiso (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) { 552169320Spiso mtx_unlock(&ie->ie_lock); 553169320Spiso free(ih, M_ITHREAD); 554169320Spiso return (EINVAL); 555169320Spiso } 556169320Spiso } 557169320Spiso 558169320Spiso /* Add the new handler to the event in priority order. */ 559169320Spiso TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) { 560169320Spiso if (temp_ih->ih_pri > ih->ih_pri) 561169320Spiso break; 562169320Spiso } 563169320Spiso if (temp_ih == NULL) 564169320Spiso TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next); 565169320Spiso else 566169320Spiso TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next); 567169320Spiso intr_event_update(ie); 568169320Spiso 569169320Spiso /* For filtered handlers, create a private ithread to run on. */ 570169320Spiso if (filter != NULL && handler != NULL) { 571169320Spiso mtx_unlock(&ie->ie_lock); 572169320Spiso it = ithread_create("intr: newborn", ih); 573169320Spiso mtx_lock(&ie->ie_lock); 574169320Spiso it->it_event = ie; 575169320Spiso ih->ih_thread = it; 576169320Spiso ithread_update(it); // XXX - do we really need this?!?!? 577169320Spiso } else { /* Create the global per-event thread if we need one. 
*/ 578169320Spiso while (ie->ie_thread == NULL && handler != NULL) { 579169320Spiso if (ie->ie_flags & IE_ADDING_THREAD) 580169320Spiso msleep(ie, &ie->ie_lock, 0, "ithread", 0); 581169320Spiso else { 582169320Spiso ie->ie_flags |= IE_ADDING_THREAD; 583169320Spiso mtx_unlock(&ie->ie_lock); 584169320Spiso it = ithread_create("intr: newborn", ih); 585169320Spiso mtx_lock(&ie->ie_lock); 586169320Spiso ie->ie_flags &= ~IE_ADDING_THREAD; 587169320Spiso ie->ie_thread = it; 588169320Spiso it->it_event = ie; 589169320Spiso ithread_update(it); 590169320Spiso wakeup(ie); 591169320Spiso } 592169320Spiso } 593169320Spiso } 594169320Spiso CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name, 595169320Spiso ie->ie_name); 596169320Spiso mtx_unlock(&ie->ie_lock); 597169320Spiso 598169320Spiso if (cookiep != NULL) 599169320Spiso *cookiep = ih; 600169320Spiso return (0); 601169320Spiso} 602169320Spiso#endif 603169320Spiso 604165125Sjhb/* 605165125Sjhb * Return the ie_source field from the intr_event an intr_handler is 606165125Sjhb * associated with. 
607165125Sjhb */ 608165125Sjhbvoid * 609165125Sjhbintr_handler_source(void *cookie) 610165125Sjhb{ 611165125Sjhb struct intr_handler *ih; 612165125Sjhb struct intr_event *ie; 613165125Sjhb 614165125Sjhb ih = (struct intr_handler *)cookie; 615165125Sjhb if (ih == NULL) 616165125Sjhb return (NULL); 617165125Sjhb ie = ih->ih_event; 618165125Sjhb KASSERT(ie != NULL, 619165125Sjhb ("interrupt handler \"%s\" has a NULL interrupt event", 620165125Sjhb ih->ih_name)); 621165125Sjhb return (ie->ie_source); 622165125Sjhb} 623165125Sjhb 624169320Spiso#ifndef INTR_FILTER 62572237Sjhbint 626151658Sjhbintr_event_remove_handler(void *cookie) 62772237Sjhb{ 628151658Sjhb struct intr_handler *handler = (struct intr_handler *)cookie; 629151658Sjhb struct intr_event *ie; 63072237Sjhb#ifdef INVARIANTS 631151658Sjhb struct intr_handler *ih; 63272237Sjhb#endif 633151658Sjhb#ifdef notyet 634151658Sjhb int dead; 635151658Sjhb#endif 63672237Sjhb 63772759Sjhb if (handler == NULL) 63872237Sjhb return (EINVAL); 639151658Sjhb ie = handler->ih_event; 640151658Sjhb KASSERT(ie != NULL, 641151658Sjhb ("interrupt handler \"%s\" has a NULL interrupt event", 642165124Sjhb handler->ih_name)); 643151658Sjhb mtx_lock(&ie->ie_lock); 64487593Sobrien CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name, 645151658Sjhb ie->ie_name); 64672237Sjhb#ifdef INVARIANTS 647151658Sjhb TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 64872759Sjhb if (ih == handler) 64972759Sjhb goto ok; 650151658Sjhb mtx_unlock(&ie->ie_lock); 651151658Sjhb panic("interrupt handler \"%s\" not found in interrupt event \"%s\"", 652151658Sjhb ih->ih_name, ie->ie_name); 65372759Sjhbok: 65472237Sjhb#endif 65572839Sjhb /* 656151658Sjhb * If there is no ithread, then just remove the handler and return. 657151658Sjhb * XXX: Note that an INTR_FAST handler might be running on another 658151658Sjhb * CPU! 
659151658Sjhb */ 660151658Sjhb if (ie->ie_thread == NULL) { 661151658Sjhb TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 662151658Sjhb mtx_unlock(&ie->ie_lock); 663151658Sjhb free(handler, M_ITHREAD); 664151658Sjhb return (0); 665151658Sjhb } 666151658Sjhb 667151658Sjhb /* 66872839Sjhb * If the interrupt thread is already running, then just mark this 66972839Sjhb * handler as being dead and let the ithread do the actual removal. 670124505Struckman * 671124505Struckman * During a cold boot while cold is set, msleep() does not sleep, 672124505Struckman * so we have to remove the handler here rather than letting the 673124505Struckman * thread do it. 67472839Sjhb */ 675170307Sjeff thread_lock(ie->ie_thread->it_thread); 676151658Sjhb if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) { 67772839Sjhb handler->ih_flags |= IH_DEAD; 67872839Sjhb 67972839Sjhb /* 68072839Sjhb * Ensure that the thread will process the handler list 68172839Sjhb * again and remove this handler if it has already passed 68272839Sjhb * it on the list. 68372839Sjhb */ 684151658Sjhb ie->ie_thread->it_need = 1; 685151658Sjhb } else 686151658Sjhb TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 687170307Sjeff thread_unlock(ie->ie_thread->it_thread); 688151658Sjhb while (handler->ih_flags & IH_DEAD) 689157815Sjhb msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0); 690151658Sjhb intr_event_update(ie); 691151658Sjhb#ifdef notyet 692151658Sjhb /* 693151658Sjhb * XXX: This could be bad in the case of ppbus(8). Also, I think 694151658Sjhb * this could lead to races of stale data when servicing an 695151658Sjhb * interrupt. 
696151658Sjhb */ 697151658Sjhb dead = 1; 698151658Sjhb TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 699151658Sjhb if (!(ih->ih_flags & IH_FAST)) { 700151658Sjhb dead = 0; 701151658Sjhb break; 702151658Sjhb } 703151658Sjhb } 704151658Sjhb if (dead) { 705151658Sjhb ithread_destroy(ie->ie_thread); 706151658Sjhb ie->ie_thread = NULL; 707151658Sjhb } 708151658Sjhb#endif 709151658Sjhb mtx_unlock(&ie->ie_lock); 71076771Sjhb free(handler, M_ITHREAD); 71172237Sjhb return (0); 71272237Sjhb} 71372237Sjhb 71472237Sjhbint 715151658Sjhbintr_event_schedule_thread(struct intr_event *ie) 71672759Sjhb{ 717151658Sjhb struct intr_entropy entropy; 718151658Sjhb struct intr_thread *it; 71983366Sjulian struct thread *td; 720101176Sjulian struct thread *ctd; 72172759Sjhb struct proc *p; 72272759Sjhb 72372759Sjhb /* 72472759Sjhb * If no ithread or no handlers, then we have a stray interrupt. 72572759Sjhb */ 726151658Sjhb if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || 727151658Sjhb ie->ie_thread == NULL) 72872759Sjhb return (EINVAL); 72972759Sjhb 730101176Sjulian ctd = curthread; 731151658Sjhb it = ie->ie_thread; 732151658Sjhb td = it->it_thread; 733133191Srwatson p = td->td_proc; 734151658Sjhb 73572759Sjhb /* 73672759Sjhb * If any of the handlers for this ithread claim to be good 73772759Sjhb * sources of entropy, then gather some. 73872759Sjhb */ 739151658Sjhb if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) { 740133191Srwatson CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__, 741173004Sjulian p->p_pid, td->td_name); 742151658Sjhb entropy.event = (uintptr_t)ie; 743151658Sjhb entropy.td = ctd; 74472759Sjhb random_harvest(&entropy, sizeof(entropy), 2, 0, 74572759Sjhb RANDOM_INTERRUPT); 74672759Sjhb } 74772759Sjhb 748151658Sjhb KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name)); 74972759Sjhb 75072759Sjhb /* 75172759Sjhb * Set it_need to tell the thread to keep running if it is already 752170307Sjeff * running. 
Then, lock the thread and see if we actually need to 753170307Sjeff * put it on the runqueue. 75472759Sjhb */ 755151658Sjhb it->it_need = 1; 756170307Sjeff thread_lock(td); 757103216Sjulian if (TD_AWAITING_INTR(td)) { 758151658Sjhb CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid, 759173004Sjulian td->td_name); 760103216Sjulian TD_CLR_IWAIT(td); 761166188Sjeff sched_add(td, SRQ_INTR); 76272759Sjhb } else { 763151658Sjhb CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d", 764173004Sjulian __func__, p->p_pid, td->td_name, it->it_need, td->td_state); 76572759Sjhb } 766170307Sjeff thread_unlock(td); 76772759Sjhb 76872759Sjhb return (0); 76972759Sjhb} 770169320Spiso#else 771169320Spisoint 772169320Spisointr_event_remove_handler(void *cookie) 773169320Spiso{ 774169320Spiso struct intr_handler *handler = (struct intr_handler *)cookie; 775169320Spiso struct intr_event *ie; 776169320Spiso struct intr_thread *it; 777169320Spiso#ifdef INVARIANTS 778169320Spiso struct intr_handler *ih; 779169320Spiso#endif 780169320Spiso#ifdef notyet 781169320Spiso int dead; 782169320Spiso#endif 78372759Sjhb 784169320Spiso if (handler == NULL) 785169320Spiso return (EINVAL); 786169320Spiso ie = handler->ih_event; 787169320Spiso KASSERT(ie != NULL, 788169320Spiso ("interrupt handler \"%s\" has a NULL interrupt event", 789169320Spiso handler->ih_name)); 790169320Spiso mtx_lock(&ie->ie_lock); 791169320Spiso CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name, 792169320Spiso ie->ie_name); 793169320Spiso#ifdef INVARIANTS 794169320Spiso TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 795169320Spiso if (ih == handler) 796169320Spiso goto ok; 797169320Spiso mtx_unlock(&ie->ie_lock); 798169320Spiso panic("interrupt handler \"%s\" not found in interrupt event \"%s\"", 799169320Spiso ih->ih_name, ie->ie_name); 800169320Spisook: 801169320Spiso#endif 802169320Spiso /* 803169320Spiso * If there are no ithreads (per event and per handler), then 804169320Spiso * just remove the 
handler and return. 805169320Spiso * XXX: Note that an INTR_FAST handler might be running on another CPU! 806169320Spiso */ 807169320Spiso if (ie->ie_thread == NULL && handler->ih_thread == NULL) { 808169320Spiso TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 809169320Spiso mtx_unlock(&ie->ie_lock); 810169320Spiso free(handler, M_ITHREAD); 811169320Spiso return (0); 812169320Spiso } 813169320Spiso 814169320Spiso /* Private or global ithread? */ 815169320Spiso it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread; 816169320Spiso /* 817169320Spiso * If the interrupt thread is already running, then just mark this 818169320Spiso * handler as being dead and let the ithread do the actual removal. 819169320Spiso * 820169320Spiso * During a cold boot while cold is set, msleep() does not sleep, 821169320Spiso * so we have to remove the handler here rather than letting the 822169320Spiso * thread do it. 823169320Spiso */ 824170307Sjeff thread_lock(it->it_thread); 825169320Spiso if (!TD_AWAITING_INTR(it->it_thread) && !cold) { 826169320Spiso handler->ih_flags |= IH_DEAD; 827169320Spiso 828169320Spiso /* 829169320Spiso * Ensure that the thread will process the handler list 830169320Spiso * again and remove this handler if it has already passed 831169320Spiso * it on the list. 832169320Spiso */ 833169320Spiso it->it_need = 1; 834169320Spiso } else 835169320Spiso TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 836170307Sjeff thread_unlock(it->it_thread); 837169320Spiso while (handler->ih_flags & IH_DEAD) 838169320Spiso msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0); 839169320Spiso /* 840169320Spiso * At this point, the handler has been disconnected from the event, 841169320Spiso * so we can kill the private ithread if any. 
842169320Spiso */ 843169320Spiso if (handler->ih_thread) { 844169320Spiso ithread_destroy(handler->ih_thread); 845169320Spiso handler->ih_thread = NULL; 846169320Spiso } 847169320Spiso intr_event_update(ie); 848169320Spiso#ifdef notyet 849169320Spiso /* 850169320Spiso * XXX: This could be bad in the case of ppbus(8). Also, I think 851169320Spiso * this could lead to races of stale data when servicing an 852169320Spiso * interrupt. 853169320Spiso */ 854169320Spiso dead = 1; 855169320Spiso TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 856169320Spiso if (handler != NULL) { 857169320Spiso dead = 0; 858169320Spiso break; 859169320Spiso } 860169320Spiso } 861169320Spiso if (dead) { 862169320Spiso ithread_destroy(ie->ie_thread); 863169320Spiso ie->ie_thread = NULL; 864169320Spiso } 865169320Spiso#endif 866169320Spiso mtx_unlock(&ie->ie_lock); 867169320Spiso free(handler, M_ITHREAD); 868169320Spiso return (0); 869169320Spiso} 870169320Spiso 871169320Spisoint 872169320Spisointr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it) 873169320Spiso{ 874169320Spiso struct intr_entropy entropy; 875169320Spiso struct thread *td; 876169320Spiso struct thread *ctd; 877169320Spiso struct proc *p; 878169320Spiso 879169320Spiso /* 880169320Spiso * If no ithread or no handlers, then we have a stray interrupt. 881169320Spiso */ 882169320Spiso if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL) 883169320Spiso return (EINVAL); 884169320Spiso 885169320Spiso ctd = curthread; 886169320Spiso td = it->it_thread; 887169320Spiso p = td->td_proc; 888169320Spiso 889169320Spiso /* 890169320Spiso * If any of the handlers for this ithread claim to be good 891169320Spiso * sources of entropy, then gather some. 
	 */
	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, td->td_name);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 */
	it->it_need = 1;
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
#endif

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct intr_event *ie;
	int error;

	/* Software interrupts are never treated as entropy sources. */
	if (flags & INTR_ENTROPY)
		return (EINVAL);

	ie = (eventp != NULL) ? *eventp : NULL;

	if (ie != NULL) {
		/* An existing event must itself be a software event. */
		if (!(ie->ie_flags & IE_SOFT))
			return (EINVAL);
	} else {
#ifdef INTR_FILTER
		error = intr_event_create(&ie, NULL, IE_SOFT,
		    NULL, NULL, NULL, NULL, "swi%d:", pri);
#else
		error = intr_event_create(&ie, NULL, IE_SOFT,
		    NULL, NULL, "swi%d:", pri);
#endif
		if (error)
			return (error);
		if (eventp != NULL)
			*eventp = ie;
	}
	return (intr_event_add_handler(ie, name, NULL, handler, arg,
	    (pri * RQ_PPQ) + PI_SOFT, flags, cookiep));
}

/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intr_handler *ih = (struct intr_handler *)cookie;
	struct intr_event *ie = ih->ih_event;
	int error;

	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
	    ih->ih_need);

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	atomic_store_rel_int(&ih->ih_need, 1);

	/* With SWI_DELAY the caller defers the actual dispatch. */
	if (!(flags & SWI_DELAY)) {
		PCPU_INC(cnt.v_soft);
#ifdef INTR_FILTER
		error = intr_event_schedule_thread(ie, ie->ie_thread);
#else
		error = intr_event_schedule_thread(ie);
#endif
		KASSERT(error == 0, ("stray software interrupt"));
	}
}

/*
 * Remove a software interrupt handler.
 Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

	return (intr_event_remove_handler(cookie));
}

#ifdef INTR_FILTER
/*
 * Run a single handler on its private ithread (INTR_FILTER only),
 * honoring IH_DEAD removal requests and the Giant lock for handlers
 * that are not marked IH_MPSAFE.
 */
static void
priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
{
	struct intr_event *ie;

	ie = ih->ih_event;
	/*
	 * If this handler is marked for death, remove it from
	 * the list of handlers and wake up the sleeper.
	 */
	if (ih->ih_flags & IH_DEAD) {
		mtx_lock(&ie->ie_lock);
		TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
		ih->ih_flags &= ~IH_DEAD;
		wakeup(ih);
		mtx_unlock(&ie->ie_lock);
		return;
	}

	/* Execute this handler. */
	CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
	    __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
	    ih->ih_name, ih->ih_flags);

	if (!(ih->ih_flags & IH_MPSAFE))
		mtx_lock(&Giant);
	ih->ih_handler(ih->ih_argument);
	if (!(ih->ih_flags & IH_MPSAFE))
		mtx_unlock(&Giant);
}
#endif

/*
 * Run every eligible handler attached to event 'ie' once, then apply
 * interrupt storm throttling and re-enable the interrupt source.
 */
static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{
	struct intr_handler *ih, *ihn;

	/* Interrupt handlers should not sleep.
 */
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_NO_SLEEPING();
	TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {

		/*
		 * If this handler is marked for death, remove it from
		 * the list of handlers and wake up the sleeper.
		 */
		if (ih->ih_flags & IH_DEAD) {
			mtx_lock(&ie->ie_lock);
			TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
			ih->ih_flags &= ~IH_DEAD;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
			continue;
		}

		/* Skip filter only handlers */
		if (ih->ih_handler == NULL)
			continue;

		/*
		 * For software interrupt threads, we only execute
		 * handlers that have their need flag set.  Hardware
		 * interrupt threads always invoke all of their handlers.
		 */
		if (ie->ie_flags & IE_SOFT) {
			if (!ih->ih_need)
				continue;
			else
				atomic_store_rel_int(&ih->ih_need, 0);
		}

		/* Execute this handler. */
		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
		    __func__, p->p_pid, (void *)ih->ih_handler,
		    ih->ih_argument, ih->ih_name, ih->ih_flags);

		/* Non-MPSAFE handlers still run under Giant. */
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_lock(&Giant);
		ih->ih_handler(ih->ih_argument);
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_unlock(&Giant);
	}
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_SLEEPING_OK();

	/*
	 * Interrupt storm handling:
	 *
	 * If this interrupt source is currently storming, then throttle
	 * it to only fire the handler once per clock tick.
	 *
	 * If this interrupt source is not currently storming, but the
	 * number of back to back interrupts exceeds the storm threshold,
	 * then enter storming mode.
	 */
	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
	    !(ie->ie_flags & IE_SOFT)) {
		/* Report the message only once every second. */
		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
			printf(
	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
			    ie->ie_name);
		}
		pause("istorm", 1);
	} else
		ie->ie_count++;

	/*
	 * Now that all the handlers have had a chance to run, reenable
	 * the interrupt source.
	 */
	if (ie->ie_enable != NULL)
		ie->ie_enable(ie->ie_source);
}

#ifndef INTR_FILTER
/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	u_char cpu;		/* CPU this ithread is currently bound to. */

	td = curthread;
	p = td->td_proc;
	ithd = (struct intr_thread *)arg;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;
	cpu = NOCPU;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 */
		while (ithd->it_need) {
			/*
			 * This might need a full read and write barrier
			 * to make sure that this write posts before any
			 * of the memory or device accesses in the
			 * handlers.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
			ithread_execute_handlers(p, ie);
		}
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.
  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL, NULL);
		}

#ifdef SMP
		/*
		 * Ensure we are bound to the correct CPU.  We can't
		 * move ithreads until SMP is running however, so just
		 * leave interrupts on the boot CPU during boot.
		 */
		if (ie->ie_cpu != cpu && smp_started) {
			cpu = ie->ie_cpu;
			if (cpu == NOCPU)
				sched_unbind(td);
			else
				sched_bind(td, cpu);
		}
#endif
		thread_unlock(td);
	}
}
#else
/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_handler *ih;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	int priv;		/* Non-zero for a per-handler private ithread. */
	u_char cpu;		/* CPU this ithread is currently bound to. */

	td = curthread;
	p = td->td_proc;
	ih = (struct intr_handler *)arg;
	priv = (ih->ih_thread != NULL) ? 1 : 0;
	ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;
	cpu = NOCPU;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 */
		while (ithd->it_need) {
			/*
			 * This might need a full read and write barrier
			 * to make sure that this write posts before any
			 * of the memory or device accesses in the
			 * handlers.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
			if (priv)
				priv_ithread_execute_handler(p, ih);
			else
				ithread_execute_handlers(p, ie);
		}
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL, NULL);
		}

#ifdef SMP
		/*
		 * Ensure we are bound to the correct CPU.  We can't
		 * move ithreads until SMP is running however, so just
		 * leave interrupts on the boot CPU during boot.
		 */
		if (!priv && ie->ie_cpu != cpu && smp_started) {
			cpu = ie->ie_cpu;
			if (cpu == NOCPU)
				sched_unbind(td);
			else
				sched_bind(td, cpu);
		}
#endif
		thread_unlock(td);
	}
}

/*
 * Main loop for interrupt filter.
 *
 * Some architectures (i386, amd64 and arm) require the optional frame
 * parameter, and use it as the main argument for fast handler execution
 * when ih_argument == NULL.
 *
 * Return value:
 * o FILTER_STRAY:              No filter recognized the event, and no
 *                              filter-less handler is registered on this
 *                              line.
 * o FILTER_HANDLED:            A filter claimed the event and served it.
 * o FILTER_SCHEDULE_THREAD:    No filter claimed the event, but there's at
 *                              least one filter-less handler on this line.
 * o FILTER_HANDLED |
 *   FILTER_SCHEDULE_THREAD:    A filter claimed the event, and asked for
 *                              scheduling the per-handler ithread.
 *
 * In case an ithread has to be scheduled, in *ithd there will be a
 * pointer to a struct intr_thread containing the thread to be
 * scheduled.
 */

int
intr_filter_loop(struct intr_event *ie, struct trapframe *frame,
    struct intr_thread **ithd)
{
	struct intr_handler *ih;
	void *arg;
	int ret, thread_only;

	ret = 0;
	thread_only = 0;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		/*
		 * Execute fast interrupt handlers directly.
		 * To support clock handlers, if a handler registers
		 * with a NULL argument, then we pass it a pointer to
		 * a trapframe as its argument.
		 */
		arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);

		CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_handler, arg, ih->ih_name);

		if (ih->ih_filter != NULL)
			ret = ih->ih_filter(arg);
		else {
			/* Filter-less handler: defer to the ithread. */
			thread_only = 1;
			continue;
		}

		if (ret & FILTER_STRAY)
			continue;
		else {
			/* First filter to claim the event wins. */
			*ithd = ih->ih_thread;
			return (ret);
		}
	}

	/*
	 * No filters handled the interrupt and we have at least
	 * one handler without a filter.  In this case, we schedule
	 * all of the filter-less handlers to run in the ithread.
	 */
	if (thread_only) {
		*ithd = ie->ie_thread;
		return (FILTER_SCHEDULE_THREAD);
	}
	return (FILTER_STRAY);
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:                        the event connected to this interrupt.
 * o frame:                     some archs (i.e. i386) pass a frame to some
 *                              handlers as their main argument.
 * Return value:
 * o 0:                         everything ok.
 * o EINVAL:                    stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_thread *ithd;
	struct thread *td;
	int thread;

	ithd = NULL;
	td = curthread;

	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	td->td_intr_nesting_level++;
	thread = 0;
	critical_enter();
	thread = intr_filter_loop(ie, frame, &ithd);

	/*
	 * If the interrupt was fully served, send it an EOI but leave
	 * it unmasked.  Otherwise, mask the source as well as sending
	 * it an EOI.
	 */
	if (thread & FILTER_HANDLED) {
		if (ie->ie_eoi != NULL)
			ie->ie_eoi(ie->ie_source);
	} else {
		if (ie->ie_disab != NULL)
			ie->ie_disab(ie->ie_source);
	}
	critical_exit();

	/* Interrupt storm logic */
	if (thread & FILTER_STRAY) {
		ie->ie_count++;
		if (ie->ie_count < intr_storm_threshold)
			printf("Interrupt stray detection not present\n");
	}

	/* Schedule an ithread if needed.
 */
	if (thread & FILTER_SCHEDULE_THREAD) {
		if (intr_event_schedule_thread(ie, ithd) != 0)
			panic("%s: impossible stray interrupt", __func__);
	}
	td->td_intr_nesting_level--;
	return (0);
}
#endif

#ifdef DDB
/*
 * Dump details about an interrupt handler
 */
static void
db_dump_intrhand(struct intr_handler *ih)
{
	int comma;

	db_printf("\t%-10s ", ih->ih_name);
	/* Print a fixed-width tag for the handler's priority class. */
	switch (ih->ih_pri) {
	case PI_REALTIME:
		db_printf("CLK ");
		break;
	case PI_AV:
		db_printf("AV  ");
		break;
	case PI_TTYHIGH:
	case PI_TTYLOW:
		db_printf("TTY ");
		break;
	case PI_TAPE:
		db_printf("TAPE");
		break;
	case PI_NET:
		db_printf("NET ");
		break;
	case PI_DISK:
	case PI_DISKLOW:
		db_printf("DISK");
		break;
	case PI_DULL:
		db_printf("DULL");
		break;
	default:
		if (ih->ih_pri >= PI_SOFT)
			db_printf("SWI ");
		else
			db_printf("%4u", ih->ih_pri);
		break;
	}
	db_printf(" ");
	db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
	db_printf("(%p)", ih->ih_argument);
	/* Append a comma-separated {FLAG, ...} list when any flag is set. */
	if (ih->ih_need ||
	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
	    IH_MPSAFE)) != 0) {
		db_printf(" {");
		comma = 0;
		if (ih->ih_flags & IH_EXCLUSIVE) {
			if (comma)
				db_printf(", ");
			db_printf("EXCL");
			comma = 1;
		}
		if (ih->ih_flags & IH_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ih->ih_flags & IH_DEAD) {
			if (comma)
				db_printf(", ");
			db_printf("DEAD");
			comma = 1;
		}
		if (ih->ih_flags & IH_MPSAFE) {
			if (comma)
				db_printf(", ");
			db_printf("MPSAFE");
			comma = 1;
		}
		if (ih->ih_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");
}

/*
 * Dump details about an event.
 */
void
db_dump_intr_event(struct intr_event *ie, int handlers)
{
	struct intr_handler *ih;
	struct intr_thread *it;
	int comma;

	db_printf("%s ", ie->ie_fullname);
	it = ie->ie_thread;
	if (it != NULL)
		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
	else
		db_printf("(no thread)");
	if (ie->ie_cpu != NOCPU)
		db_printf(" (CPU %d)", ie->ie_cpu);
	if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
	    (it != NULL && it->it_need)) {
		db_printf(" {");
		comma = 0;
		if (ie->ie_flags & IE_SOFT) {
			db_printf("SOFT");
			comma = 1;
		}
		if (ie->ie_flags & IE_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma
 = 1;
		}
		if (ie->ie_flags & IE_ADDING_THREAD) {
			if (comma)
				db_printf(", ");
			db_printf("ADDING_THREAD");
			comma = 1;
		}
		if (it != NULL && it->it_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");

	if (handlers)
		TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
			db_dump_intrhand(ih);
}

/*
 * Dump data about interrupt handlers
 */
DB_SHOW_COMMAND(intr, db_show_intr)
{
	struct intr_event *ie;
	int all, verbose;

	/* 'v' also dumps each handler; 'a' includes handler-less events. */
	verbose = index(modif, 'v') != NULL;
	all = index(modif, 'a') != NULL;
	TAILQ_FOREACH(ie, &event_list, ie_list) {
		if (!all && TAILQ_EMPTY(&ie->ie_handlers))
			continue;
		db_dump_intr_event(ie, verbose);
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */

/*
 * Start standard software interrupt threads
 */
static void
start_softintr(void *dummy)
{
	struct proc *p;

	if (swi_add(&clk_intr_event, "clock", softclock, NULL, SWI_CLOCK,
		INTR_MPSAFE, &softclock_ih) ||
	    swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
		panic("died while creating standard software ithreads");

	/* Keep the clock swi thread out of the load average. */
	p = clk_intr_event->ie_thread->it_thread->td_proc;
	PROC_LOCK(p);
	p->p_flag |= P_NOLOAD;
	PROC_UNLOCK(p);
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL)

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this machine dependent, and the declarations are in machine
 * dependent code.  The layout of intrnames and intrcnt however is machine
 * independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
	   req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrcnt,
	    (char *)eintrcnt - (char *)intrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");

#ifdef DDB
/*
 * DDB command to dump the interrupt statistics.
 */
DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
{
	u_long *i;
	char *cp;

	/* intrnames is a packed array of NUL-terminated names, one
	 * entry per intrcnt slot; an empty name marks the end. */
	cp = intrnames;
	for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) {
		if (*cp == '\0')
			break;
		if (*i != 0)
			db_printf("%s\t%lu\n", cp, *i);
		cp += strlen(cp) + 1;
	}
}
#endif