kern_intr.c revision 178272
1139804Simp/*- 226156Sse * Copyright (c) 1997, Stefan Esser <se@freebsd.org> 326156Sse * All rights reserved. 426156Sse * 526156Sse * Redistribution and use in source and binary forms, with or without 626156Sse * modification, are permitted provided that the following conditions 726156Sse * are met: 826156Sse * 1. Redistributions of source code must retain the above copyright 926156Sse * notice unmodified, this list of conditions, and the following 1026156Sse * disclaimer. 1126156Sse * 2. Redistributions in binary form must reproduce the above copyright 1226156Sse * notice, this list of conditions and the following disclaimer in the 1326156Sse * documentation and/or other materials provided with the distribution. 1426156Sse * 1526156Sse * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 1626156Sse * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 1726156Sse * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 1826156Sse * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 1926156Sse * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 2026156Sse * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 2126156Sse * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 2226156Sse * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 2326156Sse * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 2426156Sse * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
2526156Sse */ 2626156Sse 27116182Sobrien#include <sys/cdefs.h> 28116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/kern_intr.c 178272 2008-04-17 04:20:10Z jeff $"); 2936887Sdfr 30121482Sjhb#include "opt_ddb.h" 31121482Sjhb 3241059Speter#include <sys/param.h> 3365822Sjhb#include <sys/bus.h> 34110860Salfred#include <sys/conf.h> 35178092Sjeff#include <sys/cpuset.h> 3665822Sjhb#include <sys/rtprio.h> 3741059Speter#include <sys/systm.h> 3866698Sjhb#include <sys/interrupt.h> 3966698Sjhb#include <sys/kernel.h> 4066698Sjhb#include <sys/kthread.h> 4166698Sjhb#include <sys/ktr.h> 42130128Sbde#include <sys/limits.h> 4374914Sjhb#include <sys/lock.h> 4426156Sse#include <sys/malloc.h> 4567365Sjhb#include <sys/mutex.h> 4666698Sjhb#include <sys/proc.h> 4772759Sjhb#include <sys/random.h> 4872237Sjhb#include <sys/resourcevar.h> 49139451Sjhb#include <sys/sched.h> 50177181Sjhb#include <sys/smp.h> 5177582Stmm#include <sys/sysctl.h> 5266698Sjhb#include <sys/unistd.h> 5366698Sjhb#include <sys/vmmeter.h> 5466698Sjhb#include <machine/atomic.h> 5566698Sjhb#include <machine/cpu.h> 5667551Sjhb#include <machine/md_var.h> 5772237Sjhb#include <machine/stdarg.h> 58121482Sjhb#ifdef DDB 59121482Sjhb#include <ddb/ddb.h> 60121482Sjhb#include <ddb/db_sym.h> 61121482Sjhb#endif 6226156Sse 63151658Sjhb/* 64151658Sjhb * Describe an interrupt thread. There is one of these per interrupt event. 65151658Sjhb */ 66151658Sjhbstruct intr_thread { 67151658Sjhb struct intr_event *it_event; 68151658Sjhb struct thread *it_thread; /* Kernel thread. */ 69151658Sjhb int it_flags; /* (j) IT_* flags. */ 70151658Sjhb int it_need; /* Needs service. */ 7172759Sjhb}; 7272759Sjhb 73151658Sjhb/* Interrupt thread flags kept in it_flags */ 74151658Sjhb#define IT_DEAD 0x000001 /* Thread is waiting to exit. 
*/ 75151658Sjhb 76151658Sjhbstruct intr_entropy { 77151658Sjhb struct thread *td; 78151658Sjhb uintptr_t event; 79151658Sjhb}; 80151658Sjhb 81151658Sjhbstruct intr_event *clk_intr_event; 82151658Sjhbstruct intr_event *tty_intr_event; 83128339Sbdevoid *vm_ih; 84173004Sjulianstruct proc *intrproc; 8538244Sbde 8672237Sjhbstatic MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads"); 8772237Sjhb 88168850Snjlstatic int intr_storm_threshold = 1000; 89128331SjhbTUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold); 90128331SjhbSYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW, 91128331Sjhb &intr_storm_threshold, 0, 92128339Sbde "Number of consecutive interrupts before storm protection is enabled"); 93151658Sjhbstatic TAILQ_HEAD(, intr_event) event_list = 94151658Sjhb TAILQ_HEAD_INITIALIZER(event_list); 95178092Sjeffstatic struct mtx event_lock; 96178092SjeffMTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF); 97128331Sjhb 98151658Sjhbstatic void intr_event_update(struct intr_event *ie); 99169320Spiso#ifdef INTR_FILTER 100177940Sjhbstatic int intr_event_schedule_thread(struct intr_event *ie, 101177940Sjhb struct intr_thread *ithd); 102177940Sjhbstatic int intr_filter_loop(struct intr_event *ie, 103177940Sjhb struct trapframe *frame, struct intr_thread **ithd); 104169320Spisostatic struct intr_thread *ithread_create(const char *name, 105169320Spiso struct intr_handler *ih); 106169320Spiso#else 107177940Sjhbstatic int intr_event_schedule_thread(struct intr_event *ie); 108151658Sjhbstatic struct intr_thread *ithread_create(const char *name); 109169320Spiso#endif 110151658Sjhbstatic void ithread_destroy(struct intr_thread *ithread); 111169320Spisostatic void ithread_execute_handlers(struct proc *p, 112169320Spiso struct intr_event *ie); 113169320Spiso#ifdef INTR_FILTER 114169320Spisostatic void priv_ithread_execute_handler(struct proc *p, 115169320Spiso struct intr_handler *ih); 116169320Spiso#endif 117128339Sbdestatic void 
ithread_loop(void *); 118151658Sjhbstatic void ithread_update(struct intr_thread *ithd); 119128339Sbdestatic void start_softintr(void *); 120128339Sbde 121165124Sjhb/* Map an interrupt type to an ithread priority. */ 12272237Sjhbu_char 123151658Sjhbintr_priority(enum intr_type flags) 12465822Sjhb{ 12572237Sjhb u_char pri; 12665822Sjhb 12772237Sjhb flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET | 12878365Speter INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV); 12965822Sjhb switch (flags) { 13072237Sjhb case INTR_TYPE_TTY: 13165822Sjhb pri = PI_TTYLOW; 13265822Sjhb break; 13365822Sjhb case INTR_TYPE_BIO: 13465822Sjhb /* 13565822Sjhb * XXX We need to refine this. BSD/OS distinguishes 13665822Sjhb * between tape and disk priorities. 13765822Sjhb */ 13865822Sjhb pri = PI_DISK; 13965822Sjhb break; 14065822Sjhb case INTR_TYPE_NET: 14165822Sjhb pri = PI_NET; 14265822Sjhb break; 14365822Sjhb case INTR_TYPE_CAM: 14465822Sjhb pri = PI_DISK; /* XXX or PI_CAM? */ 14565822Sjhb break; 14678365Speter case INTR_TYPE_AV: /* Audio/video */ 14778365Speter pri = PI_AV; 14878365Speter break; 14972237Sjhb case INTR_TYPE_CLK: 15072237Sjhb pri = PI_REALTIME; 15172237Sjhb break; 15265822Sjhb case INTR_TYPE_MISC: 15365822Sjhb pri = PI_DULL; /* don't care */ 15465822Sjhb break; 15565822Sjhb default: 15672237Sjhb /* We didn't specify an interrupt level. */ 157151658Sjhb panic("intr_priority: no interrupt type in flags"); 15865822Sjhb } 15965822Sjhb 16065822Sjhb return pri; 16165822Sjhb} 16265822Sjhb 16372237Sjhb/* 164151658Sjhb * Update an ithread based on the associated intr_event. 16572237Sjhb */ 16672237Sjhbstatic void 167151658Sjhbithread_update(struct intr_thread *ithd) 16872237Sjhb{ 169151658Sjhb struct intr_event *ie; 17083366Sjulian struct thread *td; 171151658Sjhb u_char pri; 17267551Sjhb 173151658Sjhb ie = ithd->it_event; 174151658Sjhb td = ithd->it_thread; 17572237Sjhb 176151658Sjhb /* Determine the overall priority of this event. 
*/ 177151658Sjhb if (TAILQ_EMPTY(&ie->ie_handlers)) 178151658Sjhb pri = PRI_MAX_ITHD; 179151658Sjhb else 180151658Sjhb pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri; 181105354Srobert 182151658Sjhb /* Update name and priority. */ 183173004Sjulian strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name)); 184170307Sjeff thread_lock(td); 185151658Sjhb sched_prio(td, pri); 186170307Sjeff thread_unlock(td); 187151658Sjhb} 188151658Sjhb 189151658Sjhb/* 190151658Sjhb * Regenerate the full name of an interrupt event and update its priority. 191151658Sjhb */ 192151658Sjhbstatic void 193151658Sjhbintr_event_update(struct intr_event *ie) 194151658Sjhb{ 195151658Sjhb struct intr_handler *ih; 196151658Sjhb char *last; 197151658Sjhb int missed, space; 198151658Sjhb 199151658Sjhb /* Start off with no entropy and just the name of the event. */ 200151658Sjhb mtx_assert(&ie->ie_lock, MA_OWNED); 201151658Sjhb strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); 202151658Sjhb ie->ie_flags &= ~IE_ENTROPY; 203137267Sjhb missed = 0; 204151658Sjhb space = 1; 205151658Sjhb 206151658Sjhb /* Run through all the handlers updating values. */ 207151658Sjhb TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 208151658Sjhb if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 < 209151658Sjhb sizeof(ie->ie_fullname)) { 210151658Sjhb strcat(ie->ie_fullname, " "); 211151658Sjhb strcat(ie->ie_fullname, ih->ih_name); 212151658Sjhb space = 0; 213137267Sjhb } else 214137267Sjhb missed++; 215137267Sjhb if (ih->ih_flags & IH_ENTROPY) 216151658Sjhb ie->ie_flags |= IE_ENTROPY; 217137267Sjhb } 218151658Sjhb 219151658Sjhb /* 220151658Sjhb * If the handler names were too long, add +'s to indicate missing 221151658Sjhb * names. If we run out of room and still have +'s to add, change 222151658Sjhb * the last character from a + to a *. 
223151658Sjhb */ 224151658Sjhb last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2]; 225137267Sjhb while (missed-- > 0) { 226151658Sjhb if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) { 227151658Sjhb if (*last == '+') { 228151658Sjhb *last = '*'; 229151658Sjhb break; 230151658Sjhb } else 231151658Sjhb *last = '+'; 232151658Sjhb } else if (space) { 233151658Sjhb strcat(ie->ie_fullname, " +"); 234151658Sjhb space = 0; 23572237Sjhb } else 236151658Sjhb strcat(ie->ie_fullname, "+"); 23772237Sjhb } 238151658Sjhb 239151658Sjhb /* 240151658Sjhb * If this event has an ithread, update it's priority and 241151658Sjhb * name. 242151658Sjhb */ 243151658Sjhb if (ie->ie_thread != NULL) 244151658Sjhb ithread_update(ie->ie_thread); 245151658Sjhb CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname); 24672237Sjhb} 24772237Sjhb 24872237Sjhbint 249178092Sjeffintr_event_create(struct intr_event **event, void *source,int flags, int irq, 250177940Sjhb void (*pre_ithread)(void *), void (*post_ithread)(void *), 251177940Sjhb void (*post_filter)(void *), int (*assign_cpu)(void *, u_char), 252177940Sjhb const char *fmt, ...) 253169320Spiso{ 254169320Spiso struct intr_event *ie; 255169320Spiso va_list ap; 25672237Sjhb 257169320Spiso /* The only valid flag during creation is IE_SOFT. 
*/ 258169320Spiso if ((flags & ~IE_SOFT) != 0) 259169320Spiso return (EINVAL); 260169320Spiso ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO); 261169320Spiso ie->ie_source = source; 262177940Sjhb ie->ie_pre_ithread = pre_ithread; 263177940Sjhb ie->ie_post_ithread = post_ithread; 264177940Sjhb ie->ie_post_filter = post_filter; 265177181Sjhb ie->ie_assign_cpu = assign_cpu; 266169320Spiso ie->ie_flags = flags; 267178092Sjeff ie->ie_irq = irq; 268177181Sjhb ie->ie_cpu = NOCPU; 269169320Spiso TAILQ_INIT(&ie->ie_handlers); 270169320Spiso mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF); 271169320Spiso 272169320Spiso va_start(ap, fmt); 273169320Spiso vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap); 274169320Spiso va_end(ap); 275169320Spiso strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); 276178092Sjeff mtx_lock(&event_lock); 277169320Spiso TAILQ_INSERT_TAIL(&event_list, ie, ie_list); 278178092Sjeff mtx_unlock(&event_lock); 279169320Spiso if (event != NULL) 280169320Spiso *event = ie; 281169320Spiso CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name); 282169320Spiso return (0); 283169320Spiso} 284169320Spiso 285177181Sjhb/* 286177181Sjhb * Bind an interrupt event to the specified CPU. Note that not all 287177181Sjhb * platforms support binding an interrupt to a CPU. For those 288177181Sjhb * platforms this request will fail. For supported platforms, any 289177181Sjhb * associated ithreads as well as the primary interrupt context will 290177181Sjhb * be bound to the specificed CPU. Using a cpu id of NOCPU unbinds 291177181Sjhb * the interrupt event. 292177181Sjhb */ 293151658Sjhbint 294177181Sjhbintr_event_bind(struct intr_event *ie, u_char cpu) 295177181Sjhb{ 296178092Sjeff cpuset_t mask; 297178092Sjeff lwpid_t id; 298177181Sjhb int error; 299177181Sjhb 300177181Sjhb /* Need a CPU to bind to. 
*/ 301177181Sjhb if (cpu != NOCPU && CPU_ABSENT(cpu)) 302177181Sjhb return (EINVAL); 303177181Sjhb 304177181Sjhb if (ie->ie_assign_cpu == NULL) 305177181Sjhb return (EOPNOTSUPP); 306178092Sjeff /* 307178092Sjeff * If we have any ithreads try to set their mask first since this 308178092Sjeff * can fail. 309178092Sjeff */ 310177181Sjhb mtx_lock(&ie->ie_lock); 311178092Sjeff if (ie->ie_thread != NULL) { 312178092Sjeff CPU_ZERO(&mask); 313178092Sjeff if (cpu == NOCPU) 314178092Sjeff CPU_COPY(cpuset_root, &mask); 315178092Sjeff else 316178092Sjeff CPU_SET(cpu, &mask); 317178092Sjeff id = ie->ie_thread->it_thread->td_tid; 318177181Sjhb mtx_unlock(&ie->ie_lock); 319178092Sjeff error = cpuset_setthread(id, &mask); 320178092Sjeff if (error) 321178092Sjeff return (error); 322178092Sjeff } else 323178092Sjeff mtx_unlock(&ie->ie_lock); 324177181Sjhb error = ie->ie_assign_cpu(ie->ie_source, cpu); 325177181Sjhb if (error) 326177181Sjhb return (error); 327177181Sjhb mtx_lock(&ie->ie_lock); 328177181Sjhb ie->ie_cpu = cpu; 329177181Sjhb mtx_unlock(&ie->ie_lock); 330178092Sjeff 331178092Sjeff return (error); 332178092Sjeff} 333178092Sjeff 334178092Sjeffstatic struct intr_event * 335178092Sjeffintr_lookup(int irq) 336178092Sjeff{ 337178092Sjeff struct intr_event *ie; 338178092Sjeff 339178092Sjeff mtx_lock(&event_lock); 340178092Sjeff TAILQ_FOREACH(ie, &event_list, ie_list) 341178092Sjeff if (ie->ie_irq == irq && 342178092Sjeff (ie->ie_flags & IE_SOFT) == 0 && 343178092Sjeff TAILQ_FIRST(&ie->ie_handlers) != NULL) 344178092Sjeff break; 345178092Sjeff mtx_unlock(&event_lock); 346178092Sjeff return (ie); 347178092Sjeff} 348178092Sjeff 349178092Sjeffint 350178092Sjeffintr_setaffinity(int irq, void *m) 351178092Sjeff{ 352178092Sjeff struct intr_event *ie; 353178092Sjeff cpuset_t *mask; 354178092Sjeff u_char cpu; 355178092Sjeff int error; 356178092Sjeff int n; 357178092Sjeff 358178092Sjeff mask = m; 359178092Sjeff error = 0; 360178092Sjeff cpu = NOCPU; 361178092Sjeff /* 362178092Sjeff * If 
we're setting all cpus we can unbind. Otherwise make sure 363178092Sjeff * only one cpu is in the set. 364178092Sjeff */ 365178092Sjeff if (CPU_CMP(cpuset_root, mask)) { 366178092Sjeff for (n = 0; n < CPU_SETSIZE; n++) { 367178092Sjeff if (!CPU_ISSET(n, mask)) 368178092Sjeff continue; 369178092Sjeff if (cpu != NOCPU) 370178092Sjeff return (EINVAL); 371178092Sjeff cpu = (u_char)n; 372178092Sjeff } 373178092Sjeff } 374178092Sjeff ie = intr_lookup(irq); 375178092Sjeff if (ie == NULL) 376178092Sjeff return (ESRCH); 377178092Sjeff intr_event_bind(ie, cpu); 378178092Sjeff return (error); 379178092Sjeff} 380178092Sjeff 381178092Sjeffint 382178092Sjeffintr_getaffinity(int irq, void *m) 383178092Sjeff{ 384178092Sjeff struct intr_event *ie; 385178092Sjeff cpuset_t *mask; 386178092Sjeff 387178092Sjeff mask = m; 388178092Sjeff ie = intr_lookup(irq); 389178092Sjeff if (ie == NULL) 390178092Sjeff return (ESRCH); 391178092Sjeff CPU_ZERO(mask); 392178092Sjeff mtx_lock(&ie->ie_lock); 393178092Sjeff if (ie->ie_cpu == NOCPU) 394178092Sjeff CPU_COPY(cpuset_root, mask); 395178092Sjeff else 396178092Sjeff CPU_SET(ie->ie_cpu, mask); 397178092Sjeff mtx_unlock(&ie->ie_lock); 398177181Sjhb return (0); 399177181Sjhb} 400177181Sjhb 401177181Sjhbint 402151658Sjhbintr_event_destroy(struct intr_event *ie) 403151658Sjhb{ 404151658Sjhb 405178092Sjeff mtx_lock(&event_lock); 406151658Sjhb mtx_lock(&ie->ie_lock); 407151658Sjhb if (!TAILQ_EMPTY(&ie->ie_handlers)) { 408151658Sjhb mtx_unlock(&ie->ie_lock); 409178092Sjeff mtx_unlock(&event_lock); 410151658Sjhb return (EBUSY); 411151658Sjhb } 412151658Sjhb TAILQ_REMOVE(&event_list, ie, ie_list); 413157728Sjhb#ifndef notyet 414157728Sjhb if (ie->ie_thread != NULL) { 415157728Sjhb ithread_destroy(ie->ie_thread); 416157728Sjhb ie->ie_thread = NULL; 417157728Sjhb } 418157728Sjhb#endif 419151658Sjhb mtx_unlock(&ie->ie_lock); 420178092Sjeff mtx_unlock(&event_lock); 421151658Sjhb mtx_destroy(&ie->ie_lock); 422151658Sjhb free(ie, M_ITHREAD); 423151658Sjhb return 
(0); 424151658Sjhb} 425151658Sjhb 426169320Spiso#ifndef INTR_FILTER 427151658Sjhbstatic struct intr_thread * 428151658Sjhbithread_create(const char *name) 429151658Sjhb{ 430151658Sjhb struct intr_thread *ithd; 431151658Sjhb struct thread *td; 432151658Sjhb int error; 433151658Sjhb 434151658Sjhb ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); 435151658Sjhb 436173004Sjulian error = kproc_kthread_add(ithread_loop, ithd, &intrproc, 437173004Sjulian &td, RFSTOPPED | RFHIGHPID, 438173051Sjulian 0, "intr", "%s", name); 439151658Sjhb if (error) 440172836Sjulian panic("kproc_create() failed with %d", error); 441170307Sjeff thread_lock(td); 442164936Sjulian sched_class(td, PRI_ITHD); 443103216Sjulian TD_SET_IWAIT(td); 444170307Sjeff thread_unlock(td); 445151658Sjhb td->td_pflags |= TDP_ITHREAD; 446151658Sjhb ithd->it_thread = td; 447151658Sjhb CTR2(KTR_INTR, "%s: created %s", __func__, name); 448151658Sjhb return (ithd); 44972237Sjhb} 450169320Spiso#else 451169320Spisostatic struct intr_thread * 452169320Spisoithread_create(const char *name, struct intr_handler *ih) 453169320Spiso{ 454169320Spiso struct intr_thread *ithd; 455169320Spiso struct thread *td; 456169320Spiso int error; 45772237Sjhb 458169320Spiso ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); 459169320Spiso 460173153Sjulian error = kproc_kthread_add(ithread_loop, ih, &intrproc, 461173004Sjulian &td, RFSTOPPED | RFHIGHPID, 462173051Sjulian 0, "intr", "%s", name); 463169320Spiso if (error) 464172836Sjulian panic("kproc_create() failed with %d", error); 465170307Sjeff thread_lock(td); 466169320Spiso sched_class(td, PRI_ITHD); 467169320Spiso TD_SET_IWAIT(td); 468170307Sjeff thread_unlock(td); 469169320Spiso td->td_pflags |= TDP_ITHREAD; 470169320Spiso ithd->it_thread = td; 471169320Spiso CTR2(KTR_INTR, "%s: created %s", __func__, name); 472169320Spiso return (ithd); 473169320Spiso} 474169320Spiso#endif 475169320Spiso 476151658Sjhbstatic void 
477151658Sjhbithread_destroy(struct intr_thread *ithread) 47872237Sjhb{ 47983366Sjulian struct thread *td; 48072237Sjhb 481157784Sscottl CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name); 482151658Sjhb td = ithread->it_thread; 483170307Sjeff thread_lock(td); 48476771Sjhb ithread->it_flags |= IT_DEAD; 485103216Sjulian if (TD_AWAITING_INTR(td)) { 486103216Sjulian TD_CLR_IWAIT(td); 487166188Sjeff sched_add(td, SRQ_INTR); 48872237Sjhb } 489170307Sjeff thread_unlock(td); 49072237Sjhb} 49172237Sjhb 492169320Spiso#ifndef INTR_FILTER 49372237Sjhbint 494151658Sjhbintr_event_add_handler(struct intr_event *ie, const char *name, 495166901Spiso driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, 496166901Spiso enum intr_type flags, void **cookiep) 49772237Sjhb{ 498151658Sjhb struct intr_handler *ih, *temp_ih; 499151658Sjhb struct intr_thread *it; 50072237Sjhb 501166901Spiso if (ie == NULL || name == NULL || (handler == NULL && filter == NULL)) 50272237Sjhb return (EINVAL); 50372237Sjhb 504151658Sjhb /* Allocate and populate an interrupt handler structure. */ 505151658Sjhb ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO); 506166901Spiso ih->ih_filter = filter; 50772237Sjhb ih->ih_handler = handler; 50872237Sjhb ih->ih_argument = arg; 50972237Sjhb ih->ih_name = name; 510151658Sjhb ih->ih_event = ie; 51172237Sjhb ih->ih_pri = pri; 512166901Spiso if (flags & INTR_EXCL) 51372237Sjhb ih->ih_flags = IH_EXCLUSIVE; 51472237Sjhb if (flags & INTR_MPSAFE) 51572237Sjhb ih->ih_flags |= IH_MPSAFE; 51672237Sjhb if (flags & INTR_ENTROPY) 51772237Sjhb ih->ih_flags |= IH_ENTROPY; 51872237Sjhb 519151658Sjhb /* We can only have one exclusive handler in a event. 
*/ 520151658Sjhb mtx_lock(&ie->ie_lock); 521151658Sjhb if (!TAILQ_EMPTY(&ie->ie_handlers)) { 522151658Sjhb if ((flags & INTR_EXCL) || 523151658Sjhb (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) { 524151658Sjhb mtx_unlock(&ie->ie_lock); 525151658Sjhb free(ih, M_ITHREAD); 526151658Sjhb return (EINVAL); 527151658Sjhb } 528122002Sjhb } 52972237Sjhb 530151658Sjhb /* Add the new handler to the event in priority order. */ 531151658Sjhb TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) { 532151658Sjhb if (temp_ih->ih_pri > ih->ih_pri) 533151658Sjhb break; 534151658Sjhb } 53572237Sjhb if (temp_ih == NULL) 536151658Sjhb TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next); 53772237Sjhb else 53872237Sjhb TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next); 539151658Sjhb intr_event_update(ie); 54072237Sjhb 541151658Sjhb /* Create a thread if we need one. */ 542166901Spiso while (ie->ie_thread == NULL && handler != NULL) { 543151658Sjhb if (ie->ie_flags & IE_ADDING_THREAD) 544157815Sjhb msleep(ie, &ie->ie_lock, 0, "ithread", 0); 545151658Sjhb else { 546151658Sjhb ie->ie_flags |= IE_ADDING_THREAD; 547151658Sjhb mtx_unlock(&ie->ie_lock); 548151658Sjhb it = ithread_create("intr: newborn"); 549151658Sjhb mtx_lock(&ie->ie_lock); 550151658Sjhb ie->ie_flags &= ~IE_ADDING_THREAD; 551151658Sjhb ie->ie_thread = it; 552151658Sjhb it->it_event = ie; 553151658Sjhb ithread_update(it); 554151658Sjhb wakeup(ie); 555151658Sjhb } 556151658Sjhb } 557151658Sjhb CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name, 558151658Sjhb ie->ie_name); 559151658Sjhb mtx_unlock(&ie->ie_lock); 560151658Sjhb 56172237Sjhb if (cookiep != NULL) 56272237Sjhb *cookiep = ih; 56372237Sjhb return (0); 56472237Sjhb} 565169320Spiso#else 566169320Spisoint 567169320Spisointr_event_add_handler(struct intr_event *ie, const char *name, 568169320Spiso driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, 569169320Spiso enum intr_type flags, void **cookiep) 570169320Spiso{ 571169320Spiso struct intr_handler 
*ih, *temp_ih; 572169320Spiso struct intr_thread *it; 57372237Sjhb 574169320Spiso if (ie == NULL || name == NULL || (handler == NULL && filter == NULL)) 575169320Spiso return (EINVAL); 576169320Spiso 577169320Spiso /* Allocate and populate an interrupt handler structure. */ 578169320Spiso ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO); 579169320Spiso ih->ih_filter = filter; 580169320Spiso ih->ih_handler = handler; 581169320Spiso ih->ih_argument = arg; 582169320Spiso ih->ih_name = name; 583169320Spiso ih->ih_event = ie; 584169320Spiso ih->ih_pri = pri; 585169320Spiso if (flags & INTR_EXCL) 586169320Spiso ih->ih_flags = IH_EXCLUSIVE; 587169320Spiso if (flags & INTR_MPSAFE) 588169320Spiso ih->ih_flags |= IH_MPSAFE; 589169320Spiso if (flags & INTR_ENTROPY) 590169320Spiso ih->ih_flags |= IH_ENTROPY; 591169320Spiso 592169320Spiso /* We can only have one exclusive handler in a event. */ 593169320Spiso mtx_lock(&ie->ie_lock); 594169320Spiso if (!TAILQ_EMPTY(&ie->ie_handlers)) { 595169320Spiso if ((flags & INTR_EXCL) || 596169320Spiso (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) { 597169320Spiso mtx_unlock(&ie->ie_lock); 598169320Spiso free(ih, M_ITHREAD); 599169320Spiso return (EINVAL); 600169320Spiso } 601169320Spiso } 602169320Spiso 603169320Spiso /* Add the new handler to the event in priority order. */ 604169320Spiso TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) { 605169320Spiso if (temp_ih->ih_pri > ih->ih_pri) 606169320Spiso break; 607169320Spiso } 608169320Spiso if (temp_ih == NULL) 609169320Spiso TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next); 610169320Spiso else 611169320Spiso TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next); 612169320Spiso intr_event_update(ie); 613169320Spiso 614169320Spiso /* For filtered handlers, create a private ithread to run on. 
*/ 615169320Spiso if (filter != NULL && handler != NULL) { 616169320Spiso mtx_unlock(&ie->ie_lock); 617169320Spiso it = ithread_create("intr: newborn", ih); 618169320Spiso mtx_lock(&ie->ie_lock); 619169320Spiso it->it_event = ie; 620169320Spiso ih->ih_thread = it; 621169320Spiso ithread_update(it); // XXX - do we really need this?!?!? 622169320Spiso } else { /* Create the global per-event thread if we need one. */ 623169320Spiso while (ie->ie_thread == NULL && handler != NULL) { 624169320Spiso if (ie->ie_flags & IE_ADDING_THREAD) 625169320Spiso msleep(ie, &ie->ie_lock, 0, "ithread", 0); 626169320Spiso else { 627169320Spiso ie->ie_flags |= IE_ADDING_THREAD; 628169320Spiso mtx_unlock(&ie->ie_lock); 629169320Spiso it = ithread_create("intr: newborn", ih); 630169320Spiso mtx_lock(&ie->ie_lock); 631169320Spiso ie->ie_flags &= ~IE_ADDING_THREAD; 632169320Spiso ie->ie_thread = it; 633169320Spiso it->it_event = ie; 634169320Spiso ithread_update(it); 635169320Spiso wakeup(ie); 636169320Spiso } 637169320Spiso } 638169320Spiso } 639169320Spiso CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name, 640169320Spiso ie->ie_name); 641169320Spiso mtx_unlock(&ie->ie_lock); 642169320Spiso 643169320Spiso if (cookiep != NULL) 644169320Spiso *cookiep = ih; 645169320Spiso return (0); 646169320Spiso} 647169320Spiso#endif 648169320Spiso 649165125Sjhb/* 650165125Sjhb * Return the ie_source field from the intr_event an intr_handler is 651165125Sjhb * associated with. 
652165125Sjhb */ 653165125Sjhbvoid * 654165125Sjhbintr_handler_source(void *cookie) 655165125Sjhb{ 656165125Sjhb struct intr_handler *ih; 657165125Sjhb struct intr_event *ie; 658165125Sjhb 659165125Sjhb ih = (struct intr_handler *)cookie; 660165125Sjhb if (ih == NULL) 661165125Sjhb return (NULL); 662165125Sjhb ie = ih->ih_event; 663165125Sjhb KASSERT(ie != NULL, 664165125Sjhb ("interrupt handler \"%s\" has a NULL interrupt event", 665165125Sjhb ih->ih_name)); 666165125Sjhb return (ie->ie_source); 667165125Sjhb} 668165125Sjhb 669169320Spiso#ifndef INTR_FILTER 67072237Sjhbint 671151658Sjhbintr_event_remove_handler(void *cookie) 67272237Sjhb{ 673151658Sjhb struct intr_handler *handler = (struct intr_handler *)cookie; 674151658Sjhb struct intr_event *ie; 67572237Sjhb#ifdef INVARIANTS 676151658Sjhb struct intr_handler *ih; 67772237Sjhb#endif 678151658Sjhb#ifdef notyet 679151658Sjhb int dead; 680151658Sjhb#endif 68172237Sjhb 68272759Sjhb if (handler == NULL) 68372237Sjhb return (EINVAL); 684151658Sjhb ie = handler->ih_event; 685151658Sjhb KASSERT(ie != NULL, 686151658Sjhb ("interrupt handler \"%s\" has a NULL interrupt event", 687165124Sjhb handler->ih_name)); 688151658Sjhb mtx_lock(&ie->ie_lock); 68987593Sobrien CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name, 690151658Sjhb ie->ie_name); 69172237Sjhb#ifdef INVARIANTS 692151658Sjhb TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 69372759Sjhb if (ih == handler) 69472759Sjhb goto ok; 695151658Sjhb mtx_unlock(&ie->ie_lock); 696151658Sjhb panic("interrupt handler \"%s\" not found in interrupt event \"%s\"", 697151658Sjhb ih->ih_name, ie->ie_name); 69872759Sjhbok: 69972237Sjhb#endif 70072839Sjhb /* 701151658Sjhb * If there is no ithread, then just remove the handler and return. 702151658Sjhb * XXX: Note that an INTR_FAST handler might be running on another 703151658Sjhb * CPU! 
704151658Sjhb */ 705151658Sjhb if (ie->ie_thread == NULL) { 706151658Sjhb TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 707151658Sjhb mtx_unlock(&ie->ie_lock); 708151658Sjhb free(handler, M_ITHREAD); 709151658Sjhb return (0); 710151658Sjhb } 711151658Sjhb 712151658Sjhb /* 71372839Sjhb * If the interrupt thread is already running, then just mark this 71472839Sjhb * handler as being dead and let the ithread do the actual removal. 715124505Struckman * 716124505Struckman * During a cold boot while cold is set, msleep() does not sleep, 717124505Struckman * so we have to remove the handler here rather than letting the 718124505Struckman * thread do it. 71972839Sjhb */ 720170307Sjeff thread_lock(ie->ie_thread->it_thread); 721151658Sjhb if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) { 72272839Sjhb handler->ih_flags |= IH_DEAD; 72372839Sjhb 72472839Sjhb /* 72572839Sjhb * Ensure that the thread will process the handler list 72672839Sjhb * again and remove this handler if it has already passed 72772839Sjhb * it on the list. 72872839Sjhb */ 729151658Sjhb ie->ie_thread->it_need = 1; 730151658Sjhb } else 731151658Sjhb TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 732170307Sjeff thread_unlock(ie->ie_thread->it_thread); 733151658Sjhb while (handler->ih_flags & IH_DEAD) 734157815Sjhb msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0); 735151658Sjhb intr_event_update(ie); 736151658Sjhb#ifdef notyet 737151658Sjhb /* 738151658Sjhb * XXX: This could be bad in the case of ppbus(8). Also, I think 739151658Sjhb * this could lead to races of stale data when servicing an 740151658Sjhb * interrupt. 
741151658Sjhb */ 742151658Sjhb dead = 1; 743151658Sjhb TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 744151658Sjhb if (!(ih->ih_flags & IH_FAST)) { 745151658Sjhb dead = 0; 746151658Sjhb break; 747151658Sjhb } 748151658Sjhb } 749151658Sjhb if (dead) { 750151658Sjhb ithread_destroy(ie->ie_thread); 751151658Sjhb ie->ie_thread = NULL; 752151658Sjhb } 753151658Sjhb#endif 754151658Sjhb mtx_unlock(&ie->ie_lock); 75576771Sjhb free(handler, M_ITHREAD); 75672237Sjhb return (0); 75772237Sjhb} 75872237Sjhb 759177940Sjhbstatic int 760151658Sjhbintr_event_schedule_thread(struct intr_event *ie) 76172759Sjhb{ 762151658Sjhb struct intr_entropy entropy; 763151658Sjhb struct intr_thread *it; 76483366Sjulian struct thread *td; 765101176Sjulian struct thread *ctd; 76672759Sjhb struct proc *p; 76772759Sjhb 76872759Sjhb /* 76972759Sjhb * If no ithread or no handlers, then we have a stray interrupt. 77072759Sjhb */ 771151658Sjhb if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || 772151658Sjhb ie->ie_thread == NULL) 77372759Sjhb return (EINVAL); 77472759Sjhb 775101176Sjulian ctd = curthread; 776151658Sjhb it = ie->ie_thread; 777151658Sjhb td = it->it_thread; 778133191Srwatson p = td->td_proc; 779151658Sjhb 78072759Sjhb /* 78172759Sjhb * If any of the handlers for this ithread claim to be good 78272759Sjhb * sources of entropy, then gather some. 78372759Sjhb */ 784151658Sjhb if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) { 785133191Srwatson CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__, 786173004Sjulian p->p_pid, td->td_name); 787151658Sjhb entropy.event = (uintptr_t)ie; 788151658Sjhb entropy.td = ctd; 78972759Sjhb random_harvest(&entropy, sizeof(entropy), 2, 0, 79072759Sjhb RANDOM_INTERRUPT); 79172759Sjhb } 79272759Sjhb 793151658Sjhb KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name)); 79472759Sjhb 79572759Sjhb /* 79672759Sjhb * Set it_need to tell the thread to keep running if it is already 797170307Sjeff * running. 
	 * Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 */
	it->it_need = 1;
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		/* Thread is idle in iwait: wake it and put it on a runqueue. */
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		/* Already running/runnable; it_need set above is enough. */
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
#else
/*
 * Detach an interrupt handler from its event (INTR_FILTER version).
 *
 * 'cookie' is the struct intr_handler pointer returned when the handler
 * was added.  If the handler's ithread (private or the event's shared one)
 * is currently active, the handler is only marked IH_DEAD and this thread
 * msleep()s until the ithread performs the actual unlink and wakeup.
 * Returns 0 on success or EINVAL for a NULL cookie.
 */
int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
	struct intr_thread *it;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	/* Sanity check: the cookie must actually be on this event's list. */
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    ih->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there are no ithreads (per event and per handler), then
	 * just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another CPU!
	 */
	if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/* Private or global ithread? */
	it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	thread_lock(it->it_thread);
	if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		it->it_need = 1;
	} else
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
	thread_unlock(it->it_thread);
	/* Wait (releasing ie_lock) until the ithread clears IH_DEAD. */
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	/*
	 * At this point, the handler has been disconnected from the event,
	 * so we can kill the private ithread if any.
	 */
	if (handler->ih_thread) {
		ithread_destroy(handler->ih_thread);
		handler->ih_thread = NULL;
	}
	intr_event_update(ie);
#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		/*
		 * NOTE(review): this test looks like it should inspect
		 * 'ih' (the iterator), not 'handler' (already known to be
		 * non-NULL here), so 'dead' could never remain set once a
		 * handler exists -- confirm before ever enabling 'notyet'.
		 */
		if (handler != NULL) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

/*
 * Schedule the given interrupt thread to run for event 'ie'
 * (INTR_FILTER version: 'it' may be a per-handler private ithread).
 * Returns EINVAL for a stray interrupt (no event, handlers, or thread).
 */
static int
intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
{
	struct intr_entropy entropy;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
		return (EINVAL);

	ctd = curthread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, td->td_name);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 */
	it->it_need = 1;
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
#endif

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct intr_event *ie;
	int error;

	/* Software interrupts are never entropy sources. */
	if (flags & INTR_ENTROPY)
		return (EINVAL);

	ie = (eventp != NULL) ?
	    *eventp : NULL;

	if (ie != NULL) {
		/* An existing event must itself be a software-interrupt event. */
		if (!(ie->ie_flags & IE_SOFT))
			return (EINVAL);
	} else {
		error = intr_event_create(&ie, NULL, IE_SOFT, 0,
		    NULL, NULL, NULL, NULL, "swi%d:", pri);
		if (error)
			return (error);
		if (eventp != NULL)
			*eventp = ie;
	}
	error = intr_event_add_handler(ie, name, NULL, handler, arg,
	    (pri * RQ_PPQ) + PI_SOFT, flags, cookiep);
	if (error)
		return (error);
	if (pri == SWI_CLOCK) {
		/* Exclude the clock swi's thread from the load average. */
		struct proc *p;
		p = ie->ie_thread->it_thread->td_proc;
		PROC_LOCK(p);
		p->p_flag |= P_NOLOAD;
		PROC_UNLOCK(p);
	}
	return (0);
}

/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intr_handler *ih = (struct intr_handler *)cookie;
	struct intr_event *ie = ih->ih_event;
	int error;

	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
	    ih->ih_need);

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 * (Release store so the flag is visible before any wakeup.)
	 */
	atomic_store_rel_int(&ih->ih_need, 1);

	if (!(flags & SWI_DELAY)) {
		PCPU_INC(cnt.v_soft);
#ifdef INTR_FILTER
		error = intr_event_schedule_thread(ie, ie->ie_thread);
#else
		error = intr_event_schedule_thread(ie);
#endif
		KASSERT(error == 0, ("stray software interrupt"));
	}
}

/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

	return (intr_event_remove_handler(cookie));
}

#ifdef INTR_FILTER
/*
 * Execute the single handler owned by a private per-handler ithread
 * (INTR_FILTER only).  If the handler is marked IH_DEAD, unlink it and
 * wake up the thread sleeping in intr_event_remove_handler() instead of
 * running it.  Giant is taken around non-MPSAFE handlers.
 */
static void
priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
{
	struct intr_event *ie;

	ie = ih->ih_event;
	/*
	 * If this handler is marked for death, remove it from
	 * the list of handlers and wake up the sleeper.
	 */
	if (ih->ih_flags & IH_DEAD) {
		mtx_lock(&ie->ie_lock);
		TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
		ih->ih_flags &= ~IH_DEAD;
		wakeup(ih);
		mtx_unlock(&ie->ie_lock);
		return;
	}

	/* Execute this handler. */
	CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
	    __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
	    ih->ih_name, ih->ih_flags);

	if (!(ih->ih_flags & IH_MPSAFE))
		mtx_lock(&Giant);
	ih->ih_handler(ih->ih_argument);
	if (!(ih->ih_flags & IH_MPSAFE))
		mtx_unlock(&Giant);
}
#endif

/*
 * Run every eligible handler attached to interrupt event 'ie' from its
 * ithread.  Handlers marked IH_DEAD are unlinked and their remover is
 * woken instead of being run; filter-only handlers are skipped; software
 * events only run handlers whose ih_need flag is set.
 */
static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{
	struct intr_handler *ih, *ihn;

	/* Interrupt handlers should not sleep. */
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_NO_SLEEPING();
	TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {

		/*
		 * If this handler is marked for death, remove it from
		 * the list of handlers and wake up the sleeper.
		 */
		if (ih->ih_flags & IH_DEAD) {
			mtx_lock(&ie->ie_lock);
			TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
			ih->ih_flags &= ~IH_DEAD;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
			continue;
		}

		/* Skip filter only handlers */
		if (ih->ih_handler == NULL)
			continue;

		/*
		 * For software interrupt threads, we only execute
		 * handlers that have their need flag set.  Hardware
		 * interrupt threads always invoke all of their handlers.
		 */
		if (ie->ie_flags & IE_SOFT) {
			if (!ih->ih_need)
				continue;
			else
				atomic_store_rel_int(&ih->ih_need, 0);
		}

		/* Execute this handler.
		 */
		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
		    __func__, p->p_pid, (void *)ih->ih_handler,
		    ih->ih_argument, ih->ih_name, ih->ih_flags);

		/* Non-MPSAFE handlers still run under Giant. */
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_lock(&Giant);
		ih->ih_handler(ih->ih_argument);
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_unlock(&Giant);
	}
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_SLEEPING_OK();

	/*
	 * Interrupt storm handling:
	 *
	 * If this interrupt source is currently storming, then throttle
	 * it to only fire the handler once per clock tick.
	 *
	 * If this interrupt source is not currently storming, but the
	 * number of back to back interrupts exceeds the storm threshold,
	 * then enter storming mode.
	 */
	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
	    !(ie->ie_flags & IE_SOFT)) {
		/* Report the message only once every second. */
		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
			printf(
	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
			    ie->ie_name);
		}
		pause("istorm", 1);
	} else
		ie->ie_count++;

	/*
	 * Now that all the handlers have had a chance to run, reenable
	 * the interrupt source.
	 */
	if (ie->ie_post_ithread != NULL)
		ie->ie_post_ithread(ie->ie_source);
}

#ifndef INTR_FILTER
/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	ithd = (struct intr_thread *)arg;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 */
		while (ithd->it_need) {
			/*
			 * This might need a full read and write barrier
			 * to make sure that this write posts before any
			 * of the memory or device accesses in the
			 * handlers.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
			ithread_execute_handlers(p, ie);
		}
		/* About to go idle: we must not hold any locks here. */
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL | SWT_IWAIT, NULL);
		}
		thread_unlock(td);
	}
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:                    the event connected to this interrupt.
 * o frame:                 some archs (i.e. i386) pass a frame to some.
 *                          handlers as their main argument.
 * Return value:
 * o 0:                        everything ok.
 * o EINVAL:                   stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_handler *ih;
	struct thread *td;
	int error, ret, thread;

	td = curthread;

	/* An interrupt with no event or handlers is a stray interrupt. */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	/*
	 * Execute fast interrupt handlers directly.
	 * To support clock handlers, if a handler registers
	 * with a NULL argument, then we pass it a pointer to
	 * a trapframe as its argument.
	 */
	td->td_intr_nesting_level++;
	thread = 0;		/* Set once any handler needs the ithread. */
	ret = 0;
	critical_enter();
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		/* A handler with no filter must run in the ithread. */
		if (ih->ih_filter == NULL) {
			thread = 1;
			continue;
		}
		CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_argument == NULL ? frame :
		    ih->ih_argument, ih->ih_name);
		if (ih->ih_argument == NULL)
			ret = ih->ih_filter(frame);
		else
			ret = ih->ih_filter(ih->ih_argument);
		/*
		 * Wrapper handler special handling:
		 *
		 * in some particular cases (like pccard and pccbb),
		 * the _real_ device handler is wrapped in a couple of
		 * functions - a filter wrapper and an ithread wrapper.
		 * In this case (and just in this case), the filter wrapper
		 * could ask the system to schedule the ithread and mask
		 * the interrupt source if the wrapped handler is composed
		 * of just an ithread handler.
		 *
		 * TODO: write a generic wrapper to avoid people rolling
		 * their own
		 */
		if (!thread) {
			if (ret == FILTER_SCHEDULE_THREAD)
				thread = 1;
		}
	}

	/* Mask the source if an ithread will run; otherwise re-enable it. */
	if (thread) {
		if (ie->ie_pre_ithread != NULL)
			ie->ie_pre_ithread(ie->ie_source);
	} else {
		if (ie->ie_post_filter != NULL)
			ie->ie_post_filter(ie->ie_source);
	}

	/* Schedule the ithread if needed. */
	if (thread) {
		error = intr_event_schedule_thread(ie);
		KASSERT(error == 0, ("bad stray interrupt"));
	}
	critical_exit();
	td->td_intr_nesting_level--;
	return (0);
}
#else
/*
 * This is the main code for interrupt threads (INTR_FILTER version).
 * 'arg' is the struct intr_handler; a private ithread services just that
 * handler, a shared one services the whole event.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_handler *ih;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	int priv;

	td = curthread;
	p = td->td_proc;
	ih = (struct intr_handler *)arg;
	priv = (ih->ih_thread != NULL) ? 1 : 0;
	ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 */
		while (ithd->it_need) {
			/*
			 * This might need a full read and write barrier
			 * to make sure that this write posts before any
			 * of the memory or device accesses in the
			 * handlers.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
			if (priv)
				priv_ithread_execute_handler(p, ih);
			else
				ithread_execute_handlers(p, ie);
		}
		/* About to go idle: we must not hold any locks here. */
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL | SWT_IWAIT, NULL);
		}
		thread_unlock(td);
	}
}

/*
 * Main loop for interrupt filter.
 *
 * Some architectures (i386, amd64 and arm) require the optional frame
 * parameter, and use it as the main argument for fast handler execution
 * when ih_argument == NULL.
 *
 * Return value:
 * o FILTER_STRAY:              No filter recognized the event, and no
 *                              filter-less handler is registered on this
 *                              line.
 * o FILTER_HANDLED:            A filter claimed the event and served it.
 * o FILTER_SCHEDULE_THREAD:    No filter claimed the event, but there's at
 *                              least one filter-less handler on this line.
 * o FILTER_HANDLED |
 *   FILTER_SCHEDULE_THREAD:    A filter claimed the event, and asked for
 *                              scheduling the per-handler ithread.
 *
 * In case an ithread has to be scheduled, in *ithd there will be a
 * pointer to a struct intr_thread containing the thread to be
 * scheduled.
 */

static int
intr_filter_loop(struct intr_event *ie, struct trapframe *frame,
    struct intr_thread **ithd)
{
	struct intr_handler *ih;
	void *arg;
	int ret, thread_only;

	ret = 0;
	thread_only = 0;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		/*
		 * Execute fast interrupt handlers directly.
		 * To support clock handlers, if a handler registers
		 * with a NULL argument, then we pass it a pointer to
		 * a trapframe as its argument.
		 */
		arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);

		CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_handler, arg, ih->ih_name);

		if (ih->ih_filter != NULL)
			ret = ih->ih_filter(arg);
		else {
			/* Filter-less handler: remember to schedule later. */
			thread_only = 1;
			continue;
		}

		/* First filter that claims the event wins. */
		if (ret & FILTER_STRAY)
			continue;
		else {
			*ithd = ih->ih_thread;
			return (ret);
		}
	}

	/*
	 * No filters handled the interrupt and we have at least
	 * one handler without a filter.  In this case, we schedule
	 * all of the filter-less handlers to run in the ithread.
	 */
	if (thread_only) {
		*ithd = ie->ie_thread;
		return (FILTER_SCHEDULE_THREAD);
	}
	return (FILTER_STRAY);
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:                    the event connected to this interrupt.
 * o frame:                 some archs (i.e. i386) pass a frame to some.
 *                          handlers as their main argument.
 * Return value:
 * o 0:                        everything ok.
 * o EINVAL:                   stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_thread *ithd;
	struct thread *td;
	int thread;

	ithd = NULL;
	td = curthread;

	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	td->td_intr_nesting_level++;
	thread = 0;
	critical_enter();
	thread = intr_filter_loop(ie, frame, &ithd);
	/* Re-enable the source if handled; mask it if an ithread must run. */
	if (thread & FILTER_HANDLED) {
		if (ie->ie_post_filter != NULL)
			ie->ie_post_filter(ie->ie_source);
	} else {
		if (ie->ie_pre_ithread != NULL)
			ie->ie_pre_ithread(ie->ie_source);
	}
	critical_exit();

	/* Interrupt storm logic */
	if (thread & FILTER_STRAY) {
		ie->ie_count++;
		/*
		 * NOTE(review): this prints on every stray below the
		 * threshold rather than throttling; the message wording
		 * looks suspect -- confirm intent against the non-filter
		 * storm handling in ithread_execute_handlers().
		 */
		if (ie->ie_count < intr_storm_threshold)
			printf("Interrupt stray detection not present\n");
	}

	/* Schedule an ithread if needed. */
	if (thread & FILTER_SCHEDULE_THREAD) {
		if (intr_event_schedule_thread(ie, ithd) != 0)
			panic("%s: impossible stray interrupt", __func__);
	}
	td->td_intr_nesting_level--;
	return (0);
}
#endif

#ifdef DDB
/*
 * Dump details about an interrupt handler
 */
static void
db_dump_intrhand(struct intr_handler *ih)
{
	int comma;

	db_printf("\t%-10s ", ih->ih_name);
	/* Print a short class tag derived from the handler's priority. */
	switch (ih->ih_pri) {
	case PI_REALTIME:
		db_printf("CLK ");
		break;
	case PI_AV:
		db_printf("AV  ");
		break;
	case PI_TTYHIGH:
	case PI_TTYLOW:
		db_printf("TTY ");
		break;
	case PI_TAPE:
		db_printf("TAPE");
		break;
	case PI_NET:
		db_printf("NET ");
		break;
	case PI_DISK:
	case PI_DISKLOW:
		db_printf("DISK");
		break;
	case PI_DULL:
		db_printf("DULL");
		break;
	default:
		if (ih->ih_pri >= PI_SOFT)
			db_printf("SWI ");
		else
			db_printf("%4u", ih->ih_pri);
		break;
	}
	db_printf(" ");
	db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
	db_printf("(%p)", ih->ih_argument);
	if (ih->ih_need ||
	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
	    IH_MPSAFE)) != 0) {
		db_printf(" {");
		comma = 0;
		if (ih->ih_flags & IH_EXCLUSIVE) {
			if (comma)
				db_printf(", ");
			db_printf("EXCL");
			comma = 1;
		}
		if (ih->ih_flags & IH_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ih->ih_flags & IH_DEAD) {
			if (comma)
				db_printf(", ");
			db_printf("DEAD");
			comma = 1;
		}
		if (ih->ih_flags & IH_MPSAFE) {
			if (comma)
				db_printf(", ");
			db_printf("MPSAFE");
			comma = 1;
		}
		if (ih->ih_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");
}

/*
 * Dump details about a event.
 */
void
db_dump_intr_event(struct intr_event *ie, int handlers)
{
	struct intr_handler *ih;
	struct intr_thread *it;
	int comma;

	db_printf("%s ", ie->ie_fullname);
	it = ie->ie_thread;
	if (it != NULL)
		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
	else
		db_printf("(no thread)");
	if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
	    (it != NULL && it->it_need)) {
		db_printf(" {");
		comma = 0;
		if (ie->ie_flags & IE_SOFT) {
			db_printf("SOFT");
			comma = 1;
		}
		if (ie->ie_flags & IE_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ie->ie_flags & IE_ADDING_THREAD) {
			if (comma)
				db_printf(", ");
			db_printf("ADDING_THREAD");
			comma = 1;
		}
		if (it != NULL && it->it_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");

	/* Optionally dump every attached handler as well. */
	if (handlers)
		TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
			db_dump_intrhand(ih);
}

/*
 * Dump data about interrupt handlers.
 * Modifiers: 'v' also dumps each event's handlers; 'a' includes events
 * with no handlers attached.
 */
DB_SHOW_COMMAND(intr, db_show_intr)
{
	struct intr_event *ie;
	int all, verbose;

	verbose = index(modif, 'v') != NULL;
	all = index(modif, 'a') != NULL;
	TAILQ_FOREACH(ie, &event_list, ie_list) {
		if (!all && TAILQ_EMPTY(&ie->ie_handlers))
			continue;
		db_dump_intr_event(ie, verbose);
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */

/*
 * Start standard software interrupt threads
 */
static void
start_softintr(void *dummy)
{

	if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
		panic("died while creating vm swi ithread");
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
    NULL);

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however is
 * machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
	   req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrcnt,
	    (char *)eintrcnt - (char *)intrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");

#ifdef DDB
/*
 * DDB command to dump the interrupt statistics.
 * intrnames is a packed list of NUL-terminated names parallel to the
 * intrcnt counter array; only nonzero counters are printed.
 */
DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
{
	u_long *i;
	char *cp;

	cp = intrnames;
	for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) {
		if (*cp == '\0')
			break;
		if (*i != 0)
			db_printf("%s\t%lu\n", cp, *i);
		cp += strlen(cp) + 1;
	}
}
#endif