/* kern_intr.c — FreeBSD SVN revision 194987 */
1139804Simp/*- 226156Sse * Copyright (c) 1997, Stefan Esser <se@freebsd.org> 326156Sse * All rights reserved. 426156Sse * 526156Sse * Redistribution and use in source and binary forms, with or without 626156Sse * modification, are permitted provided that the following conditions 726156Sse * are met: 826156Sse * 1. Redistributions of source code must retain the above copyright 926156Sse * notice unmodified, this list of conditions, and the following 1026156Sse * disclaimer. 1126156Sse * 2. Redistributions in binary form must reproduce the above copyright 1226156Sse * notice, this list of conditions and the following disclaimer in the 1326156Sse * documentation and/or other materials provided with the distribution. 1426156Sse * 1526156Sse * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 1626156Sse * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 1726156Sse * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 1826156Sse * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 1926156Sse * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 2026156Sse * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 2126156Sse * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 2226156Sse * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 2326156Sse * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 2426156Sse * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
2526156Sse */ 2626156Sse 27116182Sobrien#include <sys/cdefs.h> 28116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/kern_intr.c 194987 2009-06-25 18:35:19Z jhb $"); 2936887Sdfr 30121482Sjhb#include "opt_ddb.h" 31121482Sjhb 3241059Speter#include <sys/param.h> 3365822Sjhb#include <sys/bus.h> 34110860Salfred#include <sys/conf.h> 35178092Sjeff#include <sys/cpuset.h> 3665822Sjhb#include <sys/rtprio.h> 3741059Speter#include <sys/systm.h> 3866698Sjhb#include <sys/interrupt.h> 3966698Sjhb#include <sys/kernel.h> 4066698Sjhb#include <sys/kthread.h> 4166698Sjhb#include <sys/ktr.h> 42130128Sbde#include <sys/limits.h> 4374914Sjhb#include <sys/lock.h> 4426156Sse#include <sys/malloc.h> 4567365Sjhb#include <sys/mutex.h> 4666698Sjhb#include <sys/proc.h> 4772759Sjhb#include <sys/random.h> 4872237Sjhb#include <sys/resourcevar.h> 49139451Sjhb#include <sys/sched.h> 50177181Sjhb#include <sys/smp.h> 5177582Stmm#include <sys/sysctl.h> 52182024Skmacy#include <sys/syslog.h> 5366698Sjhb#include <sys/unistd.h> 5466698Sjhb#include <sys/vmmeter.h> 5566698Sjhb#include <machine/atomic.h> 5666698Sjhb#include <machine/cpu.h> 5767551Sjhb#include <machine/md_var.h> 5872237Sjhb#include <machine/stdarg.h> 59121482Sjhb#ifdef DDB 60121482Sjhb#include <ddb/ddb.h> 61121482Sjhb#include <ddb/db_sym.h> 62121482Sjhb#endif 6326156Sse 64151658Sjhb/* 65151658Sjhb * Describe an interrupt thread. There is one of these per interrupt event. 66151658Sjhb */ 67151658Sjhbstruct intr_thread { 68151658Sjhb struct intr_event *it_event; 69151658Sjhb struct thread *it_thread; /* Kernel thread. */ 70151658Sjhb int it_flags; /* (j) IT_* flags. */ 71151658Sjhb int it_need; /* Needs service. */ 7272759Sjhb}; 7372759Sjhb 74151658Sjhb/* Interrupt thread flags kept in it_flags */ 75151658Sjhb#define IT_DEAD 0x000001 /* Thread is waiting to exit. 
*/ 76151658Sjhb 77151658Sjhbstruct intr_entropy { 78151658Sjhb struct thread *td; 79151658Sjhb uintptr_t event; 80151658Sjhb}; 81151658Sjhb 82151658Sjhbstruct intr_event *clk_intr_event; 83151658Sjhbstruct intr_event *tty_intr_event; 84128339Sbdevoid *vm_ih; 85173004Sjulianstruct proc *intrproc; 8638244Sbde 8772237Sjhbstatic MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads"); 8872237Sjhb 89168850Snjlstatic int intr_storm_threshold = 1000; 90128331SjhbTUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold); 91128331SjhbSYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW, 92128331Sjhb &intr_storm_threshold, 0, 93128339Sbde "Number of consecutive interrupts before storm protection is enabled"); 94151658Sjhbstatic TAILQ_HEAD(, intr_event) event_list = 95151658Sjhb TAILQ_HEAD_INITIALIZER(event_list); 96178092Sjeffstatic struct mtx event_lock; 97178092SjeffMTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF); 98128331Sjhb 99151658Sjhbstatic void intr_event_update(struct intr_event *ie); 100169320Spiso#ifdef INTR_FILTER 101177940Sjhbstatic int intr_event_schedule_thread(struct intr_event *ie, 102177940Sjhb struct intr_thread *ithd); 103177940Sjhbstatic int intr_filter_loop(struct intr_event *ie, 104177940Sjhb struct trapframe *frame, struct intr_thread **ithd); 105169320Spisostatic struct intr_thread *ithread_create(const char *name, 106169320Spiso struct intr_handler *ih); 107169320Spiso#else 108177940Sjhbstatic int intr_event_schedule_thread(struct intr_event *ie); 109151658Sjhbstatic struct intr_thread *ithread_create(const char *name); 110169320Spiso#endif 111151658Sjhbstatic void ithread_destroy(struct intr_thread *ithread); 112169320Spisostatic void ithread_execute_handlers(struct proc *p, 113169320Spiso struct intr_event *ie); 114169320Spiso#ifdef INTR_FILTER 115169320Spisostatic void priv_ithread_execute_handler(struct proc *p, 116169320Spiso struct intr_handler *ih); 117169320Spiso#endif 118128339Sbdestatic void 
ithread_loop(void *); 119151658Sjhbstatic void ithread_update(struct intr_thread *ithd); 120128339Sbdestatic void start_softintr(void *); 121128339Sbde 122165124Sjhb/* Map an interrupt type to an ithread priority. */ 12372237Sjhbu_char 124151658Sjhbintr_priority(enum intr_type flags) 12565822Sjhb{ 12672237Sjhb u_char pri; 12765822Sjhb 12872237Sjhb flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET | 12978365Speter INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV); 13065822Sjhb switch (flags) { 13172237Sjhb case INTR_TYPE_TTY: 13265822Sjhb pri = PI_TTYLOW; 13365822Sjhb break; 13465822Sjhb case INTR_TYPE_BIO: 13565822Sjhb /* 13665822Sjhb * XXX We need to refine this. BSD/OS distinguishes 13765822Sjhb * between tape and disk priorities. 13865822Sjhb */ 13965822Sjhb pri = PI_DISK; 14065822Sjhb break; 14165822Sjhb case INTR_TYPE_NET: 14265822Sjhb pri = PI_NET; 14365822Sjhb break; 14465822Sjhb case INTR_TYPE_CAM: 14565822Sjhb pri = PI_DISK; /* XXX or PI_CAM? */ 14665822Sjhb break; 14778365Speter case INTR_TYPE_AV: /* Audio/video */ 14878365Speter pri = PI_AV; 14978365Speter break; 15072237Sjhb case INTR_TYPE_CLK: 15172237Sjhb pri = PI_REALTIME; 15272237Sjhb break; 15365822Sjhb case INTR_TYPE_MISC: 15465822Sjhb pri = PI_DULL; /* don't care */ 15565822Sjhb break; 15665822Sjhb default: 15772237Sjhb /* We didn't specify an interrupt level. */ 158151658Sjhb panic("intr_priority: no interrupt type in flags"); 15965822Sjhb } 16065822Sjhb 16165822Sjhb return pri; 16265822Sjhb} 16365822Sjhb 16472237Sjhb/* 165151658Sjhb * Update an ithread based on the associated intr_event. 16672237Sjhb */ 16772237Sjhbstatic void 168151658Sjhbithread_update(struct intr_thread *ithd) 16972237Sjhb{ 170151658Sjhb struct intr_event *ie; 17183366Sjulian struct thread *td; 172151658Sjhb u_char pri; 17367551Sjhb 174151658Sjhb ie = ithd->it_event; 175151658Sjhb td = ithd->it_thread; 17672237Sjhb 177151658Sjhb /* Determine the overall priority of this event. 
*/ 178151658Sjhb if (TAILQ_EMPTY(&ie->ie_handlers)) 179151658Sjhb pri = PRI_MAX_ITHD; 180151658Sjhb else 181151658Sjhb pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri; 182105354Srobert 183151658Sjhb /* Update name and priority. */ 184173004Sjulian strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name)); 185170307Sjeff thread_lock(td); 186151658Sjhb sched_prio(td, pri); 187170307Sjeff thread_unlock(td); 188151658Sjhb} 189151658Sjhb 190151658Sjhb/* 191151658Sjhb * Regenerate the full name of an interrupt event and update its priority. 192151658Sjhb */ 193151658Sjhbstatic void 194151658Sjhbintr_event_update(struct intr_event *ie) 195151658Sjhb{ 196151658Sjhb struct intr_handler *ih; 197151658Sjhb char *last; 198151658Sjhb int missed, space; 199151658Sjhb 200151658Sjhb /* Start off with no entropy and just the name of the event. */ 201151658Sjhb mtx_assert(&ie->ie_lock, MA_OWNED); 202151658Sjhb strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); 203151658Sjhb ie->ie_flags &= ~IE_ENTROPY; 204137267Sjhb missed = 0; 205151658Sjhb space = 1; 206151658Sjhb 207151658Sjhb /* Run through all the handlers updating values. */ 208151658Sjhb TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 209151658Sjhb if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 < 210151658Sjhb sizeof(ie->ie_fullname)) { 211151658Sjhb strcat(ie->ie_fullname, " "); 212151658Sjhb strcat(ie->ie_fullname, ih->ih_name); 213151658Sjhb space = 0; 214137267Sjhb } else 215137267Sjhb missed++; 216137267Sjhb if (ih->ih_flags & IH_ENTROPY) 217151658Sjhb ie->ie_flags |= IE_ENTROPY; 218137267Sjhb } 219151658Sjhb 220151658Sjhb /* 221151658Sjhb * If the handler names were too long, add +'s to indicate missing 222151658Sjhb * names. If we run out of room and still have +'s to add, change 223151658Sjhb * the last character from a + to a *. 
224151658Sjhb */ 225151658Sjhb last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2]; 226137267Sjhb while (missed-- > 0) { 227151658Sjhb if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) { 228151658Sjhb if (*last == '+') { 229151658Sjhb *last = '*'; 230151658Sjhb break; 231151658Sjhb } else 232151658Sjhb *last = '+'; 233151658Sjhb } else if (space) { 234151658Sjhb strcat(ie->ie_fullname, " +"); 235151658Sjhb space = 0; 23672237Sjhb } else 237151658Sjhb strcat(ie->ie_fullname, "+"); 23872237Sjhb } 239151658Sjhb 240151658Sjhb /* 241151658Sjhb * If this event has an ithread, update it's priority and 242151658Sjhb * name. 243151658Sjhb */ 244151658Sjhb if (ie->ie_thread != NULL) 245151658Sjhb ithread_update(ie->ie_thread); 246151658Sjhb CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname); 24772237Sjhb} 24872237Sjhb 24972237Sjhbint 250183298Sobrienintr_event_create(struct intr_event **event, void *source, int flags, int irq, 251177940Sjhb void (*pre_ithread)(void *), void (*post_ithread)(void *), 252177940Sjhb void (*post_filter)(void *), int (*assign_cpu)(void *, u_char), 253177940Sjhb const char *fmt, ...) 254169320Spiso{ 255169320Spiso struct intr_event *ie; 256169320Spiso va_list ap; 25772237Sjhb 258169320Spiso /* The only valid flag during creation is IE_SOFT. 
*/ 259169320Spiso if ((flags & ~IE_SOFT) != 0) 260169320Spiso return (EINVAL); 261169320Spiso ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO); 262169320Spiso ie->ie_source = source; 263177940Sjhb ie->ie_pre_ithread = pre_ithread; 264177940Sjhb ie->ie_post_ithread = post_ithread; 265177940Sjhb ie->ie_post_filter = post_filter; 266177181Sjhb ie->ie_assign_cpu = assign_cpu; 267169320Spiso ie->ie_flags = flags; 268178092Sjeff ie->ie_irq = irq; 269177181Sjhb ie->ie_cpu = NOCPU; 270169320Spiso TAILQ_INIT(&ie->ie_handlers); 271169320Spiso mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF); 272169320Spiso 273169320Spiso va_start(ap, fmt); 274169320Spiso vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap); 275169320Spiso va_end(ap); 276169320Spiso strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); 277178092Sjeff mtx_lock(&event_lock); 278169320Spiso TAILQ_INSERT_TAIL(&event_list, ie, ie_list); 279178092Sjeff mtx_unlock(&event_lock); 280169320Spiso if (event != NULL) 281169320Spiso *event = ie; 282169320Spiso CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name); 283169320Spiso return (0); 284169320Spiso} 285169320Spiso 286177181Sjhb/* 287177181Sjhb * Bind an interrupt event to the specified CPU. Note that not all 288177181Sjhb * platforms support binding an interrupt to a CPU. For those 289177181Sjhb * platforms this request will fail. For supported platforms, any 290177181Sjhb * associated ithreads as well as the primary interrupt context will 291177181Sjhb * be bound to the specificed CPU. Using a cpu id of NOCPU unbinds 292177181Sjhb * the interrupt event. 293177181Sjhb */ 294151658Sjhbint 295177181Sjhbintr_event_bind(struct intr_event *ie, u_char cpu) 296177181Sjhb{ 297178092Sjeff cpuset_t mask; 298178092Sjeff lwpid_t id; 299177181Sjhb int error; 300177181Sjhb 301177181Sjhb /* Need a CPU to bind to. 
*/ 302177181Sjhb if (cpu != NOCPU && CPU_ABSENT(cpu)) 303177181Sjhb return (EINVAL); 304177181Sjhb 305177181Sjhb if (ie->ie_assign_cpu == NULL) 306177181Sjhb return (EOPNOTSUPP); 307178092Sjeff /* 308178092Sjeff * If we have any ithreads try to set their mask first since this 309178092Sjeff * can fail. 310178092Sjeff */ 311177181Sjhb mtx_lock(&ie->ie_lock); 312178092Sjeff if (ie->ie_thread != NULL) { 313178092Sjeff CPU_ZERO(&mask); 314178092Sjeff if (cpu == NOCPU) 315178092Sjeff CPU_COPY(cpuset_root, &mask); 316178092Sjeff else 317178092Sjeff CPU_SET(cpu, &mask); 318178092Sjeff id = ie->ie_thread->it_thread->td_tid; 319177181Sjhb mtx_unlock(&ie->ie_lock); 320178092Sjeff error = cpuset_setthread(id, &mask); 321178092Sjeff if (error) 322178092Sjeff return (error); 323178092Sjeff } else 324178092Sjeff mtx_unlock(&ie->ie_lock); 325177181Sjhb error = ie->ie_assign_cpu(ie->ie_source, cpu); 326177181Sjhb if (error) 327177181Sjhb return (error); 328177181Sjhb mtx_lock(&ie->ie_lock); 329177181Sjhb ie->ie_cpu = cpu; 330177181Sjhb mtx_unlock(&ie->ie_lock); 331178092Sjeff 332178092Sjeff return (error); 333178092Sjeff} 334178092Sjeff 335178092Sjeffstatic struct intr_event * 336178092Sjeffintr_lookup(int irq) 337178092Sjeff{ 338178092Sjeff struct intr_event *ie; 339178092Sjeff 340178092Sjeff mtx_lock(&event_lock); 341178092Sjeff TAILQ_FOREACH(ie, &event_list, ie_list) 342178092Sjeff if (ie->ie_irq == irq && 343178092Sjeff (ie->ie_flags & IE_SOFT) == 0 && 344178092Sjeff TAILQ_FIRST(&ie->ie_handlers) != NULL) 345178092Sjeff break; 346178092Sjeff mtx_unlock(&event_lock); 347178092Sjeff return (ie); 348178092Sjeff} 349178092Sjeff 350178092Sjeffint 351178092Sjeffintr_setaffinity(int irq, void *m) 352178092Sjeff{ 353178092Sjeff struct intr_event *ie; 354178092Sjeff cpuset_t *mask; 355178092Sjeff u_char cpu; 356178092Sjeff int n; 357178092Sjeff 358178092Sjeff mask = m; 359178092Sjeff cpu = NOCPU; 360178092Sjeff /* 361178092Sjeff * If we're setting all cpus we can unbind. 
Otherwise make sure 362178092Sjeff * only one cpu is in the set. 363178092Sjeff */ 364178092Sjeff if (CPU_CMP(cpuset_root, mask)) { 365178092Sjeff for (n = 0; n < CPU_SETSIZE; n++) { 366178092Sjeff if (!CPU_ISSET(n, mask)) 367178092Sjeff continue; 368178092Sjeff if (cpu != NOCPU) 369178092Sjeff return (EINVAL); 370178092Sjeff cpu = (u_char)n; 371178092Sjeff } 372178092Sjeff } 373178092Sjeff ie = intr_lookup(irq); 374178092Sjeff if (ie == NULL) 375178092Sjeff return (ESRCH); 376194987Sjhb return (intr_event_bind(ie, cpu)); 377178092Sjeff} 378178092Sjeff 379178092Sjeffint 380178092Sjeffintr_getaffinity(int irq, void *m) 381178092Sjeff{ 382178092Sjeff struct intr_event *ie; 383178092Sjeff cpuset_t *mask; 384178092Sjeff 385178092Sjeff mask = m; 386178092Sjeff ie = intr_lookup(irq); 387178092Sjeff if (ie == NULL) 388178092Sjeff return (ESRCH); 389178092Sjeff CPU_ZERO(mask); 390178092Sjeff mtx_lock(&ie->ie_lock); 391178092Sjeff if (ie->ie_cpu == NOCPU) 392178092Sjeff CPU_COPY(cpuset_root, mask); 393178092Sjeff else 394178092Sjeff CPU_SET(ie->ie_cpu, mask); 395178092Sjeff mtx_unlock(&ie->ie_lock); 396177181Sjhb return (0); 397177181Sjhb} 398177181Sjhb 399177181Sjhbint 400151658Sjhbintr_event_destroy(struct intr_event *ie) 401151658Sjhb{ 402151658Sjhb 403178092Sjeff mtx_lock(&event_lock); 404151658Sjhb mtx_lock(&ie->ie_lock); 405151658Sjhb if (!TAILQ_EMPTY(&ie->ie_handlers)) { 406151658Sjhb mtx_unlock(&ie->ie_lock); 407178092Sjeff mtx_unlock(&event_lock); 408151658Sjhb return (EBUSY); 409151658Sjhb } 410151658Sjhb TAILQ_REMOVE(&event_list, ie, ie_list); 411157728Sjhb#ifndef notyet 412157728Sjhb if (ie->ie_thread != NULL) { 413157728Sjhb ithread_destroy(ie->ie_thread); 414157728Sjhb ie->ie_thread = NULL; 415157728Sjhb } 416157728Sjhb#endif 417151658Sjhb mtx_unlock(&ie->ie_lock); 418178092Sjeff mtx_unlock(&event_lock); 419151658Sjhb mtx_destroy(&ie->ie_lock); 420151658Sjhb free(ie, M_ITHREAD); 421151658Sjhb return (0); 422151658Sjhb} 423151658Sjhb 424169320Spiso#ifndef 
INTR_FILTER 425151658Sjhbstatic struct intr_thread * 426151658Sjhbithread_create(const char *name) 427151658Sjhb{ 428151658Sjhb struct intr_thread *ithd; 429151658Sjhb struct thread *td; 430151658Sjhb int error; 431151658Sjhb 432151658Sjhb ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); 433151658Sjhb 434173004Sjulian error = kproc_kthread_add(ithread_loop, ithd, &intrproc, 435173004Sjulian &td, RFSTOPPED | RFHIGHPID, 436173051Sjulian 0, "intr", "%s", name); 437151658Sjhb if (error) 438172836Sjulian panic("kproc_create() failed with %d", error); 439170307Sjeff thread_lock(td); 440164936Sjulian sched_class(td, PRI_ITHD); 441103216Sjulian TD_SET_IWAIT(td); 442170307Sjeff thread_unlock(td); 443151658Sjhb td->td_pflags |= TDP_ITHREAD; 444151658Sjhb ithd->it_thread = td; 445151658Sjhb CTR2(KTR_INTR, "%s: created %s", __func__, name); 446151658Sjhb return (ithd); 44772237Sjhb} 448169320Spiso#else 449169320Spisostatic struct intr_thread * 450169320Spisoithread_create(const char *name, struct intr_handler *ih) 451169320Spiso{ 452169320Spiso struct intr_thread *ithd; 453169320Spiso struct thread *td; 454169320Spiso int error; 45572237Sjhb 456169320Spiso ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); 457169320Spiso 458173153Sjulian error = kproc_kthread_add(ithread_loop, ih, &intrproc, 459173004Sjulian &td, RFSTOPPED | RFHIGHPID, 460173051Sjulian 0, "intr", "%s", name); 461169320Spiso if (error) 462172836Sjulian panic("kproc_create() failed with %d", error); 463170307Sjeff thread_lock(td); 464169320Spiso sched_class(td, PRI_ITHD); 465169320Spiso TD_SET_IWAIT(td); 466170307Sjeff thread_unlock(td); 467169320Spiso td->td_pflags |= TDP_ITHREAD; 468169320Spiso ithd->it_thread = td; 469169320Spiso CTR2(KTR_INTR, "%s: created %s", __func__, name); 470169320Spiso return (ithd); 471169320Spiso} 472169320Spiso#endif 473169320Spiso 474151658Sjhbstatic void 475151658Sjhbithread_destroy(struct intr_thread *ithread) 47672237Sjhb{ 
47783366Sjulian struct thread *td; 47872237Sjhb 479157784Sscottl CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name); 480151658Sjhb td = ithread->it_thread; 481170307Sjeff thread_lock(td); 48276771Sjhb ithread->it_flags |= IT_DEAD; 483103216Sjulian if (TD_AWAITING_INTR(td)) { 484103216Sjulian TD_CLR_IWAIT(td); 485166188Sjeff sched_add(td, SRQ_INTR); 48672237Sjhb } 487170307Sjeff thread_unlock(td); 48872237Sjhb} 48972237Sjhb 490169320Spiso#ifndef INTR_FILTER 49172237Sjhbint 492151658Sjhbintr_event_add_handler(struct intr_event *ie, const char *name, 493166901Spiso driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, 494166901Spiso enum intr_type flags, void **cookiep) 49572237Sjhb{ 496151658Sjhb struct intr_handler *ih, *temp_ih; 497151658Sjhb struct intr_thread *it; 49872237Sjhb 499166901Spiso if (ie == NULL || name == NULL || (handler == NULL && filter == NULL)) 50072237Sjhb return (EINVAL); 50172237Sjhb 502151658Sjhb /* Allocate and populate an interrupt handler structure. */ 503151658Sjhb ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO); 504166901Spiso ih->ih_filter = filter; 50572237Sjhb ih->ih_handler = handler; 50672237Sjhb ih->ih_argument = arg; 50772237Sjhb ih->ih_name = name; 508151658Sjhb ih->ih_event = ie; 50972237Sjhb ih->ih_pri = pri; 510166901Spiso if (flags & INTR_EXCL) 51172237Sjhb ih->ih_flags = IH_EXCLUSIVE; 51272237Sjhb if (flags & INTR_MPSAFE) 51372237Sjhb ih->ih_flags |= IH_MPSAFE; 51472237Sjhb if (flags & INTR_ENTROPY) 51572237Sjhb ih->ih_flags |= IH_ENTROPY; 51672237Sjhb 517151658Sjhb /* We can only have one exclusive handler in a event. 
*/ 518151658Sjhb mtx_lock(&ie->ie_lock); 519151658Sjhb if (!TAILQ_EMPTY(&ie->ie_handlers)) { 520151658Sjhb if ((flags & INTR_EXCL) || 521151658Sjhb (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) { 522151658Sjhb mtx_unlock(&ie->ie_lock); 523151658Sjhb free(ih, M_ITHREAD); 524151658Sjhb return (EINVAL); 525151658Sjhb } 526122002Sjhb } 52772237Sjhb 528151658Sjhb /* Add the new handler to the event in priority order. */ 529151658Sjhb TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) { 530151658Sjhb if (temp_ih->ih_pri > ih->ih_pri) 531151658Sjhb break; 532151658Sjhb } 53372237Sjhb if (temp_ih == NULL) 534151658Sjhb TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next); 53572237Sjhb else 53672237Sjhb TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next); 537151658Sjhb intr_event_update(ie); 53872237Sjhb 539151658Sjhb /* Create a thread if we need one. */ 540166901Spiso while (ie->ie_thread == NULL && handler != NULL) { 541151658Sjhb if (ie->ie_flags & IE_ADDING_THREAD) 542157815Sjhb msleep(ie, &ie->ie_lock, 0, "ithread", 0); 543151658Sjhb else { 544151658Sjhb ie->ie_flags |= IE_ADDING_THREAD; 545151658Sjhb mtx_unlock(&ie->ie_lock); 546151658Sjhb it = ithread_create("intr: newborn"); 547151658Sjhb mtx_lock(&ie->ie_lock); 548151658Sjhb ie->ie_flags &= ~IE_ADDING_THREAD; 549151658Sjhb ie->ie_thread = it; 550151658Sjhb it->it_event = ie; 551151658Sjhb ithread_update(it); 552151658Sjhb wakeup(ie); 553151658Sjhb } 554151658Sjhb } 555151658Sjhb CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name, 556151658Sjhb ie->ie_name); 557151658Sjhb mtx_unlock(&ie->ie_lock); 558151658Sjhb 55972237Sjhb if (cookiep != NULL) 56072237Sjhb *cookiep = ih; 56172237Sjhb return (0); 56272237Sjhb} 563169320Spiso#else 564169320Spisoint 565169320Spisointr_event_add_handler(struct intr_event *ie, const char *name, 566169320Spiso driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, 567169320Spiso enum intr_type flags, void **cookiep) 568169320Spiso{ 569169320Spiso struct intr_handler 
*ih, *temp_ih; 570169320Spiso struct intr_thread *it; 57172237Sjhb 572169320Spiso if (ie == NULL || name == NULL || (handler == NULL && filter == NULL)) 573169320Spiso return (EINVAL); 574169320Spiso 575169320Spiso /* Allocate and populate an interrupt handler structure. */ 576169320Spiso ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO); 577169320Spiso ih->ih_filter = filter; 578169320Spiso ih->ih_handler = handler; 579169320Spiso ih->ih_argument = arg; 580169320Spiso ih->ih_name = name; 581169320Spiso ih->ih_event = ie; 582169320Spiso ih->ih_pri = pri; 583169320Spiso if (flags & INTR_EXCL) 584169320Spiso ih->ih_flags = IH_EXCLUSIVE; 585169320Spiso if (flags & INTR_MPSAFE) 586169320Spiso ih->ih_flags |= IH_MPSAFE; 587169320Spiso if (flags & INTR_ENTROPY) 588169320Spiso ih->ih_flags |= IH_ENTROPY; 589169320Spiso 590169320Spiso /* We can only have one exclusive handler in a event. */ 591169320Spiso mtx_lock(&ie->ie_lock); 592169320Spiso if (!TAILQ_EMPTY(&ie->ie_handlers)) { 593169320Spiso if ((flags & INTR_EXCL) || 594169320Spiso (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) { 595169320Spiso mtx_unlock(&ie->ie_lock); 596169320Spiso free(ih, M_ITHREAD); 597169320Spiso return (EINVAL); 598169320Spiso } 599169320Spiso } 600169320Spiso 601169320Spiso /* Add the new handler to the event in priority order. */ 602169320Spiso TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) { 603169320Spiso if (temp_ih->ih_pri > ih->ih_pri) 604169320Spiso break; 605169320Spiso } 606169320Spiso if (temp_ih == NULL) 607169320Spiso TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next); 608169320Spiso else 609169320Spiso TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next); 610169320Spiso intr_event_update(ie); 611169320Spiso 612169320Spiso /* For filtered handlers, create a private ithread to run on. 
*/ 613169320Spiso if (filter != NULL && handler != NULL) { 614169320Spiso mtx_unlock(&ie->ie_lock); 615169320Spiso it = ithread_create("intr: newborn", ih); 616169320Spiso mtx_lock(&ie->ie_lock); 617169320Spiso it->it_event = ie; 618169320Spiso ih->ih_thread = it; 619169320Spiso ithread_update(it); // XXX - do we really need this?!?!? 620169320Spiso } else { /* Create the global per-event thread if we need one. */ 621169320Spiso while (ie->ie_thread == NULL && handler != NULL) { 622169320Spiso if (ie->ie_flags & IE_ADDING_THREAD) 623169320Spiso msleep(ie, &ie->ie_lock, 0, "ithread", 0); 624169320Spiso else { 625169320Spiso ie->ie_flags |= IE_ADDING_THREAD; 626169320Spiso mtx_unlock(&ie->ie_lock); 627169320Spiso it = ithread_create("intr: newborn", ih); 628169320Spiso mtx_lock(&ie->ie_lock); 629169320Spiso ie->ie_flags &= ~IE_ADDING_THREAD; 630169320Spiso ie->ie_thread = it; 631169320Spiso it->it_event = ie; 632169320Spiso ithread_update(it); 633169320Spiso wakeup(ie); 634169320Spiso } 635169320Spiso } 636169320Spiso } 637169320Spiso CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name, 638169320Spiso ie->ie_name); 639169320Spiso mtx_unlock(&ie->ie_lock); 640169320Spiso 641169320Spiso if (cookiep != NULL) 642169320Spiso *cookiep = ih; 643169320Spiso return (0); 644169320Spiso} 645169320Spiso#endif 646169320Spiso 647165125Sjhb/* 648165125Sjhb * Return the ie_source field from the intr_event an intr_handler is 649165125Sjhb * associated with. 
650165125Sjhb */ 651165125Sjhbvoid * 652165125Sjhbintr_handler_source(void *cookie) 653165125Sjhb{ 654165125Sjhb struct intr_handler *ih; 655165125Sjhb struct intr_event *ie; 656165125Sjhb 657165125Sjhb ih = (struct intr_handler *)cookie; 658165125Sjhb if (ih == NULL) 659165125Sjhb return (NULL); 660165125Sjhb ie = ih->ih_event; 661165125Sjhb KASSERT(ie != NULL, 662165125Sjhb ("interrupt handler \"%s\" has a NULL interrupt event", 663165125Sjhb ih->ih_name)); 664165125Sjhb return (ie->ie_source); 665165125Sjhb} 666165125Sjhb 667169320Spiso#ifndef INTR_FILTER 66872237Sjhbint 669151658Sjhbintr_event_remove_handler(void *cookie) 67072237Sjhb{ 671151658Sjhb struct intr_handler *handler = (struct intr_handler *)cookie; 672151658Sjhb struct intr_event *ie; 67372237Sjhb#ifdef INVARIANTS 674151658Sjhb struct intr_handler *ih; 67572237Sjhb#endif 676151658Sjhb#ifdef notyet 677151658Sjhb int dead; 678151658Sjhb#endif 67972237Sjhb 68072759Sjhb if (handler == NULL) 68172237Sjhb return (EINVAL); 682151658Sjhb ie = handler->ih_event; 683151658Sjhb KASSERT(ie != NULL, 684151658Sjhb ("interrupt handler \"%s\" has a NULL interrupt event", 685165124Sjhb handler->ih_name)); 686151658Sjhb mtx_lock(&ie->ie_lock); 68787593Sobrien CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name, 688151658Sjhb ie->ie_name); 68972237Sjhb#ifdef INVARIANTS 690151658Sjhb TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 69172759Sjhb if (ih == handler) 69272759Sjhb goto ok; 693151658Sjhb mtx_unlock(&ie->ie_lock); 694151658Sjhb panic("interrupt handler \"%s\" not found in interrupt event \"%s\"", 695151658Sjhb ih->ih_name, ie->ie_name); 69672759Sjhbok: 69772237Sjhb#endif 69872839Sjhb /* 699151658Sjhb * If there is no ithread, then just remove the handler and return. 700151658Sjhb * XXX: Note that an INTR_FAST handler might be running on another 701151658Sjhb * CPU! 
702151658Sjhb */ 703151658Sjhb if (ie->ie_thread == NULL) { 704151658Sjhb TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 705151658Sjhb mtx_unlock(&ie->ie_lock); 706151658Sjhb free(handler, M_ITHREAD); 707151658Sjhb return (0); 708151658Sjhb } 709151658Sjhb 710151658Sjhb /* 71172839Sjhb * If the interrupt thread is already running, then just mark this 71272839Sjhb * handler as being dead and let the ithread do the actual removal. 713124505Struckman * 714124505Struckman * During a cold boot while cold is set, msleep() does not sleep, 715124505Struckman * so we have to remove the handler here rather than letting the 716124505Struckman * thread do it. 71772839Sjhb */ 718170307Sjeff thread_lock(ie->ie_thread->it_thread); 719151658Sjhb if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) { 72072839Sjhb handler->ih_flags |= IH_DEAD; 72172839Sjhb 72272839Sjhb /* 72372839Sjhb * Ensure that the thread will process the handler list 72472839Sjhb * again and remove this handler if it has already passed 72572839Sjhb * it on the list. 72672839Sjhb */ 727151658Sjhb ie->ie_thread->it_need = 1; 728151658Sjhb } else 729151658Sjhb TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 730170307Sjeff thread_unlock(ie->ie_thread->it_thread); 731151658Sjhb while (handler->ih_flags & IH_DEAD) 732157815Sjhb msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0); 733151658Sjhb intr_event_update(ie); 734151658Sjhb#ifdef notyet 735151658Sjhb /* 736151658Sjhb * XXX: This could be bad in the case of ppbus(8). Also, I think 737151658Sjhb * this could lead to races of stale data when servicing an 738151658Sjhb * interrupt. 
739151658Sjhb */ 740151658Sjhb dead = 1; 741151658Sjhb TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 742151658Sjhb if (!(ih->ih_flags & IH_FAST)) { 743151658Sjhb dead = 0; 744151658Sjhb break; 745151658Sjhb } 746151658Sjhb } 747151658Sjhb if (dead) { 748151658Sjhb ithread_destroy(ie->ie_thread); 749151658Sjhb ie->ie_thread = NULL; 750151658Sjhb } 751151658Sjhb#endif 752151658Sjhb mtx_unlock(&ie->ie_lock); 75376771Sjhb free(handler, M_ITHREAD); 75472237Sjhb return (0); 75572237Sjhb} 75672237Sjhb 757177940Sjhbstatic int 758151658Sjhbintr_event_schedule_thread(struct intr_event *ie) 75972759Sjhb{ 760151658Sjhb struct intr_entropy entropy; 761151658Sjhb struct intr_thread *it; 76283366Sjulian struct thread *td; 763101176Sjulian struct thread *ctd; 76472759Sjhb struct proc *p; 76572759Sjhb 76672759Sjhb /* 76772759Sjhb * If no ithread or no handlers, then we have a stray interrupt. 76872759Sjhb */ 769151658Sjhb if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || 770151658Sjhb ie->ie_thread == NULL) 77172759Sjhb return (EINVAL); 77272759Sjhb 773101176Sjulian ctd = curthread; 774151658Sjhb it = ie->ie_thread; 775151658Sjhb td = it->it_thread; 776133191Srwatson p = td->td_proc; 777151658Sjhb 77872759Sjhb /* 77972759Sjhb * If any of the handlers for this ithread claim to be good 78072759Sjhb * sources of entropy, then gather some. 78172759Sjhb */ 782151658Sjhb if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) { 783133191Srwatson CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__, 784173004Sjulian p->p_pid, td->td_name); 785151658Sjhb entropy.event = (uintptr_t)ie; 786151658Sjhb entropy.td = ctd; 78772759Sjhb random_harvest(&entropy, sizeof(entropy), 2, 0, 78872759Sjhb RANDOM_INTERRUPT); 78972759Sjhb } 79072759Sjhb 791151658Sjhb KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name)); 79272759Sjhb 79372759Sjhb /* 79472759Sjhb * Set it_need to tell the thread to keep running if it is already 795170307Sjeff * running. 
Then, lock the thread and see if we actually need to 796170307Sjeff * put it on the runqueue. 79772759Sjhb */ 798151658Sjhb it->it_need = 1; 799170307Sjeff thread_lock(td); 800103216Sjulian if (TD_AWAITING_INTR(td)) { 801151658Sjhb CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid, 802173004Sjulian td->td_name); 803103216Sjulian TD_CLR_IWAIT(td); 804166188Sjeff sched_add(td, SRQ_INTR); 80572759Sjhb } else { 806151658Sjhb CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d", 807173004Sjulian __func__, p->p_pid, td->td_name, it->it_need, td->td_state); 80872759Sjhb } 809170307Sjeff thread_unlock(td); 81072759Sjhb 81172759Sjhb return (0); 81272759Sjhb} 813169320Spiso#else 814169320Spisoint 815169320Spisointr_event_remove_handler(void *cookie) 816169320Spiso{ 817169320Spiso struct intr_handler *handler = (struct intr_handler *)cookie; 818169320Spiso struct intr_event *ie; 819169320Spiso struct intr_thread *it; 820169320Spiso#ifdef INVARIANTS 821169320Spiso struct intr_handler *ih; 822169320Spiso#endif 823169320Spiso#ifdef notyet 824169320Spiso int dead; 825169320Spiso#endif 82672759Sjhb 827169320Spiso if (handler == NULL) 828169320Spiso return (EINVAL); 829169320Spiso ie = handler->ih_event; 830169320Spiso KASSERT(ie != NULL, 831169320Spiso ("interrupt handler \"%s\" has a NULL interrupt event", 832169320Spiso handler->ih_name)); 833169320Spiso mtx_lock(&ie->ie_lock); 834169320Spiso CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name, 835169320Spiso ie->ie_name); 836169320Spiso#ifdef INVARIANTS 837169320Spiso TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 838169320Spiso if (ih == handler) 839169320Spiso goto ok; 840169320Spiso mtx_unlock(&ie->ie_lock); 841169320Spiso panic("interrupt handler \"%s\" not found in interrupt event \"%s\"", 842169320Spiso ih->ih_name, ie->ie_name); 843169320Spisook: 844169320Spiso#endif 845169320Spiso /* 846169320Spiso * If there are no ithreads (per event and per handler), then 847169320Spiso * just remove the 
handler and return. 848169320Spiso * XXX: Note that an INTR_FAST handler might be running on another CPU! 849169320Spiso */ 850169320Spiso if (ie->ie_thread == NULL && handler->ih_thread == NULL) { 851169320Spiso TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 852169320Spiso mtx_unlock(&ie->ie_lock); 853169320Spiso free(handler, M_ITHREAD); 854169320Spiso return (0); 855169320Spiso } 856169320Spiso 857169320Spiso /* Private or global ithread? */ 858169320Spiso it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread; 859169320Spiso /* 860169320Spiso * If the interrupt thread is already running, then just mark this 861169320Spiso * handler as being dead and let the ithread do the actual removal. 862169320Spiso * 863169320Spiso * During a cold boot while cold is set, msleep() does not sleep, 864169320Spiso * so we have to remove the handler here rather than letting the 865169320Spiso * thread do it. 866169320Spiso */ 867170307Sjeff thread_lock(it->it_thread); 868169320Spiso if (!TD_AWAITING_INTR(it->it_thread) && !cold) { 869169320Spiso handler->ih_flags |= IH_DEAD; 870169320Spiso 871169320Spiso /* 872169320Spiso * Ensure that the thread will process the handler list 873169320Spiso * again and remove this handler if it has already passed 874169320Spiso * it on the list. 875169320Spiso */ 876169320Spiso it->it_need = 1; 877169320Spiso } else 878169320Spiso TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 879170307Sjeff thread_unlock(it->it_thread); 880169320Spiso while (handler->ih_flags & IH_DEAD) 881169320Spiso msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0); 882169320Spiso /* 883169320Spiso * At this point, the handler has been disconnected from the event, 884169320Spiso * so we can kill the private ithread if any. 
885169320Spiso */ 886169320Spiso if (handler->ih_thread) { 887169320Spiso ithread_destroy(handler->ih_thread); 888169320Spiso handler->ih_thread = NULL; 889169320Spiso } 890169320Spiso intr_event_update(ie); 891169320Spiso#ifdef notyet 892169320Spiso /* 893169320Spiso * XXX: This could be bad in the case of ppbus(8). Also, I think 894169320Spiso * this could lead to races of stale data when servicing an 895169320Spiso * interrupt. 896169320Spiso */ 897169320Spiso dead = 1; 898169320Spiso TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 899169320Spiso if (handler != NULL) { 900169320Spiso dead = 0; 901169320Spiso break; 902169320Spiso } 903169320Spiso } 904169320Spiso if (dead) { 905169320Spiso ithread_destroy(ie->ie_thread); 906169320Spiso ie->ie_thread = NULL; 907169320Spiso } 908169320Spiso#endif 909169320Spiso mtx_unlock(&ie->ie_lock); 910169320Spiso free(handler, M_ITHREAD); 911169320Spiso return (0); 912169320Spiso} 913169320Spiso 914177940Sjhbstatic int 915169320Spisointr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it) 916169320Spiso{ 917169320Spiso struct intr_entropy entropy; 918169320Spiso struct thread *td; 919169320Spiso struct thread *ctd; 920169320Spiso struct proc *p; 921169320Spiso 922169320Spiso /* 923169320Spiso * If no ithread or no handlers, then we have a stray interrupt. 924169320Spiso */ 925169320Spiso if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL) 926169320Spiso return (EINVAL); 927169320Spiso 928169320Spiso ctd = curthread; 929169320Spiso td = it->it_thread; 930169320Spiso p = td->td_proc; 931169320Spiso 932169320Spiso /* 933169320Spiso * If any of the handlers for this ithread claim to be good 934169320Spiso * sources of entropy, then gather some. 
935169320Spiso */ 936169320Spiso if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) { 937169320Spiso CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__, 938173004Sjulian p->p_pid, td->td_name); 939169320Spiso entropy.event = (uintptr_t)ie; 940169320Spiso entropy.td = ctd; 941169320Spiso random_harvest(&entropy, sizeof(entropy), 2, 0, 942169320Spiso RANDOM_INTERRUPT); 943169320Spiso } 944169320Spiso 945169320Spiso KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name)); 946169320Spiso 947169320Spiso /* 948169320Spiso * Set it_need to tell the thread to keep running if it is already 949170307Sjeff * running. Then, lock the thread and see if we actually need to 950170307Sjeff * put it on the runqueue. 951169320Spiso */ 952169320Spiso it->it_need = 1; 953170307Sjeff thread_lock(td); 954169320Spiso if (TD_AWAITING_INTR(td)) { 955169320Spiso CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid, 956173122Sjulian td->td_name); 957169320Spiso TD_CLR_IWAIT(td); 958169320Spiso sched_add(td, SRQ_INTR); 959169320Spiso } else { 960169320Spiso CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d", 961173004Sjulian __func__, p->p_pid, td->td_name, it->it_need, td->td_state); 962169320Spiso } 963170307Sjeff thread_unlock(td); 964169320Spiso 965169320Spiso return (0); 966169320Spiso} 967169320Spiso#endif 968169320Spiso 969151699Sjhb/* 970192305Srwatson * Allow interrupt event binding for software interrupt handlers -- a no-op, 971192305Srwatson * since interrupts are generated in software rather than being directed by 972192305Srwatson * a PIC. 973192305Srwatson */ 974192305Srwatsonstatic int 975192305Srwatsonswi_assign_cpu(void *arg, u_char cpu) 976192305Srwatson{ 977192305Srwatson 978192305Srwatson return (0); 979192305Srwatson} 980192305Srwatson 981192305Srwatson/* 982151699Sjhb * Add a software interrupt handler to a specified event. If a given event 983151699Sjhb * is not specified, then a new event is created. 
984151699Sjhb */ 98572759Sjhbint 986151658Sjhbswi_add(struct intr_event **eventp, const char *name, driver_intr_t handler, 98772237Sjhb void *arg, int pri, enum intr_type flags, void **cookiep) 98872237Sjhb{ 989151658Sjhb struct intr_event *ie; 99072237Sjhb int error; 99166698Sjhb 992169320Spiso if (flags & INTR_ENTROPY) 99372759Sjhb return (EINVAL); 99472759Sjhb 995151658Sjhb ie = (eventp != NULL) ? *eventp : NULL; 99666698Sjhb 997151658Sjhb if (ie != NULL) { 998151658Sjhb if (!(ie->ie_flags & IE_SOFT)) 999151658Sjhb return (EINVAL); 100072759Sjhb } else { 1001178092Sjeff error = intr_event_create(&ie, NULL, IE_SOFT, 0, 1002192305Srwatson NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri); 100367551Sjhb if (error) 100472237Sjhb return (error); 1005151658Sjhb if (eventp != NULL) 1006151658Sjhb *eventp = ie; 100766698Sjhb } 1008177859Sjeff error = intr_event_add_handler(ie, name, NULL, handler, arg, 1009177859Sjeff (pri * RQ_PPQ) + PI_SOFT, flags, cookiep); 1010177859Sjeff if (error) 1011177859Sjeff return (error); 1012177859Sjeff if (pri == SWI_CLOCK) { 1013177859Sjeff struct proc *p; 1014177859Sjeff p = ie->ie_thread->it_thread->td_proc; 1015177859Sjeff PROC_LOCK(p); 1016177859Sjeff p->p_flag |= P_NOLOAD; 1017177859Sjeff PROC_UNLOCK(p); 1018177859Sjeff } 1019177859Sjeff return (0); 102066698Sjhb} 102166698Sjhb 102266698Sjhb/* 1023151658Sjhb * Schedule a software interrupt thread. 102466698Sjhb */ 102567551Sjhbvoid 102672237Sjhbswi_sched(void *cookie, int flags) 102766698Sjhb{ 1028151658Sjhb struct intr_handler *ih = (struct intr_handler *)cookie; 1029151658Sjhb struct intr_event *ie = ih->ih_event; 103072759Sjhb int error; 103166698Sjhb 1032151658Sjhb CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name, 1033151658Sjhb ih->ih_need); 1034151658Sjhb 103567551Sjhb /* 103672759Sjhb * Set ih_need for this handler so that if the ithread is already 103772759Sjhb * running it will execute this handler on the next pass. 
Otherwise, 103872759Sjhb * it will execute it the next time it runs. 103967551Sjhb */ 104072237Sjhb atomic_store_rel_int(&ih->ih_need, 1); 1041163474Sbde 104272237Sjhb if (!(flags & SWI_DELAY)) { 1043170291Sattilio PCPU_INC(cnt.v_soft); 1044169320Spiso#ifdef INTR_FILTER 1045169320Spiso error = intr_event_schedule_thread(ie, ie->ie_thread); 1046169320Spiso#else 1047151658Sjhb error = intr_event_schedule_thread(ie); 1048169320Spiso#endif 104972759Sjhb KASSERT(error == 0, ("stray software interrupt")); 105066698Sjhb } 105166698Sjhb} 105266698Sjhb 1053151699Sjhb/* 1054151699Sjhb * Remove a software interrupt handler. Currently this code does not 1055151699Sjhb * remove the associated interrupt event if it becomes empty. Calling code 1056151699Sjhb * may do so manually via intr_event_destroy(), but that's not really 1057151699Sjhb * an optimal interface. 1058151699Sjhb */ 1059151699Sjhbint 1060151699Sjhbswi_remove(void *cookie) 1061151699Sjhb{ 1062151699Sjhb 1063151699Sjhb return (intr_event_remove_handler(cookie)); 1064151699Sjhb} 1065151699Sjhb 1066169320Spiso#ifdef INTR_FILTER 1067151658Sjhbstatic void 1068169320Spisopriv_ithread_execute_handler(struct proc *p, struct intr_handler *ih) 1069169320Spiso{ 1070169320Spiso struct intr_event *ie; 1071169320Spiso 1072169320Spiso ie = ih->ih_event; 1073169320Spiso /* 1074169320Spiso * If this handler is marked for death, remove it from 1075169320Spiso * the list of handlers and wake up the sleeper. 1076169320Spiso */ 1077169320Spiso if (ih->ih_flags & IH_DEAD) { 1078169320Spiso mtx_lock(&ie->ie_lock); 1079169320Spiso TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next); 1080169320Spiso ih->ih_flags &= ~IH_DEAD; 1081169320Spiso wakeup(ih); 1082169320Spiso mtx_unlock(&ie->ie_lock); 1083169320Spiso return; 1084169320Spiso } 1085169320Spiso 1086169320Spiso /* Execute this handler. 
*/ 1087169320Spiso CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x", 1088169320Spiso __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument, 1089169320Spiso ih->ih_name, ih->ih_flags); 1090169320Spiso 1091169320Spiso if (!(ih->ih_flags & IH_MPSAFE)) 1092169320Spiso mtx_lock(&Giant); 1093169320Spiso ih->ih_handler(ih->ih_argument); 1094169320Spiso if (!(ih->ih_flags & IH_MPSAFE)) 1095169320Spiso mtx_unlock(&Giant); 1096169320Spiso} 1097169320Spiso#endif 1098169320Spiso 1099183052Sjhb/* 1100183052Sjhb * This is a public function for use by drivers that mux interrupt 1101183052Sjhb * handlers for child devices from their interrupt handler. 1102183052Sjhb */ 1103183052Sjhbvoid 1104183052Sjhbintr_event_execute_handlers(struct proc *p, struct intr_event *ie) 1105151658Sjhb{ 1106151658Sjhb struct intr_handler *ih, *ihn; 1107151658Sjhb 1108151658Sjhb TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) { 1109151658Sjhb /* 1110151658Sjhb * If this handler is marked for death, remove it from 1111151658Sjhb * the list of handlers and wake up the sleeper. 1112151658Sjhb */ 1113151658Sjhb if (ih->ih_flags & IH_DEAD) { 1114151658Sjhb mtx_lock(&ie->ie_lock); 1115151658Sjhb TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next); 1116151658Sjhb ih->ih_flags &= ~IH_DEAD; 1117151658Sjhb wakeup(ih); 1118151658Sjhb mtx_unlock(&ie->ie_lock); 1119151658Sjhb continue; 1120151658Sjhb } 1121151658Sjhb 1122167080Spiso /* Skip filter only handlers */ 1123167080Spiso if (ih->ih_handler == NULL) 1124167080Spiso continue; 1125167080Spiso 1126151658Sjhb /* 1127151658Sjhb * For software interrupt threads, we only execute 1128151658Sjhb * handlers that have their need flag set. Hardware 1129151658Sjhb * interrupt threads always invoke all of their handlers. 
1130151658Sjhb */ 1131151658Sjhb if (ie->ie_flags & IE_SOFT) { 1132151658Sjhb if (!ih->ih_need) 1133151658Sjhb continue; 1134151658Sjhb else 1135151658Sjhb atomic_store_rel_int(&ih->ih_need, 0); 1136151658Sjhb } 1137151658Sjhb 1138151658Sjhb /* Execute this handler. */ 1139151658Sjhb CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x", 1140169320Spiso __func__, p->p_pid, (void *)ih->ih_handler, 1141169320Spiso ih->ih_argument, ih->ih_name, ih->ih_flags); 1142151658Sjhb 1143151658Sjhb if (!(ih->ih_flags & IH_MPSAFE)) 1144151658Sjhb mtx_lock(&Giant); 1145151658Sjhb ih->ih_handler(ih->ih_argument); 1146151658Sjhb if (!(ih->ih_flags & IH_MPSAFE)) 1147151658Sjhb mtx_unlock(&Giant); 1148151658Sjhb } 1149183052Sjhb} 1150183052Sjhb 1151183052Sjhbstatic void 1152183052Sjhbithread_execute_handlers(struct proc *p, struct intr_event *ie) 1153183052Sjhb{ 1154183052Sjhb 1155183052Sjhb /* Interrupt handlers should not sleep. */ 1156151658Sjhb if (!(ie->ie_flags & IE_SOFT)) 1157183052Sjhb THREAD_NO_SLEEPING(); 1158183052Sjhb intr_event_execute_handlers(p, ie); 1159183052Sjhb if (!(ie->ie_flags & IE_SOFT)) 1160151658Sjhb THREAD_SLEEPING_OK(); 1161151658Sjhb 1162151658Sjhb /* 1163151658Sjhb * Interrupt storm handling: 1164151658Sjhb * 1165151658Sjhb * If this interrupt source is currently storming, then throttle 1166151658Sjhb * it to only fire the handler once per clock tick. 1167151658Sjhb * 1168151658Sjhb * If this interrupt source is not currently storming, but the 1169151658Sjhb * number of back to back interrupts exceeds the storm threshold, 1170151658Sjhb * then enter storming mode. 1171151658Sjhb */ 1172167173Sjhb if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold && 1173167173Sjhb !(ie->ie_flags & IE_SOFT)) { 1174168850Snjl /* Report the message only once every second. 
*/ 1175168850Snjl if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) { 1176151658Sjhb printf( 1177168850Snjl "interrupt storm detected on \"%s\"; throttling interrupt source\n", 1178151658Sjhb ie->ie_name); 1179151658Sjhb } 1180167173Sjhb pause("istorm", 1); 1181151658Sjhb } else 1182151658Sjhb ie->ie_count++; 1183151658Sjhb 1184151658Sjhb /* 1185151658Sjhb * Now that all the handlers have had a chance to run, reenable 1186151658Sjhb * the interrupt source. 1187151658Sjhb */ 1188177940Sjhb if (ie->ie_post_ithread != NULL) 1189177940Sjhb ie->ie_post_ithread(ie->ie_source); 1190151658Sjhb} 1191151658Sjhb 1192169320Spiso#ifndef INTR_FILTER 119366698Sjhb/* 119472237Sjhb * This is the main code for interrupt threads. 119566698Sjhb */ 1196104094Sphkstatic void 119772237Sjhbithread_loop(void *arg) 119866698Sjhb{ 1199151658Sjhb struct intr_thread *ithd; 1200151658Sjhb struct intr_event *ie; 120183366Sjulian struct thread *td; 120272237Sjhb struct proc *p; 1203151658Sjhb 120483366Sjulian td = curthread; 120583366Sjulian p = td->td_proc; 1206151658Sjhb ithd = (struct intr_thread *)arg; 1207151658Sjhb KASSERT(ithd->it_thread == td, 120887593Sobrien ("%s: ithread and proc linkage out of sync", __func__)); 1209151658Sjhb ie = ithd->it_event; 1210151658Sjhb ie->ie_count = 0; 121166698Sjhb 121267551Sjhb /* 121367551Sjhb * As long as we have interrupts outstanding, go through the 121467551Sjhb * list of handlers, giving each one a go at it. 121567551Sjhb */ 121666698Sjhb for (;;) { 121772237Sjhb /* 121872237Sjhb * If we are an orphaned thread, then just die. 121972237Sjhb */ 122072237Sjhb if (ithd->it_flags & IT_DEAD) { 1221151658Sjhb CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, 1222173004Sjulian p->p_pid, td->td_name); 122372237Sjhb free(ithd, M_ITHREAD); 1224173044Sjulian kthread_exit(); 122572237Sjhb } 122672237Sjhb 1227151658Sjhb /* 1228151658Sjhb * Service interrupts. 
If another interrupt arrives while 1229151658Sjhb * we are running, it will set it_need to note that we 1230151658Sjhb * should make another pass. 1231151658Sjhb */ 123272237Sjhb while (ithd->it_need) { 123367551Sjhb /* 1234151658Sjhb * This might need a full read and write barrier 1235151658Sjhb * to make sure that this write posts before any 1236151658Sjhb * of the memory or device accesses in the 1237151658Sjhb * handlers. 123867551Sjhb */ 123972237Sjhb atomic_store_rel_int(&ithd->it_need, 0); 1240151658Sjhb ithread_execute_handlers(p, ie); 124166698Sjhb } 1242128331Sjhb WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); 1243128331Sjhb mtx_assert(&Giant, MA_NOTOWNED); 124467551Sjhb 124566698Sjhb /* 124666698Sjhb * Processed all our interrupts. Now get the sched 124767551Sjhb * lock. This may take a while and it_need may get 124866698Sjhb * set again, so we have to check it again. 124966698Sjhb */ 1250170307Sjeff thread_lock(td); 1251151658Sjhb if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) { 1252128331Sjhb TD_SET_IWAIT(td); 1253151658Sjhb ie->ie_count = 0; 1254178272Sjeff mi_switch(SW_VOL | SWT_IWAIT, NULL); 125566698Sjhb } 1256170307Sjeff thread_unlock(td); 125766698Sjhb } 125866698Sjhb} 1259177940Sjhb 1260177940Sjhb/* 1261177940Sjhb * Main interrupt handling body. 1262177940Sjhb * 1263177940Sjhb * Input: 1264177940Sjhb * o ie: the event connected to this interrupt. 1265177940Sjhb * o frame: some archs (i.e. i386) pass a frame to some. 1266177940Sjhb * handlers as their main argument. 1267177940Sjhb * Return value: 1268177940Sjhb * o 0: everything ok. 1269177940Sjhb * o EINVAL: stray interrupt. 
1270177940Sjhb */ 1271177940Sjhbint 1272177940Sjhbintr_event_handle(struct intr_event *ie, struct trapframe *frame) 1273177940Sjhb{ 1274177940Sjhb struct intr_handler *ih; 1275177940Sjhb struct thread *td; 1276177940Sjhb int error, ret, thread; 1277177940Sjhb 1278177940Sjhb td = curthread; 1279177940Sjhb 1280177940Sjhb /* An interrupt with no event or handlers is a stray interrupt. */ 1281177940Sjhb if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers)) 1282177940Sjhb return (EINVAL); 1283177940Sjhb 1284177940Sjhb /* 1285177940Sjhb * Execute fast interrupt handlers directly. 1286177940Sjhb * To support clock handlers, if a handler registers 1287177940Sjhb * with a NULL argument, then we pass it a pointer to 1288177940Sjhb * a trapframe as its argument. 1289177940Sjhb */ 1290177940Sjhb td->td_intr_nesting_level++; 1291177940Sjhb thread = 0; 1292177940Sjhb ret = 0; 1293177940Sjhb critical_enter(); 1294177940Sjhb TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 1295177940Sjhb if (ih->ih_filter == NULL) { 1296177940Sjhb thread = 1; 1297177940Sjhb continue; 1298177940Sjhb } 1299177940Sjhb CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__, 1300177940Sjhb ih->ih_filter, ih->ih_argument == NULL ? frame : 1301177940Sjhb ih->ih_argument, ih->ih_name); 1302177940Sjhb if (ih->ih_argument == NULL) 1303177940Sjhb ret = ih->ih_filter(frame); 1304177940Sjhb else 1305177940Sjhb ret = ih->ih_filter(ih->ih_argument); 1306177940Sjhb /* 1307177940Sjhb * Wrapper handler special handling: 1308177940Sjhb * 1309177940Sjhb * in some particular cases (like pccard and pccbb), 1310177940Sjhb * the _real_ device handler is wrapped in a couple of 1311177940Sjhb * functions - a filter wrapper and an ithread wrapper. 1312177940Sjhb * In this case (and just in this case), the filter wrapper 1313177940Sjhb * could ask the system to schedule the ithread and mask 1314177940Sjhb * the interrupt source if the wrapped handler is composed 1315177940Sjhb * of just an ithread handler. 
1316177940Sjhb * 1317177940Sjhb * TODO: write a generic wrapper to avoid people rolling 1318177940Sjhb * their own 1319177940Sjhb */ 1320177940Sjhb if (!thread) { 1321177940Sjhb if (ret == FILTER_SCHEDULE_THREAD) 1322177940Sjhb thread = 1; 1323177940Sjhb } 1324177940Sjhb } 1325177940Sjhb 1326177940Sjhb if (thread) { 1327177940Sjhb if (ie->ie_pre_ithread != NULL) 1328177940Sjhb ie->ie_pre_ithread(ie->ie_source); 1329177940Sjhb } else { 1330177940Sjhb if (ie->ie_post_filter != NULL) 1331177940Sjhb ie->ie_post_filter(ie->ie_source); 1332177940Sjhb } 1333177940Sjhb 1334177940Sjhb /* Schedule the ithread if needed. */ 1335177940Sjhb if (thread) { 1336177940Sjhb error = intr_event_schedule_thread(ie); 1337182024Skmacy#ifndef XEN 1338177940Sjhb KASSERT(error == 0, ("bad stray interrupt")); 1339182024Skmacy#else 1340182024Skmacy if (error != 0) 1341182024Skmacy log(LOG_WARNING, "bad stray interrupt"); 1342182024Skmacy#endif 1343177940Sjhb } 1344177940Sjhb critical_exit(); 1345177940Sjhb td->td_intr_nesting_level--; 1346177940Sjhb return (0); 1347177940Sjhb} 1348169320Spiso#else 1349169320Spiso/* 1350169320Spiso * This is the main code for interrupt threads. 1351169320Spiso */ 1352169320Spisostatic void 1353169320Spisoithread_loop(void *arg) 1354169320Spiso{ 1355169320Spiso struct intr_thread *ithd; 1356169320Spiso struct intr_handler *ih; 1357169320Spiso struct intr_event *ie; 1358169320Spiso struct thread *td; 1359169320Spiso struct proc *p; 1360169320Spiso int priv; 136166698Sjhb 1362169320Spiso td = curthread; 1363169320Spiso p = td->td_proc; 1364169320Spiso ih = (struct intr_handler *)arg; 1365169320Spiso priv = (ih->ih_thread != NULL) ? 1 : 0; 1366169320Spiso ithd = (priv) ? 
ih->ih_thread : ih->ih_event->ie_thread; 1367169320Spiso KASSERT(ithd->it_thread == td, 1368169320Spiso ("%s: ithread and proc linkage out of sync", __func__)); 1369169320Spiso ie = ithd->it_event; 1370169320Spiso ie->ie_count = 0; 1371169320Spiso 1372169320Spiso /* 1373169320Spiso * As long as we have interrupts outstanding, go through the 1374169320Spiso * list of handlers, giving each one a go at it. 1375169320Spiso */ 1376169320Spiso for (;;) { 1377169320Spiso /* 1378169320Spiso * If we are an orphaned thread, then just die. 1379169320Spiso */ 1380169320Spiso if (ithd->it_flags & IT_DEAD) { 1381169320Spiso CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, 1382173004Sjulian p->p_pid, td->td_name); 1383169320Spiso free(ithd, M_ITHREAD); 1384173044Sjulian kthread_exit(); 1385169320Spiso } 1386169320Spiso 1387169320Spiso /* 1388169320Spiso * Service interrupts. If another interrupt arrives while 1389169320Spiso * we are running, it will set it_need to note that we 1390169320Spiso * should make another pass. 1391169320Spiso */ 1392169320Spiso while (ithd->it_need) { 1393169320Spiso /* 1394169320Spiso * This might need a full read and write barrier 1395169320Spiso * to make sure that this write posts before any 1396169320Spiso * of the memory or device accesses in the 1397169320Spiso * handlers. 1398169320Spiso */ 1399169320Spiso atomic_store_rel_int(&ithd->it_need, 0); 1400169320Spiso if (priv) 1401169320Spiso priv_ithread_execute_handler(p, ih); 1402169320Spiso else 1403169320Spiso ithread_execute_handlers(p, ie); 1404169320Spiso } 1405169320Spiso WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); 1406169320Spiso mtx_assert(&Giant, MA_NOTOWNED); 1407169320Spiso 1408169320Spiso /* 1409169320Spiso * Processed all our interrupts. Now get the sched 1410169320Spiso * lock. This may take a while and it_need may get 1411169320Spiso * set again, so we have to check it again. 
1412169320Spiso */ 1413170307Sjeff thread_lock(td); 1414169320Spiso if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) { 1415169320Spiso TD_SET_IWAIT(td); 1416169320Spiso ie->ie_count = 0; 1417178272Sjeff mi_switch(SW_VOL | SWT_IWAIT, NULL); 1418169320Spiso } 1419170307Sjeff thread_unlock(td); 1420169320Spiso } 1421169320Spiso} 1422169320Spiso 1423169320Spiso/* 1424169320Spiso * Main loop for interrupt filter. 1425169320Spiso * 1426169320Spiso * Some architectures (i386, amd64 and arm) require the optional frame 1427169320Spiso * parameter, and use it as the main argument for fast handler execution 1428169320Spiso * when ih_argument == NULL. 1429169320Spiso * 1430169320Spiso * Return value: 1431169320Spiso * o FILTER_STRAY: No filter recognized the event, and no 1432169320Spiso * filter-less handler is registered on this 1433169320Spiso * line. 1434169320Spiso * o FILTER_HANDLED: A filter claimed the event and served it. 1435169320Spiso * o FILTER_SCHEDULE_THREAD: No filter claimed the event, but there's at 1436169320Spiso * least one filter-less handler on this line. 1437169320Spiso * o FILTER_HANDLED | 1438169320Spiso * FILTER_SCHEDULE_THREAD: A filter claimed the event, and asked for 1439169320Spiso * scheduling the per-handler ithread. 1440169320Spiso * 1441169320Spiso * In case an ithread has to be scheduled, in *ithd there will be a 1442169320Spiso * pointer to a struct intr_thread containing the thread to be 1443169320Spiso * scheduled. 1444169320Spiso */ 1445169320Spiso 1446177940Sjhbstatic int 1447169320Spisointr_filter_loop(struct intr_event *ie, struct trapframe *frame, 1448169320Spiso struct intr_thread **ithd) 1449169320Spiso{ 1450169320Spiso struct intr_handler *ih; 1451169320Spiso void *arg; 1452169320Spiso int ret, thread_only; 1453169320Spiso 1454169320Spiso ret = 0; 1455169320Spiso thread_only = 0; 1456169320Spiso TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 1457169320Spiso /* 1458169320Spiso * Execute fast interrupt handlers directly. 
1459169320Spiso * To support clock handlers, if a handler registers 1460169320Spiso * with a NULL argument, then we pass it a pointer to 1461169320Spiso * a trapframe as its argument. 1462169320Spiso */ 1463169320Spiso arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument); 1464169320Spiso 1465169320Spiso CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__, 1466169320Spiso ih->ih_filter, ih->ih_handler, arg, ih->ih_name); 1467169320Spiso 1468169320Spiso if (ih->ih_filter != NULL) 1469169320Spiso ret = ih->ih_filter(arg); 1470169320Spiso else { 1471169320Spiso thread_only = 1; 1472169320Spiso continue; 1473169320Spiso } 1474169320Spiso 1475169320Spiso if (ret & FILTER_STRAY) 1476169320Spiso continue; 1477169320Spiso else { 1478169320Spiso *ithd = ih->ih_thread; 1479169320Spiso return (ret); 1480169320Spiso } 1481169320Spiso } 1482169320Spiso 1483169320Spiso /* 1484169320Spiso * No filters handled the interrupt and we have at least 1485169320Spiso * one handler without a filter. In this case, we schedule 1486169320Spiso * all of the filter-less handlers to run in the ithread. 1487169320Spiso */ 1488169320Spiso if (thread_only) { 1489169320Spiso *ithd = ie->ie_thread; 1490169320Spiso return (FILTER_SCHEDULE_THREAD); 1491169320Spiso } 1492169320Spiso return (FILTER_STRAY); 1493169320Spiso} 1494169320Spiso 1495169320Spiso/* 1496169320Spiso * Main interrupt handling body. 1497169320Spiso * 1498169320Spiso * Input: 1499169320Spiso * o ie: the event connected to this interrupt. 1500169320Spiso * o frame: some archs (i.e. i386) pass a frame to some. 1501169320Spiso * handlers as their main argument. 1502169320Spiso * Return value: 1503169320Spiso * o 0: everything ok. 1504169320Spiso * o EINVAL: stray interrupt. 
1505169320Spiso */ 1506169320Spisoint 1507169320Spisointr_event_handle(struct intr_event *ie, struct trapframe *frame) 1508169320Spiso{ 1509169320Spiso struct intr_thread *ithd; 1510169320Spiso struct thread *td; 1511169320Spiso int thread; 1512169320Spiso 1513169320Spiso ithd = NULL; 1514169320Spiso td = curthread; 1515169320Spiso 1516169320Spiso if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers)) 1517169320Spiso return (EINVAL); 1518169320Spiso 1519169320Spiso td->td_intr_nesting_level++; 1520169320Spiso thread = 0; 1521169320Spiso critical_enter(); 1522177940Sjhb thread = intr_filter_loop(ie, frame, &ithd); 1523169320Spiso if (thread & FILTER_HANDLED) { 1524177940Sjhb if (ie->ie_post_filter != NULL) 1525177940Sjhb ie->ie_post_filter(ie->ie_source); 1526169320Spiso } else { 1527177940Sjhb if (ie->ie_pre_ithread != NULL) 1528177940Sjhb ie->ie_pre_ithread(ie->ie_source); 1529169320Spiso } 1530169320Spiso critical_exit(); 1531169320Spiso 1532169320Spiso /* Interrupt storm logic */ 1533169320Spiso if (thread & FILTER_STRAY) { 1534169320Spiso ie->ie_count++; 1535169320Spiso if (ie->ie_count < intr_storm_threshold) 1536169320Spiso printf("Interrupt stray detection not present\n"); 1537169320Spiso } 1538169320Spiso 1539169320Spiso /* Schedule an ithread if needed. 
*/ 1540169320Spiso if (thread & FILTER_SCHEDULE_THREAD) { 1541169320Spiso if (intr_event_schedule_thread(ie, ithd) != 0) 1542169320Spiso panic("%s: impossible stray interrupt", __func__); 1543169320Spiso } 1544169320Spiso td->td_intr_nesting_level--; 1545169320Spiso return (0); 1546169320Spiso} 1547169320Spiso#endif 1548169320Spiso 1549121482Sjhb#ifdef DDB 155072237Sjhb/* 1551121482Sjhb * Dump details about an interrupt handler 1552121482Sjhb */ 1553121482Sjhbstatic void 1554151658Sjhbdb_dump_intrhand(struct intr_handler *ih) 1555121482Sjhb{ 1556121482Sjhb int comma; 1557121482Sjhb 1558121482Sjhb db_printf("\t%-10s ", ih->ih_name); 1559121482Sjhb switch (ih->ih_pri) { 1560121482Sjhb case PI_REALTIME: 1561121482Sjhb db_printf("CLK "); 1562121482Sjhb break; 1563121482Sjhb case PI_AV: 1564121482Sjhb db_printf("AV "); 1565121482Sjhb break; 1566121482Sjhb case PI_TTYHIGH: 1567121482Sjhb case PI_TTYLOW: 1568121482Sjhb db_printf("TTY "); 1569121482Sjhb break; 1570121482Sjhb case PI_TAPE: 1571121482Sjhb db_printf("TAPE"); 1572121482Sjhb break; 1573121482Sjhb case PI_NET: 1574121482Sjhb db_printf("NET "); 1575121482Sjhb break; 1576121482Sjhb case PI_DISK: 1577121482Sjhb case PI_DISKLOW: 1578121482Sjhb db_printf("DISK"); 1579121482Sjhb break; 1580121482Sjhb case PI_DULL: 1581121482Sjhb db_printf("DULL"); 1582121482Sjhb break; 1583121482Sjhb default: 1584121482Sjhb if (ih->ih_pri >= PI_SOFT) 1585121482Sjhb db_printf("SWI "); 1586121482Sjhb else 1587121482Sjhb db_printf("%4u", ih->ih_pri); 1588121482Sjhb break; 1589121482Sjhb } 1590121482Sjhb db_printf(" "); 1591121482Sjhb db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC); 1592121482Sjhb db_printf("(%p)", ih->ih_argument); 1593121482Sjhb if (ih->ih_need || 1594166901Spiso (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD | 1595121482Sjhb IH_MPSAFE)) != 0) { 1596121482Sjhb db_printf(" {"); 1597121482Sjhb comma = 0; 1598121482Sjhb if (ih->ih_flags & IH_EXCLUSIVE) { 1599121482Sjhb if (comma) 1600121482Sjhb db_printf(", "); 
1601121482Sjhb db_printf("EXCL"); 1602121482Sjhb comma = 1; 1603121482Sjhb } 1604121482Sjhb if (ih->ih_flags & IH_ENTROPY) { 1605121482Sjhb if (comma) 1606121482Sjhb db_printf(", "); 1607121482Sjhb db_printf("ENTROPY"); 1608121482Sjhb comma = 1; 1609121482Sjhb } 1610121482Sjhb if (ih->ih_flags & IH_DEAD) { 1611121482Sjhb if (comma) 1612121482Sjhb db_printf(", "); 1613121482Sjhb db_printf("DEAD"); 1614121482Sjhb comma = 1; 1615121482Sjhb } 1616121482Sjhb if (ih->ih_flags & IH_MPSAFE) { 1617121482Sjhb if (comma) 1618121482Sjhb db_printf(", "); 1619121482Sjhb db_printf("MPSAFE"); 1620121482Sjhb comma = 1; 1621121482Sjhb } 1622121482Sjhb if (ih->ih_need) { 1623121482Sjhb if (comma) 1624121482Sjhb db_printf(", "); 1625121482Sjhb db_printf("NEED"); 1626121482Sjhb } 1627121482Sjhb db_printf("}"); 1628121482Sjhb } 1629121482Sjhb db_printf("\n"); 1630121482Sjhb} 1631121482Sjhb 1632121482Sjhb/* 1633151658Sjhb * Dump details about a event. 1634121482Sjhb */ 1635121482Sjhbvoid 1636151658Sjhbdb_dump_intr_event(struct intr_event *ie, int handlers) 1637121482Sjhb{ 1638151658Sjhb struct intr_handler *ih; 1639151658Sjhb struct intr_thread *it; 1640121482Sjhb int comma; 1641121482Sjhb 1642151658Sjhb db_printf("%s ", ie->ie_fullname); 1643151658Sjhb it = ie->ie_thread; 1644151658Sjhb if (it != NULL) 1645151658Sjhb db_printf("(pid %d)", it->it_thread->td_proc->p_pid); 1646151658Sjhb else 1647151658Sjhb db_printf("(no thread)"); 1648151658Sjhb if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 || 1649151658Sjhb (it != NULL && it->it_need)) { 1650121482Sjhb db_printf(" {"); 1651121482Sjhb comma = 0; 1652151658Sjhb if (ie->ie_flags & IE_SOFT) { 1653121482Sjhb db_printf("SOFT"); 1654121482Sjhb comma = 1; 1655121482Sjhb } 1656151658Sjhb if (ie->ie_flags & IE_ENTROPY) { 1657121482Sjhb if (comma) 1658121482Sjhb db_printf(", "); 1659121482Sjhb db_printf("ENTROPY"); 1660121482Sjhb comma = 1; 1661121482Sjhb } 1662151658Sjhb if (ie->ie_flags & IE_ADDING_THREAD) { 1663121482Sjhb 
if (comma) 1664121482Sjhb db_printf(", "); 1665151658Sjhb db_printf("ADDING_THREAD"); 1666121482Sjhb comma = 1; 1667121482Sjhb } 1668151658Sjhb if (it != NULL && it->it_need) { 1669121482Sjhb if (comma) 1670121482Sjhb db_printf(", "); 1671121482Sjhb db_printf("NEED"); 1672121482Sjhb } 1673121482Sjhb db_printf("}"); 1674121482Sjhb } 1675121482Sjhb db_printf("\n"); 1676121482Sjhb 1677121482Sjhb if (handlers) 1678151658Sjhb TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 1679121482Sjhb db_dump_intrhand(ih); 1680121482Sjhb} 1681151658Sjhb 1682151658Sjhb/* 1683151658Sjhb * Dump data about interrupt handlers 1684151658Sjhb */ 1685151658SjhbDB_SHOW_COMMAND(intr, db_show_intr) 1686151658Sjhb{ 1687151658Sjhb struct intr_event *ie; 1688160312Sjhb int all, verbose; 1689151658Sjhb 1690151658Sjhb verbose = index(modif, 'v') != NULL; 1691151658Sjhb all = index(modif, 'a') != NULL; 1692151658Sjhb TAILQ_FOREACH(ie, &event_list, ie_list) { 1693151658Sjhb if (!all && TAILQ_EMPTY(&ie->ie_handlers)) 1694151658Sjhb continue; 1695151658Sjhb db_dump_intr_event(ie, verbose); 1696160312Sjhb if (db_pager_quit) 1697160312Sjhb break; 1698151658Sjhb } 1699151658Sjhb} 1700121482Sjhb#endif /* DDB */ 1701121482Sjhb 1702121482Sjhb/* 170367551Sjhb * Start standard software interrupt threads 170466698Sjhb */ 170567551Sjhbstatic void 170672237Sjhbstart_softintr(void *dummy) 170767551Sjhb{ 170872237Sjhb 1709177859Sjeff if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih)) 1710177859Sjeff panic("died while creating vm swi ithread"); 171166698Sjhb} 1712177253SrwatsonSYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, 1713177253Srwatson NULL); 171466698Sjhb 1715151658Sjhb/* 171677582Stmm * Sysctls used by systat and others: hw.intrnames and hw.intrcnt. 171777582Stmm * The data for this machine dependent, and the declarations are in machine 171877582Stmm * dependent code. The layout of intrnames and intrcnt however is machine 171977582Stmm * independent. 
172077582Stmm * 172177582Stmm * We do not know the length of intrcnt and intrnames at compile time, so 172277582Stmm * calculate things at run time. 172377582Stmm */ 172477582Stmmstatic int 172577582Stmmsysctl_intrnames(SYSCTL_HANDLER_ARGS) 172677582Stmm{ 1727151658Sjhb return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames, 172877582Stmm req)); 172977582Stmm} 173077582Stmm 173177582StmmSYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD, 173277582Stmm NULL, 0, sysctl_intrnames, "", "Interrupt Names"); 173377582Stmm 173477582Stmmstatic int 173577582Stmmsysctl_intrcnt(SYSCTL_HANDLER_ARGS) 173677582Stmm{ 1737151658Sjhb return (sysctl_handle_opaque(oidp, intrcnt, 173877582Stmm (char *)eintrcnt - (char *)intrcnt, req)); 173977582Stmm} 174077582Stmm 174177582StmmSYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD, 174277582Stmm NULL, 0, sysctl_intrcnt, "", "Interrupt Counts"); 1743121482Sjhb 1744121482Sjhb#ifdef DDB 1745121482Sjhb/* 1746121482Sjhb * DDB command to dump the interrupt statistics. 1747121482Sjhb */ 1748121482SjhbDB_SHOW_COMMAND(intrcnt, db_show_intrcnt) 1749121482Sjhb{ 1750121482Sjhb u_long *i; 1751121482Sjhb char *cp; 1752121482Sjhb 1753121482Sjhb cp = intrnames; 1754160312Sjhb for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) { 1755121482Sjhb if (*cp == '\0') 1756121482Sjhb break; 1757121482Sjhb if (*i != 0) 1758121482Sjhb db_printf("%s\t%lu\n", cp, *i); 1759121482Sjhb cp += strlen(cp) + 1; 1760121482Sjhb } 1761121482Sjhb} 1762121482Sjhb#endif 1763