kern_intr.c revision 271712
1139804Simp/*- 226156Sse * Copyright (c) 1997, Stefan Esser <se@freebsd.org> 326156Sse * All rights reserved. 426156Sse * 526156Sse * Redistribution and use in source and binary forms, with or without 626156Sse * modification, are permitted provided that the following conditions 726156Sse * are met: 826156Sse * 1. Redistributions of source code must retain the above copyright 926156Sse * notice unmodified, this list of conditions, and the following 1026156Sse * disclaimer. 1126156Sse * 2. Redistributions in binary form must reproduce the above copyright 1226156Sse * notice, this list of conditions and the following disclaimer in the 1326156Sse * documentation and/or other materials provided with the distribution. 1426156Sse * 1526156Sse * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 1626156Sse * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 1726156Sse * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 1826156Sse * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 1926156Sse * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 2026156Sse * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 2126156Sse * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 2226156Sse * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 2326156Sse * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 2426156Sse * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
2526156Sse */ 2626156Sse 27116182Sobrien#include <sys/cdefs.h> 28116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/kern_intr.c 271712 2014-09-17 17:33:22Z adrian $"); 2936887Sdfr 30121482Sjhb#include "opt_ddb.h" 31121482Sjhb 3241059Speter#include <sys/param.h> 3365822Sjhb#include <sys/bus.h> 34110860Salfred#include <sys/conf.h> 35178092Sjeff#include <sys/cpuset.h> 3665822Sjhb#include <sys/rtprio.h> 3741059Speter#include <sys/systm.h> 3866698Sjhb#include <sys/interrupt.h> 3966698Sjhb#include <sys/kernel.h> 4066698Sjhb#include <sys/kthread.h> 4166698Sjhb#include <sys/ktr.h> 42130128Sbde#include <sys/limits.h> 4374914Sjhb#include <sys/lock.h> 4426156Sse#include <sys/malloc.h> 4567365Sjhb#include <sys/mutex.h> 46195249Sjhb#include <sys/priv.h> 4766698Sjhb#include <sys/proc.h> 4872759Sjhb#include <sys/random.h> 4972237Sjhb#include <sys/resourcevar.h> 50139451Sjhb#include <sys/sched.h> 51177181Sjhb#include <sys/smp.h> 5277582Stmm#include <sys/sysctl.h> 53182024Skmacy#include <sys/syslog.h> 5466698Sjhb#include <sys/unistd.h> 5566698Sjhb#include <sys/vmmeter.h> 5666698Sjhb#include <machine/atomic.h> 5766698Sjhb#include <machine/cpu.h> 5867551Sjhb#include <machine/md_var.h> 5972237Sjhb#include <machine/stdarg.h> 60121482Sjhb#ifdef DDB 61121482Sjhb#include <ddb/ddb.h> 62121482Sjhb#include <ddb/db_sym.h> 63121482Sjhb#endif 6426156Sse 65151658Sjhb/* 66151658Sjhb * Describe an interrupt thread. There is one of these per interrupt event. 67151658Sjhb */ 68151658Sjhbstruct intr_thread { 69151658Sjhb struct intr_event *it_event; 70151658Sjhb struct thread *it_thread; /* Kernel thread. */ 71151658Sjhb int it_flags; /* (j) IT_* flags. */ 72151658Sjhb int it_need; /* Needs service. */ 7372759Sjhb}; 7472759Sjhb 75151658Sjhb/* Interrupt thread flags kept in it_flags */ 76151658Sjhb#define IT_DEAD 0x000001 /* Thread is waiting to exit. */ 77219819Sjeff#define IT_WAIT 0x000002 /* Thread is waiting for completion. 
*/ 78151658Sjhb 79151658Sjhbstruct intr_entropy { 80151658Sjhb struct thread *td; 81151658Sjhb uintptr_t event; 82151658Sjhb}; 83151658Sjhb 84151658Sjhbstruct intr_event *clk_intr_event; 85151658Sjhbstruct intr_event *tty_intr_event; 86128339Sbdevoid *vm_ih; 87173004Sjulianstruct proc *intrproc; 8838244Sbde 8972237Sjhbstatic MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads"); 9072237Sjhb 91168850Snjlstatic int intr_storm_threshold = 1000; 92267992ShselaskySYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN, 93128331Sjhb &intr_storm_threshold, 0, 94128339Sbde "Number of consecutive interrupts before storm protection is enabled"); 95151658Sjhbstatic TAILQ_HEAD(, intr_event) event_list = 96151658Sjhb TAILQ_HEAD_INITIALIZER(event_list); 97178092Sjeffstatic struct mtx event_lock; 98178092SjeffMTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF); 99128331Sjhb 100151658Sjhbstatic void intr_event_update(struct intr_event *ie); 101169320Spiso#ifdef INTR_FILTER 102177940Sjhbstatic int intr_event_schedule_thread(struct intr_event *ie, 103177940Sjhb struct intr_thread *ithd); 104177940Sjhbstatic int intr_filter_loop(struct intr_event *ie, 105177940Sjhb struct trapframe *frame, struct intr_thread **ithd); 106169320Spisostatic struct intr_thread *ithread_create(const char *name, 107169320Spiso struct intr_handler *ih); 108169320Spiso#else 109177940Sjhbstatic int intr_event_schedule_thread(struct intr_event *ie); 110151658Sjhbstatic struct intr_thread *ithread_create(const char *name); 111169320Spiso#endif 112151658Sjhbstatic void ithread_destroy(struct intr_thread *ithread); 113169320Spisostatic void ithread_execute_handlers(struct proc *p, 114169320Spiso struct intr_event *ie); 115169320Spiso#ifdef INTR_FILTER 116169320Spisostatic void priv_ithread_execute_handler(struct proc *p, 117169320Spiso struct intr_handler *ih); 118169320Spiso#endif 119128339Sbdestatic void ithread_loop(void *); 120151658Sjhbstatic void ithread_update(struct intr_thread 
*ithd); 121128339Sbdestatic void start_softintr(void *); 122128339Sbde 123165124Sjhb/* Map an interrupt type to an ithread priority. */ 12472237Sjhbu_char 125151658Sjhbintr_priority(enum intr_type flags) 12665822Sjhb{ 12772237Sjhb u_char pri; 12865822Sjhb 12972237Sjhb flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET | 13078365Speter INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV); 13165822Sjhb switch (flags) { 13272237Sjhb case INTR_TYPE_TTY: 133217292Sjhb pri = PI_TTY; 13465822Sjhb break; 13565822Sjhb case INTR_TYPE_BIO: 13665822Sjhb pri = PI_DISK; 13765822Sjhb break; 13865822Sjhb case INTR_TYPE_NET: 13965822Sjhb pri = PI_NET; 14065822Sjhb break; 14165822Sjhb case INTR_TYPE_CAM: 142217292Sjhb pri = PI_DISK; 14365822Sjhb break; 144217292Sjhb case INTR_TYPE_AV: 14578365Speter pri = PI_AV; 14678365Speter break; 14772237Sjhb case INTR_TYPE_CLK: 14872237Sjhb pri = PI_REALTIME; 14972237Sjhb break; 15065822Sjhb case INTR_TYPE_MISC: 15165822Sjhb pri = PI_DULL; /* don't care */ 15265822Sjhb break; 15365822Sjhb default: 15472237Sjhb /* We didn't specify an interrupt level. */ 155151658Sjhb panic("intr_priority: no interrupt type in flags"); 15665822Sjhb } 15765822Sjhb 15865822Sjhb return pri; 15965822Sjhb} 16065822Sjhb 16172237Sjhb/* 162151658Sjhb * Update an ithread based on the associated intr_event. 16372237Sjhb */ 16472237Sjhbstatic void 165151658Sjhbithread_update(struct intr_thread *ithd) 16672237Sjhb{ 167151658Sjhb struct intr_event *ie; 16883366Sjulian struct thread *td; 169151658Sjhb u_char pri; 17067551Sjhb 171151658Sjhb ie = ithd->it_event; 172151658Sjhb td = ithd->it_thread; 17372237Sjhb 174151658Sjhb /* Determine the overall priority of this event. */ 175151658Sjhb if (TAILQ_EMPTY(&ie->ie_handlers)) 176151658Sjhb pri = PRI_MAX_ITHD; 177151658Sjhb else 178151658Sjhb pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri; 179105354Srobert 180151658Sjhb /* Update name and priority. 
*/ 181173004Sjulian strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name)); 182232700Sjhb#ifdef KTR 183232700Sjhb sched_clear_tdname(td); 184232700Sjhb#endif 185170307Sjeff thread_lock(td); 186151658Sjhb sched_prio(td, pri); 187170307Sjeff thread_unlock(td); 188151658Sjhb} 189151658Sjhb 190151658Sjhb/* 191151658Sjhb * Regenerate the full name of an interrupt event and update its priority. 192151658Sjhb */ 193151658Sjhbstatic void 194151658Sjhbintr_event_update(struct intr_event *ie) 195151658Sjhb{ 196151658Sjhb struct intr_handler *ih; 197151658Sjhb char *last; 198151658Sjhb int missed, space; 199151658Sjhb 200151658Sjhb /* Start off with no entropy and just the name of the event. */ 201151658Sjhb mtx_assert(&ie->ie_lock, MA_OWNED); 202151658Sjhb strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); 203151658Sjhb ie->ie_flags &= ~IE_ENTROPY; 204137267Sjhb missed = 0; 205151658Sjhb space = 1; 206151658Sjhb 207151658Sjhb /* Run through all the handlers updating values. */ 208151658Sjhb TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 209151658Sjhb if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 < 210151658Sjhb sizeof(ie->ie_fullname)) { 211151658Sjhb strcat(ie->ie_fullname, " "); 212151658Sjhb strcat(ie->ie_fullname, ih->ih_name); 213151658Sjhb space = 0; 214137267Sjhb } else 215137267Sjhb missed++; 216137267Sjhb if (ih->ih_flags & IH_ENTROPY) 217151658Sjhb ie->ie_flags |= IE_ENTROPY; 218137267Sjhb } 219151658Sjhb 220151658Sjhb /* 221151658Sjhb * If the handler names were too long, add +'s to indicate missing 222151658Sjhb * names. If we run out of room and still have +'s to add, change 223151658Sjhb * the last character from a + to a *. 
224151658Sjhb */ 225151658Sjhb last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2]; 226137267Sjhb while (missed-- > 0) { 227151658Sjhb if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) { 228151658Sjhb if (*last == '+') { 229151658Sjhb *last = '*'; 230151658Sjhb break; 231151658Sjhb } else 232151658Sjhb *last = '+'; 233151658Sjhb } else if (space) { 234151658Sjhb strcat(ie->ie_fullname, " +"); 235151658Sjhb space = 0; 23672237Sjhb } else 237151658Sjhb strcat(ie->ie_fullname, "+"); 23872237Sjhb } 239151658Sjhb 240151658Sjhb /* 241151658Sjhb * If this event has an ithread, update it's priority and 242151658Sjhb * name. 243151658Sjhb */ 244151658Sjhb if (ie->ie_thread != NULL) 245151658Sjhb ithread_update(ie->ie_thread); 246151658Sjhb CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname); 24772237Sjhb} 24872237Sjhb 24972237Sjhbint 250183298Sobrienintr_event_create(struct intr_event **event, void *source, int flags, int irq, 251177940Sjhb void (*pre_ithread)(void *), void (*post_ithread)(void *), 252271712Sadrian void (*post_filter)(void *), int (*assign_cpu)(void *, int), 253177940Sjhb const char *fmt, ...) 254169320Spiso{ 255169320Spiso struct intr_event *ie; 256169320Spiso va_list ap; 25772237Sjhb 258169320Spiso /* The only valid flag during creation is IE_SOFT. 
*/ 259169320Spiso if ((flags & ~IE_SOFT) != 0) 260169320Spiso return (EINVAL); 261169320Spiso ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO); 262169320Spiso ie->ie_source = source; 263177940Sjhb ie->ie_pre_ithread = pre_ithread; 264177940Sjhb ie->ie_post_ithread = post_ithread; 265177940Sjhb ie->ie_post_filter = post_filter; 266177181Sjhb ie->ie_assign_cpu = assign_cpu; 267169320Spiso ie->ie_flags = flags; 268178092Sjeff ie->ie_irq = irq; 269177181Sjhb ie->ie_cpu = NOCPU; 270169320Spiso TAILQ_INIT(&ie->ie_handlers); 271169320Spiso mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF); 272169320Spiso 273169320Spiso va_start(ap, fmt); 274169320Spiso vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap); 275169320Spiso va_end(ap); 276169320Spiso strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); 277178092Sjeff mtx_lock(&event_lock); 278169320Spiso TAILQ_INSERT_TAIL(&event_list, ie, ie_list); 279178092Sjeff mtx_unlock(&event_lock); 280169320Spiso if (event != NULL) 281169320Spiso *event = ie; 282169320Spiso CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name); 283169320Spiso return (0); 284169320Spiso} 285169320Spiso 286177181Sjhb/* 287177181Sjhb * Bind an interrupt event to the specified CPU. Note that not all 288177181Sjhb * platforms support binding an interrupt to a CPU. For those 289177181Sjhb * platforms this request will fail. For supported platforms, any 290177181Sjhb * associated ithreads as well as the primary interrupt context will 291177181Sjhb * be bound to the specificed CPU. Using a cpu id of NOCPU unbinds 292177181Sjhb * the interrupt event. 293177181Sjhb */ 294151658Sjhbint 295271712Sadrianintr_event_bind(struct intr_event *ie, int cpu) 296177181Sjhb{ 297178092Sjeff lwpid_t id; 298177181Sjhb int error; 299177181Sjhb 300177181Sjhb /* Need a CPU to bind to. 
*/ 301177181Sjhb if (cpu != NOCPU && CPU_ABSENT(cpu)) 302177181Sjhb return (EINVAL); 303177181Sjhb 304177181Sjhb if (ie->ie_assign_cpu == NULL) 305177181Sjhb return (EOPNOTSUPP); 306195249Sjhb 307195249Sjhb error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR); 308195249Sjhb if (error) 309195249Sjhb return (error); 310195249Sjhb 311178092Sjeff /* 312195249Sjhb * If we have any ithreads try to set their mask first to verify 313195249Sjhb * permissions, etc. 314178092Sjeff */ 315177181Sjhb mtx_lock(&ie->ie_lock); 316178092Sjeff if (ie->ie_thread != NULL) { 317178092Sjeff id = ie->ie_thread->it_thread->td_tid; 318177181Sjhb mtx_unlock(&ie->ie_lock); 319267716Smelifaro error = cpuset_setithread(id, cpu); 320178092Sjeff if (error) 321178092Sjeff return (error); 322178092Sjeff } else 323178092Sjeff mtx_unlock(&ie->ie_lock); 324177181Sjhb error = ie->ie_assign_cpu(ie->ie_source, cpu); 325195249Sjhb if (error) { 326195249Sjhb mtx_lock(&ie->ie_lock); 327195249Sjhb if (ie->ie_thread != NULL) { 328267716Smelifaro cpu = ie->ie_cpu; 329195249Sjhb id = ie->ie_thread->it_thread->td_tid; 330195249Sjhb mtx_unlock(&ie->ie_lock); 331267716Smelifaro (void)cpuset_setithread(id, cpu); 332195249Sjhb } else 333195249Sjhb mtx_unlock(&ie->ie_lock); 334177181Sjhb return (error); 335195249Sjhb } 336195249Sjhb 337177181Sjhb mtx_lock(&ie->ie_lock); 338177181Sjhb ie->ie_cpu = cpu; 339177181Sjhb mtx_unlock(&ie->ie_lock); 340178092Sjeff 341178092Sjeff return (error); 342178092Sjeff} 343178092Sjeff 344178092Sjeffstatic struct intr_event * 345178092Sjeffintr_lookup(int irq) 346178092Sjeff{ 347178092Sjeff struct intr_event *ie; 348178092Sjeff 349178092Sjeff mtx_lock(&event_lock); 350178092Sjeff TAILQ_FOREACH(ie, &event_list, ie_list) 351178092Sjeff if (ie->ie_irq == irq && 352178092Sjeff (ie->ie_flags & IE_SOFT) == 0 && 353178092Sjeff TAILQ_FIRST(&ie->ie_handlers) != NULL) 354178092Sjeff break; 355178092Sjeff mtx_unlock(&event_lock); 356178092Sjeff return (ie); 357178092Sjeff} 358178092Sjeff 
359178092Sjeffint 360178092Sjeffintr_setaffinity(int irq, void *m) 361178092Sjeff{ 362178092Sjeff struct intr_event *ie; 363178092Sjeff cpuset_t *mask; 364178092Sjeff u_char cpu; 365178092Sjeff int n; 366178092Sjeff 367178092Sjeff mask = m; 368178092Sjeff cpu = NOCPU; 369178092Sjeff /* 370178092Sjeff * If we're setting all cpus we can unbind. Otherwise make sure 371178092Sjeff * only one cpu is in the set. 372178092Sjeff */ 373178092Sjeff if (CPU_CMP(cpuset_root, mask)) { 374178092Sjeff for (n = 0; n < CPU_SETSIZE; n++) { 375178092Sjeff if (!CPU_ISSET(n, mask)) 376178092Sjeff continue; 377178092Sjeff if (cpu != NOCPU) 378178092Sjeff return (EINVAL); 379178092Sjeff cpu = (u_char)n; 380178092Sjeff } 381178092Sjeff } 382178092Sjeff ie = intr_lookup(irq); 383178092Sjeff if (ie == NULL) 384178092Sjeff return (ESRCH); 385194987Sjhb return (intr_event_bind(ie, cpu)); 386178092Sjeff} 387178092Sjeff 388178092Sjeffint 389178092Sjeffintr_getaffinity(int irq, void *m) 390178092Sjeff{ 391178092Sjeff struct intr_event *ie; 392178092Sjeff cpuset_t *mask; 393178092Sjeff 394178092Sjeff mask = m; 395178092Sjeff ie = intr_lookup(irq); 396178092Sjeff if (ie == NULL) 397178092Sjeff return (ESRCH); 398178092Sjeff CPU_ZERO(mask); 399178092Sjeff mtx_lock(&ie->ie_lock); 400178092Sjeff if (ie->ie_cpu == NOCPU) 401178092Sjeff CPU_COPY(cpuset_root, mask); 402178092Sjeff else 403178092Sjeff CPU_SET(ie->ie_cpu, mask); 404178092Sjeff mtx_unlock(&ie->ie_lock); 405177181Sjhb return (0); 406177181Sjhb} 407177181Sjhb 408177181Sjhbint 409151658Sjhbintr_event_destroy(struct intr_event *ie) 410151658Sjhb{ 411151658Sjhb 412178092Sjeff mtx_lock(&event_lock); 413151658Sjhb mtx_lock(&ie->ie_lock); 414151658Sjhb if (!TAILQ_EMPTY(&ie->ie_handlers)) { 415151658Sjhb mtx_unlock(&ie->ie_lock); 416178092Sjeff mtx_unlock(&event_lock); 417151658Sjhb return (EBUSY); 418151658Sjhb } 419151658Sjhb TAILQ_REMOVE(&event_list, ie, ie_list); 420157728Sjhb#ifndef notyet 421157728Sjhb if (ie->ie_thread != NULL) { 
422157728Sjhb ithread_destroy(ie->ie_thread); 423157728Sjhb ie->ie_thread = NULL; 424157728Sjhb } 425157728Sjhb#endif 426151658Sjhb mtx_unlock(&ie->ie_lock); 427178092Sjeff mtx_unlock(&event_lock); 428151658Sjhb mtx_destroy(&ie->ie_lock); 429151658Sjhb free(ie, M_ITHREAD); 430151658Sjhb return (0); 431151658Sjhb} 432151658Sjhb 433169320Spiso#ifndef INTR_FILTER 434151658Sjhbstatic struct intr_thread * 435151658Sjhbithread_create(const char *name) 436151658Sjhb{ 437151658Sjhb struct intr_thread *ithd; 438151658Sjhb struct thread *td; 439151658Sjhb int error; 440151658Sjhb 441151658Sjhb ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); 442151658Sjhb 443173004Sjulian error = kproc_kthread_add(ithread_loop, ithd, &intrproc, 444173004Sjulian &td, RFSTOPPED | RFHIGHPID, 445173051Sjulian 0, "intr", "%s", name); 446151658Sjhb if (error) 447172836Sjulian panic("kproc_create() failed with %d", error); 448170307Sjeff thread_lock(td); 449164936Sjulian sched_class(td, PRI_ITHD); 450103216Sjulian TD_SET_IWAIT(td); 451170307Sjeff thread_unlock(td); 452151658Sjhb td->td_pflags |= TDP_ITHREAD; 453151658Sjhb ithd->it_thread = td; 454151658Sjhb CTR2(KTR_INTR, "%s: created %s", __func__, name); 455151658Sjhb return (ithd); 45672237Sjhb} 457169320Spiso#else 458169320Spisostatic struct intr_thread * 459169320Spisoithread_create(const char *name, struct intr_handler *ih) 460169320Spiso{ 461169320Spiso struct intr_thread *ithd; 462169320Spiso struct thread *td; 463169320Spiso int error; 46472237Sjhb 465169320Spiso ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); 466169320Spiso 467173153Sjulian error = kproc_kthread_add(ithread_loop, ih, &intrproc, 468173004Sjulian &td, RFSTOPPED | RFHIGHPID, 469173051Sjulian 0, "intr", "%s", name); 470169320Spiso if (error) 471172836Sjulian panic("kproc_create() failed with %d", error); 472170307Sjeff thread_lock(td); 473169320Spiso sched_class(td, PRI_ITHD); 474169320Spiso TD_SET_IWAIT(td); 475170307Sjeff 
thread_unlock(td); 476169320Spiso td->td_pflags |= TDP_ITHREAD; 477169320Spiso ithd->it_thread = td; 478169320Spiso CTR2(KTR_INTR, "%s: created %s", __func__, name); 479169320Spiso return (ithd); 480169320Spiso} 481169320Spiso#endif 482169320Spiso 483151658Sjhbstatic void 484151658Sjhbithread_destroy(struct intr_thread *ithread) 48572237Sjhb{ 48683366Sjulian struct thread *td; 48772237Sjhb 488157784Sscottl CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name); 489151658Sjhb td = ithread->it_thread; 490170307Sjeff thread_lock(td); 49176771Sjhb ithread->it_flags |= IT_DEAD; 492103216Sjulian if (TD_AWAITING_INTR(td)) { 493103216Sjulian TD_CLR_IWAIT(td); 494166188Sjeff sched_add(td, SRQ_INTR); 49572237Sjhb } 496170307Sjeff thread_unlock(td); 49772237Sjhb} 49872237Sjhb 499169320Spiso#ifndef INTR_FILTER 50072237Sjhbint 501151658Sjhbintr_event_add_handler(struct intr_event *ie, const char *name, 502166901Spiso driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, 503166901Spiso enum intr_type flags, void **cookiep) 50472237Sjhb{ 505151658Sjhb struct intr_handler *ih, *temp_ih; 506151658Sjhb struct intr_thread *it; 50772237Sjhb 508166901Spiso if (ie == NULL || name == NULL || (handler == NULL && filter == NULL)) 50972237Sjhb return (EINVAL); 51072237Sjhb 511151658Sjhb /* Allocate and populate an interrupt handler structure. 
*/ 512151658Sjhb ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO); 513166901Spiso ih->ih_filter = filter; 51472237Sjhb ih->ih_handler = handler; 51572237Sjhb ih->ih_argument = arg; 516198134Sjhb strlcpy(ih->ih_name, name, sizeof(ih->ih_name)); 517151658Sjhb ih->ih_event = ie; 51872237Sjhb ih->ih_pri = pri; 519166901Spiso if (flags & INTR_EXCL) 52072237Sjhb ih->ih_flags = IH_EXCLUSIVE; 52172237Sjhb if (flags & INTR_MPSAFE) 52272237Sjhb ih->ih_flags |= IH_MPSAFE; 52372237Sjhb if (flags & INTR_ENTROPY) 52472237Sjhb ih->ih_flags |= IH_ENTROPY; 52572237Sjhb 526151658Sjhb /* We can only have one exclusive handler in a event. */ 527151658Sjhb mtx_lock(&ie->ie_lock); 528151658Sjhb if (!TAILQ_EMPTY(&ie->ie_handlers)) { 529151658Sjhb if ((flags & INTR_EXCL) || 530151658Sjhb (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) { 531151658Sjhb mtx_unlock(&ie->ie_lock); 532151658Sjhb free(ih, M_ITHREAD); 533151658Sjhb return (EINVAL); 534151658Sjhb } 535122002Sjhb } 53672237Sjhb 537151658Sjhb /* Create a thread if we need one. */ 538166901Spiso while (ie->ie_thread == NULL && handler != NULL) { 539151658Sjhb if (ie->ie_flags & IE_ADDING_THREAD) 540157815Sjhb msleep(ie, &ie->ie_lock, 0, "ithread", 0); 541151658Sjhb else { 542151658Sjhb ie->ie_flags |= IE_ADDING_THREAD; 543151658Sjhb mtx_unlock(&ie->ie_lock); 544151658Sjhb it = ithread_create("intr: newborn"); 545151658Sjhb mtx_lock(&ie->ie_lock); 546151658Sjhb ie->ie_flags &= ~IE_ADDING_THREAD; 547151658Sjhb ie->ie_thread = it; 548151658Sjhb it->it_event = ie; 549151658Sjhb ithread_update(it); 550151658Sjhb wakeup(ie); 551151658Sjhb } 552151658Sjhb } 553239095Skan 554239095Skan /* Add the new handler to the event in priority order. 
*/ 555239095Skan TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) { 556239095Skan if (temp_ih->ih_pri > ih->ih_pri) 557239095Skan break; 558239095Skan } 559239095Skan if (temp_ih == NULL) 560239095Skan TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next); 561239095Skan else 562239095Skan TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next); 563239095Skan intr_event_update(ie); 564239095Skan 565151658Sjhb CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name, 566151658Sjhb ie->ie_name); 567151658Sjhb mtx_unlock(&ie->ie_lock); 568151658Sjhb 56972237Sjhb if (cookiep != NULL) 57072237Sjhb *cookiep = ih; 57172237Sjhb return (0); 57272237Sjhb} 573169320Spiso#else 574169320Spisoint 575169320Spisointr_event_add_handler(struct intr_event *ie, const char *name, 576169320Spiso driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, 577169320Spiso enum intr_type flags, void **cookiep) 578169320Spiso{ 579169320Spiso struct intr_handler *ih, *temp_ih; 580169320Spiso struct intr_thread *it; 58172237Sjhb 582169320Spiso if (ie == NULL || name == NULL || (handler == NULL && filter == NULL)) 583169320Spiso return (EINVAL); 584169320Spiso 585169320Spiso /* Allocate and populate an interrupt handler structure. */ 586169320Spiso ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO); 587169320Spiso ih->ih_filter = filter; 588169320Spiso ih->ih_handler = handler; 589169320Spiso ih->ih_argument = arg; 590198134Sjhb strlcpy(ih->ih_name, name, sizeof(ih->ih_name)); 591169320Spiso ih->ih_event = ie; 592169320Spiso ih->ih_pri = pri; 593169320Spiso if (flags & INTR_EXCL) 594169320Spiso ih->ih_flags = IH_EXCLUSIVE; 595169320Spiso if (flags & INTR_MPSAFE) 596169320Spiso ih->ih_flags |= IH_MPSAFE; 597169320Spiso if (flags & INTR_ENTROPY) 598169320Spiso ih->ih_flags |= IH_ENTROPY; 599169320Spiso 600169320Spiso /* We can only have one exclusive handler in a event. 
*/ 601169320Spiso mtx_lock(&ie->ie_lock); 602169320Spiso if (!TAILQ_EMPTY(&ie->ie_handlers)) { 603169320Spiso if ((flags & INTR_EXCL) || 604169320Spiso (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) { 605169320Spiso mtx_unlock(&ie->ie_lock); 606169320Spiso free(ih, M_ITHREAD); 607169320Spiso return (EINVAL); 608169320Spiso } 609169320Spiso } 610169320Spiso 611169320Spiso /* For filtered handlers, create a private ithread to run on. */ 612239095Skan if (filter != NULL && handler != NULL) { 613169320Spiso mtx_unlock(&ie->ie_lock); 614239095Skan it = ithread_create("intr: newborn", ih); 615169320Spiso mtx_lock(&ie->ie_lock); 616239095Skan it->it_event = ie; 617169320Spiso ih->ih_thread = it; 618230231Spluknet ithread_update(it); /* XXX - do we really need this?!?!? */ 619169320Spiso } else { /* Create the global per-event thread if we need one. */ 620169320Spiso while (ie->ie_thread == NULL && handler != NULL) { 621169320Spiso if (ie->ie_flags & IE_ADDING_THREAD) 622169320Spiso msleep(ie, &ie->ie_lock, 0, "ithread", 0); 623169320Spiso else { 624169320Spiso ie->ie_flags |= IE_ADDING_THREAD; 625169320Spiso mtx_unlock(&ie->ie_lock); 626169320Spiso it = ithread_create("intr: newborn", ih); 627169320Spiso mtx_lock(&ie->ie_lock); 628169320Spiso ie->ie_flags &= ~IE_ADDING_THREAD; 629169320Spiso ie->ie_thread = it; 630169320Spiso it->it_event = ie; 631169320Spiso ithread_update(it); 632169320Spiso wakeup(ie); 633169320Spiso } 634169320Spiso } 635169320Spiso } 636239095Skan 637239095Skan /* Add the new handler to the event in priority order. 
*/ 638239095Skan TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) { 639239095Skan if (temp_ih->ih_pri > ih->ih_pri) 640239095Skan break; 641239095Skan } 642239095Skan if (temp_ih == NULL) 643239095Skan TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next); 644239095Skan else 645239095Skan TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next); 646239095Skan intr_event_update(ie); 647239095Skan 648169320Spiso CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name, 649169320Spiso ie->ie_name); 650169320Spiso mtx_unlock(&ie->ie_lock); 651169320Spiso 652169320Spiso if (cookiep != NULL) 653169320Spiso *cookiep = ih; 654169320Spiso return (0); 655169320Spiso} 656169320Spiso#endif 657169320Spiso 658165125Sjhb/* 659198134Sjhb * Append a description preceded by a ':' to the name of the specified 660198134Sjhb * interrupt handler. 661198134Sjhb */ 662198134Sjhbint 663198134Sjhbintr_event_describe_handler(struct intr_event *ie, void *cookie, 664198134Sjhb const char *descr) 665198134Sjhb{ 666198134Sjhb struct intr_handler *ih; 667198134Sjhb size_t space; 668198134Sjhb char *start; 669198134Sjhb 670198134Sjhb mtx_lock(&ie->ie_lock); 671198134Sjhb#ifdef INVARIANTS 672198134Sjhb TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 673198134Sjhb if (ih == cookie) 674198134Sjhb break; 675198134Sjhb } 676198134Sjhb if (ih == NULL) { 677198134Sjhb mtx_unlock(&ie->ie_lock); 678198149Sjhb panic("handler %p not found in interrupt event %p", cookie, ie); 679198134Sjhb } 680198134Sjhb#endif 681198134Sjhb ih = cookie; 682198134Sjhb 683198134Sjhb /* 684198134Sjhb * Look for an existing description by checking for an 685198134Sjhb * existing ":". This assumes device names do not include 686198134Sjhb * colons. If one is found, prepare to insert the new 687198134Sjhb * description at that point. If one is not found, find the 688198134Sjhb * end of the name to use as the insertion point. 
689198134Sjhb */ 690229272Sed start = strchr(ih->ih_name, ':'); 691198134Sjhb if (start == NULL) 692229272Sed start = strchr(ih->ih_name, 0); 693198134Sjhb 694198134Sjhb /* 695198134Sjhb * See if there is enough remaining room in the string for the 696198134Sjhb * description + ":". The "- 1" leaves room for the trailing 697198134Sjhb * '\0'. The "+ 1" accounts for the colon. 698198134Sjhb */ 699198134Sjhb space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1; 700198134Sjhb if (strlen(descr) + 1 > space) { 701198134Sjhb mtx_unlock(&ie->ie_lock); 702198134Sjhb return (ENOSPC); 703198134Sjhb } 704198134Sjhb 705198134Sjhb /* Append a colon followed by the description. */ 706198134Sjhb *start = ':'; 707198134Sjhb strcpy(start + 1, descr); 708198134Sjhb intr_event_update(ie); 709198134Sjhb mtx_unlock(&ie->ie_lock); 710198134Sjhb return (0); 711198134Sjhb} 712198134Sjhb 713198134Sjhb/* 714165125Sjhb * Return the ie_source field from the intr_event an intr_handler is 715165125Sjhb * associated with. 716165125Sjhb */ 717165125Sjhbvoid * 718165125Sjhbintr_handler_source(void *cookie) 719165125Sjhb{ 720165125Sjhb struct intr_handler *ih; 721165125Sjhb struct intr_event *ie; 722165125Sjhb 723165125Sjhb ih = (struct intr_handler *)cookie; 724165125Sjhb if (ih == NULL) 725165125Sjhb return (NULL); 726165125Sjhb ie = ih->ih_event; 727165125Sjhb KASSERT(ie != NULL, 728165125Sjhb ("interrupt handler \"%s\" has a NULL interrupt event", 729165125Sjhb ih->ih_name)); 730165125Sjhb return (ie->ie_source); 731165125Sjhb} 732165125Sjhb 733219819Sjeff/* 734219819Sjeff * Sleep until an ithread finishes executing an interrupt handler. 735219819Sjeff * 736219819Sjeff * XXX Doesn't currently handle interrupt filters or fast interrupt 737219819Sjeff * handlers. This is intended for compatibility with linux drivers 738219819Sjeff * only. Do not use in BSD code. 
739219819Sjeff */ 740219819Sjeffvoid 741219819Sjeff_intr_drain(int irq) 742219819Sjeff{ 743219819Sjeff struct intr_event *ie; 744219819Sjeff struct intr_thread *ithd; 745219819Sjeff struct thread *td; 746219819Sjeff 747219819Sjeff ie = intr_lookup(irq); 748219819Sjeff if (ie == NULL) 749219819Sjeff return; 750219819Sjeff if (ie->ie_thread == NULL) 751219819Sjeff return; 752219819Sjeff ithd = ie->ie_thread; 753219819Sjeff td = ithd->it_thread; 754221055Sjeff /* 755221055Sjeff * We set the flag and wait for it to be cleared to avoid 756221055Sjeff * long delays with potentially busy interrupt handlers 757221055Sjeff * were we to only sample TD_AWAITING_INTR() every tick. 758221055Sjeff */ 759219819Sjeff thread_lock(td); 760219819Sjeff if (!TD_AWAITING_INTR(td)) { 761219819Sjeff ithd->it_flags |= IT_WAIT; 762221055Sjeff while (ithd->it_flags & IT_WAIT) { 763221055Sjeff thread_unlock(td); 764221055Sjeff pause("idrain", 1); 765221055Sjeff thread_lock(td); 766221055Sjeff } 767219819Sjeff } 768221055Sjeff thread_unlock(td); 769219819Sjeff return; 770219819Sjeff} 771219819Sjeff 772219819Sjeff 773169320Spiso#ifndef INTR_FILTER 77472237Sjhbint 775151658Sjhbintr_event_remove_handler(void *cookie) 77672237Sjhb{ 777151658Sjhb struct intr_handler *handler = (struct intr_handler *)cookie; 778151658Sjhb struct intr_event *ie; 77972237Sjhb#ifdef INVARIANTS 780151658Sjhb struct intr_handler *ih; 78172237Sjhb#endif 782151658Sjhb#ifdef notyet 783151658Sjhb int dead; 784151658Sjhb#endif 78572237Sjhb 78672759Sjhb if (handler == NULL) 78772237Sjhb return (EINVAL); 788151658Sjhb ie = handler->ih_event; 789151658Sjhb KASSERT(ie != NULL, 790151658Sjhb ("interrupt handler \"%s\" has a NULL interrupt event", 791165124Sjhb handler->ih_name)); 792151658Sjhb mtx_lock(&ie->ie_lock); 79387593Sobrien CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name, 794151658Sjhb ie->ie_name); 79572237Sjhb#ifdef INVARIANTS 796151658Sjhb TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 79772759Sjhb 
if (ih == handler) 79872759Sjhb goto ok; 799151658Sjhb mtx_unlock(&ie->ie_lock); 800151658Sjhb panic("interrupt handler \"%s\" not found in interrupt event \"%s\"", 801151658Sjhb ih->ih_name, ie->ie_name); 80272759Sjhbok: 80372237Sjhb#endif 80472839Sjhb /* 805151658Sjhb * If there is no ithread, then just remove the handler and return. 806151658Sjhb * XXX: Note that an INTR_FAST handler might be running on another 807151658Sjhb * CPU! 808151658Sjhb */ 809151658Sjhb if (ie->ie_thread == NULL) { 810151658Sjhb TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 811151658Sjhb mtx_unlock(&ie->ie_lock); 812151658Sjhb free(handler, M_ITHREAD); 813151658Sjhb return (0); 814151658Sjhb } 815151658Sjhb 816151658Sjhb /* 81772839Sjhb * If the interrupt thread is already running, then just mark this 81872839Sjhb * handler as being dead and let the ithread do the actual removal. 819124505Struckman * 820124505Struckman * During a cold boot while cold is set, msleep() does not sleep, 821124505Struckman * so we have to remove the handler here rather than letting the 822124505Struckman * thread do it. 82372839Sjhb */ 824170307Sjeff thread_lock(ie->ie_thread->it_thread); 825151658Sjhb if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) { 82672839Sjhb handler->ih_flags |= IH_DEAD; 82772839Sjhb 82872839Sjhb /* 82972839Sjhb * Ensure that the thread will process the handler list 83072839Sjhb * again and remove this handler if it has already passed 83172839Sjhb * it on the list. 83272839Sjhb */ 833252683Salfred atomic_store_rel_int(&ie->ie_thread->it_need, 1); 834151658Sjhb } else 835151658Sjhb TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 836170307Sjeff thread_unlock(ie->ie_thread->it_thread); 837151658Sjhb while (handler->ih_flags & IH_DEAD) 838157815Sjhb msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0); 839151658Sjhb intr_event_update(ie); 840151658Sjhb#ifdef notyet 841151658Sjhb /* 842151658Sjhb * XXX: This could be bad in the case of ppbus(8). 
Also, I think 843151658Sjhb * this could lead to races of stale data when servicing an 844151658Sjhb * interrupt. 845151658Sjhb */ 846151658Sjhb dead = 1; 847151658Sjhb TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 848151658Sjhb if (!(ih->ih_flags & IH_FAST)) { 849151658Sjhb dead = 0; 850151658Sjhb break; 851151658Sjhb } 852151658Sjhb } 853151658Sjhb if (dead) { 854151658Sjhb ithread_destroy(ie->ie_thread); 855151658Sjhb ie->ie_thread = NULL; 856151658Sjhb } 857151658Sjhb#endif 858151658Sjhb mtx_unlock(&ie->ie_lock); 85976771Sjhb free(handler, M_ITHREAD); 86072237Sjhb return (0); 86172237Sjhb} 86272237Sjhb 863177940Sjhbstatic int 864151658Sjhbintr_event_schedule_thread(struct intr_event *ie) 86572759Sjhb{ 866151658Sjhb struct intr_entropy entropy; 867151658Sjhb struct intr_thread *it; 86883366Sjulian struct thread *td; 869101176Sjulian struct thread *ctd; 87072759Sjhb struct proc *p; 87172759Sjhb 87272759Sjhb /* 87372759Sjhb * If no ithread or no handlers, then we have a stray interrupt. 87472759Sjhb */ 875151658Sjhb if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || 876151658Sjhb ie->ie_thread == NULL) 87772759Sjhb return (EINVAL); 87872759Sjhb 879101176Sjulian ctd = curthread; 880151658Sjhb it = ie->ie_thread; 881151658Sjhb td = it->it_thread; 882133191Srwatson p = td->td_proc; 883151658Sjhb 88472759Sjhb /* 88572759Sjhb * If any of the handlers for this ithread claim to be good 88672759Sjhb * sources of entropy, then gather some. 
88772759Sjhb */ 888151658Sjhb if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) { 889133191Srwatson CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__, 890173004Sjulian p->p_pid, td->td_name); 891151658Sjhb entropy.event = (uintptr_t)ie; 892151658Sjhb entropy.td = ctd; 893256377Smarkm random_harvest(&entropy, sizeof(entropy), 2, 89472759Sjhb RANDOM_INTERRUPT); 89572759Sjhb } 89672759Sjhb 897151658Sjhb KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name)); 89872759Sjhb 89972759Sjhb /* 90072759Sjhb * Set it_need to tell the thread to keep running if it is already 901170307Sjeff * running. Then, lock the thread and see if we actually need to 902170307Sjeff * put it on the runqueue. 90372759Sjhb */ 904252683Salfred atomic_store_rel_int(&it->it_need, 1); 905170307Sjeff thread_lock(td); 906103216Sjulian if (TD_AWAITING_INTR(td)) { 907151658Sjhb CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid, 908173004Sjulian td->td_name); 909103216Sjulian TD_CLR_IWAIT(td); 910166188Sjeff sched_add(td, SRQ_INTR); 91172759Sjhb } else { 912151658Sjhb CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d", 913173004Sjulian __func__, p->p_pid, td->td_name, it->it_need, td->td_state); 91472759Sjhb } 915170307Sjeff thread_unlock(td); 91672759Sjhb 91772759Sjhb return (0); 91872759Sjhb} 919169320Spiso#else 920169320Spisoint 921169320Spisointr_event_remove_handler(void *cookie) 922169320Spiso{ 923169320Spiso struct intr_handler *handler = (struct intr_handler *)cookie; 924169320Spiso struct intr_event *ie; 925169320Spiso struct intr_thread *it; 926169320Spiso#ifdef INVARIANTS 927169320Spiso struct intr_handler *ih; 928169320Spiso#endif 929169320Spiso#ifdef notyet 930169320Spiso int dead; 931169320Spiso#endif 93272759Sjhb 933169320Spiso if (handler == NULL) 934169320Spiso return (EINVAL); 935169320Spiso ie = handler->ih_event; 936169320Spiso KASSERT(ie != NULL, 937169320Spiso ("interrupt handler \"%s\" has a NULL interrupt event", 938169320Spiso 
handler->ih_name)); 939169320Spiso mtx_lock(&ie->ie_lock); 940169320Spiso CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name, 941169320Spiso ie->ie_name); 942169320Spiso#ifdef INVARIANTS 943169320Spiso TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 944169320Spiso if (ih == handler) 945169320Spiso goto ok; 946169320Spiso mtx_unlock(&ie->ie_lock); 947169320Spiso panic("interrupt handler \"%s\" not found in interrupt event \"%s\"", 948169320Spiso ih->ih_name, ie->ie_name); 949169320Spisook: 950169320Spiso#endif 951169320Spiso /* 952169320Spiso * If there are no ithreads (per event and per handler), then 953169320Spiso * just remove the handler and return. 954169320Spiso * XXX: Note that an INTR_FAST handler might be running on another CPU! 955169320Spiso */ 956169320Spiso if (ie->ie_thread == NULL && handler->ih_thread == NULL) { 957169320Spiso TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 958169320Spiso mtx_unlock(&ie->ie_lock); 959169320Spiso free(handler, M_ITHREAD); 960169320Spiso return (0); 961169320Spiso } 962169320Spiso 963169320Spiso /* Private or global ithread? */ 964169320Spiso it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread; 965169320Spiso /* 966169320Spiso * If the interrupt thread is already running, then just mark this 967169320Spiso * handler as being dead and let the ithread do the actual removal. 968169320Spiso * 969169320Spiso * During a cold boot while cold is set, msleep() does not sleep, 970169320Spiso * so we have to remove the handler here rather than letting the 971169320Spiso * thread do it. 972169320Spiso */ 973170307Sjeff thread_lock(it->it_thread); 974169320Spiso if (!TD_AWAITING_INTR(it->it_thread) && !cold) { 975169320Spiso handler->ih_flags |= IH_DEAD; 976169320Spiso 977169320Spiso /* 978169320Spiso * Ensure that the thread will process the handler list 979169320Spiso * again and remove this handler if it has already passed 980169320Spiso * it on the list. 
981169320Spiso */ 982252683Salfred atomic_store_rel_int(&it->it_need, 1); 983169320Spiso } else 984169320Spiso TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 985170307Sjeff thread_unlock(it->it_thread); 986169320Spiso while (handler->ih_flags & IH_DEAD) 987169320Spiso msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0); 988169320Spiso /* 989169320Spiso * At this point, the handler has been disconnected from the event, 990169320Spiso * so we can kill the private ithread if any. 991169320Spiso */ 992169320Spiso if (handler->ih_thread) { 993169320Spiso ithread_destroy(handler->ih_thread); 994169320Spiso handler->ih_thread = NULL; 995169320Spiso } 996169320Spiso intr_event_update(ie); 997169320Spiso#ifdef notyet 998169320Spiso /* 999169320Spiso * XXX: This could be bad in the case of ppbus(8). Also, I think 1000169320Spiso * this could lead to races of stale data when servicing an 1001169320Spiso * interrupt. 1002169320Spiso */ 1003169320Spiso dead = 1; 1004169320Spiso TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 1005169320Spiso if (handler != NULL) { 1006169320Spiso dead = 0; 1007169320Spiso break; 1008169320Spiso } 1009169320Spiso } 1010169320Spiso if (dead) { 1011169320Spiso ithread_destroy(ie->ie_thread); 1012169320Spiso ie->ie_thread = NULL; 1013169320Spiso } 1014169320Spiso#endif 1015169320Spiso mtx_unlock(&ie->ie_lock); 1016169320Spiso free(handler, M_ITHREAD); 1017169320Spiso return (0); 1018169320Spiso} 1019169320Spiso 1020177940Sjhbstatic int 1021169320Spisointr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it) 1022169320Spiso{ 1023169320Spiso struct intr_entropy entropy; 1024169320Spiso struct thread *td; 1025169320Spiso struct thread *ctd; 1026169320Spiso struct proc *p; 1027169320Spiso 1028169320Spiso /* 1029169320Spiso * If no ithread or no handlers, then we have a stray interrupt. 
1030169320Spiso */ 1031169320Spiso if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL) 1032169320Spiso return (EINVAL); 1033169320Spiso 1034169320Spiso ctd = curthread; 1035169320Spiso td = it->it_thread; 1036169320Spiso p = td->td_proc; 1037169320Spiso 1038169320Spiso /* 1039169320Spiso * If any of the handlers for this ithread claim to be good 1040169320Spiso * sources of entropy, then gather some. 1041169320Spiso */ 1042169320Spiso if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) { 1043169320Spiso CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__, 1044173004Sjulian p->p_pid, td->td_name); 1045169320Spiso entropy.event = (uintptr_t)ie; 1046169320Spiso entropy.td = ctd; 1047256377Smarkm random_harvest(&entropy, sizeof(entropy), 2, 1048169320Spiso RANDOM_INTERRUPT); 1049169320Spiso } 1050169320Spiso 1051169320Spiso KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name)); 1052169320Spiso 1053169320Spiso /* 1054169320Spiso * Set it_need to tell the thread to keep running if it is already 1055170307Sjeff * running. Then, lock the thread and see if we actually need to 1056170307Sjeff * put it on the runqueue. 
1057169320Spiso */ 1058252683Salfred atomic_store_rel_int(&it->it_need, 1); 1059170307Sjeff thread_lock(td); 1060169320Spiso if (TD_AWAITING_INTR(td)) { 1061169320Spiso CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid, 1062173122Sjulian td->td_name); 1063169320Spiso TD_CLR_IWAIT(td); 1064169320Spiso sched_add(td, SRQ_INTR); 1065169320Spiso } else { 1066169320Spiso CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d", 1067173004Sjulian __func__, p->p_pid, td->td_name, it->it_need, td->td_state); 1068169320Spiso } 1069170307Sjeff thread_unlock(td); 1070169320Spiso 1071169320Spiso return (0); 1072169320Spiso} 1073169320Spiso#endif 1074169320Spiso 1075151699Sjhb/* 1076192305Srwatson * Allow interrupt event binding for software interrupt handlers -- a no-op, 1077192305Srwatson * since interrupts are generated in software rather than being directed by 1078192305Srwatson * a PIC. 1079192305Srwatson */ 1080192305Srwatsonstatic int 1081271712Sadrianswi_assign_cpu(void *arg, int cpu) 1082192305Srwatson{ 1083192305Srwatson 1084192305Srwatson return (0); 1085192305Srwatson} 1086192305Srwatson 1087192305Srwatson/* 1088151699Sjhb * Add a software interrupt handler to a specified event. If a given event 1089151699Sjhb * is not specified, then a new event is created. 1090151699Sjhb */ 109172759Sjhbint 1092151658Sjhbswi_add(struct intr_event **eventp, const char *name, driver_intr_t handler, 109372237Sjhb void *arg, int pri, enum intr_type flags, void **cookiep) 109472237Sjhb{ 1095151658Sjhb struct intr_event *ie; 109672237Sjhb int error; 109766698Sjhb 1098169320Spiso if (flags & INTR_ENTROPY) 109972759Sjhb return (EINVAL); 110072759Sjhb 1101151658Sjhb ie = (eventp != NULL) ? 
*eventp : NULL; 110266698Sjhb 1103151658Sjhb if (ie != NULL) { 1104151658Sjhb if (!(ie->ie_flags & IE_SOFT)) 1105151658Sjhb return (EINVAL); 110672759Sjhb } else { 1107178092Sjeff error = intr_event_create(&ie, NULL, IE_SOFT, 0, 1108192305Srwatson NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri); 110967551Sjhb if (error) 111072237Sjhb return (error); 1111151658Sjhb if (eventp != NULL) 1112151658Sjhb *eventp = ie; 111366698Sjhb } 1114177859Sjeff error = intr_event_add_handler(ie, name, NULL, handler, arg, 1115217292Sjhb PI_SWI(pri), flags, cookiep); 1116247778Sdavide return (error); 111766698Sjhb} 111866698Sjhb 111966698Sjhb/* 1120151658Sjhb * Schedule a software interrupt thread. 112166698Sjhb */ 112267551Sjhbvoid 112372237Sjhbswi_sched(void *cookie, int flags) 112466698Sjhb{ 1125151658Sjhb struct intr_handler *ih = (struct intr_handler *)cookie; 1126151658Sjhb struct intr_event *ie = ih->ih_event; 1127240921Sjhb struct intr_entropy entropy; 112872759Sjhb int error; 112966698Sjhb 1130151658Sjhb CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name, 1131151658Sjhb ih->ih_need); 1132151658Sjhb 1133240921Sjhb if (harvest.swi) { 1134240921Sjhb CTR2(KTR_INTR, "swi_sched: pid %d (%s) gathering entropy", 1135240921Sjhb curproc->p_pid, curthread->td_name); 1136240921Sjhb entropy.event = (uintptr_t)ih; 1137240921Sjhb entropy.td = curthread; 1138256377Smarkm random_harvest(&entropy, sizeof(entropy), 1, 1139255362Smarkm RANDOM_SWI); 1140240921Sjhb } 1141240921Sjhb 114267551Sjhb /* 114372759Sjhb * Set ih_need for this handler so that if the ithread is already 114472759Sjhb * running it will execute this handler on the next pass. Otherwise, 114572759Sjhb * it will execute it the next time it runs. 
114667551Sjhb */ 114772237Sjhb atomic_store_rel_int(&ih->ih_need, 1); 1148163474Sbde 114972237Sjhb if (!(flags & SWI_DELAY)) { 1150170291Sattilio PCPU_INC(cnt.v_soft); 1151169320Spiso#ifdef INTR_FILTER 1152169320Spiso error = intr_event_schedule_thread(ie, ie->ie_thread); 1153169320Spiso#else 1154151658Sjhb error = intr_event_schedule_thread(ie); 1155169320Spiso#endif 115672759Sjhb KASSERT(error == 0, ("stray software interrupt")); 115766698Sjhb } 115866698Sjhb} 115966698Sjhb 1160151699Sjhb/* 1161151699Sjhb * Remove a software interrupt handler. Currently this code does not 1162151699Sjhb * remove the associated interrupt event if it becomes empty. Calling code 1163151699Sjhb * may do so manually via intr_event_destroy(), but that's not really 1164151699Sjhb * an optimal interface. 1165151699Sjhb */ 1166151699Sjhbint 1167151699Sjhbswi_remove(void *cookie) 1168151699Sjhb{ 1169151699Sjhb 1170151699Sjhb return (intr_event_remove_handler(cookie)); 1171151699Sjhb} 1172151699Sjhb 1173169320Spiso#ifdef INTR_FILTER 1174151658Sjhbstatic void 1175169320Spisopriv_ithread_execute_handler(struct proc *p, struct intr_handler *ih) 1176169320Spiso{ 1177169320Spiso struct intr_event *ie; 1178169320Spiso 1179169320Spiso ie = ih->ih_event; 1180169320Spiso /* 1181169320Spiso * If this handler is marked for death, remove it from 1182169320Spiso * the list of handlers and wake up the sleeper. 1183169320Spiso */ 1184169320Spiso if (ih->ih_flags & IH_DEAD) { 1185169320Spiso mtx_lock(&ie->ie_lock); 1186169320Spiso TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next); 1187169320Spiso ih->ih_flags &= ~IH_DEAD; 1188169320Spiso wakeup(ih); 1189169320Spiso mtx_unlock(&ie->ie_lock); 1190169320Spiso return; 1191169320Spiso } 1192169320Spiso 1193169320Spiso /* Execute this handler. 
*/ 1194169320Spiso CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x", 1195169320Spiso __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument, 1196169320Spiso ih->ih_name, ih->ih_flags); 1197169320Spiso 1198169320Spiso if (!(ih->ih_flags & IH_MPSAFE)) 1199169320Spiso mtx_lock(&Giant); 1200169320Spiso ih->ih_handler(ih->ih_argument); 1201169320Spiso if (!(ih->ih_flags & IH_MPSAFE)) 1202169320Spiso mtx_unlock(&Giant); 1203169320Spiso} 1204169320Spiso#endif 1205169320Spiso 1206183052Sjhb/* 1207183052Sjhb * This is a public function for use by drivers that mux interrupt 1208183052Sjhb * handlers for child devices from their interrupt handler. 1209183052Sjhb */ 1210183052Sjhbvoid 1211183052Sjhbintr_event_execute_handlers(struct proc *p, struct intr_event *ie) 1212151658Sjhb{ 1213151658Sjhb struct intr_handler *ih, *ihn; 1214151658Sjhb 1215151658Sjhb TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) { 1216151658Sjhb /* 1217151658Sjhb * If this handler is marked for death, remove it from 1218151658Sjhb * the list of handlers and wake up the sleeper. 1219151658Sjhb */ 1220151658Sjhb if (ih->ih_flags & IH_DEAD) { 1221151658Sjhb mtx_lock(&ie->ie_lock); 1222151658Sjhb TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next); 1223151658Sjhb ih->ih_flags &= ~IH_DEAD; 1224151658Sjhb wakeup(ih); 1225151658Sjhb mtx_unlock(&ie->ie_lock); 1226151658Sjhb continue; 1227151658Sjhb } 1228151658Sjhb 1229167080Spiso /* Skip filter only handlers */ 1230167080Spiso if (ih->ih_handler == NULL) 1231167080Spiso continue; 1232167080Spiso 1233151658Sjhb /* 1234151658Sjhb * For software interrupt threads, we only execute 1235151658Sjhb * handlers that have their need flag set. Hardware 1236151658Sjhb * interrupt threads always invoke all of their handlers. 
1237151658Sjhb */ 1238151658Sjhb if (ie->ie_flags & IE_SOFT) { 1239252683Salfred if (atomic_load_acq_int(&ih->ih_need) == 0) 1240151658Sjhb continue; 1241151658Sjhb else 1242151658Sjhb atomic_store_rel_int(&ih->ih_need, 0); 1243151658Sjhb } 1244151658Sjhb 1245151658Sjhb /* Execute this handler. */ 1246151658Sjhb CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x", 1247169320Spiso __func__, p->p_pid, (void *)ih->ih_handler, 1248169320Spiso ih->ih_argument, ih->ih_name, ih->ih_flags); 1249151658Sjhb 1250151658Sjhb if (!(ih->ih_flags & IH_MPSAFE)) 1251151658Sjhb mtx_lock(&Giant); 1252151658Sjhb ih->ih_handler(ih->ih_argument); 1253151658Sjhb if (!(ih->ih_flags & IH_MPSAFE)) 1254151658Sjhb mtx_unlock(&Giant); 1255151658Sjhb } 1256183052Sjhb} 1257183052Sjhb 1258183052Sjhbstatic void 1259183052Sjhbithread_execute_handlers(struct proc *p, struct intr_event *ie) 1260183052Sjhb{ 1261183052Sjhb 1262183052Sjhb /* Interrupt handlers should not sleep. */ 1263151658Sjhb if (!(ie->ie_flags & IE_SOFT)) 1264183052Sjhb THREAD_NO_SLEEPING(); 1265183052Sjhb intr_event_execute_handlers(p, ie); 1266183052Sjhb if (!(ie->ie_flags & IE_SOFT)) 1267151658Sjhb THREAD_SLEEPING_OK(); 1268151658Sjhb 1269151658Sjhb /* 1270151658Sjhb * Interrupt storm handling: 1271151658Sjhb * 1272151658Sjhb * If this interrupt source is currently storming, then throttle 1273151658Sjhb * it to only fire the handler once per clock tick. 1274151658Sjhb * 1275151658Sjhb * If this interrupt source is not currently storming, but the 1276151658Sjhb * number of back to back interrupts exceeds the storm threshold, 1277151658Sjhb * then enter storming mode. 1278151658Sjhb */ 1279167173Sjhb if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold && 1280167173Sjhb !(ie->ie_flags & IE_SOFT)) { 1281168850Snjl /* Report the message only once every second. 
*/ 1282168850Snjl if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) { 1283151658Sjhb printf( 1284168850Snjl "interrupt storm detected on \"%s\"; throttling interrupt source\n", 1285151658Sjhb ie->ie_name); 1286151658Sjhb } 1287167173Sjhb pause("istorm", 1); 1288151658Sjhb } else 1289151658Sjhb ie->ie_count++; 1290151658Sjhb 1291151658Sjhb /* 1292151658Sjhb * Now that all the handlers have had a chance to run, reenable 1293151658Sjhb * the interrupt source. 1294151658Sjhb */ 1295177940Sjhb if (ie->ie_post_ithread != NULL) 1296177940Sjhb ie->ie_post_ithread(ie->ie_source); 1297151658Sjhb} 1298151658Sjhb 1299169320Spiso#ifndef INTR_FILTER 130066698Sjhb/* 130172237Sjhb * This is the main code for interrupt threads. 130266698Sjhb */ 1303104094Sphkstatic void 130472237Sjhbithread_loop(void *arg) 130566698Sjhb{ 1306151658Sjhb struct intr_thread *ithd; 1307151658Sjhb struct intr_event *ie; 130883366Sjulian struct thread *td; 130972237Sjhb struct proc *p; 1310219819Sjeff int wake; 1311151658Sjhb 131283366Sjulian td = curthread; 131383366Sjulian p = td->td_proc; 1314151658Sjhb ithd = (struct intr_thread *)arg; 1315151658Sjhb KASSERT(ithd->it_thread == td, 131687593Sobrien ("%s: ithread and proc linkage out of sync", __func__)); 1317151658Sjhb ie = ithd->it_event; 1318151658Sjhb ie->ie_count = 0; 1319219819Sjeff wake = 0; 132066698Sjhb 132167551Sjhb /* 132267551Sjhb * As long as we have interrupts outstanding, go through the 132367551Sjhb * list of handlers, giving each one a go at it. 132467551Sjhb */ 132566698Sjhb for (;;) { 132672237Sjhb /* 132772237Sjhb * If we are an orphaned thread, then just die. 132872237Sjhb */ 132972237Sjhb if (ithd->it_flags & IT_DEAD) { 1330151658Sjhb CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, 1331173004Sjulian p->p_pid, td->td_name); 133272237Sjhb free(ithd, M_ITHREAD); 1333173044Sjulian kthread_exit(); 133472237Sjhb } 133572237Sjhb 1336151658Sjhb /* 1337151658Sjhb * Service interrupts. 
If another interrupt arrives while 1338151658Sjhb * we are running, it will set it_need to note that we 1339151658Sjhb * should make another pass. 1340151658Sjhb */ 1341252683Salfred while (atomic_load_acq_int(&ithd->it_need) != 0) { 134267551Sjhb /* 1343151658Sjhb * This might need a full read and write barrier 1344151658Sjhb * to make sure that this write posts before any 1345151658Sjhb * of the memory or device accesses in the 1346151658Sjhb * handlers. 134767551Sjhb */ 134872237Sjhb atomic_store_rel_int(&ithd->it_need, 0); 1349151658Sjhb ithread_execute_handlers(p, ie); 135066698Sjhb } 1351128331Sjhb WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); 1352128331Sjhb mtx_assert(&Giant, MA_NOTOWNED); 135367551Sjhb 135466698Sjhb /* 135566698Sjhb * Processed all our interrupts. Now get the sched 135667551Sjhb * lock. This may take a while and it_need may get 135766698Sjhb * set again, so we have to check it again. 135866698Sjhb */ 1359170307Sjeff thread_lock(td); 1360252683Salfred if ((atomic_load_acq_int(&ithd->it_need) == 0) && 1361252683Salfred !(ithd->it_flags & (IT_DEAD | IT_WAIT))) { 1362128331Sjhb TD_SET_IWAIT(td); 1363151658Sjhb ie->ie_count = 0; 1364178272Sjeff mi_switch(SW_VOL | SWT_IWAIT, NULL); 136566698Sjhb } 1366219819Sjeff if (ithd->it_flags & IT_WAIT) { 1367219819Sjeff wake = 1; 1368219819Sjeff ithd->it_flags &= ~IT_WAIT; 1369219819Sjeff } 1370170307Sjeff thread_unlock(td); 1371219819Sjeff if (wake) { 1372219819Sjeff wakeup(ithd); 1373219819Sjeff wake = 0; 1374219819Sjeff } 137566698Sjhb } 137666698Sjhb} 1377177940Sjhb 1378177940Sjhb/* 1379177940Sjhb * Main interrupt handling body. 1380177940Sjhb * 1381177940Sjhb * Input: 1382177940Sjhb * o ie: the event connected to this interrupt. 1383177940Sjhb * o frame: some archs (i.e. i386) pass a frame to some. 1384177940Sjhb * handlers as their main argument. 1385177940Sjhb * Return value: 1386177940Sjhb * o 0: everything ok. 1387177940Sjhb * o EINVAL: stray interrupt. 
1388177940Sjhb */ 1389177940Sjhbint 1390177940Sjhbintr_event_handle(struct intr_event *ie, struct trapframe *frame) 1391177940Sjhb{ 1392177940Sjhb struct intr_handler *ih; 1393208988Smav struct trapframe *oldframe; 1394177940Sjhb struct thread *td; 1395177940Sjhb int error, ret, thread; 1396177940Sjhb 1397177940Sjhb td = curthread; 1398177940Sjhb 1399177940Sjhb /* An interrupt with no event or handlers is a stray interrupt. */ 1400177940Sjhb if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers)) 1401177940Sjhb return (EINVAL); 1402177940Sjhb 1403177940Sjhb /* 1404177940Sjhb * Execute fast interrupt handlers directly. 1405177940Sjhb * To support clock handlers, if a handler registers 1406177940Sjhb * with a NULL argument, then we pass it a pointer to 1407177940Sjhb * a trapframe as its argument. 1408177940Sjhb */ 1409177940Sjhb td->td_intr_nesting_level++; 1410177940Sjhb thread = 0; 1411177940Sjhb ret = 0; 1412177940Sjhb critical_enter(); 1413208988Smav oldframe = td->td_intr_frame; 1414208988Smav td->td_intr_frame = frame; 1415177940Sjhb TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 1416177940Sjhb if (ih->ih_filter == NULL) { 1417177940Sjhb thread = 1; 1418177940Sjhb continue; 1419177940Sjhb } 1420177940Sjhb CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__, 1421177940Sjhb ih->ih_filter, ih->ih_argument == NULL ? 
frame : 1422177940Sjhb ih->ih_argument, ih->ih_name); 1423177940Sjhb if (ih->ih_argument == NULL) 1424177940Sjhb ret = ih->ih_filter(frame); 1425177940Sjhb else 1426177940Sjhb ret = ih->ih_filter(ih->ih_argument); 1427203061Savg KASSERT(ret == FILTER_STRAY || 1428203061Savg ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 && 1429203061Savg (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0), 1430203061Savg ("%s: incorrect return value %#x from %s", __func__, ret, 1431203061Savg ih->ih_name)); 1432203061Savg 1433177940Sjhb /* 1434177940Sjhb * Wrapper handler special handling: 1435177940Sjhb * 1436177940Sjhb * in some particular cases (like pccard and pccbb), 1437177940Sjhb * the _real_ device handler is wrapped in a couple of 1438177940Sjhb * functions - a filter wrapper and an ithread wrapper. 1439177940Sjhb * In this case (and just in this case), the filter wrapper 1440177940Sjhb * could ask the system to schedule the ithread and mask 1441177940Sjhb * the interrupt source if the wrapped handler is composed 1442177940Sjhb * of just an ithread handler. 1443177940Sjhb * 1444177940Sjhb * TODO: write a generic wrapper to avoid people rolling 1445177940Sjhb * their own 1446177940Sjhb */ 1447177940Sjhb if (!thread) { 1448177940Sjhb if (ret == FILTER_SCHEDULE_THREAD) 1449177940Sjhb thread = 1; 1450177940Sjhb } 1451177940Sjhb } 1452208988Smav td->td_intr_frame = oldframe; 1453177940Sjhb 1454177940Sjhb if (thread) { 1455177940Sjhb if (ie->ie_pre_ithread != NULL) 1456177940Sjhb ie->ie_pre_ithread(ie->ie_source); 1457177940Sjhb } else { 1458177940Sjhb if (ie->ie_post_filter != NULL) 1459177940Sjhb ie->ie_post_filter(ie->ie_source); 1460177940Sjhb } 1461177940Sjhb 1462177940Sjhb /* Schedule the ithread if needed. 
*/ 1463177940Sjhb if (thread) { 1464177940Sjhb error = intr_event_schedule_thread(ie); 1465182024Skmacy#ifndef XEN 1466177940Sjhb KASSERT(error == 0, ("bad stray interrupt")); 1467182024Skmacy#else 1468182024Skmacy if (error != 0) 1469182024Skmacy log(LOG_WARNING, "bad stray interrupt"); 1470182024Skmacy#endif 1471177940Sjhb } 1472177940Sjhb critical_exit(); 1473177940Sjhb td->td_intr_nesting_level--; 1474177940Sjhb return (0); 1475177940Sjhb} 1476169320Spiso#else 1477169320Spiso/* 1478169320Spiso * This is the main code for interrupt threads. 1479169320Spiso */ 1480169320Spisostatic void 1481169320Spisoithread_loop(void *arg) 1482169320Spiso{ 1483169320Spiso struct intr_thread *ithd; 1484169320Spiso struct intr_handler *ih; 1485169320Spiso struct intr_event *ie; 1486169320Spiso struct thread *td; 1487169320Spiso struct proc *p; 1488169320Spiso int priv; 1489219819Sjeff int wake; 149066698Sjhb 1491169320Spiso td = curthread; 1492169320Spiso p = td->td_proc; 1493169320Spiso ih = (struct intr_handler *)arg; 1494169320Spiso priv = (ih->ih_thread != NULL) ? 1 : 0; 1495169320Spiso ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread; 1496169320Spiso KASSERT(ithd->it_thread == td, 1497169320Spiso ("%s: ithread and proc linkage out of sync", __func__)); 1498169320Spiso ie = ithd->it_event; 1499169320Spiso ie->ie_count = 0; 1500219819Sjeff wake = 0; 1501169320Spiso 1502169320Spiso /* 1503169320Spiso * As long as we have interrupts outstanding, go through the 1504169320Spiso * list of handlers, giving each one a go at it. 1505169320Spiso */ 1506169320Spiso for (;;) { 1507169320Spiso /* 1508169320Spiso * If we are an orphaned thread, then just die. 
1509169320Spiso */ 1510169320Spiso if (ithd->it_flags & IT_DEAD) { 1511169320Spiso CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, 1512173004Sjulian p->p_pid, td->td_name); 1513169320Spiso free(ithd, M_ITHREAD); 1514173044Sjulian kthread_exit(); 1515169320Spiso } 1516169320Spiso 1517169320Spiso /* 1518169320Spiso * Service interrupts. If another interrupt arrives while 1519169320Spiso * we are running, it will set it_need to note that we 1520169320Spiso * should make another pass. 1521169320Spiso */ 1522252683Salfred while (atomic_load_acq_int(&ithd->it_need) != 0) { 1523169320Spiso /* 1524169320Spiso * This might need a full read and write barrier 1525169320Spiso * to make sure that this write posts before any 1526169320Spiso * of the memory or device accesses in the 1527169320Spiso * handlers. 1528169320Spiso */ 1529169320Spiso atomic_store_rel_int(&ithd->it_need, 0); 1530169320Spiso if (priv) 1531169320Spiso priv_ithread_execute_handler(p, ih); 1532169320Spiso else 1533169320Spiso ithread_execute_handlers(p, ie); 1534169320Spiso } 1535169320Spiso WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); 1536169320Spiso mtx_assert(&Giant, MA_NOTOWNED); 1537169320Spiso 1538169320Spiso /* 1539169320Spiso * Processed all our interrupts. Now get the sched 1540169320Spiso * lock. This may take a while and it_need may get 1541169320Spiso * set again, so we have to check it again. 
1542169320Spiso */ 1543170307Sjeff thread_lock(td); 1544252683Salfred if ((atomic_load_acq_int(&ithd->it_need) == 0) && 1545252683Salfred !(ithd->it_flags & (IT_DEAD | IT_WAIT))) { 1546169320Spiso TD_SET_IWAIT(td); 1547169320Spiso ie->ie_count = 0; 1548178272Sjeff mi_switch(SW_VOL | SWT_IWAIT, NULL); 1549169320Spiso } 1550219819Sjeff if (ithd->it_flags & IT_WAIT) { 1551219819Sjeff wake = 1; 1552219819Sjeff ithd->it_flags &= ~IT_WAIT; 1553219819Sjeff } 1554170307Sjeff thread_unlock(td); 1555219819Sjeff if (wake) { 1556219819Sjeff wakeup(ithd); 1557219819Sjeff wake = 0; 1558219819Sjeff } 1559169320Spiso } 1560169320Spiso} 1561169320Spiso 1562169320Spiso/* 1563169320Spiso * Main loop for interrupt filter. 1564169320Spiso * 1565169320Spiso * Some architectures (i386, amd64 and arm) require the optional frame 1566169320Spiso * parameter, and use it as the main argument for fast handler execution 1567169320Spiso * when ih_argument == NULL. 1568169320Spiso * 1569169320Spiso * Return value: 1570169320Spiso * o FILTER_STRAY: No filter recognized the event, and no 1571169320Spiso * filter-less handler is registered on this 1572169320Spiso * line. 1573169320Spiso * o FILTER_HANDLED: A filter claimed the event and served it. 1574169320Spiso * o FILTER_SCHEDULE_THREAD: No filter claimed the event, but there's at 1575169320Spiso * least one filter-less handler on this line. 1576169320Spiso * o FILTER_HANDLED | 1577169320Spiso * FILTER_SCHEDULE_THREAD: A filter claimed the event, and asked for 1578169320Spiso * scheduling the per-handler ithread. 1579169320Spiso * 1580169320Spiso * In case an ithread has to be scheduled, in *ithd there will be a 1581169320Spiso * pointer to a struct intr_thread containing the thread to be 1582169320Spiso * scheduled. 
 */

/*
 * Run every filter attached to 'ie' until one claims the interrupt.
 * On a non-stray filter result, *ithd is set to that handler's private
 * ithread and the filter's return value is propagated.  If no filter
 * fired but at least one handler has no filter, the shared event
 * ithread is scheduled instead (FILTER_SCHEDULE_THREAD); otherwise the
 * interrupt is stray (FILTER_STRAY).
 */
static int
intr_filter_loop(struct intr_event *ie, struct trapframe *frame,
    struct intr_thread **ithd)
{
	struct intr_handler *ih;
	void *arg;
	int ret, thread_only;

	ret = 0;
	thread_only = 0;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		/*
		 * Execute fast interrupt handlers directly.
		 * To support clock handlers, if a handler registers
		 * with a NULL argument, then we pass it a pointer to
		 * a trapframe as its argument.
		 */
		arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);

		CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_handler, arg, ih->ih_name);

		/* A handler with no filter can only run in the ithread. */
		if (ih->ih_filter != NULL)
			ret = ih->ih_filter(arg);
		else {
			thread_only = 1;
			continue;
		}
		/* Filters must honor the FILTER_* return-value contract. */
		KASSERT(ret == FILTER_STRAY ||
		    ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
		    (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
		    ("%s: incorrect return value %#x from %s", __func__, ret,
		    ih->ih_name));
		if (ret & FILTER_STRAY)
			continue;
		else {
			/* First non-stray filter claims the interrupt. */
			*ithd = ih->ih_thread;
			return (ret);
		}
	}

	/*
	 * No filters handled the interrupt and we have at least
	 * one handler without a filter.  In this case, we schedule
	 * all of the filter-less handlers to run in the ithread.
	 */
	if (thread_only) {
		*ithd = ie->ie_thread;
		return (FILTER_SCHEDULE_THREAD);
	}
	return (FILTER_STRAY);
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:                        the event connected to this interrupt.
 * o frame:                     some archs (i.e. i386) pass a frame to some.
 *                              handlers as their main argument.
 * Return value:
 * o 0:                         everything ok.
 * o EINVAL:                    stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_thread *ithd;
	struct trapframe *oldframe;
	struct thread *td;
	int thread;

	ithd = NULL;
	td = curthread;

	/* An unknown event or one with no handlers is a stray interrupt. */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	td->td_intr_nesting_level++;
	thread = 0;	/* NOTE(review): dead store, overwritten below. */
	critical_enter();
	/*
	 * Save and restore td_intr_frame around the filter run so that a
	 * nested interrupt sees its own frame, not ours.
	 */
	oldframe = td->td_intr_frame;
	td->td_intr_frame = frame;
	thread = intr_filter_loop(ie, frame, &ithd);
	if (thread & FILTER_HANDLED) {
		/* A filter fully handled it; let the PIC complete (EOI). */
		if (ie->ie_post_filter != NULL)
			ie->ie_post_filter(ie->ie_source);
	} else {
		/* Keep the source masked until the ithread has run. */
		if (ie->ie_pre_ithread != NULL)
			ie->ie_pre_ithread(ie->ie_source);
	}
	td->td_intr_frame = oldframe;
	critical_exit();

	/*
	 * Interrupt storm logic
	 *
	 * NOTE(review): as written, this message is printed while ie_count
	 * is still BELOW intr_storm_threshold; the INTR_FILTER path only
	 * logs and never throttles — confirm intent against the
	 * non-INTR_FILTER storm handling.
	 */
	if (thread & FILTER_STRAY) {
		ie->ie_count++;
		if (ie->ie_count < intr_storm_threshold)
			printf("Interrupt stray detection not present\n");
	}

	/* Schedule an ithread if needed. */
	if (thread & FILTER_SCHEDULE_THREAD) {
		if (intr_event_schedule_thread(ie, ithd) != 0)
			panic("%s: impossible stray interrupt", __func__);
	}
	td->td_intr_nesting_level--;
	return (0);
}
#endif

#ifdef DDB
/*
 * Dump details about an interrupt handler: name, a symbolic priority
 * tag, its filter/handler symbols and argument, and any IH_* flags.
 */
static void
db_dump_intrhand(struct intr_handler *ih)
{
	int comma;

	db_printf("\t%-10s ", ih->ih_name);
	/* Print a 4-character tag for the well-known priority levels. */
	switch (ih->ih_pri) {
	case PI_REALTIME:
		db_printf("CLK ");
		break;
	case PI_AV:
		db_printf("AV  ");
		break;
	case PI_TTY:
		db_printf("TTY ");
		break;
	case PI_NET:
		db_printf("NET ");
		break;
	case PI_DISK:
		db_printf("DISK");
		break;
	case PI_DULL:
		db_printf("DULL");
		break;
	default:
		if (ih->ih_pri >= PI_SOFT)
			db_printf("SWI ");
		else
			db_printf("%4u", ih->ih_pri);
		break;
	}
	db_printf(" ");
	/* [F] marks a filter, [H] a threaded handler; both may be present. */
	if (ih->ih_filter != NULL) {
		db_printf("[F]");
		db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC);
	}
	if (ih->ih_handler != NULL) {
		if (ih->ih_filter != NULL)
			db_printf(",");
		db_printf("[H]");
		db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
	}
	db_printf("(%p)", ih->ih_argument);
	if (ih->ih_need ||
	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
	    IH_MPSAFE)) != 0) {
		db_printf(" {");
		comma = 0;
		if (ih->ih_flags & IH_EXCLUSIVE) {
			if (comma)
				db_printf(", ");
			db_printf("EXCL");
			comma = 1;
		}
		if (ih->ih_flags & IH_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ih->ih_flags & IH_DEAD) {
			if (comma)
				db_printf(", ");
			db_printf("DEAD");
			comma = 1;
		}
		if (ih->ih_flags & IH_MPSAFE) {
			if (comma)
				db_printf(", ");
			db_printf("MPSAFE");
			comma = 1;
		}
		if (ih->ih_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");
}

/*
 * Dump details about a event.
1785121482Sjhb */ 1786121482Sjhbvoid 1787151658Sjhbdb_dump_intr_event(struct intr_event *ie, int handlers) 1788121482Sjhb{ 1789151658Sjhb struct intr_handler *ih; 1790151658Sjhb struct intr_thread *it; 1791121482Sjhb int comma; 1792121482Sjhb 1793151658Sjhb db_printf("%s ", ie->ie_fullname); 1794151658Sjhb it = ie->ie_thread; 1795151658Sjhb if (it != NULL) 1796151658Sjhb db_printf("(pid %d)", it->it_thread->td_proc->p_pid); 1797151658Sjhb else 1798151658Sjhb db_printf("(no thread)"); 1799151658Sjhb if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 || 1800151658Sjhb (it != NULL && it->it_need)) { 1801121482Sjhb db_printf(" {"); 1802121482Sjhb comma = 0; 1803151658Sjhb if (ie->ie_flags & IE_SOFT) { 1804121482Sjhb db_printf("SOFT"); 1805121482Sjhb comma = 1; 1806121482Sjhb } 1807151658Sjhb if (ie->ie_flags & IE_ENTROPY) { 1808121482Sjhb if (comma) 1809121482Sjhb db_printf(", "); 1810121482Sjhb db_printf("ENTROPY"); 1811121482Sjhb comma = 1; 1812121482Sjhb } 1813151658Sjhb if (ie->ie_flags & IE_ADDING_THREAD) { 1814121482Sjhb if (comma) 1815121482Sjhb db_printf(", "); 1816151658Sjhb db_printf("ADDING_THREAD"); 1817121482Sjhb comma = 1; 1818121482Sjhb } 1819151658Sjhb if (it != NULL && it->it_need) { 1820121482Sjhb if (comma) 1821121482Sjhb db_printf(", "); 1822121482Sjhb db_printf("NEED"); 1823121482Sjhb } 1824121482Sjhb db_printf("}"); 1825121482Sjhb } 1826121482Sjhb db_printf("\n"); 1827121482Sjhb 1828121482Sjhb if (handlers) 1829151658Sjhb TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 1830121482Sjhb db_dump_intrhand(ih); 1831121482Sjhb} 1832151658Sjhb 1833151658Sjhb/* 1834151658Sjhb * Dump data about interrupt handlers 1835151658Sjhb */ 1836151658SjhbDB_SHOW_COMMAND(intr, db_show_intr) 1837151658Sjhb{ 1838151658Sjhb struct intr_event *ie; 1839160312Sjhb int all, verbose; 1840151658Sjhb 1841229272Sed verbose = strchr(modif, 'v') != NULL; 1842229272Sed all = strchr(modif, 'a') != NULL; 1843151658Sjhb TAILQ_FOREACH(ie, &event_list, ie_list) { 
1844151658Sjhb if (!all && TAILQ_EMPTY(&ie->ie_handlers)) 1845151658Sjhb continue; 1846151658Sjhb db_dump_intr_event(ie, verbose); 1847160312Sjhb if (db_pager_quit) 1848160312Sjhb break; 1849151658Sjhb } 1850151658Sjhb} 1851121482Sjhb#endif /* DDB */ 1852121482Sjhb 1853121482Sjhb/* 185467551Sjhb * Start standard software interrupt threads 185566698Sjhb */ 185667551Sjhbstatic void 185772237Sjhbstart_softintr(void *dummy) 185867551Sjhb{ 185972237Sjhb 1860177859Sjeff if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih)) 1861177859Sjeff panic("died while creating vm swi ithread"); 186266698Sjhb} 1863177253SrwatsonSYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, 1864177253Srwatson NULL); 186566698Sjhb 1866151658Sjhb/* 186777582Stmm * Sysctls used by systat and others: hw.intrnames and hw.intrcnt. 186877582Stmm * The data for this machine dependent, and the declarations are in machine 186977582Stmm * dependent code. The layout of intrnames and intrcnt however is machine 187077582Stmm * independent. 187177582Stmm * 187277582Stmm * We do not know the length of intrcnt and intrnames at compile time, so 187377582Stmm * calculate things at run time. 
187477582Stmm */ 187577582Stmmstatic int 187677582Stmmsysctl_intrnames(SYSCTL_HANDLER_ARGS) 187777582Stmm{ 1878224187Sattilio return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req)); 187977582Stmm} 188077582Stmm 188177582StmmSYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD, 188277582Stmm NULL, 0, sysctl_intrnames, "", "Interrupt Names"); 188377582Stmm 188477582Stmmstatic int 188577582Stmmsysctl_intrcnt(SYSCTL_HANDLER_ARGS) 188677582Stmm{ 1887232751Sjmallett#ifdef SCTL_MASK32 1888232751Sjmallett uint32_t *intrcnt32; 1889232751Sjmallett unsigned i; 1890232751Sjmallett int error; 1891232751Sjmallett 1892232751Sjmallett if (req->flags & SCTL_MASK32) { 1893232751Sjmallett if (!req->oldptr) 1894232751Sjmallett return (sysctl_handle_opaque(oidp, NULL, sintrcnt / 2, req)); 1895232751Sjmallett intrcnt32 = malloc(sintrcnt / 2, M_TEMP, M_NOWAIT); 1896232751Sjmallett if (intrcnt32 == NULL) 1897232751Sjmallett return (ENOMEM); 1898232751Sjmallett for (i = 0; i < sintrcnt / sizeof (u_long); i++) 1899232751Sjmallett intrcnt32[i] = intrcnt[i]; 1900232751Sjmallett error = sysctl_handle_opaque(oidp, intrcnt32, sintrcnt / 2, req); 1901232751Sjmallett free(intrcnt32, M_TEMP); 1902232751Sjmallett return (error); 1903232751Sjmallett } 1904232751Sjmallett#endif 1905224187Sattilio return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req)); 190677582Stmm} 190777582Stmm 190877582StmmSYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD, 190977582Stmm NULL, 0, sysctl_intrcnt, "", "Interrupt Counts"); 1910121482Sjhb 1911121482Sjhb#ifdef DDB 1912121482Sjhb/* 1913121482Sjhb * DDB command to dump the interrupt statistics. 
1914121482Sjhb */ 1915121482SjhbDB_SHOW_COMMAND(intrcnt, db_show_intrcnt) 1916121482Sjhb{ 1917121482Sjhb u_long *i; 1918121482Sjhb char *cp; 1919224187Sattilio u_int j; 1920121482Sjhb 1921121482Sjhb cp = intrnames; 1922224187Sattilio j = 0; 1923224187Sattilio for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit; 1924224187Sattilio i++, j++) { 1925121482Sjhb if (*cp == '\0') 1926121482Sjhb break; 1927121482Sjhb if (*i != 0) 1928121482Sjhb db_printf("%s\t%lu\n", cp, *i); 1929121482Sjhb cp += strlen(cp) + 1; 1930121482Sjhb } 1931121482Sjhb} 1932121482Sjhb#endif 1933