/*-
 * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/x86/x86/intr_machdep.c 331017 2018-03-15 19:08:33Z kevans $
 */

/*
 * Machine dependent interrupt code for x86.  For x86, we have to
 * deal with different PICs.  Thus, we use the passed in vector to lookup
 * an interrupt source associated with that vector.  The interrupt source
 * describes which PIC the source belongs to and includes methods to handle
 * that source.
35 */ 36 37#include "opt_atpic.h" 38#include "opt_ddb.h" 39 40#include <sys/param.h> 41#include <sys/bus.h> 42#include <sys/interrupt.h> 43#include <sys/ktr.h> 44#include <sys/kernel.h> 45#include <sys/lock.h> 46#include <sys/mutex.h> 47#include <sys/proc.h> 48#include <sys/smp.h> 49#include <sys/sx.h> 50#include <sys/syslog.h> 51#include <sys/systm.h> 52#include <sys/vmmeter.h> 53#include <machine/clock.h> 54#include <machine/intr_machdep.h> 55#include <machine/smp.h> 56#ifdef DDB 57#include <ddb/ddb.h> 58#endif 59 60#ifndef DEV_ATPIC 61#include <machine/segments.h> 62#include <machine/frame.h> 63#include <dev/ic/i8259.h> 64#include <x86/isa/icu.h> 65#ifdef PC98 66#include <pc98/cbus/cbus.h> 67#else 68#include <isa/isareg.h> 69#endif 70#endif 71 72#define MAX_STRAY_LOG 5 73 74typedef void (*mask_fn)(void *); 75 76static int intrcnt_index; 77static struct intsrc *interrupt_sources[NUM_IO_INTS]; 78static struct sx intrsrc_lock; 79static struct mtx intrpic_lock; 80static struct mtx intrcnt_lock; 81static TAILQ_HEAD(pics_head, pic) pics; 82 83#if defined(SMP) && !defined(EARLY_AP_STARTUP) 84static int assign_cpu; 85#endif 86 87u_long intrcnt[INTRCNT_COUNT]; 88char intrnames[INTRCNT_COUNT * (MAXCOMLEN + 1)]; 89size_t sintrcnt = sizeof(intrcnt); 90size_t sintrnames = sizeof(intrnames); 91 92static int intr_assign_cpu(void *arg, int cpu); 93static void intr_disable_src(void *arg); 94static void intr_init(void *__dummy); 95static int intr_pic_registered(struct pic *pic); 96static void intrcnt_setname(const char *name, int index); 97static void intrcnt_updatename(struct intsrc *is); 98static void intrcnt_register(struct intsrc *is); 99 100static int 101intr_pic_registered(struct pic *pic) 102{ 103 struct pic *p; 104 105 TAILQ_FOREACH(p, &pics, pics) { 106 if (p == pic) 107 return (1); 108 } 109 return (0); 110} 111 112/* 113 * Register a new interrupt controller (PIC). 
This is to support suspend 114 * and resume where we suspend/resume controllers rather than individual 115 * sources. This also allows controllers with no active sources (such as 116 * 8259As in a system using the APICs) to participate in suspend and resume. 117 */ 118int 119intr_register_pic(struct pic *pic) 120{ 121 int error; 122 123 mtx_lock(&intrpic_lock); 124 if (intr_pic_registered(pic)) 125 error = EBUSY; 126 else { 127 TAILQ_INSERT_TAIL(&pics, pic, pics); 128 error = 0; 129 } 130 mtx_unlock(&intrpic_lock); 131 return (error); 132} 133 134/* 135 * Register a new interrupt source with the global interrupt system. 136 * The global interrupts need to be disabled when this function is 137 * called. 138 */ 139int 140intr_register_source(struct intsrc *isrc) 141{ 142 int error, vector; 143 144 KASSERT(intr_pic_registered(isrc->is_pic), ("unregistered PIC")); 145 vector = isrc->is_pic->pic_vector(isrc); 146 if (interrupt_sources[vector] != NULL) 147 return (EEXIST); 148 error = intr_event_create(&isrc->is_event, isrc, 0, vector, 149 intr_disable_src, (mask_fn)isrc->is_pic->pic_enable_source, 150 (mask_fn)isrc->is_pic->pic_eoi_source, intr_assign_cpu, "irq%d:", 151 vector); 152 if (error) 153 return (error); 154 sx_xlock(&intrsrc_lock); 155 if (interrupt_sources[vector] != NULL) { 156 sx_xunlock(&intrsrc_lock); 157 intr_event_destroy(isrc->is_event); 158 return (EEXIST); 159 } 160 intrcnt_register(isrc); 161 interrupt_sources[vector] = isrc; 162 isrc->is_handlers = 0; 163 sx_xunlock(&intrsrc_lock); 164 return (0); 165} 166 167struct intsrc * 168intr_lookup_source(int vector) 169{ 170 171 if (vector < 0 || vector >= nitems(interrupt_sources)) 172 return (NULL); 173 return (interrupt_sources[vector]); 174} 175 176int 177intr_add_handler(const char *name, int vector, driver_filter_t filter, 178 driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep) 179{ 180 struct intsrc *isrc; 181 int error; 182 183 isrc = intr_lookup_source(vector); 184 if (isrc == 
NULL) 185 return (EINVAL); 186 error = intr_event_add_handler(isrc->is_event, name, filter, handler, 187 arg, intr_priority(flags), flags, cookiep); 188 if (error == 0) { 189 sx_xlock(&intrsrc_lock); 190 intrcnt_updatename(isrc); 191 isrc->is_handlers++; 192 if (isrc->is_handlers == 1) { 193 isrc->is_pic->pic_enable_intr(isrc); 194 isrc->is_pic->pic_enable_source(isrc); 195 } 196 sx_xunlock(&intrsrc_lock); 197 } 198 return (error); 199} 200 201int 202intr_remove_handler(void *cookie) 203{ 204 struct intsrc *isrc; 205 int error; 206 207 isrc = intr_handler_source(cookie); 208 error = intr_event_remove_handler(cookie); 209 if (error == 0) { 210 sx_xlock(&intrsrc_lock); 211 isrc->is_handlers--; 212 if (isrc->is_handlers == 0) { 213 isrc->is_pic->pic_disable_source(isrc, PIC_NO_EOI); 214 isrc->is_pic->pic_disable_intr(isrc); 215 } 216 intrcnt_updatename(isrc); 217 sx_xunlock(&intrsrc_lock); 218 } 219 return (error); 220} 221 222int 223intr_config_intr(int vector, enum intr_trigger trig, enum intr_polarity pol) 224{ 225 struct intsrc *isrc; 226 227 isrc = intr_lookup_source(vector); 228 if (isrc == NULL) 229 return (EINVAL); 230 return (isrc->is_pic->pic_config_intr(isrc, trig, pol)); 231} 232 233static void 234intr_disable_src(void *arg) 235{ 236 struct intsrc *isrc; 237 238 isrc = arg; 239 isrc->is_pic->pic_disable_source(isrc, PIC_EOI); 240} 241 242void 243intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame) 244{ 245 struct intr_event *ie; 246 int vector; 247 248 /* 249 * We count software interrupts when we process them. The 250 * code here follows previous practice, but there's an 251 * argument for counting hardware interrupts when they're 252 * processed too. 253 */ 254 (*isrc->is_count)++; 255 PCPU_INC(cnt.v_intr); 256 257 ie = isrc->is_event; 258 259 /* 260 * XXX: We assume that IRQ 0 is only used for the ISA timer 261 * device (clk). 
262 */ 263 vector = isrc->is_pic->pic_vector(isrc); 264 if (vector == 0) 265 clkintr_pending = 1; 266 267 /* 268 * For stray interrupts, mask and EOI the source, bump the 269 * stray count, and log the condition. 270 */ 271 if (intr_event_handle(ie, frame) != 0) { 272 isrc->is_pic->pic_disable_source(isrc, PIC_EOI); 273 (*isrc->is_straycount)++; 274 if (*isrc->is_straycount < MAX_STRAY_LOG) 275 log(LOG_ERR, "stray irq%d\n", vector); 276 else if (*isrc->is_straycount == MAX_STRAY_LOG) 277 log(LOG_CRIT, 278 "too many stray irq %d's: not logging anymore\n", 279 vector); 280 } 281} 282 283void 284intr_resume(bool suspend_cancelled) 285{ 286 struct pic *pic; 287 288#ifndef DEV_ATPIC 289 atpic_reset(); 290#endif 291 mtx_lock(&intrpic_lock); 292 TAILQ_FOREACH(pic, &pics, pics) { 293 if (pic->pic_resume != NULL) 294 pic->pic_resume(pic, suspend_cancelled); 295 } 296 mtx_unlock(&intrpic_lock); 297} 298 299void 300intr_suspend(void) 301{ 302 struct pic *pic; 303 304 mtx_lock(&intrpic_lock); 305 TAILQ_FOREACH_REVERSE(pic, &pics, pics_head, pics) { 306 if (pic->pic_suspend != NULL) 307 pic->pic_suspend(pic); 308 } 309 mtx_unlock(&intrpic_lock); 310} 311 312static int 313intr_assign_cpu(void *arg, int cpu) 314{ 315#ifdef SMP 316 struct intsrc *isrc; 317 int error; 318 319#ifdef EARLY_AP_STARTUP 320 MPASS(mp_ncpus == 1 || smp_started); 321 322 /* Nothing to do if there is only a single CPU. */ 323 if (mp_ncpus > 1 && cpu != NOCPU) { 324#else 325 /* 326 * Don't do anything during early boot. We will pick up the 327 * assignment once the APs are started. 
328 */ 329 if (assign_cpu && cpu != NOCPU) { 330#endif 331 isrc = arg; 332 sx_xlock(&intrsrc_lock); 333 error = isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[cpu]); 334 sx_xunlock(&intrsrc_lock); 335 } else 336 error = 0; 337 return (error); 338#else 339 return (EOPNOTSUPP); 340#endif 341} 342 343static void 344intrcnt_setname(const char *name, int index) 345{ 346 347 snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s", 348 MAXCOMLEN, name); 349} 350 351static void 352intrcnt_updatename(struct intsrc *is) 353{ 354 355 intrcnt_setname(is->is_event->ie_fullname, is->is_index); 356} 357 358static void 359intrcnt_register(struct intsrc *is) 360{ 361 char straystr[MAXCOMLEN + 1]; 362 363 KASSERT(is->is_event != NULL, ("%s: isrc with no event", __func__)); 364 mtx_lock_spin(&intrcnt_lock); 365 is->is_index = intrcnt_index; 366 intrcnt_index += 2; 367 snprintf(straystr, MAXCOMLEN + 1, "stray irq%d", 368 is->is_pic->pic_vector(is)); 369 intrcnt_updatename(is); 370 is->is_count = &intrcnt[is->is_index]; 371 intrcnt_setname(straystr, is->is_index + 1); 372 is->is_straycount = &intrcnt[is->is_index + 1]; 373 mtx_unlock_spin(&intrcnt_lock); 374} 375 376void 377intrcnt_add(const char *name, u_long **countp) 378{ 379 380 mtx_lock_spin(&intrcnt_lock); 381 *countp = &intrcnt[intrcnt_index]; 382 intrcnt_setname(name, intrcnt_index); 383 intrcnt_index++; 384 mtx_unlock_spin(&intrcnt_lock); 385} 386 387static void 388intr_init(void *dummy __unused) 389{ 390 391 intrcnt_setname("???", 0); 392 intrcnt_index = 1; 393 TAILQ_INIT(&pics); 394 mtx_init(&intrpic_lock, "intrpic", NULL, MTX_DEF); 395 sx_init(&intrsrc_lock, "intrsrc"); 396 mtx_init(&intrcnt_lock, "intrcnt", NULL, MTX_SPIN); 397} 398SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL); 399 400static void 401intr_init_final(void *dummy __unused) 402{ 403 404 /* 405 * Enable interrupts on the BSP after all of the interrupt 406 * controllers are initialized. 
Device interrupts are still 407 * disabled in the interrupt controllers until interrupt 408 * handlers are registered. Interrupts are enabled on each AP 409 * after their first context switch. 410 */ 411 enable_intr(); 412} 413SYSINIT(intr_init_final, SI_SUB_INTR, SI_ORDER_ANY, intr_init_final, NULL); 414 415#ifndef DEV_ATPIC 416/* Initialize the two 8259A's to a known-good shutdown state. */ 417void 418atpic_reset(void) 419{ 420 421 outb(IO_ICU1, ICW1_RESET | ICW1_IC4); 422 outb(IO_ICU1 + ICU_IMR_OFFSET, IDT_IO_INTS); 423 outb(IO_ICU1 + ICU_IMR_OFFSET, IRQ_MASK(ICU_SLAVEID)); 424 outb(IO_ICU1 + ICU_IMR_OFFSET, MASTER_MODE); 425 outb(IO_ICU1 + ICU_IMR_OFFSET, 0xff); 426 outb(IO_ICU1, OCW3_SEL | OCW3_RR); 427 428 outb(IO_ICU2, ICW1_RESET | ICW1_IC4); 429 outb(IO_ICU2 + ICU_IMR_OFFSET, IDT_IO_INTS + 8); 430 outb(IO_ICU2 + ICU_IMR_OFFSET, ICU_SLAVEID); 431 outb(IO_ICU2 + ICU_IMR_OFFSET, SLAVE_MODE); 432 outb(IO_ICU2 + ICU_IMR_OFFSET, 0xff); 433 outb(IO_ICU2, OCW3_SEL | OCW3_RR); 434} 435#endif 436 437/* Add a description to an active interrupt handler. 
*/ 438int 439intr_describe(u_int vector, void *ih, const char *descr) 440{ 441 struct intsrc *isrc; 442 int error; 443 444 isrc = intr_lookup_source(vector); 445 if (isrc == NULL) 446 return (EINVAL); 447 error = intr_event_describe_handler(isrc->is_event, ih, descr); 448 if (error) 449 return (error); 450 intrcnt_updatename(isrc); 451 return (0); 452} 453 454void 455intr_reprogram(void) 456{ 457 struct intsrc *is; 458 int v; 459 460 sx_xlock(&intrsrc_lock); 461 for (v = 0; v < NUM_IO_INTS; v++) { 462 is = interrupt_sources[v]; 463 if (is == NULL) 464 continue; 465 if (is->is_pic->pic_reprogram_pin != NULL) 466 is->is_pic->pic_reprogram_pin(is); 467 } 468 sx_xunlock(&intrsrc_lock); 469} 470 471#ifdef DDB 472/* 473 * Dump data about interrupt handlers 474 */ 475DB_SHOW_COMMAND(irqs, db_show_irqs) 476{ 477 struct intsrc **isrc; 478 int i, verbose; 479 480 if (strcmp(modif, "v") == 0) 481 verbose = 1; 482 else 483 verbose = 0; 484 isrc = interrupt_sources; 485 for (i = 0; i < NUM_IO_INTS && !db_pager_quit; i++, isrc++) 486 if (*isrc != NULL) 487 db_dump_intr_event((*isrc)->is_event, verbose); 488} 489#endif 490 491#ifdef SMP 492/* 493 * Support for balancing interrupt sources across CPUs. For now we just 494 * allocate CPUs round-robin. 495 */ 496 497cpuset_t intr_cpus = CPUSET_T_INITIALIZER(0x1); 498static int current_cpu; 499 500/* 501 * Return the CPU that the next interrupt source should use. For now 502 * this just returns the next local APIC according to round-robin. 503 */ 504u_int 505intr_next_cpu(void) 506{ 507 u_int apic_id; 508 509#ifdef EARLY_AP_STARTUP 510 MPASS(mp_ncpus == 1 || smp_started); 511 if (mp_ncpus == 1) 512 return (PCPU_GET(apic_id)); 513#else 514 /* Leave all interrupts on the BSP during boot. 
*/ 515 if (!assign_cpu) 516 return (PCPU_GET(apic_id)); 517#endif 518 519 mtx_lock_spin(&icu_lock); 520 apic_id = cpu_apic_ids[current_cpu]; 521 do { 522 current_cpu++; 523 if (current_cpu > mp_maxid) 524 current_cpu = 0; 525 } while (!CPU_ISSET(current_cpu, &intr_cpus)); 526 mtx_unlock_spin(&icu_lock); 527 return (apic_id); 528} 529 530/* Attempt to bind the specified IRQ to the specified CPU. */ 531int 532intr_bind(u_int vector, u_char cpu) 533{ 534 struct intsrc *isrc; 535 536 isrc = intr_lookup_source(vector); 537 if (isrc == NULL) 538 return (EINVAL); 539 return (intr_event_bind(isrc->is_event, cpu)); 540} 541 542/* 543 * Add a CPU to our mask of valid CPUs that can be destinations of 544 * interrupts. 545 */ 546void 547intr_add_cpu(u_int cpu) 548{ 549 550 if (cpu >= MAXCPU) 551 panic("%s: Invalid CPU ID", __func__); 552 if (bootverbose) 553 printf("INTR: Adding local APIC %d as a target\n", 554 cpu_apic_ids[cpu]); 555 556 CPU_SET(cpu, &intr_cpus); 557} 558 559#ifndef EARLY_AP_STARTUP 560/* 561 * Distribute all the interrupt sources among the available CPUs once the 562 * AP's have been launched. 563 */ 564static void 565intr_shuffle_irqs(void *arg __unused) 566{ 567 struct intsrc *isrc; 568 int i; 569 570 /* Don't bother on UP. */ 571 if (mp_ncpus == 1) 572 return; 573 574 /* Round-robin assign a CPU to each enabled source. */ 575 sx_xlock(&intrsrc_lock); 576 assign_cpu = 1; 577 for (i = 0; i < NUM_IO_INTS; i++) { 578 isrc = interrupt_sources[i]; 579 if (isrc != NULL && isrc->is_handlers > 0) { 580 /* 581 * If this event is already bound to a CPU, 582 * then assign the source to that CPU instead 583 * of picking one via round-robin. Note that 584 * this is careful to only advance the 585 * round-robin if the CPU assignment succeeds. 
586 */ 587 if (isrc->is_event->ie_cpu != NOCPU) 588 (void)isrc->is_pic->pic_assign_cpu(isrc, 589 cpu_apic_ids[isrc->is_event->ie_cpu]); 590 else if (isrc->is_pic->pic_assign_cpu(isrc, 591 cpu_apic_ids[current_cpu]) == 0) 592 (void)intr_next_cpu(); 593 594 } 595 } 596 sx_xunlock(&intrsrc_lock); 597} 598SYSINIT(intr_shuffle_irqs, SI_SUB_SMP, SI_ORDER_SECOND, intr_shuffle_irqs, 599 NULL); 600#endif 601#else 602/* 603 * Always route interrupts to the current processor in the UP case. 604 */ 605u_int 606intr_next_cpu(void) 607{ 608 609 return (PCPU_GET(apic_id)); 610} 611#endif 612