pic.c revision 1.31
/*	$NetBSD: pic.c,v 1.31 2015/04/12 08:55:14 matt Exp $	*/
/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _INTR_PRIVATE
#include "opt_ddb.h"
#include "opt_multiprocessor.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pic.c,v 1.31 2015/04/12 08:55:14 matt Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/xcall.h>
#include <sys/ipi.h>

#if defined(__arm__)
#include <arm/armreg.h>
#include <arm/cpufunc.h>
#elif defined(__aarch64__)
#include <aarch64/locore.h>
#define I32_bit		DAIF_I
#define F32_bit		DAIF_F
#endif

#ifdef DDB
#include <arm/db_machdep.h>
#endif

#include <arm/pic/picvar.h>
#if defined(__HAVE_PIC_PENDING_INTRS)
/*
 * This implementation of pending interrupts on a MULTIPROCESSOR system makes
 * the assumption that a PIC (pic_softc) shall only have all its interrupts
 * come from the same CPU.  In other words, interrupts from a single PIC will
 * not be distributed among multiple CPUs.
 */
struct pic_pending {
	volatile uint32_t blocked_pics;
	volatile uint32_t pending_pics;
	volatile uint32_t pending_ipls;
};
static uint32_t
	pic_find_pending_irqs_by_ipl(struct pic_softc *, size_t, uint32_t, int);
static struct pic_softc *
	pic_list_find_pic_by_pending_ipl(struct pic_pending *, uint32_t);
static void
	pic_deliver_irqs(struct pic_pending *, struct pic_softc *, int, void *);
static void
	pic_list_deliver_irqs(struct pic_pending *, register_t, int, void *);

#ifdef MULTIPROCESSOR
percpu_t *pic_pending_percpu;
#else
struct pic_pending pic_pending;
#endif /* MULTIPROCESSOR */
#endif /* __HAVE_PIC_PENDING_INTRS */

struct pic_softc *pic_list[PIC_MAXPICS];
#if PIC_MAXPICS > 32
#error PIC_MAXPICS > 32 not supported
#endif
struct intrsource *pic_sources[PIC_MAXMAXSOURCES];
struct intrsource *pic__iplsources[PIC_MAXMAXSOURCES];
struct intrsource **pic_iplsource[NIPL] = {
	[0 ... NIPL-1] = pic__iplsources,
};
size_t pic_ipl_offset[NIPL+1];
size_t pic_sourcebase;
static struct evcnt pic_deferral_ev =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "deferred", "intr");
EVCNT_ATTACH_STATIC(pic_deferral_ev);

#ifdef __HAVE_PIC_SET_PRIORITY
void
pic_set_priority(struct cpu_info *ci, int newipl)
{
	register_t psw = cpsid(I32_bit);
	if (pic_list[0] != NULL)
		(pic_list[0]->pic_ops->pic_set_priority)(pic_list[0], newipl);
	ci->ci_cpl = newipl;
	if ((psw & I32_bit) == 0)
		cpsie(I32_bit);
}
#endif

#ifdef MULTIPROCESSOR
int
pic_ipi_nop(void *arg)
{
	/* do nothing */
	return 1;
}

int
pic_ipi_xcall(void *arg)
{
	xc_ipi_handler();
	return 1;
}

int
pic_ipi_generic(void *arg)
{
	ipi_cpu_handler();
	return 1;
}

#ifdef DDB
int
pic_ipi_ddb(void *arg)
{
//	printf("%s: %s: tf=%p\n", __func__, curcpu()->ci_cpuname, arg);
	kdb_trap(-1, arg);
	return 1;
}
#endif

void
intr_cpu_init(struct cpu_info *ci)
{
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic != NULL && pic->pic_ops->pic_cpu_init != NULL) {
			(*pic->pic_ops->pic_cpu_init)(pic, ci);
		}
	}
}

typedef void (*pic_ipi_send_func_t)(struct pic_softc *, u_long);

void
intr_ipi_send(const kcpuset_t *kcp, u_long ipi)
{
	KASSERT(ipi < NIPI);
	bool __diagused sent_p = false;
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_cpus == NULL)
			continue;
		if (kcp == NULL || kcpuset_intersecting_p(kcp, pic->pic_cpus)) {
			(*pic->pic_ops->pic_ipi_send)(pic, kcp, ipi);
			// If we were targeting a single CPU or this pic
			// handles all cpus, we're done.
			if (kcp != NULL || pic->pic_cpus == kcpuset_running)
				return;
			sent_p = true;
		}
	}
	KASSERT(cold || sent_p);
}
#endif /* MULTIPROCESSOR */
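/*
 * Editor's sketch (not part of pic.c): a minimal outline of how a
 * platform interrupt controller plugs into this framework.  The
 * "foopic" names are hypothetical; the pic_ops member names are the
 * ones invoked throughout this file and declared in
 * <arm/pic/picvar.h>.  The framework calls back through pic_ops, and
 * the controller feeds interrupts in via pic_handle_intr() or the
 * pic_mark_pending*() routines below.
 */
#if 0
static void foopic_unblock_irqs(struct pic_softc *, size_t, uint32_t);
static void foopic_block_irqs(struct pic_softc *, size_t, uint32_t);
static int  foopic_find_pending_irqs(struct pic_softc *);
static void foopic_establish_irq(struct pic_softc *, struct intrsource *);

static const struct pic_ops foopic_ops = {
	.pic_unblock_irqs = foopic_unblock_irqs,
	.pic_block_irqs = foopic_block_irqs,
	.pic_find_pending_irqs = foopic_find_pending_irqs,
	.pic_establish_irq = foopic_establish_irq,
};

static struct pic_softc foopic_pic = {
	.pic_ops = &foopic_ops,
	.pic_maxsources = 32,
	.pic_name = "foopic",
};

static void
foopic_attach(void)
{
	/* Claim a block of global IRQ numbers starting at 0. */
	pic_add(&foopic_pic, 0);
}
#endif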
#ifdef __HAVE_PIC_FAST_SOFTINTS
int
pic_handle_softint(void *arg)
{
	void softint_switch(lwp_t *, int);
	struct cpu_info * const ci = curcpu();
	const size_t softint = (size_t) arg;
	int s = splhigh();
	ci->ci_intr_depth--;	// don't count these as interrupts
	softint_switch(ci->ci_softlwps[softint], s);
	ci->ci_intr_depth++;
	splx(s);
	return 1;
}
#endif

int
pic_handle_intr(void *arg)
{
	struct pic_softc * const pic = arg;
	int rv;

	rv = (*pic->pic_ops->pic_find_pending_irqs)(pic);

	return rv > 0;
}

#if defined(__HAVE_PIC_PENDING_INTRS)
void
pic_mark_pending_source(struct pic_softc *pic, struct intrsource *is)
{
	const uint32_t ipl_mask = __BIT(is->is_ipl);

	atomic_or_32(&pic->pic_pending_irqs[is->is_irq >> 5],
	    __BIT(is->is_irq & 0x1f));

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
#ifdef MULTIPROCESSOR
	struct pic_pending *pend = percpu_getref(pic_pending_percpu);
#else
	struct pic_pending *pend = &pic_pending;
#endif
	atomic_or_32(&pend->pending_ipls, ipl_mask);
	atomic_or_32(&pend->pending_pics, __BIT(pic->pic_id));
#ifdef MULTIPROCESSOR
	percpu_putref(pic_pending_percpu);
#endif
}

void
pic_mark_pending(struct pic_softc *pic, int irq)
{
	struct intrsource * const is = pic->pic_sources[irq];

	KASSERT(irq < pic->pic_maxsources);
	KASSERT(is != NULL);

	pic_mark_pending_source(pic, is);
}

uint32_t
pic_mark_pending_sources(struct pic_softc *pic, size_t irq_base,
	uint32_t pending)
{
	struct intrsource ** const isbase = &pic->pic_sources[irq_base];
	struct intrsource *is;
	volatile uint32_t *ipending = &pic->pic_pending_irqs[irq_base >> 5];
	uint32_t ipl_mask = 0;

	if (pending == 0)
		return ipl_mask;

	KASSERT((irq_base & 31) == 0);

	(*pic->pic_ops->pic_block_irqs)(pic, irq_base, pending);

	atomic_or_32(ipending, pending);
	while (pending != 0) {
		int n = ffs(pending);
		if (n-- == 0)
			break;
		is = isbase[n];
		KASSERT(is != NULL);
		KASSERT(irq_base <= is->is_irq && is->is_irq < irq_base + 32);
		pending &= ~__BIT(n);
		ipl_mask |= __BIT(is->is_ipl);
	}

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
#ifdef MULTIPROCESSOR
	struct pic_pending *pend = percpu_getref(pic_pending_percpu);
#else
	struct pic_pending *pend = &pic_pending;
#endif
	atomic_or_32(&pend->pending_ipls, ipl_mask);
	atomic_or_32(&pend->pending_pics, __BIT(pic->pic_id));
#ifdef MULTIPROCESSOR
	percpu_putref(pic_pending_percpu);
#endif
	return ipl_mask;
}
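/*
 * Editor's sketch (not part of pic.c): the loops above and below all
 * drain a 32-bit pending mask with the same ffs(3) idiom.  ffs()
 * returns the 1-based index of the lowest set bit, or 0 for an empty
 * mask, so "n = ffs(mask); if (n-- == 0) break;" yields 0-based bit
 * numbers in ascending order until the mask is exhausted.
 */
#if 0
	uint32_t mask = __BIT(3) | __BIT(17);	/* example: bits 3 and 17 */
	while (mask != 0) {
		int n = ffs(mask);
		if (n-- == 0)
			break;
		mask &= ~__BIT(n);
		/* visits n == 3, then n == 17 */
	}
#endif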
uint32_t
pic_find_pending_irqs_by_ipl(struct pic_softc *pic, size_t irq_base,
	uint32_t pending, int ipl)
{
	uint32_t ipl_irq_mask = 0;
	uint32_t irq_mask;

	for (;;) {
		int irq = ffs(pending);
		if (irq-- == 0)
			return ipl_irq_mask;

		irq_mask = __BIT(irq);
#if 1
		KASSERTMSG(pic->pic_sources[irq_base + irq] != NULL,
		    "%s: irq_base %zu irq %d\n", __func__, irq_base, irq);
#else
		if (pic->pic_sources[irq_base + irq] == NULL) {
			aprint_error("stray interrupt? irq_base=%zu irq=%d\n",
			    irq_base, irq);
		} else
#endif
		if (pic->pic_sources[irq_base + irq]->is_ipl == ipl)
			ipl_irq_mask |= irq_mask;

		pending &= ~irq_mask;
	}
}
#endif /* __HAVE_PIC_PENDING_INTRS */

void
pic_dispatch(struct intrsource *is, void *frame)
{
	int (*func)(void *) = is->is_func;
	void *arg = is->is_arg;

	if (__predict_false(arg == NULL)) {
		if (__predict_false(frame == NULL)) {
			pic_deferral_ev.ev_count++;
			return;
		}
		arg = frame;
	}

#ifdef MULTIPROCESSOR
	if (!is->is_mpsafe) {
		KERNEL_LOCK(1, NULL);
		const u_int ci_blcnt __diagused = curcpu()->ci_biglock_count;
		const u_int l_blcnt __diagused = curlwp->l_blcnt;
		(void)(*func)(arg);
		KASSERT(ci_blcnt == curcpu()->ci_biglock_count);
		KASSERT(l_blcnt == curlwp->l_blcnt);
		KERNEL_UNLOCK_ONE(NULL);
	} else
#endif
		(void)(*func)(arg);

	struct pic_percpu * const pcpu = percpu_getref(is->is_pic->pic_percpu);
	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	pcpu->pcpu_evs[is->is_irq].ev_count++;
	percpu_putref(is->is_pic->pic_percpu);
}
#if defined(__HAVE_PIC_PENDING_INTRS)
void
pic_deliver_irqs(struct pic_pending *pend, struct pic_softc *pic, int ipl,
	void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct intrsource *is;
	volatile uint32_t *ipending = pic->pic_pending_irqs;
	volatile uint32_t *iblocked = pic->pic_blocked_irqs;
	size_t irq_base;
#if PIC_MAXSOURCES > 32
	size_t irq_count;
	int poi = 0;		/* Possibility of interrupting */
#endif
	uint32_t pending_irqs;
	uint32_t blocked_irqs;
	int irq;
	bool progress __diagused = false;

	KASSERT(pic->pic_pending_ipls & ipl_mask);

	irq_base = 0;
#if PIC_MAXSOURCES > 32
	irq_count = 0;
#endif

	for (;;) {
		pending_irqs = pic_find_pending_irqs_by_ipl(pic, irq_base,
		    *ipending, ipl);
		KASSERT((pending_irqs & *ipending) == pending_irqs);
		KASSERT((pending_irqs & ~(*ipending)) == 0);
		if (pending_irqs == 0) {
#if PIC_MAXSOURCES > 32
			irq_count += 32;
			if (__predict_true(irq_count >= pic->pic_maxsources)) {
				if (!poi)
					/* Interrupt at this level was handled. */
					break;
				irq_base = 0;
				irq_count = 0;
				poi = 0;
				ipending = pic->pic_pending_irqs;
				iblocked = pic->pic_blocked_irqs;
			} else {
				irq_base += 32;
				ipending++;
				iblocked++;
				KASSERT(irq_base <= pic->pic_maxsources);
			}
			continue;
#else
			break;
#endif
		}
		progress = true;
		blocked_irqs = 0;
		do {
			irq = ffs(pending_irqs) - 1;
			KASSERT(irq >= 0);

			atomic_and_32(ipending, ~__BIT(irq));
			is = pic->pic_sources[irq_base + irq];
			if (is != NULL) {
				cpsie(I32_bit);
				pic_dispatch(is, frame);
				cpsid(I32_bit);
#if PIC_MAXSOURCES > 32
				/*
				 * There is a possibility of interrupting
				 * from cpsie() to cpsid().
				 */
				poi = 1;
#endif
				blocked_irqs |= __BIT(irq);
			} else {
				KASSERT(0);
			}
			pending_irqs = pic_find_pending_irqs_by_ipl(pic,
			    irq_base, *ipending, ipl);
		} while (pending_irqs);
		if (blocked_irqs) {
			atomic_or_32(iblocked, blocked_irqs);
			atomic_or_32(&pend->blocked_pics, __BIT(pic->pic_id));
		}
	}

	KASSERT(progress);
	/*
	 * Since interrupts are disabled, we don't have to be too careful
	 * about these.
	 */
	if (atomic_and_32_nv(&pic->pic_pending_ipls, ~ipl_mask) == 0)
		atomic_and_32(&pend->pending_pics, ~__BIT(pic->pic_id));
}

static void
pic_list_unblock_irqs(struct pic_pending *pend)
{
	uint32_t blocked_pics = pend->blocked_pics;

	pend->blocked_pics = 0;

	for (;;) {
		struct pic_softc *pic;
#if PIC_MAXSOURCES > 32
		volatile uint32_t *iblocked;
		uint32_t blocked;
		size_t irq_base;
#endif

		int pic_id = ffs(blocked_pics);
		if (pic_id-- == 0)
			return;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
#if PIC_MAXSOURCES > 32
		for (irq_base = 0, iblocked = pic->pic_blocked_irqs;
		     irq_base < pic->pic_maxsources;
		     irq_base += 32, iblocked++) {
			if ((blocked = *iblocked) != 0) {
				(*pic->pic_ops->pic_unblock_irqs)(pic,
				    irq_base, blocked);
				atomic_and_32(iblocked, ~blocked);
			}
		}
#else
		KASSERT(pic->pic_blocked_irqs[0] != 0);
		(*pic->pic_ops->pic_unblock_irqs)(pic,
		    0, pic->pic_blocked_irqs[0]);
		pic->pic_blocked_irqs[0] = 0;
#endif
		blocked_pics &= ~__BIT(pic_id);
	}
}

struct pic_softc *
pic_list_find_pic_by_pending_ipl(struct pic_pending *pend, uint32_t ipl_mask)
{
	uint32_t pending_pics = pend->pending_pics;
	struct pic_softc *pic;

	for (;;) {
		int pic_id = ffs(pending_pics);
		if (pic_id-- == 0)
			return NULL;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
		if (pic->pic_pending_ipls & ipl_mask)
			return pic;
		pending_pics &= ~__BIT(pic_id);
	}
}

void
pic_list_deliver_irqs(struct pic_pending *pend, register_t psw, int ipl,
	void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct pic_softc *pic;

	while ((pic = pic_list_find_pic_by_pending_ipl(pend, ipl_mask)) != NULL) {
		pic_deliver_irqs(pend, pic, ipl, frame);
		KASSERT((pic->pic_pending_ipls & ipl_mask) == 0);
	}
	atomic_and_32(&pend->pending_ipls, ~ipl_mask);
}
#endif /* __HAVE_PIC_PENDING_INTRS */

void
pic_do_pending_ints(register_t psw, int newipl, void *frame)
{
	struct cpu_info * const ci = curcpu();
	if (__predict_false(newipl == IPL_HIGH)) {
		KASSERTMSG(ci->ci_cpl == IPL_HIGH, "cpl %d", ci->ci_cpl);
		return;
	}
#if defined(__HAVE_PIC_PENDING_INTRS)
#ifdef MULTIPROCESSOR
	struct pic_pending *pend = percpu_getref(pic_pending_percpu);
#else
	struct pic_pending *pend = &pic_pending;
#endif
	while ((pend->pending_ipls & ~__BIT(newipl)) > __BIT(newipl)) {
		KASSERT(pend->pending_ipls < __BIT(NIPL));
		for (;;) {
			int ipl = 31 - __builtin_clz(pend->pending_ipls);
			KASSERT(ipl < NIPL);
			if (ipl <= newipl)
				break;

			pic_set_priority(ci, ipl);
			pic_list_deliver_irqs(pend, psw, ipl, frame);
			pic_list_unblock_irqs(pend);
		}
	}
#ifdef MULTIPROCESSOR
	percpu_putref(pic_pending_percpu);
#endif
#endif /* __HAVE_PIC_PENDING_INTRS */
#ifdef __HAVE_PREEMPTION
	if (newipl == IPL_NONE && (ci->ci_astpending & __BIT(1))) {
		pic_set_priority(ci, IPL_SCHED);
		kpreempt(0);
	}
#endif
	if (ci->ci_cpl != newipl)
		pic_set_priority(ci, newipl);
}
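/*
 * Editor's sketch (not part of pic.c): pic_do_pending_ints() drains
 * pending levels from the highest IPL down to (but not including)
 * newipl; "31 - __builtin_clz(mask)" is the index of the highest set
 * bit.  So with both IPL_SCHED and IPL_VM pending and newipl ==
 * IPL_NONE, the IPL_SCHED sources are delivered before the IPL_VM
 * ones.
 */
#if 0
	uint32_t mask = __BIT(IPL_VM) | __BIT(IPL_SCHED);
	int ipl = 31 - __builtin_clz(mask);	/* ipl == IPL_SCHED */
#endif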
static void
pic_percpu_allocate(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct pic_softc * const pic = v1;

	pcpu->pcpu_evs = kmem_zalloc(pic->pic_maxsources * sizeof(pcpu->pcpu_evs[0]),
	    KM_SLEEP);
	KASSERT(pcpu->pcpu_evs != NULL);

#define	PCPU_NAMELEN	32
#ifdef DIAGNOSTIC
	const size_t namelen = strlen(pic->pic_name) + 4 + strlen(ci->ci_data.cpu_name);
#endif

	KASSERT(namelen < PCPU_NAMELEN);
	pcpu->pcpu_name = kmem_alloc(PCPU_NAMELEN, KM_SLEEP);
#ifdef MULTIPROCESSOR
	snprintf(pcpu->pcpu_name, PCPU_NAMELEN,
	    "%s (%s)", pic->pic_name, ci->ci_data.cpu_name);
#else
	strlcpy(pcpu->pcpu_name, pic->pic_name, PCPU_NAMELEN);
#endif
	pcpu->pcpu_magic = PICPERCPU_MAGIC;
#if 0
	printf("%s: %s %s: <%s>\n",
	    __func__, ci->ci_data.cpu_name, pic->pic_name,
	    pcpu->pcpu_name);
#endif
}

#if defined(__HAVE_PIC_PENDING_INTRS) && defined(MULTIPROCESSOR)
static void
pic_pending_zero(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_pending * const p = v0;
	memset(p, 0, sizeof(*p));
}
#endif /* __HAVE_PIC_PENDING_INTRS && MULTIPROCESSOR */

void
pic_add(struct pic_softc *pic, int irqbase)
{
	int slot, maybe_slot = -1;

	KASSERT(strlen(pic->pic_name) > 0);

#if defined(__HAVE_PIC_PENDING_INTRS) && defined(MULTIPROCESSOR)
	if (__predict_false(pic_pending_percpu == NULL)) {
		pic_pending_percpu = percpu_alloc(sizeof(struct pic_pending));
		KASSERT(pic_pending_percpu != NULL);

		/*
		 * Now zero the per-cpu pending data.
		 */
		percpu_foreach(pic_pending_percpu, pic_pending_zero, NULL);
	}
#endif /* __HAVE_PIC_PENDING_INTRS && MULTIPROCESSOR */

	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const xpic = pic_list[slot];
		if (xpic == NULL) {
			if (maybe_slot < 0)
				maybe_slot = slot;
			if (irqbase < 0)
				break;
			continue;
		}
		if (irqbase < 0 || xpic->pic_irqbase < 0)
			continue;
		if (irqbase >= xpic->pic_irqbase + xpic->pic_maxsources)
			continue;
		if (irqbase + pic->pic_maxsources <= xpic->pic_irqbase)
			continue;
		panic("pic_add: pic %s (%zu sources @ irq %u) conflicts"
		    " with pic %s (%zu sources @ irq %u)",
		    pic->pic_name, pic->pic_maxsources, irqbase,
		    xpic->pic_name, xpic->pic_maxsources, xpic->pic_irqbase);
	}
	slot = maybe_slot;
#if 0
	printf("%s: pic_sourcebase=%zu pic_maxsources=%zu\n",
	    pic->pic_name, pic_sourcebase, pic->pic_maxsources);
#endif
	KASSERTMSG(pic->pic_maxsources <= PIC_MAXSOURCES, "%zu",
	    pic->pic_maxsources);
	KASSERT(pic_sourcebase + pic->pic_maxsources <= PIC_MAXMAXSOURCES);

	/*
	 * Allocate a pointer to each cpu's evcnts and then, for each cpu,
	 * allocate its evcnts and then attach an evcnt for each pin.
	 * We can't allocate the evcnt structures directly since
	 * percpu will move the contents of percpu memory around and
	 * corrupt the pointers in the evcnts themselves.  Remember, any
	 * problem can be solved with sufficient indirection.
	 */
	pic->pic_percpu = percpu_alloc(sizeof(struct pic_percpu));
	KASSERT(pic->pic_percpu != NULL);

	/*
	 * Now allocate the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_allocate, pic);

	pic->pic_sources = &pic_sources[pic_sourcebase];
	pic->pic_irqbase = irqbase;
	pic_sourcebase += pic->pic_maxsources;
	pic->pic_id = slot;
#ifdef __HAVE_PIC_SET_PRIORITY
	KASSERT((slot == 0) == (pic->pic_ops->pic_set_priority != NULL));
#endif
#ifdef MULTIPROCESSOR
	KASSERT((pic->pic_cpus != NULL) == (pic->pic_ops->pic_ipi_send != NULL));
#endif
	pic_list[slot] = pic;
}
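/*
 * Editor's sketch (not part of pic.c): the indirection described in
 * the comment in pic_add() above, shown in isolation.  percpu(9) may
 * relocate per-cpu storage when a later percpu_alloc() grows the
 * area, and evcnts are linked into a global list by address, so the
 * evcnts must live at stable addresses; only a pointer lives in
 * percpu memory.  "nsources" is a hypothetical count.
 */
#if 0
	percpu_t *pc = percpu_alloc(sizeof(struct evcnt *)); /* movable slot */
	struct evcnt **evsp = percpu_getref(pc);
	*evsp = kmem_zalloc(nsources * sizeof(struct evcnt), KM_SLEEP);
	/* ^ stable kmem array; safe to evcnt_attach_dynamic() into */
	percpu_putref(pc);
#endif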
int
pic_alloc_irq(struct pic_softc *pic)
{
	int irq;

	for (irq = 0; irq < pic->pic_maxsources; irq++) {
		if (pic->pic_sources[irq] == NULL)
			return irq;
	}

	return -1;
}

static void
pic_percpu_evcnt_attach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	evcnt_attach_dynamic(&pcpu->pcpu_evs[is->is_irq], EVCNT_TYPE_INTR, NULL,
	    pcpu->pcpu_name, is->is_source);
}

void *
pic_establish_intr(struct pic_softc *pic, int irq, int ipl, int type,
	int (*func)(void *), void *arg)
{
	struct intrsource *is;
	int off, nipl;

	if (pic->pic_sources[irq]) {
		printf("pic_establish_intr: pic %s irq %d already present\n",
		    pic->pic_name, irq);
		return NULL;
	}

	is = kmem_zalloc(sizeof(*is), KM_SLEEP);
	if (is == NULL)
		return NULL;

	is->is_pic = pic;
	is->is_irq = irq;
	is->is_ipl = ipl;
	is->is_type = type & 0xff;
	is->is_func = func;
	is->is_arg = arg;
#ifdef MULTIPROCESSOR
	is->is_mpsafe = (type & IST_MPSAFE) || ipl != IPL_VM;
#endif

	if (pic->pic_ops->pic_source_name)
		(*pic->pic_ops->pic_source_name)(pic, irq, is->is_source,
		    sizeof(is->is_source));
	else
		snprintf(is->is_source, sizeof(is->is_source), "irq %d", irq);

	/*
	 * Now attach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_attach, is);

	pic->pic_sources[irq] = is;

	/*
	 * First try to use an existing slot which is empty.
	 */
	for (off = pic_ipl_offset[ipl]; off < pic_ipl_offset[ipl+1]; off++) {
		if (pic__iplsources[off] == NULL) {
			is->is_iplidx = off - pic_ipl_offset[ipl];
			pic__iplsources[off] = is;
			return is;
		}
	}

	/*
	 * Move up all the sources by one.
	 */
	if (ipl < NIPL) {
		off = pic_ipl_offset[ipl+1];
		memmove(&pic__iplsources[off+1], &pic__iplsources[off],
		    sizeof(pic__iplsources[0]) * (pic_ipl_offset[NIPL] - off));
	}

	/*
	 * Advance the offset of all IPLs higher than this.  Include an
	 * extra one as well.  Thus the number of sources per ipl is
	 * pic_ipl_offset[ipl+1] - pic_ipl_offset[ipl].
	 */
	for (nipl = ipl + 1; nipl <= NIPL; nipl++)
		pic_ipl_offset[nipl]++;

	/*
	 * Insert into the previously made position at the end of this IPL's
	 * sources.
	 */
	off = pic_ipl_offset[ipl + 1] - 1;
	is->is_iplidx = off - pic_ipl_offset[ipl];
	pic__iplsources[off] = is;

	(*pic->pic_ops->pic_establish_irq)(pic, is);

	(*pic->pic_ops->pic_unblock_irqs)(pic, is->is_irq & ~0x1f,
	    __BIT(is->is_irq & 0x1f));

	/* We're done. */
	return is;
}
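/*
 * Editor's worked example (not part of pic.c) of the pic_ipl_offset[]
 * bookkeeping in pic_establish_intr(), with hypothetical numbers.
 * Suppose sources at some level ipl occupy slots [4,6) of
 * pic__iplsources[] (pic_ipl_offset[ipl] == 4, pic_ipl_offset[ipl+1]
 * == 6) and no slot in that range is empty.  Establishing another
 * source at ipl memmove()s everything from slot 6 onward up by one,
 * increments pic_ipl_offset[n] for every n > ipl, and stores the new
 * source at slot pic_ipl_offset[ipl+1] - 1 == 6, the new end of this
 * IPL's run.  The invariant that pic_ipl_offset[ipl+1] -
 * pic_ipl_offset[ipl] counts the sources at that IPL is preserved.
 */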
static void
pic_percpu_evcnt_deattach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	evcnt_detach(&pcpu->pcpu_evs[is->is_irq]);
}

void
pic_disestablish_source(struct intrsource *is)
{
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	KASSERT(is == pic->pic_sources[irq]);

	(*pic->pic_ops->pic_block_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
	pic->pic_sources[irq] = NULL;
	pic__iplsources[pic_ipl_offset[is->is_ipl] + is->is_iplidx] = NULL;
	/*
	 * Now detach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_deattach, is);

	kmem_free(is, sizeof(*is));
}

void *
intr_establish(int irq, int ipl, int type, int (*func)(void *), void *arg)
{
	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		if (pic->pic_irqbase <= irq
		    && irq < pic->pic_irqbase + pic->pic_maxsources) {
			return pic_establish_intr(pic, irq - pic->pic_irqbase,
			    ipl, type, func, arg);
		}
	}

	return NULL;
}

void
intr_disestablish(void *ih)
{
	struct intrsource * const is = ih;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	pic_disestablish_source(is);
}
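/*
 * Editor's sketch (not part of pic.c): typical driver-side use of the
 * intr_establish()/intr_disestablish() entry points above.  The
 * device name, IRQ number, and handler are hypothetical.
 */
#if 0
static int
foodev_intr(void *arg)
{
	struct foodev_softc * const sc = arg;
	/* ... acknowledge the device, do the work ... */
	return 1;			/* interrupt was claimed */
}

	/* attach: global IRQ 27, IPL_VM, level-triggered, MP-safe */
	sc->sc_ih = intr_establish(27, IPL_VM, IST_LEVEL | IST_MPSAFE,
	    foodev_intr, sc);
	if (sc->sc_ih == NULL)
		aprint_error_dev(self, "couldn't establish interrupt\n");

	/* detach: */
	intr_disestablish(sc->sc_ih);
#endif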