/*	$NetBSD: pic.c,v 1.60 2020/10/26 07:16:41 skrll Exp $	*/
/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _INTR_PRIVATE
#include "opt_ddb.h"
#include "opt_multiprocessor.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pic.c,v 1.60 2020/10/26 07:16:41 skrll Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/interrupt.h>
#include <sys/intr.h>
#include <sys/ipi.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/xcall.h>

#include <arm/armreg.h>
#include <arm/cpufunc.h>
#include <arm/locore.h>	/* for compat aarch64 */

#ifdef DDB
#include <arm/db_machdep.h>
#endif

#include <arm/pic/picvar.h>

#if defined(__HAVE_PIC_PENDING_INTRS)
/*
 * This implementation of pending interrupts on a MULTIPROCESSOR system makes
 * the assumption that a PIC (pic_softc) shall only have all its interrupts
 * come from the same CPU.  In other words, interrupts from a single PIC will
 * not be distributed among multiple CPUs.
 */
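
/*
 * struct pic_pending is the per-CPU record of deferred-interrupt state:
 * blocked_pics and pending_pics hold one bit per pic (indexed by pic_id),
 * and pending_ipls holds one bit per IPL that has at least one pending
 * source.  See pic_mark_pending_source() and pic_deliver_irqs() below for
 * the producers and consumers of these bits.
 */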
struct pic_pending {
	volatile uint32_t blocked_pics;
	volatile uint32_t pending_pics;
	volatile uint32_t pending_ipls;
};
static uint32_t
	pic_find_pending_irqs_by_ipl(struct pic_softc *, size_t, uint32_t, int);
static struct pic_softc *
	pic_list_find_pic_by_pending_ipl(struct pic_pending *, uint32_t);
static void
	pic_deliver_irqs(struct pic_pending *, struct pic_softc *, int, void *);
static void
	pic_list_deliver_irqs(struct pic_pending *, register_t, int, void *);

#ifdef MULTIPROCESSOR
percpu_t *pic_pending_percpu;
static struct pic_pending *
pic_pending_get(void)
{
	return percpu_getref(pic_pending_percpu);
}
static void
pic_pending_put(struct pic_pending *pend)
{
	percpu_putref(pic_pending_percpu);
}
#else
struct pic_pending pic_pending;
#define	pic_pending_get()	(&pic_pending)
#define	pic_pending_put(pend)	__nothing
#endif /* MULTIPROCESSOR */
#endif /* __HAVE_PIC_PENDING_INTRS */

struct pic_softc *pic_list[PIC_MAXPICS];
#if PIC_MAXPICS > 32
#error PIC_MAXPICS > 32 not supported
#endif
struct intrsource *pic_sources[PIC_MAXMAXSOURCES];
struct intrsource *pic__iplsources[PIC_MAXMAXSOURCES];
struct intrsource **pic_iplsource[NIPL] = {
	[0 ... NIPL-1] = pic__iplsources,
};
size_t pic_ipl_offset[NIPL+1];

static kmutex_t pic_lock;
static size_t pic_sourcebase;
static int pic_lastbase;
static struct evcnt pic_deferral_ev =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "deferred", "intr");
EVCNT_ATTACH_STATIC(pic_deferral_ev);

static int pic_init(void);

#ifdef __HAVE_PIC_SET_PRIORITY
void
pic_set_priority(struct cpu_info *ci, int newipl)
{
	register_t psw = cpsid(I32_bit);
	if (pic_list[0] != NULL)
		(pic_list[0]->pic_ops->pic_set_priority)(pic_list[0], newipl);
	ci->ci_cpl = newipl;
	if ((psw & I32_bit) == 0)
		cpsie(I32_bit);
}
#endif

#ifdef MULTIPROCESSOR
int
pic_ipi_ast(void *arg)
{
	setsoftast(curcpu());
	return 1;
}

int
pic_ipi_nop(void *arg)
{
	/* do nothing */
	return 1;
}

int
pic_ipi_xcall(void *arg)
{
	xc_ipi_handler();
	return 1;
}

int
pic_ipi_generic(void *arg)
{
	ipi_cpu_handler();
	return 1;
}

#ifdef DDB
int
pic_ipi_ddb(void *arg)
{
//	printf("%s: %s: tf=%p\n", __func__, curcpu()->ci_cpuname, arg);
	kdb_trap(-1, arg);
	return 1;
}
#endif /* DDB */

#ifdef __HAVE_PREEMPTION
int
pic_ipi_kpreempt(void *arg)
{
	atomic_or_uint(&curcpu()->ci_astpending, __BIT(1));
	return 1;
}
#endif /* __HAVE_PREEMPTION */

void
intr_cpu_init(struct cpu_info *ci)
{
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic != NULL && pic->pic_ops->pic_cpu_init != NULL) {
			(*pic->pic_ops->pic_cpu_init)(pic, ci);
		}
	}
}

typedef void (*pic_ipi_send_func_t)(struct pic_softc *, u_long);
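
/*
 * Send the IPI "ipi" to the CPUs in kcp, or to every CPU but ourself when
 * kcp is NULL, going through each pic that can reach a targeted CPU.
 */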
void
intr_ipi_send(const kcpuset_t *kcp, u_long ipi)
{
	struct cpu_info * const ci = curcpu();
	KASSERT(ipi < NIPI);
	bool __diagused sent_p = false;
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_cpus == NULL)
			continue;
		if (kcp == NULL || kcpuset_intersecting_p(kcp, pic->pic_cpus)) {
			/*
			 * Never send to ourself.
			 *
			 * This test uses pointer comparison for systems
			 * that have a pic per cpu, e.g. RPI[23].  GIC sets
			 * pic_cpus to kcpuset_running and handles "not for
			 * self" internally.
			 */
			if (pic->pic_cpus == ci->ci_kcpuset)
				continue;

			(*pic->pic_ops->pic_ipi_send)(pic, kcp, ipi);

			/*
			 * If we were targeting a single CPU or this pic
			 * handles all cpus, we're done.
			 */
			if (kcp != NULL || pic->pic_cpus == kcpuset_running)
				return;
			sent_p = true;
		}
	}
	KASSERTMSG(cold || sent_p || ncpu <= 1, "cold %d sent_p %d ncpu %d",
	    cold, sent_p, ncpu);
}
#endif /* MULTIPROCESSOR */

#ifdef __HAVE_PIC_FAST_SOFTINTS
int
pic_handle_softint(void *arg)
{
	void softint_switch(lwp_t *, int);
	struct cpu_info * const ci = curcpu();
	const size_t softint = (size_t) arg;
	int s = splhigh();
	ci->ci_intr_depth--;	// don't count these as interrupts
	softint_switch(ci->ci_softlwps[softint], s);
	ci->ci_intr_depth++;
	splx(s);
	return 1;
}
#endif

int
pic_handle_intr(void *arg)
{
	struct pic_softc * const pic = arg;
	int rv;

	rv = (*pic->pic_ops->pic_find_pending_irqs)(pic);

	return rv > 0;
}

#if defined(__HAVE_PIC_PENDING_INTRS)
void
pic_mark_pending_source(struct pic_softc *pic, struct intrsource *is)
{
	const uint32_t ipl_mask = __BIT(is->is_ipl);

	atomic_or_32(&pic->pic_pending_irqs[is->is_irq >> 5],
	    __BIT(is->is_irq & 0x1f));

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
	struct pic_pending *pend = pic_pending_get();
	atomic_or_32(&pend->pending_ipls, ipl_mask);
	atomic_or_32(&pend->pending_pics, __BIT(pic->pic_id));
	pic_pending_put(pend);
}

void
pic_mark_pending(struct pic_softc *pic, int irq)
{
	struct intrsource * const is = pic->pic_sources[irq];

	KASSERT(irq < pic->pic_maxsources);
	KASSERT(is != NULL);

	pic_mark_pending_source(pic, is);
}
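
/*
 * Mark a 32-bit group of sources pending in one shot: block them at the
 * hardware, record them in pic_pending_irqs, and return the mask of IPLs
 * those sources belong to.
 */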
uint32_t
pic_mark_pending_sources(struct pic_softc *pic, size_t irq_base,
	uint32_t pending)
{
	struct intrsource ** const isbase = &pic->pic_sources[irq_base];
	struct intrsource *is;
	volatile uint32_t *ipending = &pic->pic_pending_irqs[irq_base >> 5];
	uint32_t ipl_mask = 0;

	if (pending == 0)
		return ipl_mask;

	KASSERT((irq_base & 31) == 0);

	(*pic->pic_ops->pic_block_irqs)(pic, irq_base, pending);

	atomic_or_32(ipending, pending);
	while (pending != 0) {
		int n = ffs(pending);
		if (n-- == 0)
			break;
		is = isbase[n];
		KASSERT(is != NULL);
		KASSERT(irq_base <= is->is_irq && is->is_irq < irq_base + 32);
		pending &= ~__BIT(n);
		ipl_mask |= __BIT(is->is_ipl);
	}

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
	struct pic_pending *pend = pic_pending_get();
	atomic_or_32(&pend->pending_ipls, ipl_mask);
	atomic_or_32(&pend->pending_pics, __BIT(pic->pic_id));
	pic_pending_put(pend);
	return ipl_mask;
}

uint32_t
pic_find_pending_irqs_by_ipl(struct pic_softc *pic, size_t irq_base,
	uint32_t pending, int ipl)
{
	uint32_t ipl_irq_mask = 0;
	uint32_t irq_mask;

	for (;;) {
		int irq = ffs(pending);
		if (irq-- == 0)
			return ipl_irq_mask;

		irq_mask = __BIT(irq);
#if 1
		KASSERTMSG(pic->pic_sources[irq_base + irq] != NULL,
		    "%s: irq_base %zu irq %d\n", __func__, irq_base, irq);
#else
		if (pic->pic_sources[irq_base + irq] == NULL) {
			aprint_error("stray interrupt? irq_base=%zu irq=%d\n",
			    irq_base, irq);
		} else
#endif
		if (pic->pic_sources[irq_base + irq]->is_ipl == ipl)
			ipl_irq_mask |= irq_mask;

		pending &= ~irq_mask;
	}
}
#endif /* __HAVE_PIC_PENDING_INTRS */

void
pic_dispatch(struct intrsource *is, void *frame)
{
	int (*func)(void *) = is->is_func;
	void *arg = is->is_arg;

	if (__predict_false(arg == NULL)) {
		if (__predict_false(frame == NULL)) {
			pic_deferral_ev.ev_count++;
			return;
		}
		arg = frame;
	}

#ifdef MULTIPROCESSOR
	if (!is->is_mpsafe) {
		KERNEL_LOCK(1, NULL);
		const u_int ci_blcnt __diagused = curcpu()->ci_biglock_count;
		const u_int l_blcnt __diagused = curlwp->l_blcnt;
		(void)(*func)(arg);
		KASSERT(ci_blcnt == curcpu()->ci_biglock_count);
		KASSERT(l_blcnt == curlwp->l_blcnt);
		KERNEL_UNLOCK_ONE(NULL);
	} else
#endif
		(void)(*func)(arg);

	struct pic_percpu * const pcpu = percpu_getref(is->is_pic->pic_percpu);
	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	pcpu->pcpu_evs[is->is_irq].ev_count++;
	percpu_putref(is->is_pic->pic_percpu);
}
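
/*
 * Deliver the pending interrupts of one pic at one IPL.  Interrupts are
 * re-enabled around each handler (the cpsie/cpsid pair bracketing
 * pic_dispatch() below), and every delivered source stays blocked until
 * pic_list_unblock_irqs() runs.
 */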
#if defined(__HAVE_PIC_PENDING_INTRS)
void
pic_deliver_irqs(struct pic_pending *pend, struct pic_softc *pic, int ipl,
	void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct intrsource *is;
	volatile uint32_t *ipending = pic->pic_pending_irqs;
	volatile uint32_t *iblocked = pic->pic_blocked_irqs;
	size_t irq_base;
#if PIC_MAXSOURCES > 32
	size_t irq_count;
	int poi = 0;		/* Possibility of interrupting */
#endif
	uint32_t pending_irqs;
	uint32_t blocked_irqs;
	int irq;
	bool progress __diagused = false;

	KASSERT(pic->pic_pending_ipls & ipl_mask);

	irq_base = 0;
#if PIC_MAXSOURCES > 32
	irq_count = 0;
#endif

	for (;;) {
		pending_irqs = pic_find_pending_irqs_by_ipl(pic, irq_base,
		    *ipending, ipl);
		KASSERT((pending_irqs & *ipending) == pending_irqs);
		KASSERT((pending_irqs & ~(*ipending)) == 0);
		if (pending_irqs == 0) {
#if PIC_MAXSOURCES > 32
			irq_count += 32;
			if (__predict_true(irq_count >= pic->pic_maxsources)) {
				if (!poi)
					/*Interrupt at this level was handled.*/
					break;
				irq_base = 0;
				irq_count = 0;
				poi = 0;
				ipending = pic->pic_pending_irqs;
				iblocked = pic->pic_blocked_irqs;
			} else {
				irq_base += 32;
				ipending++;
				iblocked++;
				KASSERT(irq_base <= pic->pic_maxsources);
			}
			continue;
#else
			break;
#endif
		}
		progress = true;
		blocked_irqs = 0;
		do {
			irq = ffs(pending_irqs) - 1;
			KASSERT(irq >= 0);

			atomic_and_32(ipending, ~__BIT(irq));
			is = pic->pic_sources[irq_base + irq];
			if (is != NULL) {
				cpsie(I32_bit);
				pic_dispatch(is, frame);
				cpsid(I32_bit);
#if PIC_MAXSOURCES > 32
				/*
				 * There is a possibility of interrupting
				 * from cpsie() to cpsid().
				 */
				poi = 1;
#endif
				blocked_irqs |= __BIT(irq);
			} else {
				KASSERT(0);
			}
			pending_irqs = pic_find_pending_irqs_by_ipl(pic,
			    irq_base, *ipending, ipl);
		} while (pending_irqs);
		if (blocked_irqs) {
			atomic_or_32(iblocked, blocked_irqs);
			atomic_or_32(&pend->blocked_pics, __BIT(pic->pic_id));
		}
	}

	KASSERT(progress);
	/*
	 * Since interrupts are disabled, we don't have to be too careful
	 * about these.
	 */
	if (atomic_and_32_nv(&pic->pic_pending_ipls, ~ipl_mask) == 0)
		atomic_and_32(&pend->pending_pics, ~__BIT(pic->pic_id));
}

static void
pic_list_unblock_irqs(struct pic_pending *pend)
{
	uint32_t blocked_pics = pend->blocked_pics;

	pend->blocked_pics = 0;

	for (;;) {
		struct pic_softc *pic;
#if PIC_MAXSOURCES > 32
		volatile uint32_t *iblocked;
		uint32_t blocked;
		size_t irq_base;
#endif

		int pic_id = ffs(blocked_pics);
		if (pic_id-- == 0)
			return;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
#if PIC_MAXSOURCES > 32
		for (irq_base = 0, iblocked = pic->pic_blocked_irqs;
		     irq_base < pic->pic_maxsources;
		     irq_base += 32, iblocked++) {
			if ((blocked = *iblocked) != 0) {
				(*pic->pic_ops->pic_unblock_irqs)(pic,
				    irq_base, blocked);
				atomic_and_32(iblocked, ~blocked);
			}
		}
#else
		KASSERT(pic->pic_blocked_irqs[0] != 0);
		(*pic->pic_ops->pic_unblock_irqs)(pic,
		    0, pic->pic_blocked_irqs[0]);
		pic->pic_blocked_irqs[0] = 0;
#endif
		blocked_pics &= ~__BIT(pic_id);
	}
}

struct pic_softc *
pic_list_find_pic_by_pending_ipl(struct pic_pending *pend, uint32_t ipl_mask)
{
	uint32_t pending_pics = pend->pending_pics;
	struct pic_softc *pic;

	for (;;) {
		int pic_id = ffs(pending_pics);
		if (pic_id-- == 0)
			return NULL;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
		if (pic->pic_pending_ipls & ipl_mask)
			return pic;
		pending_pics &= ~__BIT(pic_id);
	}
}

void
pic_list_deliver_irqs(struct pic_pending *pend, register_t psw, int ipl,
	void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct pic_softc *pic;

	while ((pic = pic_list_find_pic_by_pending_ipl(pend, ipl_mask)) != NULL) {
		pic_deliver_irqs(pend, pic, ipl, frame);
		KASSERT((pic->pic_pending_ipls & ipl_mask) == 0);
	}
	atomic_and_32(&pend->pending_ipls, ~ipl_mask);
}
#endif /* __HAVE_PIC_PENDING_INTRS */
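
/*
 * Run deferred interrupts from the highest pending IPL down to (but not
 * including) newipl, raising and restoring the priority around each level;
 * called when the current IPL is being lowered.
 */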
void
pic_do_pending_ints(register_t psw, int newipl, void *frame)
{
	struct cpu_info * const ci = curcpu();
	if (__predict_false(newipl == IPL_HIGH)) {
		KASSERTMSG(ci->ci_cpl == IPL_HIGH, "cpl %d", ci->ci_cpl);
		return;
	}
#if defined(__HAVE_PIC_PENDING_INTRS)
	struct pic_pending *pend = pic_pending_get();
	while ((pend->pending_ipls & ~__BIT(newipl)) > __BIT(newipl)) {
		KASSERT(pend->pending_ipls < __BIT(NIPL));
		for (;;) {
			int ipl = 31 - __builtin_clz(pend->pending_ipls);
			KASSERT(ipl < NIPL);
			if (ipl <= newipl)
				break;

			pic_set_priority(ci, ipl);
			pic_list_deliver_irqs(pend, psw, ipl, frame);
			pic_list_unblock_irqs(pend);
		}
	}
	pic_pending_put(pend);
#endif /* __HAVE_PIC_PENDING_INTRS */
#ifdef __HAVE_PREEMPTION
	if (newipl == IPL_NONE && (ci->ci_astpending & __BIT(1))) {
		pic_set_priority(ci, IPL_SCHED);
		kpreempt(0);
	}
#endif
	if (ci->ci_cpl != newipl)
		pic_set_priority(ci, newipl);
}

static void
pic_percpu_allocate(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct pic_softc * const pic = v1;

	pcpu->pcpu_evs = kmem_zalloc(pic->pic_maxsources * sizeof(pcpu->pcpu_evs[0]),
	    KM_SLEEP);
	KASSERT(pcpu->pcpu_evs != NULL);

#define	PCPU_NAMELEN	32
#ifdef DIAGNOSTIC
	const size_t namelen = strlen(pic->pic_name) + 4 + strlen(ci->ci_data.cpu_name);
#endif

	KASSERT(namelen < PCPU_NAMELEN);
	pcpu->pcpu_name = kmem_alloc(PCPU_NAMELEN, KM_SLEEP);
#ifdef MULTIPROCESSOR
	snprintf(pcpu->pcpu_name, PCPU_NAMELEN,
	    "%s (%s)", pic->pic_name, ci->ci_data.cpu_name);
#else
	strlcpy(pcpu->pcpu_name, pic->pic_name, PCPU_NAMELEN);
#endif
	pcpu->pcpu_magic = PICPERCPU_MAGIC;
#if 0
	printf("%s: %s %s: <%s>\n",
	    __func__, ci->ci_data.cpu_name, pic->pic_name,
	    pcpu->pcpu_name);
#endif
}

static int
pic_init(void)
{

	mutex_init(&pic_lock, MUTEX_DEFAULT, IPL_HIGH);

	return 0;
}
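
/*
 * Register a pic: pick a free slot in pic_list[], carve its sources out of
 * the global pic_sources[] array, and panic if the requested irqbase
 * overlaps an already-registered pic.  PIC_IRQBASE_ALLOC requests the next
 * free irq base.  Returns the irq base actually used.
 */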
int
pic_add(struct pic_softc *pic, int irqbase)
{
	int slot, maybe_slot = -1;
	size_t sourcebase;
	static ONCE_DECL(pic_once);

	RUN_ONCE(&pic_once, pic_init);

	KASSERT(strlen(pic->pic_name) > 0);

#if defined(__HAVE_PIC_PENDING_INTRS) && defined(MULTIPROCESSOR)
	if (__predict_false(pic_pending_percpu == NULL))
		pic_pending_percpu = percpu_alloc(sizeof(struct pic_pending));
#endif /* __HAVE_PIC_PENDING_INTRS && MULTIPROCESSOR */

	mutex_enter(&pic_lock);
	if (irqbase == PIC_IRQBASE_ALLOC) {
		irqbase = pic_lastbase;
	}
	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const xpic = pic_list[slot];
		if (xpic == NULL) {
			if (maybe_slot < 0)
				maybe_slot = slot;
			if (irqbase < 0)
				break;
			continue;
		}
		if (irqbase < 0 || xpic->pic_irqbase < 0)
			continue;
		if (irqbase >= xpic->pic_irqbase + xpic->pic_maxsources)
			continue;
		if (irqbase + pic->pic_maxsources <= xpic->pic_irqbase)
			continue;
		panic("pic_add: pic %s (%zu sources @ irq %u) conflicts"
		    " with pic %s (%zu sources @ irq %u)",
		    pic->pic_name, pic->pic_maxsources, irqbase,
		    xpic->pic_name, xpic->pic_maxsources, xpic->pic_irqbase);
	}
	slot = maybe_slot;
#if 0
	printf("%s: pic_sourcebase=%zu pic_maxsources=%zu\n",
	    pic->pic_name, pic_sourcebase, pic->pic_maxsources);
#endif
	KASSERTMSG(pic->pic_maxsources <= PIC_MAXSOURCES, "%zu",
	    pic->pic_maxsources);
	KASSERT(pic_sourcebase + pic->pic_maxsources <= PIC_MAXMAXSOURCES);
	sourcebase = pic_sourcebase;
	pic_sourcebase += pic->pic_maxsources;
	if (pic_lastbase < irqbase + pic->pic_maxsources)
		pic_lastbase = irqbase + pic->pic_maxsources;
	mutex_exit(&pic_lock);

	/*
	 * Allocate a pointer to each cpu's evcnts and then, for each cpu,
	 * allocate its evcnts and then attach an evcnt for each pin.
	 * We can't allocate the evcnt structures directly since
	 * percpu will move the contents of percpu memory around and
	 * corrupt the pointers in the evcnts themselves.  Remember, any
	 * problem can be solved with sufficient indirection.
	 */
	pic->pic_percpu = percpu_create(sizeof(struct pic_percpu),
	    pic_percpu_allocate, NULL, pic);

	pic->pic_sources = &pic_sources[sourcebase];
	pic->pic_irqbase = irqbase;
	pic->pic_id = slot;
#ifdef __HAVE_PIC_SET_PRIORITY
	KASSERT((slot == 0) == (pic->pic_ops->pic_set_priority != NULL));
#endif
#ifdef MULTIPROCESSOR
	KASSERT((pic->pic_cpus != NULL) == (pic->pic_ops->pic_ipi_send != NULL));
#endif
	pic_list[slot] = pic;

	return irqbase;
}

int
pic_alloc_irq(struct pic_softc *pic)
{
	int irq;

	for (irq = 0; irq < pic->pic_maxsources; irq++) {
		if (pic->pic_sources[irq] == NULL)
			return irq;
	}

	return -1;
}

static void
pic_percpu_evcnt_attach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	evcnt_attach_dynamic(&pcpu->pcpu_evs[is->is_irq], EVCNT_TYPE_INTR, NULL,
	    pcpu->pcpu_name, is->is_source);
}
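
/*
 * Establish a handler for (pic, irq) at the given IPL.  All sources are
 * kept in pic__iplsources[] packed by IPL, with pic_ipl_offset[ipl]
 * marking the start of each IPL's run, so the number of sources at an IPL
 * is pic_ipl_offset[ipl+1] - pic_ipl_offset[ipl].  A sketch with made-up
 * numbers: if pic_ipl_offset[] is { 0, 0, 2, 2, ... }, IPL 1 owns slots
 * 0-1; establishing a third IPL 1 source shifts the slots above it up by
 * one, bumps pic_ipl_offset[2..NIPL], and places the new source at slot 2.
 */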
void *
pic_establish_intr(struct pic_softc *pic, int irq, int ipl, int type,
	int (*func)(void *), void *arg, const char *xname)
{
	struct intrsource *is;
	int off, nipl;

	if (pic->pic_sources[irq]) {
		printf("pic_establish_intr: pic %s irq %d already present\n",
		    pic->pic_name, irq);
		return NULL;
	}

	is = kmem_zalloc(sizeof(*is), KM_SLEEP);
	is->is_pic = pic;
	is->is_irq = irq;
	is->is_ipl = ipl;
	is->is_type = type & 0xff;
	is->is_func = func;
	is->is_arg = arg;
#ifdef MULTIPROCESSOR
	is->is_mpsafe = (type & IST_MPSAFE) || ipl != IPL_VM;
#endif

	if (pic->pic_ops->pic_source_name)
		(*pic->pic_ops->pic_source_name)(pic, irq, is->is_source,
		    sizeof(is->is_source));
	else
		snprintf(is->is_source, sizeof(is->is_source), "irq %d", irq);

	/*
	 * Now attach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_attach, is);

	pic->pic_sources[irq] = is;

	/*
	 * First try to use an existing slot which is empty.
	 */
	for (off = pic_ipl_offset[ipl]; off < pic_ipl_offset[ipl+1]; off++) {
		if (pic__iplsources[off] == NULL) {
			is->is_iplidx = off - pic_ipl_offset[ipl];
			pic__iplsources[off] = is;
			goto unblock;
		}
	}

	/*
	 * Move up all the sources by one.
	 */
	if (ipl < NIPL) {
		off = pic_ipl_offset[ipl+1];
		memmove(&pic__iplsources[off+1], &pic__iplsources[off],
		    sizeof(pic__iplsources[0]) * (pic_ipl_offset[NIPL] - off));
	}

	/*
	 * Advance the offset of all IPLs higher than this.  Include an
	 * extra one as well.  Thus the number of sources per ipl is
	 * pic_ipl_offset[ipl+1] - pic_ipl_offset[ipl].
	 */
	for (nipl = ipl + 1; nipl <= NIPL; nipl++)
		pic_ipl_offset[nipl]++;

	/*
	 * Insert into the previously made position at the end of this IPL's
	 * sources.
	 */
	off = pic_ipl_offset[ipl + 1] - 1;
	is->is_iplidx = off - pic_ipl_offset[ipl];
	pic__iplsources[off] = is;

	(*pic->pic_ops->pic_establish_irq)(pic, is);

unblock:
	(*pic->pic_ops->pic_unblock_irqs)(pic, is->is_irq & ~0x1f,
	    __BIT(is->is_irq & 0x1f));

	if (xname) {
		if (is->is_xname == NULL)
			is->is_xname = kmem_zalloc(INTRDEVNAMEBUF, KM_SLEEP);
		if (is->is_xname[0] != '\0')
			strlcat(is->is_xname, ", ", INTRDEVNAMEBUF);
		strlcat(is->is_xname, xname, INTRDEVNAMEBUF);
	}

	/* We're done. */
	return is;
}

static void
pic_percpu_evcnt_deattach(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct intrsource * const is = v1;

	KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
	evcnt_detach(&pcpu->pcpu_evs[is->is_irq]);
}

void
pic_disestablish_source(struct intrsource *is)
{
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	KASSERT(is == pic->pic_sources[irq]);

	(*pic->pic_ops->pic_block_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
	pic->pic_sources[irq] = NULL;
	pic__iplsources[pic_ipl_offset[is->is_ipl] + is->is_iplidx] = NULL;
	if (is->is_xname != NULL) {
		kmem_free(is->is_xname, INTRDEVNAMEBUF);
		is->is_xname = NULL;
	}
	/*
	 * Now detach the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_deattach, is);

	kmem_free(is, sizeof(*is));
}
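
/*
 * Public entry points.  A minimal usage sketch (hypothetical driver;
 * MYDEV_IRQ, mydev_intr and sc are stand-ins, not part of this file):
 */
#if 0
	void *ih = intr_establish(MYDEV_IRQ, IPL_VM, IST_LEVEL | IST_MPSAFE,
	    mydev_intr, sc);
	if (ih == NULL)
		aprint_error("couldn't establish interrupt\n");
#endif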
void *
intr_establish(int irq, int ipl, int type, int (*func)(void *), void *arg)
{
	return intr_establish_xname(irq, ipl, type, func, arg, NULL);
}

void *
intr_establish_xname(int irq, int ipl, int type, int (*func)(void *), void *arg,
    const char *xname)
{
	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		if (pic->pic_irqbase <= irq
		    && irq < pic->pic_irqbase + pic->pic_maxsources) {
			return pic_establish_intr(pic, irq - pic->pic_irqbase,
			    ipl, type, func, arg, xname);
		}
	}

	return NULL;
}

void
intr_disestablish(void *ih)
{
	struct intrsource * const is = ih;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	pic_disestablish_source(is);
}

void
intr_mask(void *ih)
{
	struct intrsource * const is = ih;
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	if (atomic_inc_32_nv(&is->is_mask_count) == 1)
		(*pic->pic_ops->pic_block_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
}

void
intr_unmask(void *ih)
{
	struct intrsource * const is = ih;
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	if (atomic_dec_32_nv(&is->is_mask_count) == 0)
		(*pic->pic_ops->pic_unblock_irqs)(pic, irq & ~0x1f, __BIT(irq & 0x1f));
}

const char *
intr_string(intr_handle_t irq, char *buf, size_t len)
{
	for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		if (pic->pic_irqbase <= irq
		    && irq < pic->pic_irqbase + pic->pic_maxsources) {
			struct intrsource * const is = pic->pic_sources[irq - pic->pic_irqbase];
			snprintf(buf, len, "%s %s", pic->pic_name, is->is_source);
			return buf;
		}
	}

	return NULL;
}
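
/*
 * The "intrid" strings matched below (and produced by
 * interrupt_construct_intrids()) have the form "<pic_name> <source>",
 * the same format intr_string() emits.
 */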
static struct intrsource *
intr_get_source(const char *intrid)
{
	struct intrsource *is;
	intrid_t buf;
	size_t slot;
	int irq;

	KASSERT(mutex_owned(&cpu_lock));

	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		for (irq = 0; irq < pic->pic_maxsources; irq++) {
			is = pic->pic_sources[irq];
			if (is == NULL || is->is_source[0] == '\0')
				continue;

			snprintf(buf, sizeof(buf), "%s %s",
			    pic->pic_name, is->is_source);
			if (strcmp(buf, intrid) == 0)
				return is;
		}
	}

	return NULL;
}

struct intrids_handler *
interrupt_construct_intrids(const kcpuset_t *cpuset)
{
	struct intrids_handler *iih;
	struct intrsource *is;
	int count, irq, n;
	size_t slot;

	if (kcpuset_iszero(cpuset))
		return NULL;

	count = 0;
	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic != NULL && pic->pic_irqbase >= 0) {
			for (irq = 0; irq < pic->pic_maxsources; irq++) {
				is = pic->pic_sources[irq];
				if (is && is->is_source[0] != '\0')
					count++;
			}
		}
	}

	iih = kmem_zalloc(sizeof(int) + sizeof(intrid_t) * count, KM_SLEEP);
	iih->iih_nids = count;

	for (n = 0, slot = 0; n < count && slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		for (irq = 0; irq < pic->pic_maxsources; irq++) {
			is = pic->pic_sources[irq];
			if (is == NULL || is->is_source[0] == '\0')
				continue;

			snprintf(iih->iih_intrids[n++], sizeof(intrid_t), "%s %s",
			    pic->pic_name, is->is_source);
		}
	}

	return iih;
}

void
interrupt_destruct_intrids(struct intrids_handler *iih)
{
	if (iih == NULL)
		return;

	kmem_free(iih, sizeof(int) + sizeof(intrid_t) * iih->iih_nids);
}

void
interrupt_get_available(kcpuset_t *cpuset)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	kcpuset_zero(cpuset);

	mutex_enter(&cpu_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0)
			kcpuset_set(cpuset, cpu_index(ci));
	}
	mutex_exit(&cpu_lock);
}

void
interrupt_get_devname(const char *intrid, char *buf, size_t len)
{
	struct intrsource *is;

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is == NULL || is->is_xname == NULL)
		buf[0] = '\0';
	else
		strlcpy(buf, is->is_xname, len);
	mutex_exit(&cpu_lock);
}

struct interrupt_get_count_arg {
	struct intrsource *is;
	uint64_t count;
	u_int cpu_idx;
};

static void
interrupt_get_count_cb(void *v0, void *v1, struct cpu_info *ci)
{
	struct pic_percpu * const pcpu = v0;
	struct interrupt_get_count_arg * const arg = v1;

	if (arg->cpu_idx != cpu_index(ci))
		return;

	arg->count = pcpu->pcpu_evs[arg->is->is_irq].ev_count;
}

uint64_t
interrupt_get_count(const char *intrid, u_int cpu_idx)
{
	struct interrupt_get_count_arg arg;
	struct intrsource *is;
	uint64_t count;

	count = 0;

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is != NULL && is->is_pic != NULL) {
		arg.is = is;
		arg.count = 0;
		arg.cpu_idx = cpu_idx;
		percpu_foreach(is->is_pic->pic_percpu, interrupt_get_count_cb, &arg);
		count = arg.count;
	}
	mutex_exit(&cpu_lock);

	return count;
}

#ifdef MULTIPROCESSOR
void
interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset)
{
	struct intrsource *is;
	struct pic_softc *pic;

	kcpuset_zero(cpuset);

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is != NULL) {
		pic = is->is_pic;
		if (pic && pic->pic_ops->pic_get_affinity)
			pic->pic_ops->pic_get_affinity(pic, is->is_irq,
			    cpuset);
	}
	mutex_exit(&cpu_lock);
}

int
interrupt_distribute_handler(const char *intrid, const kcpuset_t *newset,
    kcpuset_t *oldset)
{
	struct intrsource *is;
	int error;

	mutex_enter(&cpu_lock);
	is = intr_get_source(intrid);
	if (is == NULL) {
		error = ENOENT;
	} else {
		error = interrupt_distribute(is, newset, oldset);
	}
	mutex_exit(&cpu_lock);

	return error;
}

int
interrupt_distribute(void *ih, const kcpuset_t *newset, kcpuset_t *oldset)
{
	struct intrsource * const is = ih;
	struct pic_softc * const pic = is->is_pic;

	if (pic == NULL)
		return EOPNOTSUPP;
	if (pic->pic_ops->pic_set_affinity == NULL ||
	    pic->pic_ops->pic_get_affinity == NULL)
		return EOPNOTSUPP;

	if (!is->is_mpsafe)
		return EINVAL;

	if (oldset != NULL)
		pic->pic_ops->pic_get_affinity(pic, is->is_irq, oldset);

	return pic->pic_ops->pic_set_affinity(pic, is->is_irq, newset);
}
#endif