/* pic.c revision 1.4 */
1/* $NetBSD: pic.c,v 1.4 2008/12/30 05:43:14 matt Exp $ */ 2/*- 3 * Copyright (c) 2008 The NetBSD Foundation, Inc. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to The NetBSD Foundation 7 * by Matt Thomas. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * POSSIBILITY OF SUCH DAMAGE. 
29 */ 30#include <sys/cdefs.h> 31__KERNEL_RCSID(0, "$NetBSD: pic.c,v 1.4 2008/12/30 05:43:14 matt Exp $"); 32 33#define _INTR_PRIVATE 34#include <sys/param.h> 35#include <sys/evcnt.h> 36#include <sys/atomic.h> 37#include <sys/malloc.h> 38#include <sys/mallocvar.h> 39#include <sys/atomic.h> 40 41#include <arm/armreg.h> 42#include <arm/cpu.h> 43#include <arm/cpufunc.h> 44 45#include <arm/pic/picvar.h> 46 47MALLOC_DEFINE(M_INTRSOURCE, "intrsource", "interrupt source"); 48 49static uint32_t 50 pic_find_pending_irqs_by_ipl(struct pic_softc *, size_t, uint32_t, int); 51static struct pic_softc * 52 pic_list_find_pic_by_pending_ipl(uint32_t); 53static void 54 pic_deliver_irqs(struct pic_softc *, int, void *); 55static void 56 pic_list_deliver_irqs(register_t, int, void *); 57 58struct pic_softc *pic_list[PIC_MAXPICS]; 59#if PIC_MAXPICS > 32 60#error PIC_MAXPICS > 32 not supported 61#endif 62volatile uint32_t pic_blocked_pics; 63volatile uint32_t pic_pending_pics; 64volatile uint32_t pic_pending_ipls; 65struct intrsource *pic_sources[PIC_MAXMAXSOURCES]; 66struct intrsource *pic__iplsources[PIC_MAXMAXSOURCES]; 67struct intrsource **pic_iplsource[NIPL] = { 68 [0 ... 
NIPL-1] = pic__iplsources, 69}; 70size_t pic_ipl_offset[NIPL+1]; 71size_t pic_sourcebase; 72static struct evcnt pic_deferral_ev = 73 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "deferred", "intr"); 74EVCNT_ATTACH_STATIC(pic_deferral_ev); 75 76 77 78int 79pic_handle_intr(void *arg) 80{ 81 struct pic_softc * const pic = arg; 82 int rv; 83 84 rv = (*pic->pic_ops->pic_find_pending_irqs)(pic); 85 86 return rv > 0; 87} 88 89void 90pic_mark_pending_source(struct pic_softc *pic, struct intrsource *is) 91{ 92 const uint32_t ipl_mask = __BIT(is->is_ipl); 93 94 atomic_or_32(&pic->pic_pending_irqs[is->is_irq >> 5], 95 __BIT(is->is_irq & 0x1f)); 96 97 atomic_or_32(&pic->pic_pending_ipls, ipl_mask); 98 atomic_or_32(&pic_pending_ipls, ipl_mask); 99 atomic_or_32(&pic_pending_pics, __BIT(pic->pic_id)); 100} 101 102void 103pic_mark_pending(struct pic_softc *pic, int irq) 104{ 105 struct intrsource * const is = pic->pic_sources[irq]; 106 107 KASSERT(irq < pic->pic_maxsources); 108 KASSERT(is != NULL); 109 110 pic_mark_pending_source(pic, is); 111} 112 113uint32_t 114pic_mark_pending_sources(struct pic_softc *pic, size_t irq_base, 115 uint32_t pending) 116{ 117 struct intrsource ** const isbase = &pic->pic_sources[irq_base]; 118 struct intrsource *is; 119 volatile uint32_t *ipending = &pic->pic_pending_irqs[irq_base >> 5]; 120 uint32_t ipl_mask = 0; 121 122 if (pending == 0) 123 return ipl_mask; 124 125 KASSERT((irq_base & 31) == 0); 126 127 (*pic->pic_ops->pic_block_irqs)(pic, irq_base, pending); 128 129 atomic_or_32(ipending, pending); 130 while (pending != 0) { 131 int n = ffs(pending); 132 if (n-- == 0) 133 break; 134 is = isbase[n]; 135 KASSERT(is != NULL); 136 KASSERT(irq_base <= is->is_irq && is->is_irq < irq_base + 32); 137 pending &= ~__BIT(n); 138 ipl_mask |= __BIT(is->is_ipl); 139 } 140 141 atomic_or_32(&pic->pic_pending_ipls, ipl_mask); 142 atomic_or_32(&pic_pending_ipls, ipl_mask); 143 atomic_or_32(&pic_pending_pics, __BIT(pic->pic_id)); 144 145 return ipl_mask; 146} 147 
/*
 * Reduce `pending` (a 32-bit IRQ bitmap for the group starting at
 * irq_base) to only those bits whose source is registered at exactly
 * `ipl`.  Returns the filtered mask.
 */
uint32_t
pic_find_pending_irqs_by_ipl(struct pic_softc *pic, size_t irq_base,
	uint32_t pending, int ipl)
{
	uint32_t ipl_irq_mask = 0;
	uint32_t irq_mask;

	for (;;) {
		int irq = ffs(pending);
		if (irq-- == 0)
			return ipl_irq_mask;

		irq_mask = __BIT(irq);
		KASSERT(pic->pic_sources[irq_base + irq] != NULL);
		if (pic->pic_sources[irq_base + irq]->is_ipl == ipl)
			ipl_irq_mask |= irq_mask;

		pending &= ~irq_mask;
	}
}

/*
 * Invoke one interrupt source's handler.  A source established with a
 * NULL argument gets the trapframe instead; if neither an argument nor
 * a frame is available, delivery is counted as deferred and skipped.
 * The source's event counter is bumped on every actual dispatch.
 */
void
pic_dispatch(struct intrsource *is, void *frame)
{
	int rv;	/* handler's return value; captured but otherwise unused */

	if (__predict_false(is->is_arg == NULL)
	    && __predict_true(frame != NULL)) {
		rv = (*is->is_func)(frame);
	} else if (__predict_true(is->is_arg != NULL)) {
		rv = (*is->is_func)(is->is_arg);
	} else {
		/* No argument and no frame: can't call the handler now. */
		pic_deferral_ev.ev_count++;
		return;
	}
	is->is_ev.ev_count++;
}

/*
 * Deliver every pending IRQ on `pic` that is registered at exactly
 * `ipl`.  Called with interrupts disabled; interrupts are re-enabled
 * (cpsie) only around each handler invocation.  IRQs that were
 * dispatched are remembered in the blocked bitmap so they can be
 * re-enabled at the hardware later by pic_list_unblock_irqs().
 */
void
pic_deliver_irqs(struct pic_softc *pic, int ipl, void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct intrsource *is;
	volatile uint32_t *ipending = pic->pic_pending_irqs;
	volatile uint32_t *iblocked = pic->pic_blocked_irqs;
	size_t irq_base;
#if PIC_MAXSOURCES > 32
	size_t irq_count;
#endif
	uint32_t pending_irqs;
	uint32_t blocked_irqs;
	int irq;
	bool progress = false;

	KASSERT(pic->pic_pending_ipls & ipl_mask);

	irq_base = 0;
#if PIC_MAXSOURCES > 32
	irq_count = 0;
#endif

	for (;;) {
		pending_irqs = pic_find_pending_irqs_by_ipl(pic, irq_base,
		    *ipending, ipl);
		KASSERT((pending_irqs & *ipending) == pending_irqs);
		KASSERT((pending_irqs & ~(*ipending)) == 0);
		if (pending_irqs == 0) {
#if PIC_MAXSOURCES > 32
			/*
			 * Nothing at this IPL in this 32-IRQ group; advance
			 * to the next group, wrapping the bitmap pointers
			 * once all of this PIC's groups have been scanned.
			 */
			irq_count += 32;
			if (__predict_true(irq_count >= pic->pic_maxsources))
				break;
			irq_base += 32;
			ipending++;
			iblocked++;
			if (irq_base >= pic->pic_maxsources) {
				ipending = pic->pic_pending_irqs;
				iblocked = pic->pic_blocked_irqs;
			}
			continue;
#else
			break;
#endif
		}
		progress = true;
		blocked_irqs = pending_irqs;
		do {
			irq = ffs(pending_irqs) - 1;
			KASSERT(irq >= 0);

			/* Consume the pending bit before dispatching. */
			atomic_and_32(ipending, ~__BIT(irq));
			is = pic->pic_sources[irq_base + irq];
			if (is != NULL) {
				/* Run the handler with interrupts enabled. */
				cpsie(I32_bit);
				pic_dispatch(is, frame);
				cpsid(I32_bit);
			} else {
				/* Pending bit with no source: should never happen. */
				KASSERT(0);
				blocked_irqs &= ~__BIT(irq);
			}
			/* Handlers may have marked more IRQs pending; rescan. */
			pending_irqs = pic_find_pending_irqs_by_ipl(pic,
			    irq_base, *ipending, ipl);
		} while (pending_irqs);
		if (blocked_irqs) {
			atomic_or_32(iblocked, blocked_irqs);
			atomic_or_32(&pic_blocked_pics, __BIT(pic->pic_id));
		}
	}

	KASSERT(progress);
	/*
	 * Since interrupts are disabled, we don't have to be too careful
	 * about these.
	 */
	if (atomic_and_32_nv(&pic->pic_pending_ipls, ~ipl_mask) == 0)
		atomic_and_32(&pic_pending_pics, ~__BIT(pic->pic_id));
}

/*
 * Re-enable (unblock), at the hardware, every IRQ that was blocked
 * during delivery, on every PIC flagged in pic_blocked_pics.
 * Called from pic_do_pending_ints() with interrupts disabled
 * (pic_blocked_pics is cleared non-atomically here).
 */
static void
pic_list_unblock_irqs(void)
{
	uint32_t blocked_pics = pic_blocked_pics;

	pic_blocked_pics = 0;
	for (;;) {
		struct pic_softc *pic;
#if PIC_MAXSOURCES > 32
		volatile uint32_t *iblocked;
		uint32_t blocked;
		size_t irq_base;
#endif

		int pic_id = ffs(blocked_pics);
		if (pic_id-- == 0)
			return;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
#if PIC_MAXSOURCES > 32
		/* Walk each 32-IRQ group of this PIC's blocked bitmap. */
		for (irq_base = 0, iblocked = pic->pic_blocked_irqs;
		     irq_base < pic->pic_maxsources;
		     irq_base += 32, iblocked++) {
			if ((blocked = *iblocked) != 0) {
				(*pic->pic_ops->pic_unblock_irqs)(pic,
				    irq_base, blocked);
				atomic_and_32(iblocked, ~blocked);
			}
		}
#else
		KASSERT(pic->pic_blocked_irqs[0] != 0);
		(*pic->pic_ops->pic_unblock_irqs)(pic,
		    0, pic->pic_blocked_irqs[0]);
		pic->pic_blocked_irqs[0] = 0;
#endif
		blocked_pics &= ~__BIT(pic_id);
	}
}


/*
 * Return the first PIC (lowest pic_id) that has a pending IRQ at any
 * of the IPLs in ipl_mask, or NULL if none does.
 */
struct pic_softc *
pic_list_find_pic_by_pending_ipl(uint32_t ipl_mask)
{
	uint32_t pending_pics = pic_pending_pics;
	struct pic_softc *pic;

	for (;;) {
		int pic_id = ffs(pending_pics);
		if (pic_id-- == 0)
			return NULL;

		pic = pic_list[pic_id];
		KASSERT(pic != NULL);
		if (pic->pic_pending_ipls & ipl_mask)
			return pic;
		pending_pics &= ~__BIT(pic_id);
	}
}

/*
 * Deliver all IRQs pending at `ipl` across every PIC, then clear that
 * IPL from the global pending mask.  `psw` is accepted but unused here.
 */
void
pic_list_deliver_irqs(register_t psw, int ipl, void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct pic_softc *pic;

	while ((pic = pic_list_find_pic_by_pending_ipl(ipl_mask)) != NULL) {
		pic_deliver_irqs(pic, ipl, frame);
		KASSERT((pic->pic_pending_ipls & ipl_mask) == 0);
	}
	atomic_and_32(&pic_pending_ipls, ~ipl_mask);
}

/*
 * Run all pending interrupts at IPLs above `newipl` (highest first),
 * then lower the current priority level to `newipl`.  Called with
 * interrupts disabled (psw is the saved state; unused here).  At
 * IPL_HIGH nothing can be delivered, so return immediately.
 */
void
pic_do_pending_ints(register_t psw, int newipl, void *frame)
{
	struct cpu_info * const ci = curcpu();
	if (__predict_false(newipl == IPL_HIGH))
		return;
	/*
	 * Bits below newipl sum to less than __BIT(newipl), so after
	 * masking off newipl's own bit this comparison is true exactly
	 * when some IPL above newipl is pending.
	 */
	while ((pic_pending_ipls & ~__BIT(newipl)) > __BIT(newipl)) {
		KASSERT(pic_pending_ipls < __BIT(NIPL));
		for (;;) {
			/* Highest pending IPL first. */
			int ipl = 31 - __builtin_clz(pic_pending_ipls);
			KASSERT(ipl < NIPL);
			if (ipl <= newipl)
				break;

			ci->ci_cpl = ipl;
			pic_list_deliver_irqs(psw, ipl, frame);
			pic_list_unblock_irqs();
		}
	}
	if (ci->ci_cpl != newipl)
		ci->ci_cpl = newipl;
#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}

/*
 * Register a PIC.  irqbase < 0 means "no fixed IRQ range" (first free
 * slot is used immediately); otherwise the PIC's [irqbase, irqbase +
 * maxsources) range must not overlap any already-registered PIC, or we
 * panic.  The PIC is given a slice of the global pic_sources[] array.
 */
void
pic_add(struct pic_softc *pic, int irqbase)
{
	int slot, maybe_slot = -1;

	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const xpic = pic_list[slot];
		if (xpic == NULL) {
			if (maybe_slot < 0)
				maybe_slot = slot;
			if (irqbase < 0)
				break;
			continue;
		}
		/* Ranges without a fixed base cannot conflict. */
		if (irqbase < 0 || xpic->pic_irqbase < 0)
			continue;
		if (irqbase >= xpic->pic_irqbase + xpic->pic_maxsources)
			continue;
		if (irqbase + pic->pic_maxsources <= xpic->pic_irqbase)
			continue;
		panic("pic_add: pic %s (%zu sources @ irq %u) conflicts"
		    " with pic %s (%zu sources @ irq %u)",
		    pic->pic_name, pic->pic_maxsources, irqbase,
		    xpic->pic_name, xpic->pic_maxsources, xpic->pic_irqbase);
	}
	slot = maybe_slot;
#if 0
	printf("%s: pic_sourcebase=%zu pic_maxsources=%zu\n",
	    pic->pic_name, pic_sourcebase, pic->pic_maxsources);
#endif
	KASSERT(pic_sourcebase + pic->pic_maxsources <= PIC_MAXMAXSOURCES);

	pic->pic_sources = &pic_sources[pic_sourcebase];
	pic->pic_irqbase = irqbase;
	pic_sourcebase += pic->pic_maxsources;
	pic->pic_id = slot;
	pic_list[slot] = pic;
}

/*
 * Return the first unused IRQ number on `pic`, or -1 if all of its
 * sources are taken.
 */
int
pic_alloc_irq(struct pic_softc *pic)
{
	int irq;

	for (irq = 0; irq < pic->pic_maxsources; irq++) {
		if (pic->pic_sources[irq] == NULL)
			return irq;
	}

	return -1;
}

/*
 * Establish an interrupt handler on (pic, irq) at the given ipl/type.
 * Allocates and fills in the intrsource, attaches its event counter,
 * inserts it into the per-IPL source table (reusing an empty slot if
 * one exists, otherwise shifting higher-IPL entries up by one), then
 * programs and unblocks the IRQ at the hardware.  Returns the source
 * (opaque handle) or NULL if the IRQ is taken or allocation fails.
 */
void *
pic_establish_intr(struct pic_softc *pic, int irq, int ipl, int type,
	int (*func)(void *), void *arg)
{
	struct intrsource *is;
	int off, nipl;

	if (pic->pic_sources[irq]) {
		printf("pic_establish_intr: pic %s irq %d already present\n",
		    pic->pic_name, irq);
		return NULL;
	}

	is = malloc(sizeof(*is), M_INTRSOURCE, M_NOWAIT|M_ZERO);
	if (is == NULL)
		return NULL;

	is->is_pic = pic;
	is->is_irq = irq;
	is->is_ipl = ipl;
	is->is_type = type;
	is->is_func = func;
	is->is_arg = arg;

	/* Let the backend name the source, else fall back to "irq N". */
	if (pic->pic_ops->pic_source_name)
		(*pic->pic_ops->pic_source_name)(pic, irq, is->is_source,
		    sizeof(is->is_source));
	else
		snprintf(is->is_source, sizeof(is->is_source), "irq %d", irq);

	evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL,
	    pic->pic_name, is->is_source);

	pic->pic_sources[irq] = is;

	/*
	 * First try to use an existing slot which is empty.
	 */
	for (off = pic_ipl_offset[ipl]; off < pic_ipl_offset[ipl+1]; off++) {
		if (pic__iplsources[off] == NULL) {
			is->is_iplidx = off - pic_ipl_offset[ipl];
			pic__iplsources[off] = is;
			return is;
		}
	}

	/*
	 * Move up all the sources by one.
	 */
	if (ipl < NIPL) {
		off = pic_ipl_offset[ipl+1];
		memmove(&pic__iplsources[off+1], &pic__iplsources[off],
		    sizeof(pic__iplsources[0]) * (pic_ipl_offset[NIPL] - off));
	}

	/*
	 * Advance the offset of all IPLs higher than this.  Include an
	 * extra one as well.  Thus the number of sources per ipl is
	 * pic_ipl_offset[ipl+1] - pic_ipl_offset[ipl].
	 */
	for (nipl = ipl + 1; nipl <= NIPL; nipl++)
		pic_ipl_offset[nipl]++;

	/*
	 * Insert into the previously made position at the end of this IPL's
	 * sources.
	 */
	off = pic_ipl_offset[ipl + 1] - 1;
	is->is_iplidx = off - pic_ipl_offset[ipl];
	pic__iplsources[off] = is;

	(*pic->pic_ops->pic_establish_irq)(pic, is);

	(*pic->pic_ops->pic_unblock_irqs)(pic, is->is_irq & ~0x1f,
	    __BIT(is->is_irq & 0x1f));

	/* We're done. */
	return is;
}

/*
 * Tear down an interrupt source: block it at the hardware, remove it
 * from both the per-PIC and per-IPL tables, detach its event counter,
 * and free it.
 */
void
pic_disestablish_source(struct intrsource *is)
{
	struct pic_softc * const pic = is->is_pic;
	const int irq = is->is_irq;

	(*pic->pic_ops->pic_block_irqs)(pic, irq & ~31, __BIT(irq));
	pic->pic_sources[irq] = NULL;
	pic__iplsources[pic_ipl_offset[is->is_ipl] + is->is_iplidx] = NULL;
	evcnt_detach(&is->is_ev);

	free(is, M_INTRSOURCE);
}

/*
 * Raise the current priority level to newipl (never lowers it).
 * Returns the previous level.
 */
int
_splraise(int newipl)
{
	struct cpu_info * const ci = curcpu();
	const int oldipl = ci->ci_cpl;
	KASSERT(newipl < NIPL);
	if (newipl > ci->ci_cpl)
		ci->ci_cpl = newipl;
	return oldipl;
}

/*
 * Lower the current priority level to newipl, delivering any pending
 * interrupts that the lower level now admits.  Returns the previous
 * level.
 */
int
_spllower(int newipl)
{
	struct cpu_info * const ci = curcpu();
	const int oldipl = ci->ci_cpl;
	KASSERT(panicstr || newipl <= ci->ci_cpl);
	if (newipl < ci->ci_cpl) {
		register_t psw = disable_interrupts(I32_bit);
		pic_do_pending_ints(psw, newipl, NULL);
		restore_interrupts(psw);
	}
	return oldipl;
}

/*
 * Restore a priority level previously saved by a spl*() call,
 * delivering pending interrupts when that lowers the level.
 */
void
splx(int savedipl)
{
	struct cpu_info * const ci = curcpu();
	KASSERT(savedipl < NIPL);
	if (savedipl < ci->ci_cpl) {
		register_t psw = disable_interrupts(I32_bit);
		pic_do_pending_ints(psw, savedipl, NULL);
		restore_interrupts(psw);
	}
	ci->ci_cpl = savedipl;
}

/*
 * Establish a handler on a global IRQ number: find the PIC whose
 * [irqbase, irqbase + maxsources) range covers it and delegate to
 * pic_establish_intr() with the PIC-relative IRQ.  Returns NULL if no
 * PIC claims the IRQ.
 */
void *
intr_establish(int irq, int ipl, int type, int (*func)(void *), void *arg)
{
	int slot;

	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const pic = pic_list[slot];
		if (pic == NULL || pic->pic_irqbase < 0)
			continue;
		if (pic->pic_irqbase <= irq
		    && irq < pic->pic_irqbase + pic->pic_maxsources) {
			return pic_establish_intr(pic, irq - pic->pic_irqbase,
			    ipl, type, func, arg);
		}
	}

	return NULL;
}

/*
 * Remove a handler established by intr_establish(); ih is the
 * intrsource it returned.
 */
void
intr_disestablish(void *ih)
{
	struct intrsource * const is = ih;
	pic_disestablish_source(is);
}