e500_intr.c revision 1.43
1/* $NetBSD: e500_intr.c,v 1.43 2020/07/06 09:34:16 rin Exp $ */ 2/*- 3 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to The NetBSD Foundation 7 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects 8 * Agency and which was developed by Matt Thomas of 3am Software Foundry. 9 * 10 * This material is based upon work supported by the Defense Advanced Research 11 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under 12 * Contract No. N66001-09-C-2073. 13 * Approved for Public Release, Distribution Unlimited 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions 17 * are met: 18 * 1. Redistributions of source code must retain the above copyright 19 * notice, this list of conditions and the following disclaimer. 20 * 2. Redistributions in binary form must reproduce the above copyright 21 * notice, this list of conditions and the following disclaimer in the 22 * documentation and/or other materials provided with the distribution. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 
 */

#define	__INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: e500_intr.c,v 1.43 2020/07/06 09:34:16 rin Exp $");

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#include "opt_mpc85xx.h"
#include "opt_multiprocessor.h"
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/intr.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/xcall.h>
#include <sys/ipi.h>
#include <sys/bitops.h>
#include <sys/interrupt.h>

#include <uvm/uvm_extern.h>

#ifdef __HAVE_FAST_SOFTINTS
#include <powerpc/softint.h>
#endif

#include <powerpc/spr.h>
#include <powerpc/booke/spr.h>

#include <powerpc/booke/cpuvar.h>
#include <powerpc/booke/e500reg.h>
#include <powerpc/booke/e500var.h>
#include <powerpc/booke/openpicreg.h>

/*
 * Map between kernel IPLs and the OpenPIC current task priority
 * register (CTPR).  IPL_HIGH maps to CTPR 15 and lower IPLs map
 * linearly below it.
 */
#define	IPL2CTPR(ipl)		((ipl) + 15 - IPL_HIGH)
#define	CTPR2IPL(ctpr)		((ctpr) - (15 - IPL_HIGH))

/*
 * Sources at or above IST_TIMER (timer, IPI, message) are per-CPU;
 * everything below is global (routed to the primary CPU).
 */
#define	IST_PERCPU_P(ist)	((ist) >= IST_TIMER)

/*
 * Resolved location of one interrupt source within the OpenPIC:
 * register offsets plus the flat vector index used for the
 * e500_intr_sources[] table and event counters.
 */
struct e500_intr_irq_info {
	bus_addr_t irq_vpr;	/* vector/priority register offset */
	bus_addr_t irq_dr;	/* destination register offset */
	u_int irq_vector;	/* flat index into e500_intr_sources[] */
};

/*
 * Per-vector software state for an established interrupt handler.
 */
struct intr_source {
	int (*is_func)(void *);		/* handler; e500_intr_spurious if unused */
	void *is_arg;			/* handler argument (tf for IST_TIMER) */
	int8_t is_ipl;			/* IPL_NONE marks a free slot */
	uint8_t is_ist;			/* interrupt source type (IST_*) */
	uint8_t is_irq;			/* irq number within its IST group */
	uint8_t is_refcnt;		/* shared-establish reference count */
	bus_size_t is_vpr;		/* cached vector/priority reg offset */
	bus_size_t is_dr;		/* cached destination reg offset */
	char is_source[INTRIDBUF];	/* e.g. "irq 5", "timer 0" */
	char is_xname[INTRDEVNAMEBUF];	/* establishing device's name */
};

/*
 * NOTE(review): .is_irq = -1 is assigned to a uint8_t field and wraps
 * to 255 — presumably an intentional "no irq" sentinel; confirm no
 * chip has 255 usable sources in one group.
 */
#define	INTR_SOURCE_INITIALIZER \
	{ .is_func = e500_intr_spurious, .is_arg = NULL, \
	.is_irq = -1, .is_ipl = IPL_NONE, .is_ist = IST_NONE, \
	.is_source = "", .is_xname = "", }

/*
 * irq-number -> human-readable-name mapping entry; tables are
 * terminated by an entry whose in_name is empty.
 *
 * NOTE(review): in_name[15] holds at most 15 characters; a name of
 * exactly 15 chars (e.g. "etsec1-g1-error" below) fills the array
 * with no NUL terminator, yet lookups return it as a C string —
 * verify callers tolerate this or shorten those names.
 */
struct e500_intr_name {
	uint8_t in_irq;
	const char in_name[15];
};

/* On-chip source names common to all supported e500 chips. */
static const struct e500_intr_name e500_onchip_intr_names[] = {
	{ ISOURCE_L2, "l2" },
	{ ISOURCE_ECM, "ecm" },
	{ ISOURCE_DDR, "ddr" },
	{ ISOURCE_LBC, "lbc" },
	{ ISOURCE_DMA_CHAN1, "dma-chan1" },
	{ ISOURCE_DMA_CHAN2, "dma-chan2" },
	{ ISOURCE_DMA_CHAN3, "dma-chan3" },
	{ ISOURCE_DMA_CHAN4, "dma-chan4" },
	{ ISOURCE_PCI1, "pci1" },
	{ ISOURCE_PCIEX2, "pcie2" },
	{ ISOURCE_PCIEX, "pcie1" },
	{ ISOURCE_PCIEX3, "pcie3" },
	{ ISOURCE_USB1, "usb1" },
	{ ISOURCE_ETSEC1_TX, "etsec1-tx" },
	{ ISOURCE_ETSEC1_RX, "etsec1-rx" },
	{ ISOURCE_ETSEC3_TX, "etsec3-tx" },
	{ ISOURCE_ETSEC3_RX, "etsec3-rx" },
	{ ISOURCE_ETSEC3_ERR, "etsec3-err" },
	{ ISOURCE_ETSEC1_ERR, "etsec1-err" },
	{ ISOURCE_ETSEC2_TX, "etsec2-tx" },
	{ ISOURCE_ETSEC2_RX, "etsec2-rx" },
	{ ISOURCE_ETSEC4_TX, "etsec4-tx" },
	{ ISOURCE_ETSEC4_RX, "etsec4-rx" },
	{ ISOURCE_ETSEC4_ERR, "etsec4-err" },
	{ ISOURCE_ETSEC2_ERR, "etsec2-err" },
	{ ISOURCE_DUART, "duart" },
	{ ISOURCE_I2C, "i2c" },
	{ ISOURCE_PERFMON, "perfmon" },
	{ ISOURCE_SECURITY1, "sec1" },
	{ ISOURCE_GPIO, "gpio" },
	{ ISOURCE_SRIO_EWPU, "srio-ewpu" },
	{ ISOURCE_SRIO_ODBELL, "srio-odbell" },
	{ ISOURCE_SRIO_IDBELL, "srio-idbell" },
	{ ISOURCE_SRIO_OMU1, "srio-omu1" },
	{ ISOURCE_SRIO_IMU1, "srio-imu1" },
	{ ISOURCE_SRIO_OMU2, "srio-omu2" },
	{ ISOURCE_SRIO_IMU2, "srio-imu2" },
	{ ISOURCE_SECURITY2, "sec2" },
	{ ISOURCE_SPI, "spi" },
	{ ISOURCE_ETSEC1_PTP, "etsec1-ptp" },
	{ ISOURCE_ETSEC2_PTP, "etsec2-ptp" },
	{ ISOURCE_ETSEC3_PTP, "etsec3-ptp" },
	{ ISOURCE_ETSEC4_PTP, "etsec4-ptp" },
	{ ISOURCE_ESDHC, "esdhc" },
	{ 0, "" },
};

/* Boards with no board-specific external names fall back to this. */
const struct e500_intr_name default_external_intr_names[] = {
	{ 0, "" },
};

static const struct e500_intr_name e500_msigroup_intr_names[] = {
	{ 0, "msigroup0" },
	{ 1, "msigroup1" },
	{ 2, "msigroup2" },
	{ 3, "msigroup3" },
	{ 4, "msigroup4" },
	{ 5, "msigroup5" },
	{ 6, "msigroup6" },
	{ 7, "msigroup7" },
	{ 0, "" },
};

static const struct e500_intr_name e500_timer_intr_names[] = {
	{ 0, "timer0" },
	{ 1, "timer1" },
	{ 2, "timer2" },
	{ 3, "timer3" },
	{ 0, "" },
};

static const struct e500_intr_name e500_ipi_intr_names[] = {
	{ 0, "ipi0" },
	{ 1, "ipi1" },
	{ 2, "ipi2" },
	{ 3, "ipi3" },
	{ 0, "" },
};

static const struct e500_intr_name e500_mi_intr_names[] = {
	{ 0, "mi0" },
	{ 1, "mi1" },
	{ 2, "mi2" },
	{ 3, "mi3" },
	{ 0, "" },
};

/*
 * Per-chip description of the interrupt topology: how many sources
 * exist in each IST group, which on-chip sources are valid (bitmap),
 * the name tables, and the base vector (ii_ist_vectors) at which each
 * IST group starts in the flat vector space.
 */
struct e500_intr_info {
	u_int ii_external_sources;
	uint32_t ii_onchip_bitmap[2];
	u_int ii_onchip_sources;
	u_int ii_msigroup_sources;
	u_int ii_ipi_sources;			/* per-cpu */
	u_int ii_timer_sources;			/* per-cpu */
	u_int ii_mi_sources;			/* per-cpu */
	u_int ii_percpu_sources;
	const struct e500_intr_name *ii_external_intr_names;
	const struct e500_intr_name *ii_onchip_intr_names;
	u_int8_t ii_ist_vectors[IST_MAX+1];
};

static kmutex_t e500_intr_lock __cacheline_aligned;
static struct e500_intr_info e500_intr_info;	/* filled in at e500_intr_init() */

/*
 * Generate the const intr_info for one chip from its UC_CHIP##_*
 * source-count constants.  Vector bases are cumulative sums in the
 * fixed order: external, onchip, msigroup, timer, ipi, mi.
 */
#define	INTR_INFO_DECL(lc_chip, UC_CHIP)				\
static const struct e500_intr_info lc_chip##_intr_info = {		\
	.ii_external_sources = UC_CHIP ## _EXTERNALSOURCES,		\
	.ii_onchip_bitmap = UC_CHIP ## _ONCHIPBITMAP,			\
	.ii_onchip_sources = UC_CHIP ## _ONCHIPSOURCES,			\
	.ii_msigroup_sources = UC_CHIP ## _MSIGROUPSOURCES,		\
	.ii_timer_sources = UC_CHIP ## _TIMERSOURCES,			\
	.ii_ipi_sources = UC_CHIP ## _IPISOURCES,			\
	.ii_mi_sources = UC_CHIP ## _MISOURCES,				\
	.ii_percpu_sources = UC_CHIP ## _TIMERSOURCES			\
	    + UC_CHIP ## _IPISOURCES + UC_CHIP ## _MISOURCES,		\
	.ii_external_intr_names = lc_chip ## _external_intr_names,	\
	.ii_onchip_intr_names = lc_chip ## _onchip_intr_names,		\
	.ii_ist_vectors = {						\
		[IST_NONE] = ~0,					\
		[IST_EDGE] = 0,						\
		[IST_LEVEL_LOW] = 0,					\
		[IST_LEVEL_HIGH] = 0,					\
		[IST_PULSE] = 0,					\
		[IST_ONCHIP] = UC_CHIP ## _EXTERNALSOURCES,		\
		[IST_MSIGROUP] = UC_CHIP ## _EXTERNALSOURCES		\
		    + UC_CHIP ## _ONCHIPSOURCES,			\
		[IST_TIMER] = UC_CHIP ## _EXTERNALSOURCES		\
		    + UC_CHIP ## _ONCHIPSOURCES				\
		    + UC_CHIP ## _MSIGROUPSOURCES,			\
		[IST_IPI] = UC_CHIP ## _EXTERNALSOURCES			\
		    + UC_CHIP ## _ONCHIPSOURCES				\
		    + UC_CHIP ## _MSIGROUPSOURCES			\
		    + UC_CHIP ## _TIMERSOURCES,				\
		[IST_MI] = UC_CHIP ## _EXTERNALSOURCES			\
		    + UC_CHIP ## _ONCHIPSOURCES				\
		    + UC_CHIP ## _MSIGROUPSOURCES			\
		    + UC_CHIP ## _TIMERSOURCES				\
		    + UC_CHIP ## _IPISOURCES,				\
		[IST_MAX] = UC_CHIP ## _EXTERNALSOURCES			\
		    + UC_CHIP ## _ONCHIPSOURCES				\
		    + UC_CHIP ## _MSIGROUPSOURCES			\
		    + UC_CHIP ## _TIMERSOURCES				\
		    + UC_CHIP ## _IPISOURCES				\
		    + UC_CHIP ## _MISOURCES,				\
	},								\
}

/*
 * Chip-specific on-chip name tables (extending the common table above)
 * and the matching intr_info instances, each gated by the kernel
 * config option for that chip.
 */
#ifdef MPC8536
#define	mpc8536_external_intr_names default_external_intr_names
const struct e500_intr_name mpc8536_onchip_intr_names[] = {
	{ ISOURCE_SATA2, "sata2" },
	{ ISOURCE_USB2, "usb2" },
	{ ISOURCE_USB3, "usb3" },
	{ ISOURCE_SATA1, "sata1" },
	{ 0, "" },
};

INTR_INFO_DECL(mpc8536, MPC8536);
#endif

#ifdef MPC8544
#define	mpc8544_external_intr_names default_external_intr_names
const struct e500_intr_name mpc8544_onchip_intr_names[] = {
	{ 0, "" },
};

INTR_INFO_DECL(mpc8544, MPC8544);
#endif
#ifdef MPC8548
#define	mpc8548_external_intr_names default_external_intr_names
const struct e500_intr_name mpc8548_onchip_intr_names[] = {
	{ ISOURCE_PCI1, "pci1" },
	{ ISOURCE_PCI2, "pci2" },
	{ 0, "" },
};

INTR_INFO_DECL(mpc8548, MPC8548);
#endif
#ifdef MPC8555
#define	mpc8555_external_intr_names default_external_intr_names
const struct e500_intr_name mpc8555_onchip_intr_names[] = {
	{ ISOURCE_PCI2, "pci2" },
	{ ISOURCE_CPM, "CPM" },
	{ 0, "" },
};

INTR_INFO_DECL(mpc8555, MPC8555);
#endif
#ifdef MPC8568
#define	mpc8568_external_intr_names default_external_intr_names
const struct e500_intr_name mpc8568_onchip_intr_names[] = {
	{ ISOURCE_QEB_LOW, "QEB low" },
	{ ISOURCE_QEB_PORT, "QEB port" },
	{ ISOURCE_QEB_IECC, "QEB iram ecc" },
	{ ISOURCE_QEB_MUECC, "QEB ram ecc" },
	{ ISOURCE_TLU1, "tlu1" },
	{ ISOURCE_QEB_HIGH, "QEB high" },
	{ 0, "" },
};

INTR_INFO_DECL(mpc8568, MPC8568);
#endif
#ifdef MPC8572
#define	mpc8572_external_intr_names default_external_intr_names
const struct e500_intr_name mpc8572_onchip_intr_names[] = {
	{ ISOURCE_PCIEX3_MPC8572, "pcie3" },
	{ ISOURCE_FEC, "fec" },
	{ ISOURCE_PME_GENERAL, "pme" },
	{ ISOURCE_TLU1, "tlu1" },
	{ ISOURCE_TLU2, "tlu2" },
	{ ISOURCE_PME_CHAN1, "pme-chan1" },
	{ ISOURCE_PME_CHAN2, "pme-chan2" },
	{ ISOURCE_PME_CHAN3, "pme-chan3" },
	{ ISOURCE_PME_CHAN4, "pme-chan4" },
	{ ISOURCE_DMA2_CHAN1, "dma2-chan1" },
	{ ISOURCE_DMA2_CHAN2, "dma2-chan2" },
	{ ISOURCE_DMA2_CHAN3, "dma2-chan3" },
	{ ISOURCE_DMA2_CHAN4, "dma2-chan4" },
	{ 0, "" },
};

INTR_INFO_DECL(mpc8572, MPC8572);
#endif

#ifdef P1025
#define	p1025_external_intr_names default_external_intr_names
const struct e500_intr_name p1025_onchip_intr_names[] = {
	{ ISOURCE_PCIEX3_MPC8572, "pcie3" },
	{ ISOURCE_ETSEC1_G1_TX, "etsec1-g1-tx" },
	{ ISOURCE_ETSEC1_G1_RX, "etsec1-g1-rx" },
	{ ISOURCE_ETSEC1_G1_ERR, "etsec1-g1-error" },
	{ ISOURCE_ETSEC2_G1_TX, "etsec2-g1-tx" },
	{ ISOURCE_ETSEC2_G1_RX, "etsec2-g1-rx" },
	{ ISOURCE_ETSEC2_G1_ERR, "etsec2-g1-error" },
	{ ISOURCE_ETSEC3_G1_TX, "etsec3-g1-tx" },
	{ ISOURCE_ETSEC3_G1_RX, "etsec3-g1-rx" },
	{ ISOURCE_ETSEC3_G1_ERR, "etsec3-g1-error" },
	{ ISOURCE_QEB_MUECC, "qeb-low" },
	{ ISOURCE_QEB_HIGH, "qeb-crit" },
	{ ISOURCE_DMA2_CHAN1, "dma2-chan1" },
	{ ISOURCE_DMA2_CHAN2, "dma2-chan2" },
	{ ISOURCE_DMA2_CHAN3, "dma2-chan3" },
	{ ISOURCE_DMA2_CHAN4, "dma2-chan4" },
	{ 0, "" },
};

INTR_INFO_DECL(p1025, P1025);
#endif

#ifdef P2020
#define	p20x0_external_intr_names default_external_intr_names
const struct e500_intr_name p20x0_onchip_intr_names[] = {
	{ ISOURCE_PCIEX3_MPC8572, "pcie3" },
	{ ISOURCE_DMA2_CHAN1, "dma2-chan1" },
	{ ISOURCE_DMA2_CHAN2, "dma2-chan2" },
	{ ISOURCE_DMA2_CHAN3, "dma2-chan3" },
	{ ISOURCE_DMA2_CHAN4, "dma2-chan4" },
	{ 0, "" },
};

INTR_INFO_DECL(p20x0, P20x0);
#endif

#ifdef P1023
#define	p1023_external_intr_names default_external_intr_names
const struct e500_intr_name p1023_onchip_intr_names[] = {
	{ ISOURCE_FMAN, "fman" },
	{ ISOURCE_MDIO, "mdio" },
	{ ISOURCE_QMAN0, "qman0" },
	{ ISOURCE_BMAN0, "bman0" },
	{ ISOURCE_QMAN1, "qman1" },
	{ ISOURCE_BMAN1, "bman1" },
	{ ISOURCE_QMAN2, "qman2" },
	{ ISOURCE_BMAN2, "bman2" },
	{ ISOURCE_SECURITY2_P1023, "sec2" },
	{ ISOURCE_SEC_GENERAL, "sec-general" },
	{ ISOURCE_DMA2_CHAN1, "dma2-chan1" },
	{ ISOURCE_DMA2_CHAN2, "dma2-chan2" },
	{ ISOURCE_DMA2_CHAN3, "dma2-chan3" },
	{ ISOURCE_DMA2_CHAN4, "dma2-chan4" },
	{ 0, "" },
};

INTR_INFO_DECL(p1023, P1023);
#endif

/* Printable names for each IST_* value; indexed by ist. */
static const char ist_names[][12] = {
	[IST_NONE] = "none",
	[IST_EDGE] = "edge",
	[IST_LEVEL_LOW] = "level-",
	[IST_LEVEL_HIGH] = "level+",
	[IST_PULSE] = "pulse",
	[IST_MSI] = "msi",
	[IST_ONCHIP] = "onchip",
	[IST_MSIGROUP] = "msigroup",
	[IST_TIMER] = "timer",
	[IST_IPI] = "ipi",
	[IST_MI] = "msgint",
};

/* Flat table of all interrupt sources; [start, last) bounds. */
static struct intr_source *e500_intr_sources;
static const struct intr_source *e500_intr_last_source;

static void	*e500_intr_establish(int, int, int, int (*)(void *), void *,
		    const char *);
static void	e500_intr_disestablish(void *);
static void	e500_intr_cpu_attach(struct cpu_info *ci);
static void	e500_intr_cpu_hatch(struct cpu_info *ci);
static void	e500_intr_cpu_send_ipi(cpuid_t, uintptr_t);
static void	e500_intr_init(void);
static void	e500_intr_init_precpu(void);
static const char *e500_intr_string(int, int, char *, size_t);
static const char *e500_intr_typename(int);
static void	e500_critintr(struct trapframe *tf);
static void	e500_decrintr(struct trapframe *tf);
static void	e500_extintr(struct trapframe *tf);
static void	e500_fitintr(struct trapframe *tf);
static void	e500_wdogintr(struct trapframe *tf);
static void	e500_spl0(void);
static int	e500_splraise(int);
static void	e500_splx(int);
static const char *e500_intr_all_name_lookup(int, int);

/*
 * The interrupt-switch vector exported to machine-independent booke
 * code; routes all interrupt operations to the e500/OpenPIC
 * implementations in this file.
 */
const struct intrsw e500_intrsw = {
	.intrsw_establish = e500_intr_establish,
	.intrsw_disestablish = e500_intr_disestablish,
	.intrsw_init = e500_intr_init,
	.intrsw_cpu_attach = e500_intr_cpu_attach,
	.intrsw_cpu_hatch = e500_intr_cpu_hatch,
	.intrsw_cpu_send_ipi = e500_intr_cpu_send_ipi,
	.intrsw_string = e500_intr_string,
	.intrsw_typename = e500_intr_typename,

	.intrsw_critintr = e500_critintr,
	.intrsw_decrintr = e500_decrintr,
	.intrsw_extintr = e500_extintr,
	.intrsw_fitintr = e500_fitintr,
	.intrsw_wdogintr = e500_wdogintr,

	.intrsw_splraise = e500_splraise,
	.intrsw_splx = e500_splx,
	.intrsw_spl0 = e500_spl0,

#ifdef __HAVE_FAST_SOFTINTS
	.intrsw_softint_init_md = powerpc_softint_init_md,
	.intrsw_softint_trigger = powerpc_softint_trigger,
#endif
};

/* Set by e500_wdogintr(); relaxes the PSL_CE assertions below. */
static bool wdog_barked;

/* Read a 32-bit OpenPIC register (offset relative to OPENPIC_BASE). */
static inline uint32_t
openpic_read(struct cpu_softc *cpu, bus_size_t offset)
{

	return bus_space_read_4(cpu->cpu_bst, cpu->cpu_bsh,
	    OPENPIC_BASE + offset);
}

/* Write a 32-bit OpenPIC register (offset relative to OPENPIC_BASE). */
static inline void
openpic_write(struct cpu_softc *cpu, bus_size_t offset, uint32_t val)
{

	/*
	 * NOTE(review): returning the (void) result of bus_space_write_4()
	 * from a void function is a C constraint violation, though
	 * accepted by the compilers this is built with.
	 */
	return bus_space_write_4(cpu->cpu_bst, cpu->cpu_bsh,
	    OPENPIC_BASE + offset, val);
}

/*
 * Look up the board-supplied name for external irq 'irq' from the
 * "external-irqs" property array; asserts the entry exists and is a
 * string.
 */
static const char *
e500_intr_external_name_lookup(int irq)
{
	prop_array_t extirqs = board_info_get_object("external-irqs");
	prop_string_t irqname = prop_array_get(extirqs, irq);
	KASSERT(irqname != NULL);
	KASSERT(prop_object_type(irqname) == PROP_TYPE_STRING);

	return prop_string_cstring_nocopy(irqname);
}

/*
 * Scan a name table (terminated by an empty in_name) for 'irq';
 * returns the name or NULL if not found.
 */
static const char *
e500_intr_name_lookup(const struct e500_intr_name *names, int irq)
{
	for (; names->in_name[0] != '\0'; names++) {
		if (names->in_irq == irq)
			return names->in_name;
	}

	return NULL;
}

/*
 * Name an on-chip source: prefer the chip-specific table, falling
 * back to the common e500 table.
 */
static const char *
e500_intr_onchip_name_lookup(int irq)
{
	const char *name;

	name = e500_intr_name_lookup(e500_intr_info.ii_onchip_intr_names, irq);
	if (name == NULL)
		name = e500_intr_name_lookup(e500_onchip_intr_names, irq);

	return name;
}

/*
 * Set the current IPL: program the OpenPIC CTPR to match and record
 * the new level in ci_cpl.  Asserts the hardware previously agreed
 * with ci_cpl and that the write took effect.
 */
static inline void
e500_splset(struct cpu_info *ci, int ipl)
{
	struct cpu_softc * const cpu = ci->ci_softc;

#ifdef __HAVE_FAST_SOFTINTS	/* XXX */
	KASSERT((curlwp->l_pflag & LP_INTR) == 0 || ipl != IPL_NONE);
#endif
	const u_int ctpr = IPL2CTPR(ipl);
	KASSERT(openpic_read(cpu, OPENPIC_CTPR) == IPL2CTPR(ci->ci_cpl));
	openpic_write(cpu, OPENPIC_CTPR, ctpr);
	KASSERT(openpic_read(cpu, OPENPIC_CTPR) == ctpr);
#ifdef DIAGNOSTIC
	/* timestamp each ipl transition for post-mortem debugging */
	cpu->cpu_spl_tb[ipl][ci->ci_cpl] = mftb();
#endif
	ci->ci_cpl = ipl;
}

/*
 * Drop to IPL_NONE, first draining any pending fast softints (run at
 * IPL_HIGH with external interrupts enabled), then re-enable EE.
 */
static void
e500_spl0(void)
{
	wrtee(0);

	struct cpu_info * const ci = curcpu();

#ifdef __HAVE_FAST_SOFTINTS
	if (__predict_false(ci->ci_data.cpu_softints != 0)) {
		e500_splset(ci, IPL_HIGH);
		wrtee(PSL_EE);
		powerpc_softint(ci, IPL_NONE,
		    (vaddr_t)__builtin_return_address(0));
		wrtee(0);
	}
#endif /* __HAVE_FAST_SOFTINTS */
	e500_splset(ci, IPL_NONE);

	wrtee(PSL_EE);
}

/*
 * Restore a previously saved IPL.  Attempts to *raise* the IPL via
 * splx are ignored (with a console complaint); pending softints
 * unmasked by the drop are run first.
 */
static void
e500_splx(int ipl)
{
	struct cpu_info * const ci = curcpu();
	const int old_ipl = ci->ci_cpl;

	/* if we paniced because of watchdog, PSL_CE will be clear.
	 */
	KASSERT(wdog_barked || (mfmsr() & PSL_CE));

	if (ipl == old_ipl)
		return;

	if (__predict_false(ipl > old_ipl)) {
		printf("%s: %p: cpl=%u: ignoring splx(%u) to raise ipl\n",
		    __func__, __builtin_return_address(0), old_ipl, ipl);
		if (old_ipl == IPL_NONE)
			Debugger();
	}

	// const
	register_t msr = wrtee(0);
#ifdef __HAVE_FAST_SOFTINTS
	/* run any softints now unmasked by dropping below their level */
	const u_int softints = ci->ci_data.cpu_softints & (IPL_SOFTMASK << ipl);
	if (__predict_false(softints != 0)) {
		e500_splset(ci, IPL_HIGH);
		wrtee(msr);
		powerpc_softint(ci, ipl,
		    (vaddr_t)__builtin_return_address(0));
		wrtee(0);
	}
#endif /* __HAVE_FAST_SOFTINTS */
	e500_splset(ci, ipl);
#if 1
	/* force EE back on when dropping below IPL_VM */
	if (ipl < IPL_VM && old_ipl >= IPL_VM)
		msr = PSL_EE;
#endif
	wrtee(msr);
}

/*
 * Raise the IPL (no-op if already at or above 'ipl') and return the
 * previous level for a later splx().
 */
static int
e500_splraise(int ipl)
{
	struct cpu_info * const ci = curcpu();
	const int old_ipl = ci->ci_cpl;

	/* if we paniced because of watchdog, PSL_CE will be clear.
	 */
	KASSERT(wdog_barked || (mfmsr() & PSL_CE));

	if (old_ipl < ipl) {
		//const
		register_t msr = wrtee(0);
		e500_splset(ci, ipl);
#if 0
		if (old_ipl < IPL_VM && ipl >= IPL_VM)
			msr = 0;
#endif
		wrtee(msr);
	}
#if 0
	else if (ipl == IPL_NONE) {
		panic("%s: %p: cpl=%u: attempt to splraise(IPL_NONE)",
		    __func__, __builtin_return_address(0), old_ipl);
	} else if (old_ipl > ipl) {
		printf("%s: %p: cpl=%u: ignoring splraise(%u) to lower ipl\n",
		    __func__, __builtin_return_address(0), old_ipl, ipl);
	}
#endif

	return old_ipl;
}

/* Default handler for unestablished sources; claims nothing. */
static int
e500_intr_spurious(void *arg)
{
	return 0;
}

/*
 * Validate an (irq, ipl, ist) triple for cpu 'ci' and fill in *ii
 * with the source's OpenPIC register offsets and flat vector index.
 * Returns false if any component is out of range for this chip.
 */
static bool
e500_intr_irq_info_get(struct cpu_info *ci, u_int irq, int ipl, int ist,
	struct e500_intr_irq_info *ii)
{
	const struct e500_intr_info * const info = &e500_intr_info;
	bool ok;

#if DEBUG > 2
	printf("%s(%p,irq=%u,ipl=%u,ist=%u,%p)\n", __func__, ci, irq, ipl, ist, ii);
#endif

	if (ipl < IPL_VM || ipl > IPL_HIGH) {
#if DEBUG > 2
		printf("%s:%d ipl=%u\n", __func__, __LINE__, ipl);
#endif
		return false;
	}

	if (ist <= IST_NONE || ist >= IST_MAX) {
#if DEBUG > 2
		printf("%s:%d ist=%u\n", __func__, __LINE__, ist);
#endif
		return false;
	}

	/* flat vector = irq + base of its IST group ... */
	ii->irq_vector = irq + info->ii_ist_vectors[ist];
	/* ... plus a per-cpu stride for per-cpu sources (IPIs shared) */
	if (IST_PERCPU_P(ist) && ist != IST_IPI)
		ii->irq_vector += ci->ci_cpuid * info->ii_percpu_sources;

	switch (ist) {
	default:
		/* external pins: edge or level only, within chip's count */
		ii->irq_vpr = OPENPIC_EIVPR(irq);
		ii->irq_dr = OPENPIC_EIDR(irq);
		ok = irq < info->ii_external_sources
		    && (ist == IST_EDGE
			|| ist == IST_LEVEL_LOW
			|| ist == IST_LEVEL_HIGH);
		break;
	case IST_PULSE:
		ok = false;
		break;
	case IST_ONCHIP:
		/* on-chip source must be set in the chip's bitmap */
		ii->irq_vpr = OPENPIC_IIVPR(irq);
		ii->irq_dr = OPENPIC_IIDR(irq);
		ok = irq < 32 * __arraycount(info->ii_onchip_bitmap);
#if DEBUG > 2
		printf("%s: irq=%u: ok=%u\n", __func__, irq, ok);
#endif
		ok = ok && (info->ii_onchip_bitmap[irq/32] & (1 << (irq & 31)));
#if DEBUG > 2
		printf("%s: %08x%08x -> %08x%08x: ok=%u\n", __func__,
		    irq < 32 ? 0 : (1 << irq), irq < 32 ? (1 << irq) : 0,
		    info->ii_onchip_bitmap[1], info->ii_onchip_bitmap[0],
		    ok);
#endif
		break;
	case IST_MSIGROUP:
		/* MSI groups are only usable at IPL_VM */
		ii->irq_vpr = OPENPIC_MSIVPR(irq);
		ii->irq_dr = OPENPIC_MSIDR(irq);
		ok = irq < info->ii_msigroup_sources
		    && ipl == IPL_VM;
		break;
	case IST_TIMER:
		/* per-cpu global timers */
		ii->irq_vpr = OPENPIC_GTVPR(ci->ci_cpuid, irq);
		ii->irq_dr = OPENPIC_GTDR(ci->ci_cpuid, irq);
		ok = irq < info->ii_timer_sources;
#if DEBUG > 2
		printf("%s: IST_TIMER irq=%u: ok=%u\n", __func__, irq, ok);
#endif
		break;
	case IST_IPI:
		ii->irq_vpr = OPENPIC_IPIVPR(irq);
		ii->irq_dr = OPENPIC_IPIDR(irq);
		ok = irq < info->ii_ipi_sources;
		break;
	case IST_MI:
		/* message interrupts */
		ii->irq_vpr = OPENPIC_MIVPR(irq);
		ii->irq_dr = OPENPIC_MIDR(irq);
		ok = irq < info->ii_mi_sources;
		break;
	}

	return ok;
}

/*
 * Format the name of source (irq, ist) into buf: copies the name of
 * the event counter registered for its vector.  Returns NULL if the
 * pair is invalid.
 */
static const char *
e500_intr_string(int irq, int ist, char *buf, size_t len)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	struct e500_intr_irq_info ii;

	if (!e500_intr_irq_info_get(ci, irq, IPL_VM, ist, &ii))
		return NULL;

	strlcpy(buf, cpu->cpu_evcnt_intrs[ii.irq_vector].ev_name, len);
	return buf;
}

__CTASSERT(__arraycount(ist_names) == IST_MAX);

/* Return the printable name of an IST_* value, or NULL if invalid. */
static const char *
e500_intr_typename(int ist)
{
	if (IST_NONE <= ist && ist < IST_MAX)
		return ist_names[ist];

	return NULL;
}

/*
 * Establish 'handler' for source (irq, ipl, ist) routed to cpu 'ci':
 * record it in e500_intr_sources[] and program the source's OpenPIC
 * vector/priority and destination registers, unmasking it last.
 * Returns an opaque cookie for e500_intr_disestablish(), or NULL on
 * conflict/invalid arguments.  Re-establishing with identical
 * parameters just bumps the refcount (IPI0 is shared by all CPUs).
 */
static void *
e500_intr_cpu_establish(struct cpu_info *ci, int irq, int ipl, int ist,
	int (*handler)(void *), void *arg, const char *xname)
{
	struct cpu_softc * const cpu = ci->ci_softc;
	struct e500_intr_irq_info ii;

	KASSERT(ipl >= IPL_VM && ipl <= IPL_HIGH);
	KASSERT(ist > IST_NONE && ist < IST_MAX && ist != IST_MSI);

	if (!e500_intr_irq_info_get(ci, irq, ipl, ist, &ii)) {
		printf("%s: e500_intr_irq_info_get(%p,%u,%u,%u,%p) failed\n",
		    __func__, ci, irq, ipl, ist, &ii);
		return NULL;
	}

	if (xname == NULL) {
		xname = e500_intr_all_name_lookup(irq, ist);
		if (xname == NULL)
			xname = "unknown";
	}

	struct intr_source * const is = &e500_intr_sources[ii.irq_vector];
	mutex_enter(&e500_intr_lock);
	if (is->is_ipl != IPL_NONE) {
		/* XXX IPI0 is shared by all CPU. */
		/*
		 * Slot already in use: only allow an exact duplicate
		 * IPI establishment (shared); anything else fails.
		 */
		if (is->is_ist != IST_IPI ||
		    is->is_irq != irq ||
		    is->is_ipl != ipl ||
		    is->is_ist != ist ||
		    is->is_func != handler ||
		    is->is_arg != arg) {
			mutex_exit(&e500_intr_lock);
			return NULL;
		}
	}

	is->is_func = handler;
	is->is_arg = arg;
	is->is_ipl = ipl;
	is->is_ist = ist;
	is->is_irq = irq;
	is->is_refcnt++;
	is->is_vpr = ii.irq_vpr;
	is->is_dr = ii.irq_dr;
	switch (ist) {
	case IST_EDGE:
	case IST_LEVEL_LOW:
	case IST_LEVEL_HIGH:
		snprintf(is->is_source, sizeof(is->is_source), "extirq %d",
		    irq);
		break;
	case IST_ONCHIP:
		snprintf(is->is_source, sizeof(is->is_source), "irq %d", irq);
		break;
	case IST_MSIGROUP:
		snprintf(is->is_source, sizeof(is->is_source), "msigroup %d",
		    irq);
		break;
	case IST_TIMER:
		snprintf(is->is_source, sizeof(is->is_source), "timer %d", irq);
		break;
	case IST_IPI:
		snprintf(is->is_source, sizeof(is->is_source), "ipi %d", irq);
		break;
	case IST_MI:
		snprintf(is->is_source, sizeof(is->is_source), "mi %d", irq);
		break;
	case IST_PULSE:
	default:
		panic("%s: invalid ist (%d)\n", __func__, ist);
	}
	strlcpy(is->is_xname, xname, sizeof(is->is_xname));

	/*
	 * Build the vector/priority word: priority from the IPL, the
	 * vector encodes (flat vector + 1) and the IPL in its low
	 * nibble (see the iack decode in e500_extintr), plus the
	 * sense/polarity bits for the source type.
	 */
	uint32_t vpr = VPR_PRIORITY_MAKE(IPL2CTPR(ipl))
	    | VPR_VECTOR_MAKE(((ii.irq_vector + 1) << 4) | ipl)
	    | (ist == IST_LEVEL_LOW
		? VPR_LEVEL_LOW
		: (ist == IST_LEVEL_HIGH
		    ? VPR_LEVEL_HIGH
		    : (ist == IST_ONCHIP
			? VPR_P_HIGH
			: 0)));

	/*
	 * All interrupts go to the primary except per-cpu interrupts which get
	 * routed to the appropriate cpu.
	 */
	uint32_t dr = openpic_read(cpu, ii.irq_dr);

	dr |= 1 << (IST_PERCPU_P(ist) ? ci->ci_cpuid : 0);

	/*
	 * Update the vector/priority and destination registers keeping the
	 * interrupt masked.
	 */
	const register_t msr = wrtee(0);	/* disable interrupts */
	openpic_write(cpu, ii.irq_vpr, vpr | VPR_MSK);
	openpic_write(cpu, ii.irq_dr, dr);

	/*
	 * Now unmask the interrupt.
	 */
	openpic_write(cpu, ii.irq_vpr, vpr);

	wrtee(msr);				/* re-enable interrupts */

	mutex_exit(&e500_intr_lock);

	return is;
}

/* MI entry point: establish on the current cpu. */
static void *
e500_intr_establish(int irq, int ipl, int ist, int (*handler)(void *),
	void *arg, const char *xname)
{
	return e500_intr_cpu_establish(curcpu(), irq, ipl, ist, handler, arg,
	    xname);
}

/*
 * Tear down a handler established above.  Drops a reference; the last
 * reference masks the source, waits for any in-flight delivery
 * (VPR_A) to finish, clears its routing, and resets the slot.
 */
static void
e500_intr_disestablish(void *vis)
{
	struct cpu_softc * const cpu = curcpu()->ci_softc;
	struct intr_source * const is = vis;
	struct e500_intr_irq_info ii;

	KASSERT(e500_intr_sources <= is);
	KASSERT(is < e500_intr_last_source);
	KASSERT(!cpu_intr_p());

	bool ok = e500_intr_irq_info_get(curcpu(), is->is_irq, is->is_ipl,
	    is->is_ist, &ii);
	(void)ok;	/* appease gcc */
	KASSERT(ok);
	KASSERT(is - e500_intr_sources == ii.irq_vector);

	mutex_enter(&e500_intr_lock);

	if (is->is_refcnt-- > 1) {
		mutex_exit(&e500_intr_lock);
		return;
	}

	/*
	 * Mask the source using the mask (MSK) bit in the vector/priority reg.
	 */
	uint32_t vpr = openpic_read(cpu, ii.irq_vpr);
	openpic_write(cpu, ii.irq_vpr, VPR_MSK | vpr);

	/*
	 * Wait for the Activity (A) bit for the source to be cleared.
	 */
	while (openpic_read(cpu, ii.irq_vpr) & VPR_A)
		;

	/*
	 * Now the source can be modified.
	 */
	openpic_write(cpu, ii.irq_dr, 0);		/* stop delivery */
	openpic_write(cpu, ii.irq_vpr, VPR_MSK);	/* mask/reset it */

	*is = (struct intr_source)INTR_SOURCE_INITIALIZER;

	mutex_exit(&e500_intr_lock);
}

/* Critical interrupts are unexpected; always fatal. */
static void
e500_critintr(struct trapframe *tf)
{
	panic("%s: srr0/srr1=%#lx/%#lx", __func__, tf->tf_srr0, tf->tf_srr1);
}

/* Decrementer interrupts are unexpected here; always fatal. */
static void
e500_decrintr(struct trapframe *tf)
{
	panic("%s: srr0/srr1=%#lx/%#lx", __func__, tf->tf_srr0, tf->tf_srr1);
}

/* Fixed-interval timer interrupts are unexpected; always fatal. */
static void
e500_fitintr(struct trapframe *tf)
{
	panic("%s: srr0/srr1=%#lx/%#lx", __func__, tf->tf_srr0, tf->tf_srr1);
}

/*
 * Watchdog fired: acknowledge it in the TSR, note it (so PSL_CE
 * assertions stand down), dump spl history and the trapframe, panic.
 */
static void
e500_wdogintr(struct trapframe *tf)
{
	struct cpu_info * const ci = curcpu();
	mtspr(SPR_TSR, TSR_ENW|TSR_WIS);
	wdog_barked = true;
	dump_splhist(ci, NULL);
	dump_trapframe(tf, NULL);
	panic("%s: tf=%p tb=%"PRId64" srr0/srr1=%#lx/%#lx"
	    " cpl=%d idepth=%d, mtxcount=%d",
	    __func__, tf, mftb(), tf->tf_srr0, tf->tf_srr1,
	    ci->ci_cpl, ci->ci_idepth, ci->ci_mtx_count);
}

/*
 * External interrupt entry: repeatedly acknowledge (IACK) and
 * dispatch pending sources at a raised IPL until the PIC reports
 * spurious, then drain softints and return to the saved IPL.
 */
static void
e500_extintr(struct trapframe *tf)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	const int old_ipl = ci->ci_cpl;

	/* if we paniced because of watchdog, PSL_CE will be clear.
	 */
	KASSERT(wdog_barked || (mfmsr() & PSL_CE));

#if 0
//	printf("%s(%p): idepth=%d enter\n", __func__, tf, ci->ci_idepth);
	if ((register_t)tf >= (register_t)curlwp->l_addr + USPACE
	    || (register_t)tf < (register_t)curlwp->l_addr + NBPG) {
		printf("%s(entry): pid %d.%d (%s): srr0/srr1=%#lx/%#lx: invalid tf addr %p\n",
		    __func__, curlwp->l_proc->p_pid, curlwp->l_lid,
		    curlwp->l_proc->p_comm, tf->tf_srr0, tf->tf_srr1, tf);
	}
#endif


	ci->ci_data.cpu_nintr++;
	/* record nesting depth in the frame and the entry IPL per depth */
	tf->tf_cf.cf_idepth = ci->ci_idepth++;
	cpu->cpu_pcpls[ci->ci_idepth] = old_ipl;
#if 1
	/* sanity: EE must be off and CTPR must agree with the saved IPL */
	if (mfmsr() & PSL_EE)
		panic("%s(%p): MSR[EE] is on (%#lx)!", __func__, tf, mfmsr());
	if (old_ipl == IPL_HIGH
	    || IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR))
		panic("%s(%p): old_ipl(%u) == IPL_HIGH(%u) "
		    "|| old_ipl + %u != OPENPIC_CTPR (%u)",
		    __func__, tf, old_ipl, IPL_HIGH,
		    15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR));
#else
	if (old_ipl >= IPL_VM)
		panic("%s(%p): old_ipl(%u) >= IPL_VM(%u) CTPR=%u",
		    __func__, tf, old_ipl, IPL_VM, openpic_read(cpu, OPENPIC_CTPR));
#endif

	for (;;) {
		/*
		 * Find out the pending interrupt.
		 */
		KASSERTMSG((mfmsr() & PSL_EE) == 0,
		    "%s(%p): MSR[EE] left on (%#lx)!", __func__, tf, mfmsr());
		if (IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR))
			panic("%s(%p): %d: old_ipl(%u) + %u != OPENPIC_CTPR (%u)",
			    __func__, tf, __LINE__, old_ipl,
			    15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR));
		/*
		 * IACK returns the vector programmed in cpu_establish:
		 * low nibble is the IPL, upper bits are (flat vector+1).
		 */
		const uint32_t iack = openpic_read(cpu, OPENPIC_IACK);
#ifdef DIAGNOSTIC
		const int ipl = iack & 0xf;
#endif
		const int irq = (iack >> 4) - 1;
#if 0
		printf("%s: iack=%d ipl=%d irq=%d <%s>\n",
		    __func__, iack, ipl, irq,
		    (iack != IRQ_SPURIOUS ?
			cpu->cpu_evcnt_intrs[irq].ev_name : "spurious"));
#endif
		if (IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR))
			panic("%s(%p): %d: old_ipl(%u) + %u != OPENPIC_CTPR (%u)",
			    __func__, tf, __LINE__, old_ipl,
			    15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR));
		if (iack == IRQ_SPURIOUS)
			break;

		struct intr_source * const is = &e500_intr_sources[irq];
		if (__predict_true(is < e500_intr_last_source)) {
			/*
			 * Timer interrupts get their argument overriden with
			 * the pointer to the trapframe.
			 */
			KASSERTMSG(is->is_ipl == ipl,
			    "iack %#x: is %p: irq %d ipl %d != iack ipl %d",
			    iack, is, irq, is->is_ipl, ipl);
			void *arg = (is->is_ist == IST_TIMER ? tf : is->is_arg);
			if (is->is_ipl <= old_ipl)
				panic("%s(%p): %s (%u): is->is_ipl (%u) <= old_ipl (%u)\n",
				    __func__, tf,
				    cpu->cpu_evcnt_intrs[irq].ev_name, irq,
				    is->is_ipl, old_ipl);
			KASSERT(is->is_ipl > old_ipl);
			e500_splset(ci, is->is_ipl);	/* change IPL */
			if (__predict_false(is->is_func == NULL)) {
				aprint_error_dev(ci->ci_dev,
				    "interrupt from unestablished irq %d\n",
				    irq);
			} else {
				/* run the handler with EE enabled */
				int (*func)(void *) = is->is_func;
				wrtee(PSL_EE);
				int rv = (*func)(arg);
				wrtee(0);
#if DEBUG > 2
				printf("%s: %s handler %p(%p) returned %d\n",
				    __func__,
				    cpu->cpu_evcnt_intrs[irq].ev_name,
				    func, arg, rv);
#endif
				if (rv == 0)
					cpu->cpu_evcnt_spurious_intr.ev_count++;
			}
			e500_splset(ci, old_ipl);	/* restore IPL */
			cpu->cpu_evcnt_intrs[irq].ev_count++;
		} else {
			aprint_error_dev(ci->ci_dev,
			    "interrupt from illegal irq %d\n", irq);
			cpu->cpu_evcnt_spurious_intr.ev_count++;
		}
		/*
		 * If this is a nested interrupt, simply ack it and exit
		 * because the loop we interrupted will complete looking
		 * for interrupts.
		 */
		KASSERTMSG((mfmsr() & PSL_EE) == 0,
		    "%s(%p): MSR[EE] left on (%#lx)!", __func__, tf, mfmsr());
		if (IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR))
			panic("%s(%p): %d: old_ipl(%u) + %u != OPENPIC_CTPR (%u)",
			    __func__, tf, __LINE__, old_ipl,
			    15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR));

		openpic_write(cpu, OPENPIC_EOI, 0);
		if (IPL2CTPR(old_ipl) != openpic_read(cpu, OPENPIC_CTPR))
			panic("%s(%p): %d: old_ipl(%u) + %u != OPENPIC_CTPR (%u)",
			    __func__, tf, __LINE__, old_ipl,
			    15 - IPL_HIGH, openpic_read(cpu, OPENPIC_CTPR));
		if (ci->ci_idepth > 0)
			break;
	}

	ci->ci_idepth--;

#ifdef __HAVE_FAST_SOFTINTS
	/*
	 * Before exiting, deal with any softints that need to be dealt with.
	 */
	const u_int softints = ci->ci_data.cpu_softints & (IPL_SOFTMASK << old_ipl);
	if (__predict_false(softints != 0)) {
		KASSERT(old_ipl < IPL_VM);
		e500_splset(ci, IPL_HIGH);	/* pop to high */
		wrtee(PSL_EE);			/* reenable interrupts */
		powerpc_softint(ci, old_ipl,	/* deal with them */
		    tf->tf_srr0);
		wrtee(0);			/* disable interrupts */
		e500_splset(ci, old_ipl);	/* and drop back */
	}
#endif /* __HAVE_FAST_SOFTINTS */
	KASSERT(ci->ci_cpl == old_ipl);

	/*
	 * If we interrupted while power-saving and we need to exit idle,
	 * we need to clear PSL_POW so we won't go back into power-saving.
1087 */ 1088 if (__predict_false(tf->tf_srr1 & PSL_POW) && ci->ci_want_resched) 1089 tf->tf_srr1 &= ~PSL_POW; 1090 1091// printf("%s(%p): idepth=%d exit\n", __func__, tf, ci->ci_idepth); 1092} 1093 1094static void 1095e500_intr_init(void) 1096{ 1097 struct cpu_info * const ci = curcpu(); 1098 struct cpu_softc * const cpu = ci->ci_softc; 1099 const uint32_t frr = openpic_read(cpu, OPENPIC_FRR); 1100 const u_int nirq = FRR_NIRQ_GET(frr) + 1; 1101// const u_int ncpu = FRR_NCPU_GET(frr) + 1; 1102 struct intr_source *is; 1103 struct e500_intr_info * const ii = &e500_intr_info; 1104 1105 const uint16_t svr = (mfspr(SPR_SVR) & ~0x80000) >> 16; 1106 switch (svr) { 1107#ifdef MPC8536 1108 case SVR_MPC8536v1 >> 16: 1109 *ii = mpc8536_intr_info; 1110 break; 1111#endif 1112#ifdef MPC8544 1113 case SVR_MPC8544v1 >> 16: 1114 *ii = mpc8544_intr_info; 1115 break; 1116#endif 1117#ifdef MPC8548 1118 case SVR_MPC8543v1 >> 16: 1119 case SVR_MPC8548v1 >> 16: 1120 *ii = mpc8548_intr_info; 1121 break; 1122#endif 1123#ifdef MPC8555 1124 case SVR_MPC8541v1 >> 16: 1125 case SVR_MPC8555v1 >> 16: 1126 *ii = mpc8555_intr_info; 1127 break; 1128#endif 1129#ifdef MPC8568 1130 case SVR_MPC8568v1 >> 16: 1131 *ii = mpc8568_intr_info; 1132 break; 1133#endif 1134#ifdef MPC8572 1135 case SVR_MPC8572v1 >> 16: 1136 *ii = mpc8572_intr_info; 1137 break; 1138#endif 1139#ifdef P1023 1140 case SVR_P1017v1 >> 16: 1141 case SVR_P1023v1 >> 16: 1142 *ii = p1023_intr_info; 1143 break; 1144#endif 1145#ifdef P1025 1146 case SVR_P1016v1 >> 16: 1147 case SVR_P1025v1 >> 16: 1148 *ii = p1025_intr_info; 1149 break; 1150#endif 1151#ifdef P2020 1152 case SVR_P2010v2 >> 16: 1153 case SVR_P2020v2 >> 16: 1154 *ii = p20x0_intr_info; 1155 break; 1156#endif 1157 default: 1158 panic("%s: don't know how to deal with SVR %#jx", 1159 __func__, (uintmax_t)mfspr(SPR_SVR)); 1160 } 1161 1162 /* 1163 * Initialize interrupt handler lock 1164 */ 1165 mutex_init(&e500_intr_lock, MUTEX_DEFAULT, IPL_HIGH); 1166 1167 /* 1168 * We need to be in 
mixed mode. 1169 */ 1170 openpic_write(cpu, OPENPIC_GCR, GCR_M); 1171 1172 /* 1173 * Make we and the openpic both agree about the current SPL level. 1174 */ 1175 e500_splset(ci, ci->ci_cpl); 1176 1177 /* 1178 * Allow the required number of interrupt sources. 1179 */ 1180 is = kmem_zalloc(nirq * sizeof(*is), KM_SLEEP); 1181 e500_intr_sources = is; 1182 e500_intr_last_source = is + nirq; 1183 1184 /* 1185 * Initialize all the external interrupts as active low. 1186 */ 1187 for (u_int irq = 0; irq < e500_intr_info.ii_external_sources; irq++) { 1188 openpic_write(cpu, OPENPIC_EIVPR(irq), 1189 VPR_VECTOR_MAKE(irq) | VPR_LEVEL_LOW); 1190 } 1191} 1192 1193static void 1194e500_intr_init_precpu(void) 1195{ 1196 struct cpu_info const *ci = curcpu(); 1197 struct cpu_softc * const cpu = ci->ci_softc; 1198 bus_addr_t dr; 1199 1200 /* 1201 * timer's DR is set to be delivered to cpu0 as initial value. 1202 */ 1203 for (u_int irq = 0; irq < e500_intr_info.ii_timer_sources; irq++) { 1204 dr = OPENPIC_GTDR(ci->ci_cpuid, irq); 1205 openpic_write(cpu, dr, 0); /* stop delivery */ 1206 } 1207} 1208 1209static void 1210e500_idlespin(void) 1211{ 1212 KASSERTMSG(curcpu()->ci_cpl == IPL_NONE, 1213 "%s: cpu%u: ci_cpl (%d) != 0", __func__, cpu_number(), 1214 curcpu()->ci_cpl); 1215 KASSERTMSG(CTPR2IPL(openpic_read(curcpu()->ci_softc, OPENPIC_CTPR)) == IPL_NONE, 1216 "%s: cpu%u: CTPR (%d) != IPL_NONE", __func__, cpu_number(), 1217 CTPR2IPL(openpic_read(curcpu()->ci_softc, OPENPIC_CTPR))); 1218 KASSERT(mfmsr() & PSL_EE); 1219 1220 if (powersave > 0) 1221 mtmsr(mfmsr() | PSL_POW); 1222} 1223 1224static void 1225e500_intr_cpu_attach(struct cpu_info *ci) 1226{ 1227 struct cpu_softc * const cpu = ci->ci_softc; 1228 const char * const xname = device_xname(ci->ci_dev); 1229 1230 const u_int32_t frr = openpic_read(cpu, OPENPIC_FRR); 1231 const u_int nirq = FRR_NIRQ_GET(frr) + 1; 1232// const u_int ncpu = FRR_NCPU_GET(frr) + 1; 1233 1234 const struct e500_intr_info * const info = &e500_intr_info; 1235 
1236 cpu->cpu_clock_gtbcr = OPENPIC_GTBCR(ci->ci_cpuid, E500_CLOCK_TIMER); 1237 1238 cpu->cpu_evcnt_intrs = 1239 kmem_zalloc(nirq * sizeof(cpu->cpu_evcnt_intrs[0]), KM_SLEEP); 1240 1241 struct evcnt *evcnt = cpu->cpu_evcnt_intrs; 1242 for (size_t j = 0; j < info->ii_external_sources; j++, evcnt++) { 1243 const char *name = e500_intr_external_name_lookup(j); 1244 evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR, NULL, xname, name); 1245 } 1246 KASSERT(evcnt == cpu->cpu_evcnt_intrs + info->ii_ist_vectors[IST_ONCHIP]); 1247 for (size_t j = 0; j < info->ii_onchip_sources; j++, evcnt++) { 1248 if (info->ii_onchip_bitmap[j / 32] & __BIT(j & 31)) { 1249 const char *name = e500_intr_onchip_name_lookup(j); 1250 if (name != NULL) { 1251 evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR, 1252 NULL, xname, name); 1253#ifdef DIAGNOSTIC 1254 } else { 1255 printf("%s: missing evcnt for onchip irq %zu\n", 1256 __func__, j); 1257#endif 1258 } 1259 } 1260 } 1261 1262 KASSERT(evcnt == cpu->cpu_evcnt_intrs + info->ii_ist_vectors[IST_MSIGROUP]); 1263 for (size_t j = 0; j < info->ii_msigroup_sources; j++, evcnt++) { 1264 evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR, 1265 NULL, xname, e500_msigroup_intr_names[j].in_name); 1266 } 1267 1268 KASSERT(evcnt == cpu->cpu_evcnt_intrs + info->ii_ist_vectors[IST_TIMER]); 1269 evcnt += ci->ci_cpuid * info->ii_percpu_sources; 1270 for (size_t j = 0; j < info->ii_timer_sources; j++, evcnt++) { 1271 evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR, 1272 NULL, xname, e500_timer_intr_names[j].in_name); 1273 } 1274 1275 for (size_t j = 0; j < info->ii_ipi_sources; j++, evcnt++) { 1276 evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR, 1277 NULL, xname, e500_ipi_intr_names[j].in_name); 1278 } 1279 1280 for (size_t j = 0; j < info->ii_mi_sources; j++, evcnt++) { 1281 evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR, 1282 NULL, xname, e500_mi_intr_names[j].in_name); 1283 } 1284 1285 ci->ci_idlespin = e500_idlespin; 1286} 1287 1288static void 1289e500_intr_cpu_send_ipi(cpuid_t target, 
uint32_t ipimsg) 1290{ 1291 struct cpu_info * const ci = curcpu(); 1292 struct cpu_softc * const cpu = ci->ci_softc; 1293 uint32_t dstmask; 1294 1295 if (target >= CPU_MAXNUM) { 1296 CPU_INFO_ITERATOR cii; 1297 struct cpu_info *dst_ci; 1298 1299 KASSERT(target == IPI_DST_NOTME || target == IPI_DST_ALL); 1300 1301 dstmask = 0; 1302 for (CPU_INFO_FOREACH(cii, dst_ci)) { 1303 if (target == IPI_DST_ALL || ci != dst_ci) { 1304 dstmask |= 1 << cpu_index(ci); 1305 if (ipimsg) 1306 atomic_or_32(&dst_ci->ci_pending_ipis, 1307 ipimsg); 1308 } 1309 } 1310 } else { 1311 struct cpu_info * const dst_ci = cpu_lookup(target); 1312 KASSERT(dst_ci != NULL); 1313 KASSERTMSG(target == cpu_index(dst_ci), 1314 "%s: target (%lu) != cpu_index(cpu%u)", 1315 __func__, target, cpu_index(dst_ci)); 1316 dstmask = (1 << target); 1317 if (ipimsg) 1318 atomic_or_32(&dst_ci->ci_pending_ipis, ipimsg); 1319 } 1320 1321 openpic_write(cpu, OPENPIC_IPIDR(0), dstmask); 1322} 1323 1324typedef void (*ipifunc_t)(void); 1325 1326#ifdef __HAVE_PREEMPTION 1327static void 1328e500_ipi_kpreempt(void) 1329{ 1330 poowerpc_softint_trigger(1 << IPL_NONE); 1331} 1332#endif 1333 1334static void 1335e500_ipi_suspend(void) 1336{ 1337 1338#ifdef MULTIPROCESSOR 1339 cpu_pause(NULL); 1340#endif /* MULTIPROCESSOR */ 1341} 1342 1343static void 1344e500_ipi_ast(void) 1345{ 1346 curcpu()->ci_onproc->l_md.md_astpending = 1; 1347} 1348 1349static const ipifunc_t e500_ipifuncs[] = { 1350 [ilog2(IPI_XCALL)] = xc_ipi_handler, 1351 [ilog2(IPI_GENERIC)] = ipi_cpu_handler, 1352 [ilog2(IPI_HALT)] = e500_ipi_halt, 1353#ifdef __HAVE_PREEMPTION 1354 [ilog2(IPI_KPREEMPT)] = e500_ipi_kpreempt, 1355#endif 1356 [ilog2(IPI_TLB1SYNC)] = e500_tlb1_sync, 1357 [ilog2(IPI_SUSPEND)] = e500_ipi_suspend, 1358 [ilog2(IPI_AST)] = e500_ipi_ast, 1359}; 1360 1361static int 1362e500_ipi_intr(void *v) 1363{ 1364 struct cpu_info * const ci = curcpu(); 1365 1366 ci->ci_ev_ipi.ev_count++; 1367 1368 uint32_t pending_ipis = atomic_swap_32(&ci->ci_pending_ipis, 
0); 1369 for (u_int ipi = 31; pending_ipis != 0; ipi--, pending_ipis <<= 1) { 1370 const u_int bits = __builtin_clz(pending_ipis); 1371 ipi -= bits; 1372 pending_ipis <<= bits; 1373 KASSERT(e500_ipifuncs[ipi] != NULL); 1374 (*e500_ipifuncs[ipi])(); 1375 } 1376 1377 return 1; 1378} 1379 1380static void 1381e500_intr_cpu_hatch(struct cpu_info *ci) 1382{ 1383 char iname[INTRIDBUF]; 1384 1385 /* Initialize percpu interrupts. */ 1386 e500_intr_init_precpu(); 1387 1388 /* 1389 * Establish clock interrupt for this CPU. 1390 */ 1391 snprintf(iname, sizeof(iname), "%s clock", device_xname(ci->ci_dev)); 1392 if (e500_intr_cpu_establish(ci, E500_CLOCK_TIMER, IPL_CLOCK, IST_TIMER, 1393 e500_clock_intr, NULL, iname) == NULL) 1394 panic("%s: failed to establish clock interrupt!", __func__); 1395 1396 /* 1397 * Establish the IPI interrupts for this CPU. 1398 */ 1399 if (e500_intr_cpu_establish(ci, 0, IPL_VM, IST_IPI, e500_ipi_intr, 1400 NULL, "ipi") == NULL) 1401 panic("%s: failed to establish ipi interrupt!", __func__); 1402 1403 /* 1404 * Enable watchdog interrupts. 
1405 */ 1406 uint32_t tcr = mfspr(SPR_TCR); 1407 tcr |= TCR_WIE; 1408 mtspr(SPR_TCR, tcr); 1409} 1410 1411static const char * 1412e500_intr_all_name_lookup(int irq, int ist) 1413{ 1414 const struct e500_intr_info * const info = &e500_intr_info; 1415 1416 switch (ist) { 1417 default: 1418 if (irq < info->ii_external_sources && 1419 (ist == IST_EDGE || 1420 ist == IST_LEVEL_LOW || 1421 ist == IST_LEVEL_HIGH)) 1422 return e500_intr_name_lookup( 1423 info->ii_external_intr_names, irq); 1424 break; 1425 1426 case IST_PULSE: 1427 break; 1428 1429 case IST_ONCHIP: 1430 if (irq < info->ii_onchip_sources) 1431 return e500_intr_onchip_name_lookup(irq); 1432 break; 1433 1434 case IST_MSIGROUP: 1435 if (irq < info->ii_msigroup_sources) 1436 return e500_intr_name_lookup(e500_msigroup_intr_names, 1437 irq); 1438 break; 1439 1440 case IST_TIMER: 1441 if (irq < info->ii_timer_sources) 1442 return e500_intr_name_lookup(e500_timer_intr_names, 1443 irq); 1444 break; 1445 1446 case IST_IPI: 1447 if (irq < info->ii_ipi_sources) 1448 return e500_intr_name_lookup(e500_ipi_intr_names, irq); 1449 break; 1450 1451 case IST_MI: 1452 if (irq < info->ii_mi_sources) 1453 return e500_intr_name_lookup(e500_mi_intr_names, irq); 1454 break; 1455 } 1456 1457 return NULL; 1458} 1459 1460static void 1461e500_intr_get_affinity(struct intr_source *is, kcpuset_t *cpuset) 1462{ 1463 struct cpu_info * const ci = curcpu(); 1464 struct cpu_softc * const cpu = ci->ci_softc; 1465 struct e500_intr_irq_info ii; 1466 1467 kcpuset_zero(cpuset); 1468 1469 if (is->is_ipl != IPL_NONE && !IST_PERCPU_P(is->is_ist)) { 1470 if (e500_intr_irq_info_get(ci, is->is_irq, is->is_ipl, 1471 is->is_ist, &ii)) { 1472 uint32_t dr = openpic_read(cpu, ii.irq_dr); 1473 while (dr != 0) { 1474 u_int n = ffs(dr); 1475 if (n-- == 0) 1476 break; 1477 dr &= ~(1 << n); 1478 kcpuset_set(cpuset, n); 1479 } 1480 } 1481 } 1482} 1483 1484static int 1485e500_intr_set_affinity(struct intr_source *is, const kcpuset_t *cpuset) 1486{ 1487 struct 
cpu_info * const ci = curcpu(); 1488 struct cpu_softc * const cpu = ci->ci_softc; 1489 struct e500_intr_irq_info ii; 1490 uint32_t ecpuset, tcpuset; 1491 1492 KASSERT(mutex_owned(&cpu_lock)); 1493 KASSERT(mutex_owned(&e500_intr_lock)); 1494 KASSERT(!kcpuset_iszero(cpuset)); 1495 1496 kcpuset_export_u32(cpuset, &ecpuset, sizeof(ecpuset)); 1497 tcpuset = ecpuset; 1498 while (tcpuset != 0) { 1499 u_int cpu_idx = ffs(tcpuset); 1500 if (cpu_idx-- == 0) 1501 break; 1502 1503 tcpuset &= ~(1 << cpu_idx); 1504 struct cpu_info * const newci = cpu_lookup(cpu_idx); 1505 if (newci == NULL) 1506 return EINVAL; 1507 if ((newci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) 1508 return EINVAL; 1509 } 1510 1511 if (!e500_intr_irq_info_get(ci, is->is_irq, is->is_ipl, is->is_ist, 1512 &ii)) 1513 return ENXIO; 1514 1515 /* 1516 * Update the vector/priority and destination registers keeping the 1517 * interrupt masked. 1518 */ 1519 const register_t msr = wrtee(0); /* disable interrupts */ 1520 1521 uint32_t vpr = openpic_read(cpu, ii.irq_vpr); 1522 openpic_write(cpu, ii.irq_vpr, vpr | VPR_MSK); 1523 1524 /* 1525 * Wait for the Activity (A) bit for the source to be cleared. 1526 */ 1527 while (openpic_read(cpu, ii.irq_vpr) & VPR_A) 1528 continue; 1529 1530 /* 1531 * Update destination register 1532 */ 1533 openpic_write(cpu, ii.irq_dr, ecpuset); 1534 1535 /* 1536 * Now unmask the interrupt. 
1537 */ 1538 openpic_write(cpu, ii.irq_vpr, vpr); 1539 1540 wrtee(msr); /* re-enable interrupts */ 1541 1542 return 0; 1543} 1544 1545static bool 1546e500_intr_is_affinity_intrsource(struct intr_source *is, 1547 const kcpuset_t *cpuset) 1548{ 1549 struct cpu_info * const ci = curcpu(); 1550 struct cpu_softc * const cpu = ci->ci_softc; 1551 struct e500_intr_irq_info ii; 1552 bool result = false; 1553 1554 if (is->is_ipl != IPL_NONE && !IST_PERCPU_P(is->is_ist)) { 1555 if (e500_intr_irq_info_get(ci, is->is_irq, is->is_ipl, 1556 is->is_ist, &ii)) { 1557 uint32_t dr = openpic_read(cpu, ii.irq_dr); 1558 while (dr != 0 && !result) { 1559 u_int n = ffs(dr); 1560 if (n-- == 0) 1561 break; 1562 dr &= ~(1 << n); 1563 result = kcpuset_isset(cpuset, n); 1564 } 1565 } 1566 } 1567 return result; 1568} 1569 1570static struct intr_source * 1571e500_intr_get_source(const char *intrid) 1572{ 1573 struct intr_source *is; 1574 1575 mutex_enter(&e500_intr_lock); 1576 for (is = e500_intr_sources; is < e500_intr_last_source; ++is) { 1577 if (is->is_source[0] == '\0') 1578 continue; 1579 1580 if (!strncmp(intrid, is->is_source, sizeof(is->is_source) - 1)) 1581 break; 1582 } 1583 if (is == e500_intr_last_source) 1584 is = NULL; 1585 mutex_exit(&e500_intr_lock); 1586 return is; 1587} 1588 1589uint64_t 1590interrupt_get_count(const char *intrid, u_int cpu_idx) 1591{ 1592 struct cpu_info * const ci = cpu_lookup(cpu_idx); 1593 struct cpu_softc * const cpu = ci->ci_softc; 1594 struct intr_source *is; 1595 struct e500_intr_irq_info ii; 1596 1597 is = e500_intr_get_source(intrid); 1598 if (is == NULL) 1599 return 0; 1600 1601 if (e500_intr_irq_info_get(ci, is->is_irq, is->is_ipl, is->is_ist, &ii)) 1602 return cpu->cpu_evcnt_intrs[ii.irq_vector].ev_count; 1603 return 0; 1604} 1605 1606void 1607interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset) 1608{ 1609 struct intr_source *is; 1610 1611 kcpuset_zero(cpuset); 1612 1613 is = e500_intr_get_source(intrid); 1614 if (is == NULL) 1615 return; 
1616 1617 mutex_enter(&e500_intr_lock); 1618 e500_intr_get_affinity(is, cpuset); 1619 mutex_exit(&e500_intr_lock); 1620} 1621 1622void 1623interrupt_get_available(kcpuset_t *cpuset) 1624{ 1625 CPU_INFO_ITERATOR cii; 1626 struct cpu_info *ci; 1627 1628 kcpuset_zero(cpuset); 1629 1630 mutex_enter(&cpu_lock); 1631 for (CPU_INFO_FOREACH(cii, ci)) { 1632 if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0) 1633 kcpuset_set(cpuset, cpu_index(ci)); 1634 } 1635 mutex_exit(&cpu_lock); 1636} 1637 1638void 1639interrupt_get_devname(const char *intrid, char *buf, size_t len) 1640{ 1641 struct intr_source *is; 1642 1643 if (len == 0) 1644 return; 1645 1646 buf[0] = '\0'; 1647 1648 is = e500_intr_get_source(intrid); 1649 if (is != NULL) 1650 strlcpy(buf, is->is_xname, len); 1651} 1652 1653struct intrids_handler * 1654interrupt_construct_intrids(const kcpuset_t *cpuset) 1655{ 1656 struct intr_source *is; 1657 struct intrids_handler *ii_handler; 1658 intrid_t *ids; 1659 int i, n; 1660 1661 if (kcpuset_iszero(cpuset)) 1662 return NULL; 1663 1664 n = 0; 1665 mutex_enter(&e500_intr_lock); 1666 for (is = e500_intr_sources; is < e500_intr_last_source; ++is) { 1667 if (e500_intr_is_affinity_intrsource(is, cpuset)) 1668 ++n; 1669 } 1670 mutex_exit(&e500_intr_lock); 1671 1672 const size_t alloc_size = sizeof(int) + sizeof(intrid_t) * n; 1673 ii_handler = kmem_zalloc(alloc_size, KM_SLEEP); 1674 ii_handler->iih_nids = n; 1675 if (n == 0) 1676 return ii_handler; 1677 1678 ids = ii_handler->iih_intrids; 1679 mutex_enter(&e500_intr_lock); 1680 for (i = 0, is = e500_intr_sources; 1681 i < n && is < e500_intr_last_source; 1682 ++is) { 1683 if (!e500_intr_is_affinity_intrsource(is, cpuset)) 1684 continue; 1685 1686 if (is->is_source[0] != '\0') { 1687 strlcpy(ids[i], is->is_source, sizeof(ids[0])); 1688 ++i; 1689 } 1690 } 1691 mutex_exit(&e500_intr_lock); 1692 1693 return ii_handler; 1694} 1695 1696void 1697interrupt_destruct_intrids(struct intrids_handler *ii_handler) 1698{ 1699 size_t 
iih_size; 1700 1701 if (ii_handler == NULL) 1702 return; 1703 1704 iih_size = sizeof(int) + sizeof(intrid_t) * ii_handler->iih_nids; 1705 kmem_free(ii_handler, iih_size); 1706} 1707 1708static int 1709interrupt_distribute_locked(struct intr_source *is, const kcpuset_t *newset, 1710 kcpuset_t *oldset) 1711{ 1712 int error; 1713 1714 KASSERT(mutex_owned(&cpu_lock)); 1715 1716 if (is->is_ipl == IPL_NONE || IST_PERCPU_P(is->is_ist)) 1717 return EINVAL; 1718 1719 mutex_enter(&e500_intr_lock); 1720 if (oldset != NULL) 1721 e500_intr_get_affinity(is, oldset); 1722 error = e500_intr_set_affinity(is, newset); 1723 mutex_exit(&e500_intr_lock); 1724 1725 return error; 1726} 1727 1728int 1729interrupt_distribute(void *ich, const kcpuset_t *newset, kcpuset_t *oldset) 1730{ 1731 int error; 1732 1733 mutex_enter(&cpu_lock); 1734 error = interrupt_distribute_locked(ich, newset, oldset); 1735 mutex_exit(&cpu_lock); 1736 1737 return error; 1738} 1739 1740int 1741interrupt_distribute_handler(const char *intrid, const kcpuset_t *newset, 1742 kcpuset_t *oldset) 1743{ 1744 struct intr_source *is; 1745 int error; 1746 1747 is = e500_intr_get_source(intrid); 1748 if (is != NULL) { 1749 mutex_enter(&cpu_lock); 1750 error = interrupt_distribute_locked(is, newset, oldset); 1751 mutex_exit(&cpu_lock); 1752 } else 1753 error = ENOENT; 1754 1755 return error; 1756} 1757