/* Modified by Broadcom Corp. Portions Copyright (c) Broadcom Corp, 2012. */
/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */

#include <linux/irq.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>
#include <linux/radix-tree.h>
#include <trace/events/irq.h>

#if defined(CONFIG_BUZZZ)
#include <asm/buzzz.h>
#endif /* CONFIG_BUZZZ */

#include "internals.h"

#include <typedefs.h>
#include <bcmdefs.h>

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;

/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq:	the interrupt number
 * @desc:	description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
#if defined(BUZZZ_KEVT_LVL) && (BUZZZ_KEVT_LVL >= 1)
	buzzz_kevt_log1(BUZZZ_KEVT_ID_IRQ_BAD, irq);
#endif /* BUZZZ_KEVT_LVL */

	print_irq_desc(irq, desc);
	kstat_incr_irqs_this_cpu(irq, desc);
	ack_bad_irq(irq);
}

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template', that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 *
 * Controller mappings for all interrupt sources:
 */
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);
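/*
 * Example (illustrative sketch; the "foo" names are hypothetical): since
 * every interrupt source is wired to its controller behind the irq_desc
 * layer, a driver only deals with an irq number and the generic
 * request_irq() API, never with the controller itself:
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	if (request_irq(irq, foo_interrupt, IRQF_SHARED, "foo", foo_dev))
 *		goto err_no_irq;
 */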
#ifdef CONFIG_SPARSE_IRQ

static struct irq_desc irq_desc_init = {
	.irq		= -1,
	.status		= IRQ_DISABLED,
	.chip		= &no_irq_chip,
	.handle_irq	= handle_bad_irq,
	.depth		= 1,
	.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};

void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
{
	void *ptr;

	ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
			   GFP_ATOMIC, node);

	/*
	 * don't overwrite if we can not get a new one;
	 * init_copy_kstat_irqs() could still use the old one
	 */
	if (ptr) {
		printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node);
		desc->kstat_irqs = ptr;
	}
}

static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
{
	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

	raw_spin_lock_init(&desc->lock);
	desc->irq = irq;
#ifdef CONFIG_SMP
	desc->node = node;
#endif
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	init_kstat_irqs(desc, node, nr_cpu_ids);
	if (!desc->kstat_irqs) {
		printk(KERN_ERR "can not alloc kstat_irqs\n");
		BUG_ON(1);
	}
	if (!alloc_desc_masks(desc, node, false)) {
		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
		BUG_ON(1);
	}
	init_desc_masks(desc);
	arch_init_chip_data(desc, node);
}

/*
 * Protect the sparse_irqs:
 */
DEFINE_RAW_SPINLOCK(sparse_irq_lock);

static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);

static void set_irq_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}

void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
{
	void **ptr;

	ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
	if (ptr)
		radix_tree_replace_slot(ptr, desc);
}
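/*
 * Example (illustrative sketch): generic code resolves an irq number to
 * its descriptor through the radix tree above, then serializes on the
 * per-descriptor lock; "irq" is any valid interrupt number:
 *
 *	struct irq_desc *desc = irq_to_desc(irq);
 *	unsigned long flags;
 *	unsigned int status;
 *
 *	if (desc) {
 *		raw_spin_lock_irqsave(&desc->lock, flags);
 *		status = desc->status;
 *		raw_spin_unlock_irqrestore(&desc->lock, flags);
 *	}
 */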
static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS_LEGACY-1] = {
		.irq		= -1,
		.status		= IRQ_DISABLED,
		.chip		= &no_irq_chip,
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
	}
};

static unsigned int *kstat_irqs_legacy;

int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int legacy_count;
	int node;
	int i;

	init_irq_default_affinity();

	/* initialize nr_irqs based on nr_cpu_ids */
	arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);

	desc = irq_desc_legacy;
	legacy_count = ARRAY_SIZE(irq_desc_legacy);
	node = first_online_node;

	/* allocate based on nr_cpu_ids */
	kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
					 sizeof(int), GFP_NOWAIT, node);

	for (i = 0; i < legacy_count; i++) {
		desc[i].irq = i;
#ifdef CONFIG_SMP
		desc[i].node = node;
#endif
		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		alloc_desc_masks(&desc[i], node, true);
		init_desc_masks(&desc[i]);
		set_irq_desc(i, &desc[i]);
	}

	return arch_early_irq_init();
}

struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
	struct irq_desc *desc;
	unsigned long flags;

	if (irq >= nr_irqs) {
		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
			irq, nr_irqs);
		return NULL;
	}

	desc = irq_to_desc(irq);
	if (desc)
		return desc;

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);

	/* We have to check it again to avoid races with another CPU */
	desc = irq_to_desc(irq);
	if (desc)
		goto out_unlock;

	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);

	printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node);
	if (!desc) {
		printk(KERN_ERR "can not alloc irq_desc\n");
		BUG_ON(1);
	}
	init_one_irq_desc(irq, desc, node);

	set_irq_desc(irq, desc);

out_unlock:
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

	return desc;
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status		= IRQ_DISABLED,
		.chip		= &no_irq_chip,
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int count;
	int i;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].irq = i;
		alloc_desc_masks(&desc[i], 0, true);
		init_desc_masks(&desc[i]);
		desc[i].kstat_irqs = kstat_irqs_all[i];
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
	return irq_to_desc(irq);
}
#endif /* !CONFIG_SPARSE_IRQ */

void clear_kstat_irqs(struct irq_desc *desc)
{
	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
}
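/*
 * Example (illustrative sketch): architecture code that brings a new
 * interrupt into service makes sure a descriptor exists first; with
 * CONFIG_SPARSE_IRQ this may allocate one, without it the call is only
 * a bounds check against the static array:
 *
 *	struct irq_desc *desc = irq_to_desc_alloc_node(irq, node);
 *
 *	if (!desc)
 *		return -EINVAL;
 */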
/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this for itself.
 */
static void ack_bad(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

#if defined(BUZZZ_KEVT_LVL) && (BUZZZ_KEVT_LVL >= 1)
	buzzz_kevt_log1(BUZZZ_KEVT_ID_IRQ_ACK_BAD, irq);
#endif /* BUZZZ_KEVT_LVL */

	print_irq_desc(irq, desc);
	ack_bad_irq(irq);
}

/*
 * NOP functions
 */
static void noop(unsigned int irq)
{
}

static unsigned int noop_ret(unsigned int irq)
{
	return 0;
}

/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
	.name		= "none",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= ack_bad,
	.end		= noop,
};

/*
 * Generic dummy implementation which can be used for
 * real dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
	.name		= "dummy",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= noop,
	.mask		= noop,
	.unmask		= noop,
	.end		= noop,
};

/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
	return IRQ_NONE;
}

static void warn_no_thread(unsigned int irq, struct irqaction *action)
{
	if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
		return;

	printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
	       "but no thread function available.", irq, action->name);
}
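/*
 * Example (illustrative sketch): platform code can back a "dumb"
 * interrupt source, one needing no hardware ack/mask handshake, with
 * dummy_irq_chip and a simple flow handler:
 *
 *	set_irq_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
 */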
/**
 * handle_IRQ_event - irq action chain handler
 * @irq:	the interrupt number
 * @action:	the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t BCMFASTPATH handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
	irqreturn_t ret, retval = IRQ_NONE;
	unsigned int status = 0;

	do {
		trace_irq_handler_entry(irq, action);

#if defined(BUZZZ_KEVT_LVL) && (BUZZZ_KEVT_LVL >= 1)
		buzzz_kevt_log2(BUZZZ_KEVT_ID_IRQ_ENTRY, irq, (int)(action->handler));
#endif /* BUZZZ_KEVT_LVL */

		ret = action->handler(irq, action->dev_id);

#if defined(BUZZZ_KEVT_LVL) && (BUZZZ_KEVT_LVL >= 1)
		buzzz_kevt_log2(BUZZZ_KEVT_ID_IRQ_EXIT, irq, (int)(action->handler));
#endif /* BUZZZ_KEVT_LVL */

		trace_irq_handler_exit(irq, action, ret);

		switch (ret) {
		case IRQ_WAKE_THREAD:
			/*
			 * Set result to handled so the spurious check
			 * does not trigger.
			 */
			ret = IRQ_HANDLED;

			/*
			 * Catch drivers which return WAKE_THREAD but
			 * did not set up a thread function
			 */
			if (unlikely(!action->thread_fn)) {
				warn_no_thread(irq, action);
				break;
			}

			/*
			 * Wake up the handler thread for this
			 * action. In case the thread crashed and was
			 * killed we just pretend that we handled the
			 * interrupt. The hardirq handler above has
			 * disabled the device interrupt, so no irq
			 * storm is lurking.
			 */
			if (likely(!test_bit(IRQTF_DIED,
					     &action->thread_flags))) {
				set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
				wake_up_process(action->thread);
			}

			/* Fall through to add to randomness */
		case IRQ_HANDLED:
			status |= action->flags;
			break;

		default:
			break;
		}

		retval |= ret;
		action = action->next;
	} while (action);

	if (status & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	return retval;
}
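/*
 * Example (illustrative sketch; the "foo" names are hypothetical): the
 * IRQ_WAKE_THREAD handling above pairs with request_threaded_irq(). The
 * primary handler runs in hardirq context and defers the heavy lifting
 * to a handler thread running in process context:
 *
 *	static irqreturn_t foo_quick_check(int irq, void *dev_id)
 *	{
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(irq, foo_quick_check, foo_thread_fn,
 *				   IRQF_SHARED, "foo", foo_dev);
 */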
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ

#ifdef CONFIG_ENABLE_WARN_DEPRECATED
# warning __do_IRQ is deprecated. Please convert to proper flow handlers
#endif

/**
 * __do_IRQ - original all in one highlevel IRQ handler
 * @irq:	the interrupt number
 *
 * __do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
unsigned int BCMFASTPATH __do_IRQ(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned int status;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (CHECK_IRQ_PER_CPU(desc->status)) {
		irqreturn_t action_ret;

		/*
		 * No locking required for CPU-local interrupts:
		 */
		if (desc->chip->ack)
			desc->chip->ack(irq);
		if (likely(!(desc->status & IRQ_DISABLED))) {
			action_ret = handle_IRQ_event(irq, desc->action);
			if (!noirqdebug)
				note_interrupt(irq, desc, action_ret);
		}
		desc->chip->end(irq);
		return 1;
	}

	raw_spin_lock(&desc->lock);
	if (desc->chip->ack)
		desc->chip->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		irqreturn_t action_ret;

		raw_spin_unlock(&desc->lock);

		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);

		raw_spin_lock(&desc->lock);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->chip->end(irq);
	raw_spin_unlock(&desc->lock);

	return 1;
}
#endif

void early_init_irq_lock_class(void)
{
	struct irq_desc *desc;
	int i;

	for_each_irq_desc(i, desc) {
		lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	}
}

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);
	return desc ? desc->kstat_irqs[cpu] : 0;
}
EXPORT_SYMBOL(kstat_irqs_cpu);
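/*
 * Example (illustrative sketch): /proc/interrupts-style reporting sums
 * the per-CPU counters exported above:
 *
 *	unsigned int cpu, sum = 0;
 *
 *	for_each_possible_cpu(cpu)
 *		sum += kstat_irqs_cpu(irq, cpu);
 */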