/*
 *  linux/arch/arm/kernel/irq.c
 *
 *  Copyright (C) 1992 Linus Torvalds
 *  Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
 *
 *  Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation.
 *  Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and
 *  Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This file contains the code used by various IRQ handling routines:
 *  asking for different IRQ's should be done through these routines
 *  instead of just grabbing them. Thus setups with different IRQ numbers
 *  shouldn't result in any weird surprises, and installing new handlers
 *  should be easier.
 *
 *  IRQ's are in fact implemented a bit like signal handlers for the kernel.
 *  Naturally it's not a 1:1 relation, but there are similarities.
 */
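/*
 * Illustrative sketch, not part of the original file: the header comment
 * above says drivers should ask for IRQs through the generic routines
 * rather than grabbing them directly.  A typical driver does that with
 * request_irq()/free_irq().  The IRQ number, handler and device cookie
 * below are hypothetical, and the block is kept under #if 0 so it is
 * never built.
 */
#if 0
#include <linux/interrupt.h>

static irqreturn_t example_isr(int irq, void *dev_id)
{
	/* acknowledge the device and do the minimum work needed here */
	return IRQ_HANDLED;
}

static int example_claim_irq(void *dev)
{
	/* the descriptors managed by this file arbitrate ownership of IRQ 42 */
	return request_irq(42, example_isr, IRQF_SHARED, "example", dev);
}

static void example_release_irq(void *dev)
{
	free_irq(42, dev);
}
#endif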
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>

#include <asm/system.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

#include <typedefs.h>
#include <bcmdefs.h>

/*
 * No architecture-specific irq_finish function defined in arm/arch/irqs.h.
 */
#ifndef irq_finish
#define irq_finish(irq) do { } while (0)
#endif

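/*
 * init_arch_irq and arch_nr_irqs are supplied by the platform (from the
 * machine descriptor during architecture setup); irq_err_count tallies
 * bad/spurious interrupts and is reported at the end of /proc/interrupts.
 */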
unsigned int arch_nr_irqs;
void (*init_arch_irq)(void) __initdata = NULL;
unsigned long irq_err_count;

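/*
 * seq_file backend for /proc/interrupts: a header row of CPU names, one
 * row per requested IRQ with per-CPU counts plus the chip and action
 * names, then the FIQ/IPI/local-IRQ summaries and the error count.
 */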
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, cpu;
	struct irq_desc *desc;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		char cpuname[12];

		seq_printf(p, "    ");
		for_each_present_cpu(cpu) {
			sprintf(cpuname, "CPU%d", cpu);
			seq_printf(p, " %10s", cpuname);
		}
		seq_putc(p, '\n');
	}

	if (i < nr_irqs) {
		desc = irq_to_desc(i);
		raw_spin_lock_irqsave(&desc->lock, flags);
		action = desc->action;
		if (!action)
			goto unlock;

		seq_printf(p, "%3d: ", i);
		for_each_present_cpu(cpu)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
		seq_printf(p, " %10s", desc->chip->name ? : "-");
		seq_printf(p, "  %s", action->name);
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
unlock:
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	} else if (i == nr_irqs) {
#ifdef CONFIG_FIQ
		show_fiq_list(p, v);
#endif
#ifdef CONFIG_SMP
		show_ipi_list(p);
		show_local_irqs(p);
#endif
		seq_printf(p, "Err: %10lu\n", irq_err_count);
	}
	return 0;
}

/*
 * do_IRQ handles all hardware IRQ's.  Decoded IRQs should not
 * come via this function.  Instead, they should provide their
 * own 'handler'
 */
asmlinkage void BCMFASTPATH asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(irq >= nr_irqs)) {
		if (printk_ratelimit())
			printk(KERN_WARNING "Bad IRQ%u\n", irq);
		ack_bad_irq(irq);
	} else {
		generic_handle_irq(irq);
	}

	irq_finish(irq);

	irq_exit();
	set_irq_regs(old_regs);
}
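/*
 * Illustrative sketch, not part of the original file: as the comment above
 * notes, interrupts that have already been decoded should not be fed back
 * through asm_do_IRQ().  A common pattern is a chained handler on the
 * parent line that demultiplexes a secondary controller and hands each
 * decoded IRQ straight to generic_handle_irq().  The IRQ numbers, names
 * and pending-register helper are hypothetical; kept under #if 0 so it is
 * never built.
 */
#if 0
#define EXAMPLE_PARENT_IRQ	17
#define EXAMPLE_IRQ_BASE	64

static unsigned long example_read_pending(void)
{
	/* read the secondary controller's pending-bits register here */
	return 0;
}

static void example_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	unsigned long pending = example_read_pending();

	while (pending) {
		unsigned int bit = __ffs(pending);

		/* already decoded: dispatch directly, bypassing asm_do_IRQ() */
		generic_handle_irq(EXAMPLE_IRQ_BASE + bit);
		pending &= ~(1UL << bit);
	}
}

static void __init example_init_secondary_irqs(void)
{
	/* hand the parent line over to the demux routine above */
	set_irq_chained_handler(EXAMPLE_PARENT_IRQ, example_demux_handler);
}
#endif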
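/*
 * set_irq_flags() is how ARM machine and chip setup code marks an IRQ as
 * usable: IRQF_VALID makes it requestable, IRQF_PROBE makes it eligible
 * for autoprobing, and IRQF_NOAUTOEN keeps the line from being enabled
 * automatically when it is first requested.
 */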
void set_irq_flags(unsigned int irq, unsigned int iflags)
{
	struct irq_desc *desc;
	unsigned long flags;

	if (irq >= nr_irqs) {
		printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
		return;
	}

	desc = irq_to_desc(irq);
	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	if (iflags & IRQF_VALID)
		desc->status &= ~IRQ_NOREQUEST;
	if (iflags & IRQF_PROBE)
		desc->status &= ~IRQ_NOPROBE;
	if (!(iflags & IRQF_NOAUTOEN))
		desc->status &= ~IRQ_NOAUTOEN;
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

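/*
 * Early interrupt setup: allocate a descriptor for every possible IRQ,
 * mark them all unrequestable and unprobeable until platform code claims
 * them, then hand over to the machine-specific init_arch_irq() hook.
 */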
void __init init_IRQ(void)
{
	struct irq_desc *desc;
	int irq;

	for (irq = 0; irq < nr_irqs; irq++) {
		desc = irq_to_desc_alloc_node(irq, 0);
		desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
	}

	init_arch_irq();
}

#ifdef CONFIG_SPARSE_IRQ
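/*
 * With sparse IRQs the core asks the architecture how many IRQs to
 * provide for; use the machine-supplied arch_nr_irqs if it is set,
 * otherwise fall back to the static NR_IRQS.
 */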
int __init arch_probe_nr_irqs(void)
{
	nr_irqs = arch_nr_irqs ? arch_nr_irqs : NR_IRQS;
	return 0;
}
#endif

#ifdef CONFIG_HOTPLUG_CPU

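/*
 * Re-point a single IRQ at a new CPU by calling the controlling chip's
 * set_affinity() method under the descriptor lock.
 */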
static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
{
	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu);

	raw_spin_lock_irq(&desc->lock);
	desc->chip->set_affinity(irq, cpumask_of(cpu));
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
 * the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 */
void migrate_irqs(void)
{
	unsigned int i, cpu = smp_processor_id();
	struct irq_desc *desc;

	for_each_irq_desc(i, desc) {
		if (desc->node == cpu) {
			unsigned int newcpu = cpumask_any_and(desc->affinity,
							      cpu_online_mask);
			if (newcpu >= nr_cpu_ids) {
				if (printk_ratelimit())
					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
					       i, cpu);

				cpumask_setall(desc->affinity);
				newcpu = cpumask_any_and(desc->affinity,
							 cpu_online_mask);
			}

			route_irq(desc, i, newcpu);
		}
	}
}
#endif /* CONFIG_HOTPLUG_CPU */