/*
 *	linux/arch/x86_64/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86_64-specific interrupt
 * entry and irq statistics code. All the remaining irq logic is
 * done by the generic kernel/irq/ code and in the
 * x86_64-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/io_apic.h>
#include <asm/idle.h>
#include <asm/smp.h>

atomic_t irq_err_count;

#ifdef CONFIG_DEBUG_STACKOVERFLOW
/*
 * Probabilistic stack overflow check:
 *
 * Only the process stack is checked, because everything else runs on
 * the big per-CPU interrupt stacks. Checking reliably on every kernel
 * entry would be too expensive, so we only probe when an interrupt
 * comes in.
 */
static inline void stack_overflow_check(struct pt_regs *regs)
{
	u64 curbase = (u64)task_stack_page(current);
	static unsigned long warned = -60*HZ;

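	/*
	 * Warn when rsp still points into the current task's stack but
	 * has come within 128 bytes of the thread_info at its bottom,
	 * and rate-limit the warning to once per minute.
	 */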
	if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE &&
	    regs->rsp <  curbase + sizeof(struct thread_info) + 128 &&
	    time_after(jiffies, warned + 60*HZ)) {
		printk("do_IRQ: %s near stack overflow (cur:%Lx,rsp:%lx)\n",
		       current->comm, curbase, regs->rsp);
		show_stack(NULL, NULL);
		warned = jiffies;
	}
}
#endif

/*
 * Generic, controller-independent functions:
 */

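/*
 * show_interrupts() backs /proc/interrupts: one line per IRQ with a
 * per-CPU count column, followed by the NMI, local APIC timer (LOC)
 * and error (ERR) counters.
 */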
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %8s", irq_desc[i].chip->name);
		seq_printf(p, "-%-8s", irq_desc[i].name);

		seq_printf(p, "  %s", action->name);
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		seq_printf(p, "NMI: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
		seq_putc(p, '\n');
		seq_printf(p, "LOC: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
		seq_putc(p, '\n');
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
	}
	return 0;
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/*
	 * The interrupt stubs push the vector number inverted (~vector)
	 * into orig_rax; the high bit lets the ret_from_ code recognize
	 * an interrupt frame, and inverting it again recovers the vector.
	 */
	unsigned vector = ~regs->orig_rax;
	unsigned irq;

	exit_idle();
	irq_enter();
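	/*
	 * vector_irq[] is a per-CPU table, filled in by the interrupt
	 * controller setup code, mapping a hardware vector number to
	 * the Linux IRQ number assigned to it.
	 */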
	irq = __get_cpu_var(vector_irq)[vector];

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	stack_overflow_check(regs);
#endif

	if (likely(irq < NR_IRQS))
		generic_handle_irq(irq);
	else {
		if (!disable_apic)
			ack_APIC_irq();

		if (printk_ratelimit())
			printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
				__func__, smp_processor_id(), vector);
	}

	irq_exit();

	set_irq_regs(old_regs);
	return 1;
}

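/*
 * Called while taking a CPU offline: re-target every IRQ whose affinity
 * still points at CPUs that are going away. 'map' is the set of CPUs
 * that will remain online.
 */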
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(cpumask_t map)
{
	unsigned int irq;
	static int warned;

	for (irq = 0; irq < NR_IRQS; irq++) {
		cpumask_t mask;
		int break_affinity = 0;
		int set_affinity = 1;

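		/* IRQ 2 is the 8259 cascade and never has a real handler. */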
		if (irq == 2)
			continue;

		/* Interrupts are disabled at this point */
		spin_lock(&irq_desc[irq].lock);

		if (!irq_has_action(irq) ||
		    cpus_equal(irq_desc[irq].affinity, map)) {
			spin_unlock(&irq_desc[irq].lock);
			continue;
		}

		cpus_and(mask, irq_desc[irq].affinity, map);
		if (cpus_empty(mask)) {
			break_affinity = 1;
			mask = map;
		}

		if (irq_desc[irq].chip->mask)
			irq_desc[irq].chip->mask(irq);

		if (irq_desc[irq].chip->set_affinity)
			irq_desc[irq].chip->set_affinity(irq, mask);
		else if (!(warned++))
			set_affinity = 0;

		if (irq_desc[irq].chip->unmask)
			irq_desc[irq].chip->unmask(irq);

		spin_unlock(&irq_desc[irq].lock);

		if (break_affinity && set_affinity)
			printk("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			printk("Cannot set affinity for irq %i\n", irq);
	}

	/*
	 * Re-targeting the IRQs above may not catch interrupts already
	 * in flight to this CPU, so give it 1ms with interrupts enabled
	 * to let them be serviced before the CPU goes fully offline.
	 */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif

extern void call_softirq(void);

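/*
 * call_softirq() is an assembly stub in entry.S that switches to the
 * per-CPU interrupt stack and runs __do_softirq() there, keeping deep
 * softirq processing off the current task's stack.
 */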
asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	pending = local_softirq_pending();
	/* Switch to interrupt stack */
	if (pending) {
		call_softirq();
		WARN_ON_ONCE(softirq_count());
	}
	local_irq_restore(flags);
}
EXPORT_SYMBOL(do_softirq);