/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
#include <linux/kmemleak.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>

#include "entry.h"
#include "cpumap.h"
#include "kstack.h"

#define NUM_IVECS	(IMAP_INR + 1)

struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;

/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}

static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

static void bucket_set_virt_irq(unsigned long bucket_pa,
				unsigned int virt_irq)
{
	__asm__ __volatile__("stwa	%0, [%1] %2"
			     : /* no outputs */
			     : "r" (virt_irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));
}

#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)

static struct {
	unsigned int dev_handle;
	unsigned int dev_ino;
	unsigned int in_use;
} virt_irq_table[NR_IRQS];
static DEFINE_SPINLOCK(virt_irq_alloc_lock);

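/* Allocate a free slot in virt_irq_table[] for the given
 * (dev_handle, dev_ino) pair and return it as the virtual IRQ
 * number.  Slot 0 is reserved so that a return value of zero
 * means the table is full.
 */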
unsigned char virt_irq_alloc(unsigned int dev_handle,
			     unsigned int dev_ino)
{
	unsigned long flags;
	unsigned char ent;

	BUILD_BUG_ON(NR_IRQS >= 256);

	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

	for (ent = 1; ent < NR_IRQS; ent++) {
		if (!virt_irq_table[ent].in_use)
			break;
	}
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		ent = 0;
	} else {
		virt_irq_table[ent].dev_handle = dev_handle;
		virt_irq_table[ent].dev_ino = dev_ino;
		virt_irq_table[ent].in_use = 1;
	}

	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);

	return ent;
}

#ifdef CONFIG_PCI_MSI
void virt_irq_free(unsigned int virt_irq)
{
	unsigned long flags;

	if (virt_irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

	virt_irq_table[virt_irq].in_use = 0;

	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
}
#endif

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
		seq_printf(p, " %9s", irq_desc[i].chip->name);
		seq_printf(p, "  %s", action->name);

		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		seq_printf(p, "NMI: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
		seq_printf(p, "     Non-maskable interrupts\n");
	}
	return 0;
}

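/* Compute the interrupt target ID (TID) bits for an IMAP register.
 * The encoding depends on the system bus: Starfire translates the
 * cpuid through its port mapping, Jalapeno/Serrano use the JBUS
 * encoding, other Cheetahs split the cpuid into Safari agent and
 * node IDs, and everything else uses the plain UPA TID field.
 */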
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}

struct irq_handler_data {
	unsigned long	iclr;
	unsigned long	imap;

	void		(*pre_handler)(unsigned int, void *, void *);
	void		*arg1;
	void		*arg2;
};

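/* Pick the CPU that should receive this interrupt.  If the affinity
 * mask covers all online CPUs, spread interrupts via map_to_cpu();
 * otherwise use the first online CPU in the mask, falling back to
 * map_to_cpu() if none of them is online.  On UP there is only one
 * possible target.
 */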
#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq, const struct cpumask *affinity)
{
	cpumask_t mask;
	int cpuid;

	cpumask_copy(&mask, affinity);
	if (cpus_equal(mask, cpu_online_map)) {
		cpuid = map_to_cpu(virt_irq);
	} else {
		cpumask_t tmp;

		cpus_and(tmp, cpu_online_map, mask);
		cpuid = cpus_empty(tmp) ? map_to_cpu(virt_irq) : first_cpu(tmp);
	}

	return cpuid;
}
#else
#define irq_choose_cpu(virt_irq, affinity)	\
	real_hard_smp_processor_id()
#endif

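/* Enable a sun4u interrupt by programming its IMAP register: clear
 * the old target ID bits, install the TID of the chosen CPU, set the
 * Valid bit, then hit ICLR to put the interrupt state machine back
 * to idle so the next interrupt can be sent.
 */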
static void sun4u_irq_enable(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);

	if (likely(data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(virt_irq,
				       irq_desc[virt_irq].affinity);
		imap = data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, data->iclr);
	}
}

static int sun4u_set_affinity(unsigned int virt_irq,
			       const struct cpumask *mask)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);

	if (likely(data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(virt_irq, mask);
		imap = data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
		upa_writeq(ICLR_IDLE, data->iclr);
	}

	return 0;
}

/* Don't do anything.  The desc->status check for IRQ_DISABLED in
 * handler_irq() will skip the handler call and that will leave the
 * interrupt in the sent state.  The next ->enable() call will hit the
 * ICLR register to reset the state machine.
 *
 * This scheme is necessary, instead of clearing the Valid bit in the
 * IMAP register, to handle the case of IMAP registers being shared by
 * multiple INOs (and thus ICLR registers).  Since we use a different
 * virtual IRQ for each shared IMAP instance, the generic code thinks
 * there is only one user so it prematurely calls ->disable() on
 * free_irq().
 *
 * We have to provide an explicit ->disable() method instead of using
 * NULL to get the default.  The reason is that if the generic code
 * sees that, it also hooks up a default ->shutdown method which
 * invokes ->mask() which we do not want.  See irq_chip_set_defaults().
 */
static void sun4u_irq_disable(unsigned int virt_irq)
{
}

static void sun4u_irq_eoi(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	struct irq_desc *desc = irq_desc + virt_irq;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	if (likely(data))
		upa_writeq(ICLR_IDLE, data->iclr);
}

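/* On sun4v the IMAP/ICLR registers are owned by the hypervisor, so
 * interrupts are targeted, enabled, and acknowledged with hypervisor
 * calls keyed by the interrupt's system INO.
 */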
static void sun4v_irq_enable(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(virt_irq,
					     irq_desc[virt_irq].affinity);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
}

static int sun4v_set_affinity(unsigned int virt_irq,
			       const struct cpumask *mask)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(virt_irq, mask);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);

	return 0;
}

static void sun4v_irq_disable(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	int err;

	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
		       "err(%d)\n", ino, err);
}

static void sun4v_irq_eoi(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	struct irq_desc *desc = irq_desc + virt_irq;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
}

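/* Virtual (cookie-based) sun4v interrupts are addressed by their
 * (dev_handle, dev_ino) pair rather than by a system INO, using the
 * sun4v_vintr_*() hypervisor calls.
 */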
static void sun4v_virq_enable(unsigned int virt_irq)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(virt_irq, irq_desc[virt_irq].affinity);

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
		       "HV_INTR_ENABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static int sun4v_virt_set_affinity(unsigned int virt_irq,
				    const struct cpumask *mask)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(virt_irq, mask);

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);

	return 0;
}

static void sun4v_virq_disable(unsigned int virt_irq)
{
	unsigned long dev_handle, dev_ino;
	int err;

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
		       "HV_INTR_DISABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static void sun4v_virq_eoi(unsigned int virt_irq)
{
	struct irq_desc *desc = irq_desc + virt_irq;
	unsigned long dev_handle, dev_ino;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
}

static struct irq_chip sun4u_irq = {
	.name		= "sun4u",
	.enable		= sun4u_irq_enable,
	.disable	= sun4u_irq_disable,
	.eoi		= sun4u_irq_eoi,
	.set_affinity	= sun4u_set_affinity,
};

static struct irq_chip sun4v_irq = {
	.name		= "sun4v",
	.enable		= sun4v_irq_enable,
	.disable	= sun4v_irq_disable,
	.eoi		= sun4v_irq_eoi,
	.set_affinity	= sun4v_set_affinity,
};

static struct irq_chip sun4v_virq = {
	.name		= "vsun4v",
	.enable		= sun4v_virq_enable,
	.disable	= sun4v_virq_disable,
	.eoi		= sun4v_virq_eoi,
	.set_affinity	= sun4v_virt_set_affinity,
};

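/* Flow handler that first invokes the device-specific pre-handler
 * installed by irq_install_pre_handler(), then falls through to the
 * normal fasteoi flow.
 */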
static void pre_flow_handler(unsigned int virt_irq,
				      struct irq_desc *desc)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;

	data->pre_handler(ino, data->arg1, data->arg2);

	handle_fasteoi_irq(virt_irq, desc);
}

void irq_install_pre_handler(int virt_irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	struct irq_desc *desc = irq_desc + virt_irq;

	data->pre_handler = func;
	data->arg1 = arg1;
	data->arg2 = arg2;

	desc->handle_irq = pre_flow_handler;
}

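/* Map a sun4u (IMAP, ICLR) register pair to a virtual IRQ.  The INO
 * read from the IMAP register indexes ivector_table; a virtual IRQ
 * and its irq_handler_data are allocated on first use and reused on
 * subsequent calls for the same INO.
 */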
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	unsigned int virt_irq;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
		virt_irq = virt_irq_alloc(0, ino);
		bucket_set_virt_irq(__pa(bucket), virt_irq);
		set_irq_chip_and_handler_name(virt_irq,
					      &sun4u_irq,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	data = get_irq_chip_data(virt_irq);
	if (unlikely(data))
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(virt_irq, data);

	data->imap  = imap;
	data->iclr  = iclr;

out:
	return virt_irq;
}

static unsigned int sun4v_build_common(unsigned long sysino,
				       struct irq_chip *chip)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	unsigned int virt_irq;

	BUG_ON(tlb_type != hypervisor);

	bucket = &ivector_table[sysino];
	virt_irq = bucket_get_virt_irq(__pa(bucket));
	if (!virt_irq) {
		virt_irq = virt_irq_alloc(0, sysino);
		bucket_set_virt_irq(__pa(bucket), virt_irq);
		set_irq_chip_and_handler_name(virt_irq, chip,
					      handle_fasteoi_irq,
					      "IVEC");
	}

	data = get_irq_chip_data(virt_irq);
	if (unlikely(data))
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(virt_irq, data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;

out:
	return virt_irq;
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

	return sun4v_build_common(sysino, &sun4v_irq);
}

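/* Build a cookie-based virtual interrupt.  A dedicated ino_bucket is
 * allocated, and the complement of its physical address is registered
 * with the hypervisor as the cookie that will be delivered in the
 * device mondo when this interrupt fires.
 */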
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	struct irq_handler_data *data;
	unsigned long hv_err, cookie;
	struct ino_bucket *bucket;
	struct irq_desc *desc;
	unsigned int virt_irq;

	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
	if (unlikely(!bucket))
		return 0;

	/* The only reference we store to the IRQ bucket is its
	 * physical address, which kmemleak can't see; tell kmemleak
	 * explicitly that this object is not a leak and that it
	 * should be scanned.
	 */
	kmemleak_not_leak(bucket);

	__flush_dcache_range((unsigned long) bucket,
			     ((unsigned long) bucket +
			      sizeof(struct ino_bucket)));

	virt_irq = virt_irq_alloc(devhandle, devino);
	bucket_set_virt_irq(__pa(bucket), virt_irq);

	set_irq_chip_and_handler_name(virt_irq, &sun4v_virq,
				      handle_fasteoi_irq,
				      "IVEC");

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data))
		return 0;

	/* In order to make the LDC channel startup sequence easier,
	 * especially wrt. locking, we do not let request_irq() enable
	 * the interrupt.
	 */
	desc = irq_desc + virt_irq;
	desc->status |= IRQ_NOAUTOEN;

	set_irq_chip_data(virt_irq, data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;

	cookie = ~__pa(bucket);
	hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
	if (hv_err) {
		prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
			    "err=%lu\n", devhandle, devino, hv_err);
		prom_halt();
	}

	return virt_irq;
}

void ack_bad_irq(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;

	if (!ino)
		ino = 0xdeadbeef;

	printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
	       ino, virt_irq);
}

void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];

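/* Top-level IVEC dispatcher, run with the corresponding softint
 * pending.  It atomically snapshots and clears this CPU's irq work
 * list, switches to the per-cpu hardirq stack, and walks the chain
 * of pending INO buckets, invoking the flow handler of each one that
 * is not disabled.
 */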
void __irq_entry handler_irq(int irq, struct pt_regs *regs)
{
	unsigned long pstate, bucket_pa;
	struct pt_regs *old_regs;
	void *orig_sp;

	clear_softint(1 << irq);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Grab an atomic snapshot of the pending IVECs.  */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %3, %%pstate\n\t"
			     "ldx	[%2], %1\n\t"
			     "stx	%%g0, [%2]\n\t"
			     "wrpr	%0, 0x0, %%pstate\n\t"
			     : "=&r" (pstate), "=&r" (bucket_pa)
			     : "r" (irq_work_pa(smp_processor_id())),
			       "i" (PSTATE_IE)
			     : "memory");

	orig_sp = set_hardirq_stack();

	while (bucket_pa) {
		struct irq_desc *desc;
		unsigned long next_pa;
		unsigned int virt_irq;

		next_pa = bucket_get_chain_pa(bucket_pa);
		virt_irq = bucket_get_virt_irq(bucket_pa);
		bucket_clear_chain_pa(bucket_pa);

		desc = irq_desc + virt_irq;

		if (!(desc->status & IRQ_DISABLED))
			desc->handle_irq(virt_irq, desc);

		bucket_pa = next_pa;
	}

	restore_hardirq_stack(orig_sp);

	irq_exit();
	set_irq_regs(old_regs);
}

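/* Run pending softirqs on the per-cpu softirq stack.  The inline
 * assembly swaps %sp to the top of that stack (leaving room for the
 * register save area and the stack bias) around the __do_softirq()
 * call, then restores the original stack pointer.
 */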
void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		void *orig_sp, *sp = softirq_stack[smp_processor_id()];

		sp += THREAD_SIZE - 192 - STACK_BIAS;

		__asm__ __volatile__("mov %%sp, %0\n\t"
				     "mov %1, %%sp"
				     : "=&r" (orig_sp)
				     : "r" (sp));
		__do_softirq();
		__asm__ __volatile__("mov %0, %%sp"
				     : : "r" (orig_sp));
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_HOTPLUG_CPU
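/* Called on a CPU that is going offline: re-run ->set_affinity() for
 * every IRQ with a handler (except per-cpu ones) so interrupts get
 * retargeted to a CPU that stays online, then shut off this CPU's
 * timer interrupt.
 */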
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		unsigned long flags;

		raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
		if (irq_desc[irq].action &&
		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
			if (irq_desc[irq].chip->set_affinity)
				irq_desc[irq].chip->set_affinity(irq,
					irq_desc[irq].affinity);
		}
		raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
	}

	tick_ops->disable_irq();
}
#endif

struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	struct device_node *dp;
	const unsigned int *addr;

	/* PROM timer node hangs out in the top level of device siblings... */
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (!strcmp(dp->name, "counter-timer"))
			break;
		dp = dp->sibling;
	}

	/* Assume that if the node is not present, the PROM uses a
	 * different tick mechanism which we need not care about.
	 */
	if (!dp) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If the PROM is really using this timer, it must have mapped it. */
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as on sun4c/sun4m, the PROM uses a timer which ticks
	 * at IRQ 14.  We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

void notrace init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist_pa = 0UL;
}

/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}

void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}

/* Each queue region must be a power of 2 multiple of 64 bytes in
 * size.  The base real address must be aligned to the size of the
 * region.  Thus, an 8KB queue must be 8KB aligned, for example.
 */
static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	unsigned long order = get_order(size);
	unsigned long p;

	p = __get_free_pages(GFP_KERNEL, order);
	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

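/* Lay out the per-cpu page used for sending mondos: the first 64
 * bytes hold the mondo data block, and the CPU list used for
 * multi-target sends follows it in the same page (hence the
 * BUILD_BUG_ON sanity check below).
 */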
static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
	unsigned long page;

	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	page = get_zeroed_page(GFP_KERNEL);
	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}

/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
		alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
		alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
		alloc_one_queue(&tb->nonresum_kernel_buf_pa,
				tb->nonresum_qmask);
	}
}

static void __init init_send_mondo_info(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		init_cpu_send_mondo_info(tb);
	}
}

static struct irqaction timer_irq_action = {
	.name = "timer",
};

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	unsigned long size;

	map_prom_timers();
	kill_prom_timer();

	size = sizeof(struct ino_bucket) * NUM_IVECS;
	ivector_table = kzalloc(size, GFP_KERNEL);
	if (!ivector_table) {
		prom_printf("Fatal error, cannot allocate ivector_table\n");
		prom_halt();
	}
	__flush_dcache_range((unsigned long) ivector_table,
			     ((unsigned long) ivector_table) + size);

	ivector_table_pa = __pa(ivector_table);

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	init_send_mondo_info();

	if (tlb_type == hypervisor) {
		/* Load up the boot cpu's entries.  */
		sun4v_register_mondo_queues(hard_smp_processor_id());
	}

	/* We need to clear any IRQs pending in the soft interrupt
	 * registers, since a spurious one could have been left around
	 * from the PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that the ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_desc[0].action = &timer_irq_action;
}