1/*  $Id: irq.c,v 1.1.1.1 2007/08/03 18:52:17 Exp $
2 *  arch/sparc/kernel/irq.c:  Interrupt request handling routines. On the
3 *                            Sparc the IRQs are basically 'cast in stone'
4 *                            and you are supposed to probe the prom's device
5 *                            node trees to find out who's got which IRQ.
6 *
7 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
8 *  Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
9 *  Copyright (C) 1995,2002 Pete A. Zaitcev (zaitcev@yahoo.com)
10 *  Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
11 *  Copyright (C) 1998-2000 Anton Blanchard (anton@samba.org)
12 */
13
14#include <linux/module.h>
15#include <linux/sched.h>
16#include <linux/ptrace.h>
17#include <linux/errno.h>
18#include <linux/linkage.h>
19#include <linux/kernel_stat.h>
20#include <linux/signal.h>
21#include <linux/sched.h>
22#include <linux/interrupt.h>
23#include <linux/slab.h>
24#include <linux/random.h>
25#include <linux/init.h>
26#include <linux/smp.h>
27#include <linux/delay.h>
28#include <linux/threads.h>
29#include <linux/spinlock.h>
30#include <linux/seq_file.h>
31
32#include <asm/ptrace.h>
33#include <asm/processor.h>
34#include <asm/system.h>
35#include <asm/psr.h>
36#include <asm/smp.h>
37#include <asm/vaddrs.h>
38#include <asm/timer.h>
39#include <asm/openprom.h>
40#include <asm/oplib.h>
41#include <asm/traps.h>
42#include <asm/irq.h>
43#include <asm/io.h>
44#include <asm/pgalloc.h>
45#include <asm/pgtable.h>
46#include <asm/pcic.h>
47#include <asm/cacheflush.h>
48#include <asm/irq_regs.h>
49
/* Sun4m + Cypress SMP cpus need extra nop padding around %psr accesses
 * (see the asm below); on UP kernels the padding compiles away.
 */
#ifdef CONFIG_SMP
#define SMP_NOP2 "nop; nop;\n\t"
#define SMP_NOP3 "nop; nop; nop;\n\t"
#else
#define SMP_NOP2
#define SMP_NOP3
#endif /* SMP */
/* Disable interrupts by setting all PSR_PIL (processor interrupt level)
 * bits in the %psr, and return the previous %psr so the caller can hand
 * it back to local_irq_restore() later.
 */
unsigned long __local_irq_save(void)
{
	unsigned long retval;
	unsigned long tmp;

	__asm__ __volatile__(
		"rd	%%psr, %0\n\t"
		SMP_NOP3	/* Sun4m + Cypress + SMP bug */
		"or	%0, %2, %1\n\t"		/* raise PIL to maximum */
		"wr	%1, 0, %%psr\n\t"
		"nop; nop; nop\n"		/* 3 delay slots after writing %psr */
		: "=&r" (retval), "=r" (tmp)
		: "i" (PSR_PIL)
		: "memory");

	return retval;
}
74
/* Enable interrupts by clearing the PSR_PIL field of %psr.  Unlike
 * local_irq_restore() this unconditionally drops the level to zero.
 */
void local_irq_enable(void)
{
	unsigned long tmp;

	__asm__ __volatile__(
		"rd	%%psr, %0\n\t"
		SMP_NOP3	/* Sun4m + Cypress + SMP bug */
		"andn	%0, %1, %0\n\t"		/* clear all PIL bits */
		"wr	%0, 0, %%psr\n\t"
		"nop; nop; nop\n"		/* 3 delay slots after writing %psr */
		: "=&r" (tmp)
		: "i" (PSR_PIL)
		: "memory");
}
89
/* Restore the interrupt level saved by a prior __local_irq_save().
 * Only the PSR_PIL bits of @old_psr are merged back; the rest of the
 * current %psr is preserved (wr xors its two source operands, and the
 * PIL field of %0 has been cleared first).
 */
void local_irq_restore(unsigned long old_psr)
{
	unsigned long tmp;

	__asm__ __volatile__(
		"rd	%%psr, %0\n\t"
		"and	%2, %1, %2\n\t"		/* isolate PIL bits of old_psr */
		SMP_NOP2	/* Sun4m + Cypress + SMP bug */
		"andn	%0, %1, %0\n\t"		/* clear current PIL */
		"wr	%0, %2, %%psr\n\t"	/* write back with old PIL inserted */
		"nop; nop; nop\n"		/* 3 delay slots after writing %psr */
		: "=&r" (tmp)
		: "i" (PSR_PIL), "r" (old_psr)
		: "memory");
}

EXPORT_SYMBOL(__local_irq_save);
EXPORT_SYMBOL(local_irq_enable);
EXPORT_SYMBOL(local_irq_restore);
109
110/*
111 * Dave Redman (djhr@tadpole.co.uk)
112 *
113 * IRQ numbers.. These are no longer restricted to 15..
114 *
115 * this is done to enable SBUS cards and onboard IO to be masked
116 * correctly. using the interrupt level isn't good enough.
117 *
118 * For example:
119 *   A device interrupting at sbus level6 and the Floppy both come in
120 *   at IRQ11, but enabling and disabling them requires writing to
121 *   different bits in the SLAVIO/SEC.
122 *
123 * As a result of these changes sun4m machines could now support
124 * directed CPU interrupts using the existing enable/disable irq code
125 * with tweaks.
126 *
127 */
128
/* Fatal fallback used when no machine-specific IRQ code ever hooked in:
 * report the machine type via the PROM console and halt back to the PROM.
 */
static void irq_panic(void)
{
	extern char *cputypval;

	prom_printf("machine: %s doesn't have irq handlers defined!\n",
		    cputypval);
	prom_halt();
}
135
/* Timer-init hook; the cpu-model init code replaces this pointer, so if
 * irq_panic() is ever reached no handlers were installed for this machine.
 */
void (*sparc_init_timers)(irq_handler_t ) =
    (void (*)(irq_handler_t )) irq_panic;
138
139/*
140 * Dave Redman (djhr@tadpole.co.uk)
141 *
142 * There used to be extern calls and hard coded values here.. very sucky!
143 * instead, because some of the devices attach very early, I do something
144 * equally sucky but at least we'll never try to free statically allocated
145 * space or call kmalloc before kmalloc_init :(.
146 *
147 * In fact it's the timer10 that attaches first.. then timer14
148 * then kmalloc_init is called.. then the tty interrupts attach.
149 * hmmm....
150 *
151 */
#define MAX_STATIC_ALLOC	4
/* Pool of irqaction structs for devices that attach before kmalloc is
 * usable (the timers; see comment above).  Entries are never freed. */
struct irqaction static_irqaction[MAX_STATIC_ALLOC];
int static_irq_count;

/* Per-IRQ state: head of the (possibly shared) handler chain plus
 * bookkeeping flags. */
struct {
	struct irqaction *action;
	int flags;
} sparc_irq[NR_IRQS];
#define SPARC_IRQ_INPROGRESS 1	/* set while handler_irq() runs the chain */

/* Used to protect the IRQ action lists */
DEFINE_SPINLOCK(irq_action_lock);
164
/* /proc/interrupts seq_file show routine: one line per registered IRQ
 * with its count(s) (per-CPU under SMP), a '+' marker for IRQF_DISABLED
 * handlers, and the names of all handlers sharing the line.  sun4d
 * machines have their own layout and delegate entirely.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v;	/* seq iterator position == IRQ number */
	struct irqaction * action;
	unsigned long flags;
#ifdef CONFIG_SMP
	int j;
#endif

	if (sparc_cpu_model == sun4d) {
		extern int show_sun4d_interrupts(struct seq_file *, void *);

		return show_sun4d_interrupts(p, v);
	}
	spin_lock_irqsave(&irq_action_lock, flags);
	if (i < NR_IRQS) {
		action = sparc_irq[i].action;
		if (!action)	/* nothing registered on this line */
			goto out_unlock;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j) {
			seq_printf(p, "%10u ",
				    kstat_cpu(j).irqs[i]);
		}
#endif
		seq_printf(p, " %c %s",
			(action->flags & IRQF_DISABLED) ? '+' : ' ',
			action->name);
		/* Remaining handlers on a shared line, comma-separated. */
		for (action=action->next; action; action = action->next) {
			seq_printf(p, ",%s %s",
				(action->flags & IRQF_DISABLED) ? " +" : "",
				action->name);
		}
		seq_putc(p, '\n');
	}
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);
	return 0;
}
207
208void free_irq(unsigned int irq, void *dev_id)
209{
210	struct irqaction * action;
211	struct irqaction **actionp;
212        unsigned long flags;
213	unsigned int cpu_irq;
214
215	if (sparc_cpu_model == sun4d) {
216		extern void sun4d_free_irq(unsigned int, void *);
217
218		sun4d_free_irq(irq, dev_id);
219		return;
220	}
221	cpu_irq = irq & (NR_IRQS - 1);
222        if (cpu_irq > 14) {  /* 14 irq levels on the sparc */
223                printk("Trying to free bogus IRQ %d\n", irq);
224                return;
225        }
226
227	spin_lock_irqsave(&irq_action_lock, flags);
228
229	actionp = &sparc_irq[cpu_irq].action;
230	action = *actionp;
231
232	if (!action->handler) {
233		printk("Trying to free free IRQ%d\n",irq);
234		goto out_unlock;
235	}
236	if (dev_id) {
237		for (; action; action = action->next) {
238			if (action->dev_id == dev_id)
239				break;
240			actionp = &action->next;
241		}
242		if (!action) {
243			printk("Trying to free free shared IRQ%d\n",irq);
244			goto out_unlock;
245		}
246	} else if (action->flags & IRQF_SHARED) {
247		printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
248		goto out_unlock;
249	}
250	if (action->flags & SA_STATIC_ALLOC)
251	{
252		/* This interrupt is marked as specially allocated
253		 * so it is a bad idea to free it.
254		 */
255		printk("Attempt to free statically allocated IRQ%d (%s)\n",
256		       irq, action->name);
257		goto out_unlock;
258	}
259
260	*actionp = action->next;
261
262	spin_unlock_irqrestore(&irq_action_lock, flags);
263
264	synchronize_irq(irq);
265
266	spin_lock_irqsave(&irq_action_lock, flags);
267
268	kfree(action);
269
270	if (!sparc_irq[cpu_irq].action)
271		disable_irq(irq);
272
273out_unlock:
274	spin_unlock_irqrestore(&irq_action_lock, flags);
275}
276
277EXPORT_SYMBOL(free_irq);
278
279/*
280 * This is called when we want to synchronize with
281 * interrupts. We may for example tell a device to
282 * stop sending interrupts: but to make sure there
283 * are no interrupts that are executing on another
284 * CPU we need to call this function.
285 */
#ifdef CONFIG_SMP
/* Spin until no CPU is executing the handler chain for @irq; the
 * SPARC_IRQ_INPROGRESS flag is set and cleared by handler_irq().
 */
void synchronize_irq(unsigned int irq)
{
	unsigned int cpu_irq;

	cpu_irq = irq & (NR_IRQS - 1);
	while (sparc_irq[cpu_irq].flags & SPARC_IRQ_INPROGRESS)
		cpu_relax();
}
#endif /* SMP */
296
297void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs)
298{
299        int i;
300	struct irqaction * action;
301	unsigned int cpu_irq;
302
303	cpu_irq = irq & (NR_IRQS - 1);
304	action = sparc_irq[cpu_irq].action;
305
306        printk("IO device interrupt, irq = %d\n", irq);
307        printk("PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc,
308		    regs->npc, regs->u_regs[14]);
309	if (action) {
310		printk("Expecting: ");
311        	for (i = 0; i < 16; i++)
312                	if (action->handler)
313                        	printk("[%s:%d:0x%x] ", action->name,
314				       (int) i, (unsigned int) action->handler);
315	}
316        printk("AIEEE\n");
317	panic("bogus interrupt received");
318}
319
/* Core IRQ dispatch, entered from the low-level trap code with the
 * hardware IRQ level.  Runs every handler chained on the line with the
 * PIL level masked, and maintains the INPROGRESS flag that
 * synchronize_irq() polls.
 */
void handler_irq(int irq, struct pt_regs * regs)
{
	struct pt_regs *old_regs;
	struct irqaction * action;
	int cpu = smp_processor_id();
#ifdef CONFIG_SMP
	extern void smp4m_irq_rotate(int cpu);
#endif

	old_regs = set_irq_regs(regs);
	irq_enter();
	disable_pil_irq(irq);
#ifdef CONFIG_SMP
	/* Only rotate on lower priority IRQs (scsi, ethernet, etc.). */
	if((sparc_cpu_model==sun4m) && (irq < 10))
		smp4m_irq_rotate(cpu);
#endif
	action = sparc_irq[irq].action;
	sparc_irq[irq].flags |= SPARC_IRQ_INPROGRESS;
	kstat_cpu(cpu).irqs[irq]++;
	do {
		/* unexpected_irq() panics, so the dereference below is
		 * never reached with a NULL action. */
		if (!action || !action->handler)
			unexpected_irq(irq, NULL, regs);
		action->handler(irq, action->dev_id);
		action = action->next;
	} while (action);
	sparc_irq[irq].flags &= ~SPARC_IRQ_INPROGRESS;
	enable_pil_irq(irq);
	irq_exit();
	set_irq_regs(old_regs);
}
351
#ifdef CONFIG_BLK_DEV_FD
extern void floppy_interrupt(int irq, void *dev_id);

/* Dedicated entry point for the floppy interrupt: bypasses the shared
 * sparc_irq[] dispatch in handler_irq() and calls the driver directly,
 * while still doing the pil/irq_enter bookkeeping and statistics.
 */
void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	int cpu = smp_processor_id();

	old_regs = set_irq_regs(regs);
	disable_pil_irq(irq);
	irq_enter();
	kstat_cpu(cpu).irqs[irq]++;
	floppy_interrupt(irq, dev_id);
	irq_exit();
	enable_pil_irq(irq);
	set_irq_regs(old_regs);
	// if (softirq_pending(cpu))
	//	do_softirq();
}
#endif
372
/* Fast IRQs on the Sparc can only have one routine attached to them,
 * thus no sharing possible.
 *
 * The handler is installed by patching the level's trap-table entry
 * directly (on every CPU's table under SMP) so it is dispatched without
 * going through handler_irq().
 */
int request_fast_irq(unsigned int irq,
		     irq_handler_t handler,
		     unsigned long irqflags, const char *devname)
{
	struct irqaction *action;
	unsigned long flags;
	unsigned int cpu_irq;
	int ret;
#ifdef CONFIG_SMP
	struct tt_entry *trap_table;
	extern struct tt_entry trapbase_cpu1, trapbase_cpu2, trapbase_cpu3;
#endif

	cpu_irq = irq & (NR_IRQS - 1);
	if(cpu_irq > 14) {	/* only 14 usable irq levels on the sparc */
		ret = -EINVAL;
		goto out;
	}
	if(!handler) {
		ret = -EINVAL;
		goto out;
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	action = sparc_irq[cpu_irq].action;
	if(action) {
		if(action->flags & IRQF_SHARED)
			panic("Trying to register fast irq when already shared.\n");
		if(irqflags & IRQF_SHARED)
			panic("Trying to register fast irq as shared.\n");

		/* Anyway, someone already owns it so cannot be made fast. */
		printk("request_fast_irq: Trying to register yet already owned.\n");
		ret = -EBUSY;
		goto out_unlock;
	}

	/* If this is flagged as statically allocated then we use our
	 * private struct which is never freed.
	 */
	if (irqflags & SA_STATIC_ALLOC) {
	    if (static_irq_count < MAX_STATIC_ALLOC)
		action = &static_irqaction[static_irq_count++];
	    else
		printk("Fast IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
		       irq, devname);
	}

	if (action == NULL)
	    action = kmalloc(sizeof(struct irqaction),
						 GFP_ATOMIC);

	if (!action) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* Dork with trap table if we get this far. */
	/* Rewrite the four instructions of this level's trap slot; per the
	 * SPARC_* macro names: rd %psr to %l0, branch to the handler,
	 * rd %wim to %l3, nop.
	 */
#define INSTANTIATE(table) \
	table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_one = SPARC_RD_PSR_L0; \
	table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two = \
		SPARC_BRANCH((unsigned long) handler, \
			     (unsigned long) &table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two);\
	table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_three = SPARC_RD_WIM_L3; \
	table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;

	INSTANTIATE(sparc_ttable)
#ifdef CONFIG_SMP
	/* Each CPU has its own trap table under SMP; patch them all. */
	trap_table = &trapbase_cpu1; INSTANTIATE(trap_table)
	trap_table = &trapbase_cpu2; INSTANTIATE(trap_table)
	trap_table = &trapbase_cpu3; INSTANTIATE(trap_table)
#endif
#undef INSTANTIATE
	flush_cache_all();	/* make the patched instructions visible to instruction fetch */

	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->dev_id = NULL;
	action->next = NULL;

	sparc_irq[cpu_irq].action = action;

	enable_irq(irq);

	ret = 0;
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);
out:
	return ret;
}
469
/* Normal (slow) IRQ registration: append an irqaction to the line's
 * handler chain, honouring IRQF_SHARED compatibility, then enable the
 * level.  sun4d machines have their own implementation.
 */
int request_irq(unsigned int irq,
		irq_handler_t handler,
		unsigned long irqflags, const char * devname, void *dev_id)
{
	struct irqaction * action, **actionp;
	unsigned long flags;
	unsigned int cpu_irq;
	int ret;

	if (sparc_cpu_model == sun4d) {
		extern int sun4d_request_irq(unsigned int,
					     irq_handler_t ,
					     unsigned long, const char *, void *);
		return sun4d_request_irq(irq, handler, irqflags, devname, dev_id);
	}
	cpu_irq = irq & (NR_IRQS - 1);
	if(cpu_irq > 14) {	/* only 14 usable irq levels on the sparc */
		ret = -EINVAL;
		goto out;
	}
	if (!handler) {
		ret = -EINVAL;
		goto out;
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	actionp = &sparc_irq[cpu_irq].action;
	action = *actionp;
	if (action) {
		/* Sharing is only allowed when both old and new agree. */
		if (!(action->flags & IRQF_SHARED) || !(irqflags & IRQF_SHARED)) {
			ret = -EBUSY;
			goto out_unlock;
		}
		if ((action->flags & IRQF_DISABLED) != (irqflags & IRQF_DISABLED)) {
			printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
			ret = -EBUSY;
			goto out_unlock;
		}
		/* Walk to the tail so the new action is appended. */
		for ( ; action; action = *actionp)
			actionp = &action->next;
	}

	/* If this is flagged as statically allocated then we use our
	 * private struct which is never freed.
	 */
	if (irqflags & SA_STATIC_ALLOC) {
		if (static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname);
	}

	if (action == NULL)
		action = kmalloc(sizeof(struct irqaction),
						     GFP_ATOMIC);

	if (!action) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	*actionp = action;

	enable_irq(irq);

	ret = 0;
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);
out:
	return ret;
}

EXPORT_SYMBOL(request_irq);
551
552/* We really don't need these at all on the Sparc.  We only have
553 * stubs here because they are exported to modules.
554 */
/* IRQ autoprobing is a no-op on the Sparc; kept only because drivers
 * built as modules reference the symbol.  Always reports "no IRQs".
 */
unsigned long probe_irq_on(void)
{
	return 0UL;
}
559
560EXPORT_SYMBOL(probe_irq_on);
561
/* Companion stub to probe_irq_on(): nothing was probed, so no IRQ is
 * ever reported regardless of @mask.
 */
int probe_irq_off(unsigned long mask)
{
	(void) mask;	/* intentionally unused */
	return 0;
}
566
567EXPORT_SYMBOL(probe_irq_off);
568
569/* djhr
570 * This could probably be made indirect too and assigned in the CPU
571 * bits of the code. That would be much nicer I think and would also
572 * fit in with the idea of being able to tune your kernel for your machine
573 * by removing unrequired machine and device support.
574 *
575 */
576
/* Boot-time IRQ bring-up: dispatch to the init routine for the detected
 * cpu model, then run btfixup() to patch the boot-time fixup sites.
 */
void __init init_IRQ(void)
{
	extern void sun4c_init_IRQ( void );
	extern void sun4m_init_IRQ( void );
	extern void sun4d_init_IRQ( void );

	switch(sparc_cpu_model) {
	case sun4c:
	case sun4:
		sun4c_init_IRQ();
		break;

	case sun4m:
#ifdef CONFIG_PCI
		/* If a PCI controller is present it takes over IRQ duty. */
		pcic_probe();
		if (pcic_present()) {
			sun4m_pci_init_IRQ();
			break;
		}
#endif
		sun4m_init_IRQ();
		break;

	case sun4d:
		sun4d_init_IRQ();
		break;

	default:
		prom_printf("Cannot initialize IRQs on this Sun machine...");
		break;
	}
	btfixup();
}
610
/* Architecture hook for /proc/irq setup; nothing to do on sparc32 yet. */
void init_irq_proc(void)
{
}
615