1/*
2 *	linux/arch/alpha/kernel/irq_smp.c
3 *
4 */
5
6#include <linux/kernel.h>
7#include <linux/signal.h>
8#include <linux/sched.h>
9#include <linux/interrupt.h>
10#include <linux/random.h>
11#include <linux/init.h>
12#include <linux/delay.h>
13#include <linux/irq.h>
14
15#include <asm/system.h>
16#include <asm/io.h>
17
18
19/* Who has global_irq_lock. */
20int global_irq_holder = NO_PROC_ID;
21
/* This protects IRQs. */
23spinlock_t global_irq_lock = SPIN_LOCK_UNLOCKED;
24
/* Caller address that last acquired global_irq_lock (debug aid). */
26static void *previous_irqholder = NULL;
27
28#define MAXCOUNT 100000000
29
30
31static void
32show(char * str, void *where)
33{
34        int cpu = smp_processor_id();
35
36        printk("\n%s, CPU %d: %p\n", str, cpu, where);
37        printk("irq:  %d [%d %d]\n",
38	       irqs_running(),
39               local_irq_count(0),
40               local_irq_count(1));
41
42        printk("bh:   %d [%d %d]\n",
43	       spin_is_locked(&global_bh_lock) ? 1 : 0,
44	       local_bh_count(0),
45	       local_bh_count(1));
46}
47
48static inline void
49wait_on_irq(int cpu, void *where)
50{
51	int count = MAXCOUNT;
52
53	for (;;) {
54
55		/*
56		 * Wait until all interrupts are gone. Wait
57		 * for bottom half handlers unless we're
58		 * already executing in one..
59		 */
60		if (!irqs_running()) {
61			if (local_bh_count(cpu)
62			    || !spin_is_locked(&global_bh_lock))
63				break;
64		}
65
66		/* Duh, we have to loop. Release the lock to avoid deadlocks */
67		spin_unlock(&global_irq_lock);
68
69		for (;;) {
70			if (!--count) {
71				show("wait_on_irq", where);
72				count = MAXCOUNT;
73			}
74			__sti();
75			udelay(1); /* make sure to run pending irqs */
76			__cli();
77
78			if (irqs_running())
79				continue;
80			if (spin_is_locked(&global_irq_lock))
81				continue;
82			if (!local_bh_count(cpu)
83			    && spin_is_locked(&global_bh_lock))
84				continue;
85			if (spin_trylock(&global_irq_lock))
86				break;
87		}
88	}
89}
90
91static inline void
92get_irqlock(int cpu, void* where)
93{
94	if (!spin_trylock(&global_irq_lock)) {
95		/* Do we already hold the lock?  */
96		if (cpu == global_irq_holder)
97			return;
98		/* Uhhuh.. Somebody else got it.  Wait.  */
99		spin_lock(&global_irq_lock);
100	}
101
102	/*
103	 * Ok, we got the lock bit.
104	 * But that's actually just the easy part.. Now
105	 * we need to make sure that nobody else is running
106	 * in an interrupt context.
107	 */
108	wait_on_irq(cpu, where);
109
110	/*
111	 * Finally.
112	 */
113#ifdef CONFIG_DEBUG_SPINLOCK
114	global_irq_lock.task = current;
115	global_irq_lock.previous = where;
116#endif
117	global_irq_holder = cpu;
118	previous_irqholder = where;
119}
120
121void
122__global_cli(void)
123{
124	int cpu = smp_processor_id();
125	void *where = __builtin_return_address(0);
126
127	/*
128	 * Maximize ipl.  If ipl was previously 0 and if this thread
129	 * is not in an irq, then take global_irq_lock.
130	 */
131	if (swpipl(IPL_MAX) == IPL_MIN && !local_irq_count(cpu))
132		get_irqlock(cpu, where);
133}
134
135void
136__global_sti(void)
137{
138        int cpu = smp_processor_id();
139
140        if (!local_irq_count(cpu))
141		release_irqlock(cpu);
142	__sti();
143}
144
145/*
146 * SMP flags value to restore to:
147 * 0 - global cli
148 * 1 - global sti
149 * 2 - local cli
150 * 3 - local sti
151 */
152unsigned long
153__global_save_flags(void)
154{
155        int retval;
156        int local_enabled;
157        unsigned long flags;
158	int cpu = smp_processor_id();
159
160        __save_flags(flags);
161        local_enabled = (!(flags & 7));
162        /* default to local */
163        retval = 2 + local_enabled;
164
165        /* Check for global flags if we're not in an interrupt.  */
166        if (!local_irq_count(cpu)) {
167                if (local_enabled)
168                        retval = 1;
169                if (global_irq_holder == cpu)
170                        retval = 0;
171	}
172	return retval;
173}
174
175void
176__global_restore_flags(unsigned long flags)
177{
178        switch (flags) {
179        case 0:
180                __global_cli();
181                break;
182        case 1:
183                __global_sti();
184                break;
185        case 2:
186                __cli();
187                break;
188        case 3:
189                __sti();
190                break;
191        default:
192                printk(KERN_ERR "global_restore_flags: %08lx (%p)\n",
193                        flags, __builtin_return_address(0));
194        }
195}
196
197/*
198 * From its use, I infer that synchronize_irq() stalls a thread until
199 * the effects of a command to an external device are known to have
200 * taken hold.  Typically, the command is to stop sending interrupts.
201 * The strategy here is wait until there is at most one processor
202 * (this one) in an irq.  The memory barrier serializes the write to
203 * the device and the subsequent accesses of global_irq_count.
204 * --jmartin
205 */
206#define DEBUG_SYNCHRONIZE_IRQ 0
207
208void
209synchronize_irq(void)
210{
211	/* Jay's version.  */
212	if (irqs_running()) {
213		cli();
214		sti();
215	}
216}
217