/*	$NetBSD: iq80310_intr.c,v 1.10 2002/04/12 04:52:57 briggs Exp $	*/

/*
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Interrupt support for the Intel IQ80310.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/xscale/i80200reg.h>
#include <arm/xscale/i80200var.h>

#include <evbarm/iq80310/iq80310reg.h>
#include <evbarm/iq80310/iq80310var.h>
#include <evbarm/iq80310/obiovar.h>

#if defined(IOP310_TEAMASA_NPWR)
/*
 * We have 5 interrupt source bits -- all in XINT3.  All interrupts
 * can be masked in the CPLD.
 */
#define	IRQ_BITS		0x1f
#define	IRQ_BITS_ALWAYS_ON	0x00
#else /* Default to stock IQ80310 */
/*
 * We have 8 interrupt source bits -- 5 in the XINT3 register, and 3
 * in the XINT0 register (the upper 3).  Note that the XINT0 IRQs
 * (SPCI INTA, INTB, and INTC) are always enabled, since they can not
 * be masked out in the CPLD (it provides only status, not masking,
 * for those interrupts).
 */
#define	IRQ_BITS		0xff
#define	IRQ_BITS_ALWAYS_ON	0xe0
#define	IRQ_READ_XINT0		1	/* XXX only if board rev >= F */
#endif /* list of IQ80310-based designs */

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
static int imask[NIPL];

/* Current interrupt priority level. */
__volatile int current_spl_level;

/* Interrupts pending. */
static __volatile int ipending;

/* Software copy of the IRQs we have enabled. */
uint32_t intr_enabled;

/*
 * Map a software interrupt queue index to a bit in the pending word.
 * Soft interrupts live at the top of the word, arranged so that the
 * highest-priority softintr is encountered first in an ffs().
 */
#define	SI_TO_IRQBIT(si)	(1U << (31 - (si)))

/*
 * Map a software interrupt queue to an interrupt priority level.
 */
static const int si_to_ipl[SI_NQUEUES] = {
	IPL_SOFT,		/* SI_SOFT */
	IPL_SOFTCLOCK,		/* SI_SOFTCLOCK */
	IPL_SOFTNET,		/* SI_SOFTNET */
	IPL_SOFTSERIAL,		/* SI_SOFTSERIAL */
};

void	iq80310_intr_dispatch(struct clockframe *frame);

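/*
 * Gather the pending interrupt sources from the CPLD: the five XINT3
 * sources occupy bits 0-4, and, where XINT0 status can be read (see
 * IRQ_READ_XINT0 above), SPCI INTA/INTB/INTC occupy bits 5-7.
 */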
static __inline uint32_t
iq80310_intstat_read(void)
{
	uint32_t intstat;

	intstat = CPLD_READ(IQ80310_XINT3_STATUS) & 0x1f;
#if defined(IRQ_READ_XINT0)
	if (IRQ_READ_XINT0)
		intstat |= (CPLD_READ(IQ80310_XINT0_STATUS) & 0x7) << 5;
#endif

	/* XXX Why do we have to mask off? */
	return (intstat & intr_enabled);
}

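/*
 * Push the software copy of the enabled IRQs out to the CPLD.  The
 * always-on sources are forced enabled here, and only the low five
 * (XINT3) bits are actually writable in the XINT_MASK register.
 */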
static __inline void
iq80310_set_intrmask(void)
{
	uint32_t disabled;

	intr_enabled |= IRQ_BITS_ALWAYS_ON;

	/* The XINT_MASK register sets a bit to *disable*. */
	disabled = (~intr_enabled) & IRQ_BITS;

	CPLD_WRITE(IQ80310_XINT_MASK, disabled & 0x1f);
}

static __inline void
iq80310_enable_irq(int irq)
{

	intr_enabled |= (1U << irq);
	iq80310_set_intrmask();
}

static __inline void
iq80310_disable_irq(int irq)
{

	intr_enabled &= ~(1U << irq);
	iq80310_set_intrmask();
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
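/*
 * Recompute all of the interrupt masks: the set of IPLs used by each
 * IRQ, the set of IRQs blocked at each IPL (imask[]), and the mask of
 * IRQs to block while servicing each IRQ (iq_mask).  IRQs with no
 * handlers attached are disabled in the CPLD as a side effect.
 */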
static void
iq80310_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		iq80310_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		imask[ipl] = irqs;
	}

	imask[IPL_NONE] = 0;

	/*
	 * Initialize the soft interrupt masks to block themselves.
	 */
	imask[IPL_SOFT] = SI_TO_IRQBIT(SI_SOFT);
	imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
	imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
	imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);

	/*
	 * splsoftclock() is the only interface that users of the
	 * generic software interrupt facility have to block their
	 * soft intrs, so splsoftclock() must also block IPL_SOFT.
	 */
	imask[IPL_SOFTCLOCK] |= imask[IPL_SOFT];

	/*
	 * splsoftnet() must also block splsoftclock(), since we don't
	 * want timer-driven network events to occur while we're
	 * processing incoming packets.
	 */
	imask[IPL_SOFTNET] |= imask[IPL_SOFTCLOCK];

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	imask[IPL_BIO] |= imask[IPL_SOFTNET];
	imask[IPL_NET] |= imask[IPL_BIO];
	imask[IPL_SOFTSERIAL] |= imask[IPL_NET];
	imask[IPL_TTY] |= imask[IPL_SOFTSERIAL];

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	imask[IPL_IMP] |= imask[IPL_TTY];

	/*
	 * Audio devices are not allowed to perform memory allocation
	 * in their interrupt routines, and they have fairly "real-time"
	 * requirements, so give them a high interrupt priority.
	 */
	imask[IPL_AUDIO] |= imask[IPL_IMP];

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	imask[IPL_CLOCK] |= imask[IPL_AUDIO];

	/*
	 * No separate statclock on the IQ80310.
	 */
	imask[IPL_STATCLOCK] |= imask[IPL_CLOCK];

	/*
	 * splhigh() must block "everything".
	 */
	imask[IPL_HIGH] |= imask[IPL_STATCLOCK];

	/*
	 * XXX We need serial drivers to run at the absolute highest priority
	 * in order to avoid overruns, so serial > high.
	 */
	imask[IPL_SERIAL] |= imask[IPL_HIGH];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			iq80310_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}

static void
iq80310_do_pending(void)
{
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	int new, oldirqstate;

	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	new = current_spl_level;

	oldirqstate = disable_interrupts(I32_bit);

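	/*
	 * For each soft interrupt that is pending and not blocked at the
	 * spl we entered with: clear its pending bit, raise the current
	 * spl to that queue's IPL, and dispatch it with hardware
	 * interrupts re-enabled.
	 */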
#define	DO_SOFTINT(si)							\
	if ((ipending & ~new) & SI_TO_IRQBIT(si)) {			\
		ipending &= ~SI_TO_IRQBIT(si);				\
		current_spl_level |= imask[si_to_ipl[(si)]];		\
		restore_interrupts(oldirqstate);			\
		softintr_dispatch(si);					\
		oldirqstate = disable_interrupts(I32_bit);		\
		current_spl_level = new;				\
	}

	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTCLOCK);
	DO_SOFTINT(SI_SOFT);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
}

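/*
 * spl values on this port are bitmasks of blocked interrupt sources
 * (the imask[] entry for the corresponding IPL), so raising the level
 * simply ORs more bits into current_spl_level.
 */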
int
_splraise(int ipl)
{
	int old, oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = current_spl_level;
	current_spl_level |= imask[ipl];

	restore_interrupts(oldirqstate);

	return (old);
}

__inline void
splx(int new)
{
	int old;

	old = current_spl_level;
	current_spl_level = new;

	/* If there are software interrupts to process, do it. */
	if ((ipending & ~IRQ_BITS) & ~new)
		iq80310_do_pending();

	/*
	 * If there are pending hardware interrupts (i.e. the
	 * external interrupt is disabled in the ICU), and all
	 * hardware interrupts are being unblocked, then re-enable
	 * the external hardware interrupt.
	 *
	 * XXX We have to wait for ALL hardware interrupts to
	 * XXX be unblocked, because we currently lose if we
	 * XXX get nested interrupts, and I don't know why yet.
	 */
	if ((new & IRQ_BITS) == 0 && (ipending & IRQ_BITS))
		i80200_intr_enable(INTCTL_IM);
}

int
_spllower(int ipl)
{
	int old = current_spl_level;

	splx(imask[ipl]);
	return (old);
}

void
_setsoftintr(int si)
{
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	ipending |= SI_TO_IRQBIT(si);
	restore_interrupts(oldirqstate);

	/* Process unmasked pending soft interrupts. */
	if ((ipending & ~IRQ_BITS) & ~current_spl_level)
		iq80310_do_pending();
}

void
iq80310_intr_init(void)
{
	struct intrq *iq;
	int i;

	/*
	 * The Secondary PCI interrupts INTA, INTB, and INTC
	 * are always enabled, since they cannot be masked
	 * in the CPLD.
	 */
	intr_enabled |= IRQ_BITS_ALWAYS_ON;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		sprintf(iq->iq_name, "irq %d", i);
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "iq80310", iq->iq_name);
	}

	iq80310_intr_calculate_masks();

	/* Enable external interrupts on the i80200. */
	i80200_extirq_dispatch = iq80310_intr_dispatch;
	i80200_intr_enable(INTCTL_IM);

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

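/*
 * Establish a handler for the given IRQ at the given IPL.  Returns a
 * cookie for iq80310_intr_disestablish(), or NULL if the handler
 * could not be allocated.
 */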
void *
iq80310_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("iq80310_intr_establish: IRQ %d out of range", irq);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;
	ih->ih_irq = irq;

	iq = &intrq[irq];

	/* All IQ80310 interrupts are level-triggered. */
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

	iq80310_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	return (ih);
}

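/*
 * Remove a previously established handler; the cookie is the value
 * returned by iq80310_intr_establish().
 */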
void
iq80310_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	iq80310_intr_calculate_masks();

	restore_interrupts(oldirqstate);
}

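/*
 * External IRQ dispatcher, hooked into the i80200 interrupt handling
 * code via i80200_extirq_dispatch in iq80310_intr_init().
 */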
void
iq80310_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, pcpl, irq, ibit, hwpend;

	/* First, disable external IRQs. */
	i80200_intr_disable(INTCTL_IM);

	pcpl = current_spl_level;

	for (hwpend = iq80310_intstat_read(); hwpend != 0;) {
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (pcpl & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: external IRQs are already
			 * disabled.
			 */
			ipending |= ibit;
			continue;
		}

		ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		current_spl_level |= iq->iq_mask;
		oldirqstate = enable_interrupts(I32_bit);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		}
		restore_interrupts(oldirqstate);

		current_spl_level = pcpl;
	}

	/* Check for pending soft intrs. */
	if ((ipending & ~IRQ_BITS) & ~current_spl_level) {
		oldirqstate = enable_interrupts(I32_bit);
		iq80310_do_pending();
		restore_interrupts(oldirqstate);
	}

	/*
	 * If no hardware interrupts are masked, re-enable external
	 * interrupts.
	 */
	if ((ipending & IRQ_BITS) == 0)
		i80200_intr_enable(INTCTL_IM);
}
509