/* $NetBSD: ep93xx_intr.c,v 1.8 2006/11/24 21:20:05 wiz Exp $ */

/*
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jesse Off
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ichiro FUKUHARA and Naoto Shimazaki.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ep93xx_intr.c,v 1.8 2006/11/24 21:20:05 wiz Exp $");

/*
 * Interrupt support for the Cirrus Logic EP93XX
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/termios.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/ep93xx/ep93xxreg.h>
#include <arm/ep93xx/ep93xxvar.h>

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
static u_int32_t vic1_imask[NIPL];
static u_int32_t vic2_imask[NIPL];

/* Current software and hardware interrupt priority levels. */
volatile int current_spl_level;
volatile int hardware_spl_level;

/* Software copy of the IRQs we have enabled. */
volatile u_int32_t vic1_intr_enabled;
volatile u_int32_t vic2_intr_enabled;

/* Interrupts pending. */
static volatile int ipending;

/*
 * Map a software interrupt queue index to the unused bits in the
 * VIC1 register (XXX: this will need to be revisited if those bits
 * are ever used in future steppings).
 */
static const u_int32_t si_to_irqbit[SI_NQUEUES] = {
	EP93XX_INTR_bit30,		/* SI_SOFT */
	EP93XX_INTR_bit29,		/* SI_SOFTCLOCK */
	EP93XX_INTR_bit28,		/* SI_SOFTNET */
	EP93XX_INTR_bit27,		/* SI_SOFTSERIAL */
};

#define	INT_SWMASK							\
	((1U << EP93XX_INTR_bit30) | (1U << EP93XX_INTR_bit29) |	\
	 (1U << EP93XX_INTR_bit28) | (1U << EP93XX_INTR_bit27))

#define	SI_TO_IRQBIT(si)	(1U << si_to_irqbit[(si)])

/*
 * Map a software interrupt queue to an interrupt priority level.
 */
static const int si_to_ipl[SI_NQUEUES] = {
	IPL_SOFT,		/* SI_SOFT */
	IPL_SOFTCLOCK,		/* SI_SOFTCLOCK */
	IPL_SOFTNET,		/* SI_SOFTNET */
	IPL_SOFTSERIAL,		/* SI_SOFTSERIAL */
};

void	ep93xx_intr_dispatch(struct irqframe *frame);

#define VIC1REG(reg)	*((volatile u_int32_t*) (EP93XX_AHB_VBASE + \
	EP93XX_AHB_VIC1 + (reg)))
#define VIC2REG(reg)	*((volatile u_int32_t*) (EP93XX_AHB_VBASE + \
	EP93XX_AHB_VIC2 + (reg)))

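/*
 * ep93xx_set_intrmask:
 *
 *	Mask the given IRQ bits at the hardware: the bits in vic1_irqs
 *	and vic2_irqs are disabled at the VICs, and every other source
 *	recorded in the software enable copies is (re)enabled.
 */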
static void
ep93xx_set_intrmask(u_int32_t vic1_irqs, u_int32_t vic2_irqs)
{
	VIC1REG(EP93XX_VIC_IntEnClear) = vic1_irqs;
	VIC1REG(EP93XX_VIC_IntEnable) = vic1_intr_enabled & ~vic1_irqs;
	VIC2REG(EP93XX_VIC_IntEnClear) = vic2_irqs;
	VIC2REG(EP93XX_VIC_IntEnable) = vic2_intr_enabled & ~vic2_irqs;
}

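/*
 * ep93xx_enable_irq:
 *
 *	Enable the given IRQ in the software copy and at the appropriate
 *	VIC (VIC1 for IRQs 0..VIC_NIRQ-1, VIC2 for the rest).
 */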
static void
ep93xx_enable_irq(int irq)
{
	if (irq < VIC_NIRQ) {
		vic1_intr_enabled |= (1U << irq);
		VIC1REG(EP93XX_VIC_IntEnable) = (1U << irq);
	} else {
		vic2_intr_enabled |= (1U << (irq - VIC_NIRQ));
		VIC2REG(EP93XX_VIC_IntEnable) = (1U << (irq - VIC_NIRQ));
	}
}

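/*
 * ep93xx_disable_irq:
 *
 *	Disable the given IRQ in the software copy and at the
 *	corresponding VIC.
 */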
static inline void
ep93xx_disable_irq(int irq)
{
	if (irq < VIC_NIRQ) {
		vic1_intr_enabled &= ~(1U << irq);
		VIC1REG(EP93XX_VIC_IntEnClear) = (1U << irq);
	} else {
		vic2_intr_enabled &= ~(1U << (irq - VIC_NIRQ));
		VIC2REG(EP93XX_VIC_IntEnClear) = (1U << (irq - VIC_NIRQ));
	}
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
ep93xx_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		ep93xx_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int vic1_irqs = 0;
		int vic2_irqs = 0;
		for (irq = 0; irq < VIC_NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				vic1_irqs |= (1U << irq);
		}
		vic1_imask[ipl] = vic1_irqs;
		for (irq = 0; irq < VIC_NIRQ; irq++) {
			if (intrq[irq + VIC_NIRQ].iq_levels & (1U << ipl))
				vic2_irqs |= (1U << irq);
		}
		vic2_imask[ipl] = vic2_irqs;
	}

	vic1_imask[IPL_NONE] = 0;
	vic2_imask[IPL_NONE] = 0;

	/*
	 * Initialize the soft interrupt masks to block themselves.
	 */
	vic1_imask[IPL_SOFT] = SI_TO_IRQBIT(SI_SOFT);
	vic1_imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
	vic1_imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
	vic1_imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);

	/*
	 * splsoftclock() is the only interface that users of the
	 * generic software interrupt facility have to block their
	 * soft intrs, so splsoftclock() must also block IPL_SOFT.
	 */
	vic1_imask[IPL_SOFTCLOCK] |= vic1_imask[IPL_SOFT];
	vic2_imask[IPL_SOFTCLOCK] |= vic2_imask[IPL_SOFT];

	/*
	 * splsoftnet() must also block splsoftclock(), since we don't
	 * want timer-driven network events to occur while we're
	 * processing incoming packets.
	 */
	vic1_imask[IPL_SOFTNET] |= vic1_imask[IPL_SOFTCLOCK];
	vic2_imask[IPL_SOFTNET] |= vic2_imask[IPL_SOFTCLOCK];

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	vic1_imask[IPL_BIO] |= vic1_imask[IPL_SOFTNET];
	vic2_imask[IPL_BIO] |= vic2_imask[IPL_SOFTNET];
	vic1_imask[IPL_NET] |= vic1_imask[IPL_BIO];
	vic2_imask[IPL_NET] |= vic2_imask[IPL_BIO];
	vic1_imask[IPL_SOFTSERIAL] |= vic1_imask[IPL_NET];
	vic2_imask[IPL_SOFTSERIAL] |= vic2_imask[IPL_NET];
	vic1_imask[IPL_TTY] |= vic1_imask[IPL_SOFTSERIAL];
	vic2_imask[IPL_TTY] |= vic2_imask[IPL_SOFTSERIAL];

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	vic1_imask[IPL_VM] |= vic1_imask[IPL_TTY];
	vic2_imask[IPL_VM] |= vic2_imask[IPL_TTY];

	/*
	 * Audio devices are not allowed to perform memory allocation
	 * in their interrupt routines, and they have fairly "real-time"
	 * requirements, so give them a high interrupt priority.
	 */
	vic1_imask[IPL_AUDIO] |= vic1_imask[IPL_VM];
	vic2_imask[IPL_AUDIO] |= vic2_imask[IPL_VM];

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	vic1_imask[IPL_CLOCK] |= vic1_imask[IPL_AUDIO];
	vic2_imask[IPL_CLOCK] |= vic2_imask[IPL_AUDIO];

	/*
	 * No separate statclock on the EP93xx.
	 */
	vic1_imask[IPL_STATCLOCK] |= vic1_imask[IPL_CLOCK];
	vic2_imask[IPL_STATCLOCK] |= vic2_imask[IPL_CLOCK];

	/*
	 * Serial UARTs have small buffers that need low-latency servicing.
	 */
	vic1_imask[IPL_SERIAL] |= vic1_imask[IPL_STATCLOCK];
	vic2_imask[IPL_SERIAL] |= vic2_imask[IPL_STATCLOCK];

	/*
	 * splhigh() must block "everything".
	 */
	vic1_imask[IPL_HIGH] |= vic1_imask[IPL_SERIAL];
	vic2_imask[IPL_HIGH] |= vic2_imask[IPL_SERIAL];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int	vic1_irqs;
		int	vic2_irqs;

		if (irq < VIC_NIRQ) {
			vic1_irqs = (1U << irq);
			vic2_irqs = 0;
		} else {
			vic1_irqs = 0;
			vic2_irqs = (1U << (irq - VIC_NIRQ));
		}
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			ep93xx_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			vic1_irqs |= vic1_imask[ih->ih_ipl];
			vic2_irqs |= vic2_imask[ih->ih_ipl];
		}
		iq->iq_vic1_mask = vic1_irqs;
		iq->iq_vic2_mask = vic2_irqs;
	}
}

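/*
 * ep93xx_do_pending:
 *
 *	Run any pending software interrupts that are not masked at the
 *	current spl.  A simple lock prevents recursive processing; each
 *	soft interrupt handler runs at its own IPL with hardware
 *	interrupts re-enabled.
 */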
static void
ep93xx_do_pending(void)
{
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	int	new;
	u_int	oldirqstate, oldirqstate2;

	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	new = current_spl_level;

	oldirqstate = disable_interrupts(I32_bit);

#define	DO_SOFTINT(si)							\
	if ((ipending & ~vic1_imask[new]) & SI_TO_IRQBIT(si)) {		\
		ipending &= ~SI_TO_IRQBIT(si);				\
		current_spl_level = si_to_ipl[(si)];			\
		oldirqstate2 = enable_interrupts(I32_bit);		\
		softintr_dispatch(si);					\
		restore_interrupts(oldirqstate2);			\
		current_spl_level = new;				\
	}

	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTCLOCK);
	DO_SOFTINT(SI_SOFT);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
}

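/*
 * splx:
 *
 *	Set the interrupt priority level to "new", reprogram the VIC
 *	masks if the hardware level changes, and run any soft interrupts
 *	that the new level leaves unmasked.
 */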
inline void
splx(int new)
{
	int	old;
	u_int	oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = current_spl_level;
	current_spl_level = new;
	if (new != hardware_spl_level) {
		hardware_spl_level = new;
		ep93xx_set_intrmask(vic1_imask[new], vic2_imask[new]);
	}
	restore_interrupts(oldirqstate);

	/* If there are software interrupts to process, do it. */
	if ((ipending & INT_SWMASK) & ~vic1_imask[new])
		ep93xx_do_pending();
}

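/*
 * _splraise:
 *
 *	Raise the software interrupt priority level to "ipl" and return
 *	the previous level.  The hardware mask is updated lazily, when an
 *	interrupt is dispatched or the level is lowered again via splx().
 */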
int
_splraise(int ipl)
{
	int	old;
	u_int	oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = current_spl_level;
	current_spl_level = ipl;
	restore_interrupts(oldirqstate);
	return (old);
}

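/*
 * _spllower:
 *
 *	Lower the interrupt priority level to "ipl" (via splx()) if it is
 *	currently above it, and return the previous level.
 */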
int
_spllower(int ipl)
{
	int	old = current_spl_level;

	if (old <= ipl)
		return (old);
	splx(ipl);
	return (old);
}

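/*
 * _setsoftintr:
 *
 *	Mark the given software interrupt pending and dispatch it
 *	immediately if it is not masked at the current level.
 */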
void
_setsoftintr(int si)
{
	u_int	oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	ipending |= SI_TO_IRQBIT(si);
	restore_interrupts(oldirqstate);

	/* Process unmasked pending soft interrupts. */
	if ((ipending & INT_SWMASK) & ~vic1_imask[current_spl_level])
		ep93xx_do_pending();
}

/*
 * ep93xx_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
ep93xx_intr_init(void)
{
	struct intrq *iq;
	int i;

	vic1_intr_enabled = 0;
	vic2_intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		sprintf(iq->iq_name, "irq %d", i);
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
				     NULL, (i < VIC_NIRQ ? "vic1" : "vic2"),
				     iq->iq_name);
	}
	current_intr_depth = 0;
	current_spl_level = 0;
	hardware_spl_level = 0;

	/* All interrupts should use IRQ not FIQ */
	VIC1REG(EP93XX_VIC_IntSelect) = 0;
	VIC2REG(EP93XX_VIC_IntSelect) = 0;

	ep93xx_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

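/*
 * ep93xx_intr_establish:
 *
 *	Register "ih_func" as a handler for "irq", to run at priority
 *	"ipl", and recompute the interrupt masks.  Returns an opaque
 *	cookie for ep93xx_intr_disestablish(), or NULL if the handler
 *	could not be allocated.
 *
 *	Typical use from a driver attach routine (sketch only; the
 *	handler and softc names are illustrative):
 *
 *		sc->sc_ih = ep93xx_intr_establish(irq, IPL_NET, foo_intr, sc);
 */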
void *
ep93xx_intr_establish(int irq, int ipl, int (*ih_func)(void *), void *arg)
{
	struct intrq*		iq;
	struct intrhand*	ih;
	u_int			oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("ep93xx_intr_establish: IRQ %d out of range", irq);
	if (ipl < 0 || ipl >= NIPL)
		panic("ep93xx_intr_establish: IPL %d out of range", ipl);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = ih_func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	iq = &intrq[irq];

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
	ep93xx_intr_calculate_masks();
	restore_interrupts(oldirqstate);

	return (ih);
}

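/*
 * ep93xx_intr_disestablish:
 *
 *	Remove a handler previously registered with ep93xx_intr_establish()
 *	and recompute the interrupt masks.
 */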
void
ep93xx_intr_disestablish(void *cookie)
{
	struct intrhand*	ih = cookie;
	struct intrq*		iq = &intrq[ih->ih_irq];
	u_int			oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
	ep93xx_intr_calculate_masks();
	restore_interrupts(oldirqstate);
}

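/*
 * ep93xx_intr_dispatch:
 *
 *	IRQ dispatcher, called from the IRQ vector with the interrupt
 *	frame.  Reads the pending status of both VICs, masks the pending
 *	sources together with everything blocked at the current spl, runs
 *	the handlers for the lowest-numbered pending IRQ (VIC1 first) at
 *	their IPL with interrupts re-enabled, then restores the spl masks
 *	and processes any unmasked pending soft interrupts.
 */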
void
ep93xx_intr_dispatch(struct irqframe *frame)
{
	struct intrq*		iq;
	struct intrhand*	ih;
	u_int			oldirqstate;
	int			pcpl;
	u_int32_t		vic1_hwpend;
	u_int32_t		vic2_hwpend;
	int			irq;

	pcpl = current_spl_level;

	vic1_hwpend = VIC1REG(EP93XX_VIC_IRQStatus);
	vic2_hwpend = VIC2REG(EP93XX_VIC_IRQStatus);

	hardware_spl_level = pcpl;
	ep93xx_set_intrmask(vic1_imask[pcpl] | vic1_hwpend,
			     vic2_imask[pcpl] | vic2_hwpend);

	vic1_hwpend &= ~vic1_imask[pcpl];
	vic2_hwpend &= ~vic2_imask[pcpl];

	if (vic1_hwpend) {
		irq = ffs(vic1_hwpend) - 1;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			current_spl_level = ih->ih_ipl;
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
	} else if (vic2_hwpend) {
		irq = ffs(vic2_hwpend) - 1;

		iq = &intrq[irq + VIC_NIRQ];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			current_spl_level = ih->ih_ipl;
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
	}

	current_spl_level = pcpl;
	hardware_spl_level = pcpl;
	ep93xx_set_intrmask(vic1_imask[pcpl], vic2_imask[pcpl]);

	/* Check for pending soft interrupts. */
	if ((ipending & INT_SWMASK) & ~vic1_imask[pcpl]) {
		ep93xx_do_pending();
	}
}