/* i80321_icu.c, revision 1.7 */
1/*	$NetBSD: i80321_icu.c,v 1.7 2003/06/16 20:00:58 thorpej Exp $	*/
2
3/*
4 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed for the NetBSD Project by
20 *	Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 *    or promote products derived from this software without specific prior
23 *    written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38#ifndef EVBARM_SPL_NOINLINE
39#define	EVBARM_SPL_NOINLINE
40#endif
41
42/*
43 * Interrupt support for the Intel i80321 I/O Processor.
44 */
45
46#include <sys/param.h>
47#include <sys/systm.h>
48#include <sys/malloc.h>
49
50#include <uvm/uvm_extern.h>
51
52#include <machine/bus.h>
53#include <machine/intr.h>
54
55#include <arm/cpufunc.h>
56
57#include <arm/xscale/i80321reg.h>
58#include <arm/xscale/i80321var.h>
59
/* Interrupt handler queues, one per hardware IRQ line. */
struct intrq intrq[NIRQ];

/* For each IPL, the bitmask of IRQs that must be blocked at that level. */
int i80321_imask[NIPL];

/*
 * Current interrupt priority level, represented as an IRQ bitmask
 * (it is OR'd with iq_mask / i80321_imask entries, not an IPL index).
 */
__volatile int current_spl_level;

/* Bitmask of interrupts (hard and soft) posted while masked, awaiting service. */
__volatile int i80321_ipending;

/* Software copy of the IRQs we have enabled. */
__volatile uint32_t intr_enabled;

/* Mask of interrupts steered to FIQ instead of IRQ (see i80321_set_intrsteer). */
uint32_t intr_steer;
77
/*
 * Map a software interrupt queue index to the unused bits in the
 * ICU registers -- XXX will need to revisit this if those bits are
 * ever used in future steppings.
 */
static const uint32_t si_to_irqbit[SI_NQUEUES] = {
	ICU_INT_bit26,		/* SI_SOFT */
	ICU_INT_bit22,		/* SI_SOFTCLOCK */
	ICU_INT_bit5,		/* SI_SOFTNET */
	ICU_INT_bit4,		/* SI_SOFTSERIAL */
};

/* Convert a software interrupt queue index to its single-bit IRQ mask. */
#define	SI_TO_IRQBIT(si)	(1U << si_to_irqbit[(si)])
91
/*
 * Map a software interrupt queue to the interrupt priority level whose
 * mask is raised while that queue is dispatched (see i80321_do_pending).
 */
static const int si_to_ipl[SI_NQUEUES] = {
	IPL_SOFT,		/* SI_SOFT */
	IPL_SOFTCLOCK,		/* SI_SOFTCLOCK */
	IPL_SOFTNET,		/* SI_SOFTNET */
	IPL_SOFTSERIAL,		/* SI_SOFTSERIAL */
};
101
/*
 * Interrupt bit names, indexed by IRQ number (used as evcnt names
 * in i80321_intr_init).
 */
const char *i80321_irqnames[] = {
	"DMA0 EOT",
	"DMA0 EOC",
	"DMA1 EOT",
	"DMA1 EOC",
	"irq 4",
	"irq 5",
	"AAU EOT",
	"AAU EOC",
	"core PMU",
	"TMR0 (hardclock)",
	"TMR1",
	"I2C0",
	"I2C1",
	"MU",
	"BIST",
	"periph PMU",
	"XScale PMU",
	"BIU error",
	"ATU error",
	"MCU error",
	"DMA0 error",
	"DMA1 error",
	"irq 22",
	"AAU error",
	"MU error",
	"SSP",
	"irq 26",
	"irq 27",
	"irq 28",
	"irq 29",
	"irq 30",
	"irq 31",
};
139
void	i80321_intr_dispatch(struct clockframe *frame);

/*
 * Read the ICU's interrupt-source register (coprocessor p6) and return
 * only the bits for interrupts we consider enabled.
 */
static __inline uint32_t
i80321_iintsrc_read(void)
{
	uint32_t iintsrc;

	/* mrc p6, c8: read IINTSRC. */
	__asm __volatile("mrc p6, 0, %0, c8, c0, 0"
		: "=r" (iintsrc));

	/*
	 * The IINTSRC register shows bits that are active even
	 * if they are masked in INTCTL, so we have to mask them
	 * off with the interrupts we consider enabled.
	 */
	return (iintsrc & intr_enabled);
}
157
/*
 * Push the software steering mask (intr_steer, limited to the valid
 * hardware bits) into the ICU's steering register via coprocessor p6.
 */
static __inline void
i80321_set_intrsteer(void)
{

	/* mcr p6, c4: write the IRQ/FIQ steering register. */
	__asm __volatile("mcr p6, 0, %0, c4, c0, 0"
		:
		: "r" (intr_steer & ICU_INT_HWMASK));
}
166
167static __inline void
168i80321_enable_irq(int irq)
169{
170
171	intr_enabled |= (1U << irq);
172	i80321_set_intrmask();
173}
174
175static __inline void
176i80321_disable_irq(int irq)
177{
178
179	intr_enabled &= ~(1U << irq);
180	i80321_set_intrmask();
181}
182
/*
 * Recompute iq_levels for every IRQ, i80321_imask[] for every IPL, and
 * iq_mask for every IRQ, from the currently-registered handlers.  Also
 * disables IRQs with no handlers and enables those with at least one.
 *
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
i80321_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		/* Disable for now; re-enabled below if it has handlers. */
		i80321_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		i80321_imask[ipl] = irqs;
	}

	/* IPL_NONE blocks nothing. */
	i80321_imask[IPL_NONE] = 0;

	/*
	 * Initialize the soft interrupt masks to block themselves.
	 */
	i80321_imask[IPL_SOFT] = SI_TO_IRQBIT(SI_SOFT);
	i80321_imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
	i80321_imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
	i80321_imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);

	/*
	 * splsoftclock() is the only interface that users of the
	 * generic software interrupt facility have to block their
	 * soft intrs, so splsoftclock() must also block IPL_SOFT.
	 */
	i80321_imask[IPL_SOFTCLOCK] |= i80321_imask[IPL_SOFT];

	/*
	 * splsoftnet() must also block splsoftclock(), since we don't
	 * want timer-driven network events to occur while we're
	 * processing incoming packets.
	 */
	i80321_imask[IPL_SOFTNET] |= i80321_imask[IPL_SOFTCLOCK];

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	i80321_imask[IPL_BIO] |= i80321_imask[IPL_SOFTNET];
	i80321_imask[IPL_NET] |= i80321_imask[IPL_BIO];
	i80321_imask[IPL_SOFTSERIAL] |= i80321_imask[IPL_NET];
	i80321_imask[IPL_TTY] |= i80321_imask[IPL_SOFTSERIAL];

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	i80321_imask[IPL_VM] |= i80321_imask[IPL_TTY];

	/*
	 * Audio devices are not allowed to perform memory allocation
	 * in their interrupt routines, and they have fairly "real-time"
	 * requirements, so give them a high interrupt priority.
	 */
	i80321_imask[IPL_AUDIO] |= i80321_imask[IPL_VM];

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	i80321_imask[IPL_CLOCK] |= i80321_imask[IPL_AUDIO];

	/*
	 * No separate statclock here; statclock blocks everything the
	 * clock does.  (Original comment said "IQ80310" -- apparently
	 * inherited from that port's code; confirm.)
	 */
	i80321_imask[IPL_STATCLOCK] |= i80321_imask[IPL_CLOCK];

	/*
	 * splhigh() must block "everything".
	 */
	i80321_imask[IPL_HIGH] |= i80321_imask[IPL_STATCLOCK];

	/*
	 * XXX We need serial drivers to run at the absolute highest priority
	 * in order to avoid overruns, so serial > high.
	 */
	i80321_imask[IPL_SERIAL] |= i80321_imask[IPL_HIGH];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &intrq[irq];
		/* Re-enable only IRQs that have at least one handler. */
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			i80321_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= i80321_imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}
297
/*
 * Dispatch any pending software interrupts that are not blocked by the
 * current spl level.  Re-entry is prevented with a simple try-lock; a
 * concurrent caller simply returns and lets the holder drain the queue.
 */
__inline void
i80321_do_pending(void)
{
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	int new, oldirqstate;

	/* Already being processed by another frame; nothing to do. */
	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	/* Snapshot the spl level to restore after each dispatch. */
	new = current_spl_level;

	oldirqstate = disable_interrupts(I32_bit);

/*
 * If soft interrupt `si' is pending and unmasked: clear its pending bit,
 * raise the spl mask to its IPL, then dispatch it with CPU interrupts
 * re-enabled, and finally drop back to the saved spl level.
 */
#define	DO_SOFTINT(si)							\
	if ((i80321_ipending & ~new) & SI_TO_IRQBIT(si)) {		\
		i80321_ipending &= ~SI_TO_IRQBIT(si);			\
		current_spl_level |= i80321_imask[si_to_ipl[(si)]];	\
		restore_interrupts(oldirqstate);			\
		softintr_dispatch(si);					\
		oldirqstate = disable_interrupts(I32_bit);		\
		current_spl_level = new;				\
	}

	/* Highest-priority soft interrupts first. */
	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTCLOCK);
	DO_SOFTINT(SI_SOFT);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
}
330
/*
 * Out-of-line spl entry points (EVBARM_SPL_NOINLINE is defined above);
 * each simply wraps the corresponding i80321_* implementation.
 */
void
splx(int new)
{

	i80321_splx(new);
}
337
/* Lower the spl level; wrapper around i80321_spllower(). */
int
_spllower(int ipl)
{

	return (i80321_spllower(ipl));
}
344
/* Raise the spl level; wrapper around i80321_splraise(). */
int
_splraise(int ipl)
{

	return (i80321_splraise(ipl));
}
351
/*
 * Post software interrupt `si' and, if it is not masked at the current
 * spl level, process pending soft interrupts immediately.
 */
void
_setsoftintr(int si)
{
	int oldirqstate;

	/* Mark pending with CPU interrupts off to avoid a lost update. */
	oldirqstate = disable_interrupts(I32_bit);
	i80321_ipending |= SI_TO_IRQBIT(si);
	restore_interrupts(oldirqstate);

	/* Process unmasked pending soft interrupts. */
	if ((i80321_ipending & INT_SWMASK) & ~current_spl_level)
		i80321_do_pending();
}
365
/*
 * i80321_icu_init:
 *
 *	Initialize the i80321 ICU.  Called early in bootstrap
 *	to make sure the ICU is in a pristine state.
 */
void
i80321_icu_init(void)
{

	intr_enabled = 0;	/* All interrupts disabled */
	i80321_set_intrmask();

	intr_steer = 0;		/* All interrupts steered to IRQ */
	i80321_set_intrsteer();
}
382
/*
 * i80321_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices: set up the handler
 *	queues and event counters, compute the initial masks, and
 *	enable IRQs on the CPU.
 */
void
i80321_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		/* Attach an event counter per IRQ, named after the line. */
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "iop321", i80321_irqnames[i]);
	}

	i80321_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}
410
411void *
412i80321_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
413{
414	struct intrq *iq;
415	struct intrhand *ih;
416	u_int oldirqstate;
417
418	if (irq < 0 || irq > NIRQ)
419		panic("i80321_intr_establish: IRQ %d out of range", irq);
420
421	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
422	if (ih == NULL)
423		return (NULL);
424
425	ih->ih_func = func;
426	ih->ih_arg = arg;
427	ih->ih_ipl = ipl;
428	ih->ih_irq = irq;
429
430	iq = &intrq[irq];
431
432	/* All IOP321 interrupts are level-triggered. */
433	iq->iq_ist = IST_LEVEL;
434
435	oldirqstate = disable_interrupts(I32_bit);
436
437	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
438
439	i80321_intr_calculate_masks();
440
441	restore_interrupts(oldirqstate);
442
443	return (ih);
444}
445
446void
447i80321_intr_disestablish(void *cookie)
448{
449	struct intrhand *ih = cookie;
450	struct intrq *iq = &intrq[ih->ih_irq];
451	int oldirqstate;
452
453	oldirqstate = disable_interrupts(I32_bit);
454
455	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
456
457	i80321_intr_calculate_masks();
458
459	restore_interrupts(oldirqstate);
460}
461
/*
 * i80321_intr_dispatch:
 *
 *	Main hardware interrupt dispatcher, entered from the IRQ
 *	vector with `frame' describing the interrupted context.
 *	Services every pending, unmasked IRQ; masked IRQs are left
 *	disabled and marked pending for later replay.
 */
void
i80321_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, pcpl, irq, ibit, hwpend;

	/* Remember the spl level at entry so it can be restored. */
	pcpl = current_spl_level;

	hwpend = i80321_iintsrc_read();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	i80321_set_intrmask();

	while (hwpend != 0) {
		/* Service lowest-numbered pending IRQ first. */
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (pcpl & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
			i80321_ipending |= ibit;
			continue;
		}

		i80321_ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		/* Block everything this IRQ's handlers require. */
		current_spl_level |= iq->iq_mask;
		/* Run the handler chain with CPU interrupts enabled. */
		oldirqstate = enable_interrupts(I32_bit);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			/* NULL ih_arg means "pass the clock frame". */
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		}
		restore_interrupts(oldirqstate);

		current_spl_level = pcpl;

		/* Re-enable this interrupt now that it's cleared. */
		intr_enabled |= ibit;
		i80321_set_intrmask();
	}

	/* Check for pending soft intrs. */
	if ((i80321_ipending & INT_SWMASK) & ~current_spl_level) {
		oldirqstate = enable_interrupts(I32_bit);
		i80321_do_pending();
		restore_interrupts(oldirqstate);
	}
}
522