/*	$NetBSD: i80321_icu.c,v 1.15 2007/12/03 15:33:20 ad Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2006 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i80321_icu.c,v 1.15 2007/12/03 15:33:20 ad Exp $");

#ifndef EVBARM_SPL_NOINLINE
#define	EVBARM_SPL_NOINLINE
#endif

/*
 * Interrupt support for the Intel i80321 I/O Processor.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
int i80321_imask[NIPL];

/* Current interrupt priority level. */
volatile int current_spl_level;

/* Interrupts pending. */
volatile int i80321_ipending;

/* Software copy of the IRQs we have enabled. */
volatile uint32_t intr_enabled;
/* Mask of interrupts steered to FIQs. */
uint32_t intr_steer;

/*
 * Map a software interrupt queue index to the unused bits in the
 * ICU registers (XXX: will need to revisit this if those bits are
 * ever used in future steppings).
 */
static const uint32_t si_to_irqbit[4] = {
	ICU_INT_bit26,		/* SI_SOFTCLOCK */
	ICU_INT_bit22,		/* SI_SOFTBIO */
	ICU_INT_bit5,		/* SI_SOFTNET */
	ICU_INT_bit4,		/* SI_SOFTSERIAL */
};

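/*
 * Convert a software interrupt index into the corresponding bit mask
 * in the ICU registers (the table above holds bit numbers).
 */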
#define	SI_TO_IRQBIT(si)	(1U << si_to_irqbit[(si)])

/*
 * Map a software interrupt queue to an interrupt priority level.
 */
static const int si_to_ipl[4] = {
	IPL_SOFTCLOCK,		/* SI_SOFTCLOCK */
	IPL_SOFTBIO,		/* SI_SOFTBIO */
	IPL_SOFTNET,		/* SI_SOFTNET */
	IPL_SOFTSERIAL,		/* SI_SOFTSERIAL */
};

/*
 * Interrupt bit names.
 */
const char *i80321_irqnames[] = {
	"DMA0 EOT",
	"DMA0 EOC",
	"DMA1 EOT",
	"DMA1 EOC",
	"irq 4",
	"irq 5",
	"AAU EOT",
	"AAU EOC",
	"core PMU",
	"TMR0 (hardclock)",
	"TMR1",
	"I2C0",
	"I2C1",
	"MU",
	"BIST",
	"periph PMU",
	"XScale PMU",
	"BIU error",
	"ATU error",
	"MCU error",
	"DMA0 error",
	"DMA1 error",
	"irq 22",
	"AAU error",
	"MU error",
	"SSP",
	"irq 26",
	"irq 27",
	"irq 28",
	"irq 29",
	"irq 30",
	"irq 31",
};

void	i80321_intr_dispatch(struct clockframe *frame);

static inline uint32_t
i80321_iintsrc_read(void)
{
	uint32_t iintsrc;

	__asm volatile("mrc p6, 0, %0, c8, c0, 0"
		: "=r" (iintsrc));

	/*
	 * The IINTSRC register shows bits that are active even
	 * if they are masked in INTCTL, so we have to mask them
	 * off with the interrupts we consider enabled.
	 */
	return (iintsrc & intr_enabled);
}

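/*
 * Write the software copy of the steering mask to the ICU's interrupt
 * steering register, selecting which sources raise FIQ instead of IRQ.
 */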
static inline void
i80321_set_intrsteer(void)
{

	__asm volatile("mcr p6, 0, %0, c4, c0, 0"
		:
		: "r" (intr_steer & ICU_INT_HWMASK));
}

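/*
 * Enable or disable a single IRQ source: update the software copy of
 * the enabled-IRQ mask, then push it out to the interrupt controller.
 */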
static inline void
i80321_enable_irq(int irq)
{

	intr_enabled |= (1U << irq);
	i80321_set_intrmask();
}

static inline void
i80321_disable_irq(int irq)
{

	intr_enabled &= ~(1U << irq);
	i80321_set_intrmask();
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
i80321_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		i80321_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		i80321_imask[ipl] = irqs;
	}

	i80321_imask[IPL_NONE] = 0;

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	i80321_imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
	i80321_imask[IPL_SOFTBIO] = SI_TO_IRQBIT(SI_SOFTBIO);
	i80321_imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
	i80321_imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);

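	/*
	 * Make each level's mask a superset of the masks below it, so
	 * that raising the IPL blocks everything blocked at any lower
	 * IPL as well.
	 */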
	i80321_imask[IPL_SOFTBIO] |= i80321_imask[IPL_SOFTCLOCK];
	i80321_imask[IPL_SOFTNET] |= i80321_imask[IPL_SOFTBIO];
	i80321_imask[IPL_SOFTSERIAL] |= i80321_imask[IPL_SOFTNET];
	i80321_imask[IPL_VM] |= i80321_imask[IPL_SOFTSERIAL];
	i80321_imask[IPL_SCHED] |= i80321_imask[IPL_VM];
	i80321_imask[IPL_HIGH] |= i80321_imask[IPL_SCHED];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			i80321_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= i80321_imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}

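/*
 * Run any pending software interrupts that are no longer masked at
 * the current spl level.  The simple lock keeps this from being
 * entered recursively.
 */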
void
i80321_do_pending(void)
{
#ifdef __HAVE_FAST_SOFTINTS
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	int new, oldirqstate;

	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	new = current_spl_level;

	oldirqstate = disable_interrupts(I32_bit);

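/*
 * Process one class of software interrupt: if it is pending and not
 * masked at the saved spl level, clear its pending bit, raise the
 * spl mask to its IPL, and dispatch it with CPU interrupts
 * re-enabled.
 */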
#define	DO_SOFTINT(si)							\
	if ((i80321_ipending & ~new) & SI_TO_IRQBIT(si)) {		\
		i80321_ipending &= ~SI_TO_IRQBIT(si);			\
		current_spl_level |= i80321_imask[si_to_ipl[(si)]];	\
		restore_interrupts(oldirqstate);			\
		softintr_dispatch(si);					\
		oldirqstate = disable_interrupts(I32_bit);		\
		current_spl_level = new;				\
	}

	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTBIO);
	DO_SOFTINT(SI_SOFTCLOCK);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
#endif
}

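/*
 * spl entry points.  EVBARM_SPL_NOINLINE, defined at the top of this
 * file, selects the out-of-line versions of the i80321 spl primitives
 * that these wrappers call.
 */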
void
splx(int new)
{

	i80321_splx(new);
}

int
_spllower(int ipl)
{

	return (i80321_spllower(ipl));
}

int
_splraise(int ipl)
{

	return (i80321_splraise(ipl));
}

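/*
 * Mark a software interrupt as pending, then run it immediately if
 * it is not masked at the current spl level.
 */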
void
_setsoftintr(int si)
{
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	i80321_ipending |= SI_TO_IRQBIT(si);
	restore_interrupts(oldirqstate);

	/* Process unmasked pending soft interrupts. */
	if ((i80321_ipending & INT_SWMASK) & ~current_spl_level)
		i80321_do_pending();
}

/*
 * i80321_icu_init:
 *
 *	Initialize the i80321 ICU.  Called early in bootstrap
 *	to make sure the ICU is in a pristine state.
 */
void
i80321_icu_init(void)
{

	intr_enabled = 0;	/* All interrupts disabled */
	i80321_set_intrmask();

	intr_steer = 0;		/* All interrupts steered to IRQ */
	i80321_set_intrsteer();
}

/*
 * i80321_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
i80321_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "iop321", i80321_irqnames[i]);
	}

	i80321_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

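/*
 * Establish an interrupt handler: allocate a handler record, queue it
 * on the IRQ's handler list, and recompute the spl masks.
 */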
void *
i80321_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("i80321_intr_establish: IRQ %d out of range", irq);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;
	ih->ih_irq = irq;

	iq = &intrq[irq];

	/* All IOP321 interrupts are level-triggered. */
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	return (ih);
}

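/*
 * Remove a previously-established handler and recompute the masks.
 */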
void
i80321_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);
}

/*
 * Hardware interrupt handler.
 *
 * If I80321_HPI_ENABLED is defined, this code attempts to deal with
 * HPI interrupts as best it can.
 *
 * The problem is that HPIs cannot be masked at the interrupt controller;
 * they can only be masked by disabling IRQs in the XScale core.
 *
 * So, if an HPI comes in and we determine that it should be masked at
 * the current IPL then we mark it pending in the usual way and set
 * I32_bit in the interrupt frame. This ensures that when we return from
 * i80321_intr_dispatch(), IRQs will be disabled in the XScale core. (To
 * ensure IRQs are enabled later, i80321_splx() has been modified to do
 * just that when a pending HPI interrupt is unmasked.) Additionally,
 * because HPIs are level-triggered, the registered handler for the HPI
 * interrupt will also be invoked with IRQs disabled. If a masked HPI
 * occurs at the same time as another unmasked higher priority interrupt,
 * the higher priority handler will also be invoked with IRQs disabled.
 * As a result, the system could end up executing a lot of code with IRQs
 * completely disabled if the HPI's IPL is relatively low.
 *
 * At the present time, the only known use of HPI is for the console UART
 * on a couple of boards. This is probably the least intrusive use of HPI
 * as IPL_SERIAL is the highest priority IPL in the system anyway. The
 * code has not been tested with HPI hooked up to a class of device which
 * interrupts below IPL_SERIAL. Indeed, such a configuration is likely to
 * perform very poorly if at all, even though the following code has been
 * designed (hopefully) to cope with it.
 */

void
i80321_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, pcpl, irq, ibit, hwpend;
#ifdef I80321_HPI_ENABLED
	int oldpending;
#endif

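	/* Save the spl mask in force when the interrupt arrived. */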
	pcpl = current_spl_level;

	hwpend = i80321_iintsrc_read();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	i80321_set_intrmask();

#ifdef I80321_HPI_ENABLED
	oldirqstate = 0;	/* XXX: quell gcc warning */
#endif

	while (hwpend != 0) {
#ifdef I80321_HPI_ENABLED
		/* Deal with HPI interrupt first */
		if (__predict_false(hwpend & INT_HPIMASK))
			irq = ICU_INT_HPI;
		else
#endif
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (pcpl & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
#ifdef I80321_HPI_ENABLED
			if (__predict_false(irq == ICU_INT_HPI)) {
				/*
				 * This is an HPI. We *must* disable
				 * IRQs in the interrupt frame until
				 * INT_HPIMASK is cleared by a later
				 * call to splx(). Otherwise the level-
				 * triggered interrupt will just keep
				 * coming back.
				 */
				frame->cf_if.if_spsr |= I32_bit;
			}
#endif
			i80321_ipending |= ibit;
			continue;
		}

#ifdef I80321_HPI_ENABLED
		oldpending = i80321_ipending | ibit;
#endif
		i80321_ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		current_spl_level |= iq->iq_mask;
#ifdef I80321_HPI_ENABLED
		/*
		 * Re-enable interrupts iff an HPI is not pending
		 */
		if (__predict_true((oldpending & INT_HPIMASK) == 0))
#endif
		oldirqstate = enable_interrupts(I32_bit);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		}
#ifdef I80321_HPI_ENABLED
		if (__predict_true((oldpending & INT_HPIMASK) == 0))
#endif
		restore_interrupts(oldirqstate);
#ifdef I80321_HPI_ENABLED
		else if (irq == ICU_INT_HPI) {
			/*
			 * We've just handled the HPI. Make sure IRQs
			 * are enabled in the interrupt frame.
			 * Here's hoping the handler really did clear
			 * down the source...
			 */
			frame->cf_if.if_spsr &= ~I32_bit;
		}
#endif
		current_spl_level = pcpl;

		/* Re-enable this interrupt now that it's cleared. */
		intr_enabled |= ibit;
		i80321_set_intrmask();

		/*
		 * Don't forget to include interrupts which may have
		 * arrived in the meantime.
		 */
		hwpend |= ((i80321_ipending & ICU_INT_HWMASK) & ~pcpl);
	}

	/* Check for pending soft interrupts. */
	if ((i80321_ipending & INT_SWMASK) & ~current_spl_level) {
#ifdef I80321_HPI_ENABLED
		/* XXX: This is only necessary if HPI is < IPL_SOFT* */
		if (__predict_true((i80321_ipending & INT_HPIMASK) == 0))
#endif
		oldirqstate = enable_interrupts(I32_bit);
		i80321_do_pending();
#ifdef I80321_HPI_ENABLED
		/* XXX: This is only necessary if HPI is < IPL_SOFT* */
		if (__predict_true((i80321_ipending & INT_HPIMASK) == 0))
#endif
		restore_interrupts(oldirqstate);
	}
}
