/*	$NetBSD: i80321_icu.c,v 1.22 2011/07/01 20:32:51 dyoung Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2006 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i80321_icu.c,v 1.22 2011/07/01 20:32:51 dyoung Exp $");

#ifndef EVBARM_SPL_NOINLINE
#define	EVBARM_SPL_NOINLINE
#endif

/*
 * Interrupt support for the Intel i80321 I/O Processor.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <sys/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
int i80321_imask[NIPL];

/* Interrupts pending. */
volatile int i80321_ipending;

/* Software copy of the IRQs we have enabled. */
volatile uint32_t intr_enabled;

/* Mask of interrupts steered to FIQs. */
uint32_t intr_steer;

/*
 * Interrupt bit names.
 */
const char * const i80321_irqnames[] = {
	"DMA0 EOT",
	"DMA0 EOC",
	"DMA1 EOT",
	"DMA1 EOC",
	"irq 4",
	"irq 5",
	"AAU EOT",
	"AAU EOC",
	"core PMU",
	"TMR0 (hardclock)",
	"TMR1",
	"I2C0",
	"I2C1",
	"MU",
	"BIST",
	"periph PMU",
	"XScale PMU",
	"BIU error",
	"ATU error",
	"MCU error",
	"DMA0 error",
	"DMA1 error",
	"irq 22",
	"AAU error",
	"MU error",
	"SSP",
	"irq 26",
	"irq 27",
	"irq 28",
	"irq 29",
	"irq 30",
	"irq 31",
};

void	i80321_intr_dispatch(struct clockframe *frame);

static inline uint32_t
i80321_iintsrc_read(void)
{
	uint32_t iintsrc;

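	/*
	 * The ICU registers live in coprocessor 6 on the i80321;
	 * IINTSRC is read through c8.
	 */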
	__asm volatile("mrc p6, 0, %0, c8, c0, 0"
		: "=r" (iintsrc));

	/*
	 * The IINTSRC register shows bits that are active even
	 * if they are masked in INTCTL, so we have to mask them
	 * off with the interrupts we consider enabled.
	 */
	return (iintsrc & intr_enabled);
}

static inline void
i80321_set_intrsteer(void)
{

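	/*
	 * The steering register (coprocessor 6, c4) selects IRQ vs. FIQ
	 * delivery per source; ICU_INT_HWMASK clips off bits that have
	 * no hardware input behind them.
	 */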
	__asm volatile("mcr p6, 0, %0, c4, c0, 0"
		:
		: "r" (intr_steer & ICU_INT_HWMASK));
}

static inline void
i80321_enable_irq(int irq)
{

	intr_enabled |= (1U << irq);
	i80321_set_intrmask();
}

static inline void
i80321_disable_irq(int irq)
{

	intr_enabled &= ~(1U << irq);
	i80321_set_intrmask();
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
i80321_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		i80321_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		i80321_imask[ipl] = irqs;
	}

	KASSERT(i80321_imask[IPL_NONE] == 0);
	KASSERT(i80321_imask[IPL_SOFTCLOCK] == 0);
	KASSERT(i80321_imask[IPL_SOFTBIO] == 0);
	KASSERT(i80321_imask[IPL_SOFTNET] == 0);
	KASSERT(i80321_imask[IPL_SOFTSERIAL] == 0);

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance of not dropping data.
	 */

#if 0
	/*
	 * This assert might be useful, but only after some interrupts
	 * are configured.  As it stands now, it will always fire early
	 * in the initialization phase.  If it's useful enough to re-
	 * enable, it should be conditionalized on something else like
	 * having at least something in the levels/irqs above.
	 */
	KASSERT(i80321_imask[IPL_VM] != 0);
#endif
	i80321_imask[IPL_SCHED] |= i80321_imask[IPL_VM];
	i80321_imask[IPL_HIGH] |= i80321_imask[IPL_SCHED];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			i80321_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= i80321_imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}
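
/*
 * Worked example (hypothetical configuration): with TMR0 (bit 9) hooked
 * at IPL_SCHED and I2C0 (bit 11) hooked at IPL_VM, i80321_imask[IPL_VM]
 * contains bit 11, and i80321_imask[IPL_SCHED] contains bits 9 and 11
 * once the hierarchy above is applied.  Running the I2C0 handler at
 * IPL_VM therefore leaves the clock unmasked, while the clock handler
 * at IPL_SCHED masks I2C0.
 */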

void
splx(int new)
{
	i80321_splx(new);
}

int
_spllower(int ipl)
{
	return (i80321_spllower(ipl));
}

int
_splraise(int ipl)
{
	return (i80321_splraise(ipl));
}
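
/*
 * A minimal usage sketch (illustrative only; "sc" and MYDEV_BUSY are
 * placeholders): driver code keeps an IPL_VM interrupt handler at bay
 * while touching shared state by raising and restoring the priority
 * level through the wrappers above:
 *
 *	int s = splvm();
 *	sc->sc_flags |= MYDEV_BUSY;
 *	splx(s);
 */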

/*
 * i80321_icu_init:
 *
 *	Initialize the i80321 ICU.  Called early in bootstrap
 *	to make sure the ICU is in a pristine state.
 */
void
i80321_icu_init(void)
{

	intr_enabled = 0;	/* All interrupts disabled */
	i80321_set_intrmask();

	intr_steer = 0;		/* All interrupts steered to IRQ */
	i80321_set_intrsteer();
}

/*
 * i80321_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
i80321_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);
	}

	i80321_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

void
i80321_intr_evcnt_attach(void)
{
	for (u_int i = 0; i < NIRQ; i++) {
		struct intrq *iq = &intrq[i];
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "iop321", i80321_irqnames[i]);
	}
}

void *
i80321_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("i80321_intr_establish: IRQ %d out of range", irq);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;
	ih->ih_irq = irq;

	iq = &intrq[irq];

	/* All IOP321 interrupts are level-triggered. */
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	return (ih);
}
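
/*
 * Typical driver usage (a sketch; "mydev_intr" and "sc" are placeholders,
 * and the IRQ number comes from the device's ICU assignment):
 *
 *	sc->sc_ih = i80321_intr_establish(irq, IPL_VM, mydev_intr, sc);
 *	if (sc->sc_ih == NULL)
 *		aprint_error("unable to establish interrupt\n");
 *
 * and later, to tear it down:
 *
 *	i80321_intr_disestablish(sc->sc_ih);
 */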

void
i80321_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	i80321_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	/* Nothing can reference the handler now; release its descriptor. */
	free(ih, M_DEVBUF);
}

/*
 * Hardware interrupt handler.
 *
 * If I80321_HPI_ENABLED is defined, this code attempts to deal with
 * HPI interrupts as best it can.
 *
 * The problem is that HPIs cannot be masked at the interrupt controller;
 * they can only be masked by disabling IRQs in the XScale core.
 *
 * So, if an HPI comes in and we determine that it should be masked at
 * the current IPL then we mark it pending in the usual way and set
 * I32_bit in the interrupt frame. This ensures that when we return from
 * i80321_intr_dispatch(), IRQs will be disabled in the XScale core. (To
 * ensure IRQs are enabled later, i80321_splx() has been modified to do
 * just that when a pending HPI interrupt is unmasked.) Additionally,
 * because HPIs are level-triggered, the registered handler for the HPI
 * interrupt will also be invoked with IRQs disabled. If a masked HPI
 * occurs at the same time as another unmasked higher priority interrupt,
 * the higher priority handler will also be invoked with IRQs disabled.
 * As a result, the system could end up executing a lot of code with IRQs
 * completely disabled if the HPI's IPL is relatively low.
 *
 * At the present time, the only known use of HPI is for the console UART
 * on a couple of boards. This is probably the least intrusive use of HPI
 * as IPL_SERIAL is the highest priority IPL in the system anyway. The
 * code has not been tested with HPI hooked up to a class of device which
 * interrupts below IPL_SERIAL. Indeed, such a configuration is likely to
 * perform very poorly if at all, even though the following code has been
 * designed (hopefully) to cope with it.
 */

void
i80321_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, irq, ibit, hwpend;
#ifdef I80321_HPI_ENABLED
	int oldpending;
#endif
	struct cpu_info * const ci = curcpu();
	const int ppl = ci->ci_cpl;
	const uint32_t imask = i80321_imask[ppl];

	hwpend = i80321_iintsrc_read();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * reenable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	i80321_set_intrmask();

#ifdef I80321_HPI_ENABLED
	oldirqstate = 0;	/* XXX: quell gcc warning */
#endif

	while (hwpend != 0) {
#ifdef I80321_HPI_ENABLED
		/* Deal with HPI interrupt first */
		if (__predict_false(hwpend & INT_HPIMASK))
			irq = ICU_INT_HPI;
		else
#endif
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (imask & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
#ifdef I80321_HPI_ENABLED
			if (__predict_false(irq == ICU_INT_HPI)) {
				/*
				 * This is an HPI. We *must* disable
				 * IRQs in the interrupt frame until
				 * INT_HPIMASK is cleared by a later
				 * call to splx(). Otherwise the level-
				 * triggered interrupt will just keep
				 * coming back.
				 */
				frame->cf_if.if_spsr |= I32_bit;
			}
#endif
			i80321_ipending |= ibit;
			continue;
		}

#ifdef I80321_HPI_ENABLED
		oldpending = i80321_ipending | ibit;
#endif
		i80321_ipending &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		ci->ci_data.cpu_nintr++;
#ifdef I80321_HPI_ENABLED
		/*
		 * Re-enable interrupts iff an HPI is not pending
		 */
		if (__predict_true((oldpending & INT_HPIMASK) == 0)) {
#endif
			TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
				ci->ci_cpl = ih->ih_ipl;
				oldirqstate = enable_interrupts(I32_bit);
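				/*
				 * A NULL ih_arg means the handler is
				 * passed the interrupt frame instead.
				 */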
				(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
				restore_interrupts(oldirqstate);
			}
#ifdef I80321_HPI_ENABLED
		} else if (irq == ICU_INT_HPI) {
			/*
			 * We've just handled the HPI. Make sure IRQs
			 * are enabled in the interrupt frame.
			 * Here's hoping the handler really did clear
			 * down the source...
			 */
			frame->cf_if.if_spsr &= ~I32_bit;
		}
#endif
		ci->ci_cpl = ppl;

		/* Re-enable this interrupt now that it's cleared. */
		intr_enabled |= ibit;
		i80321_set_intrmask();

		/*
		 * Don't forget to include interrupts which may have
		 * arrived in the meantime.
		 */
		hwpend |= ((i80321_ipending & ICU_INT_HWMASK) & ~imask);
	}

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}