/* $NetBSD: ixp12x0_intr.c,v 1.4 2002/12/02 14:10:13 ichiro Exp $ */

/*
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ichiro FUKUHARA and Naoto Shimazaki.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Interrupt support for the Intel IXP12x0.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/termios.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/ixp12x0/ixp12x0reg.h>
#include <arm/ixp12x0/ixp12x0var.h>
#include <arm/ixp12x0/ixp12x0_comreg.h>
#include <arm/ixp12x0/ixp12x0_comvar.h>
#include <arm/ixp12x0/ixp12x0_pcireg.h>

extern u_int32_t	ixpcom_cr;	/* current CR from *_com.c */
extern u_int32_t	ixpcom_imask;	/* interrupt mask handed back to *_com.c */

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
static u_int32_t imask[NIPL];
static u_int32_t pci_imask[NIPL];

/* Current interrupt priority level. */
__volatile int current_spl_level;

/* Software copy of the IRQs we have enabled. */
__volatile u_int32_t intr_enabled;
__volatile u_int32_t pci_intr_enabled;

/* Interrupts pending. */
static __volatile int ipending;

/*
 * Map a software interrupt queue index to the unused bits in the
 * ICU registers (XXX: this will need to be revisited if those bits
 * are ever used in future steppings).
 */
static const uint32_t si_to_irqbit[SI_NQUEUES] = {
	IXP12X0_INTR_bit30,		/* SI_SOFT */
	IXP12X0_INTR_bit29,		/* SI_SOFTCLOCK */
	IXP12X0_INTR_bit28,		/* SI_SOFTNET */
	IXP12X0_INTR_bit27,		/* SI_SOFTSERIAL */
};

#define	INT_SWMASK							\
	((1U << IXP12X0_INTR_bit30) | (1U << IXP12X0_INTR_bit29) |	\
	 (1U << IXP12X0_INTR_bit28) | (1U << IXP12X0_INTR_bit27))

#define	SI_TO_IRQBIT(si)	(1U << si_to_irqbit[(si)])

/*
 * Map a software interrupt queue to an interrupt priority level.
 */
static const int si_to_ipl[SI_NQUEUES] = {
	IPL_SOFT,		/* SI_SOFT */
	IPL_SOFTCLOCK,		/* SI_SOFTCLOCK */
	IPL_SOFTNET,		/* SI_SOFTNET */
	IPL_SOFTSERIAL,		/* SI_SOFTSERIAL */
};

void	ixp12x0_intr_dispatch(struct clockframe *frame);

static __inline u_int32_t
ixp12x0_irq_read(void)
{
	return IXPREG(IXP12X0_IRQ_VBASE) & IXP12X0_INTR_MASK;
}

static __inline u_int32_t
ixp12x0_pci_irq_read(void)
{
	return IXPREG(IXPPCI_IRQ_STATUS);
}

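/*
 * Masking of the on-chip UART interrupt is handled at the device
 * rather than in the ICU: the receive/transmit interrupt enable bits
 * (CR_RIE/CR_XIE) are cleared in the com controller's CR register,
 * and the mask in effect is published to *_com.c via ixpcom_imask.
 */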
static void
ixp12x0_enable_uart_irq(void)
{
	ixpcom_imask = 0;
	if (ixpcom_sc)
		bus_space_write_4(ixpcom_sc->sc_iot, ixpcom_sc->sc_ioh,
				  IXPCOM_CR, ixpcom_cr & ~ixpcom_imask);
}

static void
ixp12x0_disable_uart_irq(void)
{
	ixpcom_imask = CR_RIE | CR_XIE;
	if (ixpcom_sc)
		bus_space_write_4(ixpcom_sc->sc_iot, ixpcom_sc->sc_ioh,
				  IXPCOM_CR, ixpcom_cr & ~ixpcom_imask);
}

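/*
 * Apply a new interrupt mask.  The UART source is gated at the device
 * (see above); PCI interrupt sources are masked through the ICU's
 * ENABLE_CLEAR/ENABLE_SET registers, leaving only the enabled,
 * unmasked sources turned on.
 */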
static void
ixp12x0_set_intrmask(u_int32_t irqs, u_int32_t pci_irqs)
{
	if (irqs & (1U << IXP12X0_INTR_UART)) {
		ixp12x0_disable_uart_irq();
	} else {
		ixp12x0_enable_uart_irq();
	}
	IXPREG(IXPPCI_IRQ_ENABLE_CLEAR) = pci_irqs;
	IXPREG(IXPPCI_IRQ_ENABLE_SET) = pci_intr_enabled & ~pci_irqs;
}

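/*
 * Enable delivery of an IRQ.  For system IRQs (irq < SYS_NIRQ) only
 * the software copy is updated, except for the UART, which is
 * un-gated at the device; PCI IRQs are enabled directly in the ICU.
 */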
static void
ixp12x0_enable_irq(int irq)
{
	if (irq < SYS_NIRQ) {
		intr_enabled |= (1U << irq);
		switch (irq) {
		case IXP12X0_INTR_UART:
			ixp12x0_enable_uart_irq();
			break;

		case IXP12X0_INTR_PCI:
			/* nothing to do */
			break;
		default:
			panic("enable_irq: bad IRQ %d", irq);
		}
	} else {
		pci_intr_enabled |= (1U << (irq - SYS_NIRQ));
		IXPREG(IXPPCI_IRQ_ENABLE_SET) = (1U << (irq - SYS_NIRQ));
	}
}

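/*
 * Disable delivery of an IRQ; the inverse of ixp12x0_enable_irq().
 */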
static __inline void
ixp12x0_disable_irq(int irq)
{
	if (irq < SYS_NIRQ) {
		intr_enabled &= ~(1U << irq);
		switch (irq) {
		case IXP12X0_INTR_UART:
			ixp12x0_disable_uart_irq();
			break;

		case IXP12X0_INTR_PCI:
			/* nothing to do */
			break;
		default:
			/* nothing to do */
			break;
		}
	} else {
		pci_intr_enabled &= ~(1U << (irq - SYS_NIRQ));
		IXPREG(IXPPCI_IRQ_ENABLE_CLEAR) = (1U << (irq - SYS_NIRQ));
	}
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
ixp12x0_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		ixp12x0_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		int pci_irqs = 0;
		for (irq = 0; irq < SYS_NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		imask[ipl] = irqs;
		for (irq = 0; irq < SYS_NIRQ; irq++) {
			if (intrq[irq + SYS_NIRQ].iq_levels & (1U << ipl))
				pci_irqs |= (1U << irq);
		}
		pci_imask[ipl] = pci_irqs;
	}

	imask[IPL_NONE] = 0;
	pci_imask[IPL_NONE] = 0;

	/*
	 * Initialize the soft interrupt masks to block themselves.
	 */
	imask[IPL_SOFT] = SI_TO_IRQBIT(SI_SOFT);
	imask[IPL_SOFTCLOCK] = SI_TO_IRQBIT(SI_SOFTCLOCK);
	imask[IPL_SOFTNET] = SI_TO_IRQBIT(SI_SOFTNET);
	imask[IPL_SOFTSERIAL] = SI_TO_IRQBIT(SI_SOFTSERIAL);

	/*
	 * splsoftclock() is the only interface that users of the
	 * generic software interrupt facility have to block their
	 * soft intrs, so splsoftclock() must also block IPL_SOFT.
	 */
	imask[IPL_SOFTCLOCK] |= imask[IPL_SOFT];
	pci_imask[IPL_SOFTCLOCK] |= pci_imask[IPL_SOFT];

	/*
	 * splsoftnet() must also block splsoftclock(), since we don't
	 * want timer-driven network events to occur while we're
	 * processing incoming packets.
	 */
	imask[IPL_SOFTNET] |= imask[IPL_SOFTCLOCK];
	pci_imask[IPL_SOFTNET] |= pci_imask[IPL_SOFTCLOCK];

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	imask[IPL_BIO] |= imask[IPL_SOFTNET];
	pci_imask[IPL_BIO] |= pci_imask[IPL_SOFTNET];
	imask[IPL_NET] |= imask[IPL_BIO];
	pci_imask[IPL_NET] |= pci_imask[IPL_BIO];
	imask[IPL_SOFTSERIAL] |= imask[IPL_NET];
	pci_imask[IPL_SOFTSERIAL] |= pci_imask[IPL_NET];
	imask[IPL_TTY] |= imask[IPL_SOFTSERIAL];
	pci_imask[IPL_TTY] |= pci_imask[IPL_SOFTSERIAL];

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	imask[IPL_IMP] |= imask[IPL_TTY];
	pci_imask[IPL_IMP] |= pci_imask[IPL_TTY];

	/*
	 * Audio devices are not allowed to perform memory allocation
	 * in their interrupt routines, and they have fairly "real-time"
	 * requirements, so give them a high interrupt priority.
	 */
	imask[IPL_AUDIO] |= imask[IPL_IMP];
	pci_imask[IPL_AUDIO] |= pci_imask[IPL_IMP];

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	imask[IPL_CLOCK] |= imask[IPL_AUDIO];
	pci_imask[IPL_CLOCK] |= pci_imask[IPL_AUDIO];

	/*
	 * There is no separate statclock on the IXP12x0.
	 */
	imask[IPL_STATCLOCK] |= imask[IPL_CLOCK];
	pci_imask[IPL_STATCLOCK] |= pci_imask[IPL_CLOCK];

	/*
	 * splhigh() must block "everything".
	 */
	imask[IPL_HIGH] |= imask[IPL_STATCLOCK];
	pci_imask[IPL_HIGH] |= pci_imask[IPL_STATCLOCK];

	/*
	 * XXX We need serial drivers to run at the absolute highest priority
	 * in order to avoid overruns, so serial > high.
	 */
	imask[IPL_SERIAL] |= imask[IPL_HIGH];
	pci_imask[IPL_SERIAL] |= pci_imask[IPL_HIGH];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int	irqs;
		int	pci_irqs;

		if (irq < SYS_NIRQ) {
			irqs = (1U << irq);
			pci_irqs = 0;
		} else {
			irqs = 0;
			pci_irqs = (1U << (irq - SYS_NIRQ));
		}
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			ixp12x0_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			irqs |= imask[ih->ih_ipl];
			pci_irqs |= pci_imask[ih->ih_ipl];
		}
		iq->iq_mask = irqs;
		iq->iq_pci_mask = pci_irqs;
	}
}

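/*
 * Run any pending soft interrupts that the current spl leaves
 * unmasked.  The simple lock prevents re-entry while handlers run
 * with hardware interrupts enabled.
 */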
static void
ixp12x0_do_pending(void)
{
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	int	new;
	u_int	oldirqstate;

	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	new = current_spl_level;

	oldirqstate = disable_interrupts(I32_bit);

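/*
 * Dispatch one soft interrupt queue if it is pending and unmasked at
 * the current level: clear its pending bit, run its handlers at the
 * queue's own IPL with interrupts enabled, then restore the level.
 */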
#define	DO_SOFTINT(si)							\
	if ((ipending & ~imask[new]) & SI_TO_IRQBIT(si)) {		\
		ipending &= ~SI_TO_IRQBIT(si);				\
		current_spl_level = si_to_ipl[(si)];			\
		restore_interrupts(oldirqstate);			\
		softintr_dispatch(si);					\
		oldirqstate = disable_interrupts(I32_bit);		\
		current_spl_level = new;				\
	}

	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTCLOCK);
	DO_SOFTINT(SI_SOFT);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
}

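/*
 * Set the current spl, program the hardware masks to match, and run
 * any soft interrupts that the new (lower) level unmasks.
 */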
__inline void
splx(int new)
{
	int	old;
	u_int	oldirqstate;

	if (current_spl_level == new)
		return;
	oldirqstate = disable_interrupts(I32_bit);
	old = current_spl_level;
	current_spl_level = new;
	ixp12x0_set_intrmask(imask[new], pci_imask[new]);
	restore_interrupts(oldirqstate);

	/* If there are software interrupts to process, do it. */
	if ((ipending & INT_SWMASK) & ~imask[new])
		ixp12x0_do_pending();
}

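/*
 * Raise the spl to `ipl' if it is above the current level and return
 * the previous level.  Typical use is through the machine-independent
 * spl wrappers built on these primitives (sketch):
 *
 *	s = splserial();
 *	... modify driver state ...
 *	splx(s);
 */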
int
_splraise(int ipl)
{
	int	old = current_spl_level;

	if (old >= ipl)
		return (old);
	splx(ipl);
	return (old);
}

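/*
 * Lower the spl to `ipl' if it is below the current level; return the
 * previous level.
 */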
int
_spllower(int ipl)
{
	int	old = current_spl_level;

	if (old <= ipl)
		return (old);
	splx(ipl);
	return (old);
}

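/*
 * Mark a soft interrupt pending and dispatch it at once if the
 * current spl does not mask it.
 */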
void
_setsoftintr(int si)
{
	u_int	oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	ipending |= SI_TO_IRQBIT(si);
	restore_interrupts(oldirqstate);

	/* Process unmasked pending soft interrupts. */
	if ((ipending & INT_SWMASK) & ~imask[current_spl_level])
		ixp12x0_do_pending();
}

/*
 * ixp12x0_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
ixp12x0_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;
	pci_intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		sprintf(iq->iq_name, "ipl %d", i);
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
				     NULL, "ixpintr", iq->iq_name);
	}
	current_intr_depth = 0;
	current_spl_level = 0;

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

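/*
 * Establish a handler: allocate an intrhand, queue it on the IRQ's
 * handler list, and recompute the spl masks with interrupts disabled.
 *
 * Typical use from a (hypothetical) driver attach routine:
 *
 *	sc->sc_ih = ixp12x0_intr_establish(IXP12X0_INTR_UART, IPL_SERIAL,
 *	    mydev_intr, sc);
 *	if (sc->sc_ih == NULL)
 *		printf("%s: unable to establish interrupt\n",
 *		    sc->sc_dev.dv_xname);
 */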
void *
ixp12x0_intr_establish(int irq, int ipl, int (*ih_func)(void *), void *arg)
{
	struct intrq*		iq;
	struct intrhand*	ih;
	u_int			oldirqstate;
#ifdef DEBUG
	printf("ixp12x0_intr_establish(%d, %d, %08x, %08x)\n",
	       irq, ipl, (u_int32_t) ih_func, (u_int32_t) arg);
#endif
	if (irq < 0 || irq >= NIRQ)
		panic("ixp12x0_intr_establish: IRQ %d out of range", irq);
	if (ipl < 0 || ipl >= NIPL)
		panic("ixp12x0_intr_establish: IPL %d out of range", ipl);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = ih_func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	iq = &intrq[irq];

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
	ixp12x0_intr_calculate_masks();
	restore_interrupts(oldirqstate);

	return (ih);
}

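/*
 * Remove a handler established above and recompute the spl masks.
 */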
void
ixp12x0_intr_disestablish(void *cookie)
{
	struct intrhand*	ih = cookie;
	struct intrq*		iq = &intrq[ih->ih_irq];
	u_int			oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
	ixp12x0_intr_calculate_masks();
	restore_interrupts(oldirqstate);
}

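/*
 * Main hardware interrupt dispatch, called from the IRQ exception
 * path: for each pending system and PCI source, raise the spl to each
 * handler's level, run the handler with interrupts re-enabled, then
 * drop back to the entry level and take care of pending soft
 * interrupts.
 */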
void
ixp12x0_intr_dispatch(struct clockframe *frame)
{
	struct intrq*		iq;
	struct intrhand*	ih;
	u_int			oldirqstate;
	int			pcpl;
	u_int32_t		hwpend;
	u_int32_t		pci_hwpend;
	int			irq;
	u_int32_t		ibit;

	pcpl = current_spl_level;

	hwpend = ixp12x0_irq_read();
	pci_hwpend = ixp12x0_pci_irq_read();

	while (hwpend) {
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);
		hwpend &= ~ibit;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			int	ipl;

			current_spl_level = ipl = ih->ih_ipl;
			ixp12x0_set_intrmask(imask[ipl] | hwpend,
					     pci_imask[ipl] | pci_hwpend);
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
	}
	while (pci_hwpend) {
		irq = ffs(pci_hwpend) - 1;
		ibit = (1U << irq);
		pci_hwpend &= ~ibit;

		iq = &intrq[irq + SYS_NIRQ];
		iq->iq_ev.ev_count++;
		uvmexp.intrs++;
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			int	ipl;

			current_spl_level = ipl = ih->ih_ipl;
			ixp12x0_set_intrmask(imask[ipl] | hwpend,
					     pci_imask[ipl] | pci_hwpend);
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
	}

	splx(pcpl);

	/* Check for pending soft intrs. */
	if ((ipending & INT_SWMASK) & ~imask[pcpl]) {
		oldirqstate = enable_interrupts(I32_bit);
		ixp12x0_do_pending();
		restore_interrupts(oldirqstate);
	}
}