/* ep93xx_intr.c revision 1.20 */
1/* $NetBSD: ep93xx_intr.c,v 1.20 2013/12/18 13:03:59 skrll Exp $ */
2
3/*
4 * Copyright (c) 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jesse Off
9 *
10 * This code is derived from software contributed to The NetBSD Foundation
11 * by Ichiro FUKUHARA and Naoto Shimazaki.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 *    notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 *    notice, this list of conditions and the following disclaimer in the
20 *    documentation and/or other materials provided with the distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include <sys/cdefs.h>
36__KERNEL_RCSID(0, "$NetBSD: ep93xx_intr.c,v 1.20 2013/12/18 13:03:59 skrll Exp $");
37
38/*
39 * Interrupt support for the Cirrus Logic EP93XX
40 */
41
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/malloc.h>
45#include <sys/termios.h>
46
47#include <sys/bus.h>
48#include <sys/intr.h>
49
50#include <arm/locore.h>
51
52#include <arm/ep93xx/ep93xxreg.h>
53#include <arm/ep93xx/ep93xxvar.h>
54
/* Interrupt handler queues, one per IRQ line across both VICs. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level, one bitmap per VIC. */
static uint32_t vic1_imask[NIPL];
static uint32_t vic2_imask[NIPL];

/* IPL currently programmed into the hardware mask (may lag curcpl()). */
volatile int hardware_spl_level;

/* Software copy of the IRQs we have enabled. */
volatile uint32_t vic1_intr_enabled;
volatile uint32_t vic2_intr_enabled;

/* Interrupts pending. */
/* NOTE(review): ipending is never referenced in this file — candidate for removal. */
static volatile int ipending;

void	ep93xx_intr_dispatch(struct trapframe *);

/* Access a VIC1/VIC2 register as a volatile 32-bit MMIO location. */
#define VIC1REG(reg)	*((volatile uint32_t*) (EP93XX_AHB_VBASE + \
	EP93XX_AHB_VIC1 + (reg)))
#define VIC2REG(reg)	*((volatile uint32_t*) (EP93XX_AHB_VBASE + \
	EP93XX_AHB_VIC2 + (reg)))
78
/*
 * ep93xx_set_intrmask:
 *
 *	Program both VICs so that the IRQs in vic1_irqs/vic2_irqs are
 *	masked off, and every established IRQ not in the mask is enabled.
 *	Caller is responsible for any required CPU-interrupt blocking.
 */
static void
ep93xx_set_intrmask(uint32_t vic1_irqs, uint32_t vic2_irqs)
{
	/* Clear the masked sources first, then enable the remainder. */
	VIC1REG(EP93XX_VIC_IntEnClear) = vic1_irqs;
	VIC1REG(EP93XX_VIC_IntEnable) = vic1_intr_enabled & ~vic1_irqs;
	VIC2REG(EP93XX_VIC_IntEnClear) = vic2_irqs;
	VIC2REG(EP93XX_VIC_IntEnable) = vic2_intr_enabled & ~vic2_irqs;
}
87
88static void
89ep93xx_enable_irq(int irq)
90{
91	if (irq < VIC_NIRQ) {
92		vic1_intr_enabled |= (1U << irq);
93		VIC1REG(EP93XX_VIC_IntEnable) = (1U << irq);
94	} else {
95		vic2_intr_enabled |= (1U << (irq - VIC_NIRQ));
96		VIC2REG(EP93XX_VIC_IntEnable) = (1U << (irq - VIC_NIRQ));
97	}
98}
99
100static inline void
101ep93xx_disable_irq(int irq)
102{
103	if (irq < VIC_NIRQ) {
104		vic1_intr_enabled &= ~(1U << irq);
105		VIC1REG(EP93XX_VIC_IntEnClear) = (1U << irq);
106	} else {
107		vic2_intr_enabled &= ~(1U << (irq - VIC_NIRQ));
108		VIC2REG(EP93XX_VIC_IntEnClear) = (1U << (irq - VIC_NIRQ));
109	}
110}
111
/*
 * ep93xx_intr_calculate_masks:
 *
 *	Rebuild the per-IPL mask tables (vic1_imask/vic2_imask) and the
 *	per-IRQ blocking masks (iq_vic1_mask/iq_vic2_mask) from the
 *	current set of established handlers, then re-enable every IRQ
 *	that has at least one handler.
 *
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
ep93xx_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		/* Disable while rebuilding; re-enabled below if handlers exist. */
		ep93xx_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int vic1_irqs = 0;
		int vic2_irqs = 0;
		/* VIC1 carries IRQs [0, VIC_NIRQ); VIC2 carries the rest. */
		for (irq = 0; irq < VIC_NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				vic1_irqs |= (1U << irq);
		}
		vic1_imask[ipl] = vic1_irqs;
		for (irq = 0; irq < VIC_NIRQ; irq++) {
			if (intrq[irq + VIC_NIRQ].iq_levels & (1U << ipl))
				vic2_irqs |= (1U << irq);
		}
		vic2_imask[ipl] = vic2_irqs;
	}

	/* Software interrupt levels must never mask any hardware IRQ. */
	KASSERT(vic1_imask[IPL_NONE] == 0);
	KASSERT(vic2_imask[IPL_NONE] == 0);
	KASSERT(vic1_imask[IPL_SOFTCLOCK] == 0);
	KASSERT(vic2_imask[IPL_SOFTCLOCK] == 0);
	KASSERT(vic1_imask[IPL_SOFTBIO] == 0);
	KASSERT(vic2_imask[IPL_SOFTBIO] == 0);
	KASSERT(vic1_imask[IPL_SOFTNET] == 0);
	KASSERT(vic2_imask[IPL_SOFTNET] == 0);
	KASSERT(vic1_imask[IPL_SOFTSERIAL] == 0);
	KASSERT(vic2_imask[IPL_SOFTSERIAL] == 0);

	/*
	 * splsched() must block anything that uses the scheduler.
	 */
	vic1_imask[IPL_SCHED] |= vic1_imask[IPL_VM];
	vic2_imask[IPL_SCHED] |= vic2_imask[IPL_VM];

	/*
	 * splhigh() must block "everything".
	 */
	vic1_imask[IPL_HIGH] |= vic1_imask[IPL_SCHED];
	vic2_imask[IPL_HIGH] |= vic2_imask[IPL_SCHED];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int	vic1_irqs;
		int	vic2_irqs;

		/* Start with the IRQ's own bit so it blocks itself. */
		if (irq < VIC_NIRQ) {
			vic1_irqs = (1U << irq);
			vic2_irqs = 0;
		} else {
			vic1_irqs = 0;
			vic2_irqs = (1U << (irq - VIC_NIRQ));
		}
		iq = &intrq[irq];
		/* Re-enable only IRQs that have at least one handler. */
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			ep93xx_enable_irq(irq);
		/* Union in the mask of every IPL used by this IRQ's handlers. */
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			vic1_irqs |= vic1_imask[ih->ih_ipl];
			vic2_irqs |= vic2_imask[ih->ih_ipl];
		}
		iq->iq_vic1_mask = vic1_irqs;
		iq->iq_vic2_mask = vic2_irqs;
	}
}
199
/*
 * splx:
 *
 *	Set the current interrupt priority level to 'new', reprogramming
 *	the hardware interrupt mask only if the level actually changes.
 */
inline void
splx(int new)
{
	u_int	oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	set_curcpl(new);
	/* Avoid the (expensive) VIC reprogramming when nothing changed. */
	if (new != hardware_spl_level) {
		hardware_spl_level = new;
		ep93xx_set_intrmask(vic1_imask[new], vic2_imask[new]);
	}
	restore_interrupts(oldirqstate);

#ifdef __HAVE_FAST_SOFTINTS
	/* Lowering the IPL may make pending soft interrupts runnable. */
	cpu_dosoftints();
#endif
}
217
218int
219_splraise(int ipl)
220{
221	int	old;
222	u_int	oldirqstate;
223
224	oldirqstate = disable_interrupts(I32_bit);
225	old = curcpl();
226	set_curcpl(ipl);
227	restore_interrupts(oldirqstate);
228	return (old);
229}
230
/*
 * _spllower:
 *
 *	Lower the current interrupt priority level to 'ipl' (no-op if
 *	already at or below it) and return the previous level.
 */
int
_spllower(int ipl)
{
	const int oipl = curcpl();

	/* Only ever lower; splx() does the hardware reprogramming. */
	if (oipl > ipl)
		splx(ipl);

	return oipl;
}
241
242/*
243 * ep93xx_intr_init:
244 *
245 *	Initialize the rest of the interrupt subsystem, making it
246 *	ready to handle interrupts from devices.
247 */
248void
249ep93xx_intr_init(void)
250{
251	struct intrq *iq;
252	int i;
253
254	vic1_intr_enabled = 0;
255	vic2_intr_enabled = 0;
256
257	for (i = 0; i < NIRQ; i++) {
258		iq = &intrq[i];
259		TAILQ_INIT(&iq->iq_list);
260
261		sprintf(iq->iq_name, "irq %d", i);
262		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
263				     NULL, (i < VIC_NIRQ ? "vic1" : "vic2"),
264		                     iq->iq_name);
265	}
266	curcpu()->ci_intr_depth = 0;
267	set_curcpl(0);
268	hardware_spl_level = 0;
269
270	/* All interrupts should use IRQ not FIQ */
271	VIC1REG(EP93XX_VIC_IntSelect) = 0;
272	VIC2REG(EP93XX_VIC_IntSelect) = 0;
273
274	ep93xx_intr_calculate_masks();
275
276	/* Enable IRQs (don't yet use FIQs). */
277	enable_interrupts(I32_bit);
278}
279
280void *
281ep93xx_intr_establish(int irq, int ipl, int (*ih_func)(void *), void *arg)
282{
283	struct intrq*		iq;
284	struct intrhand*	ih;
285	u_int			oldirqstate;
286
287	if (irq < 0 || irq > NIRQ)
288		panic("ep93xx_intr_establish: IRQ %d out of range", irq);
289	if (ipl < 0 || ipl > NIPL)
290		panic("ep93xx_intr_establish: IPL %d out of range", ipl);
291
292	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
293	if (ih == NULL)
294		return (NULL);
295
296	ih->ih_func = ih_func;
297	ih->ih_arg = arg;
298	ih->ih_irq = irq;
299	ih->ih_ipl = ipl;
300
301	iq = &intrq[irq];
302
303	oldirqstate = disable_interrupts(I32_bit);
304	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
305	ep93xx_intr_calculate_masks();
306	restore_interrupts(oldirqstate);
307
308	return (ih);
309}
310
311void
312ep93xx_intr_disestablish(void *cookie)
313{
314	struct intrhand*	ih = cookie;
315	struct intrq*		iq = &intrq[ih->ih_irq];
316	u_int			oldirqstate;
317
318	oldirqstate = disable_interrupts(I32_bit);
319	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
320	ep93xx_intr_calculate_masks();
321	restore_interrupts(oldirqstate);
322}
323
/*
 * ep93xx_intr_dispatch:
 *
 *	Main IRQ entry point, called from the trap path with CPU
 *	interrupts disabled.  Services at most one pending IRQ (VIC1
 *	takes precedence over VIC2) per invocation; presumably any
 *	remaining pending sources re-trigger the IRQ line once the
 *	mask is restored — TODO confirm.
 */
void
ep93xx_intr_dispatch(struct trapframe *frame)
{
	struct intrq*		iq;
	struct intrhand*	ih;
	u_int			oldirqstate;
	int			pcpl;
	uint32_t		vic1_hwpend;
	uint32_t		vic2_hwpend;
	int			irq;

	pcpl = curcpl();

	/* Snapshot the pending IRQ status from both VICs. */
	vic1_hwpend = VIC1REG(EP93XX_VIC_IRQStatus);
	vic2_hwpend = VIC2REG(EP93XX_VIC_IRQStatus);

	/*
	 * Mask the current IPL's IRQs plus everything seen pending, so
	 * CPU interrupts can be re-enabled while handlers run without
	 * re-entering for the same sources.
	 */
	hardware_spl_level = pcpl;
	ep93xx_set_intrmask(vic1_imask[pcpl] | vic1_hwpend,
			     vic2_imask[pcpl] | vic2_hwpend);

	/* Ignore sources that the current IPL blocks anyway. */
	vic1_hwpend &= ~vic1_imask[pcpl];
	vic2_hwpend &= ~vic2_imask[pcpl];

	if (vic1_hwpend) {
		/* Lowest-numbered pending VIC1 IRQ. */
		irq = ffs(vic1_hwpend) - 1;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		curcpu()->ci_data.cpu_nintr++;
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			/* Run each handler at its own IPL, interrupts on. */
			set_curcpl(ih->ih_ipl);
			oldirqstate = enable_interrupts(I32_bit);
			/* NULL arg means the handler wants the trapframe. */
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
	} else if (vic2_hwpend) {
		/* Lowest-numbered pending VIC2 IRQ (global index + VIC_NIRQ). */
		irq = ffs(vic2_hwpend) - 1;

		iq = &intrq[irq + VIC_NIRQ];
		iq->iq_ev.ev_count++;
		curcpu()->ci_data.cpu_nintr++;
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			/* Run each handler at its own IPL, interrupts on. */
			set_curcpl(ih->ih_ipl);
			oldirqstate = enable_interrupts(I32_bit);
			/* NULL arg means the handler wants the trapframe. */
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
	}

	/* Restore the entry IPL and its hardware mask. */
	set_curcpl(pcpl);
	hardware_spl_level = pcpl;
	ep93xx_set_intrmask(vic1_imask[pcpl], vic2_imask[pcpl]);

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}
381