/*	$Id: at91aic.c,v 1.13 2022/07/21 10:09:20 andvar Exp $	*/
/*	$NetBSD: at91aic.c,v 1.13 2022/07/21 10:09:20 andvar Exp $	*/

/*
 * Copyright (c) 2007 Embedtronics Oy.
 * All rights reserved.
 *
 * Based on ep93xx_intr.c
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jesse Off
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ichiro FUKUHARA and Naoto Shimazaki.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */


/*
 * Interrupt support for Atmel's AT91xx9xxx family of controllers.
 */
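
/*
 * Overview: each of the 32 AIC sources has a Source Vector Register
 * (AIC_SVR) which at91aic_init() points at the source's intrq structure.
 * At dispatch time a read of AIC_IVR hands back that pointer for the
 * highest-priority pending source, so no software table lookup is needed.
 * Masking is precomputed per priority level: aic_imask[ipl] is the set of
 * sources that must be blocked at IPL ipl, and splx()/_splraise() simply
 * load that mask into the controller.
 */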

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/termios.h>

#include <uvm/uvm_extern.h>

#include <sys/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/at91/at91reg.h>
#include <arm/at91/at91var.h>
#include <arm/at91/at91aicreg.h>
#include <arm/at91/at91aicvar.h>

#define	NIRQ	32

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
static uint32_t aic_imask[NIPL];

/* Software copy of the IRQs we have enabled. */
volatile uint32_t aic_intr_enabled;

#define	AICREG(reg)	*((volatile uint32_t*) (AT91AIC_BASE + (reg)))
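
/*
 * AIC registers are accessed through the fixed mapping at AT91AIC_BASE,
 * which the port's bootstrap code is assumed to have set up.  For
 * example, "AICREG(AIC_IDCR) = 0xffffffff;" disables all 32 sources.
 */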

static int	at91aic_match(device_t, cfdata_t, void *);
static void	at91aic_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(at91aic, 0,
	      at91aic_match, at91aic_attach, NULL, NULL);

static int
at91aic_match(device_t parent, cfdata_t match, void *aux)
{
	if (strcmp(match->cf_name, "at91aic") == 0)
		return 2;
	return 0;
}

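/*
 * Attach a per-IRQ event counter for each source; once handlers are
 * established, the counters show up in "vmstat -i" output under the
 * "aic" group.
 */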
static void
at91aic_attach(device_t parent, device_t self, void *aux)
{
	int i;

	(void)parent; (void)self; (void)aux;
	for (i = 0; i < NIRQ; i++) {
		evcnt_attach_dynamic(&intrq[i].iq_ev, EVCNT_TYPE_INTR,
				     NULL, "aic", intrq[i].iq_name);
	}
	printf("\n");
}

/*
 * Load the mask for the current priority level: disable the given
 * sources, then (re)enable every other source software has enabled.
 */
static inline void
at91_set_intrmask(uint32_t aic_irqs)
{
	AICREG(AIC_IDCR)	= aic_irqs;
	AICREG(AIC_IECR)	= aic_intr_enabled & ~aic_irqs;
}

/* Enable a source and record it in the software copy. */
static inline void
at91_enable_irq(int irq)
{
	aic_intr_enabled       |= (1U << irq);
	AICREG(AIC_IECR)	= (1U << irq);
}

/* Disable a source and remove it from the software copy. */
static inline void
at91_disable_irq(int irq)
{
	aic_intr_enabled       &= ~(1U << irq);
	AICREG(AIC_IDCR)	=  (1U << irq);
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
at91aic_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		at91_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int aic_irqs = 0;
		for (irq = 0; irq < AIC_NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				aic_irqs |= (1U << irq);
		}
		aic_imask[ipl] = aic_irqs;
	}

	/* IPL_NONE must open up all interrupts. */
	KASSERT(aic_imask[IPL_NONE] == 0);
	KASSERT(aic_imask[IPL_SOFTCLOCK] == 0);
	KASSERT(aic_imask[IPL_SOFTBIO] == 0);
	KASSERT(aic_imask[IPL_SOFTNET] == 0);
	KASSERT(aic_imask[IPL_SOFTSERIAL] == 0);

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	aic_imask[IPL_SCHED] |= aic_imask[IPL_VM];
	aic_imask[IPL_HIGH] |= aic_imask[IPL_SCHED];

	/* Re-enable the sources that have handlers attached. */
	for (irq = 0; irq < MIN(NIRQ, AIC_NIRQ); irq++) {
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			at91_enable_irq(irq);
	}

	/* Update the current mask. */
	at91_set_intrmask(aic_imask[curcpl()]);
}

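/*
 * Example (hypothetical driver code): a driver protecting state shared
 * with its IPL_VM interrupt handler would bracket the critical section as
 *
 *	int s = splvm();
 *	... touch shared state ...
 *	splx(s);
 *
 * where splvm() is assumed to map onto _splraise(IPL_VM), and splx()
 * below restores the previous level.
 */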
inline void
splx(int new)
{
	int	old;
	u_int	oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = curcpl();
	if (old != new) {
		set_curcpl(new);
		at91_set_intrmask(aic_imask[new]);
	}
	restore_interrupts(oldirqstate);
#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}

int
_splraise(int ipl)
{
	int	old;
	u_int	oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = curcpl();
	if (old != ipl) {
		set_curcpl(ipl);
		at91_set_intrmask(aic_imask[ipl]);
	}
	restore_interrupts(oldirqstate);

	return (old);
}

int
_spllower(int ipl)
{
	int	old = curcpl();

	if (old <= ipl)
		return (old);
	splx(ipl);
#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
	return (old);
}

/*
 * at91aic_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
at91aic_init(void)
{
	struct intrq *iq;
	int i;

	aic_intr_enabled = 0;

	// disable interrupts:
	AICREG(AIC_IDCR)	= -1;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		snprintf(iq->iq_name, sizeof(iq->iq_name), "irq %d", i);
	}

	/* All interrupts should use IRQ, not FIQ. */

	AICREG(AIC_IDCR)	= -1;	/* disable interrupts	*/
	AICREG(AIC_ICCR)	= -1;	/* clear all interrupts	*/
	AICREG(AIC_DCR)		= 0;	/* not in debug mode, just to make sure */
	for (i = 0; i < NIRQ; i++) {
		AICREG(AIC_SMR(i))	= 0;	/* reset source mode: lowest priority */
		AICREG(AIC_SVR(i))	= (uint32_t)&intrq[i];	// address of interrupt queue
	}
	AICREG(AIC_FVR)		= 0;	// fast interrupt vector
	AICREG(AIC_SPU)		= 0;	// spurious interrupt vector

	AICREG(AIC_EOICR)	= 0;	/* clear logic... */
	AICREG(AIC_EOICR)	= 0;	/* clear logic... */

	at91aic_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

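/*
 * Example (hypothetical driver code, names assumed): a driver would hook
 * its handler with something like
 *
 *	sc->sc_ih = at91aic_intr_establish(sa->sa_pid, IPL_NET,
 *	    INTR_HIGH_LEVEL, foo_intr, sc);
 *
 * and detach it again later with at91aic_intr_disestablish(sc->sc_ih).
 */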
void *
at91aic_intr_establish(int irq, int ipl, int type, int (*ih_func)(void *), void *arg)
{
	struct intrq*		iq;
	struct intrhand*	ih;
	u_int			oldirqstate;
	unsigned		ok;
	uint32_t		smr;

	if (irq < 0 || irq >= NIRQ)
		panic("intr_establish: IRQ %d out of range", irq);
	if (ipl < 0 || ipl >= NIPL)
		panic("intr_establish: IPL %d out of range", ipl);

	smr = 1;		// for now, every source gets priority 1
	switch (type) {
	case _INTR_LOW_LEVEL:
		smr |= AIC_SMR_SRCTYPE_LVL_LO;
		break;
	case INTR_HIGH_LEVEL:
		smr |= AIC_SMR_SRCTYPE_LVL_HI;
		break;
	case INTR_FALLING_EDGE:
		smr |= AIC_SMR_SRCTYPE_FALLING;
		break;
	case INTR_RISING_EDGE:
		smr |= AIC_SMR_SRCTYPE_RISING;
		break;
	default:
		panic("intr_establish: interrupt type %d is invalid", type);
	}

	/* kmem_alloc(..., KM_SLEEP) cannot fail. */
	ih = kmem_alloc(sizeof(*ih), KM_SLEEP);

	ih->ih_func = ih_func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	iq = &intrq[irq];

	oldirqstate = disable_interrupts(I32_bit);
	if (TAILQ_FIRST(&iq->iq_list) == NULL || (iq->iq_type & ~type) == 0) {
		AICREG(AIC_SMR(irq)) = smr;
		iq->iq_type = type;
		TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
		at91aic_calculate_masks();
		ok = 1;
	} else
		ok = 0;
	restore_interrupts(oldirqstate);

	if (ok) {
#ifdef	AT91AIC_DEBUG
		int i;
		printf("\n");
		for (i = 0; i < NIPL; i++) {
			printf("IPL%d: aic_imask=0x%08X\n", i, aic_imask[i]);
		}
#endif
	} else {
		/* A conflicting trigger type was already established. */
		kmem_free(ih, sizeof(*ih));
		ih = NULL;
	}

	return (ih);
}

void
at91aic_intr_disestablish(void *cookie)
{
	struct intrhand*	ih = cookie;
	struct intrq*		iq = &intrq[ih->ih_irq];
	u_int			oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
	at91aic_calculate_masks();
	restore_interrupts(oldirqstate);
}

#include <arm/at91/at91reg.h>
#include <arm/at91/at91dbgureg.h>
#include <arm/at91/at91pdcreg.h>

static inline void intr_process(struct intrq *iq, int pcpl, struct trapframe *frame);

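/*
 * Run every handler on the queue at its own IPL with CPU interrupts
 * re-enabled, so higher-priority sources can preempt; iq_busy catches
 * unexpected re-entry into the same queue.
 */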
static inline void
intr_process(struct intrq *iq, int pcpl, struct trapframe *frame)
{
	struct intrhand*	ih;
	u_int			oldirqstate, intr;

	intr = iq - intrq;

	iq->iq_ev.ev_count++;
	curcpu()->ci_data.cpu_nintr++;

	if ((1U << intr) & aic_imask[pcpl]) {
		panic("interrupt %u should be masked! (aic_imask=0x%X)", intr,
		      aic_imask[pcpl]);
	}

	if (iq->iq_busy) {
		panic("interrupt %u busy!", intr);
	}

	iq->iq_busy = 1;

	for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
	     ih = TAILQ_NEXT(ih, ih_list)) {
		set_curcpl(ih->ih_ipl);
		at91_set_intrmask(aic_imask[ih->ih_ipl]);
		oldirqstate = enable_interrupts(I32_bit);
		(void)(*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		restore_interrupts(oldirqstate);
	}

	if (!iq->iq_busy) {
		panic("interrupt %u not busy!", intr);
	}
	iq->iq_busy = 0;

	set_curcpl(pcpl);
	at91_set_intrmask(aic_imask[pcpl]);
}

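/*
 * Per the AT91 datasheets, reading AIC_IVR returns the SVR contents for
 * the highest-priority pending source (here: a pointer to its intrq, as
 * programmed in at91aic_init) and pushes that priority onto the AIC's
 * internal stack; the AIC_EOICR write below pops it again.
 */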
void
at91aic_intr_dispatch(struct trapframe *frame)
{
	struct intrq*		iq;
	int			pcpl = curcpl();

	iq = (struct intrq *)AICREG(AIC_IVR);	// get current queue

	// OK, service interrupt
	if (iq)
		intr_process(iq, pcpl, frame);

	AICREG(AIC_EOICR) = 0;			// end of interrupt
}

#if 0
void
at91aic_intr_poll(int irq)
{
	u_int		oldirqstate;
	uint32_t	ipr;
	int		pcpl = curcpl();

	oldirqstate = disable_interrupts(I32_bit);
	ipr = AICREG(AIC_IPR);
	if ((ipr & (1U << irq) & ~aic_imask[pcpl]))
		intr_process(&intrq[irq], pcpl, NULL);
	restore_interrupts(oldirqstate);
#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}
#endif

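/*
 * Poll one established handler: run it if its source is pending and either
 * unmasked at the current IPL or the caller forces it with "flags".
 * Presumably useful to drivers that must make progress while interrupts
 * are blocked (e.g. a console busy-waiting at high IPL).
 */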
void
at91aic_intr_poll(void *ihp, int flags)
{
	struct intrhand* ih = ihp;
	u_int		oldirqstate, irq = ih->ih_irq;
	uint32_t	ipr;
	int		pcpl = curcpl();

	oldirqstate = disable_interrupts(I32_bit);
	ipr = AICREG(AIC_IPR);
	if ((ipr & (1U << irq))
	    && (flags || !(aic_imask[pcpl] & (1U << irq)))) {
		set_curcpl(ih->ih_ipl);
		at91_set_intrmask(aic_imask[ih->ih_ipl]);
		(void)enable_interrupts(I32_bit);
		(void)(*ih->ih_func)(ih->ih_arg ? ih->ih_arg : NULL);
		(void)disable_interrupts(I32_bit);
		set_curcpl(pcpl);
		at91_set_intrmask(aic_imask[pcpl]);
	}
	restore_interrupts(oldirqstate);

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}