/*	$NetBSD: intr.c,v 1.20.2.1 2012/06/12 19:35:46 riz Exp $ */

/*-
 * Copyright (c) 2007 Michael Lorenz
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.20.2.1 2012/06/12 19:35:46 riz Exp $");

#include "opt_interrupt.h"
#include "opt_multiprocessor.h"
#include "opt_pic.h"

#define __INTR_PRIVATE

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>

#include <powerpc/psl.h>
#include <powerpc/pic/picvar.h>

#if defined(PIC_I8259) || defined(PIC_PREPIVR)
#include <machine/isa_machdep.h>
#endif

#ifdef MULTIPROCESSOR
#include <powerpc/pic/ipivar.h>
#endif

#ifdef __HAVE_FAST_SOFTINTS
#include <powerpc/softint.h>
#endif

#define MAX_PICS	8	/* 8 PICs ought to be enough for everyone */

#define	PIC_VIRQ_LEGAL_P(x)	((u_int)(x) < NVIRQ)

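/*
 * Global PIC state: the table of registered PICs, the map from
 * hardware IRQ to virtual IRQ, the mask of still-unassigned virtual
 * IRQs, and the per-IPL masks computed by intr_calculatemasks().
 */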
struct pic_ops *pics[MAX_PICS];
int num_pics = 0;
int max_base = 0;
uint8_t	virq_map[NIRQ];
imask_t virq_mask = HWIRQ_MASK;
imask_t	imask[NIPL];
int	primary_pic = 0;

static int	fakeintr(void *);
static int	mapirq(int);
static void	intr_calculatemasks(void);
static struct pic_ops *find_pic_by_hwirq(int);

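/* One interrupt source per virtual IRQ, shared by all PICs. */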
static struct intr_source intrsources[NVIRQ];

void
pic_init(void)
{
	/* everything is in bss, no reason to zero it. */
}

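/*
 * Register a PIC and hand it the next block of hardware IRQ numbers;
 * returns the base of that block, or -1 if the PIC table is full.
 */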
int
pic_add(struct pic_ops *pic)
{

	if (num_pics >= MAX_PICS)
		return -1;

	pics[num_pics] = pic;
	pic->pic_intrbase = max_base;
	max_base += pic->pic_numintrs;
	num_pics++;

	return pic->pic_intrbase;
}

void
pic_finish_setup(void)
{
	for (size_t i = 0; i < num_pics; i++) {
		struct pic_ops * const pic = pics[i];
		if (pic->pic_finish_setup != NULL)
			pic->pic_finish_setup(pic);
	}
}

static struct pic_ops *
find_pic_by_hwirq(int hwirq)
{
	for (u_int base = 0; base < num_pics; base++) {
		struct pic_ops * const pic = pics[base];
		if (pic->pic_intrbase <= hwirq
		    && hwirq < pic->pic_intrbase + pic->pic_numintrs) {
			return pic;
		}
	}
	return NULL;
}

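/*
 * Do-nothing handler that intr_establish() installs briefly so a stray
 * interrupt does no harm before the masks are recalculated.
 */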
static int
fakeintr(void *arg)
{

	return 0;
}

/*
 * Register an interrupt handler.
 */
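/*
 * Typical use, e.g. from a (hypothetical) driver attach function:
 *
 *	sc->sc_ih = intr_establish(hwirq, IST_LEVEL, IPL_BIO,
 *	    foo_intr, sc);
 *
 * where foo_intr and sc are the driver's own handler and context.
 */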
void *
intr_establish(int hwirq, int type, int ipl, int (*ih_fun)(void *),
    void *ih_arg)
{
	struct intrhand **p, *q, *ih;
	struct pic_ops *pic;
	static struct intrhand fakehand;
	int maxipl = ipl;

	if (maxipl == IPL_NONE)
		maxipl = IPL_HIGH;

	if (hwirq >= max_base) {
		panic("%s: bogus IRQ %d, max is %d", __func__, hwirq,
		    max_base - 1);
	}

	pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL) {
		panic("%s: cannot find a pic for IRQ %d", __func__, hwirq);
	}

	const int virq = mapirq(hwirq);

	/* no point in sleeping unless someone can free memory. */
	ih = kmem_intr_alloc(sizeof(*ih), cold ? KM_NOSLEEP : KM_SLEEP);
	if (ih == NULL)
		panic("intr_establish: can't allocate handler info");

	if (!PIC_VIRQ_LEGAL_P(virq) || type == IST_NONE)
		panic("intr_establish: bogus irq (%d) or type (%d)",
		    hwirq, type);

	struct intr_source * const is = &intrsources[virq];

	switch (is->is_type) {
	case IST_NONE:
		is->is_type = type;
		break;
	case IST_EDGE_FALLING:
	case IST_EDGE_RISING:
	case IST_LEVEL_LOW:
	case IST_LEVEL_HIGH:
		if (type == is->is_type)
			break;
		/* FALLTHROUGH */
	case IST_PULSE:
		if (type != IST_NONE)
			panic("intr_establish: can't share %s with %s",
			    intr_typename(is->is_type),
			    intr_typename(type));
		break;
	}
	if (is->is_hand == NULL) {
		snprintf(is->is_source, sizeof(is->is_source), "irq %d",
		    is->is_hwirq);
		evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL,
		    pic->pic_name, is->is_source);
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &is->is_hand; (q = *p) != NULL; p = &q->ih_next) {
		maxipl = max(maxipl, q->ih_ipl);
	}

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_ipl = ipl;
	fakehand.ih_fun = fakeintr;
	*p = &fakehand;

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_ipl = ipl;
	ih->ih_virq = virq;
	*p = ih;

	if (pic->pic_establish_irq != NULL)
		pic->pic_establish_irq(pic, hwirq - pic->pic_intrbase,
		    is->is_type, maxipl);

	/*
	 * Remember the highest IPL used by this handler.
	 */
	is->is_ipl = maxipl;

	/*
	 * now that the handler is established we're actually ready to
	 * calculate the masks
	 */
	intr_calculatemasks();

	return ih;
}

void
dummy_pic_establish_intr(struct pic_ops *pic, int irq, int type, int pri)
{
}

/*
 * Deregister an interrupt handler.
 */
void
intr_disestablish(void *arg)
{
	struct intrhand * const ih = arg;
	const int virq = ih->ih_virq;
	struct intr_source * const is = &intrsources[virq];
	struct intrhand **p, **q;
	int maxipl = IPL_NONE;

	if (!PIC_VIRQ_LEGAL_P(virq))
		panic("intr_disestablish: bogus virq %d", virq);

	/*
	 * Remove the handler from the chain.
	 * This is O(n^2), too.
	 */
	for (p = &is->is_hand, q = NULL; (*p) != NULL; p = &(*p)->ih_next) {
		struct intrhand * const tmp_ih = *p;
		if (tmp_ih == ih) {
			q = p;
		} else {
			maxipl = max(maxipl, tmp_ih->ih_ipl);
		}
	}
	if (q)
		*q = ih->ih_next;
	else
		panic("intr_disestablish: handler not registered");
	kmem_intr_free((void *)ih, sizeof(*ih));

	/*
	 * Reset the IPL for this source now that we've removed a handler.
	 */
	is->is_ipl = maxipl;

	intr_calculatemasks();

	if (is->is_hand == NULL) {
		is->is_type = IST_NONE;
		evcnt_detach(&is->is_ev);
		/*
		 * Make the virtual IRQ available again.
		 */
		virq_map[virq] = 0;
		virq_mask |= PIC_VIRQ_TO_MASK(virq);
	}
}

/*
 * Map a hardware IRQ number onto one of the NVIRQ virtual IRQs, each
 * of which occupies one bit in an imask_t.
 */
static int
mapirq(int hwirq)
{
	struct pic_ops *pic;

	if (hwirq >= max_base)
		panic("invalid irq %d", hwirq);

	if ((pic = find_pic_by_hwirq(hwirq)) == NULL)
		panic("%s: cannot find PIC for HWIRQ %d", __func__, hwirq);

	if (virq_map[hwirq])
		return virq_map[hwirq];

	if (virq_mask == 0)
		panic("virq overflow");

	const int virq = PIC_VIRQ_MS_PENDING(virq_mask);
	struct intr_source * const is = intrsources + virq;

	virq_mask &= ~PIC_VIRQ_TO_MASK(virq);

	is->is_hwirq = hwirq;
	is->is_pic = pic;
	virq_map[hwirq] = virq;
#ifdef PIC_DEBUG
	printf("mapping hwirq %d to virq %d\n", hwirq, virq);
#endif
	return virq;
}

static const char * const intr_typenames[] = {
	[IST_NONE]		= "none",
	[IST_PULSE]		= "pulsed",
	[IST_EDGE_FALLING]	= "falling edge triggered",
	[IST_EDGE_RISING]	= "rising edge triggered",
	[IST_LEVEL_LOW]		= "low level triggered",
	[IST_LEVEL_HIGH]	= "high level triggered",
};

const char *
intr_typename(int type)
{
	KASSERT((unsigned int) type < __arraycount(intr_typenames));
	KASSERT(intr_typenames[type] != NULL);
	return intr_typenames[type];
}

/*
 * Recalculate the interrupt masks from scratch.
 * We could code special registry and deregistry versions of this function that
 * would be faster, but the code would be nastier, and we don't expect this to
 * happen very much anyway.
 */
static void
intr_calculatemasks(void)
{
	imask_t newmask[NIPL];
	struct intr_source *is;
	struct intrhand *ih;
	int irq;

	for (u_int ipl = IPL_NONE; ipl < NIPL; ipl++) {
		newmask[ipl] = 0;
	}

	/* First, figure out which ipl each IRQ uses. */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		for (ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
			newmask[ih->ih_ipl] |= PIC_VIRQ_TO_MASK(irq);
		}
	}

	/*
	 * IPL_NONE is used for hardware interrupts that are never blocked,
	 * and do not block anything else.
	 */
	newmask[IPL_NONE] = 0;

	/*
	 * strict hierarchy - all IPLs block everything blocked by any lower
	 * IPL
	 */
	for (u_int ipl = 1; ipl < NIPL; ipl++) {
		newmask[ipl] |= newmask[ipl - 1];
	}

#ifdef PIC_DEBUG
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		printf("%u: %08x -> %08x\n", ipl, imask[ipl], newmask[ipl]);
	}
#endif

	/*
	 * Disable all interrupts.
	 */
	for (u_int base = 0; base < num_pics; base++) {
		struct pic_ops * const pic = pics[base];
		for (u_int i = 0; i < pic->pic_numintrs; i++) {
			pic->pic_disable_irq(pic, i);
		}
	}

	/*
	 * Now that all interrupts are disabled, update the ipl masks.
	 */
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		imask[ipl] = newmask[ipl];
	}

	/*
	 * Lastly, enable IRQs actually in use.
	 */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (is->is_hand)
			pic_enable_irq(is->is_hwirq);
	}
}

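/*
 * Unmask a hardware IRQ at its PIC, using the trigger type recorded
 * for its interrupt source.
 */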
void
pic_enable_irq(int hwirq)
{
	struct pic_ops * const pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL)
		panic("%s: bogus IRQ %d", __func__, hwirq);
	const int type = intrsources[virq_map[hwirq]].is_type;
	(*pic->pic_enable_irq)(pic, hwirq - pic->pic_intrbase, type);
}

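/*
 * Mark a hardware interrupt as pending on this CPU.  External
 * interrupts are disabled around the update so a nested interrupt
 * can't clobber ci_ipending.
 */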
void
pic_mark_pending(int hwirq)
{
	struct cpu_info * const ci = curcpu();

	const int virq = virq_map[hwirq];
	if (virq == 0)
		printf("IRQ %d maps to 0\n", hwirq);

	const register_t msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
	ci->ci_ipending |= PIC_VIRQ_TO_MASK(virq);
	mtmsr(msr);
}

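/*
 * Invoke every handler on this source's chain.  Handlers established
 * at IPL_VM run with the kernel lock held; all others run unlocked.
 */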
static void
intr_deliver(struct intr_source *is, int virq)
{
	bool locked = false;
	for (struct intrhand *ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
		KASSERTMSG(ih->ih_fun != NULL,
		    "%s: irq %d, hwirq %d, is %p ih %p: "
		     "NULL interrupt handler!\n", __func__,
		     virq, is->is_hwirq, is, ih);
		if (ih->ih_ipl == IPL_VM) {
			if (!locked) {
				KERNEL_LOCK(1, NULL);
				locked = true;
			}
		} else if (locked) {
			KERNEL_UNLOCK_ONE(NULL);
			locked = false;
		}
		(*ih->ih_fun)(ih->ih_arg);
	}
	if (locked) {
		KERNEL_UNLOCK_ONE(NULL);
	}
	is->is_ev.ev_count++;
}

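/*
 * Replay interrupts that were marked pending while masked.  Called
 * with external interrupts enabled, e.g. from splx() when the
 * priority level drops.
 */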
void
pic_do_pending_int(void)
{
	struct cpu_info * const ci = curcpu();
	imask_t vpend;

	if (ci->ci_iactive)
		return;

	ci->ci_iactive = 1;

	const register_t emsr = mfmsr();
	const register_t dmsr = emsr & ~PSL_EE;

	KASSERT(emsr & PSL_EE);
	mtmsr(dmsr);

	const int pcpl = ci->ci_cpl;
#ifdef __HAVE_FAST_SOFTINTS
again:
#endif

	/* Deliver any pending interrupts that are now unmasked. */
	while ((vpend = (ci->ci_ipending & ~imask[pcpl])) != 0) {
		ci->ci_idepth++;
		KASSERT((PIC_VIRQ_TO_MASK(0) & ci->ci_ipending) == 0);

		/* Get most significant pending bit */
		const int virq = PIC_VIRQ_MS_PENDING(vpend);
		ci->ci_ipending &= ~PIC_VIRQ_TO_MASK(virq);

		struct intr_source * const is = &intrsources[virq];
		struct pic_ops * const pic = is->is_pic;

		splraise(is->is_ipl);
		mtmsr(emsr);
		intr_deliver(is, virq);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl; /* Don't use splx... we are here already! */

		pic->pic_reenable_irq(pic, is->is_hwirq - pic->pic_intrbase,
		    is->is_type);
		ci->ci_idepth--;
	}

#ifdef __HAVE_FAST_SOFTINTS
	const u_int softints = ci->ci_data.cpu_softints &
				 (IPL_SOFTMASK << pcpl);

	/* make sure there are no bits to screw with the line above */
	KASSERT((ci->ci_data.cpu_softints & ~IPL_SOFTMASK) == 0);

	if (__predict_false(softints != 0)) {
		ci->ci_cpl = IPL_HIGH;
		mtmsr(emsr);
		powerpc_softint(ci, pcpl,
		    (vaddr_t)__builtin_return_address(0));
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		if (__predict_false(ci->ci_ipending & ~imask[pcpl]))
			goto again;
	}
#endif

	ci->ci_iactive = 0;
	mtmsr(emsr);
}

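/*
 * Main dispatch loop: ask the PIC which interrupt fired; if the
 * current IPL masks it, record it as pending and disable it at the
 * PIC, otherwise deliver it at its IPL with interrupts re-enabled.
 */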
int
pic_handle_intr(void *cookie)
{
	struct pic_ops *pic = cookie;
	struct cpu_info *ci = curcpu();
	int picirq;

	picirq = pic->pic_get_irq(pic, PIC_GET_IRQ);
	if (picirq == 255)
		return 0;

	const register_t msr = mfmsr();
	const int pcpl = ci->ci_cpl;

	do {
#ifdef MULTIPROCESSOR
		/* THIS IS WRONG XXX */
		if (picirq == ipiops.ppc_ipi_vector) {
			ci->ci_cpl = IPL_HIGH;
			ipi_intr(NULL);
			ci->ci_cpl = pcpl;
			pic->pic_ack_irq(pic, picirq);
			continue;
		}
#endif

		const int virq = virq_map[picirq + pic->pic_intrbase];
		KASSERT(virq != 0);
		KASSERT(picirq < pic->pic_numintrs);
		imask_t v_imen = PIC_VIRQ_TO_MASK(virq);
		struct intr_source * const is = &intrsources[virq];

		if ((imask[pcpl] & v_imen) != 0) {
			ci->ci_ipending |= v_imen; /* Masked! Mark this as pending */
			pic->pic_disable_irq(pic, picirq);
		} else {
			/* this interrupt is no longer pending */
			ci->ci_ipending &= ~v_imen;
			ci->ci_idepth++;

			splraise(is->is_ipl);
			mtmsr(msr | PSL_EE);
			intr_deliver(is, virq);
			mtmsr(msr);
			ci->ci_cpl = pcpl;

			ci->ci_data.cpu_nintr++;
			ci->ci_idepth--;
		}
		pic->pic_ack_irq(pic, picirq);
	} while ((picirq = pic->pic_get_irq(pic, PIC_GET_RECHECK)) != 255);

	mtmsr(msr | PSL_EE);
	splx(pcpl);	/* Process pendings. */
	mtmsr(msr);

	return 0;
}

void
pic_ext_intr(void)
{

	KASSERT(pics[primary_pic] != NULL);
	pic_handle_intr(pics[primary_pic]);
}

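/*
 * Raise the interrupt priority level to at least ncpl and return the
 * previous level.
 */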
int
splraise(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	if (ncpl == ci->ci_cpl)
		return ncpl;
	__asm volatile("sync; eieio");	/* don't reorder.... */
	ocpl = ci->ci_cpl;
	KASSERT(ncpl < NIPL);
	ci->ci_cpl = max(ncpl, ocpl);
	__asm volatile("sync; eieio");	/* reorder protect */
	__insn_barrier();
	return ocpl;
}

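/*
 * True if a hardware or (fast) soft interrupt is pending that ncpl
 * would not mask.
 */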
static inline bool
have_pending_intr_p(struct cpu_info *ci, int ncpl)
{
	if (ci->ci_ipending & ~imask[ncpl])
		return true;
#ifdef __HAVE_FAST_SOFTINTS
	if (ci->ci_data.cpu_softints & (IPL_SOFTMASK << ncpl))
		return true;
#endif
	return false;
}

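/*
 * Restore the interrupt priority level, then run anything that became
 * deliverable at the new, lower level.
 */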
void
splx(int ncpl)
{
	struct cpu_info *ci = curcpu();

	__insn_barrier();
	__asm volatile("sync; eieio");	/* reorder protect */
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();

	__asm volatile("sync; eieio");	/* reorder protect */
}

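/*
 * Like splx(), but returns the previous priority level.
 */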
int
spllower(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	__insn_barrier();
	__asm volatile("sync; eieio");	/* reorder protect */
	ocpl = ci->ci_cpl;
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();
	__asm volatile("sync; eieio");	/* reorder protect */
	return ocpl;
}

void
genppc_cpu_configure(void)
{
	aprint_normal("vmmask %x schedmask %x highmask %x\n",
	    (u_int)imask[IPL_VM] & 0x7fffffff,
	    (u_int)imask[IPL_SCHED] & 0x7fffffff,
	    (u_int)imask[IPL_HIGH] & 0x7fffffff);

	spl0();
}

#if defined(PIC_PREPIVR) || defined(PIC_I8259)
/*
 * isa_intr_alloc needs to be done here, because it needs direct access to
 * the various interrupt handler structures.
 */

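/*
 * Pick an IRQ from "mask": prefer one that is completely unused;
 * failing that, share the level-triggered IRQ with the shortest
 * handler chain.
 */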
int
genppc_isa_intr_alloc(isa_chipset_tag_t ic, struct pic_ops *pic,
    int mask, int type, int *irq_p)
{
	int irq, vi;
	int maybe_irq = -1;
	int shared_depth = 0;
	struct intr_source *is;

	if (pic == NULL)
		return 1;

	for (irq = 0; (mask != 0 && irq < pic->pic_numintrs);
	     mask >>= 1, irq++) {
		if ((mask & 1) == 0)
			continue;
		vi = virq_map[irq + pic->pic_intrbase];
		if (!vi) {
			*irq_p = irq;
			return 0;
		}
		is = &intrsources[vi];
		if (is->is_type == IST_NONE) {
			*irq_p = irq;
			return 0;
		}
		/* Level interrupts can be shared */
		if (type == IST_LEVEL && is->is_type == IST_LEVEL) {
			struct intrhand *ih = is->is_hand;
			int depth;

			/*
			 * Prefer the candidate IRQ whose handler chain
			 * is shortest.
			 */
			for (depth = 0; ih != NULL; ih = ih->ih_next)
				depth++;
			if (maybe_irq == -1 || depth < shared_depth) {
				maybe_irq = irq;
				shared_depth = depth;
			}
		}
	}
	if (maybe_irq != -1) {
		*irq_p = maybe_irq;
		return 0;
	}
	return 1;
}
#endif