1/*	$NetBSD$ */
2
3/*
4 * Copyright (c) 1992, 1993
5 *	The Regents of the University of California.  All rights reserved.
6 *
7 * This software was developed by the Computer Systems Engineering group
8 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
9 * contributed to Berkeley.
10 *
11 * All advertising materials mentioning features or use of this software
12 * must display the following acknowledgement:
13 *	This product includes software developed by the University of
14 *	California, Lawrence Berkeley Laboratory.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 *    notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 *    notice, this list of conditions and the following disclaimer in the
23 *    documentation and/or other materials provided with the distribution.
24 * 3. Neither the name of the University nor the names of its contributors
25 *    may be used to endorse or promote products derived from this software
26 *    without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 *	@(#)intr.c	8.3 (Berkeley) 11/11/93
41 */
42
43#include <sys/cdefs.h>
44__KERNEL_RCSID(0, "$NetBSD$");
45
46#include "opt_multiprocessor.h"
47#include "opt_sparc_arch.h"
48
49#include <sys/param.h>
50#include <sys/systm.h>
51#include <sys/kernel.h>
52#include <sys/malloc.h>
53#include <sys/cpu.h>
54#include <sys/intr.h>
55#include <sys/atomic.h>
56
57#include <uvm/uvm_extern.h>
58
59#include <dev/cons.h>
60
61#include <machine/ctlreg.h>
62#include <machine/instr.h>
63#include <machine/trap.h>
64#include <machine/promlib.h>
65
66#include <sparc/sparc/asm.h>
67#include <sparc/sparc/cpuvar.h>
68
69#if defined(MULTIPROCESSOR) && defined(DDB)
70#include <machine/db_machdep.h>
71#endif
72
73#if defined(MULTIPROCESSOR)
74static int intr_biglock_wrapper(void *);
75
76void *xcall_cookie;
77#endif
78
79void	strayintr(struct clockframe *);
80#ifdef DIAGNOSTIC
81void	bogusintr(struct clockframe *);
82#endif
83
/*
 * Stray interrupt handler.  Clear it if possible.
 * If not, and if we get 10 interrupts in 10 seconds, panic.
 * XXXSMP: We are holding the kernel lock at entry & exit.
 */
void
strayintr(struct clockframe *fp)
{
	/* Persist across calls: start of the current 10s window and the
	 * number of strays seen within it. */
	static int straytime, nstray;
	char bits[64];
	int timesince;

#if defined(MULTIPROCESSOR)
	/*
	 * XXX
	 *
	 * Don't whine about zs interrupts on MP.  We sometimes get
	 * stray interrupts when polled kernel output on cpu>0 eats
	 * the interrupt and cpu0 sees it.
	 */
#define ZS_INTR_IPL	12
	if (fp->ipl == ZS_INTR_IPL)
		return;
#endif

	/* Decode the saved %psr for the diagnostic message. */
	snprintb(bits, sizeof(bits), PSR_BITS, fp->psr);
	printf("stray interrupt cpu%d ipl 0x%x pc=0x%x npc=0x%x psr=%s\n",
	    cpu_number(), fp->ipl, fp->pc, fp->npc, bits);

	/*
	 * Rate limit: more than 10 strays inside a 10-second window
	 * means the interrupt cannot be cleared -- give up.
	 */
	timesince = time_uptime - straytime;
	if (timesince <= 10) {
		if (++nstray > 10)
			panic("crazy interrupts");
	} else {
		straytime = time_uptime;
		nstray = 1;
	}
}
122
123
#ifdef DIAGNOSTIC
/*
 * Bogus interrupt for which neither hard nor soft interrupt bit in
 * the IPR was set.  Diagnostic-only: report it and return.
 */
void
bogusintr(struct clockframe *fp)
{
	char bits[64];

#if defined(MULTIPROCESSOR)
	/*
	 * XXX as above: ignore zs interrupts eaten by another CPU's
	 * polled console output.
	 */
	if (fp->ipl == ZS_INTR_IPL)
		return;
#endif

	/* Decode the saved %psr for the diagnostic message. */
	snprintb(bits, sizeof(bits), PSR_BITS, fp->psr);
	printf("cpu%d: bogus interrupt ipl 0x%x pc=0x%x npc=0x%x psr=%s\n",
	    cpu_number(), fp->ipl, fp->pc, fp->npc, bits);
}
#endif /* DIAGNOSTIC */
147
/*
 * Get module ID of interrupt target.
 * Returns 0 on uniprocessor or non-sun4m systems, where interrupt
 * targeting does not apply.
 */
u_int
getitr(void)
{
#if defined(MULTIPROCESSOR)
	u_int v;

	if (!CPU_ISSUN4M || sparc_ncpus <= 1)
		return (0);

	/* The ITR register holds a CPU number (see the CPU_MID2CPUNO()
	 * conversion in setitr()); +8 maps it back to a module ID. */
	v = *((u_int *)ICR_ITR);
	return (v + 8);
#else
	return (0);
#endif
}
166
/*
 * Set interrupt target.
 * Return previous value (as a module ID); returns 0 and does nothing
 * on uniprocessor or non-sun4m systems.
 */
u_int
setitr(u_int mid)
{
#if defined(MULTIPROCESSOR)
	u_int v;

	if (!CPU_ISSUN4M || sparc_ncpus <= 1)
		return (0);

	/* Read the old CPU number, then store the new target converted
	 * from module ID to CPU number. */
	v = *((u_int *)ICR_ITR);
	*((u_int *)ICR_ITR) = CPU_MID2CPUNO(mid);
	/* +8 converts the stored CPU number back to a module ID. */
	return (v + 8);
#else
	return (0);
#endif
}
187
188#if (defined(SUN4M) && !defined(MSIIEP)) || defined(SUN4D)
void	nmi_hard(void);
void	nmi_soft(struct trapframe *);

/*
 * Optional per-error-class hooks, filled in by code that knows how to
 * service each class; each returns non-zero if the error is fatal
 * (consumed by nmi_hard() below).
 */
int	(*memerr_handler)(void);
int	(*sbuserr_handler)(void);
int	(*vmeerr_handler)(void);
int	(*moduleerr_handler)(void);

#if defined(MULTIPROCESSOR)
static volatile u_int	nmi_hard_wait = 0;	/* NMI rendezvous counter */
int			drop_into_rom_on_fatal = 1;	/* prom_abort() instead of panic */
#endif
201
void
nmi_hard(void)
{
	/*
	 * A level 15 hard interrupt.  Report asynchronous fault state,
	 * rendezvous all CPUs (MP), dispatch pending system-interrupt
	 * error classes to their handlers, and panic if any handler
	 * reports a fatal condition.
	 */
	int fatal = 0;
	uint32_t si;
	char bits[64];
	u_int afsr, afva;

	/* Tally */
	cpuinfo.ci_intrcnt[15].ev_count++;
	cpuinfo.ci_data.cpu_nintr++;

	/* Report this module's async fault status/address, if readable. */
	afsr = afva = 0;
	if ((*cpuinfo.get_asyncflt)(&afsr, &afva) == 0) {
		snprintb(bits, sizeof(bits), AFSR_BITS, afsr);
		printf("Async registers (mid %d): afsr=%s; afva=0x%x%x\n",
			cpuinfo.mid, bits,
			(afsr & AFSR_AFA) >> AFSR_AFA_RSHIFT, afva);
	}

#if defined(MULTIPROCESSOR)
	/*
	 * Increase nmi_hard_wait.  If we aren't the master, loop while this
	 * variable is non-zero.  If we are the master, loop while this
	 * variable is less than the number of cpus.
	 */
	atomic_inc_uint(&nmi_hard_wait);

	if (cpuinfo.master == 0) {
		/* Non-master: spin until the master clears the counter. */
		while (nmi_hard_wait)
			;
		return;
	} else {
		int n = 100000;

		/* Master: bounded wait for every CPU to check in. */
		while (nmi_hard_wait < sparc_ncpus) {
			DELAY(1);
			if (n-- > 0)
				continue;
			printf("nmi_hard: SMP botch.");
			break;
		}
	}
#endif

	/*
	 * Examine pending system interrupts.
	 */
	si = *((uint32_t *)ICR_SI_PEND);
	snprintb(bits, sizeof(bits), SINTR_BITS, si);
	printf("cpu%d: NMI: system interrupts: %s\n", cpu_number(), bits);

	/* Dispatch each pending error class to its registered handler;
	 * a non-zero return marks the NMI fatal. */
	if ((si & SINTR_M) != 0) {
		/* ECC memory error */
		if (memerr_handler != NULL)
			fatal |= (*memerr_handler)();
	}
	if ((si & SINTR_I) != 0) {
		/* MBus/SBus async error */
		if (sbuserr_handler != NULL)
			fatal |= (*sbuserr_handler)();
	}
	if ((si & SINTR_V) != 0) {
		/* VME async error */
		if (vmeerr_handler != NULL)
			fatal |= (*vmeerr_handler)();
	}
	if ((si & SINTR_ME) != 0) {
		/* Module async error */
		if (moduleerr_handler != NULL)
			fatal |= (*moduleerr_handler)();
	}

#if defined(MULTIPROCESSOR)
	/*
	 * Tell everyone else we've finished dealing with the hard NMI.
	 */
	nmi_hard_wait = 0;
	if (fatal && drop_into_rom_on_fatal) {
		prom_abort();
		return;
	}
#endif

	if (fatal)
		panic("nmi");
}
293
/*
 * Non-maskable soft interrupt level 15 handler.
 * Services PROM mailbox requests (stop/idle this CPU) and, on MP,
 * level-15 cross-call messages (CPU pause for DDB).
 */
void
nmi_soft(struct trapframe *tf)
{

	/* Tally */
	cpuinfo.ci_sintrcnt[15].ev_count++;
	cpuinfo.ci_data.cpu_nintr++;

	if (cpuinfo.mailbox) {
		/* Check PROM messages */
		uint8_t msg = *(uint8_t *)cpuinfo.mailbox;
		switch (msg) {
		case OPENPROM_MBX_STOP:
		case OPENPROM_MBX_WD:
			/* In case there's an xcall in progress (unlikely) */
			spl0();
#ifdef MULTIPROCESSOR
			/* Take ourselves out of the set of CPUs that
			 * participate in cross calls before stopping. */
			cpu_ready_mask &= ~(1 << cpu_number());
#endif
			prom_cpustop(0);
			break;
		case OPENPROM_MBX_ABORT:
		case OPENPROM_MBX_BPT:
			prom_cpuidle(0);
			/*
			 * We emerge here after someone does a
			 * prom_resumecpu(ournode).
			 */
			return;
		default:
			break;
		}
	}

#if defined(MULTIPROCESSOR)
	/* Handle a level-15 cross-call message, if one is pending. */
	switch (cpuinfo.msg_lev15.tag) {
	case XPMSG15_PAUSECPU:
		/* XXX - assumes DDB is the only user of mp_pause_cpu() */
		cpuinfo.flags |= CPUFLG_PAUSED;
#if defined(DDB)
		/* trap(T_DBPAUSE) */
		__asm("ta 0x8b");
#else
		/* Without DDB just spin until we are resumed. */
		while (cpuinfo.flags & CPUFLG_PAUSED)
			/* spin */;
#endif /* DDB */
	}
	cpuinfo.msg_lev15.tag = 0;
#endif /* MULTIPROCESSOR */
}
347
348#if defined(MULTIPROCESSOR)
/*
 * Respond to an xcall() request from another CPU.
 *
 * This is also called directly from xcall() if we notice an
 * incoming message while we're waiting to grab the xpmsg_lock.
 * We pass the address of xcallintr() itself to indicate that
 * this is not a real interrupt.
 */
void
xcallintr(void *v)
{

	kpreempt_disable();

	/* Tally -- but only for genuine level 13 soft interrupts, not
	 * for direct calls from xcall() (which pass v == xcallintr). */
	if (v != xcallintr)
		cpuinfo.ci_sintrcnt[13].ev_count++;

	/* notyet - cpuinfo.msg.received = 1; */
	switch (cpuinfo.msg.tag) {
	case XPMSG_FUNC:
	    {
		/* Run the requested function with its three arguments. */
		volatile struct xpmsg_func *p = &cpuinfo.msg.u.xpmsg_func;

		if (p->func)
			(*p->func)(p->arg0, p->arg1, p->arg2);
		break;
	    }
	}
	/* Acknowledge: clear the tag and signal completion to the sender. */
	cpuinfo.msg.tag = 0;
	cpuinfo.msg.complete = 1;

	kpreempt_enable();
}
383#endif /* MULTIPROCESSOR */
384#endif /* SUN4M || SUN4D */
385
386
387#ifdef MSIIEP
388/*
389 * It's easier to make this separate so that not to further obscure
390 * SUN4M case with more ifdefs.  There's no common functionality
391 * anyway.
392 */
393
394#include <sparc/sparc/msiiepreg.h>
395
396void	nmi_hard_msiiep(void);
397void	nmi_soft_msiiep(void);
398
399
400void
401nmi_hard_msiiep(void)
402{
403	uint32_t si;
404	char bits[128];
405	int fatal = 0;
406
407	si = mspcic_read_4(pcic_sys_ipr);
408	snprintb(bits, sizeof(bits), MSIIEP_SYS_IPR_BITS, si);
409	printf("NMI: system interrupts: %s\n", bits);
410
411
412	if (si & MSIIEP_SYS_IPR_MEM_FAULT) {
413		uint32_t afsr, afar, mfsr, mfar;
414
415		afar = *(volatile uint32_t *)MSIIEP_AFAR;
416		afsr = *(volatile uint32_t *)MSIIEP_AFSR;
417
418		mfar = *(volatile uint32_t *)MSIIEP_MFAR;
419		mfsr = *(volatile uint32_t *)MSIIEP_MFSR;
420
421		if (afsr & MSIIEP_AFSR_ERR) {
422			snprintb(bits, sizeof(bits), MSIIEP_AFSR_BITS, afsr);
423			printf("async fault: afsr=%s; afar=%08x\n", bits, afsr);
424		}
425
426		if (mfsr & MSIIEP_MFSR_ERR) {
427			snprintb(bits, sizeof(bits), MSIIEP_MFSR_BITS, mfsr);
428			printf("mem fault: mfsr=%s; mfar=%08x\n", bits, mfsr);
429		}
430
431		fatal = 0;
432	}
433
434	if (si & MSIIEP_SYS_IPR_SERR) {	/* XXX */
435		printf("serr#\n");
436		fatal = 0;
437	}
438
439	if (si & MSIIEP_SYS_IPR_DMA_ERR) {
440		printf("dma: %08x\n",
441		       mspcic_read_stream_4(pcic_iotlb_err_addr));
442		fatal = 0;
443	}
444
445	if (si & MSIIEP_SYS_IPR_PIO_ERR) {
446		printf("pio: addr=%08x, cmd=%x\n",
447		       mspcic_read_stream_4(pcic_pio_err_addr),
448		       mspcic_read_stream_1(pcic_pio_err_cmd));
449		fatal = 0;
450	}
451
452	if (fatal)
453		panic("nmi");
454
455	/* Clear the NMI if it was PCIC related */
456	mspcic_write_1(pcic_sys_ipr_clr, MSIIEP_SYS_IPR_CLR_ALL);
457}
458
459
/*
 * Level 15 soft interrupt on the ms-IIep: never expected, so any
 * occurrence is treated as fatal.
 */
void
nmi_soft_msiiep(void)
{

	panic("soft nmi");
}
466
467#endif /* MSIIEP */
468
469
/*
 * Level 15 interrupts are special, and not vectored here.
 * Only `prewired' interrupts appear here; boot-time configured devices
 * are attached via intr_establish() below.
 *
 * Indexed by processor interrupt level (PIL) 0-14; each slot heads a
 * chain of struct intrhand (see ih_insert()/ih_remove()).
 */
struct intrhand *intrhand[15] = {
	NULL,			/*  0 = error */
	NULL,			/*  1 = software level 1 + Sbus */
	NULL,	 		/*  2 = Sbus level 2 (4m: Sbus L1) */
	NULL,			/*  3 = SCSI + DMA + Sbus level 3 (4m: L2,lpt)*/
	NULL,			/*  4 = software level 4 (tty softint) (scsi) */
	NULL,			/*  5 = Ethernet + Sbus level 4 (4m: Sbus L3) */
	NULL,			/*  6 = software level 6 (not used) (4m: enet)*/
	NULL,			/*  7 = video + Sbus level 5 */
	NULL,			/*  8 = Sbus level 6 */
	NULL,			/*  9 = Sbus level 7 */
	NULL, 			/* 10 = counter 0 = clock */
	NULL,			/* 11 = floppy */
	NULL,			/* 12 = zs hardware interrupt */
	NULL,			/* 13 = audio chip */
	NULL, 			/* 14 = counter 1 = profiling timer */
};
492
/*
 * Soft interrupts use a separate set of handler chains.
 * This is necessary since soft interrupt handlers do not return a value
 * and therefore cannot be mixed with hardware interrupt handlers on a
 * shared handler chain.
 * Indexed by PIL like intrhand[] above.
 */
struct intrhand *sintrhand[15] = { NULL };
500
501static void
502ih_insert(struct intrhand **head, struct intrhand *ih)
503{
504	struct intrhand **p, *q;
505	/*
506	 * This is O(N^2) for long chains, but chains are never long
507	 * and we do want to preserve order.
508	 */
509	for (p = head; (q = *p) != NULL; p = &q->ih_next)
510		continue;
511	*p = ih;
512	ih->ih_next = NULL;
513}
514
515static void
516ih_remove(struct intrhand **head, struct intrhand *ih)
517{
518	struct intrhand **p, *q;
519
520	for (p = head; (q = *p) != ih; p = &q->ih_next)
521		continue;
522	if (q == NULL)
523		panic("intr_remove: intrhand %p fun %p arg %p",
524			ih, ih->ih_fun, ih->ih_arg);
525
526	*p = q->ih_next;
527	q->ih_next = NULL;
528}
529
static int fastvec;		/* marks fast vectors (see below) */
extern int sparc_interrupt4m[];		/* common interrupt entry (4m/4d) */
extern int sparc_interrupt44c[];	/* common interrupt entry (4/4c) */
533
#ifdef DIAGNOSTIC
/*
 * Verify that the trap vector for hardware interrupt `level' still
 * contains the standard three-instruction entry stub before
 * inst_fasttrap() overwrites it.
 */
static void
check_tv(int level)
{
	struct trapvec *tv;
	int displ;

	/* double check for legal hardware interrupt */
	tv = &trapbase[T_L1INT - 1 + level];
	/* expected branch displacement (in instruction words) from the
	 * vector's second slot to the common interrupt entry */
	displ = (CPU_ISSUN4M || CPU_ISSUN4D)
		? &sparc_interrupt4m[0] - &tv->tv_instr[1]
		: &sparc_interrupt44c[0] - &tv->tv_instr[1];

	/* has to be `mov level,%l3; ba _sparc_interrupt; rdpsr %l0' */
	if (tv->tv_instr[0] != I_MOVi(I_L3, level) ||
	    tv->tv_instr[1] != I_BA(0, displ) ||
	    tv->tv_instr[2] != I_RDPSR(I_L0))
		panic("intr_establish(%d)\n0x%x 0x%x 0x%x != 0x%x 0x%x 0x%x",
		    level,
		    tv->tv_instr[0], tv->tv_instr[1], tv->tv_instr[2],
		    I_MOVi(I_L3, level), I_BA(0, displ), I_RDPSR(I_L0));
}
#endif
557
/*
 * Wire a fast trap vector.  Only one such fast trap is legal for any
 * interrupt, and it must be a hardware interrupt.
 * Patches the trap table in place: the standard entry stub is replaced
 * by a `sethi/jmpl' pair that jumps straight to `vec'.
 */
static void
inst_fasttrap(int level, void (*vec)(void))
{
	struct trapvec *tv;
	u_long hi22, lo10;
	int s;

	if (CPU_ISSUN4 || CPU_ISSUN4C) {
		/* Can't wire to softintr slots */
		if (level == 1 || level == 4 || level == 6)
			return;
	}

#ifdef DIAGNOSTIC
	check_tv(level);
#endif

	/* Split the handler address for the sethi/jmpl pair below. */
	tv = &trapbase[T_L1INT - 1 + level];
	hi22 = ((u_long)vec) >> 10;
	lo10 = ((u_long)vec) & 0x3ff;
	s = splhigh();

	/* kernel text is write protected -- let us in for a moment */
	pmap_kprotect((vaddr_t)tv & -PAGE_SIZE, PAGE_SIZE,
	    VM_PROT_READ|VM_PROT_WRITE);
	cpuinfo.cache_flush_all();
	tv->tv_instr[0] = I_SETHI(I_L3, hi22);	/* sethi %hi(vec),%l3 */
	tv->tv_instr[1] = I_JMPLri(I_G0, I_L3, lo10);/* jmpl %l3+%lo(vec),%g0 */
	tv->tv_instr[2] = I_RDPSR(I_L0);	/* mov %psr, %l0 */
	pmap_kprotect((vaddr_t)tv & -PAGE_SIZE, PAGE_SIZE, VM_PROT_READ);
	cpuinfo.cache_flush_all();
	/* Record that this level now owns a fast vector. */
	fastvec |= 1 << level;
	splx(s);
}
596
/*
 * Uninstall a fast trap handler.
 * Restores the standard `mov level,%l3; ba; rdpsr' entry stub in the
 * trap table and clears the level's bit in fastvec.
 */
static void
uninst_fasttrap(int level)
{
	struct trapvec *tv;
	int displ;	/* suspenders, belt, and buttons too */
	int s;

	tv = &trapbase[T_L1INT - 1 + level];
	s = splhigh();
	/* Recompute the branch displacement to the common entry point. */
	displ = (CPU_ISSUN4M || CPU_ISSUN4D)
		? &sparc_interrupt4m[0] - &tv->tv_instr[1]
		: &sparc_interrupt44c[0] - &tv->tv_instr[1];

	/* kernel text is write protected -- let us in for a moment */
	pmap_kprotect((vaddr_t)tv & -PAGE_SIZE, PAGE_SIZE,
	    VM_PROT_READ|VM_PROT_WRITE);
	cpuinfo.cache_flush_all();
	tv->tv_instr[0] = I_MOVi(I_L3, level);
	tv->tv_instr[1] = I_BA(0, displ);
	tv->tv_instr[2] = I_RDPSR(I_L0);
	pmap_kprotect((vaddr_t)tv & -PAGE_SIZE, PAGE_SIZE, VM_PROT_READ);
	cpuinfo.cache_flush_all();
	fastvec &= ~(1 << level);
	splx(s);
}
625
/*
 * Attach an interrupt handler to the vector chain for the given level.
 * This is not possible if it has been taken away as a fast vector.
 * `classipl' (0 means "same as level") becomes the PIL the handler
 * runs at; `vec', if non-NULL, may be wired as a fast trap when this
 * is the only handler at the level.
 */
void
intr_establish(int level, int classipl,
	       struct intrhand *ih, void (*vec)(void),
	       bool maybe_mpsafe)
{
	int s = splhigh();
#ifdef MULTIPROCESSOR
	bool mpsafe;
#endif /* MULTIPROCESSOR */
	/* Default the device class IPL to the hardware level. */
	if (classipl == 0)
		classipl = level;

#ifdef MULTIPROCESSOR
	/* Only IPL_VM handlers need the big lock, and then only when
	 * the caller didn't assert MP-safety. */
	mpsafe = (classipl != IPL_VM) || maybe_mpsafe;
#endif

#ifdef DIAGNOSTIC
	if (CPU_ISSUN4C) {
		/*
		 * Check reserved softintr slots on SUN4C only.
		 * No check for SUN4, as 4/300's have
		 * esp0 at level 4 and le0 at level 6.
		 */
		if (level == 1 || level == 4 || level == 6)
			panic("intr_establish: reserved softintr level");
	}
#endif

	/*
	 * If a `fast vector' is currently tied to this level, we must
	 * first undo that.
	 */
	if (fastvec & (1 << level)) {
		printf("intr_establish: untie fast vector at level %d\n",
		    level);
		uninst_fasttrap(level);
	} else if (vec != NULL &&
		   intrhand[level] == NULL && sintrhand[level] == NULL) {
		/* First handler at this level and a fast vector was
		 * supplied: wire it directly into the trap table. */
		inst_fasttrap(level, vec);
	}

	/* A requested IPL cannot exceed its device class level */
	if (classipl < level)
		panic("intr_establish: class lvl (%d) < pil (%d)\n",
			classipl, level);

	/* pre-shift to PIL field in %psr */
	ih->ih_classipl = (classipl << 8) & PSR_PIL;

#ifdef MULTIPROCESSOR
	if (!mpsafe) {
		/* Interpose the big-lock wrapper; the real handler is
		 * kept in ih_realfun/ih_realarg. */
		ih->ih_realfun = ih->ih_fun;
		ih->ih_realarg = ih->ih_arg;
		ih->ih_fun = intr_biglock_wrapper;
		ih->ih_arg = ih;
	}
#endif /* MULTIPROCESSOR */

	ih_insert(&intrhand[level], ih);
	splx(s);
}
691
/*
 * Detach a handler previously attached with intr_establish().
 * Note: does not re-wire a fast trap vector that was untied when the
 * handler was established.
 */
void
intr_disestablish(int level, struct intrhand *ih)
{

	ih_remove(&intrhand[level], ih);
}
698
/*
 * This is a softintr cookie.  NB that sic_pilreq MUST be the
 * first element in the struct, because the softintr_schedule()
 * macro in intr.h casts cookies to int * to get it.  On a
 * sun4m, sic_pilreq is an actual processor interrupt level that
 * is passed to raise(), and on a sun4 or sun4c sic_pilreq is a
 * bit to set in the interrupt enable register with ienab_bis().
 */
struct softintr_cookie {
	int sic_pilreq;		/* CPU-specific bits; MUST be first! */
	int sic_pil;		/* Actual machine PIL that is used */
	struct intrhand sic_hand;	/* chain entry linked into sintrhand[] */
};
712
/*
 * softintr_init(): initialise the MI softintr system.
 */
void
sparc_softintr_init(void)
{

#if defined(MULTIPROCESSOR) && (defined(SUN4M) || defined(SUN4D))
	/* Establish a standard soft interrupt handler for cross calls */
	/* (level 13, matching the tally in xcallintr()) */
	xcall_cookie = sparc_softintr_establish(13, xcallintr, NULL);
#endif
}
725
/*
 * softintr_establish(): MI interface.  establish a func(arg) as a
 * software interrupt.
 * Returns an opaque cookie for softintr_schedule()/disestablish().
 */
void *
sparc_softintr_establish(int level, void (*fun)(void *), void *arg)
{
	struct softintr_cookie *sic;
	struct intrhand *ih;
	int pilreq;
	int pil;
#ifdef MULTIPROCESSOR
	bool mpsafe = (level != IPL_VM);
#endif /* MULTIPROCESSOR */

	/*
	 * On a sun4m, the processor interrupt level is stored
	 * in the softintr cookie to be passed to raise().
	 *
	 * On a sun4 or sun4c the appropriate bit to set
	 * in the interrupt enable register is stored in
	 * the softintr cookie to be passed to ienab_bis().
	 */
	pil = pilreq = level;
	if (CPU_ISSUN4 || CPU_ISSUN4C) {
		/* Select the most suitable of three available softint levels */
		if (level >= 1 && level < 4) {
			pil = 1;
			pilreq = IE_L1;
		} else if (level >= 4 && level < 6) {
			pil = 4;
			pilreq = IE_L4;
		} else {
			pil = 6;
			pilreq = IE_L6;
		}
	}

	sic = malloc(sizeof(*sic), M_DEVBUF, 0);
	sic->sic_pil = pil;
	sic->sic_pilreq = pilreq;
	ih = &sic->sic_hand;
#ifdef MULTIPROCESSOR
	if (!mpsafe) {
		/* Interpose the big-lock wrapper; the real handler is
		 * kept in ih_realfun/ih_realarg. */
		ih->ih_realfun = (int (*)(void *))fun;
		ih->ih_realarg = arg;
		ih->ih_fun = intr_biglock_wrapper;
		ih->ih_arg = ih;
	} else
#endif /* MULTIPROCESSOR */
	{
		ih->ih_fun = (int (*)(void *))fun;
		ih->ih_arg = arg;
	}

	/*
	 * Always run the handler at the requested level, which might
	 * be higher than the hardware can provide.
	 *
	 * pre-shift to PIL field in %psr
	 */
	ih->ih_classipl = (level << 8) & PSR_PIL;

	/*
	 * NOTE(review): the test and message below use `pil' but the
	 * untie passes `level'; on sun4/4c these can differ -- confirm
	 * whether uninst_fasttrap(pil) was intended.
	 */
	if (fastvec & (1 << pil)) {
		printf("softintr_establish: untie fast vector at level %d\n",
		    pil);
		uninst_fasttrap(level);
	}

	ih_insert(&sintrhand[pil], ih);
	return (void *)sic;
}
798
/*
 * softintr_disestablish(): MI interface.  disestablish the specified
 * software interrupt.
 */
void
sparc_softintr_disestablish(void *cookie)
{
	struct softintr_cookie *sic = cookie;

	/* Unhook from the chain it was inserted on (sic_pil, which may
	 * differ from the level the caller requested) and free it. */
	ih_remove(&sintrhand[sic->sic_pil], &sic->sic_hand);
	free(cookie, M_DEVBUF);
}
811
#if 0
/*
 * Reference implementation, currently compiled out: the live
 * softintr_schedule() is the macro in intr.h that reaches into the
 * cookie directly (see the softintr_cookie comment above).
 */
void
sparc_softintr_schedule(void *cookie)
{
	struct softintr_cookie *sic = cookie;
	if (CPU_ISSUN4M || CPU_ISSUN4D) {
#if defined(SUN4M) || defined(SUN4D)
		extern void raise(int,int);
		/* Request a software interrupt at the stored PIL. */
		raise(0, sic->sic_pilreq);
#endif
	} else {
#if defined(SUN4) || defined(SUN4C)
		/* Set the stored enable bit in the interrupt register. */
		ienab_bis(sic->sic_pilreq);
#endif
	}
}
#endif
829
830#ifdef MULTIPROCESSOR
831
832/*
833 * intr_biglock_wrapper: grab biglock and call a real interrupt handler.
834 */
835
836static int
837intr_biglock_wrapper(void *vp)
838{
839	struct intrhand *ih = vp;
840	int ret;
841
842	KERNEL_LOCK(1, NULL);
843
844	ret = (*ih->ih_realfun)(ih->ih_realarg);
845
846	KERNEL_UNLOCK_ONE(NULL);
847
848	return ret;
849}
850#endif /* MULTIPROCESSOR */
851
852bool
853cpu_intr_p(void)
854{
855	int idepth;
856
857	kpreempt_disable();
858	idepth = curcpu()->ci_idepth;
859	kpreempt_enable();
860
861	return idepth != 0;
862}
863