/*	$OpenBSD: interrupt.c,v 1.74 2021/04/29 12:49:19 visa Exp $ */

/*
 * Copyright (c) 2001-2004 Opsycon AB  (www.opsycon.se / www.opsycon.com)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/evcount.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <mips64/mips_cpu.h>
#include <machine/intr.h>
#include <machine/frame.h>

#ifdef DDB
#include <mips64/db_machdep.h>
#include <ddb/db_sym.h>
#endif

void	dummy_splx(int);
void	interrupt(struct trapframe *);

static struct evcount soft_count;
static int soft_irq = 0;

uint32_t idle_mask;
int	last_low_int;

struct {
	uint32_t int_mask;
	uint32_t (*int_hand)(uint32_t, struct trapframe *);
} cpu_int_tab[NLOWINT];

void	(*splx_hand)(int) = &dummy_splx;

/*
 *  Modern MIPS processors have extended interrupt capabilities, and
 *  how these are handled differs from implementation to implementation.
 *  This code tries to hide some of these differences behind "higher
 *  level" interrupt code.
 *
 *  Basically there are <n> interrupt inputs to the processor, and the
 *  hardware designer typically ties these inputs to various interrupt
 *  sources in the hardware. The low level code does no more with an
 *  interrupt than dispatch it to the code that has registered a handler
 *  for that particular interrupt input. More than one handler can
 *  register for an interrupt input, and one handler may register for
 *  more than one interrupt input. A handler is only called once even
 *  if it registers for more than one interrupt input.
 *
 *  The interrupt mechanism in this port uses a delayed masking model
 *  where interrupts are not really masked when doing an spl(). Instead,
 *  a masked interrupt will be taken and validated in the various
 *  handlers. If a handler finds that an interrupt is masked at the
 *  current level, it will record the interrupt as pending and return a
 *  new mask to this code that turns the interrupt off in hardware.
 *  Later, when the pending interrupt is unmasked, it will be processed
 *  as usual and the regular hardware mask will be restored.
 */
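
/*
 * A sketch of the delayed-masking model described above, as a board
 * level dispatcher might implement it.  The names xx_read_pending(),
 * xx_imask[], xx_record_pending(), xx_mask_hw() and xx_dispatch() are
 * hypothetical; each port keeps its own equivalents.
 *
 *	uint32_t
 *	xx_intr(uint32_t hwpend, struct trapframe *frame)
 *	{
 *		struct cpu_info *ci = curcpu();
 *		uint32_t pending, blocked;
 *
 *		pending = xx_read_pending();
 *		blocked = pending & xx_imask[ci->ci_ipl];
 *		if (blocked != 0) {
 *			xx_record_pending(blocked);
 *			xx_mask_hw(blocked);
 *		}
 *		if ((pending & ~blocked) != 0)
 *			xx_dispatch(pending & ~blocked, frame);
 *		return hwpend;
 *	}
 */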

/*
 * Handle an interrupt. Both kernel and user mode are handled here.
 *
 * Each registered interrupt handler is called with the subset of the
 * CR_INT bits it registered for that is currently pending.
 * The handler should return a word of the same form, a mask indicating
 * which CR_INT bits it has handled.
 */

void
interrupt(struct trapframe *trapframe)
{
	struct cpu_info *ci = curcpu();
	u_int32_t pending;
	int i, s;

	/*
	 *  Paranoid? Perhaps. But if we got here with the interrupt
	 *  enable bit clear, an mtc0 to COP_0_STATUS_REG may have been
	 *  interrupted.  If that was a disable and the pipeline had
	 *  advanced far enough... better safe than sorry.
	 *  The main concern is not the interrupts but the spl mechanism.
	 */
	if (!(trapframe->sr & SR_INT_ENAB))
		return;

	ci->ci_intrdepth++;

#ifdef DEBUG_INTERRUPT
	trapdebug_enter(ci, trapframe, T_INT);
#endif
	atomic_inc_int(&uvmexp.intrs);

	/* Only consider interrupts that are pending and not masked in SR. */
	pending = trapframe->cause & CR_INT_MASK & trapframe->sr;

	if (pending & SOFT_INT_MASK_0) {
		clearsoftintr0();
		atomic_inc_long((unsigned long *)&soft_count.ec_count);
	}

	for (i = 0; i <= last_low_int; i++) {
		uint32_t active;
		active = cpu_int_tab[i].int_mask & pending;
		if (active != 0)
			(*cpu_int_tab[i].int_hand)(active, trapframe);
	}

	/*
	 * Dispatch soft interrupts if current ipl allows them.
	 */
	if (ci->ci_ipl < IPL_SOFTINT && ci->ci_softpending != 0) {
		s = splraise(IPL_SOFTHIGH);
		dosoftint();
		ci->ci_ipl = s;	/* no-overhead splx */
	}

	ci->ci_intrdepth--;
}
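
/*
 * A minimal handler that follows the contract described above
 * interrupt(): it receives the pending CR_INT bits it registered for
 * and returns a mask of the bits it actually serviced.  The name
 * xxclock_intr and the device details are hypothetical.
 *
 *	uint32_t
 *	xxclock_intr(uint32_t pending, struct trapframe *frame)
 *	{
 *		if ((pending & CR_INT_5) == 0)
 *			return 0;
 *		... acknowledge the device and do the work here ...
 *		return CR_INT_5;
 *	}
 */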


/*
 * Set up a handler for external interrupt events.
 * Use CR_INT_<n> to select the proper interrupt conditions to dispatch on.
 * The software interrupts are also enabled here since they are always on.
 */
void
set_intr(int pri, uint32_t mask,
    uint32_t (*int_hand)(uint32_t, struct trapframe *))
{
	if ((idle_mask & SOFT_INT_MASK) == 0) {
		evcount_attach(&soft_count, "soft", &soft_irq);
		idle_mask |= SOFT_INT_MASK;
	}
	if (pri < 0 || pri >= NLOWINT)
		panic("set_intr: invalid priority level (%d), increase NLOWINT",
		    pri);

	if (pri > last_low_int)
		last_low_int = pri;

	if ((mask & ~CR_INT_MASK) != 0)
		panic("set_intr: invalid mask 0x%x", mask);

	if (cpu_int_tab[pri].int_mask != 0 &&
	   (cpu_int_tab[pri].int_mask != mask ||
	    cpu_int_tab[pri].int_hand != int_hand))
		panic("set_intr: int already set at pri %d", pri);

	cpu_int_tab[pri].int_hand = int_hand;
	cpu_int_tab[pri].int_mask = mask;
	idle_mask |= mask;
}
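
/*
 * Typical use from a port's machine-dependent startup code (a sketch;
 * INTPRI_XX and xx_intr are hypothetical names, real ports define
 * their own priority slots and dispatchers):
 *
 *	set_intr(INTPRI_XX, CR_INT_0, xx_intr);
 */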

void
dummy_splx(int newcpl)
{
	/* Dummy handler */
}

/*
 *  splinit() is special in the sense that it requires us to update
 *  the interrupt mask in the CPU since it may be the first time we arm
 *  the interrupt system. This function is called right after
 *  autoconfiguration has completed, in autoconf.c.
 *  We enable everything in idle_mask.
 */
void
splinit()
{
	struct proc *p = curproc;
	struct pcb *pcb = &p->p_addr->u_pcb;

	/*
	 * Update proc0 pcb to contain proper values.
	 */
	pcb->pcb_context.val[11] = (pcb->pcb_regs.sr & ~SR_INT_MASK) |
	    (idle_mask & SR_INT_MASK);

	spl0();
	(void)updateimask(0);
}

void
register_splx_handler(void (*handler)(int))
{
	splx_hand = handler;
}
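
/*
 * A port registers its real splx routine once its interrupt controller
 * is set up, replacing dummy_splx above.  A sketch, with xx_splx and
 * xx_setintrmask() as hypothetical names; a real implementation would
 * also replay any interrupts recorded as pending while they were
 * blocked:
 *
 *	void
 *	xx_splx(int newipl)
 *	{
 *		struct cpu_info *ci = curcpu();
 *
 *		ci->ci_ipl = newipl;
 *		xx_setintrmask(newipl);
 *	}
 *
 *	register_splx_handler(xx_splx);
 */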

int
splraise(int newipl)
{
	struct cpu_info *ci = curcpu();
	int oldipl;

	oldipl = ci->ci_ipl;
	if (oldipl < newipl)
		ci->ci_ipl = newipl;
	return oldipl;
}

void
splx(int newipl)
{
	(*splx_hand)(newipl);
}

int
spllower(int newipl)
{
	struct cpu_info *ci = curcpu();
	int oldipl;

	oldipl = ci->ci_ipl;
	splx(newipl);
	return oldipl;
}
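
/*
 * The usual pattern for callers: raise the level around a critical
 * section and restore the previous level afterwards.  IPL_BIO is just
 * an example level; ports typically provide spl macros that expand to
 * splraise() with the appropriate level.
 *
 *	int s;
 *
 *	s = splraise(IPL_BIO);
 *	... touch state shared with the interrupt handler ...
 *	splx(s);
 */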

#ifdef DIAGNOSTIC
void
splassert_check(int wantipl, const char *func)
{
	struct cpu_info *ci = curcpu();

	if (ci->ci_ipl < wantipl)
		splassert_fail(wantipl, ci->ci_ipl, func);

	if (wantipl == IPL_NONE && ci->ci_intrdepth != 0)
		splassert_fail(-1, ci->ci_intrdepth, func);
}
#endif
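
/*
 * Code that must run at or above a given level can document and verify
 * that with splassert(9); in DIAGNOSTIC kernels the macro ends up
 * calling splassert_check() above, e.g.:
 *
 *	splassert(IPL_BIO);
 */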