/*	$NetBSD: spl.S,v 1.23 2009/11/27 03:23:04 rmind Exp $	*/

/*
 * Copyright (c) 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_ddb.h"

#define ALIGN_TEXT	.align 16,0x90

#include <machine/asm.h>
#include <machine/trap.h>
#include <machine/segments.h>
#include <machine/frameasm.h>

#include "assym.h"

	.text

#ifndef XEN
/*
 * Xsoftintr()
 *
 * Switch to the LWP assigned to handle interrupts from the given
 * source.  We borrow the VM context from the interrupted LWP.
 *
 * On entry:
 *
 *	%rax		intrsource
 *	%r13		address to return to
 */
IDTVEC(softintr)
	pushq	$_C_LABEL(softintr_ret)	/* set up struct switchframe */
	pushq	%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15
	movl	$IPL_HIGH,CPUVAR(ILEVEL)
	movq	CPUVAR(CURLWP),%r15
	movq	IS_LWP(%rax),%rdi	/* switch to handler LWP */
	movq	L_PCB(%rdi),%rdx
	movq	L_PCB(%r15),%rcx
	movq	%rdi,CPUVAR(CURLWP)
	movq	%rsp,PCB_RSP(%rcx)
	movq	%rbp,PCB_RBP(%rcx)
	movq	PCB_RSP0(%rdx),%rsp	/* onto new stack */
	sti
	movq	%r15,%rdi		/* interrupted LWP */
	movl	IS_MAXLEVEL(%rax),%esi	/* ipl to run at */
	call	_C_LABEL(softint_dispatch)/* run handlers */
	cli
	movq	L_PCB(%r15),%rcx
	movq	PCB_RSP(%rcx),%rsp
	xchgq	%r15,CPUVAR(CURLWP)	/* must be globally visible */
	popq	%r15			/* unwind switchframe */
	addq	$(5 * 8),%rsp
	jmp	*%r13			/* back to splx/doreti */
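
/*
 * In rough C, the fast path above does the following (a sketch only;
 * the field names follow the assym.h offsets used above, and the
 * actual stack switch cannot be expressed in C):
 *
 *	struct lwp *l = curlwp;
 *	save %rsp/%rbp into l's PCB;
 *	curlwp = is->is_lwp;			switch to handler LWP
 *	%rsp = handler PCB's pcb_rsp0;		and onto its stack
 *	softint_dispatch(l, is->is_maxlevel);	run handlers at that ipl
 *	curlwp = l; restore %rsp;		back on borrowed context
 *	jmp *%r13;				return to splx/doreti
 */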

/*
 * softintr_ret()
 *
 * Trampoline function that gets returned to by cpu_switchto() when
 * an interrupt handler blocks.  On entry:
 *
 *	%rax		prevlwp from cpu_switchto()
 */
NENTRY(softintr_ret)
	incl	CPUVAR(MTX_COUNT)	/* re-adjust after mi_switch */
	movl	$0, L_CTXSWTCH(%rax)	/* %rax from cpu_switchto */
	cli
	jmp	*%r13			/* back to splx/doreti */

/*
 * void softint_trigger(uintptr_t machdep);
 *
 * Software interrupt registration.
 */
NENTRY(softint_trigger)
	orl	%edi,CPUVAR(IPENDING)	/* atomic on local cpu */
	ret
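
/*
 * Equivalent C, as a sketch (assuming CPUVAR(IPENDING) is the
 * ci_ipending word of the local CPU's struct cpu_info): a single
 * or-to-memory cannot be torn by an interrupt on the local CPU,
 * hence "atomic on local cpu" with no lock or cli needed:
 *
 *	void softint_trigger(uintptr_t machdep)
 *	{
 *		curcpu()->ci_ipending |= machdep;
 *	}
 */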

/*
 * Xpreemptrecurse()
 *
 * Handles preemption interrupts via Xspllower().
 */
IDTVEC(preemptrecurse)
	movl	$IPL_PREEMPT, CPUVAR(ILEVEL)
	sti
	xorq	%rdi, %rdi
	call	_C_LABEL(kpreempt)
	cli
	jmp	*%r13

/*
 * Xpreemptresume()
 *
 * Handles preemption interrupts via Xdoreti().
 */
IDTVEC(preemptresume)
	movl	$IPL_PREEMPT, CPUVAR(ILEVEL)
	sti
	testq	$SEL_RPL, TF_CS(%rsp)
	jnz	1f
	movq	TF_RIP(%rsp), %rdi
	call	_C_LABEL(kpreempt)		# from kernel
	cli
	jmp	*%r13
1:
	call	_C_LABEL(preempt)		# from user
	cli
	jmp	*%r13
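
/*
 * In outline (a sketch): the resume vector must distinguish where the
 * interrupt was taken, because in-kernel and return-to-user preemption
 * use different MI entry points:
 *
 *	if ((tf->tf_cs & SEL_RPL) == 0)
 *		kpreempt(tf->tf_rip);		preempted in kernel
 *	else
 *		preempt();			returning to user
 */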

/*
 * int splraise(int s);
 */
ENTRY(splraise)
	movl	CPUVAR(ILEVEL),%eax
	cmpl	%edi,%eax
	cmoval	%eax,%edi
	movl	%edi,CPUVAR(ILEVEL)
	ret
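
/*
 * Equivalent C, as a sketch (ci_ilevel standing in for
 * CPUVAR(ILEVEL)); the cmova above makes this branch-free,
 * raising the level only if the new one is higher:
 *
 *	int splraise(int s)
 *	{
 *		int olevel = curcpu()->ci_ilevel;
 *		if (s > olevel)
 *			curcpu()->ci_ilevel = s;
 *		return olevel;
 *	}
 */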

/*
 * void spllower(int s);
 *
 * Must be the same size as cx8_spllower().  This must use
 * pushf/cli/popf as it is used early in boot where interrupts
 * are disabled via eflags/IE.
 */
ENTRY(spllower)
	cmpl	CPUVAR(ILEVEL), %edi
	jae	1f
	movl	CPUVAR(IUNMASK)(,%rdi,4), %edx
	pushf
	cli
	testl	CPUVAR(IPENDING), %edx
	jnz	2f
	movl	%edi, CPUVAR(ILEVEL)
	popf
1:
	ret
	ret	/* unreachable: pads to the size of cx8_spllower() */
2:
	popf
	jmp	_C_LABEL(Xspllower)
	nop
	nop
	.align	16
#ifdef GPROF
	nop
	.align	16
#endif
LABEL(spllower_end)
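
/*
 * The fast path of spllower() in C, as a sketch (ci_* names assumed
 * for the CPUVAR() fields; the pushf/cli/popf bracket made explicit):
 *
 *	void spllower(int s)
 *	{
 *		if (s >= curcpu()->ci_ilevel)
 *			return;
 *		uint32_t unmasked = curcpu()->ci_iunmask[s];
 *		<save eflags, cli>
 *		if (curcpu()->ci_ipending & unmasked) {
 *			<restore eflags>
 *			Xspllower(s);		process deferred ints
 *		} else {
 *			curcpu()->ci_ilevel = s;
 *			<restore eflags>
 *		}
 *	}
 */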

#endif /* !XEN */

/*
 * void	cx8_spllower(int s);
 *
 * For cmpxchg8b, edx/ecx are the high words and eax/ebx the low.
 *
 * edx : eax = old level / old ipending
 * ecx : ebx = new level / old ipending
 */
ENTRY(cx8_spllower)
	movl	CPUVAR(ILEVEL),%edx
	movq	%rbx,%r8
	cmpl	%edx,%edi			/* new level is lower? */
	jae	1f
0:
	movl	CPUVAR(IPENDING),%eax
	movl	%edi,%ecx
	testl	%eax,CPUVAR(IUNMASK)(,%rcx,4)	/* deferred interrupts? */
	movl	%eax,%ebx
	/*
	 * On the P4 this jump is cheaper than patching in junk
	 * using cmov.  Is cmpxchg expensive if it fails?
	 */
	jnz	2f
	cmpxchg8b CPUVAR(ISTATE)		/* swap in new ilevel */
	jnz	0b
1:
	movq	%r8,%rbx
	ret
2:
	movq	%r8,%rbx
LABEL(cx8_spllower_patch)
	jmp	_C_LABEL(Xspllower)

	.align	16
LABEL(cx8_spllower_end)
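
/*
 * The lock-free update above in C, as a sketch: ci_ilevel and
 * ci_ipending are assumed to be adjacent 32-bit words forming the
 * 64-bit CPUVAR(ISTATE), so both can be swapped with one cmpxchg8b
 * and there is no need to disable interrupts:
 *
 *	do {
 *		old = <ci_ilevel : ci_ipending>;	one 64-bit read
 *		if (old.ipending & ci_iunmask[s])
 *			return Xspllower(s);		deferred work
 *		new = <s : old.ipending>;
 *	} while (<cmpxchg8b of new over old on ISTATE> fails);
 */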

/*
 * void Xspllower(int s);
 *
 * Process pending interrupts.
 *
 * Important registers:
 *   ebx - cpl
 *   r13 - address to resume loop at
 *
 * It is important that the bit scan instruction is bsr: it will get
 * the highest 2 bits (currently the IPI and clock handlers) first,
 * to avoid deadlocks where one CPU sends an IPI, another one is at
 * splhigh() and defers it, lands in here via splx(), and handles
 * a lower-prio one first, which needs to take the kernel lock -->
 * the sending CPU will never see that CPU accept the IPI
 * (see pmap_tlb_shootnow).
 */
	nop
	.align	4	/* Avoid confusion with cx8_spllower_end */

IDTVEC(spllower)
	pushq	%rbx
	pushq	%r13
	pushq	%r12
	movl	%edi,%ebx
	leaq	1f(%rip),%r13		# address to resume loop at
1:	movl	%ebx,%eax		# get cpl
	movl	CPUVAR(IUNMASK)(,%rax,4),%eax
	CLI(si)
	andl	CPUVAR(IPENDING),%eax	# any non-masked bits left?
	jz	2f
	bsrl	%eax,%eax
	btrl	%eax,CPUVAR(IPENDING)
	movq	CPUVAR(ISOURCES)(,%rax,8),%rax
	jmp	*IS_RECURSE(%rax)
2:
	movl	%ebx,CPUVAR(ILEVEL)
	STI(si)
	popq	%r12
	popq	%r13
	popq	%rbx
	ret
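
/*
 * The dispatch loop in C-like pseudocode, as a sketch (%r13 holds the
 * resume address that the recursed handlers jump back to):
 *
 *	resume:
 *	while ((pend = ci_ipending & ci_iunmask[cpl]) != 0) {
 *		slot = bsr(pend);	highest-priority source first
 *		ci_ipending &= ~(1 << slot);
 *		goto ci_isources[slot]->is_recurse;
 *					handler re-enters at resume
 *	}
 *	ci_ilevel = cpl;
 */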

/*
 * Handle return from interrupt after device handler finishes.
 *
 * Important registers:
 *   ebx - cpl to restore
 *   r13 - address to resume loop at
 */
IDTVEC(doreti)
	popq	%rbx			# get previous priority
	decl	CPUVAR(IDEPTH)
	leaq	1f(%rip),%r13
1:	movl	%ebx,%eax
	movl	CPUVAR(IUNMASK)(,%rax,4),%eax
	CLI(si)
	andl	CPUVAR(IPENDING),%eax
	jz	2f
	bsrl	%eax,%eax		# slow, but not worth optimizing
	btrl	%eax,CPUVAR(IPENDING)
	movq	CPUVAR(ISOURCES)(,%rax,8),%rax
	jmp	*IS_RESUME(%rax)
2:	/* Check for ASTs on exit to user mode. */
	movl	%ebx,CPUVAR(ILEVEL)
5:
	testb	$SEL_RPL,TF_CS(%rsp)
	jz	6f
	.globl doreti_checkast
doreti_checkast:
	movq	CPUVAR(CURLWP),%r14
	CHECK_ASTPENDING(%r14)
	je	3f
	CLEAR_ASTPENDING(%r14)
	STI(si)
	movl	$T_ASTFLT,TF_TRAPNO(%rsp)	/* XXX undo later.. */
	/* Pushed T_ASTFLT into tf_trapno on entry. */
	movq	%rsp,%rdi
	call	_C_LABEL(trap)
	CLI(si)
	jmp	doreti_checkast
3:
	CHECK_DEFERRED_SWITCH
	jnz	9f
6:
	INTRFASTEXIT
9:
	STI(si)
	call	_C_LABEL(do_pmap_load)
	CLI(si)
	jmp	doreti_checkast		/* recheck ASTs */
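
/*
 * The user-return path in C-like pseudocode, as a sketch: ASTs are
 * rechecked after every trap() call and after do_pmap_load(), since
 * either may post a new AST:
 *
 *	for (;;) {			only when returning to user mode
 *		if (AST pending on curlwp) {
 *			clear it; sti;
 *			tf->tf_trapno = T_ASTFLT;
 *			trap(tf); cli;
 *			continue;	recheck
 *		}
 *		if (deferred pmap switch) {
 *			sti; do_pmap_load(); cli;
 *			continue;	recheck
 *		}
 *		break;			INTRFASTEXIT
 *	}
 */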

#ifdef XEN
NENTRY(call_evtchn_do_event)
	incl	CPUVAR(IDEPTH)
	call	_C_LABEL(evtchn_do_event)
	decl	CPUVAR(IDEPTH)
	ret
#ifdef DOM0OPS
NENTRY(call_xenevt_event)
	incl	CPUVAR(IDEPTH)
	call	_C_LABEL(xenevt_event)
	decl	CPUVAR(IDEPTH)
	ret
#endif /* DOM0OPS */
#endif /* XEN */