/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/elf_common.h>

#include <machine/asm.h>
#include <machine/armreg.h>
#include "assym.inc"

	.text

/*
 * This is limited to 28 instructions as it's placed in the exception vector
 * slot that is 32 instructions long. We need one instruction for the branch
 * to the handler and three for the trailing dsb/isb/brk sequence.
 */
.macro	save_registers_head el
.if \el == 1
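	/* EL1: stash the old stack pointer in x18; it is saved as TF_SP below */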
	mov	x18, sp
	stp	x0,  x1,  [sp, #(TF_X - TF_SIZE - 128)]!
.else
	stp	x0,  x1,  [sp, #(TF_X - TF_SIZE)]!
.endif
	stp	x2,  x3,  [sp, #(2  * 8)]
	stp	x4,  x5,  [sp, #(4  * 8)]
	stp	x6,  x7,  [sp, #(6  * 8)]
	stp	x8,  x9,  [sp, #(8  * 8)]
	stp	x10, x11, [sp, #(10 * 8)]
	stp	x12, x13, [sp, #(12 * 8)]
	stp	x14, x15, [sp, #(14 * 8)]
	stp	x16, x17, [sp, #(16 * 8)]
	stp	x18, x19, [sp, #(18 * 8)]
	stp	x20, x21, [sp, #(20 * 8)]
	stp	x22, x23, [sp, #(22 * 8)]
	stp	x24, x25, [sp, #(24 * 8)]
	stp	x26, x27, [sp, #(26 * 8)]
	stp	x28, x29, [sp, #(28 * 8)]
.if \el == 0
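	/* EL0: fetch the user stack pointer so it can be saved as TF_SP below */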
	mrs	x18, sp_el0
.endif
	mrs	x10, elr_el1
	mrs	x11, spsr_el1
	mrs	x12, esr_el1
	mrs	x13, far_el1
	stp	x18,  lr, [sp, #(TF_SP - TF_X)]!
	stp	x10, x11, [sp, #(TF_ELR)]
	stp	x12, x13, [sp, #(TF_ESR)]
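	/* Reload the per-CPU pointer into x18; the kernel keeps it in tpidr_el1 */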
	mrs	x18, tpidr_el1
.endm

.macro	save_registers el
	add	x29, sp, #(TF_SIZE)
.if \el == 0
#if defined(PERTHREAD_SSP)
	/* Load the SSP canary to sp_el0 */
	ldr	x1, [x18, #(PC_CURTHREAD)]
	add	x1, x1, #(TD_MD_CANARY)
	msr	sp_el0, x1
#endif

	/* Apply the SSBD (CVE-2018-3639) workaround if needed */
	ldr	x1, [x18, #PC_SSBD]
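	/* PC_SSBD points at the mitigation routine, or is NULL if unneeded */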
	cbz	x1, 1f
	mov	w0, #1
	blr	x1
1:

	ldr	x0, [x18, #PC_CURTHREAD]
	bl	ptrauth_exit_el0

	ldr	x0, [x18, #(PC_CURTHREAD)]
	bl	dbg_monitor_enter

	/* Unmask debug and SError exceptions */
	msr	daifclr, #(DAIF_D | DAIF_A)
.else
	/*
	 * Unmask only SError exceptions here; for EL1, debug exceptions
	 * are conditionally unmasked in do_el1h_sync().
	 */
	msr	daifclr, #(DAIF_A)
.endif
.endm

.macro	restore_registers el
	/*
	 * Mask all exceptions; x18 may be changed by the interrupt
	 * exception handler.
	 */
	msr	daifset, #(DAIF_ALL)
.if \el == 0
	ldr	x0, [x18, #PC_CURTHREAD]
	mov	x1, sp
	bl	dbg_monitor_exit

	ldr	x0, [x18, #PC_CURTHREAD]
	bl	ptrauth_enter_el0

	/* Remove the SSBD (CVE-2018-3639) workaround if needed */
	ldr	x1, [x18, #PC_SSBD]
	cbz	x1, 1f
	mov	w0, #0
	blr	x1
1:
.endif
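	/* x18 gets the saved stack pointer: sp_el0 for EL0, the kernel sp for EL1 */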
	ldp	x18,  lr, [sp, #(TF_SP)]
	ldp	x10, x11, [sp, #(TF_ELR)]
.if \el == 0
	msr	sp_el0, x18
.endif
	msr	spsr_el1, x11
	msr	elr_el1, x10
	ldp	x0,  x1,  [sp, #(TF_X + 0  * 8)]
	ldp	x2,  x3,  [sp, #(TF_X + 2  * 8)]
	ldp	x4,  x5,  [sp, #(TF_X + 4  * 8)]
	ldp	x6,  x7,  [sp, #(TF_X + 6  * 8)]
	ldp	x8,  x9,  [sp, #(TF_X + 8  * 8)]
	ldp	x10, x11, [sp, #(TF_X + 10 * 8)]
	ldp	x12, x13, [sp, #(TF_X + 12 * 8)]
	ldp	x14, x15, [sp, #(TF_X + 14 * 8)]
	ldp	x16, x17, [sp, #(TF_X + 16 * 8)]
.if \el == 0
	/*
	 * Only restore the callee-saved registers when returning to
	 * userland, as they may have been updated by a system call
	 * or a signal.
	 */
	ldp	x18, x19, [sp, #(TF_X + 18 * 8)]
	ldp	x20, x21, [sp, #(TF_X + 20 * 8)]
	ldp	x22, x23, [sp, #(TF_X + 22 * 8)]
	ldp	x24, x25, [sp, #(TF_X + 24 * 8)]
	ldp	x26, x27, [sp, #(TF_X + 26 * 8)]
	ldp	x28, x29, [sp, #(TF_X + 28 * 8)]
.else
	ldr	     x29, [sp, #(TF_X + 29 * 8)]
.endif
.if \el == 0
	add	sp, sp, #(TF_SIZE)
.else
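	/* EL1: restore the old stack pointer and reload the per-CPU pointer */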
	mov	sp, x18
	mrs	x18, tpidr_el1
.endif
.endm

.macro	do_ast
	mrs	x19, daif
	/* Clear I in the saved DAIF so IRQs are enabled before calling ast() */
	bic	x19, x19, #PSR_I
1:
	/*
	 * Mask interrupts while checking the AST pending flag
	 */
	msr	daifset, #(DAIF_INTR)

	/* Read the current thread's AST mask */
	ldr	x1, [x18, #PC_CURTHREAD]	/* Load curthread */
	ldr	w1, [x1, #(TD_AST)]

	/* Check if we have a non-zero AST mask */
	cbz	w1, 2f

	/* Restore interrupts */
	msr	daif, x19

	/* Handle the AST */
	mov	x0, sp
	bl	_C_LABEL(ast)

	/* Re-check in case a new AST was scheduled */
	b	1b
2:
.endm

#ifdef KMSAN
/*
 * The KMSAN runtime relies on a TLS block to track initialization and origin
 * state for function parameters and return values.  To keep this state
 * consistent in the face of asynchronous kernel-mode traps, the runtime
 * maintains a stack of blocks: when handling an exception or interrupt,
 * kmsan_intr_enter() pushes the new block to be used until the handler is
 * complete, at which point kmsan_intr_leave() restores the previous block.
 *
 * Thus, KMSAN_ENTER/LEAVE hooks are required only in handlers for events that
 * may have happened while in kernel mode.  In particular, they are not
 * required around svc_handler() or ast() calls.  Otherwise, kmsan_intr_enter()
 * can be called unconditionally, without distinguishing between entry from
 * user mode and entry from kernel mode.
 */
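/*
 * A minimal conceptual sketch of the push/pop behaviour described above,
 * using hypothetical names (the real per-thread state lives in the KMSAN
 * runtime; see kmsan(9)):
 *
 *	struct kmsan_tls tls[MAX_DEPTH];   hypothetical per-thread array
 *	int depth;
 *
 *	kmsan_intr_enter():  depth++; tls[depth] becomes the active block
 *	kmsan_intr_leave():  depth--; the previous block is active again
 */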
#define	KMSAN_ENTER	bl kmsan_intr_enter
#define	KMSAN_LEAVE	bl kmsan_intr_leave
#else
#define	KMSAN_ENTER
#define	KMSAN_LEAVE
#endif

ENTRY(handle_el1h_sync)
	save_registers 1
	KMSAN_ENTER
	ldr	x0, [x18, #PC_CURTHREAD]
	mov	x1, sp
	bl	do_el1h_sync
	KMSAN_LEAVE
	restore_registers 1
	ERET
END(handle_el1h_sync)

ENTRY(handle_el1h_irq)
	save_registers 1
	KMSAN_ENTER
	mov	x0, sp
	bl	intr_irq_handler
	KMSAN_LEAVE
	restore_registers 1
	ERET
END(handle_el1h_irq)

ENTRY(handle_el1h_serror)
	save_registers 1
	KMSAN_ENTER
	mov	x0, sp
1:	bl	do_serror
	b	1b
	KMSAN_LEAVE
END(handle_el1h_serror)

ENTRY(handle_el0_sync)
	save_registers 0
	KMSAN_ENTER
	ldr	x0, [x18, #PC_CURTHREAD]
	mov	x1, sp
	str	x1, [x0, #TD_FRAME]
	bl	do_el0_sync
	do_ast
	KMSAN_LEAVE
	restore_registers 0
	ERET
END(handle_el0_sync)

ENTRY(handle_el0_irq)
	save_registers 0
	KMSAN_ENTER
	mov	x0, sp
	bl	intr_irq_handler
	do_ast
	KMSAN_LEAVE
	restore_registers 0
	ERET
END(handle_el0_irq)

ENTRY(handle_el0_serror)
	save_registers 0
	KMSAN_ENTER
	mov	x0, sp
1:	bl	do_serror
	b	1b
	KMSAN_LEAVE
END(handle_el0_serror)

ENTRY(handle_empty_exception)
	save_registers 0
	KMSAN_ENTER
	mov	x0, sp
1:	bl	unhandled_exception
	b	1b
	KMSAN_LEAVE
END(handle_empty_exception)

.macro	vector	name, el
	.align 7
	save_registers_head \el
	b	handle_\name
	dsb	sy
	isb
	/* Break instruction to ensure we aren't executing code here. */
	brk	0x42
.endm

.macro	vempty el
	vector	empty_exception \el
.endm

	.align 11
	.globl exception_vectors
exception_vectors:
	vempty 1		/* Synchronous EL1t */
	vempty 1		/* IRQ EL1t */
	vempty 1		/* FIQ EL1t */
	vempty 1		/* Error EL1t */

	vector el1h_sync 1	/* Synchronous EL1h */
	vector el1h_irq 1	/* IRQ EL1h */
	vempty 1		/* FIQ EL1h */
	vector el1h_serror 1	/* Error EL1h */

	vector el0_sync 0	/* Synchronous 64-bit EL0 */
	vector el0_irq 0	/* IRQ 64-bit EL0 */
	vempty 0		/* FIQ 64-bit EL0 */
	vector el0_serror 0	/* Error 64-bit EL0 */

	vector el0_sync 0	/* Synchronous 32-bit EL0 */
	vector el0_irq 0	/* IRQ 32-bit EL0 */
	vempty 0		/* FIQ 32-bit EL0 */
	vector el0_serror 0	/* Error 32-bit EL0 */

GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)