/*-
 * Copyright (c) 2014 Andrew Turner
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Andrew Turner under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "assym.inc"
#include "opt_kstack_pages.h"
#include "opt_sched.h"

#include <sys/elf_common.h>

#include <machine/asm.h>
#include <machine/armreg.h>
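
/*
 * Helper macros that clear or set the MDSCR_EL1 single-step (SS) bit.
 * They are a no-op unless PCB_SINGLE_STEP_SHIFT is set in the given
 * pcb flags, i.e. unless the thread is being single stepped.
 */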
.macro clear_step_flag pcbflags, tmp
	tbz	\pcbflags, #PCB_SINGLE_STEP_SHIFT, 999f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #MDSCR_SS
	msr	mdscr_el1, \tmp
	isb
999:
.endm

.macro set_step_flag pcbflags, tmp
	tbz	\pcbflags, #PCB_SINGLE_STEP_SHIFT, 999f
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #MDSCR_SS
	msr	mdscr_el1, \tmp
	isb
999:
.endm

/*
 * void cpu_throw(struct thread *old, struct thread *new)
 */
ENTRY(cpu_throw)
	/* If old == NULL, skip disabling single stepping */
	cbz	x0, 1f

	/* If we were single stepping, disable it */
	ldr	x4, [x0, #TD_PCB]
	ldr	w5, [x4, #PCB_FLAGS]
	clear_step_flag w5, x6
1:

#ifdef VFP
	/* Back up the new thread pointer around a call to C code */
	mov	x19, x1
	bl	vfp_discard
	mov	x0, x19
#else
	mov	x0, x1
#endif

	/* This returns the thread pointer, so there is no need to save it */
	bl	ptrauth_switch
#ifdef PERTHREAD_SSP
	mov	x19, x0
#endif
	/* This returns the thread pcb */
	bl	pmap_switch
	mov	x4, x0
#ifdef PERTHREAD_SSP
	/* Update the per-thread stack canary pointer. */
	add	x19, x19, #(TD_MD_CANARY)
	msr	sp_el0, x19
#endif

	/* If we are single stepping, enable it */
	ldr	w5, [x4, #PCB_FLAGS]
	set_step_flag w5, x6

	/* Restore the registers */
	ldp	x5, x6, [x4, #PCB_SP]
	mov	sp, x5
	msr	tpidr_el0, x6
	ldr	x6, [x4, #PCB_TPIDRRO]
	msr	tpidrro_el0, x6
	ldp	x19, x20, [x4, #PCB_REGS + (PCB_X19 + 0) * 8]
	ldp	x21, x22, [x4, #PCB_REGS + (PCB_X19 + 2) * 8]
	ldp	x23, x24, [x4, #PCB_REGS + (PCB_X19 + 4) * 8]
	ldp	x25, x26, [x4, #PCB_REGS + (PCB_X19 + 6) * 8]
	ldp	x27, x28, [x4, #PCB_REGS + (PCB_X19 + 8) * 8]
	ldp	x29, lr, [x4, #PCB_REGS + (PCB_X19 + 10) * 8]

	ret
END(cpu_throw)

/*
 * void cpu_switch(struct thread *old, struct thread *new, struct mtx *mtx)
 *
 * x0 = old
 * x1 = new
 * x2 = mtx
 * x3 to x7, x16 and x17 are caller saved
 */
ENTRY(cpu_switch)
	/*
	 * Save the old context.
	 */
	ldr	x4, [x0, #TD_PCB]

	/* Store the callee-saved registers */
	stp	x19, x20, [x4, #PCB_REGS + (PCB_X19 + 0) * 8]
	stp	x21, x22, [x4, #PCB_REGS + (PCB_X19 + 2) * 8]
	stp	x23, x24, [x4, #PCB_REGS + (PCB_X19 + 4) * 8]
	stp	x25, x26, [x4, #PCB_REGS + (PCB_X19 + 6) * 8]
	stp	x27, x28, [x4, #PCB_REGS + (PCB_X19 + 8) * 8]
	stp	x29, lr, [x4, #PCB_REGS + (PCB_X19 + 10) * 8]
	/* And the old stack pointer */
	mov	x5, sp
	mrs	x6, tpidrro_el0
	str	x6, [x4, #PCB_TPIDRRO]
	mrs	x6, tpidr_el0
	stp	x5, x6, [x4, #PCB_SP]

	/* If we were single stepping, disable it */
	ldr	w5, [x4, #PCB_FLAGS]
	clear_step_flag w5, x6

	mov	x19, x0
	mov	x20, x1
	mov	x21, x2

#ifdef VFP
	/* Load the pcb address */
	mov	x1, x4
	bl	vfp_save_state
	mov	x0, x20
#else
	mov	x0, x1
#endif

	/* This returns the thread pointer, so there is no need to save it */
	bl	ptrauth_switch
	/* This returns the thread pcb */
	bl	pmap_switch
	/* Move the new pcb out of the way */
	mov	x4, x0

	mov	x2, x21
	mov	x1, x20
	mov	x0, x19
#ifdef PERTHREAD_SSP
	/* Update the per-thread stack canary pointer. */
	add	x20, x20, #(TD_MD_CANARY)
	msr	sp_el0, x20
#endif

	/*
	 * Release the old thread.
	 */
	stlr	x2, [x0, #TD_LOCK]
#if defined(SCHED_ULE) && defined(SMP)
	/* Spin if TD_LOCK points to a blocked_lock */
	ldr	x2, =_C_LABEL(blocked_lock)
1:
	ldar	x3, [x1, #TD_LOCK]
	cmp	x3, x2
	b.eq	1b
#endif

	/* If we are single stepping, enable it */
	ldr	w5, [x4, #PCB_FLAGS]
	set_step_flag w5, x6

	/* Restore the registers */
	ldp	x5, x6, [x4, #PCB_SP]
	mov	sp, x5
	msr	tpidr_el0, x6
	ldr	x6, [x4, #PCB_TPIDRRO]
	msr	tpidrro_el0, x6
	ldp	x19, x20, [x4, #PCB_REGS + (PCB_X19 + 0) * 8]
	ldp	x21, x22, [x4, #PCB_REGS + (PCB_X19 + 2) * 8]
	ldp	x23, x24, [x4, #PCB_REGS + (PCB_X19 + 4) * 8]
	ldp	x25, x26, [x4, #PCB_REGS + (PCB_X19 + 6) * 8]
	ldp	x27, x28, [x4, #PCB_REGS + (PCB_X19 + 8) * 8]
	ldp	x29, lr, [x4, #PCB_REGS + (PCB_X19 + 10) * 8]

	ret
END(cpu_switch)

ENTRY(fork_trampoline)
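	/*
	 * Pass the callout function (x19), its argument (x20) and the
	 * current trapframe (sp) to fork_exit(). These registers were
	 * set up in the pcb when the new thread was created.
	 */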
	mov	x0, x19
	mov	x1, x20
	mov	x2, sp
	mov	fp, #0	/* Stack traceback stops here. */
	bl	_C_LABEL(fork_exit)

	/*
	 * Disable interrupts as we are setting userspace-specific
	 * state that we won't handle correctly if an interrupt is
	 * taken while in the kernel.
	 */
	msr	daifset, #(DAIF_D | DAIF_INTR)

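	/* x18 holds the per-CPU data pointer; load curthread from it */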
	ldr	x0, [x18, #PC_CURTHREAD]
	bl	ptrauth_enter_el0

	/* Restore sp, lr, elr, and spsr */
	ldp	x18, lr, [sp, #TF_SP]
	ldp	x10, x11, [sp, #TF_ELR]
	msr	sp_el0, x18
	msr	spsr_el1, x11
	msr	elr_el1, x10

	/* Restore the CPU registers */
	ldp	x0, x1, [sp, #TF_X + 0 * 8]
	ldp	x2, x3, [sp, #TF_X + 2 * 8]
	ldp	x4, x5, [sp, #TF_X + 4 * 8]
	ldp	x6, x7, [sp, #TF_X + 6 * 8]
	ldp	x8, x9, [sp, #TF_X + 8 * 8]
	ldp	x10, x11, [sp, #TF_X + 10 * 8]
	ldp	x12, x13, [sp, #TF_X + 12 * 8]
	ldp	x14, x15, [sp, #TF_X + 14 * 8]
	ldp	x16, x17, [sp, #TF_X + 16 * 8]
	ldp	x18, x19, [sp, #TF_X + 18 * 8]
	ldp	x20, x21, [sp, #TF_X + 20 * 8]
	ldp	x22, x23, [sp, #TF_X + 22 * 8]
	ldp	x24, x25, [sp, #TF_X + 24 * 8]
	ldp	x26, x27, [sp, #TF_X + 26 * 8]
	ldp	x28, x29, [sp, #TF_X + 28 * 8]

	/*
	 * There is no need to re-enable interrupts here: the PSR will
	 * be set to the desired value by the exception return anyway.
	 */
	ERET

END(fork_trampoline)

ENTRY(savectx)
	/* Store the callee-saved registers */
	stp	x19, x20, [x0, #PCB_REGS + (PCB_X19 + 0) * 8]
	stp	x21, x22, [x0, #PCB_REGS + (PCB_X19 + 2) * 8]
	stp	x23, x24, [x0, #PCB_REGS + (PCB_X19 + 4) * 8]
	stp	x25, x26, [x0, #PCB_REGS + (PCB_X19 + 6) * 8]
	stp	x27, x28, [x0, #PCB_REGS + (PCB_X19 + 8) * 8]
	stp	x29, lr, [x0, #PCB_REGS + (PCB_X19 + 10) * 8]
	/* And the old stack pointer */
	mov	x5, sp
	mrs	x6, tpidrro_el0
	str	x6, [x0, #PCB_TPIDRRO]
	mrs	x6, tpidr_el0
	stp	x5, x6, [x0, #PCB_SP]

	/* Store the VFP registers */
#ifdef VFP
	mov	x28, lr
	bl	vfp_save_state_savectx
	mov	lr, x28
#endif

	ret
END(savectx)

GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)