/*-
 * Copyright (c) 2014 Andrew Turner
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Andrew Turner under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "assym.s"

#include <machine/asm.h>

__FBSDID("$FreeBSD: head/sys/arm64/arm64/swtch.S 281494 2015-04-13 14:43:10Z andrew $");

/*
 * void cpu_throw(struct thread *old, struct thread *new)
 */
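/*
 * Switch to the new thread without saving the old thread's context: the
 * old thread is either exiting or has no state worth keeping, so only
 * the new thread's pcb is loaded.
 */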
ENTRY(cpu_throw)
#ifdef SMP
#error cpu_throw needs to be ported to support SMP
#endif

#ifdef VFP
	/* Back up the new thread pointer around a call to C code */
	mov	x19, x1
	bl	vfp_discard
	mov	x1, x19
#endif

	/* Store the new curthread */
	str	x1, [x18, #PC_CURTHREAD]
	/* And the new pcb */
	ldr	x4, [x1, #TD_PCB]
	str	x4, [x18, #PC_CURPCB]

	/*
	 * TODO: We may need to flush the cache here.
	 */

	/* Switch to the new pmap */
	ldr	x5, [x4, #PCB_L1ADDR]
	msr	ttbr0_el1, x5
	isb

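	/*
	 * The TLB may still hold translations for the old address space.
	 * The sequence below drains outstanding memory accesses (dsb),
	 * invalidates all stage 1 EL1/EL0 entries for the inner shareable
	 * domain (tlbi vmalle1is), waits for the invalidation to complete
	 * (second dsb) and resynchronizes the instruction stream (isb).
	 */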
	/* Invalidate the TLB */
	dsb	sy
	tlbi	vmalle1is
	dsb	sy
	isb

	/* Restore the registers */
	ldp	x5, x6, [x4, #PCB_SP]
	mov	sp, x5
	msr	tpidr_el0, x6
	ldp	x8, x9, [x4, #PCB_REGS + 8 * 8]
	ldp	x10, x11, [x4, #PCB_REGS + 10 * 8]
	ldp	x12, x13, [x4, #PCB_REGS + 12 * 8]
	ldp	x14, x15, [x4, #PCB_REGS + 14 * 8]
	ldp	x16, x17, [x4, #PCB_REGS + 16 * 8]
	/* Skip x18: it holds this CPU's pcpu pointer */
	ldr	x19, [x4, #PCB_REGS + 19 * 8]
	ldp	x20, x21, [x4, #PCB_REGS + 20 * 8]
	ldp	x22, x23, [x4, #PCB_REGS + 22 * 8]
	ldp	x24, x25, [x4, #PCB_REGS + 24 * 8]
	ldp	x26, x27, [x4, #PCB_REGS + 26 * 8]
	ldp	x28, x29, [x4, #PCB_REGS + 28 * 8]
	ldr	x30, [x4, #PCB_REGS + 30 * 8]

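	/*
	 * x30 was just reloaded from the new thread's pcb, so this ret
	 * resumes execution at the point where that thread last saved
	 * its context.
	 */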
	ret
END(cpu_throw)

/*
 * void cpu_switch(struct thread *old, struct thread *new, struct mtx *mtx)
 *
 * x0 = old
 * x1 = new
 * x2 = mtx
 * x3 to x7, x16 and x17 are caller saved
 */
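/*
 * cpu_switch() saves the old thread's kernel context into its pcb,
 * loads the new thread's context, and stores 'mtx' into old->td_lock
 * to hand the old thread back to the scheduler.
 */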
ENTRY(cpu_switch)
#ifdef SMP
#error cpu_switch needs to be ported to support SMP
#endif

	/* Store the new curthread */
	str	x1, [x18, #PC_CURTHREAD]
	/* And the new pcb */
	ldr	x4, [x1, #TD_PCB]
	str	x4, [x18, #PC_CURPCB]

	/*
	 * Save the old context.
	 */
	ldr	x4, [x0, #TD_PCB]

	/* Store the callee-saved registers (the pcb tracks x8-x30) */
	stp	x8, x9, [x4, #PCB_REGS + 8 * 8]
	stp	x10, x11, [x4, #PCB_REGS + 10 * 8]
	stp	x12, x13, [x4, #PCB_REGS + 12 * 8]
	stp	x14, x15, [x4, #PCB_REGS + 14 * 8]
	stp	x16, x17, [x4, #PCB_REGS + 16 * 8]
	stp	x18, x19, [x4, #PCB_REGS + 18 * 8]
	stp	x20, x21, [x4, #PCB_REGS + 20 * 8]
	stp	x22, x23, [x4, #PCB_REGS + 22 * 8]
	stp	x24, x25, [x4, #PCB_REGS + 24 * 8]
	stp	x26, x27, [x4, #PCB_REGS + 26 * 8]
	stp	x28, x29, [x4, #PCB_REGS + 28 * 8]
	str	x30, [x4, #PCB_REGS + 30 * 8]
	/* And the old stack pointer */
	mov	x5, sp
	mrs	x6, tpidr_el0
	stp	x5, x6, [x4, #PCB_SP]
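	/*
	 * tpidr_el0 is the EL0 read/write thread ID register; userland
	 * uses it for TLS, so it is saved and restored as part of the
	 * per-thread state.
	 */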

#ifdef VFP
	/* Preserve the arguments in callee-saved registers across the call */
	mov	x19, x0
	mov	x20, x1
	mov	x21, x2
	bl	vfp_save_state
	mov	x2, x21
	mov	x1, x20
	mov	x0, x19
#endif
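	/*
	 * vfp_save_state() writes the outgoing thread's FP/SIMD state out
	 * to memory; note that nothing is reloaded for the incoming
	 * thread here, so the matching restore is left to the C side.
	 */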

	/*
	 * Restore the saved context.
	 */
	ldr	x4, [x1, #TD_PCB]

	/*
	 * TODO: We may need to flush the cache here if switching
	 * to a user process.
	 */

	/* Switch to the new pmap */
	ldr	x5, [x4, #PCB_L1ADDR]
	msr	ttbr0_el1, x5
	isb

	/* Invalidate the TLB */
	dsb	sy
	tlbi	vmalle1is
	dsb	sy
	isb

	/* Release the old thread */
	str	x2, [x0, #TD_LOCK]
#if defined(SCHED_ULE) && defined(SMP)
#error We may need to wait for the lock here
#endif
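	/*
	 * The store to td_lock above is what publishes the old thread
	 * back to the scheduler; with SCHED_ULE on SMP we would also
	 * have to spin here until the new thread's own td_lock had been
	 * released, hence the #error above.
	 */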

	/* Restore the registers */
	ldp	x5, x6, [x4, #PCB_SP]
	mov	sp, x5
	msr	tpidr_el0, x6
	ldp	x8, x9, [x4, #PCB_REGS + 8 * 8]
	ldp	x10, x11, [x4, #PCB_REGS + 10 * 8]
	ldp	x12, x13, [x4, #PCB_REGS + 12 * 8]
	ldp	x14, x15, [x4, #PCB_REGS + 14 * 8]
	ldp	x16, x17, [x4, #PCB_REGS + 16 * 8]
	/* Skip x18: it holds this CPU's pcpu pointer */
	ldr	x19, [x4, #PCB_REGS + 19 * 8]
	ldp	x20, x21, [x4, #PCB_REGS + 20 * 8]
	ldp	x22, x23, [x4, #PCB_REGS + 22 * 8]
	ldp	x24, x25, [x4, #PCB_REGS + 24 * 8]
	ldp	x26, x27, [x4, #PCB_REGS + 26 * 8]
	ldp	x28, x29, [x4, #PCB_REGS + 28 * 8]
	ldr	x30, [x4, #PCB_REGS + 30 * 8]

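	/*
	 * x18 is never reloaded from the pcb (it always holds the
	 * current CPU's pcpu pointer), so clear the saved slot rather
	 * than leave a stale value behind.
	 */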
	str	xzr, [x4, #PCB_REGS + 18 * 8]
	ret
.Lcpu_switch_panic_str:
	.asciz "cpu_switch: %p"
END(cpu_switch)

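/*
 * fork_trampoline() is where a new thread begins running when
 * cpu_switch() returns into it for the first time.  By convention
 * (set up on the C side when the pcb is initialized) x8 holds the
 * function for fork_exit() to call and x9 its argument, and the stack
 * pointer points at the thread's trapframe.
 */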
ENTRY(fork_trampoline)
	mov	x0, x8
	mov	x1, x9
	mov	x2, sp
	mov	fp, #0	/* Stack traceback stops here. */
	bl	_C_LABEL(fork_exit)

	/* Restore sp and lr */
	ldp	x0, x1, [sp]
	msr	sp_el0, x0
	mov	lr, x1
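	/*
	 * The trapframe layout relied on throughout this function: the
	 * saved sp and lr come first, elr and spsr sit at offset 16, and
	 * the general-purpose registers start at TF_X.
	 */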

	/* Restore the registers other than x0 and x1 */
	ldp	x2, x3, [sp, #TF_X + 2 * 8]
	ldp	x4, x5, [sp, #TF_X + 4 * 8]
	ldp	x6, x7, [sp, #TF_X + 6 * 8]
	ldp	x8, x9, [sp, #TF_X + 8 * 8]
	ldp	x10, x11, [sp, #TF_X + 10 * 8]
	ldp	x12, x13, [sp, #TF_X + 12 * 8]
	ldp	x14, x15, [sp, #TF_X + 14 * 8]
	ldp	x16, x17, [sp, #TF_X + 16 * 8]
	/* Skip x18: the kernel needs its pcpu pointer until just before eret */
	ldr	x19, [sp, #TF_X + 19 * 8]
	ldp	x20, x21, [sp, #TF_X + 20 * 8]
	ldp	x22, x23, [sp, #TF_X + 22 * 8]
	ldp	x24, x25, [sp, #TF_X + 24 * 8]
	ldp	x26, x27, [sp, #TF_X + 26 * 8]
	ldp	x28, x29, [sp, #TF_X + 28 * 8]
	/* Skip x30 as it was restored above as lr */

	/*
	 * Mask interrupts (the I bit in DAIF): an interrupt taken after
	 * this point would overwrite elr_el1 and spsr_el1 before the
	 * eret below consumes them.
	 */
	msr	daifset, #2

	/* Restore elr and spsr */
	ldp	x0, x1, [sp, #16]
	msr	elr_el1, x0
	msr	spsr_el1, x1

	/* Finally x0, x1 and x18 from the trapframe */
	ldp	x0, x1, [sp, #TF_X + 0 * 8]
	ldr	x18, [sp, #TF_X + 18 * 8]

	/*
	 * There is no need to re-enable interrupts here: eret installs
	 * spsr_el1 into PSTATE, which already carries the desired
	 * interrupt mask.
	 */
	eret

END(fork_trampoline)

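/*
 * savectx() is expected to store the current context into the pcb
 * passed in x0; it has not been implemented for arm64 yet, so any call
 * is a bug and panics with an identifying message.
 */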
ENTRY(savectx)
	adr	x0, .Lsavectx_panic_str
	bl	panic
	/* NOTREACHED: panic() does not return */
	ret
.Lsavectx_panic_str:
	.asciz "savectx"
END(savectx)