/* $NetBSD: cpu_switch.S,v 1.6 2024/05/02 18:18:17 skrll Exp $ */

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_ddb.h"

#include <machine/asm.h>
#include "assym.h"

/*
 * struct lwp *
 * cpu_switchto(struct lwp *oldl, struct lwp *newl, bool returning);
 */
ENTRY_NP(cpu_switchto)
	addi	sp, sp, -TF_LEN		// allocate trapframe

	REG_S	ra, TF_RA(sp)		// save return address
	REG_S	s0, TF_S0(sp)		// save callee-saved register
	REG_S	s1, TF_S1(sp)		// save callee-saved register
	REG_S	s2, TF_S2(sp)		// save callee-saved register
	REG_S	s3, TF_S3(sp)		// save callee-saved register
	REG_S	s4, TF_S4(sp)		// save callee-saved register
	REG_S	s5, TF_S5(sp)		// save callee-saved register
	REG_S	s6, TF_S6(sp)		// save callee-saved register
	REG_S	s7, TF_S7(sp)		// save callee-saved register
	REG_S	s8, TF_S8(sp)		// save callee-saved register
	REG_S	s9, TF_S9(sp)		// save callee-saved register
	REG_S	s10, TF_S10(sp)		// save callee-saved register
	REG_S	s11, TF_S11(sp)		// save callee-saved register

	REG_S	sp, L_MD_KTF(a0)	// record trapframe pointer

	csrrci	t0, sstatus, SR_SIE	// disable interrupts

	mv	tp, a1			// put the new lwp in thread pointer

	PTR_L	t1, L_CPU(tp)		// get curcpu

	/*
	 * Issue barriers to coordinate mutex_exit on this CPU with
	 * mutex_vector_enter on another CPU.
	 *
	 * 1. Any prior mutex_exit by oldlwp must be visible to other
	 *    CPUs before we set ci_curlwp := newlwp on this one,
	 *    requiring a store-before-store barrier.
	 *
	 * 2. ci_curlwp := newlwp must be visible on all other CPUs
	 *    before any subsequent mutex_exit by newlwp can even test
	 *    whether there might be waiters, requiring a
	 *    store-before-load barrier.
	 *
	 * See kern_mutex.c for details -- this is necessary for
	 * adaptive mutexes to detect whether the lwp is on the CPU in
	 * order to safely block without requiring atomic r/m/w in
	 * mutex_exit.
	 */
	fence	w,w
	PTR_S	tp, CI_CURLWP(t1)	// update curcpu with the new curlwp
	fence	w,r
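
	/*
	 * For illustration only (not part of the build): in C11-atomics
	 * terms, the fence pair above corresponds roughly to the
	 * following, assuming ci_curlwp is accessed atomically.  A
	 * seq_cst fence is used as the closest portable analogue of a
	 * store-before-load barrier:
	 *
	 *	atomic_thread_fence(memory_order_release);   // fence w,w
	 *	ci->ci_curlwp = newl;
	 *	atomic_thread_fence(memory_order_seq_cst);   // fence w,r
	 */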

	REG_L	sp, L_MD_KTF(tp)	// load its kernel stack pointer

	csrw	sstatus, t0		// restore interrupt state
	REG_L	s0, TF_S0(sp)		// restore callee-saved register
	REG_L	s1, TF_S1(sp)		// restore callee-saved register
	REG_L	s2, TF_S2(sp)		// restore callee-saved register
	REG_L	s3, TF_S3(sp)		// restore callee-saved register
	REG_L	s4, TF_S4(sp)		// restore callee-saved register
	REG_L	s5, TF_S5(sp)		// restore callee-saved register
	REG_L	s6, TF_S6(sp)		// restore callee-saved register
	REG_L	s7, TF_S7(sp)		// restore callee-saved register
	REG_L	s8, TF_S8(sp)		// restore callee-saved register
	REG_L	s9, TF_S9(sp)		// restore callee-saved register
	REG_L	s10, TF_S10(sp)		// restore callee-saved register
	REG_L	s11, TF_S11(sp)		// restore callee-saved register

	REG_L	ra, TF_RA(sp)		// restore return address

	addi	sp, sp, TF_LEN		// remove trapframe

	//	a0 = oldl
	//	a1 = newl
	//	tp = newl
	//	t1 = curcpu()

	ret
END(cpu_switchto)
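
/*
 * Note on the return value: the "ret" above executes on the new lwp's
 * stack with its saved ra, so control resumes inside that lwp's own
 * earlier call to cpu_switchto().  Since a0 is caller-saved and is not
 * written after entry, the resumed lwp observes the lwp that was
 * running just before it, which is what the MI contract expects
 * (sketch):
 *
 *	prevlwp = cpu_switchto(curlwp, newlwp, false);
 */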

/*
 * Called at IPL_SCHED:
 *	a0 = old lwp (from cpu_switchto)
 *	a1 = new lwp (from cpu_switchto)
 *	s0 = func
 *	s1 = arg
 */
ENTRY_NP(lwp_trampoline)
	call	_C_LABEL(lwp_startup)	// call lwp startup

	// If the saved func returns, we are returning to user land.
	PTR_LA	ra, exception_userexit
	mv	a0, s1			// get saved arg
	jr	s0			// call saved func
END(lwp_trampoline)
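
/*
 * For reference, this depends on the fork-time setup arranging for the
 * first switch into the new lwp to land here with func/arg in s0/s1;
 * roughly (a sketch of what cpu_lwp_fork() is assumed to do, field
 * names illustrative):
 *
 *	tf->tf_s0 = (register_t)func;
 *	tf->tf_s1 = (register_t)arg;
 *	tf->tf_ra = (register_t)lwp_trampoline;
 */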


ENTRY_NP(cpu_fast_switchto_cleanup)
//	PTR_L	t0, L_CPU(tp)		// Get curcpu()
//	INT_L	t1, CI_MTX_COUNT(t0)	// get mutex count
//	addi	t1, t1, 1		// increment mutex count
//	INT_S	t1, CI_MTX_COUNT(t0)	// save it
	mv	ra, a1			// Restore real RA
#if IPL_SCHED != IPL_HIGH
	tail	_C_LABEL(splhigh)	// go back to IPL_HIGH
#else
	ret				// just return
#endif
END(cpu_fast_switchto_cleanup)


/*
 * void
 * cpu_fast_switchto(struct lwp *, int s);
 */
ENTRY_NP(cpu_fast_switchto)
	addi	sp, sp, -(TF_LEN + CALLFRAME_SIZ)
	REG_S	a0, (TF_LEN + CALLFRAME_S0)(sp)
	REG_S	ra, (TF_LEN + CALLFRAME_RA)(sp)

	PTR_LA	t2, _C_LABEL(cpu_fast_switchto_cleanup)

	REG_S	t2, TF_RA(sp)		// return to someplace else
	REG_S	s0, TF_S0(sp)		// save callee-saved register
	REG_S	s1, TF_S1(sp)		// save callee-saved register
	REG_S	s2, TF_S2(sp)		// save callee-saved register
	REG_S	s3, TF_S3(sp)		// save callee-saved register
	REG_S	s4, TF_S4(sp)		// save callee-saved register
	REG_S	s5, TF_S5(sp)		// save callee-saved register
	REG_S	s6, TF_S6(sp)		// save callee-saved register
	REG_S	s7, TF_S7(sp)		// save callee-saved register
	REG_S	s8, TF_S8(sp)		// save callee-saved register
	REG_S	s9, TF_S9(sp)		// save callee-saved register
	REG_S	s10, TF_S10(sp)		// save callee-saved register
	REG_S	s11, TF_S11(sp)		// save callee-saved register
	csrr	t4, sstatus		// get status register (for intr state)
	REG_S	t4, TF_SR(sp)		// save it

	mv	s0, tp			// remember curlwp
	mv	s1, sp			// remember kernel stack

	csrrci	t0, sstatus, SR_SIE	// disable interrupts
	PTR_L	t1, L_CPU(tp)		// get curcpu()

	PTR_S	sp, L_MD_KTF(tp)	// save trapframe ptr in oldlwp
	mv	tp, a0			// set thread pointer to newlwp
	fence	w,w			// for mutex_enter; see cpu_switchto
	PTR_S	tp, CI_CURLWP(t1)	// update curlwp
	/*
	 * No need for barrier after ci->ci_curlwp = softlwp -- when we
	 * enter a softint lwp, it can't be holding any mutexes, so it
	 * can't release any until after it has acquired them, so we
	 * need not participate in the protocol with mutex_vector_enter
	 * barriers here.
	 */
	PTR_L	sp, L_MD_KTF(tp)	// switch to its stack
#ifdef __HAVE_FAST_SOFTINTS
	csrw	sstatus, t0		// reenable interrupts
	call	_C_LABEL(softint_dispatch)
	csrrci	t0, sstatus, SR_SIE	// disable interrupts
#endif	/* __HAVE_FAST_SOFTINTS */
	PTR_L	t1, L_CPU(tp)		// get curcpu() again
	mv	tp, s0			// return to pinned lwp
	fence	w,w			// for mutex_enter; see cpu_switchto
	PTR_S	tp, CI_CURLWP(t1)	// restore curlwp
	fence	w,r			// for mutex_enter; see cpu_switchto
	csrw	sstatus, t0		// reenable interrupts
	mv	sp, s1			// restore stack pointer

	REG_L	ra, TF_RA(sp)		// get return address
	REG_L	s0, TF_S0(sp)		// restore register we used
	REG_L	s1, TF_S1(sp)		// restore register we used

	REG_L	a0, (TF_LEN + CALLFRAME_S0)(sp)	// Pass the softlwp
	REG_L	a1, (TF_LEN + CALLFRAME_RA)(sp)	// Pass the real RA

	addi	sp, sp, (TF_LEN + CALLFRAME_SIZ)	// drop trapframe/callframe
	ret				// return
END(cpu_fast_switchto)


/*
 * RISC-V has a single, simple exception handler that handles both
 * synchronous traps and interrupts.
 */
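
/*
 * Dispatch below relies on the scause encoding: bit XLEN-1 is set for
 * interrupts and clear for synchronous exceptions, so a signed
 * "branch if negative" on scause splits the two cases.  In C terms
 * (illustrative only):
 *
 *	if ((intptr_t)cause < 0)
 *		cpu_intr(...);		// asynchronous interrupt
 *	else
 *		cpu_trap(...);		// synchronous trap; stval is valid
 */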

ENTRY_NP(cpu_exception_handler)
	csrrw	tp, sscratch, tp	// swap scratch and thread pointer
	beqz	tp, .Lexception_kernel	//   tp == 0, already on kernel stack
	//
	// The exception happened while user code was executing.  We need to
	// get the pointer to the user trapframe from the LWP md area.  Then we
	// save t1 and tp so we have a register to work with and to get curlwp
	// into tp.  We also save the user SP into the trapframe.
	// On an exception from user mode, sscratch contains curlwp.
	//
	REG_S	sp, L_MD_USP(tp)	// save user stack pointer temporarily
	PTR_L	sp, L_MD_UTF(tp)	// load the user trapframe pointer
	REG_S	t1, TF_T1(sp)		// save t1
	REG_L	t1, L_MD_USP(tp)	// get user stack pointer
	REG_S	t1, TF_SP(sp)		// save user stack pointer in trapframe

	csrrw	t1, sscratch, zero	// swap saved thread pointer with 0
	REG_S	t1, TF_TP(sp)		// save thread pointer in trapframe
	li	t1, 0			// indicate user exception
	j	.Lexception_common

	//
	// The exception happened while we were already in the kernel.  That
	// means tp already has curlwp and sp has the kernel stack pointer, so
	// we just need to restore tp and then adjust sp down to make space
	// for the trapframe.  We save t1 so we can use it to save the
	// original sp into the trapframe for use by the exception exiting
	// code.
	//
.Lexception_kernel:
	csrrw	tp, sscratch, zero	// get back our thread pointer
	addi	sp, sp, -TF_LEN		// allocate stack frame
	REG_S	t1, TF_T1(sp)		// save t1
	addi	t1, sp, TF_LEN
	REG_S	t1, TF_SP(sp)		// save original SP
	li	t1, 1			// indicate kernel exception

.Lexception_common:
	// Now we save all the temporary registers into the trapframe since
	// they will most certainly be changed.
	REG_S	ra, TF_RA(sp)		// save return address
	REG_S	gp, TF_GP(sp)		// save gp
	REG_S	a0, TF_A0(sp)		// save a0
	REG_S	a1, TF_A1(sp)		// save a1
	REG_S	a2, TF_A2(sp)		// save a2
	REG_S	a3, TF_A3(sp)		// save a3
	REG_S	a4, TF_A4(sp)		// save a4
	REG_S	a5, TF_A5(sp)		// save a5
	REG_S	a6, TF_A6(sp)		// save a6
	REG_S	a7, TF_A7(sp)		// save a7
	REG_S	t0, TF_T0(sp)		// save t0
					// t1 is already saved
	REG_S	t2, TF_T2(sp)		// save t2
	REG_S	t3, TF_T3(sp)		// save t3
	REG_S	t4, TF_T4(sp)		// save t4
	REG_S	t5, TF_T5(sp)		// save t5
	REG_S	t6, TF_T6(sp)		// save t6

#if defined(DDB)
	REG_S	s0, TF_S0(sp)		// save s0 - frame pointer - useful
	REG_S	s1, TF_S1(sp)		// save s1
	REG_S	s2, TF_S2(sp)		// save s2
	REG_S	s3, TF_S3(sp)		// save s3
	REG_S	s4, TF_S4(sp)		// save s4
	REG_S	s5, TF_S5(sp)		// save s5
	REG_S	s6, TF_S6(sp)		// save s6
	REG_S	s7, TF_S7(sp)		// save s7
	REG_S	s8, TF_S8(sp)		// save s8
	REG_S	s9, TF_S9(sp)		// save s9
	REG_S	s10, TF_S10(sp)		// save s10
	REG_S	s11, TF_S11(sp)		// save s11
#endif

	/* Set the global pointer */
	.option push
	.option norelax
	lla	gp, __global_pointer$
	.option pop

	// Now we read the trap CSRs
	mv	a0, sp			// trapframe pointer
	csrr	a1, sepc		// get exception pc
	csrr	a2, sstatus		// get status
	csrr	a3, scause		// get cause

	REG_S	a1, TF_PC(sp)
	REG_S	a2, TF_SR(sp)
	REG_S	a3, TF_CAUSE(sp)	// save cause

	// Now we've saved the trapframe; the cause is still in a3.

	bltz	a3, intr_handler	// MSB is set if interrupt

	// stval is only relevant for non-interrupts
	csrr	a4, stval		// get stval
	REG_S	a4, TF_TVAL(sp)

	beqz	t1, trap_user		// this was a user trap

	// This was a kernel exception
	call	_C_LABEL(cpu_trap)	// just call trap to handle it

ALTENTRY(exception_kernexit)
	// If we got here, we are returning from a kernel exception (either a
	// trap or interrupt).  Simply restore the temporary registers and the
	// exception PC and status, load the saved SP from the trapframe, and
	// return from the exception.

	REG_L	ra, TF_RA(sp)		// restore return address
	REG_L	gp, TF_GP(sp)		// restore gp
	REG_L	s0, TF_S0(sp)		// only restore from userland
	REG_L	s1, TF_S1(sp)		// only restore from userland
	REG_L	s2, TF_S2(sp)		// only restore from userland
	REG_L	s3, TF_S3(sp)		// only restore from userland
	REG_L	s4, TF_S4(sp)		// only restore from userland
	REG_L	s5, TF_S5(sp)		// only restore from userland
	REG_L	s6, TF_S6(sp)		// only restore from userland
	REG_L	s7, TF_S7(sp)		// only restore from userland
	REG_L	s8, TF_S8(sp)		// only restore from userland
	REG_L	s9, TF_S9(sp)		// only restore from userland
	REG_L	s10, TF_S10(sp)		// only restore from userland
	REG_L	s11, TF_S11(sp)		// only restore from userland
	REG_L	a0, TF_A0(sp)		// restore a0
	REG_L	a1, TF_A1(sp)		// restore a1
	REG_L	a2, TF_A2(sp)		// restore a2
	REG_L	a3, TF_A3(sp)		// restore a3
	REG_L	a4, TF_A4(sp)		// restore a4
	REG_L	a5, TF_A5(sp)		// restore a5
	REG_L	a6, TF_A6(sp)		// restore a6
	REG_L	a7, TF_A7(sp)		// restore a7
	REG_L	t2, TF_T2(sp)		// restore t2
	REG_L	t3, TF_T3(sp)		// restore t3
	REG_L	t4, TF_T4(sp)		// restore t4
	REG_L	t5, TF_T5(sp)		// restore t5
	REG_L	t6, TF_T6(sp)		// restore t6

	REG_L	t0, TF_PC(sp)		// fetch exception PC
	REG_L	t1, TF_SR(sp)		// fetch status

	csrw	sstatus, t1		// restore sstatus (needs to have SIE=0)
	csrw	sepc, t0		// restore exception PC

	REG_L	t0, TF_T0(sp)		// restore t0
	REG_L	t1, TF_T1(sp)		// restore t1
	REG_L	sp, TF_SP(sp)		// restore SP
	sret				// and we're done

trap_user:
#if 0
	/* Already saved */
	REG_S	s0, TF_S0(sp)		// only save from userland
	REG_S	s1, TF_S1(sp)		// only save from userland
	REG_S	s2, TF_S2(sp)		// only save from userland
	REG_S	s3, TF_S3(sp)		// only save from userland
	REG_S	s4, TF_S4(sp)		// only save from userland
	REG_S	s5, TF_S5(sp)		// only save from userland
	REG_S	s6, TF_S6(sp)		// only save from userland
	REG_S	s7, TF_S7(sp)		// only save from userland
	REG_S	s8, TF_S8(sp)		// only save from userland
	REG_S	s9, TF_S9(sp)		// only save from userland
	REG_S	s10, TF_S10(sp)		// only save from userland
	REG_S	s11, TF_S11(sp)		// only save from userland
#endif

	csrsi	sstatus, SR_SIE		// reenable interrupts
	li	t0, SR_SUM | SR_FS
	csrc	sstatus, t0		// disable user memory access and FP

	li	t0, CAUSE_SYSCALL	// let's see if this was a syscall
	beq	a3, t0, trap_syscall	//   yes it was

	call	_C_LABEL(cpu_trap)	// nope, just a regular trap

_C_LABEL(exception_userexit):
	csrci	sstatus, SR_SIE		// disable interrupts
	INT_L	t0, L_MD_ASTPENDING(tp)	// ast pending?
	bnez	t0, trap_doast		//   yes, handle it.
	csrw	sscratch, tp		// show we are coming from userland
	REG_L	tp, TF_TP(sp)		// only restore from userland
#if 0
	REG_L	s0, TF_S0(sp)		// only restore from userland
	REG_L	s1, TF_S1(sp)		// only restore from userland
	REG_L	s2, TF_S2(sp)		// only restore from userland
	REG_L	s3, TF_S3(sp)		// only restore from userland
	REG_L	s4, TF_S4(sp)		// only restore from userland
	REG_L	s5, TF_S5(sp)		// only restore from userland
	REG_L	s6, TF_S6(sp)		// only restore from userland
	REG_L	s7, TF_S7(sp)		// only restore from userland
	REG_L	s8, TF_S8(sp)		// only restore from userland
	REG_L	s9, TF_S9(sp)		// only restore from userland
	REG_L	s10, TF_S10(sp)		// only restore from userland
	REG_L	s11, TF_S11(sp)		// only restore from userland
#endif
	j	exception_kernexit

trap_syscall:
	PTR_LA	ra, exception_userexit
	PTR_L	t0, L_PROC(tp)		// get proc struct
	PTR_L	t0, P_MD_SYSCALL(t0)	// get syscall address from proc
	jr	t0			// and jump to it

intr_user:
	REG_S	s0, TF_S0(sp)		// only save from userland
	REG_S	s1, TF_S1(sp)		// only save from userland
	REG_S	s2, TF_S2(sp)		// only save from userland
	REG_S	s3, TF_S3(sp)		// only save from userland
	REG_S	s4, TF_S4(sp)		// only save from userland
	REG_S	s5, TF_S5(sp)		// only save from userland
	REG_S	s6, TF_S6(sp)		// only save from userland
	REG_S	s7, TF_S7(sp)		// only save from userland
	REG_S	s8, TF_S8(sp)		// only save from userland
	REG_S	s9, TF_S9(sp)		// only save from userland
	REG_S	s10, TF_S10(sp)		// only save from userland
	REG_S	s11, TF_S11(sp)		// only save from userland
	PTR_LA	ra, exception_userexit

	tail	_C_LABEL(cpu_intr)	// handle interrupt

trap_doast:
	INT_S	zero, L_MD_ASTPENDING(tp)
	csrsi	sstatus, SR_SIE		// reenable interrupts
	mv	a0, sp			// only argument is trapframe
	// ra still points at exception_userexit, so cpu_ast returns there
	tail	_C_LABEL(cpu_ast)


intr_handler:
	beqz	t1, intr_user
	PTR_LA	ra, exception_kernexit
	tail	_C_LABEL(cpu_intr)
END(cpu_exception_handler)


/*
 * int
 * cpu_set_onfault(struct faultbuf *fb)
 */
ENTRY_NP(cpu_set_onfault)
	REG_S	ra, FB_RA(a0)
	REG_S	s0, FB_S0(a0)
	REG_S	s1, FB_S1(a0)
	REG_S	s2, FB_S2(a0)
	REG_S	s3, FB_S3(a0)
	REG_S	s4, FB_S4(a0)
	REG_S	s5, FB_S5(a0)
	REG_S	s6, FB_S6(a0)
	REG_S	s7, FB_S7(a0)
	REG_S	s8, FB_S8(a0)
	REG_S	s9, FB_S9(a0)
	REG_S	s10, FB_S10(a0)
	REG_S	s11, FB_S11(a0)
	REG_S	sp, FB_SP(a0)
	PTR_S	a0, L_MD_ONFAULT(tp)
	li	a0, 0
	ret
END(cpu_set_onfault)
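
/*
 * Typical onfault usage from MD copy/fetch code, as a sketch (the
 * cpu_unset_onfault() counterpart and the fault-time return through
 * l_md.md_onfault are assumed from the rest of the port):
 *
 *	struct faultbuf fb;
 *	if (cpu_set_onfault(&fb) == 0) {
 *		// touch user memory; a fault returns here nonzero
 *		cpu_unset_onfault();
 *		return 0;
 *	}
 *	return EFAULT;
 */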


ENTRY_NP(setjmp)
	REG_S	ra, FB_RA(a0)
	REG_S	s0, FB_S0(a0)
	REG_S	s1, FB_S1(a0)
	REG_S	s2, FB_S2(a0)
	REG_S	s3, FB_S3(a0)
	REG_S	s4, FB_S4(a0)
	REG_S	s5, FB_S5(a0)
	REG_S	s6, FB_S6(a0)
	REG_S	s7, FB_S7(a0)
	REG_S	s8, FB_S8(a0)
	REG_S	s9, FB_S9(a0)
	REG_S	s10, FB_S10(a0)
	REG_S	s11, FB_S11(a0)
	REG_S	sp, FB_SP(a0)
	li	a0, 0
	ret
END(setjmp)


ENTRY_NP(longjmp)
	REG_L	ra, FB_RA(a0)
	REG_L	s0, FB_S0(a0)
	REG_L	s1, FB_S1(a0)
	REG_L	s2, FB_S2(a0)
	REG_L	s3, FB_S3(a0)
	REG_L	s4, FB_S4(a0)
	REG_L	s5, FB_S5(a0)
	REG_L	s6, FB_S6(a0)
	REG_L	s7, FB_S7(a0)
	REG_L	s8, FB_S8(a0)
	REG_L	s9, FB_S9(a0)
	REG_L	s10, FB_S10(a0)
	REG_L	s11, FB_S11(a0)
	REG_L	sp, FB_SP(a0)
	mv	a0, a1
	ret
END(longjmp)

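/*
 * Note that this kernel longjmp takes the value to deliver in a1 (the
 * "mv a0, a1" above), so the matching setjmp appears to return that
 * value; e.g. (sketch):
 *
 *	if (setjmp(&jb) == 0) {
 *		...			// normal path
 *	} else {
 *		...			// reached via longjmp(&jb, 1)
 *	}
 */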