1129198Scognet/*	$NetBSD: cpuswitch.S,v 1.41 2003/11/15 08:44:18 scw Exp $	*/
2129198Scognet
3139735Simp/*-
4129198Scognet * Copyright 2003 Wasabi Systems, Inc.
5129198Scognet * All rights reserved.
6129198Scognet *
7129198Scognet * Written by Steve C. Woodford for Wasabi Systems, Inc.
8129198Scognet *
9129198Scognet * Redistribution and use in source and binary forms, with or without
10129198Scognet * modification, are permitted provided that the following conditions
11129198Scognet * are met:
12129198Scognet * 1. Redistributions of source code must retain the above copyright
13129198Scognet *    notice, this list of conditions and the following disclaimer.
14129198Scognet * 2. Redistributions in binary form must reproduce the above copyright
15129198Scognet *    notice, this list of conditions and the following disclaimer in the
16129198Scognet *    documentation and/or other materials provided with the distribution.
17129198Scognet * 3. All advertising materials mentioning features or use of this software
18129198Scognet *    must display the following acknowledgement:
19129198Scognet *      This product includes software developed for the NetBSD Project by
20129198Scognet *      Wasabi Systems, Inc.
21129198Scognet * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22129198Scognet *    or promote products derived from this software without specific prior
23129198Scognet *    written permission.
24129198Scognet *
25129198Scognet * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26129198Scognet * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27129198Scognet * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28129198Scognet * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29129198Scognet * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30129198Scognet * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31129198Scognet * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32129198Scognet * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33129198Scognet * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34129198Scognet * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35129198Scognet * POSSIBILITY OF SUCH DAMAGE.
36129198Scognet */
37139735Simp/*-
38129198Scognet * Copyright (c) 1994-1998 Mark Brinicombe.
39129198Scognet * Copyright (c) 1994 Brini.
40129198Scognet * All rights reserved.
41129198Scognet *
42129198Scognet * This code is derived from software written for Brini by Mark Brinicombe
43129198Scognet *
44129198Scognet * Redistribution and use in source and binary forms, with or without
45129198Scognet * modification, are permitted provided that the following conditions
46129198Scognet * are met:
47129198Scognet * 1. Redistributions of source code must retain the above copyright
48129198Scognet *    notice, this list of conditions and the following disclaimer.
49129198Scognet * 2. Redistributions in binary form must reproduce the above copyright
50129198Scognet *    notice, this list of conditions and the following disclaimer in the
51129198Scognet *    documentation and/or other materials provided with the distribution.
52129198Scognet * 3. All advertising materials mentioning features or use of this software
53129198Scognet *    must display the following acknowledgement:
54129198Scognet *	This product includes software developed by Brini.
55129198Scognet * 4. The name of the company nor the name of the author may be used to
56129198Scognet *    endorse or promote products derived from this software without specific
57129198Scognet *    prior written permission.
58129198Scognet *
59129198Scognet * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
60129198Scognet * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
61129198Scognet * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
62129198Scognet * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
63129198Scognet * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
64129198Scognet * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
65129198Scognet * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
66129198Scognet * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
67129198Scognet * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
68129198Scognet * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
69129198Scognet * SUCH DAMAGE.
70129198Scognet *
71129198Scognet * RiscBSD kernel project
72129198Scognet *
73129198Scognet * cpuswitch.S
74129198Scognet *
75129198Scognet * cpu switching functions
76129198Scognet *
77129198Scognet * Created      : 15/10/94
78129198Scognet *
79129198Scognet */
80129198Scognet
81137274Scognet#include "assym.s"
82245477Scognet#include "opt_sched.h"
83137274Scognet
84129198Scognet#include <machine/asm.h>
85129198Scognet#include <machine/asmacros.h>
86129198Scognet#include <machine/armreg.h>
87295089Smmel#include <machine/sysreg.h>
88262941Sian#include <machine/vfp.h>
89262941Sian
90129198Scognet__FBSDID("$FreeBSD: stable/11/sys/arm/arm/swtch-v6.S 331988 2018-04-04 06:11:05Z mmel $");
91129198Scognet
/*
 * GET_PCPU(tmp, tmp2) -- leave the address of this CPU's struct pcpu
 * in `tmp'.
 *
 * SMP: the CPU index comes from the low 4 bits of MPIDR and is used to
 *      index the per-CPU array: tmp = __pcpu + cpuid * PCPU_SIZE.
 *      Base address and element size are read from the .Lcurpcpu
 *      literal pool below; `tmp2' is a scratch register.
 * UP:  only one struct pcpu exists, so just load its address.
 *      `tmp2' is unused but kept so both variants share one signature.
 */
92295089Smmel#if defined(SMP)
93261415Scognet#define GET_PCPU(tmp, tmp2) \
94295089Smmel	mrc 	CP15_MPIDR(tmp);	\
95261415Scognet	and	tmp, tmp, #0xf;		\
96261415Scognet	ldr 	tmp2, .Lcurpcpu+4;	\
97261415Scognet	mul 	tmp, tmp, tmp2;		\
98261415Scognet	ldr	tmp2, .Lcurpcpu;	\
99261415Scognet	add	tmp, tmp, tmp2;
100239268Sgonzo#else
101129198Scognet
102261415Scognet#define GET_PCPU(tmp, tmp2) \
103239268Sgonzo	ldr	tmp, .Lcurpcpu
104239268Sgonzo#endif
105129198Scognet
106262941Sian#ifdef VFP
107262941Sian	.fpu vfp	/* allow VFP instructions */
108262941Sian#endif
109262941Sian
/*
 * Literal pool for GET_PCPU: base of the per-CPU data array followed by
 * the size of one struct pcpu (the SMP variant multiplies cpuid by it).
 */
110261415Scognet.Lcurpcpu:
111295090Smmel	.word	_C_LABEL(__pcpu)
112261415Scognet	.word	PCPU_SIZE
/*
 * NOTE(review): .Lblocked_lock appears unreferenced in the visible code
 * of this file -- the sw1 spin loads the address with
 * `ldr r6, =blocked_lock' directly.  Possibly vestigial; confirm before
 * removing.
 */
113171780Scognet.Lblocked_lock:
114171780Scognet	.word	_C_LABEL(blocked_lock)
115239268Sgonzo
/*
 * void cpu_context_switch(ttb)
 *
 * Switch the MMU to a new translation table.
 *  r0 = new TTBR0 value (both callers load it from the new thread's
 *       PCB_PAGEDIR)
 *  r8 = current pcpu pointer -- IMPLICIT input, read below to select
 *       the branch-predictor flush method.  Both callers (cpu_throw,
 *       cpu_switch) set it up via GET_PCPU before calling here.
 * Clobbers: r0-r2 and the condition flags.  Returns via lr.
 *
 * The DSB/ISB barriers and the instruction order below are essential;
 * do not reorder.
 */
116298740SmmelENTRY(cpu_context_switch)
117280712Sian	DSB
118298740Smmel	/*
119298740Smmel	* We can directly switch between translation tables only when the
120298740Smmel	* size of the mapping for any given virtual address is the same
121298740Smmel	* in the old and new translation tables.
122298740Smmel	* Thus, we must switch to kernel pmap translation table as
123298740Smmel	* intermediate mapping because all sizes of these mappings are same
124298740Smmel	* (or unmapped). The same is true for switch from kernel pmap
125298740Smmel	* translation table to new pmap one.
126298740Smmel	*/
127298740Smmel	mov	r2, #(CPU_ASID_KERNEL)
128298740Smmel	ldr	r1, =(_C_LABEL(pmap_kern_ttb))
129298740Smmel	ldr	r1, [r1]
130298740Smmel	mcr	CP15_TTBR0(r1)		/* switch to kernel TTB */
131280712Sian	ISB
132298740Smmel	mcr	CP15_TLBIASID(r2)	/* flush not global TLBs */
133298740Smmel	DSB
134298740Smmel	mcr	CP15_TTBR0(r0)		/* switch to new TTB */
135298740Smmel	ISB
136280712Sian	/*
137298740Smmel	* We must flush not global TLBs again because PT2MAP mapping
138298740Smmel	* is different.
139298740Smmel	*/
140298740Smmel	mcr	CP15_TLBIASID(r2)	/* flush not global TLBs */
141298740Smmel	/*
142280712Sian	* Flush entire Branch Target Cache because of the branch predictor
143280712Sian	* is not architecturally invisible. See ARM Architecture Reference
144280712Sian	* Manual ARMv7-A and ARMv7-R edition, page B2-1264(65), Branch
145280712Sian	* predictors and Requirements for branch predictor maintenance
146280712Sian	* operations sections.
147280712Sian	*/
148331988Smmel	/*
149331988Smmel	 * Additionally, to mitigate mistrained branch predictor attack
150331988Smmel	 * we must invalidate it on affected CPUs. Unfortunately, BPIALL
151331988Smmel	 * is effectively NOP on Cortex-A15 so it needs special treatment.
152331988Smmel	 */
	/* r8 = current pcpu -- set up by the caller (see GET_PCPU). */
153331988Smmel	ldr	r0, [r8, #PC_BP_HARDEN_KIND]
154331988Smmel	cmp	r0, #PCPU_BP_HARDEN_KIND_ICIALLU
155331988Smmel	mcrne	CP15_BPIALL		/* Flush entire Branch Target Cache   */
156331988Smmel	mcreq	CP15_ICIALLU		/* This is the only way how to flush  */
157331988Smmel					/* Branch Target Cache on Cortex-A15. */
158280712Sian	DSB
159280712Sian	mov	pc, lr
160280712SianEND(cpu_context_switch)
161280712Sian
162280712Sian/*
163280712Sian * cpu_throw(oldtd, newtd)
164280712Sian *
165280712Sian * Remove current thread state, then select the next thread to run
166280712Sian * and load its state.  Does not return to its caller: control
167280712Sian * continues in the new thread via the shared sw1 path in cpu_switch.
168280712Sian * r0 = oldtd (may be NULL; if so the old pmap active-bit cleanup
169280712Sian *      below is skipped)
 * r1 = newtd (must not be NULL; checked only under INVARIANTS)
 */
170280712SianENTRY(cpu_throw)
171280712Sian	mov	r10, r0			/* r10 = oldtd */
172280712Sian	mov	r11, r1			/* r11 = newtd */
173280712Sian
174280712Sian#ifdef VFP				/* This thread is dying, disable */
175280712Sian	bl	_C_LABEL(vfp_discard)	/* VFP without preserving state. */
176280712Sian#endif
177280712Sian	GET_PCPU(r8, r9)		/* r8 = current pcpu */
178295090Smmel	ldr	r4, [r8, #PC_CPUID]	/* r4 = current cpu id */
179280712Sian
180295090Smmel	cmp	r10, #0			/* old thread? */
181280712Sian	beq	2f			/* no, skip */
182280712Sian
183280712Sian	/* Remove this CPU from the active list. */
184280712Sian	ldr	r5, [r8, #PC_CURPMAP]
185280712Sian	mov	r0, #(PM_ACTIVE)
186280712Sian	add	r5, r0			/* r5 = old pm_active */
187280712Sian
188280712Sian	/* Compute position and mask. */
189280712Sian#if _NCPUWORDS > 1
	/*
	 * Multi-word cpumask: r0 = (cpuid / 8) & ~3, the byte offset of
	 * the 32-bit word that holds this CPU's bit.
	 */
190280712Sian	lsr	r0, r4, #3
191280712Sian	bic	r0, #3
192280712Sian	add	r5, r0			/* r5 = position in old pm_active */
193280712Sian	mov	r2, #1
194280712Sian	and	r0, r4, #31
195280712Sian	lsl	r2, r0			/* r2 = mask */
196280712Sian#else
197280712Sian	mov	r2, #1
198280712Sian	lsl	r2, r4			/* r2 = mask */
199280712Sian#endif
200280712Sian	/* Clear cpu from old active list. */
201280712Sian#ifdef SMP
	/* ldrex/strex retry loop: atomic read-modify-write of the word. */
202280712Sian1:	ldrex	r0, [r5]
203280712Sian	bic	r0, r2
204280712Sian	strex	r1, r0, [r5]
205280712Sian	teq	r1, #0
206280712Sian	bne	1b
207280712Sian#else
208280712Sian	ldr	r0, [r5]
209280712Sian	bic	r0, r2
210280712Sian	str	r0, [r5]
211280712Sian#endif
212280712Sian
213280712Sian2:
214280712Sian#ifdef INVARIANTS
215280712Sian	cmp	r11, #0			/* new thread? */
216280712Sian	beq	badsw1			/* no, panic */
217280712Sian#endif
218280712Sian	ldr	r7, [r11, #(TD_PCB)]	/* r7 = new PCB */
219280712Sian
220280712Sian	/*
221280712Sian	 * Registers at this point
222280712Sian	 *   r4  = current cpu id
223280712Sian	 *   r7  = new PCB
224280712Sian	 *   r8  = current pcpu
225280712Sian	 *   r11 = newtd
226280712Sian	 */
227280712Sian
228280712Sian	/* MMU switch to new thread. */
229295090Smmel	ldr	r0, [r7, #(PCB_PAGEDIR)]
230280712Sian#ifdef INVARIANTS
231280712Sian	cmp	r0, #0			/* new thread? */
232280712Sian	beq	badsw4			/* no, panic */
233280712Sian#endif
	/* r0 = new TTB; relies on r8 = pcpu; clobbers r0-r2. */
234280712Sian	bl	_C_LABEL(cpu_context_switch)
235280712Sian
236280712Sian	/*
237280712Sian	 * Set new PMAP as current one.
238280712Sian	 * Insert cpu to new active list.
239280712Sian	 */
240280712Sian
241280712Sian	ldr	r6, [r11, #(TD_PROC)]	/* newtd->proc */
242280712Sian	ldr	r6, [r6, #(P_VMSPACE)]	/* newtd->proc->vmspace */
243280712Sian	add	r6, #VM_PMAP		/* newtd->proc->vmspace->pmap */
244280712Sian	str	r6, [r8, #PC_CURPMAP]	/* store to curpmap */
245280712Sian
246280712Sian	mov	r0, #PM_ACTIVE
247280712Sian	add	r6, r0			/* r6 = new pm_active */
248280712Sian
249280712Sian	/* compute position and mask */
250280712Sian#if _NCPUWORDS > 1
	/* Same word-offset/bit-mask computation as above. */
251280712Sian	lsr	r0, r4, #3
252280712Sian	bic	r0, #3
253280712Sian	add	r6, r0			/* r6 = position in new pm_active */
254280712Sian	mov	r2, #1
255280712Sian	and	r0, r4, #31
256280712Sian	lsl	r2, r0			/* r2 = mask */
257280712Sian#else
258280712Sian	mov	r2, #1
259280712Sian	lsl	r2, r4 			/* r2 = mask */
260280712Sian#endif
261280712Sian	/* Set cpu to new active list. */
262280712Sian#ifdef SMP
263280712Sian1:	ldrex	r0, [r6]
264280712Sian	orr	r0, r2
265280712Sian	strex	r1, r0, [r6]
266280712Sian	teq	r1, #0
267280712Sian	bne	1b
268280712Sian#else
269280712Sian	ldr	r0, [r6]
270280712Sian	orr	r0, r2
271280712Sian	str	r0, [r6]
272280712Sian#endif
273280712Sian	/*
274280712Sian	 * Registers at this point.
275280712Sian	 *   r7  = new PCB
276280712Sian	 *   r8  = current pcpu
277280712Sian	 *   r11 = newtd
278280712Sian	 * They must match the ones in sw1 position !!!
279280712Sian	 */
280280712Sian	DMB
281280712Sian	b	sw1	/* share new thread init with cpu_switch() */
282280712SianEND(cpu_throw)
283280712Sian
284280712Sian/*
285280712Sian * cpu_switch(oldtd, newtd, lock)
286280712Sian *
287280712Sian * Save the current thread state, then select the next thread to run
288280712Sian * and load its state.
289280712Sian * r0 = oldtd
290280712Sian * r1 = newtd
291280712Sian * r2 = lock (new lock for old thread)
292280712Sian */
293280712SianENTRY(cpu_switch)
294280712Sian	/* Interrupts are disabled. */
295280712Sian#ifdef INVARIANTS
296280712Sian	cmp	r0, #0			/* old thread? */
297280712Sian	beq	badsw2			/* no, panic */
298280712Sian#endif
299280712Sian	/* Save all the registers in the old thread's pcb. */
300280712Sian	ldr	r3, [r0, #(TD_PCB)]
301280712Sian	add	r3, #(PCB_R4)
	/*
	 * Note the save/restore asymmetry: the restore at sw1 loads
	 * {r4-r12, sp, pc}, so the word saved here from lr is what ends
	 * up in pc -- when oldtd is switched back in, cpu_switch appears
	 * simply to return to its caller.  The pc word stored here is
	 * not used by the restore path.
	 */
302280712Sian	stmia	r3, {r4-r12, sp, lr, pc}
	/* Save the userland TLS pointer (thread register 2). */
303307136Sed	mrc	CP15_TPIDRURW(r4)
304307136Sed	str	r4, [r3, #(PCB_TPIDRURW - PCB_R4)]
305280712Sian
306280712Sian#ifdef INVARIANTS
307280712Sian	cmp	r1, #0			/* new thread? */
308280712Sian	beq	badsw3			/* no, panic */
309280712Sian#endif
310280712Sian	/*
311280712Sian	 * Save arguments. Note that we can now use r0-r14 until
312280712Sian	 * it is time to restore them for the new thread. However,
313280712Sian	 * some registers are not safe over function call.
314280712Sian	 */
315295090Smmel	mov	r9, r2			/* r9 = lock */
316280712Sian	mov	r10, r0			/* r10 = oldtd */
317280712Sian	mov	r11, r1			/* r11 = newtd */
318280712Sian
319295090Smmel	GET_PCPU(r8, r3)		/* r8 = current PCPU */
320280712Sian	ldr	r7, [r11, #(TD_PCB)]	/* r7 = newtd->td_pcb */
321280712Sian
322280712Sian
323280712Sian
324280712Sian#ifdef VFP
	/* If oldtd used the VFP, save its state into the PCB (r0 = dest). */
325280712Sian	ldr	r3, [r10, #(TD_PCB)]
326280712Sian	fmrx	r0, fpexc		/* If the VFP is enabled */
327280712Sian	tst	r0, #(VFPEXC_EN)	/* the current thread has */
328280712Sian	movne	r1, #1			/* used it, so go save */
329280712Sian	addne	r0, r3, #(PCB_VFPSTATE)	/* the state into the PCB */
330280712Sian	blne	_C_LABEL(vfp_store)	/* and disable the VFP. */
331280712Sian#endif
332280712Sian
333280712Sian	/*
334280712Sian	 * MMU switch. If we're switching to a thread with the same
335280712Sian	 * address space as the outgoing one, we can skip the MMU switch.
336280712Sian	 */
337280712Sian	mrc	CP15_TTBR0(r1)		/* r1 = old TTB */
338280712Sian	ldr	r0, [r7, #(PCB_PAGEDIR)] /* r0 = new TTB */
339280712Sian	cmp	r0, r1			/* Switching to the TTB? */
340280712Sian	beq	sw0			/* same TTB, skip */
341280712Sian
342280712Sian#ifdef INVARIANTS
343280712Sian	cmp	r0, #0			/* new thread? */
344280712Sian	beq	badsw4			/* no, panic */
345280712Sian#endif
346280712Sian
	/* r0 = new TTB; relies on r8 = pcpu; clobbers r0-r2. */
347280712Sian	bl	cpu_context_switch	/* new TTB as argument */
348280712Sian
349280712Sian	/*
350280712Sian	 * Registers at this point
351280712Sian	 *   r7  = new PCB
352280712Sian	 *   r8  = current pcpu
353280712Sian	 *   r9  = lock
354280712Sian	 *   r10 = oldtd
355280712Sian	 *   r11 = newtd
356280712Sian	 */
357280712Sian
358280712Sian	/*
359280712Sian	 * Set new PMAP as current one.
360280712Sian	 * Update active list on PMAPs.
361280712Sian	 */
362280712Sian	ldr	r6, [r11, #TD_PROC]	/* newtd->proc */
363280712Sian	ldr	r6, [r6, #P_VMSPACE]	/* newtd->proc->vmspace */
364280712Sian	add	r6, #VM_PMAP		/* newtd->proc->vmspace->pmap */
365280712Sian
366295090Smmel	ldr	r5, [r8, #PC_CURPMAP]	/* get old curpmap */
367295090Smmel	str	r6, [r8, #PC_CURPMAP]	/* and save new one */
368280712Sian
369280712Sian	mov	r0, #PM_ACTIVE
370280712Sian	add	r5, r0			/* r5 = old pm_active */
371280712Sian	add	r6, r0			/* r6 = new pm_active */
372280712Sian
373280712Sian	/* Compute position and mask. */
374280712Sian	ldr	r4, [r8, #PC_CPUID]
375280712Sian#if _NCPUWORDS > 1
	/*
	 * Multi-word cpumask: r0 = (cpuid / 8) & ~3, the byte offset of
	 * the 32-bit word that holds this CPU's bit.
	 */
376280712Sian	lsr	r0, r4, #3
377280712Sian	bic	r0, #3
378280712Sian	add	r5, r0			/* r5 = position in old pm_active */
379280712Sian	add	r6, r0			/* r6 = position in new pm_active */
380280712Sian	mov	r2, #1
381280712Sian	and	r0, r4, #31
382280712Sian	lsl	r2, r0			/* r2 = mask */
383280712Sian#else
384280712Sian	mov	r2, #1
385280712Sian	lsl	r2, r4			/* r2 = mask */
386280712Sian#endif
387280712Sian	/* Clear cpu from old active list. */
388280712Sian#ifdef SMP
	/* ldrex/strex retry loop: atomic read-modify-write of the word. */
389280712Sian1:	ldrex	r0, [r5]
390280712Sian	bic	r0, r2
391280712Sian	strex	r1, r0, [r5]
392280712Sian	teq	r1, #0
393280712Sian	bne	1b
394280712Sian#else
395280712Sian	ldr	r0, [r5]
396280712Sian	bic	r0, r2
397280712Sian	str	r0, [r5]
398280712Sian#endif
399280712Sian	/* Set cpu to new active list. */
400280712Sian#ifdef SMP
401295090Smmel1:	ldrex	r0, [r6]
402280712Sian	orr	r0, r2
403280712Sian	strex	r1, r0, [r6]
404280712Sian	teq	r1, #0
405280712Sian	bne	1b
406280712Sian#else
407280712Sian	ldr	r0, [r6]
408280712Sian	orr	r0, r2
409280712Sian	str	r0, [r6]
410280712Sian#endif
411280712Sian
412280712Siansw0:
413280712Sian	/*
414280712Sian	 * Registers at this point
415280712Sian	 *   r7  = new PCB
416280712Sian	 *   r8  = current pcpu
417280712Sian	 *   r9  = lock
418280712Sian	 *   r10 = oldtd
419280712Sian	 *   r11 = newtd
420280712Sian	 */
421280712Sian
422280712Sian	/* Change the old thread lock. */
423280712Sian	add	r5, r10, #TD_LOCK
424280712Sian	DMB
425280712Sian1:	ldrex	r0, [r5]
426280712Sian	strex	r1, r9, [r5]
427280712Sian	teq	r1, #0
428280712Sian	bne	1b
429280712Sian	DMB
430280712Sian
431280712Siansw1:
	/* Discard any exclusive-monitor state left by the old context. */
432280712Sian	clrex
433280712Sian	/*
434280712Sian	 * Registers at this point
435280712Sian	 *   r7  = new PCB
436280712Sian	 *   r8  = current pcpu
437280712Sian	 *   r11 = newtd
438280712Sian	 */
439280712Sian
440280712Sian#if defined(SMP) && defined(SCHED_ULE)
441280712Sian	/*
442280712Sian	 * 386 and amd64 do the blocked lock test only for SMP and SCHED_ULE
443280712Sian	 * QQQ: What does it mean in reality and why is it done?
444280712Sian	 */
445280712Sian	ldr	r6, =blocked_lock
446280712Sian1:
	/* Spin until newtd's lock is no longer blocked_lock. */
447280712Sian	ldr	r3, [r11, #TD_LOCK]	/* atomic write regular read */
448280712Sian	cmp	r3, r6
449280712Sian	beq	1b
450280712Sian#endif
451280712Sian
452280712Sian	/* We have a new curthread now so make a note it */
453280712Sian	str	r11, [r8, #PC_CURTHREAD]
	/* curthread is also kept in TPIDRPRW for fast lookup. */
454280712Sian	mcr	CP15_TPIDRPRW(r11)
455280712Sian
456280712Sian	/* store pcb in per cpu structure */
457280712Sian	str	r7, [r8, #PC_CURPCB]
458280712Sian
459280712Sian	/*
460280712Sian	 * Restore all saved registers and return. Note that some saved
461301961Skib	 * registers can be changed when either cpu_fork(), cpu_copy_thread(),
462301961Skib	 * cpu_fork_kthread_handler(), or makectx() was called.
463307136Sed	 *
464307136Sed	 * The value of TPIDRURW is also written into TPIDRURO, as
465307136Sed	 * userspace still uses TPIDRURO, modifying it through
466307136Sed	 * sysarch(ARM_SET_TP, addr).
467280712Sian	 */
468307136Sed	ldr	r3, [r7, #PCB_TPIDRURW]
469307136Sed	mcr	CP15_TPIDRURW(r3)	/* write tls thread reg 2 */
470307136Sed	mcr	CP15_TPIDRURO(r3)	/* write tls thread reg 3 */
	/* pc is loaded from the saved-lr slot: "return" in the new thread. */
471280712Sian	add	r3, r7, #PCB_R4
472280712Sian	ldmia	r3, {r4-r12, sp, pc}
473280712Sian
474280712Sian#ifdef INVARIANTS
/* Panic stubs: load message, call panic(); the 1b loop is a backstop. */
475280712Sianbadsw1:
476280712Sian	ldr	r0, =sw1_panic_str
477280712Sian	bl	_C_LABEL(panic)
478280712Sian1:	nop
479295090Smmel	b	1b
480280712Sian
481280712Sianbadsw2:
482280712Sian	ldr	r0, =sw2_panic_str
483280712Sian	bl	_C_LABEL(panic)
484280712Sian1:	nop
485280712Sian	b	1b
486280712Sian
487280712Sianbadsw3:
488280712Sian	ldr	r0, =sw3_panic_str
489280712Sian	bl	_C_LABEL(panic)
490280712Sian1:	nop
491280712Sian	b	1b
492280712Sian
493280712Sianbadsw4:
494280712Sian	ldr	r0, =sw4_panic_str
495280712Sian	bl	_C_LABEL(panic)
496280712Sian1:	nop
497280712Sian	b	1b
498280712Sian
499280712Siansw1_panic_str:
500280712Sian	.asciz	"cpu_throw: no newthread supplied.\n"
501280712Siansw2_panic_str:
502280712Sian	.asciz	"cpu_switch: no curthread supplied.\n"
503280712Siansw3_panic_str:
504280712Sian	.asciz	"cpu_switch: no newthread supplied.\n"
505280712Siansw4_panic_str:
506280712Sian	.asciz	"cpu_switch: new pagedir is NULL.\n"
507280712Sian#endif
508280712SianEND(cpu_switch)
509