/*	$NetBSD: cpuswitch.S,v 1.41 2003/11/15 08:44:18 scw Exp $	*/

/*-
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpuswitch.S
 *
 * cpu switching functions
 *
 * Created      : 15/10/94
 *
 */
#include "assym.s"
#include "opt_sched.h"

#include <machine/acle-compat.h>
#include <machine/asm.h>
#include <machine/asmacros.h>
#include <machine/armreg.h>
#include <machine/vfp.h>

__FBSDID("$FreeBSD: head/sys/arm/arm/swtch-v6.S 295066 2016-01-30 08:02:12Z mmel $");

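/*
 * GET_PCPU(tmp, tmp2) leaves the address of this CPU's per-CPU area in
 * tmp. On SMP the CPU index comes from the low four bits (Aff0) of the
 * MPIDR register (CP15 c0, c0, 5); it is scaled by PCPU_SIZE and added
 * to &__pcpu[0], with tmp2 as scratch. On UP only __pcpu[0] exists.
 */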
#if __ARM_ARCH >= 6 && defined(SMP)
#define GET_PCPU(tmp, tmp2) \
	mrc	p15, 0, tmp, c0, c0, 5;	\
	and	tmp, tmp, #0xf;		\
	ldr	tmp2, .Lcurpcpu+4;	\
	mul	tmp, tmp, tmp2;		\
	ldr	tmp2, .Lcurpcpu;	\
	add	tmp, tmp, tmp2;
#else

#define GET_PCPU(tmp, tmp2) \
	ldr	tmp, .Lcurpcpu
#endif

#ifdef VFP
	.fpu vfp	/* allow VFP instructions */
#endif

.Lcurpcpu:
	.word	_C_LABEL(__pcpu)
	.word	PCPU_SIZE
.Lblocked_lock:
	.word	_C_LABEL(blocked_lock)

#include <machine/sysreg.h>
ENTRY(cpu_context_switch)	/* QQQ: What about a macro instead of a function? */
	DSB
	mcr	CP15_TTBR0(r0)		/* set the new TTB */
	ISB
	mov	r0, #(CPU_ASID_KERNEL)
	mcr	CP15_TLBIASID(r0)	/* flush non-global TLB entries */
	/*
	 * Flush the entire Branch Target Cache because the branch predictor
	 * is not architecturally invisible. See ARM Architecture Reference
	 * Manual ARMv7-A and ARMv7-R edition, page B2-1264(65), sections
	 * "Branch predictors" and "Requirements for branch predictor
	 * maintenance operations".
	 *
	 * QQQ: The predictor is virtually addressed and holds virtual target
	 *      addresses. Therefore, if the mapping is changed, the predictor
	 *      cache must be flushed. The flush is part of the entire i-cache
	 *      invalidation, which is always done when a code mapping changes.
	 *      So this is the only place in the kernel where a standalone
	 *      predictor flush must be executed (except for the self-modifying
	 *      code case).
	 */
	mcr	CP15_BPIALL		/* and flush the entire Branch Target Cache */
	DSB
	mov	pc, lr
END(cpu_context_switch)

/*
 * cpu_throw(oldtd, newtd)
 *
 * Remove the current thread's state, then select the next thread to run
 * and load its state.
 * r0 = oldtd
 * r1 = newtd
 */
ENTRY(cpu_throw)
	mov	r10, r0			/* r10 = oldtd */
	mov	r11, r1			/* r11 = newtd */

#ifdef VFP				/* This thread is dying, disable */
	bl	_C_LABEL(vfp_discard)	/* VFP without preserving state. */
#endif
	GET_PCPU(r8, r9)		/* r8 = current pcpu */
	ldr	r4, [r8, #PC_CPUID]	/* r4 = current cpu id */

	cmp	r10, #0			/* old thread? */
	beq	2f			/* no, skip */

	/* Remove this CPU from the active list. */
	ldr	r5, [r8, #PC_CURPMAP]
	mov	r0, #(PM_ACTIVE)
	add	r5, r0			/* r5 = old pm_active */

	/* Compute position and mask. */
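	/*
	 * pm_active is a bitmap indexed by cpuid. The word holding the bit
	 * lives at byte offset (cpuid / 32) * 4, computed below as
	 * (cpuid >> 3) & ~3; the bit within that word is cpuid & 31. With
	 * a single-word cpuset (_NCPUWORDS == 1) only the mask is needed.
	 */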
#if _NCPUWORDS > 1
	lsr	r0, r4, #3
	bic	r0, #3
	add	r5, r0			/* r5 = position in old pm_active */
	mov	r2, #1
	and	r0, r4, #31
	lsl	r2, r0			/* r2 = mask */
#else
	mov	r2, #1
	lsl	r2, r4			/* r2 = mask */
#endif
	/* Clear cpu from old active list. */
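	/*
	 * Classic load-exclusive/store-exclusive loop on SMP: strex only
	 * writes if the exclusive monitor still holds the reservation from
	 * ldrex and leaves 0 in its status register on success, so the bit
	 * is cleared atomically with respect to other CPUs.
	 */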
#ifdef SMP
1:	ldrex	r0, [r5]
	bic	r0, r2
	strex	r1, r0, [r5]
	teq	r1, #0
	bne	1b
#else
	ldr	r0, [r5]
	bic	r0, r2
	str	r0, [r5]
#endif

2:
#ifdef INVARIANTS
	cmp	r11, #0			/* new thread? */
	beq	badsw1			/* no, panic */
#endif
	ldr	r7, [r11, #(TD_PCB)]	/* r7 = new PCB */

	/*
	 * Registers at this point
	 *   r4  = current cpu id
	 *   r7  = new PCB
	 *   r8  = current pcpu
	 *   r11 = newtd
	 */

	/* MMU switch to new thread. */
	ldr	r0, [r7, #(PCB_PAGEDIR)]
#ifdef INVARIANTS
	cmp	r0, #0			/* new thread? */
	beq	badsw4			/* no, panic */
#endif
	bl	_C_LABEL(cpu_context_switch)

	/*
	 * Set the new PMAP as the current one.
	 * Insert the cpu into the new active list.
	 */

	ldr	r6, [r11, #(TD_PROC)]	/* newtd->proc */
	ldr	r6, [r6, #(P_VMSPACE)]	/* newtd->proc->vmspace */
	add	r6, #VM_PMAP		/* newtd->proc->vmspace->pmap */
	str	r6, [r8, #PC_CURPMAP]	/* store to curpmap */

	mov	r0, #PM_ACTIVE
	add	r6, r0			/* r6 = new pm_active */

	/* Compute position and mask. */
#if _NCPUWORDS > 1
	lsr	r0, r4, #3
	bic	r0, #3
	add	r6, r0			/* r6 = position in new pm_active */
	mov	r2, #1
	and	r0, r4, #31
	lsl	r2, r0			/* r2 = mask */
#else
	mov	r2, #1
	lsl	r2, r4			/* r2 = mask */
#endif
	/* Set cpu to new active list. */
#ifdef SMP
1:	ldrex	r0, [r6]
	orr	r0, r2
	strex	r1, r0, [r6]
	teq	r1, #0
	bne	1b
#else
	ldr	r0, [r6]
	orr	r0, r2
	str	r0, [r6]
#endif
	/*
	 * Registers at this point:
	 *   r7  = new PCB
	 *   r8  = current pcpu
	 *   r11 = newtd
	 * They must match the ones expected at the sw1 label!
	 */
	DMB
	b	sw1	/* share new thread init with cpu_switch() */
END(cpu_throw)

/*
 * cpu_switch(oldtd, newtd, lock)
 *
 * Save the current thread state, then select the next thread to run
 * and load its state.
 * r0 = oldtd
 * r1 = newtd
 * r2 = lock (new lock for old thread)
 */
ENTRY(cpu_switch)
	/* Interrupts are disabled. */
#ifdef INVARIANTS
	cmp	r0, #0			/* old thread? */
	beq	badsw2			/* no, panic */
#endif
	/* Save all the registers in the old thread's pcb. */
	ldr	r3, [r0, #(TD_PCB)]
	add	r3, #(PCB_R4)
	stmia	r3, {r4-r12, sp, lr, pc}
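	/*
	 * This stores r4-r12, sp, lr and pc into consecutive PCB slots
	 * starting at PCB_R4. r0-r3 need not be saved: they are scratch
	 * under the AAPCS, so the caller, mi_switch(), does not expect
	 * them to survive. See the note at the restore in sw1 for how
	 * the lr and pc slots are used.
	 */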

#ifdef INVARIANTS
	cmp	r1, #0			/* new thread? */
	beq	badsw3			/* no, panic */
#endif
	/*
	 * Save arguments. Note that we can now use r0-r14 until
	 * it is time to restore them for the new thread. However,
	 * some registers are not preserved across function calls.
	 */
	mov	r9, r2			/* r9  = lock */
	mov	r10, r0			/* r10 = oldtd */
	mov	r11, r1			/* r11 = newtd */

	GET_PCPU(r8, r3)		/* r8  = current PCPU */
	ldr	r7, [r11, #(TD_PCB)]	/* r7 = newtd->td_pcb */

#ifdef VFP
	ldr	r3, [r10, #(TD_PCB)]
	fmrx	r0, fpexc		/* If the VFP is enabled */
	tst	r0, #(VFPEXC_EN)	/* the current thread has */
	movne	r1, #1			/* used it, so go save */
	addne	r0, r3, #(PCB_VFPSTATE)	/* the state into the PCB */
	blne	_C_LABEL(vfp_store)	/* and disable the VFP. */
#endif

	/*
	 * MMU switch. If we're switching to a thread with the same
	 * address space as the outgoing one, we can skip the MMU switch.
	 */
	mrc	CP15_TTBR0(r1)		/* r1 = old TTB */
	ldr	r0, [r7, #(PCB_PAGEDIR)] /* r0 = new TTB */
	cmp	r0, r1			/* switching to the same TTB? */
	beq	sw0			/* yes, skip the MMU switch */

#ifdef INVARIANTS
	cmp	r0, #0			/* new thread? */
	beq	badsw4			/* no, panic */
#endif

	bl	cpu_context_switch	/* new TTB as argument */

	/*
	 * Registers at this point
	 *   r7  = new PCB
	 *   r8  = current pcpu
	 *   r9  = lock
	 *   r10 = oldtd
	 *   r11 = newtd
	 */

	/*
	 * Set the new PMAP as the current one.
	 * Update the active lists on the PMAPs.
	 */
	ldr	r6, [r11, #TD_PROC]	/* newtd->proc */
	ldr	r6, [r6, #P_VMSPACE]	/* newtd->proc->vmspace */
	add	r6, #VM_PMAP		/* newtd->proc->vmspace->pmap */

	ldr	r5, [r8, #PC_CURPMAP]	/* get old curpmap */
	str	r6, [r8, #PC_CURPMAP]	/* and save new one */

	mov	r0, #PM_ACTIVE
	add	r5, r0			/* r5 = old pm_active */
	add	r6, r0			/* r6 = new pm_active */

	/* Compute position and mask. */
	ldr	r4, [r8, #PC_CPUID]
#if _NCPUWORDS > 1
	lsr	r0, r4, #3
	bic	r0, #3
	add	r5, r0			/* r5 = position in old pm_active */
	add	r6, r0			/* r6 = position in new pm_active */
	mov	r2, #1
	and	r0, r4, #31
	lsl	r2, r0			/* r2 = mask */
#else
	mov	r2, #1
	lsl	r2, r4			/* r2 = mask */
#endif
	/* Clear cpu from old active list. */
#ifdef SMP
1:	ldrex	r0, [r5]
	bic	r0, r2
	strex	r1, r0, [r5]
	teq	r1, #0
	bne	1b
#else
	ldr	r0, [r5]
	bic	r0, r2
	str	r0, [r5]
#endif
	/* Set cpu to new active list. */
#ifdef SMP
1:	ldrex	r0, [r6]
	orr	r0, r2
	strex	r1, r0, [r6]
	teq	r1, #0
	bne	1b
#else
	ldr	r0, [r6]
	orr	r0, r2
	str	r0, [r6]
#endif

sw0:
	/*
	 * Registers at this point
	 *   r7  = new PCB
	 *   r8  = current pcpu
	 *   r9  = lock
	 *   r10 = oldtd
	 *   r11 = newtd
	 */

	/* Change the old thread lock. */
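	/*
	 * This store publishes the switched-out thread: once the new lock
	 * value is visible, another CPU may pick oldtd up and run it. The
	 * DMBs order all of the state saved above before the store, and
	 * the store before what follows. The exclusive pair presumably
	 * keeps the update atomic with respect to other ldrex/strex users
	 * of td_lock.
	 */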
	add	r5, r10, #TD_LOCK
	DMB
1:	ldrex	r0, [r5]
	strex	r1, r9, [r5]
	teq	r1, #0
	bne	1b
	DMB

sw1:
	clrex				/* drop any stale exclusive-monitor reservation */
	/*
	 * Registers at this point
	 *   r7  = new PCB
	 *   r8  = current pcpu
	 *   r11 = newtd
	 */

#if defined(SMP) && defined(SCHED_ULE)
	/*
	 * i386 and amd64 do the blocked lock test only for SMP and SCHED_ULE.
	 * QQQ: What does it mean in reality and why is it done?
	 */
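	/*
	 * A likely answer: when ULE migrates a thread, its td_lock points
	 * at blocked_lock until the CPU that switched away from it has
	 * finished saving its context. Spinning here until td_lock moves
	 * on ensures we never resume a thread whose state is still being
	 * written on another CPU.
	 */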
	ldr	r6, =blocked_lock
1:
	ldr	r3, [r11, #TD_LOCK]	/* writes are atomic; a plain read suffices */
	cmp	r3, r6
	beq	1b
#endif
	/* Set the new tls. */
	ldr	r0, [r11, #(TD_MD + MD_TP)]
	mcr	CP15_TPIDRURO(r0)	/* write the user read-only thread ID register */

	/* We have a new curthread now, so make a note of it. */
	str	r11, [r8, #PC_CURTHREAD]
	mcr	CP15_TPIDRPRW(r11)
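	/*
	 * TPIDRPRW is the privileged-only thread ID register; keeping
	 * curthread in it lets the kernel fetch curthread with a single
	 * mrc instead of going through the per-CPU structure.
	 */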

	/* Store the pcb in the per-cpu structure. */
	str	r7, [r8, #PC_CURPCB]

	/*
	 * Restore all saved registers and return. Note that some saved
	 * registers may have been changed by cpu_fork(), cpu_set_upcall(),
	 * cpu_set_fork_handler(), or makectx().
	 */
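	/*
	 * The ldmia loads one word less than the stmia in the save path
	 * stored: here pc receives the word in the saved-lr slot, so the
	 * new thread resumes at its saved return address. The separately
	 * saved pc slot appears to exist only for debuggers and stack
	 * unwinders.
	 */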
	add	r3, r7, #PCB_R4
	ldmia	r3, {r4-r12, sp, pc}

#ifdef INVARIANTS
badsw1:
	ldr	r0, =sw1_panic_str
	bl	_C_LABEL(panic)
1:	nop
	b	1b

badsw2:
	ldr	r0, =sw2_panic_str
	bl	_C_LABEL(panic)
1:	nop
	b	1b

badsw3:
	ldr	r0, =sw3_panic_str
	bl	_C_LABEL(panic)
1:	nop
	b	1b

badsw4:
	ldr	r0, =sw4_panic_str
	bl	_C_LABEL(panic)
1:	nop
	b	1b

sw1_panic_str:
	.asciz	"cpu_throw: no newthread supplied.\n"
sw2_panic_str:
	.asciz	"cpu_switch: no curthread supplied.\n"
sw3_panic_str:
	.asciz	"cpu_switch: no newthread supplied.\n"
sw4_panic_str:
	.asciz	"cpu_switch: new pagedir is NULL.\n"
#endif
END(cpu_switch)