/* swtch.S, revision 245477 */
/*	$NetBSD: cpuswitch.S,v 1.41 2003/11/15 08:44:18 scw Exp $	*/

/*-
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpuswitch.S
 *
 * cpu switching functions
 *
 * Created      : 15/10/94
 *
 */
#include "assym.s"
#include "opt_sched.h"

#include <machine/asm.h>
#include <machine/asmacros.h>
#include <machine/armreg.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/swtch.S 245477 2013-01-15 22:09:11Z cognet $");

/* DACR encoding for "client" access to a domain (0b01 per 2-bit field). */
#define DOMAIN_CLIENT	0x01

#ifdef _ARM_ARCH_6
/*
 * On ARMv6+ the per-CPU data pointer lives in the privileged-only
 * thread ID register TPIDRPRW (CP15 c13, c0, 4).
 */
#define GET_PCPU(tmp) \
	mrc p15, 0, tmp, c13, c0, 4;
#else
/*
 * Pre-ARMv6: load the address of the static __pcpu structure from a
 * literal pool entry (presumably uniprocessor only — no per-CPU register).
 */
.Lcurpcpu:
	.word	_C_LABEL(__pcpu)

#define GET_PCPU(tmp) \
	ldr	tmp, .Lcurpcpu
#endif

/* Literal pool: addresses of the cpufuncs dispatch table and blocked_lock. */
.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)
.Lblocked_lock:
	.word	_C_LABEL(blocked_lock)
/*
 * void cpu_throw(struct thread *oldtd, struct thread *newtd)
 *
 * Switch to newtd without saving any state for oldtd (the old thread
 * is being discarded).  Restores newtd's callee-saved registers and
 * stack from its PCB and returns through the switch frame found on
 * newtd's kernel stack.
 *
 * In:	r0 = oldtd (only used to decide whether to discard VFP state)
 *	r1 = newtd
 */
ENTRY(cpu_throw)
	mov	r5, r1

	/*
	 * r0 = oldtd
	 * r5 = newtd
	 */

	GET_PCPU(r7)

#ifdef ARM_VFP_SUPPORT
	/*
	 * vfp_discard will clear pcpu->pc_vfpcthread and modify
	 * the control state as needed.
	 */
	ldr     r4, [r7, #(PC_VFPCTHREAD)]      /* this thread using vfp? */
	cmp     r0, r4
	bne     3f
	bl      _C_LABEL(vfp_discard)           /* yes, shut down vfp */
3:
#endif		/* ARM_VFP_SUPPORT */

	ldr	r7, [r5, #(TD_PCB)]		/* r7 = new thread's PCB */

	/* Switch to the new thread's context */

	ldr	r9, .Lcpufuncs
#if !defined(CPU_ARM11) && !defined(CPU_CORTEXA) && !defined(CPU_MV_PJ4B)
	/* Write back and invalidate the whole I/D cache before switching */
	mov	lr, pc
	ldr	pc, [r9, #CF_IDCACHE_WBINV_ALL]
#endif
	ldr	r0, [r7, #(PCB_PL1VEC)]
	ldr	r1, [r7, #(PCB_DACR)]
	/*
	 * r0 = Pointer to L1 slot for vector_page (or NULL)
	 * r1 = newtd's DACR
	 * r5 = newtd
	 * r7 = newtd's PCB
	 * r9 = cpufuncs
	 */

	/*
	 * Ensure the vector table is accessible by fixing up the new
	 * thread's L1 entry for the vector page.
	 */
	cmp	r0, #0			/* No need to fixup vector table? */
	ldrne	r3, [r0]		/* But if yes, fetch current value */
	ldrne	r2, [r7, #(PCB_L1VEC)]	/* Fetch new vector_page value */
	mcr	p15, 0, r1, c3, c0, 0	/* Update DACR for new context */
	cmpne	r3, r2			/* Stuffing the same value? */
	strne	r2, [r0]		/* Store if not. */

#ifdef PMAP_INCLUDE_PTE_SYNC
	/*
	 * Need to sync the cache to make sure that last store is
	 * visible to the MMU.  r0 still holds the slot address;
	 * r1 = length (4 bytes) for dcache_wb_range.
	 */
	movne	r1, #4
	movne	lr, pc
	ldrne	pc, [r9, #CF_DCACHE_WB_RANGE]
#endif /* PMAP_INCLUDE_PTE_SYNC */

	/*
	 * Note: We don't do the same optimisation as cpu_switch() with
	 * respect to avoiding flushing the TLB if we're switching to
	 * the same L1 since this process' VM space may be about to go
	 * away, so we don't want *any* turds left in the TLB.
	 */

	/* Switch the MMU to the new process' L1 table */
	ldr	r0, [r7, #(PCB_PAGEDIR)]
	mov	lr, pc
	ldr	pc, [r9, #CF_CONTEXT_SWITCH]

	/* Restore all the saved registers (r13 becomes newtd's stack) */
#ifndef _ARM_ARCH_5E
	add	r1, r7, #PCB_R8
	ldmia	r1, {r8-r13}
#else
	ldr	r8, [r7, #(PCB_R8)]
	ldr	r9, [r7, #(PCB_R9)]
	ldr	r10, [r7, #(PCB_R10)]
	ldr	r11, [r7, #(PCB_R11)]
	ldr	r12, [r7, #(PCB_R12)]
	ldr	r13, [r7, #(PCB_SP)]
#endif

	/* We have a new curthread now so make a note of it */
	GET_CURTHREAD_PTR(r6)
	str	r5, [r6]

	/* Set the new thread pointer (and RAS range where applicable) */
	ldr	r6, [r5, #(TD_MD + MD_TP)]
#ifdef ARM_TP_ADDRESS
	ldr	r4, =ARM_TP_ADDRESS
	str	r6, [r4]
	ldr	r6, [r5, #(TD_MD + MD_RAS_START)]
	str	r6, [r4, #4] /* ARM_RAS_START */
	ldr	r6, [r5, #(TD_MD + MD_RAS_END)]
	str	r6, [r4, #8] /* ARM_RAS_END */
#else
	mcr p15, 0, r6, c13, c0, 3	/* TPIDRURO (user read-only TLS reg) */
#endif
	/* Hook in a new pcb */
	GET_PCPU(r6)
	str	r7, [r6, #PC_CURPCB]

	/* Pop the switch frame cpu_switch()/savectx() left on newtd's stack */
	ldmfd	sp!, {r4-r7, pc}
215
/*
 * void cpu_switch(struct thread *oldtd, struct thread *newtd,
 *		   struct mtx *mtx)
 *
 * Save oldtd's kernel context into its PCB, switch the MMU context if
 * the address space or DACR differs, then restore newtd's context and
 * return through the switch frame on newtd's kernel stack.  The mutex
 * argument is stored into oldtd->td_lock to release the old thread.
 *
 * In:	r0 = oldtd, r1 = newtd, r2 = mutex to hand back to oldtd
 */
ENTRY(cpu_switch)
	stmfd	sp!, {r4-r7, lr}
	mov	r6, r2 /* Save the mutex */

.Lswitch_resume:
	/* rem: r0 = old thread */
	/* rem: interrupts are disabled */

	/* Process is now on a processor. */
	/* We have a new curthread now so make a note of it */
	GET_CURTHREAD_PTR(r7)
	str	r1, [r7]

	/* Hook in a new pcb */
	GET_PCPU(r7)
	ldr	r2, [r1, #TD_PCB]
	str	r2, [r7, #PC_CURPCB]

	/* rem: r1 = new thread */

	/* Stage two : Save old context */

	/* Get the PCB for the old thread. */
	ldr	r2, [r0, #(TD_PCB)]
	mov	r4, r0 /* Save the old thread. */

	/* Save callee-saved registers r8-r13 in the old thread's pcb */
#ifndef _ARM_ARCH_5E
	add	r7, r2, #(PCB_R8)
	stmia	r7, {r8-r13}
#else
	strd	r8, [r2, #(PCB_R8)]
	strd	r10, [r2, #(PCB_R10)]
	strd	r12, [r2, #(PCB_R12)]
#endif
	str	pc, [r2, #(PCB_PC)]

	/*
	 * NOTE: We can now use r8-r13 until it is time to restore
	 * them for the new process.
	 */
#ifdef ARM_TP_ADDRESS
	/* Store the old tp; ARM_TP_ADDRESS also holds the RAS range */
	ldr	r3, =ARM_TP_ADDRESS
	ldr	r9, [r3]
	str	r9, [r0, #(TD_MD + MD_TP)]
	ldr	r9, [r3, #4]
	str	r9, [r0, #(TD_MD + MD_RAS_START)]
	ldr	r9, [r3, #8]
	str	r9, [r0, #(TD_MD + MD_RAS_END)]

	/* Set the new tp */
	ldr	r9, [r1, #(TD_MD + MD_TP)]
	str	r9, [r3]
	ldr	r9, [r1, #(TD_MD + MD_RAS_START)]
	str	r9, [r3, #4]
	ldr	r9, [r1, #(TD_MD + MD_RAS_END)]
	str	r9, [r3, #8]
#else
	/* Store the old tp (TPIDRURO) */
	mrc p15, 0, r9, c13, c0, 3
	str	r9, [r0, #(TD_MD + MD_TP)]

	/* Set the new tp */
	ldr	r9, [r1, #(TD_MD + MD_TP)]
	mcr p15, 0, r9, c13, c0, 3
#endif

	/* Get the PCB for the new thread in r9 */
	ldr	r9, [r1, #(TD_PCB)]

        mrs	r3, cpsr
	/*
	 * Save the undefined-mode banked sp.  A single orr switches
	 * modes, since PSR_SVC32_MODE|PSR_UND32_MODE == PSR_UND32_MODE.
	 */
	orr	r8, r3, #(PSR_UND32_MODE)
        msr	cpsr_c, r8

	str	sp, [r2, #(PCB_UND_SP)]

        msr	cpsr_c, r3		/* Restore the old mode */
	/* rem: r2 = old PCB */
	/* rem: r9 = new PCB */

#ifdef ARM_VFP_SUPPORT
	/*
	 * vfp_store will clear pcpu->pc_vfpcthread, save
	 * registers and state, and modify the control as needed.
	 * a future exception will bounce the backup settings in the fp unit.
	 * XXX vfp_store can't change r4
	 */
	GET_PCPU(r7)
	ldr	r8, [r7, #(PC_VFPCTHREAD)]
	cmp	r4, r8				/* old thread used vfp? */
	bne	1f				/* no, don't save */
	cmp	r1, r4				/* same thread ? */
	beq	1f				/* yes, skip vfp store */
#ifdef SMP
	ldr	r8, [r7, #(PC_CPU)]		/* last used on this cpu? */
	ldr	r3, [r2, #(PCB_VFPCPU)]
	cmp	r8, r3		/* last cpu to use these registers? */
	bne	1f		/* no. these values are stale */
#endif
	add	r0, r2, #(PCB_VFPSTATE)
	bl	_C_LABEL(vfp_store)
1:
#endif		/* ARM_VFP_SUPPORT */

	/* r1 now free! */

	/* Third phase : restore saved context */

	/* rem: r2 = old PCB */
	/* rem: r9 = new PCB */

	ldr	r5, [r9, #(PCB_DACR)]		/* r5 = new DACR */
	mov	r2, #DOMAIN_CLIENT
	cmp     r5, r2, lsl #(PMAP_DOMAIN_KERNEL * 2) /* Sw to kernel thread? */
	beq     .Lcs_context_switched        /* Yup. Don't flush cache */
	mrc	p15, 0, r0, c3, c0, 0		/* r0 = old DACR */
	/*
	 * Get the new L1 table pointer into r11.  If we're switching to
	 * a thread with the same address space as the outgoing one, we can
	 * skip the cache purge and the TTB load.
	 *
	 * To avoid data dep stalls that would happen anyway, we try
	 * and get some useful work done in the mean time.
	 */
	mrc	p15, 0, r10, c2, c0, 0		/* r10 = old L1 */
	ldr	r11, [r9, #(PCB_PAGEDIR)]	/* r11 = new L1 */


	teq	r10, r11			/* Same L1? */
	cmpeq	r0, r5				/* Same DACR? */
	beq	.Lcs_context_switched		/* yes! */

#if !defined(CPU_ARM11) && !defined(CPU_CORTEXA) && !defined(CPU_MV_PJ4B)
	/*
	 * Definitely need to flush the cache.
	 */

	ldr	r1, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r1, #CF_IDCACHE_WBINV_ALL]
#endif
.Lcs_cache_purge_skipped:
	/* rem: r6 = lock */
	/* rem: r9 = new PCB */
	/* rem: r10 = old L1 */
	/* rem: r11 = new L1 */

	mov	r2, #0x00000000
	ldr	r7, [r9, #(PCB_PL1VEC)]

	/*
	 * Ensure the vector table is accessible by fixing up the L1
	 */
	cmp	r7, #0			/* No need to fixup vector table? */
	ldrne	r2, [r7]		/* But if yes, fetch current value */
	ldrne	r0, [r9, #(PCB_L1VEC)]	/* Fetch new vector_page value */
	mcr	p15, 0, r5, c3, c0, 0	/* Update DACR for new context */
	cmpne	r2, r0			/* Stuffing the same value? */
#ifndef PMAP_INCLUDE_PTE_SYNC
	strne	r0, [r7]		/* Nope, update it */
#else
	beq	.Lcs_same_vector
	str	r0, [r7]		/* Otherwise, update it */

	/*
	 * Need to sync the cache to make sure that last store is
	 * visible to the MMU.
	 */
	ldr	r2, .Lcpufuncs
	mov	r0, r7
	mov	r1, #4
	mov	lr, pc
	ldr	pc, [r2, #CF_DCACHE_WB_RANGE]

.Lcs_same_vector:
#endif /* PMAP_INCLUDE_PTE_SYNC */

	cmp	r10, r11		/* Switching to the same L1? */
	ldr	r10, .Lcpufuncs
	beq	.Lcs_same_l1		/* Yup. */
	/*
	 * Do a full context switch, including full TLB flush.
	 */
	mov	r0, r11
	mov	lr, pc
	ldr	pc, [r10, #CF_CONTEXT_SWITCH]

	b	.Lcs_context_switched

	/*
	 * We're switching to a different process in the same L1.
	 * In this situation, we only need to flush the TLB for the
	 * vector_page mapping, and even then only if r7 is non-NULL.
	 */
.Lcs_same_l1:
	cmp	r7, #0
	movne	r0, #0			/* We *know* vector_page's VA is 0x0 */
	movne	lr, pc
	ldrne	pc, [r10, #CF_TLB_FLUSHID_SE]

.Lcs_context_switched:

	/* Release the old thread: hand the mutex back via td_lock */
	str	r6, [r4, #TD_LOCK]
#if defined(SCHED_ULE) && defined(SMP)
	/* Spin until the new thread's lock is no longer blocked_lock */
	ldr	r6, .Lblocked_lock
	GET_CURTHREAD_PTR(r3)

1:
	ldr	r4, [r3, #TD_LOCK]
	cmp	r4, r6
	beq	1b
#endif

	/* XXXSCW: Safe to re-enable FIQs here */

	/* rem: r9 = new PCB */

        mrs	r3, cpsr
	/*
	 * Restore the undefined-mode banked sp.  A single orr switches
	 * modes, since PSR_SVC32_MODE|PSR_UND32_MODE == PSR_UND32_MODE.
	 */
	orr	r2, r3, #(PSR_UND32_MODE)
	msr	cpsr_c, r2

	ldr	sp, [r9, #(PCB_UND_SP)]

        msr	cpsr_c, r3		/* Restore the old mode */
	/* Restore all the saved registers */
#ifndef _ARM_ARCH_5E
	add	r7, r9, #PCB_R8
	ldmia	r7, {r8-r13}
	sub	r7, r7, #PCB_R8		/* restore PCB pointer */
#else
	mov	r7, r9
	ldr	r8, [r7, #(PCB_R8)]
	ldr	r9, [r7, #(PCB_R9)]
	ldr	r10, [r7, #(PCB_R10)]
	ldr	r11, [r7, #(PCB_R11)]
	ldr	r12, [r7, #(PCB_R12)]
	ldr	r13, [r7, #(PCB_SP)]
#endif

	/* rem: r6 = lock */
	/* rem: r7 = new pcb */

#ifdef ARMFPE
	add	r0, r7, #(USER_SIZE) & 0x00ff
	add	r0, r0, #(USER_SIZE) & 0xff00
	bl	_C_LABEL(arm_fpe_core_changecontext)
#endif

	/* rem: r6 = lock */
	/* rem: r7 = new PCB */

.Lswitch_return:

	/*
	 * Pull the registers that got pushed when either savectx() or
	 * cpu_switch() was called and return.
	 */
	ldmfd	sp!, {r4-r7, pc}
#ifdef DIAGNOSTIC
.Lswitch_bogons:
	adr	r0, .Lswitch_panic_str
	bl	_C_LABEL(panic)
1:	nop
	b	1b

.Lswitch_panic_str:
	.asciz	"cpu_switch: sched_qs empty with non-zero sched_whichqs!\n"
#endif
/*
 * void savectx(struct pcb *pcb)
 *
 * Save the current kernel context (callee-saved registers r8-r13,
 * plus VFP state when this CPU's VFP state belongs to curthread)
 * into the given pcb.
 *
 * In:	r0 = pcb
 */
ENTRY(savectx)
	stmfd   sp!, {r4-r7, lr}
	/*
	 * r0 = pcb
	 */
	/* Store all the registers in the process's pcb */
	add	r2, r0, #(PCB_R8)
	stmia	r2, {r8-r13}
#ifdef ARM_VFP_SUPPORT
	/*
	 * vfp_store will clear pcpu->pc_vfpcthread, save
	 * registers and state, and modify the control as needed.
	 * a future exception will bounce the backup settings in the fp unit.
	 */
	GET_PCPU(r7)
	ldr	r4, [r7, #(PC_VFPCTHREAD)]      /* vfp thread */
	ldr	r2, [r7, #(PC_CURTHREAD)]       /* current thread */
	cmp	r4, r2
	bne	1f
#ifdef SMP
	ldr	r2, [r7, #(PC_CPU)]     /* last used on this cpu? */
	ldr	r3, [r0, #(PCB_VFPCPU)]
	cmp	r2, r3
	bne	1f              /* no. these values are stale */
#endif
	add	r0, r0, #(PCB_VFPSTATE)
	bl	_C_LABEL(vfp_store)
1:
#endif		/* ARM_VFP_SUPPORT */
	ldmfd	sp!, {r4-r7, pc}
532
/*
 * fork_trampoline()
 *
 * First code executed when a newly-forked thread is switched to.
 * Calls fork_exit(r4, r5, sp) — r4/r5 presumably hold the callout
 * function and its argument, set up by the fork code (see cpu_fork;
 * not visible in this file) — then returns to user mode through the
 * trapframe on the stack.
 */
ENTRY(fork_trampoline)
	mov	r1, r5
	mov	r2, sp			/* r2 = trapframe pointer */
	mov	r0, r4
	mov	fp, #0			/* clear frame pointer */
	bl	_C_LABEL(fork_exit)
	/* Disable IRQs and FIQs before returning to user mode */
	mrs	r0, cpsr
	orr	r0, r0, #(I32_bit|F32_bit)
	msr	cpsr_c, r0
	DO_AST				/* handle any pending AST */
	PULLFRAME			/* restore the user-mode trapframe */

	movs	pc, lr			/* Exit to user mode */

AST_LOCALS
549