1/*
2 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <sys/appleapiopts.h>
30#include <ppc/asm.h>					// EXT, LEXT
31#include <machine/cpu_capabilities.h>
32#include <machine/commpage.h>
33
34        .text
35        .align	2
36
37#define	MP_SPIN_TRIES   1000
38
39
40/* The user mode spinlock library.  There are many versions,
41 * in order to take advantage of a few special cases:
42 *	- no barrier instructions (SYNC,ISYNC) are needed if UP
43 *	- 64-bit processors can use LWSYNC instead of SYNC (if MP)
44 *  - 32-bit processors can use ISYNC/EIEIO instead of SYNC (if MP)
45 *	- branch hints appropriate to the processor (+ vs ++ etc)
46 *	- potentially custom relinquish strategies (not used at present)
47 *	- fixes for errata as necessary
48 *
49 * The convention for lockwords is that 0==free and -1==locked.
50 */
51
52
/* int spinlock_try(int *lockword) — 32-bit multiprocessor variant.
 *
 * In:   r3 = ptr to lockword (0 == free, -1 == locked)
 * Out:  r3 = 1 if the lock was acquired, 0 if it was already held
 * Uses: r4-r6 as scratch
 *
 * Non-blocking: fails immediately if the lock is held; only loops if
 * the reservation set by lwarx is lost before the stwcx. completes.
 */
spinlock_32_try_mp:
		mr		r5, r3		// free up r3 to hold the boolean result
		li		r3, 1		// preload "success" return value
1:
        lwarx	r4,0,r5		// load lockword and set reservation
		li		r6,-1		// locked == -1
        cmpwi	r4,0		// lock free?
        bne-	2f			// no - fail without spinning
        stwcx.	r6,0,r5		// conditionally store -1 to claim the lock
        isync				// cancel speculative execution
        beqlr+				// store succeeded: return with r3 == 1
        b		1b			// reservation lost, retry
2:
        li		r3,0        // we did not get the lock
        blr

	// Descriptor args: (routine, commpage address, required CPU feature
	// bits, prohibited bits, target page).  Prohibiting k64Bit+kUP selects
	// this variant for 32-bit multiprocessors.
	COMMPAGE_DESCRIPTOR(spinlock_32_try_mp,_COMM_PAGE_SPINLOCK_TRY,0,k64Bit+kUP,kCommPage32)
70
71
/* int spinlock_try(int *lockword) — 32-bit uniprocessor variant.
 *
 * In:   r3 = ptr to lockword (0 == free, -1 == locked)
 * Out:  r3 = 1 if the lock was acquired, 0 if it was already held
 * Uses: r4-r6 as scratch
 *
 * Identical to the MP version except the isync barrier is omitted:
 * no ordering against other CPUs is needed on a uniprocessor.
 */
spinlock_32_try_up:
		mr		r5, r3		// free up r3 to hold the boolean result
		li		r3, 1		// preload "success" return value
1:
        lwarx	r4,0,r5		// load lockword and set reservation
		li		r6,-1		// locked == -1
        cmpwi	r4,0		// lock free?
        bne-	2f			// no - fail without spinning
        stwcx.	r6,0,r5		// conditionally store -1 to claim the lock
        beqlr+				// store succeeded: return with r3 == 1
        b		1b			// reservation lost, retry
2:
        li		r3,0        // we did not get the lock
        blr

    // Requires kUP, prohibits k64Bit: 32-bit uniprocessors only.
    COMMPAGE_DESCRIPTOR(spinlock_32_try_up,_COMM_PAGE_SPINLOCK_TRY,kUP,k64Bit,kCommPage32)
88
89
/* void spinlock_lock(int *lockword) — 32-bit multiprocessor variant.
 *
 * In:   r3 = ptr to lockword (0 == free, -1 == locked)
 * Out:  returns (void) holding the lock
 * Uses: r4-r6 as scratch
 *
 * Spins up to MP_SPIN_TRIES times while the lock is held, then branches
 * (absolute, into the fixed-address commpage) to the relinquish routine,
 * which depresses priority and re-enters this routine.
 */
spinlock_32_lock_mp:
        li		r5,MP_SPIN_TRIES	// r5 = spin attempts remaining
1:
        lwarx	r4,0,r3		// load lockword and set reservation
		li		r6,-1		// locked == -1
        cmpwi	r4,0		// lock free?
        bne-	2f			// no - count down and maybe relinquish
        stwcx.	r6,0,r3		// conditionally store -1 to claim the lock
        isync				// cancel speculative execution
        beqlr+				// we return void
        b		1b			// reservation lost, retry
2:
        subic.	r5,r5,1		// try again before relinquish?
        bne		1b
        ba		_COMM_PAGE_RELINQUISH	// out of tries: yield to the lock owner

    // Prohibits k64Bit+kUP: 32-bit multiprocessors only.
    COMMPAGE_DESCRIPTOR(spinlock_32_lock_mp,_COMM_PAGE_SPINLOCK_LOCK,0,k64Bit+kUP,kCommPage32)
107
108
/* void spinlock_lock(int *lockword) — 32-bit uniprocessor variant.
 *
 * In:   r3 = ptr to lockword (0 == free, -1 == locked)
 * Out:  returns (void) holding the lock
 * Uses: r4, r6 as scratch
 *
 * On a uniprocessor there is no point spinning: if the lock is held,
 * the holder cannot be running, so immediately branch (bnea- = absolute
 * branch-not-equal into the commpage) to the relinquish routine.
 * No isync is needed on UP.
 */
spinlock_32_lock_up:
1:
        lwarx	r4,0,r3		// load lockword and set reservation
		li		r6,-1		// locked == -1
        cmpwi	r4,0		// lock free?
        bnea-	_COMM_PAGE_RELINQUISH	// always depress on UP (let lock owner run)
        stwcx.	r6,0,r3		// conditionally store -1 to claim the lock
        beqlr+				// we return void
        b		1b			// reservation lost, retry

    // Requires kUP, prohibits k64Bit: 32-bit uniprocessors only.
    COMMPAGE_DESCRIPTOR(spinlock_32_lock_up,_COMM_PAGE_SPINLOCK_LOCK,kUP,k64Bit,kCommPage32)
120
121
/* void spinlock_unlock(int *lockword) — 32-bit multiprocessor variant.
 *
 * In:   r3 = ptr to lockword
 * Out:  returns (void) with lockword set to 0 (free)
 * Uses: r4 as scratch
 */
spinlock_32_unlock_mp:
        li		r4,0
        isync				// complete prior stores before unlock
		eieio				// (using isync/eieio is faster than a sync)
        stw		r4,0(r3)	// release: lockword = 0
        blr

    // Prohibits k64Bit+kUP: 32-bit multiprocessors only.
    COMMPAGE_DESCRIPTOR(spinlock_32_unlock_mp,_COMM_PAGE_SPINLOCK_UNLOCK,0,k64Bit+kUP,kCommPage32)
130
131
/* void spinlock_unlock(int *lockword) — 32-bit uniprocessor variant.
 *
 * In:   r3 = ptr to lockword
 * Out:  returns (void) with lockword set to 0 (free)
 * Uses: r4 as scratch
 *
 * No barriers: store ordering against other CPUs is moot on UP.
 */
spinlock_32_unlock_up:
        li		r4,0
        stw		r4,0(r3)	// release: lockword = 0
        blr

    // Requires kUP, prohibits k64Bit: 32-bit uniprocessors only.
    COMMPAGE_DESCRIPTOR(spinlock_32_unlock_up,_COMM_PAGE_SPINLOCK_UNLOCK,kUP,k64Bit,kCommPage32)
138
139
/* int spinlock_try(int *lockword) — 64-bit multiprocessor variant.
 *
 * In:   r3 = ptr to lockword (0 == free, -1 == locked)
 * Out:  r3 = 1 if the lock was acquired, 0 if it was already held
 * Uses: r4-r6 as scratch; one word of the red zone below r1 on failure
 *
 * Uses the 64-bit-processor branch hints (++/--).  On the failure path
 * a dummy stwcx. to the red zone (r1-4) clears the pending reservation,
 * so the processor does not keep snooping the lock's cache line.
 * Served to both 32- and 64-bit user processes (kCommPageBoth).
 */
spinlock_64_try_mp:
		mr		r5, r3		// free up r3 to hold the boolean result
		li		r3, 1		// preload "success" return value
1:
        lwarx	r4,0,r5		// load lockword and set reservation
		li		r6,-1		// locked == -1
        cmpwi	r4,0		// lock free?
        bne--	2f			// no - fail without spinning
        stwcx.	r6,0,r5		// conditionally store -1 to claim the lock
        isync				// cancel speculative execution
        beqlr++				// store succeeded: return with r3 == 1
        b		1b			// reservation lost, retry
2:
        li		r6,-4
        stwcx.	r5,r6,r1	// clear the pending reservation (using red zone)
        li		r3,0        // we did not get the lock
        blr

    // Requires k64Bit, prohibits kUP: 64-bit multiprocessors only.
    COMMPAGE_DESCRIPTOR(spinlock_64_try_mp,_COMM_PAGE_SPINLOCK_TRY,k64Bit,kUP,kCommPageBoth)
159
160
/* int spinlock_try(int *lockword) — 64-bit uniprocessor variant.
 *
 * In:   r3 = ptr to lockword (0 == free, -1 == locked)
 * Out:  r3 = 1 if the lock was acquired, 0 if it was already held
 * Uses: r4-r6 as scratch; one word of the red zone below r1 on failure
 *
 * Same as the 64-bit MP version minus the isync barrier (unneeded on UP);
 * still clears a failed reservation via a dummy stwcx. to the red zone.
 */
spinlock_64_try_up:
		mr		r5, r3		// free up r3 to hold the boolean result
		li		r3, 1		// preload "success" return value
1:
        lwarx	r4,0,r5		// load lockword and set reservation
		li		r6,-1		// locked == -1
        cmpwi	r4,0		// lock free?
        bne--	2f			// no - fail without spinning
        stwcx.	r6,0,r5		// conditionally store -1 to claim the lock
        beqlr++				// store succeeded: return with r3 == 1
        b		1b			// reservation lost, retry
2:
        li		r6,-4
        stwcx.	r5,r6,r1	// clear the pending reservation (using red zone)
        li		r3,0        // we did not get the lock
        blr

    // Requires k64Bit+kUP: 64-bit uniprocessors only.
    COMMPAGE_DESCRIPTOR(spinlock_64_try_up,_COMM_PAGE_SPINLOCK_TRY,k64Bit+kUP,0,kCommPageBoth)
179
180
/* void spinlock_lock(int *lockword) — 64-bit multiprocessor variant.
 *
 * In:   r3 = ptr to lockword (0 == free, -1 == locked)
 * Out:  returns (void) holding the lock
 * Uses: r4-r6 as scratch; one word of the red zone below r1 when busy
 *
 * Spins up to MP_SPIN_TRIES times, clearing the reservation on each
 * failed pass, then branches (absolute, into the fixed-address commpage)
 * to the relinquish routine, which depresses priority and re-enters here.
 */
spinlock_64_lock_mp:
        li		r5,MP_SPIN_TRIES	// r5 = spin attempts remaining
1:
        lwarx	r4,0,r3		// load lockword and set reservation
		li		r6,-1		// locked == -1
        cmpwi	r4,0		// lock free?
        bne--	2f			// no - count down and maybe relinquish
        stwcx.	r6,0,r3		// conditionally store -1 to claim the lock
        isync				// cancel speculative execution
        beqlr++				// we return void
        b		1b			// reservation lost, retry
2:
        li		r6,-4
        stwcx.	r3,r6,r1	// clear the pending reservation (using red zone)
        subic.	r5,r5,1		// try again before relinquish?
        bne--	1b			// mispredict this one (a cheap back-off)
        ba		_COMM_PAGE_RELINQUISH	// out of tries: yield to the lock owner

    // Requires k64Bit, prohibits kUP: 64-bit multiprocessors only.
    COMMPAGE_DESCRIPTOR(spinlock_64_lock_mp,_COMM_PAGE_SPINLOCK_LOCK,k64Bit,kUP,kCommPageBoth)
200
201
/* void spinlock_lock(int *lockword) — 64-bit uniprocessor variant.
 *
 * In:   r3 = ptr to lockword (0 == free, -1 == locked)
 * Out:  returns (void) holding the lock
 * Uses: r4, r6 as scratch; one word of the red zone below r1 when busy
 *
 * No spinning on UP: if the lock is held the owner cannot be running,
 * so clear the reservation and immediately relinquish.  No isync needed.
 */
spinlock_64_lock_up:
1:
        lwarx	r4,0,r3		// load lockword and set reservation
		li		r6,-1		// locked == -1
        cmpwi	r4,0		// lock free?
        bne--	2f			// held - go relinquish
        stwcx.	r6,0,r3		// conditionally store -1 to claim the lock
        beqlr++				// we return void
        b		1b			// reservation lost, retry
2:							// always relinquish on UP (let lock owner run)
        li		r6,-4
        stwcx.	r3,r6,r1	// clear the pending reservation (using red zone)
		ba		_COMM_PAGE_RELINQUISH

    // Requires k64Bit+kUP: 64-bit uniprocessors only.
    COMMPAGE_DESCRIPTOR(spinlock_64_lock_up,_COMM_PAGE_SPINLOCK_LOCK,k64Bit+kUP,0,kCommPageBoth)
217
218
/* void spinlock_unlock(int *lockword) — 64-bit multiprocessor variant.
 *
 * In:   r3 = ptr to lockword
 * Out:  returns (void) with lockword set to 0 (free)
 * Uses: r4 as scratch
 *
 * 64-bit processors can use the lighter-weight lwsync (vs full sync)
 * to order prior stores before the releasing store.
 */
spinlock_64_unlock_mp:
        lwsync				// complete prior stores before unlock
        li		r4,0
        stw		r4,0(r3)	// release: lockword = 0
        blr

    // Requires k64Bit, prohibits kUP: 64-bit multiprocessors only.
    COMMPAGE_DESCRIPTOR(spinlock_64_unlock_mp,_COMM_PAGE_SPINLOCK_UNLOCK,k64Bit,kUP,kCommPageBoth)
226
227
/* void spinlock_unlock(int *lockword) — 64-bit uniprocessor variant.
 *
 * In:   r3 = ptr to lockword
 * Out:  returns (void) with lockword set to 0 (free)
 * Uses: r4 as scratch
 *
 * No barrier: store ordering against other CPUs is moot on UP.
 */
spinlock_64_unlock_up:
        li		r4,0
        stw		r4,0(r3)	// release: lockword = 0
        blr

    // Requires k64Bit+kUP: 64-bit uniprocessors only.
    COMMPAGE_DESCRIPTOR(spinlock_64_unlock_up,_COMM_PAGE_SPINLOCK_UNLOCK,k64Bit+kUP,0,kCommPageBoth)
234
235
/* Relinquish routine, shared by the spinlock_lock variants.
 *
 * In:   r3 = ptr to lockword
 * Out:  does not return here; tail-branches back into the commpage
 *       lock routine to retry
 * Uses: r0, r4, r5, r12
 *
 * Invokes the thread_switch trap (negative Mach trap number -61) with a
 * 1 ms priority depression so the lock owner can run, then retries the
 * lock via an absolute branch into the fixed-address commpage.
 * NOTE(review): relies on r12 surviving the sc instruction so the
 * lockword ptr can be restored afterwards — confirm against the Mach
 * trap register-preservation convention.
 */
spinlock_relinquish:
        mr		r12,r3		// preserve lockword ptr across relinquish
        li		r3,0		// THREAD_NULL
        li		r4,1		// SWITCH_OPTION_DEPRESS
        li		r5,1		// timeout (ms)
        li		r0,-61		// SYSCALL_THREAD_SWITCH
        sc					// relinquish
        mr		r3,r12		// restore lockword ptr
        ba		_COMM_PAGE_SPINLOCK_LOCK	// retry the lock from the top

    // No required or prohibited feature bits: used by every lock variant.
    COMMPAGE_DESCRIPTOR(spinlock_relinquish,_COMM_PAGE_RELINQUISH,0,0,kCommPageBoth)
247
248