1/*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1989 Carnegie-Mellon University
34 * All rights reserved.  The CMU software License Agreement specifies
35 * the terms and conditions for use and redistribution.
36 */
37
38#include <mach_rt.h>
39#include <platforms.h>
40#include <mach_ldebug.h>
41#include <i386/asm.h>
42#include <i386/eflags.h>
43#include <i386/trap.h>
44#include <config_dtrace.h>
45
46#include "assym.s"
47
48#define	PAUSE		rep; nop
49
50/*
51 *	When performance isn't the only concern, it's
52 *	nice to build stack frames...
53 */
54#define	BUILD_STACK_FRAMES   (GPROF || \
55				((MACH_LDEBUG || ETAP_LOCK_TRACE) && MACH_KDB))
56
57#if	BUILD_STACK_FRAMES
58
59/* Stack-frame-relative: */
60#define	L_PC		B_PC
61#define	L_ARG0		B_ARG0
62#define	L_ARG1		B_ARG1
63
64#define LEAF_ENTRY(name)	\
65	Entry(name);		\
66	FRAME;			\
67	MCOUNT
68
69#define LEAF_ENTRY2(n1,n2)	\
70	Entry(n1);		\
71	Entry(n2);		\
72	FRAME;			\
73	MCOUNT
74
75#define LEAF_RET		\
76	EMARF;			\
77	ret
78
79#else	/* BUILD_STACK_FRAMES */
80
81/* Stack-pointer-relative: */
82#define	L_PC		S_PC
83#define	L_ARG0		S_ARG0
84#define	L_ARG1		S_ARG1
85
86#define LEAF_ENTRY(name)	\
87	Entry(name)
88
89#define LEAF_ENTRY2(n1,n2)	\
90	Entry(n1);		\
91	Entry(n2)
92
93#define LEAF_RET		\
94	ret
95
96#endif	/* BUILD_STACK_FRAMES */
97
98
99/* Non-leaf routines always have a stack frame: */
100
101#define NONLEAF_ENTRY(name)	\
102	Entry(name);		\
103	FRAME;			\
104	MCOUNT
105
106#define NONLEAF_ENTRY2(n1,n2)	\
107	Entry(n1);		\
108	Entry(n2);		\
109	FRAME;			\
110	MCOUNT
111
112#define NONLEAF_RET		\
113	EMARF;			\
114	ret
115
116
117#define	M_ILK		(%edx)
118#define	M_LOCKED	MUTEX_LOCKED(%edx)
119#define	M_WAITERS	MUTEX_WAITERS(%edx)
120#define	M_PROMOTED_PRI	MUTEX_PROMOTED_PRI(%edx)
121#define M_ITAG		MUTEX_ITAG(%edx)
122#define M_PTR		MUTEX_PTR(%edx)
123#if	MACH_LDEBUG
124#define	M_TYPE		MUTEX_TYPE(%edx)
125#define	M_PC		MUTEX_PC(%edx)
126#define	M_THREAD	MUTEX_THREAD(%edx)
127#endif	/* MACH_LDEBUG */
128
129#include <i386/mp.h>
130#define	CX(addr,reg)	addr(,reg,4)
131
132#if	MACH_LDEBUG
133/*
134 *  Routines for general lock debugging.
135 */
136
137/*
138 * Checks for expected lock types and calls "panic" on
 * mismatch.  Detects calls to mutex functions that are
 * passed a simple lock, and vice versa.
141 */
142#define	CHECK_MUTEX_TYPE()					\
143	cmpl	$ MUTEX_TAG,M_TYPE			;	\
144	je	1f					;	\
145	pushl	$2f					;	\
146	call	EXT(panic)				;	\
147	hlt						;	\
148	.data						;	\
1492:	String	"not a mutex!"				;	\
150	.text						;	\
1511:
152
153/*
154 * If one or more simplelocks are currently held by a thread,
155 * an attempt to acquire a mutex will cause this check to fail
 * (since acquiring a mutex may context switch, holding a
 * simple lock across the acquisition is not a good thing).
158 */
159#if	MACH_RT
160#define CHECK_PREEMPTION_LEVEL()				\
161	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL		;	\
162	je	1f					;	\
163	pushl	$2f					;	\
164	call	EXT(panic)				;	\
165	hlt						;	\
166	.data						;	\
1672:	String	"preemption_level != 0!"		;	\
168	.text						;	\
1691:
170#else	/* MACH_RT */
171#define	CHECK_PREEMPTION_LEVEL()
172#endif	/* MACH_RT */
173
174#define	CHECK_NO_SIMPLELOCKS()					\
175	cmpl	$0,%gs:CPU_SIMPLE_LOCK_COUNT		;	\
176	je	1f					;	\
177	pushl	$2f					;	\
178	call	EXT(panic)				;	\
179	hlt						;	\
180	.data						;	\
1812:	String	"simple_locks_held!"			;	\
182	.text						;	\
1831:
184
185/*
186 * Verifies return to the correct thread in "unlock" situations.
187 */
188#define	CHECK_THREAD(thd)					\
189	movl	%gs:CPU_ACTIVE_THREAD,%ecx		;	\
190	testl	%ecx,%ecx				;	\
191	je	1f					;	\
192	cmpl	%ecx,thd				;	\
193	je	1f					;	\
194	pushl	$2f					;	\
195	call	EXT(panic)				;	\
196	hlt						;	\
197	.data						;	\
1982:	String	"wrong thread!"				;	\
199	.text						;	\
2001:
201
202#define	CHECK_MYLOCK(thd)					\
203	movl	%gs:CPU_ACTIVE_THREAD,%ecx		;	\
204	testl	%ecx,%ecx				;	\
205	je	1f					;	\
206	cmpl	%ecx,thd				;	\
207	jne	1f					;	\
208	pushl	$2f					;	\
209	call	EXT(panic)				;	\
210	hlt						;	\
211	.data						;	\
2122:	String	"mylock attempt!"			;	\
213	.text						;	\
2141:
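/*
 * Each of the panic-style checks above follows the same shape in C
 * terms.  A hedged sketch for CHECK_NO_SIMPLELOCKS(), where cpu_data()
 * is illustrative shorthand for the %gs-relative per-CPU area actually
 * used (not a kernel interface):
 *
 *	if (cpu_data()->simple_lock_count != 0)
 *		panic("simple_locks_held!");
 *
 * The checks compile away entirely when MACH_LDEBUG is not set.
 */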
215
216#define	METER_SIMPLE_LOCK_LOCK(reg)				\
217	pushl	reg					;	\
218	call	EXT(meter_simple_lock)			;	\
219	popl	reg
220
221#define	METER_SIMPLE_LOCK_UNLOCK(reg)				\
222	pushl	reg					;	\
223	call	EXT(meter_simple_unlock)		;	\
224	popl	reg
225
226#else	/* MACH_LDEBUG */
227#define	CHECK_MUTEX_TYPE()
#define	CHECK_SIMPLE_LOCK_TYPE()
229#define	CHECK_THREAD(thd)
230#define CHECK_PREEMPTION_LEVEL()
231#define	CHECK_NO_SIMPLELOCKS()
232#define	CHECK_MYLOCK(thd)
233#define	METER_SIMPLE_LOCK_LOCK(reg)
234#define	METER_SIMPLE_LOCK_UNLOCK(reg)
235#endif	/* MACH_LDEBUG */
236
237
238#define PREEMPTION_DISABLE				\
239	incl	%gs:CPU_PREEMPTION_LEVEL
240
241
242#define	PREEMPTION_ENABLE				\
243	decl	%gs:CPU_PREEMPTION_LEVEL	;	\
244	jne	9f				;	\
245	pushf					;	\
246	testl	$ EFL_IF,(%esp)			;	\
247	je	8f				;	\
248	cli					;	\
249	movl	%gs:CPU_PENDING_AST,%eax	;	\
250	testl	$ AST_URGENT,%eax		;	\
251	je	8f				;	\
252	movl	%gs:CPU_INTERRUPT_LEVEL,%eax	;	\
253	testl	%eax,%eax			;	\
254	jne	8f				;	\
255	popf					;	\
256	int	$(T_PREEMPT)			;	\
257	jmp	9f				;	\
2588:							\
259	popf					;	\
2609:
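/*
 * In C terms, PREEMPTION_ENABLE amounts to the following (hedged
 * sketch; interrupts_enabled(), pending_ast() and preempt_trap() are
 * illustrative names for the EFLAGS test, the CPU_PENDING_AST load and
 * the "int $(T_PREEMPT)" above, not kernel interfaces):
 *
 *	void preemption_enable(void)
 *	{
 *		if (--cpu->preemption_level != 0)
 *			return;		// still disabled by an outer caller
 *		if (!interrupts_enabled())
 *			return;		// cannot preempt with interrupts off
 *		if ((pending_ast() & AST_URGENT) == 0)
 *			return;		// no urgent AST pending
 *		if (cpu->interrupt_level != 0)
 *			return;		// never preempt from interrupt context
 *		preempt_trap();		// int $(T_PREEMPT)
 *	}
 */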
261
262
263
264#if	CONFIG_DTRACE
265#define	LOCKSTAT_LABEL(lab) \
266	.data				;\
267	.globl	lab			;\
268	lab:				;\
269	.long 9f			;\
270	.text				;\
271	9:
272
273	.globl	_lockstat_probe
274	.globl	_lockstat_probemap
275
276#define	LOCKSTAT_RECORD(id, lck) \
277	push	%ebp					;	\
278	mov	%esp,%ebp				;	\
279	sub	$0x38,%esp	/* size of dtrace_probe args */ ; \
280	movl	_lockstat_probemap + (id * 4),%eax	;	\
281	test	%eax,%eax				;	\
282	je	9f					;	\
283	movl	$0,36(%esp)				;	\
284	movl	$0,40(%esp)				;	\
285	movl	$0,28(%esp)				;	\
286	movl	$0,32(%esp)				;	\
287	movl	$0,20(%esp)				;	\
288	movl	$0,24(%esp)				;	\
289	movl	$0,12(%esp)				;	\
290	movl	$0,16(%esp)				;	\
291	movl	lck,4(%esp)	/* copy lock pointer to arg 1 */ ; \
292	movl	$0,8(%esp)				;	\
293	movl	%eax,(%esp) 				; 	\
294	call	*_lockstat_probe			;	\
2959:	leave
296	/* ret - left to subsequent code, e.g. return values */
297
298#define	LOCKSTAT_RECORD2(id, lck, arg) \
299	push	%ebp					;	\
300	mov	%esp,%ebp				;	\
301	sub	$0x38,%esp	/* size of dtrace_probe args */ ; \
302	movl	_lockstat_probemap + (id * 4),%eax	;	\
303	test	%eax,%eax				;	\
304	je	9f					;	\
305	movl	$0,36(%esp)				;	\
306	movl	$0,40(%esp)				;	\
307	movl	$0,28(%esp)				;	\
308	movl	$0,32(%esp)				;	\
309	movl	$0,20(%esp)				;	\
310	movl	$0,24(%esp)				;	\
311	movl	$0,12(%esp)				;	\
312	movl	$0,16(%esp)				;	\
313	movl	lck,4(%esp)	/* copy lock pointer to arg 1 */ ; \
314	movl	arg,8(%esp)				;	\
315	movl	%eax,(%esp) 				; 	\
316	call	*_lockstat_probe			;	\
3179:	leave
318	/* ret - left to subsequent code, e.g. return values */
319#endif
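/*
 * In C terms, LOCKSTAT_RECORD(id, lck) behaves roughly as follows
 * (hedged sketch; the argument list mirrors the 0x38-byte frame built
 * above, with unused slots passed as zero, and arg is only supplied by
 * LOCKSTAT_RECORD2):
 *
 *	if (lockstat_probemap[id] != 0)
 *		(*lockstat_probe)(lockstat_probemap[id], lck, arg, 0, ...);
 *
 * The probe sites themselves are emitted as plain "ret" instructions
 * (see the LOCKSTAT_LABEL patch points below) and are patched at run
 * time to fall through into this recording code when lockstat enables
 * the corresponding probe.
 */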
320
321
322/*
323 *	void hw_lock_init(hw_lock_t)
324 *
325 *	Initialize a hardware lock.
326 */
327LEAF_ENTRY(hw_lock_init)
328	movl	L_ARG0,%edx		/* fetch lock pointer */
329	movl	$0,(%edx)		/* clear the lock */
330	LEAF_RET
331
332
333/*
334 *	void hw_lock_byte_init(uint8_t *)
335 *
336 *	Initialize a hardware byte lock.
337 */
338LEAF_ENTRY(hw_lock_byte_init)
339	movl	L_ARG0,%edx		/* fetch lock pointer */
340	movb	$0,(%edx)		/* clear the lock */
341	LEAF_RET
342
343/*
344 *	void hw_lock_lock(hw_lock_t)
345 *
346 *	Acquire lock, spinning until it becomes available.
347 *	MACH_RT:  also return with preemption disabled.
348 */
349LEAF_ENTRY(hw_lock_lock)
350	movl	L_ARG0,%edx		/* fetch lock pointer */
351
352	movl	%gs:CPU_ACTIVE_THREAD,%ecx
353	PREEMPTION_DISABLE
3541:
355	movl	(%edx), %eax
356	testl	%eax,%eax		/* lock locked? */
357	jne	3f			/* branch if so */
358	lock; cmpxchgl	%ecx,(%edx)	/* try to acquire the HW lock */
359	jne	3f
360	movl	$1,%eax			/* In case this was a timeout call */
361	LEAF_RET			/* if yes, then nothing left to do */
3623:
363	PAUSE				/* pause for hyper-threading */
364	jmp	1b			/* try again */
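/*
 * A hedged C sketch of the loop above (cpu_pause() stands in for the
 * "rep; nop" PAUSE macro; the lock word holds 0 when free and the
 * owning thread pointer when held):
 *
 *	void hw_lock_lock(hw_lock_t lock)
 *	{
 *		thread_t self = current_thread();
 *		disable_preemption();
 *		for (;;) {
 *			if (*(volatile uintptr_t *)lock == 0 &&
 *			    __sync_bool_compare_and_swap(
 *				(uintptr_t *)lock, 0, (uintptr_t)self))
 *				return;		// acquired, preemption stays off
 *			cpu_pause();		// be polite to the HT sibling
 *		}
 *	}
 */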
365
366/*
367 *	void	hw_lock_byte_lock(uint8_t *lock_byte)
368 *
369 *	Acquire byte sized lock operand, spinning until it becomes available.
370 *	MACH_RT:  also return with preemption disabled.
371 */
372
373LEAF_ENTRY(hw_lock_byte_lock)
374	movl	L_ARG0,%edx		/* Load lock pointer */
375	PREEMPTION_DISABLE
376	movl	$1, %ecx		/* Set lock value */
3771:
378	movb	(%edx), %al		/* Load byte at address */
379	testb	%al,%al			/* lock locked? */
380	jne	3f			/* branch if so */
381	lock; cmpxchgb	%cl,(%edx)	/* attempt atomic compare exchange */
382	jne	3f
383	LEAF_RET			/* if yes, then nothing left to do */
3843:
385	PAUSE				/* pause for hyper-threading */
386	jmp	1b			/* try again */
387
388/*
389 *	unsigned int hw_lock_to(hw_lock_t, unsigned int)
390 *
391 *	Acquire lock, spinning until it becomes available or timeout.
392 *	MACH_RT:  also return with preemption disabled.
393 */
394LEAF_ENTRY(hw_lock_to)
3951:
396	movl	L_ARG0,%edx		/* fetch lock pointer */
397	movl	%gs:CPU_ACTIVE_THREAD,%ecx
398	/*
399	 * Attempt to grab the lock immediately
400	 * - fastpath without timeout nonsense.
401	 */
402	PREEMPTION_DISABLE
403	movl	(%edx), %eax
404	testl	%eax,%eax		/* lock locked? */
405	jne	2f			/* branch if so */
406	lock; cmpxchgl	%ecx,(%edx)	/* try to acquire the HW lock */
407	jne	2f			/* branch on failure */
408	movl	$1,%eax
409	LEAF_RET
410
4112:
412#define	INNER_LOOP_COUNT	1000
413	/*
414	 * Failed to get the lock so set the timeout
415	 * and then spin re-checking the lock but pausing
416	 * every so many (INNER_LOOP_COUNT) spins to check for timeout.
417	 */
418	movl	L_ARG1,%ecx		/* fetch timeout */
419	push	%edi
420	push	%ebx
421	mov	%edx,%edi
422
423	lfence
424	rdtsc				/* read cyclecount into %edx:%eax */
425	lfence
	addl	%ecx,%eax		/* add timeout to current cyclecount */
427	adcl	$0,%edx			/* add carry */
428	mov	%edx,%ecx
429	mov	%eax,%ebx		/* %ecx:%ebx is the timeout expiry */
4304:
431	/*
432	 * The inner-loop spin to look for the lock being freed.
433	 */
434	mov	$(INNER_LOOP_COUNT),%edx
4355:
436	PAUSE				/* pause for hyper-threading */
437	movl	(%edi),%eax		/* spin checking lock value in cache */
438	testl	%eax,%eax
439	je	6f			/* zero => unlocked, try to grab it */
440	decl	%edx			/* decrement inner loop count */
441	jnz	5b			/* time to check for timeout? */
442
443	/*
444	 * Here after spinning INNER_LOOP_COUNT times, check for timeout
445	 */
446	lfence
447	rdtsc				/* cyclecount into %edx:%eax */
448	lfence
449	cmpl	%ecx,%edx		/* compare high-order 32-bits */
450	jb	4b			/* continue spinning if less, or */
451	cmpl	%ebx,%eax		/* compare low-order 32-bits */
452	jb	4b			/* continue if less, else bail */
453	xor	%eax,%eax		/* with 0 return value */
454	pop	%ebx
455	pop	%edi
456	LEAF_RET
457
4586:
459	/*
460	 * Here to try to grab the lock that now appears to be free
461	 * after contention.
462	 */
463	movl	%gs:CPU_ACTIVE_THREAD,%edx
464	lock; cmpxchgl	%edx,(%edi)	/* try to acquire the HW lock */
465	jne	4b			/* no - spin again */
466	movl	$1,%eax			/* yes */
467	pop	%ebx
468	pop	%edi
469	LEAF_RET
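/*
 * hw_lock_to() adds a TSC-based deadline to the spin above.  A hedged
 * C sketch of the control flow (rdtsc64() and try_cmpxchg() are
 * illustrative helpers for the rdtsc and lock;cmpxchgl sequences):
 *
 *	unsigned int hw_lock_to(hw_lock_t lock, unsigned int timeout)
 *	{
 *		disable_preemption();
 *		if (try_cmpxchg(lock, 0, current_thread()))
 *			return 1;			// uncontended fast path
 *		uint64_t deadline = rdtsc64() + timeout;
 *		do {
 *			// spin INNER_LOOP_COUNT times between deadline checks
 *			for (int i = 0; i < INNER_LOOP_COUNT; i++) {
 *				cpu_pause();
 *				if (*(volatile uintptr_t *)lock == 0 &&
 *				    try_cmpxchg(lock, 0, current_thread()))
 *					return 1;	// acquired after contention
 *			}
 *		} while (rdtsc64() < deadline);
 *		return 0;	// timed out; as coded above, preemption stays off
 *	}
 */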
470
471/*
472 *	void hw_lock_unlock(hw_lock_t)
473 *
474 *	Unconditionally release lock.
475 *	MACH_RT:  release preemption level.
476 */
477LEAF_ENTRY(hw_lock_unlock)
478	movl	L_ARG0,%edx		/* fetch lock pointer */
479	movl	$0,(%edx)		/* clear the lock */
480	PREEMPTION_ENABLE
481	LEAF_RET
482/*
483 *	void hw_lock_byte_unlock(uint8_t *lock_byte)
484 *
485 *	Unconditionally release byte sized lock operand.
486 *	MACH_RT:  release preemption level.
487 */
488
489LEAF_ENTRY(hw_lock_byte_unlock)
490	movl	L_ARG0,%edx		/* Load lock pointer */
491	movb	$0,(%edx)		/* Clear the lock byte */
492	PREEMPTION_ENABLE
493	LEAF_RET
494
495/*
496 *	void i386_lock_unlock_with_flush(hw_lock_t)
497 *
498 *	Unconditionally release lock, followed by a cacheline flush of
499 *	the line corresponding to the lock dword. This routine is currently
 *	used with certain locks which are susceptible to lock starvation;
 *	flushing the line minimizes the releasing CPU's cache-affinity
 *	advantage on subsequent acquisitions. A queued spinlock
502 *	or other mechanism that ensures fairness would obviate the need
503 *	for this routine, but ideally few or no spinlocks should exhibit
504 *	enough contention to require such measures.
505 *	MACH_RT:  release preemption level.
506 */
507LEAF_ENTRY(i386_lock_unlock_with_flush)
508	movl	L_ARG0,%edx		/* Fetch lock pointer */
509	movl	$0,(%edx)		/* Clear the lock */
510	mfence				/* Serialize prior stores */
511	clflush	(%edx)			/* Write back and invalidate line */
512	PREEMPTION_ENABLE
513	LEAF_RET
514
515/*
516 *	unsigned int hw_lock_try(hw_lock_t)
517 *	MACH_RT:  returns with preemption disabled on success.
518 */
519LEAF_ENTRY(hw_lock_try)
520	movl	L_ARG0,%edx		/* fetch lock pointer */
521
522	movl	%gs:CPU_ACTIVE_THREAD,%ecx
523	PREEMPTION_DISABLE
524	movl	(%edx),%eax
525	testl	%eax,%eax
526	jne	1f
527	lock; cmpxchgl	%ecx,(%edx)	/* try to acquire the HW lock */
528	jne	1f
529
530	movl	$1,%eax			/* success */
531	LEAF_RET
532
5331:
534	PREEMPTION_ENABLE		/* failure:  release preemption... */
535	xorl	%eax,%eax		/* ...and return failure */
536	LEAF_RET
537
538/*
539 *	unsigned int hw_lock_held(hw_lock_t)
540 *	MACH_RT:  doesn't change preemption state.
541 *	N.B.  Racy, of course.
542 */
543LEAF_ENTRY(hw_lock_held)
544	movl	L_ARG0,%edx		/* fetch lock pointer */
545
546	movl	(%edx),%eax		/* check lock value */
547	testl	%eax,%eax
548	movl	$1,%ecx
549	cmovne	%ecx,%eax		/* 0 => unlocked, 1 => locked */
550	LEAF_RET
551
552LEAF_ENTRY(mutex_init)
553	movl	L_ARG0,%edx		/* fetch lock pointer */
554	xorl	%eax,%eax
555	movl	%eax,M_ILK		/* clear interlock */
556	movl	%eax,M_LOCKED		/* clear locked flag */
557	movw	%ax,M_WAITERS		/* init waiter count */
558	movw	%ax,M_PROMOTED_PRI
559
560#if	MACH_LDEBUG
561	movl	$ MUTEX_TAG,M_TYPE	/* set lock type */
562	movl	%eax,M_PC		/* init caller pc */
563	movl	%eax,M_THREAD		/* and owning thread */
564#endif
565
566	LEAF_RET
567
568/*
569 * Reader-writer lock fastpaths. These currently exist for the
570 * shared lock acquire and release paths (where they reduce overhead
571 * considerably)--more can be added as necessary (DRK).
572 */
573
574/*
575 * These should reflect the layout of the bitfield embedded within
576 * the lck_rw_t structure (see i386/locks.h).
577 */
578#define LCK_RW_INTERLOCK 0x1
579#define LCK_RW_WANT_UPGRADE 0x2
580#define LCK_RW_WANT_WRITE 0x4
581#define LCK_R_WAITING 0x8
582#define LCK_W_WAITING 0x10
583
584#define	RW_LOCK_SHARED_MASK ((LCK_RW_INTERLOCK<<16) |	\
585	((LCK_RW_WANT_UPGRADE|LCK_RW_WANT_WRITE) << 24))
586/*
587 *		void lck_rw_lock_shared(lck_rw_t*)
588 *
589 */
590
591Entry(lck_rw_lock_shared)
592	movl	S_ARG0, %edx
5931:
594	movl	(%edx), %eax		/* Load state bitfield and interlock */
595	testl	$(RW_LOCK_SHARED_MASK), %eax	/* Eligible for fastpath? */
596	jne	3f
597	movl	%eax, %ecx
598	incl	%ecx				/* Increment reader refcount */
599	lock
600	cmpxchgl %ecx, (%edx)			/* Attempt atomic exchange */
601	jne	2f
602
603#if	CONFIG_DTRACE
604	/*
605	 * Dtrace lockstat event: LS_LCK_RW_LOCK_SHARED_ACQUIRE
606	 * Implemented by swapping between return and no-op instructions.
607	 * See bsd/dev/dtrace/lockstat.c.
608	 */
609	LOCKSTAT_LABEL(_lck_rw_lock_shared_lockstat_patch_point)
610	ret
611	/* Fall thru when patched, counting on lock pointer in %edx  */
612	LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_ACQUIRE, %edx)
613#endif
614	ret
615
6162:
617	PAUSE
618	jmp	1b
6193:
620	jmp	EXT(lck_rw_lock_shared_gen)
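/*
 * A hedged C sketch of the shared-acquire fastpath above (the dword at
 * the front of the lck_rw_t holds the reader refcount in its low bits
 * together with the flag bits defined above):
 *
 *	void lck_rw_lock_shared(lck_rw_t *lck)
 *	{
 *		volatile uint32_t *data = (volatile uint32_t *)lck;
 *		for (;;) {
 *			uint32_t old = *data;
 *			if (old & RW_LOCK_SHARED_MASK) {
 *				lck_rw_lock_shared_gen(lck);	// slow path
 *				return;
 *			}
 *			if (__sync_bool_compare_and_swap(data, old, old + 1))
 *				return;		// reader refcount bumped
 *			cpu_pause();
 *		}
 *	}
 */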
621
622
623/*
624 *		lck_rw_type_t lck_rw_done(lck_rw_t*)
625 *
626 */
627
628.data
629rwl_release_error_str:
630	.asciz	"Releasing non-exclusive RW lock without a reader refcount!"
631.text
632
633#define RW_LOCK_RELEASE_MASK ((LCK_RW_INTERLOCK<<16) |	\
634	((LCK_RW_WANT_UPGRADE|LCK_RW_WANT_WRITE|LCK_R_WAITING|LCK_W_WAITING) << 24))
635Entry(lck_rw_done)
636	movl	S_ARG0,	%edx
6371:
638	movl	(%edx), %eax		/* Load state bitfield and interlock */
639	testl	$(RW_LOCK_RELEASE_MASK), %eax	/* Eligible for fastpath? */
640	jne	3f
641	movl	%eax, %ecx
642	/* Assert refcount */
643	testl	$(0xFFFF), %ecx
644	jne	5f
645	movl	$(rwl_release_error_str), S_ARG0
646	jmp	EXT(panic)
6475:
648	decl	%ecx			/* Decrement reader count */
649	lock
650	cmpxchgl %ecx, (%edx)
651	jne	2f
652	movl	$(RW_SHARED), %eax	/* Indicate that the lock was shared */
653#if	CONFIG_DTRACE
654	/* Dtrace lockstat probe: LS_RW_DONE_RELEASE as reader */
655	LOCKSTAT_LABEL(_lck_rw_done_lockstat_patch_point)
656	ret
657	/*
658	 * Note: Dtrace's convention is 0 ==> reader, which is
659	 * a different absolute value than $(RW_SHARED)
660	 * %edx contains the lock address already from the above
661	 */
662	LOCKSTAT_RECORD2(LS_LCK_RW_DONE_RELEASE, %edx, $0)
663	movl	$(RW_SHARED), %eax	/* Indicate that the lock was shared */
664#endif
665	ret
666
6672:
668	PAUSE
669	jmp	1b
6703:
671	jmp	EXT(lck_rw_done_gen)
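/*
 * The matching release fastpath in C terms (hedged sketch): bail to
 * the generic path if any interlock/writer/waiter bits are set, panic
 * on a zero reader refcount, otherwise decrement atomically and report
 * that the lock was held shared.
 *
 *	lck_rw_type_t lck_rw_done(lck_rw_t *lck)
 *	{
 *		volatile uint32_t *data = (volatile uint32_t *)lck;
 *		for (;;) {
 *			uint32_t old = *data;
 *			if (old & RW_LOCK_RELEASE_MASK)
 *				return lck_rw_done_gen(lck);	// slow path
 *			if ((old & 0xFFFF) == 0)
 *				panic("Releasing non-exclusive RW lock without a reader refcount!");
 *			if (__sync_bool_compare_and_swap(data, old, old - 1))
 *				return RW_SHARED;
 *			cpu_pause();
 *		}
 *	}
 */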
672
673
674NONLEAF_ENTRY2(mutex_lock_spin,_mutex_lock_spin)
675
676	movl	B_ARG0,%edx		/* fetch lock pointer */
677	pushf				/* save interrupt state */
678
679	CHECK_MUTEX_TYPE()
680	CHECK_NO_SIMPLELOCKS()
681	CHECK_PREEMPTION_LEVEL()
682
683	movl	M_ILK,%eax		/* read interlock */
684	testl	%eax,%eax		/* unlocked? */
685	jne	Lmls_ilk_loop		/* no, go spin */
686Lmls_retry:
687	cli				/* disable interrupts */
688	movl	%gs:CPU_ACTIVE_THREAD,%ecx
689
690	/* eax == 0 at this point */
691	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
692	jne	Lmls_ilk_fail		/* branch on failure to spin loop */
693
694	movl	M_LOCKED,%ecx		/* get lock owner */
695	testl	%ecx,%ecx		/* is the mutex locked? */
696	jne	Lml_fail		/* yes, fall back to a normal mutex lock */
697	movl	$(MUTEX_LOCKED_AS_SPIN),M_LOCKED	/* indicate ownership as a spin lock */
698
699#if	MACH_LDEBUG
700	movl	%gs:CPU_ACTIVE_THREAD,%ecx
701	movl	%ecx,M_THREAD
702	movl	B_PC,%ecx
703	movl	%ecx,M_PC
704#endif
705	PREEMPTION_DISABLE
706	popf				/* restore interrupt state */
707	leave				/* return with the interlock held */
708#if	CONFIG_DTRACE
709	LOCKSTAT_LABEL(_mutex_lock_spin_lockstat_patch_point)
710	ret
711	/* %edx contains the lock address from above */
712	LOCKSTAT_RECORD(LS_MUTEX_LOCK_SPIN_ACQUIRE, %edx)
713#endif
714	ret
715
716Lmls_ilk_fail:
717	popf				/* restore interrupt state */
718	pushf				/* resave interrupt state on stack */
719
720Lmls_ilk_loop:
721	PAUSE
722	movl	M_ILK,%eax		/* read interlock */
723	testl	%eax,%eax		/* unlocked? */
724	je	Lmls_retry		/* yes, go for it */
725	jmp	Lmls_ilk_loop		/* no, keep spinning */
726
727
728NONLEAF_ENTRY2(mutex_lock,_mutex_lock)
729
730	movl	B_ARG0,%edx		/* fetch lock pointer */
731	pushf				/* save interrupt state */
732
733	CHECK_MUTEX_TYPE()
734	CHECK_NO_SIMPLELOCKS()
735	CHECK_PREEMPTION_LEVEL()
736
737	movl	M_ILK,%eax		/* is interlock held */
738	testl	%eax,%eax
739	jne	Lml_ilk_loop		/* yes, go do the spin loop */
740Lml_retry:
741	cli				/* disable interrupts */
742	movl	%gs:CPU_ACTIVE_THREAD,%ecx
743
744	/* eax == 0 at this point */
745	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
746	jne	Lml_ilk_fail		/* branch on failure to spin loop */
747
748	movl	M_LOCKED,%ecx		/* get lock owner */
749	testl	%ecx,%ecx		/* is the mutex locked? */
750	jne	Lml_fail		/* yes, we lose */
751Lml_acquire:
752	movl	%gs:CPU_ACTIVE_THREAD,%ecx
753	movl	%ecx,M_LOCKED
754
755#if	MACH_LDEBUG
756	movl	%ecx,M_THREAD
757	movl	B_PC,%ecx
758	movl	%ecx,M_PC
759#endif
760	cmpw	$0,M_WAITERS		/* are there any waiters? */
761	jne	Lml_waiters		/* yes, more work to do */
762Lml_return:
763	xorl	%eax,%eax
764	movl	%eax,M_ILK
765
766	popf				/* restore interrupt state */
767	leave
768#if	CONFIG_DTRACE
769	LOCKSTAT_LABEL(_mutex_lock_lockstat_patch_point)
770	ret
771	/* %edx still contains the lock pointer */
772	LOCKSTAT_RECORD(LS_MUTEX_LOCK_ACQUIRE, %edx)
773#endif
774	ret
775
776	/*
777	 * We got the mutex, but there are waiters.  Update information
778	 * on waiters.
779	 */
780Lml_waiters:
781	pushl	%edx			/* save mutex address */
782	pushl	%edx
783	call	EXT(lck_mtx_lock_acquire)
784	addl	$4,%esp
785	popl	%edx			/* restore mutex address */
786	jmp	Lml_return
787
788Lml_restart:
789Lml_ilk_fail:
790	popf				/* restore interrupt state */
791	pushf				/* resave interrupt state on stack */
792
793Lml_ilk_loop:
794	PAUSE
795	movl	M_ILK,%eax		/* read interlock */
796	testl	%eax,%eax		/* unlocked? */
797	je	Lml_retry		/* yes, go try to grab it */
798	jmp	Lml_ilk_loop		/* no - keep spinning */
799
800Lml_fail:
801	/*
802	 * Check if the owner is on another processor and therefore
803	 * we should try to spin before blocking.
804	 */
805	testl	$(OnProc),ACT_SPF(%ecx)
806	jz	Lml_block
807
808	/*
809	 * Here if owner is on another processor:
810	 *  - release the interlock
811	 *  - spin on the holder until release or timeout
812	 *  - in either case re-acquire the interlock
813	 *  - if released, acquire it
814	 *  - otherwise drop thru to block.
815	 */
816	xorl	%eax,%eax
817	movl	%eax,M_ILK		/* zero interlock */
818	popf
819	pushf				/* restore interrupt state */
820
821	push	%edx			/* lock address */
822	call	EXT(lck_mtx_lock_spinwait)	/* call out to do spinning */
823	addl	$4,%esp
824	movl	B_ARG0,%edx		/* refetch mutex address */
825
826	/* Re-acquire interlock - interrupts currently enabled */
827	movl	M_ILK,%eax		/* is interlock held */
828	testl	%eax,%eax
829	jne	Lml_ilk_reloop		/* yes, go do the spin loop */
830Lml_reget_retry:
831	cli				/* disable interrupts */
832	movl	%gs:CPU_ACTIVE_THREAD,%ecx
833
834	/* eax == 0 at this point */
835	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
836	jne	Lml_ilk_refail		/* branch on failure to spin loop */
837
838	movl	M_LOCKED,%ecx		/* get lock owner */
839	testl	%ecx,%ecx		/* is the mutex free? */
840	je	Lml_acquire		/* yes, acquire */
841
842Lml_block:
843	CHECK_MYLOCK(M_THREAD)
844	pushl	M_LOCKED
845	pushl	%edx			/* push mutex address */
846	call	EXT(lck_mtx_lock_wait)	/* wait for the lock */
847	addl	$8,%esp			/* returns with interlock dropped */
848	movl	B_ARG0,%edx		/* refetch mutex address */
849	jmp	Lml_restart		/* and start over */
850
851Lml_ilk_refail:
852	popf				/* restore interrupt state */
853	pushf				/* resave interrupt state on stack */
854
855Lml_ilk_reloop:
856	PAUSE
857	movl	M_ILK,%eax		/* read interlock */
858	testl	%eax,%eax		/* unlocked? */
859	je	Lml_reget_retry		/* yes, go try to grab it */
860	jmp	Lml_ilk_reloop		/* no - keep spinning */
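/*
 * Taken together, the mutex_lock() paths above implement roughly the
 * following (hedged C sketch; lck_mtx_lock_spinwait, lck_mtx_lock_wait
 * and lck_mtx_lock_acquire are the externs actually called, while
 * interlock_acquire/interlock_release and owner_is_on_cpu() are
 * illustrative shorthand for the cli/cmpxchg and ACT_SPF/OnProc
 * sequences):
 *
 *	void mutex_lock(mutex_t *m)
 *	{
 *		for (;;) {
 *			interlock_acquire(m);		// cli + cmpxchg on M_ILK
 *			if (m->owner == NULL)
 *				break;			// mutex is free, take it
 *			if (owner_is_on_cpu(m->owner)) {
 *				interlock_release(m);
 *				lck_mtx_lock_spinwait(m); // spin on the holder
 *				interlock_acquire(m);
 *				if (m->owner == NULL)
 *					break;
 *			}
 *			lck_mtx_lock_wait(m, m->owner);	// block; drops interlock
 *		}
 *		m->owner = current_thread();
 *		if (m->waiters)
 *			lck_mtx_lock_acquire(m);	// pick up waiter/priority state
 *		interlock_release(m);
 *	}
 */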
861
862
863
864NONLEAF_ENTRY2(mutex_try_spin,_mutex_try_spin)
865
866	movl	B_ARG0,%edx		/* fetch lock pointer */
867	pushf				/* save interrupt state */
868
869	CHECK_MUTEX_TYPE()
870	CHECK_NO_SIMPLELOCKS()
871
872	movl	M_ILK,%eax
873	testl	%eax,%eax		/* is the interlock held? */
874	jne	Lmts_ilk_loop		/* yes, go to spin loop */
875Lmts_retry:
876	cli				/* disable interrupts */
877	movl	%gs:CPU_ACTIVE_THREAD,%ecx
878
879	/* eax == 0 at this point */
880	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
881	jne	Lmts_ilk_fail		/* branch on failure to spin loop */
882
883	movl	M_LOCKED,%ecx		/* get lock owner */
884	testl	%ecx,%ecx		/* is the mutex locked? */
885	jne	Lmt_fail		/* yes, we lose */
886Lmts_acquire:
887	movl	$(MUTEX_LOCKED_AS_SPIN),M_LOCKED	/* indicate ownership as a spin lock */
888
889#if	MACH_LDEBUG
890	movl	%gs:CPU_ACTIVE_THREAD,%ecx
891	movl	%ecx,M_THREAD
892	movl	B_PC,%ecx
893	movl	%ecx,M_PC
894#endif
895	PREEMPTION_DISABLE		/* no, return with interlock held */
896	popf				/* restore interrupt state */
897	movl	$1,%eax
898	leave
899#if	CONFIG_DTRACE
900	LOCKSTAT_LABEL(_mutex_try_spin_lockstat_patch_point)
901	ret
902	/* %edx inherits the lock pointer from above */
903	LOCKSTAT_RECORD(LS_MUTEX_TRY_SPIN_ACQUIRE, %edx)
904	movl	$1,%eax
905#endif
906	ret
907
908Lmts_ilk_fail:
909	popf				/* restore interrupt state */
910	pushf				/* resave interrupt state on stack */
911
912Lmts_ilk_loop:
913	PAUSE
914	/*
	 * need to do this check outside of the interlock, in
	 * case this lock is held as a spin lock, which means
	 * we won't be able to take the interlock
918 	 */
919	movl	M_LOCKED,%eax
920	testl	%eax,%eax		/* is the mutex locked? */
921	jne	Lmt_fail_no_ilk		/* yes, go return failure */
922
923	movl	M_ILK,%eax		/* read interlock */
924	testl	%eax,%eax		/* unlocked? */
925	je	Lmts_retry		/* yes, go try to grab it */
926	jmp	Lmts_ilk_loop		/* keep spinning */
927
928
929
930NONLEAF_ENTRY2(mutex_try,_mutex_try)
931
932	movl	B_ARG0,%edx		/* fetch lock pointer */
933	pushf				/* save interrupt state */
934
935	CHECK_MUTEX_TYPE()
936	CHECK_NO_SIMPLELOCKS()
937
938	movl	M_ILK,%eax		/* read interlock */
939	testl	%eax,%eax		/* unlocked? */
940	jne	Lmt_ilk_loop		/* yes, go try to grab it */
941Lmt_retry:
942	cli				/* disable interrupts */
943	movl	%gs:CPU_ACTIVE_THREAD,%ecx
944
945	/* eax == 0 at this point */
946	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
947	jne	Lmt_ilk_fail		/* branch on failure to spin loop */
948
949	movl	M_LOCKED,%ecx		/* get lock owner */
950	testl	%ecx,%ecx		/* is the mutex locked? */
951	jne	Lmt_fail		/* yes, we lose */
952Lmt_acquire:
953	movl	%gs:CPU_ACTIVE_THREAD,%ecx
954	movl	%ecx,M_LOCKED
955
956#if	MACH_LDEBUG
957	movl	%ecx,M_THREAD
958	movl	B_PC,%ecx
959	movl	%ecx,M_PC
960#endif
961	cmpw	$0,M_WAITERS		/* are there any waiters? */
962	jne	Lmt_waiters		/* yes, more work to do */
963Lmt_return:
964	xorl	%eax,%eax
965	movl	%eax,M_ILK
966	popf				/* restore interrupt state */
967
968	movl	$1,%eax
969	leave
970#if	CONFIG_DTRACE
971	LOCKSTAT_LABEL(_mutex_try_lockstat_patch_point)
972	ret
973	/* inherit the lock pointer in %edx from above */
974	LOCKSTAT_RECORD(LS_MUTEX_TRY_LOCK_ACQUIRE, %edx)
975	movl	$1,%eax
976#endif
977	ret
978
979Lmt_waiters:
980	pushl	%edx			/* save mutex address */
981	pushl	%edx
982	call	EXT(lck_mtx_lock_acquire)
983	addl	$4,%esp
984	popl	%edx			/* restore mutex address */
985	jmp	Lmt_return
986
987Lmt_ilk_fail:
988	popf				/* restore interrupt state */
989	pushf				/* resave interrupt state on stack */
990
991Lmt_ilk_loop:
992	PAUSE
993	/*
	 * need to do this check outside of the interlock, in
	 * case this lock is held as a spin lock, which means
	 * we won't be able to take the interlock
997 	 */
998	movl	M_LOCKED,%eax		/* get lock owner */
999	testl	%eax,%eax		/* is the mutex locked? */
1000	jne	Lmt_fail_no_ilk		/* yes, go return failure */
1001
1002	movl	M_ILK,%eax		/* read interlock */
1003	testl	%eax,%eax		/* unlocked? */
1004	je	Lmt_retry		/* yes, go try to grab it */
1005	jmp	Lmt_ilk_loop		/* no - keep spinning */
1006
1007Lmt_fail:
1008	xorl	%eax,%eax
1009	movl	%eax,M_ILK
1010
1011Lmt_fail_no_ilk:
1012	xorl	%eax,%eax
1013	popf				/* restore interrupt state */
1014	NONLEAF_RET
1015
1016
1017
1018LEAF_ENTRY(mutex_convert_spin)
1019	movl	L_ARG0,%edx		/* fetch lock pointer */
1020
1021	movl	M_LOCKED,%ecx		/* is this the spin variant of the mutex */
1022	cmpl	$(MUTEX_LOCKED_AS_SPIN),%ecx
1023	jne	Lmcs_exit		/* already owned as a mutex, just return */
1024
1025	movl	M_ILK,%ecx		/* convert from spin version to mutex */
1026	movl	%ecx,M_LOCKED		/* take control of the mutex */
1027
1028	cmpw	$0,M_WAITERS		/* are there any waiters? */
1029	jne	Lmcs_waiters		/* yes, more work to do */
1030
1031Lmcs_return:
1032	xorl	%ecx,%ecx
1033	movl	%ecx,M_ILK		/* clear interlock */
1034	PREEMPTION_ENABLE
1035Lmcs_exit:
1036#if	CONFIG_DTRACE
1037	LOCKSTAT_LABEL(_mutex_convert_spin_lockstat_patch_point)
1038	ret
1039	/* inherit %edx from above */
1040	LOCKSTAT_RECORD(LS_MUTEX_CONVERT_SPIN_ACQUIRE, %edx)
1041#endif
1042	ret
1043
1044
1045Lmcs_waiters:
1046	pushl	%edx			/* save mutex address */
1047	pushl	%edx
1048	call	EXT(lck_mtx_lock_acquire)
1049	addl	$4,%esp
1050	popl	%edx			/* restore mutex address */
1051	jmp	Lmcs_return
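/*
 * mutex_convert_spin() above, in C terms (hedged sketch; field names
 * are illustrative stand-ins for the M_* offsets): the caller already
 * holds the interlock because the mutex was taken in its spin form, so
 * ownership simply moves from the interlock word into the owner word
 * before the interlock is dropped and preemption re-enabled.
 *
 *	void mutex_convert_spin(mutex_t *m)
 *	{
 *		if (m->owner != MUTEX_LOCKED_AS_SPIN)
 *			return;			// already held as a full mutex
 *		m->owner = m->ilk;		// interlock holder becomes owner
 *		if (m->waiters)
 *			lck_mtx_lock_acquire(m);
 *		m->ilk = NULL;
 *		enable_preemption();
 *	}
 */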
1052
1053
1054
1055NONLEAF_ENTRY(mutex_unlock)
1056	movl	B_ARG0,%edx		/* fetch lock pointer */
1057
1058	movl	M_LOCKED,%ecx		/* is this the spin variant of the mutex */
1059	cmpl	$(MUTEX_LOCKED_AS_SPIN),%ecx
1060	jne	Lmu_enter		/* no, go treat like a real mutex */
1061
1062	cmpw	$0,M_WAITERS		/* are there any waiters? */
1063	jne	Lmus_wakeup		/* yes, more work to do */
1064
1065Lmus_drop_ilk:
1066	xorl	%ecx,%ecx
1067	movl	%ecx,M_LOCKED		/* yes, clear the spin indicator */
1068	movl	%ecx,M_ILK		/* release the interlock */
1069	PREEMPTION_ENABLE		/* and re-enable preemption */
1070	leave
1071#if	CONFIG_DTRACE
1072	LOCKSTAT_LABEL(_mutex_unlock_lockstat_patch_point)
1073	ret
1074	/* inherit lock pointer in %edx from above */
1075	LOCKSTAT_RECORD(LS_MUTEX_UNLOCK_RELEASE, %edx)
1076#endif
1077	ret
1078
1079Lmus_wakeup:
1080	pushl	%edx			/* save mutex address */
1081	pushl	%edx			/* push mutex address */
1082	call	EXT(lck_mtx_unlockspin_wakeup)	/* yes, wake a thread */
1083	addl	$4,%esp
1084	popl	%edx			/* restore mutex pointer */
1085	jmp	Lmus_drop_ilk
1086
1087Lmu_enter:
1088	pushf				/* save interrupt state */
1089
1090	CHECK_MUTEX_TYPE()
1091	CHECK_THREAD(M_THREAD)
1092
1093	movl	M_ILK,%eax		/* read interlock */
1094	testl	%eax,%eax		/* unlocked? */
1095	jne	Lmu_ilk_loop		/* yes, go try to grab it */
1096Lmu_retry:
1097	cli				/* disable interrupts */
1098	movl	%gs:CPU_ACTIVE_THREAD,%ecx
1099
1100	/* eax == 0 at this point */
1101	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
1102	jne	Lmu_ilk_fail		/* branch on failure to spin loop */
1103
1104	cmpw	$0,M_WAITERS		/* are there any waiters? */
1105	jne	Lmu_wakeup		/* yes, more work to do */
1106
1107Lmu_doit:
1108#if	MACH_LDEBUG
1109	movl	$0,M_THREAD		/* disown thread */
1110#endif
1111	xorl	%ecx,%ecx
1112	movl	%ecx,M_LOCKED		/* unlock the mutex */
1113	movl	%ecx,M_ILK		/* release the interlock */
1114	popf				/* restore interrupt state */
1115	leave
1116#if	CONFIG_DTRACE
1117	LOCKSTAT_LABEL(_mutex_unlock2_lockstat_patch_point)
1118	ret
1119	/* inherit %edx from above */
1120	LOCKSTAT_RECORD(LS_MUTEX_UNLOCK_RELEASE, %edx)
1121#endif
1122	ret
1123
1124Lmu_ilk_fail:
1125	popf				/* restore interrupt state */
1126	pushf				/* resave interrupt state on stack */
1127
1128Lmu_ilk_loop:
1129	PAUSE
1130	movl	M_ILK,%eax		/* read interlock */
1131	testl	%eax,%eax		/* unlocked? */
1132	je	Lmu_retry		/* yes, go try to grab it */
1133	jmp	Lmu_ilk_loop		/* no - keep spinning */
1134
1135Lmu_wakeup:
1136	pushl	M_LOCKED
1137	pushl	%edx			/* push mutex address */
1138	call	EXT(lck_mtx_unlock_wakeup)/* yes, wake a thread */
1139	addl	$8,%esp
1140	movl	B_ARG0,%edx		/* restore lock pointer */
1141	jmp	Lmu_doit
1142
1143/*
1144 *	void lck_mtx_assert(lck_mtx_t* l, unsigned int)
1145 *	void _mutex_assert(mutex_t, unsigned int)
1146 *	Takes the address of a lock, and an assertion type as parameters.
 *	The assertion can take one of two forms determined by the type
1148 *	parameter: either the lock is held by the current thread, and the
1149 *	type is	LCK_MTX_ASSERT_OWNED, or it isn't and the type is
1150 *	LCK_MTX_ASSERT_NOT_OWNED. Calls panic on assertion failure.
1151 *
1152 */
1153
1154Entry(lck_mtx_assert)
1155Entry(_mutex_assert)
1156	movl	S_ARG0,%edx			/* Load lock address */
1157	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* Load current thread */
1158
1159	cmpl	$(MUTEX_IND),M_ITAG		/* Is this an indirect mutex? */
1160	cmove	M_PTR,%edx			/* If so, take indirection */
1161
1162	movl	M_LOCKED,%eax			/* Load lock word */
1163	cmpl	$(MUTEX_LOCKED_AS_SPIN),%eax	/* check for spin variant */
1164	cmove	M_ILK,%eax			/* yes, spin lock owner is in the interlock */
1165
1166	cmpl	$(MUTEX_ASSERT_OWNED),S_ARG1	/* Determine assert type */
1167	jne	2f				/* Assert ownership? */
1168	cmpl	%eax,%ecx			/* Current thread match? */
1169	jne	3f				/* no, go panic */
11701:						/* yes, we own it */
1171	ret					/* just return */
11722:
1173	cmpl	%eax,%ecx			/* Current thread match? */
1174	jne	1b				/* No, return */
1175	movl	%edx,S_ARG1			/* Prep assertion failure */
1176	movl	$(mutex_assert_owned_str),S_ARG0
1177	jmp	4f
11783:
1179	movl	%edx,S_ARG1			/* Prep assertion failure */
1180	movl	$(mutex_assert_not_owned_str),S_ARG0
11814:
1182	jmp	EXT(panic)
1183
1184.data
1185mutex_assert_not_owned_str:
1186	.asciz	"mutex (%p) not owned\n"
1187mutex_assert_owned_str:
1188	.asciz	"mutex (%p) owned\n"
1189.text
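/*
 * A hedged C sketch of lck_mtx_assert()/_mutex_assert() above (field
 * names are illustrative stand-ins for the M_ITAG/M_PTR/M_LOCKED/M_ILK
 * offsets):
 *
 *	void lck_mtx_assert(lck_mtx_t *lck, unsigned int type)
 *	{
 *		thread_t self = current_thread();
 *		thread_t owner;
 *
 *		if (lck->itag == MUTEX_IND)
 *			lck = lck->ptr;			// follow indirection
 *		owner = lck->owner;
 *		if (owner == (thread_t)MUTEX_LOCKED_AS_SPIN)
 *			owner = lck->ilk;	// spin variant: owner is in the interlock
 *		if (type == LCK_MTX_ASSERT_OWNED && owner != self)
 *			panic("mutex (%p) not owned\n", lck);
 *		if (type != LCK_MTX_ASSERT_OWNED && owner == self)
 *			panic("mutex (%p) owned\n", lck);
 *	}
 */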
1190
/* This preprocessor define controls whether the R-M-W updates of the
 * per-group statistics elements are atomic (LOCK-prefixed).
1193 * Enabled by default.
1194 */
1195#define ATOMIC_STAT_UPDATES 1
1196
1197#if defined(ATOMIC_STAT_UPDATES)
1198#define LOCK_IF_ATOMIC_STAT_UPDATES lock
1199#else
1200#define LOCK_IF_ATOMIC_STAT_UPDATES
1201#endif /* ATOMIC_STAT_UPDATES */
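/*
 * The 64-bit GRP_MTX_STAT_UTIL counter below is bumped as two 32-bit
 * operations: a (possibly LOCK-prefixed) add of 1 to the low word,
 * then an increment of the high word only when that add carried.
 * In C terms (hedged sketch; the _lo and _hi field names are
 * illustrative):
 *
 *	if (++grp->mtx_stat_util_lo == 0)	// low word wrapped: carry out
 *		grp->mtx_stat_util_hi += 1;	// incl of the high word
 *
 * Note the high-word increment is not itself LOCK-prefixed, and the
 * two halves are not updated as a single atomic 64-bit store.
 */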
1202
1203
1204/*
1205 * lck_mtx_lock()
1206 * lck_mtx_try_lock()
 * lck_mtx_unlock()
1208 * lck_mtx_lock_spin()
1209 * lck_mtx_convert_spin()
1210 *
 * These are variants of mutex_lock(), mutex_try(), mutex_unlock(),
 * mutex_lock_spin() and mutex_convert_spin() without
1213 * DEBUG checks (which require fields not present in lck_mtx_t's).
1214 */
1215
1216NONLEAF_ENTRY(lck_mtx_lock_spin)
1217
1218	movl	B_ARG0,%edx		/* fetch lock pointer */
1219	pushf				/* save interrupt state */
1220
1221	CHECK_NO_SIMPLELOCKS()
1222	CHECK_PREEMPTION_LEVEL()
1223
1224	movl	M_ILK,%eax		/* read interlock */
1225	testl	%eax,%eax		/* unlocked? */
1226	jne	Llmls_eval_ilk		/* no, go see if indirect */
1227Llmls_retry:
1228	cli				/* disable interrupts */
1229	movl	%gs:CPU_ACTIVE_THREAD,%ecx
1230
1231	/* eax == 0 at this point */
1232	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
1233	jne	Llmls_ilk_fail		/* branch on failure to spin loop */
1234
1235	movl	M_LOCKED,%ecx		/* get lock owner */
1236	testl	%ecx,%ecx		/* is the mutex locked? */
1237	jne	Llml_fail		/* yes, fall back to a normal mutex */
1238
1239Llmls_acquire:
1240	movl	$(MUTEX_LOCKED_AS_SPIN),M_LOCKED	/* indicate ownership as a spin lock */
1241	PREEMPTION_DISABLE
1242	popf				/* restore interrupt state */
1243	NONLEAF_RET			/* return with the interlock held */
1244
1245Llmls_ilk_fail:
1246	popf				/* restore interrupt state */
1247	pushf				/* resave interrupt state on stack */
1248
1249Llmls_ilk_loop:
1250	PAUSE
1251	movl	M_ILK,%eax		/* read interlock */
1252	testl	%eax,%eax		/* unlocked? */
1253	je	Llmls_retry		/* yes - go try to grab it */
1254
1255	cmpl	$(MUTEX_DESTROYED),%eax	/* check to see if its marked destroyed */
1256	jne	Llmls_ilk_loop		/* no - keep spinning  */
1257
1258	pushl	%edx
1259	call	EXT(lck_mtx_interlock_panic)
1260	/*
1261	 * shouldn't return from here, but just in case
1262	 */
1263	popl	%edx
1264	jmp	Llmls_ilk_loop
1265
1266
1267Llmls_eval_ilk:
1268	cmpl	$(MUTEX_IND),M_ITAG	/* Is this an indirect mutex? */
1269	cmove	M_PTR,%edx		/* If so, take indirection */
1270	jne	Llmls_ilk_loop		/* If not, go to spin loop */
1271
1272Llmls_lck_ext:
1273	pushl	%esi			/* Used to hold the lock group ptr */
1274	pushl	%edi			/* Used for stat update records */
1275	movl	MUTEX_GRP(%edx),%esi	/* Load lock group */
1276	xorl	%edi,%edi		/* Clear stat update records */
1277	/* 64-bit increment of acquire attempt statistic (per-group) */
1278	LOCK_IF_ATOMIC_STAT_UPDATES
1279	addl	$1, GRP_MTX_STAT_UTIL(%esi)
1280	jnc	1f
1281	incl	GRP_MTX_STAT_UTIL+4(%esi)
12821:
1283	movl	M_ILK,%eax		/* read interlock */
1284	testl	%eax,%eax		/* unlocked? */
1285	jne	Llmls_ext_ilk_loop	/* no, go to spin loop */
1286Llmls_ext_retry:
1287	cli				/* disable interrupts */
1288	movl	%gs:CPU_ACTIVE_THREAD,%ecx
1289
1290	/* eax == 0 at this point */
1291	lock; cmpxchgl %ecx,M_ILK	/* atomic compare and exchange */
1292	jne     Llmls_ext_ilk_fail	/* branch on failure to retry */
1293
1294	movl	M_LOCKED,%ecx		/* get lock owner */
1295	testl   %ecx,%ecx		/* is the mutex locked? */
1296	jne	Llml_ext_fail		/* yes, we lose */
1297
1298	popl	%edi
1299	popl	%esi
1300	jmp	Llmls_acquire
1301
1302Llmls_ext_ilk_fail:
1303	/*
	 * Restore interrupt state from the EFLAGS image saved on the
	 * stack below the pushed %esi/%edi (the image remains there).
1305	 */
1306	movl	8(%esp),%ecx
1307	pushl	%ecx
1308	popf				/* restore interrupt state */
1309
1310Llmls_ext_ilk_loop:
1311	PAUSE
1312	movl	M_ILK,%eax		/* read interlock */
1313	testl	%eax,%eax		/* unlocked? */
1314	je	Llmls_ext_retry		/* yes - go try to grab it */
1315
1316	cmpl	$(MUTEX_DESTROYED),%eax	/* check to see if its marked destroyed */
1317	jne	Llmls_ext_ilk_loop		/* no - keep spinning  */
1318
1319	pushl	%edx
1320	call	EXT(lck_mtx_interlock_panic)
1321	/*
1322	 * shouldn't return from here, but just in case
1323	 */
1324	popl	%edx
1325	jmp	Llmls_ext_ilk_loop	/* no - keep spinning  */
1326
1327
1328
1329NONLEAF_ENTRY(lck_mtx_lock)
1330
1331	movl	B_ARG0,%edx		/* fetch lock pointer */
1332	pushf				/* save interrupt state */
1333
1334	CHECK_NO_SIMPLELOCKS()
1335	CHECK_PREEMPTION_LEVEL()
1336
1337	movl	M_ILK,%eax		/* read interlock */
1338	testl	%eax,%eax		/* unlocked? */
1339	jne	Llml_eval_ilk		/* no, go see if indirect */
1340Llml_retry:
1341	cli				/* disable interrupts */
1342	movl	%gs:CPU_ACTIVE_THREAD,%ecx
1343
1344	/* eax == 0 at this point */
1345	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
1346	jne	Llml_ilk_fail		/* branch on failure to spin loop */
1347
1348	movl	M_LOCKED,%ecx		/* get lock owner */
1349	testl	%ecx,%ecx		/* is the mutex locked? */
1350	jne	Llml_fail		/* yes, we lose */
1351Llml_acquire:
1352	movl	%gs:CPU_ACTIVE_THREAD,%ecx
1353	movl	%ecx,M_LOCKED
1354
1355	cmpw	$0,M_WAITERS		/* are there any waiters? */
1356	jne	Lml_waiters		/* yes, more work to do */
1357Llml_return:
1358	xorl	%eax,%eax
1359	movl	%eax,M_ILK
1360
1361	popf				/* restore interrupt state */
1362	leave
1363#if	CONFIG_DTRACE
1364	LOCKSTAT_LABEL(_lck_mtx_lock_lockstat_patch_point)
1365	ret
1366	/* inherit lock pointer in %edx above */
1367	LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, %edx)
1368#endif
1369	ret
1370
1371Llml_waiters:
1372	pushl	%edx			/* save mutex address */
1373	pushl	%edx
1374	call	EXT(lck_mtx_lock_acquire)
1375	addl	$4,%esp
1376	popl	%edx			/* restore mutex address */
1377	jmp	Llml_return
1378
1379Llml_restart:
1380Llml_ilk_fail:
1381	popf				/* restore interrupt state */
1382	pushf				/* resave interrupt state on stack */
1383
1384Llml_ilk_loop:
1385	PAUSE
1386	movl	M_ILK,%eax		/* read interlock */
1387	testl	%eax,%eax		/* unlocked? */
1388	je	Llml_retry		/* yes - go try to grab it */
1389
1390	cmpl	$(MUTEX_DESTROYED),%eax	/* check to see if its marked destroyed */
1391	jne	Llml_ilk_loop		/* no - keep spinning  */
1392
1393	pushl	%edx
1394	call	EXT(lck_mtx_interlock_panic)
1395	/*
1396	 * shouldn't return from here, but just in case
1397	 */
1398	popl	%edx
1399	jmp	Llml_ilk_loop		/* no - keep spinning  */
1400
1401Llml_fail:
1402	/*
1403	 * Check if the owner is on another processor and therefore
1404	 * we should try to spin before blocking.
1405	 */
1406	testl	$(OnProc),ACT_SPF(%ecx)
1407	jz	Llml_block
1408
1409	/*
1410	 * Here if owner is on another processor:
1411	 *  - release the interlock
1412	 *  - spin on the holder until release or timeout
1413	 *  - in either case re-acquire the interlock
1414	 *  - if released, acquire it
1415	 *  - otherwise drop thru to block.
1416	 */
1417	xorl	%eax,%eax
1418	movl	%eax,M_ILK		/* zero interlock */
1419	popf
1420	pushf				/* restore interrupt state */
1421	pushl	%edx			/* save mutex address */
1422	pushl	%edx
1423	call	EXT(lck_mtx_lock_spinwait)
1424	addl	$4,%esp
1425	popl	%edx			/* restore mutex address */
1426
1427	/* Re-acquire interlock */
1428	movl	M_ILK,%eax		/* read interlock */
1429	testl	%eax,%eax		/* unlocked? */
1430	jne	Llml_ilk_refail		/* no, go to spin loop */
1431Llml_reget_retry:
1432	cli				/* disable interrupts */
1433	movl	%gs:CPU_ACTIVE_THREAD,%ecx
1434
1435	/* eax == 0 at this point */
1436	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
1437	jne	Llml_ilk_refail		/* branch on failure to retry */
1438
1439	movl	M_LOCKED,%ecx		/* get lock owner */
1440	testl	%ecx,%ecx		/* is the mutex free? */
1441	je	Llml_acquire		/* yes, acquire */
1442
1443Llml_block:
1444	CHECK_MYLOCK(M_THREAD)
1445	pushl	%edx			/* save mutex address */
1446	pushl	M_LOCKED
1447	pushl	%edx			/* push mutex address */
1448	/*
1449	 * N.B.: lck_mtx_lock_wait is called here with interrupts disabled
1450	 * Consider reworking.
1451	 */
1452	call	EXT(lck_mtx_lock_wait)	/* wait for the lock */
1453	addl	$8,%esp
1454	popl	%edx			/* restore mutex address */
1455	jmp	Llml_restart		/* and start over */
1456
1457Llml_ilk_refail:
1458	popf				/* restore interrupt state */
1459	pushf				/* resave interrupt state on stack */
1460
1461Llml_ilk_reloop:
1462	PAUSE
1463	movl	M_ILK,%eax		/* read interlock */
1464	testl	%eax,%eax		/* unlocked? */
1465	je	Llml_reget_retry	/* yes - go try to grab it */
1466
1467	cmpl	$(MUTEX_DESTROYED),%eax	/* check to see if its marked destroyed */
1468	jne	Llml_ilk_reloop		/* no - keep spinning  */
1469
1470	pushl	%edx
1471	call	EXT(lck_mtx_interlock_panic)
1472	/*
1473	 * shouldn't return from here, but just in case
1474	 */
1475	popl	%edx
1476	jmp	Llml_ilk_reloop		/* no - keep spinning  */
1477
1478
1479Llml_eval_ilk:
1480	cmpl	$(MUTEX_IND),M_ITAG	/* Is this an indirect mutex? */
1481	cmove	M_PTR,%edx		/* If so, take indirection */
1482	jne	Llml_ilk_loop		/* If not, go to spin loop */
1483
1484/*
1485 * Entry into statistics codepath for lck_mtx_lock:
1486 * EDX: real lock pointer
1487 * first dword on stack contains flags
1488 */
1489
1490/* Enable this preprocessor define to record the first miss alone
1491 * By default, we count every miss, hence multiple misses may be
1492 * recorded for a single lock acquire attempt via lck_mtx_lock
1493 */
1494#undef LOG_FIRST_MISS_ALONE
1495
1496/*
1497 * N.B.: On x86, statistics are currently recorded for all indirect mutexes.
1498 * Also, only the acquire attempt count (GRP_MTX_STAT_UTIL) is maintained
 * as a 64-bit quantity (this matches the existing PowerPC implementation);
 * the new x86-specific statistics are maintained as 32-bit
 * quantities.
1502 */
1503
1504Llml_lck_ext:
1505	pushl	%esi			/* Used to hold the lock group ptr */
1506	pushl	%edi			/* Used for stat update records */
1507	movl	MUTEX_GRP(%edx),%esi	/* Load lock group */
1508	xorl	%edi,%edi		/* Clear stat update records */
1509	/* 64-bit increment of acquire attempt statistic (per-group) */
1510	LOCK_IF_ATOMIC_STAT_UPDATES
1511	addl	$1, GRP_MTX_STAT_UTIL(%esi)
1512	jnc	1f
1513	incl	GRP_MTX_STAT_UTIL+4(%esi)
15141:
1515	movl	M_ILK,%eax		/* read interlock */
1516	testl	%eax,%eax		/* unlocked? */
1517	jne	Llml_ext_ilk_loop	/* no, go to spin loop */
1518Llml_ext_get_hw:
1519	cli
1520	movl	%gs:CPU_ACTIVE_THREAD,%ecx
1521
1522	/* eax == 0 at this point */
1523	lock; cmpxchgl %ecx,M_ILK	/* atomic compare and exchange */
1524	jne	Llml_ext_ilk_fail	/* branch on failure to retry */
1525
1526	movl	M_LOCKED,%ecx		/* get lock owner */
1527	testl	%ecx,%ecx		/* is the mutex locked? */
1528	jne	Llml_ext_fail		/* yes, we lose */
1529
1530Llml_ext_acquire:
1531	movl	%gs:CPU_ACTIVE_THREAD,%ecx
1532	movl	%ecx,M_LOCKED
1533
1534	cmpw	$0,M_WAITERS		/* are there any waiters? */
1535	jne	Llml_ext_waiters	/* yes, more work to do */
1536Llml_ext_return:
1537	xorl	%eax,%eax
1538	movl	%eax,M_ILK
1539
1540	popl	%edi
1541	popl	%esi
1542	popf				/* restore interrupt state */
1543	leave
1544#if	CONFIG_DTRACE
1545	LOCKSTAT_LABEL(_lck_mtx_lock_ext_lockstat_patch_point)
1546	ret
1547	/* inherit lock pointer in %edx above */
1548	LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, %edx)
1549#endif
1550	ret
1551
1552Llml_ext_waiters:
1553	pushl	%edx			/* save mutex address */
1554	pushl	%edx
1555	call	EXT(lck_mtx_lock_acquire)
1556	addl	$4,%esp
1557	popl	%edx			/* restore mutex address */
1558	jmp	Llml_ext_return
1559
1560Llml_ext_restart:
1561Llml_ext_ilk_fail:
1562	movl	8(%esp),%ecx
1563	pushl	%ecx
1564	popf				/* restore interrupt state */
1565
1566Llml_ext_ilk_loop:
1567	PAUSE
1568	movl	M_ILK,%eax		/* read interlock */
1569	testl	%eax,%eax		/* unlocked? */
1570	je	Llml_ext_get_hw		/* yes - go try to grab it */
1571
1572	cmpl	$(MUTEX_DESTROYED),%eax	/* check to see if its marked destroyed */
1573	jne	Llml_ext_ilk_loop	/* no - keep spinning  */
1574
1575	pushl	%edx
1576	call	EXT(lck_mtx_interlock_panic)
1577	/*
1578	 * shouldn't return from here, but just in case
1579	 */
1580	popl	%edx
1581	jmp	Llml_ext_ilk_loop
1582
1583
1584Llml_ext_fail:
1585#ifdef LOG_FIRST_MISS_ALONE
1586	testl	$1, %edi
1587	jnz	1f
1588#endif /* LOG_FIRST_MISS_ALONE */
1589	/* Record that a lock acquire attempt missed (per-group statistic) */
1590	LOCK_IF_ATOMIC_STAT_UPDATES
1591	incl	GRP_MTX_STAT_MISS(%esi)
1592#ifdef LOG_FIRST_MISS_ALONE
1593	orl	$1, %edi
1594#endif /* LOG_FIRST_MISS_ALONE */
15951:
1596	/*
1597	 * Check if the owner is on another processor and therefore
1598	 * we should try to spin before blocking.
1599	 */
1600	testl	$(OnProc),ACT_SPF(%ecx)
1601	jnz	2f
1602	/*
1603	 * Record the "direct wait" statistic, which indicates if a
1604	 * miss proceeded to block directly without spinning--occurs
1605	 * if the owner of the mutex isn't running on another processor
1606	 * at the time of the check.
1607	 */
1608	LOCK_IF_ATOMIC_STAT_UPDATES
1609	incl	GRP_MTX_STAT_DIRECT_WAIT(%esi)
1610	jmp	Llml_ext_block
16112:
1612	/*
1613	 * Here if owner is on another processor:
1614	 *  - release the interlock
1615	 *  - spin on the holder until release or timeout
1616	 *  - in either case re-acquire the interlock
1617	 *  - if released, acquire it
1618	 *  - otherwise drop thru to block.
1619	 */
1620	xorl	%eax,%eax
1621	movl	%eax,M_ILK		/* zero interlock */
1622
1623	pushl	8(%esp)			/* Make another copy of EFLAGS image */
1624	popf				/* Restore interrupt state */
1625	pushl	%edx			/* save mutex address */
1626	pushl	%edx
1627	call	EXT(lck_mtx_lock_spinwait)
1628	addl	$4,%esp
1629	popl	%edx			/* restore mutex address */
1630
1631	/* Re-acquire interlock */
1632	movl	M_ILK,%eax		/* read interlock */
1633	testl	%eax,%eax		/* unlocked? */
1634	jne	Llml_ext_ilk_refail	/* no, go to spin loop */
1635Llml_ext_reget_retry:
1636	cli				/* disable interrupts */
1637	movl	%gs:CPU_ACTIVE_THREAD,%ecx
1638
1639	/* eax == 0 at this point */
1640	lock; cmpxchgl %ecx,M_ILK	/* atomic compare and exchange */
1641	jne	Llml_ext_ilk_refail	/* branch on failure to spin loop */
1642
1643	movl	M_LOCKED,%ecx		/* get lock owner */
1644	testl	%ecx,%ecx		/* is the mutex free? */
1645	je	Llml_ext_acquire	/* yes, acquire */
1646
1647Llml_ext_block:
1648	/* If we wanted to count waits just once per lock acquire, we'd
1649	 * skip over the stat update here
1650	 */
1651	LOCK_IF_ATOMIC_STAT_UPDATES
1652	/* Record that a lock miss proceeded to block */
1653	incl	GRP_MTX_STAT_WAIT(%esi)
16541:
1655	CHECK_MYLOCK(M_THREAD)
1656	pushl	%edx			/* save mutex address */
1657	pushl	M_LOCKED
1658	pushl	%edx			/* push mutex address */
1659	/*
1660	 * N.B.: lck_mtx_lock_wait is called here with interrupts disabled
1661	 * Consider reworking.
1662	 */
1663	call	EXT(lck_mtx_lock_wait)	/* wait for the lock */
1664	addl	$8,%esp
1665	popl	%edx			/* restore mutex address */
1666	jmp	Llml_ext_restart	/* and start over */
1667
1668Llml_ext_ilk_refail:
1669	movl	8(%esp),%ecx
1670	pushl	%ecx
1671	popf				/* restore interrupt state */
1672
1673Llml_ext_ilk_reloop:
1674	PAUSE
1675	movl	M_ILK,%eax		/* read interlock */
1676	testl	%eax,%eax		/* unlocked? */
1677	je	Llml_ext_reget_retry	/* yes - go try to grab it */
1678
1679	cmpl	$(MUTEX_DESTROYED),%eax	/* check to see if its marked destroyed */
1680	jne	Llml_ext_ilk_reloop	/* no - keep spinning  */
1681
1682	pushl	%edx
1683	call	EXT(lck_mtx_interlock_panic)
1684	/*
1685	 * shouldn't return from here, but just in case
1686	 */
1687	popl	%edx
1688	jmp	Llml_ext_ilk_reloop
1689
1690
1691
1692NONLEAF_ENTRY(lck_mtx_try_lock_spin)
1693
1694	movl	B_ARG0,%edx		/* fetch lock pointer */
1695	pushf				/* save interrupt state */
1696
1697	CHECK_NO_SIMPLELOCKS()
1698	CHECK_PREEMPTION_LEVEL()
1699
1700	movl	M_ILK,%eax		/* read interlock */
1701	testl	%eax,%eax		/* unlocked? */
1702	jne	Llmts_eval_ilk		/* no, go see if indirect */
1703Llmts_retry:
1704	cli				/* disable interrupts */
1705	movl	%gs:CPU_ACTIVE_THREAD,%ecx
1706
1707	/* eax == 0 at this point */
1708	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
1709	jne	Llmts_ilk_fail		/* branch on failure to retry */
1710
1711	movl	M_LOCKED,%ecx		/* get lock owner */
1712	testl	%ecx,%ecx		/* is the mutex locked? */
1713	jne	Llmt_fail		/* yes, we lose */
1714
1715	movl	$(MUTEX_LOCKED_AS_SPIN),M_LOCKED	/* no, indicate ownership as a spin lock */
1716	PREEMPTION_DISABLE		/* and return with interlock held */
1717
1718	movl	$1,%eax			/* return success */
1719	popf				/* restore interrupt state */
1720	leave
1721#if	CONFIG_DTRACE
1722	LOCKSTAT_LABEL(_lck_mtx_try_lock_spin_lockstat_patch_point)
1723	ret
1724	/* inherit lock pointer in %edx above */
1725	LOCKSTAT_RECORD(LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, %edx)
1726	movl	$1,%eax			/* return success */
1727#endif
1728	ret
1729
1730Llmts_ilk_fail:
1731	popf				/* restore interrupt state */
1732	pushf				/* resave interrupt state */
1733
1734Llmts_ilk_loop:
1735	PAUSE
1736	/*
	 * need to do this check outside of the interlock, in
	 * case this lock is held as a spin lock, which means
	 * we won't be able to take the interlock
1740 	 */
1741	movl	M_LOCKED,%eax		/* get lock owner */
1742	testl	%eax,%eax		/* is the mutex locked? */
1743	jne	Llmt_fail_no_ilk	/* yes, go return failure */
1744
1745	movl	M_ILK,%eax		/* read interlock */
1746	testl	%eax,%eax		/* unlocked? */
1747	je	Llmts_retry		/* yes - go try to grab it */
1748
1749	cmpl	$(MUTEX_DESTROYED),%eax	/* check to see if its marked destroyed */
1750	jne	Llmts_ilk_loop		/* no - keep spinning  */
1751
1752	pushl	%edx
1753	call	EXT(lck_mtx_interlock_panic)
1754	/*
1755	 * shouldn't return from here, but just in case
1756	 */
1757	popl	%edx
1758	jmp	Llmts_ilk_loop
1759
1760Llmts_eval_ilk:
1761	cmpl	$(MUTEX_IND),M_ITAG	/* Is this an indirect mutex? */
1762	cmove	M_PTR,%edx		/* If so, take indirection */
1763	jne	Llmts_ilk_loop		/* If not, go to spin loop */
1764
1765	/*
1766	 * bump counter on indirect lock
1767	 */
1768	pushl	%esi			/* Used to hold the lock group ptr */
1769	movl	MUTEX_GRP(%edx),%esi	/* Load lock group */
1770	/* 64-bit increment of acquire attempt statistic (per-group) */
1771	LOCK_IF_ATOMIC_STAT_UPDATES
1772	addl	$1, GRP_MTX_STAT_UTIL(%esi)
1773	jnc	1f
1774	incl	GRP_MTX_STAT_UTIL+4(%esi)
17751:
1776	popl	%esi
1777	jmp	Llmts_ilk_loop
1778
1779
1780
1781NONLEAF_ENTRY(lck_mtx_try_lock)
1782
1783	movl	B_ARG0,%edx		/* fetch lock pointer */
1784	pushf				/* save interrupt state */
1785
1786	CHECK_NO_SIMPLELOCKS()
1787	CHECK_PREEMPTION_LEVEL()
1788
1789	movl	M_ILK,%eax		/* read interlock */
1790	testl	%eax,%eax		/* unlocked? */
1791	jne	Llmt_eval_ilk		/* no, go see if indirect */
1792Llmt_retry:
1793	cli				/* disable interrupts */
1794	movl	%gs:CPU_ACTIVE_THREAD,%ecx
1795
1796	/* eax == 0 at this point */
1797	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
1798	jne	Llmt_ilk_fail		/* branch on failure to retry */
1799
1800	movl	M_LOCKED,%ecx		/* get lock owner */
1801	testl	%ecx,%ecx		/* is the mutex locked? */
1802	jne	Llmt_fail		/* yes, we lose */
1803Llmt_acquire:
1804	movl	%gs:CPU_ACTIVE_THREAD,%ecx
1805	movl	%ecx,M_LOCKED
1806
1807	cmpw	$0,M_WAITERS		/* are there any waiters? */
1808	jne	Llmt_waiters		/* yes, more work to do */
1809Llmt_return:
1810	xorl	%eax,%eax
1811	movl	%eax,M_ILK
1812
1813	popf				/* restore interrupt state */
1814
1815	movl	$1,%eax			/* return success */
1816	leave
1817#if	CONFIG_DTRACE
1818	/* Dtrace probe: LS_LCK_MTX_TRY_LOCK_ACQUIRE */
1819	LOCKSTAT_LABEL(_lck_mtx_try_lock_lockstat_patch_point)
1820	ret
1821	/* inherit lock pointer in %edx from above */
1822	LOCKSTAT_RECORD(LS_LCK_MTX_TRY_LOCK_ACQUIRE, %edx)
1823	movl	$1,%eax			/* return success */
1824#endif
1825	ret
1826
1827Llmt_waiters:
1828	pushl	%edx			/* save mutex address */
1829	pushl	%edx
1830	call	EXT(lck_mtx_lock_acquire)
1831	addl	$4,%esp
1832	popl	%edx			/* restore mutex address */
1833	jmp	Llmt_return
1834
1835Llmt_ilk_fail:
1836	popf				/* restore interrupt state */
1837	pushf				/* resave interrupt state */
1838
1839Llmt_ilk_loop:
1840	PAUSE
1841	/*
	 * need to do this check outside of the interlock, in
	 * case this lock is held as a spin lock, which means
	 * we won't be able to take the interlock
1845 	 */
1846	movl	M_LOCKED,%eax		/* get lock owner */
1847	testl	%eax,%eax		/* is the mutex locked? */
1848	jne	Llmt_fail_no_ilk	/* yes, go return failure */
1849
1850	movl	M_ILK,%eax		/* read interlock */
1851	testl	%eax,%eax		/* unlocked? */
1852	je	Llmt_retry		/* yes - go try to grab it */
1853
1854	cmpl	$(MUTEX_DESTROYED),%eax	/* check to see if its marked destroyed */
1855	jne	Llmt_ilk_loop		/* no - keep spinning  */
1856
1857	pushl	%edx
1858	call	EXT(lck_mtx_interlock_panic)
1859	/*
1860	 * shouldn't return from here, but just in case
1861	 */
1862	popl	%edx
1863	jmp	Llmt_ilk_loop
1864
1865Llmt_fail:
1866	xorl	%eax,%eax		/* Zero interlock value */
1867	movl	%eax,M_ILK
1868
1869Llmt_fail_no_ilk:
1870	popf				/* restore interrupt state */
1871
1872	cmpl	%edx,B_ARG0
1873	jne	Llmt_fail_indirect
1874
1875	xorl	%eax,%eax
1876	/* Note that we don't record a dtrace event for trying and missing */
1877	NONLEAF_RET
1878
1879Llmt_fail_indirect:
1880	pushl	%esi			/* Used to hold the lock group ptr */
1881	movl	MUTEX_GRP(%edx),%esi	/* Load lock group */
1882
1883	/* Record mutex acquire attempt miss statistic */
1884	LOCK_IF_ATOMIC_STAT_UPDATES
1885	incl	GRP_MTX_STAT_MISS(%esi)
1886
1887	popl	%esi
1888	xorl	%eax,%eax
1889	NONLEAF_RET
1890
1891Llmt_eval_ilk:
1892	cmpl	$(MUTEX_IND),M_ITAG	/* Is this an indirect mutex? */
1893	cmove	M_PTR,%edx		/* If so, take indirection */
1894	jne	Llmt_ilk_loop		/* If not, go to spin loop */
1895
1896	/*
1897	 * bump counter for indirect lock
1898  	 */
1899	pushl	%esi			/* Used to hold the lock group ptr */
1900	movl	MUTEX_GRP(%edx),%esi	/* Load lock group */
1901
1902	/* 64-bit increment of acquire attempt statistic (per-group) */
1903	LOCK_IF_ATOMIC_STAT_UPDATES
1904	addl	$1, GRP_MTX_STAT_UTIL(%esi)
1905	jnc	1f
1906	incl	GRP_MTX_STAT_UTIL+4(%esi)
19071:
1908	pop	%esi
1909	jmp	Llmt_ilk_loop
1910
1911
1912
1913LEAF_ENTRY(lck_mtx_convert_spin)
1914	movl	L_ARG0,%edx		/* fetch lock pointer */
1915
1916	cmpl	$(MUTEX_IND),M_ITAG	/* Is this an indirect mutex? */
1917	cmove	M_PTR,%edx		/* If so, take indirection */
1918
1919	movl	M_LOCKED,%ecx		/* is this the spin variant of the mutex */
1920	cmpl	$(MUTEX_LOCKED_AS_SPIN),%ecx
1921	jne	Llmcs_exit		/* already owned as a mutex, just return */
1922
1923	movl	M_ILK,%ecx		/* convert from spin version to mutex */
1924	movl	%ecx,M_LOCKED		/* take control of the mutex */
1925
1926	cmpw	$0,M_WAITERS		/* are there any waiters? */
1927	jne	Llmcs_waiters		/* yes, more work to do */
1928
1929Llmcs_return:
1930	xorl	%ecx,%ecx
1931	movl	%ecx,M_ILK		/* clear interlock */
1932	PREEMPTION_ENABLE
1933Llmcs_exit:
1934	LEAF_RET
1935
1936Llmcs_waiters:
1937	pushl	%edx			/* save mutex address */
1938	pushl	%edx
1939	call	EXT(lck_mtx_lock_acquire)
1940	addl	$4,%esp
1941	popl	%edx			/* restore mutex address */
1942	jmp	Llmcs_return
1943
1944
1945
NONLEAF_ENTRY(lck_mtx_unlock)

	movl	B_ARG0,%edx		/* fetch lock pointer */

	cmpl	$(MUTEX_IND),M_ITAG	/* Is this an indirect mutex? */
	cmove	M_PTR,%edx		/* If so, take indirection */

	movl	M_LOCKED,%ecx		/* is this the spin variant of the mutex? */
	cmpl	$(MUTEX_LOCKED_AS_SPIN),%ecx
	jne	Llmu_enter		/* no, go treat like a real mutex */

	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	Llmus_wakeup		/* yes, more work to do */

Llmu_drop_ilk:
	xorl	%eax,%eax
	movl	%eax,M_LOCKED		/* clear spin indicator */
	movl	%eax,M_ILK		/* release the interlock */

	PREEMPTION_ENABLE		/* and re-enable preemption */
	leave
#if	CONFIG_DTRACE
	/* Dtrace: LS_LCK_MTX_UNLOCK_RELEASE */
	LOCKSTAT_LABEL(_lck_mtx_unlock_lockstat_patch_point)
	ret
	/* inherit lock pointer in %edx from above */
	LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, %edx)
#endif
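	/*
	 * Note on the DTrace block above (best-effort description): the "ret"
	 * that follows LOCKSTAT_LABEL is a patch point.  While the lockstat
	 * probe is disabled the routine simply returns there; enabling the
	 * probe is expected to patch that "ret" away so execution falls
	 * through into LOCKSTAT_RECORD with the lock pointer still in %edx.
	 */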
	ret

Llmus_wakeup:
	pushl	%edx			/* save mutex address */
	pushl	%edx			/* push mutex address */
	call	EXT(lck_mtx_unlockspin_wakeup)	/* yes, wake a thread */
	addl	$4,%esp
	popl	%edx			/* restore mutex pointer */
	jmp	Llmu_drop_ilk


Llmu_enter:
	pushf				/* save interrupt state */

	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	jne	Llmu_ilk_loop		/* no - go to spin loop */
Llmu_retry:
	cli				/* disable interrupts */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* %ecx = current thread */

	/* eax == 0 at this point */
	lock; cmpxchgl	%ecx,M_ILK	/* atomic compare and exchange */
	jne	Llmu_ilk_fail		/* branch on failure to spin loop */
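	/*
	 * Illustrative sketch (comment only) of the interlock acquisition
	 * above, on the assumption that the interlock word holds the owning
	 * thread.  The helper names are descriptive, not the real APIs:
	 *
	 *	interrupts_off();
	 *	if (!compare_and_swap(&mutex->ilk, 0, current_thread))
	 *		goto spin_loop;		-- interlock already held
	 */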

	cmpw	$0,M_WAITERS		/* are there any waiters? */
	jne	Llmu_wakeup		/* yes, more work to do */

Llmu_doit:
	xorl	%ecx,%ecx
	movl	%ecx,M_LOCKED		/* unlock the mutex */
	movl	%ecx,M_ILK		/* clear the interlock */

	popf				/* restore interrupt state */
	leave
#if	CONFIG_DTRACE
	LOCKSTAT_LABEL(_lck_mtx_unlock2_lockstat_patch_point)
	ret
	/* inherit lock pointer in %edx from above */
	LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, %edx)
#endif
	ret

Llmu_ilk_fail:
	popf				/* restore interrupt state */
	pushf				/* resave interrupt state */

Llmu_ilk_loop:
	PAUSE
	movl	M_ILK,%eax		/* read interlock */
	testl	%eax,%eax		/* unlocked? */
	je	Llmu_retry		/* yes - go try to grab it */

	cmpl	$(MUTEX_DESTROYED),%eax	/* check to see if it's marked destroyed */
	jne	Llmu_ilk_loop		/* no - keep spinning */

	pushl	%edx
	call	EXT(lck_mtx_interlock_panic)
	/*
	 * shouldn't return from here, but just in case
	 */
	popl	%edx
	jmp	Llmu_ilk_loop

Llmu_wakeup:
	pushl	%edx			/* save mutex address */
	pushl	M_LOCKED		/* push the current holder */
	pushl	%edx			/* push mutex address */
	call	EXT(lck_mtx_unlock_wakeup)	/* yes, wake a thread */
	addl	$8,%esp
	popl	%edx			/* restore mutex pointer */
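	/*
	 * The pushes above set up a cdecl call that amounts to
	 * lck_mtx_unlock_wakeup(mutex, holder): the mutex address is the
	 * first argument (pushed last) and the owner taken from M_LOCKED is
	 * the second.  This reading is inferred from the stack setup here,
	 * not quoted from the C prototype.
	 */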
	xorl	%ecx,%ecx
	movl	%ecx,M_LOCKED		/* unlock the mutex */

	movl	%ecx,M_ILK		/* clear the interlock */

	popf				/* restore interrupt state */

	leave
#if	CONFIG_DTRACE
	/* Dtrace: LS_LCK_MTX_EXT_UNLOCK_RELEASE */
	LOCKSTAT_LABEL(_lck_mtx_ext_unlock_lockstat_patch_point)
	ret
	/* inherit lock pointer in %edx from above */
	LOCKSTAT_RECORD(LS_LCK_MTX_EXT_UNLOCK_RELEASE, %edx)
#endif
	ret


LEAF_ENTRY(lck_mtx_ilk_unlock)
	movl	L_ARG0,%edx		/* no indirection here */

	xorl	%eax,%eax
	movl	%eax,M_ILK		/* release the interlock */

	LEAF_RET


LEAF_ENTRY(_disable_preemption)
#if	MACH_RT
	_DISABLE_PREEMPTION
#endif	/* MACH_RT */
	LEAF_RET

LEAF_ENTRY(_enable_preemption)
#if	MACH_RT
#if	MACH_ASSERT
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL
	jg	1f
	pushl	%gs:CPU_PREEMPTION_LEVEL
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_enable_preemption: preemption_level(%d) <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION
#endif	/* MACH_RT */
	LEAF_RET
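/*
 * Rough C-level equivalent of the assertion above (sketch only):
 *
 *	if (cpu_preemption_level <= 0)
 *		panic("_enable_preemption: preemption_level(%d) <= 0!",
 *		      cpu_preemption_level);
 *	-- then _ENABLE_PREEMPTION drops the per-CPU preemption level
 *
 * cpu_preemption_level is a descriptive stand-in for the %gs-relative
 * CPU_PREEMPTION_LEVEL field used above.
 */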

LEAF_ENTRY(_enable_preemption_no_check)
#if	MACH_RT
#if	MACH_ASSERT
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL
	jg	1f
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_enable_preemption_no_check: preemption_level <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION_NO_CHECK
#endif	/* MACH_RT */
	LEAF_RET


LEAF_ENTRY(_mp_disable_preemption)
#if	MACH_RT
	_DISABLE_PREEMPTION
#endif	/* MACH_RT */
	LEAF_RET

LEAF_ENTRY(_mp_enable_preemption)
#if	MACH_RT
#if	MACH_ASSERT
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL
	jg	1f
	pushl	%gs:CPU_PREEMPTION_LEVEL
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_mp_enable_preemption: preemption_level (%d) <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION
#endif	/* MACH_RT */
	LEAF_RET

LEAF_ENTRY(_mp_enable_preemption_no_check)
#if	MACH_RT
#if	MACH_ASSERT
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL
	jg	1f
	pushl	$2f
	call	EXT(panic)
	hlt
	.data
2:	String	"_mp_enable_preemption_no_check: preemption_level <= 0!"
	.text
1:
#endif	/* MACH_ASSERT */
	_ENABLE_PREEMPTION_NO_CHECK
#endif	/* MACH_RT */
	LEAF_RET


LEAF_ENTRY(i_bit_set)
	movl	L_ARG0,%edx		/* bit number */
	movl	L_ARG1,%eax		/* address of the word */
	lock
	bts	%edx,(%eax)		/* atomically set the bit */
	LEAF_RET

LEAF_ENTRY(i_bit_clear)
	movl	L_ARG0,%edx		/* bit number */
	movl	L_ARG1,%eax		/* address of the word */
	lock
	btr	%edx,(%eax)		/* atomically clear the bit */
	LEAF_RET
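/*
 * C-level sketch of the two routines above (comment only).  Judging from
 * the argument order, L_ARG0 is the bit number and L_ARG1 the address of
 * the word, so they behave roughly like:
 *
 *	void i_bit_set(int bit, volatile int *addr);	-- atomic "lock bts"
 *	void i_bit_clear(int bit, volatile int *addr);	-- atomic "lock btr"
 *
 * Parameter names and types are illustrative, not quoted from a header.
 */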


LEAF_ENTRY(bit_lock)
	movl	L_ARG0,%ecx
	movl	L_ARG1,%eax
1:
	lock
	bts	%ecx,(%eax)
	jb	1b
	LEAF_RET
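/*
 * Sketch of the loop above (comment only): "lock bts" atomically sets the
 * bit and leaves its previous value in the carry flag, so the routine is
 * effectively
 *
 *	while (test_and_set_bit(bit, addr))	-- bit was already set?
 *		continue;			-- spin until we set it first
 *
 * test_and_set_bit is a descriptive name for the bts idiom, not a real
 * kernel routine.
 */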


LEAF_ENTRY(bit_lock_try)
	movl	L_ARG0,%ecx
	movl	L_ARG1,%eax
	lock
	bts	%ecx,(%eax)
	jb	bit_lock_failed
	LEAF_RET		/* %eax (the lock address) had better not be null! */
bit_lock_failed:
	xorl	%eax,%eax
	LEAF_RET
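/*
 * Return convention for bit_lock_try, as implemented above: on success it
 * returns with %eax still holding the word address from L_ARG1 (nonzero,
 * hence "true"); on failure it returns 0.  That is why the comment above
 * insists the address must not be null.
 */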

LEAF_ENTRY(bit_unlock)
	movl	L_ARG0,%ecx
	movl	L_ARG1,%eax
	lock
	btr	%ecx,(%eax)
	LEAF_RET

/*
 * Atomic primitives, prototyped in kern/simple_lock.h
 */
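/*
 * For reference, the routines below take (address, operand) in that order
 * and, except for the *_noret variants, return the updated value of the
 * word.  Approximately (the authoritative prototypes are in
 * kern/simple_lock.h, which should be consulted directly):
 *
 *	uint32_t hw_atomic_add(volatile uint32_t *dest, uint32_t delt);
 *	uint32_t hw_atomic_sub(volatile uint32_t *dest, uint32_t delt);
 *	uint32_t hw_atomic_or (volatile uint32_t *dest, uint32_t mask);
 *	uint32_t hw_atomic_and(volatile uint32_t *dest, uint32_t mask);
 */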
LEAF_ENTRY(hw_atomic_add)
	movl	L_ARG0, %ecx		/* Load address of operand */
	movl	L_ARG1, %eax		/* Load addend */
	movl	%eax, %edx
	lock
	xaddl	%eax, (%ecx)		/* Atomic exchange and add */
	addl	%edx, %eax		/* Calculate result */
	LEAF_RET
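/*
 * Sketch (comment only): "lock xadd" leaves the old value in %eax and
 * stores old+addend in memory; adding the saved addend back gives the
 * updated value, so the routine behaves like
 *
 *	old = *dest;			-- done atomically by lock xadd
 *	*dest = old + delta;
 *	return old + delta;		-- i.e. the new value
 */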

LEAF_ENTRY(hw_atomic_sub)
	movl	L_ARG0, %ecx		/* Load address of operand */
	movl	L_ARG1, %eax		/* Load subtrahend */
	negl	%eax
	movl	%eax, %edx
	lock
	xaddl	%eax, (%ecx)		/* Atomic exchange and add */
	addl	%edx, %eax		/* Calculate result */
	LEAF_RET

LEAF_ENTRY(hw_atomic_or)
	movl	L_ARG0, %ecx		/* Load address of operand */
	movl	(%ecx), %eax
1:
	movl	L_ARG1, %edx		/* Load mask */
	orl	%eax, %edx
	lock
	cmpxchgl	%edx, (%ecx)	/* Atomic CAS */
	jne	1b
	movl	%edx, %eax		/* Result */
	LEAF_RET
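/*
 * Sketch of the loop above (comment only):
 *
 *	do {
 *		old = *dest;		-- in %eax
 *		new = old | mask;	-- in %edx
 *	} while (!compare_and_swap(dest, old, new));
 *	return new;
 *
 * compare_and_swap stands in for "lock cmpxchgl"; on failure that
 * instruction reloads %eax with the current memory value before the retry.
 */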
/*
 * A variant of hw_atomic_or which doesn't return a value.
 * Since no result is needed, a single locked OR suffices in place of the
 * compare-and-swap loop above, making this comparatively more efficient.
 */

LEAF_ENTRY(hw_atomic_or_noret)
	movl	L_ARG0, %ecx		/* Load address of operand */
	movl	L_ARG1, %edx		/* Load mask */
	lock
	orl	%edx, (%ecx)		/* Atomic OR */
	LEAF_RET

LEAF_ENTRY(hw_atomic_and)
	movl	L_ARG0, %ecx		/* Load address of operand */
	movl	(%ecx), %eax
1:
	movl	L_ARG1, %edx		/* Load mask */
	andl	%eax, %edx
	lock
	cmpxchgl	%edx, (%ecx)	/* Atomic CAS */
	jne	1b
	movl	%edx, %eax		/* Result */
	LEAF_RET
/*
 * A variant of hw_atomic_and which doesn't return a value.
 * Since no result is needed, a single locked AND suffices in place of the
 * compare-and-swap loop above, making this comparatively more efficient.
 */

LEAF_ENTRY(hw_atomic_and_noret)
	movl	L_ARG0, %ecx		/* Load address of operand */
	movl	L_ARG1, %edx		/* Load mask */
	lock
	andl	%edx, (%ecx)		/* Atomic AND */
	LEAF_RET
