1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23 * Use is subject to license terms.
24 */
25
26#pragma ident	"%Z%%M%	%I%	%E% SMI"
27
28#if defined(lint)
29#include <sys/types.h>
30#include <sys/thread.h>
31#include <sys/cpuvar.h>
32#else	/* lint */
33#include "assym.h"
34#endif	/* lint */
35
36#include <sys/t_lock.h>
37#include <sys/mutex.h>
38#include <sys/mutex_impl.h>
39#include <sys/rwlock_impl.h>
40#include <sys/asm_linkage.h>
41#include <sys/machlock.h>
42#include <sys/machthread.h>
43#include <sys/lockstat.h>
44
45/* #define DEBUG */
46
47#ifdef DEBUG
48#include <sys/machparam.h>
49#endif /* DEBUG */
50
51/************************************************************************
52 *		ATOMIC OPERATIONS
53 */
54
/*
 * uint8_t	ldstub(uint8_t *cp)
 *
 * Store 0xFF at the specified location, and return its previous content.
 */

#if defined(lint)
uint8_t
ldstub(uint8_t *cp)
{
	uint8_t	rv;
	rv = *cp;
	*cp = 0xFF;
	return rv;
}
#else	/* lint */

	ENTRY(ldstub)
	retl
	ldstub	[%o0], %o0	! delay: atomically set *cp to 0xFF; the old
				! byte value lands in %o0 as the return value
	SET_SIZE(ldstub)
76
77#endif	/* lint */
78
79/************************************************************************
80 *		MEMORY BARRIERS -- see atomic.h for full descriptions.
81 */
82
#if defined(lint)

/* Lint-only C models; the real barriers are the assembly versions below. */

void
membar_enter(void)
{}

void
membar_exit(void)
{}

void
membar_producer(void)
{}

void
membar_consumer(void)
{}

#else	/* lint */

#ifdef SF_ERRATA_51
	/*
	 * SF_ERRATA_51 workaround: instead of returning with the membar in
	 * the delay slot of the retl, branch to a shared return stub so the
	 * membar executes in the branch's delay slot instead.
	 * NOTE(review): the precise erratum being avoided is not visible in
	 * this file -- confirm against the relevant CPU errata document.
	 */
	.align 32
	ENTRY(membar_return)
	retl
	nop
	SET_SIZE(membar_return)
#define	MEMBAR_RETURN	ba,pt %icc, membar_return
#else
#define	MEMBAR_RETURN	retl
#endif

	ENTRY(membar_enter)
	MEMBAR_RETURN
	membar	#StoreLoad|#StoreStore	! delay: acquire-side barrier
	SET_SIZE(membar_enter)

	ENTRY(membar_exit)
	MEMBAR_RETURN
	membar	#LoadStore|#StoreStore	! delay: release-side barrier
	SET_SIZE(membar_exit)

	ENTRY(membar_producer)
	MEMBAR_RETURN
	membar	#StoreStore		! delay: order stores vs. later stores
	SET_SIZE(membar_producer)

	ENTRY(membar_consumer)
	MEMBAR_RETURN
	membar	#LoadLoad		! delay: order loads vs. later loads
	SET_SIZE(membar_consumer)
133
134#endif	/* lint */
135
136/************************************************************************
137 *		MINIMUM LOCKS
138 */
139
140#if defined(lint)
141
/*
 * lock_try(lp), ulock_try(lp)
 *	- returns non-zero on success.
 *	- doesn't block interrupts so don't use this to spin on a lock.
 *	- uses "0xFF is busy, anything else is free" model.
 *
 *      ulock_try() is for a lock in the user address space.
 *      For all V7/V8 sparc systems they are same since the kernel and
 *      user are mapped in a user' context.
 *      For V9 platforms the lock_try and ulock_try are different impl.
 *
 * These C bodies are lint-only models of the assembly implementations
 * further below; they must describe the same observable behavior.
 */

int
lock_try(lock_t *lp)
{
	/* ldstub() returns the old byte: 0xFF if held, so XOR yields 0. */
	return (0xFF ^ ldstub(lp));
}

int
lock_spin_try(lock_t *lp)
{
	return (0xFF ^ ldstub(lp));
}

void
lock_set(lock_t *lp)
{
	extern void lock_set_spin(lock_t *);

	if (!lock_try(lp))
		lock_set_spin(lp);	/* contended: spin/block in C */
	membar_enter();
}

void
lock_clear(lock_t *lp)
{
	membar_exit();		/* release: order prior accesses first */
	*lp = 0;
}

int
ulock_try(lock_t *lp)
{
	return (0xFF ^ ldstub(lp));
}

void
ulock_clear(lock_t *lp)
{
	membar_exit();
	*lp = 0;
}
195
196#else	/* lint */
197
	.align	32
	ENTRY(lock_try)
	ldstub	[%o0], %o1		! try to set lock, get value in %o1
	brnz,pn	%o1, 1f			! nonzero old value: lock was held
	membar	#LoadLoad		! delay: acquire barrier (success path)
.lock_try_lockstat_patch_point:
	retl
	or	%o0, 1, %o0		! ensure lo32 != 0
1:
	retl
	clr	%o0			! delay: return 0 (lock was busy)
	SET_SIZE(lock_try)
210
	.align	32
	ENTRY(lock_spin_try)
	! Identical to lock_try() but with no lockstat patch point, for use
	! by spin-lock paths that must not recurse into lockstat probes.
	ldstub	[%o0], %o1		! try to set lock, get value in %o1
	brnz,pn	%o1, 1f			! nonzero old value: lock was held
	membar	#LoadLoad		! delay: acquire barrier (success path)
	retl
	or	%o0, 1, %o0		! ensure lo32 != 0
1:
	retl
	clr	%o0			! delay: return 0 (lock was busy)
	SET_SIZE(lock_spin_try)
222
	.align	32
	ENTRY(lock_set)
	ldstub	[%o0], %o1		! try to grab the lock
	brnz,pn	%o1, 1f			! go to C for the hard case
	membar	#LoadLoad		! delay: acquire barrier (success path)
.lock_set_lockstat_patch_point:
	retl
	nop
1:
	sethi	%hi(lock_set_spin), %o2	! load up for jump to C
	jmp	%o2 + %lo(lock_set_spin)	! tail call lock_set_spin(lp)
	nop				! delay: do nothing
	SET_SIZE(lock_set)
236
	ENTRY(lock_clear)
	membar	#LoadStore|#StoreStore	! release: drain accesses before drop
.lock_clear_lockstat_patch_point:
	retl
	clrb	[%o0]			! delay: clear the lock byte
	SET_SIZE(lock_clear)
243
	.align	32
	ENTRY(ulock_try)
	ldstuba	[%o0]ASI_USER, %o1	! ldstub via user ASI: lock is in
					! the user address space
	xor	%o1, 0xff, %o0		! return non-zero iff old value
					! was not 0xFF (i.e. lock was free)
	retl
	  membar	#LoadLoad	! delay: acquire barrier
	SET_SIZE(ulock_try)
251
	ENTRY(ulock_clear)
	membar  #LoadStore|#StoreStore	! release barrier before dropping
	retl
	  stba	%g0, [%o0]ASI_USER	! delay: clear lock (user ASI)
	SET_SIZE(ulock_clear)
257
258#endif	/* lint */
259
260
/*
 * lock_set_spl(lp, new_pil, *old_pil_addr)
 * 	Sets pil to new_pil, grabs lp, stores old pil in *old_pil_addr.
 */

#if defined(lint)

/* Lint-only C model of the assembly implementation below. */
/* ARGSUSED */
void
lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil_addr)
{
	extern int splr(int);
	extern void lock_set_spl_spin(lock_t *, int, u_short *, int);
	int old_pil;

	old_pil = splr(new_pil);	/* raise pil first, then try lock */
	if (!lock_try(lp)) {
		/* contended: C path spins, and saves old_pil itself */
		lock_set_spl_spin(lp, new_pil, old_pil_addr, old_pil);
	} else {
		*old_pil_addr = (u_short)old_pil;
		membar_enter();
	}
}
284
285#else	/* lint */
286
	ENTRY(lock_set_spl)
	rdpr	%pil, %o3			! %o3 = current pil
	cmp	%o3, %o1			! is current pil high enough?
	bl,a,pt %icc, 1f			! if not, write %pil in delay
	wrpr	%g0, %o1, %pil			! (annulled unless branch taken)
1:
	ldstub	[%o0], %o4			! try the lock
	brnz,pn	%o4, 2f				! go to C for the miss case
	membar	#LoadLoad			! delay: acquire barrier
.lock_set_spl_lockstat_patch_point:
	retl
	sth	%o3, [%o2]			! delay - save original pil
2:
	! tail call lock_set_spl_spin(lp, new_pil, old_pil_addr, old_pil);
	! args already in %o0-%o3
	sethi	%hi(lock_set_spl_spin), %o5	! load up jmp to C
	jmp	%o5 + %lo(lock_set_spl_spin)	! jmp to lock_set_spl_spin
	nop					! delay: do nothing
	SET_SIZE(lock_set_spl)
304
305#endif	/* lint */
306
/*
 * lock_clear_splx(lp, s)
 *	Releases lp and restores the saved priority level s.
 */

#if defined(lint)

/* Lint-only C model of the assembly implementation below. */
void
lock_clear_splx(lock_t *lp, int s)
{
	extern void splx(int);

	lock_clear(lp);		/* drop the lock (includes release barrier) */
	splx(s);		/* then restore the saved pil */
}
321
322#else	/* lint */
323
	ENTRY(lock_clear_splx)
	ldn	[THREAD_REG + T_CPU], %o2	! get CPU pointer
	membar	#LoadStore|#StoreStore		! release barrier before drop
	ld	[%o2 + CPU_BASE_SPL], %o2	! %o2 = cpu's base spl
	clrb	[%o0]				! clear lock
	cmp	%o2, %o1			! compare new to base
	movl	%xcc, %o1, %o2			! use new pri if base is less
.lock_clear_splx_lockstat_patch_point:
	retl
	wrpr	%g0, %o2, %pil			! delay: restore %pil
	SET_SIZE(lock_clear_splx)
335
336#endif	/* lint */
337
338/*
339 * mutex_enter() and mutex_exit().
340 *
341 * These routines handle the simple cases of mutex_enter() (adaptive
342 * lock, not held) and mutex_exit() (adaptive lock, held, no waiters).
343 * If anything complicated is going on we punt to mutex_vector_enter().
344 *
345 * mutex_tryenter() is similar to mutex_enter() but returns zero if
346 * the lock cannot be acquired, nonzero on success.
347 *
348 * If mutex_exit() gets preempted in the window between checking waiters
349 * and clearing the lock, we can miss wakeups.  Disabling preemption
350 * in the mutex code is prohibitively expensive, so instead we detect
351 * mutex preemption by examining the trapped PC in the interrupt path.
352 * If we interrupt a thread in mutex_exit() that has not yet cleared
353 * the lock, pil_interrupt() resets its PC back to the beginning of
354 * mutex_exit() so it will check again for waiters when it resumes.
355 *
356 * The lockstat code below is activated when the lockstat driver
357 * calls lockstat_hot_patch() to hot-patch the kernel mutex code.
358 * Note that we don't need to test lockstat_event_mask here -- we won't
359 * patch this code in unless we're gathering ADAPTIVE_HOLD lockstats.
360 */
361
#if defined (lint)

/* Lint-only prototypes; the real code is the assembly below. */

/* ARGSUSED */
void
mutex_enter(kmutex_t *lp)
{}

/* ARGSUSED */
int
mutex_tryenter(kmutex_t *lp)
{ return (0); }

/* ARGSUSED */
void
mutex_exit(kmutex_t *lp)
{}

/* ARGSUSED */
void *
mutex_owner_running(mutex_impl_t *lp)
{ return (NULL); }
383
384#else
	.align	32
	ENTRY(mutex_enter)
	mov	THREAD_REG, %o1
	casx	[%o0], %g0, %o1			! try to acquire as adaptive:
						! swap curthread in iff owner
						! word is currently 0
	brnz,pn	%o1, 1f				! locked or wrong type
	membar	#LoadLoad			! delay: acquire barrier
.mutex_enter_lockstat_patch_point:
	retl
	nop
1:
	sethi	%hi(mutex_vector_enter), %o2	! load up for jump to C
	jmp	%o2 + %lo(mutex_vector_enter)	! tail call the hard case
	nop
	SET_SIZE(mutex_enter)
399
	ENTRY(mutex_tryenter)
	mov	THREAD_REG, %o1
	casx	[%o0], %g0, %o1			! try to acquire as adaptive
	brnz,pn	%o1, 1f				! locked or wrong type: go to C
	membar	#LoadLoad			! delay: acquire barrier
.mutex_tryenter_lockstat_patch_point:
	retl
	or	%o0, 1, %o0			! ensure lo32 != 0
1:
	sethi	%hi(mutex_vector_tryenter), %o2		! hi bits
	jmp	%o2 + %lo(mutex_vector_tryenter)	! go to C
	nop
	SET_SIZE(mutex_tryenter)
413
	ENTRY(mutex_adaptive_tryenter)
	! Like mutex_tryenter() but never falls back to C: on any failure
	! (held, or not a simple adaptive lock) it just returns 0.
	mov	THREAD_REG, %o1
	casx	[%o0], %g0, %o1			! try to acquire as adaptive
	brnz,pn	%o1, 0f				! locked or wrong type
	membar	#LoadLoad			! delay: acquire barrier
	retl
	or	%o0, 1, %o0			! ensure lo32 != 0
0:
	retl
	mov	%g0, %o0			! delay: return 0 (failure)
	SET_SIZE(mutex_adaptive_tryenter)
425
	! these need to be together and cache aligned for performance.
	.align 64
	.global	mutex_exit_critical_size
	.global	mutex_exit_critical_start
	.global mutex_owner_running_critical_size
	.global mutex_owner_running_critical_start

mutex_exit_critical_size = .mutex_exit_critical_end - mutex_exit_critical_start

	.align	32

	! The span [mutex_exit_critical_start, .mutex_exit_critical_end) is
	! the preemption-vulnerable window described in the block comment
	! above: pil_interrupt() rewinds an interrupted PC in this range back
	! to mutex_exit_critical_start so the waiters check is redone.
	ENTRY(mutex_exit)
mutex_exit_critical_start:		! If we are interrupted, restart here
	ldn	[%o0], %o1		! get the owner field
	membar	#LoadStore|#StoreStore	! release barrier before clearing
	cmp	THREAD_REG, %o1		! do we own lock with no waiters?
	be,a,pt	%ncc, 1f		! if so, drive on ...
	stn	%g0, [%o0]		! delay: clear lock if we owned it
.mutex_exit_critical_end:		! for pil_interrupt() hook
	ba,a,pt	%xcc, mutex_vector_exit	! go to C for the hard cases
1:
.mutex_exit_lockstat_patch_point:
	retl
	nop
	SET_SIZE(mutex_exit)
451
mutex_owner_running_critical_size = .mutex_owner_running_critical_end - mutex_owner_running_critical_start

	.align  32

	! Returns the owner's CPU pointer if the owner thread is currently
	! running on that CPU, else 0.  The critical span is restartable by
	! pil_interrupt() so the owner/t_cpu/cpu_thread reads stay coherent.
	ENTRY(mutex_owner_running)
mutex_owner_running_critical_start:	! If interrupted restart here
	ldn	[%o0], %o1		! get the owner field
	and	%o1, MUTEX_THREAD, %o1	! remove the waiters bit if any
	brz,pn	%o1, 1f			! no owner: return 0
	nop
	ldn	[%o1+T_CPU], %o2	! get owner->t_cpu
	ldn	[%o2+CPU_THREAD], %o3	! get owner->t_cpu->cpu_thread
.mutex_owner_running_critical_end:	! for pil_interrupt() hook
	cmp	%o1, %o3		! owner == running thread?
	be,a,pt	%xcc, 2f		! yes, go return cpu
	nop
1:
	retl
	mov	%g0, %o0		! return 0 (owner not running)
2:
	retl
	mov	%o2, %o0		! owner running, return cpu
	SET_SIZE(mutex_owner_running)
475
476#endif	/* lint */
477
478/*
479 * rw_enter() and rw_exit().
480 *
481 * These routines handle the simple cases of rw_enter (write-locking an unheld
482 * lock or read-locking a lock that's neither write-locked nor write-wanted)
483 * and rw_exit (no waiters or not the last reader).  If anything complicated
484 * is going on we punt to rw_enter_sleep() and rw_exit_wakeup(), respectively.
485 */
#if defined(lint)

/* Lint-only prototypes; the real code is the assembly below. */

/* ARGSUSED */
void
rw_enter(krwlock_t *lp, krw_t rw)
{}

/* ARGSUSED */
void
rw_exit(krwlock_t *lp)
{}
497
498#else
499
	.align	16
	ENTRY(rw_enter)
	cmp	%o1, RW_WRITER			! entering as writer?
	be,a,pn	%icc, 2f			! if so, go do it ...
	or	THREAD_REG, RW_WRITE_LOCKED, %o5 ! delay: %o5 = owner
	! --- reader path ---
	ld	[THREAD_REG + T_KPRI_REQ], %o3	! begin THREAD_KPRI_REQUEST()
	ldn	[%o0], %o4			! %o4 = old lock value
	inc	%o3				! bump kpri
	st	%o3, [THREAD_REG + T_KPRI_REQ]	! store new kpri
1:
	andcc	%o4, RW_WRITE_CLAIMED, %g0	! write-locked or write-wanted?
	bz,pt	%xcc, 3f	 		! if not, try to grab read lock
	add	%o4, RW_READ_LOCK, %o5		! delay: increment hold count
	sethi	%hi(rw_enter_sleep), %o2	! claimed by a writer:
	jmp	%o2 + %lo(rw_enter_sleep)	! jmp to rw_enter_sleep
	nop					! delay: do nothing
3:
	casx	[%o0], %o4, %o5			! try to grab read lock
	cmp	%o4, %o5			! did we get it?
#ifdef sun4v
	! sun4v: on cas failure punt straight to C rather than retrying here
	be,a,pt %xcc, 0f
	membar  #LoadLoad			! delay: acquire (annulled on miss)
	sethi	%hi(rw_enter_sleep), %o2	! load up jump
	jmp	%o2 + %lo(rw_enter_sleep)	! jmp to rw_enter_sleep
	nop					! delay: do nothing
0:
#else /* sun4v */
	bne,pn	%xcc, 1b			! if not, try again
	mov	%o5, %o4			! delay: %o4 = old lock value
	membar	#LoadLoad			! acquire barrier on success
#endif /* sun4v */
.rw_read_enter_lockstat_patch_point:
	retl
	nop
	! --- writer path ---
2:
	casx	[%o0], %g0, %o5			! try to grab write lock
	brz,pt %o5, 4f				! branch around if we got it
	membar	#LoadLoad			! done regardless of where we go
	sethi	%hi(rw_enter_sleep), %o2
	jmp	%o2 + %lo(rw_enter_sleep)	! jump to rw_enter_sleep if not
	nop					! delay: do nothing
4:
.rw_write_enter_lockstat_patch_point:
	retl
	nop
	SET_SIZE(rw_enter)
546
	.align	16
	ENTRY(rw_exit)
	ldn	[%o0], %o4			! %o4 = old lock value
	membar	#LoadStore|#StoreStore		! membar_exit()
	subcc	%o4, RW_READ_LOCK, %o5		! %o5 = new lock value if reader
	bnz,pn	%xcc, 2f			! single reader, no waiters?
	clr	%o1				! delay: %o1 = 0, new value for
						! the write-unlock cas at 3:
1:
	! Last-reader (or still-held multi-reader) drop path.
	ld	[THREAD_REG + T_KPRI_REQ], %g1	! begin THREAD_KPRI_RELEASE()
	srl	%o4, RW_HOLD_COUNT_SHIFT, %o3	! %o3 = hold count (lockstat)
	casx	[%o0], %o4, %o5			! try to drop lock
	cmp	%o4, %o5			! did we succeed?
	bne,pn	%xcc, rw_exit_wakeup		! if not, go to C
	dec	%g1				! delay: drop kpri
.rw_read_exit_lockstat_patch_point:
	retl
	st	%g1, [THREAD_REG + T_KPRI_REQ]	! delay: store new kpri
2:
	andcc	%o4, RW_WRITE_LOCKED, %g0	! are we a writer?
	bnz,a,pt %xcc, 3f
	or	THREAD_REG, RW_WRITE_LOCKED, %o4 ! delay: %o4 = expected owner
	cmp	%o5, RW_READ_LOCK		! would lock still be held?
	bge,pt	%xcc, 1b			! if so, go ahead and drop it
	nop
	ba,pt	%xcc, rw_exit_wakeup		! otherwise, wake waiters
	nop
3:
	casx	[%o0], %o4, %o1			! try to drop write lock
	cmp	%o4, %o1			! did we succeed?
	bne,pn	%xcc, rw_exit_wakeup		! if not, go to C
	nop
.rw_write_exit_lockstat_patch_point:
	retl
	nop
	SET_SIZE(rw_exit)
582
583#endif
584
#if defined(lint)

/* Lint-only prototype; the real code is the assembly below. */
void
lockstat_hot_patch(void)
{}
590
591#else
592
/* Raw instruction encodings used to build the patch words. */
#define	RETL			0x81c3e008
#define	NOP			0x01000000
#define BA			0x10800000

#define	DISP22			((1 << 22) - 1)	/* mask for ba's 22-bit disp */
#define	ANNUL			0x20000000	/* annul bit for the branch */

/*
 * HOT_PATCH_COMMON(addr, event, normal_instr, annul, rs)
 *
 * Expanded inline in lockstat_hot_patch() below (which sets
 * %i0 = lockstat_probemap).  It does two things:
 *
 * 1. Emits a probe trampoline (the code before label 1:).  When the patch
 *    point at 'addr' has been redirected here, this runs with the caller's
 *    %o0 (the lock) visible as %i0 after the save.  It bumps t_lockstat
 *    to mark lockstat recursion, re-checks that the probe for 'event' is
 *    still enabled, and if so calls through the lockstat_probe pointer
 *    with (probe id, lock, rs) before returning 1 (for the try-enter
 *    patch points).
 *
 * 2. At label 1: computes the ba displacement from 'addr' to the
 *    trampoline, ORs in the ANNUL bit when requested, then calls
 *    hot_patch_kernel_text(addr, instr, 4) with either that branch (probe
 *    enabled) or 'normal_instr' (probe disabled).
 */
#define	HOT_PATCH_COMMON(addr, event, normal_instr, annul, rs)		\
	ba	1f;							\
	rd	%pc, %o0;						\
	save	%sp, -SA(MINFRAME), %sp;				\
	set	lockstat_probemap, %l1;					\
	ld	[%l1 + (event * DTRACE_IDSIZE)], %o0;			\
	brz,pn	%o0, 0f;						\
	ldub	[THREAD_REG + T_LOCKSTAT], %l0;				\
	add	%l0, 1, %l2;						\
	stub	%l2, [THREAD_REG + T_LOCKSTAT];				\
	set	lockstat_probe, %g1;					\
	ld	[%l1 + (event * DTRACE_IDSIZE)], %o0;			\
	brz,a,pn %o0, 0f;						\
	stub	%l0, [THREAD_REG + T_LOCKSTAT];				\
	ldn	[%g1], %g2;						\
	mov	rs, %o2;						\
	jmpl	%g2, %o7;						\
	mov	%i0, %o1;						\
	stub	%l0, [THREAD_REG + T_LOCKSTAT];				\
0:	ret;								\
	restore	%g0, 1, %o0;	/* for mutex_tryenter / lock_try */	\
1:	set	addr, %o1;						\
	sub	%o0, %o1, %o0;						\
	srl	%o0, 2, %o0;						\
	inc	%o0;							\
	set	DISP22, %o1;						\
	and	%o1, %o0, %o0;						\
	set	BA, %o1;						\
	or	%o1, %o0, %o0;						\
	sethi	%hi(annul), %o2;					\
	add	%o0, %o2, %o2;						\
	set	addr, %o0;						\
	set	normal_instr, %o1;					\
	ld	[%i0 + (event * DTRACE_IDSIZE)], %o3;			\
	tst	%o3;							\
	movnz	%icc, %o2, %o1;						\
	call	hot_patch_kernel_text;					\
	mov	4, %o2;							\
	membar	#Sync

/* Patch point whose lock address is the probe argument (%i1 unused). */
#define	HOT_PATCH(addr, event, normal_instr)	\
	HOT_PATCH_COMMON(addr, event, normal_instr, 0, %i1)

/* Patch point with an extra constant probe argument (e.g. RW_WRITER). */
#define	HOT_PATCH_ARG(addr, event, normal_instr, arg)	\
	HOT_PATCH_COMMON(addr, event, normal_instr, 0, arg)

/* Patch point whose delay slot must be annulled by the patched branch. */
#define HOT_PATCH_ANNULLED(addr, event, normal_instr)	\
	HOT_PATCH_COMMON(addr, event, normal_instr, ANNUL, %i1)
648
	! Hot-patch every lockstat patch point in this file: each patch point
	! is switched between its normal instruction (retl) and a branch to
	! the per-event trampoline, depending on whether the corresponding
	! entry in lockstat_probemap is enabled.
	ENTRY(lockstat_hot_patch)
	save	%sp, -SA(MINFRAME), %sp
	set	lockstat_probemap, %i0		! %i0 used by every HOT_PATCH
	HOT_PATCH(.mutex_enter_lockstat_patch_point,
		LS_MUTEX_ENTER_ACQUIRE, RETL)
	HOT_PATCH_ANNULLED(.mutex_tryenter_lockstat_patch_point,
		LS_MUTEX_TRYENTER_ACQUIRE, RETL)
	HOT_PATCH(.mutex_exit_lockstat_patch_point,
		LS_MUTEX_EXIT_RELEASE, RETL)
	HOT_PATCH(.rw_write_enter_lockstat_patch_point,
		LS_RW_ENTER_ACQUIRE, RETL)
	HOT_PATCH(.rw_read_enter_lockstat_patch_point,
		LS_RW_ENTER_ACQUIRE, RETL)
	HOT_PATCH_ARG(.rw_write_exit_lockstat_patch_point,
		LS_RW_EXIT_RELEASE, RETL, RW_WRITER)
	HOT_PATCH_ARG(.rw_read_exit_lockstat_patch_point,
		LS_RW_EXIT_RELEASE, RETL, RW_READER)
	HOT_PATCH(.lock_set_lockstat_patch_point,
		LS_LOCK_SET_ACQUIRE, RETL)
	HOT_PATCH_ANNULLED(.lock_try_lockstat_patch_point,
		LS_LOCK_TRY_ACQUIRE, RETL)
	HOT_PATCH(.lock_clear_lockstat_patch_point,
		LS_LOCK_CLEAR_RELEASE, RETL)
	HOT_PATCH(.lock_set_spl_lockstat_patch_point,
		LS_LOCK_SET_SPL_ACQUIRE, RETL)
	HOT_PATCH(.lock_clear_splx_lockstat_patch_point,
		LS_LOCK_CLEAR_SPLX_RELEASE, RETL)
	ret
	restore
	SET_SIZE(lockstat_hot_patch)
679
680#endif	/* lint */
681
682/*
683 * asm_mutex_spin_enter(mutex_t *)
684 *
685 * For use by assembly interrupt handler only.
686 * Does not change spl, since the interrupt handler is assumed to be
687 * running at high level already.
688 * Traps may be off, so cannot panic.
689 * Does not keep statistics on the lock.
690 *
691 * Entry:	%l6 - points to mutex
692 * 		%l7 - address of call (returns to %l7+8)
693 * Uses:	%l6, %l5
694 */
695#ifndef lint
	.align 16
	ENTRY_NP(asm_mutex_spin_enter)
	ldstub	[%l6 + M_SPINLOCK], %l5	! try to set lock, get value in %l5
1:
	tst	%l5
	bnz	3f			! lock already held - go spin
	nop
2:
	jmp	%l7 + 8			! return
	membar	#LoadLoad		! delay: acquire barrier
	!
	! Spin on lock without using an atomic operation to prevent the caches
	! from unnecessarily moving ownership of the line around.
	!
3:
	ldub	[%l6 + M_SPINLOCK], %l5	! plain load while spinning
4:
	tst	%l5
	bz,a	1b			! lock appears to be free, try again
	ldstub	[%l6 + M_SPINLOCK], %l5	! delay slot - try to set lock

	! Still held: if the system is panicking, pretend we got the lock so
	! the panic path can make progress (we cannot panic here ourselves).
	sethi	%hi(panicstr) , %l5
	ldn	[%l5 + %lo(panicstr)], %l5
	tst 	%l5
	bnz	2b			! after panic, feign success
	nop
	b	4b			! keep spinning on a plain load
	ldub	[%l6 + M_SPINLOCK], %l5	! delay - reload lock
	SET_SIZE(asm_mutex_spin_enter)
725#endif /* lint */
726
727/*
728 * asm_mutex_spin_exit(mutex_t *)
729 *
730 * For use by assembly interrupt handler only.
731 * Does not change spl, since the interrupt handler is assumed to be
732 * running at high level already.
733 *
734 * Entry:	%l6 - points to mutex
735 * 		%l7 - address of call (returns to %l7+8)
736 * Uses:	none
737 */
738#ifndef lint
	ENTRY_NP(asm_mutex_spin_exit)
	membar	#LoadStore|#StoreStore	! release barrier before dropping
	jmp	%l7 + 8			! return
	clrb	[%l6 + M_SPINLOCK]	! delay - clear lock
	SET_SIZE(asm_mutex_spin_exit)
744#endif /* lint */
745
/*
 * thread_onproc()
 * Set thread in onproc state for the specified CPU.
 * Also set the thread lock pointer to the CPU's onproc lock.
 * Since the new lock isn't held, the store ordering is important.
 * If not done in assembler, the compiler could reorder the stores.
 */
#if defined(lint)

/* Lint-only C model: state must be stored before the lock pointer. */
void
thread_onproc(kthread_id_t t, cpu_t *cp)
{
	t->t_state = TS_ONPROC;
	t->t_lockp = &cp->cpu_thread_lock;
}
761
762#else	/* lint */
763
	ENTRY(thread_onproc)
	set	TS_ONPROC, %o2		! TS_ONPROC state
	st	%o2, [%o0 + T_STATE]	! store state (must precede t_lockp)
	add	%o1, CPU_THREAD_LOCK, %o3 ! pointer to disp_lock while running
	retl				! return
	stn	%o3, [%o0 + T_LOCKP]	! delay - store new lock pointer
	SET_SIZE(thread_onproc)
771
772#endif	/* lint */
773
774/* delay function used in some mutex code - just do 3 nop cas ops */
775#if defined(lint)
776
/* ARGSUSED */
void
cas_delay(void *addr)
{}
#else	/* lint */
	! Backoff delay for mutex spins: three no-op casx operations
	! (compare 0, swap 0) that consume time without changing *addr.
	ENTRY(cas_delay)
	casx [%o0], %g0, %g0
	casx [%o0], %g0, %g0
	retl
	casx [%o0], %g0, %g0		! delay: third no-op casx
	SET_SIZE(cas_delay)
788#endif	/* lint */
789
790#if defined(lint)
791
/*
 * alternative delay function for some niagara processors.   The rd
 * instruction uses less resources than casx on those cpus.
 */
/* ARGSUSED */
void
rdccr_delay(void)
{}
#else	/* lint */
	! Three reads of %ccr discarded into %g0: pure time delay.
	ENTRY(rdccr_delay)
	rd	%ccr, %g0
	rd	%ccr, %g0
	retl
	rd	%ccr, %g0		! delay: third read
	SET_SIZE(rdccr_delay)
807#endif	/* lint */
808
809/*
810 * mutex_delay_default(void)
811 * Spins for approx a few hundred processor cycles and returns to caller.
812 */
813#if defined(lint)
814
void
mutex_delay_default(void)
{}

#else	/* lint */

	ENTRY(mutex_delay_default)
	mov	72,%o0			! loop count (~a few hundred cycles)
1:	brgz	%o0, 1b			! spin until counter reaches zero
	dec	%o0			! delay: decrement counter
	retl
	nop
	SET_SIZE(mutex_delay_default)
828
829#endif  /* lint */
830