/*-
 * See the file LICENSE for redistribution information.
 *
 * Copyright (c) 1996,2008 Oracle.  All rights reserved.
 *
 * $Id: mutex_int.h,v 12.41 2008/01/08 20:58:18 bostic Exp $
 */

#ifndef _DB_MUTEX_INT_H_
#define	_DB_MUTEX_INT_H_

#if defined(__cplusplus)
extern "C" {
#endif

/*********************************************************************
 * POSIX.1 pthreads interface.
 *********************************************************************/
#if defined(HAVE_MUTEX_PTHREADS)
#include <pthread.h>

#define	MUTEX_FIELDS							\
	pthread_mutex_t mutex;		/* Mutex. */			\
	pthread_cond_t  cond;		/* Condition variable. */
#endif
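
/*
 * Because DB mutexes may live in shared memory mapped by multiple
 * processes, fields like the ones above are normally initialized with the
 * PTHREAD_PROCESS_SHARED attribute.  A minimal sketch of that setup
 * (illustrative only -- shared_mutex_init is a hypothetical helper, the
 * real initialization lives in the mutex subsystem and handles errors
 * and attribute cleanup more carefully):
 *
 *	#include <pthread.h>
 *
 *	static int
 *	shared_mutex_init(pthread_mutex_t *mutexp, pthread_cond_t *condp)
 *	{
 *		pthread_mutexattr_t mattr;
 *		pthread_condattr_t cattr;
 *		int ret;
 *
 *		if ((ret = pthread_mutexattr_init(&mattr)) != 0)
 *			return (ret);
 *		(void)pthread_mutexattr_setpshared(
 *		    &mattr, PTHREAD_PROCESS_SHARED);
 *		ret = pthread_mutex_init(mutexp, &mattr);
 *		(void)pthread_mutexattr_destroy(&mattr);
 *		if (ret != 0)
 *			return (ret);
 *
 *		if ((ret = pthread_condattr_init(&cattr)) != 0)
 *			return (ret);
 *		(void)pthread_condattr_setpshared(
 *		    &cattr, PTHREAD_PROCESS_SHARED);
 *		ret = pthread_cond_init(condp, &cattr);
 *		(void)pthread_condattr_destroy(&cattr);
 *		return (ret);
 *	}
 */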

#ifdef HAVE_MUTEX_UI_THREADS
#include <thread.h>
#endif

/*********************************************************************
 * Solaris lwp threads interface.
 *
 * !!!
 * We use LWP mutexes on Solaris instead of UI or POSIX mutexes (both of
 * which are available), for two reasons.  First, the Solaris C library
 * includes versions of both the UI and POSIX thread mutex interfaces, but
 * they are broken in that they don't support inter-process locking, and
 * there's no way to detect the failure, e.g., calls to configure the
 * mutexes for inter-process locking succeed without error.  So, we use LWP
 * mutexes so that we don't fail in fairly undetectable ways because the
 * application wasn't linked with the appropriate threads library.  Second,
 * there were bugs in SunOS 5.7 (Solaris 7) where if an application loaded
 * the C library before loading the libthread/libpthread threads libraries
 * (e.g., by using dlopen to load the DB library), the pwrite64 interface
 * would be translated into a call to pwrite and DB would drop core.
 *********************************************************************/
#ifdef HAVE_MUTEX_SOLARIS_LWP
/*
 * XXX
 * Don't change <synch.h> to <sys/lwp.h> -- although lwp.h is listed in the
 * Solaris manual page as the correct include to use, it causes the Solaris
 * compiler on SunOS 5.6 (Solaris 2.6) to fail.
 */
#include <synch.h>

#define	MUTEX_FIELDS							\
	lwp_mutex_t mutex;		/* Mutex. */			\
	lwp_cond_t cond;		/* Condition variable. */
#endif

/*********************************************************************
 * Solaris/Unixware threads interface.
 *********************************************************************/
#ifdef HAVE_MUTEX_UI_THREADS
#include <thread.h>
#include <synch.h>

#define	MUTEX_FIELDS							\
	mutex_t mutex;			/* Mutex. */			\
	cond_t  cond;			/* Condition variable. */
#endif

/*********************************************************************
 * AIX C library functions.
 *********************************************************************/
#ifdef HAVE_MUTEX_AIX_CHECK_LOCK
#include <sys/atomic_op.h>
typedef int tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
#define	MUTEX_INIT(x)	0
#define	MUTEX_SET(x)	(!_check_lock(x, 0, 1))
#define	MUTEX_UNSET(x)	_clear_lock(x, 0)
#endif
#endif

/*********************************************************************
 * Apple/Darwin library functions.
 *********************************************************************/
#ifdef HAVE_MUTEX_DARWIN_SPIN_LOCK_TRY
typedef u_int32_t tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
extern int _spin_lock_try(tsl_t *);
extern void _spin_unlock(tsl_t *);
#define	MUTEX_SET(tsl)          _spin_lock_try(tsl)
#define	MUTEX_UNSET(tsl)        _spin_unlock(tsl)
#define	MUTEX_INIT(tsl)         (MUTEX_UNSET(tsl), 0)
#endif
#endif

/*********************************************************************
 * General C library functions (msemaphore).
 *
 * !!!
 * Check for HPPA as a special case, because it requires unusual alignment,
 * and doesn't support semaphores in malloc(3) or shmget(2) memory.
 *
 * !!!
 * Do not remove the MSEM_IF_NOWAIT flag.  The problem is that if a single
 * process makes two msem_lock() calls in a row, the second one returns an
 * error.  We depend on the fact that we can lock against ourselves in the
 * locking subsystem, where we set up a mutex so that we can block ourselves.
 * Tested on OSF1 v4.0.
 *********************************************************************/
#ifdef HAVE_MUTEX_HPPA_MSEM_INIT
#define	MUTEX_ALIGN	16
#endif

#if defined(HAVE_MUTEX_MSEM_INIT) || defined(HAVE_MUTEX_HPPA_MSEM_INIT)
#include <sys/mman.h>
typedef msemaphore tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
#define	MUTEX_INIT(x)	(msem_init(x, MSEM_UNLOCKED) <= (msemaphore *)0)
#define	MUTEX_SET(x)	(!msem_lock(x, MSEM_IF_NOWAIT))
#define	MUTEX_UNSET(x)	msem_unlock(x, 0)
#endif
#endif
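
/*
 * The MSEM_IF_NOWAIT note above matters because the locking subsystem
 * sometimes acquires a mutex and then deliberately "locks against itself"
 * so it can block until another thread of control releases the mutex.  A
 * rough sketch of that pattern in terms of the generic macros (illustrative
 * only; the real code paths add statistics and spin/yield logic):
 *
 *	tsl_t m;
 *	int ret;
 *
 *	if ((ret = MUTEX_INIT(&m)) != 0)
 *		return (ret);
 *	(void)MUTEX_SET(&m);		-- acquire the mutex
 *
 *	-- Later, to wait for another thread of control:
 *	while (!MUTEX_SET(&m))		-- fails while the lock is held
 *		sleep_or_yield();	-- hypothetical back-off
 *	MUTEX_UNSET(&m);
 *
 * With MSEM_IF_NOWAIT, the second msem_lock() call simply fails instead of
 * returning an error for a lock the process already owns, so the loop works.
 */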

/*********************************************************************
 * Plan 9 library functions.
 *********************************************************************/
#ifdef HAVE_MUTEX_PLAN9
typedef Lock tsl_t;

#define	MUTEX_INIT(x)	(memset(x, 0, sizeof(Lock)), 0)
#define	MUTEX_SET(x)	canlock(x)
#define	MUTEX_UNSET(x)	unlock(x)
#endif

/*********************************************************************
 * Reliant UNIX C library functions.
 *********************************************************************/
#ifdef HAVE_MUTEX_RELIANTUNIX_INITSPIN
#include <ulocks.h>
typedef spinlock_t tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
#define	MUTEX_INIT(x)	(initspin(x, 1), 0)
#define	MUTEX_SET(x)	(cspinlock(x) == 0)
#define	MUTEX_UNSET(x)	spinunlock(x)
#endif
#endif

/*********************************************************************
 * General C library functions (POSIX 1003.1 sema_XXX).
 *
 * !!!
 * Never selected by autoconf in this release (semaphore calls are known
 * not to work in Solaris 5.5).
 *********************************************************************/
#ifdef HAVE_MUTEX_SEMA_INIT
#include <synch.h>
typedef sema_t tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
#define	MUTEX_DESTROY(x) sema_destroy(x)
#define	MUTEX_INIT(x)	 (sema_init(x, 1, USYNC_PROCESS, NULL) != 0)
#define	MUTEX_SET(x)	 (sema_wait(x) == 0)
#define	MUTEX_UNSET(x)	 sema_post(x)
#endif
#endif

/*********************************************************************
 * SGI C library functions.
 *********************************************************************/
#ifdef HAVE_MUTEX_SGI_INIT_LOCK
#include <abi_mutex.h>
typedef abilock_t tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
#define	MUTEX_INIT(x)	(init_lock(x) != 0)
#define	MUTEX_SET(x)	(!acquire_lock(x))
#define	MUTEX_UNSET(x)	release_lock(x)
#endif
#endif

/*********************************************************************
 * Solaris C library functions.
 *
 * !!!
 * These are undocumented functions, but they're the only ones that work
 * correctly as far as we know.
 *********************************************************************/
#ifdef HAVE_MUTEX_SOLARIS_LOCK_TRY
#include <sys/atomic.h>
#include <sys/machlock.h>
typedef lock_t tsl_t;

/*
 * The functions are declared in <sys/machlock.h>, but under #ifdef KERNEL.
 * Re-declare them here to avoid warnings.
 */
extern  int _lock_try(lock_t *);
extern void _lock_clear(lock_t *);

#ifdef LOAD_ACTUAL_MUTEX_CODE
#define	MUTEX_INIT(x)	0
#define	MUTEX_SET(x)	_lock_try(x)
#define	MUTEX_UNSET(x)	_lock_clear(x)
#define	MUTEX_MEMBAR(x)	membar_enter()
#endif
#endif

/*********************************************************************
 * VMS.
 *********************************************************************/
#ifdef HAVE_MUTEX_VMS
#include <sys/mman.h>
#include <builtins.h>
typedef volatile unsigned char tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
#ifdef __ALPHA
#define	MUTEX_SET(tsl)		(!__TESTBITSSI(tsl, 0))
#else /* __VAX */
#define	MUTEX_SET(tsl)		(!(int)_BBSSI(0, tsl))
#endif
#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
#define	MUTEX_INIT(tsl)         (MUTEX_UNSET(tsl), 0)
#endif
#endif

/*********************************************************************
 * VxWorks
 * Use basic binary semaphores in VxWorks, as we currently do not need
 * any special features.  We do need the ability to single-thread the
 * entire system, however, because VxWorks doesn't support the open(2)
 * flag O_EXCL, the mechanism we normally use to single-thread access
 * when we're first looking for a DB environment.
 *********************************************************************/
#ifdef HAVE_MUTEX_VXWORKS
#include "taskLib.h"
typedef SEM_ID tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
#define	MUTEX_SET(tsl)		(semTake((*tsl), WAIT_FOREVER) == OK)
#define	MUTEX_UNSET(tsl)	(semGive((*tsl)))
#define	MUTEX_INIT(tsl)							\
	((*(tsl) = semBCreate(SEM_Q_FIFO, SEM_FULL)) == NULL)
#define	MUTEX_DESTROY(tsl)	semDelete(*tsl)
#endif
#endif

/*********************************************************************
 * Win16
 *
 * Win16 spinlocks are simple because we cannot possibly be preempted.
 *
 * !!!
 * We should simplify this by always returning a no-need-to-lock lock
 * when we initialize the mutex.
 *********************************************************************/
#ifdef HAVE_MUTEX_WIN16
typedef unsigned int tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
#define	MUTEX_INIT(x)		0
#define	MUTEX_SET(tsl)		(*(tsl) = 1)
#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
#endif
#endif

/*********************************************************************
 * Win32
 *********************************************************************/
#if defined(HAVE_MUTEX_WIN32) || defined(HAVE_MUTEX_WIN32_GCC)
#define	MUTEX_FIELDS							\
	LONG volatile tas;						\
	LONG nwaiters;							\
	u_int32_t id;	/* ID used for creating events */

#if defined(LOAD_ACTUAL_MUTEX_CODE)
#define	MUTEX_SET(tsl)		(!InterlockedExchange((PLONG)tsl, 1))
#define	MUTEX_UNSET(tsl)	InterlockedExchange((PLONG)tsl, 0)
#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)

/*
 * From Intel's performance tuning documentation (and see SR #6975):
 * ftp://download.intel.com/design/perftool/cbts/appnotes/sse2/w_spinlock.pdf
 *
 * "For this reason, it is highly recommended that you insert the PAUSE
 * instruction into all spin-wait code immediately. Using the PAUSE
 * instruction does not affect the correctness of programs on existing
 * platforms, and it improves performance on Pentium 4 processor platforms."
 */
#ifdef HAVE_MUTEX_WIN32
#if !defined(_WIN64) && !defined(DB_WINCE)
#define	MUTEX_PAUSE		{__asm{_emit 0xf3}; __asm{_emit 0x90}}
#endif
#endif
#ifdef HAVE_MUTEX_WIN32_GCC
#define	MUTEX_PAUSE		asm volatile ("rep; nop" : : );
#endif
#endif
#endif

/*********************************************************************
 * 68K/gcc assembly.
 *********************************************************************/
#ifdef HAVE_MUTEX_68K_GCC_ASSEMBLY
typedef unsigned char tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
/* gcc/68K: 0 is clear, 1 is set. */
#define	MUTEX_SET(tsl) ({						\
	register tsl_t *__l = (tsl);					\
	int __r;							\
	    asm volatile("tas  %1; \n					\
			  seq  %0"					\
		: "=dm" (__r), "=m" (*__l)				\
		: "1" (*__l)						\
		);							\
	__r & 1;							\
})

#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
#define	MUTEX_INIT(tsl)         (MUTEX_UNSET(tsl), 0)
#endif
#endif

/*********************************************************************
 * ALPHA/gcc assembly.
 *********************************************************************/
#ifdef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY
typedef u_int32_t tsl_t;

#define	MUTEX_ALIGN	4

#ifdef LOAD_ACTUAL_MUTEX_CODE
/*
 * For gcc/alpha.  Should return 0 if it could not acquire the lock, 1 if
 * the lock was acquired properly.
 */
static inline int
MUTEX_SET(tsl_t *tsl) {
	register tsl_t *__l = tsl;
	register tsl_t __r;
	asm volatile(
		"1:	ldl_l	%0,%2\n"
		"	blbs	%0,2f\n"
		"	or	$31,1,%0\n"
		"	stl_c	%0,%1\n"
		"	beq	%0,3f\n"
		"	mb\n"
		"	br	3f\n"
		"2:	xor	%0,%0\n"
		"3:"
		: "=&r"(__r), "=m"(*__l) : "1"(*__l) : "memory");
	return __r;
}

/*
 * Unset mutex.  Judging by the Alpha Architecture Handbook, the mb
 * instruction may be necessary before unlocking.
 */
static inline int
MUTEX_UNSET(tsl_t *tsl) {
	asm volatile("	mb\n");
	return *tsl = 0;
}

#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
#endif
#endif

/*********************************************************************
 * Tru64/cc assembly.
 *********************************************************************/
#ifdef HAVE_MUTEX_TRU64_CC_ASSEMBLY
typedef volatile u_int32_t tsl_t;

#define	MUTEX_ALIGN	4

#ifdef LOAD_ACTUAL_MUTEX_CODE
#include <alpha/builtins.h>
#define	MUTEX_SET(tsl)		(__LOCK_LONG_RETRY((tsl), 1) != 0)
#define	MUTEX_UNSET(tsl)	(__UNLOCK_LONG(tsl))

#define	MUTEX_INIT(tsl)		(MUTEX_UNSET(tsl), 0)
#endif
#endif

/*********************************************************************
 * ARM/gcc assembly.
 *********************************************************************/
#ifdef HAVE_MUTEX_ARM_GCC_ASSEMBLY
typedef unsigned char tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
/* gcc/arm: 0 is clear, 1 is set. */
#define	MUTEX_SET(tsl) ({						\
	int __r;							\
	asm volatile(							\
		"swpb	%0, %1, [%2]\n\t"				\
		"eor	%0, %0, #1\n\t"					\
	    : "=&r" (__r)						\
	    : "r" (1), "r" (tsl)					\
	    );								\
	__r & 1;							\
})

#define	MUTEX_UNSET(tsl)	(*(volatile tsl_t *)(tsl) = 0)
#define	MUTEX_INIT(tsl)         (MUTEX_UNSET(tsl), 0)
#endif
#endif

/*********************************************************************
 * HPPA/gcc assembly.
 *********************************************************************/
#ifdef HAVE_MUTEX_HPPA_GCC_ASSEMBLY
typedef u_int32_t tsl_t;

#define	MUTEX_ALIGN	16

#ifdef LOAD_ACTUAL_MUTEX_CODE
/*
 * The PA-RISC has a "load and clear" instead of a "test and set" instruction.
 * The 32-bit word used by that instruction must be 16-byte aligned.  We could
 * use the "aligned" attribute in GCC but that doesn't work for stack variables.
 */
#define	MUTEX_SET(tsl) ({						\
	register tsl_t *__l = (tsl);					\
	int __r;							\
	asm volatile("ldcws 0(%1),%0" : "=r" (__r) : "r" (__l));	\
	__r & 1;							\
})

#define	MUTEX_UNSET(tsl)        (*(volatile tsl_t *)(tsl) = -1)
#define	MUTEX_INIT(tsl)		(MUTEX_UNSET(tsl), 0)
#endif
#endif

/*********************************************************************
 * IA64/gcc assembly.
 *********************************************************************/
#ifdef HAVE_MUTEX_IA64_GCC_ASSEMBLY
typedef volatile unsigned char tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
/* gcc/ia64: 0 is clear, 1 is set. */
#define	MUTEX_SET(tsl) ({						\
	register tsl_t *__l = (tsl);					\
	long __r;							\
	asm volatile("xchg1 %0=%1,%2" :					\
		     "=r"(__r), "+m"(*__l) : "r"(1));			\
	__r ^ 1;							\
})

/*
 * Store through a "volatile" pointer so we get a store with "release"
 * semantics.
 */
#define	MUTEX_UNSET(tsl)	(*(tsl_t *)(tsl) = 0)
#define	MUTEX_INIT(tsl)         (MUTEX_UNSET(tsl), 0)
#endif
#endif

/*********************************************************************
 * PowerPC/gcc assembly.
 *********************************************************************/
#if defined(HAVE_MUTEX_PPC_GCC_ASSEMBLY)
typedef u_int32_t tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
/*
 * The PowerPC does a sort of pseudo-atomic locking.  You set up a
 * 'reservation' on a chunk of memory containing a mutex by loading the
 * mutex value with LWARX.  If the mutex has an 'unlocked' (arbitrary)
 * value, you then try storing into it with STWCX.  If no other process or
 * thread broke your 'reservation' by modifying the memory containing the
 * mutex, then the STWCX succeeds; otherwise it fails and you try to get
 * a reservation again.
 *
 * While mutexes are explicitly 4 bytes, a 'reservation' applies to an
 * entire cache line, normally 32 bytes, aligned naturally.  If the mutex
 * lives near data that gets changed a lot, there's a chance that you'll
 * see more broken reservations than you might otherwise.  The only
 * situation in which this might be a problem is if one processor is
 * beating on a variable in the same cache block as the mutex while another
 * processor tries to acquire the mutex.  That's bad news regardless
 * because of the way it bashes caches, but if you can't guarantee that a
 * mutex will reside in a relatively quiescent cache line, you might
 * consider padding the mutex to force it to live in a cache line by
 * itself.  No, you aren't guaranteed that cache lines are 32 bytes.  Some
 * embedded processors use 16-byte cache lines, while some 64-bit
 * processors use 128-byte cache lines.  But assuming a 32-byte cache line
 * won't get you into trouble for now.
 *
 * If mutex locking is a bottleneck, then you can speed it up by adding a
 * regular LWZ load before the LWARX load, so that you can test for the
 * common case of a locked mutex without wasting cycles making a reservation.
 *
 * gcc/ppc: 0 is clear, 1 is set.
 */
static inline int
MUTEX_SET(int *tsl)  {
        int __r;
        asm volatile (
"0:                             \n\t"
"       lwarx   %0,0,%1         \n\t"
"       cmpwi   %0,0            \n\t"
"       bne-    1f              \n\t"
"       stwcx.  %1,0,%1         \n\t"
"       isync                   \n\t"
"       beq+    2f              \n\t"
"       b       0b              \n\t"
"1:                             \n\t"
"       li      %1,0            \n\t"
"2:                             \n\t"
         : "=&r" (__r), "+r" (tsl)
         :
         : "cr0", "memory");
         return (int)tsl;
}

static inline int
MUTEX_UNSET(tsl_t *tsl) {
         asm volatile("sync" : : : "memory");
         return *tsl = 0;
}
#define	MUTEX_INIT(tsl)		MUTEX_UNSET(tsl)
#endif
#endif
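
/*
 * The LWZ-before-LWARX suggestion above is the classic test-and-test-and-set
 * optimization: spin reading the lock with ordinary loads, and only attempt
 * the reservation once the lock looks free.  The same idea can be expressed
 * at the C level on top of MUTEX_SET; a sketch, under the assumption that 0
 * means unlocked (mutex_trylock_ttas is a hypothetical helper, not part of
 * DB):
 *
 *	static inline int
 *	mutex_trylock_ttas(int *tsl)
 *	{
 *		-- Plain load first: don't grab a reservation (and pull
 *		-- the cache line exclusive) while the mutex is visibly
 *		-- held by someone else.
 *		if (*(volatile int *)tsl != 0)
 *			return (0);
 *		return (MUTEX_SET(tsl));
 *	}
 */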

/*********************************************************************
 * OS/390 C
 *********************************************************************/
#ifdef HAVE_MUTEX_S390_CC_ASSEMBLY
typedef int tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
/*
 * cs() is declared in <stdlib.h> but is built in to the compiler.
 * Must use LANGLVL(EXTENDED) to get its declaration.
 */
#define	MUTEX_SET(tsl)		(!cs(&zero, (tsl), 1))
#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
#define	MUTEX_INIT(tsl)         (MUTEX_UNSET(tsl), 0)
#endif
#endif

/*********************************************************************
 * S/390 32-bit assembly.
 *********************************************************************/
#ifdef HAVE_MUTEX_S390_GCC_ASSEMBLY
typedef int tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
/* gcc/S390: 0 is clear, 1 is set. */
static inline int
MUTEX_SET(tsl_t *tsl) {
	register tsl_t *__l = (tsl);
	int __r;
	asm volatile(
	    "    la    1,%1\n"
	    "    lhi   0,1\n"
	    "    l     %0,%1\n"
	    "0:  cs    %0,0,0(1)\n"
	    "    jl    0b"
	    : "=&d" (__r), "+m" (*__l)
	    : : "0", "1", "cc");
	return !__r;
}

#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
#define	MUTEX_INIT(tsl)         (MUTEX_UNSET(tsl), 0)
#endif
#endif

/*********************************************************************
 * SCO/cc assembly.
 *********************************************************************/
#ifdef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY
typedef unsigned char tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
/*
 * UnixWare has threads in libthread, but OpenServer doesn't (yet).
 *
 * cc/x86: 0 is clear, 1 is set.
 */
#if defined(__USLC__)
asm int
_tsl_set(void *tsl)
{
%mem tsl
	movl	tsl, %ecx
	movl	$1, %eax
	lock
	xchgb	(%ecx),%al
	xorl	$1,%eax
}
#endif

#define	MUTEX_SET(tsl)		_tsl_set(tsl)
#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
#define	MUTEX_INIT(tsl)         (MUTEX_UNSET(tsl), 0)
#endif
#endif

/*********************************************************************
 * Sparc/gcc assembly.
 *********************************************************************/
#ifdef HAVE_MUTEX_SPARC_GCC_ASSEMBLY
typedef unsigned char tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
/*
 * The ldstub instruction takes the location specified by its first argument
 * (a register containing a memory address), loads its contents into its
 * second argument (a register), and atomically sets the contents of the
 * location specified by its first argument to a byte of 1s.  (The value in
 * the second argument is never read, only overwritten.)
 *
 * The stbar is needed for v8, and is implemented as membar #sync on v9,
 * so it is functional there as well.  For v7, stbar may generate an illegal
 * instruction and we have no way to tell what we're running on.  Some
 * operating systems notice and skip this instruction in the fault handler.
 *
 * gcc/sparc: 0 is clear, 1 is set.
 */
#define	MUTEX_SET(tsl) ({						\
	register tsl_t *__l = (tsl);					\
	register tsl_t __r;						\
	asm volatile							\
	    ("ldstub [%1],%0; stbar"					\
	    : "=r" (__r) : "r" (__l));					\
	!__r;								\
})

#define	MUTEX_UNSET(tsl)	(*(tsl) = 0)
#define	MUTEX_INIT(tsl)         (MUTEX_UNSET(tsl), 0)
#define	MUTEX_MEMBAR(x)         ({asm volatile("stbar");})
#endif
#endif

/*********************************************************************
 * UTS/cc assembly.
 *********************************************************************/
#ifdef HAVE_MUTEX_UTS_CC_ASSEMBLY
typedef int tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
#define	MUTEX_INIT(x)	0
#define	MUTEX_SET(x)	(!uts_lock(x, 1))
#define	MUTEX_UNSET(x)	(*(x) = 0)
#endif
#endif

/*********************************************************************
 * MIPS/gcc assembly.
 *********************************************************************/
#ifdef HAVE_MUTEX_MIPS_GCC_ASSEMBLY
typedef u_int32_t tsl_t;

#define	MUTEX_ALIGN	4

#ifdef LOAD_ACTUAL_MUTEX_CODE
/*
 * For gcc/MIPS.  Should return 0 if it could not acquire the lock, 1 if
 * the lock was acquired properly.
 */
static inline int
MUTEX_SET(tsl_t *tsl) {
       register tsl_t *__l = tsl;
       register tsl_t __r;
       asm volatile(
               "       .set push           \n"
               "       .set mips2          \n"
               "       .set noreorder      \n"
               "       .set nomacro        \n"
               "1:     ll      %0,%1       \n"
               "       bne     %0,$0,1f    \n"
               "       xori    %0,%0,1     \n"
               "       sc      %0,%1       \n"
               "       beql    %0,$0,1b    \n"
               "       xori    %0,1        \n"
               "1:     .set pop              "
               : "=&r" (__r), "+R" (*__l));
       return __r;
}

#define	MUTEX_UNSET(tsl)        (*(volatile tsl_t *)(tsl) = 0)
#define	MUTEX_INIT(tsl)         (MUTEX_UNSET(tsl), 0)
#endif
#endif

/*********************************************************************
 * x86/gcc (32- and 64-bit) assembly.
 *********************************************************************/
#if defined(HAVE_MUTEX_X86_GCC_ASSEMBLY) || \
    defined(HAVE_MUTEX_X86_64_GCC_ASSEMBLY)
typedef unsigned char tsl_t;

#ifdef LOAD_ACTUAL_MUTEX_CODE
/* gcc/x86: 0 is clear, 1 is set. */
#define	MUTEX_SET(tsl) ({						\
	tsl_t __r;							\
	asm volatile("movb $1, %b0\n\t"					\
		"xchgb %b0,%1"						\
	    : "=&q" (__r)						\
	    : "m" (*(volatile tsl_t *)(tsl))				\
	    : "memory", "cc");						\
	!__r;								\
})

#define	MUTEX_UNSET(tsl)        (*(volatile tsl_t *)(tsl) = 0)
#define	MUTEX_INIT(tsl)		(MUTEX_UNSET(tsl), 0)
/*
 * We need to pass a valid address to generate the memory barrier,
 * otherwise PURIFY will complain.  Use something referenced recently
 * and initialized.
 */
#if defined(HAVE_MUTEX_X86_GCC_ASSEMBLY)
#define	MUTEX_MEMBAR(addr)						\
    ({ asm volatile ("lock; addl $0, %0" ::"m" (addr): "memory"); 1; })
#else
#define	MUTEX_MEMBAR(addr)						\
    ({ asm volatile ("mfence" ::: "memory"); 1; })
#endif

/*
 * From Intel's performance tuning documentation (and see SR #6975):
 * ftp://download.intel.com/design/perftool/cbts/appnotes/sse2/w_spinlock.pdf
 *
 * "For this reason, it is highly recommended that you insert the PAUSE
 * instruction into all spin-wait code immediately. Using the PAUSE
 * instruction does not affect the correctness of programs on existing
 * platforms, and it improves performance on Pentium 4 processor platforms."
 */
#define	MUTEX_PAUSE		asm volatile ("rep; nop" : : );
#endif
#endif
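
/*
 * MUTEX_PAUSE exists to be dropped into spin-wait loops, per the Intel
 * note above.  A hedged sketch of how a caller would combine it with
 * MUTEX_SET (the loop structure, MAX_SPINS, and yield_cpu are illustrative
 * assumptions, not DB's actual acquisition code):
 *
 *	int nspins;
 *
 *	for (nspins = 0; !MUTEX_SET(tsl); ++nspins) {
 *		MUTEX_PAUSE		-- expands to "rep; nop", i.e. PAUSE
 *		if (nspins >= MAX_SPINS)	-- hypothetical spin bound
 *			yield_cpu();		-- hypothetical yield
 *	}
 *
 * Note that MUTEX_PAUSE's definition already supplies its trailing
 * semicolon, so it is written without one at the call site.
 */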

/*
 * Mutex alignment defaults to sizeof(unsigned int).
 *
 * !!!
 * Various systems require different alignments for mutexes (the worst we've
 * seen so far is 16 bytes on some HP architectures).  Malloc(3) is assumed
 * to return reasonable alignment; all other mutex users must ensure proper
 * alignment locally.
 */
#ifndef	MUTEX_ALIGN
#define	MUTEX_ALIGN	sizeof(unsigned int)
#endif
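
/*
 * "Ensure proper alignment locally" means rounding an address up to the
 * next MUTEX_ALIGN boundary before placing a mutex there.  The usual
 * round-up idiom, as a sketch (ALIGN_UP is a hypothetical helper; it
 * relies on MUTEX_ALIGN being a power of two, and uintptr_t comes from
 * <stdint.h>):
 *
 *	#define	ALIGN_UP(p, align)					\
 *		(((uintptr_t)(p) + (align) - 1) & ~((uintptr_t)(align) - 1))
 *
 *	tsl_t *mutexp = (tsl_t *)ALIGN_UP(shared_base, MUTEX_ALIGN);
 */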

/*
 * Mutex destruction defaults to a no-op.
 */
#ifndef	MUTEX_DESTROY
#define	MUTEX_DESTROY(x)
#endif

/*
 * DB_MUTEXMGR --
 *	The mutex manager encapsulates the mutex system.
 */
struct __db_mutexmgr {
	/* These fields are never updated after creation, so not protected. */
	DB_ENV	*dbenv;			/* Environment */
	REGINFO	 reginfo;		/* Region information */

	void	*mutex_array;		/* Base of the mutex array */
};

/* Macros to lock/unlock the mutex region as a whole. */
#define	MUTEX_SYSTEM_LOCK(dbenv)					\
	MUTEX_LOCK(dbenv, ((DB_MUTEXREGION *)				\
	    (dbenv)->mutex_handle->reginfo.primary)->mtx_region)
#define	MUTEX_SYSTEM_UNLOCK(dbenv)					\
	MUTEX_UNLOCK(dbenv, ((DB_MUTEXREGION *)				\
	    (dbenv)->mutex_handle->reginfo.primary)->mtx_region)

/*
 * DB_MUTEXREGION --
 *	The primary mutex data structure in the shared memory region.
 */
typedef struct __db_mutexregion {
	/* These fields are initialized at create time and never modified. */
	roff_t		mutex_off_alloc;/* Offset of mutex array */
	roff_t		mutex_off;	/* Adjusted offset of mutex array */
	size_t		mutex_size;	/* Size of the aligned mutex */
	roff_t		thread_off;	/* Offset of the thread area. */

	db_mutex_t	mtx_region;	/* Region mutex. */

	/* Protected using the region mutex. */
	u_int32_t	mutex_next;	/* Next free mutex */

	DB_MUTEX_STAT	stat;		/* Mutex statistics */
} DB_MUTEXREGION;

struct __db_mutex_t {			/* Mutex. */
#ifdef MUTEX_FIELDS
	MUTEX_FIELDS			/* Opaque thread mutex structures. */
#endif
#if defined(HAVE_MUTEX_HYBRID) ||					\
    (!defined(MUTEX_FIELDS) && !defined(HAVE_MUTEX_FCNTL))
	tsl_t		tas;		/* Test and set. */
#endif
#ifdef HAVE_MUTEX_HYBRID
	volatile u_int32_t wait;	/* Count of waiters. */
#endif
	pid_t		pid;		/* Process owning mutex */
	db_threadid_t	tid;		/* Thread owning mutex */

	u_int32_t mutex_next_link;	/* Linked list of free mutexes. */

#ifdef HAVE_STATISTICS
	int	  alloc_id;		/* Allocation ID. */

	u_int32_t mutex_set_wait;	/* Granted after wait. */
	u_int32_t mutex_set_nowait;	/* Granted without waiting. */
#endif

	/*
	 * A subset of the flag arguments for __mutex_alloc().
	 *
	 * Flags should be an unsigned integer even if the possible flag
	 * values don't require it: fetching a single byte is expensive on
	 * some machines, and the mutex structure is an MP hot spot.
	 */
	volatile u_int32_t flags;		/* MUTEX_XXX */
};

/* Macro to get a reference to a specific mutex. */
#define	MUTEXP_SET(indx)						\
	(DB_MUTEX *)							\
	    ((u_int8_t *)mtxmgr->mutex_array + (indx) * mtxregion->mutex_size)
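
/*
 * MUTEXP_SET turns a mutex ID into the address of its slot by simple array
 * arithmetic: the region holds an array of mutex_size-byte slots starting
 * at mutex_array.  Note the macro expects local variables named mtxmgr and
 * mtxregion to be in scope.  A hedged usage sketch (the surrounding
 * declarations are assumptions for illustration, not DB's actual code):
 *
 *	DB_MUTEXMGR *mtxmgr = dbenv->mutex_handle;
 *	DB_MUTEXREGION *mtxregion = mtxmgr->reginfo.primary;
 *	DB_MUTEX *mutexp;
 *
 *	mutexp = MUTEXP_SET(mutex);	-- mutex is a db_mutex_t ID
 */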

#if defined(__cplusplus)
}
#endif
#endif /* !_DB_MUTEX_INT_H_ */