/*
 * Copyright (c) 2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _OPENSOLARIS_SYS_ATOMIC_H_
#define	_OPENSOLARIS_SYS_ATOMIC_H_

#ifndef _STANDALONE

#include <sys/types.h>
#include <machine/atomic.h>

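/* Map the Solaris name onto the FreeBSD primitive from <machine/atomic.h>. */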
#define	atomic_sub_64	atomic_subtract_64

#if defined(__i386__) && (defined(_KERNEL) || defined(KLD_MODULE))
#define	I386_HAVE_ATOMIC64
#endif
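/*
 * The i386 kernel and kernel modules provide native 64-bit atomics
 * (presumably via cmpxchg8b); i386 userland does not, and instead uses
 * the emulated externs declared below.
 */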

#if defined(__i386__) || defined(__amd64__) || defined(__arm__)
/* No spurious failures from fcmpset. */
#define	STRONG_FCMPSET
#endif
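/*
 * On the architectures above a failed fcmpset always means the value
 * really differed.  Elsewhere fcmpset may fail spuriously, so the CAS
 * wrappers below must loop until either success or a genuine mismatch
 * is observed.
 */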

#if !defined(__LP64__) && !defined(__mips_n32) && \
	!defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64) && \
	!defined(HAS_EMULATED_ATOMIC64)
extern void atomic_add_64(volatile uint64_t *target, int64_t delta);
extern void atomic_dec_64(volatile uint64_t *target);
extern uint64_t atomic_swap_64(volatile uint64_t *a, uint64_t value);
extern uint64_t atomic_load_64(volatile uint64_t *a);
extern uint64_t atomic_add_64_nv(volatile uint64_t *target, int64_t delta);
extern uint64_t atomic_cas_64(volatile uint64_t *target, uint64_t cmp,
    uint64_t newval);
#endif
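/*
 * Without native 64-bit atomics, the operations above are emulated out
 * of line (presumably serialized with a lock) rather than inlined here.
 */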

#define	membar_consumer()		atomic_thread_fence_acq()
#define	membar_producer()		atomic_thread_fence_rel()
#define	membar_sync()			atomic_thread_fence_seq_cst()
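/*
 * Illustrative producer/consumer pairing (a sketch; `data' and `flag'
 * are hypothetical variables):
 *
 *	data = compute();
 *	membar_producer();	(order the data store before the flag store)
 *	flag = 1;
 *
 * and on the reading side:
 *
 *	while (flag == 0)
 *		;
 *	membar_consumer();	(order the flag load before the data load)
 *	use(data);
 */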
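/*
 * Solaris "_nv" variants return the new value after the operation;
 * FreeBSD's fetchadd returns the old value, hence the "+ delta".
 */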
static __inline uint32_t
atomic_add_32_nv(volatile uint32_t *target, int32_t delta)
{
	return (atomic_fetchadd_32(target, delta) + delta);
}

static __inline uint_t
atomic_add_int_nv(volatile uint_t *target, int delta)
{
	return (atomic_add_32_nv(target, delta));
}

static __inline void
atomic_inc_32(volatile uint32_t *target)
{
	atomic_add_32(target, 1);
}

static __inline uint32_t
atomic_inc_32_nv(volatile uint32_t *target)
{
	return (atomic_add_32_nv(target, 1));
}

static __inline void
atomic_dec_32(volatile uint32_t *target)
{
	atomic_subtract_32(target, 1);
}

static __inline uint32_t
atomic_dec_32_nv(volatile uint32_t *target)
{
	return (atomic_add_32_nv(target, -1));
}

#ifndef __sparc64__
static __inline uint32_t
atomic_cas_32(volatile uint32_t *target, uint32_t cmp, uint32_t newval)
{
#ifdef STRONG_FCMPSET
	(void) atomic_fcmpset_32(target, &cmp, newval);
#else
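	/*
	 * A weak fcmpset may fail spuriously, reloading *target into cmp.
	 * Retry while the reloaded value still equals the caller's
	 * expected value; only success or a genuine mismatch ends the
	 * loop, so the returned cmp reflects a real observation.
	 */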
	uint32_t expected = cmp;

	do {
		if (atomic_fcmpset_32(target, &cmp, newval))
			break;
	} while (cmp == expected);
#endif
	return (cmp);
}
#endif
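/*
 * Solaris-style CAS returns the value observed in *target before the
 * exchange; the swap took effect iff that value equals `cmp'.  A
 * typical increment loop (a sketch; `counter' is hypothetical):
 *
 *	uint32_t old;
 *	do {
 *		old = counter;
 *	} while (atomic_cas_32(&counter, old, old + 1) != old);
 */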

#if defined(__LP64__) || defined(__mips_n32) || \
	defined(ARM_HAVE_ATOMIC64) || defined(I386_HAVE_ATOMIC64) || \
	defined(HAS_EMULATED_ATOMIC64)
static __inline void
atomic_dec_64(volatile uint64_t *target)
{
	atomic_subtract_64(target, 1);
}

static __inline uint64_t
atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
{
	return (atomic_fetchadd_64(target, delta) + delta);
}

#ifndef __sparc64__
static __inline uint64_t
atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
{
#ifdef STRONG_FCMPSET
	(void) atomic_fcmpset_64(target, &cmp, newval);
#else
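	/* Same spurious-failure retry rationale as in atomic_cas_32. */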
	uint64_t expected = cmp;

	do {
		if (atomic_fcmpset_64(target, &cmp, newval))
			break;
	} while (cmp == expected);
#endif
	return (cmp);
}
#endif
#endif

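/*
 * The helpers below build on atomic_add_64()/atomic_add_64_nv(), which
 * come from <machine/atomic.h>, the inlines above, or the emulated
 * externs, depending on the platform.
 */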
static __inline void
atomic_inc_64(volatile uint64_t *target)
{
	atomic_add_64(target, 1);
}

static __inline uint64_t
atomic_inc_64_nv(volatile uint64_t *target)
{
	return (atomic_add_64_nv(target, 1));
}

static __inline uint64_t
atomic_dec_64_nv(volatile uint64_t *target)
{
	return (atomic_add_64_nv(target, -1));
}

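/*
 * Pointer-sized CAS: dispatch to the 64- or 32-bit variant matching the
 * platform's pointer width.
 */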
#ifdef __LP64__
static __inline void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
	return ((void *)atomic_cas_64((volatile uint64_t *)target,
	    (uint64_t)cmp, (uint64_t)newval));
}
#else
static __inline void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
	return ((void *)atomic_cas_32((volatile uint32_t *)target,
	    (uint32_t)cmp, (uint32_t)newval));
}
#endif	/* __LP64__ */

#else /* _STANDALONE */
/*
 * Depending on the platform, atomic_add_64() may or may not already be
 * defined; either way, plain non-atomic arithmetic is always correct in
 * the single-threaded boot loader.
 */
#undef atomic_add_64
#define	atomic_add_64(ptr, val) *(ptr) += (val)
#undef atomic_sub_64
#define	atomic_sub_64(ptr, val) *(ptr) -= (val)
#endif /* !_STANDALONE */

#endif	/* !_OPENSOLARIS_SYS_ATOMIC_H_ */