atomic.h: stable/11/sys/mips/include/atomic.h, r302408 -> r315371
Where a line changed between the two revisions, the r302408 version appears
directly above its r315371 replacement; all other lines are from r315371.
1/*-
2 * Copyright (c) 1998 Doug Rabson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * from: src/sys/alpha/include/atomic.h,v 1.21.2.3 2005/10/06 18:12:05 jhb
27 * $FreeBSD: stable/11/sys/mips/include/atomic.h 285283 2015-07-08 18:12:24Z kib $
27 * $FreeBSD: stable/11/sys/mips/include/atomic.h 315371 2017-03-16 06:00:27Z mjg $
28 */
29
30#ifndef _MACHINE_ATOMIC_H_
31#define _MACHINE_ATOMIC_H_
32
33#ifndef _SYS_CDEFS_H_
34#error this file needs sys/cdefs.h as a prerequisite
35#endif
36
37/*
38 * Note: All the 64-bit atomic operations are only atomic when running
39 * in 64-bit mode. It is assumed that code compiled for n32 and n64
40 * fits into this definition and no further safeties are needed.
41 *
 42 * It is also assumed that add, subtract and other arithmetic are
 43 * done on numbers, not pointers.  n32 pointers, which follow special
 44 * rules, have no atomic operations defined for them, but generally
 45 * shouldn't need any.
46 */
47#ifndef __MIPS_PLATFORM_SYNC_NOPS
48#define __MIPS_PLATFORM_SYNC_NOPS ""
49#endif
50
51static __inline void
52mips_sync(void)
53{
54 __asm __volatile (".set noreorder\n"
55 "\tsync\n"
56 __MIPS_PLATFORM_SYNC_NOPS
57 ".set reorder\n"
58 : : : "memory");
59}
60
61#define mb() mips_sync()
62#define wmb() mips_sync()
63#define rmb() mips_sync()
64
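/*
 * Example (not part of this file): mb(), wmb() and rmb() all expand to
 * the same full SYNC, so on this port the read- and write-only variants
 * are no weaker than the full barrier.  A minimal producer/consumer
 * sketch, assuming hypothetical globals `data' and `ready':
 */
static volatile uint32_t data, ready;

static void
producer(void)
{
	data = 42;
	wmb();			/* order the data store before the flag */
	ready = 1;
}

static uint32_t
consumer(void)
{
	while (ready == 0)
		;		/* spin until the flag is set */
	rmb();			/* order the flag load before the data load */
	return (data);
}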
65/*
 66 * Various simple arithmetic operations on memory, atomic in the
 67 * presence of interrupts and SMP-safe.
68 */
69
70void atomic_set_8(__volatile uint8_t *, uint8_t);
71void atomic_clear_8(__volatile uint8_t *, uint8_t);
72void atomic_add_8(__volatile uint8_t *, uint8_t);
73void atomic_subtract_8(__volatile uint8_t *, uint8_t);
74
75void atomic_set_16(__volatile uint16_t *, uint16_t);
76void atomic_clear_16(__volatile uint16_t *, uint16_t);
77void atomic_add_16(__volatile uint16_t *, uint16_t);
78void atomic_subtract_16(__volatile uint16_t *, uint16_t);
79
80static __inline void
81atomic_set_32(__volatile uint32_t *p, uint32_t v)
82{
83 uint32_t temp;
84
85 __asm __volatile (
86 "1:\tll %0, %3\n\t" /* load old value */
87 "or %0, %2, %0\n\t" /* calculate new value */
88 "sc %0, %1\n\t" /* attempt to store */
89 "beqz %0, 1b\n\t" /* spin if failed */
90 : "=&r" (temp), "=m" (*p)
91 : "r" (v), "m" (*p)
92 : "memory");
93
94}
95
96static __inline void
97atomic_clear_32(__volatile uint32_t *p, uint32_t v)
98{
99 uint32_t temp;
100 v = ~v;
101
102 __asm __volatile (
103 "1:\tll %0, %3\n\t" /* load old value */
104 "and %0, %2, %0\n\t" /* calculate new value */
105 "sc %0, %1\n\t" /* attempt to store */
106 "beqz %0, 1b\n\t" /* spin if failed */
107 : "=&r" (temp), "=m" (*p)
108 : "r" (v), "m" (*p)
109 : "memory");
110}
111
112static __inline void
113atomic_add_32(__volatile uint32_t *p, uint32_t v)
114{
115 uint32_t temp;
116
117 __asm __volatile (
118 "1:\tll %0, %3\n\t" /* load old value */
119 "addu %0, %2, %0\n\t" /* calculate new value */
120 "sc %0, %1\n\t" /* attempt to store */
121 "beqz %0, 1b\n\t" /* spin if failed */
122 : "=&r" (temp), "=m" (*p)
123 : "r" (v), "m" (*p)
124 : "memory");
125}
126
127static __inline void
128atomic_subtract_32(__volatile uint32_t *p, uint32_t v)
129{
130 uint32_t temp;
131
132 __asm __volatile (
133 "1:\tll %0, %3\n\t" /* load old value */
134 "subu %0, %2\n\t" /* calculate new value */
135 "sc %0, %1\n\t" /* attempt to store */
136 "beqz %0, 1b\n\t" /* spin if failed */
137 : "=&r" (temp), "=m" (*p)
138 : "r" (v), "m" (*p)
139 : "memory");
140}
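/*
 * Example (not part of this file): each of the operations above loops
 * on LL/SC until the conditional store succeeds, so concurrent updates
 * are never lost.  A sketch using a hypothetical flags word and counter:
 */
static volatile uint32_t busy_flags, busy_count;

#define	F_BUSY	0x01

static void
mark_busy(void)
{
	atomic_set_32(&busy_flags, F_BUSY);	/* OR the bit in */
	atomic_add_32(&busy_count, 1);
}

static void
mark_idle(void)
{
	atomic_subtract_32(&busy_count, 1);
	atomic_clear_32(&busy_flags, F_BUSY);	/* AND the bit out */
}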
141
142static __inline uint32_t
143atomic_readandclear_32(__volatile uint32_t *addr)
144{
145 uint32_t result,temp;
146
147 __asm __volatile (
148 "1:\tll %0,%3\n\t" /* load current value, asserting lock */
149 "li %1,0\n\t" /* value to store */
150 "sc %1,%2\n\t" /* attempt to store */
151 "beqz %1, 1b\n\t" /* if the store failed, spin */
152 : "=&r"(result), "=&r"(temp), "=m" (*addr)
153 : "m" (*addr)
154 : "memory");
155
156 return result;
157}
158
159static __inline uint32_t
160atomic_readandset_32(__volatile uint32_t *addr, uint32_t value)
161{
162 uint32_t result,temp;
163
164 __asm __volatile (
165 "1:\tll %0,%3\n\t" /* load current value, asserting lock */
 166 "or %1,$0,%4\n\t" /* value to store */
167 "sc %1,%2\n\t" /* attempt to store */
168 "beqz %1, 1b\n\t" /* if the store failed, spin */
169 : "=&r"(result), "=&r"(temp), "=m" (*addr)
170 : "m" (*addr), "r" (value)
171 : "memory");
172
173 return result;
174}
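/*
 * Example (not part of this file): atomic_readandclear_32() is a swap
 * with zero, which makes it handy for draining a pending-event mask in
 * a single step.  Sketch with a hypothetical `pending' word:
 */
static volatile uint32_t pending;

static void
drain_events(void)
{
	uint32_t ev;

	ev = atomic_readandclear_32(&pending);	/* claim all bits at once */
	while (ev != 0) {
		/* process the lowest set bit here (handler omitted) */
		ev &= ev - 1;			/* clear the lowest set bit */
	}
}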
175
176#if defined(__mips_n64) || defined(__mips_n32)
177static __inline void
178atomic_set_64(__volatile uint64_t *p, uint64_t v)
179{
180 uint64_t temp;
181
182 __asm __volatile (
183 "1:\n\t"
184 "lld %0, %3\n\t" /* load old value */
185 "or %0, %2, %0\n\t" /* calculate new value */
186 "scd %0, %1\n\t" /* attempt to store */
187 "beqz %0, 1b\n\t" /* spin if failed */
188 : "=&r" (temp), "=m" (*p)
189 : "r" (v), "m" (*p)
190 : "memory");
191
192}
193
194static __inline void
195atomic_clear_64(__volatile uint64_t *p, uint64_t v)
196{
197 uint64_t temp;
198 v = ~v;
199
200 __asm __volatile (
201 "1:\n\t"
202 "lld %0, %3\n\t" /* load old value */
203 "and %0, %2, %0\n\t" /* calculate new value */
204 "scd %0, %1\n\t" /* attempt to store */
205 "beqz %0, 1b\n\t" /* spin if failed */
206 : "=&r" (temp), "=m" (*p)
207 : "r" (v), "m" (*p)
208 : "memory");
209}
210
211static __inline void
212atomic_add_64(__volatile uint64_t *p, uint64_t v)
213{
214 uint64_t temp;
215
216 __asm __volatile (
217 "1:\n\t"
218 "lld %0, %3\n\t" /* load old value */
219 "daddu %0, %2, %0\n\t" /* calculate new value */
220 "scd %0, %1\n\t" /* attempt to store */
221 "beqz %0, 1b\n\t" /* spin if failed */
222 : "=&r" (temp), "=m" (*p)
223 : "r" (v), "m" (*p)
224 : "memory");
225}
226
227static __inline void
228atomic_subtract_64(__volatile uint64_t *p, uint64_t v)
229{
230 uint64_t temp;
231
232 __asm __volatile (
233 "1:\n\t"
234 "lld %0, %3\n\t" /* load old value */
235 "dsubu %0, %2\n\t" /* calculate new value */
236 "scd %0, %1\n\t" /* attempt to store */
237 "beqz %0, 1b\n\t" /* spin if failed */
238 : "=&r" (temp), "=m" (*p)
239 : "r" (v), "m" (*p)
240 : "memory");
241}
242
243static __inline uint64_t
244atomic_readandclear_64(__volatile uint64_t *addr)
245{
246 uint64_t result,temp;
247
248 __asm __volatile (
249 "1:\n\t"
250 "lld %0, %3\n\t" /* load old value */
251 "li %1, 0\n\t" /* value to store */
252 "scd %1, %2\n\t" /* attempt to store */
253 "beqz %1, 1b\n\t" /* if the store failed, spin */
254 : "=&r"(result), "=&r"(temp), "=m" (*addr)
255 : "m" (*addr)
256 : "memory");
257
258 return result;
259}
260
261static __inline uint64_t
262atomic_readandset_64(__volatile uint64_t *addr, uint64_t value)
263{
264 uint64_t result,temp;
265
266 __asm __volatile (
267 "1:\n\t"
 268 "lld %0,%3\n\t" /* load old value */
 269 "or %1,$0,%4\n\t" /* value to store */
270 "scd %1,%2\n\t" /* attempt to store */
271 "beqz %1, 1b\n\t" /* if the store failed, spin */
272 : "=&r"(result), "=&r"(temp), "=m" (*addr)
273 : "m" (*addr), "r" (value)
274 : "memory");
275
276 return result;
277}
278#endif
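/*
 * Example (not part of this file): the 64-bit LL/SC primitives above
 * exist only when the ABI provides 64-bit registers, so width-generic
 * callers guard their 64-bit path the same way this header does:
 */
#if defined(__mips_n64) || defined(__mips_n32)
static void
bump64(volatile uint64_t *p)
{
	atomic_add_64(p, 1);		/* safe: lld/scd are usable here */
}
#endif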
279
280#define ATOMIC_ACQ_REL(NAME, WIDTH) \
281static __inline void \
282atomic_##NAME##_acq_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
283{ \
284 atomic_##NAME##_##WIDTH(p, v); \
285 mips_sync(); \
286} \
287 \
288static __inline void \
289atomic_##NAME##_rel_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
290{ \
291 mips_sync(); \
292 atomic_##NAME##_##WIDTH(p, v); \
293}
294
295/* Variants of simple arithmetic with memory barriers. */
296ATOMIC_ACQ_REL(set, 8)
297ATOMIC_ACQ_REL(clear, 8)
298ATOMIC_ACQ_REL(add, 8)
299ATOMIC_ACQ_REL(subtract, 8)
300ATOMIC_ACQ_REL(set, 16)
301ATOMIC_ACQ_REL(clear, 16)
302ATOMIC_ACQ_REL(add, 16)
303ATOMIC_ACQ_REL(subtract, 16)
304ATOMIC_ACQ_REL(set, 32)
305ATOMIC_ACQ_REL(clear, 32)
306ATOMIC_ACQ_REL(add, 32)
307ATOMIC_ACQ_REL(subtract, 32)
308#if defined(__mips_n64) || defined(__mips_n32)
309ATOMIC_ACQ_REL(set, 64)
310ATOMIC_ACQ_REL(clear, 64)
311ATOMIC_ACQ_REL(add, 64)
312ATOMIC_ACQ_REL(subtract, 64)
313#endif
314
315#undef ATOMIC_ACQ_REL
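/*
 * Example (not part of this file): the _acq variants order the atomic
 * op before all later accesses, and the _rel variants order all earlier
 * accesses before the atomic op (both via a full sync here).  A classic
 * use of release is dropping a reference only after every write to the
 * object is visible (sketch, hypothetical refcount):
 */
static void
obj_release(volatile uint32_t *refs)
{
	/* Prior stores to the object become visible before the drop. */
	atomic_subtract_rel_32(refs, 1);
}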
316
317/*
318 * We assume that a = b will do atomic loads and stores.
319 */
320#define ATOMIC_STORE_LOAD(WIDTH) \
321static __inline uint##WIDTH##_t \
322atomic_load_acq_##WIDTH(__volatile uint##WIDTH##_t *p) \
323{ \
324 uint##WIDTH##_t v; \
325 \
326 v = *p; \
327 mips_sync(); \
328 return (v); \
329} \
330 \
331static __inline void \
332atomic_store_rel_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
333{ \
334 mips_sync(); \
335 *p = v; \
336}
337
338ATOMIC_STORE_LOAD(32)
339ATOMIC_STORE_LOAD(64)
340#if !defined(__mips_n64) && !defined(__mips_n32)
341void atomic_store_64(__volatile uint64_t *, uint64_t *);
342void atomic_load_64(__volatile uint64_t *, uint64_t *);
343#else
344static __inline void
345atomic_store_64(__volatile uint64_t *p, uint64_t *v)
346{
347 *p = *v;
348}
349
350static __inline void
351atomic_load_64(__volatile uint64_t *p, uint64_t *v)
352{
353 *v = *p;
354}
355#endif
356
357#undef ATOMIC_STORE_LOAD
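/*
 * Example (not part of this file): a store-release/load-acquire pair
 * hands an initialized structure from one CPU to another (sketch;
 * `thing' and `thing_ready' are hypothetical):
 */
static struct thing { int x; } thing;
static volatile uint32_t thing_ready;

static void
publish(void)
{
	thing.x = 1;
	atomic_store_rel_32(&thing_ready, 1);	/* sync, then store */
}

static int
try_consume(void)
{
	if (atomic_load_acq_32(&thing_ready) == 0)	/* load, then sync */
		return (-1);
	return (thing.x);		/* initialized if the flag was seen */
}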
358
359/*
360 * Atomically compare the value stored at *p with cmpval and if the
361 * two values are equal, update the value of *p with newval. Returns
362 * zero if the compare failed, nonzero otherwise.
363 */
364static __inline uint32_t
365atomic_cmpset_32(__volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
365atomic_cmpset_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
366{
367 uint32_t ret;
368
369 __asm __volatile (
370 "1:\tll %0, %4\n\t" /* load old value */
371 "bne %0, %2, 2f\n\t" /* compare */
372 "move %0, %3\n\t" /* value to store */
373 "sc %0, %1\n\t" /* attempt to store */
374 "beqz %0, 1b\n\t" /* if it failed, spin */
375 "j 3f\n\t"
376 "2:\n\t"
377 "li %0, 0\n\t"
378 "3:\n"
379 : "=&r" (ret), "=m" (*p)
380 : "r" (cmpval), "r" (newval), "m" (*p)
381 : "memory");
382
383 return ret;
384}
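/*
 * Example (not part of this file): atomic_cmpset_32() returns nonzero
 * only when it installed newval, so lock-free updates retry in a loop.
 * A sketch of a saturating increment built on it (hypothetical helper):
 */
static uint32_t
saturating_inc(volatile uint32_t *p)
{
	uint32_t old;

	do {
		old = *p;
		if (old == 0xffffffffU)
			return (old);		/* already saturated */
	} while (atomic_cmpset_32(p, old, old + 1) == 0);
	return (old + 1);
}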
385
386/*
387 * Atomically compare the value stored at *p with cmpval and if the
388 * two values are equal, update the value of *p with newval. Returns
389 * zero if the compare failed, nonzero otherwise.
390 */
391static __inline uint32_t
392atomic_cmpset_acq_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
393{
394 int retval;
395
396 retval = atomic_cmpset_32(p, cmpval, newval);
397 mips_sync();
398 return (retval);
399}
400
401static __inline uint32_t
402atomic_cmpset_rel_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
403{
404 mips_sync();
405 return (atomic_cmpset_32(p, cmpval, newval));
406}
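/*
 * Example (not part of this file): an acquiring compare-and-set plus a
 * releasing store is the shape of a minimal spinlock (sketch only; the
 * kernel's real locks live in sys/kern):
 */
static void
spin_lock(volatile uint32_t *lk)
{
	while (atomic_cmpset_acq_32(lk, 0, 1) == 0)
		;			/* spin until the 0 -> 1 CAS wins */
}

static void
spin_unlock(volatile uint32_t *lk)
{
	atomic_store_rel_32(lk, 0);	/* critical-section writes visible */
}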
407
408static __inline uint32_t
409atomic_fcmpset_32(__volatile uint32_t *p, uint32_t *cmpval, uint32_t newval)
410{
411 uint32_t ret;
412
413 __asm __volatile (
414 "1:\n\t"
415 "ll %0, %1\n\t" /* load old value */
416 "bne %0, %4, 2f\n\t" /* compare */
417 "move %0, %3\n\t" /* value to store */
418 "sc %0, %1\n\t" /* attempt to store */
419 "beqz %0, 1b\n\t" /* if it failed, spin */
420 "j 3f\n\t"
421 "2:\n\t"
422 "sw %0, %2\n\t" /* save old value */
423 "li %0, 0\n\t"
424 "3:\n"
425 : "=&r" (ret), "+m" (*p), "=m" (*cmpval)
426 : "r" (newval), "r" (*cmpval)
427 : "memory");
428 return ret;
429}
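/*
 * Example (not part of this file): atomic_fcmpset_32() differs from
 * atomic_cmpset_32() in that, on failure, it writes the value it
 * observed back through *cmpval, so a retry loop needs no separate
 * reload (sketch, hypothetical bounded increment):
 */
static int
inc_below(volatile uint32_t *p, uint32_t lim)
{
	uint32_t old;

	old = *p;			/* one explicit load up front */
	do {
		if (old >= lim)
			return (0);	/* would exceed the limit */
	} while (atomic_fcmpset_32(p, &old, old + 1) == 0);
	return (1);
}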
430
431static __inline uint32_t
432atomic_fcmpset_acq_32(__volatile uint32_t *p, uint32_t *cmpval, uint32_t newval)
433{
434 int retval;
435
436 retval = atomic_fcmpset_32(p, cmpval, newval);
437 mips_sync();
438 return (retval);
439}
440
441static __inline uint32_t
442atomic_fcmpset_rel_32(__volatile uint32_t *p, uint32_t *cmpval, uint32_t newval)
443{
444 mips_sync();
445 return (atomic_fcmpset_32(p, cmpval, newval));
446}
447
448/*
449 * Atomically add the value of v to the integer pointed to by p and return
450 * the previous value of *p.
451 */
452static __inline uint32_t
453atomic_fetchadd_32(__volatile uint32_t *p, uint32_t v)
454{
455 uint32_t value, temp;
456
457 __asm __volatile (
458 "1:\tll %0, %1\n\t" /* load old value */
459 "addu %2, %3, %0\n\t" /* calculate new value */
460 "sc %2, %1\n\t" /* attempt to store */
461 "beqz %2, 1b\n\t" /* spin if failed */
462 : "=&r" (value), "=m" (*p), "=&r" (temp)
463 : "r" (v), "m" (*p));
464 return (value);
465}
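/*
 * Example (not part of this file): because the previous value comes
 * back, atomic_fetchadd_32() can hand out unique values without any
 * retry loop (sketch, hypothetical ID allocator):
 */
static volatile uint32_t next_id;

static uint32_t
alloc_id(void)
{
	return (atomic_fetchadd_32(&next_id, 1));	/* old value is ours */
}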
466
467#if defined(__mips_n64) || defined(__mips_n32)
468/*
469 * Atomically compare the value stored at *p with cmpval and if the
470 * two values are equal, update the value of *p with newval. Returns
471 * zero if the compare failed, nonzero otherwise.
472 */
473static __inline uint64_t
434atomic_cmpset_64(__volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
474atomic_cmpset_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
475{
476 uint64_t ret;
477
478 __asm __volatile (
479 "1:\n\t"
480 "lld %0, %4\n\t" /* load old value */
481 "bne %0, %2, 2f\n\t" /* compare */
482 "move %0, %3\n\t" /* value to store */
483 "scd %0, %1\n\t" /* attempt to store */
484 "beqz %0, 1b\n\t" /* if it failed, spin */
485 "j 3f\n\t"
486 "2:\n\t"
487 "li %0, 0\n\t"
488 "3:\n"
489 : "=&r" (ret), "=m" (*p)
490 : "r" (cmpval), "r" (newval), "m" (*p)
491 : "memory");
492
493 return ret;
494}
495
496/*
497 * Atomically compare the value stored at *p with cmpval and if the
498 * two values are equal, update the value of *p with newval. Returns
499 * zero if the compare failed, nonzero otherwise.
500 */
501static __inline uint64_t
502atomic_cmpset_acq_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
503{
504 int retval;
505
506 retval = atomic_cmpset_64(p, cmpval, newval);
507 mips_sync();
508 return (retval);
509}
510
511static __inline uint64_t
512atomic_cmpset_rel_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
513{
514 mips_sync();
515 return (atomic_cmpset_64(p, cmpval, newval));
516}
517
518static __inline uint32_t
519atomic_fcmpset_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
520{
521 uint32_t ret;
522
523 __asm __volatile (
524 "1:\n\t"
525 "lld %0, %1\n\t" /* load old value */
526 "bne %0, %4, 2f\n\t" /* compare */
527 "move %0, %3\n\t" /* value to store */
528 "scd %0, %1\n\t" /* attempt to store */
529 "beqz %0, 1b\n\t" /* if it failed, spin */
530 "j 3f\n\t"
531 "2:\n\t"
532 "sd %0, %2\n\t" /* save old value */
533 "li %0, 0\n\t"
534 "3:\n"
535 : "=&r" (ret), "+m" (*p), "=m" (*cmpval)
536 : "r" (newval), "r" (*cmpval)
537 : "memory");
538
539 return ret;
540}
541
542static __inline uint64_t
543atomic_fcmpset_acq_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
544{
545 int retval;
546
547 retval = atomic_fcmpset_64(p, cmpval, newval);
548 mips_sync();
549 return (retval);
550}
551
552static __inline uint64_t
553atomic_fcmpset_rel_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
554{
555 mips_sync();
556 return (atomic_fcmpset_64(p, cmpval, newval));
557}
558
559/*
560 * Atomically add the value of v to the integer pointed to by p and return
561 * the previous value of *p.
562 */
563static __inline uint64_t
564atomic_fetchadd_64(__volatile uint64_t *p, uint64_t v)
565{
566 uint64_t value, temp;
567
568 __asm __volatile (
569 "1:\n\t"
570 "lld %0, %1\n\t" /* load old value */
571 "daddu %2, %3, %0\n\t" /* calculate new value */
572 "scd %2, %1\n\t" /* attempt to store */
573 "beqz %2, 1b\n\t" /* spin if failed */
574 : "=&r" (value), "=m" (*p), "=&r" (temp)
575 : "r" (v), "m" (*p));
576 return (value);
577}
578#endif
579
580static __inline void
581atomic_thread_fence_acq(void)
582{
583
584 mips_sync();
585}
586
587static __inline void
588atomic_thread_fence_rel(void)
589{
590
591 mips_sync();
592}
593
594static __inline void
595atomic_thread_fence_acq_rel(void)
596{
597
598 mips_sync();
599}
600
601static __inline void
602atomic_thread_fence_seq_cst(void)
603{
604
605 mips_sync();
606}
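/*
 * Example (not part of this file): every fence strength collapses to a
 * full SYNC here, which is correct (if conservative) for the weaker
 * fences.  The seq_cst fence is the one store-then-load protocols such
 * as Dekker-style flags actually require (sketch, hypothetical flags):
 */
static volatile uint32_t my_flag, other_flag;

static int
try_enter(void)
{
	my_flag = 1;
	atomic_thread_fence_seq_cst();	/* order my store before the load */
	return (other_flag == 0);	/* safe to enter if peer not racing */
}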
607
608/* Operations on chars. */
609#define atomic_set_char atomic_set_8
610#define atomic_set_acq_char atomic_set_acq_8
611#define atomic_set_rel_char atomic_set_rel_8
612#define atomic_clear_char atomic_clear_8
613#define atomic_clear_acq_char atomic_clear_acq_8
614#define atomic_clear_rel_char atomic_clear_rel_8
615#define atomic_add_char atomic_add_8
616#define atomic_add_acq_char atomic_add_acq_8
617#define atomic_add_rel_char atomic_add_rel_8
618#define atomic_subtract_char atomic_subtract_8
619#define atomic_subtract_acq_char atomic_subtract_acq_8
620#define atomic_subtract_rel_char atomic_subtract_rel_8
621
622/* Operations on shorts. */
623#define atomic_set_short atomic_set_16
624#define atomic_set_acq_short atomic_set_acq_16
625#define atomic_set_rel_short atomic_set_rel_16
626#define atomic_clear_short atomic_clear_16
627#define atomic_clear_acq_short atomic_clear_acq_16
628#define atomic_clear_rel_short atomic_clear_rel_16
629#define atomic_add_short atomic_add_16
630#define atomic_add_acq_short atomic_add_acq_16
631#define atomic_add_rel_short atomic_add_rel_16
632#define atomic_subtract_short atomic_subtract_16
633#define atomic_subtract_acq_short atomic_subtract_acq_16
634#define atomic_subtract_rel_short atomic_subtract_rel_16
635
636/* Operations on ints. */
637#define atomic_set_int atomic_set_32
638#define atomic_set_acq_int atomic_set_acq_32
639#define atomic_set_rel_int atomic_set_rel_32
640#define atomic_clear_int atomic_clear_32
641#define atomic_clear_acq_int atomic_clear_acq_32
642#define atomic_clear_rel_int atomic_clear_rel_32
643#define atomic_add_int atomic_add_32
644#define atomic_add_acq_int atomic_add_acq_32
645#define atomic_add_rel_int atomic_add_rel_32
646#define atomic_subtract_int atomic_subtract_32
647#define atomic_subtract_acq_int atomic_subtract_acq_32
648#define atomic_subtract_rel_int atomic_subtract_rel_32
649#define atomic_cmpset_int atomic_cmpset_32
650#define atomic_cmpset_acq_int atomic_cmpset_acq_32
651#define atomic_cmpset_rel_int atomic_cmpset_rel_32
652#define atomic_fcmpset_int atomic_fcmpset_32
653#define atomic_fcmpset_acq_int atomic_fcmpset_acq_32
654#define atomic_fcmpset_rel_int atomic_fcmpset_rel_32
655#define atomic_load_acq_int atomic_load_acq_32
656#define atomic_store_rel_int atomic_store_rel_32
657#define atomic_readandclear_int atomic_readandclear_32
658#define atomic_readandset_int atomic_readandset_32
659#define atomic_fetchadd_int atomic_fetchadd_32
660
661/*
662 * I think the following is right, even for n32.  For n32 the pointers
663 * are still 32 bits, so we need to operate on them as 32-bit quantities,
664 * even though they are sign-extended in operation.  For longs, there's
665 * no question because they are always 32 bits.
666 */
667#ifdef __mips_n64
668/* Operations on longs. */
669#define atomic_set_long atomic_set_64
670#define atomic_set_acq_long atomic_set_acq_64
671#define atomic_set_rel_long atomic_set_rel_64
672#define atomic_clear_long atomic_clear_64
673#define atomic_clear_acq_long atomic_clear_acq_64
674#define atomic_clear_rel_long atomic_clear_rel_64
675#define atomic_add_long atomic_add_64
676#define atomic_add_acq_long atomic_add_acq_64
677#define atomic_add_rel_long atomic_add_rel_64
678#define atomic_subtract_long atomic_subtract_64
679#define atomic_subtract_acq_long atomic_subtract_acq_64
680#define atomic_subtract_rel_long atomic_subtract_rel_64
681#define atomic_cmpset_long atomic_cmpset_64
682#define atomic_cmpset_acq_long atomic_cmpset_acq_64
683#define atomic_cmpset_rel_long atomic_cmpset_rel_64
684#define atomic_fcmpset_long atomic_fcmpset_64
685#define atomic_fcmpset_acq_long atomic_fcmpset_acq_64
686#define atomic_fcmpset_rel_long atomic_fcmpset_rel_64
687#define atomic_load_acq_long atomic_load_acq_64
688#define atomic_store_rel_long atomic_store_rel_64
689#define atomic_fetchadd_long atomic_fetchadd_64
690#define atomic_readandclear_long atomic_readandclear_64
691
692#else /* !__mips_n64 */
693
694/* Operations on longs. */
695#define atomic_set_long(p, v) \
696 atomic_set_32((volatile u_int *)(p), (u_int)(v))
697#define atomic_set_acq_long(p, v) \
698 atomic_set_acq_32((volatile u_int *)(p), (u_int)(v))
699#define atomic_set_rel_long(p, v) \
700 atomic_set_rel_32((volatile u_int *)(p), (u_int)(v))
701#define atomic_clear_long(p, v) \
702 atomic_clear_32((volatile u_int *)(p), (u_int)(v))
703#define atomic_clear_acq_long(p, v) \
704 atomic_clear_acq_32((volatile u_int *)(p), (u_int)(v))
705#define atomic_clear_rel_long(p, v) \
706 atomic_clear_rel_32((volatile u_int *)(p), (u_int)(v))
707#define atomic_add_long(p, v) \
708 atomic_add_32((volatile u_int *)(p), (u_int)(v))
709#define atomic_add_acq_long(p, v) \
710 atomic_add_32((volatile u_int *)(p), (u_int)(v))
711#define atomic_add_rel_long(p, v) \
712 atomic_add_32((volatile u_int *)(p), (u_int)(v))
713#define atomic_subtract_long(p, v) \
714 atomic_subtract_32((volatile u_int *)(p), (u_int)(v))
715#define atomic_subtract_acq_long(p, v) \
716 atomic_subtract_acq_32((volatile u_int *)(p), (u_int)(v))
717#define atomic_subtract_rel_long(p, v) \
718 atomic_subtract_rel_32((volatile u_int *)(p), (u_int)(v))
719#define atomic_cmpset_long(p, cmpval, newval) \
720 atomic_cmpset_32((volatile u_int *)(p), (u_int)(cmpval), \
721 (u_int)(newval))
722#define atomic_cmpset_acq_long(p, cmpval, newval) \
723 atomic_cmpset_acq_32((volatile u_int *)(p), (u_int)(cmpval), \
724 (u_int)(newval))
725#define atomic_cmpset_rel_long(p, cmpval, newval) \
726 atomic_cmpset_rel_32((volatile u_int *)(p), (u_int)(cmpval), \
727 (u_int)(newval))
728#define atomic_fcmpset_long(p, cmpval, newval) \
729 atomic_fcmpset_32((volatile u_int *)(p), (u_int *)(cmpval), \
730 (u_int)(newval))
731#define atomic_fcmpset_acq_long(p, cmpval, newval) \
732 atomic_fcmpset_acq_32((volatile u_int *)(p), (u_int *)(cmpval), \
733 (u_int)(newval))
734#define atomic_fcmpset_rel_long(p, cmpval, newval) \
735 atomic_fcmpset_rel_32((volatile u_int *)(p), (u_int *)(cmpval), \
736 (u_int)(newval))
737#define atomic_load_acq_long(p) \
738 (u_long)atomic_load_acq_32((volatile u_int *)(p))
739#define atomic_store_rel_long(p, v) \
740 atomic_store_rel_32((volatile u_int *)(p), (u_int)(v))
741#define atomic_fetchadd_long(p, v) \
742 atomic_fetchadd_32((volatile u_int *)(p), (u_int)(v))
743#define atomic_readandclear_long(p) \
744 atomic_readandclear_32((volatile u_int *)(p))
745
746#endif /* __mips_n64 */
747
748/* Operations on pointers. */
749#define atomic_set_ptr atomic_set_long
750#define atomic_set_acq_ptr atomic_set_acq_long
751#define atomic_set_rel_ptr atomic_set_rel_long
752#define atomic_clear_ptr atomic_clear_long
753#define atomic_clear_acq_ptr atomic_clear_acq_long
754#define atomic_clear_rel_ptr atomic_clear_rel_long
755#define atomic_add_ptr atomic_add_long
756#define atomic_add_acq_ptr atomic_add_acq_long
757#define atomic_add_rel_ptr atomic_add_rel_long
758#define atomic_subtract_ptr atomic_subtract_long
759#define atomic_subtract_acq_ptr atomic_subtract_acq_long
760#define atomic_subtract_rel_ptr atomic_subtract_rel_long
761#define atomic_cmpset_ptr atomic_cmpset_long
762#define atomic_cmpset_acq_ptr atomic_cmpset_acq_long
763#define atomic_cmpset_rel_ptr atomic_cmpset_rel_long
764#define atomic_fcmpset_ptr atomic_fcmpset_long
765#define atomic_fcmpset_acq_ptr atomic_fcmpset_acq_long
766#define atomic_fcmpset_rel_ptr atomic_fcmpset_rel_long
767#define atomic_load_acq_ptr atomic_load_acq_long
768#define atomic_store_rel_ptr atomic_store_rel_long
769#define atomic_readandclear_ptr atomic_readandclear_long
770
771#endif /* ! _MACHINE_ATOMIC_H_ */