/*-
 * Copyright (c) 2013 Ed Schouten <ed@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/stdatomic.h>
#include <sys/types.h>

#include <machine/cpufunc.h>
#include <machine/sysarch.h>

#ifdef _KERNEL
#include "opt_global.h"
#endif

/*
 * Executing statements with interrupts disabled.
 */

#if defined(_KERNEL) && !defined(SMP)
#define	WITHOUT_INTERRUPTS(s) do {					\
	register_t regs;						\
									\
	regs = intr_disable();						\
	do s while (0);							\
	intr_restore(regs);						\
} while (0)
#endif /* _KERNEL && !SMP */
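
/*
 * Example use (illustrative only; "counter" is a hypothetical pointer):
 *
 *	WITHOUT_INTERRUPTS({
 *		old = *counter;
 *		*counter = old + 1;
 *	});
 *
 * The inner "do s while (0)" turns the brace-enclosed statement list
 * into a single statement executed between intr_disable() and
 * intr_restore().
 */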

/*
 * Memory barriers.
 *
 * It turns out __sync_synchronize() does not emit any code when used
 * with GCC 4.2. Implement our own version that does work reliably.
 *
 * Although __sync_lock_test_and_set() should only perform an acquire
 * barrier, make it do a full barrier like the other functions. This
 * should make <stdatomic.h>'s atomic_exchange_explicit() work reliably.
 */
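
/*
 * Illustrative example of the last point: with __SYNC_ATOMICS,
 * <stdatomic.h> builds atomic_exchange_explicit() on top of
 * __sync_lock_test_and_set(). If that primitive acted only as an
 * acquire barrier, stores issued before the exchange could still be
 * reordered after it, which a seq_cst exchange must not allow; hence
 * the routines here issue a full do_sync() barrier instead.
 */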
65251695Sed
66251781Sed#if defined(_KERNEL) && !defined(SMP)
67251695Sedstatic inline void
68251695Seddo_sync(void)
69251695Sed{
70251695Sed
71251695Sed	__asm volatile ("" : : : "memory");
72251781Sed}
73251695Sed#elif defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)
74251781Sedstatic inline void
75251781Seddo_sync(void)
76251781Sed{
77251781Sed
78251695Sed	__asm volatile ("dmb" : : : "memory");
79251781Sed}
80251781Sed#elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
81251781Sed    defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
82251781Sed    defined(__ARM_ARCH_6ZK__)
83251781Sedstatic inline void
84251781Seddo_sync(void)
85251781Sed{
86251781Sed
87251695Sed	__asm volatile ("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory");
88251781Sed}
89251695Sed#endif
90251695Sed
91251695Sed#if defined(__CLANG_ATOMICS) || defined(__GNUC_ATOMICS)
92251695Sed
93251695Sed/*
94251695Sed * New C11 __atomic_* API.
95251695Sed */
96251695Sed
97251695Sed#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
98251695Sed    defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
99251695Sed    defined(__ARM_ARCH_6ZK__) || \
100251695Sed    defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)
101251695Sed
102251695Sed/* These systems should be supported by the compiler. */
103251695Sed
104251781Sed#else /* __ARM_ARCH_5__ */
105251695Sed
106251695Sed/* Clang doesn't allow us to reimplement builtins without this. */
107251695Sed#ifdef __clang__
108251695Sed#pragma redefine_extname __sync_synchronize_ext __sync_synchronize
109251695Sed#define __sync_synchronize __sync_synchronize_ext
110251695Sed#endif
111251695Sed
112251695Sedvoid
113251695Sed__sync_synchronize(void)
114251695Sed{
115251695Sed}
116251695Sed
117251781Sed#ifdef _KERNEL
118251781Sed
119251781Sed#ifdef SMP
120251781Sed#error "On SMP systems we should have proper atomic operations."
121251781Sed#endif
122251781Sed
123251695Sed/*
124251695Sed * On uniprocessor systems, we can perform the atomic operations by
125251695Sed * disabling interrupts.
126251695Sed */
127251695Sed
128251695Sed#define	EMIT_LOAD_N(N, uintN_t)						\
129251695SeduintN_t									\
130251695Sed__atomic_load_##N(uintN_t *mem, int model __unused)			\
131251695Sed{									\
132251695Sed	uintN_t ret;							\
133251695Sed									\
134251695Sed	WITHOUT_INTERRUPTS({						\
135251695Sed		ret = *mem;						\
136251695Sed	});								\
137251695Sed	return (ret);							\
138251695Sed}
139251695Sed
140251695Sed#define	EMIT_STORE_N(N, uintN_t)					\
141251695Sedvoid									\
142251695Sed__atomic_store_##N(uintN_t *mem, uintN_t val, int model __unused)	\
143251695Sed{									\
144251695Sed									\
145251695Sed	WITHOUT_INTERRUPTS({						\
146251695Sed		*mem = val;						\
147251695Sed	});								\
148251695Sed}
149251695Sed
150251695Sed#define	EMIT_COMPARE_EXCHANGE_N(N, uintN_t)				\
151251695Sed_Bool									\
152251695Sed__atomic_compare_exchange_##N(uintN_t *mem, uintN_t *expected,		\
153251695Sed    uintN_t desired, int success __unused, int failure __unused)	\
154251695Sed{									\
155251695Sed	_Bool ret;							\
156251695Sed									\
157251695Sed	WITHOUT_INTERRUPTS({						\
158251695Sed		if (*mem == *expected) {				\
159251695Sed			*mem = desired;					\
160251695Sed			ret = 1;					\
161251695Sed		} else {						\
162251695Sed			*expected = *mem;				\
163251695Sed			ret = 0;					\
164251695Sed		}							\
165251695Sed	});								\
166251695Sed	return (ret);							\
167251695Sed}
168251695Sed
169251695Sed#define	EMIT_FETCH_OP_N(N, uintN_t, name, op)				\
170251695SeduintN_t									\
171251695Sed__atomic_##name##_##N(uintN_t *mem, uintN_t val, int model __unused)	\
172251695Sed{									\
173251695Sed	uintN_t ret;							\
174251695Sed									\
175251695Sed	WITHOUT_INTERRUPTS({						\
176251695Sed		ret = *mem;						\
177251695Sed		*mem op val;						\
178251695Sed	});								\
179251695Sed	return (ret);							\
180251695Sed}
181251695Sed
182251695Sed#define	EMIT_ALL_OPS_N(N, uintN_t)					\
183251695SedEMIT_LOAD_N(N, uintN_t)							\
184251695SedEMIT_STORE_N(N, uintN_t)						\
185251695SedEMIT_COMPARE_EXCHANGE_N(N, uintN_t)					\
186251695SedEMIT_FETCH_OP_N(N, uintN_t, exchange, =)				\
187251695SedEMIT_FETCH_OP_N(N, uintN_t, fetch_add, +=)				\
188251695SedEMIT_FETCH_OP_N(N, uintN_t, fetch_and, &=)				\
189251695SedEMIT_FETCH_OP_N(N, uintN_t, fetch_or, |=)				\
190251695SedEMIT_FETCH_OP_N(N, uintN_t, fetch_sub, -=)				\
191251695SedEMIT_FETCH_OP_N(N, uintN_t, fetch_xor, ^=)
192251695Sed
193251695SedEMIT_ALL_OPS_N(1, uint8_t)
194251695SedEMIT_ALL_OPS_N(2, uint16_t)
195251695SedEMIT_ALL_OPS_N(4, uint32_t)
196251695SedEMIT_ALL_OPS_N(8, uint64_t)
197255092Stheraven#undef	EMIT_ALL_OPS_N
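
/*
 * As a concrete example of the expansions above,
 * EMIT_FETCH_OP_N(4, uint32_t, fetch_add, +=) produces roughly:
 *
 *	uint32_t
 *	__atomic_fetch_add_4(uint32_t *mem, uint32_t val, int model __unused)
 *	{
 *		uint32_t ret;
 *
 *		WITHOUT_INTERRUPTS({
 *			ret = *mem;
 *			*mem += val;
 *		});
 *		return (ret);
 *	}
 *
 * which is the function the compiler may call for atomic_fetch_add()
 * on a 32-bit object when it cannot inline the operation.
 */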

#else /* !_KERNEL */

/*
 * For userspace on uniprocessor systems, we can implement the atomic
 * operations by using a Restartable Atomic Sequence. This makes the
 * kernel restart the code from the beginning when interrupted.
 */
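
/*
 * A simplified sketch of the mechanism (the details live in the kernel's
 * RAS handling): ARM_RAS_START names a pair of words that the kernel
 * inspects when it interrupts the thread. Each routine below first stores
 * its own start address into the first word and its end address into the
 * second. If the thread is interrupted while its program counter lies
 * inside that range, the kernel resets the PC to the start address, so
 * the whole load/modify/store sequence runs again from the top. The
 * teardown (start = 0, end = 0xffffffff) marks the sequence as inactive.
 */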

#define	EMIT_LOAD_N(N, uintN_t)						\
uintN_t									\
__atomic_load_##N(uintN_t *mem, int model __unused)			\
{									\
									\
	return (*mem);							\
}

#define	EMIT_STORE_N(N, uintN_t)					\
void									\
__atomic_store_##N(uintN_t *mem, uintN_t val, int model __unused)	\
{									\
									\
	*mem = val;							\
}

#define	EMIT_EXCHANGE_N(N, uintN_t, ldr, str)				\
uintN_t									\
__atomic_exchange_##N(uintN_t *mem, uintN_t val, int model __unused)	\
{									\
	uint32_t old, temp, ras_start;					\
									\
	ras_start = ARM_RAS_START;					\
	__asm volatile (						\
		/* Set up Restartable Atomic Sequence. */		\
		"1:"							\
		"\tadr   %2, 1b\n"					\
		"\tstr   %2, [%5]\n"					\
		"\tadr   %2, 2f\n"					\
		"\tstr   %2, [%5, #4]\n"				\
									\
		"\t"ldr" %0, %4\n"	/* Load old value. */		\
		"\t"str" %3, %1\n"	/* Store new value. */		\
									\
		/* Tear down Restartable Atomic Sequence. */		\
		"2:"							\
		"\tmov   %2, #0x00000000\n"				\
		"\tstr   %2, [%5]\n"					\
		"\tmov   %2, #0xffffffff\n"				\
		"\tstr   %2, [%5, #4]\n"				\
		: "=&r" (old), "=m" (*mem), "=&r" (temp)		\
		: "r" (val), "m" (*mem), "r" (ras_start));		\
	return (old);							\
}

#define	EMIT_COMPARE_EXCHANGE_N(N, uintN_t, ldr, streq)			\
_Bool									\
__atomic_compare_exchange_##N(uintN_t *mem, uintN_t *pexpected,		\
    uintN_t desired, int success __unused, int failure __unused)	\
{									\
	uint32_t expected, old, temp, ras_start;			\
									\
	expected = *pexpected;						\
	ras_start = ARM_RAS_START;					\
	__asm volatile (						\
		/* Set up Restartable Atomic Sequence. */		\
		"1:"							\
		"\tadr   %2, 1b\n"					\
		"\tstr   %2, [%6]\n"					\
		"\tadr   %2, 2f\n"					\
		"\tstr   %2, [%6, #4]\n"				\
									\
		"\t"ldr" %0, %5\n"	/* Load old value. */		\
		"\tcmp   %0, %3\n"	/* Compare to expected value. */\
		"\t"streq" %4, %1\n"	/* Store new value. */		\
									\
		/* Tear down Restartable Atomic Sequence. */		\
		"2:"							\
		"\tmov   %2, #0x00000000\n"				\
		"\tstr   %2, [%6]\n"					\
		"\tmov   %2, #0xffffffff\n"				\
		"\tstr   %2, [%6, #4]\n"				\
		: "=&r" (old), "=m" (*mem), "=&r" (temp)		\
		: "r" (expected), "r" (desired), "m" (*mem),		\
		  "r" (ras_start));					\
	if (old == expected) {						\
		return (1);						\
	} else {							\
		*pexpected = old;					\
		return (0);						\
	}								\
}

#define	EMIT_FETCH_OP_N(N, uintN_t, ldr, str, name, op)			\
uintN_t									\
__atomic_##name##_##N(uintN_t *mem, uintN_t val, int model __unused)	\
{									\
	uint32_t old, temp, ras_start;					\
									\
	ras_start = ARM_RAS_START;					\
	__asm volatile (						\
		/* Set up Restartable Atomic Sequence. */		\
		"1:"							\
		"\tadr   %2, 1b\n"					\
		"\tstr   %2, [%5]\n"					\
		"\tadr   %2, 2f\n"					\
		"\tstr   %2, [%5, #4]\n"				\
									\
		"\t"ldr" %0, %4\n"	/* Load old value. */		\
		"\t"op"  %2, %0, %3\n"	/* Calculate new value. */	\
		"\t"str" %2, %1\n"	/* Store new value. */		\
									\
		/* Tear down Restartable Atomic Sequence. */		\
		"2:"							\
		"\tmov   %2, #0x00000000\n"				\
		"\tstr   %2, [%5]\n"					\
		"\tmov   %2, #0xffffffff\n"				\
		"\tstr   %2, [%5, #4]\n"				\
		: "=&r" (old), "=m" (*mem), "=&r" (temp)		\
		: "r" (val), "m" (*mem), "r" (ras_start));		\
	return (old);							\
}

#define	EMIT_ALL_OPS_N(N, uintN_t, ldr, str, streq)			\
EMIT_LOAD_N(N, uintN_t)							\
EMIT_STORE_N(N, uintN_t)						\
EMIT_EXCHANGE_N(N, uintN_t, ldr, str)					\
EMIT_COMPARE_EXCHANGE_N(N, uintN_t, ldr, streq)				\
EMIT_FETCH_OP_N(N, uintN_t, ldr, str, fetch_add, "add")			\
EMIT_FETCH_OP_N(N, uintN_t, ldr, str, fetch_and, "and")			\
EMIT_FETCH_OP_N(N, uintN_t, ldr, str, fetch_or, "orr")			\
EMIT_FETCH_OP_N(N, uintN_t, ldr, str, fetch_sub, "sub")			\
EMIT_FETCH_OP_N(N, uintN_t, ldr, str, fetch_xor, "eor")

EMIT_ALL_OPS_N(1, uint8_t, "ldrb", "strb", "strbeq")
EMIT_ALL_OPS_N(2, uint16_t, "ldrh", "strh", "strheq")
EMIT_ALL_OPS_N(4, uint32_t, "ldr", "str", "streq")
#undef	EMIT_ALL_OPS_N

#endif /* _KERNEL */

#endif

#endif /* __CLANG_ATOMICS || __GNUC_ATOMICS */

#if defined(__SYNC_ATOMICS) || defined(EMIT_SYNC_ATOMICS)

#ifdef __clang__
#pragma redefine_extname __sync_lock_test_and_set_1_c __sync_lock_test_and_set_1
#pragma redefine_extname __sync_lock_test_and_set_2_c __sync_lock_test_and_set_2
#pragma redefine_extname __sync_lock_test_and_set_4_c __sync_lock_test_and_set_4
#pragma redefine_extname __sync_val_compare_and_swap_1_c __sync_val_compare_and_swap_1
#pragma redefine_extname __sync_val_compare_and_swap_2_c __sync_val_compare_and_swap_2
#pragma redefine_extname __sync_val_compare_and_swap_4_c __sync_val_compare_and_swap_4
#pragma redefine_extname __sync_fetch_and_add_1_c __sync_fetch_and_add_1
#pragma redefine_extname __sync_fetch_and_add_2_c __sync_fetch_and_add_2
#pragma redefine_extname __sync_fetch_and_add_4_c __sync_fetch_and_add_4
#pragma redefine_extname __sync_fetch_and_and_1_c __sync_fetch_and_and_1
#pragma redefine_extname __sync_fetch_and_and_2_c __sync_fetch_and_and_2
#pragma redefine_extname __sync_fetch_and_and_4_c __sync_fetch_and_and_4
#pragma redefine_extname __sync_fetch_and_or_1_c __sync_fetch_and_or_1
#pragma redefine_extname __sync_fetch_and_or_2_c __sync_fetch_and_or_2
#pragma redefine_extname __sync_fetch_and_or_4_c __sync_fetch_and_or_4
#pragma redefine_extname __sync_fetch_and_xor_1_c __sync_fetch_and_xor_1
#pragma redefine_extname __sync_fetch_and_xor_2_c __sync_fetch_and_xor_2
#pragma redefine_extname __sync_fetch_and_xor_4_c __sync_fetch_and_xor_4
#pragma redefine_extname __sync_fetch_and_sub_1_c __sync_fetch_and_sub_1
#pragma redefine_extname __sync_fetch_and_sub_2_c __sync_fetch_and_sub_2
#pragma redefine_extname __sync_fetch_and_sub_4_c __sync_fetch_and_sub_4
#endif

/*
 * Old __sync_* API.
 */

#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
    defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
    defined(__ARM_ARCH_6ZK__) || \
    defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)

/* Implementations for old GCC versions, lacking support for atomics. */

typedef union {
	uint8_t		v8[4];
	uint32_t	v32;
} reg_t;

/*
 * Given a memory address pointing to an 8-bit or 16-bit integer, return
 * the address of the 32-bit word containing it.
 */

static inline uint32_t *
round_to_word(void *ptr)
{

	return ((uint32_t *)((intptr_t)ptr & ~3));
}
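
/*
 * For example, a pointer to the uint16_t at address 0x1002 rounds down
 * to the word at 0x1000; the low two address bits (here, 2) are then
 * used by the put_*()/get_*() helpers below to select the right bytes
 * within that word.
 */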

/*
 * Utility functions for loading and storing 8-bit and 16-bit integers
 * in 32-bit words at an offset corresponding with the location of the
 * atomic variable.
 */

static inline void
put_1(reg_t *r, const uint8_t *offset_ptr, uint8_t val)
{
	size_t offset;

	offset = (intptr_t)offset_ptr & 3;
	r->v8[offset] = val;
}

static inline uint8_t
get_1(const reg_t *r, const uint8_t *offset_ptr)
{
	size_t offset;

	offset = (intptr_t)offset_ptr & 3;
	return (r->v8[offset]);
}

static inline void
put_2(reg_t *r, const uint16_t *offset_ptr, uint16_t val)
{
	size_t offset;
	union {
		uint16_t in;
		uint8_t out[2];
	} bytes;

	offset = (intptr_t)offset_ptr & 3;
	bytes.in = val;
	r->v8[offset] = bytes.out[0];
	r->v8[offset + 1] = bytes.out[1];
}

static inline uint16_t
get_2(const reg_t *r, const uint16_t *offset_ptr)
{
	size_t offset;
	union {
		uint8_t in[2];
		uint16_t out;
	} bytes;

	offset = (intptr_t)offset_ptr & 3;
	bytes.in[0] = r->v8[offset];
	bytes.in[1] = r->v8[offset + 1];
	return (bytes.out);
}

/*
 * 8-bit and 16-bit routines.
 *
 * These operations are not natively supported by the CPU, so we use
 * some shifting and bitmasking on top of the 32-bit instructions.
 */
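
/*
 * In plain C, the LDREX/STREX loops below compute (illustrative sketch;
 * the real code keeps the whole read-modify-write atomic):
 *
 *	old32 = *mem32;
 *	new32 = (old32 & negmask) | val32;
 *	*mem32 = new32;
 *
 * where val32 holds the new 8- or 16-bit value already shifted into its
 * byte position and negmask is all ones except at those byte positions.
 */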

#define	EMIT_LOCK_TEST_AND_SET_N(N, uintN_t)				\
uintN_t									\
__sync_lock_test_and_set_##N##_c(uintN_t *mem, uintN_t val)		\
{									\
	uint32_t *mem32;						\
	reg_t val32, negmask, old;					\
	uint32_t temp1, temp2;						\
									\
	mem32 = round_to_word(mem);					\
	val32.v32 = 0x00000000;						\
	put_##N(&val32, mem, val);					\
	negmask.v32 = 0xffffffff;					\
	put_##N(&negmask, mem, 0);					\
									\
	do_sync();							\
	__asm volatile (						\
		"1:"							\
		"\tldrex %0, %6\n"	/* Load old value. */		\
		"\tand   %2, %5, %0\n"	/* Remove the old value. */	\
		"\torr   %2, %2, %4\n"	/* Put in the new value. */	\
		"\tstrex %3, %2, %1\n"	/* Attempt to store. */		\
		"\tcmp   %3, #0\n"	/* Did it succeed? */		\
		"\tbne   1b\n"		/* Spin if failed. */		\
		: "=&r" (old.v32), "=m" (*mem32), "=&r" (temp1),	\
		  "=&r" (temp2)						\
		: "r" (val32.v32), "r" (negmask.v32), "m" (*mem32));	\
	return (get_##N(&old, mem));					\
}

EMIT_LOCK_TEST_AND_SET_N(1, uint8_t)
EMIT_LOCK_TEST_AND_SET_N(2, uint16_t)

#define	EMIT_VAL_COMPARE_AND_SWAP_N(N, uintN_t)				\
uintN_t									\
__sync_val_compare_and_swap_##N##_c(uintN_t *mem, uintN_t expected,	\
    uintN_t desired)							\
{									\
	uint32_t *mem32;						\
	reg_t expected32, desired32, posmask, old;			\
	uint32_t negmask, temp1, temp2;					\
									\
	mem32 = round_to_word(mem);					\
	expected32.v32 = 0x00000000;					\
	put_##N(&expected32, mem, expected);				\
	desired32.v32 = 0x00000000;					\
	put_##N(&desired32, mem, desired);				\
	posmask.v32 = 0x00000000;					\
	put_##N(&posmask, mem, ~0);					\
	negmask = ~posmask.v32;						\
									\
	do_sync();							\
	__asm volatile (						\
		"1:"							\
		"\tldrex %0, %8\n"	/* Load old value. */		\
		"\tand   %2, %6, %0\n"	/* Isolate the old value. */	\
		"\tcmp   %2, %4\n"	/* Compare to expected value. */\
		"\tbne   2f\n"		/* Values are unequal. */	\
		"\tand   %2, %7, %0\n"	/* Remove the old value. */	\
		"\torr   %2, %5\n"	/* Put in the new value. */	\
		"\tstrex %3, %2, %1\n"	/* Attempt to store. */		\
		"\tcmp   %3, #0\n"	/* Did it succeed? */		\
		"\tbne   1b\n"		/* Spin if failed. */		\
		"2:"							\
		: "=&r" (old.v32), "=m" (*mem32), "=&r" (temp1),	\
		  "=&r" (temp2)						\
		: "r" (expected32.v32), "r" (desired32.v32),		\
		  "r" (posmask.v32), "r" (negmask), "m" (*mem32));	\
	return (get_##N(&old, mem));					\
}

EMIT_VAL_COMPARE_AND_SWAP_N(1, uint8_t)
EMIT_VAL_COMPARE_AND_SWAP_N(2, uint16_t)

#define	EMIT_ARITHMETIC_FETCH_AND_OP_N(N, uintN_t, name, op)		\
uintN_t									\
__sync_##name##_##N##_c(uintN_t *mem, uintN_t val)			\
{									\
	uint32_t *mem32;						\
	reg_t val32, posmask, old;					\
	uint32_t negmask, temp1, temp2;					\
									\
	mem32 = round_to_word(mem);					\
	val32.v32 = 0x00000000;						\
	put_##N(&val32, mem, val);					\
	posmask.v32 = 0x00000000;					\
	put_##N(&posmask, mem, ~0);					\
	negmask = ~posmask.v32;						\
									\
	do_sync();							\
	__asm volatile (						\
		"1:"							\
		"\tldrex %0, %7\n"	/* Load old value. */		\
		"\t"op"  %2, %0, %4\n"	/* Calculate new value. */	\
		"\tand   %2, %5\n"	/* Isolate the new value. */	\
		"\tand   %3, %6, %0\n"	/* Remove the old value. */	\
		"\torr   %2, %2, %3\n"	/* Put in the new value. */	\
		"\tstrex %3, %2, %1\n"	/* Attempt to store. */		\
		"\tcmp   %3, #0\n"	/* Did it succeed? */		\
		"\tbne   1b\n"		/* Spin if failed. */		\
		: "=&r" (old.v32), "=m" (*mem32), "=&r" (temp1),	\
		  "=&r" (temp2)						\
		: "r" (val32.v32), "r" (posmask.v32), "r" (negmask),	\
		  "m" (*mem32));					\
	return (get_##N(&old, mem));					\
}

EMIT_ARITHMETIC_FETCH_AND_OP_N(1, uint8_t, fetch_and_add, "add")
EMIT_ARITHMETIC_FETCH_AND_OP_N(1, uint8_t, fetch_and_sub, "sub")
EMIT_ARITHMETIC_FETCH_AND_OP_N(2, uint16_t, fetch_and_add, "add")
EMIT_ARITHMETIC_FETCH_AND_OP_N(2, uint16_t, fetch_and_sub, "sub")

#define	EMIT_BITWISE_FETCH_AND_OP_N(N, uintN_t, name, op, idempotence)	\
uintN_t									\
__sync_##name##_##N##_c(uintN_t *mem, uintN_t val)			\
{									\
	uint32_t *mem32;						\
	reg_t val32, old;						\
	uint32_t temp1, temp2;						\
									\
	mem32 = round_to_word(mem);					\
	val32.v32 = idempotence ? 0xffffffff : 0x00000000;		\
	put_##N(&val32, mem, val);					\
									\
	do_sync();							\
	__asm volatile (						\
		"1:"							\
		"\tldrex %0, %5\n"	/* Load old value. */		\
		"\t"op"  %2, %4, %0\n"	/* Calculate new value. */	\
		"\tstrex %3, %2, %1\n"	/* Attempt to store. */		\
		"\tcmp   %3, #0\n"	/* Did it succeed? */		\
		"\tbne   1b\n"		/* Spin if failed. */		\
		: "=&r" (old.v32), "=m" (*mem32), "=&r" (temp1),	\
		  "=&r" (temp2)						\
		: "r" (val32.v32), "m" (*mem32));			\
	return (get_##N(&old, mem));					\
}

EMIT_BITWISE_FETCH_AND_OP_N(1, uint8_t, fetch_and_and, "and", 1)
EMIT_BITWISE_FETCH_AND_OP_N(1, uint8_t, fetch_and_or, "orr", 0)
EMIT_BITWISE_FETCH_AND_OP_N(1, uint8_t, fetch_and_xor, "eor", 0)
EMIT_BITWISE_FETCH_AND_OP_N(2, uint16_t, fetch_and_and, "and", 1)
EMIT_BITWISE_FETCH_AND_OP_N(2, uint16_t, fetch_and_or, "orr", 0)
EMIT_BITWISE_FETCH_AND_OP_N(2, uint16_t, fetch_and_xor, "eor", 0)

/*
 * 32-bit routines.
 */

uint32_t
__sync_lock_test_and_set_4_c(uint32_t *mem, uint32_t val)
{
	uint32_t old, temp;

	do_sync();
	__asm volatile (
		"1:"
		"\tldrex %0, %4\n"	/* Load old value. */
		"\tstrex %2, %3, %1\n"	/* Attempt to store. */
		"\tcmp   %2, #0\n"	/* Did it succeed? */
		"\tbne   1b\n"		/* Spin if failed. */
		: "=&r" (old), "=m" (*mem), "=&r" (temp)
		: "r" (val), "m" (*mem));
	return (old);
}

uint32_t
__sync_val_compare_and_swap_4_c(uint32_t *mem, uint32_t expected,
    uint32_t desired)
{
	uint32_t old, temp;

	do_sync();
	__asm volatile (
		"1:"
		"\tldrex %0, %5\n"	/* Load old value. */
		"\tcmp   %0, %3\n"	/* Compare to expected value. */
		"\tbne   2f\n"		/* Values are unequal. */
		"\tstrex %2, %4, %1\n"	/* Attempt to store. */
		"\tcmp   %2, #0\n"	/* Did it succeed? */
		"\tbne   1b\n"		/* Spin if failed. */
		"2:"
		: "=&r" (old), "=m" (*mem), "=&r" (temp)
		: "r" (expected), "r" (desired), "m" (*mem));
	return (old);
}

#define	EMIT_FETCH_AND_OP_4(name, op)					\
uint32_t								\
__sync_##name##_4##_c(uint32_t *mem, uint32_t val)			\
{									\
	uint32_t old, temp1, temp2;					\
									\
	do_sync();							\
	__asm volatile (						\
		"1:"							\
		"\tldrex %0, %5\n"	/* Load old value. */		\
		"\t"op"  %2, %0, %4\n"	/* Calculate new value. */	\
		"\tstrex %3, %2, %1\n"	/* Attempt to store. */		\
		"\tcmp   %3, #0\n"	/* Did it succeed? */		\
		"\tbne   1b\n"		/* Spin if failed. */		\
		: "=&r" (old), "=m" (*mem), "=&r" (temp1),		\
		  "=&r" (temp2)						\
		: "r" (val), "m" (*mem));				\
	return (old);							\
}

EMIT_FETCH_AND_OP_4(fetch_and_add, "add")
EMIT_FETCH_AND_OP_4(fetch_and_and, "and")
EMIT_FETCH_AND_OP_4(fetch_and_or, "orr")
EMIT_FETCH_AND_OP_4(fetch_and_sub, "sub")
EMIT_FETCH_AND_OP_4(fetch_and_xor, "eor")

#ifndef __clang__
__strong_reference(__sync_lock_test_and_set_1_c, __sync_lock_test_and_set_1);
__strong_reference(__sync_lock_test_and_set_2_c, __sync_lock_test_and_set_2);
__strong_reference(__sync_lock_test_and_set_4_c, __sync_lock_test_and_set_4);
__strong_reference(__sync_val_compare_and_swap_1_c, __sync_val_compare_and_swap_1);
__strong_reference(__sync_val_compare_and_swap_2_c, __sync_val_compare_and_swap_2);
__strong_reference(__sync_val_compare_and_swap_4_c, __sync_val_compare_and_swap_4);
__strong_reference(__sync_fetch_and_add_1_c, __sync_fetch_and_add_1);
__strong_reference(__sync_fetch_and_add_2_c, __sync_fetch_and_add_2);
__strong_reference(__sync_fetch_and_add_4_c, __sync_fetch_and_add_4);
__strong_reference(__sync_fetch_and_and_1_c, __sync_fetch_and_and_1);
__strong_reference(__sync_fetch_and_and_2_c, __sync_fetch_and_and_2);
__strong_reference(__sync_fetch_and_and_4_c, __sync_fetch_and_and_4);
__strong_reference(__sync_fetch_and_sub_1_c, __sync_fetch_and_sub_1);
__strong_reference(__sync_fetch_and_sub_2_c, __sync_fetch_and_sub_2);
__strong_reference(__sync_fetch_and_sub_4_c, __sync_fetch_and_sub_4);
__strong_reference(__sync_fetch_and_or_1_c, __sync_fetch_and_or_1);
__strong_reference(__sync_fetch_and_or_2_c, __sync_fetch_and_or_2);
__strong_reference(__sync_fetch_and_or_4_c, __sync_fetch_and_or_4);
__strong_reference(__sync_fetch_and_xor_1_c, __sync_fetch_and_xor_1);
__strong_reference(__sync_fetch_and_xor_2_c, __sync_fetch_and_xor_2);
__strong_reference(__sync_fetch_and_xor_4_c, __sync_fetch_and_xor_4);
#endif

#else /* __ARM_ARCH_5__ */

#ifdef _KERNEL

#ifdef SMP
#error "On SMP systems we should have proper atomic operations."
#endif

/*
 * On uniprocessor systems, we can perform the atomic operations by
 * disabling interrupts.
 */

#define	EMIT_VAL_COMPARE_AND_SWAP_N(N, uintN_t)				\
uintN_t									\
__sync_val_compare_and_swap_##N(uintN_t *mem, uintN_t expected,		\
    uintN_t desired)							\
{									\
	uintN_t ret;							\
									\
	WITHOUT_INTERRUPTS({						\
		ret = *mem;						\
		if (*mem == expected)					\
			*mem = desired;					\
	});								\
	return (ret);							\
}

#define	EMIT_FETCH_AND_OP_N(N, uintN_t, name, op)			\
uintN_t									\
__sync_##name##_##N(uintN_t *mem, uintN_t val)				\
{									\
	uintN_t ret;							\
									\
	WITHOUT_INTERRUPTS({						\
		ret = *mem;						\
		*mem op val;						\
	});								\
	return (ret);							\
}

#define	EMIT_ALL_OPS_N(N, uintN_t)					\
EMIT_VAL_COMPARE_AND_SWAP_N(N, uintN_t)					\
EMIT_FETCH_AND_OP_N(N, uintN_t, lock_test_and_set, =)			\
EMIT_FETCH_AND_OP_N(N, uintN_t, fetch_and_add, +=)			\
EMIT_FETCH_AND_OP_N(N, uintN_t, fetch_and_and, &=)			\
EMIT_FETCH_AND_OP_N(N, uintN_t, fetch_and_or, |=)			\
EMIT_FETCH_AND_OP_N(N, uintN_t, fetch_and_sub, -=)			\
EMIT_FETCH_AND_OP_N(N, uintN_t, fetch_and_xor, ^=)

EMIT_ALL_OPS_N(1, uint8_t)
EMIT_ALL_OPS_N(2, uint16_t)
EMIT_ALL_OPS_N(4, uint32_t)
EMIT_ALL_OPS_N(8, uint64_t)
#undef	EMIT_ALL_OPS_N

#else /* !_KERNEL */

/*
 * For userspace on uniprocessor systems, we can implement the atomic
 * operations by using a Restartable Atomic Sequence. This makes the
 * kernel restart the code from the beginning when interrupted.
 */

#define	EMIT_LOCK_TEST_AND_SET_N(N, uintN_t, ldr, str)			\
uintN_t									\
__sync_lock_test_and_set_##N##_c(uintN_t *mem, uintN_t val)		\
{									\
	uint32_t old, temp, ras_start;					\
									\
	ras_start = ARM_RAS_START;					\
	__asm volatile (						\
		/* Set up Restartable Atomic Sequence. */		\
		"1:"							\
		"\tadr   %2, 1b\n"					\
		"\tstr   %2, [%5]\n"					\
		"\tadr   %2, 2f\n"					\
		"\tstr   %2, [%5, #4]\n"				\
									\
		"\t"ldr" %0, %4\n"	/* Load old value. */		\
		"\t"str" %3, %1\n"	/* Store new value. */		\
									\
		/* Tear down Restartable Atomic Sequence. */		\
		"2:"							\
		"\tmov   %2, #0x00000000\n"				\
		"\tstr   %2, [%5]\n"					\
		"\tmov   %2, #0xffffffff\n"				\
		"\tstr   %2, [%5, #4]\n"				\
		: "=&r" (old), "=m" (*mem), "=&r" (temp)		\
		: "r" (val), "m" (*mem), "r" (ras_start));		\
	return (old);							\
}

#define	EMIT_VAL_COMPARE_AND_SWAP_N(N, uintN_t, ldr, streq)		\
uintN_t									\
__sync_val_compare_and_swap_##N##_c(uintN_t *mem, uintN_t expected,	\
    uintN_t desired)							\
{									\
	uint32_t old, temp, ras_start;					\
									\
	ras_start = ARM_RAS_START;					\
	__asm volatile (						\
		/* Set up Restartable Atomic Sequence. */		\
		"1:"							\
		"\tadr   %2, 1b\n"					\
		"\tstr   %2, [%6]\n"					\
		"\tadr   %2, 2f\n"					\
		"\tstr   %2, [%6, #4]\n"				\
									\
		"\t"ldr" %0, %5\n"	/* Load old value. */		\
		"\tcmp   %0, %3\n"	/* Compare to expected value. */\
		"\t"streq" %4, %1\n"	/* Store new value. */		\
									\
		/* Tear down Restartable Atomic Sequence. */		\
		"2:"							\
		"\tmov   %2, #0x00000000\n"				\
		"\tstr   %2, [%6]\n"					\
		"\tmov   %2, #0xffffffff\n"				\
		"\tstr   %2, [%6, #4]\n"				\
		: "=&r" (old), "=m" (*mem), "=&r" (temp)		\
		: "r" (expected), "r" (desired), "m" (*mem),		\
		  "r" (ras_start));					\
	return (old);							\
}

#define	EMIT_FETCH_AND_OP_N(N, uintN_t, ldr, str, name, op)		\
uintN_t									\
__sync_##name##_##N##_c(uintN_t *mem, uintN_t val)			\
{									\
	uint32_t old, temp, ras_start;					\
									\
	ras_start = ARM_RAS_START;					\
	__asm volatile (						\
		/* Set up Restartable Atomic Sequence. */		\
		"1:"							\
		"\tadr   %2, 1b\n"					\
		"\tstr   %2, [%5]\n"					\
		"\tadr   %2, 2f\n"					\
		"\tstr   %2, [%5, #4]\n"				\
									\
		"\t"ldr" %0, %4\n"	/* Load old value. */		\
		"\t"op"  %2, %0, %3\n"	/* Calculate new value. */	\
		"\t"str" %2, %1\n"	/* Store new value. */		\
									\
		/* Tear down Restartable Atomic Sequence. */		\
		"2:"							\
		"\tmov   %2, #0x00000000\n"				\
		"\tstr   %2, [%5]\n"					\
		"\tmov   %2, #0xffffffff\n"				\
		"\tstr   %2, [%5, #4]\n"				\
		: "=&r" (old), "=m" (*mem), "=&r" (temp)		\
		: "r" (val), "m" (*mem), "r" (ras_start));		\
	return (old);							\
}

#define	EMIT_ALL_OPS_N(N, uintN_t, ldr, str, streq)			\
EMIT_LOCK_TEST_AND_SET_N(N, uintN_t, ldr, str)				\
EMIT_VAL_COMPARE_AND_SWAP_N(N, uintN_t, ldr, streq)			\
EMIT_FETCH_AND_OP_N(N, uintN_t, ldr, str, fetch_and_add, "add")		\
EMIT_FETCH_AND_OP_N(N, uintN_t, ldr, str, fetch_and_and, "and")		\
EMIT_FETCH_AND_OP_N(N, uintN_t, ldr, str, fetch_and_or, "orr")		\
EMIT_FETCH_AND_OP_N(N, uintN_t, ldr, str, fetch_and_sub, "sub")		\
EMIT_FETCH_AND_OP_N(N, uintN_t, ldr, str, fetch_and_xor, "eor")

EMIT_ALL_OPS_N(1, uint8_t, "ldrb", "strb", "streqb")
EMIT_ALL_OPS_N(2, uint16_t, "ldrh", "strh", "streqh")
EMIT_ALL_OPS_N(4, uint32_t, "ldr", "str", "streq")

#ifndef __clang__
__strong_reference(__sync_lock_test_and_set_1_c, __sync_lock_test_and_set_1);
__strong_reference(__sync_lock_test_and_set_2_c, __sync_lock_test_and_set_2);
__strong_reference(__sync_lock_test_and_set_4_c, __sync_lock_test_and_set_4);
__strong_reference(__sync_val_compare_and_swap_1_c, __sync_val_compare_and_swap_1);
__strong_reference(__sync_val_compare_and_swap_2_c, __sync_val_compare_and_swap_2);
__strong_reference(__sync_val_compare_and_swap_4_c, __sync_val_compare_and_swap_4);
__strong_reference(__sync_fetch_and_add_1_c, __sync_fetch_and_add_1);
__strong_reference(__sync_fetch_and_add_2_c, __sync_fetch_and_add_2);
__strong_reference(__sync_fetch_and_add_4_c, __sync_fetch_and_add_4);
__strong_reference(__sync_fetch_and_and_1_c, __sync_fetch_and_and_1);
__strong_reference(__sync_fetch_and_and_2_c, __sync_fetch_and_and_2);
__strong_reference(__sync_fetch_and_and_4_c, __sync_fetch_and_and_4);
__strong_reference(__sync_fetch_and_sub_1_c, __sync_fetch_and_sub_1);
__strong_reference(__sync_fetch_and_sub_2_c, __sync_fetch_and_sub_2);
__strong_reference(__sync_fetch_and_sub_4_c, __sync_fetch_and_sub_4);
__strong_reference(__sync_fetch_and_or_1_c, __sync_fetch_and_or_1);
__strong_reference(__sync_fetch_and_or_2_c, __sync_fetch_and_or_2);
__strong_reference(__sync_fetch_and_or_4_c, __sync_fetch_and_or_4);
__strong_reference(__sync_fetch_and_xor_1_c, __sync_fetch_and_xor_1);
__strong_reference(__sync_fetch_and_xor_2_c, __sync_fetch_and_xor_2);
__strong_reference(__sync_fetch_and_xor_4_c, __sync_fetch_and_xor_4);
#endif

#endif /* _KERNEL */

#endif

#endif /* __SYNC_ATOMICS */