/*-
 * Copyright (c) 2013 Ed Schouten <ed@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/stdatomic.c 282763 2015-05-11 08:57:23Z andrew $");

#include <sys/param.h>
#include <sys/stdatomic.h>
#include <sys/types.h>

#include <machine/acle-compat.h>
#include <machine/cpufunc.h>
#include <machine/sysarch.h>

/*
 * Executing statements with interrupts disabled.
 */

#if defined(_KERNEL) && !defined(SMP)
#define	WITHOUT_INTERRUPTS(s) do {					\
	register_t regs;						\
									\
	regs = intr_disable();						\
	do s while (0);							\
	intr_restore(regs);						\
} while (0)
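
/*
 * Example usage (this is exactly how the fetch-and-op routines below
 * use it to build an interrupt-atomic read-modify-write):
 *
 *	WITHOUT_INTERRUPTS({
 *		ret = *mem;
 *		*mem += val;
 *	});
 */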
#endif /* _KERNEL && !SMP */

/*
 * Memory barriers.
 *
 * It turns out __sync_synchronize() does not emit any code when used
 * with GCC 4.2. Implement our own version that does work reliably.
 *
 * Although __sync_lock_test_and_set() should only perform an acquire
 * barrier, make it do a full barrier like the other functions. This
 * should make <stdatomic.h>'s atomic_exchange_explicit() work reliably.
 */

#if defined(_KERNEL) && !defined(SMP)
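/* On a uniprocessor kernel, preventing compiler reordering is sufficient. */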
static inline void
do_sync(void)
{

	__asm volatile ("" : : : "memory");
}
#elif __ARM_ARCH >= 7
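/* ARMv7 and later provide the DMB instruction directly. */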
static inline void
do_sync(void)
{

	__asm volatile ("dmb" : : : "memory");
}
#elif __ARM_ARCH >= 6
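/* On ARMv6 the Data Memory Barrier is only available as a CP15 operation. */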
static inline void
do_sync(void)
{

	__asm volatile ("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory");
}
#endif

#if defined(__CLANG_ATOMICS) || defined(__GNUC_ATOMICS)

/*
 * New C11 __atomic_* API.
 */

/* ARMv6+ systems should be supported by the compiler. */
#if __ARM_ARCH <= 5

/* Clang doesn't allow us to reimplement builtins without this. */
#ifdef __clang__
#pragma redefine_extname __sync_synchronize_ext __sync_synchronize
#define __sync_synchronize __sync_synchronize_ext
#endif

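/*
 * The function call itself already acts as a compiler barrier, and on
 * these uniprocessor systems no hardware barrier is needed, so an
 * empty body suffices.
 */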
void
__sync_synchronize(void)
{
}

#ifdef _KERNEL

#ifdef SMP
#error "On SMP systems we should have proper atomic operations."
#endif

/*
 * On uniprocessor systems, we can perform the atomic operations by
 * disabling interrupts.
 */

#define	EMIT_LOAD_N(N, uintN_t)						\
uintN_t									\
__atomic_load_##N(uintN_t *mem, int model __unused)			\
{									\
	uintN_t ret;							\
									\
	WITHOUT_INTERRUPTS({						\
		ret = *mem;						\
	});								\
	return (ret);							\
}

#define	EMIT_STORE_N(N, uintN_t)					\
void									\
__atomic_store_##N(uintN_t *mem, uintN_t val, int model __unused)	\
{									\
									\
	WITHOUT_INTERRUPTS({						\
		*mem = val;						\
	});								\
}

#define	EMIT_COMPARE_EXCHANGE_N(N, uintN_t)				\
_Bool									\
__atomic_compare_exchange_##N(uintN_t *mem, uintN_t *expected,		\
    uintN_t desired, int success __unused, int failure __unused)	\
{									\
	_Bool ret;							\
									\
	WITHOUT_INTERRUPTS({						\
		if (*mem == *expected) {				\
			*mem = desired;					\
			ret = 1;					\
		} else {						\
			*expected = *mem;				\
			ret = 0;					\
		}							\
	});								\
	return (ret);							\
}

#define	EMIT_FETCH_OP_N(N, uintN_t, name, op)				\
uintN_t									\
__atomic_##name##_##N(uintN_t *mem, uintN_t val, int model __unused)	\
{									\
	uintN_t ret;							\
									\
	WITHOUT_INTERRUPTS({						\
		ret = *mem;						\
		*mem op val;						\
	});								\
	return (ret);							\
}

#define	EMIT_ALL_OPS_N(N, uintN_t)					\
EMIT_LOAD_N(N, uintN_t)							\
EMIT_STORE_N(N, uintN_t)						\
EMIT_COMPARE_EXCHANGE_N(N, uintN_t)					\
EMIT_FETCH_OP_N(N, uintN_t, exchange, =)				\
EMIT_FETCH_OP_N(N, uintN_t, fetch_add, +=)				\
EMIT_FETCH_OP_N(N, uintN_t, fetch_and, &=)				\
EMIT_FETCH_OP_N(N, uintN_t, fetch_or, |=)				\
EMIT_FETCH_OP_N(N, uintN_t, fetch_sub, -=)				\
EMIT_FETCH_OP_N(N, uintN_t, fetch_xor, ^=)

EMIT_ALL_OPS_N(1, uint8_t)
EMIT_ALL_OPS_N(2, uint16_t)
EMIT_ALL_OPS_N(4, uint32_t)
EMIT_ALL_OPS_N(8, uint64_t)
#undef	EMIT_ALL_OPS_N

#else /* !_KERNEL */

/*
 * For userspace on uniprocessor systems, we can implement the atomic
 * operations by using a Restartable Atomic Sequence. This makes the
 * kernel restart the code from the beginning when interrupted.
 */
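
/*
 * The sequences below start by storing the address of their first
 * instruction at ARM_RAS_START and the address just past their last
 * instruction at ARM_RAS_START + 4. If the kernel interrupts a thread
 * whose program counter lies inside that window, it rolls the thread
 * back to the start of the sequence before resuming it. The teardown
 * stores of 0x00000000 and 0xffffffff mark the window as empty again.
 */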

#define	EMIT_LOAD_N(N, uintN_t)						\
uintN_t									\
__atomic_load_##N(uintN_t *mem, int model __unused)			\
{									\
									\
	return (*mem);							\
}

#define	EMIT_STORE_N(N, uintN_t)					\
void									\
__atomic_store_##N(uintN_t *mem, uintN_t val, int model __unused)	\
{									\
									\
	*mem = val;							\
}

#define	EMIT_EXCHANGE_N(N, uintN_t, ldr, str)				\
uintN_t									\
__atomic_exchange_##N(uintN_t *mem, uintN_t val, int model __unused)	\
{									\
	uint32_t old, temp, ras_start;					\
									\
	ras_start = ARM_RAS_START;					\
	__asm volatile (						\
		/* Set up Restartable Atomic Sequence. */		\
		"1:"							\
		"\tadr   %2, 1b\n"					\
		"\tstr   %2, [%5]\n"					\
		"\tadr   %2, 2f\n"					\
		"\tstr   %2, [%5, #4]\n"				\
									\
		"\t"ldr" %0, %4\n"	/* Load old value. */		\
		"\t"str" %3, %1\n"	/* Store new value. */		\
									\
		/* Tear down Restartable Atomic Sequence. */		\
		"2:"							\
		"\tmov   %2, #0x00000000\n"				\
		"\tstr   %2, [%5]\n"					\
		"\tmov   %2, #0xffffffff\n"				\
		"\tstr   %2, [%5, #4]\n"				\
		: "=&r" (old), "=m" (*mem), "=&r" (temp)		\
		: "r" (val), "m" (*mem), "r" (ras_start));		\
	return (old);							\
}

#define	EMIT_COMPARE_EXCHANGE_N(N, uintN_t, ldr, streq)			\
_Bool									\
__atomic_compare_exchange_##N(uintN_t *mem, uintN_t *pexpected,		\
    uintN_t desired, int success __unused, int failure __unused)	\
{									\
	uint32_t expected, old, temp, ras_start;			\
									\
	expected = *pexpected;						\
	ras_start = ARM_RAS_START;					\
	__asm volatile (						\
		/* Set up Restartable Atomic Sequence. */		\
		"1:"							\
		"\tadr   %2, 1b\n"					\
		"\tstr   %2, [%6]\n"					\
		"\tadr   %2, 2f\n"					\
		"\tstr   %2, [%6, #4]\n"				\
									\
		"\t"ldr" %0, %5\n"	/* Load old value. */		\
		"\tcmp   %0, %3\n"	/* Compare to expected value. */\
		"\t"streq" %4, %1\n"	/* Store new value. */		\
									\
		/* Tear down Restartable Atomic Sequence. */		\
		"2:"							\
		"\tmov   %2, #0x00000000\n"				\
		"\tstr   %2, [%6]\n"					\
		"\tmov   %2, #0xffffffff\n"				\
		"\tstr   %2, [%6, #4]\n"				\
		: "=&r" (old), "=m" (*mem), "=&r" (temp)		\
		: "r" (expected), "r" (desired), "m" (*mem),		\
		  "r" (ras_start));					\
	if (old == expected) {						\
		return (1);						\
	} else {							\
		*pexpected = old;					\
		return (0);						\
	}								\
}

#define	EMIT_FETCH_OP_N(N, uintN_t, ldr, str, name, op)			\
uintN_t									\
__atomic_##name##_##N(uintN_t *mem, uintN_t val, int model __unused)	\
{									\
	uint32_t old, temp, ras_start;					\
									\
	ras_start = ARM_RAS_START;					\
	__asm volatile (						\
		/* Set up Restartable Atomic Sequence. */		\
		"1:"							\
		"\tadr   %2, 1b\n"					\
		"\tstr   %2, [%5]\n"					\
		"\tadr   %2, 2f\n"					\
		"\tstr   %2, [%5, #4]\n"				\
									\
		"\t"ldr" %0, %4\n"	/* Load old value. */		\
		"\t"op"  %2, %0, %3\n"	/* Calculate new value. */	\
		"\t"str" %2, %1\n"	/* Store new value. */		\
									\
		/* Tear down Restartable Atomic Sequence. */		\
		"2:"							\
		"\tmov   %2, #0x00000000\n"				\
		"\tstr   %2, [%5]\n"					\
		"\tmov   %2, #0xffffffff\n"				\
		"\tstr   %2, [%5, #4]\n"				\
		: "=&r" (old), "=m" (*mem), "=&r" (temp)		\
		: "r" (val), "m" (*mem), "r" (ras_start));		\
	return (old);							\
}

#define	EMIT_ALL_OPS_N(N, uintN_t, ldr, str, streq)			\
EMIT_LOAD_N(N, uintN_t)							\
EMIT_STORE_N(N, uintN_t)						\
EMIT_EXCHANGE_N(N, uintN_t, ldr, str)					\
EMIT_COMPARE_EXCHANGE_N(N, uintN_t, ldr, streq)				\
EMIT_FETCH_OP_N(N, uintN_t, ldr, str, fetch_add, "add")			\
EMIT_FETCH_OP_N(N, uintN_t, ldr, str, fetch_and, "and")			\
EMIT_FETCH_OP_N(N, uintN_t, ldr, str, fetch_or, "orr")			\
EMIT_FETCH_OP_N(N, uintN_t, ldr, str, fetch_sub, "sub")			\
EMIT_FETCH_OP_N(N, uintN_t, ldr, str, fetch_xor, "eor")

EMIT_ALL_OPS_N(1, uint8_t, "ldrb", "strb", "strbeq")
EMIT_ALL_OPS_N(2, uint16_t, "ldrh", "strh", "strheq")
EMIT_ALL_OPS_N(4, uint32_t, "ldr", "str", "streq")
#undef	EMIT_ALL_OPS_N

#endif /* _KERNEL */

#endif /* __ARM_ARCH */

#endif /* __CLANG_ATOMICS || __GNUC_ATOMICS */

#if defined(__SYNC_ATOMICS) || defined(EMIT_SYNC_ATOMICS)

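/*
 * Clang does not allow the __sync_*() builtins to be redefined under
 * their own names, so the implementations below carry a "_c" suffix
 * and are renamed to the builtin names at the symbol level here. GCC
 * builds export them through __strong_reference() aliases instead;
 * see the end of this file.
 */
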
#ifdef __clang__
#pragma	redefine_extname __sync_lock_test_and_set_1_c __sync_lock_test_and_set_1
#pragma	redefine_extname __sync_lock_test_and_set_2_c __sync_lock_test_and_set_2
#pragma	redefine_extname __sync_lock_test_and_set_4_c __sync_lock_test_and_set_4
#pragma	redefine_extname __sync_val_compare_and_swap_1_c __sync_val_compare_and_swap_1
#pragma	redefine_extname __sync_val_compare_and_swap_2_c __sync_val_compare_and_swap_2
#pragma	redefine_extname __sync_val_compare_and_swap_4_c __sync_val_compare_and_swap_4
#pragma	redefine_extname __sync_fetch_and_add_1_c __sync_fetch_and_add_1
#pragma	redefine_extname __sync_fetch_and_add_2_c __sync_fetch_and_add_2
#pragma	redefine_extname __sync_fetch_and_add_4_c __sync_fetch_and_add_4
#pragma	redefine_extname __sync_fetch_and_and_1_c __sync_fetch_and_and_1
#pragma	redefine_extname __sync_fetch_and_and_2_c __sync_fetch_and_and_2
#pragma	redefine_extname __sync_fetch_and_and_4_c __sync_fetch_and_and_4
#pragma	redefine_extname __sync_fetch_and_or_1_c __sync_fetch_and_or_1
#pragma	redefine_extname __sync_fetch_and_or_2_c __sync_fetch_and_or_2
#pragma	redefine_extname __sync_fetch_and_or_4_c __sync_fetch_and_or_4
#pragma	redefine_extname __sync_fetch_and_xor_1_c __sync_fetch_and_xor_1
#pragma	redefine_extname __sync_fetch_and_xor_2_c __sync_fetch_and_xor_2
#pragma	redefine_extname __sync_fetch_and_xor_4_c __sync_fetch_and_xor_4
#pragma	redefine_extname __sync_fetch_and_sub_1_c __sync_fetch_and_sub_1
#pragma	redefine_extname __sync_fetch_and_sub_2_c __sync_fetch_and_sub_2
#pragma	redefine_extname __sync_fetch_and_sub_4_c __sync_fetch_and_sub_4
#endif

/*
 * Old __sync_* API.
 */

#if __ARM_ARCH >= 6

/* Implementations for old GCC versions, lacking support for atomics. */

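/* A 32-bit word addressable both as a whole and as its four bytes. */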
typedef union {
	uint8_t		v8[4];
	uint32_t	v32;
} reg_t;

/*
 * Given a memory address pointing to an 8-bit or 16-bit integer, return
 * the address of the 32-bit word containing it.
 */

static inline uint32_t *
round_to_word(void *ptr)
{

	return ((uint32_t *)((intptr_t)ptr & ~3));
}
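
/*
 * For example, a uint16_t at address 0x1002 is accessed through the
 * aligned word at 0x1000, at byte offset 2 within that word.
 */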

/*
 * Utility functions for loading and storing 8-bit and 16-bit integers
 * in 32-bit words at an offset corresponding with the location of the
 * atomic variable.
 */

static inline void
put_1(reg_t *r, const uint8_t *offset_ptr, uint8_t val)
{
	size_t offset;

	offset = (intptr_t)offset_ptr & 3;
	r->v8[offset] = val;
}

static inline uint8_t
get_1(const reg_t *r, const uint8_t *offset_ptr)
{
	size_t offset;

	offset = (intptr_t)offset_ptr & 3;
	return (r->v8[offset]);
}

static inline void
put_2(reg_t *r, const uint16_t *offset_ptr, uint16_t val)
{
	size_t offset;
	union {
		uint16_t in;
		uint8_t out[2];
	} bytes;

	offset = (intptr_t)offset_ptr & 3;
	bytes.in = val;
	r->v8[offset] = bytes.out[0];
	r->v8[offset + 1] = bytes.out[1];
}

static inline uint16_t
get_2(const reg_t *r, const uint16_t *offset_ptr)
{
	size_t offset;
	union {
		uint8_t in[2];
		uint16_t out;
	} bytes;

	offset = (intptr_t)offset_ptr & 3;
	bytes.in[0] = r->v8[offset];
	bytes.in[1] = r->v8[offset + 1];
	return (bytes.out);
}

/*
 * 8-bit and 16-bit routines.
 *
 * These operations are not natively supported by the CPU, so we use
 * some shifting and bitmasking on top of the 32-bit instructions.
 */
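
/*
 * As an example of the masks used below: on a little-endian machine,
 * a uint8_t at byte offset 1 ends up in bits 15:8 of its containing
 * word, so posmask becomes 0x0000ff00 and negmask 0xffff00ff.
 */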

#define	EMIT_LOCK_TEST_AND_SET_N(N, uintN_t)				\
uintN_t									\
__sync_lock_test_and_set_##N##_c(uintN_t *mem, uintN_t val)		\
{									\
	uint32_t *mem32;						\
	reg_t val32, negmask, old;					\
	uint32_t temp1, temp2;						\
									\
	mem32 = round_to_word(mem);					\
	val32.v32 = 0x00000000;						\
	put_##N(&val32, mem, val);					\
	negmask.v32 = 0xffffffff;					\
	put_##N(&negmask, mem, 0);					\
									\
	do_sync();							\
	__asm volatile (						\
		"1:"							\
		"\tldrex %0, %6\n"	/* Load old value. */		\
		"\tand   %2, %5, %0\n"	/* Remove the old value. */	\
		"\torr   %2, %2, %4\n"	/* Put in the new value. */	\
		"\tstrex %3, %2, %1\n"	/* Attempt to store. */		\
		"\tcmp   %3, #0\n"	/* Did it succeed? */		\
		"\tbne   1b\n"		/* Spin if failed. */		\
		: "=&r" (old.v32), "=m" (*mem32), "=&r" (temp1),	\
		  "=&r" (temp2)						\
		: "r" (val32.v32), "r" (negmask.v32), "m" (*mem32));	\
	return (get_##N(&old, mem));					\
}

EMIT_LOCK_TEST_AND_SET_N(1, uint8_t)
EMIT_LOCK_TEST_AND_SET_N(2, uint16_t)

#define	EMIT_VAL_COMPARE_AND_SWAP_N(N, uintN_t)				\
uintN_t									\
__sync_val_compare_and_swap_##N##_c(uintN_t *mem, uintN_t expected,	\
    uintN_t desired)							\
{									\
	uint32_t *mem32;						\
	reg_t expected32, desired32, posmask, old;			\
	uint32_t negmask, temp1, temp2;					\
									\
	mem32 = round_to_word(mem);					\
	expected32.v32 = 0x00000000;					\
	put_##N(&expected32, mem, expected);				\
	desired32.v32 = 0x00000000;					\
	put_##N(&desired32, mem, desired);				\
	posmask.v32 = 0x00000000;					\
	put_##N(&posmask, mem, ~0);					\
	negmask = ~posmask.v32;						\
									\
	do_sync();							\
	__asm volatile (						\
		"1:"							\
		"\tldrex %0, %8\n"	/* Load old value. */		\
		"\tand   %2, %6, %0\n"	/* Isolate the old value. */	\
		"\tcmp   %2, %4\n"	/* Compare to expected value. */\
		"\tbne   2f\n"		/* Values are unequal. */	\
		"\tand   %2, %7, %0\n"	/* Remove the old value. */	\
		"\torr   %2, %5\n"	/* Put in the new value. */	\
		"\tstrex %3, %2, %1\n"	/* Attempt to store. */		\
		"\tcmp   %3, #0\n"	/* Did it succeed? */		\
		"\tbne   1b\n"		/* Spin if failed. */		\
		"2:"							\
		: "=&r" (old.v32), "=m" (*mem32), "=&r" (temp1),	\
		  "=&r" (temp2)						\
		: "r" (expected32.v32), "r" (desired32.v32),		\
		  "r" (posmask.v32), "r" (negmask), "m" (*mem32));	\
	return (get_##N(&old, mem));					\
}

EMIT_VAL_COMPARE_AND_SWAP_N(1, uint8_t)
EMIT_VAL_COMPARE_AND_SWAP_N(2, uint16_t)

#define	EMIT_ARITHMETIC_FETCH_AND_OP_N(N, uintN_t, name, op)		\
uintN_t									\
__sync_##name##_##N##_c(uintN_t *mem, uintN_t val)			\
{									\
	uint32_t *mem32;						\
	reg_t val32, posmask, old;					\
	uint32_t negmask, temp1, temp2;					\
									\
	mem32 = round_to_word(mem);					\
	val32.v32 = 0x00000000;						\
	put_##N(&val32, mem, val);					\
	posmask.v32 = 0x00000000;					\
	put_##N(&posmask, mem, ~0);					\
	negmask = ~posmask.v32;						\
									\
	do_sync();							\
	__asm volatile (						\
		"1:"							\
		"\tldrex %0, %7\n"	/* Load old value. */		\
		"\t"op"  %2, %0, %4\n"	/* Calculate new value. */	\
		"\tand   %2, %5\n"	/* Isolate the new value. */	\
		"\tand   %3, %6, %0\n"	/* Remove the old value. */	\
		"\torr   %2, %2, %3\n"	/* Put in the new value. */	\
		"\tstrex %3, %2, %1\n"	/* Attempt to store. */		\
		"\tcmp   %3, #0\n"	/* Did it succeed? */		\
		"\tbne   1b\n"		/* Spin if failed. */		\
		: "=&r" (old.v32), "=m" (*mem32), "=&r" (temp1),	\
		  "=&r" (temp2)						\
		: "r" (val32.v32), "r" (posmask.v32), "r" (negmask),	\
		  "m" (*mem32));					\
	return (get_##N(&old, mem));					\
}

EMIT_ARITHMETIC_FETCH_AND_OP_N(1, uint8_t, fetch_and_add, "add")
EMIT_ARITHMETIC_FETCH_AND_OP_N(1, uint8_t, fetch_and_sub, "sub")
EMIT_ARITHMETIC_FETCH_AND_OP_N(2, uint16_t, fetch_and_add, "add")
EMIT_ARITHMETIC_FETCH_AND_OP_N(2, uint16_t, fetch_and_sub, "sub")

#define	EMIT_BITWISE_FETCH_AND_OP_N(N, uintN_t, name, op, idempotence)	\
uintN_t									\
__sync_##name##_##N##_c(uintN_t *mem, uintN_t val)			\
{									\
	uint32_t *mem32;						\
	reg_t val32, old;						\
	uint32_t temp1, temp2;						\
									\
	mem32 = round_to_word(mem);					\
	val32.v32 = idempotence ? 0xffffffff : 0x00000000;		\
	put_##N(&val32, mem, val);					\
									\
	do_sync();							\
	__asm volatile (						\
		"1:"							\
		"\tldrex %0, %5\n"	/* Load old value. */		\
		"\t"op"  %2, %4, %0\n"	/* Calculate new value. */	\
		"\tstrex %3, %2, %1\n"	/* Attempt to store. */		\
		"\tcmp   %3, #0\n"	/* Did it succeed? */		\
		"\tbne   1b\n"		/* Spin if failed. */		\
		: "=&r" (old.v32), "=m" (*mem32), "=&r" (temp1),	\
		  "=&r" (temp2)						\
		: "r" (val32.v32), "m" (*mem32));			\
	return (get_##N(&old, mem));					\
}

EMIT_BITWISE_FETCH_AND_OP_N(1, uint8_t, fetch_and_and, "and", 1)
EMIT_BITWISE_FETCH_AND_OP_N(1, uint8_t, fetch_and_or, "orr", 0)
EMIT_BITWISE_FETCH_AND_OP_N(1, uint8_t, fetch_and_xor, "eor", 0)
EMIT_BITWISE_FETCH_AND_OP_N(2, uint16_t, fetch_and_and, "and", 1)
EMIT_BITWISE_FETCH_AND_OP_N(2, uint16_t, fetch_and_or, "orr", 0)
EMIT_BITWISE_FETCH_AND_OP_N(2, uint16_t, fetch_and_xor, "eor", 0)

/*
 * 32-bit routines.
 */
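
/* These operate on whole words, so plain ldrex/strex loops suffice. */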

uint32_t
__sync_lock_test_and_set_4_c(uint32_t *mem, uint32_t val)
{
	uint32_t old, temp;

	do_sync();
	__asm volatile (
		"1:"
		"\tldrex %0, %4\n"	/* Load old value. */
		"\tstrex %2, %3, %1\n"	/* Attempt to store. */
		"\tcmp   %2, #0\n"	/* Did it succeed? */
		"\tbne   1b\n"		/* Spin if failed. */
		: "=&r" (old), "=m" (*mem), "=&r" (temp)
		: "r" (val), "m" (*mem));
	return (old);
}

uint32_t
__sync_val_compare_and_swap_4_c(uint32_t *mem, uint32_t expected,
    uint32_t desired)
{
	uint32_t old, temp;

	do_sync();
	__asm volatile (
		"1:"
		"\tldrex %0, %5\n"	/* Load old value. */
		"\tcmp   %0, %3\n"	/* Compare to expected value. */
		"\tbne   2f\n"		/* Values are unequal. */
		"\tstrex %2, %4, %1\n"	/* Attempt to store. */
		"\tcmp   %2, #0\n"	/* Did it succeed? */
		"\tbne   1b\n"		/* Spin if failed. */
		"2:"
		: "=&r" (old), "=m" (*mem), "=&r" (temp)
		: "r" (expected), "r" (desired), "m" (*mem));
	return (old);
}

#define	EMIT_FETCH_AND_OP_4(name, op)					\
uint32_t								\
__sync_##name##_4_c(uint32_t *mem, uint32_t val)			\
{									\
	uint32_t old, temp1, temp2;					\
									\
	do_sync();							\
	__asm volatile (						\
		"1:"							\
		"\tldrex %0, %5\n"	/* Load old value. */		\
		"\t"op"  %2, %0, %4\n"	/* Calculate new value. */	\
		"\tstrex %3, %2, %1\n"	/* Attempt to store. */		\
		"\tcmp   %3, #0\n"	/* Did it succeed? */		\
		"\tbne   1b\n"		/* Spin if failed. */		\
		: "=&r" (old), "=m" (*mem), "=&r" (temp1),		\
		  "=&r" (temp2)						\
		: "r" (val), "m" (*mem));				\
	return (old);							\
}

EMIT_FETCH_AND_OP_4(fetch_and_add, "add")
EMIT_FETCH_AND_OP_4(fetch_and_and, "and")
EMIT_FETCH_AND_OP_4(fetch_and_or, "orr")
EMIT_FETCH_AND_OP_4(fetch_and_sub, "sub")
EMIT_FETCH_AND_OP_4(fetch_and_xor, "eor")

#ifndef __clang__
__strong_reference(__sync_lock_test_and_set_1_c, __sync_lock_test_and_set_1);
__strong_reference(__sync_lock_test_and_set_2_c, __sync_lock_test_and_set_2);
__strong_reference(__sync_lock_test_and_set_4_c, __sync_lock_test_and_set_4);
__strong_reference(__sync_val_compare_and_swap_1_c, __sync_val_compare_and_swap_1);
__strong_reference(__sync_val_compare_and_swap_2_c, __sync_val_compare_and_swap_2);
__strong_reference(__sync_val_compare_and_swap_4_c, __sync_val_compare_and_swap_4);
__strong_reference(__sync_fetch_and_add_1_c, __sync_fetch_and_add_1);
__strong_reference(__sync_fetch_and_add_2_c, __sync_fetch_and_add_2);
__strong_reference(__sync_fetch_and_add_4_c, __sync_fetch_and_add_4);
__strong_reference(__sync_fetch_and_and_1_c, __sync_fetch_and_and_1);
__strong_reference(__sync_fetch_and_and_2_c, __sync_fetch_and_and_2);
__strong_reference(__sync_fetch_and_and_4_c, __sync_fetch_and_and_4);
__strong_reference(__sync_fetch_and_sub_1_c, __sync_fetch_and_sub_1);
__strong_reference(__sync_fetch_and_sub_2_c, __sync_fetch_and_sub_2);
__strong_reference(__sync_fetch_and_sub_4_c, __sync_fetch_and_sub_4);
__strong_reference(__sync_fetch_and_or_1_c, __sync_fetch_and_or_1);
__strong_reference(__sync_fetch_and_or_2_c, __sync_fetch_and_or_2);
__strong_reference(__sync_fetch_and_or_4_c, __sync_fetch_and_or_4);
__strong_reference(__sync_fetch_and_xor_1_c, __sync_fetch_and_xor_1);
__strong_reference(__sync_fetch_and_xor_2_c, __sync_fetch_and_xor_2);
__strong_reference(__sync_fetch_and_xor_4_c, __sync_fetch_and_xor_4);
#endif /* !__clang__ */

#else /* __ARM_ARCH < 6 */

#ifdef _KERNEL

#ifdef SMP
#error "On SMP systems we should have proper atomic operations."
#endif

/*
 * On uniprocessor systems, we can perform the atomic operations by
 * disabling interrupts.
 */

#define	EMIT_VAL_COMPARE_AND_SWAP_N(N, uintN_t)				\
uintN_t									\
__sync_val_compare_and_swap_##N(uintN_t *mem, uintN_t expected,		\
    uintN_t desired)							\
{									\
	uintN_t ret;							\
									\
	WITHOUT_INTERRUPTS({						\
		ret = *mem;						\
		if (*mem == expected)					\
			*mem = desired;					\
	});								\
	return (ret);							\
}

#define	EMIT_FETCH_AND_OP_N(N, uintN_t, name, op)			\
uintN_t									\
__sync_##name##_##N(uintN_t *mem, uintN_t val)				\
{									\
	uintN_t ret;							\
									\
	WITHOUT_INTERRUPTS({						\
		ret = *mem;						\
		*mem op val;						\
	});								\
	return (ret);							\
}

#define	EMIT_ALL_OPS_N(N, uintN_t)					\
EMIT_VAL_COMPARE_AND_SWAP_N(N, uintN_t)					\
EMIT_FETCH_AND_OP_N(N, uintN_t, lock_test_and_set, =)			\
EMIT_FETCH_AND_OP_N(N, uintN_t, fetch_and_add, +=)			\
EMIT_FETCH_AND_OP_N(N, uintN_t, fetch_and_and, &=)			\
EMIT_FETCH_AND_OP_N(N, uintN_t, fetch_and_or, |=)			\
EMIT_FETCH_AND_OP_N(N, uintN_t, fetch_and_sub, -=)			\
EMIT_FETCH_AND_OP_N(N, uintN_t, fetch_and_xor, ^=)

EMIT_ALL_OPS_N(1, uint8_t)
EMIT_ALL_OPS_N(2, uint16_t)
EMIT_ALL_OPS_N(4, uint32_t)
EMIT_ALL_OPS_N(8, uint64_t)
#undef	EMIT_ALL_OPS_N

#else /* !_KERNEL */

/*
 * For userspace on uniprocessor systems, we can implement the atomic
 * operations by using a Restartable Atomic Sequence. This makes the
 * kernel restart the code from the beginning when interrupted.
 */
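
/* The RAS window protocol is described above the __atomic_*() routines. */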

#define	EMIT_LOCK_TEST_AND_SET_N(N, uintN_t, ldr, str)			\
uintN_t									\
__sync_lock_test_and_set_##N##_c(uintN_t *mem, uintN_t val)		\
{									\
	uint32_t old, temp, ras_start;					\
									\
	ras_start = ARM_RAS_START;					\
	__asm volatile (						\
		/* Set up Restartable Atomic Sequence. */		\
		"1:"							\
		"\tadr   %2, 1b\n"					\
		"\tstr   %2, [%5]\n"					\
		"\tadr   %2, 2f\n"					\
		"\tstr   %2, [%5, #4]\n"				\
									\
		"\t"ldr" %0, %4\n"	/* Load old value. */		\
		"\t"str" %3, %1\n"	/* Store new value. */		\
									\
		/* Tear down Restartable Atomic Sequence. */		\
		"2:"							\
		"\tmov   %2, #0x00000000\n"				\
		"\tstr   %2, [%5]\n"					\
		"\tmov   %2, #0xffffffff\n"				\
		"\tstr   %2, [%5, #4]\n"				\
		: "=&r" (old), "=m" (*mem), "=&r" (temp)		\
		: "r" (val), "m" (*mem), "r" (ras_start));		\
	return (old);							\
}

#define	EMIT_VAL_COMPARE_AND_SWAP_N(N, uintN_t, ldr, streq)		\
uintN_t									\
__sync_val_compare_and_swap_##N##_c(uintN_t *mem, uintN_t expected,	\
    uintN_t desired)							\
{									\
	uint32_t old, temp, ras_start;					\
									\
	ras_start = ARM_RAS_START;					\
	__asm volatile (						\
		/* Set up Restartable Atomic Sequence. */		\
		"1:"							\
		"\tadr   %2, 1b\n"					\
		"\tstr   %2, [%6]\n"					\
		"\tadr   %2, 2f\n"					\
		"\tstr   %2, [%6, #4]\n"				\
									\
		"\t"ldr" %0, %5\n"	/* Load old value. */		\
		"\tcmp   %0, %3\n"	/* Compare to expected value. */\
		"\t"streq" %4, %1\n"	/* Store new value. */		\
									\
		/* Tear down Restartable Atomic Sequence. */		\
		"2:"							\
		"\tmov   %2, #0x00000000\n"				\
		"\tstr   %2, [%6]\n"					\
		"\tmov   %2, #0xffffffff\n"				\
		"\tstr   %2, [%6, #4]\n"				\
		: "=&r" (old), "=m" (*mem), "=&r" (temp)		\
		: "r" (expected), "r" (desired), "m" (*mem),		\
		  "r" (ras_start));					\
	return (old);							\
}

#define	EMIT_FETCH_AND_OP_N(N, uintN_t, ldr, str, name, op)		\
uintN_t									\
__sync_##name##_##N##_c(uintN_t *mem, uintN_t val)			\
{									\
	uint32_t old, temp, ras_start;					\
									\
	ras_start = ARM_RAS_START;					\
	__asm volatile (						\
		/* Set up Restartable Atomic Sequence. */		\
		"1:"							\
		"\tadr   %2, 1b\n"					\
		"\tstr   %2, [%5]\n"					\
		"\tadr   %2, 2f\n"					\
		"\tstr   %2, [%5, #4]\n"				\
									\
		"\t"ldr" %0, %4\n"	/* Load old value. */		\
		"\t"op"  %2, %0, %3\n"	/* Calculate new value. */	\
		"\t"str" %2, %1\n"	/* Store new value. */		\
									\
		/* Tear down Restartable Atomic Sequence. */		\
		"2:"							\
		"\tmov   %2, #0x00000000\n"				\
		"\tstr   %2, [%5]\n"					\
		"\tmov   %2, #0xffffffff\n"				\
		"\tstr   %2, [%5, #4]\n"				\
		: "=&r" (old), "=m" (*mem), "=&r" (temp)		\
		: "r" (val), "m" (*mem), "r" (ras_start));		\
	return (old);							\
}

#define	EMIT_ALL_OPS_N(N, uintN_t, ldr, str, streq)			\
EMIT_LOCK_TEST_AND_SET_N(N, uintN_t, ldr, str)				\
EMIT_VAL_COMPARE_AND_SWAP_N(N, uintN_t, ldr, streq)			\
EMIT_FETCH_AND_OP_N(N, uintN_t, ldr, str, fetch_and_add, "add")		\
EMIT_FETCH_AND_OP_N(N, uintN_t, ldr, str, fetch_and_and, "and")		\
EMIT_FETCH_AND_OP_N(N, uintN_t, ldr, str, fetch_and_or, "orr")		\
EMIT_FETCH_AND_OP_N(N, uintN_t, ldr, str, fetch_and_sub, "sub")		\
EMIT_FETCH_AND_OP_N(N, uintN_t, ldr, str, fetch_and_xor, "eor")

#ifdef __clang__
EMIT_ALL_OPS_N(1, uint8_t, "ldrb", "strb", "strbeq")
EMIT_ALL_OPS_N(2, uint16_t, "ldrh", "strh", "strheq")
#else
EMIT_ALL_OPS_N(1, uint8_t, "ldrb", "strb", "streqb")
EMIT_ALL_OPS_N(2, uint16_t, "ldrh", "strh", "streqh")
#endif
EMIT_ALL_OPS_N(4, uint32_t, "ldr", "str", "streq")

#ifndef __clang__
__strong_reference(__sync_lock_test_and_set_1_c, __sync_lock_test_and_set_1);
__strong_reference(__sync_lock_test_and_set_2_c, __sync_lock_test_and_set_2);
__strong_reference(__sync_lock_test_and_set_4_c, __sync_lock_test_and_set_4);
__strong_reference(__sync_val_compare_and_swap_1_c, __sync_val_compare_and_swap_1);
__strong_reference(__sync_val_compare_and_swap_2_c, __sync_val_compare_and_swap_2);
__strong_reference(__sync_val_compare_and_swap_4_c, __sync_val_compare_and_swap_4);
__strong_reference(__sync_fetch_and_add_1_c, __sync_fetch_and_add_1);
__strong_reference(__sync_fetch_and_add_2_c, __sync_fetch_and_add_2);
__strong_reference(__sync_fetch_and_add_4_c, __sync_fetch_and_add_4);
__strong_reference(__sync_fetch_and_and_1_c, __sync_fetch_and_and_1);
__strong_reference(__sync_fetch_and_and_2_c, __sync_fetch_and_and_2);
__strong_reference(__sync_fetch_and_and_4_c, __sync_fetch_and_and_4);
__strong_reference(__sync_fetch_and_sub_1_c, __sync_fetch_and_sub_1);
__strong_reference(__sync_fetch_and_sub_2_c, __sync_fetch_and_sub_2);
__strong_reference(__sync_fetch_and_sub_4_c, __sync_fetch_and_sub_4);
__strong_reference(__sync_fetch_and_or_1_c, __sync_fetch_and_or_1);
__strong_reference(__sync_fetch_and_or_2_c, __sync_fetch_and_or_2);
__strong_reference(__sync_fetch_and_or_4_c, __sync_fetch_and_or_4);
__strong_reference(__sync_fetch_and_xor_1_c, __sync_fetch_and_xor_1);
__strong_reference(__sync_fetch_and_xor_2_c, __sync_fetch_and_xor_2);
__strong_reference(__sync_fetch_and_xor_4_c, __sync_fetch_and_xor_4);
#endif /* !__clang__ */

#endif /* _KERNEL */

#endif /* __ARM_ARCH */

#endif /* __SYNC_ATOMICS */