/*-
 * Copyright (c) 2013 Andrew Turner <andrew@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef __arm__
#include <arm/atomic.h>
#else /* !__arm__ */

#ifndef	_MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#define	isb()		__asm __volatile("isb" : : : "memory")

/*
 * Options for DMB and DSB:
 *	oshld	Outer Shareable, load
 *	oshst	Outer Shareable, store
 *	osh	Outer Shareable, all
 *	nshld	Non-shareable, load
 *	nshst	Non-shareable, store
 *	nsh	Non-shareable, all
 *	ishld	Inner Shareable, load
 *	ishst	Inner Shareable, store
 *	ish	Inner Shareable, all
 *	ld	Full system, load
 *	st	Full system, store
 *	sy	Full system, all
 */
#define	dsb(opt)	__asm __volatile("dsb " __STRING(opt) : : : "memory")
#define	dmb(opt)	__asm __volatile("dmb " __STRING(opt) : : : "memory")

#define	mb()	dmb(sy)	/* Full system memory barrier all */
#define	wmb()	dmb(st)	/* Full system memory barrier store */
#define	rmb()	dmb(ld)	/* Full system memory barrier load */
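
/*
 * Illustrative sketch only (hypothetical "ring"/"head" names, not part
 * of this header): publishing a producer index so the buffer store is
 * observed before the index store by other CPUs in the Inner Shareable
 * domain:
 *
 *	ring->buf[head] = data;
 *	dmb(ishst);
 *	ring->head = head + 1;
 */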

#ifdef _KERNEL
extern _Bool lse_supported;
#endif

#if defined(SAN_NEEDS_INTERCEPTORS) && !defined(SAN_RUNTIME)
#include <sys/atomic_san.h>
#else

#include <sys/atomic_common.h>

#ifdef _KERNEL

#ifdef LSE_ATOMICS
#define	_ATOMIC_LSE_SUPPORTED	1
#else
#define	_ATOMIC_LSE_SUPPORTED	lse_supported
#endif
#else
#define	_ATOMIC_LSE_SUPPORTED	0
#endif

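/*
 * Each atomic operation below is generated in two flavours: an "_llsc"
 * version built from a load/store-exclusive (LDXR/STXR) retry loop, and
 * an "_lse" version using the FEAT_LSE atomic instructions.  The
 * unsuffixed wrapper selects between them via _ATOMIC_LSE_SUPPORTED,
 * which is a run-time check in the kernel unless LSE_ATOMICS makes it a
 * compile-time constant, and is always 0 outside the kernel.
 */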
#define	_ATOMIC_OP_PROTO(t, op, bar, flav)				\
static __inline void							\
atomic_##op##_##bar##t##flav(volatile uint##t##_t *p, uint##t##_t val)

#define	_ATOMIC_OP_IMPL(t, w, s, op, llsc_asm_op, lse_asm_op, pre, bar, a, l) \
_ATOMIC_OP_PROTO(t, op, bar, _llsc)					\
{									\
	uint##t##_t tmp;						\
	int res;							\
									\
	pre;								\
	__asm __volatile(						\
	    "1: ld"#a"xr"#s"	%"#w"0, [%2]\n"				\
	    "   "#llsc_asm_op"	%"#w"0, %"#w"0, %"#w"3\n"		\
	    "   st"#l"xr"#s"	%w1, %"#w"0, [%2]\n"			\
	    "   cbnz		%w1, 1b\n"				\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (val)					\
	    : "memory"							\
	);								\
}									\
									\
_ATOMIC_OP_PROTO(t, op, bar, _lse)					\
{									\
	uint##t##_t tmp;						\
									\
	pre;								\
	__asm __volatile(						\
	    ".arch_extension lse\n"					\
	    "ld"#lse_asm_op#a#l#s"	%"#w"2, %"#w"0, [%1]\n"		\
	    ".arch_extension nolse\n"					\
	    : "=r" (tmp)						\
	    : "r" (p), "r" (val)					\
	    : "memory"							\
	);								\
}									\
									\
_ATOMIC_OP_PROTO(t, op, bar, )						\
{									\
	if (_ATOMIC_LSE_SUPPORTED)					\
		atomic_##op##_##bar##t##_lse(p, val);			\
	else								\
		atomic_##op##_##bar##t##_llsc(p, val);			\
}

#define	__ATOMIC_OP(op, llsc_asm_op, lse_asm_op, pre, bar, a, l)	\
	_ATOMIC_OP_IMPL(8,  w, b, op, llsc_asm_op, lse_asm_op, pre,	\
	    bar, a, l)							\
	_ATOMIC_OP_IMPL(16, w, h, op, llsc_asm_op, lse_asm_op, pre,	\
	    bar, a, l)							\
	_ATOMIC_OP_IMPL(32, w,  , op, llsc_asm_op, lse_asm_op, pre,	\
	    bar, a, l)							\
	_ATOMIC_OP_IMPL(64,  ,  , op, llsc_asm_op, lse_asm_op, pre,	\
	    bar, a, l)

#define	_ATOMIC_OP(op, llsc_asm_op, lse_asm_op, pre)			\
	__ATOMIC_OP(op, llsc_asm_op, lse_asm_op, pre,     ,  ,  )	\
	__ATOMIC_OP(op, llsc_asm_op, lse_asm_op, pre, acq_, a,  )	\
	__ATOMIC_OP(op, llsc_asm_op, lse_asm_op, pre, rel_,  , l)

_ATOMIC_OP(add,      add, add, )
_ATOMIC_OP(clear,    bic, clr, )
_ATOMIC_OP(set,      orr, set, )
_ATOMIC_OP(subtract, add, add, val = -val)
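
/*
 * For reference, the generated LL/SC flavour of, e.g., atomic_add_32()
 * is roughly equivalent to (pseudocode):
 *
 *	do {
 *		tmp = load_exclusive(p);
 *		tmp += val;
 *	} while (store_exclusive(p, tmp) != 0);	// retry on failure
 */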

#define	_ATOMIC_CMPSET_PROTO(t, bar, flav)				\
static __inline int							\
atomic_cmpset_##bar##t##flav(volatile uint##t##_t *p,			\
    uint##t##_t cmpval, uint##t##_t newval)

#define	_ATOMIC_FCMPSET_PROTO(t, bar, flav)				\
static __inline int							\
atomic_fcmpset_##bar##t##flav(volatile uint##t##_t *p,			\
    uint##t##_t *cmpval, uint##t##_t newval)

#define	_ATOMIC_CMPSET_IMPL(t, w, s, bar, a, l)				\
_ATOMIC_CMPSET_PROTO(t, bar, _llsc)					\
{									\
	uint##t##_t tmp;						\
	int res;							\
									\
	__asm __volatile(						\
	    "1: mov		%w1, #1\n"				\
	    "   ld"#a"xr"#s"	%"#w"0, [%2]\n"				\
	    "   cmp		%"#w"0, %"#w"3\n"			\
	    "   b.ne		2f\n"					\
	    "   st"#l"xr"#s"	%w1, %"#w"4, [%2]\n"			\
	    "   cbnz		%w1, 1b\n"				\
	    "2:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
									\
	return (!res);							\
}									\
									\
_ATOMIC_CMPSET_PROTO(t, bar, _lse)					\
{									\
	uint##t##_t oldval;						\
	int res;							\
									\
	oldval = cmpval;						\
	__asm __volatile(						\
	    ".arch_extension lse\n"					\
	    "cas"#a#l#s"	%"#w"1, %"#w"4, [%3]\n"			\
	    "cmp		%"#w"1, %"#w"2\n"			\
	    "cset		%w0, eq\n"				\
	    ".arch_extension nolse\n"					\
	    : "=r" (res), "+&r" (cmpval)				\
	    : "r" (oldval), "r" (p), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
									\
	return (res);							\
}									\
									\
_ATOMIC_CMPSET_PROTO(t, bar, )						\
{									\
	if (_ATOMIC_LSE_SUPPORTED)					\
		return (atomic_cmpset_##bar##t##_lse(p, cmpval,		\
		    newval));						\
	else								\
		return (atomic_cmpset_##bar##t##_llsc(p, cmpval,	\
		    newval));						\
}									\
									\
_ATOMIC_FCMPSET_PROTO(t, bar, _llsc)					\
{									\
	uint##t##_t _cmpval, tmp;					\
	int res;							\
									\
	_cmpval = *cmpval;						\
	__asm __volatile(						\
	    "   mov		%w1, #1\n"				\
	    "   ld"#a"xr"#s"	%"#w"0, [%2]\n"				\
	    "   cmp		%"#w"0, %"#w"3\n"			\
	    "   b.ne		1f\n"					\
	    "   st"#l"xr"#s"	%w1, %"#w"4, [%2]\n"			\
	    "1:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (_cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
	*cmpval = tmp;							\
									\
	return (!res);							\
}									\
									\
_ATOMIC_FCMPSET_PROTO(t, bar, _lse)					\
{									\
	uint##t##_t _cmpval, tmp;					\
	int res;							\
									\
	_cmpval = tmp = *cmpval;					\
	__asm __volatile(						\
	    ".arch_extension lse\n"					\
	    "cas"#a#l#s"	%"#w"1, %"#w"4, [%3]\n"			\
	    "cmp		%"#w"1, %"#w"2\n"			\
	    "cset		%w0, eq\n"				\
	    ".arch_extension nolse\n"					\
	    : "=r" (res), "+&r" (tmp)					\
	    : "r" (_cmpval), "r" (p), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
	*cmpval = tmp;							\
									\
	return (res);							\
}									\
									\
_ATOMIC_FCMPSET_PROTO(t, bar, )						\
{									\
	if (_ATOMIC_LSE_SUPPORTED)					\
		return (atomic_fcmpset_##bar##t##_lse(p, cmpval,	\
		    newval));						\
	else								\
		return (atomic_fcmpset_##bar##t##_llsc(p, cmpval,	\
		    newval));						\
}

#define	_ATOMIC_CMPSET(bar, a, l)					\
	_ATOMIC_CMPSET_IMPL(8,  w, b, bar, a, l)			\
	_ATOMIC_CMPSET_IMPL(16, w, h, bar, a, l)			\
	_ATOMIC_CMPSET_IMPL(32, w,  , bar, a, l)			\
	_ATOMIC_CMPSET_IMPL(64,  ,  , bar, a, l)

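/*
 * Advertise the native 8- and 16-bit cmpset/fcmpset variants so that
 * machine-independent code does not substitute generic subword
 * fallbacks.
 */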
#define	atomic_cmpset_8		atomic_cmpset_8
#define	atomic_fcmpset_8	atomic_fcmpset_8
#define	atomic_cmpset_16	atomic_cmpset_16
#define	atomic_fcmpset_16	atomic_fcmpset_16

_ATOMIC_CMPSET(    ,  ,  )
_ATOMIC_CMPSET(acq_, a,  )
_ATOMIC_CMPSET(rel_,  , l)
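
/*
 * Typical lock-free update loop (illustrative, hypothetical "counter"):
 * on failure, fcmpset stores the observed value back into *cmpval, so
 * the loop does not need to re-read the target explicitly:
 *
 *	old = atomic_load_32(&counter);
 *	do {
 *		new = old + 1;
 *	} while (atomic_fcmpset_32(&counter, &old, new) == 0);
 */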

#define	_ATOMIC_FETCHADD_PROTO(t, flav)					\
static __inline uint##t##_t						\
atomic_fetchadd_##t##flav(volatile uint##t##_t *p, uint##t##_t val)

#define	_ATOMIC_FETCHADD_IMPL(t, w)					\
_ATOMIC_FETCHADD_PROTO(t, _llsc)					\
{									\
	uint##t##_t ret, tmp;						\
	int res;							\
									\
	__asm __volatile(						\
	    "1: ldxr	%"#w"2, [%3]\n"					\
	    "   add	%"#w"0, %"#w"2, %"#w"4\n"			\
	    "   stxr	%w1, %"#w"0, [%3]\n"				\
	    "   cbnz	%w1, 1b\n"					\
	    : "=&r" (tmp), "=&r" (res), "=&r" (ret)			\
	    : "r" (p), "r" (val)					\
	    : "memory"							\
	);								\
									\
	return (ret);							\
}									\
									\
_ATOMIC_FETCHADD_PROTO(t, _lse)						\
{									\
	uint##t##_t ret;						\
									\
	__asm __volatile(						\
	    ".arch_extension lse\n"					\
	    "ldadd	%"#w"2, %"#w"0, [%1]\n"				\
	    ".arch_extension nolse\n"					\
	    : "=r" (ret)						\
	    : "r" (p), "r" (val)					\
	    : "memory"							\
	);								\
									\
	return (ret);							\
}									\
									\
_ATOMIC_FETCHADD_PROTO(t, )						\
{									\
	if (_ATOMIC_LSE_SUPPORTED)					\
		return (atomic_fetchadd_##t##_lse(p, val));		\
	else								\
		return (atomic_fetchadd_##t##_llsc(p, val));		\
}

_ATOMIC_FETCHADD_IMPL(32, w)
_ATOMIC_FETCHADD_IMPL(64,  )
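
/*
 * fetchadd returns the value the target held before the addition, e.g.
 * (illustrative, hypothetical "next_ticket") handing out ticket numbers:
 *
 *	my_ticket = atomic_fetchadd_32(&next_ticket, 1);
 */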

#define	_ATOMIC_SWAP_PROTO(t, flav)					\
static __inline uint##t##_t						\
atomic_swap_##t##flav(volatile uint##t##_t *p, uint##t##_t val)

#define	_ATOMIC_READANDCLEAR_PROTO(t, flav)				\
static __inline uint##t##_t						\
atomic_readandclear_##t##flav(volatile uint##t##_t *p)

#define	_ATOMIC_SWAP_IMPL(t, w, zreg)					\
_ATOMIC_SWAP_PROTO(t, _llsc)						\
{									\
	uint##t##_t ret;						\
	int res;							\
									\
	__asm __volatile(						\
	    "1: ldxr	%"#w"1, [%2]\n"					\
	    "   stxr	%w0, %"#w"3, [%2]\n"				\
	    "   cbnz	%w0, 1b\n"					\
	    : "=&r" (res), "=&r" (ret)					\
	    : "r" (p), "r" (val)					\
	    : "memory"							\
	);								\
									\
	return (ret);							\
}									\
									\
_ATOMIC_SWAP_PROTO(t, _lse)						\
{									\
	uint##t##_t ret;						\
									\
	__asm __volatile(						\
	    ".arch_extension lse\n"					\
	    "swp	%"#w"2, %"#w"0, [%1]\n"				\
	    ".arch_extension nolse\n"					\
	    : "=r" (ret)						\
	    : "r" (p), "r" (val)					\
	    : "memory"							\
	);								\
									\
	return (ret);							\
}									\
									\
_ATOMIC_SWAP_PROTO(t, )							\
{									\
	if (_ATOMIC_LSE_SUPPORTED)					\
		return (atomic_swap_##t##_lse(p, val));			\
	else								\
		return (atomic_swap_##t##_llsc(p, val));		\
}									\
									\
_ATOMIC_READANDCLEAR_PROTO(t, _llsc)					\
{									\
	uint##t##_t ret;						\
	int res;							\
									\
	__asm __volatile(						\
	    "1: ldxr	%"#w"1, [%2]\n"					\
	    "   stxr	%w0, "#zreg", [%2]\n"				\
	    "   cbnz	%w0, 1b\n"					\
	    : "=&r" (res), "=&r" (ret)					\
	    : "r" (p)							\
	    : "memory"							\
	);								\
									\
	return (ret);							\
}									\
									\
_ATOMIC_READANDCLEAR_PROTO(t, _lse)					\
{									\
	return (atomic_swap_##t##_lse(p, 0));				\
}									\
									\
_ATOMIC_READANDCLEAR_PROTO(t, )						\
{									\
	if (_ATOMIC_LSE_SUPPORTED)					\
		return (atomic_readandclear_##t##_lse(p));		\
	else								\
		return (atomic_readandclear_##t##_llsc(p));		\
}

_ATOMIC_SWAP_IMPL(32, w, wzr)
_ATOMIC_SWAP_IMPL(64,  , xzr)
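
/*
 * atomic_readandclear is a swap with zero: the LL/SC flavour stores the
 * zero register (wzr/xzr) directly, while the LSE flavour simply calls
 * atomic_swap_*_lse(p, 0).
 */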

#define	_ATOMIC_TEST_OP_PROTO(t, op, bar, flav)				\
static __inline int							\
atomic_testand##op##_##bar##t##flav(volatile uint##t##_t *p, u_int val)

#define	_ATOMIC_TEST_OP_IMPL(t, w, op, llsc_asm_op, lse_asm_op, bar, a)	\
_ATOMIC_TEST_OP_PROTO(t, op, bar, _llsc)				\
{									\
	uint##t##_t mask, old, tmp;					\
	int res;							\
									\
	mask = ((uint##t##_t)1) << (val & (t - 1));			\
	__asm __volatile(						\
	    "1: ld"#a"xr	%"#w"2, [%3]\n"				\
	    "  "#llsc_asm_op"	%"#w"0, %"#w"2, %"#w"4\n"		\
	    "   stxr		%w1, %"#w"0, [%3]\n"			\
	    "   cbnz		%w1, 1b\n"				\
	    : "=&r" (tmp), "=&r" (res), "=&r" (old)			\
	    : "r" (p), "r" (mask)					\
	    : "memory"							\
	);								\
									\
	return ((old & mask) != 0);					\
}									\
									\
_ATOMIC_TEST_OP_PROTO(t, op, bar, _lse)					\
{									\
	uint##t##_t mask, old;						\
									\
	mask = ((uint##t##_t)1) << (val & (t - 1));			\
	__asm __volatile(						\
	    ".arch_extension lse\n"					\
	    "ld"#lse_asm_op#a"	%"#w"2, %"#w"0, [%1]\n"			\
	    ".arch_extension nolse\n"					\
	    : "=r" (old)						\
	    : "r" (p), "r" (mask)					\
	    : "memory"							\
	);								\
									\
	return ((old & mask) != 0);					\
}									\
									\
_ATOMIC_TEST_OP_PROTO(t, op, bar, )					\
{									\
	if (_ATOMIC_LSE_SUPPORTED)					\
		return (atomic_testand##op##_##bar##t##_lse(p, val));	\
	else								\
		return (atomic_testand##op##_##bar##t##_llsc(p, val));	\
}

#define	_ATOMIC_TEST_OP(op, llsc_asm_op, lse_asm_op)			\
	_ATOMIC_TEST_OP_IMPL(32, w, op, llsc_asm_op, lse_asm_op,     ,  ) \
	_ATOMIC_TEST_OP_IMPL(32, w, op, llsc_asm_op, lse_asm_op, acq_, a) \
	_ATOMIC_TEST_OP_IMPL(64,  , op, llsc_asm_op, lse_asm_op,     ,  ) \
	_ATOMIC_TEST_OP_IMPL(64,  , op, llsc_asm_op, lse_asm_op, acq_, a)

_ATOMIC_TEST_OP(clear, bic, clr)
_ATOMIC_TEST_OP(set,   orr, set)
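
/*
 * testandset/testandclear operate on bit (val % width) and return that
 * bit's previous value, e.g. (illustrative, hypothetical "word") a
 * minimal spinning bit-lock acquired with bit 0:
 *
 *	while (atomic_testandset_acq_32(&word, 0) != 0)
 *		cpu_spinwait();
 */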

#define	_ATOMIC_LOAD_ACQ_IMPL(t, w, s)					\
static __inline uint##t##_t						\
atomic_load_acq_##t(volatile uint##t##_t *p)				\
{									\
	uint##t##_t ret;						\
									\
	__asm __volatile(						\
	    "ldar"#s"	%"#w"0, [%1]\n"					\
	    : "=&r" (ret)						\
	    : "r" (p)							\
	    : "memory");						\
									\
	return (ret);							\
}

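/* Advertise the native 8- and 16-bit acquire loads to MI code. */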
#define	atomic_load_acq_8	atomic_load_acq_8
#define	atomic_load_acq_16	atomic_load_acq_16
_ATOMIC_LOAD_ACQ_IMPL(8,  w, b)
_ATOMIC_LOAD_ACQ_IMPL(16, w, h)
_ATOMIC_LOAD_ACQ_IMPL(32, w,  )
_ATOMIC_LOAD_ACQ_IMPL(64,  ,  )

#define	_ATOMIC_STORE_REL_IMPL(t, w, s)					\
static __inline void							\
atomic_store_rel_##t(volatile uint##t##_t *p, uint##t##_t val)		\
{									\
	__asm __volatile(						\
	    "stlr"#s"	%"#w"0, [%1]\n"					\
	    :								\
	    : "r" (val), "r" (p)					\
	    : "memory");						\
}

_ATOMIC_STORE_REL_IMPL(8,  w, b)
_ATOMIC_STORE_REL_IMPL(16, w, h)
_ATOMIC_STORE_REL_IMPL(32, w,  )
_ATOMIC_STORE_REL_IMPL(64,  ,  )
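
/*
 * The acquire loads (LDAR) and release stores (STLR) above pair up for
 * the usual publish/consume pattern (illustrative, hypothetical names):
 *
 *	producer:	data = compute();
 *			atomic_store_rel_32(&flag, 1);
 *	consumer:	while (atomic_load_acq_32(&flag) == 0)
 *				cpu_spinwait();
 *			consume(data);
 */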

#define	atomic_add_char			atomic_add_8
#define	atomic_fcmpset_char		atomic_fcmpset_8
#define	atomic_clear_char		atomic_clear_8
#define	atomic_cmpset_char		atomic_cmpset_8
#define	atomic_fetchadd_char		atomic_fetchadd_8
#define	atomic_readandclear_char	atomic_readandclear_8
#define	atomic_set_char			atomic_set_8
#define	atomic_swap_char		atomic_swap_8
#define	atomic_subtract_char		atomic_subtract_8
#define	atomic_testandclear_char	atomic_testandclear_8
#define	atomic_testandset_char		atomic_testandset_8

#define	atomic_add_acq_char		atomic_add_acq_8
#define	atomic_fcmpset_acq_char		atomic_fcmpset_acq_8
#define	atomic_clear_acq_char		atomic_clear_acq_8
#define	atomic_cmpset_acq_char		atomic_cmpset_acq_8
#define	atomic_load_acq_char		atomic_load_acq_8
#define	atomic_set_acq_char		atomic_set_acq_8
#define	atomic_subtract_acq_char	atomic_subtract_acq_8
#define	atomic_testandset_acq_char	atomic_testandset_acq_8

#define	atomic_add_rel_char		atomic_add_rel_8
#define	atomic_fcmpset_rel_char		atomic_fcmpset_rel_8
#define	atomic_clear_rel_char		atomic_clear_rel_8
#define	atomic_cmpset_rel_char		atomic_cmpset_rel_8
#define	atomic_set_rel_char		atomic_set_rel_8
#define	atomic_subtract_rel_char	atomic_subtract_rel_8
#define	atomic_store_rel_char		atomic_store_rel_8

#define	atomic_add_short		atomic_add_16
#define	atomic_fcmpset_short		atomic_fcmpset_16
#define	atomic_clear_short		atomic_clear_16
#define	atomic_cmpset_short		atomic_cmpset_16
#define	atomic_fetchadd_short		atomic_fetchadd_16
#define	atomic_readandclear_short	atomic_readandclear_16
#define	atomic_set_short		atomic_set_16
#define	atomic_swap_short		atomic_swap_16
#define	atomic_subtract_short		atomic_subtract_16
#define	atomic_testandclear_short	atomic_testandclear_16
#define	atomic_testandset_short		atomic_testandset_16

#define	atomic_add_acq_short		atomic_add_acq_16
#define	atomic_fcmpset_acq_short	atomic_fcmpset_acq_16
#define	atomic_clear_acq_short		atomic_clear_acq_16
#define	atomic_cmpset_acq_short		atomic_cmpset_acq_16
#define	atomic_load_acq_short		atomic_load_acq_16
#define	atomic_set_acq_short		atomic_set_acq_16
#define	atomic_subtract_acq_short	atomic_subtract_acq_16
#define	atomic_testandset_acq_short	atomic_testandset_acq_16

#define	atomic_add_rel_short		atomic_add_rel_16
#define	atomic_fcmpset_rel_short	atomic_fcmpset_rel_16
#define	atomic_clear_rel_short		atomic_clear_rel_16
#define	atomic_cmpset_rel_short		atomic_cmpset_rel_16
#define	atomic_set_rel_short		atomic_set_rel_16
#define	atomic_subtract_rel_short	atomic_subtract_rel_16
#define	atomic_store_rel_short		atomic_store_rel_16

#define	atomic_add_int			atomic_add_32
#define	atomic_fcmpset_int		atomic_fcmpset_32
#define	atomic_clear_int		atomic_clear_32
#define	atomic_cmpset_int		atomic_cmpset_32
#define	atomic_fetchadd_int		atomic_fetchadd_32
#define	atomic_readandclear_int		atomic_readandclear_32
#define	atomic_set_int			atomic_set_32
#define	atomic_swap_int			atomic_swap_32
#define	atomic_subtract_int		atomic_subtract_32
#define	atomic_testandclear_int		atomic_testandclear_32
#define	atomic_testandset_int		atomic_testandset_32

#define	atomic_add_acq_int		atomic_add_acq_32
#define	atomic_fcmpset_acq_int		atomic_fcmpset_acq_32
#define	atomic_clear_acq_int		atomic_clear_acq_32
#define	atomic_cmpset_acq_int		atomic_cmpset_acq_32
#define	atomic_load_acq_int		atomic_load_acq_32
#define	atomic_set_acq_int		atomic_set_acq_32
#define	atomic_subtract_acq_int		atomic_subtract_acq_32
#define	atomic_testandset_acq_int	atomic_testandset_acq_32

#define	atomic_add_rel_int		atomic_add_rel_32
#define	atomic_fcmpset_rel_int		atomic_fcmpset_rel_32
#define	atomic_clear_rel_int		atomic_clear_rel_32
#define	atomic_cmpset_rel_int		atomic_cmpset_rel_32
#define	atomic_set_rel_int		atomic_set_rel_32
#define	atomic_subtract_rel_int		atomic_subtract_rel_32
#define	atomic_store_rel_int		atomic_store_rel_32

#define	atomic_add_long			atomic_add_64
#define	atomic_fcmpset_long		atomic_fcmpset_64
#define	atomic_clear_long		atomic_clear_64
#define	atomic_cmpset_long		atomic_cmpset_64
#define	atomic_fetchadd_long		atomic_fetchadd_64
#define	atomic_readandclear_long	atomic_readandclear_64
#define	atomic_set_long			atomic_set_64
#define	atomic_swap_long		atomic_swap_64
#define	atomic_subtract_long		atomic_subtract_64
#define	atomic_testandclear_long	atomic_testandclear_64
#define	atomic_testandset_long		atomic_testandset_64

#define	atomic_add_ptr			atomic_add_64
#define	atomic_fcmpset_ptr		atomic_fcmpset_64
#define	atomic_clear_ptr		atomic_clear_64
#define	atomic_cmpset_ptr		atomic_cmpset_64
#define	atomic_fetchadd_ptr		atomic_fetchadd_64
#define	atomic_readandclear_ptr		atomic_readandclear_64
#define	atomic_set_ptr			atomic_set_64
#define	atomic_swap_ptr			atomic_swap_64
#define	atomic_subtract_ptr		atomic_subtract_64

#define	atomic_add_acq_long		atomic_add_acq_64
#define	atomic_fcmpset_acq_long		atomic_fcmpset_acq_64
#define	atomic_clear_acq_long		atomic_clear_acq_64
#define	atomic_cmpset_acq_long		atomic_cmpset_acq_64
#define	atomic_load_acq_long		atomic_load_acq_64
#define	atomic_set_acq_long		atomic_set_acq_64
#define	atomic_subtract_acq_long	atomic_subtract_acq_64
#define	atomic_testandset_acq_long	atomic_testandset_acq_64

#define	atomic_add_acq_ptr		atomic_add_acq_64
#define	atomic_fcmpset_acq_ptr		atomic_fcmpset_acq_64
#define	atomic_clear_acq_ptr		atomic_clear_acq_64
#define	atomic_cmpset_acq_ptr		atomic_cmpset_acq_64
#define	atomic_load_acq_ptr		atomic_load_acq_64
#define	atomic_set_acq_ptr		atomic_set_acq_64
#define	atomic_subtract_acq_ptr		atomic_subtract_acq_64

#define	atomic_add_rel_long		atomic_add_rel_64
#define	atomic_fcmpset_rel_long		atomic_fcmpset_rel_64
#define	atomic_clear_rel_long		atomic_clear_rel_64
#define	atomic_cmpset_rel_long		atomic_cmpset_rel_64
#define	atomic_set_rel_long		atomic_set_rel_64
#define	atomic_subtract_rel_long	atomic_subtract_rel_64
#define	atomic_store_rel_long		atomic_store_rel_64

#define	atomic_add_rel_ptr		atomic_add_rel_64
#define	atomic_fcmpset_rel_ptr		atomic_fcmpset_rel_64
#define	atomic_clear_rel_ptr		atomic_clear_rel_64
#define	atomic_cmpset_rel_ptr		atomic_cmpset_rel_64
#define	atomic_set_rel_ptr		atomic_set_rel_64
#define	atomic_subtract_rel_ptr		atomic_subtract_rel_64
#define	atomic_store_rel_ptr		atomic_store_rel_64

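/*
 * Fence mapping: an acquire fence only has to keep prior loads ahead of
 * later loads and stores, which dmb(ld) provides.  A release fence must
 * order prior loads and stores before later stores; dmb(st) orders only
 * store/store, so rel, acq_rel and seq_cst all use the full dmb(sy).
 */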
static __inline void
atomic_thread_fence_acq(void)
{

	dmb(ld);
}

static __inline void
atomic_thread_fence_rel(void)
{

	dmb(sy);
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	dmb(sy);
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	dmb(sy);
}

#endif /* SAN_NEEDS_INTERCEPTORS && !SAN_RUNTIME */
#endif /* _MACHINE_ATOMIC_H_ */

#endif /* !__arm__ */
