/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/11.0/sys/i386/include/atomic.h 299912 2016-05-16 07:19:33Z sephe $
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#ifdef _KERNEL
#include <machine/md_var.h>
#include <machine/specialreg.h>
#endif

#ifndef __OFFSETOF_MONITORBUF
/*
 * __OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in i386/vm_machdep.c ensures that the value is correct.
 */
#define	__OFFSETOF_MONITORBUF	0x180

/*
 * __mbk() and __mbu() are Store/Load barriers: a locked add to a dummy
 * location drains the store buffer.  __mbk() is the kernel flavor and
 * targets the private per-CPU monitor buffer; __mbu() is the userland
 * flavor and targets the word at the top of the stack.
 */
static __inline void
__mbk(void)
{

	__asm __volatile("lock; addl $0,%%fs:%0"
	    : "+m" (*(u_int *)__OFFSETOF_MONITORBUF) : : "memory", "cc");
}

static __inline void
__mbu(void)
{

	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc");
}
#endif

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
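
/*
 * Illustrative usage sketch (not part of this header; the names
 * "sc_flags" and "SC_DIRTY" are hypothetical).  Each call is a single
 * atomic read-modify-write on the flags word, so concurrent callers
 * cannot lose each other's updates:
 *
 *	static volatile u_int sc_flags;
 *	#define	SC_DIRTY	0x0001
 *
 *	atomic_set_int(&sc_flags, SC_DIRTY);	// sc_flags |= SC_DIRTY
 *	atomic_clear_int(&sc_flags, SC_DIRTY);	// sc_flags &= ~SC_DIRTY
 *	atomic_add_int(&sc_flags, 1);		// sc_flags += 1
 */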

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
int	atomic_testandset_int(volatile u_int *p, u_int v);
int	atomic_testandclear_int(volatile u_int *p, u_int v);
void	atomic_thread_fence_acq(void);
void	atomic_thread_fence_acq_rel(void);
void	atomic_thread_fence_rel(void);
void	atomic_thread_fence_seq_cst(void);

#define	ATOMIC_LOAD(TYPE)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int		atomic_cmpset_64(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64(volatile uint64_t *);
void		atomic_store_rel_64(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64(volatile uint64_t *, uint64_t);

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is volatilized to avoid code chunk removal by the compiler.
 * GCC aggressively reorders operations and memory clobbering is necessary
 * in order to avoid that for memory barriers.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack
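
/*
 * For reference, ATOMIC_ASM(add, int, "addl %1,%0", "ir", v) expands to
 * roughly the following (the "_barr_" variant additionally clobbers
 * "memory", so the compiler cannot cache values across the call):
 *
 *	static __inline void
 *	atomic_add_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile("lock ; " "addl %1,%0"
 *		: "+m" (*p)
 *		: "ir" (v)
 *		: "cc");
 *	}
 */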

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * if (*dst == expect) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success.
 */

#ifdef CPU_DISABLE_CMPXCHG

/* Fall back to disabling interrupts around a plain compare-and-move. */
static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%3,%1 ;		"
	"	jne	1f ;		"
	"	movl	%2,%1 ;		"
	"1:				"
	"	sete	%0 ;		"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "r" (expect)			/* 3 */
	: "memory");
	return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %3,%1 ;	"
	"	sete	%0 ;		"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+a" (expect)			/* 2 */
	: "r" (src)			/* 3 */
	: "memory", "cc");
	return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */
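
/*
 * Usage sketch: compare-and-set is the building block for locks.  A
 * minimal spinlock acquire/release might look like this (the names
 * "lk", "LK_UNOWNED" and "LK_OWNED" are hypothetical):
 *
 *	static volatile u_int lk;
 *	#define	LK_UNOWNED	0
 *	#define	LK_OWNED	1
 *
 *	while (atomic_cmpset_acq_int(&lk, LK_UNOWNED, LK_OWNED) == 0)
 *		;	// spin until the 0 -> 1 transition succeeds
 *	... critical section ...
 *	atomic_store_rel_int(&lk, LK_UNOWNED);
 */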

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}
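
/*
 * Because the previous value is returned, atomic_fetchadd_int() can
 * hand out unique values, e.g. ticket numbers (the names below are
 * hypothetical).  Two concurrent callers always observe distinct
 * results:
 *
 *	static volatile u_int next_ticket;
 *
 *	u_int
 *	alloc_ticket(void)
 *	{
 *		return (atomic_fetchadd_int(&next_ticket, 1));
 *	}
 */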

/*
 * Atomically set bit (v % 32) in *p and return its previous value.
 */
static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

/*
 * Atomically clear bit (v % 32) in *p and return its previous value.
 */
static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandclear_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}
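
/*
 * Usage sketch: test-and-set gives a one-bit try-lock for free (the
 * name "busy_bits" is hypothetical).  A return of 0 means the bit was
 * clear and this caller set it; a return of 1 means someone else
 * already owns it:
 *
 *	static volatile u_int busy_bits;
 *
 *	if (atomic_testandset_int(&busy_bits, 3) == 0) {
 *		... slot 3 is now exclusively ours ...
 *		atomic_testandclear_int(&busy_bits, 3);
 *	}
 */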

/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, and not mfence.  In the kernel, we use a private per-cpu
 * cache line for "mem", to avoid introducing false data
 * dependencies.  In user space, we use the word at the top of the
 * stack.
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */
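
/*
 * The classic illustration is Dekker's handshake: each CPU stores a
 * flag and then loads the other CPU's flag.  Without a Store/Load
 * barrier both loads may be satisfied before either store becomes
 * globally visible, and both CPUs can enter the critical section:
 *
 *	// CPU 0				// CPU 1
 *	flag0 = 1;				flag1 = 1;
 *	atomic_thread_fence_seq_cst();		atomic_thread_fence_seq_cst();
 *	if (flag1 == 0)				if (flag0 == 0)
 *		// enter critical section	// enter critical section
 *
 * With the fences in place, at least one CPU must observe the other's
 * store and stay out.
 */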

#if defined(_KERNEL)
#if defined(SMP)
#define	__storeload_barrier()	__mbk()
#else /* _KERNEL && UP */
#define	__storeload_barrier()	__compiler_membar()
#endif /* SMP */
#else /* !_KERNEL */
#define	__storeload_barrier()	__mbu()
#endif /* _KERNEL */

#define	ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define	ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack

static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}

#ifdef _KERNEL

#ifdef WANT_FUNCTIONS
int		atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
int		atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64_i386(volatile uint64_t *);
uint64_t	atomic_load_acq_64_i586(volatile uint64_t *);
void		atomic_store_rel_64_i386(volatile uint64_t *, uint64_t);
void		atomic_store_rel_64_i586(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i386(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i586(volatile uint64_t *, uint64_t);
#endif

/* I486 does not support SMP or CMPXCHG8B. */
static __inline int
atomic_cmpset_64_i386(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	volatile uint32_t *p;
	u_char res;

	p = (volatile uint32_t *)dst;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	xorl	%1,%%eax ;	"
	"	xorl	%2,%%edx ;	"
	"	orl	%%edx,%%eax ;	"
	"	jne	1f ;		"
	"	movl	%4,%1 ;		"
	"	movl	%5,%2 ;		"
	"1:				"
	"	sete	%3 ;		"
	"	popfl"
	: "+A" (expect),		/* 0 */
	  "+m" (*p),			/* 1 */
	  "+m" (*(p + 1)),		/* 2 */
	  "=q" (res)			/* 3 */
	: "r" ((uint32_t)src),		/* 4 */
	  "r" ((uint32_t)(src >> 32))	/* 5 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	popfl"
	: "=&A" (res)			/* 0 */
	: "m" (*q),			/* 1 */
	  "m" (*(q + 1))		/* 2 */
	: "memory");
	return (res);
}

static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%%eax,%0 ;	"
	"	movl	%%edx,%1 ;	"
	"	popfl"
	: "=m" (*q),			/* 0 */
	  "=m" (*(q + 1))		/* 1 */
	: "A" (v)			/* 2 */
	: "memory");
}

static __inline uint64_t
atomic_swap_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	movl	%4,%2 ;		"
	"	movl	%3,%1 ;		"
	"	popfl"
	: "=&A" (res),			/* 0 */
	  "+m" (*q),			/* 1 */
	  "+m" (*(q + 1))		/* 2 */
	: "r" ((uint32_t)v),		/* 3 */
	  "r" ((uint32_t)(v >> 32)));	/* 4 */
	return (res);
}

static __inline int
atomic_cmpset_64_i586(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchg8b %1 ;		"
	"	sete	%0"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+A" (expect)			/* 2 */
	: "b" ((uint32_t)src),		/* 3 */
	  "c" ((uint32_t)(src >> 32))	/* 4 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
	uint64_t res;

	__asm __volatile(
	"	movl	%%ebx,%%eax ;	"
	"	movl	%%ecx,%%edx ;	"
	"	" MPLOCKED "		"
	"	cmpxchg8b %1"
	: "=&A" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "memory", "cc");
	return (res);
}

static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
}

static __inline uint64_t
atomic_swap_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
	return (v);
}

/*
 * The remaining 64-bit functions dispatch at run time on CPUID_CX8,
 * i.e. on whether the CPU implements CMPXCHG8B.
 */
static __inline int
atomic_cmpset_64(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_cmpset_64_i386(dst, expect, src));
	else
		return (atomic_cmpset_64_i586(dst, expect, src));
}

static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_load_acq_64_i386(p));
	else
		return (atomic_load_acq_64_i586(p));
}

static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		atomic_store_rel_64_i386(p, v);
	else
		atomic_store_rel_64_i586(p, v);
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_swap_64_i386(p, v));
	else
		return (atomic_swap_64_i586(p, v));
}
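
/*
 * Callers need not care which variant runs; e.g. a 64-bit counter can
 * be read consistently on any supported CPU (the names "hw_cycles" and
 * "read_cycles" are hypothetical):
 *
 *	static volatile uint64_t hw_cycles;
 *
 *	uint64_t
 *	read_cycles(void)
 *	{
 *		return (atomic_load_acq_64(&hw_cycles));
 *	}
 */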

#endif /* _KERNEL */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

#define	ATOMIC_LOADSTORE(TYPE)				\
	ATOMIC_LOAD(TYPE);				\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE

#ifndef WANT_FUNCTIONS

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{

	return (atomic_testandset_int((volatile u_int *)p, v));
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{

	return (atomic_testandclear_int((volatile u_int *)p, v));
}

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	/* xchgl with a memory operand carries an implicit lock prefix. */
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	return (atomic_swap_int((volatile u_int *)p, (u_int)v));
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int
#define	atomic_testandclear_32	atomic_testandclear_int

/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_swap_ptr(p, v) \
	atomic_swap_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))

#endif /* !WANT_FUNCTIONS */

#if defined(_KERNEL)
#define	mb()	__mbk()
#define	wmb()	__mbk()
#define	rmb()	__mbk()
#else
#define	mb()	__mbu()
#define	wmb()	__mbu()
#define	rmb()	__mbu()
#endif

#endif /* !_MACHINE_ATOMIC_H_ */