/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/include/atomic.h 254617 2013-08-21 22:03:06Z jkim $
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

/*
 * A locked add to the top of the stack is a full fence on every IA32
 * processor and, unlike MFENCE, does not require SSE2.
 */
#define	mb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define	wmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define	rmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */

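/*
 * Illustrative usage (a sketch, not part of this header's API): a
 * simple event counter maintained with the operations above.  The
 * identifiers "evcnt" and "n" are invented for the example.
 *
 *	static volatile u_int evcnt;
 *	u_int n;
 *
 *	atomic_add_int(&evcnt, 1);		count one event
 *	n = atomic_readandclear_int(&evcnt);	harvest and reset to 0
 */
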
/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
int	atomic_testandset_int(volatile u_int *p, u_int v);

#define	ATOMIC_LOAD(TYPE, LOP)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is marked volatile so the compiler cannot discard it as
 * dead code.  GCC aggressively reorders operations, so the barrier
 * variants also clobber memory to keep other accesses from being moved
 * across them.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack

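/*
 * For reference, a sketch of what ATOMIC_ASM(add, int, "addl %1,%0",
 * "ir", v) expands to in an SMP kernel (MPLOCKED is "lock ; "):
 *
 *	static __inline void
 *	atomic_add_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile("lock ; addl %1,%0"
 *		: "+m" (*p)
 *		: "ir" (v)
 *		: "cc");
 *	}
 *
 * plus an atomic_add_barr_int() variant whose "memory" clobber keeps
 * the compiler from moving other memory accesses across it.
 */
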
#if defined(_KERNEL) && !defined(WANT_FUNCTIONS)

/*
 * The I486 does not support SMP or CMPXCHG8B, so a 64-bit access is
 * made atomic by disabling interrupts around its two 32-bit halves.
 */
static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
	volatile uint32_t *high, *low;
	uint64_t res;

	low = (volatile uint32_t *)p;
	high = (volatile uint32_t *)p + 1;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl %1,%%eax ;		"
	"	movl %2,%%edx ;		"
	"	popfl"
	: "=&A" (res)			/* 0 */
	: "m" (*low),			/* 1 */
	  "m" (*high)			/* 2 */
	: "memory");

	return (res);
}

static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *high, *low;

	low = (volatile uint32_t *)p;
	high = (volatile uint32_t *)p + 1;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl %%eax,%0 ;		"
	"	movl %%edx,%1 ;		"
	"	popfl"
	: "=m" (*low),			/* 0 */
	  "=m" (*high)			/* 1 */
	: "A" (v)			/* 2 */
	: "memory");
}

/*
 * CMPXCHG8B with %ebx:%ecx copied into %eax:%edx either finds *p equal
 * to %edx:%eax and rewrites it with the same value, or loads *p into
 * %edx:%eax; either way it reads the 64-bit value atomically without
 * modifying it.
 */
static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
	uint64_t res;

	__asm __volatile(
	"	movl %%ebx,%%eax ;	"
	"	movl %%ecx,%%edx ;	"
	"	" MPLOCKED "		"
	"	cmpxchg8b %1"
	: "=&A" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "memory", "cc");

	return (res);
}

/*
 * Retry CMPXCHG8B until the store of %ecx:%ebx (the new value) wins;
 * each failure reloads the current value into %edx:%eax for the next
 * comparison.
 */
static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl %%eax,%%ebx ;	"
	"	movl %%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne 1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
}

#endif /* _KERNEL && !WANT_FUNCTIONS */

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * If (*dst == expect), then *dst = src (all 32-bit words).
 *
 * Returns 0 on failure, non-zero on success.
 */

#ifdef CPU_DISABLE_CMPXCHG

/*
 * Without CMPXCHG, emulate the compare-and-set by briefly disabling
 * interrupts; this is safe only on a uniprocessor.
 */
static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%3,%1 ;		"
	"	jne	1f ;		"
	"	movl	%2,%1 ;		"
	"1:				"
	"	sete	%0 ;		"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "r" (expect)			/* 3 */
	: "memory");

	return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

/*
 * CMPXCHG compares %eax (expect) with *dst; on a match it stores src
 * and sets ZF, which SETE turns into the return value.
 */
static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %3,%1 ;	"
	"	sete	%0 ;		"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+a" (expect)			/* 2 */
	: "r" (src)			/* 3 */
	: "memory", "cc");

	return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */

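/*
 * Typical usage is a read-modify-write retry loop.  A sketch, with the
 * function name and variables invented for the example: atomically
 * raise *p to at least "new", never lowering it.
 *
 *	static __inline void
 *	atomic_max_int(volatile u_int *p, u_int new)
 *	{
 *		u_int old;
 *
 *		do {
 *			old = *p;
 *			if (old >= new)
 *				break;
 *		} while (atomic_cmpset_int(p, old, new) == 0);
 *	}
 */
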
/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}

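/*
 * A sketch of a typical use: handing out unique, monotonically
 * increasing sequence numbers.  "seq" and "mine" are invented names.
 *
 *	static volatile u_int seq;
 *	u_int mine;
 *
 *	mine = atomic_fetchadd_int(&seq, 1);	returns the pre-add value
 */
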
static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

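/*
 * atomic_testandset_int() sets bit (v % 32) of *p and reports the
 * bit's previous state, so it can serve as a one-bit try-lock.  A
 * sketch; "busy" is an invented flag word:
 *
 *	static volatile u_int busy;
 *
 *	if (atomic_testandset_int(&busy, 0) == 0) {
 *		... bit 0 was clear; we now own the resource ...
 *		atomic_clear_int(&busy, 1);	release by clearing bit 0
 *	}
 */
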
/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, loads may pass stores, so for atomic_load_acq we have to
 * ensure a Store/Load barrier to do the load in SMP kernels.  We use
 * "lock cmpxchg" as recommended by the AMD Software Optimization
 * Guide, and not mfence.  For UP kernels, however, the cache of the
 * single processor is always consistent, so we only need to take care
 * of the compiler.
 */
#define	ATOMIC_STORE(TYPE)				\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__compiler_membar();				\
	*p = v;						\
}							\
struct __hack

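/*
 * A sketch of how the acquire/release pair is meant to be used (the
 * identifiers are invented): once the consumer observes "ready"
 * non-zero, it is guaranteed to see the producer's write to "data".
 *
 *	producer:
 *		data = 42;
 *		atomic_store_rel_int(&ready, 1);
 *
 *	consumer:
 *		while (atomic_load_acq_int(&ready) == 0)
 *			continue;
 *		consume(data);
 */
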
#if defined(_KERNEL) && !defined(SMP)

#define	ATOMIC_LOAD(TYPE, LOP)				\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE tmp;					\
							\
	tmp = *p;					\
	__compiler_membar();				\
	return (tmp);					\
}							\
struct __hack

#else /* !(_KERNEL && !SMP) */

/*
 * LOP is a "lock cmpxchg" whose source register is also the compare
 * value in %eax, so the location's value is left unchanged while %eax
 * receives the old value; the locked instruction supplies the needed
 * Store/Load barrier.
 */
#define	ATOMIC_LOAD(TYPE, LOP)				\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(MPLOCKED LOP			\
	: "=a" (res),			/* 0 */		\
	  "+m" (*p)			/* 1 */		\
	: : "memory", "cc");				\
							\
	return (res);					\
}							\
struct __hack

#endif /* _KERNEL && !SMP */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

ATOMIC_LOAD(char,  "cmpxchgb %b0,%1");
ATOMIC_LOAD(short, "cmpxchgw %w0,%1");
ATOMIC_LOAD(int,   "cmpxchgl %0,%1");
ATOMIC_LOAD(long,  "cmpxchgl %0,%1");

ATOMIC_STORE(char);
ATOMIC_STORE(short);
ATOMIC_STORE(int);
ATOMIC_STORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE

#ifndef WANT_FUNCTIONS

#ifdef _KERNEL
extern uint64_t (*atomic_load_acq_64)(volatile uint64_t *);
extern void (*atomic_store_rel_64)(volatile uint64_t *, uint64_t);
#endif

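/*
 * These are function pointers rather than inlines, presumably so the
 * kernel can install the _i386 or _i586 implementation above once the
 * CPU type is known.  Callers just use them like functions; the
 * identifiers below are invented for the sketch:
 *
 *	uint64_t snap;
 *
 *	snap = atomic_load_acq_64(&some_counter);
 *	atomic_store_rel_64(&some_counter, snap + 1);
 */
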
static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{

	return (atomic_testandset_int((volatile u_int *)p, v));
}

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

/*
 * XCHG with a memory operand is implicitly locked, so no MPLOCKED
 * prefix is needed here.
 */
static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */

	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	return (atomic_swap_int((volatile u_int *)p, (u_int)v));
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int

/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_swap_ptr(p, v) \
	atomic_swap_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */