/* FreeBSD sys/amd64/include/atomic.h, revision 216524 */
/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/amd64/include/atomic.h 216524 2010-12-18 16:41:11Z kib $
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

/* sys/cdefs.h supplies __GNUCLIKE_ASM and related macros used below. */
#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

/*
 * Memory fences: mb() orders all memory accesses (mfence), wmb() orders
 * stores (sfence), rmb() orders loads (lfence).  The "memory" clobber
 * also prevents the compiler from reordering across the fence.
 */
#define	mb()	__asm __volatile("mfence;" : : : "memory")
#define	wmb()	__asm __volatile("sfence;" : : : "memory")
#define	rmb()	__asm __volatile("lfence;" : : : "memory")
38223328Sgavin
/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
/*
 * KLD modules (or compilers without GCC-style inline asm) get only
 * prototypes here; the real implementations are built into the kernel,
 * so a single module binary works on both UP and SMP kernels.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
int	atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
u_long	atomic_fetchadd_long(volatile u_long *p, u_long v);

/* Prototypes only; the LOP/SOP asm arguments are unused in this case. */
#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)			\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p);	\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)
87223328Sgavin
88223328Sgavin#else /* !KLD_MODULE && __GNUCLIKE_ASM */
89223328Sgavin
/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
/*
 * MPLOCKED expands to the "lock" instruction prefix for SMP kernels and
 * all userland builds; UP kernels leave it empty.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif
99223328Sgavin
/*
 * The assembly is volatilized to avoid code chunk removal by the compiler.
 * GCC aggressively reorders operations and memory clobbering is necessary
 * in order to avoid that for memory barriers.
 */
/*
 * ATOMIC_ASM generates two variants of each read-modify-write operation:
 * atomic_<NAME>_<TYPE>() with no compiler barrier, and
 * atomic_<NAME>_barr_<TYPE>() whose "memory" clobber additionally acts
 * as a full compiler barrier (used for the _acq/_rel aliases below).
 * OP is the instruction, CONS the asm constraint for the value V.
 * The trailing "struct __hack" forces a semicolon at the call site.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "=m" (*p)					\
	: CONS (V), "m" (*p)				\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "=m" (*p)					\
	: CONS (V), "m" (*p)				\
	: "memory", "cc");				\
}							\
struct __hack
124223328Sgavin
125223328Sgavin/*
126223328Sgavin * Atomic compare and set, used by the mutex functions
127223328Sgavin *
128223328Sgavin * if (*dst == expect) *dst = src (all 32 bit words)
129223328Sgavin *
130223328Sgavin * Returns 0 on failure, non-zero on success
131223328Sgavin */
132223328Sgavin
133223328Sgavinstatic __inline int
134223328Sgavinatomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
135223328Sgavin{
136223328Sgavin	u_char res;
137223328Sgavin
138223328Sgavin	__asm __volatile(
139223328Sgavin	"	" MPLOCKED "		"
140223328Sgavin	"	cmpxchgl %2,%1 ;	"
141223328Sgavin	"       sete	%0 ;		"
142223328Sgavin	"1:				"
143223328Sgavin	"# atomic_cmpset_int"
144223328Sgavin	: "=a" (res),			/* 0 */
145223328Sgavin	  "=m" (*dst)			/* 1 */
146223328Sgavin	: "r" (src),			/* 2 */
147223328Sgavin	  "a" (expect),			/* 3 */
148223328Sgavin	  "m" (*dst)			/* 4 */
149223328Sgavin	: "memory", "cc");
150223328Sgavin
151223328Sgavin	return (res);
152223328Sgavin}
153223328Sgavin
154223328Sgavinstatic __inline int
155223328Sgavinatomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
156223328Sgavin{
157223328Sgavin	u_char res;
158223328Sgavin
159223328Sgavin	__asm __volatile(
160223328Sgavin	"	" MPLOCKED "		"
161223328Sgavin	"	cmpxchgq %2,%1 ;	"
162223328Sgavin	"       sete	%0 ;		"
163223328Sgavin	"1:				"
164223328Sgavin	"# atomic_cmpset_long"
165223328Sgavin	: "=a" (res),			/* 0 */
166223328Sgavin	  "=m" (*dst)			/* 1 */
167223328Sgavin	: "r" (src),			/* 2 */
168223328Sgavin	  "a" (expect),			/* 3 */
169223328Sgavin	  "m" (*dst)			/* 4 */
170223328Sgavin	: "memory", "cc");
171223328Sgavin
172223328Sgavin	return (res);
173223328Sgavin}
174223328Sgavin
/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 *
 * xaddl exchanges the register with memory and stores the sum, so the
 * "+r" (v) operand holds the old value of *p on completion.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0, %1 ;	"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 (result) */
	  "=m" (*p)			/* 1 */
	: "m" (*p)			/* 2 */
	: "cc");
	return (v);
}
193223328Sgavin
/*
 * Atomically add the value of v to the long integer pointed to by p and return
 * the previous value of *p.
 *
 * 64-bit counterpart of atomic_fetchadd_int, using xaddq.
 */
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddq	%0, %1 ;	"
	"# atomic_fetchadd_long"
	: "+r" (v),			/* 0 (result) */
	  "=m" (*p)			/* 1 */
	: "m" (*p)			/* 2 */
	: "cc");
	return (v);
}
212223328Sgavin
213223328Sgavin#if defined(_KERNEL) && !defined(SMP)
214223328Sgavin
/*
 * We assume that a = b will do atomic loads and stores.  However, on a
 * PentiumPro or higher, reads may pass writes, so for that case we have
 * to use a serializing instruction (i.e. with LOCK) to do the load in
 * SMP kernels.  For UP kernels, however, the cache of the single processor
 * is always consistent, so we only need to take care of compiler.
 */
/*
 * UP-kernel variant: LOP and SOP are unused; the empty asm with a
 * "memory" clobber is a compiler-only barrier around the plain access.
 */
#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE tmp;					\
							\
	tmp = *p;					\
	__asm __volatile ("" : : : "memory");		\
	return (tmp);					\
}							\
							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile ("" : : : "memory");		\
	*p = v;						\
}							\
struct __hack
240223328Sgavin
241223328Sgavin#else /* !(_KERNEL && !SMP) */
242223328Sgavin
/*
 * SMP/userland variant: the acquire load executes a locked cmpxchg
 * (LOP) against *p — either path leaves the current value of *p in
 * res — so the load is serializing; the release store uses xchg (SOP).
 */
#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(MPLOCKED LOP			\
	: "=a" (res),			/* 0 */		\
	  "=m" (*p)			/* 1 */		\
	: "m" (*p)			/* 2 */		\
	: "memory", "cc");				\
							\
	return (res);					\
}							\
							\
/*							\
 * The XCHG instruction asserts LOCK automagically.	\
 */							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(SOP				\
	: "=m" (*p),			/* 0 */		\
	  "+r" (v)			/* 1 */		\
	: "m" (*p)			/* 2 */		\
	: "memory");					\
}							\
struct __hack
271223328Sgavin
272223328Sgavin#endif /* _KERNEL && !SMP */
273223328Sgavin
274223328Sgavin#endif /* KLD_MODULE || !__GNUCLIKE_ASM */
275223328Sgavin
/*
 * Instantiate the operations (inline bodies, or prototypes for KLD
 * modules) for each width.  The last argument is the value expression:
 * v for set/add/subtract, ~v for clear (and-not).  The %b/%w operand
 * modifiers select the byte/word register names.
 */
ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orq %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andq %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addq %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subq %1,%0",  "ir",  v);

/* Acquire-load / release-store pairs for each width. */
ATOMIC_STORE_LOAD(char,	"cmpxchgb %b0,%1", "xchgb %b1,%0");
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
ATOMIC_STORE_LOAD(int,	"cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long,	"cmpxchgq %0,%1",  "xchgq %1,%0");
300223328Sgavin
301223328Sgavin#undef ATOMIC_ASM
302223328Sgavin#undef ATOMIC_STORE_LOAD
303223328Sgavin
304223328Sgavin#ifndef WANT_FUNCTIONS
305223328Sgavin
/* Read the current value and store a zero in the destination. */
#ifdef __GNUCLIKE_ASM

/*
 * xchg with a memory operand carries an implicit lock prefix, so no
 * MPLOCKED is needed; res starts at 0 and receives the old *addr.
 */
static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int res;

	res = 0;
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_int"
	: "+r" (res),			/* 0 */
	  "=m" (*addr)			/* 1 */
	: "m" (*addr));

	return (res);
}
324223328Sgavin
/* 64-bit counterpart: xchgq's implicit lock makes this atomic. */
static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long res;

	res = 0;
	__asm __volatile(
	"	xchgq	%1,%0 ;		"
	"# atomic_readandclear_long"
	: "+r" (res),			/* 0 */
	  "=m" (*addr)			/* 1 */
	: "m" (*addr));

	return (res);
}
340223328Sgavin
#else /* !__GNUCLIKE_ASM */

/* Without GCC-style inline asm, use out-of-line kernel implementations. */
u_int	atomic_readandclear_int(volatile u_int *addr);
u_long	atomic_readandclear_long(volatile u_long *addr);

#endif /* __GNUCLIKE_ASM */
347223328Sgavin
/*
 * The _acq (acquire) and _rel (release) variants all map to the _barr
 * form generated by ATOMIC_ASM, whose "memory" clobber provides the
 * compiler-barrier semantics; cmpset is already a full barrier.
 */
#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long
387223328Sgavin
/*
 * Fixed-width (_8/_16/_32/_64) and pointer (_ptr) aliases onto the
 * char/short/int/long operations; on amd64, long and pointers are
 * both 64 bits wide.
 */
/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int

/* Operations on 64-bit quad words. */
#define	atomic_set_64		atomic_set_long
#define	atomic_set_acq_64	atomic_set_acq_long
#define	atomic_set_rel_64	atomic_set_rel_long
#define	atomic_clear_64		atomic_clear_long
#define	atomic_clear_acq_64	atomic_clear_acq_long
#define	atomic_clear_rel_64	atomic_clear_rel_long
#define	atomic_add_64		atomic_add_long
#define	atomic_add_acq_64	atomic_add_acq_long
#define	atomic_add_rel_64	atomic_add_rel_long
#define	atomic_subtract_64	atomic_subtract_long
#define	atomic_subtract_acq_64	atomic_subtract_acq_long
#define	atomic_subtract_rel_64	atomic_subtract_rel_long
#define	atomic_load_acq_64	atomic_load_acq_long
#define	atomic_store_rel_64	atomic_store_rel_long
#define	atomic_cmpset_64	atomic_cmpset_long
#define	atomic_cmpset_acq_64	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_64	atomic_cmpset_rel_long
#define	atomic_readandclear_64	atomic_readandclear_long

/* Operations on pointers. */
#define	atomic_set_ptr		atomic_set_long
#define	atomic_set_acq_ptr	atomic_set_acq_long
#define	atomic_set_rel_ptr	atomic_set_rel_long
#define	atomic_clear_ptr	atomic_clear_long
#define	atomic_clear_acq_ptr	atomic_clear_acq_long
#define	atomic_clear_rel_ptr	atomic_clear_rel_long
#define	atomic_add_ptr		atomic_add_long
#define	atomic_add_acq_ptr	atomic_add_acq_long
#define	atomic_add_rel_ptr	atomic_add_rel_long
#define	atomic_subtract_ptr	atomic_subtract_long
#define	atomic_subtract_acq_ptr	atomic_subtract_acq_long
#define	atomic_subtract_rel_ptr	atomic_subtract_rel_long
#define	atomic_load_acq_ptr	atomic_load_acq_long
#define	atomic_store_rel_ptr	atomic_store_rel_long
#define	atomic_cmpset_ptr	atomic_cmpset_long
#define	atomic_cmpset_acq_ptr	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_ptr	atomic_cmpset_rel_long
#define	atomic_readandclear_ptr	atomic_readandclear_long
480223328Sgavin
481223328Sgavin#endif /* !WANT_FUNCTIONS */
482223328Sgavin
483223328Sgavin#endif /* !_MACHINE_ATOMIC_H_ */
484223328Sgavin