/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/include/atomic.h 220404 2011-04-06 23:59:59Z jkim $
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#define	mb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory")
#define	wmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory")
#define	rmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory")
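
/*
 * All three barriers expand to a LOCKed add of zero to the top of the
 * stack: unlike mfence/lfence/sfence, which arrived only with SSE/SSE2,
 * a LOCKed read-modify-write acts as a full barrier on every i386-class
 * CPU.  A hypothetical producer/consumer sketch, purely for
 * illustration (variable names are not part of this API):
 *
 *	data = 42;			(producer)
 *	wmb();
 *	ready = 1;
 *
 *	while (ready == 0)		(consumer)
 *		;
 *	rmb();
 *	consume(data);
 */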

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);

#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)			\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p);	\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is marked volatile so that the compiler cannot discard
 * it as dead code.  GCC also reorders memory operations aggressively,
 * so the barrier variants must clobber "memory" to prevent that.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "=m" (*p)					\
	: CONS (V), "m" (*p)				\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "=m" (*p)					\
	: CONS (V), "m" (*p)				\
	: "memory", "cc");				\
}							\
struct __hack
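
/*
 * For illustration, on an SMP kernel ATOMIC_ASM(add, int, "addl %1,%0",
 * "ir", v) from below expands (modulo whitespace) to:
 *
 *	static __inline void
 *	atomic_add_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile("lock ; addl %1,%0"
 *		: "=m" (*p)
 *		: "ir" (v), "m" (*p)
 *		: "cc");
 *	}
 *
 * plus an atomic_add_barr_int() variant that additionally clobbers
 * "memory".  The trailing "struct __hack" exists only to consume the
 * semicolon at each macro invocation.
 */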

#if defined(_KERNEL) && !defined(WANT_FUNCTIONS)

/* I486 does not support SMP or CMPXCHG8B. */
static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
	volatile uint32_t *high, *low;
	uint64_t res;

	low = (volatile uint32_t *)p;
	high = (volatile uint32_t *)p + 1;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl %1,%%eax ;		"
	"	movl %2,%%edx ;		"
	"	popfl"
	: "=&A" (res)			/* 0 */
	: "m" (*low),			/* 1 */
	  "m" (*high)			/* 2 */
	: "memory");

	return (res);
}

static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *high, *low;

	low = (volatile uint32_t *)p;
	high = (volatile uint32_t *)p + 1;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl %%eax,%0 ;		"
	"	movl %%edx,%1 ;		"
	"	popfl"
	: "=m" (*low),			/* 0 */
	  "=m" (*high)			/* 1 */
	: "A" (v)			/* 2 */
	: "memory");
}

static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
	uint64_t res;

	__asm __volatile(
	"	movl %%ebx,%%eax ;	"
	"	movl %%ecx,%%edx ;	"
	"	" MPLOCKED "		"
	"	cmpxchg8b %2"
	: "=&A" (res),			/* 0 */
	  "=m" (*p)			/* 1 */
	: "m" (*p)			/* 2 */
	: "memory", "cc");

	return (res);
}

static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl %%eax,%%ebx ;	"
	"	movl %%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %2 ;		"
	"	jne 1b"
	: "=m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: "m" (*p)			/* 2 */
	: "ebx", "ecx", "memory", "cc");
}

#endif /* _KERNEL && !WANT_FUNCTIONS */
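
/*
 * The i586 load above relies on CMPXCHG8B accessing its 64-bit memory
 * operand as a single unit (under the LOCK prefix on SMP): %edx:%eax
 * is preloaded with the same arbitrary value offered in %ecx:%ebx, so
 * the location is either rewritten with its own contents or simply
 * read, and the old value lands in %edx:%eax either way.  The i586
 * store loops on CMPXCHG8B until the exchange succeeds.  The i386
 * variants instead disable interrupts around two 32-bit moves, which
 * suffices only because the I486 does not support SMP.
 */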

/*
 * Atomic compare and set, used by the mutex functions
 *
 * if (*dst == expect) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */

#ifdef CPU_DISABLE_CMPXCHG

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%3,%4 ;		"
	"	jne	1f ;		"
	"	movl	%2,%1 ;		"
	"1:				"
	"       sete	%0 ;		"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "r" (expect),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory");

	return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %2,%1 ;	"
	"       sete	%0 ;		"
	"1:				"
	"# atomic_cmpset_int"
	: "=a" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "a" (expect),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory", "cc");

	return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */
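
/*
 * A minimal spinlock sketch built on atomic_cmpset_int, purely for
 * illustration (the kernel's real mutexes layer ownership and sleeping
 * logic on the same primitive); "lockword" is a hypothetical variable:
 *
 *	static volatile u_int lockword = 0;
 *
 *	while (atomic_cmpset_acq_int(&lockword, 0, 1) == 0)
 *		;			(spin until 0 -> 1 succeeds)
 *	(... critical section ...)
 *	atomic_store_rel_int(&lockword, 0);
 */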

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0, %1 ;	"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 (result) */
	  "=m" (*p)			/* 1 */
	: "m" (*p)			/* 2 */
	: "cc");
	return (v);
}
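
/*
 * Illustrative use (hypothetical variables): a shared counter where
 * each caller needs the pre-increment value, e.g. to take a ticket.
 * The read and the add are one indivisible XADD, so no two callers
 * can observe the same ticket:
 *
 *	static volatile u_int tickets = 0;
 *	u_int mine;
 *
 *	mine = atomic_fetchadd_int(&tickets, 1);
 */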

#if defined(_KERNEL) && !defined(SMP)

/*
 * We assume that a = b will do atomic loads and stores.  However, on a
 * PentiumPro or higher, reads may pass writes, so SMP kernels must use
 * a serializing instruction (i.e., one carrying a LOCK prefix) to do
 * the load.  For UP kernels the cache of the single processor is
 * always consistent, so we only need to restrain the compiler.
 */
#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE tmp;					\
							\
	tmp = *p;					\
	__asm __volatile("" : : : "memory");		\
	return (tmp);					\
}							\
							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile("" : : : "memory");		\
	*p = v;						\
}							\
struct __hack

#else /* !(_KERNEL && !SMP) */

#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(MPLOCKED LOP			\
	: "=a" (res),			/* 0 */		\
	  "=m" (*p)			/* 1 */		\
	: "m" (*p)			/* 2 */		\
	: "memory", "cc");				\
							\
	return (res);					\
}							\
							\
/*							\
 * The XCHG instruction asserts LOCK automagically.	\
 */							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(SOP				\
	: "=m" (*p),			/* 0 */		\
	  "+r" (v)			/* 1 */		\
	: "m" (*p)			/* 2 */		\
	: "memory");					\
}							\
struct __hack

#endif /* _KERNEL && !SMP */
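
/*
 * An illustrative acquire/release pairing (hypothetical variables): a
 * producer publishes data with a release store, and a consumer that
 * observes the flag via an acquire load is guaranteed to also see the
 * data written before it:
 *
 *	datum = compute();			(producer)
 *	atomic_store_rel_int(&flag, 1);
 *
 *	while (atomic_load_acq_int(&flag) == 0)	(consumer)
 *		;
 *	use(datum);
 */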

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

ATOMIC_STORE_LOAD(char,	"cmpxchgb %b0,%1", "xchgb %b1,%0");
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
ATOMIC_STORE_LOAD(int,	"cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long,	"cmpxchgl %0,%1",  "xchgl %1,%0");

#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD

#ifndef WANT_FUNCTIONS

#ifdef _KERNEL
extern uint64_t (*atomic_load_acq_64)(volatile uint64_t *);
extern void (*atomic_store_rel_64)(volatile uint64_t *, uint64_t);
#endif

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

/* Read the current value and store a zero in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int res;

	res = 0;
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_int"
	: "+r" (res),			/* 0 */
	  "=m" (*addr)			/* 1 */
	: "m" (*addr));

	return (res);
}

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long res;

	res = 0;
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_long"
	: "+r" (res),			/* 0 */
	  "=m" (*addr)			/* 1 */
	: "m" (*addr));

	return (res);
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_readandclear_int(volatile u_int *addr);
u_long	atomic_readandclear_long(volatile u_long *addr);

#endif /* __GNUCLIKE_ASM */
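
/*
 * Typical (illustrative) use of readandclear: atomically take over a
 * word of pending bits so that no setter's bit is lost and none is
 * handled twice; "pending_bits" is a hypothetical variable:
 *
 *	u_int pending;
 *
 *	pending = atomic_readandclear_int(&pending_bits);
 *	(... service each bit set in pending ...)
 */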

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int

/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))
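
/*
 * Illustrative (hypothetical) use of the pointer operations: pushing
 * an element onto a lock-free singly-linked stack (the pop side would
 * additionally have to guard against ABA problems).  On i386 pointers
 * are 32 bits wide, which is why these macros can simply cast through
 * u_int:
 *
 *	do {
 *		e->next = head;
 *	} while (atomic_cmpset_ptr(&head, e->next, e) == 0);
 */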

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */