/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/include/atomic.h 254617 2013-08-21 22:03:06Z jkim $
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#define	mb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define	wmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define	rmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
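
/*
 * All three barriers are implemented as a locked read-modify-write of
 * the top of the stack: a LOCK-prefixed instruction acts as a full
 * fence on x86, is typically cheaper than MFENCE, and unlike MFENCE
 * requires no SSE2 support.  The "memory" clobber additionally keeps
 * the compiler from moving memory accesses across the barrier.
 */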

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
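
/*
 * For example, a 32-bit flags word shared with an interrupt handler can
 * be updated without locking (a sketch only; FLAG_BUSY is a hypothetical
 * flag bit, not something defined by this header):
 *
 *	static volatile u_int flags;
 *
 *	atomic_set_int(&flags, FLAG_BUSY);	(flags |= FLAG_BUSY, atomically)
 *	...
 *	atomic_clear_int(&flags, FLAG_BUSY);	(flags &= ~FLAG_BUSY, atomically)
 */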

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
int	atomic_testandset_int(volatile u_int *p, u_int v);

#define	ATOMIC_LOAD(TYPE, LOP)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is marked volatile so the compiler cannot eliminate it as
 * dead code.  GCC also reorders operations aggressively, so the barrier
 * variants need a "memory" clobber to keep accesses from being moved
 * across them.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack
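
/*
 * For reference, ATOMIC_ASM(add, int, "addl %1,%0", "ir", v) below
 * expands on an SMP kernel to approximately:
 *
 *	static __inline void
 *	atomic_add_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile("lock ; addl %1,%0"
 *		: "+m" (*p)
 *		: "ir" (v)
 *		: "cc");
 *	}
 *
 * plus an atomic_add_barr_int variant whose extra "memory" clobber backs
 * the acquire/release aliases defined near the end of this file.
 */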

#if defined(_KERNEL) && !defined(WANT_FUNCTIONS)

/* I486 does not support SMP or CMPXCHG8B. */
static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
	volatile uint32_t *high, *low;
	uint64_t res;

	low = (volatile uint32_t *)p;
	high = (volatile uint32_t *)p + 1;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl %1,%%eax ;		"
	"	movl %2,%%edx ;		"
	"	popfl"
	: "=&A" (res)			/* 0 */
	: "m" (*low),			/* 1 */
	  "m" (*high)			/* 2 */
	: "memory");

	return (res);
}

static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *high, *low;

	low = (volatile uint32_t *)p;
	high = (volatile uint32_t *)p + 1;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl %%eax,%0 ;		"
	"	movl %%edx,%1 ;		"
	"	popfl"
	: "=m" (*low),			/* 0 */
	  "=m" (*high)			/* 1 */
	: "A" (v)			/* 2 */
	: "memory");
}
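
/*
 * The _i386 variants above are atomic only with respect to interrupts:
 * they bracket two plain 32-bit moves with cli/popfl.  That suffices
 * because, as noted above, CPUs lacking CMPXCHG8B do not support SMP.
 */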

static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
	uint64_t res;

	__asm __volatile(
	"	movl %%ebx,%%eax ;	"
	"	movl %%ecx,%%edx ;	"
	"	" MPLOCKED "		"
	"	cmpxchg8b %1"
	: "=&A" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "memory", "cc");

	return (res);
}

static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl %%eax,%%ebx ;	"
	"	movl %%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne 1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
}
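
/*
 * The i586 load feeds CMPXCHG8B a comparand taken from whatever is in
 * %ebx:%ecx: if the compare fails, %edx:%eax receives the old value of
 * *p; if it happens to succeed, the identical value is written back.
 * Either way the 64-bit value is read atomically.  The store loops on
 * CMPXCHG8B until the write succeeds, ignoring the compare result.
 */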

#endif /* _KERNEL && !WANT_FUNCTIONS */

/*
 * Atomic compare and set, used by the mutex functions
 *
 * if (*dst == expect) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */

#ifdef CPU_DISABLE_CMPXCHG

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%3,%1 ;		"
	"	jne	1f ;		"
	"	movl	%2,%1 ;		"
	"1:				"
	"	sete	%0 ;		"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "r" (expect)			/* 3 */
	: "memory");

	return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %3,%1 ;	"
	"	sete	%0 ;		"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+a" (expect)			/* 2 */
	: "r" (src)			/* 3 */
	: "memory", "cc");

	return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */
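
/*
 * A typical use of compare-and-set is a CAS loop; for example, a minimal
 * spin-lock acquire (a sketch only; "lk" is a hypothetical lock word, and
 * real kernel code should use the mutex(9) primitives instead):
 *
 *	static volatile u_int lk;
 *
 *	while (atomic_cmpset_acq_int(&lk, 0, 1) == 0)
 *		;				(spin until 0 -> 1 wins)
 *	... critical section ...
 *	atomic_store_rel_int(&lk, 0);		(release the lock)
 */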

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}
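
/*
 * Example: atomic_fetchadd_int yields a race-free counter or ID
 * allocator (a sketch; "next_id" is hypothetical):
 *
 *	static volatile u_int next_id;
 *
 *	u_int id = atomic_fetchadd_int(&next_id, 1);	(returns old value)
 */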

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}
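
/*
 * Only the low five bits of v select the bit ("Ir" (v & 0x1f)), so the
 * bit index is interpreted modulo 32.  Example (a sketch; "flags" is a
 * hypothetical word):
 *
 *	if (atomic_testandset_int(&flags, 3) == 0) {
 *		(bit 3 was clear and is now set; we won the race)
 *	}
 */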

/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, loads may pass stores, so for atomic_load_acq we have to
 * ensure a Store/Load barrier to do the load in SMP kernels.  We use
 * "lock cmpxchg" as recommended by the AMD Software Optimization
 * Guide, and not mfence.  For UP kernels, however, the cache of the
 * single processor is always consistent, so we only need to take care
 * of the compiler.
 */
#define	ATOMIC_STORE(TYPE)				\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__compiler_membar();				\
	*p = v;						\
}							\
struct __hack

#if defined(_KERNEL) && !defined(SMP)

#define	ATOMIC_LOAD(TYPE, LOP)				\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE tmp;					\
							\
	tmp = *p;					\
	__compiler_membar();				\
	return (tmp);					\
}							\
struct __hack

#else /* !(_KERNEL && !SMP) */

#define	ATOMIC_LOAD(TYPE, LOP)				\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(MPLOCKED LOP			\
	: "=a" (res),			/* 0 */		\
	  "+m" (*p)			/* 1 */		\
	: : "memory", "cc");				\
							\
	return (res);					\
}							\
struct __hack

#endif /* _KERNEL && !SMP */
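
/*
 * On an SMP kernel, ATOMIC_LOAD(int, "cmpxchgl %0,%1") below therefore
 * expands to approximately:
 *
 *	static __inline u_int
 *	atomic_load_acq_int(volatile u_int *p)
 *	{
 *		u_int res;
 *
 *		__asm __volatile("lock ; cmpxchgl %0,%1"
 *		: "=a" (res), "+m" (*p)
 *		: : "memory", "cc");
 *		return (res);
 *	}
 *
 * Because %eax serves as both the comparand and the source, CMPXCHG
 * either rewrites *p with its existing value or merely reads it into
 * %eax; *p is never changed, and the LOCK prefix supplies the
 * Store/Load barrier.
 */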

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

ATOMIC_LOAD(char,  "cmpxchgb %b0,%1");
ATOMIC_LOAD(short, "cmpxchgw %w0,%1");
ATOMIC_LOAD(int,   "cmpxchgl %0,%1");
ATOMIC_LOAD(long,  "cmpxchgl %0,%1");

ATOMIC_STORE(char);
ATOMIC_STORE(short);
ATOMIC_STORE(int);
ATOMIC_STORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE

#ifndef WANT_FUNCTIONS

#ifdef _KERNEL
extern uint64_t (*atomic_load_acq_64)(volatile uint64_t *);
extern void (*atomic_store_rel_64)(volatile uint64_t *, uint64_t);
#endif

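/*
 * On i386, u_long and pointers are 32 bits wide, so the long and pointer
 * variants below can safely delegate to the u_int implementations.
 */
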
static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{

	return (atomic_testandset_int((volatile u_int *)p, v));
}

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */

	return (v);
}
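
/*
 * No MPLOCKED prefix is needed above: XCHG with a memory operand asserts
 * the bus lock implicitly, so the swap is atomic even without it.
 */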

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	return (atomic_swap_int((volatile u_int *)p, (u_int)v));
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int

/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_swap_ptr(p, v) \
	atomic_swap_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */