/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#ifdef _KERNEL
#include <machine/md_var.h>
#include <machine/specialreg.h>
#endif

#define	mb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define	wmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define	rmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
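
/*
 * Illustrative usage (a hedged sketch; the flag word and bit name below
 * are hypothetical and not part of this interface):
 *
 *	static volatile u_int flags;
 *	#define	F_BUSY	0x01
 *
 *	atomic_set_int(&flags, F_BUSY);		flags |= F_BUSY, atomically
 *	atomic_clear_int(&flags, F_BUSY);	flags &= ~F_BUSY, atomically
 */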

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
int	atomic_testandset_int(volatile u_int *p, u_int v);

#define	ATOMIC_LOAD(TYPE, LOP)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int		atomic_cmpset_64(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_swap_64(volatile uint64_t *, uint64_t);

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is marked volatile so that the compiler cannot discard it
 * as dead code.  GCC also reorders operations aggressively, so the barrier
 * variants add a "memory" clobber to prevent such reordering across them.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "=m" (*p)					\
	: CONS (V), "m" (*p)				\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "=m" (*p)					\
	: CONS (V), "m" (*p)				\
	: "memory", "cc");				\
}							\
struct __hack
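
/*
 * As a sketch of what this macro generates, the instantiation
 * ATOMIC_ASM(add, int, "addl %1,%0", "ir", v) near the end of this file
 * expands to roughly (on an SMP or userland build, where MPLOCKED is
 * "lock ; "):
 *
 *	static __inline void
 *	atomic_add_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile("lock ; addl %1,%0"
 *		: "=m" (*p)
 *		: "ir" (v), "m" (*p)
 *		: "cc");
 *	}
 *
 * plus an identical _barr_ variant that also clobbers "memory" so the
 * compiler cannot reorder other memory accesses across it.
 */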

#if defined(_KERNEL) && !defined(WANT_FUNCTIONS)

/* I486 does not support SMP or CMPXCHG8B. */
static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
	volatile uint32_t *high, *low;
	uint64_t res;

	low = (volatile uint32_t *)p;
	high = (volatile uint32_t *)p + 1;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl %1,%%eax ;		"
	"	movl %2,%%edx ;		"
	"	popfl"
	: "=&A" (res)			/* 0 */
	: "m" (*low),			/* 1 */
	  "m" (*high)			/* 2 */
	: "memory");

	return (res);
}

static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *high, *low;

	low = (volatile uint32_t *)p;
	high = (volatile uint32_t *)p + 1;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl %%eax,%0 ;		"
	"	movl %%edx,%1 ;		"
	"	popfl"
	: "=m" (*low),			/* 0 */
	  "=m" (*high)			/* 1 */
	: "A" (v)			/* 2 */
	: "memory");
}

static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
	uint64_t res;

	__asm __volatile(
	"	movl %%ebx,%%eax ;	"
	"	movl %%ecx,%%edx ;	"
	"	" MPLOCKED "		"
	"	cmpxchg8b %2"
	: "=&A" (res),			/* 0 */
	  "=m" (*p)			/* 1 */
	: "m" (*p)			/* 2 */
	: "memory", "cc");

	return (res);
}

static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl %%eax,%%ebx ;	"
	"	movl %%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %2 ;		"
	"	jne 1b"
	: "=m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: "m" (*p)			/* 2 */
	: "ebx", "ecx", "memory", "cc");
}

#endif /* _KERNEL && !WANT_FUNCTIONS */

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * if (*dst == expect) *dst = src (all 32-bit words)
 *
 * Returns 0 on failure, non-zero on success.
 */

#ifdef CPU_DISABLE_CMPXCHG

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%3,%4 ;		"
	"	jne	1f ;		"
	"	movl	%2,%1 ;		"
	"1:				"
	"       sete	%0 ;		"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "r" (expect),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory");

	return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %2,%1 ;	"
	"       sete	%0 ;		"
	"1:				"
	"# atomic_cmpset_int"
	: "=a" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "a" (expect),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory", "cc");

	return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */
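
/*
 * A common pattern built on atomic_cmpset_int() is a read-modify-write
 * retry loop.  A hedged sketch (the function and variable are hypothetical,
 * not part of this interface); it atomically raises *p to at least v:
 *
 *	static __inline void
 *	example_set_max(volatile u_int *p, u_int v)
 *	{
 *		u_int old;
 *
 *		do {
 *			old = *p;
 *			if (old >= v)
 *				return;
 *		} while (atomic_cmpset_int(p, old, v) == 0);
 *	}
 */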

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0, %1 ;	"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 (result) */
	  "=m" (*p)			/* 1 */
	: "m" (*p)			/* 2 */
	: "cc");
	return (v);
}
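
/*
 * For example, a hypothetical ticket counter could hand out unique values
 * with a single locked instruction (a sketch, not an existing interface):
 *
 *	static volatile u_int next_ticket;
 *
 *	my_ticket = atomic_fetchadd_int(&next_ticket, 1);
 *
 * Each caller receives the counter value from before its own increment,
 * so no two callers observe the same ticket.
 */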

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}
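
/*
 * atomic_testandset_int() returns the previous state of the selected bit,
 * so it can be used to claim a bit exactly once.  A hedged sketch (the
 * flag word and bit number are hypothetical):
 *
 *	if (atomic_testandset_int(&flags, 3) == 0) {
 *		this caller set bit 3 first
 *	} else {
 *		bit 3 was already set
 *	}
 *
 * Only the low five bits of the bit index are used, matching the btsl
 * operand above.
 */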

/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, loads may pass stores, so for atomic_load_acq we have to
 * ensure a Store/Load barrier to do the load in SMP kernels.  We use
 * "lock cmpxchg" as recommended by the AMD Software Optimization
 * Guide, and not mfence.  For UP kernels, however, the cache of the
 * single processor is always consistent, so we only need to take care
 * of the compiler.
 */
#define	ATOMIC_STORE(TYPE)				\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__compiler_membar();				\
	*p = v;						\
}							\
struct __hack

#if defined(_KERNEL) && !defined(SMP)

#define	ATOMIC_LOAD(TYPE, LOP)				\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE tmp;					\
							\
	tmp = *p;					\
	__compiler_membar();				\
	return (tmp);					\
}							\
struct __hack

#else /* !(_KERNEL && !SMP) */

#define	ATOMIC_LOAD(TYPE, LOP)				\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(MPLOCKED LOP			\
	: "=a" (res),			/* 0 */		\
	  "=m" (*p)			/* 1 */		\
	: "m" (*p)			/* 2 */		\
	: "memory", "cc");				\
							\
	return (res);					\
}							\
struct __hack

#endif /* _KERNEL && !SMP */
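
/*
 * A typical use of the acquire/release pairs above is publishing data:
 * the writer fills in a structure and then performs a release store of a
 * ready flag, while the reader performs an acquire load of the flag before
 * touching the structure.  A hedged sketch with hypothetical names:
 *
 *	writer:
 *		sc->data = compute();
 *		atomic_store_rel_int(&sc->ready, 1);
 *
 *	reader:
 *		if (atomic_load_acq_int(&sc->ready) != 0)
 *			consume(sc->data);
 *
 * The release store keeps the data write from being reordered after the
 * flag store, and the acquire load keeps the data read from being
 * reordered before the flag load.
 */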

#ifdef _KERNEL

#ifdef WANT_FUNCTIONS
int		atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
int		atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_swap_64_i386(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i586(volatile uint64_t *, uint64_t);
#endif

/* I486 does not support SMP or CMPXCHG8B. */
static __inline int
atomic_cmpset_64_i386(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	volatile uint32_t *p;
	u_char res;

	p = (volatile uint32_t *)dst;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	xorl	%1,%%eax ;	"
	"	xorl	%2,%%edx ;	"
	"	orl	%%edx,%%eax ;	"
	"	jne	1f ;		"
	"	movl	%4,%1 ;		"
	"	movl	%5,%2 ;		"
	"1:				"
	"	sete	%3 ;		"
	"	popfl"
	: "+A" (expect),		/* 0 */
	  "+m" (*p),			/* 1 */
	  "+m" (*(p + 1)),		/* 2 */
	  "=q" (res)			/* 3 */
	: "r" ((uint32_t)src),		/* 4 */
	  "r" ((uint32_t)(src >> 32))	/* 5 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_swap_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	movl	%4,%2 ;		"
	"	movl	%3,%1 ;		"
	"	popfl"
	: "=&A" (res),			/* 0 */
	  "+m" (*q),			/* 1 */
	  "+m" (*(q + 1))		/* 2 */
	: "r" ((uint32_t)v),		/* 3 */
	  "r" ((uint32_t)(v >> 32)));	/* 4 */
	return (res);
}

static __inline int
atomic_cmpset_64_i586(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchg8b %1 ;		"
	"	sete	%0"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+A" (expect)			/* 2 */
	: "b" ((uint32_t)src),		/* 3 */
	  "c" ((uint32_t)(src >> 32))	/* 4 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_swap_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
	return (v);
}

static __inline int
atomic_cmpset_64(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_cmpset_64_i386(dst, expect, src));
	else
		return (atomic_cmpset_64_i586(dst, expect, src));
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_swap_64_i386(p, v));
	else
		return (atomic_swap_64_i586(p, v));
}

#endif /* _KERNEL */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

ATOMIC_LOAD(char,  "cmpxchgb %b0,%1");
ATOMIC_LOAD(short, "cmpxchgw %w0,%1");
ATOMIC_LOAD(int,   "cmpxchgl %0,%1");
ATOMIC_LOAD(long,  "cmpxchgl %0,%1");

ATOMIC_STORE(char);
ATOMIC_STORE(short);
ATOMIC_STORE(int);
ATOMIC_STORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE

#ifndef WANT_FUNCTIONS

#ifdef _KERNEL
extern uint64_t (*atomic_load_acq_64)(volatile uint64_t *);
extern void (*atomic_store_rel_64)(volatile uint64_t *, uint64_t);
#endif

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{

	return (atomic_testandset_int((volatile u_int *)p, v));
}

/* Read the current value and store a zero in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int res;

	res = 0;
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_int"
	: "+r" (res),			/* 0 */
	  "=m" (*addr)			/* 1 */
	: "m" (*addr));

	return (res);
}

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long res;

	res = 0;
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_long"
	: "+r" (res),			/* 0 */
	  "=m" (*addr)			/* 1 */
	: "m" (*addr));

	return (res);
}
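
/*
 * atomic_readandclear_int() is convenient for draining a word of pending
 * event bits in one step.  A hedged sketch (the softc field and handler
 * are hypothetical):
 *
 *	pending = atomic_readandclear_int(&sc->intr_pending);
 *	while (pending != 0) {
 *		bit = ffs(pending) - 1;
 *		pending &= ~(1u << bit);
 *		handle_event(sc, bit);
 *	}
 *
 * Bits set after the xchgl remain in the word for a later pass instead of
 * being lost.
 */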

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	return (atomic_swap_int((volatile u_int *)p, (u_int)v));
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_readandclear_int(volatile u_int *addr);
u_long	atomic_readandclear_long(volatile u_long *addr);
u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int

/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_swap_ptr(p, v) \
	atomic_swap_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))
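
/*
 * The pointer variants simply recast to the 32-bit integer operations,
 * since pointers are 32 bits wide on i386.  A hedged sketch of a lock-free
 * single-linked push built on atomic_cmpset_ptr() (the list head and node
 * type are hypothetical):
 *
 *	do {
 *		elem->next = head;
 *	} while (atomic_cmpset_ptr(&head, elem->next, elem) == 0);
 */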

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */
