atomic.h revision 326514
/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/i386/include/atomic.h 326514 2017-12-04 09:41:57Z hselasky $
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#ifdef _KERNEL
#include <machine/md_var.h>
#include <machine/specialreg.h>
#endif

#ifndef __OFFSETOF_MONITORBUF
/*
 * __OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in i386/vm_machdep.c ensures that the value is correct.
 */
#define	__OFFSETOF_MONITORBUF	0x180

static __inline void
__mbk(void)
{

	__asm __volatile("lock; addl $0,%%fs:%0"
	    : "+m" (*(u_int *)__OFFSETOF_MONITORBUF) : : "memory", "cc");
}

static __inline void
__mbu(void)
{

	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc");
}
#endif

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
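
/*
 * For example, where plain code would do "flags |= FOO_BUSY" or
 * "cnt += 1", the atomic forms are written as follows (the variable
 * and flag names here are hypothetical, shown for illustration only):
 *
 *	atomic_set_int(&flags, FOO_BUSY);
 *	atomic_add_int(&cnt, 1);
 *
 * Each of these compiles to a single instruction (locked on SMP), so
 * no other CPU can observe a partially applied update.
 */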

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
int	atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
int	atomic_testandset_int(volatile u_int *p, u_int v);
int	atomic_testandclear_int(volatile u_int *p, u_int v);
void	atomic_thread_fence_acq(void);
void	atomic_thread_fence_acq_rel(void);
void	atomic_thread_fence_rel(void);
void	atomic_thread_fence_seq_cst(void);

#define	ATOMIC_LOAD(TYPE)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int		atomic_cmpset_64(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64(volatile uint64_t *);
void		atomic_store_rel_64(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64(volatile uint64_t *, uint64_t);
uint64_t	atomic_fetchadd_64(volatile uint64_t *, uint64_t);

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly statements are marked volatile so that the compiler cannot
 * discard them as dead code.  GCC also reorders operations aggressively,
 * so the barrier variants need an explicit memory clobber to keep memory
 * accesses from being moved across them.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack

/*
 * Atomic compare and set, used by the mutex functions
 *
 * if (*dst == expect) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %3,%1 ;	"
	"       sete	%0 ;		"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+a" (expect)			/* 2 */
	: "r" (src)			/* 3 */
	: "memory", "cc");
	return (res);
}

static __inline int
atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %3,%1 ;	"
	"       sete	%0 ;		"
	"# atomic_fcmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+a" (*expect)		/* 2 */
	: "r" (src)			/* 3 */
	: "memory", "cc");
	return (res);
}
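
/*
 * Illustrative sketch only (not part of the MI atomic(9) interface):
 * the usual retry idiom built on atomic_fcmpset_int().  On failure the
 * value observed in *dst is written back through "expect", so the loop
 * does not need to re-read the target itself.  The function name and
 * the saturation limit are hypothetical.
 */
static __inline int
__ex_add_capped_int(volatile u_int *p, u_int lim)
{
	u_int old;

	old = *p;
	do {
		if (old >= lim)
			return (0);	/* already at the cap */
	} while (!atomic_fcmpset_int(p, &old, old + 1));
	return (1);
}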

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}
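
/*
 * Illustrative sketch only: because atomic_fetchadd_int() returns the
 * value the word held before the addition, it is a natural building
 * block for ticket-style sequence numbers.  The helper name below is
 * hypothetical.
 */
static __inline u_int
__ex_next_ticket(volatile u_int *counter)
{

	/* Every caller gets a distinct, monotonically increasing ticket. */
	return (atomic_fetchadd_int(counter, 1));
}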

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandclear_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}
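
/*
 * Illustrative sketch only: atomic_testandset_int() returns the prior
 * state of the bit, so a single bit of a word can act as a polled
 * "busy" flag.  The helper names and the choice of bit 0 are
 * hypothetical; callers that rely on the flag to order other data
 * still need the acquire/release operations or fences defined
 * elsewhere in this file.
 */
static __inline int
__ex_try_busy(volatile u_int *flags)
{

	/* Non-zero if this caller is the one that set the busy bit. */
	return (atomic_testandset_int(flags, 0) == 0);
}

static __inline void
__ex_clear_busy(volatile u_int *flags)
{

	(void)atomic_testandclear_int(flags, 0);
}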

/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, and not mfence.  In the kernel, we use a private per-cpu
 * cache line for "mem", to avoid introducing false data
 * dependencies.  In user space, we use the word at the top of the
 * stack.
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */

#if defined(_KERNEL)
#if defined(SMP)
#define	__storeload_barrier()	__mbk()
#else /* _KERNEL && UP */
#define	__storeload_barrier()	__compiler_membar()
#endif /* SMP */
#else /* !_KERNEL */
#define	__storeload_barrier()	__mbu()
#endif /* _KERNEL */

#define	ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define	ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack

static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}
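
/*
 * Illustrative sketch only, tying the fences to the Store/Load
 * discussion above: a fragment of the classic Dekker-style handshake.
 * Each side publishes its intent with a store and must not read the
 * other side's flag until that store is visible, which is exactly what
 * the seq_cst fence provides.  The flag pointers and the function name
 * are hypothetical.
 */
static __inline int
__ex_dekker_enter(volatile u_int *my_flag, volatile u_int *other_flag)
{

	*my_flag = 1;
	atomic_thread_fence_seq_cst();	/* Store/Load barrier */
	return (*other_flag == 0);	/* may proceed only if clear */
}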

#ifdef _KERNEL

#ifdef WANT_FUNCTIONS
int		atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
int		atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64_i386(volatile uint64_t *);
uint64_t	atomic_load_acq_64_i586(volatile uint64_t *);
void		atomic_store_rel_64_i386(volatile uint64_t *, uint64_t);
void		atomic_store_rel_64_i586(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i386(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i586(volatile uint64_t *, uint64_t);
#endif

/* I486 does not support SMP or CMPXCHG8B. */
static __inline int
atomic_cmpset_64_i386(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	volatile uint32_t *p;
	u_char res;

	p = (volatile uint32_t *)dst;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	xorl	%1,%%eax ;	"
	"	xorl	%2,%%edx ;	"
	"	orl	%%edx,%%eax ;	"
	"	jne	1f ;		"
	"	movl	%4,%1 ;		"
	"	movl	%5,%2 ;		"
	"1:				"
	"	sete	%3 ;		"
	"	popfl"
	: "+A" (expect),		/* 0 */
	  "+m" (*p),			/* 1 */
	  "+m" (*(p + 1)),		/* 2 */
	  "=q" (res)			/* 3 */
	: "r" ((uint32_t)src),		/* 4 */
	  "r" ((uint32_t)(src >> 32))	/* 5 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	popfl"
	: "=&A" (res)			/* 0 */
	: "m" (*q),			/* 1 */
	  "m" (*(q + 1))		/* 2 */
	: "memory");
	return (res);
}

static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%%eax,%0 ;	"
	"	movl	%%edx,%1 ;	"
	"	popfl"
	: "=m" (*q),			/* 0 */
	  "=m" (*(q + 1))		/* 1 */
	: "A" (v)			/* 2 */
	: "memory");
}

static __inline uint64_t
atomic_swap_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	movl	%4,%2 ;		"
	"	movl	%3,%1 ;		"
	"	popfl"
	: "=&A" (res),			/* 0 */
	  "+m" (*q),			/* 1 */
	  "+m" (*(q + 1))		/* 2 */
	: "r" ((uint32_t)v),		/* 3 */
	  "r" ((uint32_t)(v >> 32)));	/* 4 */
	return (res);
}

static __inline int
atomic_cmpset_64_i586(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchg8b %1 ;		"
	"	sete	%0"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+A" (expect)			/* 2 */
	: "b" ((uint32_t)src),		/* 3 */
	  "c" ((uint32_t)(src >> 32))	/* 4 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
	uint64_t res;

	__asm __volatile(
	"	movl	%%ebx,%%eax ;	"
	"	movl	%%ecx,%%edx ;	"
	"	" MPLOCKED "		"
	"	cmpxchg8b %1"
	: "=&A" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "memory", "cc");
	return (res);
}

static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
}

static __inline uint64_t
atomic_swap_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
	return (v);
}

static __inline int
atomic_cmpset_64(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_cmpset_64_i386(dst, expect, src));
	else
		return (atomic_cmpset_64_i586(dst, expect, src));
}

static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_load_acq_64_i386(p));
	else
		return (atomic_load_acq_64_i586(p));
}

static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		atomic_store_rel_64_i386(p, v);
	else
		atomic_store_rel_64_i586(p, v);
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_swap_64_i386(p, v));
	else
		return (atomic_swap_64_i586(p, v));
}

static __inline uint64_t
atomic_fetchadd_64(volatile uint64_t *p, uint64_t v)
{

	for (;;) {
		uint64_t t = *p;
		if (atomic_cmpset_64(p, t, t + v))
			return (t);
	}
}
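
/*
 * Illustrative sketch only: atomic_cmpset_64() can be used to build
 * other 64-bit read-modify-write operations in the same way
 * atomic_fetchadd_64() is built above, for example recording a
 * high-water mark.  The function name is hypothetical.
 */
static __inline void
__ex_update_max_64(volatile uint64_t *p, uint64_t v)
{
	uint64_t t;

	do {
		t = *p;
		if (t >= v)
			return;
	} while (!atomic_cmpset_64(p, t, v));
}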

#endif /* _KERNEL */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

#define	ATOMIC_LOADSTORE(TYPE)				\
	ATOMIC_LOAD(TYPE);				\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE

#ifndef WANT_FUNCTIONS

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{

	return (atomic_testandset_int((volatile u_int *)p, v));
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{

	return (atomic_testandclear_int((volatile u_int *)p, v));
}

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	return (atomic_swap_int((volatile u_int *)p, (u_int)v));
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */
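
/*
 * Illustrative sketch only: atomic_swap_int() both reads and replaces
 * a word in one step, so it can hand a pending-work bitmask off to a
 * single consumer without losing bits posted concurrently with
 * atomic_set_int().  This is also exactly how atomic_readandclear_int()
 * is defined below.  The helper name is hypothetical.
 */
static __inline u_int
__ex_collect_pending(volatile u_int *pending)
{

	return (atomic_swap_int(pending, 0));
}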

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int
#define	atomic_fcmpset_acq_int		atomic_fcmpset_int
#define	atomic_fcmpset_rel_int		atomic_fcmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long
#define	atomic_fcmpset_acq_long		atomic_fcmpset_long
#define	atomic_fcmpset_rel_long		atomic_fcmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_fcmpset_32	atomic_fcmpset_int
#define	atomic_fcmpset_acq_32	atomic_fcmpset_acq_int
#define	atomic_fcmpset_rel_32	atomic_fcmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int
#define	atomic_testandclear_32	atomic_testandclear_int

/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_fcmpset_ptr(dst, old, new) \
	atomic_fcmpset_int((volatile u_int *)(dst), (u_int *)(old), (u_int)(new))
#define	atomic_fcmpset_acq_ptr(dst, old, new) \
	atomic_fcmpset_acq_int((volatile u_int *)(dst), (u_int *)(old), \
	    (u_int)(new))
#define	atomic_fcmpset_rel_ptr(dst, old, new) \
	atomic_fcmpset_rel_int((volatile u_int *)(dst), (u_int *)(old), \
	    (u_int)(new))
#define	atomic_swap_ptr(p, v) \
	atomic_swap_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))
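
/*
 * Illustrative sketch only: a lock-free LIFO push built on
 * atomic_cmpset_ptr().  The structure and function names are
 * hypothetical and exist only to show the retry idiom; a matching pop
 * is omitted because it would need ABA protection.
 */
struct __ex_lifo_node {
	struct __ex_lifo_node	*n_next;
};

static __inline void
__ex_lifo_push(struct __ex_lifo_node * volatile *head,
    struct __ex_lifo_node *node)
{
	struct __ex_lifo_node *old;

	do {
		old = *head;
		node->n_next = old;
	} while (atomic_cmpset_ptr(head, old, node) == 0);
}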

#endif /* !WANT_FUNCTIONS */

#if defined(_KERNEL)
#define	mb()	__mbk()
#define	wmb()	__mbk()
#define	rmb()	__mbk()
#else
#define	mb()	__mbu()
#define	wmb()	__mbu()
#define	rmb()	__mbu()
#endif

#endif /* !_MACHINE_ATOMIC_H_ */