/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/include/atomic.h 197803 2009-10-06 13:45:49Z attilio $
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

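/*
 * The barriers below are implemented as a LOCKed add of zero to the top
 * of the stack: on x86 a locked read-modify-write orders both loads and
 * stores, and unlike "mfence" (an SSE2 instruction) it is available on
 * every processor this port supports.
 */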
#define	mb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory")
#define	wmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory")
#define	rmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory")

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
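
/*
 * Example (illustrative only, not part of this header): atomically
 * manipulating a word of flag bits shared with other CPUs and with
 * interrupt handlers.  The names here are hypothetical.
 *
 *	static volatile u_int pending_flags;
 *
 *	atomic_set_int(&pending_flags, 0x1);	(mark work pending)
 *	atomic_clear_int(&pending_flags, 0x1);	(acknowledge it)
 */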

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);
int	atomic_cmpset_barr_int(volatile u_int *dst, u_int exp, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);

#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)			\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p);	\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is volatilized so that the compiler cannot optimize it
 * away.  GCC also aggressively reorders operations, so the barrier
 * variants need a "memory" clobber to keep memory accesses from being
 * reordered across them.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "=m" (*p)					\
	: CONS (V), "m" (*p));				\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "=m" (*p)					\
	: CONS (V), "m" (*p)				\
	: "memory");					\
}							\
struct __hack

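/*
 * For reference (illustrative only), ATOMIC_ASM(add, int, "addl %1,%0",
 * "ir", v) expands on an SMP kernel to roughly:
 *
 *	static __inline void
 *	atomic_add_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile("lock ; " "addl %1,%0"
 *		: "=m" (*p)
 *		: "ir" (v), "m" (*p));
 *	}
 *
 * together with an atomic_add_barr_int() variant that additionally
 * clobbers "memory".
 */
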
/*
 * Atomic compare and set, used by the mutex functions
 *
 * if (*dst == exp) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */

#ifdef CPU_DISABLE_CMPXCHG

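/*
 * 80386-class CPUs lack the cmpxchg instruction (it first appeared on
 * the i486), so kernels built to support them emulate compare-and-set
 * by briefly disabling interrupts.  That is sufficient only because
 * such CPUs are uniprocessor-only.
 */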
static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	u_char res;

	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%3,%4 ;		"
	"	jne	1f ;		"
	"	movl	%2,%1 ;		"
	"1:				"
	"	sete	%0 ;		"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "r" (exp),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory");

	return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %2,%1 ;	"
	"	sete	%0 ;		"
	"1:				"
	"# atomic_cmpset_int"
	: "=a" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "a" (exp),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory");

	return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */

#define	atomic_cmpset_barr_int		atomic_cmpset_int
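
/*
 * Example (illustrative only): the typical compare-and-set retry loop,
 * here updating a shared running maximum.  The helper name is
 * hypothetical.
 *
 *	static __inline void
 *	example_update_max(volatile u_int *max, u_int v)
 *	{
 *		u_int old;
 *
 *		do {
 *			old = *max;
 *			if (v <= old)
 *				break;
 *		} while (atomic_cmpset_int(max, old, v) == 0);
 *	}
 */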

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0, %1 ;	"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 (result) */
	  "=m" (*p)			/* 1 */
	: "m" (*p));			/* 2 */

	return (v);
}
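
/*
 * Example (illustrative only): because atomic_fetchadd_int() returns
 * the old value, it can hand out unique, monotonically increasing
 * identifiers without a lock.  The names are hypothetical.
 *
 *	static volatile u_int next_id;
 *
 *	u_int id = atomic_fetchadd_int(&next_id, 1);
 */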

#if defined(_KERNEL) && !defined(SMP)

/*
 * We assume that a = b will do atomic loads and stores.  However, on a
 * PentiumPro or higher, reads may pass writes, so for that case we have
 * to use a serializing instruction (i.e. with LOCK) to do the load in
 * SMP kernels.  For UP kernels, however, the cache of the single processor
 * is always consistent, so we only need to take care of the compiler.
 */
#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE tmp;					\
							\
	tmp = *p;					\
	__asm __volatile("" : : : "memory");		\
	return (tmp);					\
}							\
							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile("" : : : "memory");		\
	*p = v;						\
}							\
struct __hack

#else /* !(_KERNEL && !SMP) */

#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(MPLOCKED LOP			\
	: "=a" (res),			/* 0 */		\
	  "=m" (*p)			/* 1 */		\
	: "m" (*p)			/* 2 */		\
	: "memory");					\
							\
	return (res);					\
}							\
							\
/*							\
 * The XCHG instruction asserts LOCK automagically.	\
 */							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(SOP				\
	: "=m" (*p),			/* 0 */		\
	  "+r" (v)			/* 1 */		\
	: "m" (*p)			/* 2 */		\
	: "memory");					\
}							\
struct __hack

#endif /* _KERNEL && !SMP */
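
/*
 * Example (illustrative only): a release store paired with an acquire
 * load publishes data safely between CPUs.  All names are hypothetical.
 *
 *	producer:
 *		data = compute();
 *		atomic_store_rel_int(&ready, 1);
 *
 *	consumer:
 *		while (atomic_load_acq_int(&ready) == 0)
 *			continue;
 *		consume(data);
 *
 * The release store guarantees the write to "data" is visible before
 * "ready" is seen as set; the acquire load guarantees the read of
 * "data" happens after "ready" is observed.
 */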

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

ATOMIC_STORE_LOAD(char,	"cmpxchgb %b0,%1", "xchgb %b1,%0");
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
ATOMIC_STORE_LOAD(int,	"cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long,	"cmpxchgl %0,%1",  "xchgl %1,%0");

#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD

#ifndef WANT_FUNCTIONS

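/*
 * On i386, u_long and u_int are both 32 bits wide, so the "long"
 * variants below can simply cast through to the "int" implementations.
 */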
static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long exp, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)exp,
	    (u_int)src));
}

static __inline int
atomic_cmpset_barr_long(volatile u_long *dst, u_long exp, u_long src)
{

	return (atomic_cmpset_barr_int((volatile u_int *)dst, (u_int)exp,
	    (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

/* Read the current value and store a zero in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int res;

	res = 0;
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_int"
	: "+r" (res),			/* 0 */
	  "=m" (*addr)			/* 1 */
	: "m" (*addr));

	return (res);
}

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long res;

	res = 0;
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_long"
	: "+r" (res),			/* 0 */
	  "=m" (*addr)			/* 1 */
	: "m" (*addr));

	return (res);
}
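
/*
 * No MPLOCKED prefix is needed above: an xchg with a memory operand
 * asserts LOCK implicitly.  Example (illustrative only, hypothetical
 * names): draining a word of pending event bits in one atomic step.
 *
 *	u_int events;
 *
 *	events = atomic_readandclear_int(&pending_events);
 *	(then process the bits recorded in the local copy "events")
 */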

#else /* !__GNUCLIKE_ASM */

u_int	atomic_readandclear_int(volatile u_int *addr);
u_long	atomic_readandclear_long(volatile u_long *addr);

#endif /* __GNUCLIKE_ASM */

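/*
 * On x86 every LOCKed read-modify-write is a full barrier, so the
 * acquire and release variants of each operation both map to the same
 * "_barr_" flavor, which merely adds a "memory" clobber to constrain
 * the compiler.
 */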
#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_barr_int
#define	atomic_cmpset_rel_int		atomic_cmpset_barr_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_barr_long
#define	atomic_cmpset_rel_long		atomic_cmpset_barr_long

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int

/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))
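
/*
 * Example (illustrative only): lock-free push of a node onto a
 * singly-linked list using atomic_cmpset_ptr().  The structure and
 * names are hypothetical.
 *
 *	struct node { struct node *next; };
 *
 *	static void
 *	example_push(struct node * volatile *head, struct node *n)
 *	{
 *
 *		do {
 *			n->next = *head;
 *		} while (atomic_cmpset_ptr(head, n->next, n) == 0);
 *	}
 */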

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */