/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/include/atomic.h 216524 2010-12-18 16:41:11Z kib $
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

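/*
 * Memory barriers.  A locked read-modify-write of a dummy stack location
 * acts as a full fence on every x86 processor, whereas the dedicated
 * lfence, sfence and mfence instructions are SSE/SSE2 additions that the
 * older CPUs supported by the i386 kernel lack, so the locked add is
 * used for all three barrier flavors.
 */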
#define	mb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory")
#define	wmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory")
#define	rmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory")

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_readandclear_int(P)	(tmp = *(u_int *)(P); *(u_int *)(P) = 0; return (tmp);)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_readandclear_long(P)	(tmp = *(u_long *)(P); *(u_long *)(P) = 0; return (tmp);)
 */
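
/*
 * A common consumer of these operations is reference counting.  A
 * minimal sketch (the "refcnt" field and obj_destroy() are hypothetical,
 * not part of this header):
 *
 *	static __inline void
 *	obj_hold(struct obj *o)
 *	{
 *		atomic_add_int(&o->refcnt, 1);
 *	}
 *
 *	static __inline void
 *	obj_release(struct obj *o)
 *	{
 *		if (atomic_fetchadd_int(&o->refcnt, -1) == 1)
 *			obj_destroy(o);
 *	}
 *
 * The fetchadd returns the value before the decrement, so exactly one
 * caller observes 1 and frees the object.
 */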

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);

#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)			\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p);	\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif
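
/*
 * With MPLOCKED defined as above, ATOMIC_ASM(add, int, ...) below
 * expands atomic_add_int(p, v) to roughly:
 *
 *	__asm __volatile("lock ; addl %1,%0"
 *	: "=m" (*p)
 *	: "ir" (v), "m" (*p)
 *	: "cc");
 *
 * On a UP kernel the lock prefix is omitted, since a single processor
 * cannot race itself within one instruction.
 */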

/*
 * The assembly is volatilized to prevent the compiler from discarding
 * it as dead code.  GCC aggressively reorders operations, so the
 * barrier variants add a memory clobber to keep memory accesses from
 * being moved across them.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "=m" (*p)					\
	: CONS (V), "m" (*p)				\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "=m" (*p)					\
	: CONS (V), "m" (*p)				\
	: "memory", "cc");				\
}							\
struct __hack
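
/*
 * The trailing "struct __hack" makes each ATOMIC_ASM() invocation end in
 * an incomplete declaration, so callers must (and may) supply the
 * terminating semicolon as if it were an ordinary statement.
 */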

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * if (*dst == expect) *dst = src (all 32-bit words)
 *
 * Returns 0 on failure, non-zero on success.
 */

#ifdef CPU_DISABLE_CMPXCHG

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%3,%4 ;		"
	"	jne	1f ;		"
	"	movl	%2,%1 ;		"
	"1:				"
	"	sete	%0 ;		"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "r" (expect),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory");

	return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %2,%1 ;	"
	"	sete	%0 ;		"
	"# atomic_cmpset_int"
	: "=a" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "a" (expect),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory", "cc");

	return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */
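
/*
 * atomic_cmpset is the building block for locking.  A minimal spinlock
 * sketch (illustrative only; kernel code should use the mtx(9)
 * primitives instead):
 *
 *	static __inline void
 *	spin_acquire(volatile u_int *lock)
 *	{
 *		while (atomic_cmpset_acq_int(lock, 0, 1) == 0)
 *			continue;
 *	}
 *
 *	static __inline void
 *	spin_release(volatile u_int *lock)
 *	{
 *		atomic_store_rel_int(lock, 0);
 *	}
 *
 * The acquire and release variants, defined later in this file, keep the
 * critical section's memory accesses inside the lock/unlock pair.
 */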

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0, %1 ;	"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 (result) */
	  "=m" (*p)			/* 1 */
	: "m" (*p)			/* 2 */
	: "cc");
	return (v);
}
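
/*
 * Since the previous value is returned, fetchadd can hand out unique
 * values, e.g. this ticket-dispensing sketch ("ticket" is a hypothetical
 * counter, not part of this header):
 *
 *	static u_int ticket;
 *
 *	u_int id = atomic_fetchadd_int(&ticket, 1);
 *
 * Concurrent callers are each guaranteed a distinct id.
 */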

#if defined(_KERNEL) && !defined(SMP)

/*
 * We assume that a = b will do atomic loads and stores.  However, on a
 * PentiumPro or higher, reads may pass writes, so for that case we have
 * to use a serializing instruction (i.e. with LOCK) to do the load in
 * SMP kernels.  For UP kernels, however, the cache of the single processor
 * is always consistent, so we only need to take care of the compiler.
 */
#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE tmp;					\
							\
	tmp = *p;					\
	__asm __volatile("" : : : "memory");		\
	return (tmp);					\
}							\
							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile("" : : : "memory");		\
	*p = v;						\
}							\
struct __hack

#else /* !(_KERNEL && !SMP) */

#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(MPLOCKED LOP			\
	: "=a" (res),			/* 0 */		\
	  "=m" (*p)			/* 1 */		\
	: "m" (*p)			/* 2 */		\
	: "memory", "cc");				\
							\
	return (res);					\
}							\
							\
/*							\
 * The XCHG instruction asserts LOCK automagically.	\
 */							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(SOP				\
	: "=m" (*p),			/* 0 */		\
	  "+r" (v)			/* 1 */		\
	: "m" (*p)			/* 2 */		\
	: "memory");					\
}							\
struct __hack

#endif /* _KERNEL && !SMP */
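
/*
 * The load_acq/store_rel pair orders ordinary accesses around a flag,
 * e.g. this hand-off sketch ("data" and "ready" are hypothetical):
 *
 *	producer:
 *		data = 42;
 *		atomic_store_rel_int(&ready, 1);
 *
 *	consumer:
 *		while (atomic_load_acq_int(&ready) == 0)
 *			continue;
 *		consume(data);
 *
 * The release store makes the write of data visible no later than the
 * store to ready; the acquire load keeps the read of data from moving
 * above the test of ready, so the consumer is guaranteed to see 42.
 */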

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

ATOMIC_STORE_LOAD(char,	"cmpxchgb %b0,%1", "xchgb %b1,%0");
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
ATOMIC_STORE_LOAD(int,	"cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long,	"cmpxchgl %0,%1",  "xchgl %1,%0");

#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD

#ifndef WANT_FUNCTIONS

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

/* Read the current value and store a zero in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int res;

	res = 0;
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_int"
	: "+r" (res),			/* 0 */
	  "=m" (*addr)			/* 1 */
	: "m" (*addr));

	return (res);
}

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long res;

	res = 0;
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_long"
	: "+r" (res),			/* 0 */
	  "=m" (*addr)			/* 1 */
	: "m" (*addr));

	return (res);
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_readandclear_int(volatile u_int *addr);
u_long	atomic_readandclear_long(volatile u_long *addr);

#endif /* __GNUCLIKE_ASM */
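
/*
 * readandclear is useful for draining a mask of pending events in one
 * step, e.g. ("pending" is a hypothetical event mask):
 *
 *	u_int events;
 *
 *	events = atomic_readandclear_int(&pending);
 *
 * Any bit another CPU sets after the xchg takes effect is preserved for
 * the next call rather than lost.
 */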

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int

/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))
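
/*
 * The pointer variants enable simple lock-free structures, e.g. pushing
 * onto a singly-linked list (sketch only; "struct node" with a "next"
 * field and the list "head" are hypothetical):
 *
 *	static __inline void
 *	push(struct node * volatile *head, struct node *n)
 *	{
 *		struct node *old;
 *
 *		do {
 *			old = *head;
 *			n->next = old;
 *		} while (atomic_cmpset_ptr(head, old, n) == 0);
 *	}
 *
 * Note that a matching lock-free pop is subject to the classic ABA
 * problem; cmpset compares values, not histories.
 */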

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */