/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/include/atomic.h 185720 2008-12-06 21:33:44Z kib $
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

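/*
 * Memory barriers.  A locked read-modify-write instruction orders all
 * earlier loads and stores before all later ones on x86, so a locked add
 * of zero to the word at the top of the stack serves as a full barrier;
 * the same sequence is reused for the read and write barriers.
 */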
#define	mb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory")
#define	wmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory")
#define	rmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory")

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
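
/*
 * Illustrative sketch of how the simple operations above are used on a
 * shared flags word.  This is an example only (the #if 0 keeps it out of
 * compilation) and the names are hypothetical.
 */
#if 0
static volatile u_int	example_flags;

#define	EXAMPLE_F_BUSY	0x01
#define	EXAMPLE_F_DIRTY	0x02

static __inline void
example_mark_dirty(void)
{

	/* *p |= v, atomic with respect to other CPUs and interrupts. */
	atomic_set_int(&example_flags, EXAMPLE_F_DIRTY);
}

static __inline void
example_clear_busy(void)
{

	/* *p &= ~v, atomically. */
	atomic_clear_int(&example_flags, EXAMPLE_F_BUSY);
}
#endif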

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);

#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)			\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p);	\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is volatilized to demark potential before-and-after side
 * effects if an interrupt or SMP collision were to occur.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "=m" (*p)					\
	: CONS (V), "m" (*p));				\
}							\
struct __hack

/*
 * Atomic compare and set, used by the mutex functions
 *
 * if (*dst == exp) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */

#ifdef CPU_DISABLE_CMPXCHG

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	u_char res;

	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%3,%4 ;		"
	"	jne	1f ;		"
	"	movl	%2,%1 ;		"
	"1:				"
	"       sete	%0 ;		"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "r" (exp),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory");

	return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %2,%1 ;	"
	"       sete	%0 ;		"
	"1:				"
	"# atomic_cmpset_int"
	: "=a" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "a" (exp),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory");

	return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */
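
/*
 * Illustrative sketch of the usual compare-and-set retry loop: raise a
 * shared word to a new maximum.  Example only (kept out of compilation by
 * the #if 0); the names are hypothetical.
 */
#if 0
static __inline void
example_update_max(volatile u_int *maxp, u_int v)
{
	u_int old;

	do {
		old = *maxp;
		if (v <= old)
			return;
		/* Retry if another CPU changed *maxp since it was read. */
	} while (atomic_cmpset_int(maxp, old, v) == 0);
}
#endif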

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0, %1 ;	"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 (result) */
	  "=m" (*p)			/* 1 */
	: "m" (*p));			/* 2 */

	return (v);
}
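
/*
 * Illustrative sketch: because atomic_fetchadd_int() returns the value the
 * word held before the addition, each caller gets a distinct, ordered
 * value, e.g. a ticket number.  Example only (#if 0); names are
 * hypothetical.
 */
#if 0
static volatile u_int	example_next_ticket;

static __inline u_int
example_take_ticket(void)
{

	return (atomic_fetchadd_int(&example_next_ticket, 1));
}
#endif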

#if defined(_KERNEL) && !defined(SMP)

/*
 * We assume that a = b will do atomic loads and stores.  However, on a
 * PentiumPro or higher, reads may pass writes, so for that case we have
 * to use a serializing instruction (i.e. with LOCK) to do the load in
 * SMP kernels.  For UP kernels, however, the cache of the single processor
 * is always consistent, so we don't need any memory barriers.
 */
#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	return (*p);					\
}							\
							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	*p = v;						\
}							\
struct __hack

#else /* !(_KERNEL && !SMP) */

#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(MPLOCKED LOP			\
	: "=a" (res),			/* 0 */		\
	  "=m" (*p)			/* 1 */		\
	: "m" (*p)			/* 2 */		\
	: "memory");					\
							\
	return (res);					\
}							\
							\
/*							\
 * The XCHG instruction asserts LOCK automagically.	\
 */							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(SOP				\
	: "=m" (*p),			/* 0 */		\
	  "+r" (v)			/* 1 */		\
	: "m" (*p));			/* 2 */		\
}							\
struct __hack

#endif /* _KERNEL && !SMP */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

ATOMIC_STORE_LOAD(char,	"cmpxchgb %b0,%1", "xchgb %b1,%0");
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
ATOMIC_STORE_LOAD(int,	"cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long,	"cmpxchgl %0,%1",  "xchgl %1,%0");

#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD
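
/*
 * Illustrative sketch of the acquire/release load and store pairs: the
 * writer fills in the data and then publishes it with a releasing store of
 * the flag; a reader that sees the flag via an acquiring load is guaranteed
 * to see the data written before it.  Example only (#if 0); names are
 * hypothetical.
 */
#if 0
static u_int		example_data;
static volatile u_int	example_ready;

static __inline void
example_publish(u_int v)
{

	example_data = v;
	/* Release: the data store above cannot be reordered past this. */
	atomic_store_rel_int(&example_ready, 1);
}

static __inline int
example_consume(u_int *vp)
{

	/* Acquire: later loads cannot be reordered before this. */
	if (atomic_load_acq_int(&example_ready) == 0)
		return (0);
	*vp = example_data;
	return (1);
}
#endif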

#ifndef WANT_FUNCTIONS

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long exp, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)exp,
	    (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

/* Read the current value and store a zero in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int res;

	res = 0;
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_int"
	: "+r" (res),			/* 0 */
	  "=m" (*addr)			/* 1 */
	: "m" (*addr));

	return (res);
}

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long res;

	res = 0;
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_long"
	: "+r" (res),			/* 0 */
	  "=m" (*addr)			/* 1 */
	: "m" (*addr));

	return (res);
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_readandclear_int(volatile u_int *addr);
u_long	atomic_readandclear_long(volatile u_long *addr);

#endif /* __GNUCLIKE_ASM */
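
/*
 * Illustrative sketch: atomic_readandclear_int() drains a word of pending
 * event bits in a single step, so bits set by another CPU after the read
 * are not lost.  Example only (#if 0); names are hypothetical.
 */
#if 0
static volatile u_int	example_pending;

static __inline u_int
example_drain_pending(void)
{

	/* Return the old bits and leave zero behind, atomically. */
	return (atomic_readandclear_int(&example_pending));
}
#endif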

/*
 * Acquire and release variants are identical to the normal ones: the
 * lock-prefixed instructions that implement them already act as full
 * barriers on SMP, and no barriers are needed on UP.
 */
#define	atomic_set_acq_char		atomic_set_char
#define	atomic_set_rel_char		atomic_set_char
#define	atomic_clear_acq_char		atomic_clear_char
#define	atomic_clear_rel_char		atomic_clear_char
#define	atomic_add_acq_char		atomic_add_char
#define	atomic_add_rel_char		atomic_add_char
#define	atomic_subtract_acq_char	atomic_subtract_char
#define	atomic_subtract_rel_char	atomic_subtract_char

#define	atomic_set_acq_short		atomic_set_short
#define	atomic_set_rel_short		atomic_set_short
#define	atomic_clear_acq_short		atomic_clear_short
#define	atomic_clear_rel_short		atomic_clear_short
#define	atomic_add_acq_short		atomic_add_short
#define	atomic_add_rel_short		atomic_add_short
#define	atomic_subtract_acq_short	atomic_subtract_short
#define	atomic_subtract_rel_short	atomic_subtract_short

#define	atomic_set_acq_int		atomic_set_int
#define	atomic_set_rel_int		atomic_set_int
#define	atomic_clear_acq_int		atomic_clear_int
#define	atomic_clear_rel_int		atomic_clear_int
#define	atomic_add_acq_int		atomic_add_int
#define	atomic_add_rel_int		atomic_add_int
#define	atomic_subtract_acq_int		atomic_subtract_int
#define	atomic_subtract_rel_int		atomic_subtract_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_long
#define	atomic_set_rel_long		atomic_set_long
#define	atomic_clear_acq_long		atomic_clear_long
#define	atomic_clear_rel_long		atomic_clear_long
#define	atomic_add_acq_long		atomic_add_long
#define	atomic_add_rel_long		atomic_add_long
#define	atomic_subtract_acq_long	atomic_subtract_long
#define	atomic_subtract_rel_long	atomic_subtract_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int

/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))
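
/*
 * Illustrative sketch: the pointer macros let pointer-sized fields be
 * manipulated with the same primitives, since a pointer is the size of a
 * u_int on i386.  A minimal compare-and-set push onto a singly-linked
 * list; example only (#if 0), names are hypothetical.
 */
#if 0
struct example_node {
	struct example_node	*en_next;
};

static struct example_node * volatile example_head;

static __inline void
example_push(struct example_node *n)
{
	struct example_node *old;

	do {
		old = example_head;
		n->en_next = old;
		/* Retry if another CPU changed the head in the meantime. */
	} while (atomic_cmpset_ptr(&example_head, old, n) == 0);
}
#endif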

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */