/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/include/atomic.h 165633 2006-12-29 14:28:23Z bde $
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

/*
 * Various simple arithmetic on memory which is atomic in the presence
 * of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_readandclear_int(P)	(return *(u_int *)P; *(u_int *)P = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_readandclear_long(P)	(return *(u_long *)P; *(u_long *)P = 0;)
 */
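
/*
 * Informal usage sketch (not part of the interface; the variables below
 * are hypothetical and only illustrate the operations listed above):
 *
 *	static volatile u_int refcount;
 *	static volatile u_int flags;
 *
 *	atomic_add_int(&refcount, 1);		refcount += 1, atomically
 *	atomic_set_int(&flags, 0x01);		flags |= 0x01, atomically
 *	atomic_clear_int(&flags, 0x01);		flags &= ~0x01, atomically
 *	atomic_subtract_int(&refcount, 1);	refcount -= 1, atomically
 */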

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);

#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)			\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p);	\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, assume the SMP case and use lock prefixes so that
 * the binaries will run on both types of systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is volatilized to demark potential before-and-after side
 * effects if an interrupt or SMP collision were to occur.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "=m" (*p)					\
	: CONS (V), "m" (*p));				\
}							\
struct __hack
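
/*
 * For reference (informal, not generated by this file):
 * ATOMIC_ASM(add, int, "addl %1,%0", "ir", v) expands to roughly
 *
 *	static __inline void
 *	atomic_add_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile(MPLOCKED "addl %1,%0"
 *		: "=m" (*p)
 *		: "ir" (v), "m" (*p));
 *	}
 *
 * with the lock prefix present only when MPLOCKED is non-empty.
 */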

/*
 * Atomic compare and set, used by the mutex functions
 *
 * if (*dst == exp) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */

#if defined(CPU_DISABLE_CMPXCHG)

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	u_char res;

	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%3,%4 ;		"
	"	jne	1f ;		"
	"	movl	%2,%1 ;		"
	"1:				"
	"       sete	%0 ;		"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "r" (exp),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory");

	return (res);
}

#else /* defined(CPU_DISABLE_CMPXCHG) */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %2,%1 ;	"
	"       sete	%0 ;		"
	"1:				"
	"# atomic_cmpset_int"
	: "=a" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "a" (exp),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory");

	return (res);
}

#endif /* defined(CPU_DISABLE_CMPXCHG) */
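
/*
 * Informal usage sketch: a minimal spin lock built on the compare-and-set
 * above.  The function names are hypothetical and not part of this header;
 * the acquire/release aliases used here are defined later in this file.
 */
#if 0	/* example only, never compiled */
static __inline void
example_spin_lock(volatile u_int *lk)
{

	while (atomic_cmpset_acq_int(lk, 0, 1) == 0)
		;	/* spin until the 0 -> 1 transition succeeds */
}

static __inline void
example_spin_unlock(volatile u_int *lk)
{

	atomic_store_rel_int(lk, 0);	/* release: prior stores visible first */
}
#endif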

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0, %1 ;	"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 (result) */
	  "=m" (*p)			/* 1 */
	: "m" (*p));			/* 2 */

	return (v);
}
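
/*
 * Informal example: because atomic_fetchadd_int() returns the value that
 * was in *p before the addition, it can hand out unique, increasing IDs.
 * example_next_id() is a hypothetical name used only for illustration.
 */
#if 0	/* example only, never compiled */
static __inline u_int
example_next_id(volatile u_int *counter)
{

	/* Each caller observes a distinct pre-increment value. */
	return (atomic_fetchadd_int(counter, 1));
}
#endif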

#if defined(_KERNEL) && !defined(SMP)

/*
 * We assume that a = b will do atomic loads and stores.  However, on a
 * PentiumPro or higher, reads may pass writes, so for that case we have
 * to use a serializing instruction (i.e. with LOCK) to do the load in
 * SMP kernels.  For UP kernels, however, the cache of the single processor
 * is always consistent, so we don't need any memory barriers.
 */
#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	return (*p);					\
}							\
							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	*p = v;						\
}							\
struct __hack

#else /* defined(SMP) */

#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(MPLOCKED LOP			\
	: "=a" (res),			/* 0 (result) */\
	  "=m" (*p)			/* 1 */		\
	: "m" (*p)			/* 2 */		\
	: "memory");					\
							\
	return (res);					\
}							\
							\
/*							\
 * The XCHG instruction asserts LOCK automagically.	\
 */							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(SOP				\
	: "=m" (*p),			/* 0 */		\
	  "+r" (v)			/* 1 */		\
	: "m" (*p));			/* 2 */		\
}							\
struct __hack

#endif /* !defined(SMP) */
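
/*
 * Informal example of the acquire/release pairing provided by
 * atomic_load_acq_<type>() and atomic_store_rel_<type>(): a producer fills
 * a buffer and then publishes it with a release store; a consumer that sees
 * the flag via an acquire load also sees the buffer contents.  The structure
 * and function names below are hypothetical.
 */
#if 0	/* example only, never compiled */
struct example_mailbox {
	u_int		data;
	volatile u_int	ready;
};

static __inline void
example_post(struct example_mailbox *mb, u_int d)
{

	mb->data = d;				/* ordinary store */
	atomic_store_rel_int(&mb->ready, 1);	/* release: data visible first */
}

static __inline int
example_poll(struct example_mailbox *mb, u_int *dp)
{

	if (atomic_load_acq_int(&mb->ready) == 0)
		return (0);
	*dp = mb->data;				/* ordered after the acquire */
	return (1);
}
#endif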

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

ATOMIC_STORE_LOAD(char,	"cmpxchgb %b0,%1", "xchgb %b1,%0");
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
ATOMIC_STORE_LOAD(int,	"cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long,	"cmpxchgl %0,%1",  "xchgl %1,%0");

#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD

#if !defined(WANT_FUNCTIONS)

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long exp, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)exp,
	    (u_int)src));
}

/* Read the current value and store a zero in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int result;

	result = 0;
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_int"
	: "+r" (result),		/* 0 (result) */
	  "=m" (*addr)			/* 1 (addr) */
	: "m" (*addr));

	return (result);
}

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long result;

	result = 0;
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_long"
	: "+r" (result),		/* 0 (result) */
	  "=m" (*addr)			/* 1 (addr) */
	: "m" (*addr));

	return (result);
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_readandclear_int(volatile u_int *);
u_long	atomic_readandclear_long(volatile u_long *);

#endif /* __GNUCLIKE_ASM */
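
/*
 * Informal example: atomic_readandclear_int() is convenient for draining a
 * word of pending-event bits that other CPUs or interrupt handlers may set
 * concurrently.  example_pending and example_handle_events() are
 * hypothetical names used only for illustration.
 */
#if 0	/* example only, never compiled */
static volatile u_int example_pending;

static __inline void
example_drain(void)
{
	u_int ev;

	/* Grab all currently pending bits and reset the word to 0 atomically. */
	ev = atomic_readandclear_int(&example_pending);
	if (ev != 0)
		example_handle_events(ev);
}
#endif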

/* Acquire and release variants are identical to the normal ones. */
#define	atomic_set_acq_char		atomic_set_char
#define	atomic_set_rel_char		atomic_set_char
#define	atomic_clear_acq_char		atomic_clear_char
#define	atomic_clear_rel_char		atomic_clear_char
#define	atomic_add_acq_char		atomic_add_char
#define	atomic_add_rel_char		atomic_add_char
#define	atomic_subtract_acq_char	atomic_subtract_char
#define	atomic_subtract_rel_char	atomic_subtract_char

#define	atomic_set_acq_short		atomic_set_short
#define	atomic_set_rel_short		atomic_set_short
#define	atomic_clear_acq_short		atomic_clear_short
#define	atomic_clear_rel_short		atomic_clear_short
#define	atomic_add_acq_short		atomic_add_short
#define	atomic_add_rel_short		atomic_add_short
#define	atomic_subtract_acq_short	atomic_subtract_short
#define	atomic_subtract_rel_short	atomic_subtract_short

#define	atomic_set_acq_int		atomic_set_int
#define	atomic_set_rel_int		atomic_set_int
#define	atomic_clear_acq_int		atomic_clear_int
#define	atomic_clear_rel_int		atomic_clear_int
#define	atomic_add_acq_int		atomic_add_int
#define	atomic_add_rel_int		atomic_add_int
#define	atomic_subtract_acq_int		atomic_subtract_int
#define	atomic_subtract_rel_int		atomic_subtract_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_long
#define	atomic_set_rel_long		atomic_set_long
#define	atomic_clear_acq_long		atomic_clear_long
#define	atomic_clear_rel_long		atomic_clear_long
#define	atomic_add_acq_long		atomic_add_long
#define	atomic_add_rel_long		atomic_add_long
#define	atomic_subtract_acq_long	atomic_subtract_long
#define	atomic_subtract_rel_long	atomic_subtract_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int

/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))
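
/*
 * Informal example: the pointer wrappers above are enough for a simple
 * lock-free singly-linked push.  struct example_node and example_push() are
 * hypothetical; a matching pop would additionally have to cope with the ABA
 * problem, which is not shown here.
 */
#if 0	/* example only, never compiled */
struct example_node {
	struct example_node *next;
};

static __inline void
example_push(struct example_node * volatile *head, struct example_node *n)
{

	do {
		n->next = *head;
	} while (atomic_cmpset_ptr(head, n->next, n) == 0);
}
#endif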

#endif /* !defined(WANT_FUNCTIONS) */

#endif /* !_MACHINE_ATOMIC_H_ */
