/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/include/atomic.h 147855 2005-07-09 12:38:53Z jhb $
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char*)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char*)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char*)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char*)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short*)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short*)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short*)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short*)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int*)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int*)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int*)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int*)(P) -= (V))
 * atomic_readandclear_int(P)	(return  *(u_int*)P; *(u_int*)P = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long*)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long*)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long*)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long*)(P) -= (V))
 * atomic_readandclear_long(P)	(return  *(u_long*)P; *(u_long*)P = 0;)
 */

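/*
 * For example, a caller might use these as follows.  This is an
 * illustrative sketch only; "cnt" and "flags" are hypothetical shared
 * variables, not part of this interface:
 *
 *	volatile u_int cnt, flags;
 *
 *	atomic_add_int(&cnt, 1);	increment cnt atomically
 *	atomic_set_int(&flags, 0x1);	atomically OR in bit 0
 *	atomic_clear_int(&flags, 0x1);	atomically clear bit 0
 */
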
/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);

#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)			\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p);	\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, assume the SMP case and use lock prefixes so that
 * the binaries will run on both types of systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	lock ;
#else
#define	MPLOCKED
#endif

/*
 * The assembly is marked volatile so the compiler will not reorder or
 * elide it: the memory it operates on may be changed at any time by an
 * interrupt or by another processor.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(__XSTRING(MPLOCKED) OP		\
			 : "+m" (*p)			\
			 : CONS (V));			\
}							\
struct __hack

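/*
 * For example, ATOMIC_ASM(add, int, "addl %1,%0", "ir", v) below expands
 * to roughly the following (__XSTRING(MPLOCKED) becomes "lock ;" for SMP
 * kernels and userland, and the empty string for UP kernels):
 *
 *	static __inline void
 *	atomic_add_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile("lock ; addl %1,%0" : "+m" (*p) : "ir" (v));
 *	}
 */
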
/*
 * Atomic compare and set, used by the mutex functions
 *
 * if (*dst == exp) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */

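/*
 * For example, a lock-free increment is typically written as a retry
 * loop.  This is an illustrative sketch only; "p" is a hypothetical
 * pointer to a shared counter:
 *
 *	u_int old;
 *
 *	do {
 *		old = *p;
 *	} while (atomic_cmpset_int(p, old, old + 1) == 0);
 */
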
#if defined(CPU_DISABLE_CMPXCHG)

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	int res = exp;

	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%0,%2 ;		"
	"	jne	1f ;		"
	"	movl	%1,%2 ;		"
	"1:				"
	"       sete	%%al;		"
	"	movzbl	%%al,%0 ;	"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "+a" (res)			/* 0 (result) */
	: "r" (src),			/* 1 */
	  "m" (*(dst))			/* 2 */
	: "memory");

	return (res);
}

#else /* defined(CPU_DISABLE_CMPXCHG) */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	int res = exp;

	__asm __volatile (
	"	" __XSTRING(MPLOCKED) "	"
	"	cmpxchgl %1,%2 ;	"
	"       setz	%%al ;		"
	"	movzbl	%%al,%0 ;	"
	"1:				"
	"# atomic_cmpset_int"
	: "+a" (res)			/* 0 (result) */
	: "r" (src),			/* 1 */
	  "m" (*(dst))			/* 2 */
	: "memory");

	return (res);
}

#endif /* defined(CPU_DISABLE_CMPXCHG) */

#if defined(_KERNEL) && !defined(SMP)

/*
 * We assume that a = b will do atomic loads and stores.  However, on a
 * PentiumPro or higher, reads may pass writes, so for that case we have
 * to use a serializing instruction (i.e. with LOCK) to do the load in
 * SMP kernels.  For UP kernels, however, the cache of the single processor
 * is always consistent, so we don't need any memory barriers.
 */
#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	return (*p);					\
}							\
							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	*p = v;						\
}							\
struct __hack

#else /* defined(SMP) */

#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(__XSTRING(MPLOCKED) LOP	\
	: "=a" (res),			/* 0 (result) */\
	  "+m" (*p)			/* 1 */		\
	: : "memory");					\
							\
	return (res);					\
}							\
							\
/*							\
 * The XCHG instruction asserts LOCK automagically.	\
 */							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(SOP				\
	: "+m" (*p),			/* 0 */		\
	  "+r" (v)			/* 1 */		\
	: : "memory");					\
}							\
struct __hack

#endif	/* !defined(SMP) */

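/*
 * The load_acq/store_rel pairs are typically used to hand data off from
 * one CPU or interrupt context to another.  This is an illustrative
 * sketch only; "data", "ready", produce() and consume() are hypothetical:
 *
 *	producer:
 *		data = produce();
 *		atomic_store_rel_int(&ready, 1);	data is visible first
 *
 *	consumer:
 *		while (atomic_load_acq_int(&ready) == 0)
 *			continue;
 *		consume(data);				sees the producer's data
 */
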
#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

ATOMIC_STORE_LOAD(char,	"cmpxchgb %b0,%1", "xchgb %b1,%0");
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
ATOMIC_STORE_LOAD(int,	"cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long,	"cmpxchgl %0,%1",  "xchgl %1,%0");

#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD

#if !defined(WANT_FUNCTIONS)

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long exp, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)exp,
	    (u_int)src));
}

/* Read the current value and store a zero in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int result;

	__asm __volatile (
	"	xorl	%0,%0 ;		"
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_int"
	: "=&r" (result)		/* 0 (result) */
	: "m" (*addr));			/* 1 (addr) */

	return (result);
}

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long result;

	__asm __volatile (
	"	xorl	%0,%0 ;		"
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_long"
	: "=&r" (result)		/* 0 (result) */
	: "m" (*addr));			/* 1 (addr) */

	return (result);
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_readandclear_int(volatile u_int *);
u_long	atomic_readandclear_long(volatile u_long *);

#endif /* __GNUCLIKE_ASM */

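/*
 * atomic_readandclear_int() is handy for taking an atomic snapshot of a
 * word of pending flags and resetting it in one step.  This is an
 * illustrative sketch only; "pending" is a hypothetical shared word:
 *
 *	volatile u_int pending;
 *	u_int work;
 *
 *	work = atomic_readandclear_int(&pending);
 *	... process the bits set in work ...
 */
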
/* Acquire and release variants are identical to the normal ones. */
#define	atomic_set_acq_char		atomic_set_char
#define	atomic_set_rel_char		atomic_set_char
#define	atomic_clear_acq_char		atomic_clear_char
#define	atomic_clear_rel_char		atomic_clear_char
#define	atomic_add_acq_char		atomic_add_char
#define	atomic_add_rel_char		atomic_add_char
#define	atomic_subtract_acq_char	atomic_subtract_char
#define	atomic_subtract_rel_char	atomic_subtract_char

#define	atomic_set_acq_short		atomic_set_short
#define	atomic_set_rel_short		atomic_set_short
#define	atomic_clear_acq_short		atomic_clear_short
#define	atomic_clear_rel_short		atomic_clear_short
#define	atomic_add_acq_short		atomic_add_short
#define	atomic_add_rel_short		atomic_add_short
#define	atomic_subtract_acq_short	atomic_subtract_short
#define	atomic_subtract_rel_short	atomic_subtract_short

#define	atomic_set_acq_int		atomic_set_int
#define	atomic_set_rel_int		atomic_set_int
#define	atomic_clear_acq_int		atomic_clear_int
#define	atomic_clear_rel_int		atomic_clear_int
#define	atomic_add_acq_int		atomic_add_int
#define	atomic_add_rel_int		atomic_add_int
#define	atomic_subtract_acq_int		atomic_subtract_int
#define	atomic_subtract_rel_int		atomic_subtract_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_long
#define	atomic_set_rel_long		atomic_set_long
#define	atomic_clear_acq_long		atomic_clear_long
#define	atomic_clear_rel_long		atomic_clear_long
#define	atomic_add_acq_long		atomic_add_long
#define	atomic_add_rel_long		atomic_add_long
#define	atomic_subtract_acq_long	atomic_subtract_long
#define	atomic_subtract_rel_long	atomic_subtract_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long

#define	atomic_cmpset_acq_ptr		atomic_cmpset_ptr
#define	atomic_cmpset_rel_ptr		atomic_cmpset_ptr

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_readandclear_32	atomic_readandclear_int

/* Operations on pointers. */
static __inline int
atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)exp,
	    (u_int)src));
}

static __inline void *
atomic_load_acq_ptr(volatile void *p)
{
	/*
	 * The apparently-bogus cast to intptr_t in the following is to
	 * avoid a warning from "gcc -Wbad-function-cast".
	 */
	return ((void *)(intptr_t)atomic_load_acq_int((volatile u_int *)p));
}

static __inline void
atomic_store_rel_ptr(volatile void *p, void *v)
{
	atomic_store_rel_int((volatile u_int *)p, (u_int)v);
}

#define	ATOMIC_PTR(NAME)				\
static __inline void					\
atomic_##NAME##_ptr(volatile void *p, uintptr_t v)	\
{							\
	atomic_##NAME##_int((volatile u_int *)p, v);	\
}							\
							\
static __inline void					\
atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v)	\
{							\
	atomic_##NAME##_acq_int((volatile u_int *)p, v);\
}							\
							\
static __inline void					\
atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v)	\
{							\
	atomic_##NAME##_rel_int((volatile u_int *)p, v);\
}

ATOMIC_PTR(set)
ATOMIC_PTR(clear)
ATOMIC_PTR(add)
ATOMIC_PTR(subtract)

#undef ATOMIC_PTR

#endif	/* !defined(WANT_FUNCTIONS) */
#endif /* ! _MACHINE_ATOMIC_H_ */