/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/amd64/include/atomic.h 100327 2002-07-18 15:56:46Z markm $
 */
#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

/*
 * Various simple arithmetic on memory which is atomic in the presence
 * of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char*)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char*)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char*)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char*)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short*)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short*)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short*)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short*)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int*)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int*)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int*)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int*)(P) -= (V))
 * atomic_readandclear_int(P)	(return *(u_int*)P; *(u_int*)P = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long*)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long*)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long*)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long*)(P) -= (V))
 * atomic_readandclear_long(P)	(return *(u_long*)P; *(u_long*)P = 0;)
 */
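
/*
 * For example, a reference count shared between an interrupt handler
 * and the top half can be maintained with these operations.  A minimal
 * usage sketch (the variables here are illustrative, not part of this
 * header):
 *
 *	static volatile u_int refs;
 *	static volatile u_int flags;
 *
 *	atomic_add_int(&refs, 1);	take a reference
 *	atomic_subtract_int(&refs, 1);	drop a reference
 *	atomic_set_int(&flags, 0x1);	set a flag bit atomically
 *	atomic_clear_int(&flags, 0x1);	clear it again
 */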

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE)
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);

#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)			\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p);	\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)
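
/*
 * For example, under KLD_MODULE the invocation
 *
 *	ATOMIC_ASM(add, int, "addl %1,%0", "ir", v);
 *
 * expands to just the prototype
 *
 *	void atomic_add_int(volatile u_int *p, u_int v);
 *
 * so a module links against the function compiled into the kernel,
 * whose body already matches that kernel's UP or SMP configuration.
 */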

#else /* !KLD_MODULE */

#ifdef __GNUC__

/*
 * For userland, assume the SMP case and use lock prefixes so that
 * the binaries will run on both types of systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define MPLOCKED	lock ;
#else
#define MPLOCKED
#endif

/*
 * The assembly is marked volatile to keep the compiler from reordering
 * or eliding it: an interrupt or another processor may touch the
 * operand immediately before or after the instruction executes.
 */
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(__XSTRING(MPLOCKED) OP		\
			 : "+m" (*p)			\
			 : CONS (V));			\
}
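
/*
 * As an illustration, when MPLOCKED is `lock ;' (SMP kernel or
 * userland) the invocation
 *
 *	ATOMIC_ASM(add, int, "addl %1,%0", "ir", v);
 *
 * expands to the equivalent of
 *
 *	static __inline void
 *	atomic_add_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile("lock ; " "addl %1,%0"
 *				 : "+m" (*p)
 *				 : "ir" (v));
 *	}
 *
 * while a UP kernel leaves MPLOCKED empty and omits the lock prefix.
 */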

#else /* !__GNUC__ */

#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)				\
extern void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#endif /* __GNUC__ */

/*
 * Atomic compare and set, used by the mutex functions
 *
 * if (*dst == exp) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */

#if defined(__GNUC__)

#if defined(I386_CPU)

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	int res = exp;

	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%0,%2 ;		"
	"	jne	1f ;		"
	"	movl	%1,%2 ;		"
	"1:				"
	"	sete	%%al ;		"
	"	movzbl	%%al,%0 ;	"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "+a" (res)			/* 0 (result) */
	: "r" (src),			/* 1 */
	  "m" (*(dst))			/* 2 */
	: "memory");

	return (res);
}

#else /* defined(I386_CPU) */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	int res = exp;

	__asm __volatile (
	"	" __XSTRING(MPLOCKED) "	"
	"	cmpxchgl %1,%2 ;	"
	"	setz	%%al ;		"
	"	movzbl	%%al,%0 ;	"
	"1:				"
	"# atomic_cmpset_int"
	: "+a" (res)			/* 0 (result) */
	: "r" (src),			/* 1 */
	  "m" (*(dst))			/* 2 */
	: "memory");

	return (res);
}

#endif /* defined(I386_CPU) */

#endif /* defined(__GNUC__) */
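
/*
 * A minimal sketch of the mutex-style use mentioned above, assuming a
 * plain lock word (illustrative only, not the kernel's actual mutex
 * implementation):
 *
 *	static volatile u_int lk;	0 == free, 1 == owned
 *
 *	while (atomic_cmpset_int(&lk, 0, 1) == 0)
 *		;			spin until the cmpset wins
 *	... critical section ...
 *	atomic_store_rel_int(&lk, 0);	release
 */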

#if defined(__GNUC__)

#if defined(I386_CPU)

/*
 * We assume that a = b will do atomic loads and stores.
 *
 * XXX: This is _NOT_ safe on a P6 or higher because it does not guarantee
 * memory ordering.  These should only be used on a 386.
 */
#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	return (*p);					\
}							\
							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	*p = v;						\
	__asm __volatile("" : : : "memory");		\
}

#else /* !defined(I386_CPU) */

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(__XSTRING(MPLOCKED) LOP	\
	: "=a" (res),			/* 0 (result) */\
	  "+m" (*p)			/* 1 */		\
	: : "memory");					\
							\
	return (res);					\
}							\
							\
/*							\
 * The XCHG instruction asserts LOCK automagically.	\
 */							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(SOP				\
	: "+m" (*p),			/* 0 */		\
	  "+r" (v)			/* 1 */		\
	: : "memory");					\
}

#endif	/* defined(I386_CPU) */
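
/*
 * Illustrative pairing of these operations (the names are
 * hypothetical): a release store publishes data that a subsequent
 * acquire load is guaranteed to observe.
 *
 *	producer:
 *		data = compute();			plain store
 *		atomic_store_rel_int(&ready, 1);	publish
 *
 *	consumer:
 *		while (atomic_load_acq_int(&ready) == 0)
 *			;				wait for publish
 *		consume(data);		sees the producer's plain store
 */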

#else /* !defined(__GNUC__) */

extern int atomic_cmpset_int(volatile u_int *, u_int, u_int);

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)				\
extern u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p);		\
extern void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#endif /* defined(__GNUC__) */

#endif /* KLD_MODULE */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

ATOMIC_STORE_LOAD(char,	"cmpxchgb %b0,%1", "xchgb %b1,%0");
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
ATOMIC_STORE_LOAD(int,	"cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long,	"cmpxchgl %0,%1",  "xchgl %1,%0");

#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD

#define	atomic_set_acq_char		atomic_set_char
#define	atomic_set_rel_char		atomic_set_char
#define	atomic_clear_acq_char		atomic_clear_char
#define	atomic_clear_rel_char		atomic_clear_char
#define	atomic_add_acq_char		atomic_add_char
#define	atomic_add_rel_char		atomic_add_char
#define	atomic_subtract_acq_char	atomic_subtract_char
#define	atomic_subtract_rel_char	atomic_subtract_char

#define	atomic_set_acq_short		atomic_set_short
#define	atomic_set_rel_short		atomic_set_short
#define	atomic_clear_acq_short		atomic_clear_short
#define	atomic_clear_rel_short		atomic_clear_short
#define	atomic_add_acq_short		atomic_add_short
#define	atomic_add_rel_short		atomic_add_short
#define	atomic_subtract_acq_short	atomic_subtract_short
#define	atomic_subtract_rel_short	atomic_subtract_short

#define	atomic_set_acq_int		atomic_set_int
#define	atomic_set_rel_int		atomic_set_int
#define	atomic_clear_acq_int		atomic_clear_int
#define	atomic_clear_rel_int		atomic_clear_int
#define	atomic_add_acq_int		atomic_add_int
#define	atomic_add_rel_int		atomic_add_int
#define	atomic_subtract_acq_int		atomic_subtract_int
#define	atomic_subtract_rel_int		atomic_subtract_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_long
#define	atomic_set_rel_long		atomic_set_long
#define	atomic_clear_acq_long		atomic_clear_long
#define	atomic_clear_rel_long		atomic_clear_long
#define	atomic_add_acq_long		atomic_add_long
#define	atomic_add_rel_long		atomic_add_long
#define	atomic_subtract_acq_long	atomic_subtract_long
#define	atomic_subtract_rel_long	atomic_subtract_long
#define	atomic_cmpset_long		atomic_cmpset_int
#define	atomic_cmpset_acq_long		atomic_cmpset_acq_int
#define	atomic_cmpset_rel_long		atomic_cmpset_rel_int

#define	atomic_cmpset_acq_ptr		atomic_cmpset_ptr
#define	atomic_cmpset_rel_ptr		atomic_cmpset_ptr

#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_readandclear_32	atomic_readandclear_int

#if !defined(WANT_FUNCTIONS)
static __inline int
atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)exp,
	    (u_int)src));
}

static __inline void *
atomic_load_acq_ptr(volatile void *p)
{
	return (void *)atomic_load_acq_int((volatile u_int *)p);
}

static __inline void
atomic_store_rel_ptr(volatile void *p, void *v)
{
	atomic_store_rel_int((volatile u_int *)p, (u_int)v);
}

#define ATOMIC_PTR(NAME)				\
static __inline void					\
atomic_##NAME##_ptr(volatile void *p, uintptr_t v)	\
{							\
	atomic_##NAME##_int((volatile u_int *)p, v);	\
}							\
							\
static __inline void					\
atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v)	\
{							\
	atomic_##NAME##_acq_int((volatile u_int *)p, v);\
}							\
							\
static __inline void					\
atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v)	\
{							\
	atomic_##NAME##_rel_int((volatile u_int *)p, v);\
}

ATOMIC_PTR(set)
ATOMIC_PTR(clear)
ATOMIC_PTR(add)
ATOMIC_PTR(subtract)

#undef ATOMIC_PTR
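
/*
 * A sketch of the pointer variants in use: a lock-free push onto a
 * singly-linked list (the types and names here are hypothetical):
 *
 *	struct node { struct node *next; };
 *	static struct node *volatile head;
 *
 *	void
 *	push(struct node *n)
 *	{
 *		do {
 *			n->next = (struct node *)atomic_load_acq_ptr(&head);
 *		} while (atomic_cmpset_ptr(&head, n->next, n) == 0);
 *	}
 *
 * Note that these helpers cast through u_int and therefore assume
 * that pointers are the same width as u_int.
 */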

#if defined(__GNUC__)

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int result;

	__asm __volatile (
	"	xorl	%0,%0 ;		"
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_int"
	: "=&r" (result)		/* 0 (result) */
	: "m" (*addr));			/* 1 (addr) */

	return (result);
}

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long result;

	__asm __volatile (
	"	xorl	%0,%0 ;		"
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_long"
	: "=&r" (result)		/* 0 (result) */
	: "m" (*addr));			/* 1 (addr) */

	return (result);
}

#else /* !defined(__GNUC__) */

extern u_long	atomic_readandclear_long(volatile u_long *);
extern u_int	atomic_readandclear_int(volatile u_int *);

#endif /* defined(__GNUC__) */
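
/*
 * atomic_readandclear_int is handy for draining a mask of pending
 * events in one step.  An illustrative sketch (names are
 * hypothetical):
 *
 *	static volatile u_int pending_events;
 *
 *	u_int ev = atomic_readandclear_int(&pending_events);
 *	while (ev != 0)
 *		... handle and clear the lowest set bit of ev ...
 *
 * No concurrently posted event is lost: each is either returned in
 * 'ev' or remains in pending_events for the next pass.
 */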

#endif	/* !defined(WANT_FUNCTIONS) */
#endif /* ! _MACHINE_ATOMIC_H_ */