/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/include/atomic.h 72358 2001-02-11 10:44:09Z markm $
 */
#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

/*
 * Various simple arithmetic on memory which is atomic in the presence
 * of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char*)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char*)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char*)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char*)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short*)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short*)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short*)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short*)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int*)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int*)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int*)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int*)(P) -= (V))
 * atomic_readandclear_int(P)	(return *(u_int*)P; *(u_int*)P = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long*)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long*)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long*)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long*)(P) -= (V))
 * atomic_readandclear_long(P)	(return *(u_long*)P; *(u_long*)P = 0;)
 */
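
/*
 * Illustrative sketch only (not part of this interface and never compiled):
 * typical use of the operations listed above on a shared flag word and a
 * shared counter.  The names example_flags, example_count and EXAMPLE_BUSY
 * are hypothetical.
 */
#if 0
static volatile u_int	example_flags;
static volatile u_int	example_count;
#define	EXAMPLE_BUSY	0x0001

static __inline void
example_mark_busy(void)
{
	atomic_set_int(&example_flags, EXAMPLE_BUSY);	/* flags |= EXAMPLE_BUSY */
	atomic_add_int(&example_count, 1);		/* count += 1 */
}

static __inline void
example_mark_idle(void)
{
	atomic_clear_int(&example_flags, EXAMPLE_BUSY);	/* flags &= ~EXAMPLE_BUSY */
	atomic_subtract_int(&example_count, 1);		/* count -= 1 */
}
#endif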

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE)
#define ATOMIC_ASM(NAME, TYPE, OP, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);

int atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);

#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)			\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p);	\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v);

#else /* !KLD_MODULE */

#if defined(SMP)
#if defined(LOCORE)
#define	MPLOCKED	lock ;
#else /* !LOCORE */
#define	MPLOCKED	"lock ; "
#endif /* LOCORE */
#else /* !SMP */
#define	MPLOCKED
#endif /* SMP */

#if !defined(LOCORE)
/*
 * The assembly is marked volatile to demarcate the potential before-and-after
 * side effects of an interrupt or an SMP collision.
 */
#define ATOMIC_ASM(NAME, TYPE, OP, V)			\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
			 : "=m" (*p)			\
			 :  "0" (*p), "ir" (V));	\
}
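
/*
 * For reference (sketch only, never compiled here), the invocation
 * ATOMIC_ASM(add, int, "addl %2,%0", v) further down expands to roughly:
 */
#if 0
static __inline void
atomic_add_int(volatile u_int *p, u_int v)
{
	__asm __volatile(MPLOCKED "addl %2,%0"
			 : "=m" (*p)
			 :  "0" (*p), "ir" (v));
}
#endif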

/*
 * Atomic compare and set, used by the mutex functions
 *
 * if (*dst == exp) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */

#if defined(I386_CPU)
static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	int res = exp;

	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%1,%3 ;		"
	"	jne	1f ;		"
	"	movl	%2,%3 ;		"
	"1:				"
	"	sete	%%al ;		"
	"	movzbl	%%al,%0 ;	"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "=a" (res)			/* 0 (result) */
	: "0" (exp),			/* 1 */
	  "r" (src),			/* 2 */
	  "m" (*(dst))			/* 3 */
	: "memory");

	return (res);
}
#else /* defined(I386_CPU) */
static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	int res = exp;

	__asm __volatile (
	"	" MPLOCKED "		"
	"	cmpxchgl %2,%3 ;	"
	"	setz	%%al ;		"
	"	movzbl	%%al,%0 ;	"
	"1:				"
	"# atomic_cmpset_int"
	: "=a" (res)			/* 0 (result) */
	: "0" (exp),			/* 1 */
	  "r" (src),			/* 2 */
	  "m" (*(dst))			/* 3 */
	: "memory");

	return (res);
}
#endif /* defined(I386_CPU) */

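/*
 * Illustrative sketch only (never compiled): the canonical retry loop built
 * on atomic_cmpset_int, here adding to a word atomically without using
 * atomic_add_int.  The helper name example_cmpset_add is hypothetical.
 */
#if 0
static __inline void
example_cmpset_add(volatile u_int *p, u_int v)
{
	u_int old;

	do {
		old = *p;		/* snapshot the current value */
	} while (atomic_cmpset_int(p, old, old + v) == 0);
}
#endif
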
#if defined(I386_CPU)
/*
 * We assume that a = b will do atomic loads and stores.
 *
 * XXX: This is _NOT_ safe on a P6 or higher because it does not guarantee
 * memory ordering.  These should only be used on a 386.
 */
#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	return (*p);					\
}							\
							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	*p = v;						\
	__asm __volatile("" : : : "memory");		\
}
#else

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(MPLOCKED LOP			\
	: "=a" (res),			/* 0 (result) */\
	  "+m" (*p)			/* 1 */		\
	: : "cc", "memory");				\
							\
	return (res);					\
}							\
							\
/*							\
 * The XCHG instruction asserts LOCK automagically.	\
 */							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(SOP				\
	: "+m" (*p),			/* 0 */		\
	  "+r" (v)			/* 1 */		\
	: : "memory");					\
}
#endif	/* defined(I386_CPU) */
#endif	/* !defined(LOCORE) */
#endif /* KLD_MODULE */

#if !defined(LOCORE)
ATOMIC_ASM(set,	     char,  "orb %b2,%0",   v)
ATOMIC_ASM(clear,    char,  "andb %b2,%0", ~v)
ATOMIC_ASM(add,	     char,  "addb %b2,%0",  v)
ATOMIC_ASM(subtract, char,  "subb %b2,%0",  v)

ATOMIC_ASM(set,	     short, "orw %w2,%0",   v)
ATOMIC_ASM(clear,    short, "andw %w2,%0", ~v)
ATOMIC_ASM(add,	     short, "addw %w2,%0",  v)
ATOMIC_ASM(subtract, short, "subw %w2,%0",  v)

ATOMIC_ASM(set,	     int,   "orl %2,%0",   v)
ATOMIC_ASM(clear,    int,   "andl %2,%0", ~v)
ATOMIC_ASM(add,	     int,   "addl %2,%0",  v)
ATOMIC_ASM(subtract, int,   "subl %2,%0",  v)

ATOMIC_ASM(set,	     long,  "orl %2,%0",   v)
ATOMIC_ASM(clear,    long,  "andl %2,%0", ~v)
ATOMIC_ASM(add,	     long,  "addl %2,%0",  v)
ATOMIC_ASM(subtract, long,  "subl %2,%0",  v)

ATOMIC_STORE_LOAD(char,	"cmpxchgb %b0,%1", "xchgb %b1,%0")
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0")
ATOMIC_STORE_LOAD(int,	"cmpxchgl %0,%1",  "xchgl %1,%0")
ATOMIC_STORE_LOAD(long,	"cmpxchgl %0,%1",  "xchgl %1,%0")
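
/*
 * Illustrative sketch only (never compiled): a simple producer/consumer
 * hand-off using the acquire/release pairs generated above.  The release
 * store makes the data written before it visible to whoever observes the
 * flag with an acquire load.  The names example_data and example_ready are
 * hypothetical.
 */
#if 0
static volatile u_int	example_data;
static volatile u_int	example_ready;

static __inline void
example_publish(u_int value)
{
	example_data = value;				/* plain store of the payload */
	atomic_store_rel_int(&example_ready, 1);	/* release: publish it */
}

static __inline int
example_consume(u_int *valuep)
{
	if (atomic_load_acq_int(&example_ready) == 0)	/* acquire: payload visible after this */
		return (0);
	*valuep = example_data;
	return (1);
}
#endif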

#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD

#define	atomic_set_acq_char		atomic_set_char
#define	atomic_set_rel_char		atomic_set_char
#define	atomic_clear_acq_char		atomic_clear_char
#define	atomic_clear_rel_char		atomic_clear_char
#define	atomic_add_acq_char		atomic_add_char
#define	atomic_add_rel_char		atomic_add_char
#define	atomic_subtract_acq_char	atomic_subtract_char
#define	atomic_subtract_rel_char	atomic_subtract_char

#define	atomic_set_acq_short		atomic_set_short
#define	atomic_set_rel_short		atomic_set_short
#define	atomic_clear_acq_short		atomic_clear_short
#define	atomic_clear_rel_short		atomic_clear_short
#define	atomic_add_acq_short		atomic_add_short
#define	atomic_add_rel_short		atomic_add_short
#define	atomic_subtract_acq_short	atomic_subtract_short
#define	atomic_subtract_rel_short	atomic_subtract_short

#define	atomic_set_acq_int		atomic_set_int
#define	atomic_set_rel_int		atomic_set_int
#define	atomic_clear_acq_int		atomic_clear_int
#define	atomic_clear_rel_int		atomic_clear_int
#define	atomic_add_acq_int		atomic_add_int
#define	atomic_add_rel_int		atomic_add_int
#define	atomic_subtract_acq_int		atomic_subtract_int
#define	atomic_subtract_rel_int		atomic_subtract_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_long
#define	atomic_set_rel_long		atomic_set_long
#define	atomic_clear_acq_long		atomic_clear_long
#define	atomic_clear_rel_long		atomic_clear_long
#define	atomic_add_acq_long		atomic_add_long
#define	atomic_add_rel_long		atomic_add_long
#define	atomic_subtract_acq_long	atomic_subtract_long
#define	atomic_subtract_rel_long	atomic_subtract_long
#define	atomic_cmpset_long		atomic_cmpset_int
#define	atomic_cmpset_acq_long		atomic_cmpset_acq_int
#define	atomic_cmpset_rel_long		atomic_cmpset_rel_int

#define	atomic_cmpset_acq_ptr		atomic_cmpset_ptr
#define	atomic_cmpset_rel_ptr		atomic_cmpset_ptr

#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_readandclear_32	atomic_readandclear_int

#if !defined(WANT_FUNCTIONS)
static __inline int
atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)exp,
	    (u_int)src));
}

static __inline void *
atomic_load_acq_ptr(volatile void *p)
{
	return (void *)atomic_load_acq_int((volatile u_int *)p);
}

static __inline void
atomic_store_rel_ptr(volatile void *p, void *v)
{
	atomic_store_rel_int((volatile u_int *)p, (u_int)v);
}

#define ATOMIC_PTR(NAME)				\
static __inline void					\
atomic_##NAME##_ptr(volatile void *p, uintptr_t v)	\
{							\
	atomic_##NAME##_int((volatile u_int *)p, v);	\
}							\
							\
static __inline void					\
atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v)	\
{							\
	atomic_##NAME##_acq_int((volatile u_int *)p, v);\
}							\
							\
static __inline void					\
atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v)	\
{							\
	atomic_##NAME##_rel_int((volatile u_int *)p, v);\
}

ATOMIC_PTR(set)
ATOMIC_PTR(clear)
ATOMIC_PTR(add)
ATOMIC_PTR(subtract)

#undef ATOMIC_PTR
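
/*
 * Illustrative sketch only (never compiled): lock-free push onto a singly
 * linked stack using atomic_cmpset_ptr.  The struct and function names are
 * hypothetical.
 */
#if 0
struct example_node {
	struct example_node *next;
};

static __inline void
example_stack_push(struct example_node * volatile *head, struct example_node *n)
{
	do {
		n->next = *head;		/* link to the current head */
	} while (atomic_cmpset_ptr((volatile void *)head, n->next, n) == 0);
}
#endif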

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int result;

	__asm __volatile (
	"	xorl	%0,%0 ;		"
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_int"
	: "=&r" (result)		/* 0 (result) */
	: "m" (*addr));			/* 1 (addr) */

	return (result);
}

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long result;

	__asm __volatile (
	"	xorl	%0,%0 ;		"
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_long"
	: "=&r" (result)		/* 0 (result) */
	: "m" (*addr));			/* 1 (addr) */

	return (result);
}
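
/*
 * Illustrative sketch only (never compiled): draining a word of pending
 * event bits with atomic_readandclear_int, so that bits set concurrently
 * by other CPUs or by interrupts are not lost.  The names are hypothetical.
 */
#if 0
static volatile u_int	example_pending;

static __inline void
example_drain_pending(void (*handle)(u_int bit))
{
	u_int pending, bit;

	pending = atomic_readandclear_int(&example_pending);
	while (pending != 0) {
		bit = pending & -pending;	/* lowest set bit */
		pending &= ~bit;
		handle(bit);
	}
}
#endif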
#endif	/* !defined(WANT_FUNCTIONS) */
#endif	/* !defined(LOCORE) */
#endif /* ! _MACHINE_ATOMIC_H_ */