atomic.h (sys/amd64/include/atomic.h): diff between revision 91469 (2002-02-28, bmilekic) and revision 100251 (2002-07-17, markm). Unchanged context is shown once; lines removed in 100251 are prefixed with "-", lines added are prefixed with "+".
/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright

--- 9 unchanged lines hidden ---

 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
- * $FreeBSD: head/sys/amd64/include/atomic.h 91469 2002-02-28 06:17:05Z bmilekic $
+ * $FreeBSD: head/sys/amd64/include/atomic.h 100251 2002-07-17 16:19:37Z markm $
 */
#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

+#ifndef __GNUC__
+#ifndef lint
+#error "This file must be compiled with GCC or lint"
+#endif /* lint */
+#endif /* __GNUC__ */
+
/*
 * Various simple arithmetic on memory which is atomic in the presence
 * of interrupts and multiple processors.
 *
 * atomic_set_char(P, V) (*(u_char*)(P) |= (V))
 * atomic_clear_char(P, V) (*(u_char*)(P) &= ~(V))
 * atomic_add_char(P, V) (*(u_char*)(P) += (V))
 * atomic_subtract_char(P, V) (*(u_char*)(P) -= (V))

--- 21 unchanged lines hidden ---

 * kernel. Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
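For orientation, a minimal userland sketch of these operations in use; the counter and flag variables are hypothetical, and <sys/types.h> is included for the u_int typedef:

    #include <sys/types.h>
    #include <machine/atomic.h>

    static volatile u_int refs;    /* hypothetical reference count */
    static volatile u_int flags;   /* hypothetical flag word */

    void
    example(void)
    {
        atomic_add_int(&refs, 1);        /* atomically: refs += 1 */
        atomic_set_int(&flags, 0x04);    /* atomically: flags |= 0x04 */
        atomic_clear_int(&flags, 0x04);  /* atomically: flags &= ~0x04 */
        atomic_subtract_int(&refs, 1);   /* atomically: refs -= 1 */
    }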
#if defined(KLD_MODULE)
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V) \
-void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);
+void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP) \
u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p); \
-void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v);
+void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)
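Note that the trailing semicolons move out of the macro bodies here; each invocation now supplies its own (see the invocation list further down), presumably so the same invocation line works whether the macro expands to an inline definition, a bare prototype, or a lint stub. For example, in a module build:

    /* ATOMIC_ASM(add, int, "addl %1,%0", "ir", v); expands to just a prototype: */
    void atomic_add_int(volatile u_int *p, u_int v);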

#else /* !KLD_MODULE */

/*
 * For userland, assume the SMP case and use lock prefixes so that
 * the binaries will run on both types of systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define MPLOCKED lock ;
#else
#define MPLOCKED
#endif
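MPLOCKED is pasted in front of each instruction template and stringified with __XSTRING, so one source generates both locked and unlocked variants; roughly:

    /* SMP kernel or userland: __XSTRING(MPLOCKED) "addl %1,%0" -> "lock ;" "addl %1,%0" */
    /* UP kernel:              __XSTRING(MPLOCKED) "addl %1,%0" -> ""       "addl %1,%0" */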

/*
 * The assembly is volatilized to demark potential before-and-after side
 * effects if an interrupt or SMP collision were to occur.
 */
+#ifdef __GNUC__
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V) \
static __inline void \
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{ \
	__asm __volatile(__XSTRING(MPLOCKED) OP \
			 : "+m" (*p) \
			 : CONS (V)); \
}
+#else /* !__GNUC__ */
+#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V) \
+void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)
+#endif /* __GNUC__ */
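Expanded by hand for one instantiation, the GCC path above yields roughly the following on an SMP build (a sketch, not code from the file):

    /* ATOMIC_ASM(add, int, "addl %1,%0", "ir", v); */
    static __inline void
    atomic_add_int(volatile u_int *p, u_int v)
    {
        __asm __volatile("lock ;" "addl %1,%0"
                         : "+m" (*p)    /* memory operand, read-write */
                         : "ir" (v));   /* immediate or register source */
    }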

/*
 * Atomic compare and set, used by the mutex functions
 *
 * if (*dst == exp) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */

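A minimal sketch of the acquire loop the mutex code builds on this primitive (the lock word and its 0 = free / 1 = held encoding are hypothetical):

    static volatile u_int lockword;    /* hypothetical: 0 = free, 1 = held */

    void
    lock_acquire(void)
    {
        /* Spin until this CPU atomically moves the word from 0 to 1. */
        while (atomic_cmpset_int(&lockword, 0, 1) == 0)
            continue;
    }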
+#if defined(__GNUC__)
#if defined(I386_CPU)
static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	int res = exp;

	__asm __volatile(
	" pushfl ; "

--- 29 unchanged lines hidden ---

	: "+a" (res)	/* 0 (result) */
	: "r" (src),	/* 1 */
	  "m" (*(dst))	/* 2 */
	: "memory");

	return (res);
}
#endif /* defined(I386_CPU) */
+#else /* !defined(__GNUC__) */
+static __inline int
+atomic_cmpset_int(volatile u_int *dst __unused, u_int exp __unused,
+    u_int src __unused)
+{
+}
+#endif /* defined(__GNUC__) */
+#if defined(__GNUC__)
#if defined(I386_CPU)
/*
 * We assume that a = b will do atomic loads and stores.
 *
 * XXX: This is _NOT_ safe on a P6 or higher because it does not guarantee
 * memory ordering. These should only be used on a 386.
 */
#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP) \

--- 4 unchanged lines hidden ---

} \
 \
static __inline void \
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{ \
	*p = v; \
	__asm __volatile("" : : : "memory"); \
}
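Expanded by hand for TYPE = int, the 386 release store above is a plain assignment followed by an empty asm that acts only as a compiler barrier, not a CPU fence (a sketch, not code from the file):

    static __inline void
    atomic_store_rel_int(volatile u_int *p, u_int v)
    {
        *p = v;                               /* plain store; atomic on a 386 */
        __asm __volatile("" : : : "memory");  /* compiler barrier only */
    }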
-#else
+#else /* !defined(I386_CPU) */

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP) \
static __inline u_##TYPE \
atomic_load_acq_##TYPE(volatile u_##TYPE *p) \
{ \
	u_##TYPE res; \
 \
	__asm __volatile(__XSTRING(MPLOCKED) LOP \

--- 11 unchanged lines hidden ---

atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{ \
	__asm __volatile(SOP \
			 : "+m" (*p), /* 0 */ \
			   "+r" (v) /* 1 */ \
			 : : "memory"); \
}
#endif /* defined(I386_CPU) */
+#else /* !defined(__GNUC__) */
+
+/*
+ * XXXX: Dummy functions!!
+ */
+#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP) \
+u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p __unused); \
+void atomic_store_rel_##TYPE(volatile u_##TYPE *p __unused, \
+    u_##TYPE v __unused)
+
+#endif /* defined(__GNUC__) */
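A minimal sketch of the producer/consumer handoff the _acq/_rel pair is meant for (the payload and flag variables are hypothetical):

    static u_int shared_data;       /* hypothetical payload */
    static volatile u_int ready;    /* hypothetical flag */

    void
    producer(void)
    {
        shared_data = 42;
        atomic_store_rel_int(&ready, 1);    /* prior stores visible before the flag */
    }

    void
    consumer(void)
    {
        while (atomic_load_acq_int(&ready) == 0)
            continue;                       /* later loads cannot pass the acquire */
        /* shared_data may now be read safely */
    }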
#endif /* KLD_MODULE */

-ATOMIC_ASM(set, char, "orb %b1,%0", "iq", v)
-ATOMIC_ASM(clear, char, "andb %b1,%0", "iq", ~v)
-ATOMIC_ASM(add, char, "addb %b1,%0", "iq", v)
-ATOMIC_ASM(subtract, char, "subb %b1,%0", "iq", v)
+ATOMIC_ASM(set, char, "orb %b1,%0", "iq", v);
+ATOMIC_ASM(clear, char, "andb %b1,%0", "iq", ~v);
+ATOMIC_ASM(add, char, "addb %b1,%0", "iq", v);
+ATOMIC_ASM(subtract, char, "subb %b1,%0", "iq", v);

-ATOMIC_ASM(set, short, "orw %w1,%0", "ir", v)
-ATOMIC_ASM(clear, short, "andw %w1,%0", "ir", ~v)
-ATOMIC_ASM(add, short, "addw %w1,%0", "ir", v)
-ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir", v)
+ATOMIC_ASM(set, short, "orw %w1,%0", "ir", v);
+ATOMIC_ASM(clear, short, "andw %w1,%0", "ir", ~v);
+ATOMIC_ASM(add, short, "addw %w1,%0", "ir", v);
+ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir", v);

-ATOMIC_ASM(set, int, "orl %1,%0", "ir", v)
-ATOMIC_ASM(clear, int, "andl %1,%0", "ir", ~v)
-ATOMIC_ASM(add, int, "addl %1,%0", "ir", v)
-ATOMIC_ASM(subtract, int, "subl %1,%0", "ir", v)
+ATOMIC_ASM(set, int, "orl %1,%0", "ir", v);
+ATOMIC_ASM(clear, int, "andl %1,%0", "ir", ~v);
+ATOMIC_ASM(add, int, "addl %1,%0", "ir", v);
+ATOMIC_ASM(subtract, int, "subl %1,%0", "ir", v);

-ATOMIC_ASM(set, long, "orl %1,%0", "ir", v)
-ATOMIC_ASM(clear, long, "andl %1,%0", "ir", ~v)
-ATOMIC_ASM(add, long, "addl %1,%0", "ir", v)
-ATOMIC_ASM(subtract, long, "subl %1,%0", "ir", v)
+ATOMIC_ASM(set, long, "orl %1,%0", "ir", v);
+ATOMIC_ASM(clear, long, "andl %1,%0", "ir", ~v);
+ATOMIC_ASM(add, long, "addl %1,%0", "ir", v);
+ATOMIC_ASM(subtract, long, "subl %1,%0", "ir", v);

-ATOMIC_STORE_LOAD(char, "cmpxchgb %b0,%1", "xchgb %b1,%0")
-ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0")
-ATOMIC_STORE_LOAD(int, "cmpxchgl %0,%1", "xchgl %1,%0")
-ATOMIC_STORE_LOAD(long, "cmpxchgl %0,%1", "xchgl %1,%0")
+ATOMIC_STORE_LOAD(char, "cmpxchgb %b0,%1", "xchgb %b1,%0");
+ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
+ATOMIC_STORE_LOAD(int, "cmpxchgl %0,%1", "xchgl %1,%0");
+ATOMIC_STORE_LOAD(long, "cmpxchgl %0,%1", "xchgl %1,%0");
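Each invocation instantiates one family member per type; for example, under GCC the int line of the last group provides (hand expansion, a sketch):

    /* ATOMIC_STORE_LOAD(int, "cmpxchgl %0,%1", "xchgl %1,%0"); yields: */
    u_int atomic_load_acq_int(volatile u_int *p);
    void atomic_store_rel_int(volatile u_int *p, u_int v);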

#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD

#define atomic_set_acq_char atomic_set_char
#define atomic_set_rel_char atomic_set_char
#define atomic_clear_acq_char atomic_clear_char
#define atomic_clear_rel_char atomic_clear_char

--- 128 unchanged lines hidden ---


ATOMIC_PTR(set)
ATOMIC_PTR(clear)
ATOMIC_PTR(add)
ATOMIC_PTR(subtract)

#undef ATOMIC_PTR

+#if defined(__GNUC__)
static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int result;

	__asm __volatile (
	" xorl %0,%0 ; "
	" xchgl %1,%0 ; "
	"# atomic_readandclear_int"
	: "=&r" (result) /* 0 (result) */
	: "m" (*addr)); /* 1 (addr) */

	return (result);
}
+#else /* !defined(__GNUC__) */
+/*
+ * XXXX: Dummy!
+ */
+static __inline u_int
+atomic_readandclear_int(volatile u_int *addr __unused)
+{
+}
+#endif /* defined(__GNUC__) */
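A sketch of the typical fetch-and-zero use of this primitive (the pending-event mask is hypothetical):

    static volatile u_int pending;    /* hypothetical pending-event mask */

    void
    drain_events(void)
    {
        u_int work;

        /* Take the whole mask and leave zero behind in one atomic step. */
        work = atomic_readandclear_int(&pending);
        (void)work;                   /* ... process the bits in work ... */
    }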

+#if defined(__GNUC__)
static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long result;

	__asm __volatile (
	" xorl %0,%0 ; "
	" xchgl %1,%0 ; "
	"# atomic_readandclear_long"
	: "=&r" (result) /* 0 (result) */
	: "m" (*addr)); /* 1 (addr) */

	return (result);
}
+#else /* !defined(__GNUC__) */
+/*
+ * XXXX: Dummy!
+ */
+static __inline u_long
+atomic_readandclear_long(volatile u_long *addr __unused)
+{
+}
+#endif /* defined(__GNUC__) */
#endif /* !defined(WANT_FUNCTIONS) */
#endif /* ! _MACHINE_ATOMIC_H_ */