sys/i386/include/atomic.h: r254620 (old) vs. r284901 (new)
1/*-
2 * Copyright (c) 1998 Doug Rabson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 9 unchanged lines hidden ---

18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: head/sys/i386/include/atomic.h 254620 2013-08-21 22:30:11Z jkim $
26 * $FreeBSD: head/sys/i386/include/atomic.h 284901 2015-06-28 05:04:08Z kib $
27 */
28#ifndef _MACHINE_ATOMIC_H_
29#define _MACHINE_ATOMIC_H_
30
31#ifndef _SYS_CDEFS_H_
32#error this file needs sys/cdefs.h as a prerequisite
33#endif
34

--- 47 unchanged lines hidden ---

82#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V) \
83void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v); \
84void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)
85
86int atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
87u_int atomic_fetchadd_int(volatile u_int *p, u_int v);
88int atomic_testandset_int(volatile u_int *p, u_int v);
89
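These are the primitive read-modify-write operations: atomic_cmpset_int() stores src into *dst only if *dst still equals expect and returns non-zero on success, atomic_fetchadd_int() adds v to *p and returns the previous value, and atomic_testandset_int() sets bit (v % 32) of *p and returns that bit's prior state. A minimal usage sketch; the counter and flag names are illustrative, not part of this header:

	#include <sys/types.h>
	#include <machine/atomic.h>

	static volatile u_int next_id, owner, seen_bits;

	static u_int
	claim_slot(void)
	{
		u_int id;

		id = atomic_fetchadd_int(&next_id, 1);		/* returns the old value */
		if (atomic_cmpset_int(&owner, 0, id + 1))	/* 0 -> id + 1 only if still 0 */
			atomic_testandset_int(&seen_bits, id);	/* old state of bit (id % 32) */
		return (id);
	}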
90#define ATOMIC_LOAD(TYPE, LOP) \
90#define ATOMIC_LOAD(TYPE) \
91u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p)
92#define ATOMIC_STORE(TYPE) \
93void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)
94
95int atomic_cmpset_64(volatile uint64_t *, uint64_t, uint64_t);
96uint64_t atomic_load_acq_64(volatile uint64_t *);
97void atomic_store_rel_64(volatile uint64_t *, uint64_t);
98uint64_t atomic_swap_64(volatile uint64_t *, uint64_t);
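On i386 an ordinary 64-bit load or store is performed as two 32-bit accesses and can tear, so 64-bit shared data must go through these helpers (the _i386/_i586 variants declared further down supply the implementations, selected by CPU class). A sketch with an illustrative counter, assuming kernel context:

	static volatile uint64_t bytes_in;

	static void
	account_bytes(uint64_t n)
	{
		uint64_t old;

		/* Classic compare-and-swap loop; atomic_cmpset_64() returns
		 * non-zero once the old -> old + n transition succeeds. */
		do {
			old = atomic_load_acq_64(&bytes_in);
		} while (atomic_cmpset_64(&bytes_in, old, old + n) == 0);
	}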

--- 124 unchanged lines hidden ---

223 : "cc");
224 return (res);
225}
226
227/*
228 * We assume that a = b will do atomic loads and stores. Due to the
229 * IA32 memory model, a simple store guarantees release semantics.
230 *
231 * However, loads may pass stores, so for atomic_load_acq we have to
232 * ensure a Store/Load barrier to do the load in SMP kernels. We use
233 * "lock cmpxchg" as recommended by the AMD Software Optimization
234 * Guide, and not mfence. For UP kernels, however, the cache of the
235 * single processor is always consistent, so we only need to take care
236 * of the compiler.
231 * However, a load may pass a store if they are performed on distinct
232 * addresses, so for atomic_load_acq we introduce a Store/Load barrier
233 * before the load in SMP kernels. We use "lock addl $0,mem", as
234 * recommended by the AMD Software Optimization Guide, and not mfence.
235 * In the kernel, we use a private per-cpu cache line as the target
236 * for the locked addition, to avoid introducing false data
237 * dependencies. In userspace, a word at the top of the stack is
238 * utilized.
239 *
240 * For UP kernels, however, the memory of the single processor is
241 * always consistent, so we only need to stop the compiler from
242 * reordering accesses in a way that violates the semantics of acquire
243 * and release.
237 */
244 */
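Put differently, a release store is just a compiler barrier followed by an ordinary store, while an acquire load additionally needs the Store/Load barrier described above. The classic pattern these semantics support is handing a payload off through a flag; the structure and the process() consumer below are illustrative only:

	static struct { int len; char buf[128]; } msg;	/* hypothetical payload */
	static volatile u_int msg_ready;

	static void
	producer(void)
	{

		/* Fill the payload, then release-store the flag; the payload
		 * writes become visible no later than the flag store. */
		msg.len = snprintf(msg.buf, sizeof(msg.buf), "hello");
		atomic_store_rel_int(&msg_ready, 1);
	}

	static void
	consumer(void)
	{

		/* Acquire-load the flag, then read the payload; the payload
		 * reads are not performed before the flag load. */
		if (atomic_load_acq_int(&msg_ready) != 0)
			process(msg.buf, msg.len);	/* illustrative consumer */
	}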
238#define ATOMIC_STORE(TYPE) \
239static __inline void \
240atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
241{ \
242 __compiler_membar(); \
243 *p = v; \
244} \
245struct __hack
245#if defined(_KERNEL)
246
246
247#if defined(_KERNEL) && !defined(SMP)
247/*
248 * OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
249 *
250 * The open-coded number is used instead of the symbolic expression to
251 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
252 * An assertion in i386/vm_machdep.c ensures that the value is correct.
253 */
254#define OFFSETOF_MONITORBUF 0x180
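A compile-time check of this kind (the actual assertion lives in i386/vm_machdep.c and may be phrased differently; this is only a sketch of the idea) could look like:

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/pcpu.h>

	/* Fail the build if the open-coded 0x180 ever drifts from the
	 * real offset of pc_monitorbuf within struct pcpu. */
	CTASSERT(OFFSETOF_MONITORBUF == __offsetof(struct pcpu, pc_monitorbuf));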
248
255
249#define ATOMIC_LOAD(TYPE, LOP) \
250static __inline u_##TYPE \
251atomic_load_acq_##TYPE(volatile u_##TYPE *p) \
252{ \
253 u_##TYPE tmp; \
254 \
255 tmp = *p; \
256 __compiler_membar(); \
257 return (tmp); \
258} \
259struct __hack
256#if defined(SMP)
257static __inline void
258__storeload_barrier(void)
259{
260
260
261#else /* !(_KERNEL && !SMP) */
261 __asm __volatile("lock; addl $0,%%fs:%0"
262 : "+m" (*(u_int *)OFFSETOF_MONITORBUF) : : "memory", "cc");
263}
264#else /* _KERNEL && UP */
265static __inline void
266__storeload_barrier(void)
267{
262
268
263#define ATOMIC_LOAD(TYPE, LOP) \
264static __inline u_##TYPE \
265atomic_load_acq_##TYPE(volatile u_##TYPE *p) \
266{ \
267 u_##TYPE res; \
268 \
269 __asm __volatile(MPLOCKED LOP \
270 : "=a" (res), /* 0 */ \
271 "+m" (*p) /* 1 */ \
272 : : "memory", "cc"); \
273 return (res); \
274} \
269 __compiler_membar();
270}
271#endif /* SMP */
272#else /* !_KERNEL */
273static __inline void
274__storeload_barrier(void)
275{
276
277 __asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc");
278}
279#endif /* _KERNEL*/
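The need for this barrier shows up most clearly in a Dekker-style handshake, where each CPU stores its own flag and then loads the other one's: without a Store/Load barrier the store can linger in the store buffer past the later load, and both sides may read 0. Because atomic_load_acq_*() (defined below) issues __storeload_barrier() before its load, in SMP kernels at most one of the two loads can return 0. Illustrative sketch:

	static volatile u_int flag0, flag1;

	/* CPU 0 */
	atomic_store_rel_int(&flag0, 1);
	if (atomic_load_acq_int(&flag1) == 0) {
		/* CPU 1 has not announced itself yet. */
	}

	/* CPU 1 */
	atomic_store_rel_int(&flag1, 1);
	if (atomic_load_acq_int(&flag0) == 0) {
		/* CPU 0 has not announced itself yet. */
	}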
280
281#define ATOMIC_LOAD(TYPE) \
282static __inline u_##TYPE \
283atomic_load_acq_##TYPE(volatile u_##TYPE *p) \
284{ \
285 u_##TYPE res; \
286 \
287 __storeload_barrier(); \
288 res = *p; \
289 __compiler_membar(); \
290 return (res); \
291} \
275struct __hack
276
292struct __hack
293
277#endif /* _KERNEL && !SMP */
294#define ATOMIC_STORE(TYPE) \
295static __inline void \
296atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v) \
297{ \
298 \
299 __compiler_membar(); \
300 *p = v; \
301} \
302struct __hack
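For reference, instantiating these macros with TYPE = int expands (ignoring the struct __hack tail, which only exists so the macro invocation can end in a semicolon) to roughly:

	static __inline u_int
	atomic_load_acq_int(volatile u_int *p)
	{
		u_int res;

		__storeload_barrier();	/* locked add on SMP kernels and in
					   userland, compiler barrier on UP */
		res = *p;
		__compiler_membar();	/* later accesses stay after the load */
		return (res);
	}

	static __inline void
	atomic_store_rel_int(volatile u_int *p, u_int v)
	{

		__compiler_membar();	/* earlier accesses stay before the store */
		*p = v;
	}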
278
279#ifdef _KERNEL
280
281#ifdef WANT_FUNCTIONS
282int atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
283int atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
284uint64_t atomic_load_acq_64_i386(volatile uint64_t *);
285uint64_t atomic_load_acq_64_i586(volatile uint64_t *);

--- 220 unchanged lines hidden ---

506ATOMIC_ASM(add, int, "addl %1,%0", "ir", v);
507ATOMIC_ASM(subtract, int, "subl %1,%0", "ir", v);
508
509ATOMIC_ASM(set, long, "orl %1,%0", "ir", v);
510ATOMIC_ASM(clear, long, "andl %1,%0", "ir", ~v);
511ATOMIC_ASM(add, long, "addl %1,%0", "ir", v);
512ATOMIC_ASM(subtract, long, "subl %1,%0", "ir", v);
513
303
304#ifdef _KERNEL
305
306#ifdef WANT_FUNCTIONS
307int atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
308int atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
309uint64_t atomic_load_acq_64_i386(volatile uint64_t *);
310uint64_t atomic_load_acq_64_i586(volatile uint64_t *);

--- 220 unchanged lines hidden ---

531ATOMIC_ASM(add, int, "addl %1,%0", "ir", v);
532ATOMIC_ASM(subtract, int, "subl %1,%0", "ir", v);
533
534ATOMIC_ASM(set, long, "orl %1,%0", "ir", v);
535ATOMIC_ASM(clear, long, "andl %1,%0", "ir", ~v);
536ATOMIC_ASM(add, long, "addl %1,%0", "ir", v);
537ATOMIC_ASM(subtract, long, "subl %1,%0", "ir", v);
538
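Each ATOMIC_ASM(NAME, TYPE, OP, CONS, V) instantiation provides atomic_NAME_TYPE() plus an atomic_NAME_barr_TYPE() variant with barrier semantics (see the prototype-only macro near the top of this file; the corresponding inline definitions are among the hidden lines). The int group above therefore gives, among others:

	void atomic_set_int(volatile u_int *p, u_int v);	/* *p |= v  */
	void atomic_clear_int(volatile u_int *p, u_int v);	/* *p &= ~v */
	void atomic_add_int(volatile u_int *p, u_int v);	/* *p += v  */
	void atomic_subtract_int(volatile u_int *p, u_int v);	/* *p -= v  */

	/* e.g., bump a shared statistics counter (field name illustrative): */
	atomic_add_int(&ifp_stats.drops, 1);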
514ATOMIC_LOAD(char, "cmpxchgb %b0,%1");
515ATOMIC_LOAD(short, "cmpxchgw %w0,%1");
516ATOMIC_LOAD(int, "cmpxchgl %0,%1");
517ATOMIC_LOAD(long, "cmpxchgl %0,%1");
539#define ATOMIC_LOADSTORE(TYPE) \
540 ATOMIC_LOAD(TYPE); \
541 ATOMIC_STORE(TYPE)
518
542
519ATOMIC_STORE(char);
520ATOMIC_STORE(short);
521ATOMIC_STORE(int);
522ATOMIC_STORE(long);
543ATOMIC_LOADSTORE(char);
544ATOMIC_LOADSTORE(short);
545ATOMIC_LOADSTORE(int);
546ATOMIC_LOADSTORE(long);
523
524#undef ATOMIC_ASM
525#undef ATOMIC_LOAD
526#undef ATOMIC_STORE
547
548#undef ATOMIC_ASM
549#undef ATOMIC_LOAD
550#undef ATOMIC_STORE
551#undef ATOMIC_LOADSTORE
527
528#ifndef WANT_FUNCTIONS
529
530static __inline int
531atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
532{
533
534 return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,

--- 189 unchanged lines hidden ---
552
553#ifndef WANT_FUNCTIONS
554
555static __inline int
556atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
557{
558
559 return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,

--- 189 unchanged lines hidden ---