atomic.h revision 254612
/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/include/atomic.h 254612 2013-08-21 21:14:16Z jkim $
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#define	mb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define	wmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define	rmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
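/*
 * Editor's illustration, not part of the original header: a minimal sketch
 * of the documented set/clear semantics on a shared flags word.  Assumes
 * u_int from <sys/types.h>; the example_* names are hypothetical and the
 * block is compiled out with #if 0.
 */
#if 0
static volatile u_int example_flags;

static __inline void
example_mark_busy(void)
{
	atomic_set_int(&example_flags, 0x1);	/* flags |= 0x1, atomically */
}

static __inline void
example_mark_idle(void)
{
	atomic_clear_int(&example_flags, 0x1);	/* flags &= ~0x1, atomically */
}
#endif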
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);

#define	ATOMIC_LOAD(TYPE, LOP)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is volatilized to avoid code chunk removal by the compiler.
 * GCC aggressively reorders operations and memory clobbering is necessary
 * in order to avoid that for memory barriers.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack

#if defined(_KERNEL) && !defined(WANT_FUNCTIONS)

/* I486 does not support SMP or CMPXCHG8B. */
static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
	volatile uint32_t *high, *low;
	uint64_t res;

	low = (volatile uint32_t *)p;
	high = (volatile uint32_t *)p + 1;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	popfl"
	: "=&A" (res)			/* 0 */
	: "m" (*low),			/* 1 */
	  "m" (*high)			/* 2 */
	: "memory");

	return (res);
}

static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *high, *low;

	low = (volatile uint32_t *)p;
	high = (volatile uint32_t *)p + 1;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%%eax,%0 ;	"
	"	movl	%%edx,%1 ;	"
	"	popfl"
	: "=m" (*low),			/* 0 */
	  "=m" (*high)			/* 1 */
	: "A" (v)			/* 2 */
	: "memory");
}

static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
	uint64_t res;

	__asm __volatile(
	"	movl	%%ebx,%%eax ;	"
	"	movl	%%ecx,%%edx ;	"
	"	" MPLOCKED "		"
	"	cmpxchg8b %1"
	: "=&A" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "memory", "cc");

	return (res);
}

static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
}

#endif /* _KERNEL && !WANT_FUNCTIONS */
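/*
 * Editor's illustration, not part of the original header: what
 * ATOMIC_ASM(add, int, "addl %1,%0", "ir", v) expands to on an SMP kernel,
 * where MPLOCKED is "lock ; ".  The _barr_ variant is identical except
 * that it also clobbers "memory".  Compiled out with #if 0.
 */
#if 0
static __inline void
atomic_add_int(volatile u_int *p, u_int v)
{
	__asm __volatile("lock ; " "addl %1,%0"
	: "+m" (*p)
	: "ir" (v)
	: "cc");
}
#endif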
/*
 * Atomic compare and set, used by the mutex functions
 *
 * if (*dst == expect) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */

#ifdef CPU_DISABLE_CMPXCHG

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%3,%1 ;		"
	"	jne	1f ;		"
	"	movl	%2,%1 ;		"
	"1:				"
	"	sete	%0 ;		"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "r" (expect)			/* 3 */
	: "memory");

	return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %2,%1 ;	"
	"	sete	%0 ;		"
	"# atomic_cmpset_int"
	: "=a" (res),			/* 0 */
	  "+m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "a" (expect)			/* 3 */
	: "memory", "cc");

	return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}

/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, loads may pass stores, so for atomic_load_acq we have to
 * ensure a Store/Load barrier to do the load in SMP kernels.  We use
 * "lock cmpxchg" as recommended by the AMD Software Optimization
 * Guide, and not mfence.  For UP kernels, however, the cache of the
 * single processor is always consistent, so we only need to take care
 * of the compiler.
 */
#define	ATOMIC_STORE(TYPE)				\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__compiler_membar();				\
	*p = v;						\
}							\
struct __hack

#if defined(_KERNEL) && !defined(SMP)

#define	ATOMIC_LOAD(TYPE, LOP)				\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE tmp;					\
							\
	tmp = *p;					\
	__compiler_membar();				\
	return (tmp);					\
}							\
struct __hack

#else /* !(_KERNEL && !SMP) */

#define	ATOMIC_LOAD(TYPE, LOP)				\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(MPLOCKED LOP			\
	: "=a" (res),			/* 0 */		\
	  "+m" (*p)			/* 1 */		\
	: : "memory", "cc");				\
							\
	return (res);					\
}							\
struct __hack

#endif /* _KERNEL && !SMP */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */
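/*
 * Editor's illustration, not part of the original header: a typical
 * compare-and-set retry loop and a fetchadd-based ticket counter built on
 * the primitives above.  The example_* names are hypothetical; compiled
 * out with #if 0.
 */
#if 0
/* Atomically raise *p to at least v; retry if another CPU races us. */
static __inline void
example_atomic_max_int(volatile u_int *p, u_int v)
{
	u_int old;

	do {
		old = *p;
		if (old >= v)
			break;		/* another CPU already stored more */
	} while (atomic_cmpset_int(p, old, v) == 0);
}

/* Hand out strictly increasing ticket numbers. */
static __inline u_int
example_next_ticket(volatile u_int *counter)
{

	return (atomic_fetchadd_int(counter, 1));  /* returns the old value */
}
#endif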
ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

ATOMIC_LOAD(char,  "cmpxchgb %b0,%1");
ATOMIC_LOAD(short, "cmpxchgw %w0,%1");
ATOMIC_LOAD(int,   "cmpxchgl %0,%1");
ATOMIC_LOAD(long,  "cmpxchgl %0,%1");

ATOMIC_STORE(char);
ATOMIC_STORE(short);
ATOMIC_STORE(int);
ATOMIC_STORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE

#ifndef WANT_FUNCTIONS

#ifdef _KERNEL
extern uint64_t	(*atomic_load_acq_64)(volatile uint64_t *);
extern void	(*atomic_store_rel_64)(volatile uint64_t *, uint64_t);
#endif

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

/* Read the current value and store a zero in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_readandclear_int(volatile u_int *p)
{
	u_int res;

	res = 0;
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_int"
	: "+r" (res),			/* 0 */
	  "+m" (*p));			/* 1 */

	return (res);
}

static __inline u_long
atomic_readandclear_long(volatile u_long *p)
{
	u_long res;

	res = 0;
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_long"
	: "+r" (res),			/* 0 */
	  "+m" (*p));			/* 1 */

	return (res);
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_readandclear_int(volatile u_int *p);
u_long	atomic_readandclear_long(volatile u_long *p);

#endif /* __GNUCLIKE_ASM */
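/*
 * Editor's illustration, not part of the original header: draining a
 * pending-work bitmask with atomic_readandclear_int, so that each set bit
 * is observed by exactly one consumer.  Hypothetical name; compiled out
 * with #if 0.
 */
#if 0
static __inline u_int
example_take_pending(volatile u_int *pending)
{

	/* xchgl swaps in zero and hands back whatever bits were set. */
	return (atomic_readandclear_int(pending));
}
#endif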
#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
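/*
 * Editor's illustration, not part of the original header: publishing data
 * through the release/acquire pairs aliased above.  The release store keeps
 * the data write ordered before the flag write; the acquire load keeps the
 * flag read ordered before the data read.  Hypothetical names; compiled out
 * with #if 0.
 */
#if 0
static volatile uint32_t example_data;
static volatile uint32_t example_ready;

static __inline void
example_publish(uint32_t v)
{
	example_data = v;
	atomic_store_rel_32(&example_ready, 1);
}

static __inline int
example_try_consume(uint32_t *out)
{
	if (atomic_load_acq_32(&example_ready) == 0)
		return (0);
	*out = example_data;
	return (1);
}
#endif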
/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */
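/*
 * Editor's illustration, appended after the header proper and not part of
 * the original file: one-shot publication of a pointer with
 * atomic_cmpset_ptr.  Only the caller whose compare-and-set succeeds
 * installs its pointer.  Hypothetical names; compiled out with #if 0.
 */
#if 0
static void *example_singleton;

static __inline void *
example_publish_once(void *p)
{

	if (atomic_cmpset_ptr(&example_singleton, NULL, p))
		return (p);		/* we won; p is now published */
	return (example_singleton);	/* someone else won earlier */
}
#endif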