atomic.h revision 197824
/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/amd64/include/atomic.h 197824 2009-10-06 23:48:28Z attilio $
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#define	mb()	__asm __volatile("mfence;" : : : "memory")
#define	wmb()	__asm __volatile("sfence;" : : : "memory")
#define	rmb()	__asm __volatile("lfence;" : : : "memory")

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
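/*
 * Illustrative sketch of how the plain variants above are typically used:
 * they behave like the pseudo-code in the table, but are safe against
 * concurrent updates from other CPUs and from interrupt handlers.  The
 * names "pending_flags" and "mark_pending" are hypothetical and serve
 * only as an example.
 *
 *	static volatile u_int pending_flags;
 *
 *	static void
 *	mark_pending(u_int flag)
 *	{
 *		atomic_set_int(&pending_flags, flag);	/- *p |= flag, atomically
 *	}
 */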
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);
int	atomic_cmpset_long(volatile u_long *dst, u_long exp, u_long src);
int	atomic_cmpset_barr_int(volatile u_int *dst, u_int exp, u_int src);
int	atomic_cmpset_barr_long(volatile u_long *dst, u_long exp, u_long src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
u_long	atomic_fetchadd_long(volatile u_long *p, u_long v);

#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)			\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p);	\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is volatilized so that the compiler cannot eliminate it.
 * GCC aggressively reorders operations around inline assembly, so the
 * memory-barrier variants also need a "memory" clobber to prevent such
 * reordering.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "=m" (*p)					\
	: CONS (V), "m" (*p));				\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "=m" (*p)					\
	: CONS (V), "m" (*p)				\
	: "memory");					\
}							\
struct __hack

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * if (*dst == exp) *dst = src (32 and 64 bit variants are generated below)
 *
 * Returns 0 on failure, non-zero on success.
 */

#define	DEFINE_CMPSET_GEN(NAME, TYPE, OP)			\
static __inline int						\
atomic_cmpset_##NAME(volatile u_##TYPE *dst, u_##TYPE exp, u_##TYPE src)\
{								\
	u_char res;						\
								\
	__asm __volatile(					\
	"	" MPLOCKED "		"			\
	"	" OP "	%2,%1 ;	"				\
	"	sete	%0 ;		"			\
	"1:				"			\
	"# atomic_cmpset_" #NAME				\
	: "=a" (res),			/* 0 */			\
	  "=m" (*dst)			/* 1 */			\
	: "r" (src),			/* 2 */			\
	  "a" (exp),			/* 3 */			\
	  "m" (*dst)			/* 4 */			\
	: "memory");						\
								\
	return (res);						\
}								\
struct __hack

DEFINE_CMPSET_GEN(int, int, "cmpxchgl");
DEFINE_CMPSET_GEN(long, long, "cmpxchgq");
DEFINE_CMPSET_GEN(barr_int, int, "cmpxchgl");
DEFINE_CMPSET_GEN(barr_long, long, "cmpxchgq");

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0, %1 ;	"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 (result) */
	  "=m" (*p)			/* 1 */
	: "m" (*p));			/* 2 */

	return (v);
}
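/*
 * Illustrative sketch of the usual calling patterns: atomic_fetchadd_int()
 * hands out unique values without a lock, and atomic_cmpset_int() is used
 * in a retry loop that gives up or retries when another CPU won the race.
 * The names "next_id", "alloc_id" and "update_max" are hypothetical and
 * serve only as an example.
 *
 *	static volatile u_int next_id;
 *
 *	static u_int
 *	alloc_id(void)
 *	{
 *		return (atomic_fetchadd_int(&next_id, 1));
 *	}
 *
 *	static void
 *	update_max(volatile u_int *max, u_int v)
 *	{
 *		u_int old;
 *
 *		do {
 *			old = *max;
 *			if (v <= old)
 *				return;
 *		} while (atomic_cmpset_int(max, old, v) == 0);
 *	}
 */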
/*
 * Atomically add the value of v to the long integer pointed to by p and
 * return the previous value of *p.
 */
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddq	%0, %1 ;	"
	"# atomic_fetchadd_long"
	: "+r" (v),			/* 0 (result) */
	  "=m" (*p)			/* 1 */
	: "m" (*p));			/* 2 */

	return (v);
}

#if defined(_KERNEL) && !defined(SMP)

/*
 * We assume that a = b will do atomic loads and stores.  However, on a
 * PentiumPro or higher, reads may pass writes, so for that case we have
 * to use a serializing instruction (i.e. with LOCK) to do the load in
 * SMP kernels.  For UP kernels, however, the cache of the single processor
 * is always consistent, so we only need to take care of the compiler.
 */
#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE tmp;					\
							\
	tmp = *p;					\
	__asm __volatile("" : : : "memory");		\
	return (tmp);					\
}							\
							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile("" : : : "memory");		\
	*p = v;						\
}							\
struct __hack

#else /* !(_KERNEL && !SMP) */

#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(MPLOCKED LOP			\
	: "=a" (res),			/* 0 */		\
	  "=m" (*p)			/* 1 */		\
	: "m" (*p)			/* 2 */		\
	: "memory");					\
							\
	return (res);					\
}							\
							\
/*							\
 * The XCHG instruction asserts LOCK automagically.	\
 */							\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(SOP				\
	: "=m" (*p),			/* 0 */		\
	  "+r" (v)			/* 1 */		\
	: "m" (*p)			/* 2 */		\
	: "memory");					\
}							\
struct __hack

#endif /* _KERNEL && !SMP */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orq %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andq %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addq %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subq %1,%0",  "ir",  v);

ATOMIC_STORE_LOAD(char,	"cmpxchgb %b0,%1", "xchgb %b1,%0");
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
ATOMIC_STORE_LOAD(int,	"cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long,	"cmpxchgq %0,%1",  "xchgq %1,%0");

#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD
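/*
 * Illustrative sketch of an acquire/release hand-off built on the
 * atomic_load_acq_*() and atomic_store_rel_*() primitives defined above:
 * the release store publishes the payload, the acquire load makes it
 * visible to the consumer.  The names "shared_msg", "msg_ready",
 * "fill_msg" and "consume_msg" are hypothetical and serve only as an
 * example.
 *
 *	static struct msg shared_msg;
 *	static volatile u_int msg_ready;
 *
 *	producer:
 *		fill_msg(&shared_msg);			(plain stores)
 *		atomic_store_rel_int(&msg_ready, 1);	(publish afterwards)
 *
 *	consumer:
 *		if (atomic_load_acq_int(&msg_ready) != 0)
 *			consume_msg(&shared_msg);	(payload visible here)
 */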
#ifndef WANT_FUNCTIONS

/* Read the current value and store a zero in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int res;

	res = 0;
	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_readandclear_int"
	: "+r" (res),			/* 0 */
	  "=m" (*addr)			/* 1 */
	: "m" (*addr));

	return (res);
}

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long res;

	res = 0;
	__asm __volatile(
	"	xchgq	%1,%0 ;		"
	"# atomic_readandclear_long"
	: "+r" (res),			/* 0 */
	  "=m" (*addr)			/* 1 */
	: "m" (*addr));

	return (res);
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_readandclear_int(volatile u_int *addr);
u_long	atomic_readandclear_long(volatile u_long *addr);

#endif /* __GNUCLIKE_ASM */

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_barr_int
#define	atomic_cmpset_rel_int		atomic_cmpset_barr_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_barr_long
#define	atomic_cmpset_rel_long		atomic_cmpset_barr_long
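/*
 * Illustrative sketch of draining a bitmask of pending work with
 * atomic_readandclear_int(), which swaps in a zero and returns the old
 * value in one step.  The names "pending_work", "drain_work" and
 * "handle_bit" are hypothetical and serve only as an example; ffs() is
 * the usual find-first-set helper.
 *
 *	static volatile u_int pending_work;
 *
 *	static void
 *	drain_work(void)
 *	{
 *		u_int work;
 *		int bit;
 *
 *		work = atomic_readandclear_int(&pending_work);
 *		while (work != 0) {
 *			bit = ffs(work) - 1;
 *			work &= ~(1u << bit);
 *			handle_bit(bit);
 *		}
 *	}
 */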
/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int

/* Operations on 64-bit quad words. */
#define	atomic_set_64		atomic_set_long
#define	atomic_set_acq_64	atomic_set_acq_long
#define	atomic_set_rel_64	atomic_set_rel_long
#define	atomic_clear_64		atomic_clear_long
#define	atomic_clear_acq_64	atomic_clear_acq_long
#define	atomic_clear_rel_64	atomic_clear_rel_long
#define	atomic_add_64		atomic_add_long
#define	atomic_add_acq_64	atomic_add_acq_long
#define	atomic_add_rel_64	atomic_add_rel_long
#define	atomic_subtract_64	atomic_subtract_long
#define	atomic_subtract_acq_64	atomic_subtract_acq_long
#define	atomic_subtract_rel_64	atomic_subtract_rel_long
#define	atomic_load_acq_64	atomic_load_acq_long
#define	atomic_store_rel_64	atomic_store_rel_long
#define	atomic_cmpset_64	atomic_cmpset_long
#define	atomic_cmpset_acq_64	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_64	atomic_cmpset_rel_long
#define	atomic_readandclear_64	atomic_readandclear_long
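/*
 * Illustrative sketch of the fixed-width aliases: machine-independent code
 * can name an explicit width and still end up in the type-named operations
 * above.  The names "rx_bytes" and "account_rx" are hypothetical and serve
 * only as an example.
 *
 *	static volatile uint64_t rx_bytes;
 *
 *	static void
 *	account_rx(uint32_t len)
 *	{
 *		atomic_add_64(&rx_bytes, len);
 *	}
 */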
/* Operations on pointers. */
#define	atomic_set_ptr		atomic_set_long
#define	atomic_set_acq_ptr	atomic_set_acq_long
#define	atomic_set_rel_ptr	atomic_set_rel_long
#define	atomic_clear_ptr	atomic_clear_long
#define	atomic_clear_acq_ptr	atomic_clear_acq_long
#define	atomic_clear_rel_ptr	atomic_clear_rel_long
#define	atomic_add_ptr		atomic_add_long
#define	atomic_add_acq_ptr	atomic_add_acq_long
#define	atomic_add_rel_ptr	atomic_add_rel_long
#define	atomic_subtract_ptr	atomic_subtract_long
#define	atomic_subtract_acq_ptr	atomic_subtract_acq_long
#define	atomic_subtract_rel_ptr	atomic_subtract_rel_long
#define	atomic_load_acq_ptr	atomic_load_acq_long
#define	atomic_store_rel_ptr	atomic_store_rel_long
#define	atomic_cmpset_ptr	atomic_cmpset_long
#define	atomic_cmpset_acq_ptr	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_ptr	atomic_cmpset_rel_long
#define	atomic_readandclear_ptr	atomic_readandclear_long

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */