atomic.h revision 285830
1138593Ssam/*- 2138593Ssam * Copyright (c) 1998 Doug Rabson 3138593Ssam * All rights reserved. 4138593Ssam * 5138593Ssam * Redistribution and use in source and binary forms, with or without 6138593Ssam * modification, are permitted provided that the following conditions 7138593Ssam * are met: 8138593Ssam * 1. Redistributions of source code must retain the above copyright 9138593Ssam * notice, this list of conditions and the following disclaimer. 10138593Ssam * 2. Redistributions in binary form must reproduce the above copyright 11138593Ssam * notice, this list of conditions and the following disclaimer in the 12138593Ssam * documentation and/or other materials provided with the distribution. 13138593Ssam * 14138593Ssam * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15138593Ssam * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16138593Ssam * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17138593Ssam * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18138593Ssam * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19138593Ssam * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20138593Ssam * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21138593Ssam * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22138593Ssam * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23138593Ssam * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24138593Ssam * SUCH DAMAGE. 25138593Ssam * 26138593Ssam * $FreeBSD: releng/10.2/sys/amd64/include/atomic.h 254618 2013-08-21 22:05:58Z jkim $ 27138593Ssam */ 28138593Ssam#ifndef _MACHINE_ATOMIC_H_ 29138593Ssam#define _MACHINE_ATOMIC_H_ 30138593Ssam 31138593Ssam#ifndef _SYS_CDEFS_H_ 32138593Ssam#error this file needs sys/cdefs.h as a prerequisite 33138593Ssam#endif 34138593Ssam 35283016Sngie#define mb() __asm __volatile("mfence;" : : : "memory") 36138593Ssam#define wmb() __asm __volatile("sfence;" : : : "memory") 37138593Ssam#define rmb() __asm __volatile("lfence;" : : : "memory") 38138593Ssam 39138593Ssam/* 40202289Semaste * Various simple operations on memory, each of which is atomic in the 41138593Ssam * presence of interrupts and multiple processors. 42138593Ssam * 43138593Ssam * atomic_set_char(P, V) (*(u_char *)(P) |= (V)) 44138593Ssam * atomic_clear_char(P, V) (*(u_char *)(P) &= ~(V)) 45138593Ssam * atomic_add_char(P, V) (*(u_char *)(P) += (V)) 46166956Ssam * atomic_subtract_char(P, V) (*(u_char *)(P) -= (V)) 47138593Ssam * 48138593Ssam * atomic_set_short(P, V) (*(u_short *)(P) |= (V)) 49138593Ssam * atomic_clear_short(P, V) (*(u_short *)(P) &= ~(V)) 50138593Ssam * atomic_add_short(P, V) (*(u_short *)(P) += (V)) 51138593Ssam * atomic_subtract_short(P, V) (*(u_short *)(P) -= (V)) 52138593Ssam * 53138593Ssam * atomic_set_int(P, V) (*(u_int *)(P) |= (V)) 54138593Ssam * atomic_clear_int(P, V) (*(u_int *)(P) &= ~(V)) 55138593Ssam * atomic_add_int(P, V) (*(u_int *)(P) += (V)) 56191121Sbrooks * atomic_subtract_int(P, V) (*(u_int *)(P) -= (V)) 57138593Ssam * atomic_swap_int(P, V) (return (*(u_int *)(P)); *(u_int *)(P) = (V);) 58138593Ssam * atomic_readandclear_int(P) (return (*(u_int *)(P)); *(u_int *)(P) = 0;) 59138593Ssam * 60166956Ssam * atomic_set_long(P, V) (*(u_long *)(P) |= (V)) 61138593Ssam * atomic_clear_long(P, V) (*(u_long *)(P) &= ~(V)) 62138593Ssam * atomic_add_long(P, V) (*(u_long *)(P) += (V)) 63138593Ssam * atomic_subtract_long(P, V) (*(u_long *)(P) -= (V)) 64138593Ssam * atomic_swap_long(P, V) (return (*(u_long *)(P)); *(u_long *)(P) = (V);) 65138593Ssam * atomic_readandclear_long(P) (return (*(u_long *)(P)); *(u_long *)(P) = 0;) 66166956Ssam */ 67138593Ssam 68138593Ssam/* 69138593Ssam * The above functions are expanded inline in the statically-linked 70138593Ssam * kernel. Lock prefixes are generated if an SMP kernel is being 71138593Ssam * built. 72166956Ssam * 73166956Ssam * Kernel modules call real functions which are built into the kernel. 74166956Ssam * This allows kernel modules to be portable between UP and SMP systems. 75138593Ssam */ 76138593Ssam#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM) 77138593Ssam#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V) \ 78138593Ssamvoid atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v); \ 79166956Ssamvoid atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v) 80166956Ssam 81138593Ssamint atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src); 82138593Ssamint atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src); 83138593Ssamu_int atomic_fetchadd_int(volatile u_int *p, u_int v); 84166956Ssamu_long atomic_fetchadd_long(volatile u_long *p, u_long v); 85166956Ssamint atomic_testandset_int(volatile u_int *p, u_int v); 86166956Ssamint atomic_testandset_long(volatile u_long *p, u_int v); 87228571Sglebius 88138593Ssam#define ATOMIC_LOAD(TYPE, LOP) \ 89228571Sglebiusu_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p) 90228571Sglebius#define ATOMIC_STORE(TYPE) \ 91228571Sglebiusvoid atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v) 92138593Ssam 93138593Ssam#else /* !KLD_MODULE && __GNUCLIKE_ASM */ 94138593Ssam 95138593Ssam/* 96138593Ssam * For userland, always use lock prefixes so that the binaries will run 97138593Ssam * on both SMP and !SMP systems. 98138593Ssam */ 99138593Ssam#if defined(SMP) || !defined(_KERNEL) 100138593Ssam#define MPLOCKED "lock ; " 101138593Ssam#else 102138593Ssam#define MPLOCKED 103138593Ssam#endif 104138593Ssam 105138593Ssam/* 106138593Ssam * The assembly is volatilized to avoid code chunk removal by the compiler. 107138593Ssam * GCC aggressively reorders operations and memory clobbering is necessary 108138593Ssam * in order to avoid that for memory barriers. 109227738Sglebius */ 110138593Ssam#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V) \ 111138593Ssamstatic __inline void \ 112138593Ssamatomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\ 113138593Ssam{ \ 114138593Ssam __asm __volatile(MPLOCKED OP \ 115202289Semaste : "+m" (*p) \ 116138593Ssam : CONS (V) \ 117138593Ssam : "cc"); \ 118138593Ssam} \ 119138593Ssam \ 120202289Semastestatic __inline void \ 121202289Semasteatomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\ 122202289Semaste{ \ 123202289Semaste __asm __volatile(MPLOCKED OP \ 124202289Semaste : "+m" (*p) \ 125138593Ssam : CONS (V) \ 126202289Semaste : "memory", "cc"); \ 127138593Ssam} \ 128228574Sglebiusstruct __hack 129138593Ssam 130138593Ssam/* 131138593Ssam * Atomic compare and set, used by the mutex functions 132138593Ssam * 133138593Ssam * if (*dst == expect) *dst = src (all 32 bit words) 134138593Ssam * 135138593Ssam * Returns 0 on failure, non-zero on success 136138593Ssam */ 137138593Ssam 138138593Ssamstatic __inline int 139194799Sdelphijatomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src) 140138593Ssam{ 141138593Ssam u_char res; 142138593Ssam 143138593Ssam __asm __volatile( 144138593Ssam " " MPLOCKED " " 145138593Ssam " cmpxchgl %3,%1 ; " 146138593Ssam " sete %0 ; " 147138593Ssam "# atomic_cmpset_int" 148138593Ssam : "=q" (res), /* 0 */ 149138593Ssam "+m" (*dst), /* 1 */ 150138593Ssam "+a" (expect) /* 2 */ 151138593Ssam : "r" (src) /* 3 */ 152138593Ssam : "memory", "cc"); 153138593Ssam return (res); 154138593Ssam} 155138593Ssam 156138593Ssamstatic __inline int 157138593Ssamatomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src) 158138593Ssam{ 159147437Sume u_char res; 160147437Sume 161138593Ssam __asm __volatile( 162138593Ssam " " MPLOCKED " " 163138593Ssam " cmpxchgq %3,%1 ; " 164138593Ssam " sete %0 ; " 165138593Ssam "# atomic_cmpset_long" 166147437Sume : "=q" (res), /* 0 */ 167147437Sume "+m" (*dst), /* 1 */ 168138593Ssam "+a" (expect) /* 2 */ 169138593Ssam : "r" (src) /* 3 */ 170138593Ssam : "memory", "cc"); 171138593Ssam return (res); 172138593Ssam} 173138593Ssam 174138593Ssam/* 175138593Ssam * Atomically add the value of v to the integer pointed to by p and return 176138593Ssam * the previous value of *p. 177191121Sbrooks */ 178138593Ssamstatic __inline u_int 179138593Ssamatomic_fetchadd_int(volatile u_int *p, u_int v) 180138593Ssam{ 181138593Ssam 182138593Ssam __asm __volatile( 183138593Ssam " " MPLOCKED " " 184138593Ssam " xaddl %0,%1 ; " 185138593Ssam "# atomic_fetchadd_int" 186138593Ssam : "+r" (v), /* 0 */ 187138593Ssam "+m" (*p) /* 1 */ 188138593Ssam : : "cc"); 189138593Ssam return (v); 190138593Ssam} 191138593Ssam 192138593Ssam/* 193138593Ssam * Atomically add the value of v to the long integer pointed to by p and return 194138593Ssam * the previous value of *p. 195138593Ssam */ 196138593Ssamstatic __inline u_long 197138593Ssamatomic_fetchadd_long(volatile u_long *p, u_long v) 198138593Ssam{ 199138593Ssam 200138593Ssam __asm __volatile( 201138593Ssam " " MPLOCKED " " 202138593Ssam " xaddq %0,%1 ; " 203138593Ssam "# atomic_fetchadd_long" 204224179Sbz : "+r" (v), /* 0 */ 205224179Sbz "+m" (*p) /* 1 */ 206222527Sbz : : "cc"); 207222527Sbz return (v); 208224179Sbz} 209138593Ssam 210138593Ssamstatic __inline int 211atomic_testandset_int(volatile u_int *p, u_int v) 212{ 213 u_char res; 214 215 __asm __volatile( 216 " " MPLOCKED " " 217 " btsl %2,%1 ; " 218 " setc %0 ; " 219 "# atomic_testandset_int" 220 : "=q" (res), /* 0 */ 221 "+m" (*p) /* 1 */ 222 : "Ir" (v & 0x1f) /* 2 */ 223 : "cc"); 224 return (res); 225} 226 227static __inline int 228atomic_testandset_long(volatile u_long *p, u_int v) 229{ 230 u_char res; 231 232 __asm __volatile( 233 " " MPLOCKED " " 234 " btsq %2,%1 ; " 235 " setc %0 ; " 236 "# atomic_testandset_long" 237 : "=q" (res), /* 0 */ 238 "+m" (*p) /* 1 */ 239 : "Jr" ((u_long)(v & 0x3f)) /* 2 */ 240 : "cc"); 241 return (res); 242} 243 244/* 245 * We assume that a = b will do atomic loads and stores. Due to the 246 * IA32 memory model, a simple store guarantees release semantics. 247 * 248 * However, loads may pass stores, so for atomic_load_acq we have to 249 * ensure a Store/Load barrier to do the load in SMP kernels. We use 250 * "lock cmpxchg" as recommended by the AMD Software Optimization 251 * Guide, and not mfence. For UP kernels, however, the cache of the 252 * single processor is always consistent, so we only need to take care 253 * of the compiler. 254 */ 255#define ATOMIC_STORE(TYPE) \ 256static __inline void \ 257atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\ 258{ \ 259 __compiler_membar(); \ 260 *p = v; \ 261} \ 262struct __hack 263 264#if defined(_KERNEL) && !defined(SMP) 265 266#define ATOMIC_LOAD(TYPE, LOP) \ 267static __inline u_##TYPE \ 268atomic_load_acq_##TYPE(volatile u_##TYPE *p) \ 269{ \ 270 u_##TYPE tmp; \ 271 \ 272 tmp = *p; \ 273 __compiler_membar(); \ 274 return (tmp); \ 275} \ 276struct __hack 277 278#else /* !(_KERNEL && !SMP) */ 279 280#define ATOMIC_LOAD(TYPE, LOP) \ 281static __inline u_##TYPE \ 282atomic_load_acq_##TYPE(volatile u_##TYPE *p) \ 283{ \ 284 u_##TYPE res; \ 285 \ 286 __asm __volatile(MPLOCKED LOP \ 287 : "=a" (res), /* 0 */ \ 288 "+m" (*p) /* 1 */ \ 289 : : "memory", "cc"); \ 290 return (res); \ 291} \ 292struct __hack 293 294#endif /* _KERNEL && !SMP */ 295 296#endif /* KLD_MODULE || !__GNUCLIKE_ASM */ 297 298ATOMIC_ASM(set, char, "orb %b1,%0", "iq", v); 299ATOMIC_ASM(clear, char, "andb %b1,%0", "iq", ~v); 300ATOMIC_ASM(add, char, "addb %b1,%0", "iq", v); 301ATOMIC_ASM(subtract, char, "subb %b1,%0", "iq", v); 302 303ATOMIC_ASM(set, short, "orw %w1,%0", "ir", v); 304ATOMIC_ASM(clear, short, "andw %w1,%0", "ir", ~v); 305ATOMIC_ASM(add, short, "addw %w1,%0", "ir", v); 306ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir", v); 307 308ATOMIC_ASM(set, int, "orl %1,%0", "ir", v); 309ATOMIC_ASM(clear, int, "andl %1,%0", "ir", ~v); 310ATOMIC_ASM(add, int, "addl %1,%0", "ir", v); 311ATOMIC_ASM(subtract, int, "subl %1,%0", "ir", v); 312 313ATOMIC_ASM(set, long, "orq %1,%0", "ir", v); 314ATOMIC_ASM(clear, long, "andq %1,%0", "ir", ~v); 315ATOMIC_ASM(add, long, "addq %1,%0", "ir", v); 316ATOMIC_ASM(subtract, long, "subq %1,%0", "ir", v); 317 318ATOMIC_LOAD(char, "cmpxchgb %b0,%1"); 319ATOMIC_LOAD(short, "cmpxchgw %w0,%1"); 320ATOMIC_LOAD(int, "cmpxchgl %0,%1"); 321ATOMIC_LOAD(long, "cmpxchgq %0,%1"); 322 323ATOMIC_STORE(char); 324ATOMIC_STORE(short); 325ATOMIC_STORE(int); 326ATOMIC_STORE(long); 327 328#undef ATOMIC_ASM 329#undef ATOMIC_LOAD 330#undef ATOMIC_STORE 331 332#ifndef WANT_FUNCTIONS 333 334/* Read the current value and store a new value in the destination. */ 335#ifdef __GNUCLIKE_ASM 336 337static __inline u_int 338atomic_swap_int(volatile u_int *p, u_int v) 339{ 340 341 __asm __volatile( 342 " xchgl %1,%0 ; " 343 "# atomic_swap_int" 344 : "+r" (v), /* 0 */ 345 "+m" (*p)); /* 1 */ 346 return (v); 347} 348 349static __inline u_long 350atomic_swap_long(volatile u_long *p, u_long v) 351{ 352 353 __asm __volatile( 354 " xchgq %1,%0 ; " 355 "# atomic_swap_long" 356 : "+r" (v), /* 0 */ 357 "+m" (*p)); /* 1 */ 358 return (v); 359} 360 361#else /* !__GNUCLIKE_ASM */ 362 363u_int atomic_swap_int(volatile u_int *p, u_int v); 364u_long atomic_swap_long(volatile u_long *p, u_long v); 365 366#endif /* __GNUCLIKE_ASM */ 367 368#define atomic_set_acq_char atomic_set_barr_char 369#define atomic_set_rel_char atomic_set_barr_char 370#define atomic_clear_acq_char atomic_clear_barr_char 371#define atomic_clear_rel_char atomic_clear_barr_char 372#define atomic_add_acq_char atomic_add_barr_char 373#define atomic_add_rel_char atomic_add_barr_char 374#define atomic_subtract_acq_char atomic_subtract_barr_char 375#define atomic_subtract_rel_char atomic_subtract_barr_char 376 377#define atomic_set_acq_short atomic_set_barr_short 378#define atomic_set_rel_short atomic_set_barr_short 379#define atomic_clear_acq_short atomic_clear_barr_short 380#define atomic_clear_rel_short atomic_clear_barr_short 381#define atomic_add_acq_short atomic_add_barr_short 382#define atomic_add_rel_short atomic_add_barr_short 383#define atomic_subtract_acq_short atomic_subtract_barr_short 384#define atomic_subtract_rel_short atomic_subtract_barr_short 385 386#define atomic_set_acq_int atomic_set_barr_int 387#define atomic_set_rel_int atomic_set_barr_int 388#define atomic_clear_acq_int atomic_clear_barr_int 389#define atomic_clear_rel_int atomic_clear_barr_int 390#define atomic_add_acq_int atomic_add_barr_int 391#define atomic_add_rel_int atomic_add_barr_int 392#define atomic_subtract_acq_int atomic_subtract_barr_int 393#define atomic_subtract_rel_int atomic_subtract_barr_int 394#define atomic_cmpset_acq_int atomic_cmpset_int 395#define atomic_cmpset_rel_int atomic_cmpset_int 396 397#define atomic_set_acq_long atomic_set_barr_long 398#define atomic_set_rel_long atomic_set_barr_long 399#define atomic_clear_acq_long atomic_clear_barr_long 400#define atomic_clear_rel_long atomic_clear_barr_long 401#define atomic_add_acq_long atomic_add_barr_long 402#define atomic_add_rel_long atomic_add_barr_long 403#define atomic_subtract_acq_long atomic_subtract_barr_long 404#define atomic_subtract_rel_long atomic_subtract_barr_long 405#define atomic_cmpset_acq_long atomic_cmpset_long 406#define atomic_cmpset_rel_long atomic_cmpset_long 407 408#define atomic_readandclear_int(p) atomic_swap_int(p, 0) 409#define atomic_readandclear_long(p) atomic_swap_long(p, 0) 410 411/* Operations on 8-bit bytes. */ 412#define atomic_set_8 atomic_set_char 413#define atomic_set_acq_8 atomic_set_acq_char 414#define atomic_set_rel_8 atomic_set_rel_char 415#define atomic_clear_8 atomic_clear_char 416#define atomic_clear_acq_8 atomic_clear_acq_char 417#define atomic_clear_rel_8 atomic_clear_rel_char 418#define atomic_add_8 atomic_add_char 419#define atomic_add_acq_8 atomic_add_acq_char 420#define atomic_add_rel_8 atomic_add_rel_char 421#define atomic_subtract_8 atomic_subtract_char 422#define atomic_subtract_acq_8 atomic_subtract_acq_char 423#define atomic_subtract_rel_8 atomic_subtract_rel_char 424#define atomic_load_acq_8 atomic_load_acq_char 425#define atomic_store_rel_8 atomic_store_rel_char 426 427/* Operations on 16-bit words. */ 428#define atomic_set_16 atomic_set_short 429#define atomic_set_acq_16 atomic_set_acq_short 430#define atomic_set_rel_16 atomic_set_rel_short 431#define atomic_clear_16 atomic_clear_short 432#define atomic_clear_acq_16 atomic_clear_acq_short 433#define atomic_clear_rel_16 atomic_clear_rel_short 434#define atomic_add_16 atomic_add_short 435#define atomic_add_acq_16 atomic_add_acq_short 436#define atomic_add_rel_16 atomic_add_rel_short 437#define atomic_subtract_16 atomic_subtract_short 438#define atomic_subtract_acq_16 atomic_subtract_acq_short 439#define atomic_subtract_rel_16 atomic_subtract_rel_short 440#define atomic_load_acq_16 atomic_load_acq_short 441#define atomic_store_rel_16 atomic_store_rel_short 442 443/* Operations on 32-bit double words. */ 444#define atomic_set_32 atomic_set_int 445#define atomic_set_acq_32 atomic_set_acq_int 446#define atomic_set_rel_32 atomic_set_rel_int 447#define atomic_clear_32 atomic_clear_int 448#define atomic_clear_acq_32 atomic_clear_acq_int 449#define atomic_clear_rel_32 atomic_clear_rel_int 450#define atomic_add_32 atomic_add_int 451#define atomic_add_acq_32 atomic_add_acq_int 452#define atomic_add_rel_32 atomic_add_rel_int 453#define atomic_subtract_32 atomic_subtract_int 454#define atomic_subtract_acq_32 atomic_subtract_acq_int 455#define atomic_subtract_rel_32 atomic_subtract_rel_int 456#define atomic_load_acq_32 atomic_load_acq_int 457#define atomic_store_rel_32 atomic_store_rel_int 458#define atomic_cmpset_32 atomic_cmpset_int 459#define atomic_cmpset_acq_32 atomic_cmpset_acq_int 460#define atomic_cmpset_rel_32 atomic_cmpset_rel_int 461#define atomic_swap_32 atomic_swap_int 462#define atomic_readandclear_32 atomic_readandclear_int 463#define atomic_fetchadd_32 atomic_fetchadd_int 464#define atomic_testandset_32 atomic_testandset_int 465 466/* Operations on 64-bit quad words. */ 467#define atomic_set_64 atomic_set_long 468#define atomic_set_acq_64 atomic_set_acq_long 469#define atomic_set_rel_64 atomic_set_rel_long 470#define atomic_clear_64 atomic_clear_long 471#define atomic_clear_acq_64 atomic_clear_acq_long 472#define atomic_clear_rel_64 atomic_clear_rel_long 473#define atomic_add_64 atomic_add_long 474#define atomic_add_acq_64 atomic_add_acq_long 475#define atomic_add_rel_64 atomic_add_rel_long 476#define atomic_subtract_64 atomic_subtract_long 477#define atomic_subtract_acq_64 atomic_subtract_acq_long 478#define atomic_subtract_rel_64 atomic_subtract_rel_long 479#define atomic_load_acq_64 atomic_load_acq_long 480#define atomic_store_rel_64 atomic_store_rel_long 481#define atomic_cmpset_64 atomic_cmpset_long 482#define atomic_cmpset_acq_64 atomic_cmpset_acq_long 483#define atomic_cmpset_rel_64 atomic_cmpset_rel_long 484#define atomic_swap_64 atomic_swap_long 485#define atomic_readandclear_64 atomic_readandclear_long 486#define atomic_testandset_64 atomic_testandset_long 487 488/* Operations on pointers. */ 489#define atomic_set_ptr atomic_set_long 490#define atomic_set_acq_ptr atomic_set_acq_long 491#define atomic_set_rel_ptr atomic_set_rel_long 492#define atomic_clear_ptr atomic_clear_long 493#define atomic_clear_acq_ptr atomic_clear_acq_long 494#define atomic_clear_rel_ptr atomic_clear_rel_long 495#define atomic_add_ptr atomic_add_long 496#define atomic_add_acq_ptr atomic_add_acq_long 497#define atomic_add_rel_ptr atomic_add_rel_long 498#define atomic_subtract_ptr atomic_subtract_long 499#define atomic_subtract_acq_ptr atomic_subtract_acq_long 500#define atomic_subtract_rel_ptr atomic_subtract_rel_long 501#define atomic_load_acq_ptr atomic_load_acq_long 502#define atomic_store_rel_ptr atomic_store_rel_long 503#define atomic_cmpset_ptr atomic_cmpset_long 504#define atomic_cmpset_acq_ptr atomic_cmpset_acq_long 505#define atomic_cmpset_rel_ptr atomic_cmpset_rel_long 506#define atomic_swap_ptr atomic_swap_long 507#define atomic_readandclear_ptr atomic_readandclear_long 508 509#endif /* !WANT_FUNCTIONS */ 510 511#endif /* !_MACHINE_ATOMIC_H_ */ 512