/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

/*
 * To express interprocessor (as opposed to processor and device) memory
 * ordering constraints, use the atomic_*() functions with acquire and release
 * semantics rather than the *mb() functions.  An architecture's memory
 * ordering (or memory consistency) model governs the order in which a
 * program's accesses to different locations may be performed by an
 * implementation of that architecture.  In general, for memory regions
 * defined as writeback cacheable, the memory ordering implemented by amd64
 * processors preserves the program ordering of a load followed by a load, a
 * load followed by a store, and a store followed by a store.  Only a store
 * followed by a load to a different memory location may be reordered.
 * Therefore, except for special cases, like non-temporal memory accesses or
 * memory regions defined as write combining, the memory ordering effects
 * provided by the sfence instruction in the wmb() function and the lfence
 * instruction in the rmb() function are redundant.  In contrast, the
 * atomic_*() functions with acquire and release semantics do not perform
 * redundant instructions for ordinary cases of interprocessor memory
 * ordering on any architecture.
 */
#define	mb()	__asm __volatile("mfence;" : : : "memory")
#define	wmb()	__asm __volatile("sfence;" : : : "memory")
#ifndef __k1om__
#define	rmb()	__asm __volatile("lfence;" : : : "memory")
#else /* there is no lfence on the Xeon Phi */
#define	rmb()
#endif
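
/*
 * Illustrative sketch (not part of the interface): on amd64, the
 * acquire/release operations defined below are normally sufficient to
 * publish data between processors without any explicit *mb() barrier.
 * A hypothetical flag-based handoff, with "data" and "flag" as
 * placeholder variables:
 *
 *	// producer
 *	data = compute();
 *	atomic_store_rel_int(&flag, 1);
 *
 *	// consumer
 *	while (atomic_load_acq_int(&flag) == 0)
 *		cpu_spinwait();
 *	consume(data);
 *
 * The release store orders the write of "data" before the write of
 * "flag", and the acquire load orders the read of "flag" before the
 * read of "data".  compute(), consume() and cpu_spinwait() stand in
 * for real code in this example.
 */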

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
int	atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
u_long	atomic_fetchadd_long(volatile u_long *p, u_long v);
int	atomic_testandset_int(volatile u_int *p, u_int v);
int	atomic_testandset_long(volatile u_long *p, u_int v);
int	atomic_testandclear_int(volatile u_int *p, u_int v);
int	atomic_testandclear_long(volatile u_long *p, u_int v);
void	atomic_thread_fence_acq(void);
void	atomic_thread_fence_acq_rel(void);
void	atomic_thread_fence_rel(void);
void	atomic_thread_fence_seq_cst(void);

#define	ATOMIC_LOAD(TYPE)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is volatilized to prevent the compiler from removing it
 * as dead code.  GCC aggressively reorders operations, so a "memory"
 * clobber is also required to keep the memory barriers effective.
 */
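
/*
 * Illustrative sketch (not part of the interface): each ATOMIC_ASM()
 * invocation further below emits a plain and a "_barr" variant of one
 * read-modify-write primitive.  For instance,
 *
 *	ATOMIC_ASM(add, int, "addl %1,%0", "ir", v);
 *
 * expands (roughly, in the SMP case) to:
 *
 *	static __inline void
 *	atomic_add_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile("lock ; addl %1,%0"
 *		    : "+m" (*p) : "ir" (v) : "cc");
 *	}
 *
 * plus atomic_add_barr_int(), which is identical except for an extra
 * "memory" clobber that orders surrounding compiler-visible accesses.
 */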
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * if (*dst == expect) *dst = src (all 32 and 64 bit words)
 *
 * Returns 0 on failure, non-zero on success.
 */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %3,%1 ;	"
	"	sete	%0 ;		"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+a" (expect)			/* 2 */
	: "r" (src)			/* 3 */
	: "memory", "cc");
	return (res);
}

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgq %3,%1 ;	"
	"	sete	%0 ;		"
	"# atomic_cmpset_long"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+a" (expect)			/* 2 */
	: "r" (src)			/* 3 */
	: "memory", "cc");
	return (res);
}
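
/*
 * Illustrative sketch (not part of the interface): cmpset is the usual
 * building block for lock-free read-modify-write loops.  For example, a
 * hypothetical "store the maximum" helper could be written as:
 *
 *	static __inline void
 *	atomic_max_int(volatile u_int *p, u_int v)
 *	{
 *		u_int old;
 *
 *		do {
 *			old = *p;
 *			if (old >= v)
 *				break;
 *		} while (atomic_cmpset_int(p, old, v) == 0);
 *	}
 *
 * The loop retries whenever another processor changed *p between the
 * read of "old" and the cmpxchg.  atomic_max_int() is a hypothetical
 * name used only for this example.
 */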

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}

/*
 * Atomically add the value of v to the long integer pointed to by p and
 * return the previous value of *p.
 */
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddq	%0,%1 ;		"
	"# atomic_fetchadd_long"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsq	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_long"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Jr" ((u_long)(v & 0x3f))	/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandclear_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrq	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandclear_long"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Jr" ((u_long)(v & 0x3f))	/* 2 */
	: "cc");
	return (res);
}
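
/*
 * Illustrative sketch (not part of the interface): fetchadd returns the
 * old value, so it can hand out unique values without a lock, e.g.
 *
 *	static u_int next_id;
 *	...
 *	u_int id = atomic_fetchadd_int(&next_id, 1);
 *
 * gives each caller a distinct "id" even under contention.  Similarly,
 * testandset/testandclear atomically probe and modify a single bit, so
 * "atomic_testandset_int(&flags, 0) == 0" claims bit 0 exactly once
 * among racing processors.  "next_id", "id" and "flags" are
 * placeholders for this example.
 */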

/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, and not mfence.  To avoid false data dependencies, we use a
 * special address for "mem".  In the kernel, we use a private per-cpu
 * cache line.  In user space, we use a word in the stack's red zone
 * (-8(%rsp)).
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */

#if defined(_KERNEL)

/*
 * OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in amd64/vm_machdep.c ensures that the value is correct.
 */
#define	OFFSETOF_MONITORBUF	0x180

#if defined(SMP)
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,%%gs:%0"
	    : "+m" (*(u_int *)OFFSETOF_MONITORBUF) : : "memory", "cc");
}
#else /* _KERNEL && UP */
static __inline void
__storeload_barrier(void)
{

	__compiler_membar();
}
#endif /* SMP */
#else /* !_KERNEL */
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,-8(%%rsp)" : : : "memory", "cc");
}
#endif /* _KERNEL */

#define	ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define	ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack

static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */
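
/*
 * Illustrative sketch (not part of the interface): the seq_cst fence is
 * needed when a store must be ordered before a later load of a
 * different location, as in Dekker-style mutual exclusion:
 *
 *	// CPU i (i is 0 or 1):
 *	flag[i] = 1;
 *	atomic_thread_fence_seq_cst();
 *	if (flag[1 - i] == 0)
 *		...		// enter critical section
 *
 * Without the Store/Load barrier, each CPU's load of the other flag
 * could be performed before its own store became visible, and both
 * CPUs could enter the critical section.  "flag" is a placeholder
 * array for this example.
 */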

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq", v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq", v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq", v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir", v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir", v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir", v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir", v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir", v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir", v);

ATOMIC_ASM(set,	     long,  "orq %1,%0",   "ir", v);
ATOMIC_ASM(clear,    long,  "andq %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addq %1,%0",  "ir", v);
ATOMIC_ASM(subtract, long,  "subq %1,%0",  "ir", v);

#define	ATOMIC_LOADSTORE(TYPE)					\
	ATOMIC_LOAD(TYPE);					\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE

#ifndef WANT_FUNCTIONS

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	xchgq	%1,%0 ;		"
	"# atomic_swap_long"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)
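
/*
 * Illustrative sketch (not part of the interface): readandclear is the
 * usual way to consume a set of pending event bits exactly once:
 *
 *	u_int pending = atomic_readandclear_int(&pending_events);
 *	while (pending != 0) {
 *		int bit = ffs(pending) - 1;
 *		pending &= ~(1u << bit);
 *		handle_event(bit);
 *	}
 *
 * Bits posted by other processors after the swap are picked up on the
 * next pass.  "pending_events" and "handle_event" are placeholders for
 * this example.
 */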

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int
#define	atomic_testandclear_32	atomic_testandclear_int

/* Operations on 64-bit quad words. */
#define	atomic_set_64		atomic_set_long
#define	atomic_set_acq_64	atomic_set_acq_long
#define	atomic_set_rel_64	atomic_set_rel_long
#define	atomic_clear_64		atomic_clear_long
#define	atomic_clear_acq_64	atomic_clear_acq_long
#define	atomic_clear_rel_64	atomic_clear_rel_long
#define	atomic_add_64		atomic_add_long
#define	atomic_add_acq_64	atomic_add_acq_long
#define	atomic_add_rel_64	atomic_add_rel_long
#define	atomic_subtract_64	atomic_subtract_long
#define	atomic_subtract_acq_64	atomic_subtract_acq_long
#define	atomic_subtract_rel_64	atomic_subtract_rel_long
#define	atomic_load_acq_64	atomic_load_acq_long
#define	atomic_store_rel_64	atomic_store_rel_long
#define	atomic_cmpset_64	atomic_cmpset_long
#define	atomic_cmpset_acq_64	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_64	atomic_cmpset_rel_long
#define	atomic_swap_64		atomic_swap_long
#define	atomic_readandclear_64	atomic_readandclear_long
#define	atomic_fetchadd_64	atomic_fetchadd_long
#define	atomic_testandset_64	atomic_testandset_long
#define	atomic_testandclear_64	atomic_testandclear_long

/* Operations on pointers. */
#define	atomic_set_ptr		atomic_set_long
#define	atomic_set_acq_ptr	atomic_set_acq_long
#define	atomic_set_rel_ptr	atomic_set_rel_long
#define	atomic_clear_ptr	atomic_clear_long
#define	atomic_clear_acq_ptr	atomic_clear_acq_long
#define	atomic_clear_rel_ptr	atomic_clear_rel_long
#define	atomic_add_ptr		atomic_add_long
#define	atomic_add_acq_ptr	atomic_add_acq_long
#define	atomic_add_rel_ptr	atomic_add_rel_long
#define	atomic_subtract_ptr	atomic_subtract_long
#define	atomic_subtract_acq_ptr	atomic_subtract_acq_long
#define	atomic_subtract_rel_ptr	atomic_subtract_rel_long
#define	atomic_load_acq_ptr	atomic_load_acq_long
#define	atomic_store_rel_ptr	atomic_store_rel_long
#define	atomic_cmpset_ptr	atomic_cmpset_long
#define	atomic_cmpset_acq_ptr	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_ptr	atomic_cmpset_rel_long
#define	atomic_swap_ptr		atomic_swap_long
#define	atomic_readandclear_ptr	atomic_readandclear_long
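
/*
 * Illustrative sketch (not part of the interface): the _ptr variants
 * operate on pointer-sized (u_long) words, which permits simple
 * lock-free structures, e.g. pushing onto a hypothetical singly-linked
 * stack:
 *
 *	struct node { struct node *next; };
 *	static struct node *top;
 *	...
 *	do {
 *		new->next = top;
 *	} while (atomic_cmpset_ptr((volatile u_long *)&top,
 *	    (u_long)new->next, (u_long)new) == 0);
 *
 * "struct node", "top" and "new" are placeholders; a production version
 * must also deal with the ABA problem on pop.
 */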

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */