/*-
 * Copyright (c) 2013 Ed Schouten <ed@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/stdatomic.h>
#include <sys/types.h>

#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/sysarch.h>

/*
 * Executing statements with interrupts disabled.
 */

#if defined(_KERNEL) && !defined(SMP)
#define	WITHOUT_INTERRUPTS(s) do {					\
	register_t regs;						\
									\
	regs = intr_disable();						\
	do s while (0);							\
	intr_restore(regs);						\
} while (0)
#endif /* _KERNEL && !SMP */

/*
 * Memory barriers.
 *
 * It turns out __sync_synchronize() does not emit any code when used
 * with GCC 4.2. Implement our own version that does work reliably.
 *
 * Although __sync_lock_test_and_set() should only perform an acquire
 * barrier, make it do a full barrier like the other functions. This
 * should make <stdatomic.h>'s atomic_exchange_explicit() work reliably.
 */

#if defined(_KERNEL) && !defined(SMP)
static inline void
do_sync(void)
{

	__asm volatile ("" : : : "memory");
}
#elif __ARM_ARCH >= 6
static inline void
do_sync(void)
{

	dmb();
}
#endif

#if defined(__CLANG_ATOMICS) || defined(__GNUC_ATOMICS)

/*
 * New C11 __atomic_* API.
 */
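/*
 * Editor's note (illustrative, not part of the original interface):
 * the functions below are the library fallbacks the compiler emits
 * calls to when it cannot expand an atomic operation inline. For
 * example, a C11 caller such as
 *
 *	_Atomic uint32_t cnt;
 *	atomic_fetch_add_explicit(&cnt, 1, memory_order_relaxed);
 *
 * is typically lowered on ARMv5 to a call to __atomic_fetch_add_4(),
 * which is one of the routines defined in this section.
 */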
/* ARMv6+ systems should be supported by the compiler. */
#if __ARM_ARCH <= 5

/* Clang doesn't allow us to reimplement builtins without this. */
#ifdef __clang__
#pragma redefine_extname __sync_synchronize_ext __sync_synchronize
#define __sync_synchronize __sync_synchronize_ext
#endif

void
__sync_synchronize(void)
{
}

#ifdef _KERNEL

#ifdef SMP
#error "On SMP systems we should have proper atomic operations."
#endif

/*
 * On uniprocessor systems, we can perform the atomic operations by
 * disabling interrupts.
 */

#define	EMIT_LOAD_N(N, uintN_t)						\
uintN_t									\
__atomic_load_##N(uintN_t *mem, int model __unused)			\
{									\
	uintN_t ret;							\
									\
	WITHOUT_INTERRUPTS({						\
		ret = *mem;						\
	});								\
	return (ret);							\
}

#define	EMIT_STORE_N(N, uintN_t)					\
void									\
__atomic_store_##N(uintN_t *mem, uintN_t val, int model __unused)	\
{									\
									\
	WITHOUT_INTERRUPTS({						\
		*mem = val;						\
	});								\
}

#define	EMIT_COMPARE_EXCHANGE_N(N, uintN_t)				\
_Bool									\
__atomic_compare_exchange_##N(uintN_t *mem, uintN_t *expected,		\
    uintN_t desired, int success __unused, int failure __unused)	\
{									\
	_Bool ret;							\
									\
	WITHOUT_INTERRUPTS({						\
		if (*mem == *expected) {				\
			*mem = desired;					\
			ret = 1;					\
		} else {						\
			*expected = *mem;				\
			ret = 0;					\
		}							\
	});								\
	return (ret);							\
}

#define	EMIT_FETCH_OP_N(N, uintN_t, name, op)				\
uintN_t									\
__atomic_##name##_##N(uintN_t *mem, uintN_t val, int model __unused)	\
{									\
	uintN_t ret;							\
									\
	WITHOUT_INTERRUPTS({						\
		ret = *mem;						\
		*mem op val;						\
	});								\
	return (ret);							\
}

#define	EMIT_ALL_OPS_N(N, uintN_t)					\
EMIT_LOAD_N(N, uintN_t)							\
EMIT_STORE_N(N, uintN_t)						\
EMIT_COMPARE_EXCHANGE_N(N, uintN_t)					\
EMIT_FETCH_OP_N(N, uintN_t, exchange, =)				\
EMIT_FETCH_OP_N(N, uintN_t, fetch_add, +=)				\
EMIT_FETCH_OP_N(N, uintN_t, fetch_and, &=)				\
EMIT_FETCH_OP_N(N, uintN_t, fetch_or, |=)				\
EMIT_FETCH_OP_N(N, uintN_t, fetch_sub, -=)				\
EMIT_FETCH_OP_N(N, uintN_t, fetch_xor, ^=)

EMIT_ALL_OPS_N(1, uint8_t)
EMIT_ALL_OPS_N(2, uint16_t)
EMIT_ALL_OPS_N(4, uint32_t)
EMIT_ALL_OPS_N(8, uint64_t)
#undef	EMIT_ALL_OPS_N

#else /* !_KERNEL */

/*
 * For userspace on uniprocessor systems, we can implement the atomic
 * operations by using a Restartable Atomic Sequence. This makes the
 * kernel restart the code from the beginning when interrupted.
 */
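/*
 * Editor's note: ARM_RAS_START names the fixed per-process location
 * where the sequences below register their start and end addresses.
 * As far as we know, when the kernel interrupts a thread it compares
 * the saved program counter against this registered range and, if it
 * falls inside, rewinds it to the start of the sequence. The
 * load/store pair therefore always completes as a unit, which is
 * what makes it atomic on a uniprocessor.
 */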
#define	EMIT_LOAD_N(N, uintN_t)						\
uintN_t									\
__atomic_load_##N(uintN_t *mem, int model __unused)			\
{									\
									\
	return (*mem);							\
}

#define	EMIT_STORE_N(N, uintN_t)					\
void									\
__atomic_store_##N(uintN_t *mem, uintN_t val, int model __unused)	\
{									\
									\
	*mem = val;							\
}

#define	EMIT_EXCHANGE_N(N, uintN_t, ldr, str)				\
uintN_t									\
__atomic_exchange_##N(uintN_t *mem, uintN_t val, int model __unused)	\
{									\
	uint32_t old, temp, ras_start;					\
									\
	ras_start = ARM_RAS_START;					\
	__asm volatile (						\
		/* Set up Restartable Atomic Sequence. */		\
		"1:"							\
		"\tadr %2, 1b\n"					\
		"\tstr %2, [%5]\n"					\
		"\tadr %2, 2f\n"					\
		"\tstr %2, [%5, #4]\n"					\
									\
		"\t"ldr" %0, %4\n"	/* Load old value. */		\
		"\t"str" %3, %1\n"	/* Store new value. */		\
									\
		/* Tear down Restartable Atomic Sequence. */		\
		"2:"							\
		"\tmov %2, #0x00000000\n"				\
		"\tstr %2, [%5]\n"					\
		"\tmov %2, #0xffffffff\n"				\
		"\tstr %2, [%5, #4]\n"					\
		: "=&r" (old), "=m" (*mem), "=&r" (temp)		\
		: "r" (val), "m" (*mem), "r" (ras_start));		\
	return (old);							\
}

#define	EMIT_COMPARE_EXCHANGE_N(N, uintN_t, ldr, streq)			\
_Bool									\
__atomic_compare_exchange_##N(uintN_t *mem, uintN_t *pexpected,		\
    uintN_t desired, int success __unused, int failure __unused)	\
{									\
	uint32_t expected, old, temp, ras_start;			\
									\
	expected = *pexpected;						\
	ras_start = ARM_RAS_START;					\
	__asm volatile (						\
		/* Set up Restartable Atomic Sequence. */		\
		"1:"							\
		"\tadr %2, 1b\n"					\
		"\tstr %2, [%6]\n"					\
		"\tadr %2, 2f\n"					\
		"\tstr %2, [%6, #4]\n"					\
									\
		"\t"ldr" %0, %5\n"	/* Load old value. */		\
		"\tcmp %0, %3\n"	/* Compare to expected value. */\
		"\t"streq" %4, %1\n"	/* Store new value. */		\
									\
		/* Tear down Restartable Atomic Sequence. */		\
		"2:"							\
		"\tmov %2, #0x00000000\n"				\
		"\tstr %2, [%6]\n"					\
		"\tmov %2, #0xffffffff\n"				\
		"\tstr %2, [%6, #4]\n"					\
		: "=&r" (old), "=m" (*mem), "=&r" (temp)		\
		: "r" (expected), "r" (desired), "m" (*mem),		\
		  "r" (ras_start));					\
	if (old == expected) {						\
		return (1);						\
	} else {							\
		*pexpected = old;					\
		return (0);						\
	}								\
}
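/*
 * Editor's note: the "ret" parameter of EMIT_FETCH_OP_N below is
 * textually "old" or "new" and selects which local variable the
 * generated function returns. One template thereby emits both the
 * fetch_op flavour (returns the previous value, e.g.
 * __atomic_fetch_add_4()) and the op_fetch flavour (returns the
 * result, e.g. __atomic_add_fetch_4()).
 */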
#define	EMIT_FETCH_OP_N(N, uintN_t, ldr, str, name, op, ret)		\
uintN_t									\
__atomic_##name##_##N(uintN_t *mem, uintN_t val, int model __unused)	\
{									\
	uint32_t old, new, ras_start;					\
									\
	ras_start = ARM_RAS_START;					\
	__asm volatile (						\
		/* Set up Restartable Atomic Sequence. */		\
		"1:"							\
		"\tadr %2, 1b\n"					\
		"\tstr %2, [%5]\n"					\
		"\tadr %2, 2f\n"					\
		"\tstr %2, [%5, #4]\n"					\
									\
		"\t"ldr" %0, %4\n"	/* Load old value. */		\
		"\t"op" %2, %0, %3\n"	/* Calculate new value. */	\
		"\t"str" %2, %1\n"	/* Store new value. */		\
									\
		/* Tear down Restartable Atomic Sequence. */		\
		"2:"							\
		"\tmov %2, #0x00000000\n"				\
		"\tstr %2, [%5]\n"					\
		"\tmov %2, #0xffffffff\n"				\
		"\tstr %2, [%5, #4]\n"					\
		: "=&r" (old), "=m" (*mem), "=&r" (new)			\
		: "r" (val), "m" (*mem), "r" (ras_start));		\
	return (ret);							\
}

#define	EMIT_ALL_OPS_N(N, uintN_t, ldr, str, streq)			\
EMIT_LOAD_N(N, uintN_t)							\
EMIT_STORE_N(N, uintN_t)						\
EMIT_EXCHANGE_N(N, uintN_t, ldr, str)					\
EMIT_COMPARE_EXCHANGE_N(N, uintN_t, ldr, streq)				\
EMIT_FETCH_OP_N(N, uintN_t, ldr, str, fetch_add, "add", old)		\
EMIT_FETCH_OP_N(N, uintN_t, ldr, str, fetch_and, "and", old)		\
EMIT_FETCH_OP_N(N, uintN_t, ldr, str, fetch_or, "orr", old)		\
EMIT_FETCH_OP_N(N, uintN_t, ldr, str, fetch_sub, "sub", old)		\
EMIT_FETCH_OP_N(N, uintN_t, ldr, str, fetch_xor, "eor", old)		\
EMIT_FETCH_OP_N(N, uintN_t, ldr, str, add_fetch, "add", new)		\
EMIT_FETCH_OP_N(N, uintN_t, ldr, str, and_fetch, "and", new)		\
EMIT_FETCH_OP_N(N, uintN_t, ldr, str, or_fetch, "orr", new)		\
EMIT_FETCH_OP_N(N, uintN_t, ldr, str, sub_fetch, "sub", new)		\
EMIT_FETCH_OP_N(N, uintN_t, ldr, str, xor_fetch, "eor", new)

EMIT_ALL_OPS_N(1, uint8_t, "ldrb", "strb", "strbeq")
EMIT_ALL_OPS_N(2, uint16_t, "ldrh", "strh", "strheq")
EMIT_ALL_OPS_N(4, uint32_t, "ldr", "str", "streq")
#undef	EMIT_ALL_OPS_N

#endif /* _KERNEL */

#endif /* __ARM_ARCH */

#endif /* __CLANG_ATOMICS || __GNUC_ATOMICS */

#if defined(__SYNC_ATOMICS) || defined(EMIT_SYNC_ATOMICS)
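/*
 * Editor's note: Clang refuses to let us define its __sync_*
 * builtins under their own names, so the implementations below are
 * compiled as *_c functions and these pragmas rename the emitted
 * symbols back to the builtin names. GCC builds achieve the same
 * effect with the __strong_reference() aliases further down.
 */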
#ifdef __clang__
#pragma redefine_extname __sync_lock_test_and_set_1_c __sync_lock_test_and_set_1
#pragma redefine_extname __sync_lock_test_and_set_2_c __sync_lock_test_and_set_2
#pragma redefine_extname __sync_lock_test_and_set_4_c __sync_lock_test_and_set_4
#pragma redefine_extname __sync_val_compare_and_swap_1_c __sync_val_compare_and_swap_1
#pragma redefine_extname __sync_val_compare_and_swap_2_c __sync_val_compare_and_swap_2
#pragma redefine_extname __sync_val_compare_and_swap_4_c __sync_val_compare_and_swap_4
#pragma redefine_extname __sync_fetch_and_add_1_c __sync_fetch_and_add_1
#pragma redefine_extname __sync_fetch_and_add_2_c __sync_fetch_and_add_2
#pragma redefine_extname __sync_fetch_and_add_4_c __sync_fetch_and_add_4
#pragma redefine_extname __sync_fetch_and_and_1_c __sync_fetch_and_and_1
#pragma redefine_extname __sync_fetch_and_and_2_c __sync_fetch_and_and_2
#pragma redefine_extname __sync_fetch_and_and_4_c __sync_fetch_and_and_4
#pragma redefine_extname __sync_fetch_and_or_1_c __sync_fetch_and_or_1
#pragma redefine_extname __sync_fetch_and_or_2_c __sync_fetch_and_or_2
#pragma redefine_extname __sync_fetch_and_or_4_c __sync_fetch_and_or_4
#pragma redefine_extname __sync_fetch_and_xor_1_c __sync_fetch_and_xor_1
#pragma redefine_extname __sync_fetch_and_xor_2_c __sync_fetch_and_xor_2
#pragma redefine_extname __sync_fetch_and_xor_4_c __sync_fetch_and_xor_4
#pragma redefine_extname __sync_fetch_and_sub_1_c __sync_fetch_and_sub_1
#pragma redefine_extname __sync_fetch_and_sub_2_c __sync_fetch_and_sub_2
#pragma redefine_extname __sync_fetch_and_sub_4_c __sync_fetch_and_sub_4
#endif

/*
 * Old __sync_* API.
 */

#if __ARM_ARCH >= 6

/* Implementations for old GCC versions, lacking support for atomics. */

typedef union {
	uint8_t		v8[4];
	uint32_t	v32;
} reg_t;

/*
 * Given a memory address pointing to an 8-bit or 16-bit integer, return
 * the address of the 32-bit word containing it.
 */

static inline uint32_t *
round_to_word(void *ptr)
{

	return ((uint32_t *)((intptr_t)ptr & ~3));
}

/*
 * Utility functions for loading and storing 8-bit and 16-bit integers
 * in 32-bit words at an offset corresponding with the location of the
 * atomic variable.
 */

static inline void
put_1(reg_t *r, const uint8_t *offset_ptr, uint8_t val)
{
	size_t offset;

	offset = (intptr_t)offset_ptr & 3;
	r->v8[offset] = val;
}

static inline uint8_t
get_1(const reg_t *r, const uint8_t *offset_ptr)
{
	size_t offset;

	offset = (intptr_t)offset_ptr & 3;
	return (r->v8[offset]);
}

static inline void
put_2(reg_t *r, const uint16_t *offset_ptr, uint16_t val)
{
	size_t offset;
	union {
		uint16_t in;
		uint8_t out[2];
	} bytes;

	offset = (intptr_t)offset_ptr & 3;
	bytes.in = val;
	r->v8[offset] = bytes.out[0];
	r->v8[offset + 1] = bytes.out[1];
}

static inline uint16_t
get_2(const reg_t *r, const uint16_t *offset_ptr)
{
	size_t offset;
	union {
		uint8_t in[2];
		uint16_t out;
	} bytes;

	offset = (intptr_t)offset_ptr & 3;
	bytes.in[0] = r->v8[offset];
	bytes.in[1] = r->v8[offset + 1];
	return (bytes.out);
}
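/*
 * Editor's note, a worked example of the masking used below
 * (assuming a little-endian configuration): for a uint8_t located at
 * byte offset 1 within its word, put_1(&posmask, mem, ~0) yields
 * posmask.v32 == 0x0000ff00 and hence negmask == 0xffff00ff. ANDing
 * with the negative mask clears only the target byte lane, after
 * which ORing in the similarly positioned value fills it, leaving
 * the neighbouring bytes untouched.
 */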
/*
 * 8-bit and 16-bit routines.
 *
 * These operations are not natively supported by the CPU, so we use
 * some shifting and bitmasking on top of the 32-bit instructions.
 */

#define	EMIT_LOCK_TEST_AND_SET_N(N, uintN_t)				\
uintN_t									\
__sync_lock_test_and_set_##N##_c(uintN_t *mem, uintN_t val)		\
{									\
	uint32_t *mem32;						\
	reg_t val32, negmask, old;					\
	uint32_t temp1, temp2;						\
									\
	mem32 = round_to_word(mem);					\
	val32.v32 = 0x00000000;						\
	put_##N(&val32, mem, val);					\
	negmask.v32 = 0xffffffff;					\
	put_##N(&negmask, mem, 0);					\
									\
	do_sync();							\
	__asm volatile (						\
		"1:"							\
		"\tldrex %0, %6\n"	/* Load old value. */		\
		"\tand %2, %5, %0\n"	/* Remove the old value. */	\
		"\torr %2, %2, %4\n"	/* Put in the new value. */	\
		"\tstrex %3, %2, %1\n"	/* Attempt to store. */		\
		"\tcmp %3, #0\n"	/* Did it succeed? */		\
		"\tbne 1b\n"		/* Spin if failed. */		\
		: "=&r" (old.v32), "=m" (*mem32), "=&r" (temp1),	\
		  "=&r" (temp2)						\
		: "r" (val32.v32), "r" (negmask.v32), "m" (*mem32));	\
	return (get_##N(&old, mem));					\
}

EMIT_LOCK_TEST_AND_SET_N(1, uint8_t)
EMIT_LOCK_TEST_AND_SET_N(2, uint16_t)
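/*
 * Editor's note: the ldrex/strex pairs in these routines are ARM's
 * load-exclusive/store-exclusive instructions. ldrex marks the
 * address for exclusive access; strex only performs the store if
 * that marking still holds, writing 0 into its status register on
 * success and 1 on failure. Every loop therefore branches back to
 * retry whenever the status register is non-zero.
 */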
#define	EMIT_VAL_COMPARE_AND_SWAP_N(N, uintN_t)			\
uintN_t									\
__sync_val_compare_and_swap_##N##_c(uintN_t *mem, uintN_t expected,	\
    uintN_t desired)							\
{									\
	uint32_t *mem32;						\
	reg_t expected32, desired32, posmask, old;			\
	uint32_t negmask, temp1, temp2;					\
									\
	mem32 = round_to_word(mem);					\
	expected32.v32 = 0x00000000;					\
	put_##N(&expected32, mem, expected);				\
	desired32.v32 = 0x00000000;					\
	put_##N(&desired32, mem, desired);				\
	posmask.v32 = 0x00000000;					\
	put_##N(&posmask, mem, ~0);					\
	negmask = ~posmask.v32;						\
									\
	do_sync();							\
	__asm volatile (						\
		"1:"							\
		"\tldrex %0, %8\n"	/* Load old value. */		\
		"\tand %2, %6, %0\n"	/* Isolate the old value. */	\
		"\tcmp %2, %4\n"	/* Compare to expected value. */\
		"\tbne 2f\n"		/* Values are unequal. */	\
		"\tand %2, %7, %0\n"	/* Remove the old value. */	\
		"\torr %2, %5\n"	/* Put in the new value. */	\
		"\tstrex %3, %2, %1\n"	/* Attempt to store. */		\
		"\tcmp %3, #0\n"	/* Did it succeed? */		\
		"\tbne 1b\n"		/* Spin if failed. */		\
		"2:"							\
		: "=&r" (old), "=m" (*mem32), "=&r" (temp1),		\
		  "=&r" (temp2)						\
		: "r" (expected32.v32), "r" (desired32.v32),		\
		  "r" (posmask.v32), "r" (negmask), "m" (*mem32));	\
	return (get_##N(&old, mem));					\
}

EMIT_VAL_COMPARE_AND_SWAP_N(1, uint8_t)
EMIT_VAL_COMPARE_AND_SWAP_N(2, uint16_t)

#define	EMIT_ARITHMETIC_FETCH_AND_OP_N(N, uintN_t, name, op)		\
uintN_t									\
__sync_##name##_##N##_c(uintN_t *mem, uintN_t val)			\
{									\
	uint32_t *mem32;						\
	reg_t val32, posmask, old;					\
	uint32_t negmask, temp1, temp2;					\
									\
	mem32 = round_to_word(mem);					\
	val32.v32 = 0x00000000;						\
	put_##N(&val32, mem, val);					\
	posmask.v32 = 0x00000000;					\
	put_##N(&posmask, mem, ~0);					\
	negmask = ~posmask.v32;						\
									\
	do_sync();							\
	__asm volatile (						\
		"1:"							\
		"\tldrex %0, %7\n"	/* Load old value. */		\
		"\t"op" %2, %0, %4\n"	/* Calculate new value. */	\
		"\tand %2, %5\n"	/* Isolate the new value. */	\
		"\tand %3, %6, %0\n"	/* Remove the old value. */	\
		"\torr %2, %2, %3\n"	/* Put in the new value. */	\
		"\tstrex %3, %2, %1\n"	/* Attempt to store. */		\
		"\tcmp %3, #0\n"	/* Did it succeed? */		\
		"\tbne 1b\n"		/* Spin if failed. */		\
		: "=&r" (old.v32), "=m" (*mem32), "=&r" (temp1),	\
		  "=&r" (temp2)						\
		: "r" (val32.v32), "r" (posmask.v32), "r" (negmask),	\
		  "m" (*mem32));					\
	return (get_##N(&old, mem));					\
}

EMIT_ARITHMETIC_FETCH_AND_OP_N(1, uint8_t, fetch_and_add, "add")
EMIT_ARITHMETIC_FETCH_AND_OP_N(1, uint8_t, fetch_and_sub, "sub")
EMIT_ARITHMETIC_FETCH_AND_OP_N(2, uint16_t, fetch_and_add, "add")
EMIT_ARITHMETIC_FETCH_AND_OP_N(2, uint16_t, fetch_and_sub, "sub")
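/*
 * Editor's note: the arithmetic template above masks the computed
 * word before storing it because an add or subtract performed on
 * the full 32-bit word may carry or borrow into the bytes adjacent
 * to the target lane. The bitwise template below can omit that
 * masking, as AND, OR and XOR never propagate bits across byte
 * lanes.
 */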
#define	EMIT_BITWISE_FETCH_AND_OP_N(N, uintN_t, name, op, idempotence)	\
uintN_t									\
__sync_##name##_##N##_c(uintN_t *mem, uintN_t val)			\
{									\
	uint32_t *mem32;						\
	reg_t val32, old;						\
	uint32_t temp1, temp2;						\
									\
	mem32 = round_to_word(mem);					\
	val32.v32 = idempotence ? 0xffffffff : 0x00000000;		\
	put_##N(&val32, mem, val);					\
									\
	do_sync();							\
	__asm volatile (						\
		"1:"							\
		"\tldrex %0, %5\n"	/* Load old value. */		\
		"\t"op" %2, %4, %0\n"	/* Calculate new value. */	\
		"\tstrex %3, %2, %1\n"	/* Attempt to store. */		\
		"\tcmp %3, #0\n"	/* Did it succeed? */		\
		"\tbne 1b\n"		/* Spin if failed. */		\
		: "=&r" (old.v32), "=m" (*mem32), "=&r" (temp1),	\
		  "=&r" (temp2)						\
		: "r" (val32.v32), "m" (*mem32));			\
	return (get_##N(&old, mem));					\
}

EMIT_BITWISE_FETCH_AND_OP_N(1, uint8_t, fetch_and_and, "and", 1)
EMIT_BITWISE_FETCH_AND_OP_N(1, uint8_t, fetch_and_or, "orr", 0)
EMIT_BITWISE_FETCH_AND_OP_N(1, uint8_t, fetch_and_xor, "eor", 0)
EMIT_BITWISE_FETCH_AND_OP_N(2, uint16_t, fetch_and_and, "and", 1)
EMIT_BITWISE_FETCH_AND_OP_N(2, uint16_t, fetch_and_or, "orr", 0)
EMIT_BITWISE_FETCH_AND_OP_N(2, uint16_t, fetch_and_xor, "eor", 0)
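/*
 * Editor's note: the "idempotence" flag above pads the bytes outside
 * the target lane with the identity element of the operation: all
 * one-bits for AND and all zero-bits for OR and XOR. The
 * neighbouring bytes are thus rewritten with their own values.
 */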
/*
 * 32-bit routines.
 */

uint32_t
__sync_lock_test_and_set_4_c(uint32_t *mem, uint32_t val)
{
	uint32_t old, temp;

	do_sync();
	__asm volatile (
		"1:"
		"\tldrex %0, %4\n"	/* Load old value. */
		"\tstrex %2, %3, %1\n"	/* Attempt to store. */
		"\tcmp %2, #0\n"	/* Did it succeed? */
		"\tbne 1b\n"		/* Spin if failed. */
		: "=&r" (old), "=m" (*mem), "=&r" (temp)
		: "r" (val), "m" (*mem));
	return (old);
}

uint32_t
__sync_val_compare_and_swap_4_c(uint32_t *mem, uint32_t expected,
    uint32_t desired)
{
	uint32_t old, temp;

	do_sync();
	__asm volatile (
		"1:"
		"\tldrex %0, %5\n"	/* Load old value. */
		"\tcmp %0, %3\n"	/* Compare to expected value. */
		"\tbne 2f\n"		/* Values are unequal. */
		"\tstrex %2, %4, %1\n"	/* Attempt to store. */
		"\tcmp %2, #0\n"	/* Did it succeed? */
		"\tbne 1b\n"		/* Spin if failed. */
		"2:"
		: "=&r" (old), "=m" (*mem), "=&r" (temp)
		: "r" (expected), "r" (desired), "m" (*mem));
	return (old);
}

#define	EMIT_FETCH_AND_OP_4(name, op)					\
uint32_t								\
__sync_##name##_4##_c(uint32_t *mem, uint32_t val)			\
{									\
	uint32_t old, temp1, temp2;					\
									\
	do_sync();							\
	__asm volatile (						\
		"1:"							\
		"\tldrex %0, %5\n"	/* Load old value. */		\
		"\t"op" %2, %0, %4\n"	/* Calculate new value. */	\
		"\tstrex %3, %2, %1\n"	/* Attempt to store. */		\
		"\tcmp %3, #0\n"	/* Did it succeed? */		\
		"\tbne 1b\n"		/* Spin if failed. */		\
		: "=&r" (old), "=m" (*mem), "=&r" (temp1),		\
		  "=&r" (temp2)						\
		: "r" (val), "m" (*mem));				\
	return (old);							\
}

EMIT_FETCH_AND_OP_4(fetch_and_add, "add")
EMIT_FETCH_AND_OP_4(fetch_and_and, "and")
EMIT_FETCH_AND_OP_4(fetch_and_or, "orr")
EMIT_FETCH_AND_OP_4(fetch_and_sub, "sub")
EMIT_FETCH_AND_OP_4(fetch_and_xor, "eor")
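/*
 * Editor's note: __strong_reference() is the <sys/cdefs.h> macro
 * that emits a strong alias for a symbol. GCC builds use it below to
 * publish the *_c implementations above under the canonical
 * __sync_* names, mirroring what the redefine_extname pragmas do
 * for Clang.
 */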
#ifndef __clang__
__strong_reference(__sync_lock_test_and_set_1_c, __sync_lock_test_and_set_1);
__strong_reference(__sync_lock_test_and_set_2_c, __sync_lock_test_and_set_2);
__strong_reference(__sync_lock_test_and_set_4_c, __sync_lock_test_and_set_4);
__strong_reference(__sync_val_compare_and_swap_1_c, __sync_val_compare_and_swap_1);
__strong_reference(__sync_val_compare_and_swap_2_c, __sync_val_compare_and_swap_2);
__strong_reference(__sync_val_compare_and_swap_4_c, __sync_val_compare_and_swap_4);
__strong_reference(__sync_fetch_and_add_1_c, __sync_fetch_and_add_1);
__strong_reference(__sync_fetch_and_add_2_c, __sync_fetch_and_add_2);
__strong_reference(__sync_fetch_and_add_4_c, __sync_fetch_and_add_4);
__strong_reference(__sync_fetch_and_and_1_c, __sync_fetch_and_and_1);
__strong_reference(__sync_fetch_and_and_2_c, __sync_fetch_and_and_2);
__strong_reference(__sync_fetch_and_and_4_c, __sync_fetch_and_and_4);
__strong_reference(__sync_fetch_and_sub_1_c, __sync_fetch_and_sub_1);
__strong_reference(__sync_fetch_and_sub_2_c, __sync_fetch_and_sub_2);
__strong_reference(__sync_fetch_and_sub_4_c, __sync_fetch_and_sub_4);
__strong_reference(__sync_fetch_and_or_1_c, __sync_fetch_and_or_1);
__strong_reference(__sync_fetch_and_or_2_c, __sync_fetch_and_or_2);
__strong_reference(__sync_fetch_and_or_4_c, __sync_fetch_and_or_4);
__strong_reference(__sync_fetch_and_xor_1_c, __sync_fetch_and_xor_1);
__strong_reference(__sync_fetch_and_xor_2_c, __sync_fetch_and_xor_2);
__strong_reference(__sync_fetch_and_xor_4_c, __sync_fetch_and_xor_4);
#endif /* !__clang__ */

#else /* __ARM_ARCH < 6 */

#ifdef _KERNEL

#ifdef SMP
#error "On SMP systems we should have proper atomic operations."
#endif

/*
 * On uniprocessor systems, we can perform the atomic operations by
 * disabling interrupts.
 */

#define	EMIT_VAL_COMPARE_AND_SWAP_N(N, uintN_t)				\
uintN_t									\
__sync_val_compare_and_swap_##N(uintN_t *mem, uintN_t expected,		\
    uintN_t desired)							\
{									\
	uintN_t ret;							\
									\
	WITHOUT_INTERRUPTS({						\
		ret = *mem;						\
		if (*mem == expected)					\
			*mem = desired;					\
	});								\
	return (ret);							\
}

#define	EMIT_FETCH_AND_OP_N(N, uintN_t, name, op)			\
uintN_t									\
__sync_##name##_##N(uintN_t *mem, uintN_t val)				\
{									\
	uintN_t ret;							\
									\
	WITHOUT_INTERRUPTS({						\
		ret = *mem;						\
		*mem op val;						\
	});								\
	return (ret);							\
}

#define	EMIT_ALL_OPS_N(N, uintN_t)					\
EMIT_VAL_COMPARE_AND_SWAP_N(N, uintN_t)					\
EMIT_FETCH_AND_OP_N(N, uintN_t, lock_test_and_set, =)			\
EMIT_FETCH_AND_OP_N(N, uintN_t, fetch_and_add, +=)			\
EMIT_FETCH_AND_OP_N(N, uintN_t, fetch_and_and, &=)			\
EMIT_FETCH_AND_OP_N(N, uintN_t, fetch_and_or, |=)			\
EMIT_FETCH_AND_OP_N(N, uintN_t, fetch_and_sub, -=)			\
EMIT_FETCH_AND_OP_N(N, uintN_t, fetch_and_xor, ^=)

EMIT_ALL_OPS_N(1, uint8_t)
EMIT_ALL_OPS_N(2, uint16_t)
EMIT_ALL_OPS_N(4, uint32_t)
EMIT_ALL_OPS_N(8, uint64_t)
#undef	EMIT_ALL_OPS_N
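/*
 * Editor's note, an illustrative (hypothetical) caller of the old
 * API: a reference count could be dropped with
 *
 *	if (__sync_fetch_and_sub_4(&refs, 1) == 1)
 *		...		(last reference was just released)
 *
 * since the fetch-style routines above return the value the memory
 * location held before the operation was applied.
 */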
#else /* !_KERNEL */

/*
 * For userspace on uniprocessor systems, we can implement the atomic
 * operations by using a Restartable Atomic Sequence. This makes the
 * kernel restart the code from the beginning when interrupted.
 */

#define	EMIT_LOCK_TEST_AND_SET_N(N, uintN_t, ldr, str)			\
uintN_t									\
__sync_lock_test_and_set_##N##_c(uintN_t *mem, uintN_t val)		\
{									\
	uint32_t old, temp, ras_start;					\
									\
	ras_start = ARM_RAS_START;					\
	__asm volatile (						\
		/* Set up Restartable Atomic Sequence. */		\
		"1:"							\
		"\tadr %2, 1b\n"					\
		"\tstr %2, [%5]\n"					\
		"\tadr %2, 2f\n"					\
		"\tstr %2, [%5, #4]\n"					\
									\
		"\t"ldr" %0, %4\n"	/* Load old value. */		\
		"\t"str" %3, %1\n"	/* Store new value. */		\
									\
		/* Tear down Restartable Atomic Sequence. */		\
		"2:"							\
		"\tmov %2, #0x00000000\n"				\
		"\tstr %2, [%5]\n"					\
		"\tmov %2, #0xffffffff\n"				\
		"\tstr %2, [%5, #4]\n"					\
		: "=&r" (old), "=m" (*mem), "=&r" (temp)		\
		: "r" (val), "m" (*mem), "r" (ras_start));		\
	return (old);							\
}

#define	EMIT_VAL_COMPARE_AND_SWAP_N(N, uintN_t, ldr, streq)		\
uintN_t									\
__sync_val_compare_and_swap_##N##_c(uintN_t *mem, uintN_t expected,	\
    uintN_t desired)							\
{									\
	uint32_t old, temp, ras_start;					\
									\
	ras_start = ARM_RAS_START;					\
	__asm volatile (						\
		/* Set up Restartable Atomic Sequence. */		\
		"1:"							\
		"\tadr %2, 1b\n"					\
		"\tstr %2, [%6]\n"					\
		"\tadr %2, 2f\n"					\
		"\tstr %2, [%6, #4]\n"					\
									\
		"\t"ldr" %0, %5\n"	/* Load old value. */		\
		"\tcmp %0, %3\n"	/* Compare to expected value. */\
		"\t"streq" %4, %1\n"	/* Store new value. */		\
									\
		/* Tear down Restartable Atomic Sequence. */		\
		"2:"							\
		"\tmov %2, #0x00000000\n"				\
		"\tstr %2, [%6]\n"					\
		"\tmov %2, #0xffffffff\n"				\
		"\tstr %2, [%6, #4]\n"					\
		: "=&r" (old), "=m" (*mem), "=&r" (temp)		\
		: "r" (expected), "r" (desired), "m" (*mem),		\
		  "r" (ras_start));					\
	return (old);							\
}

#define	EMIT_FETCH_AND_OP_N(N, uintN_t, ldr, str, name, op)		\
uintN_t									\
__sync_##name##_##N##_c(uintN_t *mem, uintN_t val)			\
{									\
	uint32_t old, temp, ras_start;					\
									\
	ras_start = ARM_RAS_START;					\
	__asm volatile (						\
		/* Set up Restartable Atomic Sequence. */		\
		"1:"							\
		"\tadr %2, 1b\n"					\
		"\tstr %2, [%5]\n"					\
		"\tadr %2, 2f\n"					\
		"\tstr %2, [%5, #4]\n"					\
									\
		"\t"ldr" %0, %4\n"	/* Load old value. */		\
		"\t"op" %2, %0, %3\n"	/* Calculate new value. */	\
		"\t"str" %2, %1\n"	/* Store new value. */		\
									\
		/* Tear down Restartable Atomic Sequence. */		\
		"2:"							\
		"\tmov %2, #0x00000000\n"				\
		"\tstr %2, [%5]\n"					\
		"\tmov %2, #0xffffffff\n"				\
		"\tstr %2, [%5, #4]\n"					\
		: "=&r" (old), "=m" (*mem), "=&r" (temp)		\
		: "r" (val), "m" (*mem), "r" (ras_start));		\
	return (old);							\
}

#define	EMIT_ALL_OPS_N(N, uintN_t, ldr, str, streq)			\
EMIT_LOCK_TEST_AND_SET_N(N, uintN_t, ldr, str)				\
EMIT_VAL_COMPARE_AND_SWAP_N(N, uintN_t, ldr, streq)			\
EMIT_FETCH_AND_OP_N(N, uintN_t, ldr, str, fetch_and_add, "add")		\
EMIT_FETCH_AND_OP_N(N, uintN_t, ldr, str, fetch_and_and, "and")		\
EMIT_FETCH_AND_OP_N(N, uintN_t, ldr, str, fetch_and_or, "orr")		\
EMIT_FETCH_AND_OP_N(N, uintN_t, ldr, str, fetch_and_sub, "sub")		\
EMIT_FETCH_AND_OP_N(N, uintN_t, ldr, str, fetch_and_xor, "eor")
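/*
 * Editor's note: the conditional byte and halfword stores are
 * spelled differently per compiler because Clang's integrated
 * assembler expects ARM unified syntax, where the condition comes
 * last ("strbeq"), while the GCC toolchains this file targeted used
 * the older divided syntax with the condition before the size
 * suffix ("streqb").
 */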
#ifdef __clang__
EMIT_ALL_OPS_N(1, uint8_t, "ldrb", "strb", "strbeq")
EMIT_ALL_OPS_N(2, uint16_t, "ldrh", "strh", "strheq")
#else
EMIT_ALL_OPS_N(1, uint8_t, "ldrb", "strb", "streqb")
EMIT_ALL_OPS_N(2, uint16_t, "ldrh", "strh", "streqh")
#endif
EMIT_ALL_OPS_N(4, uint32_t, "ldr", "str", "streq")

#ifndef __clang__
__strong_reference(__sync_lock_test_and_set_1_c, __sync_lock_test_and_set_1);
__strong_reference(__sync_lock_test_and_set_2_c, __sync_lock_test_and_set_2);
__strong_reference(__sync_lock_test_and_set_4_c, __sync_lock_test_and_set_4);
__strong_reference(__sync_val_compare_and_swap_1_c, __sync_val_compare_and_swap_1);
__strong_reference(__sync_val_compare_and_swap_2_c, __sync_val_compare_and_swap_2);
__strong_reference(__sync_val_compare_and_swap_4_c, __sync_val_compare_and_swap_4);
__strong_reference(__sync_fetch_and_add_1_c, __sync_fetch_and_add_1);
__strong_reference(__sync_fetch_and_add_2_c, __sync_fetch_and_add_2);
__strong_reference(__sync_fetch_and_add_4_c, __sync_fetch_and_add_4);
__strong_reference(__sync_fetch_and_and_1_c, __sync_fetch_and_and_1);
__strong_reference(__sync_fetch_and_and_2_c, __sync_fetch_and_and_2);
__strong_reference(__sync_fetch_and_and_4_c, __sync_fetch_and_and_4);
__strong_reference(__sync_fetch_and_sub_1_c, __sync_fetch_and_sub_1);
__strong_reference(__sync_fetch_and_sub_2_c, __sync_fetch_and_sub_2);
__strong_reference(__sync_fetch_and_sub_4_c, __sync_fetch_and_sub_4);
__strong_reference(__sync_fetch_and_or_1_c, __sync_fetch_and_or_1);
__strong_reference(__sync_fetch_and_or_2_c, __sync_fetch_and_or_2);
__strong_reference(__sync_fetch_and_or_4_c, __sync_fetch_and_or_4);
__strong_reference(__sync_fetch_and_xor_1_c, __sync_fetch_and_xor_1);
__strong_reference(__sync_fetch_and_xor_2_c, __sync_fetch_and_xor_2);
__strong_reference(__sync_fetch_and_xor_4_c, __sync_fetch_and_xor_4);
#endif /* !__clang__ */

#endif /* _KERNEL */

#endif /* __ARM_ARCH */

#endif /* __SYNC_ATOMICS */