Lines Matching refs:a64
192 static atomic_uint64_t *to_atomic(const volatile a64 *a) {
193 return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
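For context, to_atomic() lets the runtime reuse sanitizer_common's atomic_uint64_t operations directly on application memory: it sheds const/volatile and reinterprets the pointer. A minimal standalone sketch of the pattern (the wrapper struct below is illustrative, not the real sanitizer_common type):

    #include <cstdint>

    typedef int64_t a64;  // stand-in for tsan's 64-bit app-level type

    struct atomic_uint64_t {  // illustrative wrapper, not the real one
      volatile uint64_t val_dont_use;
    };

    // Same cast as line 193 above: drop const/volatile, then reinterpret
    // the application pointer as the internal wrapper type.
    static atomic_uint64_t *to_atomic(const volatile a64 *a) {
      return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
    }

    // The cast is only sound because the types share size and alignment.
    static_assert(sizeof(atomic_uint64_t) == sizeof(a64), "size must match");
    static_assert(alignof(atomic_uint64_t) == alignof(a64), "align must match");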
538 a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
565 void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
592 a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
619 a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
646 a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
673 a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
700 a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
727 a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
754 a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
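The __tsan_atomic64_* functions above are the 64-bit entry points that TSan-instrumented code calls in place of raw atomic instructions. A hedged sketch of how an __atomic builtin is lowered onto them (declarations transcribed from the signatures above; the numeric morder values assume tsan mirrors the __ATOMIC_*/C++11 numbering, 0 = relaxed through 5 = seq_cst):

    #include <cstdint>

    typedef int64_t a64;
    typedef int morder;  // assumed: 0=relaxed, 2=acquire, 3=release,
                         //          4=acq_rel, 5=seq_cst

    extern "C" {
    a64 __tsan_atomic64_load(const volatile a64 *a, morder mo);
    void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo);
    a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo);
    }

    // Under -fsanitize=thread, something like
    //     __atomic_fetch_add(&counter, 1, __ATOMIC_ACQ_REL);
    // becomes a runtime call along the lines of:
    a64 bump(volatile a64 *counter) {
      return __tsan_atomic64_fetch_add(counter, 1, /*mo_acq_rel=*/4);
    }

Linking such a sketch requires the tsan runtime itself; standalone it only compiles.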
784 int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
816 int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
848 a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
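The three compare-exchange flavors differ only in how they report the outcome: the strong/weak forms return an int success flag, while compare_exchange_val returns the value actually observed. A sketch under the same morder assumption as above:

    #include <cstdint>

    typedef int64_t a64;
    typedef int morder;  // same assumption as the previous sketch

    extern "C" {
    int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c,
                                                a64 v, morder mo, morder fmo);
    a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
                                             morder mo, morder fmo);
    }

    // Following the usual __atomic_compare_exchange convention (assumed
    // here), the strong form writes the observed value back through c
    // on failure and returns nonzero on success.
    bool cas_once(volatile a64 *p, a64 expected, a64 desired) {
      a64 c = expected;
      return __tsan_atomic64_compare_exchange_strong(
                 p, &c, desired, /*mo_acq_rel=*/4, /*mo_acquire=*/2) != 0;
    }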
904 ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
914 ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
924 ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
934 ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
949 a64 cur = 0;
950 a64 cmp = *(a64*)(a+8);
951 ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
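The __tsan_go_atomic64_* wrappers (the ATOMIC/ATOMIC_RET call sites above) receive a single packed argument buffer from the Go runtime rather than a C argument list: per the casts above, offset 0 holds the target address, offset 8 the first operand, and offset 16 the second operand or result slot. An illustrative view of the buffer the compare-exchange wrapper decodes, assuming a 64-bit target (the struct and field names are hypothetical; only the offsets come from the casts above):

    #include <cstddef>
    #include <cstdint>

    typedef int64_t a64;

    // Layout decoded at line 951 above:
    //   *(a64**)a       -> addr  (offset 0)
    //   *(a64*)(a + 8)  -> cmp   (offset 8)
    //   *(a64*)(a + 16) -> xchg  (offset 16)
    struct GoCasArgs {  // hypothetical name
      a64 *addr;  // target address
      a64 cmp;    // expected value
      a64 xchg;   // replacement value
    };

    static_assert(offsetof(GoCasArgs, cmp) == 8, "offset assumed above");
    static_assert(offsetof(GoCasArgs, xchg) == 16, "offset assumed above");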