// Copyright 2016 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#pragma once

#include <stdbool.h>
#include <stdint.h>
#include <zircon/compiler.h>

__BEGIN_CDECLS

// Strongly ordered (sequentially consistent) versions of the atomic routines,
// as implemented by the compiler with arch-dependent memory barriers.
static inline int atomic_swap(volatile int* ptr, int val) {
    return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);
}

static inline int atomic_add(volatile int* ptr, int val) {
    return __atomic_fetch_add(ptr, val, __ATOMIC_SEQ_CST);
}

static inline int atomic_and(volatile int* ptr, int val) {
    return __atomic_fetch_and(ptr, val, __ATOMIC_SEQ_CST);
}

static inline int atomic_or(volatile int* ptr, int val) {
    return __atomic_fetch_or(ptr, val, __ATOMIC_SEQ_CST);
}

static inline int atomic_xor(volatile int* ptr, int val) {
    return __atomic_fetch_xor(ptr, val, __ATOMIC_SEQ_CST);
}

static inline bool atomic_cmpxchg(volatile int* ptr, int* oldval, int newval) {
    return __atomic_compare_exchange_n(ptr, oldval, newval, false,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

static inline int atomic_load(volatile int* ptr) {
    return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
}

static inline void atomic_store(volatile int* ptr, int newval) {
    __atomic_store_n(ptr, newval, __ATOMIC_SEQ_CST);
}
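
// Illustrative sketch (not part of the original header; the helper name is
// hypothetical): a typical compare-and-swap retry loop built on
// atomic_cmpxchg(). On failure, the exchange writes the value it actually
// observed back into |*oldval|, so the loop can retry without reloading.
static inline void example_atomic_store_max(volatile int* ptr, int val) {
    int observed = atomic_load(ptr);
    while (observed < val && !atomic_cmpxchg(ptr, &observed, val)) {
        // |observed| was refreshed by the failed exchange; try again.
    }
}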

// relaxed versions of the above
static inline int atomic_swap_relaxed(volatile int* ptr, int val) {
    return __atomic_exchange_n(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_add_relaxed(volatile int* ptr, int val) {
    return __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_and_relaxed(volatile int* ptr, int val) {
    return __atomic_fetch_and(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_or_relaxed(volatile int* ptr, int val) {
    return __atomic_fetch_or(ptr, val, __ATOMIC_RELAXED);
}

static inline int atomic_xor_relaxed(volatile int* ptr, int val) {
    return __atomic_fetch_xor(ptr, val, __ATOMIC_RELAXED);
}

static inline bool atomic_cmpxchg_relaxed(volatile int* ptr, int* oldval, int newval) {
    return __atomic_compare_exchange_n(ptr, oldval, newval, false,
                                       __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}

static inline int atomic_load_relaxed(volatile int* ptr) {
    return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

static inline void atomic_store_relaxed(volatile int* ptr, int newval) {
    __atomic_store_n(ptr, newval, __ATOMIC_RELAXED);
}
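
// Illustrative sketch (not part of the original header; the helper name is
// hypothetical): the relaxed variants fit standalone statistics counters,
// where only the eventual tally matters and no other data is published
// through the counter.
static inline void example_bump_stat_relaxed(volatile int* counter) {
    // No ordering with surrounding memory accesses is implied or required.
    atomic_add_relaxed(counter, 1);
}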

static inline int atomic_add_release(volatile int* ptr, int val) {
    return __atomic_fetch_add(ptr, val, __ATOMIC_RELEASE);
}

static inline void atomic_fence(void) {
    __atomic_thread_fence(__ATOMIC_SEQ_CST);
}

static inline void atomic_fence_acquire(void) {
    __atomic_thread_fence(__ATOMIC_ACQUIRE);
}
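
// Illustrative sketch (not part of the original header; the struct and helper
// names are hypothetical): a common pairing of atomic_add_release() with
// atomic_fence_acquire() for reference counting. The release ordering makes
// prior writes to the object visible before the count drops; the acquire
// fence on the final decrement orders teardown after every earlier release.
typedef struct example_refcounted {
    volatile int ref_count;
} example_refcounted_t;

static inline bool example_refcounted_release(example_refcounted_t* obj) {
    if (atomic_add_release(&obj->ref_count, -1) == 1) {
        // This thread dropped the last reference; synchronize with all other
        // releasing threads before the caller destroys |obj|.
        atomic_fence_acquire();
        return true;
    }
    return false;
}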

static inline uint32_t atomic_load_u32(volatile uint32_t* ptr) {
    return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
}

static inline void atomic_store_relaxed_u32(volatile uint32_t* ptr, uint32_t newval) {
    __atomic_store_n(ptr, newval, __ATOMIC_RELAXED);
}

// 64-bit versions. Assumes an LP64 or LLP64 data model, so int is 32 bits and
// 64-bit values need these dedicated routines.
static inline int64_t atomic_swap_64(volatile int64_t* ptr, int64_t val) {
    return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);
}

static inline int64_t atomic_add_64(volatile int64_t* ptr, int64_t val) {
    return __atomic_fetch_add(ptr, val, __ATOMIC_SEQ_CST);
}

static inline int64_t atomic_and_64(volatile int64_t* ptr, int64_t val) {
    return __atomic_fetch_and(ptr, val, __ATOMIC_SEQ_CST);
}

static inline int64_t atomic_or_64(volatile int64_t* ptr, int64_t val) {
    return __atomic_fetch_or(ptr, val, __ATOMIC_SEQ_CST);
}

static inline int64_t atomic_xor_64(volatile int64_t* ptr, int64_t val) {
    return __atomic_fetch_xor(ptr, val, __ATOMIC_SEQ_CST);
}

static inline bool atomic_cmpxchg_64(volatile int64_t* ptr, int64_t* oldval,
                                     int64_t newval) {
    return __atomic_compare_exchange_n(ptr, oldval, newval, false,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

static inline int64_t atomic_load_64(volatile int64_t* ptr) {
    return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
}

static inline void atomic_store_64(volatile int64_t* ptr, int64_t newval) {
    __atomic_store_n(ptr, newval, __ATOMIC_SEQ_CST);
}

static inline uint64_t atomic_swap_u64(volatile uint64_t* ptr, uint64_t val) {
    return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);
}

static inline uint64_t atomic_add_u64(volatile uint64_t* ptr, uint64_t val) {
    return __atomic_fetch_add(ptr, val, __ATOMIC_SEQ_CST);
}

static inline uint64_t atomic_and_u64(volatile uint64_t* ptr, uint64_t val) {
    return __atomic_fetch_and(ptr, val, __ATOMIC_SEQ_CST);
}

static inline uint64_t atomic_or_u64(volatile uint64_t* ptr, uint64_t val) {
    return __atomic_fetch_or(ptr, val, __ATOMIC_SEQ_CST);
}

static inline uint64_t atomic_xor_u64(volatile uint64_t* ptr, uint64_t val) {
    return __atomic_fetch_xor(ptr, val, __ATOMIC_SEQ_CST);
}

static inline bool atomic_cmpxchg_u64(volatile uint64_t* ptr, uint64_t* oldval,
                                      uint64_t newval) {
    return __atomic_compare_exchange_n(ptr, oldval, newval, false,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

static inline uint64_t atomic_load_u64(volatile uint64_t* ptr) {
    return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
}

static inline uint64_t atomic_load_u64_relaxed(volatile uint64_t* ptr) {
    return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

static inline void atomic_store_u64(volatile uint64_t* ptr, uint64_t newval) {
    __atomic_store_n(ptr, newval, __ATOMIC_SEQ_CST);
}

static inline void atomic_store_u64_relaxed(volatile uint64_t* ptr, uint64_t newval) {
    __atomic_store_n(ptr, newval, __ATOMIC_RELAXED);
}
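
// Illustrative sketch (not part of the original header; the helper names are
// hypothetical): a 64-bit event counter. Routing updates and reads through
// atomic_add_u64()/atomic_load_u64() keeps them tear-free even on 32-bit
// targets, where a plain 64-bit access may compile to two instructions.
static inline void example_record_event(volatile uint64_t* counter) {
    atomic_add_u64(counter, 1);
}

static inline uint64_t example_read_event_count(volatile uint64_t* counter) {
    return atomic_load_u64(counter);
}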

static inline void atomic_signal_fence(void) {
    __atomic_signal_fence(__ATOMIC_SEQ_CST);
}
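
// Illustrative sketch (not part of the original header; the helper name is
// hypothetical): atomic_signal_fence() is a compiler-only barrier. It stops
// the compiler from reordering the surrounding accesses without emitting a
// hardware fence, which is sufficient when the producer and the consumer
// (for example an interrupt handler) run on the same CPU.
static inline void example_publish_for_same_cpu_handler(volatile int* data,
                                                        volatile int* ready,
                                                        int value) {
    *data = value;
    // Keep the compiler from hoisting the flag store above the data store.
    atomic_signal_fence();
    *ready = 1;
}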

static inline int64_t atomic_add_64_relaxed(volatile int64_t* ptr, int64_t val) {
    return __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
}

static inline uint64_t atomic_add_u64_relaxed(volatile uint64_t* ptr, uint64_t val) {
    return __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
}

static inline bool atomic_cmpxchg_u64_relaxed(volatile uint64_t* ptr, uint64_t* oldval,
                                              uint64_t newval) {
    return __atomic_compare_exchange_n(ptr, oldval, newval, false,
                                       __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}

__END_CDECLS