1//===-- atomic_helpers.h ----------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#ifndef SCUDO_ATOMIC_H_
10#define SCUDO_ATOMIC_H_
11
12#include "internal_defs.h"
13
14namespace scudo {
15
// Memory orderings accepted by the atomic_* helpers below. The numeric
// values deliberately mirror the compiler's __ATOMIC_* constants so that a
// memory_order can be passed straight through to the __atomic_* builtins
// without translation.
enum memory_order {
  memory_order_relaxed = 0,
  memory_order_consume = 1,
  memory_order_acquire = 2,
  memory_order_release = 3,
  memory_order_acq_rel = 4,
  memory_order_seq_cst = 5
};
// Compile-time proof that the enum values match the builtin constants; if a
// toolchain ever diverges, these fire instead of producing silent miscompiles.
static_assert(memory_order_relaxed == __ATOMIC_RELAXED, "");
static_assert(memory_order_consume == __ATOMIC_CONSUME, "");
static_assert(memory_order_acquire == __ATOMIC_ACQUIRE, "");
static_assert(memory_order_release == __ATOMIC_RELEASE, "");
static_assert(memory_order_acq_rel == __ATOMIC_ACQ_REL, "");
static_assert(memory_order_seq_cst == __ATOMIC_SEQ_CST, "");
30
31struct atomic_u8 {
32  typedef u8 Type;
33  volatile Type ValDoNotUse;
34};
35
36struct atomic_u16 {
37  typedef u16 Type;
38  volatile Type ValDoNotUse;
39};
40
41struct atomic_s32 {
42  typedef s32 Type;
43  volatile Type ValDoNotUse;
44};
45
46struct atomic_u32 {
47  typedef u32 Type;
48  volatile Type ValDoNotUse;
49};
50
51struct atomic_u64 {
52  typedef u64 Type;
53  // On 32-bit platforms u64 is not necessarily aligned on 8 bytes.
54  ALIGNED(8) volatile Type ValDoNotUse;
55};
56
57struct atomic_uptr {
58  typedef uptr Type;
59  volatile Type ValDoNotUse;
60};
61
62template <typename T>
63inline typename T::Type atomic_load(const volatile T *A, memory_order MO) {
64  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
65  typename T::Type V;
66  __atomic_load(&A->ValDoNotUse, &V, MO);
67  return V;
68}
69
// Atomically writes V into *A with memory ordering MO.
// Debug builds assert that A is naturally aligned for its type.
template <typename T>
inline void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  __atomic_store(&A->ValDoNotUse, &V, MO);
}
75
76inline void atomic_thread_fence(memory_order) { __sync_synchronize(); }
77
78template <typename T>
79inline typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
80                                         memory_order MO) {
81  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
82  return __atomic_fetch_add(&A->ValDoNotUse, V, MO);
83}
84
85template <typename T>
86inline typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
87                                         memory_order MO) {
88  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
89  return __atomic_fetch_sub(&A->ValDoNotUse, V, MO);
90}
91
92template <typename T>
93inline typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
94                                        memory_order MO) {
95  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
96  typename T::Type R;
97  __atomic_exchange(&A->ValDoNotUse, &V, &R, MO);
98  return R;
99}
100
101template <typename T>
102inline bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
103                                           typename T::Type Xchg,
104                                           memory_order MO) {
105  return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, false, MO,
106                                   __ATOMIC_RELAXED);
107}
108
109template <typename T>
110inline bool atomic_compare_exchange_weak(volatile T *A, typename T::Type *Cmp,
111                                         typename T::Type Xchg,
112                                         memory_order MO) {
113  return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, true, MO,
114                                   __ATOMIC_RELAXED);
115}
116
117// Clutter-reducing helpers.
118
// Convenience wrapper: relaxed-ordering load of *A.
template <typename T>
inline typename T::Type atomic_load_relaxed(const volatile T *A) {
  return atomic_load(A, memory_order_relaxed);
}
123
// Convenience wrapper: relaxed-ordering store of V into *A.
template <typename T>
inline void atomic_store_relaxed(volatile T *A, typename T::Type V) {
  atomic_store(A, V, memory_order_relaxed);
}
128
// Convenience compare-and-swap that returns the value *A held before the
// operation rather than a success flag: on success Cmp is untouched (and
// equals the old value by definition); on failure the strong CAS writes the
// observed value into Cmp. Either way, returning Cmp yields the pre-operation
// value, and the caller can detect success by comparing it against the Cmp
// argument it passed in. Uses acquire ordering on the exchange.
template <typename T>
inline typename T::Type atomic_compare_exchange(volatile T *A,
                                                typename T::Type Cmp,
                                                typename T::Type Xchg) {
  atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
  return Cmp;
}
136
137} // namespace scudo
138
139#endif // SCUDO_ATOMIC_H_
140