//===-- tsan_mutex.cpp ----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_libc.h"
#include "tsan_mutex.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"

namespace __tsan {

// Simple reader-writer spin-mutex. Optimized for the not-so-contended case.
// Readers have preference and can potentially starve writers.

// The table specifies which mutexes can be locked under which other mutexes.
// E.g. if the row for MutexTypeThreads contains MutexTypeReport,
// then the Report mutex can be locked while under the Threads mutex.
// Leaf mutexes can be locked under any other mutex.
// Recursive locking is not supported.
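// The table lists direct edges only: at runtime, Lock() checks the edge from
// the most recently acquired mutex that is still held to the one being
// acquired, and InitializeMutex() verifies the resulting graph is acyclic.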
#if SANITIZER_DEBUG && !SANITIZER_GO
const MutexType MutexTypeLeaf = (MutexType)-1;
static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
  /*0  MutexTypeInvalid*/     {},
  /*1  MutexTypeTrace*/       {MutexTypeLeaf},
  /*2  MutexTypeThreads*/     {MutexTypeReport},
  /*3  MutexTypeReport*/      {MutexTypeSyncVar,
                               MutexTypeMBlock, MutexTypeJavaMBlock},
  /*4  MutexTypeSyncVar*/     {MutexTypeDDetector},
  /*5  MutexTypeSyncTab*/     {},  // unused
  /*6  MutexTypeSlab*/        {MutexTypeLeaf},
  /*7  MutexTypeAnnotations*/ {},
  /*8  MutexTypeAtExit*/      {MutexTypeSyncVar},
  /*9  MutexTypeMBlock*/      {MutexTypeSyncVar},
  /*10 MutexTypeJavaMBlock*/  {MutexTypeSyncVar},
  /*11 MutexTypeDDetector*/   {},
  /*12 MutexTypeFired*/       {MutexTypeLeaf},
  /*13 MutexTypeRacy*/        {MutexTypeLeaf},
  /*14 MutexTypeGlobalProc*/  {},
};

static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
#endif

void InitializeMutex() {
#if SANITIZER_DEBUG && !SANITIZER_GO
  // Build the "can lock" adjacency matrix.
  // If [i][j]==true, then one can lock mutex j while under mutex i.
  const int N = MutexTypeCount;
  int cnt[N] = {};
  bool leaf[N] = {};
  for (int i = 1; i < N; i++) {
    for (int j = 0; j < N; j++) {
      MutexType z = CanLockTab[i][j];
      if (z == MutexTypeInvalid)
        continue;
      if (z == MutexTypeLeaf) {
        CHECK(!leaf[i]);
        leaf[i] = true;
        continue;
      }
      CHECK(!CanLockAdj[i][(int)z]);
      CanLockAdj[i][(int)z] = true;
      cnt[i]++;
    }
  }
  for (int i = 0; i < N; i++) {
    CHECK(!leaf[i] || cnt[i] == 0);
  }
  // Add leaf mutexes.
  for (int i = 0; i < N; i++) {
    if (!leaf[i])
      continue;
    for (int j = 0; j < N; j++) {
      if (i == j || leaf[j] || j == MutexTypeInvalid)
        continue;
      CHECK(!CanLockAdj[j][i]);
      CanLockAdj[j][i] = true;
    }
  }
  // Build the transitive closure.
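  // (Warshall's algorithm: after the k/i/j loops below, CanLockAdj2[i][j] is
  // true iff mutex j is reachable from mutex i via direct lock-order edges.)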
  bool CanLockAdj2[MutexTypeCount][MutexTypeCount];
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      CanLockAdj2[i][j] = CanLockAdj[i][j];
    }
  }
  for (int k = 0; k < N; k++) {
    for (int i = 0; i < N; i++) {
      for (int j = 0; j < N; j++) {
        if (CanLockAdj2[i][k] && CanLockAdj2[k][j]) {
          CanLockAdj2[i][j] = true;
        }
      }
    }
  }
#if 0
  Printf("Can lock graph:\n");
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      Printf("%d ", CanLockAdj[i][j]);
    }
    Printf("\n");
  }
  Printf("Can lock graph closure:\n");
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      Printf("%d ", CanLockAdj2[i][j]);
    }
    Printf("\n");
  }
#endif
  // Verify that the graph is acyclic.
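  // A true diagonal entry in the closure means the mutex is transitively
  // lockable while already held, i.e. the lock-order graph contains a cycle.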
  for (int i = 0; i < N; i++) {
    if (CanLockAdj2[i][i]) {
      Printf("Mutex %d participates in a cycle\n", i);
      Die();
    }
  }
#endif
}

InternalDeadlockDetector::InternalDeadlockDetector() {
  // Rely on zero initialization because some mutexes can be locked before ctor.
}

#if SANITIZER_DEBUG && !SANITIZER_GO
void InternalDeadlockDetector::Lock(MutexType t) {
  // Printf("LOCK %d @%zu\n", t, seq_ + 1);
  CHECK_GT(t, MutexTypeInvalid);
  CHECK_LT(t, MutexTypeCount);
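  // Find the most recently acquired mutex that is still held; acquiring t is
  // legal only if CanLockAdj permits locking t under it.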
  u64 max_seq = 0;
  u64 max_idx = MutexTypeInvalid;
  for (int i = 0; i != MutexTypeCount; i++) {
    if (locked_[i] == 0)
      continue;
    CHECK_NE(locked_[i], max_seq);
    if (max_seq < locked_[i]) {
      max_seq = locked_[i];
      max_idx = i;
    }
  }
  locked_[t] = ++seq_;
  if (max_idx == MutexTypeInvalid)
    return;
  // Printf("  last %d @%zu\n", max_idx, max_seq);
  if (!CanLockAdj[max_idx][t]) {
    Printf("ThreadSanitizer: internal deadlock detected\n");
    Printf("ThreadSanitizer: can't lock %d while under %zu\n",
               t, (uptr)max_idx);
    CHECK(0);
  }
}

void InternalDeadlockDetector::Unlock(MutexType t) {
  // Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]);
  CHECK(locked_[t]);
  locked_[t] = 0;
}

void InternalDeadlockDetector::CheckNoLocks() {
  for (int i = 0; i != MutexTypeCount; i++) {
    CHECK_EQ(locked_[i], 0);
  }
}
#endif

void CheckNoLocks(ThreadState *thr) {
#if SANITIZER_DEBUG && !SANITIZER_GO
  thr->internal_deadlock_detector.CheckNoLocks();
#endif
}

const uptr kUnlocked = 0;
const uptr kWriteLock = 1;
const uptr kReadLock = 2;
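// The mutex state is a packed counter: bit 0 is the writer bit and each
// reader adds kReadLock, so a state of 2*N means N readers and no writer.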

class Backoff {
 public:
  Backoff()
    : iter_() {
  }

  bool Do() {
    if (iter_++ < kActiveSpinIters)
      proc_yield(kActiveSpinCnt);
    else
      internal_sched_yield();
    return true;
  }

  u64 Contention() const {
    u64 active = iter_ % kActiveSpinIters;
    u64 passive = iter_ - active;
    return active + 10 * passive;
  }

 private:
  int iter_;
  static const int kActiveSpinIters = 10;
  static const int kActiveSpinCnt = 20;
};
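
// Note: Backoff spins on the CPU (proc_yield) for the first kActiveSpinIters
// iterations, then yields the thread (internal_sched_yield); Contention()
// weights yielding iterations 10x as a rough cost estimate for the stats.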

Mutex::Mutex(MutexType type, StatType stat_type) {
  CHECK_GT(type, MutexTypeInvalid);
  CHECK_LT(type, MutexTypeCount);
#if SANITIZER_DEBUG
  type_ = type;
#endif
#if TSAN_COLLECT_STATS
  stat_type_ = stat_type;
#endif
  atomic_store(&state_, kUnlocked, memory_order_relaxed);
}

Mutex::~Mutex() {
  CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
}

void Mutex::Lock() {
#if SANITIZER_DEBUG && !SANITIZER_GO
  cur_thread()->internal_deadlock_detector.Lock(type_);
#endif
  uptr cmp = kUnlocked;
  if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                     memory_order_acquire))
    return;
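  // Fast path failed: spin, re-checking the state with a cheap relaxed load
  // before retrying the CAS (test-and-test-and-set) so the loop does not
  // keep bouncing the cache line between cores.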
  for (Backoff backoff; backoff.Do();) {
    if (atomic_load(&state_, memory_order_relaxed) == kUnlocked) {
      cmp = kUnlocked;
      if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire)) {
#if TSAN_COLLECT_STATS && !SANITIZER_GO
        StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
        return;
      }
    }
  }
}

void Mutex::Unlock() {
  uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
  (void)prev;
  DCHECK_NE(prev & kWriteLock, 0);
#if SANITIZER_DEBUG && !SANITIZER_GO
  cur_thread()->internal_deadlock_detector.Unlock(type_);
#endif
}

void Mutex::ReadLock() {
#if SANITIZER_DEBUG && !SANITIZER_GO
  cur_thread()->internal_deadlock_detector.Lock(type_);
#endif
  uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
  if ((prev & kWriteLock) == 0)
    return;
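  // A writer holds the mutex. Our reader increment is already in place, so
  // just spin until the writer bit clears.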
  for (Backoff backoff; backoff.Do();) {
    prev = atomic_load(&state_, memory_order_acquire);
    if ((prev & kWriteLock) == 0) {
#if TSAN_COLLECT_STATS && !SANITIZER_GO
      StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
      return;
    }
  }
}

void Mutex::ReadUnlock() {
  uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
  (void)prev;
  DCHECK_EQ(prev & kWriteLock, 0);
  DCHECK_GT(prev & ~kWriteLock, 0);
#if SANITIZER_DEBUG && !SANITIZER_GO
  cur_thread()->internal_deadlock_detector.Unlock(type_);
#endif
}

void Mutex::CheckLocked() {
  CHECK_NE(atomic_load(&state_, memory_order_relaxed), 0);
}

}  // namespace __tsan