//===-- tsan_rtl_thread.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_sync.h"

namespace __tsan {

// ThreadContext implementation.

ThreadContext::ThreadContext(Tid tid) : ThreadContextBase(tid), thr(), sync() {}

#if !SANITIZER_GO
ThreadContext::~ThreadContext() {
}
#endif

void ThreadContext::OnReset() { CHECK(!sync); }

#if !SANITIZER_GO
struct ThreadLeak {
  ThreadContext *tctx;
  int count;
};

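// Collects threads that finished but were never joined. Leaks with the same
// creation stack are merged and reported once with a count.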
static void CollectThreadLeaks(ThreadContextBase *tctx_base, void *arg) {
  auto &leaks = *static_cast<Vector<ThreadLeak> *>(arg);
  auto *tctx = static_cast<ThreadContext *>(tctx_base);
  if (tctx->detached || tctx->status != ThreadStatusFinished)
    return;
  for (uptr i = 0; i < leaks.Size(); i++) {
    if (leaks[i].tctx->creation_stack_id == tctx->creation_stack_id) {
      leaks[i].count++;
      return;
    }
  }
  leaks.PushBack({tctx, 1});
}
#endif

// Disabled on Mac because lldb test TestTsanBasic fails:
// https://reviews.llvm.org/D112603#3163158
#if !SANITIZER_GO && !SANITIZER_APPLE
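// Prints which ignores were still active when the thread finished, with the
// stacks where they were enabled, then aborts.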
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
  if (tctx->tid == kMainTid) {
    Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
  } else {
    Printf("ThreadSanitizer: thread T%d %s finished with ignores enabled,"
      " created at:\n", tctx->tid, tctx->name);
    PrintStack(SymbolizeStackId(tctx->creation_stack_id));
  }
  Printf("  One of the following ignores was not ended"
      " (in order of probability)\n");
  for (uptr i = 0; i < set->Size(); i++) {
    Printf("  Ignore was enabled at:\n");
    PrintStack(SymbolizeStackId(set->At(i)));
  }
  Die();
}

static void ThreadCheckIgnore(ThreadState *thr) {
  if (ctx->after_multithreaded_fork)
    return;
  if (thr->ignore_reads_and_writes)
    ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set);
  if (thr->ignore_sync)
    ReportIgnoresEnabled(thr->tctx, &thr->sync_ignore_set);
}
#else
static void ThreadCheckIgnore(ThreadState *thr) {}
#endif

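// Called at process exit: checks that no ignores were left enabled and
// reports finished-but-unjoined threads as thread leaks.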
void ThreadFinalize(ThreadState *thr) {
  ThreadCheckIgnore(thr);
#if !SANITIZER_GO
  if (!ShouldReport(thr, ReportTypeThreadLeak))
    return;
  ThreadRegistryLock l(&ctx->thread_registry);
  Vector<ThreadLeak> leaks;
  ctx->thread_registry.RunCallbackForEachThreadLocked(CollectThreadLeaks,
                                                      &leaks);
  for (uptr i = 0; i < leaks.Size(); i++) {
    ScopedReport rep(ReportTypeThreadLeak);
    rep.AddThread(leaks[i].tctx, true);
    rep.SetCount(leaks[i].count);
    OutputReport(thr, rep);
  }
#endif
}

int ThreadCount(ThreadState *thr) {
  uptr result;
  ctx->thread_registry.GetNumberOfThreads(0, 0, &result);
  return (int)result;
}

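// Handshake from ThreadCreate to ThreadContext::OnCreated: the creator's
// vector clock released at creation time, the global epoch it was captured
// in, and the creation stack for reports.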
struct OnCreatedArgs {
  VectorClock *sync;
  uptr sync_epoch;
  StackID stack;
};

Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
  // The main thread and GCD workers don't have a parent thread.
  Tid parent = kInvalidTid;
  OnCreatedArgs arg = {nullptr, 0, kInvalidStackID};
  if (thr) {
    parent = thr->tid;
    arg.stack = CurrentStackId(thr, pc);
    if (!thr->ignore_sync) {
      SlotLocker locker(thr);
      thr->clock.ReleaseStore(&arg.sync);
      arg.sync_epoch = ctx->global_epoch;
      IncrementEpoch(thr);
    }
  }
  Tid tid = ctx->thread_registry.CreateThread(uid, detached, parent, &arg);
  DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent, tid, uid);
  return tid;
}

void ThreadContext::OnCreated(void *arg) {
  OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
  sync = args->sync;
  sync_epoch = args->sync_epoch;
  creation_stack_id = args->stack;
}

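// Empty marker function: its address is used as the fake PC for the imitated
// stack writes in ThreadStart, so reports attribute those "writes" to a
// recognizable frame.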
142extern "C" void __tsan_stack_initialization() {}
143
144struct OnStartedArgs {
145  ThreadState *thr;
146  uptr stk_addr;
147  uptr stk_size;
148  uptr tls_addr;
149  uptr tls_size;
150};
151
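// Registers the thread as running, acquires the creation-time clock from the
// creator (only if the global epoch is unchanged since ThreadCreate), and
// records stack/TLS bounds. For non-main threads, the fresh stack and TLS
// are imitated as writes so stale data left at the same addresses by a
// previous thread is not reported as a race.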
void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
                 ThreadType thread_type) {
  ctx->thread_registry.StartThread(tid, os_id, thread_type, thr);
  if (!thr->ignore_sync) {
    SlotAttachAndLock(thr);
    if (thr->tctx->sync_epoch == ctx->global_epoch)
      thr->clock.Acquire(thr->tctx->sync);
    SlotUnlock(thr);
  }
  Free(thr->tctx->sync);

  uptr stk_addr = 0;
  uptr stk_size = 0;
  uptr tls_addr = 0;
  uptr tls_size = 0;
#if !SANITIZER_GO
  if (thread_type != ThreadType::Fiber)
    GetThreadStackAndTls(tid == kMainTid, &stk_addr, &stk_size, &tls_addr,
                         &tls_size);
#endif
  thr->stk_addr = stk_addr;
  thr->stk_size = stk_size;
  thr->tls_addr = tls_addr;
  thr->tls_size = tls_size;

#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, 0);
    ThreadIgnoreSyncBegin(thr, 0);
  }
#endif

#if !SANITIZER_GO
  // Don't imitate stack/TLS writes for the main thread,
  // because its initialization is synchronized with all
  // subsequent threads anyway.
  if (tid != kMainTid) {
    if (stk_addr && stk_size) {
      const uptr pc = StackTrace::GetNextInstructionPc(
          reinterpret_cast<uptr>(__tsan_stack_initialization));
      MemoryRangeImitateWrite(thr, pc, stk_addr, stk_size);
    }

    if (tls_addr && tls_size)
      ImitateTlsWrite(thr, tls_addr, tls_size);
  }
#endif
}

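// The ThreadState object lives in caller-provided storage, so it is
// constructed here with placement new (sanitizer_placement_new.h).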
void ThreadContext::OnStarted(void *arg) {
  thr = static_cast<ThreadState *>(arg);
  DPrintf("#%d: ThreadStart\n", tid);
  new (thr) ThreadState(tid);
  if (common_flags()->detect_deadlocks)
    thr->dd_lt = ctx->dd->CreateLogicalThread(tid);
  thr->tctx = this;
#if !SANITIZER_GO
  thr->is_inited = true;
#endif
}

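// Releases the thread's clock into tctx->sync so a later ThreadJoin can
// acquire it, returns the stack/TLS shadow to the OS, and detaches the
// thread from its slot. The ThreadState is destroyed at the end, so thr
// must not be used after this call.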
void ThreadFinish(ThreadState *thr) {
  DPrintf("#%d: ThreadFinish\n", thr->tid);
  ThreadCheckIgnore(thr);
  if (thr->stk_addr && thr->stk_size)
    DontNeedShadowFor(thr->stk_addr, thr->stk_size);
  if (thr->tls_addr && thr->tls_size)
    DontNeedShadowFor(thr->tls_addr, thr->tls_size);
  thr->is_dead = true;
#if !SANITIZER_GO
  thr->is_inited = false;
  thr->ignore_interceptors++;
  PlatformCleanUpThreadState(thr);
#endif
  if (!thr->ignore_sync) {
    SlotLocker locker(thr);
    ThreadRegistryLock lock(&ctx->thread_registry);
    // Note: detached is protected by the thread registry mutex,
    // and the thread may be detached concurrently by another thread.
    if (!thr->tctx->detached) {
      thr->clock.ReleaseStore(&thr->tctx->sync);
      thr->tctx->sync_epoch = ctx->global_epoch;
      IncrementEpoch(thr);
    }
  }
#if !SANITIZER_GO
  UnmapOrDie(thr->shadow_stack, kShadowStackSize * sizeof(uptr));
#else
  Free(thr->shadow_stack);
#endif
  thr->shadow_stack = nullptr;
  thr->shadow_stack_pos = nullptr;
  thr->shadow_stack_end = nullptr;
  if (common_flags()->detect_deadlocks)
    ctx->dd->DestroyLogicalThread(thr->dd_lt);
  SlotDetach(thr);
  ctx->thread_registry.FinishThread(thr->tid);
  thr->~ThreadState();
}

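// Moves all of the thread's trace parts to the global recycle queue. If too
// many finished threads are holding parts (kFinishedThreadHi/Lo thresholds),
// the trace gives up part of its allocation budget so memory can be
// reclaimed.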
void ThreadContext::OnFinished() {
  Lock lock(&ctx->slot_mtx);
  Lock lock1(&trace.mtx);
  // Queue all trace parts into the global recycle queue.
  auto parts = &trace.parts;
  while (trace.local_head) {
    CHECK(parts->Queued(trace.local_head));
    ctx->trace_part_recycle.PushBack(trace.local_head);
    trace.local_head = parts->Next(trace.local_head);
  }
  ctx->trace_part_recycle_finished += parts->Size();
  if (ctx->trace_part_recycle_finished > Trace::kFinishedThreadHi) {
    ctx->trace_part_finished_excess += parts->Size();
    trace.parts_allocated = 0;
  } else if (ctx->trace_part_recycle_finished > Trace::kFinishedThreadLo &&
             parts->Size() > 1) {
    ctx->trace_part_finished_excess += parts->Size() - 1;
    trace.parts_allocated = 1;
  }
  // From now on replay will use trace->final_pos.
  trace.final_pos = (Event *)atomic_load_relaxed(&thr->trace_pos);
  atomic_store_relaxed(&thr->trace_pos, 0);
  thr->tctx = nullptr;
  thr = nullptr;
}

struct ConsumeThreadContext {
  uptr uid;
  ThreadContextBase *tctx;
};

Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid) {
  return ctx->thread_registry.ConsumeThreadUserId(uid);
}

struct JoinArg {
  VectorClock *sync;
  uptr sync_epoch;
};

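// Acquires the clock that the finished thread released in ThreadFinish;
// OnJoined hands it over via JoinArg, and it is freed here regardless of
// whether it was acquired (the epoch may have changed since).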
void ThreadJoin(ThreadState *thr, uptr pc, Tid tid) {
  CHECK_GT(tid, 0);
  DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
  JoinArg arg = {};
  ctx->thread_registry.JoinThread(tid, &arg);
  if (!thr->ignore_sync) {
    SlotLocker locker(thr);
    if (arg.sync_epoch == ctx->global_epoch)
      thr->clock.Acquire(arg.sync);
  }
  Free(arg.sync);
}

void ThreadContext::OnJoined(void *ptr) {
  auto arg = static_cast<JoinArg *>(ptr);
  arg->sync = sync;
  arg->sync_epoch = sync_epoch;
  sync = nullptr;
  sync_epoch = 0;
}

void ThreadContext::OnDead() { CHECK_EQ(sync, nullptr); }

void ThreadDetach(ThreadState *thr, uptr pc, Tid tid) {
  CHECK_GT(tid, 0);
  ctx->thread_registry.DetachThread(tid, thr);
}

void ThreadContext::OnDetached(void *arg) { Free(sync); }

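// Restores the user id consumed by ThreadConsumeTid when a join attempt is
// abandoned (e.g. a timed join that failed).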
void ThreadNotJoined(ThreadState *thr, uptr pc, Tid tid, uptr uid) {
  CHECK_GT(tid, 0);
  ctx->thread_registry.SetThreadUserId(tid, uid);
}

void ThreadSetName(ThreadState *thr, const char *name) {
  ctx->thread_registry.SetThreadName(thr->tid, name);
}

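// Fiber support: these back the __tsan_create_fiber/__tsan_switch_to_fiber/
// __tsan_destroy_fiber annotation interface. A fiber gets its own
// ThreadState (and thus its own shadow stack and clock), while Processor
// resources stay with the OS thread and are rewired on each switch.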
#if !SANITIZER_GO
void FiberSwitchImpl(ThreadState *from, ThreadState *to) {
  Processor *proc = from->proc();
  ProcUnwire(proc, from);
  ProcWire(proc, to);
  set_cur_thread(to);
}

ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags) {
  void *mem = Alloc(sizeof(ThreadState));
  ThreadState *fiber = static_cast<ThreadState *>(mem);
  internal_memset(fiber, 0, sizeof(*fiber));
  Tid tid = ThreadCreate(thr, pc, 0, true);
  FiberSwitchImpl(thr, fiber);
  ThreadStart(fiber, tid, 0, ThreadType::Fiber);
  FiberSwitchImpl(fiber, thr);
  return fiber;
}

void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber) {
  FiberSwitchImpl(thr, fiber);
  ThreadFinish(fiber);
  FiberSwitchImpl(fiber, thr);
  Free(fiber);
}

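// Unless FiberSwitchFlagNoSync is passed, the switch performs release/acquire
// on the fiber address, so everything done before the switch happens-before
// everything done after it on the target fiber.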
void FiberSwitch(ThreadState *thr, uptr pc,
                 ThreadState *fiber, unsigned flags) {
  if (!(flags & FiberSwitchFlagNoSync))
    Release(thr, pc, (uptr)fiber);
  FiberSwitchImpl(thr, fiber);
  if (!(flags & FiberSwitchFlagNoSync))
    Acquire(fiber, pc, (uptr)fiber);
}
#endif

}  // namespace __tsan