//===-- asan_thread.cc ----------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Thread-related code.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_poisoning.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "asan_mapping.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan/lsan_common.h"

namespace __asan {

// AsanThreadContext implementation.

void AsanThreadContext::OnCreated(void *arg) {
  CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg);
  if (args->stack)
    stack_id = StackDepotPut(*args->stack);
  thread = args->thread;
  thread->set_context(this);
}

void AsanThreadContext::OnFinished() {
  // Drop the link to the AsanThread object.
  thread = 0;
}

// MIPS requires aligned address
static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadRegistry *asan_thread_registry;

static BlockingMutex mu_for_thread_context(LINKER_INITIALIZED);
static LowLevelAllocator allocator_for_thread_context;

static ThreadContextBase *GetAsanThreadContext(u32 tid) {
  BlockingMutexLock lock(&mu_for_thread_context);
  return new(allocator_for_thread_context) AsanThreadContext(tid);
}

ThreadRegistry &asanThreadRegistry() {
  static bool initialized;
  // Don't worry about thread safety - this should be called when there is
  // a single thread.
  if (!initialized) {
    // Never reuse ASan threads: we store a pointer to AsanThreadContext
    // in TSD and can't reliably tell when no more TSD destructors will
    // be called. It would be wrong to reuse AsanThreadContext for another
    // thread before all TSD destructors have been called for it.
    asan_thread_registry = new(thread_registry_placeholder) ThreadRegistry(
        GetAsanThreadContext, kMaxNumberOfThreads, kMaxNumberOfThreads);
    initialized = true;
  }
  return *asan_thread_registry;
}

AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
  return static_cast<AsanThreadContext *>(
      asanThreadRegistry().GetThreadLocked(tid));
}

// AsanThread implementation.

AsanThread *AsanThread::Create(thread_callback_t start_routine,
                               void *arg) {
  uptr PageSize = GetPageSizeCached();
  uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
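  // The AsanThread object is allocated with MmapOrDie (and released with
  // UnmapOrDie in Destroy()) rather than going through the ASan allocator.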
  AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__);
  thread->start_routine_ = start_routine;
  thread->arg_ = arg;

  return thread;
}

void AsanThread::TSDDtor(void *tsd) {
  AsanThreadContext *context = (AsanThreadContext*)tsd;
  VReport(1, "T%d TSDDtor\n", context->tid);
  if (context->thread)
    context->thread->Destroy();
}

void AsanThread::Destroy() {
  int tid = this->tid();
  VReport(1, "T%d exited\n", tid);

  malloc_storage().CommitBack();
  if (common_flags()->use_sigaltstack) UnsetAlternateSignalStack();
  asanThreadRegistry().FinishThread(tid);
  FlushToDeadThreadStats(&stats_);
  // We also clear the shadow on thread destruction because
  // some code may still be executing in later TSD destructors
  // and we don't want it to have any poisoned stack.
  ClearShadowForThreadStackAndTLS();
  DeleteFakeStack(tid);
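  // Finally, unmap the AsanThread object itself; it was mapped in Create()
  // with a size rounded up to whole pages.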
  uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
  UnmapOrDie(this, size);
  DTLS_Destroy();
}

// We want to create the FakeStack lazily on first use, but not earlier than
// the stack size is known, and the procedure has to be async-signal safe.
FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
  uptr stack_size = this->stack_size();
  if (stack_size == 0)  // stack_size is not yet available, don't use FakeStack.
    return 0;
  uptr old_val = 0;
  // fake_stack_ has 3 states:
  // 0   -- not initialized
  // 1   -- being initialized
  // ptr -- initialized
  // This CAS checks whether the state was 0 and, if so, changes it to state 1;
  // if that succeeds, the branch below initializes the pointer.
  if (atomic_compare_exchange_strong(
      reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
      memory_order_relaxed)) {
    uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
    CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
    stack_size_log =
        Min(stack_size_log, static_cast<uptr>(flags()->max_uar_stack_size_log));
    stack_size_log =
        Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
    fake_stack_ = FakeStack::Create(stack_size_log);
    SetTLSFakeStack(fake_stack_);
    return fake_stack_;
  }
  return 0;
}

void AsanThread::Init() {
  fake_stack_ = 0;  // Will be initialized lazily if needed.
  CHECK_EQ(this->stack_size(), 0U);
  SetThreadStackAndTls();
  CHECK_GT(this->stack_size(), 0U);
  CHECK(AddrIsInMem(stack_bottom_));
  CHECK(AddrIsInMem(stack_top_ - 1));
  ClearShadowForThreadStackAndTLS();
  int local = 0;
  VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
          (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
          &local);
  AsanPlatformThreadInit();
}

thread_return_t AsanThread::ThreadStart(uptr os_id) {
  Init();
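  // Associate the OS thread id with this logical tid in the thread registry.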
  asanThreadRegistry().StartThread(tid(), os_id, 0);
  if (common_flags()->use_sigaltstack) SetAlternateSignalStack();

  if (!start_routine_) {
    // start_routine_ == 0 if we're on the main thread or on one of the
    // OS X libdispatch worker threads. But nobody is supposed to call
    // ThreadStart() for the worker threads.
    CHECK_EQ(tid(), 0);
    return 0;
  }

  thread_return_t res = start_routine_(arg_);

  // On POSIX systems we defer this to the TSD destructor. LSan will consider
  // the thread's memory as non-live from the moment we call Destroy(), even
  // though that memory might contain pointers to heap objects which will be
  // cleaned up by a user-defined TSD destructor. Thus, calling Destroy() before
  // the TSD destructors have run might cause false positives in LSan.
  if (!SANITIZER_POSIX)
    this->Destroy();

  return res;
}

void AsanThread::SetThreadStackAndTls() {
  uptr tls_size = 0;
  GetThreadStackAndTls(tid() == 0, &stack_bottom_, &stack_size_, &tls_begin_,
                       &tls_size);
  stack_top_ = stack_bottom_ + stack_size_;
  tls_end_ = tls_begin_ + tls_size;

  int local;
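  // The address of a local variable must lie within the stack bounds we just
  // computed.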
  CHECK(AddrIsInStack((uptr)&local));
}

void AsanThread::ClearShadowForThreadStackAndTLS() {
  PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0);
  if (tls_begin_ != tls_end_)
    PoisonShadow(tls_begin_, tls_end_ - tls_begin_, 0);
}

bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
                                           StackFrameAccess *access) {
  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (has_fake_stack()) {
    bottom = fake_stack()->AddrIsInFakeStack(addr);
    CHECK(bottom);
    access->offset = addr - bottom;
    access->frame_pc = ((uptr*)bottom)[2];
    access->frame_descr = (const char *)((uptr*)bottom)[1];
    return true;
  }
  uptr aligned_addr = addr & ~(SANITIZER_WORDSIZE/8 - 1);  // align addr.
  u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8*)MemToShadow(bottom);

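  // Scan the shadow backwards from the access address: first skip shadow
  // bytes until we reach the left redzone of the current frame, then skip
  // the redzone itself.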
  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr != kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
  }

  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr == kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
  }

  if (shadow_ptr < shadow_bottom) {
    return false;
  }

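  // shadow_ptr + 1 now points at the first shadow byte of the frame; map it
  // back to the application address of the frame base, whose first word is
  // expected to hold kCurrentStackFrameMagic.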
  uptr* ptr = (uptr*)SHADOW_TO_MEM((uptr)(shadow_ptr + 1));
  CHECK(ptr[0] == kCurrentStackFrameMagic);
  access->offset = addr - (uptr)ptr;
  access->frame_pc = ptr[2];
  access->frame_descr = (const char*)ptr[1];
  return true;
}

static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
                                       void *addr) {
  AsanThreadContext *tctx = static_cast<AsanThreadContext*>(tctx_base);
  AsanThread *t = tctx->thread;
  if (!t) return false;
  if (t->AddrIsInStack((uptr)addr)) return true;
  if (t->has_fake_stack() && t->fake_stack()->AddrIsInFakeStack((uptr)addr))
    return true;
  return false;
}

AsanThread *GetCurrentThread() {
  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (!context) {
    if (SANITIZER_ANDROID) {
      // On Android, the libc constructor is called _after_ asan_init and
      // cleans up TSD. Try to figure out if this is still the main thread by
      // the stack address. We are not entirely sure that we have the correct
      // main thread limits, so only do this magic on Android, and only if the
      // found thread is the main thread.
      AsanThreadContext *tctx = GetThreadContextByTidLocked(0);
      if (ThreadStackContainsAddress(tctx, &context)) {
        SetCurrentThread(tctx->thread);
        return tctx->thread;
      }
    }
    return 0;
  }
  return context->thread;
}

void SetCurrentThread(AsanThread *t) {
  CHECK(t->context());
  VReport(2, "SetCurrentThread: %p for thread %p\n", t->context(),
          (void *)GetThreadSelf());
  // Make sure we do not reset the current AsanThread.
  CHECK_EQ(0, AsanTSDGet());
  AsanTSDSet(t->context());
  CHECK_EQ(t->context(), AsanTSDGet());
}

u32 GetCurrentTidOrInvalid() {
  AsanThread *t = GetCurrentThread();
  return t ? t->tid() : kInvalidTid;
}

AsanThread *FindThreadByStackAddress(uptr addr) {
  asanThreadRegistry().CheckLocked();
  AsanThreadContext *tctx = static_cast<AsanThreadContext *>(
      asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress,
                                                   (void *)addr));
  return tctx ? tctx->thread : 0;
}

void EnsureMainThreadIDIsCorrect() {
  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (context && (context->tid == 0))
    context->os_id = GetTid();
}

__asan::AsanThread *GetAsanThreadByOsIDLocked(uptr os_id) {
  __asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
      __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
  if (!context) return 0;
  return context->thread;
}
}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end,
                           uptr *cache_begin, uptr *cache_end) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (!t) return false;
  *stack_begin = t->stack_bottom();
  *stack_end = t->stack_top();
  *tls_begin = t->tls_begin();
  *tls_end = t->tls_end();
  // ASan doesn't keep allocator caches in TLS, so these are unused.
  *cache_begin = 0;
  *cache_end = 0;
  return true;
}

void ForEachExtraStackRange(uptr os_id, RangeIteratorCallback callback,
                            void *arg) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (t && t->has_fake_stack())
    t->fake_stack()->ForEachFakeFrame(callback, arg);
}

void LockThreadRegistry() {
  __asan::asanThreadRegistry().Lock();
}

void UnlockThreadRegistry() {
  __asan::asanThreadRegistry().Unlock();
}

void EnsureMainThreadIDIsCorrect() {
  __asan::EnsureMainThreadIDIsCorrect();
}
}  // namespace __lsan