//===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_EXCLUSIVE_H_
#define SCUDO_TSD_EXCLUSIVE_H_

#include "tsd.h"

namespace scudo {

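// Tracks the per-thread TSD lifecycle: a thread starts NotInitialized, moves
// to Initialized once its exclusive TSD is set up, and ends TornDown after
// its pthread destructor has run (from then on the fallback TSD is used).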
enum class ThreadState : u8 {
  NotInitialized = 0,
  Initialized,
  TornDown,
};

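// Forward declaration: registered below as the pthread key destructor so that
// a thread's TSD can be committed back when the thread exits.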
template <class Allocator> void teardownThread(void *Ptr);

template <class Allocator> struct TSDRegistryExT {
  void initLinkerInitialized(Allocator *Instance) {
    Instance->initLinkerInitialized();
    CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
    FallbackTSD = reinterpret_cast<TSD<Allocator> *>(
        map(nullptr, sizeof(TSD<Allocator>), "scudo:tsd"));
    FallbackTSD->initLinkerInitialized(Instance);
    Initialized = true;
  }
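  // Full initialization zeroes the registry first, making init() usable on an
  // instance that does not live in zero-initialized (linker) storage.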
  void init(Allocator *Instance) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(Instance);
  }

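  // Only used by tests to release the fallback TSD mapping.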
  void unmapTestOnly() {
    unmap(reinterpret_cast<void *>(FallbackTSD), sizeof(TSD<Allocator>));
  }

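  // Fast-path check: most calls find the thread already initialized and
  // return immediately; the NOINLINE slow path keeps this inlinable.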
  ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
    if (LIKELY(State != ThreadState::NotInitialized))
      return;
    initThread(Instance, MinimalInit);
  }

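  // Returns the thread's exclusive TSD when available (no locking needed),
  // and falls back to the shared, locked TSD otherwise. *UnlockRequired tells
  // the caller whether it must unlock the returned TSD when done.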
  ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
    if (LIKELY(State == ThreadState::Initialized &&
               !atomic_load(&Disabled, memory_order_acquire))) {
      *UnlockRequired = false;
      return &ThreadTSD;
    }
    DCHECK(FallbackTSD);
    FallbackTSD->lock();
    *UnlockRequired = true;
    return FallbackTSD;
  }
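  // A minimal caller-side sketch (Registry and the TSD's Cache member are
  // assumed from the surrounding allocator code):
  //   bool UnlockRequired;
  //   TSD<Allocator> *CurrentTSD = Registry.getTSDAndLock(&UnlockRequired);
  //   ... operate on CurrentTSD->Cache ...
  //   if (UnlockRequired)
  //     CurrentTSD->unlock();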

  // To disable the exclusive TSD registry, we effectively lock the fallback
  // TSD and force all threads to attempt to use it instead of their local
  // one.
  void disable() {
    Mutex.lock();
    FallbackTSD->lock();
    atomic_store(&Disabled, 1U, memory_order_release);
  }

  void enable() {
    atomic_store(&Disabled, 0U, memory_order_release);
    FallbackTSD->unlock();
    Mutex.unlock();
  }
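  // Note: disable() and enable() must be strictly paired; they are meant to
  // bracket operations that must not race with allocation, such as fork().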

private:
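  // Lazily runs the one-time global initialization under Mutex, so that
  // threads racing on their first use do not initialize twice.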
  void initOnceMaybe(Allocator *Instance) {
    ScopedLock L(Mutex);
    if (LIKELY(Initialized))
      return;
    initLinkerInitialized(Instance); // Sets Initialized.
  }

  // Using minimal initialization allows for global initialization while
  // keeping the thread-specific structure untouched. The fallback structure
  // will be used instead.
  NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
    initOnceMaybe(Instance);
    if (UNLIKELY(MinimalInit))
      return;
    CHECK_EQ(
        pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
    ThreadTSD.initLinkerInitialized(Instance);
    State = ThreadState::Initialized;
    Instance->callPostInitCallback();
  }

  pthread_key_t PThreadKey;    // Key whose destructor triggers teardown.
  bool Initialized;            // Global (not per-thread) init done.
  atomic_u8 Disabled;          // Set while disable() is in effect.
  TSD<Allocator> *FallbackTSD; // Shared, locked TSD for other threads.
  HybridMutex Mutex;           // Guards init and disable()/enable().
  static THREADLOCAL ThreadState State;        // Per-thread lifecycle state.
  static THREADLOCAL TSD<Allocator> ThreadTSD; // Per-thread exclusive TSD.

  friend void teardownThread<Allocator>(void *Ptr);
};

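// Out-of-line definitions for the static thread-local members; each thread
// gets its own instance of these.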
template <class Allocator>
THREADLOCAL TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
template <class Allocator>
THREADLOCAL ThreadState TSDRegistryExT<Allocator>::State;

template <class Allocator> void teardownThread(void *Ptr) {
  typedef TSDRegistryExT<Allocator> TSDRegistryT;
  Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
  // The glibc POSIX thread-local-storage deallocation routine calls
  // user-provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
  // We want to be called last since other destructors might call free and the
  // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
  // quarantine and swallowing the cache.
  if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) {
    TSDRegistryT::ThreadTSD.DestructorIterations--;
    // If pthread_setspecific fails, we will go ahead with the teardown.
    if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey,
                                   Ptr) == 0))
      return;
  }
  TSDRegistryT::ThreadTSD.commitBack(Instance);
  TSDRegistryT::State = ThreadState::TornDown;
}

} // namespace scudo

#endif // SCUDO_TSD_EXCLUSIVE_H_