//===-- tsd_shared.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_SHARED_H_
#define SCUDO_TSD_SHARED_H_

#include "linux.h" // for getAndroidTlsPtr()
#include "tsd.h"

namespace scudo {

template <class Allocator, u32 MaxTSDCount> struct TSDRegistrySharedT {
  void initLinkerInitialized(Allocator *Instance) {
    Instance->initLinkerInitialized();
    CHECK_EQ(pthread_key_create(&PThreadKey, nullptr), 0); // For non-TLS
    const u32 NumberOfCPUs = getNumberOfCPUs();
    NumberOfTSDs = (SCUDO_ANDROID || NumberOfCPUs == 0)
                       ? MaxTSDCount
                       : Min(NumberOfCPUs, MaxTSDCount);
    for (u32 I = 0; I < NumberOfTSDs; I++)
      TSDs[I].initLinkerInitialized(Instance);
    // Compute all the coprimes of NumberOfTSDs. This will be used to walk the
    // array of TSDs in a random order. For details, see:
    // https://lemire.me/blog/2017/09/18/visiting-all-values-in-an-array-exactly-once-in-random-order/
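    // For example, with NumberOfTSDs == 6 the loop below yields the coprimes
    // {1, 5}: stepping an index by either value (modulo 6) visits every TSD
    // exactly once before repeating.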
    for (u32 I = 0; I < NumberOfTSDs; I++) {
      u32 A = I + 1;
      u32 B = NumberOfTSDs;
      // Find the GCD between I + 1 and NumberOfTSDs. If 1, they are coprime.
      while (B != 0) {
        const u32 T = A;
        A = B;
        B = T % B;
      }
      if (A == 1)
        CoPrimes[NumberOfCoPrimes++] = I + 1;
    }
    Initialized = true;
  }
  void init(Allocator *Instance) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(Instance);
  }

  void unmapTestOnly() {
    setCurrentTSD(nullptr);
    pthread_key_delete(PThreadKey);
  }

  ALWAYS_INLINE void initThreadMaybe(Allocator *Instance,
                                     UNUSED bool MinimalInit) {
    if (LIKELY(getCurrentTSD()))
      return;
    initThread(Instance);
  }

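  // TSDs are shared between threads in this registry, so the caller always
  // has to unlock the TSD returned below once done with it.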
  ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
    TSD<Allocator> *TSD = getCurrentTSD();
    DCHECK(TSD);
    *UnlockRequired = true;
    // Try to lock the currently associated context.
    if (TSD->tryLock())
      return TSD;
    // If that fails, go down the slow path.
    return getTSDAndLockSlow(TSD);
  }

  void disable() {
    Mutex.lock();
    for (u32 I = 0; I < NumberOfTSDs; I++)
      TSDs[I].lock();
  }

  void enable() {
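    // Unlock the TSDs in the reverse order of disable() before releasing the
    // registry mutex.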
    for (s32 I = static_cast<s32>(NumberOfTSDs - 1); I >= 0; I--)
      TSDs[I].unlock();
    Mutex.unlock();
  }

private:
  ALWAYS_INLINE void setCurrentTSD(TSD<Allocator> *CurrentTSD) {
#if _BIONIC
    *getAndroidTlsPtr() = reinterpret_cast<uptr>(CurrentTSD);
#elif SCUDO_LINUX
    ThreadTSD = CurrentTSD;
#else
    CHECK_EQ(
        pthread_setspecific(PThreadKey, reinterpret_cast<void *>(CurrentTSD)),
        0);
#endif
  }

  ALWAYS_INLINE TSD<Allocator> *getCurrentTSD() {
#if _BIONIC
    return reinterpret_cast<TSD<Allocator> *>(*getAndroidTlsPtr());
#elif SCUDO_LINUX
    return ThreadTSD;
#else
    return reinterpret_cast<TSD<Allocator> *>(pthread_getspecific(PThreadKey));
#endif
  }

  void initOnceMaybe(Allocator *Instance) {
    ScopedLock L(Mutex);
    if (LIKELY(Initialized))
      return;
    initLinkerInitialized(Instance); // Sets Initialized.
  }

  NOINLINE void initThread(Allocator *Instance) {
    initOnceMaybe(Instance);
    // Initial context assignment is done in a plain round-robin fashion.
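    // e.g., with NumberOfTSDs == 4, successive new threads are assigned TSDs
    // 0, 1, 2, 3, then 0 again, and so on.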
    const u32 Index = atomic_fetch_add(&CurrentIndex, 1U, memory_order_relaxed);
    setCurrentTSD(&TSDs[Index % NumberOfTSDs]);
    Instance->callPostInitCallback();
  }

  NOINLINE TSD<Allocator> *getTSDAndLockSlow(TSD<Allocator> *CurrentTSD) {
    if (MaxTSDCount > 1U && NumberOfTSDs > 1U) {
      // Use the Precedence of the current TSD as our random seed. Since we are
      // in the slow path, it means that tryLock failed, and as a result it's
      // very likely that said Precedence is non-zero.
      const u32 R = static_cast<u32>(CurrentTSD->getPrecedence());
      const u32 Inc = CoPrimes[R % NumberOfCoPrimes];
      u32 Index = R % NumberOfTSDs;
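      // Inc is coprime with NumberOfTSDs, so repeatedly adding it (modulo
      // NumberOfTSDs) walks distinct indices: each of the probes below hits a
      // different TSD.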
      uptr LowestPrecedence = UINTPTR_MAX;
      TSD<Allocator> *CandidateTSD = nullptr;
      // Go randomly through at most 4 contexts and find a candidate.
      for (u32 I = 0; I < Min(4U, NumberOfTSDs); I++) {
        if (TSDs[Index].tryLock()) {
          setCurrentTSD(&TSDs[Index]);
          return &TSDs[Index];
        }
        const uptr Precedence = TSDs[Index].getPrecedence();
        // A 0 precedence here means another thread just locked this TSD.
        if (Precedence && Precedence < LowestPrecedence) {
          CandidateTSD = &TSDs[Index];
          LowestPrecedence = Precedence;
        }
        Index += Inc;
        if (Index >= NumberOfTSDs)
          Index -= NumberOfTSDs;
      }
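      // No TSD was immediately available; block on the best candidate found,
      // i.e. the one with the lowest non-zero precedence.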
      if (CandidateTSD) {
        CandidateTSD->lock();
        setCurrentTSD(CandidateTSD);
        return CandidateTSD;
      }
    }
    // Last resort, stick with the current one.
    CurrentTSD->lock();
    return CurrentTSD;
  }

  pthread_key_t PThreadKey;
  atomic_u32 CurrentIndex;
  u32 NumberOfTSDs;
  u32 NumberOfCoPrimes;
  u32 CoPrimes[MaxTSDCount];
  bool Initialized;
  HybridMutex Mutex;
  TSD<Allocator> TSDs[MaxTSDCount];
#if SCUDO_LINUX && !_BIONIC
  static THREADLOCAL TSD<Allocator> *ThreadTSD;
#endif
};

#if SCUDO_LINUX && !_BIONIC
template <class Allocator, u32 MaxTSDCount>
THREADLOCAL TSD<Allocator>
    *TSDRegistrySharedT<Allocator, MaxTSDCount>::ThreadTSD;
#endif

} // namespace scudo

#endif // SCUDO_TSD_SHARED_H_