//===-- tsan_dense_alloc.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// A DenseSlabAlloc is a freelist-based allocator of fixed-size objects.
// DenseSlabAllocCache is a thread-local cache for DenseSlabAlloc.
// The only difference from traditional slab allocators is that DenseSlabAlloc
// allocates/frees indices of objects and provides functionality to map
// an index onto the real pointer. The index is u32, that is, 2 times smaller
// than uptr (hence the Dense prefix).
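//
// A minimal usage sketch (MyObj and the size parameters are illustrative
// placeholders, not values taken from TSan; MyObj must satisfy
// sizeof(MyObj) > sizeof(u32)):
//
//   typedef DenseSlabAlloc<MyObj, /*kL1Size=*/1 << 10, /*kL2Size=*/1 << 10>
//       MyAlloc;
//   static MyAlloc alloc("my_objects");
//
//   DenseSlabAllocCache cache;
//   alloc.InitCache(&cache);
//   u32 idx = alloc.Alloc(&cache);  // grab a free index
//   MyObj *obj = alloc.Map(idx);    // translate the index to a pointer
//   alloc.Free(&cache, idx);        // give the index back via the cache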
//===----------------------------------------------------------------------===//
#ifndef TSAN_DENSE_ALLOC_H
#define TSAN_DENSE_ALLOC_H

#include "sanitizer_common/sanitizer_common.h"
#include "tsan_defs.h"

namespace __tsan {

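// A small per-thread stack of free object indices. Alloc/Free work out of it
// and only take the shared allocator mutex when it is empty or full.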
class DenseSlabAllocCache {
  static const uptr kSize = 128;
  typedef u32 IndexT;
  uptr pos;
  IndexT cache[kSize];
  template <typename, uptr, uptr, u64>
  friend class DenseSlabAlloc;
};

template <typename T, uptr kL1Size, uptr kL2Size, u64 kReserved = 0>
class DenseSlabAlloc {
 public:
  typedef DenseSlabAllocCache Cache;
  typedef typename Cache::IndexT IndexT;

  static_assert((kL1Size & (kL1Size - 1)) == 0,
                "kL1Size must be a power-of-two");
  static_assert((kL2Size & (kL2Size - 1)) == 0,
                "kL2Size must be a power-of-two");
  static_assert((kL1Size * kL2Size) <= (1ull << (sizeof(IndexT) * 8)),
                "kL1Size/kL2Size are too large");
  static_assert(((kL1Size * kL2Size - 1) & kReserved) == 0,
                "reserved bits don't fit");
  static_assert(sizeof(T) > sizeof(IndexT),
                "it doesn't make sense to use dense alloc");

  DenseSlabAlloc(LinkerInitialized, const char *name) : name_(name) {}

  explicit DenseSlabAlloc(const char *name)
      : DenseSlabAlloc(LINKER_INITIALIZED, name) {
    // map_ can be very large. Linker-initialized instances are already
    // zero-initialized, so don't page the array in for them; only this
    // dynamic constructor zeroes it explicitly.
    internal_memset(map_, 0, sizeof(map_));
  }

  ~DenseSlabAlloc() {
    for (uptr i = 0; i < kL1Size; i++) {
      if (map_[i] != 0)
        UnmapOrDie(map_[i], kL2Size * sizeof(T));
    }
  }

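  // Returns a free index, refilling the per-thread cache from the shared
  // freelist (and growing the arena) when the cache is empty.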
  IndexT Alloc(Cache *c) {
    if (c->pos == 0)
      Refill(c);
    return c->cache[--c->pos];
  }

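  // Puts the index back into the per-thread cache, draining half of the
  // cache to the shared freelist when it is full.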
  void Free(Cache *c, IndexT idx) {
    DCHECK_NE(idx, 0);
    if (c->pos == Cache::kSize)
      Drain(c);
    c->cache[c->pos++] = idx;
  }

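  // Translates an index into a pointer to the object via the two-level map.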
  T *Map(IndexT idx) {
    DCHECK_NE(idx, 0);
    DCHECK_LE(idx, kL1Size * kL2Size);
    return &map_[idx / kL2Size][idx % kL2Size];
  }

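  // Returns all indices cached by the thread to the shared freelist.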
  void FlushCache(Cache *c) {
    if (!c->pos)
      return;
    SpinMutexLock lock(&mtx_);
    while (c->pos) {
      IndexT idx = c->cache[--c->pos];
      *(IndexT*)Map(idx) = freelist_;
      freelist_ = idx;
    }
  }

  void InitCache(Cache *c) {
    c->pos = 0;
    internal_memset(c->cache, 0, sizeof(c->cache));
  }

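  // Total memory currently mapped for object storage (approximate: reads
  // fillpos_ with a relaxed load and without taking the mutex).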
  uptr AllocatedMemory() const {
    return atomic_load_relaxed(&fillpos_) * kL2Size * sizeof(T);
  }

 private:
  T *map_[kL1Size];
  SpinMutex mtx_;
  IndexT freelist_ = {0};
  atomic_uintptr_t fillpos_ = {0};
  const char *const name_;

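  // Moves up to kSize/2 indices from the shared freelist into the cache,
  // growing the arena by one kL2Size block first if the freelist is empty.
  // Free objects form an intrusive singly-linked list: the first
  // IndexT-sized word of a free object holds the index of the next one.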
  void Refill(Cache *c) {
    SpinMutexLock lock(&mtx_);
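    // Out of free indices: map a fresh block of kL2Size objects and thread
    // them onto the freelist. Index 0 is reserved as the invalid index, so
    // the very first block starts handing out indices at 1.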
    if (freelist_ == 0) {
      uptr fillpos = atomic_load_relaxed(&fillpos_);
      if (fillpos == kL1Size) {
        Printf("ThreadSanitizer: %s overflow (%zu*%zu). Dying.\n",
            name_, kL1Size, kL2Size);
        Die();
      }
      VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n", name_,
              fillpos, kL1Size, kL2Size);
      T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), name_);
      // Reserve 0 as invalid index.
      IndexT start = fillpos == 0 ? 1 : 0;
      for (IndexT i = start; i < kL2Size; i++) {
        new(batch + i) T;
        *(IndexT *)(batch + i) = i + 1 + fillpos * kL2Size;
      }
      *(IndexT*)(batch + kL2Size - 1) = 0;
      freelist_ = fillpos * kL2Size + start;
      map_[fillpos] = batch;
      atomic_store_relaxed(&fillpos_, fillpos + 1);
    }
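    // Hand up to half a cache's worth of indices to the caller's cache.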
    for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) {
      IndexT idx = freelist_;
      c->cache[c->pos++] = idx;
      freelist_ = *(IndexT*)Map(idx);
    }
  }

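  // Returns half of the per-thread cache to the shared freelist.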
  void Drain(Cache *c) {
    SpinMutexLock lock(&mtx_);
    for (uptr i = 0; i < Cache::kSize / 2; i++) {
      IndexT idx = c->cache[--c->pos];
      *(IndexT*)Map(idx) = freelist_;
      freelist_ = idx;
    }
  }
};

}  // namespace __tsan

#endif  // TSAN_DENSE_ALLOC_H