//===-- tsan_dense_alloc.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// A DenseSlabAlloc is a freelist-based allocator of fixed-size objects.
// DenseSlabAllocCache is a thread-local cache for DenseSlabAlloc.
// The only difference from traditional slab allocators is that DenseSlabAlloc
// allocates/frees indices of objects and provides functionality to map
// an index to the real pointer. The index is u32, that is, half the size
// of uptr (hence the Dense prefix).
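//
// A minimal usage sketch (illustrative only: the Node type, the sizes and the
// allocator name below are hypothetical, not defined in this file):
//
//   struct Node { u64 next; u64 data; };
//   typedef DenseSlabAlloc<Node, 1 << 10, 1 << 10> NodeAlloc;
//   NodeAlloc alloc("node_alloc");
//   NodeAlloc::Cache cache;
//   alloc.InitCache(&cache);
//   u32 idx = alloc.Alloc(&cache);  // never 0; 0 is the invalid index
//   Node *n = alloc.Map(idx);       // index -> pointer
//   alloc.Free(&cache, idx);        // return the index to the cache
//   alloc.FlushCache(&cache);       // hand cached indices back to the allocator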
//===----------------------------------------------------------------------===//
#ifndef TSAN_DENSE_ALLOC_H
#define TSAN_DENSE_ALLOC_H

#include "sanitizer_common/sanitizer_common.h"
#include "tsan_defs.h"
#include "tsan_mutex.h"

namespace __tsan {

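// A small per-thread cache of free object indices. It is refilled from and
// drained to the owning DenseSlabAlloc's shared freelist in batches.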
class DenseSlabAllocCache {
  static const uptr kSize = 128;
  typedef u32 IndexT;
  uptr pos;
  IndexT cache[kSize];
  template<typename T, uptr kL1Size, uptr kL2Size> friend class DenseSlabAlloc;
};

template<typename T, uptr kL1Size, uptr kL2Size>
class DenseSlabAlloc {
 public:
  typedef DenseSlabAllocCache Cache;
  typedef typename Cache::IndexT IndexT;

  explicit DenseSlabAlloc(const char *name) {
    // Check that kL1Size and kL2Size are powers of two and that the whole
    // index range fits into IndexT.
    CHECK_EQ(kL1Size & (kL1Size - 1), 0);
    CHECK_EQ(kL2Size & (kL2Size - 1), 0);
    CHECK_GE(1ull << (sizeof(IndexT) * 8), kL1Size * kL2Size);
    // Free objects store the index of the next free object in their first
    // bytes, so T must be at least as large as IndexT.
    CHECK_GE(sizeof(T), sizeof(IndexT));
    internal_memset(map_, 0, sizeof(map_));
    freelist_ = 0;
    fillpos_ = 0;
    name_ = name;
  }

  ~DenseSlabAlloc() {
    for (uptr i = 0; i < kL1Size; i++) {
      if (map_[i] != 0)
        UnmapOrDie(map_[i], kL2Size * sizeof(T));
    }
  }

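  // Returns a free object index, refilling the thread-local cache from the
  // shared freelist when it is empty. Never returns 0 (the invalid index).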
  IndexT Alloc(Cache *c) {
    if (c->pos == 0)
      Refill(c);
    return c->cache[--c->pos];
  }

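  // Puts an index back into the thread-local cache, draining half of the
  // cache to the shared freelist when it is full.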
  void Free(Cache *c, IndexT idx) {
    DCHECK_NE(idx, 0);
    if (c->pos == Cache::kSize)
      Drain(c);
    c->cache[c->pos++] = idx;
  }

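  // Translates an index into an object pointer via the two-level table:
  // the high bits select the slab, the low bits select the object within it.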
  T *Map(IndexT idx) {
    DCHECK_NE(idx, 0);
    DCHECK_LE(idx, kL1Size * kL2Size);
    return &map_[idx / kL2Size][idx % kL2Size];
  }

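  // Returns all cached indices to the shared freelist.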
  void FlushCache(Cache *c) {
    SpinMutexLock lock(&mtx_);
    while (c->pos) {
      IndexT idx = c->cache[--c->pos];
      *(IndexT*)Map(idx) = freelist_;
      freelist_ = idx;
    }
  }

  void InitCache(Cache *c) {
    c->pos = 0;
    internal_memset(c->cache, 0, sizeof(c->cache));
  }

 private:
  T *map_[kL1Size];    // First-level table; each entry points to a slab of
                       // kL2Size objects (or is null if not yet allocated).
  SpinMutex mtx_;      // Protects the shared freelist and slab growth.
  IndexT freelist_;    // Head of the shared freelist (0 means empty).
  uptr fillpos_;       // Number of slabs allocated so far.
  const char *name_;   // Name used in diagnostics.

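  // Called with an empty cache: grabs up to kSize/2 free indices from the
  // shared freelist, growing the allocator by one kL2Size slab if needed.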
  void Refill(Cache *c) {
    SpinMutexLock lock(&mtx_);
    if (freelist_ == 0) {
      if (fillpos_ == kL1Size) {
        Printf("ThreadSanitizer: %s overflow (%zu*%zu). Dying.\n",
            name_, kL1Size, kL2Size);
        Die();
      }
      VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n",
          name_, fillpos_, kL1Size, kL2Size);
      T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), name_);
      // Reserve 0 as invalid index.
      IndexT start = fillpos_ == 0 ? 1 : 0;
      // Thread the new objects into a freelist: each free object stores the
      // index of the next free object in its first bytes.
      for (IndexT i = start; i < kL2Size; i++) {
        new(batch + i) T;
        *(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size;
      }
      // The last object terminates the list.
      *(IndexT*)(batch + kL2Size - 1) = 0;
      freelist_ = fillpos_ * kL2Size + start;
      map_[fillpos_++] = batch;
    }
    // Move up to half of the cache capacity from the shared freelist
    // into the thread-local cache.
    for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) {
      IndexT idx = freelist_;
      c->cache[c->pos++] = idx;
      freelist_ = *(IndexT*)Map(idx);
    }
  }

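  // Returns half of the cached indices to the shared freelist.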
  void Drain(Cache *c) {
    SpinMutexLock lock(&mtx_);
    for (uptr i = 0; i < Cache::kSize / 2; i++) {
      IndexT idx = c->cache[--c->pos];
      *(IndexT*)Map(idx) = freelist_;
      freelist_ = idx;
    }
  }
};

}  // namespace __tsan

#endif  // TSAN_DENSE_ALLOC_H