//===-- tsan_dense_alloc.h --------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// A DenseSlabAlloc is a freelist-based allocator of fixed-size objects.
// DenseSlabAllocCache is a thread-local cache for DenseSlabAlloc.
// The only difference from traditional slab allocators is that DenseSlabAlloc
// allocates/frees indices of objects and provides functionality to map
// an index onto the real pointer. The index is u32, that is, half the size
// of uptr (hence the Dense prefix).
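//
// A minimal usage sketch (illustrative only; the element type Node and the
// level sizes are hypothetical, actual instantiations live elsewhere in the
// runtime):
//
//   typedef DenseSlabAlloc<Node, 1 << 10, 1 << 12> NodeAlloc;
//   NodeAlloc alloc;
//   DenseSlabAllocCache cache;
//   alloc.InitCache(&cache);
//   NodeAlloc::IndexT idx = alloc.Alloc(&cache);  // compact u32 handle, never 0
//   Node *n = alloc.Map(idx);                     // handle -> real pointer
//   alloc.Free(&cache, idx);
//   alloc.FlushCache(&cache);  // return cached indices to the shared freelist
//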
//===----------------------------------------------------------------------===//
#ifndef TSAN_DENSE_ALLOC_H
#define TSAN_DENSE_ALLOC_H

#include "sanitizer_common/sanitizer_common.h"
#include "tsan_defs.h"
#include "tsan_mutex.h"

namespace __tsan {

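// Per-thread cache of free object indices. Alloc/Free operate on the cache
// and fall back to the shared freelist (under a mutex) only when the cache
// runs empty or overflows.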
class DenseSlabAllocCache {
  static const uptr kSize = 128;
  typedef u32 IndexT;
  uptr pos;
  IndexT cache[kSize];
  template<typename T, uptr kL1Size, uptr kL2Size> friend class DenseSlabAlloc;
};

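// Two-level slab allocator: up to kL1Size lazily-mmapped slabs of kL2Size
// objects each. Index 0 is reserved as the invalid index. Free objects have
// their first sizeof(IndexT) bytes reused as the freelist link, so callers
// must reinitialize an object after Alloc.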
template<typename T, uptr kL1Size, uptr kL2Size>
class DenseSlabAlloc {
 public:
  typedef DenseSlabAllocCache Cache;
  typedef typename Cache::IndexT IndexT;

  DenseSlabAlloc() {
    // Check that kL1Size and kL2Size are sane.
    CHECK_EQ(kL1Size & (kL1Size - 1), 0);
    CHECK_EQ(kL2Size & (kL2Size - 1), 0);
    CHECK_GE(1ull << (sizeof(IndexT) * 8), kL1Size * kL2Size);
    // Check that it makes sense to use the dense alloc.
    CHECK_GE(sizeof(T), sizeof(IndexT));
    internal_memset(map_, 0, sizeof(map_));
    freelist_ = 0;
    fillpos_ = 0;
  }

  ~DenseSlabAlloc() {
    for (uptr i = 0; i < kL1Size; i++) {
      if (map_[i] != 0)
        UnmapOrDie(map_[i], kL2Size * sizeof(T));
    }
  }

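  // Returns the index of a free object. Pops from the per-thread cache,
  // refilling it from the shared freelist if the cache is empty.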
  IndexT Alloc(Cache *c) {
    if (c->pos == 0)
      Refill(c);
    return c->cache[--c->pos];
  }

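  // Returns an object to the per-thread cache, draining half of the cache
  // back to the shared freelist if the cache is full.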
  void Free(Cache *c, IndexT idx) {
    DCHECK_NE(idx, 0);
    if (c->pos == Cache::kSize)
      Drain(c);
    c->cache[c->pos++] = idx;
  }

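  // Maps an index onto the real object pointer.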
  T *Map(IndexT idx) {
    DCHECK_NE(idx, 0);
    DCHECK_LE(idx, kL1Size * kL2Size);
    return &map_[idx / kL2Size][idx % kL2Size];
  }

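  // Returns all cached indices to the shared freelist.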
  void FlushCache(Cache *c) {
    SpinMutexLock lock(&mtx_);
    while (c->pos) {
      IndexT idx = c->cache[--c->pos];
      *(IndexT*)Map(idx) = freelist_;
      freelist_ = idx;
    }
  }

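  // Resets a cache to the empty state before first use.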
  void InitCache(Cache *c) {
    c->pos = 0;
    internal_memset(c->cache, 0, sizeof(c->cache));
  }

 private:
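  // First-level table of lazily allocated slabs; each slab holds kL2Size
  // objects. freelist_ is the head of the shared freelist, fillpos_ is the
  // number of slabs allocated so far.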
  T *map_[kL1Size];
  SpinMutex mtx_;
  IndexT freelist_;
  uptr fillpos_;

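  // Moves up to Cache::kSize/2 indices from the shared freelist into the
  // cache, mmapping and threading a new slab if the freelist is empty.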
  void Refill(Cache *c) {
    SpinMutexLock lock(&mtx_);
    if (freelist_ == 0) {
      if (fillpos_ == kL1Size) {
        Printf("ThreadSanitizer: DenseSlabAllocator overflow. Dying.\n");
        Die();
      }
      T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), "DenseSlabAllocator");
      // Reserve 0 as invalid index.
      IndexT start = fillpos_ == 0 ? 1 : 0;
      for (IndexT i = start; i < kL2Size; i++) {
        new(batch + i) T();
        // Thread the new objects onto the freelist by storing the index of
        // the next object in the first bytes of each object.
        *(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size;
      }
      *(IndexT*)(batch + kL2Size - 1) = 0;  // Terminate the list.
      freelist_ = fillpos_ * kL2Size + start;
      map_[fillpos_++] = batch;
    }
    for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) {
      IndexT idx = freelist_;
      c->cache[c->pos++] = idx;
      freelist_ = *(IndexT*)Map(idx);
    }
  }

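  // Moves half of the cached indices back to the shared freelist.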
  void Drain(Cache *c) {
    SpinMutexLock lock(&mtx_);
    for (uptr i = 0; i < Cache::kSize / 2; i++) {
      IndexT idx = c->cache[--c->pos];
      *(IndexT*)Map(idx) = freelist_;
      freelist_ = idx;
    }
  }
};

}  // namespace __tsan

#endif  // TSAN_DENSE_ALLOC_H