//===-- sanitizer_persistent_allocator.h ------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// A fast memory allocator that does not support free() or realloc().
// All allocations are forever.
//===----------------------------------------------------------------------===//
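//
// Example usage (an illustrative sketch, not a real call site; the variable
// names here are hypothetical):
//
//   static PersistentAllocator<uptr> frame_alloc;
//   uptr *frames = frame_alloc.alloc(/*count=*/16);  // Never freed.
//   uptr bytes_mapped = frame_alloc.allocated();
//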

#ifndef SANITIZER_PERSISTENT_ALLOCATOR_H
#define SANITIZER_PERSISTENT_ALLOCATOR_H

#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"

namespace __sanitizer {

template <typename T>
class PersistentAllocator {
 public:
  T *alloc(uptr count = 1);
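  // Total number of bytes mapped so far, including the per-block BlockInfo
  // records kept at the tail of each block.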
  uptr allocated() const { return atomic_load_relaxed(&mapped_size); }

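  // Unmaps all blocks; only safe in tests, when no other thread is
  // allocating concurrently.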
  void TestOnlyUnmap();

 private:
  T *tryAlloc(uptr count);
  T *refillAndAlloc(uptr count);
  mutable StaticSpinMutex mtx;  // Protects alloc of new blocks.
  atomic_uintptr_t region_pos;  // Current position in the active region.
  atomic_uintptr_t region_end;
  atomic_uintptr_t mapped_size;

  struct BlockInfo {
    const BlockInfo *next;
    uptr ptr;
    uptr size;
  };
  const BlockInfo *curr;
};

template <typename T>
inline T *PersistentAllocator<T>::tryAlloc(uptr count) {
  // Optimistic lock-free allocation: try to bump the region pointer.
  for (;;) {
    uptr cmp = atomic_load(&region_pos, memory_order_acquire);
    uptr end = atomic_load(&region_end, memory_order_acquire);
    uptr size = count * sizeof(T);
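    // Fail if the region is not set up yet (region_pos is zeroed while a new
    // block is being mapped) or does not have enough space left.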
    if (cmp == 0 || cmp + size > end)
      return nullptr;
    if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
                                     memory_order_acquire))
      return reinterpret_cast<T *>(cmp);
  }
}

template <typename T>
inline T *PersistentAllocator<T>::alloc(uptr count) {
  // First, try to allocate optimistically.
  T *s = tryAlloc(count);
  if (LIKELY(s))
    return s;
  return refillAndAlloc(count);
}

template <typename T>
inline T *PersistentAllocator<T>::refillAndAlloc(uptr count) {
  // The fast path failed: take the lock, retry, and map a new superblock if
  // that still fails.
  SpinMutexLock l(&mtx);
  for (;;) {
    T *s = tryAlloc(count);
    if (s)
      return s;
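    // Publish an empty region first, so concurrent tryAlloc calls fail fast
    // and queue up on the mutex while the new block is being mapped.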
    atomic_store(&region_pos, 0, memory_order_relaxed);
    uptr size = count * sizeof(T) + sizeof(BlockInfo);
    uptr allocsz = RoundUpTo(Max<uptr>(size, 64u * 1024u), GetPageSizeCached());
    uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
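    // Keep the bookkeeping record in the last sizeof(BlockInfo) bytes of the
    // new block and link it into the block list for TestOnlyUnmap.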
    BlockInfo *new_block = (BlockInfo *)(mem + allocsz) - 1;
    new_block->next = curr;
    new_block->ptr = mem;
    new_block->size = allocsz;
    curr = new_block;

    atomic_fetch_add(&mapped_size, allocsz, memory_order_relaxed);

    allocsz -= sizeof(BlockInfo);
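    // Publish region_end before region_pos: a tryAlloc that observes the new
    // region_pos is then guaranteed to observe the matching region_end.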
    atomic_store(&region_end, mem + allocsz, memory_order_release);
    atomic_store(&region_pos, mem, memory_order_release);
  }
}

template <typename T>
void PersistentAllocator<T>::TestOnlyUnmap() {
  while (curr) {
    uptr mem = curr->ptr;
    uptr allocsz = curr->size;
    curr = curr->next;
    UnmapOrDie((void *)mem, allocsz);
  }
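  // Zero all members so the allocator is back in its initial state and can
  // be reused.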
  internal_memset(this, 0, sizeof(*this));
}

}  // namespace __sanitizer

#endif  // SANITIZER_PERSISTENT_ALLOCATOR_H