//===-- sanitizer_persistent_allocator.h ------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// A fast memory allocator that does not support free() nor realloc().
// All allocations are forever.
//===----------------------------------------------------------------------===//
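//
// Usage sketch (hypothetical caller, not part of this header): memory
// returned by this allocator lives until process exit, so it only suits
// data that is never deallocated, such as stack depot nodes:
//
//   void *p = PersistentAlloc(64);  // 64 bytes, never freed.
//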

#ifndef SANITIZER_PERSISTENT_ALLOCATOR_H
#define SANITIZER_PERSISTENT_ALLOCATOR_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"

namespace __sanitizer {

class PersistentAllocator {
 public:
  void *alloc(uptr size);

 private:
  void *tryAlloc(uptr size);
  StaticSpinMutex mtx;  // Protects allocation of new superblocks.
  atomic_uintptr_t region_pos;  // Bump pointer into the current superblock.
  atomic_uintptr_t region_end;  // End of the current superblock.
};

inline void *PersistentAllocator::tryAlloc(uptr size) {
  // Optimistic lock-free allocation: try to bump the region pointer.
  for (;;) {
    uptr cmp = atomic_load(&region_pos, memory_order_acquire);
    uptr end = atomic_load(&region_end, memory_order_acquire);
    // Fail if no superblock is mapped yet (pos == 0) or the current one
    // does not have enough space left.
    if (cmp == 0 || cmp + size > end) return nullptr;
    if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
                                     memory_order_acquire))
      return (void *)cmp;
  }
}
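
// Note: atomic_compare_exchange_weak may fail spuriously (or because another
// thread won the race to bump region_pos); tryAlloc simply reloads the
// current position and retries until the superblock is exhausted.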

inline void *PersistentAllocator::alloc(uptr size) {
  // First, try to allocate optimistically.
  void *s = tryAlloc(size);
  if (s) return s;
  // If that failed, take the lock, retry, and map a new superblock if needed.
  SpinMutexLock l(&mtx);
  for (;;) {
    s = tryAlloc(size);
    if (s) return s;
    // Zero region_pos so that concurrent lock-free callers fail fast while
    // the new superblock is being mapped.
    atomic_store(&region_pos, 0, memory_order_relaxed);
    uptr allocsz = 64 * 1024;
    if (allocsz < size) allocsz = size;
    uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
    atomic_store(&region_end, mem + allocsz, memory_order_release);
    atomic_store(&region_pos, mem, memory_order_release);
  }
}
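
// Design note: in alloc(), region_end is published before region_pos, both
// with release semantics. A lock-free tryAlloc that observes the new
// region_pos (acquire load) is therefore guaranteed to also observe the
// matching region_end, and can never bump the position past the freshly
// mapped superblock.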

extern PersistentAllocator thePersistentAllocator;
inline void *PersistentAlloc(uptr sz) {
  return thePersistentAllocator.alloc(sz);
}

} // namespace __sanitizer

#endif // SANITIZER_PERSISTENT_ALLOCATOR_H