//===-- tsan_mman.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

// May be overridden by the front-end.
SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}

namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
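    // kMetaRatio is the app-to-meta compression factor: every kMetaShadowCell
    // bytes of application memory are described by kMetaShadowSize bytes of
    // meta shadow, so a meta range spans size / kMetaRatio bytes.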
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
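    // Trim [p, p + size) inward to kPageSize boundaries, since only whole
    // pages of meta shadow can be released. E.g. with a 4096-byte OS page
    // and kMetaRatio == 2, kPageSize is 8192; p = 12288, size = 20480
    // shrinks to the range [16384, 32768).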
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    uptr p_meta = (uptr)MemToMeta(p);
    ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
  }
};

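// The allocator lives in raw static storage rather than being a global
// object, so no global C++ constructor has to run at startup; it is set up
// explicitly via allocator()->Init() in InitializeAllocator() below.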
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

struct GlobalProc {
  Mutex mtx;
  Processor *proc;

  GlobalProc()
      : mtx(MutexTypeGlobalProc, StatMtxGlobalProc)
      , proc(ProcCreate()) {
  }
};

static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}

ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  //   __interceptor_free
  //   __nptl_deallocate_tsd
  //   start_thread
  //   clone
  // and:
  //   ResetRange
  //   __interceptor_munmap
  //   __deallocate_stack
  //   start_thread
  //   clone
  // Ideally, we would destroy the thread state (and unwire the proc) when a
  // thread actually exits (i.e. when we join/wait on it); then we would not
  // need the global proc at all.
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}
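
// Usage sketch (see user_free() below): a stack-allocated
// ScopedGlobalProcessor guarantees that thr->proc() is non-null for the
// duration of the call, temporarily wiring proc-less threads to the global
// Processor:
//   void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
//     ScopedGlobalProcessor sgp;
//     ...
//   }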

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
}

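// Runs after InitializeAllocator(): GlobalProc's constructor calls
// ProcCreate(), which in turn uses the allocator, so GlobalProc cannot be
// built any earlier.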
void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !flags()->report_signal_unsafe)
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}

static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;

void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                          bool signal) {
  if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportAllocationSizeTooBig(sz, kMaxAllowedMallocSize, &stack);
  }
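  // ReportAllocationSizeTooBig() above and ReportOutOfMemory() below are
  // NORETURN, so when AllocatorMayReturnNull() is false a failed check
  // terminates the process rather than falling through.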
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportOutOfMemory(sz, &stack);
  }
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
}

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportCallocOverflow(n, size, &stack);
  }
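  // Overflow was ruled out above, so the product n * size cannot wrap.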
  void *p = user_alloc_internal(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return SetErrnoOnNull(p);
}

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
  DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (!p)
    return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
  if (!sz) {
    user_free(thr, pc, p);
    return nullptr;
  }
  void *new_p = user_alloc_internal(thr, pc, sz);
  if (new_p) {
    uptr old_sz = user_alloc_usable_size(p);
    internal_memcpy(new_p, p, min(old_sz, sz));
    user_free(thr, pc, p);
  }
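  // If the new allocation failed, the old block is left untouched, matching
  // realloc() semantics.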
  return SetErrnoOnNull(new_p);
}

void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!IsPowerOfTwo(align))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAllocationAlignment(align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

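// Unlike the other allocation paths, posix_memalign() reports errors through
// its return value rather than errno, hence no SetErrnoOnNull() below.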
int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                        uptr sz) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidPosixMemalignAlignment(align, &stack);
  }
  void *ptr = user_alloc_internal(thr, pc, sz, align);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by user_alloc_internal.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, align));
  *memptr = ptr;
  return 0;
}

void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAlignedAllocAlignment(sz, align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
}

void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportPvallocOverflow(sz, &stack);
  }
  // pvalloc(0) should allocate one page.
  sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0)
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_malloc_hook(ptr, size);
  RunMallocHooks(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_free_hook(ptr);
  RunFreeHooks(ptr);
}

void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc(); avoid recursion.
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc(); avoid recursion.
    CHECK(0);
  }
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

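// The two statistics below are not tracked by TSan; a benign placeholder
// value is returned instead.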
uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}

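// Intended to be called by the application when a thread goes idle: releases
// the thread's cached vector clocks and allocator caches back to the shared
// state.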
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  thr->clock.ResetCached(&thr->proc()->clock_cache);
  thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"