//===-- tsan_mman.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

// May be overridden by front-end.
SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
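
// For example, a front-end could observe allocations with a strong definition
// (hypothetical sketch; it replaces the weak default above at link time):
//   extern "C" void __sanitizer_malloc_hook(void *ptr, uptr size) {
//     // Inspect ptr/size of every user allocation here.
//   }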

namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
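    // The trimming below keeps only the kPageSize-aligned middle of
    // [p, p+size) so that the released meta range is OS-page-aligned.
    // A sketch, assuming a 4K OS page and kMetaRatio == 2 (kPageSize == 8K):
    //   p = 0x7000, size = 0x5000  ->  keep [0x8000, 0xC000),
    // whose meta range is 0x2000 bytes starting at a 4K-aligned address.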
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    uptr p_meta = (uptr)MemToMeta(p);
    ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
  }
};

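// The allocator lives in static storage and is set up explicitly in
// InitializeAllocator() rather than by a global constructor, so it is usable
// before C++ dynamic initialization runs.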
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

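// Fallback Processor for threads that do not have one of their own
// (see ScopedGlobalProcessor below); all users serialize on mtx.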
struct GlobalProc {
  Mutex mtx;
  Processor *proc;

  GlobalProc()
      : mtx(MutexTypeGlobalProc, StatMtxGlobalProc)
      , proc(ProcCreate()) {
  }
};

static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}

ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  //   __interceptor_free
  //   __nptl_deallocate_tsd
  //   start_thread
  //   clone
  // and:
  //   ResetRange
  //   __interceptor_munmap
  //   __deallocate_stack
  //   start_thread
  //   clone
  // Ideally, we destroy thread state (and unwire proc) when a thread actually
  // exits (i.e. when we join/wait it). Then we would not need the global proc.
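  // Note: the mutex is intentionally left locked here; the matching Unlock is
  // in the destructor, so at most one thread uses the global proc at a time.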
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}

static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
static uptr max_user_defined_malloc_size;

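// Initializes the user allocator and computes the effective allocation size
// cap. For example, running with TSAN_OPTIONS=max_allocation_size_mb=512 caps
// user allocations at 512 << 20 bytes; with the flag unset the cap stays at
// kMaxAllowedMallocSize.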
void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
  max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                     ? common_flags()->max_allocation_size_mb
                                           << 20
                                     : kMaxAllowedMallocSize;
}

void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !flags()->report_signal_unsafe)
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}
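// Common allocation path: enforces the size cap, allocates from the per-proc
// cache, and (once the runtime is initialized) publishes the block to the
// metamap via OnUserAlloc.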
void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                          bool signal) {
  if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize ||
      sz > max_user_defined_malloc_size) {
    if (AllocatorMayReturnNull())
      return nullptr;
    uptr malloc_limit =
        Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportAllocationSizeTooBig(sz, malloc_limit, &stack);
  }
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportOutOfMemory(sz, &stack);
  }
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
}

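// The overflow check must run before the multiplication: e.g. on a 64-bit
// target calloc(1ull << 60, 16) wraps n * size around to 0, so computing
// n * size first would quietly allocate the wrong amount.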
void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportCallocOverflow(n, size, &stack);
  }
  void *p = user_alloc_internal(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return SetErrnoOnNull(p);
}

void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportReallocArrayOverflow(size, n, &stack);
  }
  return user_realloc(thr, pc, p, size * n);
}

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
  DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (!p)
    return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
  if (!sz) {
    user_free(thr, pc, p);
    return nullptr;
  }
  void *new_p = user_alloc_internal(thr, pc, sz);
  if (new_p) {
    uptr old_sz = user_alloc_usable_size(p);
    internal_memcpy(new_p, p, min(old_sz, sz));
    user_free(thr, pc, p);
  }
  return SetErrnoOnNull(new_p);
}

void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!IsPowerOfTwo(align))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAllocationAlignment(align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                        uptr sz) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidPosixMemalignAlignment(align, &stack);
  }
  void *ptr = user_alloc_internal(thr, pc, sz, align);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by user_alloc_internal.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, align));
  *memptr = ptr;
  return 0;
}

void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAlignedAllocAlignment(sz, align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
}

void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportPvallocOverflow(sz, &stack);
  }
  // pvalloc(0) should allocate one page.
  sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0)
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_malloc_hook(ptr, size);
  RunMallocHooks(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_free_hook(ptr);
  RunFreeHooks(ptr);
}

void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

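// Free and unmapped byte statistics are not tracked by the TSan allocator;
// the next two functions return placeholder values.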
uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}

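// Releases caches held by the current thread's Processor (vector clocks, user
// and internal allocator caches, metamap) so a long-idle thread does not pin
// memory.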
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  thr->clock.ResetCached(&thr->proc()->clock_cache);
  thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"