//===-- msan_linux.cpp ----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// Linux-, NetBSD- and FreeBSD-specific code.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD

#  include <elf.h>
#  include <link.h>
#  include <pthread.h>
#  include <signal.h>
#  include <stdio.h>
#  include <stdlib.h>
#  if SANITIZER_LINUX
#    include <sys/personality.h>
#  endif
#  include <sys/resource.h>
#  include <sys/time.h>
#  include <unistd.h>
#  include <unwind.h>

#  include "msan.h"
#  include "msan_allocator.h"
#  include "msan_chained_origin_depot.h"
#  include "msan_report.h"
#  include "msan_thread.h"
#  include "sanitizer_common/sanitizer_common.h"
#  include "sanitizer_common/sanitizer_procmaps.h"
#  include "sanitizer_common/sanitizer_stackdepot.h"

namespace __msan {

void ReportMapRange(const char *descr, uptr beg, uptr size) {
  if (size > 0) {
    uptr end = beg + size - 1;
    VPrintf(1, "%s : 0x%zx - 0x%zx\n", descr, beg, end);
  }
}

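// Returns true if [beg, beg + size) does not intersect any existing mapping,
// i.e. the range is still free for MSan to claim. A failed check is printed
// only if `verbose` is set; the dry-run callers below pass verbose == false
// so that an incompatible layout can be retried (e.g. after re-exec with
// ASLR disabled) without spurious output.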
static bool CheckMemoryRangeAvailability(uptr beg, uptr size, bool verbose) {
  if (size > 0) {
    uptr end = beg + size - 1;
    if (!MemoryRangeIsAvailable(beg, end)) {
      if (verbose)
        Printf("FATAL: Memory range 0x%zx - 0x%zx is not available.\n", beg,
               end);
      return false;
    }
  }
  return true;
}

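// Maps [beg, beg + size) with no access rights so that stray accesses into
// invalid parts of the address space fault instead of silently succeeding.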
static bool ProtectMemoryRange(uptr beg, uptr size, const char *name) {
  if (size > 0) {
    void *addr = MmapFixedNoAccess(beg, size, name);
    if (beg == 0 && addr) {
      // Depending on the kernel configuration, we may not be able to protect
      // the page at address zero.
      uptr gap = 16 * GetPageSizeCached();
      beg += gap;
      size -= gap;
      addr = MmapFixedNoAccess(beg, size, name);
    }
    if ((uptr)addr != beg) {
      uptr end = beg + size - 1;
      Printf("FATAL: Cannot protect memory range 0x%zx - 0x%zx (%s).\n", beg,
             end, name);
      return false;
    }
  }
  return true;
}

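// Verifies the invariants of kMemoryLayout: segments are sorted, adjacent and
// cover the whole address space; each segment reports a consistent type at
// its first, middle and last byte; and for app/allocator segments the shadow
// and origin address transforms agree at those same probe points.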
static void CheckMemoryLayoutSanity() {
  uptr prev_end = 0;
  for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
    uptr start = kMemoryLayout[i].start;
    uptr end = kMemoryLayout[i].end;
    MappingDesc::Type type = kMemoryLayout[i].type;
    CHECK_LT(start, end);
    CHECK_EQ(prev_end, start);
    CHECK(addr_is_type(start, type));
    CHECK(addr_is_type((start + end) / 2, type));
    CHECK(addr_is_type(end - 1, type));
    if (type == MappingDesc::APP || type == MappingDesc::ALLOCATOR) {
      auto check_transforms = [](uptr addr) {
        CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
        CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
        CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
      };
      check_transforms(start);
      check_transforms((start + end) / 2);
      check_transforms(end - 1);
    }
    prev_end = end;
  }
}

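// Maps the shadow (and, if init_origins is set, origin) segments and protects
// the invalid ones. When dry_run is set, nothing is mapped or protected: the
// function only reports whether the current address space is compatible with
// the compiled-in kMemoryLayout.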
static bool InitShadow(bool init_origins, bool dry_run) {
  // Let the user know the mapping parameters first.
  VPrintf(1, "__msan_init %p\n", reinterpret_cast<void *>(&__msan_init));
  for (unsigned i = 0; i < kMemoryLayoutSize; ++i)
    VPrintf(1, "%s: %zx - %zx\n", kMemoryLayout[i].name, kMemoryLayout[i].start,
            kMemoryLayout[i].end - 1);

  CheckMemoryLayoutSanity();

  if (!MEM_IS_APP(&__msan_init)) {
    if (!dry_run)
      Printf("FATAL: Code %p is out of application range. Non-PIE build?\n",
             reinterpret_cast<void *>(&__msan_init));
    return false;
  }

  const uptr maxVirtualAddress = GetMaxUserVirtualAddress();

  for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
    uptr start = kMemoryLayout[i].start;
    uptr end = kMemoryLayout[i].end;
    uptr size = end - start;
    MappingDesc::Type type = kMemoryLayout[i].type;

    // Check if the segment should be mapped based on platform constraints.
    if (start >= maxVirtualAddress)
      continue;

    bool map = type == MappingDesc::SHADOW ||
               (init_origins && type == MappingDesc::ORIGIN);
    bool protect = type == MappingDesc::INVALID ||
                   (!init_origins && type == MappingDesc::ORIGIN);
    CHECK(!(map && protect));
    if (!map && !protect) {
      CHECK(type == MappingDesc::APP || type == MappingDesc::ALLOCATOR);

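      // App segments are expected to already hold the program's own mappings;
      // only the allocator region, which MSan claims lazily later, has to be
      // free during a dry run.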
      if (dry_run && type == MappingDesc::ALLOCATOR &&
          !CheckMemoryRangeAvailability(start, size, !dry_run))
        return false;
    }
    if (map) {
      if (dry_run && !CheckMemoryRangeAvailability(start, size, !dry_run))
        return false;
      if (!dry_run &&
          !MmapFixedSuperNoReserve(start, size, kMemoryLayout[i].name))
        return false;
      if (!dry_run && common_flags()->use_madv_dontdump)
        DontDumpShadowMemory(start, size);
    }
    if (protect) {
      if (dry_run && !CheckMemoryRangeAvailability(start, size, !dry_run))
        return false;
      if (!dry_run && !ProtectMemoryRange(start, size, kMemoryLayout[i].name))
        return false;
    }
  }

  return true;
}

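// Initializes the shadow memory, re-execing once with ASLR disabled on Linux
// if the randomized layout is incompatible with the fixed MSan mappings.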
bool InitShadowWithReExec(bool init_origins) {
  // Start with a dry run: check that the layout is ok, but don't print
  // warnings, because warning messages will cause tests to fail (even if we
  // successfully re-exec after the warning).
  bool success = InitShadow(init_origins, true);
  if (!success) {
#  if SANITIZER_LINUX
    // Perhaps ASLR entropy is too high. If ASLR is enabled, re-exec without it.
    int old_personality = personality(0xffffffff);
    bool aslr_on =
        (old_personality != -1) && ((old_personality & ADDR_NO_RANDOMIZE) == 0);

    if (aslr_on) {
      VReport(1,
              "WARNING: MemorySanitizer: memory layout is incompatible, "
              "possibly due to high-entropy ASLR.\n"
              "Re-execing with fixed virtual address space.\n"
              "N.B. reducing ASLR entropy is preferable.\n");
      CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
      ReExec();
    }
#  endif
  }

  // The earlier dry run didn't actually map or protect anything. Run again in
  // non-dry-run mode.
  return success && InitShadow(init_origins, false);
}

static void MsanAtExit(void) {
  if (flags()->print_stats && (flags()->atexit || msan_report_count > 0))
    ReportStats();
  if (msan_report_count > 0) {
    ReportAtExitStatistics();
    if (common_flags()->exitcode)
      internal__exit(common_flags()->exitcode);
  }
}

void InstallAtExitHandler() {
  atexit(MsanAtExit);
}

// ---------------------- TSD ---------------- {{{1

#  if SANITIZER_NETBSD
// Thread Static Data (pthread TSD) cannot be used in early init on NetBSD.
// Keep the MSan TSD API so existing code works unchanged, but back it with
// an alternative implementation based on a thread_local destructor.

static void (*tsd_destructor)(void *tsd) = nullptr;

struct tsd_key {
  tsd_key() : key(nullptr) {}
  ~tsd_key() {
    CHECK(tsd_destructor);
    if (key)
      (*tsd_destructor)(key);
  }
  MsanThread *key;
};

static thread_local struct tsd_key key;

void MsanTSDInit(void (*destructor)(void *tsd)) {
  CHECK(!tsd_destructor);
  tsd_destructor = destructor;
}

MsanThread *GetCurrentThread() {
  CHECK(tsd_destructor);
  return key.key;
}

void SetCurrentThread(MsanThread *tsd) {
  CHECK(tsd_destructor);
  CHECK(tsd);
  CHECK(!key.key);
  key.key = tsd;
}

void MsanTSDDtor(void *tsd) {
  CHECK(tsd_destructor);
  CHECK_EQ(key.key, tsd);
  key.key = nullptr;
  // Make sure that a signal handler cannot see a stale current thread pointer.
  atomic_signal_fence(memory_order_seq_cst);
  MsanThread::TSDDtor(tsd);
}
#  else
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;

void MsanTSDInit(void (*destructor)(void *tsd)) {
  CHECK(!tsd_key_inited);
  tsd_key_inited = true;
  CHECK_EQ(0, pthread_key_create(&tsd_key, destructor));
}

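// Fast path: the current thread is cached in a THREADLOCAL variable. The
// pthread key is used only so that MsanTSDDtor runs at thread exit.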
static THREADLOCAL MsanThread *msan_current_thread;

MsanThread *GetCurrentThread() {
  return msan_current_thread;
}

void SetCurrentThread(MsanThread *t) {
  // Make sure we do not reset the current MsanThread.
  CHECK_EQ(0, msan_current_thread);
  msan_current_thread = t;
  // Make sure that MsanTSDDtor gets called at the end.
  CHECK(tsd_key_inited);
  pthread_setspecific(tsd_key, (void *)t);
}

void MsanTSDDtor(void *tsd) {
  MsanThread *t = (MsanThread *)tsd;
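  // Other TSD destructors may still run after this one and touch MSan state
  // on this thread, so postpone teardown: POSIX re-runs key destructors (up
  // to PTHREAD_DESTRUCTOR_ITERATIONS) as long as any TSD value is non-null.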
  if (t->destructor_iterations_ > 1) {
    t->destructor_iterations_--;
    CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
    return;
  }
  msan_current_thread = nullptr;
  // Make sure that a signal handler cannot see a stale current thread pointer.
  atomic_signal_fence(memory_order_seq_cst);
  MsanThread::TSDDtor(tsd);
}
#  endif

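// Fork support: grab the MSan-internal locks before fork() and release them
// in both parent and child, so that a child forked while another thread
// holds one of these locks does not inherit it in a locked state.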
static void BeforeFork() {
  // Usually we lock ThreadRegistry, but msan does not have one.
  LockAllocator();
  StackDepotLockBeforeFork();
  ChainedOriginDepotBeforeFork();
}

static void AfterFork(bool fork_child) {
  ChainedOriginDepotAfterFork(fork_child);
  StackDepotUnlockAfterFork(fork_child);
  UnlockAllocator();
  // Usually we unlock ThreadRegistry, but msan does not have one.
}

void InstallAtForkHandler() {
  pthread_atfork(
      &BeforeFork, []() { AfterFork(/* fork_child= */ false); },
      []() { AfterFork(/* fork_child= */ true); });
}

}  // namespace __msan

#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD