//=-- lsan_common_linux.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality. Linux-specific code.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#include "lsan_common.h"

#if CAN_SANITIZE_LEAKS && SANITIZER_LINUX
#include <link.h>

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __lsan {

static const char kLinkerName[] = "ld";
// We request 2 modules matching "ld", so we can print a warning if there's more
// than one match. But only the first one is actually used.
static char linker_placeholder[2 * sizeof(LoadedModule)] ALIGNED(64);
static LoadedModule *linker = 0;

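// Module filter passed to GetListOfModules(): matches the dynamic linker by
// its library name.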
static bool IsLinker(const char* full_name) {
  return LibraryNameIs(full_name, kLinkerName);
}

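// Locates the dynamic linker module. Allocations made from it are later
// treated as reachable (see ProcessPlatformSpecificAllocations() below).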
void InitializePlatformSpecificModules() {
  internal_memset(linker_placeholder, 0, sizeof(linker_placeholder));
  uptr num_matches = GetListOfModules(
      reinterpret_cast<LoadedModule *>(linker_placeholder), 2, IsLinker);
  if (num_matches == 1) {
    linker = reinterpret_cast<LoadedModule *>(linker_placeholder);
    return;
  }
  if (num_matches == 0)
    VReport(1, "LeakSanitizer: Dynamic linker not found. "
            "TLS will not be handled correctly.\n");
  else if (num_matches > 1)
    VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
            "TLS will not be handled correctly.\n", kLinkerName);
  linker = 0;
}

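// dl_iterate_phdr() callback. Scans the writable LOAD segments of a single
// module for heap pointers and reports them to the frontier.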
static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
                                        void *data) {
  Frontier *frontier = reinterpret_cast<Frontier *>(data);
  for (uptr j = 0; j < info->dlpi_phnum; j++) {
    const ElfW(Phdr) *phdr = &(info->dlpi_phdr[j]);
    // We're looking for .data and .bss sections, which reside in writable,
    // loadable segments.
    if (!(phdr->p_flags & PF_W) || (phdr->p_type != PT_LOAD) ||
        (phdr->p_memsz == 0))
      continue;
    uptr begin = info->dlpi_addr + phdr->p_vaddr;
    uptr end = begin + phdr->p_memsz;
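    // Carve out the address range occupied by the internal allocator: its
    // bookkeeping structures point to user chunks and would otherwise make
    // leaked allocations appear reachable.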
    uptr allocator_begin = 0, allocator_end = 0;
    GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
    if (begin <= allocator_begin && allocator_begin < end) {
      CHECK_LE(allocator_begin, allocator_end);
      CHECK_LE(allocator_end, end);
      if (begin < allocator_begin)
        ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                             kReachable);
      if (allocator_end < end)
        ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL",
                             kReachable);
    } else {
      ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
    }
  }
  return 0;
}

// Scans global variables for heap pointers.
void ProcessGlobalRegions(Frontier *frontier) {
  if (!flags()->use_globals) return;
  // FIXME: dl_iterate_phdr acquires a linker lock, so we run a risk of
  // deadlocking by running this under StopTheWorld. However, the lock is
  // reentrant, so we should be able to fix this by acquiring the lock before
  // suspending threads.
  dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
}

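// Returns the PC of the allocation site's caller for the stack trace with the
// given id, or 0 if the trace is too short to tell.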
static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
  CHECK(stack_id);
  StackTrace stack = map->Get(stack_id);
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
  if (stack.size >= 2)
    return stack.trace[1];
  return 0;
}

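// Per-call context threaded through ForEachChunk() into
// ProcessPlatformSpecificAllocationsCb().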
struct ProcessPlatformAllocParam {
  Frontier *frontier;
  StackDepotReverseMap *stack_depot_reverse_map;
};

// ForEachChunk callback. Identifies unreachable chunks which must be treated as
// reachable. Marks them as reachable and adds them to the frontier.
static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
  CHECK(arg);
  ProcessPlatformAllocParam *param =
      reinterpret_cast<ProcessPlatformAllocParam *>(arg);
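  // ForEachChunk passes the allocator's internal chunk pointer; translate it
  // to the user-visible address before reading the chunk's metadata.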
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    u32 stack_id = m.stack_trace_id();
    uptr caller_pc = 0;
    if (stack_id > 0)
      caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
    // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
    // it as reachable, as we can't properly report its allocation stack anyway.
    if (caller_pc == 0 || linker->containsAddress(caller_pc)) {
      m.set_tag(kReachable);
      param->frontier->push_back(chunk);
    }
  }
}

// Handles dynamically allocated TLS blocks by treating all chunks allocated
// from ld-linux.so as reachable.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by intercepting
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
// addresses are stored in a dynamically allocated array (the DTV) which is
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
// being reachable from the static TLS, and the dynamic TLS being reachable from
// the DTV. This is because the initial DTV is allocated before our interception
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
// can't special-case it either, since we don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
void ProcessPlatformSpecificAllocations(Frontier *frontier) {
  if (!flags()->use_tls) return;
  if (!linker) return;
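  // Build the stack-id -> trace mapping once up front; GetCallerPC() consults
  // it for each candidate chunk.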
  StackDepotReverseMap stack_depot_reverse_map;
  ProcessPlatformAllocParam arg = {frontier, &stack_depot_reverse_map};
  ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg);
}

}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS && SANITIZER_LINUX