//=-- lsan_common.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_suppressions.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject,
// and also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);

THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }

Flags lsan_flags;

static void InitializeFlags(bool standalone) {
  Flags *f = flags();
  // Default values.
  f->report_objects = false;
  f->resolution = 0;
  f->max_leaks = 0;
  f->exitcode = 23;
  f->use_registers = true;
  f->use_globals = true;
  f->use_stacks = true;
  f->use_tls = true;
  f->use_root_regions = true;
  f->use_unaligned = false;
  f->use_poisoned = false;
  f->log_pointers = false;
  f->log_threads = false;

  const char *options = GetEnv("LSAN_OPTIONS");
  if (options) {
    ParseFlag(options, &f->use_registers, "use_registers", "");
    ParseFlag(options, &f->use_globals, "use_globals", "");
    ParseFlag(options, &f->use_stacks, "use_stacks", "");
    ParseFlag(options, &f->use_tls, "use_tls", "");
    ParseFlag(options, &f->use_root_regions, "use_root_regions", "");
    ParseFlag(options, &f->use_unaligned, "use_unaligned", "");
    ParseFlag(options, &f->use_poisoned, "use_poisoned", "");
    ParseFlag(options, &f->report_objects, "report_objects", "");
    ParseFlag(options, &f->resolution, "resolution", "");
    CHECK_GE(f->resolution, 0);
    ParseFlag(options, &f->max_leaks, "max_leaks", "");
    CHECK_GE(f->max_leaks, 0);
    ParseFlag(options, &f->log_pointers, "log_pointers", "");
    ParseFlag(options, &f->log_threads, "log_threads", "");
    ParseFlag(options, &f->exitcode, "exitcode", "");
  }

  // Set defaults for common flags (only in standalone mode) and parse
  // them from LSAN_OPTIONS.
  CommonFlags *cf = common_flags();
  if (standalone) {
    SetCommonFlagsDefaults(cf);
    cf->external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
    cf->malloc_context_size = 30;
    cf->detect_leaks = true;
  }
  ParseCommonFlagsFromString(cf, options);
}
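
// Example (illustrative, not part of the original sources): LSan flags are
// passed as a colon-separated list in the LSAN_OPTIONS environment variable,
// e.g.
//   LSAN_OPTIONS="report_objects=1:max_leaks=10:log_pointers=1" ./a.out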

#define LOG_POINTERS(...)                           \
  do {                                              \
    if (flags()->log_pointers) Report(__VA_ARGS__); \
  } while (0)

#define LOG_THREADS(...)                           \
  do {                                             \
    if (flags()->log_threads) Report(__VA_ARGS__); \
  } while (0)

static bool suppressions_inited = false;

void InitializeSuppressions() {
  CHECK(!suppressions_inited);
  SuppressionContext::InitIfNecessary();
  if (&__lsan_default_suppressions)
    SuppressionContext::Get()->Parse(__lsan_default_suppressions());
  suppressions_inited = true;
}

struct RootRegion {
  const void *begin;
  uptr size;
};

InternalMmapVector<RootRegion> *root_regions;

void InitializeRootRegions() {
  CHECK(!root_regions);
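  // The vector is placement-new'ed into static storage so that it is never
  // heap-allocated and no destructor runs at process exit.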
  ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
  root_regions = new (placeholder) InternalMmapVector<RootRegion>(1);
}

void InitCommonLsan(bool standalone) {
  InitializeFlags(standalone);
  InitializeRootRegions();
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator : public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() {}
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
  const char *End() { return Default(); }
};

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#ifdef __x86_64__
  // Accept only canonical form user-space addresses.
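  // (For example, 0x00007fffffffffff passes this check, while
  // 0xffff800000000000 does not.)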
  return ((p >> 47) == 0);
#else
  return true;
#endif
}

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable or ignored
// chunks (|tag| = kReachable or kIgnored) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin) continue;
    LsanMetadata m(chunk);
    // Reachable beats ignored beats leaked.
    if (m.tag() == kReachable) continue;
    if (m.tag() == kIgnored && tag != kReachable) continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          pp, p, chunk, chunk + m.requested_size(), m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
                 chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

void ForEachExtraStackRangeCb(uptr begin, uptr end, void *arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
221      Report("Unable to get registers from thread %d.\n");
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack). Again, consider the entire stack
        // range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume
        // that the allocator cache will be part of the static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
      }
    }
  }
}

static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
                              uptr root_end) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  uptr begin, end, prot;
  while (proc_maps.Next(&begin, &end,
                        /*offset*/ 0, /*filename*/ 0, /*filename_size*/ 0,
                        &prot)) {
    uptr intersection_begin = Max(root_begin, begin);
    uptr intersection_end = Min(end, root_end);
    if (intersection_begin >= intersection_end) continue;
    bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
    LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
                 root_begin, root_end, begin, end,
                 is_readable ? "readable" : "unreadable");
    if (is_readable)
      ScanRangeForPointers(intersection_begin, intersection_end, frontier,
                           "ROOT", kReachable);
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions) return;
  CHECK(root_regions);
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    uptr begin_addr = reinterpret_cast<uptr>(region.begin);
    ProcessRootRegion(frontier, begin_addr, begin_addr + region.size);
  }
}

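// Pops chunks off |frontier| and scans each of them, so that everything
// transitively reachable from the initial frontier ends up tagged with |tag|.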
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ 0, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If the chunk is marked as ignored, adds its address
// to the frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored)
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(1);

  ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  ProcessRootRegions(&frontier);
  FloodFillTag(&frontier, kReachable);
  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  LOG_POINTERS("Scanning ignored chunks.\n");
  CHECK_EQ(0, frontier.size());
  ForEachChunk(CollectIgnoredCb, &frontier);
  FloodFillTag(&frontier, kIgnored);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */);
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  StackDepotGet(stack_trace_id).Print();
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    uptr resolution = flags()->resolution;
    u32 stack_trace_id = 0;
    if (resolution > 0) {
      StackTrace stack = StackDepotGet(m.stack_trace_id());
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    } else {
      stack_trace_id = m.stack_trace_id();
    }
    leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
                                m.tag());
  }
}

static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched(1);
  SuppressionContext::Get()->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(matched[i]->hit_count),
           matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}

struct DoLeakCheckParam {
  bool success;
  LeakReport leak_report;
};

static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
                                void *arg) {
  DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  param->success = true;
}

void DoLeakCheck() {
  EnsureMainThreadIDIsCorrect();
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return;

  DoLeakCheckParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  StopTheWorld(DoLeakCheckCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Die();
  }
  param.leak_report.ApplySuppressions();
  uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
  if (unsuppressed_count > 0) {
    Decorator d;
    Printf("\n"
           "================================================================="
           "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.End());
    param.leak_report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    param.leak_report.PrintSummary();
    if (flags()->exitcode) {
      if (common_flags()->coverage)
        __sanitizer_cov_dump();
      internal__exit(flags()->exitcode);
    }
  }
}

static Suppression *GetSuppressionForAddr(uptr addr) {
  Suppression *s;

  // Suppress by module name.
  const char *module_name;
  uptr module_offset;
  if (Symbolizer::GetOrInit()
          ->GetModuleNameAndOffsetForPC(addr, &module_name, &module_offset) &&
      SuppressionContext::Get()->Match(module_name, SuppressionLeak, &s))
    return s;

  // Suppress by file or function name.
  static const uptr kMaxAddrFrames = 16;
  InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
  for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo();
  uptr addr_frames_num = Symbolizer::GetOrInit()->SymbolizePC(
      addr, addr_frames.data(), kMaxAddrFrames);
  for (uptr i = 0; i < addr_frames_num; i++) {
    if (SuppressionContext::Get()->Match(addr_frames[i].function,
                                         SuppressionLeak, &s) ||
        SuppressionContext::Get()->Match(addr_frames[i].file, SuppressionLeak,
                                         &s))
      return s;
  }
  return 0;
}

static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  StackTrace stack = StackDepotGet(stack_trace_id);
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) return s;
  }
  return 0;
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
                                uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  uptr i;
  for (i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      break;
    }
  }
  if (i == leaks_.size()) {
    if (leaks_.size() == kMaxLeaksConsidered) return;
    Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
                  is_directly_leaked, /* is_suppressed */ false };
    leaks_.push_back(leak);
  }
  if (flags()->report_objects) {
    LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
    leaked_objects_.push_back(obj);
  }
}

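// Orders leaks for reporting: direct leaks come before indirect ones, and
// within each class, leaks with a larger total size come first.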
static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  InternalSort(&leaks_, leaks_.size(), LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report) break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.End());

  PrintStackTraceById(leaks_[index].stack_trace_id);

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedBuffer<char> summary(kMaxSummaryLength);
  internal_snprintf(summary.data(), summary.size(),
                    "%zu byte(s) leaked in %zu allocation(s).", bytes,
                    allocations);
  ReportErrorSummary(summary.data());
}

void LeakReport::ApplySuppressions() {
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      s->hit_count += leaks_[i].hit_count;
      leaks_[i].is_suppressed = true;
    }
  }
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) result++;
  return result;
}

}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS

using namespace __lsan;  // NOLINT

624extern "C" {
625SANITIZER_INTERFACE_ATTRIBUTE
626void __lsan_ignore_object(const void *p) {
627#if CAN_SANITIZE_LEAKS
628  if (!common_flags()->detect_leaks)
629    return;
630  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
631  // locked.
632  BlockingMutexLock l(&global_mutex);
633  IgnoreObjectResult res = IgnoreObjectLocked(p);
634  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1, "__lsan_ignore_object(): "
               "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}
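
// Example (illustrative, not from the original sources): exclude a single
// intentionally leaked object from the report:
//   void *p = malloc(16);
//   __lsan_ignore_object(p);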

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  RootRegion region = {begin, size};
  root_regions->push_back(region);
  VReport(1, "Registered root region at %p of size %llu\n", begin, size);
#endif  // CAN_SANITIZE_LEAKS
}
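
// Example (illustrative, hypothetical variables): make a custom-mapped arena
// visible to the pointer scan, and remove it again before it is unmapped:
//   __lsan_register_root_region(arena_base, arena_size);
//   ...
//   __lsan_unregister_root_region(arena_base, arena_size);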

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  bool removed = false;
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    if (region.begin == begin && region.size == size) {
      removed = true;
      uptr last_index = root_regions->size() - 1;
      (*root_regions)[i] = (*root_regions)[last_index];
      root_regions->pop_back();
      VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %llu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::disable_counter++;
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  if (!__lsan::disable_counter && common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
  __lsan::disable_counter--;
#endif
}
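
// Example (illustrative, not from the original sources): disable/enable calls
// nest via disable_counter, so a block of known benign allocations can be
// bracketed like this:
//   __lsan_disable();
//   leaky_third_party_init();  // hypothetical
//   __lsan_enable();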

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
  return 0;
}
#endif
}  // extern "C"