//=-- lsan_common.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_report_decorator.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);

THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }

Flags lsan_flags;

void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &f->Name);
#include "lsan_flags.inc"
#undef LSAN_FLAG
}
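
// Example: at run time these flags are read from the LSAN_OPTIONS environment
// variable, which the parser registered above consumes. A hypothetical
// invocation that enables the verbose logging macros defined below:
//
//   LSAN_OPTIONS=log_pointers=1:log_threads=1 ./a.out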

#define LOG_POINTERS(...)                           \
  do {                                              \
    if (flags()->log_pointers) Report(__VA_ARGS__); \
  } while (0)

#define LOG_THREADS(...)                           \
  do {                                             \
    if (flags()->log_threads) Report(__VA_ARGS__); \
  } while (0)

ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };

void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder) // NOLINT
      SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
  suppression_ctx->ParseFromFile(flags()->suppressions);
  if (&__lsan_default_suppressions)
    suppression_ctx->Parse(__lsan_default_suppressions());
}
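
// Example: the file named by the "suppressions" flag holds one rule per line,
// each of the form "leak:<pattern>" (the only suppression type registered
// above). Patterns are matched against module, function, and file names; see
// GetSuppressionForAddr() below. A sketch, with hypothetical names:
//
//   leak:libfoo.so
//   leak:BarClass::LeakyMethod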

static SuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

struct RootRegion {
  const void *begin;
  uptr size;
};

InternalMmapVector<RootRegion> *root_regions;

void InitializeRootRegions() {
  CHECK(!root_regions);
  ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
  root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
}

void InitCommonLsan() {
  InitializeRootRegions();
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator: public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() { }
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
  const char *End() { return Default(); }
};

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#if defined(__x86_64__)
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#elif defined(__mips64)
  return ((p >> 40) == 0);
#elif defined(__aarch64__)
  unsigned runtimeVMA =
    (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
  return ((p >> runtimeVMA) == 0);
#else
  return true;
#endif
}
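
// For example, on x86_64 the canonical-form check above accepts
// 0x00007fffffffffff (bits 47-63 clear) and rejects the kernel-space address
// 0xffff800000000000 (bits 47-63 set).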

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin) continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored) continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          pp, p, chunk, chunk + m.requested_size(), m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
                 chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}
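
// Illustration of the two modes described above, as used later in this file
// ("chunk" and "size" stand for a chunk's bounds): the flood fill pass supplies
// a frontier so that newly tagged chunks are scanned in turn, while the
// indirect-leak pass scans each chunk exactly once:
//
//   ScanRangeForPointers(chunk, chunk + size, &frontier, "HEAP", kReachable);
//   ScanRangeForPointers(chunk, chunk + size, /*frontier*/ nullptr, "HEAP",
//                        kIndirectlyLeaked);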

void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack). Again, consider the entire stack
        // range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume
        // that the allocator cache will be part of the static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
      }
    }
  }
}

static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
                              uptr root_end) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr begin, end, prot;
  while (proc_maps.Next(&begin, &end,
                        /*offset*/ nullptr, /*filename*/ nullptr,
                        /*filename_size*/ 0, &prot)) {
    uptr intersection_begin = Max(root_begin, begin);
    uptr intersection_end = Min(end, root_end);
    if (intersection_begin >= intersection_end) continue;
    bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
    LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
                 root_begin, root_end, begin, end,
                 is_readable ? "readable" : "unreadable");
    if (is_readable)
      ScanRangeForPointers(intersection_begin, intersection_end, frontier,
                           "ROOT", kReachable);
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions) return;
  CHECK(root_regions);
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    uptr begin_addr = reinterpret_cast<uptr>(region.begin);
    ProcessRootRegion(frontier, begin_addr, begin_addr + region.size);
  }
}

static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}
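
// Worked example of the flood fill: if the frontier initially holds chunk A,
// and A's payload contains a pointer to untagged chunk B, the first iteration
// scans A, tags B, and pushes it; the next iteration scans B; the loop exits
// once the frontier is empty.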

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
                 chunk, chunk + m.requested_size(), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(1);

  ForEachChunk(CollectIgnoredCb, &frontier);
  ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  ProcessRootRegions(&frontier);
  FloodFillTag(&frontier, kReachable);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  CHECK_EQ(0, frontier.size());
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}

// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  StackDepotGet(stack_trace_id).Print();
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    u32 resolution = flags()->resolution;
    u32 stack_trace_id = 0;
    if (resolution > 0) {
      StackTrace stack = StackDepotGet(m.stack_trace_id());
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    } else {
      stack_trace_id = m.stack_trace_id();
    }
    leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
                                m.tag());
  }
}

static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched(1);
  GetSuppressionContext()->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(
        &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}

struct CheckForLeaksParam {
  bool success;
  LeakReport leak_report;
};

static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}
static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return false;
  EnsureMainThreadIDIsCorrect();
  CheckForLeaksParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  DoStopTheWorld(CheckForLeaksCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Die();
  }
  param.leak_report.ApplySuppressions();
  uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
  if (unsuppressed_count > 0) {
    Decorator d;
    Printf("\n"
           "================================================================="
           "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.End());
    param.leak_report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    param.leak_report.PrintSummary();
    return true;
  }
  return false;
}

void DoLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
  bool have_leaks = CheckForLeaks();
  if (!have_leaks) {
    return;
  }
  if (common_flags()->exitcode) {
    Die();
  }
}

static int DoRecoverableLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  bool have_leaks = CheckForLeaks();
  return have_leaks ? 1 : 0;
}

static Suppression *GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  SuppressionContext *suppressions = GetSuppressionContext();
  if (const char *module_name =
          Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
    if (suppressions->Match(module_name, kSuppressionLeak, &s))
      return s;

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
        suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  frames->ClearAll();
  return s;
}

static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  StackTrace stack = StackDepotGet(stack_trace_id);
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) return s;
  }
  return nullptr;
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
                                uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  uptr i;
  for (i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      break;
    }
  }
  if (i == leaks_.size()) {
    if (leaks_.size() == kMaxLeaksConsidered) return;
    Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
                  is_directly_leaked, /* is_suppressed */ false };
    leaks_.push_back(leak);
  }
  if (flags()->report_objects) {
    LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
    leaked_objects_.push_back(obj);
  }
}
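
// Example of the aggregation above: two unreachable chunks that share a
// (possibly truncated) stack trace id and a tag are merged into one Leak with
// hit_count == 2 and total_size equal to the sum of their requested sizes; a
// chunk with a different stack trace id or tag starts a new Leak entry.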

static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  InternalSort(&leaks_, leaks_.size(), LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report) break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.End());

  PrintStackTraceById(leaks_[index].stack_trace_id);

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary(kMaxSummaryLength);
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
                 allocations);
  ReportErrorSummary(summary.data());
}

void LeakReport::ApplySuppressions() {
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
          leaks_[i].hit_count);
      leaks_[i].is_suppressed = true;
    }
  }
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) result++;
  return result;
}

} // namespace __lsan
#endif // CAN_SANITIZE_LEAKS

using namespace __lsan;  // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1, "__lsan_ignore_object(): "
           "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif // CAN_SANITIZE_LEAKS
}
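
// Example use from client code, assuming <sanitizer/lsan_interface.h> is
// included (a sketch; the object is intentionally kept out of leak reports):
//
//   void *p = malloc(16);      // never freed on purpose
//   __lsan_ignore_object(p);   // chunk is tagged kIgnored and not reported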

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  RootRegion region = {begin, size};
  root_regions->push_back(region);
  VReport(1, "Registered root region at %p of size %zu\n", begin, size);
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  bool removed = false;
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    if (region.begin == begin && region.size == size) {
      removed = true;
      uptr last_index = root_regions->size() - 1;
      (*root_regions)[i] = (*root_regions)[last_index];
      root_regions->pop_back();
      VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %zu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif // CAN_SANITIZE_LEAKS
}
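
// Example pairing of the two entry points above (a sketch; "buf" is a
// hypothetical buffer that may hold the only live pointers to heap objects):
//
//   static char buf[4096];
//   __lsan_register_root_region(buf, sizeof(buf));
//   ...
//   __lsan_unregister_root_region(buf, sizeof(buf));  // begin/size must match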

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::disable_counter++;
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  if (!__lsan::disable_counter && common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
  __lsan::disable_counter--;
#endif
}
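
// Example: disable/enable calls must be balanced within a thread; allocations
// made between them are ignored by the leak checker. The public header
// <sanitizer/lsan_interface.h> also provides __lsan::ScopedDisabler, an RAII
// wrapper with the same effect:
//
//   __lsan_disable();
//   void *p = malloc(32);  // not reported as a leak
//   __lsan_enable();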

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif // CAN_SANITIZE_LEAKS
  return 0;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
  return 0;
}
#endif
} // extern "C"