asan_report.cpp revision 360784
//===-- asan_report.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This file contains error reporting code.
//===----------------------------------------------------------------------===//

#include "asan_errors.h"
#include "asan_flags.h"
#include "asan_descriptions.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_scariness_score.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"

namespace __asan {

// -------------------- User-specified callbacks ----------------- {{{1
static void (*error_report_callback)(const char*);
static char *error_message_buffer = nullptr;
static uptr error_message_buffer_pos = 0;
static BlockingMutex error_message_buf_mutex(LINKER_INITIALIZED);
static const unsigned kAsanBuggyPcPoolSize = 25;
static __sanitizer::atomic_uintptr_t AsanBuggyPcPool[kAsanBuggyPcPoolSize];

void AppendToErrorMessageBuffer(const char *buffer) {
  BlockingMutexLock l(&error_message_buf_mutex);
  if (!error_message_buffer) {
    error_message_buffer =
      (char*)MmapOrDieQuietly(kErrorMessageBufferSize, __func__);
    error_message_buffer_pos = 0;
  }
  uptr length = internal_strlen(buffer);
  RAW_CHECK(kErrorMessageBufferSize >= error_message_buffer_pos);
  uptr remaining = kErrorMessageBufferSize - error_message_buffer_pos;
  internal_strncpy(error_message_buffer + error_message_buffer_pos,
                   buffer, remaining);
  error_message_buffer[kErrorMessageBufferSize - 1] = '\0';
  // FIXME: reallocate the buffer instead of truncating the message.
  error_message_buffer_pos += Min(remaining, length);
}

// ---------------------- Helper functions ----------------------- {{{1

void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
                     bool in_shadow, const char *after) {
  Decorator d;
  str->append("%s%s%x%x%s%s", before,
              in_shadow ? d.ShadowByte(byte) : d.MemoryByte(), byte >> 4,
              byte & 15, d.Default(), after);
}
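// For example, byte == 0xfd is appended as the two hex digits "f" and "d"
// ("fd"), wrapped in the shadow-byte color when in_shadow is true and in the
// memory-byte color otherwise.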

static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
                                const char *zone_name) {
  if (zone_ptr) {
    if (zone_name) {
      Printf("malloc_zone_from_ptr(%p) = %p, which is %s\n",
                 ptr, zone_ptr, zone_name);
    } else {
      Printf("malloc_zone_from_ptr(%p) = %p, which doesn't have a name\n",
                 ptr, zone_ptr);
    }
  } else {
    Printf("malloc_zone_from_ptr(%p) = 0\n", ptr);
  }
}

// ---------------------- Address Descriptions ------------------- {{{1

bool ParseFrameDescription(const char *frame_descr,
                           InternalMmapVector<StackVarDescr> *vars) {
  CHECK(frame_descr);
  const char *p;
  // This string is created by the compiler and has the following form:
  // "n alloc_1 alloc_2 ... alloc_n"
  // where alloc_i looks like "offset size len ObjectName"
  // or                       "offset size len ObjectName:line".
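  // Illustrative (hypothetical) example: a frame with a 4-byte "x" at offset
  // 16 and a 32-byte "buf" at offset 48, declared on line 42, could be
  // described as "2 16 4 1 x 48 32 6 buf:42", where the third number of each
  // entry is the length of the following name token ("x" -> 1, "buf:42" -> 6).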
  uptr n_objects = (uptr)internal_simple_strtoll(frame_descr, &p, 10);
  if (n_objects == 0)
    return false;

  for (uptr i = 0; i < n_objects; i++) {
    uptr beg  = (uptr)internal_simple_strtoll(p, &p, 10);
    uptr size = (uptr)internal_simple_strtoll(p, &p, 10);
    uptr len  = (uptr)internal_simple_strtoll(p, &p, 10);
    if (beg == 0 || size == 0 || *p != ' ') {
      return false;
    }
    p++;
    char *colon_pos = internal_strchr(p, ':');
    uptr line = 0;
    uptr name_len = len;
    if (colon_pos != nullptr && colon_pos < p + len) {
      name_len = colon_pos - p;
      line = (uptr)internal_simple_strtoll(colon_pos + 1, nullptr, 10);
    }
    StackVarDescr var = {beg, size, p, name_len, line};
    vars->push_back(var);
    p += len;
  }

  return true;
}

// -------------------- Different kinds of reports ----------------- {{{1

// Use ScopedInErrorReport to run common actions just before and
// immediately after printing an error report.
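// The Report* functions below follow a common pattern: construct a
// ScopedInErrorReport, build an Error* description, hand it to ReportError(),
// and let the destructor print the report (and Die() if the error is fatal).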
class ScopedInErrorReport {
 public:
  explicit ScopedInErrorReport(bool fatal = false)
      : halt_on_error_(fatal || flags()->halt_on_error) {
    // Make sure the registry and sanitizer report mutexes are locked while
    // we're printing an error report.
    // We can lock them only here to avoid self-deadlock in case of
    // recursive reports.
    asanThreadRegistry().Lock();
    Printf(
        "=================================================================\n");
  }

  ~ScopedInErrorReport() {
    if (halt_on_error_ && !__sanitizer_acquire_crash_state()) {
      asanThreadRegistry().Unlock();
      return;
    }
    ASAN_ON_ERROR();
    if (current_error_.IsValid()) current_error_.Print();

    // Make sure the current thread is announced.
    DescribeThread(GetCurrentThread());
    // We may want to grab this lock again when printing stats.
    asanThreadRegistry().Unlock();
    // Print memory stats.
    if (flags()->print_stats)
      __asan_print_accumulated_stats();

    if (common_flags()->print_cmdline)
      PrintCmdline();

    if (common_flags()->print_module_map == 2) PrintModuleMap();

    // Copy the message buffer so that we can start logging without holding a
    // lock that gets acquired during printing.
    InternalMmapVector<char> buffer_copy(kErrorMessageBufferSize);
    {
      BlockingMutexLock l(&error_message_buf_mutex);
      internal_memcpy(buffer_copy.data(),
                      error_message_buffer, kErrorMessageBufferSize);
    }

    LogFullErrorReport(buffer_copy.data());

    if (error_report_callback) {
      error_report_callback(buffer_copy.data());
    }

    if (halt_on_error_ && common_flags()->abort_on_error) {
      // On Android the message is truncated to 512 characters.
      // FIXME: implement "compact" error format, possibly without, or with
      // highly compressed stack traces?
      // FIXME: or just use the summary line as abort message?
      SetAbortMessage(buffer_copy.data());
    }

    // In halt_on_error = false mode, reset the current error object (before
    // unlocking).
    if (!halt_on_error_)
      internal_memset(&current_error_, 0, sizeof(current_error_));

    if (halt_on_error_) {
      Report("ABORTING\n");
      Die();
    }
  }

  void ReportError(const ErrorDescription &description) {
    // Can only report one error per ScopedInErrorReport.
    CHECK_EQ(current_error_.kind, kErrorKindInvalid);
    internal_memcpy(&current_error_, &description, sizeof(current_error_));
  }

  static ErrorDescription &CurrentError() {
    return current_error_;
  }

 private:
  ScopedErrorReportLock error_report_lock_;
  // Error currently being reported. This enables the destructor to interact
  // with the debugger and point it to an error description.
  static ErrorDescription current_error_;
  bool halt_on_error_;
};

ErrorDescription ScopedInErrorReport::current_error_(LINKER_INITIALIZED);

void ReportDeadlySignal(const SignalContext &sig) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorDeadlySignal error(GetCurrentTidOrInvalid(), sig);
  in_report.ReportError(error);
}

void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) {
  ScopedInErrorReport in_report;
  ErrorDoubleFree error(GetCurrentTidOrInvalid(), free_stack, addr);
  in_report.ReportError(error);
}

void ReportNewDeleteTypeMismatch(uptr addr, uptr delete_size,
                                 uptr delete_alignment,
                                 BufferedStackTrace *free_stack) {
  ScopedInErrorReport in_report;
  ErrorNewDeleteTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
                                   delete_size, delete_alignment);
  in_report.ReportError(error);
}

void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack) {
  ScopedInErrorReport in_report;
  ErrorFreeNotMalloced error(GetCurrentTidOrInvalid(), free_stack, addr);
  in_report.ReportError(error);
}

void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack,
                             AllocType alloc_type,
                             AllocType dealloc_type) {
  ScopedInErrorReport in_report;
  ErrorAllocTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
                               alloc_type, dealloc_type);
  in_report.ReportError(error);
}

void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorMallocUsableSizeNotOwned error(GetCurrentTidOrInvalid(), stack, addr);
  in_report.ReportError(error);
}

void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr,
                                             BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorSanitizerGetAllocatedSizeNotOwned error(GetCurrentTidOrInvalid(), stack,
                                               addr);
  in_report.ReportError(error);
}

void ReportCallocOverflow(uptr count, uptr size, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorCallocOverflow error(GetCurrentTidOrInvalid(), stack, count, size);
  in_report.ReportError(error);
}

void ReportReallocArrayOverflow(uptr count, uptr size,
                                BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorReallocArrayOverflow error(GetCurrentTidOrInvalid(), stack, count, size);
  in_report.ReportError(error);
}

void ReportPvallocOverflow(uptr size, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorPvallocOverflow error(GetCurrentTidOrInvalid(), stack, size);
  in_report.ReportError(error);
}

void ReportInvalidAllocationAlignment(uptr alignment,
                                      BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorInvalidAllocationAlignment error(GetCurrentTidOrInvalid(), stack,
                                        alignment);
  in_report.ReportError(error);
}

void ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment,
                                        BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorInvalidAlignedAllocAlignment error(GetCurrentTidOrInvalid(), stack,
                                          size, alignment);
  in_report.ReportError(error);
}

void ReportInvalidPosixMemalignAlignment(uptr alignment,
                                         BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorInvalidPosixMemalignAlignment error(GetCurrentTidOrInvalid(), stack,
                                           alignment);
  in_report.ReportError(error);
}

void ReportAllocationSizeTooBig(uptr user_size, uptr total_size, uptr max_size,
                                BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorAllocationSizeTooBig error(GetCurrentTidOrInvalid(), stack, user_size,
                                  total_size, max_size);
  in_report.ReportError(error);
}

void ReportRssLimitExceeded(BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorRssLimitExceeded error(GetCurrentTidOrInvalid(), stack);
  in_report.ReportError(error);
}

void ReportOutOfMemory(uptr requested_size, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorOutOfMemory error(GetCurrentTidOrInvalid(), stack, requested_size);
  in_report.ReportError(error);
}

void ReportStringFunctionMemoryRangesOverlap(const char *function,
                                             const char *offset1, uptr length1,
                                             const char *offset2, uptr length2,
                                             BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorStringFunctionMemoryRangesOverlap error(
      GetCurrentTidOrInvalid(), stack, (uptr)offset1, length1, (uptr)offset2,
      length2, function);
  in_report.ReportError(error);
}

void ReportStringFunctionSizeOverflow(uptr offset, uptr size,
                                      BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorStringFunctionSizeOverflow error(GetCurrentTidOrInvalid(), stack, offset,
                                        size);
  in_report.ReportError(error);
}

void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
                                                  uptr old_mid, uptr new_mid,
                                                  BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorBadParamsToAnnotateContiguousContainer error(
      GetCurrentTidOrInvalid(), stack, beg, end, old_mid, new_mid);
  in_report.ReportError(error);
}

void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
                        const __asan_global *g2, u32 stack_id2) {
  ScopedInErrorReport in_report;
  ErrorODRViolation error(GetCurrentTidOrInvalid(), g1, stack_id1, g2,
                          stack_id2);
  in_report.ReportError(error);
}

// ----------------------- CheckForInvalidPointerPair ----------- {{{1
static NOINLINE void ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp,
                                              uptr a1, uptr a2) {
  ScopedInErrorReport in_report;
  ErrorInvalidPointerPair error(GetCurrentTidOrInvalid(), pc, bp, sp, a1, a2);
  in_report.ReportError(error);
}

static bool IsInvalidPointerPair(uptr a1, uptr a2) {
  if (a1 == a2)
    return false;

  // 256B in shadow memory can be iterated quite fast
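  // (with ASan's default 8-to-1 shadow mapping, a 2048-byte range of
  // application memory is covered by 256 bytes of shadow)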
  static const uptr kMaxOffset = 2048;

  uptr left = a1 < a2 ? a1 : a2;
  uptr right = a1 < a2 ? a2 : a1;
  uptr offset = right - left;
  if (offset <= kMaxOffset)
    return __asan_region_is_poisoned(left, offset);

  AsanThread *t = GetCurrentThread();

  // check whether left is a stack memory pointer
  if (uptr shadow_offset1 = t->GetStackVariableShadowStart(left)) {
    uptr shadow_offset2 = t->GetStackVariableShadowStart(right);
    return shadow_offset2 == 0 || shadow_offset1 != shadow_offset2;
  }

  // check whether left is a heap memory address
  HeapAddressDescription hdesc1, hdesc2;
  if (GetHeapAddressInformation(left, 0, &hdesc1) &&
      hdesc1.chunk_access.access_type == kAccessTypeInside)
    return !GetHeapAddressInformation(right, 0, &hdesc2) ||
        hdesc2.chunk_access.access_type != kAccessTypeInside ||
        hdesc1.chunk_access.chunk_begin != hdesc2.chunk_access.chunk_begin;

  // check whether left is an address of a global variable
  GlobalAddressDescription gdesc1, gdesc2;
  if (GetGlobalAddressInformation(left, 0, &gdesc1))
    return !GetGlobalAddressInformation(right - 1, 0, &gdesc2) ||
        !gdesc1.PointsInsideTheSameVariable(gdesc2);

  if (t->GetStackVariableShadowStart(right) ||
      GetHeapAddressInformation(right, 0, &hdesc2) ||
      GetGlobalAddressInformation(right - 1, 0, &gdesc2))
    return true;

  // At this point we know nothing about either a1 or a2.
  return false;
}

static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) {
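  // detect_invalid_pointer_pairs: 0 disables the check entirely, 1 skips
  // pairs where either pointer is null, and any other value (e.g. 2) goes on
  // to check every pair, nulls included.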
  switch (flags()->detect_invalid_pointer_pairs) {
    case 0:
      return;
    case 1:
      if (p1 == nullptr || p2 == nullptr)
        return;
      break;
  }

  uptr a1 = reinterpret_cast<uptr>(p1);
  uptr a2 = reinterpret_cast<uptr>(p2);

  if (IsInvalidPointerPair(a1, a2)) {
    GET_CALLER_PC_BP_SP;
    ReportInvalidPointerPair(pc, bp, sp, a1, a2);
  }
}
// ----------------------- Mac-specific reports ----------------- {{{1

void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name,
                               BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  Printf("mz_realloc(%p) -- attempting to realloc unallocated memory.\n"
             "This is an unrecoverable problem, exiting now.\n",
             addr);
  PrintZoneForPointer(addr, zone_ptr, zone_name);
  stack->Print();
  DescribeAddressIfHeap(addr);
}

// -------------- SuppressErrorReport -------------- {{{1
// Avoid duplicate error reports in ASan recovery mode.
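// The pool of "buggy" PCs below is a small fixed-size cache: the first report
// for a given PC claims an empty slot with a compare-and-swap and is printed;
// later reports whose PC is already in the pool are suppressed. If the pool
// fills up with distinct PCs, the process dies.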
static bool SuppressErrorReport(uptr pc) {
  if (!common_flags()->suppress_equal_pcs) return false;
  for (unsigned i = 0; i < kAsanBuggyPcPoolSize; i++) {
    uptr cmp = atomic_load_relaxed(&AsanBuggyPcPool[i]);
    if (cmp == 0 && atomic_compare_exchange_strong(&AsanBuggyPcPool[i], &cmp,
                                                   pc, memory_order_relaxed))
      return false;
    if (cmp == pc) return true;
  }
  Die();
}

void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
                        uptr access_size, u32 exp, bool fatal) {
  if (!fatal && SuppressErrorReport(pc)) return;
  ENABLE_FRAME_POINTER;

  // Optimization experiments.
  // The experiments can be used to evaluate potential optimizations that remove
  // instrumentation (assess false negatives). Instead of completely removing
  // some instrumentation, the compiler can emit special calls into the runtime
  // (e.g. __asan_report_exp_load1 instead of __asan_report_load1) and pass a
  // mask of experiments (exp).
  // The reaction to a non-zero value of exp is yet to be defined.
  (void)exp;

  ScopedInErrorReport in_report(fatal);
  ErrorGeneric error(GetCurrentTidOrInvalid(), pc, bp, sp, addr, is_write,
                     access_size);
  in_report.ReportError(error);
}

}  // namespace __asan

// --------------------------- Interface --------------------- {{{1
using namespace __asan;

void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
                         uptr access_size, u32 exp) {
  ENABLE_FRAME_POINTER;
  bool fatal = flags()->halt_on_error;
  ReportGenericError(pc, bp, sp, addr, is_write, access_size, exp, fatal);
}

void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) {
  BlockingMutexLock l(&error_message_buf_mutex);
  error_report_callback = callback;
}

void __asan_describe_address(uptr addr) {
  // Thread registry must be locked while we're describing an address.
  asanThreadRegistry().Lock();
  PrintAddressDescription(addr, 1, "");
  asanThreadRegistry().Unlock();
}

int __asan_report_present() {
  return ScopedInErrorReport::CurrentError().kind != kErrorKindInvalid;
}

uptr __asan_get_report_pc() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.pc;
  return 0;
}

uptr __asan_get_report_bp() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.bp;
  return 0;
}

uptr __asan_get_report_sp() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.sp;
  return 0;
}

uptr __asan_get_report_address() {
  ErrorDescription &err = ScopedInErrorReport::CurrentError();
  if (err.kind == kErrorKindGeneric)
    return err.Generic.addr_description.Address();
  else if (err.kind == kErrorKindDoubleFree)
    return err.DoubleFree.addr_description.addr;
  return 0;
}

int __asan_get_report_access_type() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.is_write;
  return 0;
}

uptr __asan_get_report_access_size() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.access_size;
  return 0;
}

const char *__asan_get_report_description() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.bug_descr;
  return ScopedInErrorReport::CurrentError().Base.scariness.GetDescription();
}
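
// Illustrative sketch (hypothetical client code, not part of this file): a
// program may register an error-report callback and query the getters above
// while a report is being produced, e.g.:
//
//   static void MyAsanReportCallback(const char *report) {
//     if (__asan_report_present()) {
//       fprintf(stderr, "ASan fault at %p (%s, %zu bytes)\n",
//               (void *)__asan_get_report_address(),
//               __asan_get_report_access_type() ? "write" : "read",
//               (size_t)__asan_get_report_access_size());
//     }
//   }
//   // early in main(): __asan_set_error_report_callback(MyAsanReportCallback);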

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_ptr_sub(void *a, void *b) {
  CheckForInvalidPointerPair(a, b);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_ptr_cmp(void *a, void *b) {
  CheckForInvalidPointerPair(a, b);
}
} // extern "C"

// Provide a default implementation of __asan_on_error that does nothing
// and may be overridden by the user.
SANITIZER_INTERFACE_WEAK_DEF(void, __asan_on_error, void) {}
563