//===-- asan_report.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This file contains error reporting code.
//===----------------------------------------------------------------------===//

#include "asan_errors.h"
#include "asan_flags.h"
#include "asan_descriptions.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_scariness_score.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"

namespace __asan {

// -------------------- User-specified callbacks ----------------- {{{1
static void (*error_report_callback)(const char*);
static char *error_message_buffer = nullptr;
static uptr error_message_buffer_pos = 0;
static Mutex error_message_buf_mutex;
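// Pool of PCs at which errors have already been reported; used by
// SuppressErrorReport() below to avoid printing duplicate reports from the
// same instruction in recovery mode.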
static const unsigned kAsanBuggyPcPoolSize = 25;
static __sanitizer::atomic_uintptr_t AsanBuggyPcPool[kAsanBuggyPcPoolSize];

void AppendToErrorMessageBuffer(const char *buffer) {
  Lock l(&error_message_buf_mutex);
  if (!error_message_buffer) {
    error_message_buffer =
      (char*)MmapOrDieQuietly(kErrorMessageBufferSize, __func__);
    error_message_buffer_pos = 0;
  }
  uptr length = internal_strlen(buffer);
  RAW_CHECK(kErrorMessageBufferSize >= error_message_buffer_pos);
  uptr remaining = kErrorMessageBufferSize - error_message_buffer_pos;
  internal_strncpy(error_message_buffer + error_message_buffer_pos,
                   buffer, remaining);
  error_message_buffer[kErrorMessageBufferSize - 1] = '\0';
  // FIXME: reallocate the buffer instead of truncating the message.
  error_message_buffer_pos += Min(remaining, length);
}

// ---------------------- Helper functions ----------------------- {{{1

void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
                     bool in_shadow, const char *after) {
  Decorator d;
  str->append("%s%s%x%x%s%s", before,
              in_shadow ? d.ShadowByte(byte) : d.MemoryByte(), byte >> 4,
              byte & 15, d.Default(), after);
}
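// For illustration: PrintMemoryByte(&str, " ", 0xfa, /*in_shadow=*/true, "\n")
// appends " fa" followed by a newline, wrapped in shadow-byte color escapes
// when colorized output is enabled.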

static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
                                const char *zone_name) {
  if (zone_ptr) {
    if (zone_name) {
      Printf("malloc_zone_from_ptr(%p) = %p, which is %s\n", (void *)ptr,
             (void *)zone_ptr, zone_name);
    } else {
      Printf("malloc_zone_from_ptr(%p) = %p, which doesn't have a name\n",
             (void *)ptr, (void *)zone_ptr);
    }
  } else {
    Printf("malloc_zone_from_ptr(%p) = 0\n", (void *)ptr);
  }
}

// ---------------------- Address Descriptions ------------------- {{{1

bool ParseFrameDescription(const char *frame_descr,
                           InternalMmapVector<StackVarDescr> *vars) {
  CHECK(frame_descr);
  const char *p;
  // This string is created by the compiler and has the following form:
  // "n alloc_1 alloc_2 ... alloc_n"
  // where alloc_i looks like "offset size len ObjectName"
  // or                       "offset size len ObjectName:line".
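  // For example, "2 16 4 1 x 32 8 5 y:123" describes a frame with two
  // variables: a 4-byte "x" at frame offset 16, and an 8-byte "y" (declared on
  // line 123 of its source file) at frame offset 32.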
  uptr n_objects = (uptr)internal_simple_strtoll(frame_descr, &p, 10);
  if (n_objects == 0)
    return false;

  for (uptr i = 0; i < n_objects; i++) {
    uptr beg  = (uptr)internal_simple_strtoll(p, &p, 10);
    uptr size = (uptr)internal_simple_strtoll(p, &p, 10);
    uptr len  = (uptr)internal_simple_strtoll(p, &p, 10);
    if (beg == 0 || size == 0 || *p != ' ') {
      return false;
    }
    p++;
    char *colon_pos = internal_strchr(p, ':');
    uptr line = 0;
    uptr name_len = len;
    if (colon_pos != nullptr && colon_pos < p + len) {
      name_len = colon_pos - p;
      line = (uptr)internal_simple_strtoll(colon_pos + 1, nullptr, 10);
    }
    StackVarDescr var = {beg, size, p, name_len, line};
    vars->push_back(var);
    p += len;
  }

  return true;
}

// -------------------- Different kinds of reports ----------------- {{{1

// Use ScopedInErrorReport to run common actions just before and
// immediately after printing an error report.
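// Typical usage (see the Report* functions below):
//
//   ScopedInErrorReport in_report(/*fatal*/ true);
//   ErrorDoubleFree error(GetCurrentTidOrInvalid(), free_stack, addr);
//   in_report.ReportError(error);
//
// The report itself is printed when the ScopedInErrorReport object goes out
// of scope, i.e. from ~ScopedInErrorReport().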
class ScopedInErrorReport {
 public:
  explicit ScopedInErrorReport(bool fatal = false)
      : halt_on_error_(fatal || flags()->halt_on_error) {
    // Make sure the registry and sanitizer report mutexes are locked while
    // we're printing an error report.
    // We can lock them only here to avoid self-deadlock in case of
    // recursive reports.
    asanThreadRegistry().Lock();
    Printf(
        "=================================================================\n");
  }

  ~ScopedInErrorReport() {
    if (halt_on_error_ && !__sanitizer_acquire_crash_state()) {
      asanThreadRegistry().Unlock();
      return;
    }
    ASAN_ON_ERROR();
    if (current_error_.IsValid()) current_error_.Print();

    // Make sure the current thread is announced.
    DescribeThread(GetCurrentThread());
    // We may want to grab this lock again when printing stats.
    asanThreadRegistry().Unlock();
    // Print memory stats.
    if (flags()->print_stats)
      __asan_print_accumulated_stats();

    if (common_flags()->print_cmdline)
      PrintCmdline();

    if (common_flags()->print_module_map == 2)
      DumpProcessMap();

    // Copy the message buffer so that we can start logging without holding a
    // lock that gets acquired during printing.
    InternalMmapVector<char> buffer_copy(kErrorMessageBufferSize);
    {
      Lock l(&error_message_buf_mutex);
      internal_memcpy(buffer_copy.data(),
                      error_message_buffer, kErrorMessageBufferSize);
      // Clear error_message_buffer so that if we find other errors
      // we don't re-log this error.
      error_message_buffer_pos = 0;
    }

    LogFullErrorReport(buffer_copy.data());

    if (error_report_callback) {
      error_report_callback(buffer_copy.data());
    }

    if (halt_on_error_ && common_flags()->abort_on_error) {
      // On Android the message is truncated to 512 characters.
      // FIXME: implement "compact" error format, possibly without, or with
      // highly compressed stack traces?
      // FIXME: or just use the summary line as abort message?
      SetAbortMessage(buffer_copy.data());
    }

    // In halt_on_error = false mode, reset the current error object (before
    // unlocking).
    if (!halt_on_error_)
      internal_memset(&current_error_, 0, sizeof(current_error_));

    if (halt_on_error_) {
      Report("ABORTING\n");
      Die();
    }
  }

  void ReportError(const ErrorDescription &description) {
    // Can only report one error per ScopedInErrorReport.
    CHECK_EQ(current_error_.kind, kErrorKindInvalid);
    internal_memcpy(&current_error_, &description, sizeof(current_error_));
  }

  static ErrorDescription &CurrentError() {
    return current_error_;
  }

 private:
  ScopedErrorReportLock error_report_lock_;
  // Error currently being reported. This enables the destructor to interact
  // with the debugger and point it to an error description.
  static ErrorDescription current_error_;
  bool halt_on_error_;
};

ErrorDescription ScopedInErrorReport::current_error_(LINKER_INITIALIZED);

void ReportDeadlySignal(const SignalContext &sig) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorDeadlySignal error(GetCurrentTidOrInvalid(), sig);
  in_report.ReportError(error);
}

void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) {
  ScopedInErrorReport in_report;
  ErrorDoubleFree error(GetCurrentTidOrInvalid(), free_stack, addr);
  in_report.ReportError(error);
}

void ReportNewDeleteTypeMismatch(uptr addr, uptr delete_size,
                                 uptr delete_alignment,
                                 BufferedStackTrace *free_stack) {
  ScopedInErrorReport in_report;
  ErrorNewDeleteTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
                                   delete_size, delete_alignment);
  in_report.ReportError(error);
}

void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack) {
  ScopedInErrorReport in_report;
  ErrorFreeNotMalloced error(GetCurrentTidOrInvalid(), free_stack, addr);
  in_report.ReportError(error);
}

void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack,
                             AllocType alloc_type,
                             AllocType dealloc_type) {
  ScopedInErrorReport in_report;
  ErrorAllocTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
                               alloc_type, dealloc_type);
  in_report.ReportError(error);
}

void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorMallocUsableSizeNotOwned error(GetCurrentTidOrInvalid(), stack, addr);
  in_report.ReportError(error);
}

void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr,
                                             BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorSanitizerGetAllocatedSizeNotOwned error(GetCurrentTidOrInvalid(), stack,
                                               addr);
  in_report.ReportError(error);
}

void ReportCallocOverflow(uptr count, uptr size, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorCallocOverflow error(GetCurrentTidOrInvalid(), stack, count, size);
  in_report.ReportError(error);
}

void ReportReallocArrayOverflow(uptr count, uptr size,
                                BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorReallocArrayOverflow error(GetCurrentTidOrInvalid(), stack, count, size);
  in_report.ReportError(error);
}

void ReportPvallocOverflow(uptr size, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorPvallocOverflow error(GetCurrentTidOrInvalid(), stack, size);
  in_report.ReportError(error);
}

void ReportInvalidAllocationAlignment(uptr alignment,
                                      BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorInvalidAllocationAlignment error(GetCurrentTidOrInvalid(), stack,
                                        alignment);
  in_report.ReportError(error);
}

void ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment,
                                        BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorInvalidAlignedAllocAlignment error(GetCurrentTidOrInvalid(), stack,
                                          size, alignment);
  in_report.ReportError(error);
}

void ReportInvalidPosixMemalignAlignment(uptr alignment,
                                         BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorInvalidPosixMemalignAlignment error(GetCurrentTidOrInvalid(), stack,
                                           alignment);
  in_report.ReportError(error);
}

void ReportAllocationSizeTooBig(uptr user_size, uptr total_size, uptr max_size,
                                BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorAllocationSizeTooBig error(GetCurrentTidOrInvalid(), stack, user_size,
                                  total_size, max_size);
  in_report.ReportError(error);
}

void ReportRssLimitExceeded(BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorRssLimitExceeded error(GetCurrentTidOrInvalid(), stack);
  in_report.ReportError(error);
}

void ReportOutOfMemory(uptr requested_size, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorOutOfMemory error(GetCurrentTidOrInvalid(), stack, requested_size);
  in_report.ReportError(error);
}

void ReportStringFunctionMemoryRangesOverlap(const char *function,
                                             const char *offset1, uptr length1,
                                             const char *offset2, uptr length2,
                                             BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorStringFunctionMemoryRangesOverlap error(
      GetCurrentTidOrInvalid(), stack, (uptr)offset1, length1, (uptr)offset2,
      length2, function);
  in_report.ReportError(error);
}

void ReportStringFunctionSizeOverflow(uptr offset, uptr size,
                                      BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorStringFunctionSizeOverflow error(GetCurrentTidOrInvalid(), stack, offset,
                                        size);
  in_report.ReportError(error);
}

void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
                                                  uptr old_mid, uptr new_mid,
                                                  BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorBadParamsToAnnotateContiguousContainer error(
      GetCurrentTidOrInvalid(), stack, beg, end, old_mid, new_mid);
  in_report.ReportError(error);
}

void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
                        const __asan_global *g2, u32 stack_id2) {
  ScopedInErrorReport in_report;
  ErrorODRViolation error(GetCurrentTidOrInvalid(), g1, stack_id1, g2,
                          stack_id2);
  in_report.ReportError(error);
}

// ----------------------- CheckForInvalidPointerPair ----------- {{{1
static NOINLINE void ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp,
                                              uptr a1, uptr a2) {
  ScopedInErrorReport in_report;
  ErrorInvalidPointerPair error(GetCurrentTidOrInvalid(), pc, bp, sp, a1, a2);
  in_report.ReportError(error);
}

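// Returns true when a1 and a2 appear to point into different objects (stack
// variables, heap chunks or globals); returns false when the pointers are
// equal, point into the same object, or when nothing is known about either
// address.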
static bool IsInvalidPointerPair(uptr a1, uptr a2) {
  if (a1 == a2)
    return false;

  // Offsets of up to 2048 bytes correspond to 256 bytes of shadow memory,
  // which can be scanned quite fast.
  static const uptr kMaxOffset = 2048;

  uptr left = a1 < a2 ? a1 : a2;
  uptr right = a1 < a2 ? a2 : a1;
  uptr offset = right - left;
  if (offset <= kMaxOffset)
    return __asan_region_is_poisoned(left, offset);

  AsanThread *t = GetCurrentThread();

  // Check whether left is a stack memory pointer.
  if (uptr shadow_offset1 = t->GetStackVariableShadowStart(left)) {
    uptr shadow_offset2 = t->GetStackVariableShadowStart(right);
    return shadow_offset2 == 0 || shadow_offset1 != shadow_offset2;
  }

  // Check whether left is a heap memory address.
  HeapAddressDescription hdesc1, hdesc2;
  if (GetHeapAddressInformation(left, 0, &hdesc1) &&
      hdesc1.chunk_access.access_type == kAccessTypeInside)
    return !GetHeapAddressInformation(right, 0, &hdesc2) ||
        hdesc2.chunk_access.access_type != kAccessTypeInside ||
        hdesc1.chunk_access.chunk_begin != hdesc2.chunk_access.chunk_begin;

  // Check whether left is an address of a global variable.
  GlobalAddressDescription gdesc1, gdesc2;
  if (GetGlobalAddressInformation(left, 0, &gdesc1))
    return !GetGlobalAddressInformation(right - 1, 0, &gdesc2) ||
        !gdesc1.PointsInsideTheSameVariable(gdesc2);

  if (t->GetStackVariableShadowStart(right) ||
      GetHeapAddressInformation(right, 0, &hdesc2) ||
      GetGlobalAddressInformation(right - 1, 0, &gdesc2))
    return true;

  // At this point we know nothing about either a1 or a2.
  return false;
}

static inline void CheckForInvalidPointerPair(void *p1, void *p2) {
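  // detect_invalid_pointer_pairs: 0 - detection disabled (default); 1 - check
  // only pairs where both pointers are non-null; any other value - check all
  // pairs.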
  switch (flags()->detect_invalid_pointer_pairs) {
    case 0:
      return;
    case 1:
      if (p1 == nullptr || p2 == nullptr)
        return;
      break;
  }

  uptr a1 = reinterpret_cast<uptr>(p1);
  uptr a2 = reinterpret_cast<uptr>(p2);

  if (IsInvalidPointerPair(a1, a2)) {
    GET_CALLER_PC_BP_SP;
    ReportInvalidPointerPair(pc, bp, sp, a1, a2);
  }
}
// ----------------------- Mac-specific reports ----------------- {{{1

void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name,
                               BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  Printf(
      "mz_realloc(%p) -- attempting to realloc unallocated memory.\n"
      "This is an unrecoverable problem, exiting now.\n",
      (void *)addr);
  PrintZoneForPointer(addr, zone_ptr, zone_name);
  stack->Print();
  DescribeAddressIfHeap(addr);
}

// -------------- SuppressErrorReport -------------- {{{1
// Avoid duplicate error reports in ASan recovery mode.
static bool SuppressErrorReport(uptr pc) {
  if (!common_flags()->suppress_equal_pcs) return false;
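  // Claim a free slot in the PC pool for this PC (and report), or suppress the
  // report if this PC has already been reported. If the pool is full of other
  // PCs, give up and die below.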
  for (unsigned i = 0; i < kAsanBuggyPcPoolSize; i++) {
    uptr cmp = atomic_load_relaxed(&AsanBuggyPcPool[i]);
    if (cmp == 0 && atomic_compare_exchange_strong(&AsanBuggyPcPool[i], &cmp,
                                                   pc, memory_order_relaxed))
      return false;
    if (cmp == pc) return true;
  }
  Die();
}

void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
                        uptr access_size, u32 exp, bool fatal) {
  if (!fatal && SuppressErrorReport(pc)) return;
  ENABLE_FRAME_POINTER;

  // Optimization experiments.
  // The experiments can be used to evaluate potential optimizations that
  // remove instrumentation (assess false negatives). Instead of completely
  // removing some instrumentation, the compiler can emit special calls into
  // the runtime (e.g. __asan_report_exp_load1 instead of __asan_report_load1)
  // and pass a mask of experiments (exp).
  // The reaction to a non-zero value of exp is to be defined.
  (void)exp;

  ScopedInErrorReport in_report(fatal);
  ErrorGeneric error(GetCurrentTidOrInvalid(), pc, bp, sp, addr, is_write,
                     access_size);
  in_report.ReportError(error);
}

}  // namespace __asan

// --------------------------- Interface --------------------- {{{1
using namespace __asan;

void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
                         uptr access_size, u32 exp) {
  ENABLE_FRAME_POINTER;
  bool fatal = flags()->halt_on_error;
  ReportGenericError(pc, bp, sp, addr, is_write, access_size, exp, fatal);
}

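// For illustration, a user can capture the full text of each report with a
// callback like this (hypothetical user code, not part of the runtime):
//
//   static void MyReportCallback(const char *report) {
//     fputs(report, stderr);  // or forward to a custom crash handler
//   }
//   ...
//   __asan_set_error_report_callback(MyReportCallback);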
void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) {
  Lock l(&error_message_buf_mutex);
  error_report_callback = callback;
}

void __asan_describe_address(uptr addr) {
  // Thread registry must be locked while we're describing an address.
  asanThreadRegistry().Lock();
  PrintAddressDescription(addr, 1, "");
  asanThreadRegistry().Unlock();
}

int __asan_report_present() {
  return ScopedInErrorReport::CurrentError().kind != kErrorKindInvalid;
}

uptr __asan_get_report_pc() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.pc;
  return 0;
}

uptr __asan_get_report_bp() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.bp;
  return 0;
}

uptr __asan_get_report_sp() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.sp;
  return 0;
}

uptr __asan_get_report_address() {
  ErrorDescription &err = ScopedInErrorReport::CurrentError();
  if (err.kind == kErrorKindGeneric)
    return err.Generic.addr_description.Address();
  else if (err.kind == kErrorKindDoubleFree)
    return err.DoubleFree.addr_description.addr;
  return 0;
}

int __asan_get_report_access_type() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.is_write;
  return 0;
}

uptr __asan_get_report_access_size() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.access_size;
  return 0;
}

const char *__asan_get_report_description() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.bug_descr;
  return ScopedInErrorReport::CurrentError().Base.scariness.GetDescription();
}

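// __sanitizer_ptr_sub and __sanitizer_ptr_cmp are called by compiler-emitted
// instrumentation for pointer subtraction and comparison (e.g. clang's
// -fsanitize=pointer-subtract and -fsanitize=pointer-compare).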
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_ptr_sub(void *a, void *b) {
  CheckForInvalidPointerPair(a, b);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_ptr_cmp(void *a, void *b) {
  CheckForInvalidPointerPair(a, b);
}
} // extern "C"

// Provide a default implementation of __asan_on_error that does nothing and
// may be overridden by the user.
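// For illustration, a program may provide its own hook (hypothetical user
// code):
//
//   extern "C" void __asan_on_error() {
//     // E.g. set a flag or flush application logs before the report prints.
//   }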
SANITIZER_INTERFACE_WEAK_DEF(void, __asan_on_error, void) {}