//===-- hwasan_report.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// Error reporting.
//===----------------------------------------------------------------------===//

#include "hwasan_report.h"

#include <dlfcn.h>

#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_globals.h"
#include "hwasan_mapping.h"
#include "hwasan_thread.h"
#include "hwasan_thread_list.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace_printer.h"
#include "sanitizer_common/sanitizer_symbolizer.h"

using namespace __sanitizer;

namespace __hwasan {

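// RAII wrapper around a single error report. While a fatal report is active,
// report text is also accumulated in error_message_ (via
// AppendToErrorMessageBuffer below); the destructor forwards the accumulated
// text to the user-registered callback and to SetAbortMessage(), and calls
// Die() for fatal errors.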
class ScopedReport {
 public:
  ScopedReport(bool fatal = false) : error_message_(1), fatal(fatal) {
    Lock lock(&error_message_lock_);
    error_message_ptr_ = fatal ? &error_message_ : nullptr;
    ++hwasan_report_count;
  }

  ~ScopedReport() {
    void (*report_cb)(const char *);
    {
      Lock lock(&error_message_lock_);
      report_cb = error_report_callback_;
      error_message_ptr_ = nullptr;
    }
    if (report_cb)
      report_cb(error_message_.data());
    if (fatal)
      SetAbortMessage(error_message_.data());
    if (common_flags()->print_module_map >= 2 ||
        (fatal && common_flags()->print_module_map))
      DumpProcessMap();
    if (fatal)
      Die();
  }

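  // Append msg to the currently active report's buffer, if any. The buffer
  // always stores a trailing '\0', so the copy starts at old_size - 1 to
  // overwrite the old terminator; resize() leaves a new one in place.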
  static void MaybeAppendToErrorMessage(const char *msg) {
    Lock lock(&error_message_lock_);
    if (!error_message_ptr_)
      return;
    uptr len = internal_strlen(msg);
    uptr old_size = error_message_ptr_->size();
    error_message_ptr_->resize(old_size + len);
    // overwrite old trailing '\0', keep new trailing '\0' untouched.
    internal_memcpy(&(*error_message_ptr_)[old_size - 1], msg, len);
  }

  static void SetErrorReportCallback(void (*callback)(const char *)) {
    Lock lock(&error_message_lock_);
    error_report_callback_ = callback;
  }

 private:
  ScopedErrorReportLock error_report_lock_;
  InternalMmapVector<char> error_message_;
  bool fatal;

  static InternalMmapVector<char> *error_message_ptr_;
  static Mutex error_message_lock_;
  static void (*error_report_callback_)(const char *);
};

InternalMmapVector<char> *ScopedReport::error_message_ptr_;
Mutex ScopedReport::error_message_lock_;
void (*ScopedReport::error_report_callback_)(const char *);

// If there is an active ScopedReport, append to its error message.
void AppendToErrorMessageBuffer(const char *buffer) {
  ScopedReport::MaybeAppendToErrorMessage(buffer);
}

static StackTrace GetStackTraceFromId(u32 id) {
  CHECK(id);
  StackTrace res = StackDepotGet(id);
  CHECK(res.trace);
  return res;
}

// A RAII object that holds a copy of the current thread stack ring buffer.
// The actual stack buffer may change while we are iterating over it (for
// example, Printf may call syslog() which can itself be built with hwasan).
class SavedStackAllocations {
 public:
  SavedStackAllocations(StackAllocationsRingBuffer *rb) {
    uptr size = rb->size() * sizeof(uptr);
    void *storage =
        MmapAlignedOrDieOnFatalError(size, size * 2, "saved stack allocations");
    new (&rb_) StackAllocationsRingBuffer(*rb, storage);
  }

  ~SavedStackAllocations() {
    StackAllocationsRingBuffer *rb = get();
    UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr));
  }

  StackAllocationsRingBuffer *get() {
    return (StackAllocationsRingBuffer *)&rb_;
  }

 private:
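  // Pointer-sized slot that the StackAllocationsRingBuffer object is
  // placement-new'ed into; get() reinterprets it as the ring buffer.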
  uptr rb_;
};

class Decorator: public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() { }
  const char *Access() { return Blue(); }
  const char *Allocation() const { return Magenta(); }
  const char *Origin() const { return Magenta(); }
  const char *Name() const { return Green(); }
  const char *Location() { return Green(); }
  const char *Thread() { return Green(); }
};

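// Scan the thread's heap deallocation ring buffer for a record whose tagged
// range covers tagged_addr. On success, copy the record into *har and its
// position into *ring_index. The two counters measure how many records would
// have matched under weaker schemes (untagged addresses, 4-bit MTE tags); they
// only feed the hwasan_dev_note_* diagnostics printed below.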
static bool FindHeapAllocation(HeapAllocationsRingBuffer *rb, uptr tagged_addr,
                               HeapAllocationRecord *har, uptr *ring_index,
                               uptr *num_matching_addrs,
                               uptr *num_matching_addrs_4b) {
  if (!rb) return false;

  *num_matching_addrs = 0;
  *num_matching_addrs_4b = 0;
  for (uptr i = 0, size = rb->size(); i < size; i++) {
    auto h = (*rb)[i];
    if (h.tagged_addr <= tagged_addr &&
        h.tagged_addr + h.requested_size > tagged_addr) {
      *har = h;
      *ring_index = i;
      return true;
    }

    // Measure the number of heap ring buffer entries that would have matched
    // if we had only one entry per address (e.g. if the ring buffer data was
    // stored at the address itself). This will help us tune the allocator
    // implementation for MTE.
    if (UntagAddr(h.tagged_addr) <= UntagAddr(tagged_addr) &&
        UntagAddr(h.tagged_addr) + h.requested_size > UntagAddr(tagged_addr)) {
      ++*num_matching_addrs;
    }

    // Measure the number of heap ring buffer entries that would have matched
    // if we only had 4 tag bits, which is the case for MTE.
    auto untag_4b = [](uptr p) {
      return p & ((1ULL << 60) - 1);
    };
    if (untag_4b(h.tagged_addr) <= untag_4b(tagged_addr) &&
        untag_4b(h.tagged_addr) + h.requested_size > untag_4b(tagged_addr)) {
      ++*num_matching_addrs_4b;
    }
  }
  return false;
}

static void PrintStackAllocations(StackAllocationsRingBuffer *sa,
                                  tag_t addr_tag, uptr untagged_addr) {
  uptr frames = Min((uptr)flags()->stack_history_size, sa->size());
  bool found_local = false;
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    if (!record)
      break;
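    // Each record packs the frame PC into its low kRecordFPShift bits and
    // (shifted) FP bits into the rest; the frame's base tag is recovered from
    // the address of the ring buffer slot itself.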
    tag_t base_tag =
        reinterpret_cast<uptr>(record_addr) >> kRecordAddrBaseTagShift;
    uptr fp = (record >> kRecordFPShift) << kRecordFPLShift;
    uptr pc_mask = (1ULL << kRecordFPShift) - 1;
    uptr pc = record & pc_mask;
    FrameInfo frame;
    if (Symbolizer::GetOrInit()->SymbolizeFrame(pc, &frame)) {
      for (LocalInfo &local : frame.locals) {
        if (!local.has_frame_offset || !local.has_size || !local.has_tag_offset)
          continue;
        tag_t obj_tag = base_tag ^ local.tag_offset;
        if (obj_tag != addr_tag)
          continue;
        // Calculate the offset from the object address to the faulting
        // address. Because we only store bits 4-19 of FP (bits 0-3 are
        // guaranteed to be zero), the calculation is performed mod 2^20 and may
        // harmlessly underflow if the address mod 2^20 is below the object
        // address.
        uptr obj_offset =
            (untagged_addr - fp - local.frame_offset) & (kRecordFPModulus - 1);
        if (obj_offset >= local.size)
          continue;
        if (!found_local) {
          Printf("Potentially referenced stack objects:\n");
          found_local = true;
        }
        Printf("  %s in %s %s:%d\n", local.name, local.function_name,
               local.decl_file, local.decl_line);
      }
      frame.Clear();
    }
  }

  if (found_local)
    return;

  // We didn't find any locals. Most likely we don't have symbols, so dump
  // the information that we have for offline analysis.
  InternalScopedString frame_desc;
  Printf("Previously allocated frames:\n");
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    if (!record)
      break;
    uptr pc_mask = (1ULL << 48) - 1;
    uptr pc = record & pc_mask;
    frame_desc.append("  record_addr:0x%zx record:0x%zx",
                      reinterpret_cast<uptr>(record_addr), record);
    if (SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc)) {
      RenderFrame(&frame_desc, " %F %L", 0, frame->info.address, &frame->info,
                  common_flags()->symbolize_vs_style,
                  common_flags()->strip_path_prefix);
      frame->ClearAll();
    }
    Printf("%s\n", frame_desc.data());
    frame_desc.clear();
  }
}

// Returns true if tag == *tag_ptr, reading tags from short granules if
// necessary. This may return a false positive if tags 1-15 are used as a
// regular tag rather than a short granule marker.
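// Shadow values 1..kShadowAlignment-1 denote a short granule: only that many
// leading bytes of the 16-byte granule are addressable, and the granule's real
// tag is stored inline in its last byte. E.g. a shadow value of 0x07 means 7
// valid bytes, with the tag to compare against at granule offset 15.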
static bool TagsEqual(tag_t tag, tag_t *tag_ptr) {
  if (tag == *tag_ptr)
    return true;
  if (*tag_ptr == 0 || *tag_ptr > kShadowAlignment - 1)
    return false;
  uptr mem = ShadowToMem(reinterpret_cast<uptr>(tag_ptr));
  tag_t inline_tag = *reinterpret_cast<tag_t *>(mem + kShadowAlignment - 1);
  return tag == inline_tag;
}

// HWASan globals store the size of the global in the descriptor. In cases where
// we don't have a binary with symbols, we can't grab the size of the global
// from the debug info - but we might be able to retrieve it from the
// descriptor. Returns zero if the lookup failed.
static uptr GetGlobalSizeFromDescriptor(uptr ptr) {
  // Find the ELF object that this global resides in.
  Dl_info info;
  if (dladdr(reinterpret_cast<void *>(ptr), &info) == 0)
    return 0;
  auto *ehdr = reinterpret_cast<const ElfW(Ehdr) *>(info.dli_fbase);
  auto *phdr_begin = reinterpret_cast<const ElfW(Phdr) *>(
      reinterpret_cast<const u8 *>(ehdr) + ehdr->e_phoff);

  // Get the load bias. This is normally the same as the dli_fbase address on
  // position-independent code, but can be different on non-PIE executables,
  // binaries using LLD's partitioning feature, or binaries compiled with a
  // linker script.
  ElfW(Addr) load_bias = 0;
  for (const auto &phdr :
       ArrayRef<const ElfW(Phdr)>(phdr_begin, phdr_begin + ehdr->e_phnum)) {
    if (phdr.p_type != PT_LOAD || phdr.p_offset != 0)
      continue;
    load_bias = reinterpret_cast<ElfW(Addr)>(ehdr) - phdr.p_vaddr;
    break;
  }

  // Walk all globals in this ELF object, looking for the one that contains
  // ptr, and return its size once found.
  for (const hwasan_global &global :
       HwasanGlobalsFor(load_bias, phdr_begin, ehdr->e_phnum))
    if (global.addr() <= ptr && ptr < global.addr() + global.size())
      return global.size();

  return 0;
}

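// Describe the candidate shadow granule found near the faulting address: if
// the allocator knows a live chunk there, report a heap-buffer-overflow
// relative to that chunk; otherwise, if the address falls inside a loaded
// module, report a global-overflow, symbolizing the global when possible.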
static void ShowHeapOrGlobalCandidate(uptr untagged_addr, tag_t *candidate,
                                      tag_t *left, tag_t *right) {
  Decorator d;
  uptr mem = ShadowToMem(reinterpret_cast<uptr>(candidate));
  HwasanChunkView chunk = FindHeapChunkByAddress(mem);
  if (chunk.IsAllocated()) {
    uptr offset;
    const char *whence;
    if (untagged_addr < chunk.End() && untagged_addr >= chunk.Beg()) {
      offset = untagged_addr - chunk.Beg();
      whence = "inside";
    } else if (candidate == left) {
      offset = untagged_addr - chunk.End();
      whence = "to the right of";
    } else {
      offset = chunk.Beg() - untagged_addr;
      whence = "to the left of";
    }
    Printf("%s", d.Error());
    Printf("\nCause: heap-buffer-overflow\n");
    Printf("%s", d.Default());
    Printf("%s", d.Location());
    Printf("%p is located %zd bytes %s %zd-byte region [%p,%p)\n",
           untagged_addr, offset, whence, chunk.UsedSize(), chunk.Beg(),
           chunk.End());
    Printf("%s", d.Allocation());
    Printf("allocated here:\n");
    Printf("%s", d.Default());
    GetStackTraceFromId(chunk.GetAllocStackId()).Print();
    return;
  }
  // Check whether the address points into a loaded library. If so, this is
  // most likely a global variable.
  const char *module_name;
  uptr module_address;
  Symbolizer *sym = Symbolizer::GetOrInit();
  if (sym->GetModuleNameAndOffsetForPC(mem, &module_name, &module_address)) {
    Printf("%s", d.Error());
    Printf("\nCause: global-overflow\n");
    Printf("%s", d.Default());
    DataInfo info;
    Printf("%s", d.Location());
    if (sym->SymbolizeData(mem, &info) && info.start) {
      Printf(
          "%p is located %zd bytes to the %s of %zd-byte global variable "
          "%s [%p,%p) in %s\n",
          untagged_addr,
          candidate == left ? untagged_addr - (info.start + info.size)
                            : info.start - untagged_addr,
          candidate == left ? "right" : "left", info.size, info.name,
          info.start, info.start + info.size, module_name);
    } else {
      uptr size = GetGlobalSizeFromDescriptor(mem);
      if (size == 0)
        // We couldn't find the size of the global from the descriptors.
        Printf(
            "%p is located to the %s of a global variable in "
            "\n    #0 0x%x (%s+0x%x)\n",
            untagged_addr, candidate == left ? "right" : "left", mem,
            module_name, module_address);
      else
        Printf(
            "%p is located to the %s of a %zd-byte global variable in "
            "\n    #0 0x%x (%s+0x%x)\n",
            untagged_addr, candidate == left ? "right" : "left", size, mem,
            module_name, module_address);
    }
    Printf("%s", d.Default());
  }
}

void PrintAddressDescription(
    uptr tagged_addr, uptr access_size,
    StackAllocationsRingBuffer *current_stack_allocations) {
  Decorator d;
  int num_descriptions_printed = 0;
  uptr untagged_addr = UntagAddr(tagged_addr);

  if (MemIsShadow(untagged_addr)) {
    Printf("%s%p is HWAsan shadow memory.\n%s", d.Location(), untagged_addr,
           d.Default());
    return;
  }

  // Print some very basic information about the address, if it belongs to
  // the heap.
  HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
  if (uptr beg = chunk.Beg()) {
    uptr size = chunk.ActualSize();
    Printf("%s[%p,%p) is a %s %s heap chunk; "
           "size: %zd offset: %zd\n%s",
           d.Location(),
           beg, beg + size,
           chunk.FromSmallHeap() ? "small" : "large",
           chunk.IsAllocated() ? "allocated" : "unallocated",
           size, untagged_addr - beg,
           d.Default());
  }

  tag_t addr_tag = GetTagFromPointer(tagged_addr);

  bool on_stack = false;
  // Check stack first. If the address is on the stack of a live thread, we
  // know it cannot be a heap / global overflow.
  hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
    if (t->AddrIsInStack(untagged_addr)) {
      on_stack = true;
      // TODO(fmayer): figure out how to distinguish use-after-return and
      // stack-buffer-overflow.
      Printf("%s", d.Error());
      Printf("\nCause: stack tag-mismatch\n");
      Printf("%s", d.Location());
      Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
             t->unique_id());
      Printf("%s", d.Default());
      t->Announce();

      auto *sa = (t == GetCurrentThread() && current_stack_allocations)
                     ? current_stack_allocations
                     : t->stack_allocations();
      PrintStackAllocations(sa, addr_tag, untagged_addr);
      num_descriptions_printed++;
    }
  });

  // Check if this looks like a heap buffer overflow by scanning the shadow
  // left and right for the nearest granule whose tag matches addr_tag, i.e.
  // the object the pointer was most likely derived from.
  tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
  tag_t *candidate = nullptr, *left = tag_ptr, *right = tag_ptr;
  uptr candidate_distance = 0;
  for (; candidate_distance < 1000; candidate_distance++) {
    if (MemIsShadow(reinterpret_cast<uptr>(left)) &&
        TagsEqual(addr_tag, left)) {
      candidate = left;
      break;
    }
    --left;
    if (MemIsShadow(reinterpret_cast<uptr>(right)) &&
        TagsEqual(addr_tag, right)) {
      candidate = right;
      break;
    }
    ++right;
  }

  constexpr auto kCloseCandidateDistance = 1;

  if (!on_stack && candidate && candidate_distance <= kCloseCandidateDistance) {
    ShowHeapOrGlobalCandidate(untagged_addr, candidate, left, right);
    num_descriptions_printed++;
  }

  hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
    // Scan all threads' ring buffers to determine whether this is a
    // heap-use-after-free.
    HeapAllocationRecord har;
    uptr ring_index, num_matching_addrs, num_matching_addrs_4b;
    if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har,
                           &ring_index, &num_matching_addrs,
                           &num_matching_addrs_4b)) {
      Printf("%s", d.Error());
      Printf("\nCause: use-after-free\n");
      Printf("%s", d.Location());
      Printf("%p is located %zd bytes inside of %zd-byte region [%p,%p)\n",
             untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
             har.requested_size, UntagAddr(har.tagged_addr),
             UntagAddr(har.tagged_addr) + har.requested_size);
      Printf("%s", d.Allocation());
      Printf("freed by thread T%zd here:\n", t->unique_id());
      Printf("%s", d.Default());
      GetStackTraceFromId(har.free_context_id).Print();

      Printf("%s", d.Allocation());
      Printf("previously allocated here:\n");
      Printf("%s", d.Default());
      GetStackTraceFromId(har.alloc_context_id).Print();

      // Print a developer note: the index of this heap object
      // in the thread's deallocation ring buffer.
      Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ring_index + 1,
             flags()->heap_history_size);
      Printf("hwasan_dev_note_num_matching_addrs: %zd\n", num_matching_addrs);
      Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n",
             num_matching_addrs_4b);

      t->Announce();
      num_descriptions_printed++;
    }
  });

  if (candidate && num_descriptions_printed == 0) {
    ShowHeapOrGlobalCandidate(untagged_addr, candidate, left, right);
    num_descriptions_printed++;
  }

  // Print the remaining threads as extra information, one line per thread.
  hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });

  if (!num_descriptions_printed)
    // We exhausted our possibilities. Bail out.
    Printf("HWAddressSanitizer cannot describe address in more detail.\n");
  if (num_descriptions_printed > 1) {
    Printf(
        "There are %d potential causes, printed above in order "
        "of likelihood.\n",
        num_descriptions_printed);
  }
}

void ReportStats() {}

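// Print num_rows rows of 16 shadow tags centered on tag_ptr, marking the
// center row with "=>" and bracketing the cell at tag_ptr; print_tag renders
// each tag byte. A row looks roughly like (addresses illustrative):
//   => 0x0072ff0060: 2f 2f [2f] 2f ...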
static void PrintTagInfoAroundAddr(tag_t *tag_ptr, uptr num_rows,
                                   void (*print_tag)(InternalScopedString &s,
                                                     tag_t *tag)) {
  const uptr row_len = 16;  // Must be a power of two.
  tag_t *center_row_beg = reinterpret_cast<tag_t *>(
      RoundDownTo(reinterpret_cast<uptr>(tag_ptr), row_len));
  tag_t *beg_row = center_row_beg - row_len * (num_rows / 2);
  tag_t *end_row = center_row_beg + row_len * ((num_rows + 1) / 2);
  InternalScopedString s;
  for (tag_t *row = beg_row; row < end_row; row += row_len) {
    s.append("%s", row == center_row_beg ? "=>" : "  ");
    s.append("%p:", (void *)row);
    for (uptr i = 0; i < row_len; i++) {
      s.append("%s", row + i == tag_ptr ? "[" : " ");
      print_tag(s, &row[i]);
      s.append("%s", row + i == tag_ptr ? "]" : " ");
    }
    s.append("\n");
  }
  Printf("%s", s.data());
}

static void PrintTagsAroundAddr(tag_t *tag_ptr) {
  Printf(
      "Memory tags around the buggy address (one tag corresponds to %zd "
      "bytes):\n", kShadowAlignment);
  PrintTagInfoAroundAddr(tag_ptr, 17, [](InternalScopedString &s, tag_t *tag) {
    s.append("%02x", *tag);
  });

  Printf(
      "Tags for short granules around the buggy address (one tag corresponds "
      "to %zd bytes):\n",
      kShadowAlignment);
  PrintTagInfoAroundAddr(tag_ptr, 3, [](InternalScopedString &s, tag_t *tag) {
    if (*tag >= 1 && *tag <= kShadowAlignment) {
      uptr granule_addr = ShadowToMem(reinterpret_cast<uptr>(tag));
      s.append("%02x",
               *reinterpret_cast<u8 *>(granule_addr + kShadowAlignment - 1));
    } else {
      s.append("..");
    }
  });
  Printf(
      "See "
      "https://clang.llvm.org/docs/"
      "HardwareAssistedAddressSanitizerDesign.html#short-granules for a "
      "description of short granule tags\n");
}

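// Return the PC of the topmost stack frame, rewound to the call instruction
// (StackTrace stores return addresses), or 0 for an empty trace.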
uptr GetTopPc(StackTrace *stack) {
  return stack->size ? StackTrace::GetPreviousInstructionPc(stack->trace[0])
                     : 0;
}

void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
  ScopedReport R(flags()->halt_on_error);

  uptr untagged_addr = UntagAddr(tagged_addr);
  tag_t ptr_tag = GetTagFromPointer(tagged_addr);
  tag_t *tag_ptr = nullptr;
  tag_t mem_tag = 0;
  if (MemIsApp(untagged_addr)) {
    tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
    if (MemIsShadow(reinterpret_cast<uptr>(tag_ptr)))
      mem_tag = *tag_ptr;
    else
      tag_ptr = nullptr;
  }
  Decorator d;
  Printf("%s", d.Error());
  uptr pc = GetTopPc(stack);
  const char *bug_type = "invalid-free";
  const Thread *thread = GetCurrentThread();
  if (thread) {
    Report("ERROR: %s: %s on address %p at pc %p on thread T%zd\n",
           SanitizerToolName, bug_type, untagged_addr, pc, thread->unique_id());
  } else {
    Report("ERROR: %s: %s on address %p at pc %p on unknown thread\n",
           SanitizerToolName, bug_type, untagged_addr, pc);
  }
  Printf("%s", d.Access());
  if (tag_ptr)
    Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag, mem_tag);
  Printf("%s", d.Default());

  stack->Print();

  PrintAddressDescription(tagged_addr, 0, nullptr);

  if (tag_ptr)
    PrintTagsAroundAddr(tag_ptr);

  ReportErrorSummary(bug_type, stack);
}

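// Report that the free_checks_tail_magic check failed: the bytes between the
// end of a heap object and the end of its 16-byte granule no longer match the
// magic pattern written at allocation time, i.e. an intra-granule buffer
// overflow happened some time before the deallocation.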
void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
                           const u8 *expected) {
  uptr tail_size = kShadowAlignment - (orig_size % kShadowAlignment);
  u8 actual_expected[kShadowAlignment];
  internal_memcpy(actual_expected, expected, tail_size);
  tag_t ptr_tag = GetTagFromPointer(tagged_addr);
  // The short granule tag is stashed in the last byte of the magic string. To
  // avoid confusion, make the expected magic string contain the short granule
  // tag.
  if (orig_size % kShadowAlignment != 0) {
    actual_expected[tail_size - 1] = ptr_tag;
  }

  ScopedReport R(flags()->halt_on_error);
  Decorator d;
  uptr untagged_addr = UntagAddr(tagged_addr);
  Printf("%s", d.Error());
  const char *bug_type = "allocation-tail-overwritten";
  Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName,
         bug_type, untagged_addr, untagged_addr + orig_size, orig_size);
  Printf("\n%s", d.Default());
  Printf(
      "Stack of invalid access unknown. Issue detected at deallocation "
      "time.\n");
  Printf("%s", d.Allocation());
  Printf("deallocated here:\n");
  Printf("%s", d.Default());
  stack->Print();
  HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
  if (chunk.Beg()) {
    Printf("%s", d.Allocation());
    Printf("allocated here:\n");
    Printf("%s", d.Default());
    GetStackTraceFromId(chunk.GetAllocStackId()).Print();
  }

  InternalScopedString s;
  CHECK_GT(tail_size, 0U);
  CHECK_LT(tail_size, kShadowAlignment);
  u8 *tail = reinterpret_cast<u8*>(untagged_addr + orig_size);
  s.append("Tail contains: ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
    s.append(".. ");
  for (uptr i = 0; i < tail_size; i++)
    s.append("%02x ", tail[i]);
  s.append("\n");
  s.append("Expected:      ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
    s.append(".. ");
  for (uptr i = 0; i < tail_size; i++) s.append("%02x ", actual_expected[i]);
  s.append("\n");
  s.append("               ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
    s.append("   ");
  for (uptr i = 0; i < tail_size; i++)
    s.append("%s ", actual_expected[i] != tail[i] ? "^^" : "  ");

  s.append("\nThis error occurs when a buffer overflow overwrites memory\n"
    "to the right of a heap object, but within the %zd-byte granule, e.g.\n"
    "   char *x = new char[20];\n"
    "   x[25] = 42;\n"
    "%s does not detect such bugs in uninstrumented code at the time of write,"
    "\nbut can detect them at the time of free/delete.\n"
    "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0\n",
    kShadowAlignment, SanitizerToolName);
  Printf("%s", s.data());
  GetCurrentThread()->Announce();

  tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
  PrintTagsAroundAddr(tag_ptr);

  ReportErrorSummary(bug_type, stack);
}

void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
                       bool is_store, bool fatal, uptr *registers_frame) {
  ScopedReport R(fatal);
  SavedStackAllocations current_stack_allocations(
      GetCurrentThread()->stack_allocations());

  Decorator d;
  uptr untagged_addr = UntagAddr(tagged_addr);
  // TODO: when possible, try to print heap-use-after-free, etc.
  const char *bug_type = "tag-mismatch";
  uptr pc = GetTopPc(stack);
  Printf("%s", d.Error());
  Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
         untagged_addr, pc);

  Thread *t = GetCurrentThread();

  sptr offset =
      __hwasan_test_shadow(reinterpret_cast<void *>(tagged_addr), access_size);
  CHECK(offset >= 0 && offset < static_cast<sptr>(access_size));
  tag_t ptr_tag = GetTagFromPointer(tagged_addr);
  tag_t *tag_ptr =
      reinterpret_cast<tag_t *>(MemToShadow(untagged_addr + offset));
  tag_t mem_tag = *tag_ptr;

  Printf("%s", d.Access());
  if (mem_tag && mem_tag < kShadowAlignment) {
    tag_t *granule_ptr = reinterpret_cast<tag_t *>((untagged_addr + offset) &
                                                   ~(kShadowAlignment - 1));
    // The offset of the leftmost accessed byte within the bad granule (note
    // that untagged_addr + offset itself need not be granule-aligned).
    u8 in_granule_offset = (untagged_addr + offset) & (kShadowAlignment - 1);
    tag_t short_tag = granule_ptr[kShadowAlignment - 1];
    // The first mismatch was a short granule that matched the ptr_tag.
    if (short_tag == ptr_tag) {
      // If the access starts after the end of the short granule, the first
      // bad byte is the first byte of the access; otherwise it is the first
      // byte past the end of the short granule.
      if (mem_tag > in_granule_offset) {
        offset += mem_tag - in_granule_offset;
      }
    }
    Printf(
        "%s of size %zu at %p tags: %02x/%02x(%02x) (ptr/mem) in thread T%zd\n",
        is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
        mem_tag, short_tag, t->unique_id());
  } else {
    Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n",
           is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
           mem_tag, t->unique_id());
  }
  if (offset != 0)
    Printf("Invalid access starting at offset %zu\n", offset);
  Printf("%s", d.Default());

  stack->Print();

  PrintAddressDescription(tagged_addr, access_size,
                          current_stack_allocations.get());
  t->Announce();

  PrintTagsAroundAddr(tag_ptr);

  if (registers_frame)
    ReportRegisters(registers_frame, pc);

  ReportErrorSummary(bug_type, stack);
}

// See the frame breakdown defined in __hwasan_tag_mismatch (from
// hwasan_tag_mismatch_aarch64.S).
void ReportRegisters(uptr *frame, uptr pc) {
  Printf("Registers where the failure occurred (pc %p):\n", pc);

  // We explicitly print a single line (4 registers/line) each iteration to
  // reduce the amount of logcat error messages printed. Each Printf() will
  // result in a new logcat line, irrespective of whether a newline is present,
  // and so we wish to reduce the number of Printf() calls we have to make.
  Printf("    x0  %016llx  x1  %016llx  x2  %016llx  x3  %016llx\n",
       frame[0], frame[1], frame[2], frame[3]);
  Printf("    x4  %016llx  x5  %016llx  x6  %016llx  x7  %016llx\n",
       frame[4], frame[5], frame[6], frame[7]);
  Printf("    x8  %016llx  x9  %016llx  x10 %016llx  x11 %016llx\n",
       frame[8], frame[9], frame[10], frame[11]);
  Printf("    x12 %016llx  x13 %016llx  x14 %016llx  x15 %016llx\n",
       frame[12], frame[13], frame[14], frame[15]);
  Printf("    x16 %016llx  x17 %016llx  x18 %016llx  x19 %016llx\n",
       frame[16], frame[17], frame[18], frame[19]);
  Printf("    x20 %016llx  x21 %016llx  x22 %016llx  x23 %016llx\n",
       frame[20], frame[21], frame[22], frame[23]);
  Printf("    x24 %016llx  x25 %016llx  x26 %016llx  x27 %016llx\n",
       frame[24], frame[25], frame[26], frame[27]);
  // hwasan_check* reduces the stack pointer by 256, then __hwasan_tag_mismatch
  // passes it to this function.
  Printf("    x28 %016llx  x29 %016llx  x30 %016llx   sp %016llx\n", frame[28],
         frame[29], frame[30], reinterpret_cast<u8 *>(frame) + 256);
}

}  // namespace __hwasan

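// Register a callback that receives the text of each report. The callback is
// invoked for every report, but text is only accumulated for fatal ones (see
// ScopedReport). A minimal (hypothetical) usage sketch:
//
//   static void MyReportSink(const char *report) {
//     // Forward the report to a crash-logging facility of your choice.
//   }
//   ...
//   __hwasan_set_error_report_callback(&MyReportSink);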
void __hwasan_set_error_report_callback(void (*callback)(const char *)) {
  __hwasan::ScopedReport::SetErrorReportCallback(callback);
}
