1//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is shared between run-time libraries of sanitizers.
11//
12// It declares common functions and classes that are used in both runtimes.
13// Implementation of some functions are provided in sanitizer_common, while
14// others must be defined by run-time library itself.
15//===----------------------------------------------------------------------===//
16#ifndef SANITIZER_COMMON_H
17#define SANITIZER_COMMON_H
18
19#include "sanitizer_flags.h"
20#include "sanitizer_interface_internal.h"
21#include "sanitizer_internal_defs.h"
22#include "sanitizer_libc.h"
23#include "sanitizer_list.h"
24#include "sanitizer_mutex.h"
25
26#if defined(_MSC_VER) && !defined(__clang__)
27extern "C" void _ReadWriteBarrier();
28#pragma intrinsic(_ReadWriteBarrier)
29#endif
30
31namespace __sanitizer {
32
33struct AddressInfo;
34struct BufferedStackTrace;
35struct SignalContext;
36struct StackTrace;
37
38// Constants.
39const uptr kWordSize = SANITIZER_WORDSIZE / 8;
40const uptr kWordSizeInBits = 8 * kWordSize;
41
42const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;
43
44const uptr kMaxPathLength = 4096;
45
46const uptr kMaxThreadStackSize = 1 << 30;  // 1Gb
47
48static const uptr kErrorMessageBufferSize = 1 << 16;
49
50// Denotes fake PC values that come from JIT/JAVA/etc.
51// For such PC values __tsan_symbolize_external_ex() will be called.
52const u64 kExternalPCBit = 1ULL << 60;
53
54extern const char *SanitizerToolName;  // Can be changed by the tool.
55
56extern atomic_uint32_t current_verbosity;
// Sets the global verbosity level (relaxed atomic store; no ordering
// guarantees with respect to other state).
INLINE void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
// Returns the global verbosity level set by SetVerbosity().
INLINE int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}
63
64uptr GetPageSize();
65extern uptr PageSizeCached;
// Returns the page size, caching it in PageSizeCached on first use.
// NOTE(review): the check-then-write caching is racy; presumably benign
// because GetPageSize() always returns the same value -- verify.
INLINE uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
71uptr GetMmapGranularity();
72uptr GetMaxVirtualAddress();
73uptr GetMaxUserVirtualAddress();
74// Threads
75tid_t GetTid();
76int TgKill(pid_t pid, tid_t tid, int sig);
77uptr GetThreadSelf();
78void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
79                                uptr *stack_bottom);
80void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
81                          uptr *tls_addr, uptr *tls_size);
82
83// Memory management
84void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
// Like MmapOrDie, but passes raw_report == true so that a mapping failure
// is reported in raw form.
INLINE void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
88void UnmapOrDie(void *addr, uptr size);
89// Behaves just like MmapOrDie, but tolerates out of memory condition, in that
90// case returns nullptr.
91void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
92bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
93     WARN_UNUSED_RESULT;
94void *MmapNoReserveOrDie(uptr size, const char *mem_type);
95void *MmapFixedOrDie(uptr fixed_addr, uptr size);
96// Behaves just like MmapFixedOrDie, but tolerates out of memory condition, in
97// that case returns nullptr.
98void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size);
99void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
100void *MmapNoAccess(uptr size);
101// Map aligned chunk of address space; size and alignment are powers of two.
102// Dies on all but out of memory errors, in the latter case returns nullptr.
103void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
104                                   const char *mem_type);
// Disallow access to a memory range.  Use MmapFixedNoAccess to allocate an
// inaccessible memory region.
107bool MprotectNoAccess(uptr addr, uptr size);
108bool MprotectReadOnly(uptr addr, uptr size);
109
110void MprotectMallocZones(void *addr, int prot);
111
112// Find an available address space.
113uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
114                              uptr *largest_gap_found, uptr *max_occupied_addr);
115
116// Used to check if we can map shadow memory to a fixed location.
117bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
118// Releases memory pages entirely within the [beg, end] address range. Noop if
119// the provided range does not contain at least one entire page.
120void ReleaseMemoryPagesToOS(uptr beg, uptr end);
121void IncreaseTotalMmap(uptr size);
122void DecreaseTotalMmap(uptr size);
123uptr GetRSS();
124bool NoHugePagesInRegion(uptr addr, uptr length);
125bool DontDumpShadowMemory(uptr addr, uptr length);
126// Check if the built VMA size matches the runtime one.
127void CheckVMASize();
128void RunMallocHooks(const void *ptr, uptr size);
129void RunFreeHooks(const void *ptr);
130
// A reserved chunk of address space: Init() reserves the range, Map() /
// MapOrDie() commit pages within it, Unmap() releases part of it.
class ReservedAddressRange {
 public:
  // Reserves |size| bytes (optionally at |fixed_addr|); returns the base.
  uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
  // Maps |size| bytes at |fixed_addr| inside the reservation.
  uptr Map(uptr fixed_addr, uptr size);
  // Like Map(), but dies on failure instead of returning an error.
  uptr MapOrDie(uptr fixed_addr, uptr size);
  void Unmap(uptr addr, uptr size);
  void *base() const { return base_; }
  uptr size() const { return size_; }

 private:
  void* base_;        // Start of the reservation.
  uptr size_;         // Size of the reservation in bytes.
  const char* name_;  // Label for the mapping; not owned.
  uptr os_handle_;    // OS-specific bookkeeping (used on some platforms).
};
146
147typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
148                               /*out*/uptr *stats, uptr stats_size);
149
150// Parse the contents of /proc/self/smaps and generate a memory profile.
151// |cb| is a tool-specific callback that fills the |stats| array containing
152// |stats_size| elements.
153void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size);
154
155// Simple low-level (mmap-based) allocator for internal use. Doesn't have
156// constructor, so all instances of LowLevelAllocator should be
157// linker initialized.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);
 private:
  // Allocation cursor state; details live in the implementation file.
  char *allocated_end_;
  char *allocated_current_;
};
166// Set the min alignment of LowLevelAllocator to at least alignment.
167void SetLowLevelAllocateMinAlignment(uptr alignment);
168typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
169// Allows to register tool-specific callbacks for LowLevelAllocator.
170// Passing NULL removes the callback.
171void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
172
173// IO
174void CatastrophicErrorWrite(const char *buffer, uptr length);
175void RawWrite(const char *buffer);
176bool ColorizeReports();
177void RemoveANSIEscapeSequencesFromString(char *buffer);
178void Printf(const char *format, ...);
179void Report(const char *format, ...);
180void SetPrintfAndReportCallback(void (*callback)(const char *));
181#define VReport(level, ...)                                              \
182  do {                                                                   \
183    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
184  } while (0)
185#define VPrintf(level, ...)                                              \
186  do {                                                                   \
187    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
188  } while (0)
189
// Locks sanitizer error reporting and protects against nested errors.
// RAII guard: the constructor acquires the error-report lock, the
// destructor releases it.
class ScopedErrorReportLock {
 public:
  ScopedErrorReportLock();
  ~ScopedErrorReportLock();

  // Asserts that the report lock is currently held (implementation
  // elsewhere).
  static void CheckLocked();
};
198
199extern uptr stoptheworld_tracer_pid;
200extern uptr stoptheworld_tracer_ppid;
201
202bool IsAccessibleMemoryRange(uptr beg, uptr size);
203
204// Error report formatting.
205const char *StripPathPrefix(const char *filepath,
206                            const char *strip_file_prefix);
207// Strip the directories from the module name.
208const char *StripModuleName(const char *module);
209
210// OS
211uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
212uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
213uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
214const char *GetProcessName();
215void UpdateProcessName();
216void CacheBinaryName();
217void DisableCoreDumperIfNecessary();
218void DumpProcessMap();
219void PrintModuleMap();
220const char *GetEnv(const char *name);
221bool SetEnv(const char *name, const char *value);
222
223u32 GetUid();
224void ReExec();
225void CheckASLR();
226void CheckMPROTECT();
227char **GetArgv();
228char **GetEnviron();
229void PrintCmdline();
230bool StackSizeIsUnlimited();
231uptr GetStackSizeLimitInBytes();
232void SetStackSizeLimitInBytes(uptr limit);
233bool AddressSpaceIsUnlimited();
234void SetAddressSpaceUnlimited();
235void AdjustStackSize(void *attr);
236void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
237void SetSandboxingCallback(void (*f)());
238
239void InitializeCoverage(bool enabled, const char *coverage_dir);
240
241void InitTlsSize();
242uptr GetTlsSize();
243
244// Other
245void SleepForSeconds(int seconds);
246void SleepForMillis(int millis);
247u64 NanoTime();
248u64 MonotonicNanoTime();
249int Atexit(void (*function)(void));
250bool TemplateMatch(const char *templ, const char *str);
251
252// Exit
253void NORETURN Abort();
254void NORETURN Die();
255void NORETURN
256CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
257void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
258                                      const char *mmap_type, error_t err,
259                                      bool raw_report = false);
260
261// Specific tools may override behavior of "Die" and "CheckFailed" functions
262// to do tool-specific job.
263typedef void (*DieCallbackType)(void);
264
265// It's possible to add several callbacks that would be run when "Die" is
266// called. The callbacks will be run in the opposite order. The tools are
// strongly recommended to set up all callbacks during initialization, when
// there
268// is only a single thread.
269bool AddDieCallback(DieCallbackType callback);
270bool RemoveDieCallback(DieCallbackType callback);
271
272void SetUserDieCallback(DieCallbackType callback);
273
274typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
275                                       u64, u64);
276void SetCheckFailedCallback(CheckFailedCallbackType callback);
277
278// Callback will be called if soft_rss_limit_mb is given and the limit is
279// exceeded (exceeded==true) or if rss went down below the limit
280// (exceeded==false).
281// The callback should be registered once at the tool init time.
282void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
283
284// Functions related to signal handling.
285typedef void (*SignalHandlerType)(int, void *, void *);
286HandleSignalMode GetHandleSignalMode(int signum);
287void InstallDeadlySignalHandlers(SignalHandlerType handler);
288
289// Signal reporting.
290// Each sanitizer uses slightly different implementation of stack unwinding.
291typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
292                                              const void *callback_context,
293                                              BufferedStackTrace *stack);
294// Print deadly signal report and die.
295void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
296                        UnwindSignalStackCallbackType unwind,
297                        const void *unwind_context);
298
299// Part of HandleDeadlySignal, exposed for asan.
300void StartReportDeadlySignal();
301// Part of HandleDeadlySignal, exposed for asan.
302void ReportDeadlySignal(const SignalContext &sig, u32 tid,
303                        UnwindSignalStackCallbackType unwind,
304                        const void *unwind_context);
305
306// Alternative signal stack (POSIX-only).
307void SetAlternateSignalStack();
308void UnsetAlternateSignalStack();
309
310// We don't want a summary too long.
311const int kMaxSummaryLength = 1024;
312// Construct a one-line string:
313//   SUMMARY: SanitizerToolName: error_message
314// and pass it to __sanitizer_report_error_summary.
315// If alt_tool_name is provided, it's used in place of SanitizerToolName.
316void ReportErrorSummary(const char *error_message,
317                        const char *alt_tool_name = nullptr);
318// Same as above, but construct error_message as:
319//   error_type file:line[:column][ function]
320void ReportErrorSummary(const char *error_type, const AddressInfo &info,
321                        const char *alt_tool_name = nullptr);
322// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
323void ReportErrorSummary(const char *error_type, const StackTrace *trace,
324                        const char *alt_tool_name = nullptr);
325
326void ReportMmapWriteExec(int prot);
327
328// Math
329#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
330extern "C" {
331unsigned char _BitScanForward(unsigned long *index, unsigned long mask);  // NOLINT
332unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);  // NOLINT
333#if defined(_WIN64)
334unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);  // NOLINT
335unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);  // NOLINT
336#endif
337}
338#endif
339
// Returns the 0-based index of the most significant set bit of x.
// CHECK-fails when x == 0.
INLINE uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;  // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
  // GCC/Clang: index = (word size - 1) - count of leading zeros.
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  // MSVC: _BitScanReverse stores the index of the highest set bit.
  _BitScanReverse(&up, x);
#endif
  return up;
}
356
// Returns the 0-based index of the least significant set bit of x.
// CHECK-fails when x == 0.
INLINE uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;  // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
  // GCC/Clang: count of trailing zeros is exactly the bit index.
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  // MSVC: _BitScanForward stores the index of the lowest set bit.
  _BitScanForward(&up, x);
#endif
  return up;
}
373
374INLINE bool IsPowerOfTwo(uptr x) {
375  return (x & (x - 1)) == 0;
376}
377
// Rounds size up to the nearest power of two; CHECK-fails on size == 0.
// NOTE(review): if the top bit of size is set (and size is not already a
// power of two), the final shift exceeds the type width -- callers are
// presumably never passing such sizes; verify.
INLINE uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  uptr up = MostSignificantSetBitIndex(size);
  // Sanity: size lies strictly between 2^up and 2^(up+1).
  CHECK_LT(size, (1ULL << (up + 1)));
  CHECK_GT(size, (1ULL << up));
  return 1ULL << (up + 1);
}
387
388INLINE uptr RoundUpTo(uptr size, uptr boundary) {
389  RAW_CHECK(IsPowerOfTwo(boundary));
390  return (size + boundary - 1) & ~(boundary - 1);
391}
392
393INLINE uptr RoundDownTo(uptr x, uptr boundary) {
394  return x & ~(boundary - 1);
395}
396
397INLINE bool IsAligned(uptr a, uptr alignment) {
398  return (a & (alignment - 1)) == 0;
399}
400
// Returns log2(x) for a power-of-two x (CHECK-enforced).
INLINE uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  // For a power of two the lowest set bit is also the highest.
  return LeastSignificantSetBitIndex(x);
}
405
406// Don't use std::min, std::max or std::swap, to minimize dependency
407// on libstdc++.
408template<class T> T Min(T a, T b) { return a < b ? a : b; }
409template<class T> T Max(T a, T b) { return a > b ? a : b; }
// Exchanges a and b through a temporary copy (std::swap replacement that
// avoids a libstdc++ dependency).
template <class T>
void Swap(T &a, T &b) {
  T saved = a;
  a = b;
  b = saved;
}
415
416// Char handling
417INLINE bool IsSpace(int c) {
418  return (c == ' ') || (c == '\n') || (c == '\t') ||
419         (c == '\f') || (c == '\r') || (c == '\v');
420}
421INLINE bool IsDigit(int c) {
422  return (c >= '0') && (c <= '9');
423}
424INLINE int ToLower(int c) {
425  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
426}
427
428// A low-level vector based on mmap. May incur a significant memory overhead for
429// small vectors.
430// WARNING: The current implementation supports only POD types.
template<typename T>
class InternalMmapVectorNoCtor {
 public:
  // Sets up an empty vector and reserves |initial_capacity| elements.
  // Must be called before any other method (there is no constructor).
  void Initialize(uptr initial_capacity) {
    capacity_bytes_ = 0;
    size_ = 0;
    data_ = 0;
    reserve(initial_capacity);
  }
  // Unmaps the backing storage; the vector is unusable afterwards.
  void Destroy() { UnmapOrDie(data_, capacity_bytes_); }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  // Appends a copy of |element|; grows capacity to the next power of two
  // when full. POD-only: the element is copied with memcpy.
  void push_back(const T &element) {
    CHECK_LE(size_, capacity());
    if (size_ == capacity()) {
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Realloc(new_capacity);
    }
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  // Drops the last element; does not destroy it (POD assumption).
  void pop_back() {
    CHECK_GT(size_, 0);
    size_--;
  }
  uptr size() const {
    return size_;
  }
  const T *data() const {
    return data_;
  }
  T *data() {
    return data_;
  }
  // Capacity in elements (storage is tracked internally in bytes).
  uptr capacity() const { return capacity_bytes_ / sizeof(T); }
  void reserve(uptr new_size) {
    // Never downsize internal buffer.
    if (new_size > capacity())
      Realloc(new_size);
  }
  // Resizes to |new_size| elements; newly-added elements are zero-filled.
  void resize(uptr new_size) {
    if (new_size > size_) {
      reserve(new_size);
      internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
    }
    size_ = new_size;
  }

  // Drops all elements but keeps the allocation.
  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

  const T *begin() const {
    return data();
  }
  T *begin() {
    return data();
  }
  const T *end() const {
    return data() + size();
  }
  T *end() {
    return data() + size();
  }

  void swap(InternalMmapVectorNoCtor &other) {
    Swap(data_, other.data_);
    Swap(capacity_bytes_, other.capacity_bytes_);
    Swap(size_, other.size_);
  }

 private:
  // Moves the contents into a fresh mapping of at least |new_capacity|
  // elements (rounded up to whole pages) and releases the old mapping.
  void Realloc(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    uptr new_capacity_bytes =
        RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
    T *new_data = (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector");
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    UnmapOrDie(data_, capacity_bytes_);
    data_ = new_data;
    capacity_bytes_ = new_capacity_bytes;
  }

  T *data_;
  uptr capacity_bytes_;
  uptr size_;
};
527
// Element-wise equality via a raw memory compare. Appropriate only for the
// POD element types this container supports (bitwise-comparable values).
template <typename T>
bool operator==(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  if (lhs.size() != rhs.size()) return false;
  return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;
}
534
// Negation of the memcmp-based operator== above.
template <typename T>
bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  return !(lhs == rhs);
}
540
// RAII wrapper over InternalMmapVectorNoCtor: the constructor initializes
// the storage, the destructor unmaps it. Non-copyable and non-movable.
template<typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(1); }
  // Creates a vector already holding |cnt| zero-initialized elements.
  explicit InternalMmapVector(uptr cnt) {
    InternalMmapVectorNoCtor<T>::Initialize(cnt);
    this->resize(cnt);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow copies and moves.
  InternalMmapVector(const InternalMmapVector &) = delete;
  InternalMmapVector &operator=(const InternalMmapVector &) = delete;
  InternalMmapVector(InternalMmapVector &&) = delete;
  InternalMmapVector &operator=(InternalMmapVector &&) = delete;
};
556
// Fixed-capacity, mmap-backed NUL-terminated string buffer, used while
// building reports.
class InternalScopedString : public InternalMmapVector<char> {
 public:
  explicit InternalScopedString(uptr max_length)
      : InternalMmapVector<char>(max_length), length_(0) {
    (*this)[0] = '\0';
  }
  uptr length() { return length_; }
  // Resets to the empty string without releasing the buffer.
  // Note: hides InternalMmapVectorNoCtor::clear(), which only resets size.
  void clear() {
    (*this)[0] = '\0';
    length_ = 0;
  }
  // Appends formatted text (printf-style signature); defined out of line.
  void append(const char *format, ...);

 private:
  uptr length_;  // Current length, excluding the terminating NUL.
};
573
// Default comparator for Sort(): strict weak ordering via operator<.
template <class T>
struct CompareLess {
  bool operator()(const T &lhs, const T &rhs) const { return lhs < rhs; }
};
578
579// HeapSort for arrays and InternalMmapVector.
// In-place heap sort: O(n log n), not stable.
template <class T, class Compare = CompareLess<T>>
void Sort(T *v, uptr size, Compare comp = {}) {
  if (size < 2)
    return;
  // Stage 1: insert elements to the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    // Sift v[i] up until the max-heap property (parent >= child) holds.
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;  // Parent index.
      if (comp(v[p], v[j]))
        Swap(v[j], v[p]);
      else
        break;
    }
  }
  // Stage 2: swap largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap(v[0], v[i]);
    uptr j, max_ind;
    // Sift the new root down within the shrinking heap [0, i).
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp(v[max_ind], v[left]))
        max_ind = left;
      if (right < i && comp(v[max_ind], v[right]))
        max_ind = right;
      if (max_ind != j)
        Swap(v[j], v[max_ind]);
      else
        break;
    }
  }
}
615
616// Works like std::lower_bound: finds the first element that is not less
617// than the val.
618template <class Container, class Value, class Compare>
619uptr InternalLowerBound(const Container &v, uptr first, uptr last,
620                        const Value &val, Compare comp) {
621  while (last > first) {
622    uptr mid = (first + last) / 2;
623    if (comp(v[mid], val))
624      first = mid + 1;
625    else
626      last = mid;
627  }
628  return first;
629}
630
631enum ModuleArch {
632  kModuleArchUnknown,
633  kModuleArchI386,
634  kModuleArchX86_64,
635  kModuleArchX86_64H,
636  kModuleArchARMV6,
637  kModuleArchARMV7,
638  kModuleArchARMV7S,
639  kModuleArchARMV7K,
640  kModuleArchARM64
641};
642
// Opens the file 'file_name' and reads up to 'max_len' bytes.
644// The resulting buffer is mmaped and stored in '*buff'.
645// Returns true if file was successfully opened and read.
646bool ReadFileToVector(const char *file_name,
647                      InternalMmapVectorNoCtor<char> *buff,
648                      uptr max_len = 1 << 26, error_t *errno_p = nullptr);
649
// Opens the file 'file_name' and reads up to 'max_len' bytes.
651// This function is less I/O efficient than ReadFileToVector as it may reread
652// file multiple times to avoid mmap during read attempts. It's used to read
653// procmap, so short reads with mmap in between can produce inconsistent result.
654// The resulting buffer is mmaped and stored in '*buff'.
655// The size of the mmaped region is stored in '*buff_size'.
656// The total number of read bytes is stored in '*read_len'.
657// Returns true if file was successfully opened and read.
658bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
659                      uptr *read_len, uptr max_len = 1 << 26,
660                      error_t *errno_p = nullptr);
661
662// When adding a new architecture, don't forget to also update
663// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cc.
// Returns the human-readable name for a ModuleArch value ("" for unknown).
inline const char *ModuleArchToString(ModuleArch arch) {
  switch (arch) {
    case kModuleArchUnknown:
      return "";
    case kModuleArchI386:
      return "i386";
    case kModuleArchX86_64:
      return "x86_64";
    case kModuleArchX86_64H:
      return "x86_64h";
    case kModuleArchARMV6:
      return "armv6";
    case kModuleArchARMV7:
      return "armv7";
    case kModuleArchARMV7S:
      return "armv7s";
    case kModuleArchARMV7K:
      return "armv7k";
    case kModuleArchARM64:
      return "arm64";
  }
  // Every enum value is handled above; reaching here means a corrupt value.
  CHECK(0 && "Invalid module arch");
  return "";
}
688
689const uptr kModuleUUIDSize = 16;
690const uptr kMaxSegName = 16;
691
692// Represents a binary loaded into virtual memory (e.g. this can be an
693// executable or a shared object).
class LoadedModule {
 public:
  // Creates an empty, unnamed module record.
  LoadedModule()
      : full_name_(nullptr),
        base_address_(0),
        max_executable_address_(0),
        arch_(kModuleArchUnknown),
        instrumented_(false) {
    internal_memset(uuid_, 0, kModuleUUIDSize);
    ranges_.clear();
  }
  // Sets the module identity; defined out of line.
  void set(const char *module_name, uptr base_address);
  void set(const char *module_name, uptr base_address, ModuleArch arch,
           u8 uuid[kModuleUUIDSize], bool instrumented);
  // Releases owned data (full_name_ and the address ranges); out of line.
  void clear();
  // Records an address range belonging to this module.
  void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
                       const char *name = nullptr);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }
  uptr max_executable_address() const { return max_executable_address_; }
  ModuleArch arch() const { return arch_; }
  const u8 *uuid() const { return uuid_; }
  bool instrumented() const { return instrumented_; }

  // A single mapped segment of the module, stored in an intrusive list.
  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;
    bool writable;
    char name[kMaxSegName];

    AddressRange(uptr beg, uptr end, bool executable, bool writable,
                 const char *name)
        : next(nullptr),
          beg(beg),
          end(end),
          executable(executable),
          writable(writable) {
      // NOTE(review): if internal_strncpy follows strncpy semantics, a
      // |name| of kMaxSegName chars or longer leaves this->name without a
      // terminating NUL -- confirm consumers bound their reads.
      internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
    }
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  uptr max_executable_address_;
  ModuleArch arch_;
  u8 uuid_[kModuleUUIDSize];
  bool instrumented_;
  IntrusiveList<AddressRange> ranges_;
};
750
751// List of LoadedModules. OS-dependent implementation is responsible for
752// filling this information.
class ListOfModules {
 public:
  ListOfModules() : initialized(false) {}
  // NOTE(review): the destructor iterates modules_, whose storage is only
  // set up by clearOrInit(); assumes init()/fallbackInit() ran before
  // destruction -- confirm all callers do so.
  ~ListOfModules() { clear(); }
  // Populates the list from the OS; implemented per-platform.
  void init();
  void fallbackInit();  // Uses fallback init if available, otherwise clears
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  // Clears each module (releasing its owned data), then empties the vector
  // (capacity is retained).
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }
  // First call initializes the underlying storage; later calls just clear.
  void clearOrInit() {
    initialized ? clear() : modules_.Initialize(kInitialCapacity);
    initialized = true;
  }

  InternalMmapVectorNoCtor<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
  bool initialized;
};
784
785// Callback type for iterating over a set of memory ranges.
786typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);
787
788enum AndroidApiLevel {
789  ANDROID_NOT_ANDROID = 0,
790  ANDROID_KITKAT = 19,
791  ANDROID_LOLLIPOP_MR1 = 22,
792  ANDROID_POST_LOLLIPOP = 23
793};
794
795void WriteToSyslog(const char *buffer);
796
#if SANITIZER_MAC
void LogFullErrorReport(const char *buffer);
#else
// Implemented only on Mac; a no-op everywhere else.
INLINE void LogFullErrorReport(const char *buffer) {}
#endif
802
#if SANITIZER_LINUX || SANITIZER_MAC
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
// Syslog-style logging is implemented only on Linux and Mac.
INLINE void WriteOneLineToSyslog(const char *s) {}
INLINE void LogMessageOnPrintf(const char *str) {}
#endif
810
#if SANITIZER_LINUX
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
// No-ops outside of Linux.
INLINE void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
INLINE void SetAbortMessage(const char *) {}
#endif
820
#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
// Stubs for non-Android builds.
// NOTE(review): AndroidLogWrite is declared only in this branch here --
// presumably declared elsewhere for the Android build; verify.
INLINE void AndroidLogWrite(const char *buffer_unused) {}
INLINE void SanitizerInitializeUnwinder() {}
INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif
829
// Returns the number of passes made over thread-specific-data destructors
// at thread exit (cf. PTHREAD_DESTRUCTOR_ITERATIONS).
INLINE uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  // Android L MR1 (API 22) uses 8 iterations; other Android versions use 4.
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
// Unused on Windows.
  return 0;
#endif
}
840
841void *internal_start_thread(void(*func)(void*), void *arg);
842void internal_join_thread(void *th);
843void MaybeStartBackgroudThread();
844
845// Make the compiler think that something is going on there.
846// Use this inside a loop that looks like memset/memcpy/etc to prevent the
847// compiler from recognising it and turning it into an actual call to
848// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  // MSVC: compiler-only barrier; prevents reordering/eliding across it.
  _ReadWriteBarrier();
#else
  // GCC/Clang: empty asm with a memory clobber makes |arg| and all memory
  // appear used and modified to the optimizer.
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}
856
struct SignalContext {
  void *siginfo;  // Not owned; see the two-argument constructor below.
  void *context;  // Not owned.
  uptr addr;      // Faulting address, filled from GetAddress().
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;
  enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;

  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor
  SignalContext() = default;

  // Creates signal context in a platform-specific manner.
  // SignalContext is going to keep pointers to siginfo and context without
  // owning them.
  SignalContext(void *siginfo, void *context)
      : siginfo(siginfo),
        context(context),
        addr(GetAddress()),
        is_memory_access(IsMemoryAccess()),
        write_flag(GetWriteFlag()) {
    // pc/sp/bp are filled here, after siginfo/context were stored by the
    // member initializers above.
    InitPcSpBp();
  }

  static void DumpAllRegisters(void *context);

  // Type of signal e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
  int GetType() const;

  // String description of the signal.
  const char *Describe() const;

  // Returns true if signal is stack overflow.
  bool IsStackOverflow() const;

 private:
  // Platform specific initialization.
  void InitPcSpBp();
  uptr GetAddress() const;
  WriteFlag GetWriteFlag() const;
  bool IsMemoryAccess() const;
};
901
902void InitializePlatformEarly();
903void MaybeReexec();
904
// Holds a callable and invokes it exactly once, from the destructor.
template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : callback_(fn) {}
  ~RunOnDestruction() { callback_(); }

 private:
  Fn callback_;
};
914
915// A simple scope guard. Usage:
916// auto cleanup = at_scope_exit([]{ do_cleanup; });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  // NOTE(review): relies on copy elision of the returned prvalue so the
  // cleanup runs once, at the caller's scope exit (guaranteed in C++17;
  // universal in practice before that) -- a materialized copy would run
  // fn more than once.
  return RunOnDestruction<Fn>(fn);
}
921
922// Linux on 64-bit s390 had a nasty bug that crashes the whole machine
923// if a process uses virtual memory over 4TB (as many sanitizers like
924// to do).  This function will abort the process if running on a kernel
925// that looks vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
// Only 64-bit s390 Linux is affected; a no-op everywhere else.
INLINE void AvoidCVE_2016_2143() {}
#endif
931
// Statistics reported by the stack depot.
struct StackDepotStats {
  uptr n_uniq_ids;  // Number of unique entries stored.
  uptr allocated;   // Bytes allocated by the depot.
};
936
937// The default value for allocator_release_to_os_interval_ms common flag to
938// indicate that sanitizer allocator should not attempt to release memory to OS.
939const s32 kReleaseToOSIntervalNever = -1;
940
941void CheckNoDeepBind(const char *filename, int flag);
942
943// Returns the requested amount of random data (up to 256 bytes) that can then
944// be used to seed a PRNG. Defaults to blocking like the underlying syscall.
945bool GetRandom(void *buffer, uptr length, bool blocking = true);
946
947// Returns the number of logical processors on the system.
948u32 GetNumberOfCPUs();
949extern u32 NumberOfCPUsCached;
// Returns the cached CPU count, querying the OS on first use.
// NOTE(review): same racy-but-presumably-benign first-call caching pattern
// as GetPageSizeCached.
INLINE u32 GetNumberOfCPUsCached() {
  if (!NumberOfCPUsCached)
    NumberOfCPUsCached = GetNumberOfCPUs();
  return NumberOfCPUsCached;
}
955
956}  // namespace __sanitizer
957
// Placement-style operator new that allocates from a LowLevelAllocator.
// The caller must hold the allocator's external lock (see
// LowLevelAllocator::Allocate).
inline void *operator new(__sanitizer::operator_new_size_type size,
                          __sanitizer::LowLevelAllocator &alloc) {
  return alloc.Allocate(size);
}
962
963#endif  // SANITIZER_COMMON_H
964