1//===-- sanitizer_linux_libcdep.cc ----------------------------------------===//
2//
3// This file is distributed under the University of Illinois Open Source
4// License. See LICENSE.TXT for details.
5//
6//===----------------------------------------------------------------------===//
7//
8// This file is shared between AddressSanitizer and ThreadSanitizer
9// run-time libraries and implements linux-specific functions from
10// sanitizer_libc.h.
11//===----------------------------------------------------------------------===//
12
13#include "sanitizer_platform.h"
14
15#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD ||                \
16    SANITIZER_OPENBSD || SANITIZER_SOLARIS
17
18#include "sanitizer_allocator_internal.h"
19#include "sanitizer_atomic.h"
20#include "sanitizer_common.h"
21#include "sanitizer_file.h"
22#include "sanitizer_flags.h"
23#include "sanitizer_freebsd.h"
24#include "sanitizer_linux.h"
25#include "sanitizer_placement_new.h"
26#include "sanitizer_procmaps.h"
27
28#if SANITIZER_NETBSD
29#define _RTLD_SOURCE  // for __lwp_gettcb_fast() / __lwp_getprivate_fast()
30#endif
31
32#include <dlfcn.h>  // for dlsym()
33#include <link.h>
34#include <pthread.h>
35#include <signal.h>
36#include <sys/resource.h>
37#include <syslog.h>
38
39#if SANITIZER_FREEBSD
40#include <pthread_np.h>
41#include <osreldate.h>
42#include <sys/sysctl.h>
43#define pthread_getattr_np pthread_attr_get_np
44#endif
45
46#if SANITIZER_OPENBSD
47#include <pthread_np.h>
48#include <sys/sysctl.h>
49#endif
50
51#if SANITIZER_NETBSD
52#include <sys/sysctl.h>
53#include <sys/tls.h>
54#include <lwp.h>
55#endif
56
57#if SANITIZER_SOLARIS
58#include <thread.h>
59#endif
60
61#if SANITIZER_ANDROID
62#include <android/api-level.h>
63#if !defined(CPU_COUNT) && !defined(__aarch64__)
64#include <dirent.h>
65#include <fcntl.h>
// Minimal getdents(2) record layout, used by GetNumberOfCPUs below when the
// NDK's sched.h lacks CPU_COUNT (old toolchains).
struct __sanitizer::linux_dirent {
  long           d_ino;
  off_t          d_off;
  unsigned short d_reclen;
  char           d_name[];  // flexible array; d_type byte follows the name
};
72#endif
73#endif
74
75#if !SANITIZER_ANDROID
76#include <elf.h>
77#include <unistd.h>
78#endif
79
80#if SANITIZER_NETBSD
81#include <machine/mcontext.h>
82#endif
83
84namespace __sanitizer {
85
86SANITIZER_WEAK_ATTRIBUTE int
87real_sigaction(int signum, const void *act, void *oldact);
88
89int internal_sigaction(int signum, const void *act, void *oldact) {
90#if !SANITIZER_GO
91  if (&real_sigaction)
92    return real_sigaction(signum, act, oldact);
93#endif
94  return sigaction(signum, (const struct sigaction *)act,
95                   (struct sigaction *)oldact);
96}
97
// Computes the [bottom, top) address range of the current thread's stack.
// At initialization (main thread, libpthread possibly not usable yet) the
// range is derived from the process memory map plus RLIMIT_STACK; later it
// is obtained from the threading library.
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom) {
  CHECK(stack_top);
  CHECK(stack_bottom);
  if (at_initialization) {
    // This is the main thread. Libpthread may not be initialized yet.
    struct rlimit rl;
    CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);

    // Find the mapping that contains a stack variable.
    MemoryMappingLayout proc_maps(/*cache_enabled*/true);
    MemoryMappedSegment segment;
    uptr prev_end = 0;
    while (proc_maps.Next(&segment)) {
      if ((uptr)&rl < segment.end) break;
      prev_end = segment.end;
    }
    // The local variable must lie inside the segment found above.
    CHECK((uptr)&rl >= segment.start && (uptr)&rl < segment.end);

    // Get stacksize from rlimit, but clip it so that it does not overlap
    // with other mappings.
    uptr stacksize = rl.rlim_cur;
    if (stacksize > segment.end - prev_end) stacksize = segment.end - prev_end;
    // When running with unlimited stack size, we still want to set some limit.
    // The unlimited stack size is caused by 'ulimit -s unlimited'.
    // Also, for some reason, GNU make spawns subprocesses with unlimited stack.
    if (stacksize > kMaxThreadStackSize)
      stacksize = kMaxThreadStackSize;
    *stack_top = segment.end;
    *stack_bottom = segment.end - stacksize;
    return;
  }
  // Non-initialization path: ask the threading library for the current
  // thread's stack segment.
  uptr stacksize = 0;
  void *stackaddr = nullptr;
#if SANITIZER_SOLARIS
  stack_t ss;
  CHECK_EQ(thr_stksegment(&ss), 0);
  stacksize = ss.ss_size;
  // ss_sp is the high end of the segment here; derive the base address.
  stackaddr = (char *)ss.ss_sp - stacksize;
#elif SANITIZER_OPENBSD
  stack_t sattr;
  CHECK_EQ(pthread_stackseg_np(pthread_self(), &sattr), 0);
  stackaddr = sattr.ss_sp;
  stacksize = sattr.ss_size;
#else  // !SANITIZER_SOLARIS
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  my_pthread_attr_getstack(&attr, &stackaddr, &stacksize);
  pthread_attr_destroy(&attr);
#endif // SANITIZER_SOLARIS

  *stack_top = (uptr)stackaddr + stacksize;
  *stack_bottom = (uptr)stackaddr;
}
153
154#if !SANITIZER_GO
155bool SetEnv(const char *name, const char *value) {
156  void *f = dlsym(RTLD_NEXT, "setenv");
157  if (!f)
158    return false;
159  typedef int(*setenv_ft)(const char *name, const char *value, int overwrite);
160  setenv_ft setenv_f;
161  CHECK_EQ(sizeof(setenv_f), sizeof(f));
162  internal_memcpy(&setenv_f, &f, sizeof(f));
163  return setenv_f(name, value, 1) == 0;
164}
165#endif
166
167__attribute__((unused)) static bool GetLibcVersion(int *major, int *minor,
168                                                   int *patch) {
169#ifdef _CS_GNU_LIBC_VERSION
170  char buf[64];
171  uptr len = confstr(_CS_GNU_LIBC_VERSION, buf, sizeof(buf));
172  if (len >= sizeof(buf))
173    return false;
174  buf[len] = 0;
175  static const char kGLibC[] = "glibc ";
176  if (internal_strncmp(buf, kGLibC, sizeof(kGLibC) - 1) != 0)
177    return false;
178  const char *p = buf + sizeof(kGLibC) - 1;
179  *major = internal_simple_strtoll(p, &p, 10);
180  *minor = (*p == '.') ? internal_simple_strtoll(p + 1, &p, 10) : 0;
181  *patch = (*p == '.') ? internal_simple_strtoll(p + 1, &p, 10) : 0;
182  return true;
183#else
184  return false;
185#endif
186}
187
188#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO &&               \
189    !SANITIZER_NETBSD && !SANITIZER_OPENBSD && !SANITIZER_SOLARIS
190static uptr g_tls_size;
191
192#ifdef __i386__
193# ifndef __GLIBC_PREREQ
194#  define CHECK_GET_TLS_STATIC_INFO_VERSION 1
195# else
196#  define CHECK_GET_TLS_STATIC_INFO_VERSION (!__GLIBC_PREREQ(2, 27))
197# endif
198#else
199# define CHECK_GET_TLS_STATIC_INFO_VERSION 0
200#endif
201
202#if CHECK_GET_TLS_STATIC_INFO_VERSION
203# define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
204#else
205# define DL_INTERNAL_FUNCTION
206#endif
207
208namespace {
209struct GetTlsStaticInfoCall {
210  typedef void (*get_tls_func)(size_t*, size_t*);
211};
212struct GetTlsStaticInfoRegparmCall {
213  typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION;
214};
215
216template <typename T>
217void CallGetTls(void* ptr, size_t* size, size_t* align) {
218  typename T::get_tls_func get_tls;
219  CHECK_EQ(sizeof(get_tls), sizeof(ptr));
220  internal_memcpy(&get_tls, &ptr, sizeof(ptr));
221  CHECK_NE(get_tls, 0);
222  get_tls(size, align);
223}
224
225bool CmpLibcVersion(int major, int minor, int patch) {
226  int ma;
227  int mi;
228  int pa;
229  if (!GetLibcVersion(&ma, &mi, &pa))
230    return false;
231  if (ma > major)
232    return true;
233  if (ma < major)
234    return false;
235  if (mi > minor)
236    return true;
237  if (mi < minor)
238    return false;
239  return pa >= patch;
240}
241
242}  // namespace
243
#if defined(__mips__) || defined(__powerpc64__)
// TlsPreTcbSize includes the size of struct pthread_descr and of the tcb
// head structure; it lies before the static tls blocks.
//
// NOTE: the definition lives in the Linux arch-specific section below,
// which is compiled under the same configurations as this region on
// mips/powerpc64. Defining the function here as well would redefine the
// same static function in this translation unit and break the build, so
// only a forward declaration is provided here.
static uptr TlsPreTcbSize();
#endif
261
// Computes the size of the static TLS area by querying the glibc-internal
// _dl_get_tls_static_info and caches the rounded result in g_tls_size.
void InitTlsSize() {
  // all current supported platforms have 16 bytes stack alignment
  const size_t kStackAlign = 16;
  void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
  size_t tls_size = 0;
  size_t tls_align = 0;
  // On i?86, _dl_get_tls_static_info used to be internal_function, i.e.
  // __attribute__((regparm(3), stdcall)) before glibc 2.27 and is normal
  // function in 2.27 and later.
  if (CHECK_GET_TLS_STATIC_INFO_VERSION && !CmpLibcVersion(2, 27, 0))
    CallGetTls<GetTlsStaticInfoRegparmCall>(get_tls_static_info_ptr,
                                            &tls_size, &tls_align);
  else
    CallGetTls<GetTlsStaticInfoCall>(get_tls_static_info_ptr,
                                     &tls_size, &tls_align);
  // Never use an alignment smaller than the stack alignment.
  if (tls_align < kStackAlign)
    tls_align = kStackAlign;
  g_tls_size = RoundUpTo(tls_size, tls_align);
}
281#else
// No-op: on these platforms the TLS extent is obtained directly in
// GetTls/GetTlsSize, so no up-front size computation is needed.
void InitTlsSize() { }
283#endif  // !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO &&
284        // !SANITIZER_NETBSD && !SANITIZER_SOLARIS
285
286#if (defined(__x86_64__) || defined(__i386__) || defined(__mips__) ||          \
287     defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__) ||    \
288     defined(__arm__)) &&                                                      \
289    SANITIZER_LINUX && !SANITIZER_ANDROID
290// sizeof(struct pthread) from glibc.
291static atomic_uintptr_t thread_descriptor_size;
292
// Returns sizeof(struct pthread) for the running glibc, looked up by glibc
// version (the struct is internal and its size has changed across
// releases). The value is computed once, cached in thread_descriptor_size,
// and 0 is returned when it cannot be determined.
uptr ThreadDescriptorSize() {
  uptr val = atomic_load_relaxed(&thread_descriptor_size);
  if (val)
    return val;  // Already computed.
#if defined(__x86_64__) || defined(__i386__) || defined(__arm__)
  int major;
  int minor;
  int patch;
  if (GetLibcVersion(&major, &minor, &patch) && major == 2) {
    /* sizeof(struct pthread) values from various glibc versions.  */
    if (SANITIZER_X32)
      val = 1728; // Assume only one particular version for x32.
    // For ARM sizeof(struct pthread) changed in Glibc 2.23.
    else if (SANITIZER_ARM)
      val = minor <= 22 ? 1120 : 1216;
    else if (minor <= 3)
      val = FIRST_32_SECOND_64(1104, 1696);
    else if (minor == 4)
      val = FIRST_32_SECOND_64(1120, 1728);
    else if (minor == 5)
      val = FIRST_32_SECOND_64(1136, 1728);
    else if (minor <= 9)
      val = FIRST_32_SECOND_64(1136, 1712);
    else if (minor == 10)
      val = FIRST_32_SECOND_64(1168, 1776);
    else if (minor == 11 || (minor == 12 && patch == 1))
      val = FIRST_32_SECOND_64(1168, 2288);
    else if (minor <= 14)
      val = FIRST_32_SECOND_64(1168, 2304);
    else
      val = FIRST_32_SECOND_64(1216, 2304);
  }
#elif defined(__mips__)
  // TODO(sagarthakur): add more values as per different glibc versions.
  val = FIRST_32_SECOND_64(1152, 1776);
#elif defined(__aarch64__)
  // The sizeof (struct pthread) is the same from GLIBC 2.17 to 2.22.
  val = 1776;
#elif defined(__powerpc64__)
  val = 1776; // from glibc.ppc64le 2.20-8.fc21
#elif defined(__s390__)
  val = FIRST_32_SECOND_64(1152, 1776); // valid for glibc 2.22
#endif
  // Cache only successful lookups; 0 means "unknown" and will be retried.
  if (val)
    atomic_store_relaxed(&thread_descriptor_size, val);
  return val;
}
340
341// The offset at which pointer to self is located in the thread descriptor.
342const uptr kThreadSelfOffset = FIRST_32_SECOND_64(8, 16);
343
// Accessor for the descriptor self-pointer offset used by ThreadSelf().
uptr ThreadSelfOffset() {
  return kThreadSelfOffset;
}
347
#if defined(__mips__) || defined(__powerpc64__)
// TlsPreTcbSize includes size of struct pthread_descr and size of tcb
// head structure. It lies before the static tls blocks.
// Used by ThreadSelf() and GetTlsSize() on TLS-variant-I targets.
static uptr TlsPreTcbSize() {
# if defined(__mips__)
  const uptr kTcbHead = 16; // sizeof (tcbhead_t)
# elif defined(__powerpc64__)
  const uptr kTcbHead = 88; // sizeof (tcbhead_t)
# endif
  const uptr kTlsAlign = 16;
  // Round the combined descriptor + tcb-head size up to the TLS alignment.
  const uptr kTlsPreTcbSize =
      RoundUpTo(ThreadDescriptorSize() + kTcbHead, kTlsAlign);
  return kTlsPreTcbSize;
}
#endif
363
// Returns the address of the current thread's glibc thread descriptor
// (struct pthread), derived from the architecture-specific thread pointer.
uptr ThreadSelf() {
  uptr descr_addr;
# if defined(__i386__)
  // The descriptor's self-pointer sits at kThreadSelfOffset from %gs.
  asm("mov %%gs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
# elif defined(__x86_64__)
  // Same layout as i386, but the thread pointer lives in %fs.
  asm("mov %%fs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
# elif defined(__mips__)
  // MIPS uses TLS variant I. The thread pointer (in hardware register $29)
  // points to the end of the TCB + 0x7000. The pthread_descr structure is
  // immediately in front of the TCB. TlsPreTcbSize() includes the size of the
  // TCB and the size of pthread_descr.
  const uptr kTlsTcbOffset = 0x7000;
  uptr thread_pointer;
  asm volatile(".set push;\
                .set mips64r2;\
                rdhwr %0,$29;\
                .set pop" : "=r" (thread_pointer));
  descr_addr = thread_pointer - kTlsTcbOffset - TlsPreTcbSize();
# elif defined(__aarch64__) || defined(__arm__)
  // The descriptor lies immediately below the thread pointer.
  descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
                                      ThreadDescriptorSize();
# elif defined(__s390__)
  descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer());
# elif defined(__powerpc64__)
  // PPC64LE uses TLS variant I. The thread pointer (in GPR 13)
  // points to the end of the TCB + 0x7000. The pthread_descr structure is
  // immediately in front of the TCB. TlsPreTcbSize() includes the size of the
  // TCB and the size of pthread_descr.
  const uptr kTlsTcbOffset = 0x7000;
  uptr thread_pointer;
  asm("addi %0,13,%1" : "=r"(thread_pointer) : "I"(-kTlsTcbOffset));
  descr_addr = thread_pointer - TlsPreTcbSize();
# else
#  error "unsupported CPU arch"
# endif
  return descr_addr;
}
401#endif  // (x86_64 || i386 || MIPS) && SANITIZER_LINUX
402
403#if SANITIZER_FREEBSD
// Returns the TLS segment base for the current thread by reading the word
// at offset 0 from the segment register (%gs on i386, %fs on x86_64).
// NOTE(review): this relies on the first tcb field being a self-pointer,
// which matches the sysarch() alternatives shown in the comments.
static void **ThreadSelfSegbase() {
  void **segbase = 0;
# if defined(__i386__)
  // sysarch(I386_GET_GSBASE, segbase);
  __asm __volatile("mov %%gs:0, %0" : "=r" (segbase));
# elif defined(__x86_64__)
  // sysarch(AMD64_GET_FSBASE, segbase);
  __asm __volatile("movq %%fs:0, %0" : "=r" (segbase));
# else
#  error "unsupported CPU arch"
# endif
  return segbase;
}
417
// Returns the pthread pointer for the current thread; per FreeBSD's TCB
// layout it is stored at index 2 of the segment base.
uptr ThreadSelf() {
  return (uptr)ThreadSelfSegbase()[2];
}
421#endif  // SANITIZER_FREEBSD
422
423#if SANITIZER_NETBSD
// Returns the current LWP's TLS control block via whichever fast accessor
// this NetBSD port provides; null if neither is available.
static struct tls_tcb * ThreadSelfTlsTcb() {
  struct tls_tcb *tcb = nullptr;
#ifdef __HAVE___LWP_GETTCB_FAST
  tcb = (struct tls_tcb *)__lwp_gettcb_fast();
#elif defined(__HAVE___LWP_GETPRIVATE_FAST)
  tcb = (struct tls_tcb *)__lwp_getprivate_fast();
#endif
  return tcb;
}
433
// Returns the pthread pointer stored in the tcb_pthread field of the TCB.
uptr ThreadSelf() {
  return (uptr)ThreadSelfTlsTcb()->tcb_pthread;
}
437
438int GetSizeFromHdr(struct dl_phdr_info *info, size_t size, void *data) {
439  const Elf_Phdr *hdr = info->dlpi_phdr;
440  const Elf_Phdr *last_hdr = hdr + info->dlpi_phnum;
441
442  for (; hdr != last_hdr; ++hdr) {
443    if (hdr->p_type == PT_TLS && info->dlpi_tls_modid == 1) {
444      *(uptr*)data = hdr->p_memsz;
445      break;
446    }
447  }
448  return 0;
449}
450#endif  // SANITIZER_NETBSD
451
#if !SANITIZER_GO
// Computes the address range [*addr, *addr + *size) of the current
// thread's static TLS block; both outputs are 0 when the range is unknown.
static void GetTls(uptr *addr, uptr *size) {
#if SANITIZER_LINUX && !SANITIZER_ANDROID
# if defined(__x86_64__) || defined(__i386__) || defined(__s390__)
  // The static TLS block lies below the thread descriptor; report the TLS
  // block shifted up by the descriptor size.
  *addr = ThreadSelf();
  *size = GetTlsSize();
  *addr -= *size;
  *addr += ThreadDescriptorSize();
# elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__) \
    || defined(__arm__)
  // Here the TLS range starts at the descriptor address itself.
  *addr = ThreadSelf();
  *size = GetTlsSize();
# else
  *addr = 0;
  *size = 0;
# endif
#elif SANITIZER_FREEBSD
  void** segbase = ThreadSelfSegbase();
  *addr = 0;
  *size = 0;
  if (segbase != 0) {
    // tcbalign = 16
    // tls_size = round(tls_static_space, tcbalign);
    // dtv = segbase[1];
    // dtv[2] = segbase - tls_static_space;
    void **dtv = (void**) segbase[1];
    *addr = (uptr) dtv[2];
    *size = (*addr == 0) ? 0 : ((uptr) segbase[0] - (uptr) dtv[2]);
  }
#elif SANITIZER_NETBSD
  struct tls_tcb * const tcb = ThreadSelfTlsTcb();
  *addr = 0;
  *size = 0;
  if (tcb != 0) {
    // Find size (p_memsz) of dlpi_tls_modid 1 (TLS block of the main program).
    // ld.elf_so hardcodes the index 1.
    dl_iterate_phdr(GetSizeFromHdr, size);

    if (*size != 0) {
      // The block has been found and tcb_dtv[1] contains the base address
      *addr = (uptr)tcb->tcb_dtv[1];
    }
  }
#elif SANITIZER_OPENBSD
  *addr = 0;
  *size = 0;
#elif SANITIZER_ANDROID
  *addr = 0;
  *size = 0;
#elif SANITIZER_SOLARIS
  // FIXME
  *addr = 0;
  *size = 0;
#else
# error "Unknown OS"
#endif
}
#endif
510
#if !SANITIZER_GO
// Returns the size of the current thread's static TLS block.
uptr GetTlsSize() {
#if SANITIZER_FREEBSD || SANITIZER_ANDROID || SANITIZER_NETBSD ||              \
    SANITIZER_OPENBSD || SANITIZER_SOLARIS
  // These platforms compute the size directly in GetTls.
  uptr addr, size;
  GetTls(&addr, &size);
  return size;
#elif defined(__mips__) || defined(__powerpc64__)
  // Account for the pre-TCB area in front of the static TLS blocks.
  return RoundUpTo(g_tls_size + TlsPreTcbSize(), 16);
#else
  return g_tls_size;
#endif
}
#endif
525
// Retrieves both the stack range and the TLS range of the current thread.
// For non-main threads the TLS block may lie inside the reported stack
// mapping; in that case the stack is shrunk so the two ranges are disjoint.
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size) {
#if SANITIZER_GO
  // Stub implementation for Go.
  *stk_addr = *stk_size = *tls_addr = *tls_size = 0;
#else
  GetTls(tls_addr, tls_size);

  uptr stack_top, stack_bottom;
  GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
  *stk_addr = stack_bottom;
  *stk_size = stack_top - stack_bottom;

  if (!main) {
    // If stack and tls intersect, make them non-intersecting.
    if (*tls_addr > *stk_addr && *tls_addr < *stk_addr + *stk_size) {
      // The TLS block must end within the stack mapping for the carve-out
      // below to be valid.
      CHECK_GT(*tls_addr + *tls_size, *stk_addr);
      CHECK_LE(*tls_addr + *tls_size, *stk_addr + *stk_size);
      *stk_size -= *tls_size;
      *tls_addr = *stk_addr + *stk_size;
    }
  }
#endif
}
550
551#if !SANITIZER_NETBSD
552#if !SANITIZER_FREEBSD && !SANITIZER_OPENBSD
553typedef ElfW(Phdr) Elf_Phdr;
554#elif SANITIZER_WORDSIZE == 32 && __FreeBSD_version <= 902001 // v9.2
555#define Elf_Phdr XElf32_Phdr
556#define dl_phdr_info xdl_phdr_info
557#define dl_iterate_phdr(c, b) xdl_iterate_phdr((c), (b))
558#endif // !SANITIZER_FREEBSD && !SANITIZER_OPENBSD
559#endif // !SANITIZER_NETBSD
560
// Context threaded through dl_iterate_phdr to dl_iterate_phdr_cb.
struct DlIteratePhdrData {
  InternalMmapVectorNoCtor<LoadedModule> *modules;  // output module list
  bool first;  // true until the first (main binary) entry has been seen
};
565
566static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
567  DlIteratePhdrData *data = (DlIteratePhdrData*)arg;
568  InternalScopedString module_name(kMaxPathLength);
569  if (data->first) {
570    data->first = false;
571    // First module is the binary itself.
572    ReadBinaryNameCached(module_name.data(), module_name.size());
573  } else if (info->dlpi_name) {
574    module_name.append("%s", info->dlpi_name);
575  }
576  if (module_name[0] == '\0')
577    return 0;
578  LoadedModule cur_module;
579  cur_module.set(module_name.data(), info->dlpi_addr);
580  for (int i = 0; i < (int)info->dlpi_phnum; i++) {
581    const Elf_Phdr *phdr = &info->dlpi_phdr[i];
582    if (phdr->p_type == PT_LOAD) {
583      uptr cur_beg = info->dlpi_addr + phdr->p_vaddr;
584      uptr cur_end = cur_beg + phdr->p_memsz;
585      bool executable = phdr->p_flags & PF_X;
586      bool writable = phdr->p_flags & PF_W;
587      cur_module.addAddressRange(cur_beg, cur_end, executable,
588                                 writable);
589    }
590  }
591  data->modules->push_back(cur_module);
592  return 0;
593}
594
595#if SANITIZER_ANDROID && __ANDROID_API__ < 21
596extern "C" __attribute__((weak)) int dl_iterate_phdr(
597    int (*)(struct dl_phdr_info *, size_t, void *), void *);
598#endif
599
// Returns true when module enumeration must use /proc/self/maps rather
// than dl_iterate_phdr.
static bool requiresProcmaps() {
#if SANITIZER_ANDROID && __ANDROID_API__ <= 22
  // Fall back to /proc/maps if dl_iterate_phdr is unavailable or broken.
  // The runtime check allows the same library to work with
  // both K and L (and future) Android releases.
  return AndroidGetApiLevel() <= ANDROID_LOLLIPOP_MR1;
#else
  return false;
#endif
}
610
// Populates modules by parsing the process memory map.
static void procmapsInit(InternalMmapVectorNoCtor<LoadedModule> *modules) {
  MemoryMappingLayout memory_mapping(/*cache_enabled*/true);
  memory_mapping.DumpListOfModules(modules);
}
615
// Rebuilds the module list, preferring dl_iterate_phdr where it is
// reliable and falling back to /proc/self/maps otherwise.
void ListOfModules::init() {
  clearOrInit();
  if (requiresProcmaps()) {
    procmapsInit(&modules_);
  } else {
    DlIteratePhdrData data = {&modules_, true};
    dl_iterate_phdr(dl_iterate_phdr_cb, &data);
  }
}
625
626// When a custom loader is used, dl_iterate_phdr may not contain the full
627// list of modules. Allow callers to fall back to using procmaps.
void ListOfModules::fallbackInit() {
  if (!requiresProcmaps()) {
    // init() used dl_iterate_phdr; retry with /proc/self/maps instead.
    clearOrInit();
    procmapsInit(&modules_);
  } else {
    // procmaps was already the primary mechanism; leave the list empty.
    clear();
  }
}
636
637// getrusage does not give us the current RSS, only the max RSS.
638// Still, this is better than nothing if /proc/self/statm is not available
639// for some reason, e.g. due to a sandbox.
640static uptr GetRSSFromGetrusage() {
641  struct rusage usage;
642  if (getrusage(RUSAGE_SELF, &usage))  // Failed, probably due to a sandbox.
643    return 0;
644  return usage.ru_maxrss << 10;  // ru_maxrss is in Kb.
645}
646
647uptr GetRSS() {
648  if (!common_flags()->can_use_proc_maps_statm)
649    return GetRSSFromGetrusage();
650  fd_t fd = OpenFile("/proc/self/statm", RdOnly);
651  if (fd == kInvalidFd)
652    return GetRSSFromGetrusage();
653  char buf[64];
654  uptr len = internal_read(fd, buf, sizeof(buf) - 1);
655  internal_close(fd);
656  if ((sptr)len <= 0)
657    return 0;
658  buf[len] = 0;
659  // The format of the file is:
660  // 1084 89 69 11 0 79 0
661  // We need the second number which is RSS in pages.
662  char *pos = buf;
663  // Skip the first number.
664  while (*pos >= '0' && *pos <= '9')
665    pos++;
666  // Skip whitespaces.
667  while (!(*pos >= '0' && *pos <= '9') && *pos != 0)
668    pos++;
669  // Read the number.
670  uptr rss = 0;
671  while (*pos >= '0' && *pos <= '9')
672    rss = rss * 10 + *pos++ - '0';
673  return rss * GetPageSizeCached();
674}
675
676// sysconf(_SC_NPROCESSORS_{CONF,ONLN}) cannot be used on most platforms as
677// they allocate memory.
// Counts CPUs without calling sysconf (which may allocate; see the comment
// above). Each branch uses an allocation-free mechanism for its platform.
u32 GetNumberOfCPUs() {
#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD
  u32 ncpu;
  int req[2];
  uptr len = sizeof(ncpu);
  req[0] = CTL_HW;
  req[1] = HW_NCPU;
  CHECK_EQ(internal_sysctl(req, 2, &ncpu, &len, NULL, 0), 0);
  return ncpu;
#elif SANITIZER_ANDROID && !defined(CPU_COUNT) && !defined(__aarch64__)
  // Fall back to /sys/devices/system/cpu on Android when cpu_set_t doesn't
  // exist in sched.h. That is the case for toolchains generated with older
  // NDKs.
  // This code doesn't work on AArch64 because internal_getdents makes use of
  // the 64bit getdents syscall, but cpu_set_t seems to always exist on AArch64.
  uptr fd = internal_open("/sys/devices/system/cpu", O_RDONLY | O_DIRECTORY);
  if (internal_iserror(fd))
    return 0;
  InternalMmapVector<u8> buffer(4096);
  uptr bytes_read = buffer.size();
  uptr n_cpus = 0;
  u8 *d_type;
  // Start past the buffer end so the first iteration refills it.
  struct linux_dirent *entry = (struct linux_dirent *)&buffer[bytes_read];
  while (true) {
    if ((u8 *)entry >= &buffer[bytes_read]) {
      // Buffer exhausted: read the next batch of directory entries.
      bytes_read = internal_getdents(fd, (struct linux_dirent *)buffer.data(),
                                     buffer.size());
      if (internal_iserror(bytes_read) || !bytes_read)
        break;
      entry = (struct linux_dirent *)buffer.data();
    }
    // The d_type byte is stored at the end of each record.
    d_type = (u8 *)entry + entry->d_reclen - 1;
    if (d_type >= &buffer[bytes_read] ||
        (u8 *)&entry->d_name[3] >= &buffer[bytes_read])
      break;
    // Count directories named "cpu<digit>...".
    if (entry->d_ino != 0 && *d_type == DT_DIR) {
      if (entry->d_name[0] == 'c' && entry->d_name[1] == 'p' &&
          entry->d_name[2] == 'u' &&
          entry->d_name[3] >= '0' && entry->d_name[3] <= '9')
        n_cpus++;
    }
    entry = (struct linux_dirent *)(((u8 *)entry) + entry->d_reclen);
  }
  internal_close(fd);
  return n_cpus;
#elif SANITIZER_SOLARIS
  return sysconf(_SC_NPROCESSORS_ONLN);
#else
#if defined(CPU_COUNT)
  cpu_set_t CPUs;
  CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
  return CPU_COUNT(&CPUs);
#else
  return 1;
#endif
#endif
}
735
736#if SANITIZER_LINUX
737
738# if SANITIZER_ANDROID
739static atomic_uint8_t android_log_initialized;
740
// Opens the syslog connection and marks logging as initialized; the
// release store pairs with the acquire load in ShouldLogAfterPrintf.
void AndroidLogInit() {
  openlog(GetProcessName(), 0, LOG_USER);
  atomic_store(&android_log_initialized, 1, memory_order_release);
}
745
// Printf output may be routed to syslog only after AndroidLogInit() ran.
static bool ShouldLogAfterPrintf() {
  return atomic_load(&android_log_initialized, memory_order_acquire);
}
749
750extern "C" SANITIZER_WEAK_ATTRIBUTE
751int async_safe_write_log(int pri, const char* tag, const char* msg);
752extern "C" SANITIZER_WEAK_ATTRIBUTE
753int __android_log_write(int prio, const char* tag, const char* msg);
754
755// ANDROID_LOG_INFO is 4, but can't be resolved at runtime.
756#define SANITIZER_ANDROID_LOG_INFO 4
757
758// async_safe_write_log is a new public version of __libc_write_log that is
759// used behind syslog. It is preferable to syslog as it will not do any dynamic
760// memory allocation or formatting.
761// If the function is not available, syslog is preferred for L+ (it was broken
762// pre-L) as __android_log_write triggers a racey behavior with the strncpy
763// interceptor. Fallback to __android_log_write pre-L.
void WriteOneLineToSyslog(const char *s) {
  if (&async_safe_write_log) {
    // Best option: no dynamic allocation or formatting (see above).
    async_safe_write_log(SANITIZER_ANDROID_LOG_INFO, GetProcessName(), s);
  } else if (AndroidGetApiLevel() > ANDROID_KITKAT) {
    syslog(LOG_INFO, "%s", s);
  } else {
    // Pre-L fallback: call the logging function directly.
    CHECK(&__android_log_write);
    __android_log_write(SANITIZER_ANDROID_LOG_INFO, nullptr, s);
  }
}
774
775extern "C" SANITIZER_WEAK_ATTRIBUTE
776void android_set_abort_message(const char *);
777
// Forwards str to bionic's abort-message hook when the weakly linked
// symbol is available; otherwise a no-op.
void SetAbortMessage(const char *str) {
  if (&android_set_abort_message)
    android_set_abort_message(str);
}
782# else
// Non-Android stubs: plain syslog is always usable here, and there is no
// abort-message facility.
void AndroidLogInit() {}

static bool ShouldLogAfterPrintf() { return true; }

void WriteOneLineToSyslog(const char *s) { syslog(LOG_INFO, "%s", s); }

void SetAbortMessage(const char *str) {}
790# endif  // SANITIZER_ANDROID
791
// Mirrors Printf output to syslog when the flag requests it and logging
// has been initialized.
void LogMessageOnPrintf(const char *str) {
  if (common_flags()->log_to_syslog && ShouldLogAfterPrintf())
    WriteToSyslog(str);
}
796
797#endif  // SANITIZER_LINUX
798
799#if SANITIZER_LINUX && !SANITIZER_GO
800// glibc crashes when using clock_gettime from a preinit_array function as the
801// vDSO function pointers haven't been initialized yet. __progname is
802// initialized after the vDSO function pointers, so if it exists, is not null
803// and is not empty, we can use clock_gettime.
804extern "C" SANITIZER_WEAK_ATTRIBUTE char *__progname;
805INLINE bool CanUseVDSO() {
806  // Bionic is safe, it checks for the vDSO function pointers to be initialized.
807  if (SANITIZER_ANDROID)
808    return true;
809  if (&__progname && __progname && *__progname)
810    return true;
811  return false;
812}
813
814// MonotonicNanoTime is a timing function that can leverage the vDSO by calling
815// clock_gettime. real_clock_gettime only exists if clock_gettime is
816// intercepted, so define it weakly and use it if available.
817extern "C" SANITIZER_WEAK_ATTRIBUTE
818int real_clock_gettime(u32 clk_id, void *tp);
// Returns CLOCK_MONOTONIC time in nanoseconds, via the (fast) vDSO-backed
// clock_gettime when safe, otherwise via the raw syscall.
u64 MonotonicNanoTime() {
  timespec ts;
  if (CanUseVDSO()) {
    // Prefer the intercepted real_clock_gettime when the interceptor
    // exists (weak symbol).
    if (&real_clock_gettime)
      real_clock_gettime(CLOCK_MONOTONIC, &ts);
    else
      clock_gettime(CLOCK_MONOTONIC, &ts);
  } else {
    // Too early for the vDSO: use the direct syscall.
    internal_clock_gettime(CLOCK_MONOTONIC, &ts);
  }
  return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
}
831#else
832// Non-Linux & Go always use the syscall.
833u64 MonotonicNanoTime() {
834  timespec ts;
835  internal_clock_gettime(CLOCK_MONOTONIC, &ts);
836  return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
837}
838#endif  // SANITIZER_LINUX && !SANITIZER_GO
839
840} // namespace __sanitizer
841
842#endif
843