//===-- sanitizer_common_test.cc ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//
#include <algorithm>
#include <functional>
#include <string.h>
#include <vector>

#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_platform.h"

#include "sanitizer_pthread_wrappers.h"

#include "gtest/gtest.h"

namespace __sanitizer {

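// Returns true if the first |n| elements of |array| are in non-decreasing
// order.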
static bool IsSorted(const uptr *array, uptr n) {
  for (uptr i = 1; i < n; i++) {
    if (array[i] < array[i - 1]) return false;
  }
  return true;
}

TEST(SanitizerCommon, SortTest) {
  uptr array[100];
  uptr n = 100;
  // Already sorted.
  for (uptr i = 0; i < n; i++) {
    array[i] = i;
  }
  Sort(array, n);
  EXPECT_TRUE(IsSorted(array, n));
  // Reverse order.
  for (uptr i = 0; i < n; i++) {
    array[i] = n - 1 - i;
  }
  Sort(array, n);
  EXPECT_TRUE(IsSorted(array, n));
  // Mixed order.
  for (uptr i = 0; i < n; i++) {
    array[i] = (i % 2 == 0) ? i : n - 1 - i;
  }
  Sort(array, n);
  EXPECT_TRUE(IsSorted(array, n));
  // All equal.
  for (uptr i = 0; i < n; i++) {
    array[i] = 42;
  }
  Sort(array, n);
  EXPECT_TRUE(IsSorted(array, n));
  // All but one sorted.
  for (uptr i = 0; i < n - 1; i++) {
    array[i] = i;
  }
  array[n - 1] = 42;
  Sort(array, n);
  EXPECT_TRUE(IsSorted(array, n));
  // Minimal case - sort two elements.
  array[0] = 1;
  array[1] = 0;
  Sort(array, 2);
  EXPECT_TRUE(IsSorted(array, 2));
}

TEST(SanitizerCommon, MmapAlignedOrDieOnFatalError) {
  uptr PageSize = GetPageSizeCached();
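  // Try every power-of-two size/alignment combination from 1 to 32 pages; the
  // returned address must honor the requested alignment and the whole region
  // must be writable.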
  for (uptr size = 1; size <= 32; size *= 2) {
    for (uptr alignment = 1; alignment <= 32; alignment *= 2) {
      for (int iter = 0; iter < 100; iter++) {
        uptr res = (uptr)MmapAlignedOrDieOnFatalError(
            size * PageSize, alignment * PageSize, "MmapAlignedOrDieTest");
        EXPECT_EQ(0U, res % (alignment * PageSize));
        internal_memset((void*)res, 1, size * PageSize);
        UnmapOrDie((void*)res, size * PageSize);
      }
    }
  }
}

TEST(SanitizerCommon, InternalMmapVectorRoundUpCapacity) {
  InternalMmapVector<uptr> v;
  v.reserve(1);
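  // Allocations are page-granular, so reserving even a single element should
  // round the capacity up to a full page worth of uptrs.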
  CHECK_EQ(v.capacity(), GetPageSizeCached() / sizeof(uptr));
}

TEST(SanitizerCommon, InternalMmapVectorResize) {
  InternalMmapVector<uptr> v;
  CHECK_EQ(0U, v.size());
  CHECK_GE(v.capacity(), v.size());

  v.reserve(1000);
  CHECK_EQ(0U, v.size());
  CHECK_GE(v.capacity(), 1000U);

  v.resize(10000);
  CHECK_EQ(10000U, v.size());
  CHECK_GE(v.capacity(), v.size());
  uptr cap = v.capacity();

  v.resize(100);
  CHECK_EQ(100U, v.size());
  CHECK_EQ(v.capacity(), cap);

  v.reserve(10);
  CHECK_EQ(100U, v.size());
  CHECK_EQ(v.capacity(), cap);
}

TEST(SanitizerCommon, InternalMmapVector) {
  InternalMmapVector<uptr> vector;
  for (uptr i = 0; i < 100; i++) {
    EXPECT_EQ(i, vector.size());
    vector.push_back(i);
  }
  for (uptr i = 0; i < 100; i++) {
    EXPECT_EQ(i, vector[i]);
  }
  for (int i = 99; i >= 0; i--) {
    EXPECT_EQ((uptr)i, vector.back());
    vector.pop_back();
    EXPECT_EQ((uptr)i, vector.size());
  }
  InternalMmapVector<uptr> empty_vector;
  CHECK_GT(empty_vector.capacity(), 0U);
  CHECK_EQ(0U, empty_vector.size());
}

TEST(SanitizerCommon, InternalMmapVectorEq) {
  InternalMmapVector<uptr> vector1;
  InternalMmapVector<uptr> vector2;
  for (uptr i = 0; i < 100; i++) {
    vector1.push_back(i);
    vector2.push_back(i);
  }
  EXPECT_TRUE(vector1 == vector2);
  EXPECT_FALSE(vector1 != vector2);

  vector1.push_back(1);
  EXPECT_FALSE(vector1 == vector2);
  EXPECT_TRUE(vector1 != vector2);

  vector2.push_back(1);
  EXPECT_TRUE(vector1 == vector2);
  EXPECT_FALSE(vector1 != vector2);

  vector1[55] = 1;
  EXPECT_FALSE(vector1 == vector2);
  EXPECT_TRUE(vector1 != vector2);
}

TEST(SanitizerCommon, InternalMmapVectorSwap) {
  InternalMmapVector<uptr> vector1;
  InternalMmapVector<uptr> vector2;
  InternalMmapVector<uptr> vector3;
  InternalMmapVector<uptr> vector4;
  for (uptr i = 0; i < 100; i++) {
    vector1.push_back(i);
    vector2.push_back(i);
    vector3.push_back(-i);
    vector4.push_back(-i);
  }
  EXPECT_NE(vector2, vector3);
  EXPECT_NE(vector1, vector4);
  vector1.swap(vector3);
  EXPECT_EQ(vector2, vector3);
  EXPECT_EQ(vector1, vector4);
}

void TestThreadInfo(bool main) {
  uptr stk_addr = 0;
  uptr stk_size = 0;
  uptr tls_addr = 0;
  uptr tls_size = 0;
  GetThreadStackAndTls(main, &stk_addr, &stk_size, &tls_addr, &tls_size);

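  // The address of a local variable must fall within the reported stack range.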
  int stack_var;
  EXPECT_NE(stk_addr, (uptr)0);
  EXPECT_NE(stk_size, (uptr)0);
  EXPECT_GT((uptr)&stack_var, stk_addr);
  EXPECT_LT((uptr)&stack_var, stk_addr + stk_size);

#if SANITIZER_LINUX && defined(__x86_64__)
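  // On x86-64 Linux, a __thread variable must fall within the reported TLS
  // range.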
  static __thread int thread_var;
  EXPECT_NE(tls_addr, (uptr)0);
  EXPECT_NE(tls_size, (uptr)0);
  EXPECT_GT((uptr)&thread_var, tls_addr);
  EXPECT_LT((uptr)&thread_var, tls_addr + tls_size);

  // Ensure that tls and stack do not intersect.
  uptr tls_end = tls_addr + tls_size;
  EXPECT_TRUE(tls_addr < stk_addr || tls_addr >= stk_addr + stk_size);
  EXPECT_TRUE(tls_end  < stk_addr || tls_end  >=  stk_addr + stk_size);
  EXPECT_TRUE((tls_addr < stk_addr) == (tls_end  < stk_addr));
#endif
}

static void *WorkerThread(void *arg) {
  TestThreadInfo(false);
  return 0;
}

TEST(SanitizerCommon, ThreadStackTlsMain) {
  InitTlsSize();
  TestThreadInfo(true);
}

TEST(SanitizerCommon, ThreadStackTlsWorker) {
  InitTlsSize();
  pthread_t t;
  PTHREAD_CREATE(&t, 0, WorkerThread, 0);
  PTHREAD_JOIN(t, 0);
}

bool UptrLess(uptr a, uptr b) {
  return a < b;
}

TEST(SanitizerCommon, InternalLowerBound) {
  static const uptr kSize = 5;
  int arr[kSize];
  arr[0] = 1;
  arr[1] = 3;
  arr[2] = 5;
  arr[3] = 7;
  arr[4] = 11;

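  // InternalLowerBound should return the index of the first element that is
  // not less than the searched value, or kSize when every element is smaller.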
  EXPECT_EQ(0u, InternalLowerBound(arr, 0, kSize, 0, UptrLess));
  EXPECT_EQ(0u, InternalLowerBound(arr, 0, kSize, 1, UptrLess));
  EXPECT_EQ(1u, InternalLowerBound(arr, 0, kSize, 2, UptrLess));
  EXPECT_EQ(1u, InternalLowerBound(arr, 0, kSize, 3, UptrLess));
  EXPECT_EQ(2u, InternalLowerBound(arr, 0, kSize, 4, UptrLess));
  EXPECT_EQ(2u, InternalLowerBound(arr, 0, kSize, 5, UptrLess));
  EXPECT_EQ(3u, InternalLowerBound(arr, 0, kSize, 6, UptrLess));
  EXPECT_EQ(3u, InternalLowerBound(arr, 0, kSize, 7, UptrLess));
  EXPECT_EQ(4u, InternalLowerBound(arr, 0, kSize, 8, UptrLess));
  EXPECT_EQ(4u, InternalLowerBound(arr, 0, kSize, 9, UptrLess));
  EXPECT_EQ(4u, InternalLowerBound(arr, 0, kSize, 10, UptrLess));
  EXPECT_EQ(4u, InternalLowerBound(arr, 0, kSize, 11, UptrLess));
  EXPECT_EQ(5u, InternalLowerBound(arr, 0, kSize, 12, UptrLess));
}
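
// Illustrative addition (a minimal sketch, not part of the original suite):
// by lower_bound semantics, an empty range has no element that is not less
// than the value, so the end index (0 here) should be returned. Only the
// InternalLowerBound overload and UptrLess comparator used above are assumed.
TEST(SanitizerCommon, InternalLowerBoundEmptyRange) {
  int arr[1] = {1};  // Never dereferenced: the searched range [0, 0) is empty.
  EXPECT_EQ(0u, InternalLowerBound(arr, 0, 0, 0, UptrLess));
}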

TEST(SanitizerCommon, InternalLowerBoundVsStdLowerBound) {
  std::vector<int> data;
  auto create_item = [] (size_t i, size_t j) {
    auto v = i * 10000 + j;
    return ((v << 6) + (v >> 6) + 0x9e3779b9) % 100;
  };
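  // For each size, fill the vector with deterministic pseudo-random values,
  // sort it, and check that InternalLowerBound agrees with std::lower_bound
  // for values around every item.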
  for (size_t i = 0; i < 1000; ++i) {
    data.resize(i);
    for (size_t j = 0; j < i; ++j) {
      data[j] = create_item(i, j);
    }

    std::sort(data.begin(), data.end());

    for (size_t j = 0; j < i; ++j) {
      int val = create_item(i, j);
      for (auto to_find : {val - 1, val, val + 1}) {
        uptr expected =
            std::lower_bound(data.begin(), data.end(), to_find) - data.begin();
        EXPECT_EQ(expected, InternalLowerBound(data.data(), 0, data.size(),
                                               to_find, std::less<int>()));
      }
    }
  }
}

#if SANITIZER_LINUX && !SANITIZER_ANDROID
TEST(SanitizerCommon, FindPathToBinary) {
  char *true_path = FindPathToBinary("true");
  EXPECT_NE((char*)0, internal_strstr(true_path, "/bin/true"));
  InternalFree(true_path);
  EXPECT_EQ(0, FindPathToBinary("unexisting_binary.ergjeorj"));
}
#elif SANITIZER_WINDOWS
TEST(SanitizerCommon, FindPathToBinary) {
  // ntdll.dll should be on PATH in all supported test environments on all
  // supported Windows versions.
  char *ntdll_path = FindPathToBinary("ntdll.dll");
  EXPECT_NE((char*)0, internal_strstr(ntdll_path, "ntdll.dll"));
  InternalFree(ntdll_path);
  EXPECT_EQ(0, FindPathToBinary("unexisting_binary.ergjeorj"));
}
#endif

TEST(SanitizerCommon, StripPathPrefix) {
  EXPECT_EQ(0, StripPathPrefix(0, "prefix"));
  EXPECT_STREQ("foo", StripPathPrefix("foo", 0));
  EXPECT_STREQ("dir/file.cc",
               StripPathPrefix("/usr/lib/dir/file.cc", "/usr/lib/"));
  EXPECT_STREQ("/file.cc", StripPathPrefix("/usr/myroot/file.cc", "/myroot"));
  EXPECT_STREQ("file.h", StripPathPrefix("/usr/lib/./file.h", "/usr/lib/"));
}

TEST(SanitizerCommon, RemoveANSIEscapeSequencesFromString) {
  RemoveANSIEscapeSequencesFromString(nullptr);
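  // Pairs of {input, expected output after stripping ANSI escape sequences}.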
  const char *buffs[22] = {
    "Default",                                "Default",
    "\033[95mLight magenta",                  "Light magenta",
    "\033[30mBlack\033[32mGreen\033[90mGray", "BlackGreenGray",
    "\033[106mLight cyan \033[107mWhite ",    "Light cyan White ",
    "\033[31mHello\033[0m World",             "Hello World",
    "\033[38;5;82mHello \033[38;5;198mWorld", "Hello World",
    "123[653456789012",                       "123[653456789012",
    "Normal \033[5mBlink \033[25mNormal",     "Normal Blink Normal",
    "\033[106m\033[107m",                     "",
    "",                                       "",
    " ",                                      " ",
  };

  for (size_t i = 0; i < ARRAY_SIZE(buffs); i+=2) {
    char *buffer_copy = internal_strdup(buffs[i]);
    RemoveANSIEscapeSequencesFromString(buffer_copy);
    EXPECT_STREQ(buffer_copy, buffs[i+1]);
    InternalFree(buffer_copy);
  }
}

TEST(SanitizerCommon, InternalScopedString) {
  InternalScopedString str(10);
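  // A capacity of 10 leaves room for at most 9 characters plus the
  // terminating NUL; the truncation checks below rely on this.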
  EXPECT_EQ(0U, str.length());
  EXPECT_STREQ("", str.data());

  str.append("foo");
  EXPECT_EQ(3U, str.length());
  EXPECT_STREQ("foo", str.data());

  int x = 1234;
  str.append("%d", x);
  EXPECT_EQ(7U, str.length());
  EXPECT_STREQ("foo1234", str.data());

  str.append("%d", x);
  EXPECT_EQ(9U, str.length());
  EXPECT_STREQ("foo123412", str.data());

  str.clear();
  EXPECT_EQ(0U, str.length());
  EXPECT_STREQ("", str.data());

  str.append("0123456789");
  EXPECT_EQ(9U, str.length());
  EXPECT_STREQ("012345678", str.data());
}

#if SANITIZER_LINUX || SANITIZER_FREEBSD || \
  SANITIZER_OPENBSD || SANITIZER_MAC || SANITIZER_IOS
TEST(SanitizerCommon, GetRandom) {
  u8 buffer_1[32], buffer_2[32];
  for (bool blocking : { false, true }) {
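    // Null buffers, zero-length requests, and oversized (512-byte) requests
    // are expected to fail; requests of 4..32 bytes must succeed, and two
    // consecutive reads must not produce identical bytes.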
    EXPECT_FALSE(GetRandom(nullptr, 32, blocking));
    EXPECT_FALSE(GetRandom(buffer_1, 0, blocking));
    EXPECT_FALSE(GetRandom(buffer_1, 512, blocking));
    EXPECT_EQ(ARRAY_SIZE(buffer_1), ARRAY_SIZE(buffer_2));
    for (uptr size = 4; size <= ARRAY_SIZE(buffer_1); size += 4) {
      for (uptr i = 0; i < 100; i++) {
        EXPECT_TRUE(GetRandom(buffer_1, size, blocking));
        EXPECT_TRUE(GetRandom(buffer_2, size, blocking));
        EXPECT_NE(internal_memcmp(buffer_1, buffer_2, size), 0);
      }
    }
  }
}
#endif

TEST(SanitizerCommon, ReservedAddressRangeInit) {
  uptr init_size = 0xffff;
  ReservedAddressRange address_range;
  uptr res = address_range.Init(init_size);
  CHECK_NE(res, (void*)-1);
  UnmapOrDie((void*)res, init_size);
  // Should be able to map into the same space now.
  ReservedAddressRange address_range2;
  uptr res2 = address_range2.Init(init_size, nullptr, res);
  CHECK_EQ(res, res2);

  // TODO(flowerhack): Once this is switched to the "real" implementation
  // (rather than passing through to MmapNoAccess*), enforce and test "no
  // double initializations allowed".
}

TEST(SanitizerCommon, ReservedAddressRangeMap) {
  constexpr uptr init_size = 0xffff;
  ReservedAddressRange address_range;
  uptr res = address_range.Init(init_size);
  CHECK_NE(res, (void*)-1);

  // Valid mappings should succeed.
  CHECK_EQ(res, address_range.Map(res, init_size));

  // Valid mappings should be readable.
  unsigned char buffer[init_size];
  memcpy(buffer, reinterpret_cast<void *>(res), init_size);

  // TODO(flowerhack): Once this is switched to the "real" implementation, make
  // sure you can only mmap into offsets in the Init range.
}

TEST(SanitizerCommon, ReservedAddressRangeUnmap) {
  uptr PageSize = GetPageSizeCached();
  uptr init_size = PageSize * 8;
  ReservedAddressRange address_range;
  uptr base_addr = address_range.Init(init_size);
  CHECK_NE(base_addr, (void*)-1);
  CHECK_EQ(base_addr, address_range.Map(base_addr, init_size));

  // Unmapping the entire range should succeed.
  address_range.Unmap(base_addr, init_size);

  // Map a new range.
  base_addr = address_range.Init(init_size);
  CHECK_EQ(base_addr, address_range.Map(base_addr, init_size));

  // Windows doesn't allow partial unmappings.
#if !SANITIZER_WINDOWS

  // Unmapping at the beginning should succeed.
  address_range.Unmap(base_addr, PageSize);

  // Unmapping at the end should succeed.
  uptr new_start = reinterpret_cast<uptr>(address_range.base()) +
                   address_range.size() - PageSize;
  address_range.Unmap(new_start, PageSize);

#endif

  // Unmapping in the middle of the ReservedAddressRange should fail.
  EXPECT_DEATH(address_range.Unmap(base_addr + (PageSize * 2), PageSize), ".*");
}

}  // namespace __sanitizer