1//===-- tsan_interface_ann.cc ---------------------------------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is a part of ThreadSanitizer (TSan), a race detector.
11//
12//===----------------------------------------------------------------------===//
13#include "sanitizer_common/sanitizer_libc.h"
14#include "sanitizer_common/sanitizer_internal_defs.h"
15#include "sanitizer_common/sanitizer_placement_new.h"
16#include "sanitizer_common/sanitizer_stacktrace.h"
17#include "sanitizer_common/sanitizer_vector.h"
18#include "tsan_interface_ann.h"
19#include "tsan_mutex.h"
20#include "tsan_report.h"
21#include "tsan_rtl.h"
22#include "tsan_mman.h"
23#include "tsan_flags.h"
24#include "tsan_platform.h"
25
26#define CALLERPC ((uptr)__builtin_return_address(0))
27
28using namespace __tsan;  // NOLINT
29
30namespace __tsan {
31
32class ScopedAnnotation {
33 public:
34  ScopedAnnotation(ThreadState *thr, const char *aname, uptr pc)
35      : thr_(thr) {
36    FuncEntry(thr_, pc);
37    DPrintf("#%d: annotation %s()\n", thr_->tid, aname);
38  }
39
40  ~ScopedAnnotation() {
41    FuncExit(thr_);
42    CheckNoLocks(thr_);
43  }
44 private:
45  ThreadState *const thr_;
46};
47
// Common prologue for every annotation entry point:
//  - returns |ret| immediately if annotations are disabled via flags;
//  - brings the current ThreadState into scope as |thr|;
//  - bumps the per-annotation statistics counters;
//  - opens a ScopedAnnotation frame named after the enclosing function;
//  - defines |pc| for use by the annotation body ((void)pc silences
//    unused-variable warnings in bodies that don't need it).
#define SCOPED_ANNOTATION_RET(typ, ret) \
    if (!flags()->enable_annotations) \
      return ret; \
    ThreadState *thr = cur_thread(); \
    const uptr caller_pc = (uptr)__builtin_return_address(0); \
    StatInc(thr, StatAnnotation); \
    StatInc(thr, Stat##typ); \
    ScopedAnnotation sa(thr, __func__, caller_pc); \
    const uptr pc = StackTrace::GetCurrentPc(); \
    (void)pc; \
/**/

// Variant for annotations that return void.
#define SCOPED_ANNOTATION(typ) SCOPED_ANNOTATION_RET(typ, )
61
// Maximum size (including the terminating NUL) of a race description stored
// in ExpectRace::desc; longer user descriptions are truncated on copy.
static const int kMaxDescLen = 128;

// Node in a circular doubly-linked list of expected/benign race annotations.
// The list head is a sentinel ExpectRace embedded in DynamicAnnContext.
struct ExpectRace {
  ExpectRace *next;
  ExpectRace *prev;
  atomic_uintptr_t hitcount;  // times a race report matched this entry
  atomic_uintptr_t addcount;  // times this exact range was annotated
  uptr addr;                  // start of the annotated memory range
  uptr size;                  // size of the annotated memory range
  char *file;                 // annotation source location
  int line;
  char desc[kMaxDescLen];     // user-supplied description (may be empty)
};
75
// Global state for dynamic annotations: the expected-race and benign-race
// lists, and the mutex protecting both.
struct DynamicAnnContext {
  Mutex mtx;
  ExpectRace expect;  // sentinel head of the expected-race list
  ExpectRace benign;  // sentinel head of the benign-race list

  DynamicAnnContext()
    : mtx(MutexTypeAnnotations, StatMtxAnnotations) {
  }
};

// Placement-constructed into the static buffer below by
// InitializeDynamicAnnotations() — avoids heap allocation during startup.
static DynamicAnnContext *dyn_ann_ctx;
static char dyn_ann_ctx_placeholder[sizeof(DynamicAnnContext)] ALIGNED(64);
88
// Registers an expected/benign race for [addr, addr+size) in |list|.
// If an entry with exactly the same addr/size already exists, only its
// addcount is bumped; otherwise a new node is allocated and linked in right
// after the sentinel head. Caller must hold dyn_ann_ctx->mtx (all callers
// in this file take it first).
static void AddExpectRace(ExpectRace *list,
    char *f, int l, uptr addr, uptr size, char *desc) {
  ExpectRace *race = list->next;
  for (; race != list; race = race->next) {
    if (race->addr == addr && race->size == size) {
      // Duplicate annotation: count it, keep the original entry.
      // Plain load+store (not a RMW) is sufficient under the list mutex.
      atomic_store_relaxed(&race->addcount,
          atomic_load_relaxed(&race->addcount) + 1);
      return;
    }
  }
  race = (ExpectRace*)internal_alloc(MBlockExpectRace, sizeof(ExpectRace));
  race->addr = addr;
  race->size = size;
  race->file = f;
  race->line = l;
  race->desc[0] = 0;
  atomic_store_relaxed(&race->hitcount, 0);
  atomic_store_relaxed(&race->addcount, 1);
  if (desc) {
    // Copy the description, truncating to at most kMaxDescLen-1 chars.
    int i = 0;
    for (; i < kMaxDescLen - 1 && desc[i]; i++)
      race->desc[i] = desc[i];
    race->desc[i] = 0;
  }
  // Splice the new node in right after the sentinel head.
  race->prev = list;
  race->next = list->next;
  race->next->prev = race;
  list->next = race;
}
118
119static ExpectRace *FindRace(ExpectRace *list, uptr addr, uptr size) {
120  for (ExpectRace *race = list->next; race != list; race = race->next) {
121    uptr maxbegin = max(race->addr, addr);
122    uptr minend = min(race->addr + race->size, addr + size);
123    if (maxbegin < minend)
124      return race;
125  }
126  return 0;
127}
128
129static bool CheckContains(ExpectRace *list, uptr addr, uptr size) {
130  ExpectRace *race = FindRace(list, addr, size);
131  if (race == 0)
132    return false;
133  DPrintf("Hit expected/benign race: %s addr=%zx:%d %s:%d\n",
134      race->desc, race->addr, (int)race->size, race->file, race->line);
135  atomic_fetch_add(&race->hitcount, 1, memory_order_relaxed);
136  return true;
137}
138
139static void InitList(ExpectRace *list) {
140  list->next = list;
141  list->prev = list;
142}
143
144void InitializeDynamicAnnotations() {
145  dyn_ann_ctx = new(dyn_ann_ctx_placeholder) DynamicAnnContext;
146  InitList(&dyn_ann_ctx->expect);
147  InitList(&dyn_ann_ctx->benign);
148}
149
150bool IsExpectedReport(uptr addr, uptr size) {
151  ReadLock lock(&dyn_ann_ctx->mtx);
152  if (CheckContains(&dyn_ann_ctx->expect, addr, size))
153    return true;
154  if (CheckContains(&dyn_ann_ctx->benign, addr, size))
155    return true;
156  return false;
157}
158
// Aggregates benign-race entries into |matched|, deduplicating by
// (line, file, desc) and summing the selected |counter| field across
// duplicates. Increments *unique_count for every list entry and *hit_count
// by each entry's nonzero counter value. Caller must hold dyn_ann_ctx->mtx.
static void CollectMatchedBenignRaces(Vector<ExpectRace> *matched,
    int *unique_count, int *hit_count, atomic_uintptr_t ExpectRace::*counter) {
  ExpectRace *list = &dyn_ann_ctx->benign;
  for (ExpectRace *race = list->next; race != list; race = race->next) {
    (*unique_count)++;
    const uptr cnt = atomic_load_relaxed(&(race->*counter));
    if (cnt == 0)
      continue;
    *hit_count += cnt;
    uptr i = 0;
    for (; i < matched->Size(); i++) {
      ExpectRace *race0 = &(*matched)[i];
      // Same annotation site: fold this entry's count into the existing one.
      if (race->line == race0->line
          && internal_strcmp(race->file, race0->file) == 0
          && internal_strcmp(race->desc, race0->desc) == 0) {
        atomic_fetch_add(&(race0->*counter), cnt, memory_order_relaxed);
        break;
      }
    }
    // No prior entry from this site: record a copy.
    if (i == matched->Size())
      matched->PushBack(*race);
  }
}
182
183void PrintMatchedBenignRaces() {
184  Lock lock(&dyn_ann_ctx->mtx);
185  int unique_count = 0;
186  int hit_count = 0;
187  int add_count = 0;
188  Vector<ExpectRace> hit_matched;
189  CollectMatchedBenignRaces(&hit_matched, &unique_count, &hit_count,
190      &ExpectRace::hitcount);
191  Vector<ExpectRace> add_matched;
192  CollectMatchedBenignRaces(&add_matched, &unique_count, &add_count,
193      &ExpectRace::addcount);
194  if (hit_matched.Size()) {
195    Printf("ThreadSanitizer: Matched %d \"benign\" races (pid=%d):\n",
196        hit_count, (int)internal_getpid());
197    for (uptr i = 0; i < hit_matched.Size(); i++) {
198      Printf("%d %s:%d %s\n",
199          atomic_load_relaxed(&hit_matched[i].hitcount),
200          hit_matched[i].file, hit_matched[i].line, hit_matched[i].desc);
201    }
202  }
203  if (hit_matched.Size()) {
204    Printf("ThreadSanitizer: Annotated %d \"benign\" races, %d unique"
205           " (pid=%d):\n",
206        add_count, unique_count, (int)internal_getpid());
207    for (uptr i = 0; i < add_matched.Size(); i++) {
208      Printf("%d %s:%d %s\n",
209          atomic_load_relaxed(&add_matched[i].addcount),
210          add_matched[i].file, add_matched[i].line, add_matched[i].desc);
211    }
212  }
213}
214
// Emits a warning for an ANNOTATE_EXPECT_RACE entry whose hit counter is
// still zero at flush time (see AnnotateFlushExpectedRaces).
static void ReportMissedExpectedRace(ExpectRace *race) {
  Printf("==================\n");
  Printf("WARNING: ThreadSanitizer: missed expected data race\n");
  Printf("  %s addr=%zx %s:%d\n",
      race->desc, race->addr, race->file, race->line);
  Printf("==================\n");
}
222}  // namespace __tsan
223
224using namespace __tsan;  // NOLINT
225
226extern "C" {
// ANNOTATE_HAPPENS_BEFORE(addr): implemented as a release on addr.
void INTERFACE_ATTRIBUTE AnnotateHappensBefore(char *f, int l, uptr addr) {
  SCOPED_ANNOTATION(AnnotateHappensBefore);
  Release(thr, pc, addr);
}

// ANNOTATE_HAPPENS_AFTER(addr): implemented as an acquire on addr, pairing
// with a prior AnnotateHappensBefore on the same address.
void INTERFACE_ATTRIBUTE AnnotateHappensAfter(char *f, int l, uptr addr) {
  SCOPED_ANNOTATION(AnnotateHappensAfter);
  Acquire(thr, pc, addr);
}
236
// The condvar/mutex annotations below are no-ops: only the common
// SCOPED_ANNOTATION prologue (flag check, stats, shadow-stack frame) runs.
void INTERFACE_ATTRIBUTE AnnotateCondVarSignal(char *f, int l, uptr cv) {
  SCOPED_ANNOTATION(AnnotateCondVarSignal);
}

void INTERFACE_ATTRIBUTE AnnotateCondVarSignalAll(char *f, int l, uptr cv) {
  SCOPED_ANNOTATION(AnnotateCondVarSignalAll);
}

void INTERFACE_ATTRIBUTE AnnotateMutexIsNotPHB(char *f, int l, uptr mu) {
  SCOPED_ANNOTATION(AnnotateMutexIsNotPHB);
}

void INTERFACE_ATTRIBUTE AnnotateCondVarWait(char *f, int l, uptr cv,
                                             uptr lock) {
  SCOPED_ANNOTATION(AnnotateCondVarWait);
}
253
// ANNOTATE_RWLOCK_CREATE: registers m as a write-reentrant mutex.
void INTERFACE_ATTRIBUTE AnnotateRWLockCreate(char *f, int l, uptr m) {
  SCOPED_ANNOTATION(AnnotateRWLockCreate);
  MutexCreate(thr, pc, m, MutexFlagWriteReentrant);
}

// Same, but additionally marks the mutex as linker-initialized.
void INTERFACE_ATTRIBUTE AnnotateRWLockCreateStatic(char *f, int l, uptr m) {
  SCOPED_ANNOTATION(AnnotateRWLockCreateStatic);
  MutexCreate(thr, pc, m, MutexFlagWriteReentrant | MutexFlagLinkerInit);
}

void INTERFACE_ATTRIBUTE AnnotateRWLockDestroy(char *f, int l, uptr m) {
  SCOPED_ANNOTATION(AnnotateRWLockDestroy);
  MutexDestroy(thr, pc, m);
}

// ANNOTATE_RWLOCK_ACQUIRED: is_w selects write vs read lock semantics.
// MutexFlagDoPreLockOnPostLock compensates for the missing pre-lock event.
void INTERFACE_ATTRIBUTE AnnotateRWLockAcquired(char *f, int l, uptr m,
                                                uptr is_w) {
  SCOPED_ANNOTATION(AnnotateRWLockAcquired);
  if (is_w)
    MutexPostLock(thr, pc, m, MutexFlagDoPreLockOnPostLock);
  else
    MutexPostReadLock(thr, pc, m, MutexFlagDoPreLockOnPostLock);
}

// ANNOTATE_RWLOCK_RELEASED: is_w selects write vs read unlock semantics.
void INTERFACE_ATTRIBUTE AnnotateRWLockReleased(char *f, int l, uptr m,
                                                uptr is_w) {
  SCOPED_ANNOTATION(AnnotateRWLockReleased);
  if (is_w)
    MutexUnlock(thr, pc, m);
  else
    MutexReadUnlock(thr, pc, m);
}
286
// The annotations below are no-ops beyond the common prologue.
void INTERFACE_ATTRIBUTE AnnotateTraceMemory(char *f, int l, uptr mem) {
  SCOPED_ANNOTATION(AnnotateTraceMemory);
}

void INTERFACE_ATTRIBUTE AnnotateFlushState(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateFlushState);
}

void INTERFACE_ATTRIBUTE AnnotateNewMemory(char *f, int l, uptr mem,
                                           uptr size) {
  SCOPED_ANNOTATION(AnnotateNewMemory);
}

void INTERFACE_ATTRIBUTE AnnotateNoOp(char *f, int l, uptr mem) {
  SCOPED_ANNOTATION(AnnotateNoOp);
}
303
// Drains the expected-race list: reports (and counts in ctx->nmissed_expected)
// every entry that was never hit, then unlinks and frees all entries.
void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateFlushExpectedRaces);
  Lock lock(&dyn_ann_ctx->mtx);
  while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) {
    ExpectRace *race = dyn_ann_ctx->expect.next;
    if (atomic_load_relaxed(&race->hitcount) == 0) {
      ctx->nmissed_expected++;
      ReportMissedExpectedRace(race);
    }
    // Unlink from the circular list, then release the node.
    race->prev->next = race->next;
    race->next->prev = race->prev;
    internal_free(race);
  }
}
318
// The annotations below are no-ops beyond the common prologue.
void INTERFACE_ATTRIBUTE AnnotateEnableRaceDetection(
    char *f, int l, int enable) {
  SCOPED_ANNOTATION(AnnotateEnableRaceDetection);
  // FIXME: Reconsider this functionality later. It may be irrelevant.
}

void INTERFACE_ATTRIBUTE AnnotateMutexIsUsedAsCondVar(
    char *f, int l, uptr mu) {
  SCOPED_ANNOTATION(AnnotateMutexIsUsedAsCondVar);
}

// Producer-consumer queue annotations — not modeled.
void INTERFACE_ATTRIBUTE AnnotatePCQGet(
    char *f, int l, uptr pcq) {
  SCOPED_ANNOTATION(AnnotatePCQGet);
}

void INTERFACE_ATTRIBUTE AnnotatePCQPut(
    char *f, int l, uptr pcq) {
  SCOPED_ANNOTATION(AnnotatePCQPut);
}

void INTERFACE_ATTRIBUTE AnnotatePCQDestroy(
    char *f, int l, uptr pcq) {
  SCOPED_ANNOTATION(AnnotatePCQDestroy);
}

void INTERFACE_ATTRIBUTE AnnotatePCQCreate(
    char *f, int l, uptr pcq) {
  SCOPED_ANNOTATION(AnnotatePCQCreate);
}
349
// ANNOTATE_EXPECT_RACE: registers a 1-byte range at |mem| in the
// expected-race list; missed entries are reported at flush time.
void INTERFACE_ATTRIBUTE AnnotateExpectRace(
    char *f, int l, uptr mem, char *desc) {
  SCOPED_ANNOTATION(AnnotateExpectRace);
  Lock lock(&dyn_ann_ctx->mtx);
  AddExpectRace(&dyn_ann_ctx->expect,
                f, l, mem, 1, desc);
  DPrintf("Add expected race: %s addr=%zx %s:%d\n", desc, mem, f, l);
}
358
359static void BenignRaceImpl(
360    char *f, int l, uptr mem, uptr size, char *desc) {
361  Lock lock(&dyn_ann_ctx->mtx);
362  AddExpectRace(&dyn_ann_ctx->benign,
363                f, l, mem, size, desc);
364  DPrintf("Add benign race: %s addr=%zx %s:%d\n", desc, mem, f, l);
365}
366
// FIXME: Turn it off later. WTF is benign race?1?? Go talk to Hans Boehm.
// ANNOTATE_BENIGN_RACE_SIZED: suppress race reports on [mem, mem+size).
void INTERFACE_ATTRIBUTE AnnotateBenignRaceSized(
    char *f, int l, uptr mem, uptr size, char *desc) {
  SCOPED_ANNOTATION(AnnotateBenignRaceSized);
  BenignRaceImpl(f, l, mem, size, desc);
}

// ANNOTATE_BENIGN_RACE: single-byte variant of the above.
void INTERFACE_ATTRIBUTE AnnotateBenignRace(
    char *f, int l, uptr mem, char *desc) {
  SCOPED_ANNOTATION(AnnotateBenignRace);
  BenignRaceImpl(f, l, mem, 1, desc);
}
379
// Ignore-region annotations. Note that the reads and writes variants map to
// the same ThreadIgnoreBegin/End calls, i.e. both ignore all memory accesses.
void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsBegin(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreReadsBegin);
  ThreadIgnoreBegin(thr, pc);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsEnd(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreReadsEnd);
  ThreadIgnoreEnd(thr, pc);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesBegin(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreWritesBegin);
  ThreadIgnoreBegin(thr, pc);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesEnd(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreWritesEnd);
  ThreadIgnoreEnd(thr, pc);
}

// Ignore synchronization operations between Begin and End.
void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncBegin(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreSyncBegin);
  ThreadIgnoreSyncBegin(thr, pc);
}

void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncEnd(char *f, int l) {
  SCOPED_ANNOTATION(AnnotateIgnoreSyncEnd);
  ThreadIgnoreSyncEnd(thr, pc);
}
409
// Publish/unpublish annotations are no-ops beyond the common prologue.
void INTERFACE_ATTRIBUTE AnnotatePublishMemoryRange(
    char *f, int l, uptr addr, uptr size) {
  SCOPED_ANNOTATION(AnnotatePublishMemoryRange);
}

void INTERFACE_ATTRIBUTE AnnotateUnpublishMemoryRange(
    char *f, int l, uptr addr, uptr size) {
  SCOPED_ANNOTATION(AnnotateUnpublishMemoryRange);
}

// ANNOTATE_THREAD_NAME: sets the current thread's name for reports.
void INTERFACE_ATTRIBUTE AnnotateThreadName(
    char *f, int l, char *name) {
  SCOPED_ANNOTATION(AnnotateThreadName);
  ThreadSetName(thr, name);
}
425
// WTFAnnotateHappensBefore() and WTFAnnotateHappensAfter() are deliberately
// left as no-ops: WebKit uses them to annotate atomic operations, which
// ThreadSanitizer should already handle correctly on its own.
void INTERFACE_ATTRIBUTE WTFAnnotateHappensBefore(char *f, int l, uptr addr) {
  SCOPED_ANNOTATION(AnnotateHappensBefore);
}

void INTERFACE_ATTRIBUTE WTFAnnotateHappensAfter(char *f, int l, uptr addr) {
  SCOPED_ANNOTATION(AnnotateHappensAfter);
}

// WebKit's benign-race annotation shares the common implementation.
void INTERFACE_ATTRIBUTE WTFAnnotateBenignRaceSized(
    char *f, int l, uptr mem, uptr sz, char *desc) {
  SCOPED_ANNOTATION(AnnotateBenignRaceSized);
  BenignRaceImpl(f, l, mem, sz, desc);
}
442
// Valgrind-compatibility shim; controlled by the running_on_valgrind flag.
int INTERFACE_ATTRIBUTE RunningOnValgrind() {
  return flags()->running_on_valgrind;
}

// Valgrind-compatibility shim; weak so applications can override it.
double __attribute__((weak)) INTERFACE_ATTRIBUTE ValgrindSlowdown(void) {
  return 10.0;
}
450
451const char INTERFACE_ATTRIBUTE* ThreadSanitizerQuery(const char *query) {
452  if (internal_strcmp(query, "pure_happens_before") == 0)
453    return "1";
454  else
455    return "0";
456}
457
// MemorySanitizer-style annotations: intentionally empty stubs here
// (they do not even run the annotation prologue).
void INTERFACE_ATTRIBUTE
AnnotateMemoryIsInitialized(char *f, int l, uptr mem, uptr sz) {}
void INTERFACE_ATTRIBUTE
AnnotateMemoryIsUninitialized(char *f, int l, uptr mem, uptr sz) {}
462
// Note: the parameter is called flagz, because flags is already taken
// by the global function that returns flags.
// Registers a custom mutex object; only creation-time flags are honored.
INTERFACE_ATTRIBUTE
void __tsan_mutex_create(void *m, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_create);
  MutexCreate(thr, pc, (uptr)m, flagz & MutexCreationFlagMask);
}

// Unregisters a custom mutex object.
INTERFACE_ATTRIBUTE
void __tsan_mutex_destroy(void *m, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_destroy);
  MutexDestroy(thr, pc, (uptr)m, flagz);
}
476
// Called before a custom mutex lock operation. Starts ignore regions so that
// memory accesses and synchronization performed by the mutex implementation
// itself (between pre_lock and post_lock) are not tracked.
INTERFACE_ATTRIBUTE
void __tsan_mutex_pre_lock(void *m, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_pre_lock);
  if (!(flagz & MutexFlagTryLock)) {
    // Deadlock detection only sees definite (non-try) lock attempts.
    if (flagz & MutexFlagReadLock)
      MutexPreReadLock(thr, pc, (uptr)m);
    else
      MutexPreLock(thr, pc, (uptr)m);
  }
  ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
  ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
}

// Called after a custom mutex lock operation. Closes the ignore regions
// opened in pre_lock and records the acquisition unless a try-lock failed.
INTERFACE_ATTRIBUTE
void __tsan_mutex_post_lock(void *m, unsigned flagz, int rec) {
  SCOPED_ANNOTATION(__tsan_mutex_post_lock);
  ThreadIgnoreSyncEnd(thr, pc);
  ThreadIgnoreEnd(thr, pc);
  if (!(flagz & MutexFlagTryLockFailed)) {
    if (flagz & MutexFlagReadLock)
      MutexPostReadLock(thr, pc, (uptr)m, flagz);
    else
      MutexPostLock(thr, pc, (uptr)m, flagz, rec);
  }
}
502
// Called before a custom mutex unlock. Records the release and opens ignore
// regions covering the unlock implementation; returns the value produced by
// MutexUnlock (0 for the read-lock path). Returns 0 without any processing
// when annotations are disabled (see SCOPED_ANNOTATION_RET).
INTERFACE_ATTRIBUTE
int __tsan_mutex_pre_unlock(void *m, unsigned flagz) {
  SCOPED_ANNOTATION_RET(__tsan_mutex_pre_unlock, 0);
  int ret = 0;
  if (flagz & MutexFlagReadLock) {
    // Recursive unlock is not meaningful for read locks.
    CHECK(!(flagz & MutexFlagRecursiveUnlock));
    MutexReadUnlock(thr, pc, (uptr)m);
  } else {
    ret = MutexUnlock(thr, pc, (uptr)m, flagz);
  }
  ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
  ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
  return ret;
}

// Called after a custom mutex unlock: closes the pre_unlock ignore regions.
INTERFACE_ATTRIBUTE
void __tsan_mutex_post_unlock(void *m, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_post_unlock);
  ThreadIgnoreSyncEnd(thr, pc);
  ThreadIgnoreEnd(thr, pc);
}
524
// Brackets a custom condvar/futex signal operation with ignore regions, so
// the signaling implementation's own accesses are not tracked.
INTERFACE_ATTRIBUTE
void __tsan_mutex_pre_signal(void *addr, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_pre_signal);
  ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
  ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_post_signal(void *addr, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_post_signal);
  ThreadIgnoreSyncEnd(thr, pc);
  ThreadIgnoreEnd(thr, pc);
}

// Temporarily suspends the ignore regions opened by a surrounding
// pre_lock/pre_unlock/pre_signal (e.g. while a blocking wait diverts to
// user code); post_divert re-establishes them.
INTERFACE_ATTRIBUTE
void __tsan_mutex_pre_divert(void *addr, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_pre_divert);
  // Exit from ignore region started in __tsan_mutex_pre_lock/unlock/signal.
  ThreadIgnoreSyncEnd(thr, pc);
  ThreadIgnoreEnd(thr, pc);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_post_divert(void *addr, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_post_divert);
  ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
  ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
}
553}  // extern "C"
554