tsan_sync.cc revision 238901
//===-- tsan_sync.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

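// A SyncVar carries the per-address state of one synchronization object:
// its identity (addr), mutex ownership (owner_tid, recursion) and the
// is_rw/is_recursive/is_broken flags, all guarded by its own mtx.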
SyncVar::SyncVar(uptr addr)
  : mtx(MutexTypeSyncVar, StatMtxSyncVar)
  , addr(addr)
  , owner_tid(kInvalidTid)
  , recursion()
  , is_rw()
  , is_recursive()
  , is_broken() {
}

SyncTab::Part::Part()
  : mtx(MutexTypeSyncTab, StatMtxSyncTab)
  , val() {
}

SyncTab::SyncTab() {
}

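// Releases any SyncVar objects still linked into the table; entries removed
// earlier via GetAndRemove() are presumably owned (and freed) by the caller
// that removed them.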
SyncTab::~SyncTab() {
  for (int i = 0; i < kPartCount; i++) {
    while (tab_[i].val) {
      SyncVar *tmp = tab_[i].val;
      tab_[i].val = tmp->next;
      DestroyAndFree(tmp);
    }
  }
}

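// Returns the SyncVar for addr, creating it on first use, and leaves the
// returned object locked (write- or read-locked per write_lock). The lookup
// is double-checked: a fast scan under the part's read lock first, then a
// re-scan under the write lock before inserting, so each address gets
// exactly one SyncVar even under concurrent first accesses.
//
// A sketch of the expected caller pattern (reaching the table through a
// global context as "ctx->synctab" is an assumption, not defined here):
//   SyncVar *s = ctx->synctab.GetAndLock(thr, pc, addr, /*write_lock=*/true);
//   ... inspect or update *s ...
//   s->mtx.Unlock();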
SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock) {
  Part *p = &tab_[PartIdx(addr)];
  {
    ReadLock l(&p->mtx);
    for (SyncVar *res = p->val; res; res = res->next) {
      if (res->addr == addr) {
        if (write_lock)
          res->mtx.Lock();
        else
          res->mtx.ReadLock();
        return res;
      }
    }
  }
  {
    Lock l(&p->mtx);
    SyncVar *res = p->val;
    for (; res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      StatInc(thr, StatSyncCreated);
      void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
      res = new(mem) SyncVar(addr);
#ifndef TSAN_GO
      res->creation_stack.ObtainCurrent(thr, pc);
#endif
      res->next = p->val;
      p->val = res;
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
}

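// Unlinks and returns the SyncVar for addr, or 0 if none exists. The
// Lock()/Unlock() pair on the victim drains any thread that fetched it via
// GetAndLock() before the unlink, so the caller can then free it safely.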
SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
  Part *p = &tab_[PartIdx(addr)];
  SyncVar *res = 0;
  {
    Lock l(&p->mtx);
    SyncVar **prev = &p->val;
    res = *prev;
    while (res) {
      if (res->addr == addr) {
        *prev = res->next;
        break;
      }
      prev = &res->next;
      res = *prev;
    }
  }
  if (res) {
    StatInc(thr, StatSyncDestroyed);
    res->mtx.Lock();
    res->mtx.Unlock();
  }
  return res;
}

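// Approximates the heap footprint of one SyncVar: the object itself plus
// its two vector clocks and its creation stack.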
uptr SyncVar::GetMemoryConsumption() {
  return sizeof(*this)
      + clock.size() * sizeof(u64)
      + read_clock.size() * sizeof(u64)
      + creation_stack.Size() * sizeof(uptr);
}

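// Accumulates the footprint of every live SyncVar into the return value and
// their count into *nsync. Note that *nsync is only incremented, never
// zeroed; the caller is presumably expected to initialize it.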
uptr SyncTab::GetMemoryConsumption(uptr *nsync) {
  uptr mem = 0;
  for (int i = 0; i < kPartCount; i++) {
    Part *p = &tab_[i];
    Lock l(&p->mtx);
    for (SyncVar *s = p->val; s; s = s->next) {
      *nsync += 1;
      mem += s->GetMemoryConsumption();
    }
  }
  return mem;
}

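// Hashes an address into one of the kPartCount buckets. The low 3 bits are
// shifted out first, presumably because synchronization objects are at
// least 8-byte aligned and those bits carry no entropy.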
int SyncTab::PartIdx(uptr addr) {
  return (addr >> 3) % kPartCount;
}

StackTrace::StackTrace()
    : n_()
    , s_()
    , c_() {
}

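// The two-argument constructor wraps a caller-owned buffer of cnt slots;
// a non-zero c_ marks the storage as external so Reset() never frees it.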
StackTrace::StackTrace(uptr *buf, uptr cnt)
    : n_()
    , s_(buf)
    , c_(cnt) {
  CHECK_NE(buf, 0);
  CHECK_NE(cnt, 0);
}

StackTrace::~StackTrace() {
  Reset();
}

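// Clears the trace. Storage is released only when this object allocated it
// itself (s_ set while c_ == 0); caller-owned buffers are left intact.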
void StackTrace::Reset() {
  if (s_ && !c_) {
    CHECK_NE(n_, 0);
    internal_free(s_);
    s_ = 0;
  }
  n_ = 0;
}

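// Replaces the trace with cnt frames copied from pcs, allocating fresh
// storage unless a caller-owned buffer (which must fit cnt frames) is set.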
void StackTrace::Init(const uptr *pcs, uptr cnt) {
  Reset();
  if (cnt == 0)
    return;
  if (c_) {
    CHECK_NE(s_, 0);
    CHECK_LE(cnt, c_);
  } else {
    s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
  }
  n_ = cnt;
  internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
}

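// Captures the calling thread's current shadow (call) stack, optionally
// appending toppc (e.g. the pc of the operation being recorded); the
// !!toppc terms reserve the extra slot when sizing and checking the buffer.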
void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
  Reset();
  n_ = thr->shadow_stack_pos - thr->shadow_stack;
  if (n_ + !!toppc == 0)
    return;
  if (c_) {
    CHECK_NE(s_, 0);
    CHECK_LE(n_ + !!toppc, c_);
  } else {
    s_ = (uptr*)internal_alloc(MBlockStackTrace,
                               (n_ + !!toppc) * sizeof(s_[0]));
  }
  for (uptr i = 0; i < n_; i++)
    s_[i] = thr->shadow_stack[i];
  if (toppc) {
    s_[n_] = toppc;
    n_++;
  }
}

void StackTrace::CopyFrom(const StackTrace& other) {
  Reset();
  Init(other.Begin(), other.Size());
}

bool StackTrace::IsEmpty() const {
  return n_ == 0;
}

uptr StackTrace::Size() const {
  return n_;
}

uptr StackTrace::Get(uptr i) const {
  CHECK_LT(i, n_);
  return s_[i];
}

const uptr *StackTrace::Begin() const {
  return s_;
}

}  // namespace __tsan