tsan_sync.cc revision 276789
//===-- tsan_sync.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);

SyncVar::SyncVar()
    : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
  Reset(0);  // thr == 0 is safe here: the clocks are still empty.
}

void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
  this->addr = addr;
  this->uid = uid;
  this->next = 0;

  creation_stack_id = 0;
  if (kCppMode)  // Go does not use creation stacks.
    creation_stack_id = CurrentStackId(thr, pc);
  if (common_flags()->detect_deadlocks)
    DDMutexInit(thr, pc, this);
}

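// Returns the SyncVar to a pristine state. thr may be 0 only when the
// vector clocks are known to be empty (as in the constructor); otherwise
// the thread's clock cache is required to release them.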
void SyncVar::Reset(ThreadState *thr) {
  uid = 0;
  creation_stack_id = 0;
  owner_tid = kInvalidTid;
  last_lock = 0;
  recursion = 0;
  is_rw = 0;
  is_recursive = 0;
  is_broken = 0;
  is_linker_init = 0;

  if (thr == 0) {
    CHECK_EQ(clock.size(), 0);
    CHECK_EQ(read_clock.size(), 0);
  } else {
    clock.Reset(&thr->clock_cache);
    read_clock.Reset(&thr->clock_cache);
  }
}

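// MetaMap maps application memory to meta objects: MBlock descriptors for
// heap blocks and SyncVar descriptors for synchronization objects. A meta
// shadow cell (see MemToMeta) holds either a heap block index tagged with
// kFlagBlock, or the head of a singly-linked list of SyncVars tagged with
// kFlagSync.
//
// A sketch of typical usage from the rest of the runtime (hedged: the
// exact call sites live outside this file, e.g. in tsan_mman.cc and
// tsan_rtl_mutex.cc):
//   ctx->metamap.AllocBlock(thr, pc, p, sz);       // on heap allocation
//   uptr sz = ctx->metamap.FreeBlock(thr, pc, p);  // on deallocation
//   SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
//   ...                                            // operate on s
//   s->mtx.Unlock();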
MetaMap::MetaMap() {
  atomic_store(&uid_gen_, 0, memory_order_relaxed);
}

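// Associates a fresh MBlock (size, allocating thread and stack) with the
// heap block at [p, p + sz). Only the first meta cell of the block is
// written; the rest of the range stays zero until something attaches to it.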
void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  u32 idx = block_alloc_.Alloc(&thr->block_cache);
  MBlock *b = block_alloc_.Map(idx);
  b->siz = sz;
  b->tid = thr->tid;
  b->stk = CurrentStackId(thr, pc);
  u32 *meta = MemToMeta(p);
  DCHECK_EQ(*meta, 0);
  *meta = idx | kFlagBlock;
}

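// Frees the block at p, if any, together with everything attached to its
// meta range; returns the size that was covered (rounded up to the meta
// cell size), or 0 if there was no block.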
uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) {
  MBlock *b = GetBlock(p);
  if (b == 0)
    return 0;
  uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
  FreeRange(thr, pc, p, sz);
  return sz;
}

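// Releases every meta object attached to the cells in [p, p + sz): block
// indices are returned to the block allocator, and each SyncVar in a
// cell's chain is reset and returned to the sync allocator.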
void MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  u32 *meta = MemToMeta(p);
  u32 *end = MemToMeta(p + sz);
  if (end == meta)  // Process at least one cell for sub-cell ranges.
    end++;
  for (; meta < end; meta++) {
    u32 idx = *meta;
    *meta = 0;
    for (;;) {
      if (idx == 0)
        break;
      if (idx & kFlagBlock) {
        block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask);
        break;
      } else if (idx & kFlagSync) {
        SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
        u32 next = s->next;
        s->Reset(thr);
        sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask);
        idx = next;
      } else {
        CHECK(0);
      }
    }
  }
}

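// Returns the MBlock for the block containing p, or 0 if there is none.
// SyncVars chained ahead of the block descriptor are skipped.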
MBlock* MetaMap::GetBlock(uptr p) {
  u32 *meta = MemToMeta(p);
  u32 idx = *meta;
  for (;;) {
    if (idx == 0)
      return 0;
    if (idx & kFlagBlock)
      return block_alloc_.Map(idx & ~kFlagMask);
    DCHECK(idx & kFlagSync);
    SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
    idx = s->next;
  }
}

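// The two public lookup entry points. GetOrCreateAndLock may create the
// SyncVar; GetIfExistsAndLock never creates and always takes the write
// lock (it passes thr == 0, which is valid only because no allocation can
// happen on the non-creating path).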
SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

SyncVar* MetaMap::GetIfExistsAndLock(uptr addr) {
  return GetAndLock(0, 0, addr, true, false);
}

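// Looks up the SyncVar for addr in the meta cell's chain and returns it
// locked. If it is absent and create is set, a new SyncVar is published at
// the chain head with a single compare-and-swap; on CAS failure the chain
// is re-scanned, and a SyncVar this thread allocated speculatively is
// recycled if another thread won the race for the same address.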
SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
  u32 *meta = MemToMeta(addr);
  u32 idx0 = *meta;
  u32 myidx = 0;
  SyncVar *mys = 0;
  for (;;) {
    u32 idx = idx0;
    for (;;) {
      if (idx == 0)
        break;
      if (idx & kFlagBlock)
        break;
      DCHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      if (s->addr == addr) {
        if (myidx != 0) {
          // Another thread published a SyncVar for this address first;
          // recycle the one we allocated speculatively.
          mys->Reset(thr);
          sync_alloc_.Free(&thr->sync_cache, myidx);
        }
        if (write_lock)
          s->mtx.Lock();
        else
          s->mtx.ReadLock();
        return s;
      }
      idx = s->next;
    }
    if (!create)
      return 0;
    if (*meta != idx0) {
      // The chain changed under us; re-scan it.
      idx0 = *meta;
      continue;
    }

    if (myidx == 0) {
      const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
      myidx = sync_alloc_.Alloc(&thr->sync_cache);
      mys = sync_alloc_.Map(myidx);
      mys->Init(thr, pc, addr, uid);
    }
    mys->next = idx0;
    // Publish the new SyncVar at the chain head. On CAS failure idx0 is
    // refreshed with the current value of *meta and the scan restarts.
    if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
        myidx | kFlagSync, memory_order_release)) {
      if (write_lock)
        mys->mtx.Lock();
      else
        mys->mtx.ReadLock();
      return mys;
    }
  }
}

void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
  // src and dst can overlap; the caller must guarantee that there are no
  // concurrent accesses to either region (e.g. during stop-the-world).
  CHECK_NE(src, dst);
  CHECK_NE(sz, 0);
  uptr diff = dst - src;
  u32 *src_meta = MemToMeta(src);
  u32 *dst_meta = MemToMeta(dst);
  u32 *src_meta_end = MemToMeta(src + sz);
  uptr inc = 1;
  if (dst > src) {
    // The regions may overlap with dst ahead of src; walk the meta cells
    // backwards so that each cell is read before it can be overwritten.
    src_meta = MemToMeta(src + sz) - 1;
    dst_meta = MemToMeta(dst + sz) - 1;
    src_meta_end = MemToMeta(src) - 1;
    inc = -1;
  }
  for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
    CHECK_EQ(*dst_meta, 0);
    u32 idx = *src_meta;
    *src_meta = 0;
    *dst_meta = idx;
    // Patch the addresses in sync objects.
    while (idx != 0) {
      if (idx & kFlagBlock)
        break;
      CHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      s->addr += diff;
      idx = s->next;
    }
  }
}

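// Called when a thread goes idle: returns the thread's cached MBlock and
// SyncVar slots to the shared allocators.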
void MetaMap::OnThreadIdle(ThreadState *thr) {
  block_alloc_.FlushCache(&thr->block_cache);
  sync_alloc_.FlushCache(&thr->sync_cache);
}

}  // namespace __tsan