// tsan_sync.cc -- FreeBSD contrib import of the LLVM compiler-rt file, upstream revision 276789.
1238901Sandrew//===-- tsan_sync.cc ------------------------------------------------------===// 2238901Sandrew// 3238901Sandrew// The LLVM Compiler Infrastructure 4238901Sandrew// 5238901Sandrew// This file is distributed under the University of Illinois Open Source 6238901Sandrew// License. See LICENSE.TXT for details. 7238901Sandrew// 8238901Sandrew//===----------------------------------------------------------------------===// 9238901Sandrew// 10238901Sandrew// This file is a part of ThreadSanitizer (TSan), a race detector. 11238901Sandrew// 12238901Sandrew//===----------------------------------------------------------------------===// 13238901Sandrew#include "sanitizer_common/sanitizer_placement_new.h" 14238901Sandrew#include "tsan_sync.h" 15238901Sandrew#include "tsan_rtl.h" 16238901Sandrew#include "tsan_mman.h" 17238901Sandrew 18238901Sandrewnamespace __tsan { 19238901Sandrew 20276789Sdimvoid DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s); 21238901Sandrew 22276789SdimSyncVar::SyncVar() 23276789Sdim : mtx(MutexTypeSyncVar, StatMtxSyncVar) { 24276789Sdim Reset(0); 25238901Sandrew} 26238901Sandrew 27276789Sdimvoid SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) { 28276789Sdim this->addr = addr; 29276789Sdim this->uid = uid; 30276789Sdim this->next = 0; 31276789Sdim 32276789Sdim creation_stack_id = 0; 33276789Sdim if (kCppMode) // Go does not use them 34276789Sdim creation_stack_id = CurrentStackId(thr, pc); 35276789Sdim if (common_flags()->detect_deadlocks) 36276789Sdim DDMutexInit(thr, pc, this); 37238901Sandrew} 38238901Sandrew 39276789Sdimvoid SyncVar::Reset(ThreadState *thr) { 40276789Sdim uid = 0; 41276789Sdim creation_stack_id = 0; 42276789Sdim owner_tid = kInvalidTid; 43276789Sdim last_lock = 0; 44276789Sdim recursion = 0; 45276789Sdim is_rw = 0; 46276789Sdim is_recursive = 0; 47276789Sdim is_broken = 0; 48276789Sdim is_linker_init = 0; 49276789Sdim 50276789Sdim if (thr == 0) { 51276789Sdim CHECK_EQ(clock.size(), 0); 52276789Sdim 
CHECK_EQ(read_clock.size(), 0); 53276789Sdim } else { 54276789Sdim clock.Reset(&thr->clock_cache); 55276789Sdim read_clock.Reset(&thr->clock_cache); 56238901Sandrew } 57238901Sandrew} 58238901Sandrew 59276789SdimMetaMap::MetaMap() { 60276789Sdim atomic_store(&uid_gen_, 0, memory_order_relaxed); 61245614Sandrew} 62245614Sandrew 63276789Sdimvoid MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) { 64276789Sdim u32 idx = block_alloc_.Alloc(&thr->block_cache); 65276789Sdim MBlock *b = block_alloc_.Map(idx); 66276789Sdim b->siz = sz; 67276789Sdim b->tid = thr->tid; 68276789Sdim b->stk = CurrentStackId(thr, pc); 69276789Sdim u32 *meta = MemToMeta(p); 70276789Sdim DCHECK_EQ(*meta, 0); 71276789Sdim *meta = idx | kFlagBlock; 72245614Sandrew} 73245614Sandrew 74276789Sdimuptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) { 75276789Sdim MBlock* b = GetBlock(p); 76276789Sdim if (b == 0) 77276789Sdim return 0; 78276789Sdim uptr sz = RoundUpTo(b->siz, kMetaShadowCell); 79276789Sdim FreeRange(thr, pc, p, sz); 80276789Sdim return sz; 81245614Sandrew} 82245614Sandrew 83276789Sdimvoid MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) { 84276789Sdim u32 *meta = MemToMeta(p); 85276789Sdim u32 *end = MemToMeta(p + sz); 86276789Sdim if (end == meta) 87276789Sdim end++; 88276789Sdim for (; meta < end; meta++) { 89276789Sdim u32 idx = *meta; 90276789Sdim *meta = 0; 91276789Sdim for (;;) { 92276789Sdim if (idx == 0) 93245614Sandrew break; 94276789Sdim if (idx & kFlagBlock) { 95276789Sdim block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask); 96276789Sdim break; 97276789Sdim } else if (idx & kFlagSync) { 98276789Sdim DCHECK(idx & kFlagSync); 99276789Sdim SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask); 100276789Sdim u32 next = s->next; 101276789Sdim s->Reset(thr); 102276789Sdim sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask); 103276789Sdim idx = next; 104276789Sdim } else { 105276789Sdim CHECK(0); 106238901Sandrew } 107238901Sandrew } 108238901Sandrew } 
109238901Sandrew} 110238901Sandrew 111276789SdimMBlock* MetaMap::GetBlock(uptr p) { 112276789Sdim u32 *meta = MemToMeta(p); 113276789Sdim u32 idx = *meta; 114276789Sdim for (;;) { 115276789Sdim if (idx == 0) 116276789Sdim return 0; 117276789Sdim if (idx & kFlagBlock) 118276789Sdim return block_alloc_.Map(idx & ~kFlagMask); 119276789Sdim DCHECK(idx & kFlagSync); 120276789Sdim SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask); 121276789Sdim idx = s->next; 122245614Sandrew } 123238901Sandrew} 124238901Sandrew 125276789SdimSyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc, 126276789Sdim uptr addr, bool write_lock) { 127276789Sdim return GetAndLock(thr, pc, addr, write_lock, true); 128238901Sandrew} 129238901Sandrew 130276789SdimSyncVar* MetaMap::GetIfExistsAndLock(uptr addr) { 131276789Sdim return GetAndLock(0, 0, addr, true, false); 132238901Sandrew} 133238901Sandrew 134276789SdimSyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc, 135276789Sdim uptr addr, bool write_lock, bool create) { 136276789Sdim u32 *meta = MemToMeta(addr); 137276789Sdim u32 idx0 = *meta; 138276789Sdim u32 myidx = 0; 139276789Sdim SyncVar *mys = 0; 140276789Sdim for (;;) { 141276789Sdim u32 idx = idx0; 142276789Sdim for (;;) { 143276789Sdim if (idx == 0) 144276789Sdim break; 145276789Sdim if (idx & kFlagBlock) 146276789Sdim break; 147276789Sdim DCHECK(idx & kFlagSync); 148276789Sdim SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask); 149276789Sdim if (s->addr == addr) { 150276789Sdim if (myidx != 0) { 151276789Sdim mys->Reset(thr); 152276789Sdim sync_alloc_.Free(&thr->sync_cache, myidx); 153276789Sdim } 154276789Sdim if (write_lock) 155276789Sdim s->mtx.Lock(); 156276789Sdim else 157276789Sdim s->mtx.ReadLock(); 158276789Sdim return s; 159276789Sdim } 160276789Sdim idx = s->next; 161276789Sdim } 162276789Sdim if (!create) 163276789Sdim return 0; 164276789Sdim if (*meta != idx0) { 165276789Sdim idx0 = *meta; 166276789Sdim continue; 167276789Sdim } 168238901Sandrew 169276789Sdim if 
(myidx == 0) { 170276789Sdim const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed); 171276789Sdim myidx = sync_alloc_.Alloc(&thr->sync_cache); 172276789Sdim mys = sync_alloc_.Map(myidx); 173276789Sdim mys->Init(thr, pc, addr, uid); 174276789Sdim } 175276789Sdim mys->next = idx0; 176276789Sdim if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0, 177276789Sdim myidx | kFlagSync, memory_order_release)) { 178276789Sdim if (write_lock) 179276789Sdim mys->mtx.Lock(); 180276789Sdim else 181276789Sdim mys->mtx.ReadLock(); 182276789Sdim return mys; 183276789Sdim } 184238901Sandrew } 185238901Sandrew} 186238901Sandrew 187276789Sdimvoid MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) { 188276789Sdim // src and dst can overlap, 189276789Sdim // there are no concurrent accesses to the regions (e.g. stop-the-world). 190276789Sdim CHECK_NE(src, dst); 191276789Sdim CHECK_NE(sz, 0); 192276789Sdim uptr diff = dst - src; 193276789Sdim u32 *src_meta = MemToMeta(src); 194276789Sdim u32 *dst_meta = MemToMeta(dst); 195276789Sdim u32 *src_meta_end = MemToMeta(src + sz); 196276789Sdim uptr inc = 1; 197276789Sdim if (dst > src) { 198276789Sdim src_meta = MemToMeta(src + sz) - 1; 199276789Sdim dst_meta = MemToMeta(dst + sz) - 1; 200276789Sdim src_meta_end = MemToMeta(src) - 1; 201276789Sdim inc = -1; 202238901Sandrew } 203276789Sdim for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) { 204276789Sdim CHECK_EQ(*dst_meta, 0); 205276789Sdim u32 idx = *src_meta; 206276789Sdim *src_meta = 0; 207276789Sdim *dst_meta = idx; 208276789Sdim // Patch the addresses in sync objects. 
209276789Sdim while (idx != 0) { 210276789Sdim if (idx & kFlagBlock) 211276789Sdim break; 212276789Sdim CHECK(idx & kFlagSync); 213276789Sdim SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask); 214276789Sdim s->addr += diff; 215276789Sdim idx = s->next; 216245614Sandrew } 217238901Sandrew } 218238901Sandrew} 219238901Sandrew 220276789Sdimvoid MetaMap::OnThreadIdle(ThreadState *thr) { 221276789Sdim block_alloc_.FlushCache(&thr->block_cache); 222276789Sdim sync_alloc_.FlushCache(&thr->sync_cache); 223238901Sandrew} 224238901Sandrew 225238901Sandrew} // namespace __tsan 226