// tsan_sync.h revision 1.4
//===-- tsan_sync.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#ifndef TSAN_SYNC_H
#define TSAN_SYNC_H

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "tsan_defs.h"
#include "tsan_clock.h"
#include "tsan_dense_alloc.h"

namespace __tsan {

// These need to match __tsan_mutex_* flags defined in tsan_interface.h.
// See documentation there as well.
enum MutexFlags {
  MutexFlagLinkerInit = 1 << 0, // __tsan_mutex_linker_init
  MutexFlagWriteReentrant = 1 << 1, // __tsan_mutex_write_reentrant
  MutexFlagReadReentrant = 1 << 2, // __tsan_mutex_read_reentrant
  MutexFlagReadLock = 1 << 3, // __tsan_mutex_read_lock
  MutexFlagTryLock = 1 << 4, // __tsan_mutex_try_lock
  MutexFlagTryLockFailed = 1 << 5, // __tsan_mutex_try_lock_failed
  MutexFlagRecursiveLock = 1 << 6, // __tsan_mutex_recursive_lock
  MutexFlagRecursiveUnlock = 1 << 7, // __tsan_mutex_recursive_unlock
  MutexFlagNotStatic = 1 << 8, // __tsan_mutex_not_static

  // The following flags are runtime private.
  // Mutex API misuse was detected, so don't report any more.
  MutexFlagBroken = 1 << 30,
  // We did not intercept pre lock event, so handle it on post lock.
  MutexFlagDoPreLockOnPostLock = 1 << 29,
  // Must list all mutex creation flags.
  MutexCreationFlagMask = MutexFlagLinkerInit |
                          MutexFlagWriteReentrant |
                          MutexFlagReadReentrant |
                          MutexFlagNotStatic,
};

// SyncVar is a descriptor of a user synchronization object
// (mutex or an atomic variable).
struct SyncVar {
  SyncVar();

  uptr addr;  // overwritten by DenseSlabAlloc freelist
  Mutex mtx;
  u64 uid;  // Globally unique id.
  StackID creation_stack_id;
  Tid owner_tid;  // Set only by exclusive owners.
  u64 last_lock;
  int recursion;
  atomic_uint32_t flags;  // Combination of MutexFlags values.
  u32 next;  // in MetaMap
  DDMutex dd;
  SyncClock read_clock;  // Used for rw mutexes only.
  // The clock is placed last, so that it is situated on a different cache line
  // with the mtx. This reduces contention for hot sync objects.
  SyncClock clock;

  void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid, bool save_stack);
  void Reset(Processor *proc);

  // Packs the object address and the low part of uid into a single u64 id
  // (the inverse operation is SplitId).
  u64 GetId() const {
    // 48 lsb is addr, then 14 bits is low part of uid, then 2 zero bits.
    // NOTE(review): GetLsb(..., 60) keeps only bits 0..59, i.e. 12 uid bits
    // (48..59), not the 14 the comment above implies -- confirm the intended
    // uid width against CheckId, which masks to 14 bits.
    return GetLsb((u64)addr | (uid << 48), 60);
  }
  // Returns true if the low bits of this->uid match the (already truncated)
  // uid recovered from an id; `uid` itself must fit in 14 bits (CHECKed).
  bool CheckId(u64 uid) const {
    CHECK_EQ(uid, GetLsb(uid, 14));
    return GetLsb(this->uid, 14) == uid;
  }
  // Splits an id produced by GetId back into the address (return value)
  // and the uid part (stored into *uid).
  static uptr SplitId(u64 id, u64 *uid) {
    *uid = id >> 48;
    return (uptr)GetLsb(id, 48);
  }

  // Returns true if any of the flag bits in f are currently set.
  bool IsFlagSet(u32 f) const {
    return atomic_load_relaxed(&flags) & f;
  }

  // ORs f into flags.
  // NOTE(review): this is a relaxed load + store pair, not an atomic RMW;
  // presumably concurrent SetFlags calls are serialized externally (e.g. via
  // mtx) -- verify at call sites.
  void SetFlags(u32 f) {
    atomic_store_relaxed(&flags, atomic_load_relaxed(&flags) | f);
  }

  // Records the creation-time flags from flagz, but only once: if any
  // creation flag is already set, the new ones are ignored.
  void UpdateFlags(u32 flagz) {
    // Filter out operation flags.
    if (!(flagz & MutexCreationFlagMask))
      return;
    u32 current = atomic_load_relaxed(&flags);
    if (current & MutexCreationFlagMask)
      return;
    // Note: this can be called from MutexPostReadLock which holds only read
    // lock on the SyncVar.
    atomic_store_relaxed(&flags, current | (flagz & MutexCreationFlagMask));
  }
};

// MetaMap maps app addresses to heap block (MBlock) and sync var (SyncVar)
// descriptors.
// It uses 1/2 direct shadow, see tsan_platform.h for the mapping.
class MetaMap {
 public:
  MetaMap();

  // Block (heap allocation) lifetime management; definitions in tsan_sync.cpp.
  void AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz);
  uptr FreeBlock(Processor *proc, uptr p);
  bool FreeRange(Processor *proc, uptr p, uptr sz);
  void ResetRange(Processor *proc, uptr p, uptr sz);
  MBlock* GetBlock(uptr p);

  // Returns the SyncVar for addr, creating one if it does not exist yet.
  SyncVar *GetSyncOrCreate(ThreadState *thr, uptr pc, uptr addr,
                           bool save_stack) {
    return GetSync(thr, pc, addr, true, save_stack);
  }
  // Returns the SyncVar for addr, or null if none has been created.
  SyncVar *GetSyncIfExists(uptr addr) {
    return GetSync(nullptr, 0, addr, false, false);
  }

  // Moves the meta info for the range [src, src+sz) to [dst, dst+sz)
  // (e.g. after a user realloc).
  void MoveMemory(uptr src, uptr dst, uptr sz);

  void OnProcIdle(Processor *proc);

  // Memory consumed by block and sync object descriptors.
  // NOTE(review): units (bytes vs. object counts) are not visible here --
  // see GetMemoryStats definition.
  struct MemoryStats {
    uptr mem_block;
    uptr sync_obj;
  };

  MemoryStats GetMemoryStats() const;

 private:
  // Top two bits of a meta index tag the entry kind: block vs. sync object.
  static const u32 kFlagMask = 3u << 30;
  static const u32 kFlagBlock = 1u << 30;
  static const u32 kFlagSync = 2u << 30;
  typedef DenseSlabAlloc<MBlock, 1 << 18, 1 << 12, kFlagMask> BlockAlloc;
  typedef DenseSlabAlloc<SyncVar, 1 << 20, 1 << 10, kFlagMask> SyncAlloc;
  BlockAlloc block_alloc_;
  SyncAlloc sync_alloc_;
  atomic_uint64_t uid_gen_;  // Presumably the source of SyncVar::uid values
                             // -- confirm in tsan_sync.cpp.

  SyncVar *GetSync(ThreadState *thr, uptr pc, uptr addr, bool create,
                   bool save_stack);
};

}  // namespace __tsan

#endif  // TSAN_SYNC_H