//===-- tsan_rtl_report.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Report generation: symbolization of stacks, collection of threads/mutexes/
// locations involved in a race, suppression matching and report output.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;

static ReportStack *SymbolizeStack(StackTrace trace);

// Called on CHECK failures inside the TSan runtime itself: prints the failed
// condition and the current stack, then terminates the process.
void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  // There is high probability that interceptors will check-fail as well,
  // on the other hand there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
  cur_thread()->ignore_sync++;
  cur_thread()->ignore_reads_and_writes++;
#endif
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
  Die();
}

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif

// Weak notification hook: invoked with every report that is actually printed.
SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_on_report(const ReportDesc *rep) {
  (void)rep;
}

// Strips runtime frames from the bottom of a symbolized stack (the frame
// above main(), the internal thread start routine, or global-ctor init),
// so that printed stacks end at user code.
static void StackStripMain(SymbolizedStack *frames) {
  SymbolizedStack *last_frame = nullptr;
  SymbolizedStack *last_frame2 = nullptr;
  // Find the last and second-to-last frames.
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    last_frame2 = last_frame;
    last_frame = cur;
  }

  if (last_frame2 == 0)
    return;
#if !SANITIZER_GO
  const char *last = last_frame->info.function;
  const char *last2 = last_frame2->info.function;
  // Strip frame above 'main'
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered stack completely. Trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However we must try hard to not miss it
    // due to our fault.
    DPrintf("Bottom stack frame is missed\n");
  }
#else
  // The last frame always points into runtime (gosched0, goexit0, runtime.main).
  last_frame->ClearAll();
  last_frame2->next = nullptr;
#endif
}

// Fetches a stack from the stack depot by id and symbolizes it.
// Returns null for the empty (zero) id or if the depot lookup fails.
ReportStack *SymbolizeStackId(u32 stack_id) {
  if (stack_id == 0)
    return 0;
  StackTrace stack = StackDepotGet(stack_id);
  if (stack.trace == nullptr)
    return nullptr;
  return SymbolizeStack(stack);
}

// Symbolizes a raw stack trace into a ReportStack (list of frames),
// stripping runtime frames at the bottom. Returns null for an empty trace.
static ReportStack *SymbolizeStack(StackTrace trace) {
  if (trace.size == 0)
    return 0;
  SymbolizedStack *top = nullptr;
  for (uptr si = 0; si < trace.size; si++) {
    const uptr pc = trace.trace[si];
    uptr pc1 = pc;
    // We obtain the return address, but we're interested in the previous
    // instruction.
    // NOTE(review): PCs with kExternalPCBit set are passed through unchanged;
    // presumably they are handled by the external symbolizer as-is.
    if ((pc & kExternalPCBit) == 0)
      pc1 = StackTrace::GetPreviousInstructionPc(pc);
    SymbolizedStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    SymbolizedStack *last = ent;
    while (last->next) {
      last->info.address = pc;  // restore original pc for report
      last = last->next;
    }
    last->info.address = pc;  // restore original pc for report
    last->next = top;
    top = ent;
  }
  StackStripMain(top);

  ReportStack *stack = ReportStack::New();
  stack->frames = top;
  return stack;
}

// Allocates the report descriptor and takes the global report mutex;
// the mutex is released in the destructor. Requires the thread registry
// to be locked by the caller.
ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
  ctx->thread_registry->CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  rep_->tag = tag;
  ctx->report_mtx.Lock();
}

ScopedReportBase::~ScopedReportBase() {
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
  rep_ = nullptr;
}

// Symbolizes and appends a stack to the report.
void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(stack);
  (*rs)->suppressable = suppressable;
}

// Records one of the racing memory accesses: address/size/type decoded from
// the shadow value, the symbolized stack, and the set of mutexes held at
// the time of the access.
void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
                                       StackTrace stack, const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(stack);
  mop->external_tag = external_tag;
  if (mop->stack)
    mop->stack->suppressable = true;
  // Record every mutex held during the access (dead mutexes included).
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 mid = this->AddMutex(d.id);
    ReportMopMutex mtx = {mid, d.write};
    mop->mset.PushBack(mtx);
  }
}

void ScopedReportBase::AddUniqueTid(int unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}

// Adds a thread description to the report unless it is already present.
void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
  ReportThread *rt = new(mem) ReportThread;
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->os_id = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = internal_strdup(tctx->name);
  rt->parent_tid = tctx->parent_tid;
  rt->thread_type = tctx->thread_type;
  rt->stack = 0;
  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
  if (rt->stack)
    rt->stack->suppressable = suppressable;
}

#if !SANITIZER_GO
static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
  int unique_id = *(int *)arg;
  return tctx->unique_id == (u32)unique_id;
}

// Finds a thread context by its unique id.
// Requires the thread registry to be locked by the caller.
static ThreadContext *FindThreadByUidLocked(int unique_id) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext *>(
      ctx->thread_registry->FindThreadContextLocked(
          FindThreadByUidLockedCallback, &unique_id));
}

// Finds a thread context by tid.
// Requires the thread registry to be locked by the caller.
static ThreadContext *FindThreadByTidLocked(int tid) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
}

// Registry callback: true if addr falls into the stack or TLS range of a
// running thread.
static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

// Returns the running thread whose stack or TLS contains addr (null if none)
// and sets *is_stack to distinguish stack from TLS.
// Requires the thread registry to be locked by the caller.
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
                                                    (void*)addr));
  if (!tctx)
    return 0;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif

void ScopedReportBase::AddThread(int unique_tid, bool suppressable) {
#if !SANITIZER_GO
  if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
    AddThread(tctx, suppressable);
#endif
}

// Adds a live mutex to the report unless it is already present.
void ScopedReportBase::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex;
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->addr = s->addr;
  rm->destroyed = false;
  rm->stack = SymbolizeStackId(s->creation_stack_id);
}

// Adds the mutex identified by the packed (addr, uid) id; falls back to a
// "dead mutex" entry if it no longer exists. Returns the mutex id used in
// the report.
u64 ScopedReportBase::AddMutex(u64 id) {
  u64 uid = 0;
  u64 mid = id;
  uptr addr = SyncVar::SplitId(id, &uid);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
  // Check that the mutex is still alive.
  // Another mutex can be created at the same address,
  // so check uid as well.
  if (s && s->CheckId(uid)) {
    mid = s->uid;
    AddMutex(s);
  } else {
    AddDeadMutex(id);
  }
  if (s)
    s->mtx.Unlock();
  return mid;
}

// Records a mutex that has already been destroyed (only its id is known).
void ScopedReportBase::AddDeadMutex(u64 id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex;
  rep_->mutexes.PushBack(rm);
  rm->id = id;
  rm->addr = 0;
  rm->destroyed = true;
  rm->stack = 0;
}

// Classifies the racy address and attaches a location description to the
// report: a file descriptor, a heap block, a thread stack/TLS region,
// or a global (via SymbolizeData).
void ScopedReportBase::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#if !SANITIZER_GO
  int fd = -1;
  int creat_tid = kInvalidTid;
  u32 creat_stack = 0;
  // 1) File descriptor?
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
    ReportLocation *loc = ReportLocation::New(ReportLocationFD);
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    rep_->locs.PushBack(loc);
    ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  // 2) Heap block?
  MBlock *b = 0;
  Allocator *a = allocator();
  if (a->PointerIsMine((void*)addr)) {
    void *block_begin = a->GetBlockBegin((void*)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock((uptr)block_begin);
  }
  if (b != 0) {
    ThreadContext *tctx = FindThreadByTidLocked(b->tid);
    ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
    loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
    loc->heap_chunk_size = b->siz;
    loc->external_tag = b->tag;
    loc->tid = tctx ? tctx->tid : b->tid;
    loc->stack = SymbolizeStackId(b->stk);
    rep_->locs.PushBack(loc);
    if (tctx)
      AddThread(tctx);
    return;
  }
  // 3) Thread stack or TLS?
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    ReportLocation *loc =
        ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
    loc->tid = tctx->tid;
    rep_->locs.PushBack(loc);
    AddThread(tctx);
  }
#endif
  // 4) Global variable?
  if (ReportLocation *loc = SymbolizeData(addr)) {
    loc->suppressable = true;
    rep_->locs.PushBack(loc);
    return;
  }
}

#if !SANITIZER_GO
void ScopedReportBase::AddSleep(u32 stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}
#endif

void ScopedReportBase::SetCount(int count) { rep_->count = count; }

const ReportDesc *ScopedReportBase::GetReport() const { return rep_; }

ScopedReport::ScopedReport(ReportType typ, uptr tag)
    : ScopedReportBase(typ, tag) {}

ScopedReport::~ScopedReport() {}

void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset, uptr *tag) {
  // This function restores stack trace and mutex set for the thread/epoch.
  // It does so by getting stack trace and mutex set at the beginning of
  // trace part, and then replaying the trace till the given epoch.
  Trace* trace = ThreadTrace(tid);
  ReadLock l(&trace->mtx);
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  // Bail out if the requested epoch is no longer covered by this trace part
  // (the circular trace buffer has been overwritten).
  if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize)
    return;
  CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0);
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  Vector<uptr> stack;
  stack.Resize(hdr->stack0.size + 64);
  // Start from the stack snapshot taken at the beginning of the trace part.
  for (uptr i = 0; i < hdr->stack0.size; i++) {
    stack[i] = hdr->stack0.trace[i];
    DPrintf2("  #%02zu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.size;
  Event *events = (Event*)GetThreadTrace(tid);
  // Replay events to reconstruct the stack and mutex set at `epoch`.
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
    EventType typ = (EventType)(ev >> kEventPCBits);
    uptr pc = (uptr)(ev & ((1ull << kEventPCBits) - 1));
    DPrintf2("  %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
      stack[pos] = pc;
    } else if (typ == EventTypeFuncEnter) {
      if (stack.Size() < pos + 2)
        stack.Resize(pos + 2);
      stack[pos++] = pc;
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;
    }
    if (mset) {
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
        mset->Del(pc, true);
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
      }
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2("      #%zu: %zx\n", j, stack[j]);
  }
  if (pos == 0 && stack[0] == 0)
    return;
  pos++;
  stk->Init(&stack[0], pos);
  ExtractTagFromStack(stk, tag);
}

// Returns true if this race duplicates an already reported one
// (same pair of stacks, or overlapping address range) and therefore should
// be suppressed; also records the new stacks/range for future matching.
static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                             uptr addr_min, uptr addr_max) {
  bool equal_stack = false;
  RacyStacks hash;
  bool equal_address = false;
  RacyAddress ra0 = {addr_min, addr_max};
  {
    ReadLock lock(&ctx->racy_mtx);
    if (flags()->suppress_equal_stacks) {
      hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
      hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
      for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
        if (hash == ctx->racy_stacks[i]) {
          VPrintf(2,
                  "ThreadSanitizer: suppressing report as doubled (stack)\n");
          equal_stack = true;
          break;
        }
      }
    }
    if (flags()->suppress_equal_addresses) {
      for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
        RacyAddress ra2 = ctx->racy_addresses[i];
        uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
        uptr minend = min(ra0.addr_max, ra2.addr_max);
        if (maxbeg < minend) {
          VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
          equal_address = true;
          break;
        }
      }
    }
  }
  if (!equal_stack && !equal_address)
    return false;
  // Record whichever half did not match, so future duplicates are caught.
  if (!equal_stack) {
    Lock lock(&ctx->racy_mtx);
    ctx->racy_stacks.PushBack(hash);
  }
  if (!equal_address) {
    Lock lock(&ctx->racy_mtx);
    ctx->racy_addresses.PushBack(ra0);
  }
  return true;
}

// Remembers the stacks/address range of a race that was just reported,
// for the duplicate suppression performed by HandleRacyStacks.
static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                          uptr addr_min, uptr addr_max) {
  Lock lock(&ctx->racy_mtx);
  if (flags()->suppress_equal_stacks) {
    RacyStacks hash;
    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
    ctx->racy_stacks.PushBack(hash);
  }
  if (flags()->suppress_equal_addresses) {
    RacyAddress ra0 = {addr_min, addr_max};
    ctx->racy_addresses.PushBack(ra0);
  }
}

// Applies suppressions and user callbacks, then prints the report.
// Returns true if the report was actually output (and counted).
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  if (!flags()->report_bugs || thr->suppress_reports)
    return false;
  atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
  const ReportDesc *rep = srep.GetReport();
  CHECK_EQ(thr->current_report, nullptr);
  thr->current_report = rep;
  Suppression *supp = 0;
  uptr pc_or_addr = 0;
  // Check every stack/location in the report against user suppressions;
  // the first match wins.
  for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (pc_or_addr != 0) {
    Lock lock(&ctx->fired_suppressions_mtx);
    FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
    ctx->fired_suppressions.push_back(s);
  }
  {
    // Give the OnReport hook a clean is_freeing state while it runs.
    bool old_is_freeing = thr->is_freeing;
    thr->is_freeing = false;
    bool suppressed = OnReport(rep, pc_or_addr != 0);
    thr->is_freeing = old_is_freeing;
    if (suppressed) {
      thr->current_report = nullptr;
      return false;
    }
  }
  PrintReport(rep);
  __tsan_on_report(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    Die();
  thr->current_report = nullptr;
  return true;
}

// Returns true if any PC in `trace` matches a suppression that already fired
// for this report type; bumps the suppression hit count on a match.
bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    for (uptr j = 0; j < trace.size; j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.trace[j] == s->pc_or_addr) {
        if (s->supp)
          atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
        return true;
      }
    }
  }
  return false;
}

// Address-based variant of the check above.
static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc_or_addr) {
      if (s->supp)
        atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
      return true;
    }
  }
  return false;
}

// Used when report_atomic_races is off: decides whether the race should
// still be reported because it is not purely between atomic operations
// (one side is a plain access, or it involves freed memory).
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
    return true;
  if (s0.IsAtomic() && s1.IsFreed())
    return true;
  if (s1.IsAtomic() && thr->is_freeing)
    return true;
  return false;
}

// Builds and emits a race report for the two accesses recorded in
// thr->racy_state / thr->racy_shadow_addr.
void ReportRace(ThreadState *thr) {
  CheckNoLocks(thr);

  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  if (!flags()->report_bugs)
    return;
  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
    return;

  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  // Compute the union of the two accessed address ranges.
  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }

  ReportType typ = ReportTypeRace;
  if (thr->is_vptr_access && freed)
    typ = ReportTypeVptrUseAfterFree;
  else if (thr->is_vptr_access)
    typ = ReportTypeVptrRace;
  else if (freed)
    typ = ReportTypeUseAfterFree;

  if (IsFiredSuppression(ctx, typ, addr))
    return;

  const uptr kMop = 2;
  VarSizeStackTrace traces[kMop];
  uptr tags[kMop] = {kExternalTagNone};
  uptr toppc = TraceTopPC(thr);
  if (toppc >> kEventPCBits) {
    // This is a work-around for a known issue.
    // The scenario where this happens is rather elaborate and requires
    // an instrumented __sanitizer_report_error_summary callback and
    // a __tsan_symbolize_external callback and a race during a range memory
    // access larger than 8 bytes. MemoryAccessRange adds the current PC to
    // the trace and starts processing memory accesses. A first memory access
    // triggers a race, we report it and call the instrumented
    // __sanitizer_report_error_summary, which adds more stuff to the trace
    // since it is instrumented. Then a second memory access in MemoryAccessRange
    // also triggers a race and we get here and call TraceTopPC to get the
    // current PC, however now it contains some unrelated events from the
    // callback. Most likely, TraceTopPC will now return a EventTypeFuncExit
    // event. Later we subtract -1 from it (in GetPreviousInstructionPc)
    // and the resulting PC has kExternalPCBit set, so we pass it to
    // __tsan_symbolize_external_ex. __tsan_symbolize_external_ex is within its
    // rights to crash since the PC is completely bogus.
    // test/tsan/double_race.cpp contains a test case for this.
    toppc = 0;
  }
  ObtainCurrentStack(thr, toppc, &traces[0], &tags[0]);
  if (IsFiredSuppression(ctx, typ, traces[0]))
    return;

  // MutexSet is too large to live on stack.
  Vector<u64> mset_buffer;
  mset_buffer.Resize(sizeof(MutexSet) / sizeof(u64) + 1);
  MutexSet *mset2 = new(&mset_buffer[0]) MutexSet();

  // Restore the stack and mutex set of the other access from the trace.
  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2, &tags[1]);
  if (IsFiredSuppression(ctx, typ, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;

  // If any of the accesses has a tag, treat this as an "external" race.
  uptr tag = kExternalTagNone;
  for (uptr i = 0; i < kMop; i++) {
    if (tags[i] != kExternalTagNone) {
      typ = ReportTypeExternalRace;
      tag = tags[i];
      break;
    }
  }

  ThreadRegistryLock l0(ctx->thread_registry);
  ScopedReport rep(typ, tag);
  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, tags[i], s, traces[i],
                        i == 0 ? &thr->mset : mset2);
  }

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(s.tid()));
    // Skip threads whose epoch range does not cover the access.
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#if !SANITIZER_GO
  {
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  if (!OutputReport(thr, rep))
    return;

  AddRacyStacks(thr, traces, addr_min, addr_max);
}

// Prints the current instrumented (fast) stack of the given thread.
void PrintCurrentStack(ThreadState *thr, uptr pc) {
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  PrintStack(SymbolizeStack(trace));
}

// Always inlining PrintCurrentStackSlow, because LocatePcInTrace assumes
// __sanitizer_print_stack_trace exists in the actual unwinded stack, but
// tail-call to PrintCurrentStackSlow breaks this assumption because
// __sanitizer_print_stack_trace disappears after tail-call.
// However, this solution is not reliable enough, please see dvyukov's comment
// http://reviews.llvm.org/D19148#406208
// Also see PR27280 comment 2 and 3 for breaking examples and analysis.
730353944SdimALWAYS_INLINE 731353944Sdimvoid PrintCurrentStackSlow(uptr pc) { 732353944Sdim#if !SANITIZER_GO 733353944Sdim uptr bp = GET_CURRENT_FRAME(); 734353944Sdim BufferedStackTrace *ptrace = 735353944Sdim new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace))) 736353944Sdim BufferedStackTrace(); 737353944Sdim ptrace->Unwind(pc, bp, nullptr, false); 738353944Sdim 739353944Sdim for (uptr i = 0; i < ptrace->size / 2; i++) { 740353944Sdim uptr tmp = ptrace->trace_buffer[i]; 741353944Sdim ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1]; 742353944Sdim ptrace->trace_buffer[ptrace->size - i - 1] = tmp; 743353944Sdim } 744353944Sdim PrintStack(SymbolizeStack(*ptrace)); 745353944Sdim#endif 746353944Sdim} 747353944Sdim 748353944Sdim} // namespace __tsan 749353944Sdim 750353944Sdimusing namespace __tsan; 751353944Sdim 752353944Sdimextern "C" { 753353944SdimSANITIZER_INTERFACE_ATTRIBUTE 754353944Sdimvoid __sanitizer_print_stack_trace() { 755353944Sdim PrintCurrentStackSlow(StackTrace::GetCurrentPc()); 756353944Sdim} 757353944Sdim} // extern "C" 758