//===- ThreadSafety.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// An intra-procedural analysis for thread safety (e.g. deadlocks and race
// conditions), based on an annotation system.
//
// See http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
// for more information.
//
//===----------------------------------------------------------------------===//

#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclGroup.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
#include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
#include "clang/Analysis/Analyses/ThreadSafetyUtil.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <functional>
#include <iterator>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>

using namespace clang;
using namespace threadSafety;

// Key method definition
ThreadSafetyHandler::~ThreadSafetyHandler() = default;

/// Issue a warning about an invalid lock expression
static void warnInvalidLock(ThreadSafetyHandler &Handler,
                            const Expr *MutexExp, const NamedDecl *D,
                            const Expr *DeclExp, StringRef Kind) {
  SourceLocation Loc;
  if (DeclExp)
    Loc = DeclExp->getExprLoc();

  // FIXME: add a note about the attribute location in MutexExp or D
  if (Loc.isValid())
    Handler.handleInvalidLockExp(Kind, Loc);
}

namespace {

/// A set of CapabilityExpr objects, which are compiled from thread safety
/// attributes on a function.
class CapExprSet : public SmallVector<CapabilityExpr, 4> {
public:
  /// Push CapE onto the list, but discard duplicates.
  void push_back_nodup(const CapabilityExpr &CapE) {
    iterator It = std::find_if(begin(), end(),
                               [=](const CapabilityExpr &CapE2) {
      return CapE.equals(CapE2);
    });
    if (It == end())
      push_back(CapE);
  }
};

class FactManager;
class FactSet;

/// This is a helper class that stores a fact that is known at a
/// particular point in program execution. Currently, a fact is a capability,
/// along with additional information, such as where it was acquired, whether
/// it is exclusive or shared, etc.
///
/// FIXME: this analysis does not currently support re-entrant locking.
class FactEntry : public CapabilityExpr {
public:
  /// Where a fact comes from.
  enum SourceKind {
    Acquired, ///< The fact has been directly acquired.
    Asserted, ///< The fact has been asserted to be held.
    Declared, ///< The fact is assumed to be held by callers.
    Managed,  ///< The fact has been acquired through a scoped capability.
  };

private:
  /// Exclusive or shared.
  LockKind LKind : 8;

  /// How it was acquired.
  SourceKind Source : 8;

  /// Where it was acquired.
  SourceLocation AcquireLoc;

public:
  FactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
            SourceKind Src)
      : CapabilityExpr(CE), LKind(LK), Source(Src), AcquireLoc(Loc) {}
  virtual ~FactEntry() = default;

  LockKind kind() const { return LKind; }
  SourceLocation loc() const { return AcquireLoc; }

  bool asserted() const { return Source == Asserted; }
  bool declared() const { return Source == Declared; }
  bool managed() const { return Source == Managed; }

  virtual void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const = 0;
  virtual void handleLock(FactSet &FSet, FactManager &FactMan,
                          const FactEntry &entry, ThreadSafetyHandler &Handler,
                          StringRef DiagKind) const = 0;
  virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
                            const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                            bool FullyRemove, ThreadSafetyHandler &Handler,
                            StringRef DiagKind) const = 0;

  // Return true if LKind >= LK, where exclusive > shared
  bool isAtLeast(LockKind LK) const {
    return (LKind == LK_Exclusive) || (LK == LK_Shared);
  }
};
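// Illustrative sketch (not part of the analysis itself; assumes the macros
// from the ThreadSafetyAnalysis documentation): the facts above correspond
// to capabilities known to be held at a program point, e.g.
//
//   Mutex mu;
//   void foo() EXCLUSIVE_LOCKS_REQUIRED(mu);  // inside foo(), 'mu' is a
//                                             // Declared fact
//   void bar() {
//     mu.Lock();     // adds an Acquired fact for 'mu'
//     foo();         // OK: a fact for 'mu' is in the lockset here
//     mu.Unlock();   // removes the fact and records the negative fact '!mu'
//   }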
using FactID = unsigned short;

/// FactManager manages the memory for all facts that are created during
/// the analysis of a single routine.
class FactManager {
private:
  std::vector<std::unique_ptr<const FactEntry>> Facts;

public:
  FactID newFact(std::unique_ptr<FactEntry> Entry) {
    Facts.push_back(std::move(Entry));
    return static_cast<unsigned short>(Facts.size() - 1);
  }

  const FactEntry &operator[](FactID F) const { return *Facts[F]; }
};

/// A FactSet is the set of facts that are known to be true at a
/// particular program point. FactSets must be small, because they are
/// frequently copied, and are thus implemented as a set of indices into a
/// table maintained by a FactManager. A typical FactSet only holds 1 or 2
/// locks, so we can get away with doing a linear search for lookup. Note
/// that a hashtable or map is inappropriate in this case, because lookups
/// may involve partial pattern matches, rather than exact matches.
class FactSet {
private:
  using FactVec = SmallVector<FactID, 4>;

  FactVec FactIDs;

public:
  using iterator = FactVec::iterator;
  using const_iterator = FactVec::const_iterator;

  iterator begin() { return FactIDs.begin(); }
  const_iterator begin() const { return FactIDs.begin(); }

  iterator end() { return FactIDs.end(); }
  const_iterator end() const { return FactIDs.end(); }

  bool isEmpty() const { return FactIDs.size() == 0; }

  // Return true if the set contains only negative facts
  bool isEmpty(FactManager &FactMan) const {
    for (const auto FID : *this) {
      if (!FactMan[FID].negative())
        return false;
    }
    return true;
  }

  void addLockByID(FactID ID) { FactIDs.push_back(ID); }

  FactID addLock(FactManager &FM, std::unique_ptr<FactEntry> Entry) {
    FactID F = FM.newFact(std::move(Entry));
    FactIDs.push_back(F);
    return F;
  }

  bool removeLock(FactManager& FM, const CapabilityExpr &CapE) {
    unsigned n = FactIDs.size();
    if (n == 0)
      return false;

    for (unsigned i = 0; i < n-1; ++i) {
      if (FM[FactIDs[i]].matches(CapE)) {
        FactIDs[i] = FactIDs[n-1];
        FactIDs.pop_back();
        return true;
      }
    }
    if (FM[FactIDs[n-1]].matches(CapE)) {
      FactIDs.pop_back();
      return true;
    }
    return false;
  }

  iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) {
    return std::find_if(begin(), end(), [&](FactID ID) {
      return FM[ID].matches(CapE);
    });
  }

  const FactEntry *findLock(FactManager &FM, const CapabilityExpr &CapE) const {
    auto I = std::find_if(begin(), end(), [&](FactID ID) {
      return FM[ID].matches(CapE);
    });
    return I != end() ? &FM[*I] : nullptr;
  }

  const FactEntry *findLockUniv(FactManager &FM,
                                const CapabilityExpr &CapE) const {
    auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
      return FM[ID].matchesUniv(CapE);
    });
    return I != end() ? &FM[*I] : nullptr;
  }

  const FactEntry *findPartialMatch(FactManager &FM,
                                    const CapabilityExpr &CapE) const {
    auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
      return FM[ID].partiallyMatches(CapE);
    });
    return I != end() ? &FM[*I] : nullptr;
  }

  bool containsMutexDecl(FactManager &FM, const ValueDecl* Vd) const {
    auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
      return FM[ID].valueDecl() == Vd;
    });
    return I != end();
  }
};

class ThreadSafetyAnalyzer;

} // namespace

namespace clang {
namespace threadSafety {

class BeforeSet {
private:
  using BeforeVect = SmallVector<const ValueDecl *, 4>;

  struct BeforeInfo {
    BeforeVect Vect;
    int Visited = 0;

    BeforeInfo() = default;
    BeforeInfo(BeforeInfo &&) = default;
  };

  using BeforeMap =
      llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>>;
  using CycleMap = llvm::DenseMap<const ValueDecl *, bool>;

public:
  BeforeSet() = default;

  BeforeInfo* insertAttrExprs(const ValueDecl* Vd,
                              ThreadSafetyAnalyzer& Analyzer);

  BeforeInfo *getBeforeInfoForDecl(const ValueDecl *Vd,
                                   ThreadSafetyAnalyzer &Analyzer);

  void checkBeforeAfter(const ValueDecl* Vd,
                        const FactSet& FSet,
                        ThreadSafetyAnalyzer& Analyzer,
                        SourceLocation Loc, StringRef CapKind);

private:
  BeforeMap BMap;
  CycleMap CycMap;
};

} // namespace threadSafety
} // namespace clang

namespace {

class LocalVariableMap;

using LocalVarContext = llvm::ImmutableMap<const NamedDecl *, unsigned>;

/// A side (entry or exit) of a CFG node.
enum CFGBlockSide { CBS_Entry, CBS_Exit };

/// CFGBlockInfo is a struct which contains all the information that is
/// maintained for each block in the CFG. See LocalVariableMap for more
/// information about the contexts.
struct CFGBlockInfo {
  // Lockset held at entry to block
  FactSet EntrySet;

  // Lockset held at exit from block
  FactSet ExitSet;

  // Context held at entry to block
  LocalVarContext EntryContext;

  // Context held at exit from block
  LocalVarContext ExitContext;

  // Location of first statement in block
  SourceLocation EntryLoc;

  // Location of last statement in block.
  SourceLocation ExitLoc;

  // Used to replay contexts later
  unsigned EntryIndex;

  // Is this block reachable?
  bool Reachable = false;

  const FactSet &getSet(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntrySet : ExitSet;
  }

  SourceLocation getLocation(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntryLoc : ExitLoc;
  }

private:
  CFGBlockInfo(LocalVarContext EmptyCtx)
      : EntryContext(EmptyCtx), ExitContext(EmptyCtx) {}

public:
  static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M);
};

// A LocalVariableMap maintains a map from local variables to their currently
// valid definitions. It provides SSA-like functionality when traversing the
// CFG. Like SSA, each definition or assignment to a variable is assigned a
// unique name (an integer), which acts as the SSA name for that definition.
// The total set of names is shared among all CFG basic blocks.
// Unlike SSA, we do not rewrite expressions to replace local variable DeclRefs
// with their SSA-names. Instead, we compute a Context for each point in the
// code, which maps local variables to the appropriate SSA-name. This map
// changes with each assignment.
//
// The map is computed in a single pass over the CFG. Subsequent analyses can
// then query the map to find the appropriate Context for a statement, and use
// that Context to look up the definitions of variables.
class LocalVariableMap {
public:
  using Context = LocalVarContext;

  /// A VarDefinition consists of an expression, representing the value of the
  /// variable, along with the context in which that expression should be
  /// interpreted. A reference VarDefinition does not itself contain this
  /// information, but instead contains a pointer to a previous VarDefinition.
  struct VarDefinition {
  public:
    friend class LocalVariableMap;

    // The original declaration for this variable.
    const NamedDecl *Dec;

    // The expression for this variable, OR
    const Expr *Exp = nullptr;

    // Reference to another VarDefinition
    unsigned Ref = 0;

    // The map with which Exp should be interpreted.
    Context Ctx;

    bool isReference() { return !Exp; }

  private:
    // Create ordinary variable definition
    VarDefinition(const NamedDecl *D, const Expr *E, Context C)
        : Dec(D), Exp(E), Ctx(C) {}

    // Create reference to previous definition
    VarDefinition(const NamedDecl *D, unsigned R, Context C)
        : Dec(D), Ref(R), Ctx(C) {}
  };

private:
  Context::Factory ContextFactory;
  std::vector<VarDefinition> VarDefinitions;
  std::vector<unsigned> CtxIndices;
  std::vector<std::pair<const Stmt *, Context>> SavedContexts;

public:
  LocalVariableMap() {
    // index 0 is a placeholder for undefined variables (aka phi-nodes).
    VarDefinitions.push_back(VarDefinition(nullptr, 0u, getEmptyContext()));
  }

  /// Look up a definition, within the given context.
  const VarDefinition* lookup(const NamedDecl *D, Context Ctx) {
    const unsigned *i = Ctx.lookup(D);
    if (!i)
      return nullptr;
    assert(*i < VarDefinitions.size());
    return &VarDefinitions[*i];
  }

  /// Look up the definition for D within the given context. Returns
  /// NULL if the expression is not statically known. If successful, also
  /// modifies Ctx to hold the context of the returned Expr.
  const Expr* lookupExpr(const NamedDecl *D, Context &Ctx) {
    const unsigned *P = Ctx.lookup(D);
    if (!P)
      return nullptr;

    unsigned i = *P;
    while (i > 0) {
      if (VarDefinitions[i].Exp) {
        Ctx = VarDefinitions[i].Ctx;
        return VarDefinitions[i].Exp;
      }
      i = VarDefinitions[i].Ref;
    }
    return nullptr;
  }

  Context getEmptyContext() { return ContextFactory.getEmptyMap(); }

  /// Return the next context after processing S. This function is used by
  /// clients of the class to get the appropriate context when traversing the
  /// CFG. It must be called for every assignment or DeclStmt.
  Context getNextContext(unsigned &CtxIndex, const Stmt *S, Context C) {
    if (SavedContexts[CtxIndex+1].first == S) {
      CtxIndex++;
      Context Result = SavedContexts[CtxIndex].second;
      return Result;
    }
    return C;
  }

  void dumpVarDefinitionName(unsigned i) {
    if (i == 0) {
      llvm::errs() << "Undefined";
      return;
    }
    const NamedDecl *Dec = VarDefinitions[i].Dec;
    if (!Dec) {
      llvm::errs() << "<<NULL>>";
      return;
    }
    Dec->printName(llvm::errs());
    llvm::errs() << "." << i << " " << ((const void*) Dec);
  }

  /// Dumps an ASCII representation of the variable map to llvm::errs()
  void dump() {
    for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
      const Expr *Exp = VarDefinitions[i].Exp;
      unsigned Ref = VarDefinitions[i].Ref;

      dumpVarDefinitionName(i);
      llvm::errs() << " = ";
      if (Exp) Exp->dump();
      else {
        dumpVarDefinitionName(Ref);
        llvm::errs() << "\n";
      }
    }
  }

  /// Dumps an ASCII representation of a Context to llvm::errs()
  void dumpContext(Context C) {
    for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
      const NamedDecl *D = I.getKey();
      D->printName(llvm::errs());
      const unsigned *i = C.lookup(D);
      llvm::errs() << " -> ";
      dumpVarDefinitionName(*i);
      llvm::errs() << "\n";
    }
  }

  /// Builds the variable map.
  void traverseCFG(CFG *CFGraph, const PostOrderCFGView *SortedGraph,
                   std::vector<CFGBlockInfo> &BlockInfo);

protected:
  friend class VarMapBuilder;

  // Get the current context index
  unsigned getContextIndex() { return SavedContexts.size()-1; }

  // Save the current context for later replay
  void saveContext(const Stmt *S, Context C) {
    SavedContexts.push_back(std::make_pair(S, C));
  }

  // Adds a new definition to the given context, and returns a new context.
  // This method should be called when declaring a new variable.
  Context addDefinition(const NamedDecl *D, const Expr *Exp, Context Ctx) {
    assert(!Ctx.contains(D));
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
    return NewCtx;
  }

  // Add a new reference to an existing definition.
  Context addReference(const NamedDecl *D, unsigned i, Context Ctx) {
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, i, Ctx));
    return NewCtx;
  }

  // Updates a definition only if that definition is already in the map.
  // This method should be called when assigning to an existing variable.
  Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
    if (Ctx.contains(D)) {
      unsigned newID = VarDefinitions.size();
      Context NewCtx = ContextFactory.remove(Ctx, D);
      NewCtx = ContextFactory.add(NewCtx, D, newID);
      VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
      return NewCtx;
    }
    return Ctx;
  }

  // Removes a definition from the context, but keeps the variable name
  // as a valid variable. The index 0 is a placeholder for cleared definitions.
  Context clearDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
      NewCtx = ContextFactory.add(NewCtx, D, 0);
    }
    return NewCtx;
  }

  // Remove a definition entirely from the context.
  Context removeDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
    }
    return NewCtx;
  }

  Context intersectContexts(Context C1, Context C2);
  Context createReferenceContext(Context C);
  void intersectBackEdge(Context C1, Context C2);
};

} // namespace

// This has to be defined after LocalVariableMap.
CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
  return CFGBlockInfo(M.getEmptyContext());
}

namespace {

/// Visitor which builds a LocalVariableMap
class VarMapBuilder : public ConstStmtVisitor<VarMapBuilder> {
public:
  LocalVariableMap* VMap;
  LocalVariableMap::Context Ctx;

  VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
      : VMap(VM), Ctx(C) {}

  void VisitDeclStmt(const DeclStmt *S);
  void VisitBinaryOperator(const BinaryOperator *BO);
};

} // namespace

// Add new local variables to the variable map
void VarMapBuilder::VisitDeclStmt(const DeclStmt *S) {
  bool modifiedCtx = false;
  const DeclGroupRef DGrp = S->getDeclGroup();
  for (const auto *D : DGrp) {
    if (const auto *VD = dyn_cast_or_null<VarDecl>(D)) {
      const Expr *E = VD->getInit();

      // Add local variables with trivial type to the variable map
      QualType T = VD->getType();
      if (T.isTrivialType(VD->getASTContext())) {
        Ctx = VMap->addDefinition(VD, E, Ctx);
        modifiedCtx = true;
      }
    }
  }
  if (modifiedCtx)
    VMap->saveContext(S, Ctx);
}

// Update local variable definitions in variable map
void VarMapBuilder::VisitBinaryOperator(const BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;

  Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();

  // Update the variable map and current context.
  if (const auto *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
    const ValueDecl *VDec = DRE->getDecl();
    if (Ctx.lookup(VDec)) {
      if (BO->getOpcode() == BO_Assign)
        Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
      else
        // FIXME -- handle compound assignment operators
        Ctx = VMap->clearDefinition(VDec, Ctx);
      VMap->saveContext(BO, Ctx);
    }
  }
}

// Computes the intersection of two contexts. The intersection is the
// set of variables which have the same definition in both contexts;
// variables with different definitions are discarded.
LocalVariableMap::Context
LocalVariableMap::intersectContexts(Context C1, Context C2) {
  Context Result = C1;
  for (const auto &P : C1) {
    const NamedDecl *Dec = P.first;
    const unsigned *i2 = C2.lookup(Dec);
    if (!i2)                   // variable doesn't exist on second path
      Result = removeDefinition(Dec, Result);
    else if (*i2 != P.second)  // variable exists, but has different definition
      Result = clearDefinition(Dec, Result);
  }
  return Result;
}

// For every variable in C, create a new variable that refers to the
// definition in C. Return a new context that contains these new variables.
// (We use this for a naive implementation of SSA on loop back-edges.)
LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
  Context Result = getEmptyContext();
  for (const auto &P : C)
    Result = addReference(P.first, P.second, Result);
  return Result;
}

// This routine also takes the intersection of C1 and C2, but it does so by
// altering the VarDefinitions. C1 must be the result of an earlier call to
// createReferenceContext.
void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
  for (const auto &P : C1) {
    unsigned i1 = P.second;
    VarDefinition *VDef = &VarDefinitions[i1];
    assert(VDef->isReference());

    const unsigned *i2 = C2.lookup(P.first);
    if (!i2 || (*i2 != i1))
      VDef->Ref = 0;    // Mark this variable as undefined
  }
}

// Traverse the CFG in topological order, so all predecessors of a block
// (excluding back-edges) are visited before the block itself. At
// each point in the code, we calculate a Context, which holds the set of
// variable definitions which are visible at that point in execution.
// Visible variables are mapped to their definitions using an array that
// contains all definitions.
//
// At join points in the CFG, the set is computed as the intersection of
// the incoming sets along each edge, E.g.
//
//                       { Context                 | VarDefinitions }
//   int x = 0;          { x -> x1                 | x1 = 0 }
//   int y = 0;          { x -> x1, y -> y1        | y1 = 0, x1 = 0 }
//   if (b) x = 1;       { x -> x2, y -> y1        | x2 = 1, y1 = 0, ... }
//   else   x = 2;       { x -> x3, y -> y1        | x3 = 2, x2 = 1, ... }
//   ...                 { y -> y1  (x is unknown) | x3 = 2, x2 = 1, ... }
//
// This is essentially a simpler and more naive version of the standard SSA
// algorithm. Those definitions that remain in the intersection are from blocks
// that strictly dominate the current block. We do not bother to insert proper
// phi nodes, because they are not used in our analysis; instead, wherever
// a phi node would be required, we simply remove that definition from the
// context (E.g. x above).
//
// The initial traversal does not capture back-edges, so those need to be
// handled on a separate pass. Whenever the first pass encounters an
// incoming back edge, it duplicates the context, creating new definitions
// that refer back to the originals. (These correspond to places where SSA
// might have to insert a phi node.) On the second pass, these definitions are
// set to NULL if the variable has changed on the back-edge (i.e. a phi
// node was actually required.) E.g.
//
//                       { Context           | VarDefinitions }
//   int x = 0, y = 0;   { x -> x1, y -> y1  | y1 = 0, x1 = 0 }
//   while (b)           { x -> x2, y -> y1  | [1st:] x2=x1; [2nd:] x2=NULL; }
//     x = x+1;          { x -> x3, y -> y1  | x3 = x2 + 1, ... }
//   ...                 { y -> y1           | x3 = 2, x2 = 1, ... }
void LocalVariableMap::traverseCFG(CFG *CFGraph,
                                   const PostOrderCFGView *SortedGraph,
                                   std::vector<CFGBlockInfo> &BlockInfo) {
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  CtxIndices.resize(CFGraph->getNumBlockIDs());

  for (const auto *CurrBlock : *SortedGraph) {
    unsigned CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    VisitedBlocks.insert(CurrBlock);

    // Calculate the entry context for the current block
    bool HasBackEdges = false;
    bool CtxInit = true;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {
      // if *PI -> CurrBlock is a back edge, skip it
      if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) {
        HasBackEdges = true;
        continue;
      }

      unsigned PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      if (CtxInit) {
        CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
        CtxInit = false;
      }
      else {
        CurrBlockInfo->EntryContext =
          intersectContexts(CurrBlockInfo->EntryContext,
                            PrevBlockInfo->ExitContext);
      }
    }

    // Duplicate the context if we have back-edges, so we can call
    // intersectBackEdges later.
    if (HasBackEdges)
      CurrBlockInfo->EntryContext =
        createReferenceContext(CurrBlockInfo->EntryContext);

    // Create a starting context index for the current block
    saveContext(nullptr, CurrBlockInfo->EntryContext);
    CurrBlockInfo->EntryIndex = getContextIndex();

    // Visit all the statements in the basic block.
    VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
    for (const auto &BI : *CurrBlock) {
      switch (BI.getKind()) {
        case CFGElement::Statement: {
          CFGStmt CS = BI.castAs<CFGStmt>();
          VMapBuilder.Visit(CS.getStmt());
          break;
        }
        default:
          break;
      }
    }
    CurrBlockInfo->ExitContext = VMapBuilder.Ctx;

    // Mark variables on back edges as "unknown" if they've been changed.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {
      // if CurrBlock -> *SI is *not* a back edge
      if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
      Context LoopEnd   = CurrBlockInfo->ExitContext;
      intersectBackEdge(LoopBegin, LoopEnd);
    }
  }

  // Put an extra entry at the end of the indexed context array
  unsigned exitID = CFGraph->getExit().getBlockID();
  saveContext(nullptr, BlockInfo[exitID].ExitContext);
}

/// Find the appropriate source locations to use when producing diagnostics for
/// each block in the CFG.
static void findBlockLocations(CFG *CFGraph,
                               const PostOrderCFGView *SortedGraph,
                               std::vector<CFGBlockInfo> &BlockInfo) {
  for (const auto *CurrBlock : *SortedGraph) {
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];

    // Find the source location of the last statement in the block, if the
    // block is not empty.
    if (const Stmt *S = CurrBlock->getTerminatorStmt()) {
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getBeginLoc();
    } else {
      for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
           BE = CurrBlock->rend(); BI != BE; ++BI) {
        // FIXME: Handle other CFGElement kinds.
        if (Optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
          CurrBlockInfo->ExitLoc = CS->getStmt()->getBeginLoc();
          break;
        }
      }
    }

    if (CurrBlockInfo->ExitLoc.isValid()) {
      // This block contains at least one statement. Find the source location
      // of the first statement in the block.
      for (const auto &BI : *CurrBlock) {
        // FIXME: Handle other CFGElement kinds.
        if (Optional<CFGStmt> CS = BI.getAs<CFGStmt>()) {
          CurrBlockInfo->EntryLoc = CS->getStmt()->getBeginLoc();
          break;
        }
      }
    } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
               CurrBlock != &CFGraph->getExit()) {
      // The block is empty, and has a single predecessor. Use its exit
      // location.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
          BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
    }
  }
}

namespace {

class LockableFactEntry : public FactEntry {
public:
  LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
                    SourceKind Src = Acquired)
      : FactEntry(CE, LK, Loc, Src) {}

  void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const override {
    if (!managed() && !asserted() && !negative() && !isUniversal()) {
      Handler.handleMutexHeldEndOfScope("mutex", toString(), loc(), JoinLoc,
                                        LEK);
    }
  }

  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
                  ThreadSafetyHandler &Handler,
                  StringRef DiagKind) const override {
    Handler.handleDoubleLock(DiagKind, entry.toString(), loc(), entry.loc());
  }

  void handleUnlock(FactSet &FSet, FactManager &FactMan,
                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                    bool FullyRemove, ThreadSafetyHandler &Handler,
                    StringRef DiagKind) const override {
    FSet.removeLock(FactMan, Cp);
    if (!Cp.negative()) {
      FSet.addLock(FactMan, std::make_unique<LockableFactEntry>(
                                !Cp, LK_Exclusive, UnlockLoc));
    }
  }
};

class ScopedLockableFactEntry : public FactEntry {
private:
  enum UnderlyingCapabilityKind {
    UCK_Acquired,          ///< Any kind of acquired capability.
    UCK_ReleasedShared,    ///< Shared capability that was released.
    UCK_ReleasedExclusive, ///< Exclusive capability that was released.
  };

  using UnderlyingCapability =
      llvm::PointerIntPair<const til::SExpr *, 2, UnderlyingCapabilityKind>;

  SmallVector<UnderlyingCapability, 4> UnderlyingMutexes;

public:
  ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc)
      : FactEntry(CE, LK_Exclusive, Loc, Acquired) {}

  void addLock(const CapabilityExpr &M) {
    UnderlyingMutexes.emplace_back(M.sexpr(), UCK_Acquired);
  }

  void addExclusiveUnlock(const CapabilityExpr &M) {
    UnderlyingMutexes.emplace_back(M.sexpr(), UCK_ReleasedExclusive);
  }

  void addSharedUnlock(const CapabilityExpr &M) {
    UnderlyingMutexes.emplace_back(M.sexpr(), UCK_ReleasedShared);
  }

  void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const override {
    for (const auto &UnderlyingMutex : UnderlyingMutexes) {
      const auto *Entry = FSet.findLock(
          FactMan, CapabilityExpr(UnderlyingMutex.getPointer(), false));
      if ((UnderlyingMutex.getInt() == UCK_Acquired && Entry) ||
          (UnderlyingMutex.getInt() != UCK_Acquired && !Entry)) {
        // If this scoped lock manages another mutex, and if the underlying
        // mutex is still/not held, then warn about the underlying mutex.
        Handler.handleMutexHeldEndOfScope(
            "mutex", sx::toString(UnderlyingMutex.getPointer()), loc(), JoinLoc,
            LEK);
      }
    }
  }

  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
                  ThreadSafetyHandler &Handler,
                  StringRef DiagKind) const override {
    for (const auto &UnderlyingMutex : UnderlyingMutexes) {
      CapabilityExpr UnderCp(UnderlyingMutex.getPointer(), false);

      if (UnderlyingMutex.getInt() == UCK_Acquired)
        lock(FSet, FactMan, UnderCp, entry.kind(), entry.loc(), &Handler,
             DiagKind);
      else
        unlock(FSet, FactMan, UnderCp, entry.loc(), &Handler, DiagKind);
    }
  }

  void handleUnlock(FactSet &FSet, FactManager &FactMan,
                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                    bool FullyRemove, ThreadSafetyHandler &Handler,
                    StringRef DiagKind) const override {
    assert(!Cp.negative() && "Managing object cannot be negative.");
    for (const auto &UnderlyingMutex : UnderlyingMutexes) {
      CapabilityExpr UnderCp(UnderlyingMutex.getPointer(), false);

      // Remove/lock the underlying mutex if it exists/is still unlocked; warn
      // on double unlocking/locking if we're not destroying the scoped object.
      ThreadSafetyHandler *TSHandler = FullyRemove ? nullptr : &Handler;
      if (UnderlyingMutex.getInt() == UCK_Acquired) {
        unlock(FSet, FactMan, UnderCp, UnlockLoc, TSHandler, DiagKind);
      } else {
        LockKind kind = UnderlyingMutex.getInt() == UCK_ReleasedShared
                            ? LK_Shared
                            : LK_Exclusive;
        lock(FSet, FactMan, UnderCp, kind, UnlockLoc, TSHandler, DiagKind);
      }
    }
    if (FullyRemove)
      FSet.removeLock(FactMan, Cp);
  }

private:
  void lock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
            LockKind kind, SourceLocation loc, ThreadSafetyHandler *Handler,
            StringRef DiagKind) const {
    if (const FactEntry *Fact = FSet.findLock(FactMan, Cp)) {
      if (Handler)
        Handler->handleDoubleLock(DiagKind, Cp.toString(), Fact->loc(), loc);
    } else {
      FSet.removeLock(FactMan, !Cp);
      FSet.addLock(FactMan,
                   std::make_unique<LockableFactEntry>(Cp, kind, loc, Managed));
    }
  }

  void unlock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
              SourceLocation loc, ThreadSafetyHandler *Handler,
              StringRef DiagKind) const {
    if (FSet.findLock(FactMan, Cp)) {
      FSet.removeLock(FactMan, Cp);
      FSet.addLock(FactMan, std::make_unique<LockableFactEntry>(
                                !Cp, LK_Exclusive, loc));
    } else if (Handler) {
      SourceLocation PrevLoc;
      if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
        PrevLoc = Neg->loc();
      Handler->handleUnmatchedUnlock(DiagKind, Cp.toString(), loc, PrevLoc);
    }
  }
};

/// Class which implements the core thread safety analysis routines.
class ThreadSafetyAnalyzer {
  friend class BuildLockset;
  friend class threadSafety::BeforeSet;

  llvm::BumpPtrAllocator Bpa;
  threadSafety::til::MemRegionRef Arena;
  threadSafety::SExprBuilder SxBuilder;

  ThreadSafetyHandler &Handler;
  const CXXMethodDecl *CurrentMethod;
  LocalVariableMap LocalVarMap;
  FactManager FactMan;
  std::vector<CFGBlockInfo> BlockInfo;

  BeforeSet *GlobalBeforeSet;

public:
  ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet* Bset)
      : Arena(&Bpa), SxBuilder(Arena), Handler(H), GlobalBeforeSet(Bset) {}

  bool inCurrentScope(const CapabilityExpr &CapE);

  void addLock(FactSet &FSet, std::unique_ptr<FactEntry> Entry,
               StringRef DiagKind, bool ReqAttr = false);
  void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
                  SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind,
                  StringRef DiagKind);

  template <typename AttrType>
  void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
                   const NamedDecl *D, VarDecl *SelfDecl = nullptr);

  template <class AttrType>
  void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
                   const NamedDecl *D,
                   const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
                   Expr *BrE, bool Neg);

  const CallExpr* getTrylockCallExpr(const Stmt *Cond, LocalVarContext C,
                                     bool &Negate);

  void getEdgeLockset(FactSet &Result, const FactSet &ExitSet,
                      const CFGBlock* PredBlock,
                      const CFGBlock *CurrBlock);

  void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2,
                        SourceLocation JoinLoc, LockErrorKind LEK1,
                        LockErrorKind LEK2);

  void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2,
                        SourceLocation JoinLoc, LockErrorKind LEK1) {
    intersectAndWarn(FSet1, FSet2, JoinLoc, LEK1, LEK1);
  }

  void runAnalysis(AnalysisDeclContext &AC);
};

} // namespace

/// Process acquired_before and acquired_after attributes on Vd.
BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd,
    ThreadSafetyAnalyzer& Analyzer) {
  // Create a new entry for Vd.
  BeforeInfo *Info = nullptr;
  {
    // Keep InfoPtr in its own scope in case BMap is modified later and the
    // reference becomes invalid.
    std::unique_ptr<BeforeInfo> &InfoPtr = BMap[Vd];
    if (!InfoPtr)
      InfoPtr.reset(new BeforeInfo());
    Info = InfoPtr.get();
  }

  for (const auto *At : Vd->attrs()) {
    switch (At->getKind()) {
      case attr::AcquiredBefore: {
        const auto *A = cast<AcquiredBeforeAttr>(At);

        // Read exprs from the attribute, and add them to BeforeVect.
        for (const auto *Arg : A->args()) {
          CapabilityExpr Cp =
            Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
          if (const ValueDecl *Cpvd = Cp.valueDecl()) {
            Info->Vect.push_back(Cpvd);
            const auto It = BMap.find(Cpvd);
            if (It == BMap.end())
              insertAttrExprs(Cpvd, Analyzer);
          }
        }
        break;
      }
      case attr::AcquiredAfter: {
        const auto *A = cast<AcquiredAfterAttr>(At);

        // Read exprs from the attribute, and add them to BeforeVect.
        for (const auto *Arg : A->args()) {
          CapabilityExpr Cp =
            Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
          if (const ValueDecl *ArgVd = Cp.valueDecl()) {
            // Get entry for mutex listed in attribute
            BeforeInfo *ArgInfo = getBeforeInfoForDecl(ArgVd, Analyzer);
            ArgInfo->Vect.push_back(Vd);
          }
        }
        break;
      }
      default:
        break;
    }
  }

  return Info;
}

BeforeSet::BeforeInfo *
BeforeSet::getBeforeInfoForDecl(const ValueDecl *Vd,
                                ThreadSafetyAnalyzer &Analyzer) {
  auto It = BMap.find(Vd);
  BeforeInfo *Info = nullptr;
  if (It == BMap.end())
    Info = insertAttrExprs(Vd, Analyzer);
  else
    Info = It->second.get();
  assert(Info && "BMap contained nullptr?");
  return Info;
}

/// Warn if any mutexes in FSet are in the acquired_before set of StartVd.
void BeforeSet::checkBeforeAfter(const ValueDecl* StartVd,
                                 const FactSet& FSet,
                                 ThreadSafetyAnalyzer& Analyzer,
                                 SourceLocation Loc, StringRef CapKind) {
  SmallVector<BeforeInfo*, 8> InfoVect;

  // Do a depth-first traversal of Vd.
  // Return true if there are cycles.
  std::function<bool (const ValueDecl*)> traverse = [&](const ValueDecl* Vd) {
    if (!Vd)
      return false;

    BeforeSet::BeforeInfo *Info = getBeforeInfoForDecl(Vd, Analyzer);

    if (Info->Visited == 1)
      return true;

    if (Info->Visited == 2)
      return false;

    if (Info->Vect.empty())
      return false;

    InfoVect.push_back(Info);
    Info->Visited = 1;
    for (const auto *Vdb : Info->Vect) {
      // Exclude mutexes in our immediate before set.
      if (FSet.containsMutexDecl(Analyzer.FactMan, Vdb)) {
        StringRef L1 = StartVd->getName();
        StringRef L2 = Vdb->getName();
        Analyzer.Handler.handleLockAcquiredBefore(CapKind, L1, L2, Loc);
      }
      // Transitively search other before sets, and warn on cycles.
      if (traverse(Vdb)) {
        if (CycMap.find(Vd) == CycMap.end()) {
          CycMap.insert(std::make_pair(Vd, true));
          StringRef L1 = Vd->getName();
          Analyzer.Handler.handleBeforeAfterCycle(L1, Vd->getLocation());
        }
      }
    }
    Info->Visited = 2;
    return false;
  };

  traverse(StartVd);

  for (auto *Info : InfoVect)
    Info->Visited = 0;
}
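// Illustrative sketch (assuming the documentation's ACQUIRED_AFTER and
// ACQUIRED_BEFORE macros): with lock-ordering annotations such as
//
//   Mutex mu1;
//   Mutex mu2 ACQUIRED_AFTER(mu1);
//
// acquiring the locks in the order mu2.Lock(); mu1.Lock(); makes
// checkBeforeAfter find 'mu2' (already held) in the acquired_before set
// reachable from 'mu1', and report a lock-ordering warning via
// handleLockAcquiredBefore. A cycle among the attributes (e.g.
// 'a' ACQUIRED_BEFORE(b) together with 'b' ACQUIRED_BEFORE(a)) is reported
// once via handleBeforeAfterCycle.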
/// Gets the value decl pointer from DeclRefExprs or MemberExprs.
static const ValueDecl *getValueDecl(const Expr *Exp) {
  if (const auto *CE = dyn_cast<ImplicitCastExpr>(Exp))
    return getValueDecl(CE->getSubExpr());

  if (const auto *DR = dyn_cast<DeclRefExpr>(Exp))
    return DR->getDecl();

  if (const auto *ME = dyn_cast<MemberExpr>(Exp))
    return ME->getMemberDecl();

  return nullptr;
}

namespace {

template <typename Ty>
class has_arg_iterator_range {
  using yes = char[1];
  using no = char[2];

  template <typename Inner>
  static yes& test(Inner *I, decltype(I->args()) * = nullptr);

  template <typename>
  static no& test(...);

public:
  static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
};

} // namespace

static StringRef ClassifyDiagnostic(const CapabilityAttr *A) {
  return A->getName();
}

static StringRef ClassifyDiagnostic(QualType VDT) {
  // We need to look at the declaration of the type of the value to determine
  // which it is. The type should either be a record or a typedef, or a pointer
  // or reference thereof.
  if (const auto *RT = VDT->getAs<RecordType>()) {
    if (const auto *RD = RT->getDecl())
      if (const auto *CA = RD->getAttr<CapabilityAttr>())
        return ClassifyDiagnostic(CA);
  } else if (const auto *TT = VDT->getAs<TypedefType>()) {
    if (const auto *TD = TT->getDecl())
      if (const auto *CA = TD->getAttr<CapabilityAttr>())
        return ClassifyDiagnostic(CA);
  } else if (VDT->isPointerType() || VDT->isReferenceType())
    return ClassifyDiagnostic(VDT->getPointeeType());

  return "mutex";
}

static StringRef ClassifyDiagnostic(const ValueDecl *VD) {
  assert(VD && "No ValueDecl passed");

  // The ValueDecl is the declaration of a mutex or role (hopefully).
  return ClassifyDiagnostic(VD->getType());
}

template <typename AttrTy>
static std::enable_if_t<!has_arg_iterator_range<AttrTy>::value, StringRef>
ClassifyDiagnostic(const AttrTy *A) {
  if (const ValueDecl *VD = getValueDecl(A->getArg()))
    return ClassifyDiagnostic(VD);
  return "mutex";
}

template <typename AttrTy>
static std::enable_if_t<has_arg_iterator_range<AttrTy>::value, StringRef>
ClassifyDiagnostic(const AttrTy *A) {
  for (const auto *Arg : A->args()) {
    if (const ValueDecl *VD = getValueDecl(Arg))
      return ClassifyDiagnostic(VD);
  }
  return "mutex";
}
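// Illustrative sketch (an assumption based on the documented CAPABILITY
// macro, not code this file compiles): the diagnostic "kind" string comes
// from the capability attribute on the type of the locked object, e.g.
//
//   class CAPABILITY("mutex") Mutex { ... };      // diagnostics say "mutex"
//   class CAPABILITY("role")  NetworkRole { ... }; // diagnostics say "role"
//
// When no capability attribute can be found, ClassifyDiagnostic falls back
// to the generic "mutex".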
bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
  const threadSafety::til::SExpr *SExp = CapE.sexpr();
  assert(SExp && "Null expressions should be ignored");

  if (const auto *LP = dyn_cast<til::LiteralPtr>(SExp)) {
    const ValueDecl *VD = LP->clangDecl();
    // Variables defined in a function are always inaccessible.
    if (!VD->isDefinedOutsideFunctionOrMethod())
      return false;
    // For now we consider static class members to be inaccessible.
    if (isa<CXXRecordDecl>(VD->getDeclContext()))
      return false;
    // Global variables are always in scope.
    return true;
  }

  // Members are in scope from methods of the same class.
  if (const auto *P = dyn_cast<til::Project>(SExp)) {
    if (!CurrentMethod)
      return false;
    const ValueDecl *VD = P->clangDecl();
    return VD->getDeclContext() == CurrentMethod->getDeclContext();
  }

  return false;
}

/// Add a new lock to the lockset, warning if the lock is already there.
/// \param ReqAttr -- true if this is part of an initial Requires attribute.
void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
                                   std::unique_ptr<FactEntry> Entry,
                                   StringRef DiagKind, bool ReqAttr) {
  if (Entry->shouldIgnore())
    return;

  if (!ReqAttr && !Entry->negative()) {
    // look for the negative capability, and remove it from the fact set.
    CapabilityExpr NegC = !*Entry;
    const FactEntry *Nen = FSet.findLock(FactMan, NegC);
    if (Nen) {
      FSet.removeLock(FactMan, NegC);
    }
    else {
      if (inCurrentScope(*Entry) && !Entry->asserted())
        Handler.handleNegativeNotHeld(DiagKind, Entry->toString(),
                                      NegC.toString(), Entry->loc());
    }
  }

  // Check before/after constraints
  if (Handler.issueBetaWarnings() &&
      !Entry->asserted() && !Entry->declared()) {
    GlobalBeforeSet->checkBeforeAfter(Entry->valueDecl(), FSet, *this,
                                      Entry->loc(), DiagKind);
  }

  // FIXME: Don't always warn when we have support for reentrant locks.
  if (const FactEntry *Cp = FSet.findLock(FactMan, *Entry)) {
    if (!Entry->asserted())
      Cp->handleLock(FSet, FactMan, *Entry, Handler, DiagKind);
  } else {
    FSet.addLock(FactMan, std::move(Entry));
  }
}

/// Remove a lock from the lockset, warning if the lock is not there.
/// \param UnlockLoc The source location of the unlock (only used in error msg)
void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
                                      SourceLocation UnlockLoc,
                                      bool FullyRemove, LockKind ReceivedKind,
                                      StringRef DiagKind) {
  if (Cp.shouldIgnore())
    return;

  const FactEntry *LDat = FSet.findLock(FactMan, Cp);
  if (!LDat) {
    SourceLocation PrevLoc;
    if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
      PrevLoc = Neg->loc();
    Handler.handleUnmatchedUnlock(DiagKind, Cp.toString(), UnlockLoc, PrevLoc);
    return;
  }

  // Generic lock removal doesn't care about lock kind mismatches, but
  // otherwise diagnose when the lock kinds are mismatched.
  if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) {
    Handler.handleIncorrectUnlockKind(DiagKind, Cp.toString(), LDat->kind(),
                                      ReceivedKind, LDat->loc(), UnlockLoc);
  }

  LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler,
                     DiagKind);
}
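// Illustrative sketch of the diagnostics produced by addLock/removeLock
// (assuming the documentation's annotation macros on Mutex):
//
//   mu.Lock();
//   mu.Lock();     // warning: acquiring mutex 'mu' that is already held
//   mu.Unlock();
//   mu.Unlock();   // warning: releasing mutex 'mu' that was not held
//
// Releasing a lock also installs the negative capability '!mu' as a fact,
// which addLock consumes the next time 'mu' is acquired.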
/// Extract the list of mutexIDs from the attribute on an expression,
/// and push them onto Mtxs, discarding any duplicates.
template <typename AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
                                       const Expr *Exp, const NamedDecl *D,
                                       VarDecl *SelfDecl) {
  if (Attr->args_size() == 0) {
    // The mutex held is the "this" object.
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, SelfDecl);
    if (Cp.isInvalid()) {
      warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
      return;
    }
    //else
    if (!Cp.shouldIgnore())
      Mtxs.push_back_nodup(Cp);
    return;
  }

  for (const auto *Arg : Attr->args()) {
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, SelfDecl);
    if (Cp.isInvalid()) {
      warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
      continue;
    }
    //else
    if (!Cp.shouldIgnore())
      Mtxs.push_back_nodup(Cp);
  }
}

/// Extract the list of mutexIDs from a trylock attribute. If the
/// trylock applies to the given edge, then push them onto Mtxs, discarding
/// any duplicates.
template <class AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
                                       const Expr *Exp, const NamedDecl *D,
                                       const CFGBlock *PredBlock,
                                       const CFGBlock *CurrBlock,
                                       Expr *BrE, bool Neg) {
  // Find out which branch has the lock
  bool branch = false;
  if (const auto *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE))
    branch = BLE->getValue();
  else if (const auto *ILE = dyn_cast_or_null<IntegerLiteral>(BrE))
    branch = ILE->getValue().getBoolValue();

  int branchnum = branch ? 0 : 1;
  if (Neg)
    branchnum = !branchnum;

  // If we've taken the trylock branch, then add the lock
  int i = 0;
  for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
       SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
    if (*SI == CurrBlock && i == branchnum)
      getMutexIDs(Mtxs, Attr, Exp, D);
  }
}

static bool getStaticBooleanValue(Expr *E, bool &TCond) {
  if (isa<CXXNullPtrLiteralExpr>(E) || isa<GNUNullExpr>(E)) {
    TCond = false;
    return true;
  } else if (const auto *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
    TCond = BLE->getValue();
    return true;
  } else if (const auto *ILE = dyn_cast<IntegerLiteral>(E)) {
    TCond = ILE->getValue().getBoolValue();
    return true;
  } else if (auto *CE = dyn_cast<ImplicitCastExpr>(E))
    return getStaticBooleanValue(CE->getSubExpr(), TCond);
  return false;
}

// If Cond can be traced back to a function call, return the call expression.
// Negate should be initialized to false, and will be set to true if the
// function call is negated, e.g. if (!mu.tryLock(...))
const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
                                                         LocalVarContext C,
                                                         bool &Negate) {
  if (!Cond)
    return nullptr;

  if (const auto *CallExp = dyn_cast<CallExpr>(Cond)) {
    if (CallExp->getBuiltinCallee() == Builtin::BI__builtin_expect)
      return getTrylockCallExpr(CallExp->getArg(0), C, Negate);
    return CallExp;
  }
  else if (const auto *PE = dyn_cast<ParenExpr>(Cond))
    return getTrylockCallExpr(PE->getSubExpr(), C, Negate);
  else if (const auto *CE = dyn_cast<ImplicitCastExpr>(Cond))
    return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
  else if (const auto *FE = dyn_cast<FullExpr>(Cond))
    return getTrylockCallExpr(FE->getSubExpr(), C, Negate);
  else if (const auto *DRE = dyn_cast<DeclRefExpr>(Cond)) {
    const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
    return getTrylockCallExpr(E, C, Negate);
  }
  else if (const auto *UOP = dyn_cast<UnaryOperator>(Cond)) {
    if (UOP->getOpcode() == UO_LNot) {
      Negate = !Negate;
      return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
    }
    return nullptr;
  }
  else if (const auto *BOP = dyn_cast<BinaryOperator>(Cond)) {
    if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) {
      if (BOP->getOpcode() == BO_NE)
        Negate = !Negate;

      bool TCond = false;
      if (getStaticBooleanValue(BOP->getRHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getLHS(), C, Negate);
      }
      TCond = false;
      if (getStaticBooleanValue(BOP->getLHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getRHS(), C, Negate);
      }
      return nullptr;
    }
    if (BOP->getOpcode() == BO_LAnd) {
      // LHS must have been evaluated in a different block.
      return getTrylockCallExpr(BOP->getRHS(), C, Negate);
    }
    if (BOP->getOpcode() == BO_LOr)
      return getTrylockCallExpr(BOP->getRHS(), C, Negate);
    return nullptr;
  } else if (const auto *COP = dyn_cast<ConditionalOperator>(Cond)) {
    bool TCond, FCond;
    if (getStaticBooleanValue(COP->getTrueExpr(), TCond) &&
        getStaticBooleanValue(COP->getFalseExpr(), FCond)) {
      if (TCond && !FCond)
        return getTrylockCallExpr(COP->getCond(), C, Negate);
      if (!TCond && FCond) {
        Negate = !Negate;
        return getTrylockCallExpr(COP->getCond(), C, Negate);
      }
    }
  }
  return nullptr;
}

/// Find the lockset that holds on the edge between PredBlock
/// and CurrBlock. The edge set is the exit set of PredBlock (passed
/// as the ExitSet parameter) plus any trylocks, which are conditionally held.
void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
                                          const FactSet &ExitSet,
                                          const CFGBlock* PredBlock,
                                          const CFGBlock *CurrBlock) {
  Result = ExitSet;

  const Stmt *Cond = PredBlock->getTerminatorCondition();
  // We don't acquire try-locks on ?: branches, only when its result is used.
  if (!Cond || isa<ConditionalOperator>(PredBlock->getTerminatorStmt()))
    return;

  bool Negate = false;
  const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
  const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;
  StringRef CapDiagKind = "mutex";

  const auto *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate);
  if (!Exp)
    return;

  auto *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
  if(!FunDecl || !FunDecl->hasAttrs())
    return;

  CapExprSet ExclusiveLocksToAdd;
  CapExprSet SharedLocksToAdd;

  // If the condition is a call to a Trylock function, then grab the attributes
  for (const auto *Attr : FunDecl->attrs()) {
    switch (Attr->getKind()) {
      case attr::TryAcquireCapability: {
        auto *A = cast<TryAcquireCapabilityAttr>(Attr);
        getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                    Exp, FunDecl, PredBlock, CurrBlock, A->getSuccessValue(),
                    Negate);
        CapDiagKind = ClassifyDiagnostic(A);
        break;
      };
      case attr::ExclusiveTrylockFunction: {
        const auto *A = cast<ExclusiveTrylockFunctionAttr>(Attr);
        getMutexIDs(ExclusiveLocksToAdd, A, Exp, FunDecl,
                    PredBlock, CurrBlock, A->getSuccessValue(), Negate);
        CapDiagKind = ClassifyDiagnostic(A);
        break;
      }
      case attr::SharedTrylockFunction: {
        const auto *A = cast<SharedTrylockFunctionAttr>(Attr);
        getMutexIDs(SharedLocksToAdd, A, Exp, FunDecl,
                    PredBlock, CurrBlock, A->getSuccessValue(), Negate);
        CapDiagKind = ClassifyDiagnostic(A);
        break;
      }
      default:
        break;
    }
  }

  // Add and remove locks.
  SourceLocation Loc = Exp->getExprLoc();
  for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
    addLock(Result, std::make_unique<LockableFactEntry>(ExclusiveLockToAdd,
                                                        LK_Exclusive, Loc),
            CapDiagKind);
  for (const auto &SharedLockToAdd : SharedLocksToAdd)
    addLock(Result, std::make_unique<LockableFactEntry>(SharedLockToAdd,
                                                        LK_Shared, Loc),
            CapDiagKind);
}
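// Illustrative sketch (assuming a Mutex::TryLock() annotated with the
// documentation's TRY_ACQUIRE(true) macro):
//
//   if (mu.TryLock()) {
//     ...            // the edge into this block adds 'mu' to the lockset
//     mu.Unlock();
//   } else {
//     ...            // the edge into this block leaves the lockset unchanged
//   }
//
// getEdgeLockset inspects the terminator condition of the predecessor block,
// traces it back to the trylock call with getTrylockCallExpr, and then adds
// the capability only on the successor edge that matches the attribute's
// success value.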
namespace {

/// We use this class to visit different types of expressions in
/// CFGBlocks, and build up the lockset.
/// An expression may cause us to add or remove locks from the lockset, or else
/// output error messages related to missing locks.
/// FIXME: In future, we may be able to not inherit from a visitor.
class BuildLockset : public ConstStmtVisitor<BuildLockset> {
  friend class ThreadSafetyAnalyzer;

  ThreadSafetyAnalyzer *Analyzer;
  FactSet FSet;
  LocalVariableMap::Context LVarCtx;
  unsigned CtxIndex;

  // helper functions
  void warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp, AccessKind AK,
                          Expr *MutexExp, ProtectedOperationKind POK,
                          StringRef DiagKind, SourceLocation Loc);
  void warnIfMutexHeld(const NamedDecl *D, const Expr *Exp, Expr *MutexExp,
                       StringRef DiagKind);

  void checkAccess(const Expr *Exp, AccessKind AK,
                   ProtectedOperationKind POK = POK_VarAccess);
  void checkPtAccess(const Expr *Exp, AccessKind AK,
                     ProtectedOperationKind POK = POK_VarAccess);

  void handleCall(const Expr *Exp, const NamedDecl *D, VarDecl *VD = nullptr);
  void examineArguments(const FunctionDecl *FD,
                        CallExpr::const_arg_iterator ArgBegin,
                        CallExpr::const_arg_iterator ArgEnd,
                        bool SkipFirstParam = false);

public:
  BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info)
      : ConstStmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet),
        LVarCtx(Info.EntryContext), CtxIndex(Info.EntryIndex) {}

  void VisitUnaryOperator(const UnaryOperator *UO);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitCastExpr(const CastExpr *CE);
  void VisitCallExpr(const CallExpr *Exp);
  void VisitCXXConstructExpr(const CXXConstructExpr *Exp);
  void VisitDeclStmt(const DeclStmt *S);
};

} // namespace

/// Warn if the LSet does not contain a lock sufficient to protect access
/// of at least the passed in AccessKind.
void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp,
                                      AccessKind AK, Expr *MutexExp,
                                      ProtectedOperationKind POK,
                                      StringRef DiagKind, SourceLocation Loc) {
  LockKind LK = getLockKindFromAccessKind(AK);

  CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp);
  if (Cp.isInvalid()) {
    warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, DiagKind);
    return;
  } else if (Cp.shouldIgnore()) {
    return;
  }

  if (Cp.negative()) {
    // Negative capabilities act like locks excluded
    const FactEntry *LDat = FSet.findLock(Analyzer->FactMan, !Cp);
    if (LDat) {
      Analyzer->Handler.handleFunExcludesLock(
          DiagKind, D->getNameAsString(), (!Cp).toString(), Loc);
      return;
    }

    // If this does not refer to a negative capability in the same class,
    // then stop here.
    if (!Analyzer->inCurrentScope(Cp))
      return;

    // Otherwise the negative requirement must be propagated to the caller.
    LDat = FSet.findLock(Analyzer->FactMan, Cp);
    if (!LDat) {
      Analyzer->Handler.handleNegativeNotHeld(D, Cp.toString(), Loc);
    }
    return;
  }

  const FactEntry *LDat = FSet.findLockUniv(Analyzer->FactMan, Cp);
  bool NoError = true;
  if (!LDat) {
    // No exact match found. Look for a partial match.
    LDat = FSet.findPartialMatch(Analyzer->FactMan, Cp);
    if (LDat) {
      // Warn that there's no precise match.
      std::string PartMatchStr = LDat->toString();
      StringRef PartMatchName(PartMatchStr);
      Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
                                           LK, Loc, &PartMatchName);
    } else {
      // Warn that there's no match at all.
1677 Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(), 1678 LK, Loc); 1679 } 1680 NoError = false; 1681 } 1682 // Make sure the mutex we found is the right kind. 1683 if (NoError && LDat && !LDat->isAtLeast(LK)) { 1684 Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(), 1685 LK, Loc); 1686 } 1687} 1688 1689/// Warn if the LSet contains the given lock. 1690void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp, 1691 Expr *MutexExp, StringRef DiagKind) { 1692 CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp); 1693 if (Cp.isInvalid()) { 1694 warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, DiagKind); 1695 return; 1696 } else if (Cp.shouldIgnore()) { 1697 return; 1698 } 1699 1700 const FactEntry *LDat = FSet.findLock(Analyzer->FactMan, Cp); 1701 if (LDat) { 1702 Analyzer->Handler.handleFunExcludesLock( 1703 DiagKind, D->getNameAsString(), Cp.toString(), Exp->getExprLoc()); 1704 } 1705} 1706 1707/// Checks guarded_by and pt_guarded_by attributes. 1708/// Whenever we identify an access (read or write) to a DeclRefExpr that is 1709/// marked with guarded_by, we must ensure the appropriate mutexes are held. 1710/// Similarly, we check if the access is to an expression that dereferences 1711/// a pointer marked with pt_guarded_by. 1712void BuildLockset::checkAccess(const Expr *Exp, AccessKind AK, 1713 ProtectedOperationKind POK) { 1714 Exp = Exp->IgnoreImplicit()->IgnoreParenCasts(); 1715 1716 SourceLocation Loc = Exp->getExprLoc(); 1717 1718 // Local variables of reference type cannot be re-assigned; 1719 // map them to their initializer. 1720 while (const auto *DRE = dyn_cast<DeclRefExpr>(Exp)) { 1721 const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()->getCanonicalDecl()); 1722 if (VD && VD->isLocalVarDecl() && VD->getType()->isReferenceType()) { 1723 if (const auto *E = VD->getInit()) { 1724 // Guard against self-initialization. e.g., int &i = i; 1725 if (E == Exp) 1726 break; 1727 Exp = E; 1728 continue; 1729 } 1730 } 1731 break; 1732 } 1733 1734 if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) { 1735 // For dereferences 1736 if (UO->getOpcode() == UO_Deref) 1737 checkPtAccess(UO->getSubExpr(), AK, POK); 1738 return; 1739 } 1740 1741 if (const auto *AE = dyn_cast<ArraySubscriptExpr>(Exp)) { 1742 checkPtAccess(AE->getLHS(), AK, POK); 1743 return; 1744 } 1745 1746 if (const auto *ME = dyn_cast<MemberExpr>(Exp)) { 1747 if (ME->isArrow()) 1748 checkPtAccess(ME->getBase(), AK, POK); 1749 else 1750 checkAccess(ME->getBase(), AK, POK); 1751 } 1752 1753 const ValueDecl *D = getValueDecl(Exp); 1754 if (!D || !D->hasAttrs()) 1755 return; 1756 1757 if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan)) { 1758 Analyzer->Handler.handleNoMutexHeld("mutex", D, POK, AK, Loc); 1759 } 1760 1761 for (const auto *I : D->specific_attrs<GuardedByAttr>()) 1762 warnIfMutexNotHeld(D, Exp, AK, I->getArg(), POK, 1763 ClassifyDiagnostic(I), Loc); 1764} 1765 1766/// Checks pt_guarded_by and pt_guarded_var attributes. 1767/// POK is the same operationKind that was passed to checkAccess. 
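/// A minimal illustrative example (hypothetical user code with the usual
/// annotation macros, not part of this file):
/// \code
///   Mutex mu;
///   int *p PT_GUARDED_BY(mu);
///   void f() { *p = 42; }  // warns: dereferencing 'p' requires holding 'mu'
/// \endcode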
1768void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK, 1769 ProtectedOperationKind POK) { 1770 while (true) { 1771 if (const auto *PE = dyn_cast<ParenExpr>(Exp)) { 1772 Exp = PE->getSubExpr(); 1773 continue; 1774 } 1775 if (const auto *CE = dyn_cast<CastExpr>(Exp)) { 1776 if (CE->getCastKind() == CK_ArrayToPointerDecay) { 1777 // If it's an actual array, and not a pointer, then its elements 1778 // are protected by GUARDED_BY, not PT_GUARDED_BY. 1779 checkAccess(CE->getSubExpr(), AK, POK); 1780 return; 1781 } 1782 Exp = CE->getSubExpr(); 1783 continue; 1784 } 1785 break; 1786 } 1787 1788 // Pass by reference warnings are under a different flag. 1789 ProtectedOperationKind PtPOK = POK_VarDereference; 1790 if (POK == POK_PassByRef) PtPOK = POK_PtPassByRef; 1791 1792 const ValueDecl *D = getValueDecl(Exp); 1793 if (!D || !D->hasAttrs()) 1794 return; 1795 1796 if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan)) 1797 Analyzer->Handler.handleNoMutexHeld("mutex", D, PtPOK, AK, 1798 Exp->getExprLoc()); 1799 1800 for (auto const *I : D->specific_attrs<PtGuardedByAttr>()) 1801 warnIfMutexNotHeld(D, Exp, AK, I->getArg(), PtPOK, 1802 ClassifyDiagnostic(I), Exp->getExprLoc()); 1803} 1804 1805/// Process a function call, method call, constructor call, 1806/// or destructor call. This involves looking at the attributes on the 1807/// corresponding function/method/constructor/destructor, issuing warnings, 1808/// and updating the locksets accordingly. 1809/// 1810/// FIXME: For classes annotated with one of the guarded annotations, we need 1811/// to treat const method calls as reads and non-const method calls as writes, 1812/// and check that the appropriate locks are held. Non-const method calls with 1813/// the same signature as const method calls can also be treated as reads. 1814/// 1815void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D, 1816 VarDecl *VD) { 1817 SourceLocation Loc = Exp->getExprLoc(); 1818 CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd; 1819 CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove; 1820 CapExprSet ScopedReqsAndExcludes; 1821 StringRef CapDiagKind = "mutex"; 1822 1823 // Figure out if we're constructing an object of a scoped lockable class. 1824 bool isScopedVar = false; 1825 if (VD) { 1826 if (const auto *CD = dyn_cast<const CXXConstructorDecl>(D)) { 1827 const CXXRecordDecl* PD = CD->getParent(); 1828 if (PD && PD->hasAttr<ScopedLockableAttr>()) 1829 isScopedVar = true; 1830 } 1831 } 1832 1833 for (const Attr *At : D->attrs()) { 1834 switch (At->getKind()) { 1835 // When we encounter a lock function, we need to add the lock to our 1836 // lockset. 1837 case attr::AcquireCapability: { 1838 const auto *A = cast<AcquireCapabilityAttr>(At); 1839 Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd 1840 : ExclusiveLocksToAdd, 1841 A, Exp, D, VD); 1842 1843 CapDiagKind = ClassifyDiagnostic(A); 1844 break; 1845 } 1846 1847 // An assert will add a lock to the lockset, but will not generate 1848 // a warning if it is already there, and will not generate a warning 1849 // if it is not removed.
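    // For instance (hypothetical user code): in
    //   void worker() {
    //     mu.AssertHeld();  // a method annotated with assert_exclusive_lock
    //     data = 42;        // OK: 'mu' is treated as held from here on,
    //   }                   // and no "still held" warning at function end.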
1850 case attr::AssertExclusiveLock: { 1851 const auto *A = cast<AssertExclusiveLockAttr>(At); 1852 1853 CapExprSet AssertLocks; 1854 Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD); 1855 for (const auto &AssertLock : AssertLocks) 1856 Analyzer->addLock( 1857 FSet, 1858 std::make_unique<LockableFactEntry>(AssertLock, LK_Exclusive, Loc, 1859 FactEntry::Asserted), 1860 ClassifyDiagnostic(A)); 1861 break; 1862 } 1863 case attr::AssertSharedLock: { 1864 const auto *A = cast<AssertSharedLockAttr>(At); 1865 1866 CapExprSet AssertLocks; 1867 Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD); 1868 for (const auto &AssertLock : AssertLocks) 1869 Analyzer->addLock( 1870 FSet, 1871 std::make_unique<LockableFactEntry>(AssertLock, LK_Shared, Loc, 1872 FactEntry::Asserted), 1873 ClassifyDiagnostic(A)); 1874 break; 1875 } 1876 1877 case attr::AssertCapability: { 1878 const auto *A = cast<AssertCapabilityAttr>(At); 1879 CapExprSet AssertLocks; 1880 Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD); 1881 for (const auto &AssertLock : AssertLocks) 1882 Analyzer->addLock(FSet, 1883 std::make_unique<LockableFactEntry>( 1884 AssertLock, 1885 A->isShared() ? LK_Shared : LK_Exclusive, Loc, 1886 FactEntry::Asserted), 1887 ClassifyDiagnostic(A)); 1888 break; 1889 } 1890 1891 // When we encounter an unlock function, we need to remove unlocked 1892 // mutexes from the lockset, and flag a warning if they are not there. 1893 case attr::ReleaseCapability: { 1894 const auto *A = cast<ReleaseCapabilityAttr>(At); 1895 if (A->isGeneric()) 1896 Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, VD); 1897 else if (A->isShared()) 1898 Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, VD); 1899 else 1900 Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, VD); 1901 1902 CapDiagKind = ClassifyDiagnostic(A); 1903 break; 1904 } 1905 1906 case attr::RequiresCapability: { 1907 const auto *A = cast<RequiresCapabilityAttr>(At); 1908 for (auto *Arg : A->args()) { 1909 warnIfMutexNotHeld(D, Exp, A->isShared() ? AK_Read : AK_Written, Arg, 1910 POK_FunctionCall, ClassifyDiagnostic(A), 1911 Exp->getExprLoc()); 1912 // use for adopting a lock 1913 if (isScopedVar) 1914 Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, VD); 1915 } 1916 break; 1917 } 1918 1919 case attr::LocksExcluded: { 1920 const auto *A = cast<LocksExcludedAttr>(At); 1921 for (auto *Arg : A->args()) { 1922 warnIfMutexHeld(D, Exp, Arg, ClassifyDiagnostic(A)); 1923 // use for deferring a lock 1924 if (isScopedVar) 1925 Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, VD); 1926 } 1927 break; 1928 } 1929 1930 // Ignore attributes unrelated to thread-safety 1931 default: 1932 break; 1933 } 1934 } 1935 1936 // Remove locks first to allow lock upgrading/downgrading. 1937 // FIXME -- should only fully remove if the attribute refers to 'this'. 1938 bool Dtor = isa<CXXDestructorDecl>(D); 1939 for (const auto &M : ExclusiveLocksToRemove) 1940 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive, CapDiagKind); 1941 for (const auto &M : SharedLocksToRemove) 1942 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared, CapDiagKind); 1943 for (const auto &M : GenericLocksToRemove) 1944 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic, CapDiagKind); 1945 1946 // Add locks. 1947 FactEntry::SourceKind Source = 1948 isScopedVar ? 
FactEntry::Managed : FactEntry::Acquired; 1949 for (const auto &M : ExclusiveLocksToAdd) 1950 Analyzer->addLock( 1951 FSet, std::make_unique<LockableFactEntry>(M, LK_Exclusive, Loc, Source), 1952 CapDiagKind); 1953 for (const auto &M : SharedLocksToAdd) 1954 Analyzer->addLock( 1955 FSet, std::make_unique<LockableFactEntry>(M, LK_Shared, Loc, Source), 1956 CapDiagKind); 1957 1958 if (isScopedVar) { 1959 // Add the managing object as a dummy mutex, mapped to the underlying mutex. 1960 SourceLocation MLoc = VD->getLocation(); 1961 DeclRefExpr DRE(VD->getASTContext(), VD, false, VD->getType(), VK_LValue, 1962 VD->getLocation()); 1963 // FIXME: does this store a pointer to DRE? 1964 CapabilityExpr Scp = Analyzer->SxBuilder.translateAttrExpr(&DRE, nullptr); 1965 1966 auto ScopedEntry = std::make_unique<ScopedLockableFactEntry>(Scp, MLoc); 1967 for (const auto &M : ExclusiveLocksToAdd) 1968 ScopedEntry->addLock(M); 1969 for (const auto &M : SharedLocksToAdd) 1970 ScopedEntry->addLock(M); 1971 for (const auto &M : ScopedReqsAndExcludes) 1972 ScopedEntry->addLock(M); 1973 for (const auto &M : ExclusiveLocksToRemove) 1974 ScopedEntry->addExclusiveUnlock(M); 1975 for (const auto &M : SharedLocksToRemove) 1976 ScopedEntry->addSharedUnlock(M); 1977 Analyzer->addLock(FSet, std::move(ScopedEntry), CapDiagKind); 1978 } 1979} 1980 1981/// For unary operations which read and write a variable, we need to 1982/// check whether we hold any required mutexes. Reads are checked in 1983/// VisitCastExpr. 1984void BuildLockset::VisitUnaryOperator(const UnaryOperator *UO) { 1985 switch (UO->getOpcode()) { 1986 case UO_PostDec: 1987 case UO_PostInc: 1988 case UO_PreDec: 1989 case UO_PreInc: 1990 checkAccess(UO->getSubExpr(), AK_Written); 1991 break; 1992 default: 1993 break; 1994 } 1995} 1996 1997/// For binary operations which assign to a variable (writes), we need to check 1998/// whether we hold any required mutexes. 1999/// FIXME: Deal with non-primitive types. 2000void BuildLockset::VisitBinaryOperator(const BinaryOperator *BO) { 2001 if (!BO->isAssignmentOp()) 2002 return; 2003 2004 // adjust the context 2005 LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx); 2006 2007 checkAccess(BO->getLHS(), AK_Written); 2008} 2009 2010/// Whenever we do an LValue to Rvalue cast, we are reading a variable and 2011/// need to ensure we hold any required mutexes. 2012/// FIXME: Deal with non-primitive types. 2013void BuildLockset::VisitCastExpr(const CastExpr *CE) { 2014 if (CE->getCastKind() != CK_LValueToRValue) 2015 return; 2016 checkAccess(CE->getSubExpr(), AK_Read); 2017} 2018 2019void BuildLockset::examineArguments(const FunctionDecl *FD, 2020 CallExpr::const_arg_iterator ArgBegin, 2021 CallExpr::const_arg_iterator ArgEnd, 2022 bool SkipFirstParam) { 2023 // Currently we can't do anything if we don't know the function declaration. 2024 if (!FD) 2025 return; 2026 2027 // NO_THREAD_SAFETY_ANALYSIS does double duty here. Normally it 2028 // only turns off checking within the body of a function, but we also 2029 // use it to turn off checking in arguments to the function. This 2030 // could result in some false negatives, but the alternative is to 2031 // create yet another attribute. 2032 if (FD->hasAttr<NoThreadSafetyAnalysisAttr>()) 2033 return; 2034 2035 const ArrayRef<ParmVarDecl *> Params = FD->parameters(); 2036 auto Param = Params.begin(); 2037 if (SkipFirstParam) 2038 ++Param; 2039 2040 // There can be default arguments, so we stop when one iterator is at end(). 
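  // e.g. (hypothetical): given 'void sink(const T &t)' and a call
  // 'sink(obj)', the loop below treats the argument as a pass-by-reference
  // read of 'obj' and checks any GUARDED_BY requirement on it.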
2041 for (auto Arg = ArgBegin; Param != Params.end() && Arg != ArgEnd; 2042 ++Param, ++Arg) { 2043 QualType Qt = (*Param)->getType(); 2044 if (Qt->isReferenceType()) 2045 checkAccess(*Arg, AK_Read, POK_PassByRef); 2046 } 2047} 2048 2049void BuildLockset::VisitCallExpr(const CallExpr *Exp) { 2050 if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Exp)) { 2051 const auto *ME = dyn_cast<MemberExpr>(CE->getCallee()); 2052 // ME can be null when calling a method pointer 2053 const CXXMethodDecl *MD = CE->getMethodDecl(); 2054 2055 if (ME && MD) { 2056 if (ME->isArrow()) { 2057 // Should perhaps be AK_Written if !MD->isConst(). 2058 checkPtAccess(CE->getImplicitObjectArgument(), AK_Read); 2059 } else { 2060 // Should perhaps be AK_Written if !MD->isConst(). 2061 checkAccess(CE->getImplicitObjectArgument(), AK_Read); 2062 } 2063 } 2064 2065 examineArguments(CE->getDirectCallee(), CE->arg_begin(), CE->arg_end()); 2066 } else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) { 2067 auto OEop = OE->getOperator(); 2068 switch (OEop) { 2069 case OO_Equal: { 2070 const Expr *Target = OE->getArg(0); 2071 const Expr *Source = OE->getArg(1); 2072 checkAccess(Target, AK_Written); 2073 checkAccess(Source, AK_Read); 2074 break; 2075 } 2076 case OO_Star: 2077 case OO_Arrow: 2078 case OO_Subscript: 2079 if (!(OEop == OO_Star && OE->getNumArgs() > 1)) { 2080 // Grrr. operator* can be multiplication... 2081 checkPtAccess(OE->getArg(0), AK_Read); 2082 } 2083 LLVM_FALLTHROUGH; 2084 default: { 2085 // TODO: get rid of this, and rely on pass-by-ref instead. 2086 const Expr *Obj = OE->getArg(0); 2087 checkAccess(Obj, AK_Read); 2088 // Check the remaining arguments. For method operators, the first 2089 // argument is the implicit self argument, and doesn't appear in the 2090 // FunctionDecl, but for non-methods it does. 2091 const FunctionDecl *FD = OE->getDirectCallee(); 2092 examineArguments(FD, std::next(OE->arg_begin()), OE->arg_end(), 2093 /*SkipFirstParam*/ !isa<CXXMethodDecl>(FD)); 2094 break; 2095 } 2096 } 2097 } else { 2098 examineArguments(Exp->getDirectCallee(), Exp->arg_begin(), Exp->arg_end()); 2099 } 2100 2101 auto *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl()); 2102 if(!D || !D->hasAttrs()) 2103 return; 2104 handleCall(Exp, D); 2105} 2106 2107void BuildLockset::VisitCXXConstructExpr(const CXXConstructExpr *Exp) { 2108 const CXXConstructorDecl *D = Exp->getConstructor(); 2109 if (D && D->isCopyConstructor()) { 2110 const Expr* Source = Exp->getArg(0); 2111 checkAccess(Source, AK_Read); 2112 } else { 2113 examineArguments(D, Exp->arg_begin(), Exp->arg_end()); 2114 } 2115} 2116 2117static CXXConstructorDecl * 2118findConstructorForByValueReturn(const CXXRecordDecl *RD) { 2119 // Prefer a move constructor over a copy constructor. If there's more than 2120 // one copy constructor or more than one move constructor, we arbitrarily 2121 // pick the first declared such constructor rather than trying to guess which 2122 // one is more appropriate. 
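  // This supports initializations such as (hypothetical user code):
  //   ScopedLock make_lock();                  // returns a scoped lockable
  //   void f() { ScopedLock l = make_lock(); /* tracked as a scoped capability */ }
  // where VisitDeclStmt builds a fake call to the constructor chosen here so
  // that its attributes describe the effect on the lockset.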
2123 CXXConstructorDecl *CopyCtor = nullptr; 2124 for (auto *Ctor : RD->ctors()) { 2125 if (Ctor->isDeleted()) 2126 continue; 2127 if (Ctor->isMoveConstructor()) 2128 return Ctor; 2129 if (!CopyCtor && Ctor->isCopyConstructor()) 2130 CopyCtor = Ctor; 2131 } 2132 return CopyCtor; 2133} 2134 2135static Expr *buildFakeCtorCall(CXXConstructorDecl *CD, ArrayRef<Expr *> Args, 2136 SourceLocation Loc) { 2137 ASTContext &Ctx = CD->getASTContext(); 2138 return CXXConstructExpr::Create(Ctx, Ctx.getRecordType(CD->getParent()), Loc, 2139 CD, true, Args, false, false, false, false, 2140 CXXConstructExpr::CK_Complete, 2141 SourceRange(Loc, Loc)); 2142} 2143 2144void BuildLockset::VisitDeclStmt(const DeclStmt *S) { 2145 // adjust the context 2146 LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx); 2147 2148 for (auto *D : S->getDeclGroup()) { 2149 if (auto *VD = dyn_cast_or_null<VarDecl>(D)) { 2150 Expr *E = VD->getInit(); 2151 if (!E) 2152 continue; 2153 E = E->IgnoreParens(); 2154 2155 // Handle constructors that involve temporaries. 2156 if (auto *EWC = dyn_cast<ExprWithCleanups>(E)) 2157 E = EWC->getSubExpr()->IgnoreParens(); 2158 if (auto *CE = dyn_cast<CastExpr>(E)) 2159 if (CE->getCastKind() == CK_NoOp || 2160 CE->getCastKind() == CK_ConstructorConversion || 2161 CE->getCastKind() == CK_UserDefinedConversion) 2162 E = CE->getSubExpr()->IgnoreParens(); 2163 if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(E)) 2164 E = BTE->getSubExpr()->IgnoreParens(); 2165 2166 if (const auto *CE = dyn_cast<CXXConstructExpr>(E)) { 2167 const auto *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor()); 2168 if (!CtorD || !CtorD->hasAttrs()) 2169 continue; 2170 handleCall(E, CtorD, VD); 2171 } else if (isa<CallExpr>(E) && E->isRValue()) { 2172 // If the object is initialized by a function call that returns a 2173 // scoped lockable by value, use the attributes on the copy or move 2174 // constructor to figure out what effect that should have on the 2175 // lockset. 2176 // FIXME: Is this really the best way to handle this situation? 2177 auto *RD = E->getType()->getAsCXXRecordDecl(); 2178 if (!RD || !RD->hasAttr<ScopedLockableAttr>()) 2179 continue; 2180 CXXConstructorDecl *CtorD = findConstructorForByValueReturn(RD); 2181 if (!CtorD || !CtorD->hasAttrs()) 2182 continue; 2183 handleCall(buildFakeCtorCall(CtorD, {E}, E->getBeginLoc()), CtorD, VD); 2184 } 2185 } 2186 } 2187} 2188 2189/// Compute the intersection of two locksets and issue warnings for any 2190/// locks in the symmetric difference. 2191/// 2192/// This function is used at a merge point in the CFG when comparing the lockset 2193/// of each branch being merged. For example, given the following sequence: 2194/// A; if () then B; else C; D; we need to check that the locksets after B and C 2195/// are the same. In the event of a difference, we use the intersection of these 2196/// two locksets at the start of D. 2197/// 2198/// \param FSet1 The first lockset. 2199/// \param FSet2 The second lockset. 2200/// \param JoinLoc The location of the join point for error reporting. 2201/// \param LEK1 The error message to report if a mutex is missing from FSet1. 2202/// \param LEK2 The error message to report if a mutex is missing from FSet2. 2203void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &FSet1, 2204 const FactSet &FSet2, 2205 SourceLocation JoinLoc, 2206 LockErrorKind LEK1, 2207 LockErrorKind LEK2) { 2208 FactSet FSet1Orig = FSet1; 2209 2210 // Find locks in FSet2 that conflict or are not in FSet1, and warn.
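  // Worked example (hypothetical): with FSet1 = {A exclusive, B} and
  // FSet2 = {A shared, C}, the loop below reports the exclusive/shared
  // conflict on A and reports C via LEK1; the second loop reports B via LEK2
  // and, for LEK_LockedSomePredecessors, drops it, so FSet1 ends up as the
  // intersection {A}.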
2211 for (const auto &Fact : FSet2) { 2212 const FactEntry &LDat2 = FactMan[Fact]; 2213 2214 FactSet::iterator Iter1 = FSet1.findLockIter(FactMan, LDat2); 2215 if (Iter1 != FSet1.end()) { 2216 const FactEntry &LDat1 = FactMan[*Iter1]; 2217 if (LDat1.kind() != LDat2.kind()) { 2218 Handler.handleExclusiveAndShared("mutex", LDat2.toString(), LDat2.loc(), 2219 LDat1.loc()); 2220 if (LEK1 == LEK_LockedSomePredecessors && 2221 LDat1.kind() != LK_Exclusive) { 2222 // Take the exclusive lock, which is the one in FSet2. 2223 *Iter1 = Fact; 2224 } 2225 } else if (LEK1 == LEK_LockedSomePredecessors && LDat1.asserted() && 2226 !LDat2.asserted()) { 2227 // The non-asserted lock in FSet2 is the one we want to track. 2228 *Iter1 = Fact; 2229 } 2230 } else { 2231 LDat2.handleRemovalFromIntersection(FSet2, FactMan, JoinLoc, LEK1, 2232 Handler); 2233 } 2234 } 2235 2236 // Find locks in FSet1 that are not in FSet2, and remove them. 2237 for (const auto &Fact : FSet1Orig) { 2238 const FactEntry *LDat1 = &FactMan[Fact]; 2239 const FactEntry *LDat2 = FSet2.findLock(FactMan, *LDat1); 2240 2241 if (!LDat2) { 2242 LDat1->handleRemovalFromIntersection(FSet1Orig, FactMan, JoinLoc, LEK2, 2243 Handler); 2244 if (LEK2 == LEK_LockedSomePredecessors) 2245 FSet1.removeLock(FactMan, *LDat1); 2246 } 2247 } 2248} 2249 2250// Return true if block B never continues to its successors. 2251static bool neverReturns(const CFGBlock *B) { 2252 if (B->hasNoReturnElement()) 2253 return true; 2254 if (B->empty()) 2255 return false; 2256 2257 CFGElement Last = B->back(); 2258 if (Optional<CFGStmt> S = Last.getAs<CFGStmt>()) { 2259 if (isa<CXXThrowExpr>(S->getStmt())) 2260 return true; 2261 } 2262 return false; 2263} 2264 2265/// Check a function's CFG for thread-safety violations. 2266/// 2267/// We traverse the blocks in the CFG, compute the set of mutexes that are held 2268/// at the end of each block, and issue warnings for thread safety violations. 2269/// Each block in the CFG is traversed exactly once. 2270void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) { 2271 // TODO: this whole function needs to be rewritten as a visitor for CFGWalker. 2272 // For now, we just use the walker to set things up. 2273 threadSafety::CFGWalker walker; 2274 if (!walker.init(AC)) 2275 return; 2276 2277 // AC.dumpCFG(true); 2278 // threadSafety::printSCFG(walker); 2279 2280 CFG *CFGraph = walker.getGraph(); 2281 const NamedDecl *D = walker.getDecl(); 2282 const auto *CurrentFunction = dyn_cast<FunctionDecl>(D); 2283 CurrentMethod = dyn_cast<CXXMethodDecl>(D); 2284 2285 if (D->hasAttr<NoThreadSafetyAnalysisAttr>()) 2286 return; 2287 2288 // FIXME: Do something a bit more intelligent inside constructor and 2289 // destructor code. Constructors and destructors must assume unique access 2290 // to 'this', so checks on member variable access are disabled, but we should 2291 // still enable checks on other objects. 2292 if (isa<CXXConstructorDecl>(D)) 2293 return; // Don't check inside constructors. 2294 if (isa<CXXDestructorDecl>(D)) 2295 return; // Don't check inside destructors. 2296 2297 Handler.enterFunction(CurrentFunction); 2298 2299 BlockInfo.resize(CFGraph->getNumBlockIDs(), 2300 CFGBlockInfo::getEmptyBlockInfo(LocalVarMap)); 2301 2302 // We need to explore the CFG via a "topological" ordering. 2303 // That way, we will be guaranteed to have information about required 2304 // predecessor locksets when exploring a new block.
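  // For example, in 'if (c) { A } else { B } C;' both A and B are visited
  // before C, so C's entry lockset can be formed from the already-computed
  // exit locksets of A and B.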
2305 const PostOrderCFGView *SortedGraph = walker.getSortedGraph(); 2306 PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph); 2307 2308 // Mark entry block as reachable 2309 BlockInfo[CFGraph->getEntry().getBlockID()].Reachable = true; 2310 2311 // Compute SSA names for local variables 2312 LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo); 2313 2314 // Fill in source locations for all CFGBlocks. 2315 findBlockLocations(CFGraph, SortedGraph, BlockInfo); 2316 2317 CapExprSet ExclusiveLocksAcquired; 2318 CapExprSet SharedLocksAcquired; 2319 CapExprSet LocksReleased; 2320 2321 // Add locks from exclusive_locks_required and shared_locks_required 2322 // to initial lockset. Also turn off checking for lock and unlock functions. 2323 // FIXME: is there a more intelligent way to check lock/unlock functions? 2324 if (!SortedGraph->empty() && D->hasAttrs()) { 2325 const CFGBlock *FirstBlock = *SortedGraph->begin(); 2326 FactSet &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet; 2327 2328 CapExprSet ExclusiveLocksToAdd; 2329 CapExprSet SharedLocksToAdd; 2330 StringRef CapDiagKind = "mutex"; 2331 2332 SourceLocation Loc = D->getLocation(); 2333 for (const auto *Attr : D->attrs()) { 2334 Loc = Attr->getLocation(); 2335 if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) { 2336 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A, 2337 nullptr, D); 2338 CapDiagKind = ClassifyDiagnostic(A); 2339 } else if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) { 2340 // UNLOCK_FUNCTION() is used to hide the underlying lock implementation. 2341 // We must ignore such methods. 2342 if (A->args_size() == 0) 2343 return; 2344 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A, 2345 nullptr, D); 2346 getMutexIDs(LocksReleased, A, nullptr, D); 2347 CapDiagKind = ClassifyDiagnostic(A); 2348 } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) { 2349 if (A->args_size() == 0) 2350 return; 2351 getMutexIDs(A->isShared() ? SharedLocksAcquired 2352 : ExclusiveLocksAcquired, 2353 A, nullptr, D); 2354 CapDiagKind = ClassifyDiagnostic(A); 2355 } else if (isa<ExclusiveTrylockFunctionAttr>(Attr)) { 2356 // Don't try to check trylock functions for now. 2357 return; 2358 } else if (isa<SharedTrylockFunctionAttr>(Attr)) { 2359 // Don't try to check trylock functions for now. 2360 return; 2361 } else if (isa<TryAcquireCapabilityAttr>(Attr)) { 2362 // Don't try to check trylock functions for now. 2363 return; 2364 } 2365 } 2366 2367 // FIXME -- Loc can be wrong here. 2368 for (const auto &Mu : ExclusiveLocksToAdd) { 2369 auto Entry = std::make_unique<LockableFactEntry>(Mu, LK_Exclusive, Loc, 2370 FactEntry::Declared); 2371 addLock(InitialLockset, std::move(Entry), CapDiagKind, true); 2372 } 2373 for (const auto &Mu : SharedLocksToAdd) { 2374 auto Entry = std::make_unique<LockableFactEntry>(Mu, LK_Shared, Loc, 2375 FactEntry::Declared); 2376 addLock(InitialLockset, std::move(Entry), CapDiagKind, true); 2377 } 2378 } 2379 2380 for (const auto *CurrBlock : *SortedGraph) { 2381 unsigned CurrBlockID = CurrBlock->getBlockID(); 2382 CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID]; 2383 2384 // Use the default initial lockset in case there are no predecessors. 2385 VisitedBlocks.insert(CurrBlock); 2386 2387 // Iterate through the predecessor blocks and warn if the lockset for all 2388 // predecessors is not the same. We take the entry lockset of the current 2389 // block to be the intersection of all previous locksets. 
2390 // FIXME: By keeping the intersection, we may output more errors in future 2391 // for a lock which is not in the intersection, but was in the union. We 2392 // may want to also keep the union in future. As an example, let's say 2393 // the intersection contains Mutex L, and the union contains L and M. 2394 // Later we unlock M. At this point, we would output an error because we 2395 // never locked M; although the real error is probably that we forgot to 2396 // lock M on all code paths. Conversely, let's say that later we lock M. 2397 // In this case, we should compare against the intersection instead of the 2398 // union because the real error is probably that we forgot to unlock M on 2399 // all code paths. 2400 bool LocksetInitialized = false; 2401 SmallVector<CFGBlock *, 8> SpecialBlocks; 2402 for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(), 2403 PE = CurrBlock->pred_end(); PI != PE; ++PI) { 2404 // if *PI -> CurrBlock is a back edge 2405 if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) 2406 continue; 2407 2408 unsigned PrevBlockID = (*PI)->getBlockID(); 2409 CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID]; 2410 2411 // Ignore edges from blocks that can't return. 2412 if (neverReturns(*PI) || !PrevBlockInfo->Reachable) 2413 continue; 2414 2415 // Okay, we can reach this block from the entry. 2416 CurrBlockInfo->Reachable = true; 2417 2418 // If the previous block ended in a 'continue' or 'break' statement, then 2419 // a difference in locksets is probably due to a bug in that block, rather 2420 // than in some other predecessor. In that case, keep the other 2421 // predecessor's lockset. 2422 if (const Stmt *Terminator = (*PI)->getTerminatorStmt()) { 2423 if (isa<ContinueStmt>(Terminator) || isa<BreakStmt>(Terminator)) { 2424 SpecialBlocks.push_back(*PI); 2425 continue; 2426 } 2427 } 2428 2429 FactSet PrevLockset; 2430 getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock); 2431 2432 if (!LocksetInitialized) { 2433 CurrBlockInfo->EntrySet = PrevLockset; 2434 LocksetInitialized = true; 2435 } else { 2436 intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset, 2437 CurrBlockInfo->EntryLoc, 2438 LEK_LockedSomePredecessors); 2439 } 2440 } 2441 2442 // Skip rest of block if it's not reachable. 2443 if (!CurrBlockInfo->Reachable) 2444 continue; 2445 2446 // Process continue and break blocks. Assume that the lockset for the 2447 // resulting block is unaffected by any discrepancies in them. 2448 for (const auto *PrevBlock : SpecialBlocks) { 2449 unsigned PrevBlockID = PrevBlock->getBlockID(); 2450 CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID]; 2451 2452 if (!LocksetInitialized) { 2453 CurrBlockInfo->EntrySet = PrevBlockInfo->ExitSet; 2454 LocksetInitialized = true; 2455 } else { 2456 // Determine whether this edge is a loop terminator for diagnostic 2457 // purposes. FIXME: A 'break' statement might be a loop terminator, but 2458 // it might also be part of a switch. Also, a subsequent destructor 2459 // might add to the lockset, in which case the real issue might be a 2460 // double lock on the other path. 2461 const Stmt *Terminator = PrevBlock->getTerminatorStmt(); 2462 bool IsLoop = Terminator && isa<ContinueStmt>(Terminator); 2463 2464 FactSet PrevLockset; 2465 getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, 2466 PrevBlock, CurrBlock); 2467 2468 // Do not update EntrySet. 2469 intersectAndWarn( 2470 CurrBlockInfo->EntrySet, PrevLockset, PrevBlockInfo->ExitLoc, 2471 IsLoop ? 
LEK_LockedSomeLoopIterations : LEK_LockedSomePredecessors); 2472 } 2473 } 2474 2475 BuildLockset LocksetBuilder(this, *CurrBlockInfo); 2476 2477 // Visit all the statements in the basic block. 2478 for (const auto &BI : *CurrBlock) { 2479 switch (BI.getKind()) { 2480 case CFGElement::Statement: { 2481 CFGStmt CS = BI.castAs<CFGStmt>(); 2482 LocksetBuilder.Visit(CS.getStmt()); 2483 break; 2484 } 2485 // Ignore BaseDtor, MemberDtor, and TemporaryDtor for now. 2486 case CFGElement::AutomaticObjectDtor: { 2487 CFGAutomaticObjDtor AD = BI.castAs<CFGAutomaticObjDtor>(); 2488 const auto *DD = AD.getDestructorDecl(AC.getASTContext()); 2489 if (!DD->hasAttrs()) 2490 break; 2491 2492 // Create a dummy expression, 2493 auto *VD = const_cast<VarDecl *>(AD.getVarDecl()); 2494 DeclRefExpr DRE(VD->getASTContext(), VD, false, 2495 VD->getType().getNonReferenceType(), VK_LValue, 2496 AD.getTriggerStmt()->getEndLoc()); 2497 LocksetBuilder.handleCall(&DRE, DD); 2498 break; 2499 } 2500 default: 2501 break; 2502 } 2503 } 2504 CurrBlockInfo->ExitSet = LocksetBuilder.FSet; 2505 2506 // For every back edge from CurrBlock (the end of the loop) to another block 2507 // (FirstLoopBlock) we need to check that the Lockset of Block is equal to 2508 // the one held at the beginning of FirstLoopBlock. We can look up the 2509 // Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map. 2510 for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(), 2511 SE = CurrBlock->succ_end(); SI != SE; ++SI) { 2512 // if CurrBlock -> *SI is *not* a back edge 2513 if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI)) 2514 continue; 2515 2516 CFGBlock *FirstLoopBlock = *SI; 2517 CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()]; 2518 CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID]; 2519 intersectAndWarn(LoopEnd->ExitSet, PreLoop->EntrySet, PreLoop->EntryLoc, 2520 LEK_LockedSomeLoopIterations); 2521 } 2522 } 2523 2524 CFGBlockInfo *Initial = &BlockInfo[CFGraph->getEntry().getBlockID()]; 2525 CFGBlockInfo *Final = &BlockInfo[CFGraph->getExit().getBlockID()]; 2526 2527 // Skip the final check if the exit block is unreachable. 2528 if (!Final->Reachable) 2529 return; 2530 2531 // By default, we expect all locks held on entry to be held on exit. 2532 FactSet ExpectedExitSet = Initial->EntrySet; 2533 2534 // Adjust the expected exit set by adding or removing locks, as declared 2535 // by *-LOCK_FUNCTION and UNLOCK_FUNCTION. The intersect below will then 2536 // issue the appropriate warning. 2537 // FIXME: the location here is not quite right. 2538 for (const auto &Lock : ExclusiveLocksAcquired) 2539 ExpectedExitSet.addLock(FactMan, std::make_unique<LockableFactEntry>( 2540 Lock, LK_Exclusive, D->getLocation())); 2541 for (const auto &Lock : SharedLocksAcquired) 2542 ExpectedExitSet.addLock(FactMan, std::make_unique<LockableFactEntry>( 2543 Lock, LK_Shared, D->getLocation())); 2544 for (const auto &Lock : LocksReleased) 2545 ExpectedExitSet.removeLock(FactMan, Lock); 2546 2547 // FIXME: Should we call this function for all blocks which exit the function? 2548 intersectAndWarn(ExpectedExitSet, Final->ExitSet, Final->ExitLoc, 2549 LEK_LockedAtEndOfFunction, LEK_NotLockedAtEndOfFunction); 2550 2551 Handler.leaveFunction(CurrentFunction); 2552} 2553 2554/// Check a function's CFG for thread-safety violations. 2555/// 2556/// We traverse the blocks in the CFG, compute the set of mutexes that are held 2557/// at the end of each block, and issue warnings for thread safety violations. 
2558/// Each block in the CFG is traversed exactly once. 2559void threadSafety::runThreadSafetyAnalysis(AnalysisDeclContext &AC, 2560 ThreadSafetyHandler &Handler, 2561 BeforeSet **BSet) { 2562 if (!*BSet) 2563 *BSet = new BeforeSet; 2564 ThreadSafetyAnalyzer Analyzer(Handler, *BSet); 2565 Analyzer.runAnalysis(AC); 2566} 2567 2568void threadSafety::threadSafetyCleanup(BeforeSet *Cache) { delete Cache; } 2569 2570/// Helper function that returns a LockKind required for the given level 2571/// of access. 2572LockKind threadSafety::getLockKindFromAccessKind(AccessKind AK) { 2573 switch (AK) { 2574 case AK_Read : 2575 return LK_Shared; 2576 case AK_Written : 2577 return LK_Exclusive; 2578 } 2579 llvm_unreachable("Unknown AccessKind"); 2580} 2581
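// Example driver (a sketch only; the real caller lives in Sema's
// analysis-based warnings and supplies its own ThreadSafetyHandler subclass):
//
//   class MyHandler : public ThreadSafetyHandler {
//     // Override the handle* callbacks to emit diagnostics.
//   };
//
//   void checkFunction(AnalysisDeclContext &AC) {
//     static BeforeSet *Cache = nullptr;  // lazily created, reused
//     MyHandler Handler;
//     threadSafety::runThreadSafetyAnalysis(AC, Handler, &Cache);
//   }
//   // ... and once at the end: threadSafety::threadSafetyCleanup(Cache);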