//===- ExprEngineCallAndReturn.cpp - Support for call/return ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file defines ExprEngine's support for calls and returns.
//
//===----------------------------------------------------------------------===//

#include "PrettyStackTraceLocationContext.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/ConstructionContext.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace ento;

#define DEBUG_TYPE "ExprEngine"

STATISTIC(NumOfDynamicDispatchPathSplits,
  "The # of times we split the path due to imprecise dynamic dispatch info");

STATISTIC(NumInlinedCalls,
  "The # of times we inlined a call");

STATISTIC(NumReachedInlineCountMax,
  "The # of times we reached inline count maximum");

void ExprEngine::processCallEnter(NodeBuilderContext& BC, CallEnter CE,
                                  ExplodedNode *Pred) {
  // Get the entry block in the CFG of the callee.
  const StackFrameContext *calleeCtx = CE.getCalleeContext();
  PrettyStackTraceLocationContext CrashInfo(calleeCtx);
  const CFGBlock *Entry = CE.getEntry();

  // Validate the CFG.
  assert(Entry->empty());
  assert(Entry->succ_size() == 1);

  // Get the solitary successor.
  const CFGBlock *Succ = *(Entry->succ_begin());

  // Construct an edge representing the starting location in the callee.
  BlockEdge Loc(Entry, Succ, calleeCtx);

  ProgramStateRef state = Pred->getState();

  // Construct a new node, notify checkers that analysis of the function has
  // begun, and add the resultant nodes to the worklist.
  bool isNew;
  ExplodedNode *Node = G.getNode(Loc, state, false, &isNew);
  Node->addPredecessor(Pred, G);
  if (isNew) {
    ExplodedNodeSet DstBegin;
    processBeginOfFunction(BC, Node, DstBegin, Loc);
    Engine.enqueue(DstBegin);
  }
}

// Find the last statement on the path to the exploded node and the
// corresponding Block.
static std::pair<const Stmt*,
                 const CFGBlock*> getLastStmt(const ExplodedNode *Node) {
  const Stmt *S = nullptr;
  const CFGBlock *Blk = nullptr;
  const StackFrameContext *SF = Node->getStackFrame();

  // Back up through the ExplodedGraph until we reach a statement node in this
  // stack frame.
  while (Node) {
    const ProgramPoint &PP = Node->getLocation();

    if (PP.getStackFrame() == SF) {
      if (Optional<StmtPoint> SP = PP.getAs<StmtPoint>()) {
        S = SP->getStmt();
        break;
      } else if (Optional<CallExitEnd> CEE = PP.getAs<CallExitEnd>()) {
        S = CEE->getCalleeContext()->getCallSite();
        if (S)
          break;

        // If there is no statement, this is an implicitly-generated call.
        // We'll walk backwards over it and then continue the loop to find
        // an actual statement.
        Optional<CallEnter> CE;
        do {
          Node = Node->getFirstPred();
          CE = Node->getLocationAs<CallEnter>();
        } while (!CE || CE->getCalleeContext() != CEE->getCalleeContext());

        // Continue searching the graph.
      } else if (Optional<BlockEdge> BE = PP.getAs<BlockEdge>()) {
        Blk = BE->getSrc();
      }
    } else if (Optional<CallEnter> CE = PP.getAs<CallEnter>()) {
      // If we reached the CallEnter for this function, it has no statements.
      if (CE->getCalleeContext() == SF)
        break;
    }

    if (Node->pred_empty())
      return std::make_pair(nullptr, nullptr);

    Node = *Node->pred_begin();
  }

  return std::make_pair(S, Blk);
}

/// Adjusts a return value when the called function's return type does not
/// match the caller's expression type. This can happen when a dynamic call
/// is devirtualized, and the overriding method has a covariant (more specific)
/// return type than the parent's method. For C++ objects, this means we need
/// to add base casts.
static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy,
                              StoreManager &StoreMgr) {
  // For now, the only adjustments we handle apply only to locations.
  if (!V.getAs<Loc>())
    return V;

  // If the types already match, don't do any unnecessary work.
  ExpectedTy = ExpectedTy.getCanonicalType();
  ActualTy = ActualTy.getCanonicalType();
  if (ExpectedTy == ActualTy)
    return V;

  // No adjustment is needed between Objective-C pointer types.
  if (ExpectedTy->isObjCObjectPointerType() &&
      ActualTy->isObjCObjectPointerType())
    return V;

  // C++ object pointers may need "derived-to-base" casts.
  const CXXRecordDecl *ExpectedClass = ExpectedTy->getPointeeCXXRecordDecl();
  const CXXRecordDecl *ActualClass = ActualTy->getPointeeCXXRecordDecl();
  if (ExpectedClass && ActualClass) {
    CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                       /*DetectVirtual=*/false);
    if (ActualClass->isDerivedFrom(ExpectedClass, Paths) &&
        !Paths.isAmbiguous(ActualTy->getCanonicalTypeUnqualified())) {
      return StoreMgr.evalDerivedToBase(V, Paths.front());
    }
  }

  // Unfortunately, Objective-C does not enforce that overridden methods have
  // covariant return types, so we can't assert that that never happens.
  // Be safe and return UnknownVal().
  return UnknownVal();
}
162
163void ExprEngine::removeDeadOnEndOfFunction(NodeBuilderContext& BC,
164                                           ExplodedNode *Pred,
165                                           ExplodedNodeSet &Dst) {
166  // Find the last statement in the function and the corresponding basic block.
167  const Stmt *LastSt = nullptr;
168  const CFGBlock *Blk = nullptr;
169  std::tie(LastSt, Blk) = getLastStmt(Pred);
170  if (!Blk || !LastSt) {
171    Dst.Add(Pred);
172    return;
173  }
174
175  // Here, we destroy the current location context. We use the current
176  // function's entire body as a diagnostic statement, with which the program
177  // point will be associated. However, we only want to use LastStmt as a
178  // reference for what to clean up if it's a ReturnStmt; otherwise, everything
179  // is dead.
180  SaveAndRestore<const NodeBuilderContext *> NodeContextRAII(currBldrCtx, &BC);
181  const LocationContext *LCtx = Pred->getLocationContext();
182  removeDead(Pred, Dst, dyn_cast<ReturnStmt>(LastSt), LCtx,
183             LCtx->getAnalysisDeclContext()->getBody(),
184             ProgramPoint::PostStmtPurgeDeadSymbolsKind);
185}
186
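/// Returns true if the inlined stack frame was created for a different
/// declaration than the one the call event statically refers to, e.g. when a
/// virtual call was devirtualized to an overriding method. In that case the
/// return value may need to be adjusted to the caller's expected type.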
static bool wasDifferentDeclUsedForInlining(CallEventRef<> Call,
    const StackFrameContext *calleeCtx) {
  const Decl *RuntimeCallee = calleeCtx->getDecl();
  const Decl *StaticDecl = Call->getDecl();
  assert(RuntimeCallee);
  if (!StaticDecl)
    return true;
  return RuntimeCallee->getCanonicalDecl() != StaticDecl->getCanonicalDecl();
}

/// The call exit is simulated with a sequence of nodes, which occur between
/// CallExitBegin and CallExitEnd. The following operations occur between the
/// two program points:
/// 1. CallExitBegin (triggers the start of the call exit sequence)
/// 2. Bind the return value
/// 3. Run RemoveDeadBindings to clean up the dead symbols from the callee.
/// 4. CallExitEnd (switch to the caller context)
/// 5. PostStmt<CallExpr>
void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
  // Step 1: CEBNode was generated before the call.
  PrettyStackTraceLocationContext CrashInfo(CEBNode->getLocationContext());
  const StackFrameContext *calleeCtx = CEBNode->getStackFrame();

  // The parent context might not be a stack frame, so make sure we
  // look up the first enclosing stack frame.
  const StackFrameContext *callerCtx =
    calleeCtx->getParent()->getStackFrame();

  const Stmt *CE = calleeCtx->getCallSite();
  ProgramStateRef state = CEBNode->getState();
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = nullptr;
  const CFGBlock *Blk = nullptr;
  std::tie(LastSt, Blk) = getLastStmt(CEBNode);

  // Generate a CallEvent /before/ cleaning the state, so that we can get the
  // correct value for 'this' (if necessary).
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state);

  // Step 2: generate node with bound return value: CEBNode -> BindedRetNode.

  // If the callee returns an expression, bind its value to CallExpr.
  if (CE) {
    if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) {
      const LocationContext *LCtx = CEBNode->getLocationContext();
      SVal V = state->getSVal(RS, LCtx);

      // Ensure that the return type matches the type of the returned Expr.
      if (wasDifferentDeclUsedForInlining(Call, calleeCtx)) {
        QualType ReturnedTy =
          CallEvent::getDeclaredResultType(calleeCtx->getDecl());
        if (!ReturnedTy.isNull()) {
          if (const Expr *Ex = dyn_cast<Expr>(CE)) {
            V = adjustReturnValue(V, Ex->getType(), ReturnedTy,
                                  getStoreManager());
          }
        }
      }

      state = state->BindExpr(CE, callerCtx, V);
    }

    // Bind the constructed object value to CXXConstructExpr.
    if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
      loc::MemRegionVal This =
        svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx);
      SVal ThisV = state->getSVal(This);
      ThisV = state->getSVal(ThisV.castAs<Loc>());
      state = state->BindExpr(CCE, callerCtx, ThisV);
    }

    if (const auto *CNE = dyn_cast<CXXNewExpr>(CE)) {
      // We are currently evaluating a CXXNewAllocator CFGElement. It takes a
      // while to reach the actual CXXNewExpr element from here, so keep the
      // region for later use.
      // Additionally cast the return value of the inlined operator new
      // (which is of type 'void *') to the correct object type.
      SVal AllocV = state->getSVal(CNE, callerCtx);
      AllocV = svalBuilder.evalCast(
          AllocV, CNE->getType(),
          getContext().getPointerType(getContext().VoidTy));

      state = addObjectUnderConstruction(state, CNE, calleeCtx->getParent(),
                                         AllocV);
    }
  }

  // Step 3: BindedRetNode -> CleanedNodes
  // If we can find a statement and a block in the inlined function, run remove
  // dead bindings before returning from the call. This is important to ensure
  // that we report issues such as leaks in the stack contexts in which
  // they occurred.
  ExplodedNodeSet CleanedNodes;
  if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
    static SimpleProgramPointTag retValBind("ExprEngine", "Bind Return Value");
    PostStmt Loc(LastSt, calleeCtx, &retValBind);
    bool isNew;
    ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew);
    BindedRetNode->addPredecessor(CEBNode, G);
    if (!isNew)
      return;

    NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode);
    currBldrCtx = &Ctx;
    // Here, we call the Symbol Reaper with a null statement and the callee's
    // location context, telling it to clean up everything in the callee's
    // context (and its children). We use the callee's function body as a
    // diagnostic statement, with which the program point will be associated.
    removeDead(BindedRetNode, CleanedNodes, nullptr, calleeCtx,
               calleeCtx->getAnalysisDeclContext()->getBody(),
               ProgramPoint::PostStmtPurgeDeadSymbolsKind);
    currBldrCtx = nullptr;
  } else {
    CleanedNodes.Add(CEBNode);
  }

  for (ExplodedNodeSet::iterator I = CleanedNodes.begin(),
                                 E = CleanedNodes.end(); I != E; ++I) {

    // Step 4: Generate the CallExit and leave the callee's context.
    // CleanedNodes -> CEENode
    CallExitEnd Loc(calleeCtx, callerCtx);
    bool isNew;
    ProgramStateRef CEEState = (*I == CEBNode) ? state : (*I)->getState();

    ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
    CEENode->addPredecessor(*I, G);
    if (!isNew)
      return;

    // Step 5: Perform the post-condition check of the CallExpr and enqueue the
    // result onto the work list.
    // CEENode -> Dst -> WorkList
    NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode);
    SaveAndRestore<const NodeBuilderContext*> NBCSave(currBldrCtx,
        &Ctx);
    SaveAndRestore<unsigned> CBISave(currStmtIdx, calleeCtx->getIndex());

    CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);

    ExplodedNodeSet DstPostCall;
    if (llvm::isa_and_nonnull<CXXNewExpr>(CE)) {
      ExplodedNodeSet DstPostPostCallCallback;
      getCheckerManager().runCheckersForPostCall(DstPostPostCallCallback,
                                                 CEENode, *UpdatedCall, *this,
                                                 /*wasInlined=*/true);
      for (ExplodedNode *I : DstPostPostCallCallback) {
        getCheckerManager().runCheckersForNewAllocator(
            cast<CXXAllocatorCall>(*UpdatedCall), DstPostCall, I, *this,
            /*wasInlined=*/true);
      }
    } else {
      getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
                                                 *UpdatedCall, *this,
                                                 /*wasInlined=*/true);
    }
    ExplodedNodeSet Dst;
    if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
      getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg,
                                                        *this,
                                                        /*wasInlined=*/true);
    } else if (CE &&
               !(isa<CXXNewExpr>(CE) && // Called when visiting CXXNewExpr.
                 AMgr.getAnalyzerOptions().MayInlineCXXAllocator)) {
      getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
                                                 *this, /*wasInlined=*/true);
    } else {
      Dst.insert(DstPostCall);
    }

    // Enqueue the next element in the block.
    for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end();
                                   PSI != PSE; ++PSI) {
      Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(),
                                    calleeCtx->getIndex()+1);
    }
  }
}

bool ExprEngine::isSmall(AnalysisDeclContext *ADC) const {
  // When there are no branches in the function, it means that there's no
  // exponential complexity introduced by inlining such a function.
  // Such functions also don't trigger various fundamental problems
  // with our inlining mechanism, such as the problem of
  // inlined defensive checks. Hence isLinear().
  const CFG *Cfg = ADC->getCFG();
  return Cfg->isLinear() || Cfg->size() <= AMgr.options.AlwaysInlineSize;
}

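/// Returns true if the callee's CFG has at least as many blocks as the
/// MinCFGSizeTreatFunctionsAsLarge option allows, i.e. the function is "large"
/// for the purpose of limiting how many times it is inlined.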
bool ExprEngine::isLarge(AnalysisDeclContext *ADC) const {
  const CFG *Cfg = ADC->getCFG();
  return Cfg->size() >= AMgr.options.MinCFGSizeTreatFunctionsAsLarge;
}

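/// Returns true if the callee's CFG has more blocks than the MaxInlinableSize
/// option allows; such functions are never considered for inlining
/// (see mayInlineDecl).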
bool ExprEngine::isHuge(AnalysisDeclContext *ADC) const {
  const CFG *Cfg = ADC->getCFG();
  return Cfg->getNumBlockIDs() > AMgr.options.MaxInlinableSize;
}

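// Walk up the location context chain from LCtx, counting the simulated stack
// depth and detecting whether D already appears on the stack (direct or
// mutual recursion). Frames belonging to "small" callees are not counted
// towards the depth, except for recursive occurrences of D itself.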
void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
                               bool &IsRecursive, unsigned &StackDepth) {
  IsRecursive = false;
  StackDepth = 0;

  while (LCtx) {
    if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LCtx)) {
      const Decl *DI = SFC->getDecl();

      // Mark recursive (and mutually recursive) functions and always count
      // them when measuring the stack depth.
      if (DI == D) {
        IsRecursive = true;
        ++StackDepth;
        LCtx = LCtx->getParent();
        continue;
      }

      // Do not count the small functions when determining the stack depth.
      AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI);
      if (!isSmall(CalleeADC))
        ++StackDepth;
    }
    LCtx = LCtx->getParent();
  }
}

// The GDM component containing the dynamic dispatch bifurcation info. When
// the exact type of the receiver is not known, we want to explore both paths -
// one on which we do inline it and the other one on which we don't. This is
// done to ensure we do not drop coverage.
// This is a map from the receiver region to a flag specifying whether we
// consider this region's information precise or not along the given path.
namespace {
  enum DynamicDispatchMode {
    DynamicDispatchModeInlined = 1,
    DynamicDispatchModeConservative
  };
} // end anonymous namespace

REGISTER_MAP_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap,
                               const MemRegion *, unsigned)

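// Inline the given call: construct a stack frame for the callee, bind the
// actual arguments to the formal parameters in the new frame, and enqueue a
// CallEnter node so that the callee's body is analyzed next. The predecessor
// is taken out of the node builder because its successor has already been
// added to the work list manually.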
bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
                            NodeBuilder &Bldr, ExplodedNode *Pred,
                            ProgramStateRef State) {
  assert(D);

  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getStackFrame();
  const LocationContext *ParentOfCallee = CallerSFC;
  if (Call.getKind() == CE_Block &&
      !cast<BlockCall>(Call).isConversionFromLambda()) {
    const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
    assert(BR && "If we have the block definition we should have its region");
    AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
    ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                         cast<BlockDecl>(D),
                                                         BR);
  }

  // This may be NULL, but that's fine.
  const Expr *CallE = Call.getOriginExpr();

  // Construct a new stack frame for the callee.
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const StackFrameContext *CalleeSFC =
      CalleeADC->getStackFrame(ParentOfCallee, CallE, currBldrCtx->getBlock(),
                               currBldrCtx->blockCount(), currStmtIdx);

  CallEnter Loc(CallE, CalleeSFC, CurLC);

  // Construct a new state which contains the mapping from actual to
  // formal arguments.
  State = State->enterStackFrame(Call, CalleeSFC);

  bool isNew;
  if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
    N->addPredecessor(Pred, G);
    if (isNew)
      Engine.getWorkList()->enqueue(N);
  }

  // If we decided to inline the call, the successor has been manually
  // added onto the work list so remove it from the node builder.
  Bldr.takeNodes(Pred);

  NumInlinedCalls++;
  Engine.FunctionSummaries->bumpNumTimesInlined(D);

  // Mark the decl as visited.
  if (VisitedCallees)
    VisitedCallees->insert(D);

  return true;
}

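// If the state carries a "replay without inlining" marker for this call site,
// return a copy of the state with the marker removed; otherwise return null,
// meaning inlining has not previously failed for this call.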
static ProgramStateRef getInlineFailedState(ProgramStateRef State,
                                            const Stmt *CallE) {
  const void *ReplayState = State->get<ReplayWithoutInlining>();
  if (!ReplayState)
    return nullptr;

  assert(ReplayState == CallE && "Backtracked to the wrong call.");
  (void)CallE;

  return State->remove<ReplayWithoutInlining>();
}

void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
                               ExplodedNodeSet &dst) {
  // Perform the previsit of the CallExpr.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, CE, *this);

  // Get the call in its initial state. We use this as a template to perform
  // all the checks.
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> CallTemplate
    = CEMgr.getSimpleCall(CE, Pred->getState(), Pred->getLocationContext());

  // Evaluate the function call.  We try each of the checkers
  // to see if they can evaluate the function call.
  ExplodedNodeSet dstCallEvaluated;
  for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
       I != E; ++I) {
    evalCall(dstCallEvaluated, *I, *CallTemplate);
  }

  // Finally, perform the post-condition check of the CallExpr and store
  // the created nodes in 'Dst'.
  // Note that if the call was inlined, dstCallEvaluated will be empty.
  // The post-CallExpr check will occur in processCallExit.
  getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE,
                                             *this);
}

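// For each argument expression of the call that still has an object under
// construction (i.e. an argument constructor that was modeled earlier), mark
// the construction as finished in the program state.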
ProgramStateRef ExprEngine::finishArgumentConstruction(ProgramStateRef State,
                                                       const CallEvent &Call) {
  const Expr *E = Call.getOriginExpr();
  // FIXME: Constructors to placement arguments of operator new
  // are not supported yet.
  if (!E || isa<CXXNewExpr>(E))
    return State;

  const LocationContext *LC = Call.getLocationContext();
  for (unsigned CallI = 0, CallN = Call.getNumArgs(); CallI != CallN; ++CallI) {
    unsigned I = Call.getASTArgumentIndex(CallI);
    if (Optional<SVal> V =
            getObjectUnderConstruction(State, {E, I}, LC)) {
      SVal VV = *V;
      (void)VV;
      assert(cast<VarRegion>(VV.castAs<loc::MemRegionVal>().getRegion())
                 ->getStackFrame()->getParent()
                 ->getStackFrame() == LC->getStackFrame());
      State = finishObjectConstruction(State, {E, I}, LC);
    }
  }

  return State;
}

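// Node-level wrapper around the state transformation above: if cleaning up
// argument constructions changed the state, emit a transition tagged
// "Finish argument construction"; otherwise reuse the predecessor node.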
void ExprEngine::finishArgumentConstruction(ExplodedNodeSet &Dst,
                                            ExplodedNode *Pred,
                                            const CallEvent &Call) {
  ProgramStateRef State = Pred->getState();
  ProgramStateRef CleanedState = finishArgumentConstruction(State, Call);
  if (CleanedState == State) {
    Dst.insert(Pred);
    return;
  }

  const Expr *E = Call.getOriginExpr();
  const LocationContext *LC = Call.getLocationContext();
  NodeBuilder B(Pred, Dst, *currBldrCtx);
  static SimpleProgramPointTag Tag("ExprEngine",
                                   "Finish argument construction");
  PreStmt PP(E, LC, &Tag);
  B.generateNode(PP, CleanedState, Pred);
}

void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
                          const CallEvent &Call) {
  // WARNING: At this time, the state attached to 'Call' may be older than the
  // state in 'Pred'. This is a minor optimization since CheckerManager will
  // use an updated CallEvent instance when calling checkers, but if 'Call' is
  // ever used directly in this function all callers should be updated to pass
  // the most recent state. (It is probably not worth doing the work here since
  // for some callers this will not be necessary.)

  // Run any pre-call checks using the generic call interface.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred,
                                            Call, *this);

  // Actually evaluate the function call.  We try each of the checkers
  // to see if they can evaluate the function call, and get a callback at
  // defaultEvalCall if all of them fail.
  ExplodedNodeSet dstCallEvaluated;
  getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
                                             Call, *this, EvalCallOptions());

  // If there were other constructors called for object-type arguments
  // of this call, clean them up.
  ExplodedNodeSet dstArgumentCleanup;
  for (ExplodedNode *I : dstCallEvaluated)
    finishArgumentConstruction(dstArgumentCleanup, I, Call);

  ExplodedNodeSet dstPostCall;
  getCheckerManager().runCheckersForPostCall(dstPostCall, dstArgumentCleanup,
                                             Call, *this);

  // Handle symbols that escaped while the regions above were invalidated.
  // Note that, for inlined calls, the nodes were put back into the worklist,
  // so we can assume that every node belongs to a conservative call at this
  // point.

  // Run pointerEscape callback with the newly conjured symbols.
  SmallVector<std::pair<SVal, SVal>, 8> Escaped;
  for (ExplodedNode *I : dstPostCall) {
    NodeBuilder B(I, Dst, *currBldrCtx);
    ProgramStateRef State = I->getState();
    Escaped.clear();
    {
      unsigned Arg = -1;
      for (const ParmVarDecl *PVD : Call.parameters()) {
        ++Arg;
        QualType ParamTy = PVD->getType();
        if (ParamTy.isNull() ||
            (!ParamTy->isPointerType() && !ParamTy->isReferenceType()))
          continue;
        QualType Pointee = ParamTy->getPointeeType();
        if (Pointee.isConstQualified() || Pointee->isVoidType())
          continue;
        if (const MemRegion *MR = Call.getArgSVal(Arg).getAsRegion())
          Escaped.emplace_back(loc::MemRegionVal(MR), State->getSVal(MR, Pointee));
      }
    }

    State = processPointerEscapedOnBind(State, Escaped, I->getLocationContext(),
                                        PSK_EscapeOutParameters, &Call);

    if (State == I->getState())
      Dst.insert(I);
    else
      B.generateNode(I->getLocation(), State, I);
  }
}

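// Bind a value for the call expression of a call that is not being inlined.
// Known cases (e.g. ObjC retain/autorelease/self returning the receiver, or a
// constructor "returning" its object) are modeled directly; otherwise the
// result is a conjured symbol, or a freshly invalidated temporary region when
// the callee returns an object by value.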
ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
                                            const LocationContext *LCtx,
                                            ProgramStateRef State) {
  const Expr *E = Call.getOriginExpr();
  if (!E)
    return State;

  // Some method families have known return values.
  if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
    switch (Msg->getMethodFamily()) {
    default:
      break;
    case OMF_autorelease:
    case OMF_retain:
    case OMF_self: {
      // These methods return their receivers.
      return State->BindExpr(E, LCtx, Msg->getReceiverSVal());
    }
    }
  } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)){
    SVal ThisV = C->getCXXThisVal();
    ThisV = State->getSVal(ThisV.castAs<Loc>());
    return State->BindExpr(E, LCtx, ThisV);
  }

  SVal R;
  QualType ResultTy = Call.getResultType();
  unsigned Count = currBldrCtx->blockCount();
  if (auto RTC = getCurrentCFGElement().getAs<CFGCXXRecordTypedCall>()) {
    // Conjure a temporary if the function returns an object by value.
    SVal Target;
    assert(RTC->getStmt() == Call.getOriginExpr());
    EvalCallOptions CallOpts; // FIXME: We won't really need those.
    std::tie(State, Target) =
        handleConstructionContext(Call.getOriginExpr(), State, LCtx,
                                  RTC->getConstructionContext(), CallOpts);
    const MemRegion *TargetR = Target.getAsRegion();
    assert(TargetR);
    // Invalidate the region so that it doesn't look uninitialized. If this is
    // a field or element constructor, we do not want to invalidate
    // the whole structure. Pointer escape is meaningless because
    // the structure is a product of conservative evaluation
    // and therefore contains nothing interesting at this point.
    RegionAndSymbolInvalidationTraits ITraits;
    ITraits.setTrait(TargetR,
        RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion);
    State = State->invalidateRegions(TargetR, E, Count, LCtx,
                                     /* CausesPointerEscape=*/false, nullptr,
                                     &Call, &ITraits);

    R = State->getSVal(Target.castAs<Loc>(), E->getType());
  } else {
    // Conjure a symbol if the return value is unknown.

    // See if we need to conjure a heap pointer instead of
    // a regular unknown pointer.
    bool IsHeapPointer = false;
    if (const auto *CNE = dyn_cast<CXXNewExpr>(E))
      if (CNE->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
        // FIXME: Delegate this to evalCall in MallocChecker?
        IsHeapPointer = true;
      }

    R = IsHeapPointer ? svalBuilder.getConjuredHeapSymbolVal(E, LCtx, Count)
                      : svalBuilder.conjureSymbolVal(nullptr, E, LCtx, ResultTy,
                                                     Count);
  }
  return State->BindExpr(E, LCtx, R);
}

// Conservatively evaluate call by invalidating regions and binding
// a conjured return value.
void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
                                      ExplodedNode *Pred, ProgramStateRef State) {
  State = Call.invalidateRegions(currBldrCtx->blockCount(), State);
  State = bindReturnValue(Call, Pred->getLocationContext(), State);

  // And make the result node.
  Bldr.generateNode(Call.getProgramPoint(), State, Pred);
}

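// Determine, from the kind of the call alone (plain function, block, C++
// member function, constructor, destructor, allocator/deallocator, or
// Objective-C message), whether the current analyzer options permit inlining
// it, and whether a refusal applies always or only to this occurrence.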
ExprEngine::CallInlinePolicy
ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
                              AnalyzerOptions &Opts,
                              const EvalCallOptions &CallOpts) {
  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getStackFrame();
  switch (Call.getKind()) {
  case CE_Function:
  case CE_Block:
    break;
  case CE_CXXMember:
  case CE_CXXMemberOperator:
    if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
      return CIP_DisallowedAlways;
    break;
  case CE_CXXConstructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
      return CIP_DisallowedAlways;

    const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);

    const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();

    auto CCE = getCurrentCFGElement().getAs<CFGConstructor>();
    const ConstructionContext *CC = CCE ? CCE->getConstructionContext()
                                        : nullptr;

    if (llvm::isa_and_nonnull<NewAllocatedObjectConstructionContext>(CC) &&
        !Opts.MayInlineCXXAllocator)
      return CIP_DisallowedOnce;

    // FIXME: We don't handle constructors or destructors for arrays properly.
    // Even once we do, we still need to be careful about implicitly-generated
    // initializers for array fields in default move/copy constructors.
    // We still allow construction into ElementRegion targets when they don't
    // represent array elements.
    if (CallOpts.IsArrayCtorOrDtor)
      return CIP_DisallowedOnce;

    // Inlining constructors requires including initializers in the CFG.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
    (void)ADC;

    // If the destructor is trivial, it's always safe to inline the constructor.
    if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
      break;

    // For other types, only inline constructors if destructor inlining is
    // also enabled.
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return CIP_DisallowedAlways;

    if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete) {
      // If we don't handle temporary destructors, we shouldn't inline
      // their constructors.
      if (CallOpts.IsTemporaryCtorOrDtor &&
          !Opts.ShouldIncludeTemporaryDtorsInCFG)
        return CIP_DisallowedOnce;

      // If we did not find the correct this-region, it would be pointless
      // to inline the constructor. Instead we will simply invalidate
      // the fake temporary target.
      if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
        return CIP_DisallowedOnce;

      // If the temporary is lifetime-extended by binding it to a reference-type
      // field within an aggregate, automatic destructors don't work properly.
      if (CallOpts.IsTemporaryLifetimeExtendedViaAggregate)
        return CIP_DisallowedOnce;
    }

    break;
  }
  case CE_CXXInheritedConstructor: {
    // This doesn't really increase the cost of inlining ever, because
    // the stack frame of the inherited constructor is trivial.
    return CIP_Allowed;
  }
  case CE_CXXDestructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return CIP_DisallowedAlways;

    // Inlining destructors requires building the CFG correctly.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
    (void)ADC;

    // FIXME: We don't handle constructors or destructors for arrays properly.
    if (CallOpts.IsArrayCtorOrDtor)
      return CIP_DisallowedOnce;

    // Allow disabling temporary destructor inlining with a separate option.
    if (CallOpts.IsTemporaryCtorOrDtor &&
        !Opts.MayInlineCXXTemporaryDtors)
      return CIP_DisallowedOnce;

    // If we did not find the correct this-region, it would be pointless
    // to inline the destructor. Instead we will simply invalidate
    // the fake temporary target.
    if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
      return CIP_DisallowedOnce;
    break;
  }
  case CE_CXXDeallocator:
    LLVM_FALLTHROUGH;
  case CE_CXXAllocator:
    if (Opts.MayInlineCXXAllocator)
      break;
    // Do not inline allocators until we model deallocators.
    // This is unfortunate, but basically necessary for smart pointers and such.
    return CIP_DisallowedAlways;
  case CE_ObjCMessage:
    if (!Opts.MayInlineObjCMethod)
      return CIP_DisallowedAlways;
    if (!(Opts.getIPAMode() == IPAK_DynamicDispatch ||
          Opts.getIPAMode() == IPAK_DynamicDispatchBifurcate))
      return CIP_DisallowedAlways;
    break;
  }

  return CIP_Allowed;
}

/// Returns true if the given C++ class contains a member with the given name.
static bool hasMember(const ASTContext &Ctx, const CXXRecordDecl *RD,
                      StringRef Name) {
  const IdentifierInfo &II = Ctx.Idents.get(Name);
  DeclarationName DeclName = Ctx.DeclarationNames.getIdentifier(&II);
  if (!RD->lookup(DeclName).empty())
    return true;

  CXXBasePaths Paths(false, false, false);
  if (RD->lookupInBases(
          [DeclName](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
            return CXXRecordDecl::FindOrdinaryMember(Specifier, Path, DeclName);
          },
          Paths))
    return true;

  return false;
}

/// Returns true if the given C++ class is a container or iterator.
///
/// Our heuristic for this is whether it contains a method named 'begin()' or a
/// nested type named 'iterator' or 'iterator_category'.
static bool isContainerClass(const ASTContext &Ctx, const CXXRecordDecl *RD) {
  return hasMember(Ctx, RD, "begin") ||
         hasMember(Ctx, RD, "iterator") ||
         hasMember(Ctx, RD, "iterator_category");
}

/// Returns true if the given function refers to a method of a C++ container
/// or iterator.
///
/// We generally do a poor job modeling most containers right now, and might
/// prefer not to inline their methods.
static bool isContainerMethod(const ASTContext &Ctx,
                              const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    return isContainerClass(Ctx, MD->getParent());
  return false;
}

/// Returns true if the given function is the destructor of a class named
/// "shared_ptr".
static bool isCXXSharedPtrDtor(const FunctionDecl *FD) {
  const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(FD);
  if (!Dtor)
    return false;

  const CXXRecordDecl *RD = Dtor->getParent();
  if (const IdentifierInfo *II = RD->getDeclName().getAsIdentifierInfo())
    if (II->isStr("shared_ptr"))
      return true;

  return false;
}

/// Returns true if the function in \p CalleeADC may be inlined in general.
///
/// This checks static properties of the function, such as its signature and
/// CFG, to determine whether the analyzer should ever consider inlining it,
/// in any context.
bool ExprEngine::mayInlineDecl(AnalysisDeclContext *CalleeADC) const {
  AnalyzerOptions &Opts = AMgr.getAnalyzerOptions();
  // FIXME: Do not inline variadic calls.
  if (CallEvent::isVariadic(CalleeADC->getDecl()))
    return false;

  // Check certain C++-related inlining policies.
  ASTContext &Ctx = CalleeADC->getASTContext();
  if (Ctx.getLangOpts().CPlusPlus) {
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeADC->getDecl())) {
      // Conditionally control the inlining of template functions.
      if (!Opts.MayInlineTemplateFunctions)
        if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
          return false;

      // Conditionally control the inlining of C++ standard library functions.
      if (!Opts.MayInlineCXXStandardLibrary)
        if (Ctx.getSourceManager().isInSystemHeader(FD->getLocation()))
          if (AnalysisDeclContext::isInStdNamespace(FD))
            return false;

      // Conditionally control the inlining of methods on objects that look
      // like C++ containers.
      if (!Opts.MayInlineCXXContainerMethods)
        if (!AMgr.isInCodeFile(FD->getLocation()))
          if (isContainerMethod(Ctx, FD))
            return false;

      // Conditionally control the inlining of the destructor of C++ shared_ptr.
      // We don't currently do a good job modeling shared_ptr because we can't
      // see the reference count, so treating it as opaque is probably the best
      // idea.
      if (!Opts.MayInlineCXXSharedPtrDtor)
        if (isCXXSharedPtrDtor(FD))
          return false;
    }
  }

  // It is possible that the CFG cannot be constructed.
  // Be safe, and check if the CalleeCFG is valid.
  const CFG *CalleeCFG = CalleeADC->getCFG();
  if (!CalleeCFG)
    return false;

  // Do not inline huge functions (those exceeding the maximum inlinable size).
  if (isHuge(CalleeADC))
    return false;

  // It is possible that the live variables analysis cannot be
  // run.  If so, bail out.
  if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
    return false;

  return true;
}

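// Decide whether this particular call should be inlined, combining the static
// per-declaration policy (cached in the function summaries) with dynamic,
// per-call-site considerations such as the call kind, recursion, simulated
// stack depth, and how many times the callee has already been inlined.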
bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
                                  const ExplodedNode *Pred,
                                  const EvalCallOptions &CallOpts) {
  if (!D)
    return false;

  AnalysisManager &AMgr = getAnalysisManager();
  AnalyzerOptions &Opts = AMgr.options;
  AnalysisDeclContextManager &ADCMgr = AMgr.getAnalysisDeclContextManager();
  AnalysisDeclContext *CalleeADC = ADCMgr.getContext(D);

  // The auto-synthesized bodies are essential to inline as they are
  // usually small and commonly used. Note: we should do this check early on to
  // ensure we always inline these calls.
  if (CalleeADC->isBodyAutosynthesized())
    return true;

  if (!AMgr.shouldInlineCall())
    return false;

  // Check if this function has been marked as non-inlinable.
  Optional<bool> MayInline = Engine.FunctionSummaries->mayInline(D);
  if (MayInline.hasValue()) {
    if (!MayInline.getValue())
      return false;

  } else {
    // We haven't actually checked the static properties of this function yet.
    // Do that now, and record our decision in the function summaries.
    if (mayInlineDecl(CalleeADC)) {
      Engine.FunctionSummaries->markMayInline(D);
    } else {
      Engine.FunctionSummaries->markShouldNotInline(D);
      return false;
    }
  }

  // Check if we should inline a call based on its kind.
  // FIXME: this checks both static and dynamic properties of the call, which
  // means we're redoing a bit of work that could be cached in the function
  // summary.
  CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts, CallOpts);
  if (CIP != CIP_Allowed) {
    if (CIP == CIP_DisallowedAlways) {
      assert(!MayInline.hasValue() || MayInline.getValue());
      Engine.FunctionSummaries->markShouldNotInline(D);
    }
    return false;
  }

  // Do not inline if recursive or we've reached max stack frame count.
  bool IsRecursive = false;
  unsigned StackDepth = 0;
  examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
  if ((StackDepth >= Opts.InlineMaxStackDepth) &&
      (!isSmall(CalleeADC) || IsRecursive))
    return false;

  // Do not inline large functions too many times.
  if ((Engine.FunctionSummaries->getNumTimesInlined(D) >
       Opts.MaxTimesInlineLarge) &&
      isLarge(CalleeADC)) {
    NumReachedInlineCountMax++;
    return false;
  }

  if (HowToInline == Inline_Minimal && (!isSmall(CalleeADC) || IsRecursive))
    return false;

  return true;
}

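/// Returns true if the call is a trivial copy or move assignment operator on
/// a C++ object, which the engine models as a simple copy of the object's
/// bindings (see performTrivialCopy) instead of inlining the operator.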
static bool isTrivialObjectAssignment(const CallEvent &Call) {
  const CXXInstanceCall *ICall = dyn_cast<CXXInstanceCall>(&Call);
  if (!ICall)
    return false;

  const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(ICall->getDecl());
  if (!MD)
    return false;
  if (!(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()))
    return false;

  return MD->isTrivial();
}

void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
                                 const CallEvent &CallTemplate,
                                 const EvalCallOptions &CallOpts) {
  // Make sure we have the most recent state attached to the call.
  ProgramStateRef State = Pred->getState();
  CallEventRef<> Call = CallTemplate.cloneWithState(State);

  // Special-case trivial assignment operators.
  if (isTrivialObjectAssignment(*Call)) {
    performTrivialCopy(Bldr, Pred, *Call);
    return;
  }

  // Try to inline the call.
  // The origin expression here is just used as a kind of checksum;
  // this should still be safe even for CallEvents that don't come from exprs.
  const Expr *E = Call->getOriginExpr();

  ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
  if (InlinedFailedState) {
    // If we already tried once and failed, make sure we don't retry later.
    State = InlinedFailedState;
  } else {
    RuntimeDefinition RD = Call->getRuntimeDefinition();
    const Decl *D = RD.getDecl();
    if (shouldInlineCall(*Call, D, Pred, CallOpts)) {
      if (RD.mayHaveOtherDefinitions()) {
        AnalyzerOptions &Options = getAnalysisManager().options;

        // Explore with and without inlining the call.
        if (Options.getIPAMode() == IPAK_DynamicDispatchBifurcate) {
          BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
          return;
        }

        // Don't inline if we're not in any dynamic dispatch mode.
        if (Options.getIPAMode() != IPAK_DynamicDispatch) {
          conservativeEvalCall(*Call, Bldr, Pred, State);
          return;
        }
      }

      // We are not bifurcating and we do have a Decl, so just inline.
      if (inlineCall(*Call, D, Bldr, Pred, State))
        return;
    }
  }

  // If we can't inline it, handle the return value and invalidate the regions.
  conservativeEvalCall(*Call, Bldr, Pred, State);
}

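// Split the path for a dynamically dispatched call whose receiver's type
// information is imprecise: one branch assumes the inferred definition is
// correct and inlines it, the other evaluates the call conservatively. The
// decision is recorded per receiver region in DynamicDispatchBifurcationMap,
// so each region's path is split at most once.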
void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
                               const CallEvent &Call, const Decl *D,
                               NodeBuilder &Bldr, ExplodedNode *Pred) {
  assert(BifurReg);
  BifurReg = BifurReg->StripCasts();

  // Check if we've performed the split already - note, we only want
  // to split the path once per memory region.
  ProgramStateRef State = Pred->getState();
  const unsigned *BState =
                        State->get<DynamicDispatchBifurcationMap>(BifurReg);
  if (BState) {
    // If we are on the "inline" path, keep inlining if possible.
    if (*BState == DynamicDispatchModeInlined)
      if (inlineCall(Call, D, Bldr, Pred, State))
        return;
    // If inline failed, or we are on the path where we assume we
    // don't have enough info about the receiver to inline, conjure the
    // return value and invalidate the regions.
    conservativeEvalCall(Call, Bldr, Pred, State);
    return;
  }

  // If we got here, this is the first time we process a message to this
  // region, so split the path.
  ProgramStateRef IState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                               DynamicDispatchModeInlined);
  inlineCall(Call, D, Bldr, Pred, IState);

  ProgramStateRef NoIState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                               DynamicDispatchModeConservative);
  conservativeEvalCall(Call, Bldr, Pred, NoIState);

  NumOfDynamicDispatchPathSplits++;
}

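// Run the pre-statement checker callbacks for the return statement and, when
// a value is returned, generate the corresponding post-statement nodes. The
// actual binding of the returned value to the call site happens later, in
// processCallExit.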
void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
                                 ExplodedNodeSet &Dst) {
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this);

  StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx);

  if (RS->getRetValue()) {
    for (ExplodedNodeSet::iterator it = dstPreVisit.begin(),
                                  ei = dstPreVisit.end(); it != ei; ++it) {
      B.generateNode(RS, *it, (*it)->getState());
    }
  }
}