MemorySSA.cpp revision 355940
//===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the MemorySSA class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Use.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <memory>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memoryssa"

INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                      true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                    true)

INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
                      "Memory SSA Printer", false, false)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
                    "Memory SSA Printer", false, false)

static cl::opt<unsigned> MaxCheckLimit(
    "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));

// Always verify MemorySSA if expensive checking is enabled.
#ifdef EXPENSIVE_CHECKS
bool llvm::VerifyMemorySSA = true;
#else
bool llvm::VerifyMemorySSA = false;
#endif
/// Enables MemorySSA as a dependency for loop passes in the legacy pass
/// manager.
cl::opt<bool> llvm::EnableMSSALoopDependency(
    "enable-mssa-loop-dependency", cl::Hidden, cl::init(false),
    cl::desc("Enable MemorySSA dependency for loop pass manager"));

static cl::opt<bool, true>
    VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
                     cl::Hidden, cl::desc("Enable verification of MemorySSA."));

namespace llvm {

/// An assembly annotator class to print Memory SSA information in
/// comments.
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
  friend class MemorySSA;

  const MemorySSA *MSSA;

public:
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
};

} // end namespace llvm
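
// As an illustration (hypothetical IR, not produced by this file): printing a
// function with this annotator yields output of roughly the form
//
//   ; 1 = MemoryDef(liveOnEntry)
//   store i32 0, i32* %p
//   ; MemoryUse(1)
//   %v = load i32, i32* %p
//
// where each MemoryAccess is emitted as a comment line before its basic block
// or instruction.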

namespace {

/// Our current alias analysis API differentiates heavily between calls and
/// non-calls, and functions called on one usually assert on the other.
/// This class encapsulates the distinction to simplify other code that wants
/// "Memory affecting instructions and related data" to use as a key.
/// For example, this class is used as a DenseMap key in the use optimizer.
class MemoryLocOrCall {
public:
  bool IsCall = false;

  MemoryLocOrCall(MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}
  MemoryLocOrCall(const MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}

  MemoryLocOrCall(Instruction *Inst) {
    if (auto *C = dyn_cast<CallBase>(Inst)) {
      IsCall = true;
      Call = C;
    } else {
      IsCall = false;
      // There is no such thing as a MemoryLocation for a fence inst, and it
      // is unique in that regard.
      if (!isa<FenceInst>(Inst))
        Loc = MemoryLocation::get(Inst);
    }
  }

  explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}

  const CallBase *getCall() const {
    assert(IsCall);
    return Call;
  }

  MemoryLocation getLoc() const {
    assert(!IsCall);
    return Loc;
  }

  bool operator==(const MemoryLocOrCall &Other) const {
    if (IsCall != Other.IsCall)
      return false;

    if (!IsCall)
      return Loc == Other.Loc;

    if (Call->getCalledValue() != Other.Call->getCalledValue())
      return false;

    return Call->arg_size() == Other.Call->arg_size() &&
           std::equal(Call->arg_begin(), Call->arg_end(),
                      Other.Call->arg_begin());
  }

private:
  union {
    const CallBase *Call;
    MemoryLocation Loc;
  };
};

} // end anonymous namespace
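
// Illustrative usage sketch (hypothetical values): the use optimizer keys a
// DenseMap on MemoryLocOrCall, so a load or store is keyed by its
// MemoryLocation while a call is keyed by its callee and arguments:
//
//   DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
//   MemoryLocOrCall Key(MU);          // MU is a MemoryUse
//   auto &Info = LocStackInfo[Key];   // same-location uses share an entry
//
// The DenseMapInfo specialization below provides the hashing this requires.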

namespace llvm {

template <> struct DenseMapInfo<MemoryLocOrCall> {
  static inline MemoryLocOrCall getEmptyKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
  }

  static inline MemoryLocOrCall getTombstoneKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
  }

  static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
    if (!MLOC.IsCall)
      return hash_combine(
          MLOC.IsCall,
          DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));

    hash_code hash =
        hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
                                      MLOC.getCall()->getCalledValue()));

    for (const Value *Arg : MLOC.getCall()->args())
      hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
    return hash;
  }

  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

/// This does one-way checks to see if Use could theoretically be hoisted above
/// MayClobber. This will not check the other way around.
///
/// This assumes that, for the purposes of MemorySSA, Use comes directly after
/// MayClobber, with no potentially clobbering operations in between them.
/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
static bool areLoadsReorderable(const LoadInst *Use,
                                const LoadInst *MayClobber) {
  bool VolatileUse = Use->isVolatile();
  bool VolatileClobber = MayClobber->isVolatile();
  // Volatile operations may never be reordered with other volatile operations.
  if (VolatileUse && VolatileClobber)
    return false;
  // Otherwise, volatile doesn't matter here. From the language reference:
  // "optimizers may change the order of volatile operations relative to
  // non-volatile operations."

  // If a load is seq_cst, it cannot be moved above other loads. If its ordering
  // is weaker, it can be moved above other loads. We just need to be sure that
  // MayClobber isn't an acquire load, because loads can't be moved above
  // acquire loads.
  //
  // Note that this explicitly *does* allow the free reordering of monotonic (or
  // weaker) loads of the same address.
  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
                                                     AtomicOrdering::Acquire);
  return !(SeqCstUse || MayClobberIsAcquire);
}
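
// For example (illustrative IR): with
//
//   %a = load atomic i32, i32* %p acquire, align 4   ; MayClobber
//   %b = load i32, i32* %q                           ; Use
//
// the loads are not reorderable (%b cannot be hoisted above the acquire load),
// whereas two monotonic or unordered loads are freely reorderable.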

namespace {

struct ClobberAlias {
  bool IsClobber;
  Optional<AliasResult> AR;
};

} // end anonymous namespace

// Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
// ignored if IsClobber = false.
template <typename AliasAnalysisType>
static ClobberAlias
instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
                         const Instruction *UseInst, AliasAnalysisType &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");
  const auto *UseCall = dyn_cast<CallBase>(UseInst);
  Optional<AliasResult> AR;

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
    // These intrinsics will show up as affecting memory, but they are just
    // markers, mostly.
    //
    // FIXME: We probably don't actually want MemorySSA to model these at all
    // (including creating MemoryAccesses for them): we just end up inventing
    // clobbers where they don't really exist at all. Please see D43269 for
    // context.
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
      if (UseCall)
        return {false, NoAlias};
      AR = AA.alias(MemoryLocation(II->getArgOperand(1)), UseLoc);
      return {AR != NoAlias, AR};
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::assume:
      return {false, NoAlias};
    default:
      break;
    }
  }

  if (UseCall) {
    ModRefInfo I = AA.getModRefInfo(DefInst, UseCall);
    AR = isMustSet(I) ? MustAlias : MayAlias;
    return {isModOrRefSet(I), AR};
  }

  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
    if (auto *UseLoad = dyn_cast<LoadInst>(UseInst))
      return {!areLoadsReorderable(UseLoad, DefLoad), MayAlias};

  ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
  AR = isMustSet(I) ? MustAlias : MayAlias;
  return {isModSet(I), AR};
}
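
// As a sketch of the non-intrinsic cases above (illustrative IR): for a
// MemoryDef "store i32 0, i32* %p" queried against the location of
// "load i32, i32* %p", getModRefInfo reports Mod (with the Must bit when the
// pointers provably must-alias), so this returns {true, MustAlias}. Against a
// provably non-aliasing location it reports NoModRef, and this returns
// {false, MayAlias}, with AR ignored per the contract above.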

template <typename AliasAnalysisType>
static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
                                             const MemoryUseOrDef *MU,
                                             const MemoryLocOrCall &UseMLOC,
                                             AliasAnalysisType &AA) {
  // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
  // to exist while MemoryLocOrCall is pushed through places.
  if (UseMLOC.IsCall)
    return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
                                    AA);
  return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
                                  AA);
}

// Return true when MD may alias MU, return false otherwise.
bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                        AliasAnalysis &AA) {
  return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
}

namespace {

struct UpwardsMemoryQuery {
  // True if our original query started off as a call
  bool IsCall = false;
  // The pointer location we started the query with. This will be empty if
  // IsCall is true.
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst = nullptr;
  // The MemoryAccess we actually got called with, used to test local domination
  const MemoryAccess *OriginalAccess = nullptr;
  Optional<AliasResult> AR = MayAlias;
  bool SkipSelfAccess = false;

  UpwardsMemoryQuery() = default;

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
    if (!IsCall)
      StartingLoc = MemoryLocation::get(Inst);
  }
};

} // end anonymous namespace

static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
                           BatchAAResults &AA) {
  Instruction *Inst = MD->getMemoryInst();
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_end:
      return AA.alias(MemoryLocation(II->getArgOperand(1)), Loc) == MustAlias;
    default:
      return false;
    }
  }
  return false;
}
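
// For example (illustrative IR): given a MemoryDef for
//
//   call void @llvm.lifetime.end.p0i8(i64 4, i8* %p)
//
// a use of a location that MustAlias %p is known to read dead memory, so the
// use optimizer below can treat it as live on entry rather than walking
// further.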

template <typename AliasAnalysisType>
static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
                                                   const Instruction *I) {
  // If the memory can't be changed, then loads of the memory can't be
  // clobbered.
  return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
                              AA.pointsToConstantMemory(MemoryLocation(
                                  cast<LoadInst>(I)->getPointerOperand())));
}
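
// For example (illustrative IR): a load tagged with !invariant.load metadata,
//
//   %v = load i32, i32* %p, !invariant.load !0
//
// or a load from a constant global can never be clobbered, so such uses get
// liveOnEntry as their defining access without any walking.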

/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// in between `Start` and `ClobberAt` clobbers `Start`.
///
/// This is meant to be as simple and self-contained as possible. Because it
/// uses no cache, etc., it can be relatively expensive.
///
/// \param Start     The MemoryAccess that we want to walk from.
/// \param ClobberAt A clobber for Start.
/// \param StartLoc  The MemoryLocation for Start.
/// \param MSSA      The MemorySSA instance that Start and ClobberAt belong to.
/// \param Query     The UpwardsMemoryQuery we used for our search.
/// \param AA        The AliasAnalysis we used for our search.
/// \param AllowImpreciseClobber Always false, unless we do relaxed verify.
template <typename AliasAnalysisType>
LLVM_ATTRIBUTE_UNUSED static void
checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
                   const MemoryLocation &StartLoc, const MemorySSA &MSSA,
                   const UpwardsMemoryQuery &Query, AliasAnalysisType &AA,
                   bool AllowImpreciseClobber = false) {
  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");

  if (MSSA.isLiveOnEntryDef(Start)) {
    assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
           "liveOnEntry must clobber itself");
    return;
  }

  bool FoundClobber = false;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;
  SmallVector<ConstMemoryAccessPair, 8> Worklist;
  Worklist.emplace_back(Start, StartLoc);
  // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
  // is found, complain.
  while (!Worklist.empty()) {
    auto MAP = Worklist.pop_back_val();
    // All we care about is that nothing from Start to ClobberAt clobbers Start.
    // We learn nothing from revisiting nodes.
    if (!VisitedPhis.insert(MAP).second)
      continue;

    for (const auto *MA : def_chain(MAP.first)) {
      if (MA == ClobberAt) {
        if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
          // instructionClobbersQuery isn't essentially free, so don't use `|=`,
          // since it won't let us short-circuit.
          //
          // Also, note that this can't be hoisted out of the `Worklist` loop,
          // since MD may only act as a clobber for 1 of N MemoryLocations.
          FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
          if (!FoundClobber) {
            ClobberAlias CA =
                instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
            if (CA.IsClobber) {
              FoundClobber = true;
              // Not used: CA.AR;
            }
          }
        }
        break;
      }

      // We should never hit liveOnEntry, unless it's the clobber.
      assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");

      if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
        // If Start is a Def, skip self.
        if (MD == Start)
          continue;

        assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
                    .IsClobber &&
               "Found clobber before reaching ClobberAt!");
        continue;
      }

      if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
        (void)MU;
        assert(MU == Start &&
               "Can only find use in def chain if Start is a use");
        continue;
      }

      assert(isa<MemoryPhi>(MA));
      Worklist.append(
          upward_defs_begin({const_cast<MemoryAccess *>(MA), MAP.second}),
          upward_defs_end());
    }
  }

  // If the verify is done following an optimization, it's possible that
  // ClobberAt was a conservative clobbering that we can now infer is not a
  // true clobbering access. Don't fail the verify if that's the case.
  // We do have accesses that claim they're optimized, but could be optimized
  // further. Updating all these can be expensive, so allow it for now (FIXME).
  if (AllowImpreciseClobber)
    return;

  // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
  // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
         "ClobberAt never acted as a clobber");
}

namespace {

/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
/// in one class.
template <class AliasAnalysisType> class ClobberWalker {
  /// Save a few bytes by using unsigned instead of size_t.
  using ListIndex = unsigned;

  /// Represents a span of contiguous MemoryDefs, potentially ending in a
  /// MemoryPhi.
  struct DefPath {
    MemoryLocation Loc;
    // Note that, because we always walk in reverse, Last will always dominate
    // First. Also note that First and Last are inclusive.
    MemoryAccess *First;
    MemoryAccess *Last;
    Optional<ListIndex> Previous;

    DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
            Optional<ListIndex> Previous)
        : Loc(Loc), First(First), Last(Last), Previous(Previous) {}

    DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
            Optional<ListIndex> Previous)
        : DefPath(Loc, Init, Init, Previous) {}
  };

  const MemorySSA &MSSA;
  AliasAnalysisType &AA;
  DominatorTree &DT;
  UpwardsMemoryQuery *Query;
  unsigned *UpwardWalkLimit;

  // Phi optimization bookkeeping
  SmallVector<DefPath, 32> Paths;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;

  /// Find the nearest def or phi that `From` can legally be optimized to.
  const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
    assert(From->getNumOperands() && "Phi with no operands?");

    BasicBlock *BB = From->getBlock();
    MemoryAccess *Result = MSSA.getLiveOnEntryDef();
    DomTreeNode *Node = DT.getNode(BB);
    while ((Node = Node->getIDom())) {
      auto *Defs = MSSA.getBlockDefs(Node->getBlock());
      if (Defs)
        return &*Defs->rbegin();
    }
    return Result;
  }

  /// Result of calling walkToPhiOrClobber.
  struct UpwardsWalkResult {
    /// The "Result" of the walk. Either a clobber, the last thing we walked, or
    /// both. Includes alias info when a clobber is found.
    MemoryAccess *Result;
    bool IsKnownClobber;
    Optional<AliasResult> AR;
  };

  /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
  /// This will update Desc.Last as it walks. It will (optionally) also stop at
  /// StopAt.
  ///
  /// This does not test for whether StopAt is a clobber.
  UpwardsWalkResult
  walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
                     const MemoryAccess *SkipStopAt = nullptr) const {
    assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
    assert(UpwardWalkLimit && "Need a valid walk limit");
    bool LimitAlreadyReached = false;
    // (*UpwardWalkLimit) may be 0 here, due to the loop in tryOptimizePhi. Set
    // it to 1. This will not do any alias() calls. It either returns in the
    // first iteration in the loop below, or is set back to 0 if all def chains
    // are free of MemoryDefs.
    if (!*UpwardWalkLimit) {
      *UpwardWalkLimit = 1;
      LimitAlreadyReached = true;
    }

    for (MemoryAccess *Current : def_chain(Desc.Last)) {
      Desc.Last = Current;
      if (Current == StopAt || Current == SkipStopAt)
        return {Current, false, MayAlias};

      if (auto *MD = dyn_cast<MemoryDef>(Current)) {
        if (MSSA.isLiveOnEntryDef(MD))
          return {MD, true, MustAlias};

        if (!--*UpwardWalkLimit)
          return {Current, true, MayAlias};

        ClobberAlias CA =
            instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
        if (CA.IsClobber)
          return {MD, true, CA.AR};
      }
    }

    if (LimitAlreadyReached)
      *UpwardWalkLimit = 0;

    assert(isa<MemoryPhi>(Desc.Last) &&
           "Ended at a non-clobber that's not a phi?");
    return {Desc.Last, false, MayAlias};
  }
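
  // Illustrative walk (hypothetical accesses): given the def chain
  //   3 = MemoryDef(2) <- 2 = MemoryDef(1) <- 1 = MemoryDef(liveOnEntry)
  // and a location clobbered only by access 1, a walk starting at 3 steps past
  // 3 and 2 (their instructionClobbersQuery answers are false) and returns
  // {1, true, AR}. Each MemoryDef queried consumes one unit of
  // *UpwardWalkLimit.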

  void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                   ListIndex PriorNode) {
    auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}),
                                 upward_defs_end());
    for (const MemoryAccessPair &P : UpwardDefs) {
      PausedSearches.push_back(Paths.size());
      Paths.emplace_back(P.second, P.first, PriorNode);
    }
  }

  /// Represents a search that terminated after finding a clobber. This clobber
  /// may or may not be present in the path of defs from LastNode..SearchStart,
  /// since it may have been retrieved from cache.
  struct TerminatedPath {
    MemoryAccess *Clobber;
    ListIndex LastNode;
  };

  /// Get an access that keeps us from optimizing to the given phi.
  ///
  /// PausedSearches is an array of indices into the Paths array. Its incoming
  /// value is the indices of searches that stopped at the last phi optimization
  /// target. It's left in an unspecified state.
  ///
  /// If this returns None, NewPaused is a vector of searches that terminated
  /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
  Optional<TerminatedPath>
  getBlockingAccess(const MemoryAccess *StopWhere,
                    SmallVectorImpl<ListIndex> &PausedSearches,
                    SmallVectorImpl<ListIndex> &NewPaused,
                    SmallVectorImpl<TerminatedPath> &Terminated) {
    assert(!PausedSearches.empty() && "No searches to continue?");

    // BFS vs DFS really doesn't make a difference here, so just do a DFS with
    // PausedSearches as our stack.
    while (!PausedSearches.empty()) {
      ListIndex PathIndex = PausedSearches.pop_back_val();
      DefPath &Node = Paths[PathIndex];

      // If we've already visited this path with this MemoryLocation, we don't
      // need to do so again.
      //
      // NOTE: That we just drop these paths on the ground makes caching
      // behavior sporadic. e.g. given a diamond:
      //  A
      // B C
      //  D
      //
      // ...If we walk D, B, A, C, we'll only cache the result of phi
      // optimization for A, B, and D; C will be skipped because it dies here.
      // This arguably isn't the worst thing ever, since:
      //   - We generally query things in a top-down order, so if we got below D
      //     without needing cache entries for {C, MemLoc}, then chances are
      //     that those cache entries would end up ultimately unused.
      //   - We still cache things for A, so C only needs to walk up a bit.
      // If this behavior becomes problematic, we can fix without a ton of extra
      // work.
      if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
        continue;

      const MemoryAccess *SkipStopWhere = nullptr;
      if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
        assert(isa<MemoryDef>(Query->OriginalAccess));
        SkipStopWhere = Query->OriginalAccess;
      }

      UpwardsWalkResult Res = walkToPhiOrClobber(Node,
                                                 /*StopAt=*/StopWhere,
                                                 /*SkipStopAt=*/SkipStopWhere);
      if (Res.IsKnownClobber) {
        assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);

        // If this wasn't a cache hit, we hit a clobber when walking. That's a
        // failure.
        TerminatedPath Term{Res.Result, PathIndex};
        if (!MSSA.dominates(Res.Result, StopWhere))
          return Term;

        // Otherwise, it's a valid thing to potentially optimize to.
        Terminated.push_back(Term);
        continue;
      }

      if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
        // We've hit our target. Save this path off in case we want to continue
        // walking. If we are in the mode of skipping the OriginalAccess, and
        // we've reached back to the OriginalAccess, do not save the path; we've
        // just looped back to self.
        if (Res.Result != SkipStopWhere)
          NewPaused.push_back(PathIndex);
        continue;
      }

      assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
      addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
    }

    return None;
  }

  template <typename T, typename Walker>
  struct generic_def_path_iterator
      : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
                                    std::forward_iterator_tag, T *> {
    generic_def_path_iterator() {}
    generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}

    T &operator*() const { return curNode(); }

    generic_def_path_iterator &operator++() {
      N = curNode().Previous;
      return *this;
    }

    bool operator==(const generic_def_path_iterator &O) const {
      if (N.hasValue() != O.N.hasValue())
        return false;
      return !N.hasValue() || *N == *O.N;
    }

  private:
    T &curNode() const { return W->Paths[*N]; }

    Walker *W = nullptr;
    Optional<ListIndex> N = None;
  };

  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
  using const_def_path_iterator =
      generic_def_path_iterator<const DefPath, const ClobberWalker>;

  iterator_range<def_path_iterator> def_path(ListIndex From) {
    return make_range(def_path_iterator(this, From), def_path_iterator());
  }

  iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
    return make_range(const_def_path_iterator(this, From),
                      const_def_path_iterator());
  }

  struct OptznResult {
    /// The path that contains our result.
    TerminatedPath PrimaryClobber;
    /// The paths that we can legally cache back from, but that aren't
    /// necessarily the result of the Phi optimization.
    SmallVector<TerminatedPath, 4> OtherClobbers;
  };

  ListIndex defPathIndex(const DefPath &N) const {
    // The assert looks nicer if we don't need to do &N
    const DefPath *NP = &N;
    assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
           "Out of bounds DefPath!");
    return NP - &Paths.front();
  }

  /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
  /// that act as legal clobbers. Note that this won't return *all* clobbers.
  ///
  /// Phi optimization algorithm tl;dr:
  ///   - Find the earliest def/phi, A, we can optimize to
  ///   - Find if all paths from the starting memory access ultimately reach A
  ///     - If not, optimization isn't possible.
  ///     - Otherwise, walk from A to another clobber or phi, A'.
  ///       - If A' is a def, we're done.
  ///       - If A' is a phi, try to optimize it.
  ///
  /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
  /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
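  ///
  /// Illustrative sketch (not part of the original comment): for a diamond CFG
  ///
  ///        A   <- 1 = MemoryDef, the walk target
  ///       / \
  ///      B   C <- no defs of our location
  ///       \ /
  ///        D   <- MemoryPhi = Start
  ///
  /// both paths from the Phi reach A without hitting a clobber; if A clobbers
  /// the location, the Phi optimizes to A, and otherwise the walk continues
  /// above A. If B instead contained a clobbering def, getBlockingAccess would
  /// report it and the Phi itself would remain the answer.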
  OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
                             const MemoryLocation &Loc) {
    assert(Paths.empty() && VisitedPhis.empty() &&
           "Reset the optimization state.");

    Paths.emplace_back(Loc, Start, Phi, None);
    // Stores how many "valid" optimization nodes we had prior to calling
    // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
    auto PriorPathsSize = Paths.size();

    SmallVector<ListIndex, 16> PausedSearches;
    SmallVector<ListIndex, 8> NewPaused;
    SmallVector<TerminatedPath, 4> TerminatedPaths;

    addSearches(Phi, PausedSearches, 0);

    // Moves the TerminatedPath with the "most dominated" Clobber to the end of
    // Paths.
    auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
      assert(!Paths.empty() && "Need a path to move");
      auto Dom = Paths.begin();
      for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
        if (!MSSA.dominates(I->Clobber, Dom->Clobber))
          Dom = I;
      auto Last = Paths.end() - 1;
      if (Last != Dom)
        std::iter_swap(Last, Dom);
    };

    MemoryPhi *Current = Phi;
    while (true) {
      assert(!MSSA.isLiveOnEntryDef(Current) &&
             "liveOnEntry wasn't treated as a clobber?");

      const auto *Target = getWalkTarget(Current);
      // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
      // optimization for the prior phi.
      assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
        return MSSA.dominates(P.Clobber, Target);
      }));

      // FIXME: This is broken, because the Blocker may be reported to be
      // liveOnEntry, and we'll happily wait for that to disappear (read: never)
      // For the moment, this is fine, since we do nothing with blocker info.
      if (Optional<TerminatedPath> Blocker = getBlockingAccess(
              Target, PausedSearches, NewPaused, TerminatedPaths)) {

        // Find the node we started at. We can't search based on N->Last, since
        // we may have gone around a loop with a different MemoryLocation.
        auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
          return defPathIndex(N) < PriorPathsSize;
        });
        assert(Iter != def_path_iterator());

        DefPath &CurNode = *Iter;
        assert(CurNode.Last == Current);

        // Two things:
        // A. We can't reliably cache all of NewPaused back. Consider a case
        //    where we have two paths in NewPaused; one of which can't optimize
        //    above this phi, whereas the other can. If we cache the second path
        //    back, we'll end up with suboptimal cache entries. We can handle
        //    cases like this a bit better when we either try to find all
        //    clobbers that block phi optimization, or when our cache starts
        //    supporting unfinished searches.
        // B. We can't reliably cache TerminatedPaths back here without doing
        //    extra checks; consider a case like:
        //       T
        //      / \
        //     D   C
        //      \ /
        //       S
        //    Where T is our target, C is a node with a clobber on it, D is a
        //    diamond (with a clobber *only* on the left or right node, N), and
        //    S is our start. Say we walk to D, through the node opposite N
        //    (read: ignoring the clobber), and see a cache entry in the top
        //    node of D. That cache entry gets put into TerminatedPaths. We then
        //    walk up to C (N is later in our worklist), find the clobber, and
        //    quit. If we append TerminatedPaths to OtherClobbers, we'll cache
        //    the bottom part of D to the cached clobber, ignoring the clobber
        //    in N. Again, this problem goes away if we start tracking all
        //    blockers for a given phi optimization.
        TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
        return {Result, {}};
      }

      // If there's nothing left to search, then all paths led to valid clobbers
      // that we got from our cache; pick the nearest to the start, and allow
      // the rest to be cached back.
      if (NewPaused.empty()) {
        MoveDominatedPathToEnd(TerminatedPaths);
        TerminatedPath Result = TerminatedPaths.pop_back_val();
        return {Result, std::move(TerminatedPaths)};
      }

      MemoryAccess *DefChainEnd = nullptr;
      SmallVector<TerminatedPath, 4> Clobbers;
      for (ListIndex Paused : NewPaused) {
        UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
        if (WR.IsKnownClobber)
          Clobbers.push_back({WR.Result, Paused});
        else
          // Micro-opt: If we hit the end of the chain, save it.
          DefChainEnd = WR.Result;
      }

      if (!TerminatedPaths.empty()) {
        // If we couldn't find the dominating phi/liveOnEntry in the above loop,
        // do it now.
        if (!DefChainEnd)
          for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
            DefChainEnd = MA;

        // If any of the terminated paths don't dominate the phi we'll try to
        // optimize, we need to figure out what they are and quit.
        const BasicBlock *ChainBB = DefChainEnd->getBlock();
        for (const TerminatedPath &TP : TerminatedPaths) {
          // Because we know that DefChainEnd is as "high" as we can go, we
          // don't need local dominance checks; BB dominance is sufficient.
          if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
            Clobbers.push_back(TP);
        }
      }

      // If we have clobbers in the def chain, find the one closest to Current
      // and quit.
      if (!Clobbers.empty()) {
        MoveDominatedPathToEnd(Clobbers);
        TerminatedPath Result = Clobbers.pop_back_val();
        return {Result, std::move(Clobbers)};
      }

      assert(all_of(NewPaused,
                    [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));

      // Because liveOnEntry is a clobber, this must be a phi.
      auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);

      PriorPathsSize = Paths.size();
      PausedSearches.clear();
      for (ListIndex I : NewPaused)
        addSearches(DefChainPhi, PausedSearches, I);
      NewPaused.clear();

      Current = DefChainPhi;
    }
  }

  void verifyOptResult(const OptznResult &R) const {
    assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
      return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
    }));
  }

  void resetPhiOptznState() {
    Paths.clear();
    VisitedPhis.clear();
  }

public:
  ClobberWalker(const MemorySSA &MSSA, AliasAnalysisType &AA, DominatorTree &DT)
      : MSSA(MSSA), AA(AA), DT(DT) {}

  AliasAnalysisType *getAA() { return &AA; }
  /// Finds the nearest clobber for the given query, optimizing phis if
  /// possible.
  MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q,
                            unsigned &UpWalkLimit) {
    Query = &Q;
    UpwardWalkLimit = &UpWalkLimit;
    // Starting limit must be > 0.
    if (!UpWalkLimit)
      UpWalkLimit++;

    MemoryAccess *Current = Start;
    // This walker pretends uses don't exist. If we're handed one, silently grab
    // its def. (This has the nice side-effect of ensuring we never cache uses)
    if (auto *MU = dyn_cast<MemoryUse>(Start))
      Current = MU->getDefiningAccess();

    DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
    // Fast path for the overly-common case (no crazy phi optimization
    // necessary)
    UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
    MemoryAccess *Result;
    if (WalkResult.IsKnownClobber) {
      Result = WalkResult.Result;
      Q.AR = WalkResult.AR;
    } else {
      OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
                                          Current, Q.StartingLoc);
      verifyOptResult(OptRes);
      resetPhiOptznState();
      Result = OptRes.PrimaryClobber.Clobber;
    }

#ifdef EXPENSIVE_CHECKS
    if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
      checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
#endif
    return Result;
  }
};

struct RenamePassData {
  DomTreeNode *DTN;
  DomTreeNode::const_iterator ChildIt;
  MemoryAccess *IncomingVal;

  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
                 MemoryAccess *M)
      : DTN(D), ChildIt(It), IncomingVal(M) {}

  void swap(RenamePassData &RHS) {
    std::swap(DTN, RHS.DTN);
    std::swap(ChildIt, RHS.ChildIt);
    std::swap(IncomingVal, RHS.IncomingVal);
  }
};

} // end anonymous namespace

namespace llvm {

template <class AliasAnalysisType> class MemorySSA::ClobberWalkerBase {
  ClobberWalker<AliasAnalysisType> Walker;
  MemorySSA *MSSA;

public:
  ClobberWalkerBase(MemorySSA *M, AliasAnalysisType *A, DominatorTree *D)
      : Walker(*M, *A, *D), MSSA(M) {}

  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
                                              const MemoryLocation &,
                                              unsigned &);
  // Third argument (bool) defines whether the clobber search should skip the
  // original queried access. If true, there will be a follow-up query searching
  // for a clobber access past "self". Note that the Optimized access is not
  // updated if a new clobber is found by this SkipSelf search. If this
  // additional query becomes heavily used we may decide to cache the result.
  // Walker instantiations will decide how to set the SkipSelf bool.
  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, unsigned &, bool);
};

/// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
/// longer does caching on its own, but the name has been retained for the
/// moment.
template <class AliasAnalysisType>
class MemorySSA::CachingWalker final : public MemorySSAWalker {
  ClobberWalkerBase<AliasAnalysisType> *Walker;

public:
  CachingWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~CachingWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, UWL, false);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
  }

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, UpwardWalkLimit);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
  }

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
};

template <class AliasAnalysisType>
class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
  ClobberWalkerBase<AliasAnalysisType> *Walker;

public:
  SkipSelfWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~SkipSelfWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, UWL, true);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
  }

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, UpwardWalkLimit);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
  }

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
};

} // end namespace llvm

void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
                                    bool RenameAllUses) {
  // Pass through values to our successors
  for (const BasicBlock *S : successors(BB)) {
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    if (RenameAllUses) {
      int PhiIndex = Phi->getBasicBlockIndex(BB);
      assert(PhiIndex != -1 && "Incomplete phi during partial rename");
      Phi->setIncomingValue(PhiIndex, IncomingVal);
    } else
      Phi->addIncoming(IncomingVal, BB);
  }
}

/// Rename a single basic block into MemorySSA form.
/// Uses the standard SSA renaming algorithm.
/// \returns The new incoming value.
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
                                     bool RenameAllUses) {
  auto It = PerBlockAccesses.find(BB);
  // Skip most processing if the list is empty.
  if (It != PerBlockAccesses.end()) {
    AccessList *Accesses = It->second.get();
    for (MemoryAccess &L : *Accesses) {
      if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
        if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
          MUD->setDefiningAccess(IncomingVal);
        if (isa<MemoryDef>(&L))
          IncomingVal = &L;
      } else {
        IncomingVal = &L;
      }
    }
  }
  return IncomingVal;
}

/// This is the standard SSA renaming algorithm.
///
/// We walk the dominator tree in preorder, renaming accesses, and then filling
/// in phi nodes in our successors.
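///
/// Illustrative sketch (hypothetical CFG): for A -> {B, C}, B -> D, C -> D,
/// where A holds "1 = MemoryDef(liveOnEntry)" and D holds a MemoryPhi, the
/// preorder walk carries access 1 into B and C as their incoming value, and
/// each of the edges B->D and C->D then fills in one incoming operand of D's
/// MemoryPhi via renameSuccessorPhis.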
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSetImpl<BasicBlock *> &Visited,
                           bool SkipVisited, bool RenameAllUses) {
  assert(Root && "Trying to rename accesses in an unreachable block");

  SmallVector<RenamePassData, 32> WorkStack;
  // Skip everything if we already renamed this block and we are skipping.
  // Note: You can't sink this into the if, because we need it to occur
  // regardless of whether we skip blocks or not.
  bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
  if (SkipVisited && AlreadyVisited)
    return;

  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
  WorkStack.push_back({Root, Root->begin(), IncomingVal});

  while (!WorkStack.empty()) {
    DomTreeNode *Node = WorkStack.back().DTN;
    DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
    IncomingVal = WorkStack.back().IncomingVal;

    if (ChildIt == Node->end()) {
      WorkStack.pop_back();
    } else {
      DomTreeNode *Child = *ChildIt;
      ++WorkStack.back().ChildIt;
      BasicBlock *BB = Child->getBlock();
      // Note: You can't sink this into the if, because we need it to occur
      // regardless of whether we skip blocks or not.
      AlreadyVisited = !Visited.insert(BB).second;
      if (SkipVisited && AlreadyVisited) {
        // We already visited this during our renaming, which can happen when
        // being asked to rename multiple blocks. Figure out the incoming val,
        // which is the last def.
        // Incoming value can only change if there is a block def, and in that
        // case, it's the last block def in the list.
        if (auto *BlockDefs = getWritableBlockDefs(BB))
          IncomingVal = &*BlockDefs->rbegin();
      } else
        IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
      renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
      WorkStack.push_back({Child, Child->begin(), IncomingVal});
    }
  }
}

/// This handles unreachable block accesses by deleting phi nodes in
/// unreachable blocks, and marking all other unreachable MemoryAccess's as
/// being uses of the live on entry definition.
void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
  assert(!DT->isReachableFromEntry(BB) &&
         "Reachable block found while handling unreachable blocks");

  // Make sure phi nodes in our reachable successors end up with a
  // LiveOnEntryDef for our incoming edge, even though our block is forward
  // unreachable.  We could just disconnect these blocks from the CFG fully,
  // but we do not right now.
  for (const BasicBlock *S : successors(BB)) {
    if (!DT->isReachableFromEntry(S))
      continue;
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    Phi->addIncoming(LiveOnEntryDef.get(), BB);
  }

  auto It = PerBlockAccesses.find(BB);
  if (It == PerBlockAccesses.end())
    return;

  auto &Accesses = It->second;
  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
    auto Next = std::next(AI);
    // If we have a phi, just remove it. We are going to replace all
    // users with live on entry.
    if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
      UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
    else
      Accesses->erase(AI);
    AI = Next;
  }
}

MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : AA(nullptr), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr), NextID(0) {
  // Build MemorySSA using a batch alias analysis. This reuses the internal
  // state that AA collects during an alias()/getModRefInfo() call. This is
  // safe because there are no CFG changes while building MemorySSA, and it can
  // significantly reduce the time spent by the compiler in AA, because we will
  // make queries about all the instructions in the Function.
  BatchAAResults BatchAA(*AA);
  buildMemorySSA(BatchAA);
  // Intentionally leave AA as nullptr while building so we don't accidentally
  // use non-batch AliasAnalysis.
  this->AA = AA;
  // Also create the walker here.
  getWalker();
}

MemorySSA::~MemorySSA() {
  // Drop all our references
  for (const auto &Pair : PerBlockAccesses)
    for (MemoryAccess &MA : *Pair.second)
      MA.dropAllReferences();
}

MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = llvm::make_unique<AccessList>();
  return Res.first->second.get();
}

MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
  auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = llvm::make_unique<DefsList>();
  return Res.first->second.get();
}

namespace llvm {

/// This class is a batch walker of all MemoryUse's in the program, and points
/// their defining access at the thing that actually clobbers them.  Because it
/// is a batch walker that touches everything, it does not operate like the
/// other walkers.  This walker is basically performing a top-down SSA renaming
/// pass, where the version stack is used as the cache.  This enables it to be
/// significantly more time and memory efficient than using the regular walker,
/// which is walking bottom-up.
class MemorySSA::OptimizeUses {
public:
  OptimizeUses(MemorySSA *MSSA, CachingWalker<BatchAAResults> *Walker,
               BatchAAResults *BAA, DominatorTree *DT)
      : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}

  void optimizeUses();

private:
  /// This represents where a given MemoryLocation is in the stack.
  struct MemlocStackInfo {
    // This essentially is keeping track of versions of the stack. Whenever
    // the stack changes due to pushes or pops, these versions increase.
    unsigned long StackEpoch;
    unsigned long PopEpoch;
    // This is the lower bound of places on the stack to check. It is equal to
    // the place the last stack walk ended.
    // Note: Correctness depends on this being initialized to 0, which DenseMap
    // does.
    unsigned long LowerBound;
    const BasicBlock *LowerBoundBlock;
    // This is where the last walk for this memory location ended.
    unsigned long LastKill;
    bool LastKillValid;
    Optional<AliasResult> AR;
  };

  void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
                           SmallVectorImpl<MemoryAccess *> &,
                           DenseMap<MemoryLocOrCall, MemlocStackInfo> &);

  MemorySSA *MSSA;
  CachingWalker<BatchAAResults> *Walker;
  BatchAAResults *AA;
  DominatorTree *DT;
};

} // end namespace llvm

/// Optimize the uses in a given block. This is basically the SSA renaming
/// algorithm, with one caveat: We are able to use a single stack for all
/// MemoryUses.  This is because the set of *possible* reaching MemoryDefs is
/// the same for every MemoryUse.  The *actual* clobbering MemoryDef is just
/// going to be some position in that stack of possible ones.
///
/// We track the stack positions that each MemoryLocation needs
/// to check, and last ended at.  This is because we only want to check the
/// things that changed since last time.  The same MemoryLocation should
/// get clobbered by the same store (getModRefInfo does not use invariantness or
/// things like this, and if they start, we can modify MemoryLocOrCall to
/// include relevant data).
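///
/// Illustrative sketch (hypothetical stack contents): if the version stack
/// currently holds [liveOnEntry, 1 = MemoryDef, 2 = MemoryDef] and this
/// location's last walk already checked entries up to index 1 (its
/// LowerBound), only entry 2, the newly pushed part of the stack, needs a
/// fresh clobber check.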
1312void MemorySSA::OptimizeUses::optimizeUsesInBlock(
1313    const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
1314    SmallVectorImpl<MemoryAccess *> &VersionStack,
1315    DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {
1316
1317  /// If no accesses, nothing to do.
1318  MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
1319  if (Accesses == nullptr)
1320    return;
1321
1322  // Pop everything that doesn't dominate the current block off the stack,
1323  // increment the PopEpoch to account for this.
1324  while (true) {
1325    assert(
1326        !VersionStack.empty() &&
1327        "Version stack should have liveOnEntry sentinel dominating everything");
1328    BasicBlock *BackBlock = VersionStack.back()->getBlock();
1329    if (DT->dominates(BackBlock, BB))
1330      break;
1331    while (VersionStack.back()->getBlock() == BackBlock)
1332      VersionStack.pop_back();
1333    ++PopEpoch;
1334  }
1335
1336  for (MemoryAccess &MA : *Accesses) {
1337    auto *MU = dyn_cast<MemoryUse>(&MA);
1338    if (!MU) {
1339      VersionStack.push_back(&MA);
1340      ++StackEpoch;
1341      continue;
1342    }
1343
1344    if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
1345      MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
1346      continue;
1347    }
1348
1349    MemoryLocOrCall UseMLOC(MU);
1350    auto &LocInfo = LocStackInfo[UseMLOC];
1351    // If the pop epoch changed, it means we've removed stuff from top of
1352    // stack due to changing blocks. We may have to reset the lower bound or
1353    // last kill info.
    if (LocInfo.PopEpoch != PopEpoch) {
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
      // If the lower bound was in something that no longer dominates us, we
      // have to reset it.
      // We can't simply track stack size, because the stack may have had
      // pushes/pops in the meantime.
      // XXX: This is non-optimal, but is only slower in cases with heavily
      // branching dominator trees.  Getting the optimal number of queries
      // would require making lowerbound and lastkill a per-loc stack, and
      // popping it until the top of that stack dominates us.  This does not
      // seem worth it ATM.
      // A much cheaper optimization would be to always explore the deepest
      // branch of the dominator tree first. This will guarantee this resets on
      // the smallest set of blocks.
      if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
          !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
        // Reset the lower bound of things to check.
        // TODO: Some day we should be able to reset to last kill, rather than
        // 0.
        LocInfo.LowerBound = 0;
        LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
        LocInfo.LastKillValid = false;
      }
    } else if (LocInfo.StackEpoch != StackEpoch) {
      // If all that has changed is the StackEpoch, we only have to check the
      // new things on the stack, because we've checked everything before.  In
      // this case, the lower bound of things to check remains the same.
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
    }
    if (!LocInfo.LastKillValid) {
      LocInfo.LastKill = VersionStack.size() - 1;
      LocInfo.LastKillValid = true;
      LocInfo.AR = MayAlias;
    }

    // At this point, we should have corrected last kill and LowerBound to be
    // in bounds.
    assert(LocInfo.LowerBound < VersionStack.size() &&
           "Lower bound out of range");
    assert(LocInfo.LastKill < VersionStack.size() &&
           "Last kill info out of range");
    // In any case, the new upper bound is the top of the stack.
    unsigned long UpperBound = VersionStack.size() - 1;

    if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
      LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
                        << *(MU->getMemoryInst()) << ")"
                        << " because there are "
                        << UpperBound - LocInfo.LowerBound
                        << " stores to disambiguate\n");
      // Because we did not walk, LastKill is no longer valid, as this may
      // have been a kill.
      LocInfo.LastKillValid = false;
      continue;
    }
    bool FoundClobberResult = false;
    unsigned UpwardWalkLimit = MaxCheckLimit;
    while (UpperBound > LocInfo.LowerBound) {
      if (isa<MemoryPhi>(VersionStack[UpperBound])) {
        // For phis, use the walker, see where we ended up, and go there.
        MemoryAccess *Result =
            Walker->getClobberingMemoryAccess(MU, UpwardWalkLimit);
        // We are guaranteed to find it or something is wrong.
        while (VersionStack[UpperBound] != Result) {
          assert(UpperBound != 0);
          --UpperBound;
        }
        FoundClobberResult = true;
        break;
      }

      MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
      // If the lifetime of the pointer ends at this instruction, it's live on
      // entry.
      if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) {
        // Reset UpperBound to liveOnEntryDef's place in the stack.
        UpperBound = 0;
        FoundClobberResult = true;
        LocInfo.AR = MustAlias;
        break;
      }
      ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
      if (CA.IsClobber) {
        FoundClobberResult = true;
        LocInfo.AR = CA.AR;
        break;
      }
      --UpperBound;
    }

    // Note: Phis always have AliasResult AR set to MayAlias ATM.

    // At the end of this loop, UpperBound is either a clobber or the lower
    // bound.  PHI walking may cause it to be < LowerBound, and in fact, <
    // LastKill.
    if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
      // We were last killed now by where we got to.
      if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
        LocInfo.AR = None;
      MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
      LocInfo.LastKill = UpperBound;
    } else {
      // Otherwise, we checked all the new ones, and now we know we can get to
      // LastKill.
      MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
    }
    LocInfo.LowerBound = VersionStack.size() - 1;
    LocInfo.LowerBoundBlock = BB;
  }
}

/// Optimize uses to point to their actual clobbering definitions.
void MemorySSA::OptimizeUses::optimizeUses() {
  SmallVector<MemoryAccess *, 16> VersionStack;
  DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
  VersionStack.push_back(MSSA->getLiveOnEntryDef());

  unsigned long StackEpoch = 1;
  unsigned long PopEpoch = 1;
  // We perform a non-recursive top-down dominator tree walk.
  for (const auto *DomNode : depth_first(DT->getRootNode()))
    optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
                        LocStackInfo);
}

void MemorySSA::placePHINodes(
    const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
  // Determine where our MemoryPhis should go.
  ForwardIDFCalculator IDFs(*DT);
  IDFs.setDefiningBlocks(DefiningBlocks);
  SmallVector<BasicBlock *, 32> IDFBlocks;
  IDFs.calculate(IDFBlocks);

  // Now place MemoryPhi nodes.
  for (auto &BB : IDFBlocks)
    createMemoryPhi(BB);
}
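
// Illustrative sketch (not from a test): if blocks B1 and B2 both contain a
// MemoryDef and their paths merge in block M, then M is in the iterated
// dominance frontier of {B1, B2}, so a MemoryPhi such as
//   3 = MemoryPhi({B1,1},{B2,2})
// is created at the top of M to merge the incoming MemoryDefs.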

void MemorySSA::buildMemorySSA(BatchAAResults &BAA) {
  // We create an access to represent "live on entry", for things like
  // arguments or users of globals, where the memory they use is defined before
  // the beginning of the function. We do not actually insert it into the IR.
  // We do not define a live on exit for the immediate uses, and thus our
  // semantics do *not* imply that something with no immediate uses can simply
  // be removed.
  BasicBlock &StartingPoint = F.getEntryBlock();
  LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
                                     &StartingPoint, NextID++));

  // We maintain lists of memory accesses per block, trading memory for time.
  // The alternative would be to look up the memory access for every possible
  // instruction in the stream.
  SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
  // Go through each block, figure out where defs occur, and chain together all
  // the accesses.
  for (BasicBlock &B : F) {
    bool InsertIntoDef = false;
    AccessList *Accesses = nullptr;
    DefsList *Defs = nullptr;
    for (Instruction &I : B) {
      MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
      if (!MUD)
        continue;

      if (!Accesses)
        Accesses = getOrCreateAccessList(&B);
      Accesses->push_back(MUD);
      if (isa<MemoryDef>(MUD)) {
        InsertIntoDef = true;
        if (!Defs)
          Defs = getOrCreateDefsList(&B);
        Defs->push_back(*MUD);
      }
    }
    if (InsertIntoDef)
      DefiningBlocks.insert(&B);
  }
  placePHINodes(DefiningBlocks);

  // Now do regular SSA renaming on the MemoryDefs/MemoryUses. Visited will get
  // filled in with all blocks.
  SmallPtrSet<BasicBlock *, 16> Visited;
  renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);

  ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BAA, DT);
  CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase);
  OptimizeUses(this, &WalkerLocal, &BAA, DT).optimizeUses();

  // Mark the uses in unreachable blocks as live on entry, so that they go
  // somewhere.
  for (auto &BB : F)
    if (!Visited.count(&BB))
      markUnreachableAsLiveOnEntry(&BB);
}
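
// As an illustrative sketch (not real output from a test), after construction
// a function prints roughly like this via the annotated writer:
//
//   define void @f(i8* %p) {
//   ; 1 = MemoryDef(liveOnEntry)
//     store i8 0, i8* %p
//   ; MemoryUse(1)
//     %v = load i8, i8* %p
//     ret void
//   }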

MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }

MemorySSA::CachingWalker<AliasAnalysis> *MemorySSA::getWalkerImpl() {
  if (Walker)
    return Walker.get();

  if (!WalkerBase)
    WalkerBase =
        llvm::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);

  Walker =
      llvm::make_unique<CachingWalker<AliasAnalysis>>(this, WalkerBase.get());
  return Walker.get();
}

MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
  if (SkipWalker)
    return SkipWalker.get();

  if (!WalkerBase)
    WalkerBase =
        llvm::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);

  SkipWalker =
      llvm::make_unique<SkipSelfWalker<AliasAnalysis>>(this, WalkerBase.get());
  return SkipWalker.get();
}

// This is a helper function used by the creation routines. It places NewAccess
// into the access and defs lists for a given basic block, at the given
// insertion point.
void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
                                        const BasicBlock *BB,
                                        InsertionPlace Point) {
  auto *Accesses = getOrCreateAccessList(BB);
  if (Point == Beginning) {
    // If it's a phi node, it goes first; otherwise, it goes after any phi
    // nodes.
    if (isa<MemoryPhi>(NewAccess)) {
      Accesses->push_front(NewAccess);
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_front(*NewAccess);
    } else {
      auto AI = find_if_not(
          *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
      Accesses->insert(AI, NewAccess);
      if (!isa<MemoryUse>(NewAccess)) {
        auto *Defs = getOrCreateDefsList(BB);
        auto DI = find_if_not(
            *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
        Defs->insert(DI, *NewAccess);
      }
    }
  } else {
    Accesses->push_back(NewAccess);
    if (!isa<MemoryUse>(NewAccess)) {
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_back(*NewAccess);
    }
  }
  BlockNumberingValid.erase(BB);
}

void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
                                      AccessList::iterator InsertPt) {
  auto *Accesses = getWritableBlockAccesses(BB);
  bool WasEnd = InsertPt == Accesses->end();
  Accesses->insert(AccessList::iterator(InsertPt), What);
  if (!isa<MemoryUse>(What)) {
    auto *Defs = getOrCreateDefsList(BB);
    // If we got asked to insert at the end, we have an easy job: just shove it
    // at the end. If we got asked to insert before an existing def, we also get
    // an iterator. If we got asked to insert before a use, we have to hunt for
    // the next def.
    if (WasEnd) {
      Defs->push_back(*What);
    } else if (isa<MemoryDef>(InsertPt)) {
      Defs->insert(InsertPt->getDefsIterator(), *What);
    } else {
      while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
        ++InsertPt;
      // Either we found a def, or we are inserting at the end.
      if (InsertPt == Accesses->end())
        Defs->push_back(*What);
      else
        Defs->insert(InsertPt->getDefsIterator(), *What);
    }
  }
  BlockNumberingValid.erase(BB);
}

void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
  // Keep it in the lookup tables, but remove it from the lists.
  removeFromLists(What, false);

  // Note that moving should implicitly invalidate the optimized state of a
  // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a
  // MemoryDef.
  if (auto *MD = dyn_cast<MemoryDef>(What))
    MD->resetOptimized();
  What->setBlock(BB);
}

// Move What before Where in the IR.  The end result is that What will belong to
// the right lists and have the right Block set, but will not otherwise be
// correct. It will not have the right defining access, and if it is a def,
// things below it will not be properly updated.
void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                       AccessList::iterator Where) {
  prepareForMoveTo(What, BB);
  insertIntoListsBefore(What, BB, Where);
}

void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
                       InsertionPlace Point) {
  if (isa<MemoryPhi>(What)) {
    assert(Point == Beginning &&
           "Can only move a Phi to the beginning of the block");
    // Update the lookup table entry.
    ValueToMemoryAccess.erase(What->getBlock());
    bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
    (void)Inserted;
    assert(Inserted && "Cannot move a Phi to a block that already has one");
  }

  prepareForMoveTo(What, BB);
  insertIntoListsForBlock(What, BB, Point);
}

MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
  assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
  MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
  // Phis are always placed at the front of the block.
  insertIntoListsForBlock(Phi, BB, Beginning);
  ValueToMemoryAccess[BB] = Phi;
  return Phi;
}

MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
                                               MemoryAccess *Definition,
                                               const MemoryUseOrDef *Template) {
  assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
  MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template);
  assert(
      NewAccess != nullptr &&
      "Tried to create a memory access for a non-memory touching instruction");
  NewAccess->setDefiningAccess(Definition);
  return NewAccess;
}

// Return true if the instruction has ordering constraints.
// Note specifically that this only considers stores and loads
// because others are still considered ModRef by getModRefInfo.
static inline bool isOrdered(const Instruction *I) {
  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (!SI->isUnordered())
      return true;
  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isUnordered())
      return true;
  }
  return false;
}
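
// Illustrative example (assumed IR, not from a test): an atomic load such as
//   %v = load atomic i32, i32* %p acquire, align 4
// is not unordered, so isOrdered() returns true and the load is modeled as a
// MemoryDef below; a plain `load i32, i32* %p` remains a MemoryUse.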

/// Helper function to create new memory accesses.
template <typename AliasAnalysisType>
MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
                                           AliasAnalysisType *AAP,
                                           const MemoryUseOrDef *Template) {
  // The assume intrinsic has a control dependency which we model by claiming
  // that it writes arbitrarily. Ignore that fake memory dependency here.
  // FIXME: Replace this special casing with a more accurate modelling of
  // assume's control dependency.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    if (II->getIntrinsicID() == Intrinsic::assume)
      return nullptr;

  bool Def, Use;
  if (Template) {
    Def = dyn_cast_or_null<MemoryDef>(Template) != nullptr;
    Use = dyn_cast_or_null<MemoryUse>(Template) != nullptr;
#if !defined(NDEBUG)
    ModRefInfo ModRef = AAP->getModRefInfo(I, None);
    bool DefCheck, UseCheck;
    DefCheck = isModSet(ModRef) || isOrdered(I);
    UseCheck = isRefSet(ModRef);
    assert(Def == DefCheck && (Def || Use == UseCheck) && "Invalid template");
#endif
  } else {
    // Find out what effect this instruction has on memory.
    ModRefInfo ModRef = AAP->getModRefInfo(I, None);
    // The isOrdered check is used to ensure that volatiles end up as defs
    // (atomics end up as ModRef right now anyway).  Until we separate the
    // ordering chain from the memory chain, this enables people to see at least
    // some relative ordering to volatiles.  Note that getClobberingMemoryAccess
    // will still give an answer that bypasses other volatile loads.  TODO:
    // Separate memory aliasing and ordering into two different chains so that
    // we can precisely represent both "what memory will this read/write/is
    // clobbered by" and "what instructions can I move this past".
    Def = isModSet(ModRef) || isOrdered(I);
    Use = isRefSet(ModRef);
  }

  // It's possible for an instruction to not touch memory at all. During
  // construction, we ignore such instructions.
  if (!Def && !Use)
    return nullptr;

  MemoryUseOrDef *MUD;
  if (Def)
    MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
  else
    MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
  ValueToMemoryAccess[I] = MUD;
  return MUD;
}
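
// Illustrative classification examples (assuming @g may read and write
// memory):
//   store i32 0, i32* %p     ; Mod      -> MemoryDef
//   %v = load i32, i32* %p   ; Ref      -> MemoryUse
//   call void @g(i32* %p)    ; ModRef   -> MemoryDef (Def takes priority)
//   %a = add i32 %x, %y      ; NoModRef -> no access is created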

/// Returns true if \p Replacer dominates \p Replacee.
bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
                             const MemoryAccess *Replacee) const {
  if (isa<MemoryUseOrDef>(Replacee))
    return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
  const auto *MP = cast<MemoryPhi>(Replacee);
  // For a phi node, the use occurs in the predecessor block of the phi node.
  // Since Replacee may occur multiple times in the phi node, we have to check
  // each operand to ensure Replacer dominates each operand where Replacee
  // occurs.
  for (const Use &Arg : MP->operands()) {
    if (Arg.get() != Replacee &&
        !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg)))
      return false;
  }
  return true;
}

/// Properly remove \p MA from all of MemorySSA's lookup tables.
void MemorySSA::removeFromLookups(MemoryAccess *MA) {
  assert(MA->use_empty() &&
         "Trying to remove memory access that still has uses");
  BlockNumbering.erase(MA);
  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MUD->setDefiningAccess(nullptr);
  // Invalidate our walker's cache if necessary.
  if (!isa<MemoryUse>(MA))
    getWalker()->invalidateInfo(MA);

  Value *MemoryInst;
  if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MemoryInst = MUD->getMemoryInst();
  else
    MemoryInst = MA->getBlock();

  auto VMA = ValueToMemoryAccess.find(MemoryInst);
  if (VMA->second == MA)
    ValueToMemoryAccess.erase(VMA);
}

/// Properly remove \p MA from all of MemorySSA's lists.
///
/// Because of the way the intrusive list and use lists work, it is important to
/// do removal in the right order.
/// ShouldDelete defaults to true, and will cause the memory access to also be
/// deleted, not just removed.
void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
  BasicBlock *BB = MA->getBlock();
  // The access list owns the reference, so we erase it from the non-owning list
  // first.
  if (!isa<MemoryUse>(MA)) {
    auto DefsIt = PerBlockDefs.find(BB);
    std::unique_ptr<DefsList> &Defs = DefsIt->second;
    Defs->remove(*MA);
    if (Defs->empty())
      PerBlockDefs.erase(DefsIt);
  }

  // The erase call here will delete it. If we don't want it deleted, we call
  // remove instead.
  auto AccessIt = PerBlockAccesses.find(BB);
  std::unique_ptr<AccessList> &Accesses = AccessIt->second;
  if (ShouldDelete)
    Accesses->erase(MA);
  else
    Accesses->remove(MA);

  if (Accesses->empty()) {
    PerBlockAccesses.erase(AccessIt);
    BlockNumberingValid.erase(BB);
  }
}

void MemorySSA::print(raw_ostream &OS) const {
  MemorySSAAnnotatedWriter Writer(this);
  F.print(OS, &Writer);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
#endif

void MemorySSA::verifyMemorySSA() const {
  verifyDefUses(F);
  verifyDomination(F);
  verifyOrdering(F);
  verifyDominationNumbers(F);
  // Previously, the verification also checked that the clobbering access
  // cached by MemorySSA is the same as the clobbering access found by a later
  // query to AA. This does not hold true in general due to the current
  // fragility of BasicAA, which has arbitrary caps on the things it analyzes
  // before giving up. As a result, correct transformations can lead to BasicAA
  // returning different alias answers before and after the transformation.
  // Invalidating MemorySSA is not an option, because the BasicAA results can
  // be so unstable that, in the worst case, we'd need to rebuild MemorySSA
  // from scratch after every transformation, which defeats the purpose of
  // using it. For such an example, see test4 added in D51960.
}

/// Verify that all of the blocks we believe to have valid domination numbers
/// actually have valid domination numbers.
void MemorySSA::verifyDominationNumbers(const Function &F) const {
#ifndef NDEBUG
  if (BlockNumberingValid.empty())
    return;

  SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
  for (const BasicBlock &BB : F) {
    if (!ValidBlocks.count(&BB))
      continue;

    ValidBlocks.erase(&BB);

    const AccessList *Accesses = getBlockAccesses(&BB);
    // It's correct to say an empty block has valid numbering.
    if (!Accesses)
      continue;

    // Block numbering starts at 1.
    unsigned long LastNumber = 0;
    for (const MemoryAccess &MA : *Accesses) {
      auto ThisNumberIter = BlockNumbering.find(&MA);
      assert(ThisNumberIter != BlockNumbering.end() &&
             "MemoryAccess has no domination number in a valid block!");

      unsigned long ThisNumber = ThisNumberIter->second;
      assert(ThisNumber > LastNumber &&
             "Domination numbers should be strictly increasing!");
      LastNumber = ThisNumber;
    }
  }

  assert(ValidBlocks.empty() &&
         "All valid BasicBlocks should exist in F -- dangling pointers?");
#endif
}

/// Verify that the order and existence of MemoryAccesses matches the
/// order and existence of memory affecting instructions.
void MemorySSA::verifyOrdering(Function &F) const {
#ifndef NDEBUG
  // Walk all the blocks, comparing what the lookups think and what the access
  // lists think, as well as the order in the blocks vs the order in the access
  // lists.
  SmallVector<MemoryAccess *, 32> ActualAccesses;
  SmallVector<MemoryAccess *, 32> ActualDefs;
  for (BasicBlock &B : F) {
    const AccessList *AL = getBlockAccesses(&B);
    const auto *DL = getBlockDefs(&B);
    MemoryAccess *Phi = getMemoryAccess(&B);
    if (Phi) {
      ActualAccesses.push_back(Phi);
      ActualDefs.push_back(Phi);
    }

    for (Instruction &I : B) {
      MemoryAccess *MA = getMemoryAccess(&I);
      assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
             "We have memory affecting instructions "
             "in this block but they are not in the "
             "access list or defs list");
      if (MA) {
        ActualAccesses.push_back(MA);
        if (isa<MemoryDef>(MA))
          ActualDefs.push_back(MA);
      }
    }
    // Either we hit the assert above, we really have no accesses, or we have
    // both accesses and an access list. The same holds for defs.
    if (!AL && !DL)
      continue;
    assert(AL->size() == ActualAccesses.size() &&
           "We don't have the same number of accesses in the block as on the "
           "access list");
    assert((DL || ActualDefs.size() == 0) &&
           "Either we should have a defs list, or we should have no defs");
    assert((!DL || DL->size() == ActualDefs.size()) &&
           "We don't have the same number of defs in the block as on the "
           "def list");
    auto ALI = AL->begin();
    auto AAI = ActualAccesses.begin();
    while (ALI != AL->end() && AAI != ActualAccesses.end()) {
      assert(&*ALI == *AAI && "Not the same accesses in the same order");
      ++ALI;
      ++AAI;
    }
    ActualAccesses.clear();
    if (DL) {
      auto DLI = DL->begin();
      auto ADI = ActualDefs.begin();
      while (DLI != DL->end() && ADI != ActualDefs.end()) {
        assert(&*DLI == *ADI && "Not the same defs in the same order");
        ++DLI;
        ++ADI;
      }
    }
    ActualDefs.clear();
  }
#endif
}

/// Verify the domination properties of MemorySSA by checking that each
/// definition dominates all of its uses.
void MemorySSA::verifyDomination(Function &F) const {
#ifndef NDEBUG
  for (BasicBlock &B : F) {
    // Phi nodes are attached to basic blocks.
    if (MemoryPhi *MP = getMemoryAccess(&B))
      for (const Use &U : MP->uses())
        assert(dominates(MP, U) && "Memory PHI does not dominate its uses");

    for (Instruction &I : B) {
      MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I));
      if (!MD)
        continue;

      for (const Use &U : MD->uses())
        assert(dominates(MD, U) && "Memory Def does not dominate its uses");
    }
  }
#endif
}

/// Verify the def-use lists in MemorySSA, by verifying that \p Use
/// appears in the use list of \p Def.
void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
#ifndef NDEBUG
  // The live on entry use may cause us to get a NULL def here.
  if (!Def)
    assert(isLiveOnEntryDef(Use) &&
           "Null def but use does not point to the live on entry def");
  else
    assert(is_contained(Def->users(), Use) &&
           "Did not find use in def's use list");
#endif
}

/// Verify the immediate use information, by walking all the memory
/// accesses and verifying that, for each use, it appears in the
/// appropriate def's use list.
void MemorySSA::verifyDefUses(Function &F) const {
#ifndef NDEBUG
  for (BasicBlock &B : F) {
    // Phi nodes are attached to basic blocks.
    if (MemoryPhi *Phi = getMemoryAccess(&B)) {
      assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
                                          pred_begin(&B), pred_end(&B))) &&
             "Incomplete MemoryPhi Node");
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
        verifyUseInDefs(Phi->getIncomingValue(I), Phi);
        assert(find(predecessors(&B), Phi->getIncomingBlock(I)) !=
                   pred_end(&B) &&
               "Incoming phi block not a block predecessor");
      }
    }

    for (Instruction &I : B) {
      if (MemoryUseOrDef *MA = getMemoryAccess(&I)) {
        verifyUseInDefs(MA->getDefiningAccess(), MA);
      }
    }
  }
#endif
}

/// Perform a local numbering on blocks so that instruction ordering can be
/// determined in constant time.
/// TODO: We currently just number in order.  If we numbered by N, we could
/// allow at least N-1 sequences of insertBefore or insertAfter (and at least
/// log2(N) sequences of mixed before and after) without needing to invalidate
/// the numbering.
void MemorySSA::renumberBlock(const BasicBlock *B) const {
  // The pre-increment ensures the numbers really start at 1.
  unsigned long CurrentNumber = 0;
  const AccessList *AL = getBlockAccesses(B);
  assert(AL != nullptr && "Asking to renumber an empty block");
  for (const auto &I : *AL)
    BlockNumbering[&I] = ++CurrentNumber;
  BlockNumberingValid.insert(B);
}

/// Determine, for two memory accesses in the same block,
/// whether \p Dominator dominates \p Dominatee.
/// \returns True if \p Dominator dominates \p Dominatee.
bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
                                 const MemoryAccess *Dominatee) const {
  const BasicBlock *DominatorBlock = Dominator->getBlock();

  assert((DominatorBlock == Dominatee->getBlock()) &&
         "Asking for local domination when accesses are in different blocks!");
  // A node dominates itself.
  if (Dominatee == Dominator)
    return true;

  // When Dominatee is defined on function entry, it is not dominated by another
  // memory access.
  if (isLiveOnEntryDef(Dominatee))
    return false;

  // When Dominator is defined on function entry, it dominates the other memory
  // access.
  if (isLiveOnEntryDef(Dominator))
    return true;

  if (!BlockNumberingValid.count(DominatorBlock))
    renumberBlock(DominatorBlock);

  unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
  // All numbers start with 1.
  assert(DominatorNum != 0 && "Block was not numbered properly");
  unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
  assert(DominateeNum != 0 && "Block was not numbered properly");
  return DominatorNum < DominateeNum;
}
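
// Illustrative sketch: if a block's access list is [MemoryPhi, MemoryDef,
// MemoryUse], renumberBlock() numbers them 1, 2, 3, so
// locallyDominates(Def, Use) returns true because 2 < 3.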

bool MemorySSA::dominates(const MemoryAccess *Dominator,
                          const MemoryAccess *Dominatee) const {
  if (Dominator == Dominatee)
    return true;

  if (isLiveOnEntryDef(Dominatee))
    return false;

  if (Dominator->getBlock() != Dominatee->getBlock())
    return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
  return locallyDominates(Dominator, Dominatee);
}

bool MemorySSA::dominates(const MemoryAccess *Dominator,
                          const Use &Dominatee) const {
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
    BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
    // The def must dominate the incoming block of the phi.
    if (UseBB != Dominator->getBlock())
      return DT->dominates(Dominator->getBlock(), UseBB);
    // If the UseBB and the DefBB are the same, compare locally.
    return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
  }
  // If it's not a PHI node use, the normal dominates can already handle it.
  return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
}

const static char LiveOnEntryStr[] = "liveOnEntry";

void MemoryAccess::print(raw_ostream &OS) const {
  switch (getValueID()) {
  case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
  case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
  case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
  }
  llvm_unreachable("invalid value id");
}

void MemoryDef::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();

  auto printID = [&OS](MemoryAccess *A) {
    if (A && A->getID())
      OS << A->getID();
    else
      OS << LiveOnEntryStr;
  };

  OS << getID() << " = MemoryDef(";
  printID(UO);
  OS << ")";

  if (isOptimized()) {
    OS << "->";
    printID(getOptimized());

    if (Optional<AliasResult> AR = getOptimizedAccessType())
      OS << " " << *AR;
  }
}

void MemoryPhi::print(raw_ostream &OS) const {
  bool First = true;
  OS << getID() << " = MemoryPhi(";
  for (const auto &Op : operands()) {
    BasicBlock *BB = getIncomingBlock(Op);
    MemoryAccess *MA = cast<MemoryAccess>(Op);
    if (!First)
      OS << ',';
    else
      First = false;

    OS << '{';
    if (BB->hasName())
      OS << BB->getName();
    else
      BB->printAsOperand(OS, false);
    OS << ',';
    if (unsigned ID = MA->getID())
      OS << ID;
    else
      OS << LiveOnEntryStr;
    OS << '}';
  }
  OS << ')';
}

void MemoryUse::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();
  OS << "MemoryUse(";
  if (UO && UO->getID())
    OS << UO->getID();
  else
    OS << LiveOnEntryStr;
  OS << ')';

  if (Optional<AliasResult> AR = getOptimizedAccessType())
    OS << " " << *AR;
}
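
// Illustrative printed forms produced by the routines above (made-up IDs):
//   1 = MemoryDef(liveOnEntry)->liveOnEntry
//   2 = MemoryPhi({entry,liveOnEntry},{loop,1})
//   MemoryUse(2) MayAlias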

void MemoryAccess::dump() const {
// Cannot completely remove virtual function even in release mode.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  print(dbgs());
  dbgs() << "\n";
#endif
}

char MemorySSAPrinterLegacyPass::ID = 0;

MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
  initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
}

void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<MemorySSAWrapperPass>();
}

bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
  auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
  MSSA.print(dbgs());
  if (VerifyMemorySSA)
    MSSA.verifyMemorySSA();
  return false;
}

AnalysisKey MemorySSAAnalysis::Key;

MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
                                                 FunctionAnalysisManager &AM) {
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  return MemorySSAAnalysis::Result(llvm::make_unique<MemorySSA>(F, &AA, &DT));
}
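
// Illustrative usage from a new-pass-manager pass (sketch; MyPass is a
// hypothetical name):
//   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
//     MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
//     ...
//   }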

bool MemorySSAAnalysis::Result::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  auto PAC = PA.getChecker<MemorySSAAnalysis>();
  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
         Inv.invalidate<AAManager>(F, PA) ||
         Inv.invalidate<DominatorTreeAnalysis>(F, PA);
}

PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
                                            FunctionAnalysisManager &AM) {
  OS << "MemorySSA for function: " << F.getName() << "\n";
  AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS);

  return PreservedAnalyses::all();
}

PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();

  return PreservedAnalyses::all();
}

char MemorySSAWrapperPass::ID = 0;

MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
  initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
}

void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }

void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<AAResultsWrapperPass>();
}

bool MemorySSAWrapperPass::runOnFunction(Function &F) {
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  MSSA.reset(new MemorySSA(F, &AA, &DT));
  return false;
}
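
// Illustrative usage from a legacy pass (sketch): a client declares
//   AU.addRequired<MemorySSAWrapperPass>();
// in its getAnalysisUsage() and then retrieves the analysis with
//   MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();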

void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); }

void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
  MSSA->print(OS);
}

MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}

/// Walk the use-def chains starting at \p StartingAccess and find
/// the MemoryAccess that actually clobbers Loc.
///
/// \returns our clobbering memory access
template <typename AliasAnalysisType>
MemoryAccess *
MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
    MemoryAccess *StartingAccess, const MemoryLocation &Loc,
    unsigned &UpwardWalkLimit) {
  if (isa<MemoryPhi>(StartingAccess))
    return StartingAccess;

  auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
  if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
    return StartingUseOrDef;

  Instruction *I = StartingUseOrDef->getMemoryInst();

  // Conservatively, fences are always clobbers, so don't perform the walk if we
  // hit a fence.
  if (!isa<CallBase>(I) && I->isFenceLike())
    return StartingUseOrDef;

  UpwardsMemoryQuery Q;
  Q.OriginalAccess = StartingUseOrDef;
  Q.StartingLoc = Loc;
  Q.Inst = I;
  Q.IsCall = false;

  // Unlike the other function, do not walk to the def of a def, because we are
  // handed something we already believe is the clobbering access.
  // We never set SkipSelf to true in Q in this method.
  MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
                                     ? StartingUseOrDef->getDefiningAccess()
                                     : StartingUseOrDef;

  MemoryAccess *Clobber =
      Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n");
  LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *Clobber << "\n");
  return Clobber;
}
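
// Illustrative caller-side sketch (SomeLoadInst and Loc are assumed names): a
// client that wants the clobber of a given access at a specific
// MemoryLocation can go through the public walker API, which funnels into the
// functions above:
//   MemoryUseOrDef *MA = MSSA.getMemoryAccess(&SomeLoadInst);
//   MemoryAccess *Clobber =
//       MSSA.getWalker()->getClobberingMemoryAccess(MA, Loc);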

template <typename AliasAnalysisType>
MemoryAccess *
MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
    MemoryAccess *MA, unsigned &UpwardWalkLimit, bool SkipSelf) {
  auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
  // If this is a MemoryPhi, we can't do anything.
  if (!StartingAccess)
    return MA;

  bool IsOptimized = false;

  // If this is an already optimized use or def, return the optimized result.
  // Note: Currently, we store the optimized def result in a separate field,
  // since we can't use the defining access.
  if (StartingAccess->isOptimized()) {
    if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
      return StartingAccess->getOptimized();
    IsOptimized = true;
  }

  const Instruction *I = StartingAccess->getMemoryInst();
  // We can't sanely do anything with a fence, since fences conservatively
  // clobber all memory, and we have no locations from which to get pointers to
  // try to disambiguate.
  if (!isa<CallBase>(I) && I->isFenceLike())
    return StartingAccess;

  UpwardsMemoryQuery Q(I, StartingAccess);

  if (isUseTriviallyOptimizableToLiveOnEntry(*Walker.getAA(), I)) {
    MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
    StartingAccess->setOptimized(LiveOnEntry);
    StartingAccess->setOptimizedAccessType(None);
    return LiveOnEntry;
  }

  MemoryAccess *OptimizedAccess;
  if (!IsOptimized) {
    // Start with the thing we already think clobbers this location.
    MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();

    // At this point, DefiningAccess may be the live on entry def.
    // If it is, we will not get a better result.
    if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
      StartingAccess->setOptimized(DefiningAccess);
      StartingAccess->setOptimizedAccessType(None);
      return DefiningAccess;
    }

    OptimizedAccess = Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
    StartingAccess->setOptimized(OptimizedAccess);
    if (MSSA->isLiveOnEntryDef(OptimizedAccess))
      StartingAccess->setOptimizedAccessType(None);
    else if (Q.AR == MustAlias)
      StartingAccess->setOptimizedAccessType(MustAlias);
  } else
    OptimizedAccess = StartingAccess->getOptimized();

  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
  LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");

  MemoryAccess *Result;
  if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
      isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) {
    assert(isa<MemoryDef>(Q.OriginalAccess));
    Q.SkipSelfAccess = true;
    Result = Walker.findClobber(OptimizedAccess, Q, UpwardWalkLimit);
  } else
    Result = OptimizedAccess;

  LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
  LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");

  return Result;
}

MemoryAccess *
DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
    return Use->getDefiningAccess();
  return MA;
}

MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, const MemoryLocation &) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
    return Use->getDefiningAccess();
  return StartingAccess;
}

void MemoryPhi::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryPhi *>(Self);
}

void MemoryDef::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryDef *>(Self);
}

void MemoryUse::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryUse *>(Self);
}