//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion, attempting to remove as much
// code from the body of a loop as possible.  It does this by either hoisting
// code into the preheader block, or by sinking code to the exit blocks if it is
// safe.  This pass also promotes must-aliased memory locations in the loop to
// live in registers, thus hoisting and sinking "invariant" loads and stores.
//
// This pass uses alias analysis for two purposes:
//
//  1. Moving loop invariant loads and calls out of loops.  If we can determine
//     that a load or call inside of a loop never aliases anything stored to,
//     we can hoist it or sink it like any other instruction.
//  2. Scalar Promotion of Memory - If there is a store instruction inside of
//     the loop, we try to move the store to happen AFTER the loop instead of
//     inside of the loop.  This can only happen if a few conditions are true:
//       A. The pointer stored through is loop invariant
//       B. There are no stores or loads in the loop which _may_ alias the
//          pointer.  There are no calls in the loop which mod/ref the pointer.
//     If these conditions are true, we can promote the loads and stores in the
//     loop of the pointer to use a temporary alloca'd variable.  We then use
//     the SSAUpdater to construct the appropriate SSA form for the value.
//
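// As an illustration (not taken from a test case), for a loop such as
//
//   for (i = 0; i != n; ++i)
//     *p += a * b;
//
// LICM can hoist the loop-invariant computation 'a * b' into the preheader,
// and scalar promotion can keep '*p' in a register inside the loop, storing
// the final value back to 'p' once after the loop.
//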
//===----------------------------------------------------------------------===//

32#include "llvm/Transforms/Scalar/LICM.h"
33#include "llvm/ADT/SetOperations.h"
34#include "llvm/ADT/Statistic.h"
35#include "llvm/Analysis/AliasAnalysis.h"
36#include "llvm/Analysis/AliasSetTracker.h"
37#include "llvm/Analysis/BasicAliasAnalysis.h"
38#include "llvm/Analysis/CaptureTracking.h"
39#include "llvm/Analysis/ConstantFolding.h"
40#include "llvm/Analysis/GlobalsModRef.h"
41#include "llvm/Analysis/GuardUtils.h"
42#include "llvm/Analysis/Loads.h"
43#include "llvm/Analysis/LoopInfo.h"
44#include "llvm/Analysis/LoopIterator.h"
45#include "llvm/Analysis/LoopPass.h"
46#include "llvm/Analysis/MemoryBuiltins.h"
47#include "llvm/Analysis/MemorySSA.h"
48#include "llvm/Analysis/MemorySSAUpdater.h"
49#include "llvm/Analysis/MustExecute.h"
50#include "llvm/Analysis/OptimizationRemarkEmitter.h"
51#include "llvm/Analysis/ScalarEvolution.h"
52#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
53#include "llvm/Analysis/TargetLibraryInfo.h"
54#include "llvm/Analysis/ValueTracking.h"
55#include "llvm/IR/CFG.h"
56#include "llvm/IR/Constants.h"
57#include "llvm/IR/DataLayout.h"
58#include "llvm/IR/DebugInfoMetadata.h"
59#include "llvm/IR/DerivedTypes.h"
60#include "llvm/IR/Dominators.h"
61#include "llvm/IR/Instructions.h"
62#include "llvm/IR/IntrinsicInst.h"
63#include "llvm/IR/LLVMContext.h"
64#include "llvm/IR/Metadata.h"
65#include "llvm/IR/PatternMatch.h"
66#include "llvm/IR/PredIteratorCache.h"
67#include "llvm/InitializePasses.h"
68#include "llvm/Support/CommandLine.h"
69#include "llvm/Support/Debug.h"
70#include "llvm/Support/raw_ostream.h"
71#include "llvm/Transforms/Scalar.h"
72#include "llvm/Transforms/Scalar/LoopPassManager.h"
73#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
74#include "llvm/Transforms/Utils/BasicBlockUtils.h"
75#include "llvm/Transforms/Utils/Local.h"
76#include "llvm/Transforms/Utils/LoopUtils.h"
77#include "llvm/Transforms/Utils/SSAUpdater.h"
78#include <algorithm>
79#include <utility>
80using namespace llvm;
81
82#define DEBUG_TYPE "licm"
83
84STATISTIC(NumCreatedBlocks, "Number of blocks created");
85STATISTIC(NumClonedBranches, "Number of branches cloned");
86STATISTIC(NumSunk, "Number of instructions sunk out of loop");
87STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
88STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
89STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
90STATISTIC(NumPromoted, "Number of memory locations promoted to registers");
91
92/// Memory promotion is enabled by default.
93static cl::opt<bool>
94    DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
95                     cl::desc("Disable memory promotion in LICM pass"));
96
97static cl::opt<bool> ControlFlowHoisting(
98    "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
99    cl::desc("Enable control flow (and PHI) hoisting in LICM"));
100
101static cl::opt<uint32_t> MaxNumUsesTraversed(
102    "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
103    cl::desc("Max num uses visited for identifying load "
104             "invariance in loop using invariant start (default = 8)"));
105
// A default value of zero means we use the regular alias set tracker mechanism
// instead of the N^2 cross product of AA queries to identify aliasing of the
// memory location we are interested in.
static cl::opt<int>
LICMN2Theshold("licm-n2-threshold", cl::Hidden, cl::init(0),
               cl::desc("How many instructions to cross product using AA"));
112
// Experimental option to allow imprecision in LICM in pathological cases, in
// exchange for faster compile time. This is to be removed if MemorySSA starts
// to address the same issue. This flag applies only when LICM uses MemorySSA
// instead of the AliasSetTracker. LICM calls MemorySSAWalker's
// getClobberingMemoryAccess, up to the value of the cap, getting perfect
// accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
// which may not be precise, since optimizeUses is capped. The result is
// correct, but we may not get as "far up" as possible to find which access is
// clobbering the one queried.
cl::opt<unsigned> llvm::SetLicmMssaOptCap(
    "licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
    cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
             "for faster compile. Caps the MemorySSA clobbering calls."));
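// For example (illustrative invocation; the exact pass-pipeline spelling
// depends on the LLVM version and pass manager in use), the cap can be raised
// when trading compile time for precision:
//   opt -licm -licm-mssa-optimization-cap=200 ...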
126
127// Experimentally, memory promotion carries less importance than sinking and
128// hoisting. Limit when we do promotion when using MemorySSA, in order to save
129// compile time.
130cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap(
131    "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
132    cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
133             "effect. When MSSA in LICM is enabled, then this is the maximum "
134             "number of accesses allowed to be present in a loop in order to "
135             "enable memory promotion."));
136
137static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
138static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
139                                  const LoopSafetyInfo *SafetyInfo,
140                                  TargetTransformInfo *TTI, bool &FreeInLoop);
141static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
142                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
143                  MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
144                  OptimizationRemarkEmitter *ORE);
145static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
146                 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
147                 MemorySSAUpdater *MSSAU, OptimizationRemarkEmitter *ORE);
148static bool isSafeToExecuteUnconditionally(Instruction &Inst,
149                                           const DominatorTree *DT,
150                                           const Loop *CurLoop,
151                                           const LoopSafetyInfo *SafetyInfo,
152                                           OptimizationRemarkEmitter *ORE,
153                                           const Instruction *CtxI = nullptr);
154static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
155                                     AliasSetTracker *CurAST, Loop *CurLoop,
156                                     AAResults *AA);
157static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
158                                             Loop *CurLoop,
159                                             SinkAndHoistLICMFlags &Flags);
160static Instruction *cloneInstructionInExitBlock(
161    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
162    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU);
163
164static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
165                             AliasSetTracker *AST, MemorySSAUpdater *MSSAU);
166
167static void moveInstructionBefore(Instruction &I, Instruction &Dest,
168                                  ICFLoopSafetyInfo &SafetyInfo,
169                                  MemorySSAUpdater *MSSAU, ScalarEvolution *SE);
170
171namespace {
172struct LoopInvariantCodeMotion {
173  bool runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
174                 TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
175                 ScalarEvolution *SE, MemorySSA *MSSA,
176                 OptimizationRemarkEmitter *ORE);
177
178  LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
179                          unsigned LicmMssaNoAccForPromotionCap)
180      : LicmMssaOptCap(LicmMssaOptCap),
181        LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap) {}
182
183private:
184  unsigned LicmMssaOptCap;
185  unsigned LicmMssaNoAccForPromotionCap;
186
187  std::unique_ptr<AliasSetTracker>
188  collectAliasInfoForLoop(Loop *L, LoopInfo *LI, AAResults *AA);
189  std::unique_ptr<AliasSetTracker>
190  collectAliasInfoForLoopWithMSSA(Loop *L, AAResults *AA,
191                                  MemorySSAUpdater *MSSAU);
192};
193
194struct LegacyLICMPass : public LoopPass {
195  static char ID; // Pass identification, replacement for typeid
196  LegacyLICMPass(
197      unsigned LicmMssaOptCap = SetLicmMssaOptCap,
198      unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap)
199      : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap) {
200    initializeLegacyLICMPassPass(*PassRegistry::getPassRegistry());
201  }
202
203  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
204    if (skipLoop(L))
205      return false;
206
207    auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
208    MemorySSA *MSSA = EnableMSSALoopDependency
209                          ? (&getAnalysis<MemorySSAWrapperPass>().getMSSA())
210                          : nullptr;
211    // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
212    // pass.  Function analyses need to be preserved across loop transformations
213    // but ORE cannot be preserved (see comment before the pass definition).
214    OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
215    return LICM.runOnLoop(L,
216                          &getAnalysis<AAResultsWrapperPass>().getAAResults(),
217                          &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
218                          &getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
219                          &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
220                              *L->getHeader()->getParent()),
221                          &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
222                              *L->getHeader()->getParent()),
223                          SE ? &SE->getSE() : nullptr, MSSA, &ORE);
224  }
225
  /// This transformation requires natural loop information and requires that
  /// loop preheaders be inserted into the CFG.
  ///
229  void getAnalysisUsage(AnalysisUsage &AU) const override {
230    AU.addPreserved<DominatorTreeWrapperPass>();
231    AU.addPreserved<LoopInfoWrapperPass>();
232    AU.addRequired<TargetLibraryInfoWrapperPass>();
233    if (EnableMSSALoopDependency) {
234      AU.addRequired<MemorySSAWrapperPass>();
235      AU.addPreserved<MemorySSAWrapperPass>();
236    }
237    AU.addRequired<TargetTransformInfoWrapperPass>();
238    getLoopAnalysisUsage(AU);
239  }
240
241private:
242  LoopInvariantCodeMotion LICM;
243};
244} // namespace
245
246PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM,
247                                LoopStandardAnalysisResults &AR, LPMUpdater &) {
248  // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
249  // pass.  Function analyses need to be preserved across loop transformations
250  // but ORE cannot be preserved (see comment before the pass definition).
251  OptimizationRemarkEmitter ORE(L.getHeader()->getParent());
252
253  LoopInvariantCodeMotion LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
254  if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, &AR.TLI, &AR.TTI, &AR.SE,
255                      AR.MSSA, &ORE))
256    return PreservedAnalyses::all();
257
258  auto PA = getLoopPassPreservedAnalyses();
259
260  PA.preserve<DominatorTreeAnalysis>();
261  PA.preserve<LoopAnalysis>();
262  if (AR.MSSA)
263    PA.preserve<MemorySSAAnalysis>();
264
265  return PA;
266}
267
268char LegacyLICMPass::ID = 0;
269INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
270                      false, false)
271INITIALIZE_PASS_DEPENDENCY(LoopPass)
272INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
273INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
274INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
275INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
276                    false)
277
278Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }
279Pass *llvm::createLICMPass(unsigned LicmMssaOptCap,
280                           unsigned LicmMssaNoAccForPromotionCap) {
281  return new LegacyLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
282}
283
/// Hoist expressions out of the specified loop. Note that alias info for the
/// inner loop is not preserved, so it is not a good idea to run LICM multiple
/// times on one loop.
287bool LoopInvariantCodeMotion::runOnLoop(
288    Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
289    TargetLibraryInfo *TLI, TargetTransformInfo *TTI, ScalarEvolution *SE,
290    MemorySSA *MSSA, OptimizationRemarkEmitter *ORE) {
291  bool Changed = false;
292
293  assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");
294
295  // If this loop has metadata indicating that LICM is not to be performed then
296  // just exit.
297  if (hasDisableLICMTransformsHint(L)) {
298    return false;
299  }
300
301  std::unique_ptr<AliasSetTracker> CurAST;
302  std::unique_ptr<MemorySSAUpdater> MSSAU;
303  bool NoOfMemAccTooLarge = false;
304  unsigned LicmMssaOptCounter = 0;
305
306  if (!MSSA) {
307    LLVM_DEBUG(dbgs() << "LICM: Using Alias Set Tracker.\n");
308    CurAST = collectAliasInfoForLoop(L, LI, AA);
309  } else {
310    LLVM_DEBUG(dbgs() << "LICM: Using MemorySSA.\n");
311    MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);
312
313    unsigned AccessCapCount = 0;
314    for (auto *BB : L->getBlocks()) {
315      if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
316        for (const auto &MA : *Accesses) {
317          (void)MA;
318          AccessCapCount++;
319          if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
320            NoOfMemAccTooLarge = true;
321            break;
322          }
323        }
324      }
325      if (NoOfMemAccTooLarge)
326        break;
327    }
328  }
329
330  // Get the preheader block to move instructions into...
331  BasicBlock *Preheader = L->getLoopPreheader();
332
333  // Compute loop safety information.
334  ICFLoopSafetyInfo SafetyInfo;
335  SafetyInfo.computeLoopSafetyInfo(L);
336
  // We want to visit all of the instructions in this loop that are not part
  // of our subloops (they have already had their invariants hoisted out of
  // their loop, into this loop, so there is no need to process the BODIES of
  // the subloops).
  //
  // Traverse the body of the loop in depth first order on the dominator tree
  // so that we are guaranteed to see definitions before we see uses.  This
  // allows us to sink instructions in one pass, without iteration.  After
  // sinking instructions, we perform another pass to hoist them out of the
  // loop.
346  SinkAndHoistLICMFlags Flags = {NoOfMemAccTooLarge, LicmMssaOptCounter,
347                                 LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
348                                 /*IsSink=*/true};
349  if (L->hasDedicatedExits())
350    Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
351                          CurAST.get(), MSSAU.get(), &SafetyInfo, Flags, ORE);
352  Flags.IsSink = false;
353  if (Preheader)
354    Changed |=
355        hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, L,
356                    CurAST.get(), MSSAU.get(), SE, &SafetyInfo, Flags, ORE);
357
  // Now that all loop invariants have been removed from the loop, promote any
  // memory references to scalars that we can.
  // Don't sink stores from loops without dedicated exit blocks. Exits
  // containing indirect branches are not transformed by loop simplify, so
  // make sure we catch that. An additional load may be generated in the
  // preheader for the SSA updater, so also avoid sinking when no preheader
  // is available.
365  if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
366      !NoOfMemAccTooLarge) {
367    // Figure out the loop exits and their insertion points
368    SmallVector<BasicBlock *, 8> ExitBlocks;
369    L->getUniqueExitBlocks(ExitBlocks);
370
371    // We can't insert into a catchswitch.
372    bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
373      return isa<CatchSwitchInst>(Exit->getTerminator());
374    });
375
376    if (!HasCatchSwitch) {
377      SmallVector<Instruction *, 8> InsertPts;
378      SmallVector<MemoryAccess *, 8> MSSAInsertPts;
379      InsertPts.reserve(ExitBlocks.size());
380      if (MSSAU)
381        MSSAInsertPts.reserve(ExitBlocks.size());
382      for (BasicBlock *ExitBlock : ExitBlocks) {
383        InsertPts.push_back(&*ExitBlock->getFirstInsertionPt());
384        if (MSSAU)
385          MSSAInsertPts.push_back(nullptr);
386      }
387
388      PredIteratorCache PIC;
389
390      bool Promoted = false;
391
392      // Build an AST using MSSA.
393      if (!CurAST.get())
394        CurAST = collectAliasInfoForLoopWithMSSA(L, AA, MSSAU.get());
395
396      // Loop over all of the alias sets in the tracker object.
397      for (AliasSet &AS : *CurAST) {
398        // We can promote this alias set if it has a store, if it is a "Must"
399        // alias set, if the pointer is loop invariant, and if we are not
400        // eliminating any volatile loads or stores.
401        if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() ||
402            !L->isLoopInvariant(AS.begin()->getValue()))
403          continue;
404
405        assert(
406            !AS.empty() &&
407            "Must alias set should have at least one pointer element in it!");
408
409        SmallSetVector<Value *, 8> PointerMustAliases;
410        for (const auto &ASI : AS)
411          PointerMustAliases.insert(ASI.getValue());
412
413        Promoted |= promoteLoopAccessesToScalars(
414            PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI,
415            DT, TLI, L, CurAST.get(), MSSAU.get(), &SafetyInfo, ORE);
416      }
417
418      // Once we have promoted values across the loop body we have to
419      // recursively reform LCSSA as any nested loop may now have values defined
420      // within the loop used in the outer loop.
421      // FIXME: This is really heavy handed. It would be a bit better to use an
422      // SSAUpdater strategy during promotion that was LCSSA aware and reformed
423      // it as it went.
424      if (Promoted)
425        formLCSSARecursively(*L, *DT, LI, SE);
426
427      Changed |= Promoted;
428    }
429  }
430
431  // Check that neither this loop nor its parent have had LCSSA broken. LICM is
432  // specifically moving instructions across the loop boundary and so it is
433  // especially in need of sanity checking here.
434  assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
435  assert((!L->getParentLoop() || L->getParentLoop()->isLCSSAForm(*DT)) &&
436         "Parent loop not left in LCSSA form after LICM!");
437
438  if (MSSAU.get() && VerifyMemorySSA)
439    MSSAU->getMemorySSA()->verifyMemorySSA();
440
441  if (Changed && SE)
442    SE->forgetLoopDispositions(L);
443  return Changed;
444}
445
446/// Walk the specified region of the CFG (defined by all blocks dominated by
447/// the specified block, and that are in the current loop) in reverse depth
448/// first order w.r.t the DominatorTree.  This allows us to visit uses before
449/// definitions, allowing us to sink a loop body in one pass without iteration.
450///
451bool llvm::sinkRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
452                      DominatorTree *DT, TargetLibraryInfo *TLI,
453                      TargetTransformInfo *TTI, Loop *CurLoop,
454                      AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
455                      ICFLoopSafetyInfo *SafetyInfo,
456                      SinkAndHoistLICMFlags &Flags,
457                      OptimizationRemarkEmitter *ORE) {
458
459  // Verify inputs.
460  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
461         CurLoop != nullptr && SafetyInfo != nullptr &&
462         "Unexpected input to sinkRegion.");
463  assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
464         "Either AliasSetTracker or MemorySSA should be initialized.");
465
  // We want to visit children before parents. We will enqueue all the parents
  // before their children in the worklist and process the worklist in reverse
  // order.
469  SmallVector<DomTreeNode *, 16> Worklist = collectChildrenInLoop(N, CurLoop);
470
471  bool Changed = false;
472  for (DomTreeNode *DTN : reverse(Worklist)) {
473    BasicBlock *BB = DTN->getBlock();
474    // Only need to process the contents of this block if it is not part of a
475    // subloop (which would already have been processed).
476    if (inSubLoop(BB, CurLoop, LI))
477      continue;
478
479    for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
480      Instruction &I = *--II;
481
      // If the instruction is dead, we would try to sink it because it isn't
      // used in the loop; instead, just delete it.
484      if (isInstructionTriviallyDead(&I, TLI)) {
485        LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
486        salvageKnowledge(&I);
487        salvageDebugInfo(I);
488        ++II;
489        eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
490        Changed = true;
491        continue;
492      }
493
      // Check to see if we can sink this instruction to the exit blocks
      // of the loop.  We can do this if all of the users of the instruction
      // are outside of the loop.  In this case, it doesn't even matter if the
      // operands of the instruction are loop invariant.
      //
499      bool FreeInLoop = false;
500      if (!I.mayHaveSideEffects() &&
501          isNotUsedOrFreeInLoop(I, CurLoop, SafetyInfo, TTI, FreeInLoop) &&
502          canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, &Flags,
503                             ORE)) {
504        if (sink(I, LI, DT, CurLoop, SafetyInfo, MSSAU, ORE)) {
505          if (!FreeInLoop) {
506            ++II;
507            salvageDebugInfo(I);
508            eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
509          }
510          Changed = true;
511        }
512      }
513    }
514  }
515  if (MSSAU && VerifyMemorySSA)
516    MSSAU->getMemorySSA()->verifyMemorySSA();
517  return Changed;
518}
519
520namespace {
// This is a helper class for hoistRegion that allows it to hoist control flow
// so that it can hoist phis. The way this works is that we initially start
// hoisting to the loop preheader, and when we see a loop invariant branch we
// make note of this. When we then come to hoist an instruction that's
// conditional on such a branch, we duplicate the branch and the relevant
// control flow, then hoist the instruction into the block corresponding to its
// original block in the duplicated control flow.
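// As an illustrative sketch, given a loop containing
//
//   br i1 %inv.cond, label %if.then, label %if.end
//
// where %inv.cond is loop invariant, the branch and the relevant blocks are
// replicated in front of the loop (as "<name>.licm" blocks), so that an
// instruction from %if.then, or a phi at the convergence point %if.end, can
// be hoisted into the corresponding cloned block instead of having to be
// hoisted unconditionally into the preheader.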
528class ControlFlowHoister {
529private:
530  // Information about the loop we are hoisting from
531  LoopInfo *LI;
532  DominatorTree *DT;
533  Loop *CurLoop;
534  MemorySSAUpdater *MSSAU;
535
536  // A map of blocks in the loop to the block their instructions will be hoisted
537  // to.
538  DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;
539
540  // The branches that we can hoist, mapped to the block that marks a
541  // convergence point of their control flow.
542  DenseMap<BranchInst *, BasicBlock *> HoistableBranches;
543
544public:
545  ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
546                     MemorySSAUpdater *MSSAU)
547      : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}
548
549  void registerPossiblyHoistableBranch(BranchInst *BI) {
550    // We can only hoist conditional branches with loop invariant operands.
551    if (!ControlFlowHoisting || !BI->isConditional() ||
552        !CurLoop->hasLoopInvariantOperands(BI))
553      return;
554
555    // The branch destinations need to be in the loop, and we don't gain
556    // anything by duplicating conditional branches with duplicate successors,
557    // as it's essentially the same as an unconditional branch.
558    BasicBlock *TrueDest = BI->getSuccessor(0);
559    BasicBlock *FalseDest = BI->getSuccessor(1);
560    if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
561        TrueDest == FalseDest)
562      return;
563
    // We can hoist BI if one branch destination is the successor of the other,
    // or if both have a common successor, which we check by seeing if the
    // intersection of their successors is non-empty.
    // TODO: This could be expanded to allow branches where both ends
    // eventually converge to a single block.
569    SmallPtrSet<BasicBlock *, 4> TrueDestSucc, FalseDestSucc;
570    TrueDestSucc.insert(succ_begin(TrueDest), succ_end(TrueDest));
571    FalseDestSucc.insert(succ_begin(FalseDest), succ_end(FalseDest));
572    BasicBlock *CommonSucc = nullptr;
573    if (TrueDestSucc.count(FalseDest)) {
574      CommonSucc = FalseDest;
575    } else if (FalseDestSucc.count(TrueDest)) {
576      CommonSucc = TrueDest;
577    } else {
578      set_intersect(TrueDestSucc, FalseDestSucc);
579      // If there's one common successor use that.
580      if (TrueDestSucc.size() == 1)
581        CommonSucc = *TrueDestSucc.begin();
      // If there's more than one, pick whichever appears first in the block
      // list (we can't use the value returned by TrueDestSucc.begin() as it's
      // unpredictable which element gets returned).
585      else if (!TrueDestSucc.empty()) {
586        Function *F = TrueDest->getParent();
587        auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
588        auto It = std::find_if(F->begin(), F->end(), IsSucc);
589        assert(It != F->end() && "Could not find successor in function");
590        CommonSucc = &*It;
591      }
592    }
593    // The common successor has to be dominated by the branch, as otherwise
594    // there will be some other path to the successor that will not be
595    // controlled by this branch so any phi we hoist would be controlled by the
596    // wrong condition. This also takes care of avoiding hoisting of loop back
597    // edges.
598    // TODO: In some cases this could be relaxed if the successor is dominated
599    // by another block that's been hoisted and we can guarantee that the
600    // control flow has been replicated exactly.
601    if (CommonSucc && DT->dominates(BI, CommonSucc))
602      HoistableBranches[BI] = CommonSucc;
603  }
604
605  bool canHoistPHI(PHINode *PN) {
606    // The phi must have loop invariant operands.
607    if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
608      return false;
609    // We can hoist phis if the block they are in is the target of hoistable
610    // branches which cover all of the predecessors of the block.
611    SmallPtrSet<BasicBlock *, 8> PredecessorBlocks;
612    BasicBlock *BB = PN->getParent();
613    for (BasicBlock *PredBB : predecessors(BB))
614      PredecessorBlocks.insert(PredBB);
    // If we have fewer predecessor blocks than predecessors then the phi will
    // have more than one incoming value for the same block, which we can't
    // handle.
    // TODO: This could be handled by erasing some of the duplicate incoming
    // values.
620    if (PredecessorBlocks.size() != pred_size(BB))
621      return false;
622    for (auto &Pair : HoistableBranches) {
623      if (Pair.second == BB) {
        // Which blocks are predecessors via this branch depends on whether the
        // branch is triangle-like or diamond-like.
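        // Illustratively: in the triangle case one successor of the branch is
        // BB itself (Pred -> BB and Pred -> Other -> BB), so the predecessors
        // reached via this branch are Pred and Other; in the diamond case
        // (Pred -> Succ0 -> BB and Pred -> Succ1 -> BB) they are Succ0 and
        // Succ1.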
626        if (Pair.first->getSuccessor(0) == BB) {
627          PredecessorBlocks.erase(Pair.first->getParent());
628          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
629        } else if (Pair.first->getSuccessor(1) == BB) {
630          PredecessorBlocks.erase(Pair.first->getParent());
631          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
632        } else {
633          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
634          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
635        }
636      }
637    }
638    // PredecessorBlocks will now be empty if for every predecessor of BB we
639    // found a hoistable branch source.
640    return PredecessorBlocks.empty();
641  }
642
643  BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
644    if (!ControlFlowHoisting)
645      return CurLoop->getLoopPreheader();
646    // If BB has already been hoisted, return that
647    if (HoistDestinationMap.count(BB))
648      return HoistDestinationMap[BB];
649
650    // Check if this block is conditional based on a pending branch
651    auto HasBBAsSuccessor =
652        [&](DenseMap<BranchInst *, BasicBlock *>::value_type &Pair) {
653          return BB != Pair.second && (Pair.first->getSuccessor(0) == BB ||
654                                       Pair.first->getSuccessor(1) == BB);
655        };
656    auto It = std::find_if(HoistableBranches.begin(), HoistableBranches.end(),
657                           HasBBAsSuccessor);
658
659    // If not involved in a pending branch, hoist to preheader
660    BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
661    if (It == HoistableBranches.end()) {
662      LLVM_DEBUG(dbgs() << "LICM using " << InitialPreheader->getName()
663                        << " as hoist destination for " << BB->getName()
664                        << "\n");
665      HoistDestinationMap[BB] = InitialPreheader;
666      return InitialPreheader;
667    }
668    BranchInst *BI = It->first;
669    assert(std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) ==
670               HoistableBranches.end() &&
671           "BB is expected to be the target of at most one branch");
672
673    LLVMContext &C = BB->getContext();
674    BasicBlock *TrueDest = BI->getSuccessor(0);
675    BasicBlock *FalseDest = BI->getSuccessor(1);
676    BasicBlock *CommonSucc = HoistableBranches[BI];
677    BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent());
678
679    // Create hoisted versions of blocks that currently don't have them
680    auto CreateHoistedBlock = [&](BasicBlock *Orig) {
681      if (HoistDestinationMap.count(Orig))
682        return HoistDestinationMap[Orig];
683      BasicBlock *New =
684          BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent());
685      HoistDestinationMap[Orig] = New;
686      DT->addNewBlock(New, HoistTarget);
687      if (CurLoop->getParentLoop())
688        CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI);
689      ++NumCreatedBlocks;
690      LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
691                        << " as hoist destination for " << Orig->getName()
692                        << "\n");
693      return New;
694    };
695    BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
696    BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
697    BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);
698
699    // Link up these blocks with branches.
700    if (!HoistCommonSucc->getTerminator()) {
      // The new common successor we've generated will branch to whatever the
      // hoist target branched to.
703      BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
704      assert(TargetSucc && "Expected hoist target to have a single successor");
705      HoistCommonSucc->moveBefore(TargetSucc);
706      BranchInst::Create(TargetSucc, HoistCommonSucc);
707    }
708    if (!HoistTrueDest->getTerminator()) {
709      HoistTrueDest->moveBefore(HoistCommonSucc);
710      BranchInst::Create(HoistCommonSucc, HoistTrueDest);
711    }
712    if (!HoistFalseDest->getTerminator()) {
713      HoistFalseDest->moveBefore(HoistCommonSucc);
714      BranchInst::Create(HoistCommonSucc, HoistFalseDest);
715    }
716
717    // If BI is being cloned to what was originally the preheader then
718    // HoistCommonSucc will now be the new preheader.
719    if (HoistTarget == InitialPreheader) {
720      // Phis in the loop header now need to use the new preheader.
721      InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
722      if (MSSAU)
723        MSSAU->wireOldPredecessorsToNewImmediatePredecessor(
724            HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
725      // The new preheader dominates the loop header.
726      DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
727      DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
728      DT->changeImmediateDominator(HeaderNode, PreheaderNode);
729      // The preheader hoist destination is now the new preheader, with the
730      // exception of the hoist destination of this branch.
731      for (auto &Pair : HoistDestinationMap)
732        if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
733          Pair.second = HoistCommonSucc;
734    }
735
736    // Now finally clone BI.
737    ReplaceInstWithInst(
738        HoistTarget->getTerminator(),
739        BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition()));
740    ++NumClonedBranches;
741
742    assert(CurLoop->getLoopPreheader() &&
743           "Hoisting blocks should not have destroyed preheader");
744    return HoistDestinationMap[BB];
745  }
746};
747} // namespace
748
749/// Walk the specified region of the CFG (defined by all blocks dominated by
750/// the specified block, and that are in the current loop) in depth first
751/// order w.r.t the DominatorTree.  This allows us to visit definitions before
752/// uses, allowing us to hoist a loop body in one pass without iteration.
753///
754bool llvm::hoistRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
755                       DominatorTree *DT, TargetLibraryInfo *TLI, Loop *CurLoop,
756                       AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
757                       ScalarEvolution *SE, ICFLoopSafetyInfo *SafetyInfo,
758                       SinkAndHoistLICMFlags &Flags,
759                       OptimizationRemarkEmitter *ORE) {
760  // Verify inputs.
761  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
762         CurLoop != nullptr && SafetyInfo != nullptr &&
763         "Unexpected input to hoistRegion.");
764  assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
765         "Either AliasSetTracker or MemorySSA should be initialized.");
766
767  ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);
768
769  // Keep track of instructions that have been hoisted, as they may need to be
770  // re-hoisted if they end up not dominating all of their uses.
771  SmallVector<Instruction *, 16> HoistedInstructions;
772
773  // For PHI hoisting to work we need to hoist blocks before their successors.
774  // We can do this by iterating through the blocks in the loop in reverse
775  // post-order.
776  LoopBlocksRPO Worklist(CurLoop);
777  Worklist.perform(LI);
778  bool Changed = false;
779  for (BasicBlock *BB : Worklist) {
780    // Only need to process the contents of this block if it is not part of a
781    // subloop (which would already have been processed).
782    if (inSubLoop(BB, CurLoop, LI))
783      continue;
784
785    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;) {
786      Instruction &I = *II++;
787      // Try constant folding this instruction.  If all the operands are
788      // constants, it is technically hoistable, but it would be better to
789      // just fold it.
790      if (Constant *C = ConstantFoldInstruction(
791              &I, I.getModule()->getDataLayout(), TLI)) {
792        LLVM_DEBUG(dbgs() << "LICM folding inst: " << I << "  --> " << *C
793                          << '\n');
794        if (CurAST)
795          CurAST->copyValue(&I, C);
796        // FIXME MSSA: Such replacements may make accesses unoptimized (D51960).
797        I.replaceAllUsesWith(C);
798        if (isInstructionTriviallyDead(&I, TLI))
799          eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
800        Changed = true;
801        continue;
802      }
803
804      // Try hoisting the instruction out to the preheader.  We can only do
805      // this if all of the operands of the instruction are loop invariant and
806      // if it is safe to hoist the instruction.
807      // TODO: It may be safe to hoist if we are hoisting to a conditional block
808      // and we have accurately duplicated the control flow from the loop header
809      // to that block.
810      if (CurLoop->hasLoopInvariantOperands(&I) &&
811          canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, &Flags,
812                             ORE) &&
813          isSafeToExecuteUnconditionally(
814              I, DT, CurLoop, SafetyInfo, ORE,
815              CurLoop->getLoopPreheader()->getTerminator())) {
816        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
817              MSSAU, SE, ORE);
818        HoistedInstructions.push_back(&I);
819        Changed = true;
820        continue;
821      }
822
      // Attempt to move floating-point division out of the loop by converting
      // it to a reciprocal multiplication.
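      // For example, with the 'arcp' fast-math flag,
      //   %q = fdiv arcp float %x, %d      ; %d loop invariant
      // becomes a hoisted  %r = fdiv arcp float 1.0, %d  in the preheader and
      // a  %q = fmul arcp float %x, %r  that remains in the loop.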
825      if (I.getOpcode() == Instruction::FDiv && I.hasAllowReciprocal() &&
826          CurLoop->isLoopInvariant(I.getOperand(1))) {
827        auto Divisor = I.getOperand(1);
828        auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
829        auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
830        ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
831        SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
832        ReciprocalDivisor->insertBefore(&I);
833
834        auto Product =
835            BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
836        Product->setFastMathFlags(I.getFastMathFlags());
837        SafetyInfo->insertInstructionTo(Product, I.getParent());
838        Product->insertAfter(&I);
839        I.replaceAllUsesWith(Product);
840        eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
841
842        hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
843              SafetyInfo, MSSAU, SE, ORE);
844        HoistedInstructions.push_back(ReciprocalDivisor);
845        Changed = true;
846        continue;
847      }
848
849      auto IsInvariantStart = [&](Instruction &I) {
850        using namespace PatternMatch;
851        return I.use_empty() &&
852               match(&I, m_Intrinsic<Intrinsic::invariant_start>());
853      };
854      auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
855        return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
856               SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
857      };
858      if ((IsInvariantStart(I) || isGuard(&I)) &&
859          CurLoop->hasLoopInvariantOperands(&I) &&
860          MustExecuteWithoutWritesBefore(I)) {
861        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
862              MSSAU, SE, ORE);
863        HoistedInstructions.push_back(&I);
864        Changed = true;
865        continue;
866      }
867
868      if (PHINode *PN = dyn_cast<PHINode>(&I)) {
869        if (CFH.canHoistPHI(PN)) {
870          // Redirect incoming blocks first to ensure that we create hoisted
871          // versions of those blocks before we hoist the phi.
872          for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
873            PN->setIncomingBlock(
874                i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
875          hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
876                MSSAU, SE, ORE);
877          assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
878          Changed = true;
879          continue;
880        }
881      }
882
883      // Remember possibly hoistable branches so we can actually hoist them
884      // later if needed.
885      if (BranchInst *BI = dyn_cast<BranchInst>(&I))
886        CFH.registerPossiblyHoistableBranch(BI);
887    }
888  }
889
  // If we hoisted instructions to a conditional block they may not dominate
  // their uses that weren't hoisted (such as phis where some operands are not
  // loop invariant). If so, make them unconditional by moving them to their
  // immediate dominator. We iterate through the instructions in reverse order
  // which ensures that when we rehoist an instruction we rehoist its operands,
  // and also keep track of where in the block we are rehoisting to, to make
  // sure that we rehoist instructions before the instructions that use them.
897  Instruction *HoistPoint = nullptr;
898  if (ControlFlowHoisting) {
899    for (Instruction *I : reverse(HoistedInstructions)) {
900      if (!llvm::all_of(I->uses(),
901                        [&](Use &U) { return DT->dominates(I, U); })) {
902        BasicBlock *Dominator =
903            DT->getNode(I->getParent())->getIDom()->getBlock();
904        if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
905          if (HoistPoint)
906            assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
907                   "New hoist point expected to dominate old hoist point");
908          HoistPoint = Dominator->getTerminator();
909        }
910        LLVM_DEBUG(dbgs() << "LICM rehoisting to "
911                          << HoistPoint->getParent()->getName()
912                          << ": " << *I << "\n");
913        moveInstructionBefore(*I, *HoistPoint, *SafetyInfo, MSSAU, SE);
914        HoistPoint = I;
915        Changed = true;
916      }
917    }
918  }
919  if (MSSAU && VerifyMemorySSA)
920    MSSAU->getMemorySSA()->verifyMemorySSA();
921
922    // Now that we've finished hoisting make sure that LI and DT are still
923    // valid.
924#ifdef EXPENSIVE_CHECKS
925  if (Changed) {
926    assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
927           "Dominator tree verification failed");
928    LI->verify(*DT);
929  }
930#endif
931
932  return Changed;
933}
934
// Return true if LI is invariant within the scope of the loop. LI is
// invariant if CurLoop is dominated by an invariant.start representing the
// same memory location and size as the memory location LI loads from, and if
// the invariant.start has no uses.
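// For example (illustrative IR), a load from %p inside the loop can be
// treated as invariant when the loop header is properly dominated by
//   %inv = call {}* @llvm.invariant.start.p0i8(i64 4, i8* %p)
// provided %inv has no uses and the started region is at least as large as
// the loaded value.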
939static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT,
940                                  Loop *CurLoop) {
941  Value *Addr = LI->getOperand(0);
942  const DataLayout &DL = LI->getModule()->getDataLayout();
943  const uint32_t LocSizeInBits = DL.getTypeSizeInBits(LI->getType());
944
  // If the type is i8 addrspace(x)*, we know this is the type of the
  // llvm.invariant.start operand.
947  auto *PtrInt8Ty = PointerType::get(Type::getInt8Ty(LI->getContext()),
948                                     LI->getPointerAddressSpace());
949  unsigned BitcastsVisited = 0;
  // Look through bitcasts until we reach the i8* type (this is the
  // invariant.start operand type).
  while (Addr->getType() != PtrInt8Ty) {
    auto *BC = dyn_cast<BitCastInst>(Addr);
    // Avoid traversing a high number of bitcasts.
955    if (++BitcastsVisited > MaxNumUsesTraversed || !BC)
956      return false;
957    Addr = BC->getOperand(0);
958  }
959
  unsigned UsesVisited = 0;
  // Traverse all uses of the load operand value to see if invariant.start is
  // one of the uses, and whether it dominates the load instruction.
  for (auto *U : Addr->users()) {
    // Avoid traversing a load operand with a high number of users.
    if (++UsesVisited > MaxNumUsesTraversed)
      return false;
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    // If there are escaping uses of the invariant.start instruction, the load
    // may be non-invariant.
970    if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
971        !II->use_empty())
972      continue;
973    unsigned InvariantSizeInBits =
974        cast<ConstantInt>(II->getArgOperand(0))->getSExtValue() * 8;
975    // Confirm the invariant.start location size contains the load operand size
976    // in bits. Also, the invariant.start should dominate the load, and we
977    // should not hoist the load out of a loop that contains this dominating
978    // invariant.start.
979    if (LocSizeInBits <= InvariantSizeInBits &&
980        DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
981      return true;
982  }
983
984  return false;
985}
986
987namespace {
988/// Return true if-and-only-if we know how to (mechanically) both hoist and
989/// sink a given instruction out of a loop.  Does not address legality
990/// concerns such as aliasing or speculation safety.
991bool isHoistableAndSinkableInst(Instruction &I) {
992  // Only these instructions are hoistable/sinkable.
993  return (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
994          isa<FenceInst>(I) || isa<CastInst>(I) || isa<UnaryOperator>(I) ||
995          isa<BinaryOperator>(I) || isa<SelectInst>(I) ||
996          isa<GetElementPtrInst>(I) || isa<CmpInst>(I) ||
997          isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
998          isa<ShuffleVectorInst>(I) || isa<ExtractValueInst>(I) ||
999          isa<InsertValueInst>(I) || isa<FreezeInst>(I));
1000}
/// Return true if all of the alias sets within this AST are known not to
/// contain a Mod, or if MSSA knows there are no MemoryDefs in the loop.
1003bool isReadOnly(AliasSetTracker *CurAST, const MemorySSAUpdater *MSSAU,
1004                const Loop *L) {
1005  if (CurAST) {
1006    for (AliasSet &AS : *CurAST) {
1007      if (!AS.isForwardingAliasSet() && AS.isMod()) {
1008        return false;
1009      }
1010    }
1011    return true;
1012  } else { /*MSSAU*/
1013    for (auto *BB : L->getBlocks())
1014      if (MSSAU->getMemorySSA()->getBlockDefs(BB))
1015        return false;
1016    return true;
1017  }
1018}
1019
1020/// Return true if I is the only Instruction with a MemoryAccess in L.
1021bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
1022                        const MemorySSAUpdater *MSSAU) {
1023  for (auto *BB : L->getBlocks())
1024    if (auto *Accs = MSSAU->getMemorySSA()->getBlockAccesses(BB)) {
1025      int NotAPhi = 0;
1026      for (const auto &Acc : *Accs) {
1027        if (isa<MemoryPhi>(&Acc))
1028          continue;
1029        const auto *MUD = cast<MemoryUseOrDef>(&Acc);
1030        if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
1031          return false;
1032      }
1033    }
1034  return true;
1035}
1036}
1037
1038bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
1039                              Loop *CurLoop, AliasSetTracker *CurAST,
1040                              MemorySSAUpdater *MSSAU,
1041                              bool TargetExecutesOncePerLoop,
1042                              SinkAndHoistLICMFlags *Flags,
1043                              OptimizationRemarkEmitter *ORE) {
1044  // If we don't understand the instruction, bail early.
1045  if (!isHoistableAndSinkableInst(I))
1046    return false;
1047
1048  MemorySSA *MSSA = MSSAU ? MSSAU->getMemorySSA() : nullptr;
1049  if (MSSA)
1050    assert(Flags != nullptr && "Flags cannot be null.");
1051
1052  // Loads have extra constraints we have to verify before we can hoist them.
1053  if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
1054    if (!LI->isUnordered())
1055      return false; // Don't sink/hoist volatile or ordered atomic loads!
1056
1057    // Loads from constant memory are always safe to move, even if they end up
1058    // in the same alias set as something that ends up being modified.
1059    if (AA->pointsToConstantMemory(LI->getOperand(0)))
1060      return true;
1061    if (LI->hasMetadata(LLVMContext::MD_invariant_load))
1062      return true;
1063
1064    if (LI->isAtomic() && !TargetExecutesOncePerLoop)
1065      return false; // Don't risk duplicating unordered loads
1066
1067    // This checks for an invariant.start dominating the load.
1068    if (isLoadInvariantInLoop(LI, DT, CurLoop))
1069      return true;
1070
1071    bool Invalidated;
1072    if (CurAST)
1073      Invalidated = pointerInvalidatedByLoop(MemoryLocation::get(LI), CurAST,
1074                                             CurLoop, AA);
1075    else
1076      Invalidated = pointerInvalidatedByLoopWithMSSA(
1077          MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(LI)), CurLoop, *Flags);
1078    // Check loop-invariant address because this may also be a sinkable load
1079    // whose address is not necessarily loop-invariant.
1080    if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1081      ORE->emit([&]() {
1082        return OptimizationRemarkMissed(
1083                   DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI)
1084               << "failed to move load with loop-invariant address "
1085                  "because the loop may invalidate its value";
1086      });
1087
1088    return !Invalidated;
1089  } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
1090    // Don't sink or hoist dbg info; it's legal, but not useful.
1091    if (isa<DbgInfoIntrinsic>(I))
1092      return false;
1093
1094    // Don't sink calls which can throw.
1095    if (CI->mayThrow())
1096      return false;
1097
1098    using namespace PatternMatch;
1099    if (match(CI, m_Intrinsic<Intrinsic::assume>()))
1100      // Assumes don't actually alias anything or throw
1101      return true;
1102
1103    if (match(CI, m_Intrinsic<Intrinsic::experimental_widenable_condition>()))
1104      // Widenable conditions don't actually alias anything or throw
1105      return true;
1106
1107    // Handle simple cases by querying alias analysis.
1108    FunctionModRefBehavior Behavior = AA->getModRefBehavior(CI);
1109    if (Behavior == FMRB_DoesNotAccessMemory)
1110      return true;
1111    if (AAResults::onlyReadsMemory(Behavior)) {
      // A readonly argmemonly function only reads from memory pointed to by
      // its arguments with arbitrary offsets.  If we can prove there are no
      // writes to this memory in the loop, we can hoist or sink.
1115      if (AAResults::onlyAccessesArgPointees(Behavior)) {
1116        // TODO: expand to writeable arguments
1117        for (Value *Op : CI->arg_operands())
1118          if (Op->getType()->isPointerTy()) {
1119            bool Invalidated;
1120            if (CurAST)
1121              Invalidated = pointerInvalidatedByLoop(
1122                  MemoryLocation(Op, LocationSize::unknown(), AAMDNodes()),
1123                  CurAST, CurLoop, AA);
1124            else
1125              Invalidated = pointerInvalidatedByLoopWithMSSA(
1126                  MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(CI)), CurLoop,
1127                  *Flags);
1128            if (Invalidated)
1129              return false;
1130          }
1131        return true;
1132      }
1133
1134      // If this call only reads from memory and there are no writes to memory
1135      // in the loop, we can hoist or sink the call as appropriate.
1136      if (isReadOnly(CurAST, MSSAU, CurLoop))
1137        return true;
1138    }
1139
1140    // FIXME: This should use mod/ref information to see if we can hoist or
1141    // sink the call.
1142
1143    return false;
1144  } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
1145    // Fences alias (most) everything to provide ordering.  For the moment,
1146    // just give up if there are any other memory operations in the loop.
1147    if (CurAST) {
1148      auto Begin = CurAST->begin();
1149      assert(Begin != CurAST->end() && "must contain FI");
1150      if (std::next(Begin) != CurAST->end())
1151        // constant memory for instance, TODO: handle better
1152        return false;
1153      auto *UniqueI = Begin->getUniqueInstruction();
1154      if (!UniqueI)
1155        // other memory op, give up
1156        return false;
1157      (void)FI; // suppress unused variable warning
1158      assert(UniqueI == FI && "AS must contain FI");
1159      return true;
1160    } else // MSSAU
1161      return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
1162  } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
    if (!SI->isUnordered())
      return false; // Don't sink/hoist volatile or ordered atomic stores!
1165
    // We can only hoist a store that we can prove writes a value which is not
    // read or overwritten within the loop.  For those cases, we fall back to
    // load/store promotion instead.  TODO: We can extend this to cases where
    // there is exactly one write to the location and that write dominates an
    // arbitrary number of reads in the loop.
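    // For example, a store such as "*p = 42" where *p is neither loaded nor
    // stored elsewhere in the loop can be moved directly; if *p is also read
    // in the loop, we rely on load/store promotion instead.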
1171    if (CurAST) {
1172      auto &AS = CurAST->getAliasSetFor(MemoryLocation::get(SI));
1173
1174      if (AS.isRef() || !AS.isMustAlias())
1175        // Quick exit test, handled by the full path below as well.
1176        return false;
1177      auto *UniqueI = AS.getUniqueInstruction();
1178      if (!UniqueI)
1179        // other memory op, give up
1180        return false;
1181      assert(UniqueI == SI && "AS must contain SI");
1182      return true;
1183    } else { // MSSAU
1184      if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
1185        return true;
      // If there are more accesses than the promotion cap, give up; we're not
      // walking a list that long.
1188      if (Flags->NoOfMemAccTooLarge)
1189        return false;
1190      // Check store only if there's still "quota" to check clobber.
1191      if (Flags->LicmMssaOptCounter >= Flags->LicmMssaOptCap)
1192        return false;
1193      // If there are interfering Uses (i.e. their defining access is in the
1194      // loop), or ordered loads (stored as Defs!), don't move this store.
1195      // Could do better here, but this is conservatively correct.
1196      // TODO: Cache set of Uses on the first walk in runOnLoop, update when
1197      // moving accesses. Can also extend to dominating uses.
1198      auto *SIMD = MSSA->getMemoryAccess(SI);
1199      for (auto *BB : CurLoop->getBlocks())
1200        if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
1201          for (const auto &MA : *Accesses)
1202            if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
1203              auto *MD = MU->getDefiningAccess();
1204              if (!MSSA->isLiveOnEntryDef(MD) &&
1205                  CurLoop->contains(MD->getBlock()))
1206                return false;
1207              // Disable hoisting past potentially interfering loads. Optimized
1208              // Uses may point to an access outside the loop, as getClobbering
1209              // checks the previous iteration when walking the backedge.
1210              // FIXME: More precise: no Uses that alias SI.
1211              if (!Flags->IsSink && !MSSA->dominates(SIMD, MU))
1212                return false;
1213            } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
1214              if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
1215                (void)LI; // Silence warning.
1216                assert(!LI->isUnordered() && "Expected ordered load");
1217                return false;
1218              }
1219              // Any call, while it may not be clobbering SI, may still be a use.
1220              if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
1221                // Check if the call may read from the memory location written
1222                // to by SI. Check CI's attributes and arguments; the number of
1223                // such checks performed is limited above by NoOfMemAccTooLarge.
1224                ModRefInfo MRI = AA->getModRefInfo(CI, MemoryLocation::get(SI));
1225                if (isModOrRefSet(MRI))
1226                  return false;
1227              }
1228            }
1229        }
1230
1231      auto *Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(SI);
1232      Flags->LicmMssaOptCounter++;
1233      // If there are no clobbering Defs in the loop, store is safe to hoist.
1234      return MSSA->isLiveOnEntryDef(Source) ||
1235             !CurLoop->contains(Source->getBlock());
1236    }
1237  }
1238
1239  assert(!I.mayReadOrWriteMemory() && "unhandled aliasing");
1240
1241  // We've established mechanical ability and aliasing; it's up to the caller
1242  // to check fault safety.
1243  return true;
1244}
1245
1246/// Returns true if a PHINode is trivially replaceable with an
1247/// Instruction.
1248/// This is true when all incoming values are that instruction.
1249/// This pattern occurs most often with LCSSA PHI nodes.
1250///
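/// For illustration, in a hypothetical LCSSA exit block such as
///
///   exit:
///     %i.lcssa = phi i32 [ %i, %latch ], [ %i, %other.exit.pred ]
///
/// every incoming value is %i, so the PHI can simply be replaced by %i.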
1251static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
1252  for (const Value *IncValue : PN.incoming_values())
1253    if (IncValue != &I)
1254      return false;
1255
1256  return true;
1257}
1258
1259/// Return true if the instruction is free in the loop.
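/// For example (hypothetical IR), a GEP whose only in-loop user is a load in
/// the same block is treated as free, since it is expected to fold into the
/// load's addressing mode:
///
///   %addr = getelementptr inbounds i32, i32* %base, i64 %i
///   %val  = load i32, i32* %addr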
1260static bool isFreeInLoop(const Instruction &I, const Loop *CurLoop,
1261                         const TargetTransformInfo *TTI) {
1262
1263  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
1264    if (TTI->getUserCost(GEP, TargetTransformInfo::TCK_SizeAndLatency) !=
1265        TargetTransformInfo::TCC_Free)
1266      return false;
1267    // For a GEP, we cannot simply use getUserCost because currently it
1268    // optimistically assumes that a GEP will fold into the addressing mode
1269    // regardless of its users.
1270    const BasicBlock *BB = GEP->getParent();
1271    for (const User *U : GEP->users()) {
1272      const Instruction *UI = cast<Instruction>(U);
1273      if (CurLoop->contains(UI) &&
1274          (BB != UI->getParent() ||
1275           (!isa<StoreInst>(UI) && !isa<LoadInst>(UI))))
1276        return false;
1277    }
1278    return true;
1279  } else
1280    return TTI->getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
1281           TargetTransformInfo::TCC_Free;
1282}
1283
1284/// Return true if the only users of this instruction are outside of
1285/// the loop. If this is true, we can sink the instruction to the exit
1286/// blocks of the loop.
1287///
1288/// We also return true if the instruction could be folded away in lowering
1289/// (e.g., a GEP can be folded into a load as an addressing mode in the loop).
1290static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
1291                                  const LoopSafetyInfo *SafetyInfo,
1292                                  TargetTransformInfo *TTI, bool &FreeInLoop) {
1293  const auto &BlockColors = SafetyInfo->getBlockColors();
1294  bool IsFree = isFreeInLoop(I, CurLoop, TTI);
1295  for (const User *U : I.users()) {
1296    const Instruction *UI = cast<Instruction>(U);
1297    if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
1298      const BasicBlock *BB = PN->getParent();
1299      // We cannot sink uses in catchswitches.
1300      if (isa<CatchSwitchInst>(BB->getTerminator()))
1301        return false;
1302
1303      // We need to sink a callsite to a unique funclet.  Avoid sinking if the
1304      // PHI's parent block does not have a unique funclet color.
1305      if (isa<CallInst>(I))
1306        if (!BlockColors.empty() &&
1307            BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
1308          return false;
1309    }
1310
1311    if (CurLoop->contains(UI)) {
1312      if (IsFree) {
1313        FreeInLoop = true;
1314        continue;
1315      }
1316      return false;
1317    }
1318  }
1319  return true;
1320}
1321
1322static Instruction *cloneInstructionInExitBlock(
1323    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
1324    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU) {
1325  Instruction *New;
1326  if (auto *CI = dyn_cast<CallInst>(&I)) {
1327    const auto &BlockColors = SafetyInfo->getBlockColors();
1328
1329    // Sinking call-sites need to be handled differently from other
1330    // instructions.  The cloned call-site needs a funclet bundle operand
1331    // appropriate for its location in the CFG.
1332    SmallVector<OperandBundleDef, 1> OpBundles;
1333    for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
1334         BundleIdx != BundleEnd; ++BundleIdx) {
1335      OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx);
1336      if (Bundle.getTagID() == LLVMContext::OB_funclet)
1337        continue;
1338
1339      OpBundles.emplace_back(Bundle);
1340    }
1341
1342    if (!BlockColors.empty()) {
1343      const ColorVector &CV = BlockColors.find(&ExitBlock)->second;
1344      assert(CV.size() == 1 && "non-unique color for exit block!");
1345      BasicBlock *BBColor = CV.front();
1346      Instruction *EHPad = BBColor->getFirstNonPHI();
1347      if (EHPad->isEHPad())
1348        OpBundles.emplace_back("funclet", EHPad);
1349    }
1350
1351    New = CallInst::Create(CI, OpBundles);
1352  } else {
1353    New = I.clone();
1354  }
1355
1356  ExitBlock.getInstList().insert(ExitBlock.getFirstInsertionPt(), New);
1357  if (!I.getName().empty())
1358    New->setName(I.getName() + ".le");
1359
1360  if (MSSAU && MSSAU->getMemorySSA()->getMemoryAccess(&I)) {
1361    // Create a new MemoryAccess and let MemorySSA set its defining access.
1362    MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
1363        New, nullptr, New->getParent(), MemorySSA::Beginning);
1364    if (NewMemAcc) {
1365      if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
1366        MSSAU->insertDef(MemDef, /*RenameUses=*/true);
1367      else {
1368        auto *MemUse = cast<MemoryUse>(NewMemAcc);
1369        MSSAU->insertUse(MemUse, /*RenameUses=*/true);
1370      }
1371    }
1372  }
1373
1374  // Build LCSSA PHI nodes for any in-loop operands. Note that this is
1375  // particularly cheap because we can rip off the PHI node that we're
1376  // replacing for the number and blocks of the predecessors.
1377  // OPT: If this shows up in a profile, we can instead finish sinking all
1378  // invariant instructions, and then walk their operands to re-establish
1379  // LCSSA. That will eliminate creating PHI nodes just to nuke them when
1380  // sinking bottom-up.
1381  for (User::op_iterator OI = New->op_begin(), OE = New->op_end(); OI != OE;
1382       ++OI)
1383    if (Instruction *OInst = dyn_cast<Instruction>(*OI))
1384      if (Loop *OLoop = LI->getLoopFor(OInst->getParent()))
1385        if (!OLoop->contains(&PN)) {
1386          PHINode *OpPN =
1387              PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
1388                              OInst->getName() + ".lcssa", &ExitBlock.front());
1389          for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
1390            OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
1391          *OI = OpPN;
1392        }
1393  return New;
1394}
1395
1396static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
1397                             AliasSetTracker *AST, MemorySSAUpdater *MSSAU) {
1398  if (AST)
1399    AST->deleteValue(&I);
1400  if (MSSAU)
1401    MSSAU->removeMemoryAccess(&I);
1402  SafetyInfo.removeInstruction(&I);
1403  I.eraseFromParent();
1404}
1405
1406static void moveInstructionBefore(Instruction &I, Instruction &Dest,
1407                                  ICFLoopSafetyInfo &SafetyInfo,
1408                                  MemorySSAUpdater *MSSAU,
1409                                  ScalarEvolution *SE) {
1410  SafetyInfo.removeInstruction(&I);
1411  SafetyInfo.insertInstructionTo(&I, Dest.getParent());
1412  I.moveBefore(&Dest);
1413  if (MSSAU)
1414    if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>(
1415            MSSAU->getMemorySSA()->getMemoryAccess(&I)))
1416      MSSAU->moveToPlace(OldMemAcc, Dest.getParent(),
1417                         MemorySSA::BeforeTerminator);
1418  if (SE)
1419    SE->forgetValue(&I);
1420}
1421
1422static Instruction *sinkThroughTriviallyReplaceablePHI(
1423    PHINode *TPN, Instruction *I, LoopInfo *LI,
1424    SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies,
1425    const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
1426    MemorySSAUpdater *MSSAU) {
1427  assert(isTriviallyReplaceablePHI(*TPN, *I) &&
1428         "Expect only trivially replaceable PHI");
1429  BasicBlock *ExitBlock = TPN->getParent();
1430  Instruction *New;
1431  auto It = SunkCopies.find(ExitBlock);
1432  if (It != SunkCopies.end())
1433    New = It->second;
1434  else
1435    New = SunkCopies[ExitBlock] = cloneInstructionInExitBlock(
1436        *I, *ExitBlock, *TPN, LI, SafetyInfo, MSSAU);
1437  return New;
1438}
1439
1440static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
1441  BasicBlock *BB = PN->getParent();
1442  if (!BB->canSplitPredecessors())
1443    return false;
1444  // It's not impossible to split EHPad blocks, but if BlockColors already exist
1445  // it requires updating BlockColors for all offspring blocks accordingly. By
1446  // skipping this corner case, we can make updating BlockColors after splitting
1447  // a predecessor fairly simple.
1448  if (!SafetyInfo->getBlockColors().empty() && BB->getFirstNonPHI()->isEHPad())
1449    return false;
1450  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
1451    BasicBlock *BBPred = *PI;
1452    if (isa<IndirectBrInst>(BBPred->getTerminator()) ||
1453        isa<CallBrInst>(BBPred->getTerminator()))
1454      return false;
1455  }
1456  return true;
1457}
1458
1459static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
1460                                        LoopInfo *LI, const Loop *CurLoop,
1461                                        LoopSafetyInfo *SafetyInfo,
1462                                        MemorySSAUpdater *MSSAU) {
1463#ifndef NDEBUG
1464  SmallVector<BasicBlock *, 32> ExitBlocks;
1465  CurLoop->getUniqueExitBlocks(ExitBlocks);
1466  SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
1467                                             ExitBlocks.end());
1468#endif
1469  BasicBlock *ExitBB = PN->getParent();
1470  assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.");
1471
1472  // Split predecessors of the loop exit so that instructions in the loop are
1473  // exposed to exit blocks through trivially replaceable PHIs, while keeping
1474  // the loop in the canonical form where each predecessor of each exit block
1475  // is contained within the loop. For example, this will convert the loop below
1476  // from
1477  //
1478  // LB1:
1479  //   %v1 =
1480  //   br %LE, %LB2
1481  // LB2:
1482  //   %v2 =
1483  //   br %LE, %LB1
1484  // LE:
1485  //   %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
1486  //
1487  // to
1488  //
1489  // LB1:
1490  //   %v1 =
1491  //   br %LE.split, %LB2
1492  // LB2:
1493  //   %v2 =
1494  //   br %LE.split2, %LB1
1495  // LE.split:
1496  //   %p1 = phi [%v1, %LB1]  <-- trivially replaceable
1497  //   br %LE
1498  // LE.split2:
1499  //   %p2 = phi [%v2, %LB2]  <-- trivially replaceable
1500  //   br %LE
1501  // LE:
1502  //   %p = phi [%p1, %LE.split], [%p2, %LE.split2]
1503  //
1504  const auto &BlockColors = SafetyInfo->getBlockColors();
1505  SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
1506  while (!PredBBs.empty()) {
1507    BasicBlock *PredBB = *PredBBs.begin();
1508    assert(CurLoop->contains(PredBB) &&
1509           "Expect all predecessors are in the loop");
1510    if (PN->getBasicBlockIndex(PredBB) >= 0) {
1511      BasicBlock *NewPred = SplitBlockPredecessors(
1512          ExitBB, PredBB, ".split.loop.exit", DT, LI, MSSAU, true);
1513      // Since we do not allow splitting EH-block with BlockColors in
1514      // canSplitPredecessors(), we can simply assign predecessor's color to
1515      // the new block.
1516      if (!BlockColors.empty())
1517        // Grab a reference to the ColorVector to be inserted before getting the
1518        // reference to the vector we are copying because inserting the new
1519        // element in BlockColors might cause the map to be reallocated.
1520        SafetyInfo->copyColors(NewPred, PredBB);
1521    }
1522    PredBBs.remove(PredBB);
1523  }
1524}
1525
1526/// When an instruction is found to only be used outside of the loop, this
1527/// function moves it to the exit blocks and patches up SSA form as needed.
1528/// This method is guaranteed to remove the original instruction from its
1529/// position, and may either delete it or move it outside of the loop.
1530///
1531static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
1532                 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
1533                 MemorySSAUpdater *MSSAU, OptimizationRemarkEmitter *ORE) {
1534  LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
1535  ORE->emit([&]() {
1536    return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
1537           << "sinking " << ore::NV("Inst", &I);
1538  });
1539  bool Changed = false;
1540  if (isa<LoadInst>(I))
1541    ++NumMovedLoads;
1542  else if (isa<CallInst>(I))
1543    ++NumMovedCalls;
1544  ++NumSunk;
1545
1546  // Iterate over users to be ready for actual sinking. Replace uses reached
1547  // via unreachable blocks with undef and make all user PHIs trivially replaceable.
1548  SmallPtrSet<Instruction *, 8> VisitedUsers;
1549  for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) {
1550    auto *User = cast<Instruction>(*UI);
1551    Use &U = UI.getUse();
1552    ++UI;
1553
1554    if (VisitedUsers.count(User) || CurLoop->contains(User))
1555      continue;
1556
1557    if (!DT->isReachableFromEntry(User->getParent())) {
1558      U = UndefValue::get(I.getType());
1559      Changed = true;
1560      continue;
1561    }
1562
1563    // The user must be a PHI node.
1564    PHINode *PN = cast<PHINode>(User);
1565
1566    // Surprisingly, instructions can be used outside of loops without any
1567    // exits.  This can only happen in PHI nodes if the incoming block is
1568    // unreachable.
1569    BasicBlock *BB = PN->getIncomingBlock(U);
1570    if (!DT->isReachableFromEntry(BB)) {
1571      U = UndefValue::get(I.getType());
1572      Changed = true;
1573      continue;
1574    }
1575
1576    VisitedUsers.insert(PN);
1577    if (isTriviallyReplaceablePHI(*PN, I))
1578      continue;
1579
1580    if (!canSplitPredecessors(PN, SafetyInfo))
1581      return Changed;
1582
1583    // Split predecessors of the PHI so that we can make users trivially
1584    // replaceable.
1585    splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, MSSAU);
1586
1587    // Should rebuild the iterators, as they may be invalidated by
1588    // splitPredecessorsOfLoopExit().
1589    UI = I.user_begin();
1590    UE = I.user_end();
1591  }
1592
1593  if (VisitedUsers.empty())
1594    return Changed;
1595
1596#ifndef NDEBUG
1597  SmallVector<BasicBlock *, 32> ExitBlocks;
1598  CurLoop->getUniqueExitBlocks(ExitBlocks);
1599  SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
1600                                             ExitBlocks.end());
1601#endif
1602
1603  // Clones of this instruction. Don't create more than one per exit block!
1604  SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;
1605
1606  // If this instruction is only used outside of the loop, then all users are
1607  // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
1608  // the instruction.
1609  SmallSetVector<User*, 8> Users(I.user_begin(), I.user_end());
1610  for (auto *UI : Users) {
1611    auto *User = cast<Instruction>(UI);
1612
1613    if (CurLoop->contains(User))
1614      continue;
1615
1616    PHINode *PN = cast<PHINode>(User);
1617    assert(ExitBlockSet.count(PN->getParent()) &&
1618           "The LCSSA PHI is not in an exit block!");
1619    // The PHI must be trivially replaceable.
1620    Instruction *New = sinkThroughTriviallyReplaceablePHI(
1621        PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
1622    PN->replaceAllUsesWith(New);
1623    eraseInstruction(*PN, *SafetyInfo, nullptr, nullptr);
1624    Changed = true;
1625  }
1626  return Changed;
1627}
1628
1629/// When an instruction is found to only use loop-invariant operands and it
1630/// is safe to hoist, this function is called to do the dirty work.
1631///
1632static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
1633                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
1634                  MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
1635                  OptimizationRemarkEmitter *ORE) {
1636  LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getName() << ": " << I
1637                    << "\n");
1638  ORE->emit([&]() {
1639    return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting "
1640                                                         << ore::NV("Inst", &I);
1641  });
1642
1643  // Metadata can be dependent on conditions we are hoisting above.
1644  // Conservatively strip all metadata on the instruction unless we were
1645  // guaranteed to execute I if we entered the loop, in which case the metadata
1646  // is valid in the loop preheader.
1647  if (I.hasMetadataOtherThanDebugLoc() &&
1648      // The check on hasMetadataOtherThanDebugLoc is to prevent us from burning
1649      // time in isGuaranteedToExecute if we don't actually have anything to
1650      // drop.  It is a compile time optimization, not required for correctness.
1651      !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop))
1652    I.dropUnknownNonDebugMetadata();
1653
1654  if (isa<PHINode>(I))
1655    // Move the new node to the end of the phi list in the destination block.
1656    moveInstructionBefore(I, *Dest->getFirstNonPHI(), *SafetyInfo, MSSAU, SE);
1657  else
1658    // Move the new node to the destination block, before its terminator.
1659    moveInstructionBefore(I, *Dest->getTerminator(), *SafetyInfo, MSSAU, SE);
1660
1661  // Apply line 0 debug locations when we are moving instructions to different
1662  // basic blocks because we want to avoid jumpy line tables.
1663  if (const DebugLoc &DL = I.getDebugLoc())
1664    I.setDebugLoc(DebugLoc::get(0, 0, DL.getScope(), DL.getInlinedAt()));
1665
1666  if (isa<LoadInst>(I))
1667    ++NumMovedLoads;
1668  else if (isa<CallInst>(I))
1669    ++NumMovedCalls;
1670  ++NumHoisted;
1671}
1672
1673/// Only sink or hoist an instruction if it is not a trapping instruction,
1674/// or if the instruction is known not to trap when moved to the preheader,
1675/// or if it is a trapping instruction and is guaranteed to execute.
1676static bool isSafeToExecuteUnconditionally(Instruction &Inst,
1677                                           const DominatorTree *DT,
1678                                           const Loop *CurLoop,
1679                                           const LoopSafetyInfo *SafetyInfo,
1680                                           OptimizationRemarkEmitter *ORE,
1681                                           const Instruction *CtxI) {
1682  if (isSafeToSpeculativelyExecute(&Inst, CtxI, DT))
1683    return true;
1684
1685  bool GuaranteedToExecute =
1686      SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop);
1687
1688  if (!GuaranteedToExecute) {
1689    auto *LI = dyn_cast<LoadInst>(&Inst);
1690    if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1691      ORE->emit([&]() {
1692        return OptimizationRemarkMissed(
1693                   DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI)
1694               << "failed to hoist load with loop-invariant address "
1695                  "because load is conditionally executed";
1696      });
1697  }
1698
1699  return GuaranteedToExecute;
1700}
1701
1702namespace {
1703class LoopPromoter : public LoadAndStorePromoter {
1704  Value *SomePtr; // Designated pointer to store to.
1705  const SmallSetVector<Value *, 8> &PointerMustAliases;
1706  SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
1707  SmallVectorImpl<Instruction *> &LoopInsertPts;
1708  SmallVectorImpl<MemoryAccess *> &MSSAInsertPts;
1709  PredIteratorCache &PredCache;
1710  AliasSetTracker &AST;
1711  MemorySSAUpdater *MSSAU;
1712  LoopInfo &LI;
1713  DebugLoc DL;
1714  int Alignment;
1715  bool UnorderedAtomic;
1716  AAMDNodes AATags;
1717  ICFLoopSafetyInfo &SafetyInfo;
1718
1719  Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
1720    if (Instruction *I = dyn_cast<Instruction>(V))
1721      if (Loop *L = LI.getLoopFor(I->getParent()))
1722        if (!L->contains(BB)) {
1723          // We need to create an LCSSA PHI node for the incoming value and
1724          // store that.
1725          PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB),
1726                                        I->getName() + ".lcssa", &BB->front());
1727          for (BasicBlock *Pred : PredCache.get(BB))
1728            PN->addIncoming(I, Pred);
1729          return PN;
1730        }
1731    return V;
1732  }
1733
1734public:
1735  LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
1736               const SmallSetVector<Value *, 8> &PMA,
1737               SmallVectorImpl<BasicBlock *> &LEB,
1738               SmallVectorImpl<Instruction *> &LIP,
1739               SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC,
1740               AliasSetTracker &ast, MemorySSAUpdater *MSSAU, LoopInfo &li,
1741               DebugLoc dl, int alignment, bool UnorderedAtomic,
1742               const AAMDNodes &AATags, ICFLoopSafetyInfo &SafetyInfo)
1743      : LoadAndStorePromoter(Insts, S), SomePtr(SP), PointerMustAliases(PMA),
1744        LoopExitBlocks(LEB), LoopInsertPts(LIP), MSSAInsertPts(MSSAIP),
1745        PredCache(PIC), AST(ast), MSSAU(MSSAU), LI(li), DL(std::move(dl)),
1746        Alignment(alignment), UnorderedAtomic(UnorderedAtomic), AATags(AATags),
1747        SafetyInfo(SafetyInfo) {}
1748
1749  bool isInstInList(Instruction *I,
1750                    const SmallVectorImpl<Instruction *> &) const override {
1751    Value *Ptr;
1752    if (LoadInst *LI = dyn_cast<LoadInst>(I))
1753      Ptr = LI->getOperand(0);
1754    else
1755      Ptr = cast<StoreInst>(I)->getPointerOperand();
1756    return PointerMustAliases.count(Ptr);
1757  }
1758
1759  void doExtraRewritesBeforeFinalDeletion() override {
1760    // Insert stores in the loop exit blocks.  Each exit block gets a
1761    // store of the live-out values that feed them.  Since we've already told
1762    // the SSA updater about the defs in the loop and the preheader
1763    // definition, it is all set and we can start using it.
1764    for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
1765      BasicBlock *ExitBlock = LoopExitBlocks[i];
1766      Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
1767      LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
1768      Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
1769      Instruction *InsertPos = LoopInsertPts[i];
1770      StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
1771      if (UnorderedAtomic)
1772        NewSI->setOrdering(AtomicOrdering::Unordered);
1773      NewSI->setAlignment(Align(Alignment));
1774      NewSI->setDebugLoc(DL);
1775      if (AATags)
1776        NewSI->setAAMetadata(AATags);
1777
1778      if (MSSAU) {
1779        MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i];
1780        MemoryAccess *NewMemAcc;
1781        if (!MSSAInsertPoint) {
1782          NewMemAcc = MSSAU->createMemoryAccessInBB(
1783              NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning);
1784        } else {
1785          NewMemAcc =
1786              MSSAU->createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint);
1787        }
1788        MSSAInsertPts[i] = NewMemAcc;
1789        MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true);
1790        // FIXME: true for safety, false may still be correct.
1791      }
1792    }
1793  }
1794
1795  void replaceLoadWithValue(LoadInst *LI, Value *V) const override {
1796    // Update alias analysis.
1797    AST.copyValue(LI, V);
1798  }
1799  void instructionDeleted(Instruction *I) const override {
1800    SafetyInfo.removeInstruction(I);
1801    AST.deleteValue(I);
1802    if (MSSAU)
1803      MSSAU->removeMemoryAccess(I);
1804  }
1805};
1806
1807
1808/// Return true iff we can prove that a caller of this function cannot inspect
1809/// the contents of the provided object in a well-defined program.
1810bool isKnownNonEscaping(Value *Object, const TargetLibraryInfo *TLI) {
1811  if (isa<AllocaInst>(Object))
1812    // Since the alloca goes out of scope, we know the caller can't retain a
1813    // reference to it and be well defined.  Thus, we don't need to check for
1814    // capture.
1815    return true;
1816
1817  // For all other objects we need to know that the caller can't possibly
1818  // have gotten a reference to the object.  There are two components of
1819  // that:
1820  //   1) Object can't be escaped by this function.  This is what
1821  //      PointerMayBeCaptured checks.
1822  //   2) Object can't have been captured at definition site.  For this, we
1823  //      need to know the return value is noalias.  At the moment, we use a
1824  //      weaker condition and handle only AllocLikeFunctions (which are
1825  //      known to be noalias).  TODO
1826  return isAllocLikeFn(Object, TLI) &&
1827    !PointerMayBeCaptured(Object, true, true);
1828}
1829
1830} // namespace
1831
1832/// Try to promote memory values to scalars by sinking stores out of the
1833/// loop and moving loads to before the loop.  We do this by looping over
1834/// the stores in the loop, looking for stores to Must pointers which are
1835/// loop invariant.
1836///
1837bool llvm::promoteLoopAccessesToScalars(
1838    const SmallSetVector<Value *, 8> &PointerMustAliases,
1839    SmallVectorImpl<BasicBlock *> &ExitBlocks,
1840    SmallVectorImpl<Instruction *> &InsertPts,
1841    SmallVectorImpl<MemoryAccess *> &MSSAInsertPts, PredIteratorCache &PIC,
1842    LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI,
1843    Loop *CurLoop, AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
1844    ICFLoopSafetyInfo *SafetyInfo, OptimizationRemarkEmitter *ORE) {
1845  // Verify inputs.
1846  assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&
1847         CurAST != nullptr && SafetyInfo != nullptr &&
1848         "Unexpected Input to promoteLoopAccessesToScalars");
1849
1850  Value *SomePtr = *PointerMustAliases.begin();
1851  BasicBlock *Preheader = CurLoop->getLoopPreheader();
1852
1853  // It is not safe to promote a load/store from the loop if the load/store is
1854  // conditional.  For example, turning:
1855  //
1856  //    for () { if (c) *P += 1; }
1857  //
1858  // into:
1859  //
1860  //    tmp = *P;  for () { if (c) tmp +=1; } *P = tmp;
1861  //
1862  // is not safe, because *P may only be valid to access if 'c' is true.
1863  //
1864  // The safety property divides into two parts:
1865  // p1) The memory may not be dereferenceable on entry to the loop.  In this
1866  //    case, we can't insert the required load in the preheader.
1867  // p2) The memory model does not allow us to insert a store along any dynamic
1868  //    path which did not originally have one.
1869  //
1870  // If at least one store is guaranteed to execute, both properties are
1871  // satisfied, and promotion is legal.
1872  //
1873  // This, however, is not a necessary condition. Even if no store/load is
1874  // guaranteed to execute, we can still establish these properties.
1875  // We can establish (p1) by proving that hoisting the load into the preheader
1876  // is safe (i.e. proving dereferenceability on all paths through the loop). We
1877  // can use any access within the alias set to prove dereferenceability,
1878  // since they're all must alias.
1879  //
1880  // There are two ways to establish (p2):
1881  // a) Prove the location is thread-local. In this case the memory model
1882  // requirement does not apply, and stores are safe to insert.
1883  // b) Prove a store dominates every exit block. In this case, if an exit
1884  // block is reached, the original dynamic path would have taken us through
1885  // the store, so inserting a store into the exit block is safe. Note that this
1886  // is different from the store being guaranteed to execute. For instance,
1887  // if an exception is thrown on the first iteration of the loop, the original
1888  // store is never executed, but the exit blocks are not executed either.
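  //
  // To illustrate the legal (unconditional) case: if the store always
  // executes, e.g.
  //
  //    for () { *P += 1; }
  //
  // promotion produces, schematically,
  //
  //    tmp = *P;  for () { tmp += 1; }  *P = tmp;
  //
  // with the load hoisted into the preheader and the store sunk to the exit
  // blocks.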
1889
1890  bool DereferenceableInPH = false;
1891  bool SafeToInsertStore = false;
1892
1893  SmallVector<Instruction *, 64> LoopUses;
1894
1895  // We start with an alignment of one and try to find instructions that allow
1896  // us to prove better alignment.
1897  Align Alignment;
1898  // Keep track of which types of access we see
1899  bool SawUnorderedAtomic = false;
1900  bool SawNotAtomic = false;
1901  AAMDNodes AATags;
1902
1903  const DataLayout &MDL = Preheader->getModule()->getDataLayout();
1904
1905  bool IsKnownThreadLocalObject = false;
1906  if (SafetyInfo->anyBlockMayThrow()) {
1907    // If a loop can throw, we have to insert a store along each unwind edge.
1908    // That said, we can't actually make the unwind edge explicit. Therefore,
1909    // we have to prove that the store is dead along the unwind edge.  We do
1910    // this by proving that the caller can't have a reference to the object
1911    // after return and thus can't possibly load from the object.
1912    Value *Object = GetUnderlyingObject(SomePtr, MDL);
1913    if (!isKnownNonEscaping(Object, TLI))
1914      return false;
1915    // Subtlety: Allocas aren't visible to callers, but *are* potentially
1916    // visible to other threads if captured and used during their lifetimes.
1917    IsKnownThreadLocalObject = !isa<AllocaInst>(Object);
1918  }
1919
1920  // Check that all of the pointers in the alias set have the same type.  We
1921  // cannot (yet) promote a memory location that is loaded and stored in
1922  // different sizes.  While we are at it, collect alignment and AA info.
1923  for (Value *ASIV : PointerMustAliases) {
1924    // Check that all of the pointers in the alias set have the same type.  We
1925    // cannot (yet) promote a memory location that is loaded and stored in
1926    // different sizes.
1927    if (SomePtr->getType() != ASIV->getType())
1928      return false;
1929
1930    for (User *U : ASIV->users()) {
1931      // Ignore instructions that are outside the loop.
1932      Instruction *UI = dyn_cast<Instruction>(U);
1933      if (!UI || !CurLoop->contains(UI))
1934        continue;
1935
1936      // If there is a non-load/store instruction in the loop, we can't promote
1937      // it.
1938      if (LoadInst *Load = dyn_cast<LoadInst>(UI)) {
1939        if (!Load->isUnordered())
1940          return false;
1941
1942        SawUnorderedAtomic |= Load->isAtomic();
1943        SawNotAtomic |= !Load->isAtomic();
1944
1945        Align InstAlignment = Load->getAlign();
1946
1947        // Note that proving a load safe to speculate requires proving
1948        // sufficient alignment at the target location.  Proving it guaranteed
1949        // to execute does as well.  Thus we can increase our guaranteed
1950        // alignment as well.
1951        if (!DereferenceableInPH || (InstAlignment > Alignment))
1952          if (isSafeToExecuteUnconditionally(*Load, DT, CurLoop, SafetyInfo,
1953                                             ORE, Preheader->getTerminator())) {
1954            DereferenceableInPH = true;
1955            Alignment = std::max(Alignment, InstAlignment);
1956          }
1957      } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
1958        // Stores *of* the pointer are not interesting, only stores *to* the
1959        // pointer.
1960        if (UI->getOperand(1) != ASIV)
1961          continue;
1962        if (!Store->isUnordered())
1963          return false;
1964
1965        SawUnorderedAtomic |= Store->isAtomic();
1966        SawNotAtomic |= !Store->isAtomic();
1967
1968        // If the store is guaranteed to execute, both properties are satisfied.
1969        // We may want to check if a store is guaranteed to execute even if we
1970        // already know that promotion is safe, since it may have higher
1971        // alignment than any other guaranteed stores, in which case we can
1972        // raise the alignment on the promoted store.
1973        Align InstAlignment = Store->getAlign();
1974
1975        if (!DereferenceableInPH || !SafeToInsertStore ||
1976            (InstAlignment > Alignment)) {
1977          if (SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop)) {
1978            DereferenceableInPH = true;
1979            SafeToInsertStore = true;
1980            Alignment = std::max(Alignment, InstAlignment);
1981          }
1982        }
1983
1984        // If a store dominates all exit blocks, it is safe to sink.
1985        // As explained above, if an exit block was executed, a dominating
1986        // store must have been executed at least once, so we are not
1987        // introducing stores on paths that did not have them.
1988        // Note that this only looks at explicit exit blocks. If we ever
1989        // start sinking stores into unwind edges (see above), this will break.
1990        if (!SafeToInsertStore)
1991          SafeToInsertStore = llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) {
1992            return DT->dominates(Store->getParent(), Exit);
1993          });
1994
1995        // If the store is not guaranteed to execute, we may still get
1996        // deref info through it.
1997        if (!DereferenceableInPH) {
1998          DereferenceableInPH = isDereferenceableAndAlignedPointer(
1999              Store->getPointerOperand(), Store->getValueOperand()->getType(),
2000              Store->getAlign(), MDL, Preheader->getTerminator(), DT);
2001        }
2002      } else
2003        return false; // Not a load or store.
2004
2005      // Merge the AA tags.
2006      if (LoopUses.empty()) {
2007        // On the first load/store, just take its AA tags.
2008        UI->getAAMetadata(AATags);
2009      } else if (AATags) {
2010        UI->getAAMetadata(AATags, /* Merge = */ true);
2011      }
2012
2013      LoopUses.push_back(UI);
2014    }
2015  }
2016
2017  // If we found both an unordered atomic instruction and a non-atomic memory
2018  // access, bail.  We can't blindly promote non-atomic to atomic since we
2019  // might not be able to lower the result.  We can't downgrade since that
2020  // would violate memory model.  Also, align 0 is an error for atomics.
2021  if (SawUnorderedAtomic && SawNotAtomic)
2022    return false;
2023
2024  // If we're inserting an atomic load in the preheader, we must be able to
2025  // lower it.  We're only guaranteed to be able to lower naturally aligned
2026  // atomics.
2027  auto *SomePtrElemType = SomePtr->getType()->getPointerElementType();
2028  if (SawUnorderedAtomic &&
2029      Alignment < MDL.getTypeStoreSize(SomePtrElemType))
2030    return false;
2031
2032  // If we couldn't prove we can hoist the load, bail.
2033  if (!DereferenceableInPH)
2034    return false;
2035
2036  // We know we can hoist the load, but don't have a guaranteed store.
2037  // Check whether the location is thread-local. If it is, then we can insert
2038  // stores along paths which originally didn't have them without violating the
2039  // memory model.
2040  if (!SafeToInsertStore) {
2041    if (IsKnownThreadLocalObject)
2042      SafeToInsertStore = true;
2043    else {
2044      Value *Object = GetUnderlyingObject(SomePtr, MDL);
2045      SafeToInsertStore =
2046          (isAllocLikeFn(Object, TLI) || isa<AllocaInst>(Object)) &&
2047          !PointerMayBeCaptured(Object, true, true);
2048    }
2049  }
2050
2051  // If we've still failed to prove we can sink the store, give up.
2052  if (!SafeToInsertStore)
2053    return false;
2054
2055  // Otherwise, this is safe to promote; let's do it!
2056  LLVM_DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtr
2057                    << '\n');
2058  ORE->emit([&]() {
2059    return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar",
2060                              LoopUses[0])
2061           << "Moving accesses to memory location out of the loop";
2062  });
2063  ++NumPromoted;
2064
2065  // Look at all the loop uses, and try to merge their locations.
2066  std::vector<const DILocation *> LoopUsesLocs;
2067  for (auto U : LoopUses)
2068    LoopUsesLocs.push_back(U->getDebugLoc().get());
2069  auto DL = DebugLoc(DILocation::getMergedLocations(LoopUsesLocs));
2070
2071  // We use the SSAUpdater interface to insert phi nodes as required.
2072  SmallVector<PHINode *, 16> NewPHIs;
2073  SSAUpdater SSA(&NewPHIs);
2074  LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks,
2075                        InsertPts, MSSAInsertPts, PIC, *CurAST, MSSAU, *LI, DL,
2076                        Alignment.value(), SawUnorderedAtomic, AATags,
2077                        *SafetyInfo);
2078
2079  // Set up the preheader to have a definition of the value.  It is the live-out
2080  // value from the preheader that the uses inside the loop will use.
2081  LoadInst *PreheaderLoad = new LoadInst(
2082      SomePtr->getType()->getPointerElementType(), SomePtr,
2083      SomePtr->getName() + ".promoted", Preheader->getTerminator());
2084  if (SawUnorderedAtomic)
2085    PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
2086  PreheaderLoad->setAlignment(Alignment);
2087  PreheaderLoad->setDebugLoc(DebugLoc());
2088  if (AATags)
2089    PreheaderLoad->setAAMetadata(AATags);
2090  SSA.AddAvailableValue(Preheader, PreheaderLoad);
2091
2092  if (MSSAU) {
2093    MemoryAccess *PreheaderLoadMemoryAccess = MSSAU->createMemoryAccessInBB(
2094        PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End);
2095    MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess);
2096    MSSAU->insertUse(NewMemUse, /*RenameUses=*/true);
2097  }
2098
2099  if (MSSAU && VerifyMemorySSA)
2100    MSSAU->getMemorySSA()->verifyMemorySSA();
2101  // Rewrite all the loads in the loop and remember all the definitions from
2102  // stores in the loop.
2103  Promoter.run(LoopUses);
2104
2105  if (MSSAU && VerifyMemorySSA)
2106    MSSAU->getMemorySSA()->verifyMemorySSA();
2107  // If the SSAUpdater didn't use the load in the preheader, just zap it now.
2108  if (PreheaderLoad->use_empty())
2109    eraseInstruction(*PreheaderLoad, *SafetyInfo, CurAST, MSSAU);
2110
2111  return true;
2112}
2113
2114/// Returns an owning pointer to an alias set which incorporates aliasing info
2115/// from L and all subloops of L.
2116std::unique_ptr<AliasSetTracker>
2117LoopInvariantCodeMotion::collectAliasInfoForLoop(Loop *L, LoopInfo *LI,
2118                                                 AAResults *AA) {
2119  auto CurAST = std::make_unique<AliasSetTracker>(*AA);
2120
2121  // Add everything from all the sub loops.
2122  for (Loop *InnerL : L->getSubLoops())
2123    for (BasicBlock *BB : InnerL->blocks())
2124      CurAST->add(*BB);
2125
2126  // And merge in this loop (without anything from inner loops).
2127  for (BasicBlock *BB : L->blocks())
2128    if (LI->getLoopFor(BB) == L)
2129      CurAST->add(*BB);
2130
2131  return CurAST;
2132}
2133
2134std::unique_ptr<AliasSetTracker>
2135LoopInvariantCodeMotion::collectAliasInfoForLoopWithMSSA(
2136    Loop *L, AAResults *AA, MemorySSAUpdater *MSSAU) {
2137  auto *MSSA = MSSAU->getMemorySSA();
2138  auto CurAST = std::make_unique<AliasSetTracker>(*AA, MSSA, L);
2139  CurAST->addAllInstructionsInLoopUsingMSSA();
2140  return CurAST;
2141}
2142
2143static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
2144                                     AliasSetTracker *CurAST, Loop *CurLoop,
2145                                     AAResults *AA) {
2146  // First check to see if any of the basic blocks in CurLoop invalidate MemLoc.
2147  bool isInvalidatedAccordingToAST = CurAST->getAliasSetFor(MemLoc).isMod();
2148
2149  if (!isInvalidatedAccordingToAST || !LICMN2Theshold)
2150    return isInvalidatedAccordingToAST;
2151
2152  // Check with a diagnostic analysis if we can refine the information above.
2153  // This is to identify the limitations of using the AST.
2154  // The alias set mechanism used by LICM has a major weakness in that it
2155  // combines all things which may alias into a single set *before* asking
2156  // modref questions. As a result, a single readonly call within a loop will
2157  // collapse all loads and stores into a single alias set and report
2158  // invalidation if the loop contains any store. For example, readonly calls
2159  // with deopt states have this form and create a general alias set with all
2160  // loads and stores.  In order to get any LICM in loops containing possible
2161  // deopt states, we need a more precise check that queries the mod/ref info
2162  // of each instruction in the loop against MemLoc. This has a complexity of
2163  // O(N^2), so currently, it is used only as a diagnostic tool since the
2164  // default value of LICMN2Threshold is zero.
2165
2166  // Don't look at nested loops.
2167  if (CurLoop->begin() != CurLoop->end())
2168    return true;
2169
2170  int N = 0;
2171  for (BasicBlock *BB : CurLoop->getBlocks())
2172    for (Instruction &I : *BB) {
2173      if (N >= LICMN2Theshold) {
2174        LLVM_DEBUG(dbgs() << "Alasing N2 threshold exhausted for "
2175                          << *(MemLoc.Ptr) << "\n");
2176        return true;
2177      }
2178      N++;
2179      auto Res = AA->getModRefInfo(&I, MemLoc);
2180      if (isModSet(Res)) {
2181        LLVM_DEBUG(dbgs() << "Aliasing failed on " << I << " for "
2182                          << *(MemLoc.Ptr) << "\n");
2183        return true;
2184      }
2185    }
2186  LLVM_DEBUG(dbgs() << "Aliasing okay for " << *(MemLoc.Ptr) << "\n");
2187  return false;
2188}
2189
2190static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
2191                                             Loop *CurLoop,
2192                                             SinkAndHoistLICMFlags &Flags) {
2193  // For hoisting, use the walker to determine safety
2194  if (!Flags.IsSink) {
2195    MemoryAccess *Source;
2196    // See declaration of SetLicmMssaOptCap for usage details.
2197    if (Flags.LicmMssaOptCounter >= Flags.LicmMssaOptCap)
2198      Source = MU->getDefiningAccess();
2199    else {
2200      Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(MU);
2201      Flags.LicmMssaOptCounter++;
2202    }
2203    return !MSSA->isLiveOnEntryDef(Source) &&
2204           CurLoop->contains(Source->getBlock());
2205  }
2206
2207  // For sinking, we'd need to check all Defs below this use. The getClobbering
2208  // call will look on the backedge of the loop, but will check aliasing with
2209  // the instructions on the previous iteration.
2210  // For example:
2211  // for (i ... )
2212  //   load a[i] ( Use (LoE)
2213  //   store a[i] ( 1 = Def (2), with 2 = Phi for the loop.
2214  //   i++;
2215  // The load sees no clobbering inside the loop, as the backedge alias check
2216  // does phi translation, and will check aliasing against store a[i-1].
2217  // However sinking the load outside the loop, below the store is incorrect.
2218
2219  // For now, only sink if all Defs in the loop precede the use and are in the
2220  // same block as the use.
2221  // FIXME: Increase precision: Safe to sink if Use post dominates the Def;
2222  // needs PostDominatorTreeAnalysis.
2223  // FIXME: More precise: no Defs that alias this Use.
2224  if (Flags.NoOfMemAccTooLarge)
2225    return true;
2226  for (auto *BB : CurLoop->getBlocks())
2227    if (auto *Accesses = MSSA->getBlockDefs(BB))
2228      for (const auto &MA : *Accesses)
2229        if (const auto *MD = dyn_cast<MemoryDef>(&MA))
2230          if (MU->getBlock() != MD->getBlock() ||
2231              !MSSA->locallyDominates(MD, MU))
2232            return true;
2233  return false;
2234}
2235
2236/// Little predicate that returns true if the specified basic block is in
2237/// a subloop of the current one, not the current one itself.
2238///
2239static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
2240  assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
2241  return LI->getLoopFor(BB) != CurLoop;
2242}
2243