//===-- LoopPredication.cpp - Guard based loop predication pass -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The LoopPredication pass tries to convert loop variant range checks to loop
// invariant by widening checks across loop iterations. For example, it will
// convert
//
//   for (i = 0; i < n; i++) {
//     guard(i < len);
//     ...
//   }
//
// to
//
//   for (i = 0; i < n; i++) {
//     guard(n - 1 < len);
//     ...
//   }
//
// After this transformation the condition of the guard is loop invariant, so
// loop-unswitch can later unswitch the loop by this condition which basically
// predicates the loop by the widened condition:
//
//   if (n - 1 < len)
//     for (i = 0; i < n; i++) {
//       ...
//     }
//   else
//     deoptimize
//
// It's tempting to rely on SCEV here, but it has proven to be problematic.
// Generally the facts SCEV provides about the increment step of add
// recurrences are true if the backedge of the loop is taken, which implicitly
// assumes that the guard doesn't fail. Using these facts to optimize the
// guard results in a circular logic where the guard is optimized under the
// assumption that it never fails.
//
// For example, in the loop below the induction variable will be marked as nuw
// based on the guard. Based on nuw, the guard predicate will be considered
// monotonic. Given a monotonic condition it's tempting to replace the induction
// variable in the condition with its value on the last iteration. But this
// transformation is not correct, e.g. e = 4, b = 5 breaks the loop.
//
//   for (int i = b; i != e; i++)
//     guard(i u< len)
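//
// (To see why: with b = 5 and e = 4 the induction variable must wrap all the
// way around before it can reach e, so its value on the last iteration,
// e - 1 = 3, satisfies 3 u< len even though earlier iterations see much larger
// values of i that may not.)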
//
// One of the ways to reason about this problem is to use an inductive proof
// approach. Given the loop:
//
//   if (B(0)) {
//     do {
//       I = PHI(0, I.INC)
//       I.INC = I + Step
//       guard(G(I));
//     } while (B(I));
//   }
//
// where B(x) and G(x) are predicates that map integers to booleans, we want a
// loop invariant expression M such that the following program has the same
// semantics as the above:
//
//   if (B(0)) {
//     do {
//       I = PHI(0, I.INC)
//       I.INC = I + Step
//       guard(G(0) && M);
//     } while (B(I));
//   }
//
// One solution for M is M = forall X . (G(X) && B(X)) => G(X + Step)
//
// Informal proof that the transformation above is correct:
//
//   By the definition of guards we can rewrite the guard condition to:
//     G(I) && G(0) && M
//
//   Let's prove that for each iteration of the loop:
//     G(0) && M => G(I)
//   so that the guard condition above can be simplified to G(0) && M.
//
//   Induction base.
//     G(0) && M => G(0)
//
//   Induction step. Assume G(0) && M => G(I) holds on the current iteration;
//   we show that G(I + Step) holds on the subsequent iteration:
//
//     B(I) is true because it's the backedge condition.
//     G(I) is true because the backedge is guarded by this condition (the
//     induction hypothesis).
//
//   So M = forall X . (G(X) && B(X)) => G(X + Step), instantiated at X = I,
//   implies G(I + Step).
//
// Note that we can use anything stronger than M, i.e. any condition which
// implies M.
//
// When Step = 1 (i.e. forward iterating loop), the transformation is supported
// when:
//   * The loop has a single latch with the condition of the form:
//     B(X) = latchStart + X <pred> latchLimit,
//     where <pred> is u<, u<=, s<, or s<=.
//   * The guard condition is of the form
//     G(X) = guardStart + X u< guardLimit
//
//   For the ult latch comparison case M is:
//     forall X . guardStart + X u< guardLimit && latchStart + X u< latchLimit =>
//        guardStart + X + 1 u< guardLimit
//
//   The only way the antecedent can be true and the consequent can be false is
//   if
//     X == guardLimit - 1 - guardStart
//   (and guardLimit is non-zero, but we won't use this latter fact).
//   If X == guardLimit - 1 - guardStart then the second half of the antecedent is
//     latchStart + guardLimit - 1 - guardStart u< latchLimit
//   and its negation is
//     latchStart + guardLimit - 1 - guardStart u>= latchLimit
//
//   In other words, if
//     latchLimit u<= latchStart + guardLimit - 1 - guardStart
//   then:
//   (the ranges below are written in ConstantRange notation, where [A, B) is the
//   set for (I = A; I != B; I++ /*maywrap*/) yield(I);)
//
//      forall X . guardStart + X u< guardLimit &&
//                 latchStart + X u< latchLimit =>
//        guardStart + X + 1 u< guardLimit
//   == forall X . guardStart + X u< guardLimit &&
//                 latchStart + X u< latchStart + guardLimit - 1 - guardStart =>
//        guardStart + X + 1 u< guardLimit
//   == forall X . (guardStart + X) in [0, guardLimit) &&
//                 (latchStart + X) in [0, latchStart + guardLimit - 1 - guardStart) =>
//        (guardStart + X + 1) in [0, guardLimit)
//   == forall X . X in [-guardStart, guardLimit - guardStart) &&
//                 X in [-latchStart, guardLimit - 1 - guardStart) =>
//         X in [-guardStart - 1, guardLimit - guardStart - 1)
//   == true
//
//   So the widened condition is:
//     guardStart u< guardLimit &&
//     latchStart + guardLimit - 1 - guardStart u>= latchLimit
//   Similarly, for the ule condition the widened condition is:
//     guardStart u< guardLimit &&
//     latchStart + guardLimit - 1 - guardStart u> latchLimit
//   For the slt condition the widened condition is:
//     guardStart u< guardLimit &&
//     latchStart + guardLimit - 1 - guardStart s>= latchLimit
//   For the sle condition the widened condition is:
//     guardStart u< guardLimit &&
//     latchStart + guardLimit - 1 - guardStart s> latchLimit
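//
//   As a concrete instance of the ult case (an illustration, not part of the
//   derivation above): when both recurrences start at zero, i.e.
//   guardStart == 0 and latchStart == 0, the widened condition simplifies to
//     0 u< guardLimit && guardLimit - 1 u>= latchLimit
//   i.e. the guard holds on the first iteration and the latch condition can
//   never advance X past guardLimit - 1.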
//
// When Step = -1 (i.e. reverse iterating loop), the transformation is supported
// when:
//   * The loop has a single latch with the condition of the form:
//     B(X) = X <pred> latchLimit, where <pred> is u>, u>=, s>, or s>=.
//   * The guard condition is of the form
//     G(X) = X - 1 u< guardLimit
//
//   For the ugt latch comparison case M is:
//     forall X. X-1 u< guardLimit and X u> latchLimit => X-2 u< guardLimit
//
//   The only way the antecedent can be true and the consequent can be false is if
//     X == 1.
//   If X == 1 then the second half of the antecedent is
//     1 u> latchLimit, and its negation is latchLimit u>= 1.
//
//   So the widened condition is:
//     guardStart u< guardLimit && latchLimit u>= 1.
//   Similarly, for the sgt condition the widened condition is:
//     guardStart u< guardLimit && latchLimit s>= 1.
//   For the uge condition the widened condition is:
//     guardStart u< guardLimit && latchLimit u> 1.
//   For the sge condition the widened condition is:
//     guardStart u< guardLimit && latchLimit s> 1.
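//
//   (As a sanity check of the ugt case: if latchLimit u>= 1, then X u> latchLimit
//   implies X u>= 2, so X - 2 does not wrap; since X - 2 u< X - 1 u< guardLimit,
//   the consequent holds and M is satisfied.)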
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LoopPredication.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/GuardUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"

#define DEBUG_TYPE "loop-predication"

STATISTIC(TotalConsidered, "Number of guards considered");
STATISTIC(TotalWidened, "Number of checks widened");

using namespace llvm;

static cl::opt<bool> EnableIVTruncation("loop-predication-enable-iv-truncation",
                                        cl::Hidden, cl::init(true));

static cl::opt<bool> EnableCountDownLoop("loop-predication-enable-count-down-loop",
                                         cl::Hidden, cl::init(true));

static cl::opt<bool>
    SkipProfitabilityChecks("loop-predication-skip-profitability-checks",
                            cl::Hidden, cl::init(false));

// This is the scale factor for the latch probability. We use this during
// profitability analysis to find other exiting blocks that have a much higher
// probability of exiting the loop than exiting via the latch.
// This value should be greater than 1 for a sane profitability check.
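// For example, with the default scale of 2.0, if the latch exit has a 10%
// probability, then any other exit taken with probability greater than 20%
// marks the loop as unprofitable to predicate (see isLoopProfitableToPredicate
// below).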
static cl::opt<float> LatchExitProbabilityScale(
    "loop-predication-latch-probability-scale", cl::Hidden, cl::init(2.0),
    cl::desc("scale factor for the latch probability. Value should be greater "
             "than 1. Lower values are ignored"));

static cl::opt<bool> PredicateWidenableBranchGuards(
    "loop-predication-predicate-widenable-branches-to-deopt", cl::Hidden,
    cl::desc("Whether or not we should predicate guards "
             "expressed as widenable branches to deoptimize blocks"),
    cl::init(true));

namespace {
/// Represents an induction variable check:
///   icmp Pred, <induction variable>, <loop invariant limit>
struct LoopICmp {
  ICmpInst::Predicate Pred;
  const SCEVAddRecExpr *IV;
  const SCEV *Limit;
  LoopICmp(ICmpInst::Predicate Pred, const SCEVAddRecExpr *IV,
           const SCEV *Limit)
    : Pred(Pred), IV(IV), Limit(Limit) {}
  LoopICmp() {}
  void dump() {
    dbgs() << "LoopICmp Pred = " << Pred << ", IV = " << *IV
           << ", Limit = " << *Limit << "\n";
  }
};

class LoopPredication {
  AliasAnalysis *AA;
  DominatorTree *DT;
  ScalarEvolution *SE;
  LoopInfo *LI;
  BranchProbabilityInfo *BPI;

  Loop *L;
  const DataLayout *DL;
  BasicBlock *Preheader;
  LoopICmp LatchCheck;

  bool isSupportedStep(const SCEV* Step);
  Optional<LoopICmp> parseLoopICmp(ICmpInst *ICI);
  Optional<LoopICmp> parseLoopLatchICmp();

  /// Return an insertion point suitable for inserting a safe to speculate
  /// instruction whose only user will be 'User' which has operands 'Ops'.  A
  /// trivial result would be at the User itself, but we try to return a
  /// loop invariant location if possible.
  Instruction *findInsertPt(Instruction *User, ArrayRef<Value*> Ops);
  /// Same as above, *except* that this uses the SCEV definition of invariant
  /// which is that an expression *can be made* invariant via SCEVExpander.
  /// Thus, this version is only suitable for finding an insert point to be
  /// passed to SCEVExpander!
  Instruction *findInsertPt(Instruction *User, ArrayRef<const SCEV*> Ops);

  /// Return true if the value is known to produce a single fixed value across
  /// all iterations on which it executes.  Note that this does not imply
  /// speculation safety.  That must be established separately.
  bool isLoopInvariantValue(const SCEV* S);

  Value *expandCheck(SCEVExpander &Expander, Instruction *Guard,
                     ICmpInst::Predicate Pred, const SCEV *LHS,
                     const SCEV *RHS);

  Optional<Value *> widenICmpRangeCheck(ICmpInst *ICI, SCEVExpander &Expander,
                                        Instruction *Guard);
  Optional<Value *> widenICmpRangeCheckIncrementingLoop(LoopICmp LatchCheck,
                                                        LoopICmp RangeCheck,
                                                        SCEVExpander &Expander,
                                                        Instruction *Guard);
  Optional<Value *> widenICmpRangeCheckDecrementingLoop(LoopICmp LatchCheck,
                                                        LoopICmp RangeCheck,
                                                        SCEVExpander &Expander,
                                                        Instruction *Guard);
  unsigned collectChecks(SmallVectorImpl<Value *> &Checks, Value *Condition,
                         SCEVExpander &Expander, Instruction *Guard);
  bool widenGuardConditions(IntrinsicInst *II, SCEVExpander &Expander);
  bool widenWidenableBranchGuardConditions(BranchInst *Guard, SCEVExpander &Expander);
  // If the loop always exits through another block in the loop, we should not
  // predicate based on the latch check. For example, the latch check can be a
  // very coarse grained check and there can be more fine grained exit checks
  // within the loop. We identify such unprofitable loops through BPI.
  bool isLoopProfitableToPredicate();

  bool predicateLoopExits(Loop *L, SCEVExpander &Rewriter);

public:
  LoopPredication(AliasAnalysis *AA, DominatorTree *DT,
                  ScalarEvolution *SE, LoopInfo *LI,
                  BranchProbabilityInfo *BPI)
    : AA(AA), DT(DT), SE(SE), LI(LI), BPI(BPI) {};
  bool runOnLoop(Loop *L);
};

class LoopPredicationLegacyPass : public LoopPass {
public:
  static char ID;
  LoopPredicationLegacyPass() : LoopPass(ID) {
    initializeLoopPredicationLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<BranchProbabilityInfoWrapperPass>();
    getLoopAnalysisUsage(AU);
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;
    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    BranchProbabilityInfo &BPI =
        getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
    auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    LoopPredication LP(AA, DT, SE, LI, &BPI);
    return LP.runOnLoop(L);
  }
};

char LoopPredicationLegacyPass::ID = 0;
} // end namespace

INITIALIZE_PASS_BEGIN(LoopPredicationLegacyPass, "loop-predication",
                      "Loop predication", false, false)
INITIALIZE_PASS_DEPENDENCY(BranchProbabilityInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_END(LoopPredicationLegacyPass, "loop-predication",
                    "Loop predication", false, false)

Pass *llvm::createLoopPredicationPass() {
  return new LoopPredicationLegacyPass();
}

PreservedAnalyses LoopPredicationPass::run(Loop &L, LoopAnalysisManager &AM,
                                           LoopStandardAnalysisResults &AR,
                                           LPMUpdater &U) {
  Function *F = L.getHeader()->getParent();
  // For the new PM, we also can't use BranchProbabilityInfo as an analysis
  // pass. Function analyses need to be preserved across loop transformations
  // but BPI is not preserved, hence a newly built one is needed.
  BranchProbabilityInfo BPI(*F, AR.LI, &AR.TLI);
  LoopPredication LP(&AR.AA, &AR.DT, &AR.SE, &AR.LI, &BPI);
  if (!LP.runOnLoop(&L))
    return PreservedAnalyses::all();

  return getLoopPassPreservedAnalyses();
}

Optional<LoopICmp>
LoopPredication::parseLoopICmp(ICmpInst *ICI) {
  auto Pred = ICI->getPredicate();
  auto *LHS = ICI->getOperand(0);
  auto *RHS = ICI->getOperand(1);

  const SCEV *LHSS = SE->getSCEV(LHS);
  if (isa<SCEVCouldNotCompute>(LHSS))
    return None;
  const SCEV *RHSS = SE->getSCEV(RHS);
  if (isa<SCEVCouldNotCompute>(RHSS))
    return None;

  // Canonicalize so that RHS is the loop-invariant bound and LHS is a
  // loop-computable IV.
  if (SE->isLoopInvariant(LHSS, L)) {
    std::swap(LHS, RHS);
    std::swap(LHSS, RHSS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHSS);
  if (!AR || AR->getLoop() != L)
    return None;

  return LoopICmp(Pred, AR, RHSS);
}

Value *LoopPredication::expandCheck(SCEVExpander &Expander,
                                    Instruction *Guard,
                                    ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS) {
  Type *Ty = LHS->getType();
  assert(Ty == RHS->getType() && "expandCheck operands have different types?");

  if (SE->isLoopInvariant(LHS, L) && SE->isLoopInvariant(RHS, L)) {
    IRBuilder<> Builder(Guard);
    if (SE->isLoopEntryGuardedByCond(L, Pred, LHS, RHS))
      return Builder.getTrue();
    if (SE->isLoopEntryGuardedByCond(L, ICmpInst::getInversePredicate(Pred),
                                     LHS, RHS))
      return Builder.getFalse();
  }

  Value *LHSV = Expander.expandCodeFor(LHS, Ty, findInsertPt(Guard, {LHS}));
  Value *RHSV = Expander.expandCodeFor(RHS, Ty, findInsertPt(Guard, {RHS}));
  IRBuilder<> Builder(findInsertPt(Guard, {LHSV, RHSV}));
  return Builder.CreateICmp(Pred, LHSV, RHSV);
}


// Returns true if it's safe to truncate the IV to RangeCheckType.
// When the IV type is wider than the range operand type, we can still do loop
// predication, by generating SCEVs for the range and latch that are of the
// same type. We achieve this by generating a SCEV truncate expression for the
// latch IV. This is done iff truncation of the IV is a safe operation,
// without loss of information.
// Another way to achieve this is by generating a wider type SCEV for the
// range check operand; however, this needs a more involved check that the
// operands do not overflow. This can lead to loss of information when the
// range operand is of the form: add i32 %offset, %iv. We need to prove that
// sext(x + y) is the same as sext(x) + sext(y).
// This function returns true if we can safely represent the IV type in
// the RangeCheckType without loss of information.
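//
// For instance (illustrative values): with a latch IV of type i64 whose start
// and limit are the constants 0 and 512, both constants have fewer active bits
// than a 32-bit range check type and the predicate is monotonic, so the latch
// check can be truncated to i32 without losing information.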
static bool isSafeToTruncateWideIVType(const DataLayout &DL,
                                       ScalarEvolution &SE,
                                       const LoopICmp LatchCheck,
                                       Type *RangeCheckType) {
  if (!EnableIVTruncation)
    return false;
  assert(DL.getTypeSizeInBits(LatchCheck.IV->getType()) >
             DL.getTypeSizeInBits(RangeCheckType) &&
         "Expected latch check IV type to be larger than range check operand "
         "type!");
  // The start and end values of the IV should be known. This is to guarantee
  // that truncating the wide type will not lose information.
  auto *Limit = dyn_cast<SCEVConstant>(LatchCheck.Limit);
  auto *Start = dyn_cast<SCEVConstant>(LatchCheck.IV->getStart());
  if (!Limit || !Start)
    return false;
  // This check makes sure that the IV does not change sign during loop
  // iterations. Consider latchType = i64, LatchStart = 5, Pred = ICMP_SGE,
  // LatchEnd = 2, rangeCheckType = i32. If it's not a monotonic predicate, the
  // IV wraps around, and the truncation of the IV would lose the range of
  // iterations between 2^32 and 2^64.
  bool Increasing;
  if (!SE.isMonotonicPredicate(LatchCheck.IV, LatchCheck.Pred, Increasing))
    return false;
  // The active bits should be less than the bits in the RangeCheckType. This
  // guarantees that truncating the latch check to RangeCheckType is a safe
  // operation.
  auto RangeCheckTypeBitSize = DL.getTypeSizeInBits(RangeCheckType);
  return Start->getAPInt().getActiveBits() < RangeCheckTypeBitSize &&
         Limit->getAPInt().getActiveBits() < RangeCheckTypeBitSize;
}


// Return a LoopICmp describing a latch check equivalent to LatchCheck but with
// the requested type if safe to do so.  May involve the use of a new IV.
static Optional<LoopICmp> generateLoopLatchCheck(const DataLayout &DL,
                                                 ScalarEvolution &SE,
                                                 const LoopICmp LatchCheck,
                                                 Type *RangeCheckType) {

  auto *LatchType = LatchCheck.IV->getType();
  if (RangeCheckType == LatchType)
    return LatchCheck;
  // For now, bail out if latch type is narrower than range type.
  if (DL.getTypeSizeInBits(LatchType) < DL.getTypeSizeInBits(RangeCheckType))
    return None;
  if (!isSafeToTruncateWideIVType(DL, SE, LatchCheck, RangeCheckType))
    return None;
  // We can now safely identify the truncated version of the IV and limit for
  // RangeCheckType.
  LoopICmp NewLatchCheck;
  NewLatchCheck.Pred = LatchCheck.Pred;
  NewLatchCheck.IV = dyn_cast<SCEVAddRecExpr>(
      SE.getTruncateExpr(LatchCheck.IV, RangeCheckType));
  if (!NewLatchCheck.IV)
    return None;
  NewLatchCheck.Limit = SE.getTruncateExpr(LatchCheck.Limit, RangeCheckType);
  LLVM_DEBUG(dbgs() << "IV of type: " << *LatchType
                    << " can be represented as range check type: "
                    << *RangeCheckType << "\n");
  LLVM_DEBUG(dbgs() << "LatchCheck.IV: " << *NewLatchCheck.IV << "\n");
  LLVM_DEBUG(dbgs() << "LatchCheck.Limit: " << *NewLatchCheck.Limit << "\n");
  return NewLatchCheck;
}

bool LoopPredication::isSupportedStep(const SCEV* Step) {
  return Step->isOne() || (Step->isAllOnesValue() && EnableCountDownLoop);
}

Instruction *LoopPredication::findInsertPt(Instruction *Use,
                                           ArrayRef<Value*> Ops) {
  for (Value *Op : Ops)
    if (!L->isLoopInvariant(Op))
      return Use;
  return Preheader->getTerminator();
}

Instruction *LoopPredication::findInsertPt(Instruction *Use,
                                           ArrayRef<const SCEV*> Ops) {
  // Subtlety: SCEV considers things to be invariant if the value produced is
  // the same across iterations.  This is not the same as being able to
  // evaluate outside the loop, which is what we actually need here.
  for (const SCEV *Op : Ops)
    if (!SE->isLoopInvariant(Op, L) ||
        !isSafeToExpandAt(Op, Preheader->getTerminator(), *SE))
      return Use;
  return Preheader->getTerminator();
}

bool LoopPredication::isLoopInvariantValue(const SCEV* S) {
  // Handling expressions which produce invariant results, but *haven't* yet
  // been removed from the loop serves two important purposes.
  // 1) Most importantly, it resolves a pass ordering cycle which would
  // otherwise need us to iterate licm, loop-predication, and either
  // loop-unswitch or loop-peeling to make progress on examples with lots of
  // predicable range checks in a row.  (Since, in the general case, we can't
  // hoist the length checks until the dominating checks have been discharged
  // as we can't prove doing so is safe.)
  // 2) As a nice side effect, this exposes the value of peeling or unswitching
  // much more obviously in the IR.  Otherwise, the cost modeling for other
  // transforms would end up needing to duplicate all of this logic to model a
  // check which becomes predictable based on a modeled peel or unswitch.
  //
  // The cost of doing so in the worst case is an extra fill from the stack in
  // the loop to materialize the loop invariant test value instead of checking
  // against the original IV which is presumably in a register inside the loop.
  // Such cases are presumably rare, and hint at missing opportunities for
  // other passes.

  if (SE->isLoopInvariant(S, L))
    // Note: This is the SCEV variant, so the original Value* may be within the
    // loop even though SCEV has proven it is loop invariant.
    return true;

  // Handle a particularly important case which SCEV doesn't yet know about,
  // which shows up in range checks on arrays with immutable lengths.
  // TODO: This should be sunk inside SCEV.
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S))
    if (const auto *LI = dyn_cast<LoadInst>(U->getValue()))
      if (LI->isUnordered() && L->hasLoopInvariantOperands(LI))
        if (AA->pointsToConstantMemory(LI->getOperand(0)) ||
            LI->hasMetadata(LLVMContext::MD_invariant_load))
          return true;
  return false;
}

Optional<Value *> LoopPredication::widenICmpRangeCheckIncrementingLoop(
    LoopICmp LatchCheck, LoopICmp RangeCheck,
    SCEVExpander &Expander, Instruction *Guard) {
  auto *Ty = RangeCheck.IV->getType();
  // Generate the widened condition for the forward loop:
  //   guardStart u< guardLimit &&
  //   latchLimit <pred> guardLimit - 1 - guardStart + latchStart
  // where <pred> depends on the latch condition predicate. See the file
  // header comment for the reasoning.
  const SCEV *GuardStart = RangeCheck.IV->getStart();
  const SCEV *GuardLimit = RangeCheck.Limit;
  const SCEV *LatchStart = LatchCheck.IV->getStart();
  const SCEV *LatchLimit = LatchCheck.Limit;
  // Subtlety: We need all the values to be *invariant* across all iterations,
  // but we only need to check expansion safety for those which *aren't*
  // already guaranteed to dominate the guard.
  if (!isLoopInvariantValue(GuardStart) ||
      !isLoopInvariantValue(GuardLimit) ||
      !isLoopInvariantValue(LatchStart) ||
      !isLoopInvariantValue(LatchLimit)) {
    LLVM_DEBUG(dbgs() << "Can't expand limit check!\n");
    return None;
  }
  if (!isSafeToExpandAt(LatchStart, Guard, *SE) ||
      !isSafeToExpandAt(LatchLimit, Guard, *SE)) {
    LLVM_DEBUG(dbgs() << "Can't expand limit check!\n");
    return None;
  }

  // guardLimit - guardStart + latchStart - 1
  const SCEV *RHS =
      SE->getAddExpr(SE->getMinusSCEV(GuardLimit, GuardStart),
                     SE->getMinusSCEV(LatchStart, SE->getOne(Ty)));
  auto LimitCheckPred =
      ICmpInst::getFlippedStrictnessPredicate(LatchCheck.Pred);

  LLVM_DEBUG(dbgs() << "LHS: " << *LatchLimit << "\n");
  LLVM_DEBUG(dbgs() << "RHS: " << *RHS << "\n");
  LLVM_DEBUG(dbgs() << "Pred: " << LimitCheckPred << "\n");

  auto *LimitCheck =
      expandCheck(Expander, Guard, LimitCheckPred, LatchLimit, RHS);
  auto *FirstIterationCheck = expandCheck(Expander, Guard, RangeCheck.Pred,
                                          GuardStart, GuardLimit);
  IRBuilder<> Builder(findInsertPt(Guard, {FirstIterationCheck, LimitCheck}));
  return Builder.CreateAnd(FirstIterationCheck, LimitCheck);
}

Optional<Value *> LoopPredication::widenICmpRangeCheckDecrementingLoop(
    LoopICmp LatchCheck, LoopICmp RangeCheck,
    SCEVExpander &Expander, Instruction *Guard) {
  auto *Ty = RangeCheck.IV->getType();
  const SCEV *GuardStart = RangeCheck.IV->getStart();
  const SCEV *GuardLimit = RangeCheck.Limit;
  const SCEV *LatchStart = LatchCheck.IV->getStart();
  const SCEV *LatchLimit = LatchCheck.Limit;
  // Subtlety: We need all the values to be *invariant* across all iterations,
  // but we only need to check expansion safety for those which *aren't*
  // already guaranteed to dominate the guard.
  if (!isLoopInvariantValue(GuardStart) ||
      !isLoopInvariantValue(GuardLimit) ||
      !isLoopInvariantValue(LatchStart) ||
      !isLoopInvariantValue(LatchLimit)) {
    LLVM_DEBUG(dbgs() << "Can't expand limit check!\n");
    return None;
  }
  if (!isSafeToExpandAt(LatchStart, Guard, *SE) ||
      !isSafeToExpandAt(LatchLimit, Guard, *SE)) {
    LLVM_DEBUG(dbgs() << "Can't expand limit check!\n");
    return None;
  }
  // The decrement of the latch check IV should be the same as the
  // rangeCheckIV.
  auto *PostDecLatchCheckIV = LatchCheck.IV->getPostIncExpr(*SE);
  if (RangeCheck.IV != PostDecLatchCheckIV) {
    LLVM_DEBUG(dbgs() << "Not the same. PostDecLatchCheckIV: "
                      << *PostDecLatchCheckIV
                      << "  and RangeCheckIV: " << *RangeCheck.IV << "\n");
    return None;
  }

  // Generate the widened condition for CountDownLoop:
  // guardStart u< guardLimit &&
  // latchLimit <pred> 1.
  // See the file header comment for the reasoning behind these checks.
  auto LimitCheckPred =
      ICmpInst::getFlippedStrictnessPredicate(LatchCheck.Pred);
  auto *FirstIterationCheck = expandCheck(Expander, Guard,
                                          ICmpInst::ICMP_ULT,
                                          GuardStart, GuardLimit);
  auto *LimitCheck = expandCheck(Expander, Guard, LimitCheckPred, LatchLimit,
                                 SE->getOne(Ty));
  IRBuilder<> Builder(findInsertPt(Guard, {FirstIterationCheck, LimitCheck}));
  return Builder.CreateAnd(FirstIterationCheck, LimitCheck);
}

static void normalizePredicate(ScalarEvolution *SE, Loop *L,
                               LoopICmp& RC) {
  // LFTR canonicalizes checks to the ICMP_NE/EQ form; normalize back to the
  // ULT/UGE form for ease of handling by our caller.
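  // For example, a latch test of the form "iv != limit", where the IV steps
  // by one from a start value known to be u<= limit, keeps looping exactly
  // while iv u< limit, so ICMP_NE is treated as ICMP_ULT (and ICMP_EQ as
  // ICMP_UGE).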
  if (ICmpInst::isEquality(RC.Pred) &&
      RC.IV->getStepRecurrence(*SE)->isOne() &&
      SE->isKnownPredicate(ICmpInst::ICMP_ULE, RC.IV->getStart(), RC.Limit))
    RC.Pred = RC.Pred == ICmpInst::ICMP_NE ?
      ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE;
}


/// If ICI can be widened to a loop invariant condition, emit the loop
/// invariant condition in the loop preheader and return it; otherwise
/// return None.
Optional<Value *> LoopPredication::widenICmpRangeCheck(ICmpInst *ICI,
                                                       SCEVExpander &Expander,
                                                       Instruction *Guard) {
  LLVM_DEBUG(dbgs() << "Analyzing ICmpInst condition:\n");
  LLVM_DEBUG(ICI->dump());

  // parseLoopLatchICmp guarantees that the latch condition is:
  //   ++i <pred> latchLimit, where <pred> is u<, u<=, s<, or s<=.
  // We are looking for range checks of the form:
  //   i u< guardLimit
  auto RangeCheck = parseLoopICmp(ICI);
  if (!RangeCheck) {
    LLVM_DEBUG(dbgs() << "Failed to parse the range check condition!\n");
    return None;
  }
  LLVM_DEBUG(dbgs() << "Guard check:\n");
  LLVM_DEBUG(RangeCheck->dump());
  if (RangeCheck->Pred != ICmpInst::ICMP_ULT) {
    LLVM_DEBUG(dbgs() << "Unsupported range check predicate("
                      << RangeCheck->Pred << ")!\n");
    return None;
  }
  auto *RangeCheckIV = RangeCheck->IV;
  if (!RangeCheckIV->isAffine()) {
    LLVM_DEBUG(dbgs() << "Range check IV is not affine!\n");
    return None;
  }
  auto *Step = RangeCheckIV->getStepRecurrence(*SE);
  // We cannot just compare with the latch IV step because the latch and range
  // IVs may have different types.
  if (!isSupportedStep(Step)) {
    LLVM_DEBUG(dbgs() << "Range check and latch IVs have different steps!\n");
    return None;
  }
  auto *Ty = RangeCheckIV->getType();
  auto CurrLatchCheckOpt = generateLoopLatchCheck(*DL, *SE, LatchCheck, Ty);
  if (!CurrLatchCheckOpt) {
    LLVM_DEBUG(dbgs() << "Failed to generate a loop latch check "
                         "corresponding to range type: "
                      << *Ty << "\n");
    return None;
  }

  LoopICmp CurrLatchCheck = *CurrLatchCheckOpt;
  // At this point, the range and latch step should have the same type, but
  // need not have the same value (we support both 1 and -1 steps).
  assert(Step->getType() ==
             CurrLatchCheck.IV->getStepRecurrence(*SE)->getType() &&
         "Range and latch steps should be of same type!");
  if (Step != CurrLatchCheck.IV->getStepRecurrence(*SE)) {
    LLVM_DEBUG(dbgs() << "Range and latch have different step values!\n");
    return None;
  }

  if (Step->isOne())
    return widenICmpRangeCheckIncrementingLoop(CurrLatchCheck, *RangeCheck,
                                               Expander, Guard);
  else {
    assert(Step->isAllOnesValue() && "Step should be -1!");
    return widenICmpRangeCheckDecrementingLoop(CurrLatchCheck, *RangeCheck,
                                               Expander, Guard);
  }
}

unsigned LoopPredication::collectChecks(SmallVectorImpl<Value *> &Checks,
                                        Value *Condition,
                                        SCEVExpander &Expander,
                                        Instruction *Guard) {
  unsigned NumWidened = 0;
  // The guard condition is expected to be in the form:
  //   cond1 && cond2 && cond3 ...
  // Iterate over the subconditions looking for icmp conditions which can be
  // widened across loop iterations. While widening these conditions, remember
  // the resulting list of subconditions in the Checks vector.
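  // For example, given a guard condition of the form ((c1 && c2) && WC()),
  // the traversal below visits c1 and c2 individually (widening each icmp it
  // can), and re-appends a single widenable condition call at the end.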
  SmallVector<Value *, 4> Worklist(1, Condition);
  SmallPtrSet<Value *, 4> Visited;
  Value *WideableCond = nullptr;
  do {
    Value *Condition = Worklist.pop_back_val();
    if (!Visited.insert(Condition).second)
      continue;

    Value *LHS, *RHS;
    using namespace llvm::PatternMatch;
    if (match(Condition, m_And(m_Value(LHS), m_Value(RHS)))) {
      Worklist.push_back(LHS);
      Worklist.push_back(RHS);
      continue;
    }

    if (match(Condition,
              m_Intrinsic<Intrinsic::experimental_widenable_condition>())) {
      // Pick any, we don't care which
      WideableCond = Condition;
      continue;
    }

    if (ICmpInst *ICI = dyn_cast<ICmpInst>(Condition)) {
      if (auto NewRangeCheck = widenICmpRangeCheck(ICI, Expander,
                                                   Guard)) {
        Checks.push_back(NewRangeCheck.getValue());
        NumWidened++;
        continue;
      }
    }

    // Save the condition as is if we can't widen it
    Checks.push_back(Condition);
  } while (!Worklist.empty());
  // At the moment, our matching logic for wideable conditions implicitly
  // assumes we preserve the form: (br (and Cond, WC())).  FIXME
  // Note that if there were multiple calls to wideable condition in the
  // traversal, we only need to keep one, and which one is arbitrary.
  if (WideableCond)
    Checks.push_back(WideableCond);
  return NumWidened;
}

bool LoopPredication::widenGuardConditions(IntrinsicInst *Guard,
                                           SCEVExpander &Expander) {
  LLVM_DEBUG(dbgs() << "Processing guard:\n");
  LLVM_DEBUG(Guard->dump());

  TotalConsidered++;
  SmallVector<Value *, 4> Checks;
  unsigned NumWidened = collectChecks(Checks, Guard->getOperand(0), Expander,
                                      Guard);
  if (NumWidened == 0)
    return false;

  TotalWidened += NumWidened;

  // Emit the new guard condition
  IRBuilder<> Builder(findInsertPt(Guard, Checks));
  Value *AllChecks = Builder.CreateAnd(Checks);
  auto *OldCond = Guard->getOperand(0);
  Guard->setOperand(0, AllChecks);
  RecursivelyDeleteTriviallyDeadInstructions(OldCond);

  LLVM_DEBUG(dbgs() << "Widened checks = " << NumWidened << "\n");
  return true;
}

bool LoopPredication::widenWidenableBranchGuardConditions(
    BranchInst *BI, SCEVExpander &Expander) {
  assert(isGuardAsWidenableBranch(BI) && "Must be!");
  LLVM_DEBUG(dbgs() << "Processing guard:\n");
  LLVM_DEBUG(BI->dump());

  TotalConsidered++;
  SmallVector<Value *, 4> Checks;
  unsigned NumWidened = collectChecks(Checks, BI->getCondition(),
                                      Expander, BI);
  if (NumWidened == 0)
    return false;

  TotalWidened += NumWidened;

  // Emit the new guard condition
  IRBuilder<> Builder(findInsertPt(BI, Checks));
  Value *AllChecks = Builder.CreateAnd(Checks);
  auto *OldCond = BI->getCondition();
  BI->setCondition(AllChecks);
  RecursivelyDeleteTriviallyDeadInstructions(OldCond);
  assert(isGuardAsWidenableBranch(BI) &&
         "Stopped being a guard after transform?");

  LLVM_DEBUG(dbgs() << "Widened checks = " << NumWidened << "\n");
  return true;
}

Optional<LoopICmp> LoopPredication::parseLoopLatchICmp() {
  using namespace PatternMatch;

  BasicBlock *LoopLatch = L->getLoopLatch();
  if (!LoopLatch) {
    LLVM_DEBUG(dbgs() << "The loop doesn't have a single latch!\n");
    return None;
  }

  auto *BI = dyn_cast<BranchInst>(LoopLatch->getTerminator());
  if (!BI || !BI->isConditional()) {
    LLVM_DEBUG(dbgs() << "Failed to match the latch terminator!\n");
    return None;
  }
  BasicBlock *TrueDest = BI->getSuccessor(0);
  assert(
      (TrueDest == L->getHeader() || BI->getSuccessor(1) == L->getHeader()) &&
      "One of the latch's destinations must be the header");

  auto *ICI = dyn_cast<ICmpInst>(BI->getCondition());
  if (!ICI) {
    LLVM_DEBUG(dbgs() << "Failed to match the latch condition!\n");
    return None;
  }
  auto Result = parseLoopICmp(ICI);
  if (!Result) {
    LLVM_DEBUG(dbgs() << "Failed to parse the loop latch condition!\n");
    return None;
  }

  if (TrueDest != L->getHeader())
    Result->Pred = ICmpInst::getInversePredicate(Result->Pred);

  // Check affine first, so if it's not we don't try to compute the step
  // recurrence.
  if (!Result->IV->isAffine()) {
    LLVM_DEBUG(dbgs() << "The induction variable is not affine!\n");
    return None;
  }

  auto *Step = Result->IV->getStepRecurrence(*SE);
  if (!isSupportedStep(Step)) {
    LLVM_DEBUG(dbgs() << "Unsupported loop stride(" << *Step << ")!\n");
    return None;
  }

  auto IsUnsupportedPredicate = [](const SCEV *Step, ICmpInst::Predicate Pred) {
    if (Step->isOne()) {
      return Pred != ICmpInst::ICMP_ULT && Pred != ICmpInst::ICMP_SLT &&
             Pred != ICmpInst::ICMP_ULE && Pred != ICmpInst::ICMP_SLE;
    } else {
      assert(Step->isAllOnesValue() && "Step should be -1!");
      return Pred != ICmpInst::ICMP_UGT && Pred != ICmpInst::ICMP_SGT &&
             Pred != ICmpInst::ICMP_UGE && Pred != ICmpInst::ICMP_SGE;
    }
  };

  normalizePredicate(SE, L, *Result);
  if (IsUnsupportedPredicate(Step, Result->Pred)) {
    LLVM_DEBUG(dbgs() << "Unsupported loop latch predicate(" << Result->Pred
                      << ")!\n");
    return None;
  }

  return Result;
}


bool LoopPredication::isLoopProfitableToPredicate() {
  if (SkipProfitabilityChecks || !BPI)
    return true;

  SmallVector<std::pair<BasicBlock *, BasicBlock *>, 8> ExitEdges;
  L->getExitEdges(ExitEdges);
  // If there is only one exiting edge in the loop, it is always profitable to
  // predicate the loop.
  if (ExitEdges.size() == 1)
    return true;

  // Calculate the exiting probabilities of all exiting edges from the loop,
  // starting with the LatchExitProbability.
  // Heuristic for profitability: If any exiting block's probability of exiting
  // the loop is larger than that of exiting through the latch block, it's not
  // profitable to predicate the loop.
  auto *LatchBlock = L->getLoopLatch();
  assert(LatchBlock && "Should have a single latch at this point!");
  auto *LatchTerm = LatchBlock->getTerminator();
  assert(LatchTerm->getNumSuccessors() == 2 &&
         "expected to be an exiting block with 2 succs!");
  unsigned LatchBrExitIdx =
      LatchTerm->getSuccessor(0) == L->getHeader() ? 1 : 0;
  BranchProbability LatchExitProbability =
      BPI->getEdgeProbability(LatchBlock, LatchBrExitIdx);

  // Protect against degenerate inputs provided by the user. Providing a value
  // less than one can invert the definition of profitable loop predication.
  float ScaleFactor = LatchExitProbabilityScale;
  if (ScaleFactor < 1) {
    LLVM_DEBUG(
        dbgs()
        << "Ignored user setting for loop-predication-latch-probability-scale: "
        << LatchExitProbabilityScale << "\n");
    LLVM_DEBUG(dbgs() << "The value is set to 1.0\n");
    ScaleFactor = 1.0;
  }
  const auto LatchProbabilityThreshold =
      LatchExitProbability * ScaleFactor;

  for (const auto &ExitEdge : ExitEdges) {
    BranchProbability ExitingBlockProbability =
        BPI->getEdgeProbability(ExitEdge.first, ExitEdge.second);
    // Some exiting edge has higher probability than the latch exiting edge.
    // No longer profitable to predicate.
    if (ExitingBlockProbability > LatchProbabilityThreshold)
      return false;
  }
  // Using BPI, we have concluded that the most probable way to exit from the
  // loop is through the latch (or there's no profile information and all
  // exits are equally likely).
  return true;
}

/// If we can (cheaply) find a widenable branch which controls entry into the
/// loop, return it.
static BranchInst *FindWidenableTerminatorAboveLoop(Loop *L, LoopInfo &LI) {
  // Walk back through any unconditionally executed blocks and see if we can
  // find a widenable condition which seems to control execution of this loop.
  // Note that we predict that maythrow calls are likely untaken and thus that
  // it's profitable to widen a branch before a maythrow call with a condition
  // afterwards even though that may cause the slow path to run in a case where
  // it wouldn't have otherwise.
  BasicBlock *BB = L->getLoopPreheader();
  if (!BB)
    return nullptr;
  do {
    if (BasicBlock *Pred = BB->getSinglePredecessor())
      if (BB == Pred->getSingleSuccessor()) {
        BB = Pred;
        continue;
      }
    break;
  } while (true);

  if (BasicBlock *Pred = BB->getSinglePredecessor()) {
    auto *Term = Pred->getTerminator();

    Value *Cond, *WC;
    BasicBlock *IfTrueBB, *IfFalseBB;
    if (parseWidenableBranch(Term, Cond, WC, IfTrueBB, IfFalseBB) &&
        IfTrueBB == BB)
      return cast<BranchInst>(Term);
  }
  return nullptr;
}

/// Return the minimum of all analyzeable exit counts.  This is an upper bound
/// on the actual exit count.  If there are not at least two analyzeable exits,
/// returns SCEVCouldNotCompute.
static const SCEV *getMinAnalyzeableBackedgeTakenCount(ScalarEvolution &SE,
                                                       DominatorTree &DT,
                                                       Loop *L) {
  SmallVector<BasicBlock *, 16> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  SmallVector<const SCEV *, 4> ExitCounts;
  for (BasicBlock *ExitingBB : ExitingBlocks) {
    const SCEV *ExitCount = SE.getExitCount(L, ExitingBB);
    if (isa<SCEVCouldNotCompute>(ExitCount))
      continue;
    assert(DT.dominates(ExitingBB, L->getLoopLatch()) &&
           "We should only have known counts for exiting blocks that "
           "dominate latch!");
    ExitCounts.push_back(ExitCount);
  }
  if (ExitCounts.size() < 2)
    return SE.getCouldNotCompute();
  return SE.getUMinFromMismatchedTypes(ExitCounts);
}

/// This implements an analogous, but entirely distinct transform from the main
/// loop predication transform.  This one is phrased in terms of using a
/// widenable branch *outside* the loop to allow us to simplify loop exits in a
/// following loop.  This is close in spirit to the IndVarSimplify transform
/// of the same name, but is materially different in that widening loosens
/// the legality requirements sharply.
bool LoopPredication::predicateLoopExits(Loop *L, SCEVExpander &Rewriter) {
  // The transformation performed here aims to widen a widenable condition
  // above the loop such that all analyzeable exits leading to deopt are dead.
  // It assumes that the latch is the dominant exit for profitability and that
  // exits branching to deoptimizing blocks are rarely taken. It relies on the
  // semantics of widenable expressions for legality. (i.e. being able to fall
  // down the widenable path spuriously allows us to ignore exit order,
  // unanalyzeable exits, side effects, exceptional exits, and other challenges
  // which restrict the applicability of the non-WC based version of this
  // transform in IndVarSimplify.)
  //
  // NOTE ON POISON/UNDEF - We're hoisting an expression above guards which may
  // imply flags on the expression being hoisted and inserting new uses (flags
  // are only correct for current uses).  The result is that we may be
  // inserting a branch on the value which can be either poison or undef.  In
  // this case, the branch can legally go either way; we just need to avoid
  // introducing UB.  This is achieved through the use of the freeze
  // instruction.

  SmallVector<BasicBlock *, 16> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  if (ExitingBlocks.empty())
    return false; // Nothing to do.

  auto *Latch = L->getLoopLatch();
  if (!Latch)
    return false;

  auto *WidenableBR = FindWidenableTerminatorAboveLoop(L, *LI);
  if (!WidenableBR)
    return false;

  const SCEV *LatchEC = SE->getExitCount(L, Latch);
  if (isa<SCEVCouldNotCompute>(LatchEC))
    return false; // profitability - want hot exit in analyzeable set

  // At this point, we have found an analyzeable latch, and a widenable
  // condition above the loop.  If we have a widenable exit within the loop
  // (for which we can't compute exit counts), drop the ability to further
  // widen so that we gain the ability to analyze its exit count and perform
  // this transform.  TODO: It'd be nice to know for sure the exit became
  // analyzeable after dropping widenability.
  {
    bool Invalidate = false;

    for (auto *ExitingBB : ExitingBlocks) {
      if (LI->getLoopFor(ExitingBB) != L)
        continue;

      auto *BI = dyn_cast<BranchInst>(ExitingBB->getTerminator());
      if (!BI)
        continue;

      Use *Cond, *WC;
      BasicBlock *IfTrueBB, *IfFalseBB;
      if (parseWidenableBranch(BI, Cond, WC, IfTrueBB, IfFalseBB) &&
          L->contains(IfTrueBB)) {
        WC->set(ConstantInt::getTrue(IfTrueBB->getContext()));
        Invalidate = true;
      }
    }
    if (Invalidate)
      SE->forgetLoop(L);
  }

  // The use of umin(all analyzeable exits) instead of latch is subtle, but
  // important for profitability.  We may have a loop which hasn't been fully
  // canonicalized just yet.  If the exit we chose to widen is provably never
  // taken, we want the widened form to *also* be provably never taken.  We
  // can't guarantee this as a current unanalyzeable exit may later become
  // analyzeable, but we can at least avoid the obvious cases.
  const SCEV *MinEC = getMinAnalyzeableBackedgeTakenCount(*SE, *DT, L);
  if (isa<SCEVCouldNotCompute>(MinEC) || MinEC->getType()->isPointerTy() ||
      !SE->isLoopInvariant(MinEC, L) ||
      !isSafeToExpandAt(MinEC, WidenableBR, *SE))
    return false;

  // Subtlety: We need to avoid inserting additional uses of the WC.  We know
  // that it can only have one transitive use at the moment, and thus moving
  // that use to just before the branch and inserting code before it and then
  // modifying the operand is legal.
  auto *IP = cast<Instruction>(WidenableBR->getCondition());
  IP->moveBefore(WidenableBR);
  Rewriter.setInsertPoint(IP);
  IRBuilder<> B(IP);

  bool Changed = false;
  Value *MinECV = nullptr; // lazily generated if needed
  for (BasicBlock *ExitingBB : ExitingBlocks) {
    // If our exiting block exits multiple loops, we can only rewrite the
    // innermost one.  Otherwise, we're changing how many times the innermost
    // loop runs before it exits.
    if (LI->getLoopFor(ExitingBB) != L)
      continue;

    // Can't rewrite non-branch yet.
    auto *BI = dyn_cast<BranchInst>(ExitingBB->getTerminator());
    if (!BI)
      continue;

    // If already constant, nothing to do.
    if (isa<Constant>(BI->getCondition()))
      continue;

    const SCEV *ExitCount = SE->getExitCount(L, ExitingBB);
    if (isa<SCEVCouldNotCompute>(ExitCount) ||
        ExitCount->getType()->isPointerTy() ||
        !isSafeToExpandAt(ExitCount, WidenableBR, *SE))
      continue;

    const bool ExitIfTrue = !L->contains(*succ_begin(ExitingBB));
    BasicBlock *ExitBB = BI->getSuccessor(ExitIfTrue ? 0 : 1);
    if (!ExitBB->getPostdominatingDeoptimizeCall())
      continue;

    /// Here we can be fairly sure that executing this exit will most likely
    /// lead to executing llvm.experimental.deoptimize.
    /// This is a profitability heuristic, not a legality constraint.

    // If we found a widenable exit condition, do two things:
    // 1) fold the widened exit test into the widenable condition
    // 2) fold the branch to untaken - avoids infinite looping

    Value *ECV = Rewriter.expandCodeFor(ExitCount);
    if (!MinECV)
      MinECV = Rewriter.expandCodeFor(MinEC);
    Value *RHS = MinECV;
    if (ECV->getType() != RHS->getType()) {
      Type *WiderTy = SE->getWiderType(ECV->getType(), RHS->getType());
      ECV = B.CreateZExt(ECV, WiderTy);
      RHS = B.CreateZExt(RHS, WiderTy);
    }
    assert(!Latch || DT->dominates(ExitingBB, Latch));
    Value *NewCond = B.CreateICmp(ICmpInst::ICMP_UGT, ECV, RHS);
    // Freeze poison or undef to an arbitrary bit pattern to ensure we can
    // branch without introducing UB.  See NOTE ON POISON/UNDEF above for
    // context.
    NewCond = B.CreateFreeze(NewCond);

    widenWidenableBranch(WidenableBR, NewCond);

    Value *OldCond = BI->getCondition();
    BI->setCondition(ConstantInt::get(OldCond->getType(), !ExitIfTrue));
    Changed = true;
  }

  if (Changed)
    // We just mutated a bunch of loop exits changing their exit counts
    // widely.  We need to force recomputation of the exit counts given these
    // changes.  Note that all of the inserted exits are never taken, and
    // should be removed next time the CFG is modified.
    SE->forgetLoop(L);
  return Changed;
}

bool LoopPredication::runOnLoop(Loop *Loop) {
  L = Loop;

  LLVM_DEBUG(dbgs() << "Analyzing ");
  LLVM_DEBUG(L->dump());

  Module *M = L->getHeader()->getModule();

  // There is nothing to do if the module doesn't use guards
  auto *GuardDecl =
      M->getFunction(Intrinsic::getName(Intrinsic::experimental_guard));
  bool HasIntrinsicGuards = GuardDecl && !GuardDecl->use_empty();
  auto *WCDecl = M->getFunction(
      Intrinsic::getName(Intrinsic::experimental_widenable_condition));
  bool HasWidenableConditions =
      PredicateWidenableBranchGuards && WCDecl && !WCDecl->use_empty();
  if (!HasIntrinsicGuards && !HasWidenableConditions)
    return false;

  DL = &M->getDataLayout();

  Preheader = L->getLoopPreheader();
  if (!Preheader)
    return false;

  auto LatchCheckOpt = parseLoopLatchICmp();
  if (!LatchCheckOpt)
    return false;
  LatchCheck = *LatchCheckOpt;

  LLVM_DEBUG(dbgs() << "Latch check:\n");
  LLVM_DEBUG(LatchCheck.dump());

  if (!isLoopProfitableToPredicate()) {
    LLVM_DEBUG(dbgs() << "Loop not profitable to predicate!\n");
    return false;
  }
  // Collect all the guards into a vector and process later, so as not
  // to invalidate the instruction iterator.
  SmallVector<IntrinsicInst *, 4> Guards;
  SmallVector<BranchInst *, 4> GuardsAsWidenableBranches;
  for (const auto BB : L->blocks()) {
    for (auto &I : *BB)
      if (isGuard(&I))
        Guards.push_back(cast<IntrinsicInst>(&I));
    if (PredicateWidenableBranchGuards &&
        isGuardAsWidenableBranch(BB->getTerminator()))
      GuardsAsWidenableBranches.push_back(
          cast<BranchInst>(BB->getTerminator()));
  }

  SCEVExpander Expander(*SE, *DL, "loop-predication");
  bool Changed = false;
  for (auto *Guard : Guards)
    Changed |= widenGuardConditions(Guard, Expander);
  for (auto *Guard : GuardsAsWidenableBranches)
    Changed |= widenWidenableBranchGuardConditions(Guard, Expander);
  Changed |= predicateLoopExits(L, Expander);
  return Changed;
}