//===- AggressiveInstCombine.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the aggressive expression pattern combiner classes.
// Currently, it handles expression patterns for:
//  * Truncate instruction
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h"
#include "AggressiveInstCombineInternal.h"
#include "llvm-c/Initialization.h"
#include "llvm-c/Transforms/AggressiveInstCombine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "aggressive-instcombine"

namespace {
/// Contains expression pattern combiner logic.
/// This class provides the logic to match expression patterns and combine
/// them. It differs from the InstCombiner class in that each pattern
/// combiner runs only once, as opposed to InstCombine's multi-iteration
/// scheme, which allows a pattern combiner here to have higher complexity
/// than the O(1) required by the instruction combiner.
class AggressiveInstCombinerLegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  AggressiveInstCombinerLegacyPass() : FunctionPass(ID) {
    initializeAggressiveInstCombinerLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Run all expression pattern optimizations on the given \p F function.
  ///
  /// \param F function to optimize.
  /// \returns true if the IR is changed.
  bool runOnFunction(Function &F) override;
};
} // namespace

/// Match a pattern for a bitwise rotate operation that partially guards
/// against undefined behavior by branching around the rotation when the shift
/// amount is 0.
static bool foldGuardedRotateToFunnelShift(Instruction &I) {
  if (I.getOpcode() != Instruction::PHI || I.getNumOperands() != 2)
    return false;

  // As with the one-use checks below, this is not strictly necessary, but we
  // are being cautious to avoid potential perf regressions on targets that
  // do not actually have a rotate instruction (where the funnel shift would be
  // expanded back into math/shift/logic ops).
  if (!isPowerOf2_32(I.getType()->getScalarSizeInBits()))
    return false;

  // Match V to funnel shift left/right and capture the source operand and
  // shift amount in X and Y.
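  // Returns the funnel shift intrinsic ID corresponding to the matched rotate
  // (fshl for rotate-left, fshr for rotate-right), or not_intrinsic if the
  // pattern is not found.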
  auto matchRotate = [](Value *V, Value *&X, Value *&Y) {
    Value *L0, *L1, *R0, *R1;
    unsigned Width = V->getType()->getScalarSizeInBits();
    auto Sub = m_Sub(m_SpecificInt(Width), m_Value(R1));

    // rotate_left(X, Y) == (X << Y) | (X >> (Width - Y))
    auto RotL = m_OneUse(
        m_c_Or(m_Shl(m_Value(L0), m_Value(L1)), m_LShr(m_Value(R0), Sub)));
    if (RotL.match(V) && L0 == R0 && L1 == R1) {
      X = L0;
      Y = L1;
      return Intrinsic::fshl;
    }

    // rotate_right(X, Y) == (X >> Y) | (X << (Width - Y))
    auto RotR = m_OneUse(
        m_c_Or(m_LShr(m_Value(L0), m_Value(L1)), m_Shl(m_Value(R0), Sub)));
    if (RotR.match(V) && L0 == R0 && L1 == R1) {
      X = L0;
      Y = L1;
      return Intrinsic::fshr;
    }

    return Intrinsic::not_intrinsic;
  };

  // One phi operand must be a rotate operation, and the other phi operand must
  // be the source value of that rotate operation:
  // phi [ rotate(RotSrc, RotAmt), RotBB ], [ RotSrc, GuardBB ]
  PHINode &Phi = cast<PHINode>(I);
  Value *P0 = Phi.getOperand(0), *P1 = Phi.getOperand(1);
  Value *RotSrc, *RotAmt;
  Intrinsic::ID IID = matchRotate(P0, RotSrc, RotAmt);
  if (IID == Intrinsic::not_intrinsic || RotSrc != P1) {
    IID = matchRotate(P1, RotSrc, RotAmt);
    if (IID == Intrinsic::not_intrinsic || RotSrc != P0)
      return false;
    assert((IID == Intrinsic::fshl || IID == Intrinsic::fshr) &&
           "Pattern must match funnel shift left or right");
  }

  // The incoming block with our source operand must be the "guard" block.
  // That must contain a cmp+branch to avoid the rotate when the shift amount
  // is equal to 0. The other incoming block is the block with the rotate.
  BasicBlock *GuardBB = Phi.getIncomingBlock(RotSrc == P1);
  BasicBlock *RotBB = Phi.getIncomingBlock(RotSrc != P1);
  Instruction *TermI = GuardBB->getTerminator();
  ICmpInst::Predicate Pred;
  BasicBlock *PhiBB = Phi.getParent();
  if (!match(TermI, m_Br(m_ICmp(Pred, m_Specific(RotAmt), m_ZeroInt()),
                         m_SpecificBB(PhiBB), m_SpecificBB(RotBB))))
    return false;

  if (Pred != CmpInst::ICMP_EQ)
    return false;

  // We matched a variation of this IR pattern:
  // GuardBB:
  //   %cmp = icmp eq i32 %RotAmt, 0
  //   br i1 %cmp, label %PhiBB, label %RotBB
  // RotBB:
  //   %sub = sub i32 32, %RotAmt
  //   %shr = lshr i32 %X, %sub
  //   %shl = shl i32 %X, %RotAmt
  //   %rot = or i32 %shr, %shl
  //   br label %PhiBB
  // PhiBB:
  //   %cond = phi i32 [ %rot, %RotBB ], [ %X, %GuardBB ]
  // -->
  // llvm.fshl.i32(i32 %X, i32 %X, i32 %RotAmt)
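  // The replacement is safe on both incoming edges: on the guard edge,
  // RotAmt == 0, and a funnel shift of X with itself by 0 (fshl or fshr)
  // returns X, exactly the value the phi would have selected.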
  IRBuilder<> Builder(PhiBB, PhiBB->getFirstInsertionPt());
  Function *F = Intrinsic::getDeclaration(Phi.getModule(), IID, Phi.getType());
  Phi.replaceAllUsesWith(Builder.CreateCall(F, {RotSrc, RotSrc, RotAmt}));
  return true;
}

/// This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and
/// the bit indexes (Mask) needed by a masked compare. If we're matching a chain
/// of 'and' ops, then we also need to capture the fact that we saw an
/// "and X, 1", so that's an extra return value for that case.
struct MaskOps {
  Value *Root;
  APInt Mask;
  bool MatchAndChain;
  bool FoundAnd1;

  MaskOps(unsigned BitWidth, bool MatchAnds)
      : Root(nullptr), Mask(APInt::getNullValue(BitWidth)),
        MatchAndChain(MatchAnds), FoundAnd1(false) {}
};

/// This is a recursive helper for foldAnyOrAllBitsSet() that walks through a
/// chain of 'and' or 'or' instructions looking for shift ops of a common source
/// value. Examples:
///   or (or (or X, (X >> 3)), (X >> 5)), (X >> 8)
/// returns { X, 0x129 }
///   and (and (X >> 1), 1), (X >> 4)
/// returns { X, 0x12 }
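/// (The masks have bits {0, 3, 5, 8} and {1, 4} set, respectively.)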
static bool matchAndOrChain(Value *V, MaskOps &MOps) {
  Value *Op0, *Op1;
  if (MOps.MatchAndChain) {
    // Recurse through a chain of 'and' operands. This requires an extra check
    // vs. the 'or' matcher: we must find an "and X, 1" instruction somewhere
    // in the chain to know that all of the high bits are cleared.
    if (match(V, m_And(m_Value(Op0), m_One()))) {
      MOps.FoundAnd1 = true;
      return matchAndOrChain(Op0, MOps);
    }
    if (match(V, m_And(m_Value(Op0), m_Value(Op1))))
      return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
  } else {
    // Recurse through a chain of 'or' operands.
    if (match(V, m_Or(m_Value(Op0), m_Value(Op1))))
      return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
  }

  // We need a shift-right or a bare value representing a compare of bit 0 of
  // the original source operand.
  Value *Candidate;
  uint64_t BitIndex = 0;
  if (!match(V, m_LShr(m_Value(Candidate), m_ConstantInt(BitIndex))))
    Candidate = V;

  // Initialize result source operand.
  if (!MOps.Root)
    MOps.Root = Candidate;

  // An out-of-range shift constant means this code has not been simplified;
  // bail out rather than setting a bit past the mask width.
  if (BitIndex >= MOps.Mask.getBitWidth())
    return false;

  // Fill in the mask bit derived from the shift constant, and require that
  // every candidate in the chain traces back to the same source value.
  MOps.Mask.setBit(BitIndex);
  return MOps.Root == Candidate;
}

/// Match patterns that correspond to "any-bits-set" and "all-bits-set".
/// These will include a chain of 'or' or 'and'-shifted bits from a
/// common source value:
/// and (or  (lshr X, C), ...), 1 --> (X & CMask) != 0
/// and (and (lshr X, C), ...), 1 --> (X & CMask) == CMask
/// Note: "any-bits-clear" and "all-bits-clear" are variations of these patterns
/// that differ only with a final 'not' of the result. We expect that final
/// 'not' to be folded with the compare that we create here (invert predicate).
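/// For example, "any-bits-set" with CMask = 5:
///   %s = lshr i32 %x, 2
///   %o = or i32 %s, %x
///   %r = and i32 %o, 1
/// becomes:
///   %m = and i32 %x, 5
///   %c = icmp ne i32 %m, 0
///   %r = zext i1 %c to i32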
static bool foldAnyOrAllBitsSet(Instruction &I) {
  // The 'any-bits-set' ('or' chain) pattern is simpler to match because the
  // final "and X, 1" instruction must be the final op in the sequence.
  bool MatchAllBitsSet;
  if (match(&I, m_c_And(m_OneUse(m_And(m_Value(), m_Value())), m_Value())))
    MatchAllBitsSet = true;
  else if (match(&I, m_And(m_OneUse(m_Or(m_Value(), m_Value())), m_One())))
    MatchAllBitsSet = false;
  else
    return false;

  MaskOps MOps(I.getType()->getScalarSizeInBits(), MatchAllBitsSet);
  if (MatchAllBitsSet) {
    if (!matchAndOrChain(cast<BinaryOperator>(&I), MOps) || !MOps.FoundAnd1)
      return false;
  } else {
    if (!matchAndOrChain(cast<BinaryOperator>(&I)->getOperand(0), MOps))
      return false;
  }

  // The pattern was found. Create a masked compare that replaces all of the
  // shift and logic ops.
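  // The original sequence ends in "and X, 1", so its result is 0 or 1 in the
  // full type; zext the i1 compare back to that type.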
  IRBuilder<> Builder(&I);
  Constant *Mask = ConstantInt::get(I.getType(), MOps.Mask);
  Value *And = Builder.CreateAnd(MOps.Root, Mask);
  Value *Cmp = MatchAllBitsSet ? Builder.CreateICmpEQ(And, Mask)
                               : Builder.CreateIsNotNull(And);
  Value *Zext = Builder.CreateZExt(Cmp, I.getType());
  I.replaceAllUsesWith(Zext);
  return true;
}

// Try to recognize the function below as the popcount intrinsic.
// This is the "best" algorithm from
// http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
// It is also used in TargetLowering::expandCTPOP().
//
// int popcount(unsigned int i) {
//   i = i - ((i >> 1) & 0x55555555);
//   i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
//   i = ((i + (i >> 4)) & 0x0F0F0F0F);
//   return (i * 0x01010101) >> 24;
// }
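// The first three steps sum adjacent bit-fields in parallel (2-bit, then
// 4-bit, then 8-bit fields), so each byte of i ends up holding its own bit
// count; the multiply by 0x01010101 then accumulates all of the byte counts
// into the most significant byte, which the final shift extracts.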
static bool tryToRecognizePopCount(Instruction &I) {
  if (I.getOpcode() != Instruction::LShr)
    return false;

  Type *Ty = I.getType();
  if (!Ty->isIntOrIntVectorTy())
    return false;

  unsigned Len = Ty->getScalarSizeInBits();
  // FIXME: fix Len == 8 and other irregular type lengths.
  if (!(Len <= 128 && Len > 8 && Len % 8 == 0))
    return false;

  APInt Mask55 = APInt::getSplat(Len, APInt(8, 0x55));
  APInt Mask33 = APInt::getSplat(Len, APInt(8, 0x33));
  APInt Mask0F = APInt::getSplat(Len, APInt(8, 0x0F));
  APInt Mask01 = APInt::getSplat(Len, APInt(8, 0x01));
  APInt MaskShift = APInt(Len, Len - 8);
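  // For Len == 32 these splats are 0x55555555, 0x33333333, 0x0F0F0F0F, and
  // 0x01010101, with a final shift amount of 24, exactly the constants in
  // the reference implementation above.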

  Value *Op0 = I.getOperand(0);
  Value *Op1 = I.getOperand(1);
  Value *MulOp0;
  // Matching "(i * 0x01010101...) >> 24".
  if ((match(Op0, m_Mul(m_Value(MulOp0), m_SpecificInt(Mask01)))) &&
      match(Op1, m_SpecificInt(MaskShift))) {
    Value *ShiftOp0;
    // Matching "((i + (i >> 4)) & 0x0F0F0F0F...)".
    if (match(MulOp0, m_And(m_c_Add(m_LShr(m_Value(ShiftOp0), m_SpecificInt(4)),
                                    m_Deferred(ShiftOp0)),
                            m_SpecificInt(Mask0F)))) {
      Value *AndOp0;
      // Matching "(i & 0x33333333...) + ((i >> 2) & 0x33333333...)".
      if (match(ShiftOp0,
                m_c_Add(m_And(m_Value(AndOp0), m_SpecificInt(Mask33)),
                        m_And(m_LShr(m_Deferred(AndOp0), m_SpecificInt(2)),
                              m_SpecificInt(Mask33))))) {
        Value *Root, *SubOp1;
        // Matching "i - ((i >> 1) & 0x55555555...)".
        if (match(AndOp0, m_Sub(m_Value(Root), m_Value(SubOp1))) &&
            match(SubOp1, m_And(m_LShr(m_Specific(Root), m_SpecificInt(1)),
                                m_SpecificInt(Mask55)))) {
          LLVM_DEBUG(dbgs() << "Recognized popcount intrinsic\n");
          IRBuilder<> Builder(&I);
          Function *Func = Intrinsic::getDeclaration(
              I.getModule(), Intrinsic::ctpop, I.getType());
          I.replaceAllUsesWith(Builder.CreateCall(Func, {Root}));
          return true;
        }
      }
    }
  }

  return false;
}

/// This is the entry point for folds that could be implemented in regular
/// InstCombine, but they are separated because they are not expected to
/// occur frequently and/or have more than a constant-length pattern match.
static bool foldUnusualPatterns(Function &F, DominatorTree &DT) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    // Ignore unreachable basic blocks.
    if (!DT.isReachableFromEntry(&BB))
      continue;
    // Do not delete instructions under here and invalidate the iterator.
    // Walk the block backwards for efficiency. We're matching a chain of
    // use->defs, so we're more likely to succeed by starting from the bottom.
    // Also, we want to avoid matching partial patterns.
    // TODO: It would be more efficient if we removed dead instructions
    // iteratively in this loop rather than waiting until the end.
    for (Instruction &I : make_range(BB.rbegin(), BB.rend())) {
      MadeChange |= foldAnyOrAllBitsSet(I);
      MadeChange |= foldGuardedRotateToFunnelShift(I);
      MadeChange |= tryToRecognizePopCount(I);
    }
  }

  // We're done with transforms, so remove dead instructions.
  if (MadeChange)
    for (BasicBlock &BB : F)
      SimplifyInstructionsInBlock(&BB);

  return MadeChange;
}

/// This is the entry point for all transforms. Pass manager differences are
/// handled in the callers of this function.
static bool runImpl(Function &F, TargetLibraryInfo &TLI, DominatorTree &DT) {
  bool MadeChange = false;
  const DataLayout &DL = F.getParent()->getDataLayout();
  TruncInstCombine TIC(TLI, DL, DT);
  MadeChange |= TIC.run(F);
  MadeChange |= foldUnusualPatterns(F, DT);
  return MadeChange;
}

void AggressiveInstCombinerLegacyPass::getAnalysisUsage(
    AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addPreserved<AAResultsWrapperPass>();
  AU.addPreserved<BasicAAWrapperPass>();
  AU.addPreserved<DominatorTreeWrapperPass>();
  AU.addPreserved<GlobalsAAWrapperPass>();
}

bool AggressiveInstCombinerLegacyPass::runOnFunction(Function &F) {
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  return runImpl(F, TLI, DT);
}

PreservedAnalyses AggressiveInstCombinePass::run(Function &F,
                                                 FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  if (!runImpl(F, TLI, DT)) {
    // No changes, all analyses are preserved.
    return PreservedAnalyses::all();
  }
  // Mark all the analyses that instcombine updates as preserved.
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<AAManager>();
  PA.preserve<GlobalsAA>();
  return PA;
}

char AggressiveInstCombinerLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(AggressiveInstCombinerLegacyPass,
                      "aggressive-instcombine",
                      "Combine pattern based expressions", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(AggressiveInstCombinerLegacyPass, "aggressive-instcombine",
                    "Combine pattern based expressions", false, false)

// Initialization Routines
void llvm::initializeAggressiveInstCombine(PassRegistry &Registry) {
  initializeAggressiveInstCombinerLegacyPassPass(Registry);
}

void LLVMInitializeAggressiveInstCombiner(LLVMPassRegistryRef R) {
  initializeAggressiveInstCombinerLegacyPassPass(*unwrap(R));
}

FunctionPass *llvm::createAggressiveInstCombinerPass() {
  return new AggressiveInstCombinerLegacyPass();
}

void LLVMAddAggressiveInstCombinerPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createAggressiveInstCombinerPass());
}