//===-- X86PartialReduction.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass looks for add instructions used by a horizontal reduction to see
// if we might be able to use pmaddwd or psadbw. Some cases of this require
// cross basic block knowledge and can't be done in SelectionDAG.
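//
// For example, a vectorized loop that accumulates products of i16 (or i8)
// values, or absolute differences of i8 values, into a vector that is
// horizontally reduced after the loop can use pmaddwd or psadbw to perform
// part of the reduction inside the loop.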
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
#include "X86TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "x86-partial-reduction"

namespace {

class X86PartialReduction : public FunctionPass {
  const DataLayout *DL;
  const X86Subtarget *ST;

public:
  static char ID; // Pass identification, replacement for typeid.

  X86PartialReduction() : FunctionPass(ID) { }

  bool runOnFunction(Function &Fn) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
  }

  StringRef getPassName() const override {
    return "X86 Partial Reduction";
  }

private:
  bool tryMAddReplacement(Instruction *Op);
  bool trySADReplacement(Instruction *Op);
};
}

FunctionPass *llvm::createX86PartialReductionPass() {
  return new X86PartialReduction();
}

char X86PartialReduction::ID = 0;

INITIALIZE_PASS(X86PartialReduction, DEBUG_TYPE,
                "X86 Partial Reduction", false, false)

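// Attempt to replace a vXi32 multiply feeding the reduction with a pattern
// that SelectionDAG can match to PMADDWD. PMADDWD multiplies adjacent pairs of
// i16 elements and adds each pair of products into a single i32 lane, so both
// multiply operands must be representable as signed 16-bit values for the
// products to match.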
bool X86PartialReduction::tryMAddReplacement(Instruction *Op) {
  if (!ST->hasSSE2())
    return false;

  // Need at least 8 elements.
  if (cast<FixedVectorType>(Op->getType())->getNumElements() < 8)
    return false;

  // Element type should be i32.
  if (!cast<VectorType>(Op->getType())->getElementType()->isIntegerTy(32))
    return false;

  auto *Mul = dyn_cast<BinaryOperator>(Op);
  if (!Mul || Mul->getOpcode() != Instruction::Mul)
    return false;

  Value *LHS = Mul->getOperand(0);
  Value *RHS = Mul->getOperand(1);

  // LHS and RHS should only be used once, or if they are the same then only
  // used twice. Only check this when SSE4.1 is enabled and we have zext/sext
  // instructions, otherwise we use punpck to emulate zero extend in stages and
  // the truncation we need to do likely won't introduce new instructions in
  // that case.
  if (ST->hasSSE41()) {
    if (LHS == RHS) {
      if (!isa<Constant>(LHS) && !LHS->hasNUses(2))
        return false;
    } else {
      if (!isa<Constant>(LHS) && !LHS->hasOneUse())
        return false;
      if (!isa<Constant>(RHS) && !RHS->hasOneUse())
        return false;
    }
  }

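  // An operand is considered shrinkable if it can be truncated to 16 bits
  // without extra instructions: it is either a constant or a sext/zext from an
  // i16-or-narrower type in the same block (possibly behind an add/sub of such
  // values), and it has more than 16 sign bits.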
  auto CanShrinkOp = [&](Value *Op) {
    auto IsFreeTruncation = [&](Value *Op) {
      if (auto *Cast = dyn_cast<CastInst>(Op)) {
        if (Cast->getParent() == Mul->getParent() &&
            (Cast->getOpcode() == Instruction::SExt ||
             Cast->getOpcode() == Instruction::ZExt) &&
            Cast->getOperand(0)->getType()->getScalarSizeInBits() <= 16)
          return true;
      }

      return isa<Constant>(Op);
    };

    // If the operation can be freely truncated and has enough sign bits we
    // can shrink.
    if (IsFreeTruncation(Op) &&
        ComputeNumSignBits(Op, *DL, 0, nullptr, Mul) > 16)
      return true;

    // SelectionDAG has limited support for truncating through an add or sub if
    // the inputs are freely truncatable.
    if (auto *BO = dyn_cast<BinaryOperator>(Op)) {
      if (BO->getParent() == Mul->getParent() &&
          IsFreeTruncation(BO->getOperand(0)) &&
          IsFreeTruncation(BO->getOperand(1)) &&
          ComputeNumSignBits(Op, *DL, 0, nullptr, Mul) > 16)
        return true;
    }

    return false;
  };

  // Ideally both operands would be shrinkable, but bail only if neither is.
  if (!CanShrinkOp(LHS) && !CanShrinkOp(RHS))
    return false;

  IRBuilder<> Builder(Mul);

  auto *MulTy = cast<FixedVectorType>(Op->getType());
  unsigned NumElts = MulTy->getNumElements();

  // Extract even elements and odd elements and add them together. This will
  // be pattern matched by SelectionDAG to pmaddwd. The resulting add will be
  // half the original width.
  SmallVector<int, 16> EvenMask(NumElts / 2);
  SmallVector<int, 16> OddMask(NumElts / 2);
  for (int i = 0, e = NumElts / 2; i != e; ++i) {
    EvenMask[i] = i * 2;
    OddMask[i] = i * 2 + 1;
  }
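  // For example, with NumElts == 8: EvenMask = <0, 2, 4, 6> and
  // OddMask = <1, 3, 5, 7>.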
  // Creating a new mul so the replaceAllUsesWith below doesn't replace the
  // uses in the shuffles we're creating.
  Value *NewMul = Builder.CreateMul(Mul->getOperand(0), Mul->getOperand(1));
  Value *EvenElts = Builder.CreateShuffleVector(NewMul, NewMul, EvenMask);
  Value *OddElts = Builder.CreateShuffleVector(NewMul, NewMul, OddMask);
  Value *MAdd = Builder.CreateAdd(EvenElts, OddElts);

  // Concatenate zeroes to extend back to the original type.
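  // The first NumElts/2 mask indices select MAdd's lanes; the remaining
  // indices land in the zero vector operand, widening the result back to
  // NumElts elements.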
  SmallVector<int, 32> ConcatMask(NumElts);
  std::iota(ConcatMask.begin(), ConcatMask.end(), 0);
  Value *Zero = Constant::getNullValue(MAdd->getType());
  Value *Concat = Builder.CreateShuffleVector(MAdd, Zero, ConcatMask);

  Mul->replaceAllUsesWith(Concat);
  Mul->eraseFromParent();

  return true;
}

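// Attempt to replace an abs(sub(zext, zext)) feeding the reduction with a
// pattern that SelectionDAG can match to PSADBW. PSADBW computes the sum of
// absolute differences of each group of eight unsigned bytes, producing one
// i64 result per group.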
bool X86PartialReduction::trySADReplacement(Instruction *Op) {
  if (!ST->hasSSE2())
    return false;

  // TODO: There's nothing special about i32, any integer type above i16 should
  // work just as well.
  if (!cast<VectorType>(Op->getType())->getElementType()->isIntegerTy(32))
    return false;

  // Operand should be a select.
  auto *SI = dyn_cast<SelectInst>(Op);
  if (!SI)
    return false;

  // Select needs to implement absolute value.
  Value *LHS, *RHS;
  auto SPR = matchSelectPattern(SI, LHS, RHS);
  if (SPR.Flavor != SPF_ABS)
    return false;

  // Need a subtract of two values.
  auto *Sub = dyn_cast<BinaryOperator>(LHS);
  if (!Sub || Sub->getOpcode() != Instruction::Sub)
    return false;

  // Look for zero extend from i8.
  auto getZeroExtendedVal = [](Value *Op) -> Value * {
    if (auto *ZExt = dyn_cast<ZExtInst>(Op))
      if (cast<VectorType>(ZExt->getOperand(0)->getType())
              ->getElementType()
              ->isIntegerTy(8))
        return ZExt->getOperand(0);

    return nullptr;
  };

  // Both operands of the subtract should be zero extends from vXi8.
  Value *Op0 = getZeroExtendedVal(Sub->getOperand(0));
  Value *Op1 = getZeroExtendedVal(Sub->getOperand(1));
  if (!Op0 || !Op1)
    return false;

  IRBuilder<> Builder(SI);

  auto *OpTy = cast<FixedVectorType>(Op->getType());
  unsigned NumElts = OpTy->getNumElements();

  unsigned IntrinsicNumElts;
  Intrinsic::ID IID;
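  // Pick the widest PSADBW the subtarget supports. IntrinsicNumElts is the
  // number of i8 elements each intrinsic call consumes; every group of eight
  // bytes yields one i64 partial sum.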
  if (ST->hasBWI() && NumElts >= 64) {
    IID = Intrinsic::x86_avx512_psad_bw_512;
    IntrinsicNumElts = 64;
  } else if (ST->hasAVX2() && NumElts >= 32) {
    IID = Intrinsic::x86_avx2_psad_bw;
    IntrinsicNumElts = 32;
  } else {
    IID = Intrinsic::x86_sse2_psad_bw;
    IntrinsicNumElts = 16;
  }

  Function *PSADBWFn = Intrinsic::getDeclaration(SI->getModule(), IID);

  if (NumElts < 16) {
    // Pad input with zeroes.
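    // The extra lanes come from a zero vector; |0 - 0| contributes nothing to
    // a sum of absolute differences, so the padding does not affect the
    // reduced value.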
    SmallVector<int, 32> ConcatMask(16);
    for (unsigned i = 0; i != NumElts; ++i)
      ConcatMask[i] = i;
    for (unsigned i = NumElts; i != 16; ++i)
      ConcatMask[i] = (i % NumElts) + NumElts;

    Value *Zero = Constant::getNullValue(Op0->getType());
    Op0 = Builder.CreateShuffleVector(Op0, Zero, ConcatMask);
    Op1 = Builder.CreateShuffleVector(Op1, Zero, ConcatMask);
    NumElts = 16;
  }

  // Intrinsics produce a vXi64 result that needs to be cast to vXi32.
  auto *I32Ty =
      FixedVectorType::get(Builder.getInt32Ty(), IntrinsicNumElts / 4);

  assert(NumElts % IntrinsicNumElts == 0 && "Unexpected number of elements!");
  unsigned NumSplits = NumElts / IntrinsicNumElts;

  // First collect the pieces we need.
  SmallVector<Value *, 4> Ops(NumSplits);
  for (unsigned i = 0; i != NumSplits; ++i) {
    SmallVector<int, 64> ExtractMask(IntrinsicNumElts);
    std::iota(ExtractMask.begin(), ExtractMask.end(), i * IntrinsicNumElts);
    Value *ExtractOp0 = Builder.CreateShuffleVector(Op0, Op0, ExtractMask);
    Value *ExtractOp1 = Builder.CreateShuffleVector(Op1, Op1, ExtractMask);
    Ops[i] = Builder.CreateCall(PSADBWFn, {ExtractOp0, ExtractOp1});
    Ops[i] = Builder.CreateBitCast(Ops[i], I32Ty);
  }

  assert(isPowerOf2_32(NumSplits) && "Expected power of 2 splits");
  unsigned Stages = Log2_32(NumSplits);
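  // Pairwise concatenate the per-split results until a single vector remains,
  // halving the number of pieces at each stage.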
  for (unsigned s = Stages; s > 0; --s) {
    unsigned NumConcatElts =
        cast<FixedVectorType>(Ops[0]->getType())->getNumElements() * 2;
    for (unsigned i = 0; i != 1U << (s - 1); ++i) {
      SmallVector<int, 64> ConcatMask(NumConcatElts);
      std::iota(ConcatMask.begin(), ConcatMask.end(), 0);
      Ops[i] = Builder.CreateShuffleVector(Ops[i*2], Ops[i*2+1], ConcatMask);
    }
  }

  // At this point the final value should be in Ops[0]. Now we need to adjust
  // it to the original type.
  NumElts = OpTy->getNumElements();
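  // Only the horizontal sum is preserved, which is all the add reduction
  // consumes. NumElts == 4 needs no adjustment: the single 128-bit PSADBW
  // result bitcast to v4i32 already matches OpTy.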
  if (NumElts == 2) {
    // Extract down to 2 elements.
    Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ArrayRef<int>{0, 1});
  } else if (NumElts >= 8) {
    SmallVector<int, 32> ConcatMask(NumElts);
    unsigned SubElts =
        cast<FixedVectorType>(Ops[0]->getType())->getNumElements();
    for (unsigned i = 0; i != SubElts; ++i)
      ConcatMask[i] = i;
    for (unsigned i = SubElts; i != NumElts; ++i)
      ConcatMask[i] = (i % SubElts) + SubElts;

    Value *Zero = Constant::getNullValue(Ops[0]->getType());
    Ops[0] = Builder.CreateShuffleVector(Ops[0], Zero, ConcatMask);
  }

  SI->replaceAllUsesWith(Ops[0]);
  SI->eraseFromParent();

  return true;
}

// Walk backwards from the ExtractElementInst and determine if it is the end of
// a horizontal reduction. Return the input to the reduction if we find one.
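// For example, a v4i32 reduction ending in "extractelement %s1, i32 0" is
// expected to look like this (illustrative IR, mask types elided):
//   %sh0 = shufflevector %x,  undef, <2, 3, undef, undef>
//   %s0  = add <4 x i32> %sh0, %x
//   %sh1 = shufflevector %s0, undef, <1, undef, undef, undef>
//   %s1  = add <4 x i32> %sh1, %s0
// and the returned value is %x.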
static Value *matchAddReduction(const ExtractElementInst &EE) {
  // Make sure we're extracting index 0.
  auto *Index = dyn_cast<ConstantInt>(EE.getIndexOperand());
  if (!Index || !Index->isNullValue())
    return nullptr;

  const auto *BO = dyn_cast<BinaryOperator>(EE.getVectorOperand());
  if (!BO || BO->getOpcode() != Instruction::Add || !BO->hasOneUse())
    return nullptr;

  unsigned NumElems = cast<FixedVectorType>(BO->getType())->getNumElements();
  // Ensure the reduction size is a power of 2.
  if (!isPowerOf2_32(NumElems))
    return nullptr;

  const Value *Op = BO;
  unsigned Stages = Log2_32(NumElems);
  for (unsigned i = 0; i != Stages; ++i) {
    const auto *BO = dyn_cast<BinaryOperator>(Op);
    if (!BO || BO->getOpcode() != Instruction::Add)
      return nullptr;

    // If this isn't the first add, then it should only have 2 users, the
    // shuffle and another add which we checked in the previous iteration.
    if (i != 0 && !BO->hasNUses(2))
      return nullptr;

    Value *LHS = BO->getOperand(0);
    Value *RHS = BO->getOperand(1);

    auto *Shuffle = dyn_cast<ShuffleVectorInst>(LHS);
    if (Shuffle) {
      Op = RHS;
    } else {
      Shuffle = dyn_cast<ShuffleVectorInst>(RHS);
      Op = LHS;
    }

    // The first operand of the shuffle should be the same as the other operand
    // of the bin op.
    if (!Shuffle || Shuffle->getOperand(0) != Op)
      return nullptr;

    // Verify the shuffle has the expected (at this stage of the pyramid) mask.
    unsigned MaskEnd = 1 << i;
    for (unsigned Index = 0; Index < MaskEnd; ++Index)
      if (Shuffle->getMaskValue(Index) != (int)(MaskEnd + Index))
        return nullptr;
  }

  return const_cast<Value *>(Op);
}

// See if this BO is reachable from this Phi by walking forward through single
// use BinaryOperators with the same opcode. If we get back to BO then we know
// we've found a loop and it is safe to step through this Add to find more
// leaves.
static bool isReachableFromPHI(PHINode *Phi, BinaryOperator *BO) {
  // The PHI itself should only have one use.
  if (!Phi->hasOneUse())
    return false;

  Instruction *U = cast<Instruction>(*Phi->user_begin());
  if (U == BO)
    return true;

  while (U->hasOneUse() && U->getOpcode() == BO->getOpcode())
    U = cast<Instruction>(*U->user_begin());

  return U == BO;
}

// Collect all the leaves of the tree of adds that feeds into the horizontal
// reduction. Root is the Value that is used by the horizontal reduction.
// We look through single use phis, single use adds, or adds that are used by
// a phi that forms a loop with the add.
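// A typical leaf comes from a vectorized reduction loop such as
// (illustrative IR):
//   loop:
//     %phi  = phi <8 x i32> [ zeroinitializer, %entry ], [ %acc, %loop ]
//     %leaf = ...  ; e.g. a mul or an abs-of-sub select
//     %acc  = add <8 x i32> %phi, %leaf
// where %acc feeds the reduction after the loop.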
static void collectLeaves(Value *Root, SmallVectorImpl<Instruction *> &Leaves) {
  SmallPtrSet<Value *, 8> Visited;
  SmallVector<Value *, 8> Worklist;
  Worklist.push_back(Root);

  while (!Worklist.empty()) {
    Value *V = Worklist.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    if (auto *PN = dyn_cast<PHINode>(V)) {
      // A PHI node should have a single use, unless it is the root node, in
      // which case it has two uses.
      if (!PN->hasNUses(PN == Root ? 2 : 1))
        break;

      // Push incoming values to the worklist.
      for (Value *InV : PN->incoming_values())
        Worklist.push_back(InV);

      continue;
    }

    if (auto *BO = dyn_cast<BinaryOperator>(V)) {
      if (BO->getOpcode() == Instruction::Add) {
        // Simple case. Single use, just push its operands to the worklist.
        if (BO->hasNUses(BO == Root ? 2 : 1)) {
          for (Value *Op : BO->operands())
            Worklist.push_back(Op);
          continue;
        }

        // If there is an additional use, make sure it is an unvisited phi that
        // gets us back to this node.
        if (BO->hasNUses(BO == Root ? 3 : 2)) {
          PHINode *PN = nullptr;
          for (auto *U : Root->users())
            if (auto *P = dyn_cast<PHINode>(U))
              if (!Visited.count(P))
                PN = P;

          // If we didn't find a 2-input PHI then this isn't a case we can
          // handle.
          if (!PN || PN->getNumIncomingValues() != 2)
            continue;

          // Walk forward from this phi to see if it reaches back to this add.
          if (!isReachableFromPHI(PN, BO))
            continue;

          // The phi forms a loop with this Add, push its operands.
          for (Value *Op : BO->operands())
            Worklist.push_back(Op);
        }
      }
    }

    // Not an add or phi, make it a leaf.
    if (auto *I = dyn_cast<Instruction>(V)) {
      if (!V->hasNUses(I == Root ? 2 : 1))
        continue;

      // Add this as a leaf.
      Leaves.push_back(I);
    }
  }
}

bool X86PartialReduction::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    return false;

  auto &TM = TPC->getTM<X86TargetMachine>();
  ST = TM.getSubtargetImpl(F);

  DL = &F.getParent()->getDataLayout();

  bool MadeChange = false;
  for (auto &BB : F) {
    for (auto &I : BB) {
      auto *EE = dyn_cast<ExtractElementInst>(&I);
      if (!EE)
        continue;

      // First find a reduction tree.
      // FIXME: Do we need to handle other opcodes than Add?
      Value *Root = matchAddReduction(*EE);
      if (!Root)
        continue;

      SmallVector<Instruction *, 8> Leaves;
      collectLeaves(Root, Leaves);

      for (Instruction *I : Leaves) {
        if (tryMAddReplacement(I)) {
          MadeChange = true;
          continue;
        }

        // Don't do SAD matching on the root node. SelectionDAG already
        // has support for that and currently generates better code.
        if (I != Root && trySADReplacement(I))
          MadeChange = true;
      }
    }
  }

  return MadeChange;
}