//===--- ExpandMemCmp.cpp - Expand memcmp() to load/stores ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass tries to expand memcmp() calls into optimally-sized loads and
// compares for the target.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/InitializePasses.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SizeOpts.h"

using namespace llvm;

#define DEBUG_TYPE "expandmemcmp"

STATISTIC(NumMemCmpCalls, "Number of memcmp calls");
STATISTIC(NumMemCmpNotConstant, "Number of memcmp calls without constant size");
STATISTIC(NumMemCmpGreaterThanMax,
          "Number of memcmp calls with size greater than max size");
STATISTIC(NumMemCmpInlined, "Number of inlined memcmp calls");

static cl::opt<unsigned> MemCmpEqZeroNumLoadsPerBlock(
    "memcmp-num-loads-per-block", cl::Hidden, cl::init(1),
    cl::desc("The number of loads per basic block for inline expansion of "
             "memcmp that is only being compared against zero."));

static cl::opt<unsigned> MaxLoadsPerMemcmp(
    "max-loads-per-memcmp", cl::Hidden,
    cl::desc("Set maximum number of loads used in expanded memcmp"));

static cl::opt<unsigned> MaxLoadsPerMemcmpOptSize(
    "max-loads-per-memcmp-opt-size", cl::Hidden,
    cl::desc("Set maximum number of loads used in expanded memcmp for -Os/Oz"));
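
// Illustrative usage (an assumption, not part of this file): these are
// registered cl::opts, so they can be set on tools that run codegen passes,
// e.g.
//   llc -max-loads-per-memcmp=8 input.ll
// or passed through from clang with -mllvm:
//   clang -O2 -mllvm -memcmp-num-loads-per-block=2 file.c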

namespace {

// This class provides helper functions to expand a memcmp library call into an
// inline expansion.
class MemCmpExpansion {
  struct ResultBlock {
    BasicBlock *BB = nullptr;
    PHINode *PhiSrc1 = nullptr;
    PHINode *PhiSrc2 = nullptr;

    ResultBlock() = default;
  };

  CallInst *const CI;
  ResultBlock ResBlock;
  const uint64_t Size;
  unsigned MaxLoadSize;
  uint64_t NumLoadsNonOneByte;
  const uint64_t NumLoadsPerBlockForZeroCmp;
  std::vector<BasicBlock *> LoadCmpBlocks;
  BasicBlock *EndBlock;
  PHINode *PhiRes;
  const bool IsUsedForZeroCmp;
  const DataLayout &DL;
  IRBuilder<> Builder;
  // Represents the decomposition in blocks of the expansion. For example,
  // comparing 33 bytes on X86+sse can be done with 2x16-byte loads and
  // 1x1-byte load, which would be represented as [{16, 0}, {16, 16}, {1, 32}].
  struct LoadEntry {
    LoadEntry(unsigned LoadSize, uint64_t Offset)
        : LoadSize(LoadSize), Offset(Offset) {}

    // The size of the load for this block, in bytes.
    unsigned LoadSize;
    // The offset of this load from the base pointer, in bytes.
    uint64_t Offset;
  };
  using LoadEntryVector = SmallVector<LoadEntry, 8>;
  LoadEntryVector LoadSequence;

  void createLoadCmpBlocks();
  void createResultBlock();
  void setupResultBlockPHINodes();
  void setupEndBlockPHINodes();
  Value *getCompareLoadPairs(unsigned BlockIndex, unsigned &LoadIndex);
  void emitLoadCompareBlock(unsigned BlockIndex);
  void emitLoadCompareBlockMultipleLoads(unsigned BlockIndex,
                                         unsigned &LoadIndex);
  void emitLoadCompareByteBlock(unsigned BlockIndex, unsigned OffsetBytes);
  void emitMemCmpResultBlock();
  Value *getMemCmpExpansionZeroCase();
  Value *getMemCmpEqZeroOneBlock();
  Value *getMemCmpOneBlock();
  struct LoadPair {
    Value *Lhs = nullptr;
    Value *Rhs = nullptr;
  };
  LoadPair getLoadPair(Type *LoadSizeType, bool NeedsBSwap, Type *CmpSizeType,
                       unsigned OffsetBytes);

  static LoadEntryVector
  computeGreedyLoadSequence(uint64_t Size, llvm::ArrayRef<unsigned> LoadSizes,
                            unsigned MaxNumLoads, unsigned &NumLoadsNonOneByte);
  static LoadEntryVector
  computeOverlappingLoadSequence(uint64_t Size, unsigned MaxLoadSize,
                                 unsigned MaxNumLoads,
                                 unsigned &NumLoadsNonOneByte);

public:
  MemCmpExpansion(CallInst *CI, uint64_t Size,
                  const TargetTransformInfo::MemCmpExpansionOptions &Options,
                  const bool IsUsedForZeroCmp, const DataLayout &TheDataLayout);

  unsigned getNumBlocks();
  uint64_t getNumLoads() const { return LoadSequence.size(); }

  Value *getMemCmpExpansion();
};

MemCmpExpansion::LoadEntryVector MemCmpExpansion::computeGreedyLoadSequence(
    uint64_t Size, llvm::ArrayRef<unsigned> LoadSizes,
    const unsigned MaxNumLoads, unsigned &NumLoadsNonOneByte) {
  NumLoadsNonOneByte = 0;
  LoadEntryVector LoadSequence;
  uint64_t Offset = 0;
  while (Size && !LoadSizes.empty()) {
    const unsigned LoadSize = LoadSizes.front();
    const uint64_t NumLoadsForThisSize = Size / LoadSize;
    if (LoadSequence.size() + NumLoadsForThisSize > MaxNumLoads) {
      // Do not expand if the total number of loads is larger than what the
      // target allows. Note that it's important that we exit before completing
      // the expansion to avoid using a ton of memory to store the expansion for
      // large sizes.
      return {};
    }
    if (NumLoadsForThisSize > 0) {
      for (uint64_t I = 0; I < NumLoadsForThisSize; ++I) {
        LoadSequence.push_back({LoadSize, Offset});
        Offset += LoadSize;
      }
      if (LoadSize > 1)
        ++NumLoadsNonOneByte;
      Size = Size % LoadSize;
    }
    LoadSizes = LoadSizes.drop_front();
  }
  return LoadSequence;
}
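
// Illustrative walk-through (assuming LoadSizes == {8, 4, 2, 1}, as a typical
// 64-bit target might report): for Size == 15 the greedy decomposition is
//   [{8, 0}, {4, 8}, {2, 12}, {1, 14}]
// i.e. four loads with NumLoadsNonOneByte == 3. With MaxNumLoads == 3, the
// same request would bail out and return an empty sequence instead.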

MemCmpExpansion::LoadEntryVector
MemCmpExpansion::computeOverlappingLoadSequence(uint64_t Size,
                                                const unsigned MaxLoadSize,
                                                const unsigned MaxNumLoads,
                                                unsigned &NumLoadsNonOneByte) {
  // These are already handled by the greedy approach.
  if (Size < 2 || MaxLoadSize < 2)
    return {};

  // We try to do as many non-overlapping loads as possible starting from the
  // beginning.
  const uint64_t NumNonOverlappingLoads = Size / MaxLoadSize;
  assert(NumNonOverlappingLoads && "there must be at least one load");
  // There remain 0 to (MaxLoadSize - 1) bytes to load; this will be done with
  // an overlapping load.
  Size = Size - NumNonOverlappingLoads * MaxLoadSize;
  // Bail if we do not need an overlapping load; that case is already handled
  // by the greedy approach.
  if (Size == 0)
    return {};
  // Bail if the number of loads (non-overlapping + potential overlapping one)
  // is larger than the max allowed.
  if ((NumNonOverlappingLoads + 1) > MaxNumLoads)
    return {};

  // Add non-overlapping loads.
  LoadEntryVector LoadSequence;
  uint64_t Offset = 0;
  for (uint64_t I = 0; I < NumNonOverlappingLoads; ++I) {
    LoadSequence.push_back({MaxLoadSize, Offset});
    Offset += MaxLoadSize;
  }

  // Add the last overlapping load.
  assert(Size > 0 && Size < MaxLoadSize && "broken invariant");
  LoadSequence.push_back({MaxLoadSize, Offset - (MaxLoadSize - Size)});
  NumLoadsNonOneByte = 1;
  return LoadSequence;
}
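
// Illustrative example (assuming MaxLoadSize == 16, e.g. x86 with SSE): for
// Size == 31 the greedy sequence above would need five loads (16 + 8 + 4 + 2
// + 1 bytes), whereas the overlapping sequence needs only two 16-byte loads,
// the second one re-reading one byte of the first:
//   [{16, 0}, {16, 15}]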

// Initialize the basic block structure required for expansion of a memcmp
// call with the given maximum load size and memcmp size parameter.
// This structure includes:
// 1. A list of load compare blocks - LoadCmpBlocks.
// 2. An EndBlock, split from the original instruction point, which is the
//    block to return from.
// 3. A ResultBlock, the block to branch to for an early exit when a
//    LoadCmpBlock finds a difference.
MemCmpExpansion::MemCmpExpansion(
    CallInst *const CI, uint64_t Size,
    const TargetTransformInfo::MemCmpExpansionOptions &Options,
    const bool IsUsedForZeroCmp, const DataLayout &TheDataLayout)
    : CI(CI), Size(Size), MaxLoadSize(0), NumLoadsNonOneByte(0),
      NumLoadsPerBlockForZeroCmp(Options.NumLoadsPerBlock),
      IsUsedForZeroCmp(IsUsedForZeroCmp), DL(TheDataLayout), Builder(CI) {
  assert(Size > 0 && "zero blocks");
  // Scale the max size down if the target can load more bytes than we need.
  llvm::ArrayRef<unsigned> LoadSizes(Options.LoadSizes);
  while (!LoadSizes.empty() && LoadSizes.front() > Size) {
    LoadSizes = LoadSizes.drop_front();
  }
  assert(!LoadSizes.empty() && "cannot load Size bytes");
  MaxLoadSize = LoadSizes.front();
  // Compute the decomposition.
  unsigned GreedyNumLoadsNonOneByte = 0;
  LoadSequence = computeGreedyLoadSequence(Size, LoadSizes, Options.MaxNumLoads,
                                           GreedyNumLoadsNonOneByte);
  NumLoadsNonOneByte = GreedyNumLoadsNonOneByte;
  assert(LoadSequence.size() <= Options.MaxNumLoads && "broken invariant");
  // If we allow overlapping loads and the load sequence is not already optimal,
  // use overlapping loads.
  if (Options.AllowOverlappingLoads &&
      (LoadSequence.empty() || LoadSequence.size() > 2)) {
    unsigned OverlappingNumLoadsNonOneByte = 0;
    auto OverlappingLoads = computeOverlappingLoadSequence(
        Size, MaxLoadSize, Options.MaxNumLoads, OverlappingNumLoadsNonOneByte);
    if (!OverlappingLoads.empty() &&
        (LoadSequence.empty() ||
         OverlappingLoads.size() < LoadSequence.size())) {
      LoadSequence = OverlappingLoads;
      NumLoadsNonOneByte = OverlappingNumLoadsNonOneByte;
    }
  }
  assert(LoadSequence.size() <= Options.MaxNumLoads && "broken invariant");
}

unsigned MemCmpExpansion::getNumBlocks() {
  if (IsUsedForZeroCmp)
    return getNumLoads() / NumLoadsPerBlockForZeroCmp +
           (getNumLoads() % NumLoadsPerBlockForZeroCmp != 0 ? 1 : 0);
  return getNumLoads();
}
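
// For example (illustrative): with getNumLoads() == 5 and
// NumLoadsPerBlockForZeroCmp == 2, a zero-equality expansion emits
// ceil(5 / 2) == 3 blocks, while a relational expansion always emits one
// block per load, i.e. 5 blocks.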

void MemCmpExpansion::createLoadCmpBlocks() {
  for (unsigned i = 0; i < getNumBlocks(); i++) {
    BasicBlock *BB = BasicBlock::Create(CI->getContext(), "loadbb",
                                        EndBlock->getParent(), EndBlock);
    LoadCmpBlocks.push_back(BB);
  }
}

void MemCmpExpansion::createResultBlock() {
  ResBlock.BB = BasicBlock::Create(CI->getContext(), "res_block",
                                   EndBlock->getParent(), EndBlock);
}

MemCmpExpansion::LoadPair MemCmpExpansion::getLoadPair(Type *LoadSizeType,
                                                       bool NeedsBSwap,
                                                       Type *CmpSizeType,
                                                       unsigned OffsetBytes) {
  // Get the memory source at offset `OffsetBytes`.
  Value *LhsSource = CI->getArgOperand(0);
  Value *RhsSource = CI->getArgOperand(1);
  Align LhsAlign = LhsSource->getPointerAlignment(DL);
  Align RhsAlign = RhsSource->getPointerAlignment(DL);
  if (OffsetBytes > 0) {
    auto *ByteType = Type::getInt8Ty(CI->getContext());
    LhsSource = Builder.CreateConstGEP1_64(
        ByteType, Builder.CreateBitCast(LhsSource, ByteType->getPointerTo()),
        OffsetBytes);
    RhsSource = Builder.CreateConstGEP1_64(
        ByteType, Builder.CreateBitCast(RhsSource, ByteType->getPointerTo()),
        OffsetBytes);
    LhsAlign = commonAlignment(LhsAlign, OffsetBytes);
    RhsAlign = commonAlignment(RhsAlign, OffsetBytes);
  }
  LhsSource = Builder.CreateBitCast(LhsSource, LoadSizeType->getPointerTo());
  RhsSource = Builder.CreateBitCast(RhsSource, LoadSizeType->getPointerTo());

  // Create a constant or a load from the source.
  Value *Lhs = nullptr;
  if (auto *C = dyn_cast<Constant>(LhsSource))
    Lhs = ConstantFoldLoadFromConstPtr(C, LoadSizeType, DL);
  if (!Lhs)
    Lhs = Builder.CreateAlignedLoad(LoadSizeType, LhsSource, LhsAlign);

  Value *Rhs = nullptr;
  if (auto *C = dyn_cast<Constant>(RhsSource))
    Rhs = ConstantFoldLoadFromConstPtr(C, LoadSizeType, DL);
  if (!Rhs)
    Rhs = Builder.CreateAlignedLoad(LoadSizeType, RhsSource, RhsAlign);

  // Swap bytes if required.
  if (NeedsBSwap) {
    Function *Bswap = Intrinsic::getDeclaration(CI->getModule(),
                                                Intrinsic::bswap, LoadSizeType);
    Lhs = Builder.CreateCall(Bswap, Lhs);
    Rhs = Builder.CreateCall(Bswap, Rhs);
  }

  // Zero extend if required.
  if (CmpSizeType != nullptr && CmpSizeType != LoadSizeType) {
    Lhs = Builder.CreateZExt(Lhs, CmpSizeType);
    Rhs = Builder.CreateZExt(Rhs, CmpSizeType);
  }
  return {Lhs, Rhs};
}
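
// For one side, a 4-byte pair at OffsetBytes == 8 with NeedsBSwap emits IR
// along these lines (value names are illustrative):
//   %lhs.i8 = bitcast i32* %buffer1 to i8*
//   %lhs.gep = getelementptr i8, i8* %lhs.i8, i64 8
//   %lhs.ptr = bitcast i8* %lhs.gep to i32*
//   %lhs = load i32, i32* %lhs.ptr
//   %lhs.bswap = call i32 @llvm.bswap.i32(i32 %lhs)
// and symmetrically for the rhs; both values are then zext'ed to CmpSizeType
// if it is wider than the load type.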

// This function creates the IR instructions for loading and comparing 1 byte.
// It loads 1 byte from each source of the memcmp parameters at the given
// offset in bytes. It then subtracts the two loaded values and adds the
// result to the final phi node for selecting the memcmp result.
void MemCmpExpansion::emitLoadCompareByteBlock(unsigned BlockIndex,
                                               unsigned OffsetBytes) {
  Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);
  const LoadPair Loads =
      getLoadPair(Type::getInt8Ty(CI->getContext()), /*NeedsBSwap=*/false,
                  Type::getInt32Ty(CI->getContext()), OffsetBytes);
  Value *Diff = Builder.CreateSub(Loads.Lhs, Loads.Rhs);

  PhiRes->addIncoming(Diff, LoadCmpBlocks[BlockIndex]);

  if (BlockIndex < (LoadCmpBlocks.size() - 1)) {
    // Early exit branch to EndBlock if a difference was found. Otherwise,
    // continue to the next LoadCmpBlock.
    Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_NE, Diff,
                                    ConstantInt::get(Diff->getType(), 0));
    BranchInst *CmpBr =
        BranchInst::Create(EndBlock, LoadCmpBlocks[BlockIndex + 1], Cmp);
    Builder.Insert(CmpBr);
  } else {
    // The last block has an unconditional branch to EndBlock.
    BranchInst *CmpBr = BranchInst::Create(EndBlock);
    Builder.Insert(CmpBr);
  }
}

/// Generate an equality comparison for one or more pairs of loaded values.
/// This is used in the case where the memcmp() call is compared equal or not
/// equal to zero.
Value *MemCmpExpansion::getCompareLoadPairs(unsigned BlockIndex,
                                            unsigned &LoadIndex) {
  assert(LoadIndex < getNumLoads() &&
         "getCompareLoadPairs() called with no remaining loads");
  std::vector<Value *> XorList, OrList;
  Value *Diff = nullptr;

  const unsigned NumLoads =
      std::min(getNumLoads() - LoadIndex, NumLoadsPerBlockForZeroCmp);

  // For a single-block expansion, start inserting before the memcmp call.
  if (LoadCmpBlocks.empty())
    Builder.SetInsertPoint(CI);
  else
    Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);

  Value *Cmp = nullptr;
  // If we have multiple loads per block, we need to generate a composite
  // comparison using xor+or. The type for the combinations is the largest load
  // type.
  IntegerType *const MaxLoadType =
      NumLoads == 1 ? nullptr
                    : IntegerType::get(CI->getContext(), MaxLoadSize * 8);
  for (unsigned i = 0; i < NumLoads; ++i, ++LoadIndex) {
    const LoadEntry &CurLoadEntry = LoadSequence[LoadIndex];
    const LoadPair Loads = getLoadPair(
        IntegerType::get(CI->getContext(), CurLoadEntry.LoadSize * 8),
        /*NeedsBSwap=*/false, MaxLoadType, CurLoadEntry.Offset);

    if (NumLoads != 1) {
      // If we have multiple loads per block, we need to generate a composite
      // comparison using xor+or.
      Diff = Builder.CreateXor(Loads.Lhs, Loads.Rhs);
      Diff = Builder.CreateZExt(Diff, MaxLoadType);
      XorList.push_back(Diff);
    } else {
      // If there's only one load per block, we just compare the loaded values.
      Cmp = Builder.CreateICmpNE(Loads.Lhs, Loads.Rhs);
    }
  }

  auto pairWiseOr = [&](std::vector<Value *> &InList) -> std::vector<Value *> {
    std::vector<Value *> OutList;
    for (unsigned i = 0; i < InList.size() - 1; i = i + 2) {
      Value *Or = Builder.CreateOr(InList[i], InList[i + 1]);
      OutList.push_back(Or);
    }
    if (InList.size() % 2 != 0)
      OutList.push_back(InList.back());
    return OutList;
  };

  if (!Cmp) {
    // Pairwise OR the XOR results.
    OrList = pairWiseOr(XorList);

    // Pairwise OR the OR results until one result left.
    while (OrList.size() != 1) {
      OrList = pairWiseOr(OrList);
    }

    assert(Diff && "Failed to find comparison diff");
    Cmp = Builder.CreateICmpNE(OrList[0], ConstantInt::get(Diff->getType(), 0));
  }

  return Cmp;
}
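
// Illustrative shape of the block-level comparison for NumLoads == 4 (value
// names invented): each pair is xor'ed, the xors are reduced pairwise with
// ors, and a single icmp tests the final or against zero:
//   %x0 = xor i64 %lhs0, %rhs0
//   %x1 = xor i64 %lhs1, %rhs1
//   %x2 = xor i64 %lhs2, %rhs2
//   %x3 = xor i64 %lhs3, %rhs3
//   %o0 = or i64 %x0, %x1
//   %o1 = or i64 %x2, %x3
//   %o2 = or i64 %o0, %o1
//   %cmp = icmp ne i64 %o2, 0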

void MemCmpExpansion::emitLoadCompareBlockMultipleLoads(unsigned BlockIndex,
                                                        unsigned &LoadIndex) {
  Value *Cmp = getCompareLoadPairs(BlockIndex, LoadIndex);

  BasicBlock *NextBB = (BlockIndex == (LoadCmpBlocks.size() - 1))
                           ? EndBlock
                           : LoadCmpBlocks[BlockIndex + 1];
  // Early exit branch to ResultBlock if a difference was found. Otherwise,
  // continue to the next LoadCmpBlock or to EndBlock.
  BranchInst *CmpBr = BranchInst::Create(ResBlock.BB, NextBB, Cmp);
  Builder.Insert(CmpBr);

  // Add a phi edge for the last LoadCmpBlock to EndBlock with a value of 0
  // since early exit to ResultBlock was not taken (no difference was found in
  // any of the bytes).
  if (BlockIndex == LoadCmpBlocks.size() - 1) {
    Value *Zero = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 0);
    PhiRes->addIncoming(Zero, LoadCmpBlocks[BlockIndex]);
  }
}
// This function creates the IR instructions for loading and comparing using
// the given LoadSize. It loads the number of bytes specified by LoadSize from
// each source of the memcmp parameters. It then does a subtract to see if
// there was a difference in the loaded values. If a difference is found, it
// branches with an early exit to the ResultBlock for calculating which source
// was larger. Otherwise, it falls through to either the next LoadCmpBlock or
// the EndBlock if this is the last LoadCmpBlock. Loading 1 byte is handled
// with a special case through emitLoadCompareByteBlock. The special handling
// can simply subtract the loaded values and add the result to the result phi
// node.
void MemCmpExpansion::emitLoadCompareBlock(unsigned BlockIndex) {
  // There is one load per block in this case, BlockIndex == LoadIndex.
  const LoadEntry &CurLoadEntry = LoadSequence[BlockIndex];

  if (CurLoadEntry.LoadSize == 1) {
    MemCmpExpansion::emitLoadCompareByteBlock(BlockIndex, CurLoadEntry.Offset);
    return;
  }

  Type *LoadSizeType =
      IntegerType::get(CI->getContext(), CurLoadEntry.LoadSize * 8);
  Type *MaxLoadType = IntegerType::get(CI->getContext(), MaxLoadSize * 8);
  assert(CurLoadEntry.LoadSize <= MaxLoadSize && "Unexpected load type");

  Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);

  const LoadPair Loads =
      getLoadPair(LoadSizeType, /*NeedsBSwap=*/DL.isLittleEndian(), MaxLoadType,
                  CurLoadEntry.Offset);

  // Add the loaded values to the phi nodes for calculating the memcmp result
  // only if the result is not used in a zero-equality comparison.
  if (!IsUsedForZeroCmp) {
    ResBlock.PhiSrc1->addIncoming(Loads.Lhs, LoadCmpBlocks[BlockIndex]);
    ResBlock.PhiSrc2->addIncoming(Loads.Rhs, LoadCmpBlocks[BlockIndex]);
  }

  Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Loads.Lhs, Loads.Rhs);
  BasicBlock *NextBB = (BlockIndex == (LoadCmpBlocks.size() - 1))
                           ? EndBlock
                           : LoadCmpBlocks[BlockIndex + 1];
  // Early exit branch to ResultBlock if a difference was found. Otherwise,
  // continue to the next LoadCmpBlock or to EndBlock.
  BranchInst *CmpBr = BranchInst::Create(NextBB, ResBlock.BB, Cmp);
  Builder.Insert(CmpBr);

  // Add a phi edge for the last LoadCmpBlock to EndBlock with a value of 0
  // since early exit to ResultBlock was not taken (no difference was found in
  // any of the bytes).
  if (BlockIndex == LoadCmpBlocks.size() - 1) {
    Value *Zero = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 0);
    PhiRes->addIncoming(Zero, LoadCmpBlocks[BlockIndex]);
  }
}

// This function populates the ResultBlock with a sequence to calculate the
// memcmp result. It compares the two loaded source values and returns -1 if
// src1 < src2 and 1 if src1 > src2.
void MemCmpExpansion::emitMemCmpResultBlock() {
  // Special case: if the memcmp result is only used in a zero-equality
  // comparison, the exact result does not need to be calculated; returning
  // any nonzero value (here, 1) is enough.
  if (IsUsedForZeroCmp) {
    BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt();
    Builder.SetInsertPoint(ResBlock.BB, InsertPt);
    Value *Res = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 1);
    PhiRes->addIncoming(Res, ResBlock.BB);
    BranchInst *NewBr = BranchInst::Create(EndBlock);
    Builder.Insert(NewBr);
    return;
  }
  BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt();
  Builder.SetInsertPoint(ResBlock.BB, InsertPt);

  Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_ULT, ResBlock.PhiSrc1,
                                  ResBlock.PhiSrc2);

  Value *Res =
      Builder.CreateSelect(Cmp, ConstantInt::get(Builder.getInt32Ty(), -1),
                           ConstantInt::get(Builder.getInt32Ty(), 1));

  BranchInst *NewBr = BranchInst::Create(EndBlock);
  Builder.Insert(NewBr);
  PhiRes->addIncoming(Res, ResBlock.BB);
}

void MemCmpExpansion::setupResultBlockPHINodes() {
  Type *MaxLoadType = IntegerType::get(CI->getContext(), MaxLoadSize * 8);
  Builder.SetInsertPoint(ResBlock.BB);
  // Note: this assumes one load per block.
  ResBlock.PhiSrc1 =
      Builder.CreatePHI(MaxLoadType, NumLoadsNonOneByte, "phi.src1");
  ResBlock.PhiSrc2 =
      Builder.CreatePHI(MaxLoadType, NumLoadsNonOneByte, "phi.src2");
}

void MemCmpExpansion::setupEndBlockPHINodes() {
  Builder.SetInsertPoint(&EndBlock->front());
  PhiRes = Builder.CreatePHI(Type::getInt32Ty(CI->getContext()), 2, "phi.res");
}

Value *MemCmpExpansion::getMemCmpExpansionZeroCase() {
  unsigned LoadIndex = 0;
  // This loop populates each of the LoadCmpBlocks with the IR sequence to
  // handle multiple loads per block.
  for (unsigned I = 0; I < getNumBlocks(); ++I) {
    emitLoadCompareBlockMultipleLoads(I, LoadIndex);
  }

  emitMemCmpResultBlock();
  return PhiRes;
}

/// A memcmp expansion that compares equality with 0 and only has one block of
/// load and compare can bypass the compare, branch, and phi IR that is required
/// in the general case.
Value *MemCmpExpansion::getMemCmpEqZeroOneBlock() {
  unsigned LoadIndex = 0;
  Value *Cmp = getCompareLoadPairs(0, LoadIndex);
  assert(LoadIndex == getNumLoads() && "some entries were not consumed");
  return Builder.CreateZExt(Cmp, Type::getInt32Ty(CI->getContext()));
}
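
// E.g. (illustrative) for `memcmp(a, b, 8) == 0` on a 64-bit target this
// reduces to roughly:
//   %lhs = load i64, i64* %a.ptr
//   %rhs = load i64, i64* %b.ptr
//   %cmp = icmp ne i64 %lhs, %rhs
//   %res = zext i1 %cmp to i32
// No bswap is needed: byte order does not affect equality.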

/// A memcmp expansion that only has one block of load and compare can bypass
/// the compare, branch, and phi IR that is required in the general case.
Value *MemCmpExpansion::getMemCmpOneBlock() {
  Type *LoadSizeType = IntegerType::get(CI->getContext(), Size * 8);
  bool NeedsBSwap = DL.isLittleEndian() && Size != 1;

  // The i8 and i16 cases don't need compares. We zext the loaded values and
  // subtract them to get the suitable negative, zero, or positive i32 result.
  if (Size < 4) {
    const LoadPair Loads =
        getLoadPair(LoadSizeType, NeedsBSwap, Builder.getInt32Ty(),
                    /*Offset*/ 0);
    return Builder.CreateSub(Loads.Lhs, Loads.Rhs);
  }

  const LoadPair Loads = getLoadPair(LoadSizeType, NeedsBSwap, LoadSizeType,
                                     /*Offset*/ 0);
  // The result of memcmp is negative, zero, or positive, so produce that by
  // subtracting 2 extended compare bits: sub (ugt, ult).
  // If a target prefers to use selects to get -1/0/1, they should be able
  // to transform this later. The inverse transform (going from selects to math)
  // may not be possible in the DAG because the selects got converted into
  // branches before we got there.
  Value *CmpUGT = Builder.CreateICmpUGT(Loads.Lhs, Loads.Rhs);
  Value *CmpULT = Builder.CreateICmpULT(Loads.Lhs, Loads.Rhs);
  Value *ZextUGT = Builder.CreateZExt(CmpUGT, Builder.getInt32Ty());
  Value *ZextULT = Builder.CreateZExt(CmpULT, Builder.getInt32Ty());
  return Builder.CreateSub(ZextUGT, ZextULT);
}
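
// Illustrative result for a relational `memcmp(a, b, 8)` on a little-endian
// target (value names invented):
//   %ba = call i64 @llvm.bswap.i64(i64 %lhs)
//   %bb = call i64 @llvm.bswap.i64(i64 %rhs)
//   %ugt = icmp ugt i64 %ba, %bb
//   %ult = icmp ult i64 %ba, %bb
//   %zgt = zext i1 %ugt to i32
//   %zlt = zext i1 %ult to i32
//   %res = sub i32 %zgt, %zlt   ; -1, 0, or 1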

// This function expands the memcmp call into an inline expansion and returns
// the memcmp result.
Value *MemCmpExpansion::getMemCmpExpansion() {
  // Create the basic block framework for a multi-block expansion.
  if (getNumBlocks() != 1) {
    BasicBlock *StartBlock = CI->getParent();
    EndBlock = StartBlock->splitBasicBlock(CI, "endblock");
    setupEndBlockPHINodes();
    createResultBlock();

    // If the return value of memcmp is not used in a zero-equality comparison,
    // we need to calculate which source was larger. The calculation requires
    // the two loaded source values of each load compare block.
    // These will be saved in the phi nodes created by setupResultBlockPHINodes.
    if (!IsUsedForZeroCmp) setupResultBlockPHINodes();

    // Create the number of required load compare basic blocks.
    createLoadCmpBlocks();

    // Update the terminator added by splitBasicBlock to branch to the first
    // LoadCmpBlock.
    StartBlock->getTerminator()->setSuccessor(0, LoadCmpBlocks[0]);
  }

  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  if (IsUsedForZeroCmp)
    return getNumBlocks() == 1 ? getMemCmpEqZeroOneBlock()
                               : getMemCmpExpansionZeroCase();

  if (getNumBlocks() == 1)
    return getMemCmpOneBlock();

  for (unsigned I = 0; I < getNumBlocks(); ++I) {
    emitLoadCompareBlock(I);
  }

  emitMemCmpResultBlock();
  return PhiRes;
}

// This function checks to see if an expansion of memcmp can be generated.
// It checks for a constant compare size that is less than the max inline size.
// If an expansion cannot occur, it returns false to leave the call as a
// library call. Otherwise, the library call is replaced with a new IR
// instruction sequence.
/// We want to transform:
/// %call = call signext i32 @memcmp(i8* %0, i8* %1, i64 15)
/// To:
/// loadbb:
///  %0 = bitcast i32* %buffer2 to i8*
///  %1 = bitcast i32* %buffer1 to i8*
///  %2 = bitcast i8* %1 to i64*
///  %3 = bitcast i8* %0 to i64*
///  %4 = load i64, i64* %2
///  %5 = load i64, i64* %3
///  %6 = call i64 @llvm.bswap.i64(i64 %4)
///  %7 = call i64 @llvm.bswap.i64(i64 %5)
///  %8 = sub i64 %6, %7
///  %9 = icmp ne i64 %8, 0
///  br i1 %9, label %res_block, label %loadbb1
/// res_block:                                        ; preds = %loadbb2,
/// %loadbb1, %loadbb
///  %phi.src1 = phi i64 [ %6, %loadbb ], [ %22, %loadbb1 ], [ %36, %loadbb2 ]
///  %phi.src2 = phi i64 [ %7, %loadbb ], [ %23, %loadbb1 ], [ %37, %loadbb2 ]
///  %10 = icmp ult i64 %phi.src1, %phi.src2
///  %11 = select i1 %10, i32 -1, i32 1
///  br label %endblock
/// loadbb1:                                          ; preds = %loadbb
///  %12 = bitcast i32* %buffer2 to i8*
///  %13 = bitcast i32* %buffer1 to i8*
///  %14 = bitcast i8* %13 to i32*
///  %15 = bitcast i8* %12 to i32*
///  %16 = getelementptr i32, i32* %14, i32 2
///  %17 = getelementptr i32, i32* %15, i32 2
///  %18 = load i32, i32* %16
///  %19 = load i32, i32* %17
///  %20 = call i32 @llvm.bswap.i32(i32 %18)
///  %21 = call i32 @llvm.bswap.i32(i32 %19)
///  %22 = zext i32 %20 to i64
///  %23 = zext i32 %21 to i64
///  %24 = sub i64 %22, %23
///  %25 = icmp ne i64 %24, 0
///  br i1 %25, label %res_block, label %loadbb2
/// loadbb2:                                          ; preds = %loadbb1
///  %26 = bitcast i32* %buffer2 to i8*
///  %27 = bitcast i32* %buffer1 to i8*
///  %28 = bitcast i8* %27 to i16*
///  %29 = bitcast i8* %26 to i16*
///  %30 = getelementptr i16, i16* %28, i16 6
///  %31 = getelementptr i16, i16* %29, i16 6
///  %32 = load i16, i16* %30
///  %33 = load i16, i16* %31
///  %34 = call i16 @llvm.bswap.i16(i16 %32)
///  %35 = call i16 @llvm.bswap.i16(i16 %33)
///  %36 = zext i16 %34 to i64
///  %37 = zext i16 %35 to i64
///  %38 = sub i64 %36, %37
///  %39 = icmp ne i64 %38, 0
///  br i1 %39, label %res_block, label %loadbb3
/// loadbb3:                                          ; preds = %loadbb2
///  %40 = bitcast i32* %buffer2 to i8*
///  %41 = bitcast i32* %buffer1 to i8*
///  %42 = getelementptr i8, i8* %41, i8 14
///  %43 = getelementptr i8, i8* %40, i8 14
///  %44 = load i8, i8* %42
///  %45 = load i8, i8* %43
///  %46 = zext i8 %44 to i32
///  %47 = zext i8 %45 to i32
///  %48 = sub i32 %46, %47
///  br label %endblock
/// endblock:                                         ; preds = %res_block,
/// %loadbb3
///  %phi.res = phi i32 [ %48, %loadbb3 ], [ %11, %res_block ]
///  ret i32 %phi.res
static bool expandMemCmp(CallInst *CI, const TargetTransformInfo *TTI,
                         const TargetLowering *TLI, const DataLayout *DL,
                         ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
  NumMemCmpCalls++;

  // Early exit from expansion if -Oz.
  if (CI->getFunction()->hasMinSize())
    return false;

  // Early exit from expansion if size is not a constant.
  ConstantInt *SizeCast = dyn_cast<ConstantInt>(CI->getArgOperand(2));
  if (!SizeCast) {
    NumMemCmpNotConstant++;
    return false;
  }
  const uint64_t SizeVal = SizeCast->getZExtValue();

  if (SizeVal == 0) {
    return false;
  }
  // TTI call to check if target would like to expand memcmp. Also, get the
  // available load sizes.
  const bool IsUsedForZeroCmp = isOnlyUsedInZeroEqualityComparison(CI);
  bool OptForSize = CI->getFunction()->hasOptSize() ||
                    llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI);
  auto Options = TTI->enableMemCmpExpansion(OptForSize, IsUsedForZeroCmp);
  if (!Options) return false;

  if (MemCmpEqZeroNumLoadsPerBlock.getNumOccurrences())
    Options.NumLoadsPerBlock = MemCmpEqZeroNumLoadsPerBlock;

  if (OptForSize && MaxLoadsPerMemcmpOptSize.getNumOccurrences())
    Options.MaxNumLoads = MaxLoadsPerMemcmpOptSize;

  if (!OptForSize && MaxLoadsPerMemcmp.getNumOccurrences())
    Options.MaxNumLoads = MaxLoadsPerMemcmp;

  MemCmpExpansion Expansion(CI, SizeVal, Options, IsUsedForZeroCmp, *DL);

  // Don't expand if this will require more loads than desired by the target.
  if (Expansion.getNumLoads() == 0) {
    NumMemCmpGreaterThanMax++;
    return false;
  }

  NumMemCmpInlined++;

  Value *Res = Expansion.getMemCmpExpansion();

  // Replace call with result of expansion and erase call.
  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();

  return true;
}

class ExpandMemCmpPass : public FunctionPass {
public:
  static char ID;

  ExpandMemCmpPass() : FunctionPass(ID) {
    initializeExpandMemCmpPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F)) return false;

    auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
    if (!TPC) {
      return false;
    }
    const TargetLowering *TL =
        TPC->getTM<TargetMachine>().getSubtargetImpl(F)->getTargetLowering();

    const TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    const TargetTransformInfo *TTI =
        &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
    auto *BFI = (PSI && PSI->hasProfileSummary())
                    ? &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI()
                    : nullptr;
    auto PA = runImpl(F, TLI, TTI, TL, PSI, BFI);
    return !PA.areAllPreserved();
  }

private:
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
    LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
    FunctionPass::getAnalysisUsage(AU);
  }

  PreservedAnalyses runImpl(Function &F, const TargetLibraryInfo *TLI,
                            const TargetTransformInfo *TTI,
                            const TargetLowering *TL,
                            ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI);
  // Returns true if a change was made.
  bool runOnBlock(BasicBlock &BB, const TargetLibraryInfo *TLI,
                  const TargetTransformInfo *TTI, const TargetLowering *TL,
                  const DataLayout &DL, ProfileSummaryInfo *PSI,
                  BlockFrequencyInfo *BFI);
};

bool ExpandMemCmpPass::runOnBlock(
    BasicBlock &BB, const TargetLibraryInfo *TLI,
    const TargetTransformInfo *TTI, const TargetLowering *TL,
    const DataLayout &DL, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
  for (Instruction &I : BB) {
    CallInst *CI = dyn_cast<CallInst>(&I);
    if (!CI) {
      continue;
    }
    LibFunc Func;
    if (TLI->getLibFunc(*CI, Func) &&
        (Func == LibFunc_memcmp || Func == LibFunc_bcmp) &&
        expandMemCmp(CI, TTI, TL, &DL, PSI, BFI)) {
      return true;
    }
  }
  return false;
}

PreservedAnalyses ExpandMemCmpPass::runImpl(
    Function &F, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI,
    const TargetLowering *TL, ProfileSummaryInfo *PSI,
    BlockFrequencyInfo *BFI) {
  const DataLayout &DL = F.getParent()->getDataLayout();
  bool MadeChanges = false;
  for (auto BBIt = F.begin(); BBIt != F.end();) {
    if (runOnBlock(*BBIt, TLI, TTI, TL, DL, PSI, BFI)) {
      MadeChanges = true;
      // If changes were made, restart the function from the beginning, since
      // the structure of the function was changed.
      BBIt = F.begin();
    } else {
      ++BBIt;
    }
  }
  if (MadeChanges)
    for (BasicBlock &BB : F)
      SimplifyInstructionsInBlock(&BB);
  return MadeChanges ? PreservedAnalyses::none() : PreservedAnalyses::all();
}

} // namespace

char ExpandMemCmpPass::ID = 0;
INITIALIZE_PASS_BEGIN(ExpandMemCmpPass, "expandmemcmp",
                      "Expand memcmp() to load/stores", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LazyBlockFrequencyInfoPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_END(ExpandMemCmpPass, "expandmemcmp",
                    "Expand memcmp() to load/stores", false, false)

FunctionPass *llvm::createExpandMemCmpPass() {
  return new ExpandMemCmpPass();
}