//===- DeadStoreElimination.cpp - Fast Dead Store Elimination -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a trivial dead store elimination that only considers
// basic-block local redundant stores.
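//
// As a minimal illustration (hypothetical IR, not taken from an actual test),
// the first store below is dead because the second one overwrites the same
// location before any intervening read:
//
//   store i32 0, i32* %P
//   store i32 1, i32* %P   ; kills the store above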
//
// FIXME: This should eventually be extended to be a post-dominator tree
// traversal.  Doing so would be pretty trivial.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "dse"

STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther, "Number of other instrs removed");
namespace {
  struct DSE : public FunctionPass {
    AliasAnalysis *AA;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;
    const TargetLibraryInfo *TLI;

    static char ID; // Pass identification, replacement for typeid
    DSE()
        : FunctionPass(ID), AA(nullptr), MD(nullptr), DT(nullptr),
          TLI(nullptr) {
      initializeDSEPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override {
      if (skipOptnoneFunction(F))
        return false;

      AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
      MD = &getAnalysis<MemoryDependenceAnalysis>();
      DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
      TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

      bool Changed = false;
      for (BasicBlock &BB : F)
        // Only check non-dead blocks.  Dead blocks may have strange pointer
        // cycles that will confuse alias analysis.
        if (DT->isReachableFromEntry(&BB))
          Changed |= runOnBasicBlock(BB);

      AA = nullptr; MD = nullptr; DT = nullptr; TLI = nullptr;
      return Changed;
    }

    bool runOnBasicBlock(BasicBlock &BB);
    bool MemoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI);
    bool HandleFree(CallInst *F);
    bool handleEndBlock(BasicBlock &BB);
    void RemoveAccessedObjects(const MemoryLocation &LoadedLoc,
                               SmallSetVector<Value *, 16> &DeadStackObjects,
                               const DataLayout &DL);

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addRequired<AAResultsWrapperPass>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<TargetLibraryInfoWrapperPass>();
      AU.addPreserved<DominatorTreeWrapperPass>();
      AU.addPreserved<GlobalsAAWrapperPass>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }
  };
}

char DSE::ID = 0;
INITIALIZE_PASS_BEGIN(DSE, "dse", "Dead Store Elimination", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(DSE, "dse", "Dead Store Elimination", false, false)

FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }
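// The pass is registered as "dse" above, so it can be exercised on its own;
// an illustrative invocation (assuming the legacy pass manager) is:
//   opt -dse -S input.ll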

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

/// DeleteDeadInstruction - Delete this instruction.  Before we do, go through
/// and zero out all the operands of this instruction.  If any of them become
/// dead, delete them and the computation tree that feeds them.
///
/// If ValueSet is non-null, remove any deleted instructions from it as well.
///
static void
DeleteDeadInstruction(Instruction *I, MemoryDependenceAnalysis &MD,
                      const TargetLibraryInfo &TLI,
                      SmallSetVector<Value *, 16> *ValueSet = nullptr) {
  SmallVector<Instruction *, 32> NowDeadInsts;

  NowDeadInsts.push_back(I);
  // Offset the increment that the loop below performs for I itself; callers
  // account for I with a more specific statistic.
  --NumFastOther;

  // Before we touch this instruction, remove it from memdep!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();
    ++NumFastOther;

    // This instruction is dead, zap it, in stages.  Start by removing it from
    // MemDep, which needs to know the operands and needs it to be in the
    // function.
    MD.removeInstruction(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, nullptr);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI, &TLI))
          NowDeadInsts.push_back(OpI);
    }

    DeadInst->eraseFromParent();

    if (ValueSet) ValueSet->remove(DeadInst);
  } while (!NowDeadInsts.empty());
}


/// hasMemoryWrite - Does this instruction write some memory?  This only
/// returns true for things that we can analyze with other helpers below.
static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo &TLI) {
  if (isa<StoreInst>(I))
    return true;
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
    case Intrinsic::init_trampoline:
    case Intrinsic::lifetime_end:
      return true;
    }
  }
  if (auto CS = CallSite(I)) {
    if (Function *F = CS.getCalledFunction()) {
      if (TLI.has(LibFunc::strcpy) &&
          F->getName() == TLI.getName(LibFunc::strcpy)) {
        return true;
      }
      if (TLI.has(LibFunc::strncpy) &&
          F->getName() == TLI.getName(LibFunc::strncpy)) {
        return true;
      }
      if (TLI.has(LibFunc::strcat) &&
          F->getName() == TLI.getName(LibFunc::strcat)) {
        return true;
      }
      if (TLI.has(LibFunc::strncat) &&
          F->getName() == TLI.getName(LibFunc::strncat)) {
        return true;
      }
    }
  }
  return false;
}

/// getLocForWrite - Return a Location stored to by the specified instruction.
/// If isRemovable returns true, this function and getLocForRead completely
/// describe the memory operations for this instruction.
static MemoryLocation getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return MemoryLocation::get(SI);

  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst))
    // memcpy/memmove/memset.
    return MemoryLocation::getForDest(MI);

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
  if (!II)
    return MemoryLocation();

  switch (II->getIntrinsicID()) {
  default:
    return MemoryLocation(); // Unhandled intrinsic.
  case Intrinsic::init_trampoline:
    // FIXME: We don't know the size of the trampoline, so we can't really
    // handle it here.
    return MemoryLocation(II->getArgOperand(0));
  case Intrinsic::lifetime_end: {
    uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
    return MemoryLocation(II->getArgOperand(1), Len);
  }
  }
}

/// getLocForRead - Return the location read by the specified "hasMemoryWrite"
/// instruction if any.
static MemoryLocation getLocForRead(Instruction *Inst,
                                    const TargetLibraryInfo &TLI) {
  assert(hasMemoryWrite(Inst, TLI) && "Unknown instruction case");

  // The only instructions that both read and write are the mem transfer
  // instructions (memcpy/memmove).
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst))
    return MemoryLocation::getForSource(MTI);
  return MemoryLocation();
}


/// isRemovable - If the value of this instruction and the memory it writes to
/// are unused, may we delete this instruction?
static bool isRemovable(Instruction *I) {
  // Don't remove volatile/atomic stores.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isUnordered();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("doesn't pass 'hasMemoryWrite' predicate");
    case Intrinsic::lifetime_end:
      // Never remove dead lifetime_end's, e.g. because it is followed by a
      // free.
      return false;
    case Intrinsic::init_trampoline:
      // Always safe to remove init_trampoline.
      return true;

    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      // Don't remove volatile memory intrinsics.
      return !cast<MemIntrinsic>(II)->isVolatile();
    }
  }

  if (auto CS = CallSite(I))
    return CS.getInstruction()->use_empty();

  return false;
}


/// isShortenable - Returns true if this instruction can be safely shortened in
/// length.
static bool isShortenable(Instruction *I) {
  // Don't shorten stores for now.
  if (isa<StoreInst>(I))
    return false;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: return false;
    case Intrinsic::memset:
    case Intrinsic::memcpy:
      // Do shorten memory intrinsics.
      return true;
    }
  }

  // Don't shorten library calls for now.
  return false;
}

/// getStoredPointerOperand - Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return MI->getDest();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("Unexpected intrinsic!");
    case Intrinsic::init_trampoline:
      return II->getArgOperand(0);
    }
  }

  CallSite CS(I);
  // All the supported functions so far happen to have dest as their first
  // argument.
  return CS.getArgument(0);
}

static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
                               const TargetLibraryInfo &TLI) {
  uint64_t Size;
  if (getObjectSize(V, Size, DL, &TLI))
    return Size;
  return MemoryLocation::UnknownSize;
}

namespace {
  enum OverwriteResult {
    OverwriteComplete,
    OverwriteEnd,
    OverwriteUnknown
  };
}

/// isOverwrite - Return 'OverwriteComplete' if a store to the 'Later' location
/// completely overwrites a store to the 'Earlier' location, 'OverwriteEnd' if
/// the end of the 'Earlier' location is completely overwritten by 'Later', or
/// 'OverwriteUnknown' if nothing can be determined.
static OverwriteResult isOverwrite(const MemoryLocation &Later,
                                   const MemoryLocation &Earlier,
                                   const DataLayout &DL,
                                   const TargetLibraryInfo &TLI,
                                   int64_t &EarlierOff, int64_t &LaterOff) {
  const Value *P1 = Earlier.Ptr->stripPointerCasts();
  const Value *P2 = Later.Ptr->stripPointerCasts();

  // If the start pointers are the same, we just have to compare sizes to see
  // if the later store completely covers the earlier one.
  if (P1 == P2) {
    // If we don't know the sizes of either access, then we can't do a
    // comparison.
    if (Later.Size == MemoryLocation::UnknownSize ||
        Earlier.Size == MemoryLocation::UnknownSize)
      return OverwriteUnknown;

    // Make sure that the Later size is >= the Earlier size.
    if (Later.Size >= Earlier.Size)
      return OverwriteComplete;
  }

  // Otherwise, we have to have size information, and the later store has to be
  // larger than the earlier one.
  if (Later.Size == MemoryLocation::UnknownSize ||
      Earlier.Size == MemoryLocation::UnknownSize)
    return OverwriteUnknown;

  // Check to see if the later store is to the entire object (either a global,
  // an alloca, or a byval/inalloca argument).  If so, then it clearly
  // overwrites any other store to the same object.
  const Value *UO1 = GetUnderlyingObject(P1, DL),
              *UO2 = GetUnderlyingObject(P2, DL);

  // If we can't resolve both pointers to the same underlying object, then we
  // can't analyze them at all.
  if (UO1 != UO2)
    return OverwriteUnknown;

  // If the "Later" store is to a recognizable object, get its size.
  uint64_t ObjectSize = getPointerSize(UO2, DL, TLI);
  if (ObjectSize != MemoryLocation::UnknownSize)
    if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
      return OverwriteComplete;

  // Okay, we have stores to two completely different pointers.  Try to
  // decompose the pointer into a "base + constant_offset" form.  If the base
  // pointers are equal, then we can reason about the two stores.
  EarlierOff = 0;
  LaterOff = 0;
  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);

  // If the base pointers still differ, we have two completely different
  // stores.
  if (BP1 != BP2)
    return OverwriteUnknown;

  // The later store completely overlaps the earlier store if:
  //
  // 1. Both start at the same offset and the later one's size is greater than
  //    or equal to the earlier one's, or
  //
  //      |--earlier--|
  //      |--   later   --|
  //
  // 2. The earlier store has an offset greater than the later offset, but
  //    still lies completely within the later store.
  //
  //        |--earlier--|
  //    |-----  later  ------|
  //
  // We have to be careful here as *Off is signed while *.Size is unsigned.
  if (EarlierOff >= LaterOff &&
      Later.Size >= Earlier.Size &&
      uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size)
    return OverwriteComplete;

  // The other interesting case is if the later store overwrites the end of
  // the earlier store:
  //
  //      |--earlier--|
  //                |--   later   --|
  //
  // In this case we may want to trim the size of earlier to avoid generating
  // writes to addresses which will definitely be overwritten later.
  if (LaterOff > EarlierOff &&
      LaterOff < int64_t(EarlierOff + Earlier.Size) &&
      int64_t(LaterOff + Later.Size) >= int64_t(EarlierOff + Earlier.Size))
    return OverwriteEnd;

  // Otherwise, they don't completely overlap.
  return OverwriteUnknown;
}

/// isPossibleSelfRead - If 'Inst' might be a self read (i.e. a noop copy of a
/// memory region into an identical pointer) then it doesn't actually make its
/// input dead in the traditional sense.  Consider this case:
///
///   memcpy(A <- B)
///   memcpy(A <- A)
///
/// In this case, the second store to A does not make the first store to A
/// dead.  The usual situation isn't an explicit A<-A store like this (which
/// can be trivially removed) but a case where two pointers may alias.
///
/// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE-inducing instruction may be a self-read.
static bool isPossibleSelfRead(Instruction *Inst,
                               const MemoryLocation &InstStoreLoc,
                               Instruction *DepWrite,
                               const TargetLibraryInfo &TLI,
                               AliasAnalysis &AA) {
  // Self reads can only happen for instructions that read memory.  Get the
  // location read.
  MemoryLocation InstReadLoc = getLocForRead(Inst, TLI);
  if (!InstReadLoc.Ptr) return false;  // Not a reading instruction.

  // If the read and written loc obviously don't alias, it isn't a read.
  if (AA.isNoAlias(InstReadLoc, InstStoreLoc)) return false;

  // Okay, 'Inst' may copy over itself.  However, we can still remove the
  // DepWrite instruction if we can prove that it reads from the same location
  // as Inst.  This handles useful cases like:
  //   memcpy(A <- B)
  //   memcpy(A <- B)
  // Here we don't know if A/B may alias, but we do know that B/B are must
  // aliases, so removing the first memcpy is safe (assuming it writes no more
  // bytes than the second one does).
  MemoryLocation DepReadLoc = getLocForRead(DepWrite, TLI);

  if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
    return false;

  // If DepWrite doesn't read memory or if we can't prove it is a must alias,
  // then it can't be considered dead.
  return true;
}


//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//

bool DSE::runOnBasicBlock(BasicBlock &BB) {
  const DataLayout &DL = BB.getModule()->getDataLayout();
  bool MadeChange = false;

  // Do a top-down walk on the BB.
  for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
    Instruction *Inst = &*BBI++;

    // Handle 'free' calls specially.
    if (CallInst *F = isFreeCall(Inst, TLI)) {
      MadeChange |= HandleFree(F);
      continue;
    }

    // If we find something that writes memory, get its memory dependence.
    if (!hasMemoryWrite(Inst, *TLI))
      continue;

    // If we're storing the same value back to a pointer that we just
    // loaded from, then the store can be removed.
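    // For example (illustrative IR; %p and %x are hypothetical):
    //   %x = load i32, i32* %p
    //   store i32 %x, i32* %p   ; removable if %p is untouched in between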
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {

      auto RemoveDeadInstAndUpdateBBI = [&](Instruction *DeadInst) {
        // DeleteDeadInstruction can delete the current instruction.  Save BBI
        // in case we need it.
        WeakVH NextInst(&*BBI);

        DeleteDeadInstruction(DeadInst, *MD, *TLI);

        if (!NextInst) // Next instruction deleted.
          BBI = BB.begin();
        else if (BBI != BB.begin()) // Revisit this instruction if possible.
          --BBI;
        ++NumRedundantStores;
        MadeChange = true;
      };

      if (LoadInst *DepLoad = dyn_cast<LoadInst>(SI->getValueOperand())) {
        if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
            isRemovable(SI) &&
            MemoryIsNotModifiedBetween(DepLoad, SI)) {

          DEBUG(dbgs() << "DSE: Remove Store Of Load from same pointer:\n  "
                       << "LOAD: " << *DepLoad << "\n  STORE: " << *SI << '\n');

          RemoveDeadInstAndUpdateBBI(SI);
          continue;
        }
      }

      // Remove null stores into the calloc'ed objects.
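      // For example (illustrative IR; names are hypothetical):
      //   %m = call i8* @calloc(i64 1, i64 16)   ; memory starts out zeroed
      //   ...
      //   store i32 0, i32* %p   ; %p based on %m: the store is redundant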
      Constant *StoredConstant = dyn_cast<Constant>(SI->getValueOperand());

      if (StoredConstant && StoredConstant->isNullValue() &&
          isRemovable(SI)) {
        Instruction *UnderlyingPointer = dyn_cast<Instruction>(
            GetUnderlyingObject(SI->getPointerOperand(), DL));

        if (UnderlyingPointer && isCallocLikeFn(UnderlyingPointer, TLI) &&
            MemoryIsNotModifiedBetween(UnderlyingPointer, SI)) {
          DEBUG(dbgs()
                << "DSE: Remove null store to the calloc'ed object:\n  DEAD: "
                << *Inst << "\n  OBJECT: " << *UnderlyingPointer << '\n');

          RemoveDeadInstAndUpdateBBI(SI);
          continue;
        }
      }
    }

    MemDepResult InstDep = MD->getDependency(Inst);

    // Ignore any store where we can't find a local dependence.
    // FIXME: cross-block DSE would be fun. :)
    if (!InstDep.isDef() && !InstDep.isClobber())
      continue;

    // Figure out what location is being stored to.
    MemoryLocation Loc = getLocForWrite(Inst, *AA);

    // If we didn't get a useful location, fail.
    if (!Loc.Ptr)
      continue;

    while (InstDep.isDef() || InstDep.isClobber()) {
      // Get the memory clobbered by the instruction we depend on.  MemDep will
      // skip any instructions that 'Loc' clearly doesn't interact with.  If we
      // end up depending on a may- or must-aliased load, then we can't
      // optimize away the store and we bail out.  However, if we depend on
      // something that overwrites the memory location we *can* potentially
      // optimize it.
      //
      // Find out what memory location the dependent instruction stores.
      Instruction *DepWrite = InstDep.getInst();
      MemoryLocation DepLoc = getLocForWrite(DepWrite, *AA);
      // If we didn't get a useful location, bail out.
      if (!DepLoc.Ptr)
        break;

      // If we find a write that is a) removable (i.e., non-volatile), b)
      // completely obliterated by the store to 'Loc', and c) known not to be
      // read by 'Inst', then we can remove it.
      if (isRemovable(DepWrite) &&
          !isPossibleSelfRead(Inst, Loc, DepWrite, *TLI, *AA)) {
        int64_t InstWriteOffset, DepWriteOffset;
        OverwriteResult OR =
            isOverwrite(Loc, DepLoc, DL, *TLI, DepWriteOffset, InstWriteOffset);
        if (OR == OverwriteComplete) {
          DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: "
                << *DepWrite << "\n  KILLER: " << *Inst << '\n');

          // Delete the store and now-dead instructions that feed it.
          DeleteDeadInstruction(DepWrite, *MD, *TLI);
          ++NumFastStores;
          MadeChange = true;

          // DeleteDeadInstruction can delete the current instruction in loop
          // cases, reset BBI.
          BBI = Inst->getIterator();
          if (BBI != BB.begin())
            --BBI;
          break;
        } else if (OR == OverwriteEnd && isShortenable(DepWrite)) {
          // TODO: base this on the target vector size so that if the earlier
          // store was too small to get vector writes anyway, then it's likely
          // a good idea to shorten it.
          // Shortening power-of-2 writes is probably a bad idea: any
          // store/memset/memcpy of that size is likely lowered to vector
          // instructions, so trimming it to a non-vector size may be slower.
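          // For example (illustrative IR; names are hypothetical):
          //   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 32, i32 8, i1 false)
          //   <a later store covering bytes 24..39 of %p>
          // lets the memset length shrink from 32 to 24.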
          MemIntrinsic *DepIntrinsic = cast<MemIntrinsic>(DepWrite);
          unsigned DepWriteAlign = DepIntrinsic->getAlignment();
          if (llvm::isPowerOf2_64(InstWriteOffset) ||
              ((DepWriteAlign != 0) && InstWriteOffset % DepWriteAlign == 0)) {

            DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW END: "
                  << *DepWrite << "\n  KILLER (offset "
                  << InstWriteOffset << ", "
                  << DepLoc.Size << ")"
                  << *Inst << '\n');

            Value *DepWriteLength = DepIntrinsic->getLength();
            Value *TrimmedLength = ConstantInt::get(DepWriteLength->getType(),
                                                    InstWriteOffset -
                                                    DepWriteOffset);
            DepIntrinsic->setLength(TrimmedLength);
            MadeChange = true;
          }
        }
      }

      // If this is a may-aliased store that is clobbering the store value, we
      // can keep searching past it for another must-aliased pointer that stores
      // to the same location.  For example, in:
      //   store -> P
      //   store -> Q
      //   store -> P
      // we can remove the first store to P even though we don't know if P and Q
      // alias.
      if (DepWrite == &BB.front()) break;

      // Can't look past this instruction if it might read 'Loc'.
      if (AA->getModRefInfo(DepWrite, Loc) & MRI_Ref)
        break;

      InstDep = MD->getPointerDependencyFrom(Loc, false,
                                             DepWrite->getIterator(), &BB);
    }
  }

  // If this block ends in a return, unwind, or unreachable, all allocas are
  // dead at its end, which means stores to them are also dead.
  if (BB.getTerminator()->getNumSuccessors() == 0)
    MadeChange |= handleEndBlock(BB);

  return MadeChange;
}

/// Returns true if the memory accessed by the second instruction is not
/// modified between the first and the second instruction.
/// Precondition: The second instruction must be dominated by the first
/// instruction.
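/// For example (illustrative IR; names are hypothetical):
///   %v = load i32, i32* %p    ; FirstI
///   call void @g()            ; may write to %p, so the answer is false
///   store i32 %v, i32* %p     ; SecondI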
bool DSE::MemoryIsNotModifiedBetween(Instruction *FirstI,
                                     Instruction *SecondI) {
  SmallVector<BasicBlock *, 16> WorkList;
  SmallPtrSet<BasicBlock *, 8> Visited;
  BasicBlock::iterator FirstBBI(FirstI);
  ++FirstBBI;
  BasicBlock::iterator SecondBBI(SecondI);
  BasicBlock *FirstBB = FirstI->getParent();
  BasicBlock *SecondBB = SecondI->getParent();
  MemoryLocation MemLoc = MemoryLocation::get(SecondI);

  // Start checking the block of the second instruction.
  WorkList.push_back(SecondBB);
  bool isFirstBlock = true;

  // Check all blocks going backward until we reach the block of FirstI.
  while (!WorkList.empty()) {
    BasicBlock *B = WorkList.pop_back_val();

    // Ignore instructions before FirstI if this is FirstBB.
    BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());

    BasicBlock::iterator EI;
    if (isFirstBlock) {
      // Ignore instructions after SecondI if this is the first visit of
      // SecondBB.
      assert(B == SecondBB && "first block is not the store block");
      EI = SecondBBI;
      isFirstBlock = false;
    } else {
      // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
      // In this case we also have to look at instructions after SecondI.
      EI = B->end();
    }
    for (; BI != EI; ++BI) {
      Instruction *I = &*BI;
      if (I->mayWriteToMemory() && I != SecondI) {
        auto Res = AA->getModRefInfo(I, MemLoc);
        if (Res != MRI_NoModRef)
          return false;
      }
    }
    if (B != FirstBB) {
      assert(B != &FirstBB->getParent()->getEntryBlock() &&
          "Should not hit the entry block because SecondI must be dominated "
          "by FirstI");
      for (auto PredI = pred_begin(B), PE = pred_end(B); PredI != PE; ++PredI) {
        if (!Visited.insert(*PredI).second)
          continue;
        WorkList.push_back(*PredI);
      }
    }
  }
  return true;
}

/// Find all blocks that will unconditionally lead to the block BB and append
/// them to Blocks.
static void FindUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
                                   BasicBlock *BB, DominatorTree *DT) {
  for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
    BasicBlock *Pred = *I;
    if (Pred == BB) continue;
    TerminatorInst *PredTI = Pred->getTerminator();
    if (PredTI->getNumSuccessors() != 1)
      continue;

    if (DT->isReachableFromEntry(Pred))
      Blocks.push_back(Pred);
  }
}

/// HandleFree - Handle frees of entire structures whose dependency is a store
/// to a field of that structure.
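/// For example (illustrative IR; names are hypothetical):
///   %f = getelementptr inbounds i8, i8* %s, i64 4
///   store i8 0, i8* %f   ; dead: the whole object is freed below
///   call void @free(i8* %s)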
bool DSE::HandleFree(CallInst *F) {
  bool MadeChange = false;

  MemoryLocation Loc = MemoryLocation(F->getOperand(0));
  SmallVector<BasicBlock *, 16> Blocks;
  Blocks.push_back(F->getParent());
  const DataLayout &DL = F->getModule()->getDataLayout();

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.pop_back_val();
    Instruction *InstPt = BB->getTerminator();
    if (BB == F->getParent()) InstPt = F;

    MemDepResult Dep =
        MD->getPointerDependencyFrom(Loc, false, InstPt->getIterator(), BB);
    while (Dep.isDef() || Dep.isClobber()) {
      Instruction *Dependency = Dep.getInst();
      if (!hasMemoryWrite(Dependency, *TLI) || !isRemovable(Dependency))
        break;

      Value *DepPointer =
          GetUnderlyingObject(getStoredPointerOperand(Dependency), DL);

      // Check for aliasing.
      if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
        break;

      auto Next = ++Dependency->getIterator();

      // DCE instructions only used to calculate that store.
      DeleteDeadInstruction(Dependency, *MD, *TLI);
      ++NumFastStores;
      MadeChange = true;

      // Inst's old Dependency is now deleted. Compute the next dependency,
      // which may also be dead, as in
      //    s[0] = 0;
      //    s[1] = 0; // This has just been deleted.
      //    free(s);
      Dep = MD->getPointerDependencyFrom(Loc, false, Next, BB);
    }

    if (Dep.isNonLocal())
      FindUnconditionalPreds(Blocks, BB, DT);
  }

  return MadeChange;
}

/// handleEndBlock - Remove dead stores to stack-allocated locations in the
/// function end block.  Ex:
/// %A = alloca i32
/// ...
/// store i32 1, i32* %A
/// ret void
bool DSE::handleEndBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallSetVector<Value *, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock &Entry = BB.getParent()->front();
  for (Instruction &I : Entry) {
    if (isa<AllocaInst>(&I))
      DeadStackObjects.insert(&I);

    // Okay, so these are dead heap objects, but if the pointer never escapes
    // then it's leaked by this function anyway, so stores into it are dead
    // at the end of the function as well.
    else if (isAllocLikeFn(&I, TLI) && !PointerMayBeCaptured(&I, true, true))
      DeadStackObjects.insert(&I);
  }

  // Treat byval or inalloca arguments the same; stores to them are dead at the
  // end of the function.
  for (Argument &AI : BB.getParent()->args())
    if (AI.hasByValOrInAllocaAttr())
      DeadStackObjects.insert(&AI);

  const DataLayout &DL = BB.getModule()->getDataLayout();

  // Scan the basic block backwards.
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ) {
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasMemoryWrite(&*BBI, *TLI) && isRemovable(&*BBI)) {
      // See through pointer-to-pointer bitcasts.
      SmallVector<Value *, 4> Pointers;
      GetUnderlyingObjects(getStoredPointerOperand(&*BBI), Pointers, DL);

      // Stores to stack values are valid candidates for removal.
      bool AllDead = true;
      for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
           E = Pointers.end(); I != E; ++I)
        if (!DeadStackObjects.count(*I)) {
          AllDead = false;
          break;
        }

      if (AllDead) {
        Instruction *Dead = &*BBI++;

        DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                     << *Dead << "\n  Objects: ";
              for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
                   E = Pointers.end(); I != E; ++I) {
                dbgs() << **I;
                if (std::next(I) != E)
                  dbgs() << ", ";
              }
              dbgs() << '\n');

        // DCE instructions only used to calculate that store.
        DeleteDeadInstruction(Dead, *MD, *TLI, &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }

    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(&*BBI, TLI)) {
      Instruction *Inst = &*BBI++;
      DeleteDeadInstruction(Inst, *MD, *TLI, &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    if (isa<AllocaInst>(BBI)) {
      // Remove allocas from the list of dead stack objects; there can't be
      // any references before the definition.
      DeadStackObjects.remove(&*BBI);
      continue;
    }

    if (auto CS = CallSite(&*BBI)) {
      // Remove allocation function calls from the list of dead stack objects;
      // there can't be any references before the definition.
      if (isAllocLikeFn(&*BBI, TLI))
        DeadStackObjects.remove(&*BBI);

      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      DeadStackObjects.remove_if([&](Value *I) {
        // See if the call site touches the value.
        ModRefInfo A = AA->getModRefInfo(CS, I, getPointerSize(I, DL, *TLI));

        return A == MRI_ModRef || A == MRI_Ref;
      });

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
        break;

      continue;
    }

    MemoryLocation LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead.
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      if (!L->isUnordered()) // Be conservative with atomic/volatile load.
        break;
      LoadedLoc = MemoryLocation::get(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = MemoryLocation::get(V);
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
      LoadedLoc = MemoryLocation::getForSource(MTI);
    } else if (!BBI->mayReadFromMemory()) {
      // Instruction doesn't read memory.  Note that stores that weren't
      // removed above will hit this case.
      continue;
    } else {
      // Unknown inst; assume it clobbers everything.
      break;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    RemoveAccessedObjects(LoadedLoc, DeadStackObjects, DL);

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}

/// RemoveAccessedObjects - Check to see if the specified location may alias
/// any of the stack objects in the DeadStackObjects set.  If so, they become
/// live because the location is being loaded.
void DSE::RemoveAccessedObjects(const MemoryLocation &LoadedLoc,
                                SmallSetVector<Value *, 16> &DeadStackObjects,
                                const DataLayout &DL) {
  const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);

  // A constant can't be in the dead pointer set.
  if (isa<Constant>(UnderlyingPointer))
    return;

  // If the kill pointer can be easily reduced to an alloca, don't bother doing
  // extraneous AA queries.
  if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
    DeadStackObjects.remove(const_cast<Value *>(UnderlyingPointer));
    return;
  }

  // Remove objects that could alias LoadedLoc.
  DeadStackObjects.remove_if([&](Value *I) {
    // See if the loaded location could alias the stack location.
    MemoryLocation StackLoc(I, getPointerSize(I, DL, *TLI));
    return !AA->isNoAlias(StackLoc, LoadedLoc);
  });
}