1//===- DeadStoreElimination.cpp - Fast Dead Store Elimination -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements a trivial dead store elimination that only considers
10// basic-block local redundant stores.
11//
12// FIXME: This should eventually be extended to be a post-dominator tree
13// traversal.  Doing so would be pretty trivial.
14//
15//===----------------------------------------------------------------------===//
16
17#include "llvm/Transforms/Scalar/DeadStoreElimination.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/DenseMap.h"
20#include "llvm/ADT/MapVector.h"
21#include "llvm/ADT/PostOrderIterator.h"
22#include "llvm/ADT/SetVector.h"
23#include "llvm/ADT/SmallPtrSet.h"
24#include "llvm/ADT/SmallVector.h"
25#include "llvm/ADT/Statistic.h"
26#include "llvm/ADT/StringRef.h"
27#include "llvm/Analysis/AliasAnalysis.h"
28#include "llvm/Analysis/CaptureTracking.h"
29#include "llvm/Analysis/GlobalsModRef.h"
30#include "llvm/Analysis/MemoryBuiltins.h"
31#include "llvm/Analysis/MemoryDependenceAnalysis.h"
32#include "llvm/Analysis/MemoryLocation.h"
33#include "llvm/Analysis/MemorySSA.h"
34#include "llvm/Analysis/MemorySSAUpdater.h"
35#include "llvm/Analysis/PostDominators.h"
36#include "llvm/Analysis/TargetLibraryInfo.h"
37#include "llvm/Analysis/ValueTracking.h"
38#include "llvm/IR/Argument.h"
39#include "llvm/IR/BasicBlock.h"
40#include "llvm/IR/Constant.h"
41#include "llvm/IR/Constants.h"
42#include "llvm/IR/DataLayout.h"
43#include "llvm/IR/Dominators.h"
44#include "llvm/IR/Function.h"
45#include "llvm/IR/InstIterator.h"
46#include "llvm/IR/InstrTypes.h"
47#include "llvm/IR/Instruction.h"
48#include "llvm/IR/Instructions.h"
49#include "llvm/IR/IntrinsicInst.h"
50#include "llvm/IR/Intrinsics.h"
51#include "llvm/IR/LLVMContext.h"
52#include "llvm/IR/Module.h"
53#include "llvm/IR/PassManager.h"
54#include "llvm/IR/PatternMatch.h"
55#include "llvm/IR/Value.h"
56#include "llvm/InitializePasses.h"
57#include "llvm/Pass.h"
58#include "llvm/Support/Casting.h"
59#include "llvm/Support/CommandLine.h"
60#include "llvm/Support/Debug.h"
61#include "llvm/Support/DebugCounter.h"
62#include "llvm/Support/ErrorHandling.h"
63#include "llvm/Support/MathExtras.h"
64#include "llvm/Support/raw_ostream.h"
65#include "llvm/Transforms/Scalar.h"
66#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
67#include "llvm/Transforms/Utils/Local.h"
68#include <algorithm>
69#include <cassert>
70#include <cstddef>
71#include <cstdint>
72#include <iterator>
73#include <map>
74#include <utility>
75
76using namespace llvm;
77using namespace PatternMatch;
78
79#define DEBUG_TYPE "dse"
80
81STATISTIC(NumRemainingStores, "Number of stores remaining after DSE");
82STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
83STATISTIC(NumFastStores, "Number of stores deleted");
84STATISTIC(NumFastOther, "Number of other instrs removed");
85STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
86STATISTIC(NumModifiedStores, "Number of stores modified");
87STATISTIC(NumNoopStores, "Number of noop stores deleted");
88STATISTIC(NumCFGChecks, "Number of stores modified");
89STATISTIC(NumCFGTries, "Number of stores modified");
90STATISTIC(NumCFGSuccess, "Number of stores modified");
91
92DEBUG_COUNTER(MemorySSACounter, "dse-memoryssa",
93              "Controls which MemoryDefs are eliminated.");
94
95static cl::opt<bool>
96EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
97  cl::init(true), cl::Hidden,
98  cl::desc("Enable partial-overwrite tracking in DSE"));
99
100static cl::opt<bool>
101EnablePartialStoreMerging("enable-dse-partial-store-merging",
102  cl::init(true), cl::Hidden,
103  cl::desc("Enable partial store merging in DSE"));
104
105static cl::opt<bool>
106    EnableMemorySSA("enable-dse-memoryssa", cl::init(false), cl::Hidden,
107                    cl::desc("Use the new MemorySSA-backed DSE."));
108
109static cl::opt<unsigned>
110    MemorySSAScanLimit("dse-memoryssa-scanlimit", cl::init(100), cl::Hidden,
111                       cl::desc("The number of memory instructions to scan for "
112                                "dead store elimination (default = 100)"));
113
114static cl::opt<unsigned> MemorySSADefsPerBlockLimit(
115    "dse-memoryssa-defs-per-block-limit", cl::init(5000), cl::Hidden,
116    cl::desc("The number of MemoryDefs we consider as candidates to eliminated "
117             "other stores per basic block (default = 5000)"));
118
119static cl::opt<unsigned> MemorySSAPathCheckLimit(
120    "dse-memoryssa-path-check-limit", cl::init(50), cl::Hidden,
121    cl::desc("The maximum number of blocks to check when trying to prove that "
122             "all paths to an exit go through a killing block (default = 50)"));
123
124//===----------------------------------------------------------------------===//
125// Helper functions
126//===----------------------------------------------------------------------===//
127using OverlapIntervalsTy = std::map<int64_t, int64_t>;
128using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;
129
130/// Delete this instruction.  Before we do, go through and zero out all the
131/// operands of this instruction.  If any of them become dead, delete them and
132/// the computation tree that feeds them.
133/// If ValueSet is non-null, remove any deleted instructions from it as well.
134static void
135deleteDeadInstruction(Instruction *I, BasicBlock::iterator *BBI,
136                      MemoryDependenceResults &MD, const TargetLibraryInfo &TLI,
137                      InstOverlapIntervalsTy &IOL,
138                      MapVector<Instruction *, bool> &ThrowableInst,
139                      SmallSetVector<const Value *, 16> *ValueSet = nullptr) {
140  SmallVector<Instruction*, 32> NowDeadInsts;
141
142  NowDeadInsts.push_back(I);
143  --NumFastOther;
144
145  // Keeping the iterator straight is a pain, so we let this routine tell the
146  // caller what the next instruction is after we're done mucking about.
147  BasicBlock::iterator NewIter = *BBI;
148
149  // Before we touch this instruction, remove it from memdep!
150  do {
151    Instruction *DeadInst = NowDeadInsts.pop_back_val();
152    // Mark the DeadInst as dead in the list of throwable instructions.
153    auto It = ThrowableInst.find(DeadInst);
154    if (It != ThrowableInst.end())
155      ThrowableInst[It->first] = false;
156    ++NumFastOther;
157
158    // Try to preserve debug information attached to the dead instruction.
159    salvageDebugInfo(*DeadInst);
160    salvageKnowledge(DeadInst);
161
162    // This instruction is dead, zap it, in stages.  Start by removing it from
163    // MemDep, which needs to know the operands and needs it to be in the
164    // function.
165    MD.removeInstruction(DeadInst);
166
167    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
168      Value *Op = DeadInst->getOperand(op);
169      DeadInst->setOperand(op, nullptr);
170
171      // If this operand just became dead, add it to the NowDeadInsts list.
172      if (!Op->use_empty()) continue;
173
174      if (Instruction *OpI = dyn_cast<Instruction>(Op))
175        if (isInstructionTriviallyDead(OpI, &TLI))
176          NowDeadInsts.push_back(OpI);
177    }
178
179    if (ValueSet) ValueSet->remove(DeadInst);
180    IOL.erase(DeadInst);
181
182    if (NewIter == DeadInst->getIterator())
183      NewIter = DeadInst->eraseFromParent();
184    else
185      DeadInst->eraseFromParent();
186  } while (!NowDeadInsts.empty());
187  *BBI = NewIter;
188  // Pop dead entries from back of ThrowableInst till we find an alive entry.
189  while (!ThrowableInst.empty() && !ThrowableInst.back().second)
190    ThrowableInst.pop_back();
191}
192
193/// Does this instruction write some memory?  This only returns true for things
194/// that we can analyze with other helpers below.
195static bool hasAnalyzableMemoryWrite(Instruction *I,
196                                     const TargetLibraryInfo &TLI) {
197  if (isa<StoreInst>(I))
198    return true;
199  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
200    switch (II->getIntrinsicID()) {
201    default:
202      return false;
203    case Intrinsic::memset:
204    case Intrinsic::memmove:
205    case Intrinsic::memcpy:
206    case Intrinsic::memcpy_element_unordered_atomic:
207    case Intrinsic::memmove_element_unordered_atomic:
208    case Intrinsic::memset_element_unordered_atomic:
209    case Intrinsic::init_trampoline:
210    case Intrinsic::lifetime_end:
211      return true;
212    }
213  }
214  if (auto *CB = dyn_cast<CallBase>(I)) {
215    LibFunc LF;
216    if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) {
217      switch (LF) {
218      case LibFunc_strcpy:
219      case LibFunc_strncpy:
220      case LibFunc_strcat:
221      case LibFunc_strncat:
222        return true;
223      default:
224        return false;
225      }
226    }
227  }
228  return false;
229}
230
231/// Return a Location stored to by the specified instruction. If isRemovable
232/// returns true, this function and getLocForRead completely describe the memory
233/// operations for this instruction.
234static MemoryLocation getLocForWrite(Instruction *Inst) {
235
236  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
237    return MemoryLocation::get(SI);
238
239  if (auto *MI = dyn_cast<AnyMemIntrinsic>(Inst)) {
240    // memcpy/memmove/memset.
241    MemoryLocation Loc = MemoryLocation::getForDest(MI);
242    return Loc;
243  }
244
245  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
246    switch (II->getIntrinsicID()) {
247    default:
248      return MemoryLocation(); // Unhandled intrinsic.
249    case Intrinsic::init_trampoline:
250      return MemoryLocation(II->getArgOperand(0));
251    case Intrinsic::lifetime_end: {
252      uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
253      return MemoryLocation(II->getArgOperand(1), Len);
254    }
255    }
256  }
257  if (auto *CB = dyn_cast<CallBase>(Inst))
258    // All the supported TLI functions so far happen to have dest as their
259    // first argument.
260    return MemoryLocation(CB->getArgOperand(0));
261  return MemoryLocation();
262}
263
264/// Return the location read by the specified "hasAnalyzableMemoryWrite"
265/// instruction if any.
266static MemoryLocation getLocForRead(Instruction *Inst,
267                                    const TargetLibraryInfo &TLI) {
268  assert(hasAnalyzableMemoryWrite(Inst, TLI) && "Unknown instruction case");
269
270  // The only instructions that both read and write are the mem transfer
271  // instructions (memcpy/memmove).
272  if (auto *MTI = dyn_cast<AnyMemTransferInst>(Inst))
273    return MemoryLocation::getForSource(MTI);
274  return MemoryLocation();
275}
276
277/// If the value of this instruction and the memory it writes to is unused, may
278/// we delete this instruction?
279static bool isRemovable(Instruction *I) {
280  // Don't remove volatile/atomic stores.
281  if (StoreInst *SI = dyn_cast<StoreInst>(I))
282    return SI->isUnordered();
283
284  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
285    switch (II->getIntrinsicID()) {
286    default: llvm_unreachable("doesn't pass 'hasAnalyzableMemoryWrite' predicate");
287    case Intrinsic::lifetime_end:
288      // Never remove dead lifetime_end's, e.g. because it is followed by a
289      // free.
290      return false;
291    case Intrinsic::init_trampoline:
292      // Always safe to remove init_trampoline.
293      return true;
294    case Intrinsic::memset:
295    case Intrinsic::memmove:
296    case Intrinsic::memcpy:
297      // Don't remove volatile memory intrinsics.
298      return !cast<MemIntrinsic>(II)->isVolatile();
299    case Intrinsic::memcpy_element_unordered_atomic:
300    case Intrinsic::memmove_element_unordered_atomic:
301    case Intrinsic::memset_element_unordered_atomic:
302      return true;
303    }
304  }
305
306  // note: only get here for calls with analyzable writes - i.e. libcalls
307  if (auto *CB = dyn_cast<CallBase>(I))
308    return CB->use_empty();
309
310  return false;
311}
312
313/// Returns true if the end of this instruction can be safely shortened in
314/// length.
315static bool isShortenableAtTheEnd(Instruction *I) {
316  // Don't shorten stores for now
317  if (isa<StoreInst>(I))
318    return false;
319
320  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
321    switch (II->getIntrinsicID()) {
322      default: return false;
323      case Intrinsic::memset:
324      case Intrinsic::memcpy:
325      case Intrinsic::memcpy_element_unordered_atomic:
326      case Intrinsic::memset_element_unordered_atomic:
327        // Do shorten memory intrinsics.
328        // FIXME: Add memmove if it's also safe to transform.
329        return true;
330    }
331  }
332
333  // Don't shorten libcalls calls for now.
334
335  return false;
336}
337
338/// Returns true if the beginning of this instruction can be safely shortened
339/// in length.
340static bool isShortenableAtTheBeginning(Instruction *I) {
341  // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
342  // easily done by offsetting the source address.
343  return isa<AnyMemSetInst>(I);
344}
345
346/// Return the pointer that is being written to.
347static Value *getStoredPointerOperand(Instruction *I) {
348  //TODO: factor this to reuse getLocForWrite
349  MemoryLocation Loc = getLocForWrite(I);
350  assert(Loc.Ptr &&
351         "unable to find pointer written for analyzable instruction?");
352  // TODO: most APIs don't expect const Value *
353  return const_cast<Value*>(Loc.Ptr);
354}
355
356static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
357                               const TargetLibraryInfo &TLI,
358                               const Function *F) {
359  uint64_t Size;
360  ObjectSizeOpts Opts;
361  Opts.NullIsUnknownSize = NullPointerIsDefined(F);
362
363  if (getObjectSize(V, Size, DL, &TLI, Opts))
364    return Size;
365  return MemoryLocation::UnknownSize;
366}
367
368namespace {
369
370enum OverwriteResult {
371  OW_Begin,
372  OW_Complete,
373  OW_End,
374  OW_PartialEarlierWithFullLater,
375  OW_Unknown
376};
377
378} // end anonymous namespace
379
380/// Return 'OW_Complete' if a store to the 'Later' location completely
381/// overwrites a store to the 'Earlier' location, 'OW_End' if the end of the
382/// 'Earlier' location is completely overwritten by 'Later', 'OW_Begin' if the
383/// beginning of the 'Earlier' location is overwritten by 'Later'.
384/// 'OW_PartialEarlierWithFullLater' means that an earlier (big) store was
385/// overwritten by a latter (smaller) store which doesn't write outside the big
386/// store's memory locations. Returns 'OW_Unknown' if nothing can be determined.
387static OverwriteResult isOverwrite(const MemoryLocation &Later,
388                                   const MemoryLocation &Earlier,
389                                   const DataLayout &DL,
390                                   const TargetLibraryInfo &TLI,
391                                   int64_t &EarlierOff, int64_t &LaterOff,
392                                   Instruction *DepWrite,
393                                   InstOverlapIntervalsTy &IOL,
394                                   AliasAnalysis &AA,
395                                   const Function *F) {
396  // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll
397  // get imprecise values here, though (except for unknown sizes).
398  if (!Later.Size.isPrecise() || !Earlier.Size.isPrecise())
399    return OW_Unknown;
400
401  const uint64_t LaterSize = Later.Size.getValue();
402  const uint64_t EarlierSize = Earlier.Size.getValue();
403
404  const Value *P1 = Earlier.Ptr->stripPointerCasts();
405  const Value *P2 = Later.Ptr->stripPointerCasts();
406
407  // If the start pointers are the same, we just have to compare sizes to see if
408  // the later store was larger than the earlier store.
409  if (P1 == P2 || AA.isMustAlias(P1, P2)) {
410    // Make sure that the Later size is >= the Earlier size.
411    if (LaterSize >= EarlierSize)
412      return OW_Complete;
413  }
414
415  // Check to see if the later store is to the entire object (either a global,
416  // an alloca, or a byval/inalloca argument).  If so, then it clearly
417  // overwrites any other store to the same object.
418  const Value *UO1 = GetUnderlyingObject(P1, DL),
419              *UO2 = GetUnderlyingObject(P2, DL);
420
421  // If we can't resolve the same pointers to the same object, then we can't
422  // analyze them at all.
423  if (UO1 != UO2)
424    return OW_Unknown;
425
426  // If the "Later" store is to a recognizable object, get its size.
427  uint64_t ObjectSize = getPointerSize(UO2, DL, TLI, F);
428  if (ObjectSize != MemoryLocation::UnknownSize)
429    if (ObjectSize == LaterSize && ObjectSize >= EarlierSize)
430      return OW_Complete;
431
432  // Okay, we have stores to two completely different pointers.  Try to
433  // decompose the pointer into a "base + constant_offset" form.  If the base
434  // pointers are equal, then we can reason about the two stores.
435  EarlierOff = 0;
436  LaterOff = 0;
437  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
438  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);
439
440  // If the base pointers still differ, we have two completely different stores.
441  if (BP1 != BP2)
442    return OW_Unknown;
443
444  // The later store completely overlaps the earlier store if:
445  //
446  // 1. Both start at the same offset and the later one's size is greater than
447  //    or equal to the earlier one's, or
448  //
449  //      |--earlier--|
450  //      |--   later   --|
451  //
452  // 2. The earlier store has an offset greater than the later offset, but which
453  //    still lies completely within the later store.
454  //
455  //        |--earlier--|
456  //    |-----  later  ------|
457  //
458  // We have to be careful here as *Off is signed while *.Size is unsigned.
459  if (EarlierOff >= LaterOff &&
460      LaterSize >= EarlierSize &&
461      uint64_t(EarlierOff - LaterOff) + EarlierSize <= LaterSize)
462    return OW_Complete;
463
464  // We may now overlap, although the overlap is not complete. There might also
465  // be other incomplete overlaps, and together, they might cover the complete
466  // earlier write.
467  // Note: The correctness of this logic depends on the fact that this function
468  // is not even called providing DepWrite when there are any intervening reads.
469  if (EnablePartialOverwriteTracking &&
470      LaterOff < int64_t(EarlierOff + EarlierSize) &&
471      int64_t(LaterOff + LaterSize) >= EarlierOff) {
472
473    // Insert our part of the overlap into the map.
474    auto &IM = IOL[DepWrite];
475    LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff
476                      << ", " << int64_t(EarlierOff + EarlierSize)
477                      << ") Later [" << LaterOff << ", "
478                      << int64_t(LaterOff + LaterSize) << ")\n");
479
480    // Make sure that we only insert non-overlapping intervals and combine
481    // adjacent intervals. The intervals are stored in the map with the ending
482    // offset as the key (in the half-open sense) and the starting offset as
483    // the value.
484    int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + LaterSize;
485
486    // Find any intervals ending at, or after, LaterIntStart which start
487    // before LaterIntEnd.
488    auto ILI = IM.lower_bound(LaterIntStart);
489    if (ILI != IM.end() && ILI->second <= LaterIntEnd) {
490      // This existing interval is overlapped with the current store somewhere
491      // in [LaterIntStart, LaterIntEnd]. Merge them by erasing the existing
492      // intervals and adjusting our start and end.
493      LaterIntStart = std::min(LaterIntStart, ILI->second);
494      LaterIntEnd = std::max(LaterIntEnd, ILI->first);
495      ILI = IM.erase(ILI);
496
497      // Continue erasing and adjusting our end in case other previous
498      // intervals are also overlapped with the current store.
499      //
500      // |--- ealier 1 ---|  |--- ealier 2 ---|
501      //     |------- later---------|
502      //
503      while (ILI != IM.end() && ILI->second <= LaterIntEnd) {
504        assert(ILI->second > LaterIntStart && "Unexpected interval");
505        LaterIntEnd = std::max(LaterIntEnd, ILI->first);
506        ILI = IM.erase(ILI);
507      }
508    }
509
510    IM[LaterIntEnd] = LaterIntStart;
511
512    ILI = IM.begin();
513    if (ILI->second <= EarlierOff &&
514        ILI->first >= int64_t(EarlierOff + EarlierSize)) {
515      LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier ["
516                        << EarlierOff << ", "
517                        << int64_t(EarlierOff + EarlierSize)
518                        << ") Composite Later [" << ILI->second << ", "
519                        << ILI->first << ")\n");
520      ++NumCompletePartials;
521      return OW_Complete;
522    }
523  }
524
525  // Check for an earlier store which writes to all the memory locations that
526  // the later store writes to.
527  if (EnablePartialStoreMerging && LaterOff >= EarlierOff &&
528      int64_t(EarlierOff + EarlierSize) > LaterOff &&
529      uint64_t(LaterOff - EarlierOff) + LaterSize <= EarlierSize) {
530    LLVM_DEBUG(dbgs() << "DSE: Partial overwrite an earlier load ["
531                      << EarlierOff << ", "
532                      << int64_t(EarlierOff + EarlierSize)
533                      << ") by a later store [" << LaterOff << ", "
534                      << int64_t(LaterOff + LaterSize) << ")\n");
535    // TODO: Maybe come up with a better name?
536    return OW_PartialEarlierWithFullLater;
537  }
538
539  // Another interesting case is if the later store overwrites the end of the
540  // earlier store.
541  //
542  //      |--earlier--|
543  //                |--   later   --|
544  //
545  // In this case we may want to trim the size of earlier to avoid generating
546  // writes to addresses which will definitely be overwritten later
547  if (!EnablePartialOverwriteTracking &&
548      (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + EarlierSize) &&
549       int64_t(LaterOff + LaterSize) >= int64_t(EarlierOff + EarlierSize)))
550    return OW_End;
551
552  // Finally, we also need to check if the later store overwrites the beginning
553  // of the earlier store.
554  //
555  //                |--earlier--|
556  //      |--   later   --|
557  //
558  // In this case we may want to move the destination address and trim the size
559  // of earlier to avoid generating writes to addresses which will definitely
560  // be overwritten later.
561  if (!EnablePartialOverwriteTracking &&
562      (LaterOff <= EarlierOff && int64_t(LaterOff + LaterSize) > EarlierOff)) {
563    assert(int64_t(LaterOff + LaterSize) < int64_t(EarlierOff + EarlierSize) &&
564           "Expect to be handled as OW_Complete");
565    return OW_Begin;
566  }
567  // Otherwise, they don't completely overlap.
568  return OW_Unknown;
569}
570
571/// If 'Inst' might be a self read (i.e. a noop copy of a
572/// memory region into an identical pointer) then it doesn't actually make its
573/// input dead in the traditional sense.  Consider this case:
574///
575///   memmove(A <- B)
576///   memmove(A <- A)
577///
578/// In this case, the second store to A does not make the first store to A dead.
579/// The usual situation isn't an explicit A<-A store like this (which can be
580/// trivially removed) but a case where two pointers may alias.
581///
582/// This function detects when it is unsafe to remove a dependent instruction
583/// because the DSE inducing instruction may be a self-read.
584static bool isPossibleSelfRead(Instruction *Inst,
585                               const MemoryLocation &InstStoreLoc,
586                               Instruction *DepWrite,
587                               const TargetLibraryInfo &TLI,
588                               AliasAnalysis &AA) {
589  // Self reads can only happen for instructions that read memory.  Get the
590  // location read.
591  MemoryLocation InstReadLoc = getLocForRead(Inst, TLI);
592  if (!InstReadLoc.Ptr)
593    return false; // Not a reading instruction.
594
595  // If the read and written loc obviously don't alias, it isn't a read.
596  if (AA.isNoAlias(InstReadLoc, InstStoreLoc))
597    return false;
598
599  if (isa<AnyMemCpyInst>(Inst)) {
600    // LLVM's memcpy overlap semantics are not fully fleshed out (see PR11763)
601    // but in practice memcpy(A <- B) either means that A and B are disjoint or
602    // are equal (i.e. there are not partial overlaps).  Given that, if we have:
603    //
604    //   memcpy/memmove(A <- B)  // DepWrite
605    //   memcpy(A <- B)  // Inst
606    //
607    // with Inst reading/writing a >= size than DepWrite, we can reason as
608    // follows:
609    //
610    //   - If A == B then both the copies are no-ops, so the DepWrite can be
611    //     removed.
612    //   - If A != B then A and B are disjoint locations in Inst.  Since
613    //     Inst.size >= DepWrite.size A and B are disjoint in DepWrite too.
614    //     Therefore DepWrite can be removed.
615    MemoryLocation DepReadLoc = getLocForRead(DepWrite, TLI);
616
617    if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
618      return false;
619  }
620
621  // If DepWrite doesn't read memory or if we can't prove it is a must alias,
622  // then it can't be considered dead.
623  return true;
624}
625
626/// Returns true if the memory which is accessed by the second instruction is not
627/// modified between the first and the second instruction.
628/// Precondition: Second instruction must be dominated by the first
629/// instruction.
630static bool memoryIsNotModifiedBetween(Instruction *FirstI,
631                                       Instruction *SecondI,
632                                       AliasAnalysis *AA,
633                                       const DataLayout &DL,
634                                       DominatorTree *DT) {
635  // Do a backwards scan through the CFG from SecondI to FirstI. Look for
636  // instructions which can modify the memory location accessed by SecondI.
637  //
638  // While doing the walk keep track of the address to check. It might be
639  // different in different basic blocks due to PHI translation.
640  using BlockAddressPair = std::pair<BasicBlock *, PHITransAddr>;
641  SmallVector<BlockAddressPair, 16> WorkList;
642  // Keep track of the address we visited each block with. Bail out if we
643  // visit a block with different addresses.
644  DenseMap<BasicBlock *, Value *> Visited;
645
646  BasicBlock::iterator FirstBBI(FirstI);
647  ++FirstBBI;
648  BasicBlock::iterator SecondBBI(SecondI);
649  BasicBlock *FirstBB = FirstI->getParent();
650  BasicBlock *SecondBB = SecondI->getParent();
651  MemoryLocation MemLoc = MemoryLocation::get(SecondI);
652  auto *MemLocPtr = const_cast<Value *>(MemLoc.Ptr);
653
654  // Start checking the SecondBB.
655  WorkList.push_back(
656      std::make_pair(SecondBB, PHITransAddr(MemLocPtr, DL, nullptr)));
657  bool isFirstBlock = true;
658
659  // Check all blocks going backward until we reach the FirstBB.
660  while (!WorkList.empty()) {
661    BlockAddressPair Current = WorkList.pop_back_val();
662    BasicBlock *B = Current.first;
663    PHITransAddr &Addr = Current.second;
664    Value *Ptr = Addr.getAddr();
665
666    // Ignore instructions before FirstI if this is the FirstBB.
667    BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());
668
669    BasicBlock::iterator EI;
670    if (isFirstBlock) {
671      // Ignore instructions after SecondI if this is the first visit of SecondBB.
672      assert(B == SecondBB && "first block is not the store block");
673      EI = SecondBBI;
674      isFirstBlock = false;
675    } else {
676      // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
677      // In this case we also have to look at instructions after SecondI.
678      EI = B->end();
679    }
680    for (; BI != EI; ++BI) {
681      Instruction *I = &*BI;
682      if (I->mayWriteToMemory() && I != SecondI)
683        if (isModSet(AA->getModRefInfo(I, MemLoc.getWithNewPtr(Ptr))))
684          return false;
685    }
686    if (B != FirstBB) {
687      assert(B != &FirstBB->getParent()->getEntryBlock() &&
688          "Should not hit the entry block because SI must be dominated by LI");
689      for (auto PredI = pred_begin(B), PE = pred_end(B); PredI != PE; ++PredI) {
690        PHITransAddr PredAddr = Addr;
691        if (PredAddr.NeedsPHITranslationFromBlock(B)) {
692          if (!PredAddr.IsPotentiallyPHITranslatable())
693            return false;
694          if (PredAddr.PHITranslateValue(B, *PredI, DT, false))
695            return false;
696        }
697        Value *TranslatedPtr = PredAddr.getAddr();
698        auto Inserted = Visited.insert(std::make_pair(*PredI, TranslatedPtr));
699        if (!Inserted.second) {
700          // We already visited this block before. If it was with a different
701          // address - bail out!
702          if (TranslatedPtr != Inserted.first->second)
703            return false;
704          // ... otherwise just skip it.
705          continue;
706        }
707        WorkList.push_back(std::make_pair(*PredI, PredAddr));
708      }
709    }
710  }
711  return true;
712}
713
714/// Find all blocks that will unconditionally lead to the block BB and append
715/// them to F.
716static void findUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
717                                   BasicBlock *BB, DominatorTree *DT) {
718  for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
719    BasicBlock *Pred = *I;
720    if (Pred == BB) continue;
721    Instruction *PredTI = Pred->getTerminator();
722    if (PredTI->getNumSuccessors() != 1)
723      continue;
724
725    if (DT->isReachableFromEntry(Pred))
726      Blocks.push_back(Pred);
727  }
728}
729
730/// Handle frees of entire structures whose dependency is a store
731/// to a field of that structure.
732static bool handleFree(CallInst *F, AliasAnalysis *AA,
733                       MemoryDependenceResults *MD, DominatorTree *DT,
734                       const TargetLibraryInfo *TLI,
735                       InstOverlapIntervalsTy &IOL,
736                       MapVector<Instruction *, bool> &ThrowableInst) {
737  bool MadeChange = false;
738
739  MemoryLocation Loc = MemoryLocation(F->getOperand(0));
740  SmallVector<BasicBlock *, 16> Blocks;
741  Blocks.push_back(F->getParent());
742  const DataLayout &DL = F->getModule()->getDataLayout();
743
744  while (!Blocks.empty()) {
745    BasicBlock *BB = Blocks.pop_back_val();
746    Instruction *InstPt = BB->getTerminator();
747    if (BB == F->getParent()) InstPt = F;
748
749    MemDepResult Dep =
750        MD->getPointerDependencyFrom(Loc, false, InstPt->getIterator(), BB);
751    while (Dep.isDef() || Dep.isClobber()) {
752      Instruction *Dependency = Dep.getInst();
753      if (!hasAnalyzableMemoryWrite(Dependency, *TLI) ||
754          !isRemovable(Dependency))
755        break;
756
757      Value *DepPointer =
758          GetUnderlyingObject(getStoredPointerOperand(Dependency), DL);
759
760      // Check for aliasing.
761      if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
762        break;
763
764      LLVM_DEBUG(
765          dbgs() << "DSE: Dead Store to soon to be freed memory:\n  DEAD: "
766                 << *Dependency << '\n');
767
768      // DCE instructions only used to calculate that store.
769      BasicBlock::iterator BBI(Dependency);
770      deleteDeadInstruction(Dependency, &BBI, *MD, *TLI, IOL,
771                            ThrowableInst);
772      ++NumFastStores;
773      MadeChange = true;
774
775      // Inst's old Dependency is now deleted. Compute the next dependency,
776      // which may also be dead, as in
777      //    s[0] = 0;
778      //    s[1] = 0; // This has just been deleted.
779      //    free(s);
780      Dep = MD->getPointerDependencyFrom(Loc, false, BBI, BB);
781    }
782
783    if (Dep.isNonLocal())
784      findUnconditionalPreds(Blocks, BB, DT);
785  }
786
787  return MadeChange;
788}
789
790/// Check to see if the specified location may alias any of the stack objects in
791/// the DeadStackObjects set. If so, they become live because the location is
792/// being loaded.
793static void removeAccessedObjects(const MemoryLocation &LoadedLoc,
794                                  SmallSetVector<const Value *, 16> &DeadStackObjects,
795                                  const DataLayout &DL, AliasAnalysis *AA,
796                                  const TargetLibraryInfo *TLI,
797                                  const Function *F) {
798  const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);
799
800  // A constant can't be in the dead pointer set.
801  if (isa<Constant>(UnderlyingPointer))
802    return;
803
804  // If the kill pointer can be easily reduced to an alloca, don't bother doing
805  // extraneous AA queries.
806  if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
807    DeadStackObjects.remove(UnderlyingPointer);
808    return;
809  }
810
811  // Remove objects that could alias LoadedLoc.
812  DeadStackObjects.remove_if([&](const Value *I) {
813    // See if the loaded location could alias the stack location.
814    MemoryLocation StackLoc(I, getPointerSize(I, DL, *TLI, F));
815    return !AA->isNoAlias(StackLoc, LoadedLoc);
816  });
817}
818
819/// Remove dead stores to stack-allocated locations in the function end block.
820/// Ex:
821/// %A = alloca i32
822/// ...
823/// store i32 1, i32* %A
824/// ret void
825static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
826                           MemoryDependenceResults *MD,
827                           const TargetLibraryInfo *TLI,
828                           InstOverlapIntervalsTy &IOL,
829                           MapVector<Instruction *, bool> &ThrowableInst) {
830  bool MadeChange = false;
831
832  // Keep track of all of the stack objects that are dead at the end of the
833  // function.
834  SmallSetVector<const Value*, 16> DeadStackObjects;
835
836  // Find all of the alloca'd pointers in the entry block.
837  BasicBlock &Entry = BB.getParent()->front();
838  for (Instruction &I : Entry) {
839    if (isa<AllocaInst>(&I))
840      DeadStackObjects.insert(&I);
841
842    // Okay, so these are dead heap objects, but if the pointer never escapes
843    // then it's leaked by this function anyways.
844    else if (isAllocLikeFn(&I, TLI) && !PointerMayBeCaptured(&I, true, true))
845      DeadStackObjects.insert(&I);
846  }
847
848  // Treat byval or inalloca arguments the same, stores to them are dead at the
849  // end of the function.
850  for (Argument &AI : BB.getParent()->args())
851    if (AI.hasPassPointeeByValueAttr())
852      DeadStackObjects.insert(&AI);
853
854  const DataLayout &DL = BB.getModule()->getDataLayout();
855
856  // Scan the basic block backwards
857  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
858    --BBI;
859
860    // If we find a store, check to see if it points into a dead stack value.
861    if (hasAnalyzableMemoryWrite(&*BBI, *TLI) && isRemovable(&*BBI)) {
862      // See through pointer-to-pointer bitcasts
863      SmallVector<const Value *, 4> Pointers;
864      GetUnderlyingObjects(getStoredPointerOperand(&*BBI), Pointers, DL);
865
866      // Stores to stack values are valid candidates for removal.
867      bool AllDead = true;
868      for (const Value *Pointer : Pointers)
869        if (!DeadStackObjects.count(Pointer)) {
870          AllDead = false;
871          break;
872        }
873
874      if (AllDead) {
875        Instruction *Dead = &*BBI;
876
877        LLVM_DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
878                          << *Dead << "\n  Objects: ";
879                   for (SmallVectorImpl<const Value *>::iterator I =
880                            Pointers.begin(),
881                        E = Pointers.end();
882                        I != E; ++I) {
883                     dbgs() << **I;
884                     if (std::next(I) != E)
885                       dbgs() << ", ";
886                   } dbgs()
887                   << '\n');
888
889        // DCE instructions only used to calculate that store.
890        deleteDeadInstruction(Dead, &BBI, *MD, *TLI, IOL, ThrowableInst,
891                              &DeadStackObjects);
892        ++NumFastStores;
893        MadeChange = true;
894        continue;
895      }
896    }
897
898    // Remove any dead non-memory-mutating instructions.
899    if (isInstructionTriviallyDead(&*BBI, TLI)) {
900      LLVM_DEBUG(dbgs() << "DSE: Removing trivially dead instruction:\n  DEAD: "
901                        << *&*BBI << '\n');
902      deleteDeadInstruction(&*BBI, &BBI, *MD, *TLI, IOL, ThrowableInst,
903                            &DeadStackObjects);
904      ++NumFastOther;
905      MadeChange = true;
906      continue;
907    }
908
909    if (isa<AllocaInst>(BBI)) {
910      // Remove allocas from the list of dead stack objects; there can't be
911      // any references before the definition.
912      DeadStackObjects.remove(&*BBI);
913      continue;
914    }
915
916    if (auto *Call = dyn_cast<CallBase>(&*BBI)) {
917      // Remove allocation function calls from the list of dead stack objects;
918      // there can't be any references before the definition.
919      if (isAllocLikeFn(&*BBI, TLI))
920        DeadStackObjects.remove(&*BBI);
921
922      // If this call does not access memory, it can't be loading any of our
923      // pointers.
924      if (AA->doesNotAccessMemory(Call))
925        continue;
926
927      // If the call might load from any of our allocas, then any store above
928      // the call is live.
929      DeadStackObjects.remove_if([&](const Value *I) {
930        // See if the call site touches the value.
931        return isRefSet(AA->getModRefInfo(
932            Call, I, getPointerSize(I, DL, *TLI, BB.getParent())));
933      });
934
935      // If all of the allocas were clobbered by the call then we're not going
936      // to find anything else to process.
937      if (DeadStackObjects.empty())
938        break;
939
940      continue;
941    }
942
943    // We can remove the dead stores, irrespective of the fence and its ordering
944    // (release/acquire/seq_cst). Fences only constraints the ordering of
945    // already visible stores, it does not make a store visible to other
946    // threads. So, skipping over a fence does not change a store from being
947    // dead.
948    if (isa<FenceInst>(*BBI))
949      continue;
950
951    MemoryLocation LoadedLoc;
952
953    // If we encounter a use of the pointer, it is no longer considered dead
954    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
955      if (!L->isUnordered()) // Be conservative with atomic/volatile load
956        break;
957      LoadedLoc = MemoryLocation::get(L);
958    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
959      LoadedLoc = MemoryLocation::get(V);
960    } else if (!BBI->mayReadFromMemory()) {
961      // Instruction doesn't read memory.  Note that stores that weren't removed
962      // above will hit this case.
963      continue;
964    } else {
965      // Unknown inst; assume it clobbers everything.
966      break;
967    }
968
969    // Remove any allocas from the DeadPointer set that are loaded, as this
970    // makes any stores above the access live.
971    removeAccessedObjects(LoadedLoc, DeadStackObjects, DL, AA, TLI, BB.getParent());
972
973    // If all of the allocas were clobbered by the access then we're not going
974    // to find anything else to process.
975    if (DeadStackObjects.empty())
976      break;
977  }
978
979  return MadeChange;
980}
981
982static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierOffset,
983                         int64_t &EarlierSize, int64_t LaterOffset,
984                         int64_t LaterSize, bool IsOverwriteEnd) {
985  // TODO: base this on the target vector size so that if the earlier
986  // store was too small to get vector writes anyway then its likely
987  // a good idea to shorten it
988  // Power of 2 vector writes are probably always a bad idea to optimize
989  // as any store/memset/memcpy is likely using vector instructions so
990  // shortening it to not vector size is likely to be slower
991  auto *EarlierIntrinsic = cast<AnyMemIntrinsic>(EarlierWrite);
992  unsigned EarlierWriteAlign = EarlierIntrinsic->getDestAlignment();
993  if (!IsOverwriteEnd)
994    LaterOffset = int64_t(LaterOffset + LaterSize);
995
996  if (!(isPowerOf2_64(LaterOffset) && EarlierWriteAlign <= LaterOffset) &&
997      !((EarlierWriteAlign != 0) && LaterOffset % EarlierWriteAlign == 0))
998    return false;
999
1000  int64_t NewLength = IsOverwriteEnd
1001                          ? LaterOffset - EarlierOffset
1002                          : EarlierSize - (LaterOffset - EarlierOffset);
1003
1004  if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(EarlierWrite)) {
1005    // When shortening an atomic memory intrinsic, the newly shortened
1006    // length must remain an integer multiple of the element size.
1007    const uint32_t ElementSize = AMI->getElementSizeInBytes();
1008    if (0 != NewLength % ElementSize)
1009      return false;
1010  }
1011
1012  LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW "
1013                    << (IsOverwriteEnd ? "END" : "BEGIN") << ": "
1014                    << *EarlierWrite << "\n  KILLER (offset " << LaterOffset
1015                    << ", " << EarlierSize << ")\n");
1016
1017  Value *EarlierWriteLength = EarlierIntrinsic->getLength();
1018  Value *TrimmedLength =
1019      ConstantInt::get(EarlierWriteLength->getType(), NewLength);
1020  EarlierIntrinsic->setLength(TrimmedLength);
1021
1022  EarlierSize = NewLength;
1023  if (!IsOverwriteEnd) {
1024    int64_t OffsetMoved = (LaterOffset - EarlierOffset);
1025    Value *Indices[1] = {
1026        ConstantInt::get(EarlierWriteLength->getType(), OffsetMoved)};
1027    GetElementPtrInst *NewDestGEP = GetElementPtrInst::CreateInBounds(
1028        EarlierIntrinsic->getRawDest()->getType()->getPointerElementType(),
1029        EarlierIntrinsic->getRawDest(), Indices, "", EarlierWrite);
1030    NewDestGEP->setDebugLoc(EarlierIntrinsic->getDebugLoc());
1031    EarlierIntrinsic->setDest(NewDestGEP);
1032    EarlierOffset = EarlierOffset + OffsetMoved;
1033  }
1034  return true;
1035}
1036
1037static bool tryToShortenEnd(Instruction *EarlierWrite,
1038                            OverlapIntervalsTy &IntervalMap,
1039                            int64_t &EarlierStart, int64_t &EarlierSize) {
1040  if (IntervalMap.empty() || !isShortenableAtTheEnd(EarlierWrite))
1041    return false;
1042
1043  OverlapIntervalsTy::iterator OII = --IntervalMap.end();
1044  int64_t LaterStart = OII->second;
1045  int64_t LaterSize = OII->first - LaterStart;
1046
1047  if (LaterStart > EarlierStart && LaterStart < EarlierStart + EarlierSize &&
1048      LaterStart + LaterSize >= EarlierStart + EarlierSize) {
1049    if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
1050                     LaterSize, true)) {
1051      IntervalMap.erase(OII);
1052      return true;
1053    }
1054  }
1055  return false;
1056}
1057
1058static bool tryToShortenBegin(Instruction *EarlierWrite,
1059                              OverlapIntervalsTy &IntervalMap,
1060                              int64_t &EarlierStart, int64_t &EarlierSize) {
1061  if (IntervalMap.empty() || !isShortenableAtTheBeginning(EarlierWrite))
1062    return false;
1063
1064  OverlapIntervalsTy::iterator OII = IntervalMap.begin();
1065  int64_t LaterStart = OII->second;
1066  int64_t LaterSize = OII->first - LaterStart;
1067
1068  if (LaterStart <= EarlierStart && LaterStart + LaterSize > EarlierStart) {
1069    assert(LaterStart + LaterSize < EarlierStart + EarlierSize &&
1070           "Should have been handled as OW_Complete");
1071    if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
1072                     LaterSize, false)) {
1073      IntervalMap.erase(OII);
1074      return true;
1075    }
1076  }
1077  return false;
1078}
1079
1080static bool removePartiallyOverlappedStores(AliasAnalysis *AA,
1081                                            const DataLayout &DL,
1082                                            InstOverlapIntervalsTy &IOL) {
1083  bool Changed = false;
1084  for (auto OI : IOL) {
1085    Instruction *EarlierWrite = OI.first;
1086    MemoryLocation Loc = getLocForWrite(EarlierWrite);
1087    assert(isRemovable(EarlierWrite) && "Expect only removable instruction");
1088
1089    const Value *Ptr = Loc.Ptr->stripPointerCasts();
1090    int64_t EarlierStart = 0;
1091    int64_t EarlierSize = int64_t(Loc.Size.getValue());
1092    GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL);
1093    OverlapIntervalsTy &IntervalMap = OI.second;
1094    Changed |=
1095        tryToShortenEnd(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
1096    if (IntervalMap.empty())
1097      continue;
1098    Changed |=
1099        tryToShortenBegin(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
1100  }
1101  return Changed;
1102}
1103
1104static bool eliminateNoopStore(Instruction *Inst, BasicBlock::iterator &BBI,
1105                               AliasAnalysis *AA, MemoryDependenceResults *MD,
1106                               const DataLayout &DL,
1107                               const TargetLibraryInfo *TLI,
1108                               InstOverlapIntervalsTy &IOL,
1109                               MapVector<Instruction *, bool> &ThrowableInst,
1110                               DominatorTree *DT) {
1111  // Must be a store instruction.
1112  StoreInst *SI = dyn_cast<StoreInst>(Inst);
1113  if (!SI)
1114    return false;
1115
1116  // If we're storing the same value back to a pointer that we just loaded from,
1117  // then the store can be removed.
1118  if (LoadInst *DepLoad = dyn_cast<LoadInst>(SI->getValueOperand())) {
1119    if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
1120        isRemovable(SI) &&
1121        memoryIsNotModifiedBetween(DepLoad, SI, AA, DL, DT)) {
1122
1123      LLVM_DEBUG(
1124          dbgs() << "DSE: Remove Store Of Load from same pointer:\n  LOAD: "
1125                 << *DepLoad << "\n  STORE: " << *SI << '\n');
1126
1127      deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, ThrowableInst);
1128      ++NumRedundantStores;
1129      return true;
1130    }
1131  }
1132
1133  // Remove null stores into the calloc'ed objects
1134  Constant *StoredConstant = dyn_cast<Constant>(SI->getValueOperand());
1135  if (StoredConstant && StoredConstant->isNullValue() && isRemovable(SI)) {
1136    Instruction *UnderlyingPointer =
1137        dyn_cast<Instruction>(GetUnderlyingObject(SI->getPointerOperand(), DL));
1138
1139    if (UnderlyingPointer && isCallocLikeFn(UnderlyingPointer, TLI) &&
1140        memoryIsNotModifiedBetween(UnderlyingPointer, SI, AA, DL, DT)) {
1141      LLVM_DEBUG(
1142          dbgs() << "DSE: Remove null store to the calloc'ed object:\n  DEAD: "
1143                 << *Inst << "\n  OBJECT: " << *UnderlyingPointer << '\n');
1144
1145      deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, ThrowableInst);
1146      ++NumRedundantStores;
1147      return true;
1148    }
1149  }
1150  return false;
1151}
1152
1153static Constant *
1154tryToMergePartialOverlappingStores(StoreInst *Earlier, StoreInst *Later,
1155                                   int64_t InstWriteOffset,
1156                                   int64_t DepWriteOffset, const DataLayout &DL,
1157                                   AliasAnalysis *AA, DominatorTree *DT) {
1158
1159  if (Earlier && isa<ConstantInt>(Earlier->getValueOperand()) &&
1160      DL.typeSizeEqualsStoreSize(Earlier->getValueOperand()->getType()) &&
1161      Later && isa<ConstantInt>(Later->getValueOperand()) &&
1162      DL.typeSizeEqualsStoreSize(Later->getValueOperand()->getType()) &&
1163      memoryIsNotModifiedBetween(Earlier, Later, AA, DL, DT)) {
1164    // If the store we find is:
1165    //   a) partially overwritten by the store to 'Loc'
1166    //   b) the later store is fully contained in the earlier one and
1167    //   c) they both have a constant value
1168    //   d) none of the two stores need padding
1169    // Merge the two stores, replacing the earlier store's value with a
1170    // merge of both values.
1171    // TODO: Deal with other constant types (vectors, etc), and probably
1172    // some mem intrinsics (if needed)
1173
1174    APInt EarlierValue =
1175        cast<ConstantInt>(Earlier->getValueOperand())->getValue();
1176    APInt LaterValue = cast<ConstantInt>(Later->getValueOperand())->getValue();
1177    unsigned LaterBits = LaterValue.getBitWidth();
1178    assert(EarlierValue.getBitWidth() > LaterValue.getBitWidth());
1179    LaterValue = LaterValue.zext(EarlierValue.getBitWidth());
1180
1181    // Offset of the smaller store inside the larger store
1182    unsigned BitOffsetDiff = (InstWriteOffset - DepWriteOffset) * 8;
1183    unsigned LShiftAmount = DL.isBigEndian() ? EarlierValue.getBitWidth() -
1184                                                   BitOffsetDiff - LaterBits
1185                                             : BitOffsetDiff;
1186    APInt Mask = APInt::getBitsSet(EarlierValue.getBitWidth(), LShiftAmount,
1187                                   LShiftAmount + LaterBits);
1188    // Clear the bits we'll be replacing, then OR with the smaller
1189    // store, shifted appropriately.
1190    APInt Merged = (EarlierValue & ~Mask) | (LaterValue << LShiftAmount);
1191    LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n  Earlier: " << *Earlier
1192                      << "\n  Later: " << *Later
1193                      << "\n  Merged Value: " << Merged << '\n');
1194    return ConstantInt::get(Earlier->getValueOperand()->getType(), Merged);
1195  }
1196  return nullptr;
1197}
1198
1199static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA,
1200                                MemoryDependenceResults *MD, DominatorTree *DT,
1201                                const TargetLibraryInfo *TLI) {
1202  const DataLayout &DL = BB.getModule()->getDataLayout();
1203  bool MadeChange = false;
1204
1205  MapVector<Instruction *, bool> ThrowableInst;
1206
1207  // A map of interval maps representing partially-overwritten value parts.
1208  InstOverlapIntervalsTy IOL;
1209
1210  // Do a top-down walk on the BB.
1211  for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
1212    // Handle 'free' calls specially.
1213    if (CallInst *F = isFreeCall(&*BBI, TLI)) {
1214      MadeChange |= handleFree(F, AA, MD, DT, TLI, IOL, ThrowableInst);
1215      // Increment BBI after handleFree has potentially deleted instructions.
1216      // This ensures we maintain a valid iterator.
1217      ++BBI;
1218      continue;
1219    }
1220
1221    Instruction *Inst = &*BBI++;
1222
1223    if (Inst->mayThrow()) {
1224      ThrowableInst[Inst] = true;
1225      continue;
1226    }
1227
1228    // Check to see if Inst writes to memory.  If not, continue.
1229    if (!hasAnalyzableMemoryWrite(Inst, *TLI))
1230      continue;
1231
1232    // eliminateNoopStore will update in iterator, if necessary.
1233    if (eliminateNoopStore(Inst, BBI, AA, MD, DL, TLI, IOL,
1234                           ThrowableInst, DT)) {
1235      MadeChange = true;
1236      continue;
1237    }
1238
1239    // If we find something that writes memory, get its memory dependence.
1240    MemDepResult InstDep = MD->getDependency(Inst);
1241
1242    // Ignore any store where we can't find a local dependence.
1243    // FIXME: cross-block DSE would be fun. :)
1244    if (!InstDep.isDef() && !InstDep.isClobber())
1245      continue;
1246
1247    // Figure out what location is being stored to.
1248    MemoryLocation Loc = getLocForWrite(Inst);
1249
1250    // If we didn't get a useful location, fail.
1251    if (!Loc.Ptr)
1252      continue;
1253
1254    // Loop until we find a store we can eliminate or a load that
1255    // invalidates the analysis. Without an upper bound on the number of
1256    // instructions examined, this analysis can become very time-consuming.
1257    // However, the potential gain diminishes as we process more instructions
1258    // without eliminating any of them. Therefore, we limit the number of
1259    // instructions we look at.
1260    auto Limit = MD->getDefaultBlockScanLimit();
1261    while (InstDep.isDef() || InstDep.isClobber()) {
1262      // Get the memory clobbered by the instruction we depend on.  MemDep will
1263      // skip any instructions that 'Loc' clearly doesn't interact with.  If we
1264      // end up depending on a may- or must-aliased load, then we can't optimize
1265      // away the store and we bail out.  However, if we depend on something
1266      // that overwrites the memory location we *can* potentially optimize it.
1267      //
1268      // Find out what memory location the dependent instruction stores.
1269      Instruction *DepWrite = InstDep.getInst();
1270      if (!hasAnalyzableMemoryWrite(DepWrite, *TLI))
1271        break;
1272      MemoryLocation DepLoc = getLocForWrite(DepWrite);
1273      // If we didn't get a useful location, or if it isn't a size, bail out.
1274      if (!DepLoc.Ptr)
1275        break;
1276
1277      // Find the last throwable instruction not removed by call to
1278      // deleteDeadInstruction.
1279      Instruction *LastThrowing = nullptr;
1280      if (!ThrowableInst.empty())
1281        LastThrowing = ThrowableInst.back().first;
1282
1283      // Make sure we don't look past a call which might throw. This is an
1284      // issue because MemoryDependenceAnalysis works in the wrong direction:
1285      // it finds instructions which dominate the current instruction, rather than
1286      // instructions which are post-dominated by the current instruction.
1287      //
1288      // If the underlying object is a non-escaping memory allocation, any store
1289      // to it is dead along the unwind edge. Otherwise, we need to preserve
1290      // the store.
1291      if (LastThrowing && DepWrite->comesBefore(LastThrowing)) {
1292        const Value* Underlying = GetUnderlyingObject(DepLoc.Ptr, DL);
1293        bool IsStoreDeadOnUnwind = isa<AllocaInst>(Underlying);
1294        if (!IsStoreDeadOnUnwind) {
1295            // We're looking for a call to an allocation function
1296            // where the allocation doesn't escape before the last
1297            // throwing instruction; PointerMayBeCaptured
1298            // reasonably fast approximation.
1299            IsStoreDeadOnUnwind = isAllocLikeFn(Underlying, TLI) &&
1300                !PointerMayBeCaptured(Underlying, false, true);
1301        }
1302        if (!IsStoreDeadOnUnwind)
1303          break;
1304      }
1305
1306      // If we find a write that is a) removable (i.e., non-volatile), b) is
1307      // completely obliterated by the store to 'Loc', and c) which we know that
1308      // 'Inst' doesn't load from, then we can remove it.
1309      // Also try to merge two stores if a later one only touches memory written
1310      // to by the earlier one.
1311      if (isRemovable(DepWrite) &&
1312          !isPossibleSelfRead(Inst, Loc, DepWrite, *TLI, *AA)) {
1313        int64_t InstWriteOffset, DepWriteOffset;
1314        OverwriteResult OR = isOverwrite(Loc, DepLoc, DL, *TLI, DepWriteOffset,
1315                                         InstWriteOffset, DepWrite, IOL, *AA,
1316                                         BB.getParent());
1317        if (OR == OW_Complete) {
1318          LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: " << *DepWrite
1319                            << "\n  KILLER: " << *Inst << '\n');
1320
1321          // Delete the store and now-dead instructions that feed it.
1322          deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL,
1323                                ThrowableInst);
1324          ++NumFastStores;
1325          MadeChange = true;
1326
1327          // We erased DepWrite; start over.
1328          InstDep = MD->getDependency(Inst);
1329          continue;
1330        } else if ((OR == OW_End && isShortenableAtTheEnd(DepWrite)) ||
1331                   ((OR == OW_Begin &&
1332                     isShortenableAtTheBeginning(DepWrite)))) {
          assert(!EnablePartialOverwriteTracking &&
                 "Do not expect to shorten stores when partial-overwrite "
                 "tracking is enabled");
1336          // The overwrite result is known, so these must be known, too.
1337          int64_t EarlierSize = DepLoc.Size.getValue();
1338          int64_t LaterSize = Loc.Size.getValue();
1339          bool IsOverwriteEnd = (OR == OW_End);
1340          MadeChange |= tryToShorten(DepWrite, DepWriteOffset, EarlierSize,
1341                                    InstWriteOffset, LaterSize, IsOverwriteEnd);
1342        } else if (EnablePartialStoreMerging &&
1343                   OR == OW_PartialEarlierWithFullLater) {
1344          auto *Earlier = dyn_cast<StoreInst>(DepWrite);
1345          auto *Later = dyn_cast<StoreInst>(Inst);
1346          if (Constant *C = tryToMergePartialOverlappingStores(
1347                  Earlier, Later, InstWriteOffset, DepWriteOffset, DL, AA,
1348                  DT)) {
1349            auto *SI = new StoreInst(
1350                C, Earlier->getPointerOperand(), false, Earlier->getAlign(),
1351                Earlier->getOrdering(), Earlier->getSyncScopeID(), DepWrite);
1352
1353            unsigned MDToKeep[] = {LLVMContext::MD_dbg, LLVMContext::MD_tbaa,
1354                                   LLVMContext::MD_alias_scope,
1355                                   LLVMContext::MD_noalias,
1356                                   LLVMContext::MD_nontemporal};
1357            SI->copyMetadata(*DepWrite, MDToKeep);
1358            ++NumModifiedStores;
1359
1360            // Delete the old stores and now-dead instructions that feed them.
1361            deleteDeadInstruction(Inst, &BBI, *MD, *TLI, IOL,
1362                                  ThrowableInst);
1363            deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL,
1364                                  ThrowableInst);
1365            MadeChange = true;
1366
1367            // We erased DepWrite and Inst (Loc); start over.
1368            break;
1369          }
1370        }
1371      }
1372
1373      // If this is a may-aliased store that is clobbering the store value, we
1374      // can keep searching past it for another must-aliased pointer that stores
1375      // to the same location.  For example, in:
1376      //   store -> P
1377      //   store -> Q
1378      //   store -> P
1379      // we can remove the first store to P even though we don't know if P and Q
1380      // alias.
1381      if (DepWrite == &BB.front()) break;
1382
1383      // Can't look past this instruction if it might read 'Loc'.
1384      if (isRefSet(AA->getModRefInfo(DepWrite, Loc)))
1385        break;
1386
1387      InstDep = MD->getPointerDependencyFrom(Loc, /*isLoad=*/ false,
1388                                             DepWrite->getIterator(), &BB,
1389                                             /*QueryInst=*/ nullptr, &Limit);
1390    }
1391  }
1392
1393  if (EnablePartialOverwriteTracking)
1394    MadeChange |= removePartiallyOverlappedStores(AA, DL, IOL);
1395
1396  // If this block ends in a return, unwind, or unreachable, all allocas are
1397  // dead at its end, which means stores to them are also dead.
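  // For example (illustrative IR):
  //   %a = alloca i32
  //   store i32 1, i32* %a
  //   ret void
  // The store to %a is dead because %a cannot be read after the return.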
1398  if (BB.getTerminator()->getNumSuccessors() == 0)
1399    MadeChange |= handleEndBlock(BB, AA, MD, TLI, IOL, ThrowableInst);
1400
1401  return MadeChange;
1402}
1403
1404static bool eliminateDeadStores(Function &F, AliasAnalysis *AA,
1405                                MemoryDependenceResults *MD, DominatorTree *DT,
1406                                const TargetLibraryInfo *TLI) {
1407  bool MadeChange = false;
1408  for (BasicBlock &BB : F)
1409    // Only check non-dead blocks.  Dead blocks may have strange pointer
1410    // cycles that will confuse alias analysis.
1411    if (DT->isReachableFromEntry(&BB))
1412      MadeChange |= eliminateDeadStores(BB, AA, MD, DT, TLI);
1413
1414  return MadeChange;
1415}
1416
1417namespace {
1418//=============================================================================
1419// MemorySSA backed dead store elimination.
1420//
1421// The code below implements dead store elimination using MemorySSA. It uses
1422// the following general approach: given a MemoryDef, walk upwards to find
1423// clobbering MemoryDefs that may be killed by the starting def. Then check
1424// that there are no uses that may read the location of the original MemoryDef
1425// in between both MemoryDefs. A bit more concretely:
1426//
1427// For all MemoryDefs StartDef:
1428// 1. Get the next dominating clobbering MemoryDef (DomAccess) by walking
1429//    upwards.
1430// 2. Check that there are no reads between DomAccess and the StartDef by
1431//    checking all uses starting at DomAccess and walking until we see StartDef.
// 3. For each found DomAccess that is a MemoryDef (DomDef), check that:
//   1. There are no barrier instructions between DomDef and StartDef (like
//      throws or stores with ordering constraints).
//   2. StartDef is executed whenever DomDef is executed.
//   3. StartDef completely overwrites DomDef.
// 4. Erase DomDef from the function and MemorySSA.
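//
// As an illustrative sketch (the IR names are made up), given
//   store i32 0, i32* %p    ; MemoryDef (1), the candidate DomAccess
//   call void @g()          ; assumed not to read or write %p
//   store i32 1, i32* %p    ; MemoryDef (2), the StartDef
// walking upwards from (2) finds (1); since no access in between may read %p
// and (2) completely overwrites the location written by (1), (1) can be
// erased.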
1438
// Returns true if \p M is an intrinsic that does not read or write memory.
1440bool isNoopIntrinsic(MemoryUseOrDef *M) {
1441  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(M->getMemoryInst())) {
1442    switch (II->getIntrinsicID()) {
1443    case Intrinsic::lifetime_start:
1444    case Intrinsic::lifetime_end:
1445    case Intrinsic::invariant_end:
1446    case Intrinsic::launder_invariant_group:
1447    case Intrinsic::assume:
1448      return true;
1449    case Intrinsic::dbg_addr:
1450    case Intrinsic::dbg_declare:
1451    case Intrinsic::dbg_label:
1452    case Intrinsic::dbg_value:
1453      llvm_unreachable("Intrinsic should not be modeled in MemorySSA");
1454    default:
1455      return false;
1456    }
1457  }
1458  return false;
1459}
1460
1461// Check if we can ignore \p D for DSE.
1462bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller) {
1463  Instruction *DI = D->getMemoryInst();
1464  // Calls that only access inaccessible memory cannot read or write any memory
1465  // locations we consider for elimination.
1466  if (auto *CB = dyn_cast<CallBase>(DI))
1467    if (CB->onlyAccessesInaccessibleMemory())
1468      return true;
1469
1470  // We can eliminate stores to locations not visible to the caller across
1471  // throwing instructions.
1472  if (DI->mayThrow() && !DefVisibleToCaller)
1473    return true;
1474
  // We can remove dead stores irrespective of the fence and its ordering
  // (release/acquire/seq_cst). Fences only constrain the ordering of already
  // visible stores; they do not make a store visible to other threads. So,
  // skipping over a fence does not change a store from being dead.
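  //
  // For example (illustrative IR):
  //   store i32 0, i32* %p
  //   fence release
  //   store i32 1, i32* %p
  // The fence does not make the first store observable on its own, so the
  // first store is still dead.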
1480  if (isa<FenceInst>(DI))
1481    return true;
1482
1483  // Skip intrinsics that do not really read or modify memory.
1484  if (isNoopIntrinsic(D))
1485    return true;
1486
1487  return false;
1488}
1489
1490struct DSEState {
1491  Function &F;
1492  AliasAnalysis &AA;
1493  MemorySSA &MSSA;
1494  DominatorTree &DT;
1495  PostDominatorTree &PDT;
1496  const TargetLibraryInfo &TLI;
1497
1498  // All MemoryDefs that potentially could kill other MemDefs.
1499  SmallVector<MemoryDef *, 64> MemDefs;
  // MemoryAccesses that should be skipped because they have already been
  // deleted.
1501  SmallPtrSet<MemoryAccess *, 4> SkipStores;
1502  // Keep track of all of the objects that are invisible to the caller before
1503  // the function returns.
1504  SmallPtrSet<const Value *, 16> InvisibleToCallerBeforeRet;
1505  // Keep track of all of the objects that are invisible to the caller after
1506  // the function returns.
1507  SmallPtrSet<const Value *, 16> InvisibleToCallerAfterRet;
1508  // Keep track of blocks with throwing instructions not modeled in MemorySSA.
1509  SmallPtrSet<BasicBlock *, 16> ThrowingBlocks;
1510  // Post-order numbers for each basic block. Used to figure out if memory
1511  // accesses are executed before another access.
1512  DenseMap<BasicBlock *, unsigned> PostOrderNumbers;
1513
1514  /// Keep track of instructions (partly) overlapping with killing MemoryDefs per
1515  /// basic block.
1516  DenseMap<BasicBlock *, InstOverlapIntervalsTy> IOLs;
1517
1518  DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT,
1519           PostDominatorTree &PDT, const TargetLibraryInfo &TLI)
1520      : F(F), AA(AA), MSSA(MSSA), DT(DT), PDT(PDT), TLI(TLI) {}
1521
1522  static DSEState get(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
1523                      DominatorTree &DT, PostDominatorTree &PDT,
1524                      const TargetLibraryInfo &TLI) {
1525    DSEState State(F, AA, MSSA, DT, PDT, TLI);
1526    // Collect blocks with throwing instructions not modeled in MemorySSA and
1527    // alloc-like objects.
1528    unsigned PO = 0;
1529    for (BasicBlock *BB : post_order(&F)) {
1530      State.PostOrderNumbers[BB] = PO++;
1531      for (Instruction &I : *BB) {
1532        MemoryAccess *MA = MSSA.getMemoryAccess(&I);
1533        if (I.mayThrow() && !MA)
1534          State.ThrowingBlocks.insert(I.getParent());
1535
1536        auto *MD = dyn_cast_or_null<MemoryDef>(MA);
1537        if (MD && State.MemDefs.size() < MemorySSADefsPerBlockLimit &&
1538            (State.getLocForWriteEx(&I) || State.isMemTerminatorInst(&I)))
1539          State.MemDefs.push_back(MD);
1540
        // Track whether alloca and alloca-like objects are visible in the
        // caller before and after the function returns. Alloca objects are
        // invalid in the caller, so they are neither visible before nor after
        // the function returns.
1545        if (isa<AllocaInst>(&I)) {
1546          State.InvisibleToCallerBeforeRet.insert(&I);
1547          State.InvisibleToCallerAfterRet.insert(&I);
1548        }
1549
1550        // For alloca-like objects we need to check if they are captured before
1551        // the function returns and if the return might capture the object.
1552        if (isAllocLikeFn(&I, &TLI)) {
1553          bool CapturesBeforeRet = PointerMayBeCaptured(&I, false, true);
1554          if (!CapturesBeforeRet) {
1555            State.InvisibleToCallerBeforeRet.insert(&I);
1556            if (!PointerMayBeCaptured(&I, true, false))
1557              State.InvisibleToCallerAfterRet.insert(&I);
1558          }
1559        }
1560      }
1561    }
1562
    // Treat byval or inalloca arguments the same as Allocas: stores to them
    // are dead at the end of the function.
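    //
    // For example (illustrative), given "define void @f(i32* byval(i32) %x)",
    // a "store i32 0, i32* %x" that is never read before returning is dead:
    // the caller only observes its own copy of the argument, never %x.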
1565    for (Argument &AI : F.args())
1566      if (AI.hasPassPointeeByValueAttr()) {
1567        // For byval, the caller doesn't know the address of the allocation.
1568        if (AI.hasByValAttr())
1569          State.InvisibleToCallerBeforeRet.insert(&AI);
1570        State.InvisibleToCallerAfterRet.insert(&AI);
1571      }
1572
1573    return State;
1574  }
1575
1576  Optional<MemoryLocation> getLocForWriteEx(Instruction *I) const {
1577    if (!I->mayWriteToMemory())
1578      return None;
1579
1580    if (auto *MTI = dyn_cast<AnyMemIntrinsic>(I))
1581      return {MemoryLocation::getForDest(MTI)};
1582
1583    if (auto *CB = dyn_cast<CallBase>(I)) {
1584      LibFunc LF;
1585      if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) {
1586        switch (LF) {
1587        case LibFunc_strcpy:
1588        case LibFunc_strncpy:
1589        case LibFunc_strcat:
1590        case LibFunc_strncat:
1591          return {MemoryLocation(CB->getArgOperand(0))};
1592        default:
1593          break;
1594        }
1595      }
1596      return None;
1597    }
1598
1599    return MemoryLocation::getOrNone(I);
1600  }
1601
  /// Returns true if \p UseInst completely overwrites \p DefLoc.
1603  bool isCompleteOverwrite(MemoryLocation DefLoc, Instruction *UseInst) const {
1604    // UseInst has a MemoryDef associated in MemorySSA. It's possible for a
1605    // MemoryDef to not write to memory, e.g. a volatile load is modeled as a
1606    // MemoryDef.
1607    if (!UseInst->mayWriteToMemory())
1608      return false;
1609
1610    if (auto *CB = dyn_cast<CallBase>(UseInst))
1611      if (CB->onlyAccessesInaccessibleMemory())
1612        return false;
1613
1614    int64_t InstWriteOffset, DepWriteOffset;
1615    auto CC = getLocForWriteEx(UseInst);
1616    InstOverlapIntervalsTy IOL;
1617
1618    const DataLayout &DL = F.getParent()->getDataLayout();
1619
1620    return CC &&
1621           isOverwrite(*CC, DefLoc, DL, TLI, DepWriteOffset, InstWriteOffset,
1622                       UseInst, IOL, AA, &F) == OW_Complete;
1623  }
1624
1625  /// Returns true if \p Def is not read before returning from the function.
1626  bool isWriteAtEndOfFunction(MemoryDef *Def) {
1627    LLVM_DEBUG(dbgs() << "  Check if def " << *Def << " ("
1628                      << *Def->getMemoryInst()
                      << ") is at the end of the function\n");
1630
1631    auto MaybeLoc = getLocForWriteEx(Def->getMemoryInst());
1632    if (!MaybeLoc) {
1633      LLVM_DEBUG(dbgs() << "  ... could not get location for write.\n");
1634      return false;
1635    }
1636
1637    SmallVector<MemoryAccess *, 4> WorkList;
1638    SmallPtrSet<MemoryAccess *, 8> Visited;
1639    auto PushMemUses = [&WorkList, &Visited](MemoryAccess *Acc) {
1640      if (!Visited.insert(Acc).second)
1641        return;
1642      for (Use &U : Acc->uses())
1643        WorkList.push_back(cast<MemoryAccess>(U.getUser()));
1644    };
1645    PushMemUses(Def);
1646    for (unsigned I = 0; I < WorkList.size(); I++) {
1647      if (WorkList.size() >= MemorySSAScanLimit) {
1648        LLVM_DEBUG(dbgs() << "  ... hit exploration limit.\n");
1649        return false;
1650      }
1651
1652      MemoryAccess *UseAccess = WorkList[I];
1653      if (isa<MemoryPhi>(UseAccess)) {
1654        PushMemUses(UseAccess);
1655        continue;
1656      }
1657
1658      // TODO: Checking for aliasing is expensive. Consider reducing the amount
1659      // of times this is called and/or caching it.
1660      Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1661      if (isReadClobber(*MaybeLoc, UseInst)) {
1662        LLVM_DEBUG(dbgs() << "  ... hit read clobber " << *UseInst << ".\n");
1663        return false;
1664      }
1665
1666      if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess))
1667        PushMemUses(UseDef);
1668    }
1669    return true;
1670  }
1671
  /// If \p I is a memory terminator like llvm.lifetime.end or free, return a
1673  /// pair with the MemoryLocation terminated by \p I and a boolean flag
1674  /// indicating whether \p I is a free-like call.
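  /// For illustration (names are made up):
  ///   call void @llvm.lifetime.end.p0i8(i64 4, i8* %p)
  /// yields the location (%p, 4 bytes) paired with false, while
  ///   call void @free(i8* %p)
  /// yields a location based on %p paired with true (a free-like call).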
1675  Optional<std::pair<MemoryLocation, bool>>
1676  getLocForTerminator(Instruction *I) const {
1677    uint64_t Len;
1678    Value *Ptr;
1679    if (match(I, m_Intrinsic<Intrinsic::lifetime_end>(m_ConstantInt(Len),
1680                                                      m_Value(Ptr))))
1681      return {std::make_pair(MemoryLocation(Ptr, Len), false)};
1682
1683    if (auto *CB = dyn_cast<CallBase>(I)) {
1684      if (isFreeCall(I, &TLI))
1685        return {std::make_pair(MemoryLocation(CB->getArgOperand(0)), true)};
1686    }
1687
1688    return None;
1689  }
1690
1691  /// Returns true if \p I is a memory terminator instruction like
1692  /// llvm.lifetime.end or free.
1693  bool isMemTerminatorInst(Instruction *I) const {
1694    IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
1695    return (II && II->getIntrinsicID() == Intrinsic::lifetime_end) ||
1696           isFreeCall(I, &TLI);
1697  }
1698
1699  /// Returns true if \p MaybeTerm is a memory terminator for the same
1700  /// underlying object as \p DefLoc.
1701  bool isMemTerminator(MemoryLocation DefLoc, Instruction *MaybeTerm) const {
1702    Optional<std::pair<MemoryLocation, bool>> MaybeTermLoc =
1703        getLocForTerminator(MaybeTerm);
1704
1705    if (!MaybeTermLoc)
1706      return false;
1707
1708    // If the terminator is a free-like call, all accesses to the underlying
1709    // object can be considered terminated.
1710    if (MaybeTermLoc->second) {
      const DataLayout &DL =
          MaybeTerm->getParent()->getModule()->getDataLayout();
1712      DefLoc = MemoryLocation(GetUnderlyingObject(DefLoc.Ptr, DL));
1713    }
1714    return AA.isMustAlias(MaybeTermLoc->first, DefLoc);
1715  }
1716
  // Returns true if \p UseInst may read from \p DefLoc.
1718  bool isReadClobber(MemoryLocation DefLoc, Instruction *UseInst) const {
1719    if (!UseInst->mayReadFromMemory())
1720      return false;
1721
1722    if (auto *CB = dyn_cast<CallBase>(UseInst))
1723      if (CB->onlyAccessesInaccessibleMemory())
1724        return false;
1725
1726    ModRefInfo MR = AA.getModRefInfo(UseInst, DefLoc);
    // If AA conservatively reports a possible read, refine the result for
    // call sites based on whether the pointer has been captured before
    // UseInst.
1728    if (isRefSet(MR))
1729      MR = AA.callCapturesBefore(UseInst, DefLoc, &DT);
1730    return isRefSet(MR);
1731  }
1732
  // Find a MemoryDef writing to \p DefLoc and dominating \p Current, with no
  // read access between them. If \p DefLoc is still accessible after the
  // function returns, additionally require that the location is completely
  // overwritten along all other paths from the found MemoryDef to a function
  // exit block. If there is no such MemoryDef, return None. The returned
  // value may not (completely) overwrite \p DefLoc. Currently we bail out
  // when we encounter an aliasing MemoryUse (read).
1739  Optional<MemoryAccess *>
1740  getDomMemoryDef(MemoryDef *KillingDef, MemoryAccess *Current,
1741                  MemoryLocation DefLoc, bool DefVisibleToCallerBeforeRet,
1742                  bool DefVisibleToCallerAfterRet, int &ScanLimit) const {
1743    MemoryAccess *DomAccess;
1744    bool StepAgain;
1745    LLVM_DEBUG(dbgs() << "  trying to get dominating access for " << *Current
1746                      << "\n");
1747    // Find the next clobbering Mod access for DefLoc, starting at Current.
1748    do {
1749      StepAgain = false;
1750      // Reached TOP.
1751      if (MSSA.isLiveOnEntryDef(Current))
1752        return None;
1753
1754      if (isa<MemoryPhi>(Current)) {
1755        DomAccess = Current;
1756        break;
1757      }
1758      MemoryUseOrDef *CurrentUD = cast<MemoryUseOrDef>(Current);
      // Look for an access that clobbers DefLoc.
1760      DomAccess = MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(CurrentUD,
1761                                                                      DefLoc);
1762      if (MSSA.isLiveOnEntryDef(DomAccess))
1763        return None;
1764
1765      if (isa<MemoryPhi>(DomAccess))
1766        break;
1767
1768      // Check if we can skip DomDef for DSE.
1769      MemoryDef *DomDef = dyn_cast<MemoryDef>(DomAccess);
1770      if (DomDef && canSkipDef(DomDef, DefVisibleToCallerBeforeRet)) {
1771        StepAgain = true;
1772        Current = DomDef->getDefiningAccess();
1773      }
1774
1775    } while (StepAgain);
1776
1777    // Accesses to objects accessible after the function returns can only be
1778    // eliminated if the access is killed along all paths to the exit. Collect
1779    // the blocks with killing (=completely overwriting MemoryDefs) and check if
1780    // they cover all paths from DomAccess to any function exit.
1781    SmallPtrSet<BasicBlock *, 16> KillingBlocks = {KillingDef->getBlock()};
1782    LLVM_DEBUG({
1783      dbgs() << "  Checking for reads of " << *DomAccess;
1784      if (isa<MemoryDef>(DomAccess))
1785        dbgs() << " (" << *cast<MemoryDef>(DomAccess)->getMemoryInst() << ")\n";
      else
        dbgs() << "\n";
1788    });
1789
1790    SmallSetVector<MemoryAccess *, 32> WorkList;
1791    auto PushMemUses = [&WorkList](MemoryAccess *Acc) {
1792      for (Use &U : Acc->uses())
1793        WorkList.insert(cast<MemoryAccess>(U.getUser()));
1794    };
1795    PushMemUses(DomAccess);
1796
    // Check if DomAccess may be read.
1798    for (unsigned I = 0; I < WorkList.size(); I++) {
1799      MemoryAccess *UseAccess = WorkList[I];
1800
1801      LLVM_DEBUG(dbgs() << "   " << *UseAccess);
1802      if (--ScanLimit == 0) {
1803        LLVM_DEBUG(dbgs() << "\n    ...  hit scan limit\n");
1804        return None;
1805      }
1806
1807      if (isa<MemoryPhi>(UseAccess)) {
1808        LLVM_DEBUG(dbgs() << "\n    ... adding PHI uses\n");
1809        PushMemUses(UseAccess);
1810        continue;
1811      }
1812
1813      Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1814      LLVM_DEBUG(dbgs() << " (" << *UseInst << ")\n");
1815
1816      if (isNoopIntrinsic(cast<MemoryUseOrDef>(UseAccess))) {
1817        LLVM_DEBUG(dbgs() << "    ... adding uses of intrinsic\n");
1818        PushMemUses(UseAccess);
1819        continue;
1820      }
1821
      // A memory terminator kills all preceding MemoryDefs and all succeeding
      // MemoryAccesses. We do not have to check its users.
1824      if (isMemTerminator(DefLoc, UseInst))
1825        continue;
1826
      // Uses which may read the location of the original MemoryDef mean we
      // cannot eliminate the original MemoryDef. Stop the walk.
1829      if (isReadClobber(DefLoc, UseInst)) {
1830        LLVM_DEBUG(dbgs() << "    ... found read clobber\n");
1831        return None;
1832      }
1833
      // For the KillingDef and DomAccess we only have to check if they read
      // the memory location.
1836      // TODO: It would probably be better to check for self-reads before
1837      // calling the function.
1838      if (KillingDef == UseAccess || DomAccess == UseAccess) {
1839        LLVM_DEBUG(dbgs() << "    ... skipping killing def/dom access\n");
1840        continue;
1841      }
1842
      // Follow the uses of all MemoryDefs, except for defs that completely
      // overwrite the original location. We have to look through *all*
      // MemoryDefs we discover, including non-aliasing ones; otherwise we
      // might miss cases like the following
1847      //   1 = Def(LoE) ; <----- DomDef stores [0,1]
1848      //   2 = Def(1)   ; (2, 1) = NoAlias,   stores [2,3]
1849      //   Use(2)       ; MayAlias 2 *and* 1, loads [0, 3].
1850      //                  (The Use points to the *first* Def it may alias)
1851      //   3 = Def(1)   ; <---- Current  (3, 2) = NoAlias, (3,1) = MayAlias,
1852      //                  stores [0,1]
1853      if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) {
1854        if (isCompleteOverwrite(DefLoc, UseInst)) {
1855          if (DefVisibleToCallerAfterRet && UseAccess != DomAccess) {
1856            BasicBlock *MaybeKillingBlock = UseInst->getParent();
1857            if (PostOrderNumbers.find(MaybeKillingBlock)->second <
1858                PostOrderNumbers.find(DomAccess->getBlock())->second) {
1859
1860              LLVM_DEBUG(dbgs() << "    ... found killing block "
1861                                << MaybeKillingBlock->getName() << "\n");
1862              KillingBlocks.insert(MaybeKillingBlock);
1863            }
1864          }
1865        } else
1866          PushMemUses(UseDef);
1867      }
1868    }
1869
1870    // For accesses to locations visible after the function returns, make sure
1871    // that the location is killed (=overwritten) along all paths from DomAccess
1872    // to the exit.
1873    if (DefVisibleToCallerAfterRet) {
1874      assert(!KillingBlocks.empty() &&
1875             "Expected at least a single killing block");
1876      // Find the common post-dominator of all killing blocks.
1877      BasicBlock *CommonPred = *KillingBlocks.begin();
1878      for (auto I = std::next(KillingBlocks.begin()), E = KillingBlocks.end();
1879           I != E; I++) {
1880        if (!CommonPred)
1881          break;
1882        CommonPred = PDT.findNearestCommonDominator(CommonPred, *I);
1883      }
1884
1885      // If CommonPred is in the set of killing blocks, just check if it
1886      // post-dominates DomAccess.
1887      if (KillingBlocks.count(CommonPred)) {
1888        if (PDT.dominates(CommonPred, DomAccess->getBlock()))
1889          return {DomAccess};
1890        return None;
1891      }
1892
      // If the common post-dominator does not post-dominate DomAccess, there
      // is a path from DomAccess to an exit not going through a killing block
      // and the store cannot be eliminated. Otherwise, explicitly check that
      // all paths from the exits back to DomAccess pass through a killing
      // block.
1895      if (PDT.dominates(CommonPred, DomAccess->getBlock())) {
1896        SetVector<BasicBlock *> WorkList;
1897
        // DomAccess's post-order number provides an upper bound for the
        // post-order numbers of blocks on a path starting at DomAccess.
1900        unsigned UpperBound =
1901            PostOrderNumbers.find(DomAccess->getBlock())->second;
1902
1903        // If CommonPred is null, there are multiple exits from the function.
1904        // They all have to be added to the worklist.
1905        if (CommonPred)
1906          WorkList.insert(CommonPred);
1907        else
1908          for (BasicBlock *R : PDT.roots())
1909            WorkList.insert(R);
1910
1911        NumCFGTries++;
1912        // Check if all paths starting from an exit node go through one of the
1913        // killing blocks before reaching DomAccess.
1914        for (unsigned I = 0; I < WorkList.size(); I++) {
1915          NumCFGChecks++;
1916          BasicBlock *Current = WorkList[I];
1917          if (KillingBlocks.count(Current))
1918            continue;
1919          if (Current == DomAccess->getBlock())
1920            return None;
1921
1922          // DomAccess is reachable from the entry, so we don't have to explore
1923          // unreachable blocks further.
1924          if (!DT.isReachableFromEntry(Current))
1925            continue;
1926
1927          unsigned CPO = PostOrderNumbers.find(Current)->second;
1928          // Current block is not on a path starting at DomAccess.
1929          if (CPO > UpperBound)
1930            continue;
1931          for (BasicBlock *Pred : predecessors(Current))
1932            WorkList.insert(Pred);
1933
1934          if (WorkList.size() >= MemorySSAPathCheckLimit)
1935            return None;
1936        }
1937        NumCFGSuccess++;
1938        return {DomAccess};
1939      }
1940      return None;
1941    }
1942
1943    // No aliasing MemoryUses of DomAccess found, DomAccess is potentially dead.
1944    return {DomAccess};
1945  }
1946
  // Delete a dead memory instruction, together with any operands that become
  // trivially dead, and keep MemorySSA up to date.
1948  void deleteDeadInstruction(Instruction *SI) {
1949    MemorySSAUpdater Updater(&MSSA);
1950    SmallVector<Instruction *, 32> NowDeadInsts;
1951    NowDeadInsts.push_back(SI);
1952    --NumFastOther;
1953
1954    while (!NowDeadInsts.empty()) {
1955      Instruction *DeadInst = NowDeadInsts.pop_back_val();
1956      ++NumFastOther;
1957
1958      // Try to preserve debug information attached to the dead instruction.
1959      salvageDebugInfo(*DeadInst);
1960      salvageKnowledge(DeadInst);
1961
1962      // Remove the Instruction from MSSA.
1963      if (MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst)) {
1964        if (MemoryDef *MD = dyn_cast<MemoryDef>(MA)) {
1965          SkipStores.insert(MD);
1966        }
1967        Updater.removeMemoryAccess(MA);
1968      }
1969
1970      auto I = IOLs.find(DeadInst->getParent());
1971      if (I != IOLs.end())
1972        I->second.erase(DeadInst);
1973      // Remove its operands
1974      for (Use &O : DeadInst->operands())
1975        if (Instruction *OpI = dyn_cast<Instruction>(O)) {
1976          O = nullptr;
1977          if (isInstructionTriviallyDead(OpI, &TLI))
1978            NowDeadInsts.push_back(OpI);
1979        }
1980
1981      DeadInst->eraseFromParent();
1982    }
1983  }
1984
  // Check for any extra throws between SI and NI that block DSE. This only
  // checks extra maythrows (those that aren't MemoryDefs). MemoryDefs that may
  // throw are handled during the walk from one def to the next.
1988  bool mayThrowBetween(Instruction *SI, Instruction *NI,
1989                       const Value *SILocUnd) const {
    // First see if we can ignore it by using the fact that SI writes to an
    // alloca/alloca-like object that is not visible to the caller during
    // execution of the function.
1993    if (SILocUnd && InvisibleToCallerBeforeRet.count(SILocUnd))
1994      return false;
1995
1996    if (SI->getParent() == NI->getParent())
1997      return ThrowingBlocks.count(SI->getParent());
1998    return !ThrowingBlocks.empty();
1999  }
2000
2001  // Check if \p NI acts as a DSE barrier for \p SI. The following instructions
2002  // act as barriers:
2003  //  * A memory instruction that may throw and \p SI accesses a non-stack
2004  //  object.
  //  * Atomic loads and stores stronger than monotonic.
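  //
  // For example (illustrative), a "store atomic i32 0, i32* %p seq_cst,
  // align 4" between two plain stores to %p is treated as a barrier: the walk
  // does not continue past it to earlier defs.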
2006  bool isDSEBarrier(const Value *SILocUnd, Instruction *NI) const {
    // If NI may throw, it acts as a barrier, unless SI writes to an
    // alloca/alloca-like object that does not escape.
2009    if (NI->mayThrow() && !InvisibleToCallerBeforeRet.count(SILocUnd))
2010      return true;
2011
2012    // If NI is an atomic load/store stronger than monotonic, do not try to
2013    // eliminate/reorder it.
2014    if (NI->isAtomic()) {
2015      if (auto *LI = dyn_cast<LoadInst>(NI))
2016        return isStrongerThanMonotonic(LI->getOrdering());
2017      if (auto *SI = dyn_cast<StoreInst>(NI))
2018        return isStrongerThanMonotonic(SI->getOrdering());
2019      llvm_unreachable("other instructions should be skipped in MemorySSA");
2020    }
2021    return false;
2022  }
2023
2024  /// Eliminate writes to objects that are not visible in the caller and are not
2025  /// accessed before returning from the function.
2026  bool eliminateDeadWritesAtEndOfFunction() {
2027    const DataLayout &DL = F.getParent()->getDataLayout();
2028    bool MadeChange = false;
2029    LLVM_DEBUG(
2030        dbgs()
2031        << "Trying to eliminate MemoryDefs at the end of the function\n");
2032    for (int I = MemDefs.size() - 1; I >= 0; I--) {
2033      MemoryDef *Def = MemDefs[I];
2034      if (SkipStores.find(Def) != SkipStores.end() ||
2035          !isRemovable(Def->getMemoryInst()))
2036        continue;
2037
      // TODO: Consider doing the underlying object check first, if it is
      // beneficial for compile time.
2040      if (isWriteAtEndOfFunction(Def)) {
2041        Instruction *DefI = Def->getMemoryInst();
2042        // See through pointer-to-pointer bitcasts
2043        SmallVector<const Value *, 4> Pointers;
2044        GetUnderlyingObjects(getLocForWriteEx(DefI)->Ptr, Pointers, DL);
2045
2046        LLVM_DEBUG(dbgs() << "   ... MemoryDef is not accessed until the end "
2047                             "of the function\n");
2048        bool CanKill = true;
2049        for (const Value *Pointer : Pointers) {
2050          if (!InvisibleToCallerAfterRet.count(Pointer)) {
2051            CanKill = false;
2052            break;
2053          }
2054        }
2055
2056        if (CanKill) {
2057          deleteDeadInstruction(DefI);
2058          ++NumFastStores;
2059          MadeChange = true;
2060        }
2061      }
2062    }
2063    return MadeChange;
2064  }
2065
2066  /// \returns true if \p Def is a no-op store, either because it
2067  /// directly stores back a loaded value or stores zero to a calloced object.
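  /// For example (illustrative IR), the following stores are no-ops, assuming
  /// nothing may modify the location in between:
  ///   %v = load i32, i32* %p
  ///   store i32 %v, i32* %p        ; stores back the value just loaded
  /// and, for zero-initialized memory:
  ///   %m = call i8* @calloc(i64 1, i64 4)
  ///   store i8 0, i8* %m           ; stores zero into calloc'ed memory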
2068  bool storeIsNoop(MemoryDef *Def, MemoryLocation DefLoc, const Value *DefUO) {
2069    StoreInst *Store = dyn_cast<StoreInst>(Def->getMemoryInst());
2070    if (!Store)
2071      return false;
2072
2073    if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) {
2074      if (LoadI->getPointerOperand() == Store->getOperand(1)) {
2075        auto *LoadAccess = MSSA.getMemoryAccess(LoadI)->getDefiningAccess();
2076        // If both accesses share the same defining access, no instructions
2077        // between them can modify the memory location.
2078        return LoadAccess == Def->getDefiningAccess();
2079      }
2080    }
2081
2082    Constant *StoredConstant = dyn_cast<Constant>(Store->getOperand(0));
2083    if (StoredConstant && StoredConstant->isNullValue()) {
2084      auto *DefUOInst = dyn_cast<Instruction>(DefUO);
2085      if (DefUOInst && isCallocLikeFn(DefUOInst, &TLI)) {
2086        auto *UnderlyingDef = cast<MemoryDef>(MSSA.getMemoryAccess(DefUOInst));
2087        // If UnderlyingDef is the clobbering access of Def, no instructions
2088        // between them can modify the memory location.
2089        auto *ClobberDef =
2090            MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def);
2091        return UnderlyingDef == ClobberDef;
2092      }
2093    }
2094    return false;
2095  }
2096};
2097
2098bool eliminateDeadStoresMemorySSA(Function &F, AliasAnalysis &AA,
2099                                  MemorySSA &MSSA, DominatorTree &DT,
2100                                  PostDominatorTree &PDT,
2101                                  const TargetLibraryInfo &TLI) {
2102  const DataLayout &DL = F.getParent()->getDataLayout();
2103  bool MadeChange = false;
2104
2105  DSEState State = DSEState::get(F, AA, MSSA, DT, PDT, TLI);
  // For each candidate killing MemoryDef:
2107  for (unsigned I = 0; I < State.MemDefs.size(); I++) {
2108    MemoryDef *KillingDef = State.MemDefs[I];
2109    if (State.SkipStores.count(KillingDef))
2110      continue;
2111    Instruction *SI = KillingDef->getMemoryInst();
2112
    Optional<MemoryLocation> MaybeSILoc;
    if (State.isMemTerminatorInst(SI))
      MaybeSILoc = State.getLocForTerminator(SI).map(
          [](const std::pair<MemoryLocation, bool> &P) { return P.first; });
    else
      MaybeSILoc = State.getLocForWriteEx(SI);
2119
2120    if (!MaybeSILoc) {
2121      LLVM_DEBUG(dbgs() << "Failed to find analyzable write location for "
2122                        << *SI << "\n");
2123      continue;
2124    }
2125    MemoryLocation SILoc = *MaybeSILoc;
2126    assert(SILoc.Ptr && "SILoc should not be null");
2127    const Value *SILocUnd = GetUnderlyingObject(SILoc.Ptr, DL);
2128
2129    // Check if the store is a no-op.
2130    if (isRemovable(SI) && State.storeIsNoop(KillingDef, SILoc, SILocUnd)) {
2131      LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n  DEAD: " << *SI << '\n');
2132      State.deleteDeadInstruction(SI);
2133      NumNoopStores++;
2134      MadeChange = true;
2135      continue;
2136    }
2137
2138    Instruction *DefObj =
2139        const_cast<Instruction *>(dyn_cast<Instruction>(SILocUnd));
2140    bool DefVisibleToCallerBeforeRet =
2141        !State.InvisibleToCallerBeforeRet.count(SILocUnd);
2142    bool DefVisibleToCallerAfterRet =
2143        !State.InvisibleToCallerAfterRet.count(SILocUnd);
2144    if (DefObj && isAllocLikeFn(DefObj, &TLI)) {
2145      if (DefVisibleToCallerBeforeRet)
2146        DefVisibleToCallerBeforeRet =
2147            PointerMayBeCapturedBefore(DefObj, false, true, SI, &DT);
2148    }
2149
2150    MemoryAccess *Current = KillingDef;
2151    LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs killed by "
2152                      << *KillingDef << " (" << *SI << ")\n");
2153
2154    int ScanLimit = MemorySSAScanLimit;
2155    // Worklist of MemoryAccesses that may be killed by KillingDef.
2156    SetVector<MemoryAccess *> ToCheck;
2157    ToCheck.insert(KillingDef->getDefiningAccess());
2158
2159    // Check if MemoryAccesses in the worklist are killed by KillingDef.
2160    for (unsigned I = 0; I < ToCheck.size(); I++) {
2161      Current = ToCheck[I];
2162      if (State.SkipStores.count(Current))
2163        continue;
2164
2165      Optional<MemoryAccess *> Next = State.getDomMemoryDef(
2166          KillingDef, Current, SILoc, DefVisibleToCallerBeforeRet,
2167          DefVisibleToCallerAfterRet, ScanLimit);
2168
2169      if (!Next) {
2170        LLVM_DEBUG(dbgs() << "  finished walk\n");
2171        continue;
2172      }
2173
2174      MemoryAccess *DomAccess = *Next;
2175      LLVM_DEBUG(dbgs() << " Checking if we can kill " << *DomAccess);
2176      if (isa<MemoryPhi>(DomAccess)) {
2177        LLVM_DEBUG(dbgs() << "\n  ... adding incoming values to worklist\n");
2178        for (Value *V : cast<MemoryPhi>(DomAccess)->incoming_values()) {
2179          MemoryAccess *IncomingAccess = cast<MemoryAccess>(V);
2180          BasicBlock *IncomingBlock = IncomingAccess->getBlock();
2181          BasicBlock *PhiBlock = DomAccess->getBlock();
2182
2183          // We only consider incoming MemoryAccesses that come before the
2184          // MemoryPhi. Otherwise we could discover candidates that do not
2185          // strictly dominate our starting def.
2186          if (State.PostOrderNumbers[IncomingBlock] >
2187              State.PostOrderNumbers[PhiBlock])
2188            ToCheck.insert(IncomingAccess);
2189        }
2190        continue;
2191      }
2192      MemoryDef *NextDef = dyn_cast<MemoryDef>(DomAccess);
2193      Instruction *NI = NextDef->getMemoryInst();
2194      LLVM_DEBUG(dbgs() << " (" << *NI << ")\n");
2195
2196      // Before we try to remove anything, check for any extra throwing
2197      // instructions that block us from DSEing
2198      if (State.mayThrowBetween(SI, NI, SILocUnd)) {
2199        LLVM_DEBUG(dbgs() << "  ... skip, may throw!\n");
2200        break;
2201      }
2202
2203      // Check for anything that looks like it will be a barrier to further
2204      // removal
2205      if (State.isDSEBarrier(SILocUnd, NI)) {
2206        LLVM_DEBUG(dbgs() << "  ... skip, barrier\n");
2207        continue;
2208      }
2209
2210      ToCheck.insert(NextDef->getDefiningAccess());
2211
2212      if (!hasAnalyzableMemoryWrite(NI, TLI)) {
2213        LLVM_DEBUG(dbgs() << "  ... skip, cannot analyze def\n");
2214        continue;
2215      }
2216
2217      if (!isRemovable(NI)) {
2218        LLVM_DEBUG(dbgs() << "  ... skip, cannot remove def\n");
2219        continue;
2220      }
2221
2222      if (!DebugCounter::shouldExecute(MemorySSACounter))
2223        continue;
2224
2225      MemoryLocation NILoc = *State.getLocForWriteEx(NI);
2226
2227      if (State.isMemTerminatorInst(SI)) {
2228        const Value *NIUnd = GetUnderlyingObject(NILoc.Ptr, DL);
2229        if (!SILocUnd || SILocUnd != NIUnd)
2230          continue;
2231        LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: " << *NI
2232                          << "\n  KILLER: " << *SI << '\n');
2233        State.deleteDeadInstruction(NI);
2234        ++NumFastStores;
2235        MadeChange = true;
2236      } else {
2237        // Check if NI overwrites SI.
2238        int64_t InstWriteOffset, DepWriteOffset;
2239        auto Iter = State.IOLs.insert(
2240            std::make_pair<BasicBlock *, InstOverlapIntervalsTy>(
2241                NI->getParent(), InstOverlapIntervalsTy()));
2242        auto &IOL = Iter.first->second;
2243        OverwriteResult OR = isOverwrite(SILoc, NILoc, DL, TLI, DepWriteOffset,
2244                                         InstWriteOffset, NI, IOL, AA, &F);
2245
2246        if (EnablePartialStoreMerging && OR == OW_PartialEarlierWithFullLater) {
2247          auto *Earlier = dyn_cast<StoreInst>(NI);
2248          auto *Later = dyn_cast<StoreInst>(SI);
2249          if (Constant *Merged = tryToMergePartialOverlappingStores(
2250                  Earlier, Later, InstWriteOffset, DepWriteOffset, DL, &AA,
2251                  &DT)) {
2252
2253            // Update stored value of earlier store to merged constant.
2254            Earlier->setOperand(0, Merged);
2255            ++NumModifiedStores;
2256            MadeChange = true;
2257
2258            // Remove later store and remove any outstanding overlap intervals
2259            // for the updated store.
2260            State.deleteDeadInstruction(Later);
2261            auto I = State.IOLs.find(Earlier->getParent());
2262            if (I != State.IOLs.end())
2263              I->second.erase(Earlier);
2264            break;
2265          }
2266        }
2267
2268        if (OR == OW_Complete) {
2269          LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: " << *NI
2270                            << "\n  KILLER: " << *SI << '\n');
2271          State.deleteDeadInstruction(NI);
2272          ++NumFastStores;
2273          MadeChange = true;
2274        }
2275      }
2276    }
2277  }
2278
2279  if (EnablePartialOverwriteTracking)
2280    for (auto &KV : State.IOLs)
2281      MadeChange |= removePartiallyOverlappedStores(&AA, DL, KV.second);
2282
2283  MadeChange |= State.eliminateDeadWritesAtEndOfFunction();
2284  return MadeChange;
2285}
2286} // end anonymous namespace
2287
2288//===----------------------------------------------------------------------===//
2289// DSE Pass
2290//===----------------------------------------------------------------------===//
2291PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
2292  AliasAnalysis &AA = AM.getResult<AAManager>(F);
2293  const TargetLibraryInfo &TLI = AM.getResult<TargetLibraryAnalysis>(F);
2294  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
2295
2296  bool Changed = false;
2297  if (EnableMemorySSA) {
2298    MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
2299    PostDominatorTree &PDT = AM.getResult<PostDominatorTreeAnalysis>(F);
2300
2301    Changed = eliminateDeadStoresMemorySSA(F, AA, MSSA, DT, PDT, TLI);
2302  } else {
2303    MemoryDependenceResults &MD = AM.getResult<MemoryDependenceAnalysis>(F);
2304
2305    Changed = eliminateDeadStores(F, &AA, &MD, &DT, &TLI);
2306  }
2307
2308#ifdef LLVM_ENABLE_STATS
2309  if (AreStatisticsEnabled())
2310    for (auto &I : instructions(F))
2311      NumRemainingStores += isa<StoreInst>(&I);
2312#endif
2313
2314  if (!Changed)
2315    return PreservedAnalyses::all();
2316
2317  PreservedAnalyses PA;
2318  PA.preserveSet<CFGAnalyses>();
2319  PA.preserve<GlobalsAA>();
2320  if (EnableMemorySSA)
2321    PA.preserve<MemorySSAAnalysis>();
2322  else
2323    PA.preserve<MemoryDependenceAnalysis>();
2324  return PA;
2325}
2326
2327namespace {
2328
2329/// A legacy pass for the legacy pass manager that wraps \c DSEPass.
2330class DSELegacyPass : public FunctionPass {
2331public:
2332  static char ID; // Pass identification, replacement for typeid
2333
2334  DSELegacyPass() : FunctionPass(ID) {
2335    initializeDSELegacyPassPass(*PassRegistry::getPassRegistry());
2336  }
2337
2338  bool runOnFunction(Function &F) override {
2339    if (skipFunction(F))
2340      return false;
2341
2342    AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2343    DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2344    const TargetLibraryInfo &TLI =
2345        getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
2346
2347    bool Changed = false;
2348    if (EnableMemorySSA) {
2349      MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
2350      PostDominatorTree &PDT =
2351          getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
2352
2353      Changed = eliminateDeadStoresMemorySSA(F, AA, MSSA, DT, PDT, TLI);
2354    } else {
2355      MemoryDependenceResults &MD =
2356          getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
2357
2358      Changed = eliminateDeadStores(F, &AA, &MD, &DT, &TLI);
2359    }
2360
2361#ifdef LLVM_ENABLE_STATS
2362    if (AreStatisticsEnabled())
2363      for (auto &I : instructions(F))
2364        NumRemainingStores += isa<StoreInst>(&I);
2365#endif
2366
2367    return Changed;
2368  }
2369
2370  void getAnalysisUsage(AnalysisUsage &AU) const override {
2371    AU.setPreservesCFG();
2372    AU.addRequired<AAResultsWrapperPass>();
2373    AU.addRequired<TargetLibraryInfoWrapperPass>();
2374    AU.addPreserved<GlobalsAAWrapperPass>();
2375    AU.addRequired<DominatorTreeWrapperPass>();
2376    AU.addPreserved<DominatorTreeWrapperPass>();
2377
2378    if (EnableMemorySSA) {
2379      AU.addRequired<PostDominatorTreeWrapperPass>();
2380      AU.addRequired<MemorySSAWrapperPass>();
2381      AU.addPreserved<PostDominatorTreeWrapperPass>();
2382      AU.addPreserved<MemorySSAWrapperPass>();
2383    } else {
2384      AU.addRequired<MemoryDependenceWrapperPass>();
2385      AU.addPreserved<MemoryDependenceWrapperPass>();
2386    }
2387  }
2388};
2389
2390} // end anonymous namespace
2391
2392char DSELegacyPass::ID = 0;
2393
2394INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
2395                      false)
2396INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
2397INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
2398INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
2399INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
2400INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
2401INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
2402INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
2403INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false,
2404                    false)
2405
2406FunctionPass *llvm::createDeadStoreEliminationPass() {
2407  return new DSELegacyPass();
2408}
2409