//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

static cl::opt<unsigned> MaxCopiedFromConstantUsers(
    "instcombine-max-copied-from-constant-users", cl::init(128),
    cl::desc("Maximum users to visit in copy from constant transform"),
    cl::Hidden);

/// isOnlyCopiedFromConstantMemory - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
/// the uses.  If we see a memcpy/memmove that targets an unoffseted pointer to
/// the alloca, and if the source pointer is a pointer to a constant memory
/// location, we can optimize this.
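///
/// For example (an illustrative sketch; the names and opaque-pointer memcpy
/// spelling are hypothetical, not taken from a real test):
///   %A = alloca [4 x i32]
///   call void @llvm.memcpy.p0.p0.i64(ptr %A, ptr @cst, i64 16, i1 false)
///   %v = load i32, ptr %A
/// If @cst refers to constant memory and every other use of %A is a read,
/// the walk below records the memcpy in TheCopy and returns true.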
static bool
isOnlyCopiedFromConstantMemory(AAResults *AA, AllocaInst *V,
                               MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them.  If we decide to go
  // ahead and replace the value with the memory location, this lets the caller
  // quickly eliminate the markers.

  using ValueAndIsOffset = PointerIntPair<Value *, 1, bool>;
  SmallVector<ValueAndIsOffset, 32> Worklist;
  SmallPtrSet<ValueAndIsOffset, 32> Visited;
  Worklist.emplace_back(V, false);
  while (!Worklist.empty()) {
    ValueAndIsOffset Elem = Worklist.pop_back_val();
    if (!Visited.insert(Elem).second)
      continue;
    if (Visited.size() > MaxCopiedFromConstantUsers)
      return false;

    const auto [Value, IsOffset] = Elem;
    for (auto &U : Value->uses()) {
      auto *I = cast<Instruction>(U.getUser());

      if (auto *LI = dyn_cast<LoadInst>(I)) {
        // Ignore non-volatile loads, they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<PHINode, SelectInst>(I)) {
        // We set IsOffset=true, to forbid the memcpy from occurring after the
        // phi: If one of the phi operands is not based on the alloca, we
        // would incorrectly omit a write.
        Worklist.emplace_back(I, true);
        continue;
      }
      if (isa<BitCastInst, AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        Worklist.emplace_back(I, IsOffset);
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer. If it
        // doesn't, it does.
        Worklist.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
        continue;
      }

      if (auto *Call = dyn_cast<CallBase>(I)) {
        // If this is the function being called then we treat it like a load and
        // ignore it.
        if (Call->isCallee(&U))
          continue;

        unsigned DataOpNo = Call->getDataOperandNo(&U);
        bool IsArgOperand = Call->isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && Call->isInAllocaArgument(DataOpNo))
          return false;

        // If this call site doesn't modify the memory, then we know it is just
        // a load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        bool NoCapture = Call->doesNotCapture(DataOpNo);
        if ((Call->onlyReadsMemory() && (Call->use_empty() || NoCapture)) ||
            (Call->onlyReadsMemory(DataOpNo) && NoCapture))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (IsArgOperand && Call->isByValArgument(DataOpNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (I->isLifetimeStartOrEnd()) {
        assert(I->use_empty() && "Lifetime markers have no result to use!");
        ToDelete.push_back(I);
        continue;
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is volatile, reject it.
      if (MI->isVolatile())
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (volatile transfers were rejected above).
      if (U.getOperandNo() == 1)
        continue;

      // If we have already seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not constant, reject it.
      if (isModSet(AA->getModRefInfoMask(MI->getSource())))
        return false;

      // Otherwise, the transform is safe.  Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}

/// isOnlyCopiedFromConstantMemory - Return the copying memcpy/memmove if the
/// specified alloca is only modified by a copy from a constant memory
/// location, and nullptr otherwise. If we can prove this, we can replace any
/// uses of the alloca with uses of the memory location directly.
static MemTransferInst *
isOnlyCopiedFromConstantMemory(AAResults *AA,
                               AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantMemory(AA, AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}

/// Returns true if V is dereferenceable for the size of the alloca.
static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,
                                           const DataLayout &DL) {
  if (AI->isArrayAllocation())
    return false;
  uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType());
  if (!AllocaSize)
    return false;
  return isDereferenceableAndAlignedPointer(V, AI->getAlign(),
                                            APInt(64, AllocaSize), DL);
}

static Instruction *simplifyAllocaArraySize(InstCombinerImpl &IC,
                                            AllocaInst &AI, DominatorTree &DT) {
  // Check for array size of 1 (scalar allocation).
  if (!AI.isArrayAllocation()) {
    // i32 1 is the canonical array size for scalar allocations.
    if (AI.getArraySize()->getType()->isIntegerTy(32))
      return nullptr;

    // Canonicalize it.
    return IC.replaceOperand(AI, 0, IC.Builder.getInt32(1));
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
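  // For example (illustrative):
  //   %a = alloca i32, i32 4
  // becomes
  //   %1 = alloca [4 x i32]
  //   %a.sub = getelementptr inbounds [4 x i32], ptr %1, i64 0, i64 0
  // and all uses of %a are rewritten to use %a.sub.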
  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    if (C->getValue().getActiveBits() <= 64) {
      Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocaInst *New = IC.Builder.CreateAlloca(NewTy, AI.getAddressSpace(),
                                                nullptr, AI.getName());
      New->setAlignment(AI.getAlign());

      replaceAllDbgUsesWith(AI, *New, *New, DT);

      // Scan to the end of the allocation instructions, to skip over a block of
      // allocas if possible...also skip interleaved debug info
      //
      BasicBlock::iterator It(New);
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
        ++It;

      // Now that It is pointing to the first non-allocation-inst in the block,
      // insert our getelementptr instruction...
      //
      Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
      Value *NullIdx = Constant::getNullValue(IdxTy);
      Value *Idx[2] = {NullIdx, NullIdx};
      Instruction *GEP = GetElementPtrInst::CreateInBounds(
          NewTy, New, Idx, New->getName() + ".sub");
      IC.InsertNewInstBefore(GEP, *It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return IC.replaceInstUsesWith(AI, GEP);
    }
  }

  if (isa<UndefValue>(AI.getArraySize()))
    return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder.CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    return IC.replaceOperand(AI, 0, V);
  }

  return nullptr;
}

namespace {
// If I and V are pointers in different address spaces, it is not allowed to
// use replaceAllUsesWith since I and V have different types. A
// non-target-specific transformation should not use addrspacecast on V since
// the two address spaces may be disjoint depending on the target.
//
// This class chases down uses of the old pointer until reaching the load
// instructions, then replaces the old pointer in the load instructions with
// the new pointer. If during the chasing it sees a bitcast or GEP, it will
// create a new bitcast or GEP with the new pointer and use them in the load
// instruction.
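//
// For example (an illustrative sketch; the names are hypothetical), replacing
// an addrspace(0) alloca with an addrspace(4) constant global rewrites
//   %g = getelementptr i8, ptr %alloca, i64 8
//   %v = load i32, ptr %g
// into
//   %g1 = getelementptr i8, ptr addrspace(4) @glob, i64 8
//   %v = load i32, ptr addrspace(4) %g1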
class PointerReplacer {
public:
  PointerReplacer(InstCombinerImpl &IC, Instruction &Root)
    : IC(IC), Root(Root) {}

  bool collectUsers();
  void replacePointer(Value *V);

private:
  bool collectUsersRecursive(Instruction &I);
  void replace(Instruction *I);
  Value *getReplacement(Value *I);
  bool isAvailable(Instruction *I) const {
    return I == &Root || Worklist.contains(I);
  }

  SmallPtrSet<Instruction *, 32> ValuesToRevisit;
  SmallSetVector<Instruction *, 4> Worklist;
  MapVector<Value *, Value *> WorkMap;
  InstCombinerImpl &IC;
  Instruction &Root;
};
} // end anonymous namespace

bool PointerReplacer::collectUsers() {
  if (!collectUsersRecursive(Root))
    return false;

  // Ensure that all outstanding (indirect) users of I
  // are inserted into the Worklist. Return false
  // otherwise.
  for (auto *Inst : ValuesToRevisit)
    if (!Worklist.contains(Inst))
      return false;
  return true;
}

bool PointerReplacer::collectUsersRecursive(Instruction &I) {
  for (auto *U : I.users()) {
    auto *Inst = cast<Instruction>(&*U);
    if (auto *Load = dyn_cast<LoadInst>(Inst)) {
      if (Load->isVolatile())
        return false;
      Worklist.insert(Load);
    } else if (auto *PHI = dyn_cast<PHINode>(Inst)) {
      // All incoming values must be instructions for replaceability.
      if (any_of(PHI->incoming_values(),
                 [](Value *V) { return !isa<Instruction>(V); }))
        return false;

      // If at least one incoming value of the PHI is not in Worklist,
      // store the PHI for revisiting and skip this iteration of the
      // loop.
      if (any_of(PHI->incoming_values(), [this](Value *V) {
            return !isAvailable(cast<Instruction>(V));
          })) {
        ValuesToRevisit.insert(Inst);
        continue;
      }

      Worklist.insert(PHI);
      if (!collectUsersRecursive(*PHI))
        return false;
    } else if (auto *SI = dyn_cast<SelectInst>(Inst)) {
      if (!isa<Instruction>(SI->getTrueValue()) ||
          !isa<Instruction>(SI->getFalseValue()))
        return false;

      if (!isAvailable(cast<Instruction>(SI->getTrueValue())) ||
          !isAvailable(cast<Instruction>(SI->getFalseValue()))) {
        ValuesToRevisit.insert(Inst);
        continue;
      }
      Worklist.insert(SI);
      if (!collectUsersRecursive(*SI))
        return false;
    } else if (isa<GetElementPtrInst, BitCastInst>(Inst)) {
      Worklist.insert(Inst);
      if (!collectUsersRecursive(*Inst))
        return false;
    } else if (auto *MI = dyn_cast<MemTransferInst>(Inst)) {
      if (MI->isVolatile())
        return false;
      Worklist.insert(Inst);
    } else if (Inst->isLifetimeStartOrEnd()) {
      continue;
    } else {
      LLVM_DEBUG(dbgs() << "Cannot handle pointer user: " << *U << '\n');
      return false;
    }
  }

  return true;
}

Value *PointerReplacer::getReplacement(Value *V) { return WorkMap.lookup(V); }

void PointerReplacer::replace(Instruction *I) {
  if (getReplacement(I))
    return;

  if (auto *LT = dyn_cast<LoadInst>(I)) {
    auto *V = getReplacement(LT->getPointerOperand());
    assert(V && "Operand not replaced");
    auto *NewI = new LoadInst(LT->getType(), V, "", LT->isVolatile(),
                              LT->getAlign(), LT->getOrdering(),
                              LT->getSyncScopeID());
    NewI->takeName(LT);
    copyMetadataForLoad(*NewI, *LT);

    IC.InsertNewInstWith(NewI, *LT);
    IC.replaceInstUsesWith(*LT, NewI);
    WorkMap[LT] = NewI;
  } else if (auto *PHI = dyn_cast<PHINode>(I)) {
    Type *NewTy = getReplacement(PHI->getIncomingValue(0))->getType();
    auto *NewPHI = PHINode::Create(NewTy, PHI->getNumIncomingValues(),
                                   PHI->getName(), PHI);
    for (unsigned int I = 0; I < PHI->getNumIncomingValues(); ++I)
      NewPHI->addIncoming(getReplacement(PHI->getIncomingValue(I)),
                          PHI->getIncomingBlock(I));
    WorkMap[PHI] = NewPHI;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    auto *V = getReplacement(GEP->getPointerOperand());
    assert(V && "Operand not replaced");
    SmallVector<Value *, 8> Indices;
    Indices.append(GEP->idx_begin(), GEP->idx_end());
    auto *NewI =
        GetElementPtrInst::Create(GEP->getSourceElementType(), V, Indices);
    IC.InsertNewInstWith(NewI, *GEP);
    NewI->takeName(GEP);
    WorkMap[GEP] = NewI;
  } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
    auto *V = getReplacement(BC->getOperand(0));
    assert(V && "Operand not replaced");
    auto *NewT = PointerType::getWithSamePointeeType(
        cast<PointerType>(BC->getType()),
        V->getType()->getPointerAddressSpace());
    auto *NewI = new BitCastInst(V, NewT);
    IC.InsertNewInstWith(NewI, *BC);
    NewI->takeName(BC);
    WorkMap[BC] = NewI;
  } else if (auto *SI = dyn_cast<SelectInst>(I)) {
    auto *NewSI = SelectInst::Create(
        SI->getCondition(), getReplacement(SI->getTrueValue()),
        getReplacement(SI->getFalseValue()), SI->getName(), nullptr, SI);
    IC.InsertNewInstWith(NewSI, *SI);
    NewSI->takeName(SI);
    WorkMap[SI] = NewSI;
  } else if (auto *MemCpy = dyn_cast<MemTransferInst>(I)) {
    auto *SrcV = getReplacement(MemCpy->getRawSource());
    // The pointer may appear in the destination of a copy, but we don't want to
    // replace it.
    if (!SrcV) {
      assert(getReplacement(MemCpy->getRawDest()) &&
             "destination not in replace list");
      return;
    }

    IC.Builder.SetInsertPoint(MemCpy);
    auto *NewI = IC.Builder.CreateMemTransferInst(
        MemCpy->getIntrinsicID(), MemCpy->getRawDest(), MemCpy->getDestAlign(),
        SrcV, MemCpy->getSourceAlign(), MemCpy->getLength(),
        MemCpy->isVolatile());
    AAMDNodes AAMD = MemCpy->getAAMetadata();
    if (AAMD)
      NewI->setAAMetadata(AAMD);

    IC.eraseInstFromFunction(*MemCpy);
    WorkMap[MemCpy] = NewI;
  } else {
    llvm_unreachable("should never reach here");
  }
}

void PointerReplacer::replacePointer(Value *V) {
#ifndef NDEBUG
  auto *PT = cast<PointerType>(Root.getType());
  auto *NT = cast<PointerType>(V->getType());
  assert(PT != NT && PT->hasSameElementTypeAs(NT) && "Invalid usage");
#endif
  WorkMap[&Root] = V;

  for (Instruction *Workitem : Worklist)
    replace(Workitem);
}

Instruction *InstCombinerImpl::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI, DT))
    return I;

  if (AI.getAllocatedType()->isSized()) {
    // Move all allocas of zero-byte objects to the entry block and merge them
    // together.  Note that we only do this for allocas, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL.getTypeAllocSize(AI.getAllocatedType()).getKnownMinValue() == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation())
        return replaceOperand(AI, 0,
            ConstantInt::get(AI.getArraySize()->getType(), 1));

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block.  There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType())
                    .getKnownMinValue() != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        const Align MaxAlign = std::max(EntryAI->getAlign(), AI.getAlign());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return replaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  // Check to see if this allocation is only modified by a memcpy/memmove from
  // a memory location whose alignment is equal to or exceeds that of the
  // allocation. If this is the case, we can change all users to use the
  // constant memory location instead.  This is commonly produced by the CFE by
  // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
  // is only subsequently read.
  SmallVector<Instruction *, 4> ToDelete;
  if (MemTransferInst *Copy = isOnlyCopiedFromConstantMemory(AA, &AI,
                                                             ToDelete)) {
    Value *TheSrc = Copy->getSource();
    Align AllocaAlign = AI.getAlign();
    Align SourceAlign = getOrEnforceKnownAlignment(
      TheSrc, AllocaAlign, DL, &AI, &AC, &DT);
    if (AllocaAlign <= SourceAlign &&
        isDereferenceableForAllocaSize(TheSrc, &AI, DL) &&
        !isa<Instruction>(TheSrc)) {
      // FIXME: Can we sink instructions without violating dominance when TheSrc
      // is an instruction instead of a constant or argument?
      LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
      LLVM_DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
      unsigned SrcAddrSpace = TheSrc->getType()->getPointerAddressSpace();
      auto *DestTy = PointerType::get(AI.getAllocatedType(), SrcAddrSpace);
      if (AI.getAddressSpace() == SrcAddrSpace) {
        for (Instruction *Delete : ToDelete)
          eraseInstFromFunction(*Delete);

        Value *Cast = Builder.CreateBitCast(TheSrc, DestTy);
        Instruction *NewI = replaceInstUsesWith(AI, Cast);
        eraseInstFromFunction(*Copy);
        ++NumGlobalCopies;
        return NewI;
      }

      PointerReplacer PtrReplacer(*this, AI);
      if (PtrReplacer.collectUsers()) {
        for (Instruction *Delete : ToDelete)
          eraseInstFromFunction(*Delete);

        Value *Cast = Builder.CreateBitCast(TheSrc, DestTy);
        PtrReplacer.replacePointer(Cast);
        ++NumGlobalCopies;
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}

// Are we allowed to form an atomic load or store of this type?
static bool isSupportedAtomicType(Type *Ty) {
  return Ty->isIntOrPtrTy() || Ty->isFloatingPointTy();
}

/// Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
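///
/// For example (illustrative, typed-pointer syntax): given
///   %v = load i32, i32* %p
/// and NewTy == float, this emits
///   %c = bitcast i32* %p to float*
///   %v1 = load float, float* %c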
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombinerImpl currently is using.
LoadInst *InstCombinerImpl::combineLoadToNewType(LoadInst &LI, Type *NewTy,
                                                 const Twine &Suffix) {
  assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
         "can't fold an atomic load to requested type");

  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  Type *NewPtrTy = NewTy->getPointerTo(AS);
  Value *NewPtr = nullptr;
  if (!(match(Ptr, m_BitCast(m_Value(NewPtr))) &&
        NewPtr->getType() == NewPtrTy))
    NewPtr = Builder.CreateBitCast(Ptr, NewPtrTy);

  LoadInst *NewLoad = Builder.CreateAlignedLoad(
      NewTy, NewPtr, LI.getAlign(), LI.isVolatile(), LI.getName() + Suffix);
  NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
  copyMetadataForLoad(*NewLoad, LI);
  return NewLoad;
}

/// Combine a store to a new type.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombinerImpl &IC, StoreInst &SI,
                                         Value *V) {
  assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
         "can't fold an atomic store of requested type");

  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder.CreateAlignedStore(
      V, IC.Builder.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlign(), SI.isVolatile());
  NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_DIAssignID:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
    case LLVMContext::MD_access_group:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_noundef:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}

/// Returns true if the instruction represents a minmax pattern like:
///   select ((cmp load V1, load V2), V1, V2).
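///
/// For example (illustrative):
///   %l1 = load i32, ptr %p1
///   %l2 = load i32, ptr %p2
///   %c  = icmp slt i32 %l1, %l2
///   %m  = select i1 %c, ptr %p1, ptr %p2
/// Here V is %m and LoadTy is set to i32.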
static bool isMinMaxWithLoads(Value *V, Type *&LoadTy) {
  assert(V->getType()->isPointerTy() && "Expected pointer type.");
  // Ignore possible ty* to ixx* bitcast.
  V = InstCombiner::peekThroughBitcast(V);
  // Check that select is select ((cmp load V1, load V2), V1, V2) - minmax
  // pattern.
  CmpInst::Predicate Pred;
  Instruction *L1;
  Instruction *L2;
  Value *LHS;
  Value *RHS;
  if (!match(V, m_Select(m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2)),
                         m_Value(LHS), m_Value(RHS))))
    return false;
  LoadTy = L1->getType();
  return (match(L1, m_Load(m_Specific(LHS))) &&
          match(L2, m_Load(m_Specific(RHS)))) ||
         (match(L1, m_Load(m_Specific(RHS))) &&
          match(L2, m_Load(m_Specific(LHS))));
}

/// Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number of
/// loads as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows loads to more closely model the types
/// of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic load
/// or a volatile load. This is debatable, and might be reasonable to change
/// later. However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type loaded to select appropriate atomic operations.
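///
/// For example (illustrative, typed-pointer syntax), a load feeding only a
/// no-op cast is rewritten to load the destination type directly:
///   %x = load i32, i32* %p
///   %y = bitcast i32 %x to float
/// becomes
///   %p1 = bitcast i32* %p to float*
///   %x1 = load float, float* %p1
/// Pointer<->integer casts are deliberately left alone to avoid type punning.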
static Instruction *combineLoadToOperationType(InstCombinerImpl &IC,
                                               LoadInst &Load) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic loads here but it isn't clear that this is important.
  if (!Load.isUnordered())
    return nullptr;

  if (Load.use_empty())
    return nullptr;

  // swifterror values can't be bitcasted.
  if (Load.getPointerOperand()->isSwiftError())
    return nullptr;

  // Fold away bit casts of the loaded value by loading the desired type.
  // Note that we should not do this for pointer<->integer casts,
  // because that would result in type punning.
  if (Load.hasOneUse()) {
    // Don't transform when the type is x86_amx; this keeps the pass that
    // lowers the x86_amx type happy.
    Type *LoadTy = Load.getType();
    if (auto *BC = dyn_cast<BitCastInst>(Load.user_back())) {
      assert(!LoadTy->isX86_AMXTy() && "Load from x86_amx* should not happen!");
      if (BC->getType()->isX86_AMXTy())
        return nullptr;
    }

    if (auto *CastUser = dyn_cast<CastInst>(Load.user_back())) {
      Type *DestTy = CastUser->getDestTy();
      if (CastUser->isNoopCast(IC.getDataLayout()) &&
          LoadTy->isPtrOrPtrVectorTy() == DestTy->isPtrOrPtrVectorTy() &&
          (!Load.isAtomic() || isSupportedAtomicType(DestTy))) {
        LoadInst *NewLoad = IC.combineLoadToNewType(Load, DestTy);
        CastUser->replaceAllUsesWith(NewLoad);
        IC.eraseInstFromFunction(*CastUser);
        return &Load;
      }
    }
  }

  // FIXME: We should also canonicalize loads of vectors when their elements are
  // cast to other types.
  return nullptr;
}

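// Unpack a load of an aggregate into per-element loads. For example
// (illustrative):
//   %s = load { i32, i32 }, ptr %p
// becomes, roughly,
//   %s.elt = getelementptr inbounds { i32, i32 }, ptr %p, i32 0, i32 0
//   %s.unpack = load i32, ptr %s.elt
//   %s.elt1 = getelementptr inbounds { i32, i32 }, ptr %p, i32 0, i32 1
//   %s.unpack2 = load i32, ptr %s.elt1
// followed by insertvalues that rebuild the aggregate from poison.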
static Instruction *unpackLoadToAggregate(InstCombinerImpl &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // loads here but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;

  StringRef Name = LI.getName();

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = IC.combineLoadToNewType(LI, ST->getTypeAtIndex(0U),
                                                  ".unpack");
      NewLoad->setAAMetadata(LI.getAAMetadata());
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        PoisonValue::get(T), NewLoad, 0, Name));
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return nullptr;

    const auto Align = LI.getAlign();
    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt32Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = PoisonValue::get(T);
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, ArrayRef(Indices),
                                               Name + ".elt");
      auto *L = IC.Builder.CreateAlignedLoad(
          ST->getElementType(i), Ptr,
          commonAlignment(Align, SL->getElementOffset(i)), Name + ".unpack");
      // Propagate AA metadata. It'll still be valid on the narrowed load.
      L->setAAMetadata(LI.getAAMetadata());
      V = IC.Builder.CreateInsertValue(V, L, i);
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = IC.combineLoadToNewType(LI, ET, ".unpack");
      NewLoad->setAAMetadata(LI.getAAMetadata());
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        PoisonValue::get(T), NewLoad, 0, Name));
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return nullptr;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(ET);
    const auto Align = LI.getAlign();

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = PoisonValue::get(T);
    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, ArrayRef(Indices),
                                               Name + ".elt");
      auto *L = IC.Builder.CreateAlignedLoad(AT->getElementType(), Ptr,
                                             commonAlignment(Align, Offset),
                                             Name + ".unpack");
      L->setAAMetadata(LI.getAAMetadata());
      V = IC.Builder.CreateInsertValue(V, L, i);
      Offset += EltSize;
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  return nullptr;
}

// If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true. Otherwise, return
// false. (Constant global values and allocas are the kinds of objects whose
// size we can currently bound this way.)
//
// FIXME: This should probably live in ValueTracking (or similar).
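//
// For example (illustrative): given "%a = alloca [4 x i32]" (16 bytes), this
// returns true for MaxSize >= 16 and false for any smaller MaxSize.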
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      append_range(Worklist, PN->incoming_values());
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      TypeSize TS = DL.getTypeAllocSize(AI->getAllocatedType());
      if (TS.isScalable())
        return false;
      // Make sure that, even if the multiplication below would wrap as a
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zext(128) * APInt(128, TS.getFixedValue()))
              .ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}

// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
// @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
// ...
// %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
// ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. We could also
// search through non-zero constant indices if we kept track of the offsets
// those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombinerImpl &IC,
                                     GetElementPtrInst *GEPI, Instruction *MemI,
                                     unsigned &Idx) {
  if (GEPI->getNumOperands() < 2)
    return false;

  // Find the first non-zero index of a GEP. If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };

  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type. See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *SourceElementType = GEPI->getSourceElementType();
  // Size information about scalable vectors is not available, so we cannot
  // deduce whether indexing at n is undefined behaviour or not. Bail out.
  if (isa<ScalableVectorType>(SourceElementType))
    return false;

  Type *AllocTy = GetElementPtrInst::getIndexedType(SourceElementType, Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  const DataLayout &DL = IC.getDataLayout();
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy).getFixedValue();

  // If there are more indices after the one we might replace with a zero, make
  // sure they're all non-negative. If any of them are negative, the overall
  // address being computed might be before the base address determined by the
  // first non-zero index.
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
      KnownBits Known = IC.computeKnownBits(GEPI->getOperand(i), 0, MemI);
      if (Known.isNonNegative())
        continue;
      return false;
    }

    return true;
  };

  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient). We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
  // also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}

// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombinerImpl &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
        ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}

static bool canSimplifyNullStoreOrGEP(StoreInst &SI) {
  if (NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()))
    return false;

  auto *Ptr = SI.getPointerOperand();
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
    Ptr = GEPI->getOperand(0);
  return (isa<ConstantPointerNull>(Ptr) &&
          !NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()));
}

static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    if (isa<ConstantPointerNull>(GEPI0) &&
        !NullPointerIsDefined(LI.getFunction(), GEPI->getPointerAddressSpace()))
      return true;
  }
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) &&
       !NullPointerIsDefined(LI.getFunction(), LI.getPointerAddressSpace())))
    return true;
  return false;
}

Instruction *InstCombinerImpl::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  Align KnownAlign = getOrEnforceKnownAlignment(
      Op, DL.getPrefTypeAlign(LI.getType()), DL, &LI, &AC, &DT);
  if (KnownAlign > LI.getAlign())
    LI.setAlignment(KnownAlign);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
      Worklist.push(NewGEPI);
      return &LI;
  }

  if (Instruction *Res = unpackLoadToAggregate(*this, LI))
    return Res;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  bool IsLoadCSE = false;
  if (Value *AvailableVal = FindAvailableLoadedValue(&LI, *AA, &IsLoadCSE)) {
    if (IsLoadCSE)
      combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI, false);

    return replaceInstUsesWith(
        LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                           LI.getName() + ".cast"));
  }

  // None of the following transforms are legal for volatile/ordered atomic
  // loads.  Most of them do apply for unordered atomics.
  if (!LI.isUnordered()) return nullptr;

  // load(gep null, ...) -> unreachable
  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for these transforms.
  if (canSimplifyNullLoadOrGEP(LI, Op)) {
    // Insert a new store to null instruction before the load to indicate
    // that this code is not reachable.  We do this instead of inserting
    // an unreachable instruction directly because we cannot modify the
    // CFG.
    StoreInst *SI = new StoreInst(PoisonValue::get(LI.getType()),
                                  Constant::getNullValue(Op->getType()), &LI);
    SI->setDebugLoc(LI.getDebugLoc());
    return replaceInstUsesWith(LI, PoisonValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap!  Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
      Align Alignment = LI.getAlign();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), LI.getType(),
                                      Alignment, DL, SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), LI.getType(),
                                      Alignment, DL, SI)) {
        LoadInst *V1 =
            Builder.CreateLoad(LI.getType(), SI->getOperand(1),
                               SI->getOperand(1)->getName() + ".val");
        LoadInst *V2 =
            Builder.CreateLoad(LI.getType(), SI->getOperand(2),
                               SI->getOperand(2)->getName() + ".val");
        assert(LI.isUnordered() && "implied by above");
        V1->setAlignment(Alignment);
        V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        V2->setAlignment(Alignment);
        V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace()))
        return replaceOperand(LI, 0, SI->getOperand(2));

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace()))
        return replaceOperand(LI, 0, SI->getOperand(1));
    }
  }
  return nullptr;
}

/// Look for extractelement/insertvalue sequence that acts like a bitcast.
///
/// \returns underlying value that was "cast", or nullptr otherwise.
///
/// For example, if we have:
///
///     %E0 = extractelement <2 x double> %U, i32 0
///     %V0 = insertvalue [2 x double] undef, double %E0, 0
///     %E1 = extractelement <2 x double> %U, i32 1
///     %V1 = insertvalue [2 x double] %V0, double %E1, 1
///
/// and the layout of a <2 x double> is isomorphic to a [2 x double],
/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
/// Note that %U may contain non-undef values where %V1 has undef.
static Value *likeBitCastFromVector(InstCombinerImpl &IC, Value *V) {
  Value *U = nullptr;
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    if (!U)
      U = W;
    else if (U != W)
      return nullptr;
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 ||
        CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  if (!match(V, m_Undef()) || !U)
    return nullptr;

  auto *UT = cast<VectorType>(U->getType());
  auto *VT = V->getType();
  // Check that types UT and VT are bitwise isomorphic.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    return nullptr;
  }
  if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
      return nullptr;
  } else {
    auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
      return nullptr;
    for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
  return U;
}

/// Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value being
/// stored.
///
/// However, this routine must never change the width of a store or the number of
/// stores as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows stores to more closely model the types
/// of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller erase
/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.EraseInstFromFunction returns a null pointer.
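///
/// For example (illustrative, typed-pointer syntax):
///   %i = bitcast float %f to i32
///   store i32 %i, i32* %p
/// becomes
///   %p1 = bitcast i32* %p to float*
///   store float %f, float* %p1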
static bool combineStoreToValueType(InstCombinerImpl &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic stores here but it isn't clear that this is important.
  if (!SI.isUnordered())
    return false;

  // swifterror values can't be bitcasted.
  if (SI.getPointerOperand()->isSwiftError())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    assert(!BC->getType()->isX86_AMXTy() &&
           "store to x86_amx* should not happen!");
    V = BC->getOperand(0);
    // Don't transform when the type is x86_amx; this keeps the pass that
    // lowers the x86_amx type happy.
    if (V->getType()->isX86_AMXTy())
      return false;
    if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
  }

  if (Value *U = likeBitCastFromVector(IC, V))
    if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
      combineStoreToNewValue(IC, SI, U);
      return true;
    }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}

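// Unpack a store of an aggregate into per-element stores. For example
// (illustrative):
//   store { i32, i32 } %v, ptr %p
// becomes, roughly,
//   %v.elt = extractvalue { i32, i32 } %v, 0
//   store i32 %v.elt, ptr %p.repack
//   %v.elt1 = extractvalue { i32, i32 } %v, 1
//   store i32 %v.elt1, ptr %p.repack1
// where each %p.repack* is an in-bounds GEP to the corresponding element.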
static bool unpackStoreToAggregate(InstCombinerImpl &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // We don't want to break stores with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return false;

    const auto Align = SI.getAlign();

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt32Ty(ST->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);
    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr =
          IC.Builder.CreateInBoundsGEP(ST, Addr, ArrayRef(Indices), AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = commonAlignment(Align, SL->getElementOffset(i));
      llvm::Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      NS->setAAMetadata(SI.getAAMetadata());
    }

    return true;
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array only has one element, we unpack.
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return false;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(AT->getElementType());
    const auto Align = SI.getAlign();

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr =
          IC.Builder.CreateInBoundsGEP(AT, Addr, ArrayRef(Indices), AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = commonAlignment(Align, Offset);
      Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      NS->setAAMetadata(SI.getAAMetadata());
      Offset += EltSize;
    }

    return true;
  }

  return false;
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

1372/// Converts store (bitcast (load (bitcast (select ...)))) to
1373/// store (load (select ...)), where select is minmax:
1374/// select ((cmp load V1, load V2), V1, V2).
static bool removeBitcastsFromLoadStoreOnMinMax(InstCombinerImpl &IC,
                                                StoreInst &SI) {
  // Is the store's pointer operand a bitcast?
  if (!match(SI.getPointerOperand(), m_BitCast(m_Value())))
    return false;
  // Is the stored value an integer load through a bitcast?
  Value *LoadAddr;
  if (!match(SI.getValueOperand(), m_Load(m_BitCast(m_Value(LoadAddr)))))
    return false;
  auto *LI = cast<LoadInst>(SI.getValueOperand());
  if (!LI->getType()->isIntegerTy())
    return false;
  Type *CmpLoadTy;
  if (!isMinMaxWithLoads(LoadAddr, CmpLoadTy))
    return false;

  // Make sure the type would actually change.
  // This condition can be hit with chains of bitcasts.
  if (LI->getType() == CmpLoadTy)
    return false;

  // Make sure we're not changing the size of the load/store.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(LI->getType()) !=
      DL.getTypeStoreSizeInBits(CmpLoadTy))
    return false;

  // Every user of the load must be a store we can rewrite: one that uses the
  // loaded value only as the stored value (not as the address), does not
  // store back through the min/max pointer, and is not a swifterror store.
  if (!all_of(LI->users(), [LI, LoadAddr](User *U) {
        auto *SI = dyn_cast<StoreInst>(U);
        return SI && SI->getPointerOperand() != LI &&
               InstCombiner::peekThroughBitcast(SI->getPointerOperand()) !=
                   LoadAddr &&
               !SI->getPointerOperand()->isSwiftError();
      }))
    return false;

  IC.Builder.SetInsertPoint(LI);
  LoadInst *NewLI = IC.combineLoadToNewType(*LI, CmpLoadTy);
  // Replace all the stores with stores of the newly loaded value.
  for (auto *UI : LI->users()) {
    auto *USI = cast<StoreInst>(UI);
    IC.Builder.SetInsertPoint(USI);
    combineStoreToNewValue(IC, *USI, NewLI);
  }
  IC.replaceInstUsesWith(*LI, PoisonValue::get(LI->getType()));
  IC.eraseInstFromFunction(*LI);
  return true;
}

Instruction *InstCombinerImpl::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return eraseInstFromFunction(SI);

  // Attempt to improve the alignment.
  const Align KnownAlign = getOrEnforceKnownAlignment(
      Ptr, DL.getPrefTypeAlign(Val->getType()), DL, &SI, &AC, &DT);
  if (KnownAlign > SI.getAlign())
    SI.setAlignment(KnownAlign);

  // Try to unpack stores of aggregates into stores of their elements.
  if (unpackStoreToAggregate(*this, SI))
    return eraseInstFromFunction(SI);

  if (removeBitcastsFromLoadStoreOnMinMax(*this, SI))
    return eraseInstFromFunction(SI);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.push(NewGEPI);
    return &SI;
  }

  // Don't hack volatile/ordered stores.
  // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
  if (!SI.isUnordered()) return nullptr;

  // If the pointer operand is a one-use alloca (or a one-use GEP of one), the
  // store can never be read back, so delete it and leave the alloca dead.
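  // E.g. (illustrative):
  //   %a = alloca i32
  //   store i32 0, i32* %a
  // Here the store is the alloca's only use, so the value can never be
  // observed and the store may be deleted.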
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return eraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return eraseInstFromFunction(SI);
      }
    }
  }

  // If we have a store to a location which is known constant, we can conclude
  // that the store must be storing the constant value (else the memory
  // wouldn't be constant), and this must be a noop.
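  // E.g. (illustrative): given "@g = constant i32 7", a reachable
  //   store i32 %x, i32* @g
  // can only ever be storing 7 over 7, so deleting it changes nothing.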
  if (!isModSet(AA->getModRefInfoMask(Ptr)))
    return eraseInstFromFunction(SI);

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
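  // E.g. (illustrative), the first store below is dead:
  //   store i32 %f0, i32* %p
  //   %f1 = or i32 %f0, 16
  //   store i32 %f1, i32* %p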
  BasicBlock::iterator BBI(SI);
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and skip over pointer-to-pointer bitcasts, which are NOPs.
    if (BBI->isDebugOrPseudoInst() ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Is the previous store neither volatile nor ordered, and a store to
      // the same location of a value of the same type?
      if (PrevSI->isUnordered() &&
          equivalentAddressValues(PrevSI->getOperand(1), SI.getOperand(1)) &&
          PrevSI->getValueOperand()->getType() ==
              SI.getValueOperand()->getType()) {
        ++NumDeadStore;
        // Manually add back the original store to the worklist now, so it will
        // be processed after the operands of the removed store, as this may
        // expose additional DSE opportunities.
        Worklist.push(&SI);
        eraseInstFromFunction(*PrevSI);
        return nullptr;
      }
      break;
    }

    // If this is a load, we have to stop.  However, if the stored value is
    // the result of a load from the same pointer, then *this* store is dead
    // (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
        assert(SI.isUnordered() && "can't eliminate ordering operation");
        return eraseInstFromFunction(SI);
      }

      // Otherwise, this is a load from some other location.  Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads, throws or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
      break;
  }

  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  // store X, GEP(null, Y) -> turns into 'unreachable' in SimplifyCFG
  if (canSimplifyNullStoreOrGEP(SI)) {
    if (!isa<PoisonValue>(Val))
      return replaceOperand(SI, 0, PoisonValue::get(Val->getType()));
    return nullptr;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  // FIXME: This is technically incorrect because it might overwrite a poison
  // value. Change to PoisonValue once #52930 is resolved.
  if (isa<UndefValue>(Val))
    return eraseInstFromFunction(SI);

  return nullptr;
}

/// Try to transform:
///   if () { *P = v1; } else { *P = v2 }
/// or:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
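///
/// For the first form, the result looks like this (an illustrative sketch;
/// only the phi's "storemerge" name comes from the code below):
///   DestBB:
///     %storemerge = phi i32 [ %v1, %StoreBB ], [ %v2, %OtherBB ]
///     store i32 %storemerge, i32* %P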
bool InstCombinerImpl::mergeStoreIntoSuccessor(StoreInst &SI) {
  if (!SI.isUnordered())
    return false; // This code has not been audited for the volatile/ordered
                  // case.

  // Check if the successor block has exactly 2 incoming edges.
  BasicBlock *StoreBB = SI.getParent();
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
  if (!DestBB->hasNPredecessors(2))
    return false;

  // Capture the other block (the block that doesn't contain our store).
  pred_iterator PredIter = pred_begin(DestBB);
  if (*PredIter == StoreBB)
    ++PredIter;
  BasicBlock *OtherBB = *PredIter;

  // Bail out if the relevant blocks aren't distinct. This can happen,
  // for example, if SI is in an infinite loop.
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI(OtherBB->getTerminator());
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if
  // then else' case. We already know there is an instruction before the
  // branch.
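  //
  // Shape of the 'if/then/else' case handled here (illustrative; both
  // predecessors fall through to DestBB):
  //
  //   StoreBB   OtherBB
  //        \     /
  //        DestBB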
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info, pseudo probes, and pointer-to-pointer
    // bitcasts (which are NOPs).
    while (BBI->isDebugOrPseudoInst() ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle. See if there is a store to the same ptr as SI that
    // lives in OtherBB.
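    //
    // Shape of the triangle (illustrative):
    //
    //   OtherBB
    //    |    \
    //    |   StoreBB
    //    |    /
    //   DestBB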
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the transform.
      if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
          BBI->mayWriteToMemory() || BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBB, we have to make sure nothing
    // reads or overwrites the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  // The debug locations of the original instructions might differ. Merge them.
  DebugLoc MergedLoc = DILocation::getMergedLocation(SI.getDebugLoc(),
                                                     OtherStore->getDebugLoc());
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
    PN->setDebugLoc(MergedLoc);
  }

  // Advance to a place where it is safe to insert the new store and insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI =
      new StoreInst(MergedVal, SI.getOperand(1), SI.isVolatile(), SI.getAlign(),
                    SI.getOrdering(), SI.getSyncScopeID());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(MergedLoc);
  NewSI->mergeDIAssignID({&SI, OtherStore});

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags = SI.getAAMetadata();
  if (AATags)
    NewSI->setAAMetadata(AATags.merge(OtherStore->getAAMetadata()));

  // Nuke the old stores.
  eraseInstFromFunction(SI);
  eraseInstFromFunction(*OtherStore);
  return true;
}