//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore,    "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer; return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
/// the uses.  If we see a memcpy/memmove that targets an unoffset pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
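///
/// A minimal illustrative example (hypothetical IR, not from a test):
///
///   @G = internal constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
///   %A = alloca [4 x i32]
///   call void @llvm.memcpy...(i8* %A.i8, i8* %G.i8, i64 16, i1 false)
///   %v = load i32, i32* %gep.into.A     ; only reads after the copy
///
/// Here every use of %A other than the memcpy is a read, so loads from %A
/// can be rewritten to load directly from @G.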
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them.  If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.emplace_back(V, false);
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      auto *I = cast<Instruction>(U.getUser());

      if (auto *LI = dyn_cast<LoadInst>(I)) {
        // Ignore non-volatile loads, they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.emplace_back(I, IsOffset);
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // A GEP with all-zero indices doesn't offset the pointer; any other
        // GEP does.
        ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
        continue;
      }

      if (auto *Call = dyn_cast<CallBase>(I)) {
        // If this is the function being called then we treat it like a load and
        // ignore it.
        if (Call->isCallee(&U))
          continue;

        unsigned DataOpNo = Call->getDataOperandNo(&U);
        bool IsArgOperand = Call->isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && Call->isInAllocaArgument(DataOpNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (Call->onlyReadsMemory() &&
            (Call->use_empty() || Call->doesNotCapture(DataOpNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (IsArgOperand && Call->isByValArgument(DataOpNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (I->isLifetimeStartOrEnd()) {
        assert(I->use_empty() && "Lifetime markers have no result to use!");
        ToDelete.push_back(I);
        continue;
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we already have seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe.  Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only
/// modified by a copy from a constant global.  If we can prove this, we can
/// replace any uses of the alloca with uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}

/// Returns true if V is dereferenceable for size of alloca.
static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,
                                           const DataLayout &DL) {
  if (AI->isArrayAllocation())
    return false;
  uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType());
  if (!AllocaSize)
    return false;
  return isDereferenceableAndAlignedPointer(V, Align(AI->getAlignment()),
                                            APInt(64, AllocaSize), DL);
}

static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
  // Check for array size of 1 (scalar allocation).
  if (!AI.isArrayAllocation()) {
    // i32 1 is the canonical array size for scalar allocations.
    if (AI.getArraySize()->getType()->isIntegerTy(32))
      return nullptr;

    // Canonicalize it.
    Value *V = IC.Builder.getInt32(1);
    AI.setOperand(0, V);
    return &AI;
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
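  // For example (illustrative only):
  //   %a = alloca i32, i32 4   ==>   %a = alloca [4 x i32]
  // together with a "GEP [4 x i32]* %a, i32 0, i32 0" so that existing users
  // still see an i32*.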
  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    if (C->getValue().getActiveBits() <= 64) {
      Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocaInst *New = IC.Builder.CreateAlloca(NewTy, nullptr, AI.getName());
      New->setAlignment(MaybeAlign(AI.getAlignment()));

      // Scan to the end of the allocation instructions, to skip over a block of
      // allocas if possible...also skip interleaved debug info
      //
      BasicBlock::iterator It(New);
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
        ++It;

      // Now that It points to the first non-allocation instruction in the
      // block, insert our getelementptr instruction...
      //
      Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
      Value *NullIdx = Constant::getNullValue(IdxTy);
      Value *Idx[2] = {NullIdx, NullIdx};
      Instruction *GEP = GetElementPtrInst::CreateInBounds(
          NewTy, New, Idx, New->getName() + ".sub");
      IC.InsertNewInstBefore(GEP, *It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return IC.replaceInstUsesWith(AI, GEP);
    }
  }

  if (isa<UndefValue>(AI.getArraySize()))
    return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder.CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    AI.setOperand(0, V);
    return &AI;
  }

  return nullptr;
}

namespace {
// If I and V are pointers in different address spaces, it is not allowed to
// use replaceAllUsesWith since I and V have different types. A
// non-target-specific transformation should not use addrspacecast on V since
// the two address spaces may be disjoint depending on the target.
//
// This class chases down uses of the old pointer until it reaches the load
// instructions, then replaces the old pointer in those loads with the new
// pointer. If during the chasing it sees a bitcast or GEP, it creates a new
// bitcast or GEP with the new pointer and uses that in the load
// instruction.
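//
// Sketch of the rewrite (hypothetical IR; @g is the replacement pointer in
// address space 1):
//
//   %b = bitcast i32* %a to float*
//   %v = load float, float* %b
// becomes
//   %b1 = bitcast i32 addrspace(1)* @g to float addrspace(1)*
//   %v  = load float, float addrspace(1)* %b1
//
// Only the loads are RAUW'd; the rebuilt bitcast/GEP chain just feeds them.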
class PointerReplacer {
public:
  PointerReplacer(InstCombiner &IC) : IC(IC) {}
  void replacePointer(Instruction &I, Value *V);

private:
  void findLoadAndReplace(Instruction &I);
  void replace(Instruction *I);
  Value *getReplacement(Value *I);

  SmallVector<Instruction *, 4> Path;
  MapVector<Value *, Value *> WorkMap;
  InstCombiner &IC;
};
} // end anonymous namespace

void PointerReplacer::findLoadAndReplace(Instruction &I) {
  for (auto U : I.users()) {
    auto *Inst = dyn_cast<Instruction>(&*U);
    if (!Inst)
      return;
    LLVM_DEBUG(dbgs() << "Found pointer user: " << *U << '\n');
    if (isa<LoadInst>(Inst)) {
      for (auto P : Path)
        replace(P);
      replace(Inst);
    } else if (isa<GetElementPtrInst>(Inst) || isa<BitCastInst>(Inst)) {
      Path.push_back(Inst);
      findLoadAndReplace(*Inst);
      Path.pop_back();
    } else {
      return;
    }
  }
}

Value *PointerReplacer::getReplacement(Value *V) {
  auto Loc = WorkMap.find(V);
  if (Loc != WorkMap.end())
    return Loc->second;
  return nullptr;
}

void PointerReplacer::replace(Instruction *I) {
  if (getReplacement(I))
    return;

  if (auto *LT = dyn_cast<LoadInst>(I)) {
    auto *V = getReplacement(LT->getPointerOperand());
    assert(V && "Operand not replaced");
    auto *NewI = new LoadInst(I->getType(), V);
    NewI->takeName(LT);
    IC.InsertNewInstWith(NewI, *LT);
    IC.replaceInstUsesWith(*LT, NewI);
    WorkMap[LT] = NewI;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    auto *V = getReplacement(GEP->getPointerOperand());
    assert(V && "Operand not replaced");
    SmallVector<Value *, 8> Indices;
    Indices.append(GEP->idx_begin(), GEP->idx_end());
    auto *NewI = GetElementPtrInst::Create(
        V->getType()->getPointerElementType(), V, Indices);
    IC.InsertNewInstWith(NewI, *GEP);
    NewI->takeName(GEP);
    WorkMap[GEP] = NewI;
  } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
    auto *V = getReplacement(BC->getOperand(0));
    assert(V && "Operand not replaced");
    auto *NewT = PointerType::get(BC->getType()->getPointerElementType(),
                                  V->getType()->getPointerAddressSpace());
    auto *NewI = new BitCastInst(V, NewT);
    IC.InsertNewInstWith(NewI, *BC);
    NewI->takeName(BC);
    WorkMap[BC] = NewI;
  } else {
    llvm_unreachable("should never reach here");
  }
}

void PointerReplacer::replacePointer(Instruction &I, Value *V) {
#ifndef NDEBUG
  auto *PT = cast<PointerType>(I.getType());
  auto *NT = cast<PointerType>(V->getType());
  assert(PT != NT && PT->getElementType() == NT->getElementType() &&
         "Invalid usage");
#endif
  WorkMap[&I] = V;
  findLoadAndReplace(I);
}

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI))
    return I;

  if (AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(
          MaybeAlign(DL.getPrefTypeAlignment(AI.getAllocatedType())));
    // Move all allocas of zero-byte objects to the entry block and merge them
    // together.  Note that we only do this for allocas, because malloc should
    // allocate and return a unique pointer, even for a zero-byte allocation.
    if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block.  There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
              MaybeAlign(DL.getPrefTypeAlignment(EntryAI->getAllocatedType())));
        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        const MaybeAlign MaxAlign(
            std::max(EntryAI->getAlignment(), AI.getAlignment()));
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return replaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global whose alignment is equal to or exceeds that of the
    // allocation.  If this is the case, we can change all users to use
    // the constant global instead.  This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(
          Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
      if (AI.getAlignment() <= SourceAlign &&
          isDereferenceableForAllocaSize(Copy->getSource(), &AI, DL)) {
        LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        LLVM_DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          eraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        auto *SrcTy = TheSrc->getType();
        auto *DestTy = PointerType::get(AI.getType()->getPointerElementType(),
                                        SrcTy->getPointerAddressSpace());
        Constant *Cast =
            ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, DestTy);
        if (AI.getType()->getPointerAddressSpace() ==
            SrcTy->getPointerAddressSpace()) {
          Instruction *NewI = replaceInstUsesWith(AI, Cast);
          eraseInstFromFunction(*Copy);
          ++NumGlobalCopies;
          return NewI;
        } else {
          PointerReplacer PtrReplacer(*this);
          PtrReplacer.replacePointer(AI, Cast);
          ++NumGlobalCopies;
        }
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}

// Are we allowed to form an atomic load or store of this type?
static bool isSupportedAtomicType(Type *Ty) {
  return Ty->isIntOrPtrTy() || Ty->isFloatingPointTy();
}

/// Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
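///
/// For instance (illustrative only), rewriting a load of i64 as a load of
/// double would produce:
///
///   %p.cast = bitcast i64* %p to double*
///   %v      = load double, double* %p.cast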
LoadInst *InstCombiner::combineLoadToNewType(LoadInst &LI, Type *NewTy,
                                             const Twine &Suffix) {
  assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
         "can't fold an atomic load to requested type");

  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  Value *NewPtr = nullptr;
  if (!(match(Ptr, m_BitCast(m_Value(NewPtr))) &&
        NewPtr->getType()->getPointerElementType() == NewTy &&
        NewPtr->getType()->getPointerAddressSpace() == AS))
    NewPtr = Builder.CreateBitCast(Ptr, NewTy->getPointerTo(AS));

  unsigned Align = LI.getAlignment();
  if (!Align)
    // If old load did not have an explicit alignment specified,
    // manually preserve the implied (ABI) alignment of the load.
    // Else we may inadvertently incorrectly over-promise alignment.
    Align = getDataLayout().getABITypeAlignment(LI.getType());

  LoadInst *NewLoad = Builder.CreateAlignedLoad(
      NewTy, NewPtr, Align, LI.isVolatile(), LI.getName() + Suffix);
  NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
  copyMetadataForLoad(*NewLoad, LI);
  return NewLoad;
}

/// Combine a store to a new type.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI,
                                         Value *V) {
  assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
         "can't fold an atomic store of requested type");

  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder.CreateAlignedStore(
      V, IC.Builder.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlignment(), SI.isVolatile());
  NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
    case LLVMContext::MD_access_group:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}

/// Returns true if the instruction represents a minmax pattern like:
///   select ((cmp load V1, load V2), V1, V2).
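///
/// Concretely (illustrative IR), the pointer-select form matched here is:
///
///   %v1  = load i32, i32* %p1
///   %v2  = load i32, i32* %p2
///   %c   = icmp slt i32 %v1, %v2
///   %sel = select i1 %c, i32* %p1, i32* %p2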
static bool isMinMaxWithLoads(Value *V, Type *&LoadTy) {
  assert(V->getType()->isPointerTy() && "Expected pointer type.");
  // Ignore possible ty* to ixx* bitcast.
  V = peekThroughBitcast(V);
  // Check that select is select ((cmp load V1, load V2), V1, V2) - minmax
  // pattern.
  CmpInst::Predicate Pred;
  Instruction *L1;
  Instruction *L2;
  Value *LHS;
  Value *RHS;
  if (!match(V, m_Select(m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2)),
                         m_Value(LHS), m_Value(RHS))))
    return false;
  LoadTy = L1->getType();
  return (match(L1, m_Load(m_Specific(LHS))) &&
          match(L2, m_Load(m_Specific(RHS)))) ||
         (match(L1, m_Load(m_Specific(RHS))) &&
          match(L2, m_Load(m_Specific(LHS))));
}

/// Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number of
/// loads as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows loads to more closely model the types
/// of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic load
/// or a volatile load. This is debatable, and might be reasonable to change
/// later. However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type loaded to select appropriate atomic operations.
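///
/// A minimal before/after sketch (illustrative IR, assuming a target with
/// 64-bit pointers so the cast is a noop):
///
///   %i = load i64, i64* %p                %p.c = bitcast i64* %p to i8**
///   %q = inttoptr i64 %i to i8*     =>    %q   = load i8*, i8** %p.c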
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic loads here but it isn't clear that this is important.
  if (!LI.isUnordered())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  // swifterror values can't be bitcasted.
  if (LI.getPointerOperand()->isSwiftError())
    return nullptr;

  Type *Ty = LI.getType();
  const DataLayout &DL = IC.getDataLayout();

  // Try to canonicalize loads which are only ever stored to operate over
  // integers instead of any other type. We only do this when the loaded type
  // is sized and has a size exactly the same as its store size and the store
  // size is a legal integer type.
  // Do not perform canonicalization if a minmax pattern is found (to avoid
  // an infinite loop).
  Type *Dummy;
  if (!Ty->isIntegerTy() && Ty->isSized() &&
      !(Ty->isVectorTy() && Ty->getVectorIsScalable()) &&
      DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
      DL.typeSizeEqualsStoreSize(Ty) &&
      !DL.isNonIntegralPointerType(Ty) &&
      !isMinMaxWithLoads(
          peekThroughBitcast(LI.getPointerOperand(), /*OneUseOnly=*/true),
          Dummy)) {
    if (all_of(LI.users(), [&LI](User *U) {
          auto *SI = dyn_cast<StoreInst>(U);
          return SI && SI->getPointerOperand() != &LI &&
                 !SI->getPointerOperand()->isSwiftError();
        })) {
      LoadInst *NewLoad = IC.combineLoadToNewType(
          LI, Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
      // Replace all the stores with stores of the newly loaded value.
      for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
        auto *SI = cast<StoreInst>(*UI++);
        IC.Builder.SetInsertPoint(SI);
        combineStoreToNewValue(IC, *SI, NewLoad);
        IC.eraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }

  // Fold away bit casts of the loaded value by loading the desired type.
  // We can do this for BitCastInsts as well as casts from and to pointer types,
  // as long as those are noops (i.e., the source or dest type have the same
  // bitwidth as the target's pointers).
  if (LI.hasOneUse())
    if (auto *CI = dyn_cast<CastInst>(LI.user_back()))
      if (CI->isNoopCast(DL))
        if (!LI.isAtomic() || isSupportedAtomicType(CI->getDestTy())) {
          LoadInst *NewLoad = IC.combineLoadToNewType(LI, CI->getDestTy());
          CI->replaceAllUsesWith(NewLoad);
          IC.eraseInstFromFunction(*CI);
          return &LI;
        }

  // FIXME: We should also canonicalize loads of vectors when their elements are
  // cast to other types.
  return nullptr;
}

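// Attempt to replace a load of an aggregate with loads of its elements,
// recombined with insertvalue.  Roughly (illustrative IR):
//
//   %v = load {i32, i32}, {i32, i32}* %p
//
// becomes a per-element sequence of GEP + scalar load, folded back together
// with insertvalue.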
static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // loads here but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;

  StringRef Name = LI.getName();
  assert(LI.getAlignment() && "Alignment must be set at this point");

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = IC.combineLoadToNewType(LI, ST->getTypeAtIndex(0U),
                                                  ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return nullptr;

    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt32Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      auto *L = IC.Builder.CreateAlignedLoad(ST->getElementType(i), Ptr,
                                             EltAlign, Name + ".unpack");
      // Propagate AA metadata. It'll still be valid on the narrowed load.
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = IC.combineLoadToNewType(LI, ET, ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily and may need a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return nullptr;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(ET);
    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto *L = IC.Builder.CreateAlignedLoad(
          AT->getElementType(), Ptr, MinAlign(Align, Offset), Name + ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
      Offset += EltSize;
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  return nullptr;
}

// If we can determine that all possible objects pointed to by the provided
// pointer value are not only dereferenceable but also definitively less than
// or equal to the provided maximum size, then return true. Otherwise, return
// false. Constant global values and allocas are examples of objects whose
// size can be determined this way.
//
// FIXME: This should probably live in ValueTracking (or similar).
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as a
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128) * APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}

// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
// @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
// ...
// %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
// ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. Also, we could
// search through non-zero constant indices if we kept track of the offsets
// those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
                                     Instruction *MemI, unsigned &Idx) {
  if (GEPI->getNumOperands() < 2)
    return false;

  // Find the first non-zero index of a GEP. If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };

  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type. See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *AllocTy =
    GetElementPtrInst::getIndexedType(GEPI->getSourceElementType(), Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  const DataLayout &DL = IC.getDataLayout();
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);

  // If there are more indices after the one we might replace with a zero, make
  // sure they're all non-negative. If any of them are negative, the overall
  // address being computed might be before the base address determined by the
  // first non-zero index.
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
      KnownBits Known = IC.computeKnownBits(GEPI->getOperand(i), 0, MemI);
      if (Known.isNonNegative())
        continue;
      return false;
    }

    return true;
  };

  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient). We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
  // also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}

// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
        ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}

static bool canSimplifyNullStoreOrGEP(StoreInst &SI) {
  if (NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()))
    return false;

  auto *Ptr = SI.getPointerOperand();
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
    Ptr = GEPI->getOperand(0);
  return (isa<ConstantPointerNull>(Ptr) &&
          !NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()));
}

static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    if (isa<ConstantPointerNull>(GEPI0) &&
        !NullPointerIsDefined(LI.getFunction(), GEPI->getPointerAddressSpace()))
      return true;
  }
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) &&
       !NullPointerIsDefined(LI.getFunction(), LI.getPointerAddressSpace())))
    return true;
  return false;
}

Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &AC, &DT);
  unsigned LoadAlign = LI.getAlignment();
  unsigned EffectiveLoadAlign =
      LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());

  if (KnownAlign > EffectiveLoadAlign)
    LI.setAlignment(MaybeAlign(KnownAlign));
  else if (LoadAlign == 0)
    LI.setAlignment(MaybeAlign(EffectiveLoadAlign));

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.Add(NewGEPI);
    return &LI;
  }

  if (Instruction *Res = unpackLoadToAggregate(*this, LI))
    return Res;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI(LI);
  bool IsLoadCSE = false;
  if (Value *AvailableVal = FindAvailableLoadedValue(
          &LI, LI.getParent(), BBI, DefMaxInstsToScan, AA, &IsLoadCSE)) {
    if (IsLoadCSE)
      combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI, false);

    return replaceInstUsesWith(
        LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                           LI.getName() + ".cast"));
  }

  // None of the following transforms are legal for volatile/ordered atomic
  // loads.  Most of them do apply for unordered atomics.
  if (!LI.isUnordered()) return nullptr;

  // load(gep null, ...) -> unreachable
  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for these transforms.
  if (canSimplifyNullLoadOrGEP(LI, Op)) {
    // Insert a new store to null instruction before the load to indicate
    // that this code is not reachable.  We do this instead of inserting
    // an unreachable instruction directly because we cannot modify the
    // CFG.
    StoreInst *SI = new StoreInst(UndefValue::get(LI.getType()),
                                  Constant::getNullValue(Op->getType()), &LI);
    SI->setDebugLoc(LI.getDebugLoc());
    return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap!  Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
      const MaybeAlign Alignment(LI.getAlignment());
      if (isSafeToLoadUnconditionally(SI->getOperand(1), LI.getType(),
                                      Alignment, DL, SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), LI.getType(),
                                      Alignment, DL, SI)) {
        LoadInst *V1 =
            Builder.CreateLoad(LI.getType(), SI->getOperand(1),
                               SI->getOperand(1)->getName() + ".val");
        LoadInst *V2 =
            Builder.CreateLoad(LI.getType(), SI->getOperand(2),
                               SI->getOperand(2)->getName() + ".val");
        assert(LI.isUnordered() && "implied by above");
        V1->setAlignment(Alignment);
        V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        V2->setAlignment(Alignment);
        V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace())) {
        LI.setOperand(0, SI->getOperand(2));
        return &LI;
      }

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace())) {
        LI.setOperand(0, SI->getOperand(1));
        return &LI;
      }
    }
  }
  return nullptr;
}

/// Look for extractelement/insertvalue sequence that acts like a bitcast.
///
/// \returns underlying value that was "cast", or nullptr otherwise.
///
/// For example, if we have:
///
///     %E0 = extractelement <2 x double> %U, i32 0
///     %V0 = insertvalue [2 x double] undef, double %E0, 0
///     %E1 = extractelement <2 x double> %U, i32 1
///     %V1 = insertvalue [2 x double] %V0, double %E1, 1
///
/// and the layout of a <2 x double> is isomorphic to a [2 x double],
/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
/// Note that %U may contain non-undef values where %V1 has undef.
static Value *likeBitCastFromVector(InstCombiner &IC, Value *V) {
  Value *U = nullptr;
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    if (!U)
      U = W;
    else if (U != W)
      return nullptr;
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 ||
        CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  if (!isa<UndefValue>(V) || !U)
    return nullptr;

  auto *UT = cast<VectorType>(U->getType());
  auto *VT = V->getType();
  // Check that types UT and VT are bitwise isomorphic.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    return nullptr;
  }
  if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != UT->getNumElements())
      return nullptr;
  } else {
    auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != UT->getNumElements())
      return nullptr;
    for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
  return U;
}

/// Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and,
/// where we can, we should match the type of a store to the type of the value
/// being stored.
///
/// However, this routine must never change the width of a store or the number of
/// stores as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows stores to more closely model the types
/// of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller erase
/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.EraseInstFromFunction returns a null pointer.
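///
/// For instance (illustrative IR), a store of a bitcast value:
///
///   %i = bitcast float %f to i32
///   store i32 %i, i32* %p
/// becomes
///   %p.c = bitcast i32* %p to float*
///   store float %f, float* %p.c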
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic stores here but it isn't clear that this is important.
  if (!SI.isUnordered())
    return false;

  // swifterror values can't be bitcasted.
  if (SI.getPointerOperand()->isSwiftError())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
  }

  if (Value *U = likeBitCastFromVector(IC, V))
    if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
      combineStoreToNewValue(IC, SI, U);
      return true;
    }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}

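// Attempt to split a store of an aggregate into stores of its elements,
// mirroring unpackLoadToAggregate above.  Roughly (illustrative IR):
//
//   store {i32, i32} %v, {i32, i32}* %p
//
// becomes a per-element sequence of extractvalue + GEP + scalar store.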
static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return false;

    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt32Ty(ST->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);
    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      llvm::Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
    }

    return true;
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array only has one element, we unpack.
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily and may need a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return false;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(AT->getElementType());
    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, Offset);
      Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
      Offset += EltSize;
    }

    return true;
  }

  return false;
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

/// Converts store (bitcast (load (bitcast (select ...)))) to
/// store (load (select ...)), where select is minmax:
/// select ((cmp load V1, load V2), V1, V2).
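///
/// A sketch of the effect (illustrative IR, assuming a float minmax whose
/// loads were round-tripped through i32):
///
///   %m.i = load i32, i32* %sel.cast          %m = load float, float* %sel
///   store i32 %m.i, i32* %out.cast     =>    store float %m, float* %out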
static bool removeBitcastsFromLoadStoreOnMinMax(InstCombiner &IC,
                                                StoreInst &SI) {
  // bitcast?
  if (!match(SI.getPointerOperand(), m_BitCast(m_Value())))
    return false;
  // load? integer?
  Value *LoadAddr;
  if (!match(SI.getValueOperand(), m_Load(m_BitCast(m_Value(LoadAddr)))))
    return false;
  auto *LI = cast<LoadInst>(SI.getValueOperand());
  if (!LI->getType()->isIntegerTy())
    return false;
  Type *CmpLoadTy;
  if (!isMinMaxWithLoads(LoadAddr, CmpLoadTy))
    return false;

  // Make sure the type would actually change.
  // This condition can be hit with chains of bitcasts.
  if (LI->getType() == CmpLoadTy)
    return false;

  // Make sure we're not changing the size of the load/store.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(LI->getType()) !=
      DL.getTypeStoreSizeInBits(CmpLoadTy))
    return false;

  if (!all_of(LI->users(), [LI, LoadAddr](User *U) {
        auto *SI = dyn_cast<StoreInst>(U);
        return SI && SI->getPointerOperand() != LI &&
               peekThroughBitcast(SI->getPointerOperand()) != LoadAddr &&
               !SI->getPointerOperand()->isSwiftError();
      }))
    return false;

  IC.Builder.SetInsertPoint(LI);
  LoadInst *NewLI = IC.combineLoadToNewType(*LI, CmpLoadTy);
  // Replace all the stores with stores of the newly loaded value.
  for (auto *UI : LI->users()) {
    auto *USI = cast<StoreInst>(UI);
    IC.Builder.SetInsertPoint(USI);
    combineStoreToNewValue(IC, *USI, NewLI);
  }
  IC.replaceInstUsesWith(*LI, UndefValue::get(LI->getType()));
  IC.eraseInstFromFunction(*LI);
  return true;
}

1371Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
1372  Value *Val = SI.getOperand(0);
1373  Value *Ptr = SI.getOperand(1);
1374
1375  // Try to canonicalize the stored type.
1376  if (combineStoreToValueType(*this, SI))
1377    return eraseInstFromFunction(SI);
1378
1379  // Attempt to improve the alignment.
  const Align KnownAlign = Align(getOrEnforceKnownAlignment(
      Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &AC, &DT));
  const MaybeAlign StoreAlign = MaybeAlign(SI.getAlignment());
  const Align EffectiveStoreAlign =
      StoreAlign ? *StoreAlign : Align(DL.getABITypeAlignment(Val->getType()));

  if (KnownAlign > EffectiveStoreAlign)
    SI.setAlignment(KnownAlign);
  else if (!StoreAlign)
    SI.setAlignment(EffectiveStoreAlign);

  // Likewise, try to unpack stores of aggregates into stores of their
  // elements.
  if (unpackStoreToAggregate(*this, SI))
    return eraseInstFromFunction(SI);

  if (removeBitcastsFromLoadStoreOnMinMax(*this, SI))
    return eraseInstFromFunction(SI);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.Add(NewGEPI);
    return &SI;
  }

  // Don't hack volatile/ordered stores.
  // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
  if (!SI.isUnordered()) return nullptr;

  // If the pointer operand is an alloca with a single use, zap the store,
  // making the alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return eraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return eraseInstFromFunction(SI);
      }
    }
  }

  // If we have a store to a location which is known constant, we can conclude
  // that the store must be storing the constant value (else the memory
  // wouldn't be constant), and this must be a noop.
  if (AA->pointsToConstantMemory(Ptr))
    return eraseInstFromFunction(SI);

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
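  // For example (illustrative IR, not from any test case):
  //   store i32 %x, i32* %p
  //   %y = or i32 %x, 64          ; set another bitfield member
  //   store i32 %y, i32* %p       ; makes the first store dead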
  BasicBlock::iterator BBI(SI);
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen, and skip
    // pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isUnordered() &&
          equivalentAddressValues(PrevSI->getOperand(1), SI.getOperand(1))) {
        ++NumDeadStore;
        // Manually add back the original store to the worklist now, so it will
        // be processed after the operands of the removed store, as this may
        // expose additional DSE opportunities.
        Worklist.Add(&SI);
        eraseInstFromFunction(*PrevSI);
        return nullptr;
      }
      break;
    }

    // If this is a load, we have to stop.  However, if the loaded value is the
    // value being stored and it was loaded from the very pointer we're storing
    // to, then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
        assert(SI.isUnordered() && "can't eliminate ordering operation");
        return eraseInstFromFunction(SI);
      }

      // Otherwise, this is a load from some other location.  Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads, throws or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
      break;
  }

  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  // store X, GEP(null, Y) -> turns into 'unreachable' in SimplifyCFG
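  // We only drop the stored value down to undef here, so the old value's
  // operands get revisited; rewriting the control flow to 'unreachable' is
  // left entirely to SimplifyCFG, which is why the store itself must stay.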
  if (canSimplifyNullStoreOrGEP(SI)) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return nullptr;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return eraseInstFromFunction(SI);

  // If this store is the second-to-last instruction in the basic block
  // (excluding debug info and bitcasts of pointers) and if the block ends with
  // an unconditional branch, try to move the store to the successor block.
  BBI = SI.getIterator();
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));

  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      mergeStoreIntoSuccessor(SI);

  return nullptr;
}

/// Try to transform:
///   if () { *P = v1; } else { *P = v2; }
/// or:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
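///
/// Sketch of the first (diamond) case, in illustrative IR:
///   then:                        else:
///     store i32 1, i32* %P         store i32 2, i32* %P
///     br label %merge              br label %merge
///   merge:
/// becomes a single 'phi [1, %then], [2, %else]' in %merge followed by one
/// store of the phi value to %P.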
bool InstCombiner::mergeStoreIntoSuccessor(StoreInst &SI) {
  assert(SI.isUnordered() &&
         "This code has not been audited for volatile or ordered store case.");

  // Check if the successor block has exactly 2 incoming edges.
  BasicBlock *StoreBB = SI.getParent();
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
  if (!DestBB->hasNPredecessors(2))
    return false;

  // Capture the other block (the block that doesn't contain our store).
  pred_iterator PredIter = pred_begin(DestBB);
  if (*PredIter == StoreBB)
    ++PredIter;
  BasicBlock *OtherBB = *PredIter;

  // Bail out if all of the relevant blocks aren't distinct. This can happen,
  // for example, if SI is in an infinite loop.
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI(OtherBB->getTerminator());
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case. There is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to DestBB and StoreBB, so this is an
    // if/then triangle. See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the transform.
      if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
          BBI->mayWriteToMemory() || BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBB, we have to make sure nothing
    // reads or overwrites the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  // The debug locations of the original instructions might differ. Merge them.
  DebugLoc MergedLoc = DILocation::getMergedLocation(SI.getDebugLoc(),
                                                     OtherStore->getDebugLoc());
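  // If both blocks store the same value there is nothing to merge, so reuse
  // it directly; otherwise build a PHI joining the two incoming values.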
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
    PN->setDebugLoc(MergedLoc);
  }

  // Advance to a place where it is safe to insert the new store and insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1), SI.isVolatile(),
                                   MaybeAlign(SI.getAlignment()),
                                   SI.getOrdering(), SI.getSyncScopeID());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(MergedLoc);

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  eraseInstFromFunction(SI);
  eraseInstFromFunction(*OtherStore);
  return true;
}
