//===- ArgumentPromotion.cpp - Promote by-reference arguments -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass promotes "by reference" arguments to be "by value" arguments.  In
// practice, this means looking for internal functions that have pointer
// arguments.  If it can prove, through the use of alias analysis, that an
// argument is *only* loaded, then it can pass the value into the function
// instead of the address of the value.  This can cause recursive simplification
// of code and lead to the elimination of allocas (especially in C++ template
// code like the STL).
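//
// As an illustration (a minimal sketch; the function and value names are
// hypothetical), a callee such as
//
//   define internal i32 @callee(i32* %p) {
//     %v = load i32, i32* %p
//     ret i32 %v
//   }
//
// is rewritten to take the loaded value directly,
//
//   define internal i32 @callee(i32 %p.val) {
//     ret i32 %p.val
//   }
//
// and every (direct) call site is updated to load %p before the call.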
//
// This pass also handles aggregate arguments that are passed into a function,
// scalarizing them if the elements of the aggregate are only loaded.  Note that
// by default it refuses to scalarize aggregates which would require passing in
// more than three operands to the function, because passing thousands of
// operands for a large array or structure is unprofitable! This limit can be
// configured or disabled, however.
//
// Note that this transformation could also be done for arguments that are only
// stored to (returning the value instead), but this is not currently done.
// This case would be best handled when and if LLVM begins supporting multiple
// return values from functions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/ArgumentPromotion.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "argpromotion"

STATISTIC(NumArgumentsPromoted, "Number of pointer arguments promoted");
STATISTIC(NumAggregatesPromoted, "Number of aggregate arguments promoted");
STATISTIC(NumByValArgsPromoted, "Number of byval arguments promoted");
STATISTIC(NumArgumentsDead, "Number of dead pointer args eliminated");

/// A vector used to hold the indices of a single GEP instruction
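/// (e.g. for a GEP of the form 'getelementptr %T, %T* %p, i64 0, i32 1' the
/// recorded indices would be {0, 1}; the type and value names here are
/// illustrative).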
using IndicesVector = std::vector<uint64_t>;

/// DoPromotion - This method actually performs the promotion of the specified
/// arguments, and returns the new function.  At this point, we know that it's
/// safe to do so.
static Function *
doPromotion(Function *F, SmallPtrSetImpl<Argument *> &ArgsToPromote,
            SmallPtrSetImpl<Argument *> &ByValArgsToTransform,
            Optional<function_ref<void(CallBase &OldCS, CallBase &NewCS)>>
                ReplaceCallSite) {
  // Start by computing a new prototype for the function, which is the same as
  // the old function, but has modified arguments.
  FunctionType *FTy = F->getFunctionType();
  std::vector<Type *> Params;

  using ScalarizeTable = std::set<std::pair<Type *, IndicesVector>>;

  // ScalarizedElements - If we are promoting a pointer that has elements
  // accessed out of it, keep track of which elements are accessed so that we
  // can add one argument for each.
  //
  // Arguments that are directly loaded will have a zero element value here, to
  // handle cases where there are both a direct load and GEP accesses.
  std::map<Argument *, ScalarizeTable> ScalarizedElements;

  // OriginalLoads - Keep track of a representative load instruction from the
  // original function so that we can tell the alias analysis implementation
  // what the new GEP/Load instructions we are inserting look like.
  // We need to keep the original loads for each argument and the elements
  // of the argument that are accessed.
  std::map<std::pair<Argument *, IndicesVector>, LoadInst *> OriginalLoads;

  // Attribute - Keep track of the parameter attributes for the arguments
  // that we are *not* promoting. For the ones that we do promote, the parameter
  // attributes are lost
  SmallVector<AttributeSet, 8> ArgAttrVec;
  AttributeList PAL = F->getAttributes();

  // First, determine the new argument list
  unsigned ArgNo = 0;
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E;
       ++I, ++ArgNo) {
    if (ByValArgsToTransform.count(&*I)) {
      // Simple byval argument? Just add all the struct element types.
      Type *AgTy = cast<PointerType>(I->getType())->getElementType();
      StructType *STy = cast<StructType>(AgTy);
      llvm::append_range(Params, STy->elements());
      ArgAttrVec.insert(ArgAttrVec.end(), STy->getNumElements(),
                        AttributeSet());
      ++NumByValArgsPromoted;
    } else if (!ArgsToPromote.count(&*I)) {
      // Unchanged argument
      Params.push_back(I->getType());
      ArgAttrVec.push_back(PAL.getParamAttributes(ArgNo));
    } else if (I->use_empty()) {
      // Dead argument (these are always marked as promotable)
      ++NumArgumentsDead;
    } else {
      // Okay, this is being promoted. This means that the only uses are loads
      // or GEPs which are only used by loads

      // In this table, we will track which indices are loaded from the argument
      // (where direct loads are tracked as no indices).
      ScalarizeTable &ArgIndices = ScalarizedElements[&*I];
      for (User *U : make_early_inc_range(I->users())) {
        Instruction *UI = cast<Instruction>(U);
        Type *SrcTy;
        if (LoadInst *L = dyn_cast<LoadInst>(UI))
          SrcTy = L->getType();
        else
          SrcTy = cast<GetElementPtrInst>(UI)->getSourceElementType();
        // Skip dead GEPs and remove them.
        if (isa<GetElementPtrInst>(UI) && UI->use_empty()) {
          UI->eraseFromParent();
          continue;
        }

        IndicesVector Indices;
        Indices.reserve(UI->getNumOperands() - 1);
        // Since loads will only have a single operand, and GEPs only a single
        // non-index operand, this will record direct loads without any indices,
        // and gep+loads with the GEP indices.
        for (User::op_iterator II = UI->op_begin() + 1, IE = UI->op_end();
             II != IE; ++II)
          Indices.push_back(cast<ConstantInt>(*II)->getSExtValue());
        // GEPs with a single 0 index can be merged with direct loads
        if (Indices.size() == 1 && Indices.front() == 0)
          Indices.clear();
        ArgIndices.insert(std::make_pair(SrcTy, Indices));
        LoadInst *OrigLoad;
        if (LoadInst *L = dyn_cast<LoadInst>(UI))
          OrigLoad = L;
        else
          // Take any load, we will use it only to update Alias Analysis
          OrigLoad = cast<LoadInst>(UI->user_back());
        OriginalLoads[std::make_pair(&*I, Indices)] = OrigLoad;
      }

      // Add a parameter to the function for each element passed in.
      for (const auto &ArgIndex : ArgIndices) {
        // not allowed to dereference ->begin() if size() is 0
        Params.push_back(GetElementPtrInst::getIndexedType(
            cast<PointerType>(I->getType())->getElementType(),
            ArgIndex.second));
        ArgAttrVec.push_back(AttributeSet());
        assert(Params.back());
      }

      if (ArgIndices.size() == 1 && ArgIndices.begin()->second.empty())
        ++NumArgumentsPromoted;
      else
        ++NumAggregatesPromoted;
    }
  }

  Type *RetTy = FTy->getReturnType();

  // Construct the new function type using the new arguments.
  FunctionType *NFTy = FunctionType::get(RetTy, Params, FTy->isVarArg());

  // Create the new function body and insert it into the module.
  Function *NF = Function::Create(NFTy, F->getLinkage(), F->getAddressSpace(),
                                  F->getName());
  NF->copyAttributesFrom(F);
  NF->copyMetadata(F, 0);

  // The new function will have the !dbg metadata copied from the original
  // function. The original function may not be deleted, and !dbg metadata must
  // be unique, so drop it from the old function.
  F->setSubprogram(nullptr);

  LLVM_DEBUG(dbgs() << "ARG PROMOTION:  Promoting to:" << *NF << "\n"
                    << "From: " << *F);

  // Recompute the parameter attributes list based on the new arguments for
  // the function.
  NF->setAttributes(AttributeList::get(F->getContext(), PAL.getFnAttributes(),
                                       PAL.getRetAttributes(), ArgAttrVec));
  ArgAttrVec.clear();

  F->getParent()->getFunctionList().insert(F->getIterator(), NF);
  NF->takeName(F);

  // Loop over all of the callers of the function, transforming the call sites
  // to pass in the loaded pointers.
  //
  SmallVector<Value *, 16> Args;
  const DataLayout &DL = F->getParent()->getDataLayout();
  while (!F->use_empty()) {
    CallBase &CB = cast<CallBase>(*F->user_back());
    assert(CB.getCalledFunction() == F);
    const AttributeList &CallPAL = CB.getAttributes();
    IRBuilder<NoFolder> IRB(&CB);

    // Loop over the operands, inserting GEP and loads in the caller as
    // appropriate.
    auto AI = CB.arg_begin();
    ArgNo = 0;
    for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E;
         ++I, ++AI, ++ArgNo)
      if (!ArgsToPromote.count(&*I) && !ByValArgsToTransform.count(&*I)) {
        Args.push_back(*AI); // Unmodified argument
        ArgAttrVec.push_back(CallPAL.getParamAttributes(ArgNo));
      } else if (ByValArgsToTransform.count(&*I)) {
        // Emit a GEP and load for each element of the struct.
        Type *AgTy = cast<PointerType>(I->getType())->getElementType();
        StructType *STy = cast<StructType>(AgTy);
        Value *Idxs[2] = {
            ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), nullptr};
        const StructLayout *SL = DL.getStructLayout(STy);
        Align StructAlign = *I->getParamAlign();
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
          auto *Idx =
              IRB.CreateGEP(STy, *AI, Idxs, (*AI)->getName() + "." + Twine(i));
          // TODO: Tell AA about the new values?
          Align Alignment =
              commonAlignment(StructAlign, SL->getElementOffset(i));
          Args.push_back(IRB.CreateAlignedLoad(
              STy->getElementType(i), Idx, Alignment, Idx->getName() + ".val"));
          ArgAttrVec.push_back(AttributeSet());
        }
      } else if (!I->use_empty()) {
        // Non-dead argument: insert GEPs and loads as appropriate.
        ScalarizeTable &ArgIndices = ScalarizedElements[&*I];
        // Store the Value* version of the indices in here, but declare it now
        // for reuse.
        std::vector<Value *> Ops;
        for (const auto &ArgIndex : ArgIndices) {
          Value *V = *AI;
          LoadInst *OrigLoad =
              OriginalLoads[std::make_pair(&*I, ArgIndex.second)];
          if (!ArgIndex.second.empty()) {
            Ops.reserve(ArgIndex.second.size());
            Type *ElTy = V->getType();
            for (auto II : ArgIndex.second) {
              // Use i32 to index structs, and i64 for others (pointers/arrays).
              // This satisfies GEP constraints.
              Type *IdxTy =
                  (ElTy->isStructTy() ? Type::getInt32Ty(F->getContext())
                                      : Type::getInt64Ty(F->getContext()));
              Ops.push_back(ConstantInt::get(IdxTy, II));
              // Keep track of the type we're currently indexing.
              if (auto *ElPTy = dyn_cast<PointerType>(ElTy))
                ElTy = ElPTy->getElementType();
              else
                ElTy = GetElementPtrInst::getTypeAtIndex(ElTy, II);
            }
            // And create a GEP to extract those indices.
            V = IRB.CreateGEP(ArgIndex.first, V, Ops, V->getName() + ".idx");
            Ops.clear();
          }
          // Since we're replacing a load make sure we take the alignment
          // of the previous load.
          LoadInst *newLoad =
              IRB.CreateLoad(OrigLoad->getType(), V, V->getName() + ".val");
          newLoad->setAlignment(OrigLoad->getAlign());
          // Transfer the AA info too.
          AAMDNodes AAInfo;
          OrigLoad->getAAMetadata(AAInfo);
          newLoad->setAAMetadata(AAInfo);

          Args.push_back(newLoad);
          ArgAttrVec.push_back(AttributeSet());
        }
      }

    // Push any varargs arguments on the list.
    for (; AI != CB.arg_end(); ++AI, ++ArgNo) {
      Args.push_back(*AI);
      ArgAttrVec.push_back(CallPAL.getParamAttributes(ArgNo));
    }

    SmallVector<OperandBundleDef, 1> OpBundles;
    CB.getOperandBundlesAsDefs(OpBundles);

    CallBase *NewCS = nullptr;
    if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
      NewCS = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(),
                                 Args, OpBundles, "", &CB);
    } else {
      auto *NewCall = CallInst::Create(NF, Args, OpBundles, "", &CB);
      NewCall->setTailCallKind(cast<CallInst>(&CB)->getTailCallKind());
      NewCS = NewCall;
    }
    NewCS->setCallingConv(CB.getCallingConv());
    NewCS->setAttributes(
        AttributeList::get(F->getContext(), CallPAL.getFnAttributes(),
                           CallPAL.getRetAttributes(), ArgAttrVec));
    NewCS->copyMetadata(CB, {LLVMContext::MD_prof, LLVMContext::MD_dbg});
    Args.clear();
    ArgAttrVec.clear();

    // Update the callgraph to know that the callsite has been transformed.
    if (ReplaceCallSite)
      (*ReplaceCallSite)(CB, *NewCS);

    if (!CB.use_empty()) {
      CB.replaceAllUsesWith(NewCS);
      NewCS->takeName(&CB);
    }

    // Finally, remove the old call from the program, reducing the use-count of
    // F.
    CB.eraseFromParent();
  }

  // Since we have now created the new function, splice the body of the old
  // function right into the new function, leaving the old rotting hulk of the
  // function empty.
  NF->getBasicBlockList().splice(NF->begin(), F->getBasicBlockList());

  // Loop over the argument list, transferring uses of the old arguments over to
  // the new arguments, also transferring over the names as well.
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(),
                              I2 = NF->arg_begin();
       I != E; ++I) {
    if (!ArgsToPromote.count(&*I) && !ByValArgsToTransform.count(&*I)) {
      // If this is an unmodified argument, move the name and users over to the
      // new version.
      I->replaceAllUsesWith(&*I2);
      I2->takeName(&*I);
      ++I2;
      continue;
    }

    if (ByValArgsToTransform.count(&*I)) {
      // In the callee, we create an alloca, and store each of the new incoming
      // arguments into the alloca.
      Instruction *InsertPt = &NF->begin()->front();

      // Just add all the struct element types.
      Type *AgTy = cast<PointerType>(I->getType())->getElementType();
      Align StructAlign = *I->getParamAlign();
      Value *TheAlloca = new AllocaInst(AgTy, DL.getAllocaAddrSpace(), nullptr,
                                        StructAlign, "", InsertPt);
      StructType *STy = cast<StructType>(AgTy);
      Value *Idxs[2] = {ConstantInt::get(Type::getInt32Ty(F->getContext()), 0),
                        nullptr};
      const StructLayout *SL = DL.getStructLayout(STy);

      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
        Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
        Value *Idx = GetElementPtrInst::Create(
            AgTy, TheAlloca, Idxs, TheAlloca->getName() + "." + Twine(i),
            InsertPt);
        I2->setName(I->getName() + "." + Twine(i));
        Align Alignment = commonAlignment(StructAlign, SL->getElementOffset(i));
        new StoreInst(&*I2++, Idx, false, Alignment, InsertPt);
      }

      // Anything that used the arg should now use the alloca.
      I->replaceAllUsesWith(TheAlloca);
      TheAlloca->takeName(&*I);
      continue;
    }

    // There potentially are metadata uses for things like llvm.dbg.value.
    // Replace them with undef, after handling the other regular uses.
    auto RauwUndefMetadata = make_scope_exit(
        [&]() { I->replaceAllUsesWith(UndefValue::get(I->getType())); });

    if (I->use_empty())
      continue;

    // Otherwise, if we promoted this argument, then all users are load
    // instructions (or GEPs with only load users), and all loads should be
    // using the new argument that we added.
    ScalarizeTable &ArgIndices = ScalarizedElements[&*I];

    while (!I->use_empty()) {
      if (LoadInst *LI = dyn_cast<LoadInst>(I->user_back())) {
        assert(ArgIndices.begin()->second.empty() &&
               "Load element should sort to front!");
        I2->setName(I->getName() + ".val");
        LI->replaceAllUsesWith(&*I2);
        LI->eraseFromParent();
        LLVM_DEBUG(dbgs() << "*** Promoted load of argument '" << I->getName()
                          << "' in function '" << F->getName() << "'\n");
      } else {
        GetElementPtrInst *GEP = cast<GetElementPtrInst>(I->user_back());
        assert(!GEP->use_empty() &&
               "GEPs without uses should be cleaned up already");
        IndicesVector Operands;
        Operands.reserve(GEP->getNumIndices());
        for (const Use &Idx : GEP->indices())
          Operands.push_back(cast<ConstantInt>(Idx)->getSExtValue());

        // GEPs with a single 0 index can be merged with direct loads
        if (Operands.size() == 1 && Operands.front() == 0)
          Operands.clear();

        Function::arg_iterator TheArg = I2;
        for (ScalarizeTable::iterator It = ArgIndices.begin();
             It->second != Operands; ++It, ++TheArg) {
          assert(It != ArgIndices.end() && "GEP not handled??");
        }

        TheArg->setName(formatv("{0}.{1:$[.]}.val", I->getName(),
                                make_range(Operands.begin(), Operands.end())));

        LLVM_DEBUG(dbgs() << "*** Promoted agg argument '" << TheArg->getName()
                          << "' of function '" << NF->getName() << "'\n");

        // All of the uses must be load instructions.  Replace them all with
        // the argument specified by ArgNo.
        while (!GEP->use_empty()) {
          LoadInst *L = cast<LoadInst>(GEP->user_back());
          L->replaceAllUsesWith(&*TheArg);
          L->eraseFromParent();
        }
        GEP->eraseFromParent();
      }
    }
    // Increment I2 past all of the arguments added for this promoted pointer.
    std::advance(I2, ArgIndices.size());
  }

  return NF;
}

/// Return true if we can prove that all callees pass in a valid pointer for the
/// specified function argument.
static bool allCallersPassValidPointerForArgument(Argument *Arg, Type *Ty) {
  Function *Callee = Arg->getParent();
  const DataLayout &DL = Callee->getParent()->getDataLayout();

  unsigned ArgNo = Arg->getArgNo();

  // Look at all call sites of the function.  At this point we know we only have
  // direct callees.
  for (User *U : Callee->users()) {
    CallBase &CB = cast<CallBase>(*U);

    if (!isDereferenceablePointer(CB.getArgOperand(ArgNo), Ty, DL))
      return false;
  }
  return true;
}

/// Returns true if Prefix is a prefix of Longer: Longer has a size that is
/// greater than or equal to the size of Prefix, and each element of Prefix
/// equals the corresponding element of Longer.
///
/// Note that this also returns true when Prefix and Longer are equal!
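///
/// For example, isPrefix({1, 2}, {1, 2, 3}) and isPrefix({1, 2}, {1, 2}) are
/// true, while isPrefix({1, 3}, {1, 2, 3}) is false.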
static bool isPrefix(const IndicesVector &Prefix, const IndicesVector &Longer) {
  if (Prefix.size() > Longer.size())
    return false;
  return std::equal(Prefix.begin(), Prefix.end(), Longer.begin());
}

/// Checks if Indices, or a prefix of Indices, is in Set.
static bool prefixIn(const IndicesVector &Indices,
                     std::set<IndicesVector> &Set) {
  std::set<IndicesVector>::iterator Low;
  Low = Set.upper_bound(Indices);
  if (Low != Set.begin())
    Low--;
  // Low is now the last element smaller than or equal to Indices. This means
  // it points to a prefix of Indices (possibly Indices itself), if such
  // prefix exists.
  //
  // This load is safe if any prefix of its operands is safe to load.
  return Low != Set.end() && isPrefix(*Low, Indices);
}

/// Mark the given indices (ToMark) as safe in the given set of indices
/// (Safe). Marking safe usually means adding ToMark to Safe. However, if a
/// prefix of ToMark is already in Safe, ToMark is implicitly marked safe
/// already. Furthermore, any indices that ToMark is itself a prefix of are
/// removed from Safe (since they are now implicitly safe because of ToMark).
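///
/// For example, if Safe is {{1, 2}}, then markIndicesSafe({1, 2, 3}, Safe) is
/// a no-op, whereas markIndicesSafe({1}, Safe) erases {1, 2} and leaves Safe
/// as {{1}}.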
static void markIndicesSafe(const IndicesVector &ToMark,
                            std::set<IndicesVector> &Safe) {
  std::set<IndicesVector>::iterator Low;
  Low = Safe.upper_bound(ToMark);
  // Guard against the case where Safe is empty
  if (Low != Safe.begin())
    Low--;
  // Low is now the last element smaller than or equal to ToMark. This
  // means it points to a prefix of ToMark (possibly ToMark itself), if
  // such a prefix exists.
  if (Low != Safe.end()) {
    if (isPrefix(*Low, ToMark))
      // If there is already a prefix of these indices (or exactly these
      // indices) marked as safe, don't bother adding these indices
      return;

    // Increment Low, so we can use it as an "insert before" hint
    ++Low;
  }
  // Insert
  Low = Safe.insert(Low, ToMark);
  ++Low;
  // If ToMark is a prefix of longer index list(s), remove those
  std::set<IndicesVector>::iterator End = Safe.end();
  while (Low != End && isPrefix(ToMark, *Low)) {
    std::set<IndicesVector>::iterator Remove = Low;
    ++Low;
    Safe.erase(Remove);
  }
}

/// isSafeToPromoteArgument - As you might guess from the name of this method,
/// it checks to see if it is both safe and useful to promote the argument.
/// This method limits promotion of aggregates to only promote up to
/// MaxElements elements of the aggregate (three by default) in order to avoid
/// exploding the number of arguments passed in.
static bool isSafeToPromoteArgument(Argument *Arg, Type *ByValTy, AAResults &AAR,
                                    unsigned MaxElements) {
  using GEPIndicesSet = std::set<IndicesVector>;

  // Quick exit for unused arguments
  if (Arg->use_empty())
    return true;

  // We can only promote this argument if all of the uses are loads, or are GEP
  // instructions (with constant indices) that are subsequently loaded.
  //
  // Promoting the argument causes it to be loaded in the caller
  // unconditionally. This is only safe if we can prove that either the load
  // would have happened in the callee anyway (ie, there is a load in the entry
  // block) or the pointer passed in at every call site is guaranteed to be
  // valid.
  // In the former case, invalid loads can happen, but would have happened
  // anyway; in the latter case, invalid loads won't happen. This prevents us
  // from introducing an invalid load that wouldn't have happened in the
  // original code.
  //
  // This set will contain all sets of indices that are loaded in the entry
  // block, and thus are safe to unconditionally load in the caller.
  GEPIndicesSet SafeToUnconditionallyLoad;

  // This set contains all the sets of indices that we are planning to promote.
  // This makes it possible to limit the number of arguments added.
  GEPIndicesSet ToPromote;

  // If the pointer is always valid, any load with first index 0 is valid.

  if (ByValTy)
    SafeToUnconditionallyLoad.insert(IndicesVector(1, 0));

  // Whenever a new underlying type for the operand is found, make sure it's
  // consistent with the GEPs and loads we've already seen and, if necessary,
  // use it to see if all incoming pointers are valid (which implies the 0-index
  // is safe).
  Type *BaseTy = ByValTy;
  auto UpdateBaseTy = [&](Type *NewBaseTy) {
    if (BaseTy)
      return BaseTy == NewBaseTy;

    BaseTy = NewBaseTy;
    if (allCallersPassValidPointerForArgument(Arg, BaseTy)) {
      assert(SafeToUnconditionallyLoad.empty());
      SafeToUnconditionallyLoad.insert(IndicesVector(1, 0));
    }

    return true;
  };

  // First, iterate the entry block and mark loads of (geps of) arguments as
  // safe.
  BasicBlock &EntryBlock = Arg->getParent()->front();
  // Declare this here so we can reuse it
  IndicesVector Indices;
  for (Instruction &I : EntryBlock)
    if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
      Value *V = LI->getPointerOperand();
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
        V = GEP->getPointerOperand();
        if (V == Arg) {
          // This load actually loads (part of) Arg? Check the indices then.
          Indices.reserve(GEP->getNumIndices());
          for (Use &Idx : GEP->indices())
            if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
              Indices.push_back(CI->getSExtValue());
            else
              // We found a non-constant GEP index for this argument? Bail out
              // right away, can't promote this argument at all.
              return false;

          if (!UpdateBaseTy(GEP->getSourceElementType()))
            return false;

          // Indices checked out, mark them as safe
          markIndicesSafe(Indices, SafeToUnconditionallyLoad);
          Indices.clear();
        }
      } else if (V == Arg) {
        // Direct loads are equivalent to a GEP with a single 0 index.
        markIndicesSafe(IndicesVector(1, 0), SafeToUnconditionallyLoad);

        if (BaseTy && LI->getType() != BaseTy)
          return false;

        BaseTy = LI->getType();
      }
    }

  // Now, iterate all uses of the argument to see if there are any uses that are
  // not (GEP+)loads, or any (GEP+)loads that are not safe to promote.
  SmallVector<LoadInst *, 16> Loads;
  IndicesVector Operands;
  for (Use &U : Arg->uses()) {
    User *UR = U.getUser();
    Operands.clear();
    if (LoadInst *LI = dyn_cast<LoadInst>(UR)) {
      // Don't hack volatile/atomic loads
      if (!LI->isSimple())
        return false;
      Loads.push_back(LI);
      // Direct loads are equivalent to a GEP with a zero index and then a load.
      Operands.push_back(0);

      if (!UpdateBaseTy(LI->getType()))
        return false;
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UR)) {
      if (GEP->use_empty()) {
        // Dead GEPs cause trouble later; they are erased in doPromotion, so
        // just skip them here.
        continue;
      }

      if (!UpdateBaseTy(GEP->getSourceElementType()))
        return false;

      // Ensure that all of the indices are constants.
      for (Use &Idx : GEP->indices())
        if (ConstantInt *C = dyn_cast<ConstantInt>(Idx))
          Operands.push_back(C->getSExtValue());
        else
          return false; // Not a constant operand GEP!

      // Ensure that the only users of the GEP are load instructions.
      for (User *GEPU : GEP->users())
        if (LoadInst *LI = dyn_cast<LoadInst>(GEPU)) {
          // Don't hack volatile/atomic loads
          if (!LI->isSimple())
            return false;
          Loads.push_back(LI);
        } else {
          // Other uses than load?
          return false;
        }
    } else {
      return false; // Not a load or a GEP.
    }

    // Now, see if it is safe to promote this load / loads of this GEP. Loading
    // is safe if Operands, or a prefix of Operands, is marked as safe.
    if (!prefixIn(Operands, SafeToUnconditionallyLoad))
      return false;

    // See if we are already promoting a load with these indices. If not, check
    // to make sure that we aren't promoting too many elements.  If so, nothing
    // to do.
    if (ToPromote.find(Operands) == ToPromote.end()) {
      if (MaxElements > 0 && ToPromote.size() == MaxElements) {
        LLVM_DEBUG(dbgs() << "argpromotion not promoting argument '"
                          << Arg->getName()
                          << "' because it would require adding more "
                          << "than " << MaxElements
                          << " arguments to the function.\n");
        // We limit aggregate promotion to only promoting up to a fixed number
        // of elements of the aggregate.
        return false;
      }
      ToPromote.insert(std::move(Operands));
    }
  }

  if (Loads.empty())
    return true; // No users, this is a dead argument.

  // Okay, now we know that the argument is only used by load instructions and
  // it is safe to unconditionally perform all of them. Use alias analysis to
  // check to see if the pointer is guaranteed to not be modified from entry of
  // the function to each of the load instructions.

  // Because there could be several/many load instructions, remember which
  // blocks we know to be transparent to the load.
  df_iterator_default_set<BasicBlock *, 16> TranspBlocks;

  for (LoadInst *Load : Loads) {
    // Check to see if the load is invalidated from the start of the block to
    // the load itself.
    BasicBlock *BB = Load->getParent();

    MemoryLocation Loc = MemoryLocation::get(Load);
    if (AAR.canInstructionRangeModRef(BB->front(), *Load, Loc, ModRefInfo::Mod))
      return false; // Pointer is invalidated!

    // Now check every path from the entry block to the load for transparency.
    // To do this, we perform a depth first search on the inverse CFG from the
    // loading block.
    for (BasicBlock *P : predecessors(BB)) {
      for (BasicBlock *TranspBB : inverse_depth_first_ext(P, TranspBlocks))
        if (AAR.canBasicBlockModify(*TranspBB, Loc))
          return false;
    }
  }

  // If the path from the entry of the function to each load is free of
  // instructions that potentially invalidate the load, we can make the
  // transformation!
  return true;
}

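/// Returns true if \p type contains no padding bytes: its bit size equals its
/// alloc size and, for aggregates, the elements are themselves densely packed
/// and laid out back to back. As an illustration (element offsets depend on
/// the target's data layout), on a typical 64-bit target { i32, i32 } is
/// densely packed while { i8, i32 } is not, because padding is inserted before
/// the i32 element.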
bool ArgumentPromotionPass::isDenselyPacked(Type *type, const DataLayout &DL) {
  // There is no size information, so be conservative.
  if (!type->isSized())
    return false;

  // If the alloc size is not equal to the storage size, then there are padding
  // bytes. For x86_fp80 on x86-64, size: 80, alloc size: 128.
  if (DL.getTypeSizeInBits(type) != DL.getTypeAllocSizeInBits(type))
    return false;

  // FIXME: This isn't the right way to check for padding in vectors with
  // non-byte-size elements.
  if (VectorType *seqTy = dyn_cast<VectorType>(type))
    return isDenselyPacked(seqTy->getElementType(), DL);

  // For array types, check for padding within members.
  if (ArrayType *seqTy = dyn_cast<ArrayType>(type))
    return isDenselyPacked(seqTy->getElementType(), DL);

  if (!isa<StructType>(type))
    return true;

  // Check for padding within and between elements of a struct.
  StructType *StructTy = cast<StructType>(type);
  const StructLayout *Layout = DL.getStructLayout(StructTy);
  uint64_t StartPos = 0;
  for (unsigned i = 0, E = StructTy->getNumElements(); i < E; ++i) {
    Type *ElTy = StructTy->getElementType(i);
    if (!isDenselyPacked(ElTy, DL))
      return false;
    if (StartPos != Layout->getElementOffsetInBits(i))
      return false;
    StartPos += DL.getTypeAllocSizeInBits(ElTy);
  }

  return true;
}

/// Checks if the padding bytes of an argument could be accessed.
static bool canPaddingBeAccessed(Argument *arg) {
  assert(arg->hasByValAttr());

  // Track all the pointers to the argument to make sure they are not captured.
  SmallPtrSet<Value *, 16> PtrValues;
  PtrValues.insert(arg);

  // Track all of the stores.
  SmallVector<StoreInst *, 16> Stores;

  // Scan through the uses recursively to make sure the pointer is always used
  // sanely.
  SmallVector<Value *, 16> WorkList(arg->users());
  while (!WorkList.empty()) {
    Value *V = WorkList.pop_back_val();
    if (isa<GetElementPtrInst>(V) || isa<PHINode>(V)) {
      if (PtrValues.insert(V).second)
        llvm::append_range(WorkList, V->users());
    } else if (StoreInst *Store = dyn_cast<StoreInst>(V)) {
      Stores.push_back(Store);
    } else if (!isa<LoadInst>(V)) {
      return true;
    }
  }

  // Check to make sure the pointers aren't captured
  for (StoreInst *Store : Stores)
    if (PtrValues.count(Store->getValueOperand()))
      return true;

  return false;
}

bool ArgumentPromotionPass::areFunctionArgsABICompatible(
    const Function &F, const TargetTransformInfo &TTI,
    SmallPtrSetImpl<Argument *> &ArgsToPromote,
    SmallPtrSetImpl<Argument *> &ByValArgsToTransform) {
  for (const Use &U : F.uses()) {
    CallBase *CB = dyn_cast<CallBase>(U.getUser());
    if (!CB)
      return false;
    const Function *Caller = CB->getCaller();
    const Function *Callee = CB->getCalledFunction();
    if (!TTI.areFunctionArgsABICompatible(Caller, Callee, ArgsToPromote) ||
        !TTI.areFunctionArgsABICompatible(Caller, Callee, ByValArgsToTransform))
      return false;
  }
  return true;
}

/// PromoteArguments - This method checks the specified function to see if there
/// are any promotable arguments and if it is safe to promote the function (for
/// example, all callers are direct).  If it is safe to promote some arguments,
/// it calls the doPromotion method.
static Function *
promoteArguments(Function *F, function_ref<AAResults &(Function &F)> AARGetter,
                 unsigned MaxElements,
                 Optional<function_ref<void(CallBase &OldCS, CallBase &NewCS)>>
                     ReplaceCallSite,
                 const TargetTransformInfo &TTI) {
  // Don't perform argument promotion for naked functions; otherwise we can end
  // up removing parameters that are seemingly 'not used' as they are referred
  // to in the assembly.
  if (F->hasFnAttribute(Attribute::Naked))
    return nullptr;

  // Make sure that it is local to this module.
  if (!F->hasLocalLinkage())
    return nullptr;

  // Don't promote arguments for variadic functions. Adding, removing, or
  // changing non-pack parameters can change the classification of pack
  // parameters. Frontends encode that classification at the call site in the
  // IR, while in the callee the classification is determined dynamically based
  // on the number of registers consumed so far.
  if (F->isVarArg())
    return nullptr;

  // Don't transform functions that receive inallocas, as the transformation may
  // not be safe depending on calling convention.
  if (F->getAttributes().hasAttrSomewhere(Attribute::InAlloca))
    return nullptr;

  // First check: see if there are any pointer arguments!  If not, quick exit.
  SmallVector<Argument *, 16> PointerArgs;
  for (Argument &I : F->args())
    if (I.getType()->isPointerTy())
      PointerArgs.push_back(&I);
  if (PointerArgs.empty())
    return nullptr;

  // Second check: make sure that all callers are direct callers.  We can't
  // transform functions that have indirect callers.  Also see if the function
  // is self-recursive and check that target features are compatible.
  bool isSelfRecursive = false;
  for (Use &U : F->uses()) {
    CallBase *CB = dyn_cast<CallBase>(U.getUser());
    // Must be a direct call.
    if (CB == nullptr || !CB->isCallee(&U))
      return nullptr;

    // Can't change signature of musttail callee
    if (CB->isMustTailCall())
      return nullptr;

    if (CB->getParent()->getParent() == F)
      isSelfRecursive = true;
  }

  // Can't change signature of musttail caller
  // FIXME: Support promoting whole chain of musttail functions
  for (BasicBlock &BB : *F)
    if (BB.getTerminatingMustTailCall())
      return nullptr;

  const DataLayout &DL = F->getParent()->getDataLayout();

  AAResults &AAR = AARGetter(*F);

  // Check to see which arguments are promotable.  If an argument is promotable,
  // add it to ArgsToPromote.
  SmallPtrSet<Argument *, 8> ArgsToPromote;
  SmallPtrSet<Argument *, 8> ByValArgsToTransform;
  for (Argument *PtrArg : PointerArgs) {
    Type *AgTy = cast<PointerType>(PtrArg->getType())->getElementType();

    // Replace sret attribute with noalias. This reduces register pressure by
    // avoiding a register copy.
    if (PtrArg->hasStructRetAttr()) {
      unsigned ArgNo = PtrArg->getArgNo();
      F->removeParamAttr(ArgNo, Attribute::StructRet);
      F->addParamAttr(ArgNo, Attribute::NoAlias);
      for (Use &U : F->uses()) {
        CallBase &CB = cast<CallBase>(*U.getUser());
        CB.removeParamAttr(ArgNo, Attribute::StructRet);
        CB.addParamAttr(ArgNo, Attribute::NoAlias);
      }
    }

    // If this is a byval argument, and if the aggregate type is small, just
    // pass the elements, which is always safe, if the passed value is densely
    // packed or if we can prove the padding bytes are never accessed.
    //
    // Only handle arguments with specified alignment; if it's unspecified, the
    // actual alignment of the argument is target-specific.
    bool isSafeToPromote = PtrArg->hasByValAttr() && PtrArg->getParamAlign() &&
                           (ArgumentPromotionPass::isDenselyPacked(AgTy, DL) ||
                            !canPaddingBeAccessed(PtrArg));
    if (isSafeToPromote) {
      if (StructType *STy = dyn_cast<StructType>(AgTy)) {
        if (MaxElements > 0 && STy->getNumElements() > MaxElements) {
          LLVM_DEBUG(dbgs() << "argpromotion not promoting argument '"
                            << PtrArg->getName()
                            << "' because it would require adding more"
                            << " than " << MaxElements
                            << " arguments to the function.\n");
          continue;
        }

        // If all the elements are single-value types, we can promote it.
        bool AllSimple = true;
        for (const auto *EltTy : STy->elements()) {
          if (!EltTy->isSingleValueType()) {
            AllSimple = false;
            break;
          }
        }

        // Safe to transform; don't even bother trying to "promote" it.
        // Passing the elements as scalars will allow SROA to hack on
        // the new alloca we introduce.
        if (AllSimple) {
          ByValArgsToTransform.insert(PtrArg);
          continue;
        }
      }
    }

    // If the argument is a recursive type and we're in a recursive
    // function, we could end up infinitely peeling the function argument.
    if (isSelfRecursive) {
      if (StructType *STy = dyn_cast<StructType>(AgTy)) {
        bool RecursiveType =
            llvm::is_contained(STy->elements(), PtrArg->getType());
        if (RecursiveType)
          continue;
      }
    }

    // Otherwise, see if we can promote the pointer to its value.
    Type *ByValTy =
        PtrArg->hasByValAttr() ? PtrArg->getParamByValType() : nullptr;
    if (isSafeToPromoteArgument(PtrArg, ByValTy, AAR, MaxElements))
      ArgsToPromote.insert(PtrArg);
  }

  // No promotable pointer arguments.
  if (ArgsToPromote.empty() && ByValArgsToTransform.empty())
    return nullptr;

  if (!ArgumentPromotionPass::areFunctionArgsABICompatible(
          *F, TTI, ArgsToPromote, ByValArgsToTransform))
    return nullptr;

  return doPromotion(F, ArgsToPromote, ByValArgsToTransform, ReplaceCallSite);
}

PreservedAnalyses ArgumentPromotionPass::run(LazyCallGraph::SCC &C,
                                             CGSCCAnalysisManager &AM,
                                             LazyCallGraph &CG,
                                             CGSCCUpdateResult &UR) {
  bool Changed = false, LocalChange;

  // Iterate until we stop promoting from this SCC.
  do {
    LocalChange = false;

    for (LazyCallGraph::Node &N : C) {
      Function &OldF = N.getFunction();

      FunctionAnalysisManager &FAM =
          AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
      // FIXME: This lambda must only be used with this function. We should
      // skip the lambda and just get the AA results directly.
      auto AARGetter = [&](Function &F) -> AAResults & {
        assert(&F == &OldF && "Called with an unexpected function!");
        return FAM.getResult<AAManager>(F);
      };

      const TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(OldF);
      Function *NewF =
          promoteArguments(&OldF, AARGetter, MaxElements, None, TTI);
      if (!NewF)
        continue;
      LocalChange = true;

      // Directly substitute the functions in the call graph. Note that this
      // requires the old function to be completely dead and completely
      // replaced by the new function. It does no call graph updates, it merely
      // swaps out the particular function mapped to a particular node in the
      // graph.
      C.getOuterRefSCC().replaceNodeFunction(N, *NewF);
      FAM.clear(OldF, OldF.getName());
      OldF.eraseFromParent();
    }

    Changed |= LocalChange;
  } while (LocalChange);

  if (!Changed)
    return PreservedAnalyses::all();

  return PreservedAnalyses::none();
}

namespace {

/// ArgPromotion - The 'by reference' to 'by value' argument promotion pass.
struct ArgPromotion : public CallGraphSCCPass {
  // Pass identification, replacement for typeid
  static char ID;

  explicit ArgPromotion(unsigned MaxElements = 3)
      : CallGraphSCCPass(ID), MaxElements(MaxElements) {
    initializeArgPromotionPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    getAAResultsAnalysisUsage(AU);
    CallGraphSCCPass::getAnalysisUsage(AU);
  }

  bool runOnSCC(CallGraphSCC &SCC) override;

private:
  using llvm::Pass::doInitialization;

  bool doInitialization(CallGraph &CG) override;

  /// The maximum number of elements to expand, or 0 for unlimited.
  unsigned MaxElements;
};

} // end anonymous namespace

char ArgPromotion::ID = 0;

INITIALIZE_PASS_BEGIN(ArgPromotion, "argpromotion",
                      "Promote 'by reference' arguments to scalars", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(ArgPromotion, "argpromotion",
                    "Promote 'by reference' arguments to scalars", false, false)

Pass *llvm::createArgumentPromotionPass(unsigned MaxElements) {
  return new ArgPromotion(MaxElements);
}

bool ArgPromotion::runOnSCC(CallGraphSCC &SCC) {
  if (skipSCC(SCC))
    return false;

  // Get the callgraph information that we need to update to reflect our
  // changes.
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();

  LegacyAARGetter AARGetter(*this);

  bool Changed = false, LocalChange;

  // Iterate until we stop promoting from this SCC.
  do {
    LocalChange = false;
    // Attempt to promote arguments from all functions in this SCC.
    for (CallGraphNode *OldNode : SCC) {
      Function *OldF = OldNode->getFunction();
      if (!OldF)
        continue;

      auto ReplaceCallSite = [&](CallBase &OldCS, CallBase &NewCS) {
        Function *Caller = OldCS.getParent()->getParent();
        CallGraphNode *NewCalleeNode =
            CG.getOrInsertFunction(NewCS.getCalledFunction());
        CallGraphNode *CallerNode = CG[Caller];
        CallerNode->replaceCallEdge(cast<CallBase>(OldCS),
                                    cast<CallBase>(NewCS), NewCalleeNode);
      };

      const TargetTransformInfo &TTI =
          getAnalysis<TargetTransformInfoWrapperPass>().getTTI(*OldF);
      if (Function *NewF = promoteArguments(OldF, AARGetter, MaxElements,
                                            {ReplaceCallSite}, TTI)) {
        LocalChange = true;

        // Update the call graph for the newly promoted function.
        CallGraphNode *NewNode = CG.getOrInsertFunction(NewF);
        NewNode->stealCalledFunctionsFrom(OldNode);
        if (OldNode->getNumReferences() == 0)
          delete CG.removeFunctionFromModule(OldNode);
        else
          OldF->setLinkage(Function::ExternalLinkage);

        // And update the SCC we're iterating as well.
        SCC.ReplaceNode(OldNode, NewNode);
      }
    }
    // Remember that we changed something.
    Changed |= LocalChange;
  } while (LocalChange);

  return Changed;
}

bool ArgPromotion::doInitialization(CallGraph &CG) {
  return CallGraphSCCPass::doInitialization(CG);
}