InlineFunction.cpp revision 208954
//===- InlineFunction.cpp - Code to perform function inlining -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Intrinsics.h"
#include "llvm/Attributes.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CallSite.h"
using namespace llvm;

bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI) {
  return InlineFunction(CallSite(CI), IFI);
}
bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI) {
  return InlineFunction(CallSite(II), IFI);
}


/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
/// invokes.  This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
/// nodes in that block with the values specified in InvokeDestPHIValues.
///
static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   BasicBlock *InvokeDest,
                           const SmallVectorImpl<Value*> &InvokeDestPHIValues) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);
    if (CI == 0) continue;

    // If this call cannot unwind, don't convert it to an invoke.
    if (CI->doesNotThrow())
      continue;

    // Convert this function call into an invoke instruction.
    // First, split the basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Next, create the new invoke instruction, inserting it at the end
    // of the old basic block.
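    // (In this revision, operand 0 of the CallInst is the called value, so the
    // actual argument operands begin at op_begin()+1.)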
    SmallVector<Value*, 8> InvokeArgs(CI->op_begin()+1, CI->op_end());
    InvokeInst *II =
      InvokeInst::Create(CI->getCalledValue(), Split, InvokeDest,
                         InvokeArgs.begin(), InvokeArgs.end(),
                         CI->getName(), BB->getTerminator());
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke!  This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the unconditional branch inserted by splitBasicBlock
    BB->getInstList().pop_back();
    Split->getInstList().pop_front();  // Delete the original call

    // Update any PHI nodes in the exceptional block to indicate that
    // there is now a new entry in them.
    unsigned i = 0;
    for (BasicBlock::iterator I = InvokeDest->begin();
         isa<PHINode>(I); ++I, ++i)
      cast<PHINode>(I)->addIncoming(InvokeDestPHIValues[i], BB);

    // This basic block is now complete; the caller will continue scanning with
    // the next block (the remainder of this one now lives in Split).
    return;
  }
}


/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes and turn unwind
/// instructions into branches to the invoke unwind dest.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlinedCodeInfo is information about the code that got inlined.
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();
  SmallVector<Value*, 8> InvokeDestPHIValues;

  // If there are PHI nodes in the unwind destination block, we need to
  // keep track of which values came into them from this invoke, then remove
  // the entry for this block.
  BasicBlock *InvokeBlock = II->getParent();
  for (BasicBlock::iterator I = InvokeDest->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);
    // Save the value to use for this edge.
    InvokeDestPHIValues.push_back(PN->getIncomingValueForBlock(InvokeBlock));
  }

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function; scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.  If the code doesn't have calls or unwinds, we know there is
  // nothing to rewrite.
  if (!InlinedCodeInfo.ContainsCalls && !InlinedCodeInfo.ContainsUnwinds) {
    // Now that everything is happy, we have one final detail.  The PHI nodes in
    // the exception destination block still have entries due to the original
    // invoke instruction.  Eliminate these entries (which might even delete the
    // PHI node) now.
    InvokeDest->removePredecessor(II->getParent());
    return;
  }

  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; ++BB){
    if (InlinedCodeInfo.ContainsCalls)
      HandleCallsInBlockInlinedThroughInvoke(BB, InvokeDest,
                                             InvokeDestPHIValues);

    if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
      // An UnwindInst requires special handling when it gets inlined into an
      // invoke site.  Once this happens, we know that the unwind would cause
      // a control transfer to the invoke exception destination, so we can
      // transform it into a direct branch to the exception destination.
      BranchInst::Create(InvokeDest, UI);

      // Delete the unwind instruction!
      UI->eraseFromParent();

      // Update any PHI nodes in the exceptional block to indicate that
      // there is now a new entry in them.
      unsigned i = 0;
      for (BasicBlock::iterator I = InvokeDest->begin();
           isa<PHINode>(I); ++I, ++i) {
        PHINode *PN = cast<PHINode>(I);
        PN->addIncoming(InvokeDestPHIValues[i], BB);
      }
    }
  }

  // Now that everything is happy, we have one final detail.  The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction.  Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made.  Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                       DenseMap<const Value*, Value*> &ValueMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
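  // If the caller and callee share a call graph node, adding edges to
  // CallerNode in the loop below would invalidate our iterators into that same
  // edge list, so iterate over a copy instead.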
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    DenseMap<const Value*, Value*>::iterator VMI = ValueMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == ValueMap.end() || VMI->second == 0)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add.  Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (NewCall == 0) continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer.  If this
    // happens, set the callee of the new call site to a more precise
    // destination.  This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (I->second->getFunction() == 0)
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite::get(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite::get(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller.  We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}

// InlineFunction - This function inlines the called function into the basic
// block of the caller.  This returns false if it is not possible to inline this
// call.  The program is still in a well-defined state if this occurs though.
//
// Note that this only does one level of inlining.  For example, if the
// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
// exists in the instruction stream.  Similarly, this will inline a recursive
// function by one level.
//
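// A rough caller-side sketch (hypothetical client code; assumes the
// InlineFunctionInfo constructor taking an optional CallGraph* and
// TargetData*, as declared in Cloning.h):
//
//   InlineFunctionInfo IFI(CG, TD);
//   if (InlineFunction(CS, IFI)) {
//     // The call site has been replaced by the callee's body;
//     // IFI.StaticAllocas and IFI.InlinedCalls describe what was cloned.
//   }
//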
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
  Instruction *TheCall = CS.getInstruction();
  LLVMContext &Context = TheCall->getContext();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  const Function *CalledFunc = CS.getCalledFunction();
  if (CalledFunc == 0 ||          // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;


  // If the call to the callee is not a tail call, we must clear the 'tail'
  // flags on any calls that we inline.
  bool MustClearTailCallFlags =
    !(isa<CallInst>(TheCall) && cast<CallInst>(TheCall)->isTailCall());
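  // (If the site itself is not a tail call, the inlined calls may end up
  // referring to stack objects in the caller's frame, so keeping their 'tail'
  // markers would be unsafe.)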

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to the
  //     caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  //
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy ValueMap after cloning.
    DenseMap<const Value*, Value*> ValueMap;

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner, which
    // matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit.  However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CalledFunc->paramHasAttr(ArgNo+1, Attribute::ByVal) &&
          !CalledFunc->onlyReadsMemory()) {
        const Type *AggTy = cast<PointerType>(I->getType())->getElementType();
        const Type *VoidPtrTy =
            Type::getInt8PtrTy(Context);

        // Create the alloca.  If we have TargetData, use nice alignment.
        unsigned Align = 1;
        if (IFI.TD) Align = IFI.TD->getPrefTypeAlignment(AggTy);
        Value *NewAlloca = new AllocaInst(AggTy, 0, Align,
                                          I->getName(),
                                          &*Caller->begin()->begin());
        // Emit a memcpy.
        const Type *Tys[3] = {VoidPtrTy, VoidPtrTy, Type::getInt64Ty(Context)};
        Function *MemCpyFn = Intrinsic::getDeclaration(Caller->getParent(),
                                                       Intrinsic::memcpy,
                                                       Tys, 3);
        Value *DestCast = new BitCastInst(NewAlloca, VoidPtrTy, "tmp", TheCall);
        Value *SrcCast = new BitCastInst(*AI, VoidPtrTy, "tmp", TheCall);

        Value *Size;
        if (IFI.TD == 0)
          Size = ConstantExpr::getSizeOf(AggTy);
        else
          Size = ConstantInt::get(Type::getInt64Ty(Context),
                                  IFI.TD->getTypeStoreSize(AggTy));
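        // (Without TargetData we fall back to a target-independent sizeof
        // constant expression; with it we can emit the exact store size.)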

        // Always generate a memcpy of alignment 1 here because we don't know
        // the alignment of the src pointer.  Other optimizations can infer
        // better alignment.
        Value *CallArgs[] = {
          DestCast, SrcCast, Size,
          ConstantInt::get(Type::getInt32Ty(Context), 1),
          ConstantInt::get(Type::getInt1Ty(Context), 0)
        };
        CallInst *TheMemCpy =
          CallInst::Create(MemCpyFn, CallArgs, CallArgs+5, "", TheCall);

        // If we have a call graph, update it.
        if (CallGraph *CG = IFI.CG) {
          CallGraphNode *MemCpyCGN = CG->getOrInsertFunction(MemCpyFn);
          CallGraphNode *CallerNode = (*CG)[Caller];
          CallerNode->addCalledFunction(TheMemCpy, MemCpyCGN);
        }

        // Uses of the argument in the function should use our new alloca
        // instead.
        ActualArg = NewAlloca;
      }

      ValueMap[I] = ActualArg;
    }

    // We want the inliner to prune the code as it copies.  We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, ValueMap, Returns, ".i",
                              &InlinedFunctionInfo, IFI.TD, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;
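    // (LastBlock was the caller's final block before cloning, so the block
    // immediately following it is the first block of the cloned callee.)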

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, ValueMap, IFI);
  }

  // If there are any alloca instructions in the block that used to be the entry
  // block for the callee, move them to the entry block of the caller.  First
  // calculate which instruction they should be inserted before.  We insert the
  // instructions at the end of the current alloca list.
  //
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (AI == 0) continue;

      // If the alloca is now dead, remove it.  This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller if the
      // StaticAllocas pointer is non-null.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block.  Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
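      // The range [AI, I) is half-open: it covers AI and the run of static
      // allocas scanned above, but not the instruction at I itself.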
      Caller->getEntryBlock().getInstList().splice(InsertPoint,
                                                   FirstNewBlock->getInstList(),
                                                   AI, I);
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore=Intrinsic::getDeclaration(M,Intrinsic::stackrestore);

    // If we are preserving the callgraph, add edges to the stacksave/restore
    // functions for the calls we insert.
    CallGraphNode *StackSaveCGN = 0, *StackRestoreCGN = 0, *CallerNode = 0;
    if (CallGraph *CG = IFI.CG) {
      StackSaveCGN    = CG->getOrInsertFunction(StackSave);
      StackRestoreCGN = CG->getOrInsertFunction(StackRestore);
      CallerNode = (*CG)[Caller];
    }

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = CallInst::Create(StackSave, "savedstack",
                                          FirstNewBlock->begin());
    if (IFI.CG) CallerNode->addCalledFunction(SavedPtr, StackSaveCGN);

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      CallInst *CI = CallInst::Create(StackRestore, SavedPtr, "", Returns[i]);
      if (IFI.CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
    }

    // Count the number of StackRestore calls we insert.
    unsigned NumStackRestores = Returns.size();

    // If we are inlining an invoke instruction, insert restores before each
    // unwind.  These unwinds will be rewritten into branches later.
    if (InlinedFunctionInfo.ContainsUnwinds && isa<InvokeInst>(TheCall)) {
      for (Function::iterator BB = FirstNewBlock, E = Caller->end();
           BB != E; ++BB)
        if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
          CallInst *CI = CallInst::Create(StackRestore, SavedPtr, "", UI);
          if (IFI.CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
          ++NumStackRestores;
        }
    }
  }

  // If we are inlining through a call site that isn't marked 'tail', we must
  // remove the 'tail' marker from any calls in the inlined code.  Also, calls
  // inlined through a 'nounwind' call site should be marked 'nounwind'.
  if (InlinedFunctionInfo.ContainsCalls &&
      (MustClearTailCallFlags || MarkNoUnwind)) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
        if (CallInst *CI = dyn_cast<CallInst>(I)) {
          if (MustClearTailCallFlags)
            CI->setTailCall(false);
          if (MarkNoUnwind)
            CI->setDoesNotThrow();
        }
  }

  // If we are inlining through a 'nounwind' call site then any inlined 'unwind'
  // instructions are unreachable.
  if (InlinedFunctionInfo.ContainsUnwinds && MarkNoUnwind)
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB) {
      TerminatorInst *Term = BB->getTerminator();
      if (isa<UnwindInst>(Term)) {
        new UnreachableInst(Context, Term);
        BB->getInstList().erase(Term);
      }
    }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any inlined 'unwind' instructions into branches to the invoke exception
  // destination, and call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
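    // splice() moves the instructions in place rather than copying them,
    // leaving FirstNewBlock empty so it can simply be popped off below.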
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
      BranchInst::Create(II->getNormalDest(), TheCall);

    // If the return instruction returned a value, replace uses of the call with
    // uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks.  How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block.  This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(NewBr,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
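  // (The unconditional branch produced by splitBasicBlock has its destination
  // as operand 0, so setting it retargets OrigBB at the start of the inlined
  // code.)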
  Br->setOperand(0, FirstNewBlock);


  // Now that the function is correct, make it a little bit nicer.  In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());

  // Handle all of the return instructions that we just cloned in, and eliminate
  // any users of the original call/invoke instruction.
  const Type *RTy = CalledFunc->getReturnType();

  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    PHINode *PHI = 0;
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, TheCall->getName(),
                            AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }

      // Now that we inserted the PHI, check to see if it has a single value
      // (e.g. all the entries are the same or undef).  If so, remove the PHI so
      // it doesn't block other optimizations.
      if (Value *V = PHI->hasConstantValue()) {
        PHI->replaceAllUsesWith(V);
        PHI->eraseFromParent();
      }
    }


    // Add a branch to the merge points and remove return instructions.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst::Create(AfterCallBB, RI);
      RI->eraseFromParent();
    }
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call.  Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the callee's entry block into the calling block, right before the
  // unconditional branch.
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  return true;
}
