CodeGenFunction.cpp revision 223017
//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGException.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

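/// CodeGenFunction - Construct the per-function IR-generation state.  Most
/// members start out null and are filled in later, primarily by StartFunction.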
CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
  : CodeGenTypeCache(cgm), CGM(cgm),
    Target(CGM.getContext().Target), Builder(cgm.getModule().getContext()),
    BlockInfo(0), BlockPointer(0),
    NormalCleanupDest(0), EHCleanupDest(0), NextCleanupDestIndex(1),
    ExceptionSlot(0), EHSelectorSlot(0),
    DebugInfo(0), DisableDebugInfo(false), DidCallStackSave(false),
    IndirectBranch(0), SwitchInsn(0), CaseRangeBlock(0), UnreachableBlock(0),
    CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0),
    OutermostConditional(0), TerminateLandingPad(0), TerminateHandler(0),
    TrapBB(0) {

  CatchUndefined = getContext().getLangOptions().CatchUndefined;
  CGM.getCXXABI().getMangleContext().startNewFunction();
}


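// Thin wrappers around CodeGenTypes: ConvertTypeForMem yields the LLVM type
// used to store a value of the given clang type in memory, while ConvertType
// yields its scalar (register) representation.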
const llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

const llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

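/// hasAggregateLLVMType - Return true if values of the given clang type are
/// handled as aggregates in IR generation (complex numbers, arrays, records,
/// and Objective-C objects) rather than as LLVM scalar values.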
bool CodeGenFunction::hasAggregateLLVMType(QualType type) {
  switch (type.getCanonicalType()->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.def"
    llvm_unreachable("non-canonical or dependent type in IR-generation");

  case Type::Builtin:
  case Type::Pointer:
  case Type::BlockPointer:
  case Type::LValueReference:
  case Type::RValueReference:
  case Type::MemberPointer:
  case Type::Vector:
  case Type::ExtVector:
  case Type::FunctionProto:
  case Type::FunctionNoProto:
  case Type::Enum:
  case Type::ObjCObjectPointer:
    return false;

  // Complexes, arrays, records, and Objective-C objects.
  case Type::Complex:
  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
  case Type::Record:
  case Type::ObjCObject:
  case Type::ObjCInterface:
    return true;
  }
  llvm_unreachable("unknown type kind!");
}

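/// EmitReturnBlock - Emit the unified return block, trying to fold it into the
/// current insertion point or its sole predecessor when possible.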
void CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point; reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return;
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
      dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->use_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Reset insertion point and delete the branch.
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      return;
    }
  }

  // FIXME: We are at an unreachable point; there is no reason to emit the
  // block unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
}

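// EmitIfUsed - Append the block to the current function if anything uses it;
// otherwise just delete it.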
static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

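/// FinishFunction - Complete IR generation for the current function: emit the
/// return block and function epilog, verify that all cleanup scopes were
/// popped, and flush the helper blocks (indirect goto, EH terminate blocks,
/// etc.) if they are used.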
void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  // Emit function epilog (to return).
  EmitReturnBlock();

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_exit");

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->setLocation(EndLoc);
    DI->EmitFunctionEnd(Builder);
  }

  EmitFunctionEpilog(*CurFnInfo);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end of
  // the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = 0;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero-entry PHI node, which is illegal; zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, RethrowBlock.getBlock());
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
/// instrumentation function with the current function and the call site, if
/// function instrumentation is enabled.
void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
  // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
  const llvm::PointerType *PointerTy = Int8PtrTy;
  const llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
  const llvm::FunctionType *FunctionTy =
    llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
                            ProfileFuncArgs, false);

  llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
  llvm::CallInst *CallSite = Builder.CreateCall(
    CGM.getIntrinsic(llvm::Intrinsic::returnaddress, 0, 0),
    llvm::ConstantInt::get(Int32Ty, 0),
    "callsite");

  Builder.CreateCall2(F,
                      llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
                      CallSite);
}

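/// EmitMCountInstrumentation - Emit a call to the profiling counter function
/// named by the target (mcount or its target-specific equivalent) at the
/// current insertion point.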
void CodeGenFunction::EmitMCountInstrumentation() {
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()), false);

  llvm::Constant *MCountFn = CGM.CreateRuntimeFunction(FTy,
                                                       Target.getMCountName());
  Builder.CreateCall(MCountFn);
}

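/// StartFunction - Emit the standard function prologue: set up per-function
/// state, create the entry block and alloca insertion point, emit debug info
/// and instrumentation calls, set up the return value slot, and emit the
/// ABI-specific argument prolog.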
void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation StartLoc) {
  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = CurFuncDecl = D;
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // Pass the inline keyword to the optimizer if it appears explicitly on any
  // declaration.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
    for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
           RE = FD->redecls_end(); RI != RE; ++RI)
      if (RI->isInlineSpecified()) {
        Fn->addFnAttr(llvm::Attribute::InlineHint);
        break;
      }

  if (getContext().getLangOptions().OpenCL) {
    // Add metadata for a kernel function.
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      if (FD->hasAttr<OpenCLKernelAttr>()) {
        llvm::LLVMContext &Context = getLLVMContext();
        llvm::NamedMDNode *OpenCLMetadata =
          CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");

        llvm::Value *Op = Fn;
        OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Op));
      }
  }

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later.  Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
  if (Builder.isNamePreserving())
    AllocaInsertPt->setName("allocapt");

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    // FIXME: what is going on here and why does it ignore all these
    // interesting type properties?
    QualType FnType =
      getContext().getFunctionType(RetTy, 0, 0,
                                   FunctionProtoType::ExtProtoInfo());

    DI->setLocation(StartLoc);
    DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
  }

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_enter");

  if (CGM.getCodeGenOpts().InstrumentForProfiling)
    EmitMCountInstrumentation();

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = 0;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             hasAggregateLLVMType(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    ReturnValue = CurFn->arg_begin();
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");
  }

  EmitStartEHSpec(CurCodeDecl);
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance())
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    QualType Ty = (*i)->getType();

    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  }
}

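/// EmitFunctionBody - Emit the body of the current function definition as a
/// single statement.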
void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(CurGD.getDecl());
  assert(FD->getBody());
  EmitStmt(FD->getBody());
}

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it.  We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overridden.
  if (F->mayBeOverridden()) return;

  for (llvm::Function::iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
    for (llvm::BasicBlock::iterator
           BI = FI->begin(), BE = FI->end(); BI != BE; ++BI)
      if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(&*BI))
        if (!Call->doesNotThrow())
          return;
  F->setDoesNotThrow(true);
}

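/// GenerateCode - Drive IR generation for a single function definition: emit
/// the prologue, dispatch the body to the appropriate emitter (constructor,
/// destructor, or ordinary function body), emit the epilogue, and finally try
/// to mark the function nounwind.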
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  // Check if we should generate debug info for this function.
  if (CGM.getModuleDebugInfo() && !FD->hasAttr<NoDebugAttr>())
    DebugInfo = CGM.getModuleDebugInfo();

  FunctionArgList Args;
  QualType ResTy = FD->getResultType();

  CurGD = GD;
  if (isa<CXXMethodDecl>(FD) && cast<CXXMethodDecl>(FD)->isInstance())
    CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResTy, Args);

  if (FD->getNumParams())
    for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
      Args.push_back(FD->getParamDecl(i));

  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, FnInfo, Args, BodyRange.getBegin());

  // Generate the body of the function.
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else
    EmitFunctionBody(Args);

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);
}

/// ContainsLabel - Return true if the statement contains a label in it.  If
/// this statement is not executed normally, then not containing a label means
/// that we can just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (S == 0) return false;

  // If this is a label, we have to emit the code; consider something like:
  // if (0) {  ...  foo:  bar(); }  goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we have
  // to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (Stmt::const_child_range I = S->children(); I; ++I)
    if (ContainsLabel(*I, IgnoreCaseStmts))
      return true;

  return false;
}

/// containsBreak - Return true if the statement contains a break out of it.
/// If the statement (recursively) contains a switch or loop with a break
/// inside of it, this is fine.
bool CodeGenFunction::containsBreak(const Stmt *S) {
  // Null statement; it can't contain a break.
  if (S == 0) return false;

  // If this is a switch or loop that defines its own break scope, then we can
  // include it and anything inside of it.
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
      isa<ForStmt>(S))
    return false;

  if (isa<BreakStmt>(S))
    return true;

  // Scan subexpressions for verboten breaks.
  for (Stmt::const_child_range I = S->children(); I; ++I)
    if (containsBreak(*I))
      return true;

  return false;
}


/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds, return true and set the boolean result in ResultBool.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   bool &ResultBool) {
  llvm::APInt ResultInt;
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt))
    return false;

  ResultBool = ResultInt.getBoolValue();
  return true;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds, return true and set the folded value in ResultInt.
bool CodeGenFunction::
ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APInt &ResultInt) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  Expr::EvalResult Result;
  if (!Cond->Evaluate(Result, getContext()) || !Result.Val.isInt() ||
      Result.HasSideEffects)
    return false;  // Not foldable, not an integer, or not fully evaluatable.

  if (CodeGenFunction::ContainsLabel(Cond))
    return false;  // Contains a label.

  ResultInt = Result.Val.getInt();
  return true;
}


/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks.  Based on the condition, this might try
/// to simplify the codegen of the conditional based on the branch.
///
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock) {
  Cond = Cond->IgnoreParens();

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BO_LAnd) {
      // If we have "1 && X", simplify the code.  "0 && X" would have been
      // constant folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          ConstantBool) {
        // br(1 && X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          ConstantBool) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");

      ConditionalEvaluation eval(*this);
      EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
      EmitBlock(LHSTrue);

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      eval.end(*this);

      return;
    }

    if (CondBOp->getOpcode() == BO_LOr) {
      // If we have "0 || X", simplify the code.  "1 || X" would have been
      // constant folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          !ConstantBool) {
        // br(0 || X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          !ConstantBool) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");

      ConditionalEvaluation eval(*this);
      EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
      EmitBlock(LHSFalse);

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      eval.end(*this);

      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UO_LNot)
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock);
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // Handle ?: operator.

    // Just ignore GNU ?: extension.
    if (CondOp->getLHS()) {
      // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
      llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
      llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");

      ConditionalEvaluation cond(*this);
      EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);

      cond.begin(*this);
      EmitBlock(LHSBlock);
      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
      cond.end(*this);

      cond.begin(*this);
      EmitBlock(RHSBlock);
      EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
      cond.end(*this);

      return;
    }
  }

  // Emit the code with the fully general case.
  llvm::Value *CondV = EvaluateExprAsBool(Cond);
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
}

/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
                                       bool OmitOnError) {
  CGM.ErrorUnsupported(S, Type, OmitOnError);
}

/// emitNonZeroVLAInit - Emit the "zero" initialization of a
/// variable-length array whose elements have a non-zero bit-pattern.
///
/// \param baseType - the base element type of the array
/// \param dest - a char* pointing to the VLA storage to initialize
/// \param src - a char* pointing to the bit-pattern for a single
/// base element of the array
/// \param sizeInChars - the total size of the VLA, in chars
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
                               llvm::Value *dest, llvm::Value *src,
                               llvm::Value *sizeInChars) {
  std::pair<CharUnits,CharUnits> baseSizeAndAlign
    = CGF.getContext().getTypeInfoInChars(baseType);

  CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *baseSizeInChars
    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSizeAndAlign.first.getQuantity());

  const llvm::Type *i8p = Builder.getInt8PtrTy();

  llvm::Value *begin = Builder.CreateBitCast(dest, i8p, "vla.begin");
  llvm::Value *end = Builder.CreateInBoundsGEP(dest, sizeInChars, "vla.end");

  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");

  // Make a loop over the VLA.  C99 guarantees that the VLA element
  // count must be nonzero.
  CGF.EmitBlock(loopBB);

  llvm::PHINode *cur = Builder.CreatePHI(i8p, 2, "vla.cur");
  cur->addIncoming(begin, originBB);

  // memcpy the individual element bit-pattern.
  Builder.CreateMemCpy(cur, src, baseSizeInChars,
                       baseSizeAndAlign.second.getQuantity(),
                       /*volatile*/ false);

  // Go to the next element.
  llvm::Value *next = Builder.CreateConstInBoundsGEP1_32(cur, 1, "vla.next");

  // Leave if that's the end of the VLA.
  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
  Builder.CreateCondBr(done, contBB, loopBB);
  cur->addIncoming(next, loopBB);

  CGF.EmitBlock(contBB);
}

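/// EmitNullInitialization - Emit the default ("null") initialization of an
/// object of the given type at the given address, using memset when the type
/// is zero-initializable and a copy from a null constant otherwise (e.g. for
/// types containing pointers to data members).  Handles VLAs as well.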
void
CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
  // Ignore empty classes in C++.
  if (getContext().getLangOptions().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Cast the dest ptr to the appropriate i8 pointer type.
  unsigned DestAS =
    cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
  const llvm::Type *BP = Builder.getInt8PtrTy(DestAS);
  if (DestPtr->getType() != BP)
    DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");

  // Get size and alignment info for this aggregate.
  std::pair<CharUnits, CharUnits> TypeInfo =
    getContext().getTypeInfoInChars(Ty);
  CharUnits Size = TypeInfo.first;
  CharUnits Align = TypeInfo.second;

  llvm::Value *SizeVal;
  const VariableArrayType *vla;

  // Don't bother emitting a zero-byte memset.
  if (Size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (const VariableArrayType *vlaType =
          dyn_cast_or_null<VariableArrayType>(
                                          getContext().getAsArrayType(Ty))) {
      SizeVal = GetVLASize(vlaType);
      vla = vlaType;
    } else {
      return;
    }
  } else {
    SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
    vla = 0;
  }

  // If the type contains a pointer to a data member we can't memset it to
  // zero.  Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
    // For a VLA, emit a single element, then splat that over the VLA.
    if (vla) Ty = getContext().getBaseElementType(vla);

    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, llvm::Twine());
    llvm::Value *SrcPtr =
      Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy());

    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);

    // Get and call the appropriate llvm.memcpy overload.
    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity(), false);
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal,
                       Align.getQuantity(), false);
}

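/// GetAddrOfLabel - Return the address of the given label as a BlockAddress
/// constant, making sure its block is registered as a destination of the
/// function's indirect branch.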
llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
  // Make sure that there is a block for the indirect goto.
  if (IndirectBranch == 0)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}

llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
  // If we already made the indirect branch for indirect goto, return its block.
  if (IndirectBranch) return IndirectBranch->getParent();

  CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));

  // Create the PHI node that indirect gotos will add entries to.
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
                                              "indirect.goto.dest");

  // Create the indirect branch instruction.
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
  return IndirectBranch->getParent();
}

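/// GetVLASize - Return the previously computed size, in chars, of the given
/// variable-length array type; the size must already have been emitted by
/// EmitVLASize.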
llvm::Value *CodeGenFunction::GetVLASize(const VariableArrayType *VAT) {
  llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];

  assert(SizeEntry && "Did not emit size for type");
  return SizeEntry;
}

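/// EmitVLASize - Walk a variably modified type, computing the size of each
/// variable-length array level it contains and caching the result in
/// VLASizeMap.  For example, for 'int a[n][m]' both the size of 'int[m]' and
/// the size of the full array are computed.  Returns the size of Ty itself if
/// it is a VLA, or null otherwise.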
llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
  assert(Ty->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  EnsureInsertPoint();

  if (const VariableArrayType *VAT = getContext().getAsVariableArrayType(Ty)) {
    // An unknown size indication requires no size computation.
    if (!VAT->getSizeExpr())
      return 0;
    llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];

    if (!SizeEntry) {
      const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

      // Get the element size.
      QualType ElemTy = VAT->getElementType();
      llvm::Value *ElemSize;
      if (ElemTy->isVariableArrayType())
        ElemSize = EmitVLASize(ElemTy);
      else
        ElemSize = llvm::ConstantInt::get(SizeTy,
            getContext().getTypeSizeInChars(ElemTy).getQuantity());

      llvm::Value *NumElements = EmitScalarExpr(VAT->getSizeExpr());
      NumElements = Builder.CreateIntCast(NumElements, SizeTy, false, "tmp");

      SizeEntry = Builder.CreateMul(ElemSize, NumElements);
    }

    return SizeEntry;
  }

  if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
    EmitVLASize(AT->getElementType());
    return 0;
  }

  if (const ParenType *PT = dyn_cast<ParenType>(Ty)) {
    EmitVLASize(PT->getInnerType());
    return 0;
  }

  const PointerType *PT = Ty->getAs<PointerType>();
  assert(PT && "unknown VM type!");
  EmitVLASize(PT->getPointeeType());
  return 0;
}

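/// EmitVAListRef - Emit a pointer to the va_list object denoted by the given
/// expression.  On targets where __builtin_va_list is an array type the
/// expression already decays to a pointer and is emitted as a scalar;
/// otherwise the address of the lvalue is used.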
llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
  if (getContext().getBuiltinVaListType()->isArrayType())
    return EmitScalarExpr(E);
  return EmitLValue(E).getAddress();
}

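/// EmitDeclRefExprDbgValue - Record debug info describing the constant value
/// Init for the declaration referenced by E.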
void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
                                              llvm::Constant *Init) {
  assert(Init && "Invalid DeclRefExpr initializer!");
  if (CGDebugInfo *Dbg = getDebugInfo())
    Dbg->EmitGlobalVariable(E->getDecl(), Init);
}

CodeGenFunction::PeepholeProtection
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
  // At the moment, the only aggressive peephole we do in IR gen
  // is trunc(zext) folding, but if we add more, we can easily
  // extend this protection.

  if (!rvalue.isScalar()) return PeepholeProtection();
  llvm::Value *value = rvalue.getScalarVal();
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();

  // Just make an extra bitcast.
  assert(HaveInsertPoint());
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
                                                  Builder.GetInsertBlock());

  PeepholeProtection protection;
  protection.Inst = inst;
  return protection;
}

void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
  if (!protection.Inst) return;

  // In theory, we could try to duplicate the peepholes now, but whatever.
  protection.Inst->eraseFromParent();
}

859