//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/MDBuilder.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
  : CodeGenTypeCache(cgm), CGM(cgm),
    Target(CGM.getContext().getTargetInfo()),
    Builder(cgm.getModule().getContext()),
    AutoreleaseResult(false), BlockInfo(0), BlockPointer(0),
    LambdaThisCaptureField(0), NormalCleanupDest(0), NextCleanupDestIndex(1),
    FirstBlockInfo(0), EHResumeBlock(0), ExceptionSlot(0), EHSelectorSlot(0),
    DebugInfo(0), DisableDebugInfo(false), DidCallStackSave(false),
    IndirectBranch(0), SwitchInsn(0), CaseRangeBlock(0), UnreachableBlock(0),
    CXXABIThisDecl(0), CXXABIThisValue(0), CXXThisValue(0), CXXVTTDecl(0),
    CXXVTTValue(0), OutermostConditional(0), TerminateLandingPad(0),
    TerminateHandler(0), TrapBB(0) {

  CatchUndefined = getContext().getLangOpts().CatchUndefined;
  CGM.getCXXABI().getMangleContext().startNewFunction();
}

CodeGenFunction::~CodeGenFunction() {
  // If there are any unclaimed block infos, go ahead and destroy them
  // now.  This can happen if IR-gen gets clever and skips evaluating
  // something.
  if (FirstBlockInfo)
    destroyBlockInfos(FirstBlockInfo);
}

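/// ConvertTypeForMem - Convert the AST type T to the LLVM type used for its
/// in-memory representation, which can differ from its scalar representation
/// (e.g. i8 rather than i1 for bool).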
llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

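/// hasAggregateLLVMType - Return true if IR-gen represents values of the
/// given AST type as aggregates rather than as single scalar values; for
/// example, 'int' and 'int*' are scalar, while structs, arrays, and
/// '_Complex float' are aggregates.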
bool CodeGenFunction::hasAggregateLLVMType(QualType type) {
  switch (type.getCanonicalType()->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.def"
    llvm_unreachable("non-canonical or dependent type in IR-generation");

  case Type::Builtin:
  case Type::Pointer:
  case Type::BlockPointer:
  case Type::LValueReference:
  case Type::RValueReference:
  case Type::MemberPointer:
  case Type::Vector:
  case Type::ExtVector:
  case Type::FunctionProto:
  case Type::FunctionNoProto:
  case Type::Enum:
  case Type::ObjCObjectPointer:
    return false;

  // Complexes, arrays, records, and Objective-C objects.
  case Type::Complex:
  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
  case Type::Record:
  case Type::ObjCObject:
  case Type::ObjCInterface:
    return true;

  // In IRGen, atomic types are just the underlying type.
  case Type::Atomic:
    return hasAggregateLLVMType(type->getAs<AtomicType>()->getValueType());
  }
  llvm_unreachable("unknown type kind!");
}

void CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return;
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
      dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->use_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Reset insertion point, including debug location, and delete the branch.
      Builder.SetCurrentDebugLocation(BI->getDebugLoc());
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      return;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
}

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

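/// FinishFunction - Complete IR generation for the current function body: pop
/// any remaining prologue cleanups, emit the return block and the function
/// epilog, and tear down per-function state such as the alloca insertion
/// point and any pending indirect-goto machinery.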
void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  // Pop any cleanups that might have been associated with the
  // parameters.  Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  if (EHStack.stable_begin() != PrologueCleanupDepth)
    PopCleanupBlocks(PrologueCleanupDepth);

  // Emit function epilog (to return).
  EmitReturnBlock();

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_exit");

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->setLocation(EndLoc);
    DI->EmitFunctionEnd(Builder);
  }

  EmitFunctionEpilog(*CurFnInfo);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end of
  // the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = 0;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal, zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
/// instrumentation function with the current function and the call site, if
/// function instrumentation is enabled.
void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
  // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
  llvm::PointerType *PointerTy = Int8PtrTy;
  llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
  llvm::FunctionType *FunctionTy =
    llvm::FunctionType::get(VoidTy, ProfileFuncArgs, false);

  llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
  llvm::CallInst *CallSite = Builder.CreateCall(
    CGM.getIntrinsic(llvm::Intrinsic::returnaddress),
    llvm::ConstantInt::get(Int32Ty, 0),
    "callsite");

  Builder.CreateCall2(F,
                      llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
                      CallSite);
}

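/// EmitMCountInstrumentation - Emit a call to the target-specific profiling
/// counter function (e.g. mcount), as used for -pg style profiling.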
void CodeGenFunction::EmitMCountInstrumentation() {
  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);

  llvm::Constant *MCountFn = CGM.CreateRuntimeFunction(FTy,
                                                       Target.getMCountName());
  Builder.CreateCall(MCountFn);
}

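/// StartFunction - Emit the standard prologue for the given function: set up
/// per-function state, create the entry block and the alloca insertion point,
/// emit instrumentation and debug hooks, set up the return value slot, and
/// run the ABI-specific function prolog.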
void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation StartLoc) {
  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = CurFuncDecl = D;
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // Pass inline keyword to optimizer if it appears explicitly on any
  // declaration.
  if (!CGM.getCodeGenOpts().NoInline)
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
             RE = FD->redecls_end(); RI != RE; ++RI)
        if (RI->isInlineSpecified()) {
          Fn->addFnAttr(llvm::Attribute::InlineHint);
          break;
        }

  if (getContext().getLangOpts().OpenCL) {
    // Add metadata for a kernel function.
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      if (FD->hasAttr<OpenCLKernelAttr>()) {
        llvm::LLVMContext &Context = getLLVMContext();
        llvm::NamedMDNode *OpenCLMetadata =
          CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");

        llvm::Value *Op = Fn;
        OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Op));
      }
  }

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later.  Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
  if (Builder.isNamePreserving())
    AllocaInsertPt->setName("allocapt");

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    unsigned NumArgs = 0;
    QualType *ArgsArray = new QualType[Args.size()];
    for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
         i != e; ++i) {
      ArgsArray[NumArgs++] = (*i)->getType();
    }

    QualType FnType =
      getContext().getFunctionType(RetTy, ArgsArray, NumArgs,
                                   FunctionProtoType::ExtProtoInfo());

    delete[] ArgsArray;

    DI->setLocation(StartLoc);
    DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
  }

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_enter");

  if (CGM.getCodeGenOpts().InstrumentForProfiling)
    EmitMCountInstrumentation();

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = 0;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             hasAggregateLLVMType(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    ReturnValue = CurFn->arg_begin();
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result.  We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    if (MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If this lambda captures this, load it.
        QualType LambdaTagType =
            getContext().getTagDeclType(LambdaThisCaptureField->getParent());
        LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue,
                                                     LambdaTagType);
        LValue ThisLValue = EmitLValueForField(LambdaLV,
                                               LambdaThisCaptureField);
        CXXThisValue = EmitLoadOfLValue(ThisLValue).getScalarVal();
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'?  The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    QualType Ty = (*i)->getType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }
  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);
}

void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(CurGD.getDecl());
  assert(FD->getBody());
  EmitStmt(FD->getBody());
}

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it.  We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
  if (F->mayBeOverridden()) return;

  for (llvm::Function::iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
    for (llvm::BasicBlock::iterator
           BI = FI->begin(), BE = FI->end(); BI != BE; ++BI)
      if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(&*BI)) {
        if (!Call->doesNotThrow())
          return;
      } else if (isa<llvm::ResumeInst>(&*BI)) {
        return;
      }
  F->setDoesNotThrow(true);
}

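/// GenerateCode - Emit IR for the body of the given function declaration:
/// set up debug info and the argument list, emit the standard prologue,
/// dispatch to the appropriate body emitter (constructor, destructor, CUDA
/// kernel stub, lambda helper, or plain function body), and emit the
/// epilogue.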
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  // Check if we should generate debug info for this function.
  if (CGM.getModuleDebugInfo() && !FD->hasAttr<NoDebugAttr>())
    DebugInfo = CGM.getModuleDebugInfo();

  FunctionArgList Args;
  QualType ResTy = FD->getResultType();

  CurGD = GD;
  if (isa<CXXMethodDecl>(FD) && cast<CXXMethodDecl>(FD)->isInstance())
    CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResTy, Args);

  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
    Args.push_back(FD->getParamDecl(i));

  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, FnInfo, Args, BodyRange.getBegin());

  // Generate the body of the function.
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else if (getContext().getLangOpts().CUDA &&
           !CGM.getCodeGenOpts().CUDAIsDevice &&
           FD->hasAttr<CUDAGlobalAttr>())
    CGM.getCUDARuntime().EmitDeviceStubBody(*this, Args);
  else if (isa<CXXConversionDecl>(FD) &&
           cast<CXXConversionDecl>(FD)->isLambdaToBlockPointerConversion()) {
    // The lambda conversion to block pointer is special; the semantics can't be
    // expressed in the AST, so IRGen needs to special-case it.
    EmitLambdaToBlockPointerBody(Args);
  } else if (isa<CXXMethodDecl>(FD) &&
             cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
    // The lambda "__invoke" function is special, because it forwards or
    // clones the body of the function call operator (but is actually static).
    EmitLambdaStaticInvokeFunction(cast<CXXMethodDecl>(FD));
  } else
    EmitFunctionBody(Args);

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);
}

/// ContainsLabel - Return true if the statement contains a label in it.  If
/// this statement is not executed normally, then not containing a label means
/// that we can just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (S == 0) return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) {  ...  foo:  bar(); }  goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we have
  // to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (Stmt::const_child_range I = S->children(); I; ++I)
    if (ContainsLabel(*I, IgnoreCaseStmts))
      return true;

  return false;
}

/// containsBreak - Return true if the statement contains a break out of it.
/// If the statement (recursively) contains a switch or loop with a break
/// inside of it, this is fine.
bool CodeGenFunction::containsBreak(const Stmt *S) {
  // Null statement, no break!
  if (S == 0) return false;

  // If this is a switch or loop that defines its own break scope, then we can
  // include it and anything inside of it.
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
      isa<ForStmt>(S))
    return false;

  if (isa<BreakStmt>(S))
    return true;

  // Scan subexpressions for verboten breaks.
  for (Stmt::const_child_range I = S->children(); I; ++I)
    if (containsBreak(*I))
      return true;

  return false;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds return true and set the boolean result in ResultBool.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   bool &ResultBool) {
  llvm::APInt ResultInt;
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt))
    return false;

  ResultBool = ResultInt.getBoolValue();
  return true;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds return true and set the folded value.
bool CodeGenFunction::
ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APInt &ResultInt) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  llvm::APSInt Int;
  if (!Cond->EvaluateAsInt(Int, getContext()))
    return false;  // Not foldable, not integer or not fully evaluatable.

  if (CodeGenFunction::ContainsLabel(Cond))
    return false;  // Contains a label.

  ResultInt = Int;
  return true;
}

/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks.  Based on the condition, this might try
/// to simplify the codegen of the conditional based on the branch.
///
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock) {
  Cond = Cond->IgnoreParens();

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BO_LAnd) {
      // If we have "1 && X", simplify the code.  "0 && X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          ConstantBool) {
        // br(1 && X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          ConstantBool) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");

      ConditionalEvaluation eval(*this);
      EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
      EmitBlock(LHSTrue);

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      eval.end(*this);

      return;
    }

    if (CondBOp->getOpcode() == BO_LOr) {
      // If we have "0 || X", simplify the code.  "1 || X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          !ConstantBool) {
        // br(0 || X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          !ConstantBool) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");

      ConditionalEvaluation eval(*this);
      EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
      EmitBlock(LHSFalse);

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      eval.end(*this);

      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UO_LNot)
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock);
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");

    ConditionalEvaluation cond(*this);
    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);

    cond.begin(*this);
    EmitBlock(LHSBlock);
    EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
    cond.end(*this);

    cond.begin(*this);
    EmitBlock(RHSBlock);
    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
    cond.end(*this);

    return;
  }

  // Emit the code with the fully general case.
  llvm::Value *CondV = EvaluateExprAsBool(Cond);
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
}

/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
                                       bool OmitOnError) {
  CGM.ErrorUnsupported(S, Type, OmitOnError);
}

/// emitNonZeroVLAInit - Emit the "zero" initialization of a
/// variable-length array whose elements have a non-zero bit-pattern.
///
/// \param src - a char* pointing to the bit-pattern for a single
/// base element of the array
/// \param sizeInChars - the total size of the VLA, in chars
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
                               llvm::Value *dest, llvm::Value *src,
                               llvm::Value *sizeInChars) {
  std::pair<CharUnits,CharUnits> baseSizeAndAlign
    = CGF.getContext().getTypeInfoInChars(baseType);

  CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *baseSizeInChars
    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSizeAndAlign.first.getQuantity());

  llvm::Type *i8p = Builder.getInt8PtrTy();

  llvm::Value *begin = Builder.CreateBitCast(dest, i8p, "vla.begin");
  llvm::Value *end = Builder.CreateInBoundsGEP(dest, sizeInChars, "vla.end");

  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");

  // Make a loop over the VLA.  C99 guarantees that the VLA element
  // count must be nonzero.
  CGF.EmitBlock(loopBB);

  llvm::PHINode *cur = Builder.CreatePHI(i8p, 2, "vla.cur");
  cur->addIncoming(begin, originBB);

  // memcpy the individual element bit-pattern.
  Builder.CreateMemCpy(cur, src, baseSizeInChars,
                       baseSizeAndAlign.second.getQuantity(),
                       /*volatile*/ false);

  // Go to the next element.
  llvm::Value *next = Builder.CreateConstInBoundsGEP1_32(cur, 1, "vla.next");

  // Leave if that's the end of the VLA.
  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
  Builder.CreateCondBr(done, contBB, loopBB);
  cur->addIncoming(next, loopBB);

  CGF.EmitBlock(contBB);
}

void
CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
  // Ignore empty classes in C++.
  if (getContext().getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Cast the dest ptr to the appropriate i8 pointer type.
  unsigned DestAS =
    cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
  llvm::Type *BP = Builder.getInt8PtrTy(DestAS);
  if (DestPtr->getType() != BP)
    DestPtr = Builder.CreateBitCast(DestPtr, BP);

  // Get size and alignment info for this aggregate.
  std::pair<CharUnits, CharUnits> TypeInfo =
    getContext().getTypeInfoInChars(Ty);
  CharUnits Size = TypeInfo.first;
  CharUnits Align = TypeInfo.second;

  llvm::Value *SizeVal;
  const VariableArrayType *vla;

  // Don't bother emitting a zero-byte memset.
  if (Size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (const VariableArrayType *vlaType =
          dyn_cast_or_null<VariableArrayType>(
                                          getContext().getAsArrayType(Ty))) {
      QualType eltType;
      llvm::Value *numElts;
      llvm::tie(numElts, eltType) = getVLASize(vlaType);

      SizeVal = numElts;
      CharUnits eltSize = getContext().getTypeSizeInChars(eltType);
      if (!eltSize.isOne())
        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
      vla = vlaType;
    } else {
      return;
    }
  } else {
    SizeVal = CGM.getSize(Size);
    vla = 0;
  }

  // If the type contains a pointer to a data member, we can't memset it to
  // zero.  Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
    // For a VLA, emit a single element, then splat that over the VLA.
    if (vla) Ty = getContext().getBaseElementType(vla);

    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, Twine());
    llvm::Value *SrcPtr =
      Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy());

    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);

    // Get and call the appropriate llvm.memcpy overload.
    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity(), false);
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal,
                       Align.getQuantity(), false);
}

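/// GetAddrOfLabel - Return the address of the given label as an i8*
/// blockaddress, registering the label's block as a destination of the
/// function's indirect branch.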
llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
  // Make sure that there is a block for the indirect goto.
  if (IndirectBranch == 0)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}

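/// GetIndirectGotoBlock - Get or create the single shared block through which
/// every indirect goto in the function is routed; its PHI node collects the
/// possible destination addresses.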
llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
  // If we already made the indirect branch for indirect goto, return its block.
  if (IndirectBranch) return IndirectBranch->getParent();

  CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));

  // Create the PHI node that indirect gotos will add entries to.
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
                                              "indirect.goto.dest");

  // Create the indirect branch instruction.
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
  return IndirectBranch->getParent();
}

/// Computes the length of an array in elements, as well as the base
/// element type and a properly-typed first element pointer.
llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
                                              QualType &baseType,
                                              llvm::Value *&addr) {
  const ArrayType *arrayType = origArrayType;

  // If it's a VLA, we have to load the stored size.  Note that
  // this is the size of the VLA in bytes, not its size in elements.
  llvm::Value *numVLAElements = 0;
  if (isa<VariableArrayType>(arrayType)) {
    numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).first;

    // Walk into all VLAs.  This doesn't require changes to addr,
    // which has type T* where T is the first non-VLA element type.
    do {
      QualType elementType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(elementType);

      // If we only have VLA components, 'addr' requires no adjustment.
      if (!arrayType) {
        baseType = elementType;
        return numVLAElements;
      }
    } while (isa<VariableArrayType>(arrayType));

    // We get out here only if we find a constant array type
    // inside the VLA.
  }

  // We have some number of constant-length arrays, so addr should
  // have LLVM type [M x [N x [...]]]*.  Build a GEP that walks
  // down to the first element of addr.
  SmallVector<llvm::Value*, 8> gepIndices;

  // GEP down to the array type.
  llvm::ConstantInt *zero = Builder.getInt32(0);
  gepIndices.push_back(zero);

  // It's more efficient to calculate the count from the LLVM
  // constant-length arrays than to re-evaluate the array bounds.
  uint64_t countFromCLAs = 1;

  llvm::ArrayType *llvmArrayType =
    cast<llvm::ArrayType>(
      cast<llvm::PointerType>(addr->getType())->getElementType());
  while (true) {
    assert(isa<ConstantArrayType>(arrayType));
    assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
             == llvmArrayType->getNumElements());

    gepIndices.push_back(zero);
    countFromCLAs *= llvmArrayType->getNumElements();

    llvmArrayType =
      dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
    if (!llvmArrayType) break;

    arrayType = getContext().getAsArrayType(arrayType->getElementType());
    assert(arrayType && "LLVM and Clang types are out of sync");
  }

  baseType = arrayType->getElementType();

  // Create the actual GEP.
  addr = Builder.CreateInBoundsGEP(addr, gepIndices, "array.begin");

  llvm::Value *numElements
    = llvm::ConstantInt::get(SizeTy, countFromCLAs);

  // If we had any VLA dimensions, factor them in.
  if (numVLAElements)
    numElements = Builder.CreateNUWMul(numVLAElements, numElements);

  return numElements;
}

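/// getVLASize - Return the number of elements in the given variable-length
/// array type as a size_t value, together with the element type that remains
/// once all variable dimensions have been walked off.  The count is the
/// product of the sizes of all nested VLA dimensions.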
std::pair<llvm::Value*, QualType>
CodeGenFunction::getVLASize(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLASize(vla);
}

std::pair<llvm::Value*, QualType>
CodeGenFunction::getVLASize(const VariableArrayType *type) {
  // The number of elements so far; always size_t.
  llvm::Value *numElements = 0;

  QualType elementType;
  do {
    elementType = type->getElementType();
    llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
    assert(vlaSize && "no size for VLA!");
    assert(vlaSize->getType() == SizeTy);

    if (!numElements) {
      numElements = vlaSize;
    } else {
      // It's undefined behavior if this wraps around, so mark it that way.
      numElements = Builder.CreateNUWMul(numElements, vlaSize);
    }
  } while ((type = getContext().getAsVariableArrayType(elementType)));

  return std::pair<llvm::Value*,QualType>(numElements, elementType);
}

void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVariablyModifiedType!");

  EnsureInsertPoint();

  // We're going to walk down into the type and look for VLA
  // expressions.
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {

#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("unexpected dependent type!");

    // These types are never variably-modified.
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::Record:
    case Type::Enum:
    case Type::Elaborated:
    case Type::TemplateSpecialization:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Pointer:
      type = cast<PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      // Losing element qualification here is fine.
      const VariableArrayType *vat = cast<VariableArrayType>(ty);

      // Unknown size indication requires no size computation.
      // Otherwise, evaluate and record it.
      if (const Expr *size = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        llvm::Value *&entry = VLASizeMap[size];
        if (!entry) {
          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          entry = Builder.CreateIntCast(EmitScalarExpr(size), SizeTy,
                                        /*signed*/ false);
        }
      }
      type = vat->getElementType();
      break;
    }

    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<FunctionType>(ty)->getResultType();
      break;

    case Type::Paren:
    case Type::TypeOf:
    case Type::UnaryTransform:
    case Type::Attributed:
    case Type::SubstTemplateTypeParm:
      // Keep walking after single level desugaring.
      type = type.getSingleStepDesugaredType(getContext());
      break;

    case Type::Typedef:
    case Type::Decltype:
    case Type::Auto:
      // Stop walking: nothing to do.
      return;

    case Type::TypeOfExpr:
      // Stop walking: emit typeof expression.
      EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
      return;

    case Type::Atomic:
      type = cast<AtomicType>(ty)->getValueType();
      break;
    }
  } while (type->isVariablyModifiedType());
}

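/// EmitVAListRef - Emit a pointer to the target's va_list: if va_list is an
/// array type, the expression has already decayed to a pointer and is emitted
/// as a scalar; otherwise, take the address of the lvalue.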
llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
  if (getContext().getBuiltinVaListType()->isArrayType())
    return EmitScalarExpr(E);
  return EmitLValue(E).getAddress();
}

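/// EmitDeclRefExprDbgValue - Emit debug info for a DeclRefExpr that has been
/// folded to the given constant, so the declaration and its value remain
/// visible to the debugger.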
void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
                                              llvm::Constant *Init) {
  assert(Init && "Invalid DeclRefExpr initializer!");
  if (CGDebugInfo *Dbg = getDebugInfo())
    Dbg->EmitGlobalVariable(E->getDecl(), Init);
}

CodeGenFunction::PeepholeProtection
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
  // At the moment, the only aggressive peephole we do in IR gen
  // is trunc(zext) folding, but if we add more, we can easily
  // extend this protection.

  if (!rvalue.isScalar()) return PeepholeProtection();
  llvm::Value *value = rvalue.getScalarVal();
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();

  // Just make an extra bitcast.
  assert(HaveInsertPoint());
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
                                                  Builder.GetInsertBlock());

  PeepholeProtection protection;
  protection.Inst = inst;
  return protection;
}

void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
  if (!protection.Inst) return;

  // In theory, we could try to duplicate the peepholes now, but whatever.
  protection.Inst->eraseFromParent();
}

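/// EmitAnnotationCall - Emit a call to the given llvm.*.annotation intrinsic,
/// passing the annotated value, the annotation string, the translation unit
/// name, and the source line number.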
llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Value *AnnotationFn,
                                                 llvm::Value *AnnotatedVal,
                                                 llvm::StringRef AnnotationStr,
                                                 SourceLocation Location) {
  llvm::Value *Args[4] = {
    AnnotatedVal,
    Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
    Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
    CGM.EmitAnnotationLineNo(Location)
  };
  return Builder.CreateCall(AnnotationFn, Args);
}

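/// EmitVarAnnotations - Emit a call to llvm.var.annotation for each annotate
/// attribute on the given local variable.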
void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  // FIXME We create a new bitcast for every annotation because that's what
  // llvm-gcc was doing.
  for (specific_attr_iterator<AnnotateAttr>
       ai = D->specific_attr_begin<AnnotateAttr>(),
       ae = D->specific_attr_end<AnnotateAttr>(); ai != ae; ++ai)
    EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
                       Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
                       (*ai)->getAnnotation(), D->getLocation());
}

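/// EmitFieldAnnotations - Emit calls to llvm.ptr.annotation for each annotate
/// attribute on the given field, threading the annotated pointer through the
/// calls so that each annotation sees the result of the previous one.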
llvm::Value *CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
                                                   llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  llvm::Type *VTy = V->getType();
  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
                                    CGM.Int8PtrTy);

  for (specific_attr_iterator<AnnotateAttr>
       ai = D->specific_attr_begin<AnnotateAttr>(),
       ae = D->specific_attr_end<AnnotateAttr>(); ai != ae; ++ai) {
    // FIXME Always emit the cast inst so we can differentiate between
    // annotation on the first field of a struct and annotation on the struct
    // itself.
    if (VTy != CGM.Int8PtrTy)
      V = Builder.Insert(new llvm::BitCastInst(V, CGM.Int8PtrTy));
    V = EmitAnnotationCall(F, V, (*ai)->getAnnotation(), D->getLocation());
    V = Builder.CreateBitCast(V, VTy);
  }

  return V;
}