CGClass.cpp revision 263508
1//===--- CGClass.cpp - Emit LLVM Code for C++ classes ---------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This contains code dealing with C++ code generation of classes
11//
12//===----------------------------------------------------------------------===//
13
14#include "CGBlocks.h"
15#include "CGDebugInfo.h"
16#include "CGRecordLayout.h"
17#include "CodeGenFunction.h"
18#include "CGCXXABI.h"
19#include "clang/AST/CXXInheritance.h"
20#include "clang/AST/DeclTemplate.h"
21#include "clang/AST/EvaluatedExprVisitor.h"
22#include "clang/AST/RecordLayout.h"
23#include "clang/AST/StmtCXX.h"
24#include "clang/Basic/TargetBuiltins.h"
25#include "clang/CodeGen/CGFunctionInfo.h"
26#include "clang/Frontend/CodeGenOptions.h"
27
28using namespace clang;
29using namespace CodeGen;
30
31static CharUnits
32ComputeNonVirtualBaseClassOffset(ASTContext &Context,
33                                 const CXXRecordDecl *DerivedClass,
34                                 CastExpr::path_const_iterator Start,
35                                 CastExpr::path_const_iterator End) {
36  CharUnits Offset = CharUnits::Zero();
37
38  const CXXRecordDecl *RD = DerivedClass;
39
40  for (CastExpr::path_const_iterator I = Start; I != End; ++I) {
41    const CXXBaseSpecifier *Base = *I;
42    assert(!Base->isVirtual() && "Should not see virtual bases here!");
43
44    // Get the layout.
45    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
46
47    const CXXRecordDecl *BaseDecl =
48      cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
49
50    // Add the offset.
51    Offset += Layout.getBaseClassOffset(BaseDecl);
52
53    RD = BaseDecl;
54  }
55
56  return Offset;
57}
58
59llvm::Constant *
60CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
61                                   CastExpr::path_const_iterator PathBegin,
62                                   CastExpr::path_const_iterator PathEnd) {
63  assert(PathBegin != PathEnd && "Base path should not be empty!");
64
65  CharUnits Offset =
66    ComputeNonVirtualBaseClassOffset(getContext(), ClassDecl,
67                                     PathBegin, PathEnd);
68  if (Offset.isZero())
69    return 0;
70
71  llvm::Type *PtrDiffTy =
72  Types.ConvertType(getContext().getPointerDiffType());
73
74  return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity());
75}
76
77/// Gets the address of a direct base class within a complete object.
78/// This should only be used for (1) non-virtual bases or (2) virtual bases
79/// when the type is known to be complete (e.g. in complete destructors).
80///
81/// The object pointed to by 'This' is assumed to be non-null.
82llvm::Value *
83CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
84                                                   const CXXRecordDecl *Derived,
85                                                   const CXXRecordDecl *Base,
86                                                   bool BaseIsVirtual) {
87  // 'this' must be a pointer (in some address space) to Derived.
88  assert(This->getType()->isPointerTy() &&
89         cast<llvm::PointerType>(This->getType())->getElementType()
90           == ConvertType(Derived));
91
92  // Compute the offset of the virtual base.
93  CharUnits Offset;
94  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
95  if (BaseIsVirtual)
96    Offset = Layout.getVBaseClassOffset(Base);
97  else
98    Offset = Layout.getBaseClassOffset(Base);
99
100  // Shift and cast down to the base type.
101  // TODO: for complete types, this should be possible with a GEP.
102  llvm::Value *V = This;
103  if (Offset.isPositive()) {
104    V = Builder.CreateBitCast(V, Int8PtrTy);
105    V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity());
106  }
107  V = Builder.CreateBitCast(V, ConvertType(Base)->getPointerTo());
108
109  return V;
110}
111
112static llvm::Value *
113ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr,
114                                CharUnits nonVirtualOffset,
115                                llvm::Value *virtualOffset) {
116  // Assert that we have something to do.
117  assert(!nonVirtualOffset.isZero() || virtualOffset != 0);
118
119  // Compute the offset from the static and dynamic components.
120  llvm::Value *baseOffset;
121  if (!nonVirtualOffset.isZero()) {
122    baseOffset = llvm::ConstantInt::get(CGF.PtrDiffTy,
123                                        nonVirtualOffset.getQuantity());
124    if (virtualOffset) {
125      baseOffset = CGF.Builder.CreateAdd(virtualOffset, baseOffset);
126    }
127  } else {
128    baseOffset = virtualOffset;
129  }
130
131  // Apply the base offset.
132  ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy);
133  ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr");
134  return ptr;
135}
136
/// Convert a pointer to \p Derived into a pointer to the base-class
/// subobject named by the path [PathBegin, PathEnd).  The path may begin
/// with at most one virtual step; any remaining steps are non-virtual.
/// If \p NullCheckValue is set, a null input produces a null result via
/// an explicit null check and phi.
llvm::Value *
CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
                                       const CXXRecordDecl *Derived,
                                       CastExpr::path_const_iterator PathBegin,
                                       CastExpr::path_const_iterator PathEnd,
                                       bool NullCheckValue) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  CastExpr::path_const_iterator Start = PathBegin;
  const CXXRecordDecl *VBase = 0;

  // Sema has done some convenient canonicalization here: if the
  // access path involved any virtual steps, the conversion path will
  // *start* with a step down to the correct virtual base subobject,
  // and hence will not require any further steps.
  if ((*Start)->isVirtual()) {
    VBase =
      cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl());
    ++Start;
  }

  // Compute the static offset of the ultimate destination within its
  // allocating subobject (the virtual base, if there is one, or else
  // the "complete" object that we see).
  CharUnits NonVirtualOffset =
    ComputeNonVirtualBaseClassOffset(getContext(), VBase ? VBase : Derived,
                                     Start, PathEnd);

  // If there's a virtual step, we can sometimes "devirtualize" it.
  // For now, that's limited to when the derived type is final.
  // TODO: "devirtualize" this for accesses to known-complete objects.
  if (VBase && Derived->hasAttr<FinalAttr>()) {
    // A final class has a fixed dynamic type, so the vbase offset is
    // known statically from Derived's layout.
    const ASTRecordLayout &layout = getContext().getASTRecordLayout(Derived);
    CharUnits vBaseOffset = layout.getVBaseClassOffset(VBase);
    NonVirtualOffset += vBaseOffset;
    VBase = 0; // we no longer have a virtual step
  }

  // Get the base pointer type.
  llvm::Type *BasePtrTy =
    ConvertType((PathEnd[-1])->getType())->getPointerTo();

  // If the static offset is zero and we don't have a virtual step,
  // just do a bitcast; null checks are unnecessary.
  if (NonVirtualOffset.isZero() && !VBase) {
    return Builder.CreateBitCast(Value, BasePtrTy);
  }

  llvm::BasicBlock *origBB = 0;
  llvm::BasicBlock *endBB = 0;

  // Skip over the offset (and the vtable load) if we're supposed to
  // null-check the pointer.
  if (NullCheckValue) {
    origBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull");
    endBB = createBasicBlock("cast.end");

    llvm::Value *isNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(isNull, endBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // Compute the virtual offset.  This requires loading from the vtable,
  // which is why it is emitted after the null check above.
  llvm::Value *VirtualOffset = 0;
  if (VBase) {
    VirtualOffset =
      CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase);
  }

  // Apply both offsets.
  Value = ApplyNonVirtualAndVirtualOffset(*this, Value,
                                          NonVirtualOffset,
                                          VirtualOffset);

  // Cast to the destination type.
  Value = Builder.CreateBitCast(Value, BasePtrTy);

  // Build a phi if we needed a null check: merge the adjusted pointer
  // from the not-null path with a null constant from the original block.
  if (NullCheckValue) {
    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    Builder.CreateBr(endBB);
    EmitBlock(endBB);

    llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result");
    PHI->addIncoming(Value, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB);
    Value = PHI;
  }

  return Value;
}
229
/// Convert a pointer to a base-class subobject back into a pointer to
/// \p Derived by subtracting the static (non-virtual) path offset.
/// Virtual steps are not handled here; the path must be non-virtual.
/// If \p NullCheckValue is set, a null input produces a null result.
llvm::Value *
CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
                                          const CXXRecordDecl *Derived,
                                        CastExpr::path_const_iterator PathBegin,
                                          CastExpr::path_const_iterator PathEnd,
                                          bool NullCheckValue) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  QualType DerivedTy =
    getContext().getCanonicalType(getContext().getTagDeclType(Derived));
  llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();

  llvm::Value *NonVirtualOffset =
    CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);

  if (!NonVirtualOffset) {
    // No offset, we can just cast back.
    return Builder.CreateBitCast(Value, DerivedPtrTy);
  }

  llvm::BasicBlock *CastNull = 0;
  llvm::BasicBlock *CastNotNull = 0;
  llvm::BasicBlock *CastEnd = 0;

  if (NullCheckValue) {
    CastNull = createBasicBlock("cast.null");
    CastNotNull = createBasicBlock("cast.notnull");
    CastEnd = createBasicBlock("cast.end");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  // Apply the offset.  Note the negated offset and a plain (non-inbounds)
  // GEP, since we are walking backwards out of the base subobject.
  Value = Builder.CreateBitCast(Value, Int8PtrTy);
  Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset),
                            "sub.ptr");

  // Just cast.
  Value = Builder.CreateBitCast(Value, DerivedPtrTy);

  // Merge the not-null result with a null constant from the null path.
  if (NullCheckValue) {
    Builder.CreateBr(CastEnd);
    EmitBlock(CastNull);
    Builder.CreateBr(CastEnd);
    EmitBlock(CastEnd);

    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
                     CastNull);
    Value = PHI;
  }

  return Value;
}
287
288llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
289                                              bool ForVirtualBase,
290                                              bool Delegating) {
291  if (!CGM.getCXXABI().NeedsVTTParameter(GD)) {
292    // This constructor/destructor does not need a VTT parameter.
293    return 0;
294  }
295
296  const CXXRecordDecl *RD = cast<CXXMethodDecl>(CurCodeDecl)->getParent();
297  const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();
298
299  llvm::Value *VTT;
300
301  uint64_t SubVTTIndex;
302
303  if (Delegating) {
304    // If this is a delegating constructor call, just load the VTT.
305    return LoadCXXVTT();
306  } else if (RD == Base) {
307    // If the record matches the base, this is the complete ctor/dtor
308    // variant calling the base variant in a class with virtual bases.
309    assert(!CGM.getCXXABI().NeedsVTTParameter(CurGD) &&
310           "doing no-op VTT offset in base dtor/ctor?");
311    assert(!ForVirtualBase && "Can't have same class as virtual base!");
312    SubVTTIndex = 0;
313  } else {
314    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
315    CharUnits BaseOffset = ForVirtualBase ?
316      Layout.getVBaseClassOffset(Base) :
317      Layout.getBaseClassOffset(Base);
318
319    SubVTTIndex =
320      CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset));
321    assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
322  }
323
324  if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
325    // A VTT parameter was passed to the constructor, use it.
326    VTT = LoadCXXVTT();
327    VTT = Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
328  } else {
329    // We're the complete constructor, so get the VTT by name.
330    VTT = CGM.getVTables().GetAddrOfVTT(RD);
331    VTT = Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex);
332  }
333
334  return VTT;
335}
336
namespace {
  /// Call the destructor for a direct base class.
  ///
  /// Pushed as an EH cleanup while the containing object's constructor
  /// runs, so an already-constructed base is destroyed if a later base
  /// or member initializer throws.
  struct CallBaseDtor : EHScopeStack::Cleanup {
    const CXXRecordDecl *BaseClass;   // base whose destructor to call
    bool BaseIsVirtual;               // is it a virtual base?
    CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual)
      : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      // The class being constructed is the parent of the function we
      // are emitting the cleanup inside of (a ctor/dtor).
      const CXXRecordDecl *DerivedClass =
        cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();

      const CXXDestructorDecl *D = BaseClass->getDestructor();
      llvm::Value *Addr =
        CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThis(),
                                                  DerivedClass, BaseClass,
                                                  BaseIsVirtual);
      CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual,
                                /*Delegating=*/false, Addr);
    }
  };

  /// A visitor which checks whether an initializer uses 'this' in a
  /// way which requires the vtable to be properly set.
  struct DynamicThisUseChecker : EvaluatedExprVisitor<DynamicThisUseChecker> {
    typedef EvaluatedExprVisitor<DynamicThisUseChecker> super;

    // Set to true if any (potentially evaluated) subexpression of the
    // visited expression refers to 'this'.
    bool UsesThis;

    DynamicThisUseChecker(ASTContext &C) : super(C), UsesThis(false) {}

    // Black-list all explicit and implicit references to 'this'.
    //
    // Do we need to worry about external references to 'this' derived
    // from arbitrary code?  If so, then anything which runs arbitrary
    // external code might potentially access the vtable.
    void VisitCXXThisExpr(CXXThisExpr *E) { UsesThis = true; }
  };
}
376
377static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) {
378  DynamicThisUseChecker Checker(C);
379  Checker.Visit(const_cast<Expr*>(Init));
380  return Checker.UsesThis;
381}
382
/// Emit the initializer for one direct or virtual base of the class
/// currently being constructed.  \p CtorType selects the constructor
/// variant, which determines whether virtual bases are handled here.
static void EmitBaseInitializer(CodeGenFunction &CGF,
                                const CXXRecordDecl *ClassDecl,
                                CXXCtorInitializer *BaseInit,
                                CXXCtorType CtorType) {
  assert(BaseInit->isBaseInitializer() &&
         "Must have base initializer!");

  llvm::Value *ThisPtr = CGF.LoadCXXThis();

  const Type *BaseType = BaseInit->getBaseClass();
  CXXRecordDecl *BaseClassDecl =
    cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());

  bool isBaseVirtual = BaseInit->isBaseVirtual();

  // The base constructor doesn't construct virtual bases.
  if (CtorType == Ctor_Base && isBaseVirtual)
    return;

  // If the initializer for the base (other than the constructor
  // itself) accesses 'this' in any way, we need to initialize the
  // vtables.
  if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit()))
    CGF.InitializeVTablePointers(ClassDecl);

  // We can pretend to be a complete class because it only matters for
  // virtual bases, and we only do virtual bases for complete ctors.
  llvm::Value *V =
    CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
                                              BaseClassDecl,
                                              isBaseVirtual);
  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(BaseType);
  AggValueSlot AggSlot =
    AggValueSlot::forAddr(V, Alignment, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased);

  CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);

  // If the base has a non-trivial destructor, arrange for it to be
  // destroyed should a later part of construction throw.
  if (CGF.CGM.getLangOpts().Exceptions &&
      !BaseClassDecl->hasTrivialDestructor())
    CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
                                          isBaseVirtual);
}
428
/// Emit the initialization of one (possibly multi-dimensional) array
/// member.  Recurses once per array dimension, emitting a loop over
/// ArrayIndexes[Index]; at the innermost level (Index ==
/// ArrayIndexes.size()) it emits the element initializer itself,
/// offset by ArrayIndexVar when one is supplied.
static void EmitAggMemberInitializer(CodeGenFunction &CGF,
                                     LValue LHS,
                                     Expr *Init,
                                     llvm::Value *ArrayIndexVar,
                                     QualType T,
                                     ArrayRef<VarDecl *> ArrayIndexes,
                                     unsigned Index) {
  // Base case: all dimensions consumed; emit the element initializer.
  if (Index == ArrayIndexes.size()) {
    LValue LV = LHS;

    if (ArrayIndexVar) {
      // If we have an array index variable, load it and use it as an offset.
      // Then, increment the value.
      llvm::Value *Dest = LHS.getAddress();
      llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
      Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
      llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
      Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
      CGF.Builder.CreateStore(Next, ArrayIndexVar);

      // Update the LValue.
      LV.setAddress(Dest);
      // The element alignment may be smaller than the aggregate's.
      CharUnits Align = CGF.getContext().getTypeAlignInChars(T);
      LV.setAlignment(std::min(Align, LV.getAlignment()));
    }

    // Emit according to the evaluation kind of the element type.
    switch (CGF.getEvaluationKind(T)) {
    case TEK_Scalar:
      CGF.EmitScalarInit(Init, /*decl*/ 0, LV, false);
      break;
    case TEK_Complex:
      CGF.EmitComplexExprIntoLValue(Init, LV, /*isInit*/ true);
      break;
    case TEK_Aggregate: {
      AggValueSlot Slot =
        AggValueSlot::forLValue(LV,
                                AggValueSlot::IsDestructed,
                                AggValueSlot::DoesNotNeedGCBarriers,
                                AggValueSlot::IsNotAliased);

      CGF.EmitAggExpr(Init, Slot);
      break;
    }
    }

    return;
  }

  // Recursive case: T is an array; emit a counted loop over this
  // dimension and recurse on the element type.
  const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
  assert(Array && "Array initialization without the array type?");
  llvm::Value *IndexVar
    = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
  assert(IndexVar && "Array index variable not loaded");

  // Initialize this index variable to zero.
  llvm::Value* Zero
    = llvm::Constant::getNullValue(
                              CGF.ConvertType(CGF.getContext().getSizeType()));
  CGF.Builder.CreateStore(Zero, IndexVar);

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond");
  llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end");

  CGF.EmitBlock(CondBlock);

  llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body");
  // Generate: if (loop-index < number-of-elements) fall to the loop body,
  // otherwise, go to the block after the for-loop.
  uint64_t NumElements = Array->getSize().getZExtValue();
  llvm::Value *Counter = CGF.Builder.CreateLoad(IndexVar);
  llvm::Value *NumElementsPtr =
    llvm::ConstantInt::get(Counter->getType(), NumElements);
  llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsPtr,
                                                  "isless");

  // If the condition is true, execute the body.
  CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor);

  CGF.EmitBlock(ForBody);
  llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");

  // Inside the loop body recurse to emit the inner loop or, eventually, the
  // constructor call.
  EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar,
                           Array->getElementType(), ArrayIndexes, Index + 1);

  CGF.EmitBlock(ContinueBlock);

  // Emit the increment of the loop counter.
  llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
  Counter = CGF.Builder.CreateLoad(IndexVar);
  NextVal = CGF.Builder.CreateAdd(Counter, NextVal, "inc");
  CGF.Builder.CreateStore(NextVal, IndexVar);

  // Finally, branch back up to the condition for the next iteration.
  CGF.EmitBranch(CondBlock);

  // Emit the fall-through block.
  CGF.EmitBlock(AfterFor, true);
}
530
/// Emit the initializer for a single non-static data member (direct or
/// via an anonymous union's indirect field) of the class currently
/// being constructed.
static void EmitMemberInitializer(CodeGenFunction &CGF,
                                  const CXXRecordDecl *ClassDecl,
                                  CXXCtorInitializer *MemberInit,
                                  const CXXConstructorDecl *Constructor,
                                  FunctionArgList &Args) {
  assert(MemberInit->isAnyMemberInitializer() &&
         "Must have member initializer!");
  assert(MemberInit->getInit() && "Must have initializer!");

  // non-static data member initializers.
  FieldDecl *Field = MemberInit->getAnyMember();
  QualType FieldType = Field->getType();

  llvm::Value *ThisPtr = CGF.LoadCXXThis();
  QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
  LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);

  if (MemberInit->isIndirectMemberInitializer()) {
    // If we are initializing an anonymous union field, drill down to
    // the field.
    IndirectFieldDecl *IndirectField = MemberInit->getIndirectMember();
    IndirectFieldDecl::chain_iterator I = IndirectField->chain_begin(),
      IEnd = IndirectField->chain_end();
    for ( ; I != IEnd; ++I)
      LHS = CGF.EmitLValueForFieldInitialization(LHS, cast<FieldDecl>(*I));
    FieldType = MemberInit->getIndirectMember()->getAnonField()->getType();
  } else {
    LHS = CGF.EmitLValueForFieldInitialization(LHS, Field);
  }

  // Special case: if we are in a copy or move constructor, and we are copying
  // an array of PODs or classes with trivial copy constructors, ignore the
  // AST and perform the copy we know is equivalent.
  // FIXME: This is hacky at best... if we had a bit more explicit information
  // in the AST, we could generalize it more easily.
  const ConstantArrayType *Array
    = CGF.getContext().getAsConstantArrayType(FieldType);
  if (Array && Constructor->isDefaulted() &&
      Constructor->isCopyOrMoveConstructor()) {
    QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
    CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
    if (BaseElementTy.isPODType(CGF.getContext()) ||
        (CE && CE->getConstructor()->isTrivial())) {
      // Find the source pointer. We know it's the last argument because
      // we know we're in an implicit copy constructor.
      unsigned SrcArgIndex = Args.size() - 1;
      llvm::Value *SrcPtr
        = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
      LValue ThisRHSLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
      LValue Src = CGF.EmitLValueForFieldInitialization(ThisRHSLV, Field);

      // Copy the aggregate.
      CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
                            LHS.isVolatileQualified());
      return;
    }
  }

  // General case: defer to EmitInitializerForField, forwarding any
  // array index variables recorded on the initializer.
  ArrayRef<VarDecl *> ArrayIndexes;
  if (MemberInit->getNumArrayIndices())
    ArrayIndexes = MemberInit->getArrayIndexes();
  CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
}
594
/// Emit the initialization of the field whose lvalue is \p LHS from
/// \p Init, dispatching on the field type's evaluation kind, and push
/// an EH cleanup to destroy the field if later construction throws.
/// \p ArrayIndexes carries the index variables for array new-style
/// member initialization, if any.
void CodeGenFunction::EmitInitializerForField(FieldDecl *Field,
                                              LValue LHS, Expr *Init,
                                             ArrayRef<VarDecl *> ArrayIndexes) {
  QualType FieldType = Field->getType();
  switch (getEvaluationKind(FieldType)) {
  case TEK_Scalar:
    if (LHS.isSimple()) {
      EmitExprAsInit(Init, Field, LHS, false);
    } else {
      // Non-simple lvalue (e.g. bitfield): evaluate then store through it.
      RValue RHS = RValue::get(EmitScalarExpr(Init));
      EmitStoreThroughLValue(RHS, LHS);
    }
    break;
  case TEK_Complex:
    EmitComplexExprIntoLValue(Init, LHS, /*isInit*/ true);
    break;
  case TEK_Aggregate: {
    llvm::Value *ArrayIndexVar = 0;
    if (ArrayIndexes.size()) {
      llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

      // The LHS is a pointer to the first object we'll be constructing, as
      // a flat array.
      QualType BaseElementTy = getContext().getBaseElementType(FieldType);
      llvm::Type *BasePtr = ConvertType(BaseElementTy);
      BasePtr = llvm::PointerType::getUnqual(BasePtr);
      llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(),
                                                       BasePtr);
      LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy);

      // Create an array index that will be used to walk over all of the
      // objects we're constructing.
      ArrayIndexVar = CreateTempAlloca(SizeTy, "object.index");
      llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
      Builder.CreateStore(Zero, ArrayIndexVar);


      // Emit the block variables for the array indices, if any.
      for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I)
        EmitAutoVarDecl(*ArrayIndexes[I]);
    }

    EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType,
                             ArrayIndexes, 0);
  }
  }

  // Ensure that we destroy this object if an exception is thrown
  // later in the constructor.
  QualType::DestructionKind dtorKind = FieldType.isDestructedType();
  if (needsEHCleanup(dtorKind))
    pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
}
648
649/// Checks whether the given constructor is a valid subject for the
650/// complete-to-base constructor delegation optimization, i.e.
651/// emitting the complete constructor as a simple call to the base
652/// constructor.
653static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) {
654
655  // Currently we disable the optimization for classes with virtual
656  // bases because (1) the addresses of parameter variables need to be
657  // consistent across all initializers but (2) the delegate function
658  // call necessarily creates a second copy of the parameter variable.
659  //
660  // The limiting example (purely theoretical AFAIK):
661  //   struct A { A(int &c) { c++; } };
662  //   struct B : virtual A {
663  //     B(int count) : A(count) { printf("%d\n", count); }
664  //   };
665  // ...although even this example could in principle be emitted as a
666  // delegation since the address of the parameter doesn't escape.
667  if (Ctor->getParent()->getNumVBases()) {
668    // TODO: white-list trivial vbase initializers.  This case wouldn't
669    // be subject to the restrictions below.
670
671    // TODO: white-list cases where:
672    //  - there are no non-reference parameters to the constructor
673    //  - the initializers don't access any non-reference parameters
674    //  - the initializers don't take the address of non-reference
675    //    parameters
676    //  - etc.
677    // If we ever add any of the above cases, remember that:
678    //  - function-try-blocks will always blacklist this optimization
679    //  - we need to perform the constructor prologue and cleanup in
680    //    EmitConstructorBody.
681
682    return false;
683  }
684
685  // We also disable the optimization for variadic functions because
686  // it's impossible to "re-pass" varargs.
687  if (Ctor->getType()->getAs<FunctionProtoType>()->isVariadic())
688    return false;
689
690  // FIXME: Decide if we can do a delegation of a delegating constructor.
691  if (Ctor->isDelegatingConstructor())
692    return false;
693
694  return true;
695}
696
/// EmitConstructorBody - Emits the body of the current constructor.
void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
  const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
  CXXCtorType CtorType = CurGD.getCtorType();

  // Before we go any further, try the complete->base constructor
  // delegation optimization.
  if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) &&
      CGM.getTarget().getCXXABI().hasConstructorVariants()) {
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitLocation(Builder, Ctor->getLocEnd());
    EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args, Ctor->getLocEnd());
    return;
  }

  Stmt *Body = Ctor->getBody();

  // Enter the function-try-block before the constructor prologue if
  // applicable.  The try scope must enclose the member/base
  // initializers so exceptions they throw reach the handlers.
  bool IsTryBody = (Body && isa<CXXTryStmt>(Body));
  if (IsTryBody)
    EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);

  // Scope for the EH cleanups pushed by the prologue (base/member
  // destructors along the exceptional path).
  RunCleanupsScope RunCleanups(*this);

  // TODO: in restricted cases, we can emit the vbase initializers of
  // a complete ctor and then delegate to the base ctor.

  // Emit the constructor prologue, i.e. the base and member
  // initializers.
  EmitCtorPrologue(Ctor, CtorType, Args);

  // Emit the body of the statement.
  if (IsTryBody)
    EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
  else if (Body)
    EmitStmt(Body);

  // Emit any cleanup blocks associated with the member or base
  // initializers, which includes (along the exceptional path) the
  // destructors for those members and bases that were fully
  // constructed.
  RunCleanups.ForceCleanup();

  if (IsTryBody)
    ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}
744
namespace {
  /// RAII object to indicate that codegen is copying the value representation
  /// instead of the object representation. Useful when copying a struct or
  /// class which has uninitialized members and we're only performing
  /// lvalue-to-rvalue conversion on the object but not its members.
  ///
  /// While alive, it swaps the function's sanitizer options for a copy
  /// with the Bool and Enum checks disabled; the destructor restores
  /// the original options.
  class CopyingValueRepresentation {
  public:
    explicit CopyingValueRepresentation(CodeGenFunction &CGF)
        : CGF(CGF), SO(*CGF.SanOpts), OldSanOpts(CGF.SanOpts) {
      // Invalid bool/enum bit patterns are fine here: we copy bytes,
      // not values, so suppress those sanitizer checks.
      SO.Bool = false;
      SO.Enum = false;
      CGF.SanOpts = &SO;
    }
    ~CopyingValueRepresentation() {
      CGF.SanOpts = OldSanOpts;
    }
  private:
    CodeGenFunction &CGF;
    SanitizerOptions SO;            // modified copy installed while alive
    const SanitizerOptions *OldSanOpts; // saved pointer for restoration
  };
}
767
768namespace {
  /// Coalesces a run of trivially-copyable field copies (between 'this'
  /// and a source object SrcRec) into a single memcpy.  Subclasses feed
  /// fields in via addMemcpyableField() and flush the pending run with
  /// emitMemcpy().
  class FieldMemcpyizer {
  public:
    FieldMemcpyizer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl,
                    const VarDecl *SrcRec)
      : CGF(CGF), ClassDecl(ClassDecl), SrcRec(SrcRec),
        RecLayout(CGF.getContext().getASTRecordLayout(ClassDecl)),
        FirstField(0), LastField(0), FirstFieldOffset(0), LastFieldOffset(0),
        LastAddedFieldIndex(0) { }

    /// A field may participate in a memcpy only if it is neither volatile
    /// nor carries an Objective-C lifetime (ARC) qualifier.
    static bool isMemcpyableField(FieldDecl *F) {
      Qualifiers Qual = F->getType().getQualifiers();
      if (Qual.hasVolatile() || Qual.hasObjCLifetime())
        return false;
      return true;
    }

    /// Extend the current run with field F, or start a new run if none is
    /// in progress (FirstField is null between runs).
    void addMemcpyableField(FieldDecl *F) {
      if (FirstField == 0)
        addInitialField(F);
      else
        addNextField(F);
    }

    /// Size of the aggregated run: from the first field's bit offset to the
    /// end of the last field, rounded up to a whole char.
    CharUnits getMemcpySize() const {
      unsigned LastFieldSize =
        LastField->isBitField() ?
          LastField->getBitWidthValue(CGF.getContext()) :
          CGF.getContext().getTypeSize(LastField->getType());
      // Add CharWidth-1 so toCharUnitsFromBits rounds up, not down.
      uint64_t MemcpySizeBits =
        LastFieldOffset + LastFieldSize - FirstFieldOffset +
        CGF.getContext().getCharWidth() - 1;
      CharUnits MemcpySize =
        CGF.getContext().toCharUnitsFromBits(MemcpySizeBits);
      return MemcpySize;
    }

    /// Emit the memcpy for the pending run (no-op when no run is pending)
    /// and reset state so a new run can begin.
    void emitMemcpy() {
      // Give the subclass a chance to bail out if it feels the memcpy isn't
      // worth it (e.g. Hasn't aggregated enough data).
      if (FirstField == 0) {
        return;
      }

      CharUnits Alignment;

      // For a leading bitfield, alignment comes from its storage unit;
      // otherwise from the declared alignment of the field.
      if (FirstField->isBitField()) {
        const CGRecordLayout &RL =
          CGF.getTypes().getCGRecordLayout(FirstField->getParent());
        const CGBitFieldInfo &BFInfo = RL.getBitFieldInfo(FirstField);
        Alignment = CharUnits::fromQuantity(BFInfo.StorageAlignment);
      } else {
        Alignment = CGF.getContext().getDeclAlign(FirstField);
      }

      assert((CGF.getContext().toCharUnitsFromBits(FirstFieldOffset) %
              Alignment) == 0 && "Bad field alignment.");

      // Compute lvalues for the first field in both the destination
      // ('this') and the source object; the memcpy starts there.
      CharUnits MemcpySize = getMemcpySize();
      QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
      llvm::Value *ThisPtr = CGF.LoadCXXThis();
      LValue DestLV = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
      LValue Dest = CGF.EmitLValueForFieldInitialization(DestLV, FirstField);
      llvm::Value *SrcPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(SrcRec));
      LValue SrcLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
      LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField);

      // Bitfield lvalues expose their storage through getBitFieldAddr().
      emitMemcpyIR(Dest.isBitField() ? Dest.getBitFieldAddr() : Dest.getAddress(),
                   Src.isBitField() ? Src.getBitFieldAddr() : Src.getAddress(),
                   MemcpySize, Alignment);
      reset();
    }

    /// Discard the pending run (a null FirstField marks "no run").
    void reset() {
      FirstField = 0;
    }

  protected:
    CodeGenFunction &CGF;
    const CXXRecordDecl *ClassDecl;

  private:

    /// Bitcast both pointers to i8* (in their address spaces) and emit the
    /// llvm.memcpy intrinsic.
    void emitMemcpyIR(llvm::Value *DestPtr, llvm::Value *SrcPtr,
                      CharUnits Size, CharUnits Alignment) {
      llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
      llvm::Type *DBP =
        llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace());
      DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP);

      llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
      llvm::Type *SBP =
        llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace());
      SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP);

      CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity(),
                               Alignment.getQuantity());
    }

    /// Start a new run consisting of just F.
    void addInitialField(FieldDecl *F) {
        FirstField = F;
        LastField = F;
        FirstFieldOffset = RecLayout.getFieldOffset(F->getFieldIndex());
        LastFieldOffset = FirstFieldOffset;
        LastAddedFieldIndex = F->getFieldIndex();
        return;
      }

    /// Append F to the current run.
    void addNextField(FieldDecl *F) {
      // For the most part, the following invariant will hold:
      //   F->getFieldIndex() == LastAddedFieldIndex + 1
      // The one exception is that Sema won't add a copy-initializer for an
      // unnamed bitfield, which will show up here as a gap in the sequence.
      assert(F->getFieldIndex() >= LastAddedFieldIndex + 1 &&
             "Cannot aggregate fields out of order.");
      LastAddedFieldIndex = F->getFieldIndex();

      // The 'first' and 'last' fields are chosen by offset, rather than field
      // index. This allows the code to support bitfields, as well as regular
      // fields.
      uint64_t FOffset = RecLayout.getFieldOffset(F->getFieldIndex());
      if (FOffset < FirstFieldOffset) {
        FirstField = F;
        FirstFieldOffset = FOffset;
      } else if (FOffset > LastFieldOffset) {
        LastField = F;
        LastFieldOffset = FOffset;
      }
    }

    const VarDecl *SrcRec;            // object being copied from (may be null)
    const ASTRecordLayout &RecLayout; // layout of ClassDecl, for field offsets
    FieldDecl *FirstField;            // null <=> no run currently aggregated
    FieldDecl *LastField;
    uint64_t FirstFieldOffset, LastFieldOffset; // bit offsets within the record
    unsigned LastAddedFieldIndex;
  };
905
906  class ConstructorMemcpyizer : public FieldMemcpyizer {
907  private:
908
909    /// Get source argument for copy constructor. Returns null if not a copy
910    /// constructor.
911    static const VarDecl* getTrivialCopySource(const CXXConstructorDecl *CD,
912                                               FunctionArgList &Args) {
913      if (CD->isCopyOrMoveConstructor() && CD->isDefaulted())
914        return Args[Args.size() - 1];
915      return 0;
916    }
917
918    // Returns true if a CXXCtorInitializer represents a member initialization
919    // that can be rolled into a memcpy.
920    bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const {
921      if (!MemcpyableCtor)
922        return false;
923      FieldDecl *Field = MemberInit->getMember();
924      assert(Field != 0 && "No field for member init.");
925      QualType FieldType = Field->getType();
926      CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
927
928      // Bail out on non-POD, not-trivially-constructable members.
929      if (!(CE && CE->getConstructor()->isTrivial()) &&
930          !(FieldType.isTriviallyCopyableType(CGF.getContext()) ||
931            FieldType->isReferenceType()))
932        return false;
933
934      // Bail out on volatile fields.
935      if (!isMemcpyableField(Field))
936        return false;
937
938      // Otherwise we're good.
939      return true;
940    }
941
942  public:
943    ConstructorMemcpyizer(CodeGenFunction &CGF, const CXXConstructorDecl *CD,
944                          FunctionArgList &Args)
945      : FieldMemcpyizer(CGF, CD->getParent(), getTrivialCopySource(CD, Args)),
946        ConstructorDecl(CD),
947        MemcpyableCtor(CD->isDefaulted() &&
948                       CD->isCopyOrMoveConstructor() &&
949                       CGF.getLangOpts().getGC() == LangOptions::NonGC),
950        Args(Args) { }
951
952    void addMemberInitializer(CXXCtorInitializer *MemberInit) {
953      if (isMemberInitMemcpyable(MemberInit)) {
954        AggregatedInits.push_back(MemberInit);
955        addMemcpyableField(MemberInit->getMember());
956      } else {
957        emitAggregatedInits();
958        EmitMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit,
959                              ConstructorDecl, Args);
960      }
961    }
962
963    void emitAggregatedInits() {
964      if (AggregatedInits.size() <= 1) {
965        // This memcpy is too small to be worthwhile. Fall back on default
966        // codegen.
967        if (!AggregatedInits.empty()) {
968          CopyingValueRepresentation CVR(CGF);
969          EmitMemberInitializer(CGF, ConstructorDecl->getParent(),
970                                AggregatedInits[0], ConstructorDecl, Args);
971        }
972        reset();
973        return;
974      }
975
976      pushEHDestructors();
977      emitMemcpy();
978      AggregatedInits.clear();
979    }
980
981    void pushEHDestructors() {
982      llvm::Value *ThisPtr = CGF.LoadCXXThis();
983      QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
984      LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
985
986      for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
987        QualType FieldType = AggregatedInits[i]->getMember()->getType();
988        QualType::DestructionKind dtorKind = FieldType.isDestructedType();
989        if (CGF.needsEHCleanup(dtorKind))
990          CGF.pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
991      }
992    }
993
994    void finish() {
995      emitAggregatedInits();
996    }
997
998  private:
999    const CXXConstructorDecl *ConstructorDecl;
1000    bool MemcpyableCtor;
1001    FunctionArgList &Args;
1002    SmallVector<CXXCtorInitializer*, 16> AggregatedInits;
1003  };
1004
1005  class AssignmentMemcpyizer : public FieldMemcpyizer {
1006  private:
1007
1008    // Returns the memcpyable field copied by the given statement, if one
1009    // exists. Otherwise returns null.
1010    FieldDecl *getMemcpyableField(Stmt *S) {
1011      if (!AssignmentsMemcpyable)
1012        return 0;
1013      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) {
1014        // Recognise trivial assignments.
1015        if (BO->getOpcode() != BO_Assign)
1016          return 0;
1017        MemberExpr *ME = dyn_cast<MemberExpr>(BO->getLHS());
1018        if (!ME)
1019          return 0;
1020        FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
1021        if (!Field || !isMemcpyableField(Field))
1022          return 0;
1023        Stmt *RHS = BO->getRHS();
1024        if (ImplicitCastExpr *EC = dyn_cast<ImplicitCastExpr>(RHS))
1025          RHS = EC->getSubExpr();
1026        if (!RHS)
1027          return 0;
1028        MemberExpr *ME2 = dyn_cast<MemberExpr>(RHS);
1029        if (dyn_cast<FieldDecl>(ME2->getMemberDecl()) != Field)
1030          return 0;
1031        return Field;
1032      } else if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(S)) {
1033        CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MCE->getCalleeDecl());
1034        if (!(MD && (MD->isCopyAssignmentOperator() ||
1035                       MD->isMoveAssignmentOperator()) &&
1036              MD->isTrivial()))
1037          return 0;
1038        MemberExpr *IOA = dyn_cast<MemberExpr>(MCE->getImplicitObjectArgument());
1039        if (!IOA)
1040          return 0;
1041        FieldDecl *Field = dyn_cast<FieldDecl>(IOA->getMemberDecl());
1042        if (!Field || !isMemcpyableField(Field))
1043          return 0;
1044        MemberExpr *Arg0 = dyn_cast<MemberExpr>(MCE->getArg(0));
1045        if (!Arg0 || Field != dyn_cast<FieldDecl>(Arg0->getMemberDecl()))
1046          return 0;
1047        return Field;
1048      } else if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
1049        FunctionDecl *FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
1050        if (!FD || FD->getBuiltinID() != Builtin::BI__builtin_memcpy)
1051          return 0;
1052        Expr *DstPtr = CE->getArg(0);
1053        if (ImplicitCastExpr *DC = dyn_cast<ImplicitCastExpr>(DstPtr))
1054          DstPtr = DC->getSubExpr();
1055        UnaryOperator *DUO = dyn_cast<UnaryOperator>(DstPtr);
1056        if (!DUO || DUO->getOpcode() != UO_AddrOf)
1057          return 0;
1058        MemberExpr *ME = dyn_cast<MemberExpr>(DUO->getSubExpr());
1059        if (!ME)
1060          return 0;
1061        FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
1062        if (!Field || !isMemcpyableField(Field))
1063          return 0;
1064        Expr *SrcPtr = CE->getArg(1);
1065        if (ImplicitCastExpr *SC = dyn_cast<ImplicitCastExpr>(SrcPtr))
1066          SrcPtr = SC->getSubExpr();
1067        UnaryOperator *SUO = dyn_cast<UnaryOperator>(SrcPtr);
1068        if (!SUO || SUO->getOpcode() != UO_AddrOf)
1069          return 0;
1070        MemberExpr *ME2 = dyn_cast<MemberExpr>(SUO->getSubExpr());
1071        if (!ME2 || Field != dyn_cast<FieldDecl>(ME2->getMemberDecl()))
1072          return 0;
1073        return Field;
1074      }
1075
1076      return 0;
1077    }
1078
1079    bool AssignmentsMemcpyable;
1080    SmallVector<Stmt*, 16> AggregatedStmts;
1081
1082  public:
1083
1084    AssignmentMemcpyizer(CodeGenFunction &CGF, const CXXMethodDecl *AD,
1085                         FunctionArgList &Args)
1086      : FieldMemcpyizer(CGF, AD->getParent(), Args[Args.size() - 1]),
1087        AssignmentsMemcpyable(CGF.getLangOpts().getGC() == LangOptions::NonGC) {
1088      assert(Args.size() == 2);
1089    }
1090
1091    void emitAssignment(Stmt *S) {
1092      FieldDecl *F = getMemcpyableField(S);
1093      if (F) {
1094        addMemcpyableField(F);
1095        AggregatedStmts.push_back(S);
1096      } else {
1097        emitAggregatedStmts();
1098        CGF.EmitStmt(S);
1099      }
1100    }
1101
1102    void emitAggregatedStmts() {
1103      if (AggregatedStmts.size() <= 1) {
1104        if (!AggregatedStmts.empty()) {
1105          CopyingValueRepresentation CVR(CGF);
1106          CGF.EmitStmt(AggregatedStmts[0]);
1107        }
1108        reset();
1109      }
1110
1111      emitMemcpy();
1112      AggregatedStmts.clear();
1113    }
1114
1115    void finish() {
1116      emitAggregatedStmts();
1117    }
1118  };
1119
1120}
1121
1122/// EmitCtorPrologue - This routine generates necessary code to initialize
1123/// base classes and non-static data members belonging to this constructor.
1124void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
1125                                       CXXCtorType CtorType,
1126                                       FunctionArgList &Args) {
1127  if (CD->isDelegatingConstructor())
1128    return EmitDelegatingCXXConstructorCall(CD, Args);
1129
1130  const CXXRecordDecl *ClassDecl = CD->getParent();
1131
1132  CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
1133                                          E = CD->init_end();
1134
1135  llvm::BasicBlock *BaseCtorContinueBB = 0;
1136  if (ClassDecl->getNumVBases() &&
1137      !CGM.getTarget().getCXXABI().hasConstructorVariants()) {
1138    // The ABIs that don't have constructor variants need to put a branch
1139    // before the virtual base initialization code.
1140    BaseCtorContinueBB =
1141      CGM.getCXXABI().EmitCtorCompleteObjectHandler(*this, ClassDecl);
1142    assert(BaseCtorContinueBB);
1143  }
1144
1145  // Virtual base initializers first.
1146  for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) {
1147    EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
1148  }
1149
1150  if (BaseCtorContinueBB) {
1151    // Complete object handler should continue to the remaining initializers.
1152    Builder.CreateBr(BaseCtorContinueBB);
1153    EmitBlock(BaseCtorContinueBB);
1154  }
1155
1156  // Then, non-virtual base initializers.
1157  for (; B != E && (*B)->isBaseInitializer(); B++) {
1158    assert(!(*B)->isBaseVirtual());
1159    EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
1160  }
1161
1162  InitializeVTablePointers(ClassDecl);
1163
1164  // And finally, initialize class members.
1165  FieldConstructionScope FCS(*this, CXXThisValue);
1166  ConstructorMemcpyizer CM(*this, CD, Args);
1167  for (; B != E; B++) {
1168    CXXCtorInitializer *Member = (*B);
1169    assert(!Member->isBaseInitializer());
1170    assert(Member->isAnyMemberInitializer() &&
1171           "Delegating initializer on non-delegating constructor");
1172    CM.addMemberInitializer(Member);
1173  }
1174  CM.finish();
1175}
1176
1177static bool
1178FieldHasTrivialDestructorBody(ASTContext &Context, const FieldDecl *Field);
1179
1180static bool
1181HasTrivialDestructorBody(ASTContext &Context,
1182                         const CXXRecordDecl *BaseClassDecl,
1183                         const CXXRecordDecl *MostDerivedClassDecl)
1184{
1185  // If the destructor is trivial we don't have to check anything else.
1186  if (BaseClassDecl->hasTrivialDestructor())
1187    return true;
1188
1189  if (!BaseClassDecl->getDestructor()->hasTrivialBody())
1190    return false;
1191
1192  // Check fields.
1193  for (CXXRecordDecl::field_iterator I = BaseClassDecl->field_begin(),
1194       E = BaseClassDecl->field_end(); I != E; ++I) {
1195    const FieldDecl *Field = *I;
1196
1197    if (!FieldHasTrivialDestructorBody(Context, Field))
1198      return false;
1199  }
1200
1201  // Check non-virtual bases.
1202  for (CXXRecordDecl::base_class_const_iterator I =
1203       BaseClassDecl->bases_begin(), E = BaseClassDecl->bases_end();
1204       I != E; ++I) {
1205    if (I->isVirtual())
1206      continue;
1207
1208    const CXXRecordDecl *NonVirtualBase =
1209      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
1210    if (!HasTrivialDestructorBody(Context, NonVirtualBase,
1211                                  MostDerivedClassDecl))
1212      return false;
1213  }
1214
1215  if (BaseClassDecl == MostDerivedClassDecl) {
1216    // Check virtual bases.
1217    for (CXXRecordDecl::base_class_const_iterator I =
1218         BaseClassDecl->vbases_begin(), E = BaseClassDecl->vbases_end();
1219         I != E; ++I) {
1220      const CXXRecordDecl *VirtualBase =
1221        cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
1222      if (!HasTrivialDestructorBody(Context, VirtualBase,
1223                                    MostDerivedClassDecl))
1224        return false;
1225    }
1226  }
1227
1228  return true;
1229}
1230
1231static bool
1232FieldHasTrivialDestructorBody(ASTContext &Context,
1233                              const FieldDecl *Field)
1234{
1235  QualType FieldBaseElementType = Context.getBaseElementType(Field->getType());
1236
1237  const RecordType *RT = FieldBaseElementType->getAs<RecordType>();
1238  if (!RT)
1239    return true;
1240
1241  CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
1242  return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl);
1243}
1244
1245/// CanSkipVTablePointerInitialization - Check whether we need to initialize
1246/// any vtable pointers before calling this destructor.
1247static bool CanSkipVTablePointerInitialization(ASTContext &Context,
1248                                               const CXXDestructorDecl *Dtor) {
1249  if (!Dtor->hasTrivialBody())
1250    return false;
1251
1252  // Check the fields.
1253  const CXXRecordDecl *ClassDecl = Dtor->getParent();
1254  for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
1255       E = ClassDecl->field_end(); I != E; ++I) {
1256    const FieldDecl *Field = *I;
1257
1258    if (!FieldHasTrivialDestructorBody(Context, Field))
1259      return false;
1260  }
1261
1262  return true;
1263}
1264
/// EmitDestructorBody - Emits the body of the current destructor.
void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
  const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
  CXXDtorType DtorType = CurGD.getDtorType();

  // The call to operator delete in a deleting destructor happens
  // outside of the function-try-block, which means it's always
  // possible to delegate the destructor body to the complete
  // destructor.  Do so.
  if (DtorType == Dtor_Deleting) {
    // EnterDtorCleanups pushes the (possibly conditional) operator delete
    // cleanup; pop it after the delegated complete-destructor call.
    EnterDtorCleanups(Dtor, Dtor_Deleting);
    EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
                          /*Delegating=*/false, LoadCXXThis());
    PopCleanupBlock();
    return;
  }

  Stmt *Body = Dtor->getBody();

  // If the body is a function-try-block, enter the try before
  // anything else.
  bool isTryBody = (Body && isa<CXXTryStmt>(Body));
  if (isTryBody)
    EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);

  // Enter the epilogue cleanups.
  RunCleanupsScope DtorEpilogue(*this);

  // If this is the complete variant, just invoke the base variant;
  // the epilogue will destruct the virtual bases.  But we can't do
  // this optimization if the body is a function-try-block, because
  // we'd introduce *two* handler blocks.  In the Microsoft ABI, we
  // always delegate because we might not have a definition in this TU.
  switch (DtorType) {
  case Dtor_Deleting: llvm_unreachable("already handled deleting case");

  case Dtor_Complete:
    assert((Body || getTarget().getCXXABI().isMicrosoft()) &&
           "can't emit a dtor without a body for non-Microsoft ABIs");

    // Enter the cleanup scopes for virtual bases.
    EnterDtorCleanups(Dtor, Dtor_Complete);

    if (!isTryBody) {
      EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
                            /*Delegating=*/false, LoadCXXThis());
      break;
    }
    // Fallthrough: act like we're in the base variant.
    // (Deliberate: a function-try-block body is emitted once, here, with
    // both the complete and base cleanups on the stack.)

  case Dtor_Base:
    assert(Body);

    // Enter the cleanup scopes for fields and non-virtual bases.
    EnterDtorCleanups(Dtor, Dtor_Base);

    // Initialize the vtable pointers before entering the body.
    if (!CanSkipVTablePointerInitialization(getContext(), Dtor))
        InitializeVTablePointers(Dtor->getParent());

    if (isTryBody)
      EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
    else if (Body)
      EmitStmt(Body);
    else {
      assert(Dtor->isImplicit() && "bodyless dtor not implicit");
      // nothing to do besides what's in the epilogue
    }
    // -fapple-kext must inline any call to this dtor into
    // the caller's body.
    if (getLangOpts().AppleKext)
      CurFn->addFnAttr(llvm::Attribute::AlwaysInline);
    break;
  }

  // Jump out through the epilogue cleanups.
  DtorEpilogue.ForceCleanup();

  // Exit the try if applicable.
  if (isTryBody)
    ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}
1347
1348void CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args) {
1349  const CXXMethodDecl *AssignOp = cast<CXXMethodDecl>(CurGD.getDecl());
1350  const Stmt *RootS = AssignOp->getBody();
1351  assert(isa<CompoundStmt>(RootS) &&
1352         "Body of an implicit assignment operator should be compound stmt.");
1353  const CompoundStmt *RootCS = cast<CompoundStmt>(RootS);
1354
1355  LexicalScope Scope(*this, RootCS->getSourceRange());
1356
1357  AssignmentMemcpyizer AM(*this, AssignOp, Args);
1358  for (CompoundStmt::const_body_iterator I = RootCS->body_begin(),
1359                                         E = RootCS->body_end();
1360       I != E; ++I) {
1361    AM.emitAssignment(*I);
1362  }
1363  AM.finish();
1364}
1365
namespace {
  /// Call the operator delete associated with the current destructor.
  /// Unconditional variant, used when the deleting destructor takes no
  /// implicit "should delete" parameter.
  struct CallDtorDelete : EHScopeStack::Cleanup {
    CallDtorDelete() {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
      const CXXRecordDecl *ClassDecl = Dtor->getParent();
      CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
                         CGF.getContext().getTagDeclType(ClassDecl));
    }
  };

  /// Conditional variant: calls operator delete only when the implicit
  /// boolean parameter of the deleting destructor is non-zero (pushed by
  /// EnterDtorCleanups when CXXStructorImplicitParamValue is present).
  struct CallDtorDeleteConditional : EHScopeStack::Cleanup {
    llvm::Value *ShouldDeleteCondition;
  public:
    CallDtorDeleteConditional(llvm::Value *ShouldDeleteCondition)
      : ShouldDeleteCondition(ShouldDeleteCondition) {
      assert(ShouldDeleteCondition != NULL);
    }

    void Emit(CodeGenFunction &CGF, Flags flags) {
      llvm::BasicBlock *callDeleteBB = CGF.createBasicBlock("dtor.call_delete");
      llvm::BasicBlock *continueBB = CGF.createBasicBlock("dtor.continue");
      // NOTE(review): despite the name, ShouldCallDelete is true when the
      // condition is *null* — i.e. the branch skips the delete call and
      // goes straight to continueBB; delete runs on the non-zero path.
      llvm::Value *ShouldCallDelete
        = CGF.Builder.CreateIsNull(ShouldDeleteCondition);
      CGF.Builder.CreateCondBr(ShouldCallDelete, continueBB, callDeleteBB);

      CGF.EmitBlock(callDeleteBB);
      const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
      const CXXRecordDecl *ClassDecl = Dtor->getParent();
      CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
                         CGF.getContext().getTagDeclType(ClassDecl));
      CGF.Builder.CreateBr(continueBB);

      CGF.EmitBlock(continueBB);
    }
  };

  /// Cleanup that destroys one field of 'this' with the given destroyer.
  class DestroyField  : public EHScopeStack::Cleanup {
    const FieldDecl *field;                 // field being destroyed
    CodeGenFunction::Destroyer *destroyer;  // how to destroy it
    bool useEHCleanupForArray;              // partial-array EH protection

  public:
    DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer,
                 bool useEHCleanupForArray)
      : field(field), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      // Find the address of the field.
      llvm::Value *thisValue = CGF.LoadCXXThis();
      QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent());
      LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy);
      LValue LV = CGF.EmitLValueForField(ThisLV, field);
      assert(LV.isSimple());

      // The array EH cleanup is only requested on the normal-cleanup path;
      // on the EH path the whole field is already being torn down.
      CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer,
                      flags.isForNormalCleanup() && useEHCleanupForArray);
    }
  };
}
1429
1430/// EmitDtorEpilogue - Emit all code that comes at the end of class's
1431/// destructor. This is to call destructors on members and base classes
1432/// in reverse order of their construction.
1433void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
1434                                        CXXDtorType DtorType) {
1435  assert(!DD->isTrivial() &&
1436         "Should not emit dtor epilogue for trivial dtor!");
1437
1438  // The deleting-destructor phase just needs to call the appropriate
1439  // operator delete that Sema picked up.
1440  if (DtorType == Dtor_Deleting) {
1441    assert(DD->getOperatorDelete() &&
1442           "operator delete missing - EmitDtorEpilogue");
1443    if (CXXStructorImplicitParamValue) {
1444      // If there is an implicit param to the deleting dtor, it's a boolean
1445      // telling whether we should call delete at the end of the dtor.
1446      EHStack.pushCleanup<CallDtorDeleteConditional>(
1447          NormalAndEHCleanup, CXXStructorImplicitParamValue);
1448    } else {
1449      EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup);
1450    }
1451    return;
1452  }
1453
1454  const CXXRecordDecl *ClassDecl = DD->getParent();
1455
1456  // Unions have no bases and do not call field destructors.
1457  if (ClassDecl->isUnion())
1458    return;
1459
1460  // The complete-destructor phase just destructs all the virtual bases.
1461  if (DtorType == Dtor_Complete) {
1462
1463    // We push them in the forward order so that they'll be popped in
1464    // the reverse order.
1465    for (CXXRecordDecl::base_class_const_iterator I =
1466           ClassDecl->vbases_begin(), E = ClassDecl->vbases_end();
1467              I != E; ++I) {
1468      const CXXBaseSpecifier &Base = *I;
1469      CXXRecordDecl *BaseClassDecl
1470        = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
1471
1472      // Ignore trivial destructors.
1473      if (BaseClassDecl->hasTrivialDestructor())
1474        continue;
1475
1476      EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
1477                                        BaseClassDecl,
1478                                        /*BaseIsVirtual*/ true);
1479    }
1480
1481    return;
1482  }
1483
1484  assert(DtorType == Dtor_Base);
1485
1486  // Destroy non-virtual bases.
1487  for (CXXRecordDecl::base_class_const_iterator I =
1488        ClassDecl->bases_begin(), E = ClassDecl->bases_end(); I != E; ++I) {
1489    const CXXBaseSpecifier &Base = *I;
1490
1491    // Ignore virtual bases.
1492    if (Base.isVirtual())
1493      continue;
1494
1495    CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl();
1496
1497    // Ignore trivial destructors.
1498    if (BaseClassDecl->hasTrivialDestructor())
1499      continue;
1500
1501    EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
1502                                      BaseClassDecl,
1503                                      /*BaseIsVirtual*/ false);
1504  }
1505
1506  // Destroy direct fields.
1507  SmallVector<const FieldDecl *, 16> FieldDecls;
1508  for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
1509       E = ClassDecl->field_end(); I != E; ++I) {
1510    const FieldDecl *field = *I;
1511    QualType type = field->getType();
1512    QualType::DestructionKind dtorKind = type.isDestructedType();
1513    if (!dtorKind) continue;
1514
1515    // Anonymous union members do not have their destructors called.
1516    const RecordType *RT = type->getAsUnionType();
1517    if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue;
1518
1519    CleanupKind cleanupKind = getCleanupKind(dtorKind);
1520    EHStack.pushCleanup<DestroyField>(cleanupKind, field,
1521                                      getDestroyer(dtorKind),
1522                                      cleanupKind & EHCleanup);
1523  }
1524}
1525
1526/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
1527/// constructor for each of several members of an array.
1528///
1529/// \param ctor the constructor to call for each element
1530/// \param arrayType the type of the array to initialize
1531/// \param arrayBegin an arrayType*
1532/// \param zeroInitialize true if each element should be
1533///   zero-initialized before it is constructed
1534void
1535CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
1536                                            const ConstantArrayType *arrayType,
1537                                            llvm::Value *arrayBegin,
1538                                          CallExpr::const_arg_iterator argBegin,
1539                                            CallExpr::const_arg_iterator argEnd,
1540                                            bool zeroInitialize) {
1541  QualType elementType;
1542  llvm::Value *numElements =
1543    emitArrayLength(arrayType, elementType, arrayBegin);
1544
1545  EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin,
1546                             argBegin, argEnd, zeroInitialize);
1547}
1548
/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
/// constructor for each of several members of an array.
///
/// \param ctor the constructor to call for each element
/// \param numElements the number of elements in the array;
///   may be zero
/// \param arrayBegin a T*, where T is the type constructed by ctor
/// \param argBegin,argEnd the arguments to evaluate and pass to the
///   constructor for each element
/// \param zeroInitialize true if each element should be
///   zero-initialized before it is constructed
void
CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
                                            llvm::Value *numElements,
                                            llvm::Value *arrayBegin,
                                         CallExpr::const_arg_iterator argBegin,
                                           CallExpr::const_arg_iterator argEnd,
                                            bool zeroInitialize) {

  // It's legal for numElements to be zero.  This can happen both
  // dynamically, because x can be zero in 'new A[x]', and statically,
  // because of GCC extensions that permit zero-length arrays.  There
  // are probably legitimate places where we could assume that this
  // doesn't happen, but it's not clear that it's worth it.
  llvm::BranchInst *zeroCheckBranch = 0;

  // Optimize for a constant count.
  llvm::ConstantInt *constantCount
    = dyn_cast<llvm::ConstantInt>(numElements);
  if (constantCount) {
    // Just skip out if the constant count is zero.
    if (constantCount->isZero()) return;

  // Otherwise, emit the check.
  } else {
    llvm::BasicBlock *loopBB = createBasicBlock("new.ctorloop");
    llvm::Value *iszero = Builder.CreateIsNull(numElements, "isempty");
    // Both successors are loopBB for the moment; the "is zero" edge is
    // patched below (setSuccessor) to jump over the loop once the
    // continuation block has been created.
    zeroCheckBranch = Builder.CreateCondBr(iszero, loopBB, loopBB);
    EmitBlock(loopBB);
  }

  // Find the end of the array.
  llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements,
                                                    "arrayctor.end");

  // Enter the loop, setting up a phi for the current location to initialize.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = createBasicBlock("arrayctor.loop");
  EmitBlock(loopBB);
  llvm::PHINode *cur = Builder.CreatePHI(arrayBegin->getType(), 2,
                                         "arrayctor.cur");
  cur->addIncoming(arrayBegin, entryBB);

  // Inside the loop body, emit the constructor call on the array element.

  QualType type = getContext().getTypeDeclType(ctor->getParent());

  // Zero initialize the storage, if requested.
  if (zeroInitialize)
    EmitNullInitialization(cur, type);

  // C++ [class.temporary]p4:
  // There are two contexts in which temporaries are destroyed at a different
  // point than the end of the full-expression. The first context is when a
  // default constructor is called to initialize an element of an array.
  // If the constructor has one or more default arguments, the destruction of
  // every temporary created in a default argument expression is sequenced
  // before the construction of the next array element, if any.

  {
    // Scope so that default-argument temporaries are destroyed before the
    // next iteration, per the standard text quoted above.
    RunCleanupsScope Scope(*this);

    // Evaluate the constructor and its arguments in a regular
    // partial-destroy cleanup: if the constructor throws, the elements
    // constructed so far (arrayBegin up to cur) must be destroyed.
    if (getLangOpts().Exceptions &&
        !ctor->getParent()->hasTrivialDestructor()) {
      Destroyer *destroyer = destroyCXXObject;
      pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer);
    }

    EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/ false,
                           /*Delegating=*/false, cur, argBegin, argEnd);
  }

  // Go to the next element.
  llvm::Value *next =
    Builder.CreateInBoundsGEP(cur, llvm::ConstantInt::get(SizeTy, 1),
                              "arrayctor.next");
  cur->addIncoming(next, Builder.GetInsertBlock());

  // Check whether that's the end of the loop.
  llvm::Value *done = Builder.CreateICmpEQ(next, arrayEnd, "arrayctor.done");
  llvm::BasicBlock *contBB = createBasicBlock("arrayctor.cont");
  Builder.CreateCondBr(done, contBB, loopBB);

  // Patch the earlier check to skip over the loop.
  if (zeroCheckBranch) zeroCheckBranch->setSuccessor(0, contBB);

  EmitBlock(contBB);
}
1647
1648void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
1649                                       llvm::Value *addr,
1650                                       QualType type) {
1651  const RecordType *rtype = type->castAs<RecordType>();
1652  const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
1653  const CXXDestructorDecl *dtor = record->getDestructor();
1654  assert(!dtor->isTrivial());
1655  CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false,
1656                            /*Delegating=*/false, addr);
1657}
1658
1659void
1660CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
1661                                        CXXCtorType Type, bool ForVirtualBase,
1662                                        bool Delegating,
1663                                        llvm::Value *This,
1664                                        CallExpr::const_arg_iterator ArgBeg,
1665                                        CallExpr::const_arg_iterator ArgEnd) {
1666  // If this is a trivial constructor, just emit what's needed.
1667  if (D->isTrivial()) {
1668    if (ArgBeg == ArgEnd) {
1669      // Trivial default constructor, no codegen required.
1670      assert(D->isDefaultConstructor() &&
1671             "trivial 0-arg ctor not a default ctor");
1672      return;
1673    }
1674
1675    assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
1676    assert(D->isCopyOrMoveConstructor() &&
1677           "trivial 1-arg ctor not a copy/move ctor");
1678
1679    const Expr *E = (*ArgBeg);
1680    QualType Ty = E->getType();
1681    llvm::Value *Src = EmitLValue(E).getAddress();
1682    EmitAggregateCopy(This, Src, Ty);
1683    return;
1684  }
1685
1686  // Non-trivial constructors are handled in an ABI-specific manner.
1687  CGM.getCXXABI().EmitConstructorCall(*this, D, Type, ForVirtualBase,
1688                                      Delegating, This, ArgBeg, ArgEnd);
1689}
1690
/// Emit a call to the copy/move constructor \p D, copying from the
/// already-evaluated address \p Src into \p This.
///
/// The first argument expression in [\p ArgBeg, \p ArgEnd) corresponds to
/// the source object and is NOT re-evaluated; \p Src is passed in its
/// place.  Any remaining arguments (e.g. default arguments) are emitted
/// normally.
void
CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
                                        llvm::Value *This, llvm::Value *Src,
                                        CallExpr::const_arg_iterator ArgBeg,
                                        CallExpr::const_arg_iterator ArgEnd) {
  if (D->isTrivial()) {
    assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
    assert(D->isCopyOrMoveConstructor() &&
           "trivial 1-arg ctor not a copy/move ctor");
    // A trivial copy/move is just an aggregate copy of the object.
    EmitAggregateCopy(This, Src, (*ArgBeg)->getType());
    return;
  }
  // Always call the complete-object variant here.
  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, clang::Ctor_Complete);
  assert(D->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  const FunctionProtoType *FPT = D->getType()->getAs<FunctionProtoType>();

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), D->getThisType(getContext()));


  // Push the src ptr.  Cast it to the type of the constructor's first
  // parameter, since Src was evaluated separately from the argument list.
  QualType QT = *(FPT->arg_type_begin());
  llvm::Type *t = CGM.getTypes().ConvertType(QT);
  Src = Builder.CreateBitCast(Src, t);
  Args.add(RValue::get(Src), QT);

  // Skip over first argument (Src).
  ++ArgBeg;
  CallExpr::const_arg_iterator Arg = ArgBeg;
  // Emit the remaining arguments against the remaining prototype parameter
  // types (starting after the source-object parameter).
  for (FunctionProtoType::arg_type_iterator I = FPT->arg_type_begin()+1,
       E = FPT->arg_type_end(); I != E; ++I, ++Arg) {
    assert(Arg != ArgEnd && "Running over edge of argument list!");
    EmitCallArg(Args, *Arg, *I);
  }
  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((Arg == ArgEnd || FPT->isVariadic()) &&
         "Extra arguments in non-variadic function!");
  // If we still have any arguments, emit them using the type of the argument.
  for (; Arg != ArgEnd; ++Arg) {
    QualType ArgType = Arg->getType();
    EmitCallArg(Args, *Arg, ArgType);
  }

  EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, RequiredArgs::All),
           Callee, ReturnValueSlot(), Args, D);
}
1742
/// Emit a call to the \p CtorType variant of \p Ctor, forwarding the
/// current function's own parameters \p Args to the callee.
///
/// Used to delegate from one structor variant to another (the caller's
/// 'this' is reloaded; a VTT argument is synthesized if the callee needs
/// one; all explicit parameters are forwarded unchanged).
void
CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                                CXXCtorType CtorType,
                                                const FunctionArgList &Args,
                                                SourceLocation Loc) {
  CallArgList DelegateArgs;

  FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
  assert(I != E && "no parameters to constructor");

  // this
  DelegateArgs.add(RValue::get(LoadCXXThis()), (*I)->getType());
  ++I;

  // vtt
  if (llvm::Value *VTT = GetVTTParameter(GlobalDecl(Ctor, CtorType),
                                         /*ForVirtualBase=*/false,
                                         /*Delegating=*/true)) {
    QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy);
    DelegateArgs.add(RValue::get(VTT), VoidPP);

    // If the current function also takes a VTT parameter, skip it in Args
    // so it isn't forwarded again as an explicit argument.
    if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
      assert(I != E && "cannot skip vtt parameter, already done with args");
      assert((*I)->getType() == VoidPP && "skipping parameter not of vtt type");
      ++I;
    }
  }

  // Explicit arguments.
  for (; I != E; ++I) {
    const VarDecl *param = *I;
    // FIXME: per-argument source location
    EmitDelegateCallArg(DelegateArgs, param, Loc);
  }

  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(Ctor, CtorType);
  EmitCall(CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor, CtorType),
           Callee, ReturnValueSlot(), DelegateArgs, Ctor);
}
1782
namespace {
  /// An EH cleanup that destroys an object via the matching destructor
  /// variant, with Delegating=true.  Pushed after a delegating constructor
  /// call so the fully-constructed object is destroyed if a later part of
  /// the constructor throws.
  struct CallDelegatingCtorDtor : EHScopeStack::Cleanup {
    const CXXDestructorDecl *Dtor; // Destructor to invoke.
    llvm::Value *Addr;             // Address of the object to destroy.
    CXXDtorType Type;              // Which destructor variant to call.

    CallDelegatingCtorDtor(const CXXDestructorDecl *D, llvm::Value *Addr,
                           CXXDtorType Type)
      : Dtor(D), Addr(Addr), Type(Type) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false,
                                /*Delegating=*/true, Addr);
    }
  };
}
1799
/// Emit the body of a delegating constructor: evaluate the (single)
/// delegating initializer, which constructs the complete object at 'this',
/// and arrange for cleanup if an exception follows.
void
CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                                  const FunctionArgList &Args) {
  assert(Ctor->isDelegatingConstructor());

  llvm::Value *ThisPtr = LoadCXXThis();

  // Treat the delegated-to construction as an aggregate initialization
  // of the object at 'this'.
  QualType Ty = getContext().getTagDeclType(Ctor->getParent());
  CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
  AggValueSlot AggSlot =
    AggValueSlot::forAddr(ThisPtr, Alignment, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased);

  // The delegating initializer is the first (and only) initializer.
  EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);

  // If an exception is thrown later in the enclosing constructor, the
  // already-constructed object must be destroyed with the destructor
  // variant matching the constructor variant being emitted.
  const CXXRecordDecl *ClassDecl = Ctor->getParent();
  if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) {
    CXXDtorType Type =
      CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base;

    EHStack.pushCleanup<CallDelegatingCtorDtor>(EHCleanup,
                                                ClassDecl->getDestructor(),
                                                ThisPtr, Type);
  }
}
1827
/// Emit a call to the \p Type variant of destructor \p DD on the object at
/// \p This.
///
/// \param ForVirtualBase true if destroying a virtual base subobject
/// \param Delegating true if this call delegates from another variant
void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
                                            CXXDtorType Type,
                                            bool ForVirtualBase,
                                            bool Delegating,
                                            llvm::Value *This) {
  GlobalDecl GD(DD, Type);
  // Compute the VTT argument, if the callee requires one (may be null).
  llvm::Value *VTT = GetVTTParameter(GD, ForVirtualBase, Delegating);
  llvm::Value *Callee = 0;
  // Under -fapple-kext, try the kext virtual-destructor path first; it may
  // produce no callee, in which case we fall back to the direct address.
  if (getLangOpts().AppleKext)
    Callee = BuildAppleKextVirtualDestructorCall(DD, Type,
                                                 DD->getParent());

  if (!Callee)
    Callee = CGM.GetAddrOfCXXDestructor(DD, Type);

  // The ABI may need to adjust 'this' before a virtual destructor call.
  if (DD->isVirtual())
    This = CGM.getCXXABI().adjustThisArgumentForVirtualCall(*this, GD, This);

  // FIXME: Provide a source location here.
  EmitCXXMemberCall(DD, SourceLocation(), Callee, ReturnValueSlot(), This,
                    VTT, getContext().getPointerType(getContext().VoidPtrTy),
                    0, 0);
}
1851
namespace {
  /// A cleanup that runs the complete-object destructor on an object,
  /// used for destructor-based cleanups pushed by PushDestructorCleanup.
  struct CallLocalDtor : EHScopeStack::Cleanup {
    const CXXDestructorDecl *Dtor; // Destructor to invoke.
    llvm::Value *Addr;             // Address of the object to destroy.

    CallLocalDtor(const CXXDestructorDecl *D, llvm::Value *Addr)
      : Dtor(D), Addr(Addr) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                /*ForVirtualBase=*/false,
                                /*Delegating=*/false, Addr);
    }
  };
}
1867
/// Push a cleanup that calls destructor \p D on \p Addr, on both the
/// normal and the exceptional exit paths of the current scope.
void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D,
                                            llvm::Value *Addr) {
  EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr);
}
1872
1873void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
1874  CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl();
1875  if (!ClassDecl) return;
1876  if (ClassDecl->hasTrivialDestructor()) return;
1877
1878  const CXXDestructorDecl *D = ClassDecl->getDestructor();
1879  assert(D && D->isUsed() && "destructor not marked as used!");
1880  PushDestructorCleanup(D, Addr);
1881}
1882
/// Set the vtable pointer of one base subobject of the object currently
/// being constructed or destructed.
///
/// \param Base the subobject whose vtable pointer to initialize
/// \param NearestVBase the nearest enclosing virtual base, if any
/// \param OffsetFromNearestVBase the subobject's offset from that
///   virtual base
/// \param VTableClass the class whose structor is being emitted
void
CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
                                         const CXXRecordDecl *NearestVBase,
                                         CharUnits OffsetFromNearestVBase,
                                         const CXXRecordDecl *VTableClass) {
  // Compute the address point.  The ABI may return null, meaning this
  // subobject needs no vtable pointer stored here.
  bool NeedsVirtualOffset;
  llvm::Value *VTableAddressPoint =
      CGM.getCXXABI().getVTableAddressPointInStructor(
          *this, VTableClass, Base, NearestVBase, NeedsVirtualOffset);
  if (!VTableAddressPoint)
    return;

  // Compute where to store the address point.
  llvm::Value *VirtualOffset = 0;
  CharUnits NonVirtualOffset = CharUnits::Zero();

  if (NeedsVirtualOffset) {
    // We need to use the virtual base offset offset because the virtual base
    // might have a different offset in the most derived class.
    VirtualOffset = CGM.getCXXABI().GetVirtualBaseClassOffset(*this,
                                                              LoadCXXThis(),
                                                              VTableClass,
                                                              NearestVBase);
    NonVirtualOffset = OffsetFromNearestVBase;
  } else {
    // We can just use the base offset in the complete class.
    NonVirtualOffset = Base.getBaseOffset();
  }

  // Apply the offsets to 'this' to get the subobject's address.
  llvm::Value *VTableField = LoadCXXThis();

  if (!NonVirtualOffset.isZero() || VirtualOffset)
    VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField,
                                                  NonVirtualOffset,
                                                  VirtualOffset);

  // Finally, store the address point, tagging the store with the
  // vtable-pointer TBAA info.
  llvm::Type *AddressPointPtrTy =
    VTableAddressPoint->getType()->getPointerTo();
  VTableField = Builder.CreateBitCast(VTableField, AddressPointPtrTy);
  llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
  CGM.DecorateInstruction(Store, CGM.getTBAAInfoForVTablePtr());
}
1928
/// Recursively initialize the vtable pointers of \p Base and of all of its
/// dynamic base-class subobjects, visiting each virtual base only once.
///
/// \param BaseIsNonVirtualPrimaryBase true if \p Base is a non-virtual
///   primary base, whose vtable pointer was already set along with its
///   parent's
/// \param VBases the virtual bases visited so far
void
CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
                                          const CXXRecordDecl *NearestVBase,
                                          CharUnits OffsetFromNearestVBase,
                                          bool BaseIsNonVirtualPrimaryBase,
                                          const CXXRecordDecl *VTableClass,
                                          VisitedVirtualBasesSetTy& VBases) {
  // If this base is a non-virtual primary base the address point has already
  // been set.
  if (!BaseIsNonVirtualPrimaryBase) {
    // Initialize the vtable pointer for this base.
    InitializeVTablePointer(Base, NearestVBase, OffsetFromNearestVBase,
                            VTableClass);
  }

  const CXXRecordDecl *RD = Base.getBase();

  // Traverse bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    CXXRecordDecl *BaseDecl
      = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // Ignore classes without a vtable.
    if (!BaseDecl->isDynamicClass())
      continue;

    CharUnits BaseOffset;
    CharUnits BaseOffsetFromNearestVBase;
    bool BaseDeclIsNonVirtualPrimaryBase;

    if (I->isVirtual()) {
      // Check if we've visited this virtual base before.
      if (!VBases.insert(BaseDecl))
        continue;

      // A virtual base is located via the most derived class's layout,
      // and becomes the new nearest virtual base for its own bases.
      const ASTRecordLayout &Layout =
        getContext().getASTRecordLayout(VTableClass);

      BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      BaseOffsetFromNearestVBase = CharUnits::Zero();
      BaseDeclIsNonVirtualPrimaryBase = false;
    } else {
      // A non-virtual base sits at a fixed offset within this base.
      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

      BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
      BaseOffsetFromNearestVBase =
        OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
      BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
    }

    InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset),
                             I->isVirtual() ? BaseDecl : NearestVBase,
                             BaseOffsetFromNearestVBase,
                             BaseDeclIsNonVirtualPrimaryBase,
                             VTableClass, VBases);
  }
}
1987
/// Initialize all of the vtable pointers of the object of type \p RD
/// currently being constructed or destructed.
void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
  // Ignore classes without a vtable.
  if (!RD->isDynamicClass())
    return;

  // Initialize the vtable pointers for this class and all of its bases.
  VisitedVirtualBasesSetTy VBases;
  InitializeVTablePointers(BaseSubobject(RD, CharUnits::Zero()),
                           /*NearestVBase=*/0,
                           /*OffsetFromNearestVBase=*/CharUnits::Zero(),
                           /*BaseIsNonVirtualPrimaryBase=*/false, RD, VBases);

  // Classes with virtual bases may need additional ABI-specific member
  // initialization.
  if (RD->getNumVBases())
    CGM.getCXXABI().initializeHiddenVirtualInheritanceMembers(*this, RD);
}
2003
2004llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
2005                                           llvm::Type *Ty) {
2006  llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo());
2007  llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable");
2008  CGM.DecorateInstruction(VTable, CGM.getTBAAInfoForVTablePtr());
2009  return VTable;
2010}
2011
2012
2013// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
2014// quite what we want.
2015static const Expr *skipNoOpCastsAndParens(const Expr *E) {
2016  while (true) {
2017    if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
2018      E = PE->getSubExpr();
2019      continue;
2020    }
2021
2022    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
2023      if (CE->getCastKind() == CK_NoOp) {
2024        E = CE->getSubExpr();
2025        continue;
2026      }
2027    }
2028    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
2029      if (UO->getOpcode() == UO_Extension) {
2030        E = UO->getSubExpr();
2031        continue;
2032      }
2033    }
2034    return E;
2035  }
2036}
2037
2038bool
2039CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base,
2040                                                   const CXXMethodDecl *MD) {
2041  // When building with -fapple-kext, all calls must go through the vtable since
2042  // the kernel linker can do runtime patching of vtables.
2043  if (getLangOpts().AppleKext)
2044    return false;
2045
2046  // If the most derived class is marked final, we know that no subclass can
2047  // override this member function and so we can devirtualize it. For example:
2048  //
2049  // struct A { virtual void f(); }
2050  // struct B final : A { };
2051  //
2052  // void f(B *b) {
2053  //   b->f();
2054  // }
2055  //
2056  const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
2057  if (MostDerivedClassDecl->hasAttr<FinalAttr>())
2058    return true;
2059
2060  // If the member function is marked 'final', we know that it can't be
2061  // overridden and can therefore devirtualize it.
2062  if (MD->hasAttr<FinalAttr>())
2063    return true;
2064
2065  // Similarly, if the class itself is marked 'final' it can't be overridden
2066  // and we can therefore devirtualize the member function call.
2067  if (MD->getParent()->hasAttr<FinalAttr>())
2068    return true;
2069
2070  Base = skipNoOpCastsAndParens(Base);
2071  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
2072    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
2073      // This is a record decl. We know the type and can devirtualize it.
2074      return VD->getType()->isRecordType();
2075    }
2076
2077    return false;
2078  }
2079
2080  // We can devirtualize calls on an object accessed by a class member access
2081  // expression, since by C++11 [basic.life]p6 we know that it can't refer to
2082  // a derived class object constructed in the same location.
2083  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base))
2084    if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl()))
2085      return VD->getType()->isRecordType();
2086
2087  // We can always devirtualize calls on temporary object expressions.
2088  if (isa<CXXConstructExpr>(Base))
2089    return true;
2090
2091  // And calls on bound temporaries.
2092  if (isa<CXXBindTemporaryExpr>(Base))
2093    return true;
2094
2095  // Check if this is a call expr that returns a record type.
2096  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
2097    return CE->getCallReturnType()->isRecordType();
2098
2099  // We can't devirtualize the call.
2100  return false;
2101}
2102
2103llvm::Value *
2104CodeGenFunction::EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
2105                                             const CXXMethodDecl *MD,
2106                                             llvm::Value *This) {
2107  llvm::FunctionType *fnType =
2108    CGM.getTypes().GetFunctionType(
2109                             CGM.getTypes().arrangeCXXMethodDeclaration(MD));
2110
2111  if (MD->isVirtual() && !CanDevirtualizeMemberFunctionCall(E->getArg(0), MD))
2112    return CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, fnType);
2113
2114  return CGM.GetAddrOfFunction(MD, fnType);
2115}
2116
/// Emit a call to a lambda's call operator \p callOperator with the
/// already-built argument list \p callArgs, and propagate its result as
/// the return value of the current function.
void CodeGenFunction::EmitForwardingCallToLambda(
                                      const CXXMethodDecl *callOperator,
                                      CallArgList &callArgs) {
  // Get the address of the call operator.
  const CGFunctionInfo &calleeFnInfo =
    CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
  llvm::Value *callee =
    CGM.GetAddrOfFunction(GlobalDecl(callOperator),
                          CGM.getTypes().GetFunctionType(calleeFnInfo));

  // Prepare the return slot.  If the callee returns indirectly (and the
  // value isn't scalar-evaluated), reuse this function's own return slot
  // so the callee writes the result in place.
  const FunctionProtoType *FPT =
    callOperator->getType()->castAs<FunctionProtoType>();
  QualType resultType = FPT->getResultType();
  ReturnValueSlot returnSlot;
  if (!resultType->isVoidType() &&
      calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      !hasScalarEvaluationKind(calleeFnInfo.getReturnType()))
    returnSlot = ReturnValueSlot(ReturnValue, resultType.isVolatileQualified());

  // We don't need to separately arrange the call arguments because
  // the call can't be variadic anyway --- it's impossible to forward
  // variadic arguments.

  // Now emit our call.
  RValue RV = EmitCall(calleeFnInfo, callee, returnSlot,
                       callArgs, callOperator);

  // If necessary, copy the returned value into the slot.
  if (!resultType->isVoidType() && returnSlot.isNull())
    EmitReturnOfRValue(RV, resultType);
  else
    EmitBranchThroughCleanup(ReturnBlock);
}
2151
/// Emit the body of a block that wraps a (non-generic) lambda: recover the
/// captured lambda object and forward the block's parameters to the
/// lambda's call operator.
void CodeGenFunction::EmitLambdaBlockInvokeBody() {
  const BlockDecl *BD = BlockInfo->getBlockDecl();
  // The lambda object is recovered from the block's first capture.
  const VarDecl *variable = BD->capture_begin()->getVariable();
  const CXXRecordDecl *Lambda = variable->getType()->getAsCXXRecordDecl();

  // Start building arguments for forwarding call
  CallArgList CallArgs;

  // The captured lambda object serves as 'this' for the call operator.
  QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
  llvm::Value *ThisPtr = GetAddrOfBlockDecl(variable, false);
  CallArgs.add(RValue::get(ThisPtr), ThisType);

  // Add the rest of the parameters.
  for (BlockDecl::param_const_iterator I = BD->param_begin(),
       E = BD->param_end(); I != E; ++I) {
    ParmVarDecl *param = *I;
    EmitDelegateCallArg(CallArgs, param, param->getLocStart());
  }
  assert(!Lambda->isGenericLambda() &&
            "generic lambda interconversion to block not implemented");
  EmitForwardingCallToLambda(Lambda->getLambdaCallOperator(), CallArgs);
}
2174
2175void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) {
2176  if (cast<CXXMethodDecl>(CurCodeDecl)->isVariadic()) {
2177    // FIXME: Making this work correctly is nasty because it requires either
2178    // cloning the body of the call operator or making the call operator forward.
2179    CGM.ErrorUnsupported(CurCodeDecl, "lambda conversion to variadic function");
2180    return;
2181  }
2182
2183  EmitFunctionBody(Args, cast<FunctionDecl>(CurGD.getDecl())->getBody());
2184}
2185
/// Emit the body of a lambda's static invoker \p MD: build an argument
/// list (with an undef 'this') and forward it to the lambda's call
/// operator, resolving the right specialization for generic lambdas.
void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
  const CXXRecordDecl *Lambda = MD->getParent();

  // Start building arguments for forwarding call
  CallArgList CallArgs;

  // Pass an undefined value for 'this'; the call operator of a lambda that
  // is convertible to a function pointer is not expected to inspect it.
  QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
  llvm::Value *ThisPtr = llvm::UndefValue::get(getTypes().ConvertType(ThisType));
  CallArgs.add(RValue::get(ThisPtr), ThisType);

  // Add the rest of the parameters.
  for (FunctionDecl::param_const_iterator I = MD->param_begin(),
       E = MD->param_end(); I != E; ++I) {
    ParmVarDecl *param = *I;
    EmitDelegateCallArg(CallArgs, param, param->getLocStart());
  }
  const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator();
  // For a generic lambda, find the corresponding call operator specialization
  // to which the call to the static-invoker shall be forwarded.
  if (Lambda->isGenericLambda()) {
    assert(MD->isFunctionTemplateSpecialization());
    const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs();
    FunctionTemplateDecl *CallOpTemplate = CallOp->getDescribedFunctionTemplate();
    void *InsertPos = 0;
    FunctionDecl *CorrespondingCallOpSpecialization =
        CallOpTemplate->findSpecialization(TAL->data(), TAL->size(), InsertPos);
    assert(CorrespondingCallOpSpecialization);
    CallOp = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization);
  }
  EmitForwardingCallToLambda(CallOp, CallArgs);
}
2217
2218void CodeGenFunction::EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD) {
2219  if (MD->isVariadic()) {
2220    // FIXME: Making this work correctly is nasty because it requires either
2221    // cloning the body of the call operator or making the call operator forward.
2222    CGM.ErrorUnsupported(MD, "lambda conversion to variadic function");
2223    return;
2224  }
2225
2226  EmitLambdaDelegatingInvokeBody(MD);
2227}
2228