//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;

namespace {
struct MemberCallInfo {
  RequiredArgs ReqArgs;
  // Number of prefix arguments for the call. Ignores the `this` pointer.
  unsigned PrefixSize;
};
}

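// Shared helper for member and operator calls: pushes the 'this' pointer,
// an optional implicit parameter (e.g. the VTT), and then the remaining call
// arguments, and reports the RequiredArgs and prefix-argument count that the
// caller needs to arrange the call.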
static MemberCallInfo
commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
                                  llvm::Value *This, llvm::Value *ImplicitParam,
                                  QualType ImplicitParamTy, const CallExpr *CE,
                                  CallArgList &Args, CallArgList *RtlArgs) {
  assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
         isa<CXXOperatorCallExpr>(CE));
  assert(MD->isInstance() &&
         "Trying to emit a member or operator call expr on a static method!");

  // Push the this ptr.
  const CXXRecordDecl *RD =
      CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(MD);
  Args.add(RValue::get(This), CGF.getTypes().DeriveThisType(RD, MD));

  // If there is an implicit parameter (e.g. VTT), emit it.
  if (ImplicitParam) {
    Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
  }

  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
  unsigned PrefixSize = Args.size() - 1;

  // And the rest of the call args.
  if (RtlArgs) {
    // Special case: if the caller emitted the arguments right-to-left already
    // (prior to emitting the *this argument), we're done. This happens for
    // assignment operators.
    Args.addFrom(*RtlArgs);
  } else if (CE) {
    // Special case: skip first argument of CXXOperatorCall (it is "this").
    unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
    CGF.EmitCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip),
                     CE->getDirectCallee());
  } else {
    assert(
        FPT->getNumParams() == 0 &&
        "No CallExpr specified for function with non-zero number of arguments");
  }
  return {required, PrefixSize};
}
77
78RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
79    const CXXMethodDecl *MD, const CGCallee &Callee,
80    ReturnValueSlot ReturnValue,
81    llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
82    const CallExpr *CE, CallArgList *RtlArgs) {
83  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
84  CallArgList Args;
85  MemberCallInfo CallInfo = commonEmitCXXMemberOrOperatorCall(
86      *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args, RtlArgs);
87  auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall(
88      Args, FPT, CallInfo.ReqArgs, CallInfo.PrefixSize);
89  return EmitCall(FnInfo, Callee, ReturnValue, Args, nullptr,
90                  CE ? CE->getExprLoc() : SourceLocation());
91}
92
93RValue CodeGenFunction::EmitCXXDestructorCall(
94    GlobalDecl Dtor, const CGCallee &Callee, llvm::Value *This, QualType ThisTy,
95    llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *CE) {
96  const CXXMethodDecl *DtorDecl = cast<CXXMethodDecl>(Dtor.getDecl());
97
98  assert(!ThisTy.isNull());
99  assert(ThisTy->getAsCXXRecordDecl() == DtorDecl->getParent() &&
100         "Pointer/Object mixup");
101
102  LangAS SrcAS = ThisTy.getAddressSpace();
103  LangAS DstAS = DtorDecl->getMethodQualifiers().getAddressSpace();
104  if (SrcAS != DstAS) {
105    QualType DstTy = DtorDecl->getThisType();
106    llvm::Type *NewType = CGM.getTypes().ConvertType(DstTy);
107    This = getTargetHooks().performAddrSpaceCast(*this, This, SrcAS, DstAS,
108                                                 NewType);
109  }
110
111  CallArgList Args;
112  commonEmitCXXMemberOrOperatorCall(*this, DtorDecl, This, ImplicitParam,
113                                    ImplicitParamTy, CE, Args, nullptr);
114  return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee,
115                  ReturnValueSlot(), Args, nullptr,
116                  CE ? CE->getExprLoc() : SourceLocation{});
117}
118
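// Emit a pseudo-destructor expression such as 'p->~T()' for a scalar T.
// Under ARC, destroying a __strong or __weak lvalue actually releases the
// object; otherwise the only effect is evaluating the base expression.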
RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
                                            const CXXPseudoDestructorExpr *E) {
  QualType DestroyedType = E->getDestroyedType();
  if (DestroyedType.hasStrongOrWeakObjCLifetime()) {
    // Automatic Reference Counting:
    //   If the pseudo-expression names a retainable object with weak or
    //   strong lifetime, the object shall be released.
    Expr *BaseExpr = E->getBase();
    Address BaseValue = Address::invalid();
    Qualifiers BaseQuals;

    // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
    if (E->isArrow()) {
      BaseValue = EmitPointerWithAlignment(BaseExpr);
      const auto *PTy = BaseExpr->getType()->castAs<PointerType>();
      BaseQuals = PTy->getPointeeType().getQualifiers();
    } else {
      LValue BaseLV = EmitLValue(BaseExpr);
      BaseValue = BaseLV.getAddress(*this);
      QualType BaseTy = BaseExpr->getType();
      BaseQuals = BaseTy.getQualifiers();
    }

    switch (DestroyedType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong:
      EmitARCRelease(Builder.CreateLoad(BaseValue,
                        DestroyedType.isVolatileQualified()),
                     ARCPreciseLifetime);
      break;

    case Qualifiers::OCL_Weak:
      EmitARCDestroyWeak(BaseValue);
      break;
    }
  } else {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    EmitIgnoredExpr(E->getBase());
  }

  return RValue::get(nullptr);
}

static CXXRecordDecl *getCXXRecord(const Expr *E) {
  QualType T = E->getType();
  if (const PointerType *PTy = T->getAs<PointerType>())
    T = PTy->getPointeeType();
  const RecordType *Ty = T->castAs<RecordType>();
  return cast<CXXRecordDecl>(Ty->getDecl());
}

// Note: This function also emits constructor calls to support the MSVC
// extension allowing explicit constructor function calls.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    CGCallee callee =
        CGCallee::forDirect(CGM.GetAddrOfFunction(MD), GlobalDecl(MD));
    return EmitCall(getContext().getPointerType(MD->getType()), callee, CE,
                    ReturnValue);
  }

  bool HasQualifier = ME->hasQualifier();
  NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr;
  bool IsArrow = ME->isArrow();
  const Expr *Base = ME->getBase();

  return EmitCXXMemberOrOperatorMemberCallExpr(
      CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base);
}

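// Common path for member and overloaded-operator member calls: decides
// whether the call can be devirtualized, handles cases that are trivial for
// codegen (trivial assignment, trivial destructors, and the MSVC
// explicit-constructor-call extension), and otherwise builds the callee and
// emits the call.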
RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
    const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
    bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
    const Expr *Base) {
  assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE));

  // Compute the object pointer.
  bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier;

  const CXXMethodDecl *DevirtualizedMethod = nullptr;
  if (CanUseVirtualCall &&
      MD->getDevirtualizedMethod(Base, getLangOpts().AppleKext)) {
    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
    assert(DevirtualizedMethod);
    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
    const Expr *Inner = Base->ignoreParenBaseCasts();
    if (DevirtualizedMethod->getReturnType().getCanonicalType() !=
        MD->getReturnType().getCanonicalType())
      // If the return types are not the same, this might be a case where more
      // code needs to run to compensate for it. For example, the derived
      // method might return a type that inherits from the return type of MD
      // and has a prefix.
      // For now we just avoid devirtualizing these covariant cases.
      DevirtualizedMethod = nullptr;
    else if (getCXXRecord(Inner) == DevirtualizedClass)
      // If the class of the Inner expression is where the dynamic method
      // is defined, build the this pointer from it.
      Base = Inner;
    else if (getCXXRecord(Base) != DevirtualizedClass) {
      // If the method is defined in a class that is not the best dynamic
      // one or the one of the full expression, we would have to build
      // a derived-to-base cast to compute the correct this pointer, but
      // we don't have support for that yet, so do a virtual call.
      DevirtualizedMethod = nullptr;
    }
  }

  bool TrivialForCodegen =
      MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion());
  bool TrivialAssignment =
      TrivialForCodegen &&
      (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
      !MD->getParent()->mayInsertExtraPadding();

  // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment
  // operator before the LHS.
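  // For example, in 'a = f()', C++17 requires 'f()' to be evaluated before
  // the lvalue 'a'.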
  CallArgList RtlArgStorage;
  CallArgList *RtlArgs = nullptr;
  LValue TrivialAssignmentRHS;
  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
    if (OCE->isAssignmentOp()) {
      if (TrivialAssignment) {
        TrivialAssignmentRHS = EmitLValue(CE->getArg(1));
      } else {
        RtlArgs = &RtlArgStorage;
        EmitCallArgs(*RtlArgs, MD->getType()->castAs<FunctionProtoType>(),
                     drop_begin(CE->arguments(), 1), CE->getDirectCallee(),
                     /*ParamsToSkip*/0, EvaluationOrder::ForceRightToLeft);
      }
    }
  }

  LValue This;
  if (IsArrow) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address ThisValue = EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
    This = MakeAddrLValue(ThisValue, Base->getType(), BaseInfo, TBAAInfo);
  } else {
    This = EmitLValue(Base);
  }

  if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
    // This is the MSVC p->Ctor::Ctor(...) extension. We assume that's
    // constructing a new complete object of type Ctor.
    assert(!RtlArgs);
    assert(ReturnValue.isNull() && "Constructor shouldn't have return value");
    CallArgList Args;
    commonEmitCXXMemberOrOperatorCall(
        *this, Ctor, This.getPointer(*this), /*ImplicitParam=*/nullptr,
        /*ImplicitParamTy=*/QualType(), CE, Args, nullptr);

    EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                           /*Delegating=*/false, This.getAddress(*this), Args,
                           AggValueSlot::DoesNotOverlap, CE->getExprLoc(),
                           /*NewPointerIsChecked=*/false);
    return RValue::get(nullptr);
  }

  if (TrivialForCodegen) {
    if (isa<CXXDestructorDecl>(MD))
      return RValue::get(nullptr);

    if (TrivialAssignment) {
      // We don't like to generate the trivial copy/move assignment operator
      // when it isn't necessary; just produce the proper effect here.
      // It's important that we use the result of EmitLValue here rather than
      // emitting call arguments, in order to preserve TBAA information from
      // the RHS.
      LValue RHS = isa<CXXOperatorCallExpr>(CE)
                       ? TrivialAssignmentRHS
                       : EmitLValue(*CE->arg_begin());
      EmitAggregateAssign(This, RHS, CE->getType());
      return RValue::get(This.getPointer(*this));
    }

    assert(MD->getParent()->mayInsertExtraPadding() &&
           "unknown trivial member function");
  }

  // Compute the function type we're calling.
  const CXXMethodDecl *CalleeDecl =
      DevirtualizedMethod ? DevirtualizedMethod : MD;
  const CGFunctionInfo *FInfo = nullptr;
  if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
        GlobalDecl(Dtor, Dtor_Complete));
  else
    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);

  llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);

  // C++11 [class.mfct.non-static]p2:
  //   If a non-static member function of a class X is called for an object that
  //   is not of type X, or of a type derived from X, the behavior is undefined.
  SourceLocation CallLoc;
  ASTContext &C = getContext();
  if (CE)
    CallLoc = CE->getExprLoc();

  SanitizerSet SkippedChecks;
  if (const auto *CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    auto *IOA = CMCE->getImplicitObjectArgument();
    bool IsImplicitObjectCXXThis = IsWrappedCXXThis(IOA);
    if (IsImplicitObjectCXXThis)
      SkippedChecks.set(SanitizerKind::Alignment, true);
    if (IsImplicitObjectCXXThis || isa<DeclRefExpr>(IOA))
      SkippedChecks.set(SanitizerKind::Null, true);
  }
  EmitTypeCheck(CodeGenFunction::TCK_MemberCall, CallLoc,
                This.getPointer(*this),
                C.getRecordType(CalleeDecl->getParent()),
                /*Alignment=*/CharUnits::Zero(), SkippedChecks);

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
  bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;

  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl)) {
    assert(CE->arg_begin() == CE->arg_end() &&
           "Destructor shouldn't have explicit parameters");
    assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
    if (UseVirtualCall) {
      CGM.getCXXABI().EmitVirtualDestructorCall(*this, Dtor, Dtor_Complete,
                                                This.getAddress(*this),
                                                cast<CXXMemberCallExpr>(CE));
    } else {
      GlobalDecl GD(Dtor, Dtor_Complete);
      CGCallee Callee;
      if (getLangOpts().AppleKext && Dtor->isVirtual() && HasQualifier)
        Callee = BuildAppleKextVirtualCall(Dtor, Qualifier, Ty);
      else if (!DevirtualizedMethod)
        Callee =
            CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD, FInfo, Ty), GD);
      else {
        Callee = CGCallee::forDirect(CGM.GetAddrOfFunction(GD, Ty), GD);
      }

      QualType ThisTy =
          IsArrow ? Base->getType()->getPointeeType() : Base->getType();
      EmitCXXDestructorCall(GD, Callee, This.getPointer(*this), ThisTy,
                            /*ImplicitParam=*/nullptr,
                            /*ImplicitParamTy=*/QualType(), CE);
    }
    return RValue::get(nullptr);
  }

  // FIXME: Uses of 'MD' past this point need to be audited. We may need to use
  // 'CalleeDecl' instead.

  CGCallee Callee;
  if (UseVirtualCall) {
    Callee = CGCallee::forVirtual(CE, MD, This.getAddress(*this), Ty);
  } else {
    if (SanOpts.has(SanitizerKind::CFINVCall) &&
        MD->getParent()->isDynamicClass()) {
      llvm::Value *VTable;
      const CXXRecordDecl *RD;
      std::tie(VTable, RD) = CGM.getCXXABI().LoadVTablePtr(
          *this, This.getAddress(*this), CalleeDecl->getParent());
      EmitVTablePtrCheckForCall(RD, VTable, CFITCK_NVCall, CE->getBeginLoc());
    }

    if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
      Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
    else if (!DevirtualizedMethod)
      Callee =
          CGCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), GlobalDecl(MD));
    else {
      Callee =
          CGCallee::forDirect(CGM.GetAddrOfFunction(DevirtualizedMethod, Ty),
                              GlobalDecl(DevirtualizedMethod));
    }
  }

  if (MD->isVirtual()) {
    Address NewThisAddr =
        CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
            *this, CalleeDecl, This.getAddress(*this), UseVirtualCall);
    This.setAddress(NewThisAddr);
  }

  return EmitCXXMemberOrOperatorCall(
      CalleeDecl, Callee, ReturnValue, This.getPointer(*this),
      /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs);
}

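// Emit a call through a pointer to member function, e.g. '(obj.*pmf)(args)'
// (BO_PtrMemD) or '(ptr->*pmf)(args)' (BO_PtrMemI). The ABI loads the actual
// callee and adjusts 'this' as needed for virtual member pointers.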
RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const auto *MPT = MemFnExpr->getType()->castAs<MemberPointerType>();
  const auto *FPT = MPT->getPointeeType()->castAs<FunctionProtoType>();
  const auto *RD =
      cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());

  // Emit the 'this' pointer.
  Address This = Address::invalid();
  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitPointerWithAlignment(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress(*this);

  EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.getPointer(),
                QualType(MPT->getClass(), 0));

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Ask the ABI to load the callee.  Note that This is modified.
  llvm::Value *ThisPtrForCall = nullptr;
  CGCallee Callee =
    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This,
                                             ThisPtrForCall, MemFnPtr, MPT);

  CallArgList Args;

  QualType ThisType =
    getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(ThisPtrForCall), ThisType);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);

  // And the rest of the call args
  EmitCallArgs(Args, FPT, E->arguments());
  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required,
                                                      /*PrefixSize=*/0),
                  Callee, ReturnValue, Args, nullptr, E->getExprLoc());
}

RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  return EmitCXXMemberOrOperatorMemberCallExpr(
      E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
      /*IsArrow=*/false, E->getArg(0));
}

RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
                                               ReturnValueSlot ReturnValue) {
  return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
}

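// Zero-initialize the non-virtual portion of a base-class subobject before
// its constructor runs. In the MS ABI the subobject may contain vbptrs that
// the most derived class has already installed, so the zeroing is split into
// stores that skip over each vbptr rather than one big memset.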
static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
                                            Address DestPtr,
                                            const CXXRecordDecl *Base) {
  if (Base->isEmpty())
    return;

  DestPtr = CGF.Builder.CreateElementBitCast(DestPtr, CGF.Int8Ty);

  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
  CharUnits NVSize = Layout.getNonVirtualSize();

  // We cannot simply zero-initialize the entire base sub-object if vbptrs are
  // present; they are initialized by the most derived class before calling the
  // constructor.
  SmallVector<std::pair<CharUnits, CharUnits>, 1> Stores;
  Stores.emplace_back(CharUnits::Zero(), NVSize);

  // Each store is split by the existence of a vbptr.
  CharUnits VBPtrWidth = CGF.getPointerSize();
  std::vector<CharUnits> VBPtrOffsets =
      CGF.CGM.getCXXABI().getVBPtrOffsets(Base);
  for (CharUnits VBPtrOffset : VBPtrOffsets) {
    // Stop before we hit any virtual base pointers located in virtual bases.
    if (VBPtrOffset >= NVSize)
      break;
    std::pair<CharUnits, CharUnits> LastStore = Stores.pop_back_val();
    CharUnits LastStoreOffset = LastStore.first;
    CharUnits LastStoreSize = LastStore.second;

    CharUnits SplitBeforeOffset = LastStoreOffset;
    CharUnits SplitBeforeSize = VBPtrOffset - SplitBeforeOffset;
    assert(!SplitBeforeSize.isNegative() && "negative store size!");
    if (!SplitBeforeSize.isZero())
      Stores.emplace_back(SplitBeforeOffset, SplitBeforeSize);

    CharUnits SplitAfterOffset = VBPtrOffset + VBPtrWidth;
    CharUnits SplitAfterSize = LastStoreSize - SplitAfterOffset;
    assert(!SplitAfterSize.isNegative() && "negative store size!");
    if (!SplitAfterSize.isZero())
      Stores.emplace_back(SplitAfterOffset, SplitAfterSize);
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  // TODO: isZeroInitializable can be over-conservative in the case where a
  // virtual base contains a member pointer.
  llvm::Constant *NullConstantForBase = CGF.CGM.EmitNullConstantForBase(Base);
  if (!NullConstantForBase->isNullValue()) {
    llvm::GlobalVariable *NullVariable = new llvm::GlobalVariable(
        CGF.CGM.getModule(), NullConstantForBase->getType(),
        /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage,
        NullConstantForBase, Twine());

    CharUnits Align = std::max(Layout.getNonVirtualAlignment(),
                               DestPtr.getAlignment());
    NullVariable->setAlignment(Align.getAsAlign());

    Address SrcPtr = Address(CGF.EmitCastToVoidPtr(NullVariable), Align);

    // Get and call the appropriate llvm.memcpy overload.
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
      CharUnits StoreOffset = Store.first;
      CharUnits StoreSize = Store.second;
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
      CGF.Builder.CreateMemCpy(
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
          CGF.Builder.CreateConstInBoundsByteGEP(SrcPtr, StoreOffset),
          StoreSizeVal);
    }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  } else {
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
      CharUnits StoreOffset = Store.first;
      CharUnits StoreSize = Store.second;
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
      CGF.Builder.CreateMemSet(
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
          CGF.Builder.getInt8(0), StoreSizeVal);
    }
  }
}

void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless destination is
  // already zeroed.
  if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
    case CXXConstructExpr::CK_Complete:
      EmitNullInitialization(Dest.getAddress(), E->getType());
      break;
    case CXXConstructExpr::CK_VirtualBase:
    case CXXConstructExpr::CK_NonVirtualBase:
      EmitNullBaseClassInitialization(*this, Dest.getAddress(),
                                      CD->getParent());
      break;
    }
  }

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  // The temporary check is required because Sema sets this on NRVO
  // returns.
  if (getLangOpts().ElideConstructors && E->isElidable()) {
    assert(getContext().hasSameUnqualifiedType(E->getType(),
                                               E->getArg(0)->getType()));
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
      EmitAggExpr(E->getArg(0), Dest);
      return;
    }
  }

  if (const ArrayType *arrayType
        = getContext().getAsArrayType(E->getType())) {
    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E,
                               Dest.isSanitizerChecked());
  } else {
    CXXCtorType Type = Ctor_Complete;
    bool ForVirtualBase = false;
    bool Delegating = false;

    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
      // We should be emitting a constructor; GlobalDecl will assert this
      Type = CurGD.getCtorType();
      Delegating = true;
      break;

    case CXXConstructExpr::CK_Complete:
      Type = Ctor_Complete;
      break;

    case CXXConstructExpr::CK_VirtualBase:
      ForVirtualBase = true;
      LLVM_FALLTHROUGH;

    case CXXConstructExpr::CK_NonVirtualBase:
      Type = Ctor_Base;
    }

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E);
  }
}

void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest, Address Src,
                                                 const Expr *Exp) {
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME: Do I still need this for a copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E);
}

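// Compute the size of the array cookie (the ABI-specified header that records
// the element count ahead of the allocation) required for a given new[]
// expression, or zero if no cookie is needed.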
static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}

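// Compute the size argument for a new-expression: for arrays this is
// numElements * elementSize + cookieSize, checked for overflow. On overflow
// the size is forced to all-ones (SIZE_MAX) so that operator new fails.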
static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
                                        const CXXNewExpr *e,
                                        unsigned minElements,
                                        llvm::Value *&numElements,
                                        llvm::Value *&sizeWithoutCookie) {
  QualType type = e->getAllocatedType();

  if (!e->isArray()) {
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie
      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
  }

  // The width of size_t.
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();

  // Figure out the cookie size.
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  numElements =
    ConstantEmitter(CGF).tryEmitAbstract(*e->getArraySize(), e->getType());
  if (!numElements)
    numElements = CGF.EmitScalarExpr(*e->getArraySize());
  assert(isa<llvm::IntegerType>(numElements->getType()));

  // The number of elements can have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t.  That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
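  // For example, 'new int[n]' with a 4-byte int and no cookie computes
  // n * 4; if n is INT_MAX on a 32-bit target, the multiply overflows and
  // the size becomes all-ones instead.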
  bool isSigned
    = (*e->getArraySize())->getType()->isSignedIntegerOrEnumerationType();
  llvm::IntegerType *numElementsType
    = cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT
             = CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

  // This will be a size_t.
  llvm::Value *size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *numElementsC =
        dyn_cast<llvm::ConstantInt>(numElements)) {
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t.  If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
    else if (numElementsWidth > sizeWidth &&
             numElementsWidth - sizeWidth > count.countLeadingZeros())
      hasAnyOverflow = true;

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // If there is a brace-initializer, we cannot allocate fewer elements than
    // there are initializers. If we do, that's treated like an overflow.
    if (adjustedCount.ult(minElements))
      hasAnyOverflow = true;

    // Scale numElements by that.  This might overflow, but we don't
    // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
    hasAnyOverflow |= overflow;

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie.  This shouldn't be
      // used if there was overflow.
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail.
    if (hasAnyOverflow) {
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
    }

  // Otherwise, we might need to use the overflow intrinsics.
  } else {
    // There are up to five conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //    numElements is larger than something representable in size_t;
    // 3) if minElements > 0, we need to check whether numElements is smaller
    //    than that;
    // 4) we need to compute
    //      sizeWithoutCookie := numElements * typeSizeMultiplier
    //    and check whether it overflows; and
    // 5) if we need a cookie, we need to compute
    //      size := sizeWithoutCookie + cookieSize
    //    and check whether it overflows.

    llvm::Value *hasOverflow = nullptr;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold(numElementsWidth, 1);
      threshold <<= sizeWidth;

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow.  Otherwise, we have to do it here. But at least
      // in this case, we can subsume the >= minElements check.
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);

    if (minElements) {
      // Don't allow allocation of fewer elements than we have initializers.
      if (!hasOverflow) {
        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));
      } else if (numElementsWidth > sizeWidth) {
        // The other existing overflow subsumes this check.
        // We do an unsigned comparison, since any signed value < -1 is
        // taken care of either above or below.
        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
                          CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements)));
      }
    }

    size = numElements;

    // Multiply by the type size if necessary.  This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary.  Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      llvm::Function *umul_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);

      llvm::Value *tsmV =
        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
          CGF.Builder.CreateCall(umul_with_overflow, {size, tsmV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      llvm::Function *uadd_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
          CGF.Builder.CreateCall(uadd_with_overflow, {size, cookieSizeV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(hasOverflow,
                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
                                      size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}

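// Emit the initializer for a single object: either the one object of a
// non-array new, or one element of a new[] array, dispatching on whether the
// allocated type is scalar, complex, or aggregate.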
static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
                                    QualType AllocType, Address NewPtr,
                                    AggValueSlot::Overlap_t MayOverlap) {
  // FIXME: Refactor with EmitExprAsInit.
  switch (CGF.getEvaluationKind(AllocType)) {
  case TEK_Scalar:
    CGF.EmitScalarInit(Init, nullptr,
                       CGF.MakeAddrLValue(NewPtr, AllocType), false);
    return;
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType),
                                  /*isInit*/ true);
    return;
  case TEK_Aggregate: {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased,
                              MayOverlap, AggValueSlot::IsNotZeroed,
                              AggValueSlot::IsSanitizerChecked);
    CGF.EmitAggExpr(Init, Slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

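// Emit the initializer for a new[] allocation: first any explicit init-list
// elements, then the remaining elements via (in order of preference) a single
// memset, one constructor-call loop, or a generic per-element loop, with
// partial-destruction cleanups in case an initializer throws.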
void CodeGenFunction::EmitNewArrayInitializer(
    const CXXNewExpr *E, QualType ElementType, llvm::Type *ElementTy,
    Address BeginPtr, llvm::Value *NumElements,
    llvm::Value *AllocSizeWithoutCookie) {
  // If we have a type with trivial initialization and no initializer,
  // there's nothing to do.
  if (!E->hasInitializer())
    return;

  Address CurPtr = BeginPtr;

  unsigned InitListElements = 0;

  const Expr *Init = E->getInitializer();
  Address EndOfInit = Address::invalid();
  QualType::DestructionKind DtorKind = ElementType.isDestructedType();
  EHScopeStack::stable_iterator Cleanup;
  llvm::Instruction *CleanupDominator = nullptr;

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType);
  CharUnits ElementAlign =
    BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);

  // Attempt to perform zero-initialization using memset.
  auto TryMemsetInitialization = [&]() -> bool {
    // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
    // we can initialize with a memset to -1.
    if (!CGM.getTypes().isZeroInitializable(ElementType))
      return false;

    // Optimization: since zero initialization will just set the memory
    // to all zeroes, generate a single memset to do it in one shot.

    // Subtract out the size of any elements we've already initialized.
    auto *RemainingSize = AllocSizeWithoutCookie;
    if (InitListElements) {
      // We know this can't overflow; we check this when doing the allocation.
      auto *InitializedSize = llvm::ConstantInt::get(
          RemainingSize->getType(),
          getContext().getTypeSizeInChars(ElementType).getQuantity() *
              InitListElements);
      RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
    }

    // Create the memset.
    Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false);
    return true;
  };

  // If the initializer is an initializer list, first do the explicit elements.
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
    // Initializing from a (braced) string literal is a special case; the init
    // list element does not initialize a (single) array element.
    if (ILE->isStringLiteralInit()) {
      // Initialize the initial portion of length equal to that of the string
      // literal. The allocation must be for at least this much; we emitted a
      // check for that earlier.
      AggValueSlot Slot =
          AggValueSlot::forAddr(CurPtr, ElementType.getQualifiers(),
                                AggValueSlot::IsDestructed,
                                AggValueSlot::DoesNotNeedGCBarriers,
                                AggValueSlot::IsNotAliased,
                                AggValueSlot::DoesNotOverlap,
                                AggValueSlot::IsNotZeroed,
                                AggValueSlot::IsSanitizerChecked);
      EmitAggExpr(ILE->getInit(0), Slot);

      // Move past these elements.
      InitListElements =
          cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
              ->getSize().getZExtValue();
      CurPtr =
          Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(),
                                            Builder.getSize(InitListElements),
                                            "string.init.end"),
                  CurPtr.getAlignment().alignmentAtOffset(InitListElements *
                                                          ElementSize));

      // Zero out the rest, if any remain.
      llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
      if (!ConstNum || !ConstNum->equalsInt(InitListElements)) {
        bool OK = TryMemsetInitialization();
        (void)OK;
        assert(OK && "couldn't memset character type?");
      }
      return;
    }

    InitListElements = ILE->getNumInits();

    // If this is a multi-dimensional array new, we will initialize multiple
    // elements with each init list element.
    QualType AllocType = E->getAllocatedType();
    if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
            AllocType->getAsArrayTypeUnsafe())) {
      ElementTy = ConvertTypeForMem(AllocType);
      CurPtr = Builder.CreateElementBitCast(CurPtr, ElementTy);
      InitListElements *= getContext().getConstantArrayElementCount(CAT);
    }

    // Enter a partial-destruction Cleanup if necessary.
    if (needsEHCleanup(DtorKind)) {
      // In principle we could tell the Cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      EndOfInit = CreateTempAlloca(BeginPtr.getType(), getPointerAlign(),
                                   "array.init.end");
      CleanupDominator = Builder.CreateStore(BeginPtr.getPointer(), EndOfInit);
      pushIrregularPartialArrayCleanup(BeginPtr.getPointer(), EndOfInit,
                                       ElementType, ElementAlign,
                                       getDestroyer(DtorKind));
      Cleanup = EHStack.stable_begin();
    }

    CharUnits StartAlign = CurPtr.getAlignment();
    for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (EndOfInit.isValid()) {
        auto FinishedPtr =
          Builder.CreateBitCast(CurPtr.getPointer(), BeginPtr.getType());
        Builder.CreateStore(FinishedPtr, EndOfInit);
      }
      // FIXME: If the last initializer is an incomplete initializer list for
      // an array, and we have an array filler, we can fold together the two
      // initialization loops.
      StoreAnyExprIntoOneUnit(*this, ILE->getInit(i),
                              ILE->getInit(i)->getType(), CurPtr,
                              AggValueSlot::DoesNotOverlap);
      CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(),
                                                 Builder.getSize(1),
                                                 "array.exp.next"),
                       StartAlign.alignmentAtOffset((i + 1) * ElementSize));
    }

    // The remaining elements are filled with the array filler expression.
    Init = ILE->getArrayFiller();

    // Extract the initializer for the individual array elements by pulling
    // out the array filler from all the nested initializer lists. This avoids
    // generating a nested loop for the initialization.
    while (Init && Init->getType()->isConstantArrayType()) {
      auto *SubILE = dyn_cast<InitListExpr>(Init);
      if (!SubILE)
        break;
      assert(SubILE->getNumInits() == 0 && "explicit inits in array filler?");
      Init = SubILE->getArrayFiller();
    }

    // Switch back to initializing one base element at a time.
    CurPtr = Builder.CreateBitCast(CurPtr, BeginPtr.getType());
  }

  // If all elements have already been initialized, skip any further
  // initialization.
  llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
  if (ConstNum && ConstNum->getZExtValue() <= InitListElements) {
    // If there was a Cleanup, deactivate it.
    if (CleanupDominator)
      DeactivateCleanupBlock(Cleanup, CleanupDominator);
    return;
  }

  assert(Init && "have trailing elements to initialize but no initializer");

  // If this is a constructor call, try to optimize it out, and failing that
  // emit a single loop to initialize all remaining elements.
  if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
    CXXConstructorDecl *Ctor = CCE->getConstructor();
    if (Ctor->isTrivial()) {
      // If the new-expression did not specify value-initialization, then
      // there is no initialization.
      if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
        return;

      if (TryMemsetInitialization())
        return;
    }

    // Store the new Cleanup position for irregular Cleanups.
    //
    // FIXME: Share this cleanup with the constructor call emission rather than
    // having it create a cleanup of its own.
    if (EndOfInit.isValid())
      Builder.CreateStore(CurPtr.getPointer(), EndOfInit);

    // Emit a constructor call loop to initialize the remaining elements.
    if (InitListElements)
      NumElements = Builder.CreateSub(
          NumElements,
          llvm::ConstantInt::get(NumElements->getType(), InitListElements));
    EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE,
                               /*NewPointerIsChecked*/true,
                               CCE->requiresZeroInitialization());
    return;
  }

  // If this is value-initialization, we can usually use memset.
  ImplicitValueInitExpr IVIE(ElementType);
  if (isa<ImplicitValueInitExpr>(Init)) {
    if (TryMemsetInitialization())
      return;

    // Switch to an ImplicitValueInitExpr for the element type. This handles
    // only one case: multidimensional array new of pointers to members. In
    // all other cases, we already have an initializer for the array element.
    Init = &IVIE;
  }

  // At this point we should have found an initializer for the individual
  // elements of the array.
  assert(getContext().hasSameUnqualifiedType(ElementType, Init->getType()) &&
         "got wrong type of element to initialize");

  // If we have an empty initializer list, we can usually use memset.
  if (auto *ILE = dyn_cast<InitListExpr>(Init))
    if (ILE->getNumInits() == 0 && TryMemsetInitialization())
      return;

  // If we have a struct whose every field is value-initialized, we can
  // usually use memset.
  if (auto *ILE = dyn_cast<InitListExpr>(Init)) {
    if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
      if (RType->getDecl()->isStruct()) {
        unsigned NumElements = 0;
        if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RType->getDecl()))
          NumElements = CXXRD->getNumBases();
        for (auto *Field : RType->getDecl()->fields())
          if (!Field->isUnnamedBitfield())
            ++NumElements;
        // FIXME: Recurse into nested InitListExprs.
        if (ILE->getNumInits() == NumElements)
          for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
            if (!isa<ImplicitValueInitExpr>(ILE->getInit(i)))
              --NumElements;
        if (ILE->getNumInits() == NumElements && TryMemsetInitialization())
          return;
      }
    }
  }

  // Create the loop blocks.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *LoopBB = createBasicBlock("new.loop");
  llvm::BasicBlock *ContBB = createBasicBlock("new.loop.end");

  // Find the end of the array, hoisted out of the loop.
  llvm::Value *EndPtr =
    Builder.CreateInBoundsGEP(BeginPtr.getPointer(), NumElements, "array.end");

  // If the number of elements isn't constant, we have to now check if there is
  // anything left to initialize.
  if (!ConstNum) {
    llvm::Value *IsEmpty =
      Builder.CreateICmpEQ(CurPtr.getPointer(), EndPtr, "array.isempty");
    Builder.CreateCondBr(IsEmpty, ContBB, LoopBB);
  }

  // Enter the loop.
  EmitBlock(LoopBB);

  // Set up the current-element phi.
  llvm::PHINode *CurPtrPhi =
    Builder.CreatePHI(CurPtr.getType(), 2, "array.cur");
  CurPtrPhi->addIncoming(CurPtr.getPointer(), EntryBB);

  CurPtr = Address(CurPtrPhi, ElementAlign);

  // Store the new Cleanup position for irregular Cleanups.
  if (EndOfInit.isValid())
    Builder.CreateStore(CurPtr.getPointer(), EndOfInit);

  // Enter a partial-destruction Cleanup if necessary.
  if (!CleanupDominator && needsEHCleanup(DtorKind)) {
    pushRegularPartialArrayCleanup(BeginPtr.getPointer(), CurPtr.getPointer(),
                                   ElementType, ElementAlign,
                                   getDestroyer(DtorKind));
    Cleanup = EHStack.stable_begin();
    CleanupDominator = Builder.CreateUnreachable();
  }

  // Emit the initializer into this element.
  StoreAnyExprIntoOneUnit(*this, Init, Init->getType(), CurPtr,
                          AggValueSlot::DoesNotOverlap);

  // Leave the Cleanup if we entered one.
  if (CleanupDominator) {
    DeactivateCleanupBlock(Cleanup, CleanupDominator);
    CleanupDominator->eraseFromParent();
  }

  // Advance to the next element by adjusting the pointer type as necessary.
  llvm::Value *NextPtr =
    Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr.getPointer(), 1,
                                       "array.next");

  // Check whether we've gotten to the end of the array and, if so,
  // exit the loop.
  llvm::Value *IsEnd = Builder.CreateICmpEQ(NextPtr, EndPtr, "array.atend");
  Builder.CreateCondBr(IsEnd, ContBB, LoopBB);
  CurPtrPhi->addIncoming(NextPtr, Builder.GetInsertBlock());

  EmitBlock(ContBB);
}

static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               QualType ElementType, llvm::Type *ElementTy,
                               Address NewPtr, llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  ApplyDebugLocation DL(CGF, E);
  if (E->isArray())
    CGF.EmitNewArrayInitializer(E, ElementType, ElementTy, NewPtr, NumElements,
                                AllocSizeWithoutCookie);
  else if (const Expr *Init = E->getInitializer())
    StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr,
                            AggValueSlot::DoesNotOverlap);
}

1299/// Emit a call to an operator new or operator delete function, as implicitly
1300/// created by new-expressions and delete-expressions.
1301static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
1302                                const FunctionDecl *CalleeDecl,
1303                                const FunctionProtoType *CalleeType,
1304                                const CallArgList &Args) {
1305  llvm::CallBase *CallOrInvoke;
1306  llvm::Constant *CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl);
1307  CGCallee Callee = CGCallee::forDirect(CalleePtr, GlobalDecl(CalleeDecl));
1308  RValue RV =
1309      CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
1310                       Args, CalleeType, /*ChainCall=*/false),
1311                   Callee, ReturnValueSlot(), Args, &CallOrInvoke);
1312
1313  /// C++1y [expr.new]p10:
1314  ///   [In a new-expression,] an implementation is allowed to omit a call
1315  ///   to a replaceable global allocation function.
1316  ///
1317  /// We model such elidable calls with the 'builtin' attribute.
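  /// For example (assuming an Itanium-family ABI), the pair of calls from
  /// 'delete new int;' lowers to _Znwm and _ZdlPv marked 'builtin', which
  /// permits the optimizer to elide the allocation and deallocation together.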
  llvm::Function *Fn = dyn_cast<llvm::Function>(CalleePtr);
  if (CalleeDecl->isReplaceableGlobalAllocationFunction() &&
      Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
    CallOrInvoke->addAttribute(llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::Builtin);
  }

  return RV;
}

RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
                                                 const CallExpr *TheCall,
                                                 bool IsDelete) {
  CallArgList Args;
  EmitCallArgs(Args, Type->getParamTypes(), TheCall->arguments());
  // Find the allocation or deallocation function that we're calling.
  ASTContext &Ctx = getContext();
  DeclarationName Name = Ctx.DeclarationNames
      .getCXXOperatorName(IsDelete ? OO_Delete : OO_New);

  for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
    if (auto *FD = dyn_cast<FunctionDecl>(Decl))
      if (Ctx.hasSameType(FD->getType(), QualType(Type, 0)))
        return EmitNewDeleteCall(*this, FD, Type, Args);
  llvm_unreachable("predeclared global operator new/delete is missing");
}

namespace {
/// The parameters to pass to a usual operator delete.
struct UsualDeleteParams {
  bool DestroyingDelete = false;
  bool Size = false;
  bool Alignment = false;
};
}

static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *FD) {
  UsualDeleteParams Params;

  const FunctionProtoType *FPT = FD->getType()->castAs<FunctionProtoType>();
  auto AI = FPT->param_type_begin(), AE = FPT->param_type_end();

  // The first argument is always a void*.
  ++AI;

  // The next parameter may be a std::destroying_delete_t.
  if (FD->isDestroyingOperatorDelete()) {
    Params.DestroyingDelete = true;
    assert(AI != AE);
    ++AI;
  }

  // Figure out what other parameters we should be implicitly passing.
  if (AI != AE && (*AI)->isIntegerType()) {
    Params.Size = true;
    ++AI;
  }

  if (AI != AE && (*AI)->isAlignValT()) {
    Params.Alignment = true;
    ++AI;
  }
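  // For example, 'void operator delete(void *, std::size_t,
  // std::align_val_t)' yields Size and Alignment both true, while a
  // destroying 'void T::operator delete(T *, std::destroying_delete_t)'
  // yields only DestroyingDelete.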

  assert(AI == AE && "unexpected usual deallocation function parameter");
  return Params;
}

namespace {
  /// A cleanup to call the given 'operator delete' function upon abnormal
  /// exit from a new expression. Templated on a traits type that deals with
  /// ensuring that the arguments dominate the cleanup if necessary.
  template<typename Traits>
  class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
    /// Type used to hold llvm::Value*s.
    typedef typename Traits::ValueTy ValueTy;
    /// Type used to hold RValues.
    typedef typename Traits::RValueTy RValueTy;
    struct PlacementArg {
      RValueTy ArgValue;
      QualType ArgType;
    };

    unsigned NumPlacementArgs : 31;
    unsigned PassAlignmentToPlacementDelete : 1;
    const FunctionDecl *OperatorDelete;
    ValueTy Ptr;
    ValueTy AllocSize;
    CharUnits AllocAlign;

    PlacementArg *getPlacementArgs() {
      return reinterpret_cast<PlacementArg *>(this + 1);
    }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(PlacementArg);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete, ValueTy Ptr,
                        ValueTy AllocSize, bool PassAlignmentToPlacementDelete,
                        CharUnits AllocAlign)
      : NumPlacementArgs(NumPlacementArgs),
        PassAlignmentToPlacementDelete(PassAlignmentToPlacementDelete),
        OperatorDelete(OperatorDelete), Ptr(Ptr), AllocSize(AllocSize),
        AllocAlign(AllocAlign) {}

    void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = {Arg, Type};
    }

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      const auto *FPT = OperatorDelete->getType()->castAs<FunctionProtoType>();
      CallArgList DeleteArgs;

      // The first argument is always a void* (or C* for a destroying operator
      // delete for class type C).
      DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(0));

      // Figure out what other parameters we should be implicitly passing.
      UsualDeleteParams Params;
      if (NumPlacementArgs) {
        // A placement deallocation function is implicitly passed an alignment
        // if the placement allocation function was, but is never passed a size.
        Params.Alignment = PassAlignmentToPlacementDelete;
      } else {
        // For a non-placement new-expression, 'operator delete' can take a
        // size and/or an alignment if it has the right parameters.
        Params = getUsualDeleteParams(OperatorDelete);
      }
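      // For example, for 'new (arena) X' with a matching
      // 'operator delete(void *, Arena &)', only the pointer and the
      // replayed placement argument are passed ('Arena' being an
      // illustrative placement-parameter type); a plain 'new X' paired
      // with a sized usual delete also receives AllocSize below.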

      assert(!Params.DestroyingDelete &&
             "should not call destroying delete in a new-expression");

      // The second argument can be a std::size_t (for non-placement delete).
      if (Params.Size)
        DeleteArgs.add(Traits::get(CGF, AllocSize),
                       CGF.getContext().getSizeType());

      // The next (second or third) argument can be a std::align_val_t, which
      // is an enum whose underlying type is std::size_t.
      // FIXME: Use the right type as the parameter type. Note that in a call
      // to operator delete(size_t, ...), we may not have it available.
      if (Params.Alignment)
        DeleteArgs.add(RValue::get(llvm::ConstantInt::get(
                           CGF.SizeTy, AllocAlign.getQuantity())),
                       CGF.getContext().getSizeType());

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
        auto Arg = getPlacementArgs()[I];
        DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType);
      }

      // Call 'operator delete'.
      EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
    }
  };
}

/// Enter a cleanup to call 'operator delete' if the initializer in a
/// new-expression throws.
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
                                  const CXXNewExpr *E,
                                  Address NewPtr,
                                  llvm::Value *AllocSize,
                                  CharUnits AllocAlign,
                                  const CallArgList &NewArgs) {
  unsigned NumNonPlacementArgs = E->passAlignment() ? 2 : 1;

  // If we're not inside a conditional branch, then the cleanup will
  // dominate and we can do the easier (and more efficient) thing.
  if (!CGF.isInConditionalBranch()) {
    struct DirectCleanupTraits {
      typedef llvm::Value *ValueTy;
      typedef RValue RValueTy;
      static RValue get(CodeGenFunction &, ValueTy V) { return RValue::get(V); }
      static RValue get(CodeGenFunction &, RValueTy V) { return V; }
    };

    typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup;

    DirectCleanup *Cleanup = CGF.EHStack
      .pushCleanupWithExtra<DirectCleanup>(EHCleanup,
                                           E->getNumPlacementArgs(),
                                           E->getOperatorDelete(),
                                           NewPtr.getPointer(),
                                           AllocSize,
                                           E->passAlignment(),
                                           AllocAlign);
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
      auto &Arg = NewArgs[I + NumNonPlacementArgs];
      Cleanup->setPlacementArg(I, Arg.getRValue(CGF), Arg.Ty);
    }

    return;
  }

  // Otherwise, we need to save all this stuff.
  DominatingValue<RValue>::saved_type SavedNewPtr =
    DominatingValue<RValue>::save(CGF, RValue::get(NewPtr.getPointer()));
  DominatingValue<RValue>::saved_type SavedAllocSize =
    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));

  struct ConditionalCleanupTraits {
    typedef DominatingValue<RValue>::saved_type ValueTy;
    typedef DominatingValue<RValue>::saved_type RValueTy;
    static RValue get(CodeGenFunction &CGF, ValueTy V) {
      return V.restore(CGF);
    }
  };
  typedef CallDeleteDuringNew<ConditionalCleanupTraits> ConditionalCleanup;

  ConditionalCleanup *Cleanup = CGF.EHStack
    .pushCleanupWithExtra<ConditionalCleanup>(EHCleanup,
                                              E->getNumPlacementArgs(),
                                              E->getOperatorDelete(),
                                              SavedNewPtr,
                                              SavedAllocSize,
                                              E->passAlignment(),
                                              AllocAlign);
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
    auto &Arg = NewArgs[I + NumNonPlacementArgs];
    Cleanup->setPlacementArg(
        I, DominatingValue<RValue>::save(CGF, Arg.getRValue(CGF)), Arg.Ty);
  }

  CGF.initFullExprCleanup();
}

llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());

  // 1. Build a call to the allocation function.
  FunctionDecl *allocator = E->getOperatorNew();

  // If there is a brace-initializer, we cannot allocate fewer elements than
  // there are initializers.
  unsigned minElements = 0;
  if (E->isArray() && E->hasInitializer()) {
    const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer());
    if (ILE && ILE->isStringLiteralInit())
      minElements =
          cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
              ->getSize().getZExtValue();
    else if (ILE)
      minElements = ILE->getNumInits();
  }
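  // For example, 'new int[n]{1, 2, 3}' requires n >= 3, and
  // 'new char[n]{"hi"}' requires n >= 3 (two characters plus the
  // terminating NUL), so minElements is 3 in both cases.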

  llvm::Value *numElements = nullptr;
  llvm::Value *allocSizeWithoutCookie = nullptr;
  llvm::Value *allocSize =
    EmitCXXNewAllocSize(*this, E, minElements, numElements,
                        allocSizeWithoutCookie);
  CharUnits allocAlign = getContext().getTypeAlignInChars(allocType);

  // Emit the allocation call.  If the allocator is a global placement
  // operator, just "inline" it directly.
  Address allocation = Address::invalid();
  CallArgList allocatorArgs;
  if (allocator->isReservedGlobalPlacementOperator()) {
    assert(E->getNumPlacementArgs() == 1);
    const Expr *arg = *E->placement_arguments().begin();

    LValueBaseInfo BaseInfo;
    allocation = EmitPointerWithAlignment(arg, &BaseInfo);

    // The pointer expression will, in many cases, be an opaque void*.
    // In these cases, discard the computed alignment and use the
    // formal alignment of the allocated type.
    if (BaseInfo.getAlignmentSource() != AlignmentSource::Decl)
      allocation = Address(allocation.getPointer(), allocAlign);

    // Set up allocatorArgs for the call to operator delete if it's not
    // the reserved global operator.
    if (E->getOperatorDelete() &&
        !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
      allocatorArgs.add(RValue::get(allocSize), getContext().getSizeType());
      allocatorArgs.add(RValue::get(allocation.getPointer()), arg->getType());
    }

  } else {
    const FunctionProtoType *allocatorType =
      allocator->getType()->castAs<FunctionProtoType>();
    unsigned ParamsToSkip = 0;

    // The allocation size is the first argument.
    QualType sizeType = getContext().getSizeType();
    allocatorArgs.add(RValue::get(allocSize), sizeType);
    ++ParamsToSkip;

    if (allocSize != allocSizeWithoutCookie) {
      CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI.
      allocAlign = std::max(allocAlign, cookieAlign);
    }

    // The allocation alignment may be passed as the second argument.
    if (E->passAlignment()) {
      QualType AlignValT = sizeType;
      if (allocatorType->getNumParams() > 1) {
        AlignValT = allocatorType->getParamType(1);
        assert(getContext().hasSameUnqualifiedType(
                   AlignValT->castAs<EnumType>()->getDecl()->getIntegerType(),
                   sizeType) &&
               "wrong type for alignment parameter");
        ++ParamsToSkip;
      } else {
        // Corner case, passing alignment to 'operator new(size_t, ...)'.
        assert(allocator->isVariadic() && "can't pass alignment to allocator");
      }
      allocatorArgs.add(
          RValue::get(llvm::ConstantInt::get(SizeTy, allocAlign.getQuantity())),
          AlignValT);
    }
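    // For example, given 'struct alignas(32) X {};' (where 32 exceeds the
    // target's default new alignment), 'new X' selects
    // 'operator new(std::size_t, std::align_val_t)' and a constant 32 is
    // added here as the second argument.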

    // FIXME: Why do we not pass a CalleeDecl here?
    EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
                 /*AC*/AbstractCallee(), /*ParamsToSkip*/ParamsToSkip);

    RValue RV =
      EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);

    // Set !heapallocsite metadata on the call to operator new.
    if (getDebugInfo())
      if (auto *newCall = dyn_cast<llvm::CallBase>(RV.getScalarVal()))
        getDebugInfo()->addHeapAllocSiteMetadata(newCall, allocType,
                                                 E->getExprLoc());

    // If this was a call to a global replaceable allocation function that does
    // not take an alignment argument, the allocator is known to produce
    // storage that's suitably aligned for any object that fits, up to a known
    // threshold. Otherwise assume it's suitably aligned for the allocated type.
    CharUnits allocationAlign = allocAlign;
    if (!E->passAlignment() &&
        allocator->isReplaceableGlobalAllocationFunction()) {
      unsigned AllocatorAlign = llvm::PowerOf2Floor(std::min<uint64_t>(
          Target.getNewAlign(), getContext().getTypeSize(allocType)));
      allocationAlign = std::max(
          allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign));
    }
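    // For example, with a 128-bit Target.getNewAlign(), 'new double' is
    // known to be 8-byte aligned (the type's own size caps the bound),
    // while an allocation of a 32-byte type is known to be 16-byte aligned.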

    allocation = Address(RV.getScalarVal(), allocationAlign);
  }

  // Emit a null check on the allocation result if the allocation
  // function is allowed to return null (because it has a non-throwing
  // exception spec or is the reserved placement new) and we have an
  // interesting initializer or will be running sanitizers on the
  // initialization.
  bool nullCheck = E->shouldNullCheckAllocation() &&
                   (!allocType.isPODType(getContext()) || E->hasInitializer() ||
                    sanitizePerformTypeCheck());

  llvm::BasicBlock *nullCheckBB = nullptr;
  llvm::BasicBlock *contBB = nullptr;

  // The null-check means that the initializer is conditionally
  // evaluated.
  ConditionalEvaluation conditional(*this);

  if (nullCheck) {
    conditional.begin(*this);

    nullCheckBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
    contBB = createBasicBlock("new.cont");

    llvm::Value *isNull =
      Builder.CreateIsNull(allocation.getPointer(), "new.isnull");
    Builder.CreateCondBr(isNull, contBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator operatorDeleteCleanup;
  llvm::Instruction *cleanupDominator = nullptr;
  if (E->getOperatorDelete() &&
      !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
    EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocAlign,
                          allocatorArgs);
    operatorDeleteCleanup = EHStack.stable_begin();
    cleanupDominator = Builder.CreateUnreachable();
  }

  assert((allocSize == allocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (allocSize != allocSizeWithoutCookie) {
    assert(E->isArray());
    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
                                                       numElements,
                                                       E, allocType);
  }
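  // For example, under the Itanium C++ ABI, 'new X[n]' for an X with a
  // non-trivial destructor stores n in a cookie preceding the array;
  // InitializeArrayCookie returns the address just past that cookie.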

  llvm::Type *elementTy = ConvertTypeForMem(allocType);
  Address result = Builder.CreateElementBitCast(allocation, elementTy);

  // Pass the pointer through launder.invariant.group so that we do not
  // propagate vptr information that may be associated with the storage's
  // previous type. To avoid breaking LTO between different optimization
  // levels, we do this regardless of optimization level.
  if (CGM.getCodeGenOpts().StrictVTablePointers &&
      allocator->isReservedGlobalPlacementOperator())
    result = Address(Builder.CreateLaunderInvariantGroup(result.getPointer()),
                     result.getAlignment());

  // Emit sanitizer checks for the pointer value now, so that in the case of
  // an array it is checked only once and not at each constructor call. We
  // may have already checked that the pointer is non-null.
  // FIXME: If we have an array cookie and a potentially-throwing allocator,
  // we'll null check the wrong pointer here.
  SanitizerSet SkippedChecks;
  SkippedChecks.set(SanitizerKind::Null, nullCheck);
  EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall,
                E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(),
                result.getPointer(), allocType, result.getAlignment(),
                SkippedChecks, numElements);

  EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
                     allocSizeWithoutCookie);
  if (E->isArray()) {
    // NewPtr is a pointer to the base element type.  If we're
    // allocating an array of arrays, we'll need to cast back to the
    // array pointer type.
    llvm::Type *resultType = ConvertTypeForMem(E->getType());
    if (result.getType() != resultType)
      result = Builder.CreateBitCast(result, resultType);
  }

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
  if (operatorDeleteCleanup.isValid()) {
    DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
    cleanupDominator->eraseFromParent();
  }

  llvm::Value *resultPtr = result.getPointer();
  if (nullCheck) {
    conditional.end(*this);

    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    EmitBlock(contBB);

    llvm::PHINode *PHI = Builder.CreatePHI(resultPtr->getType(), 2);
    PHI->addIncoming(resultPtr, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(resultPtr->getType()),
                     nullCheckBB);

    resultPtr = PHI;
  }

  return resultPtr;
}

void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
                                     llvm::Value *Ptr, QualType DeleteTy,
                                     llvm::Value *NumElements,
                                     CharUnits CookieSize) {
  assert((!NumElements && CookieSize.isZero()) ||
         DeleteFD->getOverloadedOperator() == OO_Array_Delete);

  const auto *DeleteFTy = DeleteFD->getType()->castAs<FunctionProtoType>();
  CallArgList DeleteArgs;

  auto Params = getUsualDeleteParams(DeleteFD);
  auto ParamTypeIt = DeleteFTy->param_type_begin();

  // Pass the pointer itself.
  QualType ArgTy = *ParamTypeIt++;
  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
  DeleteArgs.add(RValue::get(DeletePtr), ArgTy);

  // Pass the std::destroying_delete tag if present.
  if (Params.DestroyingDelete) {
    QualType DDTag = *ParamTypeIt++;
    // Just pass an 'undef'. We expect the tag type to be an empty struct.
    auto *V = llvm::UndefValue::get(getTypes().ConvertType(DDTag));
    DeleteArgs.add(RValue::get(V), DDTag);
  }

  // Pass the size if the delete function has a size_t parameter.
  if (Params.Size) {
    QualType SizeType = *ParamTypeIt++;
    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
    llvm::Value *Size = llvm::ConstantInt::get(ConvertType(SizeType),
                                               DeleteTypeSize.getQuantity());

    // For array new, multiply by the number of elements.
    if (NumElements)
      Size = Builder.CreateMul(Size, NumElements);

    // If there is a cookie, add the cookie size.
    if (!CookieSize.isZero())
      Size = Builder.CreateAdd(
          Size, llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity()));

    DeleteArgs.add(RValue::get(Size), SizeType);
  }
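  // For example, under the Itanium C++ ABI on an LP64 target, 'delete[] p'
  // for an array of 10 ints freed with a sized 'operator delete[]' has an
  // 8-byte cookie, so the size argument is 10 * 4 + 8 = 48.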

  // Pass the alignment if the delete function has an align_val_t parameter.
  if (Params.Alignment) {
    QualType AlignValType = *ParamTypeIt++;
    CharUnits DeleteTypeAlign = getContext().toCharUnitsFromBits(
        getContext().getTypeAlignIfKnown(DeleteTy));
    llvm::Value *Align = llvm::ConstantInt::get(ConvertType(AlignValType),
                                                DeleteTypeAlign.getQuantity());
    DeleteArgs.add(RValue::get(Align), AlignValType);
  }

  assert(ParamTypeIt == DeleteFTy->param_type_end() &&
         "unknown parameter to usual delete function");

  // Emit the call to delete.
  EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
}

namespace {
  /// Calls the given 'operator delete' on a single object.
  struct CallObjectDelete final : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    QualType ElementType;

    CallObjectDelete(llvm::Value *Ptr,
                     const FunctionDecl *OperatorDelete,
                     QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
    }
  };
}

void
CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
                                             llvm::Value *CompletePtr,
                                             QualType ElementType) {
  EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup, CompletePtr,
                                        OperatorDelete, ElementType);
}

/// Emit the code for deleting a single object with a destroying operator
/// delete. If the element type has a non-virtual destructor, Ptr has already
/// been converted to the type of the parameter of 'operator delete'. Otherwise
/// Ptr points to an object of the static type.
static void EmitDestroyingObjectDelete(CodeGenFunction &CGF,
                                       const CXXDeleteExpr *DE, Address Ptr,
                                       QualType ElementType) {
  auto *Dtor = ElementType->getAsCXXRecordDecl()->getDestructor();
  if (Dtor && Dtor->isVirtual())
    CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
                                                Dtor);
  else
    CGF.EmitDeleteCall(DE->getOperatorDelete(), Ptr.getPointer(), ElementType);
}

/// Emit the code for deleting a single object.
/// \return \c true if we started emitting UnconditionalDeleteBlock, \c false
/// if not.
static bool EmitObjectDelete(CodeGenFunction &CGF,
                             const CXXDeleteExpr *DE,
                             Address Ptr,
                             QualType ElementType,
                             llvm::BasicBlock *UnconditionalDeleteBlock) {
  // C++11 [expr.delete]p3:
  //   If the static type of the object to be deleted is different from its
  //   dynamic type, the static type shall be a base class of the dynamic type
  //   of the object to be deleted and the static type shall have a virtual
  //   destructor or the behavior is undefined.
  CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberCall,
                    DE->getExprLoc(), Ptr.getPointer(),
                    ElementType);

  const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
  assert(!OperatorDelete->isDestroyingOperatorDelete());

  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = nullptr;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        bool UseVirtualCall = true;
        const Expr *Base = DE->getArgument();
        if (auto *DevirtualizedDtor =
                dyn_cast_or_null<const CXXDestructorDecl>(
                    Dtor->getDevirtualizedMethod(
                        Base, CGF.CGM.getLangOpts().AppleKext))) {
          UseVirtualCall = false;
          const CXXRecordDecl *DevirtualizedClass =
              DevirtualizedDtor->getParent();
          if (declaresSameEntity(getCXXRecord(Base), DevirtualizedClass)) {
            // Devirtualized to the class of the base type (the type of the
            // whole expression).
            Dtor = DevirtualizedDtor;
          } else {
            // Devirtualized to some other type. Would need to cast the this
            // pointer to that type but we don't have support for that yet, so
            // do a virtual call. FIXME: handle the case where it is
            // devirtualized to the derived type (the type of the inner
            // expression) as in EmitCXXMemberOrOperatorMemberCallExpr.
            UseVirtualCall = true;
          }
        }
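        // For example, given 'struct B { virtual ~B(); };
        // struct D final : B { ~D(); };', a delete through a 'D *' can be
        // devirtualized to call ~D directly, since no class can derive from
        // D; deleting the same object through a 'B *' still requires the
        // virtual call.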
        if (UseVirtualCall) {
          CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
                                                      Dtor);
          return false;
        }
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr.getPointer(),
                                            OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false,
                              /*Delegating=*/false,
                              Ptr, ElementType);
  else if (auto Lifetime = ElementType.getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong:
      CGF.EmitARCDestroyStrong(Ptr, ARCPreciseLifetime);
      break;

    case Qualifiers::OCL_Weak:
      CGF.EmitARCDestroyWeak(Ptr);
      break;
    }
  }

  // When optimizing for size, call 'operator delete' unconditionally.
  if (CGF.CGM.getCodeGenOpts().OptimizeSize > 1) {
    CGF.EmitBlock(UnconditionalDeleteBlock);
    CGF.PopCleanupBlock();
    return true;
  }

  CGF.PopCleanupBlock();
  return false;
}

namespace {
  /// Calls the given 'operator delete' on an array of objects.
  struct CallArrayDelete final : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    llvm::Value *NumElements;
    QualType ElementType;
    CharUnits CookieSize;

    CallArrayDelete(llvm::Value *Ptr,
                    const FunctionDecl *OperatorDelete,
                    llvm::Value *NumElements,
                    QualType ElementType,
                    CharUnits CookieSize)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
        ElementType(ElementType), CookieSize(CookieSize) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType, NumElements,
                         CookieSize);
    }
  };
}

/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const CXXDeleteExpr *E,
                            Address deletedPtr,
                            QualType elementType) {
  llvm::Value *numElements = nullptr;
  llvm::Value *allocatedPtr = nullptr;
  CharUnits cookieSize;
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
                                      numElements, allocatedPtr, cookieSize);

  assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");
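  // For example, under the Itanium C++ ABI, ReadArrayCookie recovers the
  // element count stored in front of the array by array new; allocatedPtr
  // then points at the start of the original allocation (the cookie),
  // which is what must be passed to 'operator delete[]'.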

  // Make sure that we call delete even if one of the dtors throws.
  const FunctionDecl *operatorDelete = E->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           allocatedPtr, operatorDelete,
                                           numElements, elementType,
                                           cookieSize);

  // Destroy the elements.
  if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
    assert(numElements && "no element count for a type with a destructor!");

    CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
    CharUnits elementAlign =
      deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);

    llvm::Value *arrayBegin = deletedPtr.getPointer();
    llvm::Value *arrayEnd =
      CGF.Builder.CreateInBoundsGEP(arrayBegin, numElements, "delete.end");

    // Note that it is legal to allocate a zero-length array, and we
    // can never fold the check away because the length should always
    // come from a cookie.
    CGF.emitArrayDestroy(arrayBegin, arrayEnd, elementType, elementAlign,
                         CGF.getDestroyer(dtorKind),
                         /*checkZeroLength*/ true,
                         CGF.needsEHCleanup(dtorKind));
  }

  // Pop the cleanup block.
  CGF.PopCleanupBlock();
}

void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
  const Expr *Arg = E->getArgument();
  Address Ptr = EmitPointerWithAlignment(Arg);

  // Null check the pointer.
  //
  // We could avoid this null check if we can determine that the object
  // destruction is trivial and doesn't require an array cookie; we can
  // unconditionally perform the operator delete call in that case. For now, we
  // assume that deleted pointers are null rarely enough that it's better to
  // keep the branch. This might be worth revisiting for a -O0 code size win.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Ptr.getPointer(), "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  QualType DeleteTy = E->getDestroyedType();

  // A destroying operator delete overrides the entire operation of the
  // delete expression.
  if (E->getOperatorDelete()->isDestroyingOperatorDelete()) {
    EmitDestroyingObjectDelete(*this, E, Ptr, DeleteTy);
    EmitBlock(DeleteEnd);
    return;
  }

  // We might be deleting a pointer to array.  If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Address(Builder.CreateInBoundsGEP(Ptr.getPointer(), GEP, "del.first"),
                  Ptr.getAlignment());
  }
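  // For example, deleting an 'A (*p)[3][7]' peels both array layers and
  // emits a GEP with indices [0, 0, 0], so Ptr ends up as an 'A*' to the
  // first element before any destructors run.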

  assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
    EmitBlock(DeleteEnd);
  } else {
    if (!EmitObjectDelete(*this, E, Ptr, DeleteTy, DeleteEnd))
      EmitBlock(DeleteEnd);
  }
}

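/// Determine whether a glvalue is (transitively) obtained by dereferencing
/// a pointer, e.g. '*p', 'p[0]', or 'c ? *p : *q'; per the null-check logic
/// below, such typeid operands may require a runtime null check.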
static bool isGLValueFromPointerDeref(const Expr *E) {
  E = E->IgnoreParens();

  if (const auto *CE = dyn_cast<CastExpr>(E)) {
    if (!CE->getSubExpr()->isGLValue())
      return false;
    return isGLValueFromPointerDeref(CE->getSubExpr());
  }

  if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
    return isGLValueFromPointerDeref(OVE->getSourceExpr());

  if (const auto *BO = dyn_cast<BinaryOperator>(E))
    if (BO->getOpcode() == BO_Comma)
      return isGLValueFromPointerDeref(BO->getRHS());

  if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(E))
    return isGLValueFromPointerDeref(ACO->getTrueExpr()) ||
           isGLValueFromPointerDeref(ACO->getFalseExpr());

  // C++11 [expr.sub]p1:
  //   The expression E1[E2] is identical (by definition) to *((E1)+(E2))
  if (isa<ArraySubscriptExpr>(E))
    return true;

  if (const auto *UO = dyn_cast<UnaryOperator>(E))
    if (UO->getOpcode() == UO_Deref)
      return true;

  return false;
}

static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
                                         llvm::Type *StdTypeInfoPtrTy) {
  // Get the vtable pointer.
  Address ThisPtr = CGF.EmitLValue(E).getAddress(CGF);

  QualType SrcRecordTy = E->getType();

  // C++ [class.cdtor]p4:
  //   If the operand of typeid refers to the object under construction or
  //   destruction and the static type of the operand is neither the constructor
  //   or destructor's class nor one of its bases, the behavior is undefined.
  CGF.EmitTypeCheck(CodeGenFunction::TCK_DynamicOperation, E->getExprLoc(),
                    ThisPtr.getPointer(), SrcRecordTy);

  // C++ [expr.typeid]p2:
  //   If the glvalue expression is obtained by applying the unary * operator to
  //   a pointer and the pointer is a null pointer value, the typeid expression
  //   throws the std::bad_typeid exception.
  //
  // However, this paragraph's intent is not clear.  We choose a very generous
  // interpretation which implores us to consider comma operators, conditional
  // operators, parentheses and other such constructs.
  if (CGF.CGM.getCXXABI().shouldTypeidBeNullChecked(
          isGLValueFromPointerDeref(E), SrcRecordTy)) {
    llvm::BasicBlock *BadTypeidBlock =
        CGF.createBasicBlock("typeid.bad_typeid");
    llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr.getPointer());
    CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);

    CGF.EmitBlock(BadTypeidBlock);
    CGF.CGM.getCXXABI().EmitBadTypeidCall(CGF);
    CGF.EmitBlock(EndBlock);
  }

  return CGF.CGM.getCXXABI().EmitTypeid(CGF, SrcRecordTy, ThisPtr,
                                        StdTypeInfoPtrTy);
}

llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  llvm::Type *StdTypeInfoPtrTy =
    ConvertType(E->getType())->getPointerTo();

  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
        CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand(getContext()));
    return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
  }
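  // For example, 'typeid(int)' resolves here to the address of a constant
  // RTTI descriptor; the type-operand form never inspects a vtable at run
  // time.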

  // C++ [expr.typeid]p2:
  //   When typeid is applied to a glvalue expression whose type is a
  //   polymorphic class type, the result refers to a std::type_info object
  //   representing the type of the most derived object (that is, the dynamic
  //   type) to which the glvalue refers.
  if (E->isPotentiallyEvaluated())
    return EmitTypeidFromVTable(*this, E->getExprOperand(),
                                StdTypeInfoPtrTy);

  QualType OperandTy = E->getExprOperand()->getType();
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
                               StdTypeInfoPtrTy);
}

static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
                                          QualType DestTy) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  if (DestTy->isPointerType())
    return llvm::Constant::getNullValue(DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (!CGF.CGM.getCXXABI().EmitBadCastCall(CGF))
    return nullptr;

  CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
  return llvm::UndefValue::get(DestLTy);
}

llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
                                              const CXXDynamicCastExpr *DCE) {
  CGM.EmitExplicitCastExprType(DCE, this);
  QualType DestTy = DCE->getTypeAsWritten();

  QualType SrcTy = DCE->getSubExpr()->getType();

  // C++ [expr.dynamic.cast]p7:
  //   If T is "pointer to cv void," then the result is a pointer to the most
  //   derived object pointed to by v.
  const PointerType *DestPTy = DestTy->getAs<PointerType>();

  bool isDynamicCastToVoid;
  QualType SrcRecordTy;
  QualType DestRecordTy;
  if (DestPTy) {
    isDynamicCastToVoid = DestPTy->getPointeeType()->isVoidType();
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    isDynamicCastToVoid = false;
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }
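  // For example, 'dynamic_cast<void *>(p)' takes the cast-to-void path and
  // yields a pointer to the most derived object, while a reference cast
  // such as 'dynamic_cast<D &>(b)' has no null result to produce and
  // instead throws std::bad_cast on failure.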

  // C++ [class.cdtor]p5:
  //   If the operand of the dynamic_cast refers to the object under
  //   construction or destruction and the static type of the operand is not a
  //   pointer to or object of the constructor or destructor's own class or one
  //   of its bases, the dynamic_cast results in undefined behavior.
  EmitTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr.getPointer(),
                SrcRecordTy);

  if (DCE->isAlwaysNull())
    if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy))
      return T;

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");

  // C++ [expr.dynamic.cast]p4:
  //   If the value of v is a null pointer value in the pointer case, the result
  //   is the null pointer value of type T.
  bool ShouldNullCheckSrcValue =
      CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(SrcTy->isPointerType(),
                                                         SrcRecordTy);

  llvm::BasicBlock *CastNull = nullptr;
  llvm::BasicBlock *CastNotNull = nullptr;
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");

  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock("dynamic_cast.null");
    CastNotNull = createBasicBlock("dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(ThisAddr.getPointer());
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  llvm::Value *Value;
  if (isDynamicCastToVoid) {
    Value = CGM.getCXXABI().EmitDynamicCastToVoid(*this, ThisAddr, SrcRecordTy,
                                                  DestTy);
  } else {
    assert(DestRecordTy->isRecordType() &&
           "destination type must be a record type!");
    Value = CGM.getCXXABI().EmitDynamicCastCall(*this, ThisAddr, SrcRecordTy,
                                                DestTy, DestRecordTy, CastEnd);
    CastNotNull = Builder.GetInsertBlock();
  }

  if (ShouldNullCheckSrcValue) {
    EmitBranch(CastEnd);

    EmitBlock(CastNull);
    EmitBranch(CastEnd);
  }

  EmitBlock(CastEnd);

  if (ShouldNullCheckSrcValue) {
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);

    Value = PHI;
  }

  return Value;
}