//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;
  bool IsResultUnused;

  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }
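  // Like EnsureSlot, but replaces an ignored Dest in place so that later
  // uses of Dest within this emitter see the newly created temporary slot.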
  void EnsureDest(QualType T) {
    if (!Dest.isIgnored()) return;
    Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

  // Calls `Fn` with a valid return value slot, potentially creating a temporary
  // to do so. If a temporary is created, an appropriate copy into `Dest` will
  // be emitted, as will lifetime markers.
  //
  // The given function should take a ReturnValueSlot, and return an RValue that
  // points to said slot.
  void withReturnValueSlot(const Expr *E,
                           llvm::function_ref<RValue(ReturnValueSlot)> Fn);

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
    IsResultUnused(IsResultUnused) { }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents an lvalue, this method emits the address of the lvalue,
  /// then copies the result into the destination slot.
  void EmitAggLoadOfLValue(const Expr *E);

  enum ExprValueKind {
    EVK_RValue,
    EVK_NonRValue
  };

  /// EmitFinalDestCopy - Perform the final copy to the destination, if
  /// desired. SrcValueKind says whether the source comes from an RValue.
  void EmitFinalDestCopy(QualType type, const LValue &src,
                         ExprValueKind SrcValueKind = EVK_NonRValue);
  void EmitFinalDestCopy(QualType type, RValue src);
  void EmitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  void EmitMoveFromReturnSlot(const Expr *E, RValue Src);

  void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                     QualType ArrayQTy, InitListExpr *E);

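  // Whether copying an aggregate of type T needs Objective-C GC write
  // barriers (only when GC is enabled and the type has an object member).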
  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

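  // Dispatch through StmtVisitor after attaching E's debug location, so the
  // IR emitted for this expression is attributed to the right source line.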
  void Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    StmtVisitor<AggExprEmitter>::Visit(E);
  }

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitCoawaitExpr(CoawaitExpr *E) {
    CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
  }
  void VisitCoyieldExpr(CoyieldExpr *E) {
    CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
  }
  void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  void VisitConstantExpr(ConstantExpr *E) {
    return Visit(E->getSubExpr());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);
  void VisitBinCmp(const BinaryOperator *E);
  void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    Visit(E->getSemanticForm());
  }

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                              llvm::Value *outerBegin = nullptr);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    Visit(DAE->getExpr());
  }
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    Visit(DIE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
  void VisitLambdaExpr(LambdaExpr *E);
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    if (E->isGLValue()) {
      LValue LV = CGF.EmitPseudoObjectLValue(E);
      return EmitFinalDestCopy(E->getType(), LV);
    }

    CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
  }

  void VisitVAArgExpr(VAArgExpr *E);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    RValue Res = CGF.EmitAtomicExpr(E);
    EmitFinalDestCopy(E->getType(), Res);
  }
};
}  // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents an lvalue, this method emits the address of the lvalue,
/// then copies the result into the destination slot.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);

  // If the type of the l-value is atomic, then do an atomic load.
  if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
    CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
    return;
  }

  EmitFinalDestCopy(E->getType(), LV);
}

/// True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}

void AggExprEmitter::withReturnValueSlot(
    const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
  QualType RetTy = E->getType();
  bool RequiresDestruction =
      Dest.isIgnored() &&
      RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct;

  // If it makes no observable difference, save a memcpy + temporary.
  //
  // We need to always provide our own temporary if destruction is required.
  // Otherwise, EmitCall will emit its own, notice that it's "unused", and end
  // its lifetime before we have the chance to emit a proper destructor call.
  bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
                 (RequiresDestruction && !Dest.getAddress().isValid());

  Address RetAddr = Address::invalid();
  Address RetAllocaAddr = Address::invalid();

  EHScopeStack::stable_iterator LifetimeEndBlock;
  llvm::Value *LifetimeSizePtr = nullptr;
  llvm::IntrinsicInst *LifetimeStartInst = nullptr;
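  // Emit straight into the destination when we can; otherwise create a
  // lifetime-marked temporary and register a cleanup that ends its lifetime.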
  if (!UseTemp) {
    RetAddr = Dest.getAddress();
  } else {
    RetAddr = CGF.CreateMemTemp(RetTy, "tmp", &RetAllocaAddr);
    uint64_t Size =
        CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy));
    LifetimeSizePtr = CGF.EmitLifetimeStart(Size, RetAllocaAddr.getPointer());
    if (LifetimeSizePtr) {
      LifetimeStartInst =
          cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
      assert(LifetimeStartInst->getIntrinsicID() ==
                 llvm::Intrinsic::lifetime_start &&
             "Last insertion wasn't a lifetime.start?");

      CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
          NormalEHLifetimeMarker, RetAllocaAddr, LifetimeSizePtr);
      LifetimeEndBlock = CGF.EHStack.stable_begin();
    }
  }

  RValue Src =
      EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused));

  if (RequiresDestruction)
    CGF.pushDestroy(RetTy.isDestructedType(), Src.getAggregateAddress(), RetTy);

  if (!UseTemp)
    return;

  assert(Dest.getPointer() != Src.getAggregatePointer());
  EmitFinalDestCopy(E->getType(), Src);

  if (!RequiresDestruction && LifetimeStartInst) {
    // If there's no dtor to run, the copy was the last use of our temporary.
    // Since we're not guaranteed to be in an ExprWithCleanups, clean up
    // eagerly.
    CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
    CGF.EmitLifetimeEnd(LifetimeSizePtr, RetAllocaAddr.getPointer());
  }
}

/// EmitFinalDestCopy - Perform the final copy to the destination, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
  assert(src.isAggregate() && "value must be aggregate value!");
  LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
  EmitFinalDestCopy(type, srcLV, EVK_RValue);
}

/// EmitFinalDestCopy - Perform the final copy to the destination, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src,
                                       ExprValueKind SrcValueKind) {
  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context that doesn't care about the result.  Note that loads
  // from volatile l-values force the existence of a non-ignored
  // destination.
  if (Dest.isIgnored())
    return;

  // Copy non-trivial C structs here.
  LValue DstLV = CGF.MakeAddrLValue(
      Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);

  if (SrcValueKind == EVK_RValue) {
    if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructMoveAssignmentOperator(DstLV, src);
      else
        CGF.callCStructMoveConstructor(DstLV, src);
      return;
    }
  } else {
    if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructCopyAssignmentOperator(DstLV, src);
      else
        CGF.callCStructCopyConstructor(DstLV, src);
      return;
    }
  }

  AggValueSlot srcAgg = AggValueSlot::forLValue(
      src, CGF, AggValueSlot::IsDestructed, needsGC(type),
      AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
  EmitCopy(type, Dest, srcAgg);
}

/// Perform a copy from the source into the destination.
///
/// \param type - the type of the aggregate being copied; qualifiers are
///   ignored
void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
                              const AggValueSlot &src) {
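  // Under Objective-C GC, a collectable aggregate has to be copied through
  // the runtime's memmove entry point so that write barriers are honored.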
  if (dest.requiresGCollection()) {
    CharUnits sz = dest.getPreferredSize(CGF.getContext(), type);
    llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      dest.getAddress(),
                                                      src.getAddress(),
                                                      size);
    return;
  }

  // If the result of the assignment is used, copy the LHS there also.
  // It's volatile if either side is.  Use the minimum alignment of
  // the two sides.
  LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type);
  LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type);
  CGF.EmitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(),
                        dest.isVolatile() || src.isVolatile());
}

/// Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
void
AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
  // Emit an array containing the elements.  The array is externally destructed
  // if the std::initializer_list object is.
  ASTContext &Ctx = CGF.getContext();
  LValue Array = CGF.EmitLValue(E->getSubExpr());
  assert(Array.isSimple() && "initializer_list array not a simple lvalue");
  Address ArrayPtr = Array.getAddress(CGF);

  const ConstantArrayType *ArrayType =
      Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
  assert(ArrayType && "std::initializer_list constructed from non-array");

  // FIXME: Perform the checks on the field types in SemaInit.
  RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
  RecordDecl::field_iterator Field = Record->field_begin();
  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  // Start pointer.
  if (!Field->getType()->isPointerType() ||
      !Ctx.hasSameType(Field->getType()->getPointeeType(),
                       ArrayType->getElementType())) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  AggValueSlot Dest = EnsureSlot(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
  llvm::Value *IdxStart[] = { Zero, Zero };
  llvm::Value *ArrayStart =
      Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxStart, "arraystart");
  CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
  ++Field;

  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  llvm::Value *Size = Builder.getInt(ArrayType->getSize());
  LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  if (Field->getType()->isPointerType() &&
      Ctx.hasSameType(Field->getType()->getPointeeType(),
                      ArrayType->getElementType())) {
    // End pointer.
    llvm::Value *IdxEnd[] = { Zero, Size };
    llvm::Value *ArrayEnd =
        Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxEnd, "arrayend");
    CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
  } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
    // Length.
    CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
  } else {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }
}

/// Determine if E is a trivial array filler, that is, one that is
/// equivalent to zero-initialization.
static bool isTrivialFiller(Expr *E) {
  if (!E)
    return true;

  if (isa<ImplicitValueInitExpr>(E))
    return true;

  if (auto *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits())
      return false;
    return isTrivialFiller(ILE->getArrayFiller());
  }

  if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
    return Cons->getConstructor()->isDefaultConstructor() &&
           Cons->getConstructor()->isTrivial();

  // FIXME: Are there other cases where we can avoid emitting an initializer?
  return false;
}

/// Emit initialization of an array from an initializer list.
void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                                   QualType ArrayQTy, InitListExpr *E) {
  uint64_t NumInitElements = E->getNumInits();

  uint64_t NumArrayElements = AType->getNumElements();
  assert(NumInitElements <= NumArrayElements);

  QualType elementType =
      CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();

  // DestPtr is an array*.  Construct an elementType* by drilling
  // down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = { zero, zero };
  llvm::Value *begin =
    Builder.CreateInBoundsGEP(DestPtr.getPointer(), indices, "arrayinit.begin");

  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
    DestPtr.getAlignment().alignmentOfArrayElement(elementSize);

  // Consider initializing the array by copying from a global. For this to be
  // more efficient than per-element initialization, the size of the elements
  // with explicit initializers should be large enough.
  if (NumInitElements * elementSize.getQuantity() > 16 &&
      elementType.isTriviallyCopyableType(CGF.getContext())) {
    CodeGen::CodeGenModule &CGM = CGF.CGM;
    ConstantEmitter Emitter(CGF);
    LangAS AS = ArrayQTy.getAddressSpace();
    if (llvm::Constant *C = Emitter.tryEmitForInitializer(E, AS, ArrayQTy)) {
      auto GV = new llvm::GlobalVariable(
          CGM.getModule(), C->getType(),
          CGM.isTypeConstant(ArrayQTy, /* ExcludeCtorDtor= */ true),
          llvm::GlobalValue::PrivateLinkage, C, "constinit",
          /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
          CGM.getContext().getTargetAddressSpace(AS));
      Emitter.finalize(GV);
      CharUnits Align = CGM.getContext().getTypeAlignInChars(ArrayQTy);
      GV->setAlignment(Align.getAsAlign());
      EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GV, ArrayQTy, Align));
      return;
    }
  }

  // Exception safety requires us to destroy all the
  // already-constructed members if an initializer throws.
  // For that, we'll need an EH cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  Address endOfInit = Address::invalid();
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = nullptr;
  if (CGF.needsEHCleanup(dtorKind)) {
    // In principle we could tell the cleanup where we are more
    // directly, but the control flow can get so varied here that it
    // would actually be quite complex.  Therefore we go through an
    // alloca.
    endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
                                     "arrayinit.endOfInit");
    cleanupDominator = Builder.CreateStore(begin, endOfInit);
    CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         elementAlign,
                                         CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();

  // Otherwise, remember that we didn't need a cleanup.
  } else {
    dtorKind = QualType::DK_none;
  }

  llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

  // The 'current element to initialize'.  The invariants on this
  // variable are complicated.  Essentially, after each iteration of
  // the loop, it points to the last initialized element, except
  // that it points to the beginning of the array before any
  // elements have been initialized.
  llvm::Value *element = begin;

  // Emit the explicit initializers.
  for (uint64_t i = 0; i != NumInitElements; ++i) {
    // Advance to the next element.
    if (i > 0) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");

      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    LValue elementLV =
      CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
    EmitInitializationToLValue(E->getInit(i), elementLV);
  }

  // Check whether there's a non-trivial array-fill expression.
  Expr *filler = E->getArrayFiller();
  bool hasTrivialFiller = isTrivialFiller(filler);

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression.  We can skip this if we're
  // emitting to zeroed memory.
  if (NumInitElements != NumArrayElements &&
      !(Dest.isZeroed() && hasTrivialFiller &&
        CGF.getTypes().isZeroInitializable(elementType))) {

    // Use an actual loop.  This is basically
    //   do { *array++ = filler; } while (array != end);

    // Advance to the start of the rest of the array.
    if (NumInitElements) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    // Compute the end of the array.
    llvm::Value *end = Builder.CreateInBoundsGEP(begin,
                      llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
                                                 "arrayinit.end");

    llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

    // Jump into the body.
    CGF.EmitBlock(bodyBB);
    llvm::PHINode *currentElement =
      Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    currentElement->addIncoming(element, entryBB);

    // Emit the actual filler expression.
    {
      // C++1z [class.temporary]p5:
      //   when a default constructor is called to initialize an element of
      //   an array with no corresponding initializer [...] the destruction of
      //   every temporary created in a default argument is sequenced before
      //   the construction of the next array element, if any
      CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
      LValue elementLV =
        CGF.MakeAddrLValue(Address(currentElement, elementAlign), elementType);
      if (filler)
        EmitInitializationToLValue(filler, elementLV);
      else
        EmitNullInitializationToLValue(elementLV);
    }

    // Move on to the next element.
    llvm::Value *nextElement =
      Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");

    // Tell the EH cleanup that we finished with the last element.
    if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);

    // Leave the loop if we're done.
    llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                             "arrayinit.done");
    llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    Builder.CreateCondBr(done, endBB, bodyBB);
    currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

    CGF.EmitBlock(endBB);
  }

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
}

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  // If this is a unique OVE, just visit its source expression.
  if (e->isUnique())
    Visit(e->getSourceExpr());
  else
    EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e));
}

void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (Dest.isPotentiallyAliased() &&
      E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitAggExpr(E->getInitializer(), Slot);
}

/// Attempt to look through various unimportant expressions to find a
/// cast of the given kind.
static Expr *findPeephole(Expr *op, CastKind kind) {
  while (true) {
    op = op->IgnoreParens();
    if (CastExpr *castE = dyn_cast<CastExpr>(op)) {
      if (castE->getCastKind() == kind)
        return castE->getSubExpr();
      if (castE->getCastKind() == CK_NoOp)
        continue;
    }
    return nullptr;
  }
}

void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
    CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    // FIXME: Can this actually happen? We have no test coverage for it.
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
                                      CodeGenFunction::TCK_Load);
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(CGF), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    // Evaluate even if the destination is ignored.
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    Address CastPtr =
      Builder.CreateElementBitCast(Dest.getAddress(), CGF.ConvertType(Ty));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

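  // __builtin_bit_cast of an aggregate: copy the object representation of
  // the source into the destination byte-for-byte.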
  case CK_LValueToRValueBitCast: {
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
    Address SourceAddress =
        Builder.CreateElementBitCast(SourceLV.getAddress(CGF), CGF.Int8Ty);
    Address DestAddress =
        Builder.CreateElementBitCast(Dest.getAddress(), CGF.Int8Ty);
    llvm::Value *SizeVal = llvm::ConstantInt::get(
        CGF.SizeTy,
        CGF.getContext().getTypeSizeInChars(E->getType()).getQuantity());
    Builder.CreateMemCpy(DestAddress, SourceAddress, SizeVal);
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                "should have been unpacked before we got here");
  }

  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic: {
    bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);

    // Determine the atomic and value types.
    QualType atomicType = E->getSubExpr()->getType();
    QualType valueType = E->getType();
    if (isToAtomic) std::swap(atomicType, valueType);

    assert(atomicType->isAtomicType());
    assert(CGF.getContext().hasSameUnqualifiedType(valueType,
                          atomicType->castAs<AtomicType>()->getValueType()));

    // Just recurse normally if we're ignoring the result or the
    // atomic type doesn't change representation.
    if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
      return Visit(E->getSubExpr());
    }

    CastKind peepholeTarget =
      (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);

    // These two cases are reverses of each other; try to peephole them.
    if (Expr *op = findPeephole(E->getSubExpr(), peepholeTarget)) {
      assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
                                                     E->getType()) &&
           "peephole significantly changed types?");
      return Visit(op);
    }

    // If we're converting an r-value of non-atomic type to an r-value
    // of atomic type, just emit directly into the relevant sub-object.
    if (isToAtomic) {
      AggValueSlot valueDest = Dest;
      if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
        // Zero-initialize.  (Strictly speaking, we only need to initialize
        // the padding at the end, but this is simpler.)
        if (!Dest.isZeroed())
          CGF.EmitNullInitialization(Dest.getAddress(), atomicType);

        // Build a GEP to refer to the subobject.
        Address valueAddr =
            CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0);
        valueDest = AggValueSlot::forAddr(valueAddr,
                                          valueDest.getQualifiers(),
                                          valueDest.isExternallyDestructed(),
                                          valueDest.requiresGCollection(),
                                          valueDest.isPotentiallyAliased(),
                                          AggValueSlot::DoesNotOverlap,
                                          AggValueSlot::IsZeroed);
      }

      CGF.EmitAggExpr(E->getSubExpr(), valueDest);
      return;
    }

    // Otherwise, we're converting an atomic type to a non-atomic type.
    // Make an atomic temporary, emit into that, and then copy the value out.
    AggValueSlot atomicSlot =
      CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
    CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);

    Address valueAddr = Builder.CreateStructGEP(atomicSlot.getAddress(), 0);
    RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
    return EmitFinalDestCopy(valueType, rvalue);
  }
  case CK_AddressSpaceConversion:
     return Visit(E->getSubExpr());

  case CK_LValueToRValue:
    // If we're loading from a volatile type, force the destination
    // into existence.
    if (E->getSubExpr()->getType().isVolatileQualified()) {
      EnsureDest(E->getType());
      return Visit(E->getSubExpr());
    }

    LLVM_FALLTHROUGH;

  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_BuiltinFnToFnPtr:
  case CK_ZeroToOCLOpaqueType:
  case CK_IntToOCLSampler:
  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToIntegral:
  case CK_IntegralToFixedPoint:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}

void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitCallExpr(E, Slot);
  });
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitObjCMessageExpr(E, Slot);
  });
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}

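// The kinds of scalar comparison needed to synthesize the result of a
// three-way comparison (<=>).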
enum CompareKind {
  CK_Less,
  CK_Greater,
  CK_Equal,
};

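/// Emit a single scalar comparison of the given kind between LHS and RHS,
/// choosing float, signed, or unsigned predicates based on the operand type.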
static llvm::Value *EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF,
                                const BinaryOperator *E, llvm::Value *LHS,
                                llvm::Value *RHS, CompareKind Kind,
                                const char *NameSuffix = "") {
  QualType ArgTy = E->getLHS()->getType();
  if (const ComplexType *CT = ArgTy->getAs<ComplexType>())
    ArgTy = CT->getElementType();

  if (const auto *MPT = ArgTy->getAs<MemberPointerType>()) {
    assert(Kind == CK_Equal &&
           "member pointers may only be compared for equality");
    return CGF.CGM.getCXXABI().EmitMemberPointerComparison(
        CGF, LHS, RHS, MPT, /*IsInequality*/ false);
  }

  // Compute the comparison instructions for the specified comparison kind.
  struct CmpInstInfo {
    const char *Name;
    llvm::CmpInst::Predicate FCmp;
    llvm::CmpInst::Predicate SCmp;
    llvm::CmpInst::Predicate UCmp;
  };
  CmpInstInfo InstInfo = [&]() -> CmpInstInfo {
    using FI = llvm::FCmpInst;
    using II = llvm::ICmpInst;
    switch (Kind) {
    case CK_Less:
      return {"cmp.lt", FI::FCMP_OLT, II::ICMP_SLT, II::ICMP_ULT};
    case CK_Greater:
      return {"cmp.gt", FI::FCMP_OGT, II::ICMP_SGT, II::ICMP_UGT};
    case CK_Equal:
      return {"cmp.eq", FI::FCMP_OEQ, II::ICMP_EQ, II::ICMP_EQ};
    }
    llvm_unreachable("Unrecognised CompareKind enum");
  }();

  if (ArgTy->hasFloatingRepresentation())
    return Builder.CreateFCmp(InstInfo.FCmp, LHS, RHS,
                              llvm::Twine(InstInfo.Name) + NameSuffix);
  if (ArgTy->isIntegralOrEnumerationType() || ArgTy->isPointerType()) {
    auto Inst =
        ArgTy->hasSignedIntegerRepresentation() ? InstInfo.SCmp : InstInfo.UCmp;
    return Builder.CreateICmp(Inst, LHS, RHS,
                              llvm::Twine(InstInfo.Name) + NameSuffix);
  }

  llvm_unreachable("unsupported aggregate binary expression should have "
                   "already been handled");
}

void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
  using llvm::BasicBlock;
  using llvm::PHINode;
  using llvm::Value;
  assert(CGF.getContext().hasSameType(E->getLHS()->getType(),
                                      E->getRHS()->getType()));
  const ComparisonCategoryInfo &CmpInfo =
      CGF.getContext().CompCategories.getInfoForType(E->getType());
  assert(CmpInfo.Record->isTriviallyCopyable() &&
         "cannot copy non-trivially copyable aggregate");

  QualType ArgTy = E->getLHS()->getType();

  if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() &&
      !ArgTy->isNullPtrType() && !ArgTy->isPointerType() &&
      !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType()) {
    return CGF.ErrorUnsupported(E, "aggregate three-way comparison");
  }
  bool IsComplex = ArgTy->isAnyComplexType();

  // Evaluate the operands to the expression and extract their values.
  auto EmitOperand = [&](Expr *E) -> std::pair<Value *, Value *> {
    RValue RV = CGF.EmitAnyExpr(E);
    if (RV.isScalar())
      return {RV.getScalarVal(), nullptr};
    if (RV.isAggregate())
      return {RV.getAggregatePointer(), nullptr};
    assert(RV.isComplex());
    return RV.getComplexVal();
  };
  auto LHSValues = EmitOperand(E->getLHS()),
       RHSValues = EmitOperand(E->getRHS());

  auto EmitCmp = [&](CompareKind K) {
    Value *Cmp = EmitCompare(Builder, CGF, E, LHSValues.first, RHSValues.first,
                             K, IsComplex ? ".r" : "");
    if (!IsComplex)
      return Cmp;
    assert(K == CompareKind::CK_Equal);
    Value *CmpImag = EmitCompare(Builder, CGF, E, LHSValues.second,
                                 RHSValues.second, K, ".i");
    return Builder.CreateAnd(Cmp, CmpImag, "and.eq");
  };
  auto EmitCmpRes = [&](const ComparisonCategoryInfo::ValueInfo *VInfo) {
    return Builder.getInt(VInfo->getIntValue());
  };

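  // Fold the scalar comparisons into the constant of the comparison category
  // type: eq ? equal : (lt ? less : greater), with an extra 'unordered' arm
  // when the ordering is partial.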
  Value *Select;
  if (ArgTy->isNullPtrType()) {
    Select = EmitCmpRes(CmpInfo.getEqualOrEquiv());
  } else if (!CmpInfo.isPartial()) {
    Value *SelectOne =
        Builder.CreateSelect(EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()),
                             EmitCmpRes(CmpInfo.getGreater()), "sel.lt");
    Select = Builder.CreateSelect(EmitCmp(CK_Equal),
                                  EmitCmpRes(CmpInfo.getEqualOrEquiv()),
                                  SelectOne, "sel.eq");
  } else {
    Value *SelectEq = Builder.CreateSelect(
        EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()),
        EmitCmpRes(CmpInfo.getUnordered()), "sel.eq");
    Value *SelectGT = Builder.CreateSelect(EmitCmp(CK_Greater),
                                           EmitCmpRes(CmpInfo.getGreater()),
                                           SelectEq, "sel.gt");
    Select = Builder.CreateSelect(
        EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), SelectGT, "sel.lt");
  }
  // Create the return value in the destination slot.
  EnsureDest(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());

  // Emit the address of the first (and only) field in the comparison category
  // type, and initialize it from the constant integer value selected above.
  LValue FieldLV = CGF.EmitLValueForFieldInitialization(
      DestLV, *CmpInfo.Record->field_begin());
  CGF.EmitStoreThroughLValue(RValue::get(Select), FieldLV, /*IsInit*/ true);

  // All done! The result is in the Dest slot.
}

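// Only pointer-to-data-member binary operators produce an aggregate value
// here; every other aggregate binary operator is reported as unsupported.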
1051193326Sedvoid AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
1052212904Sdim  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
1053198398Srdivacky    VisitPointerToDataMemberBinaryOperator(E);
1054198398Srdivacky  else
1055198398Srdivacky    CGF.ErrorUnsupported(E, "aggregate binary expression");
1056193326Sed}
1057193326Sed
1058198398Srdivackyvoid AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
1059198398Srdivacky                                                    const BinaryOperator *E) {
1060198398Srdivacky  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
1061239462Sdim  EmitFinalDestCopy(E->getType(), LV);
1062198398Srdivacky}
1063198398Srdivacky
1064239462Sdim/// Is the value of the given expression possibly a reference to or
1065239462Sdim/// into a __block variable?
1066239462Sdimstatic bool isBlockVarRef(const Expr *E) {
1067239462Sdim  // Make sure we look through parens.
1068239462Sdim  E = E->IgnoreParens();
1069239462Sdim
1070239462Sdim  // Check for a direct reference to a __block variable.
1071239462Sdim  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
1072239462Sdim    const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
1073239462Sdim    return (var && var->hasAttr<BlocksAttr>());
1074239462Sdim  }
1075239462Sdim
1076239462Sdim  // More complicated stuff.
1077239462Sdim
1078239462Sdim  // Binary operators.
1079239462Sdim  if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
1080239462Sdim    // For an assignment or pointer-to-member operation, just care
1081239462Sdim    // about the LHS.
1082239462Sdim    if (op->isAssignmentOp() || op->isPtrMemOp())
1083239462Sdim      return isBlockVarRef(op->getLHS());
1084239462Sdim
1085239462Sdim    // For a comma, just care about the RHS.
1086239462Sdim    if (op->getOpcode() == BO_Comma)
1087239462Sdim      return isBlockVarRef(op->getRHS());
1088239462Sdim
1089239462Sdim    // FIXME: pointer arithmetic?
1090239462Sdim    return false;
1091239462Sdim
1092239462Sdim  // Check both sides of a conditional operator.
1093239462Sdim  } else if (const AbstractConditionalOperator *op
1094239462Sdim               = dyn_cast<AbstractConditionalOperator>(E)) {
1095239462Sdim    return isBlockVarRef(op->getTrueExpr())
1096239462Sdim        || isBlockVarRef(op->getFalseExpr());
1097239462Sdim
1098239462Sdim  // OVEs are required to support BinaryConditionalOperators.
1099239462Sdim  } else if (const OpaqueValueExpr *op
1100239462Sdim               = dyn_cast<OpaqueValueExpr>(E)) {
1101239462Sdim    if (const Expr *src = op->getSourceExpr())
1102239462Sdim      return isBlockVarRef(src);
1103239462Sdim
1104239462Sdim  // Casts are necessary to get things like (*(int*)&var) = foo().
1105239462Sdim  // We don't really care about the kind of cast here, except
1106239462Sdim  // we don't want to look through l2r casts, because it's okay
1107239462Sdim  // to get the *value* in a __block variable.
1108239462Sdim  } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
1109239462Sdim    if (cast->getCastKind() == CK_LValueToRValue)
1110239462Sdim      return false;
1111239462Sdim    return isBlockVarRef(cast->getSubExpr());
1112239462Sdim
1113239462Sdim  // Handle unary operators.  Again, just aggressively look through
1114239462Sdim  // them, ignoring the operation.
1115239462Sdim  } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
1116239462Sdim    return isBlockVarRef(uop->getSubExpr());
1117239462Sdim
1118239462Sdim  // Look into the base of a field access.
1119239462Sdim  } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
1120239462Sdim    return isBlockVarRef(mem->getBase());
1121239462Sdim
1122239462Sdim  // Look into the base of a subscript.
1123239462Sdim  } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
1124239462Sdim    return isBlockVarRef(sub->getBase());
1125239462Sdim  }
1126239462Sdim
1127239462Sdim  return false;
1128239462Sdim}
1129239462Sdim
1130193326Sedvoid AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
1131193326Sed  // For an assignment to work, the value on the right has
1132193326Sed  // to be compatible with the value on the left.
1133193326Sed  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
1134193326Sed                                                 E->getRHS()->getType())
1135193326Sed         && "Invalid assignment");
1136218893Sdim
1137239462Sdim  // If the LHS might be a __block variable, and the RHS can
1138239462Sdim  // potentially cause a block copy, we need to evaluate the RHS first
1139239462Sdim  // so that the assignment goes to the right place.
1140239462Sdim  // This is pretty semantically fragile.
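  // A sketch of the pattern this guards against (illustrative names only):
  //
  //   __block Agg x;
  //   ^{ use(x); };                  // copying this block may move 'x' to the heap
  //   x = makeAggWithSideEffects();  // RHS may trigger that copy
  //
  // Emitting the RHS first means the LHS address is computed after any such
  // relocation, so the store lands in the right place.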
1141239462Sdim  if (isBlockVarRef(E->getLHS()) &&
1142239462Sdim      E->getRHS()->HasSideEffects(CGF.getContext())) {
1143239462Sdim    // Ensure that we have a destination, and evaluate the RHS into that.
1144239462Sdim    EnsureDest(E->getRHS()->getType());
1145239462Sdim    Visit(E->getRHS());
1146239462Sdim
1147239462Sdim    // Now emit the LHS and copy into it.
1148243830Sdim    LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
1149239462Sdim
1150249423Sdim    // That copy is an atomic copy if the LHS is atomic.
1151288943Sdim    if (LHS.getType()->isAtomicType() ||
1152288943Sdim        CGF.LValueIsSuitableForInlineAtomic(LHS)) {
1153249423Sdim      CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
1154249423Sdim      return;
1155249423Sdim    }
1156249423Sdim
1157239462Sdim    EmitCopy(E->getLHS()->getType(),
1158360784Sdim             AggValueSlot::forLValue(LHS, CGF, AggValueSlot::IsDestructed,
1159239462Sdim                                     needsGC(E->getLHS()->getType()),
1160341825Sdim                                     AggValueSlot::IsAliased,
1161341825Sdim                                     AggValueSlot::MayOverlap),
1162239462Sdim             Dest);
1163239462Sdim    return;
1164239462Sdim  }
1165341825Sdim
1166193326Sed  LValue LHS = CGF.EmitLValue(E->getLHS());
1167193326Sed
1168249423Sdim  // If we have an atomic type, evaluate into the destination and then
1169249423Sdim  // do an atomic copy.
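  // e.g. (illustrative) for '_Atomic(struct S) g; ...; g = makeS();' the copy
  // into 'g' must go through EmitAtomicStore rather than a plain memcpy.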
1170288943Sdim  if (LHS.getType()->isAtomicType() ||
1171288943Sdim      CGF.LValueIsSuitableForInlineAtomic(LHS)) {
1172249423Sdim    EnsureDest(E->getRHS()->getType());
1173249423Sdim    Visit(E->getRHS());
1174249423Sdim    CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
1175249423Sdim    return;
1176249423Sdim  }
1177249423Sdim
1178234353Sdim  // Codegen the RHS so that it stores directly into the LHS.
1179360784Sdim  AggValueSlot LHSSlot = AggValueSlot::forLValue(
1180360784Sdim      LHS, CGF, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
1181360784Sdim      AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
1182249423Sdim  // A non-volatile aggregate destination might have a volatile member.
1183249423Sdim  if (!LHSSlot.isVolatile() &&
1184249423Sdim      CGF.hasVolatileMember(E->getLHS()->getType()))
1185249423Sdim    LHSSlot.setVolatile(true);
1186341825Sdim
1187239462Sdim  CGF.EmitAggExpr(E->getRHS(), LHSSlot);
1188239462Sdim
1189239462Sdim  // Copy into the destination if the assignment isn't ignored.
1190239462Sdim  EmitFinalDestCopy(E->getType(), LHS);
1191193326Sed}
1192193326Sed
1193218893Sdimvoid AggExprEmitter::
1194218893SdimVisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
1195193326Sed  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
1196193326Sed  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
1197193326Sed  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
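  // The emitted control flow is roughly (an IR sketch, not exact output):
  //
  //     br i1 %cond, label %cond.true, label %cond.false
  //   cond.true:
  //     ; evaluate the true expression into the destination slot
  //     br label %cond.end
  //   cond.false:
  //     ; evaluate the false expression into the same slot
  //     br label %cond.end
  //   cond.end: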
1198198092Srdivacky
1199218893Sdim  // Bind the common expression if necessary.
1200218893Sdim  CodeGenFunction::OpaqueValueMapping binding(CGF, E);
1201218893Sdim
1202218893Sdim  CodeGenFunction::ConditionalEvaluation eval(CGF);
1203288943Sdim  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
1204288943Sdim                           CGF.getProfileCount(E));
1205198092Srdivacky
1206218893Sdim  // Save whether the destination is already externally destructed.
1207226633Sdim  bool isExternallyDestructed = Dest.isExternallyDestructed();
1208218893Sdim
1209218893Sdim  eval.begin(CGF);
1210193326Sed  CGF.EmitBlock(LHSBlock);
1211288943Sdim  CGF.incrementProfileCounter(E);
1212218893Sdim  Visit(E->getTrueExpr());
1213218893Sdim  eval.end(CGF);
1214198092Srdivacky
1215218893Sdim  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
1216218893Sdim  CGF.Builder.CreateBr(ContBlock);
1217193326Sed
1218218893Sdim  // If the result of an agg expression is unused, then the emission
1219218893Sdim  // of the LHS might need to create a destination slot.  That's fine
1220218893Sdim  // with us, and we can safely emit the RHS into the same slot, but
1221226633Sdim  // we shouldn't claim that it's already being destructed.
1222226633Sdim  Dest.setExternallyDestructed(isExternallyDestructed);
1223198092Srdivacky
1224218893Sdim  eval.begin(CGF);
1225193326Sed  CGF.EmitBlock(RHSBlock);
1226218893Sdim  Visit(E->getFalseExpr());
1227218893Sdim  eval.end(CGF);
1228198092Srdivacky
1229193326Sed  CGF.EmitBlock(ContBlock);
1230193326Sed}
1231193326Sed
1232198092Srdivackyvoid AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
1233261991Sdim  Visit(CE->getChosenSubExpr());
1234198092Srdivacky}
1235198092Srdivacky
1236193326Sedvoid AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
1237296417Sdim  Address ArgValue = Address::invalid();
1238296417Sdim  Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
1239193326Sed
1240309124Sdim  // If EmitVAArg fails, emit an error.
1241296417Sdim  if (!ArgPtr.isValid()) {
1242309124Sdim    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
1243193326Sed    return;
1244193326Sed  }
1245193326Sed
1246239462Sdim  EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
1247193326Sed}
1248193326Sed
1249193326Sedvoid AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
1250218893Sdim  // Ensure that we have a slot, but if we already do, remember
1251226633Sdim  // whether it was externally destructed.
1252226633Sdim  bool wasExternallyDestructed = Dest.isExternallyDestructed();
1253239462Sdim  EnsureDest(E->getType());
1254198092Srdivacky
1255226633Sdim  // We're going to push a destructor if there isn't already one.
1256226633Sdim  Dest.setExternallyDestructed();
1257226633Sdim
1258218893Sdim  Visit(E->getSubExpr());
1259193326Sed
1260226633Sdim  // Push that destructor we promised.
1261226633Sdim  if (!wasExternallyDestructed)
1262296417Sdim    CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
1263193326Sed}
1264193326Sed
1265193326Sedvoid
1266193326SedAggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
1267218893Sdim  AggValueSlot Slot = EnsureSlot(E->getType());
1268218893Sdim  CGF.EmitCXXConstructExpr(E, Slot);
1269193326Sed}
1270193326Sed
1271309124Sdimvoid AggExprEmitter::VisitCXXInheritedCtorInitExpr(
1272309124Sdim    const CXXInheritedCtorInitExpr *E) {
1273309124Sdim  AggValueSlot Slot = EnsureSlot(E->getType());
1274309124Sdim  CGF.EmitInheritedCXXConstructorCall(
1275309124Sdim      E->getConstructor(), E->constructsVBase(), Slot.getAddress(),
1276309124Sdim      E->inheritedFromVBase(), E);
1277309124Sdim}
1278309124Sdim
1279234353Sdimvoid
1280234353SdimAggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
1281234353Sdim  AggValueSlot Slot = EnsureSlot(E->getType());
1282353358Sdim  LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType());
1283353358Sdim
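  // For a lambda such as (illustrative)
  //
  //   auto l = [s = std::string("hi"), &n] { /* ... */ };
  //
  // each capture corresponds to a field of the closure class; those fields
  // are initialized below in capture order.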
1284353358Sdim  // We'll need to enter cleanup scopes in case any of the element
1285353358Sdim  // initializers throw an exception.
1286353358Sdim  SmallVector<EHScopeStack::stable_iterator, 16> Cleanups;
1287353358Sdim  llvm::Instruction *CleanupDominator = nullptr;
1288353358Sdim
1289353358Sdim  CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
1290353358Sdim  for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
1291353358Sdim                                               e = E->capture_init_end();
1292353358Sdim       i != e; ++i, ++CurField) {
1293353358Sdim    // Emit initialization
1294353358Sdim    LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);
1295353358Sdim    if (CurField->hasCapturedVLAType()) {
1296353358Sdim      CGF.EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
1297353358Sdim      continue;
1298353358Sdim    }
1299353358Sdim
1300353358Sdim    EmitInitializationToLValue(*i, LV);
1301353358Sdim
1302353358Sdim    // Push a destructor if necessary.
1303353358Sdim    if (QualType::DestructionKind DtorKind =
1304353358Sdim            CurField->getType().isDestructedType()) {
1305353358Sdim      assert(LV.isSimple());
1306353358Sdim      if (CGF.needsEHCleanup(DtorKind)) {
1307353358Sdim        if (!CleanupDominator)
1308353358Sdim          CleanupDominator = CGF.Builder.CreateAlignedLoad(
1309353358Sdim              CGF.Int8Ty,
1310353358Sdim              llvm::Constant::getNullValue(CGF.Int8PtrTy),
1311353358Sdim              CharUnits::One()); // placeholder
1312353358Sdim
1313360784Sdim        CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), CurField->getType(),
1314353358Sdim                        CGF.getDestroyer(DtorKind), false);
1315353358Sdim        Cleanups.push_back(CGF.EHStack.stable_begin());
1316353358Sdim      }
1317353358Sdim    }
1318353358Sdim  }
1319353358Sdim
1320353358Sdim  // Deactivate all the partial cleanups in reverse order, which
1321353358Sdim  // generally means popping them.
1322353358Sdim  for (unsigned i = Cleanups.size(); i != 0; --i)
1323353358Sdim    CGF.DeactivateCleanupBlock(Cleanups[i-1], CleanupDominator);
1324353358Sdim
1325353358Sdim  // Destroy the placeholder if we made one.
1326353358Sdim  if (CleanupDominator)
1327353358Sdim    CleanupDominator->eraseFromParent();
1328234353Sdim}
1329234353Sdim
1330218893Sdimvoid AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
1331234353Sdim  CGF.enterFullExpression(E);
1332234353Sdim  CodeGenFunction::RunCleanupsScope cleanups(CGF);
1333234353Sdim  Visit(E->getSubExpr());
1334193326Sed}
1335193326Sed
1336210299Sedvoid AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
1337218893Sdim  QualType T = E->getType();
1338218893Sdim  AggValueSlot Slot = EnsureSlot(T);
1339296417Sdim  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
1340198398Srdivacky}
1341198398Srdivacky
1342201361Srdivackyvoid AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
1343218893Sdim  QualType T = E->getType();
1344218893Sdim  AggValueSlot Slot = EnsureSlot(T);
1345296417Sdim  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
1346218893Sdim}
1347201361Srdivacky
1348218893Sdim/// isSimpleZero - If emitting this value will obviously just cause a store of
1349218893Sdim/// zero to memory, return true.  This can return false if uncertain, so it just
1350218893Sdim/// handles simple cases.
1351218893Sdimstatic bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
1352221345Sdim  E = E->IgnoreParens();
1353221345Sdim
1354218893Sdim  // 0
1355218893Sdim  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
1356218893Sdim    return IL->getValue() == 0;
1357218893Sdim  // +0.0
1358218893Sdim  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
1359218893Sdim    return FL->getValue().isPosZero();
1360218893Sdim  // int()
1361218893Sdim  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
1362218893Sdim      CGF.getTypes().isZeroInitializable(E->getType()))
1363218893Sdim    return true;
1364218893Sdim  // (int*)0 - Null pointer expressions.
1365218893Sdim  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
1366314564Sdim    return ICE->getCastKind() == CK_NullToPointer &&
1367353358Sdim           CGF.getTypes().isPointerZeroInitializable(E->getType()) &&
1368353358Sdim           !E->HasSideEffects(CGF.getContext());
1369218893Sdim  // '\0'
1370218893Sdim  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
1371218893Sdim    return CL->getValue() == 0;
1372341825Sdim
1373218893Sdim  // Otherwise, hard case: conservatively return false.
1374218893Sdim  return false;
1375201361Srdivacky}
1376201361Srdivacky
1377218893Sdim
1378341825Sdimvoid
1379261991SdimAggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
1380224145Sdim  QualType type = LV.getType();
1381193326Sed  // FIXME: Ignore result?
1382193326Sed  // FIXME: Are initializers affected by volatile?
1383218893Sdim  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
1384218893Sdim    // Storing "i32 0" to a zero'd memory location is a noop.
1385249423Sdim    return;
1386249423Sdim  } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
1387249423Sdim    return EmitNullInitializationToLValue(LV);
1388288943Sdim  } else if (isa<NoInitExpr>(E)) {
1389288943Sdim    // Do nothing.
1390288943Sdim    return;
1391224145Sdim  } else if (type->isReferenceType()) {
1392261991Sdim    RValue RV = CGF.EmitReferenceBindingToExpr(E);
1393249423Sdim    return CGF.EmitStoreThroughLValue(RV, LV);
1394249423Sdim  }
1395341825Sdim
1396249423Sdim  switch (CGF.getEvaluationKind(type)) {
1397249423Sdim  case TEK_Complex:
1398249423Sdim    CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
1399249423Sdim    return;
1400249423Sdim  case TEK_Aggregate:
1401360784Sdim    CGF.EmitAggExpr(
1402360784Sdim        E, AggValueSlot::forLValue(LV, CGF, AggValueSlot::IsDestructed,
1403360784Sdim                                   AggValueSlot::DoesNotNeedGCBarriers,
1404360784Sdim                                   AggValueSlot::IsNotAliased,
1405360784Sdim                                   AggValueSlot::MayOverlap, Dest.isZeroed()));
1406249423Sdim    return;
1407249423Sdim  case TEK_Scalar:
1408249423Sdim    if (LV.isSimple()) {
1409276479Sdim      CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
1410249423Sdim    } else {
1411249423Sdim      CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
1412249423Sdim    }
1413249423Sdim    return;
1414193326Sed  }
1415249423Sdim  llvm_unreachable("bad evaluation kind");
1416193326Sed}
1417193326Sed
1418224145Sdimvoid AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
1419224145Sdim  QualType type = lv.getType();
1420224145Sdim
1421218893Sdim  // If the destination slot is already zeroed out before the aggregate is
1422218893Sdim  // copied into it, we don't have to emit any zeros here.
1423224145Sdim  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
1424218893Sdim    return;
1425341825Sdim
1426249423Sdim  if (CGF.hasScalarEvaluationKind(type)) {
1427249423Sdim    // For non-aggregates, we can store the appropriate null constant.
1428249423Sdim    llvm::Value *null = CGF.CGM.EmitNullConstant(type);
1429234353Sdim    // Note that the following is not equivalent to
1430234353Sdim    // EmitStoreThroughBitfieldLValue for ARC types.
1431234353Sdim    if (lv.isBitField()) {
1432234353Sdim      CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
1433234353Sdim    } else {
1434234353Sdim      assert(lv.isSimple());
1435234353Sdim      CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
1436234353Sdim    }
1437193326Sed  } else {
1438193326Sed    // There's a potential optimization opportunity in combining
1439193326Sed    // memsets; that would be easy for arrays, but relatively
1440193326Sed    // difficult for structures with the current code.
1441360784Sdim    CGF.EmitNullInitialization(lv.getAddress(CGF), lv.getType());
1442193326Sed  }
1443193326Sed}
1444193326Sed
1445193326Sedvoid AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
1446193326Sed#if 0
1447200583Srdivacky  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
1448200583Srdivacky  // (Length of globals? Chunks of zeroed-out space?).
1449193326Sed  //
1450193326Sed  // If we can, prefer a copy from a global; this is a lot less code for long
1451193326Sed  // globals, and it's easier for the current optimizers to analyze.
1452200583Srdivacky  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
1453193326Sed    llvm::GlobalVariable* GV =
1454200583Srdivacky    new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
1455200583Srdivacky                             llvm::GlobalValue::InternalLinkage, C, "");
1456239462Sdim    EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType()));
1457193326Sed    return;
1458193326Sed  }
1459193326Sed#endif
1460218893Sdim  if (E->hadArrayRangeDesignator())
1461193326Sed    CGF.ErrorUnsupported(E, "GNU array range designator extension");
1462193326Sed
1463314564Sdim  if (E->isTransparent())
1464314564Sdim    return Visit(E->getInit(0));
1465314564Sdim
1466261991Sdim  AggValueSlot Dest = EnsureSlot(E->getType());
1467218893Sdim
1468296417Sdim  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1469234353Sdim
1470193326Sed  // Handle initialization of an array.
1471193326Sed  if (E->getType()->isArrayType()) {
1472296417Sdim    auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
1473338697Sdim    EmitArrayInit(Dest.getAddress(), AType, E->getType(), E);
1474193326Sed    return;
1475193326Sed  }
1476198092Srdivacky
1477193326Sed  assert(E->getType()->isRecordType() && "Only support structs/unions here!");
1478198092Srdivacky
1479193326Sed  // Do struct initialization; this code just sets each individual member
1480193326Sed  // to the appropriate value.  This makes bitfield support automatic;
1481193326Sed  // the disadvantage is that the generated code is more difficult for
1482193326Sed  // the optimizer, especially with bitfields.
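  // For example (illustrative), 'struct P { int x, y; } p = { 1 };' stores 1
  // into p.x below and then default-initializes p.y to zero.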
1483193326Sed  unsigned NumInitElements = E->getNumInits();
1484224145Sdim  RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();
1485251662Sdim
1486309124Sdim  // We'll need to enter cleanup scopes in case any of the element
1487309124Sdim  // initializers throw an exception.
1488309124Sdim  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
1489309124Sdim  llvm::Instruction *cleanupDominator = nullptr;
1490353358Sdim  auto addCleanup = [&](const EHScopeStack::stable_iterator &cleanup) {
1491353358Sdim    cleanups.push_back(cleanup);
1492353358Sdim    if (!cleanupDominator) // create placeholder once needed
1493353358Sdim      cleanupDominator = CGF.Builder.CreateAlignedLoad(
1494353358Sdim          CGF.Int8Ty, llvm::Constant::getNullValue(CGF.Int8PtrTy),
1495353358Sdim          CharUnits::One());
1496353358Sdim  };
1497309124Sdim
1498309124Sdim  unsigned curInitIndex = 0;
1499309124Sdim
1500309124Sdim  // Emit initialization of base classes.
1501309124Sdim  if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
1502309124Sdim    assert(E->getNumInits() >= CXXRD->getNumBases() &&
1503309124Sdim           "missing initializer for base class");
1504309124Sdim    for (auto &Base : CXXRD->bases()) {
1505309124Sdim      assert(!Base.isVirtual() && "should not see vbases here");
1506309124Sdim      auto *BaseRD = Base.getType()->getAsCXXRecordDecl();
1507309124Sdim      Address V = CGF.GetAddressOfDirectBaseInCompleteClass(
1508309124Sdim          Dest.getAddress(), CXXRD, BaseRD,
1509309124Sdim          /*isBaseVirtual*/ false);
1510341825Sdim      AggValueSlot AggSlot = AggValueSlot::forAddr(
1511341825Sdim          V, Qualifiers(),
1512341825Sdim          AggValueSlot::IsDestructed,
1513341825Sdim          AggValueSlot::DoesNotNeedGCBarriers,
1514341825Sdim          AggValueSlot::IsNotAliased,
1515353358Sdim          CGF.getOverlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
1516309124Sdim      CGF.EmitAggExpr(E->getInit(curInitIndex++), AggSlot);
1517309124Sdim
1518309124Sdim      if (QualType::DestructionKind dtorKind =
1519309124Sdim              Base.getType().isDestructedType()) {
1520309124Sdim        CGF.pushDestroy(dtorKind, V, Base.getType());
1521353358Sdim        addCleanup(CGF.EHStack.stable_begin());
1522309124Sdim      }
1523309124Sdim    }
1524309124Sdim  }
1525309124Sdim
1526251662Sdim  // Prepare a 'this' for CXXDefaultInitExprs.
1527296417Sdim  CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());
1528251662Sdim
1529224145Sdim  if (record->isUnion()) {
1530193326Sed    // Only initialize one field of a union. The field itself is
1531193326Sed    // specified by the initializer list.
1532193326Sed    if (!E->getInitializedFieldInUnion()) {
1533193326Sed      // Empty union; we have nothing to do.
1534198092Srdivacky
1535193326Sed#ifndef NDEBUG
1536193326Sed      // Make sure that it's really an empty union and not a failure of
1537193326Sed      // semantic analysis.
1538276479Sdim      for (const auto *Field : record->fields())
1539193326Sed        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
1540193326Sed#endif
1541193326Sed      return;
1542193326Sed    }
1543193326Sed
1544193326Sed    // FIXME: volatility
1545193326Sed    FieldDecl *Field = E->getInitializedFieldInUnion();
1546218893Sdim
1547234982Sdim    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
1548193326Sed    if (NumInitElements) {
1549193326Sed      // Store the initializer into the field
1550224145Sdim      EmitInitializationToLValue(E->getInit(0), FieldLoc);
1551193326Sed    } else {
1552218893Sdim      // Default-initialize to null.
1553224145Sdim      EmitNullInitializationToLValue(FieldLoc);
1554193326Sed    }
1555193326Sed
1556193326Sed    return;
1557193326Sed  }
1558198092Srdivacky
1559193326Sed  // Here we iterate over the fields; this makes it simpler to both
1560193326Sed  // default-initialize fields and skip over unnamed fields.
1561276479Sdim  for (const auto *field : record->fields()) {
1562224145Sdim    // We're done once we hit the flexible array member.
1563224145Sdim    if (field->getType()->isIncompleteArrayType())
1564193326Sed      break;
1565193326Sed
1566224145Sdim    // Always skip anonymous bitfields.
1567224145Sdim    if (field->isUnnamedBitfield())
1568193326Sed      continue;
1569193326Sed
1570224145Sdim    // We're done if we reach the end of the explicit initializers, we
1571224145Sdim    // have a zeroed object, and the rest of the fields are
1572224145Sdim    // zero-initializable.
1573224145Sdim    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
1574218893Sdim        CGF.getTypes().isZeroInitializable(E->getType()))
1575218893Sdim      break;
1576234982Sdim
1577341825Sdim
1578276479Sdim    LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
1579193326Sed    // We never generate write-barriers for initialized fields.
1580224145Sdim    LV.setNonGC(true);
1581341825Sdim
1582224145Sdim    if (curInitIndex < NumInitElements) {
1583204962Srdivacky      // Store the initializer into the field.
1584224145Sdim      EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
1585193326Sed    } else {
1586321369Sdim      // We're out of initializers; default-initialize to null
1587224145Sdim      EmitNullInitializationToLValue(LV);
1588193326Sed    }
1589224145Sdim
1590224145Sdim    // Push a destructor if necessary.
1591224145Sdim    // FIXME: if we have an array of structures, all explicitly
1592224145Sdim    // initialized, we can end up pushing a linear number of cleanups.
1593224145Sdim    bool pushedCleanup = false;
1594224145Sdim    if (QualType::DestructionKind dtorKind
1595224145Sdim          = field->getType().isDestructedType()) {
1596224145Sdim      assert(LV.isSimple());
1597224145Sdim      if (CGF.needsEHCleanup(dtorKind)) {
1598360784Sdim        CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), field->getType(),
1599224145Sdim                        CGF.getDestroyer(dtorKind), false);
1600353358Sdim        addCleanup(CGF.EHStack.stable_begin());
1601224145Sdim        pushedCleanup = true;
1602224145Sdim      }
1603224145Sdim    }
1604341825Sdim
1605218893Sdim    // If the GEP didn't get used because of a dead zero init or something
1606218893Sdim    // else, clean it up for -O0 builds and general tidiness.
1607341825Sdim    if (!pushedCleanup && LV.isSimple())
1608218893Sdim      if (llvm::GetElementPtrInst *GEP =
1609360784Sdim              dyn_cast<llvm::GetElementPtrInst>(LV.getPointer(CGF)))
1610218893Sdim        if (GEP->use_empty())
1611218893Sdim          GEP->eraseFromParent();
1612193326Sed  }
1613224145Sdim
1614224145Sdim  // Deactivate all the partial cleanups in reverse order, which
1615224145Sdim  // generally means popping them.
1616353358Sdim  assert((cleanupDominator || cleanups.empty()) &&
1617353358Sdim         "Missing cleanupDominator before deactivating cleanup blocks");
1618224145Sdim  for (unsigned i = cleanups.size(); i != 0; --i)
1619234353Sdim    CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);
1620234353Sdim
1621234353Sdim  // Destroy the placeholder if we made one.
1622234353Sdim  if (cleanupDominator)
1623234353Sdim    cleanupDominator->eraseFromParent();
1624193326Sed}
1625193326Sed
1626314564Sdimvoid AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
1627314564Sdim                                            llvm::Value *outerBegin) {
1628314564Sdim  // Emit the common subexpression.
1629314564Sdim  CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr());
1630314564Sdim
1631314564Sdim  Address destPtr = EnsureSlot(E->getType()).getAddress();
1632314564Sdim  uint64_t numElements = E->getArraySize().getZExtValue();
1633314564Sdim
1634314564Sdim  if (!numElements)
1635314564Sdim    return;
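  // The code below emits a loop of roughly this shape (an IR sketch):
  //
  //   arrayinit.body:
  //     %index = phi i64 [ 0, %entry ], [ %next, ... ]
  //     ; initialize element %index
  //     %next = add nuw i64 %index, 1
  //     %done = icmp eq i64 %next, <numElements>
  //     br i1 %done, label %arrayinit.end, label %arrayinit.body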
1636314564Sdim
1637314564Sdim  // destPtr is an array*. Construct an elementType* by drilling down a level.
1638314564Sdim  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
1639314564Sdim  llvm::Value *indices[] = {zero, zero};
1640314564Sdim  llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getPointer(), indices,
1641314564Sdim                                                 "arrayinit.begin");
1642314564Sdim
1643314564Sdim  // Prepare to special-case multidimensional array initialization: we avoid
1644314564Sdim  // emitting multiple destructor loops in that case.
1645314564Sdim  if (!outerBegin)
1646314564Sdim    outerBegin = begin;
1647314564Sdim  ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr());
1648314564Sdim
1649314564Sdim  QualType elementType =
1650314564Sdim      CGF.getContext().getAsArrayType(E->getType())->getElementType();
1651314564Sdim  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
1652314564Sdim  CharUnits elementAlign =
1653314564Sdim      destPtr.getAlignment().alignmentOfArrayElement(elementSize);
1654314564Sdim
1655314564Sdim  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
1656314564Sdim  llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
1657314564Sdim
1658314564Sdim  // Jump into the body.
1659314564Sdim  CGF.EmitBlock(bodyBB);
1660314564Sdim  llvm::PHINode *index =
1661314564Sdim      Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
1662314564Sdim  index->addIncoming(zero, entryBB);
1663314564Sdim  llvm::Value *element = Builder.CreateInBoundsGEP(begin, index);
1664314564Sdim
1665314564Sdim  // Prepare for a cleanup.
1666314564Sdim  QualType::DestructionKind dtorKind = elementType.isDestructedType();
1667314564Sdim  EHScopeStack::stable_iterator cleanup;
1668314564Sdim  if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
1669314564Sdim    if (outerBegin->getType() != element->getType())
1670314564Sdim      outerBegin = Builder.CreateBitCast(outerBegin, element->getType());
1671314564Sdim    CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType,
1672314564Sdim                                       elementAlign,
1673314564Sdim                                       CGF.getDestroyer(dtorKind));
1674314564Sdim    cleanup = CGF.EHStack.stable_begin();
1675314564Sdim  } else {
1676314564Sdim    dtorKind = QualType::DK_none;
1677314564Sdim  }
1678314564Sdim
1679314564Sdim  // Emit the actual filler expression.
1680314564Sdim  {
1681314564Sdim    // Temporaries created in an array initialization loop are destroyed
1682314564Sdim    // at the end of each iteration.
1683314564Sdim    CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
1684314564Sdim    CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
1685314564Sdim    LValue elementLV =
1686314564Sdim        CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
1687314564Sdim
1688314564Sdim    if (InnerLoop) {
1689314564Sdim      // If the subexpression is an ArrayInitLoopExpr, share its cleanup.
1690314564Sdim      auto elementSlot = AggValueSlot::forLValue(
1691360784Sdim          elementLV, CGF, AggValueSlot::IsDestructed,
1692360784Sdim          AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
1693341825Sdim          AggValueSlot::DoesNotOverlap);
1694314564Sdim      AggExprEmitter(CGF, elementSlot, false)
1695314564Sdim          .VisitArrayInitLoopExpr(InnerLoop, outerBegin);
1696314564Sdim    } else
1697314564Sdim      EmitInitializationToLValue(E->getSubExpr(), elementLV);
1698314564Sdim  }
1699314564Sdim
1700314564Sdim  // Move on to the next element.
1701314564Sdim  llvm::Value *nextIndex = Builder.CreateNUWAdd(
1702314564Sdim      index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
1703314564Sdim  index->addIncoming(nextIndex, Builder.GetInsertBlock());
1704314564Sdim
1705314564Sdim  // Leave the loop if we're done.
1706314564Sdim  llvm::Value *done = Builder.CreateICmpEQ(
1707314564Sdim      nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements),
1708314564Sdim      "arrayinit.done");
1709314564Sdim  llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
1710314564Sdim  Builder.CreateCondBr(done, endBB, bodyBB);
1711314564Sdim
1712314564Sdim  CGF.EmitBlock(endBB);
1713314564Sdim
1714314564Sdim  // Leave the partial-array cleanup if we entered one.
1715314564Sdim  if (dtorKind)
1716314564Sdim    CGF.DeactivateCleanupBlock(cleanup, index);
1717314564Sdim}
1718314564Sdim
1719288943Sdimvoid AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
1720288943Sdim  AggValueSlot Dest = EnsureSlot(E->getType());
1721288943Sdim
1722296417Sdim  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1723288943Sdim  EmitInitializationToLValue(E->getBase(), DestLV);
1724288943Sdim  VisitInitListExpr(E->getUpdater());
1725288943Sdim}
1726288943Sdim
1727193326Sed//===----------------------------------------------------------------------===//
1728193326Sed//                        Entry Points into this File
1729193326Sed//===----------------------------------------------------------------------===//
1730193326Sed
1731218893Sdim/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
1732218893Sdim/// non-zero bytes that will be stored when outputting the initializer for the
1733218893Sdim/// specified initializer expression.
1734221345Sdimstatic CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
1735370359Sgit2svn  if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
1736370359Sgit2svn    E = MTE->getSubExpr();
1737370359Sgit2svn  E = E->IgnoreParenNoopCasts(CGF.getContext());
1738218893Sdim
1739218893Sdim  // 0 and 0.0 won't require any non-zero stores!
1740221345Sdim  if (isSimpleZero(E, CGF)) return CharUnits::Zero();
1741218893Sdim
1742218893Sdim  // If this is an initlist expr, sum up the sizes of the (present)
1743218893Sdim  // elements.  If this is something weird, assume the whole thing is non-zero.
1744218893Sdim  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
1745341825Sdim  while (ILE && ILE->isTransparent())
1746341825Sdim    ILE = dyn_cast<InitListExpr>(ILE->getInit(0));
1747276479Sdim  if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
1748221345Sdim    return CGF.getContext().getTypeSizeInChars(E->getType());
1749341825Sdim
1750218893Sdim  // InitListExprs for structs have to be handled carefully.  If there are
1751218893Sdim  // reference members, we need to consider the size of the reference, not the
1752218893Sdim  // referencee.  InitListExprs for unions and arrays can't have references.
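  // e.g. (illustrative) in 'struct R { int &ref; int pad[16]; } r = { n };'
  // the reference member contributes pointer-width non-zero bytes even if the
  // referenced int happens to be zero.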
1753218893Sdim  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
1754218893Sdim    if (!RT->isUnionType()) {
1755360784Sdim      RecordDecl *SD = RT->getDecl();
1756221345Sdim      CharUnits NumNonZeroBytes = CharUnits::Zero();
1757341825Sdim
1758218893Sdim      unsigned ILEElement = 0;
1759309124Sdim      if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD))
1760309124Sdim        while (ILEElement != CXXRD->getNumBases())
1761309124Sdim          NumNonZeroBytes +=
1762309124Sdim              GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF);
1763276479Sdim      for (const auto *Field : SD->fields()) {
1764218893Sdim        // We're done once we hit the flexible array member or run out of
1765218893Sdim        // InitListExpr elements.
1766218893Sdim        if (Field->getType()->isIncompleteArrayType() ||
1767218893Sdim            ILEElement == ILE->getNumInits())
1768218893Sdim          break;
1769218893Sdim        if (Field->isUnnamedBitfield())
1770218893Sdim          continue;
1771218893Sdim
1772218893Sdim        const Expr *E = ILE->getInit(ILEElement++);
1773341825Sdim
1774218893Sdim        // Reference values are always non-null and have the width of a pointer.
1775218893Sdim        if (Field->getType()->isReferenceType())
1776221345Sdim          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
1777251662Sdim              CGF.getTarget().getPointerWidth(0));
1778218893Sdim        else
1779218893Sdim          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
1780218893Sdim      }
1781341825Sdim
1782218893Sdim      return NumNonZeroBytes;
1783218893Sdim    }
1784218893Sdim  }
1785341825Sdim
1786370359Sgit2svn  // FIXME: This overestimates the number of non-zero bytes for bit-fields.
1787221345Sdim  CharUnits NumNonZeroBytes = CharUnits::Zero();
1788218893Sdim  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
1789218893Sdim    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
1790218893Sdim  return NumNonZeroBytes;
1791218893Sdim}
1792218893Sdim
1793218893Sdim/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
1794218893Sdim/// zeros in it, emit a memset and avoid storing the individual zeros.
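/// For example (an illustrative case), 'int a[64] = { 1, 2, 3 };' is cheaper
/// to emit as a memset of the whole array followed by three scalar stores
/// than as 64 individual element stores.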
1795218893Sdim///
1796218893Sdimstatic void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
1797218893Sdim                                     CodeGenFunction &CGF) {
1798218893Sdim  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
1799218893Sdim  // volatile stores.
1800296417Sdim  if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
1801276479Sdim    return;
1802221345Sdim
1803221345Sdim  // C++ objects with a user-declared constructor don't need zero'ing.
1804243830Sdim  if (CGF.getLangOpts().CPlusPlus)
1805221345Sdim    if (const RecordType *RT = CGF.getContext()
1806221345Sdim                       .getBaseElementType(E->getType())->getAs<RecordType>()) {
1807221345Sdim      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1808221345Sdim      if (RD->hasUserDeclaredConstructor())
1809221345Sdim        return;
1810221345Sdim    }
1811221345Sdim
1812218893Sdim  // If the type is 16 bytes or smaller, prefer individual stores over memset.
1813341825Sdim  CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType());
1814296417Sdim  if (Size <= CharUnits::fromQuantity(16))
1815218893Sdim    return;
1816218893Sdim
1817218893Sdim  // Check to see if over 3/4 of the initializer is known to be zero.  If so,
1818218893Sdim  // we prefer to emit memset + individual stores for the rest.
1819221345Sdim  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
1820296417Sdim  if (NumNonZeroBytes*4 > Size)
1821218893Sdim    return;
1822341825Sdim
1823218893Sdim  // Okay, it seems like a good idea to use an initial memset, emit the call.
1824296417Sdim  llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());
1825218893Sdim
1826341825Sdim  Address Loc = Slot.getAddress();
1827296417Sdim  Loc = CGF.Builder.CreateElementBitCast(Loc, CGF.Int8Ty);
1828296417Sdim  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);
1829341825Sdim
1830218893Sdim  // Tell the AggExprEmitter that the slot is known zero.
1831218893Sdim  Slot.setZeroed();
1832218893Sdim}
1833218893Sdim
1834218893Sdim
1835218893Sdim
1836218893Sdim
1837193326Sed/// EmitAggExpr - Emit the computation of the specified expression of aggregate
1838193326Sed/// type.  The result is computed into the given slot.  Note that if the slot
1839193326Sed/// is ignored, the value of the aggregate expression is not needed; the
1840239462Sdim/// expression is still emitted, but only for its side effects.
1841239462Sdimvoid CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
1842249423Sdim  assert(E && hasAggregateEvaluationKind(E->getType()) &&
1843193326Sed         "Invalid aggregate expression to emit");
1844296417Sdim  assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
1845218893Sdim         "slot has bits but no address");
1846198092Srdivacky
1847218893Sdim  // Optimize the slot if possible.
1848218893Sdim  CheckAggExprForMemSetUse(Slot, E, *this);
1849341825Sdim
1850288943Sdim  AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E));
1851193326Sed}
1852193326Sed
1853203955SrdivackyLValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
1854249423Sdim  assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
1855296417Sdim  Address Temp = CreateMemTemp(E->getType());
1856212904Sdim  LValue LV = MakeAddrLValue(Temp, E->getType());
1857360784Sdim  EmitAggExpr(E, AggValueSlot::forLValue(
1858360784Sdim                     LV, *this, AggValueSlot::IsNotDestructed,
1859360784Sdim                     AggValueSlot::DoesNotNeedGCBarriers,
1860360784Sdim                     AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap));
1861212904Sdim  return LV;
1862203955Srdivacky}
1863203955Srdivacky
1864353358SdimAggValueSlot::Overlap_t
1865353358SdimCodeGenFunction::getOverlapForFieldInit(const FieldDecl *FD) {
1866353358Sdim  if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())
1867353358Sdim    return AggValueSlot::DoesNotOverlap;
1868353358Sdim
1869353358Sdim  // If the field lies entirely within the enclosing class's nvsize, its tail
1870353358Sdim  // padding cannot overlap any already-initialized object. (The only subobjects
1871353358Sdim  // with greater addresses that might already be initialized are vbases.)
1872353358Sdim  const RecordDecl *ClassRD = FD->getParent();
1873353358Sdim  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassRD);
1874353358Sdim  if (Layout.getFieldOffset(FD->getFieldIndex()) +
1875353358Sdim          getContext().getTypeSize(FD->getType()) <=
1876353358Sdim      (uint64_t)getContext().toBits(Layout.getNonVirtualSize()))
1877353358Sdim    return AggValueSlot::DoesNotOverlap;
1878353358Sdim
1879353358Sdim  // The tail padding may contain values we need to preserve.
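  // A sketch of what this guards against (illustrative, layout-dependent):
  //
  //   struct A { int i; char c; };
  //   struct B { [[no_unique_address]] A a; char d; };
  //
  // 'B::d' may be placed in the tail padding of 'B::a', so initializing 'a'
  // must not write past its data size.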
1880353358Sdim  return AggValueSlot::MayOverlap;
1881353358Sdim}
1882353358Sdim
1883353358SdimAggValueSlot::Overlap_t CodeGenFunction::getOverlapForBaseInit(
1884341825Sdim    const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) {
1885353358Sdim  // If the most-derived object is a field declared with [[no_unique_address]],
1886353358Sdim  // the tail padding of any virtual base could be reused for other subobjects
1887353358Sdim  // of that field's class.
1888341825Sdim  if (IsVirtual)
1889353358Sdim    return AggValueSlot::MayOverlap;
1890341825Sdim
1891341825Sdim  // If the base class is laid out entirely within the nvsize of the derived
1892341825Sdim  // class, its tail padding cannot yet be initialized, so we can issue
1893341825Sdim  // stores at the full width of the base class.
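  // Conversely (an illustrative, ABI-dependent sketch), given
  //
  //   struct A { int i; char c; };
  //   struct B : A { char d; };
  //
  // the ABI may place 'B::d' in A's tail padding, in which case the A base
  // subobject extends past B's nvsize and must be treated as overlapping.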
1894341825Sdim  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
1895341825Sdim  if (Layout.getBaseClassOffset(BaseRD) +
1896341825Sdim          getContext().getASTRecordLayout(BaseRD).getSize() <=
1897341825Sdim      Layout.getNonVirtualSize())
1898341825Sdim    return AggValueSlot::DoesNotOverlap;
1899341825Sdim
1900341825Sdim  // The tail padding may contain values we need to preserve.
1901341825Sdim  return AggValueSlot::MayOverlap;
1902341825Sdim}
1903341825Sdim
1904341825Sdimvoid CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
1905341825Sdim                                        AggValueSlot::Overlap_t MayOverlap,
1906341825Sdim                                        bool isVolatile) {
1907193326Sed  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
1908198092Srdivacky
1909360784Sdim  Address DestPtr = Dest.getAddress(*this);
1910360784Sdim  Address SrcPtr = Src.getAddress(*this);
1911341825Sdim
1912243830Sdim  if (getLangOpts().CPlusPlus) {
1913207619Srdivacky    if (const RecordType *RT = Ty->getAs<RecordType>()) {
1914208600Srdivacky      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
1915341825Sdim      assert((Record->hasTrivialCopyConstructor() ||
1916226633Sdim              Record->hasTrivialCopyAssignment() ||
1917226633Sdim              Record->hasTrivialMoveConstructor() ||
1918288943Sdim              Record->hasTrivialMoveAssignment() ||
1919370035Sgit2svn              Record->hasAttr<TrivialABIAttr>() || Record->isUnion()) &&
1920249423Sdim             "Trying to aggregate-copy a type without a trivial copy/move "
1921208600Srdivacky             "constructor or assignment operator");
1922208600Srdivacky      // Ignore empty classes in C++.
1923208600Srdivacky      if (Record->isEmpty())
1924207619Srdivacky        return;
1925207619Srdivacky    }
1926207619Srdivacky  }
1927341825Sdim
1928193326Sed  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
1929193326Sed  // C99 6.5.16.1p3, which states "If the value being stored in an object is
1930193326Sed  // read from another object that overlaps in any way the storage of the first
1931193326Sed  // object, then the overlap shall be exact and the two objects shall have
1932193326Sed  // qualified or unqualified versions of a compatible type."
1933193326Sed  //
1934193326Sed  // memcpy is not defined if the source and destination pointers are exactly
1935193326Sed  // equal, but other compilers do this optimization, and almost every memcpy
1936193326Sed  // implementation handles this case safely.  If there is a libc that does not
1937193326Sed  // safely handle this, we can add a target hook.
1938198092Srdivacky
1939341825Sdim  // Get data size info for this aggregate. Don't copy the tail padding if this
1940341825Sdim  // might be a potentially-overlapping subobject, since the tail padding might
1941341825Sdim  // be occupied by a different object. Otherwise, copying it is fine.
1942243830Sdim  std::pair<CharUnits, CharUnits> TypeInfo;
1943341825Sdim  if (MayOverlap)
1944243830Sdim    TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
1945243830Sdim  else
1946243830Sdim    TypeInfo = getContext().getTypeInfoInChars(Ty);
1947198092Srdivacky
1948288943Sdim  llvm::Value *SizeVal = nullptr;
1949288943Sdim  if (TypeInfo.first.isZero()) {
1950288943Sdim    // But note that getTypeInfo returns 0 for a VLA.
1951288943Sdim    if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
1952288943Sdim            getContext().getAsArrayType(Ty))) {
1953288943Sdim      QualType BaseEltTy;
1954288943Sdim      SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
1955341825Sdim      TypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
1956288943Sdim      assert(!TypeInfo.first.isZero());
1957288943Sdim      SizeVal = Builder.CreateNUWMul(
1958288943Sdim          SizeVal,
1959288943Sdim          llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
1960288943Sdim    }
1961288943Sdim  }
1962288943Sdim  if (!SizeVal) {
1963288943Sdim    SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity());
1964288943Sdim  }
1965198092Srdivacky
1966193326Sed  // FIXME: If we have a volatile struct, the optimizer can remove what might
1967193326Sed  // appear to be `extra' memory ops:
1968193326Sed  //
1969193326Sed  // volatile struct { int i; } a, b;
1970193326Sed  //
1971193326Sed  // int main() {
1972193326Sed  //   a = b;
1973193326Sed  //   a = b;
1974193326Sed  // }
1975193326Sed  //
1976206275Srdivacky  // we need to use a different call here.  We use isVolatile to indicate when
1977193326Sed  // either the source or the destination is volatile.
1978206275Srdivacky
1979296417Sdim  DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
1980296417Sdim  SrcPtr = Builder.CreateElementBitCast(SrcPtr, Int8Ty);
1981206275Srdivacky
1982224145Sdim  // Don't do any of the memmove_collectable tests if GC isn't set.
1983234353Sdim  if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
1984224145Sdim    // fall through
1985224145Sdim  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
1986210299Sed    RecordDecl *Record = RecordTy->getDecl();
1987210299Sed    if (Record->hasObjectMember()) {
1988341825Sdim      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
1989210299Sed                                                    SizeVal);
1990210299Sed      return;
1991210299Sed    }
1992224145Sdim  } else if (Ty->isArrayType()) {
1993210299Sed    QualType BaseType = getContext().getBaseElementType(Ty);
1994210299Sed    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
1995210299Sed      if (RecordTy->getDecl()->hasObjectMember()) {
1996341825Sdim        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
1997210299Sed                                                      SizeVal);
1998210299Sed        return;
1999210299Sed      }
2000210299Sed    }
2001210299Sed  }
2002243830Sdim
2003296417Sdim  auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);
2004296417Sdim
2005243830Sdim  // Determine the metadata to describe the position of any padding in this
2006243830Sdim  // memcpy, as well as the TBAA tags for the members of the struct, in case
2007243830Sdim  // the optimizer wishes to expand it into scalar memory operations.
2008296417Sdim  if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
2009296417Sdim    Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);
2010341825Sdim
2011341825Sdim  if (CGM.getCodeGenOpts().NewStructPathTBAA) {
2012341825Sdim    TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer(
2013341825Sdim        Dest.getTBAAInfo(), Src.getTBAAInfo());
2014341825Sdim    CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo);
2015341825Sdim  }
2016193326Sed}
2017