CGExprAgg.cpp revision 309124
//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace  {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;
  bool IsResultUnused;

  /// We want to use 'dest' as the return slot except under two
  /// conditions:
  ///   - The destination slot requires garbage collection, so we
  ///     need to use the GC API.
  ///   - The destination slot is potentially aliased.
  bool shouldUseDestForReturnSlot() const {
    return !(Dest.requiresGCollection() || Dest.isPotentiallyAliased());
  }

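  /// Return the destination wrapped as a ReturnValueSlot when it is safe for
  /// a callee to emit its aggregate result there directly; otherwise return
  /// an empty slot so the result is materialized elsewhere and copied in by
  /// EmitMoveFromReturnSlot.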
  ReturnValueSlot getReturnValueSlot() const {
    if (!shouldUseDestForReturnSlot())
      return ReturnValueSlot();

    return ReturnValueSlot(Dest.getAddress(), Dest.isVolatile(),
                           IsResultUnused);
  }

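  /// EnsureSlot returns the destination if it is a real slot, or a fresh
  /// temporary otherwise, without changing Dest.  EnsureDest additionally
  /// replaces an ignored Dest with such a temporary so that later code can
  /// rely on Dest being valid.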
  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }
  void EnsureDest(QualType T) {
    if (!Dest.isIgnored()) return;
    Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
    IsResultUnused(IsResultUnused) { }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents an lvalue, emit the address of the lvalue, then copy the
  /// result into the destination slot.
  void EmitAggLoadOfLValue(const Expr *E);

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  void EmitFinalDestCopy(QualType type, const LValue &src);
  void EmitFinalDestCopy(QualType type, RValue src);
  void EmitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  void EmitMoveFromReturnSlot(const Expr *E, RValue Src);

  void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                     QualType elementType, InitListExpr *E);

  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

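  /// Dispatch to the expression-specific visitor, scoping the debug location
  /// to E for any instructions emitted along the way.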
  void Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    StmtVisitor<AggExprEmitter>::Visit(E);
  }

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *E) {
    // For aggregates, we should always be able to emit the variable
    // as an l-value unless it's a reference.  This is due to the fact
    // that we can't actually ever see a normal l2r conversion on an
    // aggregate in C++, and in C there's no language standard
    // actively preventing us from listing variables in the captures
    // list of a block.
    if (E->getDecl()->getType()->isReferenceType()) {
      if (CodeGenFunction::ConstantEmission result
            = CGF.tryEmitAsConstant(E)) {
        EmitFinalDestCopy(E->getType(), result.getReferenceLValue(CGF, E));
        return;
      }
    }

    EmitAggLoadOfLValue(E);
  }

  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    Visit(DAE->getExpr());
  }
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
    Visit(DIE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
  void VisitLambdaExpr(LambdaExpr *E);
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    if (E->isGLValue()) {
      LValue LV = CGF.EmitPseudoObjectLValue(E);
      return EmitFinalDestCopy(E->getType(), LV);
    }

    CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
  }

  void VisitVAArgExpr(VAArgExpr *E);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    RValue Res = CGF.EmitAtomicExpr(E);
    EmitFinalDestCopy(E->getType(), Res);
  }
};
}  // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents an lvalue, emit the address of the lvalue, then copy the
/// result into the destination slot.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);

  // If the type of the l-value is atomic, then do an atomic load.
  if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
    CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
    return;
  }

  EmitFinalDestCopy(E->getType(), LV);
}

/// \brief True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}

/// \brief Perform the final move to DestPtr if for some reason
/// getReturnValueSlot() didn't use it directly.
///
/// The idea is that you do something like this:
///   RValue Result = EmitSomething(..., getReturnValueSlot());
///   EmitMoveFromReturnSlot(E, Result);
///
/// If nothing interferes, this will cause the result to be emitted
/// directly into the return value slot.  Otherwise, a final move
/// will be performed.
void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue src) {
  if (shouldUseDestForReturnSlot()) {
    // Logically, Dest.getAddr() should equal Src.getAggregateAddr().
    // The possibility of undef rvalues complicates that a lot,
    // though, so we can't really assert.
    return;
  }

  // Otherwise, copy from there to the destination.
  assert(Dest.getPointer() != src.getAggregatePointer());
  EmitFinalDestCopy(E->getType(), src);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
  assert(src.isAggregate() && "value must be aggregate value!");
  LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
  EmitFinalDestCopy(type, srcLV);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src) {
  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context that doesn't care about the result.  Note that loads
  // from volatile l-values force the existence of a non-ignored
  // destination.
  if (Dest.isIgnored())
    return;

  AggValueSlot srcAgg =
    AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
                            needsGC(type), AggValueSlot::IsAliased);
  EmitCopy(type, Dest, srcAgg);
}

/// Perform a copy from the source into the destination.
///
/// \param type - the type of the aggregate being copied; qualifiers are
///   ignored
void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
                              const AggValueSlot &src) {
  if (dest.requiresGCollection()) {
    CharUnits sz = CGF.getContext().getTypeSizeInChars(type);
    llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      dest.getAddress(),
                                                      src.getAddress(),
                                                      size);
    return;
  }

  // Otherwise, do a plain aggregate copy from the source into the
  // destination.  The copy is volatile if either side is volatile.
  CGF.EmitAggregateCopy(dest.getAddress(), src.getAddress(), type,
                        dest.isVolatile() || src.isVolatile());
}

/// \brief Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
void
AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
  // Emit an array containing the elements.  The array is externally destructed
  // if the std::initializer_list object is.
  ASTContext &Ctx = CGF.getContext();
  LValue Array = CGF.EmitLValue(E->getSubExpr());
  assert(Array.isSimple() && "initializer_list array not a simple lvalue");
  Address ArrayPtr = Array.getAddress();

  const ConstantArrayType *ArrayType =
      Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
  assert(ArrayType && "std::initializer_list constructed from non-array");

  // FIXME: Perform the checks on the field types in SemaInit.
  RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
  RecordDecl::field_iterator Field = Record->field_begin();
  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  // Start pointer.
  if (!Field->getType()->isPointerType() ||
      !Ctx.hasSameType(Field->getType()->getPointeeType(),
                       ArrayType->getElementType())) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  AggValueSlot Dest = EnsureSlot(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
  llvm::Value *IdxStart[] = { Zero, Zero };
  llvm::Value *ArrayStart =
      Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxStart, "arraystart");
  CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
  ++Field;

  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  llvm::Value *Size = Builder.getInt(ArrayType->getSize());
  LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  if (Field->getType()->isPointerType() &&
      Ctx.hasSameType(Field->getType()->getPointeeType(),
                      ArrayType->getElementType())) {
    // End pointer.
    llvm::Value *IdxEnd[] = { Zero, Size };
    llvm::Value *ArrayEnd =
        Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxEnd, "arrayend");
    CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
  } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
    // Length.
    CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
  } else {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }
}

/// \brief Determine if E is a trivial array filler, that is, one that is
/// equivalent to zero-initialization.
static bool isTrivialFiller(Expr *E) {
  if (!E)
    return true;

  if (isa<ImplicitValueInitExpr>(E))
    return true;

  if (auto *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits())
      return false;
    return isTrivialFiller(ILE->getArrayFiller());
  }

  if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
    return Cons->getConstructor()->isDefaultConstructor() &&
           Cons->getConstructor()->isTrivial();

  // FIXME: Are there other cases where we can avoid emitting an initializer?
  return false;
}

/// \brief Emit initialization of an array from an initializer list.
void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                                   QualType elementType, InitListExpr *E) {
  uint64_t NumInitElements = E->getNumInits();

  uint64_t NumArrayElements = AType->getNumElements();
  assert(NumInitElements <= NumArrayElements);

  // DestPtr is an array*.  Construct an elementType* by drilling
  // down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = { zero, zero };
  llvm::Value *begin =
    Builder.CreateInBoundsGEP(DestPtr.getPointer(), indices, "arrayinit.begin");

  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
    DestPtr.getAlignment().alignmentOfArrayElement(elementSize);

  // Exception safety requires us to destroy all the
  // already-constructed members if an initializer throws.
  // For that, we'll need an EH cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  Address endOfInit = Address::invalid();
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = nullptr;
  if (CGF.needsEHCleanup(dtorKind)) {
    // In principle we could tell the cleanup where we are more
    // directly, but the control flow can get so varied here that it
    // would actually be quite complex.  Therefore we go through an
    // alloca.
    endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
                                     "arrayinit.endOfInit");
    cleanupDominator = Builder.CreateStore(begin, endOfInit);
    CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         elementAlign,
                                         CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();

  // Otherwise, remember that we didn't need a cleanup.
  } else {
    dtorKind = QualType::DK_none;
  }

  llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

  // The 'current element to initialize'.  The invariants on this
  // variable are complicated.  Essentially, after each iteration of
  // the loop, it points to the last initialized element, except
  // that it points to the beginning of the array before any
  // elements have been initialized.
  llvm::Value *element = begin;

  // Emit the explicit initializers.
  for (uint64_t i = 0; i != NumInitElements; ++i) {
    // Advance to the next element.
    if (i > 0) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");

      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    LValue elementLV =
      CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
    EmitInitializationToLValue(E->getInit(i), elementLV);
  }

  // Check whether there's a non-trivial array-fill expression.
  Expr *filler = E->getArrayFiller();
  bool hasTrivialFiller = isTrivialFiller(filler);

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression.  We can skip this if we're
  // emitting to zeroed memory.
  if (NumInitElements != NumArrayElements &&
      !(Dest.isZeroed() && hasTrivialFiller &&
        CGF.getTypes().isZeroInitializable(elementType))) {

    // Use an actual loop.  This is basically
    //   do { *array++ = filler; } while (array != end);

    // Advance to the start of the rest of the array.
    if (NumInitElements) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    // Compute the end of the array.
    llvm::Value *end = Builder.CreateInBoundsGEP(begin,
                      llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
                                                 "arrayinit.end");

    llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

    // Jump into the body.
    CGF.EmitBlock(bodyBB);
    llvm::PHINode *currentElement =
      Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    currentElement->addIncoming(element, entryBB);

    // Emit the actual filler expression.
    LValue elementLV =
      CGF.MakeAddrLValue(Address(currentElement, elementAlign), elementType);
    if (filler)
      EmitInitializationToLValue(filler, elementLV);
    else
      EmitNullInitializationToLValue(elementLV);

    // Move on to the next element.
    llvm::Value *nextElement =
      Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");

    // Tell the EH cleanup that we finished with the last element.
    if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);

    // Leave the loop if we're done.
    llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                             "arrayinit.done");
    llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    Builder.CreateCondBr(done, endBB, bodyBB);
    currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

    CGF.EmitBlock(endBB);
  }

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
}

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->GetTemporaryExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  EmitFinalDestCopy(e->getType(), CGF.getOpaqueLValueMapping(e));
}

void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (Dest.isPotentiallyAliased() &&
      E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    EmitAggLoadOfLValue(E);
    return;
  }

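  // Otherwise, emit the compound literal's initializer directly into the
  // destination slot (or into a temporary if the destination is ignored).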
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitAggExpr(E->getInitializer(), Slot);
}

/// Attempt to look through various unimportant expressions to find a
/// cast of the given kind.
static Expr *findPeephole(Expr *op, CastKind kind) {
  while (true) {
    op = op->IgnoreParens();
    if (CastExpr *castE = dyn_cast<CastExpr>(op)) {
      if (castE->getCastKind() == kind)
        return castE->getSubExpr();
      if (castE->getCastKind() == CK_NoOp)
        continue;
    }
    return nullptr;
  }
}

void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
    CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    // FIXME: Can this actually happen? We have no test coverage for it.
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
                                      CodeGenFunction::TCK_Load);
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    // Evaluate even if the destination is ignored.
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    Address CastPtr =
      Builder.CreateElementBitCast(Dest.getAddress(), CGF.ConvertType(Ty));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                "should have been unpacked before we got here");
  }

  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic: {
    bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);

    // Determine the atomic and value types.
    QualType atomicType = E->getSubExpr()->getType();
    QualType valueType = E->getType();
    if (isToAtomic) std::swap(atomicType, valueType);

    assert(atomicType->isAtomicType());
    assert(CGF.getContext().hasSameUnqualifiedType(valueType,
                          atomicType->castAs<AtomicType>()->getValueType()));

    // Just recurse normally if we're ignoring the result or the
    // atomic type doesn't change representation.
    if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
      return Visit(E->getSubExpr());
    }

    CastKind peepholeTarget =
      (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);

    // These two cases are reverses of each other; try to peephole them.
    if (Expr *op = findPeephole(E->getSubExpr(), peepholeTarget)) {
      assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
                                                     E->getType()) &&
           "peephole significantly changed types?");
      return Visit(op);
    }

    // If we're converting an r-value of non-atomic type to an r-value
    // of atomic type, just emit directly into the relevant sub-object.
    if (isToAtomic) {
      AggValueSlot valueDest = Dest;
      if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
        // Zero-initialize.  (Strictly speaking, we only need to initialize
        // the padding at the end, but this is simpler.)
        if (!Dest.isZeroed())
          CGF.EmitNullInitialization(Dest.getAddress(), atomicType);

        // Build a GEP to refer to the subobject.
        Address valueAddr =
            CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0,
                                        CharUnits());
        valueDest = AggValueSlot::forAddr(valueAddr,
                                          valueDest.getQualifiers(),
                                          valueDest.isExternallyDestructed(),
                                          valueDest.requiresGCollection(),
                                          valueDest.isPotentiallyAliased(),
                                          AggValueSlot::IsZeroed);
      }

      CGF.EmitAggExpr(E->getSubExpr(), valueDest);
      return;
    }

    // Otherwise, we're converting an atomic type to a non-atomic type.
    // Make an atomic temporary, emit into that, and then copy the value out.
    AggValueSlot atomicSlot =
      CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
    CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);

    Address valueAddr =
      Builder.CreateStructGEP(atomicSlot.getAddress(), 0, CharUnits());
    RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
    return EmitFinalDestCopy(valueType, rvalue);
  }

  case CK_LValueToRValue:
    // If we're loading from a volatile type, force the destination
    // into existence.
    if (E->getSubExpr()->getType().isVolatileQualified()) {
      EnsureDest(E->getType());
      return Visit(E->getSubExpr());
    }

    // fallthrough

  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_BuiltinFnToFnPtr:
  case CK_ZeroToOCLEvent:
  case CK_AddressSpaceConversion:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}

void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
  EmitMoveFromReturnSlot(E, RV);
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
  EmitMoveFromReturnSlot(E, RV);
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
                                                    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E->getType(), LV);
}

/// Is the value of the given expression possibly a reference to or
/// into a __block variable?
static bool isBlockVarRef(const Expr *E) {
  // Make sure we look through parens.
  E = E->IgnoreParens();

  // Check for a direct reference to a __block variable.
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
    return (var && var->hasAttr<BlocksAttr>());
  }

  // More complicated stuff.

  // Binary operators.
  if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
    // For an assignment or pointer-to-member operation, just care
    // about the LHS.
    if (op->isAssignmentOp() || op->isPtrMemOp())
      return isBlockVarRef(op->getLHS());

    // For a comma, just care about the RHS.
    if (op->getOpcode() == BO_Comma)
      return isBlockVarRef(op->getRHS());

    // FIXME: pointer arithmetic?
    return false;

  // Check both sides of a conditional operator.
  } else if (const AbstractConditionalOperator *op
               = dyn_cast<AbstractConditionalOperator>(E)) {
    return isBlockVarRef(op->getTrueExpr())
        || isBlockVarRef(op->getFalseExpr());

  // OVEs are required to support BinaryConditionalOperators.
  } else if (const OpaqueValueExpr *op
               = dyn_cast<OpaqueValueExpr>(E)) {
    if (const Expr *src = op->getSourceExpr())
      return isBlockVarRef(src);

  // Casts are necessary to get things like (*(int*)&var) = foo().
  // We don't really care about the kind of cast here, except
  // we don't want to look through l2r casts, because it's okay
  // to get the *value* in a __block variable.
  } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
    if (cast->getCastKind() == CK_LValueToRValue)
      return false;
    return isBlockVarRef(cast->getSubExpr());

  // Handle unary operators.  Again, just aggressively look through
  // it, ignoring the operation.
  } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
    return isBlockVarRef(uop->getSubExpr());

  // Look into the base of a field access.
  } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
    return isBlockVarRef(mem->getBase());

  // Look into the base of a subscript.
  } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
    return isBlockVarRef(sub->getBase());
  }

  return false;
}

void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  // If the LHS might be a __block variable, and the RHS can
  // potentially cause a block copy, we need to evaluate the RHS first
  // so that the assignment goes the right place.
  // This is pretty semantically fragile.
  if (isBlockVarRef(E->getLHS()) &&
      E->getRHS()->HasSideEffects(CGF.getContext())) {
    // Ensure that we have a destination, and evaluate the RHS into that.
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());

    // Now emit the LHS and copy into it.
    LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // That copy is an atomic copy if the LHS is atomic.
    if (LHS.getType()->isAtomicType() ||
        CGF.LValueIsSuitableForInlineAtomic(LHS)) {
      CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
      return;
    }

    EmitCopy(E->getLHS()->getType(),
             AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                                     needsGC(E->getLHS()->getType()),
                                     AggValueSlot::IsAliased),
             Dest);
    return;
  }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // If we have an atomic type, evaluate into the destination and then
  // do an atomic copy.
  if (LHS.getType()->isAtomicType() ||
      CGF.LValueIsSuitableForInlineAtomic(LHS)) {
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());
    CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
    return;
  }

  // Codegen the RHS so that it stores directly into the LHS.
  AggValueSlot LHSSlot =
    AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                            needsGC(E->getLHS()->getType()),
                            AggValueSlot::IsAliased);
  // A non-volatile aggregate destination might have a volatile member.
  if (!LHSSlot.isVolatile() &&
      CGF.hasVolatileMember(E->getLHS()->getType()))
    LHSSlot.setVolatile(true);

  CGF.EmitAggExpr(E->getRHS(), LHSSlot);

  // Copy into the destination if the assignment isn't ignored.
  EmitFinalDestCopy(E->getType(), LHS);
}

void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
                           CGF.getProfileCount(E));

  // Save whether the destination's lifetime is externally managed.
  bool isExternallyDestructed = Dest.isExternallyDestructed();

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  CGF.incrementProfileCounter(E);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that it's already being destructed.
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  CGF.EmitBlock(ContBlock);
}

void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr());
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  Address ArgValue = Address::invalid();
  Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  // If EmitVAArg fails, emit an error.
  if (!ArgPtr.isValid()) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }

  EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}

void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether it was externally destructed.
  bool wasExternallyDestructed = Dest.isExternallyDestructed();
  EnsureDest(E->getType());

  // We're going to push a destructor if there isn't already one.
  Dest.setExternallyDestructed();

  Visit(E->getSubExpr());

  // Push that destructor we promised.
  if (!wasExternallyDestructed)
    CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
}

void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}

void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
    const CXXInheritedCtorInitExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitInheritedCXXConstructorCall(
      E->getConstructor(), E->constructsVBase(), Slot.getAddress(),
      E->inheritedFromVBase(), E);
}

void
AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitLambdaExpr(E, Slot);
}

void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
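  // Enter the full-expression and emit its subexpression; the
  // RunCleanupsScope runs any cleanups pushed for temporaries when it goes
  // out of scope.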
  CGF.enterFullExpression(E);
  CodeGenFunction::RunCleanupsScope cleanups(CGF);
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}

void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}

/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true.  This can return false if uncertain, so it just
/// handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0
  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  // +0.0
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  // int()
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer;
  // '\0'
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}


void
AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
    return;
  } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
    return EmitNullInitializationToLValue(LV);
  } else if (isa<NoInitExpr>(E)) {
    // Do nothing.
    return;
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E);
    return CGF.EmitStoreThroughLValue(RV, LV);
  }

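  // Otherwise, dispatch on how values of this type are evaluated: complex,
  // aggregate, or scalar.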
  switch (CGF.getEvaluationKind(type)) {
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
                                               AggValueSlot::IsDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                               AggValueSlot::IsNotAliased,
                                               Dest.isZeroed()));
    return;
  case TEK_Scalar:
    if (LV.isSimple()) {
      CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
    } else {
      CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
    }
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
    return;

  if (CGF.hasScalarEvaluationKind(type)) {
    // For non-aggregates, we can store the appropriate null constant.
    llvm::Value *null = CGF.CGM.EmitNullConstant(type);
    // Note that the following is not equivalent to
    // EmitStoreThroughBitfieldLValue for ARC types.
    if (lv.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
    } else {
      assert(lv.isSimple());
      CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
    }
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
  }
}

void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
    new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                             llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

1147261991Sdim  AggValueSlot Dest = EnsureSlot(E->getType());
1148218893Sdim
1149296417Sdim  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1150234353Sdim
1151193326Sed  // Handle initialization of an array.
1152193326Sed  if (E->getType()->isArrayType()) {
1153234982Sdim    if (E->isStringLiteralInit())
1154234982Sdim      return Visit(E->getInit(0));
1155193326Sed
1156234353Sdim    QualType elementType =
1157234353Sdim        CGF.getContext().getAsArrayType(E->getType())->getElementType();
1158193326Sed
1159296417Sdim    auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
1160296417Sdim    EmitArrayInit(Dest.getAddress(), AType, elementType, E);
1161193326Sed    return;
1162193326Sed  }
1163198092Srdivacky
1164276479Sdim  if (E->getType()->isAtomicType()) {
1165276479Sdim    // An _Atomic(T) object can be list-initialized from an expression
1166276479Sdim    // of the same type.
1167276479Sdim    assert(E->getNumInits() == 1 &&
1168276479Sdim           CGF.getContext().hasSameUnqualifiedType(E->getInit(0)->getType(),
1169276479Sdim                                                   E->getType()) &&
1170276479Sdim           "unexpected list initialization for atomic object");
1171276479Sdim    return Visit(E->getInit(0));
1172276479Sdim  }
1173276479Sdim
1174193326Sed  assert(E->getType()->isRecordType() && "Only support structs/unions here!");
1175198092Srdivacky
1176193326Sed  // Do struct initialization; this code just sets each individual member
1177193326Sed  // to the appropriate value.  This makes bitfield support automatic;
1178193326Sed  // the disadvantage is that the generated code is more difficult for
1179193326Sed  // the optimizer, especially with bitfields.
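  // Illustrative example (editorial addition, not from the original source):
  // for
  //
  //   struct S { int a : 3; unsigned b; double c; };
  //   struct S s = { 1, 2 };
  //
  // the loop below emits a bitfield store for 'a', a scalar store for 'b',
  // and a null-initialization of 'c' (which has no explicit initializer),
  // unless the destination slot is already known to be zeroed.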
1180193326Sed  unsigned NumInitElements = E->getNumInits();
1181224145Sdim  RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();
1182251662Sdim
1183309124Sdim  // We'll need to enter cleanup scopes in case any of the element
1184309124Sdim  // initializers throws an exception.
1185309124Sdim  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
1186309124Sdim  llvm::Instruction *cleanupDominator = nullptr;
1187309124Sdim
1188309124Sdim  unsigned curInitIndex = 0;
1189309124Sdim
1190309124Sdim  // Emit initialization of base classes.
1191309124Sdim  if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
1192309124Sdim    assert(E->getNumInits() >= CXXRD->getNumBases() &&
1193309124Sdim           "missing initializer for base class");
1194309124Sdim    for (auto &Base : CXXRD->bases()) {
1195309124Sdim      assert(!Base.isVirtual() && "should not see vbases here");
1196309124Sdim      auto *BaseRD = Base.getType()->getAsCXXRecordDecl();
1197309124Sdim      Address V = CGF.GetAddressOfDirectBaseInCompleteClass(
1198309124Sdim          Dest.getAddress(), CXXRD, BaseRD,
1199309124Sdim          /*isBaseVirtual*/ false);
1200309124Sdim      AggValueSlot AggSlot =
1201309124Sdim        AggValueSlot::forAddr(V, Qualifiers(),
1202309124Sdim                              AggValueSlot::IsDestructed,
1203309124Sdim                              AggValueSlot::DoesNotNeedGCBarriers,
1204309124Sdim                              AggValueSlot::IsNotAliased);
1205309124Sdim      CGF.EmitAggExpr(E->getInit(curInitIndex++), AggSlot);
1206309124Sdim
1207309124Sdim      if (QualType::DestructionKind dtorKind =
1208309124Sdim              Base.getType().isDestructedType()) {
1209309124Sdim        CGF.pushDestroy(dtorKind, V, Base.getType());
1210309124Sdim        cleanups.push_back(CGF.EHStack.stable_begin());
1211309124Sdim      }
1212309124Sdim    }
1213309124Sdim  }
1214309124Sdim
1215251662Sdim  // Prepare a 'this' for CXXDefaultInitExprs.
1216296417Sdim  CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());
1217251662Sdim
1218224145Sdim  if (record->isUnion()) {
1219193326Sed    // Only initialize one field of a union. The field itself is
1220193326Sed    // specified by the initializer list.
1221193326Sed    if (!E->getInitializedFieldInUnion()) {
1222193326Sed      // Empty union; we have nothing to do.
1223198092Srdivacky
1224193326Sed#ifndef NDEBUG
1225193326Sed      // Make sure that it's really an empty union and not a failure of
1226193326Sed      // semantic analysis.
1227276479Sdim      for (const auto *Field : record->fields())
1228193326Sed        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
1229193326Sed#endif
1230193326Sed      return;
1231193326Sed    }
1232193326Sed
1233193326Sed    // FIXME: volatility
1234193326Sed    FieldDecl *Field = E->getInitializedFieldInUnion();
1235218893Sdim
1236234982Sdim    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
1237193326Sed    if (NumInitElements) {
1238193326Sed      // Store the initializer into the field
1239224145Sdim      EmitInitializationToLValue(E->getInit(0), FieldLoc);
1240193326Sed    } else {
1241218893Sdim      // Default-initialize to null.
1242224145Sdim      EmitNullInitializationToLValue(FieldLoc);
1243193326Sed    }
1244193326Sed
1245193326Sed    return;
1246193326Sed  }
1247198092Srdivacky
1248193326Sed  // Here we iterate over the fields; this makes it simpler to both
1249193326Sed  // default-initialize fields and skip over unnamed fields.
1250276479Sdim  for (const auto *field : record->fields()) {
1251224145Sdim    // We're done once we hit the flexible array member.
1252224145Sdim    if (field->getType()->isIncompleteArrayType())
1253193326Sed      break;
1254193326Sed
1255224145Sdim    // Always skip anonymous bitfields.
1256224145Sdim    if (field->isUnnamedBitfield())
1257193326Sed      continue;
1258193326Sed
1259224145Sdim    // We're done if we reach the end of the explicit initializers, we
1260224145Sdim    // have a zeroed object, and the rest of the fields are
1261224145Sdim    // zero-initializable.
1262224145Sdim    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
1263218893Sdim        CGF.getTypes().isZeroInitializable(E->getType()))
1264218893Sdim      break;
1265218893Sdim
1266234982Sdim
1267276479Sdim    LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
1268193326Sed    // We never generate write-barriers for initialized fields.
1269224145Sdim    LV.setNonGC(true);
1270218893Sdim
1271224145Sdim    if (curInitIndex < NumInitElements) {
1272204962Srdivacky      // Store the initializer into the field.
1273224145Sdim      EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
1274193326Sed    } else {
1275193326Sed      // We're out of initializers; default-initialize to null.
1276224145Sdim      EmitNullInitializationToLValue(LV);
1277193326Sed    }
1278224145Sdim
1279224145Sdim    // Push a destructor if necessary.
1280224145Sdim    // FIXME: if we have an array of structures, all explicitly
1281224145Sdim    // initialized, we can end up pushing a linear number of cleanups.
1282224145Sdim    bool pushedCleanup = false;
1283224145Sdim    if (QualType::DestructionKind dtorKind
1284224145Sdim          = field->getType().isDestructedType()) {
1285224145Sdim      assert(LV.isSimple());
1286224145Sdim      if (CGF.needsEHCleanup(dtorKind)) {
1287234353Sdim        if (!cleanupDominator)
1288296417Sdim          cleanupDominator = CGF.Builder.CreateAlignedLoad(
1289296417Sdim              CGF.Int8Ty,
1290296417Sdim              llvm::Constant::getNullValue(CGF.Int8PtrTy),
1291296417Sdim              CharUnits::One()); // placeholder
1292234353Sdim
1293224145Sdim        CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
1294224145Sdim                        CGF.getDestroyer(dtorKind), false);
1295224145Sdim        cleanups.push_back(CGF.EHStack.stable_begin());
1296224145Sdim        pushedCleanup = true;
1297224145Sdim      }
1298224145Sdim    }
1299218893Sdim
1300218893Sdim    // If the GEP didn't get used because of a dead zero init or something
1301218893Sdim    // else, clean it up for -O0 builds and general tidiness.
1302224145Sdim    if (!pushedCleanup && LV.isSimple())
1303218893Sdim      if (llvm::GetElementPtrInst *GEP =
1304296417Sdim            dyn_cast<llvm::GetElementPtrInst>(LV.getPointer()))
1305218893Sdim        if (GEP->use_empty())
1306218893Sdim          GEP->eraseFromParent();
1307193326Sed  }
1308224145Sdim
1309224145Sdim  // Deactivate all the partial cleanups in reverse order, which
1310224145Sdim  // generally means popping them.
1311224145Sdim  for (unsigned i = cleanups.size(); i != 0; --i)
1312234353Sdim    CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);
1313234353Sdim
1314234353Sdim  // Destroy the placeholder if we made one.
1315234353Sdim  if (cleanupDominator)
1316234353Sdim    cleanupDominator->eraseFromParent();
1317193326Sed}
1318193326Sed
1319288943Sdimvoid AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
1320288943Sdim  AggValueSlot Dest = EnsureSlot(E->getType());
1321288943Sdim
1322296417Sdim  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1323288943Sdim  EmitInitializationToLValue(E->getBase(), DestLV);
1324288943Sdim  VisitInitListExpr(E->getUpdater());
1325288943Sdim}
1326288943Sdim
1327193326Sed//===----------------------------------------------------------------------===//
1328193326Sed//                        Entry Points into this File
1329193326Sed//===----------------------------------------------------------------------===//
1330193326Sed
1331218893Sdim/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
1332218893Sdim/// non-zero bytes that will be stored when outputting the initializer for the
1333218893Sdim/// specified initializer expression.
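///
/// For example (editorial illustration, not from the original source): for
///
///   struct S { int a[16]; };
///   struct S s = { { 1, 2 } };
///
/// only the first two array elements need non-zero stores, so this returns
/// 8 bytes even though the type itself is 64 bytes wide.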
1334221345Sdimstatic CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
1335221345Sdim  E = E->IgnoreParens();
1336218893Sdim
1337218893Sdim  // 0 and 0.0 won't require any non-zero stores!
1338221345Sdim  if (isSimpleZero(E, CGF)) return CharUnits::Zero();
1339218893Sdim
1340218893Sdim  // If this is an initlist expr, sum up the sizes of the (present)
1341218893Sdim  // elements.  If this is something weird, assume the whole thing is non-zero.
1342218893Sdim  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
1343276479Sdim  if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
1344221345Sdim    return CGF.getContext().getTypeSizeInChars(E->getType());
1345218893Sdim
1346218893Sdim  // InitListExprs for structs have to be handled carefully.  If there are
1347218893Sdim  // reference members, we need to consider the size of the reference, not the
1348218893Sdim  // referencee.  InitListExprs for unions and arrays can't have references.
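  // Editorial illustration (not from the original source): given
  //
  //   struct R { int &r; };
  //   R x = { i };
  //
  // the reference member contributes the width of a pointer (8 bytes on a
  // typical 64-bit target) to the count, not the size of the referenced int.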
1349218893Sdim  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
1350218893Sdim    if (!RT->isUnionType()) {
1351218893Sdim      RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
1352221345Sdim      CharUnits NumNonZeroBytes = CharUnits::Zero();
1353218893Sdim
1354218893Sdim      unsigned ILEElement = 0;
1355309124Sdim      if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD))
1356309124Sdim        while (ILEElement != CXXRD->getNumBases())
1357309124Sdim          NumNonZeroBytes +=
1358309124Sdim              GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF);
1359276479Sdim      for (const auto *Field : SD->fields()) {
1360218893Sdim        // We're done once we hit the flexible array member or run out of
1361218893Sdim        // InitListExpr elements.
1362218893Sdim        if (Field->getType()->isIncompleteArrayType() ||
1363218893Sdim            ILEElement == ILE->getNumInits())
1364218893Sdim          break;
1365218893Sdim        if (Field->isUnnamedBitfield())
1366218893Sdim          continue;
1367218893Sdim
1368218893Sdim        const Expr *E = ILE->getInit(ILEElement++);
1369218893Sdim
1370218893Sdim        // Reference values are always non-null and have the width of a pointer.
1371218893Sdim        if (Field->getType()->isReferenceType())
1372221345Sdim          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
1373251662Sdim              CGF.getTarget().getPointerWidth(0));
1374218893Sdim        else
1375218893Sdim          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
1376218893Sdim      }
1377218893Sdim
1378218893Sdim      return NumNonZeroBytes;
1379218893Sdim    }
1380218893Sdim  }
1381218893Sdim
1382218893Sdim
1383221345Sdim  CharUnits NumNonZeroBytes = CharUnits::Zero();
1384218893Sdim  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
1385218893Sdim    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
1386218893Sdim  return NumNonZeroBytes;
1387218893Sdim}
1388218893Sdim
1389218893Sdim/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
1390218893Sdim/// zeros in it, emit a memset and avoid storing the individual zeros.
1391218893Sdim///
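/// For illustration (editorial addition, not from the original source): given
///
///   struct S { int a[64]; };
///   struct S s = { { 1, 2, 3 } };
///
/// the object is 256 bytes but only 12 bytes are known to be non-zero, so a
/// memset of the whole slot is emitted up front and the zero elements then
/// need no individual stores.
///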
1392218893Sdimstatic void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
1393218893Sdim                                     CodeGenFunction &CGF) {
1394218893Sdim  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
1395218893Sdim  // volatile stores.
1396296417Sdim  if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
1397276479Sdim    return;
1398221345Sdim
1399221345Sdim  // C++ objects with a user-declared constructor don't need zeroing.
1400243830Sdim  if (CGF.getLangOpts().CPlusPlus)
1401221345Sdim    if (const RecordType *RT = CGF.getContext()
1402221345Sdim                       .getBaseElementType(E->getType())->getAs<RecordType>()) {
1403221345Sdim      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1404221345Sdim      if (RD->hasUserDeclaredConstructor())
1405221345Sdim        return;
1406221345Sdim    }
1407221345Sdim
1408218893Sdim  // If the type is 16-bytes or smaller, prefer individual stores over memset.
1409296417Sdim  CharUnits Size = CGF.getContext().getTypeSizeInChars(E->getType());
1410296417Sdim  if (Size <= CharUnits::fromQuantity(16))
1411218893Sdim    return;
1412218893Sdim
1413218893Sdim  // Check to see if over 3/4 of the initializer is known to be zero.  If so,
1414218893Sdim  // we prefer to emit memset + individual stores for the rest.
1415221345Sdim  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
1416296417Sdim  if (NumNonZeroBytes*4 > Size)
1417218893Sdim    return;
1418218893Sdim
1419218893Sdim  // Okay, it seems like a good idea to use an initial memset; emit the call.
1420296417Sdim  llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());
1421218893Sdim
1422296417Sdim  Address Loc = Slot.getAddress();
1423296417Sdim  Loc = CGF.Builder.CreateElementBitCast(Loc, CGF.Int8Ty);
1424296417Sdim  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);
1425218893Sdim
1426218893Sdim  // Tell the AggExprEmitter that the slot is known zero.
1427218893Sdim  Slot.setZeroed();
1428218893Sdim}
1429218893Sdim
1430218893Sdim
1431218893Sdim
1432218893Sdim
1433193326Sed/// EmitAggExpr - Emit the computation of the specified expression of aggregate
1434193326Sed/// type.  The result is computed into the address held by the given slot.
1435193326Sed/// Note that if the slot is ignored, the value of the aggregate expression is
1436193326Sed/// not needed and the slot need not have a valid address.
1437239462Sdimvoid CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
1438249423Sdim  assert(E && hasAggregateEvaluationKind(E->getType()) &&
1439193326Sed         "Invalid aggregate expression to emit");
1440296417Sdim  assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
1441218893Sdim         "slot has bits but no address");
1442198092Srdivacky
1443218893Sdim  // Optimize the slot if possible.
1444218893Sdim  CheckAggExprForMemSetUse(Slot, E, *this);
1445218893Sdim
1446288943Sdim  AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E));
1447193326Sed}
1448193326Sed
1449203955SrdivackyLValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
1450249423Sdim  assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
1451296417Sdim  Address Temp = CreateMemTemp(E->getType());
1452212904Sdim  LValue LV = MakeAddrLValue(Temp, E->getType());
1453226633Sdim  EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
1454226633Sdim                                         AggValueSlot::DoesNotNeedGCBarriers,
1455226633Sdim                                         AggValueSlot::IsNotAliased));
1456212904Sdim  return LV;
1457203955Srdivacky}
1458203955Srdivacky
1459296417Sdimvoid CodeGenFunction::EmitAggregateCopy(Address DestPtr,
1460296417Sdim                                        Address SrcPtr, QualType Ty,
1461239462Sdim                                        bool isVolatile,
1462243830Sdim                                        bool isAssignment) {
1463193326Sed  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
1464198092Srdivacky
1465243830Sdim  if (getLangOpts().CPlusPlus) {
1466207619Srdivacky    if (const RecordType *RT = Ty->getAs<RecordType>()) {
1467208600Srdivacky      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
1468208600Srdivacky      assert((Record->hasTrivialCopyConstructor() ||
1469226633Sdim              Record->hasTrivialCopyAssignment() ||
1470226633Sdim              Record->hasTrivialMoveConstructor() ||
1471288943Sdim              Record->hasTrivialMoveAssignment() ||
1472288943Sdim              Record->isUnion()) &&
1473249423Sdim             "Trying to aggregate-copy a type without a trivial copy/move "
1474208600Srdivacky             "constructor or assignment operator");
1475208600Srdivacky      // Ignore empty classes in C++.
1476208600Srdivacky      if (Record->isEmpty())
1477207619Srdivacky        return;
1478207619Srdivacky    }
1479207619Srdivacky  }
1480207619Srdivacky
1481193326Sed  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
1482193326Sed  // C99 6.5.16.1p3, which states "If the value being stored in an object is
1483193326Sed  // read from another object that overlaps in any way the storage of the first
1484193326Sed  // object, then the overlap shall be exact and the two objects shall have
1485193326Sed  // qualified or unqualified versions of a compatible type."
1486193326Sed  //
1487193326Sed  // memcpy is not defined if the source and destination pointers are exactly
1488193326Sed  // equal, but other compilers do this optimization, and almost every memcpy
1489193326Sed  // implementation handles this case safely.  If there is a libc that does not
1490193326Sed  // safely handle this, we can add a target hook.
1491198092Srdivacky
1492296417Sdim  // Get data size info for this aggregate. If this is an assignment,
1493296417Sdim  // don't copy the tail padding, because we might be assigning into a
1494296417Sdim  // base subobject where the tail padding is claimed.  Otherwise,
1495296417Sdim  // copying it is fine.
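  // Editorial illustration (assumes the usual Itanium-style layout): for
  //
  //   struct A { A() {} int i; char c; };  // non-POD, so tail padding is reusable
  //   struct B : A { char d; };            // 'd' may be placed in A's tail padding
  //
  // assigning into the A subobject of a B must copy only A's data size
  // (5 bytes here), not sizeof(A), or the copy could clobber 'd'.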
1496243830Sdim  std::pair<CharUnits, CharUnits> TypeInfo;
1497243830Sdim  if (isAssignment)
1498243830Sdim    TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
1499243830Sdim  else
1500243830Sdim    TypeInfo = getContext().getTypeInfoInChars(Ty);
1501198092Srdivacky
1502288943Sdim  llvm::Value *SizeVal = nullptr;
1503288943Sdim  if (TypeInfo.first.isZero()) {
1504288943Sdim    // But note that getTypeInfo returns 0 for a VLA.
1505288943Sdim    if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
1506288943Sdim            getContext().getAsArrayType(Ty))) {
1507288943Sdim      QualType BaseEltTy;
1508288943Sdim      SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
1509288943Sdim      TypeInfo = getContext().getTypeInfoDataSizeInChars(BaseEltTy);
1510288943Sdim      std::pair<CharUnits, CharUnits> LastElementTypeInfo;
1511288943Sdim      if (!isAssignment)
1512288943Sdim        LastElementTypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
1513288943Sdim      assert(!TypeInfo.first.isZero());
1514288943Sdim      SizeVal = Builder.CreateNUWMul(
1515288943Sdim          SizeVal,
1516288943Sdim          llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
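      // Editorial note: for a plain copy (not an assignment) the size below
      // becomes count * dsize(elt) - dsize(elt) + sizeof(elt), i.e. the last
      // element is copied with its full size so its tail padding is included.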
1517288943Sdim      if (!isAssignment) {
1518288943Sdim        SizeVal = Builder.CreateNUWSub(
1519288943Sdim            SizeVal,
1520288943Sdim            llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
1521288943Sdim        SizeVal = Builder.CreateNUWAdd(
1522288943Sdim            SizeVal, llvm::ConstantInt::get(
1523288943Sdim                         SizeTy, LastElementTypeInfo.first.getQuantity()));
1524288943Sdim      }
1525288943Sdim    }
1526288943Sdim  }
1527288943Sdim  if (!SizeVal) {
1528288943Sdim    SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity());
1529288943Sdim  }
1530198092Srdivacky
1531193326Sed  // FIXME: If we have a volatile struct, the optimizer can remove what might
1532193326Sed  // appear to be `extra' memory ops:
1533193326Sed  //
1534193326Sed  // volatile struct { int i; } a, b;
1535193326Sed  //
1536193326Sed  // int main() {
1537193326Sed  //   a = b;
1538193326Sed  //   a = b;
1539193326Sed  // }
1540193326Sed  //
1541206275Srdivacky  // we need to use a different call here.  We use isVolatile to indicate when
1542193326Sed  // either the source or the destination is volatile.
1543206275Srdivacky
1544296417Sdim  DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
1545296417Sdim  SrcPtr = Builder.CreateElementBitCast(SrcPtr, Int8Ty);
1546206275Srdivacky
1547224145Sdim  // Don't do any of the memmove_collectable tests if GC isn't set.
1548234353Sdim  if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
1549224145Sdim    // fall through
1550224145Sdim  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
1551210299Sed    RecordDecl *Record = RecordTy->getDecl();
1552210299Sed    if (Record->hasObjectMember()) {
1553210299Sed      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
1554210299Sed                                                    SizeVal);
1555210299Sed      return;
1556210299Sed    }
1557224145Sdim  } else if (Ty->isArrayType()) {
1558210299Sed    QualType BaseType = getContext().getBaseElementType(Ty);
1559210299Sed    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
1560210299Sed      if (RecordTy->getDecl()->hasObjectMember()) {
1561210299Sed        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
1562210299Sed                                                      SizeVal);
1563210299Sed        return;
1564210299Sed      }
1565210299Sed    }
1566210299Sed  }
1567243830Sdim
1568296417Sdim  auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);
1569296417Sdim
1570243830Sdim  // Determine the metadata to describe the position of any padding in this
1571243830Sdim  // memcpy, as well as the TBAA tags for the members of the struct, in case
1572243830Sdim  // the optimizer wishes to expand it in to scalar memory operations.
1573296417Sdim  if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
1574296417Sdim    Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);
1575193326Sed}
1576