//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace  {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;
  bool IsResultUnused;

  /// We want to use 'dest' as the return slot except under two
  /// conditions:
  ///   - The destination slot requires garbage collection, so we
  ///     need to use the GC API.
  ///   - The destination slot is potentially aliased.
  bool shouldUseDestForReturnSlot() const {
    return !(Dest.requiresGCollection() || Dest.isPotentiallyAliased());
  }

  ReturnValueSlot getReturnValueSlot() const {
    if (!shouldUseDestForReturnSlot())
      return ReturnValueSlot();

    return ReturnValueSlot(Dest.getAddress(), Dest.isVolatile(),
                           IsResultUnused);
  }

  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }
  void EnsureDest(QualType T) {
    if (!Dest.isIgnored()) return;
    Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
    IsResultUnused(IsResultUnused) { }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents a value lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  void EmitFinalDestCopy(QualType type, const LValue &src);
  void EmitFinalDestCopy(QualType type, RValue src);
  void EmitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  void EmitMoveFromReturnSlot(const Expr *E, RValue Src);

  void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                     QualType elementType, InitListExpr *E);

  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  void Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    StmtVisitor<AggExprEmitter>::Visit(E);
  }

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *E) {
    // For aggregates, we should always be able to emit the variable
    // as an l-value unless it's a reference.  This is due to the fact
    // that we can't actually ever see a normal l2r conversion on an
    // aggregate in C++, and in C there's no language standard
    // actively preventing us from listing variables in the captures
    // list of a block.
    if (E->getDecl()->getType()->isReferenceType()) {
      if (CodeGenFunction::ConstantEmission result
            = CGF.tryEmitAsConstant(E)) {
        EmitFinalDestCopy(E->getType(), result.getReferenceLValue(CGF, E));
        return;
      }
    }

    EmitAggLoadOfLValue(E);
  }

  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    Visit(DAE->getExpr());
  }
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
    Visit(DIE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitLambdaExpr(LambdaExpr *E);
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    if (E->isGLValue()) {
      LValue LV = CGF.EmitPseudoObjectLValue(E);
      return EmitFinalDestCopy(E->getType(), LV);
    }

    CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
  }

  void VisitVAArgExpr(VAArgExpr *E);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    RValue Res = CGF.EmitAtomicExpr(E);
    EmitFinalDestCopy(E->getType(), Res);
  }
};
}  // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents a value lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);

  // If the type of the l-value is atomic, then do an atomic load.
  if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
    CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
    return;
  }

  EmitFinalDestCopy(E->getType(), LV);
}

/// \brief True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}

/// \brief Perform the final move to DestPtr if for some reason
/// getReturnValueSlot() didn't use it directly.
///
/// The idea is that you do something like this:
///   RValue Result = EmitSomething(..., getReturnValueSlot());
///   EmitMoveFromReturnSlot(E, Result);
///
/// If nothing interferes, this will cause the result to be emitted
/// directly into the return value slot.  Otherwise, a final move
/// will be performed.
void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue src) {
  if (shouldUseDestForReturnSlot()) {
    // Logically, Dest.getAddr() should equal Src.getAggregateAddr().
    // The possibility of undef rvalues complicates that a lot,
    // though, so we can't really assert.
    return;
  }

  // Otherwise, copy from there to the destination.
  assert(Dest.getPointer() != src.getAggregatePointer());
  EmitFinalDestCopy(E->getType(), src);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
  assert(src.isAggregate() && "value must be aggregate value!");
  LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
  EmitFinalDestCopy(type, srcLV);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src) {
  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context that doesn't care about the result.  Note that loads
  // from volatile l-values force the existence of a non-ignored
  // destination.
  if (Dest.isIgnored())
    return;

  AggValueSlot srcAgg =
    AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
                            needsGC(type), AggValueSlot::IsAliased);
  EmitCopy(type, Dest, srcAgg);
}

/// Perform a copy from the source into the destination.
///
/// \param type - the type of the aggregate being copied; qualifiers are
///   ignored
void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
                              const AggValueSlot &src) {
  if (dest.requiresGCollection()) {
    CharUnits sz = CGF.getContext().getTypeSizeInChars(type);
    llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      dest.getAddress(),
                                                      src.getAddress(),
                                                      size);
    return;
  }

  // If the result of the assignment is used, copy the LHS there also.
  // It's volatile if either side is.  Use the minimum alignment of
  // the two sides.
  CGF.EmitAggregateCopy(dest.getAddress(), src.getAddress(), type,
                        dest.isVolatile() || src.isVolatile());
}

/// \brief Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
void
AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
  // Emit an array containing the elements.  The array is externally destructed
  // if the std::initializer_list object is.
  ASTContext &Ctx = CGF.getContext();
  LValue Array = CGF.EmitLValue(E->getSubExpr());
  assert(Array.isSimple() && "initializer_list array not a simple lvalue");
  Address ArrayPtr = Array.getAddress();

  const ConstantArrayType *ArrayType =
      Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
  assert(ArrayType && "std::initializer_list constructed from non-array");

  // FIXME: Perform the checks on the field types in SemaInit.
  RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
  RecordDecl::field_iterator Field = Record->field_begin();
  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  // Start pointer.
  if (!Field->getType()->isPointerType() ||
      !Ctx.hasSameType(Field->getType()->getPointeeType(),
                       ArrayType->getElementType())) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  AggValueSlot Dest = EnsureSlot(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
  llvm::Value *IdxStart[] = { Zero, Zero };
  llvm::Value *ArrayStart =
      Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxStart, "arraystart");
  CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
  ++Field;

  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  llvm::Value *Size = Builder.getInt(ArrayType->getSize());
  LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  if (Field->getType()->isPointerType() &&
      Ctx.hasSameType(Field->getType()->getPointeeType(),
                      ArrayType->getElementType())) {
    // End pointer.
    llvm::Value *IdxEnd[] = { Zero, Size };
    llvm::Value *ArrayEnd =
        Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxEnd, "arrayend");
    CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
  } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
    // Length.
    CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
  } else {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }
}

/// \brief Determine if E is a trivial array filler, that is, one that is
/// equivalent to zero-initialization.
static bool isTrivialFiller(Expr *E) {
  if (!E)
    return true;

  if (isa<ImplicitValueInitExpr>(E))
    return true;

  if (auto *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits())
      return false;
    return isTrivialFiller(ILE->getArrayFiller());
  }

  if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
    return Cons->getConstructor()->isDefaultConstructor() &&
           Cons->getConstructor()->isTrivial();

  // FIXME: Are there other cases where we can avoid emitting an initializer?
  return false;
}

/// \brief Emit initialization of an array from an initializer list.
void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                                   QualType elementType, InitListExpr *E) {
  uint64_t NumInitElements = E->getNumInits();

  uint64_t NumArrayElements = AType->getNumElements();
  assert(NumInitElements <= NumArrayElements);

  // DestPtr is an array*.  Construct an elementType* by drilling
  // down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = { zero, zero };
  llvm::Value *begin =
    Builder.CreateInBoundsGEP(DestPtr.getPointer(), indices, "arrayinit.begin");

  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
    DestPtr.getAlignment().alignmentOfArrayElement(elementSize);

  // Exception safety requires us to destroy all the
  // already-constructed members if an initializer throws.
  // For that, we'll need an EH cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  Address endOfInit = Address::invalid();
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = nullptr;
  if (CGF.needsEHCleanup(dtorKind)) {
    // In principle we could tell the cleanup where we are more
    // directly, but the control flow can get so varied here that it
    // would actually be quite complex.  Therefore we go through an
    // alloca.
    endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
                                     "arrayinit.endOfInit");
    cleanupDominator = Builder.CreateStore(begin, endOfInit);
    CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         elementAlign,
                                         CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();

  // Otherwise, remember that we didn't need a cleanup.
  } else {
    dtorKind = QualType::DK_none;
  }

  llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

  // The 'current element to initialize'.  The invariants on this
  // variable are complicated.  Essentially, after each iteration of
  // the loop, it points to the last initialized element, except
  // that it points to the beginning of the array before any
  // elements have been initialized.
  llvm::Value *element = begin;

  // Emit the explicit initializers.
  for (uint64_t i = 0; i != NumInitElements; ++i) {
    // Advance to the next element.
    if (i > 0) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");

      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    LValue elementLV =
      CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
    EmitInitializationToLValue(E->getInit(i), elementLV);
  }

  // Check whether there's a non-trivial array-fill expression.
  Expr *filler = E->getArrayFiller();
  bool hasTrivialFiller = isTrivialFiller(filler);

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression.  We can skip this if we're
  // emitting to zeroed memory.
  if (NumInitElements != NumArrayElements &&
      !(Dest.isZeroed() && hasTrivialFiller &&
        CGF.getTypes().isZeroInitializable(elementType))) {

    // Use an actual loop.  This is basically
    //   do { *array++ = filler; } while (array != end);

    // Advance to the start of the rest of the array.
    if (NumInitElements) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    // Compute the end of the array.
    llvm::Value *end = Builder.CreateInBoundsGEP(begin,
                      llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
                                                 "arrayinit.end");

    llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

    // Jump into the body.
    CGF.EmitBlock(bodyBB);
    llvm::PHINode *currentElement =
      Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    currentElement->addIncoming(element, entryBB);

    // Emit the actual filler expression.
    LValue elementLV =
      CGF.MakeAddrLValue(Address(currentElement, elementAlign), elementType);
    if (filler)
      EmitInitializationToLValue(filler, elementLV);
    else
      EmitNullInitializationToLValue(elementLV);

    // Move on to the next element.
    llvm::Value *nextElement =
      Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");

    // Tell the EH cleanup that we finished with the last element.
    if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);

    // Leave the loop if we're done.
    llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                             "arrayinit.done");
    llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    Builder.CreateCondBr(done, endBB, bodyBB);
    currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

    CGF.EmitBlock(endBB);
  }

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
}

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->GetTemporaryExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  EmitFinalDestCopy(e->getType(), CGF.getOpaqueLValueMapping(e));
}

void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (Dest.isPotentiallyAliased() &&
      E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitAggExpr(E->getInitializer(), Slot);
}

/// Attempt to look through various unimportant expressions to find a
/// cast of the given kind.
static Expr *findPeephole(Expr *op, CastKind kind) {
  while (true) {
    op = op->IgnoreParens();
    if (CastExpr *castE = dyn_cast<CastExpr>(op)) {
      if (castE->getCastKind() == kind)
        return castE->getSubExpr();
      if (castE->getCastKind() == CK_NoOp)
        continue;
    }
    return nullptr;
  }
}

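/// Emit an aggregate-typed cast expression, dispatching on the cast kind.
/// Most kinds either recurse into the sub-expression or are invalid for
/// aggregate types altogether.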
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
    CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    // FIXME: Can this actually happen? We have no test coverage for it.
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
                                      CodeGenFunction::TCK_Load);
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    // Evaluate even if the destination is ignored.
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    Address CastPtr =
      Builder.CreateElementBitCast(Dest.getAddress(), CGF.ConvertType(Ty));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                "should have been unpacked before we got here");
  }

  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic: {
    bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);

    // Determine the atomic and value types.
    QualType atomicType = E->getSubExpr()->getType();
    QualType valueType = E->getType();
    if (isToAtomic) std::swap(atomicType, valueType);

    assert(atomicType->isAtomicType());
    assert(CGF.getContext().hasSameUnqualifiedType(valueType,
                          atomicType->castAs<AtomicType>()->getValueType()));

    // Just recurse normally if we're ignoring the result or the
    // atomic type doesn't change representation.
    if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
      return Visit(E->getSubExpr());
    }

    CastKind peepholeTarget =
      (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);

    // These two cases are reverses of each other; try to peephole them.
    if (Expr *op = findPeephole(E->getSubExpr(), peepholeTarget)) {
      assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
                                                     E->getType()) &&
           "peephole significantly changed types?");
      return Visit(op);
    }

    // If we're converting an r-value of non-atomic type to an r-value
    // of atomic type, just emit directly into the relevant sub-object.
    if (isToAtomic) {
      AggValueSlot valueDest = Dest;
      if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
        // Zero-initialize.  (Strictly speaking, we only need to initialize
        // the padding at the end, but this is simpler.)
        if (!Dest.isZeroed())
          CGF.EmitNullInitialization(Dest.getAddress(), atomicType);

        // Build a GEP to refer to the subobject.
        Address valueAddr =
            CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0,
                                        CharUnits());
        valueDest = AggValueSlot::forAddr(valueAddr,
                                          valueDest.getQualifiers(),
                                          valueDest.isExternallyDestructed(),
                                          valueDest.requiresGCollection(),
                                          valueDest.isPotentiallyAliased(),
                                          AggValueSlot::IsZeroed);
      }

      CGF.EmitAggExpr(E->getSubExpr(), valueDest);
      return;
    }

    // Otherwise, we're converting an atomic type to a non-atomic type.
    // Make an atomic temporary, emit into that, and then copy the value out.
    AggValueSlot atomicSlot =
      CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
    CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);

    Address valueAddr =
      Builder.CreateStructGEP(atomicSlot.getAddress(), 0, CharUnits());
    RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
    return EmitFinalDestCopy(valueType, rvalue);
  }

  case CK_LValueToRValue:
    // If we're loading from a volatile type, force the destination
    // into existence.
    if (E->getSubExpr()->getType().isVolatileQualified()) {
      EnsureDest(E->getType());
      return Visit(E->getSubExpr());
    }

    // fallthrough

  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_BuiltinFnToFnPtr:
  case CK_ZeroToOCLEvent:
  case CK_AddressSpaceConversion:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}

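/// Emit a call expression of aggregate type.  A call returning a reference is
/// emitted as an l-value load; otherwise the destination slot is used as the
/// call's return slot when that is safe.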
void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
  EmitMoveFromReturnSlot(E, RV);
}

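/// Emit an Objective-C message send of aggregate type, reusing the
/// destination as the return slot when possible.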
void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
  EmitMoveFromReturnSlot(E, RV);
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

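/// Emit a GNU statement expression; the value of its final statement is
/// emitted into the destination slot.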
void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
                                                    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E->getType(), LV);
}

/// Is the value of the given expression possibly a reference to or
/// into a __block variable?
static bool isBlockVarRef(const Expr *E) {
  // Make sure we look through parens.
  E = E->IgnoreParens();

  // Check for a direct reference to a __block variable.
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
    return (var && var->hasAttr<BlocksAttr>());
  }

  // More complicated stuff.

  // Binary operators.
  if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
    // For an assignment or pointer-to-member operation, just care
    // about the LHS.
    if (op->isAssignmentOp() || op->isPtrMemOp())
      return isBlockVarRef(op->getLHS());

    // For a comma, just care about the RHS.
    if (op->getOpcode() == BO_Comma)
      return isBlockVarRef(op->getRHS());

    // FIXME: pointer arithmetic?
    return false;

  // Check both sides of a conditional operator.
  } else if (const AbstractConditionalOperator *op
               = dyn_cast<AbstractConditionalOperator>(E)) {
    return isBlockVarRef(op->getTrueExpr())
        || isBlockVarRef(op->getFalseExpr());

  // OVEs are required to support BinaryConditionalOperators.
  } else if (const OpaqueValueExpr *op
               = dyn_cast<OpaqueValueExpr>(E)) {
    if (const Expr *src = op->getSourceExpr())
      return isBlockVarRef(src);

  // Casts are necessary to get things like (*(int*)&var) = foo().
  // We don't really care about the kind of cast here, except
  // we don't want to look through l2r casts, because it's okay
  // to get the *value* in a __block variable.
  } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
    if (cast->getCastKind() == CK_LValueToRValue)
      return false;
    return isBlockVarRef(cast->getSubExpr());

  // Handle unary operators.  Again, just aggressively look through
  // it, ignoring the operation.
  } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
    return isBlockVarRef(uop->getSubExpr());

  // Look into the base of a field access.
  } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
    return isBlockVarRef(mem->getBase());

  // Look into the base of a subscript.
  } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
    return isBlockVarRef(sub->getBase());
  }

  return false;
}

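/// Emit an aggregate assignment.  The RHS is normally emitted directly into
/// the LHS slot; __block variables and atomic l-values need extra care.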
void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  // If the LHS might be a __block variable, and the RHS can
  // potentially cause a block copy, we need to evaluate the RHS first
  // so that the assignment goes the right place.
  // This is pretty semantically fragile.
  if (isBlockVarRef(E->getLHS()) &&
      E->getRHS()->HasSideEffects(CGF.getContext())) {
    // Ensure that we have a destination, and evaluate the RHS into that.
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());

    // Now emit the LHS and copy into it.
    LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // That copy is an atomic copy if the LHS is atomic.
    if (LHS.getType()->isAtomicType() ||
        CGF.LValueIsSuitableForInlineAtomic(LHS)) {
      CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
      return;
    }

    EmitCopy(E->getLHS()->getType(),
             AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                                     needsGC(E->getLHS()->getType()),
                                     AggValueSlot::IsAliased),
             Dest);
    return;
  }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // If we have an atomic type, evaluate into the destination and then
  // do an atomic copy.
  if (LHS.getType()->isAtomicType() ||
      CGF.LValueIsSuitableForInlineAtomic(LHS)) {
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());
    CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
    return;
  }

  // Codegen the RHS so that it stores directly into the LHS.
  AggValueSlot LHSSlot =
    AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                            needsGC(E->getLHS()->getType()),
                            AggValueSlot::IsAliased);
  // A non-volatile aggregate destination might have a volatile member.
  if (!LHSSlot.isVolatile() &&
      CGF.hasVolatileMember(E->getLHS()->getType()))
    LHSSlot.setVolatile(true);

  CGF.EmitAggExpr(E->getRHS(), LHSSlot);

  // Copy into the destination if the assignment isn't ignored.
  EmitFinalDestCopy(E->getType(), LHS);
}

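/// Emit a conditional operator of aggregate type by branching on the
/// condition and emitting each arm into the same destination slot.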
void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
                           CGF.getProfileCount(E));

  // Save whether the destination's lifetime is externally managed.
  bool isExternallyDestructed = Dest.isExternallyDestructed();

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  CGF.incrementProfileCounter(E);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that it's already being destructed.
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  CGF.EmitBlock(ContBlock);
}

void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr());
}

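/// Emit a va_arg expression of aggregate type.  If the target-specific
/// lowering does not produce an address, fall back to LLVM's va_arg
/// instruction.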
void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  Address ArgValue = Address::invalid();
  Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  if (!ArgPtr.isValid()) {
    // If EmitVAArg fails, we fall back to the LLVM instruction.
    llvm::Value *Val = Builder.CreateVAArg(ArgValue.getPointer(),
                                           CGF.ConvertType(VE->getType()));
    if (!Dest.isIgnored())
      Builder.CreateStore(Val, Dest.getAddress());
    return;
  }

  EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}

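/// Emit a bound temporary of aggregate type and, unless the destination is
/// already externally destructed, push the destructor cleanup promised for it.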
void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether it was externally destructed.
  bool wasExternallyDestructed = Dest.isExternallyDestructed();
  EnsureDest(E->getType());

  // We're going to push a destructor if there isn't already one.
  Dest.setExternallyDestructed();

  Visit(E->getSubExpr());

  // Push that destructor we promised.
  if (!wasExternallyDestructed)
    CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
}

void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}

void
AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitLambdaExpr(E, Slot);
}

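/// Emit a full-expression inside its own cleanups scope so that temporaries
/// are destroyed at the end of the full-expression.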
void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CGF.enterFullExpression(E);
  CodeGenFunction::RunCleanupsScope cleanups(CGF);
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}

void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}

/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true.  This can return false if uncertain, so it just
/// handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0
  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  // +0.0
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  // int()
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer;
  // '\0'
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}

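/// EmitInitializationToLValue - Emit an initializer expression into the given
/// l-value, dispatching on the evaluation kind (scalar, complex, or aggregate)
/// of the destination type.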
void
AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
    return;
  } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
    return EmitNullInitializationToLValue(LV);
  } else if (isa<NoInitExpr>(E)) {
    // Do nothing.
    return;
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E);
    return CGF.EmitStoreThroughLValue(RV, LV);
  }

  switch (CGF.getEvaluationKind(type)) {
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
                                               AggValueSlot::IsDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                               AggValueSlot::IsNotAliased,
                                               Dest.isZeroed()));
    return;
  case TEK_Scalar:
    if (LV.isSimple()) {
      CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
    } else {
      CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
    }
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

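/// EmitNullInitializationToLValue - Emit a null/zero initialization of the
/// given l-value, skipping the store when the destination is already zeroed.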
void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
    return;

  if (CGF.hasScalarEvaluationKind(type)) {
    // For non-aggregates, we can store the appropriate null constant.
    llvm::Value *null = CGF.CGM.EmitNullConstant(type);
    // Note that the following is not equivalent to
    // EmitStoreThroughBitfieldLValue for ARC types.
    if (lv.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
    } else {
      assert(lv.isSimple());
      CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
    }
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
  }
}

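/// Emit an initializer list of aggregate type; arrays, _Atomic objects,
/// unions, and ordinary structs each take a separate path below.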
1123193326Sedvoid AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
1124193326Sed#if 0
1125200583Srdivacky  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
1126200583Srdivacky  // (Length of globals? Chunks of zeroed-out space?).
1127193326Sed  //
1128193326Sed  // If we can, prefer a copy from a global; this is a lot less code for long
1129193326Sed  // globals, and it's easier for the current optimizers to analyze.
1130200583Srdivacky  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
1131193326Sed    llvm::GlobalVariable* GV =
1132200583Srdivacky    new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
1133200583Srdivacky                             llvm::GlobalValue::InternalLinkage, C, "");
1134239462Sdim    EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType()));
1135193326Sed    return;
1136193326Sed  }
1137193326Sed#endif
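  // Array range designators are a GNU extension that is not supported here,
  // e.g. (illustrative): int a[10] = { [2 ... 5] = 7 };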
1138218893Sdim  if (E->hadArrayRangeDesignator())
1139193326Sed    CGF.ErrorUnsupported(E, "GNU array range designator extension");
1140193326Sed
1141261991Sdim  AggValueSlot Dest = EnsureSlot(E->getType());
1142218893Sdim
1143296417Sdim  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1144234353Sdim
1145193326Sed  // Handle initialization of an array.
1146193326Sed  if (E->getType()->isArrayType()) {
1147234982Sdim    if (E->isStringLiteralInit())
1148234982Sdim      return Visit(E->getInit(0));
1149193326Sed
1150234353Sdim    QualType elementType =
1151234353Sdim        CGF.getContext().getAsArrayType(E->getType())->getElementType();
1152193326Sed
1153296417Sdim    auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
1154296417Sdim    EmitArrayInit(Dest.getAddress(), AType, elementType, E);
1155193326Sed    return;
1156193326Sed  }
1157198092Srdivacky
1158276479Sdim  if (E->getType()->isAtomicType()) {
1159276479Sdim    // An _Atomic(T) object can be list-initialized from an expression
1160276479Sdim    // of the same type.
1161276479Sdim    assert(E->getNumInits() == 1 &&
1162276479Sdim           CGF.getContext().hasSameUnqualifiedType(E->getInit(0)->getType(),
1163276479Sdim                                                   E->getType()) &&
1164276479Sdim           "unexpected list initialization for atomic object");
1165276479Sdim    return Visit(E->getInit(0));
1166276479Sdim  }
1167276479Sdim
1168193326Sed  assert(E->getType()->isRecordType() && "Only support structs/unions here!");
1169198092Srdivacky
1170193326Sed  // Do struct initialization; this code just sets each individual member
1171193326Sed  // to the appropriate value.  This makes bitfield support automatic;
1172193326Sed  // the disadvantage is that the generated code is more difficult for
1173193326Sed  // the optimizer, especially with bitfields.
1174193326Sed  unsigned NumInitElements = E->getNumInits();
1175224145Sdim  RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();
1176251662Sdim
1177251662Sdim  // Prepare a 'this' for CXXDefaultInitExprs.
1178296417Sdim  CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());
1179251662Sdim
1180224145Sdim  if (record->isUnion()) {
1181193326Sed    // Only initialize one field of a union. The field itself is
1182193326Sed    // specified by the initializer list.
1183193326Sed    if (!E->getInitializedFieldInUnion()) {
1184193326Sed      // Empty union; we have nothing to do.
1185198092Srdivacky
1186193326Sed#ifndef NDEBUG
1187193326Sed      // Make sure that it's really an empty union and not a failure of
1188193326Sed      // semantic analysis.
1189276479Sdim      for (const auto *Field : record->fields())
1190193326Sed        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
1191193326Sed#endif
1192193326Sed      return;
1193193326Sed    }
1194193326Sed
1195193326Sed    // FIXME: volatility
1196193326Sed    FieldDecl *Field = E->getInitializedFieldInUnion();
1197218893Sdim
1198234982Sdim    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
1199193326Sed    if (NumInitElements) {
1200193326Sed      // Store the initializer into the field
1201224145Sdim      EmitInitializationToLValue(E->getInit(0), FieldLoc);
1202193326Sed    } else {
1203218893Sdim      // Default-initialize to null.
1204224145Sdim      EmitNullInitializationToLValue(FieldLoc);
1205193326Sed    }
1206193326Sed
1207193326Sed    return;
1208193326Sed  }
1209198092Srdivacky
1210224145Sdim  // We'll need to enter cleanup scopes in case any of the member
1211224145Sdim  // initializers throw an exception.
1212226633Sdim  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
1213276479Sdim  llvm::Instruction *cleanupDominator = nullptr;
1214224145Sdim
1215193326Sed  // Here we iterate over the fields; this makes it simpler to both
1216193326Sed  // default-initialize fields and skip over unnamed fields.
1217224145Sdim  unsigned curInitIndex = 0;
1218276479Sdim  for (const auto *field : record->fields()) {
1219224145Sdim    // We're done once we hit the flexible array member.
1220224145Sdim    if (field->getType()->isIncompleteArrayType())
1221193326Sed      break;
1222193326Sed
1223224145Sdim    // Always skip anonymous bitfields.
1224224145Sdim    if (field->isUnnamedBitfield())
1225193326Sed      continue;
1226193326Sed
1227224145Sdim    // We're done if we reach the end of the explicit initializers, we
1228224145Sdim    // have a zeroed object, and the rest of the fields are
1229224145Sdim    // zero-initializable.
1230224145Sdim    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
1231218893Sdim        CGF.getTypes().isZeroInitializable(E->getType()))
1232218893Sdim      break;
1233218893Sdim
1234234982Sdim
1235276479Sdim    LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
1236193326Sed    // We never generate write-barriers for initialized fields.
1237224145Sdim    LV.setNonGC(true);
1238218893Sdim
1239224145Sdim    if (curInitIndex < NumInitElements) {
1240204962Srdivacky      // Store the initializer into the field.
1241224145Sdim      EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
1242193326Sed    } else {
1243193326Sed      // We're out of initializers; default-initialize to null.
1244224145Sdim      EmitNullInitializationToLValue(LV);
1245193326Sed    }
1246224145Sdim
1247224145Sdim    // Push a destructor if necessary.
1248224145Sdim    // FIXME: if we have an array of structures, all explicitly
1249224145Sdim    // initialized, we can end up pushing a linear number of cleanups.
1250224145Sdim    bool pushedCleanup = false;
1251224145Sdim    if (QualType::DestructionKind dtorKind
1252224145Sdim          = field->getType().isDestructedType()) {
1253224145Sdim      assert(LV.isSimple());
1254224145Sdim      if (CGF.needsEHCleanup(dtorKind)) {
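        // On the first cleanup, emit a dead placeholder load that dominates
        // all of the cleanups pushed below.  It is passed to
        // DeactivateCleanupBlock as the dominating point when the cleanups
        // are deactivated, and is erased once they all have been.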
1255234353Sdim        if (!cleanupDominator)
1256296417Sdim          cleanupDominator = CGF.Builder.CreateAlignedLoad(
1257296417Sdim              CGF.Int8Ty,
1258296417Sdim              llvm::Constant::getNullValue(CGF.Int8PtrTy),
1259296417Sdim              CharUnits::One()); // placeholder
1260234353Sdim
1261224145Sdim        CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
1262224145Sdim                        CGF.getDestroyer(dtorKind), false);
1263224145Sdim        cleanups.push_back(CGF.EHStack.stable_begin());
1264224145Sdim        pushedCleanup = true;
1265224145Sdim      }
1266224145Sdim    }
1267218893Sdim
1268218893Sdim    // If the GEP didn't get used because of a dead zero init or something
1269218893Sdim    // else, clean it up for -O0 builds and general tidiness.
1270224145Sdim    if (!pushedCleanup && LV.isSimple())
1271218893Sdim      if (llvm::GetElementPtrInst *GEP =
1272296417Sdim            dyn_cast<llvm::GetElementPtrInst>(LV.getPointer()))
1273218893Sdim        if (GEP->use_empty())
1274218893Sdim          GEP->eraseFromParent();
1275193326Sed  }
1276224145Sdim
1277224145Sdim  // Deactivate all the partial cleanups in reverse order, which
1278224145Sdim  // generally means popping them.
1279224145Sdim  for (unsigned i = cleanups.size(); i != 0; --i)
1280234353Sdim    CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);
1281234353Sdim
1282234353Sdim  // Destroy the placeholder if we made one.
1283234353Sdim  if (cleanupDominator)
1284234353Sdim    cleanupDominator->eraseFromParent();
1285193326Sed}
1286193326Sed
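/// Emit a DesignatedInitUpdateExpr: first emit the base expression into the
/// destination, then let the updater's init list overwrite the designated
/// fields on top of it.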
1287288943Sdimvoid AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
1288288943Sdim  AggValueSlot Dest = EnsureSlot(E->getType());
1289288943Sdim
1290296417Sdim  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1291288943Sdim  EmitInitializationToLValue(E->getBase(), DestLV);
1292288943Sdim  VisitInitListExpr(E->getUpdater());
1293288943Sdim}
1294288943Sdim
1295193326Sed//===----------------------------------------------------------------------===//
1296193326Sed//                        Entry Points into this File
1297193326Sed//===----------------------------------------------------------------------===//
1298193326Sed
1299218893Sdim/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
1300218893Sdim/// non-zero bytes that will be stored when outputting the initializer for the
1301218893Sdim/// specified initializer expression.
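/// For example (illustrative), an 'int[4]' initialized with '{ 1, 0, 0, 0 }'
/// needs a non-zero store only for the first element, so this returns 4 bytes
/// on a typical 32-bit-int target.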
1302221345Sdimstatic CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
1303221345Sdim  E = E->IgnoreParens();
1304218893Sdim
1305218893Sdim  // 0 and 0.0 won't require any non-zero stores!
1306221345Sdim  if (isSimpleZero(E, CGF)) return CharUnits::Zero();
1307218893Sdim
1308218893Sdim  // If this is an init list expr, sum up the sizes of the (present)
1309218893Sdim  // elements.  If this is something weird, assume the whole thing is non-zero.
1310218893Sdim  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
1311276479Sdim  if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
1312221345Sdim    return CGF.getContext().getTypeSizeInChars(E->getType());
1313218893Sdim
1314218893Sdim  // InitListExprs for structs have to be handled carefully.  If there are
1315218893Sdim  // reference members, we need to consider the size of the reference, not the
1316218893Sdim  // referencee.  InitListExprs for unions and arrays can't have references.
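  // For instance (illustrative), given 'struct S { int &r; int i; };' and an
  // initializer '{ x, 0 }', the reference member always contributes
  // pointer-width non-zero bytes, regardless of what it binds to.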
1317218893Sdim  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
1318218893Sdim    if (!RT->isUnionType()) {
1319218893Sdim      RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
1320221345Sdim      CharUnits NumNonZeroBytes = CharUnits::Zero();
1321218893Sdim
1322218893Sdim      unsigned ILEElement = 0;
1323276479Sdim      for (const auto *Field : SD->fields()) {
1324218893Sdim        // We're done once we hit the flexible array member or run out of
1325218893Sdim        // InitListExpr elements.
1326218893Sdim        if (Field->getType()->isIncompleteArrayType() ||
1327218893Sdim            ILEElement == ILE->getNumInits())
1328218893Sdim          break;
1329218893Sdim        if (Field->isUnnamedBitfield())
1330218893Sdim          continue;
1331218893Sdim
1332218893Sdim        const Expr *E = ILE->getInit(ILEElement++);
1333218893Sdim
1334218893Sdim        // Reference values are always non-null and have the width of a pointer.
1335218893Sdim        if (Field->getType()->isReferenceType())
1336221345Sdim          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
1337251662Sdim              CGF.getTarget().getPointerWidth(0));
1338218893Sdim        else
1339218893Sdim          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
1340218893Sdim      }
1341218893Sdim
1342218893Sdim      return NumNonZeroBytes;
1343218893Sdim    }
1344218893Sdim  }
1345218893Sdim
1346218893Sdim
1347221345Sdim  CharUnits NumNonZeroBytes = CharUnits::Zero();
1348218893Sdim  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
1349218893Sdim    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
1350218893Sdim  return NumNonZeroBytes;
1351218893Sdim}
1352218893Sdim
1353218893Sdim/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
1354218893Sdim/// zeros in it, emit a memset and avoid storing the individual zeros.
1355218893Sdim///
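/// For example (illustrative), for 'struct { int a[100]; int x; } s = { {}, 1 };'
/// it is cheaper to memset the whole object and then store only 'x' than to
/// emit a hundred explicit zero stores.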
1356218893Sdimstatic void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
1357218893Sdim                                     CodeGenFunction &CGF) {
1358218893Sdim  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
1359218893Sdim  // volatile stores.
1360296417Sdim  if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
1361276479Sdim    return;
1362221345Sdim
1363221345Sdim  // C++ objects with a user-declared constructor don't need zeroing.
1364243830Sdim  if (CGF.getLangOpts().CPlusPlus)
1365221345Sdim    if (const RecordType *RT = CGF.getContext()
1366221345Sdim                       .getBaseElementType(E->getType())->getAs<RecordType>()) {
1367221345Sdim      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1368221345Sdim      if (RD->hasUserDeclaredConstructor())
1369221345Sdim        return;
1370221345Sdim    }
1371221345Sdim
1372218893Sdim  // If the type is 16-bytes or smaller, prefer individual stores over memset.
1373296417Sdim  CharUnits Size = CGF.getContext().getTypeSizeInChars(E->getType());
1374296417Sdim  if (Size <= CharUnits::fromQuantity(16))
1375218893Sdim    return;
1376218893Sdim
1377218893Sdim  // Check to see if over 3/4 of the initializer are known to be zero.  If so,
1378218893Sdim  // we prefer to emit memset + individual stores for the rest.
1379221345Sdim  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
1380296417Sdim  if (NumNonZeroBytes*4 > Size)
1381218893Sdim    return;
1382218893Sdim
1383218893Sdim  // Okay, it seems like a good idea to use an initial memset, emit the call.
1384296417Sdim  llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());
1385218893Sdim
1386296417Sdim  Address Loc = Slot.getAddress();
1387296417Sdim  Loc = CGF.Builder.CreateElementBitCast(Loc, CGF.Int8Ty);
1388296417Sdim  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);
1389218893Sdim
1390218893Sdim  // Tell the AggExprEmitter that the slot is known zero.
1391218893Sdim  Slot.setZeroed();
1392218893Sdim}
1393218893Sdim
1394218893Sdim
1395218893Sdim
1396218893Sdim
1397193326Sed/// EmitAggExpr - Emit the computation of the specified expression of aggregate
1398193326Sed/// type.  The result is computed into the given slot.  If the slot is
1399193326Sed/// ignored, the value of the aggregate expression is not needed; otherwise
1400193326Sed/// the slot must have a valid address.
1401239462Sdimvoid CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
1402249423Sdim  assert(E && hasAggregateEvaluationKind(E->getType()) &&
1403193326Sed         "Invalid aggregate expression to emit");
1404296417Sdim  assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
1405218893Sdim         "slot has bits but no address");
1406198092Srdivacky
1407218893Sdim  // Optimize the slot if possible.
1408218893Sdim  CheckAggExprForMemSetUse(Slot, E, *this);
1409218893Sdim
1410288943Sdim  AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E));
1411193326Sed}
1412193326Sed
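/// EmitAggExprToLValue - Emit the specified aggregate expression into a
/// freshly created temporary and return an lvalue referring to that temporary.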
1413203955SrdivackyLValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
1414249423Sdim  assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
1415296417Sdim  Address Temp = CreateMemTemp(E->getType());
1416212904Sdim  LValue LV = MakeAddrLValue(Temp, E->getType());
1417226633Sdim  EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
1418226633Sdim                                         AggValueSlot::DoesNotNeedGCBarriers,
1419226633Sdim                                         AggValueSlot::IsNotAliased));
1420212904Sdim  return LV;
1421203955Srdivacky}
1422203955Srdivacky
1423296417Sdimvoid CodeGenFunction::EmitAggregateCopy(Address DestPtr,
1424296417Sdim                                        Address SrcPtr, QualType Ty,
1425239462Sdim                                        bool isVolatile,
1426243830Sdim                                        bool isAssignment) {
1427193326Sed  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
1428198092Srdivacky
1429243830Sdim  if (getLangOpts().CPlusPlus) {
1430207619Srdivacky    if (const RecordType *RT = Ty->getAs<RecordType>()) {
1431208600Srdivacky      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
1432208600Srdivacky      assert((Record->hasTrivialCopyConstructor() ||
1433226633Sdim              Record->hasTrivialCopyAssignment() ||
1434226633Sdim              Record->hasTrivialMoveConstructor() ||
1435288943Sdim              Record->hasTrivialMoveAssignment() ||
1436288943Sdim              Record->isUnion()) &&
1437249423Sdim             "Trying to aggregate-copy a type without a trivial copy/move "
1438208600Srdivacky             "constructor or assignment operator");
1439208600Srdivacky      // Ignore empty classes in C++.
1440208600Srdivacky      if (Record->isEmpty())
1441207619Srdivacky        return;
1442207619Srdivacky    }
1443207619Srdivacky  }
1444207619Srdivacky
1445193326Sed  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
1446193326Sed  // C99 6.5.16.1p3, which states "If the value being stored in an object is
1447193326Sed  // read from another object that overlaps in any way the storage of the first
1448193326Sed  // object, then the overlap shall be exact and the two objects shall have
1449193326Sed  // qualified or unqualified versions of a compatible type."
1450193326Sed  //
1451193326Sed  // memcpy is not defined if the source and destination pointers are exactly
1452193326Sed  // equal, but other compilers do this optimization, and almost every memcpy
1453193326Sed  // implementation handles this case safely.  If there is a libc that does not
1454193326Sed  // safely handle this, we can add a target hook.
1455198092Srdivacky
1456296417Sdim  // Get data size info for this aggregate. If this is an assignment,
1457296417Sdim  // don't copy the tail padding, because we might be assigning into a
1458296417Sdim  // base subobject where the tail padding is claimed.  Otherwise,
1459296417Sdim  // copying it is fine.
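  // For instance (illustrative), with 'struct A { A(); int i; char c; };' and
  // 'struct B : A { char d; };', B::d may be laid out in A's tail padding, so
  // assigning into the A subobject of a B must not write those bytes.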
1460243830Sdim  std::pair<CharUnits, CharUnits> TypeInfo;
1461243830Sdim  if (isAssignment)
1462243830Sdim    TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
1463243830Sdim  else
1464243830Sdim    TypeInfo = getContext().getTypeInfoInChars(Ty);
1465198092Srdivacky
1466288943Sdim  llvm::Value *SizeVal = nullptr;
1467288943Sdim  if (TypeInfo.first.isZero()) {
1468288943Sdim    // But note that getTypeInfo returns 0 for a VLA.
1469288943Sdim    if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
1470288943Sdim            getContext().getAsArrayType(Ty))) {
1471288943Sdim      QualType BaseEltTy;
1472288943Sdim      SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
1473288943Sdim      TypeInfo = getContext().getTypeInfoDataSizeInChars(BaseEltTy);
1474288943Sdim      std::pair<CharUnits, CharUnits> LastElementTypeInfo;
1475288943Sdim      if (!isAssignment)
1476288943Sdim        LastElementTypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
1477288943Sdim      assert(!TypeInfo.first.isZero());
1478288943Sdim      SizeVal = Builder.CreateNUWMul(
1479288943Sdim          SizeVal,
1480288943Sdim          llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
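      // For a copy that is not an assignment, the final element can be copied
      // with its full size (including tail padding): swap one data-size
      // element for the full element size computed above.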
1481288943Sdim      if (!isAssignment) {
1482288943Sdim        SizeVal = Builder.CreateNUWSub(
1483288943Sdim            SizeVal,
1484288943Sdim            llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
1485288943Sdim        SizeVal = Builder.CreateNUWAdd(
1486288943Sdim            SizeVal, llvm::ConstantInt::get(
1487288943Sdim                         SizeTy, LastElementTypeInfo.first.getQuantity()));
1488288943Sdim      }
1489288943Sdim    }
1490288943Sdim  }
1491288943Sdim  if (!SizeVal) {
1492288943Sdim    SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity());
1493288943Sdim  }
1494198092Srdivacky
1495193326Sed  // FIXME: If we have a volatile struct, the optimizer can remove what might
1496193326Sed  // appear to be `extra' memory ops:
1497193326Sed  //
1498193326Sed  // volatile struct { int i; } a, b;
1499193326Sed  //
1500193326Sed  // int main() {
1501193326Sed  //   a = b;
1502193326Sed  //   a = b;
1503193326Sed  // }
1504193326Sed  //
1505206275Srdivacky  // we need to use a different call here.  We use isVolatile to indicate when
1506193326Sed  // either the source or the destination is volatile.
1507206275Srdivacky
1508296417Sdim  DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
1509296417Sdim  SrcPtr = Builder.CreateElementBitCast(SrcPtr, Int8Ty);
1510206275Srdivacky
1511224145Sdim  // Don't do any of the memmove_collectable tests if GC isn't set.
1512234353Sdim  if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
1513224145Sdim    // fall through
1514224145Sdim  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
1515210299Sed    RecordDecl *Record = RecordTy->getDecl();
1516210299Sed    if (Record->hasObjectMember()) {
1517210299Sed      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
1518210299Sed                                                    SizeVal);
1519210299Sed      return;
1520210299Sed    }
1521224145Sdim  } else if (Ty->isArrayType()) {
1522210299Sed    QualType BaseType = getContext().getBaseElementType(Ty);
1523210299Sed    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
1524210299Sed      if (RecordTy->getDecl()->hasObjectMember()) {
1525210299Sed        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
1526210299Sed                                                      SizeVal);
1527210299Sed        return;
1528210299Sed      }
1529210299Sed    }
1530210299Sed  }
1531243830Sdim
1532296417Sdim  auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);
1533296417Sdim
1534243830Sdim  // Determine the metadata to describe the position of any padding in this
1535243830Sdim  // memcpy, as well as the TBAA tags for the members of the struct, in case
1536243830Sdim  // the optimizer wishes to expand it in to scalar memory operations.
1537296417Sdim  if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
1538296417Sdim    Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);
1539193326Sed}
1540