CGExprAgg.cpp revision 321369
//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;
  bool IsResultUnused;

  /// We want to use 'dest' as the return slot except under two
  /// conditions:
  ///   - The destination slot requires garbage collection, so we
  ///     need to use the GC API.
  ///   - The destination slot is potentially aliased.
  bool shouldUseDestForReturnSlot() const {
    return !(Dest.requiresGCollection() || Dest.isPotentiallyAliased());
  }

  ReturnValueSlot getReturnValueSlot() const {
    if (!shouldUseDestForReturnSlot())
      return ReturnValueSlot();

    return ReturnValueSlot(Dest.getAddress(), Dest.isVolatile(),
                           IsResultUnused);
  }

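  /// EnsureSlot - Return the current destination slot if it is usable,
  /// otherwise create a temporary so the result has somewhere to go.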
  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }
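  /// EnsureDest - Like EnsureSlot, but replace an ignored destination in
  /// place so later code can emit directly into Dest.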
  void EnsureDest(QualType T) {
    if (!Dest.isIgnored()) return;
    Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
    IsResultUnused(IsResultUnused) { }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents a value lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  void EmitFinalDestCopy(QualType type, const LValue &src);
  void EmitFinalDestCopy(QualType type, RValue src);
  void EmitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  void EmitMoveFromReturnSlot(const Expr *E, RValue Src);

  void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                     QualType elementType, InitListExpr *E);

  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  void Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    StmtVisitor<AggExprEmitter>::Visit(E);
  }

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitCoawaitExpr(CoawaitExpr *E) {
    CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
  }
  void VisitCoyieldExpr(CoyieldExpr *E) {
    CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
  }
  void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *E) {
    // For aggregates, we should always be able to emit the variable
    // as an l-value unless it's a reference.  This is due to the fact
    // that we can't actually ever see a normal l2r conversion on an
    // aggregate in C++, and in C there's no language standard
    // actively preventing us from listing variables in the captures
    // list of a block.
    if (E->getDecl()->getType()->isReferenceType()) {
      if (CodeGenFunction::ConstantEmission result
            = CGF.tryEmitAsConstant(E)) {
        EmitFinalDestCopy(E->getType(), result.getReferenceLValue(CGF, E));
        return;
      }
    }

    EmitAggLoadOfLValue(E);
  }

  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                              llvm::Value *outerBegin = nullptr);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    Visit(DAE->getExpr());
  }
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
    Visit(DIE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
  void VisitLambdaExpr(LambdaExpr *E);
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    if (E->isGLValue()) {
      LValue LV = CGF.EmitPseudoObjectLValue(E);
      return EmitFinalDestCopy(E->getType(), LV);
    }

    CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
  }

  void VisitVAArgExpr(VAArgExpr *E);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    RValue Res = CGF.EmitAtomicExpr(E);
    EmitFinalDestCopy(E->getType(), Res);
  }
};
}  // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents a value lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);

  // If the type of the l-value is atomic, then do an atomic load.
  if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
    CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
    return;
  }

  EmitFinalDestCopy(E->getType(), LV);
}

/// \brief True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}

/// \brief Perform the final move to DestPtr if for some reason
/// getReturnValueSlot() didn't use it directly.
///
/// The idea is that you do something like this:
///   RValue Result = EmitSomething(..., getReturnValueSlot());
///   EmitMoveFromReturnSlot(E, Result);
///
/// If nothing interferes, this will cause the result to be emitted
/// directly into the return value slot.  Otherwise, a final move
/// will be performed.
void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue src) {
  if (shouldUseDestForReturnSlot()) {
    // Logically, Dest.getAddr() should equal Src.getAggregateAddr().
    // The possibility of undef rvalues complicates that a lot,
    // though, so we can't really assert.
    return;
  }

  // Otherwise, copy from there to the destination.
  assert(Dest.getPointer() != src.getAggregatePointer());
  EmitFinalDestCopy(E->getType(), src);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
  assert(src.isAggregate() && "value must be aggregate value!");
  LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
  EmitFinalDestCopy(type, srcLV);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src) {
  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context that doesn't care about the result.  Note that loads
  // from volatile l-values force the existence of a non-ignored
  // destination.
  if (Dest.isIgnored())
    return;

  AggValueSlot srcAgg =
    AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
                            needsGC(type), AggValueSlot::IsAliased);
  EmitCopy(type, Dest, srcAgg);
}

/// Perform a copy from the source into the destination.
///
/// \param type - the type of the aggregate being copied; qualifiers are
///   ignored
void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
                              const AggValueSlot &src) {
  if (dest.requiresGCollection()) {
    CharUnits sz = CGF.getContext().getTypeSizeInChars(type);
    llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      dest.getAddress(),
                                                      src.getAddress(),
                                                      size);
    return;
  }

  // If the result of the assignment is used, copy the LHS there also.
  // It's volatile if either side is.  Use the minimum alignment of
  // the two sides.
  CGF.EmitAggregateCopy(dest.getAddress(), src.getAddress(), type,
                        dest.isVolatile() || src.isVolatile());
}

/// \brief Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
void
AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
  // Emit an array containing the elements.  The array is externally destructed
  // if the std::initializer_list object is.
  ASTContext &Ctx = CGF.getContext();
  LValue Array = CGF.EmitLValue(E->getSubExpr());
  assert(Array.isSimple() && "initializer_list array not a simple lvalue");
  Address ArrayPtr = Array.getAddress();

  const ConstantArrayType *ArrayType =
      Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
  assert(ArrayType && "std::initializer_list constructed from non-array");

  // FIXME: Perform the checks on the field types in SemaInit.
  RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
  RecordDecl::field_iterator Field = Record->field_begin();
  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  // Start pointer.
  if (!Field->getType()->isPointerType() ||
      !Ctx.hasSameType(Field->getType()->getPointeeType(),
                       ArrayType->getElementType())) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  AggValueSlot Dest = EnsureSlot(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
  llvm::Value *IdxStart[] = { Zero, Zero };
  llvm::Value *ArrayStart =
      Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxStart, "arraystart");
  CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
  ++Field;

  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  llvm::Value *Size = Builder.getInt(ArrayType->getSize());
  LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  if (Field->getType()->isPointerType() &&
      Ctx.hasSameType(Field->getType()->getPointeeType(),
                      ArrayType->getElementType())) {
    // End pointer.
    llvm::Value *IdxEnd[] = { Zero, Size };
    llvm::Value *ArrayEnd =
        Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxEnd, "arrayend");
    CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
  } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
    // Length.
    CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
  } else {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }
}

/// \brief Determine if E is a trivial array filler, that is, one that is
/// equivalent to zero-initialization.
static bool isTrivialFiller(Expr *E) {
  if (!E)
    return true;

  if (isa<ImplicitValueInitExpr>(E))
    return true;

  if (auto *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits())
      return false;
    return isTrivialFiller(ILE->getArrayFiller());
  }

  if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
    return Cons->getConstructor()->isDefaultConstructor() &&
           Cons->getConstructor()->isTrivial();

  // FIXME: Are there other cases where we can avoid emitting an initializer?
  return false;
}

/// \brief Emit initialization of an array from an initializer list.
void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                                   QualType elementType, InitListExpr *E) {
  uint64_t NumInitElements = E->getNumInits();

  uint64_t NumArrayElements = AType->getNumElements();
  assert(NumInitElements <= NumArrayElements);

  // DestPtr is an array*.  Construct an elementType* by drilling
  // down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = { zero, zero };
  llvm::Value *begin =
    Builder.CreateInBoundsGEP(DestPtr.getPointer(), indices, "arrayinit.begin");

  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
    DestPtr.getAlignment().alignmentOfArrayElement(elementSize);

  // Exception safety requires us to destroy all the
  // already-constructed members if an initializer throws.
  // For that, we'll need an EH cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  Address endOfInit = Address::invalid();
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = nullptr;
  if (CGF.needsEHCleanup(dtorKind)) {
    // In principle we could tell the cleanup where we are more
    // directly, but the control flow can get so varied here that it
    // would actually be quite complex.  Therefore we go through an
    // alloca.
    endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
                                     "arrayinit.endOfInit");
    cleanupDominator = Builder.CreateStore(begin, endOfInit);
    CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         elementAlign,
                                         CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();

  // Otherwise, remember that we didn't need a cleanup.
  } else {
    dtorKind = QualType::DK_none;
  }

  llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

  // The 'current element to initialize'.  The invariants on this
  // variable are complicated.  Essentially, after each iteration of
  // the loop, it points to the last initialized element, except
  // that it points to the beginning of the array before any
  // elements have been initialized.
  llvm::Value *element = begin;

  // Emit the explicit initializers.
  for (uint64_t i = 0; i != NumInitElements; ++i) {
    // Advance to the next element.
    if (i > 0) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");

      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    LValue elementLV =
      CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
    EmitInitializationToLValue(E->getInit(i), elementLV);
  }

  // Check whether there's a non-trivial array-fill expression.
  Expr *filler = E->getArrayFiller();
  bool hasTrivialFiller = isTrivialFiller(filler);

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression.  We can skip this if we're
  // emitting to zeroed memory.
  if (NumInitElements != NumArrayElements &&
      !(Dest.isZeroed() && hasTrivialFiller &&
        CGF.getTypes().isZeroInitializable(elementType))) {

    // Use an actual loop.  This is basically
    //   do { *array++ = filler; } while (array != end);

    // Advance to the start of the rest of the array.
    if (NumInitElements) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    // Compute the end of the array.
    llvm::Value *end = Builder.CreateInBoundsGEP(begin,
                      llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
                                                 "arrayinit.end");

    llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

    // Jump into the body.
    CGF.EmitBlock(bodyBB);
    llvm::PHINode *currentElement =
      Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    currentElement->addIncoming(element, entryBB);

    // Emit the actual filler expression.
    {
      // C++1z [class.temporary]p5:
      //   when a default constructor is called to initialize an element of
      //   an array with no corresponding initializer [...] the destruction of
      //   every temporary created in a default argument is sequenced before
      //   the construction of the next array element, if any
      CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
      LValue elementLV =
        CGF.MakeAddrLValue(Address(currentElement, elementAlign), elementType);
      if (filler)
        EmitInitializationToLValue(filler, elementLV);
      else
        EmitNullInitializationToLValue(elementLV);
    }

    // Move on to the next element.
    llvm::Value *nextElement =
      Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");

    // Tell the EH cleanup that we finished with the last element.
    if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);

    // Leave the loop if we're done.
    llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                             "arrayinit.done");
    llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    Builder.CreateCondBr(done, endBB, bodyBB);
    currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

    CGF.EmitBlock(endBB);
  }

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
}

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->GetTemporaryExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  EmitFinalDestCopy(e->getType(), CGF.getOpaqueLValueMapping(e));
}

void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (Dest.isPotentiallyAliased() &&
      E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitAggExpr(E->getInitializer(), Slot);
}

/// Attempt to look through various unimportant expressions to find a
/// cast of the given kind.
static Expr *findPeephole(Expr *op, CastKind kind) {
  while (true) {
    op = op->IgnoreParens();
    if (CastExpr *castE = dyn_cast<CastExpr>(op)) {
      if (castE->getCastKind() == kind)
        return castE->getSubExpr();
      if (castE->getCastKind() == CK_NoOp)
        continue;
    }
    return nullptr;
  }
}

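/// Emit an aggregate cast.  Most cast kinds are invalid for aggregates,
/// so this mainly handles dynamic_cast, the GCC union-cast extension,
/// atomic/non-atomic conversions, and no-op casts.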
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
    CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    // FIXME: Can this actually happen? We have no test coverage for it.
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
                                      CodeGenFunction::TCK_Load);
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    // Evaluate even if the destination is ignored.
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    Address CastPtr =
      Builder.CreateElementBitCast(Dest.getAddress(), CGF.ConvertType(Ty));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                "should have been unpacked before we got here");
  }

  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic: {
    bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);

    // Determine the atomic and value types.
    QualType atomicType = E->getSubExpr()->getType();
    QualType valueType = E->getType();
    if (isToAtomic) std::swap(atomicType, valueType);

    assert(atomicType->isAtomicType());
    assert(CGF.getContext().hasSameUnqualifiedType(valueType,
                          atomicType->castAs<AtomicType>()->getValueType()));

    // Just recurse normally if we're ignoring the result or the
    // atomic type doesn't change representation.
    if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
      return Visit(E->getSubExpr());
    }

    CastKind peepholeTarget =
      (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);

    // These two cases are reverses of each other; try to peephole them.
    if (Expr *op = findPeephole(E->getSubExpr(), peepholeTarget)) {
      assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
                                                     E->getType()) &&
           "peephole significantly changed types?");
      return Visit(op);
    }

    // If we're converting an r-value of non-atomic type to an r-value
    // of atomic type, just emit directly into the relevant sub-object.
    if (isToAtomic) {
      AggValueSlot valueDest = Dest;
      if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
        // Zero-initialize.  (Strictly speaking, we only need to initialize
        // the padding at the end, but this is simpler.)
        if (!Dest.isZeroed())
          CGF.EmitNullInitialization(Dest.getAddress(), atomicType);

        // Build a GEP to refer to the subobject.
        Address valueAddr =
            CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0,
                                        CharUnits());
        valueDest = AggValueSlot::forAddr(valueAddr,
                                          valueDest.getQualifiers(),
                                          valueDest.isExternallyDestructed(),
                                          valueDest.requiresGCollection(),
                                          valueDest.isPotentiallyAliased(),
                                          AggValueSlot::IsZeroed);
      }

      CGF.EmitAggExpr(E->getSubExpr(), valueDest);
      return;
    }

    // Otherwise, we're converting an atomic type to a non-atomic type.
    // Make an atomic temporary, emit into that, and then copy the value out.
    AggValueSlot atomicSlot =
      CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
    CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);

    Address valueAddr =
      Builder.CreateStructGEP(atomicSlot.getAddress(), 0, CharUnits());
    RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
    return EmitFinalDestCopy(valueType, rvalue);
  }

  case CK_LValueToRValue:
    // If we're loading from a volatile type, force the destination
    // into existence.
    if (E->getSubExpr()->getType().isVolatileQualified()) {
      EnsureDest(E->getType());
      return Visit(E->getSubExpr());
    }

    // fallthrough

  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_BuiltinFnToFnPtr:
  case CK_ZeroToOCLEvent:
  case CK_ZeroToOCLQueue:
  case CK_AddressSpaceConversion:
  case CK_IntToOCLSampler:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}

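/// Emit a call with an aggregate result.  If the call returns a reference,
/// load through it; otherwise let the callee write into the return value
/// slot and move into the destination if needed.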
void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
  EmitMoveFromReturnSlot(E, RV);
}

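/// Objective-C message sends are emitted like calls: into the return
/// value slot if possible, with a final move into Dest otherwise.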
void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
  EmitMoveFromReturnSlot(E, RV);
}

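/// For the comma operator, evaluate the LHS for its side effects only
/// and emit the RHS into the destination.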
void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
                                                    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E->getType(), LV);
}

/// Is the value of the given expression possibly a reference to or
/// into a __block variable?
static bool isBlockVarRef(const Expr *E) {
  // Make sure we look through parens.
  E = E->IgnoreParens();

  // Check for a direct reference to a __block variable.
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
    return (var && var->hasAttr<BlocksAttr>());
  }

  // More complicated stuff.

  // Binary operators.
  if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
    // For an assignment or pointer-to-member operation, just care
    // about the LHS.
    if (op->isAssignmentOp() || op->isPtrMemOp())
      return isBlockVarRef(op->getLHS());

    // For a comma, just care about the RHS.
    if (op->getOpcode() == BO_Comma)
      return isBlockVarRef(op->getRHS());

    // FIXME: pointer arithmetic?
    return false;

  // Check both sides of a conditional operator.
  } else if (const AbstractConditionalOperator *op
               = dyn_cast<AbstractConditionalOperator>(E)) {
    return isBlockVarRef(op->getTrueExpr())
        || isBlockVarRef(op->getFalseExpr());

  // OVEs are required to support BinaryConditionalOperators.
  } else if (const OpaqueValueExpr *op
               = dyn_cast<OpaqueValueExpr>(E)) {
    if (const Expr *src = op->getSourceExpr())
      return isBlockVarRef(src);

  // Casts are necessary to get things like (*(int*)&var) = foo().
  // We don't really care about the kind of cast here, except
  // we don't want to look through l2r casts, because it's okay
  // to get the *value* in a __block variable.
  } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
    if (cast->getCastKind() == CK_LValueToRValue)
      return false;
    return isBlockVarRef(cast->getSubExpr());

  // Handle unary operators.  Again, just aggressively look through
  // it, ignoring the operation.
  } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
    return isBlockVarRef(uop->getSubExpr());

  // Look into the base of a field access.
  } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
    return isBlockVarRef(mem->getBase());

  // Look into the base of a subscript.
  } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
    return isBlockVarRef(sub->getBase());
  }

  return false;
}

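/// Emit an aggregate assignment.  The tricky cases are __block LHSes
/// (the RHS may trigger a block copy that moves the variable) and
/// atomic LHSes (the copy must be an atomic store).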
void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  // If the LHS might be a __block variable, and the RHS can
  // potentially cause a block copy, we need to evaluate the RHS first
  // so that the assignment goes the right place.
  // This is pretty semantically fragile.
  if (isBlockVarRef(E->getLHS()) &&
      E->getRHS()->HasSideEffects(CGF.getContext())) {
    // Ensure that we have a destination, and evaluate the RHS into that.
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());

    // Now emit the LHS and copy into it.
    LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // That copy is an atomic copy if the LHS is atomic.
    if (LHS.getType()->isAtomicType() ||
        CGF.LValueIsSuitableForInlineAtomic(LHS)) {
      CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
      return;
    }

    EmitCopy(E->getLHS()->getType(),
             AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                                     needsGC(E->getLHS()->getType()),
                                     AggValueSlot::IsAliased),
             Dest);
    return;
  }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // If we have an atomic type, evaluate into the destination and then
  // do an atomic copy.
  if (LHS.getType()->isAtomicType() ||
      CGF.LValueIsSuitableForInlineAtomic(LHS)) {
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());
    CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
    return;
  }

  // Codegen the RHS so that it stores directly into the LHS.
  AggValueSlot LHSSlot =
    AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                            needsGC(E->getLHS()->getType()),
                            AggValueSlot::IsAliased);
  // A non-volatile aggregate destination might have a volatile member.
  if (!LHSSlot.isVolatile() &&
      CGF.hasVolatileMember(E->getLHS()->getType()))
    LHSSlot.setVolatile(true);

  CGF.EmitAggExpr(E->getRHS(), LHSSlot);

  // Copy into the destination if the assignment isn't ignored.
  EmitFinalDestCopy(E->getType(), LHS);
}

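/// Emit a conditional operator with an aggregate result: branch on the
/// condition and emit whichever arm is chosen into the shared slot.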
void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
                           CGF.getProfileCount(E));

  // Save whether the destination's lifetime is externally managed.
  bool isExternallyDestructed = Dest.isExternallyDestructed();

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  CGF.incrementProfileCounter(E);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that it's already being destructed.
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  CGF.EmitBlock(ContBlock);
}

void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr());
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  Address ArgValue = Address::invalid();
  Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  // If EmitVAArg fails, emit an error.
  if (!ArgPtr.isValid()) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }

  EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}

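/// Emit a temporary that needs a destructor: evaluate the subexpression
/// into the slot, then push the promised destructor cleanup unless the
/// slot's lifetime is already managed externally.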
void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether it was externally destructed.
  bool wasExternallyDestructed = Dest.isExternallyDestructed();
  EnsureDest(E->getType());

  // We're going to push a destructor if there isn't already one.
  Dest.setExternallyDestructed();

  Visit(E->getSubExpr());

  // Push that destructor we promised.
  if (!wasExternallyDestructed)
    CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
}

void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}

void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
    const CXXInheritedCtorInitExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitInheritedCXXConstructorCall(
      E->getConstructor(), E->constructsVBase(), Slot.getAddress(),
      E->inheritedFromVBase(), E);
}

void
AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitLambdaExpr(E, Slot);
}

void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CGF.enterFullExpression(E);
  CodeGenFunction::RunCleanupsScope cleanups(CGF);
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}

void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}

/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true.  This can return false if uncertain, so it just
/// handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0
  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  // +0.0
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  // int()
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer &&
        CGF.getTypes().isPointerZeroInitializable(E->getType());
  // '\0'
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}


void
AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
    return;
  } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
    return EmitNullInitializationToLValue(LV);
  } else if (isa<NoInitExpr>(E)) {
    // Do nothing.
    return;
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E);
    return CGF.EmitStoreThroughLValue(RV, LV);
  }

  switch (CGF.getEvaluationKind(type)) {
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
                                               AggValueSlot::IsDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                               AggValueSlot::IsNotAliased,
                                               Dest.isZeroed()));
    return;
  case TEK_Scalar:
    if (LV.isSimple()) {
      CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
    } else {
      CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
    }
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
    return;

  if (CGF.hasScalarEvaluationKind(type)) {
    // For non-aggregates, we can store the appropriate null constant.
    llvm::Value *null = CGF.CGM.EmitNullConstant(type);
    // Note that the following is not equivalent to
    // EmitStoreThroughBitfieldLValue for ARC types.
    if (lv.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
    } else {
      assert(lv.isSimple());
      CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
    }
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
  }
}

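/// Emit an InitListExpr: arrays are handled by EmitArrayInit; structs and
/// unions are initialized member by member, pushing EH cleanups for
/// already-initialized fields in case a later initializer throws.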
void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
    new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                             llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  if (E->isTransparent())
    return Visit(E->getInit(0));

  AggValueSlot Dest = EnsureSlot(E->getType());

  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    QualType elementType =
        CGF.getContext().getAsArrayType(E->getType())->getElementType();

    auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
    EmitArrayInit(Dest.getAddress(), AType, elementType, E);
    return;
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();

  // We'll need to enter cleanup scopes in case any of the element
  // initializers throws an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
  llvm::Instruction *cleanupDominator = nullptr;

  unsigned curInitIndex = 0;

  // Emit initialization of base classes.
  if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
    assert(E->getNumInits() >= CXXRD->getNumBases() &&
           "missing initializer for base class");
    for (auto &Base : CXXRD->bases()) {
      assert(!Base.isVirtual() && "should not see vbases here");
      auto *BaseRD = Base.getType()->getAsCXXRecordDecl();
      Address V = CGF.GetAddressOfDirectBaseInCompleteClass(
          Dest.getAddress(), CXXRD, BaseRD,
          /*isBaseVirtual*/ false);
      AggValueSlot AggSlot =
        AggValueSlot::forAddr(V, Qualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased);
      CGF.EmitAggExpr(E->getInit(curInitIndex++), AggSlot);

      if (QualType::DestructionKind dtorKind =
              Base.getType().isDestructedType()) {
        CGF.pushDestroy(dtorKind, V, Base.getType());
        cleanups.push_back(CGF.EHStack.stable_begin());
      }
    }
  }

  // Prepare a 'this' for CXXDefaultInitExprs.
  CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());

  if (record->isUnion()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      for (const auto *Field : record->fields())
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
    if (NumInitElements) {
      // Store the initializer into the field
      EmitInitializationToLValue(E->getInit(0), FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  for (const auto *field : record->fields()) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitfield())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(E->getType()))
      break;


    LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
    // We never generate write-barriers for initialized fields.
    LV.setNonGC(true);

    if (curInitIndex < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
    } else {
      // We're out of initializers; default-initialize to null
      EmitNullInitializationToLValue(LV);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    bool pushedCleanup = false;
    if (QualType::DestructionKind dtorKind
          = field->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (CGF.needsEHCleanup(dtorKind)) {
        if (!cleanupDominator)
          cleanupDominator = CGF.Builder.CreateAlignedLoad(
              CGF.Int8Ty,
              llvm::Constant::getNullValue(CGF.Int8PtrTy),
              CharUnits::One()); // placeholder

        CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
                        CGF.getDestroyer(dtorKind), false);
        cleanups.push_back(CGF.EHStack.stable_begin());
        pushedCleanup = true;
      }
    }

    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
    if (!pushedCleanup && LV.isSimple())
      if (llvm::GetElementPtrInst *GEP =
            dyn_cast<llvm::GetElementPtrInst>(LV.getPointer()))
        if (GEP->use_empty())
          GEP->eraseFromParent();
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  for (unsigned i = cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);

  // Destroy the placeholder if we made one.
  if (cleanupDominator)
    cleanupDominator->eraseFromParent();
}

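/// Emit an ArrayInitLoopExpr by looping over the array elements.  For
/// multidimensional arrays, only the innermost loop pushes a partial
/// destructor cleanup, spanning from the outermost array's beginning.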
void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                                            llvm::Value *outerBegin) {
  // Emit the common subexpression.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr());

  Address destPtr = EnsureSlot(E->getType()).getAddress();
  uint64_t numElements = E->getArraySize().getZExtValue();

  if (!numElements)
    return;

  // destPtr is an array*. Construct an elementType* by drilling down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = {zero, zero};
  llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getPointer(), indices,
                                                 "arrayinit.begin");

  // Prepare to special-case multidimensional array initialization: we avoid
  // emitting multiple destructor loops in that case.
  if (!outerBegin)
    outerBegin = begin;
  ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr());

  QualType elementType =
      CGF.getContext().getAsArrayType(E->getType())->getElementType();
  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
      destPtr.getAlignment().alignmentOfArrayElement(elementSize);

  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

  // Jump into the body.
  CGF.EmitBlock(bodyBB);
  llvm::PHINode *index =
      Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
  index->addIncoming(zero, entryBB);
  llvm::Value *element = Builder.CreateInBoundsGEP(begin, index);

  // Prepare for a cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  EHScopeStack::stable_iterator cleanup;
  if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
    if (outerBegin->getType() != element->getType())
      outerBegin = Builder.CreateBitCast(outerBegin, element->getType());
    CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType,
                                       elementAlign,
                                       CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();
  } else {
    dtorKind = QualType::DK_none;
  }

  // Emit the actual filler expression.
  {
    // Temporaries created in an array initialization loop are destroyed
    // at the end of each iteration.
    CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
    CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
    LValue elementLV =
        CGF.MakeAddrLValue(Address(element, elementAlign), elementType);

    if (InnerLoop) {
      // If the subexpression is an ArrayInitLoopExpr, share its cleanup.
      auto elementSlot = AggValueSlot::forLValue(
          elementLV, AggValueSlot::IsDestructed,
          AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased);
      AggExprEmitter(CGF, elementSlot, false)
          .VisitArrayInitLoopExpr(InnerLoop, outerBegin);
    } else
      EmitInitializationToLValue(E->getSubExpr(), elementLV);
  }

  // Move on to the next element.
  llvm::Value *nextIndex = Builder.CreateNUWAdd(
      index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
  index->addIncoming(nextIndex, Builder.GetInsertBlock());

  // Leave the loop if we're done.
  llvm::Value *done = Builder.CreateICmpEQ(
      nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements),
      "arrayinit.done");
  llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
  Builder.CreateCondBr(done, endBB, bodyBB);

  CGF.EmitBlock(endBB);

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind)
    CGF.DeactivateCleanupBlock(cleanup, index);
}

1421void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
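  // A DesignatedInitUpdateExpr re-initializes part of an already-emitted
  // base initializer, e.g. (illustrative, assuming a function f returning
  // struct B { int c, d; }):
  //   struct X { struct B b; } x = { .b = f(), .b.c = 2 };
  // We emit the base expression first, then replay the updater on top.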
  AggValueSlot Dest = EnsureSlot(E->getType());

  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  EmitInitializationToLValue(E->getBase(), DestLV);
  VisitInitListExpr(E->getUpdater());
}

//===----------------------------------------------------------------------===//
//                        Entry Points into this File
//===----------------------------------------------------------------------===//

/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
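///
/// For example (illustrative only), for
///   struct { int a; int pad[15]; } s = { 42 };
/// this returns about sizeof(int): only the store of 42 is non-zero, so the
/// caller may decide to memset the whole object first.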
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0 and 0.0 won't require any non-zero stores!
  if (isSimpleZero(E, CGF)) return CharUnits::Zero();

  // If this is an initlist expr, sum up the sizes of the (present)
  // elements.  If this is something weird, assume the whole thing is non-zero.
  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
  if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
    return CGF.getContext().getTypeSizeInChars(E->getType());

  // InitListExprs for structs have to be handled carefully.  If there are
  // reference members, we need to consider the size of the reference, not the
  // referencee.  InitListExprs for unions and arrays can't have references.
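  // For example (illustrative only): given
  //   struct R { int &ref; } r = { x };
  // the stored value is a pointer to x, so we count pointer-width bytes, and
  // a bound reference is never a "zero" store.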
  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
    if (!RT->isUnionType()) {
      RecordDecl *SD = RT->getDecl();
      CharUnits NumNonZeroBytes = CharUnits::Zero();

      unsigned ILEElement = 0;
      if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD))
        while (ILEElement != CXXRD->getNumBases())
          NumNonZeroBytes +=
              GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF);
      for (const auto *Field : SD->fields()) {
        // We're done once we hit the flexible array member or run out of
        // InitListExpr elements.
        if (Field->getType()->isIncompleteArrayType() ||
            ILEElement == ILE->getNumInits())
          break;
        if (Field->isUnnamedBitfield())
          continue;

        const Expr *E = ILE->getInit(ILEElement++);

        // Reference values are always non-null and have the width of a pointer.
        if (Field->getType()->isReferenceType())
          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
              CGF.getTarget().getPointerWidth(0));
        else
          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
      }

      return NumNonZeroBytes;
    }
  }

  CharUnits NumNonZeroBytes = CharUnits::Zero();
  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
  return NumNonZeroBytes;
}

/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
/// zeros in it, emit a memset and avoid storing the individual zeros.
///
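/// Illustrative arithmetic (not from the surrounding code): for
///   struct { int a; char buf[252]; } s = { 1 };
/// only 4 of 256 bytes are non-zero, so a single memset followed by one
/// scalar store is much cheaper than 256 bytes of explicit stores.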
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
                                     CodeGenFunction &CGF) {
  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
  // volatile stores.
  if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
    return;

  // C++ objects with a user-declared constructor don't need zeroing.
  if (CGF.getLangOpts().CPlusPlus)
    if (const RecordType *RT = CGF.getContext()
                       .getBaseElementType(E->getType())->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->hasUserDeclaredConstructor())
        return;
    }

  // If the type is 16 bytes or smaller, prefer individual stores over memset.
  CharUnits Size = CGF.getContext().getTypeSizeInChars(E->getType());
  if (Size <= CharUnits::fromQuantity(16))
    return;

  // Check to see if over 3/4 of the initializer is known to be zero.  If so,
  // we prefer to emit memset + individual stores for the rest.
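  // (Illustrative: for a 64-byte aggregate with 10 non-zero bytes,
  // 10*4 = 40 <= 64, so we memset; with 20 non-zero bytes, 20*4 = 80 > 64,
  // so we skip the memset and store everything individually.)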
  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
  if (NumNonZeroBytes*4 > Size)
    return;

  // Okay, it seems like a good idea to use an initial memset; emit the call.
  llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());

  Address Loc = Slot.getAddress();
  Loc = CGF.Builder.CreateElementBitCast(Loc, CGF.Int8Ty);
  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);

  // Tell the AggExprEmitter that the slot is known zero.
  Slot.setZeroed();
}

/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type.  The result is computed into the given slot; if the slot is ignored,
/// the value of the aggregate expression is not needed.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
  assert(E && hasAggregateEvaluationKind(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
         "slot has bits but no address");

  // Optimize the slot if possible.
  CheckAggExprForMemSetUse(Slot, E, *this);

  AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E));
}

LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
  assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
  Address Temp = CreateMemTemp(E->getType());
  LValue LV = MakeAddrLValue(Temp, E->getType());
  EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsNotAliased));
  return LV;
}

void CodeGenFunction::EmitAggregateCopy(Address DestPtr,
                                        Address SrcPtr, QualType Ty,
                                        bool isVolatile,
                                        bool isAssignment) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() ||
              Record->hasTrivialCopyAssignment() ||
              Record->hasTrivialMoveConstructor() ||
              Record->hasTrivialMoveAssignment() ||
              Record->isUnion()) &&
             "Trying to aggregate-copy a type without a trivial copy/move "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.
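  // For example (illustrative only), the self-assignment
  //   struct T t; t = t;
  // lowers to a memcpy whose source and destination pointers are equal,
  // which the paragraph above argues is safe in practice.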

  // Get data size info for this aggregate. If this is an assignment,
  // don't copy the tail padding, because we might be assigning into a
  // base subobject where the tail padding is claimed.  Otherwise,
  // copying it is fine.
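  // (Illustrative: for `struct B { int i; char c; }` the full size may be 8
  // bytes but the data size only 5; a derived class may place its own field
  // in B's tail padding, so an assignment to a base subobject must not
  // write those bytes.)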
  std::pair<CharUnits, CharUnits> TypeInfo;
  if (isAssignment)
    TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
  else
    TypeInfo = getContext().getTypeInfoInChars(Ty);

  llvm::Value *SizeVal = nullptr;
  if (TypeInfo.first.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
            getContext().getAsArrayType(Ty))) {
      QualType BaseEltTy;
      SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
      TypeInfo = getContext().getTypeInfoDataSizeInChars(BaseEltTy);
      std::pair<CharUnits, CharUnits> LastElementTypeInfo;
      if (!isAssignment)
        LastElementTypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
      assert(!TypeInfo.first.isZero());
      SizeVal = Builder.CreateNUWMul(
          SizeVal,
          llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
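      // The last element keeps its tail padding on a full copy; the two
      // adjustments below compute (illustratively)
      //   total = n*dataSize - dataSize + fullSize.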
      if (!isAssignment) {
        SizeVal = Builder.CreateNUWSub(
            SizeVal,
            llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
        SizeVal = Builder.CreateNUWAdd(
            SizeVal, llvm::ConstantInt::get(
                         SizeTy, LastElementTypeInfo.first.getQuantity()));
      }
    }
  }
  if (!SizeVal) {
    SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity());
  }

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.

  DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
  SrcPtr = Builder.CreateElementBitCast(SrcPtr, Int8Ty);

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
    // fall through
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                    SizeVal);
      return;
    }
  } else if (Ty->isArrayType()) {
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                      SizeVal);
        return;
      }
    }
  }

  auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);

  // Determine the metadata to describe the position of any padding in this
  // memcpy, as well as the TBAA tags for the members of the struct, in case
  // the optimizer wishes to expand it into scalar memory operations.
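  // (Illustrative !tbaa.struct shape, not emitted verbatim here:
  //   !{ i64 0, i64 4, !intTag, i64 8, i64 4, !intTag }
  // i.e. an offset/size pair plus a TBAA tag per member, which lets the
  // optimizer rewrite the memcpy as per-field scalar loads and stores.)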
  if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
    Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);
}
